max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
tes_client/__init__.py | elixir-europe/TES-cli | 3 | 6624651 | """
Client for the mockup GA4GH Task Execution Service `mock-TES`.
"""
from urllib.parse import urlparse
from bravado.client import SwaggerClient
from bravado_core.formatter import DEFAULT_FORMATS
from bravado.requests_client import RequestsClient
# Bravado configuration
DEFAULT_CONFIG = {
"validate_requests": False,
"validate_responses": False,
"headers": None,
"formats": [DEFAULT_FORMATS["int64"]],
"include_missing_properties": True,
}
class Client:
    """Client for the mock-TES service.

    Wraps a bravado ``SwaggerClient`` built from the service's
    ``swagger.json`` and exposes thin helpers around the TaskService
    operations.
    """

    def __init__(
        self,
        url,
        jwt=None,
        config=DEFAULT_CONFIG
    ):
        """Create a client for the service at ``url``.

        :param url: base URL of the mock-TES service
        :param jwt: optional JSON Web Token; when given, every request
            carries an ``Authorization: Bearer <jwt>`` header
        :param config: bravado configuration dict
        """
        swagger_path = "{url}/swagger.json".format(url=url.rstrip("/"))
        if jwt:
            http_client = RequestsClient()
            http_client.set_api_key(
                host=urlparse(url).netloc,
                api_key=f"Bearer {jwt}",
                param_name="Authorization",
                param_in="header"
            )
        else:
            http_client = None
        # BUG FIX: the authenticated http_client was created but never
        # handed to bravado, so the JWT was silently ignored.
        self.models = SwaggerClient.from_url(
            swagger_path,
            http_client=http_client,
            config=config
        )
        self.client = self.models.TaskService

    def getTaskInfo(
        self,
        timeout: float = 3,
        **kwargs,
    ):
        """Fetch task info; ``kwargs`` populate a ``tesResources`` model.

        :param timeout: seconds to wait for the HTTP future
        """
        tesResources = self.models.get_model("tesResources")
        request = tesResources(
            **kwargs,
        )
        return self.client.GetTaskInfo(
            body=request
        ).result(timeout=timeout)

    def updateTaskInfoConfig(
        self,
        currency,
        unit_costs,
        timeout: float = 3,
    ):
        """Update the service's cost configuration.

        :param currency: currency code for the unit costs
        :param unit_costs: mapping with keys ``cpu_usage``,
            ``memory_consumption``, ``data_storage`` and ``data_transfer``
        :param timeout: seconds to wait for the HTTP future
        """
        tesTaskInfoConfig = self.models.get_model("tesTaskInfoConfig")
        request = tesTaskInfoConfig(
            currency=currency,
            unit_costs={
                "cpu_usage": unit_costs["cpu_usage"],
                "memory_consumption": unit_costs["memory_consumption"],
                "data_storage": unit_costs["data_storage"],
                "data_transfer": unit_costs["data_transfer"],
            }
        )
        return self.client.UpdateTaskInfoConfig(
            body=request
        ).result(timeout=timeout)
| """
Client for the mockup GA4GH Task Execution Service `mock-TES`.
"""
from urllib.parse import urlparse
from bravado.client import SwaggerClient
from bravado_core.formatter import DEFAULT_FORMATS
from bravado.requests_client import RequestsClient
# Bravado configuration
DEFAULT_CONFIG = {
"validate_requests": False,
"validate_responses": False,
"headers": None,
"formats": [DEFAULT_FORMATS["int64"]],
"include_missing_properties": True,
}
class Client:
    """Client for the mock-TES service.

    Wraps a bravado ``SwaggerClient`` built from the service's
    ``swagger.json`` and exposes thin helpers around the TaskService
    operations.
    """

    def __init__(
        self,
        url,
        jwt=None,
        config=DEFAULT_CONFIG
    ):
        """Create a client for the service at ``url``.

        :param url: base URL of the mock-TES service
        :param jwt: optional JSON Web Token; when given, every request
            carries an ``Authorization: Bearer <jwt>`` header
        :param config: bravado configuration dict
        """
        swagger_path = "{url}/swagger.json".format(url=url.rstrip("/"))
        if jwt:
            http_client = RequestsClient()
            http_client.set_api_key(
                host=urlparse(url).netloc,
                api_key=f"Bearer {jwt}",
                param_name="Authorization",
                param_in="header"
            )
        else:
            http_client = None
        # BUG FIX: the authenticated http_client was created but never
        # handed to bravado, so the JWT was silently ignored.
        self.models = SwaggerClient.from_url(
            swagger_path,
            http_client=http_client,
            config=config
        )
        self.client = self.models.TaskService

    def getTaskInfo(
        self,
        timeout: float = 3,
        **kwargs,
    ):
        """Fetch task info; ``kwargs`` populate a ``tesResources`` model.

        :param timeout: seconds to wait for the HTTP future
        """
        tesResources = self.models.get_model("tesResources")
        request = tesResources(
            **kwargs,
        )
        return self.client.GetTaskInfo(
            body=request
        ).result(timeout=timeout)

    def updateTaskInfoConfig(
        self,
        currency,
        unit_costs,
        timeout: float = 3,
    ):
        """Update the service's cost configuration.

        :param currency: currency code for the unit costs
        :param unit_costs: mapping with keys ``cpu_usage``,
            ``memory_consumption``, ``data_storage`` and ``data_transfer``
        :param timeout: seconds to wait for the HTTP future
        """
        tesTaskInfoConfig = self.models.get_model("tesTaskInfoConfig")
        request = tesTaskInfoConfig(
            currency=currency,
            unit_costs={
                "cpu_usage": unit_costs["cpu_usage"],
                "memory_consumption": unit_costs["memory_consumption"],
                "data_storage": unit_costs["data_storage"],
                "data_transfer": unit_costs["data_transfer"],
            }
        )
        return self.client.UpdateTaskInfoConfig(
            body=request
        ).result(timeout=timeout)
| en | 0.700297 | Client for the mockup GA4GH Task Execution Service `mock-TES`. # Bravado configuration Client for mock-TES service. | 2.157849 | 2 |
coro/ldap/test/t0.py | amitdev/shrapnel | 98 | 6624652 | # -*- Mode: Python -*-
import unittest
import sys
from coro.asn1.ber import *
from coro.ldap.query import *
C = 'context'
pq_tests = [
# simple equality
('(xxx=yyy)',
((C, 3, ['xxx', 'yyy']),
12)),
# simple expression, plus 'present'
('(|(xx=y)(zz=*))',
((C, 1, [(C, 3, ['xx', 'y']), (C, 7, 'zz')]),
15)),
# nary expressions
('(|(a=b)(b=c)(c=d)(e=f)(f=g)(h=i))',
((C, 1, [(C, 3, ['a', 'b']), (C, 3, ['b', 'c']), (C, 3, ['c', 'd']), (C, 3, ['e', 'f']), (C, 3, ['f', 'g']), (C, 3, ['h', 'i'])]), # noqa
50)),
('(|(!(a=*))(&(b=c)(d=e))(x<=y))',
((C, 1, [(C, 2, [(C, 7, 'a')]), (C, 0, [(C, 3, ['b', 'c']), (C, 3, ['d', 'e'])]), (C, 6, ['x', 'y'])]),
33)),
# approximate match
('(zz~=yy)', ((C, 8, ['zz', 'yy']), 10)),
# substring
('(a=ins*tiga*tor)', ((C, 4, ['a', [(C, 0, 'ins'), (C, 1, 'tiga'), (C, 2, 'tor')]]), 23)),
('(a=*y)', ((C, 4, ['a', [(C, 2, 'y')]]), 10)),
('(a=y*)', ((C, 4, ['a', [(C, 0, 'y')]]), 10)),
('(a=*y*)', ((C, 4, ['a', [(C, 1, 'y')]]), 10)),
('(a=*x*y)', ((C, 4, ['a', [(C, 1, 'x'), (C, 2, 'y')]]), 13)),
('(a=*x*y*)', ((C, 4, ['a', [(C, 1, 'x'), (C, 1, 'y')]]), 13)),
('(a=*x*y*z)', ((C, 4, ['a', [(C, 1, 'x'), (C, 1, 'y'), (C, 2, 'z')]]), 16)),
# syntax errors
('(a=', QuerySyntaxError),
('(a<b)', QuerySyntaxError),
# good hex escape
('(a=some\\AAthing)', ((C, 3, ['a', 'some\252thing']), 17)),
# bad hex escape
('(a=some\\AZthing)', QuerySyntaxError),
# upper/lower case hex escape
('(a=xy\\Aaz)', ((C, 3, ['a', 'xy\252z']), 11)),
# escaped splat
('(a=x*y\\2az)', ((C, 4, ['a', [(C, 0, 'x'), (C, 2, 'y*z')]]), 15)),
# illegal splat
('(a~=sam*son)', QuerySyntaxError),
# junk/illegal
('junk', QuerySyntaxError),
# lots of parens
(('(' * 100), QuerySyntaxError),
# expression too complex
(('(!' * 55) + '(x=y)' + (')' * 55), QuerySyntaxError),
# expression not too complex
(('(!' * 10) + '(x=y)' + (')' * 10),
((C, 2, [(C, 2, [(C, 2, [(C, 2, [(C, 2, [(C, 2, [(C, 2, [(C, 2, [(C, 2, [(C, 2, [(C, 3, ['x', 'y'])])])])])])])])])])]), # noqa
28)),
]
class parse_query_test (unittest.TestCase):
    """Check parse_query() against the (query, expected) table in pq_tests."""

    def runTest (self):
        # Each pq_tests entry is (query, expected) where 'expected' is
        # either the decoded BER tuple or an exception class that
        # parse_query should raise for a malformed query.
        for q, e in pq_tests:
            if isinstance(e, type) and issubclass(e, Exception):
                # assertRaises replaces the old bare 'except:' clause,
                # which could mask unrelated failures by merely comparing
                # their type instead of letting them propagate.
                self.assertRaises(e, parse_query, q)
            else:
                self.assertEqual(decode(parse_query(q)), e)
def suite():
    """Build the test suite containing the query-parser test."""
    tests = unittest.TestSuite()
    tests.addTest(parse_query_test())
    return tests
if __name__ == '__main__':
unittest.main (defaultTest='suite')
| # -*- Mode: Python -*-
import unittest
import sys
from coro.asn1.ber import *
from coro.ldap.query import *
C = 'context'
pq_tests = [
# simple equality
('(xxx=yyy)',
((C, 3, ['xxx', 'yyy']),
12)),
# simple expression, plus 'present'
('(|(xx=y)(zz=*))',
((C, 1, [(C, 3, ['xx', 'y']), (C, 7, 'zz')]),
15)),
# nary expressions
('(|(a=b)(b=c)(c=d)(e=f)(f=g)(h=i))',
((C, 1, [(C, 3, ['a', 'b']), (C, 3, ['b', 'c']), (C, 3, ['c', 'd']), (C, 3, ['e', 'f']), (C, 3, ['f', 'g']), (C, 3, ['h', 'i'])]), # noqa
50)),
('(|(!(a=*))(&(b=c)(d=e))(x<=y))',
((C, 1, [(C, 2, [(C, 7, 'a')]), (C, 0, [(C, 3, ['b', 'c']), (C, 3, ['d', 'e'])]), (C, 6, ['x', 'y'])]),
33)),
# approximate match
('(zz~=yy)', ((C, 8, ['zz', 'yy']), 10)),
# substring
('(a=ins*tiga*tor)', ((C, 4, ['a', [(C, 0, 'ins'), (C, 1, 'tiga'), (C, 2, 'tor')]]), 23)),
('(a=*y)', ((C, 4, ['a', [(C, 2, 'y')]]), 10)),
('(a=y*)', ((C, 4, ['a', [(C, 0, 'y')]]), 10)),
('(a=*y*)', ((C, 4, ['a', [(C, 1, 'y')]]), 10)),
('(a=*x*y)', ((C, 4, ['a', [(C, 1, 'x'), (C, 2, 'y')]]), 13)),
('(a=*x*y*)', ((C, 4, ['a', [(C, 1, 'x'), (C, 1, 'y')]]), 13)),
('(a=*x*y*z)', ((C, 4, ['a', [(C, 1, 'x'), (C, 1, 'y'), (C, 2, 'z')]]), 16)),
# syntax errors
('(a=', QuerySyntaxError),
('(a<b)', QuerySyntaxError),
# good hex escape
('(a=some\\AAthing)', ((C, 3, ['a', 'some\252thing']), 17)),
# bad hex escape
('(a=some\\AZthing)', QuerySyntaxError),
# upper/lower case hex escape
('(a=xy\\Aaz)', ((C, 3, ['a', 'xy\252z']), 11)),
# escaped splat
('(a=x*y\\2az)', ((C, 4, ['a', [(C, 0, 'x'), (C, 2, 'y*z')]]), 15)),
# illegal splat
('(a~=sam*son)', QuerySyntaxError),
# junk/illegal
('junk', QuerySyntaxError),
# lots of parens
(('(' * 100), QuerySyntaxError),
# expression too complex
(('(!' * 55) + '(x=y)' + (')' * 55), QuerySyntaxError),
# expression not too complex
(('(!' * 10) + '(x=y)' + (')' * 10),
((C, 2, [(C, 2, [(C, 2, [(C, 2, [(C, 2, [(C, 2, [(C, 2, [(C, 2, [(C, 2, [(C, 2, [(C, 3, ['x', 'y'])])])])])])])])])])]), # noqa
28)),
]
class parse_query_test (unittest.TestCase):
    """Check parse_query() against the (query, expected) table in pq_tests."""

    def runTest (self):
        # Each pq_tests entry is (query, expected) where 'expected' is
        # either the decoded BER tuple or an exception class that
        # parse_query should raise for a malformed query.
        for q, e in pq_tests:
            if isinstance(e, type) and issubclass(e, Exception):
                # assertRaises replaces the old bare 'except:' clause,
                # which could mask unrelated failures by merely comparing
                # their type instead of letting them propagate.
                self.assertRaises(e, parse_query, q)
            else:
                self.assertEqual(decode(parse_query(q)), e)
def suite():
    """Build the test suite containing the query-parser test."""
    tests = unittest.TestSuite()
    tests.addTest(parse_query_test())
    return tests
if __name__ == '__main__':
unittest.main (defaultTest='suite')
| en | 0.573388 | # -*- Mode: Python -*- # simple equality # simple expression, plus 'present' # nary expressions # noqa # approximate match # substring # syntax errors # good hex escape # bad hex escape # upper/lower case hex escape # escaped splat # illegal splat # junk/illegal # lots of parens # expression too complex # expression not too complex # noqa | 2.184822 | 2 |
utils/PyRSS2Gen.py | louis-pre/NewsBlur | 3,073 | 6624653 | <filename>utils/PyRSS2Gen.py
"""PyRSS2Gen - A Python library for generating RSS 2.0 feeds."""
__name__ = "PyRSS2Gen"
__version__ = (1, 0, 0)
__author__ = "<NAME> <<EMAIL>>"
_generator_name = __name__ + "-" + ".".join(map(str, __version__))
import datetime
# Could make this the base class; will need to add 'publish'
class WriteXmlMixin:
    """Mixin adding XML serialization to any object with a publish() method."""

    def write_xml(self, outfile, encoding = "iso-8859-1"):
        """Write this object as an XML document to the file-like ``outfile``."""
        from xml.sax import saxutils
        handler = saxutils.XMLGenerator(outfile, encoding)
        handler.startDocument()
        self.publish(handler)
        handler.endDocument()

    def to_xml(self, encoding = "iso-8859-1"):
        """Return this object serialized as an XML string."""
        # BUG FIX: the old "import io as StringIO" bound the io module to
        # the name StringIO only, so the subsequent io.StringIO() call
        # raised NameError.  A plain import is all Python 3 needs.
        import io
        f = io.StringIO()
        self.write_xml(f, encoding)
        return f.getvalue()
def _element(handler, name, obj, d = {}):
if isinstance(obj, str) or obj is None:
# special-case handling to make the API easier
# to use for the common case.
handler.startElement(name, d)
if obj is not None:
handler.characters(obj)
handler.endElement(name)
else:
# It better know how to emit the correct XML.
obj.publish(handler)
def _opt_element(handler, name, obj):
    """Emit element ``name`` only when ``obj`` is not None; else do nothing."""
    if obj is not None:
        _element(handler, name, obj)
def _format_date(dt):
"""convert a datetime into an RFC 822 formatted date
Input date must be in GMT.
"""
# Looks like:
# Sat, 07 Sep 2002 00:00:01 GMT
# Can't use strftime because that's locale dependent
#
# Isn't there a standard way to do this for Python? The
# rfc822 and email.Utils modules assume a timestamp. The
# following is based on the rfc822 module.
return "%s, %02d %s %04d %02d:%02d:%02d GMT" % (
["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"][dt.weekday()],
dt.day,
["Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"][dt.month-1],
dt.year, dt.hour, dt.minute, dt.second)
##
# A couple simple wrapper objects for the fields which
# take a simple value other than a string.
class IntElement:
    """Implements the 'publish' API for integers.

    Takes the tag name and the integer value to publish.  (Could be used
    for anything whose str() form is valid XML character data.)
    """

    element_attrs = {}

    def __init__(self, name, val):
        self.name = name
        self.val = val

    def publish(self, handler):
        # Convert to text once, then emit a single element around it.
        text = str(self.val)
        handler.startElement(self.name, self.element_attrs)
        handler.characters(text)
        handler.endElement(self.name)
class DateElement:
    """Implements the 'publish' API for a datetime.datetime.

    Takes the tag name and the datetime to publish; the datetime is
    rendered as an RFC 2822 timestamp (4-digit year).
    """

    def __init__(self, name, dt):
        self.name = name
        self.dt = dt

    def publish(self, handler):
        # Format first, then emit as a plain text element.
        stamp = _format_date(self.dt)
        _element(handler, self.name, stamp)
####
class Category:
    """Publish a category element, optionally qualified by a domain."""

    def __init__(self, category, domain = None):
        self.category = category
        self.domain = domain

    def publish(self, handler):
        # The domain, when present, is carried as an attribute.
        attrs = {} if self.domain is None else {"domain": self.domain}
        _element(handler, "category", self.category, attrs)
class Cloud:
    """Publish a cloud element (publish/subscribe endpoint for the feed)."""

    def __init__(self, domain, port, path,
                 registerProcedure, protocol):
        self.domain = domain
        self.port = port
        self.path = path
        self.registerProcedure = registerProcedure
        self.protocol = protocol

    def publish(self, handler):
        # All cloud information is carried as attributes; the element
        # itself has no character data.
        attrs = {
            "domain": self.domain,
            "port": str(self.port),
            "path": self.path,
            "registerProcedure": self.registerProcedure,
            "protocol": self.protocol,
        }
        _element(handler, "cloud", None, attrs)
class Image:
    """Publish a channel image element."""

    element_attrs = {}

    def __init__(self, url, title, link,
                 width = None, height = None, description = None):
        self.url = url
        self.title = title
        self.link = link
        self.width = width
        self.height = height
        self.description = description

    def publish(self, handler):
        handler.startElement("image", self.element_attrs)

        _element(handler, "url", self.url)
        _element(handler, "title", self.title)
        _element(handler, "link", self.link)

        # Plain ints are wrapped in IntElement so they publish as text;
        # anything else (None or a pre-wrapped element) passes through.
        width = self.width
        if isinstance(width, int):
            width = IntElement("width", width)
        _opt_element(handler, "width", width)

        height = self.height
        if isinstance(height, int):
            height = IntElement("height", height)
        _opt_element(handler, "height", height)

        _opt_element(handler, "description", self.description)
        handler.endElement("image")
class Guid:
    """Publish a guid element.

    Defaults to being a permalink, which is the assumption if the
    attribute is omitted; hence plain strings are always permalinks.
    """

    def __init__(self, guid, isPermaLink = 1):
        self.guid = guid
        self.isPermaLink = isPermaLink

    def publish(self, handler):
        # RSS expects the literal strings "true"/"false".
        flag = "true" if self.isPermaLink else "false"
        _element(handler, "guid", self.guid, {"isPermaLink": flag})
class TextInput:
    """Publish a textInput element (rarely used in practice)."""

    element_attrs = {}

    def __init__(self, title, description, name, link):
        self.title = title
        self.description = description
        self.name = name
        self.link = link

    def publish(self, handler):
        handler.startElement("textInput", self.element_attrs)
        # The four sub-elements are emitted in the order required by RSS.
        for tag, value in (("title", self.title),
                           ("description", self.description),
                           ("name", self.name),
                           ("link", self.link)):
            _element(handler, tag, value)
        handler.endElement("textInput")
class Enclosure:
    """Publish an enclosure element (media file attached to an item)."""

    def __init__(self, url, length, type):
        self.url = url
        self.length = length
        self.type = type

    def publish(self, handler):
        # Enclosure data is attribute-only; the element has no text.
        attrs = {
            "url": self.url,
            "length": str(self.length),
            "type": self.type,
        }
        _element(handler, "enclosure", None, attrs)
class Source:
    """Publish the item's original source, used by aggregators."""

    def __init__(self, name, url):
        self.name = name
        self.url = url

    def publish(self, handler):
        # The URL is carried as an attribute; the name is the element text.
        _element(handler, "source", self.name, {"url": self.url})
class SkipHours:
    """Publish the skipHours element.

    Takes a list of hours, as integers.
    """

    element_attrs = {}

    def __init__(self, hours):
        self.hours = hours

    def publish(self, handler):
        # Emit nothing at all when the list is empty or None.
        if not self.hours:
            return
        handler.startElement("skipHours", self.element_attrs)
        for hour in self.hours:
            _element(handler, "hour", str(hour))
        handler.endElement("skipHours")
class SkipDays:
    """Publish the skipDays element.

    Takes a list of days as strings.
    """

    element_attrs = {}

    def __init__(self, days):
        self.days = days

    def publish(self, handler):
        # Emit nothing at all when the list is empty or None.
        if not self.days:
            return
        handler.startElement("skipDays", self.element_attrs)
        for day in self.days:
            _element(handler, "day", day)
        handler.endElement("skipDays")
class RSS2(WriteXmlMixin):
    """The main RSS class.

    Stores the channel attributes, with the "category" elements under
    ".categories" and the RSS items under ".items".
    """

    rss_attrs = {"version": "2.0"}
    element_attrs = {}

    def __init__(self,
                 title,
                 link,
                 description,

                 language = None,
                 copyright = None,
                 managingEditor = None,
                 webMaster = None,
                 pubDate = None,        # a datetime, *in* *GMT*
                 lastBuildDate = None,  # a datetime
                 categories = None,     # list of strings or Category
                 generator = _generator_name,
                 docs = "http://blogs.law.harvard.edu/tech/rss",
                 cloud = None,          # a Cloud
                 ttl = None,            # integer number of minutes
                 image = None,          # an Image
                 rating = None,         # a string; I don't know how it's used
                 textInput = None,      # a TextInput
                 skipHours = None,      # a SkipHours with a list of integers
                 skipDays = None,       # a SkipDays with a list of strings
                 items = None,          # list of RSSItems
                 ):
        self.title = title
        self.link = link
        self.description = description
        self.language = language
        self.copyright = copyright
        self.managingEditor = managingEditor
        self.webMaster = webMaster
        self.pubDate = pubDate
        self.lastBuildDate = lastBuildDate
        # Avoid sharing one mutable list across instances.
        self.categories = [] if categories is None else categories
        self.generator = generator
        self.docs = docs
        self.cloud = cloud
        self.ttl = ttl
        self.image = image
        self.rating = rating
        self.textInput = textInput
        self.skipHours = skipHours
        self.skipDays = skipDays
        self.items = [] if items is None else items

    def publish(self, handler):
        """Emit the full <rss><channel>...</channel></rss> tree."""
        handler.startElement("rss", self.rss_attrs)
        handler.startElement("channel", self.element_attrs)
        _element(handler, "title", self.title)
        _element(handler, "link", self.link)
        _element(handler, "description", self.description)

        self.publish_extensions(handler)

        _opt_element(handler, "language", self.language)
        _opt_element(handler, "copyright", self.copyright)
        _opt_element(handler, "managingEditor", self.managingEditor)
        _opt_element(handler, "webMaster", self.webMaster)

        pubDate = self.pubDate
        if isinstance(pubDate, datetime.datetime):
            pubDate = DateElement("pubDate", pubDate)
        _opt_element(handler, "pubDate", pubDate)

        lastBuildDate = self.lastBuildDate
        if isinstance(lastBuildDate, datetime.datetime):
            lastBuildDate = DateElement("lastBuildDate", lastBuildDate)
        _opt_element(handler, "lastBuildDate", lastBuildDate)

        for category in self.categories:
            if isinstance(category, str):
                category = Category(category)
            category.publish(handler)

        _opt_element(handler, "generator", self.generator)
        _opt_element(handler, "docs", self.docs)

        if self.cloud is not None:
            self.cloud.publish(handler)

        ttl = self.ttl
        if isinstance(self.ttl, int):
            ttl = IntElement("ttl", ttl)
        # BUG FIX: the element name was misspelled "tt", which produced
        # invalid RSS output; the spec requires "ttl".
        _opt_element(handler, "ttl", ttl)

        if self.image is not None:
            self.image.publish(handler)

        _opt_element(handler, "rating", self.rating)

        if self.textInput is not None:
            self.textInput.publish(handler)
        if self.skipHours is not None:
            self.skipHours.publish(handler)
        if self.skipDays is not None:
            self.skipDays.publish(handler)

        for item in self.items:
            item.publish(handler)

        handler.endElement("channel")
        handler.endElement("rss")

    def publish_extensions(self, handler):
        # Hook for derived classes: emit extra output after the three
        # required channel fields.
        pass
class RSSItem(WriteXmlMixin):
    """Publish an RSS item.

    At least one of 'title' or 'description' must be supplied.
    """

    element_attrs = {}

    def __init__(self,
                 title = None,        # string
                 link = None,         # url as string
                 description = None,  # string
                 author = None,       # email address as string
                 categories = None,   # list of string or Category
                 comments = None,     # url as string
                 enclosure = None,    # an Enclosure
                 guid = None,         # a unique string
                 pubDate = None,      # a datetime
                 source = None,       # a Source
                 ):
        if title is None and description is None:
            raise TypeError(
                "must define at least one of 'title' or 'description'")
        self.title = title
        self.link = link
        self.description = description
        self.author = author
        # Avoid sharing one mutable list across instances.
        self.categories = [] if categories is None else categories
        self.comments = comments
        self.enclosure = enclosure
        self.guid = guid
        self.pubDate = pubDate
        self.source = source

    def publish(self, handler):
        """Emit the <item>...</item> element for this entry."""
        handler.startElement("item", self.element_attrs)
        _opt_element(handler, "title", self.title)
        _opt_element(handler, "link", self.link)
        self.publish_extensions(handler)
        _opt_element(handler, "description", self.description)
        _opt_element(handler, "author", self.author)

        for category in self.categories:
            # Bare strings are promoted to Category objects.
            if isinstance(category, str):
                category = Category(category)
            category.publish(handler)

        _opt_element(handler, "comments", self.comments)
        if self.enclosure is not None:
            self.enclosure.publish(handler)
        _opt_element(handler, "guid", self.guid)

        pub_date = self.pubDate
        if isinstance(pub_date, datetime.datetime):
            pub_date = DateElement("pubDate", pub_date)
        _opt_element(handler, "pubDate", pub_date)

        if self.source is not None:
            self.source.publish(handler)
        handler.endElement("item")

    def publish_extensions(self, handler):
        # Hook for derived classes: emit extra output after the title
        # and link elements.
        pass
| <filename>utils/PyRSS2Gen.py
"""PyRSS2Gen - A Python library for generating RSS 2.0 feeds."""
__name__ = "PyRSS2Gen"
__version__ = (1, 0, 0)
__author__ = "<NAME> <<EMAIL>>"
_generator_name = __name__ + "-" + ".".join(map(str, __version__))
import datetime
# Could make this the base class; will need to add 'publish'
class WriteXmlMixin:
    """Mixin adding XML serialization to any object with a publish() method."""

    def write_xml(self, outfile, encoding = "iso-8859-1"):
        """Write this object as an XML document to the file-like ``outfile``."""
        from xml.sax import saxutils
        handler = saxutils.XMLGenerator(outfile, encoding)
        handler.startDocument()
        self.publish(handler)
        handler.endDocument()

    def to_xml(self, encoding = "iso-8859-1"):
        """Return this object serialized as an XML string."""
        # BUG FIX: the old "import io as StringIO" bound the io module to
        # the name StringIO only, so the subsequent io.StringIO() call
        # raised NameError.  A plain import is all Python 3 needs.
        import io
        f = io.StringIO()
        self.write_xml(f, encoding)
        return f.getvalue()
def _element(handler, name, obj, d = {}):
if isinstance(obj, str) or obj is None:
# special-case handling to make the API easier
# to use for the common case.
handler.startElement(name, d)
if obj is not None:
handler.characters(obj)
handler.endElement(name)
else:
# It better know how to emit the correct XML.
obj.publish(handler)
def _opt_element(handler, name, obj):
if obj is None:
return
_element(handler, name, obj)
def _format_date(dt):
"""convert a datetime into an RFC 822 formatted date
Input date must be in GMT.
"""
# Looks like:
# Sat, 07 Sep 2002 00:00:01 GMT
# Can't use strftime because that's locale dependent
#
# Isn't there a standard way to do this for Python? The
# rfc822 and email.Utils modules assume a timestamp. The
# following is based on the rfc822 module.
return "%s, %02d %s %04d %02d:%02d:%02d GMT" % (
["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"][dt.weekday()],
dt.day,
["Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"][dt.month-1],
dt.year, dt.hour, dt.minute, dt.second)
##
# A couple simple wrapper objects for the fields which
# take a simple value other than a string.
class IntElement:
"""implements the 'publish' API for integers
Takes the tag name and the integer value to publish.
(Could be used for anything which uses str() to be published
to text for XML.)
"""
element_attrs = {}
def __init__(self, name, val):
self.name = name
self.val = val
def publish(self, handler):
handler.startElement(self.name, self.element_attrs)
handler.characters(str(self.val))
handler.endElement(self.name)
class DateElement:
"""implements the 'publish' API for a datetime.datetime
Takes the tag name and the datetime to publish.
Converts the datetime to RFC 2822 timestamp (4-digit year).
"""
def __init__(self, name, dt):
self.name = name
self.dt = dt
def publish(self, handler):
_element(handler, self.name, _format_date(self.dt))
####
class Category:
"""Publish a category element"""
def __init__(self, category, domain = None):
self.category = category
self.domain = domain
def publish(self, handler):
d = {}
if self.domain is not None:
d["domain"] = self.domain
_element(handler, "category", self.category, d)
class Cloud:
"""Publish a cloud"""
def __init__(self, domain, port, path,
registerProcedure, protocol):
self.domain = domain
self.port = port
self.path = path
self.registerProcedure = registerProcedure
self.protocol = protocol
def publish(self, handler):
_element(handler, "cloud", None, {
"domain": self.domain,
"port": str(self.port),
"path": self.path,
"registerProcedure": self.registerProcedure,
"protocol": self.protocol})
class Image:
"""Publish a channel Image"""
element_attrs = {}
def __init__(self, url, title, link,
width = None, height = None, description = None):
self.url = url
self.title = title
self.link = link
self.width = width
self.height = height
self.description = description
def publish(self, handler):
handler.startElement("image", self.element_attrs)
_element(handler, "url", self.url)
_element(handler, "title", self.title)
_element(handler, "link", self.link)
width = self.width
if isinstance(width, int):
width = IntElement("width", width)
_opt_element(handler, "width", width)
height = self.height
if isinstance(height, int):
height = IntElement("height", height)
_opt_element(handler, "height", height)
_opt_element(handler, "description", self.description)
handler.endElement("image")
class Guid:
"""Publish a guid
Defaults to being a permalink, which is the assumption if it's
omitted. Hence strings are always permalinks.
"""
def __init__(self, guid, isPermaLink = 1):
self.guid = guid
self.isPermaLink = isPermaLink
def publish(self, handler):
d = {}
if self.isPermaLink:
d["isPermaLink"] = "true"
else:
d["isPermaLink"] = "false"
_element(handler, "guid", self.guid, d)
class TextInput:
"""Publish a textInput
Apparently this is rarely used.
"""
element_attrs = {}
def __init__(self, title, description, name, link):
self.title = title
self.description = description
self.name = name
self.link = link
def publish(self, handler):
handler.startElement("textInput", self.element_attrs)
_element(handler, "title", self.title)
_element(handler, "description", self.description)
_element(handler, "name", self.name)
_element(handler, "link", self.link)
handler.endElement("textInput")
class Enclosure:
"""Publish an enclosure"""
def __init__(self, url, length, type):
self.url = url
self.length = length
self.type = type
def publish(self, handler):
_element(handler, "enclosure", None,
{"url": self.url,
"length": str(self.length),
"type": self.type,
})
class Source:
"""Publish the item's original source, used by aggregators"""
def __init__(self, name, url):
self.name = name
self.url = url
def publish(self, handler):
_element(handler, "source", self.name, {"url": self.url})
class SkipHours:
"""Publish the skipHours
This takes a list of hours, as integers.
"""
element_attrs = {}
def __init__(self, hours):
self.hours = hours
def publish(self, handler):
if self.hours:
handler.startElement("skipHours", self.element_attrs)
for hour in self.hours:
_element(handler, "hour", str(hour))
handler.endElement("skipHours")
class SkipDays:
"""Publish the skipDays
This takes a list of days as strings.
"""
element_attrs = {}
def __init__(self, days):
self.days = days
def publish(self, handler):
if self.days:
handler.startElement("skipDays", self.element_attrs)
for day in self.days:
_element(handler, "day", day)
handler.endElement("skipDays")
class RSS2(WriteXmlMixin):
    """The main RSS class.

    Stores the channel attributes, with the "category" elements under
    ".categories" and the RSS items under ".items".
    """

    rss_attrs = {"version": "2.0"}
    element_attrs = {}

    def __init__(self,
                 title,
                 link,
                 description,

                 language = None,
                 copyright = None,
                 managingEditor = None,
                 webMaster = None,
                 pubDate = None,        # a datetime, *in* *GMT*
                 lastBuildDate = None,  # a datetime
                 categories = None,     # list of strings or Category
                 generator = _generator_name,
                 docs = "http://blogs.law.harvard.edu/tech/rss",
                 cloud = None,          # a Cloud
                 ttl = None,            # integer number of minutes
                 image = None,          # an Image
                 rating = None,         # a string; I don't know how it's used
                 textInput = None,      # a TextInput
                 skipHours = None,      # a SkipHours with a list of integers
                 skipDays = None,       # a SkipDays with a list of strings
                 items = None,          # list of RSSItems
                 ):
        self.title = title
        self.link = link
        self.description = description
        self.language = language
        self.copyright = copyright
        self.managingEditor = managingEditor
        self.webMaster = webMaster
        self.pubDate = pubDate
        self.lastBuildDate = lastBuildDate
        # Avoid sharing one mutable list across instances.
        self.categories = [] if categories is None else categories
        self.generator = generator
        self.docs = docs
        self.cloud = cloud
        self.ttl = ttl
        self.image = image
        self.rating = rating
        self.textInput = textInput
        self.skipHours = skipHours
        self.skipDays = skipDays
        self.items = [] if items is None else items

    def publish(self, handler):
        """Emit the full <rss><channel>...</channel></rss> tree."""
        handler.startElement("rss", self.rss_attrs)
        handler.startElement("channel", self.element_attrs)
        _element(handler, "title", self.title)
        _element(handler, "link", self.link)
        _element(handler, "description", self.description)

        self.publish_extensions(handler)

        _opt_element(handler, "language", self.language)
        _opt_element(handler, "copyright", self.copyright)
        _opt_element(handler, "managingEditor", self.managingEditor)
        _opt_element(handler, "webMaster", self.webMaster)

        pubDate = self.pubDate
        if isinstance(pubDate, datetime.datetime):
            pubDate = DateElement("pubDate", pubDate)
        _opt_element(handler, "pubDate", pubDate)

        lastBuildDate = self.lastBuildDate
        if isinstance(lastBuildDate, datetime.datetime):
            lastBuildDate = DateElement("lastBuildDate", lastBuildDate)
        _opt_element(handler, "lastBuildDate", lastBuildDate)

        for category in self.categories:
            if isinstance(category, str):
                category = Category(category)
            category.publish(handler)

        _opt_element(handler, "generator", self.generator)
        _opt_element(handler, "docs", self.docs)

        if self.cloud is not None:
            self.cloud.publish(handler)

        ttl = self.ttl
        if isinstance(self.ttl, int):
            ttl = IntElement("ttl", ttl)
        # BUG FIX: the element name was misspelled "tt", which produced
        # invalid RSS output; the spec requires "ttl".
        _opt_element(handler, "ttl", ttl)

        if self.image is not None:
            self.image.publish(handler)

        _opt_element(handler, "rating", self.rating)

        if self.textInput is not None:
            self.textInput.publish(handler)
        if self.skipHours is not None:
            self.skipHours.publish(handler)
        if self.skipDays is not None:
            self.skipDays.publish(handler)

        for item in self.items:
            item.publish(handler)

        handler.endElement("channel")
        handler.endElement("rss")

    def publish_extensions(self, handler):
        # Hook for derived classes: emit extra output after the three
        # required channel fields.
        pass
class RSSItem(WriteXmlMixin):
"""Publish an RSS Item"""
element_attrs = {}
def __init__(self,
title = None, # string
link = None, # url as string
description = None, # string
author = None, # email address as string
categories = None, # list of string or Category
comments = None, # url as string
enclosure = None, # an Enclosure
guid = None, # a unique string
pubDate = None, # a datetime
source = None, # a Source
):
if title is None and description is None:
raise TypeError(
"must define at least one of 'title' or 'description'")
self.title = title
self.link = link
self.description = description
self.author = author
if categories is None:
categories = []
self.categories = categories
self.comments = comments
self.enclosure = enclosure
self.guid = guid
self.pubDate = pubDate
self.source = source
# It sure does get tedious typing these names three times...
def publish(self, handler):
handler.startElement("item", self.element_attrs)
_opt_element(handler, "title", self.title)
_opt_element(handler, "link", self.link)
self.publish_extensions(handler)
_opt_element(handler, "description", self.description)
_opt_element(handler, "author", self.author)
for category in self.categories:
if isinstance(category, str):
category = Category(category)
category.publish(handler)
_opt_element(handler, "comments", self.comments)
if self.enclosure is not None:
self.enclosure.publish(handler)
_opt_element(handler, "guid", self.guid)
pubDate = self.pubDate
if isinstance(pubDate, datetime.datetime):
pubDate = DateElement("pubDate", pubDate)
_opt_element(handler, "pubDate", pubDate)
if self.source is not None:
self.source.publish(handler)
handler.endElement("item")
def publish_extensions(self, handler):
# Derived classes can hook into this to insert
# output after the title and link elements
pass
| en | 0.811039 | PyRSS2Gen - A Python library for generating RSS 2.0 feeds. # Could make this the base class; will need to add 'publish' # special-case handling to make the API easier # to use for the common case. # It better know how to emit the correct XML. convert a datetime into an RFC 822 formatted date Input date must be in GMT. # Looks like: # Sat, 07 Sep 2002 00:00:01 GMT # Can't use strftime because that's locale dependent # # Isn't there a standard way to do this for Python? The # rfc822 and email.Utils modules assume a timestamp. The # following is based on the rfc822 module. ## # A couple simple wrapper objects for the fields which # take a simple value other than a string. implements the 'publish' API for integers Takes the tag name and the integer value to publish. (Could be used for anything which uses str() to be published to text for XML.) implements the 'publish' API for a datetime.datetime Takes the tag name and the datetime to publish. Converts the datetime to RFC 2822 timestamp (4-digit year). #### Publish a category element Publish a cloud Publish a channel Image Publish a guid Defaults to being a permalink, which is the assumption if it's omitted. Hence strings are always permalinks. Publish a textInput Apparently this is rarely used. Publish an enclosure Publish the item's original source, used by aggregators Publish the skipHours This takes a list of hours, as integers. Publish the skipDays This takes a list of days as strings. The main RSS class. Stores the channel attributes, with the "category" elements under ".categories" and the RSS items under ".items". # a datetime, *in* *GMT* # a datetime # list of strings or Category # a Cloud # integer number of minutes # an Image # a string; I don't know how it's used # a TextInput # a SkipHours with a list of integers # a SkipDays with a list of strings # list of RSSItems # Derived classes can hook into this to insert # output after the three required fields. 
Publish an RSS Item # string # url as string # string # email address as string # list of string or Category # url as string # an Enclosure # a unique string # a datetime # a Source # It sure does get tedious typing these names three times... # Derived classes can hook into this to insert # output after the title and link elements | 2.748684 | 3 |
nilmtk/feature_detectors/cluster.py | erayon/nilmtk | 1 | 6624654 | from __future__ import print_function, division
import numpy as np
import pandas as pd
# Fix the seed for repeatability of experiments
SEED = 42
np.random.seed(SEED)
def cluster(X, max_num_clusters=3, exact_num_clusters=None):
    """Cluster the reduced data, i.e. readings where power exceeds a threshold.

    Parameters
    ----------
    X : pd.Series or single-column pd.DataFrame
    max_num_clusters : int
    exact_num_clusters : int, optional
        If given, cluster into exactly this many clusters.

    Returns
    -------
    centroids : ndarray of int32s
        Power in different states of an appliance, sorted ascending.
    """
    # Keep only readings above the power threshold, as a column vector.
    reduced = _transform_data(X)
    # Raw (float) cluster centres from k-means.
    raw_centroids = _apply_clustering(reduced, max_num_clusters, exact_num_clusters)
    # Append the 'off' state, round to integer power levels, then
    # deduplicate; np.unique also sorts ascending.
    with_off = np.append(raw_centroids, 0)
    as_ints = np.round(with_off).astype(np.int32)
    # TODO: Merge similar clusters
    return np.unique(as_ints)
def _transform_data(data):
'''Subsamples if needed and converts to column vector (which is what
scikit-learn requires).
Parameters
----------
data : pd.Series or single column pd.DataFrame
Returns
-------
data_above_thresh : ndarray
column vector
'''
MAX_NUMBER_OF_SAMPLES = 2000
MIN_NUMBER_OF_SAMPLES = 20
DATA_THRESHOLD = 10
data_above_thresh = data[data > DATA_THRESHOLD].dropna().values
n_samples = len(data_above_thresh)
if n_samples < MIN_NUMBER_OF_SAMPLES:
return np.zeros((MAX_NUMBER_OF_SAMPLES, 1))
elif n_samples > MAX_NUMBER_OF_SAMPLES:
# Randomly subsample (we don't want to smoothly downsample
# because that is likely to change the values)
random_indices = np.random.randint(0, n_samples, MAX_NUMBER_OF_SAMPLES)
resampled = data_above_thresh[random_indices]
return resampled.reshape(MAX_NUMBER_OF_SAMPLES, 1)
else:
return data_above_thresh.reshape(n_samples, 1)
def _apply_clustering_n_clusters(X, n_clusters):
    """Run k-means with a fixed number of clusters.

    Parameters
    ----------
    X : ndarray
        Column vector of samples.
    n_clusters : int
        Exact number of clusters to use.

    Returns
    -------
    labels : ndarray
        Cluster label of each sample.
    cluster_centers : ndarray
        Coordinates of the cluster centres.
    """
    # Imported locally for the same reason as in _apply_clustering:
    # a top-level sklearn import breaks autodoc.
    from sklearn.cluster import KMeans

    # n_init is pinned explicitly: its default changed to 'auto' in newer
    # scikit-learn releases, which would otherwise emit FutureWarnings and
    # could alter results between versions. 10 is the historical default.
    k_means = KMeans(init='k-means++', n_clusters=n_clusters, n_init=10)
    k_means.fit(X)
    return k_means.labels_, k_means.cluster_centers_
def _apply_clustering(X, max_num_clusters, exact_num_clusters=None):
    '''
    Parameters
    ----------
    X : ndarray
    max_num_clusters : int
    Returns
    -------
    centroids : list of numbers
        List of power in different states of an appliance
    '''
    # If we import sklearn at the top of the file then it makes autodoc fail
    from sklearn import metrics
    # sklearn produces lots of DepreciationWarnings with PyTables
    import warnings
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    # Finds whether 2 or 3 gives better Silhouellete coefficient
    # Whichever is higher serves as the number of clusters for that
    # appliance
    # num_clus: best cluster count found so far (-1 means "none yet");
    # sh: best silhouette score seen so far.
    num_clus = -1
    sh = -1
    k_means_labels = {}
    k_means_cluster_centers = {}
    # NOTE(review): k_means_labels_unique is populated below but never read.
    k_means_labels_unique = {}
    # If the exact number of clusters are specified, then use that
    if exact_num_clusters is not None:
        labels, centers = _apply_clustering_n_clusters(X, exact_num_clusters)
        return centers.flatten()
    # Exact number of clusters are not specified, use the cluster validity measures
    # to find the optimal number
    # NOTE(review): the upper bound is exclusive, so this tries
    # 1 .. max_num_clusters-1 clusters — confirm that is intended.
    for n_clusters in range(1, max_num_clusters):
        try:
            labels, centers = _apply_clustering_n_clusters(X, n_clusters)
            k_means_labels[n_clusters] = labels
            k_means_cluster_centers[n_clusters] = centers
            k_means_labels_unique[n_clusters] = np.unique(labels)
            try:
                sh_n = metrics.silhouette_score(
                    X, k_means_labels[n_clusters], metric='euclidean')
                if sh_n > sh:
                    sh = sh_n
                    num_clus = n_clusters
            except Exception:
                # silhouette_score raises when there is only one cluster,
                # so n_clusters == 1 always lands here and is accepted as
                # the current best without a score.
                num_clus = n_clusters
        except Exception:
            # k-means itself failed: fall back to the best result so far,
            # or a single 'off' centroid when nothing succeeded.
            if num_clus > -1:
                # NOTE(review): this return path does not flatten the
                # centres, unlike the other returns — confirm callers
                # tolerate the 2-D shape.
                return k_means_cluster_centers[num_clus]
            else:
                return np.array([0])
    return k_means_cluster_centers[num_clus].flatten()
def hart85_means_shift_cluster(pair_buffer_df, cols):
    """Cluster transition pairs with MeanShift (Hart '85 style).

    Parameters
    ----------
    pair_buffer_df : pd.DataFrame
        Paired transitions with 'T1 Active'/'T2 Active' (and optionally
        'T1 Reactive'/'T2 Reactive') columns.
    cols : list of tuples
        Measurement identifiers; the second element names the power type.

    Returns
    -------
    pd.DataFrame
        Cluster centres, one column per entry in ``cols``.
    """
    from sklearn.cluster import MeanShift

    # Feature matrix: one column per requested power type, each being the
    # mean absolute magnitude of the two paired transitions.
    features = pd.DataFrame(index=pair_buffer_df.index)
    requested = [col[1] for col in cols]
    if 'active' in requested:
        features['active'] = (pair_buffer_df['T1 Active'].abs()
                              + pair_buffer_df['T2 Active'].abs()) / 2
    if 'reactive' in requested:
        features['reactive'] = (pair_buffer_df['T1 Reactive'].abs()
                                + pair_buffer_df['T2 Reactive'].abs()) / 2
    X = features.values.reshape((len(features.index), len(cols)))
    mean_shift = MeanShift(bin_seeding=True)
    mean_shift.fit(X)
    return pd.DataFrame(mean_shift.cluster_centers_, columns=cols)
| from __future__ import print_function, division
import numpy as np
import pandas as pd
# Fix the seed for repeatability of experiments
SEED = 42
np.random.seed(SEED)
def cluster(X, max_num_clusters=3, exact_num_clusters=None):
    """Cluster the reduced data, i.e. readings where power exceeds a threshold.

    Parameters
    ----------
    X : pd.Series or single-column pd.DataFrame
    max_num_clusters : int
    exact_num_clusters : int, optional
        If given, cluster into exactly this many clusters.

    Returns
    -------
    centroids : ndarray of int32s
        Power in different states of an appliance, sorted ascending.
    """
    # Keep only readings above the power threshold, as a column vector.
    reduced = _transform_data(X)
    # Raw (float) cluster centres from k-means.
    raw_centroids = _apply_clustering(reduced, max_num_clusters, exact_num_clusters)
    # Append the 'off' state, round to integer power levels, then
    # deduplicate; np.unique also sorts ascending.
    with_off = np.append(raw_centroids, 0)
    as_ints = np.round(with_off).astype(np.int32)
    # TODO: Merge similar clusters
    return np.unique(as_ints)
def _transform_data(data):
'''Subsamples if needed and converts to column vector (which is what
scikit-learn requires).
Parameters
----------
data : pd.Series or single column pd.DataFrame
Returns
-------
data_above_thresh : ndarray
column vector
'''
MAX_NUMBER_OF_SAMPLES = 2000
MIN_NUMBER_OF_SAMPLES = 20
DATA_THRESHOLD = 10
data_above_thresh = data[data > DATA_THRESHOLD].dropna().values
n_samples = len(data_above_thresh)
if n_samples < MIN_NUMBER_OF_SAMPLES:
return np.zeros((MAX_NUMBER_OF_SAMPLES, 1))
elif n_samples > MAX_NUMBER_OF_SAMPLES:
# Randomly subsample (we don't want to smoothly downsample
# because that is likely to change the values)
random_indices = np.random.randint(0, n_samples, MAX_NUMBER_OF_SAMPLES)
resampled = data_above_thresh[random_indices]
return resampled.reshape(MAX_NUMBER_OF_SAMPLES, 1)
else:
return data_above_thresh.reshape(n_samples, 1)
def _apply_clustering_n_clusters(X, n_clusters):
    """Run k-means with a fixed number of clusters.

    Parameters
    ----------
    X : ndarray
        Column vector of samples.
    n_clusters : int
        Exact number of clusters to use.

    Returns
    -------
    labels : ndarray
        Cluster label of each sample.
    cluster_centers : ndarray
        Coordinates of the cluster centres.
    """
    # Imported locally for the same reason as in _apply_clustering:
    # a top-level sklearn import breaks autodoc.
    from sklearn.cluster import KMeans

    # n_init is pinned explicitly: its default changed to 'auto' in newer
    # scikit-learn releases, which would otherwise emit FutureWarnings and
    # could alter results between versions. 10 is the historical default.
    k_means = KMeans(init='k-means++', n_clusters=n_clusters, n_init=10)
    k_means.fit(X)
    return k_means.labels_, k_means.cluster_centers_
def _apply_clustering(X, max_num_clusters, exact_num_clusters=None):
    '''
    Parameters
    ----------
    X : ndarray
    max_num_clusters : int
    Returns
    -------
    centroids : list of numbers
        List of power in different states of an appliance
    '''
    # If we import sklearn at the top of the file then it makes autodoc fail
    from sklearn import metrics
    # sklearn produces lots of DepreciationWarnings with PyTables
    import warnings
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    # Finds whether 2 or 3 gives better Silhouellete coefficient
    # Whichever is higher serves as the number of clusters for that
    # appliance
    # num_clus: best cluster count found so far (-1 means "none yet");
    # sh: best silhouette score seen so far.
    num_clus = -1
    sh = -1
    k_means_labels = {}
    k_means_cluster_centers = {}
    # NOTE(review): k_means_labels_unique is populated below but never read.
    k_means_labels_unique = {}
    # If the exact number of clusters are specified, then use that
    if exact_num_clusters is not None:
        labels, centers = _apply_clustering_n_clusters(X, exact_num_clusters)
        return centers.flatten()
    # Exact number of clusters are not specified, use the cluster validity measures
    # to find the optimal number
    # NOTE(review): the upper bound is exclusive, so this tries
    # 1 .. max_num_clusters-1 clusters — confirm that is intended.
    for n_clusters in range(1, max_num_clusters):
        try:
            labels, centers = _apply_clustering_n_clusters(X, n_clusters)
            k_means_labels[n_clusters] = labels
            k_means_cluster_centers[n_clusters] = centers
            k_means_labels_unique[n_clusters] = np.unique(labels)
            try:
                sh_n = metrics.silhouette_score(
                    X, k_means_labels[n_clusters], metric='euclidean')
                if sh_n > sh:
                    sh = sh_n
                    num_clus = n_clusters
            except Exception:
                # silhouette_score raises when there is only one cluster,
                # so n_clusters == 1 always lands here and is accepted as
                # the current best without a score.
                num_clus = n_clusters
        except Exception:
            # k-means itself failed: fall back to the best result so far,
            # or a single 'off' centroid when nothing succeeded.
            if num_clus > -1:
                # NOTE(review): this return path does not flatten the
                # centres, unlike the other returns — confirm callers
                # tolerate the 2-D shape.
                return k_means_cluster_centers[num_clus]
            else:
                return np.array([0])
    return k_means_cluster_centers[num_clus].flatten()
def hart85_means_shift_cluster(pair_buffer_df, cols):
    """Cluster transition pairs with MeanShift (Hart '85 style).

    Parameters
    ----------
    pair_buffer_df : pd.DataFrame
        Paired transitions with 'T1 Active'/'T2 Active' (and optionally
        'T1 Reactive'/'T2 Reactive') columns.
    cols : list of tuples
        Measurement identifiers; the second element names the power type.

    Returns
    -------
    pd.DataFrame
        Cluster centres, one column per entry in ``cols``.
    """
    from sklearn.cluster import MeanShift

    # Feature matrix: one column per requested power type, each being the
    # mean absolute magnitude of the two paired transitions.
    features = pd.DataFrame(index=pair_buffer_df.index)
    requested = [col[1] for col in cols]
    if 'active' in requested:
        features['active'] = (pair_buffer_df['T1 Active'].abs()
                              + pair_buffer_df['T2 Active'].abs()) / 2
    if 'reactive' in requested:
        features['reactive'] = (pair_buffer_df['T1 Reactive'].abs()
                                + pair_buffer_df['T2 Reactive'].abs()) / 2
    X = features.values.reshape((len(features.index), len(cols)))
    mean_shift = MeanShift(bin_seeding=True)
    mean_shift.fit(X)
    return pd.DataFrame(mean_shift.cluster_centers_, columns=cols)
| en | 0.754337 | # Fix the seed for repeatability of experiments Applies clustering on reduced data, i.e. data where power is greater than threshold. Parameters ---------- X : pd.Series or single-column pd.DataFrame max_num_clusters : int Returns ------- centroids : ndarray of int32s Power in different states of an appliance, sorted # Find where power consumption is greater than 10 # Find clusters # add 'off' state # np.unique also sorts # TODO: Merge similar clusters Subsamples if needed and converts to column vector (which is what scikit-learn requires). Parameters ---------- data : pd.Series or single column pd.DataFrame Returns ------- data_above_thresh : ndarray column vector # Randomly subsample (we don't want to smoothly downsample # because that is likely to change the values) :param X: ndarray :param n_clusters: exact number of clusters to use :return: Parameters ---------- X : ndarray max_num_clusters : int Returns ------- centroids : list of numbers List of power in different states of an appliance # If we import sklearn at the top of the file then it makes autodoc fail # sklearn produces lots of DepreciationWarnings with PyTables # Finds whether 2 or 3 gives better Silhouellete coefficient # Whichever is higher serves as the number of clusters for that # appliance # If the exact number of clusters are specified, then use that # Exact number of clusters are not specified, use the cluster validity measures # to find the optimal number # Creating feature vector | 3.404694 | 3 |
eland/ndframe.py | davidkyle/eland | 0 | 6624655 | # Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import pandas as pd # type: ignore
from eland.query_compiler import QueryCompiler
if TYPE_CHECKING:
from elasticsearch import Elasticsearch
from eland.index import Index
"""
NDFrame
---------
Abstract base class for eland.DataFrame and eland.Series.
The underlying data resides in Elasticsearch and the API aligns as much as
possible with pandas APIs.
This allows the eland.DataFrame to access large datasets stored in Elasticsearch,
without storing the dataset in local memory.
Implementation Details
----------------------
Elasticsearch indexes can be configured in many different ways, and these indexes
utilise different data structures to pandas.
eland.DataFrame operations that return individual rows (e.g. df.head()) return
_source data. If _source is not enabled, this data is not accessible.
Similarly, only Elasticsearch searchable fields can be searched or filtered, and
only Elasticsearch aggregatable fields can be aggregated or grouped.
"""
class NDFrame(ABC):
    """Abstract base class for eland.DataFrame and eland.Series.

    The underlying data resides in Elasticsearch and the API aligns as much
    as possible with pandas; a :class:`QueryCompiler` translates operations
    into Elasticsearch queries/aggregations.
    """

    def __init__(
        self,
        es_client: Optional[
            Union[str, List[str], Tuple[str, ...], "Elasticsearch"]
        ] = None,
        es_index_pattern: Optional[str] = None,
        columns: Optional[List[str]] = None,
        es_index_field: Optional[str] = None,
        _query_compiler: Optional[QueryCompiler] = None,
    ) -> None:
        """
        pandas.DataFrame/Series like API that proxies into Elasticsearch index(es).

        Parameters
        ----------
        es_client : elasticsearch.Elasticsearch
            A reference to a Elasticsearch python client
        es_index_pattern : str, optional
            Index pattern the frame reads from.
        columns : list of str, optional
            Subset of columns to expose.
        es_index_field : str, optional
            Field used to index the frame (defaults handled by QueryCompiler).
        _query_compiler : QueryCompiler, optional
            Pre-built query compiler; when supplied, the other arguments
            are ignored (used internally when deriving new frames).
        """
        # Build a fresh query compiler unless one was handed in.
        if _query_compiler is None:
            _query_compiler = QueryCompiler(
                client=es_client,
                index_pattern=es_index_pattern,
                display_names=columns,
                index_field=es_index_field,
            )
        self._query_compiler = _query_compiler

    @property
    def index(self) -> "Index":
        """
        Return eland index referencing Elasticsearch field to index a DataFrame/Series

        Returns
        -------
        eland.Index:
            Note eland.Index has a very limited API compared to pandas.Index

        See Also
        --------
        :pandas_api_docs:`pandas.DataFrame.index`
        :pandas_api_docs:`pandas.Series.index`

        Examples
        --------
        >>> df = ed.DataFrame('localhost', 'flights')
        >>> assert isinstance(df.index, ed.Index)
        >>> df.index.es_index_field
        '_id'
        >>> s = df['Carrier']
        >>> assert isinstance(s.index, ed.Index)
        >>> s.index.es_index_field
        '_id'
        """
        return self._query_compiler.index

    @property
    def dtypes(self) -> pd.Series:
        """
        Return the pandas dtypes in the DataFrame. Elasticsearch types are mapped
        to pandas dtypes via Mappings._es_dtype_to_pd_dtype.__doc__

        Returns
        -------
        pandas.Series
            The data type of each column.

        See Also
        --------
        :pandas_api_docs:`pandas.DataFrame.dtypes`

        Examples
        --------
        >>> df = ed.DataFrame('localhost', 'flights', columns=['Origin', 'AvgTicketPrice', 'timestamp', 'dayOfWeek'])
        >>> df.dtypes
        Origin                    object
        AvgTicketPrice           float64
        timestamp         datetime64[ns]
        dayOfWeek                  int64
        dtype: object
        """
        return self._query_compiler.dtypes

    @property
    def es_dtypes(self) -> pd.Series:
        """
        Return the Elasticsearch dtypes in the index

        Returns
        -------
        pandas.Series
            The data type of each column.

        Examples
        --------
        >>> df = ed.DataFrame('localhost', 'flights', columns=['Origin', 'AvgTicketPrice', 'timestamp', 'dayOfWeek'])
        >>> df.es_dtypes
        Origin            keyword
        AvgTicketPrice      float
        timestamp            date
        dayOfWeek            byte
        dtype: object
        """
        return self._query_compiler.es_dtypes

    def _build_repr(self, num_rows: int) -> pd.DataFrame:
        """Materialize at most ``num_rows`` rows (half head, half tail) for repr."""
        # self could be Series or DataFrame
        if len(self.index) <= num_rows:
            return self.to_pandas()

        # Head gets the extra row when num_rows is odd.
        head_rows = int(num_rows / 2) + num_rows % 2
        tail_rows = num_rows - head_rows

        head = self.head(head_rows).to_pandas()
        tail = self.tail(tail_rows).to_pandas()

        # DataFrame.append() was deprecated in pandas 1.4 and removed in
        # pandas 2.0; pd.concat is the supported equivalent.
        return pd.concat([head, tail])

    def __sizeof__(self) -> int:
        # Don't default to pandas, just return approximation TODO - make this more accurate
        return sys.getsizeof(self._query_compiler)

    def __len__(self) -> int:
        """Gets the length of the DataFrame.

        Returns:
            Returns an integer length of the DataFrame object.
        """
        return len(self.index)

    def _es_info(self, buf):
        # Write a description of the backing Elasticsearch index to `buf`.
        self._query_compiler.es_info(buf)

    def mean(self, numeric_only: Optional[bool] = None) -> pd.Series:
        """
        Return mean value for each numeric column

        TODO - implement remainder of pandas arguments, currently non-numerics are not supported

        Parameters
        ----------
        numeric_only: {True, False, None} Default is None
            Which datatype to be returned
            - True: Returns all values as float64, NaN/NaT values are removed
            - None: Returns all values as the same dtype where possible, NaN/NaT are removed
            - False: Returns all values as the same dtype where possible, NaN/NaT are preserved

        Returns
        -------
        pandas.Series
            mean value for each numeric column

        See Also
        --------
        :pandas_api_docs:`pandas.DataFrame.mean`

        Examples
        --------
        >>> df = ed.DataFrame('localhost', 'flights', columns=["AvgTicketPrice", "Cancelled", "dayOfWeek", "timestamp", "DestCountry"])
        >>> df.mean() # doctest: +SKIP
        AvgTicketPrice                          628.254
        Cancelled                              0.128494
        dayOfWeek                               2.83598
        timestamp         2018-01-21 19:20:45.564438232
        dtype: object
        >>> df.mean(numeric_only=True)
        AvgTicketPrice    628.253689
        Cancelled           0.128494
        dayOfWeek           2.835975
        dtype: float64
        >>> df.mean(numeric_only=False) # doctest: +SKIP
        AvgTicketPrice                          628.254
        Cancelled                              0.128494
        dayOfWeek                               2.83598
        timestamp         2018-01-21 19:20:45.564438232
        DestCountry                                 NaN
        dtype: object
        """
        return self._query_compiler.mean(numeric_only=numeric_only)

    def sum(self, numeric_only: Optional[bool] = None) -> pd.Series:
        """
        Return sum for each numeric column

        TODO - implement remainder of pandas arguments, currently non-numerics are not supported

        Parameters
        ----------
        numeric_only: {True, False, None} Default is None
            Which datatype to be returned
            - True: Returns all values as float64, NaN/NaT values are removed
            - None: Returns all values as the same dtype where possible, NaN/NaT are removed
            - False: Returns all values as the same dtype where possible, NaN/NaT are preserved

        Returns
        -------
        pandas.Series
            sum for each numeric column

        See Also
        --------
        :pandas_api_docs:`pandas.DataFrame.sum`

        Examples
        --------
        >>> df = ed.DataFrame('localhost', 'flights', columns=["AvgTicketPrice", "Cancelled", "dayOfWeek", "timestamp", "DestCountry"])
        >>> df.sum() # doctest: +SKIP
        AvgTicketPrice    8.20436e+06
        Cancelled                1678
        dayOfWeek               37035
        dtype: object
        >>> df.sum(numeric_only=True)
        AvgTicketPrice    8.204365e+06
        Cancelled         1.678000e+03
        dayOfWeek         3.703500e+04
        dtype: float64
        >>> df.sum(numeric_only=False) # doctest: +SKIP
        AvgTicketPrice    8.20436e+06
        Cancelled                1678
        dayOfWeek               37035
        timestamp                 NaT
        DestCountry               NaN
        dtype: object
        """
        return self._query_compiler.sum(numeric_only=numeric_only)

    def min(self, numeric_only: Optional[bool] = None) -> pd.Series:
        """
        Return the minimum value for each numeric column

        TODO - implement remainder of pandas arguments, currently non-numerics are not supported

        Parameters
        ----------
        numeric_only: {True, False, None} Default is None
            Which datatype to be returned
            - True: Returns all values as float64, NaN/NaT values are removed
            - None: Returns all values as the same dtype where possible, NaN/NaT are removed
            - False: Returns all values as the same dtype where possible, NaN/NaT are preserved

        Returns
        -------
        pandas.Series
            min value for each numeric column

        See Also
        --------
        :pandas_api_docs:`pandas.DataFrame.min`

        Examples
        --------
        >>> df = ed.DataFrame('localhost', 'flights', columns=["AvgTicketPrice", "Cancelled", "dayOfWeek", "timestamp", "DestCountry"])
        >>> df.min() # doctest: +SKIP
        AvgTicketPrice                100.021
        Cancelled                       False
        dayOfWeek                           0
        timestamp         2018-01-01 00:00:00
        dtype: object
        >>> df.min(numeric_only=True)
        AvgTicketPrice    100.020531
        Cancelled           0.000000
        dayOfWeek           0.000000
        dtype: float64
        >>> df.min(numeric_only=False) # doctest: +SKIP
        AvgTicketPrice                100.021
        Cancelled                       False
        dayOfWeek                           0
        timestamp         2018-01-01 00:00:00
        DestCountry                       NaN
        dtype: object
        """
        return self._query_compiler.min(numeric_only=numeric_only)

    def var(self, numeric_only: Optional[bool] = None) -> pd.Series:
        """
        Return variance for each numeric column

        Parameters
        ----------
        numeric_only: {True, False, None} Default is None
            Which datatype to be returned
            - True: Returns all values as float64, NaN/NaT values are removed
            - None: Returns all values as the same dtype where possible, NaN/NaT are removed
            - False: Returns all values as the same dtype where possible, NaN/NaT are preserved

        Returns
        -------
        pandas.Series
            The value of the variance for each numeric column

        See Also
        --------
        :pandas_api_docs:`pandas.DataFrame.var`

        Examples
        --------
        >>> df = ed.DataFrame('localhost', 'flights', columns=["AvgTicketPrice", "Cancelled", "dayOfWeek", "timestamp", "DestCountry"])
        >>> df.var() # doctest: +SKIP
        AvgTicketPrice    70964.570234
        Cancelled             0.111987
        dayOfWeek             3.761279
        dtype: float64
        >>> df.var(numeric_only=True)
        AvgTicketPrice    70964.570234
        Cancelled             0.111987
        dayOfWeek             3.761279
        dtype: float64
        >>> df.var(numeric_only=False) # doctest: +SKIP
        AvgTicketPrice    70964.6
        Cancelled        0.111987
        dayOfWeek         3.76128
        timestamp             NaT
        DestCountry           NaN
        dtype: object
        """
        return self._query_compiler.var(numeric_only=numeric_only)

    def std(self, numeric_only: Optional[bool] = None) -> pd.Series:
        """
        Return standard deviation for each numeric column

        Parameters
        ----------
        numeric_only: {True, False, None} Default is None
            Which datatype to be returned
            - True: Returns all values as float64, NaN/NaT values are removed
            - None: Returns all values as the same dtype where possible, NaN/NaT are removed
            - False: Returns all values as the same dtype where possible, NaN/NaT are preserved

        Returns
        -------
        pandas.Series
            The value of the standard deviation for each numeric column

        See Also
        --------
        :pandas_api_docs:`pandas.DataFrame.std`

        Examples
        --------
        >>> df = ed.DataFrame('localhost', 'flights', columns=["AvgTicketPrice", "Cancelled", "dayOfWeek", "timestamp", "DestCountry"])
        >>> df.std() # doctest: +SKIP
        AvgTicketPrice    266.407061
        Cancelled           0.334664
        dayOfWeek           1.939513
        dtype: float64
        >>> df.std(numeric_only=True)
        AvgTicketPrice    266.407061
        Cancelled           0.334664
        dayOfWeek           1.939513
        dtype: float64
        >>> df.std(numeric_only=False) # doctest: +SKIP
        AvgTicketPrice    266.407
        Cancelled        0.334664
        dayOfWeek         1.93951
        timestamp             NaT
        DestCountry           NaN
        dtype: object
        """
        return self._query_compiler.std(numeric_only=numeric_only)

    def median(self, numeric_only: Optional[bool] = None) -> pd.Series:
        """
        Return the median value for each numeric column

        Parameters
        ----------
        numeric_only: {True, False, None} Default is None
            Which datatype to be returned
            - True: Returns all values as float64, NaN/NaT values are removed
            - None: Returns all values as the same dtype where possible, NaN/NaT are removed
            - False: Returns all values as the same dtype where possible, NaN/NaT are preserved

        Returns
        -------
        pandas.Series
            median value for each numeric column

        See Also
        --------
        :pandas_api_docs:`pandas.DataFrame.median`

        Examples
        --------
        >>> df = ed.DataFrame('localhost', 'flights', columns=["AvgTicketPrice", "Cancelled", "dayOfWeek", "timestamp", "DestCountry"])
        >>> df.median() # doctest: +SKIP
        AvgTicketPrice                          640.363
        Cancelled                                 False
        dayOfWeek                                     3
        timestamp         2018-01-21 23:54:06.624776611
        dtype: object
        >>> df.median(numeric_only=True) # doctest: +SKIP
        AvgTicketPrice    640.362667
        Cancelled           0.000000
        dayOfWeek           3.000000
        dtype: float64
        >>> df.median(numeric_only=False) # doctest: +SKIP
        AvgTicketPrice                          640.387
        Cancelled                                 False
        dayOfWeek                                     3
        timestamp         2018-01-21 23:54:06.624776611
        DestCountry                                 NaN
        dtype: object
        """
        return self._query_compiler.median(numeric_only=numeric_only)

    def max(self, numeric_only: Optional[bool] = None) -> pd.Series:
        """
        Return the maximum value for each numeric column

        TODO - implement remainder of pandas arguments, currently non-numerics are not supported

        Parameters
        ----------
        numeric_only: {True, False, None} Default is None
            Which datatype to be returned
            - True: Returns all values as float64, NaN/NaT values are removed
            - None: Returns all values as the same dtype where possible, NaN/NaT are removed
            - False: Returns all values as the same dtype where possible, NaN/NaT are preserved

        Returns
        -------
        pandas.Series
            max value for each numeric column

        See Also
        --------
        :pandas_api_docs:`pandas.DataFrame.max`

        Examples
        --------
        >>> df = ed.DataFrame('localhost', 'flights', columns=["AvgTicketPrice", "Cancelled", "dayOfWeek", "timestamp", "DestCountry"])
        >>> df.max() # doctest: +SKIP
        AvgTicketPrice                1199.73
        Cancelled                        True
        dayOfWeek                           6
        timestamp         2018-02-11 23:50:12
        dtype: object
        >>> df.max(numeric_only=True)
        AvgTicketPrice    1199.729004
        Cancelled            1.000000
        dayOfWeek            6.000000
        dtype: float64
        >>> df.max(numeric_only=False) # doctest: +SKIP
        AvgTicketPrice                1199.73
        Cancelled                        True
        dayOfWeek                           6
        timestamp         2018-02-11 23:50:12
        DestCountry                       NaN
        dtype: object
        """
        return self._query_compiler.max(numeric_only=numeric_only)

    def nunique(self) -> pd.Series:
        """
        Return cardinality of each field.

        **Note we can only do this for aggregatable Elasticsearch fields - (in general) numeric and keyword
        rather than text fields**

        This method will try and field aggregatable fields if possible if mapping has::

            "customer_first_name" : {
              "type" : "text",
              "fields" : {
                "keyword" : {
                  "type" : "keyword",
                  "ignore_above" : 256
                }
              }
            }

        we will aggregate ``customer_first_name`` columns using ``customer_first_name.keyword``.

        TODO - implement remainder of pandas arguments

        Returns
        -------
        pandas.Series
            cardinality of each column

        See Also
        --------
        :pandas_api_docs:`pandas.DataFrame.nunique`

        Examples
        --------
        >>> columns = ['category', 'currency', 'customer_birth_date', 'customer_first_name', 'user']
        >>> df = ed.DataFrame('localhost', 'ecommerce', columns=columns)
        >>> df.nunique()
        category                6
        currency                1
        customer_birth_date     0
        customer_first_name    46
        user                   46
        dtype: int64
        """
        return self._query_compiler.nunique()

    def mad(self, numeric_only: bool = True) -> pd.Series:
        """
        Return median absolute deviation for each numeric column

        Returns
        -------
        pandas.Series
            The value of the median absolute deviation for each numeric column

        See Also
        --------
        :pandas_api_docs:`pandas.DataFrame.mad`

        Examples
        --------
        >>> df = ed.DataFrame('localhost', 'flights', columns=["AvgTicketPrice", "Cancelled", "dayOfWeek", "timestamp", "DestCountry"])
        >>> df.mad() # doctest: +SKIP
        AvgTicketPrice    213.35497
        dayOfWeek           2.00000
        dtype: float64
        >>> df.mad(numeric_only=True) # doctest: +SKIP
        AvgTicketPrice    213.473011
        dayOfWeek           2.000000
        dtype: float64
        >>> df.mad(numeric_only=False) # doctest: +SKIP
        AvgTicketPrice    213.484
        Cancelled             NaN
        dayOfWeek               2
        timestamp             NaT
        DestCountry           NaN
        dtype: object
        """
        return self._query_compiler.mad(numeric_only=numeric_only)

    def _hist(self, num_bins):
        # Delegate histogram bin computation to the query compiler
        # (executed as Elasticsearch aggregations).
        return self._query_compiler._hist(num_bins)

    def describe(self) -> pd.DataFrame:
        """
        Generate descriptive statistics that summarize the central tendency, dispersion and shape of a
        dataset's distribution, excluding NaN values.

        Analyzes both numeric and object series, as well as DataFrame column sets of mixed data types.
        The output will vary depending on what is provided. Refer to the notes below for more detail.

        TODO - add additional arguments (current only numeric values supported)

        Returns
        -------
        pandas.Dataframe:
            Summary information

        See Also
        --------
        :pandas_api_docs:`pandas.DataFrame.describe`

        Examples
        --------
        >>> df = ed.DataFrame('localhost', 'flights', columns=['AvgTicketPrice', 'FlightDelayMin'])
        >>> df.describe() # ignoring percentiles as they don't generate consistent results
               AvgTicketPrice  FlightDelayMin
        count    13059.000000    13059.000000
        mean       628.253689       47.335171
        std        266.386661       96.743006
        min        100.020531        0.000000
        ...
        ...
        ...
        max       1199.729004      360.000000
        """
        return self._query_compiler.describe()

    @abstractmethod
    def to_pandas(self, show_progress: bool = False) -> pd.DataFrame:
        raise NotImplementedError

    @abstractmethod
    def head(self, n: int = 5) -> "NDFrame":
        raise NotImplementedError

    @abstractmethod
    def tail(self, n: int = 5) -> "NDFrame":
        raise NotImplementedError

    @abstractmethod
    def sample(
        self,
        n: Optional[int] = None,
        frac: Optional[float] = None,
        random_state: Optional[int] = None,
    ) -> "NDFrame":
        raise NotImplementedError

    @property
    def shape(self) -> Tuple[int, ...]:
        # Implemented by DataFrame/Series subclasses.
        raise NotImplementedError

    @property
    def size(self) -> int:
        """
        Return an int representing the number of elements in this object.

        Return the number of rows if Series. Otherwise return the number of rows times number of columns if DataFrame.

        Returns
        -------
        int:
            Number of elements in the object

        See Also
        --------
        :pandas_api_docs:`pandas.DataFrame.size`
        """
        shape = self.shape
        # An empty shape (0-d) has no elements. The previous
        # `(product or 1) * dim` accumulator reset itself to 1 after a
        # zero-sized dimension, wrongly reporting e.g. shape (0, 5) as 5.
        if not shape:
            return 0
        product = 1
        for dim in shape:
            product *= dim
        return product
| # Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import pandas as pd # type: ignore
from eland.query_compiler import QueryCompiler
if TYPE_CHECKING:
from elasticsearch import Elasticsearch
from eland.index import Index
"""
NDFrame
---------
Abstract base class for eland.DataFrame and eland.Series.
The underlying data resides in Elasticsearch and the API aligns as much as
possible with pandas APIs.
This allows the eland.DataFrame to access large datasets stored in Elasticsearch,
without storing the dataset in local memory.
Implementation Details
----------------------
Elasticsearch indexes can be configured in many different ways, and these indexes
utilise different data structures to pandas.
eland.DataFrame operations that return individual rows (e.g. df.head()) return
_source data. If _source is not enabled, this data is not accessible.
Similarly, only Elasticsearch searchable fields can be searched or filtered, and
only Elasticsearch aggregatable fields can be aggregated or grouped.
"""
class NDFrame(ABC):
def __init__(
self,
es_client: Optional[
Union[str, List[str], Tuple[str, ...], "Elasticsearch"]
] = None,
es_index_pattern: Optional[str] = None,
columns: Optional[List[str]] = None,
es_index_field: Optional[str] = None,
_query_compiler: Optional[QueryCompiler] = None,
) -> None:
"""
pandas.DataFrame/Series like API that proxies into Elasticsearch index(es).
Parameters
----------
client : elasticsearch.Elasticsearch
A reference to a Elasticsearch python client
"""
if _query_compiler is None:
_query_compiler = QueryCompiler(
client=es_client,
index_pattern=es_index_pattern,
display_names=columns,
index_field=es_index_field,
)
self._query_compiler = _query_compiler
@property
def index(self) -> "Index":
"""
Return eland index referencing Elasticsearch field to index a DataFrame/Series
Returns
-------
eland.Index:
Note eland.Index has a very limited API compared to pandas.Index
See Also
--------
:pandas_api_docs:`pandas.DataFrame.index`
:pandas_api_docs:`pandas.Series.index`
Examples
--------
>>> df = ed.DataFrame('localhost', 'flights')
>>> assert isinstance(df.index, ed.Index)
>>> df.index.es_index_field
'_id'
>>> s = df['Carrier']
>>> assert isinstance(s.index, ed.Index)
>>> s.index.es_index_field
'_id'
"""
return self._query_compiler.index
@property
def dtypes(self) -> pd.Series:
"""
Return the pandas dtypes in the DataFrame. Elasticsearch types are mapped
to pandas dtypes via Mappings._es_dtype_to_pd_dtype.__doc__
Returns
-------
pandas.Series
The data type of each column.
See Also
--------
:pandas_api_docs:`pandas.DataFrame.dtypes`
Examples
--------
>>> df = ed.DataFrame('localhost', 'flights', columns=['Origin', 'AvgTicketPrice', 'timestamp', 'dayOfWeek'])
>>> df.dtypes
Origin object
AvgTicketPrice float64
timestamp datetime64[ns]
dayOfWeek int64
dtype: object
"""
return self._query_compiler.dtypes
@property
def es_dtypes(self) -> pd.Series:
"""
Return the Elasticsearch dtypes in the index
Returns
-------
pandas.Series
The data type of each column.
Examples
--------
>>> df = ed.DataFrame('localhost', 'flights', columns=['Origin', 'AvgTicketPrice', 'timestamp', 'dayOfWeek'])
>>> df.es_dtypes
Origin keyword
AvgTicketPrice float
timestamp date
dayOfWeek byte
dtype: object
"""
return self._query_compiler.es_dtypes
def _build_repr(self, num_rows: int) -> pd.DataFrame:
# self could be Series or DataFrame
if len(self.index) <= num_rows:
return self.to_pandas()
num_rows = num_rows
head_rows = int(num_rows / 2) + num_rows % 2
tail_rows = num_rows - head_rows
head = self.head(head_rows).to_pandas()
tail = self.tail(tail_rows).to_pandas()
return head.append(tail)
    def __sizeof__(self) -> int:
        """Approximate the in-memory size by the size of the query compiler object."""
        # Don't default to pandas, just return approximation TODO - make this more accurate
        return sys.getsizeof(self._query_compiler)
def __len__(self) -> int:
"""Gets the length of the DataFrame.
Returns:
Returns an integer length of the DataFrame object.
"""
return len(self.index)
    def _es_info(self, buf):
        """Write Elasticsearch-specific info about this object to *buf* (a writable text buffer)."""
        self._query_compiler.es_info(buf)
def mean(self, numeric_only: Optional[bool] = None) -> pd.Series:
"""
Return mean value for each numeric column
TODO - implement remainder of pandas arguments, currently non-numerics are not supported
Parameters
----------
numeric_only: {True, False, None} Default is None
Which datatype to be returned
- True: Returns all values as float64, NaN/NaT values are removed
- None: Returns all values as the same dtype where possible, NaN/NaT are removed
- False: Returns all values as the same dtype where possible, NaN/NaT are preserved
Returns
-------
pandas.Series
mean value for each numeric column
See Also
--------
:pandas_api_docs:`pandas.DataFrame.mean`
Examples
--------
>>> df = ed.DataFrame('localhost', 'flights', columns=["AvgTicketPrice", "Cancelled", "dayOfWeek", "timestamp", "DestCountry"])
>>> df.mean() # doctest: +SKIP
AvgTicketPrice 628.254
Cancelled 0.128494
dayOfWeek 2.83598
timestamp 2018-01-21 19:20:45.564438232
dtype: object
>>> df.mean(numeric_only=True)
AvgTicketPrice 628.253689
Cancelled 0.128494
dayOfWeek 2.835975
dtype: float64
>>> df.mean(numeric_only=False) # doctest: +SKIP
AvgTicketPrice 628.254
Cancelled 0.128494
dayOfWeek 2.83598
timestamp 2018-01-21 19:20:45.564438232
DestCountry NaN
dtype: object
"""
return self._query_compiler.mean(numeric_only=numeric_only)
def sum(self, numeric_only: Optional[bool] = None) -> pd.Series:
"""
Return sum for each numeric column
TODO - implement remainder of pandas arguments, currently non-numerics are not supported
Parameters
----------
numeric_only: {True, False, None} Default is None
Which datatype to be returned
- True: Returns all values as float64, NaN/NaT values are removed
- None: Returns all values as the same dtype where possible, NaN/NaT are removed
- False: Returns all values as the same dtype where possible, NaN/NaT are preserved
Returns
-------
pandas.Series
sum for each numeric column
See Also
--------
:pandas_api_docs:`pandas.DataFrame.sum`
Examples
--------
>>> df = ed.DataFrame('localhost', 'flights', columns=["AvgTicketPrice", "Cancelled", "dayOfWeek", "timestamp", "DestCountry"])
>>> df.sum() # doctest: +SKIP
AvgTicketPrice 8.20436e+06
Cancelled 1678
dayOfWeek 37035
dtype: object
>>> df.sum(numeric_only=True)
AvgTicketPrice 8.204365e+06
Cancelled 1.678000e+03
dayOfWeek 3.703500e+04
dtype: float64
>>> df.sum(numeric_only=False) # doctest: +SKIP
AvgTicketPrice 8.20436e+06
Cancelled 1678
dayOfWeek 37035
timestamp NaT
DestCountry NaN
dtype: object
"""
return self._query_compiler.sum(numeric_only=numeric_only)
def min(self, numeric_only: Optional[bool] = None) -> pd.Series:
"""
Return the minimum value for each numeric column
TODO - implement remainder of pandas arguments, currently non-numerics are not supported
Parameters
----------
numeric_only: {True, False, None} Default is None
Which datatype to be returned
- True: Returns all values as float64, NaN/NaT values are removed
- None: Returns all values as the same dtype where possible, NaN/NaT are removed
- False: Returns all values as the same dtype where possible, NaN/NaT are preserved
Returns
-------
pandas.Series
min value for each numeric column
See Also
--------
:pandas_api_docs:`pandas.DataFrame.min`
Examples
--------
>>> df = ed.DataFrame('localhost', 'flights', columns=["AvgTicketPrice", "Cancelled", "dayOfWeek", "timestamp", "DestCountry"])
>>> df.min() # doctest: +SKIP
AvgTicketPrice 100.021
Cancelled False
dayOfWeek 0
timestamp 2018-01-01 00:00:00
dtype: object
>>> df.min(numeric_only=True)
AvgTicketPrice 100.020531
Cancelled 0.000000
dayOfWeek 0.000000
dtype: float64
>>> df.min(numeric_only=False) # doctest: +SKIP
AvgTicketPrice 100.021
Cancelled False
dayOfWeek 0
timestamp 2018-01-01 00:00:00
DestCountry NaN
dtype: object
"""
return self._query_compiler.min(numeric_only=numeric_only)
def var(self, numeric_only: Optional[bool] = None) -> pd.Series:
"""
Return variance for each numeric column
Parameters
----------
numeric_only: {True, False, None} Default is None
Which datatype to be returned
- True: Returns all values as float64, NaN/NaT values are removed
- None: Returns all values as the same dtype where possible, NaN/NaT are removed
- False: Returns all values as the same dtype where possible, NaN/NaT are preserved
Returns
-------
pandas.Series
The value of the variance for each numeric column
See Also
--------
:pandas_api_docs:`pandas.DataFrame.var`
Examples
--------
>>> df = ed.DataFrame('localhost', 'flights', columns=["AvgTicketPrice", "Cancelled", "dayOfWeek", "timestamp", "DestCountry"])
>>> df.var() # doctest: +SKIP
AvgTicketPrice 70964.570234
Cancelled 0.111987
dayOfWeek 3.761279
dtype: float64
>>> df.var(numeric_only=True)
AvgTicketPrice 70964.570234
Cancelled 0.111987
dayOfWeek 3.761279
dtype: float64
>>> df.var(numeric_only=False) # doctest: +SKIP
AvgTicketPrice 70964.6
Cancelled 0.111987
dayOfWeek 3.76128
timestamp NaT
DestCountry NaN
dtype: object
"""
return self._query_compiler.var(numeric_only=numeric_only)
def std(self, numeric_only: Optional[bool] = None) -> pd.Series:
"""
Return standard deviation for each numeric column
Parameters
----------
numeric_only: {True, False, None} Default is None
Which datatype to be returned
- True: Returns all values as float64, NaN/NaT values are removed
- None: Returns all values as the same dtype where possible, NaN/NaT are removed
- False: Returns all values as the same dtype where possible, NaN/NaT are preserved
Returns
-------
pandas.Series
The value of the standard deviation for each numeric column
See Also
--------
:pandas_api_docs:`pandas.DataFrame.std`
Examples
--------
>>> df = ed.DataFrame('localhost', 'flights', columns=["AvgTicketPrice", "Cancelled", "dayOfWeek", "timestamp", "DestCountry"])
>>> df.std() # doctest: +SKIP
AvgTicketPrice 266.407061
Cancelled 0.334664
dayOfWeek 1.939513
dtype: float64
>>> df.std(numeric_only=True)
AvgTicketPrice 266.407061
Cancelled 0.334664
dayOfWeek 1.939513
dtype: float64
>>> df.std(numeric_only=False) # doctest: +SKIP
AvgTicketPrice 266.407
Cancelled 0.334664
dayOfWeek 1.93951
timestamp NaT
DestCountry NaN
dtype: object
"""
return self._query_compiler.std(numeric_only=numeric_only)
def median(self, numeric_only: Optional[bool] = None) -> pd.Series:
"""
Return the median value for each numeric column
Parameters
----------
numeric_only: {True, False, None} Default is None
Which datatype to be returned
- True: Returns all values as float64, NaN/NaT values are removed
- None: Returns all values as the same dtype where possible, NaN/NaT are removed
- False: Returns all values as the same dtype where possible, NaN/NaT are preserved
Returns
-------
pandas.Series
median value for each numeric column
See Also
--------
:pandas_api_docs:`pandas.DataFrame.median`
Examples
--------
>>> df = ed.DataFrame('localhost', 'flights', columns=["AvgTicketPrice", "Cancelled", "dayOfWeek", "timestamp", "DestCountry"])
>>> df.median() # doctest: +SKIP
AvgTicketPrice 640.363
Cancelled False
dayOfWeek 3
timestamp 2018-01-21 23:54:06.624776611
dtype: object
>>> df.median(numeric_only=True) # doctest: +SKIP
AvgTicketPrice 640.362667
Cancelled 0.000000
dayOfWeek 3.000000
dtype: float64
>>> df.median(numeric_only=False) # doctest: +SKIP
AvgTicketPrice 640.387
Cancelled False
dayOfWeek 3
timestamp 2018-01-21 23:54:06.624776611
DestCountry NaN
dtype: object
"""
return self._query_compiler.median(numeric_only=numeric_only)
def max(self, numeric_only: Optional[bool] = None) -> pd.Series:
"""
Return the maximum value for each numeric column
TODO - implement remainder of pandas arguments, currently non-numerics are not supported
Parameters
----------
numeric_only: {True, False, None} Default is None
Which datatype to be returned
- True: Returns all values as float64, NaN/NaT values are removed
- None: Returns all values as the same dtype where possible, NaN/NaT are removed
- False: Returns all values as the same dtype where possible, NaN/NaT are preserved
Returns
-------
pandas.Series
max value for each numeric column
See Also
--------
:pandas_api_docs:`pandas.DataFrame.max`
Examples
--------
>>> df = ed.DataFrame('localhost', 'flights', columns=["AvgTicketPrice", "Cancelled", "dayOfWeek", "timestamp", "DestCountry"])
>>> df.max() # doctest: +SKIP
AvgTicketPrice 1199.73
Cancelled True
dayOfWeek 6
timestamp 2018-02-11 23:50:12
dtype: object
>>> df.max(numeric_only=True)
AvgTicketPrice 1199.729004
Cancelled 1.000000
dayOfWeek 6.000000
dtype: float64
>>> df.max(numeric_only=False) # doctest: +SKIP
AvgTicketPrice 1199.73
Cancelled True
dayOfWeek 6
timestamp 2018-02-11 23:50:12
DestCountry NaN
dtype: object
"""
return self._query_compiler.max(numeric_only=numeric_only)
def nunique(self) -> pd.Series:
"""
Return cardinality of each field.
**Note we can only do this for aggregatable Elasticsearch fields - (in general) numeric and keyword
rather than text fields**
This method will try and field aggregatable fields if possible if mapping has::
"customer_first_name" : {
"type" : "text",
"fields" : {
"keyword" : {
"type" : "keyword",
"ignore_above" : 256
}
}
}
we will aggregate ``customer_first_name`` columns using ``customer_first_name.keyword``.
TODO - implement remainder of pandas arguments
Returns
-------
pandas.Series
cardinality of each column
See Also
--------
:pandas_api_docs:`pandas.DataFrame.nunique`
Examples
--------
>>> columns = ['category', 'currency', 'customer_birth_date', 'customer_first_name', 'user']
>>> df = ed.DataFrame('localhost', 'ecommerce', columns=columns)
>>> df.nunique()
category 6
currency 1
customer_birth_date 0
customer_first_name 46
user 46
dtype: int64
"""
return self._query_compiler.nunique()
    def mad(self, numeric_only: bool = True) -> pd.Series:
        """
        Return the absolute deviation (MAD) for each numeric column.

        NOTE(review): this delegates to the query compiler's ``mad``, which
        presumably maps to Elasticsearch's ``median_absolute_deviation``
        aggregation (an approximate, median-based MAD) rather than pandas'
        mean absolute deviation - confirm against the query compiler.

        Parameters
        ----------
        numeric_only: bool, default True
            Unlike the other aggregations on this class, defaults to True.

        Returns
        -------
        pandas.Series
            The absolute deviation of each numeric column

        See Also
        --------
        :pandas_api_docs:`pandas.DataFrame.mad`

        Examples
        --------
        >>> df = ed.DataFrame('localhost', 'flights', columns=["AvgTicketPrice", "Cancelled", "dayOfWeek", "timestamp", "DestCountry"])
        >>> df.mad() # doctest: +SKIP
        AvgTicketPrice 213.35497
        dayOfWeek 2.00000
        dtype: float64
        >>> df.mad(numeric_only=True) # doctest: +SKIP
        AvgTicketPrice 213.473011
        dayOfWeek 2.000000
        dtype: float64
        >>> df.mad(numeric_only=False) # doctest: +SKIP
        AvgTicketPrice 213.484
        Cancelled NaN
        dayOfWeek 2
        timestamp NaT
        DestCountry NaN
        dtype: object
        """
        return self._query_compiler.mad(numeric_only=numeric_only)
    def _hist(self, num_bins):
        """Compute histogram data over ``num_bins`` bins; delegated to the query compiler."""
        return self._query_compiler._hist(num_bins)
def describe(self) -> pd.DataFrame:
"""
Generate descriptive statistics that summarize the central tendency, dispersion and shape of a
dataset’s distribution, excluding NaN values.
Analyzes both numeric and object series, as well as DataFrame column sets of mixed data types.
The output will vary depending on what is provided. Refer to the notes below for more detail.
TODO - add additional arguments (current only numeric values supported)
Returns
-------
pandas.Dataframe:
Summary information
See Also
--------
:pandas_api_docs:`pandas.DataFrame.describe`
Examples
--------
>>> df = ed.DataFrame('localhost', 'flights', columns=['AvgTicketPrice', 'FlightDelayMin'])
>>> df.describe() # ignoring percentiles as they don't generate consistent results
AvgTicketPrice FlightDelayMin
count 13059.000000 13059.000000
mean 628.253689 47.335171
std 266.386661 96.743006
min 100.020531 0.000000
...
...
...
max 1199.729004 360.000000
"""
return self._query_compiler.describe()
    @abstractmethod
    def to_pandas(self, show_progress: bool = False) -> pd.DataFrame:
        """Materialize the full result as an in-memory pandas object; implemented by subclasses."""
        raise NotImplementedError
    @abstractmethod
    def head(self, n: int = 5) -> "NDFrame":
        """Return the first ``n`` rows as a new NDFrame; implemented by subclasses."""
        raise NotImplementedError
    @abstractmethod
    def tail(self, n: int = 5) -> "NDFrame":
        """Return the last ``n`` rows as a new NDFrame; implemented by subclasses."""
        raise NotImplementedError
    @abstractmethod
    def sample(
        self,
        n: Optional[int] = None,
        frac: Optional[float] = None,
        random_state: Optional[int] = None,
    ) -> "NDFrame":
        """Return a random sample of rows (``n`` rows or fraction ``frac``); implemented by subclasses."""
        raise NotImplementedError
    @property
    def shape(self) -> Tuple[int, ...]:
        """Return a tuple of this object's dimensions.

        Effectively abstract (implemented by DataFrame/Series subclasses),
        although - unlike the members above - it is not decorated with
        ``@abstractmethod``.
        """
        raise NotImplementedError
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of rows times number of columns if DataFrame.
Returns
-------
int:
Number of elements in the object
See Also
--------
:pandas_api_docs:`pandas.DataFrame.size`
"""
product = 0
for dim in self.shape:
product = (product or 1) * dim
return product
| en | 0.560687 | # Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # type: ignore NDFrame --------- Abstract base class for eland.DataFrame and eland.Series. The underlying data resides in Elasticsearch and the API aligns as much as possible with pandas APIs. This allows the eland.DataFrame to access large datasets stored in Elasticsearch, without storing the dataset in local memory. Implementation Details ---------------------- Elasticsearch indexes can be configured in many different ways, and these indexes utilise different data structures to pandas. eland.DataFrame operations that return individual rows (e.g. df.head()) return _source data. If _source is not enabled, this data is not accessible. Similarly, only Elasticsearch searchable fields can be searched or filtered, and only Elasticsearch aggregatable fields can be aggregated or grouped. pandas.DataFrame/Series like API that proxies into Elasticsearch index(es). 
Parameters ---------- client : elasticsearch.Elasticsearch A reference to a Elasticsearch python client Return eland index referencing Elasticsearch field to index a DataFrame/Series Returns ------- eland.Index: Note eland.Index has a very limited API compared to pandas.Index See Also -------- :pandas_api_docs:`pandas.DataFrame.index` :pandas_api_docs:`pandas.Series.index` Examples -------- >>> df = ed.DataFrame('localhost', 'flights') >>> assert isinstance(df.index, ed.Index) >>> df.index.es_index_field '_id' >>> s = df['Carrier'] >>> assert isinstance(s.index, ed.Index) >>> s.index.es_index_field '_id' Return the pandas dtypes in the DataFrame. Elasticsearch types are mapped to pandas dtypes via Mappings._es_dtype_to_pd_dtype.__doc__ Returns ------- pandas.Series The data type of each column. See Also -------- :pandas_api_docs:`pandas.DataFrame.dtypes` Examples -------- >>> df = ed.DataFrame('localhost', 'flights', columns=['Origin', 'AvgTicketPrice', 'timestamp', 'dayOfWeek']) >>> df.dtypes Origin object AvgTicketPrice float64 timestamp datetime64[ns] dayOfWeek int64 dtype: object Return the Elasticsearch dtypes in the index Returns ------- pandas.Series The data type of each column. Examples -------- >>> df = ed.DataFrame('localhost', 'flights', columns=['Origin', 'AvgTicketPrice', 'timestamp', 'dayOfWeek']) >>> df.es_dtypes Origin keyword AvgTicketPrice float timestamp date dayOfWeek byte dtype: object # self could be Series or DataFrame # Don't default to pandas, just return approximation TODO - make this more accurate Gets the length of the DataFrame. Returns: Returns an integer length of the DataFrame object. 
Return mean value for each numeric column TODO - implement remainder of pandas arguments, currently non-numerics are not supported Parameters ---------- numeric_only: {True, False, None} Default is None Which datatype to be returned - True: Returns all values as float64, NaN/NaT values are removed - None: Returns all values as the same dtype where possible, NaN/NaT are removed - False: Returns all values as the same dtype where possible, NaN/NaT are preserved Returns ------- pandas.Series mean value for each numeric column See Also -------- :pandas_api_docs:`pandas.DataFrame.mean` Examples -------- >>> df = ed.DataFrame('localhost', 'flights', columns=["AvgTicketPrice", "Cancelled", "dayOfWeek", "timestamp", "DestCountry"]) >>> df.mean() # doctest: +SKIP AvgTicketPrice 628.254 Cancelled 0.128494 dayOfWeek 2.83598 timestamp 2018-01-21 19:20:45.564438232 dtype: object >>> df.mean(numeric_only=True) AvgTicketPrice 628.253689 Cancelled 0.128494 dayOfWeek 2.835975 dtype: float64 >>> df.mean(numeric_only=False) # doctest: +SKIP AvgTicketPrice 628.254 Cancelled 0.128494 dayOfWeek 2.83598 timestamp 2018-01-21 19:20:45.564438232 DestCountry NaN dtype: object Return sum for each numeric column TODO - implement remainder of pandas arguments, currently non-numerics are not supported Parameters ---------- numeric_only: {True, False, None} Default is None Which datatype to be returned - True: Returns all values as float64, NaN/NaT values are removed - None: Returns all values as the same dtype where possible, NaN/NaT are removed - False: Returns all values as the same dtype where possible, NaN/NaT are preserved Returns ------- pandas.Series sum for each numeric column See Also -------- :pandas_api_docs:`pandas.DataFrame.sum` Examples -------- >>> df = ed.DataFrame('localhost', 'flights', columns=["AvgTicketPrice", "Cancelled", "dayOfWeek", "timestamp", "DestCountry"]) >>> df.sum() # doctest: +SKIP AvgTicketPrice 8.20436e+06 Cancelled 1678 dayOfWeek 37035 dtype: object >>> 
df.sum(numeric_only=True) AvgTicketPrice 8.204365e+06 Cancelled 1.678000e+03 dayOfWeek 3.703500e+04 dtype: float64 >>> df.sum(numeric_only=False) # doctest: +SKIP AvgTicketPrice 8.20436e+06 Cancelled 1678 dayOfWeek 37035 timestamp NaT DestCountry NaN dtype: object Return the minimum value for each numeric column TODO - implement remainder of pandas arguments, currently non-numerics are not supported Parameters ---------- numeric_only: {True, False, None} Default is None Which datatype to be returned - True: Returns all values as float64, NaN/NaT values are removed - None: Returns all values as the same dtype where possible, NaN/NaT are removed - False: Returns all values as the same dtype where possible, NaN/NaT are preserved Returns ------- pandas.Series min value for each numeric column See Also -------- :pandas_api_docs:`pandas.DataFrame.min` Examples -------- >>> df = ed.DataFrame('localhost', 'flights', columns=["AvgTicketPrice", "Cancelled", "dayOfWeek", "timestamp", "DestCountry"]) >>> df.min() # doctest: +SKIP AvgTicketPrice 100.021 Cancelled False dayOfWeek 0 timestamp 2018-01-01 00:00:00 dtype: object >>> df.min(numeric_only=True) AvgTicketPrice 100.020531 Cancelled 0.000000 dayOfWeek 0.000000 dtype: float64 >>> df.min(numeric_only=False) # doctest: +SKIP AvgTicketPrice 100.021 Cancelled False dayOfWeek 0 timestamp 2018-01-01 00:00:00 DestCountry NaN dtype: object Return variance for each numeric column Parameters ---------- numeric_only: {True, False, None} Default is None Which datatype to be returned - True: Returns all values as float64, NaN/NaT values are removed - None: Returns all values as the same dtype where possible, NaN/NaT are removed - False: Returns all values as the same dtype where possible, NaN/NaT are preserved Returns ------- pandas.Series The value of the variance for each numeric column See Also -------- :pandas_api_docs:`pandas.DataFrame.var` Examples -------- >>> df = ed.DataFrame('localhost', 'flights', columns=["AvgTicketPrice", 
"Cancelled", "dayOfWeek", "timestamp", "DestCountry"]) >>> df.var() # doctest: +SKIP AvgTicketPrice 70964.570234 Cancelled 0.111987 dayOfWeek 3.761279 dtype: float64 >>> df.var(numeric_only=True) AvgTicketPrice 70964.570234 Cancelled 0.111987 dayOfWeek 3.761279 dtype: float64 >>> df.var(numeric_only=False) # doctest: +SKIP AvgTicketPrice 70964.6 Cancelled 0.111987 dayOfWeek 3.76128 timestamp NaT DestCountry NaN dtype: object Return standard deviation for each numeric column Parameters ---------- numeric_only: {True, False, None} Default is None Which datatype to be returned - True: Returns all values as float64, NaN/NaT values are removed - None: Returns all values as the same dtype where possible, NaN/NaT are removed - False: Returns all values as the same dtype where possible, NaN/NaT are preserved Returns ------- pandas.Series The value of the standard deviation for each numeric column See Also -------- :pandas_api_docs:`pandas.DataFrame.std` Examples -------- >>> df = ed.DataFrame('localhost', 'flights', columns=["AvgTicketPrice", "Cancelled", "dayOfWeek", "timestamp", "DestCountry"]) >>> df.std() # doctest: +SKIP AvgTicketPrice 266.407061 Cancelled 0.334664 dayOfWeek 1.939513 dtype: float64 >>> df.std(numeric_only=True) AvgTicketPrice 266.407061 Cancelled 0.334664 dayOfWeek 1.939513 dtype: float64 >>> df.std(numeric_only=False) # doctest: +SKIP AvgTicketPrice 266.407 Cancelled 0.334664 dayOfWeek 1.93951 timestamp NaT DestCountry NaN dtype: object Return the median value for each numeric column Parameters ---------- numeric_only: {True, False, None} Default is None Which datatype to be returned - True: Returns all values as float64, NaN/NaT values are removed - None: Returns all values as the same dtype where possible, NaN/NaT are removed - False: Returns all values as the same dtype where possible, NaN/NaT are preserved Returns ------- pandas.Series median value for each numeric column See Also -------- :pandas_api_docs:`pandas.DataFrame.median` Examples 
-------- >>> df = ed.DataFrame('localhost', 'flights', columns=["AvgTicketPrice", "Cancelled", "dayOfWeek", "timestamp", "DestCountry"]) >>> df.median() # doctest: +SKIP AvgTicketPrice 640.363 Cancelled False dayOfWeek 3 timestamp 2018-01-21 23:54:06.624776611 dtype: object >>> df.median(numeric_only=True) # doctest: +SKIP AvgTicketPrice 640.362667 Cancelled 0.000000 dayOfWeek 3.000000 dtype: float64 >>> df.median(numeric_only=False) # doctest: +SKIP AvgTicketPrice 640.387 Cancelled False dayOfWeek 3 timestamp 2018-01-21 23:54:06.624776611 DestCountry NaN dtype: object Return the maximum value for each numeric column TODO - implement remainder of pandas arguments, currently non-numerics are not supported Parameters ---------- numeric_only: {True, False, None} Default is None Which datatype to be returned - True: Returns all values as float64, NaN/NaT values are removed - None: Returns all values as the same dtype where possible, NaN/NaT are removed - False: Returns all values as the same dtype where possible, NaN/NaT are preserved Returns ------- pandas.Series max value for each numeric column See Also -------- :pandas_api_docs:`pandas.DataFrame.max` Examples -------- >>> df = ed.DataFrame('localhost', 'flights', columns=["AvgTicketPrice", "Cancelled", "dayOfWeek", "timestamp", "DestCountry"]) >>> df.max() # doctest: +SKIP AvgTicketPrice 1199.73 Cancelled True dayOfWeek 6 timestamp 2018-02-11 23:50:12 dtype: object >>> df.max(numeric_only=True) AvgTicketPrice 1199.729004 Cancelled 1.000000 dayOfWeek 6.000000 dtype: float64 >>> df.max(numeric_only=False) # doctest: +SKIP AvgTicketPrice 1199.73 Cancelled True dayOfWeek 6 timestamp 2018-02-11 23:50:12 DestCountry NaN dtype: object Return cardinality of each field. 
**Note we can only do this for aggregatable Elasticsearch fields - (in general) numeric and keyword rather than text fields** This method will try and field aggregatable fields if possible if mapping has:: "customer_first_name" : { "type" : "text", "fields" : { "keyword" : { "type" : "keyword", "ignore_above" : 256 } } } we will aggregate ``customer_first_name`` columns using ``customer_first_name.keyword``. TODO - implement remainder of pandas arguments Returns ------- pandas.Series cardinality of each column See Also -------- :pandas_api_docs:`pandas.DataFrame.nunique` Examples -------- >>> columns = ['category', 'currency', 'customer_birth_date', 'customer_first_name', 'user'] >>> df = ed.DataFrame('localhost', 'ecommerce', columns=columns) >>> df.nunique() category 6 currency 1 customer_birth_date 0 customer_first_name 46 user 46 dtype: int64 Return standard deviation for each numeric column Returns ------- pandas.Series The value of the standard deviation for each numeric column See Also -------- :pandas_api_docs:`pandas.DataFrame.std` Examples -------- >>> df = ed.DataFrame('localhost', 'flights', columns=["AvgTicketPrice", "Cancelled", "dayOfWeek", "timestamp", "DestCountry"]) >>> df.mad() # doctest: +SKIP AvgTicketPrice 213.35497 dayOfWeek 2.00000 dtype: float64 >>> df.mad(numeric_only=True) # doctest: +SKIP AvgTicketPrice 213.473011 dayOfWeek 2.000000 dtype: float64 >>> df.mad(numeric_only=False) # doctest: +SKIP AvgTicketPrice 213.484 Cancelled NaN dayOfWeek 2 timestamp NaT DestCountry NaN dtype: object Generate descriptive statistics that summarize the central tendency, dispersion and shape of a dataset’s distribution, excluding NaN values. Analyzes both numeric and object series, as well as DataFrame column sets of mixed data types. The output will vary depending on what is provided. Refer to the notes below for more detail. 
TODO - add additional arguments (current only numeric values supported) Returns ------- pandas.Dataframe: Summary information See Also -------- :pandas_api_docs:`pandas.DataFrame.describe` Examples -------- >>> df = ed.DataFrame('localhost', 'flights', columns=['AvgTicketPrice', 'FlightDelayMin']) >>> df.describe() # ignoring percentiles as they don't generate consistent results AvgTicketPrice FlightDelayMin count 13059.000000 13059.000000 mean 628.253689 47.335171 std 266.386661 96.743006 min 100.020531 0.000000 ... ... ... max 1199.729004 360.000000 Return an int representing the number of elements in this object. Return the number of rows if Series. Otherwise return the number of rows times number of columns if DataFrame. Returns ------- int: Number of elements in the object See Also -------- :pandas_api_docs:`pandas.DataFrame.size` | 2.070422 | 2 |
src/openprocurement/tender/pricequotation/models/requirement.py | ProzorroUKR/openprocurement.api | 10 | 6624656 | <reponame>ProzorroUKR/openprocurement.api
from schematics.types import StringType, MD5Type
from schematics.types.compound import ModelType
from schematics.transforms import blacklist
from schematics.validate import ValidationError
from uuid import uuid4
from openprocurement.api.models import schematics_default_role, schematics_embedded_role
from openprocurement.api.models import Model
from openprocurement.api.models import Unit as BaseUnit
from openprocurement.api.utils import get_now, get_first_revision_date
from openprocurement.api.constants import PQ_CRITERIA_ID_FROM
from openprocurement.tender.core.validation import validate_value_type
from openprocurement.tender.core.models import get_tender
class Unit(BaseUnit):
name = StringType(required=True)
class ValidateIdMixing(Model):
id = StringType(required=True, default=lambda: uuid4().hex)
def validate_id(self, data, value):
tender = get_tender(data["__parent__"])
if get_first_revision_date(tender, default=get_now()) > PQ_CRITERIA_ID_FROM:
field = MD5Type()
value = field.to_native(value)
field.validate(value)
def validate_criteria_id_uniq(objs, *args):
if objs:
tender = get_tender(objs[0])
if get_first_revision_date(tender, default=get_now()) > PQ_CRITERIA_ID_FROM:
ids = [i.id for i in objs]
if len(set(ids)) != len(ids):
raise ValidationError("Criteria id should be uniq")
rg_ids = [rg.id for c in objs for rg in c.requirementGroups]
if len(rg_ids) != len(set(rg_ids)):
raise ValidationError("Requirement group id should be uniq in tender")
req_ids = [req.id for c in objs for rg in c.requirementGroups for req in rg.requirements]
if len(req_ids) != len(set(req_ids)):
raise ValidationError("Requirement id should be uniq for all requirements in tender")
class Requirement(ValidateIdMixing, Model):
class Options:
namespace = "Requirement"
roles = {
"create": blacklist(),
"edit_draft": blacklist(),
"embedded": schematics_embedded_role,
"view": schematics_default_role,
}
title = StringType(required=True)
description = StringType()
dataType = StringType(required=True,
choices=["string", "number", "integer", "boolean"])
unit = ModelType(Unit)
minValue = StringType()
maxValue = StringType()
expectedValue = StringType()
def validate_minValue(self, data, value):
validate_value_type(value, data['dataType'])
def validate_maxValue(self, data, value):
validate_value_type(value, data['dataType'])
def validate_expectedValue(self, data, value):
validate_value_type(value, data['dataType'])
| from schematics.types import StringType, MD5Type
from schematics.types.compound import ModelType
from schematics.transforms import blacklist
from schematics.validate import ValidationError
from uuid import uuid4
from openprocurement.api.models import schematics_default_role, schematics_embedded_role
from openprocurement.api.models import Model
from openprocurement.api.models import Unit as BaseUnit
from openprocurement.api.utils import get_now, get_first_revision_date
from openprocurement.api.constants import PQ_CRITERIA_ID_FROM
from openprocurement.tender.core.validation import validate_value_type
from openprocurement.tender.core.models import get_tender
class Unit(BaseUnit):
name = StringType(required=True)
class ValidateIdMixing(Model):
id = StringType(required=True, default=lambda: uuid4().hex)
def validate_id(self, data, value):
tender = get_tender(data["__parent__"])
if get_first_revision_date(tender, default=get_now()) > PQ_CRITERIA_ID_FROM:
field = MD5Type()
value = field.to_native(value)
field.validate(value)
def validate_criteria_id_uniq(objs, *args):
if objs:
tender = get_tender(objs[0])
if get_first_revision_date(tender, default=get_now()) > PQ_CRITERIA_ID_FROM:
ids = [i.id for i in objs]
if len(set(ids)) != len(ids):
raise ValidationError("Criteria id should be uniq")
rg_ids = [rg.id for c in objs for rg in c.requirementGroups]
if len(rg_ids) != len(set(rg_ids)):
raise ValidationError("Requirement group id should be uniq in tender")
req_ids = [req.id for c in objs for rg in c.requirementGroups for req in rg.requirements]
if len(req_ids) != len(set(req_ids)):
raise ValidationError("Requirement id should be uniq for all requirements in tender")
class Requirement(ValidateIdMixing, Model):
class Options:
namespace = "Requirement"
roles = {
"create": blacklist(),
"edit_draft": blacklist(),
"embedded": schematics_embedded_role,
"view": schematics_default_role,
}
title = StringType(required=True)
description = StringType()
dataType = StringType(required=True,
choices=["string", "number", "integer", "boolean"])
unit = ModelType(Unit)
minValue = StringType()
maxValue = StringType()
expectedValue = StringType()
def validate_minValue(self, data, value):
validate_value_type(value, data['dataType'])
def validate_maxValue(self, data, value):
validate_value_type(value, data['dataType'])
def validate_expectedValue(self, data, value):
validate_value_type(value, data['dataType']) | none | 1 | 2.23782 | 2 | |
models/sandbox_currency.py | NikolayXHD/tinkoff-api-python | 0 | 6624657 | <reponame>NikolayXHD/tinkoff-api-python
from __future__ import annotations
import enum
class SandboxCurrency(enum.Enum):
RUB = 'RUB'
USD = 'USD'
EUR = 'EUR'
GBP = 'GBP'
HKD = 'HKD'
CHF = 'CHF'
JPY = 'JPY'
CNY = 'CNY'
TRY = 'TRY'
| from __future__ import annotations
import enum
class SandboxCurrency(enum.Enum):
RUB = 'RUB'
USD = 'USD'
EUR = 'EUR'
GBP = 'GBP'
HKD = 'HKD'
CHF = 'CHF'
JPY = 'JPY'
CNY = 'CNY'
TRY = 'TRY' | none | 1 | 2.51757 | 3 | |
tests/python/ConfigTest.py | elsandosgrande/OpenColorIO | 611 | 6624658 | <reponame>elsandosgrande/OpenColorIO
# SPDX-License-Identifier: BSD-3-Clause
# Copyright Contributors to the OpenColorIO Project.
import copy
import unittest
import os
import sys
import PyOpenColorIO as OCIO
from UnitTestUtils import (SIMPLE_CONFIG_VIRTUAL_DISPLAY,
SIMPLE_CONFIG_VIRTUAL_DISPLAY_ACTIVE_DISPLAY,
SIMPLE_CONFIG_VIRTUAL_DISPLAY_V1,
SIMPLE_CONFIG_VIRTUAL_DISPLAY_EXCEPTION)
# Legacy tests kept for reference.
#
# class ConfigTest(unittest.TestCase):
#
# SIMPLE_PROFILE = """ocio_profile_version: 1
#
# search_path: luts
# strictparsing: false
# luma: [0.2126, 0.7152, 0.0722]
#
# roles:
# default: raw
# scene_linear: lnh
#
# displays:
# sRGB:
# - !<View> {name: Film1D, colorspace: vd8}
# - !<View> {name: Raw, colorspace: raw}
#
# active_displays: []
# active_views: []
#
# colorspaces:
# - !<ColorSpace>
# name: raw
# family: raw
# equalitygroup: ""
# bitdepth: 32f
# description: |
# A raw color space. Conversions to and from this space are no-ops.
#
# isdata: true
# allocation: uniform
#
# - !<ColorSpace>
# name: lnh
# family: ln
# equalitygroup: ""
# bitdepth: 16f
# description: |
# The show reference space. This is a sensor referred linear
# representation of the scene with primaries that correspond to
# scanned film. 0.18 in this space corresponds to a properly
# exposed 18% grey card.
#
# isdata: false
# allocation: lg2
#
# - !<ColorSpace>
# name: vd8
# family: vd8
# equalitygroup: ""
# bitdepth: 8ui
# description: |
# how many transforms can we use?
#
# isdata: false
# allocation: uniform
# to_reference: !<GroupTransform>
# children:
# - !<ExponentTransform> {value: 2.2}
# - !<MatrixTransform> {matrix: [1, 2, 3, 4, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1], offset: [1, 2, 0, 0]}
# - !<CDLTransform> {slope: [0.9, 1, 1], offset: [0.1, 0.3, 0.4], power: [1.1, 1.1, 1.1], sat: 0.9}
# """
#
# def setUp(self):
#
# osx_hack = ''
# if osname=="Darwin":
# osx_hack = """
# // OSX segfault work-around: Force a no-op sampling of the 3D LUT.
# texture3D(lut3d, 0.96875 * out_pixel.rgb + 0.015625).rgb;"""
#
# self.GLSLResult = """
# // Generated by OpenColorIO
#
# vec4 pytestocio(in vec4 inPixel,
# const sampler3D lut3d)
# {
# vec4 out_pixel = inPixel;
# out_pixel = out_pixel * mat4(1.0874889, -0.079466686, -0.0080222245, 0., -0.023622228, 1.0316445, -0.0080222245, 0., -0.023622226, -0.079466686, 1.1030889, 0., 0., 0., 0., 1.);
# out_pixel = pow(max(out_pixel, vec4(0., 0., 0., 0.)), vec4(0.90909088, 0.90909088, 0.90909088, 1.));
# out_pixel = out_pixel * mat4(1.1111112, -2., -3., -4., 0., 1., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1.);
# out_pixel = vec4(4.688889, -2.3, -0.40000001, -0.) + out_pixel;
# out_pixel = pow(max(out_pixel, vec4(0., 0., 0., 0.)), vec4(0.45454544, 0.45454544, 0.45454544, 1.));""" \
# + osx_hack + \
# """
# return out_pixel;
# }
#
# """
#
# def test_is_editable(self):
#
# cfg = OCIO.Config().CreateFromStream(self.SIMPLE_PROFILE)
# self.assertEqual(cfg.isEditable(), False)
# cfg = cfg.createEditableCopy()
# self.assertEqual(cfg.isEditable(), True)
# ctx = cfg.getCurrentContext()
# self.assertEqual(ctx.isEditable(), False)
# ctx = ctx.createEditableCopy()
# self.assertEqual(ctx.isEditable(), True)
# ctx.setEnvironmentMode(OCIO.ENV_ENVIRONMENT_LOAD_ALL)
#
# def test_interface(self):
#
# _cfge = OCIO.Config().CreateFromStream(self.SIMPLE_PROFILE)
# _cfge.clearEnvironmentVars()
# self.assertEqual(0, _cfge.getNumEnvironmentVars())
# _cfge.addEnvironmentVar("FOO", "test1")
# _cfge.addEnvironmentVar("FOO2", "test2${FOO}")
# self.assertEqual(2, _cfge.getNumEnvironmentVars())
# self.assertEqual("FOO", _cfge.getEnvironmentVarNameByIndex(0))
# self.assertEqual("FOO2", _cfge.getEnvironmentVarNameByIndex(1))
# self.assertEqual("test1", _cfge.getEnvironmentVarDefault("FOO"))
# self.assertEqual("test2${FOO}", _cfge.getEnvironmentVarDefault("FOO2"))
# self.assertEqual("test2test1", _cfge.getCurrentContext().resolveStringVar("${FOO2}"))
# self.assertEqual({'FOO': 'test1', 'FOO2': 'test2${FOO}'}, _cfge.getEnvironmentVarDefaults())
# _cfge.clearEnvironmentVars()
# self.assertEqual(0, _cfge.getNumEnvironmentVars())
# self.assertEqual("luts", _cfge.getSearchPath())
# _cfge.setSearchPath("otherdir")
# self.assertEqual("otherdir", _cfge.getSearchPath())
# _cfge.validate()
# _cfge.setDescription("testdesc")
# self.assertEqual("testdesc", _cfge.getDescription())
# self.assertEqual(self.SIMPLE_PROFILE, _cfg.serialize())
# #self.assertEqual("$07d1fb1509eeae1837825fd4242f8a69:$885ad1683add38a11f7bbe34e8bf9ac0",
# # _cfg.getCacheID())
# con = _cfge.getCurrentContext()
# self.assertNotEqual(0, con.getNumStringVars())
# _cfge.setWorkingDir("/foobar")
# self.assertEqual("/foobar", _cfge.getWorkingDir())
# self.assertEqual(3, _cfge.getNumColorSpaces())
# self.assertEqual("lnh", _cfge.getColorSpaceNameByIndex(1))
# lnh = _cfge.getColorSpace("lnh")
# self.assertEqual("ln", lnh.getFamily())
# self.assertEqual(-1, _cfge.getIndexForColorSpace("foobar"))
# cs = OCIO.ColorSpace()
# cs.setName("blah")
# _cfge.addColorSpace(cs)
# self.assertEqual(3, _cfge.getIndexForColorSpace("blah"))
# #_cfge.clearColorSpaces()
# #_cfge.parseColorSpaceFromString("foo")
# self.assertEqual(False, _cfg.isStrictParsingEnabled())
# _cfge.setStrictParsingEnabled(True)
# self.assertEqual(True, _cfge.isStrictParsingEnabled())
# self.assertEqual(2, _cfge.getNumRoles())
# self.assertEqual(False, _cfg.hasRole("foo"))
# _cfge.setRole("foo", "vd8")
# self.assertEqual(3, _cfge.getNumRoles())
# self.assertEqual(True, _cfge.hasRole("foo"))
# self.assertEqual("foo", _cfge.getRoleName(1))
# self.assertEqual("sRGB", _cfge.getDefaultDisplay())
# self.assertEqual(1, _cfge.getNumDisplays())
# self.assertEqual("sRGB", _cfge.getDisplay(0))
# self.assertEqual("Film1D", _cfge.getDefaultView("sRGB"))
# self.assertEqual(2, _cfge.getNumViews("sRGB"))
# self.assertEqual("Raw", _cfge.getView("sRGB", 1))
# self.assertEqual("vd8", _cfge.getDisplayColorSpaceName("sRGB", "Film1D"))
# self.assertEqual("", _cfg.getDisplayLooks("sRGB", "Film1D"))
# _cfge.addDisplay("foo", "bar", "foo", "wee")
# _cfge.clearDisplays()
# _cfge.setActiveDisplays("sRGB")
# self.assertEqual("sRGB", _cfge.getActiveDisplays())
# _cfge.setActiveViews("Film1D")
# self.assertEqual("Film1D", _cfge.getActiveViews())
# luma = _cfge.getDefaultLumaCoefs()
# self.assertAlmostEqual(0.2126, luma[0], delta=1e-8)
# _cfge.setDefaultLumaCoefs([0.1, 0.2, 0.3])
# tnewluma = _cfge.getDefaultLumaCoefs()
# self.assertAlmostEqual(0.1, tnewluma[0], delta=1e-8)
# self.assertEqual(0, _cfge.getNumLooks())
# lk = OCIO.Look()
# lk.setName("coollook")
# lk.setProcessSpace("somespace")
# et = OCIO.ExponentTransform()
# et.setValue([0.1, 0.2, 0.3, 0.4])
# lk.setTransform(et)
# iet = OCIO.ExponentTransform()
# iet.setValue([-0.1, -0.2, -0.3, -0.4])
# lk.setInverseTransform(iet)
# _cfge.addLook(lk)
# self.assertEqual(1, _cfge.getNumLooks())
# self.assertEqual("coollook", _cfge.getLookNameByIndex(0))
# glk = _cfge.getLook("coollook")
# self.assertEqual("somespace", glk.getProcessSpace())
# _cfge.clearLooks()
# self.assertEqual(0, _cfge.getNumLooks())
#
# #getProcessor(context, srcColorSpace, dstColorSpace)
# #getProcessor(context, srcName,dstName);
# #getProcessor(transform);
# #getProcessor(transform, direction);
# #getProcessor(context, transform, direction);
#
# _proc = _cfg.getProcessor("lnh", "vd8")
# self.assertEqual(False, _proc.isNoOp())
# self.assertEqual(True, _proc.hasChannelCrosstalk())
#
# #float packedpix[] = new float[]{0.48f, 0.18f, 0.9f, 1.0f,
# # 0.48f, 0.18f, 0.18f, 1.0f,
# # 0.48f, 0.18f, 0.18f, 1.0f,
# # 0.48f, 0.18f, 0.18f, 1.0f };
# #FloatBuffer buf = ByteBuffer.allocateDirect(2 * 2 * 4 * Float.SIZE / 8).asFloatBuffer();
# #buf.put(packedpix);
# #PackedImageDesc foo = new PackedImageDesc(buf, 2, 2, 4);
# #_proc.apply(foo);
# #FloatBuffer wee = foo.getData();
# #self.assertEqual(-2.4307251581696764E-35f, wee.get(2), 1e-8);
#
# # TODO: these should work in-place
# rgbfoo = _proc.applyRGB([0.48, 0.18, 0.18])
# self.assertAlmostEqual(1.9351077, rgbfoo[0], delta=1e-7);
# # TODO: these should work in-place
# rgbafoo = _proc.applyRGBA([0.48, 0.18, 0.18, 1.0])
# self.assertAlmostEqual(1.0, rgbafoo[3], delta=1e-8)
# #self.assertEqual("$a92ef63abd9edf61ad5a7855da064648", _proc.getCpuCacheID())
#
# _cfge.clearSearchPaths()
# self.assertEqual(0, _cfge.getNumSearchPaths())
# _cfge.addSearchPath("First/ Path")
# self.assertEqual(1, _cfge.getNumSearchPaths())
# _cfge.addSearchPath("D:\\Second\\Path\\")
# self.assertEqual(2, _cfge.getNumSearchPaths())
# self.assertEqual("First/ Path", _cfge.getSearchPathByIndex(0))
# self.assertEqual("D:\\Second\\Path\\", _cfge.getSearchPathByIndex(1))
#
# del _cfge
# del _cfg
class ConfigTest(unittest.TestCase):
    """Tests for the OCIO.Config Python bindings (v2 API surface).

    Covers deep copying, shared views, viewing-rule-filtered views,
    named transforms (including active/inactive filtering), canonical
    name resolution, and the virtual display interface.
    """

    def test_copy(self):
        """
        Test the deepcopy() method.
        """
        cfg = OCIO.Config.CreateRaw()
        cfg.setMajorVersion(2)
        cfg.setMinorVersion(1)
        cfg.setName('test config')
        cfg.setDescription('test description')
        cfg.addColorSpace(
            OCIO.ColorSpace(OCIO.REFERENCE_SPACE_DISPLAY,
                            "display_cs",
                            toReference=OCIO.CDLTransform(sat=1.5)))
        cfg.addColorSpace(
            OCIO.ColorSpace(OCIO.REFERENCE_SPACE_SCENE,
                            "raw",
                            isData=True))
        rules = OCIO.FileRules()
        rules.insertRule(0, 'A', 'raw', '*', 'exr')
        rules.insertRule(1, 'B', 'display_cs', '*', 'png')
        cfg.setFileRules(rules)

        other = copy.deepcopy(cfg)
        # Deep copy must be a distinct object with equal contents.
        self.assertFalse(other is cfg)

        self.assertEqual(other.getMajorVersion(), cfg.getMajorVersion())
        self.assertEqual(other.getMinorVersion(), cfg.getMinorVersion())
        self.assertEqual(other.getName(), cfg.getName())
        self.assertEqual(other.getDescription(), cfg.getDescription())
        self.assertEqual(list(other.getColorSpaceNames()), list(cfg.getColorSpaceNames()))
        self.assertEqual(other.getFileRules().getNumEntries(), cfg.getFileRules().getNumEntries())

        # Check that the file rules are not shared between the two config instances.
        rules.removeRule(0)
        other.setFileRules(rules)
        self.assertEqual(other.getFileRules().getNumEntries(), cfg.getFileRules().getNumEntries() - 1)

    def test_shared_views(self):
        """Exercise add/get/remove of config-level shared views."""
        # Test these Config functions: addSharedView, getSharedViews, removeSharedView.
        cfg = OCIO.Config.CreateRaw()
        views = cfg.getSharedViews()
        self.assertEqual(0, len(views))

        # Shared view has to have a name.
        with self.assertRaises(OCIO.Exception):
            cfg.addSharedView(view='',
                              viewTransformName='',
                              colorSpaceName='c1',
                              looks='',
                              ruleName='',
                              description='')

        # Shared view has to have a color space name.
        with self.assertRaises(OCIO.Exception):
            cfg.addSharedView(view='view1',
                              viewTransformName='',
                              colorSpaceName='',
                              looks='',
                              ruleName='',
                              description='')

        cfg.addSharedView(view='view1',
                          viewTransformName='',
                          colorSpaceName='c1',
                          looks='',
                          ruleName='',
                          description='')
        cfg.addSharedView(view='view2',
                          colorSpaceName='c2',
                          viewTransformName='t2',
                          looks='',
                          ruleName='',
                          description='')
        cfg.addSharedView(view='view3',
                          colorSpaceName='c3',
                          looks='l3',
                          viewTransformName='',
                          ruleName='',
                          description='')
        cfg.addSharedView(view='view4',
                          colorSpaceName='c4',
                          ruleName='r4',
                          looks='',
                          viewTransformName='',
                          description='')
        cfg.addSharedView(view='view5',
                          colorSpaceName='c5',
                          ruleName='',
                          looks='',
                          viewTransformName='',
                          description='description 5')
        # Positional form: view, viewTransform, colorSpace, looks, rule, description.
        cfg.addSharedView('view6', 't6', 'c6', 'l6', 'r6', 'desc6')

        views = cfg.getSharedViews()
        self.assertEqual(6, len(views))
        self.assertEqual('view1', next(views))
        self.assertEqual('view2', next(views))
        self.assertEqual('view3', next(views))
        self.assertEqual('view4', next(views))
        self.assertEqual('view5', next(views))
        self.assertEqual('view6', next(views))

        # Empty display name queries the shared views directly.
        self.assertEqual('', cfg.getDisplayViewTransformName('', 'view1'))
        self.assertEqual('t2', cfg.getDisplayViewTransformName('', 'view2'))
        self.assertEqual('', cfg.getDisplayViewTransformName('', 'view3'))
        self.assertEqual('', cfg.getDisplayViewTransformName('', 'view4'))
        self.assertEqual('', cfg.getDisplayViewTransformName('', 'view5'))
        self.assertEqual('t6', cfg.getDisplayViewTransformName('', 'view6'))

        self.assertEqual('c1', cfg.getDisplayViewColorSpaceName('', 'view1'))
        self.assertEqual('c2', cfg.getDisplayViewColorSpaceName('', 'view2'))
        self.assertEqual('c3', cfg.getDisplayViewColorSpaceName('', 'view3'))
        self.assertEqual('c4', cfg.getDisplayViewColorSpaceName('', 'view4'))
        self.assertEqual('c5', cfg.getDisplayViewColorSpaceName('', 'view5'))
        self.assertEqual('c6', cfg.getDisplayViewColorSpaceName('', 'view6'))

        self.assertEqual('', cfg.getDisplayViewLooks('', 'view1'))
        self.assertEqual('', cfg.getDisplayViewLooks('', 'view2'))
        self.assertEqual('l3', cfg.getDisplayViewLooks('', 'view3'))
        self.assertEqual('', cfg.getDisplayViewLooks('', 'view4'))
        self.assertEqual('', cfg.getDisplayViewLooks('', 'view5'))
        self.assertEqual('l6', cfg.getDisplayViewLooks('', 'view6'))

        self.assertEqual('', cfg.getDisplayViewRule('', 'view1'))
        self.assertEqual('', cfg.getDisplayViewRule('', 'view2'))
        self.assertEqual('', cfg.getDisplayViewRule('', 'view3'))
        self.assertEqual('r4', cfg.getDisplayViewRule('', 'view4'))
        self.assertEqual('', cfg.getDisplayViewRule('', 'view5'))
        self.assertEqual('r6', cfg.getDisplayViewRule('', 'view6'))

        self.assertEqual('', cfg.getDisplayViewDescription('', 'view1'))
        self.assertEqual('', cfg.getDisplayViewDescription('', 'view2'))
        self.assertEqual('', cfg.getDisplayViewDescription('', 'view3'))
        self.assertEqual('', cfg.getDisplayViewDescription('', 'view4'))
        self.assertEqual('description 5', cfg.getDisplayViewDescription('', 'view5'))
        self.assertEqual('desc6', cfg.getDisplayViewDescription('', 'view6'))

        # Adding a shared view using an existing name is replacing the existing view.
        cfg.addSharedView(view='view3',
                          colorSpaceName='c3 new',
                          looks='l3 new',
                          viewTransformName='t3 new',
                          ruleName='r3 new',
                          description='desc3 new')
        views = cfg.getSharedViews()
        self.assertEqual(6, len(views))
        self.assertEqual(
            't3 new', cfg.getDisplayViewTransformName('', 'view3'))
        self.assertEqual(
            'c3 new', cfg.getDisplayViewColorSpaceName('', 'view3'))
        self.assertEqual('l3 new', cfg.getDisplayViewLooks('', 'view3'))
        self.assertEqual('r3 new', cfg.getDisplayViewRule('', 'view3'))
        self.assertEqual(
            'desc3 new', cfg.getDisplayViewDescription('', 'view3'))

        # Remove shared views.
        # View has to exist.
        with self.assertRaises(OCIO.Exception):
            cfg.removeSharedView('unknown view')

        # Existing views can be removed.
        cfg.removeSharedView('view3')
        views = cfg.getSharedViews()
        self.assertEqual(5, len(views))
        cfg.removeSharedView('view4')
        cfg.removeSharedView('view5')
        cfg.removeSharedView('view6')
        cfg.removeSharedView('view1')
        cfg.removeSharedView('view2')
        views = cfg.getSharedViews()
        self.assertEqual(0, len(views))

    def test_ruled_views(self):
        """Exercise view listing filtered by active views and viewing rules."""
        # Test these Config functions: getDisplays, getViews, removeDisplayView
        SIMPLE_PROFILE = """ocio_profile_version: 2

search_path: ""
strictparsing: true
luma: [0.2126, 0.7152, 0.0722]

roles:
  default: raw
  scene_linear: c3

file_rules:
  - !<Rule> {name: ColorSpaceNamePathSearch}
  - !<Rule> {name: Default, colorspace: raw}

viewing_rules:
  - !<Rule> {name: Rule_1, colorspaces: c1}
  - !<Rule> {name: Rule_2, colorspaces: [c2, c3]}
  - !<Rule> {name: Rule_3, colorspaces: scene_linear}
  - !<Rule> {name: Rule_4, colorspaces: [c3, c4]}
  - !<Rule> {name: Rule_5, encodings: log}
  - !<Rule> {name: Rule_6, encodings: [log, video]}

shared_views:
  - !<View> {name: SView_a, colorspace: raw, rule: Rule_2}
  - !<View> {name: SView_b, colorspace: raw, rule: Rule_3}
  - !<View> {name: SView_c, colorspace: raw}
  - !<View> {name: SView_d, colorspace: raw, rule: Rule_5}
  - !<View> {name: SView_e, colorspace: raw}

displays:
  sRGB:
    - !<View> {name: View_a, colorspace: raw, rule: Rule_1}
    - !<View> {name: View_b, colorspace: raw, rule: Rule_2}
    - !<View> {name: View_c, colorspace: raw, rule: Rule_2}
    - !<View> {name: View_d, colorspace: raw, rule: Rule_3}
    - !<View> {name: View_e, colorspace: raw, rule: Rule_4}
    - !<View> {name: View_f, colorspace: raw, rule: Rule_5}
    - !<View> {name: View_g, colorspace: raw, rule: Rule_6}
    - !<View> {name: View_h, colorspace: raw}
    - !<Views> [SView_a, SView_b, SView_d, SView_e]

active_displays: []
active_views: []

colorspaces:
  - !<ColorSpace>
    name: raw
    family: ""
    equalitygroup: ""
    bitdepth: unknown
    isdata: false
    allocation: uniform

  - !<ColorSpace>
    name: c1
    family: ""
    equalitygroup: ""
    bitdepth: unknown
    isdata: false
    encoding: video
    allocation: uniform

  - !<ColorSpace>
    name: c2
    family: ""
    equalitygroup: ""
    bitdepth: unknown
    isdata: false
    allocation: uniform

  - !<ColorSpace>
    name: c3
    family: ""
    equalitygroup: ""
    bitdepth: unknown
    isdata: false
    allocation: uniform

  - !<ColorSpace>
    name: c4
    family: ""
    equalitygroup: ""
    bitdepth: unknown
    isdata: false
    encoding: log
    allocation: uniform

  - !<ColorSpace>
    name: c5
    family: ""
    equalitygroup: ""
    bitdepth: unknown
    isdata: false
    encoding: data
    allocation: uniform

  - !<ColorSpace>
    name: c6
    family: ""
    equalitygroup: ""
    bitdepth: unknown
    isdata: false
    encoding: video
    allocation: uniform
"""

        # Create a config.
        cfg = OCIO.Config.CreateFromStream(SIMPLE_PROFILE)

        # Check number of displays.
        displays = cfg.getDisplays()
        self.assertEqual(1, len(displays))

        # Add a view in a new display.
        cfg.addDisplayView('otherDisplay', 'otherView', 'c6', '')

        # Check there is a new display and check view.
        displays = cfg.getDisplays()
        self.assertEqual(2, len(displays))
        self.assertEqual('sRGB', next(displays))
        self.assertEqual('otherDisplay', next(displays))
        views = cfg.getViews('otherDisplay')
        self.assertEqual(1, len(views))
        self.assertEqual('otherView', next(views))

        # Parameter case does not matter.
        views = cfg.getViews('oTHerdISplay')
        self.assertEqual(1, len(views))

        # Add a shared view to the new display.
        cfg.addDisplaySharedView('otherDisplay', 'SView_a')
        views = cfg.getViews('otherDisplay')
        self.assertEqual(2, len(views))
        self.assertEqual('otherView', next(views))
        self.assertEqual('SView_a', next(views))

        # Remove the views (and the display).
        cfg.removeDisplayView('otherDisplay', 'otherView')
        displays = cfg.getDisplays()
        self.assertEqual(2, len(displays))
        cfg.removeDisplayView('otherDisplay', 'SView_a')
        # Removing the last view also removes the display.
        displays = cfg.getDisplays()
        self.assertEqual(1, len(displays))

        # Check shared views defined by config.
        views = cfg.getSharedViews()
        self.assertEqual(5, len(views))
        self.assertEqual('SView_a', next(views))
        self.assertEqual('SView_b', next(views))
        self.assertEqual('SView_c', next(views))
        self.assertEqual('SView_d', next(views))
        self.assertEqual('SView_e', next(views))

        # Check views for sRGB display.
        views = cfg.getViews('sRGB')
        self.assertEqual(12, len(views))

        # Active views are taken into account for getViews.
        cfg.setActiveViews('View_a, View_b, SView_a, SView_b')
        views = cfg.getViews('sRGB')
        self.assertEqual(4, len(views))
        cfg.setActiveViews('')

        # Views filtered by viewing rules.
        views = cfg.getViews('sRGB', 'c3')
        self.assertEqual(8, len(views))
        # View_b rule is Rule_2 that lists c3.
        self.assertEqual('View_b', next(views))
        # View_c rule is Rule_2 that lists c3.
        self.assertEqual('View_c', next(views))
        # View_d rule is Rule_3 that lists c3.
        self.assertEqual('View_d', next(views))
        # View_e rule is Rule_4 that lists c3.
        self.assertEqual('View_e', next(views))
        # View_h has no rule.
        self.assertEqual('View_h', next(views))
        # SView_a has rule Rule_2 that lists c3.
        self.assertEqual('SView_a', next(views))
        # SView_b has rule Rule_3 that lists c3.
        self.assertEqual('SView_b', next(views))
        # SView_e has no rule.
        self.assertEqual('SView_e', next(views))

        views = cfg.getViews('sRGB', 'c4')
        self.assertEqual(6, len(views))
        # View_e rule is Rule_4 that lists c4.
        self.assertEqual('View_e', next(views))
        # View_f rule is Rule_5 that lists encoding log, c4 has encoding log.
        self.assertEqual('View_f', next(views))
        # View_g rule is Rule_6 that lists encoding log, c4 has encoding log.
        self.assertEqual('View_g', next(views))
        # View_h has no rule.
        self.assertEqual('View_h', next(views))
        # SView_d rule is Rule_5 that lists encoding log, c4 has encoding log.
        self.assertEqual('SView_d', next(views))
        # SView_e has no rule.
        self.assertEqual('SView_e', next(views))

        views = cfg.getViews('sRGB', 'c6')
        self.assertEqual(3, len(views))
        # View_g rule is Rule_6 that lists encoding video, c6 has encoding video.
        self.assertEqual('View_g', next(views))
        # View_h has no rule.
        self.assertEqual('View_h', next(views))
        # SView_e has no rule.
        self.assertEqual('SView_e', next(views))

    def test_named_transform(self):
        """Exercise add/list/clear of named transforms."""
        # Test these Config functions: addNamedTransform, getNamedTransforms,
        # getNamedTransformNames, clearNamedTransforms.
        cfg = OCIO.Config.CreateRaw()
        nt_names = cfg.getNamedTransformNames()
        self.assertEqual(0, len(nt_names))
        nts = cfg.getNamedTransforms()
        self.assertEqual(0, len(nts))

        # Add named transform.
        # Missing name.
        nt = OCIO.NamedTransform(forwardTransform=OCIO.RangeTransform())
        with self.assertRaises(OCIO.Exception):
            cfg.addNamedTransform(nt)
        # Missing forward or inverse transform.
        nt = OCIO.NamedTransform(name="namedTransform")
        with self.assertRaises(OCIO.Exception):
            cfg.addNamedTransform(nt)

        # Legal named transform can be added.
        nt = OCIO.NamedTransform(
            name="namedTransform",
            forwardTransform=OCIO.RangeTransform())
        cfg.addNamedTransform(nt)
        nt = OCIO.NamedTransform(
            name="other",
            inverseTransform=OCIO.RangeTransform())
        cfg.addNamedTransform(nt)

        nt_names = cfg.getNamedTransformNames()
        self.assertEqual(2, len(nt_names))
        self.assertEqual('namedTransform', next(nt_names))
        self.assertEqual('other', next(nt_names))

        nts = cfg.getNamedTransforms()
        self.assertEqual(2, len(nts))
        nt = next(nts)
        self.assertEqual('namedTransform', nt.getName())
        cur_tr = nt.getTransform(OCIO.TRANSFORM_DIR_FORWARD)
        self.assertIsInstance(cur_tr, OCIO.RangeTransform)
        cur_tr = nt.getTransform(OCIO.TRANSFORM_DIR_INVERSE)
        self.assertEqual(cur_tr, None)
        nt = next(nts)
        self.assertEqual('other', nt.getName())
        cur_tr = nt.getTransform(OCIO.TRANSFORM_DIR_FORWARD)
        self.assertEqual(cur_tr, None)
        cur_tr = nt.getTransform(OCIO.TRANSFORM_DIR_INVERSE)
        self.assertIsInstance(cur_tr, OCIO.RangeTransform)

        nts = cfg.getNamedTransforms()
        self.assertEqual(2, len(nts))
        cfg.clearNamedTransforms()
        nts = cfg.getNamedTransforms()
        self.assertEqual(0, len(nts))

    def test_inactive_named_transform(self):
        """Exercise active/inactive filtering of named transforms."""
        # Test the active/inactive version of these Config functions and classes: getNamedTransforms,
        # getNamedTransformNames, NamedTransformIterator, NamedTransformNameIterator.
        cfg = OCIO.Config.CreateRaw()
        nt_names = cfg.getNamedTransformNames()
        self.assertEqual(0, len(nt_names))
        nts = cfg.getNamedTransforms()
        self.assertEqual(0, len(nts))

        # Add named transforms.
        nt = OCIO.NamedTransform(
            name="nt1",
            forwardTransform=OCIO.RangeTransform())
        cfg.addNamedTransform(nt)
        nt = OCIO.NamedTransform(
            name="nt2",
            inverseTransform=OCIO.RangeTransform())
        cfg.addNamedTransform(nt)
        nt = OCIO.NamedTransform(
            name="nt3",
            forwardTransform=OCIO.RangeTransform())
        cfg.addNamedTransform(nt)

        # Inactive color space list also covers named transforms.
        cfg.setInactiveColorSpaces("nt2")

        # Check the list of active/inactive named transforms.
        # Default iteration reports only active transforms.
        nt_names = cfg.getNamedTransformNames()
        self.assertEqual(2, len(nt_names))
        self.assertEqual('nt1', next(nt_names))
        self.assertEqual('nt3', next(nt_names))
        nts = cfg.getNamedTransforms()
        self.assertEqual(2, len(nts))
        nt = next(nts)
        self.assertEqual('nt1', nt.getName())
        nt = next(nts)
        self.assertEqual('nt3', nt.getName())

        nt_names = cfg.getNamedTransformNames(OCIO.NAMEDTRANSFORM_ACTIVE)
        self.assertEqual(2, len(nt_names))
        self.assertEqual('nt1', next(nt_names))
        self.assertEqual('nt3', next(nt_names))
        nts = cfg.getNamedTransforms(OCIO.NAMEDTRANSFORM_ACTIVE)
        self.assertEqual(2, len(nts))
        nt = next(nts)
        self.assertEqual('nt1', nt.getName())
        nt = next(nts)
        self.assertEqual('nt3', nt.getName())

        nt_names = cfg.getNamedTransformNames(OCIO.NAMEDTRANSFORM_ALL)
        self.assertEqual(3, len(nt_names))
        self.assertEqual('nt1', next(nt_names))
        self.assertEqual('nt2', next(nt_names))
        self.assertEqual('nt3', next(nt_names))
        nts = cfg.getNamedTransforms(OCIO.NAMEDTRANSFORM_ALL)
        self.assertEqual(3, len(nts))
        nt = next(nts)
        self.assertEqual('nt1', nt.getName())
        nt = next(nts)
        self.assertEqual('nt2', nt.getName())
        nt = next(nts)
        self.assertEqual('nt3', nt.getName())

        nt_names = cfg.getNamedTransformNames(OCIO.NAMEDTRANSFORM_INACTIVE)
        self.assertEqual(1, len(nt_names))
        self.assertEqual('nt2', next(nt_names))
        nts = cfg.getNamedTransforms(OCIO.NAMEDTRANSFORM_INACTIVE)
        self.assertEqual(1, len(nts))
        nt = next(nts)
        self.assertEqual('nt2', nt.getName())

        cfg.clearNamedTransforms()
        nts = cfg.getNamedTransforms(OCIO.NAMEDTRANSFORM_ALL)
        self.assertEqual(0, len(nts))

    def test_canonical_name(self):
        """Exercise case-insensitive role/alias resolution to canonical names."""
        # Test these Config function: getCanonicalName.
        cfg = OCIO.Config.CreateRaw()

        # add a named transform and a color space.
        nt = OCIO.NamedTransform(
            name='nt1',
            aliases=['alias1', 'test1'],
            forwardTransform=OCIO.RangeTransform())
        cfg.addNamedTransform(nt)
        cs = OCIO.ColorSpace(
            name='cs1',
            aliases=['cs test', 'other'])
        cs.setTransform(OCIO.RangeTransform(),
                        OCIO.COLORSPACE_DIR_TO_REFERENCE)
        cfg.addColorSpace(cs)
        cfg.setRole('role', 'cs1')

        # Lookups are case-insensitive over names, aliases and roles;
        # unknown names resolve to the empty string.
        self.assertEqual(cfg.getCanonicalName(''), '')
        self.assertEqual(cfg.getCanonicalName('not found'), '')
        self.assertEqual(cfg.getCanonicalName('roLE'), 'cs1')
        self.assertEqual(cfg.getCanonicalName('CS1'), 'cs1')
        self.assertEqual(cfg.getCanonicalName('Other'), 'cs1')
        self.assertEqual(cfg.getCanonicalName('CS test'), 'cs1')
        self.assertEqual(cfg.getCanonicalName('NT1'), 'nt1')
        self.assertEqual(cfg.getCanonicalName('Alias1'), 'nt1')
        self.assertEqual(cfg.getCanonicalName('Test1'), 'nt1')

    def test_virtual_display(self):
        """Exercise the add/get/remove/clear virtual display API."""
        # Test platform agnostic virtual display interface.
        cfg = OCIO.Config.CreateRaw()
        cfg.addColorSpace(
            OCIO.ColorSpace(OCIO.REFERENCE_SPACE_DISPLAY,
                            "display_cs",
                            toReference=OCIO.CDLTransform(sat=1.5)))
        cfg.addColorSpace(
            OCIO.ColorSpace(OCIO.REFERENCE_SPACE_SCENE,
                            "raw",
                            isData=True))
        cfg.addViewTransform(
            OCIO.ViewTransform(OCIO.REFERENCE_SPACE_SCENE,
                               "default_vt",
                               toReference=OCIO.CDLTransform(sat=1.5)))
        cfg.addViewTransform(
            OCIO.ViewTransform(OCIO.REFERENCE_SPACE_DISPLAY,
                               "display_vt",
                               toReference=OCIO.CDLTransform(sat=1.5)))

        cfg.addDisplayView("sRGB", "Raw", "raw")
        cfg.addDisplayView("sRGB", "view",
                           viewTransform="display_vt",
                           displayColorSpaceName="display_cs")
        cfg.addSharedView("sview1", "", "raw")
        cfg.addSharedView("sview2", "", "raw")
        cfg.addDisplaySharedView("sRGB", "sview1")

        # Add virtual display and views
        cfg.addVirtualDisplayView("Raw", "", "raw")
        cfg.addVirtualDisplayView("Film", "display_vt",
                                  OCIO.OCIO_VIEW_USE_DISPLAY_NAME)
        cfg.addVirtualDisplaySharedView("sview2")

        # Some basic checks
        self.assertEqual(3, len(cfg.getViews("sRGB")))
        self.assertEqual(2, len(cfg.getViews(OCIO.VIEW_DISPLAY_DEFINED,
                                             "sRGB")))
        self.assertEqual(1, len(cfg.getViews(OCIO.VIEW_SHARED, "sRGB")))

        # Validate the virtual display information
        self.assertEqual(
            2,
            len(cfg.getVirtualDisplayViews(OCIO.VIEW_DISPLAY_DEFINED)))

        view_name = cfg.getVirtualDisplayViews(OCIO.VIEW_DISPLAY_DEFINED)[0]
        self.assertEqual("Raw", view_name)
        self.assertEqual("", cfg.getVirtualDisplayViewTransformName(view_name))
        self.assertEqual("raw",
                         cfg.getVirtualDisplayViewColorSpaceName(view_name))
        self.assertEqual("", cfg.getVirtualDisplayViewLooks(view_name))
        self.assertEqual("", cfg.getVirtualDisplayViewRule(view_name))
        self.assertEqual("", cfg.getVirtualDisplayViewDescription(view_name))

        view_name = cfg.getVirtualDisplayViews(OCIO.VIEW_DISPLAY_DEFINED)[1]
        self.assertEqual("Film", view_name)
        self.assertEqual("display_vt",
                         cfg.getVirtualDisplayViewTransformName(view_name))
        self.assertEqual(OCIO.OCIO_VIEW_USE_DISPLAY_NAME,
                         cfg.getVirtualDisplayViewColorSpaceName(view_name))
        self.assertEqual("", cfg.getVirtualDisplayViewLooks(view_name))
        self.assertEqual("", cfg.getVirtualDisplayViewRule(view_name))
        self.assertEqual("", cfg.getVirtualDisplayViewDescription(view_name))

        self.assertEqual(1, len(cfg.getVirtualDisplayViews(OCIO.VIEW_SHARED)))
        self.assertEqual("sview2",
                         cfg.getVirtualDisplayViews(OCIO.VIEW_SHARED)[0])

        # Remove a view from the virtual display
        cfg.removeVirtualDisplayView("Raw")
        self.assertEqual(
            1,
            len(cfg.getVirtualDisplayViews(OCIO.VIEW_DISPLAY_DEFINED)))
        self.assertEqual(
            "Film",
            cfg.getVirtualDisplayViews(OCIO.VIEW_DISPLAY_DEFINED)[0])
        self.assertEqual(1, len(cfg.getVirtualDisplayViews(OCIO.VIEW_SHARED)))
        self.assertEqual("sview2",
                         cfg.getVirtualDisplayViews(OCIO.VIEW_SHARED)[0])

        # Remove a shared view from the virtual display
        cfg.removeVirtualDisplayView("sview2")
        self.assertEqual(
            1,
            len(cfg.getVirtualDisplayViews(OCIO.VIEW_DISPLAY_DEFINED)))
        self.assertEqual(0, len(cfg.getVirtualDisplayViews(OCIO.VIEW_SHARED)))
        cfg.addVirtualDisplaySharedView("sview2")
        self.assertEqual(
            1,
            len(cfg.getVirtualDisplayViews(OCIO.VIEW_DISPLAY_DEFINED)))
        self.assertEqual(1, len(cfg.getVirtualDisplayViews(OCIO.VIEW_SHARED)))

        # Remove the virtual display
        cfg.clearVirtualDisplay()
        self.assertEqual(
            0,
            len(cfg.getVirtualDisplayViews(OCIO.VIEW_DISPLAY_DEFINED)))
        self.assertEqual(0, len(cfg.getVirtualDisplayViews(OCIO.VIEW_SHARED)))
class ConfigVirtualWithActiveDisplayTest(unittest.TestCase):
    """Virtual display behavior when active_displays/active_views are set."""

    def setUp(self):
        # Fresh config per test, parsed from the shared fixture string.
        self.cfg_active_display = OCIO.Config.CreateFromStream(
            SIMPLE_CONFIG_VIRTUAL_DISPLAY_ACTIVE_DISPLAY)

    def tearDown(self):
        # Drop the reference so each test starts from a clean slate.
        self.cfg_active_display = None

    def test_virtual_display_with_active_displays(self):
        """
        Test the virtual display instantiation when active displays
        and views are defined.
        """
        self.cfg_active_display.validate()

        # Only the single active display/view pair should be reported.
        # (Was `displays.__len__()` — call len() instead of the dunder.)
        displays = self.cfg_active_display.getDisplays()
        self.assertEqual(len(displays), 1)
        views = self.cfg_active_display.getViews('sRGB')
        self.assertEqual(len(views), 1)
class ConfigVirtualDisplayTest(unittest.TestCase):
def setUp(self):
self.cfg = OCIO.Config.CreateFromStream(SIMPLE_CONFIG_VIRTUAL_DISPLAY)
def tearDown(self):
self.cfg = None
def test_validate(self):
"""
Test validate a config containing a virtual display and
some basic checks.
"""
views = self.cfg.getViews('sRGB')
self.assertEqual(len(views), 3)
views = self.cfg.getViews(OCIO.VIEW_DISPLAY_DEFINED, "sRGB")
self.assertEqual(len(views), 2)
views = self.cfg.getViews(OCIO.VIEW_SHARED, "sRGB")
self.assertEqual(len(views), 1)
self.cfg.validate()
def test_get_virtual_display_views_display_defined(self):
"""
Test the virtual display is correctly loaded & saved.
"""
views = self.cfg.getVirtualDisplayViews(OCIO.VIEW_DISPLAY_DEFINED)
self.assertEqual(len(views), 2)
def test_get_virtual_display_raw(self):
"""
Validate the virtual display information for "Raw".
"""
view_name = self.cfg.getVirtualDisplayViews(
OCIO.VIEW_DISPLAY_DEFINED)[0]
self.assertEqual(view_name, 'Raw')
self.assertEqual(
'', self.cfg.getVirtualDisplayViewTransformName(view_name))
self.assertEqual(
'raw', self.cfg.getVirtualDisplayViewColorSpaceName(view_name))
self.assertEqual('', self.cfg.getVirtualDisplayViewLooks(view_name))
self.assertEqual('', self.cfg.getVirtualDisplayViewRule(view_name))
self.assertEqual(
'', self.cfg.getVirtualDisplayViewDescription(view_name))
def test_get_virtual_display_film(self):
"""
Validate the virtual display information for "Film".
"""
view_name = self.cfg.getVirtualDisplayViews(
OCIO.VIEW_DISPLAY_DEFINED)[1]
self.assertEqual(view_name, 'Film')
self.assertEqual(
'display_vt', self.cfg.getVirtualDisplayViewTransformName(view_name))
self.assertEqual('<USE_DISPLAY_NAME>',
self.cfg.getVirtualDisplayViewColorSpaceName(view_name))
self.assertEqual('', self.cfg.getVirtualDisplayViewLooks(view_name))
self.assertEqual('', self.cfg.getVirtualDisplayViewRule(view_name))
self.assertEqual(
'', self.cfg.getVirtualDisplayViewDescription(view_name))
def test_get_virtual_display_views_shared(self):
views = self.cfg.getVirtualDisplayViews(OCIO.VIEW_SHARED)
self.assertEqual(len(views), 1)
self.assertEqual(views[0], 'sview2')
def test_remove_view_from_virtual_display(self):
"""
Test remove a view from the Virtual Display.
"""
self.cfg.removeVirtualDisplayView('Raw')
views = self.cfg.getVirtualDisplayViews(OCIO.VIEW_DISPLAY_DEFINED)
self.assertEqual(len(views), 1)
self.assertEqual(views[0], 'Film')
views = self.cfg.getVirtualDisplayViews(OCIO.VIEW_SHARED)
self.assertEqual(len(views), 1)
self.assertEqual(views[0], 'sview2')
# Test remove a shared view from the Virtual Display.
self.cfg.removeVirtualDisplayView('sview2')
views = self.cfg.getVirtualDisplayViews(OCIO.VIEW_DISPLAY_DEFINED)
self.assertEqual(len(views), 1)
views = self.cfg.getVirtualDisplayViews(OCIO.VIEW_SHARED)
self.assertEqual(len(views), 0)
# Extra serialize & deserialize validation.
cfg = OCIO.Config.CreateFromStream(self.cfg.serialize())
views = cfg.getVirtualDisplayViews(OCIO.VIEW_DISPLAY_DEFINED)
self.assertEqual(len(views), 1)
views = cfg.getVirtualDisplayViews(OCIO.VIEW_SHARED)
self.assertEqual(len(views), 0)
self.cfg.addVirtualDisplaySharedView('sview2')
views = self.cfg.getVirtualDisplayViews(OCIO.VIEW_DISPLAY_DEFINED)
self.assertEqual(len(views), 1)
views = self.cfg.getVirtualDisplayViews(OCIO.VIEW_SHARED)
self.assertEqual(len(views), 1)
def test_remove_virtual_display(self):
    """Clear the Virtual Display and check it stays empty after a round-trip."""
    cfg = self.cfg
    cfg.clearVirtualDisplay()
    for view_type in (OCIO.VIEW_DISPLAY_DEFINED, OCIO.VIEW_SHARED):
        self.assertEqual(0, len(cfg.getVirtualDisplayViews(view_type)))
    # Extra serialize & deserialize validation.
    reloaded = OCIO.Config.CreateFromStream(cfg.serialize())
    for view_type in (OCIO.VIEW_DISPLAY_DEFINED, OCIO.VIEW_SHARED):
        self.assertEqual(0, len(reloaded.getVirtualDisplayViews(view_type)))
def test_virtual_display_v1(self):
    """Virtual displays are a v2 feature and must be rejected for v1 configs."""
    # Parsing a v1 config stream that declares a virtual display must fail.
    with self.assertRaises(OCIO.Exception):
        OCIO.Config.CreateFromStream(SIMPLE_CONFIG_VIRTUAL_DISPLAY_V1)
    # Downgrading a config that holds a virtual display to major version 1
    # makes it invalid for both validation and serialization.
    cfg = OCIO.Config.CreateRaw()
    cfg.addVirtualDisplaySharedView('sview')
    cfg.setMajorVersion(1)
    with self.assertRaises(OCIO.Exception):
        cfg.validate()
    with self.assertRaises(OCIO.Exception):
        OCIO.Config.CreateFromStream(cfg.serialize())
def test_virtual_display_exceptions(self):
    """Exercise the error paths of the virtual display API."""
    cfg = OCIO.Config.CreateFromStream(
        SIMPLE_CONFIG_VIRTUAL_DISPLAY_EXCEPTION)
    cfg.validate()
    # A shared view name may not be reused inside the virtual display.
    with self.assertRaises(OCIO.Exception) as cm:
        cfg.addVirtualDisplaySharedView('sview1')
    self.assertEqual(
        "Shared view could not be added to virtual_display: "
        "There is already a shared view named 'sview1'.",
        str(cm.exception))
    # A shared view referenced by the virtual display must be defined.
    cfg.addVirtualDisplaySharedView('sview2')
    with self.assertRaises(OCIO.Exception) as cm:
        cfg.validate()
    self.assertEqual(
        "Config failed validation. The display 'virtual_display' "
        "contains a shared view 'sview2' that is not defined.",
        str(cm.exception))
    cfg.removeVirtualDisplayView('sview2')
    cfg.validate()
    # A display-defined view name may not be reused either.
    with self.assertRaises(OCIO.Exception) as cm:
        cfg.addVirtualDisplayView('Raw', 'Film', 'raw')
    self.assertEqual(
        "View could not be added to virtual_display in config: "
        "View 'Raw' already exists.",
        str(cm.exception))
    # A view must reference an existing color space or named transform.
    cfg.addVirtualDisplayView('Raw1', 'Film', 'raw1')
    with self.assertRaises(OCIO.Exception) as cm:
        cfg.validate()
    self.assertEqual(
        "Config failed validation. Display 'virtual_display' has a "
        "view 'Raw1' that refers to a color space"
        " or a named transform, 'raw1', which is not defined.",
        str(cm.exception))
    cfg.removeVirtualDisplayView('Raw1')
    cfg.validate()
    # The same check applies when the view also specifies looks.
    cfg.addVirtualDisplayView('Raw1', 'Film', 'raw1', 'look')
    with self.assertRaises(OCIO.Exception) as cm:
        cfg.validate()
    self.assertEqual(
        "Config failed validation. Display 'virtual_display' has a view "
        "'Raw1' that refers to a color space or a named transform, "
        "'raw1', which is not defined.",
        str(cm.exception))
# SPDX-License-Identifier: BSD-3-Clause
# Copyright Contributors to the OpenColorIO Project.
import copy
import unittest
import os
import sys
import PyOpenColorIO as OCIO
from UnitTestUtils import (SIMPLE_CONFIG_VIRTUAL_DISPLAY,
SIMPLE_CONFIG_VIRTUAL_DISPLAY_ACTIVE_DISPLAY,
SIMPLE_CONFIG_VIRTUAL_DISPLAY_V1,
SIMPLE_CONFIG_VIRTUAL_DISPLAY_EXCEPTION)
# Legacy tests kept for reference.
#
# class ConfigTest(unittest.TestCase):
#
# SIMPLE_PROFILE = """ocio_profile_version: 1
#
# search_path: luts
# strictparsing: false
# luma: [0.2126, 0.7152, 0.0722]
#
# roles:
# default: raw
# scene_linear: lnh
#
# displays:
# sRGB:
# - !<View> {name: Film1D, colorspace: vd8}
# - !<View> {name: Raw, colorspace: raw}
#
# active_displays: []
# active_views: []
#
# colorspaces:
# - !<ColorSpace>
# name: raw
# family: raw
# equalitygroup: ""
# bitdepth: 32f
# description: |
# A raw color space. Conversions to and from this space are no-ops.
#
# isdata: true
# allocation: uniform
#
# - !<ColorSpace>
# name: lnh
# family: ln
# equalitygroup: ""
# bitdepth: 16f
# description: |
# The show reference space. This is a sensor referred linear
# representation of the scene with primaries that correspond to
# scanned film. 0.18 in this space corresponds to a properly
# exposed 18% grey card.
#
# isdata: false
# allocation: lg2
#
# - !<ColorSpace>
# name: vd8
# family: vd8
# equalitygroup: ""
# bitdepth: 8ui
# description: |
# how many transforms can we use?
#
# isdata: false
# allocation: uniform
# to_reference: !<GroupTransform>
# children:
# - !<ExponentTransform> {value: 2.2}
# - !<MatrixTransform> {matrix: [1, 2, 3, 4, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1], offset: [1, 2, 0, 0]}
# - !<CDLTransform> {slope: [0.9, 1, 1], offset: [0.1, 0.3, 0.4], power: [1.1, 1.1, 1.1], sat: 0.9}
# """
#
# def setUp(self):
#
# osx_hack = ''
# if osname=="Darwin":
# osx_hack = """
# // OSX segfault work-around: Force a no-op sampling of the 3D LUT.
# texture3D(lut3d, 0.96875 * out_pixel.rgb + 0.015625).rgb;"""
#
# self.GLSLResult = """
# // Generated by OpenColorIO
#
# vec4 pytestocio(in vec4 inPixel,
# const sampler3D lut3d)
# {
# vec4 out_pixel = inPixel;
# out_pixel = out_pixel * mat4(1.0874889, -0.079466686, -0.0080222245, 0., -0.023622228, 1.0316445, -0.0080222245, 0., -0.023622226, -0.079466686, 1.1030889, 0., 0., 0., 0., 1.);
# out_pixel = pow(max(out_pixel, vec4(0., 0., 0., 0.)), vec4(0.90909088, 0.90909088, 0.90909088, 1.));
# out_pixel = out_pixel * mat4(1.1111112, -2., -3., -4., 0., 1., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1.);
# out_pixel = vec4(4.688889, -2.3, -0.40000001, -0.) + out_pixel;
# out_pixel = pow(max(out_pixel, vec4(0., 0., 0., 0.)), vec4(0.45454544, 0.45454544, 0.45454544, 1.));""" \
# + osx_hack + \
# """
# return out_pixel;
# }
#
# """
#
# def test_is_editable(self):
#
# cfg = OCIO.Config().CreateFromStream(self.SIMPLE_PROFILE)
# self.assertEqual(cfg.isEditable(), False)
# cfg = cfg.createEditableCopy()
# self.assertEqual(cfg.isEditable(), True)
# ctx = cfg.getCurrentContext()
# self.assertEqual(ctx.isEditable(), False)
# ctx = ctx.createEditableCopy()
# self.assertEqual(ctx.isEditable(), True)
# ctx.setEnvironmentMode(OCIO.ENV_ENVIRONMENT_LOAD_ALL)
#
# def test_interface(self):
#
# _cfge = OCIO.Config().CreateFromStream(self.SIMPLE_PROFILE)
# _cfge.clearEnvironmentVars()
# self.assertEqual(0, _cfge.getNumEnvironmentVars())
# _cfge.addEnvironmentVar("FOO", "test1")
# _cfge.addEnvironmentVar("FOO2", "test2${FOO}")
# self.assertEqual(2, _cfge.getNumEnvironmentVars())
# self.assertEqual("FOO", _cfge.getEnvironmentVarNameByIndex(0))
# self.assertEqual("FOO2", _cfge.getEnvironmentVarNameByIndex(1))
# self.assertEqual("test1", _cfge.getEnvironmentVarDefault("FOO"))
# self.assertEqual("test2${FOO}", _cfge.getEnvironmentVarDefault("FOO2"))
# self.assertEqual("test2test1", _cfge.getCurrentContext().resolveStringVar("${FOO2}"))
# self.assertEqual({'FOO': 'test1', 'FOO2': 'test2${FOO}'}, _cfge.getEnvironmentVarDefaults())
# _cfge.clearEnvironmentVars()
# self.assertEqual(0, _cfge.getNumEnvironmentVars())
# self.assertEqual("luts", _cfge.getSearchPath())
# _cfge.setSearchPath("otherdir")
# self.assertEqual("otherdir", _cfge.getSearchPath())
# _cfge.validate()
# _cfge.setDescription("testdesc")
# self.assertEqual("testdesc", _cfge.getDescription())
# self.assertEqual(self.SIMPLE_PROFILE, _cfg.serialize())
# #self.assertEqual("$07d1fb1509eeae1837825fd4242f8a69:$885ad1683add38a11f7bbe34e8bf9ac0",
# # _cfg.getCacheID())
# con = _cfge.getCurrentContext()
# self.assertNotEqual(0, con.getNumStringVars())
# _cfge.setWorkingDir("/foobar")
# self.assertEqual("/foobar", _cfge.getWorkingDir())
# self.assertEqual(3, _cfge.getNumColorSpaces())
# self.assertEqual("lnh", _cfge.getColorSpaceNameByIndex(1))
# lnh = _cfge.getColorSpace("lnh")
# self.assertEqual("ln", lnh.getFamily())
# self.assertEqual(-1, _cfge.getIndexForColorSpace("foobar"))
# cs = OCIO.ColorSpace()
# cs.setName("blah")
# _cfge.addColorSpace(cs)
# self.assertEqual(3, _cfge.getIndexForColorSpace("blah"))
# #_cfge.clearColorSpaces()
# #_cfge.parseColorSpaceFromString("foo")
# self.assertEqual(False, _cfg.isStrictParsingEnabled())
# _cfge.setStrictParsingEnabled(True)
# self.assertEqual(True, _cfge.isStrictParsingEnabled())
# self.assertEqual(2, _cfge.getNumRoles())
# self.assertEqual(False, _cfg.hasRole("foo"))
# _cfge.setRole("foo", "vd8")
# self.assertEqual(3, _cfge.getNumRoles())
# self.assertEqual(True, _cfge.hasRole("foo"))
# self.assertEqual("foo", _cfge.getRoleName(1))
# self.assertEqual("sRGB", _cfge.getDefaultDisplay())
# self.assertEqual(1, _cfge.getNumDisplays())
# self.assertEqual("sRGB", _cfge.getDisplay(0))
# self.assertEqual("Film1D", _cfge.getDefaultView("sRGB"))
# self.assertEqual(2, _cfge.getNumViews("sRGB"))
# self.assertEqual("Raw", _cfge.getView("sRGB", 1))
# self.assertEqual("vd8", _cfge.getDisplayColorSpaceName("sRGB", "Film1D"))
# self.assertEqual("", _cfg.getDisplayLooks("sRGB", "Film1D"))
# _cfge.addDisplay("foo", "bar", "foo", "wee")
# _cfge.clearDisplays()
# _cfge.setActiveDisplays("sRGB")
# self.assertEqual("sRGB", _cfge.getActiveDisplays())
# _cfge.setActiveViews("Film1D")
# self.assertEqual("Film1D", _cfge.getActiveViews())
# luma = _cfge.getDefaultLumaCoefs()
# self.assertAlmostEqual(0.2126, luma[0], delta=1e-8)
# _cfge.setDefaultLumaCoefs([0.1, 0.2, 0.3])
# tnewluma = _cfge.getDefaultLumaCoefs()
# self.assertAlmostEqual(0.1, tnewluma[0], delta=1e-8)
# self.assertEqual(0, _cfge.getNumLooks())
# lk = OCIO.Look()
# lk.setName("coollook")
# lk.setProcessSpace("somespace")
# et = OCIO.ExponentTransform()
# et.setValue([0.1, 0.2, 0.3, 0.4])
# lk.setTransform(et)
# iet = OCIO.ExponentTransform()
# iet.setValue([-0.1, -0.2, -0.3, -0.4])
# lk.setInverseTransform(iet)
# _cfge.addLook(lk)
# self.assertEqual(1, _cfge.getNumLooks())
# self.assertEqual("coollook", _cfge.getLookNameByIndex(0))
# glk = _cfge.getLook("coollook")
# self.assertEqual("somespace", glk.getProcessSpace())
# _cfge.clearLooks()
# self.assertEqual(0, _cfge.getNumLooks())
#
# #getProcessor(context, srcColorSpace, dstColorSpace)
# #getProcessor(context, srcName,dstName);
# #getProcessor(transform);
# #getProcessor(transform, direction);
# #getProcessor(context, transform, direction);
#
# _proc = _cfg.getProcessor("lnh", "vd8")
# self.assertEqual(False, _proc.isNoOp())
# self.assertEqual(True, _proc.hasChannelCrosstalk())
#
# #float packedpix[] = new float[]{0.48f, 0.18f, 0.9f, 1.0f,
# # 0.48f, 0.18f, 0.18f, 1.0f,
# # 0.48f, 0.18f, 0.18f, 1.0f,
# # 0.48f, 0.18f, 0.18f, 1.0f };
# #FloatBuffer buf = ByteBuffer.allocateDirect(2 * 2 * 4 * Float.SIZE / 8).asFloatBuffer();
# #buf.put(packedpix);
# #PackedImageDesc foo = new PackedImageDesc(buf, 2, 2, 4);
# #_proc.apply(foo);
# #FloatBuffer wee = foo.getData();
# #self.assertEqual(-2.4307251581696764E-35f, wee.get(2), 1e-8);
#
# # TODO: these should work in-place
# rgbfoo = _proc.applyRGB([0.48, 0.18, 0.18])
# self.assertAlmostEqual(1.9351077, rgbfoo[0], delta=1e-7);
# # TODO: these should work in-place
# rgbafoo = _proc.applyRGBA([0.48, 0.18, 0.18, 1.0])
# self.assertAlmostEqual(1.0, rgbafoo[3], delta=1e-8)
# #self.assertEqual("$a92ef63abd9edf61ad5a7855da064648", _proc.getCpuCacheID())
#
# _cfge.clearSearchPaths()
# self.assertEqual(0, _cfge.getNumSearchPaths())
# _cfge.addSearchPath("First/ Path")
# self.assertEqual(1, _cfge.getNumSearchPaths())
# _cfge.addSearchPath("D:\\Second\\Path\\")
# self.assertEqual(2, _cfge.getNumSearchPaths())
# self.assertEqual("First/ Path", _cfge.getSearchPathByIndex(0))
# self.assertEqual("D:\\Second\\Path\\", _cfge.getSearchPathByIndex(1))
#
# del _cfge
# del _cfg
class ConfigTest(unittest.TestCase):
    """Unit tests for OCIO.Config covering deep copy, shared views,
    viewing rules, named transforms, canonical name resolution and the
    platform-agnostic virtual display."""

    def test_copy(self):
        """
        Test the deepcopy() method.
        """
        cfg = OCIO.Config.CreateRaw()
        cfg.setMajorVersion(2)
        cfg.setMinorVersion(1)
        cfg.setName('test config')
        cfg.setDescription('test description')
        cfg.addColorSpace(
            OCIO.ColorSpace(OCIO.REFERENCE_SPACE_DISPLAY,
                            "display_cs",
                            toReference=OCIO.CDLTransform(sat=1.5)))
        cfg.addColorSpace(
            OCIO.ColorSpace(OCIO.REFERENCE_SPACE_SCENE,
                            "raw",
                            isData=True))
        rules = OCIO.FileRules()
        rules.insertRule(0, 'A', 'raw', '*', 'exr')
        rules.insertRule(1, 'B', 'display_cs', '*', 'png')
        cfg.setFileRules(rules)
        other = copy.deepcopy(cfg)
        self.assertFalse(other is cfg)
        # The copy must match the original attribute for attribute.
        self.assertEqual(other.getMajorVersion(), cfg.getMajorVersion())
        self.assertEqual(other.getMinorVersion(), cfg.getMinorVersion())
        self.assertEqual(other.getName(), cfg.getName())
        self.assertEqual(other.getDescription(), cfg.getDescription())
        self.assertEqual(list(other.getColorSpaceNames()), list(cfg.getColorSpaceNames()))
        self.assertEqual(other.getFileRules().getNumEntries(), cfg.getFileRules().getNumEntries())
        # Check that the file rules are not shared between the two config instances.
        rules.removeRule(0)
        other.setFileRules(rules)
        self.assertEqual(other.getFileRules().getNumEntries(), cfg.getFileRules().getNumEntries() - 1)

    def test_shared_views(self):
        """Add, query and remove config-level shared views."""
        # Test these Config functions: addSharedView, getSharedViews, removeSharedView.
        cfg = OCIO.Config.CreateRaw()
        views = cfg.getSharedViews()
        self.assertEqual(0, len(views))
        # Shared view has to have a name.
        with self.assertRaises(OCIO.Exception):
            cfg.addSharedView(view='',
                              viewTransformName='',
                              colorSpaceName='c1',
                              looks='',
                              ruleName='',
                              description='')
        # Shared view has to have a color space name.
        with self.assertRaises(OCIO.Exception):
            cfg.addSharedView(view='view1',
                              viewTransformName='',
                              colorSpaceName='',
                              looks='',
                              ruleName='',
                              description='')
        cfg.addSharedView(view='view1',
                          viewTransformName='',
                          colorSpaceName='c1',
                          looks='',
                          ruleName='',
                          description='')
        cfg.addSharedView(view='view2',
                          colorSpaceName='c2',
                          viewTransformName='t2',
                          looks='',
                          ruleName='',
                          description='')
        cfg.addSharedView(view='view3',
                          colorSpaceName='c3',
                          looks='l3',
                          viewTransformName='',
                          ruleName='',
                          description='')
        cfg.addSharedView(view='view4',
                          colorSpaceName='c4',
                          ruleName='r4',
                          looks='',
                          viewTransformName='',
                          description='')
        cfg.addSharedView(view='view5',
                          colorSpaceName='c5',
                          ruleName='',
                          looks='',
                          viewTransformName='',
                          description='description 5')
        # Positional form: view, viewTransformName, colorSpaceName, looks,
        # ruleName, description.
        cfg.addSharedView('view6', 't6', 'c6', 'l6', 'r6', 'desc6')
        # Views are iterated in insertion order.
        views = cfg.getSharedViews()
        self.assertEqual(6, len(views))
        self.assertEqual('view1', next(views))
        self.assertEqual('view2', next(views))
        self.assertEqual('view3', next(views))
        self.assertEqual('view4', next(views))
        self.assertEqual('view5', next(views))
        self.assertEqual('view6', next(views))
        # An empty display name queries the shared views.
        self.assertEqual('', cfg.getDisplayViewTransformName('', 'view1'))
        self.assertEqual('t2', cfg.getDisplayViewTransformName('', 'view2'))
        self.assertEqual('', cfg.getDisplayViewTransformName('', 'view3'))
        self.assertEqual('', cfg.getDisplayViewTransformName('', 'view4'))
        self.assertEqual('', cfg.getDisplayViewTransformName('', 'view5'))
        self.assertEqual('t6', cfg.getDisplayViewTransformName('', 'view6'))
        self.assertEqual('c1', cfg.getDisplayViewColorSpaceName('', 'view1'))
        self.assertEqual('c2', cfg.getDisplayViewColorSpaceName('', 'view2'))
        self.assertEqual('c3', cfg.getDisplayViewColorSpaceName('', 'view3'))
        self.assertEqual('c4', cfg.getDisplayViewColorSpaceName('', 'view4'))
        self.assertEqual('c5', cfg.getDisplayViewColorSpaceName('', 'view5'))
        self.assertEqual('c6', cfg.getDisplayViewColorSpaceName('', 'view6'))
        self.assertEqual('', cfg.getDisplayViewLooks('', 'view1'))
        self.assertEqual('', cfg.getDisplayViewLooks('', 'view2'))
        self.assertEqual('l3', cfg.getDisplayViewLooks('', 'view3'))
        self.assertEqual('', cfg.getDisplayViewLooks('', 'view4'))
        self.assertEqual('', cfg.getDisplayViewLooks('', 'view5'))
        self.assertEqual('l6', cfg.getDisplayViewLooks('', 'view6'))
        self.assertEqual('', cfg.getDisplayViewRule('', 'view1'))
        self.assertEqual('', cfg.getDisplayViewRule('', 'view2'))
        self.assertEqual('', cfg.getDisplayViewRule('', 'view3'))
        self.assertEqual('r4', cfg.getDisplayViewRule('', 'view4'))
        self.assertEqual('', cfg.getDisplayViewRule('', 'view5'))
        self.assertEqual('r6', cfg.getDisplayViewRule('', 'view6'))
        self.assertEqual('', cfg.getDisplayViewDescription('', 'view1'))
        self.assertEqual('', cfg.getDisplayViewDescription('', 'view2'))
        self.assertEqual('', cfg.getDisplayViewDescription('', 'view3'))
        self.assertEqual('', cfg.getDisplayViewDescription('', 'view4'))
        self.assertEqual('description 5', cfg.getDisplayViewDescription('', 'view5'))
        self.assertEqual('desc6', cfg.getDisplayViewDescription('', 'view6'))
        # Adding a shared view using an existing name is replacing the existing view.
        cfg.addSharedView(view='view3',
                          colorSpaceName='c3 new',
                          looks='l3 new',
                          viewTransformName='t3 new',
                          ruleName='r3 new',
                          description='desc3 new')
        views = cfg.getSharedViews()
        self.assertEqual(6, len(views))
        self.assertEqual(
            't3 new', cfg.getDisplayViewTransformName('', 'view3'))
        self.assertEqual(
            'c3 new', cfg.getDisplayViewColorSpaceName('', 'view3'))
        self.assertEqual('l3 new', cfg.getDisplayViewLooks('', 'view3'))
        self.assertEqual('r3 new', cfg.getDisplayViewRule('', 'view3'))
        self.assertEqual(
            'desc3 new', cfg.getDisplayViewDescription('', 'view3'))
        # Remove shared views.
        # View has to exist.
        with self.assertRaises(OCIO.Exception):
            cfg.removeSharedView('unknown view')
        # Existing views can be removed.
        cfg.removeSharedView('view3')
        views = cfg.getSharedViews()
        self.assertEqual(5, len(views))
        cfg.removeSharedView('view4')
        cfg.removeSharedView('view5')
        cfg.removeSharedView('view6')
        cfg.removeSharedView('view1')
        cfg.removeSharedView('view2')
        views = cfg.getSharedViews()
        self.assertEqual(0, len(views))

    def test_ruled_views(self):
        """Query views filtered by viewing rules and active-view lists."""
        # Test these Config functions: getDisplays, getViews, removeDisplayView
        # NOTE(review): the original indentation of this embedded YAML was
        # lost; it is restored here to the conventional OCIO config layout.
        SIMPLE_PROFILE = """ocio_profile_version: 2

search_path: ""
strictparsing: true
luma: [0.2126, 0.7152, 0.0722]

roles:
  default: raw
  scene_linear: c3

file_rules:
  - !<Rule> {name: ColorSpaceNamePathSearch}
  - !<Rule> {name: Default, colorspace: raw}

viewing_rules:
  - !<Rule> {name: Rule_1, colorspaces: c1}
  - !<Rule> {name: Rule_2, colorspaces: [c2, c3]}
  - !<Rule> {name: Rule_3, colorspaces: scene_linear}
  - !<Rule> {name: Rule_4, colorspaces: [c3, c4]}
  - !<Rule> {name: Rule_5, encodings: log}
  - !<Rule> {name: Rule_6, encodings: [log, video]}

shared_views:
  - !<View> {name: SView_a, colorspace: raw, rule: Rule_2}
  - !<View> {name: SView_b, colorspace: raw, rule: Rule_3}
  - !<View> {name: SView_c, colorspace: raw}
  - !<View> {name: SView_d, colorspace: raw, rule: Rule_5}
  - !<View> {name: SView_e, colorspace: raw}

displays:
  sRGB:
    - !<View> {name: View_a, colorspace: raw, rule: Rule_1}
    - !<View> {name: View_b, colorspace: raw, rule: Rule_2}
    - !<View> {name: View_c, colorspace: raw, rule: Rule_2}
    - !<View> {name: View_d, colorspace: raw, rule: Rule_3}
    - !<View> {name: View_e, colorspace: raw, rule: Rule_4}
    - !<View> {name: View_f, colorspace: raw, rule: Rule_5}
    - !<View> {name: View_g, colorspace: raw, rule: Rule_6}
    - !<View> {name: View_h, colorspace: raw}
    - !<Views> [SView_a, SView_b, SView_d, SView_e]

active_displays: []
active_views: []

colorspaces:
  - !<ColorSpace>
    name: raw
    family: ""
    equalitygroup: ""
    bitdepth: unknown
    isdata: false
    allocation: uniform

  - !<ColorSpace>
    name: c1
    family: ""
    equalitygroup: ""
    bitdepth: unknown
    isdata: false
    encoding: video
    allocation: uniform

  - !<ColorSpace>
    name: c2
    family: ""
    equalitygroup: ""
    bitdepth: unknown
    isdata: false
    allocation: uniform

  - !<ColorSpace>
    name: c3
    family: ""
    equalitygroup: ""
    bitdepth: unknown
    isdata: false
    allocation: uniform

  - !<ColorSpace>
    name: c4
    family: ""
    equalitygroup: ""
    bitdepth: unknown
    isdata: false
    encoding: log
    allocation: uniform

  - !<ColorSpace>
    name: c5
    family: ""
    equalitygroup: ""
    bitdepth: unknown
    isdata: false
    encoding: data
    allocation: uniform

  - !<ColorSpace>
    name: c6
    family: ""
    equalitygroup: ""
    bitdepth: unknown
    isdata: false
    encoding: video
    allocation: uniform
"""
        # Create a config.
        cfg = OCIO.Config.CreateFromStream(SIMPLE_PROFILE)
        # Check number of displays.
        displays = cfg.getDisplays()
        self.assertEqual(1, len(displays))
        # Add a view in a new display.
        cfg.addDisplayView('otherDisplay', 'otherView', 'c6', '')
        # Check there is a new display and check view.
        displays = cfg.getDisplays()
        self.assertEqual(2, len(displays))
        self.assertEqual('sRGB', next(displays))
        self.assertEqual('otherDisplay', next(displays))
        views = cfg.getViews('otherDisplay')
        self.assertEqual(1, len(views))
        self.assertEqual('otherView', next(views))
        # Parameter case does not matter.
        views = cfg.getViews('oTHerdISplay')
        self.assertEqual(1, len(views))
        # Add a shared view to the new display.
        cfg.addDisplaySharedView('otherDisplay', 'SView_a')
        views = cfg.getViews('otherDisplay')
        self.assertEqual(2, len(views))
        self.assertEqual('otherView', next(views))
        self.assertEqual('SView_a', next(views))
        # Remove the views (and the display).
        cfg.removeDisplayView('otherDisplay', 'otherView')
        displays = cfg.getDisplays()
        self.assertEqual(2, len(displays))
        cfg.removeDisplayView('otherDisplay', 'SView_a')
        displays = cfg.getDisplays()
        self.assertEqual(1, len(displays))
        # Check shared views defined by config.
        views = cfg.getSharedViews()
        self.assertEqual(5, len(views))
        self.assertEqual('SView_a', next(views))
        self.assertEqual('SView_b', next(views))
        self.assertEqual('SView_c', next(views))
        self.assertEqual('SView_d', next(views))
        self.assertEqual('SView_e', next(views))
        # Check views for sRGB display.
        views = cfg.getViews('sRGB')
        self.assertEqual(12, len(views))
        # Active views are taken into account for getViews.
        cfg.setActiveViews('View_a, View_b, SView_a, SView_b')
        views = cfg.getViews('sRGB')
        self.assertEqual(4, len(views))
        cfg.setActiveViews('')
        # Views filtered by viewing rules.
        views = cfg.getViews('sRGB', 'c3')
        self.assertEqual(8, len(views))
        # View_b rule is Rule_2 that lists c3.
        self.assertEqual('View_b', next(views))
        # View_c rule is Rule_2 that lists c3.
        self.assertEqual('View_c', next(views))
        # View_d rule is Rule_3 that lists c3.
        self.assertEqual('View_d', next(views))
        # View_e rule is Rule_4 that lists c3.
        self.assertEqual('View_e', next(views))
        # View_h has no rule.
        self.assertEqual('View_h', next(views))
        # SView_a has rule Rule_2 that lists c3.
        self.assertEqual('SView_a', next(views))
        # SView_b has rule Rule_3 that lists c3.
        self.assertEqual('SView_b', next(views))
        # SView_e has no rule.
        self.assertEqual('SView_e', next(views))
        views = cfg.getViews('sRGB', 'c4')
        self.assertEqual(6, len(views))
        # View_e rule is Rule_4 that lists c4.
        self.assertEqual('View_e', next(views))
        # View_f rule is Rule_5 that lists encoding log, c4 has encoding log.
        self.assertEqual('View_f', next(views))
        # View_g rule is Rule_6 that lists encoding log, c4 has encoding log.
        self.assertEqual('View_g', next(views))
        # View_h has no rule.
        self.assertEqual('View_h', next(views))
        # SView_d rule is Rule_5 that lists encoding log, c4 has encoding log.
        self.assertEqual('SView_d', next(views))
        # SView_e has no rule.
        self.assertEqual('SView_e', next(views))
        views = cfg.getViews('sRGB', 'c6')
        self.assertEqual(3, len(views))
        # View_g rule is Rule_6 that lists encoding video, c6 has encoding video.
        self.assertEqual('View_g', next(views))
        # View_h has no rule.
        self.assertEqual('View_h', next(views))
        # SView_e has no rule.
        self.assertEqual('SView_e', next(views))

    def test_named_transform(self):
        """Add, enumerate and clear named transforms."""
        # Test these Config functions: addNamedTransform, getNamedTransforms,
        # getNamedTransformNames, clearNamedTransforms.
        cfg = OCIO.Config.CreateRaw()
        nt_names = cfg.getNamedTransformNames()
        self.assertEqual(0, len(nt_names))
        nts = cfg.getNamedTransforms()
        self.assertEqual(0, len(nts))
        # Add named transform.
        # Missing name.
        nt = OCIO.NamedTransform(forwardTransform=OCIO.RangeTransform())
        with self.assertRaises(OCIO.Exception):
            cfg.addNamedTransform(nt)
        # Missing forward or inverse transform.
        nt = OCIO.NamedTransform(name="namedTransform")
        with self.assertRaises(OCIO.Exception):
            cfg.addNamedTransform(nt)
        # Legal named transform can be added.
        nt = OCIO.NamedTransform(
            name="namedTransform",
            forwardTransform=OCIO.RangeTransform())
        cfg.addNamedTransform(nt)
        nt = OCIO.NamedTransform(
            name="other",
            inverseTransform=OCIO.RangeTransform())
        cfg.addNamedTransform(nt)
        nt_names = cfg.getNamedTransformNames()
        self.assertEqual(2, len(nt_names))
        self.assertEqual('namedTransform', next(nt_names))
        self.assertEqual('other', next(nt_names))
        nts = cfg.getNamedTransforms()
        self.assertEqual(2, len(nts))
        # First transform only defines the forward direction.
        nt = next(nts)
        self.assertEqual('namedTransform', nt.getName())
        cur_tr = nt.getTransform(OCIO.TRANSFORM_DIR_FORWARD)
        self.assertIsInstance(cur_tr, OCIO.RangeTransform)
        cur_tr = nt.getTransform(OCIO.TRANSFORM_DIR_INVERSE)
        self.assertEqual(cur_tr, None)
        # Second transform only defines the inverse direction.
        nt = next(nts)
        self.assertEqual('other', nt.getName())
        cur_tr = nt.getTransform(OCIO.TRANSFORM_DIR_FORWARD)
        self.assertEqual(cur_tr, None)
        cur_tr = nt.getTransform(OCIO.TRANSFORM_DIR_INVERSE)
        self.assertIsInstance(cur_tr, OCIO.RangeTransform)
        nts = cfg.getNamedTransforms()
        self.assertEqual(2, len(nts))
        cfg.clearNamedTransforms()
        nts = cfg.getNamedTransforms()
        self.assertEqual(0, len(nts))

    def test_inactive_named_transform(self):
        """Enumerate named transforms filtered by active/inactive state."""
        # Test the active/inactive version of these Config functions and classes: getNamedTransforms,
        # getNamedTransformNames, NamedTransformIterator, NamedTransformNameIterator.
        cfg = OCIO.Config.CreateRaw()
        nt_names = cfg.getNamedTransformNames()
        self.assertEqual(0, len(nt_names))
        nts = cfg.getNamedTransforms()
        self.assertEqual(0, len(nts))
        # Add named transforms.
        nt = OCIO.NamedTransform(
            name="nt1",
            forwardTransform=OCIO.RangeTransform())
        cfg.addNamedTransform(nt)
        nt = OCIO.NamedTransform(
            name="nt2",
            inverseTransform=OCIO.RangeTransform())
        cfg.addNamedTransform(nt)
        nt = OCIO.NamedTransform(
            name="nt3",
            forwardTransform=OCIO.RangeTransform())
        cfg.addNamedTransform(nt)
        # The inactive color-space list also deactivates named transforms.
        cfg.setInactiveColorSpaces("nt2")
        # Check the list of active/inactive named transforms.
        nt_names = cfg.getNamedTransformNames()
        self.assertEqual(2, len(nt_names))
        self.assertEqual('nt1', next(nt_names))
        self.assertEqual('nt3', next(nt_names))
        nts = cfg.getNamedTransforms()
        self.assertEqual(2, len(nts))
        nt = next(nts)
        self.assertEqual('nt1', nt.getName())
        nt = next(nts)
        self.assertEqual('nt3', nt.getName())
        nt_names = cfg.getNamedTransformNames(OCIO.NAMEDTRANSFORM_ACTIVE)
        self.assertEqual(2, len(nt_names))
        self.assertEqual('nt1', next(nt_names))
        self.assertEqual('nt3', next(nt_names))
        nts = cfg.getNamedTransforms(OCIO.NAMEDTRANSFORM_ACTIVE)
        self.assertEqual(2, len(nts))
        nt = next(nts)
        self.assertEqual('nt1', nt.getName())
        nt = next(nts)
        self.assertEqual('nt3', nt.getName())
        nt_names = cfg.getNamedTransformNames(OCIO.NAMEDTRANSFORM_ALL)
        self.assertEqual(3, len(nt_names))
        self.assertEqual('nt1', next(nt_names))
        self.assertEqual('nt2', next(nt_names))
        self.assertEqual('nt3', next(nt_names))
        nts = cfg.getNamedTransforms(OCIO.NAMEDTRANSFORM_ALL)
        self.assertEqual(3, len(nts))
        nt = next(nts)
        self.assertEqual('nt1', nt.getName())
        nt = next(nts)
        self.assertEqual('nt2', nt.getName())
        nt = next(nts)
        self.assertEqual('nt3', nt.getName())
        nt_names = cfg.getNamedTransformNames(OCIO.NAMEDTRANSFORM_INACTIVE)
        self.assertEqual(1, len(nt_names))
        self.assertEqual('nt2', next(nt_names))
        nts = cfg.getNamedTransforms(OCIO.NAMEDTRANSFORM_INACTIVE)
        self.assertEqual(1, len(nts))
        nt = next(nts)
        self.assertEqual('nt2', nt.getName())
        cfg.clearNamedTransforms()
        nts = cfg.getNamedTransforms(OCIO.NAMEDTRANSFORM_ALL)
        self.assertEqual(0, len(nts))

    def test_canonical_name(self):
        """Resolve roles and aliases to canonical names, case-insensitively."""
        # Test these Config function: getCanonicalName.
        cfg = OCIO.Config.CreateRaw()
        # add a named transform and a color space.
        nt = OCIO.NamedTransform(
            name='nt1',
            aliases=['alias1', 'test1'],
            forwardTransform=OCIO.RangeTransform())
        cfg.addNamedTransform(nt)
        cs = OCIO.ColorSpace(
            name='cs1',
            aliases=['cs test', 'other'])
        cs.setTransform(OCIO.RangeTransform(),
                        OCIO.COLORSPACE_DIR_TO_REFERENCE)
        cfg.addColorSpace(cs)
        cfg.setRole('role', 'cs1')
        # Unknown names resolve to the empty string.
        self.assertEqual(cfg.getCanonicalName(''), '')
        self.assertEqual(cfg.getCanonicalName('not found'), '')
        # Roles and aliases resolve to the canonical name regardless of case.
        self.assertEqual(cfg.getCanonicalName('roLE'), 'cs1')
        self.assertEqual(cfg.getCanonicalName('CS1'), 'cs1')
        self.assertEqual(cfg.getCanonicalName('Other'), 'cs1')
        self.assertEqual(cfg.getCanonicalName('CS test'), 'cs1')
        self.assertEqual(cfg.getCanonicalName('NT1'), 'nt1')
        self.assertEqual(cfg.getCanonicalName('Alias1'), 'nt1')
        self.assertEqual(cfg.getCanonicalName('Test1'), 'nt1')

    def test_virtual_display(self):
        """Build a virtual display and exercise its query/removal API."""
        # Test platform agnostic virtual display interface.
        cfg = OCIO.Config.CreateRaw()
        cfg.addColorSpace(
            OCIO.ColorSpace(OCIO.REFERENCE_SPACE_DISPLAY,
                            "display_cs",
                            toReference=OCIO.CDLTransform(sat=1.5)))
        cfg.addColorSpace(
            OCIO.ColorSpace(OCIO.REFERENCE_SPACE_SCENE,
                            "raw",
                            isData=True))
        cfg.addViewTransform(
            OCIO.ViewTransform(OCIO.REFERENCE_SPACE_SCENE,
                               "default_vt",
                               toReference=OCIO.CDLTransform(sat=1.5)))
        cfg.addViewTransform(
            OCIO.ViewTransform(OCIO.REFERENCE_SPACE_DISPLAY,
                               "display_vt",
                               toReference=OCIO.CDLTransform(sat=1.5)))
        cfg.addDisplayView("sRGB", "Raw", "raw")
        cfg.addDisplayView("sRGB", "view",
                           viewTransform="display_vt",
                           displayColorSpaceName="display_cs")
        cfg.addSharedView("sview1", "", "raw")
        cfg.addSharedView("sview2", "", "raw")
        cfg.addDisplaySharedView("sRGB", "sview1")
        # Add virtual display and views
        cfg.addVirtualDisplayView("Raw", "", "raw")
        cfg.addVirtualDisplayView("Film", "display_vt",
                                  OCIO.OCIO_VIEW_USE_DISPLAY_NAME)
        cfg.addVirtualDisplaySharedView("sview2")
        # Some basic checks
        self.assertEqual(3, len(cfg.getViews("sRGB")))
        self.assertEqual(2, len(cfg.getViews(OCIO.VIEW_DISPLAY_DEFINED,
                                             "sRGB")))
        self.assertEqual(1, len(cfg.getViews(OCIO.VIEW_SHARED, "sRGB")))
        # Validate the virtual display information
        self.assertEqual(
            2,
            len(cfg.getVirtualDisplayViews(OCIO.VIEW_DISPLAY_DEFINED)))
        view_name = cfg.getVirtualDisplayViews(OCIO.VIEW_DISPLAY_DEFINED)[0]
        self.assertEqual("Raw", view_name)
        self.assertEqual("", cfg.getVirtualDisplayViewTransformName(view_name))
        self.assertEqual("raw",
                         cfg.getVirtualDisplayViewColorSpaceName(view_name))
        self.assertEqual("", cfg.getVirtualDisplayViewLooks(view_name))
        self.assertEqual("", cfg.getVirtualDisplayViewRule(view_name))
        self.assertEqual("", cfg.getVirtualDisplayViewDescription(view_name))
        view_name = cfg.getVirtualDisplayViews(OCIO.VIEW_DISPLAY_DEFINED)[1]
        self.assertEqual("Film", view_name)
        self.assertEqual("display_vt",
                         cfg.getVirtualDisplayViewTransformName(view_name))
        self.assertEqual(OCIO.OCIO_VIEW_USE_DISPLAY_NAME,
                         cfg.getVirtualDisplayViewColorSpaceName(view_name))
        self.assertEqual("", cfg.getVirtualDisplayViewLooks(view_name))
        self.assertEqual("", cfg.getVirtualDisplayViewRule(view_name))
        self.assertEqual("", cfg.getVirtualDisplayViewDescription(view_name))
        self.assertEqual(1, len(cfg.getVirtualDisplayViews(OCIO.VIEW_SHARED)))
        self.assertEqual("sview2",
                         cfg.getVirtualDisplayViews(OCIO.VIEW_SHARED)[0])
        # Remove a view from the virtual display
        cfg.removeVirtualDisplayView("Raw")
        self.assertEqual(
            1,
            len(cfg.getVirtualDisplayViews(OCIO.VIEW_DISPLAY_DEFINED)))
        self.assertEqual(
            "Film",
            cfg.getVirtualDisplayViews(OCIO.VIEW_DISPLAY_DEFINED)[0])
        self.assertEqual(1, len(cfg.getVirtualDisplayViews(OCIO.VIEW_SHARED)))
        self.assertEqual("sview2",
                         cfg.getVirtualDisplayViews(OCIO.VIEW_SHARED)[0])
        # Remove a shared view from the virtual display
        cfg.removeVirtualDisplayView("sview2")
        self.assertEqual(
            1,
            len(cfg.getVirtualDisplayViews(OCIO.VIEW_DISPLAY_DEFINED)))
        self.assertEqual(0, len(cfg.getVirtualDisplayViews(OCIO.VIEW_SHARED)))
        cfg.addVirtualDisplaySharedView("sview2")
        self.assertEqual(
            1,
            len(cfg.getVirtualDisplayViews(OCIO.VIEW_DISPLAY_DEFINED)))
        self.assertEqual(1, len(cfg.getVirtualDisplayViews(OCIO.VIEW_SHARED)))
        # Remove the virtual display
        cfg.clearVirtualDisplay()
        self.assertEqual(
            0,
            len(cfg.getVirtualDisplayViews(OCIO.VIEW_DISPLAY_DEFINED)))
        self.assertEqual(0, len(cfg.getVirtualDisplayViews(OCIO.VIEW_SHARED)))
class ConfigVirtualWithActiveDisplayTest(unittest.TestCase):
    """Tests for the virtual display when active_displays/active_views
    are set in the config."""

    def setUp(self):
        # Parse a fresh config before every test so no state leaks.
        self.cfg_active_display = OCIO.Config.CreateFromStream(
            SIMPLE_CONFIG_VIRTUAL_DISPLAY_ACTIVE_DISPLAY)

    def tearDown(self):
        # Release the config reference.
        self.cfg_active_display = None

    def test_virtual_display_with_active_displays(self):
        """
        Test the virtual display instantiation when active displays
        and views are defined.
        """
        self.cfg_active_display.validate()
        displays = self.cfg_active_display.getDisplays()
        # Fix: use the idiomatic len() instead of calling __len__ directly.
        self.assertEqual(len(displays), 1)
        views = self.cfg_active_display.getViews('sRGB')
        self.assertEqual(len(views), 1)
class ConfigVirtualDisplayTest(unittest.TestCase):
def setUp(self):
    # Parse a fresh virtual-display config stream before every test.
    self.cfg = OCIO.Config.CreateFromStream(SIMPLE_CONFIG_VIRTUAL_DISPLAY)
def tearDown(self):
self.cfg = None
def test_validate(self):
"""
Test validate a config containing a virtual display and
some basic checks.
"""
views = self.cfg.getViews('sRGB')
self.assertEqual(len(views), 3)
views = self.cfg.getViews(OCIO.VIEW_DISPLAY_DEFINED, "sRGB")
self.assertEqual(len(views), 2)
views = self.cfg.getViews(OCIO.VIEW_SHARED, "sRGB")
self.assertEqual(len(views), 1)
self.cfg.validate()
def test_get_virtual_display_views_display_defined(self):
"""
Test the virtual display is correctly loaded & saved.
"""
views = self.cfg.getVirtualDisplayViews(OCIO.VIEW_DISPLAY_DEFINED)
self.assertEqual(len(views), 2)
def test_get_virtual_display_raw(self):
"""
Validate the virtual display information for "Raw".
"""
view_name = self.cfg.getVirtualDisplayViews(
OCIO.VIEW_DISPLAY_DEFINED)[0]
self.assertEqual(view_name, 'Raw')
self.assertEqual(
'', self.cfg.getVirtualDisplayViewTransformName(view_name))
self.assertEqual(
'raw', self.cfg.getVirtualDisplayViewColorSpaceName(view_name))
self.assertEqual('', self.cfg.getVirtualDisplayViewLooks(view_name))
self.assertEqual('', self.cfg.getVirtualDisplayViewRule(view_name))
self.assertEqual(
'', self.cfg.getVirtualDisplayViewDescription(view_name))
def test_get_virtual_display_film(self):
"""
Validate the virtual display information for "Film".
"""
view_name = self.cfg.getVirtualDisplayViews(
OCIO.VIEW_DISPLAY_DEFINED)[1]
self.assertEqual(view_name, 'Film')
self.assertEqual(
'display_vt', self.cfg.getVirtualDisplayViewTransformName(view_name))
self.assertEqual('<USE_DISPLAY_NAME>',
self.cfg.getVirtualDisplayViewColorSpaceName(view_name))
self.assertEqual('', self.cfg.getVirtualDisplayViewLooks(view_name))
self.assertEqual('', self.cfg.getVirtualDisplayViewRule(view_name))
self.assertEqual(
'', self.cfg.getVirtualDisplayViewDescription(view_name))
def test_get_virtual_display_views_shared(self):
views = self.cfg.getVirtualDisplayViews(OCIO.VIEW_SHARED)
self.assertEqual(len(views), 1)
self.assertEqual(views[0], 'sview2')
def test_remove_view_from_virtual_display(self):
"""
Test remove a view from the Virtual Display.
"""
self.cfg.removeVirtualDisplayView('Raw')
views = self.cfg.getVirtualDisplayViews(OCIO.VIEW_DISPLAY_DEFINED)
self.assertEqual(len(views), 1)
self.assertEqual(views[0], 'Film')
views = self.cfg.getVirtualDisplayViews(OCIO.VIEW_SHARED)
self.assertEqual(len(views), 1)
self.assertEqual(views[0], 'sview2')
# Test remove a shared view from the Virtual Display.
self.cfg.removeVirtualDisplayView('sview2')
views = self.cfg.getVirtualDisplayViews(OCIO.VIEW_DISPLAY_DEFINED)
self.assertEqual(len(views), 1)
views = self.cfg.getVirtualDisplayViews(OCIO.VIEW_SHARED)
self.assertEqual(len(views), 0)
# Extra serialize & deserialize validation.
cfg = OCIO.Config.CreateFromStream(self.cfg.serialize())
views = cfg.getVirtualDisplayViews(OCIO.VIEW_DISPLAY_DEFINED)
self.assertEqual(len(views), 1)
views = cfg.getVirtualDisplayViews(OCIO.VIEW_SHARED)
self.assertEqual(len(views), 0)
self.cfg.addVirtualDisplaySharedView('sview2')
views = self.cfg.getVirtualDisplayViews(OCIO.VIEW_DISPLAY_DEFINED)
self.assertEqual(len(views), 1)
views = self.cfg.getVirtualDisplayViews(OCIO.VIEW_SHARED)
self.assertEqual(len(views), 1)
def test_remove_virtual_display(self):
"""
Test remove the Virtual Display.
"""
self.cfg.clearVirtualDisplay()
views = self.cfg.getVirtualDisplayViews(OCIO.VIEW_DISPLAY_DEFINED)
self.assertEqual(len(views), 0)
views = self.cfg.getVirtualDisplayViews(OCIO.VIEW_SHARED)
self.assertEqual(len(views), 0)
# Extra serialize & deserialize validation.
cfg = OCIO.Config.CreateFromStream(self.cfg.serialize())
views = cfg.getVirtualDisplayViews(OCIO.VIEW_DISPLAY_DEFINED)
self.assertEqual(len(views), 0)
views = cfg.getVirtualDisplayViews(OCIO.VIEW_SHARED)
self.assertEqual(len(views), 0)
def test_virtual_display_v1(self):
"""
Test that the virtual display is only supported by v2 or higher.
"""
with self.assertRaises(OCIO.Exception):
cfg = OCIO.Config.CreateFromStream(
SIMPLE_CONFIG_VIRTUAL_DISPLAY_V1)
cfg = OCIO.Config.CreateRaw()
cfg.addVirtualDisplaySharedView('sview')
cfg.setMajorVersion(1)
with self.assertRaises(OCIO.Exception):
cfg.validate()
with self.assertRaises(OCIO.Exception):
cfg2 = OCIO.Config.CreateFromStream(cfg.serialize())
def test_virtual_display_exceptions(self):
cfg = OCIO.Config.CreateFromStream(
SIMPLE_CONFIG_VIRTUAL_DISPLAY_EXCEPTION)
cfg.validate()
# Test failures for shared views.
with self.assertRaises(OCIO.Exception) as cm:
cfg.addVirtualDisplaySharedView('sview1')
self.assertEqual(str(cm.exception),
"Shared view could not be added to virtual_display: " +
"There is already a shared view named 'sview1'.")
cfg.addVirtualDisplaySharedView('sview2')
with self.assertRaises(OCIO.Exception) as cm:
cfg.validate()
self.assertEqual(str(cm.exception), "Config failed validation. " +
"The display 'virtual_display' contains a shared " +
"view 'sview2' that is not defined.")
cfg.removeVirtualDisplayView('sview2')
cfg.validate()
# Test failures for views.
with self.assertRaises(OCIO.Exception) as cm:
cfg.addVirtualDisplayView('Raw', 'Film', 'raw')
self.assertEqual(str(cm.exception), "View could not be added to " +
"virtual_display in config: View 'Raw' already exists.")
cfg.addVirtualDisplayView('Raw1', 'Film', 'raw1')
with self.assertRaises(OCIO.Exception) as cm:
cfg.validate()
self.assertEqual(str(cm.exception), "Config failed validation. " +
"Display 'virtual_display' has a " +
"view 'Raw1' that refers to a color space" +
" or a named transform, 'raw1', which is not defined.")
cfg.removeVirtualDisplayView('Raw1')
cfg.validate()
cfg.addVirtualDisplayView('Raw1', 'Film', 'raw1', 'look')
with self.assertRaises(OCIO.Exception) as cm:
cfg.validate()
self.assertEqual(str(cm.exception), "Config failed validation. " +
"Display 'virtual_display' has a view 'Raw1' that " +
"refers to a color space or a named transform, " +
"'raw1', which is not defined.") | en | 0.452218 | # SPDX-License-Identifier: BSD-3-Clause # Copyright Contributors to the OpenColorIO Project. # Legacy tests kept for reference. # # class ConfigTest(unittest.TestCase): # # SIMPLE_PROFILE = """ocio_profile_version: 1 # # search_path: luts # strictparsing: false # luma: [0.2126, 0.7152, 0.0722] # # roles: # default: raw # scene_linear: lnh # # displays: # sRGB: # - !<View> {name: Film1D, colorspace: vd8} # - !<View> {name: Raw, colorspace: raw} # # active_displays: [] # active_views: [] # # colorspaces: # - !<ColorSpace> # name: raw # family: raw # equalitygroup: "" # bitdepth: 32f # description: | # A raw color space. Conversions to and from this space are no-ops. # # isdata: true # allocation: uniform # # - !<ColorSpace> # name: lnh # family: ln # equalitygroup: "" # bitdepth: 16f # description: | # The show reference space. This is a sensor referred linear # representation of the scene with primaries that correspond to # scanned film. 0.18 in this space corresponds to a properly # exposed 18% grey card. # # isdata: false # allocation: lg2 # # - !<ColorSpace> # name: vd8 # family: vd8 # equalitygroup: "" # bitdepth: 8ui # description: | # how many transforms can we use? # # isdata: false # allocation: uniform # to_reference: !<GroupTransform> # children: # - !<ExponentTransform> {value: 2.2} # - !<MatrixTransform> {matrix: [1, 2, 3, 4, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1], offset: [1, 2, 0, 0]} # - !<CDLTransform> {slope: [0.9, 1, 1], offset: [0.1, 0.3, 0.4], power: [1.1, 1.1, 1.1], sat: 0.9} # """ # # def setUp(self): # # osx_hack = '' # if osname=="Darwin": # osx_hack = """ # // OSX segfault work-around: Force a no-op sampling of the 3D LUT. 
# texture3D(lut3d, 0.96875 * out_pixel.rgb + 0.015625).rgb;""" # # self.GLSLResult = """ # // Generated by OpenColorIO # # vec4 pytestocio(in vec4 inPixel, # const sampler3D lut3d) # { # vec4 out_pixel = inPixel; # out_pixel = out_pixel * mat4(1.0874889, -0.079466686, -0.0080222245, 0., -0.023622228, 1.0316445, -0.0080222245, 0., -0.023622226, -0.079466686, 1.1030889, 0., 0., 0., 0., 1.); # out_pixel = pow(max(out_pixel, vec4(0., 0., 0., 0.)), vec4(0.90909088, 0.90909088, 0.90909088, 1.)); # out_pixel = out_pixel * mat4(1.1111112, -2., -3., -4., 0., 1., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1.); # out_pixel = vec4(4.688889, -2.3, -0.40000001, -0.) + out_pixel; # out_pixel = pow(max(out_pixel, vec4(0., 0., 0., 0.)), vec4(0.45454544, 0.45454544, 0.45454544, 1.));""" \ # + osx_hack + \ # """ # return out_pixel; # } # # """ # # def test_is_editable(self): # # cfg = OCIO.Config().CreateFromStream(self.SIMPLE_PROFILE) # self.assertEqual(cfg.isEditable(), False) # cfg = cfg.createEditableCopy() # self.assertEqual(cfg.isEditable(), True) # ctx = cfg.getCurrentContext() # self.assertEqual(ctx.isEditable(), False) # ctx = ctx.createEditableCopy() # self.assertEqual(ctx.isEditable(), True) # ctx.setEnvironmentMode(OCIO.ENV_ENVIRONMENT_LOAD_ALL) # # def test_interface(self): # # _cfge = OCIO.Config().CreateFromStream(self.SIMPLE_PROFILE) # _cfge.clearEnvironmentVars() # self.assertEqual(0, _cfge.getNumEnvironmentVars()) # _cfge.addEnvironmentVar("FOO", "test1") # _cfge.addEnvironmentVar("FOO2", "test2${FOO}") # self.assertEqual(2, _cfge.getNumEnvironmentVars()) # self.assertEqual("FOO", _cfge.getEnvironmentVarNameByIndex(0)) # self.assertEqual("FOO2", _cfge.getEnvironmentVarNameByIndex(1)) # self.assertEqual("test1", _cfge.getEnvironmentVarDefault("FOO")) # self.assertEqual("test2${FOO}", _cfge.getEnvironmentVarDefault("FOO2")) # self.assertEqual("test2test1", _cfge.getCurrentContext().resolveStringVar("${FOO2}")) # self.assertEqual({'FOO': 'test1', 'FOO2': 'test2${FOO}'}, 
_cfge.getEnvironmentVarDefaults()) # _cfge.clearEnvironmentVars() # self.assertEqual(0, _cfge.getNumEnvironmentVars()) # self.assertEqual("luts", _cfge.getSearchPath()) # _cfge.setSearchPath("otherdir") # self.assertEqual("otherdir", _cfge.getSearchPath()) # _cfge.validate() # _cfge.setDescription("testdesc") # self.assertEqual("testdesc", _cfge.getDescription()) # self.assertEqual(self.SIMPLE_PROFILE, _cfg.serialize()) # #self.assertEqual("$07d1fb1509eeae1837825fd4242f8a69:$885ad1683add38a11f7bbe34e8bf9ac0", # # _cfg.getCacheID()) # con = _cfge.getCurrentContext() # self.assertNotEqual(0, con.getNumStringVars()) # _cfge.setWorkingDir("/foobar") # self.assertEqual("/foobar", _cfge.getWorkingDir()) # self.assertEqual(3, _cfge.getNumColorSpaces()) # self.assertEqual("lnh", _cfge.getColorSpaceNameByIndex(1)) # lnh = _cfge.getColorSpace("lnh") # self.assertEqual("ln", lnh.getFamily()) # self.assertEqual(-1, _cfge.getIndexForColorSpace("foobar")) # cs = OCIO.ColorSpace() # cs.setName("blah") # _cfge.addColorSpace(cs) # self.assertEqual(3, _cfge.getIndexForColorSpace("blah")) # #_cfge.clearColorSpaces() # #_cfge.parseColorSpaceFromString("foo") # self.assertEqual(False, _cfg.isStrictParsingEnabled()) # _cfge.setStrictParsingEnabled(True) # self.assertEqual(True, _cfge.isStrictParsingEnabled()) # self.assertEqual(2, _cfge.getNumRoles()) # self.assertEqual(False, _cfg.hasRole("foo")) # _cfge.setRole("foo", "vd8") # self.assertEqual(3, _cfge.getNumRoles()) # self.assertEqual(True, _cfge.hasRole("foo")) # self.assertEqual("foo", _cfge.getRoleName(1)) # self.assertEqual("sRGB", _cfge.getDefaultDisplay()) # self.assertEqual(1, _cfge.getNumDisplays()) # self.assertEqual("sRGB", _cfge.getDisplay(0)) # self.assertEqual("Film1D", _cfge.getDefaultView("sRGB")) # self.assertEqual(2, _cfge.getNumViews("sRGB")) # self.assertEqual("Raw", _cfge.getView("sRGB", 1)) # self.assertEqual("vd8", _cfge.getDisplayColorSpaceName("sRGB", "Film1D")) # self.assertEqual("", 
_cfg.getDisplayLooks("sRGB", "Film1D")) # _cfge.addDisplay("foo", "bar", "foo", "wee") # _cfge.clearDisplays() # _cfge.setActiveDisplays("sRGB") # self.assertEqual("sRGB", _cfge.getActiveDisplays()) # _cfge.setActiveViews("Film1D") # self.assertEqual("Film1D", _cfge.getActiveViews()) # luma = _cfge.getDefaultLumaCoefs() # self.assertAlmostEqual(0.2126, luma[0], delta=1e-8) # _cfge.setDefaultLumaCoefs([0.1, 0.2, 0.3]) # tnewluma = _cfge.getDefaultLumaCoefs() # self.assertAlmostEqual(0.1, tnewluma[0], delta=1e-8) # self.assertEqual(0, _cfge.getNumLooks()) # lk = OCIO.Look() # lk.setName("coollook") # lk.setProcessSpace("somespace") # et = OCIO.ExponentTransform() # et.setValue([0.1, 0.2, 0.3, 0.4]) # lk.setTransform(et) # iet = OCIO.ExponentTransform() # iet.setValue([-0.1, -0.2, -0.3, -0.4]) # lk.setInverseTransform(iet) # _cfge.addLook(lk) # self.assertEqual(1, _cfge.getNumLooks()) # self.assertEqual("coollook", _cfge.getLookNameByIndex(0)) # glk = _cfge.getLook("coollook") # self.assertEqual("somespace", glk.getProcessSpace()) # _cfge.clearLooks() # self.assertEqual(0, _cfge.getNumLooks()) # # #getProcessor(context, srcColorSpace, dstColorSpace) # #getProcessor(context, srcName,dstName); # #getProcessor(transform); # #getProcessor(transform, direction); # #getProcessor(context, transform, direction); # # _proc = _cfg.getProcessor("lnh", "vd8") # self.assertEqual(False, _proc.isNoOp()) # self.assertEqual(True, _proc.hasChannelCrosstalk()) # # #float packedpix[] = new float[]{0.48f, 0.18f, 0.9f, 1.0f, # # 0.48f, 0.18f, 0.18f, 1.0f, # # 0.48f, 0.18f, 0.18f, 1.0f, # # 0.48f, 0.18f, 0.18f, 1.0f }; # #FloatBuffer buf = ByteBuffer.allocateDirect(2 * 2 * 4 * Float.SIZE / 8).asFloatBuffer(); # #buf.put(packedpix); # #PackedImageDesc foo = new PackedImageDesc(buf, 2, 2, 4); # #_proc.apply(foo); # #FloatBuffer wee = foo.getData(); # #self.assertEqual(-2.4307251581696764E-35f, wee.get(2), 1e-8); # # # TODO: these should work in-place # rgbfoo = _proc.applyRGB([0.48, 0.18, 
0.18]) # self.assertAlmostEqual(1.9351077, rgbfoo[0], delta=1e-7); # # TODO: these should work in-place # rgbafoo = _proc.applyRGBA([0.48, 0.18, 0.18, 1.0]) # self.assertAlmostEqual(1.0, rgbafoo[3], delta=1e-8) # #self.assertEqual("$a92ef63abd9edf61ad5a7855da064648", _proc.getCpuCacheID()) # # _cfge.clearSearchPaths() # self.assertEqual(0, _cfge.getNumSearchPaths()) # _cfge.addSearchPath("First/ Path") # self.assertEqual(1, _cfge.getNumSearchPaths()) # _cfge.addSearchPath("D:\\Second\\Path\\") # self.assertEqual(2, _cfge.getNumSearchPaths()) # self.assertEqual("First/ Path", _cfge.getSearchPathByIndex(0)) # self.assertEqual("D:\\Second\\Path\\", _cfge.getSearchPathByIndex(1)) # # del _cfge # del _cfg Test the deepcopy() method. # Check that the file rules are not shared between the two config instances. # Test these Config functions: addSharedView, getSharedViews, removeSharedView. # Shared view has to have a name. # Shared view has to have a color space name. # Adding a shared view using an existing name is replacing the existing view. # Remove shared views. # View has to exist. # Existing views can be removed. 
# Test these Config functions: getDisplays, getViews, removeDisplayView ocio_profile_version: 2 search_path: "" strictparsing: true luma: [0.2126, 0.7152, 0.0722] roles: default: raw scene_linear: c3 file_rules: - !<Rule> {name: ColorSpaceNamePathSearch} - !<Rule> {name: Default, colorspace: raw} viewing_rules: - !<Rule> {name: Rule_1, colorspaces: c1} - !<Rule> {name: Rule_2, colorspaces: [c2, c3]} - !<Rule> {name: Rule_3, colorspaces: scene_linear} - !<Rule> {name: Rule_4, colorspaces: [c3, c4]} - !<Rule> {name: Rule_5, encodings: log} - !<Rule> {name: Rule_6, encodings: [log, video]} shared_views: - !<View> {name: SView_a, colorspace: raw, rule: Rule_2} - !<View> {name: SView_b, colorspace: raw, rule: Rule_3} - !<View> {name: SView_c, colorspace: raw} - !<View> {name: SView_d, colorspace: raw, rule: Rule_5} - !<View> {name: SView_e, colorspace: raw} displays: sRGB: - !<View> {name: View_a, colorspace: raw, rule: Rule_1} - !<View> {name: View_b, colorspace: raw, rule: Rule_2} - !<View> {name: View_c, colorspace: raw, rule: Rule_2} - !<View> {name: View_d, colorspace: raw, rule: Rule_3} - !<View> {name: View_e, colorspace: raw, rule: Rule_4} - !<View> {name: View_f, colorspace: raw, rule: Rule_5} - !<View> {name: View_g, colorspace: raw, rule: Rule_6} - !<View> {name: View_h, colorspace: raw} - !<Views> [SView_a, SView_b, SView_d, SView_e] active_displays: [] active_views: [] colorspaces: - !<ColorSpace> name: raw family: "" equalitygroup: "" bitdepth: unknown isdata: false allocation: uniform - !<ColorSpace> name: c1 family: "" equalitygroup: "" bitdepth: unknown isdata: false encoding: video allocation: uniform - !<ColorSpace> name: c2 family: "" equalitygroup: "" bitdepth: unknown isdata: false allocation: uniform - !<ColorSpace> name: c3 family: "" equalitygroup: "" bitdepth: unknown isdata: false allocation: uniform - !<ColorSpace> name: c4 family: "" equalitygroup: "" bitdepth: unknown isdata: false encoding: log allocation: uniform - !<ColorSpace> name: c5 
family: "" equalitygroup: "" bitdepth: unknown isdata: false encoding: data allocation: uniform - !<ColorSpace> name: c6 family: "" equalitygroup: "" bitdepth: unknown isdata: false encoding: video allocation: uniform # Create a config. # Check number of displays. # Add a view in a new display. # Check there is a new display and check view. # Parameter case does not matter. # Add a shared view to the new display. # Remove the views (and the display). # Check shared views defined by config. # Check views for sRGB display. # Active views are taken into account for getViews. # Views filtered by viewing rules. # View_b rule is Rule_2 that lists c3. # View_c rule is Rule_2 that lists c3. # View_d rule is Rule_3 that lists c3. # View_e rule is Rule_4 that lists c3. # View_h has no rule. # SView_a has rule Rule_2 that lists c3. # SView_b has rule Rule_3 that lists c3. # SView_e has no rule. # View_e rule is Rule_4 that lists c4. # View_f rule is Rule_5 that lists encoding log, c4 has encoding log. # View_g rule is Rule_6 that lists encoding log, c4 has encoding log. # View_h has no rule. # SView_d rule is Rule_5 that lists encoding log, c4 has encoding log. # SView_e has no rule. # View_g rule is Rule_6 that lists encoding video, c6 has encoding video. # View_h has no rule. # SView_e has no rule. # Test these Config functions: addNamedTransform, getNamedTransforms, # getNamedTransformNames, clearNamedTransforms. # Add named transform. # Missing name. # Missing forward or inverse transform. # Legal named transform can be added. # Test the active/inactive version of these Config functions and classes: getNamedTransforms, # getNamedTransformNames, NamedTransformIterator, NamedTransformNameIterator. # Add named transforms. # Check the list of active/inactive named transforms. # Test these Config function: getCanonicalName. # add a named transform and a color space. # Test platform agnostic virtual display interface. 
# Add virtual display and views # Some basic checks # Validate the virtual display information # Remove a view from the virtual display # Remove a shared view from the virtual display # Remove the virtual display Test the virtual display instantiation when active displays and views are defined. Test validate a config containing a virtual display and some basic checks. Test the virtual display is correctly loaded & saved. Validate the virtual display information for "Raw". Validate the virtual display information for "Film". Test remove a view from the Virtual Display. # Test remove a shared view from the Virtual Display. # Extra serialize & deserialize validation. Test remove the Virtual Display. # Extra serialize & deserialize validation. Test that the virtual display is only supported by v2 or higher. # Test failures for shared views. # Test failures for views. | 1.935585 | 2 |
examples/meter_reader/train_detection.py | yaoshanliang/PaddleX | 2 | 6624659 | import os
# Use GPU card 0.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
from paddlex.det import transforms
import paddlex as pdx
# Download and extract the meter-detection dataset.
meter_det_dataset = 'https://bj.bcebos.com/paddlex/examples/meter_reader/datasets/meter_det.tar.gz'
pdx.utils.download_and_decompress(meter_det_dataset, path='./')
# Define the transforms applied at training and evaluation time.
train_transforms = transforms.Compose([
    transforms.MixupImage(mixup_epoch=250),
    transforms.RandomDistort(),
    transforms.RandomExpand(),
    transforms.RandomCrop(),
    transforms.Resize(
        target_size=608, interp='RANDOM'),
    transforms.RandomHorizontalFlip(),
    transforms.Normalize(),
])
eval_transforms = transforms.Compose([
    transforms.Resize(
        target_size=608, interp='CUBIC'),
    transforms.Normalize(),
])
# Define the datasets used for training and evaluation.
train_dataset = pdx.datasets.CocoDetection(
    data_dir='meter_det/train/',
    ann_file='meter_det/annotations/instance_train.json',
    transforms=train_transforms,
    shuffle=True)
eval_dataset = pdx.datasets.CocoDetection(
    data_dir='meter_det/test/',
    ann_file='meter_det/annotations/instance_test.json',
    transforms=eval_transforms)
# Initialize the model and start training.
# Training metrics can be inspected with VisualDL.
# Launch VisualDL with: visualdl --logdir output/yolov3_darknet/vdl_log --port 8001
# then open https://0.0.0.0:8001 in a browser
# (0.0.0.0 for local access; use the machine's IP for a remote server).
# API reference: https://paddlex.readthedocs.io/zh_CN/latest/apis/models/detection.html#yolov3
num_classes = len(train_dataset.labels)
model = pdx.det.YOLOv3(
    num_classes=num_classes, backbone='DarkNet53', label_smooth=True)
model.train(
    num_epochs=270,
    train_dataset=train_dataset,
    train_batch_size=8,
    eval_dataset=eval_dataset,
    learning_rate=0.001,
    warmup_steps=4000,
    lr_decay_epochs=[210, 240],
    save_dir='output/meter_det',
    use_vdl=True)
| import os
# Use GPU card 0.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
from paddlex.det import transforms
import paddlex as pdx
# Download and extract the meter-detection dataset.
meter_det_dataset = 'https://bj.bcebos.com/paddlex/examples/meter_reader/datasets/meter_det.tar.gz'
pdx.utils.download_and_decompress(meter_det_dataset, path='./')
# Define the transforms applied at training and evaluation time.
train_transforms = transforms.Compose([
    transforms.MixupImage(mixup_epoch=250),
    transforms.RandomDistort(),
    transforms.RandomExpand(),
    transforms.RandomCrop(),
    transforms.Resize(
        target_size=608, interp='RANDOM'),
    transforms.RandomHorizontalFlip(),
    transforms.Normalize(),
])
eval_transforms = transforms.Compose([
    transforms.Resize(
        target_size=608, interp='CUBIC'),
    transforms.Normalize(),
])
# Define the datasets used for training and evaluation.
train_dataset = pdx.datasets.CocoDetection(
    data_dir='meter_det/train/',
    ann_file='meter_det/annotations/instance_train.json',
    transforms=train_transforms,
    shuffle=True)
eval_dataset = pdx.datasets.CocoDetection(
    data_dir='meter_det/test/',
    ann_file='meter_det/annotations/instance_test.json',
    transforms=eval_transforms)
# Initialize the model and start training.
# Training metrics can be inspected with VisualDL.
# Launch VisualDL with: visualdl --logdir output/yolov3_darknet/vdl_log --port 8001
# then open https://0.0.0.0:8001 in a browser
# (0.0.0.0 for local access; use the machine's IP for a remote server).
# API reference: https://paddlex.readthedocs.io/zh_CN/latest/apis/models/detection.html#yolov3
num_classes = len(train_dataset.labels)
model = pdx.det.YOLOv3(
    num_classes=num_classes, backbone='DarkNet53', label_smooth=True)
model.train(
    num_epochs=270,
    train_dataset=train_dataset,
    train_batch_size=8,
    eval_dataset=eval_dataset,
    learning_rate=0.001,
    warmup_steps=4000,
    lr_decay_epochs=[210, 240],
    save_dir='output/meter_det',
    use_vdl=True)
| zh | 0.665709 | # 选择使用0号卡 # 下载和解压表计检测数据集 # 定义训练和验证时的transforms # 定义训练和验证所用的数据集 # 初始化模型,并进行训练 # 可使用VisualDL查看训练指标 # VisualDL启动方式: visualdl --logdir output/yolov3_darknet/vdl_log --port 8001 # 浏览器打开 https://0.0.0.0:8001即可 # 其中0.0.0.0为本机访问,如为远程服务, 改成相应机器IP # API说明: https://paddlex.readthedocs.io/zh_CN/latest/apis/models/detection.html#yolov3 | 1.840936 | 2 |
dliplib/utils/weights/__init__.py | oterobaguer/ct-dip-benchmark | 0 | 6624660 | <reponame>oterobaguer/ct-dip-benchmark<filename>dliplib/utils/weights/__init__.py
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


def get_weights_path(weights_key):
    """Return the absolute path of the ``.pt`` file for *weights_key*."""
    return os.path.join(BASE_DIR, 'weights', weights_key + '.pt')
def save_weights(reconstructor, weights_key, **kwargs):
    """
    Saves learned parameters to a file

    :param reconstructor: Reconstructor whose learned parameters are saved
    :param weights_key: Key that identifies the weights file
    :param kwargs: Extra keyword arguments forwarded to
        ``reconstructor.save_learned_params``
    """
    path = get_weights_path(weights_key)
    reconstructor.save_learned_params(path, **kwargs)
def load_weights(reconstructor, weights_key, **kwargs):
    """
    Loads learned parameters from a file

    :param reconstructor: Reconstructor that will load the parameters
    :param weights_key: Key that identifies the weights file
    :param kwargs: Extra keyword arguments forwarded to
        ``reconstructor.load_learned_params``
    """
    path = get_weights_path(weights_key)
    reconstructor.load_learned_params(path, **kwargs)
| import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


def get_weights_path(weights_key):
    """Return the absolute path of the ``.pt`` file for *weights_key*."""
    return os.path.join(BASE_DIR, 'weights', weights_key + '.pt')
def save_weights(reconstructor, weights_key, **kwargs):
    """
    Saves learned parameters to a file

    :param reconstructor: Reconstructor whose learned parameters are saved
    :param weights_key: Key that identifies the weights file
    :param kwargs: Extra keyword arguments forwarded to
        ``reconstructor.save_learned_params``
    """
    path = get_weights_path(weights_key)
    reconstructor.save_learned_params(path, **kwargs)
def load_weights(reconstructor, weights_key, **kwargs):
    """
    Loads learned parameters from a file

    :param reconstructor: Reconstructor that will load the parameters
    :param weights_key: Key that identifies the weights file
    :param kwargs: Extra keyword arguments forwarded to
        ``reconstructor.load_learned_params``
    """
    path = get_weights_path(weights_key)
    reconstructor.load_learned_params(path, **kwargs)
senlin_dashboard/cluster/nodes/forms.py | sangtq-vn/senlin-dashboard | 18 | 6624661 | <reponame>sangtq-vn/senlin-dashboard
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils.memoized import memoized # noqa: F401
from senlin_dashboard.api import senlin
def _populate_node_params(name, profile_id, cluster_id, role, metadata):
    """Assemble the parameter dict for senlin node create/update calls.

    :param name: Node name.
    :param profile_id: ID of the profile used for the node.
    :param cluster_id: ID of the cluster the node belongs to (may be None).
    :param role: Role of the node inside the cluster.
    :param metadata: YAML-formatted metadata string; an empty value yields
        an empty dict.
    :raises Exception: if the metadata is not valid YAML or does not
        describe a mapping.
    """
    if not metadata:
        metadata_dict = {}
    else:
        try:
            metadata_dict = yaml.safe_load(metadata)
        except Exception as ex:
            # Chain the YAML error so the original parse failure is kept.
            raise Exception(_('The specified metadata is not a valid '
                              'YAML: %s') % ex) from ex
        if metadata_dict is None:
            # safe_load returns None for blank/"null" input; the API
            # expects a mapping, so normalize to an empty dict.
            metadata_dict = {}
        elif not isinstance(metadata_dict, dict):
            # Scalars and lists are valid YAML but not valid metadata.
            raise Exception(_('The specified metadata is not a YAML '
                              'mapping: %s') % metadata)
    params = {"name": name,
              "profile_id": profile_id,
              "cluster_id": cluster_id,
              "role": role,
              "metadata": metadata_dict}
    return params
class CreateForm(forms.SelfHandlingForm):
    """Horizon form used to create a senlin node."""
    name = forms.CharField(max_length=255, label=_("Node Name"))
    profile_id = forms.ThemableChoiceField(
        label=_("Profile"),
        help_text=_("Profile used for this node."))
    cluster_id = forms.ThemableChoiceField(
        label=_("Cluster"),
        required=False,
        help_text=_("Cluster for this node."))
    role = forms.CharField(
        max_length=255,
        label=_("Role"),
        required=False,
        help_text=_("Role for this node in the specific cluster."))
    metadata = forms.CharField(
        label=_("Metadata"),
        required=False,
        help_text=_("YAML formatted metadata."),
        widget=forms.Textarea(attrs={'rows': 4}))
    def __init__(self, request, *args, **kwargs):
        """Populate the profile and cluster choice lists from the API."""
        super(CreateForm, self).__init__(request, *args, **kwargs)
        profiles = senlin.profile_list(request)[0]
        self.fields['profile_id'].choices = (
            [("", _("Select Profile"))] + [(profile.id, profile.name)
                                           for profile in profiles])
        clusters = senlin.cluster_list(request)[0]
        self.fields['cluster_id'].choices = (
            [("", _("Select Cluster"))] + [(cluster.id, cluster.name)
                                           for cluster in clusters])
    def handle(self, request, data):
        """Create the node; on failure redirect to the node index view."""
        try:
            params = _populate_node_params(data['name'],
                                           data['profile_id'],
                                           data['cluster_id'],
                                           data['role'],
                                           data['metadata'])
            node = senlin.node_create(request, **params)
            msg = _('Creating node "%s" successfully') % data['name']
            messages.info(request, msg)
            return node
        except Exception:
            redirect = reverse("horizon:cluster:nodes:index")
            exceptions.handle(request,
                              _("Unable to create node."),
                              redirect=redirect)
class UpdateNodeForm(forms.SelfHandlingForm):
    """Horizon form used to update an existing senlin node."""
    node_id = forms.CharField(widget=forms.HiddenInput())
    name = forms.CharField(max_length=255, label=_("Node Name"))
    profile_id = forms.ThemableChoiceField(
        label=_("Profile"),
        help_text=_("Profile used for this node."))
    role = forms.CharField(
        max_length=255,
        label=_("Role"),
        required=False,
        help_text=_("Role for this node in the specific cluster."))
    metadata = forms.CharField(
        label=_("Metadata"),
        required=False,
        help_text=_("YAML formatted metadata."),
        widget=forms.Textarea(attrs={'rows': 4}))

    def __init__(self, request, *args, **kwargs):
        """Populate the profile choice list from the API."""
        super(UpdateNodeForm, self).__init__(request, *args, **kwargs)
        profiles = senlin.profile_list(request)[0]
        self.fields['profile_id'].choices = (
            [("", _("Select Profile"))] + [(profile.id, profile.name)
                                           for profile in profiles])

    def handle(self, request, data):
        """Update the node; on failure redirect to the node index view."""
        try:
            # Build the parameters inside the try block so that invalid
            # YAML metadata is reported through exceptions.handle instead
            # of raising unhandled (consistent with CreateForm.handle).
            params = _populate_node_params(data['name'],
                                           data['profile_id'],
                                           None,
                                           data['role'],
                                           data['metadata'])
            # Node update does not take a cluster id.
            del params['cluster_id']
            node = senlin.node_update(request, data.get('node_id'), **params)
            messages.success(
                request,
                _('Your node %s update request'
                  ' has been accepted for processing.') %
                data['name'])
            return node
        except Exception:
            redirect = reverse("horizon:cluster:nodes:index")
            exceptions.handle(request,
                              _("Unable to update node."),
                              redirect=redirect)
        return False
| # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils.memoized import memoized # noqa: F401
from senlin_dashboard.api import senlin
def _populate_node_params(name, profile_id, cluster_id, role, metadata):
    """Assemble the parameter dict for senlin node create/update calls.

    :param name: Node name.
    :param profile_id: ID of the profile used for the node.
    :param cluster_id: ID of the cluster the node belongs to (may be None).
    :param role: Role of the node inside the cluster.
    :param metadata: YAML-formatted metadata string; an empty value yields
        an empty dict.
    :raises Exception: if the metadata is not valid YAML or does not
        describe a mapping.
    """
    if not metadata:
        metadata_dict = {}
    else:
        try:
            metadata_dict = yaml.safe_load(metadata)
        except Exception as ex:
            # Chain the YAML error so the original parse failure is kept.
            raise Exception(_('The specified metadata is not a valid '
                              'YAML: %s') % ex) from ex
        if metadata_dict is None:
            # safe_load returns None for blank/"null" input; the API
            # expects a mapping, so normalize to an empty dict.
            metadata_dict = {}
        elif not isinstance(metadata_dict, dict):
            # Scalars and lists are valid YAML but not valid metadata.
            raise Exception(_('The specified metadata is not a YAML '
                              'mapping: %s') % metadata)
    params = {"name": name,
              "profile_id": profile_id,
              "cluster_id": cluster_id,
              "role": role,
              "metadata": metadata_dict}
    return params
class CreateForm(forms.SelfHandlingForm):
    """Horizon form used to create a senlin node."""
    name = forms.CharField(max_length=255, label=_("Node Name"))
    profile_id = forms.ThemableChoiceField(
        label=_("Profile"),
        help_text=_("Profile used for this node."))
    cluster_id = forms.ThemableChoiceField(
        label=_("Cluster"),
        required=False,
        help_text=_("Cluster for this node."))
    role = forms.CharField(
        max_length=255,
        label=_("Role"),
        required=False,
        help_text=_("Role for this node in the specific cluster."))
    metadata = forms.CharField(
        label=_("Metadata"),
        required=False,
        help_text=_("YAML formatted metadata."),
        widget=forms.Textarea(attrs={'rows': 4}))
    def __init__(self, request, *args, **kwargs):
        """Populate the profile and cluster choice lists from the API."""
        super(CreateForm, self).__init__(request, *args, **kwargs)
        profiles = senlin.profile_list(request)[0]
        self.fields['profile_id'].choices = (
            [("", _("Select Profile"))] + [(profile.id, profile.name)
                                           for profile in profiles])
        clusters = senlin.cluster_list(request)[0]
        self.fields['cluster_id'].choices = (
            [("", _("Select Cluster"))] + [(cluster.id, cluster.name)
                                           for cluster in clusters])
    def handle(self, request, data):
        """Create the node; on failure redirect to the node index view."""
        try:
            params = _populate_node_params(data['name'],
                                           data['profile_id'],
                                           data['cluster_id'],
                                           data['role'],
                                           data['metadata'])
            node = senlin.node_create(request, **params)
            msg = _('Creating node "%s" successfully') % data['name']
            messages.info(request, msg)
            return node
        except Exception:
            redirect = reverse("horizon:cluster:nodes:index")
            exceptions.handle(request,
                              _("Unable to create node."),
                              redirect=redirect)
class UpdateNodeForm(forms.SelfHandlingForm):
node_id = forms.CharField(widget=forms.HiddenInput())
name = forms.CharField(max_length=255, label=_("Node Name"))
profile_id = forms.ThemableChoiceField(
label=_("Profile"),
help_text=_("Profile used for this node."))
role = forms.CharField(
max_length=255,
label=_("Role"),
required=False,
help_text=_("Role for this node in the specific cluster."))
metadata = forms.CharField(
label=_("Metadata"),
required=False,
help_text=_("YAML formatted metadata."),
widget=forms.Textarea(attrs={'rows': 4}))
def __init__(self, request, *args, **kwargs):
super(UpdateNodeForm, self).__init__(request, *args, **kwargs)
profiles = senlin.profile_list(request)[0]
self.fields['profile_id'].choices = (
[("", _("Select Profile"))] + [(profile.id, profile.name)
for profile in profiles])
def handle(self, request, data):
params = _populate_node_params(data['name'],
data['profile_id'],
None,
data['role'],
data['metadata'])
del params['cluster_id']
try:
node = senlin.node_update(request, data.get('node_id'), **params)
messages.success(
request,
_('Your node %s update request'
' has been accepted for processing.') %
data['name'])
return node
except Exception:
redirect = reverse("horizon:cluster:nodes:index")
exceptions.handle(request,
_("Unable to update node."),
redirect=redirect)
return False | en | 0.851908 | # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # noqa: F401 | 1.761661 | 2 |
ooobuild/lo/animations/animation_end_sync.py | Amourspirit/ooo_uno_tmpl | 0 | 6624662 | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Const Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.animations
class AnimationEndSync(object):
"""
Const Class
See Also:
`API AnimationEndSync <https://api.libreoffice.org/docs/idl/ref/namespacecom_1_1sun_1_1star_1_1animations_1_1AnimationEndSync.html>`_
"""
__ooo_ns__: str = 'com.sun.star.animations'
__ooo_full_ns__: str = 'com.sun.star.animations.AnimationEndSync'
__ooo_type_name__: str = 'const'
FIRST = 0
"""
The par, excl, or media element's implicit duration ends with the earliest active end of all the child elements.
This does not refer to the lexical first child, or to the first child to start, but rather refers to the first child to end its (first) active duration.
"""
LAST = 1
"""
The par, excl, or media element's implicit duration ends with the last active end of the child elements.
This does not refer to the lexical last child, or to the last child to start, but rather refers to the last active end of all children that have a resolved, definite begin time. If the time container has no children with a resolved begin time, the time container ends immediately. If child elements have multiple begin times, or otherwise restart, the child elements must complete all instances of active durations for resolved begin times. This is the default value for par and excl elements.
"""
ALL = 2
"""
The par, excl, or media element's implicit duration ends when all of the child elements have ended their respective active durations.
Elements with indefinite or unresolved begin times will keep the simple duration of the time container from ending. When all elements have completed the active duration one or more times, the parent time container can end.
"""
MEDIA = 3
"""
The time container element's implicit duration ends when the intrinsic media duration of the element ends.
This must be defined by a host language. If the time container element does not define an intrinsic media duration, the host language must define the simple duration for the element. This is the default value for media time container elements.
"""
__all__ = ['AnimationEndSync']
| # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Const Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.animations
class AnimationEndSync(object):
"""
Const Class
See Also:
`API AnimationEndSync <https://api.libreoffice.org/docs/idl/ref/namespacecom_1_1sun_1_1star_1_1animations_1_1AnimationEndSync.html>`_
"""
__ooo_ns__: str = 'com.sun.star.animations'
__ooo_full_ns__: str = 'com.sun.star.animations.AnimationEndSync'
__ooo_type_name__: str = 'const'
FIRST = 0
"""
The par, excl, or media element's implicit duration ends with the earliest active end of all the child elements.
This does not refer to the lexical first child, or to the first child to start, but rather refers to the first child to end its (first) active duration.
"""
LAST = 1
"""
The par, excl, or media element's implicit duration ends with the last active end of the child elements.
This does not refer to the lexical last child, or to the last child to start, but rather refers to the last active end of all children that have a resolved, definite begin time. If the time container has no children with a resolved begin time, the time container ends immediately. If child elements have multiple begin times, or otherwise restart, the child elements must complete all instances of active durations for resolved begin times. This is the default value for par and excl elements.
"""
ALL = 2
"""
The par, excl, or media element's implicit duration ends when all of the child elements have ended their respective active durations.
Elements with indefinite or unresolved begin times will keep the simple duration of the time container from ending. When all elements have completed the active duration one or more times, the parent time container can end.
"""
MEDIA = 3
"""
The time container element's implicit duration ends when the intrinsic media duration of the element ends.
This must be defined by a host language. If the time container element does not define an intrinsic media duration, the host language must define the simple duration for the element. This is the default value for media time container elements.
"""
__all__ = ['AnimationEndSync']
| en | 0.842253 | # coding: utf-8 # # Copyright 2022 :Barry-Thomas-Paul: Moss # # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http: // www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Const Class # this is a auto generated file generated by Cheetah # Libre Office Version: 7.3 # Namespace: com.sun.star.animations Const Class See Also: `API AnimationEndSync <https://api.libreoffice.org/docs/idl/ref/namespacecom_1_1sun_1_1star_1_1animations_1_1AnimationEndSync.html>`_ The par, excl, or media element's implicit duration ends with the earliest active end of all the child elements. This does not refer to the lexical first child, or to the first child to start, but rather refers to the first child to end its (first) active duration. The par, excl, or media element's implicit duration ends with the last active end of the child elements. This does not refer to the lexical last child, or to the last child to start, but rather refers to the last active end of all children that have a resolved, definite begin time. If the time container has no children with a resolved begin time, the time container ends immediately. If child elements have multiple begin times, or otherwise restart, the child elements must complete all instances of active durations for resolved begin times. This is the default value for par and excl elements. The par, excl, or media element's implicit duration ends when all of the child elements have ended their respective active durations. 
Elements with indefinite or unresolved begin times will keep the simple duration of the time container from ending. When all elements have completed the active duration one or more times, the parent time container can end. The time container element's implicit duration ends when the intrinsic media duration of the element ends. This must be defined by a host language. If the time container element does not define an intrinsic media duration, the host language must define the simple duration for the element. This is the default value for media time container elements. | 1.612955 | 2 |
app/utils/es_connection.py | raunaktr/pokedex_api | 0 | 6624663 |
def es_verify(val):
if val.get('_shards').get('failed') <= 0:
return "its-working!"
else:
return "es-failure"
|
def es_verify(val):
if val.get('_shards').get('failed') <= 0:
return "its-working!"
else:
return "es-failure"
| none | 1 | 2.167962 | 2 | |
src/train.py | cal859/music-maker-2000 | 0 | 6624664 | <filename>src/train.py
from collections import Counter
from dataclasses import dataclass
import os
import pickle as pkl
import numpy as np
import streamlit as st
import torch
import torch.nn as nn
from prep_data import CleanTextData
# Class for model parameters
@dataclass
class ModelConfig:
seq_size: int
batch_size: int
embedding_size: int
hidden_layer_size: int
n_layers: int
dropout: float
gradients_clipping: int
epochs: int
class TrainModel:
def __init__(
self,
text: str,
model_name: str,
model: nn.Module,
model_config: ModelConfig,
model_save_folder: str = "./src/model_files",
run_in_streamlit: bool = False,
criterion=None,
optimiser=None,
):
self.text = text
(
self.n_vocab,
self.int_to_vocab,
self.vocab_to_int,
self.int_text,
) = self.encode_text(self.text)
self.model_name = model_name
self.model_config = model_config
self.model = model(
self.n_vocab,
self.model_config.seq_size,
self.model_config.embedding_size,
self.model_config.hidden_layer_size,
self.model_config.n_layers,
self.model_config.dropout,
)
self.model_save_folder = model_save_folder
if not os.path.exists(self.model_save_folder):
os.mkdir(self.model_save_folder)
self.model_save_path = f"{self.model_save_folder}/{self.model.model_type}"
self.run_in_streamlit = run_in_streamlit
if self.run_in_streamlit:
self.print_fn = st.write
else:
self.print_fn = print
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.model.to(self.device)
self.print_fn(f"Device: {self.device}")
if criterion is None:
self.criterion = nn.CrossEntropyLoss()
else:
self.criterion = criterion
if optimiser is None:
self.optimiser = torch.optim.Adam(self.model.parameters(), lr=0.01)
else:
self.optimiser = optimiser
def encode_text(self, text: str) -> [int, dict, dict, list]:
word_counts = Counter(text)
sorted_vocab = sorted(word_counts, key=word_counts.get, reverse=True)
int_to_vocab = {k: w for k, w in enumerate(sorted_vocab)}
vocab_to_int = {w: k for k, w in int_to_vocab.items()}
n_vocab = len(int_to_vocab)
int_text = [vocab_to_int[w] for w in text]
print("Vocabulary size", n_vocab)
return n_vocab, int_to_vocab, vocab_to_int, int_text
def create_training_data(
self, int_text: list, vocab_to_int: dict
) -> [np.array, np.array]:
num_batches = int(
len(int_text) / (self.model_config.seq_size * self.model_config.batch_size)
)
in_text = int_text[
: num_batches * self.model_config.batch_size * self.model_config.seq_size
]
out_text = np.zeros_like(in_text)
out_text[:-1] = in_text[1:]
out_text[-1] = in_text[0]
in_text = np.reshape(in_text, (self.model_config.batch_size, -1))
out_text = np.reshape(out_text, (self.model_config.batch_size, -1))
return in_text, out_text
def get_batches(self, in_text: np.array, out_text: np.array):
num_batches = np.prod(in_text.shape) // (
self.model_config.seq_size * self.model_config.batch_size
)
for i in range(
0, num_batches * self.model_config.seq_size, self.model_config.seq_size
):
yield in_text[:, i : i + self.model_config.seq_size], out_text[
:, i : i + self.model_config.seq_size
]
def save_model_and_maps(self, num_epochs: int) -> None:
if not os.path.exists(self.model_save_path):
os.mkdir(self.model_save_path)
model_run = self.model_save_path + f"/model-{self.model_name}-{num_epochs}"
if not os.path.exists(model_run):
os.mkdir(model_run)
torch.save(
self.model,
f"{model_run}/model.pkl",
)
with open(f"{model_run}/int_to_vocab.pkl", "wb") as itv:
pkl.dump(self.int_to_vocab, itv)
with open(f"{model_run}/vocab_to_int.pkl", "wb") as vti:
pkl.dump(self.vocab_to_int, vti)
def train(self):
iteration = 0
losses = []
for e in range(self.model_config.epochs + 1):
in_text, out_text = self.create_training_data(
self.int_text, self.vocab_to_int
)
batches = self.get_batches(
in_text,
out_text,
)
state_h, state_c = self.model.zero_state(self.model_config.batch_size)
# Transfer data to GPU
state_h = state_h.to(self.device)
state_c = state_c.to(self.device)
for x, y in batches:
iteration += 1
# Tell it we are in training mode
self.model.train()
# Reset all gradients
self.optimiser.zero_grad()
# Transfer data to GPU (if present)
x = torch.tensor(x).to(self.device)
y = torch.tensor(y).to(self.device)
logits, (state_h, state_c) = self.model(x, (state_h, state_c))
loss = self.criterion(logits.transpose(1, 2), y)
state_h = state_h.detach()
state_c = state_c.detach()
loss_value = loss.item()
# Perform back-propagation
loss.backward()
_ = torch.nn.utils.clip_grad_norm_(
self.model.parameters(), self.model_config.gradients_clipping
)
# Update the network's parameters
self.optimiser.step()
losses.append(loss_value)
self.print_fn(
"Epoch: {}/{}".format(e, self.model_config.epochs),
"Loss: {}".format(loss_value),
)
self.save_model_and_maps(num_epochs=e)
return losses
def run_training(self):
losses = self.train()
return losses, self.model, self.vocab_to_int, self.int_to_vocab | <filename>src/train.py
from collections import Counter
from dataclasses import dataclass
import os
import pickle as pkl
import numpy as np
import streamlit as st
import torch
import torch.nn as nn
from prep_data import CleanTextData
# Class for model parameters
@dataclass
class ModelConfig:
seq_size: int
batch_size: int
embedding_size: int
hidden_layer_size: int
n_layers: int
dropout: float
gradients_clipping: int
epochs: int
class TrainModel:
def __init__(
self,
text: str,
model_name: str,
model: nn.Module,
model_config: ModelConfig,
model_save_folder: str = "./src/model_files",
run_in_streamlit: bool = False,
criterion=None,
optimiser=None,
):
self.text = text
(
self.n_vocab,
self.int_to_vocab,
self.vocab_to_int,
self.int_text,
) = self.encode_text(self.text)
self.model_name = model_name
self.model_config = model_config
self.model = model(
self.n_vocab,
self.model_config.seq_size,
self.model_config.embedding_size,
self.model_config.hidden_layer_size,
self.model_config.n_layers,
self.model_config.dropout,
)
self.model_save_folder = model_save_folder
if not os.path.exists(self.model_save_folder):
os.mkdir(self.model_save_folder)
self.model_save_path = f"{self.model_save_folder}/{self.model.model_type}"
self.run_in_streamlit = run_in_streamlit
if self.run_in_streamlit:
self.print_fn = st.write
else:
self.print_fn = print
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.model.to(self.device)
self.print_fn(f"Device: {self.device}")
if criterion is None:
self.criterion = nn.CrossEntropyLoss()
else:
self.criterion = criterion
if optimiser is None:
self.optimiser = torch.optim.Adam(self.model.parameters(), lr=0.01)
else:
self.optimiser = optimiser
def encode_text(self, text: str) -> [int, dict, dict, list]:
word_counts = Counter(text)
sorted_vocab = sorted(word_counts, key=word_counts.get, reverse=True)
int_to_vocab = {k: w for k, w in enumerate(sorted_vocab)}
vocab_to_int = {w: k for k, w in int_to_vocab.items()}
n_vocab = len(int_to_vocab)
int_text = [vocab_to_int[w] for w in text]
print("Vocabulary size", n_vocab)
return n_vocab, int_to_vocab, vocab_to_int, int_text
def create_training_data(
self, int_text: list, vocab_to_int: dict
) -> [np.array, np.array]:
num_batches = int(
len(int_text) / (self.model_config.seq_size * self.model_config.batch_size)
)
in_text = int_text[
: num_batches * self.model_config.batch_size * self.model_config.seq_size
]
out_text = np.zeros_like(in_text)
out_text[:-1] = in_text[1:]
out_text[-1] = in_text[0]
in_text = np.reshape(in_text, (self.model_config.batch_size, -1))
out_text = np.reshape(out_text, (self.model_config.batch_size, -1))
return in_text, out_text
def get_batches(self, in_text: np.array, out_text: np.array):
num_batches = np.prod(in_text.shape) // (
self.model_config.seq_size * self.model_config.batch_size
)
for i in range(
0, num_batches * self.model_config.seq_size, self.model_config.seq_size
):
yield in_text[:, i : i + self.model_config.seq_size], out_text[
:, i : i + self.model_config.seq_size
]
def save_model_and_maps(self, num_epochs: int) -> None:
if not os.path.exists(self.model_save_path):
os.mkdir(self.model_save_path)
model_run = self.model_save_path + f"/model-{self.model_name}-{num_epochs}"
if not os.path.exists(model_run):
os.mkdir(model_run)
torch.save(
self.model,
f"{model_run}/model.pkl",
)
with open(f"{model_run}/int_to_vocab.pkl", "wb") as itv:
pkl.dump(self.int_to_vocab, itv)
with open(f"{model_run}/vocab_to_int.pkl", "wb") as vti:
pkl.dump(self.vocab_to_int, vti)
def train(self):
iteration = 0
losses = []
for e in range(self.model_config.epochs + 1):
in_text, out_text = self.create_training_data(
self.int_text, self.vocab_to_int
)
batches = self.get_batches(
in_text,
out_text,
)
state_h, state_c = self.model.zero_state(self.model_config.batch_size)
# Transfer data to GPU
state_h = state_h.to(self.device)
state_c = state_c.to(self.device)
for x, y in batches:
iteration += 1
# Tell it we are in training mode
self.model.train()
# Reset all gradients
self.optimiser.zero_grad()
# Transfer data to GPU (if present)
x = torch.tensor(x).to(self.device)
y = torch.tensor(y).to(self.device)
logits, (state_h, state_c) = self.model(x, (state_h, state_c))
loss = self.criterion(logits.transpose(1, 2), y)
state_h = state_h.detach()
state_c = state_c.detach()
loss_value = loss.item()
# Perform back-propagation
loss.backward()
_ = torch.nn.utils.clip_grad_norm_(
self.model.parameters(), self.model_config.gradients_clipping
)
# Update the network's parameters
self.optimiser.step()
losses.append(loss_value)
self.print_fn(
"Epoch: {}/{}".format(e, self.model_config.epochs),
"Loss: {}".format(loss_value),
)
self.save_model_and_maps(num_epochs=e)
return losses
def run_training(self):
losses = self.train()
return losses, self.model, self.vocab_to_int, self.int_to_vocab | en | 0.709645 | # Class for model parameters # Transfer data to GPU # Tell it we are in training mode # Reset all gradients # Transfer data to GPU (if present) # Perform back-propagation # Update the network's parameters | 2.560292 | 3 |
setup.py | TinghuiWang/pyActLearn | 3 | 6624665 | #!/usr/bin/env python3
#
# Copyright (c) 2015, <NAME> <<EMAIL>>
# All rights reserved.
from setuptools import setup, find_packages
from Cython.Build import cythonize
import os
CLASSIFIERS = """\
Development Status :: 2 - Pre-Alpha
Intended Audience :: Developers
Intended Audience :: Science/Research
License :: OSI Approved :: BSD License
Operating System :: POSIX
Operating System :: POSIX :: Linux
Programming Language :: Python
Programming Language :: Python :: 3.5
Topic :: Home Automation
Topic :: Scientific/Engineering :: Artificial Intelligence
Topic :: Scientific/Engineering :: Information Analysis
""".splitlines()
NAME = "pyActLearn"
MAINTAINER = "<NAME> (Steve)"
MAINTAINER_EMAIL = "<EMAIL>"
DESCRIPTION = ("Activity Learning package designed for rapid prototyping of " +
"activity learning algorithms used with WSU CASAS smart home datasets.")
LONG_DESCRIPTION = DESCRIPTION
LICENSE = "BSD"
URL = "https://github.com/TinghuiWang/pyActLearn"
DOWNLOAD_URL = ""
AUTHOR = "<NAME> (Steve)"
AUTHOR_EMAIL = "<EMAIL>"
PLATFORMS = ["Linux"]
# Get Version from pyActLearn.version
exec_results = {}
exec(open(os.path.join(os.path.dirname(__file__), 'pyActLearn/version.py')).read(), exec_results)
version = exec_results['version']
# Get Install Requirements
with open(os.path.join(os.path.dirname(__file__), 'requirements.txt'), 'r') as f:
install_requires = f.read().splitlines()
def do_setup():
setup(
name=NAME,
version=version,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
classifiers=CLASSIFIERS,
platforms=PLATFORMS,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url=URL,
license=LICENSE,
keywords=' '.join(['activity recognition', 'smart home', 'smart environment']),
packages=find_packages('.'),
entry_points={'console_scripts': ['casas_download = pyActLearn.bin.casas_download:main']},
install_requires=install_requires,
ext_modules=cythonize("pyActLearn/learning/*.pyx", gdb_debug=True)
)
if __name__ == "__main__":
do_setup()
| #!/usr/bin/env python3
#
# Copyright (c) 2015, <NAME> <<EMAIL>>
# All rights reserved.
from setuptools import setup, find_packages
from Cython.Build import cythonize
import os
CLASSIFIERS = """\
Development Status :: 2 - Pre-Alpha
Intended Audience :: Developers
Intended Audience :: Science/Research
License :: OSI Approved :: BSD License
Operating System :: POSIX
Operating System :: POSIX :: Linux
Programming Language :: Python
Programming Language :: Python :: 3.5
Topic :: Home Automation
Topic :: Scientific/Engineering :: Artificial Intelligence
Topic :: Scientific/Engineering :: Information Analysis
""".splitlines()
NAME = "pyActLearn"
MAINTAINER = "<NAME> (Steve)"
MAINTAINER_EMAIL = "<EMAIL>"
DESCRIPTION = ("Activity Learning package designed for rapid prototyping of " +
"activity learning algorithms used with WSU CASAS smart home datasets.")
LONG_DESCRIPTION = DESCRIPTION
LICENSE = "BSD"
URL = "https://github.com/TinghuiWang/pyActLearn"
DOWNLOAD_URL = ""
AUTHOR = "<NAME> (Steve)"
AUTHOR_EMAIL = "<EMAIL>"
PLATFORMS = ["Linux"]
# Get Version from pyActLearn.version
exec_results = {}
exec(open(os.path.join(os.path.dirname(__file__), 'pyActLearn/version.py')).read(), exec_results)
version = exec_results['version']
# Get Install Requirements
with open(os.path.join(os.path.dirname(__file__), 'requirements.txt'), 'r') as f:
install_requires = f.read().splitlines()
def do_setup():
setup(
name=NAME,
version=version,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
classifiers=CLASSIFIERS,
platforms=PLATFORMS,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url=URL,
license=LICENSE,
keywords=' '.join(['activity recognition', 'smart home', 'smart environment']),
packages=find_packages('.'),
entry_points={'console_scripts': ['casas_download = pyActLearn.bin.casas_download:main']},
install_requires=install_requires,
ext_modules=cythonize("pyActLearn/learning/*.pyx", gdb_debug=True)
)
if __name__ == "__main__":
do_setup()
| en | 0.547343 | #!/usr/bin/env python3 # # Copyright (c) 2015, <NAME> <<EMAIL>> # All rights reserved. \ Development Status :: 2 - Pre-Alpha Intended Audience :: Developers Intended Audience :: Science/Research License :: OSI Approved :: BSD License Operating System :: POSIX Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 3.5 Topic :: Home Automation Topic :: Scientific/Engineering :: Artificial Intelligence Topic :: Scientific/Engineering :: Information Analysis # Get Version from pyActLearn.version # Get Install Requirements | 1.804578 | 2 |
test.py | biguscj7/word_search_creator | 3 | 6624666 | <filename>test.py
# import word_search_creator.py via the execfile function
execfile("word_search_creator.py")
# test createWordSearch function with 8 randomly generated words
returnedWordSearch = createWordSearch(["seemly", "exotic", "obese", "disagreeable", "earn", "spark", "strengthen", "colossal"])
print stringifyWordSearch(returnedWordSearch)
| <filename>test.py
# import word_search_creator.py via the execfile function
execfile("word_search_creator.py")
# test createWordSearch function with 8 randomly generated words
returnedWordSearch = createWordSearch(["seemly", "exotic", "obese", "disagreeable", "earn", "spark", "strengthen", "colossal"])
print stringifyWordSearch(returnedWordSearch)
| en | 0.403792 | # import word_search_creator.py via the execfile function # test createWordSearch function with 8 randomly generated words | 2.865824 | 3 |
Blog_Website/BlogApp/migrations/0011_alter_blogpost_image.py | MetinIlgar/BlogWebsite | 1 | 6624667 | # Generated by Django 4.0.3 on 2022-03-06 14:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('BlogApp', '0010_aboutme'),
]
operations = [
migrations.AlterField(
model_name='blogpost',
name='image',
field=models.ImageField(upload_to='media', verbose_name='Resim'),
),
]
| # Generated by Django 4.0.3 on 2022-03-06 14:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('BlogApp', '0010_aboutme'),
]
operations = [
migrations.AlterField(
model_name='blogpost',
name='image',
field=models.ImageField(upload_to='media', verbose_name='Resim'),
),
]
| en | 0.835988 | # Generated by Django 4.0.3 on 2022-03-06 14:05 | 1.454325 | 1 |
asg/input/__init__.py | aidangoettsch/asg | 8 | 6624668 | <filename>asg/input/__init__.py
from .spice import spice_to_il
| <filename>asg/input/__init__.py
from .spice import spice_to_il
| none | 1 | 1.055732 | 1 | |
imageproc/tasks.py | rossifranca/genegraphics | 5 | 6624669 | from __future__ import absolute_import, unicode_literals
from .celery import app
from celery.utils.log import get_task_logger
import time
from PIL import Image
import subprocess
from cairosvg import svg2png
from subprocess import check_output, STDOUT
import shlex
import sys
from pathlib import Path
TIMEOUT = 90
logger = get_task_logger(__name__)
@app.task(bind=True)
def process_session(self, ft, tsv, svg, output_file):
progress_data = {'message': '',
'current':0,
'total':1,
'result': None,
'complete': False}
progress_data["message"] = update_message(progress_data)
# Get the basename for the output file
output_path = Path(output_file)
filehash = output_path.name.split('.')[0]
save_dir = output_path.parent
if ft == "TSV":
self.update_state(state="PROGRESS", meta=progress_data)
with open(output_file, 'w') as outfile:
outfile.write(tsv)
progress_data["result"] = output_file
progress_data["current"] = progress_data["current"]+1
progress_data["message"] = update_message(progress_data)
self.update_state(state="PROGRESS", meta=progress_data)
elif ft == "SVG":
self.update_state(state="PROGRESS", meta=progress_data)
progress_data = make_svg(self,
svg,
output_file,
progress_data)
elif ft == "PNG":
progress_data["total"] = 2
progress_data["message"] = update_message(progress_data)
self.update_state(state="PROGRESS", meta=progress_data)
progress_data = svg_to_png(self,
svg,
output_file,
progress_data)
elif ft == "EMF":
progress_data["total"] = 2
progress_data["message"] = update_message(progress_data)
self.update_state(state="PROGRESS", meta=progress_data)
progress_data = svg_to_emf(self,
svg,
output_file,
progress_data)
elif ft == "EPS":
progress_data["total"] = 2
progress_data["message"] = update_message(progress_data)
self.update_state(state="PROGRESS", meta=progress_data)
progress_data = svg_to_eps(self,
svg,
output_file,
progress_data)
elif ft == "TIFF":
progress_data["total"] = 3
progress_data["message"] = update_message(progress_data)
self.update_state(state="PROGRESS", meta=progress_data)
progress_data = png_to_tiff(self,
svg,
output_file,
progress_data)
else:
logger.error("Not a valid filetype: " + ft)
progress_data["message"] = update_message(progress_data)
return progress_data
logger.debug(progress_data["result"])
logger.debug(output_file)
if progress_data["result"] == output_file:
progress_data["complete"] = True
return progress_data
def update_message(progress_data):
if progress_data["current"] < progress_data["total"]:
return "Step {} of {} complete.".format(
progress_data["current"],
progress_data["total"])
else:
return "Task complete!"
def make_svg(self, svg, svg_file, progress_data):
""" Make svg file if it doesn't exist
and return the file name. If it exists,
just return the file name.
"""
svg_path = Path(svg_file)
# Check if svg file exists
if not svg_path.is_file():
# Create the svg file
with open(svg_file, 'w') as outfile:
outfile.write(svg)
else:
svg_file = str(svg_path)
progress_data["current"] = progress_data["current"]+1
progress_data["message"] = update_message(progress_data)
progress_data["result"] = svg_file
self.update_state(state="PROGRESS", meta=progress_data)
return progress_data
def svg_to_png(self, svg, png_file, progress_data):
png_path = Path(png_file)
filehash = png_path.name.split('.')[0]
save_dir = png_path.parent
progress_data = make_svg(self,
svg,
str(save_dir.joinpath(filehash+'.svg')),
progress_data)
svg_file = progress_data["result"]
# Make PNG file from SVG file
svg2png(open(svg_file, 'rb').read(), write_to=open(png_file, 'wb'))
progress_data["current"] = progress_data["current"]+1
progress_data["message"] = update_message(progress_data)
progress_data["result"] = png_file
self.update_state(state="PROGRESS", meta=progress_data)
return progress_data
def svg_to_emf(self, svg, emf_file, progress_data):
emf_path = Path(emf_file)
filehash = emf_path.name.split('.')[0]
save_dir = emf_path.parent
progress_data = make_svg(self,
svg,
str(save_dir.joinpath(filehash+'.svg')),
progress_data)
svg_file = progress_data["result"]
# Make EMF file from SVG file
cmd = " ".join(["/usr/bin/inkscape", "--file", svg_file, "--export-emf", emf_file])
output = check_output(shlex.split(cmd), stderr=STDOUT, timeout=TIMEOUT)
if output:
logger.info("cmd" + cmd + ": " + str(output))
progress_data["current"] = progress_data["current"]+1
progress_data["message"] = update_message(progress_data)
progress_data["result"] = emf_file
self.update_state(state="PROGRESS", meta=progress_data)
return progress_data
def svg_to_eps(self, svg, eps_file, progress_data):
    """Convert *svg* markup to an EPS file at *eps_file* using Inkscape.

    Builds the intermediate SVG (via make_svg), shells out to Inkscape,
    updates progress state on the bound Celery task, and returns the
    updated progress_data dict.
    """
    eps_path = Path(eps_file)
    filehash = eps_path.name.split('.')[0]
    save_dir = eps_path.parent
    progress_data = make_svg(self,
                             svg,
                             str(save_dir.joinpath(filehash + '.svg')),
                             progress_data)
    svg_file = progress_data["result"]
    # Make EPS file from SVG file. Pass the argument vector directly:
    # the old join-then-shlex.split round trip broke on paths containing
    # spaces or shell metacharacters.
    cmd = ["/usr/bin/inkscape", "-E", eps_file, svg_file,
           "--export-area-page", "--export-text-to-path",
           "--export-ignore-filters"]
    output = check_output(cmd, stderr=STDOUT, timeout=TIMEOUT)
    if output:
        logger.info("cmd %s: %s", " ".join(cmd), output)
    progress_data["current"] = progress_data["current"] + 1
    progress_data["message"] = update_message(progress_data)
    progress_data["result"] = eps_file
    self.update_state(state="PROGRESS", meta=progress_data)
    return progress_data
def png_to_tiff(self, svg, tiff_file, progress_data):
    """Convert *svg* markup to a TIFF at *tiff_file* (SVG -> PNG -> TIFF).

    Renders the intermediate PNG (via svg_to_png), converts it with
    ImageMagick, updates progress state on the bound Celery task, and
    returns the updated progress_data dict.
    """
    tiff_path = Path(tiff_file)
    filehash = tiff_path.name.split('.')[0]
    save_dir = tiff_path.parent
    progress_data = svg_to_png(self,
                               svg,
                               str(save_dir.joinpath(filehash + '.png')),
                               progress_data)
    png_file = progress_data["result"]
    # Make TIFF file from PNG file. Pass the argument vector directly:
    # the old join-then-shlex.split round trip broke on paths containing
    # spaces or shell metacharacters.
    cmd = ["/usr/bin/convert", png_file, tiff_file]
    output = check_output(cmd, stderr=STDOUT, timeout=TIMEOUT)
    if output:
        logger.info("cmd %s: %s", " ".join(cmd), output)
    progress_data["current"] = progress_data["current"] + 1
    progress_data["message"] = update_message(progress_data)
    progress_data["result"] = tiff_file
    self.update_state(state="PROGRESS", meta=progress_data)
    return progress_data
| from __future__ import absolute_import, unicode_literals
from .celery import app
from celery.utils.log import get_task_logger
import time
from PIL import Image
import subprocess
from cairosvg import svg2png
from subprocess import check_output, STDOUT
import shlex
import sys
from pathlib import Path
TIMEOUT = 90
logger = get_task_logger(__name__)
@app.task(bind=True)
def process_session(self, ft, tsv, svg, output_file):
    """Celery task: export a session to *output_file* in format *ft*.

    *ft* selects the output format ("TSV", "SVG", "PNG", "EMF", "EPS" or
    "TIFF"); *tsv* and *svg* carry the raw source content. Progress is
    reported via Celery state updates, and the returned progress_data
    dict has "complete" set to True once *output_file* was produced.
    """
    progress_data = {'message': '',
                     'current': 0,
                     'total': 1,
                     'result': None,
                     'complete': False}
    progress_data["message"] = update_message(progress_data)
    # NOTE: the original computed output_path/filehash/save_dir here but
    # never used them (the helpers derive them from output_file).
    if ft == "TSV":
        self.update_state(state="PROGRESS", meta=progress_data)
        with open(output_file, 'w') as outfile:
            outfile.write(tsv)
        progress_data["result"] = output_file
        progress_data["current"] = progress_data["current"] + 1
        progress_data["message"] = update_message(progress_data)
        self.update_state(state="PROGRESS", meta=progress_data)
    elif ft == "SVG":
        self.update_state(state="PROGRESS", meta=progress_data)
        progress_data = make_svg(self,
                                 svg,
                                 output_file,
                                 progress_data)
    elif ft == "PNG":
        progress_data["total"] = 2
        progress_data["message"] = update_message(progress_data)
        self.update_state(state="PROGRESS", meta=progress_data)
        progress_data = svg_to_png(self,
                                   svg,
                                   output_file,
                                   progress_data)
    elif ft == "EMF":
        progress_data["total"] = 2
        progress_data["message"] = update_message(progress_data)
        self.update_state(state="PROGRESS", meta=progress_data)
        progress_data = svg_to_emf(self,
                                   svg,
                                   output_file,
                                   progress_data)
    elif ft == "EPS":
        progress_data["total"] = 2
        progress_data["message"] = update_message(progress_data)
        self.update_state(state="PROGRESS", meta=progress_data)
        progress_data = svg_to_eps(self,
                                   svg,
                                   output_file,
                                   progress_data)
    elif ft == "TIFF":
        # TIFF needs an extra conversion step: SVG -> PNG -> TIFF.
        progress_data["total"] = 3
        progress_data["message"] = update_message(progress_data)
        self.update_state(state="PROGRESS", meta=progress_data)
        progress_data = png_to_tiff(self,
                                    svg,
                                    output_file,
                                    progress_data)
    else:
        logger.error("Not a valid filetype: " + ft)
        progress_data["message"] = update_message(progress_data)
        return progress_data
    logger.debug(progress_data["result"])
    logger.debug(output_file)
    if progress_data["result"] == output_file:
        progress_data["complete"] = True
    return progress_data
def update_message(progress_data):
    """Return a human-readable status line for *progress_data*."""
    current, total = progress_data["current"], progress_data["total"]
    if current >= total:
        return "Task complete!"
    return "Step {} of {} complete.".format(current, total)
def make_svg(self, svg, svg_file, progress_data):
    """Ensure an SVG file exists at *svg_file*, writing *svg* markup if needed.

    Records the resulting path under progress_data["result"], bumps the
    step counter, and reports PROGRESS on the bound Celery task. Returns
    the updated progress_data dict.
    """
    target = Path(svg_file)
    if target.is_file():
        # File was generated on a previous run; just reuse its path.
        svg_file = str(target)
    else:
        with open(svg_file, 'w') as fh:
            fh.write(svg)
    progress_data["current"] += 1
    progress_data["message"] = update_message(progress_data)
    progress_data["result"] = svg_file
    self.update_state(state="PROGRESS", meta=progress_data)
    return progress_data
def svg_to_png(self, svg, png_file, progress_data):
    """Render *svg* markup to a PNG at *png_file*.

    First materialises the intermediate SVG file next to the PNG (via
    make_svg), then rasterises it with cairosvg. Updates progress state
    on the bound Celery task and returns the updated progress_data dict.
    """
    png_path = Path(png_file)
    filehash = png_path.name.split('.')[0]
    save_dir = png_path.parent
    progress_data = make_svg(self,
                             svg,
                             str(save_dir.joinpath(filehash + '.svg')),
                             progress_data)
    svg_file = progress_data["result"]
    # Make PNG file from SVG file. Use context managers so both handles
    # are closed deterministically (the original leaked them to the GC).
    with open(svg_file, 'rb') as svg_fh, open(png_file, 'wb') as png_fh:
        svg2png(svg_fh.read(), write_to=png_fh)
    progress_data["current"] = progress_data["current"] + 1
    progress_data["message"] = update_message(progress_data)
    progress_data["result"] = png_file
    self.update_state(state="PROGRESS", meta=progress_data)
    return progress_data
def svg_to_emf(self, svg, emf_file, progress_data):
    """Convert *svg* markup to an EMF file at *emf_file* using Inkscape.

    Builds the intermediate SVG (via make_svg), shells out to Inkscape,
    updates progress state on the bound Celery task, and returns the
    updated progress_data dict.
    """
    emf_path = Path(emf_file)
    filehash = emf_path.name.split('.')[0]
    save_dir = emf_path.parent
    progress_data = make_svg(self,
                             svg,
                             str(save_dir.joinpath(filehash + '.svg')),
                             progress_data)
    svg_file = progress_data["result"]
    # Make EMF file from SVG file. Pass the argument vector directly:
    # the old join-then-shlex.split round trip broke on paths containing
    # spaces or shell metacharacters.
    cmd = ["/usr/bin/inkscape", "--file", svg_file, "--export-emf", emf_file]
    output = check_output(cmd, stderr=STDOUT, timeout=TIMEOUT)
    if output:
        logger.info("cmd %s: %s", " ".join(cmd), output)
    progress_data["current"] = progress_data["current"] + 1
    progress_data["message"] = update_message(progress_data)
    progress_data["result"] = emf_file
    self.update_state(state="PROGRESS", meta=progress_data)
    return progress_data
def svg_to_eps(self, svg, eps_file, progress_data):
    """Convert *svg* markup to an EPS file at *eps_file* using Inkscape.

    Builds the intermediate SVG (via make_svg), shells out to Inkscape,
    updates progress state on the bound Celery task, and returns the
    updated progress_data dict.
    """
    eps_path = Path(eps_file)
    filehash = eps_path.name.split('.')[0]
    save_dir = eps_path.parent
    progress_data = make_svg(self,
                             svg,
                             str(save_dir.joinpath(filehash + '.svg')),
                             progress_data)
    svg_file = progress_data["result"]
    # Make EPS file from SVG file. Pass the argument vector directly:
    # the old join-then-shlex.split round trip broke on paths containing
    # spaces or shell metacharacters.
    cmd = ["/usr/bin/inkscape", "-E", eps_file, svg_file,
           "--export-area-page", "--export-text-to-path",
           "--export-ignore-filters"]
    output = check_output(cmd, stderr=STDOUT, timeout=TIMEOUT)
    if output:
        logger.info("cmd %s: %s", " ".join(cmd), output)
    progress_data["current"] = progress_data["current"] + 1
    progress_data["message"] = update_message(progress_data)
    progress_data["result"] = eps_file
    self.update_state(state="PROGRESS", meta=progress_data)
    return progress_data
def png_to_tiff(self, svg, tiff_file, progress_data):
    """Convert *svg* markup to a TIFF at *tiff_file* (SVG -> PNG -> TIFF).

    Renders the intermediate PNG (via svg_to_png), converts it with
    ImageMagick, updates progress state on the bound Celery task, and
    returns the updated progress_data dict.
    """
    tiff_path = Path(tiff_file)
    filehash = tiff_path.name.split('.')[0]
    save_dir = tiff_path.parent
    progress_data = svg_to_png(self,
                               svg,
                               str(save_dir.joinpath(filehash + '.png')),
                               progress_data)
    png_file = progress_data["result"]
    # Make TIFF file from PNG file. Pass the argument vector directly:
    # the old join-then-shlex.split round trip broke on paths containing
    # spaces or shell metacharacters.
    cmd = ["/usr/bin/convert", png_file, tiff_file]
    output = check_output(cmd, stderr=STDOUT, timeout=TIMEOUT)
    if output:
        logger.info("cmd %s: %s", " ".join(cmd), output)
    progress_data["current"] = progress_data["current"] + 1
    progress_data["message"] = update_message(progress_data)
    progress_data["result"] = tiff_file
    self.update_state(state="PROGRESS", meta=progress_data)
    return progress_data
| en | 0.825687 | # Get the basename for the output file Make svg file if it doesn't exist and return the file name. If it exists, just return the file name. # Check if svg file exists # Create the svg file # Make PNG file from SVG file # Make EMF file from SVG file # Make EPS file from SVG file # Make TIFF file from PNG file | 2.217723 | 2 |
modules/dbnd/src/dbnd/_core/tracking/tracking_info_convertor.py | turbaszek/dbnd | 0 | 6624670 | import hashlib
import logging
import typing
from functools import partial
from itertools import chain
from dbnd._core.constants import RunState, TaskRunState
from dbnd._core.context.databand_context import DatabandContext
from dbnd._core.tracking.tracking_info_objects import (
TargetInfo,
TaskDefinitionInfo,
TaskRunInfo,
TaskRunParamInfo,
)
from dbnd._core.tracking.tracking_info_run import RunInfo
from dbnd._core.utils.string_utils import safe_short_string
from dbnd._core.utils.timezone import utcnow
from dbnd._core.utils.traversing import traverse
from dbnd.api.tracking_api import InitRunArgs, TaskRunsInfo
if typing.TYPE_CHECKING:
from typing import Dict, List
from targets import Target
from dbnd import Task
from dbnd._core.run.databand_run import DatabandRun
from dbnd._core.task_run.task_run import TaskRun
logger = logging.getLogger(__name__)
class TrackingInfoBuilder(object):
def __init__(self, run):
self.run = run # type: DatabandRun
def _run_to_run_info(self):
# type: () -> RunInfo
run = self.run
task = run.driver_task_run.task
context = run.context
env = run.env
return RunInfo(
run_uid=run.run_uid,
job_name=run.job_name,
user=context.task_run_env.user,
name=run.name,
state=RunState.RUNNING,
start_time=utcnow(),
end_time=None,
description=run.description,
is_archived=run.is_archived,
env_name=env.name,
cloud_type=env.cloud_type,
# deprecate and airflow
dag_id=run.dag_id,
execution_date=run.execution_date,
cmd_name=context.name,
driver_name=env.remote_engine or env.local_engine,
# move to task
target_date=task.task_target_date,
version=task.task_version,
# root and submitted by
root_run=run.root_run_info,
scheduled_run=run.scheduled_run_info,
trigger="unknown",
sends_heartbeat=run.sends_heartbeat,
task_executor=run.task_executor_type,
)
def build_init_args(self):
# type: () -> InitRunArgs
run = self.run
task_run_info = self.build_task_runs_info(run.task_runs)
driver_task = run.driver_task_run.task
init_args = InitRunArgs(
run_uid=self.run.run_uid,
root_run_uid=run.root_run_info.root_run_uid,
task_runs_info=task_run_info,
driver_task_uid=run.driver_task_run.task_run_uid,
task_run_env=run.context.task_run_env,
)
if driver_task.is_submitter:
init_args.new_run_info = self._run_to_run_info()
if run.scheduled_run_info:
init_args.scheduled_run_info = run.scheduled_run_info
if run.root_run_info.root_task_run_uid:
rel = (run.root_run_info.root_task_run_uid, init_args.driver_task_uid)
task_run_info.parent_child_map.add(rel)
task_run_info.upstreams_map.add(rel)
return init_args
def build_task_runs_info(self, task_runs, dynamic_task_run_update=False):
# type: (List[TaskRun], bool) -> TaskRunsInfo
run = self.run
task_defs = {}
all_task_models = {}
all_targets = {}
for task_run in task_runs:
task = task_run.task
# we process only tasks in current dag
task_def_id = task.task_definition.full_task_family
if task_def_id not in task_defs:
task_defs[task_def_id] = task_to_task_def(run.context, task)
self.task_to_targets(task, all_targets)
all_task_models[task.task_id] = build_task_run_info(task_run)
def _add_rel(rel_map, t_id_1, t_id_2):
if t_id_1 in all_task_models or t_id_2 in all_task_models:
tr_1 = run.get_task_run_by_id(t_id_1)
tr_2 = run.get_task_run_by_id(t_id_2)
if tr_1 and tr_2:
rel_map.add((tr_1.task_run_uid, tr_2.task_run_uid))
# set children/upstreams maps
upstreams_map = set()
parent_child_map = set()
for task_run in run.task_runs:
task = task_run.task
for t_id in task.task_meta.children:
_add_rel(parent_child_map, task.task_id, t_id)
task_dag = task.ctrl.task_dag
for upstream in task_dag.upstream:
_add_rel(upstreams_map, task.task_id, upstream.task_id)
return TaskRunsInfo(
run_uid=self.run.run_uid,
root_run_uid=self.run.root_run_info.root_run_uid,
task_run_env_uid=run.context.task_run_env.uid,
task_definitions=list(task_defs.values()),
task_runs=list(all_task_models.values()),
targets=list(all_targets.values()),
parent_child_map=parent_child_map,
upstreams_map=upstreams_map,
dynamic_task_run_update=dynamic_task_run_update,
)
def task_to_targets(self, task, targets):
# type: (Task, Dict[str, TargetInfo]) -> List[TargetInfo]
"""
:param run:
:param task:
:param targets: all known targets for current run, so we have uniq list of targets (by path)
:return:
"""
run = self.run
task_targets = []
def process_target(target, name):
# type: (Target, str) -> None
target_path = str(target)
dbnd_target = targets.get(target_path)
if not dbnd_target:
# we see this target for the first time
target_task_run_uid = (
None
) # let assume that Target is now owned by any task
# let try to find it's owner, so we create target that relates to some Task
# if `task` is pipeline, the target owner is going to be different task
if target.task:
target_task_run = run.get_task_run(target.task.task_id)
if target_task_run:
target_task_run_uid = target_task_run.task_run_uid
dbnd_target = targets[target_path] = TargetInfo(
path=target_path,
created_date=utcnow(),
task_run_uid=target_task_run_uid,
parameter_name=name,
)
logger.debug(
"New Target: %s -> %s -> %s",
target.task,
target_task_run_uid,
target_path,
)
task_targets.append(dbnd_target)
rels = task.ctrl.relations
for io_params in chain(rels.task_outputs.values(), rels.task_inputs.values()):
for name, t in io_params.items():
traverse(t, convert_f=partial(process_target, name=name))
return task_targets
def task_to_task_def(ctx, task):
# type: (DatabandContext, Task) -> TaskDefinitionInfo
td = task.task_definition
task_param_definitions = list(td.task_params.values())
task_family = task.task_meta.task_family
task_definition = TaskDefinitionInfo(
task_definition_uid=td.task_definition_uid,
class_version=task.task_class_version,
family=task_family,
module_source=td.task_module_code,
module_source_hash=source_md5(td.task_module_code),
name=task_family,
source=td.task_source_code,
source_hash=source_md5(td.task_source_code),
type=task.task_meta.task_type,
task_param_definitions=task_param_definitions,
)
return task_definition
def build_task_run_info(task_run):
# type: (TaskRun) -> TaskRunInfo
t = task_run.task
tm = task_run.task.task_meta
task_dag = t.ctrl.task_dag
log_local, log_remote = task_run._get_log_files()
task_params_values = dict(t._params.get_params_serialized())
task_definition = t.task_definition
task_run_params = [
TaskRunParamInfo(
parameter_name=tdp.name,
value_origin=t._params.get_param_value_origin(tdp.name),
value=safe_short_string(task_params_values[tdp.name], max_value_len=5000),
)
for tdp in task_definition.task_params.values()
]
return TaskRunInfo(
run_uid=task_run.run.run_uid,
task_definition_uid=task_run.task.task_definition.task_definition_uid,
task_run_uid=task_run.task_run_uid, # this is not the TaskRun uid
task_run_attempt_uid=task_run.task_run_attempt_uid, # this is not the TaskRun uid
task_id=t.task_id,
task_af_id=task_run.task_af_id,
name=t.task_name,
task_signature=tm.task_signature,
task_signature_source=tm.task_signature_source,
output_signature=tm.task_outputs_signature,
command_line=tm.task_command_line,
env=t.task_env.name,
functional_call=tm.task_functional_call,
has_downstreams=bool(task_dag.downstream),
has_upstreams=bool(task_dag.upstream),
state=TaskRunState.SCHEDULED
if not task_run.is_reused
else TaskRunState.SUCCESS,
is_reused=task_run.is_reused,
is_skipped=task_run.is_skipped,
is_dynamic=task_run.is_dynamic,
is_system=task_run.is_system,
version=t.task_version,
target_date=t.task_target_date,
log_local=log_local,
log_remote=log_remote,
task_run_params=task_run_params,
execution_date=task_run.run.execution_date,
is_root=task_run.is_root,
)
def source_md5(source_code):
if source_code:
try:
return hashlib.md5(source_code.encode("utf-8")).hexdigest()
except UnicodeDecodeError:
return hashlib.md5(source_code).hexdigest()
| import hashlib
import logging
import typing
from functools import partial
from itertools import chain
from dbnd._core.constants import RunState, TaskRunState
from dbnd._core.context.databand_context import DatabandContext
from dbnd._core.tracking.tracking_info_objects import (
TargetInfo,
TaskDefinitionInfo,
TaskRunInfo,
TaskRunParamInfo,
)
from dbnd._core.tracking.tracking_info_run import RunInfo
from dbnd._core.utils.string_utils import safe_short_string
from dbnd._core.utils.timezone import utcnow
from dbnd._core.utils.traversing import traverse
from dbnd.api.tracking_api import InitRunArgs, TaskRunsInfo
if typing.TYPE_CHECKING:
from typing import Dict, List
from targets import Target
from dbnd import Task
from dbnd._core.run.databand_run import DatabandRun
from dbnd._core.task_run.task_run import TaskRun
logger = logging.getLogger(__name__)
class TrackingInfoBuilder(object):
def __init__(self, run):
self.run = run # type: DatabandRun
def _run_to_run_info(self):
# type: () -> RunInfo
run = self.run
task = run.driver_task_run.task
context = run.context
env = run.env
return RunInfo(
run_uid=run.run_uid,
job_name=run.job_name,
user=context.task_run_env.user,
name=run.name,
state=RunState.RUNNING,
start_time=utcnow(),
end_time=None,
description=run.description,
is_archived=run.is_archived,
env_name=env.name,
cloud_type=env.cloud_type,
# deprecate and airflow
dag_id=run.dag_id,
execution_date=run.execution_date,
cmd_name=context.name,
driver_name=env.remote_engine or env.local_engine,
# move to task
target_date=task.task_target_date,
version=task.task_version,
# root and submitted by
root_run=run.root_run_info,
scheduled_run=run.scheduled_run_info,
trigger="unknown",
sends_heartbeat=run.sends_heartbeat,
task_executor=run.task_executor_type,
)
def build_init_args(self):
# type: () -> InitRunArgs
run = self.run
task_run_info = self.build_task_runs_info(run.task_runs)
driver_task = run.driver_task_run.task
init_args = InitRunArgs(
run_uid=self.run.run_uid,
root_run_uid=run.root_run_info.root_run_uid,
task_runs_info=task_run_info,
driver_task_uid=run.driver_task_run.task_run_uid,
task_run_env=run.context.task_run_env,
)
if driver_task.is_submitter:
init_args.new_run_info = self._run_to_run_info()
if run.scheduled_run_info:
init_args.scheduled_run_info = run.scheduled_run_info
if run.root_run_info.root_task_run_uid:
rel = (run.root_run_info.root_task_run_uid, init_args.driver_task_uid)
task_run_info.parent_child_map.add(rel)
task_run_info.upstreams_map.add(rel)
return init_args
def build_task_runs_info(self, task_runs, dynamic_task_run_update=False):
# type: (List[TaskRun], bool) -> TaskRunsInfo
run = self.run
task_defs = {}
all_task_models = {}
all_targets = {}
for task_run in task_runs:
task = task_run.task
# we process only tasks in current dag
task_def_id = task.task_definition.full_task_family
if task_def_id not in task_defs:
task_defs[task_def_id] = task_to_task_def(run.context, task)
self.task_to_targets(task, all_targets)
all_task_models[task.task_id] = build_task_run_info(task_run)
def _add_rel(rel_map, t_id_1, t_id_2):
if t_id_1 in all_task_models or t_id_2 in all_task_models:
tr_1 = run.get_task_run_by_id(t_id_1)
tr_2 = run.get_task_run_by_id(t_id_2)
if tr_1 and tr_2:
rel_map.add((tr_1.task_run_uid, tr_2.task_run_uid))
# set children/upstreams maps
upstreams_map = set()
parent_child_map = set()
for task_run in run.task_runs:
task = task_run.task
for t_id in task.task_meta.children:
_add_rel(parent_child_map, task.task_id, t_id)
task_dag = task.ctrl.task_dag
for upstream in task_dag.upstream:
_add_rel(upstreams_map, task.task_id, upstream.task_id)
return TaskRunsInfo(
run_uid=self.run.run_uid,
root_run_uid=self.run.root_run_info.root_run_uid,
task_run_env_uid=run.context.task_run_env.uid,
task_definitions=list(task_defs.values()),
task_runs=list(all_task_models.values()),
targets=list(all_targets.values()),
parent_child_map=parent_child_map,
upstreams_map=upstreams_map,
dynamic_task_run_update=dynamic_task_run_update,
)
def task_to_targets(self, task, targets):
# type: (Task, Dict[str, TargetInfo]) -> List[TargetInfo]
"""
:param run:
:param task:
:param targets: all known targets for current run, so we have uniq list of targets (by path)
:return:
"""
run = self.run
task_targets = []
def process_target(target, name):
# type: (Target, str) -> None
target_path = str(target)
dbnd_target = targets.get(target_path)
if not dbnd_target:
# we see this target for the first time
target_task_run_uid = (
None
) # let assume that Target is now owned by any task
# let try to find it's owner, so we create target that relates to some Task
# if `task` is pipeline, the target owner is going to be different task
if target.task:
target_task_run = run.get_task_run(target.task.task_id)
if target_task_run:
target_task_run_uid = target_task_run.task_run_uid
dbnd_target = targets[target_path] = TargetInfo(
path=target_path,
created_date=utcnow(),
task_run_uid=target_task_run_uid,
parameter_name=name,
)
logger.debug(
"New Target: %s -> %s -> %s",
target.task,
target_task_run_uid,
target_path,
)
task_targets.append(dbnd_target)
rels = task.ctrl.relations
for io_params in chain(rels.task_outputs.values(), rels.task_inputs.values()):
for name, t in io_params.items():
traverse(t, convert_f=partial(process_target, name=name))
return task_targets
def task_to_task_def(ctx, task):
# type: (DatabandContext, Task) -> TaskDefinitionInfo
td = task.task_definition
task_param_definitions = list(td.task_params.values())
task_family = task.task_meta.task_family
task_definition = TaskDefinitionInfo(
task_definition_uid=td.task_definition_uid,
class_version=task.task_class_version,
family=task_family,
module_source=td.task_module_code,
module_source_hash=source_md5(td.task_module_code),
name=task_family,
source=td.task_source_code,
source_hash=source_md5(td.task_source_code),
type=task.task_meta.task_type,
task_param_definitions=task_param_definitions,
)
return task_definition
def build_task_run_info(task_run):
# type: (TaskRun) -> TaskRunInfo
t = task_run.task
tm = task_run.task.task_meta
task_dag = t.ctrl.task_dag
log_local, log_remote = task_run._get_log_files()
task_params_values = dict(t._params.get_params_serialized())
task_definition = t.task_definition
task_run_params = [
TaskRunParamInfo(
parameter_name=tdp.name,
value_origin=t._params.get_param_value_origin(tdp.name),
value=safe_short_string(task_params_values[tdp.name], max_value_len=5000),
)
for tdp in task_definition.task_params.values()
]
return TaskRunInfo(
run_uid=task_run.run.run_uid,
task_definition_uid=task_run.task.task_definition.task_definition_uid,
task_run_uid=task_run.task_run_uid, # this is not the TaskRun uid
task_run_attempt_uid=task_run.task_run_attempt_uid, # this is not the TaskRun uid
task_id=t.task_id,
task_af_id=task_run.task_af_id,
name=t.task_name,
task_signature=tm.task_signature,
task_signature_source=tm.task_signature_source,
output_signature=tm.task_outputs_signature,
command_line=tm.task_command_line,
env=t.task_env.name,
functional_call=tm.task_functional_call,
has_downstreams=bool(task_dag.downstream),
has_upstreams=bool(task_dag.upstream),
state=TaskRunState.SCHEDULED
if not task_run.is_reused
else TaskRunState.SUCCESS,
is_reused=task_run.is_reused,
is_skipped=task_run.is_skipped,
is_dynamic=task_run.is_dynamic,
is_system=task_run.is_system,
version=t.task_version,
target_date=t.task_target_date,
log_local=log_local,
log_remote=log_remote,
task_run_params=task_run_params,
execution_date=task_run.run.execution_date,
is_root=task_run.is_root,
)
def source_md5(source_code):
if source_code:
try:
return hashlib.md5(source_code.encode("utf-8")).hexdigest()
except UnicodeDecodeError:
return hashlib.md5(source_code).hexdigest()
| en | 0.875614 | # type: DatabandRun # type: () -> RunInfo # deprecate and airflow # move to task # root and submitted by # type: () -> InitRunArgs # type: (List[TaskRun], bool) -> TaskRunsInfo # we process only tasks in current dag # set children/upstreams maps # type: (Task, Dict[str, TargetInfo]) -> List[TargetInfo] :param run: :param task: :param targets: all known targets for current run, so we have uniq list of targets (by path) :return: # type: (Target, str) -> None # we see this target for the first time # let assume that Target is now owned by any task # let try to find it's owner, so we create target that relates to some Task # if `task` is pipeline, the target owner is going to be different task # type: (DatabandContext, Task) -> TaskDefinitionInfo # type: (TaskRun) -> TaskRunInfo # this is not the TaskRun uid # this is not the TaskRun uid | 1.820341 | 2 |
arjuna/core/types/named_strings.py | test-mile/arjuna | 9 | 6624671 | '''
This file is a part of Test Mile Arjuna
Copyright 2018 Test Mile Software Testing Pvt Ltd
Website: www.TestMile.com
Email: support [at] testmile.com
Creator: <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from arjuna.core.exceptions import *
class NamedString:
def __init__(self, internal, external):
self.code = internal
self.name = external
def get_code(self):
return self.code
def get_name(self):
return self.name
class NamedStringsContainer:
def __init__(self, name):
self.container_name = name
self.names = []
def add(self, name):
self.names.append(name)
def get_name(self):
return self.container_name
def get_named_strings(self):
return self.names
class Name(NamedString):
pass
class Message(NamedString):
pass
class NamesContainer(NamedStringsContainer):
pass
class MessagesContainer(NamedStringsContainer):
pass
def populate_from_codelist(map, codes):
for index, code in enumerate(codes):
map[code[0].upper().trim()] = code[2]
def populate_from_codemap(map, codes):
for k, v in codes:
map[k.upper()] = v
def populate_from_namedstringlist(map, named_strings):
for ns in named_strings:
map[ns.get_code().upper()] = ns.get_name()
class StringsManager:
def __init__(self):
self.msg_map = {}
self.name_map = {}
self.prop_map = {}
self.flattened_names = {}
def populate_names(self, names_containers_list):
for nc in names_containers_list:
if nc.get_name() not in self.name_map:
self.name_map[nc.get_name()] = {}
populate_from_namedstringlist(self.name_map[nc.get_name()], nc.get_named_strings())
def populate_messages(self, messages_containers_list):
for mc in messages_containers_list:
if mc.get_name() not in self.msg_map:
self.msg_map[mc.get_name()] = {}
populate_from_namedstringlist(self.msg_map[mc.get_name()], mc.get_named_strings())
def populate_flattened_names(self):
for section in self.name_map:
for key in self.name_map[section]:
self.flattened_names[section + "::" + key] = self.name_map[section][key]
def get_all_names(self):
return self.name_map
def get_all_messages(self):
return self.msg_map;
def get_flattneded_names(self):
return self.flattened_names;
def __section_exists(self, section):
return section in self.msg_map
def __code_exists(self, section, code):
if not self.__section_exists(section):
return False;
return code in self.msg_map[section]
def __throw_not_initialized_exception(self, context, method):
raise Problem("adv", context, method, "LOCALIZER_NOT_INITIALIZED",
"Strings Manager not initialized.")
def __get_text_for_code(self, section, msg_code):
section_code = section.to_upper_case().trim();
code = msg_code.to_upper_case().trim();
if not self.__code_exists(section_code, code):
return code;
return self.msg_map[section_code][code]
def get_info_message_text(self, msg_code):
return self.__get_text_for_code("INFO_MESSAGES", msg_code)
def get_problem_text(self, msg_code):
return self.__get_text_for_code("PROBLEM_MESSAGES", msg_code)
def get_warning_text(self, msg_code):
return self.__get_text_for_code("WARNING_MESSAGES", msg_code);
def get_configured_name(self, section_name, internal_name):
return self.name_map[section_name.to_upper_case().trim()][internal_name.to_upper_case().trim()]
class problem_codes:
pass
class info_codes:
pass
class error_codes:
pass
def add_property_name(self, prop_code, prop_name):
self.prop_map[prop_code.upper()] = prop_name
def get_property_name(self, prop_code):
return self.prop_map[prop_code.upper()]
| '''
This file is a part of Test Mile Arjuna
Copyright 2018 Test Mile Software Testing Pvt Ltd
Website: www.TestMile.com
Email: support [at] testmile.com
Creator: <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from arjuna.core.exceptions import *
class NamedString:
def __init__(self, internal, external):
self.code = internal
self.name = external
def get_code(self):
return self.code
def get_name(self):
return self.name
class NamedStringsContainer:
def __init__(self, name):
self.container_name = name
self.names = []
def add(self, name):
self.names.append(name)
def get_name(self):
return self.container_name
def get_named_strings(self):
return self.names
class Name(NamedString):
pass
class Message(NamedString):
pass
class NamesContainer(NamedStringsContainer):
pass
class MessagesContainer(NamedStringsContainer):
pass
def populate_from_codelist(map, codes):
for index, code in enumerate(codes):
map[code[0].upper().trim()] = code[2]
def populate_from_codemap(map, codes):
for k, v in codes:
map[k.upper()] = v
def populate_from_namedstringlist(map, named_strings):
for ns in named_strings:
map[ns.get_code().upper()] = ns.get_name()
class StringsManager:
def __init__(self):
self.msg_map = {}
self.name_map = {}
self.prop_map = {}
self.flattened_names = {}
def populate_names(self, names_containers_list):
for nc in names_containers_list:
if nc.get_name() not in self.name_map:
self.name_map[nc.get_name()] = {}
populate_from_namedstringlist(self.name_map[nc.get_name()], nc.get_named_strings())
def populate_messages(self, messages_containers_list):
for mc in messages_containers_list:
if mc.get_name() not in self.msg_map:
self.msg_map[mc.get_name()] = {}
populate_from_namedstringlist(self.msg_map[mc.get_name()], mc.get_named_strings())
def populate_flattened_names(self):
for section in self.name_map:
for key in self.name_map[section]:
self.flattened_names[section + "::" + key] = self.name_map[section][key]
def get_all_names(self):
return self.name_map
def get_all_messages(self):
return self.msg_map;
def get_flattneded_names(self):
return self.flattened_names;
def __section_exists(self, section):
return section in self.msg_map
def __code_exists(self, section, code):
if not self.__section_exists(section):
return False;
return code in self.msg_map[section]
def __throw_not_initialized_exception(self, context, method):
raise Problem("adv", context, method, "LOCALIZER_NOT_INITIALIZED",
"Strings Manager not initialized.")
def __get_text_for_code(self, section, msg_code):
section_code = section.to_upper_case().trim();
code = msg_code.to_upper_case().trim();
if not self.__code_exists(section_code, code):
return code;
return self.msg_map[section_code][code]
def get_info_message_text(self, msg_code):
return self.__get_text_for_code("INFO_MESSAGES", msg_code)
def get_problem_text(self, msg_code):
return self.__get_text_for_code("PROBLEM_MESSAGES", msg_code)
def get_warning_text(self, msg_code):
return self.__get_text_for_code("WARNING_MESSAGES", msg_code);
def get_configured_name(self, section_name, internal_name):
return self.name_map[section_name.to_upper_case().trim()][internal_name.to_upper_case().trim()]
class problem_codes:
pass
class info_codes:
pass
class error_codes:
pass
def add_property_name(self, prop_code, prop_name):
self.prop_map[prop_code.upper()] = prop_name
def get_property_name(self, prop_code):
return self.prop_map[prop_code.upper()]
| en | 0.850593 | This file is a part of Test Mile Arjuna Copyright 2018 Test Mile Software Testing Pvt Ltd Website: www.TestMile.com Email: support [at] testmile.com Creator: <NAME> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 1.976273 | 2 |
specific_scripts/connect_remote_mongo.py | kenstars/Python_Scripts | 0 | 6624672 | from sshtunnel import SSHTunnelForwarder
import pymongo
MONGO_HOST = "REMOTE HOST IP"
MONGO_DB = "MONGO DB NAME"
MONGO_USER = "USERNAME"
MONGO_PASS = "PASSWORD"
server = SSHTunnelForwarder(
MONGO_HOST,
ssh_username=MONGO_USER,
ssh_password=<PASSWORD>,
remote_bind_address=('127.0.0.1', 27017)
)
server.start() # start the ssh tunnel
client = pymongo.MongoClient('127.0.0.1', server.local_bind_port) # server.local_bind_port is assigned local port
db = client[MONGO_DB]
#############################
# Any mongo queries required can be used here
# As per requirement.
#############################
server.stop() # stop the ssh tunnel | from sshtunnel import SSHTunnelForwarder
import pymongo
MONGO_HOST = "REMOTE HOST IP"
MONGO_DB = "MONGO DB NAME"
MONGO_USER = "USERNAME"
MONGO_PASS = "PASSWORD"
server = SSHTunnelForwarder(
MONGO_HOST,
ssh_username=MONGO_USER,
ssh_password=<PASSWORD>,
remote_bind_address=('127.0.0.1', 27017)
)
server.start() # start the ssh tunnel
client = pymongo.MongoClient('127.0.0.1', server.local_bind_port) # server.local_bind_port is assigned local port
db = client[MONGO_DB]
#############################
# Any mongo queries required can be used here
# As per requirement.
#############################
server.stop() # stop the ssh tunnel | en | 0.380592 | # start the ssh tunnel # server.local_bind_port is assigned local port ############################# # Any mongo queries required can be used here # As per requirement. ############################# # stop the ssh tunnel | 3.007726 | 3 |
happier/util.py | williamhogman/happier | 5 | 6624673 | <reponame>williamhogman/happier<gh_stars>1-10
import os
import os.path
ROOTS = {
"pyproject.toml",
"requirements.txt",
"Pipfile",
}
def find_root(cwd: str = None, max_jumps: int = 7):
if cwd is None:
return os.getcwd()
if max_jumps == 0:
return None
files = {f for f in os.listdir(cwd) if os.path.isfile(os.path.join(cwd, f))}
roots_found = files.intersection(ROOTS)
if len(roots_found) > 0:
return cwd
else:
return find_root(os.path.abspath(os.path.join(cwd, "..")))
| import os
import os.path
ROOTS = {
"pyproject.toml",
"requirements.txt",
"Pipfile",
}
def find_root(cwd: str = None, max_jumps: int = 7):
if cwd is None:
return os.getcwd()
if max_jumps == 0:
return None
files = {f for f in os.listdir(cwd) if os.path.isfile(os.path.join(cwd, f))}
roots_found = files.intersection(ROOTS)
if len(roots_found) > 0:
return cwd
else:
return find_root(os.path.abspath(os.path.join(cwd, ".."))) | none | 1 | 2.928775 | 3 | |
ichnaea/api/tests.py | szjozsef/ichnaea | 348 | 6624674 | import json
import time
from unittest import mock
import colander
import pytest
from pyramid.request import Request
from ichnaea.api.key import get_key, Key
from ichnaea.api import exceptions as api_exceptions
from ichnaea.api.rate_limit import rate_limit_exceeded
from ichnaea.api.schema import RenamingMapping
from ichnaea.tests.factories import ApiKeyFactory, KeyFactory
class TestKey(object):
def test_empty(self, session_tracker):
key = Key()
assert isinstance(key, Key)
assert key.valid_key is None
session_tracker(0)
def test_get(self, session, session_tracker):
api_key = ApiKeyFactory()
session.flush()
session_tracker(1)
result = get_key(session, api_key.valid_key)
assert isinstance(result, Key)
session_tracker(2)
# Test get cache
result2 = get_key(session, api_key.valid_key)
assert isinstance(result2, Key)
session_tracker(2)
def test_get_miss(self, session, session_tracker):
result = get_key(session, "unknown")
assert result is None
session_tracker(1)
# Test get cache
result2 = get_key(session, "unknown")
assert result2 is None
session_tracker(1)
def test_allowed(self):
def one(**kw):
return KeyFactory(**kw)
key = one(allow_locate=True, allow_region=True)
assert key.allowed("locate")
assert key.allowed("region")
assert key.allowed("submit")
assert key.allowed("unknown") is None
assert not one(allow_locate=None).allowed("locate")
assert not one(allow_locate=False).allowed("locate")
assert not one(allow_region=None).allowed("region")
assert not one(allow_region=False).allowed("region")
def test_store_sample(self):
key = KeyFactory(store_sample_locate=None, store_sample_submit=None)
assert key.store_sample("locate") is False
assert key.store_sample("submit") is False
assert key.store_sample("region") is False
key = KeyFactory(store_sample_locate=0, store_sample_submit=100)
assert key.store_sample("locate") is False
assert key.store_sample("submit") is True
# A global_locate_sample_rate can turn off samples
assert key.store_sample("locate", global_locate_sample_rate=0.0) is False
# And can raise a sample rate
key = KeyFactory(store_sample_locate=50, store_sample_submit=None)
assert key.store_sample("locate", global_locate_sample_rate=200.0) is True
@mock.patch("ichnaea.api.key.random")
def test_store_sample_mock_random(self, mock_random):
key = KeyFactory(store_sample_locate=50)
mock_random.return_value = 0.1
assert key.store_sample("locate") is True
mock_random.return_value = 0.5
assert key.store_sample("locate") is True
mock_random.return_value = 0.51
assert key.store_sample("locate") is False
mock_random.return_value = 0.9
assert key.store_sample("locate") is False
@pytest.mark.parametrize(
"global_rate, q1, q2, q3, q4",
[
(100.0, 0.1, 0.5, 0.501, 0.7),
(50.0, 0.1, 0.25, 0.251, 0.5),
(1.0, 0.004, 0.005, 0.006, 1.0),
],
)
@mock.patch("ichnaea.api.key.random")
def test_store_sample_mock_random_with_global_rate(
self, mock_random, global_rate, q1, q2, q3, q4
):
assert 0.0 < (q3 - q2) < 0.1
key = KeyFactory(store_sample_locate=50)
mock_random.return_value = q1
assert key.store_sample("locate", global_rate) is True
mock_random.return_value = q2
assert key.store_sample("locate", global_rate) is True
mock_random.return_value = q3
assert key.store_sample("locate", global_rate) is False
mock_random.return_value = q4
assert key.store_sample("locate", global_rate) is False
def test_can_fallback(self):
def one(**kw):
return KeyFactory(**kw)
assert one(allow_fallback=True).can_fallback()
assert not one(allow_fallback=False).can_fallback()
assert not one(allow_fallback=None).can_fallback()
assert not (one(allow_fallback=True, fallback_name=None).can_fallback())
assert not (one(allow_fallback=True, fallback_url=None).can_fallback())
assert not (one(allow_fallback=True, fallback_ratelimit=None).can_fallback())
assert one(allow_fallback=True, fallback_ratelimit=0).can_fallback()
assert not (
one(allow_fallback=True, fallback_ratelimit_interval=None).can_fallback()
)
assert not (
one(allow_fallback=True, fallback_ratelimit_interval=0).can_fallback()
)
assert one(allow_fallback=True, fallback_cache_expire=None).can_fallback()
assert one(allow_fallback=True, fallback_cache_expire=0).can_fallback()
class TestRenamingMapping(object):
def test_to_name(self):
class SampleSchema(colander.MappingSchema):
schema_type = RenamingMapping
input_name = colander.SchemaNode(colander.String(), to_name="output_name")
name = colander.SchemaNode(colander.String())
def __init__(self, *args, **kwargs):
super(SampleSchema, self).__init__(*args, **kwargs)
input_data = {"input_name": "foo", "name": "bar"}
output_data = SampleSchema().deserialize(input_data)
assert output_data["output_name"] == "foo"
assert output_data["name"] == "bar"
assert "input_name" not in output_data
class TestExceptions(object):
def _check(self, error, status, json=True, content_type="application/json"):
response = Request.blank("/").get_response(error)
if content_type:
assert response.content_type == content_type
assert response.status_code == status
if json:
assert response.json == error.json_body()
return response
def test_str(self):
error = api_exceptions.LocationNotFound()
assert str(error) == "<LocationNotFound>: 404"
def test_daily_limit(self):
error = api_exceptions.DailyLimitExceeded()
response = self._check(error, 403)
assert b"dailyLimitExceeded" in response.body
def test_invalid_apikey(self):
error = api_exceptions.InvalidAPIKey()
response = self._check(error, 400)
assert b"keyInvalid" in response.body
def test_location_not_found(self):
error = api_exceptions.LocationNotFound()
response = self._check(error, 404)
assert b"notFound" in response.body
def test_parse_error(self):
error = api_exceptions.ParseError()
response = self._check(error, 400)
assert b"parseError" in response.body
def test_parse_error_details(self):
error = api_exceptions.ParseError(details=["Details of Error"])
response = self._check(error, 400, json=False)
assert b"parseError" in response.body
content = json.loads(response.body.decode())
assert content["details"] == ["Details of Error"]
def test_upload_success(self):
error = api_exceptions.UploadSuccess()
response = self._check(error, 200)
assert response.body == b"{}"
def test_upload_success_v0(self):
error = api_exceptions.UploadSuccessV0()
response = self._check(error, 204, json=False, content_type=None)
assert response.body == b""
class TestLimiter(object):
def test_maxrequests(self, redis):
rate_key = "apilimit:key_a:v1.geolocate:20150101"
maxreq = 5
expire = 1
for i in range(maxreq):
assert not rate_limit_exceeded(
redis, rate_key, maxreq=maxreq, expire=expire
)
assert rate_limit_exceeded(redis, rate_key, maxreq=maxreq, expire=expire)
def test_expiry(self, redis):
rate_key = "apilimit:key_a:v1.geolocate:20150101"
maxreq = 100
expire = 1
assert not rate_limit_exceeded(redis, rate_key, maxreq=maxreq, expire=expire)
time.sleep(1.0)
assert not rate_limit_exceeded(redis, rate_key, maxreq=maxreq, expire=expire)
def test_no_limit(self):
rate_key = "apilimit:key_a:v1.geolocate:20150101"
broken_redis = None
assert not rate_limit_exceeded(broken_redis, rate_key, maxreq=0, expire=1)
| import json
import time
from unittest import mock
import colander
import pytest
from pyramid.request import Request
from ichnaea.api.key import get_key, Key
from ichnaea.api import exceptions as api_exceptions
from ichnaea.api.rate_limit import rate_limit_exceeded
from ichnaea.api.schema import RenamingMapping
from ichnaea.tests.factories import ApiKeyFactory, KeyFactory
class TestKey(object):
def test_empty(self, session_tracker):
key = Key()
assert isinstance(key, Key)
assert key.valid_key is None
session_tracker(0)
def test_get(self, session, session_tracker):
api_key = ApiKeyFactory()
session.flush()
session_tracker(1)
result = get_key(session, api_key.valid_key)
assert isinstance(result, Key)
session_tracker(2)
# Test get cache
result2 = get_key(session, api_key.valid_key)
assert isinstance(result2, Key)
session_tracker(2)
def test_get_miss(self, session, session_tracker):
result = get_key(session, "unknown")
assert result is None
session_tracker(1)
# Test get cache
result2 = get_key(session, "unknown")
assert result2 is None
session_tracker(1)
def test_allowed(self):
def one(**kw):
return KeyFactory(**kw)
key = one(allow_locate=True, allow_region=True)
assert key.allowed("locate")
assert key.allowed("region")
assert key.allowed("submit")
assert key.allowed("unknown") is None
assert not one(allow_locate=None).allowed("locate")
assert not one(allow_locate=False).allowed("locate")
assert not one(allow_region=None).allowed("region")
assert not one(allow_region=False).allowed("region")
def test_store_sample(self):
key = KeyFactory(store_sample_locate=None, store_sample_submit=None)
assert key.store_sample("locate") is False
assert key.store_sample("submit") is False
assert key.store_sample("region") is False
key = KeyFactory(store_sample_locate=0, store_sample_submit=100)
assert key.store_sample("locate") is False
assert key.store_sample("submit") is True
# A global_locate_sample_rate can turn off samples
assert key.store_sample("locate", global_locate_sample_rate=0.0) is False
# And can raise a sample rate
key = KeyFactory(store_sample_locate=50, store_sample_submit=None)
assert key.store_sample("locate", global_locate_sample_rate=200.0) is True
@mock.patch("ichnaea.api.key.random")
def test_store_sample_mock_random(self, mock_random):
key = KeyFactory(store_sample_locate=50)
mock_random.return_value = 0.1
assert key.store_sample("locate") is True
mock_random.return_value = 0.5
assert key.store_sample("locate") is True
mock_random.return_value = 0.51
assert key.store_sample("locate") is False
mock_random.return_value = 0.9
assert key.store_sample("locate") is False
@pytest.mark.parametrize(
"global_rate, q1, q2, q3, q4",
[
(100.0, 0.1, 0.5, 0.501, 0.7),
(50.0, 0.1, 0.25, 0.251, 0.5),
(1.0, 0.004, 0.005, 0.006, 1.0),
],
)
@mock.patch("ichnaea.api.key.random")
def test_store_sample_mock_random_with_global_rate(
self, mock_random, global_rate, q1, q2, q3, q4
):
assert 0.0 < (q3 - q2) < 0.1
key = KeyFactory(store_sample_locate=50)
mock_random.return_value = q1
assert key.store_sample("locate", global_rate) is True
mock_random.return_value = q2
assert key.store_sample("locate", global_rate) is True
mock_random.return_value = q3
assert key.store_sample("locate", global_rate) is False
mock_random.return_value = q4
assert key.store_sample("locate", global_rate) is False
def test_can_fallback(self):
def one(**kw):
return KeyFactory(**kw)
assert one(allow_fallback=True).can_fallback()
assert not one(allow_fallback=False).can_fallback()
assert not one(allow_fallback=None).can_fallback()
assert not (one(allow_fallback=True, fallback_name=None).can_fallback())
assert not (one(allow_fallback=True, fallback_url=None).can_fallback())
assert not (one(allow_fallback=True, fallback_ratelimit=None).can_fallback())
assert one(allow_fallback=True, fallback_ratelimit=0).can_fallback()
assert not (
one(allow_fallback=True, fallback_ratelimit_interval=None).can_fallback()
)
assert not (
one(allow_fallback=True, fallback_ratelimit_interval=0).can_fallback()
)
assert one(allow_fallback=True, fallback_cache_expire=None).can_fallback()
assert one(allow_fallback=True, fallback_cache_expire=0).can_fallback()
class TestRenamingMapping(object):
def test_to_name(self):
class SampleSchema(colander.MappingSchema):
schema_type = RenamingMapping
input_name = colander.SchemaNode(colander.String(), to_name="output_name")
name = colander.SchemaNode(colander.String())
def __init__(self, *args, **kwargs):
super(SampleSchema, self).__init__(*args, **kwargs)
input_data = {"input_name": "foo", "name": "bar"}
output_data = SampleSchema().deserialize(input_data)
assert output_data["output_name"] == "foo"
assert output_data["name"] == "bar"
assert "input_name" not in output_data
class TestExceptions(object):
def _check(self, error, status, json=True, content_type="application/json"):
response = Request.blank("/").get_response(error)
if content_type:
assert response.content_type == content_type
assert response.status_code == status
if json:
assert response.json == error.json_body()
return response
def test_str(self):
error = api_exceptions.LocationNotFound()
assert str(error) == "<LocationNotFound>: 404"
def test_daily_limit(self):
error = api_exceptions.DailyLimitExceeded()
response = self._check(error, 403)
assert b"dailyLimitExceeded" in response.body
def test_invalid_apikey(self):
error = api_exceptions.InvalidAPIKey()
response = self._check(error, 400)
assert b"keyInvalid" in response.body
def test_location_not_found(self):
error = api_exceptions.LocationNotFound()
response = self._check(error, 404)
assert b"notFound" in response.body
def test_parse_error(self):
error = api_exceptions.ParseError()
response = self._check(error, 400)
assert b"parseError" in response.body
def test_parse_error_details(self):
error = api_exceptions.ParseError(details=["Details of Error"])
response = self._check(error, 400, json=False)
assert b"parseError" in response.body
content = json.loads(response.body.decode())
assert content["details"] == ["Details of Error"]
def test_upload_success(self):
error = api_exceptions.UploadSuccess()
response = self._check(error, 200)
assert response.body == b"{}"
def test_upload_success_v0(self):
error = api_exceptions.UploadSuccessV0()
response = self._check(error, 204, json=False, content_type=None)
assert response.body == b""
class TestLimiter(object):
def test_maxrequests(self, redis):
rate_key = "apilimit:key_a:v1.geolocate:20150101"
maxreq = 5
expire = 1
for i in range(maxreq):
assert not rate_limit_exceeded(
redis, rate_key, maxreq=maxreq, expire=expire
)
assert rate_limit_exceeded(redis, rate_key, maxreq=maxreq, expire=expire)
def test_expiry(self, redis):
rate_key = "apilimit:key_a:v1.geolocate:20150101"
maxreq = 100
expire = 1
assert not rate_limit_exceeded(redis, rate_key, maxreq=maxreq, expire=expire)
time.sleep(1.0)
assert not rate_limit_exceeded(redis, rate_key, maxreq=maxreq, expire=expire)
def test_no_limit(self):
rate_key = "apilimit:key_a:v1.geolocate:20150101"
broken_redis = None
assert not rate_limit_exceeded(broken_redis, rate_key, maxreq=0, expire=1)
| en | 0.672077 | # Test get cache # Test get cache # A global_locate_sample_rate can turn off samples # And can raise a sample rate | 2.036604 | 2 |
serverless_zoom_recordings/finish_ingest.py | openlibraryenvironment/serverless-zoom-recordings | 0 | 6624675 | """
Perform the final actions of the ingestion step function.
"""
import json
import os
import boto3
import structlog
from zoomus import ZoomClient
from .util.identifiers import parse_organization
from .util.log_config import setup_logging
from .util.recording_path import recording_path
DEPLOYMENT_STAGE = os.environ["DEPLOYMENT_STAGE"]
RECORDINGS_BUCKET = os.environ["RECORDINGS_BUCKET"]
ZOOM_API_KEY = os.environ["ZOOM_API_KEY"]
ZOOM_API_SECRET = os.environ["ZOOM_API_SECRET"]
MEETINGS_DYNAMODB_TABLE = os.environ["MEETINGS_DYNAMODB_TABLE"]
NOTIFY_WEB_BUILDER_QUEUE = os.environ["NOTIFY_WEB_BUILDER_QUEUE"]
s3 = boto3.resource("s3")
s3_client = boto3.client("s3")
dynamodb = boto3.resource("dynamodb")
meetings_table = dynamodb.Table(MEETINGS_DYNAMODB_TABLE)
sqs = boto3.resource("sqs")
web_builder_notify = sqs.Queue(NOTIFY_WEB_BUILDER_QUEUE)
zoom_client = ZoomClient(ZOOM_API_KEY, ZOOM_API_SECRET)
def handler(sf_input, context):
"""Handle Step Function"""
setup_logging()
log = structlog.get_logger()
aws_request_id = context.aws_request_id if context is not None else "*NO CONTEXT*"
log = structlog.get_logger()
log = log.bind(aws_request_id=aws_request_id)
if "_recording_id" in sf_input:
recording_id = sf_input["_recording_id"]
log = log.bind(recording_id=recording_id)
log.info("STARTED", reason=recording_id, stepfunction_input=sf_input)
else:
log.error(
"STARTUP FAILED PRECONDITION",
reason="_recording_id not found in step function input",
stepfunction_input=sf_input,
)
raise RuntimeError("_recording_id not found in step function input")
sf_output = {"_recording_id": recording_id}
##STAGE Save recording document
stage = "Save recording document"
organization = parse_organization(sf_input["parent_meeting_metadata"]["topic"])
path = recording_path(
organization=organization,
meeting_topic=sf_input["parent_meeting_metadata"]["topic"],
meeting_start=sf_input["past_meeting_metadata"]["start_time"],
)
recording_document = {
"recording_id": recording_id,
"recording_path": path,
"meeting_uuid": sf_input["recording_metadata"]["payload"]["object"]["uuid"],
"parent_meeting_uuid": sf_input["parent_meeting_metadata"]["uuid"],
"organization": organization,
"meeting_id": sf_input["parent_meeting_metadata"]["id"],
"meeting_topic": sf_input["parent_meeting_metadata"]["topic"],
"start_time": sf_input["past_meeting_metadata"]["start_time"],
"end_time": sf_input["past_meeting_metadata"]["end_time"],
"password": sf_input["parent_meeting_metadata"].get("password", ""),
"host_id": sf_input["parent_meeting_metadata"]["host_id"],
}
recording_document["files"] = []
for file in sf_input["recordings_map_results"]:
file_data = {
"recording_type": file["recording_type"],
"s3_url": file["location"],
"etag": file["eTag"],
"zoom_file_size": file["zoom_file_size"],
"mime_type": file["mime_type"],
}
recording_document["files"].append(file_data)
log.info(stage, reason="Recording document", recording_document=recording_document)
recording_json_key = f"{recording_id}/recording_document.json"
s3_object = s3.Object(RECORDINGS_BUCKET, recording_json_key)
response = s3_object.put(
Body=json.dumps(recording_document), ContentType="application/json"
)
log.debug(stage, reason="Put recording document to S3", response=response)
response = meetings_table.put_item(Item=recording_document)
log.debug(stage, reason="Put recording document to DB", response=response)
##STAGE Delete recording from Zoom
stage = "Delete recording from Zoom"
if DEPLOYMENT_STAGE == "prod":
api_response = zoom_client.recording.delete(
meeting_id=sf_input["recording_metadata"]["payload"]["object"]["uuid"]
)
api_content = json.loads(api_response.content) if api_response.content else {}
if not api_response.ok:
reason = api_content["message"] if "message" in api_content else "unknown"
log.warning(
stage,
reason=reason,
response=api_response,
response_content=api_response.content,
)
else:
log.debug(
stage,
reason="Deleted recording",
response=api_response,
response_content=api_response.content,
)
else:
log.info(stage, reason="Not in production deployment, recording not deleted")
##STAGE Send message to website builder routine
stage = "Notify web-builder"
response = web_builder_notify.send_message(
MessageBody=json.dumps(recording_document)
)
log.info(stage, reason="Complete", response=response, body=recording_document)
return sf_output
| """
Perform the final actions of the ingestion step function.
"""
import json
import os
import boto3
import structlog
from zoomus import ZoomClient
from .util.identifiers import parse_organization
from .util.log_config import setup_logging
from .util.recording_path import recording_path
DEPLOYMENT_STAGE = os.environ["DEPLOYMENT_STAGE"]
RECORDINGS_BUCKET = os.environ["RECORDINGS_BUCKET"]
ZOOM_API_KEY = os.environ["ZOOM_API_KEY"]
ZOOM_API_SECRET = os.environ["ZOOM_API_SECRET"]
MEETINGS_DYNAMODB_TABLE = os.environ["MEETINGS_DYNAMODB_TABLE"]
NOTIFY_WEB_BUILDER_QUEUE = os.environ["NOTIFY_WEB_BUILDER_QUEUE"]
s3 = boto3.resource("s3")
s3_client = boto3.client("s3")
dynamodb = boto3.resource("dynamodb")
meetings_table = dynamodb.Table(MEETINGS_DYNAMODB_TABLE)
sqs = boto3.resource("sqs")
web_builder_notify = sqs.Queue(NOTIFY_WEB_BUILDER_QUEUE)
zoom_client = ZoomClient(ZOOM_API_KEY, ZOOM_API_SECRET)
def handler(sf_input, context):
"""Handle Step Function"""
setup_logging()
log = structlog.get_logger()
aws_request_id = context.aws_request_id if context is not None else "*NO CONTEXT*"
log = structlog.get_logger()
log = log.bind(aws_request_id=aws_request_id)
if "_recording_id" in sf_input:
recording_id = sf_input["_recording_id"]
log = log.bind(recording_id=recording_id)
log.info("STARTED", reason=recording_id, stepfunction_input=sf_input)
else:
log.error(
"STARTUP FAILED PRECONDITION",
reason="_recording_id not found in step function input",
stepfunction_input=sf_input,
)
raise RuntimeError("_recording_id not found in step function input")
sf_output = {"_recording_id": recording_id}
##STAGE Save recording document
stage = "Save recording document"
organization = parse_organization(sf_input["parent_meeting_metadata"]["topic"])
path = recording_path(
organization=organization,
meeting_topic=sf_input["parent_meeting_metadata"]["topic"],
meeting_start=sf_input["past_meeting_metadata"]["start_time"],
)
recording_document = {
"recording_id": recording_id,
"recording_path": path,
"meeting_uuid": sf_input["recording_metadata"]["payload"]["object"]["uuid"],
"parent_meeting_uuid": sf_input["parent_meeting_metadata"]["uuid"],
"organization": organization,
"meeting_id": sf_input["parent_meeting_metadata"]["id"],
"meeting_topic": sf_input["parent_meeting_metadata"]["topic"],
"start_time": sf_input["past_meeting_metadata"]["start_time"],
"end_time": sf_input["past_meeting_metadata"]["end_time"],
"password": sf_input["parent_meeting_metadata"].get("password", ""),
"host_id": sf_input["parent_meeting_metadata"]["host_id"],
}
recording_document["files"] = []
for file in sf_input["recordings_map_results"]:
file_data = {
"recording_type": file["recording_type"],
"s3_url": file["location"],
"etag": file["eTag"],
"zoom_file_size": file["zoom_file_size"],
"mime_type": file["mime_type"],
}
recording_document["files"].append(file_data)
log.info(stage, reason="Recording document", recording_document=recording_document)
recording_json_key = f"{recording_id}/recording_document.json"
s3_object = s3.Object(RECORDINGS_BUCKET, recording_json_key)
response = s3_object.put(
Body=json.dumps(recording_document), ContentType="application/json"
)
log.debug(stage, reason="Put recording document to S3", response=response)
response = meetings_table.put_item(Item=recording_document)
log.debug(stage, reason="Put recording document to DB", response=response)
##STAGE Delete recording from Zoom
stage = "Delete recording from Zoom"
if DEPLOYMENT_STAGE == "prod":
api_response = zoom_client.recording.delete(
meeting_id=sf_input["recording_metadata"]["payload"]["object"]["uuid"]
)
api_content = json.loads(api_response.content) if api_response.content else {}
if not api_response.ok:
reason = api_content["message"] if "message" in api_content else "unknown"
log.warning(
stage,
reason=reason,
response=api_response,
response_content=api_response.content,
)
else:
log.debug(
stage,
reason="Deleted recording",
response=api_response,
response_content=api_response.content,
)
else:
log.info(stage, reason="Not in production deployment, recording not deleted")
##STAGE Send message to website builder routine
stage = "Notify web-builder"
response = web_builder_notify.send_message(
MessageBody=json.dumps(recording_document)
)
log.info(stage, reason="Complete", response=response, body=recording_document)
return sf_output
| en | 0.692205 | Perform the final actions of the ingestion step function. Handle Step Function ##STAGE Save recording document ##STAGE Delete recording from Zoom ##STAGE Send message to website builder routine | 2.149992 | 2 |
IPython/utils/PyColorize.py | dchichkov/ipython | 0 | 6624676 | # -*- coding: utf-8 -*-
"""
Class and program to colorize python source code for ANSI terminals.
Based on an HTML code highlighter by <NAME> found at:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52298
Modifications by <NAME> (<EMAIL>).
Information on the original HTML highlighter follows:
MoinMoin - Python Source Parser
Title: Colorize Python source using the built-in tokenizer
Submitter: <NAME>
Last Updated:2001/04/06
Version no:1.2
Description:
This code is part of MoinMoin (http://moin.sourceforge.net/) and converts
Python source code to HTML markup, rendering comments, keywords,
operators, numeric and string literals in different colors.
It shows how to use the built-in keyword, token and tokenize modules to
scan Python source code and re-emit it with no changes to its original
formatting (which is the hard part).
"""
# Public names exported by ``from PyColorize import *``.
__all__ = ['ANSICodeColors','Parser']

# Name of the color scheme used when the caller does not pick one.
_scheme_default = 'Linux'

# Imports
import StringIO
import keyword
import os
import optparse
import sys
import token
import tokenize

# Pick a tokenizer entry point that accepts strings on both Python 2 and 3.
try:
    generate_tokens = tokenize.generate_tokens
except AttributeError:
    # Python 3. Note that we use the undocumented _tokenize because it expects
    # strings, not bytes. See also Python issue #9969.
    generate_tokens = tokenize._tokenize

from IPython.utils.coloransi import *
#############################################################################
### Python Source Parser (does Highlighting)
#############################################################################

# Two synthetic token types, outside tokenize's real range (> NT_OFFSET),
# used as extra keys in the color tables below: one for Python keywords
# (which tokenize reports as plain NAMEs) and one as a catch-all fallback.
_KEYWORD = token.NT_OFFSET + 1
_TEXT    = token.NT_OFFSET + 2

#****************************************************************************
# Builtin color schemes

Colors = TermColors  # just a shorthand

# Build a few color schemes.  Each maps token types (plus the two synthetic
# types above and a 'normal' reset entry) to ANSI escape sequences.

NoColor = ColorScheme(
    'NoColor',{
    token.NUMBER     : Colors.NoColor,
    token.OP         : Colors.NoColor,
    token.STRING     : Colors.NoColor,
    tokenize.COMMENT : Colors.NoColor,
    token.NAME       : Colors.NoColor,
    token.ERRORTOKEN : Colors.NoColor,

    _KEYWORD         : Colors.NoColor,
    _TEXT            : Colors.NoColor,

    'normal'         : Colors.NoColor  # color off (usu. Colors.Normal)
    }  )

LinuxColors = ColorScheme(
    'Linux',{
    token.NUMBER     : Colors.LightCyan,
    token.OP         : Colors.Yellow,
    token.STRING     : Colors.LightBlue,
    tokenize.COMMENT : Colors.LightRed,
    token.NAME       : Colors.Normal,
    token.ERRORTOKEN : Colors.Red,

    _KEYWORD         : Colors.LightGreen,
    _TEXT            : Colors.Yellow,

    'normal'         : Colors.Normal  # color off (usu. Colors.Normal)
    } )

LightBGColors = ColorScheme(
    'LightBG',{
    token.NUMBER     : Colors.Cyan,
    token.OP         : Colors.Blue,
    token.STRING     : Colors.Blue,
    tokenize.COMMENT : Colors.Red,
    token.NAME       : Colors.Normal,
    token.ERRORTOKEN : Colors.Red,

    _KEYWORD         : Colors.Green,
    _TEXT            : Colors.Blue,

    'normal'         : Colors.Normal  # color off (usu. Colors.Normal)
    }  )

# Build table of color schemes (needed by the parser)
ANSICodeColors = ColorSchemeTable([NoColor,LinuxColors,LightBGColors],
                                  _scheme_default)
class Parser:
    """ Format colored Python source.

    Feed raw source text to :meth:`format`; it is tokenized with the
    standard library tokenizer and re-emitted with ANSI color escapes
    taken from one of the schemes in ``ANSICodeColors``, preserving the
    original formatting exactly.
    """

    def __init__(self, color_table=None, out=sys.stdout):
        """ Create a parser with a specified color table and output channel.

        Call format() to process code.

        Parameters
        ----------
        color_table : ColorSchemeTable, optional
            Table of color schemes to draw from; defaults to the
            module-level ``ANSICodeColors``.
        out : file-like, optional
            Default output channel (defaults to ``sys.stdout``).
        """
        # Note: a conditional expression, not the fragile `x and x or y`
        # idiom (which breaks for falsy-but-valid values).
        self.color_table = color_table if color_table else ANSICodeColors
        self.out = out

    def format(self, raw, out=None, scheme=''):
        """ Colorize ``raw`` source and return only the output value.

        Convenience wrapper around :meth:`format2` that discards the
        error flag.
        """
        return self.format2(raw, out, scheme)[0]

    def format2(self, raw, out=None, scheme=''):
        """ Parse and send the colored source.

        If out and scheme are not specified, the defaults (given to
        constructor) are used.

        out should be a file-type object. Optionally, out can be given as the
        string 'str' and the parser will automatically return the output in a
        string.

        Returns
        -------
        (output, error) : tuple
            ``output`` is the colorized string when string output was
            requested (None otherwise); ``error`` is True if the source
            could not be fully tokenized.
        """
        string_output = 0
        if out == 'str' or self.out == 'str' or \
           isinstance(self.out, StringIO.StringIO):
            # XXX - I don't really like this state handling logic, but at this
            # point I don't want to make major changes, so adding the
            # isinstance() check is the simplest I can do to ensure correct
            # behavior.
            out_old = self.out
            self.out = StringIO.StringIO()
            string_output = 1
        elif out is not None:
            self.out = out

        # Fast return of the unmodified input for NoColor scheme
        if scheme == 'NoColor':
            error = False
            self.out.write(raw)
            if string_output:
                return raw, error
            else:
                return None, error

        # local shorthands
        colors = self.color_table[scheme].colors
        self.colors = colors  # put in object so __call__ sees it

        # Remove trailing whitespace and normalize tabs
        self.raw = raw.expandtabs().rstrip()

        # store line offsets in self.lines; self.lines[row] is the character
        # offset of the start of source line `row` (rows are 1-based, hence
        # the dummy 0 entry at index 0).
        self.lines = [0, 0]
        pos = 0
        raw_find = self.raw.find
        lines_append = self.lines.append
        while 1:
            pos = raw_find('\n', pos) + 1
            if not pos:
                break
            lines_append(pos)
        lines_append(len(self.raw))

        # parse the source and write it; each token is dispatched to
        # __call__ via self(*atoken).
        self.pos = 0
        text = StringIO.StringIO(self.raw)

        error = False
        try:
            for atoken in generate_tokens(text.readline):
                self(*atoken)
        except tokenize.TokenError as ex:
            msg = ex.args[0]
            line = ex.args[1][0]
            self.out.write("%s\n\n*** ERROR: %s%s%s\n" %
                           (colors[token.ERRORTOKEN],
                            msg, self.raw[self.lines[line]:],
                            colors.normal)
                           )
            error = True
        self.out.write(colors.normal + '\n')
        if string_output:
            output = self.out.getvalue()
            self.out = out_old
            return (output, error)
        return (None, error)

    def __call__(self, toktype, toktext, start_pos, end_pos, line):
        """ Token handler, with syntax highlighting.

        The start/end positions arrive as ``(row, col)`` tuples and are
        unpacked in the body: the previous signature used Python 2 tuple
        parameter unpacking, which is a SyntaxError on Python 3
        (removed by PEP 3113).
        """
        srow, scol = start_pos
        erow, ecol = end_pos

        # local shorthands
        colors = self.colors
        owrite = self.out.write

        # line separator, so this works across platforms
        linesep = os.linesep

        # calculate new positions
        oldpos = self.pos
        newpos = self.lines[srow] + scol
        self.pos = newpos + len(toktext)

        # send the original whitespace, if needed
        if newpos > oldpos:
            owrite(self.raw[oldpos:newpos])

        # skip indenting tokens
        if toktype in [token.INDENT, token.DEDENT]:
            self.pos = newpos
            return

        # map token type to a color group
        if token.LPAR <= toktype and toktype <= token.OP:
            toktype = token.OP
        elif toktype == token.NAME and keyword.iskeyword(toktext):
            toktype = _KEYWORD
        color = colors.get(toktype, colors[_TEXT])

        #print '<%s>' % toktext,    # dbg

        # Triple quoted strings must be handled carefully so that backtracking
        # in pagers works correctly. We need color terminators on _each_ line.
        if linesep in toktext:
            toktext = toktext.replace(linesep, '%s%s%s' %
                                      (colors.normal, linesep, color))

        # send text
        owrite('%s%s%s' % (color, toktext, colors.normal))
def main(argv=None):
"""Run as a command-line script: colorize a python file or stdin using ANSI
color escapes and print to stdout.
Inputs:
- argv(None): a list of strings like sys.argv[1:] giving the command-line
arguments. If None, use sys.argv[1:].
"""
usage_msg = """%prog [options] [filename]
Colorize a python file or stdin using ANSI color escapes and print to stdout.
If no filename is given, or if filename is -, read standard input."""
parser = optparse.OptionParser(usage=usage_msg)
newopt = parser.add_option
newopt('-s','--scheme',metavar='NAME',dest='scheme_name',action='store',
choices=['Linux','LightBG','NoColor'],default=_scheme_default,
help="give the color scheme to use. Currently only 'Linux'\
(default) and 'LightBG' and 'NoColor' are implemented (give without\
quotes)")
opts,args = parser.parse_args(argv)
if len(args) > 1:
parser.error("you must give at most one filename.")
if len(args) == 0:
fname = '-' # no filename given; setup to read from stdin
else:
fname = args[0]
if fname == '-':
stream = sys.stdin
else:
try:
stream = file(fname)
except IOError,msg:
print >> sys.stderr, msg
sys.exit(1)
parser = Parser()
# we need nested try blocks because pre-2.5 python doesn't support unified
# try-except-finally
try:
try:
# write colorized version to stdout
parser.format(stream.read(),scheme=opts.scheme_name)
except IOError,msg:
# if user reads through a pager and quits, don't print traceback
if msg.args != (32,'Broken pipe'):
raise
finally:
if stream is not sys.stdin:
stream.close() # in case a non-handled exception happened above
if __name__ == "__main__":
main()
| # -*- coding: utf-8 -*-
"""
Class and program to colorize python source code for ANSI terminals.
Based on an HTML code highlighter by <NAME> found at:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52298
Modifications by <NAME> (<EMAIL>).
Information on the original HTML highlighter follows:
MoinMoin - Python Source Parser
Title: Colorize Python source using the built-in tokenizer
Submitter: <NAME>
Last Updated:2001/04/06
Version no:1.2
Description:
This code is part of MoinMoin (http://moin.sourceforge.net/) and converts
Python source code to HTML markup, rendering comments, keywords,
operators, numeric and string literals in different colors.
It shows how to use the built-in keyword, token and tokenize modules to
scan Python source code and re-emit it with no changes to its original
formatting (which is the hard part).
"""
__all__ = ['ANSICodeColors','Parser']
_scheme_default = 'Linux'
# Imports
import StringIO
import keyword
import os
import optparse
import sys
import token
import tokenize
try:
generate_tokens = tokenize.generate_tokens
except AttributeError:
# Python 3. Note that we use the undocumented _tokenize because it expects
# strings, not bytes. See also Python issue #9969.
generate_tokens = tokenize._tokenize
from IPython.utils.coloransi import *
#############################################################################
### Python Source Parser (does Hilighting)
#############################################################################
_KEYWORD = token.NT_OFFSET + 1
_TEXT = token.NT_OFFSET + 2
#****************************************************************************
# Builtin color schemes
Colors = TermColors # just a shorthand
# Build a few color schemes
NoColor = ColorScheme(
'NoColor',{
token.NUMBER : Colors.NoColor,
token.OP : Colors.NoColor,
token.STRING : Colors.NoColor,
tokenize.COMMENT : Colors.NoColor,
token.NAME : Colors.NoColor,
token.ERRORTOKEN : Colors.NoColor,
_KEYWORD : Colors.NoColor,
_TEXT : Colors.NoColor,
'normal' : Colors.NoColor # color off (usu. Colors.Normal)
} )
LinuxColors = ColorScheme(
'Linux',{
token.NUMBER : Colors.LightCyan,
token.OP : Colors.Yellow,
token.STRING : Colors.LightBlue,
tokenize.COMMENT : Colors.LightRed,
token.NAME : Colors.Normal,
token.ERRORTOKEN : Colors.Red,
_KEYWORD : Colors.LightGreen,
_TEXT : Colors.Yellow,
'normal' : Colors.Normal # color off (usu. Colors.Normal)
} )
LightBGColors = ColorScheme(
'LightBG',{
token.NUMBER : Colors.Cyan,
token.OP : Colors.Blue,
token.STRING : Colors.Blue,
tokenize.COMMENT : Colors.Red,
token.NAME : Colors.Normal,
token.ERRORTOKEN : Colors.Red,
_KEYWORD : Colors.Green,
_TEXT : Colors.Blue,
'normal' : Colors.Normal # color off (usu. Colors.Normal)
} )
# Build table of color schemes (needed by the parser)
ANSICodeColors = ColorSchemeTable([NoColor,LinuxColors,LightBGColors],
_scheme_default)
class Parser:
""" Format colored Python source.
"""
def __init__(self, color_table=None,out = sys.stdout):
""" Create a parser with a specified color table and output channel.
Call format() to process code.
"""
self.color_table = color_table and color_table or ANSICodeColors
self.out = out
def format(self, raw, out = None, scheme = ''):
return self.format2(raw, out, scheme)[0]
def format2(self, raw, out = None, scheme = ''):
""" Parse and send the colored source.
If out and scheme are not specified, the defaults (given to
constructor) are used.
out should be a file-type object. Optionally, out can be given as the
string 'str' and the parser will automatically return the output in a
string."""
string_output = 0
if out == 'str' or self.out == 'str' or \
isinstance(self.out,StringIO.StringIO):
# XXX - I don't really like this state handling logic, but at this
# point I don't want to make major changes, so adding the
# isinstance() check is the simplest I can do to ensure correct
# behavior.
out_old = self.out
self.out = StringIO.StringIO()
string_output = 1
elif out is not None:
self.out = out
# Fast return of the unmodified input for NoColor scheme
if scheme == 'NoColor':
error = False
self.out.write(raw)
if string_output:
return raw,error
else:
return None,error
# local shorthands
colors = self.color_table[scheme].colors
self.colors = colors # put in object so __call__ sees it
# Remove trailing whitespace and normalize tabs
self.raw = raw.expandtabs().rstrip()
# store line offsets in self.lines
self.lines = [0, 0]
pos = 0
raw_find = self.raw.find
lines_append = self.lines.append
while 1:
pos = raw_find('\n', pos) + 1
if not pos: break
lines_append(pos)
lines_append(len(self.raw))
# parse the source and write it
self.pos = 0
text = StringIO.StringIO(self.raw)
error = False
try:
for atoken in generate_tokens(text.readline):
self(*atoken)
except tokenize.TokenError as ex:
msg = ex.args[0]
line = ex.args[1][0]
self.out.write("%s\n\n*** ERROR: %s%s%s\n" %
(colors[token.ERRORTOKEN],
msg, self.raw[self.lines[line]:],
colors.normal)
)
error = True
self.out.write(colors.normal+'\n')
if string_output:
output = self.out.getvalue()
self.out = out_old
return (output, error)
return (None, error)
def __call__(self, toktype, toktext, (srow,scol), (erow,ecol), line):
""" Token handler, with syntax highlighting."""
# local shorthands
colors = self.colors
owrite = self.out.write
# line separator, so this works across platforms
linesep = os.linesep
# calculate new positions
oldpos = self.pos
newpos = self.lines[srow] + scol
self.pos = newpos + len(toktext)
# send the original whitespace, if needed
if newpos > oldpos:
owrite(self.raw[oldpos:newpos])
# skip indenting tokens
if toktype in [token.INDENT, token.DEDENT]:
self.pos = newpos
return
# map token type to a color group
if token.LPAR <= toktype and toktype <= token.OP:
toktype = token.OP
elif toktype == token.NAME and keyword.iskeyword(toktext):
toktype = _KEYWORD
color = colors.get(toktype, colors[_TEXT])
#print '<%s>' % toktext, # dbg
# Triple quoted strings must be handled carefully so that backtracking
# in pagers works correctly. We need color terminators on _each_ line.
if linesep in toktext:
toktext = toktext.replace(linesep, '%s%s%s' %
(colors.normal,linesep,color))
# send text
owrite('%s%s%s' % (color,toktext,colors.normal))
def main(argv=None):
"""Run as a command-line script: colorize a python file or stdin using ANSI
color escapes and print to stdout.
Inputs:
- argv(None): a list of strings like sys.argv[1:] giving the command-line
arguments. If None, use sys.argv[1:].
"""
usage_msg = """%prog [options] [filename]
Colorize a python file or stdin using ANSI color escapes and print to stdout.
If no filename is given, or if filename is -, read standard input."""
parser = optparse.OptionParser(usage=usage_msg)
newopt = parser.add_option
newopt('-s','--scheme',metavar='NAME',dest='scheme_name',action='store',
choices=['Linux','LightBG','NoColor'],default=_scheme_default,
help="give the color scheme to use. Currently only 'Linux'\
(default) and 'LightBG' and 'NoColor' are implemented (give without\
quotes)")
opts,args = parser.parse_args(argv)
if len(args) > 1:
parser.error("you must give at most one filename.")
if len(args) == 0:
fname = '-' # no filename given; setup to read from stdin
else:
fname = args[0]
if fname == '-':
stream = sys.stdin
else:
try:
stream = file(fname)
except IOError,msg:
print >> sys.stderr, msg
sys.exit(1)
parser = Parser()
# we need nested try blocks because pre-2.5 python doesn't support unified
# try-except-finally
try:
try:
# write colorized version to stdout
parser.format(stream.read(),scheme=opts.scheme_name)
except IOError,msg:
# if user reads through a pager and quits, don't print traceback
if msg.args != (32,'Broken pipe'):
raise
finally:
if stream is not sys.stdin:
stream.close() # in case a non-handled exception happened above
if __name__ == "__main__":
main()
| en | 0.680042 | # -*- coding: utf-8 -*- Class and program to colorize python source code for ANSI terminals. Based on an HTML code highlighter by <NAME> found at: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52298 Modifications by <NAME> (<EMAIL>). Information on the original HTML highlighter follows: MoinMoin - Python Source Parser Title: Colorize Python source using the built-in tokenizer Submitter: <NAME> Last Updated:2001/04/06 Version no:1.2 Description: This code is part of MoinMoin (http://moin.sourceforge.net/) and converts Python source code to HTML markup, rendering comments, keywords, operators, numeric and string literals in different colors. It shows how to use the built-in keyword, token and tokenize modules to scan Python source code and re-emit it with no changes to its original formatting (which is the hard part). # Imports # Python 3. Note that we use the undocumented _tokenize because it expects # strings, not bytes. See also Python issue #9969. ############################################################################# ### Python Source Parser (does Hilighting) ############################################################################# #**************************************************************************** # Builtin color schemes # just a shorthand # Build a few color schemes # color off (usu. Colors.Normal) # color off (usu. Colors.Normal) # color off (usu. Colors.Normal) # Build table of color schemes (needed by the parser) Format colored Python source. Create a parser with a specified color table and output channel. Call format() to process code. Parse and send the colored source. If out and scheme are not specified, the defaults (given to constructor) are used. out should be a file-type object. Optionally, out can be given as the string 'str' and the parser will automatically return the output in a string. 
# XXX - I don't really like this state handling logic, but at this # point I don't want to make major changes, so adding the # isinstance() check is the simplest I can do to ensure correct # behavior. # Fast return of the unmodified input for NoColor scheme # local shorthands # put in object so __call__ sees it # Remove trailing whitespace and normalize tabs # store line offsets in self.lines # parse the source and write it Token handler, with syntax highlighting. # local shorthands # line separator, so this works across platforms # calculate new positions # send the original whitespace, if needed # skip indenting tokens # map token type to a color group #print '<%s>' % toktext, # dbg # Triple quoted strings must be handled carefully so that backtracking # in pagers works correctly. We need color terminators on _each_ line. # send text Run as a command-line script: colorize a python file or stdin using ANSI color escapes and print to stdout. Inputs: - argv(None): a list of strings like sys.argv[1:] giving the command-line arguments. If None, use sys.argv[1:]. %prog [options] [filename] Colorize a python file or stdin using ANSI color escapes and print to stdout. If no filename is given, or if filename is -, read standard input. # no filename given; setup to read from stdin # we need nested try blocks because pre-2.5 python doesn't support unified # try-except-finally # write colorized version to stdout # if user reads through a pager and quits, don't print traceback # in case a non-handled exception happened above | 2.665022 | 3 |
src/pyast_utils.py | kkourt/cmnnc | 8 | 6624677 | <filename>src/pyast_utils.py
# Copyright (c) 2019, IBM Research.
#
# Author: <NAME> <<EMAIL>>
#
# vim: set expandtab softtabstop=4 tabstop=4 shiftwidth=4:
import itertools
import copy
import ast as pyast
class StructureTupleYields(pyast.NodeTransformer):
""" AST transformer for "structuring" yielded tuples
For example, if structure is (2,3), then a yield expression, yielding a
5-tuple: yield (a,b,c,d,e) will be transformed to yield ((a,b,),(c,d,e)).
"""
def __init__(self, structure):
super().__init__()
self.structure = structure
def visit_Yield(self, node):
# This yield is not a tuple, do nothing
if not isinstance(node.value, pyast.Tuple):
print(
"*" * 10,
"Yiedling something which is not a tuple. Doing nothing",
)
return node
elts = node.value.elts
ctx = node.value.ctx
nelts = len(elts)
if nelts != sum(self.structure):
print(
"*" * 10,
"Yiedling a tuple with size=%d while structure=%s. Doing nothing."
% (nelts, structure),
)
return node
new_elts = []
elts_iter = iter(elts)
for n in self.structure:
xelts = [x for x in itertools.islice(elts_iter, n)]
xtuple = pyast.Tuple(xelts, copy.copy(ctx))
new_elts.append(xtuple)
# sanity check that there are no more elements in the iterator
# (they shouldn't be since we checked the length)
try:
next(elts_iter)
assert False
except StopIteration:
pass
new_node = pyast.Yield(pyast.Tuple(new_elts, copy.copy(ctx)))
return pyast.copy_location(new_node, node)
| <filename>src/pyast_utils.py
# Copyright (c) 2019, IBM Research.
#
# Author: <NAME> <<EMAIL>>
#
# vim: set expandtab softtabstop=4 tabstop=4 shiftwidth=4:
import itertools
import copy
import ast as pyast
class StructureTupleYields(pyast.NodeTransformer):
""" AST transformer for "structuring" yielded tuples
For example, if structure is (2,3), then a yield expression, yielding a
5-tuple: yield (a,b,c,d,e) will be transformed to yield ((a,b,),(c,d,e)).
"""
def __init__(self, structure):
super().__init__()
self.structure = structure
def visit_Yield(self, node):
# This yield is not a tuple, do nothing
if not isinstance(node.value, pyast.Tuple):
print(
"*" * 10,
"Yiedling something which is not a tuple. Doing nothing",
)
return node
elts = node.value.elts
ctx = node.value.ctx
nelts = len(elts)
if nelts != sum(self.structure):
print(
"*" * 10,
"Yiedling a tuple with size=%d while structure=%s. Doing nothing."
% (nelts, structure),
)
return node
new_elts = []
elts_iter = iter(elts)
for n in self.structure:
xelts = [x for x in itertools.islice(elts_iter, n)]
xtuple = pyast.Tuple(xelts, copy.copy(ctx))
new_elts.append(xtuple)
# sanity check that there are no more elements in the iterator
# (they shouldn't be since we checked the length)
try:
next(elts_iter)
assert False
except StopIteration:
pass
new_node = pyast.Yield(pyast.Tuple(new_elts, copy.copy(ctx)))
return pyast.copy_location(new_node, node)
| en | 0.74019 | # Copyright (c) 2019, IBM Research. # # Author: <NAME> <<EMAIL>> # # vim: set expandtab softtabstop=4 tabstop=4 shiftwidth=4: AST transformer for "structuring" yielded tuples For example, if structure is (2,3), then a yield expression, yielding a 5-tuple: yield (a,b,c,d,e) will be transformed to yield ((a,b,),(c,d,e)). # This yield is not a tuple, do nothing # sanity check that there are no more elements in the iterator # (they shouldn't be since we checked the length) | 2.722852 | 3 |
tests/test_server.py | vituocgia/izi-grpc | 0 | 6624678 | import os
import signal
import threading
from unittest import mock
from izi_grpc.server import Server
from izi_grpc.signals import server_started, server_stopped
def test_server(app, logstream):
s = Server(app)
assert not s._stopped
def log_started(s):
app.logger.warn('started!')
def log_stopped(s):
app.logger.warn('stopped!')
server_started.connect(log_started)
server_stopped.connect(log_stopped)
with mock.patch('time.sleep', new=lambda s: os.kill(os.getpid(), signal.SIGINT)):
assert s.run()
assert threading.active_count() > 1
assert s._stopped
content = logstream.getvalue()
assert 'started!' in content and 'stopped!' in content
| import os
import signal
import threading
from unittest import mock
from izi_grpc.server import Server
from izi_grpc.signals import server_started, server_stopped
def test_server(app, logstream):
s = Server(app)
assert not s._stopped
def log_started(s):
app.logger.warn('started!')
def log_stopped(s):
app.logger.warn('stopped!')
server_started.connect(log_started)
server_stopped.connect(log_stopped)
with mock.patch('time.sleep', new=lambda s: os.kill(os.getpid(), signal.SIGINT)):
assert s.run()
assert threading.active_count() > 1
assert s._stopped
content = logstream.getvalue()
assert 'started!' in content and 'stopped!' in content
| none | 1 | 2.256482 | 2 | |
pytorch_sound/data/meta/libri_light.py | lunarbridge/pytorch_sound | 86 | 6624679 | import pandas as pd
import os
import json
from typing import List, Tuple
from pytorch_sound.data.meta.commons import split_train_val_frame
from pytorch_sound.data.dataset import SpeechDataLoader, SpeechDataset
from pytorch_sound.data.meta import MetaFrame, MetaType
class LibriLightMeta(MetaFrame):
"""
Extended MetaFrame for using Libri Light Dataset
https://github.com/facebookresearch/libri-light
"""
frame_file_names: List[str] = ['all_meta.json', 'train_meta.json', 'val_meta.json']
def __init__(self, meta_path: str = '', sr: int = 22050):
self.meta_path = meta_path
if os.path.exists(self.meta_path) and not os.path.isdir(self.meta_path):
self._meta = pd.read_json(self.meta_path)
self._meta = self._meta.sort_values(by='duration')
else:
self._meta = pd.DataFrame(columns=self.column_names, data={})
# setup parameters
self._num_speakers = None
self.sr = sr
@property
def columns(self) -> List[Tuple[MetaType, str]]:
return [(MetaType.AUDIO, 'audio_filename'), (MetaType.SCALAR, 'speaker'), (MetaType.META, 'duration')]
@property
def meta(self) -> pd.DataFrame:
return self._meta
@property
def num_speakers(self):
if self._num_speakers is None:
speakers = self._meta['speaker'].values
set_speakers = set(speakers)
self._num_speakers = len(set_speakers)
return self._num_speakers
def __len__(self):
return len(self._meta)
def make_meta(self, chunk_file_list, speakers, val_rate: float = 0.1):
# make dict
info = {'audio_filename': chunk_file_list, 'speaker': speakers}
# change meta obj
self._meta = pd.DataFrame(info)
# make speaker as indices
speaker_mappings = {spk: idx for idx, spk in enumerate(sorted(list(set(self._meta['speaker'].values))))}
# update infos
self._meta['speaker'] = [speaker_mappings[spk] for spk in self._meta['speaker'].values]
self._meta['pass'] = [True] * len(self._meta)
# read duration
print('Check durations on wave files ...')
dur_list = self._process_duration(self._meta['audio_filename'].values, 0, 0)
self._meta['duration'] = dur_list
# split train / val
print('Make train / val meta')
train_meta, val_meta = split_train_val_frame(self._meta, val_rate=val_rate)
# save data frames
print('Save meta frames on {}'.format(' '.join(self.frame_file_names)))
self.save_meta(self.frame_file_names, self.meta_path, self._meta, train_meta, val_meta)
# save speaker map as json
spk_json_path = os.path.join(self.meta_path, 'speaker_map.json')
with open(spk_json_path, 'w') as w:
json.dump(speaker_mappings, w)
def get_datasets(meta_dir: str, batch_size: int, num_workers: int,
fix_len: int = 0, skip_audio: bool = False,
audio_mask: bool = False) -> Tuple[SpeechDataLoader, SpeechDataLoader]:
# TODO: update this function in general
assert os.path.isdir(meta_dir), '{} is not valid directory path!'
train_file, valid_file = LibriLightMeta.frame_file_names[1:]
# load meta file
train_meta = LibriLightMeta(os.path.join(meta_dir, train_file))
valid_meta = LibriLightMeta(os.path.join(meta_dir, valid_file))
# create dataset
train_dataset = SpeechDataset(train_meta, fix_len=fix_len, skip_audio=skip_audio, audio_mask=audio_mask)
valid_dataset = SpeechDataset(valid_meta, fix_len=fix_len, skip_audio=skip_audio, audio_mask=audio_mask)
# create data loader
train_loader = SpeechDataLoader(train_dataset, batch_size=batch_size,
num_workers=num_workers, skip_last_bucket=False)
valid_loader = SpeechDataLoader(valid_dataset, batch_size=batch_size,
num_workers=num_workers, skip_last_bucket=False)
return train_loader, valid_loader
| import pandas as pd
import os
import json
from typing import List, Tuple
from pytorch_sound.data.meta.commons import split_train_val_frame
from pytorch_sound.data.dataset import SpeechDataLoader, SpeechDataset
from pytorch_sound.data.meta import MetaFrame, MetaType
class LibriLightMeta(MetaFrame):
"""
Extended MetaFrame for using Libri Light Dataset
https://github.com/facebookresearch/libri-light
"""
frame_file_names: List[str] = ['all_meta.json', 'train_meta.json', 'val_meta.json']
def __init__(self, meta_path: str = '', sr: int = 22050):
self.meta_path = meta_path
if os.path.exists(self.meta_path) and not os.path.isdir(self.meta_path):
self._meta = pd.read_json(self.meta_path)
self._meta = self._meta.sort_values(by='duration')
else:
self._meta = pd.DataFrame(columns=self.column_names, data={})
# setup parameters
self._num_speakers = None
self.sr = sr
@property
def columns(self) -> List[Tuple[MetaType, str]]:
return [(MetaType.AUDIO, 'audio_filename'), (MetaType.SCALAR, 'speaker'), (MetaType.META, 'duration')]
@property
def meta(self) -> pd.DataFrame:
return self._meta
@property
def num_speakers(self):
if self._num_speakers is None:
speakers = self._meta['speaker'].values
set_speakers = set(speakers)
self._num_speakers = len(set_speakers)
return self._num_speakers
def __len__(self):
return len(self._meta)
def make_meta(self, chunk_file_list, speakers, val_rate: float = 0.1):
# make dict
info = {'audio_filename': chunk_file_list, 'speaker': speakers}
# change meta obj
self._meta = pd.DataFrame(info)
# make speaker as indices
speaker_mappings = {spk: idx for idx, spk in enumerate(sorted(list(set(self._meta['speaker'].values))))}
# update infos
self._meta['speaker'] = [speaker_mappings[spk] for spk in self._meta['speaker'].values]
self._meta['pass'] = [True] * len(self._meta)
# read duration
print('Check durations on wave files ...')
dur_list = self._process_duration(self._meta['audio_filename'].values, 0, 0)
self._meta['duration'] = dur_list
# split train / val
print('Make train / val meta')
train_meta, val_meta = split_train_val_frame(self._meta, val_rate=val_rate)
# save data frames
print('Save meta frames on {}'.format(' '.join(self.frame_file_names)))
self.save_meta(self.frame_file_names, self.meta_path, self._meta, train_meta, val_meta)
# save speaker map as json
spk_json_path = os.path.join(self.meta_path, 'speaker_map.json')
with open(spk_json_path, 'w') as w:
json.dump(speaker_mappings, w)
def get_datasets(meta_dir: str, batch_size: int, num_workers: int,
fix_len: int = 0, skip_audio: bool = False,
audio_mask: bool = False) -> Tuple[SpeechDataLoader, SpeechDataLoader]:
# TODO: update this function in general
assert os.path.isdir(meta_dir), '{} is not valid directory path!'
train_file, valid_file = LibriLightMeta.frame_file_names[1:]
# load meta file
train_meta = LibriLightMeta(os.path.join(meta_dir, train_file))
valid_meta = LibriLightMeta(os.path.join(meta_dir, valid_file))
# create dataset
train_dataset = SpeechDataset(train_meta, fix_len=fix_len, skip_audio=skip_audio, audio_mask=audio_mask)
valid_dataset = SpeechDataset(valid_meta, fix_len=fix_len, skip_audio=skip_audio, audio_mask=audio_mask)
# create data loader
train_loader = SpeechDataLoader(train_dataset, batch_size=batch_size,
num_workers=num_workers, skip_last_bucket=False)
valid_loader = SpeechDataLoader(valid_dataset, batch_size=batch_size,
num_workers=num_workers, skip_last_bucket=False)
return train_loader, valid_loader
| en | 0.5194 | Extended MetaFrame for using Libri Light Dataset https://github.com/facebookresearch/libri-light # setup parameters # make dict # change meta obj # make speaker as indices # update infos # read duration # split train / val # save data frames # save speaker map as json # TODO: update this function in general # load meta file # create dataset # create data loader | 2.547925 | 3 |
HW5/model/model_utils.py | joycenerd/Computer_Vision_2021 | 0 | 6624680 | <reponame>joycenerd/Computer_Vision_2021<filename>HW5/model/model_utils.py
from .resnest.restnest import get_model
from options import opt
from efficientnet_pytorch import EfficientNet
import torch
def get_net(model):
if model == 'resnest50':
model = torch.hub.load('zhanghang1989/ResNeSt', 'resnest50', pretrained=True)
return model
elif model == 'resnest101':
model = torch.hub.load('zhanghang1989/ResNeSt', 'resnest101', pretrained=True)
return model
elif model == 'resnest200':
model = torch.hub.load('zhanghang1989/ResNeSt', 'resnest200', pretrained=True)
return model
elif model == 'efficientnet-b7':
model = EfficientNet.from_pretrained(
'efficientnet-b7', num_classes=opt.num_classes)
return model
elif model == 'efficientnet-b5':
model = EfficientNet.from_pretrained(
'efficientnet-b5', num_classes=opt.num_classes)
return model
elif model == 'efficientnet-b4':
model = EfficientNet.from_pretrained(
'efficientnet-b4', num_classes=opt.num_classes)
return model
elif model == 'efficientnet-b3':
model = EfficientNet.from_pretrained(
'efficientnet-b3', num_classes=opt.num_classes)
return model
| from .resnest.restnest import get_model
from options import opt
from efficientnet_pytorch import EfficientNet
import torch
def get_net(model):
if model == 'resnest50':
model = torch.hub.load('zhanghang1989/ResNeSt', 'resnest50', pretrained=True)
return model
elif model == 'resnest101':
model = torch.hub.load('zhanghang1989/ResNeSt', 'resnest101', pretrained=True)
return model
elif model == 'resnest200':
model = torch.hub.load('zhanghang1989/ResNeSt', 'resnest200', pretrained=True)
return model
elif model == 'efficientnet-b7':
model = EfficientNet.from_pretrained(
'efficientnet-b7', num_classes=opt.num_classes)
return model
elif model == 'efficientnet-b5':
model = EfficientNet.from_pretrained(
'efficientnet-b5', num_classes=opt.num_classes)
return model
elif model == 'efficientnet-b4':
model = EfficientNet.from_pretrained(
'efficientnet-b4', num_classes=opt.num_classes)
return model
elif model == 'efficientnet-b3':
model = EfficientNet.from_pretrained(
'efficientnet-b3', num_classes=opt.num_classes)
return model | none | 1 | 2.489017 | 2 | |
tests/feature_prep_test.py | JherezTaylor/thesis-preprocessing | 13 | 6624681 | # Author: <NAME> <<EMAIL>>
# License: MIT
# Python 3.5
"""Test feature_prep module
"""
import string
from nose.tools import *
from context import hatespeech_core
class TestFeaturePrep(object):
""" init class """
def __init__(self):
self.test_list = ["I'm here", "get rekt", "#squadgoals okay"]
def setup(self):
"""This method is run once before _each_ test method is executed"""
def teardown(self):
"""This method is run once after _each_ test method is executed"""
@nottest
def test_extract_lexical_features(self):
"""This method tests the OR concatenation function"""
nlp = hatespeech_core.feature_prep.init_nlp_pipeline(False)
result_set = [("I'm", 'NN'), ('here', 'RB'), ('get', 'VB'),
('rekt', 'NN'), ('#squadgoals', 'NNS'), ('okay', 'JJ')]
response_string = hatespeech_core.feature_prep.extract_lexical_features_test(nlp,
self.test_list)
assert_equals(response_string, result_set)
| # Author: <NAME> <<EMAIL>>
# License: MIT
# Python 3.5
"""Test feature_prep module
"""
import string
from nose.tools import *
from context import hatespeech_core
class TestFeaturePrep(object):
""" init class """
def __init__(self):
self.test_list = ["I'm here", "get rekt", "#squadgoals okay"]
def setup(self):
"""This method is run once before _each_ test method is executed"""
def teardown(self):
"""This method is run once after _each_ test method is executed"""
@nottest
def test_extract_lexical_features(self):
"""This method tests the OR concatenation function"""
nlp = hatespeech_core.feature_prep.init_nlp_pipeline(False)
result_set = [("I'm", 'NN'), ('here', 'RB'), ('get', 'VB'),
('rekt', 'NN'), ('#squadgoals', 'NNS'), ('okay', 'JJ')]
response_string = hatespeech_core.feature_prep.extract_lexical_features_test(nlp,
self.test_list)
assert_equals(response_string, result_set)
| en | 0.793851 | # Author: <NAME> <<EMAIL>> # License: MIT # Python 3.5 Test feature_prep module init class This method is run once before _each_ test method is executed This method is run once after _each_ test method is executed This method tests the OR concatenation function | 2.56546 | 3 |
api_tests/entitlements/serializers/test_serializers.py | RCOSDP/osf.io | 0 | 6624682 | <reponame>RCOSDP/osf.io
import pytest
from api.entitlements.serializers import LoginAvailabilitySerializer
@pytest.mark.django_db
class TestLoginAvailabilitySerializer:
def test_serializer(self):
id_test = '1'
payload = {
'institution_id': id_test,
'entitlements': ['gkn1-ent1', 'gkn1-ent2', 'gkn1-ent1']
}
data = LoginAvailabilitySerializer(data=payload)
assert data.is_valid() is True
data = data.validated_data
institution_id = data.get('institution_id')
assert institution_id == id_test
| import pytest
from api.entitlements.serializers import LoginAvailabilitySerializer
@pytest.mark.django_db
class TestLoginAvailabilitySerializer:
def test_serializer(self):
id_test = '1'
payload = {
'institution_id': id_test,
'entitlements': ['gkn1-ent1', 'gkn1-ent2', 'gkn1-ent1']
}
data = LoginAvailabilitySerializer(data=payload)
assert data.is_valid() is True
data = data.validated_data
institution_id = data.get('institution_id')
assert institution_id == id_test | none | 1 | 2.55725 | 3 | |
lib/galaxy/tools/parameters/validation.py | Tomasz69/galaxy | 0 | 6624683 | """
Classes related to parameter validation.
"""
import logging
import re
from six import string_types
from galaxy import (
model,
util
)
log = logging.getLogger(__name__)
class Validator(object):
    """
    Abstract base class for parameter validators.

    A validator checks that a value meets some conditions, raising
    ValueError when the check fails and returning None otherwise.
    """
    # Subclasses that need dataset metadata to be present before they can
    # run set this to True.
    requires_dataset_metadata = False

    @classmethod
    def from_element(cls, param, elem):
        """
        Build the concrete Validator described by a ``<validator>`` element.

        :param param: the tool parameter that owns the validator element
        :param elem: the XML ``<validator>`` element
        :return: an instance of the Validator subclass registered (in
                 ``validator_types``) for the element's ``type`` attribute
        """
        validator_type = elem.get('type', None)
        assert validator_type is not None, "Required 'type' attribute missing from validator"
        return validator_types[validator_type].from_element(param, elem)

    def validate(self, value, trans=None):
        """
        Check ``value``; return None on success, raise ValueError on failure.

        Must be overridden by subclasses.
        """
        raise TypeError("Abstract Method")
class RegexValidator(Validator):
    """
    Validator that evaluates a regular expression

    >>> from galaxy.util import XML
    >>> from galaxy.tools.parameters.basic import ToolParameter
    >>> p = ToolParameter.build(None, XML('''
    ... <param name="blah" type="text" value="10">
    ...     <validator type="regex" message="Not gonna happen">[Ff]oo</validator>
    ... </param>
    ... '''))
    >>> t = p.validate("Foo")
    >>> t = p.validate("foo")
    >>> t = p.validate("Fop")
    Traceback (most recent call last):
        ...
    ValueError: Not gonna happen
    """

    @classmethod
    def from_element(cls, param, elem):
        # The pattern is the element's text content.
        return cls(elem.get('message'), elem.text)

    def __init__(self, message, expression):
        self.message = message
        # Kept as the raw pattern string and compiled at validation time;
        # RE objects historically were not thread safe, so no compiled
        # pattern is cached here.
        self.expression = expression

    def validate(self, value, trans=None):
        # re.match anchors only at the start: any prefix match passes.
        match = re.match(self.expression, value or '')
        if match is None:
            raise ValueError(self.message)
class ExpressionValidator(Validator):
    """
    Validator that evaluates a python expression using the value

    >>> from galaxy.util import XML
    >>> from galaxy.tools.parameters.basic import ToolParameter
    >>> p = ToolParameter.build(None, XML('''
    ... <param name="blah" type="text" value="10">
    ...     <validator type="expression" message="Not gonna happen">value.lower() == "foo"</validator>
    ... </param>
    ... '''))
    >>> t = p.validate("Foo")
    >>> t = p.validate("foo")
    >>> t = p.validate("Fop")
    Traceback (most recent call last):
        ...
    ValueError: Not gonna happen
    """

    @classmethod
    def from_element(cls, param, elem):
        return cls(elem.get('message'), elem.text, elem.get('substitute_value_in_message'))

    def __init__(self, message, expression, substitute_value_in_message):
        self.message = message
        self.substitute_value_in_message = substitute_value_in_message
        # Compile once up front; code objects are safe to share across threads.
        self.expression = compile(expression, '<string>', 'eval')

    def validate(self, value, trans=None):
        message = self.message
        if self.substitute_value_in_message:
            message = message % value
        # NOTE(review): eval here runs expressions from the tool XML, i.e.
        # trusted tool-author code, not end-user input — verify that remains true.
        try:
            result = eval(self.expression, dict(value=value))
        except Exception:
            log.debug("Validator %s could not be evaluated on %s" % (self.expression, str(value)), exc_info=True)
            raise ValueError(message)
        if not result:
            raise ValueError(message)
class InRangeValidator(Validator):
    """
    Validator that ensures a number is in a specified range

    >>> from galaxy.util import XML
    >>> from galaxy.tools.parameters.basic import ToolParameter
    >>> p = ToolParameter.build(None, XML('''
    ... <param name="blah" type="integer" value="10">
    ...     <validator type="in_range" message="Not gonna happen" min="10" exclude_min="true" max="20"/>
    ... </param>
    ... '''))
    >>> t = p.validate(10)
    Traceback (most recent call last):
        ...
    ValueError: Not gonna happen
    >>> t = p.validate(15)
    >>> t = p.validate(20)
    >>> t = p.validate(21)
    Traceback (most recent call last):
        ...
    ValueError: Not gonna happen
    """

    @classmethod
    def from_element(cls, param, elem):
        return cls(elem.get('message', None), elem.get('min'),
                   elem.get('max'), elem.get('exclude_min', 'false'),
                   elem.get('exclude_max', 'false'))

    def __init__(self, message, range_min, range_max, exclude_min=False, exclude_max=False):
        """
        When the optional exclude_min and exclude_max attributes are set
        to true, the range excludes the end points (i.e., min < value < max),
        while if set to False (the default), then range includes the end points
        (i.e., min <= value <= max). Combinations of exclude_min and exclude_max
        values are allowed.
        """
        # Missing bounds default to +/- infinity, so a one-sided range works.
        self.min = float('-inf') if range_min is None else float(range_min)
        self.exclude_min = util.asbool(exclude_min)
        self.max = float('inf') if range_max is None else float(range_max)
        self.exclude_max = util.asbool(exclude_max)
        assert self.min <= self.max, 'min must be less than or equal to max'
        # Strip trailing zeros and the decimal point so the default message
        # reads nicely for integral bounds.
        pretty_min = str(self.min).rstrip('0').rstrip('.')
        pretty_max = str(self.max).rstrip('0').rstrip('.')
        lower_op = '>' if self.exclude_min else '>='
        upper_op = '<' if self.exclude_max else '<='
        self.message = message or "Value must be %s %s and %s %s" % (lower_op, pretty_min, upper_op, pretty_max)

    def validate(self, value, trans=None):
        number = float(value)
        # The negated comparisons (rather than flipped operators) keep the
        # original behavior for non-ordered values such as NaN.
        if self.exclude_min:
            below = not (self.min < number)
        else:
            below = not (self.min <= number)
        if self.exclude_max:
            above = not (number < self.max)
        else:
            above = not (number <= self.max)
        if below or above:
            raise ValueError(self.message)
class LengthValidator(Validator):
    """
    Validator that ensures the length of the provided string (value) is in a specific range

    >>> from galaxy.util import XML
    >>> from galaxy.tools.parameters.basic import ToolParameter
    >>> p = ToolParameter.build(None, XML('''
    ... <param name="blah" type="text" value="foobar">
    ...     <validator type="length" min="2" max="8"/>
    ... </param>
    ... '''))
    >>> t = p.validate("foo")
    >>> t = p.validate("bar")
    >>> t = p.validate("f")
    Traceback (most recent call last):
        ...
    ValueError: Must have length of at least 2
    >>> t = p.validate("foobarbaz")
    Traceback (most recent call last):
        ...
    ValueError: Must have length no more than 8
    """

    @classmethod
    def from_element(cls, param, elem):
        return cls(elem.get('message', None), elem.get('min', None), elem.get('max', None))

    def __init__(self, message, length_min, length_max):
        self.message = message
        # Either bound may be absent (None), in which case it is not checked.
        self.min = None if length_min is None else int(length_min)
        self.max = None if length_max is None else int(length_max)

    def validate(self, value, trans=None):
        length = len(value)
        if self.min is not None and length < self.min:
            raise ValueError(self.message or ("Must have length of at least %d" % self.min))
        if self.max is not None and length > self.max:
            raise ValueError(self.message or ("Must have length no more than %d" % self.max))
class DatasetOkValidator(Validator):
    """
    Validator that checks if a dataset is in an 'ok' state
    """

    @classmethod
    def from_element(cls, param, elem):
        return cls(elem.get('message', None))

    def __init__(self, message=None):
        self.message = message

    def validate(self, value, trans=None):
        # Nothing to check when no dataset was supplied.
        if not value:
            return
        if value.state == model.Dataset.states.OK:
            return
        if self.message is None:
            self.message = "The selected dataset is still being generated, select another dataset or wait until it is completed"
        raise ValueError(self.message)
class DatasetEmptyValidator(Validator):
    """Validator that checks if a dataset has a positive file size."""

    @classmethod
    def from_element(cls, param, elem):
        return cls(elem.get('message', None))

    def __init__(self, message=None):
        self.message = message

    def validate(self, value, trans=None):
        # Only datasets that were actually supplied are checked.
        if not value:
            return
        if value.get_size() != 0:
            return
        if self.message is None:
            self.message = "The selected dataset is empty, this tool expects non-empty files."
        raise ValueError(self.message)
class DatasetExtraFilesPathEmptyValidator(Validator):
    """Validator that checks if a dataset's extra_files_path exists and is not empty."""

    @classmethod
    def from_element(cls, param, elem):
        return cls(elem.get('message', None))

    def __init__(self, message=None):
        self.message = message

    def validate(self, value, trans=None):
        if not value:
            return
        # If total size equals the primary file's size, the extra-files
        # directory contributes nothing, i.e. it is empty or missing.
        if value.get_total_size() != value.get_size():
            return
        if self.message is None:
            self.message = "The selected dataset's extra_files_path directory is empty or does not exist, this tool expects non-empty extra_files_path directories associated with the selected input."
        raise ValueError(self.message)
class MetadataValidator(Validator):
    """
    Validator that checks for missing metadata
    """
    requires_dataset_metadata = True

    @classmethod
    def from_element(cls, param, elem):
        return cls(message=elem.get('message', None), check=elem.get('check', ""), skip=elem.get('skip', ""))

    def __init__(self, message=None, check="", skip=""):
        self.message = message
        # Comma-separated lists of metadata element names to check / skip.
        self.check = check.split(",")
        self.skip = skip.split(",")

    def validate(self, value, trans=None):
        if not value:
            return
        if not isinstance(value, model.DatasetInstance):
            raise ValueError('A non-dataset value was provided.')
        if not value.missing_meta(check=self.check, skip=self.skip):
            return
        if self.message is None:
            self.message = "Metadata missing, click the pencil icon in the history item to edit / save the metadata attributes"
        raise ValueError(self.message)
class UnspecifiedBuildValidator(Validator):
    """
    Validator that checks for dbkey not equal to '?'
    """
    requires_dataset_metadata = True

    @classmethod
    def from_element(cls, param, elem):
        return cls(elem.get('message', None))

    def __init__(self, message=None):
        default = "Unspecified genome build, click the pencil icon in the history item to set the genome build"
        self.message = default if message is None else message

    def validate(self, value, trans=None):
        # A missing value cannot be validated.
        if not value:
            return
        dbkey = value.metadata.dbkey
        # dbkey metadata may be stored as a single-element list.
        if isinstance(dbkey, list):
            dbkey = dbkey[0]
        if dbkey == '?':
            raise ValueError(self.message)
class NoOptionsValidator(Validator):
    """Validator that checks for empty select list"""

    @classmethod
    def from_element(cls, param, elem):
        return cls(elem.get('message', None))

    def __init__(self, message=None):
        self.message = message

    def validate(self, value, trans=None):
        # Only a literal None (no selectable option) fails the check.
        if value is not None:
            return
        if self.message is None:
            self.message = "No options available for selection"
        raise ValueError(self.message)
class EmptyTextfieldValidator(Validator):
    """Validator that checks for empty text field"""

    @classmethod
    def from_element(cls, param, elem):
        return cls(elem.get('message', None))

    def __init__(self, message=None):
        self.message = message

    def validate(self, value, trans=None):
        # Only the empty string fails; None is deliberately not treated as empty.
        if value != '':
            return
        if self.message is None:
            self.message = "Field requires a value"
        raise ValueError(self.message)
class MetadataInFileColumnValidator(Validator):
    """
    Validator that checks if the value for a dataset's metadata item exists in a file.
    """
    requires_dataset_metadata = True

    @classmethod
    def from_element(cls, param, elem):
        filename = elem.get("filename", None)
        if filename:
            # Filenames are resolved relative to the configured tool-data path.
            filename = "%s/%s" % (param.tool.app.config.tool_data_path, filename.strip())
        metadata_name = elem.get("metadata_name", None)
        if metadata_name:
            metadata_name = metadata_name.strip()
        metadata_column = int(elem.get("metadata_column", 0))
        split = elem.get("split", "\t")
        message = elem.get("message", "Value for metadata %s was not found in %s." % (metadata_name, filename))
        line_startswith = elem.get("line_startswith", None)
        if line_startswith:
            line_startswith = line_startswith.strip()
        return cls(filename, metadata_name, metadata_column, message, line_startswith, split)

    def __init__(self, filename, metadata_name, metadata_column, message="Value for metadata not found.", line_startswith=None, split="\t"):
        self.metadata_name = metadata_name
        self.message = message
        # The allowed values are collected once at construction time; the
        # file is not re-read on later validations.
        self.valid_values = []
        for line in open(filename):
            if line_startswith is not None and not line.startswith(line_startswith):
                continue
            fields = line.split(split)
            if metadata_column < len(fields):
                self.valid_values.append(fields[metadata_column].strip())

    def validate(self, value, trans=None):
        if not value:
            return
        if hasattr(value, "metadata"):
            current = value.metadata.spec[self.metadata_name].param.to_string(value.metadata.get(self.metadata_name))
            if current in self.valid_values:
                return
        raise ValueError(self.message)
class ValueInDataTableColumnValidator(Validator):
    """
    Validator that checks if a value is in a tool data table column.
    """

    @classmethod
    def from_element(cls, param, elem):
        """
        Build the validator from its XML element; requires a ``table_name``
        attribute naming a tool data table.
        """
        table_name = elem.get("table_name", None)
        assert table_name, 'You must specify a table_name.'
        tool_data_table = param.tool.app.tool_data_tables[table_name]
        # The column may be given as an index or as a column name.
        column = elem.get("metadata_column", 0)
        try:
            column = int(column)
        except ValueError:
            pass
        message = elem.get("message", "Value was not found in %s." % (table_name))
        line_startswith = elem.get("line_startswith", None)
        if line_startswith:
            line_startswith = line_startswith.strip()
        return cls(tool_data_table, column, message, line_startswith)

    def __init__(self, tool_data_table, column, message="Value not found.", line_startswith=None):
        """
        :param tool_data_table: tool data table providing the valid values
        :param column: column index (int) or column name (str) in the table
        :param message: error message raised when validation fails
        :param line_startswith: accepted for interface parity; not used here
        """
        self.message = message
        self.valid_values = []
        self._data_table_content_version = None
        self._tool_data_table = tool_data_table
        if isinstance(column, string_types):
            column = tool_data_table.columns[column]
        self._column = column
        self._load_values()

    def _load_values(self):
        """Refresh the cached valid values (and version) from the data table."""
        self._data_table_content_version, data_fields = self._tool_data_table.get_version_fields()
        self.valid_values = []
        for fields in data_fields:
            if self._column < len(fields):
                # BUGFIX: was fields[self._metadata_column]; this class stores
                # the column as self._column (``_metadata_column`` belongs to
                # MetadataInDataTableColumnValidator), so loading values always
                # raised AttributeError.
                self.valid_values.append(fields[self._column])

    def validate(self, value, trans=None):
        """Raise ValueError unless ``value`` appears in the cached column."""
        if not value:
            return
        # Reload if the data table has changed since the values were cached.
        if not self._tool_data_table.is_current_version(self._data_table_content_version):
            log.debug('MetadataInDataTableColumnValidator values are out of sync with data table (%s), updating validator.', self._tool_data_table.name)
            self._load_values()
        if value in self.valid_values:
            return
        raise ValueError(self.message)
class ValueNotInDataTableColumnValidator(ValueInDataTableColumnValidator):
    """
    Validator that checks if a value is NOT in a tool data table column.
    """

    def __init__(self, tool_data_table, metadata_column, message="Value already present.", line_startswith=None):
        super(ValueNotInDataTableColumnValidator, self).__init__(tool_data_table, metadata_column, message, line_startswith)

    def validate(self, value, trans=None):
        """Invert the parent check: fail when the value IS in the column."""
        try:
            # BUGFIX: was super(ValueInDataTableColumnValidator, self), which
            # skipped the parent class in the MRO and dispatched to the
            # abstract Validator.validate — raising TypeError instead of
            # performing the membership check.
            super(ValueNotInDataTableColumnValidator, self).validate(value, trans)
        except ValueError:
            return
        else:
            raise ValueError(self.message)
class MetadataInDataTableColumnValidator(Validator):
    """
    Validator that checks if the value for a dataset's metadata item exists in a file.
    """
    requires_dataset_metadata = True

    @classmethod
    def from_element(cls, param, elem):
        table_name = elem.get("table_name", None)
        assert table_name, 'You must specify a table_name.'
        tool_data_table = param.tool.app.tool_data_tables[table_name]
        metadata_name = elem.get("metadata_name", None)
        if metadata_name:
            metadata_name = metadata_name.strip()
        # The column may be given as an index or as a column name.
        metadata_column = elem.get("metadata_column", 0)
        try:
            metadata_column = int(metadata_column)
        except ValueError:
            pass
        message = elem.get("message", "Value for metadata %s was not found in %s." % (metadata_name, table_name))
        line_startswith = elem.get("line_startswith", None)
        if line_startswith:
            line_startswith = line_startswith.strip()
        return cls(tool_data_table, metadata_name, metadata_column, message, line_startswith)

    def __init__(self, tool_data_table, metadata_name, metadata_column, message="Value for metadata not found.", line_startswith=None):
        self.metadata_name = metadata_name
        self.message = message
        self.valid_values = []
        self._data_table_content_version = None
        self._tool_data_table = tool_data_table
        if isinstance(metadata_column, string_types):
            metadata_column = tool_data_table.columns[metadata_column]
        self._metadata_column = metadata_column
        self._load_values()

    def _load_values(self):
        """Cache the data table column contents together with its version."""
        self._data_table_content_version, data_fields = self._tool_data_table.get_version_fields()
        self.valid_values = [fields[self._metadata_column]
                             for fields in data_fields
                             if self._metadata_column < len(fields)]

    def validate(self, value, trans=None):
        if not value:
            return
        if hasattr(value, "metadata"):
            # Refresh the cache when the underlying table has changed.
            if not self._tool_data_table.is_current_version(self._data_table_content_version):
                log.debug('MetadataInDataTableColumnValidator values are out of sync with data table (%s), updating validator.', self._tool_data_table.name)
                self._load_values()
            current = value.metadata.spec[self.metadata_name].param.to_string(value.metadata.get(self.metadata_name))
            if current in self.valid_values:
                return
        raise ValueError(self.message)
class MetadataNotInDataTableColumnValidator(MetadataInDataTableColumnValidator):
    """
    Validator that checks if the value for a dataset's metadata item doesn't exists in a file.
    """
    requires_dataset_metadata = True

    def __init__(self, tool_data_table, metadata_name, metadata_column, message="Value for metadata not found.", line_startswith=None):
        # BUGFIX: was super(MetadataInDataTableColumnValidator, self), which
        # skipped the parent class in the MRO and called object.__init__ with
        # five extra arguments — a TypeError on every construction.
        super(MetadataNotInDataTableColumnValidator, self).__init__(tool_data_table, metadata_name, metadata_column, message, line_startswith)

    def validate(self, value, trans=None):
        """Invert the parent check: fail when the metadata IS in the column."""
        try:
            # BUGFIX: was super(MetadataInDataTableColumnValidator, self),
            # which dispatched to the abstract Validator.validate and raised
            # TypeError instead of performing the inverted membership check.
            super(MetadataNotInDataTableColumnValidator, self).validate(value, trans)
        except ValueError:
            return
        else:
            raise ValueError(self.message)
class MetadataInRangeValidator(InRangeValidator):
    """
    Validator that ensures metadata is in a specified range
    """
    requires_dataset_metadata = True

    @classmethod
    def from_element(cls, param, elem):
        metadata_name = elem.get('metadata_name', None)
        assert metadata_name, "dataset_metadata_in_range validator requires metadata_name attribute."
        return cls(metadata_name.strip(),
                   elem.get('message', None), elem.get('min'),
                   elem.get('max'), elem.get('exclude_min', 'false'),
                   elem.get('exclude_max', 'false'))

    def __init__(self, metadata_name, message, range_min, range_max, exclude_min=False, exclude_max=False):
        self.metadata_name = metadata_name
        super(MetadataInRangeValidator, self).__init__(message, range_min, range_max, exclude_min, exclude_max)

    def validate(self, value, trans=None):
        if not value:
            return
        if not isinstance(value, model.DatasetInstance):
            raise ValueError('A non-dataset value was provided.')
        # Convert the metadata item's string representation to a number and
        # delegate the range check to InRangeValidator.
        try:
            value_to_check = float(value.metadata.spec[self.metadata_name].param.to_string(value.metadata.get(self.metadata_name)))
        except KeyError:
            raise ValueError('{} Metadata missing'.format(self.metadata_name))
        except ValueError:
            raise ValueError('{} must be a float or an integer'.format(self.metadata_name))
        super(MetadataInRangeValidator, self).validate(value_to_check, trans)
# Registry mapping the <validator type="..."> attribute value to the
# implementing class; consulted by Validator.from_element.
validator_types = dict(
    expression=ExpressionValidator,
    regex=RegexValidator,
    in_range=InRangeValidator,
    length=LengthValidator,
    metadata=MetadataValidator,
    unspecified_build=UnspecifiedBuildValidator,
    no_options=NoOptionsValidator,
    empty_field=EmptyTextfieldValidator,
    empty_dataset=DatasetEmptyValidator,
    empty_extra_files_path=DatasetExtraFilesPathEmptyValidator,
    dataset_metadata_in_file=MetadataInFileColumnValidator,
    dataset_metadata_in_data_table=MetadataInDataTableColumnValidator,
    dataset_metadata_not_in_data_table=MetadataNotInDataTableColumnValidator,
    dataset_metadata_in_range=MetadataInRangeValidator,
    value_in_data_table=ValueInDataTableColumnValidator,
    # BUGFIX: was mapped to ValueInDataTableColumnValidator, so the
    # "not in" validator type silently performed the positive check.
    value_not_in_data_table=ValueNotInDataTableColumnValidator,
    dataset_ok_validator=DatasetOkValidator,
)
def get_suite():
    """Get unittest suite for this module"""
    import doctest
    import sys

    # Collect the doctests embedded in this module's docstrings.
    current_module = sys.modules[__name__]
    return doctest.DocTestSuite(current_module)
| """
Classes related to parameter validation.
"""
import logging
import re
from six import string_types
from galaxy import (
model,
util
)
log = logging.getLogger(__name__)
class Validator(object):
    """
    Abstract base class for parameter validators.

    A validator checks that a value meets some conditions, raising
    ValueError when the check fails and returning None otherwise.
    """
    # Subclasses that need dataset metadata to be present before they can
    # run set this to True.
    requires_dataset_metadata = False

    @classmethod
    def from_element(cls, param, elem):
        """
        Build the concrete Validator described by a ``<validator>`` element.

        :param param: the tool parameter that owns the validator element
        :param elem: the XML ``<validator>`` element
        :return: an instance of the Validator subclass registered (in
                 ``validator_types``) for the element's ``type`` attribute
        """
        validator_type = elem.get('type', None)
        assert validator_type is not None, "Required 'type' attribute missing from validator"
        return validator_types[validator_type].from_element(param, elem)

    def validate(self, value, trans=None):
        """
        Check ``value``; return None on success, raise ValueError on failure.

        Must be overridden by subclasses.
        """
        raise TypeError("Abstract Method")
class RegexValidator(Validator):
    """
    Validator that evaluates a regular expression

    >>> from galaxy.util import XML
    >>> from galaxy.tools.parameters.basic import ToolParameter
    >>> p = ToolParameter.build(None, XML('''
    ... <param name="blah" type="text" value="10">
    ...     <validator type="regex" message="Not gonna happen">[Ff]oo</validator>
    ... </param>
    ... '''))
    >>> t = p.validate("Foo")
    >>> t = p.validate("foo")
    >>> t = p.validate("Fop")
    Traceback (most recent call last):
        ...
    ValueError: Not gonna happen
    """

    @classmethod
    def from_element(cls, param, elem):
        # The pattern is the element's text content.
        return cls(elem.get('message'), elem.text)

    def __init__(self, message, expression):
        self.message = message
        # Kept as the raw pattern string and compiled at validation time;
        # RE objects historically were not thread safe, so no compiled
        # pattern is cached here.
        self.expression = expression

    def validate(self, value, trans=None):
        # re.match anchors only at the start: any prefix match passes.
        match = re.match(self.expression, value or '')
        if match is None:
            raise ValueError(self.message)
class ExpressionValidator(Validator):
    """
    Validator that evaluates a python expression using the value

    >>> from galaxy.util import XML
    >>> from galaxy.tools.parameters.basic import ToolParameter
    >>> p = ToolParameter.build(None, XML('''
    ... <param name="blah" type="text" value="10">
    ...     <validator type="expression" message="Not gonna happen">value.lower() == "foo"</validator>
    ... </param>
    ... '''))
    >>> t = p.validate("Foo")
    >>> t = p.validate("foo")
    >>> t = p.validate("Fop")
    Traceback (most recent call last):
        ...
    ValueError: Not gonna happen
    """

    @classmethod
    def from_element(cls, param, elem):
        return cls(elem.get('message'), elem.text, elem.get('substitute_value_in_message'))

    def __init__(self, message, expression, substitute_value_in_message):
        self.message = message
        self.substitute_value_in_message = substitute_value_in_message
        # Compile once up front; code objects are safe to share across threads.
        self.expression = compile(expression, '<string>', 'eval')

    def validate(self, value, trans=None):
        message = self.message
        if self.substitute_value_in_message:
            message = message % value
        # NOTE(review): eval here runs expressions from the tool XML, i.e.
        # trusted tool-author code, not end-user input — verify that remains true.
        try:
            result = eval(self.expression, dict(value=value))
        except Exception:
            log.debug("Validator %s could not be evaluated on %s" % (self.expression, str(value)), exc_info=True)
            raise ValueError(message)
        if not result:
            raise ValueError(message)
class InRangeValidator(Validator):
    """
    Validator that ensures a number is in a specified range

    >>> from galaxy.util import XML
    >>> from galaxy.tools.parameters.basic import ToolParameter
    >>> p = ToolParameter.build(None, XML('''
    ... <param name="blah" type="integer" value="10">
    ...     <validator type="in_range" message="Not gonna happen" min="10" exclude_min="true" max="20"/>
    ... </param>
    ... '''))
    >>> t = p.validate(10)
    Traceback (most recent call last):
        ...
    ValueError: Not gonna happen
    >>> t = p.validate(15)
    >>> t = p.validate(20)
    >>> t = p.validate(21)
    Traceback (most recent call last):
        ...
    ValueError: Not gonna happen
    """

    @classmethod
    def from_element(cls, param, elem):
        return cls(elem.get('message', None), elem.get('min'),
                   elem.get('max'), elem.get('exclude_min', 'false'),
                   elem.get('exclude_max', 'false'))

    def __init__(self, message, range_min, range_max, exclude_min=False, exclude_max=False):
        """
        When the optional exclude_min and exclude_max attributes are set
        to true, the range excludes the end points (i.e., min < value < max),
        while if set to False (the default), then range includes the end points
        (i.e., min <= value <= max). Combinations of exclude_min and exclude_max
        values are allowed.
        """
        # Missing bounds default to +/- infinity, so a one-sided range works.
        self.min = float('-inf') if range_min is None else float(range_min)
        self.exclude_min = util.asbool(exclude_min)
        self.max = float('inf') if range_max is None else float(range_max)
        self.exclude_max = util.asbool(exclude_max)
        assert self.min <= self.max, 'min must be less than or equal to max'
        # Strip trailing zeros and the decimal point so the default message
        # reads nicely for integral bounds.
        pretty_min = str(self.min).rstrip('0').rstrip('.')
        pretty_max = str(self.max).rstrip('0').rstrip('.')
        lower_op = '>' if self.exclude_min else '>='
        upper_op = '<' if self.exclude_max else '<='
        self.message = message or "Value must be %s %s and %s %s" % (lower_op, pretty_min, upper_op, pretty_max)

    def validate(self, value, trans=None):
        number = float(value)
        # The negated comparisons (rather than flipped operators) keep the
        # original behavior for non-ordered values such as NaN.
        if self.exclude_min:
            below = not (self.min < number)
        else:
            below = not (self.min <= number)
        if self.exclude_max:
            above = not (number < self.max)
        else:
            above = not (number <= self.max)
        if below or above:
            raise ValueError(self.message)
class LengthValidator(Validator):
    """
    Validator that ensures the length of the provided string (value) is in a specific range

    >>> from galaxy.util import XML
    >>> from galaxy.tools.parameters.basic import ToolParameter
    >>> p = ToolParameter.build(None, XML('''
    ... <param name="blah" type="text" value="foobar">
    ...     <validator type="length" min="2" max="8"/>
    ... </param>
    ... '''))
    >>> t = p.validate("foo")
    >>> t = p.validate("bar")
    >>> t = p.validate("f")
    Traceback (most recent call last):
        ...
    ValueError: Must have length of at least 2
    >>> t = p.validate("foobarbaz")
    Traceback (most recent call last):
        ...
    ValueError: Must have length no more than 8
    """

    @classmethod
    def from_element(cls, param, elem):
        return cls(elem.get('message', None), elem.get('min', None), elem.get('max', None))

    def __init__(self, message, length_min, length_max):
        self.message = message
        # Either bound may be absent (None), in which case it is not checked.
        self.min = None if length_min is None else int(length_min)
        self.max = None if length_max is None else int(length_max)

    def validate(self, value, trans=None):
        length = len(value)
        if self.min is not None and length < self.min:
            raise ValueError(self.message or ("Must have length of at least %d" % self.min))
        if self.max is not None and length > self.max:
            raise ValueError(self.message or ("Must have length no more than %d" % self.max))
class DatasetOkValidator(Validator):
    """
    Validator that checks if a dataset is in an 'ok' state
    """

    @classmethod
    def from_element(cls, param, elem):
        return cls(elem.get('message', None))

    def __init__(self, message=None):
        self.message = message

    def validate(self, value, trans=None):
        # Nothing to check when no dataset was supplied.
        if not value:
            return
        if value.state == model.Dataset.states.OK:
            return
        if self.message is None:
            self.message = "The selected dataset is still being generated, select another dataset or wait until it is completed"
        raise ValueError(self.message)
class DatasetEmptyValidator(Validator):
    """Validator that checks if a dataset has a positive file size."""

    @classmethod
    def from_element(cls, param, elem):
        return cls(elem.get('message', None))

    def __init__(self, message=None):
        self.message = message

    def validate(self, value, trans=None):
        # Only datasets that were actually supplied are checked.
        if not value:
            return
        if value.get_size() != 0:
            return
        if self.message is None:
            self.message = "The selected dataset is empty, this tool expects non-empty files."
        raise ValueError(self.message)
class DatasetExtraFilesPathEmptyValidator(Validator):
    """Validator that checks if a dataset's extra_files_path exists and is not empty."""

    @classmethod
    def from_element(cls, param, elem):
        return cls(elem.get('message', None))

    def __init__(self, message=None):
        self.message = message

    def validate(self, value, trans=None):
        if not value:
            return
        # If total size equals the primary file's size, the extra-files
        # directory contributes nothing, i.e. it is empty or missing.
        if value.get_total_size() != value.get_size():
            return
        if self.message is None:
            self.message = "The selected dataset's extra_files_path directory is empty or does not exist, this tool expects non-empty extra_files_path directories associated with the selected input."
        raise ValueError(self.message)
class MetadataValidator(Validator):
    """
    Validator that checks for missing metadata
    """
    requires_dataset_metadata = True

    @classmethod
    def from_element(cls, param, elem):
        return cls(message=elem.get('message', None), check=elem.get('check', ""), skip=elem.get('skip', ""))

    def __init__(self, message=None, check="", skip=""):
        self.message = message
        # Comma-separated lists of metadata element names to check / skip.
        self.check = check.split(",")
        self.skip = skip.split(",")

    def validate(self, value, trans=None):
        if not value:
            return
        if not isinstance(value, model.DatasetInstance):
            raise ValueError('A non-dataset value was provided.')
        if not value.missing_meta(check=self.check, skip=self.skip):
            return
        if self.message is None:
            self.message = "Metadata missing, click the pencil icon in the history item to edit / save the metadata attributes"
        raise ValueError(self.message)
class UnspecifiedBuildValidator(Validator):
    """
    Validator that checks for dbkey not equal to '?'
    """
    requires_dataset_metadata = True

    @classmethod
    def from_element(cls, param, elem):
        return cls(elem.get('message', None))

    def __init__(self, message=None):
        default = "Unspecified genome build, click the pencil icon in the history item to set the genome build"
        self.message = default if message is None else message

    def validate(self, value, trans=None):
        # A missing value cannot be validated.
        if not value:
            return
        dbkey = value.metadata.dbkey
        # dbkey metadata may be stored as a single-element list.
        if isinstance(dbkey, list):
            dbkey = dbkey[0]
        if dbkey == '?':
            raise ValueError(self.message)
class NoOptionsValidator(Validator):
    """Validator that checks for empty select list"""

    @classmethod
    def from_element(cls, param, elem):
        return cls(elem.get('message', None))

    def __init__(self, message=None):
        self.message = message

    def validate(self, value, trans=None):
        # Only a literal None (no selectable option) fails the check.
        if value is not None:
            return
        if self.message is None:
            self.message = "No options available for selection"
        raise ValueError(self.message)
class EmptyTextfieldValidator(Validator):
    """Validator that checks for empty text field"""

    @classmethod
    def from_element(cls, param, elem):
        return cls(elem.get('message', None))

    def __init__(self, message=None):
        self.message = message

    def validate(self, value, trans=None):
        # Only the empty string fails; None is deliberately not treated as empty.
        if value != '':
            return
        if self.message is None:
            self.message = "Field requires a value"
        raise ValueError(self.message)
class MetadataInFileColumnValidator(Validator):
    """
    Validator that checks if the value for a dataset's metadata item exists in a file.
    """
    requires_dataset_metadata = True

    @classmethod
    def from_element(cls, param, elem):
        filename = elem.get("filename", None)
        if filename:
            # Filenames are resolved relative to the configured tool-data path.
            filename = "%s/%s" % (param.tool.app.config.tool_data_path, filename.strip())
        metadata_name = elem.get("metadata_name", None)
        if metadata_name:
            metadata_name = metadata_name.strip()
        metadata_column = int(elem.get("metadata_column", 0))
        split = elem.get("split", "\t")
        message = elem.get("message", "Value for metadata %s was not found in %s." % (metadata_name, filename))
        line_startswith = elem.get("line_startswith", None)
        if line_startswith:
            line_startswith = line_startswith.strip()
        return cls(filename, metadata_name, metadata_column, message, line_startswith, split)

    def __init__(self, filename, metadata_name, metadata_column, message="Value for metadata not found.", line_startswith=None, split="\t"):
        self.metadata_name = metadata_name
        self.message = message
        # The allowed values are collected once at construction time; the
        # file is not re-read on later validations.
        self.valid_values = []
        for line in open(filename):
            if line_startswith is not None and not line.startswith(line_startswith):
                continue
            fields = line.split(split)
            if metadata_column < len(fields):
                self.valid_values.append(fields[metadata_column].strip())

    def validate(self, value, trans=None):
        if not value:
            return
        if hasattr(value, "metadata"):
            current = value.metadata.spec[self.metadata_name].param.to_string(value.metadata.get(self.metadata_name))
            if current in self.valid_values:
                return
        raise ValueError(self.message)
class ValueInDataTableColumnValidator(Validator):
    """
    Validator that checks if a value is in a tool data table column.
    """

    @classmethod
    def from_element(cls, param, elem):
        """
        Build the validator from its XML element; requires a ``table_name``
        attribute naming a tool data table.
        """
        table_name = elem.get("table_name", None)
        assert table_name, 'You must specify a table_name.'
        tool_data_table = param.tool.app.tool_data_tables[table_name]
        # The column may be given as an index or as a column name.
        column = elem.get("metadata_column", 0)
        try:
            column = int(column)
        except ValueError:
            pass
        message = elem.get("message", "Value was not found in %s." % (table_name))
        line_startswith = elem.get("line_startswith", None)
        if line_startswith:
            line_startswith = line_startswith.strip()
        return cls(tool_data_table, column, message, line_startswith)

    def __init__(self, tool_data_table, column, message="Value not found.", line_startswith=None):
        """
        :param tool_data_table: tool data table providing the valid values
        :param column: column index (int) or column name (str) in the table
        :param message: error message raised when validation fails
        :param line_startswith: accepted for interface parity; not used here
        """
        self.message = message
        self.valid_values = []
        self._data_table_content_version = None
        self._tool_data_table = tool_data_table
        if isinstance(column, string_types):
            column = tool_data_table.columns[column]
        self._column = column
        self._load_values()

    def _load_values(self):
        """Refresh the cached valid values (and version) from the data table."""
        self._data_table_content_version, data_fields = self._tool_data_table.get_version_fields()
        self.valid_values = []
        for fields in data_fields:
            if self._column < len(fields):
                # BUGFIX: was fields[self._metadata_column]; this class stores
                # the column as self._column (``_metadata_column`` belongs to
                # MetadataInDataTableColumnValidator), so loading values always
                # raised AttributeError.
                self.valid_values.append(fields[self._column])

    def validate(self, value, trans=None):
        """Raise ValueError unless ``value`` appears in the cached column."""
        if not value:
            return
        # Reload if the data table has changed since the values were cached.
        if not self._tool_data_table.is_current_version(self._data_table_content_version):
            log.debug('MetadataInDataTableColumnValidator values are out of sync with data table (%s), updating validator.', self._tool_data_table.name)
            self._load_values()
        if value in self.valid_values:
            return
        raise ValueError(self.message)
class ValueNotInDataTableColumnValidator(ValueInDataTableColumnValidator):
"""
Validator that checks if a value is NOT in a tool data table column.
"""
def __init__(self, tool_data_table, metadata_column, message="Value already present.", line_startswith=None):
super(ValueNotInDataTableColumnValidator, self).__init__(tool_data_table, metadata_column, message, line_startswith)
def validate(self, value, trans=None):
try:
super(ValueInDataTableColumnValidator, self).validate(value, trans)
except ValueError:
return
else:
raise ValueError(self.message)
class MetadataInDataTableColumnValidator(Validator):
"""
Validator that checks if the value for a dataset's metadata item exists in a file.
"""
requires_dataset_metadata = True
@classmethod
def from_element(cls, param, elem):
table_name = elem.get("table_name", None)
assert table_name, 'You must specify a table_name.'
tool_data_table = param.tool.app.tool_data_tables[table_name]
metadata_name = elem.get("metadata_name", None)
if metadata_name:
metadata_name = metadata_name.strip()
metadata_column = elem.get("metadata_column", 0)
try:
metadata_column = int(metadata_column)
except ValueError:
pass
message = elem.get("message", "Value for metadata %s was not found in %s." % (metadata_name, table_name))
line_startswith = elem.get("line_startswith", None)
if line_startswith:
line_startswith = line_startswith.strip()
return cls(tool_data_table, metadata_name, metadata_column, message, line_startswith)
def __init__(self, tool_data_table, metadata_name, metadata_column, message="Value for metadata not found.", line_startswith=None):
self.metadata_name = metadata_name
self.message = message
self.valid_values = []
self._data_table_content_version = None
self._tool_data_table = tool_data_table
if isinstance(metadata_column, string_types):
metadata_column = tool_data_table.columns[metadata_column]
self._metadata_column = metadata_column
self._load_values()
def _load_values(self):
self._data_table_content_version, data_fields = self._tool_data_table.get_version_fields()
self.valid_values = []
for fields in data_fields:
if self._metadata_column < len(fields):
self.valid_values.append(fields[self._metadata_column])
def validate(self, value, trans=None):
if not value:
return
if hasattr(value, "metadata"):
if not self._tool_data_table.is_current_version(self._data_table_content_version):
log.debug('MetadataInDataTableColumnValidator values are out of sync with data table (%s), updating validator.', self._tool_data_table.name)
self._load_values()
if value.metadata.spec[self.metadata_name].param.to_string(value.metadata.get(self.metadata_name)) in self.valid_values:
return
raise ValueError(self.message)
class MetadataNotInDataTableColumnValidator(MetadataInDataTableColumnValidator):
"""
Validator that checks if the value for a dataset's metadata item doesn't exists in a file.
"""
requires_dataset_metadata = True
def __init__(self, tool_data_table, metadata_name, metadata_column, message="Value for metadata not found.", line_startswith=None):
super(MetadataInDataTableColumnValidator, self).__init__(tool_data_table, metadata_name, metadata_column, message, line_startswith)
def validate(self, value, trans=None):
try:
super(MetadataInDataTableColumnValidator, self).validate(value, trans)
except ValueError:
return
else:
raise ValueError(self.message)
class MetadataInRangeValidator(InRangeValidator):
"""
Validator that ensures metadata is in a specified range
"""
requires_dataset_metadata = True
@classmethod
def from_element(cls, param, elem):
metadata_name = elem.get('metadata_name', None)
assert metadata_name, "dataset_metadata_in_range validator requires metadata_name attribute."
metadata_name = metadata_name.strip()
return cls(metadata_name,
elem.get('message', None), elem.get('min'),
elem.get('max'), elem.get('exclude_min', 'false'),
elem.get('exclude_max', 'false'))
def __init__(self, metadata_name, message, range_min, range_max, exclude_min=False, exclude_max=False):
self.metadata_name = metadata_name
super(MetadataInRangeValidator, self).__init__(message, range_min, range_max, exclude_min, exclude_max)
def validate(self, value, trans=None):
if value:
if not isinstance(value, model.DatasetInstance):
raise ValueError('A non-dataset value was provided.')
try:
value_to_check = float(value.metadata.spec[self.metadata_name].param.to_string(value.metadata.get(self.metadata_name)))
except KeyError:
raise ValueError('{} Metadata missing'.format(self.metadata_name))
except ValueError:
raise ValueError('{} must be a float or an integer'.format(self.metadata_name))
super(MetadataInRangeValidator, self).validate(value_to_check, trans)
validator_types = dict(
expression=ExpressionValidator,
regex=RegexValidator,
in_range=InRangeValidator,
length=LengthValidator,
metadata=MetadataValidator,
unspecified_build=UnspecifiedBuildValidator,
no_options=NoOptionsValidator,
empty_field=EmptyTextfieldValidator,
empty_dataset=DatasetEmptyValidator,
empty_extra_files_path=DatasetExtraFilesPathEmptyValidator,
dataset_metadata_in_file=MetadataInFileColumnValidator,
dataset_metadata_in_data_table=MetadataInDataTableColumnValidator,
dataset_metadata_not_in_data_table=MetadataNotInDataTableColumnValidator,
dataset_metadata_in_range=MetadataInRangeValidator,
value_in_data_table=ValueInDataTableColumnValidator,
value_not_in_data_table=ValueInDataTableColumnValidator,
dataset_ok_validator=DatasetOkValidator,
)
def get_suite():
"""Get unittest suite for this module"""
import doctest
import sys
return doctest.DocTestSuite(sys.modules[__name__])
| en | 0.52011 | Classes related to parameter validation. A validator checks that a value meets some conditions OR raises ValueError Initialize the appropiate Validator class example call `validation.Validator.from_element(ToolParameter_object, Validator_object)` needs to be implemented in the subclasses and should return the corresponding Validator object by a call to `cls( ... )` which calls the `__init__` method of the corresponding validator param cls the Validator class param param the element to be evaluated (which contains the validator) param elem the validator element return an object of a Validator subclass that corresponds to the type attribute of the validator element validate a value return None if positive validation, otherwise a ValueError is raised Validator that evaluates a regular expression >>> from galaxy.util import XML >>> from galaxy.tools.parameters.basic import ToolParameter >>> p = ToolParameter.build(None, XML(''' ... <param name="blah" type="text" value="10"> ... <validator type="regex" message="Not gonna happen">[Ff]oo</validator> ... </param> ... ''')) >>> t = p.validate("Foo") >>> t = p.validate("foo") >>> t = p.validate("Fop") Traceback (most recent call last): ... ValueError: Not gonna happen # Compile later. RE objects used to not be thread safe. Not sure about # the sre module. Validator that evaluates a python expression using the value >>> from galaxy.util import XML >>> from galaxy.tools.parameters.basic import ToolParameter >>> p = ToolParameter.build(None, XML(''' ... <param name="blah" type="text" value="10"> ... <validator type="expression" message="Not gonna happen">value.lower() == "foo"</validator> ... </param> ... ''')) >>> t = p.validate("Foo") >>> t = p.validate("foo") >>> t = p.validate("Fop") Traceback (most recent call last): ... ValueError: Not gonna happen # Save compiled expression, code objects are thread safe (right?) 
Validator that ensures a number is in a specified range >>> from galaxy.util import XML >>> from galaxy.tools.parameters.basic import ToolParameter >>> p = ToolParameter.build(None, XML(''' ... <param name="blah" type="integer" value="10"> ... <validator type="in_range" message="Not gonna happen" min="10" exclude_min="true" max="20"/> ... </param> ... ''')) >>> t = p.validate(10) Traceback (most recent call last): ... ValueError: Not gonna happen >>> t = p.validate(15) >>> t = p.validate(20) >>> t = p.validate(21) Traceback (most recent call last): ... ValueError: Not gonna happen When the optional exclude_min and exclude_max attributes are set to true, the range excludes the end points (i.e., min < value < max), while if set to False (the default), then range includes the end points (1.e., min <= value <= max). Combinations of exclude_min and exclude_max values are allowed. # Remove unneeded 0s and decimal from floats to make message pretty. Validator that ensures the length of the provided string (value) is in a specific range >>> from galaxy.util import XML >>> from galaxy.tools.parameters.basic import ToolParameter >>> p = ToolParameter.build(None, XML(''' ... <param name="blah" type="text" value="foobar"> ... <validator type="length" min="2" max="8"/> ... </param> ... ''')) >>> t = p.validate("foo") >>> t = p.validate("bar") >>> t = p.validate("f") Traceback (most recent call last): ... ValueError: Must have length of at least 2 >>> t = p.validate("foobarbaz") Traceback (most recent call last): ... ValueError: Must have length no more than 8 Validator that checks if a dataset is in an 'ok' state Validator that checks if a dataset has a positive file size. Validator that checks if a dataset's extra_files_path exists and is not empty. Validator that checks for missing metadata Validator that checks for dbkey not equal to '?' 
# if value is None, we cannot validate Validator that checks for empty select list Validator that checks for empty text field Validator that checks if the value for a dataset's metadata item exists in a file. Validator that checks if a value is in a tool data table column. Validator that checks if a value is NOT in a tool data table column. Validator that checks if the value for a dataset's metadata item exists in a file. Validator that checks if the value for a dataset's metadata item doesn't exists in a file. Validator that ensures metadata is in a specified range Get unittest suite for this module | 3.34701 | 3 |
kurisu/cogs/utility.py | khakers/Kurisu | 4 | 6624684 | from io import BytesIO
from typing import cast, Optional, Union
import io
from PIL import Image, ImageDraw
from discord.ext import commands
import discord
from utils.kurisu import KurisuBot
class Utility(commands.Cog):
    """A module filled with informative commands: info about a guild, user, role, emoji, etc."""

    def __init__(self, bot: KurisuBot):
        # Keep a reference to the bot so commands can reach shared state
        # (ok_color/error_color for embeds, the aiohttp session, etc.).
        self.bot = bot
@commands.command()
async def color(self, ctx: commands.Context, clr: str):
colors = {
"aliceblue": ["#f0f8ff", "0xf0f8ff"],
"antiquewhite": ["#faebd7", "0xfaebd7"],
"aqua": ["#00ffff", "0x00ffff"],
"aquamarine": ["#7fffd4", "0x7fffd4"],
"azure": ["#f0ffff", "0xf0ffff"],
"beige": ["#f5f5dc", "0xf5f5dc"],
"bisque": ["#ffe4c4", "0xffe4c4"],
"black": ["#000000", "0x000000"],
"blanchedalmond": ["#ffebcd", "0xffebcd"],
"blue": ["#0000ff", "0x0000ff"],
"blueviolet": ["#8a2be2", "0x8a2be2"],
"brown": ["#a52a2a", "0xa52a2a"],
"burlywood": ["#deb887", "0xdeb887"],
"cadetblue": ["#5f9ea0", "0x5f9ea0"],
"chartreuse": ["#7fff00", "0x7fff00"],
"chocolate": ["#d2691e", "0xd2691e"],
"coral": ["#ff7f50", "0xff7f50"],
"cornflowerblue": ["#6495ed", "0x6495ed"],
"cornsilk": ["#fff8dc", "0xfff8dc"],
"crimson": ["#dc143c", "0xdc143c"],
"cyan": ["#00ffff", "0x00ffff"],
"darkblue": ["#00008b", "0x00008b"],
"darkcyan": ["#008b8b", "0x008b8b"],
"darkgoldenrod": ["#b8860b", "0xb8860b"],
"darkgray": ["#a9a9a9", "0xa9a9a9"],
"darkgrey": ["#a9a9a9", "0xa9a9a9"],
"darkgreen": ["#006400", "0x006400"],
"darkkhaki": ["#bdb76b", "0xbdb76b"],
"darkmagenta": ["#8b008b", "0x8b008b"],
"darkolivegreen": ["#556b2f", "0x556b2f"],
"darkorange": ["#ff8c00", "0xff8c00"],
"darkorchid": ["#9932cc", "0x9932cc"],
"darkred": ["#8b0000", "0x8b0000"],
"darksalmon": ["#e9967a", "0xe9967a"],
"darkseagreen": ["#8fbc8f", "0x8fbc8f"],
"darkslateblue": ["#483d8b", "0x483d8b"],
"darkslategray": ["#2f4f4f", "0x2f4f4f"],
"darkslategrey": ["#2f4f4f", "0x2f4f4f"],
"darkturquoise": ["#00ced1", "0x00ced1"],
"darkviolet": ["#9400d3", "0x9400d3"],
"deeppink": ["#ff1493", "0xff1493"],
"deepskyblue": ["#00bfff", "0x00bfff"],
"dimgray": ["#696969", "0x696969"],
"dimgrey": ["#696969", "0x696969"],
"dodgerblue": ["#1e90ff", "0x1e90ff"],
"firebrick": ["#b22222", "0xb22222"],
"floralwhite": ["#fffaf0", "0xfffaf0"],
"forestgreen": ["#228b22", "0x228b22"],
"fuchsia": ["#ff00ff", "0xff00ff"],
"gainsboro": ["#dcdcdc", "0xdcdcdc"],
"ghostwhite": ["#f8f8ff", "0xf8f8ff"],
"gold": ["#ffd700", "0xffd700"],
"goldenrod": ["#daa520", "0xdaa520"],
"gray": ["#808080", "0x808080"],
"grey": ["#808080", "0x808080"],
"green": ["#008000", "0x008000"],
"greenyellow": ["#adff2f", "0xadff2f"],
"honeydew": ["#f0fff0", "0xf0fff0"],
"hotpink": ["#ff69b4", "0xff69b4"],
"indianred": ["#cd5c5c", "0xcd5c5c"],
"indigo": ["#4b0082", "0x4b0082"],
"ivory": ["#fffff0", "0xfffff0"],
"khaki": ["#f0e68c", "0xf0e68c"],
"lavender": ["#e6e6fa", "0xe6e6fa"],
"lavenderblush": ["#fff0f5", "0xfff0f5"],
"lawngreen": ["#7cfc00", "0x7cfc00"],
"lemonchiffon": ["#fffacd", "0xfffacd"],
"lightblue": ["#add8e6", "0xadd8e6"],
"lightcoral": ["#f08080", "0xf08080"],
"lightcyan": ["#e0ffff", "0xe0ffff"],
"lightgoldenrodyellow": ["#fafad2", "0xfafad2"],
"lightgray": ["#d3d3d3", "0xd3d3d3"],
"lightgrey": ["#d3d3d3", "0xd3d3d3"],
"lightgreen": ["#90ee90", "0x90ee90"],
"lightpink": ["#ffb6c1", "0xffb6c1"],
"lightsalmon": ["#ffa07a", "0xffa07a"],
"lightseagreen": ["#20b2aa", "0x20b2aa"],
"lightskyblue": ["#87cefa", "0x87cefa"],
"lightslategray": ["#778899", "0x778899"],
"lightslategrey": ["#778899", "0x778899"],
"lightsteelblue": ["#b0c4de", "0xb0c4de"],
"lightyellow": ["#ffffe0", "0xffffe0"],
"lime": ["#00ff00", "0x00ff00"],
"limegreen": ["#32cd32", "0x32cd32"],
"linen": ["#faf0e6", "0xfaf0e6"],
"magenta": ["#ff00ff", "0xff00ff"],
"maroon": ["#800000", "0x800000"],
"mediumaquamarine": ["#66cdaa", "0x66cdaa"],
"mediumblue": ["#0000cd", "0x0000cd"],
"mediumorchid": ["#ba55d3", "0xba55d3"],
"mediumpurple": ["#9370db", "0x9370db"],
"mediumseagreen": ["#3cb371", "0x3cb371"],
"mediumslateblue": ["#7b68ee", "0x7b68ee"],
"mediumspringgreen": ["#00fa9a", "0x00fa9a"],
"mediumturquoise": ["#48d1cc", "0x48d1cc"],
"mediumvioletred": ["#c71585", "0xc71585"],
"midnightblue": ["#191970", "0x191970"],
"mintcream": ["#f5fffa", "0xf5fffa"],
"mistyrose": ["#ffe4e1", "0xffe4e1"],
"moccasin": ["#ffe4b5", "0xffe4b5"],
"navajowhite": ["#ffdead", "0xffdead"],
"navy": ["#000080", "0x000080"],
"oldlace": ["#fdf5e6", "0xfdf5e6"],
"olive": ["#808000", "0x808000"],
"olivedrab": ["#6b8e23", "0x6b8e23"],
"orange": ["#ffa500", "0xffa500"],
"orangered": ["#ff4500", "0xff4500"],
"orchid": ["#da70d6", "0xda70d6"],
"palegoldenrod": ["#eee8aa", "0xeee8aa"],
"palegreen": ["#98fb98", "0x98fb98"],
"paleturquoise": ["#afeeee", "0xafeeee"],
"palevioletred": ["#db7093", "0xdb7093"],
"papayawhip": ["#ffefd5", "0xffefd5"],
"peachpuff": ["#ffdab9", "0xffdab9"],
"peru": ["#cd853f", "0xcd853f"],
"pink": ["#ffc0cb", "0xffc0cb"],
"plum": ["#dda0dd", "0xdda0dd"],
"powderblue": ["#b0e0e6", "0xb0e0e6"],
"purple": ["#800080", "0x800080"],
"red": ["#ff0000", "0xff0000"],
"rosybrown": ["#bc8f8f", "0xbc8f8f"],
"royalblue": ["#4169e1", "0x4169e1"],
"saddlebrown": ["#8b4513", "0x8b4513"],
"salmon": ["#fa8072", "0xfa8072"],
"sandybrown": ["#f4a460", "0xf4a460"],
"seagreen": ["#2e8b57", "0x2e8b57"],
"seashell": ["#fff5ee", "0xfff5ee"],
"sienna": ["#a0522d", "0xa0522d"],
"silver": ["#c0c0c0", "0xc0c0c0"],
"skyblue": ["#87ceeb", "0x87ceeb"],
"slateblue": ["#6a5acd", "0x6a5acd"],
"slategray": ["#708090", "0x708090"],
"slategrey": ["#708090", "0x708090"],
"snow": ["#fffafa", "0xfffafa"],
"springgreen": ["#00ff7f", "0x00ff7f"],
"steelblue": ["#4682b4", "0x4682b4"],
"tan": ["#d2b48c", "0xd2b48c"],
"teal": ["#008080", "0x008080"],
"thistle": ["#d8bfd8", "0xd8bfd8"],
"tomato": ["#ff6347", "0xff6347"],
"turquoise": ["#40e0d0", "0x40e0d0"],
"violet": ["#ee82ee", "0xee82ee"],
"wheat": ["#f5deb3", "0xf5deb3"],
"white": ["#ffffff", "0xffffff"],
"whitesmoke": ["#f5f5f5", "0xf5f5f5"],
"yellow": ["#ffff00", "0xffff00"],
"yellowgreen": ["#9acd32", "0x9acd32"],
}
if clr == "list":
return await ctx.send(
embed=discord.Embed(
title="Available Color List",
description="```apache\n"
+ ", ".join(sorted(map(str, colors)))
+ "\n```",
color=self.bot.ok_color,
)
)
if not clr.lower() in colors:
await ctx.send(
embed=discord.Embed(
description="Color Not Found", color=self.bot.error_color
)
)
else:
try:
global a, b
a = colors[clr.lower()][1]
b = colors[clr.lower()][0]
except KeyError:
if clr.startswith("#"):
a = f"0x{clr}".replace("#", "")
finally:
img = Image.new("RGB", (128, 128))
aimage = ImageDraw.Draw(img)
aimage.rectangle(xy=(0, 0, 128, 128), fill=b)
buf = io.BytesIO()
img.save(buf, "png")
buf.seek(0)
file = discord.File(buf, "color.png")
await ctx.send(
file=file,
embed=discord.Embed(
description=f"Color: {clr.capitalize()}\n{b}",
color=int(a, base=16),
).set_image(url="attachment://color.png"),
)
@commands.command(aliases=["sinfo", "ginfo", "guildinfo"])
@commands.cooldown(1, 3, commands.BucketType.user)
async def serverinfo(
self, ctx: commands.Context, guild: discord.Guild = None
):
"""Get information about a certain guild"""
if guild is None:
guild = ctx.guild
weird_stuff = {
"ANIMATED_ICON": "Animated Icon",
"BANNER": "Banner Image",
"COMMERCE": "Commerce",
"COMMUNITY": "Community",
"DISCOVERABLE": "Server Discovery",
"FEATURABLE": "Featurable",
"INVITE_SPLASH": "Splash Invite",
"MEMBER_LIST_DISABLED": "Member list disabled",
"MEMBER_VERIFICATION_GATE_ENABLED": "Membership Screening enabled",
"MORE_EMOJI": "More Emojis",
"NEWS": "News Channels",
"PARTNERED": "Partnered",
"PREVIEW_ENABLED": "Preview enabled",
"PUBLIC_DISABLED": "Public disabled",
"VANITY_URL": "Vanity URL",
"VERIFIED": "Verified",
"VIP_REGIONS": "VIP Voice Servers",
"WELCOME_SCREEN_ENABLED": "Welcome Screen enabled",
"THREADS_ENABLED": "Threads Enabled",
"THREADS_ENABLED_TESTING": "Threads Testing",
"PRIVATE_THREADS": "Private Threads",
"SEVEN_DAY_THREAD_ARCHIVE": "Seven Days Thread Archive",
"THREE_DAY_THREAD_ARCHIVE": "Three Days Thread Archive",
"ROLE_ICONS": "Role Icons",
"RELAYS": "Relays Enabled",
}
guild_features = [
f"✅ {name}\n"
for weird_stuff, name in weird_stuff.items()
if weird_stuff in guild.features
]
embed = discord.Embed(title=guild.name, color=self.bot.ok_color)
embed.set_thumbnail(url=guild.icon.url)
embed.add_field(
name="Owner",
value=f"Name: **{guild.owner}**\nID: **{guild.owner.id}**",
inline=True,
)
embed.add_field(
name="Creation Time",
value=f"<t:{int(guild.created_at.timestamp())}:F>",
inline=False,
)
embed.add_field(
name="Member Count", value=f"**{guild.member_count}**", inline=True
)
embed.add_field(
name="Role Count",
value="**{}**".format(len(guild.roles)),
inline=True,
)
embed.add_field(
name="Channel Count",
value=f"Text: **{len(guild.text_channels)}**\n"
f"Voice: **{len(guild.voice_channels)}**\n"
f"Categories: **{len(guild.categories)}**\n"
f"Total **{len(guild.text_channels) + len(guild.voice_channels) + len(guild.categories)}**",
inline=True,
)
embed.add_field(
name="Emoji Count",
value="**{}**".format(len(guild.emojis)),
inline=True,
)
if guild_features:
embed.add_field(
name="Features", value="".join(guild_features), inline=False
)
if guild.banner:
embed.set_image(url=guild.banner.url)
elif guild.splash:
embed.set_image(url=guild.splash.url)
embed.set_footer(text=f"ID: {guild.id}")
await ctx.send(embed=embed)
@commands.command(aliases=["uinfo", "memberinfo", "minfo"])
@commands.guild_only()
@commands.cooldown(1, 3, commands.BucketType.user)
async def userinfo(
self, ctx: commands.context, user: discord.Member = None
):
"""Returns info about a user"""
if user is None:
user = ctx.author
user_flags = "\n".join(
i.replace("_", " ").title() for i, v in user.public_flags if v
)
roles = user.roles[-1:0:-1]
embed = discord.Embed(color=user.color or self.bot.ok_color)
embed.set_thumbnail(url=user.avatar.url)
embed.add_field(name="Name", value=user)
embed.add_field(name="ID", value=user.id)
embed.add_field(
name="Status & Activity",
value=f"Status: {str(user.status).title()}\nActivity: {user.activity.name if user.activity else 'No Activity'}",
inline=False,
)
embed.add_field(
name="Account Creation",
value=f"<t:{int(user.created_at.timestamp())}:F>",
)
embed.add_field(
name=f"{ctx.guild} Join Date",
value=f"<t:{int(user.joined_at.timestamp())}:F>"
if user.joined_at
else "Unknown.",
inline=False,
)
if roles:
embed.add_field(
name=f"Roles **{(len(user.roles) - 1)}**",
value=", ".join([x.mention for x in roles[:10]]),
inline=False,
)
if user_flags:
embed.add_field(
name="Public User Flags",
value=user_flags,
inline=False,
)
if not user.bot:
if banner := (await self.bot.fetch_user(user.id)).banner:
embed.set_image(url=banner.url)
await ctx.send(embed=embed)
@commands.command(aliases=["rinfo"])
@commands.cooldown(1, 3, commands.BucketType.user)
async def roleinfo(self, ctx: commands.Context, *, role: discord.Role):
"""Returns info about a role"""
await ctx.send(
embed=discord.Embed(
title=f"Role info for {role.name}", color=role.color
)
.add_field(name="ID", value=role.id, inline=True)
.add_field(name="Color", value=role.color, inline=True)
.add_field(
name="Creation Time",
value=role.created_at.strftime("%c"),
inline=True,
)
.add_field(name="Members", value=len(role.members), inline=True)
.add_field(name="Hoisted", value=role.hoist, inline=True)
.add_field(name="Mentionable", value=role.mentionable, inline=True)
.add_field(name="Position", value=role.position, inline=True)
.add_field(
name="Permissions",
value=f"Click [Here](https://cogs.fixator10.ru/permissions-calculator/?v={role.permissions.value})",
inline=True,
)
)
@commands.command(aliases=["einfo", "emoteinfo"])
@commands.cooldown(1, 3, commands.BucketType.user)
async def emojiinfo(self, ctx: commands.Context, emoji: discord.Emoji):
"""Returns information about a emoji/emote(Within the current guild)"""
await ctx.send(
embed=discord.Embed(
title="Emoji Information", color=self.bot.ok_color
)
.add_field(name="ID", value=emoji.id, inline=False)
.add_field(name="Animated", value=emoji.animated, inline=False)
.add_field(name="Link", value=emoji.url, inline=False)
.set_image(url=emoji.url)
)
@commands.command(aliases=["se", "bigmoji", "jumbo"])
@commands.cooldown(1, 3, commands.BucketType.user)
async def bigemoji(
self,
ctx: commands.Context,
emoji: Union[discord.Emoji, discord.PartialEmoji, str],
):
"""
Get a emoji in big size lol
"""
await ctx.channel.trigger_typing()
if type(emoji) in [discord.PartialEmoji, discord.Emoji]:
aa_emoji = cast(discord.Emoji, emoji)
ext = "gif" if aa_emoji.animated else "png"
url = "https://cdn.discordapp.com/emojis/{id}.{ext}?v=1".format(
id=aa_emoji.id, ext=ext
)
filename = "{name}.{ext}".format(name=aa_emoji.name, ext=ext)
else:
try:
"""https://github.com/glasnt/emojificate/blob/master/emojificate/filter.py"""
cdn_fmt = (
"https://twemoji.maxcdn.com/2/72x72/{codepoint:x}.png"
)
url = cdn_fmt.format(codepoint=ord(str(emoji)))
filename = "emoji.png"
except TypeError:
return await ctx.send(
"That doesn't appear to be a valid emoji"
)
try:
async with self.bot.session.get(url) as resp:
image = BytesIO(await resp.read())
except Exception:
return await ctx.send("That doesn't appear to be a valid emoji")
await ctx.send(file=discord.File(image, filename=filename))
@commands.command(aliases=["av"])
@commands.cooldown(1, 5, commands.BucketType.user)
@commands.guild_only()
@commands.bot_has_permissions(embed_links=True)
async def avatar(
self, ctx: commands.Context, user: Optional[discord.Member]
):
"""Check your avatars."""
await ctx.channel.trigger_typing()
if user is None:
user = ctx.author
av = user.avatar
e = discord.Embed(
title=f"{user.name}'s avatar", color=self.bot.ok_color
)
e.add_field(
name="File Formations",
value=f"[jpg]({av.with_format('jpg')}), "
f"[png]({av.with_format('png')}), "
f"[webp]({av.with_format('webp')}){',' if av.is_animated() else ''} "
f"{f'[gif]({av})' if av.is_animated() else ''}",
)
e.add_field(
name="Animated", value="\u2705" if av.is_animated() else ":x:"
)
e.set_image(url=av.with_size(4096))
e.set_footer(text=f"ID: {user.id}")
await ctx.send(embed=e)
@commands.command(aliases=["setnsfw"])
@commands.has_permissions(manage_channels=True)
@commands.bot_has_permissions(manage_channels=True)
async def nsfw(self, ctx: commands.Context):
"""Toggle nsfw flag on the current channel"""
if not ctx.channel.is_nsfw():
await ctx.channel.edit(nsfw=True)
await ctx.send(
f"`{ctx.channel.name}` NSFW flag has been toggled to True"
)
else:
await ctx.channel.edit(nsfw=False)
await ctx.send(
f"`{ctx.channel.name}` NSFW flag has been toggled to False"
)
@commands.command()
@commands.has_permissions(manage_guild=True)
@commands.bot_has_permissions(manage_guild=True)
async def setafktimeout(self, ctx: commands.Context, timeout: str):
"""Set the afk timeout for this server. Run [p]setafktimeout timelist for a list for all available times"""
timeouts = {
"1m": ["60", "1 Minute"],
"5m": ["300", "5 Minutes"],
"15m": ["900", "15 Minutes"],
"30m": ["1800", "30 Minutes"],
"1h": ["3600", "1 Hour"],
}
if timeout == "timelist":
return await ctx.send(
embed=discord.Embed(
title="Available timeouts",
description="```\n" + "\n".join(timeouts.keys()) + "\n```",
color=self.bot.ok_color,
)
)
if timeout.lower() in timeouts.keys():
await ctx.guild.edit(afk_timeout=int(timeouts[timeout.lower()][0]))
await ctx.send(
embed=discord.Embed(
description=f"Set AFK timeout to `{timeouts[timeout.lower()][1]}`",
color=self.bot.ok_color,
)
)
@commands.command()
@commands.has_permissions(manage_guild=True)
@commands.bot_has_permissions(manage_guild=True)
async def setafkchannel(
self, ctx: commands.Context, channel: discord.VoiceChannel = None
):
"""Set the channel to where people go when they hit the AFK timeout. Pass in None for no Inactive Channel"""
if channel is None:
await ctx.guild.edit(afk_channel=channel)
return await ctx.send(
embed=discord.Embed(
description="Removed AFK channel", color=self.bot.ok_color
)
)
if channel:
await ctx.guild.edit(afk_channel=channel)
await ctx.send(
embed=discord.Embed(
description=f"Set AFK timeout channel to `{channel.name}`",
color=self.bot.ok_color,
)
)
@commands.command(aliases=["cr"])
@commands.has_permissions(manage_roles=True)
@commands.bot_has_permissions(manage_roles=True)
@commands.cooldown(1, 3, commands.BucketType.user)
async def createrole(self, ctx: commands.Context, *, name: str):
"""Create a role"""
await ctx.guild.create_role(name=name)
await ctx.send(
embed=discord.Embed(
description=f"Successfully created role with name `{name}`",
color=self.bot.ok_color,
)
)
@commands.command(aliases=["dr"])
@commands.has_permissions(manage_roles=True)
@commands.bot_has_permissions(manage_roles=True)
@commands.cooldown(1, 3, commands.BucketType.user)
async def deleterole(self, ctx, *, role: discord.Role):
"""Delete a role"""
await role.delete()
await ctx.send(
embed=discord.Embed(
description=f"Successfully deleted role called `{role}`",
color=self.bot.ok_color,
)
)
def setup(bot):
    # Entry point used by discord.py's extension loader (bot.load_extension).
    bot.add_cog(Utility(bot))
| from io import BytesIO
from typing import cast, Optional, Union
import io
from PIL import Image, ImageDraw
from discord.ext import commands
import discord
from utils.kurisu import KurisuBot
class Utility(commands.Cog):
"""A module filled with informative commands. Could be info a bout a guild, user, etc"""
def __init__(self, bot: KurisuBot):
self.bot = bot
@commands.command()
async def color(self, ctx: commands.Context, clr: str):
colors = {
"aliceblue": ["#f0f8ff", "0xf0f8ff"],
"antiquewhite": ["#faebd7", "0xfaebd7"],
"aqua": ["#00ffff", "0x00ffff"],
"aquamarine": ["#7fffd4", "0x7fffd4"],
"azure": ["#f0ffff", "0xf0ffff"],
"beige": ["#f5f5dc", "0xf5f5dc"],
"bisque": ["#ffe4c4", "0xffe4c4"],
"black": ["#000000", "0x000000"],
"blanchedalmond": ["#ffebcd", "0xffebcd"],
"blue": ["#0000ff", "0x0000ff"],
"blueviolet": ["#8a2be2", "0x8a2be2"],
"brown": ["#a52a2a", "0xa52a2a"],
"burlywood": ["#deb887", "0xdeb887"],
"cadetblue": ["#5f9ea0", "0x5f9ea0"],
"chartreuse": ["#7fff00", "0x7fff00"],
"chocolate": ["#d2691e", "0xd2691e"],
"coral": ["#ff7f50", "0xff7f50"],
"cornflowerblue": ["#6495ed", "0x6495ed"],
"cornsilk": ["#fff8dc", "0xfff8dc"],
"crimson": ["#dc143c", "0xdc143c"],
"cyan": ["#00ffff", "0x00ffff"],
"darkblue": ["#00008b", "0x00008b"],
"darkcyan": ["#008b8b", "0x008b8b"],
"darkgoldenrod": ["#b8860b", "0xb8860b"],
"darkgray": ["#a9a9a9", "0xa9a9a9"],
"darkgrey": ["#a9a9a9", "0xa9a9a9"],
"darkgreen": ["#006400", "0x006400"],
"darkkhaki": ["#bdb76b", "0xbdb76b"],
"darkmagenta": ["#8b008b", "0x8b008b"],
"darkolivegreen": ["#556b2f", "0x556b2f"],
"darkorange": ["#ff8c00", "0xff8c00"],
"darkorchid": ["#9932cc", "0x9932cc"],
"darkred": ["#8b0000", "0x8b0000"],
"darksalmon": ["#e9967a", "0xe9967a"],
"darkseagreen": ["#8fbc8f", "0x8fbc8f"],
"darkslateblue": ["#483d8b", "0x483d8b"],
"darkslategray": ["#2f4f4f", "0x2f4f4f"],
"darkslategrey": ["#2f4f4f", "0x2f4f4f"],
"darkturquoise": ["#00ced1", "0x00ced1"],
"darkviolet": ["#9400d3", "0x9400d3"],
"deeppink": ["#ff1493", "0xff1493"],
"deepskyblue": ["#00bfff", "0x00bfff"],
"dimgray": ["#696969", "0x696969"],
"dimgrey": ["#696969", "0x696969"],
"dodgerblue": ["#1e90ff", "0x1e90ff"],
"firebrick": ["#b22222", "0xb22222"],
"floralwhite": ["#fffaf0", "0xfffaf0"],
"forestgreen": ["#228b22", "0x228b22"],
"fuchsia": ["#ff00ff", "0xff00ff"],
"gainsboro": ["#dcdcdc", "0xdcdcdc"],
"ghostwhite": ["#f8f8ff", "0xf8f8ff"],
"gold": ["#ffd700", "0xffd700"],
"goldenrod": ["#daa520", "0xdaa520"],
"gray": ["#808080", "0x808080"],
"grey": ["#808080", "0x808080"],
"green": ["#008000", "0x008000"],
"greenyellow": ["#adff2f", "0xadff2f"],
"honeydew": ["#f0fff0", "0xf0fff0"],
"hotpink": ["#ff69b4", "0xff69b4"],
"indianred": ["#cd5c5c", "0xcd5c5c"],
"indigo": ["#4b0082", "0x4b0082"],
"ivory": ["#fffff0", "0xfffff0"],
"khaki": ["#f0e68c", "0xf0e68c"],
"lavender": ["#e6e6fa", "0xe6e6fa"],
"lavenderblush": ["#fff0f5", "0xfff0f5"],
"lawngreen": ["#7cfc00", "0x7cfc00"],
"lemonchiffon": ["#fffacd", "0xfffacd"],
"lightblue": ["#add8e6", "0xadd8e6"],
"lightcoral": ["#f08080", "0xf08080"],
"lightcyan": ["#e0ffff", "0xe0ffff"],
"lightgoldenrodyellow": ["#fafad2", "0xfafad2"],
"lightgray": ["#d3d3d3", "0xd3d3d3"],
"lightgrey": ["#d3d3d3", "0xd3d3d3"],
"lightgreen": ["#90ee90", "0x90ee90"],
"lightpink": ["#ffb6c1", "0xffb6c1"],
"lightsalmon": ["#ffa07a", "0xffa07a"],
"lightseagreen": ["#20b2aa", "0x20b2aa"],
"lightskyblue": ["#87cefa", "0x87cefa"],
"lightslategray": ["#778899", "0x778899"],
"lightslategrey": ["#778899", "0x778899"],
"lightsteelblue": ["#b0c4de", "0xb0c4de"],
"lightyellow": ["#ffffe0", "0xffffe0"],
"lime": ["#00ff00", "0x00ff00"],
"limegreen": ["#32cd32", "0x32cd32"],
"linen": ["#faf0e6", "0xfaf0e6"],
"magenta": ["#ff00ff", "0xff00ff"],
"maroon": ["#800000", "0x800000"],
"mediumaquamarine": ["#66cdaa", "0x66cdaa"],
"mediumblue": ["#0000cd", "0x0000cd"],
"mediumorchid": ["#ba55d3", "0xba55d3"],
"mediumpurple": ["#9370db", "0x9370db"],
"mediumseagreen": ["#3cb371", "0x3cb371"],
"mediumslateblue": ["#7b68ee", "0x7b68ee"],
"mediumspringgreen": ["#00fa9a", "0x00fa9a"],
"mediumturquoise": ["#48d1cc", "0x48d1cc"],
"mediumvioletred": ["#c71585", "0xc71585"],
"midnightblue": ["#191970", "0x191970"],
"mintcream": ["#f5fffa", "0xf5fffa"],
"mistyrose": ["#ffe4e1", "0xffe4e1"],
"moccasin": ["#ffe4b5", "0xffe4b5"],
"navajowhite": ["#ffdead", "0xffdead"],
"navy": ["#000080", "0x000080"],
"oldlace": ["#fdf5e6", "0xfdf5e6"],
"olive": ["#808000", "0x808000"],
"olivedrab": ["#6b8e23", "0x6b8e23"],
"orange": ["#ffa500", "0xffa500"],
"orangered": ["#ff4500", "0xff4500"],
"orchid": ["#da70d6", "0xda70d6"],
"palegoldenrod": ["#eee8aa", "0xeee8aa"],
"palegreen": ["#98fb98", "0x98fb98"],
"paleturquoise": ["#afeeee", "0xafeeee"],
"palevioletred": ["#db7093", "0xdb7093"],
"papayawhip": ["#ffefd5", "0xffefd5"],
"peachpuff": ["#ffdab9", "0xffdab9"],
"peru": ["#cd853f", "0xcd853f"],
"pink": ["#ffc0cb", "0xffc0cb"],
"plum": ["#dda0dd", "0xdda0dd"],
"powderblue": ["#b0e0e6", "0xb0e0e6"],
"purple": ["#800080", "0x800080"],
"red": ["#ff0000", "0xff0000"],
"rosybrown": ["#bc8f8f", "0xbc8f8f"],
"royalblue": ["#4169e1", "0x4169e1"],
"saddlebrown": ["#8b4513", "0x8b4513"],
"salmon": ["#fa8072", "0xfa8072"],
"sandybrown": ["#f4a460", "0xf4a460"],
"seagreen": ["#2e8b57", "0x2e8b57"],
"seashell": ["#fff5ee", "0xfff5ee"],
"sienna": ["#a0522d", "0xa0522d"],
"silver": ["#c0c0c0", "0xc0c0c0"],
"skyblue": ["#87ceeb", "0x87ceeb"],
"slateblue": ["#6a5acd", "0x6a5acd"],
"slategray": ["#708090", "0x708090"],
"slategrey": ["#708090", "0x708090"],
"snow": ["#fffafa", "0xfffafa"],
"springgreen": ["#00ff7f", "0x00ff7f"],
"steelblue": ["#4682b4", "0x4682b4"],
"tan": ["#d2b48c", "0xd2b48c"],
"teal": ["#008080", "0x008080"],
"thistle": ["#d8bfd8", "0xd8bfd8"],
"tomato": ["#ff6347", "0xff6347"],
"turquoise": ["#40e0d0", "0x40e0d0"],
"violet": ["#ee82ee", "0xee82ee"],
"wheat": ["#f5deb3", "0xf5deb3"],
"white": ["#ffffff", "0xffffff"],
"whitesmoke": ["#f5f5f5", "0xf5f5f5"],
"yellow": ["#ffff00", "0xffff00"],
"yellowgreen": ["#9acd32", "0x9acd32"],
}
if clr == "list":
return await ctx.send(
embed=discord.Embed(
title="Available Color List",
description="```apache\n"
+ ", ".join(sorted(map(str, colors)))
+ "\n```",
color=self.bot.ok_color,
)
)
if not clr.lower() in colors:
await ctx.send(
embed=discord.Embed(
description="Color Not Found", color=self.bot.error_color
)
)
else:
try:
global a, b
a = colors[clr.lower()][1]
b = colors[clr.lower()][0]
except KeyError:
if clr.startswith("#"):
a = f"0x{clr}".replace("#", "")
finally:
img = Image.new("RGB", (128, 128))
aimage = ImageDraw.Draw(img)
aimage.rectangle(xy=(0, 0, 128, 128), fill=b)
buf = io.BytesIO()
img.save(buf, "png")
buf.seek(0)
file = discord.File(buf, "color.png")
await ctx.send(
file=file,
embed=discord.Embed(
description=f"Color: {clr.capitalize()}\n{b}",
color=int(a, base=16),
).set_image(url="attachment://color.png"),
)
@commands.command(aliases=["sinfo", "ginfo", "guildinfo"])
@commands.cooldown(1, 3, commands.BucketType.user)
async def serverinfo(
self, ctx: commands.Context, guild: discord.Guild = None
):
"""Get information about a certain guild"""
if guild is None:
guild = ctx.guild
weird_stuff = {
"ANIMATED_ICON": "Animated Icon",
"BANNER": "Banner Image",
"COMMERCE": "Commerce",
"COMMUNITY": "Community",
"DISCOVERABLE": "Server Discovery",
"FEATURABLE": "Featurable",
"INVITE_SPLASH": "Splash Invite",
"MEMBER_LIST_DISABLED": "Member list disabled",
"MEMBER_VERIFICATION_GATE_ENABLED": "Membership Screening enabled",
"MORE_EMOJI": "More Emojis",
"NEWS": "News Channels",
"PARTNERED": "Partnered",
"PREVIEW_ENABLED": "Preview enabled",
"PUBLIC_DISABLED": "Public disabled",
"VANITY_URL": "Vanity URL",
"VERIFIED": "Verified",
"VIP_REGIONS": "VIP Voice Servers",
"WELCOME_SCREEN_ENABLED": "Welcome Screen enabled",
"THREADS_ENABLED": "Threads Enabled",
"THREADS_ENABLED_TESTING": "Threads Testing",
"PRIVATE_THREADS": "Private Threads",
"SEVEN_DAY_THREAD_ARCHIVE": "Seven Days Thread Archive",
"THREE_DAY_THREAD_ARCHIVE": "Three Days Thread Archive",
"ROLE_ICONS": "Role Icons",
"RELAYS": "Relays Enabled",
}
guild_features = [
f"✅ {name}\n"
for weird_stuff, name in weird_stuff.items()
if weird_stuff in guild.features
]
embed = discord.Embed(title=guild.name, color=self.bot.ok_color)
embed.set_thumbnail(url=guild.icon.url)
embed.add_field(
name="Owner",
value=f"Name: **{guild.owner}**\nID: **{guild.owner.id}**",
inline=True,
)
embed.add_field(
name="Creation Time",
value=f"<t:{int(guild.created_at.timestamp())}:F>",
inline=False,
)
embed.add_field(
name="Member Count", value=f"**{guild.member_count}**", inline=True
)
embed.add_field(
name="Role Count",
value="**{}**".format(len(guild.roles)),
inline=True,
)
embed.add_field(
name="Channel Count",
value=f"Text: **{len(guild.text_channels)}**\n"
f"Voice: **{len(guild.voice_channels)}**\n"
f"Categories: **{len(guild.categories)}**\n"
f"Total **{len(guild.text_channels) + len(guild.voice_channels) + len(guild.categories)}**",
inline=True,
)
embed.add_field(
name="Emoji Count",
value="**{}**".format(len(guild.emojis)),
inline=True,
)
if guild_features:
embed.add_field(
name="Features", value="".join(guild_features), inline=False
)
if guild.banner:
embed.set_image(url=guild.banner.url)
elif guild.splash:
embed.set_image(url=guild.splash.url)
embed.set_footer(text=f"ID: {guild.id}")
await ctx.send(embed=embed)
@commands.command(aliases=["uinfo", "memberinfo", "minfo"])
@commands.guild_only()
@commands.cooldown(1, 3, commands.BucketType.user)
async def userinfo(
self, ctx: commands.context, user: discord.Member = None
):
"""Returns info about a user"""
if user is None:
user = ctx.author
user_flags = "\n".join(
i.replace("_", " ").title() for i, v in user.public_flags if v
)
roles = user.roles[-1:0:-1]
embed = discord.Embed(color=user.color or self.bot.ok_color)
embed.set_thumbnail(url=user.avatar.url)
embed.add_field(name="Name", value=user)
embed.add_field(name="ID", value=user.id)
embed.add_field(
name="Status & Activity",
value=f"Status: {str(user.status).title()}\nActivity: {user.activity.name if user.activity else 'No Activity'}",
inline=False,
)
embed.add_field(
name="Account Creation",
value=f"<t:{int(user.created_at.timestamp())}:F>",
)
embed.add_field(
name=f"{ctx.guild} Join Date",
value=f"<t:{int(user.joined_at.timestamp())}:F>"
if user.joined_at
else "Unknown.",
inline=False,
)
if roles:
embed.add_field(
name=f"Roles **{(len(user.roles) - 1)}**",
value=", ".join([x.mention for x in roles[:10]]),
inline=False,
)
if user_flags:
embed.add_field(
name="Public User Flags",
value=user_flags,
inline=False,
)
if not user.bot:
if banner := (await self.bot.fetch_user(user.id)).banner:
embed.set_image(url=banner.url)
await ctx.send(embed=embed)
@commands.command(aliases=["rinfo"])
@commands.cooldown(1, 3, commands.BucketType.user)
async def roleinfo(self, ctx: commands.Context, *, role: discord.Role):
"""Returns info about a role"""
await ctx.send(
embed=discord.Embed(
title=f"Role info for {role.name}", color=role.color
)
.add_field(name="ID", value=role.id, inline=True)
.add_field(name="Color", value=role.color, inline=True)
.add_field(
name="Creation Time",
value=role.created_at.strftime("%c"),
inline=True,
)
.add_field(name="Members", value=len(role.members), inline=True)
.add_field(name="Hoisted", value=role.hoist, inline=True)
.add_field(name="Mentionable", value=role.mentionable, inline=True)
.add_field(name="Position", value=role.position, inline=True)
.add_field(
name="Permissions",
value=f"Click [Here](https://cogs.fixator10.ru/permissions-calculator/?v={role.permissions.value})",
inline=True,
)
)
@commands.command(aliases=["einfo", "emoteinfo"])
@commands.cooldown(1, 3, commands.BucketType.user)
async def emojiinfo(self, ctx: commands.Context, emoji: discord.Emoji):
"""Returns information about a emoji/emote(Within the current guild)"""
await ctx.send(
embed=discord.Embed(
title="Emoji Information", color=self.bot.ok_color
)
.add_field(name="ID", value=emoji.id, inline=False)
.add_field(name="Animated", value=emoji.animated, inline=False)
.add_field(name="Link", value=emoji.url, inline=False)
.set_image(url=emoji.url)
)
@commands.command(aliases=["se", "bigmoji", "jumbo"])
@commands.cooldown(1, 3, commands.BucketType.user)
async def bigemoji(
self,
ctx: commands.Context,
emoji: Union[discord.Emoji, discord.PartialEmoji, str],
):
"""
Get a emoji in big size lol
"""
await ctx.channel.trigger_typing()
if type(emoji) in [discord.PartialEmoji, discord.Emoji]:
aa_emoji = cast(discord.Emoji, emoji)
ext = "gif" if aa_emoji.animated else "png"
url = "https://cdn.discordapp.com/emojis/{id}.{ext}?v=1".format(
id=aa_emoji.id, ext=ext
)
filename = "{name}.{ext}".format(name=aa_emoji.name, ext=ext)
else:
try:
"""https://github.com/glasnt/emojificate/blob/master/emojificate/filter.py"""
cdn_fmt = (
"https://twemoji.maxcdn.com/2/72x72/{codepoint:x}.png"
)
url = cdn_fmt.format(codepoint=ord(str(emoji)))
filename = "emoji.png"
except TypeError:
return await ctx.send(
"That doesn't appear to be a valid emoji"
)
try:
async with self.bot.session.get(url) as resp:
image = BytesIO(await resp.read())
except Exception:
return await ctx.send("That doesn't appear to be a valid emoji")
await ctx.send(file=discord.File(image, filename=filename))
@commands.command(aliases=["av"])
@commands.cooldown(1, 5, commands.BucketType.user)
@commands.guild_only()
@commands.bot_has_permissions(embed_links=True)
async def avatar(
self, ctx: commands.Context, user: Optional[discord.Member]
):
"""Check your avatars."""
await ctx.channel.trigger_typing()
if user is None:
user = ctx.author
av = user.avatar
e = discord.Embed(
title=f"{user.name}'s avatar", color=self.bot.ok_color
)
e.add_field(
name="File Formations",
value=f"[jpg]({av.with_format('jpg')}), "
f"[png]({av.with_format('png')}), "
f"[webp]({av.with_format('webp')}){',' if av.is_animated() else ''} "
f"{f'[gif]({av})' if av.is_animated() else ''}",
)
e.add_field(
name="Animated", value="\u2705" if av.is_animated() else ":x:"
)
e.set_image(url=av.with_size(4096))
e.set_footer(text=f"ID: {user.id}")
await ctx.send(embed=e)
@commands.command(aliases=["setnsfw"])
@commands.has_permissions(manage_channels=True)
@commands.bot_has_permissions(manage_channels=True)
async def nsfw(self, ctx: commands.Context):
"""Toggle nsfw flag on the current channel"""
if not ctx.channel.is_nsfw():
await ctx.channel.edit(nsfw=True)
await ctx.send(
f"`{ctx.channel.name}` NSFW flag has been toggled to True"
)
else:
await ctx.channel.edit(nsfw=False)
await ctx.send(
f"`{ctx.channel.name}` NSFW flag has been toggled to False"
)
@commands.command()
@commands.has_permissions(manage_guild=True)
@commands.bot_has_permissions(manage_guild=True)
async def setafktimeout(self, ctx: commands.Context, timeout: str):
"""Set the afk timeout for this server. Run [p]setafktimeout timelist for a list for all available times"""
timeouts = {
"1m": ["60", "1 Minute"],
"5m": ["300", "5 Minutes"],
"15m": ["900", "15 Minutes"],
"30m": ["1800", "30 Minutes"],
"1h": ["3600", "1 Hour"],
}
if timeout == "timelist":
return await ctx.send(
embed=discord.Embed(
title="Available timeouts",
description="```\n" + "\n".join(timeouts.keys()) + "\n```",
color=self.bot.ok_color,
)
)
if timeout.lower() in timeouts.keys():
await ctx.guild.edit(afk_timeout=int(timeouts[timeout.lower()][0]))
await ctx.send(
embed=discord.Embed(
description=f"Set AFK timeout to `{timeouts[timeout.lower()][1]}`",
color=self.bot.ok_color,
)
)
@commands.command()
@commands.has_permissions(manage_guild=True)
@commands.bot_has_permissions(manage_guild=True)
async def setafkchannel(
self, ctx: commands.Context, channel: discord.VoiceChannel = None
):
"""Set the channel to where people go when they hit the AFK timeout. Pass in None for no Inactive Channel"""
if channel is None:
await ctx.guild.edit(afk_channel=channel)
return await ctx.send(
embed=discord.Embed(
description="Removed AFK channel", color=self.bot.ok_color
)
)
if channel:
await ctx.guild.edit(afk_channel=channel)
await ctx.send(
embed=discord.Embed(
description=f"Set AFK timeout channel to `{channel.name}`",
color=self.bot.ok_color,
)
)
@commands.command(aliases=["cr"])
@commands.has_permissions(manage_roles=True)
@commands.bot_has_permissions(manage_roles=True)
@commands.cooldown(1, 3, commands.BucketType.user)
async def createrole(self, ctx: commands.Context, *, name: str):
"""Create a role"""
await ctx.guild.create_role(name=name)
await ctx.send(
embed=discord.Embed(
description=f"Successfully created role with name `{name}`",
color=self.bot.ok_color,
)
)
@commands.command(aliases=["dr"])
@commands.has_permissions(manage_roles=True)
@commands.bot_has_permissions(manage_roles=True)
@commands.cooldown(1, 3, commands.BucketType.user)
async def deleterole(self, ctx, *, role: discord.Role):
"""Delete a role"""
await role.delete()
await ctx.send(
embed=discord.Embed(
description=f"Successfully deleted role called `{role}`",
color=self.bot.ok_color,
)
)
def setup(bot):
    # Standard discord.py extension entry point: registers the Utility cog
    # on the bot when this module is loaded via load_extension().
    bot.add_cog(Utility(bot))
| en | 0.71681 | A module filled with informative commands. Could be info a bout a guild, user, etc Get information about a certain guild Returns info about a user Returns info about a role Returns information about a emoji/emote(Within the current guild) Get a emoji in big size lol https://github.com/glasnt/emojificate/blob/master/emojificate/filter.py Check your avatars. Toggle nsfw flag on the current channel Set the afk timeout for this server. Run [p]setafktimeout timelist for a list for all available times Set the channel to where people go when they hit the AFK timeout. Pass in None for no Inactive Channel Create a role Delete a role | 2.495177 | 2 |
python/Basit_Kareem/Exercise1_TB_Basit.py | Tech-Buddies/TB-1.0-Intermediate | 0 | 6624685 | <filename>python/Basit_Kareem/Exercise1_TB_Basit.py
from math import *
def add_num(x= 5, y = 7):
sumAnswer = x + y
print("sum of {0} and {1} = {2:.2f}".format(x,y,sumAnswer))
return sumAnswer
def sub_num(x = 4, y = 9):
subAnswer = x - y
print("difference between {0} and {1} = {2:.2f}".format(x,y,subAnswer))
return subAnswer
def multiply_num(x = 3, y = 2):
multAnswer = x * y
print("{0} multiplied by {1} = {2:.2f}".format(x,y, multAnswer))
return multAnswer
def divide_num(x = 8, y = 3):
divAnswer = x / y
print("{0} divided by {1} = {2:.2f}".format(x,y,divAnswer))
return divAnswer
def avg_nums(x = [4,3,9,7,3,2,8]):
avgAnswer = sum(x)/len(x)
print("The average of {0} = {1:.2f}".format(x, avgAnswer))
return avgAnswer
def geoavg_num(x = [4,3,9,7,3,2,8]):
multvals = 1
for i in range(0,len(x)):
multvals = multvals * x[i]
gmeanAnswer = multvals ** (1/len(x))
print("The geometric mean of {0} = {1:.2f}".format(x,gmeanAnswer))
return gmeanAnswer
def harm_mean(x = [4,3,9,7,3,2,8]):
a = list(map(lambda x : 1.0/x, x))
harmMean = len(x)/sum(a)
print("The harmonic mean of {0} = {1:.2f}".format(x,harmMean))
return harmMean
def weigthedAvg(x = [4,3,9,7,3,2,8], y = [2,8,1,5,9,6,2]):
weiAvg = sum(list(map(lambda x,y: x * y, x,y)))/sum(x)
print("The weighted average of weights {0} and occurence {1} = {2:.2f}".format(x,y,weiAvg))
return weiAvg
def quadroot(a = 2, b = -7, c = 4):
root1 = (-b + sqrt(b**2 - 4*a*c))/(2*a)
root2 = (-b - sqrt(b**2 - 4*a*c))/(2*a)
print("The roots of the equation ({0}x^2) + ({1}x) + ({2}) are {3:.2f} and {4:.2f}".format(a,b,c,root1,root2))
return [root1, root2]
a1 = add_num()
a2 = multiply_num()
a3 = divide_num()
a4 = sub_num()
b1 = avg_nums()
b2 = geoavg_num()
b3 = harm_mean()
b4 = weigthedAvg()
b5 = quadroot()
| <filename>python/Basit_Kareem/Exercise1_TB_Basit.py
from math import *
def add_num(x= 5, y = 7):
sumAnswer = x + y
print("sum of {0} and {1} = {2:.2f}".format(x,y,sumAnswer))
return sumAnswer
def sub_num(x = 4, y = 9):
subAnswer = x - y
print("difference between {0} and {1} = {2:.2f}".format(x,y,subAnswer))
return subAnswer
def multiply_num(x = 3, y = 2):
multAnswer = x * y
print("{0} multiplied by {1} = {2:.2f}".format(x,y, multAnswer))
return multAnswer
def divide_num(x = 8, y = 3):
divAnswer = x / y
print("{0} divided by {1} = {2:.2f}".format(x,y,divAnswer))
return divAnswer
def avg_nums(x = [4,3,9,7,3,2,8]):
avgAnswer = sum(x)/len(x)
print("The average of {0} = {1:.2f}".format(x, avgAnswer))
return avgAnswer
def geoavg_num(x = [4,3,9,7,3,2,8]):
multvals = 1
for i in range(0,len(x)):
multvals = multvals * x[i]
gmeanAnswer = multvals ** (1/len(x))
print("The geometric mean of {0} = {1:.2f}".format(x,gmeanAnswer))
return gmeanAnswer
def harm_mean(x = [4,3,9,7,3,2,8]):
a = list(map(lambda x : 1.0/x, x))
harmMean = len(x)/sum(a)
print("The harmonic mean of {0} = {1:.2f}".format(x,harmMean))
return harmMean
def weigthedAvg(x = [4,3,9,7,3,2,8], y = [2,8,1,5,9,6,2]):
weiAvg = sum(list(map(lambda x,y: x * y, x,y)))/sum(x)
print("The weighted average of weights {0} and occurence {1} = {2:.2f}".format(x,y,weiAvg))
return weiAvg
def quadroot(a = 2, b = -7, c = 4):
root1 = (-b + sqrt(b**2 - 4*a*c))/(2*a)
root2 = (-b - sqrt(b**2 - 4*a*c))/(2*a)
print("The roots of the equation ({0}x^2) + ({1}x) + ({2}) are {3:.2f} and {4:.2f}".format(a,b,c,root1,root2))
return [root1, root2]
a1 = add_num()
a2 = multiply_num()
a3 = divide_num()
a4 = sub_num()
b1 = avg_nums()
b2 = geoavg_num()
b3 = harm_mean()
b4 = weigthedAvg()
b5 = quadroot()
| none | 1 | 4.160811 | 4 | |
environments/environment.py | geektoni/learning_programs_with_arguments | 0 | 6624686 | <reponame>geektoni/learning_programs_with_arguments
from abc import ABC, abstractmethod
import numpy as np
class Environment(ABC):
def __init__(self, programs_library, prog_to_func, prog_to_precondition, prog_to_postcondition, arguments):
"""
Args:
programs_library (dict): Maps a program name to a level and a bool indicating whether recursive
prog_to_func (dict): Maps 0 level programs to their implementation function
prog_to_precondition (dict): Maps a program name to the function that states whether its preconditions are fulfilled
prog_to_postcondition (dict): Maps a program name to the function that states whether its postconditions are fulfilled
"""
super().__init__()
self.programs_library = programs_library
self.arguments = arguments
self.prog_to_func = prog_to_func
self.prog_to_precondition = prog_to_precondition
self.prog_to_postcondition = prog_to_postcondition
self.programs = list(self.programs_library.keys())
self.primary_actions = [prog for prog in self.programs_library if self.programs_library[prog]['level'] <= 0]
self.mask = dict((p, self._get_available_actions(p)) for p in self.programs_library if self.programs_library[p]["level"] > 0)
# correct mask for recursive programs
for program_name, program_mask in self.mask.items():
if self.programs_library[program_name]['recursive']:
program_mask[self.programs_library[program_name]['index']] = 1
self.prog_to_idx = dict((prog, elems["index"]) for prog, elems in self.programs_library.items())
self.idx_to_prog = dict((idx, prog) for (prog, idx) in self.prog_to_idx.items())
self.maximum_level = max([x['level'] for prog, x in self.programs_library.items()])
self.current_task_index = None
self.tasks_dict = {}
self.tasks_list = []
self.has_been_reset = False
def get_maximum_level(self):
"""
Returns the maximum program level.
Returns:
maximum level
"""
return self.maximum_level
def _get_available_actions(self, program):
"""
Args:
program (str): program name
Returns:
mask
"""
level_prog = self.programs_library[program]["level"]
assert level_prog > 0
mask = np.zeros(len(self.programs))
for prog, elems in self.programs_library.items():
if elems["level"] < level_prog:
mask[elems["index"]] = 1
return mask
def get_program_from_index(self, program_index):
"""Returns the program name from its index.
Args:
program_index: index of desired program
Returns:
the program name corresponding to program index
"""
return self.idx_to_prog[program_index]
def get_num_non_primary_programs(self):
"""Returns the number of programs with level > 0.
Returns:
the number of available programs of level > 0 (the number of non primary programs)
"""
return len(self.programs) - len(self.primary_actions)
def get_num_programs(self):
"""Returns the number of available programs.
Returns:
the number of available programs (all levels)
"""
return len(self.programs)
def get_program_level_from_index(self, program_index):
"""
Args:
program_index: program index
Returns:
the level of the program
"""
program = self.get_program_from_index(program_index)
return self.programs_library[program]['level']
def get_reward(self):
"""Returns a reward for the current task at hand.
Returns:
1 if the task at hand has been solved, 0 otherwise.
"""
task_init_state = self.tasks_dict[len(self.tasks_list)]
state = self.get_state()
current_task = self.get_program_from_index(self.current_task_index)
current_task_postcondition = self.prog_to_postcondition[current_task]
return int(current_task_postcondition(task_init_state, state))
def start_task(self, task_index):
"""Function used to begin a task. The task at hand defines the reward signal and stop boolean
returned by the function step. This function resets the environment as well.
Args:
task_index: the index corresponding to the program(task) to start
Returns:
the environment observation
"""
task_name = self.get_program_from_index(task_index)
assert self.prog_to_precondition[task_name], 'cant start task {} ' \
'because its precondition is not verified'.format(task_name)
self.current_task_index = task_index
self.tasks_list.append(task_index)
state_index = -1
total_size = -1
if len(self.tasks_dict.keys()) == 0:
# reset env
state_index, total_size = self.reset_env()
# store init state
init_state = self.get_state()
self.tasks_dict[len(self.tasks_list)] = init_state
return self.get_observation(), state_index, total_size
def end_task(self):
"""
Ends the last tasks that has been started.
"""
del self.tasks_dict[len(self.tasks_list)]
self.tasks_list.pop()
if self.tasks_list:
self.current_task_index = self.tasks_list[-1]
else:
self.current_task_index = None
self.has_been_reset = False
def end_all_tasks(self):
self.tasks_dict = {}
self.tasks_list = []
self.has_been_reset = False
def act(self, primary_action, arguments=None):
"""Apply a primary action that modifies the environment.
Args:
primary_action: action to apply
arguments: the arguments which needs to be given to the function
Returns:
the environment observation after the action has been applied
"""
assert self.has_been_reset, 'Need to reset the environment before acting'
assert primary_action in self.primary_actions, 'action {} is not defined'.format(primary_action)
self.prog_to_func[primary_action](arguments)
return self.get_observation()
def render(self):
"""Print a graphical representation of the current environment state"""
assert self.has_been_reset, 'Need to reset the environment before rendering'
s = self.get_state()
str = self.get_state_str(s)
print(str)
def get_mask_over_actions(self, program_index):
"""Returns the mask of possible programs to call given the current program.
Args:
program_index: index of program for which is wanted the mask of possible programs to call
Returns:
mask of possible programs to call
"""
program = self.get_program_from_index(program_index)
assert program in self.mask, "Error program {} provided is level 0".format(program)
mask = self.mask[program].copy()
# remove actions when pre-condition not satisfied
for program, program_dict in self.programs_library.items():
if not self.prog_to_precondition[program]():
mask[program_dict['index']] = 0
return mask
def get_mask_over_args(self, program_index):
"""
Return the available arguments which can be called by that given program
:param program_index: the program index
:return: a max over the available arguments
"""
program = self.get_program_from_index(program_index)
permitted_arguments = self.programs_library[program]["args"]
mask = np.zeros(len(self.arguments))
for i in range(len(self.arguments)):
if sum(self.arguments[i]) in permitted_arguments:
mask[i] = 1
return mask
@abstractmethod
def compare_state(self, state1, state2):
"""Compares two states to determine whether they are the same state.
Args:
state1 (tuple): Describes the environment
state2 (tuple): Describes the environment
returns:
bool: The return value. True if state1 and state2 are the same, False otherwise.
"""
pass
@abstractmethod
def reset_env(self):
pass
@abstractmethod
def get_state(self):
pass
@abstractmethod
def get_observation(self):
pass
@abstractmethod
def get_observation_dim(self):
pass
@abstractmethod
def reset_to_state(self, state):
"""
Args:
state (tuple): Describes the environment state
"""
pass
@abstractmethod
def get_state_str(self, state):
"""
Args:
state (tuple): Describes the environment state
Returns:
String describes the environment in a more human-friendly way
"""
pass
@abstractmethod
def update_failing_envs(self, state, program_name):
"""
Update failing environments.
:param state: current failed state
:param program_name: current failed program
:return:
"""
pass | from abc import ABC, abstractmethod
import numpy as np
class Environment(ABC):
def __init__(self, programs_library, prog_to_func, prog_to_precondition, prog_to_postcondition, arguments):
"""
Args:
programs_library (dict): Maps a program name to a level and a bool indicating whether recursive
prog_to_func (dict): Maps 0 level programs to their implementation function
prog_to_precondition (dict): Maps a program name to the function that states whether its preconditions are fulfilled
prog_to_postcondition (dict): Maps a program name to the function that states whether its postconditions are fulfilled
"""
super().__init__()
self.programs_library = programs_library
self.arguments = arguments
self.prog_to_func = prog_to_func
self.prog_to_precondition = prog_to_precondition
self.prog_to_postcondition = prog_to_postcondition
self.programs = list(self.programs_library.keys())
self.primary_actions = [prog for prog in self.programs_library if self.programs_library[prog]['level'] <= 0]
self.mask = dict((p, self._get_available_actions(p)) for p in self.programs_library if self.programs_library[p]["level"] > 0)
# correct mask for recursive programs
for program_name, program_mask in self.mask.items():
if self.programs_library[program_name]['recursive']:
program_mask[self.programs_library[program_name]['index']] = 1
self.prog_to_idx = dict((prog, elems["index"]) for prog, elems in self.programs_library.items())
self.idx_to_prog = dict((idx, prog) for (prog, idx) in self.prog_to_idx.items())
self.maximum_level = max([x['level'] for prog, x in self.programs_library.items()])
self.current_task_index = None
self.tasks_dict = {}
self.tasks_list = []
self.has_been_reset = False
def get_maximum_level(self):
"""
Returns the maximum program level.
Returns:
maximum level
"""
return self.maximum_level
def _get_available_actions(self, program):
"""
Args:
program (str): program name
Returns:
mask
"""
level_prog = self.programs_library[program]["level"]
assert level_prog > 0
mask = np.zeros(len(self.programs))
for prog, elems in self.programs_library.items():
if elems["level"] < level_prog:
mask[elems["index"]] = 1
return mask
def get_program_from_index(self, program_index):
"""Returns the program name from its index.
Args:
program_index: index of desired program
Returns:
the program name corresponding to program index
"""
return self.idx_to_prog[program_index]
def get_num_non_primary_programs(self):
"""Returns the number of programs with level > 0.
Returns:
the number of available programs of level > 0 (the number of non primary programs)
"""
return len(self.programs) - len(self.primary_actions)
def get_num_programs(self):
"""Returns the number of available programs.
Returns:
the number of available programs (all levels)
"""
return len(self.programs)
def get_program_level_from_index(self, program_index):
"""
Args:
program_index: program index
Returns:
the level of the program
"""
program = self.get_program_from_index(program_index)
return self.programs_library[program]['level']
def get_reward(self):
"""Returns a reward for the current task at hand.
Returns:
1 if the task at hand has been solved, 0 otherwise.
"""
task_init_state = self.tasks_dict[len(self.tasks_list)]
state = self.get_state()
current_task = self.get_program_from_index(self.current_task_index)
current_task_postcondition = self.prog_to_postcondition[current_task]
return int(current_task_postcondition(task_init_state, state))
def start_task(self, task_index):
"""Function used to begin a task. The task at hand defines the reward signal and stop boolean
returned by the function step. This function resets the environment as well.
Args:
task_index: the index corresponding to the program(task) to start
Returns:
the environment observation
"""
task_name = self.get_program_from_index(task_index)
assert self.prog_to_precondition[task_name], 'cant start task {} ' \
'because its precondition is not verified'.format(task_name)
self.current_task_index = task_index
self.tasks_list.append(task_index)
state_index = -1
total_size = -1
if len(self.tasks_dict.keys()) == 0:
# reset env
state_index, total_size = self.reset_env()
# store init state
init_state = self.get_state()
self.tasks_dict[len(self.tasks_list)] = init_state
return self.get_observation(), state_index, total_size
def end_task(self):
"""
Ends the last tasks that has been started.
"""
del self.tasks_dict[len(self.tasks_list)]
self.tasks_list.pop()
if self.tasks_list:
self.current_task_index = self.tasks_list[-1]
else:
self.current_task_index = None
self.has_been_reset = False
def end_all_tasks(self):
self.tasks_dict = {}
self.tasks_list = []
self.has_been_reset = False
def act(self, primary_action, arguments=None):
"""Apply a primary action that modifies the environment.
Args:
primary_action: action to apply
arguments: the arguments which needs to be given to the function
Returns:
the environment observation after the action has been applied
"""
assert self.has_been_reset, 'Need to reset the environment before acting'
assert primary_action in self.primary_actions, 'action {} is not defined'.format(primary_action)
self.prog_to_func[primary_action](arguments)
return self.get_observation()
def render(self):
"""Print a graphical representation of the current environment state"""
assert self.has_been_reset, 'Need to reset the environment before rendering'
s = self.get_state()
str = self.get_state_str(s)
print(str)
def get_mask_over_actions(self, program_index):
"""Returns the mask of possible programs to call given the current program.
Args:
program_index: index of program for which is wanted the mask of possible programs to call
Returns:
mask of possible programs to call
"""
program = self.get_program_from_index(program_index)
assert program in self.mask, "Error program {} provided is level 0".format(program)
mask = self.mask[program].copy()
# remove actions when pre-condition not satisfied
for program, program_dict in self.programs_library.items():
if not self.prog_to_precondition[program]():
mask[program_dict['index']] = 0
return mask
def get_mask_over_args(self, program_index):
"""
Return the available arguments which can be called by that given program
:param program_index: the program index
:return: a max over the available arguments
"""
program = self.get_program_from_index(program_index)
permitted_arguments = self.programs_library[program]["args"]
mask = np.zeros(len(self.arguments))
for i in range(len(self.arguments)):
if sum(self.arguments[i]) in permitted_arguments:
mask[i] = 1
return mask
@abstractmethod
def compare_state(self, state1, state2):
"""Compares two states to determine whether they are the same state.
Args:
state1 (tuple): Describes the environment
state2 (tuple): Describes the environment
returns:
bool: The return value. True if state1 and state2 are the same, False otherwise.
"""
pass
@abstractmethod
def reset_env(self):
pass
@abstractmethod
def get_state(self):
pass
@abstractmethod
def get_observation(self):
pass
@abstractmethod
def get_observation_dim(self):
pass
@abstractmethod
def reset_to_state(self, state):
"""
Args:
state (tuple): Describes the environment state
"""
pass
@abstractmethod
def get_state_str(self, state):
"""
Args:
state (tuple): Describes the environment state
Returns:
String describes the environment in a more human-friendly way
"""
pass
@abstractmethod
def update_failing_envs(self, state, program_name):
"""
Update failing environments.
:param state: current failed state
:param program_name: current failed program
:return:
"""
pass | en | 0.7979 | Args: programs_library (dict): Maps a program name to a level and a bool indicating whether recursive prog_to_func (dict): Maps 0 level programs to their implementation function prog_to_precondition (dict): Maps a program name to the function that states whether its preconditions are fulfilled prog_to_postcondition (dict): Maps a program name to the function that states whether its postconditions are fulfilled # correct mask for recursive programs Returns the maximum program level. Returns: maximum level Args: program (str): program name Returns: mask Returns the program name from its index. Args: program_index: index of desired program Returns: the program name corresponding to program index Returns the number of programs with level > 0. Returns: the number of available programs of level > 0 (the number of non primary programs) Returns the number of available programs. Returns: the number of available programs (all levels) Args: program_index: program index Returns: the level of the program Returns a reward for the current task at hand. Returns: 1 if the task at hand has been solved, 0 otherwise. Function used to begin a task. The task at hand defines the reward signal and stop boolean returned by the function step. This function resets the environment as well. Args: task_index: the index corresponding to the program(task) to start Returns: the environment observation # reset env # store init state Ends the last tasks that has been started. Apply a primary action that modifies the environment. Args: primary_action: action to apply arguments: the arguments which needs to be given to the function Returns: the environment observation after the action has been applied Print a graphical representation of the current environment state Returns the mask of possible programs to call given the current program. 
Args: program_index: index of program for which is wanted the mask of possible programs to call Returns: mask of possible programs to call # remove actions when pre-condition not satisfied Return the available arguments which can be called by that given program :param program_index: the program index :return: a max over the available arguments Compares two states to determine whether they are the same state. Args: state1 (tuple): Describes the environment state2 (tuple): Describes the environment returns: bool: The return value. True if state1 and state2 are the same, False otherwise. Args: state (tuple): Describes the environment state Args: state (tuple): Describes the environment state Returns: String describes the environment in a more human-friendly way Update failing environments. :param state: current failed state :param program_name: current failed program :return: | 3.251057 | 3 |
src/py/statiskit/core/distribution.py | StatisKit/Core | 0 | 6624687 | <reponame>StatisKit/Core
from functools import wraps
import math
from statiskit import linalg
from statiskit import stl
from . import _core
from .__core.statiskit import (_ShiftedDistribution,
UnivariateDistribution,
_UnivariateFrequencyDistribution,
_QuantitativeUnivariateFrequencyDistribution,
CategoricalUnivariateDistribution,
BinaryDistribution,
NominalDistribution,
OrdinalDistribution,
HierarchicalDistribution,
CategoricalUnivariateMixtureDistribution,
CategoricalUnivariateDistributionVector,
DiscreteUnivariateDistribution,
DiscreteUnivariateFrequencyDistribution,
PoissonDistribution,
BinomialDistribution,
LogarithmicDistribution,
GeometricDistribution,
NegativeBinomialDistribution,
BetaCompoundDiscreteUnivariateDistribution,
BetaBinomialDistribution,
BetaNegativeBinomialDistribution,
DiscreteUnivariateMixtureDistribution,
DiscreteUnivariateDistributionVector,
ContinuousUnivariateDistribution,
ContinuousUnivariateFrequencyDistribution,
UnivariateHistogramDistribution,
NormalDistribution,
LogisticDistribution,
LaplaceDistribution,
CauchyDistribution,
StudentDistribution,
NonStandardStudentDistribution,
GumbelDistribution,
GompertzDistribution,
ExponentialDistribution,
GammaDistribution,
BetaDistribution,
ContinuousUnivariateMixtureDistribution,
ContinuousUnivariateDistributionVector,
MultivariateDistribution,
# _IndependentMultivariateDistribution,
MixedMultivariateMixtureDistribution,
CategoricalMultivariateDistribution,
# CategoricalIndependentMultivariateDistribution,
CategoricalMultivariateMixtureDistribution,
CategoricalMultivariateDistributionVector,
DiscreteMultivariateDistribution,
SplittingDistribution,
# DiscreteIndependentMultivariateDistribution,
DiscreteMultivariateMixtureDistribution,
DiscreteMultivariateDistributionVector,
ContinuousMultivariateDistribution,
MultinormalDistribution,
DirichletDistribution,
# ContinuousIndependentMultivariateDistribution,
ContinuousMultivariateMixtureDistribution,
ContinuousMultivariateDistributionVector,
MultivariateDistributionVector,
_MixtureDistribution, _UnivariateMixtureDistribution, _QuantitativeUnivariateMixtureDistribution, _MultivariateMixtureDistribution,
UnivariateConditionalDistribution,
CategoricalUnivariateConditionalDistribution,
DiscreteUnivariateConditionalDistribution,
ContinuousUnivariateConditionalDistribution,
MultivariateConditionalDistribution,
CategoricalMultivariateConditionalDistribution,
DiscreteMultivariateConditionalDistribution,
ContinuousMultivariateConditionalDistribution)
from .optionals import pyplot, numpy
from .io import from_list
from .controls import controls
from .event import (UnivariateEvent,
CategoricalEvent,
CategoricalElementaryEvent,
DiscreteEvent,
DiscreteElementaryEvent,
ContinuousEvent,
ContinuousElementaryEvent,
MultivariateEvent,
VectorEvent,
type_to_event,
types_to_event)
from .data import (UnivariateData,
UnivariateDataFrame,
MultivariateData,
MultivariateDataFrame)
from .sample_space import (NominalSampleSpace,
OrdinalSampleSpace)
from ._tools import float_str, remove_latex
# Public API of this module: the distribution classes re-exported after the
# monkey-patching below has attached properties, plotting helpers and reprs.
__all__ = ['BinaryDistribution',
           'NominalDistribution',
           'OrdinalDistribution',
           'HierarchicalDistribution',
           'DiscreteUnivariateFrequencyDistribution',
           'PoissonDistribution',
           'BinomialDistribution',
           'LogarithmicDistribution',
           'GeometricDistribution',
           'NegativeBinomialDistribution',
           'BetaBinomialDistribution',
           'BetaNegativeBinomialDistribution',
           'ContinuousUnivariateFrequencyDistribution',
           'UnivariateHistogramDistribution',
           'NormalDistribution',
           'LogisticDistribution',
           'LaplaceDistribution',
           'CauchyDistribution',
           'StudentDistribution',
           'NonStandardStudentDistribution',
           'GumbelDistribution',
           'GompertzDistribution',
           'ExponentialDistribution',
           'GammaDistribution',
           'BetaDistribution',
           'SplittingDistribution',
           'MultinormalDistribution',
           'DirichletDistribution',
           # 'IndependentMultivariateDistribution',
           'MixtureDistribution']
def shifted_distribution_decorator(cls):
    """Attach ``distribution``/``shift`` properties and a shift-aware repr to a shifted-distribution class."""
    cls.distribution = property(cls.get_distribution, cls.set_distribution)
    del cls.get_distribution, cls.set_distribution
    cls.shift = property(cls.get_shift, cls.set_shift)
    # Bug fix: the bare expression `cls.get_shift, cls.set_shift` was a no-op;
    # delete the raw accessors as done for every other patched property.
    del cls.get_shift, cls.set_shift

    def __str__(self):
        # Re-open the wrapped distribution's textual form and append the shift.
        return self.distribution.__str__()[:-1] + ", " + str(self.shift) + ")"
    cls.__str__ = __str__
    cls.__repr__ = __str__

    def _repr_latex_(self):
        # Strip the trailing `\right)$` (8 chars) of the wrapped latex form.
        return self.distribution._repr_latex_()[:-8] + ", " + str(self.shift) + r"\right)$"
    cls._repr_latex_ = _repr_latex_
# Decorate every concrete shifted-distribution template instantiation.
for cls in _ShiftedDistribution:
    shifted_distribution_decorator(cls)
# Expose the parameter count as a read-only property on the base class.
UnivariateDistribution.nb_parameters = property(UnivariateDistribution.get_nb_parameters)
del UnivariateDistribution.get_nb_parameters
def wrapper_probability(f):
    """Wrap a raw ``probability`` method so plain Python scalars are coerced to events."""
    @wraps(f)
    def probability(self, event, **kwargs):
        # Coerce bare scalars into elementary events; order matters (str first,
        # then int, then float) to mirror the supported event kinds.
        for scalar_type, as_event in ((str, CategoricalElementaryEvent),
                                      (int, DiscreteElementaryEvent),
                                      (float, ContinuousElementaryEvent)):
            if isinstance(event, scalar_type):
                event = as_event(event)
                break
        else:
            if not isinstance(event, UnivariateEvent):
                raise TypeError('\'event\' parameter')
        # `log` selects log-probabilities; it defaults to False.
        return f(self, event, kwargs.pop('log', False))
    return probability
# Patch the scalar-coercing wrapper onto the base class; subclasses inherit it.
UnivariateDistribution.probability = wrapper_probability(UnivariateDistribution.probability)
def simulation(self, size):
    """Draw ``size`` events from this distribution into a fresh univariate data frame."""
    # Pick the sample space matching the concrete distribution family.
    if isinstance(self, NominalDistribution):
        space = NominalSampleSpace(self.values)
    elif isinstance(self, OrdinalDistribution):
        space = OrdinalSampleSpace(self.ordered_values)
    elif isinstance(self, DiscreteUnivariateDistribution):
        space = controls.ZZ
    elif isinstance(self, ContinuousUnivariateDistribution):
        space = controls.RR
    else:
        raise NotImplementedError()
    frame = UnivariateDataFrame(space)
    for _ in range(size):
        frame.add_event(self.simulate())
    return frame

UnivariateDistribution.simulation = simulation
del simulation
def pdf_plot(self, axes=None, fmt='|', **kwargs):
    """Plot the probability mass of a categorical distribution (bars, line or pie)."""
    if axes is None:
        axes = pyplot.subplot(1, 1, 1)
    # Ordinal distributions expose `ordered_values`; nominal ones only `values`.
    categories = getattr(self, 'ordered_values', getattr(self, 'values'))
    indexed = list(enumerate(categories))
    positions, labels = zip(*indexed)
    masses = [self.probability(label, log=False) for label in labels]
    if 'norm' in kwargs:
        scale = kwargs.pop('norm')
        masses = [scale * p for p in masses]
    if fmt == 'pie':
        kwargs.setdefault('autopct', '%.2f')
        axes.pie(masses, labels=labels, **kwargs)
    else:
        if '|' in fmt:
            fmt = fmt.replace('|', '')
            width = kwargs.pop('width', .8)
            if not 0 < width <= 1.:
                raise ValueError('\'width\' parameter must be strictly superior to 0. and inferior to 1.')
            axes.bar([center - width / 2. for center in positions], masses, width, align='center', **kwargs)
        if fmt:
            axes.plot(positions, masses, fmt, **kwargs)
    axes.set_xticks(positions)
    axes.set_xticklabels(labels)
    return axes

CategoricalUnivariateDistribution.pdf_plot = pdf_plot
del pdf_plot

# Values of a categorical distribution as a read-only property.
CategoricalUnivariateDistribution.values = property(CategoricalUnivariateDistribution.get_values)
del CategoricalUnivariateDistribution.get_values
def wrapper(f):
    """Let ``NominalDistribution(*values, attr=...)`` forward its positional arguments as a string set."""
    @wraps(f)
    def __init__(self, *args, **kwargs):
        f(self, stl.SetLessString(*args))
        # Assign remaining keywords as attributes, rejecting unknown names.
        for name in list(kwargs):
            if not hasattr(self, name):
                raise AttributeError("'" + self.__class__.__name__ + "' object has no attribute '" + name + "'")
            setattr(self, name, kwargs.pop(name))
    return __init__

NominalDistribution.__init__ = wrapper(NominalDistribution.__init__)
def wrapper(f):
    """Let ``OrdinalDistribution(*values, attr=...)`` forward its positional arguments as an ordered vector."""
    @wraps(f)
    def __init__(self, *args, **kwargs):
        f(self, stl.VectorString(*args))
        # Assign remaining keywords as attributes, rejecting unknown names.
        for name in list(kwargs):
            if not hasattr(self, name):
                raise AttributeError("'" + self.__class__.__name__ + "' object has no attribute '" + name + "'")
            setattr(self, name, kwargs.pop(name))
    return __init__

OrdinalDistribution.__init__ = wrapper(OrdinalDistribution.__init__)
#HierarchicalDistribution.__init__ = wrapper(HierarchicalDistribution.__init__)
# Read/write parameter properties replacing the raw getter/setter pairs.
BinaryDistribution.pi = property(BinaryDistribution.get_pi, BinaryDistribution.set_pi)
del BinaryDistribution.get_pi, BinaryDistribution.set_pi
OrdinalDistribution.rank = property(OrdinalDistribution.get_rank, OrdinalDistribution.set_rank)
del OrdinalDistribution.get_rank, OrdinalDistribution.set_rank
# def wrapper(f):
#     @wraps(f)
#     def get_ordered(self):
#         values = f(self)
#         return [CategoricalElementaryEvent(value) for value in values]
#     return get_ordered
OrdinalDistribution.ordered_values = property(OrdinalDistribution.get_ordered_values, OrdinalDistribution.set_ordered_values)
del OrdinalDistribution.get_ordered_values, OrdinalDistribution.set_ordered_values
OrdinalDistribution.ordered_pi = property(OrdinalDistribution.get_ordered_pi, OrdinalDistribution.set_ordered_pi)
del OrdinalDistribution.get_ordered_pi, OrdinalDistribution.set_ordered_pi
def cdf_plot(self, axes=None, fmt='|', **kwargs):
    """Plot the cumulative distribution of an ordinal distribution."""
    if axes is None:
        axes = pyplot.subplot(1, 1, 1)
    indexed = list(enumerate(self.ordered_values))
    x, labels = zip(*indexed)
    if 'norm' in kwargs:
        scale = kwargs.pop('norm')
        masses = [scale * p for p in self.pi]
    else:
        masses = [p for p in self.pi]
    # Reorder the probabilities along the rank vector, then accumulate.
    masses = [masses[i] for i in self.rank]
    cumulated, total = [], 0
    for p in masses:
        total = total + p
        cumulated.append(total)
    if '|' in fmt:
        fmt = fmt.replace('|', '')
        width = kwargs.pop('width', .8)
        if not 0 < width <= 1.:
            raise ValueError('\'width\' parameter must be strictly superior to 0. and inferior to 1.')
        # Probability-bound keywords make no sense for bars; drop them silently.
        kwargs.pop('pmin', None)
        kwargs.pop('pmax', None)
        axes.bar([center - width / 2. for center in x], cumulated, width, align='center', **kwargs)
    else:
        axes.plot(x, cumulated, fmt, **kwargs)
    axes.set_xticks(x)
    axes.set_xticklabels(labels)
    return axes

OrdinalDistribution.cdf_plot = cdf_plot
del cdf_plot
def box_plot(self, axes=None, edgecolor="k", width=.5, vert=True, whiskers=(.09,0.91), pos=1, **kwargs):
    # Draw a box-and-whisker plot of an ordinal distribution.  Boxes are laid
    # out on the integer positions (ranks) of the ordered values and the axis
    # is labelled with the values themselves.  `whiskers` is a pair of
    # probabilities giving the whisker quantiles, `pos` the coordinate of the
    # box on the categorical axis, `vert` the orientation.  Returns the axes.
    if axes is None:
        axes = pyplot.subplot(1,1,1)
    elif not isinstance(axes, pyplot.Axes):
        raise TypeError('`axes` parameter')
    if not len(whiskers) == 2:
        raise IndexError('`whiskers` parameter')
    if not all([isinstance(i, float) for i in whiskers]):
        raise TypeError('`whiskers` parameter')
    if not all([0. <= i <= 1. for i in whiskers]):
        raise ValueError('`whiskers` parameter')
    # NOTE(review): relies on `self.ordered`; elsewhere this module patches
    # `ordered_values` onto OrdinalDistribution — confirm this attribute exists.
    values = [value.value for value in self.ordered]
    # Quartiles and whisker ends expressed as integer positions in `values`.
    qb = values.index(self.quantile(min(whiskers)))
    q1 = values.index(self.quantile(.25))
    q2 = values.index(self.quantile(.5))
    q3 = values.index(self.quantile(.75))
    qe = values.index(self.quantile(max(whiskers)))
    facecolor = kwargs.pop('facecolor', next(axes._get_lines.prop_cycler)['color'])
#    facecolor = kwargs.pop('facecolor', axes._get_lines.get_next_color())
    if not(qb <= q1 <= q2 <= q3 <= qe):
        raise ValueError('`whiskers` parameter')
    if vert:
        # Box spans [q1, q3]; strokes mark the median and the two whiskers.
        axes.bar(pos, q3-q1, width, q1, facecolor=facecolor, edgecolor=edgecolor, align='center')
        axes.plot([pos-width/2., pos+width/2.], [q2, q2], color=edgecolor)
        axes.plot([pos-width/2., pos+width/2.], [qb, qb], color=edgecolor)
        axes.plot([pos-width/2., pos+width/2.], [qe, qe], color=edgecolor)
        axes.plot([pos, pos], [qb, q1], color=edgecolor)
        axes.plot([pos, pos], [q3, qe], color=edgecolor)
        axes.set_yticks(list(range(len(values))))
        axes.set_yticklabels(values)
    else:
        axes.bar(q1, width, q3-q1, pos-width/2., facecolor=facecolor, edgecolor=edgecolor)
        axes.plot([q2, q2], [pos-width/2., pos+width/2.], color=edgecolor)
        axes.plot([qb, qb], [pos-width/2., pos+width/2.], color=edgecolor)
        axes.plot([qe, qe], [pos-width/2., pos+width/2.], color=edgecolor)
        axes.plot([qb, q1], [pos, pos], color=edgecolor)
        axes.plot([q3, qe], [pos, pos], color=edgecolor)
        axes.set_xticks(list(range(len(values))))
        axes.set_xticklabels(values)
    return axes
OrdinalDistribution.box_plot = box_plot
del box_plot
def quantitative_univariate_frequency_distribution_decorator(cls):
#    cls.mean = property(cls.get_mean)
#    del cls.get_mean
#    cls.variance = property(cls.get_variance)
#    del cls.get_variance
    # Placeholder kept for symmetry with the other per-template decorators;
    # the moment properties above are currently disabled.
    pass
for cls in _QuantitativeUnivariateFrequencyDistribution:
    quantitative_univariate_frequency_distribution_decorator(cls)
# First two moments of discrete distributions as read-only properties.
DiscreteUnivariateDistribution.mean = property(DiscreteUnivariateDistribution.get_mean)
del DiscreteUnivariateDistribution.get_mean
DiscreteUnivariateDistribution.variance = property(DiscreteUnivariateDistribution.get_variance)
del DiscreteUnivariateDistribution.get_variance
def pdf_plot(self, axes=None, fmt='|', **kwargs):
    """Plot the probability mass function of a discrete distribution."""
    if axes is None:
        axes = pyplot.subplot(1, 1, 1)
    else:
        # Default the quantile bounds to the current axis limits.
        current_qmin, current_qmax = axes.get_xlim()
        if 'qmin' not in kwargs and 'pmin' not in kwargs:
            kwargs['qmin'] = int(current_qmin)
        if 'qmax' not in kwargs and 'pmax' not in kwargs:
            kwargs['qmax'] = int(current_qmax)
    if 'quantiles' in kwargs:
        quantiles = kwargs.pop('quantiles')
    else:
        lower = kwargs.pop('qmin') if 'qmin' in kwargs else self.quantile(kwargs.pop('pmin', 0.025))
        upper = kwargs.pop('qmax') if 'qmax' in kwargs else self.quantile(kwargs.pop('pmax', 0.975))
        quantiles = list(range(lower, upper + 1))
    masses = [self.pdf(q) for q in quantiles]
    if 'norm' in kwargs:
        scale = kwargs.pop('norm')
        masses = [scale * p for p in masses]
    if '|' in fmt:
        fmt = fmt.replace('|', '')
        width = kwargs.pop('width', .2)
        if not 0 < width <= 1.:
            raise ValueError('\'width\' parameter must be strictly superior to 0. and inferior to 1.')
        axes.bar([q - width / 2. for q in quantiles], masses, width, align='center', **kwargs)
    if len(fmt) > 0:
        axes.plot(quantiles, masses, fmt, **kwargs)
    return axes

DiscreteUnivariateDistribution.pdf_plot = pdf_plot
del pdf_plot
def cdf_plot(self, axes=None, fmt='o-', **kwargs):
    """Plot the cumulative distribution function of a discrete distribution as a step plot."""
    if axes is None:
        axes = pyplot.subplot(1, 1, 1)
    else:
        current_qmin, current_qmax = axes.get_xlim()
        if 'qmin' not in kwargs and 'pmin' not in kwargs:
            kwargs['qmin'] = int(current_qmin)
        if 'qmax' not in kwargs and 'pmax' not in kwargs:
            kwargs['qmax'] = int(current_qmax)
    # NOTE: the bound keywords are popped (and the default grid computed) even
    # when an explicit `quantiles` list is given — this mirrors the eager
    # default-argument evaluation of the original one-liner.
    lower = kwargs.pop('qmin', self.quantile(kwargs.pop('pmin', 0.025)))
    upper = kwargs.pop('qmax', self.quantile(kwargs.pop('pmax', 0.975)))
    quantiles = kwargs.pop('quantiles', list(range(lower, upper + 1)))
    probabilities = [self.cdf(q) for q in quantiles]
    if 'norm' in kwargs:
        scale = kwargs.pop('norm')
        probabilities = [scale * p for p in probabilities]
    if 'o' in fmt:
        axes.plot(quantiles, probabilities, 'o', **kwargs)
        fmt = fmt.replace('o', '')
    if len(fmt) > 0:
        # Draw each step as a horizontal segment from q to q + 1.
        for position, q in enumerate(quantiles):
            axes.plot([q, q + 1], [probabilities[position], probabilities[position]], fmt, **kwargs)
    return axes

DiscreteUnivariateDistribution.cdf_plot = cdf_plot
del cdf_plot
def box_plot(self, axes=None, edgecolor="k", width=.5, vert=True, whiskers=(.09,0.91), pos=1, mean=None, sd=None, marker='o', **kwargs):
    """Draw a box-and-whisker plot of a quantitative univariate distribution.

    `whiskers` is a pair of probabilities defining the whisker quantiles.
    `mean` and `sd` may each be None (auto: drawn when they fit inside the
    whiskers), True (forced), False (suppressed) or a numeric override; the
    +/- sd segment is centered on the mean, or the median as a fallback.
    Returns the matplotlib axes.
    """
    if axes is None:
        axes = pyplot.subplot(1,1,1)
    elif not isinstance(axes, pyplot.Axes):
        raise TypeError('`axes` parameter')
    if not len(whiskers) == 2:
        raise IndexError('`whiskers` parameter')
    if not all([isinstance(i, float) for i in whiskers]):
        raise TypeError('`whiskers` parameter')
    if not all([0. <= i <= 1. for i in whiskers]):
        raise ValueError('`whiskers` parameter')
    # Whisker ends and quartiles as quantiles of the distribution.
    qb = self.quantile(min(whiskers))
    q1 = self.quantile(.25)
    q2 = self.quantile(.5)
    q3 = self.quantile(.75)
    qe = self.quantile(max(whiskers))
    facecolor = kwargs.pop('facecolor', next(axes._get_lines.prop_cycler)['color'])
    # NOTE(review): `prop_cycler` is removed in matplotlib >= 3.8; the
    # replacement is `axes._get_lines.get_next_color()`.
    if vert:
        axes.bar(pos, q3-q1, width, q1, facecolor=facecolor, edgecolor=edgecolor, align='center')
        axes.plot([pos-width/2., pos+width/2.], [q2, q2], color=edgecolor)
        axes.plot([pos-width/2., pos+width/2.], [qb, qb], color=edgecolor)
        axes.plot([pos-width/2., pos+width/2.], [qe, qe], color=edgecolor)
        axes.plot([pos, pos], [qb, q1], color=edgecolor)
        axes.plot([pos, pos], [q3, qe], color=edgecolor)
    else:
        axes.bar(q1, width, q3-q1, pos-width/2., facecolor=facecolor, edgecolor=edgecolor)
        axes.plot([q2, q2], [pos-width/2., pos+width/2.], color=edgecolor)
        axes.plot([qb, qb], [pos-width/2., pos+width/2.], color=edgecolor)
        axes.plot([qe, qe], [pos-width/2., pos+width/2.], color=edgecolor)
        axes.plot([qb, q1], [pos, pos], color=edgecolor)
        axes.plot([q3, qe], [pos, pos], color=edgecolor)
    if mean is None:
        mean = self.mean
        if not qb <= mean <= qe:
            mean = False
    elif mean is True:
        mean = self.mean
    # Bug fix: `if mean:` silently skipped a legitimate mean of 0.0 — test
    # identity against False instead of truthiness.
    if mean is not False:
        if vert:
            axes.plot([pos], [mean], linestyle='None', marker=marker, markeredgecolor=edgecolor, markerfacecolor=facecolor)
        else:
            axes.plot([mean], [pos], linestyle='None', marker=marker, markeredgecolor=edgecolor, markerfacecolor=facecolor)
    if mean is False and sd is not False:
        # Fall back on the median as the centre of the +/- sd segment.
        mean = q2
    if sd is None:
        sd = math.sqrt(self.variance)
        # Bug fix: `not qb <= mean - sd and mean + sd <= qe` only negated the
        # first comparison; the segment must be suppressed whenever it does
        # not fit entirely inside the whiskers.
        if not (qb <= mean - sd and mean + sd <= qe):
            sd = False
    elif sd is True:
        sd = math.sqrt(self.variance)
    if sd is not False:
        if vert:
            axes.plot([pos, pos], [mean - sd, mean + sd], linestyle='None', marker=marker, markeredgecolor=edgecolor, markerfacecolor=facecolor)
        else:
            axes.plot([mean - sd, mean + sd], [pos, pos], linestyle='None', marker=marker, markeredgecolor=edgecolor, markerfacecolor=facecolor)
    return axes

DiscreteUnivariateDistribution.box_plot = box_plot
ContinuousUnivariateDistribution.box_plot = box_plot
del box_plot
#def lorenz_plot(self, axes=None, fmt='o-', color='r', alpha=1., equality=True, **kwargs):
# if axes is None:
# axes = pyplot.subplot(1,1,1)
# else:
# qmin, qmax = axes.get_xlim()
# if 'qmin' not in kwargs and 'pmin' not in kwargs:
# kwargs['qmin'] = int(qmin)
# if 'qmax' not in kwargs and 'pmax' not in kwargs:
# kwargs['qmax'] = int(qmax)
# x = range(kwargs.pop('qmin', self.quantile(kwargs.pop('pmin', 0.025))), kwargs.pop('qmax', self.quantile(kwargs.pop('pmax', 0.975)))+1)
# x, y = [self.cdf(q) for q in x], [self.pdf(q) * q for q in x]
# y = [sum(y[:i+1]) for i in range(len(y))]
# y = [i/y[-1] for i in y]
# axes.plot(x, y, fmt, color=color, alpha=alpha)
# if equality:
# axes.plot([0., 1.], [0., 1.], kwargs.pop('efmt', '--'), color=kwargs.pop('ecolor', color), alpha=kwargs.pop('ealpha', alpha))
# return axes
#
#DiscreteUnivariateDistribution.lorenz_plot = lorenz_plot
#del lorenz_plot
# ---- Poisson: P(theta) -----------------------------------------------------
def __repr__(self):
    return f"P({self.theta!s})"
PoissonDistribution.__str__ = __repr__
PoissonDistribution.__repr__ = __repr__
del __repr__

def _repr_latex_(self):
    return rf"$\mathcal{{P}}\left({self.theta!s}\right)$"
PoissonDistribution._repr_latex_ = _repr_latex_
del _repr_latex_

PoissonDistribution.theta = property(PoissonDistribution.get_theta, PoissonDistribution.set_theta)
del PoissonDistribution.get_theta, PoissonDistribution.set_theta

# ---- Binomial: B(kappa, pi) ------------------------------------------------
def __repr__(self):
    return f"B({self.kappa!s}, {self.pi!s})"
BinomialDistribution.__str__ = __repr__
BinomialDistribution.__repr__ = __repr__
del __repr__

def _repr_latex_(self):
    return rf"$\mathcal{{B}}\left({self.kappa!s}, {self.pi!s}\right)$"
BinomialDistribution._repr_latex_ = _repr_latex_
del _repr_latex_

BinomialDistribution.kappa = property(BinomialDistribution.get_kappa, BinomialDistribution.set_kappa)
del BinomialDistribution.get_kappa, BinomialDistribution.set_kappa
BinomialDistribution.pi = property(BinomialDistribution.get_pi, BinomialDistribution.set_pi)
del BinomialDistribution.get_pi, BinomialDistribution.set_pi

# ---- Logarithmic: Log(theta) -----------------------------------------------
def __repr__(self):
    return f"Log({self.theta!s})"
LogarithmicDistribution.__str__ = __repr__
LogarithmicDistribution.__repr__ = __repr__
del __repr__

def _repr_latex_(self):
    return rf"$\mathrm{{Log}}\left({self.theta!s}\right)$"
LogarithmicDistribution._repr_latex_ = _repr_latex_
del _repr_latex_

LogarithmicDistribution.theta = property(LogarithmicDistribution.get_theta, LogarithmicDistribution.set_theta)
del LogarithmicDistribution.get_theta, LogarithmicDistribution.set_theta

# ---- Geometric: G(pi) ------------------------------------------------------
def __repr__(self):
    return f"G({self.pi!s})"
GeometricDistribution.__str__ = __repr__
GeometricDistribution.__repr__ = __repr__
del __repr__

def _repr_latex_(self):
    return rf"$\mathcal{{G}}\left({self.pi!s}\right)$"
GeometricDistribution._repr_latex_ = _repr_latex_
del _repr_latex_

GeometricDistribution.pi = property(GeometricDistribution.get_pi, GeometricDistribution.set_pi)
del GeometricDistribution.get_pi, GeometricDistribution.set_pi

# ---- Negative binomial: NB(kappa, pi) --------------------------------------
def __repr__(self):
    return f"NB({self.kappa!s}, {self.pi!s})"
NegativeBinomialDistribution.__str__ = __repr__
NegativeBinomialDistribution.__repr__ = __repr__
del __repr__

def _repr_latex_(self):
    return rf"$\mathcal{{NB}}\left({self.kappa!s}, {self.pi!s}\right)$"
NegativeBinomialDistribution._repr_latex_ = _repr_latex_
del _repr_latex_

NegativeBinomialDistribution.kappa = property(NegativeBinomialDistribution.get_kappa, NegativeBinomialDistribution.set_kappa)
del NegativeBinomialDistribution.get_kappa, NegativeBinomialDistribution.set_kappa
NegativeBinomialDistribution.pi = property(NegativeBinomialDistribution.get_pi, NegativeBinomialDistribution.set_pi)
del NegativeBinomialDistribution.get_pi, NegativeBinomialDistribution.set_pi
# Shared alpha/gamma parameters of the beta-compound discrete distributions.
BetaCompoundDiscreteUnivariateDistribution.alpha = property(BetaCompoundDiscreteUnivariateDistribution.get_alpha, BetaCompoundDiscreteUnivariateDistribution.set_alpha)
del BetaCompoundDiscreteUnivariateDistribution.get_alpha, BetaCompoundDiscreteUnivariateDistribution.set_alpha
BetaCompoundDiscreteUnivariateDistribution.gamma = property(BetaCompoundDiscreteUnivariateDistribution.get_gamma, BetaCompoundDiscreteUnivariateDistribution.set_gamma)
del BetaCompoundDiscreteUnivariateDistribution.get_gamma, BetaCompoundDiscreteUnivariateDistribution.set_gamma

# ---- Beta-binomial: BetaB(kappa, alpha, gamma) -----------------------------
def __repr__(self):
    return f"BetaB({self.kappa!s}, {self.alpha!s}, {self.gamma!s})"
BetaBinomialDistribution.__str__ = __repr__
BetaBinomialDistribution.__repr__ = __repr__
del __repr__

def _repr_latex_(self):
    return rf"$\beta\mathcal{{B}}\left({self.kappa!s}, {self.alpha!s}, {self.gamma!s}\right)$"
BetaBinomialDistribution._repr_latex_ = _repr_latex_
del _repr_latex_

BetaBinomialDistribution.kappa = property(BetaBinomialDistribution.get_kappa, BetaBinomialDistribution.set_kappa)
del BetaBinomialDistribution.get_kappa, BetaBinomialDistribution.set_kappa

# ---- Beta-negative-binomial: BetaNB(kappa, alpha, gamma) -------------------
def __repr__(self):
    return f"BetaNB({self.kappa!s}, {self.alpha!s}, {self.gamma!s})"
BetaNegativeBinomialDistribution.__str__ = __repr__
BetaNegativeBinomialDistribution.__repr__ = __repr__
del __repr__

def _repr_latex_(self):
    return rf"$\beta\mathcal{{NB}}\left({self.kappa!s}, {self.alpha!s}, {self.gamma!s}\right)$"
BetaNegativeBinomialDistribution._repr_latex_ = _repr_latex_
del _repr_latex_

BetaNegativeBinomialDistribution.kappa = property(BetaNegativeBinomialDistribution.get_kappa, BetaNegativeBinomialDistribution.set_kappa)
del BetaNegativeBinomialDistribution.get_kappa, BetaNegativeBinomialDistribution.set_kappa

# First two moments of continuous distributions as read-only properties.
ContinuousUnivariateDistribution.mean = property(ContinuousUnivariateDistribution.get_mean)
del ContinuousUnivariateDistribution.get_mean
ContinuousUnivariateDistribution.variance = property(ContinuousUnivariateDistribution.get_variance)
del ContinuousUnivariateDistribution.get_variance
def pdf_plot(self, axes=None, fmt='-', num=100, **kwargs):
    """Plot the probability density function on a regular grid of ``num`` points."""
    if axes is None:
        axes = pyplot.subplot(1, 1, 1)
    else:
        # Default the quantile bounds to the current axis limits.
        current_qmin, current_qmax = axes.get_xlim()
        if 'qmin' not in kwargs and 'pmin' not in kwargs:
            kwargs['qmin'] = current_qmin
        if 'qmax' not in kwargs and 'pmax' not in kwargs:
            kwargs['qmax'] = current_qmax
    # NOTE: bound keywords are consumed (and the default grid computed) even
    # when an explicit `quantiles` grid is supplied, mirroring the eager
    # default-argument evaluation of the original one-liner.
    lower = kwargs.pop('qmin', self.quantile(kwargs.pop('pmin', 0.025)))
    upper = kwargs.pop('qmax', self.quantile(kwargs.pop('pmax', 0.975)))
    grid = kwargs.pop('quantiles', numpy.linspace(lower, upper, num=num))
    densities = [self.pdf(q) for q in grid]
    if 'norm' in kwargs:
        scale = kwargs.pop('norm')
        densities = [scale * p for p in densities]
    if '|' in fmt:
        fmt = fmt.replace('|', '')
        axes.vlines(grid, 0, densities, **kwargs)
    if len(fmt) > 0:
        axes.plot(grid, densities, fmt, **kwargs)
    return axes

ContinuousUnivariateDistribution.pdf_plot = pdf_plot
del pdf_plot
def cdf_plot(self, axes=None, fmt='-', num=100, **kwargs):
    """Plot the cumulative distribution function on a regular grid of ``num`` points."""
    if axes is None:
        axes = pyplot.subplot(1, 1, 1)
    else:
        current_qmin, current_qmax = axes.get_xlim()
        if 'qmin' not in kwargs and 'pmin' not in kwargs:
            kwargs['qmin'] = current_qmin
        if 'qmax' not in kwargs and 'pmax' not in kwargs:
            kwargs['qmax'] = current_qmax
    # Same eager keyword consumption as pdf_plot above.
    lower = kwargs.pop('qmin', self.quantile(kwargs.pop('pmin', 0.025)))
    upper = kwargs.pop('qmax', self.quantile(kwargs.pop('pmax', 0.975)))
    grid = kwargs.pop('quantiles', numpy.linspace(lower, upper, num=num))
    probabilities = [self.cdf(q) for q in grid]
    if 'norm' in kwargs:
        scale = kwargs.pop('norm')
        probabilities = [scale * p for p in probabilities]
    axes.plot(grid, probabilities, fmt, **kwargs)
    return axes

ContinuousUnivariateDistribution.cdf_plot = cdf_plot
del cdf_plot
def statiskit_univariate_frequency_distribution_decorator(cls):
    # Patch a frequency-distribution template instantiation: expose `pi`,
    # wrap `values` so raw scalars come back as elementary events, install a
    # truncated LaTeX repr, and adapt the inherited plotting helpers.
    cls.pi = property(cls.get_pi, cls.set_pi)
    del cls.get_pi, cls.set_pi
    if cls.EventType == DiscreteEvent:
        def wrapper(f):
            @wraps(f)
            def get_values(self):
                values = f(self)
                return [DiscreteElementaryEvent(value) for value in values]
            return get_values
        cls.values = property(wrapper(cls.get_values))
        del wrapper, cls.get_values
    elif cls.EventType == ContinuousEvent:
        def wrapper(f):
            @wraps(f)
            def get_values(self):
                values = f(self)
                return [ContinuousElementaryEvent(value) for value in values]
            return get_values
        cls.values = property(wrapper(cls.get_values))
        del wrapper, cls.get_values
    def _repr_latex_(self):
        # Align rows of "pi_{value} = mass", eliding the middle past
        # controls.head/controls.tail with a single dots row.
        pi = self.pi
        string = []
        etc = False
        for i, j in enumerate(self.values):
            if i < controls.head or i >= max(controls.head, len(pi) - controls.tail):
                string.append("\\pi_{" + remove_latex(j._repr_latex_()) + "} &= " + float_str(pi[i]))
            elif not etc:
                etc = True
                string.append('\\dots &= \\dots')
        return '$\\begin{align}\n\t' + ',\\\\\n\t'.join(string) + '.\n\\end{align}$'
    cls._repr_latex_ = _repr_latex_
    del _repr_latex_
    if not cls.EventType == CategoricalEvent:
        # Quantitative variants: default the plotting grid/bounds to the
        # observed support instead of distribution quantiles.
        def wrapper(f):
            @wraps(f)
            def pdf_plot(self, fmt='|', **kwargs):
                if 'quantiles' not in kwargs and 'qmin' not in kwargs and 'pmin' not in kwargs and not 'qmax' in kwargs and 'pmax' not in kwargs:
                    kwargs['quantiles'] = [value.value for value in self.values]
                return f(self, fmt=fmt, **kwargs)
            return pdf_plot
        cls.pdf_plot = wrapper(cls.pdf_plot)
        del wrapper
        def wrapper(f):
            @wraps(f)
            def cdf_plot(self, **kwargs):
                if 'quantiles' not in kwargs:
                    if 'qmin' not in kwargs and 'pmin' not in kwargs:
                        kwargs['pmin'] = 0.
                    if 'qmax' not in kwargs and 'pmax' not in kwargs:
                        kwargs['pmax'] = 1.
                return f(self, **kwargs)
            return cdf_plot
        cls.cdf_plot = wrapper(cls.cdf_plot)
        del wrapper
        def wrapper(f):
            @wraps(f)
            def box_plot(self, axes=None, extrema=True, vert=True, pos=1, edgecolor="k", **kwargs):
                if axes is None:
                    axes = pyplot.subplot(1, 1, 1)
                facecolor = kwargs.pop('facecolor', next(axes._get_lines.prop_cycler)['color'])
#                facecolor = kwargs.pop('facecolor', axes._get_lines.get_next_color())
                axes = f(self, axes=axes, vert=vert, pos=pos, facecolor=facecolor, edgecolor=edgecolor, **kwargs)
                if extrema:
                    # Mark the smallest and largest observed values.
                    values = self.values
                    values = [values[0].value, values[-1].value]
                    if vert:
                        axes.scatter([pos]*len(values), values, c=facecolor, edgecolors=edgecolor)
                    else:
                        axes.scatter(values, [pos]*len(values), c=facecolor, edgecolors=edgecolor)
                return axes
            return box_plot
        cls.box_plot = wrapper(cls.box_plot)
        del wrapper
# Apply to every frequency-distribution template instantiation.
for cls in _UnivariateFrequencyDistribution:
    statiskit_univariate_frequency_distribution_decorator(cls)
def statiskit_quantitative_univariate_frequency_distribution_decorator(cls):
    # Placeholder kept for symmetry; nothing extra to patch at the moment.
    pass
for cls in _QuantitativeUnivariateFrequencyDistribution:
    statiskit_quantitative_univariate_frequency_distribution_decorator(cls)
def __repr__(self):
    return "Univariate Histogram Distribution"
# Bug fix: __str__ was bound to NormalDistribution.__repr__ (a copy-paste
# slip); both __str__ and __repr__ must use the local __repr__ defined above.
UnivariateHistogramDistribution.__str__ = __repr__
UnivariateHistogramDistribution.__repr__ = __repr__
del __repr__
def _repr_latex_(self):
    """LaTeX table of per-bin probabilities, eliding the middle rows."""
    edges = list(self.bins)
    densities = self.densities
    rows = []
    elided = False
    # Walk consecutive edge pairs; each bin's probability is density * width.
    for index, (left, right) in enumerate(zip(edges[:-1], edges[1:])):
        if index < controls.head or index >= max(controls.head, len(densities) - controls.tail):
            rows.append("\\pi_{[" + float_str(left) + ', ' + float_str(right) + "[} &= " + float_str(densities[index]*(right-left)))
        elif not elided:
            elided = True
            rows.append('\\dots &= \\dots')
    return '$\\begin{align}\n\t' + ',\\\\\n\t'.join(rows) + '.\n\\end{align}$'

UnivariateHistogramDistribution._repr_latex_ = _repr_latex_
del _repr_latex_

# Bin edges and densities as read-only properties.
UnivariateHistogramDistribution.bins = property(UnivariateHistogramDistribution.get_bins)
UnivariateHistogramDistribution.densities = property(UnivariateHistogramDistribution.get_densities)
def pdf_plot(self, axes=None, fmt='|', fill=True, **kwargs):
    """Plot the histogram density as bars ('|'), markers ('o') and/or outline segments.

    `fill` is accepted for interface compatibility but currently unused.
    Returns the matplotlib axes.
    """
    if axes is None:
        axes = pyplot.subplot(1,1,1)
    bins = [x for x in self.bins]
    densities = [d for d in self.densities]
    if 'norm' in kwargs:
        norm = kwargs.pop('norm')
        densities = [norm * d for d in densities]
    color = kwargs.pop('color', next(axes._get_lines.prop_cycler)['color'])
    # NOTE(review): `prop_cycler` is removed in matplotlib >= 3.8; the
    # replacement is `axes._get_lines.get_next_color()`.
    if '|' in fmt:
        # Bug fix: `edgecolor` was popped inside the loop, so only the first
        # bar honored a caller-supplied value; hoist the pop out of the loop.
        edge = kwargs.pop('edgecolor', 'k')
        for lc, rc, d in zip(bins[:-1], bins[1:], densities):
            axes.bar(x=lc, height=d, width=rc-lc, bottom=0., facecolor=color, edgecolor=edge, align='edge', **kwargs)
        fmt = fmt.replace('|', '')
    if 'o' in fmt:
        # Bug fix: the first call referenced an undefined name `alpha`
        # (NameError); forward the caller's keywords as the sibling call does.
        axes.plot(bins[:-1], densities, 'o', color=color, **kwargs)
        axes.plot([bins[-1]], [densities[-1]], 'o', color=color, **kwargs)
        fmt = fmt.replace('o', '')
    if len(fmt) > 0:
        for lc, rc, d in zip(bins[:-1], bins[1:], densities):
            axes.plot([lc, rc], [d, d], fmt, color=color, **kwargs)
    return axes

UnivariateHistogramDistribution.pdf_plot = pdf_plot
del pdf_plot
def wrapper(f):
    """Default the histogram CDF plotting bounds to the full [0, 1] probability range."""
    @wraps(f)
    def cdf_plot(self, **kwargs):
        if 'quantiles' not in kwargs:
            if not ('qmin' in kwargs or 'pmin' in kwargs):
                kwargs['pmin'] = 0.
            if not ('qmax' in kwargs or 'pmax' in kwargs):
                kwargs['pmax'] = 1.
        return f(self, **kwargs)
    return cdf_plot

UnivariateHistogramDistribution.cdf_plot = wrapper(UnivariateHistogramDistribution.cdf_plot)
del wrapper
# ---- Normal: N(mu, sigma) --------------------------------------------------
def __repr__(self):
    return f"N({float_str(self.mu)}, {float_str(self.sigma)})"
NormalDistribution.__str__ = __repr__
NormalDistribution.__repr__ = __repr__
del __repr__

def _repr_latex_(self):
    return rf"$\mathcal{{N}}\left({float_str(self.mu)}, {float_str(self.sigma)}\right)$"
NormalDistribution._repr_latex_ = _repr_latex_
del _repr_latex_

NormalDistribution.mu = property(NormalDistribution.get_mu, NormalDistribution.set_mu)
del NormalDistribution.get_mu, NormalDistribution.set_mu
NormalDistribution.sigma = property(NormalDistribution.get_sigma, NormalDistribution.set_sigma)
del NormalDistribution.get_sigma, NormalDistribution.set_sigma

# ---- Logistic: Lo(mu, sigma) -----------------------------------------------
def __repr__(self):
    return f"Lo({float_str(self.mu)}, {float_str(self.sigma)})"
LogisticDistribution.__str__ = __repr__
LogisticDistribution.__repr__ = __repr__
del __repr__

def _repr_latex_(self):
    return rf"$\mathcal{{Lo}}\left({float_str(self.mu)}, {float_str(self.sigma)}\right)$"
LogisticDistribution._repr_latex_ = _repr_latex_
del _repr_latex_

LogisticDistribution.mu = property(LogisticDistribution.get_mu, LogisticDistribution.set_mu)
del LogisticDistribution.get_mu, LogisticDistribution.set_mu
LogisticDistribution.sigma = property(LogisticDistribution.get_sigma, LogisticDistribution.set_sigma)
del LogisticDistribution.get_sigma, LogisticDistribution.set_sigma

# ---- Laplace: La(mu, sigma) ------------------------------------------------
def __repr__(self):
    return f"La({float_str(self.mu)}, {float_str(self.sigma)})"
LaplaceDistribution.__str__ = __repr__
LaplaceDistribution.__repr__ = __repr__
del __repr__

def _repr_latex_(self):
    return rf"$\mathcal{{La}}\left({float_str(self.mu)}, {float_str(self.sigma)}\right)$"
LaplaceDistribution._repr_latex_ = _repr_latex_
del _repr_latex_

LaplaceDistribution.mu = property(LaplaceDistribution.get_mu, LaplaceDistribution.set_mu)
del LaplaceDistribution.get_mu, LaplaceDistribution.set_mu
LaplaceDistribution.sigma = property(LaplaceDistribution.get_sigma, LaplaceDistribution.set_sigma)
del LaplaceDistribution.get_sigma, LaplaceDistribution.set_sigma

# ---- Cauchy: C(mu, sigma) --------------------------------------------------
def __repr__(self):
    return f"C({float_str(self.mu)}, {float_str(self.sigma)})"
CauchyDistribution.__str__ = __repr__
CauchyDistribution.__repr__ = __repr__
del __repr__

def _repr_latex_(self):
    return rf"$\mathcal{{C}}\left({float_str(self.mu)}, {float_str(self.sigma)}\right)$"
CauchyDistribution._repr_latex_ = _repr_latex_
del _repr_latex_

CauchyDistribution.mu = property(CauchyDistribution.get_mu, CauchyDistribution.set_mu)
del CauchyDistribution.get_mu, CauchyDistribution.set_mu
CauchyDistribution.sigma = property(CauchyDistribution.get_sigma, CauchyDistribution.set_sigma)
del CauchyDistribution.get_sigma, CauchyDistribution.set_sigma

# ---- Student: T(nu) --------------------------------------------------------
def __repr__(self):
    return f"T({float_str(self.nu)})"
StudentDistribution.__str__ = __repr__
StudentDistribution.__repr__ = __repr__
del __repr__

def _repr_latex_(self):
    return rf"$\mathcal{{T}}\left({float_str(self.nu)}\right)$"
StudentDistribution._repr_latex_ = _repr_latex_
del _repr_latex_

StudentDistribution.nu = property(StudentDistribution.get_nu, StudentDistribution.set_nu)
del StudentDistribution.get_nu, StudentDistribution.set_nu

# ---- Non-standard Student: nsT(mu, sigma, nu) ------------------------------
def __repr__(self):
    return f"nsT({float_str(self.mu)}, {float_str(self.sigma)}, {float_str(self.nu)})"
NonStandardStudentDistribution.__str__ = __repr__
NonStandardStudentDistribution.__repr__ = __repr__
del __repr__

def _repr_latex_(self):
    return rf"$\mathcal{{nsT}}\left({float_str(self.mu)}, {float_str(self.sigma)}, {float_str(self.nu)}\right)$"
NonStandardStudentDistribution._repr_latex_ = _repr_latex_
del _repr_latex_

NonStandardStudentDistribution.mu = property(NonStandardStudentDistribution.get_mu, NonStandardStudentDistribution.set_mu)
del NonStandardStudentDistribution.get_mu, NonStandardStudentDistribution.set_mu
NonStandardStudentDistribution.sigma = property(NonStandardStudentDistribution.get_sigma, NonStandardStudentDistribution.set_sigma)
del NonStandardStudentDistribution.get_sigma, NonStandardStudentDistribution.set_sigma
#########################################################
# Gumbel, Gompertz, Gamma and Beta distributions        #
#########################################################
def __repr__(self):
    """Short textual form of a Gumbel distribution, e.g. "Gu(0.0, 1.0)"."""
    return "Gu(" + float_str(self.mu) + ', ' + float_str(self.sigma) + ')'
GumbelDistribution.__str__ = __repr__
GumbelDistribution.__repr__ = __repr__
del __repr__
def _repr_latex_(self):
    """LaTeX form for IPython/Jupyter rich display.

    Fixed: the symbol read ``\\mathcal{C}`` (copy-pasted from the Cauchy
    patch); it now matches the plain-text repr "Gu(...)".
    """
    return r"$\mathcal{Gu}\left(" + float_str(self.mu) + ', ' + float_str(self.sigma) + r'\right)$'
GumbelDistribution._repr_latex_ = _repr_latex_
del _repr_latex_
# Expose mu/sigma as properties and drop the raw accessors.
GumbelDistribution.mu = property(GumbelDistribution.get_mu, GumbelDistribution.set_mu)
del GumbelDistribution.get_mu, GumbelDistribution.set_mu
GumbelDistribution.sigma = property(GumbelDistribution.get_sigma, GumbelDistribution.set_sigma)
del GumbelDistribution.get_sigma, GumbelDistribution.set_sigma
def __repr__(self):
    """Short textual form of a Gompertz distribution, e.g. "Go(0.0, 1.0)"."""
    return "Go(" + float_str(self.mu) + ', ' + float_str(self.sigma) + ')'
GompertzDistribution.__str__ = __repr__
GompertzDistribution.__repr__ = __repr__
del __repr__
def _repr_latex_(self):
    """LaTeX form for IPython/Jupyter rich display.

    Fixed: the symbol read ``\\mathcal{C}`` (copy-pasted from the Cauchy
    patch); it now matches the plain-text repr "Go(...)".
    """
    return r"$\mathcal{Go}\left(" + float_str(self.mu) + ', ' + float_str(self.sigma) + r'\right)$'
GompertzDistribution._repr_latex_ = _repr_latex_
del _repr_latex_
# Expose mu/sigma as properties and drop the raw accessors.
GompertzDistribution.mu = property(GompertzDistribution.get_mu, GompertzDistribution.set_mu)
del GompertzDistribution.get_mu, GompertzDistribution.set_mu
GompertzDistribution.sigma = property(GompertzDistribution.get_sigma, GompertzDistribution.set_sigma)
del GompertzDistribution.get_sigma, GompertzDistribution.set_sigma
# GammaDistribution: text/LaTeX reprs and alpha/beta properties.
def __repr__(self):
    return "Gamma(" + float_str(self.alpha) + ', ' + float_str(self.beta) + ')'
GammaDistribution.__str__ = __repr__
GammaDistribution.__repr__ = __repr__
del __repr__
def _repr_latex_(self):
    return r"$\Gamma\left(" + float_str(self.alpha) + ', ' + float_str(self.beta) + r'\right)$'
GammaDistribution._repr_latex_ = _repr_latex_
del _repr_latex_
GammaDistribution.alpha = property(GammaDistribution.get_alpha, GammaDistribution.set_alpha)
del GammaDistribution.get_alpha, GammaDistribution.set_alpha
GammaDistribution.beta = property(GammaDistribution.get_beta, GammaDistribution.set_beta)
del GammaDistribution.get_beta, GammaDistribution.set_beta
def __repr__(self):
    """Short textual form of a Beta distribution, e.g. "Beta(1.0, 1.0)"."""
    return "Beta(" + float_str(self.alpha) + ', ' + float_str(self.beta) + ')'
BetaDistribution.__str__ = __repr__
BetaDistribution.__repr__ = __repr__
del __repr__
def _repr_latex_(self):
    """LaTeX form for IPython/Jupyter rich display.

    Fixed: ``\\Beta`` is not a LaTeX/MathJax macro (an uppercase beta is a
    plain "B"), so the rich display failed to render; use ``\\mathrm{Beta}``
    in line with the ``\\mathrm{...}`` symbols used elsewhere in this module.
    """
    return r"$\mathrm{Beta}\left(" + float_str(self.alpha) + ', ' + float_str(self.beta) + r'\right)$'
BetaDistribution._repr_latex_ = _repr_latex_
del _repr_latex_
# Expose alpha/beta as properties and drop the raw accessors.
BetaDistribution.alpha = property(BetaDistribution.get_alpha, BetaDistribution.set_alpha)
del BetaDistribution.get_alpha, BetaDistribution.set_alpha
BetaDistribution.beta = property(BetaDistribution.get_beta, BetaDistribution.set_beta)
del BetaDistribution.get_beta, BetaDistribution.set_beta
def wrapper_probability(f):
    # Wrap MultivariateDistribution.probability so it accepts either a
    # ready-made MultivariateEvent or a tuple of plain components
    # (str/int/float or UnivariateEvent) packed into a VectorEvent.
    @wraps(f)
    def probability(self, *events, **kwargs):
        if len(events) == 1:
            event = events[-1]
        else:
            event = None
        if not isinstance(event, MultivariateEvent):
            # Build one elementary event per component, dispatching on type.
            event = VectorEvent(len(events))
            for index, component in enumerate(events):
                if isinstance(component, str):
                    event[index] = CategoricalElementaryEvent(component)
                elif isinstance(component, int):
                    event[index] = DiscreteElementaryEvent(component)
                elif isinstance(component, float):
                    event[index] = ContinuousElementaryEvent(component)
                elif isinstance(component, UnivariateEvent):
                    event[index] = component
                else:
                    raise TypeError('\'events\' parameters')
            # event = VectorEvent(event)
        if not isinstance(event, MultivariateEvent):
            raise TypeError('\'event\' parameter')
        # The 'log' keyword selects log-probability; defaults to False.
        return f(self, event, kwargs.pop('log', False))
    return probability
MultivariateDistribution.probability = wrapper_probability(MultivariateDistribution.probability)
def simulation(self, size):
    """Draw `size` multivariate events and return them as a data frame.

    Each `simulate` call yields one row; rows are transposed into
    per-component columns before being handed to `from_list`.
    """
    rows = [self.simulate() for _ in range(size)]
    columns = [list(column) for column in zip(*rows)]
    return from_list(*columns)
MultivariateDistribution.simulation = simulation
del simulation
# Read-only nb_parameters property replacing the raw accessor.
MultivariateDistribution.nb_parameters = property(MultivariateDistribution.get_nb_parameters)
del MultivariateDistribution.get_nb_parameters
# SplittingDistribution: expose sum/singular as properties, compose reprs
# from the two component distributions.
SplittingDistribution.sum = property(SplittingDistribution.get_sum, SplittingDistribution.set_sum)
del SplittingDistribution.get_sum, SplittingDistribution.set_sum
SplittingDistribution.singular = property(SplittingDistribution.get_singular, SplittingDistribution.set_singular)
del SplittingDistribution.get_singular, SplittingDistribution.set_singular
def __str__(self):
    # e.g. "<singular> /\ <sum>".
    return self.singular.__str__() + " /\\ " + self.sum.__str__()
SplittingDistribution.__str__ = __str__
SplittingDistribution.__repr__ = __str__
del __str__
def _repr_latex_(self):
    # Join the two LaTeX forms, trimming the closing/opening "$" delimiters.
    return self.singular._repr_latex_()[:-1] + r" \underset{S}{\wedge} " + self.sum._repr_latex_()[1:]
SplittingDistribution._repr_latex_ = _repr_latex_
del _repr_latex_
def __repr__(self):
    """Short textual form, e.g. "Dir([...])"."""
    return "Dir(" + str(self.alpha) + ')'
DirichletDistribution.__repr__ = __repr__
del __repr__
def _repr_latex_(self):
    """LaTeX form for IPython/Jupyter rich display.

    Fixed: the raw-string prefix was inside the literal (``"r$..."``), so the
    rendered text started with a literal 'r' and the backslashes were left to
    plain-string escaping.  Also use the directly imported `remove_latex`
    (the module name `_tools` is not bound here — only
    ``from ._tools import float_str, remove_latex`` is).
    """
    return r"$\mathrm{Dir}\left(" + remove_latex(self.alpha._repr_latex_()) + r'\right)$'
DirichletDistribution._repr_latex_ = _repr_latex_
del _repr_latex_
# Expose alpha as a property and drop the raw accessors.
DirichletDistribution.alpha = property(DirichletDistribution.get_alpha, DirichletDistribution.set_alpha)
del DirichletDistribution.get_alpha, DirichletDistribution.set_alpha
# def statiskit_independent_Multivariate_distribution_decorator(cls):
# pass
# for cls in _IndependentMultivariateDistribution:
# statiskit_independent_Multivariate_distribution_decorator(cls)
# def IndependentMultivariateDistribution(*args):
# if all(isinstance(arg, CategoricalUnivariateDistribution) for arg in args):
# return CategoricalIndependentMultivariateDistribution(args)
# elif all(isinstance(arg, DiscreteUnivariateDistribution) for arg in args):
# return DiscreteIndependentMultivariateDistribution(args)
# elif all(isinstance(arg, ContinuousUnivariateDistribution) for arg in args):
# return ContinuousIndependentMultivariateDistribution(args)
# elif all(isinstance(arg, UnivariateDistribution) for arg in args):
# return MixedIndependentMultivariateDistribution(args)
# else:
# raise TypeError('\'args\' parameter')
def statiskit_mixture_distribution_decorator(cls):
    """Patch a mixture class: expose `nb_states`/`pi` as properties, add an
    `observations` sequence-like view over the component distributions and,
    when the class supports it, a `pdf_plot` that also draws each weighted
    component.
    """
    cls.nb_states = property(cls.get_nb_states)
    del cls.get_nb_states
    cls.pi = property(cls.get_pi, cls.set_pi)
    del cls.get_pi, cls.set_pi
    class Observations(object):
        # Lightweight sequence view over the mixture's components, with
        # negative-index and bounds handling.
        def __init__(self, distribution):
            self._distribution = distribution
        def __len__(self):
            return self._distribution.nb_states
    def wrapper_observations(f0, f1):
        @wraps(f0)
        def __getitem__(self, index):
            if index < 0:
                index += len(self)
            if not 0 <= index < len(self):
                raise IndexError(self._distribution.__class__.__name__ + " index out of range")
            return f0(self._distribution, index)
        @wraps(f1)
        def __setitem__(self, index, value):
            if index < 0:
                index += len(self)
            if not 0 <= index < len(self):
                raise IndexError(self._distribution.__class__.__name__ + " index out of range")
            return f1(self._distribution, index, value)
        return __getitem__, __setitem__
    Observations.__getitem__, Observations.__setitem__ = wrapper_observations(cls.get_observation, cls.set_observation)
    del cls.get_observation, cls.set_observation
    cls.observations = property(Observations)
    if hasattr(cls, 'pdf_plot'):
        def wrapper_pdf_plot(f):
            @wraps(f)
            def pdf_plot(self, axes=None, *args, **kwargs):
                norm = kwargs.pop('norm', 1.)
                states = kwargs.pop('states', True)
                if states:
                    if isinstance(states, (list, tuple)):
                        skwargs = states
                    else:
                        # Fixed: `[{}] * nb_states` aliased one shared dict
                        # across all states; use independent dicts so a
                        # per-state entry cannot leak into its siblings.
                        skwargs = [{} for _ in range(self.nb_states)]
                    for index, (pi, observation) in enumerate(zip(self.pi, self.observations)):
                        # Fill each state's kwargs with the common defaults.
                        for key, value in kwargs.items():
                            if key not in skwargs[index]:
                                skwargs[index][key] = value
                        # Each component is scaled by its mixture weight.
                        axes = observation.pdf_plot(axes=axes, norm=pi*norm, *args, **skwargs[index])
                return f(self, axes=axes, *args, norm=norm, **kwargs)
            return pdf_plot
        cls.pdf_plot = wrapper_pdf_plot(cls.pdf_plot)
# Apply the mixture patch to every exported mixture class.
for cls in _MixtureDistribution:
    statiskit_mixture_distribution_decorator(cls)
def statiskit_univariate_mixture_distribution_decorator(cls):
    # Patch a univariate mixture class so posterior/assignment/uncertainty
    # accept plain Python values, converting them to events on the fly.
    def wrapper_posterior(f):
        @wraps(f)
        def posterior(self, event, **kwargs):
            # 'log' keyword selects log-posterior; defaults to False.
            return f(self, type_to_event(event), kwargs.pop('log', False))
        return posterior
    cls.posterior = wrapper_posterior(cls.posterior)
    def wrapper_assignment(f):
        @wraps(f)
        def assignment(self, event):
            return f(self, type_to_event(event))
        return assignment
    cls.assignment = wrapper_assignment(cls.assignment)
    def wrapper_uncertainty(f):
        @wraps(f)
        def uncertainty(self, arg):
            # Data objects are forwarded untouched; plain values are packed
            # into an event first.
            if isinstance(arg, UnivariateData):
                return f(self, arg)
            else:
                return f(self, types_to_event(arg))
        return uncertainty
    cls.uncertainty = wrapper_uncertainty(cls.uncertainty)
for cls in _UnivariateMixtureDistribution:
    statiskit_univariate_mixture_distribution_decorator(cls)
def statiskit_Multivariate_mixture_distribution_decorator(cls):
    """Patch a multivariate mixture class so `posterior`, `assignment` and
    `uncertainty` accept plain Python values, converting them to events with
    `types_to_event` (data objects are forwarded untouched).
    """
    def wrapper_posterior(f):
        @wraps(f)
        def posterior(self, *events, **kwargs):
            # Fixed: the parameter was declared `*event` while the body read
            # `events`, so every call raised NameError.
            return f(self, types_to_event(*events), kwargs.pop('log', False))
        return posterior
    cls.posterior = wrapper_posterior(cls.posterior)
    def wrapper_assignment(f):
        @wraps(f)
        def assignment(self, *event):
            # A single data object is forwarded as-is; anything else is
            # packed into a multivariate event.
            if len(event) == 1 and isinstance(event[0], (UnivariateData, MultivariateData)):
                event = event[0]
            else:
                event = types_to_event(*event)
            return f(self, event)
        return assignment
    cls.assignment = wrapper_assignment(cls.assignment)
    def wrapper_uncertainty(f):
        @wraps(f)
        def uncertainty(self, *args):
            if len(args) == 1 and isinstance(args[0], MultivariateData):
                return f(self, args[0])
            else:
                return f(self, types_to_event(*args))
        return uncertainty
    cls.uncertainty = wrapper_uncertainty(cls.uncertainty)
# Apply the multivariate-mixture patch to every exported class.
for cls in _MultivariateMixtureDistribution:
    statiskit_Multivariate_mixture_distribution_decorator(cls)
def MixtureDistribution(*args, **kwargs):
    """Build the mixture distribution matching the component distributions.

    :param args: the component distributions; they must all share the same
                 outcome type (categorical, discrete, continuous univariate,
                 or multivariate).
    :param pi: optional mixture weights (defaults to equal weights); wrapped
               in a `linalg.Vector` when given as a plain sequence.
    :raises TypeError: when the components do not form a supported family.
    """
    pi = kwargs.pop('pi') if 'pi' in kwargs else [1. for _ in args]
    if not isinstance(pi, linalg.Vector):
        pi = linalg.Vector(pi)
    def homogeneous(base):
        # True when every component is an instance of `base`.
        return all(isinstance(component, base) for component in args)
    if homogeneous(CategoricalUnivariateDistribution):
        return CategoricalUnivariateMixtureDistribution(CategoricalUnivariateDistributionVector(*args), pi)
    if homogeneous(DiscreteUnivariateDistribution):
        return DiscreteUnivariateMixtureDistribution(DiscreteUnivariateDistributionVector(*args), pi)
    if homogeneous(ContinuousUnivariateDistribution):
        return ContinuousUnivariateMixtureDistribution(ContinuousUnivariateDistributionVector(*args), pi)
    if homogeneous(MultivariateDistribution):
        if homogeneous(CategoricalMultivariateDistribution):
            return CategoricalMultivariateMixtureDistribution(CategoricalMultivariateDistributionVector(*args), pi)
        if homogeneous(DiscreteMultivariateDistribution):
            return DiscreteMultivariateMixtureDistribution(DiscreteMultivariateDistributionVector(*args), pi)
        if homogeneous(ContinuousMultivariateDistribution):
            return ContinuousMultivariateMixtureDistribution(ContinuousMultivariateDistributionVector(*args), pi)
        return MixedMultivariateMixtureDistribution(MultivariateDistributionVector(*args), pi)
    raise TypeError('\'args\' parameter')
# UnivariateConditionalDistribution: read-only properties plus a __call__
# wrapper that converts plain explanatory values into events.
UnivariateConditionalDistribution.nb_parameters = property(UnivariateConditionalDistribution.get_nb_parameters)
del UnivariateConditionalDistribution.get_nb_parameters
UnivariateConditionalDistribution.explanatory_space = property(UnivariateConditionalDistribution.get_explanatory_space)
del UnivariateConditionalDistribution.get_explanatory_space
def wrapper_call(f):
    @wraps(f)
    def __call__(self, *events):
        if len(events) == 1:
            event = events[-1]
        else:
            event = None
        if not isinstance(event, MultivariateEvent):
            # Let each sample-space component convert its raw value.
            event = VectorEvent(len(events))
            for index, component in enumerate(events):
                event[index] = self.explanatory_space[index](component)
        if not isinstance(event, MultivariateEvent):
            raise TypeError('\'event\' parameter')
        return f(self, event)
    return __call__
UnivariateConditionalDistribution.__call__ = wrapper_call(UnivariateConditionalDistribution.__call__)
from functools import wraps
import math
from statiskit import linalg
from statiskit import stl
from . import _core
from .__core.statiskit import (_ShiftedDistribution,
UnivariateDistribution,
_UnivariateFrequencyDistribution,
_QuantitativeUnivariateFrequencyDistribution,
CategoricalUnivariateDistribution,
BinaryDistribution,
NominalDistribution,
OrdinalDistribution,
HierarchicalDistribution,
CategoricalUnivariateMixtureDistribution,
CategoricalUnivariateDistributionVector,
DiscreteUnivariateDistribution,
DiscreteUnivariateFrequencyDistribution,
PoissonDistribution,
BinomialDistribution,
LogarithmicDistribution,
GeometricDistribution,
NegativeBinomialDistribution,
BetaCompoundDiscreteUnivariateDistribution,
BetaBinomialDistribution,
BetaNegativeBinomialDistribution,
DiscreteUnivariateMixtureDistribution,
DiscreteUnivariateDistributionVector,
ContinuousUnivariateDistribution,
ContinuousUnivariateFrequencyDistribution,
UnivariateHistogramDistribution,
NormalDistribution,
LogisticDistribution,
LaplaceDistribution,
CauchyDistribution,
StudentDistribution,
NonStandardStudentDistribution,
GumbelDistribution,
GompertzDistribution,
ExponentialDistribution,
GammaDistribution,
BetaDistribution,
ContinuousUnivariateMixtureDistribution,
ContinuousUnivariateDistributionVector,
MultivariateDistribution,
# _IndependentMultivariateDistribution,
MixedMultivariateMixtureDistribution,
CategoricalMultivariateDistribution,
# CategoricalIndependentMultivariateDistribution,
CategoricalMultivariateMixtureDistribution,
CategoricalMultivariateDistributionVector,
DiscreteMultivariateDistribution,
SplittingDistribution,
# DiscreteIndependentMultivariateDistribution,
DiscreteMultivariateMixtureDistribution,
DiscreteMultivariateDistributionVector,
ContinuousMultivariateDistribution,
MultinormalDistribution,
DirichletDistribution,
# ContinuousIndependentMultivariateDistribution,
ContinuousMultivariateMixtureDistribution,
ContinuousMultivariateDistributionVector,
MultivariateDistributionVector,
_MixtureDistribution, _UnivariateMixtureDistribution, _QuantitativeUnivariateMixtureDistribution, _MultivariateMixtureDistribution,
UnivariateConditionalDistribution,
CategoricalUnivariateConditionalDistribution,
DiscreteUnivariateConditionalDistribution,
ContinuousUnivariateConditionalDistribution,
MultivariateConditionalDistribution,
CategoricalMultivariateConditionalDistribution,
DiscreteMultivariateConditionalDistribution,
ContinuousMultivariateConditionalDistribution)
from .optionals import pyplot, numpy
from .io import from_list
from .controls import controls
from .event import (UnivariateEvent,
CategoricalEvent,
CategoricalElementaryEvent,
DiscreteEvent,
DiscreteElementaryEvent,
ContinuousEvent,
ContinuousElementaryEvent,
MultivariateEvent,
VectorEvent,
type_to_event,
types_to_event)
from .data import (UnivariateData,
UnivariateDataFrame,
MultivariateData,
MultivariateDataFrame)
from .sample_space import (NominalSampleSpace,
OrdinalSampleSpace)
from ._tools import float_str, remove_latex
# Public API of this module (the distribution classes re-exported after
# being patched below, plus the MixtureDistribution factory).
__all__ = ['BinaryDistribution',
           'NominalDistribution',
           'OrdinalDistribution',
           'HierarchicalDistribution',
           'DiscreteUnivariateFrequencyDistribution',
           'PoissonDistribution',
           'BinomialDistribution',
           'LogarithmicDistribution',
           'GeometricDistribution',
           'NegativeBinomialDistribution',
           'BetaBinomialDistribution',
           'BetaNegativeBinomialDistribution',
           'ContinuousUnivariateFrequencyDistribution',
           'UnivariateHistogramDistribution',
           'NormalDistribution',
           'LogisticDistribution',
           'LaplaceDistribution',
           'CauchyDistribution',
           'StudentDistribution',
           'NonStandardStudentDistribution',
           'GumbelDistribution',
           'GompertzDistribution',
           'ExponentialDistribution',
           'GammaDistribution',
           'BetaDistribution',
           'SplittingDistribution',
           'MultinormalDistribution',
           'DirichletDistribution',
           # 'IndependentMultivariateDistribution',
           'MixtureDistribution']
def shifted_distribution_decorator(cls):
    """Patch a shifted-distribution class: expose `distribution`/`shift` as
    properties and derive text/LaTeX representations from the wrapped
    distribution's own representations.
    """
    cls.distribution = property(cls.get_distribution, cls.set_distribution)
    del cls.get_distribution, cls.set_distribution
    cls.shift = property(cls.get_shift, cls.set_shift)
    # Fixed: the accessor pair was evaluated but never deleted (a bare
    # `cls.get_shift, cls.set_shift` expression), leaving stale accessors on
    # the class; every sibling patch in this module deletes them.
    del cls.get_shift, cls.set_shift
    def __str__(self):
        # Splice the shift into the wrapped distribution's "Name(...)" form.
        return self.distribution.__str__()[:-1] + ", " + str(self.shift) + ")"
    cls.__str__ = __str__
    cls.__repr__ = __str__
    def _repr_latex_(self):
        # Drop the trailing r"\right)$" (8 characters) and re-append it
        # after the shift.
        return self.distribution._repr_latex_()[:-8] + ", " + str(self.shift) + r"\right)$"
    cls._repr_latex_ = _repr_latex_
# Apply the shifted-distribution patch to every exported shifted class.
for cls in _ShiftedDistribution:
    shifted_distribution_decorator(cls)
# UnivariateDistribution: nb_parameters property and a probability wrapper
# that converts plain str/int/float arguments into elementary events.
UnivariateDistribution.nb_parameters = property(UnivariateDistribution.get_nb_parameters)
del UnivariateDistribution.get_nb_parameters
def wrapper_probability(f):
    @wraps(f)
    def probability(self, event, **kwargs):
        if isinstance(event, str):
            event = CategoricalElementaryEvent(event)
        elif isinstance(event, int):
            event = DiscreteElementaryEvent(event)
        elif isinstance(event, float):
            event = ContinuousElementaryEvent(event)
        elif not isinstance(event, UnivariateEvent):
            raise TypeError('\'event\' parameter')
        # The 'log' keyword selects log-probability; defaults to False.
        return f(self, event, kwargs.pop('log', False))
    return probability
UnivariateDistribution.probability = wrapper_probability(UnivariateDistribution.probability)
def simulation(self, size):
    """Draw `size` events from the distribution into a univariate data frame.

    The frame's sample space is chosen from the distribution's family;
    unsupported families are rejected with NotImplementedError.
    """
    if isinstance(self, NominalDistribution):
        frame = UnivariateDataFrame(NominalSampleSpace(self.values))
    elif isinstance(self, OrdinalDistribution):
        frame = UnivariateDataFrame(OrdinalSampleSpace(self.ordered_values))
    elif isinstance(self, DiscreteUnivariateDistribution):
        frame = UnivariateDataFrame(controls.ZZ)
    elif isinstance(self, ContinuousUnivariateDistribution):
        frame = UnivariateDataFrame(controls.RR)
    else:
        raise NotImplementedError()
    for _ in range(size):
        frame.add_event(self.simulate())
    return frame
UnivariateDistribution.simulation = simulation
del simulation
def pdf_plot(self, axes=None, fmt='|', **kwargs):
    # Plot the probability of each categorical value as bars ('|' in fmt),
    # a line plot (any other matplotlib format chars), or a pie ('pie').
    if axes is None:
        axes = pyplot.subplot(1,1,1)
    # Prefer ordered values when the distribution defines them; note the
    # inner getattr has no default, so a class with neither attribute raises.
    labels = getattr(self, 'ordered_values', getattr(self, 'values'))
    x, labels = list(zip(*[(index, label) for index, label in enumerate(labels)]))
    y = [self.probability(label, log=False) for label in labels]
    if 'norm' in kwargs:
        # Optional scaling factor (used e.g. by mixture component plots).
        norm = kwargs.pop('norm')
        y = [norm * p for p in y]
    else:
        y = [p for p in y]
    if fmt == 'pie':
        if not 'autopct' in kwargs:
            kwargs['autopct'] = '%.2f'
        axes.pie(y, labels=labels, **kwargs)
    else:
        if '|' in fmt:
            fmt = fmt.replace('|', '')
            width = kwargs.pop('width', .8)
            if not 0 < width <= 1.:
                raise ValueError('\'width\' parameter must be strictly superior to 0. and inferior to 1.')
            axes.bar([q-width/2. for q in x], y, width, align='center', **kwargs)
        if len(fmt) > 0:
            axes.plot(x, y, fmt, **kwargs)
        axes.set_xticks(x)
        axes.set_xticklabels(labels)
    return axes
CategoricalUnivariateDistribution.pdf_plot = pdf_plot
del pdf_plot
# Read-only values property replacing the raw accessor.
CategoricalUnivariateDistribution.values = property(CategoricalUnivariateDistribution.get_values)
del CategoricalUnivariateDistribution.get_values
# __init__ wrappers: pack positional values into the C++ container expected
# by the binding (an unordered set for nominal, a vector for ordinal) and
# apply remaining keyword arguments as attribute assignments.
def wrapper(f):
    @wraps(f)
    def __init__(self, *args, **kwargs):
        f(self, stl.SetLessString(*args))
        for attr in list(kwargs.keys()):
            if hasattr(self, attr):
                setattr(self, attr, kwargs.pop(attr))
            else:
                raise AttributeError("'" + self.__class__.__name__ + "' object has no attribute '" + attr + "'")
    return __init__
NominalDistribution.__init__ = wrapper(NominalDistribution.__init__)
def wrapper(f):
    @wraps(f)
    def __init__(self, *args, **kwargs):
        # Same as the nominal wrapper, but values keep their given order.
        f(self, stl.VectorString(*args))
        for attr in list(kwargs.keys()):
            if hasattr(self, attr):
                setattr(self, attr, kwargs.pop(attr))
            else:
                raise AttributeError("'" + self.__class__.__name__ + "' object has no attribute '" + attr + "'")
    return __init__
OrdinalDistribution.__init__ = wrapper(OrdinalDistribution.__init__)
#HierarchicalDistribution.__init__ = wrapper(HierarchicalDistribution.__init__)
# Binary/Ordinal distributions: convert accessor pairs into properties.
BinaryDistribution.pi = property(BinaryDistribution.get_pi, BinaryDistribution.set_pi)
del BinaryDistribution.get_pi, BinaryDistribution.set_pi
OrdinalDistribution.rank = property(OrdinalDistribution.get_rank, OrdinalDistribution.set_rank)
del OrdinalDistribution.get_rank, OrdinalDistribution.set_rank
# def wrapper(f):
#     @wraps(f)
#     def get_ordered(self):
#         values = f(self)
#         return [CategoricalElementaryEvent(value) for value in values]
#     return get_ordered
OrdinalDistribution.ordered_values = property(OrdinalDistribution.get_ordered_values, OrdinalDistribution.set_ordered_values)
del OrdinalDistribution.get_ordered_values, OrdinalDistribution.set_ordered_values
OrdinalDistribution.ordered_pi = property(OrdinalDistribution.get_ordered_pi, OrdinalDistribution.set_ordered_pi)
del OrdinalDistribution.get_ordered_pi, OrdinalDistribution.set_ordered_pi
def cdf_plot(self, axes=None, fmt='|', **kwargs):
    # Plot the cumulative probability over the ordered categorical values,
    # as bars ('|' in fmt) or a matplotlib-format line plot.
    if axes is None:
        axes = pyplot.subplot(1,1,1)
    labels = self.ordered_values
    x, labels = list(zip(*[(index, label) for index, label in enumerate(labels)]))
    y = self.pi
    if 'norm' in kwargs:
        # Optional scaling factor applied before accumulation.
        norm = kwargs.pop('norm')
        y = [norm * p for p in y]
    else:
        y = [p for p in y]
    # Reorder the probabilities by rank, then take cumulative sums.
    y = [y[i] for i in self.rank]
    y = [sum(y[:i]) for i in range(1, len(y)+1)]
    if '|' in fmt:
        fmt = fmt.replace('|', '')
        width = kwargs.pop('width', .8)
        if not 0 < width <= 1.:
            raise ValueError('\'width\' parameter must be strictly superior to 0. and inferior to 1.')
        kwargs.pop('pmin', None)
        kwargs.pop('pmax', None)
        axes.bar([q-width/2. for q in x], y, width, align='center', **kwargs)
    else:
        axes.plot(x, y, fmt, **kwargs)
    axes.set_xticks(x)
    axes.set_xticklabels(labels)
    return axes
OrdinalDistribution.cdf_plot = cdf_plot
del cdf_plot
def box_plot(self, axes=None, edgecolor="k", width=.5, vert=True, whiskers=(.09,0.91), pos=1, **kwargs):
    # Box-and-whisker plot for an ordinal distribution: quantiles are mapped
    # to their index in the ordered values so the box is drawn on an integer
    # categorical axis.
    if axes is None:
        axes = pyplot.subplot(1,1,1)
    elif not isinstance(axes, pyplot.Axes):
        raise TypeError('`axes` parameter')
    if not len(whiskers) == 2:
        raise IndexError('`whiskers` parameter')
    if not all([isinstance(i, float) for i in whiskers]):
        raise TypeError('`whiskers` parameter')
    if not all([0. <= i <= 1. for i in whiskers]):
        raise ValueError('`whiskers` parameter')
    # NOTE(review): `self.ordered` is presumably the ordered list of
    # categorical events — it is defined elsewhere; confirm against the
    # bindings.
    values = [value.value for value in self.ordered]
    qb = values.index(self.quantile(min(whiskers)))
    q1 = values.index(self.quantile(.25))
    q2 = values.index(self.quantile(.5))
    q3 = values.index(self.quantile(.75))
    qe = values.index(self.quantile(max(whiskers)))
    # NOTE(review): `prop_cycler` is deprecated in recent matplotlib; the
    # commented alternative below is the modern spelling.
    facecolor = kwargs.pop('facecolor', next(axes._get_lines.prop_cycler)['color'])
    # facecolor = kwargs.pop('facecolor', axes._get_lines.get_next_color())
    if not(qb <= q1 <= q2 <= q3 <= qe):
        raise ValueError('`whiskers` parameter')
    if vert:
        axes.bar(pos, q3-q1, width, q1, facecolor=facecolor, edgecolor=edgecolor, align='center')
        axes.plot([pos-width/2., pos+width/2.], [q2, q2], color=edgecolor)
        axes.plot([pos-width/2., pos+width/2.], [qb, qb], color=edgecolor)
        axes.plot([pos-width/2., pos+width/2.], [qe, qe], color=edgecolor)
        axes.plot([pos, pos], [qb, q1], color=edgecolor)
        axes.plot([pos, pos], [q3, qe], color=edgecolor)
        axes.set_yticks(list(range(len(values))))
        axes.set_yticklabels(values)
    else:
        axes.bar(q1, width, q3-q1, pos-width/2., facecolor=facecolor, edgecolor=edgecolor)
        axes.plot([q2, q2], [pos-width/2., pos+width/2.], color=edgecolor)
        axes.plot([qb, qb], [pos-width/2., pos+width/2.], color=edgecolor)
        axes.plot([qe, qe], [pos-width/2., pos+width/2.], color=edgecolor)
        axes.plot([qb, q1], [pos, pos], color=edgecolor)
        axes.plot([q3, qe], [pos, pos], color=edgecolor)
        axes.set_xticks(list(range(len(values))))
        axes.set_xticklabels(values)
    return axes
OrdinalDistribution.box_plot = box_plot
del box_plot
def quantitative_univariate_frequency_distribution_decorator(cls):
    # Placeholder patch kept for symmetry with the other decorators; the
    # mean/variance property conversion is currently disabled.
    # cls.mean = property(cls.get_mean)
    # del cls.get_mean
    # cls.variance = property(cls.get_variance)
    # del cls.get_variance
    pass
for cls in _QuantitativeUnivariateFrequencyDistribution:
    quantitative_univariate_frequency_distribution_decorator(cls)
# DiscreteUnivariateDistribution: read-only mean/variance properties.
DiscreteUnivariateDistribution.mean = property(DiscreteUnivariateDistribution.get_mean)
del DiscreteUnivariateDistribution.get_mean
DiscreteUnivariateDistribution.variance = property(DiscreteUnivariateDistribution.get_variance)
del DiscreteUnivariateDistribution.get_variance
def pdf_plot(self, axes=None, fmt='|', **kwargs):
    # Plot the pdf over an integer quantile range as bars ('|' in fmt)
    # and/or a matplotlib-format line plot.  The range comes from
    # 'quantiles', explicit 'qmin'/'qmax', or the 'pmin'/'pmax' quantile
    # probabilities (defaults 0.025/0.975); when reusing existing axes the
    # current x-limits seed qmin/qmax.
    if axes is None:
        axes = pyplot.subplot(1,1,1)
    else:
        qmin, qmax = axes.get_xlim()
        if 'qmin' not in kwargs and 'pmin' not in kwargs:
            kwargs['qmin'] = int(qmin)
        if 'qmax' not in kwargs and 'pmax' not in kwargs:
            kwargs['qmax'] = int(qmax)
    if 'quantiles' in kwargs:
        x = kwargs.pop('quantiles')
    else:
        if 'qmin' in kwargs:
            qmin = kwargs.pop('qmin')
        else:
            qmin = self.quantile(kwargs.pop('pmin', 0.025))
        if 'qmax' in kwargs:
            qmax = kwargs.pop('qmax')
        else:
            qmax = self.quantile(kwargs.pop('pmax', 0.975))
        x = list(range(qmin, qmax + 1))
    y = [self.pdf(q) for q in x]
    if 'norm' in kwargs:
        # Optional scaling factor (used e.g. by mixture component plots).
        norm = kwargs.pop('norm')
        y = [norm * p for p in y]
    if '|' in fmt:
        fmt = fmt.replace('|', '')
        width = kwargs.pop('width', .2)
        if not 0 < width <= 1.:
            raise ValueError('\'width\' parameter must be strictly superior to 0. and inferior to 1.')
        axes.bar([q-width/2. for q in x], y, width, align='center', **kwargs)
    if len(fmt) > 0:
        axes.plot(x, y, fmt, **kwargs)
    return axes
DiscreteUnivariateDistribution.pdf_plot = pdf_plot
del pdf_plot
def cdf_plot(self, axes=None, fmt='o-', **kwargs):
    # Step plot of the cdf over an integer quantile range: 'o' in fmt marks
    # the points, any remaining format chars draw the horizontal steps.
    if axes is None:
        axes = pyplot.subplot(1,1,1)
    else:
        qmin, qmax = axes.get_xlim()
        if 'qmin' not in kwargs and 'pmin' not in kwargs:
            kwargs['qmin'] = int(qmin)
        if 'qmax' not in kwargs and 'pmax' not in kwargs:
            kwargs['qmax'] = int(qmax)
    # NOTE: the default argument of kwargs.pop('quantiles', ...) is evaluated
    # eagerly, so qmin/qmax/pmin/pmax are always popped (and self.quantile
    # possibly called) even when 'quantiles' is given.
    x = kwargs.pop('quantiles', list(range(kwargs.pop('qmin', self.quantile(kwargs.pop('pmin', 0.025))), kwargs.pop('qmax', self.quantile(kwargs.pop('pmax', 0.975)))+1)))
    y = [self.cdf(q) for q in x]
    if 'norm' in kwargs:
        # Optional scaling factor.
        norm = kwargs.pop('norm')
        y = [norm * p for p in y]
    if 'o' in fmt:
        axes.plot(x, y, 'o', **kwargs)
        fmt = fmt.replace('o', '')
    if len(fmt) > 0:
        for i, j in enumerate(x):
            axes.plot([j, j+1], [y[i], y[i]], fmt, **kwargs)
    return axes
DiscreteUnivariateDistribution.cdf_plot = cdf_plot
del cdf_plot
def box_plot(self, axes=None, edgecolor="k", width=.5, vert=True, whiskers=(.09,0.91), pos=1, mean=None, sd=None, marker='o', **kwargs):
    """Draw a box-and-whisker plot of the distribution on `axes`.

    :param axes: matplotlib axes (a new subplot is created when None).
    :param edgecolor: color of the box edges, median and whisker lines.
    :param width: box thickness on the categorical axis (0 < width <= 1).
    :param vert: vertical (True) or horizontal (False) orientation.
    :param whiskers: pair of probabilities giving the whisker quantiles.
    :param pos: coordinate of the box on the categorical axis.
    :param mean: None to plot the mean only when it lies within the
                 whiskers, True to force plotting it, False to disable it.
    :param sd: None/True/False with the same convention for the +/- one
               standard-deviation markers (centered on the mean, or on the
               median when the mean is disabled).
    :param marker: marker used for the mean / standard-deviation points.
    """
    if axes is None:
        axes = pyplot.subplot(1,1,1)
    elif not isinstance(axes, pyplot.Axes):
        raise TypeError('`axes` parameter')
    if not len(whiskers) == 2:
        raise IndexError('`whiskers` parameter')
    if not all([isinstance(i, float) for i in whiskers]):
        raise TypeError('`whiskers` parameter')
    if not all([0. <= i <= 1. for i in whiskers]):
        raise ValueError('`whiskers` parameter')
    # Whisker ends, quartiles and median.
    qb = self.quantile(min(whiskers))
    q1 = self.quantile(.25)
    q2 = self.quantile(.5)
    q3 = self.quantile(.75)
    qe = self.quantile(max(whiskers))
    # NOTE(review): `prop_cycler` is deprecated in recent matplotlib; the
    # commented alternative below is the modern spelling.
    facecolor = kwargs.pop('facecolor', next(axes._get_lines.prop_cycler)['color'])
    # facecolor = kwargs.pop('facecolor', axes._get_lines.get_next_color())
    if vert:
        axes.bar(pos, q3-q1, width, q1, facecolor=facecolor, edgecolor=edgecolor, align='center')
        axes.plot([pos-width/2., pos+width/2.], [q2, q2], color=edgecolor)
        axes.plot([pos-width/2., pos+width/2.], [qb, qb], color=edgecolor)
        axes.plot([pos-width/2., pos+width/2.], [qe, qe], color=edgecolor)
        axes.plot([pos, pos], [qb, q1], color=edgecolor)
        axes.plot([pos, pos], [q3, qe], color=edgecolor)
    else:
        axes.bar(q1, width, q3-q1, pos-width/2., facecolor=facecolor, edgecolor=edgecolor)
        axes.plot([q2, q2], [pos-width/2., pos+width/2.], color=edgecolor)
        axes.plot([qb, qb], [pos-width/2., pos+width/2.], color=edgecolor)
        axes.plot([qe, qe], [pos-width/2., pos+width/2.], color=edgecolor)
        axes.plot([qb, q1], [pos, pos], color=edgecolor)
        axes.plot([q3, qe], [pos, pos], color=edgecolor)
    if mean is None:
        mean = self.mean
        if not qb <= mean <= qe:
            mean = False
    elif mean is True:
        mean = self.mean
    # Fixed: `if mean:` silently skipped a legitimate mean equal to 0.
    if mean is not False:
        if vert:
            axes.plot([pos], [mean], linestyle='None', marker=marker, markeredgecolor=edgecolor, markerfacecolor=facecolor)
        else:
            axes.plot([mean], [pos], linestyle='None', marker=marker, markeredgecolor=edgecolor, markerfacecolor=facecolor)
    if mean is False and sd is not False:
        # Center the standard-deviation markers on the median instead.
        mean = q2
    if sd is None:
        sd = math.sqrt(self.variance)
        # Fixed: missing parentheses — `not` bound only to the first
        # comparison, so an interval overflowing the upper whisker (or both
        # whiskers) was still drawn; mirror the chained check used for the
        # mean above.
        if not (qb <= mean - sd and mean + sd <= qe):
            sd = False
    elif sd is True:
        sd = math.sqrt(self.variance)
    if sd:
        if vert:
            axes.plot([pos, pos], [mean - sd, mean + sd], linestyle='None', marker=marker, markeredgecolor=edgecolor, markerfacecolor=facecolor)
        else:
            axes.plot([mean - sd, mean + sd], [pos, pos], linestyle='None', marker=marker, markeredgecolor=edgecolor, markerfacecolor=facecolor)
    return axes
DiscreteUnivariateDistribution.box_plot = box_plot
ContinuousUnivariateDistribution.box_plot = box_plot
del box_plot
#def lorenz_plot(self, axes=None, fmt='o-', color='r', alpha=1., equality=True, **kwargs):
# if axes is None:
# axes = pyplot.subplot(1,1,1)
# else:
# qmin, qmax = axes.get_xlim()
# if 'qmin' not in kwargs and 'pmin' not in kwargs:
# kwargs['qmin'] = int(qmin)
# if 'qmax' not in kwargs and 'pmax' not in kwargs:
# kwargs['qmax'] = int(qmax)
# x = range(kwargs.pop('qmin', self.quantile(kwargs.pop('pmin', 0.025))), kwargs.pop('qmax', self.quantile(kwargs.pop('pmax', 0.975)))+1)
# x, y = [self.cdf(q) for q in x], [self.pdf(q) * q for q in x]
# y = [sum(y[:i+1]) for i in range(len(y))]
# y = [i/y[-1] for i in y]
# axes.plot(x, y, fmt, color=color, alpha=alpha)
# if equality:
# axes.plot([0., 1.], [0., 1.], kwargs.pop('efmt', '--'), color=kwargs.pop('ecolor', color), alpha=kwargs.pop('ealpha', alpha))
# return axes
#
#DiscreteUnivariateDistribution.lorenz_plot = lorenz_plot
#del lorenz_plot
def __repr__(self):
return "P(" + str(self.theta) + ")"
PoissonDistribution.__str__ = __repr__
PoissonDistribution.__repr__ = __repr__
del __repr__
def _repr_latex_(self):
return r"$\mathcal{P}\left(" + str(self.theta) + r"\right)$"
PoissonDistribution._repr_latex_ = _repr_latex_
del _repr_latex_
PoissonDistribution.theta = property(PoissonDistribution.get_theta, PoissonDistribution.set_theta)
del PoissonDistribution.get_theta, PoissonDistribution.set_theta
def __repr__(self):
return "B(" + str(self.kappa) + ", " + str(self.pi) + ")"
BinomialDistribution.__str__ = __repr__
BinomialDistribution.__repr__ = __repr__
del __repr__
def _repr_latex_(self):
return r"$\mathcal{B}\left(" + str(self.kappa) + ", " + str(self.pi) + r"\right)$"
BinomialDistribution._repr_latex_ = _repr_latex_
del _repr_latex_
BinomialDistribution.kappa = property(BinomialDistribution.get_kappa, BinomialDistribution.set_kappa)
del BinomialDistribution.get_kappa, BinomialDistribution.set_kappa
BinomialDistribution.pi = property(BinomialDistribution.get_pi, BinomialDistribution.set_pi)
del BinomialDistribution.get_pi, BinomialDistribution.set_pi
def __repr__(self):
return "Log(" + str(self.theta) + ")"
LogarithmicDistribution.__str__ = __repr__
LogarithmicDistribution.__repr__ = __repr__
del __repr__
def _repr_latex_(self):
return r"$\mathrm{Log}\left(" + str(self.theta) + r"\right)$"
LogarithmicDistribution._repr_latex_ = _repr_latex_
del _repr_latex_
LogarithmicDistribution.theta = property(LogarithmicDistribution.get_theta, LogarithmicDistribution.set_theta)
del LogarithmicDistribution.get_theta, LogarithmicDistribution.set_theta
def __repr__(self):
return "G(" + str(self.pi) + ")"
GeometricDistribution.__str__ = __repr__
GeometricDistribution.__repr__ = __repr__
del __repr__
def _repr_latex_(self):
return r"$\mathcal{G}\left(" + str(self.pi) + r"\right)$"
GeometricDistribution._repr_latex_ = _repr_latex_
del _repr_latex_
GeometricDistribution.pi = property(GeometricDistribution.get_pi, GeometricDistribution.set_pi)
del GeometricDistribution.get_pi, GeometricDistribution.set_pi
# Negative binomial distribution, plus the shared alpha/gamma properties of
# the beta-compound discrete family.
def __repr__(self):
    """Plain-text display: NB(kappa, pi)."""
    return "NB(" + str(self.kappa) + ", " + str(self.pi) + ")"
NegativeBinomialDistribution.__str__ = __repr__
NegativeBinomialDistribution.__repr__ = __repr__
del __repr__
def _repr_latex_(self):
    """IPython rich display as LaTeX."""
    return r"$\mathcal{NB}\left(" + str(self.kappa) + ", " + str(self.pi) + r"\right)$"
NegativeBinomialDistribution._repr_latex_ = _repr_latex_
del _repr_latex_
NegativeBinomialDistribution.kappa = property(NegativeBinomialDistribution.get_kappa, NegativeBinomialDistribution.set_kappa)
del NegativeBinomialDistribution.get_kappa, NegativeBinomialDistribution.set_kappa
NegativeBinomialDistribution.pi = property(NegativeBinomialDistribution.get_pi, NegativeBinomialDistribution.set_pi)
del NegativeBinomialDistribution.get_pi, NegativeBinomialDistribution.set_pi
BetaCompoundDiscreteUnivariateDistribution.alpha = property(BetaCompoundDiscreteUnivariateDistribution.get_alpha, BetaCompoundDiscreteUnivariateDistribution.set_alpha)
del BetaCompoundDiscreteUnivariateDistribution.get_alpha, BetaCompoundDiscreteUnivariateDistribution.set_alpha
BetaCompoundDiscreteUnivariateDistribution.gamma = property(BetaCompoundDiscreteUnivariateDistribution.get_gamma, BetaCompoundDiscreteUnivariateDistribution.set_gamma)
del BetaCompoundDiscreteUnivariateDistribution.get_gamma, BetaCompoundDiscreteUnivariateDistribution.set_gamma
# Beta-binomial and beta-negative-binomial distributions: displays and kappa
# properties (alpha/gamma come from the compound base class above).
def __repr__(self):
    """Plain-text display: BetaB(kappa, alpha, gamma)."""
    return "BetaB(" + str(self.kappa) + ", " + str(self.alpha) + ", " + str(self.gamma) + ")"
BetaBinomialDistribution.__str__ = __repr__
BetaBinomialDistribution.__repr__ = __repr__
del __repr__
def _repr_latex_(self):
    """IPython rich display as LaTeX."""
    return r"$\beta\mathcal{B}\left(" + str(self.kappa) + ", " + str(self.alpha) + ", " + str(self.gamma) + r"\right)$"
BetaBinomialDistribution._repr_latex_ = _repr_latex_
del _repr_latex_
BetaBinomialDistribution.kappa = property(BetaBinomialDistribution.get_kappa, BetaBinomialDistribution.set_kappa)
del BetaBinomialDistribution.get_kappa, BetaBinomialDistribution.set_kappa
def __repr__(self):
    """Plain-text display: BetaNB(kappa, alpha, gamma)."""
    return "BetaNB(" + str(self.kappa) + ", " + str(self.alpha) + ", " + str(self.gamma) + ")"
BetaNegativeBinomialDistribution.__str__ = __repr__
BetaNegativeBinomialDistribution.__repr__ = __repr__
del __repr__
def _repr_latex_(self):
    """IPython rich display as LaTeX."""
    return r"$\beta\mathcal{NB}\left(" + str(self.kappa) + ", " + str(self.alpha) + ", " + str(self.gamma) + r"\right)$"
BetaNegativeBinomialDistribution._repr_latex_ = _repr_latex_
del _repr_latex_
BetaNegativeBinomialDistribution.kappa = property(BetaNegativeBinomialDistribution.get_kappa, BetaNegativeBinomialDistribution.set_kappa)
del BetaNegativeBinomialDistribution.get_kappa, BetaNegativeBinomialDistribution.set_kappa
# Read-only mean/variance properties over the generated getters.
ContinuousUnivariateDistribution.mean = property(ContinuousUnivariateDistribution.get_mean)
del ContinuousUnivariateDistribution.get_mean
ContinuousUnivariateDistribution.variance = property(ContinuousUnivariateDistribution.get_variance)
del ContinuousUnivariateDistribution.get_variance
def pdf_plot(self, axes=None, fmt='-', num=100, **kwargs):
    """Plot the probability density function of a continuous distribution.

    :param axes: matplotlib axes to draw on; a new subplot is created when
        None, otherwise the current x-limits seed the plotting range.
    :param fmt: matplotlib format string; a '|' additionally draws vertical
        lines from 0 up to the curve.
    :param num: number of evaluation points for the abscissae grid.
    :param kwargs: `quantiles` (explicit abscissae), `qmin`/`qmax` (quantile
        bounds), `pmin`/`pmax` (probability bounds, defaults 0.025/0.975),
        `norm` (scale factor applied to the densities); anything left is
        forwarded to matplotlib.
    :return: the axes the curve was drawn on.
    """
    if axes is None:
        axes = pyplot.subplot(1,1,1)
    else:
        qmin, qmax = axes.get_xlim()
        if 'qmin' not in kwargs and 'pmin' not in kwargs:
            kwargs['qmin'] = qmin
        if 'qmax' not in kwargs and 'pmax' not in kwargs:
            kwargs['qmax'] = qmax
    # Consume all range-related keywords up front (as before), but only
    # evaluate self.quantile / numpy.linspace when they are actually needed;
    # the previous in-line form computed them eagerly as pop() defaults even
    # when `quantiles` or explicit bounds were supplied.
    pmin = kwargs.pop('pmin', 0.025)
    pmax = kwargs.pop('pmax', 0.975)
    qmin = kwargs.pop('qmin', None)
    qmax = kwargs.pop('qmax', None)
    x = kwargs.pop('quantiles', None)
    if x is None:
        if qmin is None:
            qmin = self.quantile(pmin)
        if qmax is None:
            qmax = self.quantile(pmax)
        x = numpy.linspace(qmin, qmax, num=num)
    y = [self.pdf(q) for q in x]
    if 'norm' in kwargs:
        norm = kwargs.pop('norm')
        y = [norm * p for p in y]
    if '|' in fmt:
        fmt = fmt.replace('|', '')
        axes.vlines(x, 0, y, **kwargs)
    if len(fmt) > 0:
        axes.plot(x, y, fmt, **kwargs)
    return axes
ContinuousUnivariateDistribution.pdf_plot = pdf_plot
del pdf_plot
def cdf_plot(self, axes=None, fmt='-', num=100, **kwargs):
    """Plot the cumulative distribution function of a continuous distribution.

    Same keyword protocol as ``pdf_plot``: `quantiles`, `qmin`/`qmax`,
    `pmin`/`pmax` (defaults 0.025/0.975) and `norm`; the rest is forwarded
    to matplotlib.  Returns the axes drawn on.
    """
    if axes is None:
        axes = pyplot.subplot(1,1,1)
    else:
        qmin, qmax = axes.get_xlim()
        if 'qmin' not in kwargs and 'pmin' not in kwargs:
            kwargs['qmin'] = qmin
        if 'qmax' not in kwargs and 'pmax' not in kwargs:
            kwargs['qmax'] = qmax
    # Pop all range keywords (as before), but evaluate self.quantile /
    # numpy.linspace lazily instead of eagerly as pop() defaults.
    pmin = kwargs.pop('pmin', 0.025)
    pmax = kwargs.pop('pmax', 0.975)
    qmin = kwargs.pop('qmin', None)
    qmax = kwargs.pop('qmax', None)
    x = kwargs.pop('quantiles', None)
    if x is None:
        if qmin is None:
            qmin = self.quantile(pmin)
        if qmax is None:
            qmax = self.quantile(pmax)
        x = numpy.linspace(qmin, qmax, num=num)
    y = [self.cdf(q) for q in x]
    if 'norm' in kwargs:
        norm = kwargs.pop('norm')
        y = [norm * p for p in y]
    axes.plot(x, y, fmt, **kwargs)
    return axes
ContinuousUnivariateDistribution.cdf_plot = cdf_plot
del cdf_plot
def statiskit_univariate_frequency_distribution_decorator(cls):
    """Patch a univariate frequency distribution class: `pi` property, typed
    `values` property, aligned LaTeX display, and plot-range defaults for the
    quantitative (non-categorical) outcome types."""
    cls.pi = property(cls.get_pi, cls.set_pi)
    del cls.get_pi, cls.set_pi
    # Wrap get_values so raw outcomes come back as elementary events of the
    # outcome type handled by this class.
    if cls.EventType == DiscreteEvent:
        def wrapper(f):
            @wraps(f)
            def get_values(self):
                values = f(self)
                return [DiscreteElementaryEvent(value) for value in values]
            return get_values
        cls.values = property(wrapper(cls.get_values))
        del wrapper, cls.get_values
    elif cls.EventType == ContinuousEvent:
        def wrapper(f):
            @wraps(f)
            def get_values(self):
                values = f(self)
                return [ContinuousElementaryEvent(value) for value in values]
            return get_values
        cls.values = property(wrapper(cls.get_values))
        del wrapper, cls.get_values
    def _repr_latex_(self):
        # One "\pi_{value} = prob" row per outcome, eliding the middle rows
        # ("... = ...") according to the global head/tail display controls.
        pi = self.pi
        string = []
        etc = False
        for i, j in enumerate(self.values):
            if i < controls.head or i >= max(controls.head, len(pi) - controls.tail):
                string.append("\\pi_{" + remove_latex(j._repr_latex_()) + "} &= " + float_str(pi[i]))
            elif not etc:
                etc = True
                string.append('\\dots &= \\dots')
        return '$\\begin{align}\n\t' + ',\\\\\n\t'.join(string) + '.\n\\end{align}$'
    cls._repr_latex_ = _repr_latex_
    del _repr_latex_
    if not cls.EventType == CategoricalEvent:
        # Quantitative supports: default the pdf plot to the distribution's
        # own support values, and the cdf plot to the full [0, 1] range.
        def wrapper(f):
            @wraps(f)
            def pdf_plot(self, fmt='|', **kwargs):
                if 'quantiles' not in kwargs and 'qmin' not in kwargs and 'pmin' not in kwargs and not 'qmax' in kwargs and 'pmax' not in kwargs:
                    kwargs['quantiles'] = [value.value for value in self.values]
                return f(self, fmt=fmt, **kwargs)
            return pdf_plot
        cls.pdf_plot = wrapper(cls.pdf_plot)
        del wrapper
        def wrapper(f):
            @wraps(f)
            def cdf_plot(self, **kwargs):
                if 'quantiles' not in kwargs:
                    if 'qmin' not in kwargs and 'pmin' not in kwargs:
                        kwargs['pmin'] = 0.
                    if 'qmax' not in kwargs and 'pmax' not in kwargs:
                        kwargs['pmax'] = 1.
                return f(self, **kwargs)
            return cdf_plot
        cls.cdf_plot = wrapper(cls.cdf_plot)
        del wrapper
        def wrapper(f):
            @wraps(f)
            def box_plot(self, axes=None, extrema=True, vert=True, pos=1, edgecolor="k", **kwargs):
                if axes is None:
                    axes = pyplot.subplot(1, 1, 1)
                facecolor = kwargs.pop('facecolor', next(axes._get_lines.prop_cycler)['color'])
                # facecolor = kwargs.pop('facecolor', axes._get_lines.get_next_color())
                axes = f(self, axes=axes, vert=vert, pos=pos, facecolor=facecolor, edgecolor=edgecolor, **kwargs)
                if extrema:
                    # Overlay the smallest and largest support values as points.
                    values = self.values
                    values = [values[0].value, values[-1].value]
                    if vert:
                        axes.scatter([pos]*len(values), values, c=facecolor, edgecolors=edgecolor)
                    else:
                        axes.scatter(values, [pos]*len(values), c=facecolor, edgecolors=edgecolor)
                return axes
            return box_plot
        cls.box_plot = wrapper(cls.box_plot)
        del wrapper
for cls in _UnivariateFrequencyDistribution:
    statiskit_univariate_frequency_distribution_decorator(cls)
# Placeholder decorator: quantitative frequency distributions currently need
# no patching beyond what the generic univariate decorator already applied.
def statiskit_quantitative_univariate_frequency_distribution_decorator(cls):
    pass
for cls in _QuantitativeUnivariateFrequencyDistribution:
    statiskit_quantitative_univariate_frequency_distribution_decorator(cls)
def __repr__(self):
    """Concise display name; the histogram has no scalar parameters to show."""
    return "Univariate Histogram Distribution"
# Bug fix: __str__ was previously bound to NormalDistribution.__repr__ (a
# copy-paste slip — NormalDistribution is not even patched until later in this
# module).  Bind both __str__ and __repr__ to the local definition, matching
# every other distribution in this file.
UnivariateHistogramDistribution.__str__ = __repr__
UnivariateHistogramDistribution.__repr__ = __repr__
del __repr__
def _repr_latex_(self):
    """One "\\pi_{[left, right[} = mass" row per bin (density x width), with
    head/tail elision driven by the global display controls."""
    bins = [x for x in self.bins]
    densities = self.densities
    string = []
    etc = False
    for i, j in enumerate([(i, j) for i, j in zip(bins[:-1], bins[1:])]):
        if i < controls.head or i >= max(controls.head, len(densities) - controls.tail):
            string.append("\\pi_{[" + float_str(j[0]) + ', ' + float_str(j[-1]) + "[} &= " + float_str(densities[i]*(j[-1]-j[0])))
        elif not etc:
            etc = True
            string.append('\\dots &= \\dots')
    return '$\\begin{align}\n\t' + ',\\\\\n\t'.join(string) + '.\n\\end{align}$'
UnivariateHistogramDistribution._repr_latex_ = _repr_latex_
del _repr_latex_
# Read-only bin boundaries and densities.  NOTE(review): unlike the other
# patched classes, the raw get_bins/get_densities accessors are not deleted
# here — confirm whether that is intentional.
UnivariateHistogramDistribution.bins = property(UnivariateHistogramDistribution.get_bins)
UnivariateHistogramDistribution.densities = property(UnivariateHistogramDistribution.get_densities)
def pdf_plot(self, axes=None, fmt='|', fill=True, **kwargs):
    """Plot the histogram density as bars ('|'), markers ('o') and/or steps.

    :param axes: matplotlib axes (a new subplot is created when None).
    :param fmt: any combination of '|' (filled bars), 'o' (markers at the bin
        left edges plus the final right edge) and a matplotlib line style for
        horizontal per-bin steps.
    :param fill: kept for interface compatibility; not used by this body.
    :param kwargs: `norm` scales the densities; `color`/`edgecolor` style the
        artists; the rest is forwarded to matplotlib.
    :return: the axes drawn on.
    """
    if axes is None:
        axes = pyplot.subplot(1,1,1)
    # (Dead xmin/xmax/ymin/ymax bookkeeping removed: the limits were computed
    # but never used.)
    bins = [x for x in self.bins]
    densities = [d for d in self.densities]
    if 'norm' in kwargs:
        norm = kwargs.pop('norm')
        densities = [norm * d for d in densities]
    color = kwargs.pop('color', next(axes._get_lines.prop_cycler)['color'])
    # color = kwargs.pop('color', axes._get_lines.get_next_color())
    if '|' in fmt:
        # Bug fix: `edgecolor` used to be popped inside the loop, so only the
        # first bar honored a caller-supplied edge color.
        edgecolor = kwargs.pop('edgecolor', 'k')
        for lc, rc, d in zip(bins[:-1], bins[1:], densities):
            axes.bar(x=lc, height=d, width=rc-lc, bottom=0., facecolor=color, edgecolor=edgecolor, align='edge', **kwargs)
        fmt = fmt.replace('|', '')
    if 'o' in fmt:
        # Bug fix: the first call referenced an undefined name `alpha`
        # (NameError whenever 'o' was requested); forward **kwargs like the
        # second call does.
        axes.plot(bins[:-1], densities, 'o', color=color, **kwargs)
        axes.plot([bins[-1]], [densities[-1]], 'o', color=color, **kwargs)
        fmt = fmt.replace('o', '')
    if len(fmt) > 0:
        for lc, rc, d in zip(bins[:-1], bins[1:], densities):
            axes.plot([lc, rc], [d, d], fmt, color=color, **kwargs)
    return axes
UnivariateHistogramDistribution.pdf_plot = pdf_plot
del pdf_plot
def wrapper(f):
    """Default the histogram cdf plot to the full [0, 1] probability range
    when the caller gave no explicit quantiles or bounds."""
    @wraps(f)
    def cdf_plot(self, **kwargs):
        if 'quantiles' not in kwargs:
            if not ('qmin' in kwargs or 'pmin' in kwargs):
                kwargs['pmin'] = 0.
            if not ('qmax' in kwargs or 'pmax' in kwargs):
                kwargs['pmax'] = 1.
        return f(self, **kwargs)
    return cdf_plot
UnivariateHistogramDistribution.cdf_plot = wrapper(UnivariateHistogramDistribution.cdf_plot)
del wrapper
# Location-scale families (normal, logistic, Laplace, Cauchy): plain/LaTeX
# displays plus read/write mu (location) and sigma (scale) properties.
def __repr__(self):
    """Plain-text display: N(mu, sigma)."""
    return "N(" + float_str(self.mu) + ', ' + float_str(self.sigma) + ')'
NormalDistribution.__str__ = __repr__
NormalDistribution.__repr__ = __repr__
del __repr__
def _repr_latex_(self):
    """IPython rich display as LaTeX."""
    return r"$\mathcal{N}\left(" + float_str(self.mu) + ', ' + float_str(self.sigma) + r'\right)$'
NormalDistribution._repr_latex_ = _repr_latex_
del _repr_latex_
NormalDistribution.mu = property(NormalDistribution.get_mu, NormalDistribution.set_mu)
del NormalDistribution.get_mu, NormalDistribution.set_mu
NormalDistribution.sigma = property(NormalDistribution.get_sigma, NormalDistribution.set_sigma)
del NormalDistribution.get_sigma, NormalDistribution.set_sigma
def __repr__(self):
    """Plain-text display: Lo(mu, sigma)."""
    return "Lo(" + float_str(self.mu) + ', ' + float_str(self.sigma) + ')'
LogisticDistribution.__str__ = __repr__
LogisticDistribution.__repr__ = __repr__
del __repr__
def _repr_latex_(self):
    """IPython rich display as LaTeX."""
    return r"$\mathcal{Lo}\left(" + float_str(self.mu) + ', ' + float_str(self.sigma) + r'\right)$'
LogisticDistribution._repr_latex_ = _repr_latex_
del _repr_latex_
LogisticDistribution.mu = property(LogisticDistribution.get_mu, LogisticDistribution.set_mu)
del LogisticDistribution.get_mu, LogisticDistribution.set_mu
LogisticDistribution.sigma = property(LogisticDistribution.get_sigma, LogisticDistribution.set_sigma)
del LogisticDistribution.get_sigma, LogisticDistribution.set_sigma
def __repr__(self):
    """Plain-text display: La(mu, sigma)."""
    return "La(" + float_str(self.mu) + ', ' + float_str(self.sigma) + ')'
LaplaceDistribution.__str__ = __repr__
LaplaceDistribution.__repr__ = __repr__
del __repr__
def _repr_latex_(self):
    """IPython rich display as LaTeX."""
    return r"$\mathcal{La}\left(" + float_str(self.mu) + ', ' + float_str(self.sigma) + r'\right)$'
LaplaceDistribution._repr_latex_ = _repr_latex_
del _repr_latex_
LaplaceDistribution.mu = property(LaplaceDistribution.get_mu, LaplaceDistribution.set_mu)
del LaplaceDistribution.get_mu, LaplaceDistribution.set_mu
LaplaceDistribution.sigma = property(LaplaceDistribution.get_sigma, LaplaceDistribution.set_sigma)
del LaplaceDistribution.get_sigma, LaplaceDistribution.set_sigma
def __repr__(self):
    """Plain-text display: C(mu, sigma)."""
    return "C(" + float_str(self.mu) + ', ' + float_str(self.sigma) + ')'
CauchyDistribution.__str__ = __repr__
CauchyDistribution.__repr__ = __repr__
del __repr__
def _repr_latex_(self):
    """IPython rich display as LaTeX."""
    return r"$\mathcal{C}\left(" + float_str(self.mu) + ', ' + float_str(self.sigma) + r'\right)$'
CauchyDistribution._repr_latex_ = _repr_latex_
del _repr_latex_
CauchyDistribution.mu = property(CauchyDistribution.get_mu, CauchyDistribution.set_mu)
del CauchyDistribution.get_mu, CauchyDistribution.set_mu
CauchyDistribution.sigma = property(CauchyDistribution.get_sigma, CauchyDistribution.set_sigma)
del CauchyDistribution.get_sigma, CauchyDistribution.set_sigma
# Student-t (standard and non-standard) distributions: displays plus
# nu (degrees of freedom) and, for the non-standard form, mu/sigma properties.
def __repr__(self):
    """Plain-text display: T(nu)."""
    return "T(" + float_str(self.nu) + ')'
StudentDistribution.__str__ = __repr__
StudentDistribution.__repr__ = __repr__
del __repr__
def _repr_latex_(self):
    """IPython rich display as LaTeX."""
    return r"$\mathcal{T}\left(" + float_str(self.nu) + r'\right)$'
StudentDistribution._repr_latex_ = _repr_latex_
del _repr_latex_
StudentDistribution.nu = property(StudentDistribution.get_nu, StudentDistribution.set_nu)
del StudentDistribution.get_nu, StudentDistribution.set_nu
def __repr__(self):
    """Plain-text display: nsT(mu, sigma, nu)."""
    return "nsT(" + float_str(self.mu) + ', ' + float_str(self.sigma) + ', ' + float_str(self.nu) + ')'
NonStandardStudentDistribution.__str__ = __repr__
NonStandardStudentDistribution.__repr__ = __repr__
del __repr__
def _repr_latex_(self):
    """IPython rich display as LaTeX."""
    return r"$\mathcal{nsT}\left(" + float_str(self.mu) + ', ' + float_str(self.sigma) + ', ' + float_str(self.nu) + r'\right)$'
NonStandardStudentDistribution._repr_latex_ = _repr_latex_
del _repr_latex_
NonStandardStudentDistribution.mu = property(NonStandardStudentDistribution.get_mu, NonStandardStudentDistribution.set_mu)
del NonStandardStudentDistribution.get_mu, NonStandardStudentDistribution.set_mu
NonStandardStudentDistribution.sigma = property(NonStandardStudentDistribution.get_sigma, NonStandardStudentDistribution.set_sigma)
del NonStandardStudentDistribution.get_sigma, NonStandardStudentDistribution.set_sigma
#########################################################
# Gumbel and Gompertz distributions                     #
#########################################################
def __repr__(self):
    """Plain-text display: Gu(mu, sigma)."""
    return "Gu(" + float_str(self.mu) + ', ' + float_str(self.sigma) + ')'
GumbelDistribution.__str__ = __repr__
GumbelDistribution.__repr__ = __repr__
del __repr__
def _repr_latex_(self):
    """IPython rich display as LaTeX.  Bug fix: the symbol was \\mathcal{C}
    (left over from the Cauchy block); every other family uses the same
    abbreviation in LaTeX as in its plain repr, so render \\mathcal{Gu}."""
    return r"$\mathcal{Gu}\left(" + float_str(self.mu) + ', ' + float_str(self.sigma) + r'\right)$'
GumbelDistribution._repr_latex_ = _repr_latex_
del _repr_latex_
GumbelDistribution.mu = property(GumbelDistribution.get_mu, GumbelDistribution.set_mu)
del GumbelDistribution.get_mu, GumbelDistribution.set_mu
GumbelDistribution.sigma = property(GumbelDistribution.get_sigma, GumbelDistribution.set_sigma)
del GumbelDistribution.get_sigma, GumbelDistribution.set_sigma
def __repr__(self):
    """Plain-text display: Go(mu, sigma)."""
    return "Go(" + float_str(self.mu) + ', ' + float_str(self.sigma) + ')'
GompertzDistribution.__str__ = __repr__
GompertzDistribution.__repr__ = __repr__
del __repr__
def _repr_latex_(self):
    """IPython rich display as LaTeX.  Bug fix: the symbol was \\mathcal{C}
    (left over from the Cauchy block); use \\mathcal{Go} to match the plain
    repr, as every other family in this module does."""
    return r"$\mathcal{Go}\left(" + float_str(self.mu) + ', ' + float_str(self.sigma) + r'\right)$'
GompertzDistribution._repr_latex_ = _repr_latex_
del _repr_latex_
GompertzDistribution.mu = property(GompertzDistribution.get_mu, GompertzDistribution.set_mu)
del GompertzDistribution.get_mu, GompertzDistribution.set_mu
GompertzDistribution.sigma = property(GompertzDistribution.get_sigma, GompertzDistribution.set_sigma)
del GompertzDistribution.get_sigma, GompertzDistribution.set_sigma
# Gamma distribution: displays plus alpha (shape) / beta properties.
def __repr__(self):
    """Plain-text display: Gamma(alpha, beta)."""
    return "Gamma(" + float_str(self.alpha) + ', ' + float_str(self.beta) + ')'
GammaDistribution.__str__ = __repr__
GammaDistribution.__repr__ = __repr__
del __repr__
def _repr_latex_(self):
    """IPython rich display as LaTeX."""
    return r"$\Gamma\left(" + float_str(self.alpha) + ', ' + float_str(self.beta) + r'\right)$'
GammaDistribution._repr_latex_ = _repr_latex_
del _repr_latex_
GammaDistribution.alpha = property(GammaDistribution.get_alpha, GammaDistribution.set_alpha)
del GammaDistribution.get_alpha, GammaDistribution.set_alpha
GammaDistribution.beta = property(GammaDistribution.get_beta, GammaDistribution.set_beta)
del GammaDistribution.get_beta, GammaDistribution.set_beta
def __repr__(self):
    """Plain-text display: Beta(alpha, beta)."""
    return "Beta(" + float_str(self.alpha) + ', ' + float_str(self.beta) + ')'
BetaDistribution.__str__ = __repr__
BetaDistribution.__repr__ = __repr__
del __repr__
def _repr_latex_(self):
    """IPython rich display as LaTeX.  Bug fix: \\Beta is not a LaTeX macro
    (an uppercase beta is just the latin letter B), so the display failed to
    render; use \\mathrm{Beta} to match the plain repr."""
    return r"$\mathrm{Beta}\left(" + float_str(self.alpha) + ', ' + float_str(self.beta) + r'\right)$'
BetaDistribution._repr_latex_ = _repr_latex_
del _repr_latex_
BetaDistribution.alpha = property(BetaDistribution.get_alpha, BetaDistribution.set_alpha)
del BetaDistribution.get_alpha, BetaDistribution.set_alpha
BetaDistribution.beta = property(BetaDistribution.get_beta, BetaDistribution.set_beta)
del BetaDistribution.get_beta, BetaDistribution.set_beta
def wrapper_probability(f):
    @wraps(f)
    def probability(self, *events, **kwargs):
        """Accept either a single MultivariateEvent or one plain outcome per
        component (str/int/float/UnivariateEvent), coerce to an event vector,
        and forward together with the optional `log` flag."""
        if len(events) == 1:
            event = events[-1]
        else:
            event = None
        if not isinstance(event, MultivariateEvent):
            event = VectorEvent(len(events))
            for index, component in enumerate(events):
                # Dispatch on the plain Python type of each component.
                if isinstance(component, str):
                    event[index] = CategoricalElementaryEvent(component)
                elif isinstance(component, int):
                    event[index] = DiscreteElementaryEvent(component)
                elif isinstance(component, float):
                    event[index] = ContinuousElementaryEvent(component)
                elif isinstance(component, UnivariateEvent):
                    event[index] = component
                else:
                    raise TypeError('\'events\' parameters')
            # event = VectorEvent(event)
        if not isinstance(event, MultivariateEvent):
            raise TypeError('\'event\' parameter')
        return f(self, event, kwargs.pop('log', False))
    return probability
MultivariateDistribution.probability = wrapper_probability(MultivariateDistribution.probability)
def simulation(self, size):
    """Draw `size` samples and hand them to from_list as per-component columns."""
    draws = [self.simulate() for _ in range(size)]
    columns = [list(component) for component in zip(*draws)]
    return from_list(*columns)
MultivariateDistribution.simulation = simulation
del simulation
MultivariateDistribution.nb_parameters = property(MultivariateDistribution.get_nb_parameters)
del MultivariateDistribution.get_nb_parameters
# Splitting distribution: the sum and singular component distributions are
# exposed as read/write properties, with combined plain/LaTeX displays.
SplittingDistribution.sum = property(SplittingDistribution.get_sum, SplittingDistribution.set_sum)
del SplittingDistribution.get_sum, SplittingDistribution.set_sum
SplittingDistribution.singular = property(SplittingDistribution.get_singular, SplittingDistribution.set_singular)
del SplittingDistribution.get_singular, SplittingDistribution.set_singular
def __str__(self):
    """Display as "<singular> /\\ <sum>"."""
    return self.singular.__str__() + " /\\ " + self.sum.__str__()
SplittingDistribution.__str__ = __str__
SplittingDistribution.__repr__ = __str__
del __str__
def _repr_latex_(self):
    """Splice the operands' LaTeX around a \\wedge operator (dropping the
    closing `$` of the left side and the opening `$` of the right side)."""
    return self.singular._repr_latex_()[:-1] + r" \underset{S}{\wedge} " + self.sum._repr_latex_()[1:]
SplittingDistribution._repr_latex_ = _repr_latex_
del _repr_latex_
def __repr__(self):
    """Plain-text display: Dir(alpha)."""
    return "Dir(" + str(self.alpha) + ')'
# Bind __str__ as well, for consistency with every other distribution in this
# module (it was previously left unset here).
DirichletDistribution.__str__ = __repr__
DirichletDistribution.__repr__ = __repr__
del __repr__
def _repr_latex_(self):
    """IPython rich display as LaTeX.  Bug fix: the raw-string prefix was
    inside the quotes ("r$..."), so the rendered text literally began with
    `r$`; use a proper raw literal."""
    # NOTE(review): _tools.remove_latex here vs the bare remove_latex used
    # earlier in this module — confirm both names resolve to the same helper.
    return r"$\mathrm{Dir}\left(" + _tools.remove_latex(self.alpha._repr_latex_()) + r'\right)$'
DirichletDistribution._repr_latex_ = _repr_latex_
del _repr_latex_
DirichletDistribution.alpha = property(DirichletDistribution.get_alpha, DirichletDistribution.set_alpha)
del DirichletDistribution.get_alpha, DirichletDistribution.set_alpha
# def statiskit_independent_Multivariate_distribution_decorator(cls):
# pass
# for cls in _IndependentMultivariateDistribution:
# statiskit_independent_Multivariate_distribution_decorator(cls)
# def IndependentMultivariateDistribution(*args):
# if all(isinstance(arg, CategoricalUnivariateDistribution) for arg in args):
# return CategoricalIndependentMultivariateDistribution(args)
# elif all(isinstance(arg, DiscreteUnivariateDistribution) for arg in args):
# return DiscreteIndependentMultivariateDistribution(args)
# elif all(isinstance(arg, ContinuousUnivariateDistribution) for arg in args):
# return ContinuousIndependentMultivariateDistribution(args)
# elif all(isinstance(arg, UnivariateDistribution) for arg in args):
# return MixedIndependentMultivariateDistribution(args)
# else:
# raise TypeError('\'args\' parameter')
def statiskit_mixture_distribution_decorator(cls):
    """Patch a mixture distribution class: `nb_states`, `pi`, an indexable
    `observations` view over the component distributions, and a pdf plot that
    also draws each weighted component."""
    cls.nb_states = property(cls.get_nb_states)
    del cls.get_nb_states
    cls.pi = property(cls.get_pi, cls.set_pi)
    del cls.get_pi, cls.set_pi
    class Observations(object):
        # Sequence-like proxy: observations[i] reads/writes the i-th component
        # through the generated get/set_observation accessors (installed below).
        def __init__(self, distribution):
            self._distribution = distribution
        def __len__(self):
            return self._distribution.nb_states
    def wrapper_observations(f0, f1):
        @wraps(f0)
        def __getitem__(self, index):
            # Support negative indices and raise like a list on out-of-range.
            if index < 0:
                index += len(self)
            if not 0 <= index < len(self):
                raise IndexError(self._distribution.__class__.__name__ + " index out of range")
            return f0(self._distribution, index)
        @wraps(f1)
        def __setitem__(self, index, value):
            if index < 0:
                index += len(self)
            if not 0 <= index < len(self):
                raise IndexError(self._distribution.__class__.__name__ + " index out of range")
            return f1(self._distribution, index, value)
        return __getitem__, __setitem__
    Observations.__getitem__, Observations.__setitem__ = wrapper_observations(cls.get_observation, cls.set_observation)
    del cls.get_observation, cls.set_observation
    # `Observations` itself is the property's fget, so each access builds a
    # fresh proxy bound to the distribution.
    cls.observations = property(Observations)
    if hasattr(cls, 'pdf_plot'):
        def wrapper_pdf_plot(f):
            @wraps(f)
            def pdf_plot(self, axes=None, *args, **kwargs):
                norm = kwargs.pop('norm', 1.)
                states = kwargs.pop('states', True)
                if states:
                    # Draw each component scaled by its weight; `states` may be
                    # a list/tuple of per-component kwarg dicts.
                    if isinstance(states, (list, tuple)):
                        skwargs = states
                    else:
                        # NOTE(review): [{}] * n repeats ONE shared dict, so the
                        # per-index updates below all mutate the same mapping —
                        # benign while every index writes the same kwargs, but
                        # confirm this is intended.
                        skwargs = [{}] * self.nb_states
                    for index, (pi, observation) in enumerate(zip(self.pi, self.observations)):
                        for key, value in kwargs.items():
                            if not key in skwargs[index]:
                                skwargs[index][key] = value
                        axes = observation.pdf_plot(axes=axes, norm=pi*norm, *args, **skwargs[index])
                return f(self, axes=axes, *args, norm=norm, **kwargs)
            return pdf_plot
        cls.pdf_plot = wrapper_pdf_plot(cls.pdf_plot)
for cls in _MixtureDistribution:
    statiskit_mixture_distribution_decorator(cls)
def statiskit_univariate_mixture_distribution_decorator(cls):
    """Let posterior/assignment/uncertainty accept plain Python outcomes by
    converting them to events before delegating to the generated methods."""
    def wrapper_posterior(f):
        @wraps(f)
        def posterior(self, event, **kwargs):
            # `log` selects log-probabilities; defaults to False.
            return f(self, type_to_event(event), kwargs.pop('log', False))
        return posterior
    cls.posterior = wrapper_posterior(cls.posterior)
    def wrapper_assignment(f):
        @wraps(f)
        def assignment(self, event):
            return f(self, type_to_event(event))
        return assignment
    cls.assignment = wrapper_assignment(cls.assignment)
    def wrapper_uncertainty(f):
        @wraps(f)
        def uncertainty(self, arg):
            # A UnivariateData sample is forwarded as-is; anything else is
            # first converted to an event.
            # NOTE(review): types_to_event here vs type_to_event above —
            # confirm the plural helper is intended for the scalar case.
            if isinstance(arg, UnivariateData):
                return f(self, arg)
            else:
                return f(self, types_to_event(arg))
        return uncertainty
    cls.uncertainty = wrapper_uncertainty(cls.uncertainty)
for cls in _UnivariateMixtureDistribution:
    statiskit_univariate_mixture_distribution_decorator(cls)
def statiskit_Multivariate_mixture_distribution_decorator(cls):
    """Patch event-based posterior/assignment/uncertainty onto a multivariate
    mixture class, converting plain Python outcomes to events."""
    def wrapper_posterior(f):
        @wraps(f)
        def posterior(self, *events, **kwargs):
            # Bug fix: the parameter was declared `*event` while the body
            # referenced `events`, raising NameError on every call; the
            # star-argument is now named `events`, matching the univariate
            # decorator and the other wrappers below.
            return f(self, types_to_event(*events), kwargs.pop('log', False))
        return posterior
    cls.posterior = wrapper_posterior(cls.posterior)
    def wrapper_assignment(f):
        @wraps(f)
        def assignment(self, *event):
            # A single data object is forwarded as-is; otherwise the
            # positional outcomes are converted to an event.
            if len(event) == 1 and isinstance(event[0], (UnivariateData, MultivariateData)):
                event = event[0]
            else:
                event = types_to_event(*event)
            return f(self, event)
        return assignment
    cls.assignment = wrapper_assignment(cls.assignment)
    def wrapper_uncertainty(f):
        @wraps(f)
        def uncertainty(self, *args):
            if len(args) == 1 and isinstance(args[0], MultivariateData):
                return f(self, args[0])
            else:
                return f(self, types_to_event(*args))
        return uncertainty
    cls.uncertainty = wrapper_uncertainty(cls.uncertainty)
for cls in _MultivariateMixtureDistribution:
    statiskit_Multivariate_mixture_distribution_decorator(cls)
def MixtureDistribution(*args, **kwargs):
    """Factory: build the mixture class matching the component distributions.

    :param args: component distributions — all univariate of one outcome type,
        or all multivariate (mixed multivariate components are also accepted).
    :param kwargs: optional `pi`, the mixture weights (defaults to a weight of
        1. per component; coerced to a linalg.Vector).
    :raises TypeError: if the components fit no supported combination.
    """
    if 'pi' in kwargs:
        pi = kwargs.pop('pi')
    else:
        pi = [1. for arg in args]
    if not isinstance(pi, linalg.Vector):
        pi = linalg.Vector(pi)
    # Dispatch on the common outcome type of the components.
    if all(isinstance(arg, CategoricalUnivariateDistribution) for arg in args):
        return CategoricalUnivariateMixtureDistribution(CategoricalUnivariateDistributionVector(*args), pi)
    elif all(isinstance(arg, DiscreteUnivariateDistribution) for arg in args):
        return DiscreteUnivariateMixtureDistribution(DiscreteUnivariateDistributionVector(*args), pi)
    elif all(isinstance(arg, ContinuousUnivariateDistribution) for arg in args):
        return ContinuousUnivariateMixtureDistribution(ContinuousUnivariateDistributionVector(*args), pi)
    elif all(isinstance(arg, MultivariateDistribution) for arg in args):
        if all(isinstance(arg, CategoricalMultivariateDistribution) for arg in args):
            return CategoricalMultivariateMixtureDistribution(CategoricalMultivariateDistributionVector(*args), pi)
        elif all(isinstance(arg, DiscreteMultivariateDistribution) for arg in args):
            return DiscreteMultivariateMixtureDistribution(DiscreteMultivariateDistributionVector(*args), pi)
        elif all(isinstance(arg, ContinuousMultivariateDistribution) for arg in args):
            return ContinuousMultivariateMixtureDistribution(ContinuousMultivariateDistributionVector(*args), pi)
        else:
            return MixedMultivariateMixtureDistribution(MultivariateDistributionVector(*args), pi)
    else:
        raise TypeError('\'args\' parameter')
# Read-only nb_parameters / explanatory_space properties.
UnivariateConditionalDistribution.nb_parameters = property(UnivariateConditionalDistribution.get_nb_parameters)
del UnivariateConditionalDistribution.get_nb_parameters
UnivariateConditionalDistribution.explanatory_space = property(UnivariateConditionalDistribution.get_explanatory_space)
del UnivariateConditionalDistribution.get_explanatory_space
def wrapper_call(f):
    @wraps(f)
    def __call__(self, *events):
        """Coerce positional outcomes into a MultivariateEvent (one component
        per explanatory variable, converted by the matching space element)
        before delegating to the wrapped __call__."""
        if len(events) == 1:
            event = events[-1]
        else:
            event = None
        if not isinstance(event, MultivariateEvent):
            event = VectorEvent(len(events))
            for index, component in enumerate(events):
                event[index] = self.explanatory_space[index](component)
        if not isinstance(event, MultivariateEvent):
            raise TypeError('\'event\' parameter')
        return f(self, event)
    return __call__
UnivariateConditionalDistribution.__call__ = wrapper_call(UnivariateConditionalDistribution.__call__)
######################################################### # event = VectorEvent(event) # def statiskit_independent_Multivariate_distribution_decorator(cls): # pass # for cls in _IndependentMultivariateDistribution: # statiskit_independent_Multivariate_distribution_decorator(cls) # def IndependentMultivariateDistribution(*args): # if all(isinstance(arg, CategoricalUnivariateDistribution) for arg in args): # return CategoricalIndependentMultivariateDistribution(args) # elif all(isinstance(arg, DiscreteUnivariateDistribution) for arg in args): # return DiscreteIndependentMultivariateDistribution(args) # elif all(isinstance(arg, ContinuousUnivariateDistribution) for arg in args): # return ContinuousIndependentMultivariateDistribution(args) # elif all(isinstance(arg, UnivariateDistribution) for arg in args): # return MixedIndependentMultivariateDistribution(args) # else: # raise TypeError('\'args\' parameter') | 1.495298 | 1 |
optimus/helpers/constants.py | XD-DENG/Optimus | 1 | 6624688 | from optimus.helpers.logger import logger
# Python to PySpark reference
#
# type(None): NullType,
# bool: BooleanType,
# int: LongType,
# float: DoubleType,
# str: StringType,
# bytearray: BinaryType,
# decimal.Decimal: DecimalType,
# datetime.date: DateType,
# datetime.datetime: TimestampType,
# datetime.time: TimestampType,
# Profiler
from enum import Enum
class Actions(Enum):
    """
    Actions that modify a column, recorded by the profiler.
    """
    # NOTE(review): Enum members with equal values become aliases, so
    # REMOVE_SPECIAL_CHARS and REMOVE_WHITE_SPACES collapse into
    # REMOVE_ACCENTS (all "remove"), and REPLACE_REGEX into REPLACE
    # (both "replace").  Actions.list() therefore omits the aliases —
    # confirm whether distinct values were intended.
    LOWER = "lower"
    UPPER = "upper"
    TRIM = "trim"
    REVERSE = "reverse"
    REMOVE_ACCENTS = "remove"
    REMOVE_SPECIAL_CHARS = "remove"
    REMOVE_WHITE_SPACES = "remove"
    REPLACE = "replace"
    REPLACE_REGEX = "replace"
    FILL_NA = "fill_na"
    CAST = "cast"
    IS_NA = "is_na"
    Z_SCORE = "z_score"
    NEST = "nest"
    UNNEST = "unnest"
    VALUES_TO_COLS = "values_to_cols"
    SET = "set"
    STRING_TO_INDEX = "string_to_index"
    INDEX_TO_STRING = "index_to_string"
    MIN_MAX_SCALER = "min_max_scaler"
    MAX_ABS_SCALER = "max_abs_scaler"

    # ROWS
    SELECT_ROW = "select_row"
    DROP_ROW = "drop_row"
    BETWEEN_ROW = "between_drop"
    SORT_ROW = "sort_row"

    @staticmethod
    def list():
        """Return the string values of all canonical (non-alias) members."""
        return list(map(lambda c: c.value, Actions))
class ProfilerDataTypes(Enum):
    """Data types the profiler can infer for a column."""
    INT = "int"
    DECIMAL = "decimal"
    # NOTE(review): member named TRIM but valued "string" — looks like a
    # copy-paste slip from Actions; renaming would change the public API,
    # so it is only flagged here.
    TRIM = "string"
    BOOLEAN = "boolean"
    DATE = "date"
    ARRAY = "array"
    OBJECT = "object"
    GENDER = "gender"
    IP = "ip"
    URL = "url"
    EMAIL = "email"
    CREDIT_CARD_NUMBER = "credit_card_number"
    ZIP_CODE = "zip_code"
    NULL = "null"
    MISSING = "missing"
# Strings and Function Messages
JUST_CHECKING = "Just check that Spark and all necessary environments vars are present..."
STARTING_SPARK = "Starting or getting SparkSession and SparkContext..."
STARTING_OPTIMUS = "Transform and Roll out..."
SUCCESS = "Optimus successfully imported. Have fun :)."
# (confidence level %, z-score) pairs.  Note: with no outer brackets this is a
# tuple of lists, not a list of lists.
CONFIDENCE_LEVEL_CONSTANT = [50, .67], [68, .99], [90, 1.64], [95, 1.96], [99, 2.57]
def print_check_point_config(filesystem):
    """Log where the Spark checkpoint folder is being set.

    `filesystem` is passed alongside a %s placeholder — assumes logger.print
    supports logging-style lazy %-formatting — TODO confirm.
    """
    logger.print(
        "Setting checkpoint folder %s. If you are in a cluster initialize Optimus with master='your_ip' as param",
        filesystem)
# Pinned Spark/Hadoop release used by the automated download/setup helpers.
SPARK_VERSION = "2.4.1"
HADOOP_VERSION = "2.7"

# Tarball name and download location on the Apache archive.
SPARK_FILE = "spark-{SPARK_VERSION}-bin-hadoop{HADOOP_VERSION}.tgz".format(SPARK_VERSION=SPARK_VERSION,
                                                                           HADOOP_VERSION=HADOOP_VERSION)
# Bug fix: the URL contained a doubled slash ("…/spark-2.4.1//<file>").
SPARK_URL = "https://archive.apache.org/dist/spark/spark-{SPARK_VERSION}/{SPARK_FILE}".format(
    SPARK_VERSION=SPARK_VERSION, SPARK_FILE=SPARK_FILE)

# For Google Colab
SPARK_PATH_COLAB = "/content/spark-{SPARK_VERSION}-bin-hadoop{HADOOP_VERSION}".format(SPARK_VERSION=SPARK_VERSION,
                                                                                      HADOOP_VERSION=HADOOP_VERSION)
JAVA_PATH_COLAB = "/usr/lib/jvm/java-8-openjdk-amd64"

# Relative-error parameter for approximate quantile computation — presumably
# forwarded to Spark's approxQuantile (larger = faster, less accurate); confirm.
RELATIVE_ERROR = 10000
| from optimus.helpers.logger import logger
# Python to PySpark reference
#
# type(None): NullType,
# bool: BooleanType,
# int: LongType,
# float: DoubleType,
# str: StringType,
# bytearray: BinaryType,
# decimal.Decimal: DecimalType,
# datetime.date: DateType,
# datetime.datetime: TimestampType,
# datetime.time: TimestampType,
# Profiler
from enum import Enum
class Actions(Enum):
"""
Actions that modify a columns.
"""
LOWER = "lower"
UPPER = "upper"
TRIM = "trim"
REVERSE = "reverse"
REMOVE_ACCENTS = "remove"
REMOVE_SPECIAL_CHARS = "remove"
REMOVE_WHITE_SPACES = "remove"
REPLACE = "replace"
REPLACE_REGEX = "replace"
FILL_NA = "fill_na"
CAST = "cast"
IS_NA = "is_na"
Z_SCORE = "z_score"
NEST = "nest"
UNNEST = "unnest"
VALUES_TO_COLS = "values_to_cols"
SET = "set"
STRING_TO_INDEX = "string_to_index"
INDEX_TO_STRING = "index_to_string"
MIN_MAX_SCALER = "min_max_scaler"
MAX_ABS_SCALER = "max_abs_scaler"
# ROWS
SELECT_ROW = "select_row"
DROP_ROW = "drop_row"
BETWEEN_ROW = "between_drop"
SORT_ROW = "sort_row"
@staticmethod
def list():
return list(map(lambda c: c.value, Actions))
class ProfilerDataTypes(Enum):
INT = "int"
DECIMAL = "decimal"
TRIM = "string"
BOOLEAN = "boolean"
DATE = "date"
ARRAY = "array"
OBJECT = "object"
GENDER = "gender"
IP = "ip"
URL = "url"
EMAIL = "email"
CREDIT_CARD_NUMBER = "credit_card_number"
ZIP_CODE = "zip_code"
NULL = "null"
MISSING = "missing"
# Strings and Function Messages
JUST_CHECKING = "Just check that Spark and all necessary environments vars are present..."
STARTING_SPARK = "Starting or getting SparkSession and SparkContext..."
STARTING_OPTIMUS = "Transform and Roll out..."
SUCCESS = "Optimus successfully imported. Have fun :)."
CONFIDENCE_LEVEL_CONSTANT = [50, .67], [68, .99], [90, 1.64], [95, 1.96], [99, 2.57]
def print_check_point_config(filesystem):
logger.print(
"Setting checkpoint folder %s. If you are in a cluster initialize Optimus with master='your_ip' as param",
filesystem)
SPARK_VERSION = "2.4.1"
HADOOP_VERSION = "2.7"
SPARK_FILE = "spark-{SPARK_VERSION}-bin-hadoop{HADOOP_VERSION}.tgz".format(SPARK_VERSION=SPARK_VERSION,
HADOOP_VERSION=HADOOP_VERSION)
SPARK_URL = "https://archive.apache.org/dist/spark/spark-{SPARK_VERSION}//{SPARK_FILE}".format(
SPARK_VERSION=SPARK_VERSION, SPARK_FILE=SPARK_FILE)
# For Google Colab
SPARK_PATH_COLAB = "/content/spark-{SPARK_VERSION}-bin-hadoop{HADOOP_VERSION}".format(SPARK_VERSION=SPARK_VERSION,
HADOOP_VERSION=HADOOP_VERSION)
JAVA_PATH_COLAB = "/usr/lib/jvm/java-8-openjdk-amd64"
RELATIVE_ERROR = 10000
| en | 0.425703 | # Python to PySpark reference # # type(None): NullType, # bool: BooleanType, # int: LongType, # float: DoubleType, # str: StringType, # bytearray: BinaryType, # decimal.Decimal: DecimalType, # datetime.date: DateType, # datetime.datetime: TimestampType, # datetime.time: TimestampType, # Profiler Actions that modify a columns. # ROWS # Strings and Function Messages # For Google Colab | 2.552089 | 3 |
tests/test_align_coord.py | moshi4/GBKviz | 3 | 6624689 | from gbkviz.align_coord import AlignCoord
def test_is_inverted():
"""test is inverted"""
align_coord = AlignCoord(11, 100, 501, 600, 90, 100, 80.0, "ref", "query")
assert align_coord.is_inverted is False
align_coord = AlignCoord(100, 11, 501, 600, 90, 100, 80.0, "ref", "query")
assert align_coord.is_inverted is True
def test_add_offset():
"""test add offset"""
ref_start, ref_end = 10, 100
query_start, query_end = 500, 600
align_coord = AlignCoord(
ref_start, ref_end, query_start, query_end, 90, 100, 80.0, "ref", "query"
)
ref_offset, query_offset = 100, 150
offset_align_coord = align_coord.add_offset(ref_offset, query_offset)
assert (
offset_align_coord.ref_start == ref_start + ref_offset
and offset_align_coord.ref_end == ref_end + ref_offset
and offset_align_coord.query_start == query_start + query_offset
and offset_align_coord.query_end == query_end + query_offset
)
def test_filter():
"""test filter"""
align_coords = [
AlignCoord(1, 1, 1, 1, 100, 100, 80.0, "ref", "query"),
AlignCoord(1, 1, 1, 1, 150, 250, 90.0, "ref", "query"),
AlignCoord(1, 1, 1, 1, 300, 300, 60.0, "ref", "query"),
]
# No setting
assert len(AlignCoord.filter(align_coords)) == 3
# Min Length setting
assert len(AlignCoord.filter(align_coords, min_length=130)) == 2
assert len(AlignCoord.filter(align_coords, min_length=200)) == 1
assert len(AlignCoord.filter(align_coords, min_length=500)) == 0
# Identity setting
assert len(AlignCoord.filter(align_coords, min_identity=70)) == 2
assert len(AlignCoord.filter(align_coords, min_identity=85)) == 1
assert len(AlignCoord.filter(align_coords, min_identity=95)) == 0
# Both setting
assert len(AlignCoord.filter(align_coords, 200, 70)) == 0
| from gbkviz.align_coord import AlignCoord
def test_is_inverted():
"""test is inverted"""
align_coord = AlignCoord(11, 100, 501, 600, 90, 100, 80.0, "ref", "query")
assert align_coord.is_inverted is False
align_coord = AlignCoord(100, 11, 501, 600, 90, 100, 80.0, "ref", "query")
assert align_coord.is_inverted is True
def test_add_offset():
"""test add offset"""
ref_start, ref_end = 10, 100
query_start, query_end = 500, 600
align_coord = AlignCoord(
ref_start, ref_end, query_start, query_end, 90, 100, 80.0, "ref", "query"
)
ref_offset, query_offset = 100, 150
offset_align_coord = align_coord.add_offset(ref_offset, query_offset)
assert (
offset_align_coord.ref_start == ref_start + ref_offset
and offset_align_coord.ref_end == ref_end + ref_offset
and offset_align_coord.query_start == query_start + query_offset
and offset_align_coord.query_end == query_end + query_offset
)
def test_filter():
"""test filter"""
align_coords = [
AlignCoord(1, 1, 1, 1, 100, 100, 80.0, "ref", "query"),
AlignCoord(1, 1, 1, 1, 150, 250, 90.0, "ref", "query"),
AlignCoord(1, 1, 1, 1, 300, 300, 60.0, "ref", "query"),
]
# No setting
assert len(AlignCoord.filter(align_coords)) == 3
# Min Length setting
assert len(AlignCoord.filter(align_coords, min_length=130)) == 2
assert len(AlignCoord.filter(align_coords, min_length=200)) == 1
assert len(AlignCoord.filter(align_coords, min_length=500)) == 0
# Identity setting
assert len(AlignCoord.filter(align_coords, min_identity=70)) == 2
assert len(AlignCoord.filter(align_coords, min_identity=85)) == 1
assert len(AlignCoord.filter(align_coords, min_identity=95)) == 0
# Both setting
assert len(AlignCoord.filter(align_coords, 200, 70)) == 0
| en | 0.683702 | test is inverted test add offset test filter # No setting # Min Length setting # Identity setting # Both setting | 2.72908 | 3 |
Lib/distutils/tests/test_build_py.py | ystk/debian-python3.1 | 0 | 6624690 | """Tests for distutils.command.build_py."""
import os
import sys
import io
import unittest
from distutils.command.build_py import build_py
from distutils.core import Distribution
from distutils.errors import DistutilsFileError
from distutils.tests import support
class BuildPyTestCase(support.TempdirManager,
support.LoggingSilencer,
unittest.TestCase):
def test_package_data(self):
sources = self.mkdtemp()
f = open(os.path.join(sources, "__init__.py"), "w")
try:
f.write("# Pretend this is a package.")
finally:
f.close()
f = open(os.path.join(sources, "README.txt"), "w")
try:
f.write("Info about this package")
finally:
f.close()
destination = self.mkdtemp()
dist = Distribution({"packages": ["pkg"],
"package_dir": {"pkg": sources}})
# script_name need not exist, it just need to be initialized
dist.script_name = os.path.join(sources, "setup.py")
dist.command_obj["build"] = support.DummyCommand(
force=0,
build_lib=destination)
dist.packages = ["pkg"]
dist.package_data = {"pkg": ["README.txt"]}
dist.package_dir = {"pkg": sources}
cmd = build_py(dist)
cmd.compile = 1
cmd.ensure_finalized()
self.assertEqual(cmd.package_data, dist.package_data)
cmd.run()
# This makes sure the list of outputs includes byte-compiled
# files for Python modules but not for package data files
# (there shouldn't *be* byte-code files for those!).
#
self.assertEqual(len(cmd.get_outputs()), 3)
pkgdest = os.path.join(destination, "pkg")
files = os.listdir(pkgdest)
self.assertTrue("__init__.py" in files)
self.assertTrue("__init__.pyc" in files)
self.assertTrue("README.txt" in files)
def test_empty_package_dir (self):
# See SF 1668596/1720897.
cwd = os.getcwd()
# create the distribution files.
sources = self.mkdtemp()
open(os.path.join(sources, "__init__.py"), "w").close()
testdir = os.path.join(sources, "doc")
os.mkdir(testdir)
open(os.path.join(testdir, "testfile"), "w").close()
os.chdir(sources)
old_stdout = sys.stdout
sys.stdout = io.StringIO()
try:
dist = Distribution({"packages": ["pkg"],
"package_dir": {"pkg": ""},
"package_data": {"pkg": ["doc/*"]}})
# script_name need not exist, it just need to be initialized
dist.script_name = os.path.join(sources, "setup.py")
dist.script_args = ["build"]
dist.parse_command_line()
try:
dist.run_commands()
except DistutilsFileError:
self.fail("failed package_data test when package_dir is ''")
finally:
# Restore state.
os.chdir(cwd)
sys.stdout = old_stdout
def test_dont_write_bytecode(self):
# makes sure byte_compile is not used
pkg_dir, dist = self.create_dist()
cmd = build_py(dist)
cmd.compile = 1
cmd.optimize = 1
old_dont_write_bytecode = sys.dont_write_bytecode
sys.dont_write_bytecode = True
try:
cmd.byte_compile([])
finally:
sys.dont_write_bytecode = old_dont_write_bytecode
self.assertTrue('byte-compiling is disabled' in self.logs[0][1])
def test_suite():
return unittest.makeSuite(BuildPyTestCase)
if __name__ == "__main__":
unittest.main(defaultTest="test_suite")
| """Tests for distutils.command.build_py."""
import os
import sys
import io
import unittest
from distutils.command.build_py import build_py
from distutils.core import Distribution
from distutils.errors import DistutilsFileError
from distutils.tests import support
class BuildPyTestCase(support.TempdirManager,
support.LoggingSilencer,
unittest.TestCase):
def test_package_data(self):
sources = self.mkdtemp()
f = open(os.path.join(sources, "__init__.py"), "w")
try:
f.write("# Pretend this is a package.")
finally:
f.close()
f = open(os.path.join(sources, "README.txt"), "w")
try:
f.write("Info about this package")
finally:
f.close()
destination = self.mkdtemp()
dist = Distribution({"packages": ["pkg"],
"package_dir": {"pkg": sources}})
# script_name need not exist, it just need to be initialized
dist.script_name = os.path.join(sources, "setup.py")
dist.command_obj["build"] = support.DummyCommand(
force=0,
build_lib=destination)
dist.packages = ["pkg"]
dist.package_data = {"pkg": ["README.txt"]}
dist.package_dir = {"pkg": sources}
cmd = build_py(dist)
cmd.compile = 1
cmd.ensure_finalized()
self.assertEqual(cmd.package_data, dist.package_data)
cmd.run()
# This makes sure the list of outputs includes byte-compiled
# files for Python modules but not for package data files
# (there shouldn't *be* byte-code files for those!).
#
self.assertEqual(len(cmd.get_outputs()), 3)
pkgdest = os.path.join(destination, "pkg")
files = os.listdir(pkgdest)
self.assertTrue("__init__.py" in files)
self.assertTrue("__init__.pyc" in files)
self.assertTrue("README.txt" in files)
def test_empty_package_dir (self):
# See SF 1668596/1720897.
cwd = os.getcwd()
# create the distribution files.
sources = self.mkdtemp()
open(os.path.join(sources, "__init__.py"), "w").close()
testdir = os.path.join(sources, "doc")
os.mkdir(testdir)
open(os.path.join(testdir, "testfile"), "w").close()
os.chdir(sources)
old_stdout = sys.stdout
sys.stdout = io.StringIO()
try:
dist = Distribution({"packages": ["pkg"],
"package_dir": {"pkg": ""},
"package_data": {"pkg": ["doc/*"]}})
# script_name need not exist, it just need to be initialized
dist.script_name = os.path.join(sources, "setup.py")
dist.script_args = ["build"]
dist.parse_command_line()
try:
dist.run_commands()
except DistutilsFileError:
self.fail("failed package_data test when package_dir is ''")
finally:
# Restore state.
os.chdir(cwd)
sys.stdout = old_stdout
def test_dont_write_bytecode(self):
# makes sure byte_compile is not used
pkg_dir, dist = self.create_dist()
cmd = build_py(dist)
cmd.compile = 1
cmd.optimize = 1
old_dont_write_bytecode = sys.dont_write_bytecode
sys.dont_write_bytecode = True
try:
cmd.byte_compile([])
finally:
sys.dont_write_bytecode = old_dont_write_bytecode
self.assertTrue('byte-compiling is disabled' in self.logs[0][1])
def test_suite():
return unittest.makeSuite(BuildPyTestCase)
if __name__ == "__main__":
unittest.main(defaultTest="test_suite")
| en | 0.81388 | Tests for distutils.command.build_py. # script_name need not exist, it just need to be initialized # This makes sure the list of outputs includes byte-compiled # files for Python modules but not for package data files # (there shouldn't *be* byte-code files for those!). # # See SF 1668596/1720897. # create the distribution files. # script_name need not exist, it just need to be initialized # Restore state. # makes sure byte_compile is not used | 2.467164 | 2 |
tests/test_errors.py | jdknight/sphinxcontrib-blockdiag | 15 | 6624691 | <filename>tests/test_errors.py<gh_stars>10-100
# -*- coding: utf-8 -*-
from mock import patch
from sphinx_testing import with_app
import sys
import unittest
class TestSphinxcontribBlockdiagErrors(unittest.TestCase):
@with_app(srcdir='tests/docs/basic', write_docstring=True)
def test_parse_error(self, app, status, warning):
"""
.. blockdiag::
{ A -> B;
"""
app.builder.build_all()
self.assertIn('got unexpected token:', warning.getvalue())
@with_app(srcdir='tests/docs/basic', confoverrides=dict(blockdiag_html_image_format='JPG'))
def test_unknown_format_error(self, app, status, warning):
app.builder.build_all()
self.assertIn('unknown format: JPG', warning.getvalue())
@with_app(srcdir='tests/docs/basic', confoverrides=dict(blockdiag_html_image_format='PDF'))
def test_reportlab_not_found_error(self, app, status, warning):
try:
# unload reportlab and make loading it impossible
sys.modules.pop('reportlab', None)
path = sys.path
sys.path = []
app.builder.build_all()
self.assertIn('Could not output PDF format. Install reportlab.',
warning.getvalue())
finally:
sys.path = path
@with_app(srcdir='tests/docs/basic')
@patch("blockdiag.utils.rst.nodes.blockdiag.processor.drawer.DiagramDraw")
def test_rendering_error(self, app, status, warning, DiagramDraw):
DiagramDraw.side_effect = RuntimeError("UNKNOWN ERROR!")
app.builder.build_all()
self.assertIn('UNKNOWN ERROR!', warning.getvalue())
@with_app(srcdir='tests/docs/basic')
@patch("sphinxcontrib.blockdiag.blockdiag.drawer.DiagramDraw.draw")
def test_font_settings_error(self, app, status, warning, draw):
draw.side_effect = UnicodeEncodeError("", "", 0, 0, "")
app.builder.build_all()
self.assertIn('UnicodeEncodeError caught (check your font settings)',
warning.getvalue())
| <filename>tests/test_errors.py<gh_stars>10-100
# -*- coding: utf-8 -*-
from mock import patch
from sphinx_testing import with_app
import sys
import unittest
class TestSphinxcontribBlockdiagErrors(unittest.TestCase):
@with_app(srcdir='tests/docs/basic', write_docstring=True)
def test_parse_error(self, app, status, warning):
"""
.. blockdiag::
{ A -> B;
"""
app.builder.build_all()
self.assertIn('got unexpected token:', warning.getvalue())
@with_app(srcdir='tests/docs/basic', confoverrides=dict(blockdiag_html_image_format='JPG'))
def test_unknown_format_error(self, app, status, warning):
app.builder.build_all()
self.assertIn('unknown format: JPG', warning.getvalue())
@with_app(srcdir='tests/docs/basic', confoverrides=dict(blockdiag_html_image_format='PDF'))
def test_reportlab_not_found_error(self, app, status, warning):
try:
# unload reportlab and make loading it impossible
sys.modules.pop('reportlab', None)
path = sys.path
sys.path = []
app.builder.build_all()
self.assertIn('Could not output PDF format. Install reportlab.',
warning.getvalue())
finally:
sys.path = path
@with_app(srcdir='tests/docs/basic')
@patch("blockdiag.utils.rst.nodes.blockdiag.processor.drawer.DiagramDraw")
def test_rendering_error(self, app, status, warning, DiagramDraw):
DiagramDraw.side_effect = RuntimeError("UNKNOWN ERROR!")
app.builder.build_all()
self.assertIn('UNKNOWN ERROR!', warning.getvalue())
@with_app(srcdir='tests/docs/basic')
@patch("sphinxcontrib.blockdiag.blockdiag.drawer.DiagramDraw.draw")
def test_font_settings_error(self, app, status, warning, draw):
draw.side_effect = UnicodeEncodeError("", "", 0, 0, "")
app.builder.build_all()
self.assertIn('UnicodeEncodeError caught (check your font settings)',
warning.getvalue())
| en | 0.667627 | # -*- coding: utf-8 -*- .. blockdiag:: { A -> B; # unload reportlab and make loading it impossible | 2.244143 | 2 |
trainer.py | souschefistry/cs230 | 0 | 6624692 | <reponame>souschefistry/cs230
"""
Copyright (c) 2018 dibghosh AT stanford edu
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
print(sys.path)
print(sys.executable)
import os
import numpy as np
import json
import random
import matplotlib.pyplot as plt
from IPython.display import clear_output
import keras
from keras.utils.data_utils import get_file
from keras import backend as K
from keras.models import model_from_json
from keras import regularizers
import functools
import tensorflow as tf
import numpy as np
import os
import time
from keras.preprocessing import image
from keras.layers import GlobalAveragePooling2D, Dense, Dropout,Activation,Flatten
from keras.applications import ResNet50
# from keras.applications.inception_v3 import InceptionV3
from keras.callbacks import TensorBoard, EarlyStopping, LearningRateScheduler
from keras import optimizers
from keras.layers import Input
from keras.models import Model
from keras.utils import np_utils
from sklearn.utils import shuffle
# lscpu Core(s) per socket: 2
NUM_PARALLEL_EXEC_UNITS = 2
# sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
sess = tf.Session(
config=tf.ConfigProto(
log_device_placement=True,
intra_op_parallelism_threads=NUM_PARALLEL_EXEC_UNITS,
inter_op_parallelism_threads=2,
allow_soft_placement=True,
# gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5),
device_count = {'GPU': 1, 'CPU': NUM_PARALLEL_EXEC_UNITS }
)
)
keras.backend.set_session(sess)
NUM_CLASSES = 46
TRAIN_DATA_SIZE = 5000
TEST_DATA_SIZE = 1000
VAL_DATA_SIZE = 1000
img_h = 224
img_w = 224
# inception_img_h = 299
# inception_img_w = 299
np.random.seed(seed=1234)
random.seed(1234)
class ImageClass():
"Stores the paths to images for a given class"
def __init__(self, name, image_paths):
self.name = name
self.image_paths = image_paths
def __str__(self):
return self.name + ', ' + str(len(self.image_paths)) + ' images'
def __len__(self):
return len(self.image_paths)
def get_dataset(path, has_class_directories=True):
dataset = []
path_exp = os.path.expanduser(path)
classes = [path for path in os.listdir(path_exp) \
if os.path.isdir(os.path.join(path_exp, path))]
classes.sort()
nrof_classes = len(classes)
for i in range(nrof_classes):
class_name = classes[i]
facedir = os.path.join(path_exp, class_name)
image_paths = get_image_paths(facedir)
dataset.append(ImageClass(class_name, image_paths))
return dataset
def get_image_paths(facedir):
image_paths = []
if os.path.isdir(facedir):
images = os.listdir(facedir)
image_paths = [os.path.join(facedir,img) for img in images]
return image_paths
def get_image_paths_and_labels(dataset):
image_paths_flat = []
labels_flat = []
for i in range(len(dataset)):
image_paths_flat += dataset[i].image_paths
labels_flat += [i] * len(dataset[i].image_paths)
return image_paths_flat, labels_flat
def preprocess_input(x, dim_ordering='default'):
if dim_ordering == 'default':
dim_ordering = K.image_dim_ordering()
assert dim_ordering in {'tf', 'th'}
if dim_ordering == 'th':
x[:, 0, :, :] -= 103.939
x[:, 1, :, :] -= 116.779
x[:, 2, :, :] -= 123.68
# 'RGB'->'BGR'
x = x[:, ::-1, :, :]
else:
x[:, :, :, 0] -= 103.939
x[:, :, :, 1] -= 116.779
x[:, :, :, 2] -= 123.68
# 'RGB'->'BGR'
x = x[:, :, :, ::-1]
return x
from glob import glob
from keras.preprocessing import image
from tqdm import tqdm_notebook, tqdm # Iteration visualization
def load_dataset(data_dir_list, mode, max_per_class=100):
"""
loads images in memory. Expensive method. Doesn't scale well
"""
img_data_list, labels =[],[]
images_per_class = max_per_class
for category in tqdm(data_dir_list):
img_dir = "../deepfashion/dataset/%s/%s/*.jpg" % (mode, category)
# print("Loading category =%s from path=%s" % (category, img_dir))
img_list=glob(img_dir)
if not max_per_class:
# take all images
images_per_class = len(img_list)
print ('Found {} images out of {} for category {}'.format(images_per_class, len(img_list), category))
for img_path in img_list[:images_per_class]:
labels.append(category)
img = image.load_img(img_path, target_size=(img_h, img_w))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
img_data_list.append(x)
img_data = np.array(img_data_list)
img_data=np.rollaxis(img_data,1,0)
img_data=img_data[0]
return img_data, labels
### write both training + validation graphs in same plot
# https://stackoverflow.com/questions/47877475/keras-tensorboard-plot-train-and-validation-scalars-in-a-same-figure
class TrainValTensorBoard(TensorBoard):
def __init__(self, log_dir='./logs', **kwargs):
# Make the original `TensorBoard` log to a subdirectory 'training'
training_log_dir = os.path.join(log_dir, 'training')
super(TrainValTensorBoard, self).__init__(training_log_dir, **kwargs)
# Log the validation metrics to a separate subdirectory
self.val_log_dir = os.path.join(log_dir, 'validation')
def set_model(self, model):
# Setup writer for validation metrics
self.val_writer = tf.summary.FileWriter(self.val_log_dir)
super(TrainValTensorBoard, self).set_model(model)
def on_epoch_end(self, epoch, logs=None):
# Pop the validation logs and handle them separately with
# `self.val_writer`. Also rename the keys so that they can
# be plotted on the same figure with the training metrics
logs = logs or {}
val_logs = {k.replace('val_', ''): v for k, v in logs.items() if k.startswith('val_')}
for name, value in val_logs.items():
summary = tf.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value.item()
summary_value.tag = name
self.val_writer.add_summary(summary, epoch)
self.val_writer.flush()
# Pass the remaining logs to `TensorBoard.on_epoch_end`
logs = {k: v for k, v in logs.items() if not k.startswith('val_')}
super(TrainValTensorBoard, self).on_epoch_end(epoch, logs)
def on_train_end(self, logs=None):
super(TrainValTensorBoard, self).on_train_end(logs)
self.val_writer.close()
def save_model_to_disk(model, model_name="model"):
# serialize model to JSON
model_json = model.to_json()
with open("%s.json" % model_name, "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("%s.h5" % model_name)
print("Saved %s to disk" % model_name)
def load_model_from_disk(model_name):
# load json and create model
json_file = open('%s.json' % model_name, 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("%s.h5" % model_name)
print("Loaded %s from disk" % model_name)
return loaded_model
# apply learning rate decay
# using step decay function and LearningRateScheduler callback to take
# step decay function as argument and return updated learning rates for use in SGD optimizer.
def step_decay(epoch):
initial_lrate = 0.0001
drop = 0.5
epochs_drop = 10.0
lrate = initial_lrate * math.pow(drop,
math.floor((1+epoch)/epochs_drop))
return lrate
# also create a learning rate decay plotter
class LossHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.losses = []
self.lr = []
def on_epoch_end(self, batch, logs={}):
self.losses.append(logs.get("loss"))
self.lr.append(step_decay(len(self.losses)))
class TrainingPlot(keras.callbacks.Callback):
def __init__(self, num_epochs, batch_size, **kwargs):
self.num_epochs = num_epochs
self.batch_size = batch_size
super(TrainingPlot, self).__init__(**kwargs)
# This function is called when the training begins
def on_train_begin(self, logs={}):
# Initialize the lists for holding the logs, losses and accuracies
self.losses = []
self.acc = []
self.val_losses = []
self.val_acc = []
self.logs = []
# This function is called at the end of each epoch
def on_epoch_end(self, epoch, logs={}):
# Append the logs, losses and accuracies to the lists
self.logs.append(logs)
self.losses.append(logs.get('loss'))
self.acc.append(logs.get('acc'))
self.val_losses.append(logs.get('val_loss'))
self.val_acc.append(logs.get('val_acc'))
# Before plotting ensure at least 2 epochs have passed
if len(self.losses) > 1:
# Clear the previous plot
clear_output(wait=True)
N = np.arange(0, len(self.losses))
# You can chose the style of your preference
# print(plt.style.available) to see the available options
plt.style.use("seaborn")
# Plot train loss, train acc, val loss and val acc against epochs passed
plt.figure()
plt.plot(N, self.losses, label = "train_loss")
plt.plot(N, self.acc, label = "train_acc")
plt.plot(N, self.val_losses, label = "val_loss")
plt.plot(N, self.val_acc, label = "val_acc")
plt.title("Training Loss and Accuracy [Epoch {}]".format(epoch))
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend()
plt.savefig('resnet50_{}_{}_{}.png'.format(self.batch_size, self.num_epochs, time.time()))
plt.close()
train_data_dir = os.listdir("../deepfashion/dataset/train/")
val_data_dir = os.listdir("../deepfashion/dataset/val/")
test_data_dir = os.listdir("../deepfashion/dataset/test/")
images_per_class = 400
train_data, train_labels = load_dataset(train_data_dir, "train", images_per_class)
print("[*] loaded %s training images" % (len(train_labels)))
val_data, val_labels = load_dataset(val_data_dir, "val", images_per_class)
print("[*] loaded %s validation images" % (len(val_labels)))
test_data, test_labels = load_dataset(test_data_dir, "test", max_per_class=None)
print("[*] loaded %s test images" % (len(test_labels)))
# train_set = get_dataset(train_data_dir)
# val_set = get_dataset(val_data_dir)
# nrof_classes = len(train_set)
# print('Number of classes : %s' % nrof_classes)
# # prepare data
# # convert class labels to on-hot encoding
# # Get a list of image paths and their labels
# train_image_list, train_label_list = get_image_paths_and_labels(train_set)
# assert len(image_list) > 0, 'The training set should not be empty'
# val_image_list, val_label_list = get_image_paths_and_labels(val_set)
# we will use the encoders from the scikit-learn library.
# Specifically, the LabelEncoder of creating an integer encoding of labels
# and the OneHotEncoder for creating a one hot encoding of integer encoded values.
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
label_encoder = LabelEncoder()
integer_encoded = label_encoder.fit_transform(train_labels)
onehot_encoder = OneHotEncoder(sparse=False)
integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
train_onehot_encoded = onehot_encoder.fit_transform(integer_encoded)
label_encoder = LabelEncoder()
integer_encoded = label_encoder.fit_transform(val_labels)
onehot_encoder = OneHotEncoder(sparse=False)
integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
val_onehot_encoded = onehot_encoder.fit_transform(integer_encoded)
label_encoder = LabelEncoder()
integer_encoded = label_encoder.fit_transform(test_labels)
onehot_encoder = OneHotEncoder(sparse=False)
integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
test_onehot_encoded = onehot_encoder.fit_transform(integer_encoded)
### network
# keras BN bug - https://github.com/keras-team/keras/pull/9965
# K.clear_session()
# K.set_learning_phase(0)
base_model = ResNet50(
weights='imagenet',
include_top=False,
input_shape=(img_h, img_w, 3))
# base_model = InceptionV3(
# weights='imagenet',
# include_top=False,
# input_shape=(inception_img_h, inception_img_w, 3))
base_model.summary()
L2_RATE_KERNEL = 0.01
L2_RATE_ACTIVITY = 0.01
last_layer = base_model.output
# add a global spatial average pooling layer
x = GlobalAveragePooling2D()(last_layer)
# add fully-connected & dropout layers
x = Dense(1024,
activation='relu',
name='fc-1')(x)
# a softmax layer for 46 classes
predictions = Dense(NUM_CLASSES, activation='softmax',name='output_layer')(x)
# this is the model we will train
custom_resnet_model = Model(inputs=base_model.input, outputs=predictions)
custom_resnet_model.summary()
# first: train only the top layers (which were randomly initialized)
# i.e. freeze all layers
for layer in custom_resnet_model.layers:
layer.trainable = False
# custom_resnet_model.layers[-1].trainable
custom_resnet_model.compile(loss='categorical_crossentropy', optimizer=optimizers.Adam(), metrics=['accuracy'])
# train settings
TRAIN_BATCH_SIZE = 128
WARM_UP_EPOCHS = 1
FINAL_EPOCHS = 100
GRAD_CLIP_THRESHOLD = 0.5
ALPHA_LEARNING_RATE = 0.001
tensorboard = TensorBoard(log_dir="./deepfashion/tboard-resnet50-logs/{}_{}_{}".format(TRAIN_BATCH_SIZE, FINAL_EPOCHS, time.time()), write_graph=True)
t=time.time()
with tf.device('/gpu:0'):
hist = custom_resnet_model.fit(
train_data,
train_onehot_encoded,
batch_size=TRAIN_BATCH_SIZE,
epochs=WARM_UP_EPOCHS,
verbose=1,
validation_data=(val_data, val_onehot_encoded))
print('Training time (secs): %s' % (time.time() - t))
with tf.device('/gpu:0'):
(loss, accuracy) = custom_resnet_model.evaluate(test_data,
test_onehot_encoded,
batch_size=TRAIN_BATCH_SIZE,
verbose=1)
print("[INFO] pre fine-tune loss={:.4f}, pre fine-tune accuracy: {:.4f}%".format(loss,accuracy * 100))
# at this point, the top layers are well trained and we can start fine-tuning
# convolutional layers from inception V3. We will freeze the bottom N layers
# and train the remaining top layers.
# let's visualize layer names and layer indices to see how many layers
# we should freeze:
for i, layer in enumerate(base_model.layers):
print(i, layer.name)
# Test # 1: we chose to train the top 1 resnet blocks, i.e. we will freeze
# the first 163 layers and unfreeze the rest: (add_15)
# for layer in base_model.layers[:163]:
# layer.trainable = False
# for layer in base_model.layers[163:]:
# layer.trainable = True
# Test # 2: we chose to train the top 2 resnet blocks, i.e. we will freeze
# the first 153 layers and unfreeze the rest: (add_14)
# for layer in base_model.layers[:153]:
# layer.trainable = False
# for layer in base_model.layers[153:]:
# layer.trainable = True
# Test # 3: we chose to train the top 3 resnet blocks, i.e. we will freeze
# the first 143 layers and unfreeze the rest: (add_13)
# for layer in base_model.layers[:141]:
# layer.trainable = False
# for layer in base_model.layers[141:]:
# layer.trainable = True
# Test # 4: we chose to train the top 4 resnet blocks, i.e. we will freeze
# the first 143 layers and unfreeze the rest: (add_12)
for layer in base_model.layers[:131]:
layer.trainable = False
for layer in base_model.layers[131:]:
layer.trainable = True
# we chose to train the top 2 inception blocks, i.e. we will freeze
# the first 172 layers and unfreeze the rest:
# for layer in model.layers[:172]:
# layer.trainable = False
# for layer in model.layers[172:]:
# layer.trainable = True
# UNUSED: Store the model on disk
# model_name = 'resnet50_{}_{}_{}.h5'.format(TRAIN_BATCH_SIZE, EPOCHS, time.time())
# save_model_to_disk(custom_resnet_model, model_name)
# print('STATIC LEARNING_PHASE = 1')
# K.clear_session()
# K.set_learning_phase(1)
# UNUSED: custom_resnet_model = load_model_from_disk(model_name)
# we need to recompile the model for these modifications to take effect
# we use SGD with a low learning rate
# from keras.optimizers import SGD
# custom_resnet_model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy', metrics=['accuracy'])
# stop if val_loss stops improving for 10 epochs
# early_stopping = EarlyStopping(verbose=1, patience=10, monitor='val_loss')
# add top-K accuracy reporting
top3_acc = functools.partial(keras.metrics.top_k_categorical_accuracy, k=3)
top3_acc.__name__ = 'top3_acc'
opti_grad_clip=optimizers.Adam(lr=ALPHA_LEARNING_RATE)
# opti_grad_clip=optimizers.RMSprop(lr=2e-3)
custom_resnet_model.compile(loss='categorical_crossentropy', optimizer=opti_grad_clip, metrics=['accuracy', 'top_k_categorical_accuracy', top3_acc])
# init plotter
plot_losses = TrainingPlot(FINAL_EPOCHS, TRAIN_BATCH_SIZE)
# learning rate
# loss_history = LossHistory()
# lrate = LearningRateScheduler(step_decay)
lr_decay = LearningRateScheduler(schedule=lambda epoch: ALPHA_LEARNING_RATE * (0.9 ** epoch))
# we train our model again (this time fine-tuning the top 2 inception blocks
# alongside the top Dense layers
with tf.device('/gpu:0'):
hist = custom_resnet_model.fit(
train_data,
train_onehot_encoded,
batch_size=TRAIN_BATCH_SIZE,
epochs=FINAL_EPOCHS,
verbose=1,
validation_data=(val_data, val_onehot_encoded),
callbacks=[tensorboard, plot_losses, lr_decay])
with tf.device('/gpu:0'):
(loss, accuracy, top_5, top_3) = custom_resnet_model.evaluate(test_data,
test_onehot_encoded,
batch_size=TRAIN_BATCH_SIZE,
verbose=1)
print("[INFO] final loss={:.4f}, final accuracy: {:.4f}, final top_5: {:.4f}, final top_3: {:.4f}%".format(loss, accuracy * 100, top_5, top_3))
# let's visualize layer names and layer indices to see how many layers
# we should freeze:
# for i, layer in enumerate(base_model.layers):
# print(i, layer.name)
| """
Copyright (c) 2018 dibghosh AT stanford edu
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
print(sys.path)
print(sys.executable)
import os
import numpy as np
import json
import random
import matplotlib.pyplot as plt
from IPython.display import clear_output
import keras
from keras.utils.data_utils import get_file
from keras import backend as K
from keras.models import model_from_json
from keras import regularizers
import functools
import tensorflow as tf
import numpy as np
import os
import time
from keras.preprocessing import image
from keras.layers import GlobalAveragePooling2D, Dense, Dropout,Activation,Flatten
from keras.applications import ResNet50
# from keras.applications.inception_v3 import InceptionV3
from keras.callbacks import TensorBoard, EarlyStopping, LearningRateScheduler
from keras import optimizers
from keras.layers import Input
from keras.models import Model
from keras.utils import np_utils
from sklearn.utils import shuffle
# lscpu Core(s) per socket: 2
NUM_PARALLEL_EXEC_UNITS = 2
# sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
sess = tf.Session(
config=tf.ConfigProto(
log_device_placement=True,
intra_op_parallelism_threads=NUM_PARALLEL_EXEC_UNITS,
inter_op_parallelism_threads=2,
allow_soft_placement=True,
# gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5),
device_count = {'GPU': 1, 'CPU': NUM_PARALLEL_EXEC_UNITS }
)
)
keras.backend.set_session(sess)
NUM_CLASSES = 46
TRAIN_DATA_SIZE = 5000
TEST_DATA_SIZE = 1000
VAL_DATA_SIZE = 1000
img_h = 224
img_w = 224
# inception_img_h = 299
# inception_img_w = 299
np.random.seed(seed=1234)
random.seed(1234)
class ImageClass():
    """Groups the image file paths that belong to a single class label."""

    def __init__(self, name, image_paths):
        self.name = name                # class/category label
        self.image_paths = image_paths  # list of file paths for this class

    def __str__(self):
        # e.g. "tops, 12 images"
        return '{}, {} images'.format(self.name, len(self.image_paths))

    def __len__(self):
        return len(self.image_paths)
def get_dataset(path, has_class_directories=True):
    """Scan `path` for per-class subdirectories and return a sorted list of
    ImageClass objects, one per subdirectory.

    NOTE(review): `has_class_directories` is accepted but never used --
    presumably a leftover flag; confirm before removing.
    """
    dataset = []
    path_exp = os.path.expanduser(path)
    # keep only the entries of `path` that are themselves directories;
    # NOTE(review): the comprehension reuses the name `path` -- under Python 2
    # this clobbers the argument, harmless here since only `path_exp` is used
    classes = [path for path in os.listdir(path_exp) \
               if os.path.isdir(os.path.join(path_exp, path))]
    classes.sort()
    nrof_classes = len(classes)
    for i in range(nrof_classes):
        class_name = classes[i]
        facedir = os.path.join(path_exp, class_name)
        image_paths = get_image_paths(facedir)
        dataset.append(ImageClass(class_name, image_paths))
    return dataset
def get_image_paths(facedir):
    """Return the full paths of every entry inside `facedir`.

    Returns an empty list when `facedir` does not exist or is not a directory.
    """
    if not os.path.isdir(facedir):
        return []
    return [os.path.join(facedir, name) for name in os.listdir(facedir)]
def get_image_paths_and_labels(dataset):
    """Flatten a list of per-class objects into parallel lists.

    Each element of `dataset` must expose `.image_paths`; the label of a path
    is the index of its class within `dataset`.
    Returns (all_paths, integer_labels).
    """
    image_paths_flat = []
    labels_flat = []
    for class_index, image_class in enumerate(dataset):
        image_paths_flat.extend(image_class.image_paths)
        labels_flat.extend([class_index] * len(image_class.image_paths))
    return image_paths_flat, labels_flat
def preprocess_input(x, dim_ordering='default'):
    """Subtract the ImageNet per-channel means (in place) and flip RGB->BGR.

    `x` is a 4-D batch; `dim_ordering` selects channels-first ('th') or
    channels-last ('tf'), defaulting to the backend's setting.
    Returns a channel-reversed view of the mean-subtracted array.
    """
    if dim_ordering == 'default':
        dim_ordering = K.image_dim_ordering()
    assert dim_ordering in {'tf', 'th'}
    # ImageNet channel means, in (R, G, B) order
    means = (103.939, 116.779, 123.68)
    if dim_ordering == 'th':
        for channel, mean in enumerate(means):
            x[:, channel, :, :] -= mean
        x = x[:, ::-1, :, :]  # 'RGB'->'BGR'
    else:
        for channel, mean in enumerate(means):
            x[:, :, :, channel] -= mean
        x = x[:, :, :, ::-1]  # 'RGB'->'BGR'
    return x
from glob import glob
from keras.preprocessing import image
from tqdm import tqdm_notebook, tqdm # Iteration visualization
def load_dataset(data_dir_list, mode, max_per_class=100):
    """Load up to `max_per_class` JPEGs per category into one big array.

    Reads ../deepfashion/dataset/<mode>/<category>/*.jpg, resizes each image
    to (img_h, img_w) and applies the ImageNet mean-subtraction preprocessing.
    Pass a falsy `max_per_class` to load every image of every category.
    Returns (img_data, labels): the stacked image array plus the parallel
    list of category names.  Loads everything in memory -- expensive method,
    doesn't scale well.
    """
    img_data_list, labels =[],[]
    images_per_class = max_per_class
    for category in tqdm(data_dir_list):
        img_dir = "../deepfashion/dataset/%s/%s/*.jpg" % (mode, category)
        # print("Loading category =%s from path=%s" % (category, img_dir))
        img_list=glob(img_dir)
        if not max_per_class:
            # take all images
            images_per_class = len(img_list)
        print ('Found {} images out of {} for category {}'.format(images_per_class, len(img_list), category))
        for img_path in img_list[:images_per_class]:
            labels.append(category)
            img = image.load_img(img_path, target_size=(img_h, img_w))
            x = image.img_to_array(img)
            # add a batch axis so preprocess_input sees a 4-D array
            x = np.expand_dims(x, axis=0)
            x = preprocess_input(x)
            img_data_list.append(x)
    img_data = np.array(img_data_list)
    # drop the per-image batch axis: (N, 1, h, w, 3) -> (N, h, w, 3)
    img_data=np.rollaxis(img_data,1,0)
    img_data=img_data[0]
    return img_data, labels
### write both training + validation graphs in same plot
# https://stackoverflow.com/questions/47877475/keras-tensorboard-plot-train-and-validation-scalars-in-a-same-figure
class TrainValTensorBoard(TensorBoard):
    """TensorBoard callback that writes training and validation scalars into
    sibling subdirectories ('training' / 'validation') of `log_dir`, so both
    curves can be shown on the same TensorBoard figure."""
    def __init__(self, log_dir='./logs', **kwargs):
        # Make the original `TensorBoard` log to a subdirectory 'training'
        training_log_dir = os.path.join(log_dir, 'training')
        super(TrainValTensorBoard, self).__init__(training_log_dir, **kwargs)
        # Log the validation metrics to a separate subdirectory
        self.val_log_dir = os.path.join(log_dir, 'validation')
    def set_model(self, model):
        # Setup writer for validation metrics
        self.val_writer = tf.summary.FileWriter(self.val_log_dir)
        super(TrainValTensorBoard, self).set_model(model)
    def on_epoch_end(self, epoch, logs=None):
        # Pop the validation logs and handle them separately with
        # `self.val_writer`. Also rename the keys (strip 'val_') so that they
        # can be plotted on the same figure with the training metrics.
        logs = logs or {}
        val_logs = {k.replace('val_', ''): v for k, v in logs.items() if k.startswith('val_')}
        for name, value in val_logs.items():
            summary = tf.Summary()
            summary_value = summary.value.add()
            summary_value.simple_value = value.item()
            summary_value.tag = name
            self.val_writer.add_summary(summary, epoch)
        self.val_writer.flush()
        # Pass the remaining (training-only) logs to `TensorBoard.on_epoch_end`
        logs = {k: v for k, v in logs.items() if not k.startswith('val_')}
        super(TrainValTensorBoard, self).on_epoch_end(epoch, logs)
    def on_train_end(self, logs=None):
        super(TrainValTensorBoard, self).on_train_end(logs)
        self.val_writer.close()
def save_model_to_disk(model, model_name="model"):
    """Persist a Keras model as <model_name>.json (architecture) plus
    <model_name>.h5 (weights).  Counterpart of `load_model_from_disk`."""
    # serialize model to JSON
    model_json = model.to_json()
    with open("%s.json" % model_name, "w") as json_file:
        json_file.write(model_json)
    # serialize weights to HDF5
    model.save_weights("%s.h5" % model_name)
    print("Saved %s to disk" % model_name)
def load_model_from_disk(model_name):
    """Rebuild a Keras model from <model_name>.json and load its
    <model_name>.h5 weights.  Counterpart of `save_model_to_disk`."""
    # load json and create model; `with` guarantees the handle is closed
    # even if reading raises (original opened/closed the file manually)
    with open('%s.json' % model_name, 'r') as json_file:
        loaded_model_json = json_file.read()
    loaded_model = model_from_json(loaded_model_json)
    # load weights into new model
    loaded_model.load_weights("%s.h5" % model_name)
    print("Loaded %s from disk" % model_name)
    return loaded_model
# apply learning rate decay
# using step decay function and LearningRateScheduler callback to take
# step decay function as argument and return updated learning rates for use in SGD optimizer.
def step_decay(epoch):
    """Step-wise learning-rate schedule: start at 1e-4 and halve the rate
    every `epochs_drop` (10) epochs.  Returns the rate for `epoch`."""
    # BUGFIX: `math` was referenced but never imported anywhere in this
    # script, so calling this raised NameError; keep the import local so the
    # function stays self-contained.
    import math
    initial_lrate = 0.0001
    drop = 0.5
    epochs_drop = 10.0
    lrate = initial_lrate * math.pow(drop,
                                     math.floor((1 + epoch) / epochs_drop))
    return lrate
# also create a learning rate decay plotter
class LossHistory(keras.callbacks.Callback):
    """Records the training loss and the step-decayed learning rate after
    each epoch (assumes `step_decay` drives the actual optimizer rate)."""
    def on_train_begin(self, logs={}):
        # NOTE(review): mutable default `logs={}` kept as-is; Keras always
        # passes its own dict, so the shared default is never mutated here.
        self.losses = []
        self.lr = []
    def on_epoch_end(self, batch, logs={}):
        self.losses.append(logs.get("loss"))
        # recompute the schedule from the number of epochs seen so far
        self.lr.append(step_decay(len(self.losses)))
class TrainingPlot(keras.callbacks.Callback):
    """Keras callback that saves a loss/accuracy-vs-epoch PNG after every
    epoch once at least two epochs of history exist.  `num_epochs` and
    `batch_size` are only used to name the output file."""
    def __init__(self, num_epochs, batch_size, **kwargs):
        self.num_epochs = num_epochs
        self.batch_size = batch_size
        super(TrainingPlot, self).__init__(**kwargs)
    # This function is called when the training begins
    def on_train_begin(self, logs={}):
        # Initialize the lists for holding the logs, losses and accuracies
        self.losses = []
        self.acc = []
        self.val_losses = []
        self.val_acc = []
        self.logs = []
    # This function is called at the end of each epoch
    def on_epoch_end(self, epoch, logs={}):
        # Append the logs, losses and accuracies to the lists
        self.logs.append(logs)
        self.losses.append(logs.get('loss'))
        self.acc.append(logs.get('acc'))
        self.val_losses.append(logs.get('val_loss'))
        self.val_acc.append(logs.get('val_acc'))
        # Before plotting ensure at least 2 epochs have passed
        if len(self.losses) > 1:
            # Clear the previous plot (notebook display)
            clear_output(wait=True)
            N = np.arange(0, len(self.losses))
            # You can chose the style of your preference
            # print(plt.style.available) to see the available options
            plt.style.use("seaborn")
            # Plot train loss, train acc, val loss and val acc against epochs passed
            plt.figure()
            plt.plot(N, self.losses, label = "train_loss")
            plt.plot(N, self.acc, label = "train_acc")
            plt.plot(N, self.val_losses, label = "val_loss")
            plt.plot(N, self.val_acc, label = "val_acc")
            plt.title("Training Loss and Accuracy [Epoch {}]".format(epoch))
            plt.xlabel("Epoch #")
            plt.ylabel("Loss/Accuracy")
            plt.legend()
            # timestamped filename so successive epochs don't overwrite
            plt.savefig('resnet50_{}_{}_{}.png'.format(self.batch_size, self.num_epochs, time.time()))
            plt.close()
train_data_dir = os.listdir("../deepfashion/dataset/train/")
val_data_dir = os.listdir("../deepfashion/dataset/val/")
test_data_dir = os.listdir("../deepfashion/dataset/test/")
images_per_class = 400
train_data, train_labels = load_dataset(train_data_dir, "train", images_per_class)
print("[*] loaded %s training images" % (len(train_labels)))
val_data, val_labels = load_dataset(val_data_dir, "val", images_per_class)
print("[*] loaded %s validation images" % (len(val_labels)))
test_data, test_labels = load_dataset(test_data_dir, "test", max_per_class=None)
print("[*] loaded %s test images" % (len(test_labels)))
# train_set = get_dataset(train_data_dir)
# val_set = get_dataset(val_data_dir)
# nrof_classes = len(train_set)
# print('Number of classes : %s' % nrof_classes)
# # prepare data
# # convert class labels to on-hot encoding
# # Get a list of image paths and their labels
# train_image_list, train_label_list = get_image_paths_and_labels(train_set)
# assert len(image_list) > 0, 'The training set should not be empty'
# val_image_list, val_label_list = get_image_paths_and_labels(val_set)
# we will use the encoders from the scikit-learn library.
# Specifically, the LabelEncoder for creating an integer encoding of labels
# and the OneHotEncoder for creating a one hot encoding of integer encoded values.
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder

def _one_hot(labels):
    """Return a dense one-hot matrix (n_samples, n_classes) for string labels."""
    integer_encoded = LabelEncoder().fit_transform(labels)
    integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
    return OneHotEncoder(sparse=False).fit_transform(integer_encoded)

# NOTE(review): each split is encoded independently (as in the original),
# so column order only matches across splits if every split contains the
# same label set -- confirm.
train_onehot_encoded = _one_hot(train_labels)
val_onehot_encoded = _one_hot(val_labels)
test_onehot_encoded = _one_hot(test_labels)
### network
# keras BN bug - https://github.com/keras-team/keras/pull/9965
# K.clear_session()
# K.set_learning_phase(0)
base_model = ResNet50(
weights='imagenet',
include_top=False,
input_shape=(img_h, img_w, 3))
# base_model = InceptionV3(
# weights='imagenet',
# include_top=False,
# input_shape=(inception_img_h, inception_img_w, 3))
base_model.summary()
L2_RATE_KERNEL = 0.01
L2_RATE_ACTIVITY = 0.01
last_layer = base_model.output
# add a global spatial average pooling layer
x = GlobalAveragePooling2D()(last_layer)
# add fully-connected & dropout layers
x = Dense(1024,
activation='relu',
name='fc-1')(x)
# a softmax layer for 46 classes
predictions = Dense(NUM_CLASSES, activation='softmax',name='output_layer')(x)
# this is the model we will train
custom_resnet_model = Model(inputs=base_model.input, outputs=predictions)
custom_resnet_model.summary()
# first: train only the top layers (which were randomly initialized)
# i.e. freeze all layers
for layer in custom_resnet_model.layers:
layer.trainable = False
# custom_resnet_model.layers[-1].trainable
custom_resnet_model.compile(loss='categorical_crossentropy', optimizer=optimizers.Adam(), metrics=['accuracy'])
# train settings
TRAIN_BATCH_SIZE = 128
WARM_UP_EPOCHS = 1
FINAL_EPOCHS = 100
GRAD_CLIP_THRESHOLD = 0.5
ALPHA_LEARNING_RATE = 0.001
tensorboard = TensorBoard(log_dir="./deepfashion/tboard-resnet50-logs/{}_{}_{}".format(TRAIN_BATCH_SIZE, FINAL_EPOCHS, time.time()), write_graph=True)
t=time.time()
with tf.device('/gpu:0'):
hist = custom_resnet_model.fit(
train_data,
train_onehot_encoded,
batch_size=TRAIN_BATCH_SIZE,
epochs=WARM_UP_EPOCHS,
verbose=1,
validation_data=(val_data, val_onehot_encoded))
print('Training time (secs): %s' % (time.time() - t))
with tf.device('/gpu:0'):
(loss, accuracy) = custom_resnet_model.evaluate(test_data,
test_onehot_encoded,
batch_size=TRAIN_BATCH_SIZE,
verbose=1)
print("[INFO] pre fine-tune loss={:.4f}, pre fine-tune accuracy: {:.4f}%".format(loss,accuracy * 100))
# at this point, the top layers are well trained and we can start fine-tuning
# convolutional layers from inception V3. We will freeze the bottom N layers
# and train the remaining top layers.
# let's visualize layer names and layer indices to see how many layers
# we should freeze:
for i, layer in enumerate(base_model.layers):
print(i, layer.name)
# Test # 1: we chose to train the top 1 resnet blocks, i.e. we will freeze
# the first 163 layers and unfreeze the rest: (add_15)
# for layer in base_model.layers[:163]:
# layer.trainable = False
# for layer in base_model.layers[163:]:
# layer.trainable = True
# Test # 2: we chose to train the top 2 resnet blocks, i.e. we will freeze
# the first 153 layers and unfreeze the rest: (add_14)
# for layer in base_model.layers[:153]:
# layer.trainable = False
# for layer in base_model.layers[153:]:
# layer.trainable = True
# Test # 3: we chose to train the top 3 resnet blocks, i.e. we will freeze
# the first 143 layers and unfreeze the rest: (add_13)
# for layer in base_model.layers[:141]:
# layer.trainable = False
# for layer in base_model.layers[141:]:
# layer.trainable = True
# Test # 4: we chose to train the top 4 resnet blocks, i.e. we will freeze
# the first 143 layers and unfreeze the rest: (add_12)
for layer in base_model.layers[:131]:
layer.trainable = False
for layer in base_model.layers[131:]:
layer.trainable = True
# we chose to train the top 2 inception blocks, i.e. we will freeze
# the first 172 layers and unfreeze the rest:
# for layer in model.layers[:172]:
# layer.trainable = False
# for layer in model.layers[172:]:
# layer.trainable = True
# UNUSED: Store the model on disk
# model_name = 'resnet50_{}_{}_{}.h5'.format(TRAIN_BATCH_SIZE, EPOCHS, time.time())
# save_model_to_disk(custom_resnet_model, model_name)
# print('STATIC LEARNING_PHASE = 1')
# K.clear_session()
# K.set_learning_phase(1)
# UNUSED: custom_resnet_model = load_model_from_disk(model_name)
# we need to recompile the model for these modifications to take effect
# we use SGD with a low learning rate
# from keras.optimizers import SGD
# custom_resnet_model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy', metrics=['accuracy'])
# stop if val_loss stops improving for 10 epochs
# early_stopping = EarlyStopping(verbose=1, patience=10, monitor='val_loss')
# add top-K accuracy reporting
top3_acc = functools.partial(keras.metrics.top_k_categorical_accuracy, k=3)
top3_acc.__name__ = 'top3_acc'
opti_grad_clip=optimizers.Adam(lr=ALPHA_LEARNING_RATE)
# opti_grad_clip=optimizers.RMSprop(lr=2e-3)
custom_resnet_model.compile(loss='categorical_crossentropy', optimizer=opti_grad_clip, metrics=['accuracy', 'top_k_categorical_accuracy', top3_acc])
# init plotter
plot_losses = TrainingPlot(FINAL_EPOCHS, TRAIN_BATCH_SIZE)
# learning rate
# loss_history = LossHistory()
# lrate = LearningRateScheduler(step_decay)
lr_decay = LearningRateScheduler(schedule=lambda epoch: ALPHA_LEARNING_RATE * (0.9 ** epoch))
# we train our model again (this time fine-tuning the top 2 inception blocks
# alongside the top Dense layers
with tf.device('/gpu:0'):
hist = custom_resnet_model.fit(
train_data,
train_onehot_encoded,
batch_size=TRAIN_BATCH_SIZE,
epochs=FINAL_EPOCHS,
verbose=1,
validation_data=(val_data, val_onehot_encoded),
callbacks=[tensorboard, plot_losses, lr_decay])
with tf.device('/gpu:0'):
(loss, accuracy, top_5, top_3) = custom_resnet_model.evaluate(test_data,
test_onehot_encoded,
batch_size=TRAIN_BATCH_SIZE,
verbose=1)
print("[INFO] final loss={:.4f}, final accuracy: {:.4f}, final top_5: {:.4f}, final top_3: {:.4f}%".format(loss, accuracy * 100, top_5, top_3))
# let's visualize layer names and layer indices to see how many layers
# we should freeze:
# for i, layer in enumerate(base_model.layers):
# print(i, layer.name) | en | 0.709666 | Copyright (c) 2018 dibghosh AT stanford edu Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # from keras.applications.inception_v3 import InceptionV3 # lscpu Core(s) per socket: 2 # sess = tf.Session(config=tf.ConfigProto(log_device_placement=True)) # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5), # inception_img_h = 299 # inception_img_w = 299 # 'RGB'->'BGR' # 'RGB'->'BGR' # Iteration visualization loads images in memory. Expensive method. Doesn't scale well # print("Loading category =%s from path=%s" % (category, img_dir)) # take all images ### write both training + validation graphs in same plot # https://stackoverflow.com/questions/47877475/keras-tensorboard-plot-train-and-validation-scalars-in-a-same-figure # Make the original `TensorBoard` log to a subdirectory 'training' # Log the validation metrics to a separate subdirectory # Setup writer for validation metrics # Pop the validation logs and handle them separately with # `self.val_writer`. 
Also rename the keys so that they can # be plotted on the same figure with the training metrics # Pass the remaining logs to `TensorBoard.on_epoch_end` # serialize model to JSON # serialize weights to HDF5 # load json and create model # load weights into new model # apply learning rate decay # using step decay function and LearningRateScheduler callback to take # step decay function as argument and return updated learning rates for use in SGD optimizer. # also create a learning rate decay plotter # This function is called when the training begins # Initialize the lists for holding the logs, losses and accuracies # This function is called at the end of each epoch # Append the logs, losses and accuracies to the lists # Before plotting ensure at least 2 epochs have passed # Clear the previous plot # You can chose the style of your preference # print(plt.style.available) to see the available options # Plot train loss, train acc, val loss and val acc against epochs passed #") # train_set = get_dataset(train_data_dir) # val_set = get_dataset(val_data_dir) # nrof_classes = len(train_set) # print('Number of classes : %s' % nrof_classes) # # prepare data # # convert class labels to on-hot encoding # # Get a list of image paths and their labels # train_image_list, train_label_list = get_image_paths_and_labels(train_set) # assert len(image_list) > 0, 'The training set should not be empty' # val_image_list, val_label_list = get_image_paths_and_labels(val_set) # we will use the encoders from the scikit-learn library. # Specifically, the LabelEncoder of creating an integer encoding of labels # and the OneHotEncoder for creating a one hot encoding of integer encoded values. 
### network # keras BN bug - https://github.com/keras-team/keras/pull/9965 # K.clear_session() # K.set_learning_phase(0) # base_model = InceptionV3( # weights='imagenet', # include_top=False, # input_shape=(inception_img_h, inception_img_w, 3)) # add a global spatial average pooling layer # add fully-connected & dropout layers # a softmax layer for 46 classes # this is the model we will train # first: train only the top layers (which were randomly initialized) # i.e. freeze all layers # custom_resnet_model.layers[-1].trainable # train settings # at this point, the top layers are well trained and we can start fine-tuning # convolutional layers from inception V3. We will freeze the bottom N layers # and train the remaining top layers. # let's visualize layer names and layer indices to see how many layers # we should freeze: # Test # 1: we chose to train the top 1 resnet blocks, i.e. we will freeze # the first 163 layers and unfreeze the rest: (add_15) # for layer in base_model.layers[:163]: # layer.trainable = False # for layer in base_model.layers[163:]: # layer.trainable = True # Test # 2: we chose to train the top 2 resnet blocks, i.e. we will freeze # the first 153 layers and unfreeze the rest: (add_14) # for layer in base_model.layers[:153]: # layer.trainable = False # for layer in base_model.layers[153:]: # layer.trainable = True # Test # 3: we chose to train the top 3 resnet blocks, i.e. we will freeze # the first 143 layers and unfreeze the rest: (add_13) # for layer in base_model.layers[:141]: # layer.trainable = False # for layer in base_model.layers[141:]: # layer.trainable = True # Test # 4: we chose to train the top 4 resnet blocks, i.e. we will freeze # the first 143 layers and unfreeze the rest: (add_12) # we chose to train the top 2 inception blocks, i.e. 
we will freeze # the first 172 layers and unfreeze the rest: # for layer in model.layers[:172]: # layer.trainable = False # for layer in model.layers[172:]: # layer.trainable = True # UNUSED: Store the model on disk # model_name = 'resnet50_{}_{}_{}.h5'.format(TRAIN_BATCH_SIZE, EPOCHS, time.time()) # save_model_to_disk(custom_resnet_model, model_name) # print('STATIC LEARNING_PHASE = 1') # K.clear_session() # K.set_learning_phase(1) # UNUSED: custom_resnet_model = load_model_from_disk(model_name) # we need to recompile the model for these modifications to take effect # we use SGD with a low learning rate # from keras.optimizers import SGD # custom_resnet_model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy', metrics=['accuracy']) # stop if val_loss stops improving for 10 epochs # early_stopping = EarlyStopping(verbose=1, patience=10, monitor='val_loss') # add top-K accuracy reporting # opti_grad_clip=optimizers.RMSprop(lr=2e-3) # init plotter # learning rate # loss_history = LossHistory() # lrate = LearningRateScheduler(step_decay) # we train our model again (this time fine-tuning the top 2 inception blocks # alongside the top Dense layers # let's visualize layer names and layer indices to see how many layers # we should freeze: # for i, layer in enumerate(base_model.layers): # print(i, layer.name) | 1.463906 | 1 |
tasks/package_manager/__init__.py | kuwv/spades | 0 | 6624693 | <reponame>kuwv/spades
'''Module for package managers.'''
| '''Module for package managers.''' | en | 0.668532 | Module for package managers. | 1.227145 | 1 |
python/getting_started/main.py | arnemolland/getting-started | 0 | 6624694 | import os
import json
from dotenv import load_dotenv
from .aws_signing import AwsSigningV4
from .request_handler import RequestHandler
load_dotenv()
load_dotenv(os.path.join(os.path.dirname(__name__), "..", ".env"))
client_id = os.environ.get("CLIENT_ID")
client_secret = os.environ.get("CLIENT_SECRET")
api_key = os.environ.get("API_KEY")
aws_signer = AwsSigningV4(
aws_access_key_id=client_id,
aws_secret_access_key=client_secret,
aws_host="developer-api-testmode.dnb.no",
)
request_handler = RequestHandler(
endpoint="https://developer-api-testmode.dnb.no", api_key=api_key, aws_signer=aws_signer
)
def get_currency_conversions(quoteCurrency):
    """Fetch all conversion rates published for `quoteCurrency`."""
    path = f"/currencies/{quoteCurrency}"
    return request_handler.request(path=path).json()
def get_currency_conversion(quoteCurrency, baseCurrency):
    """Fetch the conversion between the two currencies.

    Path shape follows the DNB API: /currencies/<quote>/convert/<base>.
    """
    path = f"/currencies/{quoteCurrency}/convert/{baseCurrency}"
    return request_handler.request(path=path).json()
def get_access_token(ssn):
    """Exchange a (test-mode) SSN for a JWT API token via POST /tokens."""
    payload = {"ssn": ssn}
    response = request_handler.request(path="/tokens", method="POST", data=payload)
    return response.json()["jwtToken"]
def get_customer_info(api_token):
    """Fetch the customer record associated with the given JWT token."""
    return request_handler.request(
        path="/customers/current", api_token=api_token
    ).json()
def main():
    """Demo flow: obtain a token for a test-mode SSN, then exercise each
    endpoint and pretty-print the JSON responses."""
    api_token = get_access_token(ssn="29105573083")
    print("\nAPI token: " + api_token)
    customer = get_customer_info(api_token)
    print("\nCustomer info: " + json.dumps(customer, indent=4, sort_keys=True))
    currencies = get_currency_conversions("NOK")
    print("\nCurrencies: " + json.dumps(currencies, indent=4, sort_keys=True))
    currency = get_currency_conversion("NOK", "EUR")
    print("\nNOK -> EUR: " + json.dumps(currency, indent=4, sort_keys=True))
if __name__ == "__main__":
main()
| import os
import json
from dotenv import load_dotenv
from .aws_signing import AwsSigningV4
from .request_handler import RequestHandler
load_dotenv()
load_dotenv(os.path.join(os.path.dirname(__name__), "..", ".env"))
client_id = os.environ.get("CLIENT_ID")
client_secret = os.environ.get("CLIENT_SECRET")
api_key = os.environ.get("API_KEY")
aws_signer = AwsSigningV4(
aws_access_key_id=client_id,
aws_secret_access_key=client_secret,
aws_host="developer-api-testmode.dnb.no",
)
request_handler = RequestHandler(
endpoint="https://developer-api-testmode.dnb.no", api_key=api_key, aws_signer=aws_signer
)
def get_currency_conversions(quoteCurrency):
response = request_handler.request(path=f"/currencies/{quoteCurrency}")
return response.json()
def get_currency_conversion(quoteCurrency, baseCurrency):
response = request_handler.request(path=f"/currencies/{quoteCurrency}/convert/{baseCurrency}")
return response.json()
def get_access_token(ssn):
response = request_handler.request(path="/tokens", method="POST", data={"ssn": ssn})
return response.json()["jwtToken"]
def get_customer_info(api_token):
response = request_handler.request(path="/customers/current", api_token=api_token)
return response.json()
def main():
api_token = get_access_token(ssn="29105573083")
print("\nAPI token: " + api_token)
customer = get_customer_info(api_token)
print("\nCustomer info: " + json.dumps(customer, indent=4, sort_keys=True))
currencies = get_currency_conversions("NOK")
print("\nCurrencies: " + json.dumps(currencies, indent=4, sort_keys=True))
currency = get_currency_conversion("NOK", "EUR")
print("\nNOK -> EUR: " + json.dumps(currency, indent=4, sort_keys=True))
if __name__ == "__main__":
main()
| none | 1 | 2.259642 | 2 | |
py/tkhello3.py | jieyaren/hello-world | 3 | 6624695 | #/usr/bin/python3
import tkinter
top = tkinter.Tk()
label = tkinter.Label(top,text='hello world')
label.pack()
q = tkinter.Button(top,text='fuck world',command=top.quit,bg='red',fg='white')
q.pack(fill=tkinter.X,expand=1)
tkinter.mainloop()
| #/usr/bin/python3
import tkinter
top = tkinter.Tk()
label = tkinter.Label(top,text='hello world')
label.pack()
q = tkinter.Button(top,text='fuck world',command=top.quit,bg='red',fg='white')
q.pack(fill=tkinter.X,expand=1)
tkinter.mainloop()
| fr | 0.649008 | #/usr/bin/python3 | 3.636359 | 4 |
pypy/objspace/std/typeobject.py | woodrow/pyoac | 1 | 6624696 | from pypy.objspace.std.objspace import *
from pypy.interpreter.function import Function, StaticMethod
from pypy.interpreter import gateway
from pypy.interpreter.typedef import weakref_descr
from pypy.objspace.std.stdtypedef import std_dict_descr, issubtypedef, Member
from pypy.objspace.std.objecttype import object_typedef
from pypy.objspace.std.dictproxyobject import W_DictProxyObject
from pypy.rlib.objectmodel import we_are_translated
from pypy.rlib.objectmodel import current_object_addr_as_int
from pypy.rlib.jit import hint
from pypy.rlib.rarithmetic import intmask, r_uint
from copy_reg import _HEAPTYPE
# from compiler/misc.py
MANGLE_LEN = 256 # magic constant from compile.c
def _mangle(name, klass):
if not name.startswith('__'):
return name
if len(name) + 2 >= MANGLE_LEN:
return name
if name.endswith('__'):
return name
try:
i = 0
while klass[i] == '_':
i = i + 1
except IndexError:
return name
klass = klass[i:]
tlen = len(klass) + len(name)
if tlen > MANGLE_LEN:
end = len(klass) + MANGLE_LEN-tlen
if end < 0:
klass = '' # annotator hint
else:
klass = klass[:end]
return "_%s%s" % (klass, name)
class VersionTag(object):
    """Opaque marker: a type's `version_tag` is replaced with a fresh
    VersionTag whenever the type is mutated, invalidating cached lookups
    keyed on the old tag's identity."""
    pass
class W_TypeObject(W_Object):
from pypy.objspace.std.typetype import type_typedef as typedef
lazyloaders = {} # can be overridden by specific instances
version_tag = None
uses_object_getattribute = False
# ^^^ for config.objspace.std.getattributeshortcut
# (False is a conservative default, fixed during real usage)
    def __init__(w_self, space, name, bases_w, dict_w,
                 overridetypedef=None):
        """Initialise the wrapped type.

        `bases_w` are the wrapped base classes, `dict_w` the name -> wrapped
        value namespace.  `overridetypedef` is non-None only for built-in
        types, which are set up from their typedef instead of going through
        the user-defined-type setup.
        """
        w_self.space = space
        w_self.name = name
        w_self.bases_w = bases_w
        w_self.dict_w = dict_w
        w_self.nslots = 0
        w_self.hasdict = False
        w_self.needsdel = False
        w_self.weakrefable = False
        w_self.w_same_layout_as = None
        w_self.weak_subclasses = []
        w_self.__flags__ = 0 # or _HEAPTYPE
        w_self.instancetypedef = overridetypedef
        if overridetypedef is not None:
            setup_builtin_type(w_self)
            custom_metaclass = False
        else:
            setup_user_defined_type(w_self)
            custom_metaclass = not space.is_w(space.type(w_self), space.w_type)
        if space.config.objspace.std.withtypeversion:
            # types whose instances carry their own __dict__, and types with
            # a custom metaclass, cannot use the version-tag optimisation
            if w_self.instancetypedef.hasdict or custom_metaclass:
                pass
            else:
                w_self.version_tag = VersionTag()
    def mutated(w_self):
        """Invalidate caches after any mutation of this type: reset the
        getattribute shortcut, bump the version tag, and recurse into all
        subclasses (whose lookups depend on this type)."""
        space = w_self.space
        if space.config.objspace.std.getattributeshortcut:
            w_self.uses_object_getattribute = False
            # ^^^ conservative default, fixed during real usage
        if not space.config.objspace.std.withtypeversion:
            return
        # Invariant: version_tag is None if and only if
        # 'w_self.instancetypedef.hasdict' is True, which is the case
        # for a built-in type that provides its instances with their own
        # __dict__.  If 'hasdict' is True for a type T then it is also
        # True for all subtypes of T; so we don't need to look for
        # version_tags to update in the subclasses of a type T whose
        # version_tag is None.
        if w_self.version_tag is not None:
            w_self.version_tag = VersionTag()
            subclasses_w = w_self.get_subclasses()
            for w_subclass in subclasses_w:
                assert isinstance(w_subclass, W_TypeObject)
                w_subclass.mutated()
    def ready(w_self):
        """Called once the type is fully built: register this type as a
        subclass of each of its (wrapped-type) bases."""
        for w_base in w_self.bases_w:
            if not isinstance(w_base, W_TypeObject):
                continue
            w_base.add_subclass(w_self)
    # compute a tuple that fully describes the instance layout
    def get_full_instance_layout(w_self):
        """Return (layout type, hasdict, needsdel, weakrefable) -- two types
        with equal tuples have compatible instance layouts."""
        w_layout = w_self.w_same_layout_as or w_self
        return (w_layout, w_self.hasdict, w_self.needsdel, w_self.weakrefable)
    def compute_default_mro(w_self):
        """Return the C3-linearised method resolution order for this type."""
        return compute_C3_mro(w_self.space, w_self)
    def getdictvalue(w_self, space, w_attr):
        # wrapped-key variant: unwrap the attribute name and delegate
        return w_self.getdictvalue_w(space, space.str_w(w_attr))
    def getdictvalue_w(w_self, space, attr):
        """Fetch `attr` from this type's own dict_w (not the mro).

        A miss may still be satisfied by a one-shot lazy loader; a loader
        returning None means the attribute really does not exist.
        """
        w_value = w_self.dict_w.get(attr, None)
        if w_self.lazyloaders and w_value is None:
            if attr in w_self.lazyloaders:
                # very clever next line: it forces the attr string
                # to be interned.
                w_attr = space.new_interned_str(attr)
                loader = w_self.lazyloaders[attr]
                # each loader runs at most once; drop it before calling
                del w_self.lazyloaders[attr]
                w_value = loader()
                if w_value is not None:   # None means no such attribute
                    w_self.dict_w[attr] = w_value
                return w_value
        return w_value
    def lookup(w_self, name):
        """Look up `name` along the mro; returns the raw value or None."""
        # note that this doesn't call __get__ on the result at all
        space = w_self.space
        if space.config.objspace.std.withmethodcache:
            return w_self.lookup_where_with_method_cache(name)[1]
        return w_self._lookup(name)
def lookup_where(w_self, name):
space = w_self.space
if space.config.objspace.std.withmethodcache:
return w_self.lookup_where_with_method_cache(name)
return w_self._lookup_where(name)
def lookup_starting_at(w_self, w_starttype, name):
space = w_self.space
# XXX Optimize this with method cache
look = False
for w_class in w_self.mro_w:
if w_class is w_starttype:
look = True
elif look:
w_value = w_class.getdictvalue_w(space, name)
if w_value is not None:
return w_value
return None
def _lookup(w_self, key):
    """Uncached MRO lookup; returns the first hit or None."""
    space = w_self.space
    for w_class in w_self.mro_w:
        w_value = w_class.getdictvalue_w(space, key)
        if w_value is not None:
            return w_value
    return None

def _lookup_where(w_self, key):
    # like lookup() but also returns the parent class in which the
    # attribute was found
    space = w_self.space
    for w_class in w_self.mro_w:
        w_value = w_class.getdictvalue_w(space, key)
        if w_value is not None:
            return w_class, w_value
    return None, None

def lookup_where_with_method_cache(w_self, name):
    """MRO lookup going through the global (per-space) method cache.

    The cache is a direct-mapped table indexed by a hash mixing the
    type's version_tag address and the attribute name.  A None
    version_tag disables caching for this type.
    """
    space = w_self.space
    assert space.config.objspace.std.withmethodcache
    version_tag = w_self.version_tag
    if version_tag is None:
        # uncacheable type: fall back to the plain MRO walk
        tup = w_self._lookup_where(name)
        return tup
    SHIFT = r_uint.BITS - space.config.objspace.std.methodcachesizeexp
    version_tag_as_int = current_object_addr_as_int(version_tag)
    # ^^^Note: if the version_tag object is moved by a moving GC, the
    # existing method cache entries won't be found any more; new
    # entries will be created based on the new address.  The
    # assumption is that the version_tag object won't keep moving all
    # the time - so using the fast current_object_addr_as_int() instead
    # of a slower solution like hash() is still a good trade-off.
    method_hash = r_uint(intmask(version_tag_as_int * hash(name))) >> SHIFT
    cached_version_tag = space.method_cache_versions[method_hash]
    if cached_version_tag is version_tag:
        cached_name = space.method_cache_names[method_hash]
        if cached_name is name:
            # hit: same version tag and same (interned) name object
            tup = space.method_cache_lookup_where[method_hash]
            if space.config.objspace.std.withmethodcachecounter:
                space.method_cache_hits[name] = \
                    space.method_cache_hits.get(name, 0) + 1
            # print "hit", w_self, name
            return tup
    # miss: do the real lookup and overwrite this cache entry
    tup = w_self._lookup_where(name)
    space.method_cache_versions[method_hash] = version_tag
    space.method_cache_names[method_hash] = name
    space.method_cache_lookup_where[method_hash] = tup
    if space.config.objspace.std.withmethodcachecounter:
        space.method_cache_misses[name] = \
            space.method_cache_misses.get(name, 0) + 1
    # print "miss", w_self, name
    return tup
def check_user_subclass(w_self, w_subtype):
    """Validate that 'w_subtype' may be used in w_self.__new__(w_subtype).

    Raises TypeError unless w_subtype is a real subtype of w_self with
    the same low-level typedef (same instance implementation).
    """
    space = w_self.space
    if not isinstance(w_subtype, W_TypeObject):
        raise OperationError(space.w_TypeError,
            space.wrap("X is not a type object (%s)" % (
                space.type(w_subtype).getname(space, '?'))))
    if not space.is_true(space.issubtype(w_subtype, w_self)):
        raise OperationError(space.w_TypeError,
            space.wrap("%s.__new__(%s): %s is not a subtype of %s" % (
                w_self.name, w_subtype.name, w_subtype.name, w_self.name)))
    if w_self.instancetypedef is not w_subtype.instancetypedef:
        raise OperationError(space.w_TypeError,
            space.wrap("%s.__new__(%s) is not safe, use %s.__new__()" % (
                w_self.name, w_subtype.name, w_subtype.name)))
    return w_subtype

def _freeze_(w_self):
    "NOT_RPYTHON.  Forces the lazy attributes to be computed."
    if 'lazyloaders' in w_self.__dict__:
        # Python 2 .keys() returns a list copy, so deleting entries
        # inside getdictvalue_w() while iterating is safe here.
        for attr in w_self.lazyloaders.keys():
            w_self.getdictvalue_w(w_self.space, attr)
        del w_self.lazyloaders  # fall back to the empty class default
    return False

def getdict(w_self):  # returning a dict-proxy!
    """Return a read-only dict-proxy over this type's attributes."""
    if w_self.lazyloaders:
        w_self._freeze_()    # force un-lazification
    space = w_self.space
    dictspec = []
    for key, w_value in w_self.dict_w.items():
        dictspec.append((space.wrap(key), w_value))
    # speed hack: instantiate a dict object cls directly
    # NB: cannot use newdict, because that could return something else
    # than an instance of DictObjectCls
    newdic = space.DictObjectCls(space)
    newdic.initialize_content(dictspec)
    return W_DictProxyObject(newdic)
def unwrap(w_self, space):
    """Unwrap to a faked CPython type object, if this type has one."""
    if w_self.instancetypedef.fakedcpytype is not None:
        return w_self.instancetypedef.fakedcpytype
    from pypy.objspace.std.model import UnwrapError
    raise UnwrapError(w_self)

def is_heaptype(w_self):
    # deepfreeze hint: __flags__ is fixed at type-creation time
    w_self = hint(w_self, deepfreeze=True)
    return w_self.__flags__&_HEAPTYPE

def get_module(w_self):
    """Return the wrapped __module__ of this type.

    Heap types take it from their dict; built-in types honour a string
    '__module__' entry and default to '__builtin__'.
    """
    space = w_self.space
    if w_self.is_heaptype() and '__module__' in w_self.dict_w:
        return w_self.dict_w['__module__']
    else:
        # for non-heap types, CPython checks for a module.name in the
        # type name.  That's a hack, so we're allowed to use a different
        # hack...
        if ('__module__' in w_self.dict_w and
            space.is_true(space.isinstance(w_self.dict_w['__module__'],
                                           space.w_str))):
            return w_self.dict_w['__module__']
        return space.wrap('__builtin__')
def add_subclass(w_self, w_subclass):
    """Record 'w_subclass' in this type's weak list of direct subclasses."""
    space = w_self.space
    if not space.config.translation.rweakref:
        return    # no weakref support, don't keep track of subclasses
    import weakref
    assert isinstance(w_subclass, W_TypeObject)
    newref = weakref.ref(w_subclass)
    for i in range(len(w_self.weak_subclasses)):
        ref = w_self.weak_subclasses[i]
        if ref() is None:
            # reuse the slot of a subclass that has died
            w_self.weak_subclasses[i] = newref
            return
    else:
        # for/else: the loop found no dead slot, so append a new one
        w_self.weak_subclasses.append(newref)

def remove_subclass(w_self, w_subclass):
    """Drop 'w_subclass' from the weak subclass list, if present."""
    space = w_self.space
    if not space.config.translation.rweakref:
        return    # no weakref support, don't keep track of subclasses
    for i in range(len(w_self.weak_subclasses)):
        ref = w_self.weak_subclasses[i]
        if ref() is w_subclass:
            del w_self.weak_subclasses[i]
            return

def get_subclasses(w_self):
    """Return the list of still-alive direct subclasses."""
    space = w_self.space
    if not space.config.translation.rweakref:
        msg = ("this feature requires weakrefs, "
               "which are not available in this build of PyPy")
        raise OperationError(space.w_RuntimeError,
                             space.wrap(msg))
    subclasses_w = []
    for ref in w_self.weak_subclasses:
        w_ob = ref()
        if w_ob is not None:   # skip weakrefs whose target died
            subclasses_w.append(w_ob)
    return subclasses_w

# for now, weakref support for W_TypeObject is hard to get automatically
_lifeline_ = None   # lazily-installed weakref lifeline for this type

def getweakref(self):
    return self._lifeline_

def setweakref(self, space, weakreflifeline):
    self._lifeline_ = weakreflifeline
# ____________________________________________________________
# Initialization of type objects

def get_parent_layout(w_type):
    """Compute the most parent class of 'w_type' whose layout
    is the same as 'w_type', or None if all parents of 'w_type'
    have a different layout than 'w_type'.
    """
    w_starttype = w_type
    while len(w_type.bases_w) > 0:
        w_bestbase = find_best_base(w_type.space, w_type.bases_w)
        if w_type.instancetypedef is not w_bestbase.instancetypedef:
            break
        if w_type.nslots != w_bestbase.nslots:
            break
        w_type = w_bestbase   # same layout: climb one level up
    if w_type is not w_starttype:
        return w_type
    else:
        return None

def issublayout(w_layout1, w_layout2):
    """True if layout 'w_layout1' is 'w_layout2' or extends it."""
    space = w_layout2.space
    while w_layout1 is not w_layout2:
        w_layout1 = find_best_base(space, w_layout1.bases_w)
        if w_layout1 is None:
            return False   # reached the root without meeting w_layout2
        w_layout1 = w_layout1.w_same_layout_as or w_layout1
    return True
def find_best_base(space, bases_w):
    """The best base is one of the bases in the given list: the one
    whose layout a new type should use as a starting point.
    Returns None if no base is a new-style class.
    """
    w_bestbase = None
    for w_candidate in bases_w:
        if not isinstance(w_candidate, W_TypeObject):
            continue   # ignore classic (old-style) classes
        if w_bestbase is None:
            w_bestbase = w_candidate   # for now
            continue
        candtypedef = w_candidate.instancetypedef
        besttypedef = w_bestbase.instancetypedef
        if candtypedef is besttypedef:
            # two candidates with the same typedef are equivalent unless
            # one has extra slots over the other
            if w_candidate.nslots > w_bestbase.nslots:
                w_bestbase = w_candidate
        elif issubtypedef(candtypedef, besttypedef):
            # the more derived typedef wins
            w_bestbase = w_candidate
    return w_bestbase

def check_and_find_best_base(space, bases_w):
    """The best base is one of the bases in the given list: the one
    whose layout a new type should use as a starting point.
    This version checks that bases_w is an acceptable tuple of bases.
    """
    w_bestbase = find_best_base(space, bases_w)
    if w_bestbase is None:
        raise OperationError(space.w_TypeError,
                             space.wrap("a new-style class can't have "
                                        "only classic bases"))
    if not w_bestbase.instancetypedef.acceptable_as_base_class:
        raise OperationError(space.w_TypeError,
                             space.wrap("type '%s' is not an "
                                        "acceptable base class" %
                                        w_bestbase.instancetypedef.name))
    # check that all other bases' layouts are superclasses of the bestbase
    w_bestlayout = w_bestbase.w_same_layout_as or w_bestbase
    for w_base in bases_w:
        if isinstance(w_base, W_TypeObject):
            w_layout = w_base.w_same_layout_as or w_base
            if not issublayout(w_bestlayout, w_layout):
                raise OperationError(space.w_TypeError,
                                     space.wrap("instance layout conflicts in "
                                                "multiple inheritance"))
    return w_bestbase
def copy_flags_from_bases(w_self, w_bestbase):
    """Inherit hasdict/needsdel/weakrefable from all bases and the slot
    count from the chosen best base.  Returns True if any base is an
    old-style class."""
    hasoldstylebase = False
    for w_base in w_self.bases_w:
        if not isinstance(w_base, W_TypeObject):
            hasoldstylebase = True
            continue
        w_self.hasdict = w_self.hasdict or w_base.hasdict
        w_self.needsdel = w_self.needsdel or w_base.needsdel
        w_self.weakrefable = w_self.weakrefable or w_base.weakrefable
    w_self.nslots = w_bestbase.nslots
    return hasoldstylebase

def create_all_slots(w_self, hasoldstylebase):
    """Process __slots__ (if any): create member descriptors, and add
    the __dict__ / __weakref__ descriptors when appropriate."""
    space = w_self.space
    dict_w = w_self.dict_w
    if '__slots__' not in dict_w:
        # no __slots__: instances get a __dict__ and weakref support
        wantdict = True
        wantweakref = True
    else:
        wantdict = False
        wantweakref = False
        w_slots = dict_w['__slots__']
        if space.is_true(space.isinstance(w_slots, space.w_str)):
            slot_names_w = [w_slots]   # a single string is one slot name
        else:
            slot_names_w = space.unpackiterable(w_slots)
        for w_slot_name in slot_names_w:
            slot_name = space.str_w(w_slot_name)
            if slot_name == '__dict__':
                if wantdict or w_self.hasdict:
                    raise OperationError(space.w_TypeError,
                                         space.wrap("__dict__ slot disallowed: "
                                                    "we already got one"))
                wantdict = True
            elif slot_name == '__weakref__':
                if wantweakref or w_self.weakrefable:
                    raise OperationError(space.w_TypeError,
                                         space.wrap("__weakref__ slot disallowed: "
                                                    "we already got one"))
                wantweakref = True
            else:
                create_slot(w_self, slot_name)
    wantdict = wantdict or hasoldstylebase
    if wantdict: create_dict_slot(w_self)
    if wantweakref: create_weakref_slot(w_self)
    if '__del__' in dict_w: w_self.needsdel = True
def create_slot(w_self, slot_name):
    """Create one member descriptor for a __slots__ entry."""
    space = w_self.space
    if not valid_slot_name(slot_name):
        raise OperationError(space.w_TypeError,
                             space.wrap('__slots__ must be identifiers'))
    # create member
    slot_name = _mangle(slot_name, w_self.name)
    # Force interning of slot names.
    slot_name = space.str_w(space.new_interned_str(slot_name))
    member = Member(w_self.nslots, slot_name, w_self)
    w_self.dict_w[slot_name] = space.wrap(member)
    w_self.nslots += 1   # each slot occupies the next instance index

def create_dict_slot(w_self):
    # give instances a __dict__, unless one is inherited already
    if not w_self.hasdict:
        w_self.dict_w['__dict__'] = w_self.space.wrap(std_dict_descr)
        w_self.hasdict = True

def create_weakref_slot(w_self):
    # make instances weakly referenceable, unless inherited already
    if not w_self.weakrefable:
        w_self.dict_w['__weakref__'] = w_self.space.wrap(weakref_descr)
        w_self.weakrefable = True
def valid_slot_name(slot_name):
    """Return True if 'slot_name' is a well-formed slot identifier:
    non-empty, not starting with a digit, and containing only
    alphanumeric characters and underscores."""
    if not slot_name or slot_name[0].isdigit():
        return False
    return all(ch.isalnum() or ch == '_' for ch in slot_name)
def setup_user_defined_type(w_self):
    """Initialization path for types created by an app-level 'class'."""
    if len(w_self.bases_w) == 0:
        w_self.bases_w = [w_self.space.w_object]   # implicit base: object
    w_bestbase = check_and_find_best_base(w_self.space, w_self.bases_w)
    w_self.instancetypedef = w_bestbase.instancetypedef
    w_self.__flags__ = _HEAPTYPE
    hasoldstylebase = copy_flags_from_bases(w_self, w_bestbase)
    create_all_slots(w_self, hasoldstylebase)
    w_self.w_same_layout_as = get_parent_layout(w_self)
    ensure_common_attributes(w_self)

def setup_builtin_type(w_self):
    """Initialization path for built-in (typedef-based) types."""
    w_self.hasdict = w_self.instancetypedef.hasdict
    w_self.weakrefable = w_self.instancetypedef.weakrefable
    ensure_common_attributes(w_self)

def ensure_common_attributes(w_self):
    """Shared tail of both setup paths: __new__, __doc__, __module__
    and the MRO."""
    ensure_static_new(w_self)
    ensure_doc_attr(w_self)
    if w_self.is_heaptype():
        ensure_module_attr(w_self)
    w_self.mro_w = []      # temporarily
    compute_mro(w_self)

def ensure_static_new(w_self):
    # special-case __new__, as in CPython:
    # if it is a Function, turn it into a static method
    if '__new__' in w_self.dict_w:
        w_new = w_self.dict_w['__new__']
        if isinstance(w_new, Function):
            w_self.dict_w['__new__'] = StaticMethod(w_new)

def ensure_doc_attr(w_self):
    # make sure there is a __doc__ in dict_w
    w_self.dict_w.setdefault('__doc__', w_self.space.w_None)

def ensure_module_attr(w_self):
    # initialize __module__ in the dict (user-defined types only)
    if '__module__' not in w_self.dict_w:
        space = w_self.space
        try:
            caller = space.getexecutioncontext().framestack.top()
        except IndexError:
            pass   # no caller frame: leave __module__ unset
        else:
            w_globals = caller.w_globals
            w_name = space.finditem(w_globals, space.wrap('__name__'))
            if w_name is not None:
                w_self.dict_w['__module__'] = w_name
def compute_mro(w_self):
    """Compute and install w_self.mro_w, honouring a custom metaclass
    'mro' method when one is defined."""
    if w_self.is_heaptype():
        space = w_self.space
        w_metaclass = space.type(w_self)
        w_where, w_mro_func = space.lookup_in_type_where(w_metaclass, 'mro')
        assert w_mro_func is not None      # because there is one in 'type'
        if not space.is_w(w_where, space.w_type):
            # the metaclass overrides mro(): call it and validate the result
            w_mro_meth = space.get(w_mro_func, w_self)
            w_mro = space.call_function(w_mro_meth)
            mro_w = space.viewiterable(w_mro)
            w_self.mro_w = validate_custom_mro(space, mro_w)
            return    # done
    w_self.mro_w = w_self.compute_default_mro()[:]

def validate_custom_mro(space, mro_w):
    # do some checking here.  Note that unlike CPython, strange MROs
    # cannot really segfault PyPy.  At a minimum, we check that all
    # the elements in the mro seem to be (old- or new-style) classes.
    for w_class in mro_w:
        if not space.abstract_isclass_w(w_class):
            raise OperationError(space.w_TypeError,
                                 space.wrap("mro() returned a non-class"))
    return mro_w
# ____________________________________________________________

def call__Type(space, w_type, __args__):
    """Implement calling a type object: type(x) or instantiation."""
    # special case for type(x)
    if space.is_w(w_type, space.w_type):
        try:
            w_obj, = __args__.fixedunpack(1)
        except ValueError:
            pass    # not exactly one argument: fall through to __new__
        else:
            return space.type(w_obj)
    # invoke the __new__ of the type
    w_newfunc = space.getattr(w_type, space.wrap('__new__'))
    w_newobject = space.call_obj_args(w_newfunc, w_type, __args__)
    # maybe invoke the __init__ of the type
    # (only when __new__ actually produced an instance of w_type)
    if space.is_true(space.isinstance(w_newobject, w_type)):
        w_descr = space.lookup(w_newobject, '__init__')
        w_result = space.get_and_call_args(w_descr, w_newobject, __args__)
        if not space.is_w(w_result, space.w_None):
            raise OperationError(space.w_TypeError,
                                 space.wrap("__init__() should return None"))
    return w_newobject

def issubtype__Type_Type(space, w_type1, w_type2):
    # a type is a subtype of another iff the latter appears in its MRO
    return space.newbool(w_type2 in w_type1.mro_w)

def repr__Type(space, w_obj):
    """repr() of a type: "<type '...'>" or "<class 'mod.name'>"."""
    w_mod = w_obj.get_module()
    if not space.is_true(space.isinstance(w_mod, space.w_str)):
        mod = None
    else:
        mod = space.str_w(w_mod)
    if (not w_obj.is_heaptype() or
        (mod == '__builtin__' or mod == 'exceptions')):
        kind = 'type'
    else:
        kind = 'class'
    if mod is not None and mod !='__builtin__':
        return space.wrap("<%s '%s.%s'>" % (kind, mod, w_obj.name))
    else:
        return space.wrap("<%s '%s'>" % (kind, w_obj.name))
def getattr__Type_ANY(space, w_type, w_name):
    """Type attribute access: metaclass data descriptors first, then
    the type's own MRO, then metaclass non-data descriptors."""
    name = space.str_w(w_name)
    w_descr = space.lookup(w_type, name)
    if w_descr is not None:
        if space.is_data_descr(w_descr):
            # a data descriptor on the metaclass takes precedence
            return space.get(w_descr,w_type)
    w_value = w_type.lookup(name)
    if w_value is not None:
        # __get__(None, type): turns e.g. functions into unbound methods
        return space.get(w_value, space.w_None, w_type)
    if w_descr is not None:
        # fall back to the metaclass non-data descriptor
        return space.get(w_descr,w_type)
    msg = "type object '%s' has no attribute '%s'" %(w_type.name, name)
    raise OperationError(space.w_AttributeError, space.wrap(msg))

def setattr__Type_ANY_ANY(space, w_type, w_name, w_value):
    # Note.  This is exactly the same thing as descroperation.descr__setattr__,
    # but it is needed at bootstrap to avoid a call to w_type.getdict() which
    # would un-lazify the whole type.
    w_type.mutated()    # invalidate version tag / method caches
    name = space.str_w(w_name)
    w_descr = space.lookup(w_type, name)
    if w_descr is not None:
        if space.is_data_descr(w_descr):
            space.set(w_descr, w_type, w_value)
            return
    if (space.config.objspace.std.immutable_builtintypes
            and not w_type.is_heaptype()):
        msg = "can't set attributes on type object '%s'" %(w_type.name,)
        raise OperationError(space.w_TypeError, space.wrap(msg))
    if name == "__del__" and name not in w_type.dict_w:
        msg = "a __del__ method added to an existing type will not be called"
        space.warn(msg, space.w_RuntimeWarning)
    w_type.dict_w[name] = w_value

def delattr__Type_ANY(space, w_type, w_name):
    """Delete an attribute from a type, with the same descriptor and
    immutability rules as setattr."""
    w_type.mutated()
    if w_type.lazyloaders:
        w_type._freeze_()    # force un-lazification
    name = space.str_w(w_name)
    w_descr = space.lookup(w_type, name)
    if w_descr is not None:
        if space.is_data_descr(w_descr):
            space.delete(w_descr, w_type)
            return
    if (space.config.objspace.std.immutable_builtintypes
            and not w_type.is_heaptype()):
        msg = "can't delete attributes on type object '%s'" %(w_type.name,)
        raise OperationError(space.w_TypeError, space.wrap(msg))
    try:
        del w_type.dict_w[name]
        return
    except KeyError:
        raise OperationError(space.w_AttributeError, w_name)
# ____________________________________________________________

# App-level helper computing the classic (old-style) MRO: a plain
# depth-first, left-to-right walk without duplicates.
abstract_mro = gateway.applevel("""
    def abstract_mro(klass):
        # abstract/classic mro
        mro = []
        stack = [klass]
        while stack:
            klass = stack.pop()
            if klass not in mro:
                mro.append(klass)
                if not isinstance(klass.__bases__, tuple):
                    raise TypeError, '__bases__ must be a tuple'
                stack += klass.__bases__[::-1]
        return mro
""", filename=__file__).interphook("abstract_mro")

def get_mro(space, klass):
    # real W_TypeObjects have a precomputed mro_w; anything else goes
    # through the app-level classic-mro computation above
    if isinstance(klass, W_TypeObject):
        return list(klass.mro_w)
    else:
        return space.unpackiterable(abstract_mro(space, klass))

def compute_C3_mro(space, cls):
    """C3 linearization of 'cls' and its bases (the same merge
    algorithm as CPython's new-style classes)."""
    order = []
    orderlists = [get_mro(space, base) for base in cls.bases_w]
    orderlists.append([cls] + cls.bases_w)
    while orderlists:
        for candidatelist in orderlists:
            candidate = candidatelist[0]
            if mro_blockinglist(candidate, orderlists) is None:
                break    # good candidate
        else:
            return mro_error(space, orderlists)    # no candidate found
        assert candidate not in order
        order.append(candidate)
        # remove the accepted candidate from the head of every list
        for i in range(len(orderlists)-1, -1, -1):
            if orderlists[i][0] is candidate:
                del orderlists[i][0]
                if len(orderlists[i]) == 0:
                    del orderlists[i]
    return order
def mro_blockinglist(candidate, orderlists):
    """Return the first list in 'orderlists' whose tail (everything
    after the head element) still contains 'candidate', i.e. a list
    that blocks 'candidate' from being taken next in the C3 merge.
    Returns None when nothing blocks it (good candidate)."""
    blocking = None
    for sequence in orderlists:
        if candidate in sequence[1:]:
            blocking = sequence
            break
    return blocking
def mro_error(space, orderlists):
    """Raise a TypeError describing why no C3 linearization exists:
    either a duplicate base class or a cycle among base classes."""
    cycle = []
    candidate = orderlists[-1][0]
    if candidate in orderlists[-1][1:]:
        # explicit error message for this specific case
        raise OperationError(space.w_TypeError,
            space.wrap("duplicate base class " + candidate.getname(space,"?")))
    # follow the chain of blocking lists until we revisit a class:
    # that closes the inheritance cycle
    while candidate not in cycle:
        cycle.append(candidate)
        nextblockinglist = mro_blockinglist(candidate, orderlists)
        candidate = nextblockinglist[0]
    del cycle[:cycle.index(candidate)]   # keep only the cyclic part
    cycle.append(candidate)
    cycle.reverse()
    names = [cls.getname(space, "?") for cls in cycle]
    raise OperationError(space.w_TypeError,
        space.wrap("cycle among base classes: " + ' < '.join(names)))
# ____________________________________________________________
register_all(vars())
| from pypy.objspace.std.objspace import *
from pypy.interpreter.function import Function, StaticMethod
from pypy.interpreter import gateway
from pypy.interpreter.typedef import weakref_descr
from pypy.objspace.std.stdtypedef import std_dict_descr, issubtypedef, Member
from pypy.objspace.std.objecttype import object_typedef
from pypy.objspace.std.dictproxyobject import W_DictProxyObject
from pypy.rlib.objectmodel import we_are_translated
from pypy.rlib.objectmodel import current_object_addr_as_int
from pypy.rlib.jit import hint
from pypy.rlib.rarithmetic import intmask, r_uint
from copy_reg import _HEAPTYPE
# from compiler/misc.py
MANGLE_LEN = 256 # magic constant from compile.c
def _mangle(name, klass):
if not name.startswith('__'):
return name
if len(name) + 2 >= MANGLE_LEN:
return name
if name.endswith('__'):
return name
try:
i = 0
while klass[i] == '_':
i = i + 1
except IndexError:
return name
klass = klass[i:]
tlen = len(klass) + len(name)
if tlen > MANGLE_LEN:
end = len(klass) + MANGLE_LEN-tlen
if end < 0:
klass = '' # annotator hint
else:
klass = klass[:end]
return "_%s%s" % (klass, name)
class VersionTag(object):
    # Opaque marker object: a fresh instance is allocated each time a
    # type is mutated, invalidating method-cache entries by identity.
    pass
class W_TypeObject(W_Object):
    """Interp-level implementation of app-level type objects."""
    from pypy.objspace.std.typetype import type_typedef as typedef

    lazyloaders = {} # can be overridden by specific instances
    # version_tag: identity token replaced on every mutation (used by
    # the method cache); None when caching is disabled for this type.
    version_tag = None
    uses_object_getattribute = False
    # ^^^ for config.objspace.std.getattributeshortcut
    # (False is a conservative default, fixed during real usage)

    def __init__(w_self, space, name, bases_w, dict_w,
                 overridetypedef=None):
        # 'overridetypedef' is non-None only for built-in types
        w_self.space = space
        w_self.name = name
        w_self.bases_w = bases_w
        w_self.dict_w = dict_w
        w_self.nslots = 0
        w_self.hasdict = False
        w_self.needsdel = False
        w_self.weakrefable = False
        w_self.w_same_layout_as = None
        w_self.weak_subclasses = []
        w_self.__flags__ = 0 # or _HEAPTYPE
        w_self.instancetypedef = overridetypedef
        if overridetypedef is not None:
            setup_builtin_type(w_self)
            custom_metaclass = False
        else:
            setup_user_defined_type(w_self)
            custom_metaclass = not space.is_w(space.type(w_self), space.w_type)
        if space.config.objspace.std.withtypeversion:
            # no version tag when instances carry their own __dict__, or
            # when a custom metaclass could mutate behind our back
            if w_self.instancetypedef.hasdict or custom_metaclass:
                pass
            else:
                w_self.version_tag = VersionTag()

    def mutated(w_self):
        """Invalidate caches after a mutation of this type, propagating
        recursively through all live subclasses."""
        space = w_self.space
        if space.config.objspace.std.getattributeshortcut:
            w_self.uses_object_getattribute = False
            # ^^^ conservative default, fixed during real usage
        if not space.config.objspace.std.withtypeversion:
            return
        # Invariant: version_tag is None if and only if
        # 'w_self.instancetypedef.hasdict' is True, which is the case
        # for a built-in type that provides its instances with their own
        # __dict__.  If 'hasdict' is True for a type T then it is also
        # True for all subtypes of T; so we don't need to look for
        # version_tags to update in the subclasses of a type T whose
        # version_tag is None.
        if w_self.version_tag is not None:
            w_self.version_tag = VersionTag()
            subclasses_w = w_self.get_subclasses()
            for w_subclass in subclasses_w:
                assert isinstance(w_subclass, W_TypeObject)
                w_subclass.mutated()

    def ready(w_self):
        """Register this freshly-built type as a subclass of its bases."""
        for w_base in w_self.bases_w:
            if not isinstance(w_base, W_TypeObject):
                continue  # classic (old-style) bases are not tracked
            w_base.add_subclass(w_self)

    # compute a tuple that fully describes the instance layout
    def get_full_instance_layout(w_self):
        w_layout = w_self.w_same_layout_as or w_self
        return (w_layout, w_self.hasdict, w_self.needsdel, w_self.weakrefable)

    def compute_default_mro(w_self):
        """Return the default C3 linearization of this type."""
        return compute_C3_mro(w_self.space, w_self)

    def getdictvalue(w_self, space, w_attr):
        # Wrapped-name variant: unwrap the attribute name and delegate.
        return w_self.getdictvalue_w(space, space.str_w(w_attr))

    def getdictvalue_w(w_self, space, attr):
        """Look 'attr' up in this type's own dict, triggering a lazy
        loader if one is registered.  Returns wrapped value or None."""
        w_value = w_self.dict_w.get(attr, None)
        if w_self.lazyloaders and w_value is None:
            if attr in w_self.lazyloaders:
                # very clever next line: it forces the attr string
                # to be interned.
                w_attr = space.new_interned_str(attr)
                loader = w_self.lazyloaders[attr]
                del w_self.lazyloaders[attr]  # loaders run at most once
                w_value = loader()
                if w_value is not None:   # None means no such attribute
                    w_self.dict_w[attr] = w_value
                    return w_value
        return w_value

    def lookup(w_self, name):
        # note that this doesn't call __get__ on the result at all
        space = w_self.space
        if space.config.objspace.std.withmethodcache:
            return w_self.lookup_where_with_method_cache(name)[1]
        return w_self._lookup(name)

    def lookup_where(w_self, name):
        """Like lookup() but returns (defining_class, value)."""
        space = w_self.space
        if space.config.objspace.std.withmethodcache:
            return w_self.lookup_where_with_method_cache(name)
        return w_self._lookup_where(name)

    def lookup_starting_at(w_self, w_starttype, name):
        """MRO lookup considering only classes after 'w_starttype'."""
        space = w_self.space
        # XXX Optimize this with method cache
        look = False
        for w_class in w_self.mro_w:
            if w_class is w_starttype:
                look = True  # start searching from the next class
            elif look:
                w_value = w_class.getdictvalue_w(space, name)
                if w_value is not None:
                    return w_value
        return None

    def _lookup(w_self, key):
        """Uncached MRO lookup; first hit or None."""
        space = w_self.space
        for w_class in w_self.mro_w:
            w_value = w_class.getdictvalue_w(space, key)
            if w_value is not None:
                return w_value
        return None

    def _lookup_where(w_self, key):
        # like lookup() but also returns the parent class in which the
        # attribute was found
        space = w_self.space
        for w_class in w_self.mro_w:
            w_value = w_class.getdictvalue_w(space, key)
            if w_value is not None:
                return w_class, w_value
        return None, None

    def lookup_where_with_method_cache(w_self, name):
        """MRO lookup through the global (per-space) method cache, a
        direct-mapped table keyed by version_tag address and name."""
        space = w_self.space
        assert space.config.objspace.std.withmethodcache
        version_tag = w_self.version_tag
        if version_tag is None:
            # uncacheable type: plain MRO walk
            tup = w_self._lookup_where(name)
            return tup
        SHIFT = r_uint.BITS - space.config.objspace.std.methodcachesizeexp
        version_tag_as_int = current_object_addr_as_int(version_tag)
        # ^^^Note: if the version_tag object is moved by a moving GC, the
        # existing method cache entries won't be found any more; new
        # entries will be created based on the new address.  The
        # assumption is that the version_tag object won't keep moving all
        # the time - so using the fast current_object_addr_as_int() instead
        # of a slower solution like hash() is still a good trade-off.
        method_hash = r_uint(intmask(version_tag_as_int * hash(name))) >> SHIFT
        cached_version_tag = space.method_cache_versions[method_hash]
        if cached_version_tag is version_tag:
            cached_name = space.method_cache_names[method_hash]
            if cached_name is name:
                # hit: same version tag, same (interned) name object
                tup = space.method_cache_lookup_where[method_hash]
                if space.config.objspace.std.withmethodcachecounter:
                    space.method_cache_hits[name] = \
                        space.method_cache_hits.get(name, 0) + 1
                # print "hit", w_self, name
                return tup
        # miss: do the real lookup and overwrite this cache entry
        tup = w_self._lookup_where(name)
        space.method_cache_versions[method_hash] = version_tag
        space.method_cache_names[method_hash] = name
        space.method_cache_lookup_where[method_hash] = tup
        if space.config.objspace.std.withmethodcachecounter:
            space.method_cache_misses[name] = \
                space.method_cache_misses.get(name, 0) + 1
        # print "miss", w_self, name
        return tup

    def check_user_subclass(w_self, w_subtype):
        """Validate 'w_subtype' for use in w_self.__new__(w_subtype)."""
        space = w_self.space
        if not isinstance(w_subtype, W_TypeObject):
            raise OperationError(space.w_TypeError,
                space.wrap("X is not a type object (%s)" % (
                    space.type(w_subtype).getname(space, '?'))))
        if not space.is_true(space.issubtype(w_subtype, w_self)):
            raise OperationError(space.w_TypeError,
                space.wrap("%s.__new__(%s): %s is not a subtype of %s" % (
                    w_self.name, w_subtype.name, w_subtype.name, w_self.name)))
        if w_self.instancetypedef is not w_subtype.instancetypedef:
            raise OperationError(space.w_TypeError,
                space.wrap("%s.__new__(%s) is not safe, use %s.__new__()" % (
                    w_self.name, w_subtype.name, w_subtype.name)))
        return w_subtype

    def _freeze_(w_self):
        "NOT_RPYTHON.  Forces the lazy attributes to be computed."
        if 'lazyloaders' in w_self.__dict__:
            # Py2 .keys() copies, so mutating inside the loop is safe
            for attr in w_self.lazyloaders.keys():
                w_self.getdictvalue_w(w_self.space, attr)
            del w_self.lazyloaders  # fall back to the class default
        return False

    def getdict(w_self): # returning a dict-proxy!
        """Return a read-only dict-proxy over this type's attributes."""
        if w_self.lazyloaders:
            w_self._freeze_()    # force un-lazification
        space = w_self.space
        dictspec = []
        for key, w_value in w_self.dict_w.items():
            dictspec.append((space.wrap(key), w_value))
        # speed hack: instantiate a dict object cls directly
        # NB: cannot use newdict, because that could return something else
        # than an instance of DictObjectCls
        newdic = space.DictObjectCls(space)
        newdic.initialize_content(dictspec)
        return W_DictProxyObject(newdic)

    def unwrap(w_self, space):
        """Unwrap to a faked CPython type object, if there is one."""
        if w_self.instancetypedef.fakedcpytype is not None:
            return w_self.instancetypedef.fakedcpytype
        from pypy.objspace.std.model import UnwrapError
        raise UnwrapError(w_self)

    def is_heaptype(w_self):
        # deepfreeze hint: __flags__ is fixed at type-creation time
        w_self = hint(w_self, deepfreeze=True)
        return w_self.__flags__&_HEAPTYPE

    def get_module(w_self):
        """Return the wrapped __module__ of this type (heap types take
        it from their dict; built-ins default to '__builtin__')."""
        space = w_self.space
        if w_self.is_heaptype() and '__module__' in w_self.dict_w:
            return w_self.dict_w['__module__']
        else:
            # for non-heap types, CPython checks for a module.name in the
            # type name.  That's a hack, so we're allowed to use a different
            # hack...
            if ('__module__' in w_self.dict_w and
                space.is_true(space.isinstance(w_self.dict_w['__module__'],
                                               space.w_str))):
                return w_self.dict_w['__module__']
            return space.wrap('__builtin__')

    def add_subclass(w_self, w_subclass):
        """Record 'w_subclass' in the weak list of direct subclasses."""
        space = w_self.space
        if not space.config.translation.rweakref:
            return    # no weakref support, don't keep track of subclasses
        import weakref
        assert isinstance(w_subclass, W_TypeObject)
        newref = weakref.ref(w_subclass)
        for i in range(len(w_self.weak_subclasses)):
            ref = w_self.weak_subclasses[i]
            if ref() is None:
                # reuse the slot of a subclass that has died
                w_self.weak_subclasses[i] = newref
                return
        else:
            # for/else: no dead slot found, append a new one
            w_self.weak_subclasses.append(newref)

    def remove_subclass(w_self, w_subclass):
        """Drop 'w_subclass' from the weak subclass list, if present."""
        space = w_self.space
        if not space.config.translation.rweakref:
            return    # no weakref support, don't keep track of subclasses
        for i in range(len(w_self.weak_subclasses)):
            ref = w_self.weak_subclasses[i]
            if ref() is w_subclass:
                del w_self.weak_subclasses[i]
                return

    def get_subclasses(w_self):
        """Return the list of still-alive direct subclasses."""
        space = w_self.space
        if not space.config.translation.rweakref:
            msg = ("this feature requires weakrefs, "
                   "which are not available in this build of PyPy")
            raise OperationError(space.w_RuntimeError,
                                 space.wrap(msg))
        subclasses_w = []
        for ref in w_self.weak_subclasses:
            w_ob = ref()
            if w_ob is not None:   # skip weakrefs whose target died
                subclasses_w.append(w_ob)
        return subclasses_w

    # for now, weakref support for W_TypeObject is hard to get automatically
    _lifeline_ = None   # lazily-installed weakref lifeline

    def getweakref(self):
        return self._lifeline_

    def setweakref(self, space, weakreflifeline):
        self._lifeline_ = weakreflifeline
# ____________________________________________________________
# Initialization of type objects

def get_parent_layout(w_type):
    """Compute the most parent class of 'w_type' whose layout
    is the same as 'w_type', or None if all parents of 'w_type'
    have a different layout than 'w_type'.
    """
    w_starttype = w_type
    while len(w_type.bases_w) > 0:
        w_bestbase = find_best_base(w_type.space, w_type.bases_w)
        if w_type.instancetypedef is not w_bestbase.instancetypedef:
            break
        if w_type.nslots != w_bestbase.nslots:
            break
        w_type = w_bestbase   # same layout: climb one level up
    if w_type is not w_starttype:
        return w_type
    else:
        return None

def issublayout(w_layout1, w_layout2):
    """True if layout 'w_layout1' is 'w_layout2' or extends it."""
    space = w_layout2.space
    while w_layout1 is not w_layout2:
        w_layout1 = find_best_base(space, w_layout1.bases_w)
        if w_layout1 is None:
            return False   # reached the root without meeting w_layout2
        w_layout1 = w_layout1.w_same_layout_as or w_layout1
    return True
def find_best_base(space, bases_w):
    """The best base is one of the bases in the given list: the one
    whose layout a new type should use as a starting point.
    Returns None if no base is a new-style class.
    """
    w_bestbase = None
    for w_candidate in bases_w:
        if not isinstance(w_candidate, W_TypeObject):
            continue   # ignore classic (old-style) classes
        if w_bestbase is None:
            w_bestbase = w_candidate   # for now
            continue
        candtypedef = w_candidate.instancetypedef
        besttypedef = w_bestbase.instancetypedef
        if candtypedef is besttypedef:
            # two candidates with the same typedef are equivalent unless
            # one has extra slots over the other
            if w_candidate.nslots > w_bestbase.nslots:
                w_bestbase = w_candidate
        elif issubtypedef(candtypedef, besttypedef):
            # the more derived typedef wins
            w_bestbase = w_candidate
    return w_bestbase

def check_and_find_best_base(space, bases_w):
    """The best base is one of the bases in the given list: the one
    whose layout a new type should use as a starting point.
    This version checks that bases_w is an acceptable tuple of bases.
    """
    w_bestbase = find_best_base(space, bases_w)
    if w_bestbase is None:
        raise OperationError(space.w_TypeError,
                             space.wrap("a new-style class can't have "
                                        "only classic bases"))
    if not w_bestbase.instancetypedef.acceptable_as_base_class:
        raise OperationError(space.w_TypeError,
                             space.wrap("type '%s' is not an "
                                        "acceptable base class" %
                                        w_bestbase.instancetypedef.name))
    # check that all other bases' layouts are superclasses of the bestbase
    w_bestlayout = w_bestbase.w_same_layout_as or w_bestbase
    for w_base in bases_w:
        if isinstance(w_base, W_TypeObject):
            w_layout = w_base.w_same_layout_as or w_base
            if not issublayout(w_bestlayout, w_layout):
                raise OperationError(space.w_TypeError,
                                     space.wrap("instance layout conflicts in "
                                                "multiple inheritance"))
    return w_bestbase
def copy_flags_from_bases(w_self, w_bestbase):
    """Inherit hasdict/needsdel/weakrefable from all bases and the slot
    count from the chosen best base.  Returns True if any base is an
    old-style class."""
    hasoldstylebase = False
    for w_base in w_self.bases_w:
        if not isinstance(w_base, W_TypeObject):
            hasoldstylebase = True
            continue
        w_self.hasdict = w_self.hasdict or w_base.hasdict
        w_self.needsdel = w_self.needsdel or w_base.needsdel
        w_self.weakrefable = w_self.weakrefable or w_base.weakrefable
    w_self.nslots = w_bestbase.nslots
    return hasoldstylebase

def create_all_slots(w_self, hasoldstylebase):
    """Process __slots__ (if any): create member descriptors, and add
    the __dict__ / __weakref__ descriptors when appropriate."""
    space = w_self.space
    dict_w = w_self.dict_w
    if '__slots__' not in dict_w:
        # no __slots__: instances get a __dict__ and weakref support
        wantdict = True
        wantweakref = True
    else:
        wantdict = False
        wantweakref = False
        w_slots = dict_w['__slots__']
        if space.is_true(space.isinstance(w_slots, space.w_str)):
            slot_names_w = [w_slots]   # a single string is one slot name
        else:
            slot_names_w = space.unpackiterable(w_slots)
        for w_slot_name in slot_names_w:
            slot_name = space.str_w(w_slot_name)
            if slot_name == '__dict__':
                if wantdict or w_self.hasdict:
                    raise OperationError(space.w_TypeError,
                                         space.wrap("__dict__ slot disallowed: "
                                                    "we already got one"))
                wantdict = True
            elif slot_name == '__weakref__':
                if wantweakref or w_self.weakrefable:
                    raise OperationError(space.w_TypeError,
                                         space.wrap("__weakref__ slot disallowed: "
                                                    "we already got one"))
                wantweakref = True
            else:
                create_slot(w_self, slot_name)
    wantdict = wantdict or hasoldstylebase
    if wantdict: create_dict_slot(w_self)
    if wantweakref: create_weakref_slot(w_self)
    if '__del__' in dict_w: w_self.needsdel = True
def create_slot(w_self, slot_name):
space = w_self.space
if not valid_slot_name(slot_name):
raise OperationError(space.w_TypeError,
space.wrap('__slots__ must be identifiers'))
# create member
slot_name = _mangle(slot_name, w_self.name)
# Force interning of slot names.
slot_name = space.str_w(space.new_interned_str(slot_name))
member = Member(w_self.nslots, slot_name, w_self)
w_self.dict_w[slot_name] = space.wrap(member)
w_self.nslots += 1
def create_dict_slot(w_self):
if not w_self.hasdict:
w_self.dict_w['__dict__'] = w_self.space.wrap(std_dict_descr)
w_self.hasdict = True
def create_weakref_slot(w_self):
if not w_self.weakrefable:
w_self.dict_w['__weakref__'] = w_self.space.wrap(weakref_descr)
w_self.weakrefable = True
def valid_slot_name(slot_name):
if len(slot_name) == 0 or slot_name[0].isdigit():
return False
for c in slot_name:
if not c.isalnum() and c != '_':
return False
return True
def setup_user_defined_type(w_self):
if len(w_self.bases_w) == 0:
w_self.bases_w = [w_self.space.w_object]
w_bestbase = check_and_find_best_base(w_self.space, w_self.bases_w)
w_self.instancetypedef = w_bestbase.instancetypedef
w_self.__flags__ = _HEAPTYPE
hasoldstylebase = copy_flags_from_bases(w_self, w_bestbase)
create_all_slots(w_self, hasoldstylebase)
w_self.w_same_layout_as = get_parent_layout(w_self)
ensure_common_attributes(w_self)
def setup_builtin_type(w_self):
w_self.hasdict = w_self.instancetypedef.hasdict
w_self.weakrefable = w_self.instancetypedef.weakrefable
ensure_common_attributes(w_self)
def ensure_common_attributes(w_self):
ensure_static_new(w_self)
ensure_doc_attr(w_self)
if w_self.is_heaptype():
ensure_module_attr(w_self)
w_self.mro_w = [] # temporarily
compute_mro(w_self)
def ensure_static_new(w_self):
# special-case __new__, as in CPython:
# if it is a Function, turn it into a static method
if '__new__' in w_self.dict_w:
w_new = w_self.dict_w['__new__']
if isinstance(w_new, Function):
w_self.dict_w['__new__'] = StaticMethod(w_new)
def ensure_doc_attr(w_self):
# make sure there is a __doc__ in dict_w
w_self.dict_w.setdefault('__doc__', w_self.space.w_None)
def ensure_module_attr(w_self):
# initialize __module__ in the dict (user-defined types only)
if '__module__' not in w_self.dict_w:
space = w_self.space
try:
caller = space.getexecutioncontext().framestack.top()
except IndexError:
pass
else:
w_globals = caller.w_globals
w_name = space.finditem(w_globals, space.wrap('__name__'))
if w_name is not None:
w_self.dict_w['__module__'] = w_name
def compute_mro(w_self):
if w_self.is_heaptype():
space = w_self.space
w_metaclass = space.type(w_self)
w_where, w_mro_func = space.lookup_in_type_where(w_metaclass, 'mro')
assert w_mro_func is not None # because there is one in 'type'
if not space.is_w(w_where, space.w_type):
w_mro_meth = space.get(w_mro_func, w_self)
w_mro = space.call_function(w_mro_meth)
mro_w = space.viewiterable(w_mro)
w_self.mro_w = validate_custom_mro(space, mro_w)
return # done
w_self.mro_w = w_self.compute_default_mro()[:]
def validate_custom_mro(space, mro_w):
# do some checking here. Note that unlike CPython, strange MROs
# cannot really segfault PyPy. At a minimum, we check that all
# the elements in the mro seem to be (old- or new-style) classes.
for w_class in mro_w:
if not space.abstract_isclass_w(w_class):
raise OperationError(space.w_TypeError,
space.wrap("mro() returned a non-class"))
return mro_w
# ____________________________________________________________
def call__Type(space, w_type, __args__):
# special case for type(x)
if space.is_w(w_type, space.w_type):
try:
w_obj, = __args__.fixedunpack(1)
except ValueError:
pass
else:
return space.type(w_obj)
# invoke the __new__ of the type
w_newfunc = space.getattr(w_type, space.wrap('__new__'))
w_newobject = space.call_obj_args(w_newfunc, w_type, __args__)
# maybe invoke the __init__ of the type
if space.is_true(space.isinstance(w_newobject, w_type)):
w_descr = space.lookup(w_newobject, '__init__')
w_result = space.get_and_call_args(w_descr, w_newobject, __args__)
if not space.is_w(w_result, space.w_None):
raise OperationError(space.w_TypeError,
space.wrap("__init__() should return None"))
return w_newobject
def issubtype__Type_Type(space, w_type1, w_type2):
return space.newbool(w_type2 in w_type1.mro_w)
def repr__Type(space, w_obj):
w_mod = w_obj.get_module()
if not space.is_true(space.isinstance(w_mod, space.w_str)):
mod = None
else:
mod = space.str_w(w_mod)
if (not w_obj.is_heaptype() or
(mod == '__builtin__' or mod == 'exceptions')):
kind = 'type'
else:
kind = 'class'
if mod is not None and mod !='__builtin__':
return space.wrap("<%s '%s.%s'>" % (kind, mod, w_obj.name))
else:
return space.wrap("<%s '%s'>" % (kind, w_obj.name))
def getattr__Type_ANY(space, w_type, w_name):
name = space.str_w(w_name)
w_descr = space.lookup(w_type, name)
if w_descr is not None:
if space.is_data_descr(w_descr):
return space.get(w_descr,w_type)
w_value = w_type.lookup(name)
if w_value is not None:
# __get__(None, type): turns e.g. functions into unbound methods
return space.get(w_value, space.w_None, w_type)
if w_descr is not None:
return space.get(w_descr,w_type)
msg = "type object '%s' has no attribute '%s'" %(w_type.name, name)
raise OperationError(space.w_AttributeError, space.wrap(msg))
def setattr__Type_ANY_ANY(space, w_type, w_name, w_value):
# Note. This is exactly the same thing as descroperation.descr__setattr__,
# but it is needed at bootstrap to avoid a call to w_type.getdict() which
# would un-lazify the whole type.
w_type.mutated()
name = space.str_w(w_name)
w_descr = space.lookup(w_type, name)
if w_descr is not None:
if space.is_data_descr(w_descr):
space.set(w_descr, w_type, w_value)
return
if (space.config.objspace.std.immutable_builtintypes
and not w_type.is_heaptype()):
msg = "can't set attributes on type object '%s'" %(w_type.name,)
raise OperationError(space.w_TypeError, space.wrap(msg))
if name == "__del__" and name not in w_type.dict_w:
msg = "a __del__ method added to an existing type will not be called"
space.warn(msg, space.w_RuntimeWarning)
w_type.dict_w[name] = w_value
def delattr__Type_ANY(space, w_type, w_name):
w_type.mutated()
if w_type.lazyloaders:
w_type._freeze_() # force un-lazification
name = space.str_w(w_name)
w_descr = space.lookup(w_type, name)
if w_descr is not None:
if space.is_data_descr(w_descr):
space.delete(w_descr, w_type)
return
if (space.config.objspace.std.immutable_builtintypes
and not w_type.is_heaptype()):
msg = "can't delete attributes on type object '%s'" %(w_type.name,)
raise OperationError(space.w_TypeError, space.wrap(msg))
try:
del w_type.dict_w[name]
return
except KeyError:
raise OperationError(space.w_AttributeError, w_name)
# ____________________________________________________________
abstract_mro = gateway.applevel("""
def abstract_mro(klass):
# abstract/classic mro
mro = []
stack = [klass]
while stack:
klass = stack.pop()
if klass not in mro:
mro.append(klass)
if not isinstance(klass.__bases__, tuple):
raise TypeError, '__bases__ must be a tuple'
stack += klass.__bases__[::-1]
return mro
""", filename=__file__).interphook("abstract_mro")
def get_mro(space, klass):
if isinstance(klass, W_TypeObject):
return list(klass.mro_w)
else:
return space.unpackiterable(abstract_mro(space, klass))
def compute_C3_mro(space, cls):
order = []
orderlists = [get_mro(space, base) for base in cls.bases_w]
orderlists.append([cls] + cls.bases_w)
while orderlists:
for candidatelist in orderlists:
candidate = candidatelist[0]
if mro_blockinglist(candidate, orderlists) is None:
break # good candidate
else:
return mro_error(space, orderlists) # no candidate found
assert candidate not in order
order.append(candidate)
for i in range(len(orderlists)-1, -1, -1):
if orderlists[i][0] is candidate:
del orderlists[i][0]
if len(orderlists[i]) == 0:
del orderlists[i]
return order
def mro_blockinglist(candidate, orderlists):
for lst in orderlists:
if candidate in lst[1:]:
return lst
return None # good candidate
def mro_error(space, orderlists):
cycle = []
candidate = orderlists[-1][0]
if candidate in orderlists[-1][1:]:
# explicit error message for this specific case
raise OperationError(space.w_TypeError,
space.wrap("duplicate base class " + candidate.getname(space,"?")))
while candidate not in cycle:
cycle.append(candidate)
nextblockinglist = mro_blockinglist(candidate, orderlists)
candidate = nextblockinglist[0]
del cycle[:cycle.index(candidate)]
cycle.append(candidate)
cycle.reverse()
names = [cls.getname(space, "?") for cls in cycle]
raise OperationError(space.w_TypeError,
space.wrap("cycle among base classes: " + ' < '.join(names)))
# ____________________________________________________________
register_all(vars())
| en | 0.843715 | # from compiler/misc.py # magic constant from compile.c # annotator hint # can be overridden by specific instances # ^^^ for config.objspace.std.getattributeshortcut # (False is a conservative default, fixed during real usage) # or _HEAPTYPE # ^^^ conservative default, fixed during real usage # Invariant: version_tag is None if and only if # 'w_self.instancetypedef.hasdict' is True, which is the case # for a built-in type that provides its instances with their own # __dict__. If 'hasdict' is True for a type T then it is also # True for all subtypes of T; so we don't need to look for # version_tags to update in the subclasses of a type T whose # version_tag is None. # compute a tuple that fully describes the instance layout # very clever next line: it forces the attr string # to be interned. # None means no such attribute # note that this doesn't call __get__ on the result at all # XXX Optimize this with method cache # like lookup() but also returns the parent class in which the # attribute was found # ^^^Note: if the version_tag object is moved by a moving GC, the # existing method cache entries won't be found any more; new # entries will be created based on the new address. The # assumption is that the version_tag object won't keep moving all # the time - so using the fast current_object_addr_as_int() instead # of a slower solution like hash() is still a good trade-off. # print "hit", w_self, name # print "miss", w_self, name # returning a dict-proxy! # force un-lazification # speed hack: instantiate a dict object cls directly # NB: cannot use newdict, because that could return something else # than an instance of DictObjectCls # for non-heap types, CPython checks for a module.name in the # type name. That's a hack, so we're allowed to use a different # hack... 
# no weakref support, don't keep track of subclasses # no weakref support, don't keep track of subclasses # for now, weakref support for W_TypeObject is hard to get automatically # ____________________________________________________________ # Initialization of type objects Compute the most parent class of 'w_type' whose layout is the same as 'w_type', or None if all parents of 'w_type' have a different layout than 'w_type'. The best base is one of the bases in the given list: the one whose layout a new type should use as a starting point. # for now # two candidates with the same typedef are equivalent unless # one has extra slots over the other The best base is one of the bases in the given list: the one whose layout a new type should use as a starting point. This version checks that bases_w is an acceptable tuple of bases. # check that all other bases' layouts are superclasses of the bestbase # create member # Force interning of slot names. # temporarily # special-case __new__, as in CPython: # if it is a Function, turn it into a static method # make sure there is a __doc__ in dict_w # initialize __module__ in the dict (user-defined types only) # because there is one in 'type' # done # do some checking here. Note that unlike CPython, strange MROs # cannot really segfault PyPy. At a minimum, we check that all # the elements in the mro seem to be (old- or new-style) classes. # ____________________________________________________________ # special case for type(x) # invoke the __new__ of the type # maybe invoke the __init__ of the type # __get__(None, type): turns e.g. functions into unbound methods # Note. This is exactly the same thing as descroperation.descr__setattr__, # but it is needed at bootstrap to avoid a call to w_type.getdict() which # would un-lazify the whole type. 
# force un-lazification # ____________________________________________________________ def abstract_mro(klass): # abstract/classic mro mro = [] stack = [klass] while stack: klass = stack.pop() if klass not in mro: mro.append(klass) if not isinstance(klass.__bases__, tuple): raise TypeError, '__bases__ must be a tuple' stack += klass.__bases__[::-1] return mro # good candidate # no candidate found # good candidate # explicit error message for this specific case # ____________________________________________________________ | 1.897947 | 2 |
tests/dummypackage2/setup.py | msabramo/CheesePrism | 0 | 6624697 | from setuptools import setup
from setuptools import find_packages
version = '0.1'
setup(name='dummypackage',
version=version,
description="",
long_description="",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='',
author='',
author_email='',
url='',
license='',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=["something_else"],
entry_points="""
# -*- Entry points: -*-
""",
)
| from setuptools import setup
from setuptools import find_packages
version = '0.1'
setup(name='dummypackage',
version=version,
description="",
long_description="",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='',
author='',
author_email='',
url='',
license='',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=["something_else"],
entry_points="""
# -*- Entry points: -*-
""",
)
| en | 0.523309 | # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers # -*- Entry points: -*- | 1.422132 | 1 |
contrib/rackspace/rackspace/tests/test_auto_scale.py | jasondunsmore/heat | 1 | 6624698 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import itertools
import mock
import six
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import glance
from heat.engine.clients.os import nova
from heat.engine import resource
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
from ..resources import auto_scale # noqa
class FakeScalingGroup(object):
"""A fake implementation of pyrax's ScalingGroup object."""
def __init__(self, id, **kwargs):
self.id = id
self.kwargs = kwargs
class FakeScalePolicy(object):
"""A fake implementation of pyrax's AutoScalePolicy object."""
def __init__(self, id, **kwargs):
self.id = id
self.kwargs = kwargs
class FakeWebHook(object):
"""A fake implementation of pyrax's AutoScaleWebhook object."""
def __init__(self, id, **kwargs):
self.id = id
self.kwargs = kwargs
self.links = [
{'rel': 'self', 'href': 'self-url'},
{'rel': 'capability', 'href': 'capability-url'}]
class FakeAutoScale(object):
"""A fake implementation of pyrax's autoscale client."""
def __init__(self):
self.groups = {}
self.policies = {}
self.webhooks = {}
self.group_counter = itertools.count()
self.policy_counter = itertools.count()
self.webhook_counter = itertools.count()
def create(self, **kwargs):
"""Create a scaling group."""
new_id = str(next(self.group_counter))
fsg = FakeScalingGroup(new_id, **kwargs)
self.groups[new_id] = fsg
return fsg
def _check_args(self, kwargs, allowed):
for parameter in kwargs:
if parameter not in allowed:
raise TypeError("unexpected argument %r" % (parameter,))
def _get_group(self, id):
if id not in self.groups:
raise auto_scale.NotFound("Group %s not found!" % (id,))
return self.groups[id]
def _get_policy(self, id):
if id not in self.policies:
raise auto_scale.NotFound("Policy %s not found!" % (id,))
return self.policies[id]
def _get_webhook(self, webhook_id):
if webhook_id not in self.webhooks:
raise auto_scale.NotFound(
"Webhook %s doesn't exist!" % (webhook_id,))
return self.webhooks[webhook_id]
def replace(self, group_id, **kwargs):
"""Update the groupConfiguration section of a scaling group."""
allowed = ['name', 'cooldown',
'min_entities', 'max_entities', 'metadata']
self._check_args(kwargs, allowed)
self._get_group(group_id).kwargs = kwargs
def replace_launch_config(self, group_id, **kwargs):
"""Update the launch configuration on a scaling group."""
if kwargs.get('launch_config_type') == 'launch_server':
allowed = ['launch_config_type', 'server_name', 'image', 'flavor',
'disk_config', 'metadata', 'personality', 'networks',
'load_balancers', 'key_name', 'user_data',
'config_drive']
elif kwargs.get('launch_config_type') == 'launch_stack':
allowed = ['launch_config_type', 'template', 'template_url',
'disable_rollback', 'environment', 'files',
'parameters', 'timeout_mins']
self._check_args(kwargs, allowed)
self._get_group(group_id).kwargs = kwargs
def delete(self, group_id):
"""Delete the group, if the min entities and max entities are 0."""
group = self._get_group(group_id)
if (group.kwargs['min_entities'] > 0
or group.kwargs['max_entities'] > 0):
raise Exception("Can't delete yet!")
del self.groups[group_id]
def add_policy(self, **kwargs):
"""Create and store a FakeScalePolicy."""
allowed = [
'scaling_group', 'name', 'policy_type', 'cooldown', 'change',
'is_percent', 'desired_capacity', 'args']
self._check_args(kwargs, allowed)
policy_id = str(next(self.policy_counter))
policy = FakeScalePolicy(policy_id, **kwargs)
self.policies[policy_id] = policy
return policy
def replace_policy(self, scaling_group, policy, **kwargs):
allowed = [
'name', 'policy_type', 'cooldown',
'change', 'is_percent', 'desired_capacity', 'args']
self._check_args(kwargs, allowed)
policy = self._get_policy(policy)
assert policy.kwargs['scaling_group'] == scaling_group
kwargs['scaling_group'] = scaling_group
policy.kwargs = kwargs
def add_webhook(self, **kwargs):
"""Create and store a FakeWebHook."""
allowed = ['scaling_group', 'policy', 'name', 'metadata']
self._check_args(kwargs, allowed)
webhook_id = str(next(self.webhook_counter))
webhook = FakeWebHook(webhook_id, **kwargs)
self.webhooks[webhook_id] = webhook
return webhook
def delete_policy(self, scaling_group, policy):
"""Delete a policy, if it exists."""
if policy not in self.policies:
raise auto_scale.NotFound("Policy %s doesn't exist!" % (policy,))
assert self.policies[policy].kwargs['scaling_group'] == scaling_group
del self.policies[policy]
def delete_webhook(self, scaling_group, policy, webhook_id):
"""Delete a webhook, if it exists."""
webhook = self._get_webhook(webhook_id)
assert webhook.kwargs['scaling_group'] == scaling_group
assert webhook.kwargs['policy'] == policy
del self.webhooks[webhook_id]
def replace_webhook(self, scaling_group, policy, webhook,
name=None, metadata=None):
webhook = self._get_webhook(webhook)
assert webhook.kwargs['scaling_group'] == scaling_group
assert webhook.kwargs['policy'] == policy
webhook.kwargs['name'] = name
webhook.kwargs['metadata'] = metadata
class ScalingGroupTest(common.HeatTestCase):
server_template = template_format.parse('''
HeatTemplateFormatVersion: "2012-12-12"
Description: "Rackspace Auto Scale"
Parameters: {}
Resources:
my_group:
Type: Rackspace::AutoScale::Group
Properties:
groupConfiguration:
name: "My Group"
cooldown: 60
minEntities: 1
maxEntities: 25
metadata:
group: metadata
launchConfiguration:
type: "launch_server"
args:
server:
name: autoscaled-server
flavorRef: flavor-ref
imageRef: image-ref
key_name: my-key
metadata:
server: metadata
personality:
/tmp/testfile: "dGVzdCBjb250ZW50"
networks:
- uuid: "00000000-0000-0000-0000-000000000000"
- uuid: "11111111-1111-1111-1111-111111111111"
loadBalancers:
- loadBalancerId: 234
port: 80
''')
stack_template = template_format.parse('''
HeatTemplateFormatVersion: "2012-12-12"
Description: "Rackspace Auto Scale"
Parameters: {}
Resources:
my_group:
Type: Rackspace::AutoScale::Group
Properties:
groupConfiguration:
name: "My Group"
cooldown: 60
minEntities: 1
maxEntities: 25
metadata:
group: metadata
launchConfiguration:
type: launch_stack
args:
stack:
template: |
heat_template_version: 2015-10-15
description: This is a Heat template
parameters:
image:
default: cirros-0.3.4-x86_64-uec
type: string
flavor:
default: m1.tiny
type: string
resources:
rand:
type: OS::Heat::RandomString
disable_rollback: False
environment:
parameters:
image: Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)
resource_registry:
Heat::InstallConfigAgent:
https://myhost.com/bootconfig.yaml
files:
fileA.yaml: Contents of the file
file:///usr/fileB.template: Contents of the file
parameters:
flavor: 4 GB Performance
timeout_mins: 30
''')
def setUp(self):
super(ScalingGroupTest, self).setUp()
for res_name, res_class in auto_scale.resource_mapping().items():
resource._register_class(res_name, res_class)
self.fake_auto_scale = FakeAutoScale()
self.patchobject(auto_scale.Group, 'auto_scale',
return_value=self.fake_auto_scale)
# mock nova and glance client methods to satisfy contraints
mock_im = self.patchobject(glance.GlanceClientPlugin,
'find_image_by_name_or_id')
mock_im.return_value = 'image-ref'
mock_fl = self.patchobject(nova.NovaClientPlugin,
'find_flavor_by_name_or_id')
mock_fl.return_value = 'flavor-ref'
def _setup_test_stack(self, template=None):
if template is None:
template = self.server_template
self.stack = utils.parse_stack(template)
self.stack.create()
self.assertEqual(
('CREATE', 'COMPLETE'), self.stack.state,
self.stack.status_reason)
def test_group_create_server(self):
"""Creating a group passes all the correct arguments to pyrax.
Also saves the group ID as the resource ID.
"""
self._setup_test_stack()
self.assertEqual(1, len(self.fake_auto_scale.groups))
self.assertEqual(
{
'cooldown': 60,
'config_drive': False,
'user_data': None,
'disk_config': None,
'flavor': 'flavor-ref',
'image': 'image-ref',
'load_balancers': [{
'loadBalancerId': 234,
'port': 80,
}],
'key_name': "my-key",
'launch_config_type': u'launch_server',
'max_entities': 25,
'group_metadata': {'group': 'metadata'},
'metadata': {'server': 'metadata'},
'min_entities': 1,
'name': '<NAME>',
'networks': [{'uuid': '00000000-0000-0000-0000-000000000000'},
{'uuid': '11111111-1111-1111-1111-111111111111'}],
'personality': [{
'path': u'/tmp/testfile',
'contents': u'dGVzdCBjb250ZW50'}],
'server_name': u'autoscaled-server'},
self.fake_auto_scale.groups['0'].kwargs)
resource = self.stack['my_group']
self.assertEqual('0', resource.FnGetRefId())
def test_group_create_stack(self):
"""Creating a group passes all the correct arguments to pyrax.
Also saves the group ID as the resource ID.
"""
self._setup_test_stack(self.stack_template)
self.assertEqual(1, len(self.fake_auto_scale.groups))
self.assertEqual(
{
'cooldown': 60,
'min_entities': 1,
'max_entities': 25,
'group_metadata': {'group': 'metadata'},
'name': 'My Group',
'launch_config_type': u'launch_stack',
'template': (
'''heat_template_version: 2015-10-15
description: This is a Heat template
parameters:
image:
default: cirros-0.3.4-x86_64-uec
type: string
flavor:
default: m1.tiny
type: string
resources:
rand:
type: OS::Heat::RandomString
'''),
'template_url': None,
'disable_rollback': False,
'environment': {
'parameters': {
'image':
'Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)',
},
'resource_registry': {
'Heat::InstallConfigAgent': ('https://myhost.com/'
'bootconfig.yaml')
}
},
'files': {
'fileA.yaml': 'Contents of the file',
'file:///usr/fileB.template': 'Contents of the file'
},
'parameters': {
'flavor': '4 GB Performance',
},
'timeout_mins': 30,
},
self.fake_auto_scale.groups['0'].kwargs
)
resource = self.stack['my_group']
self.assertEqual('0', resource.FnGetRefId())
def test_group_create_no_personality(self):
template = template_format.parse('''
HeatTemplateFormatVersion: "2012-12-12"
Description: "Rackspace Auto Scale"
Parameters: {}
Resources:
my_group:
Type: Rackspace::AutoScale::Group
Properties:
groupConfiguration:
name: "My Group"
cooldown: 60
minEntities: 1
maxEntities: 25
metadata:
group: metadata
launchConfiguration:
type: "launch_server"
args:
server:
name: autoscaled-server
flavorRef: flavor-ref
imageRef: image-ref
key_name: my-key
metadata:
server: metadata
networks:
- uuid: "00000000-0000-0000-0000-000000000000"
- uuid: "11111111-1111-1111-1111-111111111111"
''')
self.stack = utils.parse_stack(template)
self.stack.create()
self.assertEqual(
('CREATE', 'COMPLETE'), self.stack.state,
self.stack.status_reason)
self.assertEqual(1, len(self.fake_auto_scale.groups))
self.assertEqual(
{
'cooldown': 60,
'config_drive': False,
'user_data': None,
'disk_config': None,
'flavor': 'flavor-ref',
'image': 'image-ref',
'launch_config_type': 'launch_server',
'load_balancers': [],
'key_name': "my-key",
'max_entities': 25,
'group_metadata': {'group': 'metadata'},
'metadata': {'server': 'metadata'},
'min_entities': 1,
'name': '<NAME>',
'networks': [{'uuid': '00000000-0000-0000-0000-000000000000'},
{'uuid': '11111111-1111-1111-1111-111111111111'}],
'personality': None,
'server_name': u'autoscaled-server'},
self.fake_auto_scale.groups['0'].kwargs)
resource = self.stack['my_group']
self.assertEqual('0', resource.FnGetRefId())
def test_check(self):
self._setup_test_stack()
resource = self.stack['my_group']
mock_get = mock.Mock()
resource.auto_scale().get = mock_get
scheduler.TaskRunner(resource.check)()
self.assertEqual('CHECK', resource.action)
self.assertEqual('COMPLETE', resource.status)
mock_get.side_effect = auto_scale.NotFound('boom')
exc = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(resource.check))
self.assertEqual('CHECK', resource.action)
self.assertEqual('FAILED', resource.status)
self.assertIn('boom', str(exc))
def test_update_group_config(self):
"""Updates the groupConfiguration section.
Updates the groupConfiguration section in a template results in a
pyrax call to update the group configuration.
"""
self._setup_test_stack()
resource = self.stack['my_group']
uprops = copy.deepcopy(dict(resource.properties.data))
uprops['groupConfiguration']['minEntities'] = 5
new_template = rsrc_defn.ResourceDefinition(resource.name,
resource.type(),
uprops)
scheduler.TaskRunner(resource.update, new_template)()
self.assertEqual(1, len(self.fake_auto_scale.groups))
self.assertEqual(
5, self.fake_auto_scale.groups['0'].kwargs['min_entities'])
def test_update_launch_config_server(self):
"""Updates the launchConfigresults section.
Updates the launchConfigresults section in a template results in a
pyrax call to update the launch configuration.
"""
self._setup_test_stack()
resource = self.stack['my_group']
uprops = copy.deepcopy(dict(resource.properties.data))
lcargs = uprops['launchConfiguration']['args']
lcargs['loadBalancers'] = [{'loadBalancerId': '1', 'port': 80}]
new_template = rsrc_defn.ResourceDefinition(resource.name,
resource.type(),
uprops)
scheduler.TaskRunner(resource.update, new_template)()
self.assertEqual(1, len(self.fake_auto_scale.groups))
self.assertEqual(
[{'loadBalancerId': 1, 'port': 80}],
self.fake_auto_scale.groups['0'].kwargs['load_balancers'])
def test_update_launch_config_stack(self):
self._setup_test_stack(self.stack_template)
resource = self.stack['my_group']
uprops = copy.deepcopy(dict(resource.properties.data))
lcargs = uprops['launchConfiguration']['args']
lcargs['stack']['timeout_mins'] = 60
new_template = rsrc_defn.ResourceDefinition(resource.name,
resource.type(),
uprops)
scheduler.TaskRunner(resource.update, new_template)()
self.assertEqual(1, len(self.fake_auto_scale.groups))
self.assertEqual(
60,
self.fake_auto_scale.groups['0'].kwargs['timeout_mins'])
def test_delete(self):
"""Deleting a ScalingGroup resource invokes pyrax API to delete it."""
self._setup_test_stack()
resource = self.stack['my_group']
scheduler.TaskRunner(resource.delete)()
self.assertEqual({}, self.fake_auto_scale.groups)
def test_delete_without_backing_group(self):
"""Resource deletion succeeds, if no backing scaling group exists."""
self._setup_test_stack()
resource = self.stack['my_group']
del self.fake_auto_scale.groups['0']
scheduler.TaskRunner(resource.delete)()
self.assertEqual({}, self.fake_auto_scale.groups)
def test_delete_waits_for_server_deletion(self):
"""Test case for waiting for successful resource deletion.
The delete operation may fail until the servers are really gone; the
resource retries until success.
"""
self._setup_test_stack()
delete_counter = itertools.count()
def delete(group_id):
count = next(delete_counter)
if count < 3:
raise auto_scale.Forbidden("Not empty!")
self.patchobject(self.fake_auto_scale, 'delete', side_effect=delete)
resource = self.stack['my_group']
scheduler.TaskRunner(resource.delete)()
# It really called delete until it succeeded:
self.assertEqual(4, next(delete_counter))
def test_delete_blows_up_on_other_errors(self):
"""Test case for correct error handling during deletion.
Only the Forbidden (403) error is honored as an indicator of pending
deletion; other errors cause deletion to fail.
"""
self._setup_test_stack()
def delete(group_id):
1 / 0
self.patchobject(self.fake_auto_scale, 'delete', side_effect=delete)
resource = self.stack['my_group']
err = self.assertRaises(
exception.ResourceFailure, scheduler.TaskRunner(resource.delete))
self.assertIsInstance(err.exc, ZeroDivisionError)
class PolicyTest(common.HeatTestCase):
    """Tests for the Rackspace::AutoScale::ScalingPolicy resource plug-in."""
    # Minimal webhook-policy template; individual tests deep-copy and
    # mutate it.
    policy_template = template_format.parse('''
    HeatTemplateFormatVersion: "2012-12-12"
    Description: "Rackspace Auto Scale"
    Parameters: {}
    Resources:
        my_policy:
            Type: Rackspace::AutoScale::ScalingPolicy
            Properties:
                group: "my-group-id"
                name: "+10 on webhook"
                change: 10
                cooldown: 0
                type: "webhook"
    ''')
    def setUp(self):
        super(PolicyTest, self).setUp()
        # Register the Rackspace resource types with the engine.
        for res_name, res_class in auto_scale.resource_mapping().items():
            resource._register_class(res_name, res_class)
        # Replace the pyrax autoscale client with an inspectable fake.
        self.fake_auto_scale = FakeAutoScale()
        self.patchobject(auto_scale.ScalingPolicy, 'auto_scale',
                         return_value=self.fake_auto_scale)
    def _setup_test_stack(self, template):
        # Parse and create the stack, asserting creation succeeded.
        self.stack = utils.parse_stack(template)
        self.stack.create()
        self.assertEqual(
            ('CREATE', 'COMPLETE'), self.stack.state,
            self.stack.status_reason)
    def test_create_webhook_change(self):
        """Creating the resource creates the scaling policy with pyrax.
        Also sets the resource's ID to {group_id}:{policy_id}.
        """
        self._setup_test_stack(self.policy_template)
        resource = self.stack['my_policy']
        self.assertEqual('my-group-id:0', resource.FnGetRefId())
        self.assertEqual(
            {
                'name': '+10 on webhook',
                'scaling_group': 'my-group-id',
                'change': 10,
                'cooldown': 0,
                'policy_type': 'webhook'},
            self.fake_auto_scale.policies['0'].kwargs)
    def test_webhook_change_percent(self):
        """Test case for specified changePercent.
        When changePercent is specified, it translates to pyrax arguments
        'change' and 'is_percent'.
        """
        template = copy.deepcopy(self.policy_template)
        template['Resources']['my_policy']['Properties']['changePercent'] = 10
        del template['Resources']['my_policy']['Properties']['change']
        self._setup_test_stack(template)
        self.assertEqual(
            {
                'name': '+10 on webhook',
                'scaling_group': 'my-group-id',
                'change': 10,
                'is_percent': True,
                'cooldown': 0,
                'policy_type': 'webhook'},
            self.fake_auto_scale.policies['0'].kwargs)
    def test_webhook_desired_capacity(self):
        """Test case for desiredCapacity property.
        The desiredCapacity property translates to the desired_capacity pyrax
        argument.
        """
        template = copy.deepcopy(self.policy_template)
        template['Resources']['my_policy']['Properties']['desiredCapacity'] = 1
        del template['Resources']['my_policy']['Properties']['change']
        self._setup_test_stack(template)
        self.assertEqual(
            {
                'name': '+10 on webhook',
                'scaling_group': 'my-group-id',
                'desired_capacity': 1,
                'cooldown': 0,
                'policy_type': 'webhook'},
            self.fake_auto_scale.policies['0'].kwargs)
    def test_schedule(self):
        """We can specify schedule-type policies with args."""
        template = copy.deepcopy(self.policy_template)
        props = template['Resources']['my_policy']['Properties']
        props['type'] = 'schedule'
        props['args'] = {'cron': '0 0 0 * *'}
        self._setup_test_stack(template)
        self.assertEqual(
            {
                'name': '+10 on webhook',
                'scaling_group': 'my-group-id',
                'change': 10,
                'cooldown': 0,
                'policy_type': 'schedule',
                'args': {'cron': '0 0 0 * *'}},
            self.fake_auto_scale.policies['0'].kwargs)
    def test_update(self):
        """Updating the resource calls appropriate update method with pyrax."""
        self._setup_test_stack(self.policy_template)
        resource = self.stack['my_policy']
        uprops = copy.deepcopy(dict(resource.properties.data))
        uprops['changePercent'] = 50
        del uprops['change']
        template = rsrc_defn.ResourceDefinition(resource.name,
                                                resource.type(),
                                                uprops)
        scheduler.TaskRunner(resource.update, template)()
        self.assertEqual(
            {
                'name': '+10 on webhook',
                'scaling_group': 'my-group-id',
                'change': 50,
                'is_percent': True,
                'cooldown': 0,
                'policy_type': 'webhook'},
            self.fake_auto_scale.policies['0'].kwargs)
    def test_delete(self):
        """Deleting the resource deletes the policy with pyrax."""
        self._setup_test_stack(self.policy_template)
        resource = self.stack['my_policy']
        scheduler.TaskRunner(resource.delete)()
        self.assertEqual({}, self.fake_auto_scale.policies)
    def test_delete_policy_non_existent(self):
        """Test case for deleting resource without backing policy.
        Deleting a resource for which there is no backing policy succeeds
        silently.
        """
        self._setup_test_stack(self.policy_template)
        resource = self.stack['my_policy']
        # Simulate the policy having been removed out-of-band.
        del self.fake_auto_scale.policies['0']
        scheduler.TaskRunner(resource.delete)()
        self.assertEqual({}, self.fake_auto_scale.policies)
class WebHookTest(common.HeatTestCase):
    """Tests for the Rackspace::AutoScale::WebHook resource plug-in."""
    webhook_template = template_format.parse('''
    HeatTemplateFormatVersion: "2012-12-12"
    Description: "Rackspace Auto Scale"
    Parameters: {}
    Resources:
        my_webhook:
            Type: Rackspace::AutoScale::WebHook
            Properties:
                policy: my-group-id:my-policy-id
                name: "exec my policy"
                metadata:
                    a: b
    ''')
    def setUp(self):
        super(WebHookTest, self).setUp()
        # Register the Rackspace resource types with the engine.
        for res_name, res_class in auto_scale.resource_mapping().items():
            resource._register_class(res_name, res_class)
        # Replace the pyrax autoscale client with an inspectable fake.
        self.fake_auto_scale = FakeAutoScale()
        self.patchobject(auto_scale.WebHook, 'auto_scale',
                         return_value=self.fake_auto_scale)
    def _setup_test_stack(self, template):
        # Parse and create the stack, asserting creation succeeded.
        self.stack = utils.parse_stack(template)
        self.stack.create()
        self.assertEqual(
            ('CREATE', 'COMPLETE'), self.stack.state,
            self.stack.status_reason)
    def test_create(self):
        """Creates a webhook with pyrax and makes attributes available."""
        self._setup_test_stack(self.webhook_template)
        resource = self.stack['my_webhook']
        self.assertEqual(
            {
                'name': 'exec my policy',
                'scaling_group': 'my-group-id',
                'policy': 'my-policy-id',
                'metadata': {'a': 'b'}},
            self.fake_auto_scale.webhooks['0'].kwargs)
        # URLs come from the 'self' and 'capability' links of FakeWebHook.
        self.assertEqual("self-url", resource.FnGetAtt("executeUrl"))
        self.assertEqual("capability-url", resource.FnGetAtt("capabilityUrl"))
    def test_failed_create(self):
        """When a create fails, getting the attributes returns None."""
        template = copy.deepcopy(self.webhook_template)
        # 'foobar' is not a valid {group}:{policy} identifier.
        template['Resources']['my_webhook']['Properties']['policy'] = 'foobar'
        self.stack = utils.parse_stack(template)
        self.stack.create()
        resource = self.stack['my_webhook']
        self.assertIsNone(resource.FnGetAtt('capabilityUrl'))
    def test_update(self):
        # Updated name/metadata are pushed through to the fake client.
        self._setup_test_stack(self.webhook_template)
        resource = self.stack['my_webhook']
        uprops = copy.deepcopy(dict(resource.properties.data))
        uprops['metadata']['a'] = 'different!'
        uprops['name'] = 'newhook'
        template = rsrc_defn.ResourceDefinition(resource.name,
                                                resource.type(),
                                                uprops)
        scheduler.TaskRunner(resource.update, template)()
        self.assertEqual(
            {
                'name': 'newhook',
                'scaling_group': 'my-group-id',
                'policy': 'my-policy-id',
                'metadata': {'a': 'different!'}},
            self.fake_auto_scale.webhooks['0'].kwargs)
    def test_delete(self):
        """Deleting the resource deletes the webhook with pyrax."""
        self._setup_test_stack(self.webhook_template)
        resource = self.stack['my_webhook']
        scheduler.TaskRunner(resource.delete)()
        self.assertEqual({}, self.fake_auto_scale.webhooks)
    def test_delete_without_backing_webhook(self):
        """Test case for deleting resource without backing webhook.
        Deleting a resource for which there is no backing webhook succeeds
        silently.
        """
        self._setup_test_stack(self.webhook_template)
        resource = self.stack['my_webhook']
        # Simulate the webhook having been removed out-of-band.
        del self.fake_auto_scale.webhooks['0']
        scheduler.TaskRunner(resource.delete)()
        self.assertEqual({}, self.fake_auto_scale.webhooks)
@mock.patch.object(resource.Resource, "client_plugin")
@mock.patch.object(resource.Resource, "client")
class AutoScaleGroupValidationTests(common.HeatTestCase):
    """Validation tests for the Rackspace::AutoScale::Group resource."""
    def setUp(self):
        super(AutoScaleGroupValidationTests, self).setUp()
        # A minimal stack mock: no cached data, no stored resource.
        self.mockstack = mock.Mock()
        self.mockstack.has_cache_data.return_value = False
        self.mockstack.db_resource_get.return_value = None
    def test_validate_no_rcv3_pool(self, mock_client, mock_plugin):
        """Validation fails when the RackConnectV3 pool cannot be found."""
        asg_properties = {
            "groupConfiguration": {
                "name": "My Group",
                "cooldown": 60,
                "minEntities": 1,
                "maxEntities": 25,
                "metadata": {
                    "group": "metadata",
                },
            },
            "launchConfiguration": {
                "type": "launch_server",
                "args": {
                    "loadBalancers": [{
                        "loadBalancerId": 'not integer!',
                    }],
                    "server": {
                        "name": "sdfsdf",
                        "flavorRef": "ffdgdf",
                        "imageRef": "image-ref",
                    },
                },
            },
        }
        rsrcdef = rsrc_defn.ResourceDefinition(
            "test", auto_scale.Group, properties=asg_properties)
        asg = auto_scale.Group("test", rsrcdef, self.mockstack)
        # Non-integer ids are treated as RCv3 pool ids; none exist here.
        mock_client().list_load_balancer_pools.return_value = []
        error = self.assertRaises(
            exception.StackValidationFailed, asg.validate)
        self.assertEqual(
            'Could not find RackConnectV3 pool with id not integer!: ',
            six.text_type(error))
    def test_validate_rcv3_pool_found(self, mock_client, mock_plugin):
        """Validation passes when the RackConnectV3 pool id exists."""
        asg_properties = {
            "groupConfiguration": {
                "name": "My Group",
                "cooldown": 60,
                "minEntities": 1,
                "maxEntities": 25,
                "metadata": {
                    "group": "metadata",
                },
            },
            "launchConfiguration": {
                "type": "launch_server",
                "args": {
                    "loadBalancers": [{
                        "loadBalancerId": 'pool_exists',
                    }],
                    "server": {
                        "name": "sdfsdf",
                        "flavorRef": "ffdgdf",
                        "imageRef": "image-ref",
                    },
                },
            },
        }
        rsrcdef = rsrc_defn.ResourceDefinition(
            "test", auto_scale.Group, properties=asg_properties)
        asg = auto_scale.Group("test", rsrcdef, self.mockstack)
        mock_client().list_load_balancer_pools.return_value = [
            mock.Mock(id='pool_exists'),
        ]
        self.assertIsNone(asg.validate())
    def test_validate_no_lb_specified(self, mock_client, mock_plugin):
        """Validation passes when no load balancer is configured at all."""
        asg_properties = {
            "groupConfiguration": {
                "name": "My Group",
                "cooldown": 60,
                "minEntities": 1,
                "maxEntities": 25,
                "metadata": {
                    "group": "metadata",
                },
            },
            "launchConfiguration": {
                "type": "launch_server",
                "args": {
                    "server": {
                        "name": "sdfsdf",
                        "flavorRef": "ffdgdf",
                        "imageRef": "image-ref",
                    },
                },
            },
        }
        rsrcdef = rsrc_defn.ResourceDefinition(
            "test", auto_scale.Group, properties=asg_properties)
        asg = auto_scale.Group("test", rsrcdef, self.mockstack)
        self.assertIsNone(asg.validate())
    def test_validate_launch_stack(self, mock_client, mock_plugin):
        """A launch_stack configuration with a valid template validates."""
        asg_properties = {
            "groupConfiguration": {
                "name": "My Group",
                "cooldown": 60,
                "minEntities": 1,
                "maxEntities": 25,
                "metadata": {
                    "group": "metadata",
                },
            },
            "launchConfiguration": {
                "type": "launch_stack",
                "args": {
                    "stack": {
                        'template': (
                            '''heat_template_version: 2015-10-15
description: This is a Heat template
parameters:
  image:
    default: cirros-0.3.4-x86_64-uec
    type: string
  flavor:
    default: m1.tiny
    type: string
resources:
  rand:
    type: OS::Heat::RandomString
'''),
                        'template_url': None,
                        'disable_rollback': False,
                        'environment': {
                            'parameters': {
                                'image':
                                    'Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)',
                            },
                            'resource_registry': {
                                'Heat::InstallConfigAgent': (
                                    'https://myhost.com/bootconfig.yaml')
                            }
                        },
                        'files': {
                            'fileA.yaml': 'Contents of the file',
                            'file:///usr/fileB.yaml': 'Contents of the file'
                        },
                        'parameters': {
                            'flavor': '4 GB Performance',
                        },
                        'timeout_mins': 30,
                    }
                }
            }
        }
        rsrcdef = rsrc_defn.ResourceDefinition(
            "test", auto_scale.Group, properties=asg_properties)
        asg = auto_scale.Group("test", rsrcdef, self.mockstack)
        self.assertIsNone(asg.validate())
    def test_validate_launch_server_and_stack(self, mock_client, mock_plugin):
        """Specifying both server and stack arguments fails validation."""
        asg_properties = {
            "groupConfiguration": {
                "name": "My Group",
                "cooldown": 60,
                "minEntities": 1,
                "maxEntities": 25,
                "metadata": {
                    "group": "metadata",
                },
            },
            "launchConfiguration": {
                "type": "launch_server",
                "args": {
                    "server": {
                        "name": "sdfsdf",
                        "flavorRef": "ffdgdf",
                        "imageRef": "image-ref",
                    },
                    "stack": {
                        'template': (
                            '''heat_template_version: 2015-10-15
description: This is a Heat template
parameters:
  image:
    default: cirros-0.3.4-x86_64-uec
    type: string
  flavor:
    default: m1.tiny
    type: string
resources:
  rand:
    type: OS::Heat::RandomString
'''),
                        'template_url': None,
                        'disable_rollback': False,
                        'environment': {
                            'parameters': {
                                'image':
                                    'Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)',
                            },
                            'resource_registry': {
                                'Heat::InstallConfigAgent': (
                                    'https://myhost.com/bootconfig.yaml')
                            }
                        },
                        'files': {
                            'fileA.yaml': 'Contents of the file',
                            'file:///usr/fileB.yaml': 'Contents of the file'
                        },
                        'parameters': {
                            'flavor': '4 GB Performance',
                        },
                        'timeout_mins': 30,
                    }
                }
            }
        }
        rsrcdef = rsrc_defn.ResourceDefinition(
            "test", auto_scale.Group, properties=asg_properties)
        asg = auto_scale.Group("test", rsrcdef, self.mockstack)
        error = self.assertRaises(
            exception.StackValidationFailed, asg.validate)
        self.assertIn(
            'Must provide one of server or stack in launchConfiguration',
            six.text_type(error))
    def test_validate_no_launch_server_or_stack(self, mock_client,
                                                mock_plugin):
        """Specifying neither server nor stack arguments fails validation."""
        asg_properties = {
            "groupConfiguration": {
                "name": "My Group",
                "cooldown": 60,
                "minEntities": 1,
                "maxEntities": 25,
                "metadata": {
                    "group": "metadata",
                },
            },
            "launchConfiguration": {
                "type": "launch_server",
                "args": {}
            }
        }
        rsrcdef = rsrc_defn.ResourceDefinition(
            "test", auto_scale.Group, properties=asg_properties)
        asg = auto_scale.Group("test", rsrcdef, self.mockstack)
        error = self.assertRaises(
            exception.StackValidationFailed, asg.validate)
        self.assertIn(
            'Must provide one of server or stack in launchConfiguration',
            six.text_type(error))
    def test_validate_stack_template_and_template_url(self, mock_client,
                                                      mock_plugin):
        """Supplying both template and template_url fails validation."""
        asg_properties = {
            "groupConfiguration": {
                # Restored from an anonymization placeholder ('<NAME>') to
                # match the sibling tests.
                "name": "My Group",
                "cooldown": 60,
                "minEntities": 1,
                "maxEntities": 25,
                "metadata": {
                    "group": "metadata",
                },
            },
            "launchConfiguration": {
                "type": "launch_server",
                "args": {
                    "stack": {
                        'template': (
                            '''heat_template_version: 2015-10-15
description: This is a Heat template
parameters:
  image:
    default: cirros-0.3.4-x86_64-uec
    type: string
  flavor:
    default: m1.tiny
    type: string
resources:
  rand:
    type: OS::Heat::RandomString
'''),
                        'template_url': 'https://myhost.com/template.yaml',
                    }
                }
            }
        }
        rsrcdef = rsrc_defn.ResourceDefinition(
            "test", auto_scale.Group, properties=asg_properties)
        asg = auto_scale.Group("test", rsrcdef, self.mockstack)
        error = self.assertRaises(
            exception.StackValidationFailed, asg.validate)
        self.assertIn(
            'Must provide one of template or template_url',
            six.text_type(error))
    def test_validate_stack_no_template_or_template_url(self, mock_client,
                                                        mock_plugin):
        """Supplying neither template nor template_url fails validation."""
        asg_properties = {
            "groupConfiguration": {
                "name": "My Group",
                "cooldown": 60,
                "minEntities": 1,
                "maxEntities": 25,
                "metadata": {
                    "group": "metadata",
                },
            },
            "launchConfiguration": {
                "type": "launch_server",
                "args": {
                    "stack": {
                        'disable_rollback': False,
                        'environment': {
                            'parameters': {
                                'image':
                                    'Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)',
                            },
                            'resource_registry': {
                                'Heat::InstallConfigAgent': (
                                    'https://myhost.com/bootconfig.yaml')
                            }
                        },
                        'files': {
                            'fileA.yaml': 'Contents of the file',
                            'file:///usr/fileB.yaml': 'Contents of the file'
                        },
                        'parameters': {
                            'flavor': '4 GB Performance',
                        },
                        'timeout_mins': 30,
                    }
                }
            }
        }
        rsrcdef = rsrc_defn.ResourceDefinition(
            "test", auto_scale.Group, properties=asg_properties)
        asg = auto_scale.Group("test", rsrcdef, self.mockstack)
        error = self.assertRaises(
            exception.StackValidationFailed, asg.validate)
        self.assertIn(
            'Must provide one of template or template_url',
            six.text_type(error))
    def test_validate_invalid_template(self, mock_client, mock_plugin):
        """A syntactically invalid nested template fails validation."""
        asg_properties = {
            "groupConfiguration": {
                "name": "My Group",
                "cooldown": 60,
                "minEntities": 1,
                "maxEntities": 25,
                "metadata": {
                    "group": "metadata",
                },
            },
            "launchConfiguration": {
                "type": "launch_stack",
                "args": {
                    "stack": {
                        'template': (
                            '''SJDADKJAJKLSheat_template_version: 2015-10-15
description: This is a Heat template
parameters:
  image:
    default: cirros-0.3.4-x86_64-uec
    type: string
  flavor:
    default: m1.tiny
    type: string
resources:
  rand:
    type: OS::Heat::RandomString
'''),
                        'template_url': None,
                        'disable_rollback': False,
                        'environment': {'Foo': 'Bar'},
                        'files': {
                            'fileA.yaml': 'Contents of the file',
                            'file:///usr/fileB.yaml': 'Contents of the file'
                        },
                        'parameters': {
                            'flavor': '4 GB Performance',
                        },
                        'timeout_mins': 30,
                    }
                }
            }
        }
        rsrcdef = rsrc_defn.ResourceDefinition(
            "test", auto_scale.Group, properties=asg_properties)
        asg = auto_scale.Group("test", rsrcdef, self.mockstack)
        error = self.assertRaises(
            exception.StackValidationFailed, asg.validate)
        self.assertIn(
            'Encountered error while loading template:',
            six.text_type(error))
| #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import itertools
import mock
import six
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import glance
from heat.engine.clients.os import nova
from heat.engine import resource
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
from ..resources import auto_scale # noqa
class FakeScalingGroup(object):
    """In-memory stand-in for pyrax's ScalingGroup.
    Records its id and every creation keyword argument so tests can assert
    on exactly what was passed to the client.
    """
    def __init__(self, id, **kwargs):
        self.id, self.kwargs = id, kwargs
class FakeScalePolicy(object):
    """In-memory stand-in for pyrax's AutoScalePolicy.
    Records its id and every creation keyword argument so tests can assert
    on exactly what was passed to the client.
    """
    def __init__(self, id, **kwargs):
        self.id, self.kwargs = id, kwargs
class FakeWebHook(object):
    """In-memory stand-in for pyrax's AutoScaleWebhook.
    Besides recording id and creation kwargs, it exposes the two links that
    Heat reads for the executeUrl and capabilityUrl attributes.
    """
    def __init__(self, id, **kwargs):
        self.id = id
        self.kwargs = kwargs
        self.links = [{'rel': rel, 'href': href}
                      for rel, href in (('self', 'self-url'),
                                        ('capability', 'capability-url'))]
class FakeAutoScale(object):
    """A fake implementation of pyrax's autoscale client.
    Groups, policies and webhooks are stored in plain dicts keyed by string
    ids handed out from per-kind counters, so tests can assert on exactly
    what the resources passed to the client.
    """
    def __init__(self):
        self.groups = {}
        self.policies = {}
        self.webhooks = {}
        # Each kind of object gets monotonically increasing ids from '0'.
        self.group_counter = itertools.count()
        self.policy_counter = itertools.count()
        self.webhook_counter = itertools.count()
    def create(self, **kwargs):
        """Create a scaling group."""
        new_id = str(next(self.group_counter))
        fsg = FakeScalingGroup(new_id, **kwargs)
        self.groups[new_id] = fsg
        return fsg
    def _check_args(self, kwargs, allowed):
        """Raise TypeError for any keyword argument not in *allowed*."""
        for parameter in kwargs:
            if parameter not in allowed:
                raise TypeError("unexpected argument %r" % (parameter,))
    def _get_group(self, id):
        """Return the group or raise NotFound, mimicking pyrax."""
        if id not in self.groups:
            raise auto_scale.NotFound("Group %s not found!" % (id,))
        return self.groups[id]
    def _get_policy(self, id):
        """Return the policy or raise NotFound, mimicking pyrax."""
        if id not in self.policies:
            raise auto_scale.NotFound("Policy %s not found!" % (id,))
        return self.policies[id]
    def _get_webhook(self, webhook_id):
        """Return the webhook or raise NotFound, mimicking pyrax."""
        if webhook_id not in self.webhooks:
            raise auto_scale.NotFound(
                "Webhook %s doesn't exist!" % (webhook_id,))
        return self.webhooks[webhook_id]
    def replace(self, group_id, **kwargs):
        """Update the groupConfiguration section of a scaling group."""
        allowed = ['name', 'cooldown',
                   'min_entities', 'max_entities', 'metadata']
        self._check_args(kwargs, allowed)
        self._get_group(group_id).kwargs = kwargs
    def replace_launch_config(self, group_id, **kwargs):
        """Update the launch configuration on a scaling group."""
        if kwargs.get('launch_config_type') == 'launch_server':
            allowed = ['launch_config_type', 'server_name', 'image', 'flavor',
                       'disk_config', 'metadata', 'personality', 'networks',
                       'load_balancers', 'key_name', 'user_data',
                       'config_drive']
        elif kwargs.get('launch_config_type') == 'launch_stack':
            allowed = ['launch_config_type', 'template', 'template_url',
                       'disable_rollback', 'environment', 'files',
                       'parameters', 'timeout_mins']
        else:
            # Previously an unknown type fell through and raised a confusing
            # NameError on 'allowed'; fail explicitly instead, matching the
            # TypeError style of _check_args.
            raise TypeError(
                "unexpected launch_config_type %r"
                % (kwargs.get('launch_config_type'),))
        self._check_args(kwargs, allowed)
        self._get_group(group_id).kwargs = kwargs
    def delete(self, group_id):
        """Delete the group, if the min entities and max entities are 0."""
        group = self._get_group(group_id)
        if (group.kwargs['min_entities'] > 0
                or group.kwargs['max_entities'] > 0):
            raise Exception("Can't delete yet!")
        del self.groups[group_id]
    def add_policy(self, **kwargs):
        """Create and store a FakeScalePolicy."""
        allowed = [
            'scaling_group', 'name', 'policy_type', 'cooldown', 'change',
            'is_percent', 'desired_capacity', 'args']
        self._check_args(kwargs, allowed)
        policy_id = str(next(self.policy_counter))
        policy = FakeScalePolicy(policy_id, **kwargs)
        self.policies[policy_id] = policy
        return policy
    def replace_policy(self, scaling_group, policy, **kwargs):
        """Replace a policy's arguments wholesale."""
        allowed = [
            'name', 'policy_type', 'cooldown',
            'change', 'is_percent', 'desired_capacity', 'args']
        self._check_args(kwargs, allowed)
        policy = self._get_policy(policy)
        assert policy.kwargs['scaling_group'] == scaling_group
        kwargs['scaling_group'] = scaling_group
        policy.kwargs = kwargs
    def add_webhook(self, **kwargs):
        """Create and store a FakeWebHook."""
        allowed = ['scaling_group', 'policy', 'name', 'metadata']
        self._check_args(kwargs, allowed)
        webhook_id = str(next(self.webhook_counter))
        webhook = FakeWebHook(webhook_id, **kwargs)
        self.webhooks[webhook_id] = webhook
        return webhook
    def delete_policy(self, scaling_group, policy):
        """Delete a policy, if it exists."""
        if policy not in self.policies:
            raise auto_scale.NotFound("Policy %s doesn't exist!" % (policy,))
        assert self.policies[policy].kwargs['scaling_group'] == scaling_group
        del self.policies[policy]
    def delete_webhook(self, scaling_group, policy, webhook_id):
        """Delete a webhook, if it exists."""
        webhook = self._get_webhook(webhook_id)
        assert webhook.kwargs['scaling_group'] == scaling_group
        assert webhook.kwargs['policy'] == policy
        del self.webhooks[webhook_id]
    def replace_webhook(self, scaling_group, policy, webhook,
                        name=None, metadata=None):
        """Replace a webhook's name and metadata."""
        webhook = self._get_webhook(webhook)
        assert webhook.kwargs['scaling_group'] == scaling_group
        assert webhook.kwargs['policy'] == policy
        webhook.kwargs['name'] = name
        webhook.kwargs['metadata'] = metadata
class ScalingGroupTest(common.HeatTestCase):
    """Tests for the Rackspace::AutoScale::Group resource plug-in."""
    server_template = template_format.parse('''
    HeatTemplateFormatVersion: "2012-12-12"
    Description: "Rackspace Auto Scale"
    Parameters: {}
    Resources:
        my_group:
            Type: Rackspace::AutoScale::Group
            Properties:
                groupConfiguration:
                    name: "My Group"
                    cooldown: 60
                    minEntities: 1
                    maxEntities: 25
                    metadata:
                        group: metadata
                launchConfiguration:
                    type: "launch_server"
                    args:
                        server:
                            name: autoscaled-server
                            flavorRef: flavor-ref
                            imageRef: image-ref
                            key_name: my-key
                            metadata:
                                server: metadata
                            personality:
                                /tmp/testfile: "dGVzdCBjb250ZW50"
                            networks:
                                - uuid: "00000000-0000-0000-0000-000000000000"
                                - uuid: "11111111-1111-1111-1111-111111111111"
                        loadBalancers:
                        - loadBalancerId: 234
                          port: 80
    ''')
    stack_template = template_format.parse('''
    HeatTemplateFormatVersion: "2012-12-12"
    Description: "Rackspace Auto Scale"
    Parameters: {}
    Resources:
        my_group:
            Type: Rackspace::AutoScale::Group
            Properties:
                groupConfiguration:
                    name: "My Group"
                    cooldown: 60
                    minEntities: 1
                    maxEntities: 25
                    metadata:
                        group: metadata
                launchConfiguration:
                    type: launch_stack
                    args:
                        stack:
                            template: |
                                heat_template_version: 2015-10-15
                                description: This is a Heat template
                                parameters:
                                  image:
                                    default: cirros-0.3.4-x86_64-uec
                                    type: string
                                  flavor:
                                    default: m1.tiny
                                    type: string
                                resources:
                                  rand:
                                    type: OS::Heat::RandomString
                            disable_rollback: False
                            environment:
                                parameters:
                                    image: Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)
                                resource_registry:
                                    Heat::InstallConfigAgent:
                                        https://myhost.com/bootconfig.yaml
                            files:
                                fileA.yaml: Contents of the file
                                file:///usr/fileB.template: Contents of the file
                            parameters:
                                flavor: 4 GB Performance
                            timeout_mins: 30
    ''')
    def setUp(self):
        super(ScalingGroupTest, self).setUp()
        for res_name, res_class in auto_scale.resource_mapping().items():
            resource._register_class(res_name, res_class)
        self.fake_auto_scale = FakeAutoScale()
        self.patchobject(auto_scale.Group, 'auto_scale',
                         return_value=self.fake_auto_scale)
        # mock nova and glance client methods to satisfy constraints
        mock_im = self.patchobject(glance.GlanceClientPlugin,
                                   'find_image_by_name_or_id')
        mock_im.return_value = 'image-ref'
        mock_fl = self.patchobject(nova.NovaClientPlugin,
                                   'find_flavor_by_name_or_id')
        mock_fl.return_value = 'flavor-ref'
    def _setup_test_stack(self, template=None):
        """Parse and create the stack, asserting creation succeeded."""
        if template is None:
            template = self.server_template
        self.stack = utils.parse_stack(template)
        self.stack.create()
        self.assertEqual(
            ('CREATE', 'COMPLETE'), self.stack.state,
            self.stack.status_reason)
    def test_group_create_server(self):
        """Creating a group passes all the correct arguments to pyrax.
        Also saves the group ID as the resource ID.
        """
        self._setup_test_stack()
        self.assertEqual(1, len(self.fake_auto_scale.groups))
        self.assertEqual(
            {
                'cooldown': 60,
                'config_drive': False,
                'user_data': None,
                'disk_config': None,
                'flavor': 'flavor-ref',
                'image': 'image-ref',
                'load_balancers': [{
                    'loadBalancerId': 234,
                    'port': 80,
                }],
                'key_name': "my-key",
                'launch_config_type': u'launch_server',
                'max_entities': 25,
                'group_metadata': {'group': 'metadata'},
                'metadata': {'server': 'metadata'},
                'min_entities': 1,
                # Matches groupConfiguration.name in server_template; this
                # had been clobbered to a '<NAME>' placeholder.
                'name': 'My Group',
                'networks': [{'uuid': '00000000-0000-0000-0000-000000000000'},
                             {'uuid': '11111111-1111-1111-1111-111111111111'}],
                'personality': [{
                    'path': u'/tmp/testfile',
                    'contents': u'dGVzdCBjb250ZW50'}],
                'server_name': u'autoscaled-server'},
            self.fake_auto_scale.groups['0'].kwargs)
        resource = self.stack['my_group']
        self.assertEqual('0', resource.FnGetRefId())
    def test_group_create_stack(self):
        """Creating a group passes all the correct arguments to pyrax.
        Also saves the group ID as the resource ID.
        """
        self._setup_test_stack(self.stack_template)
        self.assertEqual(1, len(self.fake_auto_scale.groups))
        self.assertEqual(
            {
                'cooldown': 60,
                'min_entities': 1,
                'max_entities': 25,
                'group_metadata': {'group': 'metadata'},
                'name': 'My Group',
                'launch_config_type': u'launch_stack',
                'template': (
                    '''heat_template_version: 2015-10-15
description: This is a Heat template
parameters:
  image:
    default: cirros-0.3.4-x86_64-uec
    type: string
  flavor:
    default: m1.tiny
    type: string
resources:
  rand:
    type: OS::Heat::RandomString
'''),
                'template_url': None,
                'disable_rollback': False,
                'environment': {
                    'parameters': {
                        'image':
                            'Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)',
                    },
                    'resource_registry': {
                        'Heat::InstallConfigAgent': ('https://myhost.com/'
                                                     'bootconfig.yaml')
                    }
                },
                'files': {
                    'fileA.yaml': 'Contents of the file',
                    'file:///usr/fileB.template': 'Contents of the file'
                },
                'parameters': {
                    'flavor': '4 GB Performance',
                },
                'timeout_mins': 30,
            },
            self.fake_auto_scale.groups['0'].kwargs
        )
        resource = self.stack['my_group']
        self.assertEqual('0', resource.FnGetRefId())
    def test_group_create_no_personality(self):
        """Without personality, pyrax receives personality=None."""
        template = template_format.parse('''
    HeatTemplateFormatVersion: "2012-12-12"
    Description: "Rackspace Auto Scale"
    Parameters: {}
    Resources:
        my_group:
            Type: Rackspace::AutoScale::Group
            Properties:
                groupConfiguration:
                    name: "My Group"
                    cooldown: 60
                    minEntities: 1
                    maxEntities: 25
                    metadata:
                        group: metadata
                launchConfiguration:
                    type: "launch_server"
                    args:
                        server:
                            name: autoscaled-server
                            flavorRef: flavor-ref
                            imageRef: image-ref
                            key_name: my-key
                            metadata:
                                server: metadata
                            networks:
                                - uuid: "00000000-0000-0000-0000-000000000000"
                                - uuid: "11111111-1111-1111-1111-111111111111"
    ''')
        self.stack = utils.parse_stack(template)
        self.stack.create()
        self.assertEqual(
            ('CREATE', 'COMPLETE'), self.stack.state,
            self.stack.status_reason)
        self.assertEqual(1, len(self.fake_auto_scale.groups))
        self.assertEqual(
            {
                'cooldown': 60,
                'config_drive': False,
                'user_data': None,
                'disk_config': None,
                'flavor': 'flavor-ref',
                'image': 'image-ref',
                'launch_config_type': 'launch_server',
                'load_balancers': [],
                'key_name': "my-key",
                'max_entities': 25,
                'group_metadata': {'group': 'metadata'},
                'metadata': {'server': 'metadata'},
                'min_entities': 1,
                # Matches groupConfiguration.name in the template above;
                # this had been clobbered to a '<NAME>' placeholder.
                'name': 'My Group',
                'networks': [{'uuid': '00000000-0000-0000-0000-000000000000'},
                             {'uuid': '11111111-1111-1111-1111-111111111111'}],
                'personality': None,
                'server_name': u'autoscaled-server'},
            self.fake_auto_scale.groups['0'].kwargs)
        resource = self.stack['my_group']
        self.assertEqual('0', resource.FnGetRefId())
    def test_check(self):
        """check succeeds while the group exists, fails once it is gone."""
        self._setup_test_stack()
        resource = self.stack['my_group']
        mock_get = mock.Mock()
        resource.auto_scale().get = mock_get
        scheduler.TaskRunner(resource.check)()
        self.assertEqual('CHECK', resource.action)
        self.assertEqual('COMPLETE', resource.status)
        # A missing backing group turns the check into a failure.
        mock_get.side_effect = auto_scale.NotFound('boom')
        exc = self.assertRaises(exception.ResourceFailure,
                                scheduler.TaskRunner(resource.check))
        self.assertEqual('CHECK', resource.action)
        self.assertEqual('FAILED', resource.status)
        self.assertIn('boom', str(exc))
    def test_update_group_config(self):
        """Updates the groupConfiguration section.
        Updates the groupConfiguration section in a template results in a
        pyrax call to update the group configuration.
        """
        self._setup_test_stack()
        resource = self.stack['my_group']
        uprops = copy.deepcopy(dict(resource.properties.data))
        uprops['groupConfiguration']['minEntities'] = 5
        new_template = rsrc_defn.ResourceDefinition(resource.name,
                                                    resource.type(),
                                                    uprops)
        scheduler.TaskRunner(resource.update, new_template)()
        self.assertEqual(1, len(self.fake_auto_scale.groups))
        self.assertEqual(
            5, self.fake_auto_scale.groups['0'].kwargs['min_entities'])
    def test_update_launch_config_server(self):
        """Updates the launchConfiguration section.
        Updates the launchConfiguration section in a template results in a
        pyrax call to update the launch configuration.
        """
        self._setup_test_stack()
        resource = self.stack['my_group']
        uprops = copy.deepcopy(dict(resource.properties.data))
        lcargs = uprops['launchConfiguration']['args']
        lcargs['loadBalancers'] = [{'loadBalancerId': '1', 'port': 80}]
        new_template = rsrc_defn.ResourceDefinition(resource.name,
                                                    resource.type(),
                                                    uprops)
        scheduler.TaskRunner(resource.update, new_template)()
        self.assertEqual(1, len(self.fake_auto_scale.groups))
        # The property schema coerces the string id '1' to the integer 1.
        self.assertEqual(
            [{'loadBalancerId': 1, 'port': 80}],
            self.fake_auto_scale.groups['0'].kwargs['load_balancers'])
    def test_update_launch_config_stack(self):
        """A changed stack arg is pushed through to pyrax on update."""
        self._setup_test_stack(self.stack_template)
        resource = self.stack['my_group']
        uprops = copy.deepcopy(dict(resource.properties.data))
        lcargs = uprops['launchConfiguration']['args']
        lcargs['stack']['timeout_mins'] = 60
        new_template = rsrc_defn.ResourceDefinition(resource.name,
                                                    resource.type(),
                                                    uprops)
        scheduler.TaskRunner(resource.update, new_template)()
        self.assertEqual(1, len(self.fake_auto_scale.groups))
        self.assertEqual(
            60,
            self.fake_auto_scale.groups['0'].kwargs['timeout_mins'])
    def test_delete(self):
        """Deleting a ScalingGroup resource invokes pyrax API to delete it."""
        self._setup_test_stack()
        resource = self.stack['my_group']
        scheduler.TaskRunner(resource.delete)()
        self.assertEqual({}, self.fake_auto_scale.groups)
    def test_delete_without_backing_group(self):
        """Resource deletion succeeds, if no backing scaling group exists."""
        self._setup_test_stack()
        resource = self.stack['my_group']
        del self.fake_auto_scale.groups['0']
        scheduler.TaskRunner(resource.delete)()
        self.assertEqual({}, self.fake_auto_scale.groups)
    def test_delete_waits_for_server_deletion(self):
        """Test case for waiting for successful resource deletion.
        The delete operation may fail until the servers are really gone; the
        resource retries until success.
        """
        self._setup_test_stack()
        delete_counter = itertools.count()
        def delete(group_id):
            count = next(delete_counter)
            if count < 3:
                raise auto_scale.Forbidden("Not empty!")
        self.patchobject(self.fake_auto_scale, 'delete', side_effect=delete)
        resource = self.stack['my_group']
        scheduler.TaskRunner(resource.delete)()
        # It really called delete until it succeeded:
        self.assertEqual(4, next(delete_counter))
    def test_delete_blows_up_on_other_errors(self):
        """Test case for correct error handling during deletion.
        Only the Forbidden (403) error is honored as an indicator of pending
        deletion; other errors cause deletion to fail.
        """
        self._setup_test_stack()
        def delete(group_id):
            1 / 0
        self.patchobject(self.fake_auto_scale, 'delete', side_effect=delete)
        resource = self.stack['my_group']
        err = self.assertRaises(
            exception.ResourceFailure, scheduler.TaskRunner(resource.delete))
        self.assertIsInstance(err.exc, ZeroDivisionError)
class PolicyTest(common.HeatTestCase):
policy_template = template_format.parse('''
HeatTemplateFormatVersion: "2012-12-12"
Description: "Rackspace Auto Scale"
Parameters: {}
Resources:
my_policy:
Type: Rackspace::AutoScale::ScalingPolicy
Properties:
group: "my-group-id"
name: "+10 on webhook"
change: 10
cooldown: 0
type: "webhook"
''')
    def setUp(self):
        super(PolicyTest, self).setUp()
        # Register the Rackspace resource types with the engine.
        for res_name, res_class in auto_scale.resource_mapping().items():
            resource._register_class(res_name, res_class)
        # Replace the pyrax autoscale client with an inspectable fake.
        self.fake_auto_scale = FakeAutoScale()
        self.patchobject(auto_scale.ScalingPolicy, 'auto_scale',
                         return_value=self.fake_auto_scale)
    def _setup_test_stack(self, template):
        """Parse and create *template*, asserting CREATE completed."""
        self.stack = utils.parse_stack(template)
        self.stack.create()
        self.assertEqual(
            ('CREATE', 'COMPLETE'), self.stack.state,
            self.stack.status_reason)
def test_create_webhook_change(self):
"""Creating the resource creates the scaling policy with pyrax.
Also sets the resource's ID to {group_id}:{policy_id}.
"""
self._setup_test_stack(self.policy_template)
resource = self.stack['my_policy']
self.assertEqual('my-group-id:0', resource.FnGetRefId())
self.assertEqual(
{
'name': '+10 on webhook',
'scaling_group': 'my-group-id',
'change': 10,
'cooldown': 0,
'policy_type': 'webhook'},
self.fake_auto_scale.policies['0'].kwargs)
def test_webhook_change_percent(self):
"""Test case for specified changePercent.
When changePercent is specified, it translates to pyrax arguments
'change' and 'is_percent'.
"""
template = copy.deepcopy(self.policy_template)
template['Resources']['my_policy']['Properties']['changePercent'] = 10
del template['Resources']['my_policy']['Properties']['change']
self._setup_test_stack(template)
self.assertEqual(
{
'name': '+10 on webhook',
'scaling_group': 'my-group-id',
'change': 10,
'is_percent': True,
'cooldown': 0,
'policy_type': 'webhook'},
self.fake_auto_scale.policies['0'].kwargs)
def test_webhook_desired_capacity(self):
"""Test case for desiredCapacity property.
The desiredCapacity property translates to the desired_capacity pyrax
argument.
"""
template = copy.deepcopy(self.policy_template)
template['Resources']['my_policy']['Properties']['desiredCapacity'] = 1
del template['Resources']['my_policy']['Properties']['change']
self._setup_test_stack(template)
self.assertEqual(
{
'name': '+10 on webhook',
'scaling_group': 'my-group-id',
'desired_capacity': 1,
'cooldown': 0,
'policy_type': 'webhook'},
self.fake_auto_scale.policies['0'].kwargs)
def test_schedule(self):
"""We can specify schedule-type policies with args."""
template = copy.deepcopy(self.policy_template)
props = template['Resources']['my_policy']['Properties']
props['type'] = 'schedule'
props['args'] = {'cron': '0 0 0 * *'}
self._setup_test_stack(template)
self.assertEqual(
{
'name': '+10 on webhook',
'scaling_group': 'my-group-id',
'change': 10,
'cooldown': 0,
'policy_type': 'schedule',
'args': {'cron': '0 0 0 * *'}},
self.fake_auto_scale.policies['0'].kwargs)
def test_update(self):
"""Updating the resource calls appropriate update method with pyrax."""
self._setup_test_stack(self.policy_template)
resource = self.stack['my_policy']
uprops = copy.deepcopy(dict(resource.properties.data))
uprops['changePercent'] = 50
del uprops['change']
template = rsrc_defn.ResourceDefinition(resource.name,
resource.type(),
uprops)
scheduler.TaskRunner(resource.update, template)()
self.assertEqual(
{
'name': '+10 on webhook',
'scaling_group': 'my-group-id',
'change': 50,
'is_percent': True,
'cooldown': 0,
'policy_type': 'webhook'},
self.fake_auto_scale.policies['0'].kwargs)
def test_delete(self):
"""Deleting the resource deletes the policy with pyrax."""
self._setup_test_stack(self.policy_template)
resource = self.stack['my_policy']
scheduler.TaskRunner(resource.delete)()
self.assertEqual({}, self.fake_auto_scale.policies)
def test_delete_policy_non_existent(self):
"""Test case for deleting resource without backing policy.
Deleting a resource for which there is no backing policy succeeds
silently.
"""
self._setup_test_stack(self.policy_template)
resource = self.stack['my_policy']
del self.fake_auto_scale.policies['0']
scheduler.TaskRunner(resource.delete)()
self.assertEqual({}, self.fake_auto_scale.policies)
class WebHookTest(common.HeatTestCase):
    """Tests for the Rackspace::AutoScale::WebHook resource.

    All tests run against FakeAutoScale, so assertions inspect the exact
    keyword arguments that would have been passed to pyrax.
    """

    # Baseline template; the 'policy' property encodes {group_id}:{policy_id}.
    webhook_template = template_format.parse('''
HeatTemplateFormatVersion: "2012-12-12"
Description: "Rackspace Auto Scale"
Parameters: {}
Resources:
    my_webhook:
        Type: Rackspace::AutoScale::WebHook
        Properties:
            policy: my-group-id:my-policy-id
            name: "exec my policy"
            metadata:
                a: b
''')

    def setUp(self):
        super(WebHookTest, self).setUp()
        # Register the Rackspace auto-scale resource types and replace the
        # pyrax client with an in-memory fake.
        for res_name, res_class in auto_scale.resource_mapping().items():
            resource._register_class(res_name, res_class)
        self.fake_auto_scale = FakeAutoScale()
        self.patchobject(auto_scale.WebHook, 'auto_scale',
                         return_value=self.fake_auto_scale)

    def _setup_test_stack(self, template):
        """Parse *template*, create the stack and assert CREATE COMPLETE."""
        self.stack = utils.parse_stack(template)
        self.stack.create()
        self.assertEqual(
            ('CREATE', 'COMPLETE'), self.stack.state,
            self.stack.status_reason)

    def test_create(self):
        """Creates a webhook with pyrax and makes attributes available."""
        self._setup_test_stack(self.webhook_template)
        resource = self.stack['my_webhook']
        self.assertEqual(
            {
                'name': 'exec my policy',
                'scaling_group': 'my-group-id',
                'policy': 'my-policy-id',
                'metadata': {'a': 'b'}},
            self.fake_auto_scale.webhooks['0'].kwargs)
        self.assertEqual("self-url", resource.FnGetAtt("executeUrl"))
        self.assertEqual("capability-url", resource.FnGetAtt("capabilityUrl"))

    def test_failed_create(self):
        """When a create fails, getting the attributes returns None."""
        template = copy.deepcopy(self.webhook_template)
        # An unparseable policy id makes the create fail, so the stack is
        # created directly rather than via _setup_test_stack (which asserts
        # CREATE COMPLETE).
        template['Resources']['my_webhook']['Properties']['policy'] = 'foobar'
        self.stack = utils.parse_stack(template)
        self.stack.create()
        resource = self.stack['my_webhook']
        self.assertIsNone(resource.FnGetAtt('capabilityUrl'))

    def test_update(self):
        """Updating the resource updates the webhook with pyrax."""
        self._setup_test_stack(self.webhook_template)
        resource = self.stack['my_webhook']
        uprops = copy.deepcopy(dict(resource.properties.data))
        uprops['metadata']['a'] = 'different!'
        uprops['name'] = 'newhook'
        template = rsrc_defn.ResourceDefinition(resource.name,
                                                resource.type(),
                                                uprops)
        scheduler.TaskRunner(resource.update, template)()
        self.assertEqual(
            {
                'name': 'newhook',
                'scaling_group': 'my-group-id',
                'policy': 'my-policy-id',
                'metadata': {'a': 'different!'}},
            self.fake_auto_scale.webhooks['0'].kwargs)

    def test_delete(self):
        """Deleting the resource deletes the webhook with pyrax."""
        self._setup_test_stack(self.webhook_template)
        resource = self.stack['my_webhook']
        scheduler.TaskRunner(resource.delete)()
        self.assertEqual({}, self.fake_auto_scale.webhooks)

    def test_delete_without_backing_webhook(self):
        """Test case for deleting resource without backing webhook.

        Deleting a resource for which there is no backing webhook succeeds
        silently.
        """
        self._setup_test_stack(self.webhook_template)
        resource = self.stack['my_webhook']
        # Simulate the webhook having been removed out-of-band.
        del self.fake_auto_scale.webhooks['0']
        scheduler.TaskRunner(resource.delete)()
        self.assertEqual({}, self.fake_auto_scale.webhooks)
@mock.patch.object(resource.Resource, "client_plugin")
@mock.patch.object(resource.Resource, "client")
class AutoScaleGroupValidationTests(common.HeatTestCase):
    """Validation tests for the Rackspace::AutoScale::Group resource.

    The class decorators patch the OpenStack client accessors, so every test
    method receives (mock_client, mock_plugin) in addition to self.
    """

    def setUp(self):
        super(AutoScaleGroupValidationTests, self).setUp()
        # Minimal stand-in stack: no cached data and no DB-backed resource.
        self.mockstack = mock.Mock()
        self.mockstack.has_cache_data.return_value = False
        self.mockstack.db_resource_get.return_value = None

    def test_validate_no_rcv3_pool(self, mock_client, mock_plugin):
        """Validation fails when the RackConnectV3 pool id is unknown."""
        asg_properties = {
            "groupConfiguration": {
                "name": "My Group",
                "cooldown": 60,
                "minEntities": 1,
                "maxEntities": 25,
                "metadata": {
                    "group": "metadata",
                },
            },
            "launchConfiguration": {
                "type": "launch_server",
                "args": {
                    "loadBalancers": [{
                        "loadBalancerId": 'not integer!',
                    }],
                    "server": {
                        "name": "sdfsdf",
                        "flavorRef": "ffdgdf",
                        "imageRef": "image-ref",
                    },
                },
            },
        }
        rsrcdef = rsrc_defn.ResourceDefinition(
            "test", auto_scale.Group, properties=asg_properties)
        asg = auto_scale.Group("test", rsrcdef, self.mockstack)
        # No pools exist, so the non-integer loadBalancerId cannot be found.
        mock_client().list_load_balancer_pools.return_value = []
        error = self.assertRaises(
            exception.StackValidationFailed, asg.validate)
        self.assertEqual(
            'Could not find RackConnectV3 pool with id not integer!: ',
            six.text_type(error))

    def test_validate_rcv3_pool_found(self, mock_client, mock_plugin):
        """Validation passes when the referenced RCv3 pool exists."""
        asg_properties = {
            "groupConfiguration": {
                "name": "My Group",
                "cooldown": 60,
                "minEntities": 1,
                "maxEntities": 25,
                "metadata": {
                    "group": "metadata",
                },
            },
            "launchConfiguration": {
                "type": "launch_server",
                "args": {
                    "loadBalancers": [{
                        "loadBalancerId": 'pool_exists',
                    }],
                    "server": {
                        "name": "sdfsdf",
                        "flavorRef": "ffdgdf",
                        "imageRef": "image-ref",
                    },
                },
            },
        }
        rsrcdef = rsrc_defn.ResourceDefinition(
            "test", auto_scale.Group, properties=asg_properties)
        asg = auto_scale.Group("test", rsrcdef, self.mockstack)
        mock_client().list_load_balancer_pools.return_value = [
            mock.Mock(id='pool_exists'),
        ]
        self.assertIsNone(asg.validate())

    def test_validate_no_lb_specified(self, mock_client, mock_plugin):
        """Validation passes when no load balancer is specified at all."""
        asg_properties = {
            "groupConfiguration": {
                "name": "My Group",
                "cooldown": 60,
                "minEntities": 1,
                "maxEntities": 25,
                "metadata": {
                    "group": "metadata",
                },
            },
            "launchConfiguration": {
                "type": "launch_server",
                "args": {
                    "server": {
                        "name": "sdfsdf",
                        "flavorRef": "ffdgdf",
                        "imageRef": "image-ref",
                    },
                },
            },
        }
        rsrcdef = rsrc_defn.ResourceDefinition(
            "test", auto_scale.Group, properties=asg_properties)
        asg = auto_scale.Group("test", rsrcdef, self.mockstack)
        self.assertIsNone(asg.validate())

    def test_validate_launch_stack(self, mock_client, mock_plugin):
        """A well-formed launch_stack configuration validates cleanly."""
        asg_properties = {
            "groupConfiguration": {
                "name": "My Group",
                "cooldown": 60,
                "minEntities": 1,
                "maxEntities": 25,
                "metadata": {
                    "group": "metadata",
                },
            },
            "launchConfiguration": {
                "type": "launch_stack",
                "args": {
                    "stack": {
                        'template': (
                            '''heat_template_version: 2015-10-15
description: This is a Heat template
parameters:
  image:
    default: cirros-0.3.4-x86_64-uec
    type: string
  flavor:
    default: m1.tiny
    type: string
resources:
  rand:
    type: OS::Heat::RandomString
'''),
                        'template_url': None,
                        'disable_rollback': False,
                        'environment': {
                            'parameters': {
                                'image':
                                    'Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)',
                            },
                            'resource_registry': {
                                'Heat::InstallConfigAgent': (
                                    'https://myhost.com/bootconfig.yaml')
                            }
                        },
                        'files': {
                            'fileA.yaml': 'Contents of the file',
                            'file:///usr/fileB.yaml': 'Contents of the file'
                        },
                        'parameters': {
                            'flavor': '4 GB Performance',
                        },
                        'timeout_mins': 30,
                    }
                }
            }
        }
        rsrcdef = rsrc_defn.ResourceDefinition(
            "test", auto_scale.Group, properties=asg_properties)
        asg = auto_scale.Group("test", rsrcdef, self.mockstack)
        self.assertIsNone(asg.validate())

    def test_validate_launch_server_and_stack(self, mock_client, mock_plugin):
        """Specifying both 'server' and 'stack' args is rejected."""
        asg_properties = {
            "groupConfiguration": {
                "name": "My Group",
                "cooldown": 60,
                "minEntities": 1,
                "maxEntities": 25,
                "metadata": {
                    "group": "metadata",
                },
            },
            "launchConfiguration": {
                "type": "launch_server",
                "args": {
                    "server": {
                        "name": "sdfsdf",
                        "flavorRef": "ffdgdf",
                        "imageRef": "image-ref",
                    },
                    "stack": {
                        'template': (
                            '''heat_template_version: 2015-10-15
description: This is a Heat template
parameters:
  image:
    default: cirros-0.3.4-x86_64-uec
    type: string
  flavor:
    default: m1.tiny
    type: string
resources:
  rand:
    type: OS::Heat::RandomString
'''),
                        'template_url': None,
                        'disable_rollback': False,
                        'environment': {
                            'parameters': {
                                'image':
                                    'Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)',
                            },
                            'resource_registry': {
                                'Heat::InstallConfigAgent': (
                                    'https://myhost.com/bootconfig.yaml')
                            }
                        },
                        'files': {
                            'fileA.yaml': 'Contents of the file',
                            'file:///usr/fileB.yaml': 'Contents of the file'
                        },
                        'parameters': {
                            'flavor': '4 GB Performance',
                        },
                        'timeout_mins': 30,
                    }
                }
            }
        }
        rsrcdef = rsrc_defn.ResourceDefinition(
            "test", auto_scale.Group, properties=asg_properties)
        asg = auto_scale.Group("test", rsrcdef, self.mockstack)
        error = self.assertRaises(
            exception.StackValidationFailed, asg.validate)
        self.assertIn(
            'Must provide one of server or stack in launchConfiguration',
            six.text_type(error))

    def test_validate_no_launch_server_or_stack(self, mock_client,
                                                mock_plugin):
        """Specifying neither 'server' nor 'stack' args is rejected."""
        asg_properties = {
            "groupConfiguration": {
                "name": "My Group",
                "cooldown": 60,
                "minEntities": 1,
                "maxEntities": 25,
                "metadata": {
                    "group": "metadata",
                },
            },
            "launchConfiguration": {
                "type": "launch_server",
                "args": {}
            }
        }
        rsrcdef = rsrc_defn.ResourceDefinition(
            "test", auto_scale.Group, properties=asg_properties)
        asg = auto_scale.Group("test", rsrcdef, self.mockstack)
        error = self.assertRaises(
            exception.StackValidationFailed, asg.validate)
        self.assertIn(
            'Must provide one of server or stack in launchConfiguration',
            six.text_type(error))

    def test_validate_stack_template_and_template_url(self, mock_client,
                                                      mock_plugin):
        """Specifying both 'template' and 'template_url' is rejected."""
        asg_properties = {
            "groupConfiguration": {
                "name": "<NAME>",
                "cooldown": 60,
                "minEntities": 1,
                "maxEntities": 25,
                "metadata": {
                    "group": "metadata",
                },
            },
            "launchConfiguration": {
                "type": "launch_server",
                "args": {
                    "stack": {
                        'template': (
                            '''heat_template_version: 2015-10-15
description: This is a Heat template
parameters:
  image:
    default: cirros-0.3.4-x86_64-uec
    type: string
  flavor:
    default: m1.tiny
    type: string
resources:
  rand:
    type: OS::Heat::RandomString
'''),
                        'template_url': 'https://myhost.com/template.yaml',
                    }
                }
            }
        }
        rsrcdef = rsrc_defn.ResourceDefinition(
            "test", auto_scale.Group, properties=asg_properties)
        asg = auto_scale.Group("test", rsrcdef, self.mockstack)
        error = self.assertRaises(
            exception.StackValidationFailed, asg.validate)
        self.assertIn(
            'Must provide one of template or template_url',
            six.text_type(error))

    def test_validate_stack_no_template_or_template_url(self, mock_client,
                                                        mock_plugin):
        """Specifying neither 'template' nor 'template_url' is rejected."""
        asg_properties = {
            "groupConfiguration": {
                "name": "My Group",
                "cooldown": 60,
                "minEntities": 1,
                "maxEntities": 25,
                "metadata": {
                    "group": "metadata",
                },
            },
            "launchConfiguration": {
                "type": "launch_server",
                "args": {
                    "stack": {
                        'disable_rollback': False,
                        'environment': {
                            'parameters': {
                                'image':
                                    'Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)',
                            },
                            'resource_registry': {
                                'Heat::InstallConfigAgent': (
                                    'https://myhost.com/bootconfig.yaml')
                            }
                        },
                        'files': {
                            'fileA.yaml': 'Contents of the file',
                            'file:///usr/fileB.yaml': 'Contents of the file'
                        },
                        'parameters': {
                            'flavor': '4 GB Performance',
                        },
                        'timeout_mins': 30,
                    }
                }
            }
        }
        rsrcdef = rsrc_defn.ResourceDefinition(
            "test", auto_scale.Group, properties=asg_properties)
        asg = auto_scale.Group("test", rsrcdef, self.mockstack)
        error = self.assertRaises(
            exception.StackValidationFailed, asg.validate)
        self.assertIn(
            'Must provide one of template or template_url',
            six.text_type(error))

    def test_validate_invalid_template(self, mock_client, mock_plugin):
        """An unparseable inline stack template fails validation."""
        asg_properties = {
            "groupConfiguration": {
                "name": "My Group",
                "cooldown": 60,
                "minEntities": 1,
                "maxEntities": 25,
                "metadata": {
                    "group": "metadata",
                },
            },
            "launchConfiguration": {
                "type": "launch_stack",
                "args": {
                    "stack": {
                        # Leading garbage deliberately corrupts the template.
                        'template': (
                            '''SJDADKJAJKLSheat_template_version: 2015-10-15
description: This is a Heat template
parameters:
  image:
    default: cirros-0.3.4-x86_64-uec
    type: string
  flavor:
    default: m1.tiny
    type: string
resources:
  rand:
    type: OS::Heat::RandomString
'''),
                        'template_url': None,
                        'disable_rollback': False,
                        'environment': {'Foo': 'Bar'},
                        'files': {
                            'fileA.yaml': 'Contents of the file',
                            'file:///usr/fileB.yaml': 'Contents of the file'
                        },
                        'parameters': {
                            'flavor': '4 GB Performance',
                        },
                        'timeout_mins': 30,
                    }
                }
            }
        }
        rsrcdef = rsrc_defn.ResourceDefinition(
            "test", auto_scale.Group, properties=asg_properties)
        asg = auto_scale.Group("test", rsrcdef, self.mockstack)
        error = self.assertRaises(
            exception.StackValidationFailed, asg.validate)
        self.assertIn(
            'Encountered error while loading template:',
            six.text_type(error))
| en | 0.634972 | # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # noqa A fake implementation of pyrax's ScalingGroup object. A fake implementation of pyrax's AutoScalePolicy object. A fake implementation of pyrax's AutoScaleWebhook object. A fake implementation of pyrax's autoscale client. Create a scaling group. Update the groupConfiguration section of a scaling group. Update the launch configuration on a scaling group. Delete the group, if the min entities and max entities are 0. Create and store a FakeScalePolicy. Create and store a FakeWebHook. Delete a policy, if it exists. Delete a webhook, if it exists. 
HeatTemplateFormatVersion: "2012-12-12" Description: "Rackspace Auto Scale" Parameters: {} Resources: my_group: Type: Rackspace::AutoScale::Group Properties: groupConfiguration: name: "My Group" cooldown: 60 minEntities: 1 maxEntities: 25 metadata: group: metadata launchConfiguration: type: "launch_server" args: server: name: autoscaled-server flavorRef: flavor-ref imageRef: image-ref key_name: my-key metadata: server: metadata personality: /tmp/testfile: "dGVzdCBjb250ZW50" networks: - uuid: "00000000-0000-0000-0000-000000000000" - uuid: "11111111-1111-1111-1111-111111111111" loadBalancers: - loadBalancerId: 234 port: 80 HeatTemplateFormatVersion: "2012-12-12" Description: "Rackspace Auto Scale" Parameters: {} Resources: my_group: Type: Rackspace::AutoScale::Group Properties: groupConfiguration: name: "My Group" cooldown: 60 minEntities: 1 maxEntities: 25 metadata: group: metadata launchConfiguration: type: launch_stack args: stack: template: | heat_template_version: 2015-10-15 description: This is a Heat template parameters: image: default: cirros-0.3.4-x86_64-uec type: string flavor: default: m1.tiny type: string resources: rand: type: OS::Heat::RandomString disable_rollback: False environment: parameters: image: Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM) resource_registry: Heat::InstallConfigAgent: https://myhost.com/bootconfig.yaml files: fileA.yaml: Contents of the file file:///usr/fileB.template: Contents of the file parameters: flavor: 4 GB Performance timeout_mins: 30 # mock nova and glance client methods to satisfy contraints Creating a group passes all the correct arguments to pyrax. Also saves the group ID as the resource ID. Creating a group passes all the correct arguments to pyrax. Also saves the group ID as the resource ID. 
heat_template_version: 2015-10-15 description: This is a Heat template parameters: image: default: cirros-0.3.4-x86_64-uec type: string flavor: default: m1.tiny type: string resources: rand: type: OS::Heat::RandomString HeatTemplateFormatVersion: "2012-12-12" Description: "Rackspace Auto Scale" Parameters: {} Resources: my_group: Type: Rackspace::AutoScale::Group Properties: groupConfiguration: name: "My Group" cooldown: 60 minEntities: 1 maxEntities: 25 metadata: group: metadata launchConfiguration: type: "launch_server" args: server: name: autoscaled-server flavorRef: flavor-ref imageRef: image-ref key_name: my-key metadata: server: metadata networks: - uuid: "00000000-0000-0000-0000-000000000000" - uuid: "11111111-1111-1111-1111-111111111111" Updates the groupConfiguration section. Updates the groupConfiguration section in a template results in a pyrax call to update the group configuration. Updates the launchConfigresults section. Updates the launchConfigresults section in a template results in a pyrax call to update the launch configuration. Deleting a ScalingGroup resource invokes pyrax API to delete it. Resource deletion succeeds, if no backing scaling group exists. Test case for waiting for successful resource deletion. The delete operation may fail until the servers are really gone; the resource retries until success. # It really called delete until it succeeded: Test case for correct error handling during deletion. Only the Forbidden (403) error is honored as an indicator of pending deletion; other errors cause deletion to fail. HeatTemplateFormatVersion: "2012-12-12" Description: "Rackspace Auto Scale" Parameters: {} Resources: my_policy: Type: Rackspace::AutoScale::ScalingPolicy Properties: group: "my-group-id" name: "+10 on webhook" change: 10 cooldown: 0 type: "webhook" Creating the resource creates the scaling policy with pyrax. Also sets the resource's ID to {group_id}:{policy_id}. Test case for specified changePercent. 
When changePercent is specified, it translates to pyrax arguments 'change' and 'is_percent'. Test case for desiredCapacity property. The desiredCapacity property translates to the desired_capacity pyrax argument. We can specify schedule-type policies with args. Updating the resource calls appropriate update method with pyrax. Deleting the resource deletes the policy with pyrax. Test case for deleting resource without backing policy. Deleting a resource for which there is no backing policy succeeds silently. HeatTemplateFormatVersion: "2012-12-12" Description: "Rackspace Auto Scale" Parameters: {} Resources: my_webhook: Type: Rackspace::AutoScale::WebHook Properties: policy: my-group-id:my-policy-id name: "exec my policy" metadata: a: b Creates a webhook with pyrax and makes attributes available. When a create fails, getting the attributes returns None. Deleting the resource deletes the webhook with pyrax. Test case for deleting resource without backing webhook. Deleting a resource for which there is no backing webhook succeeds silently. heat_template_version: 2015-10-15 description: This is a Heat template parameters: image: default: cirros-0.3.4-x86_64-uec type: string flavor: default: m1.tiny type: string resources: rand: type: OS::Heat::RandomString heat_template_version: 2015-10-15 description: This is a Heat template parameters: image: default: cirros-0.3.4-x86_64-uec type: string flavor: default: m1.tiny type: string resources: rand: type: OS::Heat::RandomString heat_template_version: 2015-10-15 description: This is a Heat template parameters: image: default: cirros-0.3.4-x86_64-uec type: string flavor: default: m1.tiny type: string resources: rand: type: OS::Heat::RandomString SJDADKJAJKLSheat_template_version: 2015-10-15 description: This is a Heat template parameters: image: default: cirros-0.3.4-x86_64-uec type: string flavor: default: m1.tiny type: string resources: rand: type: OS::Heat::RandomString | 1.962997 | 2 |
l7/z3.py | iCarrrot/Python | 0 | 6624699 | <reponame>iCarrrot/Python<gh_stars>0
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, GObject
class MyWindow(Gtk.Window):
    """Kitchen-timer window: preset countdown buttons plus a free-form entry.

    Each button starts the shared `Odliczanie` countdown label with a preset
    duration; typing a number of seconds into the entry and pressing Enter
    starts a custom countdown.
    """

    def startTimer(self, widget):
        """Start a countdown for the number of seconds typed in the entry."""
        text = self.time.get_text()
        try:
            seconds = int(text)
        except ValueError:
            # Ignore non-numeric input instead of crashing the GTK callback.
            return
        self.timer.start_odliczania(seconds - 1)

    def startTimer1(self, widget):
        """Eggs: 5 minutes (button label "Jajka (5')")."""
        # Bug fix: the original used 5 * 50 (250 s); 5 minutes is 300 s.
        t1 = 5 * 60
        self.timer.start_odliczania(int(t1) - 1)

    def startTimer2(self, widget):
        """Pasta: 8 minutes."""
        t1 = 8 * 60
        self.timer.start_odliczania(int(t1) - 1)

    def startTimer3(self, widget):
        """Steak: 2 minutes."""
        t1 = 2 * 60
        self.timer.start_odliczania(int(t1) - 1)

    def startTimer4(self, widget):
        """Tortilla: 30 seconds."""
        t1 = 30
        self.timer.start_odliczania(int(t1) - 1)

    def __init__(self):
        Gtk.Window.__init__(self, title="Hello World")
        # Vertical container: two button rows, then the entry, then the label.
        self.box2 = Gtk.Box(spacing=6, orientation=Gtk.Orientation.VERTICAL)
        self.add(self.box2)
        self.box = Gtk.Box(spacing=6)
        self.box2.pack_start(self.box, True, True, 0)
        self.box3 = Gtk.Box(spacing=6)
        self.box2.pack_start(self.box3, True, True, 0)

        self.button1 = Gtk.Button(label="Jajka (5')")
        self.button1.connect("clicked", self.startTimer1)
        self.box.pack_start(self.button1, True, True, 0)

        self.button2 = Gtk.Button(label="Makaron (8')")
        self.button2.connect("clicked", self.startTimer2)
        self.box.pack_start(self.button2, True, True, 0)

        self.button3 = Gtk.Button(label="Stek (2')")
        self.button3.connect("clicked", self.startTimer3)
        self.box3.pack_start(self.button3, True, True, 0)

        self.button4 = Gtk.Button(label="Tortilla (30'')")
        self.button4.connect("clicked", self.startTimer4)
        self.box3.pack_start(self.button4, True, True, 0)

        # Free-form entry: "activate" fires when the user presses Enter.
        self.time = Gtk.Entry()
        self.time.connect("activate", self.startTimer)
        self.box2.pack_start(self.time, True, True, 0)

        self.timer = Odliczanie()
        self.box2.pack_start(self.timer, True, True, 0)
class Odliczanie(Gtk.Label):
    """A GTK label that counts down once per second via a GLib timeout."""

    def __init__(self):
        Gtk.Label.__init__(self)
        self.time = 0       # seconds remaining
        self.id = None      # active GLib timeout source id, or None

    def countdown(self):
        """Timeout callback: show the remaining time and decrement it.

        Returns True to keep the timeout alive, False to let GLib remove it.
        """
        if self.time > 0:
            self.set_text(str(self.time))
            self.time -= 1
            return True
        self.time = 0
        self.set_text(str(self.time))
        # Returning False removes the source; forget its id so a later
        # start_odliczania() does not try to remove a dead source.
        self.id = None
        return False

    def start_odliczania(self, time):
        """Start (or restart) a countdown of *time* seconds.

        Bug fix: the original never cancelled a previously registered
        timeout, so starting a second countdown left two concurrent timers
        updating the same label.
        """
        if self.id is not None:
            GObject.source_remove(self.id)
        self.time = time
        self.id = GObject.timeout_add(1000, self.countdown)
# Build the window, quit the GTK main loop when it is closed, and run.
win = MyWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
| import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, GObject
class MyWindow(Gtk.Window):
    """Kitchen-timer window: preset countdown buttons plus a free-form entry.

    Each button starts the shared `Odliczanie` countdown label with a preset
    duration; typing a number of seconds into the entry and pressing Enter
    starts a custom countdown.
    """

    def startTimer(self, widget):
        """Start a countdown for the number of seconds typed in the entry."""
        text = self.time.get_text()
        try:
            seconds = int(text)
        except ValueError:
            # Ignore non-numeric input instead of crashing the GTK callback.
            return
        self.timer.start_odliczania(seconds - 1)

    def startTimer1(self, widget):
        """Eggs: 5 minutes (button label "Jajka (5')")."""
        # Bug fix: the original used 5 * 50 (250 s); 5 minutes is 300 s.
        t1 = 5 * 60
        self.timer.start_odliczania(int(t1) - 1)

    def startTimer2(self, widget):
        """Pasta: 8 minutes."""
        t1 = 8 * 60
        self.timer.start_odliczania(int(t1) - 1)

    def startTimer3(self, widget):
        """Steak: 2 minutes."""
        t1 = 2 * 60
        self.timer.start_odliczania(int(t1) - 1)

    def startTimer4(self, widget):
        """Tortilla: 30 seconds."""
        t1 = 30
        self.timer.start_odliczania(int(t1) - 1)

    def __init__(self):
        Gtk.Window.__init__(self, title="Hello World")
        # Vertical container: two button rows, then the entry, then the label.
        self.box2 = Gtk.Box(spacing=6, orientation=Gtk.Orientation.VERTICAL)
        self.add(self.box2)
        self.box = Gtk.Box(spacing=6)
        self.box2.pack_start(self.box, True, True, 0)
        self.box3 = Gtk.Box(spacing=6)
        self.box2.pack_start(self.box3, True, True, 0)

        self.button1 = Gtk.Button(label="Jajka (5')")
        self.button1.connect("clicked", self.startTimer1)
        self.box.pack_start(self.button1, True, True, 0)

        self.button2 = Gtk.Button(label="Makaron (8')")
        self.button2.connect("clicked", self.startTimer2)
        self.box.pack_start(self.button2, True, True, 0)

        self.button3 = Gtk.Button(label="Stek (2')")
        self.button3.connect("clicked", self.startTimer3)
        self.box3.pack_start(self.button3, True, True, 0)

        self.button4 = Gtk.Button(label="Tortilla (30'')")
        self.button4.connect("clicked", self.startTimer4)
        self.box3.pack_start(self.button4, True, True, 0)

        # Free-form entry: "activate" fires when the user presses Enter.
        self.time = Gtk.Entry()
        self.time.connect("activate", self.startTimer)
        self.box2.pack_start(self.time, True, True, 0)

        self.timer = Odliczanie()
        self.box2.pack_start(self.timer, True, True, 0)
class Odliczanie(Gtk.Label):
    """A GTK label that counts down once per second via a GLib timeout."""

    def __init__(self):
        Gtk.Label.__init__(self)
        self.time = 0       # seconds remaining
        self.id = None      # active GLib timeout source id, or None

    def countdown(self):
        """Timeout callback: show the remaining time and decrement it.

        Returns True to keep the timeout alive, False to let GLib remove it.
        """
        if self.time > 0:
            self.set_text(str(self.time))
            self.time -= 1
            return True
        self.time = 0
        self.set_text(str(self.time))
        # Returning False removes the source; forget its id so a later
        # start_odliczania() does not try to remove a dead source.
        self.id = None
        return False

    def start_odliczania(self, time):
        """Start (or restart) a countdown of *time* seconds.

        Bug fix: the original never cancelled a previously registered
        timeout, so starting a second countdown left two concurrent timers
        updating the same label.
        """
        if self.id is not None:
            GObject.source_remove(self.id)
        self.time = time
        self.id = GObject.timeout_add(1000, self.countdown)
# Build the window, quit the GTK main loop when it is closed, and run.
win = MyWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
notebooks/stimulus_presentation/ssvep.py | synicalsyntax/eeg-notebooks | 7 | 6624700 | """
Generate Steady-State Visually Evoked Potential (SSVEP)
=======================================================
Steady-State Visually Evoked Potential (SSVEP) stimulus presentation.
"""
from time import time
from optparse import OptionParser
import numpy as np
from pandas import DataFrame
from psychopy import visual, core, event
from pylsl import StreamInfo, StreamOutlet
def present(duration=120):
    """Present the SSVEP flickering stimulus and stream event markers.

    Opens a full-screen PsychoPy window, alternates between two
    pattern-reversal flicker frequencies across trials, and pushes a marker
    (1 or 2, identifying the frequency) through an LSL outlet at the onset
    of each trial. Stops on any keypress or when *duration* is exceeded.

    Keyword Args:
        duration (float): maximum recording duration, in seconds
    """
    # Create markers stream outlet
    info = StreamInfo('Markers', 'Markers', 1, 0, 'int32', 'myuidw43536')
    outlet = StreamOutlet(info)

    markernames = [1, 2]
    start = time()

    # Set up trial parameters
    n_trials = 2010
    iti = 0.5      # inter-trial interval, in s
    soa = 3.0      # stimulus duration, in s
    jitter = 0.2   # random jitter added to the ITI, in s
    record_duration = np.float32(duration)

    # Set up trial list: randomly pick one of the two frequencies per trial.
    stim_freq = np.random.binomial(1, 0.5, n_trials)
    trials = DataFrame(dict(stim_freq=stim_freq, timestamp=np.zeros(n_trials)))

    # Set up graphics
    mywin = visual.Window([1600, 900], monitor='testMonitor', units='deg',
                          fullscr=True, winType='pygame')
    grating = visual.GratingStim(win=mywin, mask='circle', size=80, sf=0.2)
    grating_neg = visual.GratingStim(win=mywin, mask='circle', size=80, sf=0.2,
                                     phase=0.5)
    # autoDraw keeps the fixation point drawn on every frame flip.
    fixation = visual.GratingStim(win=mywin, size=0.2, pos=[0, 0], sf=0,
                                  color=[1, 0, 0], autoDraw=True)

    def get_possible_ssvep_freqs(frame_rate, stim_type='single'):
        """Get possible SSVEP stimulation frequencies.

        Utility function that returns the possible SSVEP stimulation
        frequencies and on/off pattern based on screen refresh rate.

        Args:
            frame_rate (float): screen frame rate, in Hz

        Keyword Args:
            stim_type (str): type of stimulation
                'single'-> single graphic stimulus (the displayed object
                    appears and disappears in the background.)
                'reversal' -> pattern reversal stimulus (the displayed object
                    appears and is replaced by its opposite.)

        Returns:
            (dict): keys are stimulation frequencies (in Hz), and values are
                lists of tuples, where each tuple is the number of (on, off)
                periods of one stimulation cycle

        For more info on stimulation patterns, see Section 2 of:
            "A Survey of Stimulation Methods Used in SSVEP-Based BCIs,"
            Computational Intelligence and Neuroscience, vol. 2010, 12 pages,
            2010.
        """
        max_period_nb = int(frame_rate / 6)
        periods = np.arange(max_period_nb) + 1

        if stim_type == 'single':
            freqs = dict()
            for p1 in periods:
                for p2 in periods:
                    f = frame_rate / (p1 + p2)
                    # Several (on, off) patterns can map to one frequency.
                    try:
                        freqs[f].append((p1, p2))
                    except KeyError:
                        # Narrowed from a bare except: only a missing key is
                        # expected here.
                        freqs[f] = [(p1, p2)]
        elif stim_type == 'reversal':
            freqs = {frame_rate / p: [(p, p)] for p in periods[::-1]}

        return freqs

    def init_flicker_stim(frame_rate, cycle, soa):
        """Initialize flickering stimulus.

        Get parameters for a flickering stimulus, based on the screen refresh
        rate and the desired stimulation cycle.

        Args:
            frame_rate (float): screen frame rate, in Hz
            cycle (tuple or int): if tuple (on, off), represents the number of
                'on' periods and 'off' periods in one flickering cycle. This
                supposes a "single graphic" stimulus, where the displayed
                object appears and disappears in the background.
                If int, represents the number of total periods in one cycle.
                This supposes a "pattern reversal" stimulus, where the
                displayed object appears and is replaced by its opposite.
            soa (float): stimulus duration, in s

        Returns:
            (dict): dictionary with keys
                'cycle' -> tuple of (on, off) periods in a cycle
                'freq' -> stimulus frequency
                'n_cycles' -> number of cycles in one stimulus trial
        """
        if isinstance(cycle, tuple):
            stim_freq = frame_rate / sum(cycle)
            n_cycles = int(soa * stim_freq)
        else:
            stim_freq = frame_rate / cycle
            cycle = (cycle, cycle)
            # Integer division instead of the original float '/ 2': the
            # caller iterates range(int(n_cycles)), so the values agree.
            n_cycles = int(soa * stim_freq) // 2

        return {'cycle': cycle,
                'freq': stim_freq,
                'n_cycles': n_cycles}

    # Set up stimuli
    frame_rate = np.round(mywin.getActualFrameRate())  # Frame rate, in Hz
    # Computed for reference/debugging only; not used by the trial loop.
    freqs = get_possible_ssvep_freqs(frame_rate, stim_type='reversal')
    stim_patterns = [init_flicker_stim(frame_rate, 2, soa),
                     init_flicker_stim(frame_rate, 3, soa)]

    print(('Flickering frequencies (Hz): {}\n'.format(
        [stim_patterns[0]['freq'], stim_patterns[1]['freq']])))

    for ii, trial in trials.iterrows():
        # Intertrial interval (with random jitter)
        core.wait(iti + np.random.rand() * jitter)

        # Select stimulus frequency for this trial
        ind = trials['stim_freq'].iloc[ii]

        # Send start marker identifying the chosen frequency
        timestamp = time()
        outlet.push_sample([markernames[ind]], timestamp)

        # Present flickering stimulus: alternate the grating and its
        # phase-reversed counterpart for the configured number of cycles.
        for _ in range(int(stim_patterns[ind]['n_cycles'])):
            grating.setAutoDraw(True)
            for _ in range(int(stim_patterns[ind]['cycle'][0])):
                mywin.flip()
            grating.setAutoDraw(False)
            grating_neg.setAutoDraw(True)
            for _ in range(int(stim_patterns[ind]['cycle'][1])):
                mywin.flip()
            grating_neg.setAutoDraw(False)

        # offset
        mywin.flip()
        # Stop on any keypress or once the recording duration is exceeded.
        if len(event.getKeys()) > 0 or (time() - start) > record_duration:
            break
        event.clearEvents()

    # Cleanup
    mywin.close()
def main():
    """Parse command-line options and run the stimulus presentation.

    Uses argparse (optparse has been deprecated since Python 3.2) while
    preserving the original -d/--duration interface.
    """
    import argparse  # local import: only needed for the CLI entry point

    parser = argparse.ArgumentParser(
        description='SSVEP stimulus presentation.')
    parser.add_argument('-d', '--duration',
                        dest='duration', type=int, default=120,
                        help='duration of the recording in seconds.')
    args = parser.parse_args()
    present(args.duration)


if __name__ == '__main__':
    main()
| """
Generate Steady-State Visually Evoked Potential (SSVEP)
=======================================================
Steady-State Visually Evoked Potential (SSVEP) stimulus presentation.
"""
from time import time
from optparse import OptionParser
import numpy as np
from pandas import DataFrame
from psychopy import visual, core, event
from pylsl import StreamInfo, StreamOutlet
def present(duration=120):
    """Present SSVEP flicker stimuli and stream event markers over LSL.

    Two pattern-reversal gratings flicker at different frequencies; on each
    trial one of the two is shown and a marker (1 or 2) is pushed to an LSL
    'Markers' stream at stimulus onset. Stops on any keypress or when
    `duration` seconds have elapsed.

    Args:
        duration (float): total recording duration, in seconds.
    """
    # Create markers stream outlet
    info = StreamInfo('Markers', 'Markers', 1, 0, 'int32', 'myuidw43536')
    outlet = StreamOutlet(info)

    markernames = [1, 2]
    start = time()

    # Set up trial parameters
    n_trials = 2010
    iti = 0.5      # inter-trial interval, in s
    soa = 3.0      # stimulus duration (stimulus-onset asynchrony), in s
    jitter = 0.2   # random jitter added to the ITI, in s
    record_duration = np.float32(duration)

    # Set up trial list: each trial randomly uses flicker pattern 0 or 1
    stim_freq = np.random.binomial(1, 0.5, n_trials)
    trials = DataFrame(dict(stim_freq=stim_freq, timestamp=np.zeros(n_trials)))

    # Set up graphics. `fixation` must stay referenced: autoDraw=True keeps
    # it drawn on every flip for the whole session.
    mywin = visual.Window([1600, 900], monitor='testMonitor', units='deg',
                          fullscr=True, winType='pygame')
    grating = visual.GratingStim(win=mywin, mask='circle', size=80, sf=0.2)
    grating_neg = visual.GratingStim(win=mywin, mask='circle', size=80, sf=0.2,
                                     phase=0.5)
    fixation = visual.GratingStim(win=mywin, size=0.2, pos=[0, 0], sf=0,
                                  color=[1, 0, 0], autoDraw=True)

    def get_possible_ssvep_freqs(frame_rate, stim_type='single'):
        """Get possible SSVEP stimulation frequencies.

        Utility that returns the achievable SSVEP stimulation frequencies
        and on/off patterns for a given screen refresh rate.

        Args:
            frame_rate (float): screen frame rate, in Hz

        Keyword Args:
            stim_type (str): type of stimulation.
                'single' -> single graphic stimulus (the displayed object
                appears and disappears in the background).
                'reversal' -> pattern reversal stimulus (the displayed
                object appears and is replaced by its opposite).

        Returns:
            (dict): keys are stimulation frequencies (in Hz); values are
                lists of (on, off) period tuples for one stimulation cycle.

        Raises:
            ValueError: if `stim_type` is neither 'single' nor 'reversal'.
        """
        # Shortest usable stimulation frequency is ~6 Hz, hence the cap.
        max_period_nb = int(frame_rate / 6)
        periods = np.arange(max_period_nb) + 1

        if stim_type == 'single':
            freqs = dict()
            for p1 in periods:
                for p2 in periods:
                    f = frame_rate / (p1 + p2)
                    # Fix: group patterns per frequency with setdefault
                    # instead of a bare `except:` that swallowed every
                    # exception (including KeyboardInterrupt).
                    freqs.setdefault(f, []).append((p1, p2))
        elif stim_type == 'reversal':
            freqs = {frame_rate / p: [(p, p)] for p in periods[::-1]}
        else:
            # Fix: an unknown stim_type previously fell through and raised
            # an obscure NameError on `freqs` below.
            raise ValueError(
                "stim_type must be 'single' or 'reversal', got {!r}".format(
                    stim_type))

        return freqs

    def init_flicker_stim(frame_rate, cycle, soa):
        """Initialize flickering stimulus parameters.

        Args:
            frame_rate (float): screen frame rate, in Hz
            cycle (tuple or int): if a tuple (on, off), the number of 'on'
                and 'off' periods in one flickering cycle ("single graphic"
                stimulus). If an int, the number of periods per half-cycle
                ("pattern reversal" stimulus).
            soa (float): stimulus duration, in s

        Returns:
            (dict): dictionary with keys
                'cycle' -> tuple of (on, off) periods in a cycle
                'freq' -> stimulus frequency
                'n_cycles' -> number of cycles in one stimulus trial
        """
        if isinstance(cycle, tuple):
            stim_freq = frame_rate / sum(cycle)
            n_cycles = int(soa * stim_freq)
        else:
            stim_freq = frame_rate / cycle
            cycle = (cycle, cycle)
            # Each reversal cycle shows both phases, so halve the count.
            # Fix: floor-divide keeps n_cycles an int (the old `/ 2`
            # produced a float that had to be re-truncated at every use).
            n_cycles = int(soa * stim_freq) // 2

        return {'cycle': cycle,
                'freq': stim_freq,
                'n_cycles': n_cycles}

    # Set up stimuli
    frame_rate = np.round(mywin.getActualFrameRate())  # Frame rate, in Hz
    # Kept for debugging: table of all achievable reversal frequencies.
    freqs = get_possible_ssvep_freqs(frame_rate, stim_type='reversal')
    # print(freqs)
    stim_patterns = [init_flicker_stim(frame_rate, 2, soa),
                     init_flicker_stim(frame_rate, 3, soa)]

    print('Flickering frequencies (Hz): {}\n'.format(
        [stim_patterns[0]['freq'], stim_patterns[1]['freq']]))

    for ii, trial in trials.iterrows():
        # Intertrial interval, with random jitter
        core.wait(iti + np.random.rand() * jitter)

        # Select stimulus frequency. Use the column accessor (not the row
        # Series `trial`) so the value keeps its integer dtype and can be
        # used as a list index.
        ind = trials['stim_freq'].iloc[ii]

        # Send start marker
        timestamp = time()
        outlet.push_sample([markernames[ind]], timestamp)

        # Present flickering stimulus: alternate the two opposite-phase
        # gratings for n_cycles reversal cycles.
        for _ in range(int(stim_patterns[ind]['n_cycles'])):
            grating.setAutoDraw(True)
            for _ in range(int(stim_patterns[ind]['cycle'][0])):
                mywin.flip()
            grating.setAutoDraw(False)
            grating_neg.setAutoDraw(True)
            # Fix: cast to int like cycle[0] above, so both half-cycles are
            # handled consistently if the counts arrive as numpy floats.
            for _ in range(int(stim_patterns[ind]['cycle'][1])):
                mywin.flip()
            grating_neg.setAutoDraw(False)

        # offset
        mywin.flip()

        # Stop on any keypress or once the recording duration is reached.
        if len(event.getKeys()) > 0 or (time() - start) > record_duration:
            break
        event.clearEvents()

    # Cleanup
    mywin.close()
def main():
    """Parse command-line options and launch the stimulus presentation."""
    # Single optional flag: how long to record, in seconds.
    cli = OptionParser()
    cli.add_option(
        "-d", "--duration",
        dest="duration", type='int', default=120,
        help="duration of the recording in seconds.")
    opts, _unused_args = cli.parse_args()
    present(opts.duration)


if __name__ == '__main__':
    main()
| en | 0.771314 | Generate Steady-State Visually Evoked Potential (SSVEP)
=======================================================
Steady-State Visually Evoked Potential (SSVEP) stimulus presentation. # Create markers stream outlet # Set up trial parameters # Set up trial list # Set up graphics Get possible SSVEP stimulation frequencies.
Utility function that returns the possible SSVEP stimulation
frequencies and on/off pattern based on screen refresh rate.
Args:
frame_rate (float): screen frame rate, in Hz
Keyword Args:
stim_type (str): type of stimulation
'single'-> single graphic stimulus (the displayed object
appears and disappears in the background.)
'reversal' -> pattern reversal stimulus (the displayed object
appears and is replaced by its opposite.)
Returns:
(dict): keys are stimulation frequencies (in Hz), and values are
lists of tuples, where each tuple is the number of (on, off)
periods of one stimulation cycle
For more info on stimulation patterns, see Section 2 of:
<NAME>, <NAME>, <NAME>, and <NAME>,
"A Survey of Stimulation Methods Used in SSVEP-Based BCIs,"
Computational Intelligence and Neuroscience, vol. 2010, 12 pages,
2010. Initialize flickering stimulus.
Get parameters for a flickering stimulus, based on the screen refresh
rate and the desired stimulation cycle.
Args:
frame_rate (float): screen frame rate, in Hz
cycle (tuple or int): if tuple (on, off), represents the number of
'on' periods and 'off' periods in one flickering cycle. This
supposes a "single graphic" stimulus, where the displayed object
appears and disappears in the background.
If int, represents the number of total periods in one cycle.
This supposes a "pattern reversal" stimulus, where the
displayed object appears and is replaced by its opposite.
soa (float): stimulus duration, in s
Returns:
(dict): dictionary with keys
'cycle' -> tuple of (on, off) periods in a cycle
'freq' -> stimulus frequency
'n_cycles' -> number of cycles in one stimulus trial # Set up stimuli # Frame rate, in Hz # print(freqs) # Intertrial interval # Select stimulus frequency # Send start marker # Present flickering stimulus # offset # Cleanup | 2.771522 | 3 |
food-app-api/food_app_core/apps.py | sajith-v/food-app-api | 0 | 6624701 | from django.apps import AppConfig
class food_app_coreConfig(AppConfig):
    """Django application configuration for the ``food_app_core`` app.

    NOTE(review): the class name is not PascalCase, but renaming it could
    break ``INSTALLED_APPS``/``default_app_config`` references elsewhere in
    the project, so it is left unchanged.
    """
    # Dotted module path Django uses to register this application.
    name = 'food_app_core'
| from django.apps import AppConfig
class food_app_coreConfig(AppConfig):
name = 'food_app_core'
| none | 1 | 1.117506 | 1 | |
def insertion_sort(lst):
    """Sort a list in ascending order, in place, using insertion sort.

    Args:
        lst (list): list of mutually comparable items (ints, floats,
            strings, ...).

    Returns:
        list: the same list object, sorted in ascending order.
    """
    # Grow a sorted prefix one element at a time: shift every larger item
    # one slot right, then drop the saved element into the gap.
    for i in range(1, len(lst)):
        j = i - 1
        # Fix: no int() cast — the old `int((lst[i]))` truncated floats,
        # raised on non-numeric items, and wrote the truncated value back.
        temp = lst[i]
        while j >= 0 and temp < lst[j]:
            lst[j + 1] = lst[j]
            j = j - 1
        lst[j + 1] = temp
    return lst
# Smoke test: sort a sample list and print the result.
sample = [18, 22, 1, 13, 53, 64]
print(insertion_sort(sample))
| def insertion_sort(lst):
"""
Sorts list from lowest to highest using insertion sort method
In - takes in a list of integers
Out - returns a list of sorted integers
"""
for i in range(1, len(lst)):
j = i - 1
temp = int((lst[i]))
while j >= 0 and temp < lst[j]:
lst[j + 1] = lst[j]
j = j - 1
lst[j + 1] = temp
return lst
test_lst = [18,22,1,13,53,64]
print(insertion_sort(test_lst))
| en | 0.729689 | Sorts list from lowest to highest using insertion sort method In - takes in a list of integers Out - returns a list of sorted integers | 4.215509 | 4 |
examples/websocket/aggs.py | Polygon-io/client-python | 1 | 6624703 | <filename>examples/websocket/aggs.py
from polygon import WebSocketClient
from polygon.websocket.models import WebSocketMessage, EquityTrade
from typing import List
c = WebSocketClient(subscriptions=["T.*"])
class MessageHandler:
    """Counts and prints every equity trade received from the websocket."""

    # Running number of trades handled so far. Declared as a class
    # attribute; the first `self.count += 1` creates an instance attribute.
    count = 0

    def handle_msg(self, msgs: List[WebSocketMessage]):
        """Print each equity trade in `msgs` with a running index.

        Args:
            msgs: batch of websocket messages; non-trade messages are
                ignored.
        """
        for m in msgs:
            # Fix: isinstance() is the idiomatic type check and, unlike
            # `type(m) == EquityTrade`, also accepts subclasses.
            if isinstance(m, EquityTrade):
                print(self.count, m)
                self.count += 1
# Single shared handler instance that keeps the running trade count.
h = MessageHandler()


def handle_msg(msgs: List[WebSocketMessage]):
    """Module-level callback passed to the client; delegates to `h`."""
    h.handle_msg(msgs)


# Blocks forever, dispatching incoming message batches to handle_msg.
c.run(handle_msg)
| <filename>examples/websocket/aggs.py
from polygon import WebSocketClient
from polygon.websocket.models import WebSocketMessage, EquityTrade
from typing import List
c = WebSocketClient(subscriptions=["T.*"])
class MessageHandler:
count = 0
def handle_msg(self, msgs: List[WebSocketMessage]):
for m in msgs:
if type(m) == EquityTrade:
print(self.count, m)
self.count += 1
h = MessageHandler()
def handle_msg(msgs: List[WebSocketMessage]):
h.handle_msg(msgs)
c.run(handle_msg)
| none | 1 | 2.632133 | 3 | |
examples/models/image_object_detection/food_detection/food172.py | zlheui/singa-auto | 10 | 6624704 | from keras.applications.xception import Xception
from singa_auto.darknet.food_objection_base_model import FoodDetectionBase
class FoodDetection172(FoodDetectionBase):
    """Food detection model for the 172-class food dataset.

    Configures the :class:`FoodDetectionBase` pipeline with a YOLOv3 food
    detector and an Xception classifier over 172 classes.
    """

    def __init__(self, **knobs):
        super().__init__(clf_model_class_name=Xception, **knobs)
        # Pre-configuration
        self.classes = 172      # number of food classes
        # 299 presumably matches Xception's expected input resolution —
        # confirm against how the base class uses image_size.
        self.image_size = 299
        # Preloaded detector files
        self.yolo_cfg_name = "yolov3-food.cfg"               # YOLOv3 config
        self.yolo_weight_name = "yolov3-food_final.weights"  # YOLOv3 weights
        self.food_name = "food.names"                        # class-name list
        # Pretrained classifier weights downloaded from the internet; can be
        # downloaded locally and uploaded, or fetched by the server. Leave as
        # None to download on the server side.
        self.preload_clf_model_weights_name = None
        # Fine-tuned classifier model weights
        self.trained_clf_model_weights_name = "xception-800_F172-0.86.h5"
        self._npy_index_name = "food172.npy"  # label-index mapping file
| from keras.applications.xception import Xception
from singa_auto.darknet.food_objection_base_model import FoodDetectionBase
class FoodDetection172(FoodDetectionBase):
def __init__(self, **knobs):
super().__init__(clf_model_class_name=Xception, **knobs)
# pre config
self.classes = 172
self.image_size = 299
# preload files
self.yolo_cfg_name = "yolov3-food.cfg"
self.yolo_weight_name = "yolov3-food_final.weights"
self.food_name = "food.names"
# this is the model file downloaded from internet,
# can choose download locally and upload , or download from server
# if download at server side, leave it to none
self.preload_clf_model_weights_name = None
# this is the trained model
self.trained_clf_model_weights_name = "xception-800_F172-0.86.h5"
self._npy_index_name = "food172.npy"
| en | 0.907479 | # pre config # preload files # this is the model file downloaded from internet, # can choose download locally and upload , or download from server # if download at server side, leave it to none # this is the trained model | 2.343766 | 2 |
src/sagemaker/chainer/model.py | aws-patlin/sagemaker-python-sdk | 0 | 6624705 | <gh_stars>0
# Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Placeholder docstring"""
from __future__ import absolute_import
import logging
from sagemaker import fw_utils
import sagemaker
from sagemaker.fw_utils import (
create_image_uri,
model_code_key_prefix,
python_deprecation_warning,
empty_framework_version_warning,
)
from sagemaker.model import FrameworkModel, MODEL_SERVER_WORKERS_PARAM_NAME
from sagemaker.chainer import defaults
from sagemaker.predictor import RealTimePredictor, npy_serializer, numpy_deserializer
logger = logging.getLogger("sagemaker")
class ChainerPredictor(RealTimePredictor):
    """A RealTimePredictor for inference against Chainer Endpoints.

    This is able to serialize Python lists, dictionaries, and numpy arrays to
    multidimensional tensors for Chainer inference.
    """

    def __init__(self, endpoint_name, sagemaker_session=None):
        """Initialize a ``ChainerPredictor``.

        Args:
            endpoint_name (str): The name of the endpoint to perform inference
                on.
            sagemaker_session (sagemaker.session.Session): Session object which
                manages interactions with Amazon SageMaker APIs and any other
                AWS services needed. If not specified, the estimator creates one
                using the default AWS configuration chain.
        """
        # Requests are serialized as .npy payloads; responses are
        # deserialized back into numpy arrays.
        super(ChainerPredictor, self).__init__(
            endpoint_name, sagemaker_session, npy_serializer, numpy_deserializer
        )
class ChainerModel(FrameworkModel):
    """A Chainer SageMaker ``Model`` that can be deployed to a SageMaker
    ``Endpoint``.
    """

    __framework_name__ = "chainer"

    def __init__(
        self,
        model_data,
        role,
        entry_point,
        image=None,
        py_version="py3",
        framework_version=None,
        predictor_cls=ChainerPredictor,
        model_server_workers=None,
        **kwargs
    ):
        """Initialize a ChainerModel.

        Args:
            model_data (str): The S3 location of a SageMaker model data
                ``.tar.gz`` file.
            role (str): An AWS IAM role (either name or full ARN). The Amazon
                SageMaker training jobs and APIs that create Amazon SageMaker
                endpoints use this role to access training data and model
                artifacts. After the endpoint is created, the inference code
                might use the IAM role, if it needs to access an AWS resource.
            entry_point (str): Path (absolute or relative) to the Python source
                file which should be executed as the entry point to model
                hosting. If ``source_dir`` is specified, then ``entry_point``
                must point to a file located at the root of ``source_dir``.
            image (str): A Docker image URI (default: None). If not specified, a
                default image for Chainer will be used.
            py_version (str): Python version you want to use for executing your
                model training code (default: 'py3').
            framework_version (str): Chainer version you want to use for
                executing your model training code. Defaults to
                ``defaults.CHAINER_VERSION`` when not given.
            predictor_cls (callable[str, sagemaker.session.Session]): A function
                to call to create a predictor with an endpoint name and
                SageMaker ``Session``. If specified, ``deploy()`` returns the
                result of invoking this function on the created endpoint name.
            model_server_workers (int): Optional. The number of worker processes
                used by the inference server. If None, server will use one
                worker per vCPU.
            **kwargs: Keyword arguments passed to the
                :class:`~sagemaker.model.FrameworkModel` initializer.

        .. tip::

            You can find additional parameters for initializing this class at
            :class:`~sagemaker.model.FrameworkModel` and
            :class:`~sagemaker.model.Model`.
        """
        super(ChainerModel, self).__init__(
            model_data, image, role, entry_point, predictor_cls=predictor_cls, **kwargs
        )

        # py2 support is deprecated; warn but keep accepting it.
        if py_version == "py2":
            logger.warning(
                python_deprecation_warning(self.__framework_name__, defaults.LATEST_PY2_VERSION)
            )

        # Warn when the caller relies on the implicit default framework version.
        if framework_version is None:
            logger.warning(
                empty_framework_version_warning(defaults.CHAINER_VERSION, defaults.LATEST_VERSION)
            )

        self.py_version = py_version
        self.framework_version = framework_version or defaults.CHAINER_VERSION
        self.model_server_workers = model_server_workers

    def prepare_container_def(self, instance_type, accelerator_type=None):
        """Return a container definition with framework configuration set in
        model environment variables.

        Args:
            instance_type (str): The EC2 instance type to deploy this Model to.
                For example, 'ml.p2.xlarge'.
            accelerator_type (str): The Elastic Inference accelerator type to
                deploy to the instance for loading and making inferences to the
                model. For example, 'ml.eia1.medium'.

        Returns:
            dict[str, str]: A container definition object usable with the
            CreateModel API.
        """
        deploy_image = self.image
        # No explicit image: derive the default framework image for this
        # region / instance / framework version combination.
        if not deploy_image:
            region_name = self.sagemaker_session.boto_session.region_name
            deploy_image = create_image_uri(
                region_name,
                self.__framework_name__,
                instance_type,
                self.framework_version,
                self.py_version,
                accelerator_type=accelerator_type,
            )
        # Upload the entry-point code to S3 under a key derived from the image.
        deploy_key_prefix = model_code_key_prefix(self.key_prefix, self.name, deploy_image)
        self._upload_code(deploy_key_prefix)
        deploy_env = dict(self.env)
        deploy_env.update(self._framework_env_vars())

        if self.model_server_workers:
            deploy_env[MODEL_SERVER_WORKERS_PARAM_NAME.upper()] = str(self.model_server_workers)
        return sagemaker.container_def(deploy_image, self.model_data, deploy_env)

    def serving_image_uri(self, region_name, instance_type):
        """Create a URI for the serving image.

        Args:
            region_name (str): AWS region where the image is uploaded.
            instance_type (str): SageMaker instance type. Used to determine device type
                (cpu/gpu/family-specific optimized).

        Returns:
            str: The appropriate image URI based on the given parameters.
        """
        return fw_utils.create_image_uri(
            region_name,
            self.__framework_name__,
            instance_type,
            self.framework_version,
            self.py_version,
        )
| # Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Placeholder docstring"""
from __future__ import absolute_import
import logging
from sagemaker import fw_utils
import sagemaker
from sagemaker.fw_utils import (
create_image_uri,
model_code_key_prefix,
python_deprecation_warning,
empty_framework_version_warning,
)
from sagemaker.model import FrameworkModel, MODEL_SERVER_WORKERS_PARAM_NAME
from sagemaker.chainer import defaults
from sagemaker.predictor import RealTimePredictor, npy_serializer, numpy_deserializer
logger = logging.getLogger("sagemaker")
class ChainerPredictor(RealTimePredictor):
"""A RealTimePredictor for inference against Chainer Endpoints.
This is able to serialize Python lists, dictionaries, and numpy arrays to
multidimensional tensors for Chainer inference.
"""
def __init__(self, endpoint_name, sagemaker_session=None):
"""Initialize an ``ChainerPredictor``.
Args:
endpoint_name (str): The name of the endpoint to perform inference
on.
sagemaker_session (sagemaker.session.Session): Session object which
manages interactions with Amazon SageMaker APIs and any other
AWS services needed. If not specified, the estimator creates one
using the default AWS configuration chain.
"""
super(ChainerPredictor, self).__init__(
endpoint_name, sagemaker_session, npy_serializer, numpy_deserializer
)
class ChainerModel(FrameworkModel):
"""An Chainer SageMaker ``Model`` that can be deployed to a SageMaker
``Endpoint``.
"""
__framework_name__ = "chainer"
def __init__(
self,
model_data,
role,
entry_point,
image=None,
py_version="py3",
framework_version=None,
predictor_cls=ChainerPredictor,
model_server_workers=None,
**kwargs
):
"""Initialize an ChainerModel.
Args:
model_data (str): The S3 location of a SageMaker model data
``.tar.gz`` file.
role (str): An AWS IAM role (either name or full ARN). The Amazon
SageMaker training jobs and APIs that create Amazon SageMaker
endpoints use this role to access training data and model
artifacts. After the endpoint is created, the inference code
might use the IAM role, if it needs to access an AWS resource.
entry_point (str): Path (absolute or relative) to the Python source
file which should be executed as the entry point to model
hosting. If ``source_dir`` is specified, then ``entry_point``
must point to a file located at the root of ``source_dir``.
image (str): A Docker image URI (default: None). If not specified, a
default image for Chainer will be used.
py_version (str): Python version you want to use for executing your
model training code (default: 'py2').
framework_version (str): Chainer version you want to use for
executing your model training code.
predictor_cls (callable[str, sagemaker.session.Session]): A function
to call to create a predictor with an endpoint name and
SageMaker ``Session``. If specified, ``deploy()`` returns the
result of invoking this function on the created endpoint name.
model_server_workers (int): Optional. The number of worker processes
used by the inference server. If None, server will use one
worker per vCPU.
**kwargs: Keyword arguments passed to the
:class:`~sagemaker.model.FrameworkModel` initializer.
.. tip::
You can find additional parameters for initializing this class at
:class:`~sagemaker.model.FrameworkModel` and
:class:`~sagemaker.model.Model`.
"""
super(ChainerModel, self).__init__(
model_data, image, role, entry_point, predictor_cls=predictor_cls, **kwargs
)
if py_version == "py2":
logger.warning(
python_deprecation_warning(self.__framework_name__, defaults.LATEST_PY2_VERSION)
)
if framework_version is None:
logger.warning(
empty_framework_version_warning(defaults.CHAINER_VERSION, defaults.LATEST_VERSION)
)
self.py_version = py_version
self.framework_version = framework_version or defaults.CHAINER_VERSION
self.model_server_workers = model_server_workers
def prepare_container_def(self, instance_type, accelerator_type=None):
"""Return a container definition with framework configuration set in
model environment variables.
Args:
instance_type (str): The EC2 instance type to deploy this Model to.
For example, 'ml.p2.xlarge'.
accelerator_type (str): The Elastic Inference accelerator type to
deploy to the instance for loading and making inferences to the
model. For example, 'ml.eia1.medium'.
Returns:
dict[str, str]: A container definition object usable with the
CreateModel API.
"""
deploy_image = self.image
if not deploy_image:
region_name = self.sagemaker_session.boto_session.region_name
deploy_image = create_image_uri(
region_name,
self.__framework_name__,
instance_type,
self.framework_version,
self.py_version,
accelerator_type=accelerator_type,
)
deploy_key_prefix = model_code_key_prefix(self.key_prefix, self.name, deploy_image)
self._upload_code(deploy_key_prefix)
deploy_env = dict(self.env)
deploy_env.update(self._framework_env_vars())
if self.model_server_workers:
deploy_env[MODEL_SERVER_WORKERS_PARAM_NAME.upper()] = str(self.model_server_workers)
return sagemaker.container_def(deploy_image, self.model_data, deploy_env)
def serving_image_uri(self, region_name, instance_type):
"""Create a URI for the serving image.
Args:
region_name (str): AWS region where the image is uploaded.
instance_type (str): SageMaker instance type. Used to determine device type
(cpu/gpu/family-specific optimized).
Returns:
str: The appropriate image URI based on the given parameters.
"""
return fw_utils.create_image_uri(
region_name,
self.__framework_name__,
instance_type,
self.framework_version,
self.py_version,
) | en | 0.719968 | # Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. Placeholder docstring A RealTimePredictor for inference against Chainer Endpoints. This is able to serialize Python lists, dictionaries, and numpy arrays to multidimensional tensors for Chainer inference. Initialize an ``ChainerPredictor``. Args: endpoint_name (str): The name of the endpoint to perform inference on. sagemaker_session (sagemaker.session.Session): Session object which manages interactions with Amazon SageMaker APIs and any other AWS services needed. If not specified, the estimator creates one using the default AWS configuration chain. An Chainer SageMaker ``Model`` that can be deployed to a SageMaker ``Endpoint``. Initialize an ChainerModel. Args: model_data (str): The S3 location of a SageMaker model data ``.tar.gz`` file. role (str): An AWS IAM role (either name or full ARN). The Amazon SageMaker training jobs and APIs that create Amazon SageMaker endpoints use this role to access training data and model artifacts. After the endpoint is created, the inference code might use the IAM role, if it needs to access an AWS resource. entry_point (str): Path (absolute or relative) to the Python source file which should be executed as the entry point to model hosting. If ``source_dir`` is specified, then ``entry_point`` must point to a file located at the root of ``source_dir``. image (str): A Docker image URI (default: None). 
If not specified, a default image for Chainer will be used. py_version (str): Python version you want to use for executing your model training code (default: 'py2'). framework_version (str): Chainer version you want to use for executing your model training code. predictor_cls (callable[str, sagemaker.session.Session]): A function to call to create a predictor with an endpoint name and SageMaker ``Session``. If specified, ``deploy()`` returns the result of invoking this function on the created endpoint name. model_server_workers (int): Optional. The number of worker processes used by the inference server. If None, server will use one worker per vCPU. **kwargs: Keyword arguments passed to the :class:`~sagemaker.model.FrameworkModel` initializer. .. tip:: You can find additional parameters for initializing this class at :class:`~sagemaker.model.FrameworkModel` and :class:`~sagemaker.model.Model`. Return a container definition with framework configuration set in model environment variables. Args: instance_type (str): The EC2 instance type to deploy this Model to. For example, 'ml.p2.xlarge'. accelerator_type (str): The Elastic Inference accelerator type to deploy to the instance for loading and making inferences to the model. For example, 'ml.eia1.medium'. Returns: dict[str, str]: A container definition object usable with the CreateModel API. Create a URI for the serving image. Args: region_name (str): AWS region where the image is uploaded. instance_type (str): SageMaker instance type. Used to determine device type (cpu/gpu/family-specific optimized). Returns: str: The appropriate image URI based on the given parameters. | 1.816185 | 2 |
sym_api_client_python/__init__.py | 3tilley/symphony-api-client-python | 1 | 6624706 | <gh_stars>1-10
name = "sym_api_client_python"
| name = "sym_api_client_python" | none | 1 | 1.131033 | 1 | |
examples/contrib/lectures.py | klorel/or-tools | 279 | 6624707 | # Copyright 2010 <NAME> <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Lectures problem in Google CP Solver.
Biggs: Discrete Mathematics (2nd ed), page 187.
'''
Suppose we wish to schedule six one-hour lectures, v1, v2, v3, v4, v5, v6.
Among the the potential audience there are people who wish to hear both
- v1 and v2
- v1 and v4
- v3 and v5
- v2 and v6
- v4 and v5
- v5 and v6
- v1 and v6
How many hours are necessary in order that the lectures can be given
without clashes?
'''
Compare with the following models:
* MiniZinc: http://www.hakank.org/minizinc/lectures.mzn
* SICstus: http://hakank.org/sicstus/lectures.pl
* ECLiPSe: http://hakank.org/eclipse/lectures.ecl
* Gecode: http://hakank.org/gecode/lectures.cpp
This model was created by <NAME> (<EMAIL>)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from __future__ import print_function
import sys
from ortools.constraint_solver import pywrapcp
def main():
    """Build and solve the lecture-scheduling (graph colouring) model."""
    # Create the solver.
    solver = pywrapcp.Solver('Lectures')

    #
    # data
    #

    #
    # The schedule requirements:
    # lecture a cannot be held at the same time as b
    # Note: 1-based
    g = [[1, 2], [1, 4], [3, 5], [2, 6], [4, 5], [5, 6], [1, 6]]

    # number of nodes
    n = 6

    # number of edges
    edges = len(g)

    #
    # declare variables
    #
    # v[i] is the hour slot ("colour") assigned to lecture i (0-based).
    v = [solver.IntVar(0, n - 1, 'v[%i]' % i) for i in range(n)]

    # maximum color, to minimize
    # Note: since Python is 0-based, the
    # number of colors is +1
    max_c = solver.IntVar(0, n - 1, 'max_c')

    #
    # constraints
    #
    solver.Add(max_c == solver.Max(v))

    # ensure that there are no clashes
    # also, adjust to 0-base
    for i in range(edges):
        solver.Add(v[g[i][0] - 1] != v[g[i][1] - 1])

    # symmetry breaking:
    # - v0 has the color 0,
    # - v1 has either color 0 or 1
    solver.Add(v[0] == 0)
    solver.Add(v[1] <= 1)

    # objective: minimize the highest slot index used
    objective = solver.Minimize(max_c, 1)

    #
    # solution and search
    #
    db = solver.Phase(v, solver.CHOOSE_MIN_SIZE_LOWEST_MIN,
                      solver.ASSIGN_CENTER_VALUE)

    solver.NewSearch(db, [objective])

    num_solutions = 0
    while solver.NextSolution():
        num_solutions += 1
        print('max_c:', max_c.Value() + 1, 'colors')
        print('v:', [v[i].Value() for i in range(n)])
        print()

    print('num_solutions:', num_solutions)
    print('failures:', solver.Failures())
    print('branches:', solver.Branches())
    print('WallTime:', solver.WallTime(), 'ms')


if __name__ == '__main__':
    main()
| # Copyright 2010 <NAME> <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Lectures problem in Google CP Solver.
Biggs: Discrete Mathematics (2nd ed), page 187.
'''
Suppose we wish to schedule six one-hour lectures, v1, v2, v3, v4, v5, v6.
Among the the potential audience there are people who wish to hear both
- v1 and v2
- v1 and v4
- v3 and v5
- v2 and v6
- v4 and v5
- v5 and v6
- v1 and v6
How many hours are necessary in order that the lectures can be given
without clashes?
'''
Compare with the following models:
* MiniZinc: http://www.hakank.org/minizinc/lectures.mzn
* SICstus: http://hakank.org/sicstus/lectures.pl
* ECLiPSe: http://hakank.org/eclipse/lectures.ecl
* Gecode: http://hakank.org/gecode/lectures.cpp
This model was created by <NAME> (<EMAIL>)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from __future__ import print_function
import sys
from ortools.constraint_solver import pywrapcp
def main():
# Create the solver.
solver = pywrapcp.Solver('Lectures')
#
# data
#
#
# The schedule requirements:
# lecture a cannot be held at the same time as b
# Note: 1-based
g = [[1, 2], [1, 4], [3, 5], [2, 6], [4, 5], [5, 6], [1, 6]]
# number of nodes
n = 6
# number of edges
edges = len(g)
#
# declare variables
#
v = [solver.IntVar(0, n - 1, 'v[%i]' % i) for i in range(n)]
# maximum color, to minimize
# Note: since Python is 0-based, the
# number of colors is +1
max_c = solver.IntVar(0, n - 1, 'max_c')
#
# constraints
#
solver.Add(max_c == solver.Max(v))
# ensure that there are no clashes
# also, adjust to 0-base
for i in range(edges):
solver.Add(v[g[i][0] - 1] != v[g[i][1] - 1])
# symmetry breaking:
# - v0 has the color 0,
# - v1 has either color 0 or 1
solver.Add(v[0] == 0)
solver.Add(v[1] <= 1)
# objective
objective = solver.Minimize(max_c, 1)
#
# solution and search
#
db = solver.Phase(v, solver.CHOOSE_MIN_SIZE_LOWEST_MIN,
solver.ASSIGN_CENTER_VALUE)
solver.NewSearch(db, [objective])
num_solutions = 0
while solver.NextSolution():
num_solutions += 1
print('max_c:', max_c.Value() + 1, 'colors')
print('v:', [v[i].Value() for i in range(n)])
print()
print('num_solutions:', num_solutions)
print('failures:', solver.Failures())
print('branches:', solver.Branches())
print('WallTime:', solver.WallTime(), 'ms')
if __name__ == '__main__':
main()
| en | 0.850792 | # Copyright 2010 <NAME> <EMAIL> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Lectures problem in Google CP Solver. Biggs: Discrete Mathematics (2nd ed), page 187. ''' Suppose we wish to schedule six one-hour lectures, v1, v2, v3, v4, v5, v6. Among the the potential audience there are people who wish to hear both - v1 and v2 - v1 and v4 - v3 and v5 - v2 and v6 - v4 and v5 - v5 and v6 - v1 and v6 How many hours are necessary in order that the lectures can be given without clashes? ''' Compare with the following models: * MiniZinc: http://www.hakank.org/minizinc/lectures.mzn * SICstus: http://hakank.org/sicstus/lectures.pl * ECLiPSe: http://hakank.org/eclipse/lectures.ecl * Gecode: http://hakank.org/gecode/lectures.cpp This model was created by <NAME> (<EMAIL>) Also see my other Google CP Solver models: http://www.hakank.org/google_or_tools/ # Create the solver. # # data # # # The schedule requirements: # lecture a cannot be held at the same time as b # Note: 1-based # number of nodes # number of edges # # declare variables # # maximum color, to minimize # Note: since Python is 0-based, the # number of colors is +1 # # constraints # # ensure that there are no clashes # also, adjust to 0-base # symmetry breaking: # - v0 has the color 0, # - v1 has either color 0 or 1 # objective # # solution and search # | 2.358527 | 2 |
docs/podstawy/przyklady/04_listy_01.py | sokol02/python101 | 0 | 6624708 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# ~/python/04_1_listy.py
tupla = input("Podaj liczby oddzielone przecinkami: ")
lista = []
for i in range(len(tupla)):
lista.append(int(tupla[i]))
print "Elementy i ich indeksy:"
for i, v in enumerate(lista):
print v, "[", i, "]"
print "Elementy w odwróconym porządku:"
for e in reversed(lista):
print e,
print ""
print "Elementy posortowane rosnąco:"
for e in sorted(lista):
print e,
print ""
e = int(raw_input("Którą liczbę usunąć? "))
lista.remove(e)
print lista
print "Dodawanie elementów do listy"
a, i = input("Podaj element i indeks oddzielone przecinkiem: ")
lista.insert(i, a)
print lista
print "Wyszukiwanie i zliczanie elementu w liście"
e = int(raw_input("Podaj liczbę: "))
print "Liczba wystąpień: "
print lista.count(e)
print "Indeks pierwszego wystąpienia: "
if lista.count(e):
print lista.index(e)
else:
print "Brak elementu w liście"
print "Pobieramy ostatni element z listy: "
print lista.pop()
print lista
print "Część listy:"
i, j = input("Podaj indeks początkowy i końcowy oddzielone przecinkiem: ")
print lista[i:j]
| #! /usr/bin/env python
# -*- coding: utf-8 -*-
# ~/python/04_1_listy.py
tupla = input("Podaj liczby oddzielone przecinkami: ")
lista = []
for i in range(len(tupla)):
lista.append(int(tupla[i]))
print "Elementy i ich indeksy:"
for i, v in enumerate(lista):
print v, "[", i, "]"
print "Elementy w odwróconym porządku:"
for e in reversed(lista):
print e,
print ""
print "Elementy posortowane rosnąco:"
for e in sorted(lista):
print e,
print ""
e = int(raw_input("Którą liczbę usunąć? "))
lista.remove(e)
print lista
print "Dodawanie elementów do listy"
a, i = input("Podaj element i indeks oddzielone przecinkiem: ")
lista.insert(i, a)
print lista
print "Wyszukiwanie i zliczanie elementu w liście"
e = int(raw_input("Podaj liczbę: "))
print "Liczba wystąpień: "
print lista.count(e)
print "Indeks pierwszego wystąpienia: "
if lista.count(e):
print lista.index(e)
else:
print "Brak elementu w liście"
print "Pobieramy ostatni element z listy: "
print lista.pop()
print lista
print "Część listy:"
i, j = input("Podaj indeks początkowy i końcowy oddzielone przecinkiem: ")
print lista[i:j]
| en | 0.185043 | #! /usr/bin/env python # -*- coding: utf-8 -*- # ~/python/04_1_listy.py | 4.145789 | 4 |
utils/calculix/calculix_utils.py | parallelworks/welding-model | 2 | 6624709 | <gh_stars>1-10
import re
import warnings
import data_IO
def read_setting_from_str(text_line, setting_flag):
x = re.search(setting_flag, text_line, re.IGNORECASE)
if x is None:
warning_msg = 'Cannot find \"{}\" in line \"{}\"'.format(setting_flag, text_line)
print(warning_msg)
warnings.warn(warning_msg)
return None
setting = text_line[x.end():].split(',')[0]
return setting.rstrip()
class Set:
"""Stores set name and members"""
def __init__(self, set_type, name='', members=[]):
self.type = set_type
self.name = name
self.members = members
self.set_type = set_type
def num_members(self):
return len(self.members)
def read_set_name_from_line(self, line):
self.name = read_setting_from_str(line, self.set_type + '=')
def read_members_from_inp(self, inp_file):
self. members = data_IO.read_ints_from_file_line_offset(
inp_file, '*' + self.type + ',' + self.set_type + '=' + self.name,
delimiter=',', end_flag='*')
class ElementSet(Set):
"""Stores element set name and element numbers"""
def __init__(self, name='', members=[]):
Set.__init__(self, 'ELSET', name, members)
class NodeSet(Set):
"""Stores node set name and node numbers"""
def __init__(self, name='', members=[]):
Set.__init__(self, 'NSET', name, members)
def extract_sets_from_inp(finp, set_type):
finp.seek(0)
all_lines = finp.readlines()
line_num = 0
finp_sets = []
while line_num is not None:
line_num = data_IO.get_index_in_str_list(all_lines,
'*' + set_type, start_from=line_num)
if line_num:
set = Set(set_type)
set.read_set_name_from_line(all_lines[line_num])
set.read_members_from_inp(finp)
finp_sets.append(set)
line_num = line_num + 1
return finp_sets
class Mesh:
"""Reads/Stores Node sets and element sets"""
def __init__(self, element_sets=[], node_sets=[]):
self.element_sets = element_sets
self.node_sets = node_sets
def read_element_sets_from_inp(self, inp_file):
fin = data_IO.open_file(inp_file)
self.element_sets = extract_sets_from_inp(fin, 'ELSET')
fin.close()
def read_node_sets_from_inp(self, inp_file):
fin = data_IO.open_file(inp_file)
self.node_sets = extract_sets_from_inp(fin, 'NSET')
fin.close()
def read_mesh_from_inp(self, inp_file):
self.read_node_sets_from_inp(inp_file)
self.read_element_sets_from_inp(inp_file)
def element_set_names(self):
return [element_set.name for element_set in self.element_sets]
def node_set_names(self):
return [node_set.name for node_set in self.node_sets]
def num_element_sets(self):
return len(self.element_sets)
def num_node_sets(self):
return len(self.node_sets)
def num_elements_in_sets(self):
return [element_set.num_members() for element_set in self.element_sets]
def num_nodes_in_sets(self):
return [node_set.num_members() for node_set in self.node_sets]
def num_all_elements(self):
num_elements = self.num_elements_in_sets()
return sum(num_elements)
def num_all_nodes(self):
num_nodes = self.num_nodes_in_sets()
return sum(num_nodes)
def get_all_elements(self):
all_elements = []
for elem_set in self.element_sets:
all_elements.extend(elem_set.members)
return all_elements
def get_all_nodes(self):
all_nodes = []
for elem_set in self.node_sets:
all_nodes.extend(elem_set.members)
return all_nodes
def remove_element_set_by_name(self, set_name_2_del):
names = self.element_set_names()
set_index = data_IO.get_index_in_str_list(names, set_name_2_del)
self.element_sets.pop(set_index)
def remove_node_set_by_name(self, set_name_2_del):
names = self.node_set_names()
set_index = data_IO.get_index_in_str_list(names, set_name_2_del)
self.node_sets.pop(set_index)
class WeldPasses:
"""Reads and stores the weld pass coordinate information"""
def __init__(self, pass_coor_path):
self.pass_coor_path = pass_coor_path
self.read_num_layers_from_pass_coor_file()
self.read_passes_from_pass_coor_file()
def read_num_layers_from_pass_coor_file(self):
fcp = data_IO.open_file(self.pass_coor_path)
# First get the number of layers:
num_layers = data_IO.read_int_from_file_line_offset(fcp,'Number-of-Layers')
fcp.close()
self.num_layers = num_layers
def read_passes_from_pass_coor_file(self):
fcp = data_IO.open_file(self.pass_coor_path)
# Then, read the passes in each layer
num_passes = 0
for layer in range(self.num_layers):
data = data_IO.read_ints_from_file_line_offset(fcp,'Layer,Number-of-Passes',
delimiter=',', offset=layer,
end_line=1)
num_passes = num_passes + data[1]
fcp.close()
self.num_passes = num_passes
def read_uncoupled_step_time_from_inp(inp_file_path):
"""Read time period of UNCOUPLED TEMPERATURE-DISPLACEMENT steps from ccx input file"""
finp = data_IO.open_file(inp_file_path)
lines = finp.readlines()
finp.close()
line_num = 0
times = []
while line_num is not None:
line_num = data_IO.get_index_in_str_list(lines,
'UNCOUPLED TEMPERATURE-DISPLACEMENT',
start_from=line_num)
if line_num is not None:
times.append(data_IO.read_floats_from_string(lines[line_num+1], ',')[1])
line_num = line_num + 1
return times
| import re
import warnings
import data_IO
def read_setting_from_str(text_line, setting_flag):
x = re.search(setting_flag, text_line, re.IGNORECASE)
if x is None:
warning_msg = 'Cannot find \"{}\" in line \"{}\"'.format(setting_flag, text_line)
print(warning_msg)
warnings.warn(warning_msg)
return None
setting = text_line[x.end():].split(',')[0]
return setting.rstrip()
class Set:
"""Stores set name and members"""
def __init__(self, set_type, name='', members=[]):
self.type = set_type
self.name = name
self.members = members
self.set_type = set_type
def num_members(self):
return len(self.members)
def read_set_name_from_line(self, line):
self.name = read_setting_from_str(line, self.set_type + '=')
def read_members_from_inp(self, inp_file):
self. members = data_IO.read_ints_from_file_line_offset(
inp_file, '*' + self.type + ',' + self.set_type + '=' + self.name,
delimiter=',', end_flag='*')
class ElementSet(Set):
"""Stores element set name and element numbers"""
def __init__(self, name='', members=[]):
Set.__init__(self, 'ELSET', name, members)
class NodeSet(Set):
"""Stores node set name and node numbers"""
def __init__(self, name='', members=[]):
Set.__init__(self, 'NSET', name, members)
def extract_sets_from_inp(finp, set_type):
finp.seek(0)
all_lines = finp.readlines()
line_num = 0
finp_sets = []
while line_num is not None:
line_num = data_IO.get_index_in_str_list(all_lines,
'*' + set_type, start_from=line_num)
if line_num:
set = Set(set_type)
set.read_set_name_from_line(all_lines[line_num])
set.read_members_from_inp(finp)
finp_sets.append(set)
line_num = line_num + 1
return finp_sets
class Mesh:
"""Reads/Stores Node sets and element sets"""
def __init__(self, element_sets=[], node_sets=[]):
self.element_sets = element_sets
self.node_sets = node_sets
def read_element_sets_from_inp(self, inp_file):
fin = data_IO.open_file(inp_file)
self.element_sets = extract_sets_from_inp(fin, 'ELSET')
fin.close()
def read_node_sets_from_inp(self, inp_file):
fin = data_IO.open_file(inp_file)
self.node_sets = extract_sets_from_inp(fin, 'NSET')
fin.close()
def read_mesh_from_inp(self, inp_file):
self.read_node_sets_from_inp(inp_file)
self.read_element_sets_from_inp(inp_file)
def element_set_names(self):
return [element_set.name for element_set in self.element_sets]
def node_set_names(self):
return [node_set.name for node_set in self.node_sets]
def num_element_sets(self):
return len(self.element_sets)
def num_node_sets(self):
return len(self.node_sets)
def num_elements_in_sets(self):
return [element_set.num_members() for element_set in self.element_sets]
def num_nodes_in_sets(self):
return [node_set.num_members() for node_set in self.node_sets]
def num_all_elements(self):
num_elements = self.num_elements_in_sets()
return sum(num_elements)
def num_all_nodes(self):
num_nodes = self.num_nodes_in_sets()
return sum(num_nodes)
def get_all_elements(self):
all_elements = []
for elem_set in self.element_sets:
all_elements.extend(elem_set.members)
return all_elements
def get_all_nodes(self):
all_nodes = []
for elem_set in self.node_sets:
all_nodes.extend(elem_set.members)
return all_nodes
def remove_element_set_by_name(self, set_name_2_del):
names = self.element_set_names()
set_index = data_IO.get_index_in_str_list(names, set_name_2_del)
self.element_sets.pop(set_index)
def remove_node_set_by_name(self, set_name_2_del):
names = self.node_set_names()
set_index = data_IO.get_index_in_str_list(names, set_name_2_del)
self.node_sets.pop(set_index)
class WeldPasses:
"""Reads and stores the weld pass coordinate information"""
def __init__(self, pass_coor_path):
self.pass_coor_path = pass_coor_path
self.read_num_layers_from_pass_coor_file()
self.read_passes_from_pass_coor_file()
def read_num_layers_from_pass_coor_file(self):
fcp = data_IO.open_file(self.pass_coor_path)
# First get the number of layers:
num_layers = data_IO.read_int_from_file_line_offset(fcp,'Number-of-Layers')
fcp.close()
self.num_layers = num_layers
def read_passes_from_pass_coor_file(self):
fcp = data_IO.open_file(self.pass_coor_path)
# Then, read the passes in each layer
num_passes = 0
for layer in range(self.num_layers):
data = data_IO.read_ints_from_file_line_offset(fcp,'Layer,Number-of-Passes',
delimiter=',', offset=layer,
end_line=1)
num_passes = num_passes + data[1]
fcp.close()
self.num_passes = num_passes
def read_uncoupled_step_time_from_inp(inp_file_path):
"""Read time period of UNCOUPLED TEMPERATURE-DISPLACEMENT steps from ccx input file"""
finp = data_IO.open_file(inp_file_path)
lines = finp.readlines()
finp.close()
line_num = 0
times = []
while line_num is not None:
line_num = data_IO.get_index_in_str_list(lines,
'UNCOUPLED TEMPERATURE-DISPLACEMENT',
start_from=line_num)
if line_num is not None:
times.append(data_IO.read_floats_from_string(lines[line_num+1], ',')[1])
line_num = line_num + 1
return times | en | 0.751391 | Stores set name and members Stores element set name and element numbers Stores node set name and node numbers Reads/Stores Node sets and element sets Reads and stores the weld pass coordinate information # First get the number of layers: # Then, read the passes in each layer Read time period of UNCOUPLED TEMPERATURE-DISPLACEMENT steps from ccx input file | 2.785479 | 3 |
tests/settings/__init__.py | Amin-egn/Recipient | 1 | 6624710 | <reponame>Amin-egn/Recipient
from .tests import TestSettings
| from .tests import TestSettings | none | 1 | 1.090032 | 1 | |
src/twisted/test/test_iutils.py | mithodin/twisted | 0 | 6624711 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test running processes with the APIs in L{twisted.internet.utils}.
"""
from __future__ import division, absolute_import
import warnings, os, stat, sys, signal
from twisted.python.compat import _PY3
from twisted.python.runtime import platform
from twisted.trial import unittest
from twisted.internet import error, reactor, utils, interfaces
from twisted.internet.defer import Deferred
from twisted.python.test.test_util import SuppressedWarningsTests
class ProcessUtilsTests(unittest.TestCase):
"""
Test running a process using L{getProcessOutput}, L{getProcessValue}, and
L{getProcessOutputAndValue}.
"""
if interfaces.IReactorProcess(reactor, None) is None:
skip = "reactor doesn't implement IReactorProcess"
output = None
value = None
exe = sys.executable
def makeSourceFile(self, sourceLines):
"""
Write the given list of lines to a text file and return the absolute
path to it.
"""
script = self.mktemp()
with open(script, 'wt') as scriptFile:
scriptFile.write(os.linesep.join(sourceLines) + os.linesep)
return os.path.abspath(script)
def test_output(self):
"""
L{getProcessOutput} returns a L{Deferred} which fires with the complete
output of the process it runs after that process exits.
"""
scriptFile = self.makeSourceFile([
"import sys",
"for s in b'hello world\\n':",
" if hasattr(sys.stdout, 'buffer'):",
" # Python 3",
" s = bytes([s])",
" sys.stdout.buffer.write(s)",
" else:",
" # Python 2",
" sys.stdout.write(s)",
" sys.stdout.flush()"])
d = utils.getProcessOutput(self.exe, ['-u', scriptFile])
return d.addCallback(self.assertEqual, b"hello world\n")
def test_outputWithErrorIgnored(self):
"""
The L{Deferred} returned by L{getProcessOutput} is fired with an
L{IOError} L{Failure} if the child process writes to stderr.
"""
# make sure stderr raises an error normally
scriptFile = self.makeSourceFile([
'import sys',
'sys.stderr.write("hello world\\n")'
])
d = utils.getProcessOutput(self.exe, ['-u', scriptFile])
d = self.assertFailure(d, IOError)
def cbFailed(err):
return self.assertFailure(err.processEnded, error.ProcessDone)
d.addCallback(cbFailed)
return d
def test_outputWithErrorCollected(self):
"""
If a C{True} value is supplied for the C{errortoo} parameter to
L{getProcessOutput}, the returned L{Deferred} fires with the child's
stderr output as well as its stdout output.
"""
scriptFile = self.makeSourceFile([
'import sys',
# Write the same value to both because ordering isn't guaranteed so
# this simplifies the test.
'sys.stdout.write("foo")',
'sys.stdout.flush()',
'sys.stderr.write("foo")',
'sys.stderr.flush()'])
d = utils.getProcessOutput(self.exe, ['-u', scriptFile], errortoo=True)
return d.addCallback(self.assertEqual, b"foofoo")
def test_value(self):
"""
The L{Deferred} returned by L{getProcessValue} is fired with the exit
status of the child process.
"""
scriptFile = self.makeSourceFile(["raise SystemExit(1)"])
d = utils.getProcessValue(self.exe, ['-u', scriptFile])
return d.addCallback(self.assertEqual, 1)
def test_outputAndValue(self):
"""
The L{Deferred} returned by L{getProcessOutputAndValue} fires with a
three-tuple, the elements of which give the data written to the child's
stdout, the data written to the child's stderr, and the exit status of
the child.
"""
scriptFile = self.makeSourceFile([
"import sys",
"if hasattr(sys.stdout, 'buffer'):",
" # Python 3",
" sys.stdout.buffer.write(b'hello world!\\n')",
" sys.stderr.buffer.write(b'goodbye world!\\n')",
"else:",
" # Python 2",
" sys.stdout.write(b'hello world!\\n')",
" sys.stderr.write(b'goodbye world!\\n')",
"sys.exit(1)"
])
def gotOutputAndValue(out_err_code):
out, err, code = out_err_code
self.assertEqual(out, b"hello world!\n")
if _PY3:
self.assertEqual(err, b"goodbye world!\n")
else:
self.assertEqual(err, b"goodbye world!" +
os.linesep)
self.assertEqual(code, 1)
d = utils.getProcessOutputAndValue(self.exe, ["-u", scriptFile])
return d.addCallback(gotOutputAndValue)
def test_outputSignal(self):
"""
If the child process exits because of a signal, the L{Deferred}
returned by L{getProcessOutputAndValue} fires a L{Failure} of a tuple
containing the child's stdout, stderr, and the signal which caused
it to exit.
"""
# Use SIGKILL here because it's guaranteed to be delivered. Using
# SIGHUP might not work in, e.g., a buildbot slave run under the
# 'nohup' command.
scriptFile = self.makeSourceFile([
"import sys, os, signal",
"sys.stdout.write('stdout bytes\\n')",
"sys.stderr.write('stderr bytes\\n')",
"sys.stdout.flush()",
"sys.stderr.flush()",
"os.kill(os.getpid(), signal.SIGKILL)"])
def gotOutputAndValue(out_err_sig):
out, err, sig = out_err_sig
self.assertEqual(out, b"stdout bytes\n")
self.assertEqual(err, b"stderr bytes\n")
self.assertEqual(sig, signal.SIGKILL)
d = utils.getProcessOutputAndValue(self.exe, ['-u', scriptFile])
d = self.assertFailure(d, tuple)
return d.addCallback(gotOutputAndValue)
if platform.isWindows():
test_outputSignal.skip = "Windows doesn't have real signals."
def _pathTest(self, utilFunc, check):
dir = os.path.abspath(self.mktemp())
os.makedirs(dir)
scriptFile = self.makeSourceFile([
"import os, sys",
"sys.stdout.write(os.getcwd())"])
d = utilFunc(self.exe, ['-u', scriptFile], path=dir)
d.addCallback(check, dir.encode(sys.getfilesystemencoding()))
return d
def test_getProcessOutputPath(self):
"""
L{getProcessOutput} runs the given command with the working directory
given by the C{path} parameter.
"""
return self._pathTest(utils.getProcessOutput, self.assertEqual)
def test_getProcessValuePath(self):
"""
L{getProcessValue} runs the given command with the working directory
given by the C{path} parameter.
"""
def check(result, ignored):
self.assertEqual(result, 0)
return self._pathTest(utils.getProcessValue, check)
def test_getProcessOutputAndValuePath(self):
"""
L{getProcessOutputAndValue} runs the given command with the working
directory given by the C{path} parameter.
"""
def check(out_err_status, dir):
out, err, status = out_err_status
self.assertEqual(out, dir)
self.assertEqual(status, 0)
return self._pathTest(utils.getProcessOutputAndValue, check)
def _defaultPathTest(self, utilFunc, check):
# Make another directory to mess around with.
dir = os.path.abspath(self.mktemp())
os.makedirs(dir)
scriptFile = self.makeSourceFile([
"import os, sys",
"cdir = os.getcwd()",
"sys.stdout.write(cdir)"]
)
# Switch to it, but make sure we switch back
self.addCleanup(os.chdir, os.getcwd())
os.chdir(dir)
# Remember its default permissions.
originalMode = stat.S_IMODE(os.stat('.').st_mode)
# On macOS Catalina (and maybe elsewhere), os.getcwd() sometimes fails
# with EACCES if u+rx is missing from the working directory, so don't
# reduce it further than this.
os.chmod(dir, stat.S_IXUSR | stat.S_IRUSR)
# Restore the permissions to their original state later (probably
# adding at least u+w), because otherwise it might be hard to delete
# the trial temporary directory.
self.addCleanup(os.chmod, dir, originalMode)
# Pass in -S so that if run using the coverage .pth trick, it won't be
# loaded and cause Coverage to try and get the current working
# directory (see the comments above why this can be a problem) on OSX.
d = utilFunc(self.exe, ['-S', '-u', scriptFile])
d.addCallback(check, dir.encode(sys.getfilesystemencoding()))
return d
def test_getProcessOutputDefaultPath(self):
"""
If no value is supplied for the C{path} parameter, L{getProcessOutput}
runs the given command in the same working directory as the parent
process and succeeds even if the current working directory is not
accessible.
"""
return self._defaultPathTest(utils.getProcessOutput, self.assertEqual)
def test_getProcessValueDefaultPath(self):
"""
If no value is supplied for the C{path} parameter, L{getProcessValue}
runs the given command in the same working directory as the parent
process and succeeds even if the current working directory is not
accessible.
"""
def check(result, ignored):
self.assertEqual(result, 0)
return self._defaultPathTest(utils.getProcessValue, check)
def test_getProcessOutputAndValueDefaultPath(self):
"""
If no value is supplied for the C{path} parameter,
L{getProcessOutputAndValue} runs the given command in the same working
directory as the parent process and succeeds even if the current
working directory is not accessible.
"""
def check(out_err_status, dir):
out, err, status = out_err_status
self.assertEqual(out, dir)
self.assertEqual(status, 0)
return self._defaultPathTest(
utils.getProcessOutputAndValue, check)
def test_get_processOutputAndValueStdin(self):
"""
Standard input can be made available to the child process by passing
bytes for the `stdinBytes` parameter.
"""
scriptFile = self.makeSourceFile([
"import sys",
"sys.stdout.write(sys.stdin.read())",
])
stdinBytes = b"These are the bytes to see."
d = utils.getProcessOutputAndValue(
self.exe,
['-u', scriptFile],
stdinBytes=stdinBytes,
)
def gotOutputAndValue(out_err_code):
out, err, code = out_err_code
# Avoid making an exact equality comparison in case there is extra
# random output on stdout (warnings, stray print statements,
# logging, who knows).
self.assertIn(stdinBytes, out)
self.assertEqual(0, code)
d.addCallback(gotOutputAndValue)
return d
class SuppressWarningsTests(unittest.SynchronousTestCase):
"""
Tests for L{utils.suppressWarnings}.
"""
def test_suppressWarnings(self):
"""
L{utils.suppressWarnings} decorates a function so that the given
warnings are suppressed.
"""
result = []
def showwarning(self, *a, **kw):
result.append((a, kw))
self.patch(warnings, "showwarning", showwarning)
def f(msg):
warnings.warn(msg)
g = utils.suppressWarnings(f, (('ignore',), dict(message="This is message")))
# Start off with a sanity check - calling the original function
# should emit the warning.
f("Sanity check message")
self.assertEqual(len(result), 1)
# Now that that's out of the way, call the wrapped function, and
# make sure no new warnings show up.
g("This is message")
self.assertEqual(len(result), 1)
# Finally, emit another warning which should not be ignored, and
# make sure it is not.
g("Unignored message")
self.assertEqual(len(result), 2)
class DeferredSuppressedWarningsTests(SuppressedWarningsTests):
"""
Tests for L{utils.runWithWarningsSuppressed}, the version that supports
Deferreds.
"""
# Override the non-Deferred-supporting function from the base class with
# the function we are testing in this class:
runWithWarningsSuppressed = staticmethod(utils.runWithWarningsSuppressed)
def test_deferredCallback(self):
"""
If the function called by L{utils.runWithWarningsSuppressed} returns a
C{Deferred}, the warning filters aren't removed until the Deferred
fires.
"""
filters = [(("ignore", ".*foo.*"), {}),
(("ignore", ".*bar.*"), {})]
result = Deferred()
self.runWithWarningsSuppressed(filters, lambda: result)
warnings.warn("ignore foo")
result.callback(3)
warnings.warn("ignore foo 2")
self.assertEqual(
["ignore foo 2"], [w['message'] for w in self.flushWarnings()])
def test_deferredErrback(self):
"""
If the function called by L{utils.runWithWarningsSuppressed} returns a
C{Deferred}, the warning filters aren't removed until the Deferred
fires with an errback.
"""
filters = [(("ignore", ".*foo.*"), {}),
(("ignore", ".*bar.*"), {})]
result = Deferred()
d = self.runWithWarningsSuppressed(filters, lambda: result)
warnings.warn("ignore foo")
result.errback(ZeroDivisionError())
d.addErrback(lambda f: f.trap(ZeroDivisionError))
warnings.warn("ignore foo 2")
self.assertEqual(
["ignore foo 2"], [w['message'] for w in self.flushWarnings()])
| # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test running processes with the APIs in L{twisted.internet.utils}.
"""
from __future__ import division, absolute_import
import warnings, os, stat, sys, signal
from twisted.python.compat import _PY3
from twisted.python.runtime import platform
from twisted.trial import unittest
from twisted.internet import error, reactor, utils, interfaces
from twisted.internet.defer import Deferred
from twisted.python.test.test_util import SuppressedWarningsTests
class ProcessUtilsTests(unittest.TestCase):
"""
Test running a process using L{getProcessOutput}, L{getProcessValue}, and
L{getProcessOutputAndValue}.
"""
if interfaces.IReactorProcess(reactor, None) is None:
skip = "reactor doesn't implement IReactorProcess"
output = None
value = None
exe = sys.executable
def makeSourceFile(self, sourceLines):
"""
Write the given list of lines to a text file and return the absolute
path to it.
"""
script = self.mktemp()
with open(script, 'wt') as scriptFile:
scriptFile.write(os.linesep.join(sourceLines) + os.linesep)
return os.path.abspath(script)
def test_output(self):
"""
L{getProcessOutput} returns a L{Deferred} which fires with the complete
output of the process it runs after that process exits.
"""
scriptFile = self.makeSourceFile([
"import sys",
"for s in b'hello world\\n':",
" if hasattr(sys.stdout, 'buffer'):",
" # Python 3",
" s = bytes([s])",
" sys.stdout.buffer.write(s)",
" else:",
" # Python 2",
" sys.stdout.write(s)",
" sys.stdout.flush()"])
d = utils.getProcessOutput(self.exe, ['-u', scriptFile])
return d.addCallback(self.assertEqual, b"hello world\n")
def test_outputWithErrorIgnored(self):
"""
The L{Deferred} returned by L{getProcessOutput} is fired with an
L{IOError} L{Failure} if the child process writes to stderr.
"""
# make sure stderr raises an error normally
scriptFile = self.makeSourceFile([
'import sys',
'sys.stderr.write("hello world\\n")'
])
d = utils.getProcessOutput(self.exe, ['-u', scriptFile])
d = self.assertFailure(d, IOError)
def cbFailed(err):
return self.assertFailure(err.processEnded, error.ProcessDone)
d.addCallback(cbFailed)
return d
def test_outputWithErrorCollected(self):
"""
If a C{True} value is supplied for the C{errortoo} parameter to
L{getProcessOutput}, the returned L{Deferred} fires with the child's
stderr output as well as its stdout output.
"""
scriptFile = self.makeSourceFile([
'import sys',
# Write the same value to both because ordering isn't guaranteed so
# this simplifies the test.
'sys.stdout.write("foo")',
'sys.stdout.flush()',
'sys.stderr.write("foo")',
'sys.stderr.flush()'])
d = utils.getProcessOutput(self.exe, ['-u', scriptFile], errortoo=True)
return d.addCallback(self.assertEqual, b"foofoo")
def test_value(self):
"""
The L{Deferred} returned by L{getProcessValue} is fired with the exit
status of the child process.
"""
scriptFile = self.makeSourceFile(["raise SystemExit(1)"])
d = utils.getProcessValue(self.exe, ['-u', scriptFile])
return d.addCallback(self.assertEqual, 1)
def test_outputAndValue(self):
"""
The L{Deferred} returned by L{getProcessOutputAndValue} fires with a
three-tuple, the elements of which give the data written to the child's
stdout, the data written to the child's stderr, and the exit status of
the child.
"""
scriptFile = self.makeSourceFile([
"import sys",
"if hasattr(sys.stdout, 'buffer'):",
" # Python 3",
" sys.stdout.buffer.write(b'hello world!\\n')",
" sys.stderr.buffer.write(b'goodbye world!\\n')",
"else:",
" # Python 2",
" sys.stdout.write(b'hello world!\\n')",
" sys.stderr.write(b'goodbye world!\\n')",
"sys.exit(1)"
])
def gotOutputAndValue(out_err_code):
out, err, code = out_err_code
self.assertEqual(out, b"hello world!\n")
if _PY3:
self.assertEqual(err, b"goodbye world!\n")
else:
self.assertEqual(err, b"goodbye world!" +
os.linesep)
self.assertEqual(code, 1)
d = utils.getProcessOutputAndValue(self.exe, ["-u", scriptFile])
return d.addCallback(gotOutputAndValue)
def test_outputSignal(self):
"""
If the child process exits because of a signal, the L{Deferred}
returned by L{getProcessOutputAndValue} fires a L{Failure} of a tuple
containing the child's stdout, stderr, and the signal which caused
it to exit.
"""
# Use SIGKILL here because it's guaranteed to be delivered. Using
# SIGHUP might not work in, e.g., a buildbot slave run under the
# 'nohup' command.
scriptFile = self.makeSourceFile([
"import sys, os, signal",
"sys.stdout.write('stdout bytes\\n')",
"sys.stderr.write('stderr bytes\\n')",
"sys.stdout.flush()",
"sys.stderr.flush()",
"os.kill(os.getpid(), signal.SIGKILL)"])
def gotOutputAndValue(out_err_sig):
out, err, sig = out_err_sig
self.assertEqual(out, b"stdout bytes\n")
self.assertEqual(err, b"stderr bytes\n")
self.assertEqual(sig, signal.SIGKILL)
d = utils.getProcessOutputAndValue(self.exe, ['-u', scriptFile])
d = self.assertFailure(d, tuple)
return d.addCallback(gotOutputAndValue)
if platform.isWindows():
test_outputSignal.skip = "Windows doesn't have real signals."
def _pathTest(self, utilFunc, check):
dir = os.path.abspath(self.mktemp())
os.makedirs(dir)
scriptFile = self.makeSourceFile([
"import os, sys",
"sys.stdout.write(os.getcwd())"])
d = utilFunc(self.exe, ['-u', scriptFile], path=dir)
d.addCallback(check, dir.encode(sys.getfilesystemencoding()))
return d
def test_getProcessOutputPath(self):
"""
L{getProcessOutput} runs the given command with the working directory
given by the C{path} parameter.
"""
return self._pathTest(utils.getProcessOutput, self.assertEqual)
def test_getProcessValuePath(self):
"""
L{getProcessValue} runs the given command with the working directory
given by the C{path} parameter.
"""
def check(result, ignored):
self.assertEqual(result, 0)
return self._pathTest(utils.getProcessValue, check)
def test_getProcessOutputAndValuePath(self):
"""
L{getProcessOutputAndValue} runs the given command with the working
directory given by the C{path} parameter.
"""
def check(out_err_status, dir):
out, err, status = out_err_status
self.assertEqual(out, dir)
self.assertEqual(status, 0)
return self._pathTest(utils.getProcessOutputAndValue, check)
def _defaultPathTest(self, utilFunc, check):
# Make another directory to mess around with.
dir = os.path.abspath(self.mktemp())
os.makedirs(dir)
scriptFile = self.makeSourceFile([
"import os, sys",
"cdir = os.getcwd()",
"sys.stdout.write(cdir)"]
)
# Switch to it, but make sure we switch back
self.addCleanup(os.chdir, os.getcwd())
os.chdir(dir)
# Remember its default permissions.
originalMode = stat.S_IMODE(os.stat('.').st_mode)
# On macOS Catalina (and maybe elsewhere), os.getcwd() sometimes fails
# with EACCES if u+rx is missing from the working directory, so don't
# reduce it further than this.
os.chmod(dir, stat.S_IXUSR | stat.S_IRUSR)
# Restore the permissions to their original state later (probably
# adding at least u+w), because otherwise it might be hard to delete
# the trial temporary directory.
self.addCleanup(os.chmod, dir, originalMode)
# Pass in -S so that if run using the coverage .pth trick, it won't be
# loaded and cause Coverage to try and get the current working
# directory (see the comments above why this can be a problem) on OSX.
d = utilFunc(self.exe, ['-S', '-u', scriptFile])
d.addCallback(check, dir.encode(sys.getfilesystemencoding()))
return d
def test_getProcessOutputDefaultPath(self):
"""
If no value is supplied for the C{path} parameter, L{getProcessOutput}
runs the given command in the same working directory as the parent
process and succeeds even if the current working directory is not
accessible.
"""
return self._defaultPathTest(utils.getProcessOutput, self.assertEqual)
def test_getProcessValueDefaultPath(self):
"""
If no value is supplied for the C{path} parameter, L{getProcessValue}
runs the given command in the same working directory as the parent
process and succeeds even if the current working directory is not
accessible.
"""
def check(result, ignored):
self.assertEqual(result, 0)
return self._defaultPathTest(utils.getProcessValue, check)
def test_getProcessOutputAndValueDefaultPath(self):
"""
If no value is supplied for the C{path} parameter,
L{getProcessOutputAndValue} runs the given command in the same working
directory as the parent process and succeeds even if the current
working directory is not accessible.
"""
def check(out_err_status, dir):
out, err, status = out_err_status
self.assertEqual(out, dir)
self.assertEqual(status, 0)
return self._defaultPathTest(
utils.getProcessOutputAndValue, check)
def test_get_processOutputAndValueStdin(self):
"""
Standard input can be made available to the child process by passing
bytes for the `stdinBytes` parameter.
"""
scriptFile = self.makeSourceFile([
"import sys",
"sys.stdout.write(sys.stdin.read())",
])
stdinBytes = b"These are the bytes to see."
d = utils.getProcessOutputAndValue(
self.exe,
['-u', scriptFile],
stdinBytes=stdinBytes,
)
def gotOutputAndValue(out_err_code):
out, err, code = out_err_code
# Avoid making an exact equality comparison in case there is extra
# random output on stdout (warnings, stray print statements,
# logging, who knows).
self.assertIn(stdinBytes, out)
self.assertEqual(0, code)
d.addCallback(gotOutputAndValue)
return d
class SuppressWarningsTests(unittest.SynchronousTestCase):
"""
Tests for L{utils.suppressWarnings}.
"""
def test_suppressWarnings(self):
"""
L{utils.suppressWarnings} decorates a function so that the given
warnings are suppressed.
"""
result = []
def showwarning(self, *a, **kw):
result.append((a, kw))
self.patch(warnings, "showwarning", showwarning)
def f(msg):
warnings.warn(msg)
g = utils.suppressWarnings(f, (('ignore',), dict(message="This is message")))
# Start off with a sanity check - calling the original function
# should emit the warning.
f("Sanity check message")
self.assertEqual(len(result), 1)
# Now that that's out of the way, call the wrapped function, and
# make sure no new warnings show up.
g("This is message")
self.assertEqual(len(result), 1)
# Finally, emit another warning which should not be ignored, and
# make sure it is not.
g("Unignored message")
self.assertEqual(len(result), 2)
class DeferredSuppressedWarningsTests(SuppressedWarningsTests):
"""
Tests for L{utils.runWithWarningsSuppressed}, the version that supports
Deferreds.
"""
# Override the non-Deferred-supporting function from the base class with
# the function we are testing in this class:
runWithWarningsSuppressed = staticmethod(utils.runWithWarningsSuppressed)
def test_deferredCallback(self):
"""
If the function called by L{utils.runWithWarningsSuppressed} returns a
C{Deferred}, the warning filters aren't removed until the Deferred
fires.
"""
filters = [(("ignore", ".*foo.*"), {}),
(("ignore", ".*bar.*"), {})]
result = Deferred()
self.runWithWarningsSuppressed(filters, lambda: result)
warnings.warn("ignore foo")
result.callback(3)
warnings.warn("ignore foo 2")
self.assertEqual(
["ignore foo 2"], [w['message'] for w in self.flushWarnings()])
def test_deferredErrback(self):
"""
If the function called by L{utils.runWithWarningsSuppressed} returns a
C{Deferred}, the warning filters aren't removed until the Deferred
fires with an errback.
"""
filters = [(("ignore", ".*foo.*"), {}),
(("ignore", ".*bar.*"), {})]
result = Deferred()
d = self.runWithWarningsSuppressed(filters, lambda: result)
warnings.warn("ignore foo")
result.errback(ZeroDivisionError())
d.addErrback(lambda f: f.trap(ZeroDivisionError))
warnings.warn("ignore foo 2")
self.assertEqual(
["ignore foo 2"], [w['message'] for w in self.flushWarnings()])
| en | 0.846631 | # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. Test running processes with the APIs in L{twisted.internet.utils}. Test running a process using L{getProcessOutput}, L{getProcessValue}, and L{getProcessOutputAndValue}. Write the given list of lines to a text file and return the absolute path to it. L{getProcessOutput} returns a L{Deferred} which fires with the complete output of the process it runs after that process exits. # Python 3", # Python 2", The L{Deferred} returned by L{getProcessOutput} is fired with an L{IOError} L{Failure} if the child process writes to stderr. # make sure stderr raises an error normally If a C{True} value is supplied for the C{errortoo} parameter to L{getProcessOutput}, the returned L{Deferred} fires with the child's stderr output as well as its stdout output. # Write the same value to both because ordering isn't guaranteed so # this simplifies the test. The L{Deferred} returned by L{getProcessValue} is fired with the exit status of the child process. The L{Deferred} returned by L{getProcessOutputAndValue} fires with a three-tuple, the elements of which give the data written to the child's stdout, the data written to the child's stderr, and the exit status of the child. # Python 3", # Python 2", If the child process exits because of a signal, the L{Deferred} returned by L{getProcessOutputAndValue} fires a L{Failure} of a tuple containing the child's stdout, stderr, and the signal which caused it to exit. # Use SIGKILL here because it's guaranteed to be delivered. Using # SIGHUP might not work in, e.g., a buildbot slave run under the # 'nohup' command. L{getProcessOutput} runs the given command with the working directory given by the C{path} parameter. L{getProcessValue} runs the given command with the working directory given by the C{path} parameter. L{getProcessOutputAndValue} runs the given command with the working directory given by the C{path} parameter. 
# Make another directory to mess around with. # Switch to it, but make sure we switch back # Remember its default permissions. # On macOS Catalina (and maybe elsewhere), os.getcwd() sometimes fails # with EACCES if u+rx is missing from the working directory, so don't # reduce it further than this. # Restore the permissions to their original state later (probably # adding at least u+w), because otherwise it might be hard to delete # the trial temporary directory. # Pass in -S so that if run using the coverage .pth trick, it won't be # loaded and cause Coverage to try and get the current working # directory (see the comments above why this can be a problem) on OSX. If no value is supplied for the C{path} parameter, L{getProcessOutput} runs the given command in the same working directory as the parent process and succeeds even if the current working directory is not accessible. If no value is supplied for the C{path} parameter, L{getProcessValue} runs the given command in the same working directory as the parent process and succeeds even if the current working directory is not accessible. If no value is supplied for the C{path} parameter, L{getProcessOutputAndValue} runs the given command in the same working directory as the parent process and succeeds even if the current working directory is not accessible. Standard input can be made available to the child process by passing bytes for the `stdinBytes` parameter. # Avoid making an exact equality comparison in case there is extra # random output on stdout (warnings, stray print statements, # logging, who knows). Tests for L{utils.suppressWarnings}. L{utils.suppressWarnings} decorates a function so that the given warnings are suppressed. # Start off with a sanity check - calling the original function # should emit the warning. # Now that that's out of the way, call the wrapped function, and # make sure no new warnings show up. # Finally, emit another warning which should not be ignored, and # make sure it is not. 
Tests for L{utils.runWithWarningsSuppressed}, the version that supports Deferreds. # Override the non-Deferred-supporting function from the base class with # the function we are testing in this class: If the function called by L{utils.runWithWarningsSuppressed} returns a C{Deferred}, the warning filters aren't removed until the Deferred fires. If the function called by L{utils.runWithWarningsSuppressed} returns a C{Deferred}, the warning filters aren't removed until the Deferred fires with an errback. | 2.113444 | 2 |
srm-service.py | dmshch/srm-service | 0 | 6624712 | # Copyright © 2020 <NAME>. All rights reserved.
# run point
from servmoncode import monitoring_process
monitoring_process.start()
| # Copyright © 2020 <NAME>. All rights reserved.
# run point
from servmoncode import monitoring_process
monitoring_process.start()
| en | 0.846676 | # Copyright © 2020 <NAME>. All rights reserved. # run point | 1.330306 | 1 |
h2o-py/tests/testdir_algos/gbm/pyunit_DEPRECATED_weightsGBM.py | huamichaelchen/h2o-3 | 0 | 6624713 | import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
import random
def weights_check():
def check_same(data1, data2, min_rows_scale):
gbm1_regression = h2o.gbm(x=data1[["displacement", "power", "weight", "acceleration", "year"]],
y="economy",
training_frame=data1,
min_rows=5,
ntrees=5,
max_depth=5)
gbm2_regression = h2o.gbm(x=data2[["displacement", "power", "weight", "acceleration", "year", "weights"]],
y=data2["economy"],
min_rows=5*min_rows_scale,
weights_column=data2["weights"],
ntrees=5,
max_depth=5)
gbm1_binomial = h2o.gbm(x=data1[["displacement", "power", "weight", "acceleration", "year"]],
y=data1["economy_20mpg"],
min_rows=5,
distribution="bernoulli",
ntrees=5,
max_depth=5)
gbm2_binomial = h2o.gbm(x=data2[["displacement", "power", "weight", "acceleration", "year", "weights"]],
y=data2["economy_20mpg"],
weights_column="weights",
training_frame=data2,
min_rows=5*min_rows_scale,
distribution="bernoulli",
ntrees=5,
max_depth=5)
gbm1_multinomial = h2o.gbm(x=data1[["displacement", "power", "weight", "acceleration", "year"]],
y=data1["cylinders"],
min_rows=5,
distribution="multinomial",
ntrees=5,
max_depth=5)
gbm2_multinomial = h2o.gbm(x=data2[["displacement", "power", "weight", "acceleration", "year", "weights"]],
y=data2["cylinders"],
weights_column="weights",
training_frame=data2,
min_rows=5*min_rows_scale,
distribution="multinomial",
ntrees=5,
max_depth=5)
reg1_mse = gbm1_regression.mse()
reg2_mse = gbm2_regression.mse()
bin1_auc = gbm1_binomial.auc()
bin2_auc = gbm2_binomial.auc()
mul1_mse = gbm1_multinomial.mse()
mul2_mse = gbm2_multinomial.mse()
print "MSE (regresson) no weights vs. weights: {0}, {1}".format(reg1_mse, reg2_mse)
print "AUC (binomial) no weights vs. weights: {0}, {1}".format(bin1_auc, bin2_auc)
print "MSE (multinomial) no weights vs. weights: {0}, {1}".format(mul1_mse, mul2_mse)
assert abs(reg1_mse - reg2_mse) < 1e-6 * reg1_mse, "Expected mse's to be the same, but got {0}, and {1}".format(reg1_mse, reg2_mse)
assert abs(bin1_auc - bin2_auc) < 3e-4 * bin1_auc, "Expected auc's to be the same, but got {0}, and {1}".format(bin1_auc, bin2_auc)
assert abs(mul1_mse - mul1_mse) < 1e-6 * mul1_mse, "Expected auc's to be the same, but got {0}, and {1}".format(mul1_mse, mul2_mse)
h2o_cars_data = h2o.import_file(pyunit_utils.locate("smalldata/junit/cars_20mpg.csv"))
h2o_cars_data["economy_20mpg"] = h2o_cars_data["economy_20mpg"].asfactor()
h2o_cars_data["cylinders"] = h2o_cars_data["cylinders"].asfactor()
# uniform weights same as no weights
random.seed(2222)
weight = random.randint(1,10)
uniform_weights = [[weight]*406]
h2o_uniform_weights = h2o.H2OFrame(uniform_weights)
h2o_uniform_weights.set_names(["weights"])
h2o_data_uniform_weights = h2o_cars_data.cbind(h2o_uniform_weights)
print "Checking that using uniform weights is equivalent to no weights:"
print
check_same(h2o_cars_data, h2o_data_uniform_weights, weight)
# zero weights same as removed observations
zero_weights = [[0 if random.randint(0,1) else 1 for r in range(406)]]
h2o_zero_weights = h2o.H2OFrame(zero_weights)
h2o_zero_weights.set_names(["weights"])
h2o_data_zero_weights = h2o_cars_data.cbind(h2o_zero_weights)
h2o_data_zeros_removed = h2o_cars_data[h2o_zero_weights["weights"] == 1]
print "Checking that using some zero weights is equivalent to removing those observations:"
print
check_same(h2o_data_zeros_removed, h2o_data_zero_weights, 1)
# doubled weights same as doubled observations
doubled_weights = [[1 if random.randint(0,1) else 2 for r in range(406)]]
h2o_doubled_weights = h2o.H2OFrame(doubled_weights)
h2o_doubled_weights.set_names(["weights"])
h2o_data_doubled_weights = h2o_cars_data.cbind(h2o_doubled_weights)
doubled_data = h2o.as_list(h2o_cars_data, use_pandas=False)
doubled_data = zip(*doubled_data)
colnames = doubled_data.pop(0)
for idx, w in enumerate(doubled_weights[0]):
if w == 2: doubled_data.append(doubled_data[idx])
doubled_data = zip(*doubled_data)
h2o_data_doubled = h2o.H2OFrame(doubled_data)
h2o_data_doubled.set_names(list(colnames))
h2o_data_doubled["economy_20mpg"] = h2o_data_doubled["economy_20mpg"].asfactor()
h2o_data_doubled["cylinders"] = h2o_data_doubled["cylinders"].asfactor()
h2o_data_doubled_weights["economy_20mpg"] = h2o_data_doubled_weights["economy_20mpg"].asfactor()
h2o_data_doubled_weights["cylinders"] = h2o_data_doubled_weights["cylinders"].asfactor()
print "Checking that doubling some weights is equivalent to doubling those observations:"
print
check_same(h2o_data_doubled, h2o_data_doubled_weights, 1)
# TODO: random weights
# TODO: all zero weights???
# TODO: negative weights???
if __name__ == "__main__":
pyunit_utils.standalone_test(weights_check)
else:
weights_check()
| import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
import random
def weights_check():
def check_same(data1, data2, min_rows_scale):
gbm1_regression = h2o.gbm(x=data1[["displacement", "power", "weight", "acceleration", "year"]],
y="economy",
training_frame=data1,
min_rows=5,
ntrees=5,
max_depth=5)
gbm2_regression = h2o.gbm(x=data2[["displacement", "power", "weight", "acceleration", "year", "weights"]],
y=data2["economy"],
min_rows=5*min_rows_scale,
weights_column=data2["weights"],
ntrees=5,
max_depth=5)
gbm1_binomial = h2o.gbm(x=data1[["displacement", "power", "weight", "acceleration", "year"]],
y=data1["economy_20mpg"],
min_rows=5,
distribution="bernoulli",
ntrees=5,
max_depth=5)
gbm2_binomial = h2o.gbm(x=data2[["displacement", "power", "weight", "acceleration", "year", "weights"]],
y=data2["economy_20mpg"],
weights_column="weights",
training_frame=data2,
min_rows=5*min_rows_scale,
distribution="bernoulli",
ntrees=5,
max_depth=5)
gbm1_multinomial = h2o.gbm(x=data1[["displacement", "power", "weight", "acceleration", "year"]],
y=data1["cylinders"],
min_rows=5,
distribution="multinomial",
ntrees=5,
max_depth=5)
gbm2_multinomial = h2o.gbm(x=data2[["displacement", "power", "weight", "acceleration", "year", "weights"]],
y=data2["cylinders"],
weights_column="weights",
training_frame=data2,
min_rows=5*min_rows_scale,
distribution="multinomial",
ntrees=5,
max_depth=5)
reg1_mse = gbm1_regression.mse()
reg2_mse = gbm2_regression.mse()
bin1_auc = gbm1_binomial.auc()
bin2_auc = gbm2_binomial.auc()
mul1_mse = gbm1_multinomial.mse()
mul2_mse = gbm2_multinomial.mse()
print "MSE (regresson) no weights vs. weights: {0}, {1}".format(reg1_mse, reg2_mse)
print "AUC (binomial) no weights vs. weights: {0}, {1}".format(bin1_auc, bin2_auc)
print "MSE (multinomial) no weights vs. weights: {0}, {1}".format(mul1_mse, mul2_mse)
assert abs(reg1_mse - reg2_mse) < 1e-6 * reg1_mse, "Expected mse's to be the same, but got {0}, and {1}".format(reg1_mse, reg2_mse)
assert abs(bin1_auc - bin2_auc) < 3e-4 * bin1_auc, "Expected auc's to be the same, but got {0}, and {1}".format(bin1_auc, bin2_auc)
assert abs(mul1_mse - mul1_mse) < 1e-6 * mul1_mse, "Expected auc's to be the same, but got {0}, and {1}".format(mul1_mse, mul2_mse)
h2o_cars_data = h2o.import_file(pyunit_utils.locate("smalldata/junit/cars_20mpg.csv"))
h2o_cars_data["economy_20mpg"] = h2o_cars_data["economy_20mpg"].asfactor()
h2o_cars_data["cylinders"] = h2o_cars_data["cylinders"].asfactor()
# uniform weights same as no weights
random.seed(2222)
weight = random.randint(1,10)
uniform_weights = [[weight]*406]
h2o_uniform_weights = h2o.H2OFrame(uniform_weights)
h2o_uniform_weights.set_names(["weights"])
h2o_data_uniform_weights = h2o_cars_data.cbind(h2o_uniform_weights)
print "Checking that using uniform weights is equivalent to no weights:"
print
check_same(h2o_cars_data, h2o_data_uniform_weights, weight)
# zero weights same as removed observations
zero_weights = [[0 if random.randint(0,1) else 1 for r in range(406)]]
h2o_zero_weights = h2o.H2OFrame(zero_weights)
h2o_zero_weights.set_names(["weights"])
h2o_data_zero_weights = h2o_cars_data.cbind(h2o_zero_weights)
h2o_data_zeros_removed = h2o_cars_data[h2o_zero_weights["weights"] == 1]
print "Checking that using some zero weights is equivalent to removing those observations:"
print
check_same(h2o_data_zeros_removed, h2o_data_zero_weights, 1)
# doubled weights same as doubled observations
doubled_weights = [[1 if random.randint(0,1) else 2 for r in range(406)]]
h2o_doubled_weights = h2o.H2OFrame(doubled_weights)
h2o_doubled_weights.set_names(["weights"])
h2o_data_doubled_weights = h2o_cars_data.cbind(h2o_doubled_weights)
doubled_data = h2o.as_list(h2o_cars_data, use_pandas=False)
doubled_data = zip(*doubled_data)
colnames = doubled_data.pop(0)
for idx, w in enumerate(doubled_weights[0]):
if w == 2: doubled_data.append(doubled_data[idx])
doubled_data = zip(*doubled_data)
h2o_data_doubled = h2o.H2OFrame(doubled_data)
h2o_data_doubled.set_names(list(colnames))
h2o_data_doubled["economy_20mpg"] = h2o_data_doubled["economy_20mpg"].asfactor()
h2o_data_doubled["cylinders"] = h2o_data_doubled["cylinders"].asfactor()
h2o_data_doubled_weights["economy_20mpg"] = h2o_data_doubled_weights["economy_20mpg"].asfactor()
h2o_data_doubled_weights["cylinders"] = h2o_data_doubled_weights["cylinders"].asfactor()
print "Checking that doubling some weights is equivalent to doubling those observations:"
print
check_same(h2o_data_doubled, h2o_data_doubled_weights, 1)
# TODO: random weights
# TODO: all zero weights???
# TODO: negative weights???
if __name__ == "__main__":
pyunit_utils.standalone_test(weights_check)
else:
weights_check()
| en | 0.93691 | # uniform weights same as no weights # zero weights same as removed observations # doubled weights same as doubled observations # TODO: random weights # TODO: all zero weights??? # TODO: negative weights??? | 2.449243 | 2 |
dephell_archive/_stream.py | jhermann/dephell_archive | 0 | 6624714 | # built-in
from contextlib import suppress
from pathlib import Path, PurePath
from typing import List, Optional, Set
# external
import attr
# app
from ._cached_property import cached_property
def _dir_list(filelist: List[str]) -> Set[str]:
# paths starting with '/' or containing '.' are not supported
dir_list = set() # type: Set[str]
for path in filelist:
while path:
path, _, _ = path.rpartition('/')
if not path or path in dir_list:
break
dir_list.add(path)
return dir_list
@attr.s()
class ArchiveStream:
descriptor = attr.ib()
cache_path = attr.ib(type=Path)
member_path = attr.ib(type=PurePath)
mode = attr.ib(type=str, default='r')
encoding = attr.ib(type=Optional[str], default=None)
# private
@cached_property
def _is_tar(self) -> bool:
return hasattr(self.descriptor, 'getmember')
@cached_property
def _dir_list(self) -> Set[str]:
return _dir_list(self.descriptor.namelist())
@cached_property
def _info(self):
path = self.member_path.as_posix()
with suppress(KeyError):
if self._is_tar:
return self.descriptor.getmember(path)
try:
return self.descriptor.getinfo(path) # zip file
except KeyError:
return self.descriptor.getinfo(path + '/') # zip dir
return None
@cached_property
def _is_implicit_dir(self) -> bool:
# Only zip have implicit dirs
if self._is_tar:
return False
path = self.member_path.as_posix()
return path in self._dir_list
# used from ArchivePath
def exists(self) -> bool:
return self.is_file() or self.is_dir()
def is_file(self) -> bool:
if self._info is None:
return False
if self._is_tar:
return self._info.isfile()
# zip
return self._info.filename[-1] != '/'
def is_dir(self) -> bool:
if self._info is None:
return self._is_implicit_dir
if self._is_tar:
return self._info.isdir()
# zip explicit dir entry
return self._info.filename[-1] == '/'
# public interface
def read(self):
if not self.member_path.name:
raise NotImplementedError
path = self.cache_path / self.member_path
if path.exists():
raise FileExistsError('file in cache created between open and read')
# extract to cache
self.descriptor.extract(member=self._info, path=str(self.cache_path))
# read from cache
with path.open(self.mode, encoding=self.encoding) as stream:
return stream.read()
| # built-in
from contextlib import suppress
from pathlib import Path, PurePath
from typing import List, Optional, Set
# external
import attr
# app
from ._cached_property import cached_property
def _dir_list(filelist: List[str]) -> Set[str]:
# paths starting with '/' or containing '.' are not supported
dir_list = set() # type: Set[str]
for path in filelist:
while path:
path, _, _ = path.rpartition('/')
if not path or path in dir_list:
break
dir_list.add(path)
return dir_list
@attr.s()
class ArchiveStream:
descriptor = attr.ib()
cache_path = attr.ib(type=Path)
member_path = attr.ib(type=PurePath)
mode = attr.ib(type=str, default='r')
encoding = attr.ib(type=Optional[str], default=None)
# private
@cached_property
def _is_tar(self) -> bool:
return hasattr(self.descriptor, 'getmember')
@cached_property
def _dir_list(self) -> Set[str]:
return _dir_list(self.descriptor.namelist())
@cached_property
def _info(self):
path = self.member_path.as_posix()
with suppress(KeyError):
if self._is_tar:
return self.descriptor.getmember(path)
try:
return self.descriptor.getinfo(path) # zip file
except KeyError:
return self.descriptor.getinfo(path + '/') # zip dir
return None
@cached_property
def _is_implicit_dir(self) -> bool:
# Only zip have implicit dirs
if self._is_tar:
return False
path = self.member_path.as_posix()
return path in self._dir_list
# used from ArchivePath
def exists(self) -> bool:
return self.is_file() or self.is_dir()
def is_file(self) -> bool:
if self._info is None:
return False
if self._is_tar:
return self._info.isfile()
# zip
return self._info.filename[-1] != '/'
def is_dir(self) -> bool:
if self._info is None:
return self._is_implicit_dir
if self._is_tar:
return self._info.isdir()
# zip explicit dir entry
return self._info.filename[-1] == '/'
# public interface
def read(self):
if not self.member_path.name:
raise NotImplementedError
path = self.cache_path / self.member_path
if path.exists():
raise FileExistsError('file in cache created between open and read')
# extract to cache
self.descriptor.extract(member=self._info, path=str(self.cache_path))
# read from cache
with path.open(self.mode, encoding=self.encoding) as stream:
return stream.read()
| en | 0.750272 | # built-in # external # app # paths starting with '/' or containing '.' are not supported # type: Set[str] # private # zip file # zip dir # Only zip have implicit dirs # used from ArchivePath # zip # zip explicit dir entry # public interface # extract to cache # read from cache | 2.290866 | 2 |
pyvplm/gui/csv_export.py | ArthurAmmeux/pyVPLM-GUI | 0 | 6624715 | import numpy as np
import csv
import pandas as pd
def open_csv_file(f_name):
    """Create and open a fresh csv file, avoiding name collisions.

    :param f_name: desired name of the csv file (expects a 4-character
        extension such as ".csv")
    :return: tuple (open file object, actual file name); when *f_name*
        already exists, "(1)" is inserted before the extension as many
        times as needed until an unused name is found
    """
    while True:
        try:
            # "x" mode fails with an error if the file already exists
            handle = open(f_name, "x")
            return handle, f_name
        except IOError:
            f_name = f_name[:-4] + "(1)" + f_name[-4:]
def generate_csv(doeX, file_name, parameter_set, out_headers):
    """Write a DOE to a csv file, one column per physical parameter.

    Parameters
    ----------
    doeX : numpy.ndarray
        DOE points in the physical space, one row per experiment.
    file_name : str
        Name of the .csv file (with extension); if it already exists,
        "(1)" is inserted before the extension (see open_csv_file).
    parameter_set : PositiveParameterSet
        Current physical parameter set; its keys and defined units are
        used to build the "name [unit]" column headers.
    out_headers : list of str
        Headers of output physical parameters, appended after the
        parameter columns.

    Returns
    -------
    None
    """
    # Reserve a non-existing file name; close the creation handle right
    # away so it is not leaked (the file is re-opened for writing below).
    handle, file_name = open_csv_file(file_name)
    handle.close()
    with open(file_name, 'w', encoding='UTF8', newline='') as out_file:
        writer = csv.writer(out_file)
        headers = []
        for key in parameter_set.dictionary:
            headers.append(f"{key} [{parameter_set.dictionary[key].defined_units}]")
        writer.writerow(headers + out_headers)
        for point in doeX.tolist():
            writer.writerow(point)
def format_headers(headers):
    """Convert plain header strings to the v.DataTable header format.

    Parameters
    ----------
    headers : list of str
        Header names to be formatted.

    Returns
    -------
    list of dict
        One dict per header with the 'text', 'sortable' and 'value'
        keys expected by v.DataTable (all columns sortable).
    """
    return [{'text': name, 'sortable': True, 'value': name} for name in headers]
def check_headers(df_headers, physical_parameters):
    """
    Validate csv headers against the defined physical parameters.

    Each header must have the form "name [unit]"; there must be exactly
    one header per defined parameter, and both names and units must match
    the parameter set (name matching is order-insensitive).

    Parameters
    ----------
    df_headers Headers to be checked (List of "name [unit]" str)
    physical_parameters Current set of physical parameters (PositiveParameterSet)

    Returns Raises exceptions if the headers are invalid (ex: corresponds to no physical parameter)
    -------
    """
    params = list(physical_parameters.dictionary.keys())
    raw_headers = []  # parameter names extracted from the headers
    units = []  # unit strings extracted from the headers
    for header in df_headers:
        try:
            # Expected format "name [unit]": name before '[', unit inside the brackets
            spt = header.split("[")
            raw_headers.append(spt[0].strip())
            units.append(spt[1].split("]")[0])
        except Exception:
            raise SyntaxError("Invalid csv headers")
    if len(raw_headers) < len(params):
        raise ValueError(
            f"Not enough columns ({len(raw_headers)}, should be {len(params)}), physical parameter missing")
    if len(raw_headers) > len(params):
        raise ValueError(
            f"Too many columns ({len(raw_headers)}, should be {len(params)}),"
            f" inconsistent with defined physical parameters")
    # Match each header against the not-yet-matched parameters; popping the
    # matched parameter prevents duplicated headers from matching twice.
    remaining_params = params.copy()
    for i, header in enumerate(raw_headers):
        valid = False
        j_ = 0
        for j, param in enumerate(remaining_params):
            if header == param:
                valid = True
                j_ = j  # remember index of the matched parameter
                break
        if not valid:
            raise ValueError(
                f"CSV headers and defined physical parameters do not match: {header} =/= {remaining_params[0]}")
        else:
            # The header unit must match the defined units of the matched parameter.
            cur_unit = physical_parameters.dictionary[remaining_params[j_]].defined_units
            remaining_params.pop(j_)
            if units[i] != cur_unit:
                raise ValueError(
                    f"CSV units and defined physical parameters units do not match: {units[i]} =/= {cur_unit}")
def check_content(result_df):
    """Check a result DataFrame for missing values.

    Parameters
    ----------
    result_df : pandas.DataFrame
        DataFrame with the result to be imported.

    Returns
    -------
    None. Raises ValueError listing every column that contains None/NaN
    values, with the per-column count.
    """
    bad_columns = []
    for column in result_df.columns:
        null_count = result_df[column].isnull().sum()
        if null_count > 0:
            bad_columns.append((column, null_count))
    if bad_columns:
        details = ", ".join(
            f"in column {name} {count} None values" for name, count in bad_columns
        )
        raise ValueError("Csv contains None values: " + details)
def read_csv(path, physical_parameters, round_=False):
    """
    Read a result csv file and prepare it for display and analysis.

    Parameters
    ----------
    path Path to the .csv file
    physical_parameters Current set of physical parameters (PositiveParameterSet)
    round_ Rounds numbers to display for better readability ('%g' format)

    Returns The headers and items to be displayed by v.DataTable as well as the DataFrame to be put in memory
    -------
    """
    with open(path) as csv_file:
        raw_file = csv_file.read()
        # Semicolon-separated files (European convention): decimal commas
        # become dots first, then semicolons become the csv separator.
        if ";" in raw_file:
            raw_file = raw_file.replace(",", ".")
            raw_file = raw_file.replace(";", ",")
        csv_spt = raw_file.splitlines()
        csv_reader = csv.DictReader(csv_spt)
        line_count = 0
        df_headers = []  # column names read from the first csv line
        df_items = []  # raw float values, one list per csv row
        headers = ['Measure']  # display headers, row-number column first
        items = []  # row dicts to be displayed by v.DataTable
        for row in csv_reader:
            if line_count == 0:
                df_headers = list(row.keys())
                headers = headers + list(row.keys())
                line_count += 1
            val = list(row.values())
            app = []
            for v in val:
                try:
                    app.append(float(v))
                except Exception:
                    raise ValueError("CSV contains non numbers")
                # Physical parameters are strictly positive, so any value
                # <= 0 is rejected.
                if float(v) <= 0:
                    raise ValueError(f"Csv contains 0 or negative values: {float(v)}")
            df_items.append(app)
            # 'Measure' is a 1-based row index added for display purposes.
            row['Measure'] = line_count
            if round_:
                # Round every displayed value with the '%g' general format
                # (note: also converts the 'Measure' index to float).
                for key in row.keys():
                    try:
                        row[key] = float('{:g}'.format(float(row[key])))
                    except Exception:
                        raise ValueError("CSV contains non numbers")
            items.append(row)
            line_count += 1
    result_df = pd.DataFrame(df_items, columns=df_headers)
    check_headers(df_headers, physical_parameters)
    check_content(result_df)
    return format_headers(headers), items, result_df
# For testing purposes only
if __name__ == '__main__':
    from pyvplm.core.definition import PositiveParameter, PositiveParameterSet
    # Build a dummy set of 7 dimensionless pi parameters spanning [0.1, 1].
    pi1 = PositiveParameter('pi1', [0.1, 1], '', 'p_j')
    pi2 = PositiveParameter('pi2', [0.1, 1], '', 'p_fe')
    pi3 = PositiveParameter('pi3', [0.1, 1], '', 'd_i*d_e**-1')
    pi4 = PositiveParameter('pi4', [0.1, 1], '', 'e_tooth*d_e**-1*n')
    pi5 = PositiveParameter('pi5', [0.1, 1], '', 'e_yoke*d_e**-1*n')
    pi6 = PositiveParameter('pi6', [0.1, 1], '', 'w_pm*d_e**-1')
    pi7 = PositiveParameter('pi7', [0.1, 1], '', 'r_i*d_e**-1')
    pi_set = PositiveParameterSet(pi1, pi2, pi3, pi4, pi5, pi6, pi7)
    # Round-trip check: write a small DOE to csv then read it back.
    # NOTE(review): this DOE contains a 0 value, which read_csv rejects
    # ("Csv contains 0 or negative values") -- confirm this is intended.
    doe = np.array([[1.1, 2.2, 3.5, 4.7, 5.3, 6.9, 7.1], [0.1, 2, 3, 4, 5.5, 6, 0], [7, 5, 4, 8.4, 5, 6, 9]])
    generate_csv(doe, 'test_csv.csv', pi_set, [])
    read_csv('test_csv.csv', pi_set)
| import numpy as np
import csv
import pandas as pd
def open_csv_file(f_name):
    """
    :param f_name: name of the csv file (assumed to end in a 4-char extension,
                   e.g. ".csv")
    :return: tuple (open file handle, actual file name); "(1)" is inserted
             before the extension as many times as needed so no existing file
             is ever clobbered
    """
    try:
        f = open(f_name, "x")
        return f, f_name
    except FileExistsError:
        # Only recurse when the name is already taken. The original caught
        # IOError, so a PermissionError / missing directory would retry new
        # "(1)" names forever; such errors now propagate to the caller.
        return open_csv_file(f_name[:-4] + "(1)" + f_name[-4:])
def generate_csv(doeX, file_name, parameter_set, out_headers):
    """
    Writes a DOE to a freshly created csv file.

    Parameters
    ----------
    doeX DOE points in physical space (2D numpy array)
    file_name name of the .csv file (with extension)
    parameter_set current physical parameter set (PositiveParameterSet)
    out_headers Headers of output physical parameters (List of str)
    Returns
    -------
    """
    handle, file_name = open_csv_file(file_name)
    # open_csv_file returns an already-open handle purely to reserve the name;
    # close it right away so the descriptor is not leaked (the original code
    # discarded it unclosed) -- the file is reopened for writing below.
    handle.close()
    with open(file_name, 'w', encoding='UTF8', newline='') as out_file:
        writer = csv.writer(out_file)
        headers = []
        for key in parameter_set.dictionary:
            # One column per physical parameter, labelled "name [unit]".
            headers.append(f"{key} [{parameter_set.dictionary[key].defined_units}]")
        writer.writerow(headers + out_headers)
        for point in doeX.tolist():
            writer.writerow(point)
        # No explicit close() needed: the "with" block closes out_file.
def format_headers(headers):
    """
    Parameters
    ----------
    headers Headers to be formatted (List of str)
    Returns A list of dict, the right format for v.DataTable headers
    -------
    """
    # Each header becomes one sortable v.DataTable column descriptor.
    return [
        {'text': header, 'sortable': True, 'value': header}
        for header in headers
    ]
def check_headers(df_headers, physical_parameters):
    """
    Parameters
    ----------
    df_headers Headers to be checked
    physical_parameters Current set of physical parameters (PositiveParameterSet)
    Returns Raises exceptions if the headers are invalid (ex: corresponds to no physical parameter)
    -------
    """
    params = list(physical_parameters.dictionary.keys())
    raw_headers = []
    units = []
    # Headers are expected in the form "name [unit]": split each one into its
    # name part and unit part.
    for header in df_headers:
        try:
            spt = header.split("[")
            raw_headers.append(spt[0].strip())
            units.append(spt[1].split("]")[0])
        except Exception:
            # A header without a "[unit]" part raises IndexError and lands here.
            raise SyntaxError("Invalid csv headers")
    if len(raw_headers) < len(params):
        raise ValueError(
            f"Not enough columns ({len(raw_headers)}, should be {len(params)}), physical parameter missing")
    if len(raw_headers) > len(params):
        raise ValueError(
            f"Too many columns ({len(raw_headers)}, should be {len(params)}),"
            f" inconsistent with defined physical parameters")
    # Match every header against the parameters not matched yet, so a
    # duplicated header cannot satisfy the same parameter twice.
    remaining_params = params.copy()
    for i, header in enumerate(raw_headers):
        valid = False
        j_ = 0
        for j, param in enumerate(remaining_params):
            if header == param:
                valid = True
                j_ = j
                break
        if not valid:
            # NOTE(review): the message compares against remaining_params[0],
            # i.e. the first still-unmatched parameter, which is not
            # necessarily the one expected for this column -- confirm intended.
            raise ValueError(
                f"CSV headers and defined physical parameters do not match: {header} =/= {remaining_params[0]}")
        else:
            # Header name matched: now the unit must match the parameter's
            # defined unit as well.
            cur_unit = physical_parameters.dictionary[remaining_params[j_]].defined_units
            remaining_params.pop(j_)
            if units[i] != cur_unit:
                raise ValueError(
                    f"CSV units and defined physical parameters units do not match: {units[i]} =/= {cur_unit}")
def check_content(result_df):
    """
    Parameters
    ----------
    result_df DataFrame with the result to be imported
    Returns Raises exceptions if the content of the DataFrame is invalid (ex: empty cells)
    -------
    """
    # Collect (column, null-count) pairs for every column containing nulls.
    bad_columns = []
    for column in result_df.columns:
        null_count = result_df[column].isnull().sum()
        if null_count > 0:
            bad_columns.append((column, null_count))
    if not bad_columns:
        return
    details = ", ".join(
        f"in column {column} {null_count} None values"
        for column, null_count in bad_columns
    )
    raise ValueError("Csv contains None values: " + details)
def read_csv(path, physical_parameters, round_=False):
    """
    Parameters
    ----------
    path Path to the .csv file
    physical_parameters Current set of physical parameters (PositiveParameterSet)
    round_ Rounds numbers to display for better readability
    Returns The headers and items to be displayed by v.DataTable as well as the DataFrame to be put in memory
    -------
    """
    with open(path) as csv_file:
        raw_file = csv_file.read()
        # Support European-style csv files: ";" cell separators with ","
        # decimal marks are normalized to the "," / "." convention first.
        if ";" in raw_file:
            raw_file = raw_file.replace(",", ".")
            raw_file = raw_file.replace(";", ",")
        csv_spt = raw_file.splitlines()
        csv_reader = csv.DictReader(csv_spt)
        line_count = 0
        df_headers = []
        df_items = []
        headers = ['Measure']  # extra leading column numbering the data rows
        items = []
        for row in csv_reader:
            if line_count == 0:
                # First data row: capture the column names once.
                df_headers = list(row.keys())
                headers = headers + list(row.keys())
                line_count += 1
            val = list(row.values())
            app = []
            # Every cell must parse as a strictly positive number.
            for v in val:
                try:
                    app.append(float(v))
                except Exception:
                    raise ValueError("CSV contains non numbers")
                if float(v) <= 0:
                    raise ValueError(f"Csv contains 0 or negative values: {float(v)}")
            df_items.append(app)
            # line_count was pre-incremented for the first row, so Measure
            # numbering is 1-based (1, 2, 3, ...).
            row['Measure'] = line_count
            if round_:
                # Shorten the displayed values ("%g" formatting) for the UI.
                for key in row.keys():
                    try:
                        row[key] = float('{:g}'.format(float(row[key])))
                    except Exception:
                        raise ValueError("CSV contains non numbers")
            items.append(row)
            line_count += 1
        result_df = pd.DataFrame(df_items, columns=df_headers)
        # Validation runs after parsing so the error messages can refer to the
        # fully assembled headers and content.
        check_headers(df_headers, physical_parameters)
        check_content(result_df)
        return format_headers(headers), items, result_df
# For testing purposes only
if __name__ == '__main__':
    from pyvplm.core.definition import PositiveParameter, PositiveParameterSet
    # Seven dimensionless pi parameters, all spanning [0.1, 1], unit-less.
    pi1 = PositiveParameter('pi1', [0.1, 1], '', 'p_j')
    pi2 = PositiveParameter('pi2', [0.1, 1], '', 'p_fe')
    pi3 = PositiveParameter('pi3', [0.1, 1], '', 'd_i*d_e**-1')
    pi4 = PositiveParameter('pi4', [0.1, 1], '', 'e_tooth*d_e**-1*n')
    pi5 = PositiveParameter('pi5', [0.1, 1], '', 'e_yoke*d_e**-1*n')
    pi6 = PositiveParameter('pi6', [0.1, 1], '', 'w_pm*d_e**-1')
    pi7 = PositiveParameter('pi7', [0.1, 1], '', 'r_i*d_e**-1')
    pi_set = PositiveParameterSet(pi1, pi2, pi3, pi4, pi5, pi6, pi7)
    # NOTE(review): the second DOE row contains a 0 and read_csv rejects
    # values <= 0, so this round-trip should raise ValueError -- confirm that
    # exercising the error path is the intent of this smoke test.
    doe = np.array([[1.1, 2.2, 3.5, 4.7, 5.3, 6.9, 7.1], [0.1, 2, 3, 4, 5.5, 6, 0], [7, 5, 4, 8.4, 5, 6, 9]])
    generate_csv(doe, 'test_csv.csv', pi_set, [])
    read_csv('test_csv.csv', pi_set)
| en | 0.618615 | :param f_name: name of the csv file
:return: a new csv file with as many (1) as needed to not already exist Parameters
----------
doeX DOE points in physical space
file_name name of the .csv file (with extension)
parameter_set current physical parameter set (PositiveParameterSet)
out_headers Headers of output physical parameters (List of str)
Returns
------- Parameters
----------
headers Headers to be formatted (List of str)
Returns A list of dict, the right format for v.DataTable headers
------- Parameters
----------
df_headers Headers to be checked
physical_parameters Current set of physical parameters (PositiveParameterSet)
Returns Raises exceptions if the headers are invalid (ex: corresponds to no physical parameter)
------- Parameters
----------
result_df DataFrame with the result to be imported
Returns Raises exceptions if the content of the DataFrame is invalid (ex: empty cells)
------- Parameters
----------
path Path to the .csv file
physical_parameters Current set of physical parameters (PositiveParameterSet)
round_ Rounds numbers to display for better readability
Returns The headers and items to be displayed by v.DataTable as well as the DataFrame to be put in memory
------- # For testing purposes only | 3.340031 | 3 |
algorithm.py | xiongzwfire/LeetCode-Solution | 0 | 6624716 | # coding: utf8
import sys
# ==排序==
"""
冒泡排序、直接插入排序、选择排序,时间复杂度O(n^2)
快速排序、归并排序、堆排序,时间复杂度O(nlogn)
"""
def bubbleSort(nums):
    """Bubble sort (stable), in place; returns nums.

    Stops early as soon as a full pass performs no swap.
    """
    for unsorted_len in range(len(nums), 1, -1):
        swapped = False
        for k in range(1, unsorted_len):
            if nums[k] < nums[k - 1]:
                nums[k - 1], nums[k] = nums[k], nums[k - 1]
                swapped = True
        if not swapped:
            break
    return nums
def insertSort(nums):
    """Insertion sort (stable), in place; returns nums.

    Bug fix: the loop guard now tests ``i > 0`` *before* comparing
    ``nums[i] < nums[i-1]``.  The original evaluated the comparison first, so
    once the key reached index 0 it still read ``nums[-1]`` (the last
    element) -- harmless only because short-circuiting then rejected the
    iteration, but fragile and misleading.
    """
    size = len(nums)
    for i in range(1, size):
        # Swap the key backwards until it is no smaller than its predecessor.
        while i > 0 and nums[i] < nums[i - 1]:
            nums[i], nums[i - 1] = nums[i - 1], nums[i]
            i -= 1
    return nums
def selectSort(nums):
    """Selection sort (not stable), in place; returns nums.

    Each pass moves the smallest remaining element to the front of the
    unsorted suffix.
    """
    n = len(nums)
    for pos in range(n):
        # Index of the first occurrence of the minimum in nums[pos:].
        smallest = min(range(pos, n), key=nums.__getitem__)
        if smallest != pos:
            nums[pos], nums[smallest] = nums[smallest], nums[pos]
    return nums
def quickSort(nums):
    """Quick sort (not stable), in place with randomized Lomuto partitioning;
    returns nums."""
    from random import randint

    def partition(lo, hi):
        # Move a randomly chosen pivot to the end, then partition around it.
        swap_idx = randint(lo, hi)
        nums[swap_idx], nums[hi] = nums[hi], nums[swap_idx]
        pivot = nums[hi]
        boundary = lo
        for k in range(lo, hi):
            if nums[k] <= pivot:
                nums[boundary], nums[k] = nums[k], nums[boundary]
                boundary += 1
        nums[boundary], nums[hi] = nums[hi], nums[boundary]
        return boundary

    def recurse(lo, hi):
        if lo >= hi:
            return
        mid = partition(lo, hi)
        recurse(lo, mid - 1)
        recurse(mid + 1, hi)

    recurse(0, len(nums) - 1)
    return nums
def heapSort(nums):
    """Heap sort (not stable), in place; returns nums.

    Builds a max-heap over the array (children of index i live at 2i+1 and
    2i+2), then repeatedly swaps the heap root with the last element of the
    shrinking heap and sifts the new root down.

    Fixes over the original:
    - integer division (``//``) in the heapify loop, so it also runs on
      Python 3 (``/`` yields a float there and ``range`` rejects it)
    - the local previously named ``next`` no longer shadows the builtin
    """
    def siftDown(nums, i, size):
        # Push nums[i] down until neither child is larger.
        while 2 * i + 1 < size:
            l, r = 2 * i + 1, 2 * i + 2
            if r < size and nums[r] > nums[l]:
                bigger = r
            else:
                bigger = l
            if nums[i] > nums[bigger]:
                break
            nums[i], nums[bigger] = nums[bigger], nums[i]
            i = bigger
    size = len(nums)
    # Heapify: sift down every branch node, starting from the last one.
    for i in range(size // 2 - 1, -1, -1):
        siftDown(nums, i, size)
    # Repeatedly move the current maximum to the end and restore the heap.
    for i in range(size - 1, 0, -1):
        nums[0], nums[i] = nums[i], nums[0]
        siftDown(nums, 0, i)
    return nums
def mergeSort(nums):
    """Merge sort (stable). Returns a new sorted list; ``nums`` is untouched.

    Fixes over the original:
    - integer division (``//``) for the midpoint, so list indexing also works
      on Python 3 (``/`` yields a float there)
    - an empty input now returns [] instead of recursing forever
      (``sort(nums, 0, -1)`` never hit the ``left == right`` base case)
    """
    if not nums:
        return []
    def merge(nums1, nums2):
        # Standard two-way merge of two sorted lists into a new list.
        i, j = 0, 0
        merged = []
        while i < len(nums1) and j < len(nums2):
            if nums1[i] <= nums2[j]:
                merged.append(nums1[i])
                i += 1
            else:
                merged.append(nums2[j])
                j += 1
        if i < len(nums1):
            merged += nums1[i:]
        if j < len(nums2):
            merged += nums2[j:]
        return merged
    def sort(nums, left, right):
        if left == right:
            return [nums[left]]
        mid = (left + right) // 2
        left_part = sort(nums, left, mid)
        right_part = sort(nums, mid + 1, right)
        return merge(left_part, right_part)
    return sort(nums, 0, len(nums) - 1)
# ==二分查找==
"""
如果线性查找表对于关键字是有序的且为顺序表,那么可以采用二分查找法
可以用递归实现,也可以迭代实现
时间复杂度O(logn)
"""
def binarySearch(nums, target):
    """Recursive binary search over a sorted list.

    Returns the index of ``target`` in ``nums``, or -1 when absent.

    Fix: integer division (``//``) for the midpoint, so indexing also works
    on Python 3 (``/`` yields a float there).
    """
    def search(nums, target, left, right):
        if left > right:
            return -1
        mid = (left + right) // 2
        if nums[mid] == target:
            return mid
        if nums[mid] > target:
            return search(nums, target, left, mid - 1)
        else:
            return search(nums, target, mid + 1, right)
    return search(nums, target, 0, len(nums) - 1)
def binarySearch_2(nums, target):
    """Iterative binary search over a sorted list.

    Returns the index of ``target`` in ``nums``, or -1 when absent.

    Fix: integer division (``//``) for the midpoint, so indexing also works
    on Python 3 (``/`` yields a float there).
    """
    left, right = 0, len(nums) - 1
    while left <= right:
        mid = (left + right) // 2
        if nums[mid] == target:
            return mid
        if nums[mid] > target:
            right = mid - 1
        else:
            left = mid + 1
    return -1
# ==回溯==
"""
1. 回朔法的重要思想在于:通过枚举法,对所有可能性进行遍历。但是枚举的顺序是“一条路走到黑”,发现黑之后,退一步,再向前尝试没走过的路,直到所有路都试过。
2. 因此回朔法可以简单的理解为:走不通就退一步的枚举法,而这里回退点也叫做回朔点。
3. 什么时候使用 used 数组,什么时候使用 begin 变量:
- 排列问题,讲究顺序(即 [2, 2, 3] 与 [2, 3, 2] 视为不同列表时),需要记录哪些数字已经使用过,此时用 used 数组;
- 组合问题,不讲究顺序(即 [2, 2, 3] 与 [2, 3, 2] 视为相同列表时),需要按照某种顺序搜索,此时使用 begin 变量。
"""
def combinationSum(candidates, target):
    """LeetCode 39: all unique combinations of candidates (reusable) that sum
    to target.  Uses a "begin index" to avoid duplicate combinations; sorts
    candidates in place first so the search can prune early."""
    candidates.sort()
    results = []

    def search(remaining, start, partial):
        if remaining == 0:
            results.append(partial[:])
            return
        for idx in range(start, len(candidates)):
            value = candidates[idx]
            if remaining - value < 0:
                # Candidates are sorted, so every later value overshoots too.
                return
            partial.append(value)
            search(remaining - value, idx, partial)
            partial.pop()

    search(target, 0, [])
    return results
def permute(nums):
    """LeetCode 46: all permutations of a list of distinct numbers, generated
    by backtracking with a list of already-chosen values."""
    ans = []
    chosen = []

    def explore(path):
        if len(path) == len(nums):
            ans.append(path[:])
            return
        for candidate in nums:
            if candidate in chosen:
                continue
            chosen.append(candidate)
            path.append(candidate)
            explore(path)
            path.pop()
            chosen.pop()

    explore([])
    return ans
# ==分治==
"""
分治法的求解步骤:划分问题、求解子问题、合并子问题的解
归并排序的“自顶向下”写法就是分治法的实例
"""
class ListNode(object):
    """
    Singly linked list node: a payload value plus a pointer to the next node.
    """
    def __init__(self, val=0, next=None):
        self.val = val    # payload of this node
        self.next = next  # following ListNode, or None at the tail
def mergeKLists(lists):
    """LeetCode 23: merge k sorted linked lists by pairwise divide-and-conquer
    merging; returns the head of the merged list (None for an empty input).

    Fix: integer division (``//``) for the midpoint, so list indexing in the
    recursion also works on Python 3 (``/`` yields a float there).
    """
    def mergeTwoLists(p, q):
        # Merge two sorted linked lists behind a dummy head node.
        dummy = ListNode()
        root = dummy
        while p and q:
            if p.val < q.val:
                dummy.next = p
                p = p.next
            else:
                dummy.next = q
                q = q.next
            dummy = dummy.next
        # Whichever list is left over is already sorted; append it whole.
        dummy.next = p if p else q
        return root.next
    def merge(lists, left, right):
        if left == right:
            return lists[left]
        mid = (left + right) // 2
        left_part = merge(lists, left, mid)
        right_part = merge(lists, mid + 1, right)
        return mergeTwoLists(left_part, right_part)
    left, right = 0, len(lists) - 1
    if right < left: return None
    return merge(lists, left, right)
# 动态规划
def climbStairs(n):
    """LeetCode 70: number of distinct ways to climb n stairs taking 1 or 2
    steps at a time -- the Fibonacci recurrence f(n) = f(n-1) + f(n-2)."""
    prev, cur = 1, 2
    if n < 3:
        # Same lookup the original performed on its dp list (n=1 -> 1, n=2 -> 2).
        return [prev, cur][n - 1]
    for _ in range(3, n + 1):
        prev, cur = cur, prev + cur
    return cur
def editDistance(word1, word2):
    """LeetCode 72: minimum number of insert / delete / replace operations
    turning word1 into word2, computed with a full (m+1) x (n+1) DP table.

    table[r][c] is the edit distance between word1[:r] and word2[:c]; each
    cell takes the cheapest of deleting, inserting, or replacing/matching.
    """
    rows, cols = len(word1), len(word2)
    table = [[0] * (cols + 1) for _ in range(rows + 1)]
    for r in range(rows + 1):
        table[r][0] = r
    for c in range(cols + 1):
        table[0][c] = c
    for r in range(1, rows + 1):
        for c in range(1, cols + 1):
            delete_cost = table[r - 1][c] + 1
            insert_cost = table[r][c - 1] + 1
            replace_cost = table[r - 1][c - 1] + (
                0 if word1[r - 1] == word2[c - 1] else 1
            )
            table[r][c] = min(delete_cost, insert_cost, replace_cost)
    return table[rows][cols]
if __name__ == "__main__":
# 排序
nums = [65, 83, 79, 82, 84, 73, 78, 71, 69, 88, 65, 77, 80, 76, 69]
print bubbleSort(nums)
nums = [65, 83, 79, 82, 84, 73, 78, 71, 69, 88, 65, 77, 80, 76, 69]
print insertSort(nums)
nums = [65, 83, 79, 82, 84, 73, 78, 71, 69, 88, 65, 77, 80, 76, 69]
print selectSort(nums)
nums = [65, 83, 79, 82, 84, 73, 78, 71, 69, 88, 65, 77, 80, 76, 69]
print quickSort(nums)
nums = [65, 83, 79, 82, 84, 73, 78, 71, 69, 88, 65, 77, 80, 76, 69]
print heapSort(nums)
nums = [65, 83, 79, 82, 84, 73, 78, 71, 69, 88, 65, 77, 80, 76, 69]
print mergeSort(nums)
# 二分查找
nums = [65, 65, 69, 69, 71, 73, 76, 77, 78, 79, 80, 82, 83, 84, 88]
target = 650
print binarySearch(nums, target)
print binarySearch_2(nums, target)
# 回溯
candidates = [2, 3, 6, 7]
target = 7
print combinationSum(candidates, target)
nums = [1, 2, 3]
print permute(nums)
| # coding: utf8
import sys
# ==排序==
"""
冒泡排序、直接插入排序、选择排序,时间复杂度O(n^2)
快速排序、归并排序、堆排序,时间复杂度O(nlogn)
"""
def bubbleSort(nums):
"""
冒泡排序:稳定排序
"""
size = len(nums)
for i in range(size):
flag = True
for j in range(1, size - i):
if nums[j] < nums[j-1]:
nums[j-1], nums[j] = nums[j], nums[j-1]
flag = False
if flag:
return nums
return nums
def insertSort(nums):
"""
插入排序:稳定排序
"""
size = len(nums)
for i in range(1, size):
while nums[i] < nums[i-1] and i > 0:
nums[i], nums[i-1] = nums[i-1], nums[i]
i -= 1
return nums
def selectSort(nums):
"""
选择排序:不稳定排序
"""
size = len(nums)
for i in range(size):
min_idx, min_val = i, nums[i]
for j in range(i+1, size):
if nums[j] < min_val:
min_idx, min_val = j, nums[j]
if min_idx != i:
nums[i], nums[min_idx] = nums[min_idx], nums[i]
return nums
def quickSort(nums):
"""
快速排序:不稳定
"""
from random import randint
def sort(nums, left, right):
if left >= right: return
pivot_idx = randint(left, right)
pivot = nums[pivot_idx]
nums[pivot_idx], nums[right] = nums[right], nums[pivot_idx]
i = left - 1
for j in range(left, right):
if nums[j] <= pivot:
nums[i+1], nums[j] = nums[j], nums[i+1]
i += 1
nums[i+1], nums[right] = nums[right], nums[i+1]
sort(nums, left, i)
sort(nums, i+2, right)
left, right = 0, len(nums) - 1
sort(nums, left, right)
return nums
def heapSort(nums):
"""
堆排序:不稳定
堆的定义:堆是一颗完全二叉树;若根节点有左孩子,则根节点的值<=左孩子节点的值;若根节点有右孩子,则根节点的值<=右孩子节点的值;以左右孩子为根的子树分别又是一个堆(小根堆)
堆的特性:(n为堆节点的个数)
- 堆宜采用顺序存储结构(数组)
- 分支节点的索引:0 ~ (n / 2) - 1;叶子节点的索引:n / 2 ~ n - 1
- 若n为奇数,则每个分支节点都有左右孩子,若n为偶数,则最后一个分支节点只有左孩子
- 下标为i的分支节点,其左右孩子节点的索引分别为2i+1、2i+2
- 除根节点外,其余任一索引为i的节点,其父节点的索引为floor((i - 1) / 2)
堆排序逻辑:首先将无序数组用“自顶向下”操作构建为大根堆,然后将堆顶元素和堆尾元素对调,再来一次“自顶向下”,重新调整堆为大根堆,循环往复即可
"""
def siftDown(nums, i, size):
while 2 * i + 1 < size:
l, r = 2 * i + 1, 2 * i + 2
if r < size and nums[r] > nums[l]:
next = r
else:
next = l
if nums[i] > nums[next]:
break
nums[i], nums[next] = nums[next], nums[i]
i = next
size = len(nums)
for i in range(size / 2 - 1, -1, -1):
siftDown(nums, i, size)
for i in range(size - 1, 0, -1):
nums[0], nums[i] = nums[i], nums[0]
siftDown(nums, 0, i)
return nums
def mergeSort(nums):
"""
归并排序:稳定
"""
def merge(nums1, nums2):
i, j = 0, 0
nums = []
while i < len(nums1) and j < len(nums2):
if nums1[i] <= nums2[j]:
nums.append(nums1[i])
i += 1
else:
nums.append(nums2[j])
j += 1
if i < len(nums1):
nums += nums1[i:]
if j < len(nums2):
nums += nums2[j:]
return nums
def sort(nums, left, right):
if left == right:
return [nums[left]]
mid = (left + right) / 2
left_part = sort(nums, left, mid)
right_part = sort(nums, mid+1, right)
sorted_nums = merge(left_part, right_part)
return sorted_nums
return sort(nums, 0, len(nums) - 1)
# ==二分查找==
"""
如果线性查找表对于关键字是有序的且为顺序表,那么可以采用二分查找法
可以用递归实现,也可以迭代实现
时间复杂度O(logn)
"""
def binarySearch(nums, target):
"""
递归版本
"""
def search(nums, target, left, right):
if left > right:
return -1
mid = (left + right) / 2
if nums[mid] == target:
return mid
if nums[mid] > target:
return search(nums, target, left, mid-1)
else:
return search(nums, target, mid+1, right)
return search(nums, target, 0, len(nums)-1)
def binarySearch_2(nums, target):
"""
迭代版本
"""
left, right = 0, len(nums) - 1
while left <= right:
mid = (left + right) / 2
if nums[mid] == target:
return mid
if nums[mid] > target:
right = mid - 1
else:
left = mid + 1
return -1
# ==回溯==
"""
1. 回朔法的重要思想在于:通过枚举法,对所有可能性进行遍历。但是枚举的顺序是“一条路走到黑”,发现黑之后,退一步,再向前尝试没走过的路,直到所有路都试过。
2. 因此回朔法可以简单的理解为:走不通就退一步的枚举法,而这里回退点也叫做回朔点。
3. 什么时候使用 used 数组,什么时候使用 begin 变量:
- 排列问题,讲究顺序(即 [2, 2, 3] 与 [2, 3, 2] 视为不同列表时),需要记录哪些数字已经使用过,此时用 used 数组;
- 组合问题,不讲究顺序(即 [2, 2, 3] 与 [2, 3, 2] 视为相同列表时),需要按照某种顺序搜索,此时使用 begin 变量。
"""
def combinationSum(candidates, target):
"""
LeetCode 39:组合总数
需要使用begin变量
"""
candidates.sort()
def backtrack(candidates, target, beg, path, res):
if target == 0:
res.append(path[:])
return
for i in range(beg, len(candidates)):
if target-candidates[i] < 0:
return
path.append(candidates[i])
backtrack(candidates, target-candidates[i], i, path, res)
path.pop()
path, res = [], []
backtrack(candidates, target, 0, path, res)
return res
def permute(nums):
"""
LeetCode 46:全排列
需要使用used数组
"""
def backtrack(nums, used, path, ans):
if len(nums) == len(path):
ans.append(path[:])
return
for i in range(len(nums)):
if nums[i] in used:
continue
path.append(nums[i])
used.append(nums[i])
backtrack(nums, used, path, ans)
used.pop()
path.pop()
used, path, ans = [], [], []
backtrack(nums, used, path, ans)
return ans
# ==分治==
"""
分治法的求解步骤:划分问题、求解子问题、合并子问题的解
归并排序的“自顶向下”写法就是分治法的实例
"""
class ListNode(object):
"""
链表结点定义
"""
def __init__(self, val=0, next=None):
self.val = val
self.next = next
def mergeKLists(lists):
def mergeTwoLists(p, q):
"""
LeetCode 23:合并两个有序链表
"""
dummy = ListNode()
root = dummy
while p and q:
if p.val < q.val:
dummy.next = p
p = p.next
else:
dummy.next = q
q = q.next
dummy = dummy.next
dummy.next = p if p else q
return root.next
def merge(lists, left, right):
if left == right:
return lists[left]
mid = (left + right) / 2
left_part = merge(lists, left, mid)
right_part = merge(lists, mid+1, right)
return mergeTwoLists(left_part, right_part)
left, right = 0, len(lists) - 1
if right < left: return None
return merge(lists, left, right)
# 动态规划
def climbStairs(n):
"""
LeetCode 70:爬楼梯
建模为斐波那契数列问题:f(n) = f(n-1) + f(n-2)
"""
dp = [1, 2]
if n < 3: return dp[n-1]
for i in range(3, n+1):
tmp = sum(dp)
dp = [dp[1], tmp]
return dp[1]
def editDistance(word1, word2):
"""
LeetCode 72:编辑距离
要想求解horse和ros的编辑距离,可以拆分成这样:
- 求出horse和ro的编辑距离为a,则a+1即可(对应插入/删除操作)
- 求出hors和ros的编辑距离为b,则b+1即可(对应插入/删除操作)
- 求出hors和ro的编辑距离为c,则c+1即可(对应替换操作)
除此之外,没有其它的方式了,因此我们求min(a+1, b+1, c+1)即可
"""
m, n = len(word1), len(word2)
dp = [[0 for j in range(n+1)] for i in range(m+1)]
for i in range(m+1):
dp[i][0] = i
for j in range(n+1):
dp[0][j] = j
for i in range(1, m+1):
for j in range(1, n+1):
a, b, c = dp[i-1][j], dp[i][j-1], dp[i-1][j-1]
dp[i][j] = min(a + 1, b + 1, c + 1 if word1[i-1] != word2[j-1] else c)
return dp[m][n]
if __name__ == "__main__":
# 排序
nums = [65, 83, 79, 82, 84, 73, 78, 71, 69, 88, 65, 77, 80, 76, 69]
print bubbleSort(nums)
nums = [65, 83, 79, 82, 84, 73, 78, 71, 69, 88, 65, 77, 80, 76, 69]
print insertSort(nums)
nums = [65, 83, 79, 82, 84, 73, 78, 71, 69, 88, 65, 77, 80, 76, 69]
print selectSort(nums)
nums = [65, 83, 79, 82, 84, 73, 78, 71, 69, 88, 65, 77, 80, 76, 69]
print quickSort(nums)
nums = [65, 83, 79, 82, 84, 73, 78, 71, 69, 88, 65, 77, 80, 76, 69]
print heapSort(nums)
nums = [65, 83, 79, 82, 84, 73, 78, 71, 69, 88, 65, 77, 80, 76, 69]
print mergeSort(nums)
# 二分查找
nums = [65, 65, 69, 69, 71, 73, 76, 77, 78, 79, 80, 82, 83, 84, 88]
target = 650
print binarySearch(nums, target)
print binarySearch_2(nums, target)
# 回溯
candidates = [2, 3, 6, 7]
target = 7
print combinationSum(candidates, target)
nums = [1, 2, 3]
print permute(nums)
| zh | 0.980097 | # coding: utf8 # ==排序== 冒泡排序、直接插入排序、选择排序,时间复杂度O(n^2) 快速排序、归并排序、堆排序,时间复杂度O(nlogn) 冒泡排序:稳定排序 插入排序:稳定排序 选择排序:不稳定排序 快速排序:不稳定 堆排序:不稳定 堆的定义:堆是一颗完全二叉树;若根节点有左孩子,则根节点的值<=左孩子节点的值;若根节点有右孩子,则根节点的值<=右孩子节点的值;以左右孩子为根的子树分别又是一个堆(小根堆) 堆的特性:(n为堆节点的个数) - 堆宜采用顺序存储结构(数组) - 分支节点的索引:0 ~ (n / 2) - 1;叶子节点的索引:n / 2 ~ n - 1 - 若n为奇数,则每个分支节点都有左右孩子,若n为偶数,则最后一个分支节点只有左孩子 - 下标为i的分支节点,其左右孩子节点的索引分别为2i+1、2i+2 - 除根节点外,其余任一索引为i的节点,其父节点的索引为floor((i - 1) / 2) 堆排序逻辑:首先将无序数组用“自顶向下”操作构建为大根堆,然后将堆顶元素和堆尾元素对调,再来一次“自顶向下”,重新调整堆为大根堆,循环往复即可 归并排序:稳定 # ==二分查找== 如果线性查找表对于关键字是有序的且为顺序表,那么可以采用二分查找法 可以用递归实现,也可以迭代实现 时间复杂度O(logn) 递归版本 迭代版本 # ==回溯== 1. 回朔法的重要思想在于:通过枚举法,对所有可能性进行遍历。但是枚举的顺序是“一条路走到黑”,发现黑之后,退一步,再向前尝试没走过的路,直到所有路都试过。 2. 因此回朔法可以简单的理解为:走不通就退一步的枚举法,而这里回退点也叫做回朔点。 3. 什么时候使用 used 数组,什么时候使用 begin 变量: - 排列问题,讲究顺序(即 [2, 2, 3] 与 [2, 3, 2] 视为不同列表时),需要记录哪些数字已经使用过,此时用 used 数组; - 组合问题,不讲究顺序(即 [2, 2, 3] 与 [2, 3, 2] 视为相同列表时),需要按照某种顺序搜索,此时使用 begin 变量。 LeetCode 39:组合总数 需要使用begin变量 LeetCode 46:全排列 需要使用used数组 # ==分治== 分治法的求解步骤:划分问题、求解子问题、合并子问题的解 归并排序的“自顶向下”写法就是分治法的实例 链表结点定义 LeetCode 23:合并两个有序链表 # 动态规划 LeetCode 70:爬楼梯 建模为斐波那契数列问题:f(n) = f(n-1) + f(n-2) LeetCode 72:编辑距离 要想求解horse和ros的编辑距离,可以拆分成这样: - 求出horse和ro的编辑距离为a,则a+1即可(对应插入/删除操作) - 求出hors和ros的编辑距离为b,则b+1即可(对应插入/删除操作) - 求出hors和ro的编辑距离为c,则c+1即可(对应替换操作) 除此之外,没有其它的方式了,因此我们求min(a+1, b+1, c+1)即可 # 排序 # 二分查找 # 回溯 | 3.74895 | 4 |
homeassistant/components/elv/switch.py | MrDelik/core | 30,023 | 6624717 | <reponame>MrDelik/core
"""Support for PCA 301 smart switch."""
from __future__ import annotations
import logging
import pypca
from serial import SerialException
from homeassistant.components.switch import SwitchEntity
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "PCA 301"
def setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up the PCA switch platform."""
    if discovery_info is None:
        return
    serial_port = discovery_info["device"]
    try:
        # Open the PCA gateway and register one switch entity per plug.
        gateway = pypca.PCA(serial_port)
        gateway.open()
        plugs = [SmartPlugSwitch(gateway, dev_id) for dev_id in gateway.get_devices()]
        add_entities(plugs, True)
    except SerialException as exc:
        _LOGGER.warning("Unable to open serial port: %s", exc)
        return
    # Close the serial connection when Home Assistant shuts down.
    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, gateway.close)
    gateway.start_scan()
class SmartPlugSwitch(SwitchEntity):
    """Representation of a PCA Smart Plug switch."""

    def __init__(self, pca, device_id):
        """Initialize the switch.

        pca: open pypca.PCA gateway used for all device I/O.
        device_id: identifier of this plug on the PCA bus.
        """
        self._device_id = device_id
        # Consistency fix: reuse the module-level DEFAULT_NAME constant
        # instead of repeating the "PCA 301" literal.
        self._name = DEFAULT_NAME
        self._state = None
        self._available = True
        self._pca = pca

    @property
    def name(self):
        """Return the name of the Smart Plug, if any."""
        return self._name

    @property
    def available(self) -> bool:
        """Return if switch is available."""
        return self._available

    @property
    def is_on(self):
        """Return true if switch is on."""
        return self._state

    def turn_on(self, **kwargs):
        """Turn the switch on."""
        self._pca.turn_on(self._device_id)

    def turn_off(self, **kwargs):
        """Turn the switch off."""
        self._pca.turn_off(self._device_id)

    def update(self):
        """Update the PCA switch's state."""
        try:
            self._state = self._pca.get_state(self._device_id)
            self._available = True
        except OSError as ex:
            # Warn only on the transition to unavailable to avoid log spam.
            if self._available:
                _LOGGER.warning("Could not read state for %s: %s", self.name, ex)
            self._available = False
| """Support for PCA 301 smart switch."""
from __future__ import annotations
import logging
import pypca
from serial import SerialException
from homeassistant.components.switch import SwitchEntity
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "PCA 301"
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the PCA switch platform."""
if discovery_info is None:
return
serial_device = discovery_info["device"]
try:
pca = pypca.PCA(serial_device)
pca.open()
entities = [SmartPlugSwitch(pca, device) for device in pca.get_devices()]
add_entities(entities, True)
except SerialException as exc:
_LOGGER.warning("Unable to open serial port: %s", exc)
return
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, pca.close)
pca.start_scan()
class SmartPlugSwitch(SwitchEntity):
"""Representation of a PCA Smart Plug switch."""
def __init__(self, pca, device_id):
"""Initialize the switch."""
self._device_id = device_id
self._name = "PCA 301"
self._state = None
self._available = True
self._pca = pca
@property
def name(self):
"""Return the name of the Smart Plug, if any."""
return self._name
@property
def available(self) -> bool:
"""Return if switch is available."""
return self._available
@property
def is_on(self):
"""Return true if switch is on."""
return self._state
def turn_on(self, **kwargs):
"""Turn the switch on."""
self._pca.turn_on(self._device_id)
def turn_off(self, **kwargs):
"""Turn the switch off."""
self._pca.turn_off(self._device_id)
def update(self):
"""Update the PCA switch's state."""
try:
self._state = self._pca.get_state(self._device_id)
self._available = True
except (OSError) as ex:
if self._available:
_LOGGER.warning("Could not read state for %s: %s", self.name, ex)
self._available = False | en | 0.752449 | Support for PCA 301 smart switch. Set up the PCA switch platform. Representation of a PCA Smart Plug switch. Initialize the switch. Return the name of the Smart Plug, if any. Return if switch is available. Return true if switch is on. Turn the switch on. Turn the switch off. Update the PCA switch's state. | 2.385115 | 2 |
utils/regression/forrest_run.py | noahsherrill/force-riscv | 111 | 6624718 | #!/usr/bin/env python3
#
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# PYTHON3 UP
# /software/public/python/3.4.1/bin/python3
#
# module: forrest_run.py
# comments: This module can be run as a part of master_run or can exec as a
# standalone which will process a special control file which is
# created by. This frun control file must contain all information
# necessary to process a template task
#
#
import os
import signal
# Make third-party modules available for import
import sys
import traceback
import common.cmdline_utils as CmdLineUtils
from classes.ApplicationsSetup import ApplicationsSetup
from classes.control_item import ControlItem
from classes.exec_controller import ExecuteController
from classes.module_run import ModuleRun
from common.msg_utils import Msg
from common.path_utils import PathUtils
from common.sys_utils import SysUtils
from force_init import the_force_root
from forrest_init import CmdLine, Defaults, CommandLineParameters
sys.path.append(PathUtils.real_path("../../3rd_party/py"))
class ForrestRun(ModuleRun):
    """Processes one frun control file: locates it, execs its contents and
    hands the resulting control item to an ExecuteController."""

    def __init__(self):
        super().__init__(CmdLine.Switches[CmdLine.msg_lev], Defaults.msg_level)
        self.frun_name = None  # file name of the frun control file
        self.frun_dir = None  # directory containing the frun control file
        self.fctrl = None  # ExecuteController, created in load()
        self.item_data = {}
        self.options = {}
        self.fcontrol = None  # raw "control_items" list parsed from the frun file

    def init_app_setup(self):
        # Prefer a workflow passed via -w/--workflow; fall back to the default
        # applications setup when that option is absent.
        try:
            self.m_app_setup = ApplicationsSetup(
                CommandLineParameters,
                sys.argv,
                CmdLineUtils.basic_command_line_argument_retrieval(
                    sys.argv[1:], "-w", "--workflow", str, 1
                ).workflow[0],
            )
            self.m_app_info = self.m_app_setup.getApplicationsInfo()
        except TypeError:
            # catches error that is thrown when trying to iterate through a
            # None type variable (if workflow argument does not exist)
            self.m_app_setup = ApplicationsSetup(
                CommandLineParameters, sys.argv
            )
            self.m_app_info = self.m_app_setup.getApplicationsInfo()
        except SystemExit as aSysExit:
            sys.exit(int(str(aSysExit)))
        except Exception as ex:
            print(
                "[ERROR] - An Unhandled Error has Occurred during "
                "applications setup of " + str(sys.argv[0])
            )
            traceback.print_exc(file=sys.stdout)
            sys.exit(43)

    def load(self):
        """Locate the frun control file, exec its contents and build the
        ExecuteController that will process the first control item."""
        my_frun_path = self.option_def(
            CmdLine.Switches[CmdLine.control_name], None
        )
        if my_frun_path is None:
            raise Exception(
                "F-Run Control File Not Found on the Forrest Run Command "
                "Line: Given Path: %s",
                str((my_frun_path)),
            )
        self.locate_frun(my_frun_path)
        Msg.user("File Path: %s" % (my_frun_path))
        my_content = open(self.frun_name).read()
        # NOTE(review): the frun file is executed as Python code; it is
        # assumed to be trusted (generated by master_run) -- confirm.
        my_glb, my_loc = SysUtils.exec_content(my_content)
        Msg.dbg(str(my_loc))
        self.fcontrol = my_loc["control_items"]
        my_ctrl_dict = self.fcontrol[0]
        my_ctrl_item = ControlItem()
        my_ctrl_item.load(self.m_app_info, my_ctrl_dict)
        # Msg.lout( my_ctrl_dict, "user", "Forrest Parent Data ...." )
        self.check_simulator()
        self.fctrl = ExecuteController(self.m_app_info)
        self.fctrl.set_frun(self.frun_name)
        self.fctrl.load(my_ctrl_item)

    def run(self):
        # Delegates all processing to the controller built by load().
        Msg.dbg("ForrestRun::run()")
        self.fctrl.process()

    def locate_frun(self, arg_frun_path):
        """Resolve arg_frun_path into frun_dir / frun_name and chdir into the
        directory holding the control file."""
        Msg.user("Directory set to %s" % (PathUtils.current_dir()))
        # if the control file contains a path then split that into the
        # directory and the file
        my_frun_dir, my_frun_name = PathUtils.split_path(arg_frun_path)
        # always convert to full path
        my_cur_dir = PathUtils.real_path(PathUtils.current_dir())
        # gots to have a source directory as part of the file name
        if my_frun_dir is None:
            my_frun_dir = my_cur_dir
        else:
            # always convert to full path. If the frun was loaded correctly
            # then we can conclude that the path tendered is either a relative
            # path from the starting directory or a full path to that file. If
            # it is not a full path then it will need to be converted to a
            # full path and all links removed
            my_frun_dir = PathUtils.real_path(my_frun_dir)
        # change into the directory to generate and simulate
        if not PathUtils.chdir(my_frun_dir):
            raise Exception(
                "F-Run Directory[%s] Not Found" % (str(my_frun_dir))
            )
        self.frun_name = my_frun_name
        self.frun_dir = my_frun_dir

    def check_simulator(self):
        """On a SAN ("Green Zone") host, make sure LD_LIBRARY_PATH contains
        the required gcc runtime directory; always returns True."""
        if SysUtils.check_host("SAN"):
            Msg.dbg("System is in Green Zone .....")
            my_gcc_path = "/project/software/public/gcc/5.1/centos6.6/lib64"
            my_lib_path = SysUtils.envar("LD_LIBRARY_PATH", None)
            if not my_lib_path:
                SysUtils.envar_set("LD_LIBRARY_PATH", my_gcc_path)
            elif my_lib_path.find(my_gcc_path) < 0:
                # prepend, keeping the existing search path intact
                SysUtils.envar_set(
                    "LD_LIBRARY_PATH", "%s:%s" % (my_gcc_path, my_lib_path)
                )
            Msg.dbg("LD_LIB_PATH: %s " % (str(my_lib_path)))
            Msg.dbg(
                '"LD_LIBRARY_PATH" = %s'
                % (str(SysUtils.envar("LD_LIBRARY_PATH", None)))
            )
        else:
            Msg.dbg("System is Red Zone or Yellow Zone")
        return True
def handle_signal(arg_signal, arg_stackframe):
    """Top-level signal handler: report the signal, kill the whole process
    group, then exit with the signal number as the return code."""
    # it is necessary to write directly to stdout and not use print which is
    # very unreliable
    messages = {
        signal.SIGINT: "Signal = {'retcode': %d, 'message': 'Encountered interrupt, "
        "all processing halted'}\n" % (signal.SIGINT),
        signal.SIGTERM: "Signal = {'retcode': %d, 'message': 'OS Terminated Process, "
        "all processing halted'}\n" % (signal.SIGTERM),
    }
    if arg_signal in messages:
        sys.stdout.write(messages[arg_signal])
    # Flush the line and release the processor to ensure that the output is
    # fully written
    sys.stdout.flush()
    SysUtils.sleep(1)
    # once the line has been written kill any remaining processes dead dead
    # dead, this will suppress further output
    os.killpg(0, signal.SIGKILL)
    # finally return the signal id as the return code
    sys.exit(int(arg_signal))
def main():
    """Entry point: run one f-run control file, optionally teeing stdout to a log."""
    # Install signal handlers first so an early Ctrl-C is reported cleanly.
    signal.signal(signal.SIGINT, handle_signal)
    signal.signal(signal.SIGTERM, handle_signal)
    # Initialize up front: the finally block below reads these, so they must
    # exist even when the try body fails on its very first statement.
    my_hlog = None
    my_org_stdout = None
    my_logfile = None  # BUG FIX: was only assigned inside the try block
    # Save the originating directory so it can be restored on the way out.
    my_pwd = PathUtils.current_dir()
    # Become our own process group leader so handle_signal can killpg(0, ...).
    os.setpgid(os.getpid(), os.getpid())
    my_module = ForrestRun()
    try:
        my_module.force_path = the_force_root
        my_logfile = my_module.m_app_info.mCmdLineOpts.option_def(
            CmdLine.Switches[CmdLine.logfile], None
        )
        if my_logfile is not None:
            # Redirect stdout into the requested log file.
            my_org_stdout = sys.stdout
            my_hlog = open(my_logfile, "w")
            sys.stdout = my_hlog
            Msg.user("Log File: %s" % (str(my_logfile)), "STDLOG")
        Msg.dbg("\nForce Path: %s" % (str(the_force_root)))
        Msg.dbg("Original Directory: " + my_pwd)
        Msg.dbg("Processing Command Line and Loading Control File")
        my_module.load()
        Msg.dbg("Directory set to %s" % (PathUtils.current_dir()))
        if not PathUtils.chdir(my_module.frun_dir, False):
            Msg.dbg(
                "Directory Unchanged, using the current directory for output"
            )
        my_module.run()
        Msg.dbg("Test Completed ....\n")
        Msg.blank()
    except Exception as ex:
        # (removed an unused local ``from force_init import force_usage``)
        Msg.err(
            "An Unhandled Error has Occurred during run of " + str(sys.argv[0])
        )
        traceback.print_exc(file=sys.stdout)
        Msg.error_trace(str(ex))
        my_module.m_app_info.mCmdLineOpts.print_help()
        sys.exit(41)
    except BaseException:
        print(
            "[ERROR] - An Unhandled Error has Occurred during run of "
            + str(sys.argv[0])
        )
        traceback.print_exc(file=sys.stdout)
        sys.exit(42)
    finally:
        # BUG FIX: guard on my_hlog (not my_logfile) so a failed open() of the
        # log file cannot trigger an AttributeError on None here.
        if my_hlog is not None:
            my_hlog.close()
            sys.stdout = my_org_stdout
            # Echo the captured log back to the real console.
            with open(my_logfile, "r") as my_hlog:
                print(my_hlog.read())
        if my_pwd is not None:
            PathUtils.chdir(my_pwd)
            Msg.dbg("Returned To: %s" % (PathUtils.current_dir()))


if __name__ == "__main__":
    main()
| #!/usr/bin/env python3
#
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# PYTHON3 UP
# /software/public/python/3.4.1/bin/python3
#
# module: forrest_run.py
# comments: This module can be run as a part of master_run or can exec as a
# standalone which will process a special control file which is
# created by. This frun control file must contain all information
# necessary to process a template task
#
#
import os
import signal
# Make third-party modules available for import
import sys
import traceback
import common.cmdline_utils as CmdLineUtils
from classes.ApplicationsSetup import ApplicationsSetup
from classes.control_item import ControlItem
from classes.exec_controller import ExecuteController
from classes.module_run import ModuleRun
from common.msg_utils import Msg
from common.path_utils import PathUtils
from common.sys_utils import SysUtils
from force_init import the_force_root
from forrest_init import CmdLine, Defaults, CommandLineParameters
sys.path.append(PathUtils.real_path("../../3rd_party/py"))
class ForrestRun(ModuleRun):
    """Driver that executes a single "f-run" control file.

    Runs under master_run or standalone; the control file must contain
    everything needed to process one template task.
    """

    def __init__(self):
        super().__init__(CmdLine.Switches[CmdLine.msg_lev], Defaults.msg_level)
        # Name and directory of the f-run control file (set by locate_frun).
        self.frun_name = None
        self.frun_dir = None
        # Controller driving the actual generate/simulate steps.
        self.fctrl = None
        self.item_data = {}
        self.options = {}
        # Raw ``control_items`` list extracted from the control file.
        self.fcontrol = None

    def init_app_setup(self):
        """Build the ApplicationsSetup, honoring an optional -w/--workflow flag."""
        try:
            self.m_app_setup = ApplicationsSetup(
                CommandLineParameters,
                sys.argv,
                CmdLineUtils.basic_command_line_argument_retrieval(
                    sys.argv[1:], "-w", "--workflow", str, 1
                ).workflow[0],
            )
            self.m_app_info = self.m_app_setup.getApplicationsInfo()
        except TypeError:
            # Raised when iterating a None result (no workflow argument
            # was supplied on the command line).
            self.m_app_setup = ApplicationsSetup(
                CommandLineParameters, sys.argv
            )
            self.m_app_info = self.m_app_setup.getApplicationsInfo()
        except SystemExit as aSysExit:
            sys.exit(int(str(aSysExit)))
        except Exception:
            print(
                "[ERROR] - An Unhandled Error has Occurred during "
                "applications setup of " + str(sys.argv[0])
            )
            traceback.print_exc(file=sys.stdout)
            sys.exit(43)

    def load(self):
        """Locate, exec, and wire up the f-run control file."""
        my_frun_path = self.option_def(
            CmdLine.Switches[CmdLine.control_name], None
        )
        if my_frun_path is None:
            # BUG FIX: the message and its argument used to be passed as two
            # separate Exception args (printing as a tuple); interpolate.
            raise Exception(
                "F-Run Control File Not Found on the Forrest Run Command "
                "Line: Given Path: %s" % str(my_frun_path)
            )
        self.locate_frun(my_frun_path)
        Msg.user("File Path: %s" % (my_frun_path))
        # BUG FIX: close the control file instead of leaking the handle.
        with open(self.frun_name) as my_hfile:
            my_content = my_hfile.read()
        my_glb, my_loc = SysUtils.exec_content(my_content)
        Msg.dbg(str(my_loc))
        self.fcontrol = my_loc["control_items"]
        my_ctrl_dict = self.fcontrol[0]
        my_ctrl_item = ControlItem()
        my_ctrl_item.load(self.m_app_info, my_ctrl_dict)
        self.check_simulator()
        self.fctrl = ExecuteController(self.m_app_info)
        self.fctrl.set_frun(self.frun_name)
        self.fctrl.load(my_ctrl_item)

    def run(self):
        """Process the loaded control item."""
        Msg.dbg("ForrestRun::run()")
        self.fctrl.process()

    def locate_frun(self, arg_frun_path):
        """Resolve the control file path and chdir into its directory.

        Sets self.frun_name / self.frun_dir on success; raises if the
        directory cannot be entered.
        """
        Msg.user("Directory set to %s" % (PathUtils.current_dir()))
        # If the control file contains a path, split it into directory + file.
        my_frun_dir, my_frun_name = PathUtils.split_path(arg_frun_path)
        my_cur_dir = PathUtils.real_path(PathUtils.current_dir())
        if my_frun_dir is None:
            # Bare file name: resolve against the current directory.
            my_frun_dir = my_cur_dir
        else:
            # The tendered path is either relative to the starting directory
            # or already absolute; normalize to a full path with links removed.
            my_frun_dir = PathUtils.real_path(my_frun_dir)
        # Change into the directory used to generate and simulate.
        if not PathUtils.chdir(my_frun_dir):
            raise Exception(
                "F-Run Directory[%s] Not Found" % (str(my_frun_dir))
            )
        self.frun_name = my_frun_name
        self.frun_dir = my_frun_dir

    def check_simulator(self):
        """On SAN ("Green Zone") hosts, ensure LD_LIBRARY_PATH has gcc 5.1."""
        if SysUtils.check_host("SAN"):
            Msg.dbg("System is in Green Zone .....")
            my_gcc_path = "/project/software/public/gcc/5.1/centos6.6/lib64"
            my_lib_path = SysUtils.envar("LD_LIBRARY_PATH", None)
            if not my_lib_path:
                SysUtils.envar_set("LD_LIBRARY_PATH", my_gcc_path)
            elif my_lib_path.find(my_gcc_path) < 0:
                SysUtils.envar_set(
                    "LD_LIBRARY_PATH", "%s:%s" % (my_gcc_path, my_lib_path)
                )
            Msg.dbg("LD_LIB_PATH: %s " % (str(my_lib_path)))
            Msg.dbg(
                '"LD_LIBRARY_PATH" = %s'
                % (str(SysUtils.envar("LD_LIBRARY_PATH", None)))
            )
        else:
            Msg.dbg("System is Red Zone or Yellow Zone")
        return True
def handle_signal(arg_signal, arg_stackframe):
# it is necessary to write directly to stdout and not use print which is
# very unreliable
if arg_signal == signal.SIGINT:
sys.stdout.write(
"Signal = {'retcode': %d, 'message': 'Encountered interrupt, "
"all processing halted'}\n" % (signal.SIGINT)
)
elif arg_signal == signal.SIGTERM:
sys.stdout.write(
"Signal = {'retcode': %d, 'message': 'OS Terminated Process, "
"all processing halted'}\n" % (signal.SIGTERM)
)
# Flush the line and release the processor to ensure that the output is
# fully written
sys.stdout.flush()
SysUtils.sleep(1)
# once the line has been written kill any remaining processes dead dead
# dead, this will suppress further output
os.killpg(0, signal.SIGKILL)
# finally return the signal id as the return code
sys.exit(int(arg_signal))
def main():
# set up signal handlers,
signal.signal(signal.SIGINT, handle_signal)
signal.signal(signal.SIGTERM, handle_signal)
# initialize variables
my_hlog = None
my_org_stdout = None
# global the_output_path =
# Step 1: Save the originating directory
my_pwd = PathUtils.current_dir()
# Step 3: Extract Pid Group
os.setpgid(os.getpid(), os.getpid())
my_module = ForrestRun()
try:
my_module.force_path = the_force_root
my_logfile = my_module.m_app_info.mCmdLineOpts.option_def(
CmdLine.Switches[CmdLine.logfile], None
)
if my_logfile is not None:
# print( "Redirecting STDOUT to my_logfile" )
my_org_stdout = sys.stdout
my_hlog = open(my_logfile, "w")
sys.stdout = my_hlog
Msg.user("Log File: %s" % (str(my_logfile)), "STDLOG")
Msg.dbg("\nForce Path: %s" % (str(the_force_root)))
Msg.dbg("Original Directory: " + my_pwd)
# save current working directory
Msg.dbg("Processing Command Line and Loading Control File")
my_module.load()
Msg.dbg("Directory set to %s" % (PathUtils.current_dir()))
if not PathUtils.chdir(my_module.frun_dir, False):
Msg.dbg(
"Directory Unchanged, using the current directory for output"
)
my_module.run()
Msg.dbg("Test Completed ....\n")
Msg.blank()
# sys.exit( 0 )
except Exception as ex:
from force_init import force_usage
Msg.err(
"An Unhandled Error has Occurred during run of " + str(sys.argv[0])
)
traceback.print_exc(file=sys.stdout)
Msg.error_trace(str(ex))
my_module.m_app_info.mCmdLineOpts.print_help()
sys.exit(41)
except BaseException:
print(
"[ERROR] - An Unhandled Error has Occurred during run of "
+ str(sys.argv[0])
)
traceback.print_exc(file=sys.stdout)
sys.exit(42)
finally:
if my_logfile is not None:
my_hlog.close()
sys.stdout = my_org_stdout
with open(my_logfile, "r") as my_hlog:
print(my_hlog.read())
if my_pwd is not None:
PathUtils.chdir(my_pwd)
Msg.dbg("Returned To: %s" % (PathUtils.current_dir()))
if __name__ == "__main__":
main()
| en | 0.858965 | #!/usr/bin/env python3 # # Copyright (C) [2020] Futurewei Technologies, Inc. # # FORCE-RISCV is licensed under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES # OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO # NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. # See the License for the specific language governing permissions and # limitations under the License. # # PYTHON3 UP # /software/public/python/3.4.1/bin/python3 # # module: forrest_run.py # comments: This module can be run as a part of master_run or can exec as a # standalone which will process a special control file which is # created by. This frun control file must contain all information # necessary to process a template task # # # Make third-party modules available for import # catches error that is thrown when trying to iterate through a # None type variable (if workflow argument does not exist) # Msg.lout( my_ctrl_dict, "user", "Forrest Parent Data ...." ) # if the control file contains a path then split that into the # directory and the file # always convert to full path # gots to have a source directory as part of the file name # always convert to full path. If the frun was loaded correctly # then we can conclude that the path tendered is either a relative # path from the starting directory or a full path to that file. 
If # it is not a full path then it will need to be converted to a # full path and all links removed # change into the directory to generate and simulate # it is necessary to write directly to stdout and not use print which is # very unreliable # Flush the line and release the processor to ensure that the output is # fully written # once the line has been written kill any remaining processes dead dead # dead, this will suppress further output # finally return the signal id as the return code # set up signal handlers, # initialize variables # global the_output_path = # Step 1: Save the originating directory # Step 3: Extract Pid Group # print( "Redirecting STDOUT to my_logfile" ) # save current working directory # sys.exit( 0 ) | 1.773382 | 2 |
garage/tf/exploration_strategies/ou_strategy.py | shadiakiki1986/garage | 3 | 6624719 | """
This module creates an OU exploration strategy.
Ornstein Uhlenbeck exploration strategy comes from the Ornstein-Uhlenbeck
process. It is often used in DDPG algorithm because in continuous control task
it is better to have temporally correlated exploration to get smoother
transitions. And OU process is relatively smooth in time.
"""
import numpy as np
from garage.exploration_strategies import ExplorationStrategy
from garage.misc.overrides import overrides
class OUStrategy(ExplorationStrategy):
    """
    An OU exploration strategy to add noise to environment actions.

    Example:
        $ python garage/tf/exploration_strategies/ou_strategy.py
    """

    def __init__(self, env_spec, mu=0, sigma=0.3, theta=0.15, dt=1e-2,
                 x0=None):
        """
        Construct class.

        Args:
            env_spec: Environment for OUStrategy to explore.
            mu: Long-run mean the process reverts to.
            sigma: Scale of the Wiener (noise) term.
            theta: Mean-reversion rate.
            dt: Simulation time step.
            x0: Initial state (defaults to the mean when None).
        """
        self.env_spec = env_spec
        self.action_space = env_spec.action_space
        self.action_dim = self.action_space.flat_dim
        self.mu = mu
        self.sigma = sigma
        self.theta = theta
        self.dt = dt
        self.x0 = x0
        self.reset()

    def simulate(self):
        """
        Advance the OU process one step and return the new state.

        dx = theta * (mu - x) * dt + sigma * sqrt(dt) * N(0, 1)
        """
        x = self.state
        dx = self.theta * (self.mu - x) * self.dt + self.sigma * np.sqrt(
            self.dt) * np.random.normal(size=len(x))
        self.state = x + dx
        return self.state

    @overrides
    def reset(self):
        """Reset the state to x0, or to the process mean mu if x0 is unset."""
        if self.x0 is not None:
            self.state = self.x0
        else:
            # BUG FIX: was ``self.mu * np.zeros(...)``, which is always the
            # zero vector and silently discards a non-zero mu; the process
            # should start at its long-run mean. (Identical for mu=0.)
            self.state = self.mu * np.ones(self.action_dim)

    @overrides
    def get_action(self, t, observation, policy, **kwargs):
        """Return the policy's action for *observation* with OU noise added.

        Args:
            t: Iteration (unused; kept for interface compatibility).
            observation: Observation from the environment.
            policy: Policy network to predict action based on the observation.

        Returns:
            Tuple of (noisy action clipped to the action space, agent_infos).
        """
        action, agent_infos = policy.get_action(observation)
        ou_state = self.simulate()
        return np.clip(action + ou_state, self.action_space.low,
                       self.action_space.high), agent_infos

    def get_actions(self, observations, policy):
        """Batch variant of get_action; one shared OU step for all actions."""
        actions, agent_infos = policy.get_actions(observations)
        ou_state = self.simulate()
        return np.clip(actions + ou_state, self.action_space.low,
                       self.action_space.high), agent_infos
if __name__ == "__main__":
    # Ad-hoc visual check: simulate one OU trajectory (first action dim of
    # Pendulum-v0) for 1000 steps and plot it.
    import gym
    import matplotlib.pyplot as plt
    ou = OUStrategy(
        env_spec=gym.make("Pendulum-v0"), mu=0, theta=0.15, sigma=0.3)
    states = []
    for i in range(1000):
        states.append(ou.simulate()[0])
    plt.plot(states)
    plt.show()
| """
This module creates an OU exploration strategy.
Ornstein Uhlenbeck exploration strategy comes from the Ornstein-Uhlenbeck
process. It is often used in DDPG algorithm because in continuous control task
it is better to have temporally correlated exploration to get smoother
transitions. And OU process is relatively smooth in time.
"""
import numpy as np
from garage.exploration_strategies import ExplorationStrategy
from garage.misc.overrides import overrides
class OUStrategy(ExplorationStrategy):
"""
An OU exploration strategy to add noise to environment actions.
Example:
$ python garage/tf/exploration_strategies/ou_strategy.py
"""
def __init__(self, env_spec, mu=0, sigma=0.3, theta=0.15, dt=1e-2,
x0=None):
"""
Construct class.
Args:
env_spec: Environment for OUStrategy to explore.
mu: A parameter to simulate the process.
sigma: A parameter to simulate the process.
theta: A parameter to simulate the process.
dt: A parameter to simulate the process.
x0: Initial state.
"""
self.env_spec = env_spec
self.action_space = env_spec.action_space
self.action_dim = self.action_space.flat_dim
self.mu = mu
self.sigma = sigma
self.theta = theta
self.dt = dt
self.x0 = x0
self.reset()
def simulate(self):
"""
Compute the next state of the exploration.
Returns:
self.state: Next state of the exploration.
"""
x = self.state
dx = self.theta * (self.mu - x) * self.dt + self.sigma * np.sqrt(
self.dt) * np.random.normal(size=len(x))
self.state = x + dx
return self.state
@overrides
def reset(self):
"""Reset the state of the exploration."""
self.state = self.x0 if self.x0 is not None else self.mu * np.zeros(
self.action_dim)
@overrides
def get_action(self, t, observation, policy, **kwargs):
"""Return an action with noise.
Args:
t: Iteration.
observation: Observation from the environment.
policy: Policy network to predict action based on the observation.
Returns:
An action with noise explored by OUStrategy.
"""
action, agent_infos = policy.get_action(observation)
ou_state = self.simulate()
return np.clip(action + ou_state, self.action_space.low,
self.action_space.high), agent_infos
def get_actions(self, observations, policy):
actions, agent_infos = policy.get_actions(observations)
ou_state = self.simulate()
return np.clip(actions + ou_state, self.action_space.low,
self.action_space.high), agent_infos
if __name__ == "__main__":
import gym
import matplotlib.pyplot as plt
ou = OUStrategy(
env_spec=gym.make("Pendulum-v0"), mu=0, theta=0.15, sigma=0.3)
states = []
for i in range(1000):
states.append(ou.simulate()[0])
plt.plot(states)
plt.show()
| en | 0.782736 | This module creates an OU exploration strategy. Ornstein Uhlenbeck exploration strategy comes from the Ornstein-Uhlenbeck process. It is often used in DDPG algorithm because in continuous control task it is better to have temporally correlated exploration to get smoother transitions. And OU process is relatively smooth in time. An OU exploration strategy to add noise to environment actions. Example: $ python garage/tf/exploration_strategies/ou_strategy.py Construct class. Args: env_spec: Environment for OUStrategy to explore. mu: A parameter to simulate the process. sigma: A parameter to simulate the process. theta: A parameter to simulate the process. dt: A parameter to simulate the process. x0: Initial state. Compute the next state of the exploration. Returns: self.state: Next state of the exploration. Reset the state of the exploration. Return an action with noise. Args: t: Iteration. observation: Observation from the environment. policy: Policy network to predict action based on the observation. Returns: An action with noise explored by OUStrategy. | 3.104356 | 3 |
test/data/t1_expected.py | ci-fuzz/protobuf_parser | 0 | 6624720 | <filename>test/data/t1_expected.py
from proto_parser.proto_parser import ScopedSection, WORD_ROOT, WORD_PROTO_FILE, WORD_SERVICE, Service, RPC, Message, HttpMethod, \
WORD_MESSAGE, MessageField, WORD_FIELD
# --- Raw .proto fixture text -------------------------------------------------
# Body of BasketService (without the enclosing ``service`` wrapper).
T1_BASKET_SERVICE_CONTENT = [
    "rpc Update(UpdateBasketReq) returns (UpdateBasketResp) {",
    "option (google.api.http) = {",
    "post: \"/user/{basket.user_id}/basket\"",
    "body: \"*\"",
    "};",
    "}",
    "rpc Get(GetBasketReq) returns (Basket) {",
    "option (google.api.http) = {",
    "get: \"/user/{basket.user_id}/basket\"",
    "body: \"*\"",
    "};",
    "}",
]
# The same service, including the ``service BasketService { ... }`` wrapper.
T1_BASKET_SERVICE = [
    "service BasketService {",
    "rpc Update(UpdateBasketReq) returns (UpdateBasketResp) {",
    "option (google.api.http) = {",
    "post: \"/user/{basket.user_id}/basket\"",
    "body: \"*\"",
    "};",
    "}",
    "rpc Get(GetBasketReq) returns (Basket) {",
    "option (google.api.http) = {",
    "get: \"/user/{basket.user_id}/basket\"",
    "body: \"*\"",
    "};",
    "}",
    "}",
]
# Message definitions referenced by the service above.
T1_MESSAGE_UPDATEBASKETREQ = [
    "message UpdateBasketReq {",
    "Basket basket = 1;",
    "}",
]
T1_MESSAGE_UPDATEBASKETRESP = [
    "message UpdateBasketResp {",
    "double subtotal = 1;",
    "double total = 2;",
    "repeated api.gen.Promotion applied_promotions = 3;",
    "}",
]
T1_MESSAGE_UPDATEBASKET = [
    "message Basket {",
    "string id = 1;",
    "string user_id = 2;",
    "repeated string product_ids = 3;",
    "}",
]
# Full parser input: each element is one newline-joined top-level block.
T1_LINES = [
    str.join("\n", T1_BASKET_SERVICE) + "\n",
    str.join("\n", T1_MESSAGE_UPDATEBASKETREQ) + "\n",
    str.join("\n", T1_MESSAGE_UPDATEBASKETRESP) + "\n",
    str.join("\n", T1_MESSAGE_UPDATEBASKET) + "\n"
]
# Individual RPC blocks, and the same blocks pre-joined into strings.
T1_BASKET_SERVICE_RPC_GET = [
    "rpc Get(GetBasketReq) returns (Basket) {",
    "option (google.api.http) = {",
    "get: \"/user/{basket.user_id}/basket\"",
    "body: \"*\"",
    "};",
    "}",
]
T1_BASKET_SERVICE_RPC_POST = [
    "rpc Update(UpdateBasketReq) returns (UpdateBasketResp) {",
    "option (google.api.http) = {",
    "post: \"/user/{basket.user_id}/basket\"",
    "body: \"*\"",
    "};",
    "}",
]
T1_BASKET_SERVICE_WRAPPED = [
    str.join("\n", T1_BASKET_SERVICE_RPC_GET) + "\n",
    str.join("\n", T1_BASKET_SERVICE_RPC_POST) + "\n",
]
# --- Expected parser output --------------------------------------------------
# Root ScopedSection the parser is expected to produce for T1_LINES.
T1_SCOPED_SECTION_EXPECTED = ScopedSection()
T1_SCOPED_SECTION_EXPECTED.name = WORD_ROOT
T1_SCOPED_SECTION_EXPECTED.data_type = WORD_PROTO_FILE
T1_SCOPED_SECTION_EXPECTED.declaration_dict = {
    WORD_SERVICE: [
        Service(name="BasketService", rpc_list=[
            RPC(
                name="Update",
                req="UpdateBasketReq",
                resp="UpdateBasketResp",
                endpoint="/user/{basket.user_id}/basket",
                http_method=HttpMethod.POST,
            ),
            RPC(
                name="Get",
                req="GetBasketReq",
                resp="Basket",
                endpoint="/user/{basket.user_id}/basket",
                http_method=HttpMethod.GET,
            ),
        ])
    ],
    WORD_MESSAGE: [
        Message(
            name="UpdateBasketReq",
            declaration_dict={
                WORD_FIELD: [
                    MessageField(name="basket", data_type="Basket"),
                ]
            }
        ),
        Message(
            name="UpdateBasketResp",
            declaration_dict={
                WORD_FIELD: [
                    MessageField(name="subtotal", data_type="double"),
                    MessageField(name="total", data_type="double"),
                    MessageField(name="applied_promotions", data_type="api.gen.Promotion", is_array=True),
                ],
            },
        ),
        Message(
            name="Basket",
            declaration_dict={
                WORD_FIELD: [
                    MessageField(name="id", data_type="string"),
                    MessageField(name="user_id", data_type="string"),
                    MessageField(name="product_ids", data_type="string", is_array=True),
                ],
            },
        )
    ],
}
| <filename>test/data/t1_expected.py
from proto_parser.proto_parser import ScopedSection, WORD_ROOT, WORD_PROTO_FILE, WORD_SERVICE, Service, RPC, Message, HttpMethod, \
WORD_MESSAGE, MessageField, WORD_FIELD
T1_BASKET_SERVICE_CONTENT = [
"rpc Update(UpdateBasketReq) returns (UpdateBasketResp) {",
"option (google.api.http) = {",
"post: \"/user/{basket.user_id}/basket\"",
"body: \"*\"",
"};",
"}",
"rpc Get(GetBasketReq) returns (Basket) {",
"option (google.api.http) = {",
"get: \"/user/{basket.user_id}/basket\"",
"body: \"*\"",
"};",
"}",
]
T1_BASKET_SERVICE = [
"service BasketService {",
"rpc Update(UpdateBasketReq) returns (UpdateBasketResp) {",
"option (google.api.http) = {",
"post: \"/user/{basket.user_id}/basket\"",
"body: \"*\"",
"};",
"}",
"rpc Get(GetBasketReq) returns (Basket) {",
"option (google.api.http) = {",
"get: \"/user/{basket.user_id}/basket\"",
"body: \"*\"",
"};",
"}",
"}",
]
T1_MESSAGE_UPDATEBASKETREQ = [
"message UpdateBasketReq {",
"Basket basket = 1;",
"}",
]
T1_MESSAGE_UPDATEBASKETRESP = [
"message UpdateBasketResp {",
"double subtotal = 1;",
"double total = 2;",
"repeated api.gen.Promotion applied_promotions = 3;",
"}",
]
T1_MESSAGE_UPDATEBASKET = [
"message Basket {",
"string id = 1;",
"string user_id = 2;",
"repeated string product_ids = 3;",
"}",
]
T1_LINES = [
str.join("\n", T1_BASKET_SERVICE) + "\n",
str.join("\n", T1_MESSAGE_UPDATEBASKETREQ) + "\n",
str.join("\n", T1_MESSAGE_UPDATEBASKETRESP) + "\n",
str.join("\n", T1_MESSAGE_UPDATEBASKET) + "\n"
]
T1_BASKET_SERVICE_RPC_GET = [
"rpc Get(GetBasketReq) returns (Basket) {",
"option (google.api.http) = {",
"get: \"/user/{basket.user_id}/basket\"",
"body: \"*\"",
"};",
"}",
]
T1_BASKET_SERVICE_RPC_POST = [
"rpc Update(UpdateBasketReq) returns (UpdateBasketResp) {",
"option (google.api.http) = {",
"post: \"/user/{basket.user_id}/basket\"",
"body: \"*\"",
"};",
"}",
]
T1_BASKET_SERVICE_WRAPPED = [
str.join("\n", T1_BASKET_SERVICE_RPC_GET) + "\n",
str.join("\n", T1_BASKET_SERVICE_RPC_POST) + "\n",
]
T1_SCOPED_SECTION_EXPECTED = ScopedSection()
T1_SCOPED_SECTION_EXPECTED.name = WORD_ROOT
T1_SCOPED_SECTION_EXPECTED.data_type = WORD_PROTO_FILE
T1_SCOPED_SECTION_EXPECTED.declaration_dict = {
WORD_SERVICE: [
Service(name="BasketService", rpc_list=[
RPC(
name="Update",
req="UpdateBasketReq",
resp="UpdateBasketResp",
endpoint="/user/{basket.user_id}/basket",
http_method=HttpMethod.POST,
),
RPC(
name="Get",
req="GetBasketReq",
resp="Basket",
endpoint="/user/{basket.user_id}/basket",
http_method=HttpMethod.GET,
),
])
],
WORD_MESSAGE: [
Message(
name="UpdateBasketReq",
declaration_dict={
WORD_FIELD: [
MessageField(name="basket", data_type="Basket"),
]
}
),
Message(
name="UpdateBasketResp",
declaration_dict={
WORD_FIELD: [
MessageField(name="subtotal", data_type="double"),
MessageField(name="total", data_type="double"),
MessageField(name="applied_promotions", data_type="api.gen.Promotion", is_array=True),
],
},
),
Message(
name="Basket",
declaration_dict={
WORD_FIELD: [
MessageField(name="id", data_type="string"),
MessageField(name="user_id", data_type="string"),
MessageField(name="product_ids", data_type="string", is_array=True),
],
},
)
],
}
| none | 1 | 2.166019 | 2 | |
PG/7-PPO/config.py | g6ling/Pytorch-Cartpole | 116 | 6624721 | import torch
env_name = 'CartPole-v1'
gamma = 0.99  # discount factor
lr = 0.001  # optimizer learning rate
goal_score = 200  # average score at which training is considered solved
log_interval = 10  # episodes between log lines
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
lambda_gae = 0.96  # GAE lambda for advantage estimation
epsilon_clip = 0.2  # PPO surrogate-objective clipping range
# NOTE(review): name is misspelled ("ciritic" -> "critic"), but other modules
# import it under this name, so renaming here would break callers.
ciritic_coefficient = 0.5  # weight of the value-function loss term
entropy_coefficient = 0.01  # weight of the entropy bonus
batch_size = 8
epoch_k = 10  # optimization epochs per PPO update
| import torch
env_name = 'CartPole-v1'
gamma = 0.99
lr = 0.001
goal_score = 200
log_interval = 10
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
lambda_gae = 0.96
epsilon_clip = 0.2
ciritic_coefficient = 0.5
entropy_coefficient = 0.01
batch_size = 8
epoch_k = 10
| none | 1 | 1.748558 | 2 | |
{{cookiecutter.project_shortname}}/tests/api/test_api_simple_flow.py | mvidalgarcia/cookiecutter-invenio-instance | 0 | 6624722 | {% include 'misc/header.py' %}
"""Test simple rest flow."""
import json
from invenio_search import current_search
def test_simple_flow(client):
"""Test simple flow using REST API."""
headers = [('Content-Type', 'application/json')]
data = {
'title': 'The title of the record ',
'contributors': [
{'name': '<NAME>'},
]
}
url = 'https://localhost:5000/records/'
# create a record
response = client.post(url, data=json.dumps(data), headers=headers)
assert response.status_code == 201
current_search.flush_and_refresh('records')
# retrieve record
res = client.get('https://localhost:5000/records/1')
assert res.status_code == 200
| {% include 'misc/header.py' %}
"""Test simple rest flow."""
import json
from invenio_search import current_search
def test_simple_flow(client):
"""Test simple flow using REST API."""
headers = [('Content-Type', 'application/json')]
data = {
'title': 'The title of the record ',
'contributors': [
{'name': '<NAME>'},
]
}
url = 'https://localhost:5000/records/'
# create a record
response = client.post(url, data=json.dumps(data), headers=headers)
assert response.status_code == 201
current_search.flush_and_refresh('records')
# retrieve record
res = client.get('https://localhost:5000/records/1')
assert res.status_code == 200
| en | 0.486129 | Test simple rest flow. Test simple flow using REST API. # create a record # retrieve record | 2.239959 | 2 |
resources/2019-05-16/zad1.py | lopiola/2lo | 0 | 6624723 | # -*- coding: utf-8 -*-
# Program otrzymuje listę zbiorów liczb,
# następnie wypisuje listę sum liczb w każdym zbiorze.
# Dla podanego przykładu (list1) program powinien wypisać: [10, 22, 38]
#
# W programie popełniono jednak 2 błędy.
# Znajdź je żeby otrzymać poprawny wynik!
if __name__ == '__main__':
list1 = [(1, 2, 3, 4), (4, 5, 6, 7), (8, 9, 10, 11)]
list2 = []
list_sum = 0
for t in list1:
for i in range(len(t)):
list_sum = list_sum + t[i]
list2.append(list_sum)
print(list2)
| # -*- coding: utf-8 -*-
# Program otrzymuje listę zbiorów liczb,
# następnie wypisuje listę sum liczb w każdym zbiorze.
# Dla podanego przykładu (list1) program powinien wypisać: [10, 22, 38]
#
# W programie popełniono jednak 2 błędy.
# Znajdź je żeby otrzymać poprawny wynik!
if __name__ == '__main__':
list1 = [(1, 2, 3, 4), (4, 5, 6, 7), (8, 9, 10, 11)]
list2 = []
list_sum = 0
for t in list1:
for i in range(len(t)):
list_sum = list_sum + t[i]
list2.append(list_sum)
print(list2)
| pl | 0.997994 | # -*- coding: utf-8 -*- # Program otrzymuje listę zbiorów liczb, # następnie wypisuje listę sum liczb w każdym zbiorze. # Dla podanego przykładu (list1) program powinien wypisać: [10, 22, 38] # # W programie popełniono jednak 2 błędy. # Znajdź je żeby otrzymać poprawny wynik! | 3.776202 | 4 |
utils/bots/CoreBot/cogs/RedirectService.py | Space-Turtle0/Timmy-BU | 0 | 6624724 | <reponame>Space-Turtle0/Timmy-BU
import os
import aiohttp
import discord
from dotenv import load_dotenv
from core import database
from core.checks import is_botAdmin
from discord.ext import commands
from core import redirect_sdk
load_dotenv()
class RedirectURL(commands.Cog):
    """Bot-admin commands for managing rs.schoolsimplified.org URL redirects."""

    def __init__(self, bot):
        self.bot = bot
        self.domain = "rs.schoolsimplified.org"
        # Redirect-service client; the API token comes from the RP_TK env var.
        self.raOBJ = redirect_sdk.RedirectClient(
            os.getenv("RP_TK"), domain="https://rs.schoolsimplified.org"
        )

    # BUG FIX: discord.py's keyword is ``aliases`` -- the misspelled
    # ``alliases`` was silently ignored, so these aliases never worked.
    @commands.command(aliases=["redirectadd", "addredirect"])
    @is_botAdmin
    async def ra(self, ctx, redirect_code, destination_url: str):
        """Create a redirect from /<redirect_code> to *destination_url*."""
        val = self.raOBJ.add_redirect(redirect_code, destination_url)
        await ctx.send(
            f"Redirect added for {destination_url} with redirect path /{redirect_code}\nCreated with the ID: {val.id}. In order to delete this redirect, you'll need this ID!\n\nAccess it at https://rs.schoolsimplified.org/{redirect_code}"
        )

    @commands.command(aliases=["redirectremove", "removeredirect"])
    @is_botAdmin
    async def rr(self, ctx, ID):
        """Delete the redirect with the given ID."""
        self.raOBJ.del_redirect(ID)
        await ctx.send(f"Redirect removed for {ID}")

    @commands.command(aliases=["redirectlist", "listredirect"])
    @is_botAdmin
    async def rl(self, ctx):
        """List every redirect for the domain in an embed."""
        objlist = self.raOBJ.get_redirects()
        newlist = []
        for obj in objlist:
            newlist.append(
                f"**ID:** {obj.id} | **URL:** `https://{obj.domain}/{obj.source}` -> `{obj.destination}`"
            )
        newlist = "\n".join(newlist)
        embed = discord.Embed(
            title=f"Redirects for {self.raOBJ.domain}", color=discord.Color.blue()
        )
        embed.add_field(name="Redirects", value=newlist)
        await ctx.send(embed=embed)
def setup(bot):
    # discord.py extension entry point: called by bot.load_extension().
    bot.add_cog(RedirectURL(bot))
| import os
import aiohttp
import discord
from dotenv import load_dotenv
from core import database
from core.checks import is_botAdmin
from discord.ext import commands
from core import redirect_sdk
load_dotenv()
class RedirectURL(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.domain = "rs.schoolsimplified.org"
self.raOBJ = redirect_sdk.RedirectClient(
os.getenv("RP_TK"), domain="https://rs.schoolsimplified.org"
)
@commands.command(alliases=["redirectadd", "addredirect"])
@is_botAdmin
async def ra(self, ctx, redirect_code, destination_url: str):
val = self.raOBJ.add_redirect(redirect_code, destination_url)
await ctx.send(
f"Redirect added for {destination_url} with redirect path /{redirect_code}\nCreated with the ID: {val.id}. In order to delete this redirect, you'll need this ID!\n\nAccess it at https://rs.schoolsimplified.org/{redirect_code}"
)
@commands.command(alliases=["redirectremove", "removeredirect"])
@is_botAdmin
async def rr(self, ctx, ID):
self.raOBJ.del_redirect(ID)
await ctx.send(f"Redirect removed for {ID}")
@commands.command(alliases=["redirectlist", "listredirect"])
@is_botAdmin
async def rl(self, ctx):
objlist = self.raOBJ.get_redirects()
newlist = []
for obj in objlist:
newlist.append(
f"**ID:** {obj.id} | **URL:** `https://{obj.domain}/{obj.source}` -> `{obj.destination}`"
)
newlist = "\n".join(newlist)
embed = discord.Embed(
title=f"Redirects for {self.raOBJ.domain}", color=discord.Color.blue()
)
embed.add_field(name="Redirects", value=newlist)
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(RedirectURL(bot)) | none | 1 | 2.407872 | 2 | |
exp.py | YSL-1997/DBx1000 | 0 | 6624725 | <reponame>YSL-1997/DBx1000
#!/usr/bin/env python3
import os
import os.path
import re
import shutil
import subprocess as sp
from pathlib import Path
# Template config and the live config consumed by the DBx1000 Makefile.
CFG_STD = "config-std.h"
CFG_CURR = "config.h"
# All per-job outputs land under results/<exp_name>/<job_name>/.
RESULTS_DIR = Path("results")
def replace(filename, pattern, replacement):
    """Regex-substitute *pattern* with *replacement* in *filename*, in place."""
    # Use context managers so the handles are closed even if read/write fails
    # (the original leaked the read handle on an exception).
    with open(filename) as f:
        s = f.read()
    s = re.sub(pattern, replacement, s)
    with open(filename, "w") as f:
        f.write(s)
def execute(cmd, out_path, err_path):
p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE, shell=True)
stdout, stderr = p.communicate()
out_str, err_str = stdout.decode(), stderr.decode()
with open(out_path, "w") as fout, open(err_path, "w") as ferr:
print(out_str, file=fout)
print(err_str, file=ferr)
return p.returncode, out_str, err_str
def test_compile(job_name, job, result_dir):
os.system("cp " + CFG_STD + " " + CFG_CURR)
for param, value in job.items():
pattern = r"\#define\s" + re.escape(param) + r".*"
replacement = "#define " + param + " " + str(value)
replace(CFG_CURR, pattern, replacement)
ret, _, _ = execute(
"make -j",
out_path=result_dir / "compile.out",
err_path=result_dir / "compile.err",
)
if ret != 0:
print(f"ERROR in compiling job {job_name}")
else:
print(f"PASS compile\t {job_name}")
def test_run(job_name, job, result_dir):
_, stdout, _ = execute(
f"./rundb -o {result_dir / 'result.txt'}",
out_path=result_dir / "run.out",
err_path=result_dir / "run.err",
)
if "PASS" in stdout:
print(f"PASS execution\t {job_name}")
else:
print(f"FAILED execution. {job_name}")
def get_job_name(job):
return ",".join(f"{k}={v}" for k, v in job.items())
def run_exp(exp_name, jobs):
for job in jobs:
job_name = get_job_name(job)
result_dir = RESULTS_DIR / exp_name / job_name
if result_dir.exists():
print(f"WARNING skip\t {job_name}")
else:
os.makedirs(result_dir)
test_compile(job_name, job, result_dir)
test_run(job_name, job, result_dir)
scalability_exp = [
{
"WORKLOAD": workload,
"THREAD_CNT": num_threads,
"CC_ALG": alg,
"INDEX_STRUCT": index,
}
for workload in ["YCSB", "TPCC"]
for alg in ["DL_DETECT", "NO_WAIT", "HEKATON", "SILO", "TICTOC"]
for index in ["IDX_BTREE", "IDX_HASH"]
# for num_threads in [2 ** i for i in range(0, 8)]
for num_threads in [2 ** i for i in range(0, 6)]
]
fanout_exp = [
{
"WORKLOAD": workload,
"THREAD_CNT": num_threads,
"CC_ALG": alg,
"INDEX_STRUCT": index,
"BTREE_ORDER": fanout,
}
for workload in ["TPCC"]
for alg in ["NO_WAIT"]
for index in ["IDX_BTREE"]
for num_threads in [1]
for fanout in [2**i for i in range(2, 15)]
]
contention_exp = [
{
"WORKLOAD": workload,
"THREAD_CNT": num_threads,
"CC_ALG": alg,
"INDEX_STRUCT": index,
"NUM_WH": num_wh,
}
for workload in ["TPCC"]
for alg in ["NO_WAIT"]
for index in ["IDX_BTREE", "IDX_HASH"]
for num_threads in [1]
for num_wh in [i for i in range(1, 21)]
]
rw_exp = [
{
"WORKLOAD": workload,
"THREAD_CNT": num_threads,
"CC_ALG": alg,
"INDEX_STRUCT": index,
"READ_PERC": round(read_perc, 1),
"WRITE_PERC": round(1 - read_perc, 1),
}
for workload in ["YCSB"]
for alg in ["NO_WAIT"]
for index in ["IDX_BTREE", "IDX_HASH"]
for num_threads in [1]
for read_perc in [0.1 * i for i in range(11)]
]
hotset_exp = [
{
"WORKLOAD": workload,
"THREAD_CNT": num_threads,
"CC_ALG": alg,
"INDEX_STRUCT": index,
"ZIPF_THETA": zipf_theta,
}
for workload in ["YCSB"]
for alg in ["NO_WAIT"]
for index in ["IDX_BTREE", "IDX_HASH"]
for num_threads in [1]
for zipf_theta in [i / 10 for i in range(10)]
]
latch_exp = [
{
"WORKLOAD": workload,
"THREAD_CNT": num_threads,
"CC_ALG": alg,
"INDEX_STRUCT": index,
"ENABLE_LATCH": latch,
}
for workload in ["YCSB", "TPCC"]
for alg in ["NO_WAIT"]
for index in ["IDX_BTREE", "IDX_HASH"]
for num_threads in [1]
for latch in ["true", "false"]
]
def main():
# run_exp("scalability", scalability_exp)
# run_exp("fanout", fanout_exp)
# run_exp("contention", contention_exp)
run_exp("rw", rw_exp)
# run_exp("hotset", hotset_exp)
# run_exp("latch", latch_exp)
if __name__ == "__main__":
main()
| #!/usr/bin/env python3
import os
import os.path
import re
import subprocess as sp
from pathlib import Path
CFG_STD = "config-std.h"
CFG_CURR = "config.h"
RESULTS_DIR = Path("results")
def replace(filename, pattern, replacement):
f = open(filename)
s = f.read()
f.close()
s = re.sub(pattern, replacement, s)
f = open(filename, "w")
f.write(s)
f.close()
def execute(cmd, out_path, err_path):
p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE, shell=True)
stdout, stderr = p.communicate()
out_str, err_str = stdout.decode(), stderr.decode()
with open(out_path, "w") as fout, open(err_path, "w") as ferr:
print(out_str, file=fout)
print(err_str, file=ferr)
return p.returncode, out_str, err_str
def test_compile(job_name, job, result_dir):
os.system("cp " + CFG_STD + " " + CFG_CURR)
for param, value in job.items():
pattern = r"\#define\s" + re.escape(param) + r".*"
replacement = "#define " + param + " " + str(value)
replace(CFG_CURR, pattern, replacement)
ret, _, _ = execute(
"make -j",
out_path=result_dir / "compile.out",
err_path=result_dir / "compile.err",
)
if ret != 0:
print(f"ERROR in compiling job {job_name}")
else:
print(f"PASS compile\t {job_name}")
def test_run(job_name, job, result_dir):
_, stdout, _ = execute(
f"./rundb -o {result_dir / 'result.txt'}",
out_path=result_dir / "run.out",
err_path=result_dir / "run.err",
)
if "PASS" in stdout:
print(f"PASS execution\t {job_name}")
else:
print(f"FAILED execution. {job_name}")
def get_job_name(job):
return ",".join(f"{k}={v}" for k, v in job.items())
def run_exp(exp_name, jobs):
for job in jobs:
job_name = get_job_name(job)
result_dir = RESULTS_DIR / exp_name / job_name
if result_dir.exists():
print(f"WARNING skip\t {job_name}")
else:
os.makedirs(result_dir)
test_compile(job_name, job, result_dir)
test_run(job_name, job, result_dir)
scalability_exp = [
{
"WORKLOAD": workload,
"THREAD_CNT": num_threads,
"CC_ALG": alg,
"INDEX_STRUCT": index,
}
for workload in ["YCSB", "TPCC"]
for alg in ["DL_DETECT", "NO_WAIT", "HEKATON", "SILO", "TICTOC"]
for index in ["IDX_BTREE", "IDX_HASH"]
# for num_threads in [2 ** i for i in range(0, 8)]
for num_threads in [2 ** i for i in range(0, 6)]
]
fanout_exp = [
{
"WORKLOAD": workload,
"THREAD_CNT": num_threads,
"CC_ALG": alg,
"INDEX_STRUCT": index,
"BTREE_ORDER": fanout,
}
for workload in ["TPCC"]
for alg in ["NO_WAIT"]
for index in ["IDX_BTREE"]
for num_threads in [1]
for fanout in [2**i for i in range(2, 15)]
]
contention_exp = [
{
"WORKLOAD": workload,
"THREAD_CNT": num_threads,
"CC_ALG": alg,
"INDEX_STRUCT": index,
"NUM_WH": num_wh,
}
for workload in ["TPCC"]
for alg in ["NO_WAIT"]
for index in ["IDX_BTREE", "IDX_HASH"]
for num_threads in [1]
for num_wh in [i for i in range(1, 21)]
]
rw_exp = [
{
"WORKLOAD": workload,
"THREAD_CNT": num_threads,
"CC_ALG": alg,
"INDEX_STRUCT": index,
"READ_PERC": round(read_perc, 1),
"WRITE_PERC": round(1 - read_perc, 1),
}
for workload in ["YCSB"]
for alg in ["NO_WAIT"]
for index in ["IDX_BTREE", "IDX_HASH"]
for num_threads in [1]
for read_perc in [0.1 * i for i in range(11)]
]
hotset_exp = [
{
"WORKLOAD": workload,
"THREAD_CNT": num_threads,
"CC_ALG": alg,
"INDEX_STRUCT": index,
"ZIPF_THETA": zipf_theta,
}
for workload in ["YCSB"]
for alg in ["NO_WAIT"]
for index in ["IDX_BTREE", "IDX_HASH"]
for num_threads in [1]
for zipf_theta in [i / 10 for i in range(10)]
]
latch_exp = [
{
"WORKLOAD": workload,
"THREAD_CNT": num_threads,
"CC_ALG": alg,
"INDEX_STRUCT": index,
"ENABLE_LATCH": latch,
}
for workload in ["YCSB", "TPCC"]
for alg in ["NO_WAIT"]
for index in ["IDX_BTREE", "IDX_HASH"]
for num_threads in [1]
for latch in ["true", "false"]
]
def main():
# run_exp("scalability", scalability_exp)
# run_exp("fanout", fanout_exp)
# run_exp("contention", contention_exp)
run_exp("rw", rw_exp)
# run_exp("hotset", hotset_exp)
# run_exp("latch", latch_exp)
if __name__ == "__main__":
main() | en | 0.468196 | #!/usr/bin/env python3 #define\s" + re.escape(param) + r".*" # for num_threads in [2 ** i for i in range(0, 8)] # run_exp("scalability", scalability_exp) # run_exp("fanout", fanout_exp) # run_exp("contention", contention_exp) # run_exp("hotset", hotset_exp) # run_exp("latch", latch_exp) | 2.590799 | 3 |
test/test_20_filterlogmsg.py | growell/svnhook | 1 | 6624726 | <filename>test/test_20_filterlogmsg.py<gh_stars>1-10
#!/usr/bin/env python
######################################################################
# Test Log Message Filter
######################################################################
import os, re, sys, unittest
# Prefer local modules.
mylib = os.path.normpath(os.path.join(
os.path.dirname(__file__), '..'))
if os.path.isdir(mylib): sys.path.insert(0, mylib)
from test.base import HookTestCase
class TestFilterLogMsg(HookTestCase):
"""File Content Filter Tests"""
def setUp(self):
super(TestFilterLogMsg, self).setUp(
re.sub(r'^test_?(.+)\.[^\.]+$', r'\1',
os.path.basename(__file__)))
def test_01_no_regex(self):
"""No regex tag"""
# Define the hook configuration.
self.writeConf('pre-commit.xml', '''\
<?xml version="1.0"?>
<Actions>
<FilterLogMsg>
<SendError>Not gonna happen.</SendError>
</FilterLogMsg>
</Actions>
''')
# Add a working copy change.
self.addWcFile('fileA1.txt')
# Attempt to commit the change.
p = self.commitWc()
# Verify that an error is indicated. Please note that this is
# NOT the hook script exit code. This is the "svn commit" exit
# code - that indicates if the commit succeeded (zero) or
# failed (one).
self.assertEqual(
p.returncode, 1,
'Error exit code not found:'\
' exit code = {0}'.format(p.returncode))
# Verify that the proper error is indicated.
self.assertRegexpMatches(
p.stderr.read(), r'Internal hook error',
'Internal error message not returned')
# Verify that the detailed error is logged.
self.assertLogRegexp(
'pre-commit', r'\nValueError: Required tag missing',
'Expected error not found in hook log')
def test_02_match(self):
"""Log message match"""
# Define the hook configuration.
self.writeConf('pre-commit.xml', '''\
<?xml version="1.0"?>
<Actions>
<FilterLogMsg>
<LogMsgRegex>secret</LogMsgRegex>
<SendError>Cannot expose secret!</SendError>
</FilterLogMsg>
</Actions>
''')
# Add a working copy change.
self.addWcFile('fileA1.txt')
# Attempt to commit the change.
p = self.commitWc('Tell the secret.')
# Verify that an error is indicated.
self.assertEqual(
p.returncode, 1,
'Error exit code not found:'\
' exit code = {0}'.format(p.returncode))
# Verify that the proper error is indicated.
self.assertRegexpMatches(
p.stderr.read(), r'Cannot expose secret!',
'Expected error message not found')
def test_03_mismatch(self):
"""Log message mismatch"""
# Define the hook configuration.
self.writeConf('pre-commit.xml', '''\
<?xml version="1.0"?>
<Actions>
<FilterLogMsg>
<LogMsgRegex>secret</LogMsgRegex>
<SendError>Cannot expose secret!</SendError>
</FilterLogMsg>
</Actions>
''')
# Add a working copy change.
self.addWcFile('fileA1.txt')
# Attempt to commit the change.
p = self.commitWc('I\'m not telling.')
# Verify that an error isn't indicated.
self.assertEqual(
p.returncode, 0,
'Success exit code not found:'\
' exit code = {0}'.format(p.returncode))
# Verify that an error message isn't returned.
self.assertRegexpMatches(
p.stderr.read(), r'(?s)^\s*$',
'Unexpected error message found')
def test_04_no_required_msg(self):
"""Required log message missing"""
# Define the hook configuration.
self.writeConf('pre-commit.xml', '''\
<?xml version="1.0"?>
<Actions>
<FilterLogMsg>
<LogMsgRegex sense="0">\S</LogMsgRegex>
<SendError>Log message is required.</SendError>
</FilterLogMsg>
</Actions>
''')
# Add a working copy change.
self.addWcFile('fileA1.txt')
# Attempt to commit the change (without a message).
p = self.commitWc()
# Verify that an error is indicated.
self.assertEqual(
p.returncode, 1,
'Error exit code not found:'\
' exit code = {0}'.format(p.returncode))
# Verify that the proper error is indicated.
self.assertRegexpMatches(
p.stderr.read(), r'Log message is required',
'Expected error message not found')
# Allow manual execution of tests.
if __name__=='__main__':
for tclass in [TestFilterLogMsg]:
suite = unittest.TestLoader().loadTestsFromTestCase(tclass)
unittest.TextTestRunner(verbosity=2).run(suite)
########################### end of file ##############################
| <filename>test/test_20_filterlogmsg.py<gh_stars>1-10
#!/usr/bin/env python
######################################################################
# Test Log Message Filter
######################################################################
import os, re, sys, unittest
# Prefer local modules.
mylib = os.path.normpath(os.path.join(
os.path.dirname(__file__), '..'))
if os.path.isdir(mylib): sys.path.insert(0, mylib)
from test.base import HookTestCase
class TestFilterLogMsg(HookTestCase):
"""File Content Filter Tests"""
def setUp(self):
super(TestFilterLogMsg, self).setUp(
re.sub(r'^test_?(.+)\.[^\.]+$', r'\1',
os.path.basename(__file__)))
def test_01_no_regex(self):
"""No regex tag"""
# Define the hook configuration.
self.writeConf('pre-commit.xml', '''\
<?xml version="1.0"?>
<Actions>
<FilterLogMsg>
<SendError>Not gonna happen.</SendError>
</FilterLogMsg>
</Actions>
''')
# Add a working copy change.
self.addWcFile('fileA1.txt')
# Attempt to commit the change.
p = self.commitWc()
# Verify that an error is indicated. Please note that this is
# NOT the hook script exit code. This is the "svn commit" exit
# code - that indicates if the commit succeeded (zero) or
# failed (one).
self.assertEqual(
p.returncode, 1,
'Error exit code not found:'\
' exit code = {0}'.format(p.returncode))
# Verify that the proper error is indicated.
self.assertRegexpMatches(
p.stderr.read(), r'Internal hook error',
'Internal error message not returned')
# Verify that the detailed error is logged.
self.assertLogRegexp(
'pre-commit', r'\nValueError: Required tag missing',
'Expected error not found in hook log')
def test_02_match(self):
"""Log message match"""
# Define the hook configuration.
self.writeConf('pre-commit.xml', '''\
<?xml version="1.0"?>
<Actions>
<FilterLogMsg>
<LogMsgRegex>secret</LogMsgRegex>
<SendError>Cannot expose secret!</SendError>
</FilterLogMsg>
</Actions>
''')
# Add a working copy change.
self.addWcFile('fileA1.txt')
# Attempt to commit the change.
p = self.commitWc('Tell the secret.')
# Verify that an error is indicated.
self.assertEqual(
p.returncode, 1,
'Error exit code not found:'\
' exit code = {0}'.format(p.returncode))
# Verify that the proper error is indicated.
self.assertRegexpMatches(
p.stderr.read(), r'Cannot expose secret!',
'Expected error message not found')
def test_03_mismatch(self):
"""Log message mismatch"""
# Define the hook configuration.
self.writeConf('pre-commit.xml', '''\
<?xml version="1.0"?>
<Actions>
<FilterLogMsg>
<LogMsgRegex>secret</LogMsgRegex>
<SendError>Cannot expose secret!</SendError>
</FilterLogMsg>
</Actions>
''')
# Add a working copy change.
self.addWcFile('fileA1.txt')
# Attempt to commit the change.
p = self.commitWc('I\'m not telling.')
# Verify that an error isn't indicated.
self.assertEqual(
p.returncode, 0,
'Success exit code not found:'\
' exit code = {0}'.format(p.returncode))
# Verify that an error message isn't returned.
self.assertRegexpMatches(
p.stderr.read(), r'(?s)^\s*$',
'Unexpected error message found')
def test_04_no_required_msg(self):
"""Required log message missing"""
# Define the hook configuration.
self.writeConf('pre-commit.xml', '''\
<?xml version="1.0"?>
<Actions>
<FilterLogMsg>
<LogMsgRegex sense="0">\S</LogMsgRegex>
<SendError>Log message is required.</SendError>
</FilterLogMsg>
</Actions>
''')
# Add a working copy change.
self.addWcFile('fileA1.txt')
# Attempt to commit the change (without a message).
p = self.commitWc()
# Verify that an error is indicated.
self.assertEqual(
p.returncode, 1,
'Error exit code not found:'\
' exit code = {0}'.format(p.returncode))
# Verify that the proper error is indicated.
self.assertRegexpMatches(
p.stderr.read(), r'Log message is required',
'Expected error message not found')
# Allow manual execution of tests.
if __name__=='__main__':
for tclass in [TestFilterLogMsg]:
suite = unittest.TestLoader().loadTestsFromTestCase(tclass)
unittest.TextTestRunner(verbosity=2).run(suite)
########################### end of file ##############################
| en | 0.561861 | #!/usr/bin/env python ###################################################################### # Test Log Message Filter ###################################################################### # Prefer local modules. File Content Filter Tests No regex tag # Define the hook configuration. \ <?xml version="1.0"?> <Actions> <FilterLogMsg> <SendError>Not gonna happen.</SendError> </FilterLogMsg> </Actions> # Add a working copy change. # Attempt to commit the change. # Verify that an error is indicated. Please note that this is # NOT the hook script exit code. This is the "svn commit" exit # code - that indicates if the commit succeeded (zero) or # failed (one). # Verify that the proper error is indicated. # Verify that the detailed error is logged. Log message match # Define the hook configuration. \ <?xml version="1.0"?> <Actions> <FilterLogMsg> <LogMsgRegex>secret</LogMsgRegex> <SendError>Cannot expose secret!</SendError> </FilterLogMsg> </Actions> # Add a working copy change. # Attempt to commit the change. # Verify that an error is indicated. # Verify that the proper error is indicated. Log message mismatch # Define the hook configuration. \ <?xml version="1.0"?> <Actions> <FilterLogMsg> <LogMsgRegex>secret</LogMsgRegex> <SendError>Cannot expose secret!</SendError> </FilterLogMsg> </Actions> # Add a working copy change. # Attempt to commit the change. # Verify that an error isn't indicated. # Verify that an error message isn't returned. Required log message missing # Define the hook configuration. \ <?xml version="1.0"?> <Actions> <FilterLogMsg> <LogMsgRegex sense="0">\S</LogMsgRegex> <SendError>Log message is required.</SendError> </FilterLogMsg> </Actions> # Add a working copy change. # Attempt to commit the change (without a message). # Verify that an error is indicated. # Verify that the proper error is indicated. # Allow manual execution of tests. ########################### end of file ############################## | 2.073139 | 2 |
GAN.py | oriyanh/digit-generation | 0 | 6624727 | import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras.layers import Dense, Flatten, Conv2D, Conv2DTranspose, \
Reshape, BatchNormalization, LeakyReLU, Dropout
from tensorflow.keras import Model
LATENT_DIM = 100
NUM_EPOCHS = 50
BATCH_SIZE = 256
LEARNING_RATE = 1e-4
d_train_loss = tf.keras.metrics.Mean(name='disc_train_loss')
g_train_accuracy = tf.keras.metrics.BinaryAccuracy(name='gen_train_accuracy')
g_train_loss = tf.keras.metrics.Mean(name='gen_train_loss')
disc_optimizer = tf.keras.optimizers.Adam(LEARNING_RATE)
gen_optimizer = tf.keras.optimizers.Adam(LEARNING_RATE)
loss_cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
def loss_discriminator_obj(real, fake):
real_loss = loss_cross_entropy(tf.ones_like(real), real)
fake_loss = loss_cross_entropy(tf.zeros_like(fake), fake)
return real_loss + fake_loss
def loss_generator_obj(fake):
return loss_cross_entropy(tf.ones_like(fake), fake)
class GANDiscriminator(Model):
def __init__(self):
super(GANDiscriminator, self).__init__()
self.conv1 = Conv2D(64, 5, activation=tf.nn.leaky_relu, strides=2,
padding='SAME', input_shape=(28, 28, 1))
self.dropout1 = Dropout(0.3)
self.conv2 = Conv2D(128, 5, activation=tf.nn.leaky_relu, strides=2,
padding='SAME')
self.dropout2 = Dropout(0.3)
self.flatten = Flatten()
self.d1 = Dense(1)
def call(self, x):
x = self.conv1(x)
x = self.dropout1(x)
x = self.conv2(x)
x = self.dropout2(x)
x = self.flatten(x)
return self.d1(x)
class GANGenerator(Model):
def __init__(self, latent_dim):
super(GANGenerator, self).__init__()
self.d1 = Dense(7 * 7 * 256, input_dim=latent_dim, use_bias=False)
self.bn1 = BatchNormalization()
self.leaky_relu1 = tf.keras.layers.LeakyReLU()
self.resh = Reshape((7, 7, 256))
self.conv1t = Conv2DTranspose(128, 5, strides=1,
padding='SAME', input_shape=(7, 7, 256), use_bias=False)
self.bn2 = BatchNormalization()
self.leaky_relu2 = tf.keras.layers.LeakyReLU()
self.conv2t = Conv2DTranspose(64, 5, strides=2,
padding='SAME', input_shape=(7, 7, 128), use_bias=False)
self.bn3 = BatchNormalization()
self.leaky_relu3 = tf.keras.layers.LeakyReLU()
self.conv3t = Conv2DTranspose(1, 5, strides=2,
activation='tanh',
padding='SAME', input_shape=(14, 14, 64), use_bias=False)
def call(self, x):
x = self.d1(x)
x = self.bn1(x)
x = self.leaky_relu1(x)
x = self.resh(x)
x = self.conv1t(x)
x = self.bn2(x)
x = self.leaky_relu2(x)
x = self.conv2t(x)
x = self.bn3(x)
x = self.leaky_relu3(x)
return self.conv3t(x)
def get_train_step_gan(batch_size, latent_dim):
""" Wrapper for training step, needed if running more than one model
per run
:return: train step function
"""
@tf.function
def train_step(generator, discriminator, im_batch):
noise = sample_Z(batch_size, latent_dim)
with tf.GradientTape() as gan_grad_tape:
with tf.GradientTape() as disc_grad_tape:
gen_images = generator(noise, training=True)
preds_real = discriminator(im_batch, training=True)
preds_fake = discriminator(gen_images, training=True)
loss_gen = loss_generator_obj(preds_fake)
loss_disc = loss_discriminator_obj(preds_real, preds_fake)
disc_grads = disc_grad_tape.gradient(loss_disc,
discriminator.trainable_variables)
disc_optimizer.apply_gradients(zip(disc_grads,
discriminator.trainable_variables))
gen_grads = gan_grad_tape.gradient(loss_gen, generator.trainable_variables)
gen_optimizer.apply_gradients(zip(gen_grads, generator.trainable_variables))
d_train_loss(loss_disc)
g_train_loss(loss_gen)
g_train_accuracy(tf.ones_like(preds_fake), preds_fake)
return train_step
def sample_Z(batch_size, latent_dim):
return tf.random.normal([batch_size, latent_dim])
def train(generator, discriminator, images, latent_dim, num_epochs, batch_size):
""" Trains a model (subclassing tf.keras.Model) over MNIST data collection
:param load_data:
:param use_full_train_set:
:param Model model: Model to train, whose __call__() function accepts a
batch of 28x28 greyscale images and returns a 10-class logits
:param int num_epochs: Number of epochs to train with
:param int batch_size: Batch size
:param train_metric: either `train_loss` or `train_accuracy`
:param test_metric: either `test_loss` or `test_accuracy`
:param List metric_scaling_factor: ints [train_metric_scale, test_metric_scale] .
Scales the value outputted by the metric at each measuring point by this value.
:returns List: [train_metric_values, test_metric_values]
"""
sample_noise = sample_Z(16, latent_dim)
shuffle_seed = 60000
train_ds = tf.data.Dataset.from_tensor_slices(images) \
.shuffle(shuffle_seed) \
.batch(batch_size)
train_step = get_train_step_gan(batch_size, latent_dim)
for epoch in range(num_epochs):
for image_batch in train_ds:
train_step(generator, discriminator, image_batch)
print(f'Epoch {epoch + 1} : Disc loss: {d_train_loss.result()}, Gen loss: {g_train_loss.result()}')
# Reset the metrics for the next epoch
d_train_loss.reset_states()
g_train_loss.reset_states()
generated_images_tensor = generator(sample_noise, training=False)
fig = plt.figure(figsize=(4, 4))
for i in range(generated_images_tensor.shape[0]):
plt.subplot(4, 4, i + 1)
plt.imshow(generated_images_tensor[i, :, :, 0] * 127.5 + 127.5, cmap='gray')
plt.axis('off')
plt.show()
| import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras.layers import Dense, Flatten, Conv2D, Conv2DTranspose, \
Reshape, BatchNormalization, LeakyReLU, Dropout
from tensorflow.keras import Model
LATENT_DIM = 100
NUM_EPOCHS = 50
BATCH_SIZE = 256
LEARNING_RATE = 1e-4
d_train_loss = tf.keras.metrics.Mean(name='disc_train_loss')
g_train_accuracy = tf.keras.metrics.BinaryAccuracy(name='gen_train_accuracy')
g_train_loss = tf.keras.metrics.Mean(name='gen_train_loss')
disc_optimizer = tf.keras.optimizers.Adam(LEARNING_RATE)
gen_optimizer = tf.keras.optimizers.Adam(LEARNING_RATE)
loss_cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
def loss_discriminator_obj(real, fake):
real_loss = loss_cross_entropy(tf.ones_like(real), real)
fake_loss = loss_cross_entropy(tf.zeros_like(fake), fake)
return real_loss + fake_loss
def loss_generator_obj(fake):
return loss_cross_entropy(tf.ones_like(fake), fake)
class GANDiscriminator(Model):
def __init__(self):
super(GANDiscriminator, self).__init__()
self.conv1 = Conv2D(64, 5, activation=tf.nn.leaky_relu, strides=2,
padding='SAME', input_shape=(28, 28, 1))
self.dropout1 = Dropout(0.3)
self.conv2 = Conv2D(128, 5, activation=tf.nn.leaky_relu, strides=2,
padding='SAME')
self.dropout2 = Dropout(0.3)
self.flatten = Flatten()
self.d1 = Dense(1)
def call(self, x):
x = self.conv1(x)
x = self.dropout1(x)
x = self.conv2(x)
x = self.dropout2(x)
x = self.flatten(x)
return self.d1(x)
class GANGenerator(Model):
def __init__(self, latent_dim):
super(GANGenerator, self).__init__()
self.d1 = Dense(7 * 7 * 256, input_dim=latent_dim, use_bias=False)
self.bn1 = BatchNormalization()
self.leaky_relu1 = tf.keras.layers.LeakyReLU()
self.resh = Reshape((7, 7, 256))
self.conv1t = Conv2DTranspose(128, 5, strides=1,
padding='SAME', input_shape=(7, 7, 256), use_bias=False)
self.bn2 = BatchNormalization()
self.leaky_relu2 = tf.keras.layers.LeakyReLU()
self.conv2t = Conv2DTranspose(64, 5, strides=2,
padding='SAME', input_shape=(7, 7, 128), use_bias=False)
self.bn3 = BatchNormalization()
self.leaky_relu3 = tf.keras.layers.LeakyReLU()
self.conv3t = Conv2DTranspose(1, 5, strides=2,
activation='tanh',
padding='SAME', input_shape=(14, 14, 64), use_bias=False)
def call(self, x):
x = self.d1(x)
x = self.bn1(x)
x = self.leaky_relu1(x)
x = self.resh(x)
x = self.conv1t(x)
x = self.bn2(x)
x = self.leaky_relu2(x)
x = self.conv2t(x)
x = self.bn3(x)
x = self.leaky_relu3(x)
return self.conv3t(x)
def get_train_step_gan(batch_size, latent_dim):
""" Wrapper for training step, needed if running more than one model
per run
:return: train step function
"""
@tf.function
def train_step(generator, discriminator, im_batch):
noise = sample_Z(batch_size, latent_dim)
with tf.GradientTape() as gan_grad_tape:
with tf.GradientTape() as disc_grad_tape:
gen_images = generator(noise, training=True)
preds_real = discriminator(im_batch, training=True)
preds_fake = discriminator(gen_images, training=True)
loss_gen = loss_generator_obj(preds_fake)
loss_disc = loss_discriminator_obj(preds_real, preds_fake)
disc_grads = disc_grad_tape.gradient(loss_disc,
discriminator.trainable_variables)
disc_optimizer.apply_gradients(zip(disc_grads,
discriminator.trainable_variables))
gen_grads = gan_grad_tape.gradient(loss_gen, generator.trainable_variables)
gen_optimizer.apply_gradients(zip(gen_grads, generator.trainable_variables))
d_train_loss(loss_disc)
g_train_loss(loss_gen)
g_train_accuracy(tf.ones_like(preds_fake), preds_fake)
return train_step
def sample_Z(batch_size, latent_dim):
return tf.random.normal([batch_size, latent_dim])
def train(generator, discriminator, images, latent_dim, num_epochs, batch_size):
""" Trains a model (subclassing tf.keras.Model) over MNIST data collection
:param load_data:
:param use_full_train_set:
:param Model model: Model to train, whose __call__() function accepts a
batch of 28x28 greyscale images and returns a 10-class logits
:param int num_epochs: Number of epochs to train with
:param int batch_size: Batch size
:param train_metric: either `train_loss` or `train_accuracy`
:param test_metric: either `test_loss` or `test_accuracy`
:param List metric_scaling_factor: ints [train_metric_scale, test_metric_scale] .
Scales the value outputted by the metric at each measuring point by this value.
:returns List: [train_metric_values, test_metric_values]
"""
sample_noise = sample_Z(16, latent_dim)
shuffle_seed = 60000
train_ds = tf.data.Dataset.from_tensor_slices(images) \
.shuffle(shuffle_seed) \
.batch(batch_size)
train_step = get_train_step_gan(batch_size, latent_dim)
for epoch in range(num_epochs):
for image_batch in train_ds:
train_step(generator, discriminator, image_batch)
print(f'Epoch {epoch + 1} : Disc loss: {d_train_loss.result()}, Gen loss: {g_train_loss.result()}')
# Reset the metrics for the next epoch
d_train_loss.reset_states()
g_train_loss.reset_states()
generated_images_tensor = generator(sample_noise, training=False)
fig = plt.figure(figsize=(4, 4))
for i in range(generated_images_tensor.shape[0]):
plt.subplot(4, 4, i + 1)
plt.imshow(generated_images_tensor[i, :, :, 0] * 127.5 + 127.5, cmap='gray')
plt.axis('off')
plt.show()
| en | 0.62155 | Wrapper for training step, needed if running more than one model per run :return: train step function Trains a model (subclassing tf.keras.Model) over MNIST data collection :param load_data: :param use_full_train_set: :param Model model: Model to train, whose __call__() function accepts a batch of 28x28 greyscale images and returns a 10-class logits :param int num_epochs: Number of epochs to train with :param int batch_size: Batch size :param train_metric: either `train_loss` or `train_accuracy` :param test_metric: either `test_loss` or `test_accuracy` :param List metric_scaling_factor: ints [train_metric_scale, test_metric_scale] . Scales the value outputted by the metric at each measuring point by this value. :returns List: [train_metric_values, test_metric_values] # Reset the metrics for the next epoch | 2.694555 | 3 |
Easy/BuyTheBundle.py | revanthsenthil/dCoder_select | 1 | 6624728 | <filename>Easy/BuyTheBundle.py
"""
Problem Description:
Jimmy wants to buy books for N students. He went to the bookshop to buy a bundle of books, each bundle has a different number of books. He wants to buy such a bundle that contains the number of books, which can be distributed equally amongst all the students.
Input:
First line contains T, number of test cases.
Each test case contains two integers, N and M. where is N is number of students and M is number of books in a bundle.
Output:
In each test case output "Yes" if he can buy that bundle and "No" if he can't buy that bundle.
Constraints:
1<=T<=20
1<=N<=100
1<=M<=10^5
Sample Input:
2
5 14
3 21
Sample Output:
No
Yes
"""
n = int(input())
for i in range(n):
d = input().split()
print("Yes") if int(d[1]) % int(d[0]) == 0 else print("No")
| <filename>Easy/BuyTheBundle.py
"""
Problem Description:
Jimmy wants to buy books for N students. He went to the bookshop to buy a bundle of books, each bundle has a different number of books. He wants to buy such a bundle that contains the number of books, which can be distributed equally amongst all the students.
Input:
First line contains T, number of test cases.
Each test case contains two integers, N and M. where is N is number of students and M is number of books in a bundle.
Output:
In each test case output "Yes" if he can buy that bundle and "No" if he can't buy that bundle.
Constraints:
1<=T<=20
1<=N<=100
1<=M<=10^5
Sample Input:
2
5 14
3 21
Sample Output:
No
Yes
"""
n = int(input())
for i in range(n):
d = input().split()
print("Yes") if int(d[1]) % int(d[0]) == 0 else print("No")
| en | 0.964471 | Problem Description: Jimmy wants to buy books for N students. He went to the bookshop to buy a bundle of books, each bundle has a different number of books. He wants to buy such a bundle that contains the number of books, which can be distributed equally amongst all the students. Input: First line contains T, number of test cases. Each test case contains two integers, N and M. where is N is number of students and M is number of books in a bundle. Output: In each test case output "Yes" if he can buy that bundle and "No" if he can't buy that bundle. Constraints: 1<=T<=20 1<=N<=100 1<=M<=10^5 Sample Input: 2 5 14 3 21 Sample Output: No Yes | 3.940804 | 4 |
tools/accuracy_checker/openvino/tools/accuracy_checker/evaluators/custom_evaluators/base_models.py | Ohtani-y/open_model_zoo | 0 | 6624729 | <filename>tools/accuracy_checker/openvino/tools/accuracy_checker/evaluators/custom_evaluators/base_models.py
"""
Copyright (c) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pathlib import Path
from collections import OrderedDict
import numpy as np
from ...config import ConfigError
from ...utils import get_path, parse_partial_shape, contains_any
from ...logging import print_info
def create_model(model_config, launcher, launcher_model_mapping, suffix=None, delayed_model_loading=False):
    """Instantiate the model wrapper class registered for the launcher's framework.

    :param model_config: configuration dict for the model part.
    :param launcher: launcher whose ``config['framework']`` selects the wrapper class.
    :param launcher_model_mapping: framework name -> wrapper class mapping.
    :param suffix: optional part suffix forwarded to the wrapper.
    :param delayed_model_loading: forwarded to the wrapper to postpone loading.
    :raises ValueError: when no wrapper is registered for the framework.
    """
    framework = launcher.config['framework']
    model_class = launcher_model_mapping.get(framework)
    if model_class:
        return model_class(model_config, launcher, suffix, delayed_model_loading)
    raise ValueError('model for framework {} is not supported'.format(framework))
def create_encoder(model_config, launcher, launcher_model_mapping, delayed_model_loading=False):
    """Instantiate the encoder wrapper for the launcher's framework.

    When the config provides precomputed ``predictions`` and does not ask to
    re-store them, the 'dummy' wrapper is used instead of running a real model.

    :raises ValueError: when no wrapper is registered for the chosen framework.
    """
    framework = launcher.config['framework']
    use_ready_predictions = 'predictions' in model_config and not model_config.get('store_predictions', False)
    if use_ready_predictions:
        framework = 'dummy'
    model_class = launcher_model_mapping.get(framework)
    if model_class:
        return model_class(model_config, launcher, 'encoder', delayed_model_loading)
    raise ValueError('model for framework {} is not supported'.format(framework))
class BaseCascadeModel:
    """Container coordinating several named sub-models (parts) of a cascade.

    Subclasses populate ``_part_by_name`` (name -> model wrapper) and
    implement ``predict``.
    """
    def __init__(self, network_info, launcher, delayed_model_loading=False):
        self.network_info = network_info
        self.launcher = launcher
        # name -> model wrapper mapping; filled in by subclasses.
        self._part_by_name = None
    def predict(self, identifiers, input_data):
        raise NotImplementedError
    def release(self):
        """Release every sub-model."""
        if self._part_by_name:
            for model in self._part_by_name.values():
                model.release()
    def load_network(self, network_list, launcher):
        """Attach already-read network objects to the sub-models.

        ``network_list`` items are dicts with 'model' (network object) and,
        for multi-part cascades, the 'name' of the target part.
        """
        if len(self._part_by_name) == 1 and 'name' not in network_list[0]:
            # Bug fix: this branch previously called load_model with a network
            # OBJECT, but load_model expects a config dict; route it through
            # load_network, consistent with the multi-part branch below.
            next(iter(self._part_by_name.values())).load_network(network_list[0]['model'], launcher)
            return
        for network_dict in network_list:
            self._part_by_name[network_dict['name']].load_network(network_dict['model'], launcher)
    def load_model(self, network_list, launcher):
        """Load sub-models from configuration dicts (model paths/options)."""
        if len(self._part_by_name) == 1 and 'name' not in network_list[0]:
            next(iter(self._part_by_name.values())).load_model(network_list[0], launcher)
            return
        for network_dict in network_list:
            self._part_by_name[network_dict['name']].load_model(network_dict, launcher)
    def get_network(self):
        """Return [{'name': ..., 'model': network}] for each sub-model."""
        if not self._part_by_name:
            return []
        return [{'name': name, 'model': model.network} for name, model in self._part_by_name.items()]
    def reset(self):
        # Stateless by default; stateful cascades override this.
        pass
    @staticmethod
    def fill_part_with_model(network_info, parts, models_args, is_blob, delayed_model_loading):
        """Fill missing 'model' entries of each part from command-line model args."""
        if models_args and not delayed_model_loading:
            for idx, part in enumerate(parts):
                part_info = network_info.get(part, {})
                if not contains_any(part_info, ['model', 'onnx_model']) and models_args:
                    # Reuse the first argument when fewer args than parts were given.
                    part_info['model'] = models_args[idx if len(models_args) > idx else 0]
                    part_info['_model_is_blob'] = is_blob
                network_info.update({part: part_info})
        return network_info
class BaseDLSDKModel:
    """Base wrapper for models run through the OpenVINO Inference Engine (IE 1.0 / DLSDK) API.

    Provides model file discovery, (re)loading with dynamic-shape fallback and
    input/output blob name resolution shared by concrete evaluator models.
    """
    def __init__(self, network_info, launcher, suffix=None, delayed_model_loading=False):
        # network_info: dict with model/weights paths and options.
        # launcher: DLSDK launcher exposing ie_core, device and helpers.
        # suffix: distinguishes this part's files / blob-name prefixes in multi-model setups.
        self.network_info = network_info
        self.launcher = launcher
        self.default_model_suffix = suffix
        # Subclasses may predefine blob names before delegating to this __init__.
        if not hasattr(self, 'output_blob'):
            self.output_blob = None
        if not hasattr(self, 'input_blob'):
            self.input_blob = None
        self.with_prefix = False
        self.is_dynamic = False
        if not delayed_model_loading:
            self.load_model(network_info, launcher, log=True)
    def _reshape_input(self, input_shapes):
        """Reshape the network to *input_shapes* and rebuild the executable network."""
        # A dynamically-loaded network accepts arbitrary shapes; nothing to do.
        if self.is_dynamic:
            return
        if hasattr(self, 'exec_network') and self.exec_network is not None:
            del self.exec_network
        self.network.reshape(input_shapes)
        self.dynamic_inputs, self.partial_shapes = self.launcher.get_dynamic_inputs(self.network)
        # Dynamic inputs remain but dynamic execution is unavailable: defer loading.
        if not self.is_dynamic and self.dynamic_inputs:
            self.exec_network = None
            return
        self.exec_network = self.launcher.ie_core.load_network(self.network, self.launcher.device)
    def load_network(self, network, launcher):
        """Attach an already-read IE network and create the executable network.

        Tries dynamic execution first (when the launcher policy allows it) and
        falls back to deferred loading if the device rejects dynamic shapes.
        """
        self.network = network
        self.dynamic_inputs, self.partial_shapes = launcher.get_dynamic_inputs(self.network)
        if self.dynamic_inputs and launcher.dynamic_shapes_policy in ['dynamic', 'default']:
            try:
                self.exec_network = launcher.ie_core.load_network(self.network, launcher.device)
                self.is_dynamic = True
            except RuntimeError as e:
                # The strict 'dynamic' policy demands dynamic support; re-raise then.
                if launcher.dynamic_shapes_policy == 'dynamic':
                    raise e
                self.is_dynamic = False
                self.exec_network = None
        if not self.dynamic_inputs:
            self.exec_network = launcher.ie_core.load_network(self.network, launcher.device)
    def print_input_output_info(self):
        """Log layer names, precisions and shapes of the network inputs/outputs."""
        print_info('{} - Input info:'.format(self.default_model_suffix))
        # Newer IE versions expose inputs via input_info instead of inputs.
        has_info = hasattr(self.network if self.network is not None else self.exec_network, 'input_info')
        if self.network:
            if has_info:
                network_inputs = OrderedDict(
                    [(name, data.input_data) for name, data in self.network.input_info.items()]
                )
            else:
                network_inputs = self.network.inputs
            network_outputs = self.network.outputs
        else:
            if has_info:
                network_inputs = OrderedDict([
                    (name, data.input_data) for name, data in self.exec_network.input_info.items()
                ])
            else:
                network_inputs = self.exec_network.inputs
            network_outputs = self.exec_network.outputs
        for name, input_info in network_inputs.items():
            print_info('\tLayer name: {}'.format(name))
            print_info('\tprecision: {}'.format(input_info.precision))
            # Report the partial (dynamic) shape when the static one is meaningless.
            print_info('\tshape {}\n'.format(
                input_info.shape if name not in self.partial_shapes else self.partial_shapes[name]))
        print_info('{} - Output info'.format(self.default_model_suffix))
        for name, output_info in network_outputs.items():
            print_info('\tLayer name: {}'.format(name))
            print_info('\tprecision: {}'.format(output_info.precision))
            print_info('\tshape: {}\n'.format(
                output_info.shape if name not in self.partial_shapes else self.partial_shapes[name]))
    def automatic_model_search(self, network_info):
        """Resolve model (.xml/.blob/.onnx) and weights (.bin) paths.

        When 'model' points to a directory, files matching the part suffix are
        preferred; .blob files take priority if '_model_is_blob' is set.
        Returns (model_path, weights_path); weights is None for blob/onnx models.
        :raises ConfigError: when no (or more than one) suitable file is found.
        """
        model = Path(network_info['model'])
        if model.is_dir():
            is_blob = network_info.get('_model_is_blob')
            if is_blob:
                model_list = list(model.glob('*{}.blob'.format(self.default_model_suffix)))
                if not model_list:
                    model_list = list(model.glob('*.blob'))
            else:
                model_list = list(model.glob('*{}.xml'.format(self.default_model_suffix)))
                blob_list = list(model.glob('*{}.blob'.format(self.default_model_suffix)))
                if not model_list and not blob_list:
                    model_list = list(model.glob('*.xml'))
                    blob_list = list(model.glob('*.blob'))
                if not model_list:
                    model_list = blob_list
            if not model_list:
                raise ConfigError('Suitable model for {} not found'.format(self.default_model_suffix))
            if len(model_list) > 1:
                raise ConfigError('Several suitable models for {} found'.format(self.default_model_suffix))
            model = model_list[0]
        accepted_suffixes = ['.blob', '.xml', '.onnx']
        if model.suffix not in accepted_suffixes:
            raise ConfigError('Models with following suffixes are allowed: {}'.format(accepted_suffixes))
        print_info('{} - Found model: {}'.format(self.default_model_suffix, model))
        if model.suffix in ['.blob', '.onnx']:
            return model, None
        # Default weights file sits next to the .xml with a .bin extension.
        weights = get_path(network_info.get('weights', model.parent / model.name.replace('xml', 'bin')))
        accepted_weights_suffixes = ['.bin']
        if weights.suffix not in accepted_weights_suffixes:
            raise ConfigError('Weights with following suffixes are allowed: {}'.format(accepted_weights_suffixes))
        print_info('{} - Found weights: {}'.format(self.default_model_suffix, weights))
        return model, weights
    def set_input_and_output(self):
        """Resolve input/output blob names, tracking the optional suffix prefix.

        Blob names may carry the part suffix as a prefix; keep the stored
        output name consistent with whether that prefix is present.
        """
        has_info = hasattr(self.exec_network, 'input_info')
        input_info = self.exec_network.input_info if has_info else self.exec_network.inputs
        input_blob = next(iter(input_info))
        with_prefix = input_blob.startswith(self.default_model_suffix)
        if self.input_blob is None or with_prefix != self.with_prefix:
            if self.output_blob is None:
                output_blob = next(iter(self.exec_network.outputs))
            else:
                # Re-apply or strip the suffix prefix on the stored output name.
                output_blob = (
                    '_'.join([self.default_model_suffix, self.output_blob])
                    if with_prefix else self.output_blob.split(self.default_model_suffix + '_')[-1]
                )
            self.input_blob = input_blob
            self.output_blob = output_blob
            self.with_prefix = with_prefix
            # Keep an attached adapter (if any) pointed at the resolved output.
            if hasattr(self, 'adapter') and self.adapter is not None:
                self.adapter.output_blob = output_blob
    def load_model(self, network_info, launcher, log=False):
        """Find, read and load the model described by *network_info*."""
        if 'onnx_model' in network_info:
            # ONNX models are first converted through the launcher.
            network_info.update(launcher.config)
            model, weights = launcher.convert_model(network_info)
        else:
            model, weights = self.automatic_model_search(network_info)
        if weights is None and model.suffix != '.onnx':
            # Precompiled .blob: import directly, skipping network reading.
            self.exec_network = launcher.ie_core.import_network(str(model))
        else:
            if weights:
                self.network = launcher.read_network(str(model), str(weights))
            else:
                self.network = launcher.ie_core.read_network(str(model))
            self.load_network(self.network, launcher)
        self.set_input_and_output()
        if log:
            self.print_input_output_info()
    def release(self):
        """Free the executable network, network and launcher references."""
        del self.exec_network
        del self.network
        del self.launcher
    def fit_to_input(self, input_data):
        """Wrap *input_data* into a feed dict, reshaping the network when shapes differ."""
        has_info = hasattr(self.exec_network, 'input_info')
        if has_info:
            input_info = self.exec_network.input_info[self.input_blob].input_data
        else:
            input_info = self.exec_network.inputs[self.input_blob]
        if self.input_blob in self.dynamic_inputs or tuple(input_info.shape) != np.shape(input_data):
            self._reshape_input({self.input_blob: np.shape(input_data)})
        return {self.input_blob: np.array(input_data)}
    def predict(self, identifiers, input_data):
        # To be implemented by concrete evaluator models.
        raise NotImplementedError
class BaseOpenVINOModel(BaseDLSDKModel):
    """Counterpart of BaseDLSDKModel for the OpenVINO API 2.0 (ov) workflow.

    Overrides loading, reshaping and inference to use compile_model, tensor
    names and explicit infer requests.
    """
    def input_tensors_mapping(self):
        """Map each input node's friendly name to a tensor name (or index if unnamed)."""
        inputs = self.network.inputs if self.network is not None else self.exec_network.inputs
        node_to_tensor = {}
        for idx, input_desc in enumerate(inputs):
            tensor_names = input_desc.get_tensor().get_names()
            # Fall back to the positional index when the tensor has no names.
            node_to_tensor[input_desc.get_node().friendly_name] = idx if not tensor_names else next(iter(tensor_names))
        return node_to_tensor
    def _reshape_input(self, input_shapes):
        """Reshape the model (keyed by input node names) and recompile it."""
        if self.is_dynamic:
            return
        if hasattr(self, 'exec_network') and self.exec_network is not None:
            del self.infer_request
            del self.exec_network
        # reshape_network expects tensor names/indices, not node friendly names.
        tensor_mapping = self.input_tensors_mapping()
        input_shapes_for_tensors = {tensor_mapping[name]: shape for name, shape in input_shapes.items()}
        self.launcher.reshape_network(self.network, input_shapes_for_tensors)
        self.dynamic_inputs, self.partial_shapes = self.launcher.get_dynamic_inputs(self.network)
        if not self.is_dynamic and self.dynamic_inputs:
            self.exec_network = None
            return
        self.exec_network = self.launcher.ie_core.compile_model(self.network, self.launcher.device)
        # Force creation of a fresh infer request for the recompiled model.
        self.infer_request = None
    def predict(self, identifiers, input_data):
        # To be implemented by concrete evaluator models.
        raise NotImplementedError
    def load_network(self, network, launcher):
        """Attach a read model and compile it, honoring the dynamic-shapes policy."""
        self.infer_request = None
        self.network = network
        self.dynamic_inputs, self.partial_shapes = launcher.get_dynamic_inputs(self.network)
        if self.dynamic_inputs and launcher.dynamic_shapes_policy in ['dynamic', 'default']:
            try:
                self.exec_network = launcher.ie_core.compile_model(self.network, launcher.device)
                self.is_dynamic = True
            except RuntimeError as e:
                # The strict 'dynamic' policy demands dynamic support; re-raise then.
                if launcher.dynamic_shapes_policy == 'dynamic':
                    raise e
                self.is_dynamic = False
                self.exec_network = None
        if not self.dynamic_inputs:
            self.exec_network = launcher.ie_core.compile_model(self.network, launcher.device)
    def load_model(self, network_info, launcher, log=False):
        """Find, read and compile the model described by *network_info*."""
        if 'onnx_model' in network_info:
            # ONNX models are first converted through the launcher.
            network_info.update(launcher.config)
            model, weights = launcher.convert_model(network_info)
        else:
            model, weights = self.automatic_model_search(network_info)
        if weights is None and model.suffix != '.onnx':
            # Precompiled .blob: import directly, skipping model reading.
            self.exec_network = launcher.ie_core.import_network(str(model))
        else:
            if weights:
                self.network = launcher.read_network(str(model), str(weights))
            else:
                self.network = launcher.ie_core.read_network(str(model))
            self.load_network(self.network, launcher)
        self.set_input_and_output()
        if log:
            self.print_input_output_info()
    def print_input_output_info(self):
        """Delegate I/O info logging to the launcher (ov-aware formatting)."""
        self.launcher.print_input_output_info(
            self.network if self.network is not None else self.exec_network, self.default_model_suffix)
    def set_input_and_output(self):
        """Resolve input/output node names, tracking the optional suffix prefix."""
        inputs = self.network.inputs if self.network is not None else self.exec_network.inputs
        outputs = self.network.outputs if self.network is not None else self.exec_network.outputs
        input_blob = next(iter(inputs)).get_node().friendly_name
        with_prefix = input_blob.startswith(self.default_model_suffix)
        if self.input_blob is None or with_prefix != self.with_prefix:
            if self.output_blob is None:
                output_blob = next(iter(outputs)).get_node().friendly_name
            else:
                # Re-apply or strip the suffix prefix on the stored output name.
                output_blob = (
                    '_'.join([self.default_model_suffix, self.output_blob])
                    if with_prefix else self.output_blob.split(self.default_model_suffix + '_')[-1]
                )
            self.input_blob = input_blob
            self.output_blob = output_blob
            self.with_prefix = with_prefix
            # Keep an attached adapter (if any) pointed at the resolved output.
            if hasattr(self, 'adapter') and self.adapter is not None:
                self.adapter.output_blob = output_blob
    @property
    def inputs(self):
        """Dict of input node friendly name -> node, from the model or compiled model."""
        if self.network:
            return {node.get_node().friendly_name: node.get_node() for node in self.network.inputs}
        return {node.get_node().friendly_name: node.get_node() for node in self.exec_network.inputs}
    def fit_to_input(self, input_data):
        """Wrap *input_data* into a feed dict keyed by node name, reshaping when required."""
        input_info = self.inputs[self.input_blob]
        if (self.input_blob in self.dynamic_inputs or
                parse_partial_shape(input_info.get_partial_shape()) != np.shape(input_data)):
            self._reshape_input({self.input_blob: np.shape(input_data)})
        return {self.input_blob: np.array(input_data)}
    def infer(self, input_data):
        """Run a synchronous inference; return {output friendly name: result}."""
        # Lazily create the infer request (dropped on reshape/recompile).
        if not hasattr(self, 'infer_request') or self.infer_request is None:
            self.infer_request = self.exec_network.create_infer_request()
        # Feed tensors by tensor name/index resolved from node friendly names.
        tensors_mapping = self.input_tensors_mapping()
        feed_dict = {tensors_mapping[name]: data for name, data in input_data.items()}
        outputs = self.infer_request.infer(feed_dict)
        return {
            out_node.get_node().friendly_name: out_res
            for out_node, out_res in outputs.items()
        }
class BaseONNXModel:
    """Base wrapper for models executed with ONNX Runtime via the launcher."""
    def __init__(self, network_info, launcher, suffix=None, delayed_model_loading=False):
        self.network_info = network_info
        self.launcher = launcher
        self.default_model_suffix = suffix
        if delayed_model_loading:
            return
        model_path = self.automatic_model_search(network_info)
        self.inference_session = launcher.create_inference_session(str(model_path))
        session = self.inference_session
        # Single-input/single-output models: take the first descriptor of each.
        self.input_blob = next(iter(session.get_inputs()))
        self.output_blob = next(iter(session.get_outputs()))
    def fit_to_input(self, input_data):
        """Wrap *input_data* into the feed dict keyed by the input tensor name."""
        return {self.input_blob.name: input_data}
    def release(self):
        """Drop the ONNX Runtime session."""
        del self.inference_session
    def automatic_model_search(self, network_info):
        """Resolve the .onnx file path from *network_info* (file or directory).

        :raises ConfigError: when no (or more than one) suitable file is found.
        """
        candidate = Path(network_info['model'])
        if candidate.is_dir():
            found = list(candidate.glob('*{}.onnx'.format(self.default_model_suffix)))
            if not found:
                found = list(candidate.glob('*.onnx'))
            if not found:
                raise ConfigError('Suitable model for {} not found'.format(self.default_model_suffix))
            if len(found) > 1:
                raise ConfigError('Several suitable models for {} found'.format(self.default_model_suffix))
            candidate = found[0]
        accepted_suffixes = ['.onnx']
        if candidate.suffix not in accepted_suffixes:
            raise ConfigError('Models with following suffixes are allowed: {}'.format(accepted_suffixes))
        print_info('{} - Found model: {}'.format(self.default_model_suffix, candidate))
        return candidate
class BaseOpenCVModel:
    """Base wrapper for models executed with the OpenCV DNN launcher."""
    def __init__(self, network_info, launcher, suffix=None, delayed_model_loading=False):
        self.network_info = network_info
        self.launcher = launcher
        self.default_model_suffix = suffix
        if not delayed_model_loading:
            self.network = launcher.create_network(network_info['model'], network_info.get('weights', ''))
            network_info.update(launcher.config)
            input_shapes = launcher.get_inputs_from_config(network_info)
            self.input_blob = next(iter(input_shapes))
            self.input_shape = input_shapes[self.input_blob]
            # Bug fix: list(self.input_blob) split the input NAME string into
            # single characters; setInputsNames expects a list containing the
            # full layer name.
            self.network.setInputsNames([self.input_blob])
            self.output_blob = next(iter(self.network.getUnconnectedOutLayersNames()))
    def fit_to_input(self, input_data):
        """Convert *input_data* to float32 and key it by the input layer name."""
        return {self.input_blob: input_data.astype(np.float32)}
    def release(self):
        """Drop the OpenCV network."""
        del self.network
class BaseTFModel:
    """Base wrapper for models executed with the TensorFlow launcher."""
    def __init__(self, network_info, launcher, suffix=None, delayed_model_loading=False):
        self.network_info = network_info
        self.launcher = launcher
        self.default_model_suffix = suffix
        if delayed_model_loading:
            return
        model = self.automatic_model_search(network_info)
        self.inference_session = launcher.create_inference_session(str(model))
    def fit_to_input(self, input_data):
        raise NotImplementedError
    def predict(self, identifiers, input_data):
        raise NotImplementedError
    def release(self):
        """Drop the TF inference session."""
        del self.inference_session
    @staticmethod
    def automatic_model_search(network_info):
        # TF models are referenced directly by path; no directory search is done.
        return Path(network_info['model'])
class BaseCaffeModel:
    """Base wrapper for models executed with the Caffe launcher."""
    def __init__(self, network_info, launcher, suffix=None, delayed_model_loading=False):
        self.network_info = network_info
        self.launcher = launcher
        self.default_model_suffix = suffix
    def fit_to_input(self, data, layer_name, layout, precision, tmpl=None):
        """Delegate input preparation to the launcher."""
        return self.launcher.fit_to_input(data, layer_name, layout, precision, template=tmpl)
    def predict(self, identifiers, input_data):
        raise NotImplementedError
    def release(self):
        """Drop the Caffe net (set by subclasses)."""
        del self.net
    def automatic_model_search(self, network_info):
        """Locate the .prototxt description and .caffemodel weights.

        Bug fix: this method referenced ``self.default_model_name``, which is
        never assigned (``__init__`` sets ``default_model_suffix``), so any
        directory search raised AttributeError; use ``default_model_suffix``
        consistently with the other Base*Model classes. Also wrap the weights
        directory in ``Path`` so ``.glob`` works when it arrives as a string.

        :raises ConfigError: when no (or more than one) suitable file is found.
        """
        model = Path(network_info.get('model', ''))
        weights = network_info.get('weights')
        if model.is_dir():
            models_list = list(Path(model).glob('{}.prototxt'.format(self.default_model_suffix)))
            if not models_list:
                models_list = list(Path(model).glob('*.prototxt'))
            if not models_list:
                raise ConfigError('Suitable model description is not detected')
            if len(models_list) != 1:
                raise ConfigError('Several suitable models found, please specify required model')
            model = models_list[0]
        if weights is None or Path(weights).is_dir():
            # Wrap in Path: 'weights' may be a plain string directory.
            weights_dir = Path(weights or model.parent)
            weights = weights_dir / model.name.replace('prototxt', 'caffemodel')
            if not weights.exists():
                weights_list = list(weights_dir.glob('*.caffemodel'))
                if not weights_list:
                    raise ConfigError('Suitable weights is not detected')
                if len(weights_list) != 1:
                    raise ConfigError('Several suitable weights found, please specify required explicitly')
                weights = weights_list[0]
        weights = Path(weights)
        accepted_suffixes = ['.prototxt']
        if model.suffix not in accepted_suffixes:
            raise ConfigError('Models with following suffixes are allowed: {}'.format(accepted_suffixes))
        print_info('{} - Found model: {}'.format(self.default_model_suffix, model))
        accepted_weights_suffixes = ['.caffemodel']
        if weights.suffix not in accepted_weights_suffixes:
            raise ConfigError('Weights with following suffixes are allowed: {}'.format(accepted_weights_suffixes))
        print_info('{} - Found weights: {}'.format(self.default_model_suffix, weights))
        return model, weights
| <filename>tools/accuracy_checker/openvino/tools/accuracy_checker/evaluators/custom_evaluators/base_models.py
"""
Copyright (c) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pathlib import Path
from collections import OrderedDict
import numpy as np
from ...config import ConfigError
from ...utils import get_path, parse_partial_shape, contains_any
from ...logging import print_info
def create_model(model_config, launcher, launcher_model_mapping, suffix=None, delayed_model_loading=False):
    """Instantiate the model wrapper class registered for the launcher's framework.

    :raises ValueError: when no wrapper is registered for the framework.
    """
    framework = launcher.config['framework']
    model_class = launcher_model_mapping.get(framework)
    if model_class:
        return model_class(model_config, launcher, suffix, delayed_model_loading)
    raise ValueError('model for framework {} is not supported'.format(framework))
def create_encoder(model_config, launcher, launcher_model_mapping, delayed_model_loading=False):
    """Instantiate the encoder wrapper; reuse stored predictions via the dummy model.

    :raises ValueError: when no wrapper is registered for the chosen framework.
    """
    framework = launcher.config['framework']
    use_ready_predictions = 'predictions' in model_config and not model_config.get('store_predictions', False)
    if use_ready_predictions:
        framework = 'dummy'
    model_class = launcher_model_mapping.get(framework)
    if model_class:
        return model_class(model_config, launcher, 'encoder', delayed_model_loading)
    raise ValueError('model for framework {} is not supported'.format(framework))
class BaseCascadeModel:
    """Container coordinating several named sub-models (parts) of a cascade.

    Subclasses populate ``_part_by_name`` (name -> model wrapper) and
    implement ``predict``.
    """
    def __init__(self, network_info, launcher, delayed_model_loading=False):
        self.network_info = network_info
        self.launcher = launcher
        # name -> model wrapper mapping; filled in by subclasses.
        self._part_by_name = None
    def predict(self, identifiers, input_data):
        raise NotImplementedError
    def release(self):
        """Release every sub-model."""
        if self._part_by_name:
            for model in self._part_by_name.values():
                model.release()
    def load_network(self, network_list, launcher):
        """Attach already-read network objects to the sub-models.

        ``network_list`` items are dicts with 'model' (network object) and,
        for multi-part cascades, the 'name' of the target part.
        """
        if len(self._part_by_name) == 1 and 'name' not in network_list[0]:
            # Bug fix: this branch previously called load_model with a network
            # OBJECT, but load_model expects a config dict; route it through
            # load_network, consistent with the multi-part branch below.
            next(iter(self._part_by_name.values())).load_network(network_list[0]['model'], launcher)
            return
        for network_dict in network_list:
            self._part_by_name[network_dict['name']].load_network(network_dict['model'], launcher)
    def load_model(self, network_list, launcher):
        """Load sub-models from configuration dicts (model paths/options)."""
        if len(self._part_by_name) == 1 and 'name' not in network_list[0]:
            next(iter(self._part_by_name.values())).load_model(network_list[0], launcher)
            return
        for network_dict in network_list:
            self._part_by_name[network_dict['name']].load_model(network_dict, launcher)
    def get_network(self):
        """Return [{'name': ..., 'model': network}] for each sub-model."""
        if not self._part_by_name:
            return []
        return [{'name': name, 'model': model.network} for name, model in self._part_by_name.items()]
    def reset(self):
        # Stateless by default; stateful cascades override this.
        pass
    @staticmethod
    def fill_part_with_model(network_info, parts, models_args, is_blob, delayed_model_loading):
        """Fill missing 'model' entries of each part from command-line model args."""
        if models_args and not delayed_model_loading:
            for idx, part in enumerate(parts):
                part_info = network_info.get(part, {})
                if not contains_any(part_info, ['model', 'onnx_model']) and models_args:
                    # Reuse the first argument when fewer args than parts were given.
                    part_info['model'] = models_args[idx if len(models_args) > idx else 0]
                    part_info['_model_is_blob'] = is_blob
                network_info.update({part: part_info})
        return network_info
class BaseDLSDKModel:
    """Base wrapper for models run through the OpenVINO Inference Engine (IE 1.0 / DLSDK) API.

    Provides model file discovery, (re)loading with dynamic-shape fallback and
    input/output blob name resolution shared by concrete evaluator models.
    """
    def __init__(self, network_info, launcher, suffix=None, delayed_model_loading=False):
        # network_info: dict with model/weights paths and options.
        # launcher: DLSDK launcher exposing ie_core, device and helpers.
        # suffix: distinguishes this part's files / blob-name prefixes in multi-model setups.
        self.network_info = network_info
        self.launcher = launcher
        self.default_model_suffix = suffix
        # Subclasses may predefine blob names before delegating to this __init__.
        if not hasattr(self, 'output_blob'):
            self.output_blob = None
        if not hasattr(self, 'input_blob'):
            self.input_blob = None
        self.with_prefix = False
        self.is_dynamic = False
        if not delayed_model_loading:
            self.load_model(network_info, launcher, log=True)
    def _reshape_input(self, input_shapes):
        """Reshape the network to *input_shapes* and rebuild the executable network."""
        # A dynamically-loaded network accepts arbitrary shapes; nothing to do.
        if self.is_dynamic:
            return
        if hasattr(self, 'exec_network') and self.exec_network is not None:
            del self.exec_network
        self.network.reshape(input_shapes)
        self.dynamic_inputs, self.partial_shapes = self.launcher.get_dynamic_inputs(self.network)
        # Dynamic inputs remain but dynamic execution is unavailable: defer loading.
        if not self.is_dynamic and self.dynamic_inputs:
            self.exec_network = None
            return
        self.exec_network = self.launcher.ie_core.load_network(self.network, self.launcher.device)
    def load_network(self, network, launcher):
        """Attach an already-read IE network and create the executable network.

        Tries dynamic execution first (when the launcher policy allows it) and
        falls back to deferred loading if the device rejects dynamic shapes.
        """
        self.network = network
        self.dynamic_inputs, self.partial_shapes = launcher.get_dynamic_inputs(self.network)
        if self.dynamic_inputs and launcher.dynamic_shapes_policy in ['dynamic', 'default']:
            try:
                self.exec_network = launcher.ie_core.load_network(self.network, launcher.device)
                self.is_dynamic = True
            except RuntimeError as e:
                # The strict 'dynamic' policy demands dynamic support; re-raise then.
                if launcher.dynamic_shapes_policy == 'dynamic':
                    raise e
                self.is_dynamic = False
                self.exec_network = None
        if not self.dynamic_inputs:
            self.exec_network = launcher.ie_core.load_network(self.network, launcher.device)
    def print_input_output_info(self):
        """Log layer names, precisions and shapes of the network inputs/outputs."""
        print_info('{} - Input info:'.format(self.default_model_suffix))
        # Newer IE versions expose inputs via input_info instead of inputs.
        has_info = hasattr(self.network if self.network is not None else self.exec_network, 'input_info')
        if self.network:
            if has_info:
                network_inputs = OrderedDict(
                    [(name, data.input_data) for name, data in self.network.input_info.items()]
                )
            else:
                network_inputs = self.network.inputs
            network_outputs = self.network.outputs
        else:
            if has_info:
                network_inputs = OrderedDict([
                    (name, data.input_data) for name, data in self.exec_network.input_info.items()
                ])
            else:
                network_inputs = self.exec_network.inputs
            network_outputs = self.exec_network.outputs
        for name, input_info in network_inputs.items():
            print_info('\tLayer name: {}'.format(name))
            print_info('\tprecision: {}'.format(input_info.precision))
            # Report the partial (dynamic) shape when the static one is meaningless.
            print_info('\tshape {}\n'.format(
                input_info.shape if name not in self.partial_shapes else self.partial_shapes[name]))
        print_info('{} - Output info'.format(self.default_model_suffix))
        for name, output_info in network_outputs.items():
            print_info('\tLayer name: {}'.format(name))
            print_info('\tprecision: {}'.format(output_info.precision))
            print_info('\tshape: {}\n'.format(
                output_info.shape if name not in self.partial_shapes else self.partial_shapes[name]))
    def automatic_model_search(self, network_info):
        """Resolve model (.xml/.blob/.onnx) and weights (.bin) paths.

        When 'model' points to a directory, files matching the part suffix are
        preferred; .blob files take priority if '_model_is_blob' is set.
        Returns (model_path, weights_path); weights is None for blob/onnx models.
        :raises ConfigError: when no (or more than one) suitable file is found.
        """
        model = Path(network_info['model'])
        if model.is_dir():
            is_blob = network_info.get('_model_is_blob')
            if is_blob:
                model_list = list(model.glob('*{}.blob'.format(self.default_model_suffix)))
                if not model_list:
                    model_list = list(model.glob('*.blob'))
            else:
                model_list = list(model.glob('*{}.xml'.format(self.default_model_suffix)))
                blob_list = list(model.glob('*{}.blob'.format(self.default_model_suffix)))
                if not model_list and not blob_list:
                    model_list = list(model.glob('*.xml'))
                    blob_list = list(model.glob('*.blob'))
                if not model_list:
                    model_list = blob_list
            if not model_list:
                raise ConfigError('Suitable model for {} not found'.format(self.default_model_suffix))
            if len(model_list) > 1:
                raise ConfigError('Several suitable models for {} found'.format(self.default_model_suffix))
            model = model_list[0]
        accepted_suffixes = ['.blob', '.xml', '.onnx']
        if model.suffix not in accepted_suffixes:
            raise ConfigError('Models with following suffixes are allowed: {}'.format(accepted_suffixes))
        print_info('{} - Found model: {}'.format(self.default_model_suffix, model))
        if model.suffix in ['.blob', '.onnx']:
            return model, None
        # Default weights file sits next to the .xml with a .bin extension.
        weights = get_path(network_info.get('weights', model.parent / model.name.replace('xml', 'bin')))
        accepted_weights_suffixes = ['.bin']
        if weights.suffix not in accepted_weights_suffixes:
            raise ConfigError('Weights with following suffixes are allowed: {}'.format(accepted_weights_suffixes))
        print_info('{} - Found weights: {}'.format(self.default_model_suffix, weights))
        return model, weights
    def set_input_and_output(self):
        """Resolve input/output blob names, tracking the optional suffix prefix.

        Blob names may carry the part suffix as a prefix; keep the stored
        output name consistent with whether that prefix is present.
        """
        has_info = hasattr(self.exec_network, 'input_info')
        input_info = self.exec_network.input_info if has_info else self.exec_network.inputs
        input_blob = next(iter(input_info))
        with_prefix = input_blob.startswith(self.default_model_suffix)
        if self.input_blob is None or with_prefix != self.with_prefix:
            if self.output_blob is None:
                output_blob = next(iter(self.exec_network.outputs))
            else:
                # Re-apply or strip the suffix prefix on the stored output name.
                output_blob = (
                    '_'.join([self.default_model_suffix, self.output_blob])
                    if with_prefix else self.output_blob.split(self.default_model_suffix + '_')[-1]
                )
            self.input_blob = input_blob
            self.output_blob = output_blob
            self.with_prefix = with_prefix
            # Keep an attached adapter (if any) pointed at the resolved output.
            if hasattr(self, 'adapter') and self.adapter is not None:
                self.adapter.output_blob = output_blob
    def load_model(self, network_info, launcher, log=False):
        """Find, read and load the model described by *network_info*."""
        if 'onnx_model' in network_info:
            # ONNX models are first converted through the launcher.
            network_info.update(launcher.config)
            model, weights = launcher.convert_model(network_info)
        else:
            model, weights = self.automatic_model_search(network_info)
        if weights is None and model.suffix != '.onnx':
            # Precompiled .blob: import directly, skipping network reading.
            self.exec_network = launcher.ie_core.import_network(str(model))
        else:
            if weights:
                self.network = launcher.read_network(str(model), str(weights))
            else:
                self.network = launcher.ie_core.read_network(str(model))
            self.load_network(self.network, launcher)
        self.set_input_and_output()
        if log:
            self.print_input_output_info()
    def release(self):
        """Free the executable network, network and launcher references."""
        del self.exec_network
        del self.network
        del self.launcher
    def fit_to_input(self, input_data):
        """Wrap *input_data* into a feed dict, reshaping the network when shapes differ."""
        has_info = hasattr(self.exec_network, 'input_info')
        if has_info:
            input_info = self.exec_network.input_info[self.input_blob].input_data
        else:
            input_info = self.exec_network.inputs[self.input_blob]
        if self.input_blob in self.dynamic_inputs or tuple(input_info.shape) != np.shape(input_data):
            self._reshape_input({self.input_blob: np.shape(input_data)})
        return {self.input_blob: np.array(input_data)}
    def predict(self, identifiers, input_data):
        # To be implemented by concrete evaluator models.
        raise NotImplementedError
class BaseOpenVINOModel(BaseDLSDKModel):
    """Counterpart of BaseDLSDKModel for the OpenVINO API 2.0 (ov) workflow.

    Overrides loading, reshaping and inference to use compile_model, tensor
    names and explicit infer requests.
    """
    def input_tensors_mapping(self):
        """Map each input node's friendly name to a tensor name (or index if unnamed)."""
        inputs = self.network.inputs if self.network is not None else self.exec_network.inputs
        node_to_tensor = {}
        for idx, input_desc in enumerate(inputs):
            tensor_names = input_desc.get_tensor().get_names()
            # Fall back to the positional index when the tensor has no names.
            node_to_tensor[input_desc.get_node().friendly_name] = idx if not tensor_names else next(iter(tensor_names))
        return node_to_tensor
    def _reshape_input(self, input_shapes):
        """Reshape the model (keyed by input node names) and recompile it."""
        if self.is_dynamic:
            return
        if hasattr(self, 'exec_network') and self.exec_network is not None:
            del self.infer_request
            del self.exec_network
        # reshape_network expects tensor names/indices, not node friendly names.
        tensor_mapping = self.input_tensors_mapping()
        input_shapes_for_tensors = {tensor_mapping[name]: shape for name, shape in input_shapes.items()}
        self.launcher.reshape_network(self.network, input_shapes_for_tensors)
        self.dynamic_inputs, self.partial_shapes = self.launcher.get_dynamic_inputs(self.network)
        if not self.is_dynamic and self.dynamic_inputs:
            self.exec_network = None
            return
        self.exec_network = self.launcher.ie_core.compile_model(self.network, self.launcher.device)
        # Force creation of a fresh infer request for the recompiled model.
        self.infer_request = None
    def predict(self, identifiers, input_data):
        # To be implemented by concrete evaluator models.
        raise NotImplementedError
    def load_network(self, network, launcher):
        """Attach a read model and compile it, honoring the dynamic-shapes policy."""
        self.infer_request = None
        self.network = network
        self.dynamic_inputs, self.partial_shapes = launcher.get_dynamic_inputs(self.network)
        if self.dynamic_inputs and launcher.dynamic_shapes_policy in ['dynamic', 'default']:
            try:
                self.exec_network = launcher.ie_core.compile_model(self.network, launcher.device)
                self.is_dynamic = True
            except RuntimeError as e:
                # The strict 'dynamic' policy demands dynamic support; re-raise then.
                if launcher.dynamic_shapes_policy == 'dynamic':
                    raise e
                self.is_dynamic = False
                self.exec_network = None
        if not self.dynamic_inputs:
            self.exec_network = launcher.ie_core.compile_model(self.network, launcher.device)
    def load_model(self, network_info, launcher, log=False):
        """Find, read and compile the model described by *network_info*."""
        if 'onnx_model' in network_info:
            # ONNX models are first converted through the launcher.
            network_info.update(launcher.config)
            model, weights = launcher.convert_model(network_info)
        else:
            model, weights = self.automatic_model_search(network_info)
        if weights is None and model.suffix != '.onnx':
            # Precompiled .blob: import directly, skipping model reading.
            self.exec_network = launcher.ie_core.import_network(str(model))
        else:
            if weights:
                self.network = launcher.read_network(str(model), str(weights))
            else:
                self.network = launcher.ie_core.read_network(str(model))
            self.load_network(self.network, launcher)
        self.set_input_and_output()
        if log:
            self.print_input_output_info()
    def print_input_output_info(self):
        """Delegate I/O info logging to the launcher (ov-aware formatting)."""
        self.launcher.print_input_output_info(
            self.network if self.network is not None else self.exec_network, self.default_model_suffix)
    def set_input_and_output(self):
        """Resolve input/output node names, tracking the optional suffix prefix."""
        inputs = self.network.inputs if self.network is not None else self.exec_network.inputs
        outputs = self.network.outputs if self.network is not None else self.exec_network.outputs
        input_blob = next(iter(inputs)).get_node().friendly_name
        with_prefix = input_blob.startswith(self.default_model_suffix)
        if self.input_blob is None or with_prefix != self.with_prefix:
            if self.output_blob is None:
                output_blob = next(iter(outputs)).get_node().friendly_name
            else:
                # Re-apply or strip the suffix prefix on the stored output name.
                output_blob = (
                    '_'.join([self.default_model_suffix, self.output_blob])
                    if with_prefix else self.output_blob.split(self.default_model_suffix + '_')[-1]
                )
            self.input_blob = input_blob
            self.output_blob = output_blob
            self.with_prefix = with_prefix
            # Keep an attached adapter (if any) pointed at the resolved output.
            if hasattr(self, 'adapter') and self.adapter is not None:
                self.adapter.output_blob = output_blob
    @property
    def inputs(self):
        """Dict of input node friendly name -> node, from the model or compiled model."""
        if self.network:
            return {node.get_node().friendly_name: node.get_node() for node in self.network.inputs}
        return {node.get_node().friendly_name: node.get_node() for node in self.exec_network.inputs}
    def fit_to_input(self, input_data):
        """Wrap *input_data* into a feed dict keyed by node name, reshaping when required."""
        input_info = self.inputs[self.input_blob]
        if (self.input_blob in self.dynamic_inputs or
                parse_partial_shape(input_info.get_partial_shape()) != np.shape(input_data)):
            self._reshape_input({self.input_blob: np.shape(input_data)})
        return {self.input_blob: np.array(input_data)}
    def infer(self, input_data):
        """Run a synchronous inference; return {output friendly name: result}."""
        # Lazily create the infer request (dropped on reshape/recompile).
        if not hasattr(self, 'infer_request') or self.infer_request is None:
            self.infer_request = self.exec_network.create_infer_request()
        # Feed tensors by tensor name/index resolved from node friendly names.
        tensors_mapping = self.input_tensors_mapping()
        feed_dict = {tensors_mapping[name]: data for name, data in input_data.items()}
        outputs = self.infer_request.infer(feed_dict)
        return {
            out_node.get_node().friendly_name: out_res
            for out_node, out_res in outputs.items()
        }
class BaseONNXModel:
    """Base wrapper for models executed with ONNX Runtime via the launcher."""
    def __init__(self, network_info, launcher, suffix=None, delayed_model_loading=False):
        self.network_info = network_info
        self.launcher = launcher
        self.default_model_suffix = suffix
        if delayed_model_loading:
            return
        model_path = self.automatic_model_search(network_info)
        self.inference_session = launcher.create_inference_session(str(model_path))
        session = self.inference_session
        # Single-input/single-output models: take the first descriptor of each.
        self.input_blob = next(iter(session.get_inputs()))
        self.output_blob = next(iter(session.get_outputs()))
    def fit_to_input(self, input_data):
        """Wrap *input_data* into the feed dict keyed by the input tensor name."""
        return {self.input_blob.name: input_data}
    def release(self):
        """Drop the ONNX Runtime session."""
        del self.inference_session
    def automatic_model_search(self, network_info):
        """Resolve the .onnx file path from *network_info* (file or directory).

        :raises ConfigError: when no (or more than one) suitable file is found.
        """
        candidate = Path(network_info['model'])
        if candidate.is_dir():
            found = list(candidate.glob('*{}.onnx'.format(self.default_model_suffix)))
            if not found:
                found = list(candidate.glob('*.onnx'))
            if not found:
                raise ConfigError('Suitable model for {} not found'.format(self.default_model_suffix))
            if len(found) > 1:
                raise ConfigError('Several suitable models for {} found'.format(self.default_model_suffix))
            candidate = found[0]
        accepted_suffixes = ['.onnx']
        if candidate.suffix not in accepted_suffixes:
            raise ConfigError('Models with following suffixes are allowed: {}'.format(accepted_suffixes))
        print_info('{} - Found model: {}'.format(self.default_model_suffix, candidate))
        return candidate
class BaseOpenCVModel:
    """Wrapper over an OpenCV DNN network created via a launcher."""

    def __init__(self, network_info, launcher, suffix=None, delayed_model_loading=False):
        self.network_info = network_info
        self.launcher = launcher
        self.default_model_suffix = suffix
        if not delayed_model_loading:
            self.network = launcher.create_network(network_info['model'], network_info.get('weights', ''))
            network_info.update(launcher.config)
            input_shapes = launcher.get_inputs_from_config(network_info)
            self.input_blob = next(iter(input_shapes))
            self.input_shape = input_shapes[self.input_blob]
            # BUG FIX: the original passed list(self.input_blob), which splits
            # the input *name string* into single characters; setInputsNames
            # expects a list of names.
            self.network.setInputsNames([self.input_blob])
            self.output_blob = next(iter(self.network.getUnconnectedOutLayersNames()))

    def fit_to_input(self, input_data):
        """Feed dict for the single input blob, cast to float32 for OpenCV."""
        return {self.input_blob: input_data.astype(np.float32)}

    def release(self):
        """Drop the network so its resources can be reclaimed."""
        del self.network
class BaseTFModel:
    """Abstract wrapper over a TensorFlow inference session created via a launcher."""

    def __init__(self, network_info, launcher, suffix=None, delayed_model_loading=False):
        self.network_info = network_info
        self.launcher = launcher
        self.default_model_suffix = suffix
        if delayed_model_loading:
            return
        model = self.automatic_model_search(network_info)
        self.inference_session = launcher.create_inference_session(str(model))

    def fit_to_input(self, input_data):
        """Subclasses must implement input preparation."""
        raise NotImplementedError

    def predict(self, identifiers, input_data):
        """Subclasses must implement inference."""
        raise NotImplementedError

    def release(self):
        """Drop the inference session so its resources can be reclaimed."""
        del self.inference_session

    @staticmethod
    def automatic_model_search(network_info):
        """No directory search for TF models: the config path is taken as-is."""
        return Path(network_info['model'])
class BaseCaffeModel:
    """Wrapper over a Caffe network driven through a launcher.

    NOTE(review): this class reads ``self.default_model_name`` and deletes
    ``self.net``, but neither attribute is assigned here (``__init__`` sets
    ``default_model_suffix``).  Presumably subclasses provide them — confirm,
    otherwise ``automatic_model_search``/``release`` raise AttributeError.
    """

    def __init__(self, network_info, launcher, suffix=None, delayed_model_loading=False):
        self.network_info = network_info
        self.launcher = launcher
        self.default_model_suffix = suffix

    def fit_to_input(self, data, layer_name, layout, precision, tmpl=None):
        # Delegates input layout/precision handling entirely to the launcher.
        return self.launcher.fit_to_input(data, layer_name, layout, precision, template=tmpl)

    def predict(self, identifiers, input_data):
        """Subclasses must implement inference."""
        raise NotImplementedError

    def release(self):
        # NOTE(review): ``self.net`` is expected to be set by a subclass.
        del self.net

    def automatic_model_search(self, network_info):
        """Locate the .prototxt description and matching .caffemodel weights.

        Returns a ``(model, weights)`` pair of Paths.  Directories are
        searched first by the model name, then by extension.
        """
        model = Path(network_info.get('model', ''))
        weights = network_info.get('weights')
        if model.is_dir():
            models_list = list(Path(model).glob('{}.prototxt'.format(self.default_model_name)))
            if not models_list:
                models_list = list(Path(model).glob('*.prototxt'))
            if not models_list:
                raise ConfigError('Suitable model description is not detected')
            if len(models_list) != 1:
                raise ConfigError('Several suitable models found, please specify required model')
            model = models_list[0]
        if weights is None or Path(weights).is_dir():
            # Derive weights next to the model (or inside the given dir),
            # falling back to any single .caffemodel file found there.
            weights_dir = weights or model.parent
            weights = Path(weights_dir) / model.name.replace('prototxt', 'caffemodel')
            if not weights.exists():
                # NOTE(review): ``weights_dir`` may still be a plain string
                # here (when taken from the config), in which case ``.glob``
                # would fail — verify callers always pass a Path or None.
                weights_list = list(weights_dir.glob('*.caffemodel'))
                if not weights_list:
                    raise ConfigError('Suitable weights is not detected')
                if len(weights_list) != 1:
                    raise ConfigError('Several suitable weights found, please specify required explicitly')
                weights = weights_list[0]
        weights = Path(weights)
        accepted_suffixes = ['.prototxt']
        if model.suffix not in accepted_suffixes:
            raise ConfigError('Models with following suffixes are allowed: {}'.format(accepted_suffixes))
        print_info('{} - Found model: {}'.format(self.default_model_name, model))
        accepted_weights_suffixes = ['.caffemodel']
        if weights.suffix not in accepted_weights_suffixes:
            raise ConfigError('Weights with following suffixes are allowed: {}'.format(accepted_weights_suffixes))
        print_info('{} - Found weights: {}'.format(self.default_model_name, weights))
        return model, weights
| en | 0.850136 | Copyright (c) 2018-2021 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 1.739117 | 2 |
ex047_npares2em2.py | elisamariacampos/MundoPhyton | 0 | 6624730 | <reponame>elisamariacampos/MundoPhyton
for c in range (2,51,2):
print(c, end=' ') | for c in range (2,51,2):
print(c, end=' ') | none | 1 | 2.907701 | 3 | |
lowfat/migrations/0041_auto_20160720_1031.py | elena-kolomeets/lowfat | 6 | 6624731 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-07-20 10:31
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: make Event.fellow a required FK to Fellow (cascade on delete)."""

    dependencies = [
        ('lowfat', '0040_auto_20160720_1029'),
    ]

    operations = [
        migrations.AlterField(
            model_name='event',
            name='fellow',
            # default=0 only backfills existing rows while the column becomes
            # non-nullable; preserve_default=False drops it afterwards.
            field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='lowfat.Fellow'),
            preserve_default=False,
        ),
    ]
| # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-07-20 10:31
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('lowfat', '0040_auto_20160720_1029'),
]
operations = [
migrations.AlterField(
model_name='event',
name='fellow',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='lowfat.Fellow'),
preserve_default=False,
),
]
| en | 0.750503 | # -*- coding: utf-8 -*- # Generated by Django 1.9.5 on 2016-07-20 10:31 | 1.410289 | 1 |
pyqus/__replaces.py | JorgeDeLosSantos/pyqus | 21 | 6624732 | <gh_stars>10-100
# -*- coding: utf-8 -*-
# __replaces.py
# =============================
# (c) 2015, <NAME>
# ITC-Bypasa
# =============================
#
from cfg import *
# Placeholder token -> replacement value pairs.  Presumably substituted into a
# report/document template elsewhere — confirm with the code that consumes
# these dicts.
CONTENTS_REPLACES = {
    "_deformedshape_": r"graphs/elements.png",
    "_misesstress_": r"graphs/stress.png",
    "_nominalstrain_": r"graphs/strain.png",
    "_materialcurve_": r"graphs/strain.png",
    "_material_": "1018 Steel",
    # Numeric material/contact constants come from cfg (star-imported above).
    "_friction_": str(FRICTION_COEFF),
    "_poisson_": str(POISSON_COEFF),
    "_density_": str(STEEL_DENSITY),
    "_young_": str(YOUNG_MOD),
    "_elementtype_": "CPS4",
    "_meshsize_": str(MESH_SIZE_QUAD)
}

PARTS_REPLACE = {
    "_partname_": 1,
    "_parttype_": 2,
}
| # -*- coding: utf-8 -*-
# __replaces.py
# =============================
# (c) 2015, <NAME>
# ITC-Bypasa
# =============================
#
from cfg import *
CONTENTS_REPLACES = {
"_deformedshape_":r"graphs/elements.png",
"_misesstress_":r"graphs/stress.png",
"_nominalstrain_":r"graphs/strain.png",
"_materialcurve_":r"graphs/strain.png",
"_material_":"1018 Steel",
"_friction_":str(FRICTION_COEFF),
"_poisson_":str(POISSON_COEFF),
"_density_":str(STEEL_DENSITY),
"_young_":str(YOUNG_MOD),
"_elementtype_":"CPS4",
"_meshsize_":str(MESH_SIZE_QUAD)
}
PARTS_REPLACE = {
"_partname_":1,
"_parttype_":2,
} | en | 0.537198 | # -*- coding: utf-8 -*- # __replaces.py # ============================= # (c) 2015, <NAME> # ITC-Bypasa # ============================= # | 1.792904 | 2 |
tcc/dataset_preparation/images_to_tfrecords.py | egonrian/google-research | 3 | 6624733 | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert list of videos to tfrecords based on SequenceExample."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import json
import math
import os
from absl import app
from absl import flags
from absl import logging
import scipy.io as sio
from tcc.dataset_preparation.dataset_utils import label_timestamps
from tcc.dataset_preparation.dataset_utils import merge_annotations
from tcc.dataset_preparation.dataset_utils import write_seqs_to_tfrecords
import cv2
# ----- Command-line flags (absl) for the video -> TFRecord converter -----
flags.DEFINE_string('dir', None, 'Path to videos.')
flags.DEFINE_string('name', None, 'Name of the dataset being created. This will'
                    'be used as a prefix.')
flags.DEFINE_string('vid_list', None, 'Path to list of folders with frames of '
                    'videos.')
flags.DEFINE_string('extension', 'jpg', 'Extension of images.')
flags.DEFINE_string(
    'label_file', None, 'Provide a corresponding labels file'
    'that stores per-frame or per-sequence labels.')
flags.DEFINE_string('output_dir', '/tmp/tfrecords/', 'Output directory where'
                    'tfrecords will be stored.')
flags.DEFINE_integer('vids_per_shard', 1, 'Number of videos to store in a'
                     'shard.')
flags.DEFINE_list(
    'frame_labels', '', 'Comma separated list of descriptions '
    'for labels given on a per frame basis. For example: '
    'winding_up,early_cocking,acclerating,follow_through')
flags.DEFINE_integer('action_label', -1, 'Action label of all videos.')
flags.DEFINE_integer('expected_segments', -1, 'Expected number of segments.')
flags.DEFINE_boolean('rotate', False, 'Rotate videos by 90 degrees before'
                     'creating tfrecords')
flags.DEFINE_boolean('resize', True, 'Resize videos to a given size.')
flags.DEFINE_integer('width', 224, 'Width of frames in the TFRecord.')
flags.DEFINE_integer('height', 224, 'Height of frames in the TFRecord.')
flags.DEFINE_integer('fps', 30, 'Frames per second in video.')
flags.mark_flag_as_required('name')
flags.mark_flag_as_required('dir')
flags.mark_flag_as_required('vid_list')
FLAGS = flags.FLAGS
def preprocess(im, rotate, resize, width, height):
    """Convert BGR->RGB, optionally resize to (width, height) and rotate 90 degrees."""
    out = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
    if resize:
        out = cv2.resize(out, (width, height))
    if rotate:
        # transpose + horizontal flip == 90-degree clockwise rotation
        out = cv2.flip(cv2.transpose(out), 1)
    return out
def get_frames_in_folder(path, rotate, resize, width, height):
    """Load and preprocess all frames of one video stored as images in a folder.

    Args:
      path: string, directory containing frames of a video.
      rotate: Boolean, if True rotates each image by 90 degrees.
      resize: Boolean, if True resizes images to the given size.
      width: Integer, target image width.
      height: Integer, target image height.
    Returns:
      List of preprocessed frames, in sorted filename order.
    Raises:
      ValueError: when the provided path is not a directory.
    """
    if not os.path.isdir(path):
        raise ValueError('Provided path %s is not a directory' % path)
    pattern = os.path.join(path, '*.%s' % FLAGS.extension)
    return [preprocess(cv2.imread(image_path), rotate, resize, width, height)
            for image_path in sorted(glob.glob(pattern))]
def get_name(filename, videos_dir, penn_action=False):
    """Return the video name; for Penn Action, append the action label from its .mat file."""
    if not penn_action:
        return filename
    labels_path = os.path.join(videos_dir, 'labels', '%s.mat' % filename)
    label = sio.loadmat(labels_path)['action'][0]
    return '{}_{}'.format(filename, label)
def get_timestamps(frames, fps, offset=0.0):
    """Return one timestamp per frame, spaced 1/fps seconds apart starting at offset."""
    return [offset + index / float(fps) for index in range(len(frames))]
def create_tfrecords(name, output_dir, videos_dir, vid_list, label_file,
                     frame_labels, fps, expected_segments):
    """Create TFRecords from videos in a given path.

    Args:
      name: string, name of the dataset being created.
      output_dir: string, path to output directory.
      videos_dir: string, path to input videos directory.
      vid_list: string, path to file containing list of folders where frames
        are stored.
      label_file: string, JSON file that contains annotations (or None).
      frame_labels: list, list of strings describing each class. Class label is
        the index in the list.
      fps: integer, frames per second with which the images were extracted.
      expected_segments: int, expected number of segments.
    Raises:
      ValueError: If invalid args are passed.
    """
    if not os.path.exists(output_dir):
        logging.info('Creating output directory: %s', output_dir)
        os.makedirs(output_dir)
    # One video per line, relative to videos_dir; sorted for deterministic shards.
    with open(vid_list, 'r') as f:
        paths = sorted([os.path.join(videos_dir, x.strip()) for x in f.readlines()])
    if label_file is not None:
        with open(os.path.join(label_file)) as labels_file:
            data = json.load(labels_file)
    names_to_seqs = {}
    num_shards = int(math.ceil(len(paths)/FLAGS.vids_per_shard))
    # Zero-pad shard indices so filenames sort lexicographically.
    len_num_shards = len(str(num_shards))
    shard_id = 0
    for i, path in enumerate(paths):
        seq = {}
        vid_name = get_name(os.path.basename(path), videos_dir)
        frames = get_frames_in_folder(path, FLAGS.rotate, FLAGS.resize,
                                      FLAGS.width, FLAGS.height)
        seq['video'] = frames
        if label_file is not None:
            video_id = os.path.basename(path)
            if video_id in data:
                video_labels = data[video_id]
            else:
                raise ValueError('Video id %s not found in labels file.' % video_id)
            # Collapse overlapping segment annotations, then assign a label to
            # every frame timestamp.
            merged_annotations = merge_annotations(video_labels,
                                                   expected_segments)
            video_timestamps = get_timestamps(frames, fps)
            seq['labels'] = label_timestamps(video_timestamps, merged_annotations)
        names_to_seqs[vid_name] = seq
        # Flush a shard every vids_per_shard videos, and at the end of the list.
        if (i + 1) % FLAGS.vids_per_shard == 0 or i == len(paths)-1:
            output_filename = os.path.join(
                output_dir,
                '%s-%s-of-%s.tfrecord' % (name,
                                          str(shard_id).zfill(len_num_shards),
                                          str(num_shards).zfill(len_num_shards)))
            write_seqs_to_tfrecords(output_filename, names_to_seqs,
                                    FLAGS.action_label, frame_labels)
            shard_id += 1
            names_to_seqs = {}
def main(_):
    # absl.app entry point: forward the parsed flags to the converter.
    create_tfrecords(FLAGS.name, FLAGS.output_dir, FLAGS.dir, FLAGS.vid_list,
                     FLAGS.label_file, FLAGS.frame_labels, FLAGS.fps,
                     FLAGS.expected_segments)
if __name__ == '__main__':
app.run(main)
| # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert list of videos to tfrecords based on SequenceExample."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import json
import math
import os
from absl import app
from absl import flags
from absl import logging
import scipy.io as sio
from tcc.dataset_preparation.dataset_utils import label_timestamps
from tcc.dataset_preparation.dataset_utils import merge_annotations
from tcc.dataset_preparation.dataset_utils import write_seqs_to_tfrecords
import cv2
flags.DEFINE_string('dir', None, 'Path to videos.')
flags.DEFINE_string('name', None, 'Name of the dataset being created. This will'
'be used as a prefix.')
flags.DEFINE_string('vid_list', None, 'Path to list of folders with frames of '
'videos.')
flags.DEFINE_string('extension', 'jpg', 'Extension of images.')
flags.DEFINE_string(
'label_file', None, 'Provide a corresponding labels file'
'that stores per-frame or per-sequence labels.')
flags.DEFINE_string('output_dir', '/tmp/tfrecords/', 'Output directory where'
'tfrecords will be stored.')
flags.DEFINE_integer('vids_per_shard', 1, 'Number of videos to store in a'
'shard.')
flags.DEFINE_list(
'frame_labels', '', 'Comma separated list of descriptions '
'for labels given on a per frame basis. For example: '
'winding_up,early_cocking,acclerating,follow_through')
flags.DEFINE_integer('action_label', -1, 'Action label of all videos.')
flags.DEFINE_integer('expected_segments', -1, 'Expected number of segments.')
flags.DEFINE_boolean('rotate', False, 'Rotate videos by 90 degrees before'
'creating tfrecords')
flags.DEFINE_boolean('resize', True, 'Resize videos to a given size.')
flags.DEFINE_integer('width', 224, 'Width of frames in the TFRecord.')
flags.DEFINE_integer('height', 224, 'Height of frames in the TFRecord.')
flags.DEFINE_integer('fps', 30, 'Frames per second in video.')
flags.mark_flag_as_required('name')
flags.mark_flag_as_required('dir')
flags.mark_flag_as_required('vid_list')
FLAGS = flags.FLAGS
def preprocess(im, rotate, resize, width, height):
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
if resize:
im = cv2.resize(im, (width, height))
if rotate:
im = cv2.transpose(im)
im = cv2.flip(im, 1)
return im
def get_frames_in_folder(path, rotate, resize, width, height):
"""Returns all frames from a video in a given folder.
Args:
path: string, directory containing frames of a video.
rotate: Boolean, if True rotates an image by 90 degrees.
resize: Boolean, if True resizes images to given size.
width: Integer, Width of image.
height: Integer, Height of image.
Returns:
frames: list, list of frames in a video.
Raises:
ValueError: When provided directory doesn't exist.
"""
if not os.path.isdir(path):
raise ValueError('Provided path %s is not a directory' % path)
else:
im_list = sorted(glob.glob(os.path.join(path, '*.%s' % FLAGS.extension)))
frames = [preprocess(cv2.imread(im), rotate, resize, width, height)
for im in im_list]
return frames
def get_name(filename, videos_dir, penn_action=False):
"""Add label to name for Penn Action dataset."""
if penn_action:
labels_path = os.path.join(videos_dir, 'labels', '%s.mat' % filename)
annotation = sio.loadmat(labels_path)
label = annotation['action'][0]
return '{}_{}'.format(filename, label)
else:
return filename
def get_timestamps(frames, fps, offset=0.0):
"""Returns timestamps for frames in a video."""
return [offset + x/float(fps) for x in range(len(frames))]
def create_tfrecords(name, output_dir, videos_dir, vid_list, label_file,
frame_labels, fps, expected_segments):
"""Create TFRecords from videos in a given path.
Args:
name: string, name of the dataset being created.
output_dir: string, path to output directory.
videos_dir: string, path to input videos directory.
vid_list: string, path to file containing list of folders where frames
are stored.
label_file: string, JSON file that contains annotations.
frame_labels: list, list of string describing each class. Class label is
the index in list.
fps: integer, frames per second with which the images were extracted.
expected_segments: int, expected number of segments.
Raises:
ValueError: If invalid args are passed.
"""
if not os.path.exists(output_dir):
logging.info('Creating output directory: %s', output_dir)
os.makedirs(output_dir)
with open(vid_list, 'r') as f:
paths = sorted([os.path.join(videos_dir, x.strip()) for x in f.readlines()])
if label_file is not None:
with open(os.path.join(label_file)) as labels_file:
data = json.load(labels_file)
names_to_seqs = {}
num_shards = int(math.ceil(len(paths)/FLAGS.vids_per_shard))
len_num_shards = len(str(num_shards))
shard_id = 0
for i, path in enumerate(paths):
seq = {}
vid_name = get_name(os.path.basename(path), videos_dir)
frames = get_frames_in_folder(path, FLAGS.rotate, FLAGS.resize,
FLAGS.width, FLAGS.height)
seq['video'] = frames
if label_file is not None:
video_id = os.path.basename(path)
if video_id in data:
video_labels = data[video_id]
else:
raise ValueError('Video id %s not found in labels file.' % video_id)
merged_annotations = merge_annotations(video_labels,
expected_segments)
video_timestamps = get_timestamps(frames, fps)
seq['labels'] = label_timestamps(video_timestamps, merged_annotations)
names_to_seqs[vid_name] = seq
if (i + 1) % FLAGS.vids_per_shard == 0 or i == len(paths)-1:
output_filename = os.path.join(
output_dir,
'%s-%s-of-%s.tfrecord' % (name,
str(shard_id).zfill(len_num_shards),
str(num_shards).zfill(len_num_shards)))
write_seqs_to_tfrecords(output_filename, names_to_seqs,
FLAGS.action_label, frame_labels)
shard_id += 1
names_to_seqs = {}
def main(_):
create_tfrecords(FLAGS.name, FLAGS.output_dir, FLAGS.dir, FLAGS.vid_list,
FLAGS.label_file, FLAGS.frame_labels, FLAGS.fps,
FLAGS.expected_segments)
if __name__ == '__main__':
app.run(main)
| en | 0.820231 | # coding=utf-8 # Copyright 2020 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Convert list of videos to tfrecords based on SequenceExample. Returns all frames from a video in a given folder. Args: path: string, directory containing frames of a video. rotate: Boolean, if True rotates an image by 90 degrees. resize: Boolean, if True resizes images to given size. width: Integer, Width of image. height: Integer, Height of image. Returns: frames: list, list of frames in a video. Raises: ValueError: When provided directory doesn't exist. Add label to name for Penn Action dataset. Returns timestamps for frames in a video. Create TFRecords from videos in a given path. Args: name: string, name of the dataset being created. output_dir: string, path to output directory. videos_dir: string, path to input videos directory. vid_list: string, path to file containing list of folders where frames are stored. label_file: string, JSON file that contains annotations. frame_labels: list, list of string describing each class. Class label is the index in list. fps: integer, frames per second with which the images were extracted. expected_segments: int, expected number of segments. Raises: ValueError: If invalid args are passed. | 1.746619 | 2 |
users/migrations/0014_alter_address_country.py | MattiMatt8/ship-o-cereal | 1 | 6624734 | # Generated by Django 3.2 on 2021-05-10 10:53
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: point Address.country at the Country model (no cascade)."""

    dependencies = [
        ("users", "0013_auto_20210506_1329"),
    ]

    operations = [
        migrations.AlterField(
            model_name="address",
            name="country",
            field=models.ForeignKey(
                # DO_NOTHING: deleting a Country leaves addresses untouched
                # (database-level integrity still applies).
                on_delete=django.db.models.deletion.DO_NOTHING, to="users.country"
            ),
        ),
    ]
| # Generated by Django 3.2 on 2021-05-10 10:53
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("users", "0013_auto_20210506_1329"),
]
operations = [
migrations.AlterField(
model_name="address",
name="country",
field=models.ForeignKey(
on_delete=django.db.models.deletion.DO_NOTHING, to="users.country"
),
),
]
| en | 0.834942 | # Generated by Django 3.2 on 2021-05-10 10:53 | 1.466676 | 1 |
tidy.py | ghoost82/sf30ac-extractor | 35 | 6624735 | <filename>tidy.py
#!/usr/bin/env python3
import os
import sys
def move_music(src_dir, dst_dir, prefix, extension=".ogg"):
    """Move every file in src_dir whose name starts with prefix and ends
    with extension into dst_dir, printing each filename as it moves."""
    for filename in os.listdir(src_dir):
        if filename.startswith(prefix) and filename.endswith(extension):
            print(filename)
            os.rename(os.path.join(src_dir, filename),
                      os.path.join(dst_dir, filename))


def main():
    """Reorganize extracted SF30AC music files; returns a process exit code."""
    if len(sys.argv) != 2:
        print("Usage: python tidy.py \"C:\\...your extraction folder...\"")
        return 1
    root_dir = sys.argv[1]
    if not os.path.exists(root_dir):
        # Typo fix: "Cant" -> "Can't".
        print("Can't find the extraction folder, are you sure you're using this correctly? Read the README.")
        return 2
    main_path = os.path.join(root_dir, "Main")
    second_impact_path = os.path.join(root_dir, "StreetFighterIII_2ndImpact")
    third_strike_path = os.path.join(root_dir, "StreetFighterIII_3rdStrike")
    # Move Second Impact music from the Third Strike dir to the Second Impact dir.
    move_music(third_strike_path, second_impact_path, "SF3SI")
    # Move Third Strike music from the Main dir to the Third Strike dir.
    move_music(main_path, third_strike_path, "SF3TS")
    return 0


if __name__ == "__main__":
    sys.exit(main())
| <filename>tidy.py
#!/usr/bin/env python3
import os
import sys
if len(sys.argv) != 2:
print("Usage: python tidy.py \"C:\\...your extraction folder...\"")
exit(1)
root_dir = sys.argv[1]
if not os.path.exists(root_dir):
print("Cant find the extraction folder, are you sure you're using this correctly? Read the README.")
exit(2)
main_path = os.path.join(root_dir, "Main")
second_impact_path = os.path.join(root_dir, "StreetFighterIII_2ndImpact")
third_strike_path = os.path.join(root_dir, "StreetFighterIII_3rdStrike")
main_files = os.listdir(main_path)
third_strike_files = os.listdir(third_strike_path)
# Move Second Impact music from Third Strike dir to Second Impact dir.
for filename in third_strike_files:
if filename.startswith("SF3SI") and filename.endswith(".ogg"):
print(filename)
old = os.path.join(third_strike_path, filename)
new = os.path.join(second_impact_path, filename)
os.rename(old, new)
# Move Third Strike music from Main dir to Third Strike dir.
for filename in main_files:
if filename.startswith("SF3TS") and filename.endswith(".ogg"):
print(filename)
old = os.path.join(main_path, filename)
new = os.path.join(third_strike_path, filename)
os.rename(old, new)
| en | 0.776831 | #!/usr/bin/env python3 # Move Second Impact music from Third Strike dir to Second Impact dir. # Move Third Strike music from Main dir to Third Strike dir. | 2.911756 | 3 |
LassoVariants/EnumLasso/paper/paper_synthetic_alpha.py | carushi/Catactor | 0 | 6624736 | # -*- coding: utf-8 -*-
"""
@author: satohara
"""
import sys
sys.path.append('../')
import numpy as np
import matplotlib.pyplot as plt
from EnumerateLinearModel import EnumLasso
# setting - data
dim = 10                          # feature dimension
L = 5                             # rank of the random covariance factor
num = 100                         # samples per trial
eps = 0.1                         # observation noise level
alpha = np.logspace(-3, 0, 11)    # blend between correlated cov. and identity
# setting - EnumLasso
rho = 0.01
ratio = 3
maxitr = 10000
tol = 1e-10
delta = 0
# test
ss = []
tt = []
seed = 0
count = 100
for c in range(count):
    print('seed = %2d' % (seed+c,))
    # per-seed results across alpha values
    s = []
    t = []
    for a in alpha:
        # data: x ~ N(0, B) with B = (1-a)*A + a*I; y depends on features 0 and 1
        np.random.seed(seed+c)
        V = np.random.randn(dim, L)
        A = V.dot(V.T)
        A /= np.linalg.norm(A) / dim
        B = (1 - a) * A + a * np.identity(dim)
        x = np.random.randn(num, dim).dot(B)
        y = x[:, 0] + x[:, 1] + eps * np.random.randn(num)
        # EnumLasso: enumerate solutions, find the first whose support lies in {0, 1}
        mdl = EnumLasso(rho=rho, r=ratio, maxitr=maxitr, tol=tol, delta=delta)
        mdl.fit(x, y)
        K = len(mdl.obj_)
        obj = []
        sord = np.nan    # 1-based rank of the first solution supported on {0, 1}
        stype = np.nan   # 1 if that support is exactly {0, 1}, 0 if a strict subset
        for i in range(K):
            nonzeros = np.where(np.abs(mdl.a_[i]) > 0)[0]
            obj.append(mdl.obj_[i])
            if np.isnan(sord):
                if set(nonzeros) <= set([0, 1]):
                    sord = i+1
                    # BUG FIX: the original compared set(s) — the list of rank
                    # values — against {0, 1}; the intended check is whether the
                    # recovered support equals the true support {0, 1}.
                    if set(nonzeros) == set([0, 1]):
                        stype = 1
                    else:
                        stype = 0
        s.append(sord)
        t.append(stype)
    ss.append(s)
    tt.append(t)
# print
ss = np.array(ss)
mm = np.median(ss, axis=0)
pp1 = np.percentile(ss, 25, axis=0)
pp2 = np.percentile(ss, 75, axis=0)
ax = plt.subplot(111)
ax.set_xscale('log', nonposx='clip')
plt.errorbar(alpha, mm, yerr=[mm-pp1, pp2-mm])
plt.xlabel('log10(alpha)')
plt.ylabel('K')
plt.show()
plt.savefig('./rho%03d_seed%03d-%03d.pdf' % (int(100 * rho), seed, seed+count), format="pdf", bbox_inches="tight")
plt.close()
np.savetxt('./rho%03d_seed%03d-%03d.txt' % (int(100 * rho), seed, seed+count), np.c_[alpha, mm, pp1, pp2], fmt='%f', delimiter=',')
| # -*- coding: utf-8 -*-
"""
@author: satohara
"""
import sys
sys.path.append('../')
import numpy as np
import matplotlib.pyplot as plt
from EnumerateLinearModel import EnumLasso
# setting - data
dim = 10
L = 5
num = 100
eps = 0.1
alpha = np.logspace(-3, 0, 11)
# setting - EnumLasso
rho = 0.01
ratio = 3
maxitr = 10000
tol = 1e-10
delta = 0
# test
ss = []
tt = []
seed = 0
count = 100
for c in range(count):
print('seed = %2d' % (seed+c,))
# test
s = []
t = []
for a in alpha:
# data
np.random.seed(seed+c)
V = np.random.randn(dim, L)
A = V.dot(V.T)
A /= np.linalg.norm(A) / dim
B = (1 - a) * A + a * np.identity(dim)
x = np.random.randn(num, dim).dot(B)
y = x[:, 0] + x[:, 1] + eps * np.random.randn(num)
# EnumLasso
mdl = EnumLasso(rho=rho, r=ratio, maxitr=maxitr, tol=tol, delta=delta)
mdl.fit(x, y)
K = len(mdl.obj_)
obj = []
sord = np.nan
stype = np.nan
for i in range(K):
nonzeros = np.where(np.abs(mdl.a_[i]) > 0)[0]
obj.append(mdl.obj_[i])
if np.isnan(sord):
if set(nonzeros) <= set([0, 1]):
sord = i+1
if set(s) == set([0, 1]):
stype = 1
else:
stype = 0
s.append(sord)
t.append(stype)
ss.append(s)
tt.append(t)
# print
ss = np.array(ss)
mm = np.median(ss, axis=0)
pp1 = np.percentile(ss, 25, axis=0)
pp2 = np.percentile(ss, 75, axis=0)
ax = plt.subplot(111)
ax.set_xscale('log', nonposx='clip')
plt.errorbar(alpha, mm, yerr=[mm-pp1, pp2-mm])
plt.xlabel('log10(alpha)')
plt.ylabel('K')
plt.show()
plt.savefig('./rho%03d_seed%03d-%03d.pdf' % (int(100 * rho), seed, seed+count), format="pdf", bbox_inches="tight")
plt.close()
np.savetxt('./rho%03d_seed%03d-%03d.txt' % (int(100 * rho), seed, seed+count), np.c_[alpha, mm, pp1, pp2], fmt='%f', delimiter=',')
| en | 0.479719 | # -*- coding: utf-8 -*- @author: satohara # setting - data # setting - EnumLasso # test # test # data # EnumLasso # print | 2.293243 | 2 |
tests/ui_interaction/main_menu.py | ow-gryphon/gryphon | 0 | 6624737 | from gryphon.wizard.wizard_text import Text
from .basic_actions import wait_for_output, select_nth_option
# Prompt fragments the wizard prints when the corresponding screen is reached;
# used as wait_for_output targets below.
NEXT_MENU = "Navigate the categories"
USEFUL_LINKS = "Useful links"
# ON main menu
# AND starting on the first option
def select_init_on_main_menu(process):
    # Option 1 ("Init"); then wait for the template question prompt.
    select_nth_option(process, n=1)
    wait_for_output(process, Text.init_prompt_template_question)
def select_generate_on_main_menu(process):
    # Option 2 ("Generate"); then wait for the categories question prompt.
    # Removed leftover debug print("selected generate") — no sibling helper
    # prints, and the output could interfere with prompt matching.
    select_nth_option(process, n=2)
    wait_for_output(process, Text.add_prompt_categories_question)
def select_add_on_main_menu(process):
    # Option 3 ("Add"); then wait for the category-navigation prompt.
    select_nth_option(process, n=3)
    wait_for_output(process, NEXT_MENU)

def select_advanced_on_main_menu(process):
    # Option 4 ("Advanced"); then wait for the category-navigation prompt.
    select_nth_option(process, n=4)
    wait_for_output(process, NEXT_MENU)

def select_about_on_main_menu(process):
    # Option 5 ("About"); then wait for the useful-links prompt.
    select_nth_option(process, n=5)
    wait_for_output(process, USEFUL_LINKS)

def select_exit_on_main_menu(process):
    # Option 6 ("Exit"); the process terminates, so there is no prompt to await.
    select_nth_option(process, n=6)
| from gryphon.wizard.wizard_text import Text
from .basic_actions import wait_for_output, select_nth_option
NEXT_MENU = "Navigate the categories"
USEFUL_LINKS = "Useful links"
# ON main menu
# AND starting on the first option
def select_init_on_main_menu(process):
select_nth_option(process, n=1)
wait_for_output(process, Text.init_prompt_template_question)
def select_generate_on_main_menu(process):
select_nth_option(process, n=2)
wait_for_output(process, Text.add_prompt_categories_question)
print("selected generate")
def select_add_on_main_menu(process):
select_nth_option(process, n=3)
wait_for_output(process, NEXT_MENU)
def select_advanced_on_main_menu(process):
select_nth_option(process, n=4)
wait_for_output(process, NEXT_MENU)
def select_about_on_main_menu(process):
select_nth_option(process, n=5)
wait_for_output(process, USEFUL_LINKS)
def select_exit_on_main_menu(process):
select_nth_option(process, n=6)
| en | 0.752372 | # ON main menu # AND starting on the first option | 2.701705 | 3 |
research-work/src/synth_runner.py | egbuch/super-urop | 0 | 6624738 | ######
#
# Music system playback is possible thanks to:
# Widget-based Synthesizer Logic thanks to <NAME>
# Music21 Python Module via <NAME>
#
######
import sys
sys.path.append('..')
from common.core import *
from common.audio import *
from common.synth import *
from common.gfxutil import *
from common.clock import *
from common.metro import *
import music21 as m21
import analyzer
import transformer
import looper
import av_grid
import concurrent.futures as fut
import time
STRING_PATCH = 48
BRASS_PATCH = 61
## CC CHANNELS ##
VIBRATO_CC = 1
VOLUME_CC = 7
PAN_CC = 10
EXPRESSION_CC = 11
SUSTAIN_CC = 64
REVERB_CC = 91
CHORUS_CC = 93
class MainWidget(BaseWidget) :
def __init__(self):
super(MainWidget, self).__init__()
self.audio = Audio(2) # set up audio
self.song_path = '../scores/mario-song.musicxml' # set song path
# create TempoMap, AudioScheduler
self.tempo = 120 #TODO: grab tempo from file
self.tempo_map = SimpleTempoMap(self.tempo)
self.sched = AudioScheduler(self.tempo_map)
# Add a looper
self.looper = looper.SongLooper(self.song_path, self.tempo)
self.looper.initialize()
# Set up FluidSynth
self.synth = Synth('./synth_data/FluidR3_GM.sf2')
self.note_velocity = 127
# set up a midi channel for each part
for i in range(len(self.looper.parts)):
base_channel = 2*i
switch_channel = 2*i + 1
self.synth.program(base_channel, 0, 0)
self.synth.program(switch_channel, 0, 0)
# set the reverb
self.synth.cc(base_channel, REVERB_CC, 127)
self.synth.cc(switch_channel, REVERB_CC, 127)
# set the EXPRESSION_CC
self.synth.cc(base_channel, EXPRESSION_CC, 100)
self.synth.cc(base_channel, EXPRESSION_CC, 100)
# connect scheduler into audio system
self.audio.set_generator(self.sched)
self.sched.set_generator(self.synth)
# and text to display our status
self.label = topleft_label()
self.add_widget(self.label)
# as the loop continues, these values will be updated to the current transformation
key_info = self.looper.initial_key.split(" ")
self.note_letter = key_info[0][0]
self.accidental_letter = key_info[0][1] if len(key_info[0]) == 2 else ''
self.mode = key_info[1]
self.current_rhythm = 'ORIGINAL'
# concurrent processing of transformations
self.executor = fut.ThreadPoolExecutor(max_workers=4)
def on_cmd(self,tick, pitch, channel, velocity):
self.synth.noteon(channel, pitch, velocity)
def off_cmd(self,tick, pitch, channel):
self.synth.noteoff(channel, pitch)
def measure_update(self, now_beat, now_tick):
# next step in the loop
self.looper.step(now_beat + 1)
# schedule each element that appears within the measure
for i in range(len(self.looper.current_measure_in_parts)):
part = self.looper.current_measure_in_parts[i]
for j in range(len(part)):
#retrieve the specific element in the measure
element = part[j]
dur = element.element.duration.quarterLength
# ge millisecond timestamps that the element will be scheduled on
on_tick = now_tick + (element.beatOffset + 1)*kTicksPerQuarter
off_tick = on_tick + kTicksPerQuarter*dur
# if the element is a note
if element.is_note():
pitch = element.element.pitch.midi
# schedule note on and off
self.sched.post_at_tick(on_tick, self.on_cmd, pitch, 2*i, self.note_velocity)
self.sched.post_at_tick(off_tick, self.off_cmd, pitch, 2*i)
# switch channel should mirror silently
self.sched.post_at_tick(on_tick, self.on_cmd, pitch, 2*i + 1, self.note_velocity)
self.sched.post_at_tick(off_tick, self.off_cmd, pitch, 2*i + 1)
# else if the element is a chord
elif element.is_chord():
pitches = [pitch.midi for pitch in list(element.element.pitches)]
# schedule off and on events for each pitch in the chord
for pitch in pitches:
self.sched.post_at_tick(on_tick, self.on_cmd, pitch, 2*i, self.note_velocity)
self.sched.post_at_tick(off_tick, self.off_cmd, pitch, 2*i)
# swtich channel should mirror silently
self.sched.post_at_tick(on_tick, self.on_cmd, pitch, 2*i + 1, self.note_velocity)
self.sched.post_at_tick(off_tick, self.off_cmd, pitch, 2*i + 1)
def on_update(self):
self.audio.on_update()
# current time
now_beat = self.sched.get_current_beat()
now_tick = self.sched.get_tick()
#time of last measure
previous_beat = self.looper.get_last_measure_beat()
# take the difference, and see if it falls within the buffer-zone
diff = now_beat - previous_beat
mb = 3
if (diff >= mb):
# self.executor.submit(self.measure_update, now_beat, now_tick)
self.measure_update(now_beat, now_tick)
self.label.text = "Synthesizer and accompanying code via <NAME> (21M.385)" + '\n\n'
self.label.text += self.sched.now_str() + '\n'
self.label.text += 'key = ' + self.note_letter + self.accidental_letter + ' ' + self.mode + '\n'
self.label.text += 'tempo = ' + str(self.tempo) + '\n'
class TransformationWidget(MainWidget):
def __init__(self):
super(TransformationWidget, self).__init__()
# volume/dynamic control
self.default_volume = 88
self.volume_delta = 4
self.current_volume = self.default_volume
# tempo control
self.tempo_delta = 8.0
# keep track of key and rhythms
self.key_changing = False
self.rhythm_changing = False
self.checking_transformation_done = False
self.last_key_change_beat = 0
#### TEMPO ###
def tempoChanged(self):
cur_time = self.tempo_map.tick_to_time(self.sched.get_tick())
self.tempo_map.set_tempo(self.tempo, cur_time)
self.looper.set_tempo(self.tempo)
def tempoUp(self):
self.tempo += 8
self.tempoChanged()
def tempoDown(self):
self.tempo -= 8
self.tempoChanged()
def setTempo(self, tempo):
self.tempo = tempo
self.tempoChanged()
#### Key and Mode ####
def keyChanged(self, rhythm = None):
new_key = self.note_letter + self.accidental_letter + ' ' + self.mode
if new_key != self.looper.current_key:
# # submit the actual transformation task to the executor
self.executor.submit(self.looper.transform, None, new_key, rhythm)
def rhythmChanged(self):
# submit the actual transformation task to the executor
self.executor.submit(self.looper.transform, None, None, self.current_rhythm)
def checkKeyChange(self, note, accidental, mode):
# if this results in a key change, then calculate the new transformation
same_note = (self.note_letter == note)
same_accidental = (self.accidental_letter == accidental)
same_mode = (self.mode == mode)
if not (same_note and same_accidental and same_mode):
# if (self.last_key_change_beat == 0) or (self.sched.get_current_beat() - self.last_key_change_beat > 20) or not same_mode:
if not same_mode:
self.note_letter = note
self.accidental_letter = accidental
self.mode = mode
self.key_changing = True
self.last_key_change_beat = self.sched.get_current_beat()
def checkRhythmChange(self, rhythm):
if self.current_rhythm != rhythm:
self.current_rhythm = rhythm
self.rhythm_changing = True
### Instrument ###
def switchInstruments(self, patches):
# if not enough instruments from this point, fill with string and brass
count = 0
while len(patches) < len(self.looper.parts):
if count % 2 == 0:
patches.append(STRING_PATCH)
else:
patches.append(BRASS_PATCH)
count += 1
# apply instrument patches to synth base channels, and play switch channels louder
for i in range(len(self.looper.parts)):
# switch instruments base channels and make them quiet
self.synth.program(2*i, 0, patches[i])
self.setChannelVolume(2*i, 0)
# play sound from switch CHANNELS
self.setChannelVolume(2*i + 1, self.current_volume)
# create the *linear* volume arc (list of values to iteratively set channels to for crescendo/decrescendo effect)
volume_arc = list(range(0, self.current_volume, 5)) + [self.current_volume]
# travel over arc
for val in volume_arc:
for i in range(len(self.looper.parts)):
self.setChannelVolume(2*i, val)
self.setChannelVolume(2*i + 1, self.current_volume - val)
time.sleep(0.10)
# finally, switch instruments in the switch channels to current instruments_multi
for i in range(len(self.looper.parts)):
# switch instruments base channels and make them quiet
self.synth.program(2*i + 1, 0, patches[i])
def setVolume(self):
for i in range(len(self.looper.parts)):
self.synth.cc(i, VOLUME_CC, self.current_volume)
def setChannelVolume(self, i, value):
self.synth.cc(i, VOLUME_CC, value)
def on_update(self):
if self.checking_transformation_done:
if self.key_changing and not self.rhythm_changing:
self.keyChanged()
self.key_changing = False
elif self.rhythm_changing and not self.key_changing:
self.rhythmChanged()
self.rhythm_changing = False
elif self.key_changing and self.rhythm_changing:
self.keyChanged(self.current_rhythm)
self.key_changing = False
self.rhythm_changing = False
self.checking_transformation_done = False
super(TransformationWidget, self).on_update()
class KeyboardWidget(TransformationWidget):
"""
Control the music transformer via various keyboard inputs.
"""
def __init__(self):
super(KeyboardWidget, self).__init__()
# Rhythm editting mechanism
self.held_r = False # Keep track of whether R is being held down
self.r_log = [] # Log of all numbers pressed
self.rhythm = [] # Rhythm recorded
# instrument edditing mechanism
self.held_s = False
self.s_log = []
#parts control
self.num_parts = len(self.looper.parts)
self.current_part_index = 0
def on_key_down(self, keycode, modifiers):
note = self.note_letter
accidental = self.accidental_letter
mode = self.mode
if keycode[1] in 'abcdefg':
note = keycode[1]
elif keycode[1] in '123456789':
if self.held_r:
self.r_log.append(int(keycode[1]))
elif self.held_s:
self.s_log.append(keycode[1])
elif keycode[1] == 'r':
self.held_r = True
self.r_log = []
elif keycode[1] == 's':
self.held_s = True
self.s_log = []
elif keycode[1] == 'i':
accidental = '#'
elif keycode[1] == 'p':
accidental = '-'
elif keycode[1] == 'o':
accidental = ''
elif keycode[1] == '-':
mode = 'major'
elif keycode[1] == '=':
mode = 'minor'
elif keycode[1] == 'right':
self.tempoUp()
elif keycode[1] == 'left':
self.tempo -= 8
self.tempoChanged()
elif keycode[1] == 'up':
self.current_part_index = (self.current_part_index + 1) % self.num_parts
self.r_log = []
self.rhythm = []
elif keycode[1] == 'down':
self.current_part_index = (self.current_part_index - 1) % self.num_parts
self.r_log = []
self.rhythm = []
self.checkKeyChange(note, accidental, mode)
def on_key_up(self, keycode):
if keycode[1] == 'r':
self.held_r = False
if len(self.r_log) >= 4:
self.rhythm = self.r_log[-4:]
self.executor.submit(self.looper.transform, [self.current_part_index], None, self.rhythm)
elif keycode[1] == 's':
self.held_s = False
if len(self.s_log) == 1:
self.synth.program(self.current_part_index, 0, int(self.s_log[0]))
elif len(self.s_log) >= 2:
self.synth.program(self.current_part_index, 0, int("".join(self.s_log[-2:])))
def on_update(self):
self.label.text += 'rhythm = ' + str(self.r_log[-4:]) + '\n'
self.label.text += 'patch = ' + "".join(self.s_log[-2:]) + '\n'
self.label.text += 'selected part = ' + str(self.current_part_index + 1) + '\n'
super(KeyboardWidget, self).on_update()
class ArousalValenceWidget(TransformationWidget):
"""
Control the music transformer via tuples of Arousal and Valence values that correspond
to different values of musical attributes (Rhythm, Tempo, Instrument, etc).
"""
def __init__(self):
super(ArousalValenceWidget, self).__init__()
self.arousal = 0
self.valence = 0
self.file = open('./data/av.txt', 'r')
self.tempo_grid = av_grid.TempoGrid()
self.tempo_grid.parse_point_file('./av-grid-points/tempo-mario.txt')
self.rhythm_grid = av_grid.RhythmGrid()
self.rhythm_grid.parse_point_file('./av-grid-points/rhythm-mario.txt')
self.instrument_grid = av_grid.InstrumentGrid()
self.instrument_grid.parse_point_file('./av-grid-points/instruments_multi-mario.txt')
self.key_grid = av_grid.KeySignatureGrid()
self.key_grid.parse_point_file('./av-grid-points/key-mario.txt')
def transform_arousal_valence(self, arousal, valence):
self.checking_transformation_done = False
# print(arousal)
# print(valence)
try:
self.change_note_velocity(arousal)
except Exception as e:
pass
try:
# tempo
tempo_point, _ = self.tempo_grid.sample_parameter_point(arousal, valence)
self.setTempo(tempo_point.get_value())
except Exception as e:
pass
try:
# rhythm
rhythm_point, _ = self.rhythm_grid.sample_parameter_point(arousal, valence)
self.checkRhythmChange(list(rhythm_point.get_value()))
except Exception as e:
pass
try:
# instrument
instrument_point, _ = self.instrument_grid.sample_parameter_point(arousal, valence)
self.executor.submit(self.switchInstruments, list(instrument_point.get_value()))
except Exception as e:
print("couldn't switch instruments")
try:
# key
key_point, _ = self.key_grid.sample_parameter_point(arousal, valence)
key_tuple = key_point.get_value()
self.checkKeyChange(key_tuple[0], key_tuple[1], key_tuple[2])
except Exception as e:
pass
self.checking_transformation_done = True
def change_note_velocity(self, arousal):
max_velocity = 127
arousal += 1.0
arousal /= 2.0
velocity = max_velocity * arousal
self.note_velocity = max(45, int(velocity))
def on_update(self):
where = self.file.tell()
line = self.file.readline()
if not line:
self.file.seek(where)
else:
values = line.split(' ')
self.arousal = float(values[0])
self.valence = float(values[1])
self.executor.submit(self.transform_arousal_valence, self.arousal, self.valence)
super(ArousalValenceWidget, self).on_update()
run(eval('ArousalValenceWidget'))
| ######
#
# Music system playback is possible thanks to:
# Widget-based Synthesizer Logic thanks to <NAME>
# Music21 Python Module via <NAME>
#
######
import sys
sys.path.append('..')
from common.core import *
from common.audio import *
from common.synth import *
from common.gfxutil import *
from common.clock import *
from common.metro import *
import music21 as m21
import analyzer
import transformer
import looper
import av_grid
import concurrent.futures as fut
import time
STRING_PATCH = 48
BRASS_PATCH = 61
## CC CHANNELS ##
VIBRATO_CC = 1
VOLUME_CC = 7
PAN_CC = 10
EXPRESSION_CC = 11
SUSTAIN_CC = 64
REVERB_CC = 91
CHORUS_CC = 93
class MainWidget(BaseWidget) :
def __init__(self):
super(MainWidget, self).__init__()
self.audio = Audio(2) # set up audio
self.song_path = '../scores/mario-song.musicxml' # set song path
# create TempoMap, AudioScheduler
self.tempo = 120 #TODO: grab tempo from file
self.tempo_map = SimpleTempoMap(self.tempo)
self.sched = AudioScheduler(self.tempo_map)
# Add a looper
self.looper = looper.SongLooper(self.song_path, self.tempo)
self.looper.initialize()
# Set up FluidSynth
self.synth = Synth('./synth_data/FluidR3_GM.sf2')
self.note_velocity = 127
# set up a midi channel for each part
for i in range(len(self.looper.parts)):
base_channel = 2*i
switch_channel = 2*i + 1
self.synth.program(base_channel, 0, 0)
self.synth.program(switch_channel, 0, 0)
# set the reverb
self.synth.cc(base_channel, REVERB_CC, 127)
self.synth.cc(switch_channel, REVERB_CC, 127)
# set the EXPRESSION_CC
self.synth.cc(base_channel, EXPRESSION_CC, 100)
self.synth.cc(base_channel, EXPRESSION_CC, 100)
# connect scheduler into audio system
self.audio.set_generator(self.sched)
self.sched.set_generator(self.synth)
# and text to display our status
self.label = topleft_label()
self.add_widget(self.label)
# as the loop continues, these values will be updated to the current transformation
key_info = self.looper.initial_key.split(" ")
self.note_letter = key_info[0][0]
self.accidental_letter = key_info[0][1] if len(key_info[0]) == 2 else ''
self.mode = key_info[1]
self.current_rhythm = 'ORIGINAL'
# concurrent processing of transformations
self.executor = fut.ThreadPoolExecutor(max_workers=4)
def on_cmd(self,tick, pitch, channel, velocity):
self.synth.noteon(channel, pitch, velocity)
def off_cmd(self,tick, pitch, channel):
self.synth.noteoff(channel, pitch)
def measure_update(self, now_beat, now_tick):
# next step in the loop
self.looper.step(now_beat + 1)
# schedule each element that appears within the measure
for i in range(len(self.looper.current_measure_in_parts)):
part = self.looper.current_measure_in_parts[i]
for j in range(len(part)):
#retrieve the specific element in the measure
element = part[j]
dur = element.element.duration.quarterLength
# ge millisecond timestamps that the element will be scheduled on
on_tick = now_tick + (element.beatOffset + 1)*kTicksPerQuarter
off_tick = on_tick + kTicksPerQuarter*dur
# if the element is a note
if element.is_note():
pitch = element.element.pitch.midi
# schedule note on and off
self.sched.post_at_tick(on_tick, self.on_cmd, pitch, 2*i, self.note_velocity)
self.sched.post_at_tick(off_tick, self.off_cmd, pitch, 2*i)
# switch channel should mirror silently
self.sched.post_at_tick(on_tick, self.on_cmd, pitch, 2*i + 1, self.note_velocity)
self.sched.post_at_tick(off_tick, self.off_cmd, pitch, 2*i + 1)
# else if the element is a chord
elif element.is_chord():
pitches = [pitch.midi for pitch in list(element.element.pitches)]
# schedule off and on events for each pitch in the chord
for pitch in pitches:
self.sched.post_at_tick(on_tick, self.on_cmd, pitch, 2*i, self.note_velocity)
self.sched.post_at_tick(off_tick, self.off_cmd, pitch, 2*i)
# swtich channel should mirror silently
self.sched.post_at_tick(on_tick, self.on_cmd, pitch, 2*i + 1, self.note_velocity)
self.sched.post_at_tick(off_tick, self.off_cmd, pitch, 2*i + 1)
def on_update(self):
self.audio.on_update()
# current time
now_beat = self.sched.get_current_beat()
now_tick = self.sched.get_tick()
#time of last measure
previous_beat = self.looper.get_last_measure_beat()
# take the difference, and see if it falls within the buffer-zone
diff = now_beat - previous_beat
mb = 3
if (diff >= mb):
# self.executor.submit(self.measure_update, now_beat, now_tick)
self.measure_update(now_beat, now_tick)
self.label.text = "Synthesizer and accompanying code via <NAME> (21M.385)" + '\n\n'
self.label.text += self.sched.now_str() + '\n'
self.label.text += 'key = ' + self.note_letter + self.accidental_letter + ' ' + self.mode + '\n'
self.label.text += 'tempo = ' + str(self.tempo) + '\n'
class TransformationWidget(MainWidget):
def __init__(self):
super(TransformationWidget, self).__init__()
# volume/dynamic control
self.default_volume = 88
self.volume_delta = 4
self.current_volume = self.default_volume
# tempo control
self.tempo_delta = 8.0
# keep track of key and rhythms
self.key_changing = False
self.rhythm_changing = False
self.checking_transformation_done = False
self.last_key_change_beat = 0
#### TEMPO ###
def tempoChanged(self):
cur_time = self.tempo_map.tick_to_time(self.sched.get_tick())
self.tempo_map.set_tempo(self.tempo, cur_time)
self.looper.set_tempo(self.tempo)
def tempoUp(self):
self.tempo += 8
self.tempoChanged()
def tempoDown(self):
self.tempo -= 8
self.tempoChanged()
def setTempo(self, tempo):
self.tempo = tempo
self.tempoChanged()
#### Key and Mode ####
def keyChanged(self, rhythm = None):
new_key = self.note_letter + self.accidental_letter + ' ' + self.mode
if new_key != self.looper.current_key:
# # submit the actual transformation task to the executor
self.executor.submit(self.looper.transform, None, new_key, rhythm)
def rhythmChanged(self):
# submit the actual transformation task to the executor
self.executor.submit(self.looper.transform, None, None, self.current_rhythm)
def checkKeyChange(self, note, accidental, mode):
# if this results in a key change, then calculate the new transformation
same_note = (self.note_letter == note)
same_accidental = (self.accidental_letter == accidental)
same_mode = (self.mode == mode)
if not (same_note and same_accidental and same_mode):
# if (self.last_key_change_beat == 0) or (self.sched.get_current_beat() - self.last_key_change_beat > 20) or not same_mode:
if not same_mode:
self.note_letter = note
self.accidental_letter = accidental
self.mode = mode
self.key_changing = True
self.last_key_change_beat = self.sched.get_current_beat()
def checkRhythmChange(self, rhythm):
if self.current_rhythm != rhythm:
self.current_rhythm = rhythm
self.rhythm_changing = True
### Instrument ###
def switchInstruments(self, patches):
# if not enough instruments from this point, fill with string and brass
count = 0
while len(patches) < len(self.looper.parts):
if count % 2 == 0:
patches.append(STRING_PATCH)
else:
patches.append(BRASS_PATCH)
count += 1
# apply instrument patches to synth base channels, and play switch channels louder
for i in range(len(self.looper.parts)):
# switch instruments base channels and make them quiet
self.synth.program(2*i, 0, patches[i])
self.setChannelVolume(2*i, 0)
# play sound from switch CHANNELS
self.setChannelVolume(2*i + 1, self.current_volume)
# create the *linear* volume arc (list of values to iteratively set channels to for crescendo/decrescendo effect)
volume_arc = list(range(0, self.current_volume, 5)) + [self.current_volume]
# travel over arc
for val in volume_arc:
for i in range(len(self.looper.parts)):
self.setChannelVolume(2*i, val)
self.setChannelVolume(2*i + 1, self.current_volume - val)
time.sleep(0.10)
# finally, switch instruments in the switch channels to current instruments_multi
for i in range(len(self.looper.parts)):
# switch instruments base channels and make them quiet
self.synth.program(2*i + 1, 0, patches[i])
def setVolume(self):
for i in range(len(self.looper.parts)):
self.synth.cc(i, VOLUME_CC, self.current_volume)
def setChannelVolume(self, i, value):
self.synth.cc(i, VOLUME_CC, value)
def on_update(self):
if self.checking_transformation_done:
if self.key_changing and not self.rhythm_changing:
self.keyChanged()
self.key_changing = False
elif self.rhythm_changing and not self.key_changing:
self.rhythmChanged()
self.rhythm_changing = False
elif self.key_changing and self.rhythm_changing:
self.keyChanged(self.current_rhythm)
self.key_changing = False
self.rhythm_changing = False
self.checking_transformation_done = False
super(TransformationWidget, self).on_update()
class KeyboardWidget(TransformationWidget):
"""
Control the music transformer via various keyboard inputs.
"""
def __init__(self):
super(KeyboardWidget, self).__init__()
# Rhythm editting mechanism
self.held_r = False # Keep track of whether R is being held down
self.r_log = [] # Log of all numbers pressed
self.rhythm = [] # Rhythm recorded
# instrument edditing mechanism
self.held_s = False
self.s_log = []
#parts control
self.num_parts = len(self.looper.parts)
self.current_part_index = 0
def on_key_down(self, keycode, modifiers):
note = self.note_letter
accidental = self.accidental_letter
mode = self.mode
if keycode[1] in 'abcdefg':
note = keycode[1]
elif keycode[1] in '123456789':
if self.held_r:
self.r_log.append(int(keycode[1]))
elif self.held_s:
self.s_log.append(keycode[1])
elif keycode[1] == 'r':
self.held_r = True
self.r_log = []
elif keycode[1] == 's':
self.held_s = True
self.s_log = []
elif keycode[1] == 'i':
accidental = '#'
elif keycode[1] == 'p':
accidental = '-'
elif keycode[1] == 'o':
accidental = ''
elif keycode[1] == '-':
mode = 'major'
elif keycode[1] == '=':
mode = 'minor'
elif keycode[1] == 'right':
self.tempoUp()
elif keycode[1] == 'left':
self.tempo -= 8
self.tempoChanged()
elif keycode[1] == 'up':
self.current_part_index = (self.current_part_index + 1) % self.num_parts
self.r_log = []
self.rhythm = []
elif keycode[1] == 'down':
self.current_part_index = (self.current_part_index - 1) % self.num_parts
self.r_log = []
self.rhythm = []
self.checkKeyChange(note, accidental, mode)
def on_key_up(self, keycode):
if keycode[1] == 'r':
self.held_r = False
if len(self.r_log) >= 4:
self.rhythm = self.r_log[-4:]
self.executor.submit(self.looper.transform, [self.current_part_index], None, self.rhythm)
elif keycode[1] == 's':
self.held_s = False
if len(self.s_log) == 1:
self.synth.program(self.current_part_index, 0, int(self.s_log[0]))
elif len(self.s_log) >= 2:
self.synth.program(self.current_part_index, 0, int("".join(self.s_log[-2:])))
def on_update(self):
self.label.text += 'rhythm = ' + str(self.r_log[-4:]) + '\n'
self.label.text += 'patch = ' + "".join(self.s_log[-2:]) + '\n'
self.label.text += 'selected part = ' + str(self.current_part_index + 1) + '\n'
super(KeyboardWidget, self).on_update()
class ArousalValenceWidget(TransformationWidget):
"""
Control the music transformer via tuples of Arousal and Valence values that correspond
to different values of musical attributes (Rhythm, Tempo, Instrument, etc).
"""
def __init__(self):
super(ArousalValenceWidget, self).__init__()
self.arousal = 0
self.valence = 0
self.file = open('./data/av.txt', 'r')
self.tempo_grid = av_grid.TempoGrid()
self.tempo_grid.parse_point_file('./av-grid-points/tempo-mario.txt')
self.rhythm_grid = av_grid.RhythmGrid()
self.rhythm_grid.parse_point_file('./av-grid-points/rhythm-mario.txt')
self.instrument_grid = av_grid.InstrumentGrid()
self.instrument_grid.parse_point_file('./av-grid-points/instruments_multi-mario.txt')
self.key_grid = av_grid.KeySignatureGrid()
self.key_grid.parse_point_file('./av-grid-points/key-mario.txt')
def transform_arousal_valence(self, arousal, valence):
self.checking_transformation_done = False
# print(arousal)
# print(valence)
try:
self.change_note_velocity(arousal)
except Exception as e:
pass
try:
# tempo
tempo_point, _ = self.tempo_grid.sample_parameter_point(arousal, valence)
self.setTempo(tempo_point.get_value())
except Exception as e:
pass
try:
# rhythm
rhythm_point, _ = self.rhythm_grid.sample_parameter_point(arousal, valence)
self.checkRhythmChange(list(rhythm_point.get_value()))
except Exception as e:
pass
try:
# instrument
instrument_point, _ = self.instrument_grid.sample_parameter_point(arousal, valence)
self.executor.submit(self.switchInstruments, list(instrument_point.get_value()))
except Exception as e:
print("couldn't switch instruments")
try:
# key
key_point, _ = self.key_grid.sample_parameter_point(arousal, valence)
key_tuple = key_point.get_value()
self.checkKeyChange(key_tuple[0], key_tuple[1], key_tuple[2])
except Exception as e:
pass
self.checking_transformation_done = True
def change_note_velocity(self, arousal):
max_velocity = 127
arousal += 1.0
arousal /= 2.0
velocity = max_velocity * arousal
self.note_velocity = max(45, int(velocity))
def on_update(self):
where = self.file.tell()
line = self.file.readline()
if not line:
self.file.seek(where)
else:
values = line.split(' ')
self.arousal = float(values[0])
self.valence = float(values[1])
self.executor.submit(self.transform_arousal_valence, self.arousal, self.valence)
super(ArousalValenceWidget, self).on_update()
run(eval('ArousalValenceWidget'))
| en | 0.776671 | ###### # # Music system playback is possible thanks to: # Widget-based Synthesizer Logic thanks to <NAME> # Music21 Python Module via <NAME> # ###### ## CC CHANNELS ## # set up audio # set song path # create TempoMap, AudioScheduler #TODO: grab tempo from file # Add a looper # Set up FluidSynth # set up a midi channel for each part # set the reverb # set the EXPRESSION_CC # connect scheduler into audio system # and text to display our status # as the loop continues, these values will be updated to the current transformation # concurrent processing of transformations # next step in the loop # schedule each element that appears within the measure #retrieve the specific element in the measure # ge millisecond timestamps that the element will be scheduled on # if the element is a note # schedule note on and off # switch channel should mirror silently # else if the element is a chord # schedule off and on events for each pitch in the chord # swtich channel should mirror silently # current time #time of last measure # take the difference, and see if it falls within the buffer-zone # self.executor.submit(self.measure_update, now_beat, now_tick) # volume/dynamic control # tempo control # keep track of key and rhythms #### TEMPO ### #### Key and Mode #### # # submit the actual transformation task to the executor # submit the actual transformation task to the executor # if this results in a key change, then calculate the new transformation # if (self.last_key_change_beat == 0) or (self.sched.get_current_beat() - self.last_key_change_beat > 20) or not same_mode: ### Instrument ### # if not enough instruments from this point, fill with string and brass # apply instrument patches to synth base channels, and play switch channels louder # switch instruments base channels and make them quiet # play sound from switch CHANNELS # create the *linear* volume arc (list of values to iteratively set channels to for crescendo/decrescendo effect) # travel over arc # 
finally, switch instruments in the switch channels to current instruments_multi # switch instruments base channels and make them quiet Control the music transformer via various keyboard inputs. # Rhythm editting mechanism # Keep track of whether R is being held down # Log of all numbers pressed # Rhythm recorded # instrument edditing mechanism #parts control Control the music transformer via tuples of Arousal and Valence values that correspond to different values of musical attributes (Rhythm, Tempo, Instrument, etc). # print(arousal) # print(valence) # tempo # rhythm # instrument # key | 2.688035 | 3 |
venv/lib/python3.6/site-packages/ansible_collections/check_point/mgmt/tests/units/modules/test_cp_mgmt_verify_software_package.py | usegalaxy-no/usegalaxy | 1 | 6624739 | # Ansible module to manage CheckPoint Firewall (c) 2019
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from units.modules.utils import set_module_args, exit_json, fail_json, AnsibleExitJson
from ansible.module_utils import basic
from ansible_collections.check_point.mgmt.plugins.modules import cp_mgmt_verify_software_package
PAYLOAD = {
"name": "Check_Point_R80_40_JHF_MCD_DEMO_019_MAIN_Bundle_T1_VISIBLE_FULL.tgz",
"wait_for_task": False
}
RETURN_PAYLOAD = {
"task-id": "53de74b7-8f19-4cbe-99fc-a81ef0759bad"
}
command = 'verify-software-package'
failure_msg = '{command failed}'
class TestCheckpointVerifySoftwarePackage(object):
module = cp_mgmt_verify_software_package
@pytest.fixture(autouse=True)
def module_mock(self, mocker):
return mocker.patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json)
@pytest.fixture
def connection_mock(self, mocker):
connection_class_mock = mocker.patch('ansible.module_utils.network.checkpoint.checkpoint.Connection')
return connection_class_mock.return_value
def test_command(self, mocker, connection_mock):
connection_mock.send_request.return_value = (200, RETURN_PAYLOAD)
result = self._run_module(PAYLOAD)
assert result['changed']
assert RETURN_PAYLOAD == result[command]
def test_command_fail(self, mocker, connection_mock):
connection_mock.send_request.return_value = (404, failure_msg)
try:
result = self._run_module(PAYLOAD)
except Exception as e:
result = e.args[0]
assert 'Checkpoint device returned error 404 with message ' + failure_msg == result['msg']
def _run_module(self, module_args):
set_module_args(module_args)
with pytest.raises(AnsibleExitJson) as ex:
self.module.main()
return ex.value.args[0]
| # Ansible module to manage CheckPoint Firewall (c) 2019
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from units.modules.utils import set_module_args, exit_json, fail_json, AnsibleExitJson
from ansible.module_utils import basic
from ansible_collections.check_point.mgmt.plugins.modules import cp_mgmt_verify_software_package
# Arguments passed to the module under test.
PAYLOAD = {
    "name": "Check_Point_R80_40_JHF_MCD_DEMO_019_MAIN_Bundle_T1_VISIBLE_FULL.tgz",
    "wait_for_task": False
}
# Response body the mocked Checkpoint API returns on success.
RETURN_PAYLOAD = {
    "task-id": "53de74b7-8f19-4cbe-99fc-a81ef0759bad"
}
# Key under which the module stores the API response in its result dict.
command = 'verify-software-package'
# Error body the mocked connection returns in the failure test.
failure_msg = '{command failed}'
class TestCheckpointVerifySoftwarePackage(object):
    """Unit tests for the cp_mgmt_verify_software_package Ansible module."""

    module = cp_mgmt_verify_software_package

    @pytest.fixture(autouse=True)
    def module_mock(self, mocker):
        # Replace AnsibleModule's exit/fail handlers with test doubles.
        return mocker.patch.multiple(
            basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json
        )

    @pytest.fixture
    def connection_mock(self, mocker):
        # Stub out the Checkpoint HTTP-API connection class.
        patched = mocker.patch(
            'ansible.module_utils.network.checkpoint.checkpoint.Connection'
        )
        return patched.return_value

    def test_command(self, mocker, connection_mock):
        connection_mock.send_request.return_value = (200, RETURN_PAYLOAD)

        result = self._run_module(PAYLOAD)

        assert result['changed']
        assert result[command] == RETURN_PAYLOAD

    def test_command_fail(self, mocker, connection_mock):
        connection_mock.send_request.return_value = (404, failure_msg)

        try:
            result = self._run_module(PAYLOAD)
        except Exception as exc:
            result = exc.args[0]

        expected = 'Checkpoint device returned error 404 with message ' + failure_msg
        assert result['msg'] == expected

    def _run_module(self, module_args):
        # Drive the module's main() and capture the exit payload it emits.
        set_module_args(module_args)
        with pytest.raises(AnsibleExitJson) as ex:
            self.module.main()
        return ex.value.args[0]
| en | 0.866831 | # Ansible module to manage CheckPoint Firewall (c) 2019 # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # | 1.756769 | 2 |
setup.py | perminovsi/landsatxplore | 1 | 6624740 | <filename>setup.py
from codecs import open
from os import path

from setuptools import find_packages, setup

# The README is used verbatim as the long description shown on PyPI.
PROJECT_ROOT = path.abspath(path.dirname(__file__))
with open(path.join(PROJECT_ROOT, 'README.md'), encoding='utf-8') as readme_file:
    readme_text = readme_file.read()

setup(
    name='landsatxplore',
    version='0.8',
    description='Search and download Landsat scenes from EarthExplorer.',
    long_description=readme_text,
    long_description_content_type='text/markdown',
    url='https://github.com/yannforget/landsatxplore',
    author='<NAME>',
    author_email='<EMAIL>',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: GIS',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    keywords=['earth observation', 'remote sensing', 'satellite imagery', 'landsat'],
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    install_requires=[
        'requests',
        'tqdm',
        'click',
    ],
    include_package_data=True,
    zip_safe=False,
    entry_points="""
    [console_scripts]
    landsatxplore=landsatxplore.cli:cli
    """,
)
| <filename>setup.py
from codecs import open
from os import path

from setuptools import find_packages, setup

# The README is used verbatim as the long description shown on PyPI.
PROJECT_ROOT = path.abspath(path.dirname(__file__))
with open(path.join(PROJECT_ROOT, 'README.md'), encoding='utf-8') as readme_file:
    readme_text = readme_file.read()

setup(
    name='landsatxplore',
    version='0.8',
    description='Search and download Landsat scenes from EarthExplorer.',
    long_description=readme_text,
    long_description_content_type='text/markdown',
    url='https://github.com/yannforget/landsatxplore',
    author='<NAME>',
    author_email='<EMAIL>',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: GIS',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    keywords=['earth observation', 'remote sensing', 'satellite imagery', 'landsat'],
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    install_requires=[
        'requests',
        'tqdm',
        'click',
    ],
    include_package_data=True,
    zip_safe=False,
    entry_points="""
    [console_scripts]
    landsatxplore=landsatxplore.cli:cli
    """,
)
| it | 0.245937 | [console_scripts] landsatxplore=landsatxplore.cli:cli | 1.430931 | 1 |
dev/breeze/src/airflow_breeze/params/common_build_params.py | holly-evans/airflow | 3 | 6624741 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import sys
from dataclasses import dataclass
from datetime import datetime
from typing import List, Optional
from airflow_breeze.branch_defaults import AIRFLOW_BRANCH
from airflow_breeze.global_constants import DOCKER_DEFAULT_PLATFORM
from airflow_breeze.utils.console import get_console
from airflow_breeze.utils.platforms import get_real_platform
@dataclass
class CommonBuildParams:
    """
    Common build parameters. Those parameters are common parameters for CI And PROD build.
    """

    additional_airflow_extras: str = ""
    additional_dev_apt_command: str = ""
    additional_dev_apt_deps: str = ""
    additional_dev_apt_env: str = ""
    additional_python_deps: str = ""
    additional_runtime_apt_command: str = ""
    additional_runtime_apt_deps: str = ""
    additional_runtime_apt_env: str = ""
    airflow_branch: str = AIRFLOW_BRANCH
    airflow_constraints_location: str = ""
    answer: Optional[str] = None
    build_id: int = 0
    constraints_github_repository: str = "apache/airflow"
    debian_version: str = "bullseye"
    dev_apt_command: str = ""
    dev_apt_deps: str = ""
    docker_cache: str = "registry"
    empty_image: bool = False
    github_actions: str = os.environ.get('GITHUB_ACTIONS', "false")
    github_repository: str = "apache/airflow"
    github_token: str = os.environ.get('GITHUB_TOKEN', "")
    github_username: str = ""
    image_tag: Optional[str] = None
    install_providers_from_sources: bool = False
    platform: str = DOCKER_DEFAULT_PLATFORM
    prepare_buildx_cache: bool = False
    push_image: bool = False
    python: str = "3.7"
    runtime_apt_command: str = ""
    runtime_apt_deps: str = ""
    tag_as_latest: bool = False
    upgrade_to_newer_dependencies: bool = False

    @property
    def airflow_version(self):
        """Airflow version baked into the image — provided by subclasses."""
        raise NotImplementedError()

    @property
    def image_type(self) -> str:
        """Image flavour (e.g. CI vs PROD) — provided by subclasses."""
        raise NotImplementedError()

    @property
    def airflow_pre_cached_pip_packages(self):
        """Pre-cached pip packages setting — provided by subclasses."""
        raise NotImplementedError()

    @property
    def airflow_base_image_name(self):
        """Registry prefix shared by all images, e.g. ghcr.io/apache/airflow."""
        return f'ghcr.io/{self.github_repository.lower()}'

    @property
    def airflow_image_name(self):
        """Construct image link (without a tag)."""
        return (
            f'{self.airflow_base_image_name}/{self.airflow_branch}/'
            f'{self.image_type.lower()}/python{self.python}'
        )

    @property
    def extra_docker_build_flags(self) -> List[str]:
        """Extra `docker build` flags — provided by subclasses."""
        raise NotImplementedError()

    @property
    def docker_cache_directive(self) -> List[str]:
        """Translate the `docker_cache` setting into docker CLI cache flags."""
        if self.docker_cache == "registry":
            # One --cache-from per target platform.
            return [f"--cache-from={self.get_cache(platform)}" for platform in self.platforms]
        if self.docker_cache == "disabled":
            return ["--no-cache"]
        # Any other value: no cache directive at all.
        return []

    @property
    def python_base_image(self):
        """Construct Python Base Image"""
        # ghcr.io/apache/airflow/main/python:3.8-slim-bullseye
        return f'python:{self.python}-slim-{self.debian_version}'

    @property
    def airflow_image_repository(self):
        """URL of the GitHub repository the image is built from."""
        return f'https://github.com/{self.github_repository}'

    @property
    def airflow_image_date_created(self):
        """Creation timestamp (UTC format string, local clock) for image labels."""
        return datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")

    @property
    def airflow_image_readme_url(self):
        """README used as the image description on the registry."""
        return "https://raw.githubusercontent.com/apache/airflow/main/docs/docker-stack/README.md"

    @property
    def airflow_image_name_with_tag(self):
        """Construct image link, including `:tag` when image_tag is set."""
        # Reuse airflow_image_name instead of rebuilding the same string,
        # so the two properties cannot drift apart.
        image = self.airflow_image_name
        return image if self.image_tag is None else f"{image}:{self.image_tag}"

    def get_cache(self, single_platform: str) -> str:
        """Return the cache image reference for one (and only one) platform."""
        if "," in single_platform:
            get_console().print(
                "[error]Cache can only be retrieved for single platform and you "
                f"tried for {single_platform}[/]"
            )
            sys.exit(1)
        return f"{self.airflow_image_name}:cache-{get_real_platform(single_platform)}"

    def is_multi_platform(self) -> bool:
        """True when `platform` lists more than one target platform."""
        return "," in self.platform

    def preparing_latest_image(self) -> bool:
        """True when the image being built is (or is tagged as) the latest one."""
        return self.tag_as_latest or self.airflow_image_name == self.airflow_image_name_with_tag

    @property
    def platforms(self) -> List[str]:
        """The comma-separated `platform` value split into a list."""
        return self.platform.split(",")

    @property
    def required_image_args(self) -> List[str]:
        """Mandatory --build-arg names — provided by subclasses."""
        raise NotImplementedError()

    @property
    def optional_image_args(self) -> List[str]:
        """Optional --build-arg names — provided by subclasses."""
        raise NotImplementedError()

    def __post_init__(self):
        pass
| # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import sys
from dataclasses import dataclass
from datetime import datetime
from typing import List, Optional
from airflow_breeze.branch_defaults import AIRFLOW_BRANCH
from airflow_breeze.global_constants import DOCKER_DEFAULT_PLATFORM
from airflow_breeze.utils.console import get_console
from airflow_breeze.utils.platforms import get_real_platform
@dataclass
class CommonBuildParams:
    """
    Common build parameters. Those parameters are common parameters for CI And PROD build.
    """

    additional_airflow_extras: str = ""
    additional_dev_apt_command: str = ""
    additional_dev_apt_deps: str = ""
    additional_dev_apt_env: str = ""
    additional_python_deps: str = ""
    additional_runtime_apt_command: str = ""
    additional_runtime_apt_deps: str = ""
    additional_runtime_apt_env: str = ""
    airflow_branch: str = AIRFLOW_BRANCH
    airflow_constraints_location: str = ""
    answer: Optional[str] = None
    build_id: int = 0
    constraints_github_repository: str = "apache/airflow"
    debian_version: str = "bullseye"
    dev_apt_command: str = ""
    dev_apt_deps: str = ""
    docker_cache: str = "registry"
    empty_image: bool = False
    github_actions: str = os.environ.get('GITHUB_ACTIONS', "false")
    github_repository: str = "apache/airflow"
    github_token: str = os.environ.get('GITHUB_TOKEN', "")
    github_username: str = ""
    image_tag: Optional[str] = None
    install_providers_from_sources: bool = False
    platform: str = DOCKER_DEFAULT_PLATFORM
    prepare_buildx_cache: bool = False
    push_image: bool = False
    python: str = "3.7"
    runtime_apt_command: str = ""
    runtime_apt_deps: str = ""
    tag_as_latest: bool = False
    upgrade_to_newer_dependencies: bool = False

    @property
    def airflow_version(self):
        """Airflow version baked into the image — provided by subclasses."""
        raise NotImplementedError()

    @property
    def image_type(self) -> str:
        """Image flavour (e.g. CI vs PROD) — provided by subclasses."""
        raise NotImplementedError()

    @property
    def airflow_pre_cached_pip_packages(self):
        """Pre-cached pip packages setting — provided by subclasses."""
        raise NotImplementedError()

    @property
    def airflow_base_image_name(self):
        """Registry prefix shared by all images, e.g. ghcr.io/apache/airflow."""
        return f'ghcr.io/{self.github_repository.lower()}'

    @property
    def airflow_image_name(self):
        """Construct image link (without a tag)."""
        return (
            f'{self.airflow_base_image_name}/{self.airflow_branch}/'
            f'{self.image_type.lower()}/python{self.python}'
        )

    @property
    def extra_docker_build_flags(self) -> List[str]:
        """Extra `docker build` flags — provided by subclasses."""
        raise NotImplementedError()

    @property
    def docker_cache_directive(self) -> List[str]:
        """Translate the `docker_cache` setting into docker CLI cache flags."""
        if self.docker_cache == "registry":
            # One --cache-from per target platform.
            return [f"--cache-from={self.get_cache(platform)}" for platform in self.platforms]
        if self.docker_cache == "disabled":
            return ["--no-cache"]
        # Any other value: no cache directive at all.
        return []

    @property
    def python_base_image(self):
        """Construct Python Base Image"""
        # ghcr.io/apache/airflow/main/python:3.8-slim-bullseye
        return f'python:{self.python}-slim-{self.debian_version}'

    @property
    def airflow_image_repository(self):
        """URL of the GitHub repository the image is built from."""
        return f'https://github.com/{self.github_repository}'

    @property
    def airflow_image_date_created(self):
        """Creation timestamp (UTC format string, local clock) for image labels."""
        return datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")

    @property
    def airflow_image_readme_url(self):
        """README used as the image description on the registry."""
        return "https://raw.githubusercontent.com/apache/airflow/main/docs/docker-stack/README.md"

    @property
    def airflow_image_name_with_tag(self):
        """Construct image link, including `:tag` when image_tag is set."""
        # Reuse airflow_image_name instead of rebuilding the same string,
        # so the two properties cannot drift apart.
        image = self.airflow_image_name
        return image if self.image_tag is None else f"{image}:{self.image_tag}"

    def get_cache(self, single_platform: str) -> str:
        """Return the cache image reference for one (and only one) platform."""
        if "," in single_platform:
            get_console().print(
                "[error]Cache can only be retrieved for single platform and you "
                f"tried for {single_platform}[/]"
            )
            sys.exit(1)
        return f"{self.airflow_image_name}:cache-{get_real_platform(single_platform)}"

    def is_multi_platform(self) -> bool:
        """True when `platform` lists more than one target platform."""
        return "," in self.platform

    def preparing_latest_image(self) -> bool:
        """True when the image being built is (or is tagged as) the latest one."""
        return self.tag_as_latest or self.airflow_image_name == self.airflow_image_name_with_tag

    @property
    def platforms(self) -> List[str]:
        """The comma-separated `platform` value split into a list."""
        return self.platform.split(",")

    @property
    def required_image_args(self) -> List[str]:
        """Mandatory --build-arg names — provided by subclasses."""
        raise NotImplementedError()

    @property
    def optional_image_args(self) -> List[str]:
        """Optional --build-arg names — provided by subclasses."""
        raise NotImplementedError()

    def __post_init__(self):
        pass
| en | 0.808451 | # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. Common build parameters. Those parameters are common parameters for CI And PROD build. Construct image link Construct Python Base Image # ghcr.io/apache/airflow/main/python:3.8-slim-bullseye Construct image link | 1.808971 | 2 |
doban_spider/try_test/processing_mp_test.py | XiaoWu-5759/xwspider | 0 | 6624742 | # -*- coding: utf-8 -*-
'''
@createBy : xiaowu
@date : 2019/10/22 14:16:41
'''
import multiprocessing as mp
def job(q, n=12):
    """Compute sum_{i=0}^{n-1} (i + i**2 + i**3) and push the result onto *q*.

    Args:
        q: a queue-like object (e.g. multiprocessing.Queue) receiving the result.
        n: exclusive upper bound of the summation range; defaults to 12 to
           preserve the original hard-coded behaviour.
    """
    total = sum(i + i ** 2 + i ** 3 for i in range(n))
    q.put(total)
if __name__ == "__main__":
queue = mp.Queue()
p1 = mp.Process(target=job,args=(queue,))
p1.start()
for _ in range(1000):
if(not queue.empty()):
print('has')
else:
print('emtpy') | # -*- coding: utf-8 -*-
'''
@createBy : xiaowu
@date : 2019/10/22 14:16:41
'''
import multiprocessing as mp
def job(q, n=12):
    """Compute sum_{i=0}^{n-1} (i + i**2 + i**3) and push the result onto *q*.

    Args:
        q: a queue-like object (e.g. multiprocessing.Queue) receiving the result.
        n: exclusive upper bound of the summation range; defaults to 12 to
           preserve the original hard-coded behaviour.
    """
    total = sum(i + i ** 2 + i ** 3 for i in range(n))
    q.put(total)
if __name__ == "__main__":
queue = mp.Queue()
p1 = mp.Process(target=job,args=(queue,))
p1.start()
for _ in range(1000):
if(not queue.empty()):
print('has')
else:
print('emtpy') | en | 0.34467 | # -*- coding: utf-8 -*- @createBy : xiaowu @date : 2019/10/22 14:16:41 | 3.086224 | 3 |
auto/app/lib/appController.py | Strugglingrookie/oldboy2 | 1 | 6624743 | import time
import threading
import queue
import subprocess
import os
from conf.settings import logger,LOG_DIR,APP_PICTUREPATH
from lib.tool import Tool
from appium import webdriver
# Thread-local storage: isolates per-thread desired_caps/driver data.
local = threading.local()
# Queue handing started webdriver instances back to the caller.
drivers_queue = queue.Queue()
# Queue of device names (used to tell driver threads apart later).
devices_name_queue = queue.Queue()
class Controller():
    """Brings up Appium servers and webdriver sessions for every configured device."""

    def __init__(self):
        self.tool = Tool()
        # Parsed YAML configuration.
        self.yml = self.tool.app_data
        # Configuration for every phone/device.
        self.devices = self.yml.get('devices')
        # App under test: package name, entry activity, etc.
        self.app = self.yml.get('tester')
        # Android or IOS
        self.device_type = self.yml.get('device_type')
        # Ports of the launched servers, used to verify they started.
        self.ports = []

    def kill_servers(self):
        """
        Kill all leftover services before every run.
        If adb is restarted, the Nox emulator will no longer be detected.
        """
        logger.debug('执行[KILL SERVER]操作:%s'
                     % subprocess.getoutput("taskkill /F /IM node.exe /t"))
        logger.debug('关闭ADB服务!%s' % subprocess.run(
            ["adb","kill-server"],stdout=subprocess.PIPE).stdout)

    def server_start_command(self, **kwargs):
        '''
        Start an Appium server using the ip/port/device info in kwargs.
        '''
        command = 'appium -a {ip} -p {port} -U {deviceName} -g {log}'.format(
            ip=kwargs.get('ip'),port=kwargs.get('port'),
            deviceName=kwargs.get('deviceName'),log=kwargs.get('log_path'))
        logger.debug('启动服务执行的命令:%s' % command)
        subprocess.Popen(command, stdout=open(kwargs.get('log_path'), 'a+'),
                         stderr=subprocess.PIPE, shell=True)

    def server_start(self):
        '''
        Start one Appium server per configured phone.
        '''
        # Kill any ports that survived from the previous run first.
        self.kill_servers()
        logger.debug('启动ADB服务!%s' % subprocess.getoutput("adb start-server"))
        # Started server threads join this list so we can wait for all of
        # them before moving on.
        server_threads = []
        for device in self.devices.get(self.device_type):
            # Attach the per-device operation log path to its configuration.
            log_path = {'log_path': os.path.join(LOG_DIR, '%s.log' % device.get('name'))}
            device.update(log_path)
            logger.debug("每个手机的信息:%s" % device)
            # Record the port so startup can be verified later.
            self.ports.append(device.get('port'))
            # Launch the server in its own thread.
            t = threading.Thread(target=self.server_start_command, kwargs=device)
            server_threads.append(t)
            t.start()
        for i in server_threads:
            i.join()

    def check_server(self):
        '''
        Verify that every Appium server started successfully.
        :return: True
        '''
        while True:
            for port in self.ports:
                # A non-empty netstat result means the port is listening.
                res = subprocess.getoutput("netstat -ano | findstr %s" % port)
                # If found, drop the port from the list; once the list is
                # empty, all servers are up and the loop exits.
                # NOTE(review): removing from self.ports while iterating it
                # skips entries; the outer while-loop retry masks this.
                if res:
                    logger.debug('检验appium服务启动:%s' % res)
                    self.ports.remove(port)
                else:
                    logger.debug('端口 [%s] 服务启动失败5秒钟后尝试' % port)
            if not self.ports:
                break
            time.sleep(5)
        logger.debug('全部appium服务启动成功!')
        return True

    def driver_start_command(self,**kwargs):
        '''
        Start a single webdriver session.
        :param kwargs: configuration of the app under test (package name, entry activity, ...)
        :return:
        '''
        local.desired_caps = {'platformName': '', 'platformVersion': '', 'deviceName': '',
                              "unicodeKeyboard": "True",
                              "resetKeyboard": "True", 'udid': '', 'noReset': 'True'}
        local.desired_caps.update(kwargs)
        port = local.desired_caps.get('port')
        ip = local.desired_caps.get('ip')
        url = 'http://{ip}:{port}/wd/hub'.format(port=port,ip=ip)
        logger.debug('url:%s 开始启动'%url)
        local.driver = webdriver.Remote(url, local.desired_caps)
        logger.debug('url:%s 启动成功' % url)
        # Hand the driver back through the message queue.
        drivers_queue.put(local.driver)
        logger.debug('driver 为 %s 成功push到队列'%local.driver)
        # Queue of device names (used later to distinguish thread names).
        devices_name_queue.put(local.desired_caps.get('name'))
        logger.debug('driver名字 %s 成功push到队列' % local.desired_caps.get('name'))
        # Directory where failure screenshots for this device are stored.
        app_picture_path = APP_PICTUREPATH.format(local.desired_caps.get('name'))
        # If it exists, wipe its contents.
        if os.path.exists(app_picture_path):
            # Use the existing clear helper.
            self.tool.app_clear(app_picture_path)
        else:
            # Otherwise create the directory tree recursively.
            os.makedirs(app_picture_path)

    def driver_start(self):
        # Start all webdriver sessions and return the queue of drivers.
        driver_threads = []
        for device_app in self.devices.get(self.device_type):
            # Merge the app-under-test info into the phone configuration.
            device_app.update(self.app)
            # The thread is only created here, NOT started yet.
            t = threading.Thread(target=self.driver_start_command, kwargs=device_app)
            driver_threads.append(t)
        for t in driver_threads:
            # Must start *and* join here: launching drivers concurrently
            # makes them overwrite each other, so only one thread would
            # succeed.
            t.start()
            t.join()
        # After all drivers started, return their queue.
        return drivers_queue
if __name__ == '__main__':
    # Manual smoke test: print the loaded configuration only.
    c = Controller()
    print(c.yml)
    # c.server_start()
    # c.check_server()
    # c.driver_start()
| import time
import threading
import queue
import subprocess
import os
from conf.settings import logger,LOG_DIR,APP_PICTUREPATH
from lib.tool import Tool
from appium import webdriver
# Thread-local storage: isolates per-thread desired_caps/driver data.
local = threading.local()
# Queue handing started webdriver instances back to the caller.
drivers_queue = queue.Queue()
# Queue of device names (used to tell driver threads apart later).
devices_name_queue = queue.Queue()
class Controller():
    """Brings up Appium servers and webdriver sessions for every configured device."""

    def __init__(self):
        self.tool = Tool()
        # Parsed YAML configuration.
        self.yml = self.tool.app_data
        # Configuration for every phone/device.
        self.devices = self.yml.get('devices')
        # App under test: package name, entry activity, etc.
        self.app = self.yml.get('tester')
        # Android or IOS
        self.device_type = self.yml.get('device_type')
        # Ports of the launched servers, used to verify they started.
        self.ports = []

    def kill_servers(self):
        """
        Kill all leftover services before every run.
        If adb is restarted, the Nox emulator will no longer be detected.
        """
        logger.debug('执行[KILL SERVER]操作:%s'
                     % subprocess.getoutput("taskkill /F /IM node.exe /t"))
        logger.debug('关闭ADB服务!%s' % subprocess.run(
            ["adb","kill-server"],stdout=subprocess.PIPE).stdout)

    def server_start_command(self, **kwargs):
        '''
        Start an Appium server using the ip/port/device info in kwargs.
        '''
        command = 'appium -a {ip} -p {port} -U {deviceName} -g {log}'.format(
            ip=kwargs.get('ip'),port=kwargs.get('port'),
            deviceName=kwargs.get('deviceName'),log=kwargs.get('log_path'))
        logger.debug('启动服务执行的命令:%s' % command)
        subprocess.Popen(command, stdout=open(kwargs.get('log_path'), 'a+'),
                         stderr=subprocess.PIPE, shell=True)

    def server_start(self):
        '''
        Start one Appium server per configured phone.
        '''
        # Kill any ports that survived from the previous run first.
        self.kill_servers()
        logger.debug('启动ADB服务!%s' % subprocess.getoutput("adb start-server"))
        # Started server threads join this list so we can wait for all of
        # them before moving on.
        server_threads = []
        for device in self.devices.get(self.device_type):
            # Attach the per-device operation log path to its configuration.
            log_path = {'log_path': os.path.join(LOG_DIR, '%s.log' % device.get('name'))}
            device.update(log_path)
            logger.debug("每个手机的信息:%s" % device)
            # Record the port so startup can be verified later.
            self.ports.append(device.get('port'))
            # Launch the server in its own thread.
            t = threading.Thread(target=self.server_start_command, kwargs=device)
            server_threads.append(t)
            t.start()
        for i in server_threads:
            i.join()

    def check_server(self):
        '''
        Verify that every Appium server started successfully.
        :return: True
        '''
        while True:
            for port in self.ports:
                # A non-empty netstat result means the port is listening.
                res = subprocess.getoutput("netstat -ano | findstr %s" % port)
                # If found, drop the port from the list; once the list is
                # empty, all servers are up and the loop exits.
                # NOTE(review): removing from self.ports while iterating it
                # skips entries; the outer while-loop retry masks this.
                if res:
                    logger.debug('检验appium服务启动:%s' % res)
                    self.ports.remove(port)
                else:
                    logger.debug('端口 [%s] 服务启动失败5秒钟后尝试' % port)
            if not self.ports:
                break
            time.sleep(5)
        logger.debug('全部appium服务启动成功!')
        return True

    def driver_start_command(self,**kwargs):
        '''
        Start a single webdriver session.
        :param kwargs: configuration of the app under test (package name, entry activity, ...)
        :return:
        '''
        local.desired_caps = {'platformName': '', 'platformVersion': '', 'deviceName': '',
                              "unicodeKeyboard": "True",
                              "resetKeyboard": "True", 'udid': '', 'noReset': 'True'}
        local.desired_caps.update(kwargs)
        port = local.desired_caps.get('port')
        ip = local.desired_caps.get('ip')
        url = 'http://{ip}:{port}/wd/hub'.format(port=port,ip=ip)
        logger.debug('url:%s 开始启动'%url)
        local.driver = webdriver.Remote(url, local.desired_caps)
        logger.debug('url:%s 启动成功' % url)
        # Hand the driver back through the message queue.
        drivers_queue.put(local.driver)
        logger.debug('driver 为 %s 成功push到队列'%local.driver)
        # Queue of device names (used later to distinguish thread names).
        devices_name_queue.put(local.desired_caps.get('name'))
        logger.debug('driver名字 %s 成功push到队列' % local.desired_caps.get('name'))
        # Directory where failure screenshots for this device are stored.
        app_picture_path = APP_PICTUREPATH.format(local.desired_caps.get('name'))
        # If it exists, wipe its contents.
        if os.path.exists(app_picture_path):
            # Use the existing clear helper.
            self.tool.app_clear(app_picture_path)
        else:
            # Otherwise create the directory tree recursively.
            os.makedirs(app_picture_path)

    def driver_start(self):
        # Start all webdriver sessions and return the queue of drivers.
        driver_threads = []
        for device_app in self.devices.get(self.device_type):
            # Merge the app-under-test info into the phone configuration.
            device_app.update(self.app)
            # The thread is only created here, NOT started yet.
            t = threading.Thread(target=self.driver_start_command, kwargs=device_app)
            driver_threads.append(t)
        for t in driver_threads:
            # Must start *and* join here: launching drivers concurrently
            # makes them overwrite each other, so only one thread would
            # succeed.
            t.start()
            t.join()
        # After all drivers started, return their queue.
        return drivers_queue
if __name__ == '__main__':
    # Manual smoke test: print the loaded configuration only.
    c = Controller()
    print(c.yml)
    # c.server_start()
    # c.check_server()
    # c.driver_start()
| zh | 0.950969 | # 多线程数据隔离 # 存放driver队列 # 存放手机设备名称队列 # 配置信息 # 所有手机配置信息 # 测试app信息 包名 入口等 # Android or IOS # 启动的服务端口列表,用于校验服务是否成功启动 每次运行之前杀掉之前所有的服务 adb如果重启 夜游神将不会被查到 根据kwargs中ip、端口等信息 启动appium服务 根据配置的手机信息,启动对应的appium服务 # 每次启动前 清掉上一次还存活的端口 # 启动的server加入到这个列表,用来等待所有服务启动起来之后才往下运行 # 将手机操作log加载到配置中 # 提取校验服务启动成功的端口 # 启动多线程开启服务 校验所有appium服务是否启动成功 :return: True # 通过查看是否有返回值来确定是否启动 # 如果有 则从list中删除这个端口 直到这个list为空时 代表启动成功 跳出循环 driver启动命令 :param kwargs: 被测app配置,如包名,入口等 :return: # 通过消息对列传递driver驱动 # 存放手机名称的对列(用于后续对线程名进行区分) # 创建错误图片存放的路径 # 如果存在则清除目录下的所有内容 # 调用写好的clear方法 # 如果不存在path 则递归创建目录 # 将测试的app信息增加到 手机的配置文件中 # 多线程启动,注意这里只是开启了线程,并没有启动 # 必须在这里启动并join,多线程启动driver会发生覆盖现象 # 导致只会有一个线程运行成功 # 所有driver启动成功后 返回driver的mq # c.server_start() # c.check_server() # c.driver_start() | 2.219367 | 2 |
test/functional/wallet_createwallet.py | devcoin/devcoin | 0 | 6624744 | <reponame>devcoin/devcoin
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The Bitcoin Core and Devcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test createwallet arguments.
"""
from test_framework.address import key_to_p2wpkh
from test_framework.descriptors import descsum_create
from test_framework.key import ECKey
from test_framework.test_framework import DevcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
from test_framework.wallet_util import bytes_to_wif, generate_wif_key
class CreateWalletTest(DevcoinTestFramework):
    """Functional test for the `createwallet` RPC argument combinations."""

    def set_test_params(self):
        # A single node is enough: every wallet is created on node 0.
        self.num_nodes = 1

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def run_test(self):
        node = self.nodes[0]
        node.generate(1) # Leave IBD for sethdseed
        # w0: plain default wallet, used as a pubkey source below.
        self.nodes[0].createwallet(wallet_name='w0')
        w0 = node.get_wallet_rpc('w0')
        address1 = w0.getnewaddress()

        self.log.info("Test disableprivatekeys creation.")
        self.nodes[0].createwallet(wallet_name='w1', disable_private_keys=True)
        w1 = node.get_wallet_rpc('w1')
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w1.getnewaddress)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w1.getrawchangeaddress)
        # Watch-only pubkey import is still allowed.
        w1.importpubkey(w0.getaddressinfo(address1)['pubkey'])

        self.log.info('Test that private keys cannot be imported')
        eckey = ECKey()
        eckey.generate()
        privkey = bytes_to_wif(eckey.get_bytes())
        assert_raises_rpc_error(-4, 'Cannot import private keys to a wallet with private keys disabled', w1.importprivkey, privkey)
        if self.options.descriptors:
            result = w1.importdescriptors([{'desc': descsum_create('wpkh(' + privkey + ')'), 'timestamp': 'now'}])
        else:
            result = w1.importmulti([{'scriptPubKey': {'address': key_to_p2wpkh(eckey.get_pubkey().get_bytes())}, 'timestamp': 'now', 'keys': [privkey]}])
        assert not result[0]['success']
        assert 'warning' not in result[0]
        assert_equal(result[0]['error']['code'], -4)
        assert_equal(result[0]['error']['message'], 'Cannot import private keys to a wallet with private keys disabled')

        self.log.info("Test blank creation with private keys disabled.")
        self.nodes[0].createwallet(wallet_name='w2', disable_private_keys=True, blank=True)
        w2 = node.get_wallet_rpc('w2')
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w2.getnewaddress)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w2.getrawchangeaddress)
        w2.importpubkey(w0.getaddressinfo(address1)['pubkey'])

        self.log.info("Test blank creation with private keys enabled.")
        self.nodes[0].createwallet(wallet_name='w3', disable_private_keys=False, blank=True)
        w3 = node.get_wallet_rpc('w3')
        assert_equal(w3.getwalletinfo()['keypoolsize'], 0)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w3.getnewaddress)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w3.getrawchangeaddress)
        # Import private key
        w3.importprivkey(generate_wif_key())
        # Imported private keys are currently ignored by the keypool
        assert_equal(w3.getwalletinfo()['keypoolsize'], 0)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w3.getnewaddress)
        # Set the seed
        if self.options.descriptors:
            w3.importdescriptors([{
                'desc': descsum_create('wpkh(tprv8ZgxMBicQKsPcwuZGKp8TeWppSuLMiLe2d9PupB14QpPeQsqoj3LneJLhGHH13xESfvASyd4EFLJvLrG8b7DrLxEuV7hpF9uUc6XruKA1Wq/0h/*)'),
                'timestamp': 'now',
                'active': True
            },
            {
                'desc': descsum_create('wpkh(tprv8ZgxMBicQKsPcwuZGKp8TeWppSuLMiLe2d9PupB14QpPeQsqoj3LneJLhGHH13xESfvASyd4EFLJvLrG8b7DrLxEuV7hpF9uUc6XruKA1Wq/1h/*)'),
                'timestamp': 'now',
                'active': True,
                'internal': True
            }])
        else:
            w3.sethdseed()
        assert_equal(w3.getwalletinfo()['keypoolsize'], 1)
        w3.getnewaddress()
        w3.getrawchangeaddress()

        self.log.info("Test blank creation with privkeys enabled and then encryption")
        self.nodes[0].createwallet(wallet_name='w4', disable_private_keys=False, blank=True)
        w4 = node.get_wallet_rpc('w4')
        assert_equal(w4.getwalletinfo()['keypoolsize'], 0)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w4.getnewaddress)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w4.getrawchangeaddress)
        # Encrypt the wallet. Nothing should change about the keypool
        w4.encryptwallet('pass')
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w4.getnewaddress)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w4.getrawchangeaddress)
        # Now set a seed and it should work. Wallet should also be encrypted
        w4.walletpassphrase('<PASSWORD>', 60)
        if self.options.descriptors:
            w4.importdescriptors([{
                'desc': descsum_create('wpkh(tprv8ZgxMBicQKsPcwuZGKp8TeWppSuLMiLe2d9PupB14QpPeQsqoj3LneJLhGHH13xESfvASyd4EFLJvLrG8b7DrLxEuV7hpF9uUc6XruKA1Wq/0h/*)'),
                'timestamp': 'now',
                'active': True
            },
            {
                'desc': descsum_create('wpkh(tprv8ZgxMBicQKsPcwuZGKp8TeWppSuLMiLe2d9PupB14QpPeQsqoj3LneJLhGHH13xESfvASyd4EFLJvLrG8b7DrLxEuV7hpF9uUc6XruKA1Wq/1h/*)'),
                'timestamp': 'now',
                'active': True,
                'internal': True
            }])
        else:
            w4.sethdseed()
        w4.getnewaddress()
        w4.getrawchangeaddress()

        self.log.info("Test blank creation with privkeys disabled and then encryption")
        self.nodes[0].createwallet(wallet_name='w5', disable_private_keys=True, blank=True)
        w5 = node.get_wallet_rpc('w5')
        assert_equal(w5.getwalletinfo()['keypoolsize'], 0)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w5.getnewaddress)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w5.getrawchangeaddress)
        # Encrypt the wallet
        assert_raises_rpc_error(-16, "Error: wallet does not contain private keys, nothing to encrypt.", w5.encryptwallet, 'pass')
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w5.getnewaddress)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w5.getrawchangeaddress)

        self.log.info('New blank and encrypted wallets can be created')
        self.nodes[0].createwallet(wallet_name='wblank', disable_private_keys=False, blank=True, passphrase='<PASSWORD>')
        wblank = node.get_wallet_rpc('wblank')
        assert_raises_rpc_error(-13, "Error: Please enter the wallet passphrase with walletpassphrase first.", wblank.signmessage, "needanargument", "test")
        wblank.walletpassphrase('<PASSWORD>', 60)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", wblank.getnewaddress)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", wblank.getrawchangeaddress)

        self.log.info('Test creating a new encrypted wallet.')
        # Born encrypted wallet is created (has keys)
        self.nodes[0].createwallet(wallet_name='w6', disable_private_keys=False, blank=False, passphrase='<PASSWORD>')
        w6 = node.get_wallet_rpc('w6')
        assert_raises_rpc_error(-13, "Error: Please enter the wallet passphrase with walletpassphrase first.", w6.signmessage, "needanargument", "test")
        w6.walletpassphrase('<PASSWORD>', 60)
        w6.signmessage(w6.getnewaddress('', 'legacy'), "test")
        w6.keypoolrefill(1)
        # There should only be 1 key for legacy, 3 for descriptors
        walletinfo = w6.getwalletinfo()
        keys = 3 if self.options.descriptors else 1
        assert_equal(walletinfo['keypoolsize'], keys)
        assert_equal(walletinfo['keypoolsize_hd_internal'], keys)
        # Allow empty passphrase, but there should be a warning
        resp = self.nodes[0].createwallet(wallet_name='w7', disable_private_keys=False, blank=False, passphrase='')
        assert 'Empty string given as passphrase, wallet will not be encrypted.' in resp['warning']
        w7 = node.get_wallet_rpc('w7')
        assert_raises_rpc_error(-15, 'Error: running with an unencrypted wallet, but walletpassphrase was called.', w7.walletpassphrase, '', 60)

        self.log.info('Test making a wallet with avoid reuse flag')
        self.nodes[0].createwallet('w8', False, False, '', True) # Use positional arguments to check for bug where avoid_reuse could not be set for wallets without needing them to be encrypted
        w8 = node.get_wallet_rpc('w8')
        # NOTE(review): this re-checks w7 (not w8) — matches upstream; looks
        # like a copy-paste but is harmless.
        assert_raises_rpc_error(-15, 'Error: running with an unencrypted wallet, but walletpassphrase was called.', w7.walletpassphrase, '', 60)
        assert_equal(w8.getwalletinfo()["avoid_reuse"], True)

        self.log.info('Using a passphrase with private keys disabled returns error')
        assert_raises_rpc_error(-4, 'Passphrase provided but private keys are disabled. A passphrase is only used to encrypt private keys, so cannot be used for wallets with private keys disabled.', self.nodes[0].createwallet, wallet_name='w9', disable_private_keys=True, passphrase='<PASSWORD>')
if __name__ == '__main__':
    # Standard functional-test-framework entry point.
    CreateWalletTest().main()
| #!/usr/bin/env python3
# Copyright (c) 2018-2020 The Bitcoin Core and Devcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test createwallet arguments.
"""
from test_framework.address import key_to_p2wpkh
from test_framework.descriptors import descsum_create
from test_framework.key import ECKey
from test_framework.test_framework import DevcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
from test_framework.wallet_util import bytes_to_wif, generate_wif_key
class CreateWalletTest(DevcoinTestFramework):
    """Functional test for the `createwallet` RPC.

    Exercises the combinations of `disable_private_keys`, `blank`,
    `passphrase` and `avoid_reuse` arguments across wallets w0..w9
    (plus `wblank`), checking both the success paths and the expected
    RPC error codes/messages.

    NOTE(review): the literal '<PASSWORD>' strings below are redaction
    placeholders left by dataset anonymization — the upstream test uses a
    real passphrase string; confirm against the original repository.
    """

    def set_test_params(self):
        # A single node is enough: every wallet is created on nodes[0].
        self.num_nodes = 1

    def skip_test_if_missing_module(self):
        # This test is meaningless without wallet support compiled in.
        self.skip_if_no_wallet()

    def run_test(self):
        node = self.nodes[0]
        node.generate(1) # Leave IBD for sethdseed
        # w0: plain default wallet, used as a source of pubkeys to import.
        self.nodes[0].createwallet(wallet_name='w0')
        w0 = node.get_wallet_rpc('w0')
        address1 = w0.getnewaddress()
        self.log.info("Test disableprivatekeys creation.")
        # w1: watch-only wallet (no private keys).
        self.nodes[0].createwallet(wallet_name='w1', disable_private_keys=True)
        w1 = node.get_wallet_rpc('w1')
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w1.getnewaddress)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w1.getrawchangeaddress)
        # Importing a public key is still allowed on a watch-only wallet.
        w1.importpubkey(w0.getaddressinfo(address1)['pubkey'])
        self.log.info('Test that private keys cannot be imported')
        eckey = ECKey()
        eckey.generate()
        privkey = bytes_to_wif(eckey.get_bytes())
        assert_raises_rpc_error(-4, 'Cannot import private keys to a wallet with private keys disabled', w1.importprivkey, privkey)
        # The bulk-import RPCs must fail the same way, for both wallet types.
        if self.options.descriptors:
            result = w1.importdescriptors([{'desc': descsum_create('wpkh(' + privkey + ')'), 'timestamp': 'now'}])
        else:
            result = w1.importmulti([{'scriptPubKey': {'address': key_to_p2wpkh(eckey.get_pubkey().get_bytes())}, 'timestamp': 'now', 'keys': [privkey]}])
        assert not result[0]['success']
        assert 'warning' not in result[0]
        assert_equal(result[0]['error']['code'], -4)
        assert_equal(result[0]['error']['message'], 'Cannot import private keys to a wallet with private keys disabled')
        self.log.info("Test blank creation with private keys disabled.")
        # w2: blank AND watch-only.
        self.nodes[0].createwallet(wallet_name='w2', disable_private_keys=True, blank=True)
        w2 = node.get_wallet_rpc('w2')
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w2.getnewaddress)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w2.getrawchangeaddress)
        w2.importpubkey(w0.getaddressinfo(address1)['pubkey'])
        self.log.info("Test blank creation with private keys enabled.")
        # w3: blank but able to hold private keys; starts with an empty keypool.
        self.nodes[0].createwallet(wallet_name='w3', disable_private_keys=False, blank=True)
        w3 = node.get_wallet_rpc('w3')
        assert_equal(w3.getwalletinfo()['keypoolsize'], 0)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w3.getnewaddress)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w3.getrawchangeaddress)
        # Import private key
        w3.importprivkey(generate_wif_key())
        # Imported private keys are currently ignored by the keypool
        assert_equal(w3.getwalletinfo()['keypoolsize'], 0)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w3.getnewaddress)
        # Set the seed
        if self.options.descriptors:
            w3.importdescriptors([{
                'desc': descsum_create('wpkh(tprv8ZgxMBicQKsPcwuZGKp8TeWppSuLMiLe2d9PupB14QpPeQsqoj3LneJLhGHH13xESfvASyd4EFLJvLrG8b7DrLxEuV7hpF9uUc6XruKA1Wq/0h/*)'),
                'timestamp': 'now',
                'active': True
            },
            {
                'desc': descsum_create('wpkh(tprv8ZgxMBicQKsPcwuZGKp8TeWppSuLMiLe2d9PupB14QpPeQsqoj3LneJLhGHH13xESfvASyd4EFLJvLrG8b7DrLxEuV7hpF9uUc6XruKA1Wq/1h/*)'),
                'timestamp': 'now',
                'active': True,
                'internal': True
            }])
        else:
            w3.sethdseed()
        assert_equal(w3.getwalletinfo()['keypoolsize'], 1)
        w3.getnewaddress()
        w3.getrawchangeaddress()
        self.log.info("Test blank creation with privkeys enabled and then encryption")
        # w4: blank wallet that gets encrypted before any seed is set.
        self.nodes[0].createwallet(wallet_name='w4', disable_private_keys=False, blank=True)
        w4 = node.get_wallet_rpc('w4')
        assert_equal(w4.getwalletinfo()['keypoolsize'], 0)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w4.getnewaddress)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w4.getrawchangeaddress)
        # Encrypt the wallet. Nothing should change about the keypool
        w4.encryptwallet('pass')
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w4.getnewaddress)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w4.getrawchangeaddress)
        # Now set a seed and it should work. Wallet should also be encrypted
        w4.walletpassphrase('<PASSWORD>', 60)
        if self.options.descriptors:
            w4.importdescriptors([{
                'desc': descsum_create('wpkh(tprv8ZgxMBicQKsPcwuZGKp8TeWppSuLMiLe2d9PupB14QpPeQsqoj3LneJLhGHH13xESfvASyd4EFLJvLrG8b7DrLxEuV7hpF9uUc6XruKA1Wq/0h/*)'),
                'timestamp': 'now',
                'active': True
            },
            {
                'desc': descsum_create('wpkh(tprv8ZgxMBicQKsPcwuZGKp8TeWppSuLMiLe2d9PupB14QpPeQsqoj3LneJLhGHH13xESfvASyd4EFLJvLrG8b7DrLxEuV7hpF9uUc6XruKA1Wq/1h/*)'),
                'timestamp': 'now',
                'active': True,
                'internal': True
            }])
        else:
            w4.sethdseed()
        w4.getnewaddress()
        w4.getrawchangeaddress()
        self.log.info("Test blank creation with privkeys disabled and then encryption")
        # w5: watch-only wallets have nothing to encrypt.
        self.nodes[0].createwallet(wallet_name='w5', disable_private_keys=True, blank=True)
        w5 = node.get_wallet_rpc('w5')
        assert_equal(w5.getwalletinfo()['keypoolsize'], 0)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w5.getnewaddress)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w5.getrawchangeaddress)
        # Encrypt the wallet
        assert_raises_rpc_error(-16, "Error: wallet does not contain private keys, nothing to encrypt.", w5.encryptwallet, 'pass')
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w5.getnewaddress)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w5.getrawchangeaddress)
        self.log.info('New blank and encrypted wallets can be created')
        self.nodes[0].createwallet(wallet_name='wblank', disable_private_keys=False, blank=True, passphrase='<PASSWORD>')
        wblank = node.get_wallet_rpc('wblank')
        assert_raises_rpc_error(-13, "Error: Please enter the wallet passphrase with walletpassphrase first.", wblank.signmessage, "needanargument", "test")
        wblank.walletpassphrase('<PASSWORD>', 60)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", wblank.getnewaddress)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", wblank.getrawchangeaddress)
        self.log.info('Test creating a new encrypted wallet.')
        # Born encrypted wallet is created (has keys)
        self.nodes[0].createwallet(wallet_name='w6', disable_private_keys=False, blank=False, passphrase='<PASSWORD>')
        w6 = node.get_wallet_rpc('w6')
        assert_raises_rpc_error(-13, "Error: Please enter the wallet passphrase with walletpassphrase first.", w6.signmessage, "needanargument", "test")
        w6.walletpassphrase('<PASSWORD>', 60)
        w6.signmessage(w6.getnewaddress('', 'legacy'), "test")
        w6.keypoolrefill(1)
        # There should only be 1 key for legacy, 3 for descriptors
        walletinfo = w6.getwalletinfo()
        keys = 3 if self.options.descriptors else 1
        assert_equal(walletinfo['keypoolsize'], keys)
        assert_equal(walletinfo['keypoolsize_hd_internal'], keys)
        # Allow empty passphrase, but there should be a warning
        resp = self.nodes[0].createwallet(wallet_name='w7', disable_private_keys=False, blank=False, passphrase='')
        assert 'Empty string given as passphrase, wallet will not be encrypted.' in resp['warning']
        w7 = node.get_wallet_rpc('w7')
        assert_raises_rpc_error(-15, 'Error: running with an unencrypted wallet, but walletpassphrase was called.', w7.walletpassphrase, '', 60)
        self.log.info('Test making a wallet with avoid reuse flag')
        self.nodes[0].createwallet('w8', False, False, '', True) # Use positional arguments to check for bug where avoid_reuse could not be set for wallets without needing them to be encrypted
        w8 = node.get_wallet_rpc('w8')
        # NOTE(review): this repeats the w7 assertion four lines above verbatim;
        # it most likely was intended to target w8 (the wallet just created) —
        # confirm against the upstream test before relying on it.
        assert_raises_rpc_error(-15, 'Error: running with an unencrypted wallet, but walletpassphrase was called.', w7.walletpassphrase, '', 60)
        assert_equal(w8.getwalletinfo()["avoid_reuse"], True)
        self.log.info('Using a passphrase with private keys disabled returns error')
        assert_raises_rpc_error(-4, 'Passphrase provided but private keys are disabled. A passphrase is only used to encrypt private keys, so cannot be used for wallets with private keys disabled.', self.nodes[0].createwallet, wallet_name='w9', disable_private_keys=True, passphrase='<PASSWORD>')
if __name__ == '__main__':
CreateWalletTest().main() | en | 0.818897 | #!/usr/bin/env python3 # Copyright (c) 2018-2020 The Bitcoin Core and Devcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. Test createwallet arguments. # Leave IBD for sethdseed # Import private key # Imported private keys are currently ignored by the keypool # Set the seed # Encrypt the wallet. Nothing should change about the keypool # Now set a seed and it should work. Wallet should also be encrypted # Encrypt the wallet # Born encrypted wallet is created (has keys) # There should only be 1 key for legacy, 3 for descriptors # Allow empty passphrase, but there should be a warning # Use positional arguments to check for bug where avoid_reuse could not be set for wallets without needing them to be encrypted | 2.180964 | 2 |
braindecode/datasets/xy.py | Simon-Free/braindecode | 1 | 6624745 | <filename>braindecode/datasets/xy.py
import numpy as np
import pandas as pd
import logging
import mne
from .base import BaseDataset, BaseConcatDataset
from ..datautil.windowers import (
create_fixed_length_windows,)
log = logging.getLogger(__name__)
def create_from_X_y(
        X, y, drop_last_window, sfreq=None, ch_names=None, window_size_samples=None,
        window_stride_samples=None):
    """Create a BaseConcatDataset of WindowsDatasets from X and y to be used for
    decoding with skorch and braindecode, where X is a list of pre-cut trials
    and y are corresponding targets.

    Parameters
    ----------
    X: array-like
        list of pre-cut trials as n_trials x n_channels x n_times
    y: array-like
        targets corresponding to the trials
    drop_last_window: bool
        whether or not have a last overlapping window, when
        windows/windows do not equally divide the continuous signal
    sfreq: float | None
        common sampling frequency of all trials; defaults to 100 Hz when None
    ch_names: array-like | None
        channel names of the trials; defaults to "0" .. "n_channels - 1"
        when None
    window_size_samples: int | None
        window size; when both this and `window_stride_samples` are None,
        each trial becomes exactly one window (all trials must then have
        the same length)
    window_stride_samples: int | None
        stride between windows

    Returns
    -------
    windows_datasets: BaseConcatDataset
        X and y transformed to a dataset format that is compatible with skorch
        and braindecode
    """
    n_samples_per_x = []
    base_datasets = []
    if sfreq is None:
        sfreq = 100
        log.info("No sampling frequency given, set to 100 Hz.")
    if ch_names is None:
        # NOTE(review): assumes X supports .shape (e.g. an ndarray of shape
        # n_trials x n_channels x n_times); a plain list of 2D trials would
        # need len(X[0]) instead — confirm against callers.
        ch_names = [str(i) for i in range(X.shape[1])]
        # Fix: the original message had a stray ')' and overstated the range;
        # channels are named "0" .. "n_channels - 1".
        log.info(f"No channel names given, set to 0-{X.shape[1] - 1}.")
    for x, target in zip(X, y):
        n_samples_per_x.append(x.shape[1])
        # NOTE(review): the Info object is loop-invariant in content, but it
        # is rebuilt per trial because mne Raw objects may take ownership of
        # (and mutate) their measurement info — confirm before hoisting.
        info = mne.create_info(ch_names=ch_names, sfreq=sfreq)
        raw = mne.io.RawArray(x, info)
        base_dataset = BaseDataset(raw, pd.Series({"target": target}),
                                   target_name="target")
        base_datasets.append(base_dataset)
    base_datasets = BaseConcatDataset(base_datasets)
    if window_size_samples is None and window_stride_samples is None:
        # Fall back to one window per trial: only valid when all trials
        # share the same number of samples.
        if len(np.unique(n_samples_per_x)) != 1:
            raise ValueError("if 'window_size_samples' and "
                             "'window_stride_samples' are None, "
                             "all trials have to have the same length")
        window_size_samples = n_samples_per_x[0]
        window_stride_samples = n_samples_per_x[0]
    windows_datasets = create_fixed_length_windows(
        base_datasets,
        start_offset_samples=0,
        stop_offset_samples=0,
        window_size_samples=window_size_samples,
        window_stride_samples=window_stride_samples,
        drop_last_window=drop_last_window
    )
    return windows_datasets
| <filename>braindecode/datasets/xy.py
import numpy as np
import pandas as pd
import logging
import mne
from .base import BaseDataset, BaseConcatDataset
from ..datautil.windowers import (
create_fixed_length_windows,)
log = logging.getLogger(__name__)
def create_from_X_y(
X, y, drop_last_window, sfreq=None, ch_names=None, window_size_samples=None,
window_stride_samples=None):
"""Create a BaseConcatDataset of WindowsDatasets from X and y to be used for
decoding with skorch and braindecode, where X is a list of pre-cut trials
and y are corresponding targets.
Parameters
----------
X: array-like
list of pre-cut trials as n_trials x n_channels x n_times
y: array-like
targets corresponding to the trials
sfreq: common sampling frequency of all trials
ch_names: array-like
channel names of the trials
drop_last_window: bool
whether or not have a last overlapping window, when
windows/windows do not equally divide the continuous signal
window_size_samples: int
window size
window_stride_samples: int
stride between windows
Returns
-------
windows_datasets: BaseConcatDataset
X and y transformed to a dataset format that is compatible with skorch
and braindecode
"""
n_samples_per_x = []
base_datasets = []
if sfreq is None:
sfreq = 100
log.info("No sampling frequency given, set to 100 Hz.")
if ch_names is None:
ch_names = [str(i) for i in range(X.shape[1])]
log.info(f"No channel names given, set to 0-{X.shape[1]}).")
for x, target in zip(X, y):
n_samples_per_x.append(x.shape[1])
info = mne.create_info(ch_names=ch_names, sfreq=sfreq)
raw = mne.io.RawArray(x, info)
base_dataset = BaseDataset(raw, pd.Series({"target": target}),
target_name="target")
base_datasets.append(base_dataset)
base_datasets = BaseConcatDataset(base_datasets)
if window_size_samples is None and window_stride_samples is None:
if not len(np.unique(n_samples_per_x)) == 1:
raise ValueError(f"if 'window_size_samples' and "
f"'window_stride_samples' are None, "
f"all trials have to have the same length")
window_size_samples = n_samples_per_x[0]
window_stride_samples = n_samples_per_x[0]
windows_datasets = create_fixed_length_windows(
base_datasets,
start_offset_samples=0,
stop_offset_samples=0,
window_size_samples=window_size_samples,
window_stride_samples=window_stride_samples,
drop_last_window=drop_last_window
)
return windows_datasets
| en | 0.807352 | Create a BaseConcatDataset of WindowsDatasets from X and y to be used for decoding with skorch and braindecode, where X is a list of pre-cut trials and y are corresponding targets. Parameters ---------- X: array-like list of pre-cut trials as n_trials x n_channels x n_times y: array-like targets corresponding to the trials sfreq: common sampling frequency of all trials ch_names: array-like channel names of the trials drop_last_window: bool whether or not have a last overlapping window, when windows/windows do not equally divide the continuous signal window_size_samples: int window size window_stride_samples: int stride between windows Returns ------- windows_datasets: BaseConcatDataset X and y transformed to a dataset format that is compatible with skorch and braindecode | 2.65402 | 3 |
php/python/UpdateDb.py | the16bitgamer/YourflixMkII | 0 | 6624746 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import YourflixDbManager
import DatabaseManager
# Credentials for the local database connection.
_user = "pi"
_password = "<PASSWORD>"  # NOTE(review): redacted placeholder from dataset anonymization
# NOTE(review): the imported modules are used as singletons and passed as the
# first argument to their own functions — confirm this matches the managers'
# expected calling convention.
dbManager = YourflixDbManager
db = DatabaseManager
# Connect, run the consistency check, print its result.
db.ConnectToDb(db, _user, _password)
print(dbManager.CheckDatabase(dbManager, db))
db.DisconnectDb(db) | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import YourflixDbManager
import DatabaseManager
# Database connection credentials.
_user = "pi"
_password = "<PASSWORD>"  # NOTE(review): redacted placeholder from dataset anonymization
# NOTE(review): modules used as singletons; each is passed as the first
# argument to its own functions — verify against the manager modules.
dbManager = YourflixDbManager
db = DatabaseManager
# Connect, run the consistency check, print its result.
db.ConnectToDb(db, _user, _password)
print(dbManager.CheckDatabase(dbManager, db))
db.DisconnectDb(db) | en | 0.352855 | #!/usr/bin/env python # -*- coding: utf-8 -*- | 2.353442 | 2 |
bomlib/xlsx_writer.py | BarracudaPff/code-golf-data-pythpn | 0 | 6624747 | try:
pass
except:
def WriteXLSX(filename, groups, net, headings, prefs):
return False
else:
"""
Write BoM out to a XLSX file
filename = path to output file (must be a .xlsx file)
groups = [list of ComponentGroup groups]
net = netlist object
headings = [list of headings to display in the BoM file]
prefs = BomPref object
"""
    def WriteXLSX(filename, groups, net, headings, prefs):
        """Write the BoM to *filename* as an XLSX workbook.

        Returns False when *filename* is not a .xlsx path; otherwise writes
        one row per component group (plus an optional PCB-info footer).
        """
        filename = os.path.abspath(filename)
        # Refuse anything that is not an .xlsx path.
        if not filename.endswith(".xlsx"):
            return False
        # Summary figures used by the footer block below.
        nGroups = len(groups)
        nTotal = sum([g.getCount() for g in groups])
        nFitted = sum([g.getCount() for g in groups if g.isFitted()])
        nBuild = nFitted * prefs.boards
        workbook = xlsxwriter.Workbook(filename)
        worksheet = workbook.add_worksheet()
        # Optional leading "Component" column that numbers the rows.
        if prefs.numberRows:
            row_headings = ["Component"] + headings
        else:
            row_headings = headings
        # Per-column cell format and running width (start wider than the heading).
        cellformats = {}
        column_widths = {}
        for i in range(len(row_headings)):
            cellformats[i] = workbook.add_format({"align": "center_across"})
            column_widths[i] = len(row_headings[i]) + 10
            if not prefs.hideHeaders:
                worksheet.write_string(0, i, row_headings[i], cellformats[i])
        count = 0
        rowCount = 1
        for i, group in enumerate(groups):
            # Optionally drop do-not-fit groups entirely.
            if prefs.ignoreDNF and not group.isFitted():
                continue
            row = group.getRow(headings)
            if prefs.numberRows:
                row = [str(rowCount)] + row
            for columnCount in range(len(row)):
                # NOTE(review): assumes each cell is a bytes object (Python 2
                # era); under Python 3, str has no .decode — confirm what
                # group.getRow() actually returns.
                cell = row[columnCount].decode("utf-8")
                worksheet.write_string(rowCount, columnCount, cell, cellformats[columnCount])
                # Widen the column when a cell gets close to its current width.
                if len(cell) > column_widths[columnCount] - 5:
                    column_widths[columnCount] = len(cell) + 5
            # NOTE(review): bare except silently swallows any failure from
            # getCount(); `count` is also never used afterwards in this block.
            try:
                count += group.getCount()
            except:
                pass
            rowCount += 1
        if not prefs.hidePcbInfo:
            # Leave a five-row gap between the table and the PCB info block.
            for i in range(5):
                rowCount += 1
            cellformat_left = workbook.add_format({"align": "left"})
            worksheet.write_string(rowCount, 0, "Component Groups:", cellformats[0])
            worksheet.write_number(rowCount, 1, nGroups, cellformat_left)
            rowCount += 1
            worksheet.write_string(rowCount, 0, "Component Count:", cellformats[0])
            worksheet.write_number(rowCount, 1, nTotal, cellformat_left)
            rowCount += 1
            worksheet.write_string(rowCount, 0, "Fitted Components:", cellformats[0])
            worksheet.write_number(rowCount, 1, nFitted, cellformat_left)
            rowCount += 1
            worksheet.write_string(rowCount, 0, "Number of PCBs:", cellformats[0])
            worksheet.write_number(rowCount, 1, prefs.boards, cellformat_left)
            rowCount += 1
            worksheet.write_string(rowCount, 0, "Total components:", cellformats[0])
            worksheet.write_number(rowCount, 1, nBuild, cellformat_left)
            rowCount += 1
            # Free-text metadata rows; column 1 keeps growing to fit the text.
            worksheet.write_string(rowCount, 0, "Schematic Version:", cellformats[0])
            worksheet.write_string(rowCount, 1, net.getVersion(), cellformat_left)
            rowCount += 1
            if len(net.getVersion()) > column_widths[1]:
                column_widths[1] = len(net.getVersion())
            worksheet.write_string(rowCount, 0, "Schematic Date:", cellformats[0])
            worksheet.write_string(rowCount, 1, net.getSheetDate(), cellformat_left)
            rowCount += 1
            if len(net.getSheetDate()) > column_widths[1]:
                column_widths[1] = len(net.getSheetDate())
            worksheet.write_string(rowCount, 0, "BoM Date:", cellformats[0])
            worksheet.write_string(rowCount, 1, net.getDate(), cellformat_left)
            rowCount += 1
            if len(net.getDate()) > column_widths[1]:
                column_widths[1] = len(net.getDate())
            worksheet.write_string(rowCount, 0, "Schematic Source:", cellformats[0])
            worksheet.write_string(rowCount, 1, net.getSource(), cellformat_left)
            rowCount += 1
            if len(net.getSource()) > column_widths[1]:
                column_widths[1] = len(net.getSource())
            worksheet.write_string(rowCount, 0, "KiCad Version:", cellformats[0])
            worksheet.write_string(rowCount, 1, net.getTool(), cellformat_left)
            rowCount += 1
            if len(net.getTool()) > column_widths[1]:
                column_widths[1] = len(net.getTool())
        # Apply the accumulated widths and finish the workbook.
        for i in range(len(column_widths)):
            worksheet.set_column(i, i, column_widths[i])
        workbook.close()
return True | try:
pass
except:
def WriteXLSX(filename, groups, net, headings, prefs):
return False
else:
"""
Write BoM out to a XLSX file
filename = path to output file (must be a .xlsx file)
groups = [list of ComponentGroup groups]
net = netlist object
headings = [list of headings to display in the BoM file]
prefs = BomPref object
"""
def WriteXLSX(filename, groups, net, headings, prefs):
filename = os.path.abspath(filename)
if not filename.endswith(".xlsx"):
return False
nGroups = len(groups)
nTotal = sum([g.getCount() for g in groups])
nFitted = sum([g.getCount() for g in groups if g.isFitted()])
nBuild = nFitted * prefs.boards
workbook = xlsxwriter.Workbook(filename)
worksheet = workbook.add_worksheet()
if prefs.numberRows:
row_headings = ["Component"] + headings
else:
row_headings = headings
cellformats = {}
column_widths = {}
for i in range(len(row_headings)):
cellformats[i] = workbook.add_format({"align": "center_across"})
column_widths[i] = len(row_headings[i]) + 10
if not prefs.hideHeaders:
worksheet.write_string(0, i, row_headings[i], cellformats[i])
count = 0
rowCount = 1
for i, group in enumerate(groups):
if prefs.ignoreDNF and not group.isFitted():
continue
row = group.getRow(headings)
if prefs.numberRows:
row = [str(rowCount)] + row
for columnCount in range(len(row)):
cell = row[columnCount].decode("utf-8")
worksheet.write_string(rowCount, columnCount, cell, cellformats[columnCount])
if len(cell) > column_widths[columnCount] - 5:
column_widths[columnCount] = len(cell) + 5
try:
count += group.getCount()
except:
pass
rowCount += 1
if not prefs.hidePcbInfo:
for i in range(5):
rowCount += 1
cellformat_left = workbook.add_format({"align": "left"})
worksheet.write_string(rowCount, 0, "Component Groups:", cellformats[0])
worksheet.write_number(rowCount, 1, nGroups, cellformat_left)
rowCount += 1
worksheet.write_string(rowCount, 0, "Component Count:", cellformats[0])
worksheet.write_number(rowCount, 1, nTotal, cellformat_left)
rowCount += 1
worksheet.write_string(rowCount, 0, "Fitted Components:", cellformats[0])
worksheet.write_number(rowCount, 1, nFitted, cellformat_left)
rowCount += 1
worksheet.write_string(rowCount, 0, "Number of PCBs:", cellformats[0])
worksheet.write_number(rowCount, 1, prefs.boards, cellformat_left)
rowCount += 1
worksheet.write_string(rowCount, 0, "Total components:", cellformats[0])
worksheet.write_number(rowCount, 1, nBuild, cellformat_left)
rowCount += 1
worksheet.write_string(rowCount, 0, "Schematic Version:", cellformats[0])
worksheet.write_string(rowCount, 1, net.getVersion(), cellformat_left)
rowCount += 1
if len(net.getVersion()) > column_widths[1]:
column_widths[1] = len(net.getVersion())
worksheet.write_string(rowCount, 0, "Schematic Date:", cellformats[0])
worksheet.write_string(rowCount, 1, net.getSheetDate(), cellformat_left)
rowCount += 1
if len(net.getSheetDate()) > column_widths[1]:
column_widths[1] = len(net.getSheetDate())
worksheet.write_string(rowCount, 0, "BoM Date:", cellformats[0])
worksheet.write_string(rowCount, 1, net.getDate(), cellformat_left)
rowCount += 1
if len(net.getDate()) > column_widths[1]:
column_widths[1] = len(net.getDate())
worksheet.write_string(rowCount, 0, "Schematic Source:", cellformats[0])
worksheet.write_string(rowCount, 1, net.getSource(), cellformat_left)
rowCount += 1
if len(net.getSource()) > column_widths[1]:
column_widths[1] = len(net.getSource())
worksheet.write_string(rowCount, 0, "KiCad Version:", cellformats[0])
worksheet.write_string(rowCount, 1, net.getTool(), cellformat_left)
rowCount += 1
if len(net.getTool()) > column_widths[1]:
column_widths[1] = len(net.getTool())
for i in range(len(column_widths)):
worksheet.set_column(i, i, column_widths[i])
workbook.close()
return True | en | 0.809674 | Write BoM out to a XLSX file filename = path to output file (must be a .xlsx file) groups = [list of ComponentGroup groups] net = netlist object headings = [list of headings to display in the BoM file] prefs = BomPref object | 2.782391 | 3 |
arrays/delete-dupes-from-sorted-arr.py | geekidharsh/elements-of-programming | 0 | 6624748 | def delete_dupes_from_sorted_1(arr):
# time O(n) space O(1)
# count number of valid entries
i = 1
for j in range(1, len(arr)):
if arr[i-1] != arr[j]:
arr[i] = arr[j]
i += 1
# resulting arr will still have invalid items but theres no
# requirement of the code to delete items beyond the last valid item
return arr, i
# optionally
# to get all valid items only, use slice: arr[:i]
# test
arr = [2,3,5,5,7,11,11,11,13] # inp
# exp out: ([2, 3, 5, 7, 11, 13, 11, 11, 13], 6)
def delete_dupes_from_sorted_1(arr):
    """Deduplicate a sorted list in place.

    Compacts the unique values into the leading slots of ``arr`` and returns
    ``(arr, n)`` where ``n`` is the number of valid leading entries. Items
    past index ``n - 1`` are leftover residue (there is no requirement to
    delete them); use ``arr[:n]`` to get only the unique values.

    Time O(n), space O(1).
    """
    # Fix: an empty list has zero valid entries; the original unconditionally
    # reported 1 because `i` started at 1.
    if not arr:
        return arr, 0
    i = 1  # next write position; arr[:i] is always deduplicated
    for j in range(1, len(arr)):
        if arr[i - 1] != arr[j]:
            arr[i] = arr[j]
            i += 1
    return arr, i


# test
arr = [2,3,5,5,7,11,11,11,13] # inp
# exp out: ([2, 3, 5, 7, 11, 13, 11, 11, 13], 6)
print(delete_dupes_from_sorted_1(arr)) | en | 0.735065 | # time O(n) space O(1) # count number of valid entries # resulting arr will still have invalid items but theres no # requirement of the code to delete items beyond the last valid item # optionally # to get all valid items only, use slice: arr[:i] # test # inp # exp out: ([2, 3, 5, 7, 11, 13, 11, 11, 13], 6) | 3.507706 | 4 |
netbox/netbox/settings.py | ae-exact/netbox | 0 | 6624749 | <filename>netbox/netbox/settings.py
import importlib
import logging
import os
import platform
import re
import socket
import warnings
from urllib.parse import urlsplit
from django.contrib.messages import constants as messages
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.core.validators import URLValidator
#
# Environment setup
#
VERSION = '2.9.4-dev'
# Hostname
HOSTNAME = platform.node()
# Set the base directory two levels up
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Validate Python version.
# BUG FIX: platform.python_version_tuple() returns a tuple of *strings*, and
# comparing string tuples is lexicographic — ('3', '10') < ('3', '6') is True,
# so the original check wrongly rejected Python 3.10+. Compare the major/minor
# components as integers instead.
if tuple(int(part) for part in platform.python_version_tuple()[:2]) < (3, 6):
    raise RuntimeError(
        "NetBox requires Python 3.6 or higher (current: Python {})".format(platform.python_version())
    )
#
# Configuration import
#
# Import configuration parameters
try:
    from netbox import configuration
except ImportError:
    # Fail fast with an actionable message when the operator has not yet
    # created netbox/netbox/configuration.py.
    raise ImproperlyConfigured(
        "Configuration file is not present. Please define netbox/netbox/configuration.py per the documentation."
    )

# Enforce required configuration parameters
for parameter in ['ALLOWED_HOSTS', 'DATABASE', 'SECRET_KEY', 'REDIS']:
    # Every deployment must define these; anything else falls back to
    # defaults via getattr() below.
    if not hasattr(configuration, parameter):
        raise ImproperlyConfigured(
            "Required parameter {} is missing from configuration.py.".format(parameter)
        )
# Set required parameters
ALLOWED_HOSTS = getattr(configuration, 'ALLOWED_HOSTS')
DATABASE = getattr(configuration, 'DATABASE')
REDIS = getattr(configuration, 'REDIS')
SECRET_KEY = getattr(configuration, 'SECRET_KEY')
# Set optional parameters
ADMINS = getattr(configuration, 'ADMINS', [])
ALLOWED_URL_SCHEMES = getattr(configuration, 'ALLOWED_URL_SCHEMES', (
'file', 'ftp', 'ftps', 'http', 'https', 'irc', 'mailto', 'sftp', 'ssh', 'tel', 'telnet', 'tftp', 'vnc', 'xmpp',
))
BANNER_BOTTOM = getattr(configuration, 'BANNER_BOTTOM', '')
BANNER_LOGIN = getattr(configuration, 'BANNER_LOGIN', '')
BANNER_TOP = getattr(configuration, 'BANNER_TOP', '')
BASE_PATH = getattr(configuration, 'BASE_PATH', '')
if BASE_PATH:
BASE_PATH = BASE_PATH.strip('/') + '/' # Enforce trailing slash only
CACHE_TIMEOUT = getattr(configuration, 'CACHE_TIMEOUT', 900)
CHANGELOG_RETENTION = getattr(configuration, 'CHANGELOG_RETENTION', 90)
CORS_ORIGIN_ALLOW_ALL = getattr(configuration, 'CORS_ORIGIN_ALLOW_ALL', False)
CORS_ORIGIN_REGEX_WHITELIST = getattr(configuration, 'CORS_ORIGIN_REGEX_WHITELIST', [])
CORS_ORIGIN_WHITELIST = getattr(configuration, 'CORS_ORIGIN_WHITELIST', [])
DATE_FORMAT = getattr(configuration, 'DATE_FORMAT', 'N j, Y')
DATETIME_FORMAT = getattr(configuration, 'DATETIME_FORMAT', 'N j, Y g:i a')
DEBUG = getattr(configuration, 'DEBUG', False)
DEVELOPER = getattr(configuration, 'DEVELOPER', False)
DOCS_ROOT = getattr(configuration, 'DOCS_ROOT', os.path.join(os.path.dirname(BASE_DIR), 'docs'))
EMAIL = getattr(configuration, 'EMAIL', {})
ENFORCE_GLOBAL_UNIQUE = getattr(configuration, 'ENFORCE_GLOBAL_UNIQUE', False)
EXEMPT_VIEW_PERMISSIONS = getattr(configuration, 'EXEMPT_VIEW_PERMISSIONS', [])
HTTP_PROXIES = getattr(configuration, 'HTTP_PROXIES', None)
INTERNAL_IPS = getattr(configuration, 'INTERNAL_IPS', ('127.0.0.1', '::1'))
LOGGING = getattr(configuration, 'LOGGING', {})
LOGIN_REQUIRED = getattr(configuration, 'LOGIN_REQUIRED', False)
LOGIN_TIMEOUT = getattr(configuration, 'LOGIN_TIMEOUT', None)
MAINTENANCE_MODE = getattr(configuration, 'MAINTENANCE_MODE', False)
MAX_PAGE_SIZE = getattr(configuration, 'MAX_PAGE_SIZE', 1000)
MEDIA_ROOT = getattr(configuration, 'MEDIA_ROOT', os.path.join(BASE_DIR, 'media')).rstrip('/')
STORAGE_BACKEND = getattr(configuration, 'STORAGE_BACKEND', None)
STORAGE_CONFIG = getattr(configuration, 'STORAGE_CONFIG', {})
METRICS_ENABLED = getattr(configuration, 'METRICS_ENABLED', False)
NAPALM_ARGS = getattr(configuration, 'NAPALM_ARGS', {})
NAPALM_PASSWORD = getattr(configuration, 'NAPALM_PASSWORD', '')
NAPALM_TIMEOUT = getattr(configuration, 'NAPALM_TIMEOUT', 30)
NAPALM_USERNAME = getattr(configuration, 'NAPALM_USERNAME', '')
PAGINATE_COUNT = getattr(configuration, 'PAGINATE_COUNT', 50)
PLUGINS = getattr(configuration, 'PLUGINS', [])
PLUGINS_CONFIG = getattr(configuration, 'PLUGINS_CONFIG', {})
PREFER_IPV4 = getattr(configuration, 'PREFER_IPV4', False)
RACK_ELEVATION_DEFAULT_UNIT_HEIGHT = getattr(configuration, 'RACK_ELEVATION_DEFAULT_UNIT_HEIGHT', 22)
RACK_ELEVATION_DEFAULT_UNIT_WIDTH = getattr(configuration, 'RACK_ELEVATION_DEFAULT_UNIT_WIDTH', 220)
REMOTE_AUTH_AUTO_CREATE_USER = getattr(configuration, 'REMOTE_AUTH_AUTO_CREATE_USER', False)
REMOTE_AUTH_BACKEND = getattr(configuration, 'REMOTE_AUTH_BACKEND', 'netbox.authentication.RemoteUserBackend')
REMOTE_AUTH_DEFAULT_GROUPS = getattr(configuration, 'REMOTE_AUTH_DEFAULT_GROUPS', [])
REMOTE_AUTH_DEFAULT_PERMISSIONS = getattr(configuration, 'REMOTE_AUTH_DEFAULT_PERMISSIONS', {})
REMOTE_AUTH_ENABLED = getattr(configuration, 'REMOTE_AUTH_ENABLED', False)
REMOTE_AUTH_HEADER = getattr(configuration, 'REMOTE_AUTH_HEADER', 'HTTP_REMOTE_USER')
RELEASE_CHECK_URL = getattr(configuration, 'RELEASE_CHECK_URL', None)
RELEASE_CHECK_TIMEOUT = getattr(configuration, 'RELEASE_CHECK_TIMEOUT', 24 * 3600)
REPORTS_ROOT = getattr(configuration, 'REPORTS_ROOT', os.path.join(BASE_DIR, 'reports')).rstrip('/')
SCRIPTS_ROOT = getattr(configuration, 'SCRIPTS_ROOT', os.path.join(BASE_DIR, 'scripts')).rstrip('/')
SESSION_FILE_PATH = getattr(configuration, 'SESSION_FILE_PATH', None)
SHORT_DATE_FORMAT = getattr(configuration, 'SHORT_DATE_FORMAT', 'Y-m-d')
SHORT_DATETIME_FORMAT = getattr(configuration, 'SHORT_DATETIME_FORMAT', 'Y-m-d H:i')
SHORT_TIME_FORMAT = getattr(configuration, 'SHORT_TIME_FORMAT', 'H:i:s')
TIME_FORMAT = getattr(configuration, 'TIME_FORMAT', 'g:i a')
TIME_ZONE = getattr(configuration, 'TIME_ZONE', 'UTC')
# Validate update repo URL and timeout
if RELEASE_CHECK_URL:
try:
URLValidator(RELEASE_CHECK_URL)
except ValidationError:
raise ImproperlyConfigured(
"RELEASE_CHECK_URL must be a valid API URL. Example: "
"https://api.github.com/repos/netbox-community/netbox"
)
# Enforce a minimum cache timeout for update checks
if RELEASE_CHECK_TIMEOUT < 3600:
raise ImproperlyConfigured("RELEASE_CHECK_TIMEOUT has to be at least 3600 seconds (1 hour)")
# TODO: Remove in v2.10
# Backward compatibility for REMOTE_AUTH_DEFAULT_PERMISSIONS
if type(REMOTE_AUTH_DEFAULT_PERMISSIONS) is not dict:
try:
REMOTE_AUTH_DEFAULT_PERMISSIONS = {perm: None for perm in REMOTE_AUTH_DEFAULT_PERMISSIONS}
warnings.warn(
"REMOTE_AUTH_DEFAULT_PERMISSIONS should be a dictionary. Backward compatibility will be removed in v2.10."
)
except TypeError:
raise ImproperlyConfigured("REMOTE_AUTH_DEFAULT_PERMISSIONS must be a dictionary.")
# Backward compatibility for REMOTE_AUTH_BACKEND
if REMOTE_AUTH_BACKEND == 'utilities.auth_backends.RemoteUserBackend':
warnings.warn(
"RemoteUserBackend has moved! Please update your configuration to:\n"
" REMOTE_AUTH_BACKEND='netbox.authentication.RemoteUserBackend'"
)
REMOTE_AUTH_BACKEND = 'netbox.authentication.RemoteUserBackend'
#
# Database
#
# Only PostgreSQL is supported
if METRICS_ENABLED:
DATABASE.update({
'ENGINE': 'django_prometheus.db.backends.postgresql'
})
else:
DATABASE.update({
'ENGINE': 'django.db.backends.postgresql'
})
DATABASES = {
'default': DATABASE,
}
#
# Media storage
#
if STORAGE_BACKEND is not None:
DEFAULT_FILE_STORAGE = STORAGE_BACKEND
# django-storages
if STORAGE_BACKEND.startswith('storages.'):
try:
import storages.utils
except ImportError:
raise ImproperlyConfigured(
"STORAGE_BACKEND is set to {} but django-storages is not present. It can be installed by running 'pip "
"install django-storages'.".format(STORAGE_BACKEND)
)
# Monkey-patch django-storages to fetch settings from STORAGE_CONFIG
def _setting(name, default=None):
if name in STORAGE_CONFIG:
return STORAGE_CONFIG[name]
return globals().get(name, default)
storages.utils.setting = _setting
if STORAGE_CONFIG and STORAGE_BACKEND is None:
warnings.warn(
"STORAGE_CONFIG has been set in configuration.py but STORAGE_BACKEND is not defined. STORAGE_CONFIG will be "
"ignored."
)
#
# Redis
#
# Background task queuing
if 'tasks' not in REDIS:
raise ImproperlyConfigured(
"REDIS section in configuration.py is missing the 'tasks' subsection."
)
TASKS_REDIS = REDIS['tasks']
TASKS_REDIS_HOST = TASKS_REDIS.get('HOST', 'localhost')
TASKS_REDIS_PORT = TASKS_REDIS.get('PORT', 6379)
TASKS_REDIS_SENTINELS = TASKS_REDIS.get('SENTINELS', [])
TASKS_REDIS_USING_SENTINEL = all([
isinstance(TASKS_REDIS_SENTINELS, (list, tuple)),
len(TASKS_REDIS_SENTINELS) > 0
])
TASKS_REDIS_SENTINEL_SERVICE = TASKS_REDIS.get('SENTINEL_SERVICE', 'default')
TASKS_REDIS_PASSWORD = TASKS_REDIS.get('PASSWORD', '')
TASKS_REDIS_DATABASE = TASKS_REDIS.get('DATABASE', 0)
TASKS_REDIS_DEFAULT_TIMEOUT = TASKS_REDIS.get('DEFAULT_TIMEOUT', 300)
TASKS_REDIS_SSL = TASKS_REDIS.get('SSL', False)
# Caching
if 'caching' not in REDIS:
raise ImproperlyConfigured(
"REDIS section in configuration.py is missing caching subsection."
)
CACHING_REDIS = REDIS['caching']
CACHING_REDIS_HOST = CACHING_REDIS.get('HOST', 'localhost')
CACHING_REDIS_PORT = CACHING_REDIS.get('PORT', 6379)
CACHING_REDIS_SENTINELS = CACHING_REDIS.get('SENTINELS', [])
CACHING_REDIS_USING_SENTINEL = all([
isinstance(CACHING_REDIS_SENTINELS, (list, tuple)),
len(CACHING_REDIS_SENTINELS) > 0
])
CACHING_REDIS_SENTINEL_SERVICE = CACHING_REDIS.get('SENTINEL_SERVICE', 'default')
CACHING_REDIS_PASSWORD = CACHING_REDIS.get('PASSWORD', '')
CACHING_REDIS_DATABASE = CACHING_REDIS.get('DATABASE', 0)
CACHING_REDIS_DEFAULT_TIMEOUT = CACHING_REDIS.get('DEFAULT_TIMEOUT', 300)
CACHING_REDIS_SSL = CACHING_REDIS.get('SSL', False)
#
# Sessions
#
if LOGIN_TIMEOUT is not None:
# Django default is 1209600 seconds (14 days)
SESSION_COOKIE_AGE = LOGIN_TIMEOUT
if SESSION_FILE_PATH is not None:
SESSION_ENGINE = 'django.contrib.sessions.backends.file'
#
# Email
#
EMAIL_HOST = EMAIL.get('SERVER')
EMAIL_HOST_USER = EMAIL.get('USERNAME')
EMAIL_HOST_PASSWORD = EMAIL.get('PASSWORD')
EMAIL_PORT = EMAIL.get('PORT', 25)
EMAIL_SSL_CERTFILE = EMAIL.get('SSL_CERTFILE')
EMAIL_SSL_KEYFILE = EMAIL.get('SSL_KEYFILE')
EMAIL_SUBJECT_PREFIX = '[NetBox] '
EMAIL_USE_SSL = EMAIL.get('USE_SSL', False)
EMAIL_USE_TLS = EMAIL.get('USE_TLS', False)
EMAIL_TIMEOUT = EMAIL.get('TIMEOUT', 10)
SERVER_EMAIL = EMAIL.get('FROM_EMAIL')
#
# Django
#
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'cacheops',
'corsheaders',
'debug_toolbar',
'django_filters',
'django_tables2',
'django_prometheus',
'mptt',
'rest_framework',
'taggit',
'timezone_field',
'circuits',
'dcim',
'ipam',
'extras',
'secrets',
'tenancy',
'users',
'utilities',
'virtualization',
'django_rq', # Must come after extras to allow overriding management commands
'drf_yasg',
]
# Middleware
MIDDLEWARE = [
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django_prometheus.middleware.PrometheusBeforeMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'utilities.middleware.ExceptionHandlingMiddleware',
'utilities.middleware.RemoteUserMiddleware',
'utilities.middleware.LoginRequiredMiddleware',
'utilities.middleware.APIVersionMiddleware',
'extras.middleware.ObjectChangeMiddleware',
'django_prometheus.middleware.PrometheusAfterMiddleware',
]
ROOT_URLCONF = 'netbox.urls'
TEMPLATES_DIR = BASE_DIR + '/templates'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATES_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.media',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'utilities.context_processors.settings_and_registry',
],
},
},
]
# Set up authentication backends
AUTHENTICATION_BACKENDS = [
REMOTE_AUTH_BACKEND,
'netbox.authentication.ObjectPermissionBackend',
]
# Internationalization
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_TZ = True
# WSGI
WSGI_APPLICATION = 'netbox.wsgi.application'
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
USE_X_FORWARDED_HOST = True
X_FRAME_OPTIONS = 'SAMEORIGIN'
# Static files (CSS, JavaScript, Images)
STATIC_ROOT = BASE_DIR + '/static'
STATIC_URL = '/{}static/'.format(BASE_PATH)
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "project-static"),
)
# Media
MEDIA_URL = '/{}media/'.format(BASE_PATH)
# Disable default limit of 1000 fields per request. Needed for bulk deletion of objects. (Added in Django 1.10.)
DATA_UPLOAD_MAX_NUMBER_FIELDS = None
# Messages
MESSAGE_TAGS = {
messages.ERROR: 'danger',
}
# Authentication URLs
LOGIN_URL = '/{}login/'.format(BASE_PATH)
CSRF_TRUSTED_ORIGINS = ALLOWED_HOSTS
# Exclude potentially sensitive models from wildcard view exemption. These may still be exempted
# by specifying the model individually in the EXEMPT_VIEW_PERMISSIONS configuration parameter.
EXEMPT_EXCLUDE_MODELS = (
('auth', 'group'),
('auth', 'user'),
('users', 'objectpermission'),
)
#
# Caching
#
if CACHING_REDIS_USING_SENTINEL:
CACHEOPS_SENTINEL = {
'locations': CACHING_REDIS_SENTINELS,
'service_name': CACHING_REDIS_SENTINEL_SERVICE,
'db': CACHING_REDIS_DATABASE,
}
else:
if CACHING_REDIS_SSL:
REDIS_CACHE_CON_STRING = 'rediss://'
else:
REDIS_CACHE_CON_STRING = 'redis://'
if CACHING_REDIS_PASSWORD:
REDIS_CACHE_CON_STRING = '{}:{}@'.format(REDIS_CACHE_CON_STRING, CACHING_REDIS_PASSWORD)
REDIS_CACHE_CON_STRING = '{}{}:{}/{}'.format(
REDIS_CACHE_CON_STRING,
CACHING_REDIS_HOST,
CACHING_REDIS_PORT,
CACHING_REDIS_DATABASE
)
CACHEOPS_REDIS = REDIS_CACHE_CON_STRING
if not CACHE_TIMEOUT:
CACHEOPS_ENABLED = False
else:
CACHEOPS_ENABLED = True
CACHEOPS_DEFAULTS = {
'timeout': CACHE_TIMEOUT
}
CACHEOPS = {
'auth.user': {'ops': 'get', 'timeout': 60 * 15},
'auth.*': {'ops': ('fetch', 'get')},
'auth.permission': {'ops': 'all'},
'circuits.*': {'ops': 'all'},
'dcim.region': None, # MPTT models are exempt due to raw sql
'dcim.rackgroup': None, # MPTT models are exempt due to raw sql
'dcim.*': {'ops': 'all'},
'ipam.*': {'ops': 'all'},
'extras.*': {'ops': 'all'},
'secrets.*': {'ops': 'all'},
'users.*': {'ops': 'all'},
'tenancy.tenantgroup': None, # MPTT models are exempt due to raw sql
'tenancy.*': {'ops': 'all'},
'virtualization.*': {'ops': 'all'},
}
CACHEOPS_DEGRADE_ON_FAILURE = True
#
# Django Prometheus
#
PROMETHEUS_EXPORT_MIGRATIONS = False
#
# Django filters
#
FILTERS_NULL_CHOICE_LABEL = 'None'
FILTERS_NULL_CHOICE_VALUE = 'null'
#
# Django REST framework (API)
#
REST_FRAMEWORK_VERSION = VERSION.rsplit('.', 1)[0] # Use major.minor as API version
REST_FRAMEWORK = {
'ALLOWED_VERSIONS': [REST_FRAMEWORK_VERSION],
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'netbox.api.TokenAuthentication',
),
'DEFAULT_FILTER_BACKENDS': (
'django_filters.rest_framework.DjangoFilterBackend',
),
'DEFAULT_PAGINATION_CLASS': 'netbox.api.OptionalLimitOffsetPagination',
'DEFAULT_PERMISSION_CLASSES': (
'netbox.api.TokenPermissions',
),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'netbox.api.FormlessBrowsableAPIRenderer',
),
'DEFAULT_VERSION': REST_FRAMEWORK_VERSION,
'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.AcceptHeaderVersioning',
'PAGE_SIZE': PAGINATE_COUNT,
'VIEW_NAME_FUNCTION': 'netbox.api.get_view_name',
}
#
# drf_yasg (OpenAPI/Swagger)
#
SWAGGER_SETTINGS = {
'DEFAULT_AUTO_SCHEMA_CLASS': 'utilities.custom_inspectors.NetBoxSwaggerAutoSchema',
'DEFAULT_FIELD_INSPECTORS': [
'utilities.custom_inspectors.JSONFieldInspector',
'utilities.custom_inspectors.NullableBooleanFieldInspector',
'utilities.custom_inspectors.CustomChoiceFieldInspector',
'utilities.custom_inspectors.SerializedPKRelatedFieldInspector',
'drf_yasg.inspectors.CamelCaseJSONFilter',
'drf_yasg.inspectors.ReferencingSerializerInspector',
'drf_yasg.inspectors.RelatedFieldInspector',
'drf_yasg.inspectors.ChoiceFieldInspector',
'drf_yasg.inspectors.FileFieldInspector',
'drf_yasg.inspectors.DictFieldInspector',
'drf_yasg.inspectors.SerializerMethodFieldInspector',
'drf_yasg.inspectors.SimpleFieldInspector',
'drf_yasg.inspectors.StringDefaultFieldInspector',
],
'DEFAULT_FILTER_INSPECTORS': [
'drf_yasg.inspectors.CoreAPICompatInspector',
],
'DEFAULT_INFO': 'netbox.urls.openapi_info',
'DEFAULT_MODEL_DEPTH': 1,
'DEFAULT_PAGINATOR_INSPECTORS': [
'utilities.custom_inspectors.NullablePaginatorInspector',
'drf_yasg.inspectors.DjangoRestResponsePagination',
'drf_yasg.inspectors.CoreAPICompatInspector',
],
'SECURITY_DEFINITIONS': {
'Bearer': {
'type': 'apiKey',
'name': 'Authorization',
'in': 'header',
}
},
'VALIDATOR_URL': None,
}
#
# Django RQ (Webhooks backend)
#
if TASKS_REDIS_USING_SENTINEL:
RQ_PARAMS = {
'SENTINELS': TASKS_REDIS_SENTINELS,
'MASTER_NAME': TASKS_REDIS_SENTINEL_SERVICE,
'DB': TASKS_REDIS_DATABASE,
'PASSWORD': TASKS_REDIS_PASSWORD,
'SOCKET_TIMEOUT': None,
'CONNECTION_KWARGS': {
'socket_connect_timeout': TASKS_REDIS_DEFAULT_TIMEOUT
},
}
else:
RQ_PARAMS = {
'HOST': TASKS_REDIS_HOST,
'PORT': TASKS_REDIS_PORT,
'DB': TASKS_REDIS_DATABASE,
'PASSWORD': TASKS_REDIS_PASSWORD,
'DEFAULT_TIMEOUT': TASKS_REDIS_DEFAULT_TIMEOUT,
'SSL': TASKS_REDIS_SSL,
}
RQ_QUEUES = {
'default': RQ_PARAMS, # Webhooks
'check_releases': RQ_PARAMS,
}
#
# NetBox internal settings
#
# Secrets
SECRETS_MIN_PUBKEY_SIZE = 2048
# Pagination
PER_PAGE_DEFAULTS = [
25, 50, 100, 250, 500, 1000
]
if PAGINATE_COUNT not in PER_PAGE_DEFAULTS:
PER_PAGE_DEFAULTS.append(PAGINATE_COUNT)
PER_PAGE_DEFAULTS = sorted(PER_PAGE_DEFAULTS)
#
# Plugins
#
for plugin_name in PLUGINS:
# Import plugin module
try:
plugin = importlib.import_module(plugin_name)
except ImportError:
raise ImproperlyConfigured(
"Unable to import plugin {}: Module not found. Check that the plugin module has been installed within the "
"correct Python environment.".format(plugin_name)
)
# Determine plugin config and add to INSTALLED_APPS.
try:
plugin_config = plugin.config
INSTALLED_APPS.append("{}.{}".format(plugin_config.__module__, plugin_config.__name__))
except AttributeError:
raise ImproperlyConfigured(
"Plugin {} does not provide a 'config' variable. This should be defined in the plugin's __init__.py file "
"and point to the PluginConfig subclass.".format(plugin_name)
)
# Validate user-provided configuration settings and assign defaults
if plugin_name not in PLUGINS_CONFIG:
PLUGINS_CONFIG[plugin_name] = {}
plugin_config.validate(PLUGINS_CONFIG[plugin_name])
# Add middleware
plugin_middleware = plugin_config.middleware
if plugin_middleware and type(plugin_middleware) in (list, tuple):
MIDDLEWARE.extend(plugin_middleware)
# Apply cacheops config
if type(plugin_config.caching_config) is not dict:
raise ImproperlyConfigured(
"Plugin {} caching_config must be a dictionary.".format(plugin_name)
)
CACHEOPS.update({
"{}.{}".format(plugin_name, key): value for key, value in plugin_config.caching_config.items()
})
| <filename>netbox/netbox/settings.py
import importlib
import logging
import os
import platform
import re
import socket
import warnings
from urllib.parse import urlsplit
from django.contrib.messages import constants as messages
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.core.validators import URLValidator
#
# Environment setup
#
VERSION = '2.9.4-dev'
# Hostname
HOSTNAME = platform.node()
# Set the base directory two levels up
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Validate Python version
if platform.python_version_tuple() < ('3', '6'):
raise RuntimeError(
"NetBox requires Python 3.6 or higher (current: Python {})".format(platform.python_version())
)
#
# Configuration import
#
# Import configuration parameters
try:
from netbox import configuration
except ImportError:
raise ImproperlyConfigured(
"Configuration file is not present. Please define netbox/netbox/configuration.py per the documentation."
)
# Enforce required configuration parameters
for parameter in ['ALLOWED_HOSTS', 'DATABASE', 'SECRET_KEY', 'REDIS']:
if not hasattr(configuration, parameter):
raise ImproperlyConfigured(
"Required parameter {} is missing from configuration.py.".format(parameter)
)
# Set required parameters
ALLOWED_HOSTS = getattr(configuration, 'ALLOWED_HOSTS')
DATABASE = getattr(configuration, 'DATABASE')
REDIS = getattr(configuration, 'REDIS')
SECRET_KEY = getattr(configuration, 'SECRET_KEY')
# Set optional parameters
ADMINS = getattr(configuration, 'ADMINS', [])
ALLOWED_URL_SCHEMES = getattr(configuration, 'ALLOWED_URL_SCHEMES', (
'file', 'ftp', 'ftps', 'http', 'https', 'irc', 'mailto', 'sftp', 'ssh', 'tel', 'telnet', 'tftp', 'vnc', 'xmpp',
))
BANNER_BOTTOM = getattr(configuration, 'BANNER_BOTTOM', '')
BANNER_LOGIN = getattr(configuration, 'BANNER_LOGIN', '')
BANNER_TOP = getattr(configuration, 'BANNER_TOP', '')
BASE_PATH = getattr(configuration, 'BASE_PATH', '')
if BASE_PATH:
BASE_PATH = BASE_PATH.strip('/') + '/' # Enforce trailing slash only
CACHE_TIMEOUT = getattr(configuration, 'CACHE_TIMEOUT', 900)
CHANGELOG_RETENTION = getattr(configuration, 'CHANGELOG_RETENTION', 90)
CORS_ORIGIN_ALLOW_ALL = getattr(configuration, 'CORS_ORIGIN_ALLOW_ALL', False)
CORS_ORIGIN_REGEX_WHITELIST = getattr(configuration, 'CORS_ORIGIN_REGEX_WHITELIST', [])
CORS_ORIGIN_WHITELIST = getattr(configuration, 'CORS_ORIGIN_WHITELIST', [])
DATE_FORMAT = getattr(configuration, 'DATE_FORMAT', 'N j, Y')
DATETIME_FORMAT = getattr(configuration, 'DATETIME_FORMAT', 'N j, Y g:i a')
DEBUG = getattr(configuration, 'DEBUG', False)
DEVELOPER = getattr(configuration, 'DEVELOPER', False)
DOCS_ROOT = getattr(configuration, 'DOCS_ROOT', os.path.join(os.path.dirname(BASE_DIR), 'docs'))
EMAIL = getattr(configuration, 'EMAIL', {})
ENFORCE_GLOBAL_UNIQUE = getattr(configuration, 'ENFORCE_GLOBAL_UNIQUE', False)
EXEMPT_VIEW_PERMISSIONS = getattr(configuration, 'EXEMPT_VIEW_PERMISSIONS', [])
HTTP_PROXIES = getattr(configuration, 'HTTP_PROXIES', None)
INTERNAL_IPS = getattr(configuration, 'INTERNAL_IPS', ('127.0.0.1', '::1'))
LOGGING = getattr(configuration, 'LOGGING', {})
LOGIN_REQUIRED = getattr(configuration, 'LOGIN_REQUIRED', False)
LOGIN_TIMEOUT = getattr(configuration, 'LOGIN_TIMEOUT', None)
MAINTENANCE_MODE = getattr(configuration, 'MAINTENANCE_MODE', False)
MAX_PAGE_SIZE = getattr(configuration, 'MAX_PAGE_SIZE', 1000)
MEDIA_ROOT = getattr(configuration, 'MEDIA_ROOT', os.path.join(BASE_DIR, 'media')).rstrip('/')
STORAGE_BACKEND = getattr(configuration, 'STORAGE_BACKEND', None)
STORAGE_CONFIG = getattr(configuration, 'STORAGE_CONFIG', {})
METRICS_ENABLED = getattr(configuration, 'METRICS_ENABLED', False)
NAPALM_ARGS = getattr(configuration, 'NAPALM_ARGS', {})
NAPALM_PASSWORD = getattr(configuration, 'NAPALM_PASSWORD', '')
NAPALM_TIMEOUT = getattr(configuration, 'NAPALM_TIMEOUT', 30)
NAPALM_USERNAME = getattr(configuration, 'NAPALM_USERNAME', '')
PAGINATE_COUNT = getattr(configuration, 'PAGINATE_COUNT', 50)
PLUGINS = getattr(configuration, 'PLUGINS', [])
PLUGINS_CONFIG = getattr(configuration, 'PLUGINS_CONFIG', {})
PREFER_IPV4 = getattr(configuration, 'PREFER_IPV4', False)
RACK_ELEVATION_DEFAULT_UNIT_HEIGHT = getattr(configuration, 'RACK_ELEVATION_DEFAULT_UNIT_HEIGHT', 22)
RACK_ELEVATION_DEFAULT_UNIT_WIDTH = getattr(configuration, 'RACK_ELEVATION_DEFAULT_UNIT_WIDTH', 220)
REMOTE_AUTH_AUTO_CREATE_USER = getattr(configuration, 'REMOTE_AUTH_AUTO_CREATE_USER', False)
REMOTE_AUTH_BACKEND = getattr(configuration, 'REMOTE_AUTH_BACKEND', 'netbox.authentication.RemoteUserBackend')
REMOTE_AUTH_DEFAULT_GROUPS = getattr(configuration, 'REMOTE_AUTH_DEFAULT_GROUPS', [])
REMOTE_AUTH_DEFAULT_PERMISSIONS = getattr(configuration, 'REMOTE_AUTH_DEFAULT_PERMISSIONS', {})
REMOTE_AUTH_ENABLED = getattr(configuration, 'REMOTE_AUTH_ENABLED', False)
REMOTE_AUTH_HEADER = getattr(configuration, 'REMOTE_AUTH_HEADER', 'HTTP_REMOTE_USER')
RELEASE_CHECK_URL = getattr(configuration, 'RELEASE_CHECK_URL', None)
RELEASE_CHECK_TIMEOUT = getattr(configuration, 'RELEASE_CHECK_TIMEOUT', 24 * 3600)
REPORTS_ROOT = getattr(configuration, 'REPORTS_ROOT', os.path.join(BASE_DIR, 'reports')).rstrip('/')
SCRIPTS_ROOT = getattr(configuration, 'SCRIPTS_ROOT', os.path.join(BASE_DIR, 'scripts')).rstrip('/')
SESSION_FILE_PATH = getattr(configuration, 'SESSION_FILE_PATH', None)
SHORT_DATE_FORMAT = getattr(configuration, 'SHORT_DATE_FORMAT', 'Y-m-d')
SHORT_DATETIME_FORMAT = getattr(configuration, 'SHORT_DATETIME_FORMAT', 'Y-m-d H:i')
SHORT_TIME_FORMAT = getattr(configuration, 'SHORT_TIME_FORMAT', 'H:i:s')
TIME_FORMAT = getattr(configuration, 'TIME_FORMAT', 'g:i a')
TIME_ZONE = getattr(configuration, 'TIME_ZONE', 'UTC')
# Validate update repo URL and timeout
if RELEASE_CHECK_URL:
try:
URLValidator(RELEASE_CHECK_URL)
except ValidationError:
raise ImproperlyConfigured(
"RELEASE_CHECK_URL must be a valid API URL. Example: "
"https://api.github.com/repos/netbox-community/netbox"
)
# Enforce a minimum cache timeout for update checks
if RELEASE_CHECK_TIMEOUT < 3600:
raise ImproperlyConfigured("RELEASE_CHECK_TIMEOUT has to be at least 3600 seconds (1 hour)")
# TODO: Remove in v2.10
# Backward compatibility for REMOTE_AUTH_DEFAULT_PERMISSIONS
if type(REMOTE_AUTH_DEFAULT_PERMISSIONS) is not dict:
try:
REMOTE_AUTH_DEFAULT_PERMISSIONS = {perm: None for perm in REMOTE_AUTH_DEFAULT_PERMISSIONS}
warnings.warn(
"REMOTE_AUTH_DEFAULT_PERMISSIONS should be a dictionary. Backward compatibility will be removed in v2.10."
)
except TypeError:
raise ImproperlyConfigured("REMOTE_AUTH_DEFAULT_PERMISSIONS must be a dictionary.")
# Backward compatibility for REMOTE_AUTH_BACKEND
if REMOTE_AUTH_BACKEND == 'utilities.auth_backends.RemoteUserBackend':
warnings.warn(
"RemoteUserBackend has moved! Please update your configuration to:\n"
" REMOTE_AUTH_BACKEND='netbox.authentication.RemoteUserBackend'"
)
REMOTE_AUTH_BACKEND = 'netbox.authentication.RemoteUserBackend'
#
# Database
#
# Only PostgreSQL is supported
if METRICS_ENABLED:
DATABASE.update({
'ENGINE': 'django_prometheus.db.backends.postgresql'
})
else:
DATABASE.update({
'ENGINE': 'django.db.backends.postgresql'
})
DATABASES = {
'default': DATABASE,
}
#
# Media storage
#
if STORAGE_BACKEND is not None:
DEFAULT_FILE_STORAGE = STORAGE_BACKEND
# django-storages
if STORAGE_BACKEND.startswith('storages.'):
try:
import storages.utils
except ImportError:
raise ImproperlyConfigured(
"STORAGE_BACKEND is set to {} but django-storages is not present. It can be installed by running 'pip "
"install django-storages'.".format(STORAGE_BACKEND)
)
# Monkey-patch django-storages to fetch settings from STORAGE_CONFIG
def _setting(name, default=None):
if name in STORAGE_CONFIG:
return STORAGE_CONFIG[name]
return globals().get(name, default)
storages.utils.setting = _setting
if STORAGE_CONFIG and STORAGE_BACKEND is None:
warnings.warn(
"STORAGE_CONFIG has been set in configuration.py but STORAGE_BACKEND is not defined. STORAGE_CONFIG will be "
"ignored."
)
#
# Redis
#
# Background task queuing
if 'tasks' not in REDIS:
raise ImproperlyConfigured(
"REDIS section in configuration.py is missing the 'tasks' subsection."
)
TASKS_REDIS = REDIS['tasks']
TASKS_REDIS_HOST = TASKS_REDIS.get('HOST', 'localhost')
TASKS_REDIS_PORT = TASKS_REDIS.get('PORT', 6379)
TASKS_REDIS_SENTINELS = TASKS_REDIS.get('SENTINELS', [])
TASKS_REDIS_USING_SENTINEL = all([
isinstance(TASKS_REDIS_SENTINELS, (list, tuple)),
len(TASKS_REDIS_SENTINELS) > 0
])
TASKS_REDIS_SENTINEL_SERVICE = TASKS_REDIS.get('SENTINEL_SERVICE', 'default')
TASKS_REDIS_PASSWORD = TASKS_REDIS.get('PASSWORD', '')
TASKS_REDIS_DATABASE = TASKS_REDIS.get('DATABASE', 0)
TASKS_REDIS_DEFAULT_TIMEOUT = TASKS_REDIS.get('DEFAULT_TIMEOUT', 300)
TASKS_REDIS_SSL = TASKS_REDIS.get('SSL', False)
# Caching
if 'caching' not in REDIS:
raise ImproperlyConfigured(
"REDIS section in configuration.py is missing caching subsection."
)
CACHING_REDIS = REDIS['caching']
CACHING_REDIS_HOST = CACHING_REDIS.get('HOST', 'localhost')
CACHING_REDIS_PORT = CACHING_REDIS.get('PORT', 6379)
CACHING_REDIS_SENTINELS = CACHING_REDIS.get('SENTINELS', [])
CACHING_REDIS_USING_SENTINEL = all([
isinstance(CACHING_REDIS_SENTINELS, (list, tuple)),
len(CACHING_REDIS_SENTINELS) > 0
])
CACHING_REDIS_SENTINEL_SERVICE = CACHING_REDIS.get('SENTINEL_SERVICE', 'default')
CACHING_REDIS_PASSWORD = CACHING_REDIS.get('PASSWORD', '')
CACHING_REDIS_DATABASE = CACHING_REDIS.get('DATABASE', 0)
CACHING_REDIS_DEFAULT_TIMEOUT = CACHING_REDIS.get('DEFAULT_TIMEOUT', 300)
CACHING_REDIS_SSL = CACHING_REDIS.get('SSL', False)
#
# Sessions
#
if LOGIN_TIMEOUT is not None:
# Django default is 1209600 seconds (14 days)
SESSION_COOKIE_AGE = LOGIN_TIMEOUT
if SESSION_FILE_PATH is not None:
SESSION_ENGINE = 'django.contrib.sessions.backends.file'
#
# Email
#
EMAIL_HOST = EMAIL.get('SERVER')
EMAIL_HOST_USER = EMAIL.get('USERNAME')
EMAIL_HOST_PASSWORD = EMAIL.get('PASSWORD')
EMAIL_PORT = EMAIL.get('PORT', 25)
EMAIL_SSL_CERTFILE = EMAIL.get('SSL_CERTFILE')
EMAIL_SSL_KEYFILE = EMAIL.get('SSL_KEYFILE')
EMAIL_SUBJECT_PREFIX = '[NetBox] '
EMAIL_USE_SSL = EMAIL.get('USE_SSL', False)
EMAIL_USE_TLS = EMAIL.get('USE_TLS', False)
EMAIL_TIMEOUT = EMAIL.get('TIMEOUT', 10)
SERVER_EMAIL = EMAIL.get('FROM_EMAIL')
#
# Django
#
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'cacheops',
'corsheaders',
'debug_toolbar',
'django_filters',
'django_tables2',
'django_prometheus',
'mptt',
'rest_framework',
'taggit',
'timezone_field',
'circuits',
'dcim',
'ipam',
'extras',
'secrets',
'tenancy',
'users',
'utilities',
'virtualization',
'django_rq', # Must come after extras to allow overriding management commands
'drf_yasg',
]
# Middleware
MIDDLEWARE = [
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django_prometheus.middleware.PrometheusBeforeMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'utilities.middleware.ExceptionHandlingMiddleware',
'utilities.middleware.RemoteUserMiddleware',
'utilities.middleware.LoginRequiredMiddleware',
'utilities.middleware.APIVersionMiddleware',
'extras.middleware.ObjectChangeMiddleware',
'django_prometheus.middleware.PrometheusAfterMiddleware',
]
ROOT_URLCONF = 'netbox.urls'
TEMPLATES_DIR = BASE_DIR + '/templates'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATES_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.media',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'utilities.context_processors.settings_and_registry',
],
},
},
]
# Set up authentication backends
AUTHENTICATION_BACKENDS = [
REMOTE_AUTH_BACKEND,
'netbox.authentication.ObjectPermissionBackend',
]
# Internationalization
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_TZ = True
# WSGI
WSGI_APPLICATION = 'netbox.wsgi.application'
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
USE_X_FORWARDED_HOST = True
X_FRAME_OPTIONS = 'SAMEORIGIN'
# Static files (CSS, JavaScript, Images)
STATIC_ROOT = BASE_DIR + '/static'
STATIC_URL = '/{}static/'.format(BASE_PATH)
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "project-static"),
)
# Media
MEDIA_URL = '/{}media/'.format(BASE_PATH)
# Disable default limit of 1000 fields per request. Needed for bulk deletion of objects. (Added in Django 1.10.)
DATA_UPLOAD_MAX_NUMBER_FIELDS = None
# Messages
MESSAGE_TAGS = {
messages.ERROR: 'danger',
}
# Authentication URLs
LOGIN_URL = '/{}login/'.format(BASE_PATH)
CSRF_TRUSTED_ORIGINS = ALLOWED_HOSTS
# Exclude potentially sensitive models from wildcard view exemption. These may still be exempted
# by specifying the model individually in the EXEMPT_VIEW_PERMISSIONS configuration parameter.
EXEMPT_EXCLUDE_MODELS = (
('auth', 'group'),
('auth', 'user'),
('users', 'objectpermission'),
)
#
# Caching
#
if CACHING_REDIS_USING_SENTINEL:
CACHEOPS_SENTINEL = {
'locations': CACHING_REDIS_SENTINELS,
'service_name': CACHING_REDIS_SENTINEL_SERVICE,
'db': CACHING_REDIS_DATABASE,
}
else:
if CACHING_REDIS_SSL:
REDIS_CACHE_CON_STRING = 'rediss://'
else:
REDIS_CACHE_CON_STRING = 'redis://'
if CACHING_REDIS_PASSWORD:
REDIS_CACHE_CON_STRING = '{}:{}@'.format(REDIS_CACHE_CON_STRING, CACHING_REDIS_PASSWORD)
REDIS_CACHE_CON_STRING = '{}{}:{}/{}'.format(
REDIS_CACHE_CON_STRING,
CACHING_REDIS_HOST,
CACHING_REDIS_PORT,
CACHING_REDIS_DATABASE
)
CACHEOPS_REDIS = REDIS_CACHE_CON_STRING
if not CACHE_TIMEOUT:
CACHEOPS_ENABLED = False
else:
CACHEOPS_ENABLED = True
CACHEOPS_DEFAULTS = {
'timeout': CACHE_TIMEOUT
}
CACHEOPS = {
'auth.user': {'ops': 'get', 'timeout': 60 * 15},
'auth.*': {'ops': ('fetch', 'get')},
'auth.permission': {'ops': 'all'},
'circuits.*': {'ops': 'all'},
'dcim.region': None, # MPTT models are exempt due to raw sql
'dcim.rackgroup': None, # MPTT models are exempt due to raw sql
'dcim.*': {'ops': 'all'},
'ipam.*': {'ops': 'all'},
'extras.*': {'ops': 'all'},
'secrets.*': {'ops': 'all'},
'users.*': {'ops': 'all'},
'tenancy.tenantgroup': None, # MPTT models are exempt due to raw sql
'tenancy.*': {'ops': 'all'},
'virtualization.*': {'ops': 'all'},
}
CACHEOPS_DEGRADE_ON_FAILURE = True
#
# Django Prometheus
#
PROMETHEUS_EXPORT_MIGRATIONS = False
#
# Django filters
#
FILTERS_NULL_CHOICE_LABEL = 'None'
FILTERS_NULL_CHOICE_VALUE = 'null'
#
# Django REST framework (API)
#
REST_FRAMEWORK_VERSION = VERSION.rsplit('.', 1)[0] # Use major.minor as API version
REST_FRAMEWORK = {
'ALLOWED_VERSIONS': [REST_FRAMEWORK_VERSION],
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'netbox.api.TokenAuthentication',
),
'DEFAULT_FILTER_BACKENDS': (
'django_filters.rest_framework.DjangoFilterBackend',
),
'DEFAULT_PAGINATION_CLASS': 'netbox.api.OptionalLimitOffsetPagination',
'DEFAULT_PERMISSION_CLASSES': (
'netbox.api.TokenPermissions',
),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'netbox.api.FormlessBrowsableAPIRenderer',
),
'DEFAULT_VERSION': REST_FRAMEWORK_VERSION,
'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.AcceptHeaderVersioning',
'PAGE_SIZE': PAGINATE_COUNT,
'VIEW_NAME_FUNCTION': 'netbox.api.get_view_name',
}
#
# drf_yasg (OpenAPI/Swagger)
#
SWAGGER_SETTINGS = {
'DEFAULT_AUTO_SCHEMA_CLASS': 'utilities.custom_inspectors.NetBoxSwaggerAutoSchema',
'DEFAULT_FIELD_INSPECTORS': [
'utilities.custom_inspectors.JSONFieldInspector',
'utilities.custom_inspectors.NullableBooleanFieldInspector',
'utilities.custom_inspectors.CustomChoiceFieldInspector',
'utilities.custom_inspectors.SerializedPKRelatedFieldInspector',
'drf_yasg.inspectors.CamelCaseJSONFilter',
'drf_yasg.inspectors.ReferencingSerializerInspector',
'drf_yasg.inspectors.RelatedFieldInspector',
'drf_yasg.inspectors.ChoiceFieldInspector',
'drf_yasg.inspectors.FileFieldInspector',
'drf_yasg.inspectors.DictFieldInspector',
'drf_yasg.inspectors.SerializerMethodFieldInspector',
'drf_yasg.inspectors.SimpleFieldInspector',
'drf_yasg.inspectors.StringDefaultFieldInspector',
],
'DEFAULT_FILTER_INSPECTORS': [
'drf_yasg.inspectors.CoreAPICompatInspector',
],
'DEFAULT_INFO': 'netbox.urls.openapi_info',
'DEFAULT_MODEL_DEPTH': 1,
'DEFAULT_PAGINATOR_INSPECTORS': [
'utilities.custom_inspectors.NullablePaginatorInspector',
'drf_yasg.inspectors.DjangoRestResponsePagination',
'drf_yasg.inspectors.CoreAPICompatInspector',
],
'SECURITY_DEFINITIONS': {
'Bearer': {
'type': 'apiKey',
'name': 'Authorization',
'in': 'header',
}
},
'VALIDATOR_URL': None,
}
#
# Django RQ (Webhooks backend)
#
if TASKS_REDIS_USING_SENTINEL:
RQ_PARAMS = {
'SENTINELS': TASKS_REDIS_SENTINELS,
'MASTER_NAME': TASKS_REDIS_SENTINEL_SERVICE,
'DB': TASKS_REDIS_DATABASE,
'PASSWORD': TASKS_REDIS_PASSWORD,
'SOCKET_TIMEOUT': None,
'CONNECTION_KWARGS': {
'socket_connect_timeout': TASKS_REDIS_DEFAULT_TIMEOUT
},
}
else:
RQ_PARAMS = {
'HOST': TASKS_REDIS_HOST,
'PORT': TASKS_REDIS_PORT,
'DB': TASKS_REDIS_DATABASE,
'PASSWORD': TASKS_REDIS_PASSWORD,
'DEFAULT_TIMEOUT': TASKS_REDIS_DEFAULT_TIMEOUT,
'SSL': TASKS_REDIS_SSL,
}
RQ_QUEUES = {
'default': RQ_PARAMS, # Webhooks
'check_releases': RQ_PARAMS,
}
#
# NetBox internal settings
#
# Secrets
SECRETS_MIN_PUBKEY_SIZE = 2048
# Pagination
PER_PAGE_DEFAULTS = [
25, 50, 100, 250, 500, 1000
]
if PAGINATE_COUNT not in PER_PAGE_DEFAULTS:
PER_PAGE_DEFAULTS.append(PAGINATE_COUNT)
PER_PAGE_DEFAULTS = sorted(PER_PAGE_DEFAULTS)
#
# Plugins
#
for plugin_name in PLUGINS:
# Import plugin module
try:
plugin = importlib.import_module(plugin_name)
except ImportError:
raise ImproperlyConfigured(
"Unable to import plugin {}: Module not found. Check that the plugin module has been installed within the "
"correct Python environment.".format(plugin_name)
)
# Determine plugin config and add to INSTALLED_APPS.
try:
plugin_config = plugin.config
INSTALLED_APPS.append("{}.{}".format(plugin_config.__module__, plugin_config.__name__))
except AttributeError:
raise ImproperlyConfigured(
"Plugin {} does not provide a 'config' variable. This should be defined in the plugin's __init__.py file "
"and point to the PluginConfig subclass.".format(plugin_name)
)
# Validate user-provided configuration settings and assign defaults
if plugin_name not in PLUGINS_CONFIG:
PLUGINS_CONFIG[plugin_name] = {}
plugin_config.validate(PLUGINS_CONFIG[plugin_name])
# Add middleware
plugin_middleware = plugin_config.middleware
if plugin_middleware and type(plugin_middleware) in (list, tuple):
MIDDLEWARE.extend(plugin_middleware)
# Apply cacheops config
if type(plugin_config.caching_config) is not dict:
raise ImproperlyConfigured(
"Plugin {} caching_config must be a dictionary.".format(plugin_name)
)
CACHEOPS.update({
"{}.{}".format(plugin_name, key): value for key, value in plugin_config.caching_config.items()
})
| en | 0.571954 | # # Environment setup # # Hostname # Set the base directory two levels up # Validate Python version # # Configuration import # # Import configuration parameters # Enforce required configuration parameters # Set required parameters # Set optional parameters # Enforce trailing slash only # Validate update repo URL and timeout # Enforce a minimum cache timeout for update checks # TODO: Remove in v2.10 # Backward compatibility for REMOTE_AUTH_DEFAULT_PERMISSIONS # Backward compatibility for REMOTE_AUTH_BACKEND # # Database # # Only PostgreSQL is supported # # Media storage # # django-storages # Monkey-patch django-storages to fetch settings from STORAGE_CONFIG # # Redis # # Background task queuing # Caching # # Sessions # # Django default is 1209600 seconds (14 days) # # Email # # # Django # # Must come after extras to allow overriding management commands # Middleware # Set up authentication backends # Internationalization # WSGI # Static files (CSS, JavaScript, Images) # Media # Disable default limit of 1000 fields per request. Needed for bulk deletion of objects. (Added in Django 1.10.) # Messages # Authentication URLs # Exclude potentially sensitive models from wildcard view exemption. These may still be exempted # by specifying the model individually in the EXEMPT_VIEW_PERMISSIONS configuration parameter. # # Caching # # MPTT models are exempt due to raw sql # MPTT models are exempt due to raw sql # MPTT models are exempt due to raw sql # # Django Prometheus # # # Django filters # # # Django REST framework (API) # # Use major.minor as API version # # drf_yasg (OpenAPI/Swagger) # # # Django RQ (Webhooks backend) # # Webhooks # # NetBox internal settings # # Secrets # Pagination # # Plugins # # Import plugin module # Determine plugin config and add to INSTALLED_APPS. # Validate user-provided configuration settings and assign defaults # Add middleware # Apply cacheops config | 2.27069 | 2 |
models_vqa/vis.py | jalonzou/snmn | 71 | 6624750 | <filename>models_vqa/vis.py
import matplotlib; matplotlib.use('Agg') # NOQA
import os
import json
import skimage.io
import skimage.transform
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Arrow
from .config import cfg
from util import boxes
def vis_one_vqa(img_path, words, vqa_scores, label, module_names, answers,
                txt_att, att_stack, stack_ptr, module_prob, save_path):
    """Save a single-figure diagnostic for one VQA example.

    Lays out a 5x3 grid of panels: input image + question, module weights,
    textual attention, answer logits, stack pointer, the full attention
    stack, then one image-attention panel per controller timestep. The
    figure is written to ``save_path`` and closed afterwards.
    """
    img = skimage.io.imread(img_path)
    h = plt.figure(figsize=(20, 20))
    # number of controller timesteps
    T = cfg.MODEL.T_CTRL
    # img
    plt.subplot(5, 3, 1)
    plt.imshow(img)
    # wrap the question text at 10 words per line
    plt.title(
        '\n'.join([' '.join(words[b:b+10]) for b in range(0, len(words), 10)]))
    # module weights
    plt.subplot(5, 3, 2)
    plt.imshow(module_prob.T, cmap='Reds')
    plt.colorbar()
    plt.xticks(range(T), range(T))
    plt.yticks(range(len(module_names)), module_names, size='small')
    plt.title('module weights at controller timestep')
    # textual attention
    plt.subplot(5, 3, 3)
    # print(np.sum(txt_att, axis=1))
    # print(np.sum(txt_att[:, :len(words)], axis=1))
    plt.imshow(txt_att[:, :len(words)], cmap='Reds')
    plt.colorbar()
    plt.xticks(range(len(words)), words, rotation=90)
    plt.yticks(range(T), range(T))
    plt.ylabel('controller timestep')
    plt.title('textual attention at controller timestep')
    # scores
    plt.subplot(5, 3, 4)
    plt.imshow(vqa_scores[np.newaxis, :], cmap='Reds')
    plt.xticks(range(len(answers)), answers, rotation=90)
    plt.yticks([], [])
    plt.xlabel('answer logits')
    plt.title('prediction: %s label: %s' % (
        answers[np.argmax(vqa_scores)], answers[label]))
    plt.subplot(5, 3, 5)
    plt.imshow(stack_ptr.T, cmap='Reds')
    plt.colorbar()
    plt.xticks(range(T), range(T))
    plt.yticks(range(stack_ptr.shape[1]), range(stack_ptr.shape[1]))
    plt.ylabel('stack depth')
    plt.xlabel('stack pointer at controller timestep')
    # Visualize the attention stack
    # att_stack is T x H x W x L -> L x H x T x W
    plt.subplot(5, 3, 6)
    # NOTE(review): T is rebound here from att_stack's first axis;
    # presumably equal to cfg.MODEL.T_CTRL -- confirm upstream.
    T, H, W, L = att_stack.shape
    plt.imshow(att_stack.transpose((3, 1, 0, 2)).reshape((L*H, T*W)))
    plt.colorbar()
    plt.xticks(W // 2 + np.arange(T) * W, range(T))
    plt.yticks(np.arange(L) * H, np.arange(L) * H)
    plt.ylabel('stack depth')
    plt.xlabel('image attention at controller timestep')
    # image attention at each timestep
    for t in range(T):
        plt.subplot(5, 3, t+7)
        # attention map read from the stack at the soft pointer position
        att = np.sum(att_stack[t] * stack_ptr[t], axis=-1)
        img_with_att = attention_interpolation(img, att)
        plt.imshow(img_with_att)
        plt.xlabel('controller timestep t = %d' % t)
    plt.savefig(save_path)
    print('visualization saved to ' + save_path)
    plt.close(h)
def vis_one_loc(img_path, words, loc_scores, bbox_pred, bbox_gt, module_names,
                txt_att, att_stack, stack_ptr, module_prob, save_path):
    """Save a single-figure diagnostic for one localization example.

    Same 5x3 layout as ``vis_one_vqa`` except that panel 1 also draws the
    predicted (red) and ground-truth (yellow) boxes and panel 4 shows the
    H_FEAT x W_FEAT localization score map instead of answer logits.
    """
    img = skimage.io.imread(img_path)
    h = plt.figure(figsize=(20, 20))
    # number of controller timesteps
    T = cfg.MODEL.T_CTRL
    # img
    plt.subplot(5, 3, 1)
    plt.imshow(img)
    _print_bbox(bbox_pred, 'r')
    _print_bbox(bbox_gt, 'y')
    plt.title(
        '\n'.join([' '.join(words[b:b+10]) for b in range(0, len(words), 10)])
        + '\nred: prediction yellow: ground-truth')
    # module weights
    plt.subplot(5, 3, 2)
    plt.imshow(module_prob.T, cmap='Reds')
    plt.colorbar()
    plt.xticks(range(T), range(T))
    plt.yticks(range(len(module_names)), module_names, size='small')
    plt.title('module weights at controller timestep')
    # textual attention
    plt.subplot(5, 3, 3)
    # print(np.sum(txt_att, axis=1))
    # print(np.sum(txt_att[:, :len(words)], axis=1))
    plt.imshow(txt_att[:, :len(words)], cmap='Reds')
    plt.colorbar()
    plt.xticks(range(len(words)), words, rotation=90)
    plt.yticks(range(T), range(T))
    plt.ylabel('controller timestep')
    plt.title('textual attention at controller timestep')
    # scores
    plt.subplot(5, 3, 4)
    # reshape the flat score vector back onto the feature grid
    plt.imshow(loc_scores.reshape(cfg.MODEL.H_FEAT, cfg.MODEL.W_FEAT))
    plt.colorbar()
    plt.title('localization scores')
    plt.subplot(5, 3, 5)
    plt.imshow(stack_ptr.T, cmap='Reds')
    plt.colorbar()
    plt.xticks(range(T), range(T))
    plt.yticks(range(stack_ptr.shape[1]), range(stack_ptr.shape[1]))
    plt.ylabel('stack depth')
    plt.xlabel('stack pointer at controller timestep')
    # Visualize the attention stack
    # att_stack is T x H x W x L -> L x H x T x W
    plt.subplot(5, 3, 6)
    # NOTE(review): T is rebound here from att_stack's first axis;
    # presumably equal to cfg.MODEL.T_CTRL -- confirm upstream.
    T, H, W, L = att_stack.shape
    plt.imshow(att_stack.transpose((3, 1, 0, 2)).reshape((L*H, T*W)))
    plt.colorbar()
    plt.xticks(W // 2 + np.arange(T) * W, range(T))
    plt.yticks(np.arange(L) * H, np.arange(L) * H)
    plt.ylabel('stack depth')
    plt.xlabel('image attention at controller timestep')
    # image attention at each timestep
    for t in range(T):
        plt.subplot(5, 3, t+7)
        # attention map read from the stack at the soft pointer position
        att = np.sum(att_stack[t] * stack_ptr[t], axis=-1)
        img_with_att = attention_interpolation(img, att)
        plt.imshow(img_with_att)
        plt.xlabel('controller timestep t = %d' % t)
    plt.savefig(save_path)
    print('visualization saved to ' + save_path)
    plt.close(h)
def _format_str(s):
words = s.split()
s = '\n'.join([' '.join(words[b:b+8]) for b in range(0, len(words), 8)])
return s
# One-sentence, human-readable description of what each NMN module does.
# Keys are the module names used by the model; values are display text.
# NOTE(review): not referenced elsewhere in this file -- presumably kept
# for external consumers or future captions; confirm before removing.
MODULE_DESCRIPTION_TEXT = {
    '_NoOp':
        'it doesn\'t do anything (i.e. nothing is updated in this timestep).',  # NoQA
    '_Find':
        'it looks at new image regions based on attended text.',  # NoQA
    '_Transform':
        'it shifts the image attention to somewhere new, conditioned on its previous glimpse.',  # NoQA
    '_Filter':
        'it tries to select out some image regions from where it looked before (based on attended text).',  # NoQA
    '_And':
        'it takes the intersection of the program\'s two previous glimpses as inputs, returning their intersection.',  # NoQA
    '_Or':
        'it takes the union of the program\'s two previous glimpses as inputs, returning their union.',  # NoQA
    '_Scene':
        'it tries to look at some objects in the image.',  # NoQA
    '_DescribeOne':
        'it takes the program\'s previous glimpse as input, and tries to infer the answer from it.',  # NoQA
    '_DescribeTwo':
        'it takes the program\'s two previous glimpses as inputs, and tries to infer the answer from them.',  # NoQA
}
def _find_txt_segs(keep, words):
segs = []
elems = []
for n, k in enumerate(keep):
if k:
elems.append(words[n])
else:
if elems:
segs.append('"' + ' '.join(elems) + '"')
elems = []
if elems:
segs.append('"' + ' '.join(elems) + '"')
return segs
def _extract_txt_att(words, atts, thresh=0.5):
    """
    Take at most 3 words that have at least 50% of the max attention.

    Returns a comma-separated string of double-quoted word runs (see
    ``_find_txt_segs``). Returns '' for empty input.
    """
    # Robustness fix: the original unconditionally read atts_sorted[2],
    # which raised IndexError for questions with fewer than 3 tokens.
    if len(atts) == 0:
        return ''
    atts_sorted = np.sort(atts)[::-1]
    third_highest = atts_sorted[2] if len(atts_sorted) > 2 else atts_sorted[-1]
    att_min = max(third_highest, atts_sorted[0]*thresh)
    # collect those words above att_min
    keep = (atts >= att_min)
    # assert np.any(keep)
    vis_txt = ', '.join(_find_txt_segs(keep, words))
    return vis_txt
def vis_one_stepwise(img_path, words, module_names, txt_att, att_stack,
                     stack_ptr, module_prob, save_path, vis_type,
                     vqa_scores=None, label=None, answers=None,
                     loc_scores=None, bbox_pred=None, bbox_gt=None):
    """Save a step-by-step visualization of the module program.

    Renders one panel per displayed controller timestep (hard-argmax module
    choice, attended question words, and the image attention read from the
    stack), followed by an answer panel ('vqa') or a bbox panel ('loc').
    Also writes a sidecar ``<save_path>.txt`` JSON file with the question
    and the prediction/ground truth.

    Raises:
        ValueError: if ``vis_type`` is neither 'vqa' nor 'loc'.
    """
    T = cfg.MODEL.T_CTRL
    # M = len(module_names)
    img = skimage.io.imread(img_path)
    # scale factors from the original image to the fixed 320x480 canvas,
    # used to redraw bboxes in canvas coordinates
    scale_x = 480. / img.shape[1]
    scale_y = 320. / img.shape[0]
    img = skimage.transform.resize(img, (320, 480))
    h = plt.figure(figsize=(18, (T+2) * 5))
    if cfg.TEST.VIS_SHOW_IMG:
        # Image and question
        plt.subplot((T+2)*2, 3, (3, 6))
        plt.imshow(img)
        plt.axis('off')
        plt.title('\n'.join(
            [' '.join(words[b:b+6]) for b in range(0, len(words), 6)]),
            fontsize=20)
    # Modules at each timestep (hard argmax over the soft module weights)
    m_list = [module_names[np.argmax(module_prob[t])] for t in range(T)]
    # Fix: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # use the builtin bool dtype instead.
    is_disp = np.ones(T, bool)
    is_ans = np.zeros(T, bool)
    if vis_type == 'vqa':
        # Show the output of the last "_Describe*" module only; hide
        # _NoOp steps and any earlier _Describe* steps.
        describe_t = -1
        for t in range(T-1, -1, -1):
            if m_list[t].startswith('_Describe'):
                describe_t = t
                break
        for t in range(T):
            is_disp[t] = not (
                (m_list[t] == '_NoOp') or
                (m_list[t].startswith('_Describe') and t != describe_t))
        is_ans[describe_t] = True
    else:
        # 'loc': always display the final timestep as the answer panel
        for t in range(T):
            is_disp[t] = (t == T-1) or not (
                (m_list[t] == '_NoOp') or
                (m_list[t].startswith('_Describe')))
        is_ans[T-1] = True
    t_disp = 0
    for t in range(T):
        if not is_disp[t]:
            continue
        show_ans = is_ans[t]
        m = m_list[t]
        # modules without a textual argument get no attended-word caption
        if m in {'_Scene', '_NoOp', '_And', '_Or'}:
            att_txt = ''
        else:
            att_txt = _extract_txt_att(words, txt_att[t, :len(words)])
        # map internal module names to friendlier display names
        if t == 0 and m == '_Filter':
            m_display = 'find'
        else:
            m_display = m[1:].replace(
                'Find', 'look_for').replace(
                'Filter', 'select').replace(
                'Transform', 'related_by').replace(
                'DescribeOne', 'Answer').replace(
                'DescribeTwo', 'Compare_Two').replace(
                'And', 'Intersect').replace('Or', 'Combine').lower()
        if show_ans and vis_type == 'loc' and \
                m in {'_NoOp', '_DescribeOne', '_DescribeTwo'}:
            m_display = 'bbox_regression'
            att_txt = ''
        # output attention
        if show_ans:
            if vis_type == 'vqa':
                plt.subplot((T+2)*2, 3, (6*t_disp+9, 6*t_disp+12))
                plt.imshow(np.ones(img.shape, np.float32))
                plt.axis('off')
                if cfg.TEST.VIS_SHOW_ANSWER:
                    answer_txt = (
                        'predicted answer: "%s"\ntrue answer: "%s"' % (
                            answers[np.argmax(vqa_scores)], answers[label]))
                else:
                    answer_txt = '(model prediction not shown)'
                plt.text(10, 100, answer_txt, fontsize=20)
            elif vis_type == 'loc':
                plt.subplot((T+2)*2, 3, (6*t_disp+9, 6*t_disp+12))
                plt.imshow(img)
                _print_bbox(bbox_gt, 'y', scale_x, scale_y)
                if cfg.TEST.VIS_SHOW_ANSWER:
                    _print_bbox(bbox_pred, 'r', scale_x, scale_y)
                    IoU = boxes.bbox_iou(bbox_pred, bbox_gt)
                    txt = 'prediction: red box\nground-truth: yellow box\n' \
                        '(IoU = %.2f)' % IoU
                else:
                    txt = 'prediction: (not shown)\nground-truth: yellow box'
                plt.xticks([], [])
                plt.yticks([], [])
                plt.xlabel(txt, fontsize=20)
            else:
                # Fix: corrected 'Unknow' typo in the error message
                raise ValueError('Unknown vis_type ' + str(vis_type))
        else:
            plt.subplot((T+2)*2, 3, (6*t_disp+9, 6*t_disp+12))
            # attention map read from the stack at the soft pointer position
            att = np.sum(att_stack[t] * stack_ptr[t], axis=-1)
            img_with_att = attention_interpolation(img, att)
            plt.imshow(img_with_att)
            plt.xticks([], [])
            plt.yticks([], [])
        plt.title('%s(%s)\n' % (m_display, att_txt), fontsize=24)
        # downward arrow connecting this panel to the previous one
        patches = Arrow(
            img.shape[1] // 2, -35, 0, 32, width=40, color='k', clip_on=False)
        plt.gca().add_patch(patches)
        t_disp += 1
    plt.savefig(save_path, bbox_inches='tight')
    # sidecar JSON with question and prediction / ground truth
    with open(save_path.replace('.png', '') + '.txt', 'w') as f:
        question = (' '.join(words)).replace(' ?', '?')
        if vis_type == 'vqa':
            ans_pred, ans_gt = answers[np.argmax(vqa_scores)], answers[label]
            json.dump({'question': question, 'ans_pred': ans_pred,
                       'ans_gt': ans_gt}, f)
        elif vis_type == 'loc':
            json.dump({'question': question, 'bbox_pred': list(bbox_pred),
                       'bbox_gt': list(bbox_gt)}, f)
        else:
            # Fix: corrected 'Unknow' typo in the error message
            raise ValueError('Unknown vis_type ' + str(vis_type))
    print('visualization saved to ' + save_path)
    plt.close(h)
def vis_batch_vqa(model, data_reader, batch, vis_outputs, start_idx,
                  start_idx_correct, start_idx_incorrect, vis_dir):
    """Save per-example VQA visualizations for one batch.

    When cfg.TEST.VIS_SEPARATE_CORRECTNESS is set, the remaining budget is
    split between correctly and incorrectly answered examples and file
    names are prefixed 'correct_' / 'incorrect_'; otherwise up to
    cfg.TEST.NUM_VIS examples are taken in batch order. Each example is
    drawn by vis_one_stepwise or vis_one_vqa depending on
    cfg.TEST.STEPWISE_VIS.
    """
    module_names = model.nmn.module_names
    answers = data_reader.batch_loader.answer_dict.word_list
    if cfg.TEST.VIS_SEPARATE_CORRECTNESS:
        # remaining quota for each category
        num_correct = max(cfg.TEST.NUM_VIS_CORRECT-start_idx_correct, 0)
        num_incorrect = max(cfg.TEST.NUM_VIS_INCORRECT-start_idx_incorrect, 0)
        labels = batch['answer_label_batch']
        predictions = np.argmax(vis_outputs['vqa_scores'], axis=1)
        is_correct = predictions == labels
        inds = (list(np.where(is_correct)[0][:num_correct]) +
                list(np.where(~is_correct)[0][:num_incorrect]))
    else:
        num = min(len(batch['image_path_list']), cfg.TEST.NUM_VIS - start_idx)
        inds = range(num)
    for n in inds:
        img_path = batch['image_path_list'][n]
        if cfg.TEST.VIS_SEPARATE_CORRECTNESS:
            if is_correct[n]:
                save_name = 'correct_%08d_%s.png' % (
                    start_idx_correct,
                    os.path.basename(img_path).split('.')[0])
                start_idx_correct += 1
            else:
                save_name = 'incorrect_%08d_%s.png' % (
                    start_idx_incorrect,
                    os.path.basename(img_path).split('.')[0])
                start_idx_incorrect += 1
        else:
            save_name = '%08d_%s.png' % (
                start_idx, os.path.basename(img_path).split('.')[0])
            start_idx += 1
        save_path = os.path.join(vis_dir, save_name)
        # decode the question tokens for example n (sequence-major layout)
        words = [
            data_reader.batch_loader.vocab_dict.idx2word(n_w) for n_w in
            batch['input_seq_batch'][:batch['seq_length_batch'][n], n]]
        vqa_scores = vis_outputs['vqa_scores'][n]
        label = batch['answer_label_batch'][n]
        txt_att = vis_outputs['txt_att'][n]
        att_stack = vis_outputs['att_stack'][n]
        stack_ptr = vis_outputs['stack_ptr'][n]
        module_prob = vis_outputs['module_prob'][n]
        if cfg.TEST.STEPWISE_VIS:
            vis_one_stepwise(img_path, words, module_names, txt_att, att_stack,
                             stack_ptr, module_prob, save_path, vis_type='vqa',
                             vqa_scores=vqa_scores, label=label,
                             answers=answers)
        else:
            vis_one_vqa(img_path, words, vqa_scores, label, module_names,
                        answers, txt_att, att_stack, stack_ptr, module_prob,
                        save_path)
def vis_batch_loc(model, data_reader, batch, vis_outputs, start_idx,
                  start_idx_correct, start_idx_incorrect, vis_dir):
    """Save per-example localization visualizations for one batch.

    Mirrors vis_batch_vqa, but correctness is defined as IoU between the
    decoded predicted box and the ground-truth box meeting
    cfg.TEST.BBOX_IOU_THRESH. Each example is drawn by vis_one_stepwise
    or vis_one_loc depending on cfg.TEST.STEPWISE_VIS.
    """
    module_names = model.nmn.module_names
    iou_th = cfg.TEST.BBOX_IOU_THRESH
    if cfg.TEST.VIS_SEPARATE_CORRECTNESS:
        # remaining quota for each category
        num_correct = max(cfg.TEST.NUM_VIS_CORRECT-start_idx_correct, 0)
        num_incorrect = max(cfg.TEST.NUM_VIS_INCORRECT-start_idx_incorrect, 0)
        # batch-wise decoded boxes; NOTE: this name is shadowed by the
        # per-example bbox_pred inside the loop below
        bbox_pred = boxes.batch_feat_grid2bbox(
            np.argmax(vis_outputs['loc_scores'], axis=1),
            vis_outputs['bbox_offset'],
            data_reader.batch_loader.stride_H,
            data_reader.batch_loader.stride_W,
            data_reader.batch_loader.feat_H, data_reader.batch_loader.feat_W)
        bbox_gt = batch['bbox_batch']
        is_correct = boxes.batch_bbox_iou(bbox_pred, bbox_gt) >= iou_th
        inds = (list(np.where(is_correct)[0][:num_correct]) +
                list(np.where(~is_correct)[0][:num_incorrect]))
    else:
        num = min(len(batch['image_path_list']), cfg.TEST.NUM_VIS - start_idx)
        inds = range(num)
    for n in inds:
        img_path = batch['image_path_list'][n]
        if cfg.TEST.VIS_SEPARATE_CORRECTNESS:
            if is_correct[n]:
                save_name = 'correct_%08d_%s.png' % (
                    start_idx_correct,
                    os.path.basename(img_path).split('.')[0])
                start_idx_correct += 1
            else:
                save_name = 'incorrect_%08d_%s.png' % (
                    start_idx_incorrect,
                    os.path.basename(img_path).split('.')[0])
                start_idx_incorrect += 1
        else:
            save_name = '%08d_%s.png' % (
                start_idx, os.path.basename(img_path).split('.')[0])
            start_idx += 1
        save_path = os.path.join(vis_dir, save_name)
        # decode the question tokens for example n (sequence-major layout)
        words = [
            data_reader.batch_loader.vocab_dict.idx2word(n_w) for n_w in
            batch['input_seq_batch'][:batch['seq_length_batch'][n], n]]
        loc_scores = vis_outputs['loc_scores'][n]
        bbox_offset = vis_outputs['bbox_offset'][n]
        bbox_pred = boxes.feat_grid2bbox(
            np.argmax(loc_scores), bbox_offset,
            data_reader.batch_loader.stride_H,
            data_reader.batch_loader.stride_W, data_reader.batch_loader.feat_H,
            data_reader.batch_loader.feat_W)
        # ground-truth box re-decoded from grid index + offset (not the
        # raw batch['bbox_batch'] value)
        bbox_gt = boxes.feat_grid2bbox(
            batch['bbox_ind_batch'][n], batch['bbox_offset_batch'][n],
            data_reader.batch_loader.stride_H,
            data_reader.batch_loader.stride_W, data_reader.batch_loader.feat_H,
            data_reader.batch_loader.feat_W)
        # bbox_gt = batch['bbox_batch'][n]
        txt_att = vis_outputs['txt_att'][n]
        att_stack = vis_outputs['att_stack'][n]
        stack_ptr = vis_outputs['stack_ptr'][n]
        module_prob = vis_outputs['module_prob'][n]
        if cfg.TEST.STEPWISE_VIS:
            vis_one_stepwise(img_path, words, module_names, txt_att, att_stack,
                             stack_ptr, module_prob, save_path, vis_type='loc',
                             loc_scores=loc_scores, bbox_pred=bbox_pred,
                             bbox_gt=bbox_gt)
        else:
            vis_one_loc(
                img_path, words, loc_scores, bbox_pred, bbox_gt, module_names,
                txt_att, att_stack, stack_ptr, module_prob, save_path)
def _print_bbox(bbox, color='r', scale_x=1., scale_y=1.):
    """Draw a rectangle outline for ``bbox`` on the current pyplot axes.

    NOTE(review): the box is unpacked as (x1, y1, h, w) -- height before
    width; confirm against the bbox producers before reusing elsewhere.
    Corner coordinates are computed first, then scaled.
    """
    left, top, height, width = bbox
    right = left + width - 1
    bottom = top + height - 1
    left = left * scale_x
    top = top * scale_y
    right = right * scale_x
    bottom = bottom * scale_y
    plt.plot([left, right, right, left, left],
             [top, top, bottom, bottom, top], color)
def _att_softmax(att):
exps = np.exp(att - np.max(att))
softmax = exps / np.sum(exps)
return softmax
def attention_interpolation(im, att):
    """Overlay attention ``att`` on image ``im`` by brightening attended areas.

    The attention is softmax-normalized, bicubically upsampled to the image
    size, and used as a per-pixel blending weight; unattended regions are
    dimmed to 45% brightness.
    """
    probs = _att_softmax(att)
    upsampled = skimage.transform.resize(probs, im.shape[:2], order=3)
    # normalize so the peak is at most 1, but never amplify beyond
    # 3x the uniform-attention level
    upsampled /= np.maximum(np.max(upsampled), 3. / att.size)
    alpha = upsampled[..., np.newaxis]
    # attended area keeps full brightness; the rest is dimmed
    blended = alpha * im + (1 - alpha) * im * .45
    return blended.astype(im.dtype)
def _move_ptr_bw(stack_ptr):
    """Shift the soft stack pointer one slot toward the stack bottom.

    When cfg.MODEL.NMN.STACK.GUARD_STACK_PTR is set, pointer mass already
    at the bottom slot stays there instead of falling off the stack.
    """
    shifted = np.zeros_like(stack_ptr)
    shifted[:-1] = stack_ptr[1:]
    if cfg.MODEL.NMN.STACK.GUARD_STACK_PTR:
        bottom_mask = np.zeros_like(stack_ptr)
        bottom_mask[0] = 1.
        shifted = shifted + bottom_mask * stack_ptr
    return shifted
def _read_two_from_stack(att_stack, stack_ptr):
    """Read the top two attention maps from the soft attention stack.

    Returns (second-from-top, top), each a weighted sum over the depth axis.
    """
    top = np.sum(att_stack * stack_ptr, axis=-1)
    below_top = np.sum(att_stack * _move_ptr_bw(stack_ptr), axis=-1)
    return below_top, top
| <filename>models_vqa/vis.py
import matplotlib; matplotlib.use('Agg') # NOQA
import os
import json
import skimage.io
import skimage.transform
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Arrow
from .config import cfg
from util import boxes
def vis_one_vqa(img_path, words, vqa_scores, label, module_names, answers,
                txt_att, att_stack, stack_ptr, module_prob, save_path):
    """Save a single-figure diagnostic for one VQA example.

    Lays out a 5x3 grid of panels: input image + question, module weights,
    textual attention, answer logits, stack pointer, the full attention
    stack, then one image-attention panel per controller timestep. The
    figure is written to ``save_path`` and closed afterwards.
    """
    img = skimage.io.imread(img_path)
    h = plt.figure(figsize=(20, 20))
    # number of controller timesteps
    T = cfg.MODEL.T_CTRL
    # img
    plt.subplot(5, 3, 1)
    plt.imshow(img)
    # wrap the question text at 10 words per line
    plt.title(
        '\n'.join([' '.join(words[b:b+10]) for b in range(0, len(words), 10)]))
    # module weights
    plt.subplot(5, 3, 2)
    plt.imshow(module_prob.T, cmap='Reds')
    plt.colorbar()
    plt.xticks(range(T), range(T))
    plt.yticks(range(len(module_names)), module_names, size='small')
    plt.title('module weights at controller timestep')
    # textual attention
    plt.subplot(5, 3, 3)
    # print(np.sum(txt_att, axis=1))
    # print(np.sum(txt_att[:, :len(words)], axis=1))
    plt.imshow(txt_att[:, :len(words)], cmap='Reds')
    plt.colorbar()
    plt.xticks(range(len(words)), words, rotation=90)
    plt.yticks(range(T), range(T))
    plt.ylabel('controller timestep')
    plt.title('textual attention at controller timestep')
    # scores
    plt.subplot(5, 3, 4)
    plt.imshow(vqa_scores[np.newaxis, :], cmap='Reds')
    plt.xticks(range(len(answers)), answers, rotation=90)
    plt.yticks([], [])
    plt.xlabel('answer logits')
    plt.title('prediction: %s label: %s' % (
        answers[np.argmax(vqa_scores)], answers[label]))
    plt.subplot(5, 3, 5)
    plt.imshow(stack_ptr.T, cmap='Reds')
    plt.colorbar()
    plt.xticks(range(T), range(T))
    plt.yticks(range(stack_ptr.shape[1]), range(stack_ptr.shape[1]))
    plt.ylabel('stack depth')
    plt.xlabel('stack pointer at controller timestep')
    # Visualize the attention stack
    # att_stack is T x H x W x L -> L x H x T x W
    plt.subplot(5, 3, 6)
    # NOTE(review): T is rebound here from att_stack's first axis;
    # presumably equal to cfg.MODEL.T_CTRL -- confirm upstream.
    T, H, W, L = att_stack.shape
    plt.imshow(att_stack.transpose((3, 1, 0, 2)).reshape((L*H, T*W)))
    plt.colorbar()
    plt.xticks(W // 2 + np.arange(T) * W, range(T))
    plt.yticks(np.arange(L) * H, np.arange(L) * H)
    plt.ylabel('stack depth')
    plt.xlabel('image attention at controller timestep')
    # image attention at each timestep
    for t in range(T):
        plt.subplot(5, 3, t+7)
        # attention map read from the stack at the soft pointer position
        att = np.sum(att_stack[t] * stack_ptr[t], axis=-1)
        img_with_att = attention_interpolation(img, att)
        plt.imshow(img_with_att)
        plt.xlabel('controller timestep t = %d' % t)
    plt.savefig(save_path)
    print('visualization saved to ' + save_path)
    plt.close(h)
def vis_one_loc(img_path, words, loc_scores, bbox_pred, bbox_gt, module_names,
                txt_att, att_stack, stack_ptr, module_prob, save_path):
    """Save a single-figure diagnostic for one localization example.

    Same 5x3 layout as ``vis_one_vqa`` except that panel 1 also draws the
    predicted (red) and ground-truth (yellow) boxes and panel 4 shows the
    H_FEAT x W_FEAT localization score map instead of answer logits.
    """
    img = skimage.io.imread(img_path)
    h = plt.figure(figsize=(20, 20))
    # number of controller timesteps
    T = cfg.MODEL.T_CTRL
    # img
    plt.subplot(5, 3, 1)
    plt.imshow(img)
    _print_bbox(bbox_pred, 'r')
    _print_bbox(bbox_gt, 'y')
    plt.title(
        '\n'.join([' '.join(words[b:b+10]) for b in range(0, len(words), 10)])
        + '\nred: prediction yellow: ground-truth')
    # module weights
    plt.subplot(5, 3, 2)
    plt.imshow(module_prob.T, cmap='Reds')
    plt.colorbar()
    plt.xticks(range(T), range(T))
    plt.yticks(range(len(module_names)), module_names, size='small')
    plt.title('module weights at controller timestep')
    # textual attention
    plt.subplot(5, 3, 3)
    # print(np.sum(txt_att, axis=1))
    # print(np.sum(txt_att[:, :len(words)], axis=1))
    plt.imshow(txt_att[:, :len(words)], cmap='Reds')
    plt.colorbar()
    plt.xticks(range(len(words)), words, rotation=90)
    plt.yticks(range(T), range(T))
    plt.ylabel('controller timestep')
    plt.title('textual attention at controller timestep')
    # scores
    plt.subplot(5, 3, 4)
    # reshape the flat score vector back onto the feature grid
    plt.imshow(loc_scores.reshape(cfg.MODEL.H_FEAT, cfg.MODEL.W_FEAT))
    plt.colorbar()
    plt.title('localization scores')
    plt.subplot(5, 3, 5)
    plt.imshow(stack_ptr.T, cmap='Reds')
    plt.colorbar()
    plt.xticks(range(T), range(T))
    plt.yticks(range(stack_ptr.shape[1]), range(stack_ptr.shape[1]))
    plt.ylabel('stack depth')
    plt.xlabel('stack pointer at controller timestep')
    # Visualize the attention stack
    # att_stack is T x H x W x L -> L x H x T x W
    plt.subplot(5, 3, 6)
    # NOTE(review): T is rebound here from att_stack's first axis;
    # presumably equal to cfg.MODEL.T_CTRL -- confirm upstream.
    T, H, W, L = att_stack.shape
    plt.imshow(att_stack.transpose((3, 1, 0, 2)).reshape((L*H, T*W)))
    plt.colorbar()
    plt.xticks(W // 2 + np.arange(T) * W, range(T))
    plt.yticks(np.arange(L) * H, np.arange(L) * H)
    plt.ylabel('stack depth')
    plt.xlabel('image attention at controller timestep')
    # image attention at each timestep
    for t in range(T):
        plt.subplot(5, 3, t+7)
        # attention map read from the stack at the soft pointer position
        att = np.sum(att_stack[t] * stack_ptr[t], axis=-1)
        img_with_att = attention_interpolation(img, att)
        plt.imshow(img_with_att)
        plt.xlabel('controller timestep t = %d' % t)
    plt.savefig(save_path)
    print('visualization saved to ' + save_path)
    plt.close(h)
def _format_str(s):
words = s.split()
s = '\n'.join([' '.join(words[b:b+8]) for b in range(0, len(words), 8)])
return s
# One-sentence, human-readable description of what each NMN module does.
# Keys are the module names used by the model; values are display text.
# NOTE(review): not referenced elsewhere in this file -- presumably kept
# for external consumers or future captions; confirm before removing.
MODULE_DESCRIPTION_TEXT = {
    '_NoOp':
        'it doesn\'t do anything (i.e. nothing is updated in this timestep).',  # NoQA
    '_Find':
        'it looks at new image regions based on attended text.',  # NoQA
    '_Transform':
        'it shifts the image attention to somewhere new, conditioned on its previous glimpse.',  # NoQA
    '_Filter':
        'it tries to select out some image regions from where it looked before (based on attended text).',  # NoQA
    '_And':
        'it takes the intersection of the program\'s two previous glimpses as inputs, returning their intersection.',  # NoQA
    '_Or':
        'it takes the union of the program\'s two previous glimpses as inputs, returning their union.',  # NoQA
    '_Scene':
        'it tries to look at some objects in the image.',  # NoQA
    '_DescribeOne':
        'it takes the program\'s previous glimpse as input, and tries to infer the answer from it.',  # NoQA
    '_DescribeTwo':
        'it takes the program\'s two previous glimpses as inputs, and tries to infer the answer from them.',  # NoQA
}
def _find_txt_segs(keep, words):
segs = []
elems = []
for n, k in enumerate(keep):
if k:
elems.append(words[n])
else:
if elems:
segs.append('"' + ' '.join(elems) + '"')
elems = []
if elems:
segs.append('"' + ' '.join(elems) + '"')
return segs
def _extract_txt_att(words, atts, thresh=0.5):
    """
    Take at most 3 words that have at least 50% of the max attention.

    Returns a comma-separated string of double-quoted word runs (see
    ``_find_txt_segs``). Returns '' for empty input.
    """
    # Robustness fix: the original unconditionally read atts_sorted[2],
    # which raised IndexError for questions with fewer than 3 tokens.
    if len(atts) == 0:
        return ''
    atts_sorted = np.sort(atts)[::-1]
    third_highest = atts_sorted[2] if len(atts_sorted) > 2 else atts_sorted[-1]
    att_min = max(third_highest, atts_sorted[0]*thresh)
    # collect those words above att_min
    keep = (atts >= att_min)
    # assert np.any(keep)
    vis_txt = ', '.join(_find_txt_segs(keep, words))
    return vis_txt
def vis_one_stepwise(img_path, words, module_names, txt_att, att_stack,
                     stack_ptr, module_prob, save_path, vis_type,
                     vqa_scores=None, label=None, answers=None,
                     loc_scores=None, bbox_pred=None, bbox_gt=None):
    """Save a step-by-step visualization of the module program.

    Renders one panel per displayed controller timestep (hard-argmax module
    choice, attended question words, and the image attention read from the
    stack), followed by an answer panel ('vqa') or a bbox panel ('loc').
    Also writes a sidecar ``<save_path>.txt`` JSON file with the question
    and the prediction/ground truth.

    Raises:
        ValueError: if ``vis_type`` is neither 'vqa' nor 'loc'.
    """
    T = cfg.MODEL.T_CTRL
    # M = len(module_names)
    img = skimage.io.imread(img_path)
    # scale factors from the original image to the fixed 320x480 canvas,
    # used to redraw bboxes in canvas coordinates
    scale_x = 480. / img.shape[1]
    scale_y = 320. / img.shape[0]
    img = skimage.transform.resize(img, (320, 480))
    h = plt.figure(figsize=(18, (T+2) * 5))
    if cfg.TEST.VIS_SHOW_IMG:
        # Image and question
        plt.subplot((T+2)*2, 3, (3, 6))
        plt.imshow(img)
        plt.axis('off')
        plt.title('\n'.join(
            [' '.join(words[b:b+6]) for b in range(0, len(words), 6)]),
            fontsize=20)
    # Modules at each timestep (hard argmax over the soft module weights)
    m_list = [module_names[np.argmax(module_prob[t])] for t in range(T)]
    # Fix: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # use the builtin bool dtype instead.
    is_disp = np.ones(T, bool)
    is_ans = np.zeros(T, bool)
    if vis_type == 'vqa':
        # Show the output of the last "_Describe*" module only; hide
        # _NoOp steps and any earlier _Describe* steps.
        describe_t = -1
        for t in range(T-1, -1, -1):
            if m_list[t].startswith('_Describe'):
                describe_t = t
                break
        for t in range(T):
            is_disp[t] = not (
                (m_list[t] == '_NoOp') or
                (m_list[t].startswith('_Describe') and t != describe_t))
        is_ans[describe_t] = True
    else:
        # 'loc': always display the final timestep as the answer panel
        for t in range(T):
            is_disp[t] = (t == T-1) or not (
                (m_list[t] == '_NoOp') or
                (m_list[t].startswith('_Describe')))
        is_ans[T-1] = True
    t_disp = 0
    for t in range(T):
        if not is_disp[t]:
            continue
        show_ans = is_ans[t]
        m = m_list[t]
        # modules without a textual argument get no attended-word caption
        if m in {'_Scene', '_NoOp', '_And', '_Or'}:
            att_txt = ''
        else:
            att_txt = _extract_txt_att(words, txt_att[t, :len(words)])
        # map internal module names to friendlier display names
        if t == 0 and m == '_Filter':
            m_display = 'find'
        else:
            m_display = m[1:].replace(
                'Find', 'look_for').replace(
                'Filter', 'select').replace(
                'Transform', 'related_by').replace(
                'DescribeOne', 'Answer').replace(
                'DescribeTwo', 'Compare_Two').replace(
                'And', 'Intersect').replace('Or', 'Combine').lower()
        if show_ans and vis_type == 'loc' and \
                m in {'_NoOp', '_DescribeOne', '_DescribeTwo'}:
            m_display = 'bbox_regression'
            att_txt = ''
        # output attention
        if show_ans:
            if vis_type == 'vqa':
                plt.subplot((T+2)*2, 3, (6*t_disp+9, 6*t_disp+12))
                plt.imshow(np.ones(img.shape, np.float32))
                plt.axis('off')
                if cfg.TEST.VIS_SHOW_ANSWER:
                    answer_txt = (
                        'predicted answer: "%s"\ntrue answer: "%s"' % (
                            answers[np.argmax(vqa_scores)], answers[label]))
                else:
                    answer_txt = '(model prediction not shown)'
                plt.text(10, 100, answer_txt, fontsize=20)
            elif vis_type == 'loc':
                plt.subplot((T+2)*2, 3, (6*t_disp+9, 6*t_disp+12))
                plt.imshow(img)
                _print_bbox(bbox_gt, 'y', scale_x, scale_y)
                if cfg.TEST.VIS_SHOW_ANSWER:
                    _print_bbox(bbox_pred, 'r', scale_x, scale_y)
                    IoU = boxes.bbox_iou(bbox_pred, bbox_gt)
                    txt = 'prediction: red box\nground-truth: yellow box\n' \
                        '(IoU = %.2f)' % IoU
                else:
                    txt = 'prediction: (not shown)\nground-truth: yellow box'
                plt.xticks([], [])
                plt.yticks([], [])
                plt.xlabel(txt, fontsize=20)
            else:
                # Fix: corrected 'Unknow' typo in the error message
                raise ValueError('Unknown vis_type ' + str(vis_type))
        else:
            plt.subplot((T+2)*2, 3, (6*t_disp+9, 6*t_disp+12))
            # attention map read from the stack at the soft pointer position
            att = np.sum(att_stack[t] * stack_ptr[t], axis=-1)
            img_with_att = attention_interpolation(img, att)
            plt.imshow(img_with_att)
            plt.xticks([], [])
            plt.yticks([], [])
        plt.title('%s(%s)\n' % (m_display, att_txt), fontsize=24)
        # downward arrow connecting this panel to the previous one
        patches = Arrow(
            img.shape[1] // 2, -35, 0, 32, width=40, color='k', clip_on=False)
        plt.gca().add_patch(patches)
        t_disp += 1
    plt.savefig(save_path, bbox_inches='tight')
    # sidecar JSON with question and prediction / ground truth
    with open(save_path.replace('.png', '') + '.txt', 'w') as f:
        question = (' '.join(words)).replace(' ?', '?')
        if vis_type == 'vqa':
            ans_pred, ans_gt = answers[np.argmax(vqa_scores)], answers[label]
            json.dump({'question': question, 'ans_pred': ans_pred,
                       'ans_gt': ans_gt}, f)
        elif vis_type == 'loc':
            json.dump({'question': question, 'bbox_pred': list(bbox_pred),
                       'bbox_gt': list(bbox_gt)}, f)
        else:
            # Fix: corrected 'Unknow' typo in the error message
            raise ValueError('Unknown vis_type ' + str(vis_type))
    print('visualization saved to ' + save_path)
    plt.close(h)
def vis_batch_vqa(model, data_reader, batch, vis_outputs, start_idx,
                  start_idx_correct, start_idx_incorrect, vis_dir):
    """Save per-example VQA visualizations for one batch.

    When cfg.TEST.VIS_SEPARATE_CORRECTNESS is set, the remaining budget is
    split between correctly and incorrectly answered examples and file
    names are prefixed 'correct_' / 'incorrect_'; otherwise up to
    cfg.TEST.NUM_VIS examples are taken in batch order. Each example is
    drawn by vis_one_stepwise or vis_one_vqa depending on
    cfg.TEST.STEPWISE_VIS.
    """
    module_names = model.nmn.module_names
    answers = data_reader.batch_loader.answer_dict.word_list
    if cfg.TEST.VIS_SEPARATE_CORRECTNESS:
        # remaining quota for each category
        num_correct = max(cfg.TEST.NUM_VIS_CORRECT-start_idx_correct, 0)
        num_incorrect = max(cfg.TEST.NUM_VIS_INCORRECT-start_idx_incorrect, 0)
        labels = batch['answer_label_batch']
        predictions = np.argmax(vis_outputs['vqa_scores'], axis=1)
        is_correct = predictions == labels
        inds = (list(np.where(is_correct)[0][:num_correct]) +
                list(np.where(~is_correct)[0][:num_incorrect]))
    else:
        num = min(len(batch['image_path_list']), cfg.TEST.NUM_VIS - start_idx)
        inds = range(num)
    for n in inds:
        img_path = batch['image_path_list'][n]
        if cfg.TEST.VIS_SEPARATE_CORRECTNESS:
            if is_correct[n]:
                save_name = 'correct_%08d_%s.png' % (
                    start_idx_correct,
                    os.path.basename(img_path).split('.')[0])
                start_idx_correct += 1
            else:
                save_name = 'incorrect_%08d_%s.png' % (
                    start_idx_incorrect,
                    os.path.basename(img_path).split('.')[0])
                start_idx_incorrect += 1
        else:
            save_name = '%08d_%s.png' % (
                start_idx, os.path.basename(img_path).split('.')[0])
            start_idx += 1
        save_path = os.path.join(vis_dir, save_name)
        # decode the question tokens for example n (sequence-major layout)
        words = [
            data_reader.batch_loader.vocab_dict.idx2word(n_w) for n_w in
            batch['input_seq_batch'][:batch['seq_length_batch'][n], n]]
        vqa_scores = vis_outputs['vqa_scores'][n]
        label = batch['answer_label_batch'][n]
        txt_att = vis_outputs['txt_att'][n]
        att_stack = vis_outputs['att_stack'][n]
        stack_ptr = vis_outputs['stack_ptr'][n]
        module_prob = vis_outputs['module_prob'][n]
        if cfg.TEST.STEPWISE_VIS:
            vis_one_stepwise(img_path, words, module_names, txt_att, att_stack,
                             stack_ptr, module_prob, save_path, vis_type='vqa',
                             vqa_scores=vqa_scores, label=label,
                             answers=answers)
        else:
            vis_one_vqa(img_path, words, vqa_scores, label, module_names,
                        answers, txt_att, att_stack, stack_ptr, module_prob,
                        save_path)
def vis_batch_loc(model, data_reader, batch, vis_outputs, start_idx,
                  start_idx_correct, start_idx_incorrect, vis_dir):
    """Save per-example localization visualizations for one batch.

    Mirrors vis_batch_vqa, but correctness is defined as IoU between the
    decoded predicted box and the ground-truth box meeting
    cfg.TEST.BBOX_IOU_THRESH. Each example is drawn by vis_one_stepwise
    or vis_one_loc depending on cfg.TEST.STEPWISE_VIS.
    """
    module_names = model.nmn.module_names
    iou_th = cfg.TEST.BBOX_IOU_THRESH
    if cfg.TEST.VIS_SEPARATE_CORRECTNESS:
        # remaining quota for each category
        num_correct = max(cfg.TEST.NUM_VIS_CORRECT-start_idx_correct, 0)
        num_incorrect = max(cfg.TEST.NUM_VIS_INCORRECT-start_idx_incorrect, 0)
        # batch-wise decoded boxes; NOTE: this name is shadowed by the
        # per-example bbox_pred inside the loop below
        bbox_pred = boxes.batch_feat_grid2bbox(
            np.argmax(vis_outputs['loc_scores'], axis=1),
            vis_outputs['bbox_offset'],
            data_reader.batch_loader.stride_H,
            data_reader.batch_loader.stride_W,
            data_reader.batch_loader.feat_H, data_reader.batch_loader.feat_W)
        bbox_gt = batch['bbox_batch']
        is_correct = boxes.batch_bbox_iou(bbox_pred, bbox_gt) >= iou_th
        inds = (list(np.where(is_correct)[0][:num_correct]) +
                list(np.where(~is_correct)[0][:num_incorrect]))
    else:
        num = min(len(batch['image_path_list']), cfg.TEST.NUM_VIS - start_idx)
        inds = range(num)
    for n in inds:
        img_path = batch['image_path_list'][n]
        if cfg.TEST.VIS_SEPARATE_CORRECTNESS:
            if is_correct[n]:
                save_name = 'correct_%08d_%s.png' % (
                    start_idx_correct,
                    os.path.basename(img_path).split('.')[0])
                start_idx_correct += 1
            else:
                save_name = 'incorrect_%08d_%s.png' % (
                    start_idx_incorrect,
                    os.path.basename(img_path).split('.')[0])
                start_idx_incorrect += 1
        else:
            save_name = '%08d_%s.png' % (
                start_idx, os.path.basename(img_path).split('.')[0])
            start_idx += 1
        save_path = os.path.join(vis_dir, save_name)
        # decode the question tokens for example n (sequence-major layout)
        words = [
            data_reader.batch_loader.vocab_dict.idx2word(n_w) for n_w in
            batch['input_seq_batch'][:batch['seq_length_batch'][n], n]]
        loc_scores = vis_outputs['loc_scores'][n]
        bbox_offset = vis_outputs['bbox_offset'][n]
        bbox_pred = boxes.feat_grid2bbox(
            np.argmax(loc_scores), bbox_offset,
            data_reader.batch_loader.stride_H,
            data_reader.batch_loader.stride_W, data_reader.batch_loader.feat_H,
            data_reader.batch_loader.feat_W)
        # ground-truth box re-decoded from grid index + offset (not the
        # raw batch['bbox_batch'] value)
        bbox_gt = boxes.feat_grid2bbox(
            batch['bbox_ind_batch'][n], batch['bbox_offset_batch'][n],
            data_reader.batch_loader.stride_H,
            data_reader.batch_loader.stride_W, data_reader.batch_loader.feat_H,
            data_reader.batch_loader.feat_W)
        # bbox_gt = batch['bbox_batch'][n]
        txt_att = vis_outputs['txt_att'][n]
        att_stack = vis_outputs['att_stack'][n]
        stack_ptr = vis_outputs['stack_ptr'][n]
        module_prob = vis_outputs['module_prob'][n]
        if cfg.TEST.STEPWISE_VIS:
            vis_one_stepwise(img_path, words, module_names, txt_att, att_stack,
                             stack_ptr, module_prob, save_path, vis_type='loc',
                             loc_scores=loc_scores, bbox_pred=bbox_pred,
                             bbox_gt=bbox_gt)
        else:
            vis_one_loc(
                img_path, words, loc_scores, bbox_pred, bbox_gt, module_names,
                txt_att, att_stack, stack_ptr, module_prob, save_path)
def _print_bbox(bbox, color='r', scale_x=1., scale_y=1.):
    """Draw a rectangle outline for ``bbox`` on the current pyplot axes.

    NOTE(review): the box is unpacked as (x1, y1, h, w) -- height before
    width; confirm against the bbox producers before reusing elsewhere.
    Corner coordinates are computed first, then scaled.
    """
    left, top, height, width = bbox
    right = left + width - 1
    bottom = top + height - 1
    left = left * scale_x
    top = top * scale_y
    right = right * scale_x
    bottom = bottom * scale_y
    plt.plot([left, right, right, left, left],
             [top, top, bottom, bottom, top], color)
def _att_softmax(att):
exps = np.exp(att - np.max(att))
softmax = exps / np.sum(exps)
return softmax
def attention_interpolation(im, att):
    """Overlay attention ``att`` on image ``im`` by brightening attended areas.

    The attention is softmax-normalized, bicubically upsampled to the image
    size, and used as a per-pixel blending weight; unattended regions are
    dimmed to 45% brightness.
    """
    probs = _att_softmax(att)
    upsampled = skimage.transform.resize(probs, im.shape[:2], order=3)
    # normalize so the peak is at most 1, but never amplify beyond
    # 3x the uniform-attention level
    upsampled /= np.maximum(np.max(upsampled), 3. / att.size)
    alpha = upsampled[..., np.newaxis]
    # attended area keeps full brightness; the rest is dimmed
    blended = alpha * im + (1 - alpha) * im * .45
    return blended.astype(im.dtype)
def _move_ptr_bw(stack_ptr):
    """Shift the soft stack pointer one slot toward the stack bottom.

    When cfg.MODEL.NMN.STACK.GUARD_STACK_PTR is set, pointer mass already
    at the bottom slot stays there instead of falling off the stack.
    """
    shifted = np.zeros_like(stack_ptr)
    shifted[:-1] = stack_ptr[1:]
    if cfg.MODEL.NMN.STACK.GUARD_STACK_PTR:
        bottom_mask = np.zeros_like(stack_ptr)
        bottom_mask[0] = 1.
        shifted = shifted + bottom_mask * stack_ptr
    return shifted
def _read_two_from_stack(att_stack, stack_ptr):
    """Read the top two attention maps from the soft attention stack.

    Returns (second-from-top, top), each a weighted sum over the depth axis.
    """
    top = np.sum(att_stack * stack_ptr, axis=-1)
    below_top = np.sum(att_stack * _move_ptr_bw(stack_ptr), axis=-1)
    return below_top, top
| en | 0.739477 | # NOQA # img # module weights # textual attention # print(np.sum(txt_att, axis=1)) # print(np.sum(txt_att[:, :len(words)], axis=1)) # scores # Visualize the attention stack # att_stack is T x H x W x L -> L x H x T x W # image attention at each timestep # img # module weights # textual attention # print(np.sum(txt_att, axis=1)) # print(np.sum(txt_att[:, :len(words)], axis=1)) # scores # Visualize the attention stack # att_stack is T x H x W x L -> L x H x T x W # image attention at each timestep # NoQA # NoQA # NoQA # NoQA # NoQA # NoQA # NoQA # NoQA # NoQA Take at most 3 words that have at least 50% of the max attention. # collect those words above att_min # assert np.any(keep) # M = len(module_names) # Image and question # Modules at each timestep Show the output of the last "_Describe*" # output attention # bbox_gt = batch['bbox_batch'][n] # normalize the attention # make sure the 255 alpha channel is at least 3x uniform attention # make the attention area brighter than the rest of the area | 2.121954 | 2 |