hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a0fd8510182cf81512b0f5b6e68cf53699718c2
| 383
|
py
|
Python
|
apps/dataupload/admin.py
|
Sunbird-Ed/evolve-api
|
371b39422839762e32401340456c13858cb8e1e9
|
[
"MIT"
] | 1
|
2019-02-27T15:26:11.000Z
|
2019-02-27T15:26:11.000Z
|
apps/dataupload/admin.py
|
Sunbird-Ed/evolve-api
|
371b39422839762e32401340456c13858cb8e1e9
|
[
"MIT"
] | 9
|
2019-12-16T10:09:46.000Z
|
2022-03-11T23:42:12.000Z
|
apps/dataupload/admin.py
|
Sunbird-Ed/evolve-api
|
371b39422839762e32401340456c13858cb8e1e9
|
[
"MIT"
] | null | null | null |
from django.contrib import admin

from .models import (
    Chapter,
    ChapterKeyword,
    Section,
    SectionKeyword,
    SubSection,
    SubSectionKeyword,
    SubSubSection,
)

# Register every dataupload model with the default admin site so each one
# gets the stock change-list/change-form pages (no custom ModelAdmin needed).
for _model in (
    Chapter,
    Section,
    SubSection,
    ChapterKeyword,
    SectionKeyword,
    SubSectionKeyword,
    SubSubSection,
):
    admin.site.register(_model)
| 31.916667
| 111
| 0.853786
|
4a0fd8db3cac4fe3b47ff388b2b68793abc42e00
| 1,682
|
py
|
Python
|
crawler/archiveCrawler.py
|
jahanzebk/python-text-classifier
|
577c076f400e140aa2df895aafb177dd796a42d1
|
[
"MIT"
] | null | null | null |
crawler/archiveCrawler.py
|
jahanzebk/python-text-classifier
|
577c076f400e140aa2df895aafb177dd796a42d1
|
[
"MIT"
] | null | null | null |
crawler/archiveCrawler.py
|
jahanzebk/python-text-classifier
|
577c076f400e140aa2df895aafb177dd796a42d1
|
[
"MIT"
] | null | null | null |
# from readability.readability import Document
import urllib2
import re
import time
import os
from bs4 import BeautifulSoup
#NOTE: this is in python 2.7 and is the working version
#This file only needed to run once to crawl the archives for links
def writeToFile(link):
    """Append a single article URL to links.txt (one URL per line).

    Also echoes the link to stdout as crude progress output.
    """
    print(link)
    # 'with' guarantees the handle is closed even if the write fails;
    # the original opened the file and never closed it (one leaked
    # descriptor per crawled link).
    with open("links.txt", "a") as fo:
        fo.write(link + "\n")
def crawlGuardianArchives():
    """Crawl the Guardian daily archive pages (July 2-9 2014) for every
    category and collect article links into links.txt.

    One archive-listing URL is fetched per (category, day) pair.
    """
    categories = ("news", "sport", "culture", "business", "money", "lifeandstyle", "travel", "environment", "technology", "tv-and-radio")
    for category in categories:
        #time.sleep(10)
        for day in range(2, 10):
            # NOTE(review): range(2, 10) stops at 9, so the original
            # un-padded (j >= 10) branch was unreachable dead code.
            # %02d produces the identical zero-padded URLs and also
            # stays correct if the range is ever widened.
            getGuardianArchivesLinks(
                "http://www.theguardian.com/" + category
                + "/2014/jul/" + ("%02d" % day) + "/all")
def getGuardianArchivesLinks(url):
    """Fetch one archive-listing page and append its article links to links.txt.

    Only anchors whose direct parent is an <h3> inside a div.trail block are
    article headlines; everything else on the page is ignored.
    """
    # Spoof a desktop browser UA; sites commonly reject the default
    # urllib2 user agent.
    headers = { 'User-Agent' : 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:29.0) Gecko/20100101 Firefox/29.0' }
    req = urllib2.Request(url, '', headers)
    html = urllib2.urlopen(req).read()
    # NOTE(review): no explicit parser is passed, so bs4 auto-picks the
    # "best" installed one, which can vary between machines — confirm
    # before pinning e.g. 'html.parser'.
    soup = BeautifulSoup(html)
    data = soup.findAll('div', attrs={'class': 'trail'})
    # 'with' closes the output file even on a parse/write error; the
    # original leaked the handle on every call.
    with open("links.txt", "a") as fo:
        for h3 in data:
            for a in h3.findAll('a'):
                if a.parent.name == "h3":
                    print(a['href'])
                    fo.write(a['href'] + "\n")
def main():
    """Entry point: crawl all Guardian archive pages into links.txt."""
    #os.chdir("../../Corpus")
    crawlGuardianArchives() #THEY HAVE BEEN CRAWLED, NOW WORK WITH THE LINKS FILE
# Runs at import time (no __main__ guard) — this is a one-shot crawl script
# that was only ever meant to be executed directly.
main()
| 31.148148
| 137
| 0.583234
|
4a0fd94ee2ebbeb1310384209960a81ad52fb798
| 659
|
py
|
Python
|
libcloudforensics/__init__.py
|
juju4/cloud-forensics-utils
|
7bd4038e906c0e854d5b7b33d087239522ccd246
|
[
"Apache-2.0"
] | 241
|
2020-02-08T20:05:54.000Z
|
2022-03-31T23:51:30.000Z
|
libcloudforensics/__init__.py
|
juju4/cloud-forensics-utils
|
7bd4038e906c0e854d5b7b33d087239522ccd246
|
[
"Apache-2.0"
] | 376
|
2020-02-05T10:22:54.000Z
|
2022-03-30T00:09:18.000Z
|
libcloudforensics/__init__.py
|
juju4/cloud-forensics-utils
|
7bd4038e906c0e854d5b7b33d087239522ccd246
|
[
"Apache-2.0"
] | 72
|
2020-02-06T07:34:30.000Z
|
2022-03-05T18:02:13.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""libcloud forensics module."""
__version__ = '20211109'
| 36.611111
| 74
| 0.743551
|
4a0fd9a56169e4e63e40fb1ee7d517cd69e13aea
| 5,541
|
py
|
Python
|
game/load_node_data.py
|
Muthaias/global-explorer
|
38645a7ced2a1614f8d09b01986f4c0bf9cf2e0d
|
[
"MIT"
] | 1
|
2020-11-18T03:12:51.000Z
|
2020-11-18T03:12:51.000Z
|
game/load_node_data.py
|
Muthaias/global-explorer
|
38645a7ced2a1614f8d09b01986f4c0bf9cf2e0d
|
[
"MIT"
] | null | null | null |
game/load_node_data.py
|
Muthaias/global-explorer
|
38645a7ced2a1614f8d09b01986f4c0bf9cf2e0d
|
[
"MIT"
] | null | null | null |
import yaml
import random
from collections import ChainMap
from .descriptors import NodeDescriptor, ActionDescriptor
from .game import Node, Action
from .actions import (
step_into,
combine_actions,
add_trace,
step_out,
step,
pass_time,
charge_card,
select_by_tags,
add_skill,
transfer,
require_skill,
require_time,
require_funds,
require_some,
require_all,
)
def travel_action_from_entry(entry, node_dict):
    """Build the 'location' Action that travels into the node for *entry*.

    When the entry declares opening_hours, the action only matches inside
    that time window; otherwise it is always available.
    """
    node_id = entry.get("id")
    opening_hours = entry.get("opening_hours", None)

    if opening_hours:
        availability = require_time(*opening_hours)
    else:
        availability = None

    enter = combine_actions(
        add_trace,
        step_into([node_id], node_dict),
    )
    return Action(
        apply=enter,
        match=availability,
        descriptor=action_descriptor_from_entry(entry, "location")
    )
def action_from_entry(entry, extra_funcs):
    """Build a generic Action from a YAML entry.

    The optional "action" and "match" DSL expressions are compiled with
    parse_apply_func; a missing expression yields None for that slot.
    """
    raw_apply = entry.get("action", None)
    raw_match = entry.get("match", None)

    apply_func = parse_apply_func(raw_apply, extra_funcs) if raw_apply else None
    match_func = parse_apply_func(raw_match, extra_funcs) if raw_match else None

    return Action(
        apply=apply_func,
        match=match_func,
        descriptor=action_descriptor_from_entry(entry, "action"),
    )
def parse_apply_func(struct, extra_funcs):
    """Recursively compile a YAML DSL expression into a value/callable.

    *struct* is either a plain value (returned unchanged) or a list of the
    form [func_id, arg1, arg2, ...]: each arg is compiled recursively and
    the builder registered under func_id is applied to the results.
    *extra_funcs* supplies additional, context-specific builders.
    """
    # Non-list values are literals in the DSL.
    if not isinstance(struct, list):
        return struct
    # Builtin builders are checked first (ChainMap lookup order), so
    # extra_funcs can only add new ids, not shadow the builtins.
    funcs = ChainMap(
        {
            "sequence": combine_actions,
            # Zero-arg wrappers: the DSL names these actions directly.
            "add_trace": lambda: add_trace,
            "step_out": lambda: step_out,
            "step": lambda: step,
            "pass_time": pass_time,
            # Convenience: hours expressed in the data, seconds internally.
            "pass_hours": lambda hours: pass_time(hours * 3600),
            "charge_card": charge_card,
            "skill": add_skill,
            "require_skill": require_skill,
            "require_time": require_time,
            "require_funds": require_funds,
            "all": require_all,
            "some": require_some,
            # Flatten a mix of scalars and sub-lists into one flat list.
            "list": lambda *items: [
                item
                for subitems in items
                for item in (
                    subitems
                    if isinstance(subitems, list)
                    else [subitems]
                )
            ],
            # Random sample without replacement (non-deterministic).
            "rlist": lambda items, count: random.sample(items, k=count)
        },
        extra_funcs
    )
    [id, *args] = struct
    if id == "lambda":
        # Defer evaluation: the single argument is compiled only when the
        # returned thunk is called.
        return lambda: parse_apply_func(args[0], extra_funcs)
    else:
        parsed_args = [
            parse_apply_func(s, extra_funcs)
            for s in args
        ]
        func = funcs.get(id)
        # NOTE(review): an unknown id makes funcs.get return None and this
        # call raise TypeError — presumably acceptable for authored data;
        # confirm no friendlier error is wanted.
        result = func(*parsed_args)
        return result
def action_descriptor_from_entry(entry, type):
    """Create an ActionDescriptor of the given type, titled from the entry
    (falling back to the generic "Action" title)."""
    title = entry.get("title", "Action")
    return ActionDescriptor(title=title, type=type)
def node_from_entry(entry, actions, default):
    """Create a Node for *entry*, using *default* for descriptor fallbacks
    and attaching the prepared *actions* list."""
    descriptor = node_descriptor_from_entry(entry, default)
    return Node(descriptor=descriptor, actions=actions)
def node_descriptor_from_entry(entry, default):
    """Build a NodeDescriptor from *entry*, falling back field-by-field to
    the shared *default* entry for anything the entry omits."""
    node_id = entry.get("id")
    # ChainMap consults the entry first, then the defaults.
    merged = ChainMap(entry, default)
    return NodeDescriptor(
        id=node_id,
        title=merged.get("title", node_id),
        description=merged.get("description", ""),
        background=merged.get("background", ""),
        title_image=merged.get("title_image", ""),
        position=merged.get("position", (0, 0)),
        type=merged.get("actuator", "hub"),
        is_entry_point=merged.get("is_entry_point", False),
        tags=set(merged.get("tags", []))
    )
def load_nodes_from_entries(location_entries):
    """Build the Node graph from raw YAML entry dicts.

    Returns a dict mapping node id -> Node. Three passes:
    1. each child entry contributes a travel action to its parent,
    2. nodes are created (children additionally get a "Back" action),
    3. each entry's own custom actions are prepended to the travel ones.
    """
    # The single entry flagged is_default supplies fallback values for
    # every NodeDescriptor field; {} when no entry is flagged.
    default = next((
        entry
        for entry in iter(location_entries)
        if entry.get("is_default", False)
    ),
        {}
    )
    entry_dict = {
        entry["id"]: entry for entry in location_entries if "id" in entry
    }
    # node_dict is shared (still empty) with the step_into closures created
    # below; it is fully populated in pass 2 before any action can run.
    node_dict = {}
    travel_actions_dict = {}
    back_action = Action(
        apply=combine_actions(
            add_trace,
            step_out
        ),
        descriptor=ActionDescriptor(title="Back")
    )
    # Pass 1: collect, per parent id, the travel actions into its children.
    for id, entry in entry_dict.items():
        parent_id = entry.get("parent_id", None)
        if parent_id:
            actions = travel_actions_dict.get(parent_id, None)
            travel_actions_dict[parent_id] = (
                actions
                if actions is not None
                else []
            ) + [travel_action_from_entry(entry, node_dict)]
    # Pass 2: create the nodes; non-root nodes also get the "Back" action.
    for id, entry in entry_dict.items():
        actions = travel_actions_dict.get(id, [])
        parent_id = entry_dict[id].get("parent_id", None)
        node_dict[id] = node_from_entry(
            entry,
            (
                actions
                if parent_id is None
                else (actions + [back_action])
            ),
            default
        )
    # Pass 3: prepend each entry's custom actions, exposing a few
    # node_dict-aware builders to the action DSL.
    for id, node in node_dict.items():
        entry = entry_dict[id]
        node.set_actions([
            action_from_entry(action_entry, {
                "step_into": lambda ids: step_into(ids, node_dict),
                "transfer": lambda ids: transfer(ids, node_dict),
                # NOTE(review): ex_tags=[] is a mutable default — harmless
                # as long as select_by_tags never mutates it; confirm.
                "by_tags": (
                    lambda tags, count=0, ex_tags=[]:
                    select_by_tags(tags, node_dict, count, ex_tags)
                ),
            })
            for action_entry in entry.get("actions", [])
        ] + node.actions)
    return node_dict
def load_entires(paths):
    """Concatenate the "entries" lists of every YAML file in *paths*.

    Note: the name is a historical misspelling of "load_entries"; it is
    kept unchanged for compatibility with existing callers.
    """
    collected = []
    for path in paths:
        collected.extend(load_yaml(path).get("entries", []))
    return collected
def load_yaml(path):
    """Parse and return a single YAML document from *path*.

    Uses safe_load so documents cannot instantiate arbitrary Python objects.
    """
    with open(path, "r") as stream:
        return yaml.safe_load(stream)
| 27.705
| 73
| 0.557661
|
4a0fda44970b731dcddc4b1d403dc14936d0cafa
| 298
|
py
|
Python
|
regtests/c++/classmethod.py
|
ahakingdom/Rusthon
|
5b6b78111b62281cd1381e53362c5d4b520ade30
|
[
"BSD-3-Clause"
] | 622
|
2015-01-01T14:53:51.000Z
|
2022-03-27T14:52:25.000Z
|
regtests/c++/classmethod.py
|
ahakingdom/Rusthon
|
5b6b78111b62281cd1381e53362c5d4b520ade30
|
[
"BSD-3-Clause"
] | 74
|
2015-01-05T01:24:09.000Z
|
2021-04-26T00:06:38.000Z
|
regtests/c++/classmethod.py
|
ahakingdom/Rusthon
|
5b6b78111b62281cd1381e53362c5d4b520ade30
|
[
"BSD-3-Clause"
] | 67
|
2015-01-18T22:54:54.000Z
|
2022-03-01T12:54:23.000Z
|
'''
class methods
'''
# NOTE(review): this is a Rusthon transpiler regression test, not runnable
# CPython — it deliberately mixes Python-3 annotations with a Python-2
# print statement, and uses `self` as the receiver name on @classmethods.
# It exercises the transpiler's classmethod handling and must stay as-is.
class A:
    def __init__(self, x:int, y:int):
        self.x = x
        self.y = y

    @classmethod
    def foo(self):
        print('my classmethod')

    @classmethod
    def bar(self, a:int) ->int:
        return a+1000

def main():
    # Classmethods must be callable through both an instance and the class.
    x = A(1,2)
    x.foo()
    A.foo()
    print x.bar( 100 )
    y = A.bar(200)
    print(y)
| 12.416667
| 34
| 0.590604
|
4a0fda8059b2740c74828f0d23d888655e363bb8
| 17,575
|
py
|
Python
|
glance/common/exception.py
|
hmakkapati/glance
|
9d6e89da2f1c001cb01b6bc6d38b5e098de92ccf
|
[
"Apache-2.0"
] | null | null | null |
glance/common/exception.py
|
hmakkapati/glance
|
9d6e89da2f1c001cb01b6bc6d38b5e098de92ccf
|
[
"Apache-2.0"
] | null | null | null |
glance/common/exception.py
|
hmakkapati/glance
|
9d6e89da2f1c001cb01b6bc6d38b5e098de92ccf
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Glance exception subclasses"""
import six
import six.moves.urllib.parse as urlparse
from glance.i18n import _
# When True, a bad printf-style substitution while formatting an exception
# message is re-raised instead of silently swallowed (see
# GlanceException.__init__). Left False in production so a broken message
# never masks the original error.
_FATAL_EXCEPTION_FORMAT_ERRORS = False
class RedirectException(Exception):
    """Signals an HTTP redirect; carries the parsed redirect target."""
    def __init__(self, url):
        # Stored as a parsed urlparse result, not the raw string.
        self.url = urlparse.urlparse(url)
class GlanceException(Exception):
    """
    Base Glance Exception

    To correctly use this class, inherit from it and define
    a 'message' property. That message will get printf'd
    with the keyword arguments provided to the constructor.
    """
    # Fallback text when a subclass defines no message of its own.
    message = _("An unknown exception occurred")

    def __init__(self, message=None, *args, **kwargs):
        if not message:
            message = self.message
        try:
            # %-interpolate the class message with the caller's kwargs.
            if kwargs:
                message = message % kwargs
        except Exception:
            # A bad format string / missing kwarg is swallowed by default so
            # the un-interpolated core message still reaches the caller.
            if _FATAL_EXCEPTION_FORMAT_ERRORS:
                raise
            else:
                # at least get the core message out if something happened
                pass
        # Keep the (possibly interpolated) message for __unicode__.
        self.msg = message
        super(GlanceException, self).__init__(message)

    def __unicode__(self):
        # NOTE(flwang): By default, self.msg is an instance of Message, which
        # can't be converted by str(). Based on the definition of
        # __unicode__, it should return unicode always.
        return six.text_type(self.msg)
# ---------------------------------------------------------------------------
# Declarative exception catalogue: each class only overrides `message`, which
# GlanceException.__init__ %-interpolates with constructor kwargs. The four
# root categories callers catch are NotFound, Duplicate, Forbidden, Invalid.
# ---------------------------------------------------------------------------
class MissingCredentialError(GlanceException):
    message = _("Missing required credential: %(required)s")


class BadAuthStrategy(GlanceException):
    message = _("Incorrect auth strategy, expected \"%(expected)s\" but "
                "received \"%(received)s\"")


class NotFound(GlanceException):
    message = _("An object with the specified identifier was not found.")


class BadStoreUri(GlanceException):
    message = _("The Store URI was malformed.")


class Duplicate(GlanceException):
    message = _("An object with the same identifier already exists.")


class Conflict(GlanceException):
    message = _("An object with the same identifier is currently being "
                "operated on.")


class StorageQuotaFull(GlanceException):
    message = _("The size of the data %(image_size)s will exceed the limit. "
                "%(remaining)s bytes remaining.")


class AuthBadRequest(GlanceException):
    message = _("Connect error/bad request to Auth service at URL %(url)s.")


class AuthUrlNotFound(GlanceException):
    message = _("Auth service at URL %(url)s not found.")


class AuthorizationFailure(GlanceException):
    message = _("Authorization failed.")


class NotAuthenticated(GlanceException):
    message = _("You are not authenticated.")


class UploadException(GlanceException):
    message = _('Image upload problem: %s')


class Forbidden(GlanceException):
    message = _("You are not authorized to complete %(action)s action.")


class ForbiddenPublicImage(Forbidden):
    message = _("You are not authorized to complete this action.")


class ProtectedImageDelete(Forbidden):
    message = _("Image %(image_id)s is protected and cannot be deleted.")


class ProtectedMetadefNamespaceDelete(Forbidden):
    message = _("Metadata definition namespace %(namespace)s is protected"
                " and cannot be deleted.")


class ProtectedMetadefNamespacePropDelete(Forbidden):
    message = _("Metadata definition property %(property_name)s is protected"
                " and cannot be deleted.")


class ProtectedMetadefObjectDelete(Forbidden):
    message = _("Metadata definition object %(object_name)s is protected"
                " and cannot be deleted.")


class ProtectedMetadefResourceTypeAssociationDelete(Forbidden):
    message = _("Metadata definition resource-type-association"
                " %(resource_type)s is protected and cannot be deleted.")


class ProtectedMetadefResourceTypeSystemDelete(Forbidden):
    message = _("Metadata definition resource-type %(resource_type_name)s is"
                " a seeded-system type and cannot be deleted.")


class ProtectedMetadefTagDelete(Forbidden):
    message = _("Metadata definition tag %(tag_name)s is protected"
                " and cannot be deleted.")


class Invalid(GlanceException):
    message = _("Data supplied was not valid.")


class InvalidSortKey(Invalid):
    message = _("Sort key supplied was not valid.")


class InvalidSortDir(Invalid):
    message = _("Sort direction supplied was not valid.")


class InvalidPropertyProtectionConfiguration(Invalid):
    message = _("Invalid configuration in property protection file.")


class InvalidSwiftStoreConfiguration(Invalid):
    message = _("Invalid configuration in glance-swift conf file.")


class InvalidFilterOperatorValue(Invalid):
    message = _("Unable to filter using the specified operator.")


class InvalidFilterRangeValue(Invalid):
    message = _("Unable to filter using the specified range.")


class InvalidOptionValue(Invalid):
    message = _("Invalid value for option %(option)s: %(value)s")


class ReadonlyProperty(Forbidden):
    message = _("Attribute '%(property)s' is read-only.")


class ReservedProperty(Forbidden):
    message = _("Attribute '%(property)s' is reserved.")


class AuthorizationRedirect(GlanceException):
    message = _("Redirecting to %(uri)s for authorization.")


class ClientConnectionError(GlanceException):
    message = _("There was an error connecting to a server")


class ClientConfigurationError(GlanceException):
    message = _("There was an error configuring the client.")


class MultipleChoices(GlanceException):
    message = _("The request returned a 302 Multiple Choices. This generally "
                "means that you have not included a version indicator in a "
                "request URI.\n\nThe body of response returned:\n%(body)s")
class LimitExceeded(GlanceException):
    """HTTP 413: a rate limit or quota was breached."""
    message = _("The request returned a 413 Request Entity Too Large. This "
                "generally means that rate limiting or a quota threshold was "
                "breached.\n\nThe response body:\n%(body)s")

    def __init__(self, *args, **kwargs):
        # Optional 'retry' kwarg mirrors the HTTP Retry-After header;
        # exposed as int seconds, or None when absent/falsy.
        self.retry_after = (int(kwargs['retry']) if kwargs.get('retry')
                            else None)
        super(LimitExceeded, self).__init__(*args, **kwargs)


class ServiceUnavailable(GlanceException):
    """HTTP 503: transient overload/outage."""
    message = _("The request returned 503 Service Unavailable. This "
                "generally occurs on service overload or other transient "
                "outage.")

    def __init__(self, *args, **kwargs):
        # Same optional 'retry' -> retry_after handling as LimitExceeded.
        self.retry_after = (int(kwargs['retry']) if kwargs.get('retry')
                            else None)
        super(ServiceUnavailable, self).__init__(*args, **kwargs)
# --- HTTP/client, worker, schema, image-quota and task errors --------------
class ServerError(GlanceException):
    message = _("The request returned 500 Internal Server Error.")


class UnexpectedStatus(GlanceException):
    message = _("The request returned an unexpected status: %(status)s."
                "\n\nThe response body:\n%(body)s")


class InvalidContentType(GlanceException):
    message = _("Invalid content type %(content_type)s")


class BadRegistryConnectionConfiguration(GlanceException):
    message = _("Registry was not configured correctly on API server. "
                "Reason: %(reason)s")


class BadDriverConfiguration(GlanceException):
    message = _("Driver %(driver_name)s could not be configured correctly. "
                "Reason: %(reason)s")


class MaxRedirectsExceeded(GlanceException):
    message = _("Maximum redirects (%(redirects)s) was exceeded.")


class InvalidRedirect(GlanceException):
    message = _("Received invalid HTTP redirect.")


class NoServiceEndpoint(GlanceException):
    message = _("Response from Keystone does not contain a Glance endpoint.")


class RegionAmbiguity(GlanceException):
    message = _("Multiple 'image' service matches for region %(region)s. This "
                "generally means that a region is required and you have not "
                "supplied one.")


class WorkerCreationFailure(GlanceException):
    message = _("Server worker creation failed: %(reason)s.")


class SchemaLoadError(GlanceException):
    message = _("Unable to load schema: %(reason)s")


class InvalidObject(GlanceException):
    message = _("Provided object does not match schema "
                "'%(schema)s': %(reason)s")


class ImageSizeLimitExceeded(GlanceException):
    message = _("The provided image is too large.")


# The *LimitExceeded image subclasses inherit retry_after handling.
class ImageMemberLimitExceeded(LimitExceeded):
    message = _("The limit has been exceeded on the number of allowed image "
                "members for this image. Attempted: %(attempted)s, "
                "Maximum: %(maximum)s")


class ImagePropertyLimitExceeded(LimitExceeded):
    message = _("The limit has been exceeded on the number of allowed image "
                "properties. Attempted: %(attempted)s, Maximum: %(maximum)s")


class ImageTagLimitExceeded(LimitExceeded):
    message = _("The limit has been exceeded on the number of allowed image "
                "tags. Attempted: %(attempted)s, Maximum: %(maximum)s")


class ImageLocationLimitExceeded(LimitExceeded):
    message = _("The limit has been exceeded on the number of allowed image "
                "locations. Attempted: %(attempted)s, Maximum: %(maximum)s")


class SIGHUPInterrupt(GlanceException):
    message = _("System SIGHUP signal received.")


class RPCError(GlanceException):
    message = _("%(cls)s exception was raised in the last rpc call: %(val)s")


class TaskException(GlanceException):
    message = _("An unknown task exception occurred")


class BadTaskConfiguration(GlanceException):
    message = _("Task was not configured properly")


class ImageNotFound(NotFound):
    message = _("Image with the given id %(image_id)s was not found")


# Task errors multiply-inherit so callers can catch either the task family
# or the generic NotFound/Invalid category.
class TaskNotFound(TaskException, NotFound):
    message = _("Task with the given id %(task_id)s was not found")


class InvalidTaskStatus(TaskException, Invalid):
    message = _("Provided status of task is unsupported: %(status)s")


class InvalidTaskType(TaskException, Invalid):
    message = _("Provided type of task is unsupported: %(type)s")


class InvalidTaskStatusTransition(TaskException, Invalid):
    message = _("Status transition from %(cur_status)s to"
                " %(new_status)s is not allowed")


class ImportTaskError(TaskException, Invalid):
    message = _("An import task exception occurred")


class DuplicateLocation(Duplicate):
    message = _("The location %(location)s already exists")


class InvalidParameterValue(Invalid):
    message = _("Invalid value '%(value)s' for parameter '%(param)s': "
                "%(extra_msg)s")


class InvalidImageStatusTransition(Invalid):
    message = _("Image status transition from %(cur_status)s to"
                " %(new_status)s is not allowed")
# --- Metadata-definition (metadef) errors ----------------------------------
class MetadefDuplicateNamespace(Duplicate):
    message = _("The metadata definition namespace=%(namespace_name)s"
                " already exists.")


class MetadefDuplicateObject(Duplicate):
    message = _("A metadata definition object with name=%(object_name)s"
                " already exists in namespace=%(namespace_name)s.")


class MetadefDuplicateProperty(Duplicate):
    message = _("A metadata definition property with name=%(property_name)s"
                " already exists in namespace=%(namespace_name)s.")


class MetadefDuplicateResourceType(Duplicate):
    message = _("A metadata definition resource-type with"
                " name=%(resource_type_name)s already exists.")


class MetadefDuplicateResourceTypeAssociation(Duplicate):
    message = _("The metadata definition resource-type association of"
                " resource-type=%(resource_type_name)s to"
                " namespace=%(namespace_name)s"
                " already exists.")


class MetadefDuplicateTag(Duplicate):
    message = _("A metadata tag with name=%(name)s"
                " already exists in namespace=%(namespace_name)s."
                " (Please note that metadata tag names are"
                " case insensitive).")


class MetadefForbidden(Forbidden):
    message = _("You are not authorized to complete this action.")


class MetadefIntegrityError(Forbidden):
    message = _("The metadata definition %(record_type)s with"
                " name=%(record_name)s not deleted."
                " Other records still refer to it.")


class MetadefNamespaceNotFound(NotFound):
    message = _("Metadata definition namespace=%(namespace_name)s"
                " was not found.")


class MetadefObjectNotFound(NotFound):
    message = _("The metadata definition object with"
                " name=%(object_name)s was not found in"
                " namespace=%(namespace_name)s.")


class MetadefPropertyNotFound(NotFound):
    message = _("The metadata definition property with"
                " name=%(property_name)s was not found in"
                " namespace=%(namespace_name)s.")


class MetadefResourceTypeNotFound(NotFound):
    message = _("The metadata definition resource-type with"
                " name=%(resource_type_name)s, was not found.")


class MetadefResourceTypeAssociationNotFound(NotFound):
    message = _("The metadata definition resource-type association of"
                " resource-type=%(resource_type_name)s to"
                " namespace=%(namespace_name)s,"
                " was not found.")


class MetadefTagNotFound(NotFound):
    message = _("The metadata definition tag with"
                " name=%(name)s was not found in"
                " namespace=%(namespace_name)s.")


class InvalidVersion(Invalid):
    message = _("Version is invalid: %(reason)s")


class InvalidArtifactTypePropertyDefinition(Invalid):
    message = _("Invalid property definition")


class InvalidArtifactTypeDefinition(Invalid):
    message = _("Invalid type definition")
class InvalidArtifactPropertyValue(Invalid):
    """Raised when an artifact property is assigned a disallowed value."""
    message = _("Property '%(name)s' may not have value '%(val)s': %(msg)s")

    def __init__(self, message=None, *args, **kwargs):
        super(InvalidArtifactPropertyValue, self).__init__(message, *args,
                                                           **kwargs)
        # Expose the offending property name/value for programmatic access.
        self.name = kwargs.get('name')
        self.value = kwargs.get('val')
# --- Artifact and JSON-patch errors ----------------------------------------
class ArtifactNotFound(NotFound):
    message = _("Artifact with id=%(id)s was not found")


class ArtifactForbidden(Forbidden):
    message = _("Artifact with id=%(id)s is not accessible")


class ArtifactDuplicateNameTypeVersion(Duplicate):
    message = _("Artifact with the specified type, name and version"
                " already exists")


class InvalidArtifactStateTransition(Invalid):
    message = _("Artifact cannot change state from %(source)s to %(target)s")


class ArtifactDuplicateDirectDependency(Duplicate):
    message = _("Artifact with the specified type, name and version"
                " already has the direct dependency=%(dep)s")


class ArtifactDuplicateTransitiveDependency(Duplicate):
    message = _("Artifact with the specified type, name and version"
                " already has the transitive dependency=%(dep)s")


class ArtifactCircularDependency(Invalid):
    message = _("Artifact with a circular dependency can not be created")


class ArtifactUnsupportedPropertyOperator(Invalid):
    message = _("Operator %(op)s is not supported")


class ArtifactUnsupportedShowLevel(Invalid):
    message = _("Show level %(shl)s is not supported in this operation")


class ArtifactPropertyValueNotFound(NotFound):
    message = _("Property's %(prop)s value has not been found")


class ArtifactInvalidProperty(Invalid):
    message = _("Artifact has no property %(prop)s")


class ArtifactInvalidPropertyParameter(Invalid):
    message = _("Cannot use this parameter with the operator %(op)s")


class ArtifactLoadError(GlanceException):
    message = _("Cannot load artifact '%(name)s'")


class ArtifactNonMatchingTypeName(ArtifactLoadError):
    message = _("Plugin name '%(plugin)s' should match "
                "artifact typename '%(name)s'")


class ArtifactPluginNotFound(NotFound):
    message = _("No plugin for '%(name)s' has been loaded")


class UnknownArtifactType(NotFound):
    message = _("Artifact type with name '%(name)s' and version '%(version)s' "
                "is not known")


class ArtifactInvalidStateTransition(Invalid):
    message = _("Artifact state cannot be changed from %(curr)s to %(to)s")


class JsonPatchException(GlanceException):
    message = _("Invalid jsonpatch request")


class InvalidJsonPatchBody(JsonPatchException):
    message = _("The provided body %(body)s is invalid "
                "under given schema: %(schema)s")
class InvalidJsonPatchPath(JsonPatchException):
    """Raised for a malformed JSON-patch path; keeps the explanation."""
    message = _("The provided path '%(path)s' is invalid: %(explanation)s")

    def __init__(self, message=None, *args, **kwargs):
        # Keep the human-readable explanation available to error handlers.
        self.explanation = kwargs.get("explanation")
        super(InvalidJsonPatchPath, self).__init__(message, *args, **kwargs)
| 31.383929
| 79
| 0.689331
|
4a0fdb3de9430e7e755b8253ddb7801bb34288bf
| 2,696
|
py
|
Python
|
app/tests/exo_currency/test_commands.py
|
jcazallasc/exo-investing
|
c32900dd2fd426a15f3b02389d75b51427e5df66
|
[
"MIT"
] | null | null | null |
app/tests/exo_currency/test_commands.py
|
jcazallasc/exo-investing
|
c32900dd2fd426a15f3b02389d75b51427e5df66
|
[
"MIT"
] | null | null | null |
app/tests/exo_currency/test_commands.py
|
jcazallasc/exo-investing
|
c32900dd2fd426a15f3b02389d75b51427e5df66
|
[
"MIT"
] | null | null | null |
import csv
from unittest.mock import patch
from django.core.management import call_command
from django.test import TestCase
from exo_currency.models import Currency, CurrencyExchangeRate, Provider
from exo_currency.utils import CURRENCY_EXCHANGER_PROVIDERS
class CommandsTestCase(TestCase):
    """Integration tests for the load_currencies / load_exchange_rates /
    load_providers management commands, including idempotency (running each
    command twice must not raise or duplicate rows)."""

    def _get_num_lines_from_csv(self, filename):
        """Return the number of lines in the CSV file inside commands folder"""
        # 'with' closes the handle even if reading fails; the original
        # opened the file and never closed it (leaked one fd per call).
        with open('/app/exo_currency/management/commands/' + filename) as _file:
            return len(list(csv.reader(_file)))

    def test_load_currencies_from_csv(self):
        """Test load currencies from CSV file"""
        filename = 'currencies.csv'
        call_command('load_currencies', filename)
        num_currencies = Currency.objects.all().count()
        # +1 accounts for the CSV header row.
        self.assertEqual(num_currencies + 1, self._get_num_lines_from_csv(filename))

    def test_load_currencies_from_csv_twice(self):
        """Test load currencies from CSV file twice to check no errors raise"""
        filename = 'currencies.csv'
        call_command('load_currencies', filename)
        call_command('load_currencies', filename)
        num_currencies = Currency.objects.all().count()
        self.assertEqual(num_currencies + 1, self._get_num_lines_from_csv(filename))

    def test_load_exchange_rates_from_csv(self):
        """Test load exchange rates from CSV file"""
        filename = 'data.csv'
        call_command('load_exchange_rates', filename)
        num_exchange_rates = CurrencyExchangeRate.objects.all().count()
        self.assertEqual(num_exchange_rates + 1, self._get_num_lines_from_csv(filename))

    def test_load_exchange_rates_from_csv_twice(self):
        """Test load exchange rates from CSV file twice to check no errors raise"""
        filename = 'data.csv'
        call_command('load_exchange_rates', filename)
        call_command('load_exchange_rates', filename)
        num_exchange_rates = CurrencyExchangeRate.objects.all().count()
        self.assertEqual(num_exchange_rates + 1, self._get_num_lines_from_csv(filename))

    def test_load_providers(self):
        """Test load providers creates one row per configured provider"""
        call_command('load_providers')
        num_providers = Provider.objects.all().count()
        self.assertEqual(num_providers, len(CURRENCY_EXCHANGER_PROVIDERS))

    def test_load_providers_twice(self):
        """Test load providers twice to check no errors raise"""
        call_command('load_providers')
        call_command('load_providers')
        num_providers = Provider.objects.all().count()
        self.assertEqual(num_providers, len(CURRENCY_EXCHANGER_PROVIDERS))
| 31.717647
| 88
| 0.714021
|
4a0fdbda8ca018d1ccbaef2d1222e7316ca1f2b9
| 5,212
|
py
|
Python
|
tests/settings.py
|
danielmcquillen/django-machina
|
08aa3a58a7889b0d5a5bcd6c24965c524762c7a6
|
[
"BSD-3-Clause"
] | null | null | null |
tests/settings.py
|
danielmcquillen/django-machina
|
08aa3a58a7889b0d5a5bcd6c24965c524762c7a6
|
[
"BSD-3-Clause"
] | 2
|
2021-06-02T00:29:11.000Z
|
2021-09-01T23:02:30.000Z
|
tests/settings.py
|
danielmcquillen/django-machina
|
08aa3a58a7889b0d5a5bcd6c24965c524762c7a6
|
[
"BSD-3-Clause"
] | null | null | null |
import os
from django import VERSION as DJANGO_VERSION
from machina import MACHINA_MAIN_STATIC_DIR, MACHINA_MAIN_TEMPLATE_DIR
class DisableMigrations(object):
    """Mapping stand-in for MIGRATION_MODULES that disables migrations.

    Claiming to contain every app label and answering 'nomigrations' (a
    non-existent module path) makes older Django versions skip migrations
    and create test tables directly from the current models.
    """
    def __contains__(self, item):
        return True

    def __getitem__(self, item):
        return 'nomigrations'
# Absolute path of the directory containing this settings module.
TEST_ROOT = os.path.abspath(os.path.dirname(__file__))


def location(x):
    """Return the absolute path of *x* relative to the test root.

    Replaces the original ``location = lambda x: ...`` assignment
    (PEP 8 E731) with an equivalent named function; callers are unaffected.
    """
    return os.path.join(TEST_ROOT, x)


DEBUG = False
TEMPLATE_DEBUG = False
# Database backend selected via the DB environment variable ('sqlite' default).
DB_CONFIG = os.environ.get('DB', 'sqlite')
if DB_CONFIG == 'sqlite':
    # Default: fast, throwaway in-memory SQLite database.
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': ':memory:'
        }
    }
    MIGRATION_MODULES = DisableMigrations() if DJANGO_VERSION < (1, 11) else {}
elif DB_CONFIG == 'postgres':
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.postgresql_psycopg2',
            'NAME': 'machina_test',
            'USER': 'postgres',
        }
    }
elif DB_CONFIG == 'mysql':
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.mysql',
            'NAME': 'machina_test',
            'USER': 'root',
            'TEST': {
                'CHARSET': 'utf8mb4',
                'COLLATION': 'utf8mb4_general_ci',
            },
        }
    }
# Template engine configuration: test-site templates take precedence over the
# bundled machina templates.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': (
            location('_testsite/templates'),
            MACHINA_MAIN_TEMPLATE_DIR,
        ),
        'OPTIONS': {
            'context_processors': [
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.contrib.messages.context_processors.messages',
                'django.template.context_processors.request',
                # Machina
                'machina.core.context_processors.metadata',
            ],
            'loaders': [
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader',
            ]
        },
    },
]
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.admin',
    'django.contrib.contenttypes',
    'django.contrib.messages',
    'django.contrib.sessions',
    'django.contrib.sites',
    'mptt',
    'haystack',
    'widget_tweaks',
    'tests',
    # Machina apps.
    'machina',
    'machina.apps.forum',
    'machina.apps.forum_conversation.forum_attachments',
    'machina.apps.forum_conversation.forum_polls',
    'machina.apps.forum_feeds',
    'machina.apps.forum_moderation',
    'machina.apps.forum_search',
    'machina.apps.forum_tracking',
    'machina.apps.forum_member',
    'machina.apps.forum_permission',
    'tests._testsite.apps.forum_conversation',
)
SITE_ID = 1
ROOT_URLCONF = 'tests._testsite.urls'
# Django 1.10 renamed MIDDLEWARE_CLASSES to MIDDLEWARE; define whichever
# setting the running Django version understands.
if DJANGO_VERSION >= (1, 10):
    MIDDLEWARE = (
        'django.middleware.common.CommonMiddleware',
        'django.contrib.sessions.middleware.SessionMiddleware',
        'django.middleware.csrf.CsrfViewMiddleware',
        'django.contrib.auth.middleware.AuthenticationMiddleware',
        'django.contrib.messages.middleware.MessageMiddleware',
        'django.middleware.gzip.GZipMiddleware',
        'django.middleware.clickjacking.XFrameOptionsMiddleware',
        # Machina
        'machina.apps.forum_permission.middleware.ForumPermissionMiddleware',
    )
else:
    MIDDLEWARE_CLASSES = (
        'django.middleware.common.CommonMiddleware',
        'django.contrib.sessions.middleware.SessionMiddleware',
        'django.middleware.csrf.CsrfViewMiddleware',
        'django.contrib.auth.middleware.AuthenticationMiddleware',
        'django.contrib.messages.middleware.MessageMiddleware',
        'django.middleware.gzip.GZipMiddleware',
        'django.middleware.clickjacking.XFrameOptionsMiddleware',
        # Machina
        'machina.apps.forum_permission.middleware.ForumPermissionMiddleware',
    )
# NOTE(review): Django documents ADMINS as a sequence of (name, email)
# pairs; a bare email string only works while nothing mails admins — confirm.
ADMINS = ('admin@example.com',)
MEDIA_ROOT = os.path.join(TEST_ROOT, '_testdata/media/')
STATIC_ROOT = os.path.join(TEST_ROOT, '_testdata/static/')
STATICFILES_DIRS = (
    MACHINA_MAIN_STATIC_DIR,
)
# MD5 hashing keeps test-user creation fast (never use outside tests).
PASSWORD_HASHERS = ['django.contrib.auth.hashers.MD5PasswordHasher']
LOGIN_REDIRECT_URL = '/accounts/'
STATIC_URL = '/static/'
# Whoosh provides a dependency-free, file-based search backend for tests.
HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
        'PATH': os.path.join(TEST_ROOT, '_testdata/whoosh_index'),
    },
}
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
    },
    # Machina uses a dedicated cache for attachment uploads.
    'machina_attachments': {
        'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
        'LOCATION': '/tmp',
    }
}
# Setting this explicitly prevents Django 1.7+ from showing a
# warning regarding a changed default test runner. The test
# suite is run with py.test, so it does not matter.
SILENCED_SYSTEM_CHECKS = ['1_6.W001']
SECRET_KEY = 'key'
# Optional developer-local overrides.
try:
    from .settings_local import *  # noqa
except ImportError:
    pass
| 28.79558
| 79
| 0.640445
|
4a0fdbfe4bd90eaf6070566ae89dab780049f092
| 1,493
|
py
|
Python
|
javatool/search.py
|
tengge1/javatool
|
44caa18b4b4c0222fe3ed629c88f17627013cf78
|
[
"Apache-2.0"
] | 2
|
2017-04-10T14:04:12.000Z
|
2020-08-16T04:07:15.000Z
|
javatool/search.py
|
tengge1/javatool
|
44caa18b4b4c0222fe3ed629c88f17627013cf78
|
[
"Apache-2.0"
] | null | null | null |
javatool/search.py
|
tengge1/javatool
|
44caa18b4b4c0222fe3ed629c88f17627013cf78
|
[
"Apache-2.0"
] | null | null | null |
import appuifw,os
class Search(object):
    """Search .class files under a chosen directory for a byte pattern,
    entered either as UTF-8 text or as a hexadecimal string.

    Python 2 / Symbian PyS60 code (appuifw UI, os.path.walk,
    str.encode("hex")); it will not run on Python 3.
    """
    def __init__(self,super):
        # NOTE(review): the parameter named ``super`` is the owning
        # controller object (shadows the builtin); renaming touches callers.
        self.cn=super.cn
        self.manager=super.manager
        self.add=super.add
        self.result=[]
        # 0 = search the raw bytes as UTF-8 text, 1 = search the hex dump.
        self.index=appuifw.popup_menu([self.cn("Utf8编码"),self.cn("十六进制")])
        if self.index!=None:
            self.path=self.manager.AskUser("e:\\data\\tengge\\","dir")
            if self.path:
                self.path=self.path.encode("u8")
                self.word=appuifw.query(self.cn("输入查找内容:"),"text")
                if self.word:
                    # os.path.walk exists only in Python 2 (removed in 3.x).
                    os.path.walk(self.path,self.walk,None)
                    self.show()
    def walk(self,x,path,list):
        # Directory visitor: scan every .class file in this directory and
        # record (path, offset) for each match into self.result.
        if not path.endswith("\\"):
            path+="\\"
        for i in list:
            p=path+i
            if os.path.isfile(p) and p.endswith(".class"):
                self.add("正在查找"+i)
                pos=-1
                f=open(p,"rb")
                if self.index==0:
                    pos=f.read().find(self.word.encode("u8"))
                else:
                    # .encode("hex") is the Python 2 bytes-to-hex codec.
                    pos=f.read().encode("hex").find(self.word.encode("u8"))
                f.close()
                if pos!=-1:
                    self.result.append((p,pos))
    def show(self):
        # Report each match as "relative-path 位置:offset", or a not-found note.
        if self.result==[]:
            self.add("未找到相关信息")
        else:
            self.add(self.word.encode("u8")+"查找结果:")
            for i in self.result:
                self.add(i[0].replace(self.path,"")+" 位置:"+str(i[1]))
| 33.931818
| 75
| 0.45211
|
4a0fdc481c5a936d2863ba60f31ce7a87ac8d13b
| 2,576
|
py
|
Python
|
exampdftomindmap.py
|
synsandacks/CiscoExamPDFtoMindmap
|
fff0a10bcf18a2a2075e770b2305b038b1375de4
|
[
"MIT"
] | 1
|
2022-02-10T09:31:50.000Z
|
2022-02-10T09:31:50.000Z
|
exampdftomindmap.py
|
synsandacks/CiscoExamPDFtoMindmap
|
fff0a10bcf18a2a2075e770b2305b038b1375de4
|
[
"MIT"
] | null | null | null |
exampdftomindmap.py
|
synsandacks/CiscoExamPDFtoMindmap
|
fff0a10bcf18a2a2075e770b2305b038b1375de4
|
[
"MIT"
] | 1
|
2022-02-09T21:19:38.000Z
|
2022-02-09T21:19:38.000Z
|
import PyPDF2
import re
# Function that extracts the text from the supplied PDF and return the contents as a massive string.
def pdftotext(pdffile):
    """Extract and clean the text of a Cisco exam-topics PDF.

    Concatenates the text of every page, drops the legal preamble (everything
    up to 'any time without notice.'), removes newlines and the per-page
    footer, and strips surrounding spaces.

    :param pdffile: path of the PDF file to read.
    :return: the cleaned text as one string.
    """
    # Use a context manager so the file handle is closed even on error.
    with open(pdffile, 'rb') as pdf_handle:
        reader = PyPDF2.PdfFileReader(pdf_handle)
        text = ''
        for page_index in range(reader.numPages):
            text += reader.getPage(page_index).extractText()
    # Performing some clean up on the provided file.
    text = text.split('any time without notice.')[1]
    footer_pattern = r'\d\d\d\d Cisco Systems, Inc. This document is Cisco Public. Page \d'
    text = text.replace('\n', '')
    text = re.sub(footer_pattern, '', text)
    # BUG FIX: str.strip returns a new string; the original discarded the
    # result, making the call a no-op.
    text = text.strip(' ')
    return text
# Function that takes a list of text ex. ['this', 'is', 'how', 'the', 'data', 'would', 'look']
# and iterate over that list to return a new list that groups exam objectives properly.
def objectiveBuilder(textList):
    """Group a whitespace-split topic list into one string per exam objective.

    A token matching a weight ('NN%') or a section number ('N.N') starts a new
    objective; subsequent tokens are appended until the next marker. A lone
    trailing word is attached to the last objective. The input list is
    consumed (mutated) as a side effect.
    """
    marker = re.compile(r'\d\d%|\d\.\d')
    next_marker = re.compile(r'\d\d%|\d\.[1-9]')
    objectives = []
    while len(textList) > 1:
        if marker.match(textList[0]):
            parts = [textList.pop(0)]
            while len(textList) > 1 and not next_marker.match(textList[0]):
                parts.append(textList.pop(0))
            objectives.append(' '.join(parts))
            # The very last token of the list belongs to the objective just
            # emitted when it is not itself a marker.
            if not marker.match(textList[0]):
                objectives[-1] += f' {textList[0]}'
                textList = []
    return objectives
# Function to generate the md file leveraging the provided list from objectiveBuilder.
# Takes the exam string to be used as the top level of the mind map, the list to generate the rest of the mindmap
# and a string to be used for naming the output file.
def makemd(exam, list, outfile):
    """Write the objectives as a Markdown mind map file.

    The exam title is the top level ('#'); each objective's heading depth
    mirrors its numbering scheme: 'N.0' -> '##', 'N.M ' -> '###',
    'N.M.x' -> '####'.

    :param exam: string used as the top level of the mind map.
    :param list: objective strings (name kept for backward compatibility
        even though it shadows the builtin).
    :param outfile: path of the Markdown file to create.
    """
    with open(outfile, 'w') as f:
        f.write(f'# {exam}\n')
        for objective in list:
            if re.search(r'\d\.0', objective):
                f.write(f'## {objective}\n')
            if re.search(r'\d\.[1-9]\s', objective):
                f.write(f'### {objective}\n')
            if re.search(r'\d\.\d\.[a-zA-Z]', objective):
                f.write(f'#### {objective}\n')
    # FIX: dropped the redundant f.close() — the ``with`` block already
    # closes the file.
def main():
    """Entry point: convert the bundled CCNA 200-301 exam-topics PDF into a
    Markdown mind map file in the working directory."""
    # Input PDF, output file and exam title are hard-coded for this exam.
    pdf = 'pdfs\\200-301-CCNA.pdf'
    outFile = '200-301-CCNA.md'
    exam = 'CCNA Exam v1.0 (CCNA 200-301)'
    pdfText = pdftotext(pdf)
    pdfText = pdfText.split()
    objectives = objectiveBuilder(pdfText)
    makemd(exam, objectives, outFile)
# Allow running the converter directly as a script.
if __name__ == '__main__':
    main()
| 35.287671
| 113
| 0.590062
|
4a0fdd31be5159529d0421125b07cb697c8b915c
| 39
|
py
|
Python
|
octoprint_marlin_flasher/validation/validator_error.py
|
AvanOsch/OctoPrint-Marlin-Flasher
|
acc294d3eeb081f6967136389740c60a75c21f28
|
[
"MIT"
] | 41
|
2019-04-02T23:05:19.000Z
|
2022-03-31T09:39:08.000Z
|
octoprint_marlin_flasher/validation/validator_error.py
|
AvanOsch/OctoPrint-Marlin-Flasher
|
acc294d3eeb081f6967136389740c60a75c21f28
|
[
"MIT"
] | 82
|
2019-04-02T14:50:42.000Z
|
2022-03-26T05:33:55.000Z
|
octoprint_marlin_flasher/validation/validator_error.py
|
AvanOsch/OctoPrint-Marlin-Flasher
|
acc294d3eeb081f6967136389740c60a75c21f28
|
[
"MIT"
] | 9
|
2019-10-16T08:16:11.000Z
|
2022-01-19T10:59:44.000Z
|
class ValidatorError(Exception):
    """Base exception raised when a validator rejects its input."""
| 13
| 32
| 0.820513
|
4a0fde47710e207e52c2e44e4db5cb8f953966a1
| 7,468
|
py
|
Python
|
sahara/utils/cluster_progress_ops.py
|
esikachev/scenario
|
40a59114c7bac44fea510767a3c07d73649f4caf
|
[
"Apache-2.0"
] | null | null | null |
sahara/utils/cluster_progress_ops.py
|
esikachev/scenario
|
40a59114c7bac44fea510767a3c07d73649f4caf
|
[
"Apache-2.0"
] | null | null | null |
sahara/utils/cluster_progress_ops.py
|
esikachev/scenario
|
40a59114c7bac44fea510767a3c07d73649f4caf
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from oslo_config import cfg
from oslo_utils import excutils
from oslo_utils import timeutils
import six
from sahara import conductor as c
from sahara.conductor import resource
from sahara import context
from sahara.utils import general as g
conductor = c.API
CONF = cfg.CONF
# Operators can disable the whole event-log feature with this option.
event_log_opts = [
    cfg.BoolOpt('disable_event_log',
                default=False,
                help="Disables event log feature.")
]
CONF.register_opts(event_log_opts)
def add_successful_event(instance):
    """Record a successful event for *instance* on its cluster's current
    provisioning step, then refresh step counters (no-op without a step)."""
    cluster_id = instance.cluster_id
    step_id = get_current_provisioning_step(cluster_id)
    if step_id:
        conductor.cluster_event_add(context.ctx(), step_id, {
            'successful': True,
            'node_group_id': instance.node_group_id,
            'instance_id': instance.instance_id,
            'instance_name': instance.instance_name,
            'event_info': None,
        })
        update_provisioning_steps(cluster_id)
def add_fail_event(instance, exception):
    """Record a failed event for *instance* on its cluster's current
    provisioning step, storing the exception text as the event info."""
    cluster_id = instance.cluster_id
    step_id = get_current_provisioning_step(cluster_id)
    event_info = six.text_type(exception)
    if step_id:
        conductor.cluster_event_add(context.ctx(), step_id, {
            'successful': False,
            'node_group_id': instance.node_group_id,
            'instance_id': instance.instance_id,
            'instance_name': instance.instance_name,
            'event_info': event_info,
        })
        update_provisioning_steps(cluster_id)
def add_provisioning_step(cluster_id, step_name, total):
    """Open a new provisioning step expecting *total* events; returns the
    created step (None when logging is disabled or the cluster is gone)."""
    if CONF.disable_event_log or not g.check_cluster_exists(cluster_id):
        return
    # Finalize any outstanding steps before opening the new one.
    update_provisioning_steps(cluster_id)
    return conductor.cluster_provision_step_add(context.ctx(), cluster_id, {
        'step_name': step_name,
        'completed': 0,
        'total': total,
        'started_at': timeutils.utcnow(),
    })
def get_current_provisioning_step(cluster_id):
    """Return the id of the first still-running provisioning step of the
    cluster, or None when none is active or event logging is disabled."""
    if CONF.disable_event_log or not g.check_cluster_exists(cluster_id):
        return None
    update_provisioning_steps(cluster_id)
    ctx = context.ctx()
    cluster = conductor.cluster_get(ctx, cluster_id)
    for step in cluster.provision_progress:
        # successful is None while the step has not been finalized yet.
        if step.successful is not None:
            continue
        return step.id
    return None
def update_provisioning_steps(cluster_id):
    """Recompute completion state for every unfinished provisioning step.

    A step becomes failed as soon as any of its events failed, and successful
    once all 'total' events succeeded; events of successful steps are removed
    since they are no longer needed.
    """
    if CONF.disable_event_log or not g.check_cluster_exists(cluster_id):
        return
    ctx = context.ctx()
    cluster = conductor.cluster_get(ctx, cluster_id)
    for step in cluster.provision_progress:
        # Skip steps that were already finalized.
        if step.successful is not None:
            continue
        has_failed = False
        successful_events_count = 0
        events = conductor.cluster_provision_step_get_events(
            ctx, step.id)
        for event in events:
            if event.successful:
                successful_events_count += 1
            else:
                has_failed = True
        successful = None
        if has_failed:
            successful = False
        elif successful_events_count == step.total:
            successful = True
        completed_at = None
        # Stamp completion time only the first time the step finishes.
        if successful and not step.completed_at:
            completed_at = timeutils.utcnow()
        conductor.cluster_provision_step_update(ctx, step.id, {
            'completed': successful_events_count,
            'successful': successful,
            'completed_at': completed_at,
        })
        if successful:
            # Per-event records are garbage once the step succeeded.
            conductor.cluster_provision_step_remove_events(
                ctx, step.id)
def get_cluster_events(cluster_id, provision_step=None):
    """Return the events of one provisioning step, or of every step of the
    cluster when *provision_step* is None (empty list when logging is off)."""
    if CONF.disable_event_log or not g.check_cluster_exists(cluster_id):
        return []
    update_provisioning_steps(cluster_id)
    if provision_step:
        return conductor.cluster_provision_step_get_events(
            context.ctx(), provision_step)
    else:
        cluster = conductor.cluster_get(context.ctx(), cluster_id)
        events = []
        for step in cluster['provision_progress']:
            step_id = step['id']
            events += conductor.cluster_provision_step_get_events(
                context.ctx(), step_id)
        return events
def event_wrapper(mark_successful_on_exit, **spec):
    """General event-log wrapper

    (Fixed a stray fourth quote at the start of this docstring.)

    :param mark_successful_on_exit: should we send success event
        after execution of function
    :param spec: extra specification
    :parameter step: provisioning step name (only for provisioning
        steps with only one event)
    :parameter param: tuple (name, pos) with parameter specification,
        where 'name' is the name of the parameter of function, 'pos' is the
        position of the parameter of function. This parameter is used to
        extract info about Instance or Cluster.
    """
    def decorator(func):
        @functools.wraps(func)
        def handler(*args, **kwargs):
            # Event logging disabled: call straight through.
            if CONF.disable_event_log:
                return func(*args, **kwargs)
            step_name = spec.get('step', None)
            instance = _find_in_args(spec, *args, **kwargs)
            cluster_id = instance.cluster_id
            if not g.check_cluster_exists(cluster_id):
                return func(*args, **kwargs)
            if step_name:
                # It's single process, let's add provisioning step here
                add_provisioning_step(cluster_id, step_name, 1)
            try:
                value = func(*args, **kwargs)
            except Exception as e:
                # Log the failure, then re-raise the original exception.
                with excutils.save_and_reraise_exception():
                    add_fail_event(instance, e)
            if mark_successful_on_exit:
                add_successful_event(instance)
            return value
        return handler
    return decorator
def _get_info_from_instance(arg):
    """Return *arg* itself when it is an InstanceResource, otherwise None."""
    return arg if isinstance(arg, resource.InstanceResource) else None
def _get_info_from_cluster(arg):
    """Build InstanceInfo from a ClusterResource, or return None."""
    return context.InstanceInfo(arg.id) if isinstance(
        arg, resource.ClusterResource) else None
def _get_info_from_obj(arg):
    """Extract instance info from *arg*, trying the instance extractor first
    and the cluster extractor second; None when neither applies."""
    for extractor in (_get_info_from_instance, _get_info_from_cluster):
        info = extractor(arg)
        if info:
            return info
    return None
def _find_in_args(spec, *args, **kwargs):
    """Locate the instance info a wrapped call refers to.

    Resolution order: the explicit ('name', pos) 'param' spec, then any
    InstanceResource among positional or keyword arguments, finally the
    context's current instance info.
    """
    param_values = spec.get('param', None)
    if param_values:
        p_name, p_pos = param_values
        obj = kwargs.get(p_name, None)
        if obj:
            return _get_info_from_obj(obj)
        return _get_info_from_obj(args[p_pos])
    # If param is not specified, let's search instance in args
    for arg in args:
        val = _get_info_from_instance(arg)
        if val:
            return val
    for arg in kwargs.values():
        val = _get_info_from_instance(arg)
        if val:
            return val
    # If instance not found in args, let's get instance info from context
    return context.ctx().current_instance_info
| 29.752988
| 76
| 0.659347
|
4a0fdef657def203c91c790af2ca22a275825af6
| 485
|
py
|
Python
|
src/kol/request/MeatOrchidRequest.py
|
danheath/temppykol
|
7f9621b44df9f9d2d9fc0a5b2a06db116b9ccfab
|
[
"BSD-3-Clause"
] | 19
|
2015-02-16T08:30:49.000Z
|
2020-05-01T06:06:33.000Z
|
src/kol/request/MeatOrchidRequest.py
|
danheath/temppykol
|
7f9621b44df9f9d2d9fc0a5b2a06db116b9ccfab
|
[
"BSD-3-Clause"
] | 5
|
2015-01-13T23:01:54.000Z
|
2016-11-30T15:23:43.000Z
|
src/kol/request/MeatOrchidRequest.py
|
danheath/temppykol
|
7f9621b44df9f9d2d9fc0a5b2a06db116b9ccfab
|
[
"BSD-3-Clause"
] | 19
|
2015-05-28T09:36:19.000Z
|
2022-03-15T23:19:29.000Z
|
from GenericRequest import GenericRequest
from kol.util import ParseResponseUtils
class MeatOrchidRequest(GenericRequest):
    """Visits the hanging meat orchid in the clan rumpus room."""
    def __init__(self, session):
        super(MeatOrchidRequest, self).__init__(session)
        # Rumpus-room furniture click: spot 1, furniture 4 is the meat orchid.
        self.url = session.serverURL + 'clan_rumpus.php?action=click&spot=1&furni=4'
    def parseResponse(self):
        # Store the meat gained (or lost) as parsed from the response HTML.
        self.responseData["meat"] = ParseResponseUtils.parseMeatGainedLost(self.responseText)
| 40.416667
| 93
| 0.760825
|
4a0fdfb1b4cdaf46d5c12eed256b2af8650fe411
| 3,330
|
py
|
Python
|
movingpandas/tools/_show_versions.py
|
DeemanOne/movingpandas
|
441d48f7aa98c861f1a66dcad486b9332b08e0b8
|
[
"BSD-3-Clause"
] | null | null | null |
movingpandas/tools/_show_versions.py
|
DeemanOne/movingpandas
|
441d48f7aa98c861f1a66dcad486b9332b08e0b8
|
[
"BSD-3-Clause"
] | null | null | null |
movingpandas/tools/_show_versions.py
|
DeemanOne/movingpandas
|
441d48f7aa98c861f1a66dcad486b9332b08e0b8
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import importlib
import platform
import sys
def _get_sys_info():
"""System information
Returns
-------
sys_info : dict
system and Python version information
"""
python = sys.version.replace("\n", " ")
blob = [
("python", python),
("executable", sys.executable),
("machine", platform.platform()),
]
return dict(blob)
def _get_C_info():
"""Information on system PROJ, GDAL, GEOS
Returns
-------
c_info: dict
system PROJ information
"""
try:
import pyproj
proj_version = pyproj.proj_version_str
except Exception:
proj_version = None
try:
import pyproj
proj_dir = pyproj.datadir.get_data_dir()
except Exception:
proj_dir = None
try:
import shapely._buildcfg
geos_version = "{}.{}.{}".format(*shapely._buildcfg.geos_version)
geos_dir = shapely._buildcfg.geos_library_path
except Exception:
geos_version = None
geos_dir = None
try:
import fiona
gdal_version = fiona.env.get_gdal_release_name()
except Exception:
gdal_version = None
try:
import fiona
gdal_dir = fiona.env.GDALDataFinder().search()
except Exception:
gdal_dir = None
blob = [
("GEOS", geos_version),
("GEOS lib", geos_dir),
("GDAL", gdal_version),
("GDAL data dir", gdal_dir),
("PROJ", proj_version),
("PROJ data dir", proj_dir),
]
return dict(blob)
def _get_deps_info():
"""Overview of the installed version of main dependencies
Returns
-------
deps_info: dict
version information on relevant Python libraries
"""
deps = [
"geopandas",
"pandas",
"fiona",
"numpy",
"shapely",
"rtree",
"pyproj",
"matplotlib",
"mapclassify",
"geopy",
"holoviews",
"hvplot",
"geoviews",
"stonesoup",
]
def get_version(module):
return module.__version__
deps_info = {}
for modname in deps:
try:
if modname in sys.modules:
mod = sys.modules[modname]
else:
mod = importlib.import_module(modname)
ver = get_version(mod)
deps_info[modname] = ver
except Exception:
deps_info[modname] = None
return deps_info
def show_versions():
    """
    Print system information and installed module versions.
    """
    # Imported lazily to avoid a circular import at module load time.
    from movingpandas import __version__ as mpd_version
    sys_info = _get_sys_info()
    deps_info = _get_deps_info()
    proj_info = _get_C_info()
    # Align the value column on the longest dependency name.
    maxlen = max(len(x) for x in deps_info)
    tpl = "{{k:<{maxlen}}}: {{stat}}".format(maxlen=maxlen)
    print(f"\nMovingPandas {mpd_version}")
    print("\nSYSTEM INFO")
    print("-----------")
    for k, stat in sys_info.items():
        print(tpl.format(k=k, stat=stat))
    print("\nGEOS, GDAL, PROJ INFO")
    print("---------------------")
    for k, stat in proj_info.items():
        print(tpl.format(k=k, stat=stat))
    print("\nPYTHON DEPENDENCIES")
    print("-------------------")
    for k, stat in deps_info.items():
        print(tpl.format(k=k, stat=stat))
| 22.2
| 73
| 0.557958
|
4a0fe0d8929ade73c19986fe9f8811ceddbd72ee
| 8,912
|
py
|
Python
|
fol/estimator_box.py
|
Jacfger/simple-stuffs
|
5596a03ec7a42a2f32b695ed73afb8c6a3cce030
|
[
"MIT"
] | 9
|
2021-09-19T18:20:43.000Z
|
2022-03-07T07:58:28.000Z
|
fol/estimator_box.py
|
Jacfger/simple-stuffs
|
5596a03ec7a42a2f32b695ed73afb8c6a3cce030
|
[
"MIT"
] | 1
|
2022-03-09T08:15:17.000Z
|
2022-03-09T08:15:17.000Z
|
fol/estimator_box.py
|
Jacfger/simple-stuffs
|
5596a03ec7a42a2f32b695ed73afb8c6a3cce030
|
[
"MIT"
] | 3
|
2021-09-23T07:27:47.000Z
|
2022-03-07T08:45:51.000Z
|
from typing import List
import torch
from torch import nn
import torch.nn.functional as F
from .appfoq import (AppFOQEstimator, IntList, find_optimal_batch,
inclusion_sampling)
class BoxOffsetIntersection(nn.Module):
    """Permutation-invariant intersection of box offset embeddings.

    Gates the element-wise minimum of the stacked offsets with a sigmoid
    attention computed by a two-layer MLP over their mean.
    """
    def __init__(self, dim):
        super(BoxOffsetIntersection, self).__init__()
        self.dim = dim
        self.layer1 = nn.Linear(self.dim, self.dim)
        self.layer2 = nn.Linear(self.dim, self.dim)
        nn.init.xavier_uniform_(self.layer1.weight)
        nn.init.xavier_uniform_(self.layer2.weight)

    def forward(self, embeddings):
        # embeddings: stacked offsets, reduced over dim 0.
        hidden = F.relu(self.layer1(embeddings))
        gate = torch.sigmoid(self.layer2(hidden.mean(dim=0)))
        smallest = embeddings.min(dim=0).values
        return smallest * gate
class CenterIntersection(nn.Module):
    """Attention-weighted intersection of box center embeddings."""
    def __init__(self, dim):
        super(CenterIntersection, self).__init__()
        self.dim = dim
        self.layer1 = nn.Linear(self.dim, self.dim)
        self.layer2 = nn.Linear(self.dim, self.dim)
        nn.init.xavier_uniform_(self.layer1.weight)
        nn.init.xavier_uniform_(self.layer2.weight)

    def forward(self, embeddings):
        hidden = F.relu(self.layer1(embeddings))  # (num_conj, dim)
        # Softmax over the conjunct axis yields per-dimension attention.
        weights = F.softmax(self.layer2(hidden), dim=0)  # (num_conj, dim)
        return (weights * embeddings).sum(dim=0)
def identity(x):
    """Identity offset activation: return *x* unchanged."""
    return x
class BoxEstimator(AppFOQEstimator):
    """Query2Box-style estimator: entities are points, queries are boxes
    (center ++ offset, concatenated along the last dimension).

    Negation and difference are unsupported by the box geometry and assert.
    """
    def __init__(self, n_entity, n_relation, gamma, entity_dim,
                 relation_dim, offset_activation, center_reg,
                 negative_sample_size, device):
        super().__init__()
        self.name = 'box'
        self.n_entity = n_entity
        self.n_relation = n_relation
        # gamma is the logit margin; stored as a frozen parameter.
        self.gamma = nn.Parameter(
            torch.Tensor([gamma]),
            requires_grad=False
        )
        self.negative_size = negative_sample_size
        self.entity_dim = entity_dim
        self.relation_dim = relation_dim
        self.device = device
        self.epsilon = 2.0
        self.embedding_range = nn.Parameter(
            torch.Tensor([(self.gamma.item() + self.epsilon) / entity_dim]),
            requires_grad=False
        )
        self.entity_embeddings = nn.Embedding(
            num_embeddings=n_entity, embedding_dim=self.entity_dim)
        self.relation_embeddings = nn.Embedding(
            num_embeddings=n_relation, embedding_dim=self.relation_dim)
        self.offset_embeddings = nn.Embedding(
            num_embeddings=n_relation, embedding_dim=self.entity_dim)
        nn.init.uniform_(self.entity_embeddings.weight,
                         -self.embedding_range.item(),
                         self.embedding_range.item())
        nn.init.uniform_(self.relation_embeddings.weight,
                         -self.embedding_range.item(),
                         self.embedding_range.item())
        # Offsets are initialized non-negative.
        nn.init.uniform_(self.offset_embeddings.weight,
                         0,
                         self.embedding_range.item())
        self.center_net = CenterIntersection(self.entity_dim)
        self.offset_net = BoxOffsetIntersection(self.entity_dim)
        self.cen_reg = center_reg
        # Activation applied to relation offsets during projection.
        if offset_activation == 'none':
            self.func = identity
        elif offset_activation == 'relu':
            self.func = F.relu
        elif offset_activation == 'softplus':
            self.func = F.softplus
        else:
            assert False, "No valid activation function!"

    def get_entity_embedding(self, entity_ids: torch.LongTensor):
        """Embed entities as zero-offset boxes: (center ++ zeros)."""
        center_emb = self.entity_embeddings(entity_ids)
        offset_emb = torch.zeros_like(center_emb).to(self.device)
        return torch.cat((center_emb, offset_emb), dim=-1)

    def get_projection_embedding(self, proj_ids: torch.LongTensor, emb):
        """Translate the box center by the relation embedding and grow the
        offset by the (activated) relation offset."""
        assert emb.shape[0] == len(proj_ids)
        rel_emb = self.relation_embeddings(proj_ids)
        r_offset_emb = self.offset_embeddings(proj_ids)
        q_emb, q_off_emb = torch.chunk(emb, 2, dim=-1)
        q_emb = torch.add(q_emb, rel_emb)
        q_off_emb = torch.add(q_off_emb, self.func(r_offset_emb))
        return torch.cat((q_emb, q_off_emb), dim=-1)

    def get_negation_embedding(self, emb: torch.Tensor):
        # Box geometry has no complement operator.
        assert False, "box cannot handle negation"

    def get_disjunction_embedding(self, disj_emb: List[torch.Tensor]):
        """Keep all disjunct boxes stacked; the max over them is taken at
        logit time (DNF evaluation)."""
        return torch.stack(disj_emb, dim=1)

    def get_difference_embedding(self, lemb: torch.Tensor, remb: torch.Tensor):
        assert False, "box cannot handle negation"

    def get_multiple_difference_embedding(self,
                                          emb: List[torch.Tensor],
                                          **kwargs):
        assert False, "box cannot handle negation"

    def get_conjunction_embedding(self, conj_emb: List[torch.Tensor]):
        """Intersect conjunct boxes: centers via attention, offsets via the
        gated minimum network."""
        sub_center_list, sub_offset_list = [], []
        for sub_emb in conj_emb:
            sub_center, sub_offset = torch.chunk(sub_emb, 2, dim=-1)
            sub_center_list.append(sub_center)
            sub_offset_list.append(sub_offset)
        new_center = self.center_net(torch.stack(sub_center_list))
        new_offset = self.offset_net(torch.stack(sub_offset_list))
        return torch.cat((new_center, new_offset), dim=-1)

    def criterion(self,
                  pred_emb: torch.Tensor,
                  answer_set: List[IntList],
                  union=False):
        """Sample positive/negative answers and return their logits against
        the predicted query box plus the subsampling weights."""
        pred_emb = pred_emb.unsqueeze(dim=-2)
        chosen_answer, chosen_false_answer, subsampling_weight = \
            inclusion_sampling(answer_set,
                               negative_size=self.negative_size,
                               entity_num=self.n_entity)
        positive_all_embedding = self.get_entity_embedding(
            torch.tensor(chosen_answer, device=self.device))  # b*d
        positive_embedding, _ = torch.chunk(
            positive_all_embedding, 2, dim=-1)
        neg_embedding = self.get_entity_embedding(
            torch.tensor(chosen_false_answer, device=self.device).view(-1))
        neg_embedding = neg_embedding.view(
            -1, self.negative_size, 2 * self.entity_dim)  # batch*n*dim
        negative_embedding, _ = torch.chunk(neg_embedding, 2, dim=-1)
        if union:
            # DNF: score against every disjunct, keep the best.
            positive_union_logit = self.compute_logit(
                positive_embedding.unsqueeze(1), pred_emb)
            positive_logit = torch.max(
                positive_union_logit, dim=1)[0]
            negative_union_logit = self.compute_logit(
                negative_embedding.unsqueeze(1), pred_emb)
            negative_logit = torch.max(negative_union_logit, dim=1)[0]
        else:
            positive_logit = self.compute_logit(positive_embedding, pred_emb)
            negative_logit = self.compute_logit(negative_embedding, pred_emb)
        return positive_logit, negative_logit, subsampling_weight.to(
            self.device)

    def compute_logit(self, entity_emb, query_emb):
        """gamma - dist_outside - cen_reg * dist_inside (L1 distances of the
        entity point to the box border and to the box center region)."""
        query_center_embedding, query_offset_embedding = torch.chunk(
            query_emb, 2, dim=-1)
        delta = (entity_emb - query_center_embedding).abs()
        distance_out = F.relu(delta - query_offset_embedding)
        distance_in = torch.min(delta, query_offset_embedding)
        logit = self.gamma - torch.norm(distance_out, p=1, dim=-1) \
            - self.cen_reg * torch.norm(distance_in, p=1, dim=-1)
        return logit

    def compute_all_entity_logit(self,
                                 pred_emb: torch.Tensor,
                                 union=False) -> torch.Tensor:
        """Score the query box against every entity, chunking the entity
        table to bound peak memory."""
        all_entities = torch.LongTensor(range(self.n_entity)).to(self.device)
        all_embedding, _ = torch.chunk(
            self.get_entity_embedding(all_entities), 2, dim=-1)
        pred_emb = pred_emb.unsqueeze(-2)
        batch_num = find_optimal_batch(all_embedding,
                                       query_dist=pred_emb,
                                       compute_logit=self.compute_logit,
                                       union=union)
        chunk_of_answer = torch.chunk(all_embedding, batch_num, dim=0)
        logit_list = []
        for answer_part in chunk_of_answer:
            if union:
                union_part = self.compute_logit(
                    answer_part.unsqueeze(0).unsqueeze(0), pred_emb)
                # b*disj*answer_part*dim
                logit_part = torch.max(union_part, dim=1)[0]
            else:
                logit_part = self.compute_logit(answer_part.unsqueeze(dim=0),
                                                pred_emb)
                # batch*answer_part*dim
            logit_list.append(logit_part)
        all_logit = torch.cat(logit_list, dim=1)
        return all_logit
| 42.438095
| 79
| 0.609179
|
4a0fe0daa9109360b2abc2fc83af9a62efae356a
| 331
|
py
|
Python
|
tests/test_config.py
|
McahineLearning/wine_quality
|
951d6aee7d6f6949a3d1671e433c1af0b886aeab
|
[
"MIT"
] | 1
|
2021-10-02T20:24:46.000Z
|
2021-10-02T20:24:46.000Z
|
tests/test_config.py
|
McahineLearning/wine_quality
|
951d6aee7d6f6949a3d1671e433c1af0b886aeab
|
[
"MIT"
] | null | null | null |
tests/test_config.py
|
McahineLearning/wine_quality
|
951d6aee7d6f6949a3d1671e433c1af0b886aeab
|
[
"MIT"
] | null | null | null |
import pytest
class NotInRange(Exception):
    """Raised when a value falls outside the accepted range."""

    def __init__(self, message='Value out of range'):
        self.message = message
        super().__init__(message)
def test_geneirc():
    """A value outside [10, 20) must raise NotInRange.

    (Name typo 'geneirc' kept so pytest discovery is unchanged.)
    """
    value = 5
    with pytest.raises(NotInRange):
        if value not in range(10, 20):
            raise NotInRange
| 25.461538
| 55
| 0.619335
|
4a0fe1bc0e48a83f9e8c3cdd8ab14a55814010a6
| 1,497
|
py
|
Python
|
src/tests/dtlzproblems.py
|
KernelA/nsga3
|
fc8c862fb41657108d5499f4343beb408e526c19
|
[
"MIT"
] | 7
|
2020-06-12T21:52:18.000Z
|
2022-03-24T14:28:01.000Z
|
src/tests/dtlzproblems.py
|
KernelA/nsga3
|
fc8c862fb41657108d5499f4343beb408e526c19
|
[
"MIT"
] | null | null | null |
src/tests/dtlzproblems.py
|
KernelA/nsga3
|
fc8c862fb41657108d5499f4343beb408e526c19
|
[
"MIT"
] | 3
|
2018-01-01T09:46:18.000Z
|
2021-06-16T07:09:26.000Z
|
import math
from abc import abstractmethod

import numpy as np
import scipy

from pynsga3 import bproblem
__all__ = ["DTLZ1"]
class _BaseDTLZ(bproblem.MOProblem):
    """Shared scaffolding for DTLZ benchmark problems: [0, 1] box bounds on
    every decision variable and read-only accessors."""
    def __init__(self, amount_dec: int, amount_objs: int):
        # DTLZ problems are only defined for two or more objectives.
        assert amount_objs > 1
        self.__amount_objs = amount_objs
        self.__lower_bounds = (0,) * amount_dec
        self.__upper_bounds = (1,) * amount_dec
    @abstractmethod
    def eval(self, x):
        """Evaluate the objective vector at decision point *x*."""
        pass
    @property
    def amount_objs(self):
        # Number of objectives.
        return self.__amount_objs
    @property
    def lower_bounds(self):
        # Per-variable lower bounds (all zeros).
        return self.__lower_bounds
    @property
    def upper_bounds(self):
        # Per-variable upper bounds (all ones).
        return self.__upper_bounds
    def __str__(self):
        return "{0}_num_var={1}".format(self.__class__.__name__, len(self.__lower_bounds))
class DTLZ1(_BaseDTLZ):
    """DTLZ1 scalable multi-objective test problem.

    Uses k = 5 distance variables, so the decision-space dimension is
    amount_objs + 4; objectives are the standard DTLZ1 products scaled
    by 0.5 * (1 + g).

    FIX: ``scipy.cos`` and ``scipy.prod`` were deprecated NumPy aliases that
    have been removed from modern SciPy; the equivalent ``np.cos`` and
    ``np.prod`` are used instead (``numpy`` added to the module imports).
    """
    __PI_20 = 20 * math.pi
    __K = 5
    def __init__(self, amount_objs: int):
        super().__init__(amount_objs + DTLZ1.__K - 1, amount_objs)
        # Reusable objective buffer, overwritten by every eval() call.
        self.__res = [0] * amount_objs
    def _g(self, x):
        """Distance function g over the trailing k decision variables."""
        temp = x - 0.5
        return 100 * (DTLZ1.__K + (temp ** 2).sum() - np.cos(DTLZ1.__PI_20 * temp).sum())
    def eval(self, x):
        """Return the list of objective values at decision point *x*."""
        g = self._g(x[self.amount_objs - 1:])
        num_obj = 0
        for i in range(self.amount_objs - 1, -1, -1):
            product = np.prod(x[:i])
            if num_obj != 0:
                product *= (1 - x[i])
            self.__res[num_obj] = 0.5 * product * (1 + g)
            num_obj += 1
        return self.__res
| 22.343284
| 92
| 0.589846
|
4a0fe20770130e86ed25724fd5443cc3ed15e85c
| 22,658
|
py
|
Python
|
RL_utils.py
|
EmbodiedLearning/ICLR-Submission-2020
|
9806b7594eeb393de760545425a7b9fada0f9cef
|
[
"MIT"
] | 1
|
2021-10-12T09:48:56.000Z
|
2021-10-12T09:48:56.000Z
|
RL_utils.py
|
EmbodiedLearning/ICLR-Submission-2020
|
9806b7594eeb393de760545425a7b9fada0f9cef
|
[
"MIT"
] | null | null | null |
RL_utils.py
|
EmbodiedLearning/ICLR-Submission-2020
|
9806b7594eeb393de760545425a7b9fada0f9cef
|
[
"MIT"
] | null | null | null |
import os
import matplotlib.pyplot as plt
import numpy as np
from JSAnimation import IPython_display
from matplotlib import animation
import cv2
import imageio
import mpld3
import scipy.misc
def plot_movie_js2(enc_array,image_array,save=None):
    #Shows encoding and frames
    # Animates the visual (rows :8) and vector (rows 8:) encodings next to
    # the raw frames; optionally saves the animation with ffmpeg to *save*.
    # NOTE(review): ``display`` is not imported in this module — it relies on
    # an interactive IPython/Jupyter namespace; confirm before scripted use.
    fig = plt.figure(figsize=(10,3), dpi=72)
    ax1 = fig.add_subplot(2, 2, 1)
    plt.title('Visual Encoding', fontsize=15)
    plt.axis('off')
    im = plt.imshow(enc_array[0][:8,:],vmin=-1,vmax=25)
    ax2 = fig.add_subplot(2, 2, 3)
    plt.title('Vector Encoding', fontsize=15)
    plt.axis('off')
    im2 = plt.imshow(enc_array[0][8:,:],vmin=-1,vmax=25)
    ax3 = fig.add_subplot(1, 2, 2)
    im3 = plt.imshow(image_array[0])
    plt.axis('off')
    Writer = animation.writers['ffmpeg']
    writer = Writer(fps=10, metadata=dict(artist='Me'), bitrate=1800)
    def animate(i):
        # Per-frame update: swap in frame i's encodings and image.
        im.set_array(enc_array[i][:8,:])
        im2.set_array(enc_array[i][8:,:])
        im3.set_array(image_array[i])
        return (im,)
    anim = animation.FuncAnimation(fig, animate, frames=len(image_array))
    display(IPython_display.display_animation(anim))
    if save!=None:
        anim.save(save, writer=writer)
def save_movie_js2(enc_array,image_array,save=None):
    # Writes one PNG per frame ('img<i>.png') into directory *save*, showing
    # the visual/vector encodings beside the raw frame.
    #dpi = 72.0
    #xpixels, ypixels = image_array[0].shape[0], image_array[0].shape[1]
    fig = plt.figure(figsize=(10,3), dpi=72)
    ax1 = fig.add_subplot(2, 2, 1)
    plt.title('Visual Encoding', fontsize=15)
    plt.axis('off')
    im = plt.imshow(enc_array[0][:8,:],vmin=-1,vmax=25)
    ax2 = fig.add_subplot(2, 2, 3)
    plt.title('Vector Encoding', fontsize=15)
    plt.axis('off')
    im2 = plt.imshow(enc_array[0][8:,:],vmin=-1,vmax=25)
    ax3 = fig.add_subplot(1, 2, 2)
    im3 = plt.imshow(image_array[0])
    plt.axis('off')
    for i in range(enc_array.shape[0]):
        # Reuse the same artists for every frame, then snapshot the figure.
        im.set_array(enc_array[i][:8,:])
        im2.set_array(enc_array[i][8:,:])
        im3.set_array(image_array[i])
        #plt.savefig(path+save+'/img'+str(i).zfill(4)+'.png', bbox_inches='tight')
        plt.savefig(save+'/img'+str(i)+'.png', bbox_inches='tight')
def plot_movie_jsInfo(enc_array,image_array,acts,vals,rews,save=None):
    """Animate encodings, frames, reward/value text, and action symbols.

    :param enc_array: either a 2-D-encoding sequence (rows [:8] visual, [8:]
        vector) or a two-element list of per-step ICA component vectors.
    :param image_array: sequence of RGB frames.
    :param acts: per-step action array, indexed acts[i][0][branch] with four
        branches (forward/back, camera, jump, left/right).
    :param vals: per-step value estimates, indexed vals[i][0][0].
    :param rews: per-step rewards.
    :param save: optional output path for the ffmpeg-rendered animation.
    """
    def getImage(act,num):
        # Map (action value, branch index) to the matching symbol bitmap.
        # Branches: 0 forward/back, 1 camera turn, 2 jump, 3 strafe.
        if act==0:
            return stand
        if num==0:
            if act==1:
                return up
            if act==2:
                return down
        if num==1:
            if act==1:
                return turn_l
            if act==2:
                return turn_r
        if num==2 and act==1:
            return jump
        if num==3:
            if act==1:
                return right
            if act==2:
                return left
    # Symbol bitmaps loaded relative to the working directory.
    jump = imageio.imread('./symbols/jump.png')
    left = imageio.imread('./symbols/arrow-left.png')
    right = imageio.imread('./symbols/arrow_right.png')
    down = imageio.imread('./symbols/down-arrow.png')
    up = imageio.imread('./symbols/up-arrow.png')
    turn_l = imageio.imread('./symbols/turn-left.png')
    turn_r = imageio.imread('./symbols/turn-right.png')
    stand = imageio.imread('./symbols/Stand.png')
    fig = plt.figure(figsize=(10,3), dpi=72)
    ax1 = fig.add_subplot(2, 2, 1)
    plt.axis('off')
    # A list-valued enc_array signals ICA components rather than raw encodings.
    if not isinstance(enc_array,list):
        im = plt.imshow(enc_array[0][:8,:],vmin=-1,vmax=25)
        plt.title('Visual Encoding', fontsize=15)
    else:
        icaLen = enc_array[0][0].shape[0]
        im = plt.imshow(enc_array[0][0].reshape(1,icaLen),vmin=-1,vmax=25)
        plt.title('Visual Encoding - ICs', fontsize=15)
    ax2 = fig.add_subplot(2, 2, 3)
    plt.axis('off')
    if not isinstance(enc_array,list):
        im2 = plt.imshow(enc_array[0][8:,:],vmin=-1,vmax=25)
        plt.title('Vector Encoding', fontsize=15)
    else:
        im2 = plt.imshow(enc_array[1][0].reshape(1,icaLen),vmin=-1,vmax=25)
        plt.title('Vector Encoding - ICs', fontsize=15)
    ax4 = fig.add_subplot(1, 2, 2)
    im4 = plt.imshow(image_array[0])
    plt.axis('off')
    ax3 = fig.add_subplot(6, 2, 2)
    im3 = plt.text(0.2,0.1,"R: "+str(rews[0])+' V: '+str(vals[0]), fontsize=15,color='white',
                   bbox=dict(facecolor='blue', alpha=0.5))
    plt.axis('off')
    # One small axis per action branch, showing the current action's symbol.
    ax5 = fig.add_subplot(4, 10, 10)
    im5 = plt.imshow(getImage(acts[0][0][0],0))
    plt.axis('off')
    ax6 = fig.add_subplot(4, 10, 20)
    im6 = plt.imshow(getImage(acts[0][0][1],1))
    plt.axis('off')
    ax7 = fig.add_subplot(4, 10, 30)
    im7 = plt.imshow(getImage(acts[0][0][2],2))
    plt.axis('off')
    ax8 = fig.add_subplot(4, 10, 40)
    im8 = plt.imshow(getImage(acts[0][0][3],3))
    plt.axis('off')
    Writer = animation.writers['ffmpeg']
    writer = Writer(fps=10, metadata=dict(artist='Me'), bitrate=1800)
    def animate(i):
        if not isinstance(enc_array,list):
            im.set_array(enc_array[i][:8,:])
            im2.set_array(enc_array[i][8:,:])
        else:
            im.set_array(enc_array[0][i].reshape(1,icaLen))
            im2.set_array(enc_array[1][i].reshape(1,icaLen))
        # Truncated string formatting keeps the overlay text compact.
        im3.set_text("R: "+str(rews[i])[:3]+' V: '+str(vals[i][0][0])[:4])
        im4.set_array(image_array[i])
        im5.set_array(getImage(acts[i][0][0],0))
        im6.set_array(getImage(acts[i][0][1],1))
        im7.set_array(getImage(acts[i][0][2],2))
        im8.set_array(getImage(acts[i][0][3],3))
        return (im,)
    anim = animation.FuncAnimation(fig, animate, frames=len(image_array))
    # NOTE(review): `display` is an IPython/Jupyter builtin — notebook-only.
    display(IPython_display.display_animation(anim))
    if save!=None:
        anim.save(save, writer=writer)
def save_movie_jsInfo(enc_array,image_array,acts,vals,rews,save=None):
    """Save per-timestep PNG composites of encodings, frame, reward/value
    text, and action symbols (non-animated sibling of plot_movie_jsInfo).

    :param enc_array: encoding sequence, or a two-element list of ICA vectors.
    :param image_array: sequence of RGB frames.
    :param acts: per-step actions, indexed acts[i][0][branch].
    :param vals: per-step value estimates, indexed vals[i][0][0].
    :param rews: per-step rewards.
    :param save: output directory; files are written as img0000.png, ...
    """
    def getImage(act,num):
        # Map (action value, branch index) to the matching symbol bitmap.
        if act==0:
            return stand
        if num==0:
            if act==1:
                return up
            if act==2:
                return down
        if num==1:
            if act==1:
                return turn_l
            if act==2:
                return turn_r
        if num==2 and act==1:
            return jump
        if num==3:
            if act==1:
                return right
            if act==2:
                return left
    jump = imageio.imread('./symbols/jump.png')
    left = imageio.imread('./symbols/arrow-left.png')
    right = imageio.imread('./symbols/arrow_right.png')
    down = imageio.imread('./symbols/down-arrow.png')
    up = imageio.imread('./symbols/up-arrow.png')
    turn_l = imageio.imread('./symbols/turn-left.png')
    turn_r = imageio.imread('./symbols/turn-right.png')
    stand = imageio.imread('./symbols/Stand.png')
    fig = plt.figure(figsize=(10,3), dpi=72)
    ax1 = fig.add_subplot(2, 2, 1)
    plt.axis('off')
    # A list-valued enc_array signals ICA components rather than raw encodings.
    if not isinstance(enc_array,list):
        im = plt.imshow(enc_array[0][:8,:],vmin=-1,vmax=25)
        plt.title('Visual Encoding', fontsize=15)
    else:
        icaLen = enc_array[0][0].shape[0]
        im = plt.imshow(enc_array[0][0].reshape(1,icaLen),vmin=-1,vmax=25)
        plt.title('Visual Encoding - ICs', fontsize=15)
    ax2 = fig.add_subplot(2, 2, 3)
    plt.axis('off')
    if not isinstance(enc_array,list):
        im2 = plt.imshow(enc_array[0][8:,:],vmin=-1,vmax=25)
        plt.title('Vector Encoding', fontsize=15)
    else:
        im2 = plt.imshow(enc_array[1][0].reshape(1,icaLen),vmin=-1,vmax=25)
        plt.title('Vector Encoding - ICs', fontsize=15)
    ax4 = fig.add_subplot(1, 2, 2)
    im4 = plt.imshow(image_array[0])
    plt.axis('off')
    ax3 = fig.add_subplot(6, 2, 2)
    im3 = plt.text(0.2,0.1,"R: "+str(rews[0])+' V: '+str(vals[0]), fontsize=15,color='white',
                   bbox=dict(facecolor='blue', alpha=0.5))
    plt.axis('off')
    ax5 = fig.add_subplot(4, 10, 10)
    im5 = plt.imshow(getImage(acts[0][0][0],0))
    plt.axis('off')
    ax6 = fig.add_subplot(4, 10, 20)
    im6 = plt.imshow(getImage(acts[0][0][1],1))
    plt.axis('off')
    ax7 = fig.add_subplot(4, 10, 30)
    im7 = plt.imshow(getImage(acts[0][0][2],2))
    plt.axis('off')
    ax8 = fig.add_subplot(4, 10, 40)
    im8 = plt.imshow(getImage(acts[0][0][3],3))
    plt.axis('off')
    # Writer is created but unused here; frames are saved as PNGs below.
    Writer = animation.writers['ffmpeg']
    writer = Writer(fps=10, metadata=dict(artist='Me'), bitrate=1800)
    # Frame count comes from rews for ICA input (enc_array is then a list).
    if isinstance(enc_array,list):
        a_len = rews.shape[0]
    else:
        a_len = enc_array.shape[0]
    for i in range(a_len):
        if not isinstance(enc_array,list):
            im.set_array(enc_array[i][:8,:])
            im2.set_array(enc_array[i][8:,:])
        else:
            im.set_array(enc_array[0][i].reshape(1,icaLen))
            im2.set_array(enc_array[1][i].reshape(1,icaLen))
        im3.set_text("R: "+str(rews[i])[:3]+' V: '+str(vals[i][0][0])[:4])
        im4.set_array(image_array[i])
        im5.set_array(getImage(acts[i][0][0],0))
        im6.set_array(getImage(acts[i][0][1],1))
        im7.set_array(getImage(acts[i][0][2],2))
        im8.set_array(getImage(acts[i][0][3],3))
        plt.savefig(save+'/img'+str(i).zfill(4)+'.png', bbox_inches='tight')
def plot_movie_js3(enc_array,image_array,cluster, save=None):
    """Animate encodings and frames with a per-timestep cluster label overlay.

    :param enc_array: sequence of 2-D encodings; rows [:8] visual, [8:] vector.
    :param image_array: sequence of RGB frames.
    :param cluster: per-timestep cluster id shown as a text overlay.
    :param save: optional output path for the ffmpeg-rendered animation.
    """
    fig = plt.figure(figsize=(10,3), dpi=72)
    ax1 = fig.add_subplot(2, 2, 1)
    plt.axis('off')
    im = plt.imshow(enc_array[0][:8,:],vmin=-1,vmax=25)
    plt.title('Visual Encoding', fontsize=15)
    ax2 = fig.add_subplot(2, 2, 3)
    plt.axis('off')
    im2 = plt.imshow(enc_array[0][8:,:],vmin=-1,vmax=25)
    plt.title('Vector Encoding', fontsize=15)
    ax4 = fig.add_subplot(1, 2, 2)
    plt.axis('off')
    im4 = plt.imshow(image_array[0])
    ax3 = fig.add_subplot(6, 2, 2)
    im3 = plt.text(0.3,0.1,'Cluster ' + str(cluster[0]), fontsize=20,color='white',bbox=dict(facecolor='blue', alpha=0.5))
    plt.axis('off')
    Writer = animation.writers['ffmpeg']
    writer = Writer(fps=10, metadata=dict(artist='Me'), bitrate=1800)
    def animate(i):
        im.set_array(enc_array[i][:8,:])
        im2.set_array(enc_array[i][8:,:])
        im3.set_text('Cluster ' + str(cluster[i]))
        im4.set_array(image_array[i])
        return (im,)
    anim = animation.FuncAnimation(fig, animate, frames=len(image_array))
    # NOTE(review): `display` is an IPython/Jupyter builtin — notebook-only.
    display(IPython_display.display_animation(anim))
    if save!=None:
        anim.save(save, writer=writer)
def plot_actions(act):
    """Plot histograms of the four discrete action branches.

    :param act: action array indexed act[:, 0, branch], where branch is
        0 forward/back, 1 camera, 2 jump, 3 strafe.
    :return: the created matplotlib Figure.
    """
    # (title, samples, bin edges, tick labels) for each of the four panels.
    panels = [
        ('Move Forward/Back', act[:, 0, 0], np.linspace(-0.4, 2.6, 4),
         ['Stand', 'Forward', 'Back']),
        ('Camera', act[:, 0, 1], np.linspace(-0.4, 2.6, 4),
         ['Straight', 'Left', 'Right']),
        ('Jump', act[:, 0, 2], np.linspace(-0.4, 1.6, 3),
         ['Stand', 'Jump']),
        ('Move Left/Right', act[:, 0, 3], np.linspace(-0.4, 2.6, 4),
         ['Stand', 'Right', 'Left']),
    ]
    fig = plt.figure(figsize=(7, 10))
    fig.suptitle('Distribution of Actions', fontsize=20)
    for position, (title, samples, bin_edges, tick_labels) in enumerate(panels, start=1):
        fig.add_subplot(2, 2, position)
        plt.hist(samples, bins=bin_edges, color='chocolate', width=0.8)
        plt.title(title, fontsize=15)
        plt.xticks(np.arange(len(tick_labels)), tick_labels, fontsize=13)
    fig.tight_layout(rect=[0, 0.03, 1, 0.95])
    return fig
def correlate(enc, val, num=0, normalize=False):
    """Pearson correlation of each encoding dimension with a target signal.

    :param enc: 2-D array (timesteps, dims).
    :param val: 1-D target signal, one value per timestep.
    :param num: unused; kept for backward compatibility with callers.
    :param normalize: if True, standardize both series first (corrcoef is
        scale-invariant, so the result is unchanged; retained for parity).
    :return: 1-D numpy array of correlation coefficients, one per dimension.
    """
    target = val
    if normalize:
        target = (target - np.mean(target)) / np.std(target)
    coefficients = []
    for dim in range(enc.shape[-1]):
        series = enc[:, dim]
        if normalize:
            series = (series - np.mean(series)) / (np.std(series) * len(series))
        # Off-diagonal entry of the 2x2 correlation matrix.
        coefficients.append(float(np.corrcoef(series, target)[0, 1]))
    return np.array(coefficients)
def plot_movie_curInfo(enc_array,image_array,pred_s,enc_cur,acts,vals,rews,range_vis,range_vec,save=None):
    """Animate encodings alongside curiosity-model predictions and actuals.

    Top row: current encoding (visual rows [:4], vector rows [4:]).
    Middle row: the forward model's predicted next encoding.
    Bottom row: the actual next encoding. Right: rendered frame + R/V text.

    :param enc_array: encoding sequence, or a two-element list of ICA vectors.
    :param image_array: sequence of RGB frames.
    :param pred_s: predicted next-state encodings per timestep.
    :param enc_cur: actual next-state encodings per timestep.
    :param acts: per-step actions (unused in the plot; kept for API symmetry).
    :param vals: per-step value estimates, indexed vals[i][0][0].
    :param rews: per-step rewards.
    :param range_vis: (vmin, vmax) color range for the visual rows.
    :param range_vec: (vmin, vmax) color range for the vector rows.
    :param save: optional output path for the ffmpeg-rendered animation.
    """
    fig = plt.figure(figsize=(10,3), dpi=72)
    ax1 = fig.add_subplot(3, 3, 1)
    plt.axis('off')
    if not isinstance(enc_array,list):
        im = plt.imshow(enc_array[0][:4,:],vmin=range_vis[0], vmax=range_vis[1])
        plt.title('Visual Encoding', fontsize=15)
    else:
        icaLen = enc_array[0][0].shape[0]
        im = plt.imshow(enc_array[0][0].reshape(1,icaLen))
        plt.title('Visual Encoding - ICs', fontsize=15)
    ax2 = fig.add_subplot(3, 3, 2)
    plt.axis('off')
    if not isinstance(enc_array,list):
        im2 = plt.imshow(enc_array[0][4:,:],vmin=range_vec[0], vmax=range_vec[1])
        plt.title('Vector Encoding', fontsize=15)
    else:
        im2 = plt.imshow(enc_array[1][0].reshape(1,icaLen))
        plt.title('Vector Encoding - ICs', fontsize=15)
    ax5 = fig.add_subplot(3,3,4)
    plt.axis('off')
    im5 = plt.imshow(pred_s[0][:4,:],vmin=range_vis[0], vmax=range_vis[1])
    plt.title('Predicted')
    ax6 = fig.add_subplot(3,3,5)
    plt.axis('off')
    im6 = plt.imshow(pred_s[0][4:,:],vmin=range_vec[0], vmax=range_vec[1])
    plt.title('Predicted')
    ax7 = fig.add_subplot(3,3,7)
    plt.axis('off')
    im7 = plt.imshow(enc_cur[0][:4,:],vmin=range_vis[0], vmax=range_vis[1])
    plt.title('Actual')
    ax8 = fig.add_subplot(3,3,8)
    plt.axis('off')
    im8 = plt.imshow(enc_cur[0][4:,:],vmin=range_vec[0], vmax=range_vec[1])
    plt.title('Actual')
    ax4 = fig.add_subplot(1, 3, 3)
    im4 = plt.imshow(image_array[0])
    plt.axis('off')
    ax3 = fig.add_subplot(6, 3, 3)
    im3 = plt.text(0.2,0.1,"R: "+str(rews[0])+' V: '+str(vals[0]), fontsize=15,color='white',
                   bbox=dict(facecolor='blue', alpha=0.5))
    plt.axis('off')
    Writer = animation.writers['ffmpeg']
    writer = Writer(fps=10, metadata=dict(artist='Me'), bitrate=1800)
    def animate(i):
        if not isinstance(enc_array,list):
            im.set_array(enc_array[i][:4,:])
            im2.set_array(enc_array[i][4:,:])
        else:
            im.set_array(enc_array[0][i].reshape(1,icaLen))
            im2.set_array(enc_array[1][i].reshape(1,icaLen))
        im3.set_text("R: "+str(rews[i])[:3]+' V: '+str(vals[i][0][0])[:4])
        im4.set_array(image_array[i])
        im5.set_array(pred_s[i][:4,:])
        im6.set_array(pred_s[i][4:,:])
        im7.set_array(enc_cur[i][:4,:])
        im8.set_array(enc_cur[i][4:,:])
        return (im,)
    anim = animation.FuncAnimation(fig, animate, frames=len(image_array))
    # NOTE(review): `display` is an IPython/Jupyter builtin — notebook-only.
    display(IPython_display.display_animation(anim))
    if save!=None:
        anim.save(save, writer=writer)
def plot_movie_semantic(semantic,image_array,acts,vals,rews,save=None):
    """Animate per-pixel semantic label maps next to rendered frames, with
    reward/value text and action symbols.

    :param semantic: per-step strings of comma-separated integer labels that
        reshape to a 128x128 map (parsed from ``s[1:-4]``).
    :param image_array: sequence of RGB frames.
    :param acts: per-step actions, indexed acts[i][0][branch].
    :param vals: per-step value estimates, indexed vals[i][0][0].
    :param rews: per-step rewards.
    :param save: optional output path for the ffmpeg-rendered animation.
    """
    # Bug fix: this function previously referenced `mpatches` and `inv_map`
    # without defining them (NameError on first call). Define both locally,
    # consistent with plot_movie_semantic2.
    import matplotlib.patches as mpatches
    label_dict = {"Unknown": 0,
                  "Agent": 1,
                  "Level Door": 2,
                  "Regular Door": 3,
                  "Key Door": 4,
                  "Entry Door": 5,
                  "Puzzle Door": 6,
                  "Key": 7,
                  "Time Orb": 8,
                  "Wall": 9,
                  "Floor": 10}
    inv_map = {v: k for k, v in label_dict.items()}
    def getImage(act,num):
        # Map (action value, branch index) to the matching symbol bitmap.
        if act==0:
            return stand
        if num==0:
            if act==1:
                return up
            if act==2:
                return down
        if num==1:
            if act==1:
                return turn_l
            if act==2:
                return turn_r
        if num==2 and act==1:
            return jump
        if num==3:
            if act==1:
                return right
            if act==2:
                return left
    jump = imageio.imread('./symbols/jump.png')
    left = imageio.imread('./symbols/arrow-left.png')
    right = imageio.imread('./symbols/arrow_right.png')
    down = imageio.imread('./symbols/down-arrow.png')
    up = imageio.imread('./symbols/up-arrow.png')
    turn_l = imageio.imread('./symbols/turn-left.png')
    turn_r = imageio.imread('./symbols/turn-right.png')
    stand = imageio.imread('./symbols/Stand.png')
    fig = plt.figure(figsize=(10,3), dpi=72)
    ax1 = fig.add_subplot(1, 2, 1)
    plt.axis('off')
    lbls = np.rot90(np.array([int(n) for n in semantic[0][1:-4].split(",")]).reshape((128,128)))
    im1 = plt.imshow(lbls,vmin=-1,vmax=10,cmap='tab20')
    values = np.linspace(-1,10,12)
    colors = [ im1.cmap(im1.norm(value)) for value in values]
    # -1 has no semantic class; label it 'None' instead of raising KeyError.
    patches = [ mpatches.Patch(color=colors[i], label=inv_map.get(values[i], 'None') ) for i in range(len(values)) ]
    plt.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0. )
    ax4 = fig.add_subplot(1, 2, 2)
    im4 = plt.imshow(image_array[0])
    plt.axis('off')
    ax3 = fig.add_subplot(6, 2, 2)
    im3 = plt.text(0.2,0.1,"R: "+str(rews[0])+' V: '+str(vals[0]), fontsize=15,color='white',
                   bbox=dict(facecolor='blue', alpha=0.5))
    plt.axis('off')
    ax5 = fig.add_subplot(4, 10, 10)
    im5 = plt.imshow(getImage(acts[0][0][0],0))
    plt.axis('off')
    ax6 = fig.add_subplot(4, 10, 20)
    im6 = plt.imshow(getImage(acts[0][0][1],1))
    plt.axis('off')
    ax7 = fig.add_subplot(4, 10, 30)
    im7 = plt.imshow(getImage(acts[0][0][2],2))
    plt.axis('off')
    ax8 = fig.add_subplot(4, 10, 40)
    im8 = plt.imshow(getImage(acts[0][0][3],3))
    plt.axis('off')
    Writer = animation.writers['ffmpeg']
    writer = Writer(fps=10, metadata=dict(artist='Me'), bitrate=1800)
    def animate(i):
        try:
            lbls = np.rot90(np.array([int(n) for n in semantic[i][1:-4].split(",")]).reshape((128,128)))
            im1.set_array(lbls)
        except Exception:
            # Bug fix: this branch referenced an undefined name `data`; pad a
            # short label string from `semantic` with zeros instead.
            broken = np.array([int(n) for n in semantic[i][1:-4].split(",")])
            lbls = np.rot90(np.append(broken,np.zeros((128*128)-broken.shape[0])).reshape((128,128)))
            im1.set_array(lbls)
            print(str(i)+" - "+str(broken.shape))
        im3.set_text("R: "+str(rews[i])[:3]+' V: '+str(vals[i][0][0])[:4])
        im4.set_array(image_array[i])
        im5.set_array(getImage(acts[i][0][0],0))
        im6.set_array(getImage(acts[i][0][1],1))
        im7.set_array(getImage(acts[i][0][2],2))
        im8.set_array(getImage(acts[i][0][3],3))
        return (im1,)
    anim = animation.FuncAnimation(fig, animate, frames=len(image_array))
    display(IPython_display.display_animation(anim))
    if save!=None:
        anim.save(save, writer=writer)
def plot_movie_semantic2(semantic,image_array,acts,vals,rews,save=None):
    """Animate RGB semantic screenshots (classified via rgb2L) next to the
    rendered frames, with reward/value text and action symbols.

    :param semantic: per-step RGB semantic screenshots fed through rgb2L.
    :param image_array: sequence of RGB frames.
    :param acts: per-step actions, indexed acts[i][0][branch].
    :param vals: per-step value estimates, indexed vals[i][0][0].
    :param rews: per-step rewards.
    :param save: optional output path for the ffmpeg-rendered animation.
    """
    # Bug fix: `mpatches` was used below but never imported at module level.
    import matplotlib.patches as mpatches
    label_dict = {"Unknown": 0,
                  "Agent": 1,
                  "Level Door": 2,
                  "Regular Door": 3 ,
                  "Key Door": 4 ,
                  "Entry Door": 5 ,
                  "Puzzle Door": 6 ,
                  "Key": 7 ,
                  "Time Orb": 8 ,
                  "Wall":9,
                  "Floor": 10}
    inv_map = {v: k for k, v in label_dict.items()}
    def getImage(act,num):
        # Map (action value, branch index) to the matching symbol bitmap.
        if act==0:
            return stand
        if num==0:
            if act==1:
                return up
            if act==2:
                return down
        if num==1:
            if act==1:
                return turn_l
            if act==2:
                return turn_r
        if num==2 and act==1:
            return jump
        if num==3:
            if act==1:
                return right
            if act==2:
                return left
    jump = imageio.imread('./symbols/jump.png')
    left = imageio.imread('./symbols/arrow-left.png')
    right = imageio.imread('./symbols/arrow_right.png')
    down = imageio.imread('./symbols/down-arrow.png')
    up = imageio.imread('./symbols/up-arrow.png')
    turn_l = imageio.imread('./symbols/turn-left.png')
    turn_r = imageio.imread('./symbols/turn-right.png')
    stand = imageio.imread('./symbols/Stand.png')
    fig = plt.figure(figsize=(10,3), dpi=72)
    ax1 = fig.add_subplot(1, 2, 1)
    plt.axis('off')
    im1 = plt.imshow(rgb2L(semantic[0]),vmin=0,vmax=11,cmap='tab20')
    values = np.linspace(0,10,11)
    colors = [ im1.cmap(im1.norm(value)) for value in values]
    patches = [ mpatches.Patch(color=colors[i], label=inv_map[values[i]] ) for i in range(len(values)) ]
    plt.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0. )
    ax4 = fig.add_subplot(1, 2, 2)
    im4 = plt.imshow(image_array[0])
    plt.axis('off')
    ax3 = fig.add_subplot(6, 2, 2)
    im3 = plt.text(0.2,0.1,"0 R: "+str(rews[0])+' V: '+str(vals[0]), fontsize=15,color='white',
                   bbox=dict(facecolor='blue', alpha=0.5))
    plt.axis('off')
    ax5 = fig.add_subplot(4, 10, 10)
    im5 = plt.imshow(getImage(acts[0][0][0],0))
    plt.axis('off')
    ax6 = fig.add_subplot(4, 10, 20)
    im6 = plt.imshow(getImage(acts[0][0][1],1))
    plt.axis('off')
    ax7 = fig.add_subplot(4, 10, 30)
    im7 = plt.imshow(getImage(acts[0][0][2],2))
    plt.axis('off')
    ax8 = fig.add_subplot(4, 10, 40)
    im8 = plt.imshow(getImage(acts[0][0][3],3))
    plt.axis('off')
    Writer = animation.writers['ffmpeg']
    writer = Writer(fps=10, metadata=dict(artist='Me'), bitrate=1800)
    def animate(i):
        try:
            im1.set_array(rgb2L(semantic[i]))
        except Exception as e:
            # Bug fix: the old handler printed an undefined name `broken`,
            # which raised NameError inside the except block; report the
            # actual error instead.
            print(str(i)+" - "+str(e))
        im3.set_text(str(i)+" R: "+str(rews[i])[:3]+' V: '+str(vals[i][0][0])[:4])
        im4.set_array(image_array[i])
        im5.set_array(getImage(acts[i][0][0],0))
        im6.set_array(getImage(acts[i][0][1],1))
        im7.set_array(getImage(acts[i][0][2],2))
        im8.set_array(getImage(acts[i][0][3],3))
        return (im1,)
    anim = animation.FuncAnimation(fig, animate, frames=len(image_array))
    display(IPython_display.display_animation(anim))
    if save!=None:
        anim.save(save, writer=writer)
def rgb2L(img):
    """Classify each pixel of an RGB semantic screenshot into a label id.

    Each (R, G, B) pixel is matched against hand-tuned per-channel ranges;
    the first matching class wins, unmatched pixels get 0 ("Other").

    :param img: array of shape (H, W, 3) with integer channel values.
    :return: float array of shape (H, W) with labels in 0..10.
    """
    height, width = img.shape[0], img.shape[1]
    flat = img.reshape(height * width, 3)
    labels = np.zeros(height * width)
    for idx, px in enumerate(flat):
        r, g, b = px[0], px[1], px[2]
        if r in range(20, 30) and g in range(25, 35) and b in range(80, 110):
            labels[idx] = 1  # Agent
        elif r in range(65, 95) and g in range(65, 95) and b in range(30, 50):
            labels[idx] = 2  # Level Door
        elif r in range(30, 50) and g in range(65, 105) and b in range(35, 60):
            labels[idx] = 3  # Green Door
        elif r in range(65, 90) and g in range(35, 50) and b in range(35, 50):
            labels[idx] = 4  # Key Door
        elif r in range(35, 50) and g in range(35, 50) and b in range(35, 55):
            labels[idx] = 5  # Entry Door
        elif r in range(55, 80) and g in range(35, 50) and b in range(75, 110):
            labels[idx] = 6  # Puzzle Door
        elif r in range(65, 95) and g in range(70, 100) and b in range(25, 45):
            labels[idx] = 7  # Key
        elif r in range(10, 40) and g in range(70, 90) and b in range(75, 105):
            labels[idx] = 8  # Orb
        elif r in range(25, 30) and g in range(25, 30) and b in range(25, 30):
            labels[idx] = 9  # Wall
        elif r in range(45, 75) and g in range(40, 60) and b in range(30, 50):
            labels[idx] = 10  # Floor
        else:
            labels[idx] = 0  # Other
    return labels.reshape((height, width))
| 37.08347
| 122
| 0.573705
|
4a0fe392b2debeb359408b6a6b10d5bceca4a811
| 17,431
|
py
|
Python
|
nucleus/iam/login.py
|
1x-eng/PROTON
|
2f27352f7eb9b46642325d800fcdb98ba5c99596
|
[
"BSD-3-Clause"
] | 31
|
2018-09-28T05:00:02.000Z
|
2021-11-09T11:06:57.000Z
|
nucleus/iam/login.py
|
PruthviKumarBK/PROTON
|
2f27352f7eb9b46642325d800fcdb98ba5c99596
|
[
"BSD-3-Clause"
] | 23
|
2019-05-17T08:48:07.000Z
|
2020-01-20T22:34:28.000Z
|
nucleus/iam/login.py
|
1x-eng/PROTON
|
2f27352f7eb9b46642325d800fcdb98ba5c99596
|
[
"BSD-3-Clause"
] | 7
|
2018-09-28T16:57:35.000Z
|
2019-11-23T07:36:41.000Z
|
#
# Copyright (c) 2018, Pruthvi Kumar All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the distribution.
#
# Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import falcon
import json
import os
from datetime import datetime
from nucleus.db.connection_manager import ConnectionManager
from nucleus.email.email import ProtonEmail
from nucleus.iam.jwt_manager import JWTManager
from nucleus.iam.password_manager import PasswordManager
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import Table
from threading import Thread
__author__ = "Pruthvi Kumar, pruthvikumar.123@gmail.com"
__copyright__ = "Copyright (C) 2018 Pruthvi Kumar | http://www.apricity.co.in"
__license__ = "BSD 3-Clause License"
__version__ = "1.0"
class ProtonLogin(ConnectionManager, PasswordManager, JWTManager, ProtonEmail):
    """Authenticates PROTON users against the login registry.

    Mixes in connection management, password hashing/verification, JWT
    generation, and email notification from its four base classes.
    """

    def __init__(self):
        """Create the SQLAlchemy engines and a dedicated login logger."""
        super(ProtonLogin, self).__init__()
        self.__alchemy_engine = self.alchemy_engine()
        self.iam_login_logger = self.get_logger(log_file_name='iam_login_logs.log',
                                                log_file_path='{}/trace/iam_login_logs.log'.format(self.ROOT_DIR))

    def login(self, db_flavour, login_payload, db_name='proton', schema_name='iam', table_name='PROTON_login_registry'):
        """
        Logs in valid PROTON users.
        :param db_flavour: One of the supported databases ('sqlite' or
            'postgresql'). Must have an entry in dataBaseConfig.ini.
        :param db_name: Name of target Database. Default: proton. (Currently
            unused in this method's body.)
        :param schema_name: Name of target schema (PostgreSQL only). Default: iam
        :param table_name: Name of target table. Default: PROTON_login_registry
        :param login_payload: dict with keys 'user_name' and 'password'.
        :return: A dictionary containing login status, message & JWT token if
            successful. NOTE(review): returns None implicitly when the payload
            is invalid or an exception is raised — callers should handle that.
        """
        def validate_login_payload(payload):
            # Payload must be a dict with exactly the keys user_name/password.
            def validate_payload_contents(payload):
                """
                Validates that payload contains atleast one character. More stringent form validation
                must be a client side operation.
                :param payload: login payload
                :return: Boolean
                """
                validity_store = []
                for k, v in payload.items():
                    if len(str(v)) > 0:
                        validity_store.append(True)
                    else:
                        validity_store.append(False)
                if all(validity_store):
                    return True
                return False
            if type(payload) is not dict:
                return False
            required_keys = ['user_name', 'password']
            actual_keys = list(payload.keys())
            if set(required_keys) == set(actual_keys):
                return validate_payload_contents(payload)
            return False
        def threaded_send_email(email_type, email_address):
            # Fire-and-forget notification; run on a Thread so that SMTP
            # latency does not block the login response.
            # NOTE(review): the 'failure' branch ignores the email_address
            # parameter and uses the closed-over `registered_email` instead —
            # works because callers pass the same value, but confirm intent.
            if email_type == 'success':
                self.send_email(email_address,
                                '{} - Successful Login'.format(
                                    os.environ.get('APP_NAME')),
                                '<span>Hi {},<br /><br />'
                                'Someone (hopefully you) '
                                'has successfully logged in to {}.<br/><br />'
                                'If '
                                'you did not make this '
                                'attempt, please contact '
                                '{} immediately.<br /><br />'
                                '<i>We strongly advise '
                                'to choose strong password '
                                'to {} app. Examples of '
                                'strong password - '
                                'https://1password.com/password-generator/'
                                '</i>'.format(login_payload['user_name'],
                                              os.environ.get('APP_NAME'),
                                              os.environ.get('APP_SUPPORT_EMAIL'),
                                              os.environ.get('APP_NAME')))
            elif email_type == 'failure':
                self.send_email(registered_email,
                                '{} - Invalid Login Attempt'.format(
                                    os.environ.get('APP_NAME')),
                                '<span>Hi {},<br /><br />'
                                'Someone (hopefully you) '
                                'tried to login to {} with '
                                'invalid credentials. If '
                                'you did not make this '
                                'attempt, please contact '
                                '{} immediately.<br /><br />'
                                '<i>We strongly advise '
                                'to choose strong password '
                                'to {} app. Examples of '
                                'strong password - https://1password.com/password-generator/'
                                '</i>'.format(login_payload['user_name'],
                                              os.environ.get('APP_NAME'),
                                              os.environ.get('APP_SUPPORT_EMAIL'),
                                              os.environ.get('APP_NAME')))
            else:
                pass
        if validate_login_payload(login_payload):
            try:
                login_payload.update({'last_login_date_time': datetime.now()})
                connection = self.__alchemy_engine[db_flavour].connect()
                with connection.begin() as transaction:
                    if db_flavour == 'sqlite':
                        # Reflect the SQLite schema so table objects are available.
                        metadata = MetaData(self.__alchemy_engine[db_flavour], reflect=True)
                        table = metadata.tables[table_name]
                        # Check if user exists:
                        query_existence = select([table.c.id]).where(table.c.user_name == login_payload['user_name'])
                        existence_results = (connection.execute(query_existence)).fetchall()
                        if len(existence_results) == 0:
                            self.iam_login_logger.info(
                                '[ProtonLogin]:[SQLite] Invalid user_name. Proton denies login for '
                                '{}'.format(login_payload['user_name']))
                            return {
                                'status': False,
                                'message': 'Invalid user_name. Please try again with valid credentials.',
                                'token': None
                            }
                        else:
                            # Check if password matches.
                            query_stored_password = select([table.c.password]).where(
                                table.c.user_name == login_payload['user_name'])
                            stored_password = (connection.execute(query_stored_password)).fetchall()[0][0]
                            password_match = self.verify_password(stored_password, login_payload['password'])
                            # Get registered email to notify upon login.
                            user_registry_table = metadata.tables['PROTON_user_registry']
                            query_user_registry_id = select([table.c.user_registry_id]).where(
                                table.c.user_name == login_payload['user_name'])
                            user_registry_id = (connection.execute(query_user_registry_id)).fetchall()[0][0]
                            query_registered_email = select([user_registry_table.c.email]).where(
                                user_registry_table.c.id == user_registry_id)
                            registered_email = (connection.execute(query_registered_email)).fetchall()[0][0]
                            if not password_match:
                                # Notify asynchronously; do not delay the response.
                                Thread(target=threaded_send_email, args=('failure', registered_email)).start()
                                self.iam_login_logger.info(
                                    '[ProtonLogin]:[SQLite] Invalid password. Proton denies login for '
                                    '{}'.format(login_payload['user_name']))
                                return {
                                    'status': False,
                                    'message': 'Invalid password. Please try again with valid credentials',
                                    'token': None
                                }
                            else:
                                Thread(target=threaded_send_email, args=('success', registered_email)).start()
                                self.iam_login_logger.info(
                                    '[ProtonLogin]:[SQLite] Valid login. Proton login successful for '
                                    '{}'.format(login_payload['user_name']))
                                token = self.generate_token(login_payload['user_name'])
                                return {
                                    'status': True,
                                    'message': 'Successful Login',
                                    'token': token
                                }
                    elif db_flavour == 'postgresql':
                        schema_status = self.__alchemy_engine[db_flavour].dialect.has_schema(
                            self.__alchemy_engine[db_flavour], schema_name)
                        metadata = MetaData(self.__alchemy_engine[db_flavour], reflect=True, schema=schema_name)
                        metadata.reflect(self.__alchemy_engine[db_flavour])
                        if schema_status:
                            # Check if user exists:
                            login_registry_table = Table('PROTON_login_registry', metadata)
                            query_existence = select([login_registry_table.c.id]).where(
                                login_registry_table.c.user_name == login_payload['user_name'])
                            existence_results = (connection.execute(query_existence)).fetchall()
                            if len(existence_results) == 0:
                                self.iam_login_logger.info(
                                    '[ProtonLogin]:[Postgresql] Invalid user_name. Proton denies login '
                                    'for {}'.format(login_payload['user_name']))
                                return {
                                    'status': False,
                                    'message': 'Invalid user_name. Please try again with valid credentials.',
                                    'token': None
                                }
                            else:
                                # Check if password matches.
                                query_stored_password = select([login_registry_table.c.password]).where(
                                    login_registry_table.c.user_name == login_payload['user_name'])
                                stored_password = (connection.execute(query_stored_password)).fetchall()[0][0]
                                password_match = self.verify_password(stored_password, login_payload['password'])
                                # Get registered email to notify upon login.
                                user_registry_table = Table('PROTON_user_registry', metadata)
                                query_user_registry_id = select([login_registry_table.c.user_registry_id]).where(
                                    login_registry_table.c.user_name == login_payload['user_name'])
                                user_registry_id = (connection.execute(query_user_registry_id)).fetchall()[0][0]
                                query_registered_email = select([user_registry_table.c.email]).where(
                                    user_registry_table.c.id == user_registry_id)
                                registered_email = (connection.execute(query_registered_email)).fetchall()[0][0]
                                if not password_match:
                                    Thread(target=threaded_send_email, args=('failure', registered_email)).start()
                                    self.iam_login_logger.info(
                                        '[ProtonLogin]:[Postgresql] Invalid password. Proton denies login for '
                                        '{}'.format(login_payload['user_name']))
                                    return {
                                        'status': False,
                                        'message': 'Invalid password. Please try again with valid credentials.',
                                        'token': None
                                    }
                                else:
                                    Thread(target=threaded_send_email, args=('success', registered_email)).start()
                                    self.iam_login_logger.info(
                                        '[ProtonLogin]:[Postgresql] Valid login. Proton login successful for '
                                        '{}'.format(login_payload['user_name']))
                                    token = self.generate_token(login_payload['user_name'])
                                    # NOTE(review): unlike the SQLite branch, this
                                    # success dict also carries the login registry 'id'.
                                    return {
                                        'status': True,
                                        'message': 'Successful Login',
                                        'token': token,
                                        'id': existence_results[0][0]
                                    }
                        else:
                            self.iam_login_logger.info(
                                '[ProtonLogin]:[Postgresql] {} schema does not exist. Proton denies login '
                                'for {}'.format(schema_name, login_payload['user_name']))
                            return {
                                'status': False,
                                'message': 'Login not possible due to server side error. Please try again in sometime.',
                                'token': None
                            }
                    else:
                        self.iam_login_logger.info(
                            '[ProtonLogin]: New Login is unsuccessful due to unsupported db_flavour. Proton '
                            'was asked for {} to login; by, {}'.format(db_flavour,
                                                                       login_payload['user_name']))
                        return {
                            'status': False,
                            'message': 'PROTON only supports SQLite and Postgresql atm. Do you have valid db_flavour '
                                       'in your payload?',
                            'token': None
                        }
                    # NOTE(review): unreachable — every branch above returns
                    # before reaching these two statements.
                    transaction.commit()
                    connection.close()
            except Exception as e:
                self.iam_login_logger.info('[ProtonLogin]: Exception while loggin in User. Stack trace to follow')
                self.iam_login_logger.exception(str(e))
            finally:
                # NOTE(review): if connect() itself raised, `connection` is
                # unbound here and this raises NameError — confirm/fix upstream.
                if connection:
                    connection.close()
class IctrlProtonLogin(ProtonLogin):
    """Falcon HTTP resource exposing ProtonLogin's login() over POST."""

    def __init__(self):
        super(IctrlProtonLogin, self).__init__()

    def on_get(self, req, resp):
        """Login has no GET semantics; signal service unavailable."""
        resp.status = falcon.HTTP_SERVICE_UNAVAILABLE

    def on_post(self, req, resp):
        """Authenticate the posted JSON payload and return the login result."""
        try:
            payload = json.loads(req.stream.read())
            outcome = self.login(payload['db_flavour'], payload['login_payload'])
            resp.body = json.dumps(outcome)
            resp.status = falcon.HTTP_200
        except Exception:
            # Malformed JSON or missing keys: respond with usage guidance.
            resp.body = json.dumps({
                'message': "POST request must contain 'db_flavour'[PROTON supports `sqlite` or `postgresql`] and "
                           "'login_payload'."
            })
            resp.status = falcon.HTTP_403
| 55.336508
| 120
| 0.498824
|
4a0fe4ac1385ab6fe88e79305393600f5cb7dce5
| 845
|
py
|
Python
|
example files/flask_apps/vib2/controller.py
|
nikku1234/InbreastData-Html-Page
|
5f02b2e03e5f2f8f9fe9e2ce1b089b4dd2e36323
|
[
"Apache-2.0"
] | 1
|
2020-07-02T06:06:18.000Z
|
2020-07-02T06:06:18.000Z
|
example files/flask_apps/vib2/controller.py
|
nikku1234/InbreastData-Html-Page
|
5f02b2e03e5f2f8f9fe9e2ce1b089b4dd2e36323
|
[
"Apache-2.0"
] | 6
|
2020-06-17T14:19:47.000Z
|
2022-03-12T00:36:20.000Z
|
example files/flask_apps/vib2/controller.py
|
nikku1234/InbreastData-Html-Page
|
5f02b2e03e5f2f8f9fe9e2ce1b089b4dd2e36323
|
[
"Apache-2.0"
] | null | null | null |
from model import InputForm
from flask import Flask, render_template, request
import sys
# Select the plot backend from the command line: pass 'svg' as the first
# argument to render SVG plots; anything else (or no argument) means PNG.
svg = False
try:
    if sys.argv[1] == 'svg':
        svg = True
except IndexError:
    # No CLI argument given; keep the PNG default.
    pass
# Pick the compute function and HTML template matching the chosen backend.
if svg:
    from compute import compute_png_svg as compute
    template = 'view_svg.html'
else:
    from compute import compute
    template = 'view.html'
app = Flask(__name__)
@app.route('/vib2', methods=['GET', 'POST'])
def index():
    """Render the vibration form; on valid POST, compute and show the plot.

    :return: rendered HTML for the selected template, with ``result`` set to
        the compute() output on success or None otherwise.
    """
    form = InputForm(request.form)
    if request.method == 'POST' and form.validate():
        # Bug/security fix: the previous exec('%s = %s' % (field.name,
        # field.data)) cannot create function locals in Python 3 (A, b, w, T
        # would be NameError) and executes user-supplied form data as code.
        # Read the validated field values directly instead.
        data = {field.name: field.data for field in form}
        result = compute(data['A'], data['b'], data['w'], data['T'])
    else:
        result = None
    return render_template(template, form=form, result=result)
if __name__ == '__main__':
    # Development server only: debug=True enables the reloader and the
    # interactive debugger — never use in production.
    app.run(debug=True)
| 24.142857
| 62
| 0.635503
|
4a0fe4e0ccbb4163afe871ba1bd5fd87c9c63a99
| 286
|
py
|
Python
|
Paned window/paned_window.py
|
hemidvsmusayev/Tkinter-learn
|
3d35d7fedbda92a47450b84e3896e701e95de8cf
|
[
"MIT"
] | 1
|
2020-08-27T12:31:12.000Z
|
2020-08-27T12:31:12.000Z
|
Paned window/paned_window.py
|
hemidvsmusayev/Tkinter-learn
|
3d35d7fedbda92a47450b84e3896e701e95de8cf
|
[
"MIT"
] | null | null | null |
Paned window/paned_window.py
|
hemidvsmusayev/Tkinter-learn
|
3d35d7fedbda92a47450b84e3896e701e95de8cf
|
[
"MIT"
] | null | null | null |
import tkinter
from tkinter import *
# Demo: a horizontal PanedWindow holding an Entry on the left and a nested
# vertical PanedWindow (Scale above a Button) on the right.
pw = PanedWindow()
pw.pack(fill=BOTH, expand=1)
# Left pane: a text entry with a thick border.
left = Entry(pw, bd=5)
pw.add(left)
# Right pane: a vertical paned window nested inside the horizontal one.
pw2 = PanedWindow(pw, orient=VERTICAL)
pw.add(pw2)
top = Scale(pw2, orient=HORIZONTAL)
pw2.add(top)
button = Button(pw2, text="Ok")
pw2.add(button)
# Enter the Tk event loop; blocks until the window is closed.
mainloop()
| 14.3
| 38
| 0.706294
|
4a0fe5160498492f001a3381d8a37436abd568c5
| 6,766
|
py
|
Python
|
rpscv/camera.py
|
sourcery-ai-bot/rps-cv
|
9445528b106b445f0be266218889074a1a47e51d
|
[
"MIT"
] | null | null | null |
rpscv/camera.py
|
sourcery-ai-bot/rps-cv
|
9445528b106b445f0be266218889074a1a47e51d
|
[
"MIT"
] | null | null | null |
rpscv/camera.py
|
sourcery-ai-bot/rps-cv
|
9445528b106b445f0be266218889074a1a47e51d
|
[
"MIT"
] | null | null | null |
# camera.py
# Source: https://github.com/DrGFreeman/rps-cv
#
# MIT License
#
# Copyright (c) 2017-2019 Julien de la Bruere-Terreault <drgfreeman@tuta.io>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This module defines the Camera class, a wrapper for the Raspberry Pi camera
# based on the picamera library to be used with OpenCV computer vision library.
import time
import cv2
import numpy as np
#from picamera import PiCamera, PiCameraCircularIO
from rpscv.utils import Filter1D, Timer
class Camera():
    """A wrapper class for the Raspberry Pi camera using the picamera
    python library, producing frames as OpenCV (numpy BGR) image arrays.

    The size parameter sets the camera resolution to size * (64, 48).
    """

    def __init__(self, size=10, frameRate=40, hflip=False, vflip=False):
        """Initialize the camera and start continuous recording.

        Args:
            size: integer in range 1 to 51; the camera resolution is set
                to size * (64, 48) pixels.
            frameRate: capture framerate in frames per second.
            hflip: horizontal image flip flag.
            vflip: vertical image flip flag.

        Raises:
            TypeError: if size is not an integer.
            ValueError: if size is outside the range 1 to 51.
        """
        # Imported here so this module can be imported on machines without
        # the picamera package (e.g. for tests on a desktop).
        from picamera import PiCamera, PiCameraCircularIO
        self.active = False
        # Validate directly. The original wrapped these raises in
        # `try/except TypeError or ValueError: raise`, but
        # `TypeError or ValueError` evaluates to TypeError, so ValueError
        # was never named in the handler; the whole construct was a no-op.
        if type(size) is not int:
            raise TypeError("Size must be an integer")
        if size < 1 or size > 51:
            raise ValueError("Size must be in range 1 to 51")
        self.size = size
        self.hRes = size * 64
        self.vRes = size * 48
        self.picam = PiCamera()
        self.picam.resolution = (self.hRes, self.vRes)
        self.picam.framerate = frameRate
        self.picam.hflip = hflip
        self.picam.vflip = vflip
        # Give the camera sensor time to settle before recording.
        time.sleep(1)
        self.stream = PiCameraCircularIO(self.picam, seconds=1)
        self.frameRateTimer = Timer()
        self.frameRateFilter = Filter1D(maxSize=21)
        self.start()

    def close(self):
        """Stops the continuous recording and closes the PiCamera
        instance."""
        self.stop()
        self.picam.close()

    def doWhiteBalance(self, awbFilename='awb_gains.txt', mode='auto'):
        """Performs white balance calibration, sets the PiCamera
        awb_gains to fixed values and writes these values to a file. For
        best results, put a white object in the camera field of view (a
        sheet of paper) during the calibration process.

        Args:
            awbFilename: path of the file the gains are written to.
            mode: PiCamera awb_mode used during calibration.
        """
        ## Set AWB mode for calibration
        self.picam.awb_mode = mode
        print('Calibrating white balance gains...')
        time.sleep(1)
        ## Average the AWB gains over a number of readings
        gRed = 0
        gBlue = 0
        nbReadings = 100
        for _ in range(nbReadings):
            gains = self.picam.awb_gains
            gRed += gains[0]
            gBlue += gains[1]
            time.sleep(.01)
        gains = gRed / nbReadings, gBlue / nbReadings
        ## Set AWB mode to off (manual)
        self.picam.awb_mode = 'off'
        ## Set AWB gains to remain constant
        self.picam.awb_gains = gains
        ## Write AWB gains to file (gains are Fractions; store as floats)
        gRed = float(gains[0])
        gBlue = float(gains[1])
        with open(awbFilename, 'w') as f:
            f.write(str(gRed) + ', ' + str(gBlue))
        print('AWB gains set to:', gRed, gBlue)
        print('AWB gains written to ' + awbFilename)

    def addFrameRateText(self, img, pos=(0, 25), bgr=(0,255,0), samples=21):
        """Returns an image with the frame rate added as text on the image
        passed as argument. The framerate is calculated based on the time
        between calls to this method and averaged over a number of samples.

        Args:
            img: image to which the framerate is to be added.
            pos: (x, y) position of the text in the image.
            bgr: tuple defining the blue, green and red values of the
                text color.
            samples: number of samples used for averaging.
        """
        # Calculate framerate and reset timer
        self.frameRateFilter.addDataPoint(1 / self.frameRateTimer.getElapsed())
        self.frameRateTimer.reset()
        # Get averaged framerate as a string
        frString = '{}fps'.format(str(int(round(self.frameRateFilter.getMean(),
                                                0))))
        # Add text to image
        cv2.putText(img, frString, pos, cv2.FONT_HERSHEY_DUPLEX, 1, bgr)

    def getOpenCVImage(self):
        """Grabs a frame from the camera and returns an OpenCV image
        array of shape (vRes, hRes, 3) in BGR channel order."""
        img = np.empty((self.vRes * self.hRes * 3), dtype=np.uint8)
        self.picam.capture(img, 'bgr', use_video_port=True)
        return img.reshape((self.vRes, self.hRes, 3))

    def readWhiteBalance(self, awbFilename='awb_gains.txt'):
        """Reads white balance gains from a file created using the
        .doWhiteBalance() method and fixes the PiCamera awb_gains
        parameter to these values.

        Args:
            awbFilename: path of the file the gains are read from.
        """
        ## Read AWB gains from file ("<red>, <blue>" on one line)
        with open(awbFilename, 'r') as f:
            line = f.readline()
        gRed, gBlue = [float(g) for g in line.split(', ')]
        ## Set AWB mode to off (manual)
        self.picam.awb_mode = 'off'
        ## Set AWB gains to remain constant
        self.picam.awb_gains = gRed, gBlue
        print('AWB gains set to:', gRed, gBlue)

    def start(self):
        """Starts continuous recording of the camera into a
        PiCameraCircularIO buffer. No-op if already active."""
        if not self.active:
            self.active = True
            self.picam.start_recording(self.stream, format='h264',
                                       resize=(self.hRes, self.vRes))

    def startPreview(self):
        """Starts the preview of the PiCamera. Works only on the display
        connected directly on the Raspberry Pi."""
        self.picam.start_preview()

    def stop(self):
        """Stops the camera continuous recording and stops the preview if
        active."""
        self.active = False
        self.picam.stop_recording()
        self.stopPreview()

    def stopPreview(self):
        """Stops the PiCamera preview if active."""
        self.picam.stop_preview()
| 40.759036
| 80
| 0.634939
|
4a0fe58f52153bb918233444c25647a95867f68c
| 227
|
py
|
Python
|
Implementation/1134. Armstrong Number.py
|
thewires2/Leetcode
|
a37ff81d60dd9195ba637b970b40aabbea5f4680
|
[
"Unlicense"
] | 1
|
2021-06-30T17:51:56.000Z
|
2021-06-30T17:51:56.000Z
|
Implementation/1134. Armstrong Number.py
|
thewires2/Leetcode
|
a37ff81d60dd9195ba637b970b40aabbea5f4680
|
[
"Unlicense"
] | null | null | null |
Implementation/1134. Armstrong Number.py
|
thewires2/Leetcode
|
a37ff81d60dd9195ba637b970b40aabbea5f4680
|
[
"Unlicense"
] | null | null | null |
class Solution:
    def isArmstrong(self, N: int) -> bool:
        """Return True if N is an Armstrong (narcissistic) number.

        A k-digit number is Armstrong when the sum of its digits each
        raised to the k-th power equals the number itself
        (e.g. 153 = 1**3 + 5**3 + 3**3).
        """
        digits = [int(d) for d in str(N)]
        # Return the comparison directly instead of an if/else that
        # returns True/False.
        return N == sum(d ** len(digits) for d in digits)
| 22.7
| 42
| 0.471366
|
4a0fe71e137e78b527412158d34220a5764ec69b
| 13,640
|
py
|
Python
|
improver_tests/calibration/ensemble_calibration/helper_functions.py
|
cpelley/improver
|
ebf77fe2adc85ed7aec74c26671872a2e4388ded
|
[
"BSD-3-Clause"
] | 77
|
2017-04-26T07:47:40.000Z
|
2022-03-31T09:40:49.000Z
|
improver_tests/calibration/ensemble_calibration/helper_functions.py
|
cpelley/improver
|
ebf77fe2adc85ed7aec74c26671872a2e4388ded
|
[
"BSD-3-Clause"
] | 1,440
|
2017-03-29T10:04:15.000Z
|
2022-03-28T10:11:29.000Z
|
improver_tests/calibration/ensemble_calibration/helper_functions.py
|
MoseleyS/improver
|
ca028e3a1c842e3ff00b188c8ea6eaedd0a07149
|
[
"BSD-3-Clause"
] | 72
|
2017-03-17T16:53:45.000Z
|
2022-02-16T09:41:37.000Z
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Functions for use within unit tests for `ensemble_calibration` plugins.
"""
import datetime
import iris
import numpy as np
from cf_units import Unit
from iris.tests import IrisTest
from improver.metadata.constants.attributes import MANDATORY_ATTRIBUTE_DEFAULTS
from improver.spotdata.build_spotdata_cube import build_spotdata_cube
from improver.synthetic_data.set_up_test_cubes import (
construct_scalar_time_coords,
set_up_variable_cube,
)
from improver.utilities.warnings_handler import ManageWarnings
# Warning messages/types suppressed via ManageWarnings in the test setup.
IGNORED_MESSAGES = ["Collapsing a non-contiguous coordinate"]
WARNING_TYPES = [UserWarning]
class EnsembleCalibrationAssertions(IrisTest):
    """Additional assertions, specifically for usage in the
    ensemble calibration unit tests."""

    def _assertAlmostEqualTo4Places(self, first, second):
        # Shared implementation: the EMOS minimisation (Nelder-Mead) runs
        # with a default tolerance of 0.0001, so results produced on
        # different machines are only expected to agree to 4 decimal
        # places.
        self.assertArrayAlmostEqual(first, second, decimal=4)

    def assertEMOSCoefficientsAlmostEqual(self, first, second):
        """Assert that two arrays of EMOS coefficients match to 4 decimal
        places, the precision justified by the default Nelder-Mead
        minimisation tolerance of 0.0001.

        Args:
            first (numpy.ndarray):
                First array to compare.
            second (numpy.ndarray):
                Second array to compare.
        """
        self._assertAlmostEqualTo4Places(first, second)

    def assertCalibratedVariablesAlmostEqual(self, first, second):
        """Assert that two arrays of calibrated variables (produced by
        applying the EMOS coefficients) match to 4 decimal places, the
        precision justified by the default Nelder-Mead minimisation
        tolerance of 0.0001.

        Args:
            first (numpy.ndarray):
                First array to compare.
            second (numpy.ndarray):
                Second array to compare.
        """
        self._assertAlmostEqualTo4Places(first, second)
class SetupCubes(IrisTest):
    """Set up cubes for testing.

    Builds gridded temperature and wind-speed cubes (current forecast,
    historic forecasts and truths, plus halo-padded variants) and a set of
    spot-data cubes, all stored as attributes for use by test subclasses.
    """
    @ManageWarnings(ignored_messages=IGNORED_MESSAGES, warning_types=WARNING_TYPES)
    def setUp(self):
        """Set up temperature and wind speed cubes for testing."""
        super().setUp()
        frt_dt = datetime.datetime(2017, 11, 10, 0, 0)
        time_dt = datetime.datetime(2017, 11, 10, 4, 0)
        # 3 realizations on a 3x3 grid; reused for both diagnostics.
        base_data = np.array(
            [
                [[0.3, 1.1, 2.6], [4.2, 5.3, 5.9], [7.1, 8.2, 8.8]],
                [[0.7, 2.0, 2.9], [4.3, 5.6, 6.4], [7.0, 7.0, 9.2]],
                [[2.1, 3.0, 3.1], [4.8, 5.0, 6.1], [7.9, 8.1, 8.9]],
            ],
            dtype=np.float32,
        )
        temperature_data = Unit("Celsius").convert(base_data, "Kelvin")
        self.current_temperature_forecast_cube = set_up_variable_cube(
            temperature_data,
            units="Kelvin",
            realizations=[0, 1, 2],
            time=time_dt,
            frt=frt_dt,
            attributes=MANDATORY_ATTRIBUTE_DEFAULTS,
        )
        # Historic forecasts/truths start 5 days before the current forecast.
        time_dt = time_dt - datetime.timedelta(days=5)
        frt_dt = frt_dt - datetime.timedelta(days=5)
        # Create historic forecasts and truth
        self.historic_forecasts = _create_historic_forecasts(
            temperature_data, time_dt, frt_dt, realizations=[0, 1, 2]
        )
        self.truth = _create_truth(temperature_data, time_dt)
        # Create a combined list of historic forecasts and truth
        self.combined = self.historic_forecasts + self.truth
        # Create the historic and truth cubes
        self.historic_temperature_forecast_cube = self.historic_forecasts.merge_cube()
        self.temperature_truth_cube = self.truth.merge_cube()
        # Create a cube for testing wind speed.
        self.current_wind_speed_forecast_cube = set_up_variable_cube(
            base_data,
            name="wind_speed",
            units="m s-1",
            realizations=[0, 1, 2],
            attributes=MANDATORY_ATTRIBUTE_DEFAULTS,
        )
        self.historic_wind_speed_forecast_cube = _create_historic_forecasts(
            base_data,
            time_dt,
            frt_dt,
            realizations=[0, 1, 2],
            name="wind_speed",
            units="m s-1",
        ).merge_cube()
        self.wind_speed_truth_cube = _create_truth(
            base_data, time_dt, name="wind_speed", units="m s-1"
        ).merge_cube()
        # Set up another set of cubes which have a halo of zeros round the
        # original data. This data will be masked out in tests using a
        # landsea_mask
        base_data = np.pad(base_data, ((0, 0), (1, 1), (1, 1)), mode="constant")
        temperature_data = Unit("Celsius").convert(base_data, "Kelvin")
        # Create historic forecasts and truth
        self.historic_forecasts_halo = _create_historic_forecasts(
            temperature_data, time_dt, frt_dt, realizations=[0, 1, 2]
        )
        self.truth_halo = _create_truth(temperature_data, time_dt)
        # Create the historic and truth cubes
        self.historic_temperature_forecast_cube_halo = (
            self.historic_forecasts_halo.merge_cube()
        )
        self.temperature_truth_cube_halo = self.truth_halo.merge_cube()
        # Create a cube for testing wind speed.
        self.historic_wind_speed_forecast_cube_halo = _create_historic_forecasts(
            base_data,
            time_dt,
            frt_dt,
            realizations=[0, 1, 2],
            name="wind_speed",
            units="m s-1",
        ).merge_cube()
        self.wind_speed_truth_cube_halo = _create_truth(
            base_data, time_dt, name="wind_speed", units="m s-1"
        ).merge_cube()
        # Spot cubes: 4 sites, 2 realizations, 6 days (5 historic + 1 current).
        data = np.array([1.6, 1.3, 1.4, 1.1])
        altitude = np.array([10, 20, 30, 40])
        latitude = np.linspace(58.0, 59.5, 4)
        longitude = np.linspace(-0.25, 0.5, 4)
        wmo_id = ["03001", "03002", "03003", "03004"]
        forecast_spot_cubes = iris.cube.CubeList()
        for realization in range(1, 3):
            realization_coord = [
                iris.coords.DimCoord(realization, standard_name="realization")
            ]
            for day in range(5, 11):
                time_coords = construct_scalar_time_coords(
                    datetime.datetime(2017, 11, day, 4, 0),
                    None,
                    datetime.datetime(2017, 11, day, 0, 0),
                )
                # construct_scalar_time_coords returns (coord, dims) pairs;
                # keep only the coords.
                time_coords = [t[0] for t in time_coords]
                forecast_spot_cubes.append(
                    build_spotdata_cube(
                        data + 0.2 * day,
                        "air_temperature",
                        "degC",
                        altitude,
                        latitude,
                        longitude,
                        wmo_id,
                        scalar_coords=time_coords + realization_coord,
                    )
                )
        forecast_spot_cube = forecast_spot_cubes.merge_cube()
        # First 5 days are the historic forecasts; day 6 is the current one.
        self.historic_forecast_spot_cube = forecast_spot_cube[:, :5, :]
        self.historic_forecast_spot_cube.convert_units("Kelvin")
        self.historic_forecast_spot_cube.attributes = MANDATORY_ATTRIBUTE_DEFAULTS
        self.current_forecast_spot_cube = forecast_spot_cube[:, 5, :]
        self.current_forecast_spot_cube.convert_units("Kelvin")
        self.current_forecast_spot_cube.attributes = MANDATORY_ATTRIBUTE_DEFAULTS
        self.truth_spot_cube = self.historic_forecast_spot_cube[0].copy()
        self.truth_spot_cube.remove_coord("realization")
        self.truth_spot_cube.data = self.truth_spot_cube.data + 1.0
        # Ancillary cube holding only the site altitudes.
        self.spot_altitude_cube = forecast_spot_cube[0, 0].copy(
            forecast_spot_cube.coord("altitude").points
        )
        self.spot_altitude_cube.rename("altitude")
        self.spot_altitude_cube.units = "m"
        for coord in [
            "altitude",
            "forecast_period",
            "forecast_reference_time",
            "realization",
            "time",
        ]:
            self.spot_altitude_cube.remove_coord(coord)
def _create_historic_forecasts(
    data, time_dt, frt_dt, standard_grid_metadata="uk_ens", number_of_days=5, **kwargs
):
    """Build a cubelist of historic forecasts, one per day at the same
    hour of the day.

    The data are offset by -2 (with a small +0.2 per-day drift) so that
    the historic forecasts differ from the current forecast, which is
    built from the unadjusted data.

    Please see improver.tests.set_up_test_cubes.set_up_variable_cube for
    the supported keyword arguments.

    Args:
        data (numpy.ndarray):
            Numpy array used to fill each cube.
        time_dt (datetime.datetime):
            Initial validity time, incremented in one day steps.
        frt_dt (datetime.datetime):
            Initial forecast reference time, incremented in one day steps.
        standard_grid_metadata (str):
            Please see improver.tests.set_up_test_cubes.set_up_variable_cube.
        number_of_days (int):
            Number of daily forecasts to create.

    Returns:
        iris.cube.CubeList:
            Cubelist of historic forecasts in one day increments.
    """
    forecasts = iris.cube.CubeList([])
    for offset in range(number_of_days):
        shift = datetime.timedelta(days=offset)
        forecasts.append(
            set_up_variable_cube(
                data - 2 + 0.2 * offset,
                time=time_dt + shift,
                frt=frt_dt + shift,
                standard_grid_metadata=standard_grid_metadata,
                **kwargs,
            )
        )
    return forecasts
def _create_truth(data, time_dt, number_of_days=5, **kwargs):
    """Build a cubelist of truth cubes, one per day at the same hour of
    the day.

    Each truth cube is the realization maximum of (data - 3), with a
    small +0.2 per-day drift, so that the truths differ from the current
    forecast built from the unadjusted data. The validity time is also
    used as the forecast reference time, and 'uk_det' grid metadata is
    applied.

    Please see improver.tests.set_up_test_cubes.set_up_variable_cube for
    the other supported keyword arguments.

    Args:
        data (numpy.ndarray):
            Numpy array used to derive the truth data.
        time_dt (datetime.datetime):
            Initial validity time, incremented in one day steps.
        number_of_days (int):
            Number of daily truths to create.

    Returns:
        iris.cube.CubeList:
            Cubelist of truths in one day increments.
    """
    truths = iris.cube.CubeList([])
    for offset in range(number_of_days):
        valid_time = time_dt + datetime.timedelta(days=offset)
        truths.append(
            set_up_variable_cube(
                np.amax(data - 3, axis=0) + 0.2 * offset,
                time=valid_time,
                frt=valid_time,
                standard_grid_metadata="uk_det",
                **kwargs,
            )
        )
    return truths
if __name__ == "__main__":
    # This module only provides helpers for other test modules; it has no
    # standalone behaviour.
    pass
| 40.474777
| 86
| 0.640249
|
4a0fe7ac1d4cb800d86747b9a8f642bd24bc855a
| 1,044
|
py
|
Python
|
OOP/inheritence2.py
|
HarshilPatel007/learn-python
|
e64e6f8c95d39bfc229519ee7043787864997cdf
|
[
"MIT"
] | null | null | null |
OOP/inheritence2.py
|
HarshilPatel007/learn-python
|
e64e6f8c95d39bfc229519ee7043787864997cdf
|
[
"MIT"
] | null | null | null |
OOP/inheritence2.py
|
HarshilPatel007/learn-python
|
e64e6f8c95d39bfc229519ee7043787864997cdf
|
[
"MIT"
] | null | null | null |
class A:
    # Base class of the inheritance demo: announces construction and
    # provides feature1/feature2. Note __init__ does NOT call
    # super().__init__(), which matters for the multiple-inheritance
    # example in class C below.
    def __init__(self):
        print("In A __init__")
    def feature1(self):
        print("Feature 1")
    def feature2(self):
        print("Feature 2")
# class B(A):
class B:
    # B is deliberately NOT inheriting from A here (the inherited variant
    # is left commented out above for the demo). With no base class,
    # super().__init__() resolves to object.__init__(), which prints
    # nothing — so B() prints only "In B __init__".
    def __init__(self):
        super().__init__()
        print("In B __init__")
    def feature3(self):
        print("Feature 3")
    def feature4(self):
        print("Feature 4")
class C(A, B):
    # MRO is C -> A -> B -> object. super().__init__() therefore calls
    # A.__init__; since A.__init__ does not chain with super(),
    # B.__init__ is never reached, and C() prints
    # "In A __init__" then "In C __init__".
    def __init__(self):
        super().__init__()
        print("In C __init__")
    def feature5(self):
        # Resolves via the MRO to A.feature2, printing "Feature 2".
        super().feature2()
# Demo section: each commented-out call is annotated with the output it
# produced at a given stage of the example (the bare triple-quoted
# strings are no-op expression statements used as output transcripts).
# a = A()
# a.feature1()
# a.feature2()
""" Output
In A __init__
Feature 1
Feature 2
"""
# b = B()
""" Output
In A __init__ => We've not created __init__ method into B Class.
"""
# b = B()
""" Output
In B __init__ => We've created __init__ method into B Class.
"""
# b = B()
""" Output
In A __init__
In B __init__ => We've created super().__init__() in __init__ method of B Class.
"""
# With the current (non-inheriting) B, this prints:
# "In A __init__", "In C __init__", then "Feature 2".
c = C()
c.feature5()
| 15.352941
| 85
| 0.516284
|
4a0fe7c6e624f026b58d89bd62832d2a8b4f49b5
| 4,388
|
py
|
Python
|
test/t_compliance/t_notify/test_fd_notifier.py
|
Invidence/auditree-framework
|
39fbef32aa5066449485af501a74051838dc45f6
|
[
"Apache-2.0"
] | null | null | null |
test/t_compliance/t_notify/test_fd_notifier.py
|
Invidence/auditree-framework
|
39fbef32aa5066449485af501a74051838dc45f6
|
[
"Apache-2.0"
] | 15
|
2020-11-10T23:01:35.000Z
|
2021-08-19T23:30:27.000Z
|
test/t_compliance/t_notify/test_fd_notifier.py
|
dlminvestments/auditree-framework
|
19858c17797a7626fe20f0489d1aab163c6d69ec
|
[
"Apache-2.0"
] | null | null | null |
# -*- mode:python; coding:utf-8 -*-
# Copyright (c) 2020 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compliance automation file descriptor notifier tests module."""
import unittest
from io import StringIO
from unittest.mock import create_autospec
from compliance.controls import ControlDescriptor
from compliance.notify import FDNotifier
from .. import build_test_mock
class FDNotifierTest(unittest.TestCase):
    """FDNotifier test class.

    Exercises FDNotifier by writing notifications into an in-memory
    StringIO stream and asserting on the exact text produced.
    """
    def setUp(self):
        """Initialize each test."""
        # In-memory text stream standing in for the notifier's file
        # descriptor; inspected via getvalue() in the assertions.
        self.fd = StringIO()
    def test_notify_with_no_results(self):
        """Check that FDNotifier notifies that there are no results."""
        notifier = FDNotifier({}, {}, self.fd)
        notifier.notify()
        self.assertEqual(
            self.fd.getvalue(), '\n-- NOTIFICATIONS --\n\nNo results\n'
        )
    def test_notify_normal_run(self):
        """Check that FDNotifier notifies a test with Error."""
        # One mocked check per status: error, warn, fail and pass.
        results = {
            'compliance.test.one': {
                'status': 'error', 'test': build_test_mock()
            },
            'compliance.test.two': {
                'status': 'warn', 'test': build_test_mock('two', warns=1)
            },
            'compliance.test.three': {
                'status': 'fail', 'test': build_test_mock('three', fails=1)
            },
            'compliance.test.four': {
                'status': 'pass', 'test': build_test_mock('four')
            }
        }
        controls = create_autospec(ControlDescriptor)
        controls.get_accreditations.return_value = ['infra-internal']
        notifier = FDNotifier(results, controls, self.fd)
        notifier.notify()
        # Header names the accreditation the checks belong to.
        self.assertIn(
            (
                '\n-- NOTIFICATIONS --\n\n'
                'Notifications for INFRA-INTERNAL accreditation'
            ),
            self.fd.getvalue()
        )
        # Errored check includes the failure-to-execute detail line.
        self.assertIn(
            (
                'mock check title one - ERROR () Reports: (none) '
                '| <http://mockedrunbooks/path/to/runbook_one|Run Book>\n'
                'Check compliance.test.one failed to execute'
            ),
            self.fd.getvalue()
        )
        self.assertIn(
            (
                'mock check title two - WARN (1 warnings) Reports: (none) '
                '| <http://mockedrunbooks/path/to/runbook_two|Run Book>'
            ),
            self.fd.getvalue()
        )
        self.assertIn(
            (
                'mock check title three - FAIL (1 failures) Reports: (none) '
                '| <http://mockedrunbooks/path/to/runbook_three|Run Book>'
            ),
            self.fd.getvalue()
        )
        # Passing checks are summarised in a single line.
        self.assertIn(
            'PASSED checks: mock check title four', self.fd.getvalue()
        )
    def test_notify_push_error(self):
        """Check that FDNotifier notifies a test with Error."""
        results = {
            'compliance.test.one': {
                'status': 'error', 'test': build_test_mock()
            },
            'compliance.test.two': {
                'status': 'warn', 'test': build_test_mock('two', warns=1)
            },
            'compliance.test.three': {
                'status': 'fail', 'test': build_test_mock('three', fails=1)
            },
            'compliance.test.four': {
                'status': 'pass', 'test': build_test_mock('four')
            }
        }
        controls = create_autospec(ControlDescriptor)
        controls.get_accreditations.return_value = ['infra-internal']
        # push_error=True should replace per-check output with a single
        # locker-push failure message.
        notifier = FDNotifier(results, controls, self.fd, push_error=True)
        notifier.notify()
        self.assertEqual(
            (
                '\n-- NOTIFICATIONS --\n\n'
                'All accreditation checks: '
                'Evidence/Results failed to push to remote locker.\n'
            ),
            self.fd.getvalue()
        )
| 35.674797
| 77
| 0.562215
|
4a0fe82b76199e567701522aba9d213efcd5260a
| 12,765
|
py
|
Python
|
dtBot/dtBot.py
|
an0mali/dtBot
|
a8333c0af61f1dbc166b447167145a0795f38d0f
|
[
"MIT"
] | 1
|
2022-02-16T19:27:31.000Z
|
2022-02-16T19:27:31.000Z
|
dtBot/dtBot.py
|
an0mali/dtBot
|
a8333c0af61f1dbc166b447167145a0795f38d0f
|
[
"MIT"
] | null | null | null |
dtBot/dtBot.py
|
an0mali/dtBot
|
a8333c0af61f1dbc166b447167145a0795f38d0f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 8 22:28:16 2021
@author: Qui
"""
import asyncio
from asyncio.windows_events import NULL
import discord
import os
import time
from requests.api import delete
import botutils
import authkey
from discord.ext import commands
import threading
from pathlib import Path
from random import randint
from Pycom import pycom
class dtBot(object):
    """Discord bot for the dtGames server.

    Registers meme/quote commands and bridges the Ultimate Turtle
    Simulator (UTS) game through pycom. Note: run_bot() is called at the
    end of __init__, so constructing a dtBot starts it immediately.
    """

    def __init__(self, name, *kwargs):
        """Configure the discord client, register all events/commands and
        start the bot (blocks in run_bot()).

        Args:
            name: bot instance name.
            *kwargs: extra positional arguments (currently unused).
        """
        self.name = name
        intents = discord.Intents.default()
        intents.members = True
        activity = discord.Game(name='Ultimate Turtle Simulator')
        self.bot = commands.Bot(command_prefix='!', intents=intents, activity=activity)
        self.pycom = pycom.Pycom('a', self)
        self.outcoms = []
        self.prev_update_time = 0.0
        self.sigalrm_obj = ''
        self.chat_target = False
        self.humpty = botutils.BotUtils()
        self.authkey = authkey.authkey
        self.chk_nxt = False
        # Monotonically increasing id handed out by get_ctx_id().
        self.ctx_count = 1
        self.ctx_responses = {'all': []}
        # ctxid -> context metadata (see get_ctx_id for the layout).
        self.ctx_objs = {}
        self.user_screens = {}
        self.new_user_screens = {}
        # Contexts older than max_ctx_time seconds are pruned, at most
        # once every clear_ctx_wait seconds.
        self.max_ctx_time = 180.0
        self.prev_checkctx_time = 0.0
        self.clear_ctx_wait = 90.0
        self.rm_usrscreen = []

        @self.bot.event
        async def on_member_join(member):
            print('Member join detected')
            for channel in self.bot.get_all_channels():
                if channel.name == 'general-chat':
                    await channel.send(f'{member.mention} ', file=discord.File(Path('./textures/welcomepartner.png')))
                    return

        @self.bot.event
        async def on_ready():
            print('Bot connected')

        @self.bot.listen('on_message')
        async def on_message(message):
            if message.author == self.bot.user:
                return
            if message.content.startswith('!'):
                print('Com detected.')
                return

        @self.bot.command(brief='Roast humpty', description='Refer to the breif description')
        async def roast(ctx, *args):
            print('bully com')
            print(ctx.author)
            await ctx.channel.send('humpty prolly doesnt even code ngl', file=discord.File(Path('./textures/garytriggered.gif')))

        @self.bot.command(brief='Post random gif')
        async def gifme(ctx):
            mpath = self.get_random_meme()
            await ctx.send('', file=discord.File(Path(mpath)))

        @self.bot.command(brief='Not a weeb? Use this', description="We be a weeb for you so you don't have to")
        async def weebs(ctx):
            mes = self.get_rand_quote('weebs')
            await ctx.send(mes)

        @self.bot.command(brief='Do not use this', description='Please do not use this')
        async def uwu(ctx):
            mes = self.get_rand_quote('uwu')
            await ctx.send(mes)

        @self.bot.command(brief='Post random shit', description='The best way to be both interesting and funny without having to be either')
        async def shitpost(ctx):
            mes = self.get_shitpost()
            await ctx.send(mes)

        @self.bot.command(brief='Play UTS via discord. Super beta', description='Use normal uts commands, but !uts instead of !. Try !uts stats')
        async def uts(ctx, *args):
            if not len(args):
                await ctx.send(f'{ctx.author.mention}' + 'You must specifiy a command. Try !uts stats')
                return
            usrnm = ctx.author.name + '_discord'
            mesobj = await ctx.send('**Ultimate Turtle Simulator**\n Sending command "' + args[0] + '" to turtle "' + usrnm + '"')
            ctxid = self.get_ctx_id(ctx, mesobj)
            # Wire format: "!<user>,!<command words>,<ctxid>".
            com = '!' + usrnm + ',!'
            for arg in args:
                if arg != args[0]:
                    com += ' '
                com += arg
            com += ',' + ctxid
            print('Uts com received')
            self.pycom.send_toclient(com)
            time.sleep(0.1)

        @self.bot.command(brief='Meme me bruh')
        async def mememe(ctx):
            mem = 'Synchronizing memeographic resonance..'
            mesobj = await ctx.send(mem)
            failed = True
            attempts = 10
            # Retry the reddit search a few times before giving up.
            for x in range(attempts):
                mem = self.humpty.search_reddit()
                if mem:
                    failed = False
                    break
            if not failed:
                await ctx.send(mem)
                await mesobj.edit(content='Consider yourself memed')
            else:
                await mesobj.edit(content='Meme sychronization failed. You have died.')

        @self.bot.command(brief='Get server member count')
        async def server_count(ctx):
            usrcount = self.get_total_users(ctx)
            # -1 to exclude the bot itself.
            await ctx.send('Total number of weebs in server: ' + str(usrcount -1))

        @self.bot.command(brief='About this bot')
        async def about(ctx):
            embed = discord.Embed(
                title='Go here NOW',
                url = 'https://derangedturtlegames.com',
                description = 'v0.69420 made by Jesus H Fucking Christ and a paperclip'
            )
            ico = 'https://static.wixstatic.com/media/e7a94e_0cb9088f334a4392901aeeb04c47f884~mv2.png'
            auth = 'Deranged Turtlebot'
            # 1-in-6 easter-egg author/icon swap.
            if not randint(0,5):
                auth = 'Jesus H Fucking Christ'
                ico = 'https://i.pinimg.com/originals/31/b8/f6/31b8f6c73de6fa6eee96f0c6545d6de4.jpg'
            embed.set_author(
                name=auth,
                url='https://twitter.com/_dtgames',
                icon_url= ico
            )
            await ctx.send(embed=embed)

        self.run_bot()

    def send_to(self, mes, ctxid, utsres=False):
        """Schedule an async send of mes to the context identified by
        ctxid on the bot's event loop."""
        self.bot.loop.create_task(self.send_message(mes, ctxid, utsres))

    async def send_message(self, mes, ctxid, uts_response=False):
        """Deliver mes to the channel recorded for ctxid.

        ctxid 'all' broadcasts to the 'uturtle-bot-dev' channel. For UTS
        responses, attach the user's latest screenshot if one is queued,
        otherwise edit the placeholder message created when the command
        was issued.
        """
        if ctxid == 'all':
            for channel in self.bot.get_all_channels():
                if channel.name == 'uturtle-bot-dev':
                    await channel.send(mes)
                    break
        else:
            if ctxid in self.ctx_objs:
                ctxdata = self.ctx_objs[ctxid]
                chan_name = ctxdata['channel_name']
                ctxobj = ctxdata['channel']
                # Re-resolve the channel by name in case the cached
                # object went stale.
                for channel in self.bot.get_all_channels():
                    if channel.name == chan_name:
                        ctxobj = channel
                mes = self.format_uts_mes(mes, ctxdata['auth_name'])
                mes = '\n' + mes
                title = ''
                mention = ctxdata['auth_mention']
                if not uts_response:
                    await ctxobj.send(title + f'{mention}' + mes)
                else:
                    title = '**Ultimate Turtle Simulator** '
                    oldmesobj = False
                    efile = False
                    authname = ctxdata['auth_name'] + '_discord'
                    print('Authname is ' + authname)
                    sdat = False
                    if authname in self.user_screens:
                        print('Found authname')
                        if len(self.user_screens[authname]):
                            # Pop the most recent queued screenshot.
                            sdat = self.user_screens[authname].pop(-1)
                            rawpath = sdat[0]
                            try:
                                self.rm_usrscreen.append(rawpath)
                                epath = os.path.expandvars(rawpath)
                                print('Epath is ' + str(epath))
                                efile = True
                            except:
                                pass
                    else:
                        print('Current userscreens: ' + str(self.user_screens))
                    if self.ctx_objs[ctxid]['mesobj']:
                        oldmesobj = self.ctx_objs[ctxid]['mesobj']
                        self.ctx_objs[ctxid]['mesobj'] = False
                    if efile:
                        await ctxobj.send(content=title + f'{mention}' + mes, file=discord.File(Path(epath)))
                    else:
                        if oldmesobj:
                            await oldmesobj.edit(content=title + f'{mention}' + mes)
                        else:
                            self.ctx_objs[ctxid]['mesobj'] = await ctxobj.send(content=title + f'{mention}' + mes)
        self.check_ctxobj_times()

    def format_uts_mes(self, mes, authname):
        """Bold the '<authname>_discord' prefix of mes if present;
        otherwise return mes unchanged."""
        authname = authname + '_discord'
        if authname in mes:
            if mes.find(authname) == 0:
                newmes = '**' + authname + '** '
                mes = mes.replace(authname, '')
                newmes += mes
                return newmes
        return mes

    def check_ctxobj_times(self):
        """Prune contexts older than max_ctx_time; throttled so pruning
        runs at most once every clear_ctx_wait seconds."""
        current_time = time.time()
        if current_time - self.prev_checkctx_time > self.clear_ctx_wait:
            self.prev_checkctx_time = current_time
            rm = []
            for sdat in self.ctx_objs:
                dat = self.ctx_objs[sdat]
                stime = dat['time']
                if current_time - stime > self.max_ctx_time:
                    rm.append(sdat)
            for sdat in rm:
                self.ctx_objs.pop(sdat)

    def get_rand_quote(self, topic):
        """Return a random quote for topic, retrying until it fits inside
        discord's 2000-character message limit."""
        while True:
            mes = self.humpty.rand_quote(topic)
            lmes = len(mes)
            if lmes < 2000 or not lmes:
                return mes

    def get_uts_embed(self):
        """Return a discord Embed carrying the dtGames author branding."""
        embed = discord.Embed()
        embed.set_author(
            name='dtGames',
            url='https://twitter.com/_dtgames',
            icon_url= 'https://static.wixstatic.com/media/e7a94e_da51ae208c3e4dae954e4b524eacc162~mv2.png'
        )
        return embed

    def get_ctx_id(self, ctx, mesobj):
        """Register a command context and return its string id.

        The stored record keeps the context, its channel, the author's
        name/mention and the placeholder message so replies can be
        routed/edited later by send_message().
        """
        ctxid = str(self.ctx_count)
        ctxdata = {
            'obj': ctx,
            'time': time.time(),
            'mesobj': mesobj,
            'channel': ctx.channel,
            'channel_name': ctx.channel.name,
            'auth_name': ctx.author.name.lower(),
            'auth_mention': ctx.author.mention,
        }
        self.ctx_objs[ctxid] = ctxdata
        self.ctx_count += 1
        return ctxid

    def get_shitpost(self):
        """Return a random quote from a random topic, retrying until a
        non-empty one is found; strips the 'twitchquotes:' prefix."""
        print('Getting shitpost')
        top = ''
        mem = ''
        while True:
            top = self.get_rand_topic().lower()
            mem = self.get_rand_quote(top)
            if len(mem):
                break
            time.sleep(0.1)
        print('Selected topic: ' + top)
        print('Shitpost: ' + mem)
        mem = mem.replace('twitchquotes:', '')
        return mem

    def run_bot(self):
        """Start the discord client with the configured auth key."""
        self.bot.run(self.authkey)

    def get_rand_topic(self):
        """Return a random topic from botutils' topic list."""
        topics = self.humpty.topics
        roll = randint(0, len(topics) -1)
        topic = topics[roll]
        print('Topic is ' + topic)
        return topic

    def get_total_users(self, ctx):
        """Return the member count of the guild the command came from."""
        # (Removed an unused local that captured the channel name.)
        return len(ctx.guild.members)

    def get_random_meme(self):
        """Return the path of a random file from the ./memes directory."""
        allmemes = os.listdir('./memes')
        # randint is inclusive at both ends; the original
        # randint(0, len(allmemes)) could index one past the end and
        # raise IndexError (get_rand_topic already used len-1 correctly).
        roll = randint(0, len(allmemes) - 1)
        memepath = './memes/' + allmemes[roll]
        return memepath
# Creating the bot starts it immediately (dtBot.__init__ calls run_bot()).
a = dtBot('a')
| 38.333333
| 146
| 0.493537
|
4a0fe8a231e4de12c8e340e4a5994ba5c3bce138
| 221
|
py
|
Python
|
unittests/test_featureVisualization.py
|
dingguanglei/jdit
|
ef878e696c9e2fad5069f106496289d4e4cc6154
|
[
"Apache-2.0"
] | 28
|
2019-06-18T15:56:53.000Z
|
2021-11-09T13:11:13.000Z
|
unittests/test_featureVisualization.py
|
dingguanglei/jdit
|
ef878e696c9e2fad5069f106496289d4e4cc6154
|
[
"Apache-2.0"
] | 2
|
2018-10-24T01:09:56.000Z
|
2018-11-08T07:13:48.000Z
|
unittests/test_featureVisualization.py
|
dingguanglei/jdit
|
ef878e696c9e2fad5069f106496289d4e4cc6154
|
[
"Apache-2.0"
] | 8
|
2019-01-11T01:12:15.000Z
|
2021-03-12T10:15:43.000Z
|
from unittest import TestCase
class TestFeatureVisualization(TestCase):
    """Placeholder test case for feature visualization.

    All test bodies are currently empty stubs awaiting implementation.
    """
    def test__hook(self):
        # TODO: implement test for the _hook method.
        pass
    def test__register_forward_hook(self):
        # TODO: implement test for the _register_forward_hook method.
        pass
    def test_trace_activation(self):
        # TODO: implement test for the trace_activation method.
        pass
| 17
| 42
| 0.696833
|
4a0fea84cc0dc02fc1d44f238f12dd3d81e6acd4
| 7,526
|
py
|
Python
|
main.py
|
AbdullahKaratas/GenerationOfCylinders
|
e0e98d194990b8ad4d697a59c75eae5d647b936a
|
[
"MIT"
] | null | null | null |
main.py
|
AbdullahKaratas/GenerationOfCylinders
|
e0e98d194990b8ad4d697a59c75eae5d647b936a
|
[
"MIT"
] | null | null | null |
main.py
|
AbdullahKaratas/GenerationOfCylinders
|
e0e98d194990b8ad4d697a59c75eae5d647b936a
|
[
"MIT"
] | null | null | null |
import time
from cylinder_fitting import fitting_rmsd
from cylinder_fitting import geometry
from cylinder_fitting import show_G_distribution
from cylinder_fitting import show_fit
from cylinder_fitting import fit
import pandas as pd
import pytest
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
np.seterr(all='raise')
def extract_features(dataframe):
    """Return a copy of the measurement columns (index 7 onward).

    The first seven columns hold cylinder metadata (R, normal, center);
    everything after them is the point data, relabelled with consecutive
    integer column names starting at 0.
    """
    measurement_columns = dataframe.columns[7:]
    features = dataframe[measurement_columns].copy()
    features.columns = np.arange(0, len(features.columns))
    return features
def test_fit(cylinder_data, plotVisable):
    """Fit a cylinder to the point data and report timing / RMSD.

    Returns (w_fit, r_fit, C_fit, rmsd): fitted axis direction, radius,
    center, and the fit's root-mean-square deviation.
    """
    print("test fit.")
    # NOTE(review): C and r are assigned but never used below.
    C = np.array([0, 0, 0])
    r = 10
    data = cylinder_data
    start = time.time()
    w_fit, C_fit, r_fit, fit_err = fit(data)
    end = time.time()
    print('Fitting time: ', end - start)
    # Optionally visualize the fitted cylinder against the data.
    if plotVisable == True:
        show_fit(w_fit, C_fit, r_fit, data)
    return w_fit, r_fit, C_fit, fitting_rmsd(w_fit, C_fit, r_fit, data)
def rotationsmatrix(r, s, c):
    """Rodrigues rotation matrix about axis *r* with s = sin(theta), c = cos(theta).

    The axis is normalized first; the matrix is assembled as
    c*I + s*K + (1-c) * r r^T, where K is the cross-product matrix of r.
    """
    axis = r / np.linalg.norm(r)
    cross_matrix = np.array([[0.0, -axis[2], axis[1]],
                             [axis[2], 0.0, -axis[0]],
                             [-axis[1], axis[0], 0.0]])
    return c * np.eye(3) + s * cross_matrix + (1.0 - c) * np.outer(axis, axis)
def kos_trafo(nx, ny, nz):
    """Rotate the standard basis so that e_3 aligns with (nx, ny, nz).

    Returns the rotated basis (e_1, e_2, e_3). When the normal is parallel
    to the z-axis (zero cross product) the standard basis is returned
    unchanged.
    """
    basis = [np.array([1., 0., 0.]),
             np.array([0., 1., 0.]),
             np.array([0., 0., 1.])]
    normal = np.array([nx, ny, nz])
    normal = normal / np.linalg.norm(normal)
    axis = np.cross(basis[2], normal)
    if not np.all(axis == np.array([0., 0., 0.])):
        sine = np.linalg.norm(axis)
        cosine = np.dot(normal, basis[2])
        rotation = rotationsmatrix(axis, sine, cosine)
        basis = [np.matmul(rotation, vector) for vector in basis]
    return basis[0], basis[1], basis[2]
def cylinder_calculator(normal, R_set, xc_set, yc_set, zc_set, n, n_p, deltaphi, L, alpha):
    """Generate n synthetic cylinder point clouds as a pandas DataFrame.

    Each row holds the ground truth (R, nx, ny, nz, xc, yc, zc) followed by
    n_p sampled surface points flattened as x,y,z triples, sorted by z.
    deltaphi is the sampled arc width, L the cylinder length, alpha the
    relative noise on the radius.
    """
    cylinder_data = []
    # NOTE(review): theta_start_set is never used below.
    theta_start_set = np.random.uniform(0., 2*np.pi, n)
    for k in range(n):
        # Basis aligned with the cylinder axis.
        e_1, e_2, e_3 = kos_trafo(normal[0], normal[1], normal[2])
        theta_start = - deltaphi/2
        theta_end = + deltaphi/2
        theta_set = np.random.uniform(theta_start, theta_end, n_p)
        z_area = np.random.uniform(-L / 2., L / 2., n_p)
        # Per-point radius with Gaussian noise proportional to R_set[k].
        R = np.random.normal(R_set[k], alpha*R_set[k], n_p)
        cylinder_data_row = [R_set[k], normal[0], normal[1],
                             normal[2], xc_set[k], yc_set[k], zc_set[k]]
        measurements = []
        for i in range(n_p):
            # Parametric cylinder surface point expressed in world coordinates.
            term_x = R[i]*np.cos(theta_set[i]) * e_1[0] + R[i] * \
                np.sin(theta_set[i]) * e_2[0] + z_area[i] * e_3[0] + xc_set[k]
            term_y = R[i]*np.cos(theta_set[i]) * e_1[1] + R[i] * \
                np.sin(theta_set[i]) * e_2[1] + z_area[i] * e_3[1] + yc_set[k]
            term_z = R[i]*np.cos(theta_set[i]) * e_1[2] + R[i] * \
                np.sin(theta_set[i]) * e_2[2] + z_area[i] * e_3[2] + zc_set[k]
            measurements.append((term_x, term_y, term_z))
        # Sort by z, then flatten the (x, y, z) tuples into one row.
        measurements = sorted(measurements, key=lambda w: w[2])
        measurements = [r for s in measurements for r in s]
        cylinder_data_row.extend(measurements)
        cylinder_data.append(cylinder_data_row)
    column_list = ['R', 'nx', 'ny', 'nz', 'xc', 'yc', 'zc']
    column_list.extend(list(range(3 * n_p)))
    cylinder_data = pd.DataFrame(cylinder_data, columns=column_list)
    return cylinder_data, R_set
def starting_generation_and_fit(nReplay, nheight, ndata, lengthStep, Ropt, plotVisable):
    """Generate synthetic cylinder scans, save them to CSV, and fit each.

    For each of nReplay runs: perturb a near-z normal, sample cylinder
    patches at nheight heights and lengthStep rotations, accumulate the
    rotated point sets into one cloud, write ground truth and data to CSV,
    then fit a cylinder and write the fitted direction/radius/center.
    """
    print('start')
    normalList = np.empty((nReplay, 3))
    directionList = np.empty((nReplay, 3))
    cReal = np.empty((nReplay, 3))
    cFit = np.empty((nReplay, 3))
    RadiusList = np.empty((nReplay, 1))
    RealRadiusList = np.empty((nReplay, 1))
    # NOTE(review): RMSDList is allocated but never filled.
    RMSDList = np.empty((nReplay, 1))
    for i in range(nReplay):
        # Slightly perturbed unit normal close to the z-axis.
        normal = np.array([0, 0, 1]) + np.random.uniform(-1., 1., 3) * 1/1000
        normal = normal / np.linalg.norm(normal)
        normalList[i, :] = normal
        deltaStep = 0
        DataEnd = np.zeros((ndata*ndata*lengthStep*nheight, 3))
        # Radius and center sampled in tiny intervals around the nominal values.
        R_min = Ropt*(1.-1/1000000)
        R_max = Ropt*(1.+1/1000000)
        xc_min = -1./1000
        xc_max = 1./1000
        yc_min = -1./1000
        yc_max = 1./1000
        R_set = np.random.uniform(R_min, R_max, 1)
        xc_set = np.random.uniform(xc_min, xc_max, 1)
        yc_set = np.random.uniform(yc_min, yc_max, 1)
        RealRadiusList[i, :] = R_set
        for j in range(nheight):
            # Heights spaced 2 apart, with small jitter.
            zc_min = 2*j - 1./1000
            zc_max = 2*j+1./1000
            zc_set = np.random.uniform(zc_min, zc_max, 1)
            cReal[i, :] = [xc_set, yc_set, zc_set]
            print(cReal)
            for k in range(0, lengthStep):
                # Rotation angle for this patch (evenly spaced around the axis).
                deltaStep = k*(360/lengthStep) * np.pi/180
                nameCSV = './cylinder_data_' + \
                    '{}'.format(k+lengthStep*j) + '.csv'
                saveDataIn = nameCSV
                data, Rset = cylinder_calculator(
                    normal, R_set, xc_set, yc_set, zc_set, n=1, n_p=ndata*ndata, deltaphi=0.8/Ropt, L=0.8, alpha=0.001)
                data.to_csv(saveDataIn, index=False)
                # Round-trip through CSV before extracting the point columns.
                cylinder_data = pd.read_csv(
                    './cylinder_data_{}.csv'.format(k+lengthStep*j), sep=",")
                features_dataframe = extract_features(cylinder_data)
                features = np.array(features_dataframe,
                                    dtype=np.float32).reshape((ndata*ndata, 3))
                x = features[:, 0]
                y = features[:, 1]
                z = features[:, 2]
                # Rotate the patch about the z-axis by deltaStep.
                X = x * np.cos(deltaStep) - y * np.sin(deltaStep)
                Y = x * np.sin(deltaStep) + y * np.cos(deltaStep)
                Z = z
                Data = np.vstack((X, Y, Z)).T
                start = j*lengthStep*ndata*ndata + ndata*ndata*k
                stop = j*lengthStep*ndata*ndata + ndata*ndata*(k+1)
                DataEnd[start:stop, :] = Data
        # Persist ground truth for this run.
        DataEnd_data = pd.DataFrame(DataEnd)
        DataEnd_data.to_csv(
            './Data_{}.csv'.format(i), index=False)
        normalList_data = pd.DataFrame(normalList)
        normalList_data.to_csv(
            './normalList_{}.csv'.format(i), index=False)
        RealRadiusList_data = pd.DataFrame(RealRadiusList)
        RealRadiusList_data.to_csv(
            './radiusList_{}.csv'.format(i), index=False)
        cReal_data = pd.DataFrame(cReal)
        cReal_data.to_csv(
            './center_{}.csv'.format(i), index=False)
        print('fit')
        # Fit a cylinder to the accumulated cloud and persist the results.
        w_fit, r_fit, c_fit, RMSD = test_fit(DataEnd, plotVisable)
        directionList[i, :] = w_fit
        RadiusList[i, :] = r_fit
        cFit[i, :] = c_fit
        directionList_data = pd.DataFrame(directionList)
        directionList_data.to_csv(
            './fit_direction_{}.csv'.format(i), index=False)
        RadiusList_data = pd.DataFrame(RadiusList)
        RadiusList_data.to_csv(
            './fit_RadiusList_{}.csv'.format(i), index=False)
        cFit_data = pd.DataFrame(cFit)
        cFit_data.to_csv(
            './fit_center_{}.csv'.format(i), index=False)
        print('run number = ', i)
if __name__ == "__main__":
    # Simulation parameters: one run, one height level, a 50x50 point grid,
    # four rotation steps, nominal radius 12, with plotting enabled.
    nReplay = 1
    nheight = 1
    ndata = 50
    lengthStep = 4
    Ropt = 12
    plotVisable = True
    starting_generation_and_fit(
        nReplay, nheight, ndata, lengthStep, Ropt, plotVisable)
| 31.889831
| 119
| 0.562052
|
4a0feaa561ea73af54a42a1d79be3616bce094d9
| 2,236
|
py
|
Python
|
app/main.py
|
CirculusVCFB/example-fastapi
|
87a9ba0c7db245326e3aa9b952585a440ba2327c
|
[
"bzip2-1.0.6"
] | null | null | null |
app/main.py
|
CirculusVCFB/example-fastapi
|
87a9ba0c7db245326e3aa9b952585a440ba2327c
|
[
"bzip2-1.0.6"
] | 1
|
2022-03-01T07:02:47.000Z
|
2022-03-01T07:02:47.000Z
|
app/main.py
|
CirculusVCFB/example-fastapi
|
87a9ba0c7db245326e3aa9b952585a440ba2327c
|
[
"bzip2-1.0.6"
] | null | null | null |
import time
from fastapi import FastAPI, Response,status,HTTPException,Depends
from passlib.context import CryptContext
from fastapi.params import Body
from pydantic import BaseModel
from random import randrange
from typing import Optional, List
from sqlalchemy.orm import Session
from . import models, schemas, utils
from . database import engine, SessionLocal, get_db
from . routers import post, user, auth, vote
from .config import settings
from fastapi.middleware.cors import CORSMiddleware
import sys, asyncio
app = FastAPI()
if sys.platform == "win32" and (3, 8, 0) <= sys.version_info < (3, 9, 0):
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
origins = ["*"]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
pwd_context = CryptContext(schemes=["bcrypt"], deprecated = "auto")
models.Base.metadata.create_all(bind=engine)
#try:
#cnx = mysql.connector.connect(user='root', password = 'wyzecam53$DELMY', host = 'localhost',
#database= 'sakila')
#cursor = cnx.cursor(dictionary = True)
# print("successfully connected to the DATABASE!!")
#except mysql.connector.Error as err:
#if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
# print("Something is wrong with your user name or password")
#elif err.errno == errorcode.ER_BAD_DB_ERROR:
#print("Database does not exist")
#else:
#print(err)
my_posts = [{"title": "title of post1", "content": "content of post 1", "id": 1, "rating" : 4}, {"title": "favorite foods", "content": "I like pizza",
"id":2}]
def find_post(id):
    """Return the first post in my_posts with the given id, or None."""
    matches = (post for post in my_posts if post["id"] == id)
    return next(matches, None)
def find_index_post(id):
    """Return the list index of the post with the given id, or None."""
    indices = (idx for idx, post in enumerate(my_posts) if post["id"] == id)
    return next(indices, None)
app.include_router(post.router)
app.include_router(user.router)
app.include_router(auth.router)
app.include_router(vote.router)
@app.get("/")
def root():
    """Landing endpoint; the message confirms the bind mount serves this code."""
    return {"message": "Bind mount works at last"}
@app.get("/sqlalchemy")
def test_posts(db: Session = Depends(get_db)):
    """Return all Post rows; sanity check for the SQLAlchemy session wiring."""
    posts = db.query(models.Post).all()
    return posts
@app.get("/posts/latest")
def get_latest_post():
    """Return the last entry of the in-memory my_posts list."""
    # Equivalent to my_posts[-1]; raises IndexError if the list is empty.
    post = my_posts[len(my_posts)-1]
    return post
| 27.268293
| 151
| 0.709302
|
4a0febab843d3f09b91df751138b971c513eac8e
| 262
|
py
|
Python
|
lib_rovpp/policies/rovpp_v2a_lite_policy.py
|
iReynaldo/lib_rovpp
|
eb201adc948e9375123c2e2301ee524392dd7b0d
|
[
"BSD-3-Clause"
] | 1
|
2021-12-05T07:42:35.000Z
|
2021-12-05T07:42:35.000Z
|
lib_rovpp/policies/rovpp_v2a_lite_policy.py
|
iReynaldo/lib_rovpp
|
eb201adc948e9375123c2e2301ee524392dd7b0d
|
[
"BSD-3-Clause"
] | null | null | null |
lib_rovpp/policies/rovpp_v2a_lite_policy.py
|
iReynaldo/lib_rovpp
|
eb201adc948e9375123c2e2301ee524392dd7b0d
|
[
"BSD-3-Clause"
] | null | null | null |
from .rovpp_v2_lite_policy import ROVPPV2LitePolicy
class ROVPPV2aLitePolicy(ROVPPV2LitePolicy):
    # Human-readable policy name used in reporting.
    name = "ROV++V2a Lite"
    # NOTE(review): no explicit `self` parameter -- *args absorbs the instance
    # when called as a method. Returning False declines policy-specific
    # propagation so blackholes follow the normal export policy.
    def _policy_propagate(*args, **kwargs):
        """Do nothing. Send blackholes according to export policy"""
        return False
| 23.818182
| 68
| 0.717557
|
4a0fedd5e00c391230a5e9c4b7a33bdafca87ac5
| 10,627
|
py
|
Python
|
validator/tests/integration/test_sawtooth_stats.py
|
gabykyei/GC_BlockChain_T_Rec
|
b72cb483064852d0a60286943ff55233462fea08
|
[
"Apache-2.0"
] | 1
|
2019-03-18T13:31:11.000Z
|
2019-03-18T13:31:11.000Z
|
validator/tests/integration/test_sawtooth_stats.py
|
gabykyei/GC_BlockChain_T_Rec
|
b72cb483064852d0a60286943ff55233462fea08
|
[
"Apache-2.0"
] | null | null | null |
validator/tests/integration/test_sawtooth_stats.py
|
gabykyei/GC_BlockChain_T_Rec
|
b72cb483064852d0a60286943ff55233462fea08
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import os
import time
import unittest
import logging
from sawtooth.exceptions import MessageException
from sawtooth.cli.stats_lib.stats_utils import StatsModule
from sawtooth.cli.stats_lib.validator_stats import SystemStatsManager
from sawtooth.cli.stats_lib.endpoint_manager import EndpointManager
from sawtooth.cli.stats_lib.topology_stats import TopologyManager
from sawtooth.cli.stats import SawtoothStats
from twisted.internet import reactor
LOGGER = logging.getLogger(__name__)
RUN_TEST_SUITES = True \
if os.environ.get("RUN_TEST_SUITES", False) == "1" else False
class SawtoothStatsTestHarness():
'''
test_run()
- initializes stats config options
- creates instances of endpoint_manager, sawtooth stats,
sawtooth stats test module, sawtooth stats test harness,
and int key workload
- initializes test_run_in_thread() in reactor thread
- starts reactor
test_run_in_thread()
- sequences the stats test operations
- collects stats on initial network state; verifies they are correct
- runs int key workload to increment txn and block count
- collects stats on new network state; verifies they are correct
- note: sleeps after calls to endpoint_discovery_loop() and stats_loop()
to allow time for associated http requests to validators can complete
- Note: runs in reactor thread so that blocking calls to time.sleep()
do not suspend main reactor thread required to complete http calls
'''
def __init__(self):
self.epm = None
self.ss = None
self.sst = None
self.int_key_load_test = None
self.urls = None
self.keys = 10
self.rounds = 2
def test_run(self, urls, config_opts=None, config_dict=None):
self.urls = urls
self.url = urls[0]
config = self.default_config()
# update defaults with cli options if provided
if config_opts is None:
config['EndpointManager']['urls'] = self.urls
else:
self.update_config(config, config_opts)
if config_dict is not None:
self.update_config_dict(config_dict, config)
self.epm = EndpointManager(config['EndpointManager']['urls'])
self.ss = SawtoothStats(self.epm, config)
self.sst = SawtoothStatsTest(None, None)
self.sst.initialize(self.ss.stats_modules)
reactor.callInThread(self.test_run_in_thread)
print "test complete with no errors"
def _are_registered(self, timeout=256, validators=5):
registered = False
wait_time = 0
while not registered:
reactor.callFromThread(self.epm.endpoint_discovery_loop)
time.sleep(1)
if len(self.epm.endpoint_urls) is validators:
registered = True
if wait_time > timeout:
break
wait_time += 1
return registered
def test_run_in_thread(self):
# get list of validators in network
# sleep to wait for requests to complete
assert self._are_registered(), "max registration time exceeded"
assert len(self.epm.endpoint_urls) == 5
reactor.callFromThread(self.ss.stats_loop)
time.sleep(1)
reactor.callFromThread(self.ss.stats_loop)
time.sleep(1)
# compare stats values to expected values before executing
self.sst.get_stats()
self.sst.test_stats_values(self._stats_values_before_new_txns())
# end test
reactor.callFromThread(self.ss.stats_stop)
def _stats_values_before_new_txns(self):
validator_count = 1
expected_params = {
'clients': {
'known_validators': validator_count,
'active_validators': validator_count
},
'transactions': {
'txns_max_committed': 1,
'txns_max_pending': 0
},
'blocks': {
'blocks_max_committed': 1,
'blocks_min_pending': 0
},
'messages': {
'msgs_max_acked': 1,
'msgs_max_handled': 1
},
'packets': {
'packets_max_acks_received': 1
},
'poet': {
'avg_local_mean': 1
},
'topology': {
'maximum_shortest_path_length': 1,
'maximum_degree': validator_count - 1,
'node_count': validator_count
}
}
return expected_params
def default_config(self):
config = {}
config['CsvManager'] = {}
config['CsvManager']['csv_enable_summary'] = False
config['CsvManager']['csv_enable_validator'] = False
config['EndpointManager'] = {}
config['EndpointManager']['urls'] = ["http://localhost:8899"]
config['EndpointManager']['interval'] = 10
config['SawtoothStats'] = {}
config['SawtoothStats']['interval'] = 3
config['SawtoothStats']['max_loop_count'] = 0
config['StatsPrint'] = {}
config['StatsPrint']['print_all'] = False
return config
def update_config_dict(self, new_conf, default_conf):
for key1 in set(new_conf) & set(default_conf):
for key2 in set(new_conf[key1]) & set(default_conf[key1]):
default_conf[key1][key2] = new_conf[key1][key2]
def update_config(self, config, opts):
config['CsvManager']['csv_enable_summary'] = opts.csv_enable_summary
config['CsvManager']['csv_enable_validator'] = opts.csv_enable_validator
config['EndpointManager']['urls'] = [opts.url]
config['EndpointManager']['interval'] = opts.endpoint_time
config['SawtoothStats']['interval'] = opts.stats_time
return config
@unittest.skipUnless(RUN_TEST_SUITES, "Must be run in a test suites")
class TestSawtoothStatsTwistedThread(unittest.TestCase):
    # Integration test that drives SawtoothStatsTestHarness against live
    # validators; only runs when RUN_TEST_SUITES is set in the environment.
    def __init__(self, test_name, urls=None):
        super(TestSawtoothStatsTwistedThread, self).__init__(test_name)
        self.urls = urls
        self.ssth = SawtoothStatsTestHarness()
    def test_sawtooth_stats_twisted_thread(self):
        # Re-raise stats failures wrapped with context; Python 2 print below.
        try:
            self.ssth.test_run(self.urls, config_opts=None, config_dict=None)
        except MessageException as e:
            raise MessageException('stats error: {0}'.format(e))
        finally:
            print "No Validators need to be stopped"
    def stats_config_dict(self):
        # Alternate config: bounded loop count with full printing enabled.
        config = {}
        config['SawtoothStats'] = {}
        config['SawtoothStats']['max_loop_count'] = 4
        config['StatsPrint'] = {}
        config['StatsPrint']['print_all'] = True
        return config
class SawtoothStatsTest(StatsModule):
    '''
    initialize()
    - gets an instance of system stats manager, topology_stats_manager
    from stats module list
    - gets instance of system_stats, topology_stats from system stats manager,
    topology_stats_manager respectively
    get_stats()
    - builds a dict of system and topology stats
    test_stats()
    - asserts some stats values against reference list
    '''
    def __init__(self, epm, config):
        # epm/config are accepted for StatsModule interface compatibility
        # but unused here.
        super(SawtoothStatsTest, self).__init__()
        self.stats = {}
        self.system_stats = None
        self.topology_stats = None
    def initialize(self, module_list):
        """Resolve the system and topology stats providers from the modules."""
        self.module_list = module_list
        system_stats_manager = self.get_module(SystemStatsManager)
        self.system_stats = system_stats_manager.system_stats
        topology_stats_manager = self.get_module(TopologyManager)
        self.topology_stats = topology_stats_manager.topology_stats
    def test_stats_values(self, expected_params):
        """Assert the collected summary stats meet/exceed expected values.

        Bug fix: three assertions were previously split across lines with no
        continuation, e.g. a bare `assert self.stats` (always true) followed
        by a stray `["summary_stats"]...` expression that raised TypeError.
        The subscripts are now joined to the assert they belong to.
        """
        assert self.stats["summary_stats"]["clients"]["known_validators"] >= \
            expected_params["clients"]["known_validators"]
        assert self.stats["summary_stats"]["clients"]["active_validators"] == \
            expected_params["clients"]["active_validators"]
        assert self.stats["summary_stats"]["topology"]["maximum_shortest_path_length"] >= \
            expected_params["topology"]["maximum_shortest_path_length"]
        assert self.stats["summary_stats"]["topology"]["maximum_degree"] >= \
            expected_params["topology"]["maximum_degree"]
        assert self.stats["summary_stats"]["topology"]["node_count"] >= \
            expected_params["topology"]["node_count"]
        assert self.stats['summary_stats']["blocks"]["blocks_max_committed"] > \
            expected_params["blocks"]["blocks_max_committed"]
        assert self.stats['summary_stats']['blocks']["blocks_min_pending"] >= \
            expected_params["blocks"]["blocks_min_pending"]
        assert self.stats["summary_stats"]["messages"]["msgs_max_acked"] > \
            expected_params["messages"]["msgs_max_acked"]
        assert self.stats["summary_stats"]["messages"]["msgs_max_handled"] > \
            expected_params["messages"]["msgs_max_handled"]
        assert self.stats["summary_stats"]["packets"]["packets_max_acks_received"] > \
            expected_params["packets"]["packets_max_acks_received"]
        assert self.stats["summary_stats"]["poet"]["avg_local_mean"] > \
            expected_params["poet"]["avg_local_mean"]
    def get_stats(self):
        """Refresh self.stats from the current stats providers."""
        self._get_summary_stats()
    def _get_summary_stats(self):
        """Snapshot all provider stats into self.stats['summary_stats']."""
        stats = {'clients': self.system_stats.sys_client._asdict(),
                 'blocks': self.system_stats.sys_blocks._asdict(),
                 'transactions': self.system_stats.sys_txns._asdict(),
                 'packets': self.system_stats.sys_packets._asdict(),
                 'messages': self.system_stats.sys_msgs._asdict(),
                 'poet': self.system_stats.poet_stats._asdict(),
                 'topology': self.topology_stats.get_stats_as_dict(),
                 }
        self.stats['summary_stats'] = stats
| 36.269625
| 80
| 0.638844
|
4a0fee6db1d3e6f83a3cfe5423e738b896ea6fda
| 1,753
|
py
|
Python
|
docs/test_dist_cont.py
|
h-vetinari/tsne-cuda
|
b740a7d46a07ca9415f072001839fb66a582a3fa
|
[
"BSD-3-Clause"
] | 1,400
|
2018-05-09T17:17:59.000Z
|
2022-03-31T14:17:59.000Z
|
docs/test_dist_cont.py
|
h-vetinari/tsne-cuda
|
b740a7d46a07ca9415f072001839fb66a582a3fa
|
[
"BSD-3-Clause"
] | 104
|
2018-06-06T09:21:31.000Z
|
2022-03-05T03:11:35.000Z
|
docs/test_dist_cont.py
|
h-vetinari/tsne-cuda
|
b740a7d46a07ca9415f072001839fb66a582a3fa
|
[
"BSD-3-Clause"
] | 113
|
2018-08-01T23:45:28.000Z
|
2022-03-20T08:46:23.000Z
|
# Generate a uniform random distribution of points
import numpy as np
import scipy
from sklearn.decomposition import PCA
import tsnecuda
print('Loading data...')
import tflearn
(X,Y),_ = tflearn.datasets.cifar10.load_data()
# randomly sample some points
r_points = X[np.random.choice(np.arange(0,X.shape[0]), size=20000,replace=False)].reshape(20000,-1)
# r_points = np.random.rand(5000,768)
# print(X.shape)
# r_points = X.reshape(X.shape[0],-1)
print('Computing distances...')
# Compute the pairwise distances between the points
# hd_distances = scipy.spatial.distance.pdist(r_points)
hd_distances = tsnecuda.TSNE.e_pw_dist(r_points)
print('Projecting...')
# Project the points using PCA
proj_points = PCA(n_components=30).fit_transform(r_points)
print('Computing LD distances...')
# Compute the pairwise distances between the points
# (small 1e-4 offset guards against division by zero below)
ld_distances = tsnecuda.TSNE.e_pw_dist(proj_points) + 1e-4*np.ones(shape=(20000, 20000))
print('Computing ratios...')
# Compute for each pair of points the ratio between the point distances
point_ratios = hd_distances / ld_distances
# Report the 5/25/50/75/95th percentiles of the ratio distribution.
print(np.percentile(point_ratios.reshape(-1), 5))
print(np.percentile(point_ratios.reshape(-1), 25))
print(np.percentile(point_ratios.reshape(-1), 50))
print(np.percentile(point_ratios.reshape(-1), 75))
print(np.percentile(point_ratios.reshape(-1), 95))
# Display the histogram
import matplotlib
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
# the histogram of the data
ax.hist(point_ratios.reshape(-1), bins=np.arange(0,5,0.001))
# add a 'best fit' line
ax.set_xlabel('Ratio')
ax.set_ylabel('Density')
ax.set_title('Histogram of Ratios between low and high dim distances under PCA')
# Tweak spacing to prevent clipping of ylabel
fig.tight_layout()
plt.show()
| 30.754386
| 99
| 0.764404
|
4a0feeaaf8c005c2fc8bb800e1fcc7f002ca4923
| 481
|
py
|
Python
|
django/contrib/staticfiles/urls.py
|
fizista/django
|
16f3a6a4c7bab11644d11c2be029374e5095cb56
|
[
"BSD-3-Clause"
] | 2
|
2016-09-27T09:30:19.000Z
|
2016-10-17T01:47:43.000Z
|
env/lib/python2.7/site-packages/django/contrib/staticfiles/urls.py
|
luiscarlosgph/nas
|
e5acee61e8bbf12c34785fe971ce7df8dee775d4
|
[
"MIT"
] | 10
|
2019-12-26T17:31:31.000Z
|
2022-03-21T22:17:33.000Z
|
env/lib/python2.7/site-packages/django/contrib/staticfiles/urls.py
|
luiscarlosgph/nas
|
e5acee61e8bbf12c34785fe971ce7df8dee775d4
|
[
"MIT"
] | 1
|
2020-12-24T01:28:30.000Z
|
2020-12-24T01:28:30.000Z
|
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = []
def staticfiles_urlpatterns(prefix=None):
    """Return URL patterns for serving static files.

    Falls back to settings.STATIC_URL when no prefix is given.
    """
    chosen_prefix = settings.STATIC_URL if prefix is None else prefix
    return static(chosen_prefix, view='django.contrib.staticfiles.views.serve')
# Only append if urlpatterns are empty
# (DEBUG-only convenience: serve static files through Django itself.)
if settings.DEBUG and not urlpatterns:
    urlpatterns += staticfiles_urlpatterns()
| 26.722222
| 72
| 0.740125
|
4a0feefb97e6e2caf8870822bc173f7664f30631
| 46,654
|
py
|
Python
|
src/api/meta/utils/basicapi/parseresult.py
|
Chromico/bk-base
|
be822d9bbee544a958bed4831348185a75604791
|
[
"MIT"
] | 84
|
2021-06-30T06:20:23.000Z
|
2022-03-22T03:05:49.000Z
|
src/api/meta/utils/basicapi/parseresult.py
|
Chromico/bk-base
|
be822d9bbee544a958bed4831348185a75604791
|
[
"MIT"
] | 7
|
2021-06-30T06:21:16.000Z
|
2022-03-29T07:36:13.000Z
|
src/api/meta/utils/basicapi/parseresult.py
|
Chromico/bk-base
|
be822d9bbee544a958bed4831348185a75604791
|
[
"MIT"
] | 40
|
2021-06-30T06:21:26.000Z
|
2022-03-29T12:42:26.000Z
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import json
import time
import pymysql
from common.meta.common import create_tag_to_target, get_default_geog_tag
from django.conf import settings
from meta.public.common import field_allowed_roles
from meta.utils.basicapi import BasicApi
from meta.utils.common import paged_sql
MYSQL_BACKEND_TYPE = "mysql"
ATLAS_BACKEND_TYPE = "atlas"
DEFAULT_DATE_FORMAT = "%Y-%m-%d %H:%i:%s"
TAG_TYPE_RESULT_TABLE = "result_table"
TAG_TYPE_RAW_DATA = "raw_data"
TAG_TYPE_PROJECT = "project"
TAG_TYPE_DATA_PROCESSING = "data_processing"
TAG_TYPE_DATA_PROCESSING_RELATION = "data_processing_relation"
TAG_TYPE_DATA_TRANSFERRING = "data_transferring"
TAG_TYPE_CLUSTER_GROUP = "cluster_group"
VIRTUAL_BELONG_TO_FIELD = "belong_to"
def get_tag_params(params):
    """Pop and return the 'tags' entry from *params* (empty list if absent).

    Mutates *params*: the 'tags' key is removed when present.
    """
    return params.pop("tags", [])
def create_tag_to_result_table(tags, bk_biz_id, project_id, *result_table_ids):
    """Attach *tags* to one or more result tables."""
    tag_to_target(TAG_TYPE_RESULT_TABLE, tags, bk_biz_id, project_id, *result_table_ids)
def create_tag_to_raw_data(tags, bk_biz_id, *raw_data_ids):
    """Attach *tags* to one or more raw data sources (no project scope)."""
    tag_to_target(TAG_TYPE_RAW_DATA, tags, bk_biz_id, None, *raw_data_ids)
def create_tag_to_data_processing(tags, *raw_data_ids):
    """Attach *tags* to one or more data processings.

    NOTE(review): the varargs parameter is misnamed `raw_data_ids` (copied
    from the raw-data variant); these are processing ids.
    """
    tag_to_target(TAG_TYPE_DATA_PROCESSING, tags, None, None, *raw_data_ids)
def create_tag_to_dpr(tags, *data_processing_ids):
    """Attach *tags* to one or more data processing relations."""
    tag_to_target(TAG_TYPE_DATA_PROCESSING_RELATION, tags, None, None, *data_processing_ids)
def create_tag_to_data_transferring(tags, *raw_data_ids):
    """Attach *tags* to one or more data transferrings.

    NOTE(review): varargs parameter is misnamed `raw_data_ids`; these are
    transferring ids.
    """
    tag_to_target(TAG_TYPE_DATA_TRANSFERRING, tags, None, None, *raw_data_ids)
def create_tag_to_project(tags, *project_id_ids):
    """Attach *tags* to one or more projects."""
    tag_to_target(TAG_TYPE_PROJECT, tags, None, None, *project_id_ids)
def create_tag_to_cluster_group(tags, *cluster_group_ids):
    """Attach *tags* to one or more cluster groups."""
    tag_to_target(TAG_TYPE_CLUSTER_GROUP, tags, None, None, *cluster_group_ids)
def tag_to_target(target_type, tags, bk_biz_id, project_id, *target_ids):
    """Attach *tags* to every id in *target_ids* under the given target type.

    Delegates to create_tag_to_target with target_exists=False.
    """
    targets = [(target_type, target_id) for target_id in target_ids]
    create_tag_to_target(targets, tags, target_exists=False, bk_biz_id=bk_biz_id, project_id=project_id)
def add_manage_tag_to_project(result):
    """Attach manage-tag info to project result rows (keyed by project_id)."""
    return manage_tag_info(TAG_TYPE_PROJECT, result, "project_id")
def add_manage_tag_to_result_table(result):
    """Attach manage-tag info to result-table rows (keyed by result_table_id)."""
    return manage_tag_info(TAG_TYPE_RESULT_TABLE, result, "result_table_id")
def add_manage_tag_to_data_processing(result):
    """Attach manage-tag info to data-processing rows (keyed by processing_id)."""
    return manage_tag_info(TAG_TYPE_DATA_PROCESSING, result, "processing_id")
def add_manage_tag_to_data_transferring(result):
    """Attach manage-tag info to data-transferring rows (keyed by transferring_id)."""
    return manage_tag_info(TAG_TYPE_DATA_TRANSFERRING, result, "transferring_id")
def add_manage_tag_to_cluster_group(result):
    """Attach manage-tag info to cluster-group rows (keyed by cluster_group_id)."""
    return manage_tag_info(TAG_TYPE_CLUSTER_GROUP, result, "cluster_group_id")
def gen_managed_tag_dict(sql):
    """Build a {code: {code, alias, belong_to}} dict of manage tags.

    Runs *sql* (expected to return id/code/alias/parent_id rows ordered by
    parent_id), then resolves each tag's root category and exposes it under
    the virtual 'belong_to' field.
    """
    tag_dict = {}
    tag_id_obj_dict = {}
    tag_list = BasicApi.get_complex_search(sql)
    # Give each tag the top-level category it belongs to, exposed as the
    # virtual 'belong_to' field.
    if tag_list:
        # Build the tag parent/child lookup; roots belong to themselves.
        for tag_obj in tag_list:
            tag_id = tag_obj["id"]
            parent_id = tag_obj["parent_id"]
            tag_code = tag_obj["code"]
            if parent_id == 0:  # root node
                tag_obj[VIRTUAL_BELONG_TO_FIELD] = tag_code
            tag_id_obj_dict[tag_id] = tag_obj
        # Walk up the tree so every tag inherits its root's category.
        for tag_obj in tag_list:
            if VIRTUAL_BELONG_TO_FIELD not in tag_obj:
                parent_id = tag_obj["parent_id"]
                while True:
                    parent_dict = tag_id_obj_dict.get(parent_id)
                    if not parent_dict:
                        break
                    if VIRTUAL_BELONG_TO_FIELD in parent_dict:
                        tag_obj[VIRTUAL_BELONG_TO_FIELD] = parent_dict[VIRTUAL_BELONG_TO_FIELD]
                        break
                    parent_id = parent_dict["parent_id"]
        # Collapse into the code -> info mapping returned to callers.
        for tag_obj in tag_list:
            tag_dict[tag_obj["code"]] = {
                "code": tag_obj["code"],
                "alias": tag_obj["alias"],
                VIRTUAL_BELONG_TO_FIELD: tag_obj.get(VIRTUAL_BELONG_TO_FIELD),
            }
    return tag_dict
def gen_managed_tag_target_dict(sql, tag_dict):
    """Build {target_id: {belong_to_category: [tag info, ...]}} from *sql*.

    *tag_dict* is the output of gen_managed_tag_dict; tag_code rows whose
    code is unknown there are silently skipped. The 'belong_to' field is
    used for grouping and removed from the per-tag dicts.
    """
    tag_target_dict = {}
    tag_target_list = BasicApi.get_complex_search(sql)
    if tag_target_list:
        for tag_target_obj in tag_target_list:
            target_id = tag_target_obj["target_id"]
            tag_code = tag_target_obj["tag_code"]
            ttt_dict = tag_dict.get(tag_code)
            if ttt_dict:
                belong_to = ttt_dict[VIRTUAL_BELONG_TO_FIELD]
                # Copy so the shared tag_dict entry keeps its belong_to field.
                cp_ttt_dict = ttt_dict.copy()
                cp_ttt_dict.pop(VIRTUAL_BELONG_TO_FIELD)
                tg_dict = tag_target_dict.get(target_id)
                if not tg_dict:
                    tag_target_dict[target_id] = {belong_to: [cp_ttt_dict]}
                elif belong_to in tg_dict:
                    tg_dict[belong_to].append(cp_ttt_dict)
                else:
                    tg_dict[belong_to] = [cp_ttt_dict]
    return tag_target_dict
def manage_tag_info(target_type, result, data_set_name):  # attach manage-tag info to the result
    """Decorate *result* (dict or list of dicts) with tags['manage'] info.

    In single-geog-area deployments, stamps the default geog tag and
    returns early. Otherwise looks up manage tags for the target ids found
    under *data_set_name* and merges them in, defaulting geog_area when a
    target has no geog tag of its own. Mutates *result* in place.
    """
    if not result:
        return
    if settings.ENABLED_TDW:
        from meta.extend.tencent.lol.utils import mixin_lol_manage_tag_info
    default_geog_area_dict = None
    if not settings.MULTI_GEOG_AREA:  # single geog area: stamp the default tag and return
        dgt_dict = get_default_geog_tag()
        if dgt_dict:
            default_geog_area_dict = {"code": dgt_dict["code"], "alias": dgt_dict["alias"]}
        if isinstance(result, list):
            for obj in result:
                obj["tags"] = {"manage": {"geog_area": [default_geog_area_dict]}}
                # TDW billing project gets extra LOL tag info mixed in.
                if (
                    settings.ENABLED_TDW
                    and target_type == TAG_TYPE_PROJECT
                    and obj.get("project_id", 0) == settings.LOL_BILLING_PRJ_ID
                ):
                    mixin_lol_manage_tag_info(obj)
        elif isinstance(result, dict):
            result["tags"] = {"manage": {"geog_area": [default_geog_area_dict]}}
            if (
                settings.ENABLED_TDW
                and target_type == TAG_TYPE_PROJECT
                and result.get("project_id", 0) == settings.LOL_BILLING_PRJ_ID
            ):
                mixin_lol_manage_tag_info(result)
        return
    # Collect the target ids present in the result rows.
    target_id_list = []
    if isinstance(result, list):
        for obj in result:
            if data_set_name in obj:
                target_id_list.append(obj[data_set_name])
    elif isinstance(result, dict) and data_set_name in result:
        target_id_list.append(result[data_set_name])
    if not target_id_list:
        return
    # Build the tag dictionary (code -> info with root category).
    tag_sql = "select id,code,alias,parent_id from tag where active=1 and tag_type='manage' order by parent_id asc"
    tag_dict = gen_managed_tag_dict(tag_sql)
    # Build the tag_target dictionary: {target_id: {"geog_area": [{...}]}}
    where_cond = ",".join("'" + escape_string(str(target_id)) + "'" for target_id in target_id_list)
    tag_target_sql = (
        "select id,target_id,target_type,tag_code from tag_target where active=1 "
        " and tag_type='manage' and tag_code=source_tag_code and target_type='{}' and target_id in({})".format(
            escape_string(target_type), where_cond
        )
    )
    tag_target_dict = gen_managed_tag_target_dict(tag_target_sql, tag_dict)
    # Merge the tag info into the result rows, defaulting geog_area.
    if isinstance(result, list):
        for obj in result:
            data_set_id = str(obj[data_set_name])
            geog_area_dict = tag_target_dict.get(data_set_id)
            if geog_area_dict:
                if "geog_area" not in geog_area_dict:
                    geog_area_dict["geog_area"] = [default_geog_area_dict]
                obj["tags"] = {"manage": geog_area_dict}
            elif default_geog_area_dict:
                obj["tags"] = {"manage": {"geog_area": [default_geog_area_dict]}}
    elif isinstance(result, dict):
        data_set_id = str(result[data_set_name])
        geog_area_dict = tag_target_dict.get(data_set_id)
        if geog_area_dict:
            if "geog_area" not in geog_area_dict:
                geog_area_dict["geog_area"] = [default_geog_area_dict]
            result["tags"] = {"manage": geog_area_dict}
        elif default_geog_area_dict:
            result["tags"] = {"manage": {"geog_area": [default_geog_area_dict]}}
def escape_string(s):
    """Escape *s* for interpolation into a raw SQL string (pymysql escaping)."""
    return pymysql.converters.escape_string(s)
def cover_storage_detail(cluster_dict, channel_dict, cluster_id_in_cond=None, channel_id_in_cond=None):
    """Fill ``cluster_dict``/``channel_dict`` with summary rows (id + cluster_type).

    Used when full storage detail is not required: a single UNION ALL query fetches
    both storage clusters and databus channels, discriminated by the ``comm`` column
    ('1' = storage cluster, '2' = databus channel). The ``*_in_cond`` arguments are
    comma-separated id strings, possibly carrying leading/trailing commas.
    """
    sql_parts = []
    if cluster_id_in_cond:
        trimmed_cluster_ids = cluster_id_in_cond.strip(",")
        sql_parts.append(
            """select id,cluster_type,'1' as comm from storage_cluster_config where id in ({})""".format(
                trimmed_cluster_ids
            )
        )
    if channel_id_in_cond:
        trimmed_channel_ids = channel_id_in_cond.strip(",")
        sql_parts.append(
            """select id,cluster_type,'2' as comm from databus_channel_cluster_config where id in({})""".format(
                trimmed_channel_ids
            )
        )
    if not sql_parts:
        return
    union_result = BasicApi.get_complex_search(" union all ".join(sql_parts))
    if not union_result:
        return
    for row in union_result:
        # Route each summary row to the matching output dict by its discriminator.
        if row["comm"] == "1":
            cluster_dict[row["id"]] = row
        elif row["comm"] == "2":
            channel_dict[row["id"]] = row
def get_result_table_storages_v3(in_cond, cluster_type=None, need_storage_detail=True):
    """Build a mapping of result_table_id -> {cluster_type: storage info}.

    :param in_cond: SQL in-list body of quoted result_table_ids, or falsy to scan all rows.
    :param cluster_type: optional filter applied to both storage clusters and channels.
    :param need_storage_detail: when False, only summary columns are queried and each
        per-cluster-type payload is an empty dict.
    :returns: {result_table_id: {cluster_type: storage_obj_or_empty_dict}}
    """
    if need_storage_detail and in_cond:
        storage_sql = """select id,physical_table_name,updated_by,generate_type,storage_config,active,priority,
            created_by,data_type,date_format(updated_at,'{0}') updated_at,expires,
            date_format(created_at,'{0}') created_at,description,storage_cluster_config_id,previous_cluster_name,
            storage_channel_id,result_table_id from storage_result_table where active=1
            and result_table_id in ({1})""".format(
            DEFAULT_DATE_FORMAT, in_cond
        )
    elif need_storage_detail and not in_cond:
        storage_sql = """select id,physical_table_name,updated_by,generate_type,storage_config,active,priority,
            created_by,data_type,date_format(updated_at,'{0}') updated_at,expires,
            date_format(created_at,'{0}') created_at,description,storage_cluster_config_id,previous_cluster_name,
            storage_channel_id,result_table_id from storage_result_table where active=1 """.format(
            DEFAULT_DATE_FORMAT
        )
    elif not need_storage_detail and in_cond:  # storage detail not required
        storage_sql = """select id,result_table_id,storage_cluster_config_id,storage_channel_id,
            active from storage_result_table where active=1 and result_table_id in ({})""".format(
            in_cond
        )
    else:
        storage_sql = """select id,result_table_id,storage_cluster_config_id,storage_channel_id,
            active from storage_result_table where active=1"""
    storage_rt_id_dict = {}
    cluster_id_in_cond, channel_id_in_cond = "", ""
    storage_result = BasicApi.get_complex_search(storage_sql)
    if not storage_result:
        return storage_rt_id_dict
    # Collect the distinct storage-cluster ids and databus-channel ids as
    # ",id1,id2," strings (the surrounding commas make the dedup check exact).
    storage_result = parse_field_to_boolean(storage_result, "active")
    for storage_obj in storage_result:
        storage_cluster_config_id = storage_obj["storage_cluster_config_id"]
        storage_channel_id = storage_obj["storage_channel_id"]
        if storage_cluster_config_id is not None:
            if not cluster_id_in_cond:
                cluster_id_in_cond += ","
            if "," + str(storage_cluster_config_id) + "," not in cluster_id_in_cond:  # deduplicate ids
                cluster_id_in_cond += str(storage_cluster_config_id) + ","
        if storage_channel_id is not None:
            if not channel_id_in_cond:
                channel_id_in_cond += ","
            if "," + str(storage_channel_id) + "," not in channel_id_in_cond:
                channel_id_in_cond += str(storage_channel_id) + ","
    # Fetch storage cluster details.
    cluster_dict = {}
    if need_storage_detail and cluster_id_in_cond:
        cluster_id_in_cond = cluster_id_in_cond.lstrip(",").rstrip(",")
        cluster_sql = """select id,id as storage_cluster_config_id,cluster_name,cluster_type,priority,version,
            expires,connection_info,belongs_to,cluster_group from storage_cluster_config where id in ({})""".format(
            cluster_id_in_cond
        )
        if cluster_type:
            cluster_sql += " and cluster_type='" + escape_string(cluster_type) + "'"
        cluster_result = BasicApi.get_complex_search(cluster_sql)
        if cluster_result:
            for cluster_obj in cluster_result:
                storage_cluster_config_id = cluster_obj["storage_cluster_config_id"]
                cluster_dict[storage_cluster_config_id] = cluster_obj
    # Fetch databus channel details.
    channel_dict = {}
    if need_storage_detail and channel_id_in_cond:
        channel_id_in_cond = channel_id_in_cond.lstrip(",").rstrip(",")
        channel_sql = """select id,id as channel_cluster_config_id,cluster_backup_ips,attribute,zk_port,
            description,cluster_name,cluster_type,cluster_role,priority,cluster_domain,cluster_port,active,
            zk_domain,zk_root_path from databus_channel_cluster_config where id in ({})""".format(
            channel_id_in_cond
        )
        if cluster_type:
            channel_sql += " and cluster_type='" + escape_string(cluster_type) + "'"
        channel_result = BasicApi.get_complex_search(channel_sql)
        channel_result = parse_field_to_boolean(channel_result, "active")
        if channel_result:
            for channel_obj in channel_result:
                channel_cluster_config_id = channel_obj["channel_cluster_config_id"]
                channel_dict[channel_cluster_config_id] = channel_obj
    if not need_storage_detail:  # detail not required: fill with summary rows instead
        cover_storage_detail(
            cluster_dict, channel_dict, cluster_id_in_cond=cluster_id_in_cond, channel_id_in_cond=channel_id_in_cond
        )
    for storage_obj in storage_result:
        storage_rt_id = storage_obj["result_table_id"]
        storage_cluster_config_id = storage_obj["storage_cluster_config_id"]
        storage_channel_id = storage_obj["storage_channel_id"]
        tmp_storage_rt_id_dict = storage_rt_id_dict.get(storage_rt_id, {})
        tmp_cluster_obj = cluster_dict.get(storage_cluster_config_id, {})
        tmp_channel_obj = channel_dict.get(storage_channel_id, {})
        cluster_type = None
        if tmp_cluster_obj:
            cluster_type = tmp_cluster_obj.get("cluster_type")
        elif tmp_channel_obj:
            cluster_type = tmp_channel_obj.get("cluster_type")
        if cluster_type is None:  # handles rows excluded by the cluster_type filter
            continue
        storage_obj["storage_cluster"] = tmp_cluster_obj
        storage_obj["storage_channel"] = tmp_channel_obj
        if need_storage_detail:
            tmp_storage_rt_id_dict[cluster_type] = storage_obj
        else:  # storage detail not required
            tmp_storage_rt_id_dict[cluster_type] = {}
        storage_rt_id_dict[storage_rt_id] = tmp_storage_rt_id_dict
    return storage_rt_id_dict
def get_result_table_fields_v3(in_cond, need_result_table_id=False):
    """Query result_table_field rows, optionally restricted by result_table_id.

    :param in_cond: SQL in-list body ("'rt1','rt2'") or falsy for the whole table.
    :param need_result_table_id: include the result_table_id column in each row.
    :returns: list of field dicts; ``is_dimension`` is coerced to bool and
        ``roles`` is normalized to a dict with every allowed role present
        (defaulting to False).
    """
    field_sql = "select "
    if need_result_table_id:
        field_sql += "result_table_id,"
    if in_cond:
        field_sql += """ roles,field_type,description,date_format(created_at,'{0}') created_at,is_dimension,created_by,
date_format(updated_at,'{0}') updated_at,origins,field_alias,field_name,id,field_index,updated_by
from result_table_field where result_table_id in ({1}) order by result_table_id asc,field_index asc""".format(
            DEFAULT_DATE_FORMAT, in_cond
        )
    else:
        field_sql += """ roles,field_type,description,date_format(created_at,'{0}') created_at,is_dimension,created_by,
date_format(updated_at,'{0}') updated_at,origins,field_alias,field_name,id,field_index,updated_by
from result_table_field order by result_table_id asc,field_index asc""".format(
            DEFAULT_DATE_FORMAT,
        )
    field_result = BasicApi.get_complex_search(field_sql)
    field_result = parse_field_to_boolean(field_result, "is_dimension")
    for item in field_result:
        default_roles = {k: False for k in field_allowed_roles}
        roles = None
        try:
            roles = json.loads(item["roles"])
        except (TypeError, ValueError):
            # roles may be NULL (TypeError) or malformed JSON (ValueError /
            # JSONDecodeError); fall back to the all-False defaults.
            pass
        if roles and isinstance(roles, dict):
            default_roles.update(roles)
        item["roles"] = default_roles
    return field_result
def get_result_table_infos_v3(
    bk_biz_ids=None,
    project_id=None,
    result_table_ids=None,
    related=None,
    processing_type=None,
    page=None,
    page_size=None,
    need_storage_detail=True,
    tags=None,
    only_queryable=True,
):
    """List result_table rows (joined with project name) with optional filters.

    :param bk_biz_ids: restrict to these business ids.
    :param project_id: restrict to one project.
    :param result_table_ids: explicit id list; disables paging when given.
    :param related: iterable of extras to attach: "fields", "data_processing", "storages".
    :param processing_type: restrict to one processing type.
    :param page/page_size: optional paging applied via paged_sql.
    :param need_storage_detail: forwarded to get_result_table_storages_v3.
    :param tags: manage geog-area tag codes (only used when MULTI_GEOG_AREA).
    :param only_queryable: hide queryset-type RTs from the listing.
    :returns: list of result table dicts, each optionally enriched with the
        requested related data.
    """
    sql = """select a.result_table_name,a.bk_biz_id,date_format(a.created_at,'{0}') created_at,a.sensitivity,
    a.result_table_name_alias,a.updated_by,a.created_by,a.result_table_id,a.count_freq,a.description,
    date_format(a.updated_at,'{0}') updated_at,a.generate_type,a.result_table_type,a.processing_type,a.project_id,
    a.platform, a.is_managed,a.count_freq_unit,a.data_category,b.project_name from result_table a
    left join project_info b on a.project_id=b.project_id where 1=1""".format(
        DEFAULT_DATE_FORMAT
    )
    where_cond = ""
    if bk_biz_ids:
        where_cond += " and a.bk_biz_id in ({})".format(",".join(escape_string(bk_biz_id) for bk_biz_id in bk_biz_ids))
    if project_id:
        where_cond += " and a.project_id=" + escape_string(project_id)
    # Only expose queryable RTs (queryset-type RTs are hidden from listings).
    if only_queryable:
        if processing_type:
            if processing_type == "queryset":
                return []
            where_cond += " and a.processing_type='" + escape_string(processing_type) + "'"
        else:
            where_cond += " and a.processing_type!='queryset'"
    else:
        if processing_type:
            where_cond += " and a.processing_type='" + escape_string(processing_type) + "'"
    if settings.MULTI_GEOG_AREA and tags:  # multi-geog deployment: restrict by manage geog-area tags
        where_cond += (
            " and a.result_table_id in(select target_id from tag_target where tag_code=source_tag_code "
            "and active=1 and target_type='result_table' and tag_type='manage' and source_tag_code in ({}))".format(
                ",".join("'" + escape_string(tag) + "'" for tag in tags)
            )
        )
    if result_table_ids:
        # Explicit id list: paging is dropped so every requested id is returned.
        page = None
        page_size = None
        where_cond += " and a.result_table_id in("
        where_cond += "".join("'" + escape_string(rt_id) + "'," for rt_id in result_table_ids)
        where_cond = where_cond.rstrip(",") + ") "
    if where_cond:
        sql += where_cond
    query_result = BasicApi.get_complex_search(paged_sql(sql, page=page, page_size=page_size))
    if only_queryable:
        # Extra in-memory pass: system-generated querysets are also hidden.
        query_result = [
            rt_obj
            for rt_obj in query_result
            if not (rt_obj["processing_type"] == "queryset" and rt_obj["generate_type"] == "system")
        ]
    if related and query_result:
        if not (result_table_ids or bk_biz_ids or project_id or processing_type):
            # No filter at all: avoid building a gigantic in-list, query unfiltered.
            in_cond = None
        else:
            in_cond = ",".join("'" + escape_string(rt_obj["result_table_id"]) + "'" for rt_obj in query_result)
        field_rt_id_dict, dp_rt_id_dict, storage_rt_id_dict = {}, {}, None
        if "fields" in related:
            # Refuse to pull the entire result_table_field table when unfiltered.
            if not in_cond:
                pass
            else:
                field_result = get_result_table_fields_v3(in_cond, need_result_table_id=True)
                if field_result:
                    for field_obj in field_result:
                        field_rt_id = field_obj["result_table_id"]
                        del field_obj["result_table_id"]
                        field_list = field_rt_id_dict.get(field_rt_id, [])
                        field_list.append(field_obj)
                        field_rt_id_dict[field_rt_id] = field_list
        if "data_processing" in related:
            if not in_cond:
                dp_sql = """select tmp.*,c.project_name from
                (select a.processing_id,a.project_id,a.processing_alias,a.processing_type,a.description,
                a.generate_type,a.platform,a.created_by,date_format(a.created_at,'{0}') created_at,
                a.updated_by,date_format(a.updated_at,'{0}') updated_at,b.data_set_id
                from data_processing a,data_processing_relation b where a.processing_id=b.processing_id
                and b.data_set_type='result_table' and b.data_directing='output')tmp
                left join project_info c on tmp.project_id=c.project_id""".format(
                    DEFAULT_DATE_FORMAT
                )
            else:
                dp_sql = """select tmp.*,c.project_name from
                (select a.processing_id,a.project_id,a.processing_alias,a.processing_type,a.description,
                a.generate_type,a.platform,a.created_by,date_format(a.created_at,'{0}') created_at,
                a.updated_by,date_format(a.updated_at,'{0}') updated_at,b.data_set_id
                from data_processing a,data_processing_relation b where a.processing_id=b.processing_id
                and b.data_set_type='result_table' and b.data_directing='output' and b.data_set_id in({1}))tmp
                left join project_info c on tmp.project_id=c.project_id""".format(
                    DEFAULT_DATE_FORMAT, in_cond
                )
            dp_result = BasicApi.get_complex_search(dp_sql)
            if dp_result:
                for dp_obj in dp_result:
                    data_set_id = dp_obj["data_set_id"]
                    del dp_obj["data_set_id"]
                    dp_rt_id_dict[data_set_id] = dp_obj
        if "storages" in related:
            storage_rt_id_dict = get_result_table_storages_v3(in_cond, need_storage_detail=need_storage_detail)
        for rt_obj in query_result:
            rt_id = rt_obj["result_table_id"]
            if "fields" in related:
                rt_obj["fields"] = field_rt_id_dict.get(rt_id, [])
            if "data_processing" in related:
                rt_obj["data_processing"] = dp_rt_id_dict.get(rt_id, {})
            if "storages" in related:
                rt_obj["storages"] = storage_rt_id_dict.get(rt_id, {})
    return query_result
def get_result_table_geog_area(result_table_id=None):
    """Look up the geog-area manage tag ('inland'/'SEA'/'NA') bound to a result table.

    :param result_table_id: target result_table_id; when falsy no query is built
        and the (None) sql is handed to BasicApi unchanged, preserving the
        original behavior.
    :returns: BasicApi rows of (id, tag_code, tag_type).
    """
    sql = None
    if result_table_id:
        # Quote and escape the id — consistent with every other query builder in
        # this module; the original spliced it in raw and unquoted.
        sql = """select id, tag_code, tag_type from tag_target where target_id = '{}'
and tag_code in ('inland', 'SEA', 'NA') and target_type = 'result_table'""".format(
            escape_string(str(result_table_id))
        )
    return BasicApi.get_complex_search(sql)
def parse_field_to_boolean(obj, field_name):
    """Coerce an integer 0/1 flag to bool, in place.

    Works on a single dict or a list of dicts; values other than 0/1 are left
    untouched. Any other input type (including None) passes through unchanged.
    The (mutated) input is returned for chaining.
    """
    def _coerce(record):
        flag = record[field_name]
        if flag == 1:
            record[field_name] = True
        elif flag == 0:
            record[field_name] = False

    if isinstance(obj, dict):
        _coerce(obj)
    elif isinstance(obj, list):
        for record in obj:
            _coerce(record)
    return obj
def get_data_transfers_v3(where_cond, page=None, page_size=None):  # backend=mysql
    """List data_transferring rows (with project name) plus their I/O relations.

    :param where_cond: extra SQL appended verbatim to the base query (" and a.x=..."), or falsy.
    :param page/page_size: optional paging applied via paged_sql.
    :returns: rows, each carrying 'inputs'/'outputs' lists built from
        data_transferring_relation (empty lists when no relation exists).
    """
    sql = """
select a.project_id,b.project_name,a.transferring_id, a.transferring_alias, a.transferring_type, a.generate_type,
a.created_by, date_format(a.created_at,'{0}') created_at, a.updated_by, date_format(a.updated_at,'{0}') updated_at,
a.description from data_transferring a left join project_info b on a.project_id=b.project_id where 1=1 """.format(
        DEFAULT_DATE_FORMAT
    )
    if where_cond:
        sql += where_cond
    query_result = BasicApi.get_complex_search(paged_sql(sql, page=page, page_size=page_size))
    if query_result:
        dp_relation_sql = """
        select transferring_id,data_directing,data_set_id,data_set_type,storage_type,channel_cluster_config_id,
        storage_cluster_config_id from data_transferring_relation where transferring_id in(
        """
        for dp_obj in query_result:
            dp_obj["inputs"] = []
            dp_obj["outputs"] = []
        dp_relation_sql += "".join("'" + escape_string(dp_obj["transferring_id"]) + "'," for dp_obj in query_result)
        # Drop the trailing comma before closing the in-list.
        dp_relation_sql = dp_relation_sql[: len(dp_relation_sql) - 1] + ") order by transferring_id"
        dp_relation_result = BasicApi.get_complex_search(dp_relation_sql)
        dp_relation_dict = {}
        for dp_relation_obj in dp_relation_result:
            dp_id = dp_relation_obj["transferring_id"]
            data_directing = dp_relation_obj["data_directing"]
            # The grouping keys are removed so only relation payload remains.
            del dp_relation_obj["transferring_id"]
            del dp_relation_obj["data_directing"]
            if dp_id in dp_relation_dict:
                if data_directing == "input":
                    dp_relation_dict[dp_id]["inputs"].append(dp_relation_obj)
                elif data_directing == "output":
                    dp_relation_dict[dp_id]["outputs"].append(dp_relation_obj)
            else:
                tmp_dict = {}
                inputs_list, outputs_list = [], []
                if data_directing == "input":
                    inputs_list.append(dp_relation_obj)
                elif data_directing == "output":
                    outputs_list.append(dp_relation_obj)
                tmp_dict["inputs"] = inputs_list
                tmp_dict["outputs"] = outputs_list
                dp_relation_dict[dp_id] = tmp_dict
        for dp_obj in query_result:
            dp_id = dp_obj["transferring_id"]
            dp_relation_obj = dp_relation_dict.get(dp_id)
            if dp_relation_obj:
                dp_obj["inputs"] = dp_relation_obj["inputs"]
                dp_obj["outputs"] = dp_relation_obj["outputs"]
    return query_result
def get_data_processings_v3(where_cond, page=None, page_size=None):  # backend=mysql
    """List data_processing rows (with project name) plus their I/O relations.

    Mirrors get_data_transfers_v3 but over data_processing /
    data_processing_relation.

    :param where_cond: extra SQL appended verbatim to the base query, or falsy.
    :param page/page_size: optional paging applied via paged_sql.
    :returns: rows, each carrying 'inputs'/'outputs' lists (empty when no relation).
    """
    sql = """
select a.processing_id,a.project_id,b.project_name,a.processing_alias,a.processing_type,a.generate_type,
a.created_by,date_format(a.created_at,'{0}') created_at,a.updated_by,date_format(a.updated_at,'{0}') updated_at,
a.description,a.platform from data_processing a left join project_info b on a.project_id=b.project_id
where 1=1 """.format(
        DEFAULT_DATE_FORMAT
    )
    if where_cond:
        sql += where_cond
    query_result = BasicApi.get_complex_search(paged_sql(sql, page=page, page_size=page_size))
    if query_result:
        dp_relation_sql = """
        select processing_id,data_directing,data_set_id,data_set_type,storage_type,channel_cluster_config_id,
        storage_cluster_config_id from data_processing_relation where processing_id in(
        """
        for dp_obj in query_result:
            dp_obj["inputs"] = []
            dp_obj["outputs"] = []
        dp_relation_sql += "".join("'" + escape_string(dp_obj["processing_id"]) + "'," for dp_obj in query_result)
        # Drop the trailing comma before closing the in-list.
        dp_relation_sql = dp_relation_sql[: len(dp_relation_sql) - 1] + ") order by processing_id"
        dp_relation_result = BasicApi.get_complex_search(dp_relation_sql)
        dp_relation_dict = {}
        for dp_relation_obj in dp_relation_result:
            dp_id = dp_relation_obj["processing_id"]
            data_directing = dp_relation_obj["data_directing"]
            # The grouping keys are removed so only relation payload remains.
            del dp_relation_obj["processing_id"]
            del dp_relation_obj["data_directing"]
            if dp_id in dp_relation_dict:
                if data_directing == "input":
                    dp_relation_dict[dp_id]["inputs"].append(dp_relation_obj)
                elif data_directing == "output":
                    dp_relation_dict[dp_id]["outputs"].append(dp_relation_obj)
            else:
                tmp_dict = {}
                inputs_list, outputs_list = [], []
                if data_directing == "input":
                    inputs_list.append(dp_relation_obj)
                elif data_directing == "output":
                    outputs_list.append(dp_relation_obj)
                tmp_dict["inputs"] = inputs_list
                tmp_dict["outputs"] = outputs_list
                dp_relation_dict[dp_id] = tmp_dict
        for dp_obj in query_result:
            dp_id = dp_obj["processing_id"]
            dp_relation_obj = dp_relation_dict.get(dp_id)
            if dp_relation_obj:
                dp_obj["inputs"] = dp_relation_obj["inputs"]
                dp_obj["outputs"] = dp_relation_obj["outputs"]
    return query_result
def get_project_info_sql():
    """Return the base SELECT over ``project_info`` with audit timestamps formatted."""
    return """select project_name,date_format(deleted_at,'{0}') deleted_at,description,
date_format(created_at,'{0}') created_at,date_format(updated_at,'{0}') updated_at,created_by,deleted_by,bk_app_code,
active,project_id,updated_by from project_info where 1=1""".format(DEFAULT_DATE_FORMAT)
def parse_data_category_result(ret_result):
    """Turn flat data-category rows into a tree of visible categories.

    Rows are dicts carrying id / parent_id / seq_index / active / visible.
    Inactive rows are dropped. Invisible rows are removed, but their children
    are re-parented to the nearest visible (or root) ancestor. Every surviving
    row gets a ``sub_list`` of its children, and rows whose (re-mapped)
    parent_id is 0 are returned as the roots. Input dicts are mutated in place.
    """
    roots = []
    if not ret_result:
        return roots
    # Stable ordering: siblings end up sorted by seq_index within each parent.
    ret_result.sort(key=lambda row: (row["parent_id"], row["seq_index"]))
    hidden_parent = {}  # id -> parent_id for rows hidden by visible=0
    kept = []
    for row in ret_result:
        if not row["active"]:
            continue
        if not row["visible"]:
            hidden_parent[row["id"]] = row["parent_id"]
            continue
        kept.append(row)
    # Re-parent children of hidden rows to their closest non-hidden ancestor.
    for row in kept:
        ancestor = row["parent_id"]
        while ancestor in hidden_parent:
            ancestor = hidden_parent[ancestor]
        row["parent_id"] = ancestor
    children_by_parent = {}
    for row in kept:
        children_by_parent.setdefault(row["parent_id"], []).append(row)
    for row in kept:
        row["sub_list"] = children_by_parent.get(row["id"], [])
        if row["parent_id"] == 0:
            roots.append(row)
    return roots
def get_filter_attrs(related_filter, need_type_name):
    """Extract (attr_name, attr_value) from a JSON filter string.

    Returns ("", "")-defaults only when the filter's "type" equals
    *need_type_name*; otherwise (None, None). A falsy *related_filter* is not
    parsed at all.
    """
    attr_name, attr_value = None, None
    if related_filter:
        parsed = json.loads(related_filter)
        if parsed.get("type") == need_type_name:
            attr_name = parsed.get("attr_name", "")
            attr_value = parsed.get("attr_value", "")
    return attr_name, attr_value
# Atlas common attribute field names
ATLAS_FIELD_ID = "id"
ATLAS_FIELD_NAME = "name"
ATLAS_FIELD_QUALIFIED_NAME = "qualifiedName"
ATLAS_FIELD_OWNER = "owner"
ATLAS_FIELD_DESCRIPTION = "description"
ATLAS_FIELD_INPUTS = "inputs"
ATLAS_FIELD_OUTPUTS = "outputs"
# Atlas entity status value marking a live (non-deleted) entity
ATLAS_FIELD_ACTIVE = "ACTIVE"
def get_attrs_v2(obj, batch_guid_dict):
    """Resolve an entity reference ({'guid': ...}) through a guid -> attrs map.

    Returns {} for a non-dict/empty *obj* or when the guid resolves to nothing.
    """
    if not isinstance(obj, dict) or not obj:
        return {}
    resolved = batch_guid_dict.get(obj["guid"])
    return resolved if resolved else {}
def parse_storage_to_dict_v2(storage_list, batch_guid_dict, result_table_id):
    """Group a result table's storage entries by cluster_type.

    :param storage_list: storage attribute dicts referencing cluster/channel
        entities by guid.
    :param batch_guid_dict: guid -> resolved attributes, used to expand those
        references via get_attrs_v2.
    :param result_table_id: currently unused (kept for interface compatibility;
        see the commented-out assignment near the end).
    :returns: {cluster_type: storage attrs}, skipping soft-deleted entries.
    """
    res_dict = {}
    for attrs in storage_list:
        # cluster_type = attrs['cluster_type']  # the cluster_type field was removed; derived below instead
        # Only one of storage_cluster / storage_channel is ever populated.
        active = attrs.get("active")
        if active is not None and not active:  # soft-deleted entry
            continue
        storage_cluster = attrs["storage_cluster"]
        storage_channel = attrs["storage_channel"]
        cluster_type = None
        if storage_channel:
            cluster_type = "kafka"
        storage_cluster_attrs = get_attrs_v2(storage_cluster, batch_guid_dict)
        if storage_cluster_attrs:
            storage_cluster_attrs["storage_cluster_config_id"] = storage_cluster_attrs.get("id")
        del_redundance_key(storage_cluster_attrs, "storage_configs", "storage_config", "dp_characters", "dt_characters")
        del_common_unuse_fields(storage_cluster_attrs)
        del_common_unuse_fields2(storage_cluster_attrs)
        del_common_unuse_fields3(storage_cluster_attrs)
        attrs["storage_cluster"] = storage_cluster_attrs
        if storage_cluster_attrs:
            # A resolved storage cluster wins over the "kafka" channel default.
            cluster_type = storage_cluster_attrs["cluster_type"]
        storage_channel_obj = get_attrs_v2(storage_channel, batch_guid_dict)
        if storage_channel_obj:
            storage_channel_obj["channel_cluster_config_id"] = storage_channel_obj.get("id")
        del_redundance_key(
            storage_channel_obj,
            "storage_config",
            "storage_configs",
            ATLAS_FIELD_NAME,
            ATLAS_FIELD_OWNER,
            "dp_characters",
            "dt_characters",
        )
        del_common_unuse_fields(storage_channel_obj)
        del_common_unuse_fields2(storage_channel_obj)
        attrs["storage_channel"] = storage_channel_obj
        del_redundance_key(attrs, "result_tables", "result_table", ATLAS_FIELD_NAME, ATLAS_FIELD_OWNER)
        # attrs['result_table_id'] = result_table_id
        res_dict[cluster_type] = attrs
    return res_dict
def parse_lineage_attrs(entity_obj):
    """Derive (node_key, node_type, qualified_name) from an Atlas lineage entity.

    The node key is "<type>_<qualified_name>"; the qualified name comes from the
    type-specific id attribute. Unknown typeNames raise, as in the original
    (no mapping entry -> lookup failure).
    """
    kind_map = {
        "ResultTable": ("result_table_", "result_table_id", "result_table"),
        "DataProcess": ("data_processing_", "processing_id", "data_processing"),
        "RawData": ("raw_data_", "id", "raw_data"),
    }
    prefix_name, field_name, ret_type_name = kind_map.get(entity_obj["typeName"], (None, None, None))
    qualified_name = str(entity_obj["attributes"][field_name])
    return prefix_name + qualified_name, ret_type_name, qualified_name
def parse_mysql_lineage_attrs(entity_obj):
    """Return (node_key, type, qualified_name) for a mysql/dgraph lineage node."""
    node_type = entity_obj["type"]
    qualified = entity_obj["qualified_name"]
    return "{}_{}".format(node_type, qualified), node_type, qualified
def parse_mysql_lineage_node_attrs(entity_obj):
    """Return (node_key, type, qualified_name, extra) for a lineage node.

    ``extra`` defaults to an empty dict when the node carries none.
    """
    node_type = entity_obj["type"]
    qualified = entity_obj["qualified_name"]
    extra_info = entity_obj.get("extra", {})
    return "{}_{}".format(node_type, qualified), node_type, qualified, extra_info
def get_result_table_lineage_info(params):
    """Fetch lineage via BasicApi and reshape it into {depth, direction, nodes, relations}.

    :param params: lineage query parameters; when "extra_retrieve" is present,
        each node keeps its raw 'extra' payload.
    :returns: {} when the backend is unsupported or the lineage is empty;
        otherwise nodes keyed by "<type>_<qualified_name>" and relations with a
        fixed "ACTIVE" status.
    """
    res_obj = {}
    res_data = BasicApi.get_lineage(params)
    backend_type = res_data.get("backend_type", None)
    if backend_type and backend_type in ("mysql", "dgraph"):
        lineage = res_data["lineage"]
        if lineage:
            mysql_criteria = lineage["criteria"]
            mysql_nodes = lineage["nodes"]
            mysql_relations = lineage["relations"]
            res_obj["depth"] = mysql_criteria["depth"]
            res_obj["direction"] = mysql_criteria["direction"]
            res_relations = []
            nodes = {}
            if mysql_relations:
                for relation in mysql_relations:
                    fm = relation["from"]
                    to = relation["to"]
                    from_name, _, _ = parse_mysql_lineage_attrs(fm)
                    to_name, _, _ = parse_mysql_lineage_attrs(to)
                    res_relations.append({"from": from_name, "to": to_name, "status": "ACTIVE"})
            if mysql_nodes:
                for node in mysql_nodes:
                    key_name, type_name, qualified_name, extra = parse_mysql_lineage_node_attrs(node)
                    if "extra_retrieve" in params:
                        tmp_node = {"type": type_name, "qualified_name": qualified_name, "extra": extra}
                    else:
                        tmp_node = {"type": type_name, "qualified_name": qualified_name}
                    # Also expose the qualified name under the type-specific id key.
                    if type_name == "result_table":
                        tmp_node["result_table_id"] = qualified_name
                    elif type_name == "data_processing":
                        tmp_node["processing_id"] = qualified_name
                    elif type_name == "raw_data":
                        tmp_node["id"] = int(qualified_name)
                    nodes[key_name] = tmp_node
            res_obj["nodes"] = nodes
            res_obj["relations"] = res_relations
    return res_obj
def parse_genealogy_info(rpc_ret):
    """Convert a genealogy RPC payload into {depth, nodes, relations}.

    Returns {} when the payload carries no genealogy. Nodes are keyed by
    "<type>_<qualified_name>" and relations carry a fixed "ACTIVE" status.
    """
    parsed = {}
    genealogy = rpc_ret["genealogy"]
    if not genealogy:
        return parsed
    parsed["depth"] = genealogy["criteria"]["depth"]
    relations_out = []
    for relation in genealogy["relations"] or []:
        src_key, _, _ = parse_mysql_lineage_attrs(relation["from"])
        dst_key, _, _ = parse_mysql_lineage_attrs(relation["to"])
        relations_out.append({"from": src_key, "to": dst_key, "status": "ACTIVE"})
    node_map = {}
    for node in genealogy["nodes"] or []:
        key_name, node_type, qualified = parse_mysql_lineage_attrs(node)
        entry = {"type": node_type, "qualified_name": qualified}
        # Also expose the qualified name under the type-specific id key.
        if node_type == "result_table":
            entry["result_table_id"] = qualified
        elif node_type == "data_processing":
            entry["processing_id"] = qualified
        elif node_type == "raw_data":
            entry["id"] = int(qualified)
        node_map[key_name] = entry
    parsed["nodes"] = node_map
    parsed["relations"] = relations_out
    return parsed
def get_lineage_node(obj, type_name, qualified_name):
    """Build a lineage-graph node dict from an Atlas entity.

    Strips audit/name bookkeeping via parse_lineage_nodes_obj and stamps the
    caller-supplied type and qualified name onto the result.
    """
    node = get_obj_attrs(obj, add_common=False)
    parse_lineage_nodes_obj(node)
    node["type"] = type_name
    node["qualified_name"] = qualified_name
    return node
def parse_lineage_nodes_obj(obj):
    """Strip creation/ownership/naming bookkeeping fields from a lineage node, in place."""
    del_redundance_key(obj, "createTime", ATLAS_FIELD_OWNER, ATLAS_FIELD_NAME, ATLAS_FIELD_DESCRIPTION)
def field_index_sort(elem):
    """Sort key: order field dicts by their ``field_index`` value."""
    return elem["field_index"]
def get_result_guids(res_dict, field_name):
    """Collect guids of ACTIVE entities from an Atlas response.

    *field_name* selects the payload shape: "entities" (a list) or "entity"
    (a single object). Failed responses or empty payloads yield [].
    """
    guids = []
    if not res_dict["result"]:
        return guids
    res_data = res_dict["data"]
    if not res_data:
        return guids
    if field_name == "entities":
        for entity in res_data.get("entities", []):
            if entity["status"] == ATLAS_FIELD_ACTIVE:
                guids.append(entity["guid"])
    elif field_name == "entity":
        entity = res_data["entity"]
        if entity["status"] == ATLAS_FIELD_ACTIVE:
            guids.append(entity["guid"])
    return guids
def get_result_attrs(res_dict, available_attr=None, complex_attrs=None, need_guid=False):
    """Flatten the ACTIVE entities of an Atlas search response into attribute dicts.

    :param res_dict: raw response carrying a 'result' flag and 'data' payload.
    :param available_attr: optional whitelist of simple attribute names to keep;
        when absent the raw attributes dict is returned per entity.
    :param complex_attrs: {attr_name: "show=src|show2=src2"} specs that resolve
        entity references through referredEntities into flat fields.
    :param need_guid: include each entity's guid in its output dict.
    :returns: (list of attribute dicts, referredEntities or None)
    """
    result = res_dict["result"]
    ret_attrs = []
    referred_entities = None
    if result:
        res_data = res_dict["data"]
        if res_data:
            if "entities" in res_data:
                entities = res_data["entities"]
                referred_entities = res_data.get("referredEntities")
                for entity in entities:
                    status = entity["status"]
                    if status == ATLAS_FIELD_ACTIVE:
                        attrs = entity["attributes"]
                        handler_common_fields(attrs)
                        ret_entity = {}
                        if need_guid:
                            ret_entity["guid"] = entity["guid"]
                        if complex_attrs:
                            # Resolve each referenced entity and project the
                            # requested source fields onto new flat names.
                            for k, v in list(complex_attrs.items()):
                                field_arr = v.split("|")
                                for def_field_val in field_arr:
                                    show_field, get_field = def_field_val.split("=", 1)
                                    ret_entity[show_field] = None
                                    key_dict = attrs.get(k)
                                    if key_dict:
                                        k_guid = key_dict["guid"]
                                        if referred_entities:
                                            guid_dict = referred_entities.get(k_guid)
                                            if guid_dict:
                                                guid_attrs = guid_dict.get("attributes")
                                                if guid_attrs:
                                                    ret_entity[show_field] = guid_attrs.get(get_field)
                        if available_attr:
                            for field_name in available_attr:
                                if complex_attrs:
                                    # Complex attrs were already projected above.
                                    if field_name in complex_attrs:
                                        continue
                                ret_entity[field_name] = attrs.get(field_name)
                            ret_attrs.append(ret_entity)
                        else:
                            if need_guid:
                                attrs["guid"] = entity["guid"]
                            ret_attrs.append(attrs)
    return ret_attrs, referred_entities
def get_result_obj_attrs(res_dict, need_guid=False):
    """Extract (attributes, referredEntities) from a single-entity Atlas response.

    The root entity is registered in referredEntities under its own guid so
    that cyclic back-references (e.g. ResultTable -> Project -> ResultTable)
    can still be resolved later. Failed/empty responses yield ({}, {}).
    """
    if res_dict["result"]:
        res_data = res_dict["data"]
        if res_data:
            referred = res_data["referredEntities"]
            entity = res_data["entity"]
            guid = entity["guid"]
            if guid not in referred:
                referred[guid] = entity
            return get_obj_attrs(entity, need_guid=need_guid), referred
    return {}, {}
def get_obj_attrs(entity_obj, add_common=True, need_guid=False):
    """Pull the attributes dict out of an ACTIVE Atlas entity.

    :param add_common: normalize audit fields via handler_common_fields; when
        False the audit fields are stripped instead.
    :param need_guid: copy the entity's guid into the attributes.
    :returns: the (mutated) attributes dict, or {} for non-ACTIVE entities.
    """
    if entity_obj["status"] != ATLAS_FIELD_ACTIVE:
        return {}
    attrs = entity_obj["attributes"]
    if add_common:
        handler_common_fields(attrs)
    else:
        del_redundance_key(attrs, "created_by", "updated_by", "created_at", "updated_at")
    if need_guid:
        attrs["guid"] = entity_obj["guid"]
    del_common_unuse_fields(attrs)
    return attrs
def handler_common_fields(attrs):
    """Normalize audit fields in place.

    Missing created_by/updated_by default to "admin"; missing timestamps become
    None; present timestamps are converted via parse_time_format. A deleted_at
    of None/0 is left untouched.
    """
    deleted_at = attrs.get("deleted_at")
    if deleted_at is not None and deleted_at != 0:
        attrs["deleted_at"] = parse_time_format(deleted_at)
    attrs.setdefault("created_by", "admin")
    attrs.setdefault("updated_by", "admin")
    if "created_at" in attrs:
        attrs["created_at"] = parse_time_format(attrs["created_at"])
    else:
        attrs["created_at"] = None
    if "updated_at" in attrs:
        attrs["updated_at"] = parse_time_format(attrs["updated_at"])
    else:
        attrs["updated_at"] = None
def del_common_unuse_fields(attrs_dict):
    """Strip Atlas qualified-name bookkeeping fields from *attrs_dict* in place; returns it."""
    del_redundance_key(attrs_dict, "qualifiedName", "qualified_name_ref", "name_ref", "qualified_name")
    return attrs_dict
def del_common_unuse_fields2(attrs_dict):
    """Strip audit fields (created/updated by/at) from *attrs_dict* in place; returns it."""
    del_redundance_key(attrs_dict, "created_by", "updated_by", "created_at", "updated_at")
    return attrs_dict
def del_common_unuse_fields3(attrs_dict):
    """Strip id/name/owner/description fields from *attrs_dict* in place; returns it."""
    del_redundance_key(attrs_dict, ATLAS_FIELD_ID, ATLAS_FIELD_NAME, ATLAS_FIELD_OWNER, ATLAS_FIELD_DESCRIPTION)
    return attrs_dict
def del_redundance_key(obj, *keys):
    """Remove *keys* in place from a dict, or from each dict inside a list/tuple.

    Missing keys and non-dict elements are ignored; any other input type is
    left untouched. (Uses dict.pop(key, None) instead of the original
    ``key in list(obj.keys())`` + ``del`` pattern — same behavior, no
    intermediate key-list allocations.)
    """
    if isinstance(obj, dict):
        for key in keys:
            obj.pop(key, None)
    elif isinstance(obj, (list, tuple)):
        for elem in obj:
            if isinstance(elem, dict):
                for key in keys:
                    elem.pop(key, None)
def parse_time_format(time_stamp):
    """Normalize a timestamp to a 'YYYY-MM-DD HH:MM:SS' local-time string.

    Strings pass through unchanged; falsy values (None, 0) yield None.
    Numeric timestamps are interpreted as seconds since the epoch.
    """
    if isinstance(time_stamp, str):
        return time_stamp
    if not time_stamp:
        return None
    return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time_stamp))
| 42.374205
| 120
| 0.637587
|
4a0fefc82b758798564bef63a6c581a59692a202
| 1,323
|
py
|
Python
|
gym_wrapper/abstractEnv.py
|
rtatze/JSBSim_gym_wrapper
|
525d70e72e36fb284b2adfd63d2b3205d6e05202
|
[
"MIT"
] | 10
|
2021-02-08T15:26:25.000Z
|
2022-03-18T07:22:20.000Z
|
gym_wrapper/abstractEnv.py
|
rtatze/JSBSim_gym_wrapper
|
525d70e72e36fb284b2adfd63d2b3205d6e05202
|
[
"MIT"
] | 1
|
2021-02-23T08:09:57.000Z
|
2021-02-23T11:13:29.000Z
|
gym_wrapper/abstractEnv.py
|
rtatze/JSBSim_gym_wrapper
|
525d70e72e36fb284b2adfd63d2b3205d6e05202
|
[
"MIT"
] | 4
|
2021-01-22T16:21:26.000Z
|
2022-01-25T14:05:54.000Z
|
from typing import Tuple, List
import numpy as np
from enum import Enum
import gym
from gym import spaces
import logging
from abc import ABC, abstractmethod
class AbstractEnv(gym.Env, ABC):
    """Skeleton gym environment with pendulum-style bounds.

    Observations and rewards are placeholders; subclasses are expected to
    override the private hooks with real dynamics.
    """

    def __init__(self):
        super(AbstractEnv, self).__init__()
        # Pendulum-style physical parameters and limits.
        self.max_speed = 8
        self.max_torque = 2.0
        self.dt = 0.05
        self.m = 1.0
        self.l = 1.0
        self.viewer = None
        obs_bound = np.array([1.0, 1.0, self.max_speed], dtype=np.float32)
        self.action_space = spaces.Box(
            low=-self.max_torque, high=self.max_torque, shape=(1,), dtype=np.float32
        )
        self.observation_space = spaces.Box(low=-obs_bound, high=obs_bound, dtype=np.float32)
        # Sanity prints comparing declared vs actual observation shape.
        print(self.observation_space.shape)
        print(self._get_obs().shape)

    def step(self, action) -> Tuple[object, float, bool, dict]:  # -> observation, reward, done, info
        """Advance one timestep and return (observation, reward, done, info)."""
        return self._get_obs(), self._calcRewards(), False, {}

    def reset(self) -> object:
        """Reset the episode and return the initial observation."""
        return self._get_obs()

    def _get_obs(self) -> np.ndarray:
        """Placeholder observation: a constant 3-vector."""
        return np.array([1.0, 1.0, 1.0])

    def _calcRewards(self) -> float:
        """Placeholder reward: always zero."""
        reward = 0
        return reward

    def render(self, mode='human'):
        pass

    def close(self):
        pass
| 24.962264
| 100
| 0.579743
|
4a0fefe10987338a0f2f17fcead267a002316884
| 23
|
py
|
Python
|
aio_telegraph/__init__.py
|
bluzir/aio-telegraph
|
cd5cb110067a1ad44a535197fc9a6cdc05d4c317
|
[
"MIT"
] | 1
|
2019-08-19T09:15:14.000Z
|
2019-08-19T09:15:14.000Z
|
aio_telegraph/__init__.py
|
bluzir/aio-telegraph
|
cd5cb110067a1ad44a535197fc9a6cdc05d4c317
|
[
"MIT"
] | null | null | null |
aio_telegraph/__init__.py
|
bluzir/aio-telegraph
|
cd5cb110067a1ad44a535197fc9a6cdc05d4c317
|
[
"MIT"
] | 1
|
2019-05-17T19:00:30.000Z
|
2019-05-17T19:00:30.000Z
|
# Package identifier for the aio_telegraph distribution.
name = 'aio_telegraph'
| 11.5
| 22
| 0.73913
|
4a0ff0a43adcfb23ede7a012551d2c570a9847bd
| 591
|
py
|
Python
|
what_is_the_python_decorator/demo7.py
|
NightmareQAQ/python-notes
|
4e766be06073a495ff9654f0dd8c0bb03310c559
|
[
"MIT"
] | 106
|
2017-05-02T10:25:50.000Z
|
2022-03-23T14:57:28.000Z
|
what_is_the_python_decorator/demo7.py
|
NightmareQAQ/python-notes
|
4e766be06073a495ff9654f0dd8c0bb03310c559
|
[
"MIT"
] | 2
|
2021-01-14T15:07:15.000Z
|
2021-12-21T07:18:05.000Z
|
what_is_the_python_decorator/demo7.py
|
NightmareQAQ/python-notes
|
4e766be06073a495ff9654f0dd8c0bb03310c559
|
[
"MIT"
] | 42
|
2017-07-31T07:07:38.000Z
|
2021-12-26T09:36:55.000Z
|
def my_logging(func):
    """Logging decorator, deliberately written WITHOUT functools.wraps.

    Because the wrapper does not copy the wrapped function's metadata, the
    decorated function's __name__/__doc__ become the wrapper's — which is
    exactly what the demo prints below illustrate. Do not "fix" this here.
    """
    def wrapper(*args, **kwargs):
        """my wrapper"""
        print('logging - {} is running'.format(func.__name__))
        func(*args, **kwargs)
    return wrapper
@my_logging
def f1(*args, **kwargs):
    """f1 function"""
    # Echo each positional argument, then each keyword pair.
    print("f1")
    for positional in args:
        print('hello {}'.format(positional))
    for key, val in kwargs.items():
        print('{0} = {1}'.format(key, val))
# Demo: because my_logging does not use functools.wraps, the decorated f1
# exposes the wrapper's metadata, as the two prints below show.
f1('twtrubiks', apple='fruit', cabbage='vegetable')
print('f1.__name__', f1.__name__) # output -> 'wrapper'
print('f1.__doc__', f1.__doc__) # output -> 'my wrapper'
| 23.64
| 62
| 0.590525
|
4a0ff137a730764924b446333ee0f5952a618206
| 34,856
|
py
|
Python
|
src/sqlfluff/core/parser/grammar/base.py
|
iserko/sqlfluff
|
c0bad78f3fa9549591738c77f869724f721e6830
|
[
"MIT"
] | null | null | null |
src/sqlfluff/core/parser/grammar/base.py
|
iserko/sqlfluff
|
c0bad78f3fa9549591738c77f869724f721e6830
|
[
"MIT"
] | null | null | null |
src/sqlfluff/core/parser/grammar/base.py
|
iserko/sqlfluff
|
c0bad78f3fa9549591738c77f869724f721e6830
|
[
"MIT"
] | null | null | null |
"""Base grammar, Ref, Anything and Nothing."""
import copy
from typing import List, NamedTuple, Optional, Union, Type, Tuple
from sqlfluff.core.errors import SQLParseError
from sqlfluff.core.string_helpers import curtail_string
from sqlfluff.core.parser.segments import BaseSegment, EphemeralSegment
from sqlfluff.core.parser.helpers import trim_non_code_segments
from sqlfluff.core.parser.match_result import MatchResult
from sqlfluff.core.parser.match_logging import (
parse_match_logging,
LateBoundJoinSegmentsCurtailed,
)
from sqlfluff.core.parser.match_wrapper import match_wrapper
from sqlfluff.core.parser.matchable import Matchable
from sqlfluff.core.parser.context import ParseContext
# Type alias: anything that can be matched against — either a grammar
# *instance* or a segment CLASS (segments implement `match` as a classmethod).
MatchableType = Union[Matchable, Type[BaseSegment]]
def cached_method_for_parse_context(func):
    """A decorator to cache the output of this method for a given parse context.

    This cache automatically invalidates if the uuid
    of the parse context changes. The value is stored
    in the __dict__ attribute of the class against a
    key unique to that function.
    """
    # Local import keeps this decorator self-contained.
    from functools import wraps

    cache_key = "__cache_" + func.__name__

    @wraps(func)  # preserve the wrapped method's __name__/__doc__ for debugging
    def wrapped_method(self, parse_context):
        """Cache the output of the method against a given parse context."""
        cache_tuple: Tuple = self.__dict__.get(cache_key, (None, None))
        # Do we currently have a cached value for this exact parse context?
        if cache_tuple[0] == parse_context.uuid:
            return cache_tuple[1]
        # Generate a new value, cache it (keyed by the context uuid) and return.
        result = func(self, parse_context=parse_context)
        self.__dict__[cache_key] = (parse_context.uuid, result)
        return result

    return wrapped_method
class BaseGrammar(Matchable):
    """Grammars are a way of composing match statements.
    Any grammar must implement the `match` function. Segments can also be
    passed to most grammars. Segments implement `match` as a classmethod. Grammars
    implement it as an instance method.
    """
    is_meta = False
    # Are we allowed to refer to keywords as strings instead of only passing
    # grammars or segments?
    allow_keyword_string_refs = True
    @staticmethod
    def _resolve_ref(elem):
        """Resolve potential string references to things we can match against."""
        initialisers = [
            # t: instance / f: class, ref, func
            (True, str, Ref.keyword),
            (True, BaseGrammar, lambda x: x),
            (False, BaseSegment, lambda x: x),
        ]
        # Get-out clause for None
        if elem is None:
            return None
        for instance, init_type, init_func in initialisers:
            if (instance and isinstance(elem, init_type)) or (
                not instance and issubclass(elem, init_type)
            ):
                return init_func(elem)
        raise TypeError(
            "Grammar element [{0!r}] was found of unexpected type [{1}].".format(
                elem, type(elem)
            )
        )
    def __init__(
        self,
        *args,
        allow_gaps=True,
        optional=False,
        ephemeral_name=None,
    ):
        """Deal with kwargs common to all grammars.
        Args:
            *args: Any number of elements which because the subjects
                of this grammar.
            allow_gaps (:obj:`bool`, optional): Does this instance of the
                grammar allow gaps between the elements it matches? This
                may be exhibited slightly differently in each grammar. See
                that grammar for details. Defaults `True`.
            optional (:obj:`bool`, optional): In the context of a sequence,
                is this grammar *optional*, i.e. can it be skipped if no
                match is found. Outside of a Sequence, this option does nothing.
                Defaults `False`.
            ephemeral_name (:obj:`str`, optional): If specified this allows
                the grammar to match anything, and create an EphemeralSegment
                with the given name in its place. The content of this grammar
                is passed to the segment, and will become the parse grammar
                for it. If used widely this is an excellent way of breaking
                up the parse process and also signposting the name of a given
                chunk of code that might be parsed separately.
        """
        # We provide a common interface for any grammar that allows positional elements.
        # If *any* for the elements are a string and not a grammar, then this is a shortcut
        # to the Ref.keyword grammar by default.
        if self.allow_keyword_string_refs:
            self._elements = []
            for elem in args:
                self._elements.append(self._resolve_ref(elem))
        else:
            self._elements = args
        # Now we deal with the standard kwargs
        self.allow_gaps = allow_gaps
        self.optional = optional
        self.ephemeral_segment = None
        # Set up the ephemeral_segment if name is specified.
        if ephemeral_name:
            # Make the EphemeralSegment class. This is effectively syntactic sugar
            # to allow us to avoid specifying a EphemeralSegment directly in a dialect.
            # Copy self (*before* making the EphemeralSegment, but with everything else in place)
            parse_grammar = copy.copy(self)
            # Add the EphemeralSegment to self.
            self.ephemeral_segment = EphemeralSegment.make(
                match_grammar=None,
                # Pass in the copy without the EphemeralSegment
                parse_grammar=parse_grammar,
                name=ephemeral_name,
            )
    def is_optional(self):
        """Return whether this segment is optional.
        The optional attribute is set in the __init__ method.
        """
        return self.optional
    @match_wrapper()
    def match(self, segments: Tuple["BaseSegment", ...], parse_context: ParseContext):
        """Match a list of segments against this segment.
        Matching can be done from either the raw or the segments.
        This raw function can be overridden, or a grammar defined
        on the underlying class.
        """
        raise NotImplementedError(
            "{0} has no match function implemented".format(self.__class__.__name__)
        )
    @cached_method_for_parse_context
    def simple(self, parse_context: ParseContext) -> Optional[List[str]]:
        """Does this matcher support a lowercase hash matching route?"""
        return None
    @staticmethod
    def _iter_raw_segs(segments):
        for segment in segments:
            yield from segment.iter_raw_seg()
    @classmethod
    def _longest_trimmed_match(
        cls,
        segments: Tuple["BaseSegment", ...],
        matchers: List["MatchableType"],
        parse_context: ParseContext,
        trim_noncode=True,
    ) -> Tuple[MatchResult, Optional["MatchableType"]]:
        """Return longest match from a selection of matchers.
        Prioritise the first match, and if multiple match at the same point the longest.
        If two matches of the same length match at the same time, then it's the first in
        the iterable of matchers.
        Returns:
            `tuple` of (match_object, matcher).
        """
        # Have we been passed an empty list?
        if len(segments) == 0:
            return MatchResult.from_empty(), None
        # If gaps are allowed, trim the ends.
        if trim_noncode:
            pre_nc, segments, post_nc = trim_non_code_segments(segments)
        # NB: `best_match` is only assigned when best_match_length > 0, so
        # every read of it below is guarded by that counter.
        best_match_length = 0
        # iterate at this position across all the matchers
        for matcher in matchers:
            # MyPy seems to require a type hint here. Not quite sure why.
            res_match: MatchResult = matcher.match(
                segments, parse_context=parse_context
            )
            if res_match.is_complete():
                # Just return it! (WITH THE RIGHT OTHER STUFF)
                if trim_noncode:
                    return (
                        MatchResult.from_matched(
                            pre_nc + res_match.matched_segments + post_nc
                        ),
                        matcher,
                    )
                else:
                    return res_match, matcher
            elif res_match:
                # We've got an incomplete match, if it's the best so far keep it.
                if res_match.matched_length > best_match_length:
                    best_match = res_match, matcher
                    best_match_length = res_match.matched_length
        # If we get here, then there wasn't a complete match. If we
        # have a best_match, return that.
        if best_match_length > 0:
            if trim_noncode:
                return (
                    MatchResult(
                        pre_nc + best_match[0].matched_segments,
                        best_match[0].unmatched_segments + post_nc,
                    ),
                    best_match[1],
                )
            else:
                return best_match
        # If no match at all, return nothing
        return MatchResult.from_unmatched(segments), None
    @classmethod
    def _look_ahead_match(cls, segments, matchers, parse_context):
        """Look ahead for matches beyond the first element of the segments list.
        This function also contains the performance improved hash-matching approach to
        searching for matches, which should significantly improve performance.
        Prioritise the first match, and if multiple match at the same point the longest.
        If two matches of the same length match at the same time, then it's the first in
        the iterable of matchers.
        Returns:
            `tuple` of (unmatched_segments, match_object, matcher).
        """
        parse_match_logging(
            cls.__name__,
            "_look_ahead_match",
            "IN",
            parse_context=parse_context,
            v_level=4,
            ls=len(segments),
            seg=LateBoundJoinSegmentsCurtailed(segments),
        )
        # Do some type munging
        matchers = list(matchers)
        if isinstance(segments, BaseSegment):
            segments = [segments]
        # Have we been passed an empty list?
        if len(segments) == 0:
            return ((), MatchResult.from_empty(), None)
        # Here we enable a performance optimisation. Most of the time in this cycle
        # happens in loops looking for simple matchers which we should
        # be able to find a shortcut for.
        # First: Assess the matchers passed in, if any are
        # "simple", then we effectively use a hash lookup across the
        # content of segments to quickly evaluate if the segment is present.
        # Matchers which aren't "simple" still take a slower route.
        _matchers = [
            (matcher, matcher.simple(parse_context=parse_context))
            for matcher in matchers
        ]
        simple_matchers = [matcher for matcher in _matchers if matcher[1]]
        non_simple_matchers = [matcher[0] for matcher in _matchers if not matcher[1]]
        best_simple_match = None
        if simple_matchers:
            # If they're all simple we can use a hash match to identify the first one.
            # Build a buffer of all the upper case raw segments ahead of us.
            # For existing compound segments, we should assume that within
            # that segment, things are internally consistent, that means
            # rather than enumerating all the individual segments of a longer
            # one we just dump out the whole segment, but splitting off the
            # first element separated by whitespace. This is a) faster and
            # also b) prevents some really horrible bugs with bracket matching.
            # See https://github.com/sqlfluff/sqlfluff/issues/433
            def _trim_elem(seg):
                s = seg.raw_upper.split(maxsplit=1)
                return s[0] if s else ""
            str_buff = [_trim_elem(seg) for seg in segments]
            match_queue = []
            for matcher, simple in simple_matchers:
                # Simple will be a tuple of options
                for simple_option in simple:
                    try:
                        buff_pos = str_buff.index(simple_option)
                        match_queue.append((matcher, buff_pos, simple_option))
                    except ValueError:
                        pass
            # Sort the match queue. First to process AT THE END.
            # That means we pop from the end.
            match_queue = sorted(match_queue, key=lambda x: x[1])
            parse_match_logging(
                cls.__name__,
                "_look_ahead_match",
                "SI",
                parse_context=parse_context,
                v_level=4,
                mq=match_queue,
                sb=str_buff,
            )
            while match_queue:
                # We've managed to match. We can shortcut home.
                # NB: We may still need to deal with whitespace.
                queued_matcher, queued_buff_pos, queued_option = match_queue.pop()
                # Here we do the actual transform to the new segment.
                match = queued_matcher.match(segments[queued_buff_pos:], parse_context)
                if not match:
                    # We've had something match in simple matching, but then later excluded.
                    # Log but then move on to the next item on the list.
                    parse_match_logging(
                        cls.__name__,
                        "_look_ahead_match",
                        "NM",
                        parse_context=parse_context,
                        v_level=4,
                        _so=queued_option,
                    )
                    continue
                # Ok we have a match. Because we sorted the list, we'll take it!
                best_simple_match = (segments[:queued_buff_pos], match, queued_matcher)
        if not non_simple_matchers:
            # There are no other matchers, we can just shortcut now.
            parse_match_logging(
                cls.__name__,
                "_look_ahead_match",
                "SC",
                parse_context=parse_context,
                v_level=4,
                bsm=None
                if not best_simple_match
                else (
                    len(best_simple_match[0]),
                    len(best_simple_match[1]),
                    best_simple_match[2],
                ),
            )
            if best_simple_match:
                return best_simple_match
            else:
                return ((), MatchResult.from_unmatched(segments), None)
        # Make some buffers
        seg_buff = segments
        pre_seg_buff = ()  # NB: Tuple
        # Loop
        while True:
            # Do we have anything left to match on?
            if seg_buff:
                # Great, carry on.
                pass
            else:
                # We've got to the end without a match, return empty
                return ((), MatchResult.from_unmatched(segments), None)
            # We only check the NON-simple ones here for brevity.
            mat, m = cls._longest_trimmed_match(
                seg_buff,
                non_simple_matchers,
                parse_context=parse_context,
                trim_noncode=False,
            )
            if mat and not best_simple_match:
                return (pre_seg_buff, mat, m)
            elif mat:
                # It will be earlier than the simple one if we've even checked,
                # but there's a chance that this might be *longer*, or just FIRST.
                pre_lengths = (len(pre_seg_buff), len(best_simple_match[0]))
                mat_lengths = (len(mat), len(best_simple_match[1]))
                mat_indexes = (matchers.index(m), matchers.index(best_simple_match[2]))
                if (
                    (pre_lengths[0] < pre_lengths[1])
                    or (
                        pre_lengths[0] == pre_lengths[1]
                        and mat_lengths[0] > mat_lengths[1]
                    )
                    or (
                        pre_lengths[0] == pre_lengths[1]
                        and mat_lengths[0] == mat_lengths[1]
                        and mat_indexes[0] < mat_indexes[1]
                    )
                ):
                    return (pre_seg_buff, mat, m)
                else:
                    return best_simple_match
            else:
                # If there aren't any matches, then advance the buffer and try again.
                # Two improvements:
                # 1) if we get as far as the first simple match, then return that.
                # 2) be eager in consuming non-code segments if allowed
                if best_simple_match and len(pre_seg_buff) >= len(best_simple_match[0]):
                    return best_simple_match
                pre_seg_buff += (seg_buff[0],)
                seg_buff = seg_buff[1:]
    @classmethod
    def _bracket_sensitive_look_ahead_match(
        cls, segments, matchers, parse_context, start_bracket=None, end_bracket=None
    ):
        """Same as `_look_ahead_match` but with bracket counting.
        NB: Given we depend on `_look_ahead_match` we can also utilise
        the same performance optimisations which are implemented there.
        Returns:
            `tuple` of (unmatched_segments, match_object, matcher).
        """
        class BracketInfo(NamedTuple):
            bracket: BaseSegment
            is_definite: bool
        # Type munging
        matchers = list(matchers)
        if isinstance(segments, BaseSegment):
            segments = [segments]
        # Have we been passed an empty list?
        if len(segments) == 0:
            return ((), MatchResult.from_unmatched(segments), None)
        # Get hold of the bracket matchers from the dialect, and append them
        # to the list of matchers. We get them from the relevant set on the
        # dialect. We use zip twice to "unzip" them. We ignore the first
        # argument because that's just the name.
        _, start_bracket_refs, end_bracket_refs, definitely_bracket = zip(
            *parse_context.dialect.sets("bracket_pairs")
        )
        # These are currently strings which need rehydrating
        start_brackets = [
            parse_context.dialect.ref(seg_ref) for seg_ref in start_bracket_refs
        ]
        end_brackets = [
            parse_context.dialect.ref(seg_ref) for seg_ref in end_bracket_refs
        ]
        start_definite = list(definitely_bracket)
        end_definite = list(definitely_bracket)
        # Add any bracket-like things passed as arguments
        if start_bracket:
            start_brackets += [start_bracket]
            start_definite += [True]
        if end_bracket:
            end_brackets += [end_bracket]
            end_definite += [True]
        bracket_matchers = start_brackets + end_brackets
        # Make some buffers
        seg_buff = segments
        pre_seg_buff = ()  # NB: Tuple
        bracket_stack: List[BracketInfo] = []
        # Iterate
        while True:
            # Do we have anything left to match on?
            if seg_buff:
                # Yes we have buffer left to work with.
                # Are we already in a bracket stack?
                if bracket_stack:
                    # Yes, we're just looking for the closing bracket, or
                    # another opening bracket.
                    pre, match, matcher = cls._look_ahead_match(
                        seg_buff,
                        bracket_matchers,
                        parse_context=parse_context,
                    )
                    if match:
                        # NB: We can only consider this as a nested bracket if the start
                        # and end tokens are not the same. If a matcher is both a start and
                        # end token we cannot deepen the bracket stack. In general, quoted
                        # strings are a typical example where the start and end tokens are
                        # the same. Currently, though, quoted strings are handled elsewhere
                        # in the parser, and there are no cases where *this* code has to
                        # handle identical start and end brackets. For now, consider this
                        # a small, speculative investment in a possible future requirement.
                        if matcher in start_brackets and matcher not in end_brackets:
                            # Same procedure as below in finding brackets.
                            bracket_stack.append(
                                BracketInfo(
                                    bracket=match.matched_segments[0],
                                    is_definite=start_definite[
                                        start_brackets.index(matcher)
                                    ],
                                )
                            )
                            pre_seg_buff += pre
                            pre_seg_buff += match.matched_segments
                            seg_buff = match.unmatched_segments
                            continue
                        elif matcher in end_brackets:
                            # Found an end bracket. Does its type match that of
                            # the innermost start bracket (e.g. ")" matches "(",
                            # "]" matches "[".
                            start_index = start_brackets.index(
                                type(bracket_stack[-1].bracket)
                            )
                            end_index = end_brackets.index(matcher)
                            bracket_types_match = start_index == end_index
                            if bracket_types_match:
                                # Yes, the types match. So we've found a
                                # matching end bracket. Pop the stack and carry
                                # on.
                                bracket_stack.pop()
                                pre_seg_buff += pre
                                pre_seg_buff += match.matched_segments
                                seg_buff = match.unmatched_segments
                                continue
                            else:
                                # The types don't match. Check whether the end
                                # bracket is a definite bracket.
                                end_is_definite = end_definite[end_index]
                                if not end_is_definite:
                                    # The end bracket whose type didn't match
                                    # the innermost open bracket is not
                                    # definite. Assume it's not a bracket and
                                    # carry on.
                                    pre_seg_buff += pre
                                    pre_seg_buff += match.matched_segments
                                    seg_buff = match.unmatched_segments
                                else:
                                    # Definite end bracket does not match the
                                    # innermost start bracket. Was the innermost
                                    # start bracket definite? If yes, error. If
                                    # no, assume it was not a bracket.
                                    # Can we remove any brackets from the stack which aren't definites
                                    # to resolve the issue?
                                    for idx in range(len(bracket_stack) - 1, -1, -1):
                                        if not bracket_stack[idx].is_definite:
                                            del bracket_stack[idx]
                                            # We don't change the string buffer, we assume that was ok.
                                            break
                                    else:
                                        raise SQLParseError(
                                            f"Found unexpected end bracket!, was expecting {end_brackets[start_index]}, but got {matcher}",
                                            segment=match.matched_segments[0],
                                        )
                        else:
                            raise RuntimeError("I don't know how we get here?!")
                    else:
                        # No match, we're in a bracket stack. Either this is an error,
                        # OR we were mistaken in our initial identification of the opening
                        # bracket. That's only allowed if `not definitely_bracket`.
                        # Can we remove any brackets from the stack which aren't definites
                        # to resolve the issue?
                        # NB: Iterate from the innermost bracket outwards, mirroring the
                        # identical recovery loop above. (A previous version used
                        # `del bracket_stack[-idx]` over `enumerate(reversed(...))`,
                        # which deletes the *outermost* entry when idx == 0.)
                        for idx in range(len(bracket_stack) - 1, -1, -1):
                            if not bracket_stack[idx].is_definite:
                                del bracket_stack[idx]
                                # We don't change the string buffer, we assume that was ok.
                                break
                        else:
                            # No we can't. We don't have a match and we're in a bracket stack.
                            raise SQLParseError(
                                "Couldn't find closing bracket for opening bracket.",
                                segment=bracket_stack[-1].bracket,
                            )
                        # We have attempted a potential solution to the problem. Loop around.
                        continue
                else:
                    # No, we're open to more opening brackets or the thing(s)
                    # that we're otherwise looking for.
                    pre, match, matcher = cls._look_ahead_match(
                        seg_buff,
                        matchers + bracket_matchers,
                        parse_context=parse_context,
                    )
                    if match:
                        if matcher in matchers:
                            # It's one of the things we were looking for!
                            # Return.
                            return (pre_seg_buff + pre, match, matcher)
                        elif matcher in start_brackets:
                            # We've found the start of a bracket segment.
                            # NB: It might not *actually* be the bracket itself,
                            # but could be some non-code element preceding it.
                            # That's actually ok.
                            # Add the bracket to the stack.
                            bracket_stack.append(
                                BracketInfo(
                                    bracket=match.matched_segments[0],
                                    is_definite=start_definite[
                                        start_brackets.index(matcher)
                                    ],
                                )
                            )
                            # Add the matched elements and anything before it to the
                            # pre segment buffer. Reset the working buffer.
                            pre_seg_buff += pre
                            pre_seg_buff += match.matched_segments
                            seg_buff = match.unmatched_segments
                            continue
                        elif matcher in end_brackets:
                            # each bracket with its "definite" attribute
                            bracket_is_definite = end_definite[
                                end_brackets.index(matcher)
                            ]
                            if bracket_is_definite:
                                # We've found an unexpected end bracket!
                                raise SQLParseError(
                                    f"Found unexpected end bracket!, was expecting one of: {matchers + bracket_matchers}, but got {matcher}",
                                    segment=match.matched_segments[0],
                                )
                            pre_seg_buff += pre
                            pre_seg_buff += match.matched_segments
                            seg_buff = match.unmatched_segments
                            continue
                        else:
                            # This shouldn't happen!?
                            raise NotImplementedError(
                                "This shouldn't happen. Panic in _bracket_sensitive_look_ahead_match."
                            )
                    else:
                        # Not in a bracket stack, but no match. This is a happy
                        # unmatched exit.
                        return ((), MatchResult.from_unmatched(segments), None)
            else:
                # No we're at the end:
                # Now check have we closed all our brackets?
                if bracket_stack:
                    # No we haven't.
                    # Check that the unclosed brackets are definite
                    definite_bracket_stack = [b for b in bracket_stack if b.is_definite]
                    if definite_bracket_stack:
                        raise SQLParseError(
                            f"Couldn't find closing bracket for opened brackets: `{bracket_stack}`.",
                            segment=bracket_stack[-1].bracket,
                        )
                # We at the end but without a bracket left open. This is a
                # friendly unmatched return.
                return ((), MatchResult.from_unmatched(segments), None)
    def __str__(self):
        return repr(self)
    def __repr__(self):
        return "<{0}: [{1}]>".format(
            self.__class__.__name__,
            curtail_string(
                ", ".join(curtail_string(repr(elem), 40) for elem in self._elements),
                100,
            ),
        )
class Ref(BaseGrammar):
    """A kind of meta-grammar that references other grammars by name at runtime."""
    # We can't allow keyword refs here, because it doesn't make sense
    # and it also causes infinite recursion.
    allow_keyword_string_refs = False
    @cached_method_for_parse_context
    def simple(self, parse_context: ParseContext) -> Optional[List[str]]:
        """Does this matcher support a uppercase hash matching route?
        A ref is simple, if the thing it references is simple.
        """
        # Delegate to the referenced grammar, resolved via the dialect.
        return self._get_elem(dialect=parse_context.dialect).simple(
            parse_context=parse_context
        )
    def _get_ref(self):
        """Get the name of the thing we're referencing."""
        # Unusually for a grammar we expect _elements to be a list of strings.
        # Notable ONE string for now.
        if len(self._elements) == 1:
            # We're good on length. Get the name of the reference
            return self._elements[0]
        else:
            raise ValueError(
                "Ref grammar can only deal with precisely one element for now. Instead found {0!r}".format(
                    self._elements
                )
            )
    def _get_elem(self, dialect):
        """Get the actual object we're referencing."""
        if dialect:
            # Use the dialect to retrieve the grammar it refers to.
            return dialect.ref(self._get_ref())
        else:
            raise ReferenceError("No Dialect has been provided to Ref grammar!")
    def __repr__(self):
        return "<Ref: {0}{1}>".format(
            ", ".join(self._elements), " [opt]" if self.is_optional() else ""
        )
    @match_wrapper(v_level=4) # Log less for Ref
    def match(self, segments, parse_context):
        """Match a list of segments against this segment.
        Matching can be done from either the raw or the segments.
        This raw function can be overridden, or a grammar defined
        on the underlying class.
        The match element of Ref, also implements the caching
        using the parse_context `blacklist` methods.
        """
        elem = self._get_elem(dialect=parse_context.dialect)
        if not elem:
            raise ValueError(
                "Null Element returned! _elements: {0!r}".format(self._elements)
            )
        # First check against the efficiency Cache.
        # We used to use seg_to_tuple here, but it was too slow,
        # so instead we rely on segments not being mutated within a given
        # match cycle and so the ids should continue to refer to unchanged
        # objects.
        # NOTE(review): seg_tuple is a *generator*; if blacklist.check()
        # consumes it, the later blacklist.mark() call receives it already
        # exhausted — confirm the blacklist's expected input semantics.
        seg_tuple = (id(seg) for seg in segments)
        self_name = self._get_ref()
        if parse_context.blacklist.check(self_name, seg_tuple):
            # This has been tried before.
            parse_match_logging(
                self.__class__.__name__,
                "match",
                "SKIP",
                parse_context=parse_context,
                v_level=3,
                self_name=self_name,
            )
            return MatchResult.from_unmatched(segments)
        # Match against that. NB We're not incrementing the match_depth here.
        # References shouldn't really count as a depth of match.
        with parse_context.matching_segment(self._get_ref()) as ctx:
            resp = elem.match(segments=segments, parse_context=ctx)
        if not resp:
            # Cache the failure so the same (ref, segments) pair is skipped next time.
            parse_context.blacklist.mark(self_name, seg_tuple)
        return resp
    @classmethod
    def keyword(cls, keyword, **kwargs):
        """Generate a reference to a keyword by name.
        This function is entirely syntactic sugar, and designed
        for more readable dialects.
        Ref.keyword('select') == Ref('SelectKeywordSegment')
        """
        name = keyword.capitalize() + "KeywordSegment"
        return cls(name, **kwargs)
class Anything(BaseGrammar):
    """Matches anything."""
    def match(self, segments, parse_context):
        """Matches... Anything.
        Most useful in match grammars, where a later parse grammar
        will work out what's inside.
        """
        # Claim every segment as matched; detailed parsing is deferred.
        return MatchResult.from_matched(segments)
class Nothing(BaseGrammar):
    """Matches nothing.
    Useful for placeholders which might be overwritten by other
    dialects.
    """
    def match(self, segments, parse_context):
        """Matches... nothing.
        Useful for placeholders which might be overwritten by other
        dialects.
        """
        # Always report an unmatched result.
        return MatchResult.from_unmatched(segments)
| 42.979038
| 141
| 0.538128
|
4a0ff1f1c7aca8e0a9102d9659954d2156fc56de
| 1,234
|
py
|
Python
|
alphacsc/other/sdtw/distance.py
|
sophiaas/alphacsc
|
402b8f6c8ee4ba9c86e9da0e2073d900cf8da207
|
[
"BSD-3-Clause"
] | 89
|
2017-05-31T19:20:52.000Z
|
2022-03-22T09:52:17.000Z
|
alphacsc/other/sdtw/distance.py
|
sophiaas/alphacsc
|
402b8f6c8ee4ba9c86e9da0e2073d900cf8da207
|
[
"BSD-3-Clause"
] | 75
|
2017-07-15T14:03:40.000Z
|
2022-03-29T17:31:58.000Z
|
alphacsc/other/sdtw/distance.py
|
sophiaas/alphacsc
|
402b8f6c8ee4ba9c86e9da0e2073d900cf8da207
|
[
"BSD-3-Clause"
] | 35
|
2017-06-16T12:48:23.000Z
|
2022-03-21T09:49:55.000Z
|
import numpy as np
from sklearn.metrics.pairwise import euclidean_distances
from .soft_dtw_fast import _jacobian_product_sq_euc
class SquaredEuclidean(object):
    """Squared Euclidean ground cost between two time series."""

    def __init__(self, X, Y):
        """Store both series, cast once up front to float64.

        Parameters
        ----------
        X: array, shape = [m, d]
            First time series.
        Y: array, shape = [n, d]
            Second time series.
        """
        self.X, self.Y = X.astype(np.float64), Y.astype(np.float64)

    def compute(self):
        """Compute the pairwise distance matrix.

        Returns
        -------
        D: array, shape = [m, n]
            Distance matrix.
        """
        distance_matrix = euclidean_distances(self.X, self.Y, squared=True)
        return distance_matrix

    def jacobian_product(self, E):
        """Multiply the Jacobian (a linear map from m x d to m x n) by E.

        Parameters
        ----------
        E: array, shape = [m, n]
            Second time series.

        Returns
        -------
        G: array, shape = [m, d]
            Product with Jacobian
            ([m x d, m x n] * [m x n] = [m x d]).
        """
        product = np.zeros_like(self.X)
        # The Cython kernel accumulates the result into `product` in place.
        _jacobian_product_sq_euc(self.X, self.Y, E, product)
        return product
| 22.436364
| 64
| 0.505673
|
4a0ff339c61d6d2de68af5df0b9d5fa4853928c9
| 3,435
|
py
|
Python
|
qml/Lib/site-packages/dash_bootstrap_components/_components/Alert.py
|
SammyOngaya/qml
|
d4ede647a0ea79c9c4f07d4219e6576a55a3c343
|
[
"Apache-2.0"
] | 1
|
2021-04-04T09:14:03.000Z
|
2021-04-04T09:14:03.000Z
|
qml/Lib/site-packages/dash_bootstrap_components/_components/Alert.py
|
SammyOngaya/qml
|
d4ede647a0ea79c9c4f07d4219e6576a55a3c343
|
[
"Apache-2.0"
] | null | null | null |
qml/Lib/site-packages/dash_bootstrap_components/_components/Alert.py
|
SammyOngaya/qml
|
d4ede647a0ea79c9c4f07d4219e6576a55a3c343
|
[
"Apache-2.0"
] | null | null | null |
# AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class Alert(Component):
    """An Alert component.
Alert allows you to create contextual feedback messages on user actions.
Control the visibility using callbacks with the `is_open` prop, or set it to
auto-dismiss with the `duration` prop.
Keyword arguments:
- children (a list of or a singular dash component, string or number; optional): The children of this component.
- id (string; optional): The ID of this component, used to identify dash components
in callbacks. The ID needs to be unique across all of the
components in an app.
- style (dict; optional): Defines CSS styles which will override styles previously set.
- className (string; optional): Often used with CSS to style elements with common properties.
- key (string; optional): A unique identifier for the component, used to improve
performance by React.js while rendering components
See https://reactjs.org/docs/lists-and-keys.html for more info
- color (string; optional): Alert color, options: primary, secondary, success, info, warning, danger,
link or any valid CSS color of
your choice (e.g. a hex code, a decimal code or a CSS color name)
Default: secondary.
- is_open (boolean; default True): Whether alert is open. Default: True.
- fade (boolean; optional): If True, a fade animation will be applied when `is_open` is toggled. If
False the Alert will simply appear and disappear.
- dismissable (boolean; optional): If true, add a close button that allows Alert to be dismissed.
- duration (number; optional): Duration in milliseconds after which the Alert dismisses itself.
- loading_state (dict; optional): Object that holds the loading state object coming from dash-renderer. loading_state has the following type: dict containing keys 'is_loading', 'prop_name', 'component_name'.
Those keys have the following types:
  - is_loading (boolean; optional): Determines if the component is loading or not
  - prop_name (string; optional): Holds which property is loading
  - component_name (string; optional): Holds the name of the component that is loading"""
    @_explicitize_args
    def __init__(self, children=None, id=Component.UNDEFINED, style=Component.UNDEFINED, className=Component.UNDEFINED, key=Component.UNDEFINED, color=Component.UNDEFINED, is_open=Component.UNDEFINED, fade=Component.UNDEFINED, dismissable=Component.UNDEFINED, duration=Component.UNDEFINED, loading_state=Component.UNDEFINED, **kwargs):
        self._prop_names = ['children', 'id', 'style', 'className', 'key', 'color', 'is_open', 'fade', 'dismissable', 'duration', 'loading_state']
        self._type = 'Alert'
        self._namespace = 'dash_bootstrap_components'
        self._valid_wildcard_attributes = []
        self.available_properties = ['children', 'id', 'style', 'className', 'key', 'color', 'is_open', 'fade', 'dismissable', 'duration', 'loading_state']
        self.available_wildcard_properties = []
        # `_explicit_args` is injected by the @_explicitize_args decorator above.
        _explicit_args = kwargs.pop('_explicit_args')
        _locals = locals()
        _locals.update(kwargs)  # For wildcard attrs
        args = {k: _locals[k] for k in _explicit_args if k != 'children'}
        # Alert has no required props; the generator emits this loop unconditionally.
        for k in []:
            if k not in args:
                raise TypeError(
                    'Required argument `' + k + '` was not specified.')
        super(Alert, self).__init__(children=children, **args)
| 61.339286
| 335
| 0.728384
|
4a0ff3515ceb977c8e4ea9001c2997e10aeb8d94
| 774
|
py
|
Python
|
demos/gtk3/02-hello-world-test/buildconf.py
|
pustotnik/raven
|
adb75d04a1ce719266eb34c29b35151dfaf91a8a
|
[
"BSD-3-Clause"
] | 2
|
2019-10-14T05:05:34.000Z
|
2022-03-28T04:55:00.000Z
|
demos/gtk3/02-hello-world-test/buildconf.py
|
pustotnik/raven
|
adb75d04a1ce719266eb34c29b35151dfaf91a8a
|
[
"BSD-3-Clause"
] | 42
|
2020-08-25T07:59:32.000Z
|
2021-11-15T03:12:29.000Z
|
demos/gtk3/02-hello-world-test/buildconf.py
|
pustotnik/raven
|
adb75d04a1ce719266eb34c29b35151dfaf91a8a
|
[
"BSD-3-Clause"
] | 1
|
2021-08-13T13:59:51.000Z
|
2021-08-13T13:59:51.000Z
|
# Build task: a single C program whose GTK3/Pango dependencies are resolved
# via pkg-config at configure time (with version constraints and generated
# HAVE_*/version defines for selected packages).
tasks = {
    'hello' : {
        'features' : 'cprogram',
        'source' : 'hello.c',
        'configure' : [
            {
                'do' : 'pkgconfig',
                'packages' : 'gtk+-3.0 > 1 pango gtk+-3.0 <= 100 ',
                'tool-atleast-version' : '0.1',
                'pkg-version' : True,
                #'defnames' : False,
                'defnames' : {
                    'gtk+-3.0' : { 'have' : 'WE_HAVE_GTK3', 'version': 'GTK3_VER' },
                    'pango' : { 'version': 'LIBPANGO_VER' },
                },
                #'mandatory' : False,
            },
        ],
    },
}
# Compiler flags per build type; 'default' selects the buildtype used when
# none is given on the command line.
buildtypes = {
    'debug' : {
        'cflags' : '-O0 -g',
    },
    'release' : {
        'cflags' : '-O2',
    },
    'default' : 'debug',
}
| 23.454545
| 84
| 0.342377
|
4a0ff4cfe2158f2549896d70fa887c9063ea00c4
| 3,771
|
py
|
Python
|
CEAM/settings.py
|
egruttadauria98/Crypto-Efficient-Asset-Management
|
ce4d47a8fe0b3a01bbad654e45afb5c335a2dc0f
|
[
"MIT"
] | 6
|
2021-11-29T14:05:56.000Z
|
2022-03-26T21:41:49.000Z
|
CEAM/settings.py
|
egruttadauria98/Crypto-Efficient-Asset-Management
|
ce4d47a8fe0b3a01bbad654e45afb5c335a2dc0f
|
[
"MIT"
] | null | null | null |
CEAM/settings.py
|
egruttadauria98/Crypto-Efficient-Asset-Management
|
ce4d47a8fe0b3a01bbad654e45afb5c335a2dc0f
|
[
"MIT"
] | 2
|
2021-12-24T16:02:31.000Z
|
2021-12-25T11:25:44.000Z
|
"""
Django settings for CEAM project.
Generated by 'django-admin startproject' using Django 3.2.9.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-_rrnm#p4$5lf^sb60^@t!tvl^40u%sesdn$d=sja&kq%1)qd45'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['13.40.105.101', 'localhost', '127.0.0.1', '.ngrok.io', '192.168.1.48', '192.168.1.226']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Markowitz',
'rest_framework',
'django_crontab'
]
#('',
CRONJOBS = [
('0 0 12 1 * ?', 'Markowitz.cron.cron.oracle_wakeup')
]
CRONTAB_COMMAND_SUFFIX = '2>/home/ubuntu/Crypto-Efficient-Asset-Management/error_log.txt 1>/home/ubuntu/Crypto-Efficient-Asset-Management/normal_log.txt'
#''Usage of the CRONJOBS above:
#python manage.py crontab add
#python manage.py crontab show
#python manage.py crontab remove
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'CEAM.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates']
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'CEAM.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| 26.935714
| 153
| 0.700345
|
4a0ff641f659065be156577894efa04e30addf32
| 291
|
py
|
Python
|
packaging/get_version.py
|
JWCook/inat-image-tagger
|
2ba48ec849517b32cee1bfe9527f838a4f22cd94
|
[
"MIT"
] | 1
|
2020-05-10T23:17:07.000Z
|
2020-05-10T23:17:07.000Z
|
packaging/get_version.py
|
JWCook/inat-image-tagger
|
2ba48ec849517b32cee1bfe9527f838a4f22cd94
|
[
"MIT"
] | 13
|
2020-05-23T14:56:39.000Z
|
2020-05-24T03:35:21.000Z
|
packaging/get_version.py
|
JWCook/inat-image-tagger
|
2ba48ec849517b32cee1bfe9527f838a4f22cd94
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Print the app version declared in the poetry section of pyproject.toml."""
from pathlib import Path
import tomlkit

# Repository root (this script lives one directory below it).
PROJECT_DIR = Path(__file__).parent.parent.absolute()
pyproject = tomlkit.loads((PROJECT_DIR / 'pyproject.toml').read_bytes())
print(pyproject['tool']['poetry']['version'])
| 22.384615
| 53
| 0.704467
|
4a0ff658df0c6cc8378bea4d7d6282afedc33682
| 5,418
|
py
|
Python
|
travel/docs/Amadeus-master/pactravel-master/swagger_client/models/infant.py
|
shopglobal/api
|
176e1858d3f93e8e7854ba194698b6b9825841da
|
[
"CC-BY-4.0"
] | null | null | null |
travel/docs/Amadeus-master/pactravel-master/swagger_client/models/infant.py
|
shopglobal/api
|
176e1858d3f93e8e7854ba194698b6b9825841da
|
[
"CC-BY-4.0"
] | 1
|
2021-06-01T22:04:28.000Z
|
2021-06-01T22:04:28.000Z
|
travel/docs/Amadeus-master/pactravel-master/swagger_client/models/infant.py
|
shopglobal/api
|
176e1858d3f93e8e7854ba194698b6b9825841da
|
[
"CC-BY-4.0"
] | null | null | null |
# coding: utf-8
"""
Amadeus Travel Innovation Sandbox
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 1.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Infant(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # attribute name -> Swagger type name (consumed by to_dict()).
    swagger_types = {
        'last_name': 'str',
        'first_name': 'str',
        'date_of_birth': 'date'
    }
    # attribute name -> JSON key in the API definition.
    attribute_map = {
        'last_name': 'last_name',
        'first_name': 'first_name',
        'date_of_birth': 'date_of_birth'
    }
    def __init__(self, last_name=None, first_name=None, date_of_birth=None):
        """
        Infant - a model defined in Swagger
        """
        self._last_name = None
        self._first_name = None
        self._date_of_birth = None
        # Assign through the property setters so any validation added there
        # later applies uniformly.
        if last_name is not None:
            self.last_name = last_name
        if first_name is not None:
            self.first_name = first_name
        if date_of_birth is not None:
            self.date_of_birth = date_of_birth
    @property
    def last_name(self):
        """
        Gets the last_name of this Infant.
        The last name of the infant, as entered by the agent, in upper-case. If no value is provided, the last name of the infant can generally be assumed to be the same as that of the traveler with whom they are associated.
        :return: The last_name of this Infant.
        :rtype: str
        """
        return self._last_name
    @last_name.setter
    def last_name(self, last_name):
        """
        Sets the last_name of this Infant.
        The last name of the infant, as entered by the agent, in upper-case. If no value is provided, the last name of the infant can generally be assumed to be the same as that of the traveler with whom they are associated.
        :param last_name: The last_name of this Infant.
        :type: str
        """
        self._last_name = last_name
    @property
    def first_name(self):
        """
        Gets the first_name of this Infant.
        The first name of the infant, as entered by the agent, in upper-case. May include middle names, initials or prefixes.
        :return: The first_name of this Infant.
        :rtype: str
        """
        return self._first_name
    @first_name.setter
    def first_name(self, first_name):
        """
        Sets the first_name of this Infant.
        The first name of the infant, as entered by the agent, in upper-case. May include middle names, initials or prefixes.
        :param first_name: The first_name of this Infant.
        :type: str
        """
        self._first_name = first_name
    @property
    def date_of_birth(self):
        """
        Gets the date_of_birth of this Infant.
        An optional <a href=\"https://en.wikipedia.org/wiki/ISO_8601\">ISO 8601</a> date indicating the birth date of the infant, as provided by the agent. For example: 1972-02-19.
        :return: The date_of_birth of this Infant.
        :rtype: date
        """
        return self._date_of_birth
    @date_of_birth.setter
    def date_of_birth(self, date_of_birth):
        """
        Sets the date_of_birth of this Infant.
        An optional <a href=\"https://en.wikipedia.org/wiki/ISO_8601\">ISO 8601</a> date indicating the birth date of the infant, as provided by the agent. For example: 1972-02-19.
        :param date_of_birth: The date_of_birth of this Infant.
        :type: date
        """
        self._date_of_birth = date_of_birth
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # Recursively serialize nested models, lists of models and dicts of
        # models; plain values pass through unchanged.
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # Structural equality over all instance attributes.
        if not isinstance(other, Infant):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| 29.769231
| 225
| 0.588778
|
4a0ff6943a0cb84d6c75bf1c632bc9804f856890
| 371
|
py
|
Python
|
Django/makeplan_old/user/migrations/0003_auto_20190222_1541.py
|
taoyan/python
|
3fbb1e534ec91b33cd2ac690966d7253c2f5d757
|
[
"MIT"
] | 1
|
2019-05-04T09:21:52.000Z
|
2019-05-04T09:21:52.000Z
|
Django/makeplan_old/user/migrations/0003_auto_20190222_1541.py
|
taoyan/python
|
3fbb1e534ec91b33cd2ac690966d7253c2f5d757
|
[
"MIT"
] | 26
|
2020-07-18T02:02:54.000Z
|
2022-02-26T10:49:58.000Z
|
Django/makeplan_old/user/migrations/0003_auto_20190222_1541.py
|
taoyan/python
|
3fbb1e534ec91b33cd2ac690966d7253c2f5d757
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.5 on 2019-02-22 07:41
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: renames User.date_last_login to last_login."""
    dependencies = [
        ('user', '0002_auto_20190222_1534'),
    ]
    operations = [
        migrations.RenameField(
            model_name='user',
            old_name='date_last_login',
            new_name='last_login',
        ),
    ]
| 19.526316
| 47
| 0.592992
|
4a0ff87033a0540b04a79f0f417672a2b7528875
| 782
|
py
|
Python
|
HLTrigger/Configuration/python/HLT_75e33/eventsetup/hltESPKFFittingSmootherWithOutliersRejectionAndRK_cfi.py
|
PKUfudawei/cmssw
|
8fbb5ce74398269c8a32956d7c7943766770c093
|
[
"Apache-2.0"
] | 1
|
2021-11-30T16:24:46.000Z
|
2021-11-30T16:24:46.000Z
|
HLTrigger/Configuration/python/HLT_75e33/eventsetup/hltESPKFFittingSmootherWithOutliersRejectionAndRK_cfi.py
|
PKUfudawei/cmssw
|
8fbb5ce74398269c8a32956d7c7943766770c093
|
[
"Apache-2.0"
] | 4
|
2021-11-29T13:57:56.000Z
|
2022-03-29T06:28:36.000Z
|
HLTrigger/Configuration/python/HLT_75e33/eventsetup/hltESPKFFittingSmootherWithOutliersRejectionAndRK_cfi.py
|
PKUfudawei/cmssw
|
8fbb5ce74398269c8a32956d7c7943766770c093
|
[
"Apache-2.0"
] | 1
|
2021-11-30T16:16:05.000Z
|
2021-11-30T16:16:05.000Z
|
import FWCore.ParameterSet.Config as cms
# CMSSW event-setup producer: Kalman-filter track fitting-smoother for the
# HLT, configured with outlier rejection and Runge-Kutta trajectory
# fitter/smoother components.  All parameters are consumed by
# KFFittingSmootherESProducer; values here are declarative configuration.
hltESPKFFittingSmootherWithOutliersRejectionAndRK = cms.ESProducer("KFFittingSmootherESProducer",
    BreakTrajWith2ConsecutiveMissing = cms.bool(True),
    ComponentName = cms.string('hltESPKFFittingSmootherWithOutliersRejectionAndRK'),
    # presumably the chi2 threshold for flagging outlier hits — confirm
    # against the KFFittingSmoother documentation.
    EstimateCut = cms.double(20.0),
    Fitter = cms.string('hltESPRKTrajectoryFitter'),
    LogPixelProbabilityCut = cms.double(-14.0),
    MaxFractionOutliers = cms.double(0.3),
    MaxNumberOfOutliers = cms.int32(3),
    MinDof = cms.int32(2),
    MinNumberOfHits = cms.int32(3),
    NoInvalidHitsBeginEnd = cms.bool(True),
    NoOutliersBeginEnd = cms.bool(False),
    RejectTracks = cms.bool(True),
    Smoother = cms.string('hltESPRKTrajectorySmoother'),
    appendToDataLabel = cms.string('')
)
| 41.157895
| 97
| 0.748082
|
4a0ff89a07dc73af952eda55575d607349bcbdc7
| 4,932
|
py
|
Python
|
math3d_triangle.py
|
spencerparkin/pyMath3D
|
8a0a5155f7db11a0d4d555d6963faf0bc0ea2b71
|
[
"MIT"
] | null | null | null |
math3d_triangle.py
|
spencerparkin/pyMath3D
|
8a0a5155f7db11a0d4d555d6963faf0bc0ea2b71
|
[
"MIT"
] | null | null | null |
math3d_triangle.py
|
spencerparkin/pyMath3D
|
8a0a5155f7db11a0d4d555d6963faf0bc0ea2b71
|
[
"MIT"
] | 1
|
2021-03-06T05:28:22.000Z
|
2021-03-06T05:28:22.000Z
|
# math3d_triangle.py
import math
from math3d_vector import Vector
from math3d_side import Side
from math3d_plane import Plane
from math3d_line_segment import LineSegment
class Triangle(object):
def __init__(self, point_a=None, point_b=None, point_c=None):
self.point_a = point_a.clone() if point_a is not None else Vector(0.0, 0.0, 0.0)
self.point_b = point_b.clone() if point_b is not None else Vector(0.0, 0.0, 0.0)
self.point_c = point_c.clone() if point_c is not None else Vector(0.0, 0.0, 0.0)
def clone(self):
return Triangle(self.point_a, self.point_b, self.point_c)
def calc_plane(self):
unit_normal = (self.point_b - self.point_a).cross(self.point_c - self.point_a).normalized()
return Plane(self.point_a, unit_normal)
def calc_center(self):
return (self.point_a + self.point_b + self.point_c) / 3.0
def contains_point(self, point, eps=1e-7):
if not self.calc_plane().contains_point(point, eps):
return False
area_a = Triangle(point, self.point_a, self.point_b).area()
area_b = Triangle(point, self.point_b, self.point_c).area()
area_c = Triangle(point, self.point_c, self.point_a).area()
return math.fabs((area_a + area_b + area_c) - self.area()) < eps
def contains_edge_point(self, point, eps=1e-7):
return any([line_segment.contains_point(point, eps) for line_segment in self.yield_line_segments()])
def contains_interior_point(self, point, eps=1e-7):
return self.contains_point(point, eps) and not self.contains_edge_point(point, eps)
def yield_line_segments(self):
yield LineSegment(self.point_a, self.point_b)
yield LineSegment(self.point_b, self.point_c)
yield LineSegment(self.point_c, self.point_a)
def side(self, point, eps=1e-7):
return self.calc_plane().side(point, eps)
def area(self):
return (self.point_b - self.point_a).cross(self.point_c - self.point_a).length() / 2.0
def __getitem__(self, i):
return [self.point_a, self.point_b, self.point_c][i % 3]
def __setitem__(self, i, point):
setattr(self, ['point_a', 'point_b', 'point_c'][i % 3], point)
def split_against_plane(self, plane, eps=1e-7):
back_list = []
front_list = []
triangle_list = [self.clone()]
while len(triangle_list) > 0:
triangle = triangle_list.pop(0)
side_list = [plane.side(triangle[i], eps) for i in range(3)]
if all([side == Side.NEITHER for side in side_list]):
pass
elif all([side == Side.BACK or side == Side.NEITHER for side in side_list]):
back_list.append(triangle)
elif all([side == Side.FRONT or side == Side.NEITHER for side in side_list]):
front_list.append(triangle)
else:
for i in range(3):
if (side_list[i] == Side.BACK and side_list[(i + 1) % 3] == Side.FRONT or
side_list[i] == Side.FRONT and side_list[(i + 1) % 3] == Side.BACK):
# This might not be the best tessellation, but it will work.
line_segment = LineSegment(triangle[i], triangle[i + 1])
alpha = plane.intersect_line_segment(line_segment)
point = line_segment.lerp(alpha)
triangle_list.append(Triangle(triangle[i], point, triangle[i + 2]))
triangle_list.append(Triangle(point, triangle[i + 1], triangle[i + 2]))
break
return back_list, front_list
def intersect_with(self, other, eps=1e-7):
if isinstance(other, Triangle):
from math3d_point_cloud import PointCloud
point_cloud = PointCloud()
for line_segment in self.yield_line_segments():
point = other.intersect_with(line_segment)
if point is not None:
point_cloud.add_point(point)
for line_segment in other.yield_line_segments():
point = self.intersect_with(line_segment)
if point is not None:
point_cloud.add_point(point)
point_list = point_cloud.point_list
if len(point_list) == 2:
line_segment = LineSegment(point_list[0], point_list[1])
if line_segment.length() >= eps:
return line_segment
elif len(point_list) > 0:
return point_cloud
elif isinstance(other, LineSegment):
plane = self.calc_plane()
alpha = plane.intersect_line_segment(other)
if alpha is not None and 0.0 <= alpha <= 1.0:
point = other.lerp(alpha)
if self.contains_point(point, eps):
return point
| 44.035714
| 108
| 0.599554
|
4a0ff8b3d4f2de4b5a7c052ca03480feaafca0b4
| 8,315
|
py
|
Python
|
ch8/playMap.py
|
antallen/PythonMaterial
|
c582fb1610610feb72002f43a3758d5c58d6da85
|
[
"MIT"
] | null | null | null |
ch8/playMap.py
|
antallen/PythonMaterial
|
c582fb1610610feb72002f43a3758d5c58d6da85
|
[
"MIT"
] | null | null | null |
ch8/playMap.py
|
antallen/PythonMaterial
|
c582fb1610610feb72002f43a3758d5c58d6da85
|
[
"MIT"
] | 1
|
2021-07-23T09:59:15.000Z
|
2021-07-23T09:59:15.000Z
|
import Stores
import csv
import Messages
class playMap:
    """Renders a text-based Monopoly board (27 console rows) showing store
    names, prices, ownership and up to four player tokens.

    Depends on the project-local Stores and Messages modules; player
    positions are passed in as a list of store-number strings indexed by
    player."""
    # Board drawing characters: cell filler, vertical wall, horizontal line.
    __mapEmpty = " "
    __mapWall = "|"
    __mapLine = "-"
    # Store data source; created per printMap() call and released after.
    myStores = None
    def printMap(self,userPo):
        """Draw the whole board.  userPo[i] is the store number (string)
        player i+1 currently stands on."""
        self.myStores = Stores.Stores()
        # new drawing routine (translated from the original Chinese comment)
        for k in range(1,28):
            # print console rows 1-3 (top edge of the board)
            if (( k == 1 ) or (k == 2) or ( k == 3)):
                self.printmap1(k,0,7,userPo)
            # print row 4 and row 24 (full-width horizontal separators)
            if (( k == 4 ) or ( k == 24 )):
                print(48*self.__mapLine)
            # print rows 5-7, 9-11, 13-15, 17-19 and 21-23: the left/right
            # side stores flanking the open middle area
            if (( k == 5 ) or (k == 6) or ( k == 7)):
                self.printmap2(k,23,7,userPo)
            if (( k == 9 ) or (k == 10) or ( k == 11)):
                self.printmap2(k,22,8,userPo)
            if (( k == 13 ) or (k == 14) or ( k == 15)):
                self.printmap2(k,21,9,userPo)
            if (( k == 17 ) or (k == 18) or ( k == 19)):
                self.printmap2(k,20,10,userPo)
            if (( k == 21 ) or (k == 22) or ( k == 23)):
                self.printmap2(k,19,11,userPo)
            # print rows 8, 12, 16 and 20 (short separators on both sides)
            if (( k == 8 ) or (k == 12) or ( k == 16) or ( k == 20)):
                print(7*self.__mapLine + 34*self.__mapEmpty + 7*self.__mapLine)
            # print rows 25-27 (bottom edge of the board)
            if (( k == 25 ) or (k == 26) or ( k == 27)):
                self.printmap1(k,18,11,userPo)
        self.myStores = None
    # Renders the horizontal (top/bottom) edge of the board.
    def printmap1(self,k,min,max,userPo):
        """Print one console row of the top (k 1-3) or bottom (k 25-27)
        edge, iterating stores from index min toward max."""
        # Direction of iteration: bottom edge walks store indices backwards.
        if (max-min) > 0 :
            j = 1
        else:
            j = -1
        # First sub-row: store name plus owner token.
        if ((k == 1) or (k == 25)):
            for i in range(min,max,j):
                # owner id -1 means unowned; otherwise show the full-width digit.
                if (self.myStores.getStoreData(str(i))[2] == "-1"):
                    owner = " "
                else:
                    owner = self.transferNo(self.myStores.getStoreData(str(i))[2])
                print(self.__mapEmpty + self.getStoreName(self.myStores.getStoreData(str(i))[1]) + owner,end = '')
                if ((i < 6) or (i > 12)):
                    print(self.__mapWall,end = '')
                else:
                    print()
        # Second sub-row: store price (rendered as full-width digits).
        elif (( k == 2) or (k == 26)):
            for i in range(min,max,j):
                print(self.__mapEmpty + self.getStoreName(self.transferNo(self.myStores.getStoreData(str(i))[3])) + self.__mapEmpty,end = '')
                if ((i < 6) or (i > 12)):
                    print(self.__mapWall,end='')
                else:
                    print()
        # Third sub-row: player tokens standing on each store.
        elif (( k == 3) or (k == 27)):
            po_tmp = ""
            for i in range(min,max,j):
                po_tmp = self.__mapEmpty
                for l in range(len(userPo)):
                    if (userPo[l] == str(i)):
                        po_tmp = po_tmp + self.transferNo(str(l+1))
                    else:
                        po_tmp = po_tmp + self.__mapEmpty
                # pad the remaining slots when fewer than four players
                if (len(userPo)<4):
                    po_tmp = po_tmp + (4-len(userPo))*self.__mapEmpty
                po_tmp = po_tmp + self.__mapEmpty
                if ((i < 6) or (i > 12)):
                    print(po_tmp + self.__mapWall,end = '')
                else:
                    print(po_tmp,end = '')
            print()
    # Renders one console row of the vertical sides (one store per side).
    def printmap2(self,k,min,max,userPo):
        """Print one console row of the left (store index min) and right
        (store index max) side, with the middle info panel between them."""
        # NOTE: iterates the two-element tuple (min, max), not a range.
        for i in (min,max):
            if (self.myStores.getStoreData(str(i))[2] == "-1"):
                owner = " "
            else:
                owner = self.transferNo(self.myStores.getStoreData(str(i))[2])
            # First sub-row: store names; rows 9/13 also show player money.
            if (( k == 5) or ( k == 9) or( k == 13) or( k == 17) or( k == 21)):
                lines = ""
                lines = lines + self.__mapEmpty + self.getStoreName(self.myStores.getStoreData(str(min))[1]) + owner + self.__mapWall
                if (( k == 9) or ( k == 13)):
                    messages = self.getUserData(k)
                    lines = lines + 6*self.__mapEmpty + messages + 6*self.__mapEmpty
                else:
                    lines = lines + 34*self.__mapEmpty
                lines = lines + self.__mapWall + self.__mapEmpty + self.getStoreName(self.myStores.getStoreData(str(max))[1]) + owner
                print(lines)
            # Second sub-row: store prices; row 6 shows the welcome banner.
            elif (( k == 6) or ( k == 10) or( k == 14) or( k == 18) or( k == 22)):
                lines = ""
                lines = lines + self.__mapEmpty + self.getStoreName(self.transferNo(self.myStores.getStoreData(str(min))[3])) + self.__mapEmpty + self.__mapWall
                if (k == 6):
                    lines = lines + 6*self.__mapEmpty + "歡迎參加大富翁文字桌遊 請儘量嫌棄畫面太醜!" + 6*self.__mapEmpty
                else:
                    lines = lines + 34*self.__mapEmpty
                lines = lines + self.__mapWall + self.__mapEmpty + self.getStoreName(self.transferNo(self.myStores.getStoreData(str(max))[3])) + self.__mapEmpty
                print(lines)
            # Third sub-row: player tokens; rows 11/15 show money, 23 news.
            elif (( k == 7) or ( k == 11) or( k == 15) or( k == 19) or( k == 23)):
                po_tmp = ""
                lines = self.__mapEmpty
                for j in range(len(userPo)):
                    # NOTE(review): str(str(min)) is a redundant double
                    # conversion — left as-is (behavior unchanged).
                    if (userPo[j] == str(str(min))):
                        po_tmp = po_tmp + self.transferNo(str(j+1))
                    else:
                        po_tmp = po_tmp + self.__mapEmpty
                # pad the remaining slots when fewer than four players
                if (len(userPo)<4):
                    po_tmp = po_tmp + (4-len(userPo))*self.__mapEmpty
                po_tmp = po_tmp + self.__mapEmpty
                lines = lines + po_tmp + self.__mapWall
                po_tmp = ""
                if (( k == 11) or ( k == 15)):
                    messages = self.getUserData(k)
                    lines = lines + 6*self.__mapEmpty + messages + 6*self.__mapEmpty
                elif ((k == 23)):
                    news = self.getMessages()
                    lines = lines + 6*self.__mapEmpty + news + 6*self.__mapEmpty
                else:
                    lines = lines + 34*self.__mapEmpty
                for j in range(len(userPo)):
                    if (userPo[j] == str(str(max))):
                        po_tmp = po_tmp + self.transferNo(str(j+1))
                    else:
                        po_tmp = po_tmp + self.__mapEmpty
                po_tmp = po_tmp + self.__mapEmpty
                lines = lines + self.__mapWall + self.__mapEmpty + po_tmp
                print(lines)
    # Pads a cell label to a fixed width (4 characters).
    def getStoreName(self,data):
        """Return data left-padded with spaces to width 4; data longer
        than 4 characters yields an empty string (existing behavior)."""
        storeName = ""
        if (len(data) <= 4):
            storeName = data + (4-len(data))*" "
        return storeName
    # Converts half-width digits to full-width digits (for CJK alignment).
    def transferNo(self,data):
        """Return the decimal string *data* rendered with full-width
        digit characters so columns line up with CJK text."""
        # Lookup tuple alternates half-width int and full-width character;
        # nums.index(d) + 1 therefore maps digit -> full-width form.
        nums = (0,"0",1,"1",2,"2",3,"3",4,"4",5,"5",6,"6",7,"7",8,"8",9,"9")
        tmp = []
        dataleng = len(data)
        for j in range(0,dataleng):
            tmp.append(0)
        newdata = ""
        # Decompose the number into digits, least significant first.
        for i in range(1,dataleng+1):
            tmp[(dataleng-i)] = int(data)%10
            data = int(int(data) / 10)
        for i in range(0,len(tmp)):
            newdata += nums[nums.index(tmp[i])+1]
        return newdata
    # Reads one player's money from players.csv for the info panel.
    def getUserData(self,k):
        """Return the '<n>號玩家: <money>' panel string for the player
        shown on console row k (9/11/13/15), padded to 22 cells."""
        messages = ""
        with open('players.csv', newline='') as csvfile:
            players = csv.DictReader(csvfile)
            for player in players:
                if (k == 9):
                    if (player.get('id') == '0'):
                        money = self.transferNo(str(player.get('money')))
                        messages = "1號玩家: " + money
                if ( k == 11):
                    if (player.get('id') == '1'):
                        money = self.transferNo(str(player.get('money')))
                        messages = "2號玩家: " + money
                if ( k == 13):
                    if (player.get('id') == '2'):
                        money = self.transferNo(str(player.get('money')))
                        messages = "3號玩家: " + money
                if ( k == 15):
                    if (player.get('id') == '3'):
                        money = self.transferNo(str(player.get('money')))
                        messages = "4號玩家: " + money
        if len(messages) < 22:
            messages = messages + (22-len(messages))*self.__mapEmpty
        return messages
    # Fetches the news line shown on console row 23.
    def getMessages(self):
        """Return the latest Messages line padded to 22 cells.
        NOTE(review): when the message is 22 characters or longer, `hello`
        is never assigned before `return hello` — looks like it would raise
        (or return None, depending on intent); confirm against
        Messages.outputData()'s maximum length."""
        news = Messages.Messages()
        messages = news.outputData()
        if (len(messages) < 22):
            hello = messages + (22-len(messages))*self.__mapEmpty
        return hello
if __name__ == "__main__":
myMap = playMap()
userPo = ['11']
myMap.printMap(userPo)
| 39.221698
| 156
| 0.452796
|
4a0ff8f2f13edbc165a31f58c24a5db1be27b6d2
| 1,119
|
py
|
Python
|
revscoring/utilities/tests/test_util.py
|
mariushoch/revscoring
|
5ecd54d31c4088b6f142c0ef54116cc5bdce0ff2
|
[
"MIT"
] | null | null | null |
revscoring/utilities/tests/test_util.py
|
mariushoch/revscoring
|
5ecd54d31c4088b6f142c0ef54116cc5bdce0ff2
|
[
"MIT"
] | null | null | null |
revscoring/utilities/tests/test_util.py
|
mariushoch/revscoring
|
5ecd54d31c4088b6f142c0ef54116cc5bdce0ff2
|
[
"MIT"
] | null | null | null |
from ..util import read_labels_and_population_rates, read_labels_config
def test_plain_labels():
    """An explicit comma-separated label string is parsed into typed labels."""
    observed_labels, observed_weights, observed_rates = \
        read_labels_and_population_rates(
            "true,false", ["true=5"], ["true=0.1", "false=0.9"], None)
    assert observed_labels == [True, False]
    assert observed_weights == {True: 5}
    assert observed_rates == {True: 0.1, False: 0.9}
def test_pop_rates_labels():
    """With no explicit label string, labels are inferred from the rates."""
    observed_labels, observed_weights, observed_rates = \
        read_labels_and_population_rates(
            None, ["true=5"], ["true=0.1", "false=0.9"], None)
    assert observed_labels == [True, False]
    assert observed_weights == {True: 5}
    assert observed_rates == {True: 0.1, False: 0.9}
def test_labels_config():
    """Labels, weights and rates are extracted from a structured config."""
    config = {
        'name': "enwiki damaging",
        'labels': [
            {'value': True, 'weight': 5, 'population_rate': 0.1},
            {'value': False, 'population_rate': 0.9},
        ],
    }
    observed_labels, observed_weights, observed_rates = \
        read_labels_config(config)
    assert observed_labels == [True, False]
    assert observed_weights == {True: 5}
    assert observed_rates == {True: 0.1, False: 0.9}
| 32.911765
| 79
| 0.646113
|
4a0ffed8c900ddcbe70a211af359e890cd97ea91
| 787
|
py
|
Python
|
ziproto/__init__.py
|
intellivoid/ZiProto-Python
|
fe1db33e8288a9f5500f26ae1b3339ecb6e913ab
|
[
"Apache-2.0"
] | 1
|
2021-01-04T19:25:34.000Z
|
2021-01-04T19:25:34.000Z
|
ziproto/__init__.py
|
intellivoid/ZiProto-Python
|
fe1db33e8288a9f5500f26ae1b3339ecb6e913ab
|
[
"Apache-2.0"
] | null | null | null |
ziproto/__init__.py
|
intellivoid/ZiProto-Python
|
fe1db33e8288a9f5500f26ae1b3339ecb6e913ab
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2020 Intellivoid Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ziproto.ZiProtoEncoder
import ziproto.ZiProtoDecoder
def encode(obj):
    """Serialize *obj* into ZiProto bytes.

    Bug fix: ``import ziproto.ZiProtoEncoder`` binds only the name
    ``ziproto`` in this module, so the original bare
    ``ZiProtoEncoder.encode(obj)`` raised NameError at call time; the
    encoder must be reached through its full dotted path.
    """
    return ziproto.ZiProtoEncoder.encode(obj)
def decode(bytes):
    """Deserialize ZiProto *bytes* back into a Python object.

    The parameter deliberately keeps its original name (``bytes``,
    shadowing the builtin) so existing keyword callers are not broken.

    Bug fix: same NameError as in encode() — ``ZiProtoDecoder`` was never
    bound by ``import ziproto.ZiProtoDecoder``; reach it via the
    ``ziproto`` package name that the module-level import binds.
    """
    return ziproto.ZiProtoDecoder.decode(bytes)
| 28.107143
| 75
| 0.752224
|
4a0ffef07f4b2d56f7f6a0612b9f00bee834e136
| 206
|
py
|
Python
|
division.py
|
DahlitzFlorian/leverage-the-full-potential-of-type-hints-article-snippets
|
2247b99aba6b9e4d97db81af8fabaf8b2f69bc30
|
[
"MIT"
] | null | null | null |
division.py
|
DahlitzFlorian/leverage-the-full-potential-of-type-hints-article-snippets
|
2247b99aba6b9e4d97db81af8fabaf8b2f69bc30
|
[
"MIT"
] | null | null | null |
division.py
|
DahlitzFlorian/leverage-the-full-potential-of-type-hints-article-snippets
|
2247b99aba6b9e4d97db81af8fabaf8b2f69bc30
|
[
"MIT"
] | null | null | null |
# division.py
from typing import Union
# Accepted numeric operand types.
Number = Union[int, float]


def division(divident: Number, divisor: Number) -> float:
    """Return the quotient of *divident* / *divisor*.

    True division is used, so the result is a float even for int inputs.
    Raises ZeroDivisionError when *divisor* is zero.  (Parameter names
    keep the original spelling — "divident" — since renaming them would
    break keyword callers.)
    """
    quotient = divident / divisor
    return quotient
result = division(1.5, 2)  # example call: true division of a float by an int
print(result)  # 0.75
| 17.166667
| 57
| 0.703883
|
4a0fff88cd918d7e56db07749544c84354d4d67f
| 4,540
|
py
|
Python
|
MatchingPennies/trcopo_mp.py
|
manish-pra/trcopo
|
df8730f07ef554970c7a0aa653cc42d4886948ec
|
[
"MIT"
] | 5
|
2020-06-22T17:13:51.000Z
|
2021-11-02T14:19:58.000Z
|
MatchingPennies/trcopo_mp.py
|
manish-pra/trcopo
|
df8730f07ef554970c7a0aa653cc42d4886948ec
|
[
"MIT"
] | null | null | null |
MatchingPennies/trcopo_mp.py
|
manish-pra/trcopo
|
df8730f07ef554970c7a0aa653cc42d4886948ec
|
[
"MIT"
] | 1
|
2020-09-14T06:41:03.000Z
|
2020-09-14T06:41:03.000Z
|
# Trains two softmax policies on the Matching Pennies matrix game with the
# TRCoPO (trust-region competitive policy optimization) optimizer, logging
# diagnostics to TensorBoard and checkpointing both policies periodically.
import torch
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
sys.path.insert(0, '..')
from trcopo_optim import TRCoPO
from torch.distributions import Categorical
import numpy as np
from MatchingPennies.matching_pennies import pennies_game
from torch.utils.tensorboard import SummaryWriter
from MatchingPennies.network import policy1, policy2
import os  # NOTE(review): duplicate of the import above — harmless
# Output locations for TensorBoard event files and model checkpoints.
folder_location = 'tensorboard/mp/'
experiment_name = 'trcopo/'
directory = '../' + folder_location + experiment_name + 'model'
if not os.path.exists(directory):
    os.makedirs(directory)
writer = SummaryWriter('../' + folder_location + experiment_name + 'data')
# One policy network per player; TRCoPO optimizes both jointly.
p1 = policy1()
p2 = policy2()
optim = TRCoPO(p1.parameters(),p2.parameters(), threshold=0.01)
batch_size = 1000
num_episode = 1000
env = pennies_game()
for t_eps in range(num_episode):
    # Per-episode rollout buffers (states, joint actions, rewards, done flags).
    mat_action = []
    mat_state1 = []
    mat_reward1 = []
    mat_done = []
    mat_state2 = []
    mat_reward2 = []
    state, _, _, _, _ = env.reset()
    #data_collection
    for i in range(batch_size):
        # Sample each player's action from its current softmax policy.
        pi1 = p1()
        dist1 = Categorical(pi1)
        action1 = dist1.sample()
        pi2 = p2()
        dist2 = Categorical(pi2)
        action2 = dist2.sample()
        action = np.array([action1, action2])
        # Matching Pennies is stateless; the state is a fixed placeholder.
        state = np.array([0,0])
        mat_state1.append(torch.FloatTensor(state))
        mat_state2.append(torch.FloatTensor(state))
        mat_action.append(torch.FloatTensor(action))
        #print(action)
        state, reward1, reward2, done, _ = env.step(action)
        mat_reward1.append(torch.FloatTensor([reward1]))
        mat_reward2.append(torch.FloatTensor([reward2]))
        mat_done.append(torch.FloatTensor([1 - done]))
    #print(action)
    # print('a1',dist1.mean, dist1.variance)
    # print('a2',dist2.mean, dist2.variance)
    action_both = torch.stack(mat_action)
    # Log per-episode policy entropy and mean sampled action per player.
    writer.add_scalar('Entropy/Agent1', dist1.entropy().data, t_eps)
    writer.add_scalar('Entropy/agent2', dist2.entropy().data, t_eps)
    writer.add_scalar('Action/Agent1', torch.mean(action_both[:,0]), t_eps)
    writer.add_scalar('Action/agent2', torch.mean(action_both[:,1]), t_eps)
    #val1_p = -advantage_mat1#val1.detach()
    # Player 1's reward sequence, shaped (1, batch_size); the game is
    # zero-sum so this single payoff drives both players' updates.
    val1_p = torch.stack(mat_reward1).transpose(0,1)
    # st_time = time.time()
    # calculate gradients
    if val1_p.size(0)!=1:
        # NOTE(review): raising a plain string is invalid in Python 3 and
        # produces a TypeError instead of the intended error; should be
        # e.g. `raise ValueError('error')`.
        raise 'error'
    optim.zero_grad()
    def get_log_prob():
        # Closure handed to the optimizer: recompute both players'
        # log-probabilities of the rolled-out joint actions plus the payoff.
        pi_a1_s = p1()
        dist_pi1 = Categorical(pi_a1_s)
        action_both = torch.stack(mat_action)
        log_probs1 = dist_pi1.log_prob(action_both[:, 0])
        pi_a2_s = p2()
        dist_pi2 = Categorical(pi_a2_s)
        log_probs2 = dist_pi2.log_prob(action_both[:, 1])
        # objective = torch.exp(log_probs1 + log_probs2 - log_probs1.detach() - log_probs2.detach()) * (val1_p)
        return log_probs1, log_probs2, val1_p
    # Trust-region step; returns per-player improvements and diagnostics.
    improve1, improve2, lamda, lam1, lam2, esp, stat, its = optim.step(get_log_prob)
    writer.add_scalar('Improvement/agent1', improve1, t_eps)
    writer.add_scalar('Improvement/agent2', improve2, t_eps)
    writer.add_scalar('Improvement/error', esp, t_eps)
    writer.add_scalar('Improvement/status', stat, t_eps)
    writer.add_scalar('lamda/agent1', lam1, t_eps)
    writer.add_scalar('lamda/agent2', lam2, t_eps)
    writer.add_scalar('lamda/commona', lamda, t_eps)
    if t_eps%100 ==0:
        for p in p1.parameters():
            print('p1', p)
        for p in p2.parameters():
            print('p2', p)
    # Log raw policy parameters and softmax outputs for both players.
    for p in p1.parameters():
        writer.add_scalar('Agent1/p1', p.data[0], t_eps)
        writer.add_scalar('Agent1/p2', p.data[1], t_eps)
    for p in p2.parameters():
        writer.add_scalar('Agent2/p1', p.data[0], t_eps)
        writer.add_scalar('Agent2/p2', p.data[1], t_eps)
        #print('p2', p)
    writer.add_scalar('Agent1/sm1', pi1.data[0], t_eps)
    writer.add_scalar('Agent1/sm2', pi1.data[1], t_eps)
    writer.add_scalar('Agent2/sm1', pi2.data[0], t_eps)
    writer.add_scalar('Agent2/sm2', pi2.data[1], t_eps)
    # Conjugate-gradient diagnostics from the optimizer's last step.
    _, _, cgx, cgy, itr_num = optim.getinfo()
    writer.add_scalar('norm/theta_diff', cgx + cgy, t_eps)
    writer.add_scalar('norm/itr_num', itr_num, t_eps)
    # Checkpoint both policies every 100 episodes.
    if t_eps%100==0:
        print(t_eps)
        torch.save(p1.state_dict(),
                   '../' + folder_location + experiment_name + 'model/agent1_' + str(
                       t_eps) + ".pth")
        torch.save(p2.state_dict(),
                   '../' + folder_location + experiment_name + 'model/agent2_' + str(
                       t_eps) + ".pth")
| 111
| 0.644493
|
4a0fffc41c52543b02a1d54cf49b92d2ebde149e
| 1,405
|
py
|
Python
|
docs/conf.py
|
gevent/python-tblib
|
ebc7cd6f60bf5943658bf9f02c1d6068dfdb6331
|
[
"BSD-2-Clause"
] | 5
|
2017-06-23T14:52:49.000Z
|
2018-10-23T16:17:06.000Z
|
docs/conf.py
|
gevent/python-tblib
|
ebc7cd6f60bf5943658bf9f02c1d6068dfdb6331
|
[
"BSD-2-Clause"
] | null | null | null |
docs/conf.py
|
gevent/python-tblib
|
ebc7cd6f60bf5943658bf9f02c1d6068dfdb6331
|
[
"BSD-2-Clause"
] | 1
|
2021-01-13T11:23:05.000Z
|
2021-01-13T11:23:05.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.extlinks',
'sphinx.ext.ifconfig',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
if os.getenv('SPELLCHECK'):
extensions += 'sphinxcontrib.spelling',
spelling_show_suggestions = True
spelling_lang = 'en_US'
source_suffix = '.rst'
master_doc = 'index'
project = 'tblib'
year = '2013-2017'
author = 'Ionel Cristian Mărieș'
copyright = '{0}, {1}'.format(year, author)
version = release = '1.3.0'
pygments_style = 'trac'
templates_path = ['.']
extlinks = {
'issue': ('https://github.com/ionelmc/python-tblib/issues/%s', '#'),
'pr': ('https://github.com/ionelmc/python-tblib/pull/%s', 'PR #'),
}
import sphinx_py3doc_enhanced_theme
html_theme = "sphinx_py3doc_enhanced_theme"
html_theme_path = [sphinx_py3doc_enhanced_theme.get_html_theme_path()]
html_theme_options = {
'githuburl': 'https://github.com/ionelmc/python-tblib/'
}
html_use_smartypants = True
html_last_updated_fmt = '%b %d, %Y'
html_split_index = False
html_sidebars = {
'**': ['searchbox.html', 'globaltoc.html', 'sourcelink.html'],
}
html_short_title = '%s-%s' % (project, version)
napoleon_use_ivar = True
napoleon_use_rtype = False
napoleon_use_param = False
| 25.545455
| 72
| 0.69395
|
4a10003657013434458f3637c719af40eac69e68
| 1,306
|
py
|
Python
|
examples/py/async-market-making-symbols.py
|
diwenshi61/ccxt
|
ebdda10e7c4ed8841d572f3bfe198b5f0e949cf6
|
[
"MIT"
] | 24,910
|
2017-10-27T21:41:59.000Z
|
2022-03-31T23:08:57.000Z
|
examples/py/async-market-making-symbols.py
|
diwenshi61/ccxt
|
ebdda10e7c4ed8841d572f3bfe198b5f0e949cf6
|
[
"MIT"
] | 8,201
|
2017-10-28T10:19:28.000Z
|
2022-03-31T23:49:37.000Z
|
examples/py/async-market-making-symbols.py
|
diwenshi61/ccxt
|
ebdda10e7c4ed8841d572f3bfe198b5f0e949cf6
|
[
"MIT"
] | 6,632
|
2017-10-28T02:53:24.000Z
|
2022-03-31T23:20:14.000Z
|
# -*- coding: utf-8 -*-
from asyncio import gather, get_event_loop
from pprint import pprint
import os
import sys
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')
import ccxt.async_support as ccxt # noqa: E402
async def load_markets(exchange):
results = None
try:
await exchange.load_markets()
print('Loaded', len(exchange.symbols), exchange.id, 'symbols')
results = []
for market in exchange.markets.values():
if market['maker'] <= 0:
results.append({'exchange': exchange.id, 'symbol': market['symbol']})
if len(results) < 1:
results = None
except:
results = None
await exchange.close()
return results
async def main(loop):
config = {'enableRateLimit': True, 'asyncio_loop': loop}
exchanges = [getattr(ccxt, exchange_id)(config) for exchange_id in ccxt.exchanges]
# exchanges = [exchange for exchange in exchanges if exchange.certified]
results = await gather(*[load_markets(exchange) for exchange in exchanges])
results = [result for result in results if result is not None]
return results
if __name__ == '__main__':
loop = get_event_loop()
pprint(loop.run_until_complete(main(loop)))
| 30.372093
| 86
| 0.666922
|
4a1000d298eb060677e00fb674b2bf4de6ba634e
| 182
|
py
|
Python
|
mozillians/announcements/__init__.py
|
mozilla/vouched-mozillians
|
88fca9aea0ab1e173cbc33776aa388b956859559
|
[
"BSD-3-Clause"
] | 1
|
2020-10-27T12:17:34.000Z
|
2020-10-27T12:17:34.000Z
|
mozillians/announcements/__init__.py
|
akatsoulas/vouched-mozillians
|
6dcfaf61518ff038403b2b3e06ad9b813135b287
|
[
"BSD-3-Clause"
] | 5
|
2020-09-28T19:04:19.000Z
|
2020-10-27T19:48:31.000Z
|
mozillians/announcements/__init__.py
|
akatsoulas/vouched-mozillians
|
6dcfaf61518ff038403b2b3e06ad9b813135b287
|
[
"BSD-3-Clause"
] | 2
|
2020-09-22T08:55:10.000Z
|
2020-09-24T10:40:58.000Z
|
from django.apps import AppConfig
default_app_config = "mozillians.announcements.AnnouncementsConfig"
class AnnouncementsConfig(AppConfig):
name = "mozillians.announcements"
| 20.222222
| 67
| 0.818681
|
4a100140024a158ec4c132a949652103c0c3e02a
| 3,552
|
py
|
Python
|
clumioapi/models/report_download.py
|
clumio-code/clumio-python-sdk
|
63bfaf3afed5c0ab4bae3dd1be52271249d07c51
|
[
"Apache-2.0"
] | null | null | null |
clumioapi/models/report_download.py
|
clumio-code/clumio-python-sdk
|
63bfaf3afed5c0ab4bae3dd1be52271249d07c51
|
[
"Apache-2.0"
] | 1
|
2021-09-16T05:56:05.000Z
|
2021-09-16T05:56:05.000Z
|
clumioapi/models/report_download.py
|
clumio-code/clumio-python-sdk
|
63bfaf3afed5c0ab4bae3dd1be52271249d07c51
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2021. Clumio, Inc.
#
from typing import Any, Dict, Mapping, Optional, Sequence, Type, TypeVar
T = TypeVar('T', bound='ReportDownload')
class ReportDownload:
"""Implementation of the 'ReportDownload' model.
Attributes:
download_link:
The link to the actual CSV report.
end_timestamp:
The time when the request was completed.
expiration_timestamp:
The time when this report CSV will expire and not be available for download.
file_name:
The name of CSV file.
filters:
The filters applied to the report when download was initiated.
id:
start_timestamp:
The time when the request was made.
task_id:
The Clumio-assigned ID of the task which generated the restored file.
type:
The type of report this CSV Download is associated with.
The possible values include "activity" and "compliance".
"""
# Create a mapping from Model property names to API property names
_names = {
'download_link': 'download_link',
'end_timestamp': 'end_timestamp',
'expiration_timestamp': 'expiration_timestamp',
'file_name': 'file_name',
'filters': 'filters',
'id': 'id',
'start_timestamp': 'start_timestamp',
'task_id': 'task_id',
'type': 'type',
}
def __init__(
self,
download_link: str = None,
end_timestamp: str = None,
expiration_timestamp: str = None,
file_name: str = None,
filters: str = None,
id: str = None,
start_timestamp: str = None,
task_id: str = None,
type: str = None,
) -> None:
"""Constructor for the ReportDownload class."""
# Initialize members of the class
self.download_link: str = download_link
self.end_timestamp: str = end_timestamp
self.expiration_timestamp: str = expiration_timestamp
self.file_name: str = file_name
self.filters: str = filters
self.id: str = id
self.start_timestamp: str = start_timestamp
self.task_id: str = task_id
self.type: str = type
@classmethod
def from_dictionary(cls: Type, dictionary: Mapping[str, Any]) -> Optional[T]:
"""Creates an instance of this model from a dictionary
Args:
dictionary: A dictionary representation of the object as obtained
from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if not dictionary:
return None
# Extract variables from the dictionary
download_link = dictionary.get('download_link')
end_timestamp = dictionary.get('end_timestamp')
expiration_timestamp = dictionary.get('expiration_timestamp')
file_name = dictionary.get('file_name')
filters = dictionary.get('filters')
id = dictionary.get('id')
start_timestamp = dictionary.get('start_timestamp')
task_id = dictionary.get('task_id')
type = dictionary.get('type')
# Return an object of this model
return cls(
download_link,
end_timestamp,
expiration_timestamp,
file_name,
filters,
id,
start_timestamp,
task_id,
type,
)
| 32.290909
| 88
| 0.599944
|
4a100198b08a5a5dbded11b669a8e3f6a6889e4e
| 47,565
|
py
|
Python
|
Sublime Text 3/Backup/20210122193107/backrefs/st3/backrefs/uniprops/unidata/scriptextensions.py
|
anekeallen/Sublime-Text-3
|
8502b9089ca4223f8ba7ff168626a0dbe67713cb
|
[
"MIT"
] | 182
|
2017-03-05T07:43:13.000Z
|
2022-03-15T13:09:07.000Z
|
Sublime Text 3/Backup/20210122193107/backrefs/st3/backrefs/uniprops/unidata/scriptextensions.py
|
anekeallen/Sublime-Text-3
|
8502b9089ca4223f8ba7ff168626a0dbe67713cb
|
[
"MIT"
] | 3
|
2021-05-10T18:59:14.000Z
|
2021-09-02T01:50:15.000Z
|
Sublime Text 3/Backup/20210122193107/backrefs/st3/backrefs/uniprops/unidata/scriptextensions.py
|
anekeallen/Sublime-Text-3
|
8502b9089ca4223f8ba7ff168626a0dbe67713cb
|
[
"MIT"
] | 16
|
2017-03-07T11:01:27.000Z
|
2022-01-08T09:21:01.000Z
|
"""Unicode Properties from Unicode version 6.1.0 (autogen)."""
from __future__ import unicode_literals
unicode_script_extensions = {
"^arabic": "\x00-\u05ff\u0605\u061c-\u061d\u065f\u06dd\u0700-\u074f\u0780-\u089f\u08a1\u08ad-\u08e3\u08ff-\ufb4f\ufbc2-\ufbd2\ufd3e-\ufd4f\ufd90-\ufd91\ufdc8-\ufdef\ufdfe-\ufe6f\ufe75\ufefd-\U00010e5f\U00010e7f-\U0001edff\U0001ee04\U0001ee20\U0001ee23\U0001ee25-\U0001ee26\U0001ee28\U0001ee33\U0001ee38\U0001ee3a\U0001ee3c-\U0001ee41\U0001ee43-\U0001ee46\U0001ee48\U0001ee4a\U0001ee4c\U0001ee50\U0001ee53\U0001ee55-\U0001ee56\U0001ee58\U0001ee5a\U0001ee5c\U0001ee5e\U0001ee60\U0001ee63\U0001ee65-\U0001ee66\U0001ee6b\U0001ee73\U0001ee78\U0001ee7d\U0001ee7f\U0001ee8a\U0001ee9c-\U0001eea0\U0001eea4\U0001eeaa\U0001eebc-\U0001eeef\U0001eef2-\U0010ffff",
"^armenian": "\x00-\u0530\u0557-\u0558\u0560\u0588\u058b-\u058e\u0590-\ufb12\ufb18-\U0010ffff",
"^avestan": "\x00-\U00010aff\U00010b36-\U00010b38\U00010b40-\U0010ffff",
"^balinese": "\x00-\u1aff\u1b4c-\u1b4f\u1b7d-\U0010ffff",
"^bamum": "\x00-\ua69f\ua6f8-\U000167ff\U00016a39-\U0010ffff",
"^batak": "\x00-\u1bbf\u1bf4-\u1bfb\u1c00-\U0010ffff",
"^bengali": "\x00-\u0963\u0966-\u0980\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09c5-\u09c6\u09c9-\u09ca\u09cf-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09fc-\U0010ffff",
"^bopomofo": "\x00-\u02e9\u02ec-\u3000\u3004-\u3007\u3012\u3020-\u3029\u302e-\u302f\u3031-\u3036\u3038-\u303d\u3040-\u30fa\u30fc-\u3104\u312e-\u319f\u31bb-\u31bf\u31e4-\u321f\u3244-\u327f\u32b1-\u32bf\u32cc-\u3357\u3371-\u337a\u3380-\u33df\u33ff-\ufe44\ufe47-\uff60\uff66-\U0010ffff",
"^brahmi": "\x00-\U00010fff\U0001104e-\U00011051\U00011070-\U0010ffff",
"^braille": "\x00-\u27ff\u2900-\U0010ffff",
"^buginese": "\x00-\u19ff\u1a1c-\u1a1d\u1a20-\U0010ffff",
"^buhid": "\x00-\u1734\u1737-\u173f\u1754-\U0010ffff",
"^canadianaboriginal": "\x00-\u13ff\u1680-\u18af\u18f6-\U0010ffff",
"^carian": "\x00-\U0001029f\U000102d1-\U0010ffff",
"^chakma": "\x00-\U000110ff\U00011135\U00011144-\U0010ffff",
"^cham": "\x00-\ua9ff\uaa37-\uaa3f\uaa4e-\uaa4f\uaa5a-\uaa5b\uaa60-\U0010ffff",
"^cherokee": "\x00-\u139f\u13f5-\U0010ffff",
"^common": "\x41-\x5a\x61-\x7a\xaa\xba\xc0-\xd6\xd8-\xf6\xf8-\u02b8\u02e0-\u02e4\u02ea-\u02eb\u0300-\u0373\u0375-\u037d\u037f-\u0384\u0386\u0388-\u0588\u058a-\u060b\u060d-\u061a\u061c-\u061e\u0620-\u063f\u0641-\u065f\u066a-\u06dc\u06de-\u0963\u0966-\u0e3e\u0e40-\u0fd4\u0fd9-\u10fa\u10fc-\u16ea\u16ee-\u1734\u1737-\u1801\u1804\u1806-\u1cd2\u1cd4-\u1ce0\u1ce2-\u1ce8\u1ced\u1cf4\u1cf7-\u1fff\u200c-\u200d\u2065-\u2069\u2071-\u2073\u207f\u208f-\u209f\u20ba-\u20ff\u2126\u212a-\u212b\u2132\u214e\u2160-\u2188\u218a-\u218f\u23f4-\u23ff\u2427-\u243f\u244b-\u245f\u2700\u2800-\u28ff\u2b4d-\u2b4f\u2b5a-\u2dff\u2e3c-\u2fef\u2ffc-\u2fff\u3005\u3007\u3021-\u302f\u3038-\u303b\u3040-\u309a\u309d-\u309f\u30a1-\u30fa\u30fd-\u318f\u31a0-\u31bf\u31e4-\u321f\u3260-\u327e\u32d0-\u3357\u3400-\u4dbf\u4e00-\ua6ff\ua722-\ua787\ua78b-\ua82f\ua83a-\ufd3d\ufd40-\ufdfc\ufdfe-\ufe0f\ufe1a-\ufe2f\ufe53\ufe67\ufe6c-\ufefe\uff00\uff21-\uff3a\uff41-\uff5a\uff66-\uff6f\uff71-\uff9d\uffa0-\uffdf\uffe7\uffef-\ufff8\ufffe-\U000100ff\U00010103-\U00010106\U00010134-\U00010136\U00010140-\U0001018f\U0001019c-\U000101cf\U000101fd-\U0001cfff\U0001d0f6-\U0001d0ff\U0001d127-\U0001d128\U0001d167-\U0001d169\U0001d17b-\U0001d182\U0001d185-\U0001d18b\U0001d1aa-\U0001d1ad\U0001d1de-\U0001d2ff\U0001d357-\U0001d35f\U0001d372-\U0001d3ff\U0001d455\U0001d49d\U0001d4a0-\U0001d4a1\U0001d4a3-\U0001d4a4\U0001d4a7-\U0001d4a8\U0001d4ad\U0001d4ba\U0001d4bc\U0001d4c4\U0001d506\U0001d50b-\U0001d50c\U0001d515\U0001d51d\U0001d53a\U0001d53f\U0001d545\U0001d547-\U0001d549\U0001d551\U0001d6a6-\U0001d6a7\U0001d7cc-\U0001d7cd\U0001d800-\U0001efff\U0001f02c-\U0001f02f\U0001f094-\U0001f09f\U0001f0af-\U0001f0b0\U0001f0bf-\U0001f0c0\U0001f0d0\U0001f0e0-\U0001f0ff\U0001f10b-\U0001f10f\U0001f12f\U0001f16c-\U0001f16f\U0001f19b-\U0001f1e5\U0001f200\U0001f203-\U0001f20f\U0001f23b-\U0001f23f\U0001f249-\U0001f24f\U0001f252-\U0001f2ff\U0001f321-\U0001f32f\U0001f336\U0001f37d-\U0001f37f\U0001f394-\U0001f39f\U0001f3c5\U0001f3cb-\U0001f3df\U0001f3f1-\U000
1f3ff\U0001f43f\U0001f441\U0001f4f8\U0001f4fd-\U0001f4ff\U0001f53e-\U0001f53f\U0001f544-\U0001f54f\U0001f568-\U0001f5fa\U0001f641-\U0001f644\U0001f650-\U0001f67f\U0001f6c6-\U0001f6ff\U0001f774-\U000e0000\U000e0002-\U000e001f\U000e0080-\U0010ffff",
"^coptic": "\x00-\u03e1\u03f0-\u2c7f\u2cf4-\u2cf8\u2d00-\U0010ffff",
"^cuneiform": "\x00-\U00011fff\U0001236f-\U000123ff\U00012463-\U0001246f\U00012474-\U0010ffff",
"^cypriot": "\x00-\U000100ff\U00010103-\U00010106\U00010134-\U00010136\U00010140-\U000107ff\U00010806-\U00010807\U00010809\U00010836\U00010839-\U0001083b\U0001083d-\U0001083e\U00010840-\U0010ffff",
"^cyrillic": "\x00-\u03ff\u0485-\u0486\u0528-\u1d2a\u1d2c-\u1d77\u1d79-\u2ddf\u2e00-\ua63f\ua698-\ua69e\ua6a0-\U0010ffff",
"^deseret": "\x00-\U000103ff\U00010450-\U0010ffff",
"^devanagari": "\x00-\u08ff\u0951-\u0952\u0978\u0980-\ua82f\ua83a-\ua8df\ua8fc-\U0010ffff",
"^egyptianhieroglyphs": "\x00-\U00012fff\U0001342f-\U0010ffff",
"^ethiopic": "\x00-\u11ff\u1249\u124e-\u124f\u1257\u1259\u125e-\u125f\u1289\u128e-\u128f\u12b1\u12b6-\u12b7\u12bf\u12c1\u12c6-\u12c7\u12d7\u1311\u1316-\u1317\u135b-\u135c\u137d-\u137f\u139a-\u2d7f\u2d97-\u2d9f\u2da7\u2daf\u2db7\u2dbf\u2dc7\u2dcf\u2dd7\u2ddf-\uab00\uab07-\uab08\uab0f-\uab10\uab17-\uab1f\uab27\uab2f-\U0010ffff",
"^georgian": "\x00-\u0588\u058a-\u109f\u10c6\u10c8-\u10cc\u10ce-\u10cf\u10fb\u1100-\u2cff\u2d26\u2d28-\u2d2c\u2d2e-\U0010ffff",
"^glagolitic": "\x00-\u2bff\u2c2f\u2c5f-\U0010ffff",
"^gothic": "\x00-\U0001032f\U0001034b-\U0010ffff",
"^greek": "\x00-\u036f\u0374\u0378-\u0379\u037e-\u0383\u0385\u0387\u038b\u038d\u03a2\u03e2-\u03ef\u0400-\u1d25\u1d2b-\u1d5c\u1d62-\u1d65\u1d6b-\u1dbe\u1dc0-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fc5\u1fd4-\u1fd5\u1fdc\u1ff0-\u1ff1\u1ff5\u1fff-\u2125\u2127-\U0001013f\U0001018b-\U0001d1ff\U0001d246-\U0010ffff",
"^gujarati": "\x00-\u0a80\u0a84\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0acf\u0ad1-\u0adf\u0ae4-\u0ae5\u0af2-\ua82f\ua83a-\U0010ffff",
"^gurmukhi": "\x00-\u0963\u0966-\u0a00\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a50\u0a52-\u0a58\u0a5d\u0a5f-\u0a65\u0a76-\ua82f\ua83a-\U0010ffff",
"^han": "\x00-\u2e7f\u2e9a\u2ef4-\u2eff\u2fd6-\u3000\u3004\u3012\u3020\u302e-\u302f\u3031-\u3036\u3040-\u30fa\u30fc-\u318f\u31a0-\u31bf\u31e4-\u321f\u3244-\u327f\u32b1-\u32bf\u32cc-\u3357\u3371-\u337a\u3380-\u33df\u33ff\u4db6-\u4dff\u9fcd-\uf8ff\ufa6e-\ufa6f\ufada-\ufe44\ufe47-\uff60\uff66-\U0001ffff\U0002a6d7-\U0002a6ff\U0002b735-\U0002b73f\U0002b81e-\U0002f7ff\U0002fa1e-\U0010ffff",
"^hangul": "\x00-\u10ff\u1200-\u3000\u3004-\u3007\u3012\u3020-\u302d\u3031-\u3036\u3038-\u303d\u3040-\u30fa\u30fc-\u3130\u318f-\u31bf\u31e4-\u31ff\u321f\u3244-\u325f\u327f\u32b1-\u32bf\u32cc-\u3357\u3371-\u337a\u3380-\u33df\u33ff-\ua95f\ua97d-\uabff\ud7a4-\ud7af\ud7c7-\ud7ca\ud7fc-\ufe44\ufe47-\uff60\uff66-\uff9f\uffbf-\uffc1\uffc8-\uffc9\uffd0-\uffd1\uffd8-\uffd9\uffdd-\U0010ffff",
"^hanunoo": "\x00-\u171f\u1737-\U0010ffff",
"^hebrew": "\x00-\u0590\u05c8-\u05cf\u05eb-\u05ef\u05f5-\ufb1c\ufb37\ufb3d\ufb3f\ufb42\ufb45\ufb50-\U0010ffff",
"^hiragana": "\x00-\u3000\u3004-\u3005\u3007\u3012\u3020-\u302f\u3036\u3038-\u303b\u3040\u3097-\u3098\u30a1-\u30fa\u30fd-\u318f\u31a0-\u31bf\u31e4-\u321f\u3244-\u327f\u32b1-\u32bf\u32cc-\u3357\u3371-\u337a\u3380-\u33df\u33ff-\ufe44\ufe47-\uff60\uff66-\uff6f\uff71-\uff9d\uffa0-\U0001b000\U0001b002-\U0001f1ff\U0001f201-\U0010ffff",
"^imperialaramaic": "\x00-\U0001083f\U00010856\U00010860-\U0010ffff",
"^inherited": "\x00-\u02ff\u0370-\u0484\u0487-\u064a\u0656-\u065e\u0660-\u066f\u0671-\u0950\u0953-\u1ccf\u1cd3\u1ce1\u1ce9-\u1cec\u1cee-\u1cf3\u1cf5-\u1dbf\u1de7-\u1dfb\u1e00-\u200b\u200e-\u20cf\u20f1-\u3029\u302e-\u3098\u309b-\ufdff\ufe10-\ufe1f\ufe27-\U000101fc\U000101fe-\U0001d166\U0001d16a-\U0001d17a\U0001d183-\U0001d184\U0001d18c-\U0001d1a9\U0001d1ae-\U000e00ff\U000e01f0-\U0010ffff",
"^inscriptionalpahlavi": "\x00-\U00010b5f\U00010b73-\U00010b77\U00010b80-\U0010ffff",
"^inscriptionalparthian": "\x00-\U00010b3f\U00010b56-\U00010b57\U00010b60-\U0010ffff",
"^javanese": "\x00-\ua97f\ua9ce\ua9da-\ua9dd\ua9e0-\U0010ffff",
"^kaithi": "\x00-\ua82f\ua83a-\U0001107f\U000110c2-\U0010ffff",
"^kannada": "\x00-\u0c81\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbb\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce4-\u0ce5\u0cf0\u0cf3-\U0010ffff",
"^katakana": "\x00-\u3000\u3004-\u3005\u3007\u3012\u3020-\u302f\u3036\u3038-\u303b\u3040-\u3098\u309d-\u309f\u3100-\u318f\u31a0-\u31bf\u31e4-\u31ef\u3200-\u321f\u3244-\u327f\u32b1-\u32bf\u32cc-\u32cf\u32ff\u3371-\u337a\u3380-\u33df\u33ff-\ufe44\ufe47-\uff60\uffa0-\U0001afff\U0001b001-\U0010ffff",
"^kayahli": "\x00-\ua8ff\ua930-\U0010ffff",
"^kharoshthi": "\x00-\U000109ff\U00010a04\U00010a07-\U00010a0b\U00010a14\U00010a18\U00010a34-\U00010a37\U00010a3b-\U00010a3e\U00010a48-\U00010a4f\U00010a59-\U0010ffff",
"^khmer": "\x00-\u177f\u17de-\u17df\u17ea-\u17ef\u17fa-\u19df\u1a00-\U0010ffff",
"^lao": "\x00-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0edb\u0ee0-\U0010ffff",
"^latin": "\x00-\x40\x5c\x5b-\x60\x7b-\xa9\xab-\xb9\xbb-\xbf\xd7\xf7\u02b9-\u02df\u02e5-\u1cff\u1d26-\u1d2b\u1d5d-\u1d61\u1d66-\u1d6a\u1d78\u1dbf-\u1dff\u1f00-\u2070\u2072-\u207e\u2080-\u208f\u209d-\u2129\u212c-\u2131\u2133-\u214d\u214f-\u215f\u2189-\u2c5f\u2c80-\ua721\ua788-\ua78a\ua78f\ua794-\ua79f\ua7ab-\ua7f7\ua800-\ufaff\ufb07-\uff20\uff3b-\uff40\uff5b-\U0010ffff",
"^lepcha": "\x00-\u1bff\u1c38-\u1c3a\u1c4a-\u1c4c\u1c50-\U0010ffff",
"^limbu": "\x00-\u18ff\u191d-\u191f\u192c-\u192f\u193c-\u193f\u1941-\u1943\u1950-\U0010ffff",
"^linearb": "\x00-\uffff\U0001000c\U00010027\U0001003b\U0001003e\U0001004e-\U0001004f\U0001005e-\U0001007f\U000100fb-\U000100ff\U00010103-\U00010106\U00010134-\U00010136\U00010140-\U0010ffff",
"^lisu": "\x00-\ua4cf\ua500-\U0010ffff",
"^lycian": "\x00-\U0001027f\U0001029d-\U0010ffff",
"^lydian": "\x00-\U0001091f\U0001093a-\U0001093e\U00010940-\U0010ffff",
"^malayalam": "\x00-\u0d01\u0d04\u0d0d\u0d11\u0d3b-\u0d3c\u0d45\u0d49\u0d4f-\u0d56\u0d58-\u0d5f\u0d64-\u0d65\u0d76-\u0d78\u0d80-\U0010ffff",
"^mandaic": "\x00-\u063f\u0641-\u083f\u085c-\u085d\u085f-\U0010ffff",
"^meeteimayek": "\x00-\uaadf\uaaf7-\uabbf\uabee-\uabef\uabfa-\U0010ffff",
"^meroiticcursive": "\x00-\U0001099f\U000109b8-\U000109bd\U000109c0-\U0010ffff",
"^meroitichieroglyphs": "\x00-\U0001097f\U000109a0-\U0010ffff",
"^miao": "\x00-\U00016eff\U00016f45-\U00016f4f\U00016f7f-\U00016f8e\U00016fa0-\U0010ffff",
"^mongolian": "\x00-\u17ff\u180f\u181a-\u181f\u1878-\u187f\u18ab-\U0010ffff",
"^myanmar": "\x00-\u0fff\u10a0-\uaa5f\uaa7c-\U0010ffff",
"^newtailue": "\x00-\u197f\u19ac-\u19af\u19ca-\u19cf\u19db-\u19dd\u19e0-\U0010ffff",
"^nko": "\x00-\u07bf\u07fb-\U0010ffff",
"^ogham": "\x00-\u167f\u169d-\U0010ffff",
"^olchiki": "\x00-\u1c4f\u1c80-\U0010ffff",
"^olditalic": "\x00-\U000102ff\U0001031f\U00010324-\U0010ffff",
"^oldpersian": "\x00-\U0001039f\U000103c4-\U000103c7\U000103d6-\U0010ffff",
"^oldsoutharabian": "\x00-\U00010a5f\U00010a80-\U0010ffff",
"^oldturkic": "\x00-\U00010bff\U00010c49-\U0010ffff",
"^oriya": "\x00-\u0963\u0966-\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34\u0b3a-\u0b3b\u0b45-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b64-\u0b65\u0b78-\U0010ffff",
"^osmanya": "\x00-\U0001047f\U0001049e-\U0001049f\U000104aa-\U0010ffff",
"^phagspa": "\x00-\u1801\u1804\u1806-\ua83f\ua878-\U0010ffff",
"^phoenician": "\x00-\U000108ff\U0001091c-\U0001091e\U00010920-\U0010ffff",
"^rejang": "\x00-\ua92f\ua954-\ua95e\ua960-\U0010ffff",
"^runic": "\x00-\u169f\u16eb-\u16ed\u16f1-\U0010ffff",
"^samaritan": "\x00-\u07ff\u082e-\u082f\u083f-\U0010ffff",
"^saurashtra": "\x00-\ua87f\ua8c5-\ua8cd\ua8da-\U0010ffff",
"^sharada": "\x00-\U0001117f\U000111c9-\U000111cf\U000111da-\U0010ffff",
"^shavian": "\x00-\U0001044f\U00010480-\U0010ffff",
"^sinhala": "\x00-\u0d81\u0d84\u0d97-\u0d99\u0db2\u0dbc\u0dbe-\u0dbf\u0dc7-\u0dc9\u0dcb-\u0dce\u0dd5\u0dd7\u0de0-\u0df1\u0df5-\U0010ffff",
"^sorasompeng": "\x00-\U000110cf\U000110e9-\U000110ef\U000110fa-\U0010ffff",
"^sundanese": "\x00-\u1b7f\u1bc0-\u1cbf\u1cc8-\U0010ffff",
"^sylotinagri": "\x00-\ua7ff\ua82c-\U0010ffff",
"^syriac": "\x00-\u060b\u060d-\u061a\u061c-\u061e\u0620-\u063f\u0641-\u064a\u0656-\u066f\u0671-\u06ff\u070e\u074b-\u074c\u0750-\U0010ffff",
"^tagalog": "\x00-\u16ff\u170d\u1715-\u1734\u1737-\U0010ffff",
"^tagbanwa": "\x00-\u1734\u1737-\u175f\u176d\u1771\u1774-\U0010ffff",
"^taile": "\x00-\u194f\u196e-\u196f\u1975-\U0010ffff",
"^taitham": "\x00-\u1a1f\u1a5f\u1a7d-\u1a7e\u1a8a-\u1a8f\u1a9a-\u1a9f\u1aae-\U0010ffff",
"^taiviet": "\x00-\uaa7f\uaac3-\uaada\uaae0-\U0010ffff",
"^takri": "\x00-\ua82f\ua83a-\U0001167f\U000116b8-\U000116bf\U000116ca-\U0010ffff",
"^tamil": "\x00-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bcf\u0bd1-\u0bd6\u0bd8-\u0be5\u0bfb-\U0010ffff",
"^telugu": "\x00-\u0c00\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c3c\u0c45\u0c49\u0c4e-\u0c54\u0c57\u0c5a-\u0c5f\u0c64-\u0c65\u0c70-\u0c77\u0c80-\U0010ffff",
"^thaana": "\x00-\u060b\u060d-\u061a\u061c-\u061e\u0620-\u065f\u066a-\u077f\u07b2-\ufdf1\ufdf3-\ufdfc\ufdfe-\U0010ffff",
"^thai": "\x00-\u0e00\u0e3b-\u0e3f\u0e5c-\U0010ffff",
"^tibetan": "\x00-\u0eff\u0f48\u0f6d-\u0f70\u0f98\u0fbd\u0fcd\u0fd5-\u0fd8\u0fdb-\U0010ffff",
"^tifinagh": "\x00-\u2d2f\u2d68-\u2d6e\u2d71-\u2d7e\u2d80-\U0010ffff",
"^ugaritic": "\x00-\U0001037f\U0001039e\U000103a0-\U0010ffff",
"^vai": "\x00-\ua4ff\ua62c-\U0010ffff",
"^yi": "\x00-\u3000\u3003-\u3007\u3012-\u3013\u301c-\u30fa\u30fc-\u9fff\ua48d-\ua48f\ua4c7-\uff60\uff66-\U0010ffff",
"^zzzz": "\x00-\u0377\u037a-\u037e\u0384-\u038a\u038c\u038e-\u03a1\u03a3-\u0527\u0531-\u0556\u0559-\u055f\u0561-\u0587\u0589-\u058a\u058f\u0591-\u05c7\u05d0-\u05ea\u05f0-\u05f4\u0600-\u0604\u0606-\u061b\u061e-\u070d\u070f-\u074a\u074d-\u07b1\u07c0-\u07fa\u0800-\u082d\u0830-\u083e\u0840-\u085b\u085e\u08a0\u08a2-\u08ac\u08e4-\u08fe\u0900-\u0977\u0979-\u097f\u0981-\u0983\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bc-\u09c4\u09c7-\u09c8\u09cb-\u09ce\u09d7\u09dc-\u09dd\u09df-\u09e3\u09e6-\u09fb\u0a01-\u0a03\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a3c\u0a3e-\u0a42\u0a47-\u0a48\u0a4b-\u0a4d\u0a51\u0a59-\u0a5c\u0a5e\u0a66-\u0a75\u0a81-\u0a83\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abc-\u0ac5\u0ac7-\u0ac9\u0acb-\u0acd\u0ad0\u0ae0-\u0ae3\u0ae6-\u0af1\u0b01-\u0b03\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3c-\u0b44\u0b47-\u0b48\u0b4b-\u0b4d\u0b56-\u0b57\u0b5c-\u0b5d\u0b5f-\u0b63\u0b66-\u0b77\u0b82-\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bbe-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcd\u0bd0\u0bd7\u0be6-\u0bfa\u0c01-\u0c03\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c33\u0c35-\u0c39\u0c3d-\u0c44\u0c46-\u0c48\u0c4a-\u0c4d\u0c55-\u0c56\u0c58-\u0c59\u0c60-\u0c63\u0c66-\u0c6f\u0c78-\u0c7f\u0c82-\u0c83\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbc-\u0cc4\u0cc6-\u0cc8\u0cca-\u0ccd\u0cd5-\u0cd6\u0cde\u0ce0-\u0ce3\u0ce6-\u0cef\u0cf1-\u0cf2\u0d02-\u0d03\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d-\u0d44\u0d46-\u0d48\u0d4a-\u0d4e\u0d57\u0d60-\u0d63\u0d66-\u0d75\u0d79-\u0d7f\u0d82-\u0d83\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0dca\u0dcf-\u0dd4\u0dd6\u0dd8-\u0ddf\u0df2-\u0df4\u0e01-\u0e3a\u0e3f-\u0e5b\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\
u0eb9\u0ebb-\u0ebd\u0ec0-\u0ec4\u0ec6\u0ec8-\u0ecd\u0ed0-\u0ed9\u0edc-\u0edf\u0f00-\u0f47\u0f49-\u0f6c\u0f71-\u0f97\u0f99-\u0fbc\u0fbe-\u0fcc\u0fce-\u0fda\u1000-\u10c5\u10c7\u10cd\u10d0-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u135d-\u137c\u1380-\u1399\u13a0-\u13f4\u1400-\u169c\u16a0-\u16f0\u1700-\u170c\u170e-\u1714\u1720-\u1736\u1740-\u1753\u1760-\u176c\u176e-\u1770\u1772-\u1773\u1780-\u17dd\u17e0-\u17e9\u17f0-\u17f9\u1800-\u180e\u1810-\u1819\u1820-\u1877\u1880-\u18aa\u18b0-\u18f5\u1900-\u191c\u1920-\u192b\u1930-\u193b\u1940\u1944-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u19d0-\u19da\u19de-\u1a1b\u1a1e-\u1a5e\u1a60-\u1a7c\u1a7f-\u1a89\u1a90-\u1a99\u1aa0-\u1aad\u1b00-\u1b4b\u1b50-\u1b7c\u1b80-\u1bf3\u1bfc-\u1c37\u1c3b-\u1c49\u1c4d-\u1c7f\u1cc0-\u1cc7\u1cd0-\u1cf6\u1d00-\u1de6\u1dfc-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fc4\u1fc6-\u1fd3\u1fd6-\u1fdb\u1fdd-\u1fef\u1ff2-\u1ff4\u1ff6-\u1ffe\u2000-\u2064\u206a-\u2071\u2074-\u208e\u2090-\u209c\u20a0-\u20b9\u20d0-\u20f0\u2100-\u2189\u2190-\u23f3\u2400-\u2426\u2440-\u244a\u2460-\u26ff\u2701-\u2b4c\u2b50-\u2b59\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2cf3\u2cf9-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f-\u2d70\u2d7f-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u2de0-\u2e3b\u2e80-\u2e99\u2e9b-\u2ef3\u2f00-\u2fd5\u2ff0-\u2ffb\u3000-\u303f\u3041-\u3096\u3099-\u30ff\u3105-\u312d\u3131-\u318e\u3190-\u31ba\u31c0-\u31e3\u31f0-\u321e\u3220-\u32fe\u3300-\u4db5\u4dc0-\u9fcc\ua000-\ua48c\ua490-\ua4c6\ua4d0-\ua62b\ua640-\ua697\ua69f-\ua6f7\ua700-\ua78e\ua790-\ua793\ua7a0-\ua7aa\ua7f8-\ua82b\ua830-\ua839\ua840-\ua877\ua880-\ua8c4\ua8ce-\ua8d9\ua8e0-\ua8fb\ua900-\ua953\ua95f-\ua97c\ua980-\ua9cd\ua9cf-\ua9d9\ua9de-\ua9df\uaa00-\uaa36\uaa40-\uaa4d\uaa50-\uaa59\uaa5
c-\uaa7b\uaa80-\uaac2\uaadb-\uaaf6\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uabc0-\uabed\uabf0-\uabf9\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbc1\ufbd3-\ufd3f\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfd\ufe00-\ufe19\ufe20-\ufe26\ufe30-\ufe52\ufe54-\ufe66\ufe68-\ufe6b\ufe70-\ufe74\ufe76-\ufefc\ufeff\uff01-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc\uffe0-\uffe6\uffe8-\uffee\ufff9-\ufffd\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00010080-\U000100fa\U00010100-\U00010102\U00010107-\U00010133\U00010137-\U0001018a\U00010190-\U0001019b\U000101d0-\U000101fd\U00010280-\U0001029c\U000102a0-\U000102d0\U00010300-\U0001031e\U00010320-\U00010323\U00010330-\U0001034a\U00010380-\U0001039d\U0001039f-\U000103c3\U000103c8-\U000103d5\U00010400-\U0001049d\U000104a0-\U000104a9\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U00010857-\U0001085f\U00010900-\U0001091b\U0001091f-\U00010939\U0001093f\U00010980-\U000109b7\U000109be-\U000109bf\U00010a00-\U00010a03\U00010a05-\U00010a06\U00010a0c-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a33\U00010a38-\U00010a3a\U00010a3f-\U00010a47\U00010a50-\U00010a58\U00010a60-\U00010a7f\U00010b00-\U00010b35\U00010b39-\U00010b55\U00010b58-\U00010b72\U00010b78-\U00010b7f\U00010c00-\U00010c48\U00010e60-\U00010e7e\U00011000-\U0001104d\U00011052-\U0001106f\U00011080-\U000110c1\U000110d0-\U000110e8\U000110f0-\U000110f9\U00011100-\U00011134\U00011136-\U00011143\U00011180-\U000111c8\U000111d0-\U000111d9\U00011680-\U000116b7\U000116c0-\U000116c9\U00012000-\U0001236e\U00012400-\U00012462\U00012470-\U00012473\U00013000-\U0001342e\U00016800-\U00016a38\U00016f00-\U00016f44\U00016f50-\U00016f7e\U00016f8f-\U00016f9f\U0001b000-\U0001b001\U0001d000-\U0001d0f5\U0001d100-\U0001d126\U
0001d129-\U0001d1dd\U0001d200-\U0001d245\U0001d300-\U0001d356\U0001d360-\U0001d371\U0001d400-\U0001d454\U0001d456-\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d51e-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d552-\U0001d6a5\U0001d6a8-\U0001d7cb\U0001d7ce-\U0001d7ff\U0001ee00-\U0001ee03\U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U0001ee37\U0001ee39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\U0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U0001eef0-\U0001eef1\U0001f000-\U0001f02b\U0001f030-\U0001f093\U0001f0a0-\U0001f0ae\U0001f0b1-\U0001f0be\U0001f0c1-\U0001f0cf\U0001f0d1-\U0001f0df\U0001f100-\U0001f10a\U0001f110-\U0001f12e\U0001f130-\U0001f16b\U0001f170-\U0001f19a\U0001f1e6-\U0001f202\U0001f210-\U0001f23a\U0001f240-\U0001f248\U0001f250-\U0001f251\U0001f300-\U0001f320\U0001f330-\U0001f335\U0001f337-\U0001f37c\U0001f380-\U0001f393\U0001f3a0-\U0001f3c4\U0001f3c6-\U0001f3ca\U0001f3e0-\U0001f3f0\U0001f400-\U0001f43e\U0001f440\U0001f442-\U0001f4f7\U0001f4f9-\U0001f4fc\U0001f500-\U0001f53d\U0001f540-\U0001f543\U0001f550-\U0001f567\U0001f5fb-\U0001f640\U0001f645-\U0001f64f\U0001f680-\U0001f6c5\U0001f700-\U0001f773\U00020000-\U0002a6d6\U0002a700-\U0002b734\U0002b740-\U0002b81d\U0002f800-\U0002fa1d\U000e0001\U000e0020-\U000e007f\U000e0100-\U000e01ef",
"arabic": "\u0600-\u0604\u0606-\u061b\u061e-\u065e\u0660-\u06dc\u06de-\u06ff\u0750-\u077f\u08a0\u08a2-\u08ac\u08e4-\u08fe\ufb50-\ufbc1\ufbd3-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfd\ufe70-\ufe74\ufe76-\ufefc\U00010e60-\U00010e7e\U0001ee00-\U0001ee03\U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U0001ee37\U0001ee39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\U0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U0001eef0-\U0001eef1",
"armenian": "\u0531-\u0556\u0559-\u055f\u0561-\u0587\u0589-\u058a\u058f\ufb13-\ufb17",
"avestan": "\U00010b00-\U00010b35\U00010b39-\U00010b3f",
"balinese": "\u1b00-\u1b4b\u1b50-\u1b7c",
"bamum": "\ua6a0-\ua6f7\U00016800-\U00016a38",
"batak": "\u1bc0-\u1bf3\u1bfc-\u1bff",
"bengali": "\u0964-\u0965\u0981-\u0983\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bc-\u09c4\u09c7-\u09c8\u09cb-\u09ce\u09d7\u09dc-\u09dd\u09df-\u09e3\u09e6-\u09fb",
"bopomofo": "\u02ea-\u02eb\u3001-\u3003\u3008-\u3011\u3013-\u301f\u302a-\u302d\u3030\u3037\u303e-\u303f\u30fb\u3105-\u312d\u31a0-\u31ba\u31c0-\u31e3\u3220-\u3243\u3280-\u32b0\u32c0-\u32cb\u3358-\u3370\u337b-\u337f\u33e0-\u33fe\ufe45-\ufe46\uff61-\uff65",
"brahmi": "\U00011000-\U0001104d\U00011052-\U0001106f",
"braille": "\u2800-\u28ff",
"buginese": "\u1a00-\u1a1b\u1a1e-\u1a1f",
"buhid": "\u1735-\u1736\u1740-\u1753",
"canadianaboriginal": "\u1400-\u167f\u18b0-\u18f5",
"carian": "\U000102a0-\U000102d0",
"chakma": "\U00011100-\U00011134\U00011136-\U00011143",
"cham": "\uaa00-\uaa36\uaa40-\uaa4d\uaa50-\uaa59\uaa5c-\uaa5f",
"cherokee": "\u13a0-\u13f4",
"common": "\x00-\x40\x5c\x5b-\x60\x7b-\xa9\xab-\xb9\xbb-\xbf\xd7\xf7\u02b9-\u02df\u02e5-\u02e9\u02ec-\u02ff\u0374\u037e\u0385\u0387\u0589\u060c\u061b\u061f\u0640\u0660-\u0669\u06dd\u0964-\u0965\u0e3f\u0fd5-\u0fd8\u10fb\u16eb-\u16ed\u1735-\u1736\u1802-\u1803\u1805\u1cd3\u1ce1\u1ce9-\u1cec\u1cee-\u1cf3\u1cf5-\u1cf6\u2000-\u200b\u200e-\u2064\u206a-\u2070\u2074-\u207e\u2080-\u208e\u20a0-\u20b9\u2100-\u2125\u2127-\u2129\u212c-\u2131\u2133-\u214d\u214f-\u215f\u2189\u2190-\u23f3\u2400-\u2426\u2440-\u244a\u2460-\u26ff\u2701-\u27ff\u2900-\u2b4c\u2b50-\u2b59\u2e00-\u2e3b\u2ff0-\u2ffb\u3000-\u3004\u3006\u3008-\u3020\u3030-\u3037\u303c-\u303f\u309b-\u309c\u30a0\u30fb-\u30fc\u3190-\u319f\u31c0-\u31e3\u3220-\u325f\u327f-\u32cf\u3358-\u33ff\u4dc0-\u4dff\ua700-\ua721\ua788-\ua78a\ua830-\ua839\ufd3e-\ufd3f\ufdfd\ufe10-\ufe19\ufe30-\ufe52\ufe54-\ufe66\ufe68-\ufe6b\ufeff\uff01-\uff20\uff3b-\uff40\uff5b-\uff65\uff70\uff9e-\uff9f\uffe0-\uffe6\uffe8-\uffee\ufff9-\ufffd\U00010100-\U00010102\U00010107-\U00010133\U00010137-\U0001013f\U00010190-\U0001019b\U000101d0-\U000101fc\U0001d000-\U0001d0f5\U0001d100-\U0001d126\U0001d129-\U0001d166\U0001d16a-\U0001d17a\U0001d183-\U0001d184\U0001d18c-\U0001d1a9\U0001d1ae-\U0001d1dd\U0001d300-\U0001d356\U0001d360-\U0001d371\U0001d400-\U0001d454\U0001d456-\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d51e-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d552-\U0001d6a5\U0001d6a8-\U0001d7cb\U0001d7ce-\U0001d7ff\U0001f000-\U0001f02b\U0001f030-\U0001f093\U0001f0a0-\U0001f0ae\U0001f0b1-\U0001f0be\U0001f0c1-\U0001f0cf\U0001f0d1-\U0001f0df\U0001f100-\U0001f10a\U0001f110-\U0001f12e\U0001f130-\U0001f16b\U0001f170-\U0001f19a\U0001f1e6-\U0001f1ff\U0001f201-\U0001f202\U0001f210-\U0001f23a\U0001f240-\U0001f248\U0001f250-\U0001f251\U0001f300-\U0001f320\
U0001f330-\U0001f335\U0001f337-\U0001f37c\U0001f380-\U0001f393\U0001f3a0-\U0001f3c4\U0001f3c6-\U0001f3ca\U0001f3e0-\U0001f3f0\U0001f400-\U0001f43e\U0001f440\U0001f442-\U0001f4f7\U0001f4f9-\U0001f4fc\U0001f500-\U0001f53d\U0001f540-\U0001f543\U0001f550-\U0001f567\U0001f5fb-\U0001f640\U0001f645-\U0001f64f\U0001f680-\U0001f6c5\U0001f700-\U0001f773\U000e0001\U000e0020-\U000e007f",
"coptic": "\u03e2-\u03ef\u2c80-\u2cf3\u2cf9-\u2cff",
"cuneiform": "\U00012000-\U0001236e\U00012400-\U00012462\U00012470-\U00012473",
"cypriot": "\U00010100-\U00010102\U00010107-\U00010133\U00010137-\U0001013f\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f",
"cyrillic": "\u0400-\u0484\u0487-\u0527\u1d2b\u1d78\u2de0-\u2dff\ua640-\ua697\ua69f",
"deseret": "\U00010400-\U0001044f",
"devanagari": "\u0900-\u0950\u0953-\u0977\u0979-\u097f\ua830-\ua839\ua8e0-\ua8fb",
"egyptianhieroglyphs": "\U00013000-\U0001342e",
"ethiopic": "\u1200-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u135d-\u137c\u1380-\u1399\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e",
"georgian": "\u0589\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u10ff\u2d00-\u2d25\u2d27\u2d2d",
"glagolitic": "\u2c00-\u2c2e\u2c30-\u2c5e",
"gothic": "\U00010330-\U0001034a",
"greek": "\u0370-\u0373\u0375-\u0377\u037a-\u037d\u0384\u0386\u0388-\u038a\u038c\u038e-\u03a1\u03a3-\u03e1\u03f0-\u03ff\u1d26-\u1d2a\u1d5d-\u1d61\u1d66-\u1d6a\u1dbf\u1f00-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fc4\u1fc6-\u1fd3\u1fd6-\u1fdb\u1fdd-\u1fef\u1ff2-\u1ff4\u1ff6-\u1ffe\u2126\U00010140-\U0001018a\U0001d200-\U0001d245",
"gujarati": "\u0a81-\u0a83\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abc-\u0ac5\u0ac7-\u0ac9\u0acb-\u0acd\u0ad0\u0ae0-\u0ae3\u0ae6-\u0af1\ua830-\ua839",
"gurmukhi": "\u0964-\u0965\u0a01-\u0a03\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a3c\u0a3e-\u0a42\u0a47-\u0a48\u0a4b-\u0a4d\u0a51\u0a59-\u0a5c\u0a5e\u0a66-\u0a75\ua830-\ua839",
"han": "\u2e80-\u2e99\u2e9b-\u2ef3\u2f00-\u2fd5\u3001-\u3003\u3005-\u3011\u3013-\u301f\u3021-\u302d\u3030\u3037-\u303f\u30fb\u3190-\u319f\u31c0-\u31e3\u3220-\u3243\u3280-\u32b0\u32c0-\u32cb\u3358-\u3370\u337b-\u337f\u33e0-\u33fe\u3400-\u4db5\u4e00-\u9fcc\uf900-\ufa6d\ufa70-\ufad9\ufe45-\ufe46\uff61-\uff65\U00020000-\U0002a6d6\U0002a700-\U0002b734\U0002b740-\U0002b81d\U0002f800-\U0002fa1d",
"hangul": "\u1100-\u11ff\u3001-\u3003\u3008-\u3011\u3013-\u301f\u302e-\u3030\u3037\u303e-\u303f\u30fb\u3131-\u318e\u31c0-\u31e3\u3200-\u321e\u3220-\u3243\u3260-\u327e\u3280-\u32b0\u32c0-\u32cb\u3358-\u3370\u337b-\u337f\u33e0-\u33fe\ua960-\ua97c\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\ufe45-\ufe46\uff61-\uff65\uffa0-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc",
"hanunoo": "\u1720-\u1736",
"hebrew": "\u0591-\u05c7\u05d0-\u05ea\u05f0-\u05f4\ufb1d-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufb4f",
"hiragana": "\u3001-\u3003\u3006\u3008-\u3011\u3013-\u301f\u3030-\u3035\u3037\u303c-\u303f\u3041-\u3096\u3099-\u30a0\u30fb-\u30fc\u3190-\u319f\u31c0-\u31e3\u3220-\u3243\u3280-\u32b0\u32c0-\u32cb\u3358-\u3370\u337b-\u337f\u33e0-\u33fe\ufe45-\ufe46\uff61-\uff65\uff70\uff9e-\uff9f\U0001b001\U0001f200",
"imperialaramaic": "\U00010840-\U00010855\U00010857-\U0001085f",
"inherited": "\u0300-\u036f\u0485-\u0486\u064b-\u0655\u065f\u0670\u0951-\u0952\u1cd0-\u1cd2\u1cd4-\u1ce0\u1ce2-\u1ce8\u1ced\u1cf4\u1dc0-\u1de6\u1dfc-\u1dff\u200c-\u200d\u20d0-\u20f0\u302a-\u302d\u3099-\u309a\ufe00-\ufe0f\ufe20-\ufe26\U000101fd\U0001d167-\U0001d169\U0001d17b-\U0001d182\U0001d185-\U0001d18b\U0001d1aa-\U0001d1ad\U000e0100-\U000e01ef",
"inscriptionalpahlavi": "\U00010b60-\U00010b72\U00010b78-\U00010b7f",
"inscriptionalparthian": "\U00010b40-\U00010b55\U00010b58-\U00010b5f",
"javanese": "\ua980-\ua9cd\ua9cf-\ua9d9\ua9de-\ua9df",
"kaithi": "\ua830-\ua839\U00011080-\U000110c1",
"kannada": "\u0c82-\u0c83\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbc-\u0cc4\u0cc6-\u0cc8\u0cca-\u0ccd\u0cd5-\u0cd6\u0cde\u0ce0-\u0ce3\u0ce6-\u0cef\u0cf1-\u0cf2",
"katakana": "\u3001-\u3003\u3006\u3008-\u3011\u3013-\u301f\u3030-\u3035\u3037\u303c-\u303f\u3099-\u309c\u30a0-\u30ff\u3190-\u319f\u31c0-\u31e3\u31f0-\u31ff\u3220-\u3243\u3280-\u32b0\u32c0-\u32cb\u32d0-\u32fe\u3300-\u3370\u337b-\u337f\u33e0-\u33fe\ufe45-\ufe46\uff61-\uff9f\U0001b000",
"kayahli": "\ua900-\ua92f",
"kharoshthi": "\U00010a00-\U00010a03\U00010a05-\U00010a06\U00010a0c-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a33\U00010a38-\U00010a3a\U00010a3f-\U00010a47\U00010a50-\U00010a58",
"khmer": "\u1780-\u17dd\u17e0-\u17e9\u17f0-\u17f9\u19e0-\u19ff",
"lao": "\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb9\u0ebb-\u0ebd\u0ec0-\u0ec4\u0ec6\u0ec8-\u0ecd\u0ed0-\u0ed9\u0edc-\u0edf",
"latin": "\x41-\x5a\x61-\x7a\xaa\xba\xc0-\xd6\xd8-\xf6\xf8-\u02b8\u02e0-\u02e4\u1d00-\u1d25\u1d2c-\u1d5c\u1d62-\u1d65\u1d6b-\u1d77\u1d79-\u1dbe\u1e00-\u1eff\u2071\u207f\u2090-\u209c\u212a-\u212b\u2132\u214e\u2160-\u2188\u2c60-\u2c7f\ua722-\ua787\ua78b-\ua78e\ua790-\ua793\ua7a0-\ua7aa\ua7f8-\ua7ff\ufb00-\ufb06\uff21-\uff3a\uff41-\uff5a",
"lepcha": "\u1c00-\u1c37\u1c3b-\u1c49\u1c4d-\u1c4f",
"limbu": "\u1900-\u191c\u1920-\u192b\u1930-\u193b\u1940\u1944-\u194f",
"linearb": "\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00010080-\U000100fa\U00010100-\U00010102\U00010107-\U00010133\U00010137-\U0001013f",
"lisu": "\ua4d0-\ua4ff",
"lycian": "\U00010280-\U0001029c",
"lydian": "\U00010920-\U00010939\U0001093f",
"malayalam": "\u0d02-\u0d03\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d-\u0d44\u0d46-\u0d48\u0d4a-\u0d4e\u0d57\u0d60-\u0d63\u0d66-\u0d75\u0d79-\u0d7f",
"mandaic": "\u0640\u0840-\u085b\u085e",
"meeteimayek": "\uaae0-\uaaf6\uabc0-\uabed\uabf0-\uabf9",
"meroiticcursive": "\U000109a0-\U000109b7\U000109be-\U000109bf",
"meroitichieroglyphs": "\U00010980-\U0001099f",
"miao": "\U00016f00-\U00016f44\U00016f50-\U00016f7e\U00016f8f-\U00016f9f",
"mongolian": "\u1800-\u180e\u1810-\u1819\u1820-\u1877\u1880-\u18aa",
"myanmar": "\u1000-\u109f\uaa60-\uaa7b",
"newtailue": "\u1980-\u19ab\u19b0-\u19c9\u19d0-\u19da\u19de-\u19df",
"nko": "\u07c0-\u07fa",
"ogham": "\u1680-\u169c",
"olchiki": "\u1c50-\u1c7f",
"olditalic": "\U00010300-\U0001031e\U00010320-\U00010323",
"oldpersian": "\U000103a0-\U000103c3\U000103c8-\U000103d5",
"oldsoutharabian": "\U00010a60-\U00010a7f",
"oldturkic": "\U00010c00-\U00010c48",
"oriya": "\u0964-\u0965\u0b01-\u0b03\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3c-\u0b44\u0b47-\u0b48\u0b4b-\u0b4d\u0b56-\u0b57\u0b5c-\u0b5d\u0b5f-\u0b63\u0b66-\u0b77",
"osmanya": "\U00010480-\U0001049d\U000104a0-\U000104a9",
"phagspa": "\u1802-\u1803\u1805\ua840-\ua877",
"phoenician": "\U00010900-\U0001091b\U0001091f",
"rejang": "\ua930-\ua953\ua95f",
"runic": "\u16a0-\u16ea\u16ee-\u16f0",
"samaritan": "\u0800-\u082d\u0830-\u083e",
"saurashtra": "\ua880-\ua8c4\ua8ce-\ua8d9",
"sharada": "\U00011180-\U000111c8\U000111d0-\U000111d9",
"shavian": "\U00010450-\U0001047f",
"sinhala": "\u0d82-\u0d83\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0dca\u0dcf-\u0dd4\u0dd6\u0dd8-\u0ddf\u0df2-\u0df4",
"sorasompeng": "\U000110d0-\U000110e8\U000110f0-\U000110f9",
"sundanese": "\u1b80-\u1bbf\u1cc0-\u1cc7",
"sylotinagri": "\ua800-\ua82b",
"syriac": "\u060c\u061b\u061f\u0640\u064b-\u0655\u0670\u0700-\u070d\u070f-\u074a\u074d-\u074f",
"tagalog": "\u1700-\u170c\u170e-\u1714\u1735-\u1736",
"tagbanwa": "\u1735-\u1736\u1760-\u176c\u176e-\u1770\u1772-\u1773",
"taile": "\u1950-\u196d\u1970-\u1974",
"taitham": "\u1a20-\u1a5e\u1a60-\u1a7c\u1a7f-\u1a89\u1a90-\u1a99\u1aa0-\u1aad",
"taiviet": "\uaa80-\uaac2\uaadb-\uaadf",
"takri": "\ua830-\ua839\U00011680-\U000116b7\U000116c0-\U000116c9",
"tamil": "\u0b82-\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bbe-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcd\u0bd0\u0bd7\u0be6-\u0bfa",
"telugu": "\u0c01-\u0c03\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c33\u0c35-\u0c39\u0c3d-\u0c44\u0c46-\u0c48\u0c4a-\u0c4d\u0c55-\u0c56\u0c58-\u0c59\u0c60-\u0c63\u0c66-\u0c6f\u0c78-\u0c7f",
"thaana": "\u060c\u061b\u061f\u0660-\u0669\u0780-\u07b1\ufdf2\ufdfd",
"thai": "\u0e01-\u0e3a\u0e40-\u0e5b",
"tibetan": "\u0f00-\u0f47\u0f49-\u0f6c\u0f71-\u0f97\u0f99-\u0fbc\u0fbe-\u0fcc\u0fce-\u0fd4\u0fd9-\u0fda",
"tifinagh": "\u2d30-\u2d67\u2d6f-\u2d70\u2d7f",
"ugaritic": "\U00010380-\U0001039d\U0001039f",
"vai": "\ua500-\ua62b",
"yi": "\u3001-\u3002\u3008-\u3011\u3014-\u301b\u30fb\ua000-\ua48c\ua490-\ua4c6\uff61-\uff65",
"zzzz": "\u0378-\u0379\u037f-\u0383\u038b\u038d\u03a2\u0528-\u0530\u0557-\u0558\u0560\u0588\u058b-\u058e\u0590\u05c8-\u05cf\u05eb-\u05ef\u05f5-\u05ff\u0605\u061c-\u061d\u070e\u074b-\u074c\u07b2-\u07bf\u07fb-\u07ff\u082e-\u082f\u083f\u085c-\u085d\u085f-\u089f\u08a1\u08ad-\u08e3\u08ff\u0978\u0980\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09c5-\u09c6\u09c9-\u09ca\u09cf-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09fc-\u0a00\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a50\u0a52-\u0a58\u0a5d\u0a5f-\u0a65\u0a76-\u0a80\u0a84\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0acf\u0ad1-\u0adf\u0ae4-\u0ae5\u0af2-\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34\u0b3a-\u0b3b\u0b45-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b64-\u0b65\u0b78-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bcf\u0bd1-\u0bd6\u0bd8-\u0be5\u0bfb-\u0c00\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c3c\u0c45\u0c49\u0c4e-\u0c54\u0c57\u0c5a-\u0c5f\u0c64-\u0c65\u0c70-\u0c77\u0c80-\u0c81\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbb\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce4-\u0ce5\u0cf0\u0cf3-\u0d01\u0d04\u0d0d\u0d11\u0d3b-\u0d3c\u0d45\u0d49\u0d4f-\u0d56\u0d58-\u0d5f\u0d64-\u0d65\u0d76-\u0d78\u0d80-\u0d81\u0d84\u0d97-\u0d99\u0db2\u0dbc\u0dbe-\u0dbf\u0dc7-\u0dc9\u0dcb-\u0dce\u0dd5\u0dd7\u0de0-\u0df1\u0df5-\u0e00\u0e3b-\u0e3e\u0e5c-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0edb\u0ee0-\u0eff\u0f48\u0f6d-\u0f70\u0f98\u0fbd\u0fcd\u0fdb-\u0fff\u10c6\u10c8-\u10cc\u10ce-\u10cf\u1249\u124e-\u124f\u1257\u1259\u125e-\u125f\u1289\u128e-\u128f\u12b1\u12b6-\u12b7\u12bf\u12c1\u12c6-\u12c7\u12d7\u1311\u1316-\u1317\u135b-\u135c\u137d-\u137f\u139a-\u139f\u13f5-\u13ff\u169d-\u169f\u16f1-\u16ff\u170d\u1715-\u1
71f\u1737-\u173f\u1754-\u175f\u176d\u1771\u1774-\u177f\u17de-\u17df\u17ea-\u17ef\u17fa-\u17ff\u180f\u181a-\u181f\u1878-\u187f\u18ab-\u18af\u18f6-\u18ff\u191d-\u191f\u192c-\u192f\u193c-\u193f\u1941-\u1943\u196e-\u196f\u1975-\u197f\u19ac-\u19af\u19ca-\u19cf\u19db-\u19dd\u1a1c-\u1a1d\u1a5f\u1a7d-\u1a7e\u1a8a-\u1a8f\u1a9a-\u1a9f\u1aae-\u1aff\u1b4c-\u1b4f\u1b7d-\u1b7f\u1bf4-\u1bfb\u1c38-\u1c3a\u1c4a-\u1c4c\u1c80-\u1cbf\u1cc8-\u1ccf\u1cf7-\u1cff\u1de7-\u1dfb\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fc5\u1fd4-\u1fd5\u1fdc\u1ff0-\u1ff1\u1ff5\u1fff\u2065-\u2069\u2072-\u2073\u208f\u209d-\u209f\u20ba-\u20cf\u20f1-\u20ff\u218a-\u218f\u23f4-\u23ff\u2427-\u243f\u244b-\u245f\u2700\u2b4d-\u2b4f\u2b5a-\u2bff\u2c2f\u2c5f\u2cf4-\u2cf8\u2d26\u2d28-\u2d2c\u2d2e-\u2d2f\u2d68-\u2d6e\u2d71-\u2d7e\u2d97-\u2d9f\u2da7\u2daf\u2db7\u2dbf\u2dc7\u2dcf\u2dd7\u2ddf\u2e3c-\u2e7f\u2e9a\u2ef4-\u2eff\u2fd6-\u2fef\u2ffc-\u2fff\u3040\u3097-\u3098\u3100-\u3104\u312e-\u3130\u318f\u31bb-\u31bf\u31e4-\u31ef\u321f\u32ff\u4db6-\u4dbf\u9fcd-\u9fff\ua48d-\ua48f\ua4c7-\ua4cf\ua62c-\ua63f\ua698-\ua69e\ua6f8-\ua6ff\ua78f\ua794-\ua79f\ua7ab-\ua7f7\ua82c-\ua82f\ua83a-\ua83f\ua878-\ua87f\ua8c5-\ua8cd\ua8da-\ua8df\ua8fc-\ua8ff\ua954-\ua95e\ua97d-\ua97f\ua9ce\ua9da-\ua9dd\ua9e0-\ua9ff\uaa37-\uaa3f\uaa4e-\uaa4f\uaa5a-\uaa5b\uaa7c-\uaa7f\uaac3-\uaada\uaaf7-\uab00\uab07-\uab08\uab0f-\uab10\uab17-\uab1f\uab27\uab2f-\uabbf\uabee-\uabef\uabfa-\uabff\ud7a4-\ud7af\ud7c7-\ud7ca\ud7fc-\uf8ff\ufa6e-\ufa6f\ufada-\ufaff\ufb07-\ufb12\ufb18-\ufb1c\ufb37\ufb3d\ufb3f\ufb42\ufb45\ufbc2-\ufbd2\ufd40-\ufd4f\ufd90-\ufd91\ufdc8-\ufdef\ufdfe-\ufdff\ufe1a-\ufe1f\ufe27-\ufe2f\ufe53\ufe67\ufe6c-\ufe6f\ufe75\ufefd-\ufefe\uff00\uffbf-\uffc1\uffc8-\uffc9\uffd0-\uffd1\uffd8-\uffd9\uffdd-\uffdf\uffe7\uffef-\ufff8\ufffe-\uffff\U0001000c\U00010027\U0001003b\U0001003e\U0001004e-\U0001004f\U0001005e-\U0001007f\U000100fb-\U000100ff\U00010103-\U00010106\U00010134-\U00010136\U0001018b-\U0001018f\U00010
19c-\U000101cf\U000101fe-\U0001027f\U0001029d-\U0001029f\U000102d1-\U000102ff\U0001031f\U00010324-\U0001032f\U0001034b-\U0001037f\U0001039e\U000103c4-\U000103c7\U000103d6-\U000103ff\U0001049e-\U0001049f\U000104aa-\U000107ff\U00010806-\U00010807\U00010809\U00010836\U00010839-\U0001083b\U0001083d-\U0001083e\U00010856\U00010860-\U000108ff\U0001091c-\U0001091e\U0001093a-\U0001093e\U00010940-\U0001097f\U000109b8-\U000109bd\U000109c0-\U000109ff\U00010a04\U00010a07-\U00010a0b\U00010a14\U00010a18\U00010a34-\U00010a37\U00010a3b-\U00010a3e\U00010a48-\U00010a4f\U00010a59-\U00010a5f\U00010a80-\U00010aff\U00010b36-\U00010b38\U00010b56-\U00010b57\U00010b73-\U00010b77\U00010b80-\U00010bff\U00010c49-\U00010e5f\U00010e7f-\U00010fff\U0001104e-\U00011051\U00011070-\U0001107f\U000110c2-\U000110cf\U000110e9-\U000110ef\U000110fa-\U000110ff\U00011135\U00011144-\U0001117f\U000111c9-\U000111cf\U000111da-\U0001167f\U000116b8-\U000116bf\U000116ca-\U00011fff\U0001236f-\U000123ff\U00012463-\U0001246f\U00012474-\U00012fff\U0001342f-\U000167ff\U00016a39-\U00016eff\U00016f45-\U00016f4f\U00016f7f-\U00016f8e\U00016fa0-\U0001afff\U0001b002-\U0001cfff\U0001d0f6-\U0001d0ff\U0001d127-\U0001d128\U0001d1de-\U0001d1ff\U0001d246-\U0001d2ff\U0001d357-\U0001d35f\U0001d372-\U0001d3ff\U0001d455\U0001d49d\U0001d4a0-\U0001d4a1\U0001d4a3-\U0001d4a4\U0001d4a7-\U0001d4a8\U0001d4ad\U0001d4ba\U0001d4bc\U0001d4c4\U0001d506\U0001d50b-\U0001d50c\U0001d515\U0001d51d\U0001d53a\U0001d53f\U0001d545\U0001d547-\U0001d549\U0001d551\U0001d6a6-\U0001d6a7\U0001d7cc-\U0001d7cd\U0001d800-\U0001edff\U0001ee04\U0001ee20\U0001ee23\U0001ee25-\U0001ee26\U0001ee28\U0001ee33\U0001ee38\U0001ee3a\U0001ee3c-\U0001ee41\U0001ee43-\U0001ee46\U0001ee48\U0001ee4a\U0001ee4c\U0001ee50\U0001ee53\U0001ee55-\U0001ee56\U0001ee58\U0001ee5a\U0001ee5c\U0001ee5e\U0001ee60\U0001ee63\U0001ee65-\U0001ee66\U0001ee6b\U0001ee73\U0001ee78\U0001ee7d\U0001ee7f\U0001ee8a\U0001ee9c-\U0001eea0\U0001eea4\U0001eeaa\U0001eebc-\U0001eeef\U0001eef2-\U0001efff\U0001f02c-\U00
01f02f\U0001f094-\U0001f09f\U0001f0af-\U0001f0b0\U0001f0bf-\U0001f0c0\U0001f0d0\U0001f0e0-\U0001f0ff\U0001f10b-\U0001f10f\U0001f12f\U0001f16c-\U0001f16f\U0001f19b-\U0001f1e5\U0001f203-\U0001f20f\U0001f23b-\U0001f23f\U0001f249-\U0001f24f\U0001f252-\U0001f2ff\U0001f321-\U0001f32f\U0001f336\U0001f37d-\U0001f37f\U0001f394-\U0001f39f\U0001f3c5\U0001f3cb-\U0001f3df\U0001f3f1-\U0001f3ff\U0001f43f\U0001f441\U0001f4f8\U0001f4fd-\U0001f4ff\U0001f53e-\U0001f53f\U0001f544-\U0001f54f\U0001f568-\U0001f5fa\U0001f641-\U0001f644\U0001f650-\U0001f67f\U0001f6c6-\U0001f6ff\U0001f774-\U0001ffff\U0002a6d7-\U0002a6ff\U0002b735-\U0002b73f\U0002b81e-\U0002f7ff\U0002fa1e-\U000e0000\U000e0002-\U000e001f\U000e0080-\U000e00ff\U000e01f0-\U0010ffff"
}
ascii_script_extensions = {
"^arabic": "\x00-\xff",
"^armenian": "\x00-\xff",
"^avestan": "\x00-\xff",
"^balinese": "\x00-\xff",
"^bamum": "\x00-\xff",
"^batak": "\x00-\xff",
"^bengali": "\x00-\xff",
"^bopomofo": "\x00-\xff",
"^brahmi": "\x00-\xff",
"^braille": "\x00-\xff",
"^buginese": "\x00-\xff",
"^buhid": "\x00-\xff",
"^canadianaboriginal": "\x00-\xff",
"^carian": "\x00-\xff",
"^chakma": "\x00-\xff",
"^cham": "\x00-\xff",
"^cherokee": "\x00-\xff",
"^common": "\x41-\x5a\x61-\x7a\xaa\xba\xc0-\xd6\xd8-\xf6\xf8-\xff",
"^coptic": "\x00-\xff",
"^cuneiform": "\x00-\xff",
"^cypriot": "\x00-\xff",
"^cyrillic": "\x00-\xff",
"^deseret": "\x00-\xff",
"^devanagari": "\x00-\xff",
"^egyptianhieroglyphs": "\x00-\xff",
"^ethiopic": "\x00-\xff",
"^georgian": "\x00-\xff",
"^glagolitic": "\x00-\xff",
"^gothic": "\x00-\xff",
"^greek": "\x00-\xff",
"^gujarati": "\x00-\xff",
"^gurmukhi": "\x00-\xff",
"^han": "\x00-\xff",
"^hangul": "\x00-\xff",
"^hanunoo": "\x00-\xff",
"^hebrew": "\x00-\xff",
"^hiragana": "\x00-\xff",
"^imperialaramaic": "\x00-\xff",
"^inherited": "\x00-\xff",
"^inscriptionalpahlavi": "\x00-\xff",
"^inscriptionalparthian": "\x00-\xff",
"^javanese": "\x00-\xff",
"^kaithi": "\x00-\xff",
"^kannada": "\x00-\xff",
"^katakana": "\x00-\xff",
"^kayahli": "\x00-\xff",
"^kharoshthi": "\x00-\xff",
"^khmer": "\x00-\xff",
"^lao": "\x00-\xff",
"^latin": "\x00-\x40\x5c\x5b-\x60\x7b-\xa9\xab-\xb9\xbb-\xbf\xd7\xf7",
"^lepcha": "\x00-\xff",
"^limbu": "\x00-\xff",
"^linearb": "\x00-\xff",
"^lisu": "\x00-\xff",
"^lycian": "\x00-\xff",
"^lydian": "\x00-\xff",
"^malayalam": "\x00-\xff",
"^mandaic": "\x00-\xff",
"^meeteimayek": "\x00-\xff",
"^meroiticcursive": "\x00-\xff",
"^meroitichieroglyphs": "\x00-\xff",
"^miao": "\x00-\xff",
"^mongolian": "\x00-\xff",
"^myanmar": "\x00-\xff",
"^newtailue": "\x00-\xff",
"^nko": "\x00-\xff",
"^ogham": "\x00-\xff",
"^olchiki": "\x00-\xff",
"^olditalic": "\x00-\xff",
"^oldpersian": "\x00-\xff",
"^oldsoutharabian": "\x00-\xff",
"^oldturkic": "\x00-\xff",
"^oriya": "\x00-\xff",
"^osmanya": "\x00-\xff",
"^phagspa": "\x00-\xff",
"^phoenician": "\x00-\xff",
"^rejang": "\x00-\xff",
"^runic": "\x00-\xff",
"^samaritan": "\x00-\xff",
"^saurashtra": "\x00-\xff",
"^sharada": "\x00-\xff",
"^shavian": "\x00-\xff",
"^sinhala": "\x00-\xff",
"^sorasompeng": "\x00-\xff",
"^sundanese": "\x00-\xff",
"^sylotinagri": "\x00-\xff",
"^syriac": "\x00-\xff",
"^tagalog": "\x00-\xff",
"^tagbanwa": "\x00-\xff",
"^taile": "\x00-\xff",
"^taitham": "\x00-\xff",
"^taiviet": "\x00-\xff",
"^takri": "\x00-\xff",
"^tamil": "\x00-\xff",
"^telugu": "\x00-\xff",
"^thaana": "\x00-\xff",
"^thai": "\x00-\xff",
"^tibetan": "\x00-\xff",
"^tifinagh": "\x00-\xff",
"^ugaritic": "\x00-\xff",
"^vai": "\x00-\xff",
"^yi": "\x00-\xff",
"^zzzz": "\x00-\xff",
"arabic": "",
"armenian": "",
"avestan": "",
"balinese": "",
"bamum": "",
"batak": "",
"bengali": "",
"bopomofo": "",
"brahmi": "",
"braille": "",
"buginese": "",
"buhid": "",
"canadianaboriginal": "",
"carian": "",
"chakma": "",
"cham": "",
"cherokee": "",
"common": "\x00-\x40\x5c\x5b-\x60\x7b-\xa9\xab-\xb9\xbb-\xbf\xd7\xf7",
"coptic": "",
"cuneiform": "",
"cypriot": "",
"cyrillic": "",
"deseret": "",
"devanagari": "",
"egyptianhieroglyphs": "",
"ethiopic": "",
"georgian": "",
"glagolitic": "",
"gothic": "",
"greek": "",
"gujarati": "",
"gurmukhi": "",
"han": "",
"hangul": "",
"hanunoo": "",
"hebrew": "",
"hiragana": "",
"imperialaramaic": "",
"inherited": "",
"inscriptionalpahlavi": "",
"inscriptionalparthian": "",
"javanese": "",
"kaithi": "",
"kannada": "",
"katakana": "",
"kayahli": "",
"kharoshthi": "",
"khmer": "",
"lao": "",
"latin": "\x41-\x5a\x61-\x7a\xaa\xba\xc0-\xd6\xd8-\xf6\xf8-\xff",
"lepcha": "",
"limbu": "",
"linearb": "",
"lisu": "",
"lycian": "",
"lydian": "",
"malayalam": "",
"mandaic": "",
"meeteimayek": "",
"meroiticcursive": "",
"meroitichieroglyphs": "",
"miao": "",
"mongolian": "",
"myanmar": "",
"newtailue": "",
"nko": "",
"ogham": "",
"olchiki": "",
"olditalic": "",
"oldpersian": "",
"oldsoutharabian": "",
"oldturkic": "",
"oriya": "",
"osmanya": "",
"phagspa": "",
"phoenician": "",
"rejang": "",
"runic": "",
"samaritan": "",
"saurashtra": "",
"sharada": "",
"shavian": "",
"sinhala": "",
"sorasompeng": "",
"sundanese": "",
"sylotinagri": "",
"syriac": "",
"tagalog": "",
"tagbanwa": "",
"taile": "",
"taitham": "",
"taiviet": "",
"takri": "",
"tamil": "",
"telugu": "",
"thaana": "",
"thai": "",
"tibetan": "",
"tifinagh": "",
"ugaritic": "",
"vai": "",
"yi": "",
"zzzz": ""
}
| 113.25
| 7,831
| 0.729486
|
4a10022e574310fa1bb9e6405ff1da2cc6ef8f01
| 19,219
|
py
|
Python
|
chromeos-config/cros_config_host/libcros_config_host_unittest.py
|
Toromino/chromiumos-platform2
|
97e6ba18f0e5ab6723f3448a66f82c1a07538d87
|
[
"BSD-3-Clause"
] | null | null | null |
chromeos-config/cros_config_host/libcros_config_host_unittest.py
|
Toromino/chromiumos-platform2
|
97e6ba18f0e5ab6723f3448a66f82c1a07538d87
|
[
"BSD-3-Clause"
] | null | null | null |
chromeos-config/cros_config_host/libcros_config_host_unittest.py
|
Toromino/chromiumos-platform2
|
97e6ba18f0e5ab6723f3448a66f82c1a07538d87
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=class-missing-docstring
"""The unit test suite for the libcros_config_host.py library"""
from __future__ import print_function
from collections import OrderedDict
from contextlib import contextmanager
from io import StringIO
import copy
import json
import os
import sys
import unittest
from libcros_config_host import CrosConfig
from libcros_config_host_base import BaseFile, SymlinkedFile, FirmwareInfo
from libcros_config_host_base import FirmwareImage, DeviceSignerInfo
# Test configuration file consumed by these tests, relative to this file.
YAML_FILE = '../test_data/test.yaml'
# All top-level model names defined in the test config.
MODELS = sorted(['some', 'another', 'whitelabel'])
# BCS (Binary Component Server) bucket URIs holding each board's firmware.
ANOTHER_BUCKET = ('gs://chromeos-binaries/HOME/bcs-another-private/overlay-'
                  'another-private/chromeos-base/chromeos-firmware-another/')
SOME_BUCKET = ('gs://chromeos-binaries/HOME/bcs-some-private/'
               'overlay-some-private/chromeos-base/chromeos-firmware-some/')
# Firmware tarball names expected for each board (EC, main RO, main RW).
SOME_FIRMWARE_FILES = ['Some_EC.1111.11.1.tbz2',
                       'Some.1111.11.1.tbz2',
                       'Some_RW.1111.11.1.tbz2']
ANOTHER_FIRMWARE_FILES = ['Another_EC.1111.11.1.tbz2',
                          'Another.1111.11.1.tbz2',
                          'Another_RW.1111.11.1.tbz2']
# Install-path prefixes where firmware files land on the device image.
LIB_FIRMWARE = '/lib/firmware/'
TOUCH_FIRMWARE = '/opt/google/touch/firmware/'
# Use this to suppress stdout/stderr output:
# with capture_sys_output() as (stdout, stderr)
# ...do something...
@contextmanager
def capture_sys_output():
    """Context manager that captures stdout/stderr into StringIO buffers.

    Yields:
        A (stdout_buffer, stderr_buffer) pair of StringIO objects holding
        everything written to sys.stdout / sys.stderr inside the block.
        The real streams are restored on exit, even on exception.
    """
    saved_streams = (sys.stdout, sys.stderr)
    out_buffer = StringIO()
    err_buffer = StringIO()
    sys.stdout = out_buffer
    sys.stderr = err_buffer
    try:
        yield out_buffer, err_buffer
    finally:
        sys.stdout, sys.stderr = saved_streams
def _FormatNamedTuplesDict(value):
result = copy.deepcopy(value)
for key, val in result.items():
result[key] = val._asdict()
return json.dumps(result, indent=2)
def _FormatListNamedTuplesDict(value):
result = copy.deepcopy(value)
for key, values in result.items():
result[key] = [value._asdict() for value in values]
return json.dumps(result, indent=2, sort_keys=True)
class CrosConfigHostTest(unittest.TestCase):
    """Tests for the host-side CrosConfig API against the test YAML config."""

    def setUp(self):
        # Resolve the test YAML relative to this file so the suite can run
        # from any working directory.
        self.filepath = os.path.join(os.path.dirname(__file__), YAML_FILE)

    def _assertEqualsNamedTuplesDict(self, expected, result):
        """Assert two dicts of namedtuples are equal, compared as JSON text."""
        self.assertEqual(
            _FormatNamedTuplesDict(expected), _FormatNamedTuplesDict(result))

    def _assertEqualsListNamedTuplesDict(self, expected, result):
        """Assert two dicts of namedtuple lists are equal, compared as JSON."""
        self.assertEqual(
            _FormatListNamedTuplesDict(expected),
            _FormatListNamedTuplesDict(result))

    def testGetProperty(self):
        """Test reading a single property, including the missing-key case."""
        config = CrosConfig(self.filepath)
        another = config.GetConfig('another')
        self.assertEqual(another.GetProperty('/', 'wallpaper'), 'default')
        with self.assertRaises(Exception):
            another.GetProperty('/', 'missing')

    def testModels(self):
        """Test that every expected model appears in the model list."""
        config = CrosConfig(self.filepath)
        models = config.GetModelList()
        for model in MODELS:
            self.assertIn(model, models)

    def testGetFirmwareUris(self):
        """Test firmware URIs for a single model."""
        config = CrosConfig(self.filepath)
        firmware_uris = config.GetConfig('another').GetFirmwareUris()
        self.assertSequenceEqual(
            firmware_uris,
            sorted([ANOTHER_BUCKET + fname for fname in ANOTHER_FIRMWARE_FILES]))

    def testGetSharedFirmwareUris(self):
        """Test firmware URIs aggregated across all models."""
        config = CrosConfig(self.filepath)
        firmware_uris = config.GetFirmwareUris()
        expected = sorted(
            [ANOTHER_BUCKET + fname for fname in ANOTHER_FIRMWARE_FILES] +
            [SOME_BUCKET + fname for fname in SOME_FIRMWARE_FILES])
        self.assertSequenceEqual(firmware_uris, expected)

    def testGetArcFiles(self):
        """Test the list of ARC hardware/media description files."""
        config = CrosConfig(self.filepath)
        arc_files = config.GetArcFiles()
        self.assertEqual(arc_files, [
            BaseFile(
                source='some/hardware_features.xml',
                dest='/etc/some_hardware_features.xml'),
            BaseFile(
                source='some/media_profiles.xml',
                dest='/etc/some_media_profiles.xml'),
        ])

    def testGetThermalFiles(self):
        """Test the list of DPTF thermal configuration files."""
        config = CrosConfig(self.filepath)
        thermal_files = config.GetThermalFiles()
        self.assertEqual(
            thermal_files,
            [BaseFile('another/dptf.dv', '/etc/dptf/another/dptf.dv'),
             BaseFile('some_notouch/dptf.dv', '/etc/dptf/some_notouch/dptf.dv'),
             BaseFile('some_touch/dptf.dv', '/etc/dptf/some_touch/dptf.dv')])

    def testGetFirmwareBuildTargets(self):
        """Test build-target lookup, with and without the FW_NAME filter."""
        config = CrosConfig(self.filepath)
        self.assertSequenceEqual(config.GetFirmwareBuildTargets('coreboot'),
                                 ['another', 'some'])
        # Setting FW_NAME in the environment restricts results to that board.
        os.environ['FW_NAME'] = 'another'
        self.assertSequenceEqual(config.GetFirmwareBuildTargets('coreboot'),
                                 ['another'])
        self.assertSequenceEqual(config.GetFirmwareBuildTargets('ec'),
                                 ['another', 'another_base', 'extra1', 'extra2'])
        del os.environ['FW_NAME']

    def testFileTree(self):
        """Test that we can obtain a file tree"""
        config = CrosConfig(self.filepath)
        node = config.GetFileTree()
        self.assertEqual(node.name, '')
        self.assertEqual(sorted(node.children.keys()),
                         ['etc', 'lib', 'opt', 'usr'])
        etc = node.children['etc']
        self.assertEqual(etc.name, 'etc')
        cras = etc.children['cras']
        self.assertEqual(cras.name, 'cras')
        another = cras.children['another']
        self.assertEqual(sorted(another.children.keys()),
                         ['a-card', 'dsp.ini'])

    def testShowTree(self):
        """Test that we can show a file tree"""
        config = CrosConfig(self.filepath)
        tree = config.GetFileTree()
        with capture_sys_output() as (stdout, stderr):
            config.ShowTree('/', tree)
        self.assertEqual(stderr.getvalue(), '')
        lines = [line.strip() for line in stdout.getvalue().splitlines()]
        self.assertEqual(lines[0].split(), ['Size', 'Path'])
        self.assertEqual(lines[1], '/')
        self.assertEqual(lines[2], 'etc/')
        self.assertEqual(lines[3].split(), ['missing', 'cras/'])

    def testFirmwareBuildCombinations(self):
        """Test generating a dict of firmware build combinations."""
        config = CrosConfig(self.filepath)
        expected = OrderedDict(
            [('another', ['another', 'another']),
             ('some', ['some', 'some']),
             ('some2', [None, None])])
        result = config.GetFirmwareBuildCombinations(['coreboot', 'depthcharge'])
        self.assertEqual(result, expected)
        # Unspecified targets should be represented as None.
        expected = OrderedDict(
            [('another', ['some/another']),
             ('some', [None]),
             ('some2', ['experimental/some2'])])
        result = config.GetFirmwareBuildCombinations(['zephyr-ec'])
        self.assertEqual(result, expected)
        # FW_NAME restricts the combinations to the named board only.
        os.environ['FW_NAME'] = 'another'
        expected = OrderedDict([('another', ['another', 'another'])])
        result = config.GetFirmwareBuildCombinations(['coreboot', 'depthcharge'])
        self.assertEqual(result, expected)
        del os.environ['FW_NAME']

    def testGetWallpaper(self):
        """Test that we can access the wallpaper information"""
        config = CrosConfig(self.filepath)
        wallpaper = config.GetWallpaperFiles()
        self.assertEqual(['default',
                          'some',
                          'wallpaper-wl1',
                          'wallpaper-wl2'],
                         wallpaper)

    def testGetTouchFirmwareFiles(self):
        """Test touch firmware file lists per-model and aggregated."""
        # NOTE(review): this helper appears to be unused below — candidate
        # for removal.
        def _GetFile(source, symlink):
            """Helper to return a suitable SymlinkedFile"""
            return SymlinkedFile(source, TOUCH_FIRMWARE + source,
                                 LIB_FIRMWARE + symlink)

        config = CrosConfig(self.filepath)
        touch_files = config.GetConfig('another').GetTouchFirmwareFiles()
        # pylint: disable=line-too-long
        self.assertEqual(
            touch_files,
            [SymlinkedFile(source='some_stylus_vendor/another-version.hex',
                           dest='/opt/google/touch/firmware/some_stylus_vendor/another-version.hex',
                           symlink='/lib/firmware/some_stylus_vendor_firmware_ANOTHER.bin'),
             SymlinkedFile(source='some_touch_vendor/some-pid_some-version.bin',
                           dest='/opt/google/touch/firmware/some_touch_vendor/some-pid_some-version.bin',
                           symlink='/lib/firmware/some_touch_vendorts_i2c_some-pid.bin')])
        touch_files = config.GetConfig('some').GetTouchFirmwareFiles()
        # This checks that duplicate processing works correct, since both models
        # have the same wacom firmware
        self.assertEqual(
            touch_files,
            [SymlinkedFile(source='some_stylus_vendor/some-version.hex',
                           dest='/opt/google/touch/firmware/some_stylus_vendor/some-version.hex',
                           symlink='/lib/firmware/some_stylus_vendor_firmware_SOME.bin'),
             SymlinkedFile(source='some_touch_vendor/some-pid_some-version.bin',
                           dest='/opt/google/touch/firmware/some_touch_vendor/some-pid_some-version.bin',
                           symlink='/lib/firmware/some_touch_vendorts_i2c_some-pid.bin'),
             SymlinkedFile(source='some_touch_vendor/some-other-pid_some-other-version.bin',
                           dest='/opt/google/touch/firmware/some_touch_vendor/some-other-pid_some-other-version.bin',
                           symlink='/lib/firmware/some_touch_vendorts_i2c_some-other-pid.bin')])
        touch_files = config.GetTouchFirmwareFiles()
        # Compare as a set: the aggregated list may repeat shared entries.
        expected = set(
            [SymlinkedFile(source='some_stylus_vendor/another-version.hex',
                           dest='/opt/google/touch/firmware/some_stylus_vendor/another-version.hex',
                           symlink='/lib/firmware/some_stylus_vendor_firmware_ANOTHER.bin'),
             SymlinkedFile(source='some_stylus_vendor/some-version.hex',
                           dest='/opt/google/touch/firmware/some_stylus_vendor/some-version.hex',
                           symlink='/lib/firmware/some_stylus_vendor_firmware_SOME.bin'),
             SymlinkedFile(source='some_touch_vendor/some-pid_some-version.bin',
                           dest='/opt/google/touch/firmware/some_touch_vendor/some-pid_some-version.bin',
                           symlink='/lib/firmware/some_touch_vendorts_i2c_some-pid.bin'),
             SymlinkedFile(source='some_touch_vendor/some-other-pid_some-other-version.bin',
                           dest='/opt/google/touch/firmware/some_touch_vendor/some-other-pid_some-other-version.bin',
                           symlink='/lib/firmware/some_touch_vendorts_i2c_some-other-pid.bin'),
             SymlinkedFile(source='some_touch_vendor/some-pid_some-version.bin',
                           dest='/opt/google/touch/firmware/some_touch_vendor/some-pid_some-version.bin',
                           symlink='/lib/firmware/some_touch_vendorts_i2c_some-pid.bin')])
        self.assertEqual(set(touch_files), expected)

    def testGetAudioFiles(self):
        """Test the aggregated list of CRAS / topology / UCM audio files."""
        config = CrosConfig(self.filepath)
        audio_files = config.GetAudioFiles()
        expected = [
            BaseFile(source='cras-config/another/dsp.ini',
                     dest='/etc/cras/another/dsp.ini'),
            BaseFile(source='cras-config/another/a-card',
                     dest='/etc/cras/another/a-card'),
            BaseFile(source='cras-config/some/dsp.ini',
                     dest='/etc/cras/some/dsp.ini'),
            BaseFile(source='cras-config/some/a-card',
                     dest='/etc/cras/some/a-card'),
            BaseFile(source='cras-config/some2/dsp.ini',
                     dest='/etc/cras/some2/dsp.ini'),
            BaseFile(source='cras-config/some2/a-card',
                     dest='/etc/cras/some2/a-card'),
            BaseFile(source='topology/another-tplg.bin',
                     dest='/lib/firmware/another-tplg.bin'),
            BaseFile(source='topology/some-tplg.bin',
                     dest='/lib/firmware/some-tplg.bin'),
            BaseFile(source='ucm-config/a-card.another/HiFi.conf',
                     dest='/usr/share/alsa/ucm/a-card.another/HiFi.conf'),
            BaseFile(source='ucm-config/a-card.another/a-card.another.conf',
                     dest='/usr/share/alsa/ucm/a-card.another/a-card.another.conf'),
            BaseFile(source='ucm-config/a-card.some/HiFi.conf',
                     dest='/usr/share/alsa/ucm/a-card.some/HiFi.conf'),
            BaseFile(source='ucm-config/a-card.some/a-card.some.conf',
                     dest='/usr/share/alsa/ucm/a-card.some/a-card.some.conf'),
            BaseFile(source='ucm-config/a-card.some2/HiFi.conf',
                     dest='/usr/share/alsa/ucm/a-card.some2/HiFi.conf'),
            BaseFile(source='ucm-config/a-card.some2/a-card.some2.conf',
                     dest='/usr/share/alsa/ucm/a-card.some2/a-card.some2.conf')]
        self.assertEqual(audio_files, sorted(expected))

    def testFirmware(self):
        """Test access to firmware information"""
        expected = OrderedDict(
            [('another',
              FirmwareInfo(model='another',
                           shared_model='another',
                           key_id='ANOTHER',
                           have_image=True,
                           bios_build_target='another',
                           ec_build_target='another',
                           main_image_uri='bcs://Another.1111.11.1.tbz2',
                           main_rw_image_uri='bcs://Another_RW.1111.11.1.tbz2',
                           ec_image_uri='bcs://Another_EC.1111.11.1.tbz2',
                           pd_image_uri='',
                           sig_id='another',
                           brand_code='')),
             ('some',
              FirmwareInfo(model='some',
                           shared_model='some',
                           key_id='SOME',
                           have_image=True,
                           bios_build_target='some',
                           ec_build_target='some',
                           main_image_uri='bcs://Some.1111.11.1.tbz2',
                           main_rw_image_uri='bcs://Some_RW.1111.11.1.tbz2',
                           ec_image_uri='bcs://Some_EC.1111.11.1.tbz2',
                           pd_image_uri='',
                           sig_id='some',
                           brand_code='')),
             ('some2',
              FirmwareInfo(model='some2',
                           shared_model='some2',
                           key_id='SOME',
                           have_image=True,
                           bios_build_target=None,
                           ec_build_target=None,
                           main_image_uri='',
                           main_rw_image_uri='',
                           ec_image_uri='',
                           pd_image_uri='',
                           sig_id='some2',
                           brand_code='')),
             ('whitelabel',
              FirmwareInfo(model='whitelabel',
                           shared_model='some',
                           key_id='WHITELABEL1',
                           have_image=True,
                           bios_build_target='some',
                           ec_build_target='some',
                           main_image_uri='bcs://Some.1111.11.1.tbz2',
                           main_rw_image_uri='bcs://Some_RW.1111.11.1.tbz2',
                           ec_image_uri='bcs://Some_EC.1111.11.1.tbz2',
                           pd_image_uri='',
                           sig_id='sig-id-in-customization-id',
                           brand_code='')),
             ('whitelabel-whitelabel1',
              FirmwareInfo(model='whitelabel-whitelabel1',
                           shared_model='some',
                           key_id='WHITELABEL1',
                           have_image=False,
                           bios_build_target='some',
                           ec_build_target='some',
                           main_image_uri='bcs://Some.1111.11.1.tbz2',
                           main_rw_image_uri='bcs://Some_RW.1111.11.1.tbz2',
                           ec_image_uri='bcs://Some_EC.1111.11.1.tbz2',
                           pd_image_uri='',
                           sig_id='whitelabel-whitelabel1',
                           brand_code='WLBA')),
             ('whitelabel-whitelabel2',
              FirmwareInfo(model='whitelabel-whitelabel2',
                           shared_model='some',
                           key_id='WHITELABEL2',
                           have_image=False,
                           bios_build_target='some',
                           ec_build_target='some',
                           main_image_uri='bcs://Some.1111.11.1.tbz2',
                           main_rw_image_uri='bcs://Some_RW.1111.11.1.tbz2',
                           ec_image_uri='bcs://Some_EC.1111.11.1.tbz2',
                           pd_image_uri='',
                           sig_id='whitelabel-whitelabel2',
                           brand_code='WLBB'))])
        result = CrosConfig(self.filepath).GetFirmwareInfo()
        self._assertEqualsNamedTuplesDict(expected, result)

    def testFirmwareConfigs(self):
        """Test access to firmware configs."""
        expected = {
            'some': [
                FirmwareImage(
                    type='ap',
                    build_target='some',
                    image_uri='bcs://Some.1111.11.1.tbz2'),
                FirmwareImage(
                    type='rw',
                    build_target='some',
                    image_uri='bcs://Some_RW.1111.11.1.tbz2'),
                FirmwareImage(
                    type='ec',
                    build_target='some',
                    image_uri='bcs://Some_EC.1111.11.1.tbz2')
            ],
            'another': [
                FirmwareImage(
                    type='ap',
                    build_target='another',
                    image_uri='bcs://Another.1111.11.1.tbz2'),
                FirmwareImage(
                    type='rw',
                    build_target='another',
                    image_uri='bcs://Another_RW.1111.11.1.tbz2'),
                FirmwareImage(
                    type='ec',
                    build_target='another',
                    image_uri='bcs://Another_EC.1111.11.1.tbz2')
            ],
            'some2': [
            ],
        }
        result = CrosConfig(self.filepath).GetFirmwareConfigs()
        self._assertEqualsListNamedTuplesDict(expected, result)

    def testFirmwareConfigsByDevice(self):
        """Test access to firmware config names."""
        expected = {
            'some': 'some',
            'some2': 'some2',
            'another': 'another',
            'whitelabel': 'some',
            'whitelabel-whitelabel1': 'some',
            'whitelabel-whitelabel2': 'some',
        }
        result = CrosConfig(self.filepath).GetFirmwareConfigsByDevice()
        self.assertEqual(result, expected)

    def testSignerInfoByDevice(self):
        """Test access to device signer info."""
        expected = {
            'whitelabel-whitelabel2':
                DeviceSignerInfo(
                    key_id='WHITELABEL2', sig_id='whitelabel-whitelabel2'),
            'whitelabel-whitelabel1':
                DeviceSignerInfo(
                    key_id='WHITELABEL1', sig_id='whitelabel-whitelabel1'),
            'some':
                DeviceSignerInfo(key_id='SOME', sig_id='some'),
            'some2':
                DeviceSignerInfo(key_id='SOME', sig_id='some2'),
            'whitelabel':
                DeviceSignerInfo(
                    key_id='WHITELABEL1', sig_id='sig-id-in-customization-id'),
            'another':
                DeviceSignerInfo(key_id='ANOTHER', sig_id='another')
        }
        result = CrosConfig(self.filepath).GetDeviceSignerInfo()
        self.assertEqual(result, expected)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| 42.332599
| 113
| 0.599459
|
4a10028fc94c6d58d26b4c4197637a7e984cb776
| 15,698
|
py
|
Python
|
econml/tests/test_integration.py
|
lwschm/EconML
|
6e7b107e1f8a7a5922489eb81143db8656ff01af
|
[
"BSD-3-Clause"
] | 1
|
2022-03-26T16:05:27.000Z
|
2022-03-26T16:05:27.000Z
|
econml/tests/test_integration.py
|
QPC-database/EconML
|
0cec5ab3d4cb5d681f7d8600fb261d93ca528020
|
[
"BSD-3-Clause"
] | null | null | null |
econml/tests/test_integration.py
|
QPC-database/EconML
|
0cec5ab3d4cb5d681f7d8600fb261d93ca528020
|
[
"BSD-3-Clause"
] | 1
|
2021-08-20T09:06:42.000Z
|
2021-08-20T09:06:42.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
import pandas as pd
import unittest
import pytest
import keras
import tensorflow as tf
from econml.drlearner import LinearDRLearner, SparseLinearDRLearner, ForestDRLearner
from econml.dml import LinearDML, SparseLinearDML, ForestDML
from econml.ortho_forest import DMLOrthoForest, DROrthoForest
from econml.sklearn_extensions.linear_model import WeightedLasso
from econml.metalearners import XLearner, SLearner, TLearner
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import GradientBoostingRegressor, GradientBoostingClassifier
from sklearn.linear_model import LinearRegression, MultiTaskLasso, LassoCV
from sklearn.preprocessing import PolynomialFeatures, FunctionTransformer
from econml.ortho_iv import LinearIntentToTreatDRIV
from econml.deepiv import DeepIVEstimator
class TestPandasIntegration(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        """Build one shared random DataFrame used by every test in the class."""
        # Fix the seed so the generated data (and fitted estimators) are
        # deterministic across runs.
        np.random.seed(123)
        # DGP constants
        cls.n_controls = 10
        cls.n_features = 2
        cls.n = 100
        # Define data features
        # Added `_df` to names to be different from the default cate_estimator names
        cls.controls = [f"W{i}_df" for i in range(cls.n_controls)]
        cls.features = [f"X{i}_df" for i in range(cls.n_features)]
        cls.instrument = ["Z0_df"]
        cls.outcome = ["Y0_df"]
        cls.cont_treat = ["T0_df"]
        cls.bin_treat = ["T2_df"]
        cls.cat_treat = ["T_cat"]
        cls.cat_treat_labels = ["None", "One", "Two"]
        cls.outcome_multi = ["Y0_df", "Y1_df"]
        cls.cont_treat_multi = ["T0_df", "T1_df"]
        # Generate data
        # NOTE: the draw order below matters for reproducibility — do not
        # reorder these calls without updating any seed-dependent expectations.
        d = {}
        d.update({w: np.random.normal(size=cls.n) for w in cls.controls})
        d.update({x: np.random.normal(size=cls.n) for x in cls.features})
        d.update({t: np.random.uniform(size=cls.n) for t in cls.cont_treat_multi})
        d.update({t: np.random.binomial(1, 0.5, size=cls.n) for t in cls.bin_treat})
        d.update({t: np.random.choice(["None", "One", "Two"], size=cls.n, p=[0.4, 0.3, 0.3]) for t in cls.cat_treat})
        d.update({z: np.random.binomial(1, 0.5, size=cls.n) for z in cls.instrument})
        d.update({y: np.random.normal(size=cls.n) for y in cls.outcome_multi})
        cls.df = pd.DataFrame(d)
def test_dml(self):
#################################
# Single treatment and outcome #
#################################
X = TestPandasIntegration.df[TestPandasIntegration.features]
W = TestPandasIntegration.df[TestPandasIntegration.controls]
Y = TestPandasIntegration.df[TestPandasIntegration.outcome]
T = TestPandasIntegration.df[TestPandasIntegration.cont_treat]
# Test LinearDML
est = LinearDML(model_y=LassoCV(), model_t=LassoCV())
est.fit(Y, T, X=X, W=W, inference='statsmodels')
treatment_effects = est.effect(X)
lb, ub = est.effect_interval(X, alpha=0.05)
self._check_input_names(est.summary()) # Check that names propagate as expected
# |--> Test featurizers
est.featurizer = PolynomialFeatures(degree=2, include_bias=False)
est.fit(Y, T, X=X, W=W, inference='statsmodels')
self._check_input_names(
est.summary(),
feat_comp=est.original_featurizer.get_feature_names(X.columns))
est.featurizer = FunctionTransformer()
est.fit(Y, T, X=X, W=W, inference='statsmodels')
self._check_input_names(
est.summary(),
feat_comp=[f"feat(X){i}" for i in range(TestPandasIntegration.n_features)])
est.featurizer = ColumnTransformer([('passthrough', 'passthrough', [0])])
est.fit(Y, T, X=X, W=W, inference='statsmodels')
# ColumnTransformer doesn't propagate column names
self._check_input_names(est.summary(), feat_comp=["x0"])
# |--> Test re-fit
est.featurizer = None
X1 = X.rename(columns={c: "{}_1".format(c) for c in X.columns})
est.fit(Y, T, X=X1, W=W, inference='statsmodels')
self._check_input_names(est.summary(), feat_comp=X1.columns)
# Test SparseLinearDML
est = SparseLinearDML(model_y=LassoCV(), model_t=LassoCV())
est.fit(Y, T, X=X, W=W, inference='debiasedlasso')
treatment_effects = est.effect(X)
lb, ub = est.effect_interval(X, alpha=0.05)
self._check_input_names(est.summary()) # Check that names propagate as expected
# Test ForestDML
est = ForestDML(model_y=GradientBoostingRegressor(), model_t=GradientBoostingRegressor())
est.fit(Y, T, X=X, W=W, inference='blb')
treatment_effects = est.effect(X)
lb, ub = est.effect_interval(X, alpha=0.05)
####################################
# Mutiple treatments and outcomes #
####################################
Y = TestPandasIntegration.df[TestPandasIntegration.outcome_multi]
T = TestPandasIntegration.df[TestPandasIntegration.cont_treat_multi]
# Test LinearDML
est = LinearDML(model_y=MultiTaskLasso(), model_t=MultiTaskLasso())
est.fit(Y, T, X=X, W=W, inference='statsmodels')
self._check_input_names(est.summary(), True, True) # Check that names propagate as expected
self._check_popsum_names(est.effect_inference(X).population_summary(), True)
est.fit(Y, T, X=X, W=W, inference='bootstrap') # Check bootstrap as well
self._check_input_names(est.summary(), True, True)
self._check_popsum_names(est.effect_inference(X).population_summary(), True)
# Test SparseLinearDML
est = SparseLinearDML(model_y=MultiTaskLasso(), model_t=MultiTaskLasso())
est.fit(Y, T, X=X, W=W, inference='debiasedlasso')
treatment_effects = est.effect(X)
lb, ub = est.effect_interval(X, alpha=0.05)
self._check_input_names(est.summary(), True, True) # Check that names propagate as expected
self._check_popsum_names(est.effect_inference(X).population_summary(), True)
def test_orf(self):
# Single outcome only, ORF does not support multiple outcomes
X = TestPandasIntegration.df[TestPandasIntegration.features]
W = TestPandasIntegration.df[TestPandasIntegration.controls]
Y = TestPandasIntegration.df[TestPandasIntegration.outcome]
T = TestPandasIntegration.df[TestPandasIntegration.cont_treat]
# Test DMLOrthoForest
est = DMLOrthoForest(
n_trees=100, max_depth=2, model_T=WeightedLasso(), model_Y=WeightedLasso())
est.fit(Y, T, X=X, W=W, inference='blb')
treatment_effects = est.effect(X)
lb, ub = est.effect_interval(X, alpha=0.05)
self._check_popsum_names(est.effect_inference(X).population_summary())
# Test DROrthoForest
est = DROrthoForest(n_trees=100, max_depth=2)
T = TestPandasIntegration.df[TestPandasIntegration.bin_treat]
est.fit(Y, T, X=X, W=W, inference='blb')
treatment_effects = est.effect(X)
lb, ub = est.effect_interval(X, alpha=0.05)
self._check_popsum_names(est.effect_inference(X).population_summary())
def test_metalearners(self):
X = TestPandasIntegration.df[TestPandasIntegration.features]
W = TestPandasIntegration.df[TestPandasIntegration.controls]
Y = TestPandasIntegration.df[TestPandasIntegration.outcome]
T = TestPandasIntegration.df[TestPandasIntegration.bin_treat]
# Test XLearner
# Skipping population summary names test because bootstrap inference is too slow
est = XLearner(models=GradientBoostingRegressor(),
propensity_model=GradientBoostingClassifier(),
cate_models=GradientBoostingRegressor())
est.fit(Y, T, X=np.hstack([X, W]))
treatment_effects = est.effect(np.hstack([X, W]))
# Test SLearner
est = SLearner(overall_model=GradientBoostingRegressor())
est.fit(Y, T, X=np.hstack([X, W]))
treatment_effects = est.effect(np.hstack([X, W]))
# Test TLearner
est = TLearner(models=GradientBoostingRegressor())
est.fit(Y, T, X=np.hstack([X, W]))
treatment_effects = est.effect(np.hstack([X, W]))
def test_drlearners(self):
X = TestPandasIntegration.df[TestPandasIntegration.features]
W = TestPandasIntegration.df[TestPandasIntegration.controls]
Y = TestPandasIntegration.df[TestPandasIntegration.outcome]
T = TestPandasIntegration.df[TestPandasIntegration.bin_treat]
# Test LinearDRLearner
est = LinearDRLearner(model_propensity=GradientBoostingClassifier(),
model_regression=GradientBoostingRegressor())
est.fit(Y, T, X=X, W=W, inference='statsmodels')
treatment_effects = est.effect(X)
lb, ub = est.effect_interval(X, alpha=0.05)
self._check_input_names(est.summary(T=1))
self._check_popsum_names(est.effect_inference(X).population_summary())
# Test SparseLinearDRLearner
est = SparseLinearDRLearner(model_propensity=GradientBoostingClassifier(),
model_regression=GradientBoostingRegressor())
est.fit(Y, T, X=X, W=W, inference='debiasedlasso')
treatment_effects = est.effect(X)
lb, ub = est.effect_interval(X, alpha=0.05)
self._check_input_names(est.summary(T=1))
self._check_popsum_names(est.effect_inference(X).population_summary())
# Test ForestDRLearner
est = ForestDRLearner(model_propensity=GradientBoostingClassifier(),
model_regression=GradientBoostingRegressor())
est.fit(Y, T, X=X, W=W, inference='blb')
treatment_effects = est.effect(X)
lb, ub = est.effect_interval(X, alpha=0.05)
self._check_popsum_names(est.effect_inference(X).population_summary())
def test_orthoiv(self):
X = TestPandasIntegration.df[TestPandasIntegration.features]
Y = TestPandasIntegration.df[TestPandasIntegration.outcome]
T = TestPandasIntegration.df[TestPandasIntegration.bin_treat]
Z = TestPandasIntegration.df[TestPandasIntegration.instrument]
# Test LinearIntentToTreatDRIV
est = LinearIntentToTreatDRIV(model_Y_X=GradientBoostingRegressor(),
model_T_XZ=GradientBoostingClassifier(),
flexible_model_effect=GradientBoostingRegressor())
est.fit(Y, T, Z=Z, X=X, inference='statsmodels')
treatment_effects = est.effect(X)
lb, ub = est.effect_interval(X, alpha=0.05)
self._check_input_names(est.summary()) # Check input names propagate
self._check_popsum_names(est.effect_inference(X).population_summary())
def test_deepiv(self):
X = TestPandasIntegration.df[TestPandasIntegration.features]
Y = TestPandasIntegration.df[TestPandasIntegration.outcome]
T = TestPandasIntegration.df[TestPandasIntegration.cont_treat]
Z = TestPandasIntegration.df[TestPandasIntegration.instrument]
# Test DeepIV
treatment_model = keras.Sequential([keras.layers.Dense(128, activation='relu', input_shape=(3,)),
keras.layers.Dropout(0.17),
keras.layers.Dense(64, activation='relu'),
keras.layers.Dropout(0.17),
keras.layers.Dense(32, activation='relu'),
keras.layers.Dropout(0.17)])
response_model = keras.Sequential([keras.layers.Dense(128, activation='relu', input_shape=(3,)),
keras.layers.Dropout(0.17),
keras.layers.Dense(64, activation='relu'),
keras.layers.Dropout(0.17),
keras.layers.Dense(32, activation='relu'),
keras.layers.Dropout(0.17),
keras.layers.Dense(1)])
est = DeepIVEstimator(n_components=10, # Number of gaussians in the mixture density networks)
m=lambda z, x: treatment_model(keras.layers.concatenate([z, x])), # Treatment model
h=lambda t, x: response_model(keras.layers.concatenate([t, x])), # Response model
n_samples=1 # Number of samples used to estimate the response
)
est.fit(Y, T, X=X, Z=Z)
treatment_effects = est.effect(X)
def test_cat_treatments(self):
X = TestPandasIntegration.df[TestPandasIntegration.features]
Y = TestPandasIntegration.df[TestPandasIntegration.outcome]
T = TestPandasIntegration.df[TestPandasIntegration.cat_treat]
# Test categorical treatments
est = LinearDML(discrete_treatment=True, linear_first_stages=False,
categories=TestPandasIntegration.cat_treat_labels)
est.fit(Y, T, X=X)
self._check_input_names(est.summary(), T_cat=True)
treat_name = "Category"
self._check_input_names(est.summary(treatment_names=[treat_name]), T_cat=True, treat_comp=[
f"{treat_name}_{t}" for t in TestPandasIntegration.cat_treat_labels[1:]])
# Check refit
est.fit(Y, T, X=X)
self._check_input_names(est.summary(), T_cat=True)
# Check refit after setting categories
est.categories = [f"{t}_1" for t in TestPandasIntegration.cat_treat_labels]
T = T.apply(lambda t: t + "_1")
est.fit(Y, T, X=X)
self._check_input_names(est.summary(), T_cat=True, treat_comp=[
f"{TestPandasIntegration.cat_treat[0]}_{t}_1" for t in
TestPandasIntegration.cat_treat_labels[1:]])
def _check_input_names(self, summary_table,
Y_multi=False, T_multi=False, T_cat=False, feat_comp=None, treat_comp=None):
index_name = np.array(summary_table.tables[0].data)[1:, 0]
if feat_comp is None:
feat_comp = TestPandasIntegration.features
if treat_comp is None:
if T_multi:
treat_comp = TestPandasIntegration.cont_treat_multi
if T_cat:
treat_comp = ["{}_{}".format(TestPandasIntegration.cat_treat[0], label)
for label in TestPandasIntegration.cat_treat_labels[1:]]
if Y_multi:
out_comp = TestPandasIntegration.outcome_multi
if T_cat or T_multi:
index_name_comp = [
f"{feat}|{outcome}|{treat}" for feat in feat_comp for outcome in out_comp for treat in treat_comp]
else:
index_name_comp = [
f"{feat}|{outcome}" for feat in feat_comp for outcome in out_comp]
else:
if T_cat or T_multi:
index_name_comp = [
f"{feat}|{treat}" for feat in feat_comp for treat in treat_comp]
else:
index_name_comp = feat_comp
np.testing.assert_array_equal(index_name, index_name_comp)
def _check_popsum_names(self, popsum, Y_multi=False):
np.testing.assert_array_equal(popsum.output_names,
TestPandasIntegration.outcome_multi if Y_multi
else TestPandasIntegration.outcome)
| 54.131034
| 118
| 0.633393
|
4a1002d2002fd699e4a41cda3fe200420faa11d5
| 4,133
|
py
|
Python
|
google/ads/googleads/v8/services/services/ad_group_label_service/transports/base.py
|
wxxlouisa/google-ads-python
|
f24137966f6bfcb765a9b1fae79f2d23041825fe
|
[
"Apache-2.0"
] | 285
|
2018-10-05T16:47:58.000Z
|
2022-03-31T00:58:39.000Z
|
google/ads/googleads/v8/services/services/ad_group_label_service/transports/base.py
|
wxxlouisa/google-ads-python
|
f24137966f6bfcb765a9b1fae79f2d23041825fe
|
[
"Apache-2.0"
] | 425
|
2018-09-10T13:32:41.000Z
|
2022-03-31T14:50:05.000Z
|
google/ads/googleads/v8/services/services/ad_group_label_service/transports/base.py
|
wxxlouisa/google-ads-python
|
f24137966f6bfcb765a9b1fae79f2d23041825fe
|
[
"Apache-2.0"
] | 369
|
2018-11-28T07:01:00.000Z
|
2022-03-28T09:53:22.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
import google.auth # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.ads.googleads.v8.resources.types import ad_group_label
from google.ads.googleads.v8.services.types import ad_group_label_service
# Resolve the installed google-ads distribution version for the user-agent
# string; fall back to a bare ClientInfo when the package metadata is not
# available (e.g. running from a source checkout).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution("google-ads",).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class AdGroupLabelServiceTransport(metaclass=abc.ABCMeta):
    """Abstract transport class for AdGroupLabelService.

    Concrete subclasses (gRPC, etc.) implement the RPC-method properties;
    this base class handles host resolution, credential discovery and
    per-method call wrapping.
    """

    AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)

    def __init__(
        self,
        *,
        host: str = "googleads.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]): The hostname to connect to. A ``:port``
                suffix is honored; port 443 (HTTPS) is assumed otherwise.
            credentials (Optional[google.auth.credentials.Credentials]):
                Authorization credentials to attach to requests. When omitted,
                Application Default Credentials are used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                Client info used to send a user-agent string along with API
                requests. ``None`` selects default info. Generally only needed
                when developing your own client library.
        """
        # Default to port 443 (HTTPS) when the host carries no explicit port.
        self._host = host if ":" in host else f"{host}:443"

        resolved = credentials
        if resolved is None:
            # Fall back to Application Default Credentials.
            resolved, _ = google.auth.default(scopes=self.AUTH_SCOPES)
        self._credentials = resolved

        # Lifted into its own function so it can be stubbed out during tests.
        self._prep_wrapped_messages(client_info)

    def _prep_wrapped_messages(self, client_info):
        # Wrap each RPC method once up-front so per-call defaults are precomputed.
        self._wrapped_methods = {
            rpc: gapic_v1.method.wrap_method(
                rpc,
                default_timeout=None,
                client_info=client_info,
            )
            for rpc in (self.get_ad_group_label, self.mutate_ad_group_labels)
        }

    @property
    def get_ad_group_label(
        self,
    ) -> typing.Callable[
        [ad_group_label_service.GetAdGroupLabelRequest],
        ad_group_label.AdGroupLabel,
    ]:
        # Implemented by concrete transports.
        raise NotImplementedError

    @property
    def mutate_ad_group_labels(
        self,
    ) -> typing.Callable[
        [ad_group_label_service.MutateAdGroupLabelsRequest],
        ad_group_label_service.MutateAdGroupLabelsResponse,
    ]:
        # Implemented by concrete transports.
        raise NotImplementedError
# Explicit public API of this module.
__all__ = ("AdGroupLabelServiceTransport",)
| 35.93913
| 78
| 0.672151
|
4a100315d34b34bf3b4ae6917f7a0aee8da84fdd
| 6,264
|
py
|
Python
|
barry/cosmology/power_spectrum_smoothing.py
|
AaronGlanville/Barry
|
f181448b2ed10a8c08195e7e34819ceb8abfe532
|
[
"MIT"
] | null | null | null |
barry/cosmology/power_spectrum_smoothing.py
|
AaronGlanville/Barry
|
f181448b2ed10a8c08195e7e34819ceb8abfe532
|
[
"MIT"
] | null | null | null |
barry/cosmology/power_spectrum_smoothing.py
|
AaronGlanville/Barry
|
f181448b2ed10a8c08195e7e34819ceb8abfe532
|
[
"MIT"
] | null | null | null |
import logging
import math
import numpy as np
from scipy import integrate, interpolate, optimize
def get_smooth_methods_dict():
    """Return the registry mapping method name to its smoothing function."""
    return {"hinton2017": smooth_hinton2017, "eh1998": smooth_eh1998}
def validate_smooth_method(method):
    """Return True when *method* names a registered smoothing routine.

    Logs an error (and returns False) for unknown method names.
    """
    if method.lower() in get_smooth_methods_dict():
        return True
    logging.getLogger("barry").error(f"Smoothing method is {method} and not in list {get_smooth_methods_dict().keys()}")
    return False
def smooth(ks, pk, method="hinton2017", **kwargs):
    """Smooth power spectrum *pk* sampled at *ks* using the named method."""
    smoother = get_smooth_methods_dict()[method.lower()]
    return smoother(ks, pk, **kwargs)
def smooth_hinton2017(ks, pk, degree=13, sigma=1, weight=0.5, **kwargs):
    """Smooth a power spectrum with the Hinton 2017 polynomial method.

    Fits a polynomial of the given *degree* to log(pk) vs log(ks), with the
    weight of points near the spectrum's peak reduced by a Gaussian (width
    *sigma*, depth *weight*) so the fit tracks the broadband shape rather
    than the oscillatory features. Returns the smoothed pk array.
    """
    logk = np.log(ks)
    logp = np.log(pk)
    peak_logk = logk[np.argmax(pk)]
    # Down-weight points near the peak so the polynomial ignores the wiggles.
    downweight = weight * np.exp(-0.5 * np.power((logk - peak_logk) / sigma, 2))
    fit_weights = np.ones(pk.size) - downweight
    coeffs = np.polyfit(logk, logp, degree, w=fit_weights)
    return np.exp(np.poly1d(coeffs)(logk))
def smooth_eh1998(ks, pk, om=0.3121, ob=0.0491, h0=0.6751, ns=0.9653, sigma8=0.8150, rs=None, **kwargs):
    """Smooth power spectrum based on Eisenstein and Hu 1998 fitting formulae for the transfer function
    with shape of matter power spectrum fit using 5th order polynomial.

    Args:
        ks, pk: wavenumbers and power spectrum values to smooth.
        om, ob, h0, ns, sigma8: cosmological parameters for the EH98 shape.
        rs: sound horizon; computed from (om, ob, h0) when None.
    Returns:
        The best-fit smooth model evaluated at *ks*.
    """
    # First compute the normalised Eisenstein and Hu smooth power spectrum
    pk_EH98 = ks ** ns * __EH98_dewiggled(ks, om, ob, h0, rs) ** 2
    pk_EH98_spline = interpolate.splrep(ks, pk_EH98)
    # Normalise the EH98 shape to the requested sigma8.
    pk_EH98_norm = math.sqrt(integrate.quad(__sigma8_integrand, ks[0], ks[-1], args=(ks[0], ks[-1], pk_EH98_spline))[0] / (2.0 * math.pi * math.pi))
    pk_EH98 *= (sigma8 / pk_EH98_norm) ** 2
    # Fit an overall amplitude plus a 5th-order inverse-polynomial correction so
    # the smooth EH98 shape matches the input spectrum (chi-squared objective).
    start = np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0])
    result = optimize.minimize(__EH98_lnlike, start, args=(ks, pk_EH98, pk), method="Nelder-Mead", tol=1.0e-6, options={"maxiter": 1000000})
    # Then compute the smooth model from the best-fit parameters.
    amp, a1, a2, a3, a4, a5 = result["x"]
    Apoly = a1 * ks + a2 + a3 / ks + a4 / ks ** 2 + a5 / ks ** 3
    return amp * pk_EH98 + Apoly
# Compute the Eisenstein and Hu dewiggled transfer function
def __EH98_dewiggled(ks, om, ob, h0, rs):
    """Eisenstein & Hu 1998 zero-baryon ("dewiggled") transfer function.

    Evaluates the no-wiggle fitting formula at wavenumbers *ks* for matter
    density *om*, baryon density *ob*, Hubble parameter *h0* and sound
    horizon *rs* (computed from the cosmology when None).
    """
    if rs is None:  # fixed: identity comparison with None ('== None' is unidiomatic)
        rs = __EH98_rs(om, ob, h0)
    # Fitting parameters
    a1 = 0.328
    a2 = 431.0
    a3 = 0.380
    a4 = 22.30
    g1 = 0.43
    g2 = 4.0
    c1 = 14.2
    c2 = 731.0
    c3 = 62.5
    l1 = 2.0
    l2 = 1.8
    t1 = 2.0
    theta = 2.725 / 2.7  # Normalised CMB temperature
    q0 = ks * theta * theta
    # Effective shape parameter including baryon suppression (EH98 eqs. 30-31).
    alpha = 1.0 - a1 * math.log(a2 * om * h0 * h0) * (ob / om) + a3 * math.log(a4 * om * h0 * h0) * (ob / om) ** 2
    gamma_p1 = (1.0 - alpha) / (1.0 + (g1 * ks * rs * h0) ** g2)
    gamma = om * h0 * (alpha + gamma_p1)
    q = q0 / gamma
    c = c1 + c2 / (1.0 + c3 * q)
    l = np.log(l1 * math.exp(1.0) + l2 * q)
    t = l / (l + c * q ** t1)
    return t
def __EH98_lnlike(params, ks, pkEH, pkdata):
    """Chi-squared of (amplitude * EH98 shape + inverse polynomial) vs data."""
    amp, c1, c2, c3, c4, c5 = params
    correction = c1 * ks + c2 + c3 / ks + c4 / ks ** 2 + c5 / ks ** 3
    model = amp * pkEH + correction
    # Relative residuals summed in quadrature.
    return np.sum(((pkdata - model) / pkdata) ** 2)
def __sigma8_integrand(ks, kmin, kmax, pkspline):
    """Integrand for the sigma8 variance (top-hat window of radius 8 Mpc/h)."""
    if kmin <= ks <= kmax:
        pk = interpolate.splev(ks, pkspline, der=0)
    else:
        # Outside the spline's support the power is taken to vanish.
        pk = 0.0
    window = 3.0 * ((math.sin(8.0 * ks) / (8.0 * ks) ** 3) - (math.cos(8.0 * ks) / (8.0 * ks) ** 2))
    return ks * ks * window * window * pk
# Compute the Eisenstein and Hu 1998 value for the sound horizon
def __EH98_rs(om, ob, h0):
    """Eisenstein & Hu 1998 fitting formula for the sound horizon (Mpc).

    Takes matter density *om*, baryon density *ob* and Hubble parameter *h0*;
    returns the comoving sound horizon at the drag epoch.
    """
    # Fitting parameters
    b1 = 0.313
    b2 = -0.419
    b3 = 0.607
    b4 = 0.674
    b5 = 0.238
    b6 = 0.223
    a1 = 1291.0
    a2 = 0.251
    a3 = 0.659
    a4 = 0.828
    theta = 2.725 / 2.7  # Normalised CMB temperature
    obh2 = ob * h0 * h0
    omh2 = om * h0 * h0
    # Matter-radiation equality redshift and scale (EH98 eqs. 2-3).
    z_eq = 2.5e4 * omh2 / (theta ** 4)
    k_eq = 7.46e-2 * omh2 / (theta ** 2)
    # Drag epoch redshift (EH98 eq. 4).
    zd1 = b1 * omh2 ** b2 * (1.0 + b3 * omh2 ** b4)
    zd2 = b5 * omh2 ** b6
    z_d = a1 * (omh2 ** a2 / (1.0 + a3 * omh2 ** a4)) * (1.0 + zd1 * obh2 ** zd2)
    # Baryon-to-photon momentum density ratio at equality and at the drag epoch.
    R_eq = 3.15e4 * obh2 / (z_eq * theta ** 4)
    R_d = 3.15e4 * obh2 / (z_d * theta ** 4)
    # Sound horizon (EH98 eq. 6).
    s = 2.0 / (3.0 * k_eq) * math.sqrt(6.0 / R_eq) * math.log((math.sqrt(1.0 + R_d) + math.sqrt(R_d + R_eq)) / (1.0 + math.sqrt(R_eq)))
    return s
if __name__ == "__main__":
import sys
sys.path.append("../..")
logging.basicConfig(level=logging.DEBUG, format="[%(levelname)7s |%(funcName)20s] %(message)s")
logging.getLogger("matplotlib").setLevel(logging.ERROR)
om, h0 = 0.3121, 0.6751
from barry.cosmology import CambGenerator
camb = CambGenerator(h0=h0)
ks = camb.ks
r_s, pk_lin, _ = camb.get_data(om=om)
if True: # Do timing tests
import timeit
n = 30
def test_hinton():
smooth(ks, pk_lin, "hinton2017")
def test_eh1998():
smooth(ks, pk_lin, "eh1998")
t_hinton = timeit.timeit(test_hinton, number=n) * 1000 / n
t_eh1998 = timeit.timeit(test_eh1998, number=n) * 1000 / n
print(f"Hinton smoothing takes on average, {t_hinton:.2f} milliseconds")
print(f"Eisenstein and Hu smoothing takes on average, {t_eh1998:.2f} milliseconds")
print(f"Ratio is {t_eh1998/t_hinton:.1f} Hu/Hinton")
if True: # Do plotting comparison
pk_smoothed = smooth_hinton2017(ks, pk_lin)
pk_smoothed2 = smooth_eh1998(ks, pk_lin)
import matplotlib.pyplot as plt
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
ax1.plot(ks, pk_lin, "-", c="k")
ax1.plot(ks, pk_smoothed, ".", c="r", ms=2)
ax1.plot(ks, pk_smoothed2, "+", c="b", ms=2)
ax1.set_xscale("log")
ax1.set_yscale("log")
ax2.plot(ks, pk_lin / pk_smoothed, "-", c="r")
ax2.plot(ks, pk_lin / pk_smoothed2, ":", c="b")
plt.show()
| 31.796954
| 148
| 0.591156
|
4a100553d2ac271b6ec5ca21a795f40d72944902
| 4,989
|
py
|
Python
|
spectree/response.py
|
robertmrk/spectree
|
310bf5ae75d96dbba5ebbd967a73f1ded398c253
|
[
"Apache-2.0"
] | null | null | null |
spectree/response.py
|
robertmrk/spectree
|
310bf5ae75d96dbba5ebbd967a73f1ded398c253
|
[
"Apache-2.0"
] | null | null | null |
spectree/response.py
|
robertmrk/spectree
|
310bf5ae75d96dbba5ebbd967a73f1ded398c253
|
[
"Apache-2.0"
] | null | null | null |
from typing import Type
from pydantic import BaseModel
from .utils import get_model_key, parse_code
class Response:
    """Collects the HTTP status codes and response models of an endpoint.

    :param codes: HTTP status codes without data models, format('HTTP_[0-9]{3}'), e.g. 'HTTP200'
    :param code_models: dict of <HTTP status code>: <`pydantic.BaseModel`> or None
    """

    def __init__(self, *codes, **code_models):
        self.codes = []
        self.code_models = {}
        for code in codes:
            assert code in DEFAULT_CODE_DESC, "invalid HTTP status code"
            self.codes.append(code)
        for code, model in code_models.items():
            assert code in DEFAULT_CODE_DESC, "invalid HTTP status code"
            if not model:
                # A falsy model means "status code only" — track it with codes.
                self.codes.append(code)
                continue
            assert issubclass(model, BaseModel), "invalid `pydantic.BaseModel`"
            self.code_models[code] = model

    def add_model(
        self, code: int, model: Type[BaseModel], replace: bool = True
    ) -> None:
        """Register data *model* under HTTP status *code*.

        :param code: an HTTP status code (plain int, e.g. ``200``)
        :param model: a `pydantic.BaseModel`
        :param replace: overwrite an already-registered model for *code* when
            `True`; keep the existing model when `False`.
        """
        already_there = self.find_model(code)
        if already_there and not replace:
            return
        self.code_models[f"HTTP_{code}"] = model

    def has_model(self):
        """Return whether any status code carries a data model."""
        return len(self.code_models) > 0

    def find_model(self, code):
        """Return the model registered for *code* (``r'\\d{3}'``), or None."""
        return self.code_models.get(f"HTTP_{code}")

    @property
    def models(self):
        """All models registered on this response."""
        return self.code_models.values()

    def generate_spec(self):
        """Build the OpenAPI ``responses`` object for this instance.

        :returns: JSON-serializable dict keyed by status code.
        """
        spec = {
            parse_code(code): {"description": DEFAULT_CODE_DESC[code]}
            for code in self.codes
        }
        for code, model in self.code_models.items():
            model_name = get_model_key(model=model)
            spec[parse_code(code)] = {
                "description": DEFAULT_CODE_DESC[code],
                "content": {
                    "application/json": {
                        "schema": {"$ref": f"#/components/schemas/{model_name}"}
                    }
                },
            }
        return spec
# according to https://tools.ietf.org/html/rfc2616#section-10
# https://tools.ietf.org/html/rfc7231#section-6.1
# https://developer.mozilla.org/sv-SE/docs/Web/HTTP/Status
# Reason phrases keyed by 'HTTP_<code>'; used for spec descriptions and
# status-code validation in `Response`.
DEFAULT_CODE_DESC = {
    # Information 1xx
    "HTTP_100": "Continue",
    "HTTP_101": "Switching Protocols",
    # Successful 2xx
    "HTTP_200": "OK",
    "HTTP_201": "Created",
    "HTTP_202": "Accepted",
    "HTTP_203": "Non-Authoritative Information",
    "HTTP_204": "No Content",
    "HTTP_205": "Reset Content",
    "HTTP_206": "Partial Content",
    # Redirection 3xx
    "HTTP_300": "Multiple Choices",
    "HTTP_301": "Moved Permanently",
    "HTTP_302": "Found",
    "HTTP_303": "See Other",
    "HTTP_304": "Not Modified",
    "HTTP_305": "Use Proxy",
    "HTTP_306": "(Unused)",
    "HTTP_307": "Temporary Redirect",
    "HTTP_308": "Permanent Redirect",
    # Client Error 4xx
    "HTTP_400": "Bad Request",
    "HTTP_401": "Unauthorized",
    "HTTP_402": "Payment Required",
    "HTTP_403": "Forbidden",
    "HTTP_404": "Not Found",
    "HTTP_405": "Method Not Allowed",
    "HTTP_406": "Not Acceptable",
    "HTTP_407": "Proxy Authentication Required",
    "HTTP_408": "Request Timeout",
    "HTTP_409": "Conflict",
    "HTTP_410": "Gone",
    "HTTP_411": "Length Required",
    "HTTP_412": "Precondition Failed",
    "HTTP_413": "Request Entity Too Large",
    "HTTP_414": "Request-URI Too Long",
    "HTTP_415": "Unsupported Media Type",
    "HTTP_416": "Requested Range Not Satisfiable",
    "HTTP_417": "Expectation Failed",
    "HTTP_418": "I'm a teapot",
    "HTTP_421": "Misdirected Request",
    "HTTP_422": "Unprocessable Entity",
    "HTTP_423": "Locked",
    "HTTP_424": "Failed Dependency",
    "HTTP_425": "Too Early",
    "HTTP_426": "Upgrade Required",
    "HTTP_428": "Precondition Required",
    "HTTP_429": "Too Many Requests",
    "HTTP_431": "Request Header Fields Too Large",
    "HTTP_451": "Unavailable For Legal Reasons",
    # Server Error 5xx
    "HTTP_500": "Internal Server Error",
    "HTTP_501": "Not Implemented",
    "HTTP_502": "Bad Gateway",
    "HTTP_503": "Service Unavailable",
    "HTTP_504": "Gateway Timeout",
    "HTTP_505": "HTTP Version Not Supported",
    # fixed: 'Variant Also negotiates' -> standard reason phrase capitalization
    "HTTP_506": "Variant Also Negotiates",
    # fixed typo: 'Insufficient Sotrage' -> 'Insufficient Storage'
    "HTTP_507": "Insufficient Storage",
    "HTTP_508": "Loop Detected",
    "HTTP_511": "Network Authentication Required",
}
| 31.77707
| 83
| 0.596913
|
4a1007661a992d42cd07c8063889e126e56d54b3
| 2,036
|
py
|
Python
|
molecule/java/tests/test_default.py
|
mvdheenux/OpenConext-deploy
|
9c75866cba7675cafa8946e591ffac0fe528f7b3
|
[
"Apache-2.0"
] | 11
|
2015-07-05T10:38:10.000Z
|
2019-06-27T07:49:32.000Z
|
molecule/java/tests/test_default.py
|
mvdheenux/OpenConext-deploy
|
9c75866cba7675cafa8946e591ffac0fe528f7b3
|
[
"Apache-2.0"
] | 201
|
2015-02-03T14:52:30.000Z
|
2022-03-09T08:45:00.000Z
|
molecule/java/tests/test_default.py
|
domgon/OpenConext-deploy
|
80b28f59bdef2ac683744c07bb938c889cb43681
|
[
"Apache-2.0"
] | 48
|
2015-03-10T13:28:23.000Z
|
2021-11-28T23:15:32.000Z
|
import os
import pytest
import testinfra.utils.ansible_runner
# Resolve all hosts under test from the Molecule-generated inventory file.
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_java_binary(host):
    """An OpenJDK 1.8 runtime is installed at /usr/bin/java."""
    binary = host.file("/usr/bin/java")
    version_check = host.run('/usr/bin/java -version 2>&1 | grep openjdk')
    assert binary.exists
    assert binary.is_file
    assert version_check.rc == 0
    assert 'version "1.8.' in version_check.stdout
@pytest.mark.parametrize("components, dir_owner, file_owner, group, httpd_listen, spring_listen", [
    ("manage", "root", "manage", "root", "617", "9393"),
    ("mujina-idp", "mujina-idp", "mujina-idp", "mujina-idp", "608", "9390"),
    ("mujina-sp", "mujina-sp", "mujina-sp", "mujina-sp", "607", "9391"),
])
def test_components(host, components, dir_owner, file_owner, group, httpd_listen, spring_listen):
    """Each Spring Boot component is installed, correctly owned and listening."""
    user = host.user(components)
    service = host.service(components)
    socket_httpd = host.socket(f"tcp://127.0.0.1:{httpd_listen}")
    socket_springboot = host.socket(f"tcp://127.0.0.1:{spring_listen}")
    install_dir = host.file(f"/opt/{components}")
    logback = host.file(f"/opt/{components}/logback.xml")
    application = host.file(f"/opt/{components}/application.yml")
    http_file = host.file("/etc/httpd/conf.d/" + components.replace("-", "_") + '.conf')
    # manage contains a version in symlink, so lets skip that for now.
    if components != "manage":
        jar_file = host.file(f"/opt/{components}/{components}.jar")
        assert jar_file.is_symlink
    assert user.exists
    assert service.is_enabled
    assert service.is_running
    assert install_dir.is_directory
    assert install_dir.user == dir_owner
    assert install_dir.group == group
    assert logback.exists
    assert logback.user == file_owner
    assert application.exists
    assert application.user == file_owner
    assert http_file.exists
    assert http_file.is_file
    assert socket_httpd.is_listening
    assert socket_springboot.is_listening
| 34.508475
| 99
| 0.68664
|
4a10079d8a94179d7242bf6f672f63ffad30a4bf
| 92,189
|
py
|
Python
|
ortools/sat/python/cp_model.py
|
reachatul/or_tools
|
50f90b0dc8a331092ad68b0d3919a503fb7cdd06
|
[
"Apache-2.0"
] | null | null | null |
ortools/sat/python/cp_model.py
|
reachatul/or_tools
|
50f90b0dc8a331092ad68b0d3919a503fb7cdd06
|
[
"Apache-2.0"
] | null | null | null |
ortools/sat/python/cp_model.py
|
reachatul/or_tools
|
50f90b0dc8a331092ad68b0d3919a503fb7cdd06
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2010-2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Methods for building and solving CP-SAT models.
The following two sections describe the main
methods for building and solving CP-SAT models.
* [`CpModel`](#cp_model.CpModel): Methods for creating
models, including variables and constraints.
* [`CPSolver`](#cp_model.CpSolver): Methods for solving
a model and evaluating solutions.
The following methods implement callbacks that the
solver calls each time it finds a new solution.
* [`CpSolverSolutionCallback`](#cp_model.CpSolverSolutionCallback):
A general method for implementing callbacks.
* [`ObjectiveSolutionPrinter`](#cp_model.ObjectiveSolutionPrinter):
Print objective values and elapsed time for intermediate solutions.
* [`VarArraySolutionPrinter`](#cp_model.VarArraySolutionPrinter):
Print intermediate solutions (variable values, time).
* [`VarArrayAndObjectiveSolutionPrinter`]
(#cp_model.VarArrayAndObjectiveSolutionPrinter):
Print both intermediate solutions and objective values.
Additional methods for solving CP-SAT models:
* [`Constraint`](#cp_model.Constraint): A few utility methods for modifying
constraints created by `CpModel`.
* [`LinearExpr`](#cp_model.LinearExpr): Methods for creating constraints
and the objective from large arrays of coefficients.
Other methods and functions listed are primarily used for developing OR-Tools,
rather than for solving specific optimization problems.
"""
import collections
import threading
import time
import warnings
from ortools.sat import cp_model_pb2
from ortools.sat import sat_parameters_pb2
from ortools.sat.python import cp_model_helper as cmh
from ortools.sat import pywrapsat
from ortools.util import sorted_interval_list
Domain = sorted_interval_list.Domain
# The classes below allow linear expressions to be expressed naturally with the
# usual arithmetic operators + - * / and with constant numbers, which makes the
# python API very intuitive. See ../samples/*.py for examples.
INT_MIN = -9223372036854775808  # hardcoded to be platform independent.
INT_MAX = 9223372036854775807
INT32_MAX = 2147483647
INT32_MIN = -2147483648
# CpSolver status (exported to avoid importing cp_model_pb2).
UNKNOWN = cp_model_pb2.UNKNOWN
MODEL_INVALID = cp_model_pb2.MODEL_INVALID
FEASIBLE = cp_model_pb2.FEASIBLE
INFEASIBLE = cp_model_pb2.INFEASIBLE
OPTIMAL = cp_model_pb2.OPTIMAL
# Variable selection strategy
CHOOSE_FIRST = cp_model_pb2.DecisionStrategyProto.CHOOSE_FIRST
CHOOSE_LOWEST_MIN = cp_model_pb2.DecisionStrategyProto.CHOOSE_LOWEST_MIN
CHOOSE_HIGHEST_MAX = cp_model_pb2.DecisionStrategyProto.CHOOSE_HIGHEST_MAX
CHOOSE_MIN_DOMAIN_SIZE = (
    cp_model_pb2.DecisionStrategyProto.CHOOSE_MIN_DOMAIN_SIZE)
CHOOSE_MAX_DOMAIN_SIZE = (
    cp_model_pb2.DecisionStrategyProto.CHOOSE_MAX_DOMAIN_SIZE)
# Domain reduction strategy
SELECT_MIN_VALUE = cp_model_pb2.DecisionStrategyProto.SELECT_MIN_VALUE
SELECT_MAX_VALUE = cp_model_pb2.DecisionStrategyProto.SELECT_MAX_VALUE
SELECT_LOWER_HALF = cp_model_pb2.DecisionStrategyProto.SELECT_LOWER_HALF
SELECT_UPPER_HALF = cp_model_pb2.DecisionStrategyProto.SELECT_UPPER_HALF
# Search branching
AUTOMATIC_SEARCH = sat_parameters_pb2.SatParameters.AUTOMATIC_SEARCH
FIXED_SEARCH = sat_parameters_pb2.SatParameters.FIXED_SEARCH
PORTFOLIO_SEARCH = sat_parameters_pb2.SatParameters.PORTFOLIO_SEARCH
LP_SEARCH = sat_parameters_pb2.SatParameters.LP_SEARCH
def DisplayBounds(bounds):
    """Displays a flattened list of intervals."""
    # `bounds` is a flat [lo0, hi0, lo1, hi1, ...] list; render each pair.
    pieces = []
    for lo, hi in zip(bounds[::2], bounds[1::2]):
        pieces.append(str(lo) if lo == hi else str(lo) + '..' + str(hi))
    return ', '.join(pieces)
def ShortName(model, i):
    """Returns a short name of an integer variable, or its negation."""
    # Negative indices use the proto encoding of negated literals (-i - 1).
    if i < 0:
        return 'Not(%s)' % ShortName(model, -i - 1)
    var_proto = model.variables[i]
    if var_proto.name:
        return var_proto.name
    domain = var_proto.domain
    if len(domain) == 2 and domain[0] == domain[1]:
        # Fixed variable: print it as a plain constant.
        return str(domain[0])
    return '[%s]' % DisplayBounds(domain)
def ShortExprName(model, e):
    """Pretty-print LinearExpressionProto instances."""
    if not e.vars:
        # Constant expression.
        return str(e.offset)
    if len(e.vars) == 1:
        # Affine expression: coeff * var + offset.
        name = ShortName(model, e.vars[0])
        c = e.coeffs[0]
        if c == 1:
            text = name
        elif c == -1:
            text = f'-{name}'
        elif c != 0:
            text = f'{c} * {name}'
        else:
            text = ''
        if e.offset > 0:
            text = f'{text} + {e.offset}'
        elif e.offset < 0:
            text = f'{text} - {-e.offset}'
        return text
    # TODO(user): Support more than affine expressions.
    return str(e)
class LinearExpr(object):
    """Holds an integer linear expression.

    A linear expression is built from integer constants and variables.
    For example, `x + 2 * (y - z + 1)`.

    Linear expressions are used in CP-SAT models in constraints and in the
    objective:

    * You can define linear constraints as in:

    ```
    model.Add(x + 2 * y <= 5)
    model.Add(sum(array_of_vars) == 5)
    ```

    * In CP-SAT, the objective is a linear expression:

    ```
    model.Minimize(x + 2 * y + z)
    ```

    * For large arrays, using the LinearExpr class is faster that using the python
    `sum()` function. You can create constraints and the objective from lists of
    linear expressions or coefficients as follows:

    ```
    model.Minimize(cp_model.LinearExpr.Sum(expressions))
    model.Add(cp_model.LinearExpr.WeightedSum(expressions, coefficients) >= 0)
    ```
    """

    @classmethod
    def Sum(cls, expressions):
        """Creates the expression sum(expressions)."""
        if len(expressions) == 1:
            return expressions[0]
        return _SumArray(expressions)

    @classmethod
    def WeightedSum(cls, expressions, coefficients):
        """Creates the expression sum(expressions[i] * coefficients[i])."""
        if LinearExpr.IsEmptyOrAllNull(coefficients):
            return 0
        elif len(expressions) == 1:
            return expressions[0] * coefficients[0]
        else:
            return _WeightedSum(expressions, coefficients)

    @classmethod
    def Term(cls, expression, coefficient):
        """Creates `expression * coefficient`."""
        if cmh.is_zero(coefficient):
            return 0
        else:
            return expression * coefficient

    @classmethod
    def IsEmptyOrAllNull(cls, coefficients):
        """Returns True if `coefficients` is empty or all its entries are zero."""
        for c in coefficients:
            if not cmh.is_zero(c):
                return False
        return True

    @classmethod
    def RebuildFromLinearExpressionProto(cls, model, proto):
        """Recreate a LinearExpr from a LinearExpressionProto."""
        offset = proto.offset
        num_elements = len(proto.vars)
        if num_elements == 0:
            return offset
        elif num_elements == 1:
            return IntVar(model, proto.vars[0], None) * proto.coeffs[0] + offset
        else:
            variables = []
            coeffs = []
            all_ones = True
            # Bug fix: `vars` and `coeffs` are repeated proto fields, not
            # methods; calling them (proto.vars()) raised a TypeError.
            for index, coeff in zip(proto.vars, proto.coeffs):
                variables.append(IntVar(model, index, None))
                coeffs.append(coeff)
                if not cmh.is_one(coeff):
                    all_ones = False
            if all_ones:
                # Plain sum: cheaper representation than a weighted sum.
                return _SumArray(variables, offset)
            else:
                return _WeightedSum(variables, coeffs, offset)

    def GetIntegerVarValueMap(self):
        """Scans the expression, and returns (var_coef_map, constant)."""
        coeffs = collections.defaultdict(int)
        constant = 0
        to_process = [(self, 1)]
        while to_process:  # Flatten to avoid recursion.
            expr, coeff = to_process.pop()
            if cmh.is_integral(expr):
                constant += coeff * int(expr)
            elif isinstance(expr, _ProductCst):
                to_process.append(
                    (expr.Expression(), coeff * expr.Coefficient()))
            elif isinstance(expr, _Sum):
                to_process.append((expr.Left(), coeff))
                to_process.append((expr.Right(), coeff))
            elif isinstance(expr, _SumArray):
                for e in expr.Expressions():
                    to_process.append((e, coeff))
                constant += expr.Constant() * coeff
            elif isinstance(expr, _WeightedSum):
                for e, c in zip(expr.Expressions(), expr.Coefficients()):
                    to_process.append((e, coeff * c))
                constant += expr.Constant() * coeff
            elif isinstance(expr, IntVar):
                coeffs[expr] += coeff
            elif isinstance(expr, _NotBooleanVariable):
                # not(b) == 1 - b: add the coefficient to the constant and
                # subtract it from the underlying Boolean variable.
                constant += coeff
                coeffs[expr.Not()] -= coeff
            else:
                raise TypeError('Unrecognized linear expression: ' + str(expr))
        return coeffs, constant

    def GetFloatVarValueMap(self):
        """Scans the expression. Returns (var_coef_map, constant, is_integer)."""
        coeffs = {}
        constant = 0
        to_process = [(self, 1)]
        while to_process:  # Flatten to avoid recursion.
            expr, coeff = to_process.pop()
            if cmh.is_integral(expr):  # Keep integrality.
                constant += coeff * int(expr)
            elif cmh.is_a_number(expr):
                constant += coeff * float(expr)
            elif isinstance(expr, _ProductCst):
                to_process.append(
                    (expr.Expression(), coeff * expr.Coefficient()))
            elif isinstance(expr, _Sum):
                to_process.append((expr.Left(), coeff))
                to_process.append((expr.Right(), coeff))
            elif isinstance(expr, _SumArray):
                for e in expr.Expressions():
                    to_process.append((e, coeff))
                constant += expr.Constant() * coeff
            elif isinstance(expr, _WeightedSum):
                for e, c in zip(expr.Expressions(), expr.Coefficients()):
                    to_process.append((e, coeff * c))
                constant += expr.Constant() * coeff
            elif isinstance(expr, IntVar):
                if expr in coeffs:
                    coeffs[expr] += coeff
                else:
                    coeffs[expr] = coeff
            elif isinstance(expr, _NotBooleanVariable):
                # not(b) == 1 - b, as in GetIntegerVarValueMap above.
                constant += coeff
                if expr.Not() in coeffs:
                    coeffs[expr.Not()] -= coeff
                else:
                    coeffs[expr.Not()] = -coeff
            else:
                raise TypeError('Unrecognized linear expression: ' + str(expr))
        # The expression is integer only if the constant and every coefficient
        # are integral.
        is_integer = cmh.is_integral(constant)
        if is_integer:
            for coeff in coeffs.values():
                if not cmh.is_integral(coeff):
                    is_integer = False
                    break
        return coeffs, constant, is_integer

    def __hash__(self):
        return object.__hash__(self)

    def __abs__(self):
        raise NotImplementedError(
            'calling abs() on a linear expression is not supported, '
            'please use CpModel.AddAbsEquality')

    def __add__(self, arg):
        if cmh.is_zero(arg):
            return self
        return _Sum(self, arg)

    def __radd__(self, arg):
        if cmh.is_zero(arg):
            return self
        return _Sum(self, arg)

    def __sub__(self, arg):
        if cmh.is_zero(arg):
            return self
        return _Sum(self, -arg)

    def __rsub__(self, arg):
        return _Sum(-self, arg)

    def __mul__(self, arg):
        arg = cmh.assert_is_a_number(arg)
        if cmh.is_one(arg):
            return self
        elif cmh.is_zero(arg):
            return 0
        return _ProductCst(self, arg)

    def __rmul__(self, arg):
        arg = cmh.assert_is_a_number(arg)
        if cmh.is_one(arg):
            return self
        elif cmh.is_zero(arg):
            return 0
        return _ProductCst(self, arg)

    def __div__(self, _):
        raise NotImplementedError(
            'calling / on a linear expression is not supported, '
            'please use CpModel.AddDivisionEquality')

    def __truediv__(self, _):
        # Bug fix: __truediv__ implements the `/` operator, not `//`.
        raise NotImplementedError(
            'calling / on a linear expression is not supported, '
            'please use CpModel.AddDivisionEquality')

    def __mod__(self, _):
        raise NotImplementedError(
            'calling %% on a linear expression is not supported, '
            'please use CpModel.AddModuloEquality')

    def __pow__(self, _):
        raise NotImplementedError(
            'calling ** on a linear expression is not supported, '
            'please use CpModel.AddMultiplicationEquality')

    def __lshift__(self, _):
        raise NotImplementedError(
            'calling left shift on a linear expression is not supported')

    def __rshift__(self, _):
        raise NotImplementedError(
            'calling right shift on a linear expression is not supported')

    def __and__(self, _):
        raise NotImplementedError(
            'calling and on a linear expression is not supported, '
            'please use CpModel.AddBoolAnd')

    def __or__(self, _):
        raise NotImplementedError(
            'calling or on a linear expression is not supported, '
            'please use CpModel.AddBoolOr')

    def __xor__(self, _):
        raise NotImplementedError(
            'calling xor on a linear expression is not supported, '
            'please use CpModel.AddBoolXor')

    def __neg__(self):
        return _ProductCst(self, -1)

    def __bool__(self):
        raise NotImplementedError(
            'Evaluating a LinearExpr instance as a Boolean is not implemented.')

    # Comparison operators build BoundedLinearExpression objects (linear
    # constraints) instead of returning Booleans.
    def __eq__(self, arg):
        if arg is None:
            return False
        if cmh.is_integral(arg):
            arg = cmh.assert_is_int64(arg)
            return BoundedLinearExpression(self, [arg, arg])
        else:
            return BoundedLinearExpression(self - arg, [0, 0])

    def __ge__(self, arg):
        if cmh.is_integral(arg):
            arg = cmh.assert_is_int64(arg)
            return BoundedLinearExpression(self, [arg, INT_MAX])
        else:
            return BoundedLinearExpression(self - arg, [0, INT_MAX])

    def __le__(self, arg):
        if cmh.is_integral(arg):
            arg = cmh.assert_is_int64(arg)
            return BoundedLinearExpression(self, [INT_MIN, arg])
        else:
            return BoundedLinearExpression(self - arg, [INT_MIN, 0])

    def __lt__(self, arg):
        if cmh.is_integral(arg):
            arg = cmh.assert_is_int64(arg)
            if arg == INT_MIN:
                raise ArithmeticError('< INT_MIN is not supported')
            return BoundedLinearExpression(self, [INT_MIN, arg - 1])
        else:
            return BoundedLinearExpression(self - arg, [INT_MIN, -1])

    def __gt__(self, arg):
        if cmh.is_integral(arg):
            arg = cmh.assert_is_int64(arg)
            if arg == INT_MAX:
                raise ArithmeticError('> INT_MAX is not supported')
            return BoundedLinearExpression(self, [arg + 1, INT_MAX])
        else:
            return BoundedLinearExpression(self - arg, [1, INT_MAX])

    def __ne__(self, arg):
        if arg is None:
            return True
        if cmh.is_integral(arg):
            arg = cmh.assert_is_int64(arg)
            # Avoid arg - 1 / arg + 1 overflowing outside the int64 domain.
            if arg == INT_MAX:
                return BoundedLinearExpression(self, [INT_MIN, INT_MAX - 1])
            elif arg == INT_MIN:
                return BoundedLinearExpression(self, [INT_MIN + 1, INT_MAX])
            else:
                return BoundedLinearExpression(
                    self, [INT_MIN, arg - 1, arg + 1, INT_MAX])
        else:
            return BoundedLinearExpression(self - arg,
                                           [INT_MIN, -1, 1, INT_MAX])
class _Sum(LinearExpr):
    """Represents the sum of two LinearExprs."""

    def __init__(self, left, right):
        for operand in (left, right):
            if not cmh.is_a_number(operand) and not isinstance(
                    operand, LinearExpr):
                raise TypeError('Not an linear expression: ' + str(operand))
        self.__left = left
        self.__right = right

    def Left(self):
        return self.__left

    def Right(self):
        return self.__right

    def __str__(self):
        return f'({self.__left} + {self.__right})'

    def __repr__(self):
        return f'Sum({self.__left!r}, {self.__right!r})'
class _ProductCst(LinearExpr):
    """Represents the product of a LinearExpr by a constant."""

    def __init__(self, expr, coeff):
        coeff = cmh.assert_is_a_number(coeff)
        if isinstance(expr, _ProductCst):
            # Collapse nested products into one coefficient.
            self.__expr = expr.Expression()
            self.__coef = expr.Coefficient() * coeff
        else:
            self.__expr = expr
            self.__coef = coeff

    def Coefficient(self):
        return self.__coef

    def Expression(self):
        return self.__expr

    def __str__(self):
        if self.__coef == -1:
            return '-' + str(self.__expr)
        return f'({self.__coef} * {self.__expr})'

    def __repr__(self):
        return f'ProductCst({self.__expr!r}, {self.__coef!r})'
class _SumArray(LinearExpr):
    """Represents the sum of a list of LinearExpr and a constant."""

    def __init__(self, expressions, constant=0):
        self.__expressions = []
        self.__constant = constant
        for item in expressions:
            if cmh.is_a_number(item):
                # Fold numeric terms into the constant; skip exact zeros.
                if cmh.is_zero(item):
                    continue
                self.__constant += cmh.assert_is_a_number(item)
            elif isinstance(item, LinearExpr):
                self.__expressions.append(item)
            else:
                raise TypeError('Not an linear expression: ' + str(item))

    def Expressions(self):
        return self.__expressions

    def Constant(self):
        return self.__constant

    def __str__(self):
        joined = ' + '.join(map(str, self.__expressions))
        if self.__constant == 0:
            return f'({joined})'
        return f'({joined} + {self.__constant})'

    def __repr__(self):
        exprs = ', '.join(map(repr, self.__expressions))
        return f'SumArray({exprs}, {self.__constant})'
class _WeightedSum(LinearExpr):
    """Represents sum(ai * xi) + b."""
    def __init__(self, expressions, coefficients, constant=0):
        self.__expressions = []
        self.__coefficients = []
        self.__constant = constant
        if len(expressions) != len(coefficients):
            raise TypeError(
                'In the LinearExpr.WeightedSum method, the expression array and the '
                ' coefficient array must have the same length.')
        for e, c in zip(expressions, coefficients):
            c = cmh.assert_is_a_number(c)
            # Terms with a zero coefficient are dropped entirely.
            if cmh.is_zero(c):
                continue
            if cmh.is_a_number(e):
                # Numeric term: fold e * c into the constant.
                e = cmh.assert_is_a_number(e)
                self.__constant += e * c
            elif isinstance(e, LinearExpr):
                self.__expressions.append(e)
                self.__coefficients.append(c)
            else:
                raise TypeError('Not an linear expression: ' + str(e))
    def __str__(self):
        # `output is None` marks that the first term has not been emitted yet;
        # the first term carries its own sign, later terms use ' + ' / ' - '.
        output = None
        for expr, coeff in zip(self.__expressions, self.__coefficients):
            if not output and cmh.is_one(coeff):
                output = str(expr)
            elif not output and cmh.is_minus_one(coeff):
                output = '-' + str(expr)
            elif not output:
                output = '{} * {}'.format(coeff, str(expr))
            elif cmh.is_one(coeff):
                output += ' + {}'.format(str(expr))
            elif cmh.is_minus_one(coeff):
                output += ' - {}'.format(str(expr))
            elif coeff > 1:
                output += ' + {} * {}'.format(coeff, str(expr))
            elif coeff < -1:
                output += ' - {} * {}'.format(-coeff, str(expr))
        if self.__constant > 0:
            output += ' + {}'.format(self.__constant)
        elif self.__constant < 0:
            output += ' - {}'.format(-self.__constant)
        if output is None:
            # Empty weighted sum with a zero constant.
            output = '0'
        return output
    def __repr__(self):
        return 'WeightedSum([{}], [{}], {})'.format(
            ', '.join(map(repr, self.__expressions)),
            ', '.join(map(repr, self.__coefficients)), self.__constant)
    def Expressions(self):
        return self.__expressions
    def Coefficients(self):
        return self.__coefficients
    def Constant(self):
        return self.__constant
class IntVar(LinearExpr):
    """An integer variable.
    An IntVar is an object that can take on any integer value within defined
    ranges. Variables appear in constraint like:
        x + y >= 5
        AllDifferent([x, y, z])
    Solving a model is equivalent to finding, for each variable, a single value
    from the set of initial values (called the initial domain), such that the
    model is feasible, or optimal if you provided an objective function.
    """
    def __init__(self, model, domain, name):
        """See CpModel.NewIntVar below."""
        self.__model = model
        # Lazily-created _NotBooleanVariable, cached by Not().
        self.__negation = None
        # Python do not support multiple __init__ methods.
        # This method is only called from the CpModel class.
        # We hack the parameter to support the two cases:
        # case 1:
        #     model is a CpModelProto, domain is a Domain, and name is a string.
        # case 2:
        #     model is a CpModelProto, domain is an index (int), and name is None.
        if cmh.is_integral(domain) and name is None:
            # Case 2: wrap the already-existing variable at that index.
            self.__index = int(domain)
            self.__var = model.variables[domain]
        else:
            # Case 1: append a fresh variable to the model proto.
            self.__index = len(model.variables)
            self.__var = model.variables.add()
            self.__var.domain.extend(domain.FlattenedIntervals())
            self.__var.name = name
    def Index(self):
        """Returns the index of the variable in the model."""
        return self.__index
    def Proto(self):
        """Returns the variable protobuf."""
        return self.__var
    def IsEqualTo(self, other):
        """Returns true if self == other in the python sense."""
        if not isinstance(other, IntVar):
            return False
        return self.Index() == other.Index()
    def __str__(self):
        if not self.__var.name:
            if len(self.__var.domain
                  ) == 2 and self.__var.domain[0] == self.__var.domain[1]:
                # Special case for constants.
                return str(self.__var.domain[0])
            else:
                return 'unnamed_var_%i' % self.__index
        return self.__var.name
    def __repr__(self):
        return '%s(%s)' % (self.__var.name, DisplayBounds(self.__var.domain))
    def Name(self):
        # Returns the name stored in the underlying proto (may be empty).
        return self.__var.name
    def Not(self):
        """Returns the negation of a Boolean variable.
        This method implements the logical negation of a Boolean variable.
        It is only valid if the variable has a Boolean domain (0 or 1).
        Note that this method is nilpotent: `x.Not().Not() == x`.
        """
        for bound in self.__var.domain:
            if bound < 0 or bound > 1:
                raise TypeError(
                    'Cannot call Not on a non boolean variable: %s' % self)
        if self.__negation is None:
            self.__negation = _NotBooleanVariable(self)
        return self.__negation
class _NotBooleanVariable(LinearExpr):
    """Negation of a boolean variable."""

    def __init__(self, boolvar):
        self.__boolvar = boolvar

    def Index(self):
        # Negated literals use the proto encoding -index - 1.
        return -self.__boolvar.Index() - 1

    def Not(self):
        # Double negation returns the original variable.
        return self.__boolvar

    def __str__(self):
        return f'not({self.__boolvar})'

    def __bool__(self):
        raise NotImplementedError(
            'Evaluating a literal as a Boolean value is not implemented.')
class BoundedLinearExpression(object):
    """Represents a linear constraint: `lb <= linear expression <= ub`.
    The only use of this class is to be added to the CpModel through
    `CpModel.Add(expression)`, as in:
        model.Add(x + 2 * y -1 >= z)
    """
    def __init__(self, expr, bounds):
        # `bounds` is a flattened list of intervals [lb0, ub0, lb1, ub1, ...].
        self.__expr = expr
        self.__bounds = bounds
    def __str__(self):
        if len(self.__bounds) == 2:
            lb = self.__bounds[0]
            ub = self.__bounds[1]
            if lb > INT_MIN and ub < INT_MAX:
                if lb == ub:
                    # Single-value interval: an equality.
                    return str(self.__expr) + ' == ' + str(lb)
                else:
                    return str(lb) + ' <= ' + str(
                        self.__expr) + ' <= ' + str(ub)
            elif lb > INT_MIN:
                return str(self.__expr) + ' >= ' + str(lb)
            elif ub < INT_MAX:
                return str(self.__expr) + ' <= ' + str(ub)
            else:
                return 'True (unbounded expr ' + str(self.__expr) + ')'
        elif (len(self.__bounds) == 4 and self.__bounds[0] == INT_MIN and
              self.__bounds[1] + 2 == self.__bounds[2] and
              self.__bounds[3] == INT_MAX):
            # Two intervals with a single excluded value: a difference.
            return str(self.__expr) + ' != ' + str(self.__bounds[1] + 1)
        else:
            return str(self.__expr) + ' in [' + DisplayBounds(
                self.__bounds) + ']'
    def Expression(self):
        return self.__expr
    def Bounds(self):
        return self.__bounds
    def __bool__(self):
        # Called when a BoundedLinearExpression is used in a Boolean context,
        # e.g. `if x == y:`. Only trivially decidable cases are supported:
        #   - `v == v` / `v != v` (the variable cancels: one entry with
        #     coefficient 0 and constant 0),
        #   - `u == v` / `u != v` for two distinct variables (coefficients
        #     {1, -1} and constant 0, which can never be identically true
        #     for ==, hence the != result).
        coeffs_map, constant = self.__expr.GetIntegerVarValueMap()
        all_coeffs = set(coeffs_map.values())
        same_var = set([0])
        eq_bounds = [0, 0]
        different_vars = set([-1, 1])
        ne_bounds = [INT_MIN, -1, 1, INT_MAX]
        if (len(coeffs_map) == 1 and all_coeffs == same_var and
                constant == 0 and
                (self.__bounds == eq_bounds or self.__bounds == ne_bounds)):
            return self.__bounds == eq_bounds
        if (len(coeffs_map) == 2 and all_coeffs == different_vars and
                constant == 0 and
                (self.__bounds == eq_bounds or self.__bounds == ne_bounds)):
            return self.__bounds == ne_bounds
        raise NotImplementedError(
            f'Evaluating a BoundedLinearExpression \'{self}\' as a Boolean value'
            + ' is not supported.')
class Constraint(object):
    """Base class for constraints.

    Constraints are built by the CpModel through the Add<XXX> methods.
    Once created by the CpModel class, they are automatically added to the model.
    The purpose of this class is to allow specification of enforcement literals
    for this constraint.

        b = model.NewBoolVar('b')
        x = model.NewIntVar(0, 10, 'x')
        y = model.NewIntVar(0, 10, 'y')

        model.Add(x + 2 * y == 5).OnlyEnforceIf(b.Not())
    """

    def __init__(self, constraints):
        # Record the position of the new constraint inside the repeated field.
        self.__index = len(constraints)
        self.__constraint = constraints.add()

    def OnlyEnforceIf(self, *boolvar):
        """Adds an enforcement literal to the constraint.

        This method adds one or more literals (that is, a boolean variable or its
        negation) as enforcement literals. The conjunction of all these literals
        determines whether the constraint is active or not. It acts as an
        implication, so if the conjunction is true, it implies that the constraint
        must be enforced. If it is false, then the constraint is ignored.

        BoolOr, BoolAnd, and linear constraints all support enforcement literals.

        Args:
          *boolvar: One or more Boolean literals.

        Returns:
          self.
        """
        for literal in ExpandGeneratorOrTuple(boolvar):
            if (isinstance(literal, bool) and bool(literal)) or (
                    cmh.is_integral(literal) and int(literal) == 1):
                # A constant true literal never disables the constraint: no-op.
                continue
            self.__constraint.enforcement_literal.append(literal.Index())
        return self

    def Index(self):
        """Returns the index of the constraint in the model."""
        return self.__index

    def Proto(self):
        """Returns the constraint protobuf."""
        return self.__constraint
class IntervalVar(object):
    """Represents an Interval variable.
    An interval variable is both a constraint and a variable. It is defined by
    three integer variables: start, size, and end.
    It is a constraint because, internally, it enforces that start + size == end.
    It is also a variable as it can appear in specific scheduling constraints:
    NoOverlap, NoOverlap2D, Cumulative.
    Optionally, an enforcement literal can be added to this constraint, in which
    case these scheduling constraints will ignore interval variables with
    enforcement literals assigned to false. Conversely, these constraints will
    also set these enforcement literals to false if they cannot fit these
    intervals into the schedule.
    """
    def __init__(self, model, start, size, end, is_present_index, name):
        self.__model = model
        # As with the IntVar::__init__ method, we hack the __init__ method to
        # support two use cases:
        #   case 1: called when creating a new interval variable.
        #      {start|size|end} are linear expressions, is_present_index is either
        #      None or the index of a Boolean literal. name is a string
        #   case 2: called when querying an existing interval variable.
        #      start_index is an int, all parameters after are None.
        if (size is None and end is None and is_present_index is None and
                name is None):
            # Case 2: wrap the existing interval constraint at index `start`.
            self.__index = start
            self.__ct = model.constraints[start]
        else:
            # Case 1: append a fresh interval constraint to the model proto.
            self.__index = len(model.constraints)
            self.__ct = self.__model.constraints.add()
            self.__ct.interval.start.CopyFrom(start)
            self.__ct.interval.size.CopyFrom(size)
            self.__ct.interval.end.CopyFrom(end)
            if is_present_index is not None:
                self.__ct.enforcement_literal.append(is_present_index)
            if name:
                self.__ct.name = name
    def Index(self):
        """Returns the index of the interval constraint in the model."""
        return self.__index
    def Proto(self):
        """Returns the interval protobuf."""
        return self.__ct.interval
    def __str__(self):
        return self.__ct.name
    def __repr__(self):
        interval = self.__ct.interval
        if self.__ct.enforcement_literal:
            # Optional interval: also display its presence literal.
            return '%s(start = %s, size = %s, end = %s, is_present = %s)' % (
                self.__ct.name, ShortExprName(self.__model, interval.start),
                ShortExprName(self.__model, interval.size),
                ShortExprName(self.__model, interval.end),
                ShortName(self.__model, self.__ct.enforcement_literal[0]))
        else:
            return '%s(start = %s, size = %s, end = %s)' % (
                self.__ct.name, ShortExprName(self.__model, interval.start),
                ShortExprName(self.__model, interval.size),
                ShortExprName(self.__model, interval.end))
    def Name(self):
        return self.__ct.name
    def StartExpr(self):
        # Rebuilds a LinearExpr view of the start expression from the proto.
        return LinearExpr.RebuildFromLinearExpressionProto(
            self.__model, self.__ct.interval.start)
    def SizeExpr(self):
        return LinearExpr.RebuildFromLinearExpressionProto(
            self.__model, self.__ct.interval.size)
    def EndExpr(self):
        return LinearExpr.RebuildFromLinearExpressionProto(
            self.__model, self.__ct.interval.end)
def ObjectIsATrueLiteral(literal):
    """Checks if literal is either True, or a Boolean literals fixed to True."""
    if isinstance(literal, IntVar):
        # A variable is a true literal iff its domain is exactly [1, 1].
        domain = literal.Proto().domain
        return len(domain) == 2 and domain[0] == 1 and domain[1] == 1
    if isinstance(literal, _NotBooleanVariable):
        # not(b) is true iff b is fixed to 0.
        domain = literal.Not().Proto().domain
        return len(domain) == 2 and domain[0] == 0 and domain[1] == 0
    if cmh.is_integral(literal):
        return int(literal) == 1
    return False
def ObjectIsAFalseLiteral(literal):
    """Checks if literal is either False, or a Boolean literals fixed to False."""
    if isinstance(literal, IntVar):
        # A variable is a false literal iff its domain is exactly [0, 0].
        domain = literal.Proto().domain
        return len(domain) == 2 and domain[0] == 0 and domain[1] == 0
    if isinstance(literal, _NotBooleanVariable):
        # not(b) is false iff b is fixed to 1.
        domain = literal.Not().Proto().domain
        return len(domain) == 2 and domain[0] == 1 and domain[1] == 1
    if cmh.is_integral(literal):
        return int(literal) == 0
    return False
class CpModel(object):
"""Methods for building a CP model.
Methods beginning with:
* ```New``` create integer, boolean, or interval variables.
* ```Add``` create new constraints and add them to the model.
"""
    def __init__(self):
        self.__model = cp_model_pb2.CpModelProto()  # Underlying proto model.
        self.__constant_map = {}  # Cache of constant value -> variable index.
# Integer variable.
def NewIntVar(self, lb, ub, name):
"""Create an integer variable with domain [lb, ub].
The CP-SAT solver is limited to integer variables. If you have fractional
values, scale them up so that they become integers; if you have strings,
encode them as integers.
Args:
lb: Lower bound for the variable.
ub: Upper bound for the variable.
name: The name of the variable.
Returns:
a variable whose domain is [lb, ub].
"""
return IntVar(self.__model, Domain(lb, ub), name)
def NewIntVarFromDomain(self, domain, name):
"""Create an integer variable from a domain.
A domain is a set of integers specified by a collection of intervals.
For example, `model.NewIntVarFromDomain(cp_model.
Domain.FromIntervals([[1, 2], [4, 6]]), 'x')`
Args:
domain: An instance of the Domain class.
name: The name of the variable.
Returns:
a variable whose domain is the given domain.
"""
return IntVar(self.__model, domain, name)
def NewBoolVar(self, name):
"""Creates a 0-1 variable with the given name."""
return IntVar(self.__model, Domain(0, 1), name)
def NewConstant(self, value):
"""Declares a constant integer."""
return IntVar(self.__model, self.GetOrMakeIndexFromConstant(value),
None)
# Linear constraints.
def AddLinearConstraint(self, linear_expr, lb, ub):
"""Adds the constraint: `lb <= linear_expr <= ub`."""
return self.AddLinearExpressionInDomain(linear_expr, Domain(lb, ub))
    def AddLinearExpressionInDomain(self, linear_expr, domain):
        """Adds the constraint: `linear_expr` in `domain`."""
        if isinstance(linear_expr, LinearExpr):
            ct = Constraint(self.__model.constraints)
            model_ct = self.__model.constraints[ct.Index()]
            coeffs_map, constant = linear_expr.GetIntegerVarValueMap()
            for t in coeffs_map.items():
                if not isinstance(t[0], IntVar):
                    raise TypeError('Wrong argument' + str(t))
                c = cmh.assert_is_int64(t[1])
                model_ct.linear.vars.append(t[0].Index())
                model_ct.linear.coeffs.append(c)
            # The expression's constant term is folded into the domain bounds,
            # using saturating subtraction to stay inside the int64 range.
            model_ct.linear.domain.extend([
                cmh.capped_subtraction(x, constant)
                for x in domain.FlattenedIntervals()
            ])
            return ct
        elif cmh.is_integral(linear_expr):
            if not domain.Contains(int(linear_expr)):
                return self.AddBoolOr([])  # Evaluate to false.
            # Nothing to do otherwise (trivially true: returns None).
        else:
            raise TypeError(
                'Not supported: CpModel.AddLinearExpressionInDomain(' +
                str(linear_expr) + ' ' + str(domain) + ')')
def Add(self, ct):
"""Adds a `BoundedLinearExpression` to the model.
Args:
ct: A [`BoundedLinearExpression`](#boundedlinearexpression).
Returns:
An instance of the `Constraint` class.
"""
if isinstance(ct, BoundedLinearExpression):
return self.AddLinearExpressionInDomain(
ct.Expression(), Domain.FromFlatIntervals(ct.Bounds()))
elif ct and isinstance(ct, bool):
return self.AddBoolOr([True])
elif not ct and isinstance(ct, bool):
return self.AddBoolOr([]) # Evaluate to false.
else:
raise TypeError('Not supported: CpModel.Add(' + str(ct) + ')')
# General Integer Constraints.
def AddAllDifferent(self, *expressions):
"""Adds AllDifferent(expressions).
This constraint forces all expressions to have different values.
Args:
*expressions: simple expressions of the form a * var + constant.
Returns:
An instance of the `Constraint` class.
"""
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
expanded = ExpandGeneratorOrTuple(expressions)
model_ct.all_diff.exprs.extend(
[self.ParseLinearExpression(x) for x in expanded])
return ct
def AddElement(self, index, variables, target):
"""Adds the element constraint: `variables[index] == target`."""
if not variables:
raise ValueError('AddElement expects a non-empty variables array')
if cmh.is_integral(index):
return self.Add(list(variables)[int(index)] == target)
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.element.index = self.GetOrMakeIndex(index)
model_ct.element.vars.extend(
[self.GetOrMakeIndex(x) for x in variables])
model_ct.element.target = self.GetOrMakeIndex(target)
return ct
def AddCircuit(self, arcs):
"""Adds Circuit(arcs).
Adds a circuit constraint from a sparse list of arcs that encode the graph.
A circuit is a unique Hamiltonian path in a subgraph of the total
graph. In case a node 'i' is not in the path, then there must be a
loop arc 'i -> i' associated with a true literal. Otherwise
this constraint will fail.
Args:
arcs: a list of arcs. An arc is a tuple (source_node, destination_node,
literal). The arc is selected in the circuit if the literal is true.
Both source_node and destination_node must be integers between 0 and the
number of nodes - 1.
Returns:
An instance of the `Constraint` class.
Raises:
ValueError: If the list of arcs is empty.
"""
if not arcs:
raise ValueError('AddCircuit expects a non-empty array of arcs')
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
for arc in arcs:
tail = cmh.assert_is_int32(arc[0])
head = cmh.assert_is_int32(arc[1])
lit = self.GetOrMakeBooleanIndex(arc[2])
model_ct.circuit.tails.append(tail)
model_ct.circuit.heads.append(head)
model_ct.circuit.literals.append(lit)
return ct
def AddMultipleCircuit(self, arcs):
"""Adds a multiple circuit constraint, aka the "VRP" constraint.
The direct graph where arc #i (from tails[i] to head[i]) is present iff
literals[i] is true must satisfy this set of properties:
- #incoming arcs == 1 except for node 0.
- #outgoing arcs == 1 except for node 0.
- for node zero, #incoming arcs == #outgoing arcs.
- There are no duplicate arcs.
- Self-arcs are allowed except for node 0.
- There is no cycle in this graph, except through node 0.
Args:
arcs: a list of arcs. An arc is a tuple (source_node, destination_node,
literal). The arc is selected in the circuit if the literal is true.
Both source_node and destination_node must be integers between 0 and the
number of nodes - 1.
Returns:
An instance of the `Constraint` class.
Raises:
ValueError: If the list of arcs is empty.
"""
if not arcs:
raise ValueError(
'AddMultipleCircuit expects a non-empty array of arcs')
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
for arc in arcs:
tail = cmh.assert_is_int32(arc[0])
head = cmh.assert_is_int32(arc[1])
lit = self.GetOrMakeBooleanIndex(arc[2])
model_ct.routes.tails.append(tail)
model_ct.routes.heads.append(head)
model_ct.routes.literals.append(lit)
return ct
def AddAllowedAssignments(self, variables, tuples_list):
"""Adds AllowedAssignments(variables, tuples_list).
An AllowedAssignments constraint is a constraint on an array of variables,
which requires that when all variables are assigned values, the resulting
array equals one of the tuples in `tuple_list`.
Args:
variables: A list of variables.
tuples_list: A list of admissible tuples. Each tuple must have the same
length as the variables, and the ith value of a tuple corresponds to the
ith variable.
Returns:
An instance of the `Constraint` class.
Raises:
TypeError: If a tuple does not have the same size as the list of
variables.
ValueError: If the array of variables is empty.
"""
if not variables:
raise ValueError(
'AddAllowedAssignments expects a non-empty variables '
'array')
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.table.vars.extend([self.GetOrMakeIndex(x) for x in variables])
arity = len(variables)
for t in tuples_list:
if len(t) != arity:
raise TypeError('Tuple ' + str(t) + ' has the wrong arity')
ar = []
for v in t:
ar.append(cmh.assert_is_int64(v))
model_ct.table.values.extend(ar)
return ct
def AddForbiddenAssignments(self, variables, tuples_list):
"""Adds AddForbiddenAssignments(variables, [tuples_list]).
A ForbiddenAssignments constraint is a constraint on an array of variables
where the list of impossible combinations is provided in the tuples list.
Args:
variables: A list of variables.
tuples_list: A list of forbidden tuples. Each tuple must have the same
length as the variables, and the *i*th value of a tuple corresponds to
the *i*th variable.
Returns:
An instance of the `Constraint` class.
Raises:
TypeError: If a tuple does not have the same size as the list of
variables.
ValueError: If the array of variables is empty.
"""
if not variables:
raise ValueError(
'AddForbiddenAssignments expects a non-empty variables '
'array')
index = len(self.__model.constraints)
ct = self.AddAllowedAssignments(variables, tuples_list)
self.__model.constraints[index].table.negated = True
return ct
def AddAutomaton(self, transition_variables, starting_state, final_states,
transition_triples):
"""Adds an automaton constraint.
An automaton constraint takes a list of variables (of size *n*), an initial
state, a set of final states, and a set of transitions. A transition is a
triplet (*tail*, *transition*, *head*), where *tail* and *head* are states,
and *transition* is the label of an arc from *head* to *tail*,
corresponding to the value of one variable in the list of variables.
This automaton will be unrolled into a flow with *n* + 1 phases. Each phase
contains the possible states of the automaton. The first state contains the
initial state. The last phase contains the final states.
Between two consecutive phases *i* and *i* + 1, the automaton creates a set
of arcs. For each transition (*tail*, *transition*, *head*), it will add
an arc from the state *tail* of phase *i* and the state *head* of phase
*i* + 1. This arc is labeled by the value *transition* of the variables
`variables[i]`. That is, this arc can only be selected if `variables[i]`
is assigned the value *transition*.
A feasible solution of this constraint is an assignment of variables such
that, starting from the initial state in phase 0, there is a path labeled by
the values of the variables that ends in one of the final states in the
final phase.
Args:
transition_variables: A non-empty list of variables whose values
correspond to the labels of the arcs traversed by the automaton.
starting_state: The initial state of the automaton.
final_states: A non-empty list of admissible final states.
transition_triples: A list of transitions for the automaton, in the
following format (current_state, variable_value, next_state).
Returns:
An instance of the `Constraint` class.
Raises:
ValueError: if `transition_variables`, `final_states`, or
`transition_triples` are empty.
"""
if not transition_variables:
raise ValueError(
'AddAutomaton expects a non-empty transition_variables '
'array')
if not final_states:
raise ValueError('AddAutomaton expects some final states')
if not transition_triples:
raise ValueError('AddAutomaton expects some transition triples')
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.automaton.vars.extend(
[self.GetOrMakeIndex(x) for x in transition_variables])
starting_state = cmh.assert_is_int64(starting_state)
model_ct.automaton.starting_state = starting_state
for v in final_states:
v = cmh.assert_is_int64(v)
model_ct.automaton.final_states.append(v)
for t in transition_triples:
if len(t) != 3:
raise TypeError('Tuple ' + str(t) +
' has the wrong arity (!= 3)')
tail = cmh.assert_is_int64(t[0])
label = cmh.assert_is_int64(t[1])
head = cmh.assert_is_int64(t[2])
model_ct.automaton.transition_tail.append(tail)
model_ct.automaton.transition_label.append(label)
model_ct.automaton.transition_head.append(head)
return ct
def AddInverse(self, variables, inverse_variables):
"""Adds Inverse(variables, inverse_variables).
An inverse constraint enforces that if `variables[i]` is assigned a value
`j`, then `inverse_variables[j]` is assigned a value `i`. And vice versa.
Args:
variables: An array of integer variables.
inverse_variables: An array of integer variables.
Returns:
An instance of the `Constraint` class.
Raises:
TypeError: if variables and inverse_variables have different lengths, or
if they are empty.
"""
if not variables or not inverse_variables:
raise TypeError(
'The Inverse constraint does not accept empty arrays')
if len(variables) != len(inverse_variables):
raise TypeError(
'In the inverse constraint, the two array variables and'
' inverse_variables must have the same length.')
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.inverse.f_direct.extend(
[self.GetOrMakeIndex(x) for x in variables])
model_ct.inverse.f_inverse.extend(
[self.GetOrMakeIndex(x) for x in inverse_variables])
return ct
def AddReservoirConstraint(self, times, level_changes, min_level,
max_level):
"""Adds Reservoir(times, level_changes, min_level, max_level).
Maintains a reservoir level within bounds. The water level starts at 0, and
at any time, it must be between min_level and max_level.
If the affine expression `times[i]` is assigned a value t, then the current
level changes by `level_changes[i]`, which is constant, at time t.
Note that min level must be <= 0, and the max level must be >= 0. Please
use fixed level_changes to simulate initial state.
Therefore, at any time:
sum(level_changes[i] if times[i] <= t) in [min_level, max_level]
Args:
times: A list of affine expressions which specify the time of the filling
or emptying the reservoir.
level_changes: A list of integer values that specifies the amount of the
emptying or filling.
min_level: At any time, the level of the reservoir must be greater or
equal than the min level.
max_level: At any time, the level of the reservoir must be less or equal
than the max level.
Returns:
An instance of the `Constraint` class.
Raises:
ValueError: if max_level < min_level.
ValueError: if max_level < 0.
ValueError: if min_level > 0
"""
if max_level < min_level:
return ValueError(
'Reservoir constraint must have a max_level >= min_level')
if max_level < 0:
return ValueError('Reservoir constraint must have a max_level >= 0')
if min_level > 0:
return ValueError('Reservoir constraint must have a min_level <= 0')
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.reservoir.time_exprs.extend(
[self.ParseLinearExpression(x) for x in times])
model_ct.reservoir.level_changes.extend(level_changes)
model_ct.reservoir.min_level = min_level
model_ct.reservoir.max_level = max_level
return ct
def AddReservoirConstraintWithActive(self, times, level_changes, actives,
min_level, max_level):
"""Adds Reservoir(times, level_changes, actives, min_level, max_level).
Maintains a reservoir level within bounds. The water level starts at 0, and
at any time, it must be between min_level and max_level.
If the variable `times[i]` is assigned a value t, and `actives[i]` is
`True`, then the current level changes by `level_changes[i]`, which is
constant,
at time t.
Note that min level must be <= 0, and the max level must be >= 0. Please
use fixed level_changes to simulate initial state.
Therefore, at any time:
sum(level_changes[i] * actives[i] if times[i] <= t) in [min_level,
max_level]
The array of boolean variables 'actives', if defined, indicates which
actions are actually performed.
Args:
times: A list of affine expressions which specify the time of the filling
or emptying the reservoir.
level_changes: A list of integer values that specifies the amount of the
emptying or filling.
actives: a list of boolean variables. They indicates if the
emptying/refilling events actually take place.
min_level: At any time, the level of the reservoir must be greater or
equal than the min level.
max_level: At any time, the level of the reservoir must be less or equal
than the max level.
Returns:
An instance of the `Constraint` class.
Raises:
ValueError: if max_level < min_level.
ValueError: if max_level < 0.
ValueError: if min_level > 0
"""
if max_level < min_level:
return ValueError(
'Reservoir constraint must have a max_level >= min_level')
if max_level < 0:
return ValueError('Reservoir constraint must have a max_level >= 0')
if min_level > 0:
return ValueError('Reservoir constraint must have a min_level <= 0')
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.reservoir.time_exprs.extend(
[self.ParseLinearExpression(x) for x in times])
model_ct.reservoir.level_changes.extend(level_changes)
model_ct.reservoir.active_literals.extend(
[self.GetOrMakeIndex(x) for x in actives])
model_ct.reservoir.min_level = min_level
model_ct.reservoir.max_level = max_level
return ct
def AddMapDomain(self, var, bool_var_array, offset=0):
"""Adds `var == i + offset <=> bool_var_array[i] == true for all i`."""
for i, bool_var in enumerate(bool_var_array):
b_index = bool_var.Index()
var_index = var.Index()
model_ct = self.__model.constraints.add()
model_ct.linear.vars.append(var_index)
model_ct.linear.coeffs.append(1)
model_ct.linear.domain.extend([offset + i, offset + i])
model_ct.enforcement_literal.append(b_index)
model_ct = self.__model.constraints.add()
model_ct.linear.vars.append(var_index)
model_ct.linear.coeffs.append(1)
model_ct.enforcement_literal.append(-b_index - 1)
if offset + i - 1 >= INT_MIN:
model_ct.linear.domain.extend([INT_MIN, offset + i - 1])
if offset + i + 1 <= INT_MAX:
model_ct.linear.domain.extend([offset + i + 1, INT_MAX])
def AddImplication(self, a, b):
"""Adds `a => b` (`a` implies `b`)."""
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.bool_or.literals.append(self.GetOrMakeBooleanIndex(b))
model_ct.enforcement_literal.append(self.GetOrMakeBooleanIndex(a))
return ct
def AddBoolOr(self, *literals):
"""Adds `Or(literals) == true`: Sum(literals) >= 1."""
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.bool_or.literals.extend([
self.GetOrMakeBooleanIndex(x)
for x in ExpandGeneratorOrTuple(literals)
])
return ct
def AddAtLeastOne(self, *literals):
"""Same as `AddBoolOr`: `Sum(literals) >= 1`."""
return self.AddBoolOr(*literals)
def AddAtMostOne(self, *literals):
"""Adds `AtMostOne(literals)`: `Sum(literals) <= 1`."""
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.at_most_one.literals.extend([
self.GetOrMakeBooleanIndex(x)
for x in ExpandGeneratorOrTuple(literals)
])
return ct
def AddExactlyOne(self, *literals):
"""Adds `ExactlyOne(literals)`: `Sum(literals) == 1`."""
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.exactly_one.literals.extend([
self.GetOrMakeBooleanIndex(x)
for x in ExpandGeneratorOrTuple(literals)
])
return ct
def AddBoolAnd(self, *literals):
"""Adds `And(literals) == true`."""
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.bool_and.literals.extend([
self.GetOrMakeBooleanIndex(x)
for x in ExpandGeneratorOrTuple(literals)
])
return ct
def AddBoolXOr(self, *literals):
"""Adds `XOr(literals) == true`.
In contrast to AddBoolOr and AddBoolAnd, it does not support
.OnlyEnforceIf().
Args:
*literals: the list of literals in the constraint.
Returns:
An `Constraint` object.
"""
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.bool_xor.literals.extend([
self.GetOrMakeBooleanIndex(x)
for x in ExpandGeneratorOrTuple(literals)
])
return ct
def AddMinEquality(self, target, exprs):
"""Adds `target == Min(exprs)`."""
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.lin_max.exprs.extend(
[self.ParseLinearExpression(x, True) for x in exprs])
model_ct.lin_max.target.CopyFrom(
self.ParseLinearExpression(target, True))
return ct
def AddMaxEquality(self, target, exprs):
"""Adds `target == Max(exprs)`."""
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.lin_max.exprs.extend(
[self.ParseLinearExpression(x) for x in exprs])
model_ct.lin_max.target.CopyFrom(self.ParseLinearExpression(target))
return ct
def AddDivisionEquality(self, target, num, denom):
"""Adds `target == num // denom` (integer division rounded towards 0)."""
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.int_div.exprs.append(self.ParseLinearExpression(num))
model_ct.int_div.exprs.append(self.ParseLinearExpression(denom))
model_ct.int_div.target.CopyFrom(self.ParseLinearExpression(target))
return ct
def AddAbsEquality(self, target, expr):
"""Adds `target == Abs(var)`."""
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.lin_max.exprs.append(self.ParseLinearExpression(expr))
model_ct.lin_max.exprs.append(self.ParseLinearExpression(expr, True))
model_ct.lin_max.target.CopyFrom(self.ParseLinearExpression(target))
return ct
def AddModuloEquality(self, target, var, mod):
"""Adds `target = var % mod`."""
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.int_mod.exprs.append(self.ParseLinearExpression(var))
model_ct.int_mod.exprs.append(self.ParseLinearExpression(mod))
model_ct.int_mod.target.CopyFrom(self.ParseLinearExpression(target))
return ct
def AddMultiplicationEquality(self, target, *expressions):
"""Adds `target == expressions[0] * .. * expressions[n]`."""
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.int_prod.exprs.extend([
self.ParseLinearExpression(expr)
for expr in ExpandGeneratorOrTuple(expressions)
])
model_ct.int_prod.target.CopyFrom(self.ParseLinearExpression(target))
return ct
# Scheduling support
def NewIntervalVar(self, start, size, end, name):
"""Creates an interval variable from start, size, and end.
An interval variable is a constraint, that is itself used in other
constraints like NoOverlap.
Internally, it ensures that `start + size == end`.
Args:
start: The start of the interval. It can be an affine or constant
expression.
size: The size of the interval. It can be an affine or constant
expression.
end: The end of the interval. It can be an affine or constant expression.
name: The name of the interval variable.
Returns:
An `IntervalVar` object.
"""
self.Add(start + size == end)
start_expr = self.ParseLinearExpression(start)
size_expr = self.ParseLinearExpression(size)
end_expr = self.ParseLinearExpression(end)
if len(start_expr.vars) > 1:
raise TypeError(
'cp_model.NewIntervalVar: start must be affine or constant.')
if len(size_expr.vars) > 1:
raise TypeError(
'cp_model.NewIntervalVar: size must be affine or constant.')
if len(end_expr.vars) > 1:
raise TypeError(
'cp_model.NewIntervalVar: end must be affine or constant.')
return IntervalVar(self.__model, start_expr, size_expr, end_expr, None,
name)
def NewFixedSizeIntervalVar(self, start, size, name):
"""Creates an interval variable from start, and a fixed size.
An interval variable is a constraint, that is itself used in other
constraints like NoOverlap.
Args:
start: The start of the interval. It can be an affine or constant
expression.
size: The size of the interval. It must be an integer value.
name: The name of the interval variable.
Returns:
An `IntervalVar` object.
"""
size = cmh.assert_is_int64(size)
start_expr = self.ParseLinearExpression(start)
size_expr = self.ParseLinearExpression(size)
end_expr = self.ParseLinearExpression(start + size)
if len(start_expr.vars) > 1:
raise TypeError(
'cp_model.NewIntervalVar: start must be affine or constant.')
return IntervalVar(self.__model, start_expr, size_expr, end_expr, None,
name)
def NewOptionalIntervalVar(self, start, size, end, is_present, name):
"""Creates an optional interval var from start, size, end, and is_present.
An optional interval variable is a constraint, that is itself used in other
constraints like NoOverlap. This constraint is protected by an is_present
literal that indicates if it is active or not.
Internally, it ensures that `is_present` implies `start + size == end`.
Args:
start: The start of the interval. It can be an integer value, or an
integer variable.
size: The size of the interval. It can be an integer value, or an integer
variable.
end: The end of the interval. It can be an integer value, or an integer
variable.
is_present: A literal that indicates if the interval is active or not. A
inactive interval is simply ignored by all constraints.
name: The name of the interval variable.
Returns:
An `IntervalVar` object.
"""
# Add the linear constraint.
self.Add(start + size == end).OnlyEnforceIf(is_present)
# Creates the IntervalConstraintProto object.
is_present_index = self.GetOrMakeBooleanIndex(is_present)
start_expr = self.ParseLinearExpression(start)
size_expr = self.ParseLinearExpression(size)
end_expr = self.ParseLinearExpression(end)
if len(start_expr.vars) > 1:
raise TypeError(
'cp_model.NewIntervalVar: start must be affine or constant.')
if len(size_expr.vars) > 1:
raise TypeError(
'cp_model.NewIntervalVar: size must be affine or constant.')
if len(end_expr.vars) > 1:
raise TypeError(
'cp_model.NewIntervalVar: end must be affine or constant.')
return IntervalVar(self.__model, start_expr, size_expr, end_expr,
is_present_index, name)
def NewOptionalFixedSizeIntervalVar(self, start, size, is_present, name):
"""Creates an interval variable from start, and a fixed size.
An interval variable is a constraint, that is itself used in other
constraints like NoOverlap.
Args:
start: The start of the interval. It can be an affine or constant
expression.
size: The size of the interval. It must be an integer value.
is_present: A literal that indicates if the interval is active or not. A
inactive interval is simply ignored by all constraints.
name: The name of the interval variable.
Returns:
An `IntervalVar` object.
"""
size = cmh.assert_is_int64(size)
start_expr = self.ParseLinearExpression(start)
size_expr = self.ParseLinearExpression(size)
end_expr = self.ParseLinearExpression(start + size)
if len(start_expr.vars) > 1:
raise TypeError(
'cp_model.NewIntervalVar: start must be affine or constant.')
is_present_index = self.GetOrMakeBooleanIndex(is_present)
return IntervalVar(self.__model, start_expr, size_expr, end_expr,
is_present_index, name)
def AddNoOverlap(self, interval_vars):
"""Adds NoOverlap(interval_vars).
A NoOverlap constraint ensures that all present intervals do not overlap
in time.
Args:
interval_vars: The list of interval variables to constrain.
Returns:
An instance of the `Constraint` class.
"""
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.no_overlap.intervals.extend(
[self.GetIntervalIndex(x) for x in interval_vars])
return ct
def AddNoOverlap2D(self, x_intervals, y_intervals):
"""Adds NoOverlap2D(x_intervals, y_intervals).
A NoOverlap2D constraint ensures that all present rectangles do not overlap
on a plane. Each rectangle is aligned with the X and Y axis, and is defined
by two intervals which represent its projection onto the X and Y axis.
Furthermore, one box is optional if at least one of the x or y interval is
optional.
Args:
x_intervals: The X coordinates of the rectangles.
y_intervals: The Y coordinates of the rectangles.
Returns:
An instance of the `Constraint` class.
"""
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.no_overlap_2d.x_intervals.extend(
[self.GetIntervalIndex(x) for x in x_intervals])
model_ct.no_overlap_2d.y_intervals.extend(
[self.GetIntervalIndex(x) for x in y_intervals])
return ct
def AddCumulative(self, intervals, demands, capacity):
"""Adds Cumulative(intervals, demands, capacity).
This constraint enforces that:
for all t:
sum(demands[i]
if (start(intervals[i]) <= t < end(intervals[i])) and
(intervals[i] is present)) <= capacity
Args:
intervals: The list of intervals.
demands: The list of demands for each interval. Each demand must be >= 0.
Each demand can be an integer value, or an integer variable.
capacity: The maximum capacity of the cumulative constraint. It must be a
positive integer value or variable.
Returns:
An instance of the `Constraint` class.
"""
ct = Constraint(self.__model.constraints)
model_ct = self.__model.constraints[ct.Index()]
model_ct.cumulative.intervals.extend(
[self.GetIntervalIndex(x) for x in intervals])
for d in demands:
model_ct.cumulative.demands.append(self.ParseLinearExpression(d))
model_ct.cumulative.capacity.CopyFrom(
self.ParseLinearExpression(capacity))
return ct
# Support for deep copy.
def CopyFrom(self, other_model):
"""Reset the model, and creates a new one from a CpModelProto instance."""
self.__model.CopyFrom(other_model.Proto())
# Rebuild constant map.
self.__constant_map.clear()
for i, var in enumerate(self.__model.variables):
if len(var.domain) == 2 and var.domain[0] == var.domain[1]:
self.__constant_map[var.domain[0]] = i
def GetBoolVarFromProtoIndex(self, index):
"""Returns an already created Boolean variable from its index."""
if index < 0 or index >= len(self.__model.variables):
raise ValueError(
f'GetBoolVarFromProtoIndex: out of bound index {index}')
var = self.__model.variables[index]
if len(var.domain) != 2 or var.domain[0] < 0 or var.domain[1] > 1:
raise ValueError(
f'GetBoolVarFromProtoIndex: index {index} does not reference' +
' a Boolean variable')
return IntVar(self.__model, index, None)
def GetIntVarFromProtoIndex(self, index):
"""Returns an already created integer variable from its index."""
if index < 0 or index >= len(self.__model.variables):
raise ValueError(
f'GetIntVarFromProtoIndex: out of bound index {index}')
return IntVar(self.__model, index, None)
def GetIntervalVarFromProtoIndex(self, index):
"""Returns an already created interval variable from its index."""
if index < 0 or index >= len(self.__model.constraints):
raise ValueError(
f'GetIntervalVarFromProtoIndex: out of bound index {index}')
ct = self.__model.constraints[index]
if not ct.HasField('interval'):
raise ValueError(
f'GetIntervalVarFromProtoIndex: index {index} does not reference an'
+ ' interval variable')
return IntervalVar(self.__model, index, None, None, None, None)
# Helpers.
def __str__(self):
return str(self.__model)
def Proto(self):
"""Returns the underlying CpModelProto."""
return self.__model
def Negated(self, index):
return -index - 1
def GetOrMakeIndex(self, arg):
"""Returns the index of a variable, its negation, or a number."""
if isinstance(arg, IntVar):
return arg.Index()
elif (isinstance(arg, _ProductCst) and
isinstance(arg.Expression(), IntVar) and arg.Coefficient() == -1):
return -arg.Expression().Index() - 1
elif cmh.is_integral(arg):
arg = cmh.assert_is_int64(arg)
return self.GetOrMakeIndexFromConstant(arg)
else:
raise TypeError('NotSupported: model.GetOrMakeIndex(' + str(arg) +
')')
def GetOrMakeBooleanIndex(self, arg):
"""Returns an index from a boolean expression."""
if isinstance(arg, IntVar):
self.AssertIsBooleanVariable(arg)
return arg.Index()
elif isinstance(arg, _NotBooleanVariable):
self.AssertIsBooleanVariable(arg.Not())
return arg.Index()
elif cmh.is_integral(arg):
cmh.assert_is_boolean(arg)
return self.GetOrMakeIndexFromConstant(int(arg))
else:
raise TypeError('NotSupported: model.GetOrMakeBooleanIndex(' +
str(arg) + ')')
def GetIntervalIndex(self, arg):
if not isinstance(arg, IntervalVar):
raise TypeError('NotSupported: model.GetIntervalIndex(%s)' % arg)
return arg.Index()
def GetOrMakeIndexFromConstant(self, value):
if value in self.__constant_map:
return self.__constant_map[value]
index = len(self.__model.variables)
var = self.__model.variables.add()
var.domain.extend([value, value])
self.__constant_map[value] = index
return index
def VarIndexToVarProto(self, var_index):
if var_index >= 0:
return self.__model.variables[var_index]
else:
return self.__model.variables[-var_index - 1]
def ParseLinearExpression(self, linear_expr, negate=False):
"""Returns a LinearExpressionProto built from a LinearExpr instance."""
result = cp_model_pb2.LinearExpressionProto()
mult = -1 if negate else 1
if cmh.is_integral(linear_expr):
result.offset = int(linear_expr) * mult
return result
if isinstance(linear_expr, IntVar):
result.vars.append(self.GetOrMakeIndex(linear_expr))
result.coeffs.append(mult)
return result
coeffs_map, constant = linear_expr.GetIntegerVarValueMap()
result.offset = constant * mult
for t in coeffs_map.items():
if not isinstance(t[0], IntVar):
raise TypeError('Wrong argument' + str(t))
c = cmh.assert_is_int64(t[1])
result.vars.append(t[0].Index())
result.coeffs.append(c * mult)
return result
    def _SetObjective(self, obj, minimize):
        """Sets the objective of the model.

        Clears any previous (integer or floating-point) objective, then
        installs `obj` as the new one. Maximization is encoded by negating
        variable indices / offsets and setting scaling_factor = -1, so the
        solver always minimizes internally.

        Args:
          obj: an IntVar, a LinearExpr, or an integral constant.
          minimize: True to minimize `obj`, False to maximize it.

        Raises:
          TypeError: if `obj` is not a valid objective expression.
        """
        # Only one objective field may be set at a time; clear both.
        self.__model.ClearField('objective')
        self.__model.ClearField('floating_point_objective')
        if isinstance(obj, IntVar):
            # Single-variable objective with unit coefficient.
            self.__model.objective.coeffs.append(1)
            self.__model.objective.offset = 0
            if minimize:
                self.__model.objective.vars.append(obj.Index())
                self.__model.objective.scaling_factor = 1
            else:
                # Maximize(x) == Minimize(-x): store the negated index.
                self.__model.objective.vars.append(self.Negated(obj.Index()))
                self.__model.objective.scaling_factor = -1
        elif isinstance(obj, LinearExpr):
            # The float map also reports whether all coefficients are integral.
            coeffs_map, constant, is_integer = obj.GetFloatVarValueMap()
            if is_integer:
                if minimize:
                    self.__model.objective.scaling_factor = 1
                    self.__model.objective.offset = constant
                else:
                    self.__model.objective.scaling_factor = -1
                    self.__model.objective.offset = -constant
                for v, c, in coeffs_map.items():
                    self.__model.objective.coeffs.append(c)
                    if minimize:
                        self.__model.objective.vars.append(v.Index())
                    else:
                        self.__model.objective.vars.append(
                            self.Negated(v.Index()))
            else:
                # Non-integral coefficients go to the float objective, which
                # has a native `maximize` flag instead of negated indices.
                self.__model.floating_point_objective.maximize = not minimize
                self.__model.floating_point_objective.offset = constant
                for v, c, in coeffs_map.items():
                    self.__model.floating_point_objective.coeffs.append(c)
                    self.__model.floating_point_objective.vars.append(v.Index())
        elif cmh.is_integral(obj):
            # Constant objective: nothing to optimize, only an offset.
            self.__model.objective.offset = int(obj)
            self.__model.objective.scaling_factor = 1
        else:
            raise TypeError('TypeError: ' + str(obj) +
                            ' is not a valid objective')
def Minimize(self, obj):
"""Sets the objective of the model to minimize(obj)."""
self._SetObjective(obj, minimize=True)
def Maximize(self, obj):
"""Sets the objective of the model to maximize(obj)."""
self._SetObjective(obj, minimize=False)
def HasObjective(self):
return self.__model.HasField('objective')
def AddDecisionStrategy(self, variables, var_strategy, domain_strategy):
"""Adds a search strategy to the model.
Args:
variables: a list of variables this strategy will assign.
var_strategy: heuristic to choose the next variable to assign.
domain_strategy: heuristic to reduce the domain of the selected variable.
Currently, this is advanced code: the union of all strategies added to
the model must be complete, i.e. instantiates all variables.
Otherwise, Solve() will fail.
"""
strategy = self.__model.search_strategy.add()
for v in variables:
strategy.variables.append(v.Index())
strategy.variable_selection_strategy = var_strategy
strategy.domain_reduction_strategy = domain_strategy
def ModelStats(self):
"""Returns a string containing some model statistics."""
return pywrapsat.CpSatHelper.ModelStats(self.__model)
def Validate(self):
"""Returns a string indicating that the model is invalid."""
return pywrapsat.CpSatHelper.ValidateModel(self.__model)
def ExportToFile(self, file):
"""Write the model as a protocol buffer to 'file'.
Args:
file: file to write the model to. If the filename ends with 'txt', the
model will be written as a text file, otherwise, the binary format will
be used.
Returns:
True if the model was correctly written.
"""
return pywrapsat.CpSatHelper.WriteModelToFile(self.__model, file)
def AssertIsBooleanVariable(self, x):
if isinstance(x, IntVar):
var = self.__model.variables[x.Index()]
if len(var.domain) != 2 or var.domain[0] < 0 or var.domain[1] > 1:
raise TypeError('TypeError: ' + str(x) +
' is not a boolean variable')
elif not isinstance(x, _NotBooleanVariable):
raise TypeError('TypeError: ' + str(x) +
' is not a boolean variable')
def AddHint(self, var, value):
"""Adds 'var == value' as a hint to the solver."""
self.__model.solution_hint.vars.append(self.GetOrMakeIndex(var))
self.__model.solution_hint.values.append(value)
def ClearHints(self):
"""Remove any solution hint from the model."""
self.__model.ClearField('solution_hint')
def AddAssumption(self, lit):
"""Add the literal 'lit' to the model as assumptions."""
self.__model.assumptions.append(self.GetOrMakeBooleanIndex(lit))
def AddAssumptions(self, literals):
"""Add the literals to the model as assumptions."""
for lit in literals:
self.AddAssumption(lit)
def ClearAssumptions(self):
"""Remove all assumptions from the model."""
self.__model.ClearField('assumptions')
def ExpandGeneratorOrTuple(args):
    """Unwraps a 1-tuple holding a generator/sequence; returns tuples as-is."""
    if not hasattr(args, '__len__'):
        # A bare generator was passed instead of a tuple of arguments.
        return args[0]
    if len(args) != 1:
        # Zero or several positional arguments: use them directly.
        return args
    if cmh.is_a_number(args[0]) or isinstance(args[0], LinearExpr):
        # A single scalar/expression argument is a literal, not a container.
        return args
    # The single argument is itself an iterable of literals.
    return args[0]
def EvaluateLinearExpr(expression, solution):
    """Evaluate a linear expression against a solution."""
    if cmh.is_integral(expression):
        return int(expression)
    if not isinstance(expression, LinearExpr):
        raise TypeError('Cannot interpret %s as a linear expression.' %
                        expression)
    # Iteratively flatten the expression tree with an explicit work list of
    # (sub-expression, accumulated coefficient) pairs.
    total = 0
    pending = [(expression, 1)]
    while pending:
        node, coeff = pending.pop()
        if cmh.is_integral(node):
            total += int(node) * coeff
        elif isinstance(node, _ProductCst):
            pending.append((node.Expression(), coeff * node.Coefficient()))
        elif isinstance(node, _Sum):
            pending.append((node.Left(), coeff))
            pending.append((node.Right(), coeff))
        elif isinstance(node, _SumArray):
            pending.extend((child, coeff) for child in node.Expressions())
            total += node.Constant() * coeff
        elif isinstance(node, _WeightedSum):
            pending.extend(
                (child, coeff * weight)
                for child, weight in zip(node.Expressions(),
                                         node.Coefficients()))
            total += node.Constant() * coeff
        elif isinstance(node, IntVar):
            total += coeff * solution.solution[node.Index()]
        elif isinstance(node, _NotBooleanVariable):
            total += coeff * (1 - solution.solution[node.Not().Index()])
        else:
            raise TypeError(f'Cannot interpret {node} as a linear expression.')
    return total
def EvaluateBooleanExpression(literal, solution):
    """Evaluate a boolean expression against a solution."""
    if cmh.is_integral(literal):
        return bool(literal)
    if isinstance(literal, (IntVar, _NotBooleanVariable)):
        index = literal.Index()
        if index >= 0:
            return bool(solution.solution[index])
        # Negative indices encode the negation of variable -index - 1.
        return not solution.solution[-index - 1]
    raise TypeError(f'Cannot interpret {literal} as a boolean expression.')
class CpSolver(object):
"""Main solver class.
The purpose of this class is to search for a solution to the model provided
to the Solve() method.
Once Solve() is called, this class allows inspecting the solution found
with the Value() and BooleanValue() methods, as well as general statistics
about the solve procedure.
"""
def __init__(self):
self.__model = None
self.__solution: cp_model_pb2.CpSolverResponse = None
self.parameters = sat_parameters_pb2.SatParameters()
self.log_callback = None
self.__solve_wrapper: pywrapsat.SolveWrapper = None
self.__lock = threading.Lock()
def Solve(self, model, solution_callback=None):
"""Solves a problem and passes each solution to the callback if not null."""
with self.__lock:
solve_wrapper = pywrapsat.SolveWrapper()
solve_wrapper.SetParameters(self.parameters)
if solution_callback is not None:
solve_wrapper.AddSolutionCallback(solution_callback)
if self.log_callback is not None:
solve_wrapper.AddLogCallback(self.log_callback)
self.__solution = solve_wrapper.Solve(model.Proto())
if solution_callback is not None:
solve_wrapper.ClearSolutionCallback(solution_callback)
with self.__lock:
self.__solve_wrapper = None
return self.__solution.status
def SolveWithSolutionCallback(self, model, callback):
"""DEPRECATED Use Solve() with the callback argument."""
warnings.warn(
'SolveWithSolutionCallback is deprecated; use Solve() with' +
'the callback argument.', DeprecationWarning)
return self.Solve(model, callback)
def SearchForAllSolutions(self, model, callback):
"""DEPRECATED Use Solve() with the right parameter.
Search for all solutions of a satisfiability problem.
This method searches for all feasible solutions of a given model.
Then it feeds the solution to the callback.
Note that the model cannot contain an objective.
Args:
model: The model to solve.
callback: The callback that will be called at each solution.
Returns:
The status of the solve:
* *FEASIBLE* if some solutions have been found
* *INFEASIBLE* if the solver has proved there are no solution
* *OPTIMAL* if all solutions have been found
"""
warnings.warn(
'SearchForAllSolutions is deprecated; use Solve() with' +
'enumerate_all_solutions = True.', DeprecationWarning)
if model.HasObjective():
raise TypeError('Search for all solutions is only defined on '
'satisfiability problems')
# Store old parameter.
enumerate_all = self.parameters.enumerate_all_solutions
self.parameters.enumerate_all_solutions = True
self.Solve(model, callback)
# Restore parameter.
self.parameters.enumerate_all_solutions = enumerate_all
return self.__solution.status
def StopSearch(self):
"""Stops the current search asynchronously."""
with self.__lock:
if self.__solve_wrapper:
self.__solve_wrapper.StopSearch()
def Value(self, expression):
"""Returns the value of a linear expression after solve."""
if not self.__solution:
raise RuntimeError('Solve() has not be called.')
return EvaluateLinearExpr(expression, self.__solution)
def BooleanValue(self, literal):
"""Returns the boolean value of a literal after solve."""
if not self.__solution:
raise RuntimeError('Solve() has not be called.')
return EvaluateBooleanExpression(literal, self.__solution)
    # --- Accessors over the response of the most recent Solve() call. ---
    # NOTE(review): all of these read self.__solution, which is assigned in
    # Solve(); calling them before a solve presumably fails or returns a
    # default — confirm against __init__ (not visible in this chunk).
    def ObjectiveValue(self):
        """Returns the value of the objective after solve."""
        return self.__solution.objective_value
    def BestObjectiveBound(self):
        """Returns the best lower (upper) bound found when min(max)imizing."""
        return self.__solution.best_objective_bound
    def StatusName(self, status=None):
        """Returns the name of the status returned by Solve().

        If `status` is None, the status of the last solve is used.
        """
        if status is None:
            status = self.__solution.status
        return cp_model_pb2.CpSolverStatus.Name(status)
    def NumBooleans(self):
        """Returns the number of boolean variables managed by the SAT solver."""
        return self.__solution.num_booleans
    def NumConflicts(self):
        """Returns the number of conflicts since the creation of the solver."""
        return self.__solution.num_conflicts
    def NumBranches(self):
        """Returns the number of search branches explored by the solver."""
        return self.__solution.num_branches
    def WallTime(self):
        """Returns the wall time in seconds since the creation of the solver."""
        return self.__solution.wall_time
    def UserTime(self):
        """Returns the user time in seconds since the creation of the solver."""
        return self.__solution.user_time
    def ResponseStats(self):
        """Returns some statistics on the solution found as a string."""
        return pywrapsat.CpSatHelper.SolverResponseStats(self.__solution)
    def ResponseProto(self):
        """Returns the response object of the last solve."""
        return self.__solution
    def SufficientAssumptionsForInfeasibility(self):
        """Returns the indices of the infeasible assumptions."""
        return self.__solution.sufficient_assumptions_for_infeasibility
    def SolutionInfo(self):
        """Returns some information on the solve process.

        Returns some information on how the solution was found, or the reason
        why the model or the parameters are invalid.
        """
        return self.__solution.solution_info
class CpSolverSolutionCallback(pywrapsat.SolutionCallback):
    """Solution callback.

    This class implements a callback that will be called at each new solution
    found during search.

    The method OnSolutionCallback() will be called by the solver, and must be
    implemented. The current solution can be queried using the BooleanValue()
    and Value() methods.

    It inherits the following methods from its base class:

    * `ObjectiveValue(self)`
    * `BestObjectiveBound(self)`
    * `NumBooleans(self)`
    * `NumConflicts(self)`
    * `NumBranches(self)`
    * `WallTime(self)`
    * `UserTime(self)`

    These methods returns the same information as their counterpart in the
    `CpSolver` class.
    """
    def __init__(self):
        pywrapsat.SolutionCallback.__init__(self)
    def OnSolutionCallback(self):
        """Proxy for the same method in snake case."""
        self.on_solution_callback()
    def BooleanValue(self, lit):
        """Returns the boolean value of a boolean literal.

        Args:
            lit: A boolean variable or its negation.

        Returns:
            The Boolean value of the literal in the solution.

        Raises:
            RuntimeError: if `lit` is not a boolean variable or its negation.
        """
        if not self.HasResponse():
            raise RuntimeError('Solve() has not be called.')
        # Constant literals (ints/bools) evaluate to their own truthiness.
        if cmh.is_integral(lit):
            return bool(lit)
        elif isinstance(lit, IntVar) or isinstance(lit, _NotBooleanVariable):
            index = lit.Index()
            return self.SolutionBooleanValue(index)
        else:
            raise TypeError(f'Cannot interpret {lit} as a boolean expression.')
    def Value(self, expression):
        """Evaluates an linear expression in the current solution.

        Args:
            expression: a linear expression of the model.

        Returns:
            An integer value equal to the evaluation of the linear expression
            against the current solution.

        Raises:
            RuntimeError: if 'expression' is not a LinearExpr.
        """
        if not self.HasResponse():
            raise RuntimeError('Solve() has not be called.')
        # Iterative evaluation of the expression tree: the stack holds
        # (sub-expression, accumulated multiplier) pairs, avoiding recursion.
        value = 0
        to_process = [(expression, 1)]
        while to_process:
            expr, coeff = to_process.pop()
            if cmh.is_integral(expr):
                value += int(expr) * coeff
            elif isinstance(expr, _ProductCst):
                # Multiplication by a constant: fold it into the multiplier.
                to_process.append(
                    (expr.Expression(), coeff * expr.Coefficient()))
            elif isinstance(expr, _Sum):
                to_process.append((expr.Left(), coeff))
                to_process.append((expr.Right(), coeff))
            elif isinstance(expr, _SumArray):
                for e in expr.Expressions():
                    to_process.append((e, coeff))
                value += expr.Constant() * coeff
            elif isinstance(expr, _WeightedSum):
                for e, c in zip(expr.Expressions(), expr.Coefficients()):
                    to_process.append((e, coeff * c))
                value += expr.Constant() * coeff
            elif isinstance(expr, IntVar):
                # Leaf: read the variable's value from the solver response.
                value += coeff * self.SolutionIntegerValue(expr.Index())
            elif isinstance(expr, _NotBooleanVariable):
                # not(b) evaluates to 1 - value(b).
                value += coeff * (1 -
                                  self.SolutionIntegerValue(expr.Not().Index()))
            else:
                raise TypeError(
                    f'Cannot interpret {expression} as a linear expression.')
        return value
class ObjectiveSolutionPrinter(CpSolverSolutionCallback):
    """Display the objective value and time of intermediate solutions."""

    def __init__(self):
        CpSolverSolutionCallback.__init__(self)
        self.__solution_count = 0
        self.__start_time = time.time()

    def on_solution_callback(self):
        """Called on each new solution."""
        elapsed = time.time() - self.__start_time
        objective = self.ObjectiveValue()
        print('Solution %i, time = %0.2f s, objective = %i' %
              (self.__solution_count, elapsed, objective))
        self.__solution_count += 1

    def solution_count(self):
        """Returns the number of solutions found."""
        return self.__solution_count
class VarArrayAndObjectiveSolutionPrinter(CpSolverSolutionCallback):
    """Print intermediate solutions (objective, variable values, time)."""

    def __init__(self, variables):
        CpSolverSolutionCallback.__init__(self)
        self.__variables = variables
        self.__solution_count = 0
        self.__start_time = time.time()

    def on_solution_callback(self):
        """Called on each new solution."""
        elapsed = time.time() - self.__start_time
        objective = self.ObjectiveValue()
        print('Solution %i, time = %0.2f s, objective = %i' %
              (self.__solution_count, elapsed, objective))
        for variable in self.__variables:
            print('  %s = %i' % (variable, self.Value(variable)), end=' ')
        print()
        self.__solution_count += 1

    def solution_count(self):
        """Returns the number of solutions found."""
        return self.__solution_count
class VarArraySolutionPrinter(CpSolverSolutionCallback):
    """Print intermediate solutions (variable values, time)."""

    def __init__(self, variables):
        CpSolverSolutionCallback.__init__(self)
        self.__variables = variables
        self.__solution_count = 0
        self.__start_time = time.time()

    def on_solution_callback(self):
        """Called on each new solution."""
        elapsed = time.time() - self.__start_time
        print('Solution %i, time = %0.2f s' %
              (self.__solution_count, elapsed))
        for variable in self.__variables:
            print('  %s = %i' % (variable, self.Value(variable)), end=' ')
        print()
        self.__solution_count += 1

    def solution_count(self):
        """Returns the number of solutions found."""
        return self.__solution_count
| 37.368869
| 85
| 0.625736
|
4a10087338abcfc3844a8bb0c65e411677ae489f
| 279
|
py
|
Python
|
toprepos/urls.py
|
sujpac/github-read-service
|
b4d691df91482867d4921c0df033a61040273fef
|
[
"MIT"
] | null | null | null |
toprepos/urls.py
|
sujpac/github-read-service
|
b4d691df91482867d4921c0df033a61040273fef
|
[
"MIT"
] | null | null | null |
toprepos/urls.py
|
sujpac/github-read-service
|
b4d691df91482867d4921c0df033a61040273fef
|
[
"MIT"
] | null | null | null |
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from . import views
# Register the viewset at the empty prefix: the router generates its standard
# routes directly under whatever URL prefix it is included at below.
router = DefaultRouter()
router.register('', views.TopNReposViewSet, basename='top-n-repos-viewset')
# Mount the router under /top/<N>/ — N is captured as an int URL kwarg and
# forwarded to the viewset.
urlpatterns = [
    path('top/<int:N>/', include(router.urls)),
]
| 19.928571
| 75
| 0.741935
|
4a10092de7f02c55adbe616d479f72b5774d1e98
| 1,180
|
py
|
Python
|
python/setup.py
|
jaimergp/openmm-plumed
|
db390e228c19ab0ec34bee8f98354475f29d168c
|
[
"Unlicense"
] | 19
|
2016-02-10T18:17:57.000Z
|
2021-03-08T12:26:53.000Z
|
python/setup.py
|
jaimergp/openmm-plumed
|
db390e228c19ab0ec34bee8f98354475f29d168c
|
[
"Unlicense"
] | 35
|
2020-01-24T18:45:34.000Z
|
2022-01-12T17:35:08.000Z
|
python/setup.py
|
jaimergp/openmm-plumed
|
db390e228c19ab0ec34bee8f98354475f29d168c
|
[
"Unlicense"
] | 13
|
2016-11-22T01:18:41.000Z
|
2021-08-02T22:28:47.000Z
|
from distutils.core import setup
from distutils.extension import Extension
import os
import sys
import platform
# The @...@ placeholders are substituted by the build system's configure step
# before this script is run.
openmm_dir = '@OPENMM_DIR@'
openmmplumed_header_dir = '@OPENMMPLUMED_HEADER_DIR@'
openmmplumed_library_dir = '@OPENMMPLUMED_LIBRARY_DIR@'
# setup extra compile and link arguments on Mac
extra_compile_args = []
extra_link_args = []
if platform.system() == 'Darwin':
    extra_compile_args += ['-stdlib=libc++', '-mmacosx-version-min=10.7']
    # NOTE(review): passing '-Wl' and '-rpath' as two separate arguments looks
    # suspicious — the conventional spelling is a single '-Wl,-rpath,<dir>'
    # argument. Confirm clang on macOS accepts this form before changing it.
    extra_link_args += ['-stdlib=libc++', '-mmacosx-version-min=10.7', '-Wl', '-rpath', openmm_dir+'/lib']
# C extension wrapping the OpenMM-PLUMED plugin; links against both OpenMM
# and the plugin library.
extension = Extension(name='_openmmplumed',
                      sources=['PlumedPluginWrapper.cpp'],
                      libraries=['OpenMM', 'OpenMMPlumed'],
                      include_dirs=[os.path.join(openmm_dir, 'include'), openmmplumed_header_dir],
                      library_dirs=[os.path.join(openmm_dir, 'lib'), openmmplumed_library_dir],
                      extra_compile_args=extra_compile_args,
                      extra_link_args=extra_link_args
                     )
setup(name='OpenMMPlumed',
      version='1.0',
      py_modules=['openmmplumed'],
      ext_modules=[extension],
     )
| 35.757576
| 106
| 0.649153
|
4a1009641df9063a4c46d668f0a3297a7668f762
| 3,673
|
py
|
Python
|
src/google/auth/transport/_http_client.py
|
MarletteFunding/aws-kube-codesuite
|
ab4e5ce45416b83bffb947ab8d234df5437f4fca
|
[
"Apache-2.0"
] | 184
|
2017-12-20T21:50:06.000Z
|
2022-03-19T13:24:58.000Z
|
src/google/auth/transport/_http_client.py
|
MarletteFunding/aws-kube-codesuite
|
ab4e5ce45416b83bffb947ab8d234df5437f4fca
|
[
"Apache-2.0"
] | 68
|
2016-12-12T20:38:47.000Z
|
2020-07-26T18:28:49.000Z
|
src/google/auth/transport/_http_client.py
|
MarletteFunding/aws-kube-codesuite
|
ab4e5ce45416b83bffb947ab8d234df5437f4fca
|
[
"Apache-2.0"
] | 136
|
2018-01-09T22:52:06.000Z
|
2022-02-24T13:26:18.000Z
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transport adapter for http.client, for internal use only."""
import logging
import socket
from six.moves import http_client
from six.moves import urllib
from google.auth import exceptions
from google.auth import transport
_LOGGER = logging.getLogger(__name__)
class Response(transport.Response):
    """http.client transport response adapter.

    Args:
        response (http.client.HTTPResponse): The raw http client response.
    """

    def __init__(self, response):
        # Snapshot status, headers and body eagerly so the caller is free to
        # close the underlying connection afterwards.
        self._status = response.status
        self._headers = dict(
            (key.lower(), value) for key, value in response.getheaders())
        self._data = response.read()

    @property
    def status(self):
        return self._status

    @property
    def headers(self):
        return self._headers

    @property
    def data(self):
        return self._data
class Request(transport.Request):
    """http.client transport request adapter."""

    def __call__(self, url, method='GET', body=None, headers=None,
                 timeout=None, **kwargs):
        """Make an HTTP request using http.client.

        Args:
            url (str): The URI to be requested.
            method (str): The HTTP method to use for the request. Defaults
                to 'GET'.
            body (bytes): The payload / body in HTTP request.
            headers (Mapping): Request headers.
            timeout (Optional(int)): The number of seconds to wait for a
                response from the server. If not specified or if None, the
                socket global default timeout will be used.
            kwargs: Additional arguments passed through to the underlying
                :meth:`~http.client.HTTPConnection.request` method.

        Returns:
            Response: The HTTP response.

        Raises:
            google.auth.exceptions.TransportError: If any exception occurred.
        """
        # socket._GLOBAL_DEFAULT_TIMEOUT is the default in http.client.
        if timeout is None:
            timeout = socket._GLOBAL_DEFAULT_TIMEOUT

        # http.client doesn't allow None as the headers argument.
        if headers is None:
            headers = {}

        # http.client needs the host and path parts specified separately.
        parts = urllib.parse.urlsplit(url)
        path = urllib.parse.urlunsplit(
            ('', '', parts.path, parts.query, parts.fragment))

        if parts.scheme != 'http':
            # Fixed: adjacent literals previously rendered as
            # "...the http scheme, httpswas specified" (missing space).
            raise exceptions.TransportError(
                'http.client transport only supports the http scheme, {} '
                'was specified'.format(parts.scheme))

        connection = http_client.HTTPConnection(parts.netloc, timeout=timeout)

        try:
            _LOGGER.debug('Making request: %s %s', method, url)
            connection.request(
                method, path, body=body, headers=headers, **kwargs)
            response = connection.getresponse()
            return Response(response)
        except (http_client.HTTPException, socket.error) as exc:
            raise exceptions.TransportError(exc)
        finally:
            # Always release the connection, success or failure.
            connection.close()
| 32.794643
| 78
| 0.642799
|
4a100a2fb40f3b7bde125003b838ddedc0a947e8
| 17,082
|
py
|
Python
|
scripts/backend_tests.py
|
anubhavsinha98/oppia
|
9a64ea2e91d2f471ce22bd39da77b43dccd5b51f
|
[
"Apache-2.0"
] | 1
|
2019-08-31T17:06:41.000Z
|
2019-08-31T17:06:41.000Z
|
scripts/backend_tests.py
|
anubhavsinha98/oppia
|
9a64ea2e91d2f471ce22bd39da77b43dccd5b51f
|
[
"Apache-2.0"
] | null | null | null |
scripts/backend_tests.py
|
anubhavsinha98/oppia
|
9a64ea2e91d2f471ce22bd39da77b43dccd5b51f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script for running backend tests in parallel.
This should not be run directly. Instead, navigate to the oppia/ folder and
execute:
bash scripts/run_backend_tests.sh
"""
from __future__ import absolute_import # pylint: disable=import-only-modules
# Pylint has issues with the import order of argparse.
# pylint: disable=wrong-import-order
import argparse
import datetime
import importlib
import inspect
import os
import re
import subprocess
import sys
import threading
import time
import python_utils
# pylint: enable=wrong-import-order
CURR_DIR = os.path.abspath(os.getcwd())
OPPIA_TOOLS_DIR = os.path.join(CURR_DIR, '..', 'oppia_tools')
THIRD_PARTY_DIR = os.path.join(CURR_DIR, 'third_party')
# Saved here so TestingTaskSpec.run() can restore it before spawning the
# subprocess (importlib-based imports mutate it).
PYTHONPATH = os.environ['PYTHONPATH']
# Vendored dependencies that main() prepends to sys.path before importing
# anything from the app.
DIRS_TO_ADD_TO_SYS_PATH = [
    os.path.join(OPPIA_TOOLS_DIR, 'pylint-1.9.4'),
    os.path.join(
        OPPIA_TOOLS_DIR, 'google_appengine_1.9.67', 'google_appengine'),
    os.path.join(OPPIA_TOOLS_DIR, 'webtest-2.0.33'),
    os.path.join(
        OPPIA_TOOLS_DIR, 'google_appengine_1.9.67', 'google_appengine',
        'lib', 'webob_0_9'),
    os.path.join(OPPIA_TOOLS_DIR, 'browsermob-proxy-0.7.1'),
    os.path.join(OPPIA_TOOLS_DIR, 'selenium-3.13.0'),
    os.path.join(OPPIA_TOOLS_DIR, 'Pillow-6.0.0'),
    CURR_DIR,
    os.path.join(THIRD_PARTY_DIR, 'backports.functools_lru_cache-1.5'),
    os.path.join(THIRD_PARTY_DIR, 'beautifulsoup4-4.7.1'),
    os.path.join(THIRD_PARTY_DIR, 'bleach-3.1.0'),
    os.path.join(THIRD_PARTY_DIR, 'callbacks-0.3.0'),
    os.path.join(THIRD_PARTY_DIR, 'gae-cloud-storage-1.9.22.1'),
    os.path.join(THIRD_PARTY_DIR, 'gae-mapreduce-1.9.22.0'),
    os.path.join(THIRD_PARTY_DIR, 'gae-pipeline-1.9.22.1'),
    os.path.join(THIRD_PARTY_DIR, 'graphy-1.0.0'),
    os.path.join(THIRD_PARTY_DIR, 'html5lib-python-1.0.1'),
    os.path.join(THIRD_PARTY_DIR, 'mutagen-1.42.0'),
    os.path.join(THIRD_PARTY_DIR, 'simplejson-3.16.0'),
    os.path.join(THIRD_PARTY_DIR, 'six-1.12.0'),
    os.path.join(THIRD_PARTY_DIR, 'soupsieve-1.9.1'),
    os.path.join(THIRD_PARTY_DIR, 'webencodings-0.5.1'),
]
COVERAGE_PATH = os.path.join(
    os.getcwd(), '..', 'oppia_tools', 'coverage-4.5.3', 'coverage')
TEST_RUNNER_PATH = os.path.join(os.getcwd(), 'core', 'tests', 'gae_suite.py')
# Serializes terminal output from concurrently running test tasks.
LOG_LOCK = threading.Lock()
# Accumulates exceptions from finished tasks (see _check_all_tasks).
ALL_ERRORS = []
# This should be the same as core.test_utils.LOG_LINE_PREFIX.
LOG_LINE_PREFIX = 'LOG_INFO_TEST: '
_LOAD_TESTS_DIR = os.path.join(os.getcwd(), 'core', 'tests', 'load_tests')
_PARSER = argparse.ArgumentParser()
_PARSER.add_argument(
    '--generate_coverage_report',
    help='optional; if specified, generates a coverage report',
    action='store_true')
_PARSER.add_argument(
    '--test_target',
    help='optional dotted module name of the test(s) to run',
    type=python_utils.STR)
_PARSER.add_argument(
    '--test_path',
    help='optional subdirectory path containing the test(s) to run',
    type=python_utils.STR)
_PARSER.add_argument(
    '--exclude_load_tests',
    help='optional; if specified, exclude load tests from being run',
    action='store_true')
_PARSER.add_argument(
    '-v',
    '--verbose',
    help='optional; if specified, display the output of the tests being run',
    action='store_true')
def log(message, show_time=False):
    """Logs a message to the terminal.

    If show_time is True, prefixes the message with the current time.
    """
    with LOG_LOCK:
        if not show_time:
            python_utils.PRINT(message)
        else:
            timestamp = datetime.datetime.utcnow().strftime('%H:%M:%S')
            python_utils.PRINT(timestamp, message)
def run_shell_cmd(exe, stdout=subprocess.PIPE, stderr=subprocess.PIPE):
    """Runs a shell command and captures the stdout and stderr output.

    If the cmd fails, raises Exception. Otherwise, returns a string containing
    the concatenation of the stdout and stderr logs.
    """
    p = subprocess.Popen(exe, stdout=stdout, stderr=stderr)
    last_stdout_str, last_stderr_str = p.communicate()
    # NOTE(review): communicate() returns bytes on Python 3 unless text mode
    # is requested; the split('\n') below assumes str output — confirm the
    # target interpreter (this codebase is Python 2 style).
    last_stdout = last_stdout_str.split('\n')
    # Surface any in-test log lines (written with LOG_LINE_PREFIX) to the
    # operator's terminal, stripped of the prefix.
    if LOG_LINE_PREFIX in last_stdout_str:
        log('')
        for line in last_stdout:
            if line.startswith(LOG_LINE_PREFIX):
                log('INFO: %s' % line[len(LOG_LINE_PREFIX):])
        log('')
    result = '%s%s' % (last_stdout_str, last_stderr_str)
    if p.returncode != 0:
        raise Exception('Error %s\n%s' % (p.returncode, result))
    return result
class TaskThread(threading.Thread):
    """Runs a task in its own thread."""
    def __init__(self, func, verbose, name=None):
        super(TaskThread, self).__init__()
        # Zero-arg callable whose return value is stored in self.output.
        self.func = func
        self.output = None
        self.exception = None
        self.verbose = verbose
        self.name = name
        self.finished = False
        # NOTE(review): self.start_time is not set here — it is assigned
        # externally by _execute_tasks() right after start(), so run() races
        # with that assignment on very fast tasks.
    def run(self):
        try:
            self.output = self.func()
            if self.verbose:
                log('LOG %s:' % self.name, show_time=True)
                log(self.output)
                log('----------------------------------------')
            log('FINISHED %s: %.1f secs' %
                (self.name, time.time() - self.start_time), show_time=True)
            self.finished = True
        except Exception as e:
            self.exception = e
            # Suppress the error banner for operator-initiated interrupts;
            # the exception itself is still recorded for the caller.
            if 'KeyboardInterrupt' not in python_utils.convert_to_bytes(
                    self.exception):
                log('ERROR %s: %.1f secs' %
                    (self.name, time.time() - self.start_time), show_time=True)
            self.finished = True
class TestingTaskSpec(python_utils.OBJECT):
    """Executes a set of tests given a test class name."""

    def __init__(self, test_target, generate_coverage_report):
        self.test_target = test_target
        self.generate_coverage_report = generate_coverage_report

    def run(self):
        """Runs all tests corresponding to the given test target."""
        test_target_flag = '--test_target=%s' % self.test_target
        # This is done because PYTHONPATH is modified while using importlib
        # to import modules. PYTHONPATH is changed to comma separated list
        # after which python is unable to find certain modules. So, the old
        # PYTHONPATH is copied here to avoid import errors.
        os.environ['PYTHONPATH'] = PYTHONPATH
        cmd = ['python']
        if self.generate_coverage_report:
            cmd += [COVERAGE_PATH, 'run', '-p']
        cmd += [TEST_RUNNER_PATH, test_target_flag]
        return run_shell_cmd(cmd)
def _check_all_tasks(tasks):
    """Checks the results of all tasks.

    Appends exceptions raised by tasks to ALL_ERRORS and logs the names of
    tasks that are still running.
    """
    running_tasks_data = []
    for task in tasks:
        # Thread.isAlive() was removed in Python 3.9; is_alive() is available
        # on both Python 2.6+ and every Python 3 version.
        if task.is_alive():
            running_tasks_data.append(' %s (started %s)' % (
                task.name,
                time.strftime('%H:%M:%S', time.localtime(task.start_time))
            ))
        if task.exception:
            ALL_ERRORS.append(task.exception)
    if running_tasks_data:
        log('----------------------------------------')
        log('Tasks still running:')
        for task_details in running_tasks_data:
            log(task_details)
def _execute_tasks(tasks, batch_size=24):
    """Starts all tasks and checks the results.

    Runs no more than 'batch_size' tasks at a time.
    """
    remaining_tasks = [] + tasks
    currently_running_tasks = set([])
    while remaining_tasks or currently_running_tasks:
        if currently_running_tasks:
            for task in list(currently_running_tasks):
                task.join(1)
                # Thread.isAlive() was removed in Python 3.9; use is_alive(),
                # which works on both Python 2 and 3.
                if not task.is_alive():
                    currently_running_tasks.remove(task)
        while remaining_tasks and len(currently_running_tasks) < batch_size:
            task = remaining_tasks.pop()
            currently_running_tasks.add(task)
            task.start()
            # NOTE(review): start_time is read by TaskThread.run(); assigning
            # it after start() races with very fast tasks — confirm intended.
            task.start_time = time.time()
        time.sleep(5)
        if remaining_tasks:
            log('----------------------------------------')
            log('Number of unstarted tasks: %s' % len(remaining_tasks))
        _check_all_tasks(tasks)
        log('----------------------------------------')
def _get_all_test_targets(test_path=None, include_load_tests=True):
    """Returns a list of test targets for all classes under test_path
    containing tests.
    """
    def _get_test_target_classes(path):
        """Returns a list of all test classes in a given test file path.

        Args:
            path: str. The path of the test file from which all test classes
                are to be extracted.

        Returns:
            list. A list of all test classes in a given test file path.
        """
        class_names = []
        # Turn the file path into a dotted module path (dropping '.py') so
        # it can be imported with importlib.
        test_target_path = os.path.relpath(
            path, os.getcwd())[:-3].replace('/', '.')
        python_module = importlib.import_module(test_target_path)
        for name, clazz in inspect.getmembers(
                python_module, predicate=inspect.isclass):
            all_base_classes = [base_class.__name__ for base_class in
                                (inspect.getmro(clazz))]
            # Check that it is a subclass of 'AppEngineTestBase'.
            if 'AppEngineTestBase' in all_base_classes:
                class_names.append(name)
        return [
            '%s.%s' % (test_target_path, class_name)
            for class_name in class_names]
    base_path = os.path.join(os.getcwd(), test_path or '')
    result = []
    excluded_dirs = ['.git', 'third_party', 'core/tests', 'node_modules']
    for root in os.listdir(base_path):
        if any([s in root for s in excluded_dirs]):
            continue
        # Test files that live directly in base_path.
        if root.endswith('_test.py'):
            result = result + (
                _get_test_target_classes(os.path.join(base_path, root)))
        for subroot, _, files in os.walk(os.path.join(base_path, root)):
            # Load tests (under core/tests/load_tests) are collected only
            # when explicitly included.
            if _LOAD_TESTS_DIR in subroot and include_load_tests:
                for f in files:
                    if f.endswith('_test.py'):
                        result = result + (
                            _get_test_target_classes(os.path.join(subroot, f)))
            # Everything else under core/tests is excluded here.
            for f in files:
                if (f.endswith('_test.py') and
                        os.path.join('core', 'tests') not in subroot):
                    result = result + (
                        _get_test_target_classes(os.path.join(subroot, f)))
    return result
def main():
    """Run the tests."""
    # Phase 1: make the vendored libraries importable, then let the App
    # Engine SDK finish wiring up sys.path.
    for directory in DIRS_TO_ADD_TO_SYS_PATH:
        if not os.path.exists(os.path.dirname(directory)):
            raise Exception('Directory %s does not exist.' % directory)
        sys.path.insert(0, directory)
    import dev_appserver
    dev_appserver.fix_sys_path()
    # Phase 2: validate the mutually-exclusive selection flags.
    parsed_args = _PARSER.parse_args()
    if parsed_args.test_target and parsed_args.test_path:
        raise Exception('At most one of test_path and test_target '
                        'should be specified.')
    if parsed_args.test_path and '.' in parsed_args.test_path:
        raise Exception('The delimiter in test_path should be a slash (/)')
    if parsed_args.test_target and '/' in parsed_args.test_target:
        raise Exception('The delimiter in test_target should be a dot (.)')
    # Phase 3: resolve the list of test targets to run.
    if parsed_args.test_target:
        if '_test' in parsed_args.test_target:
            all_test_targets = [parsed_args.test_target]
        else:
            # Tolerate a module name without the '_test' suffix, but warn.
            python_utils.PRINT('')
            python_utils.PRINT(
                '---------------------------------------------------------')
            python_utils.PRINT(
                'WARNING : test_target flag should point to the test file.')
            python_utils.PRINT(
                '---------------------------------------------------------')
            python_utils.PRINT('')
            time.sleep(3)
            python_utils.PRINT('Redirecting to its corresponding test file...')
            all_test_targets = [parsed_args.test_target + '_test']
    else:
        include_load_tests = not parsed_args.exclude_load_tests
        all_test_targets = _get_all_test_targets(
            test_path=parsed_args.test_path,
            include_load_tests=include_load_tests)
    # Prepare tasks.
    task_to_taskspec = {}
    tasks = []
    for test_target in all_test_targets:
        test = TestingTaskSpec(
            test_target, parsed_args.generate_coverage_report)
        task = TaskThread(test.run, parsed_args.verbose, name=test_target)
        task_to_taskspec[task] = test
        tasks.append(task)
    # Phase 4: run everything, then report per-task exceptions.
    task_execution_failed = False
    try:
        _execute_tasks(tasks)
    except Exception:
        task_execution_failed = True
    for task in tasks:
        if task.exception:
            log(python_utils.convert_to_bytes(task.exception))
    python_utils.PRINT('')
    python_utils.PRINT('+------------------+')
    python_utils.PRINT('| SUMMARY OF TESTS |')
    python_utils.PRINT('+------------------+')
    python_utils.PRINT('')
    # Check we ran all tests as expected.
    total_count = 0
    total_errors = 0
    total_failures = 0
    for task in tasks:
        spec = task_to_taskspec[task]
        if not task.finished:
            python_utils.PRINT('CANCELED %s' % spec.test_target)
            test_count = 0
        # NOTE(review): when a task finished cleanly, task.exception is None;
        # confirm convert_to_bytes(None) is safe here.
        elif 'No tests were run' in python_utils.convert_to_bytes(
                task.exception):
            python_utils.PRINT(
                'ERROR %s: No tests found.' % spec.test_target)
            test_count = 0
        elif task.exception:
            exc_str = python_utils.convert_to_bytes(task.exception)
            python_utils.PRINT(exc_str[exc_str.find('='): exc_str.rfind('-')])
            # Parse the suite's failure summary to count errors/failures.
            tests_failed_regex_match = re.search(
                r'Test suite failed: ([0-9]+) tests run, ([0-9]+) errors, '
                '([0-9]+) failures',
                python_utils.convert_to_bytes(task.exception))
            try:
                test_count = int(tests_failed_regex_match.group(1))
                errors = int(tests_failed_regex_match.group(2))
                failures = int(tests_failed_regex_match.group(3))
                total_errors += errors
                total_failures += failures
                python_utils.PRINT('FAILED %s: %s errors, %s failures' % (
                    spec.test_target, errors, failures))
            except AttributeError:
                # There was an internal error, and the tests did not run (The
                # error message did not match `tests_failed_regex_match`).
                test_count = 0
                total_errors += 1
                python_utils.PRINT('')
                python_utils.PRINT(
                    '------------------------------------------------------')
                python_utils.PRINT(
                    ' WARNING: FAILED TO RUN %s' % spec.test_target)
                python_utils.PRINT('')
                python_utils.PRINT(
                    ' This is most likely due to an import error.')
                python_utils.PRINT(
                    '------------------------------------------------------')
        else:
            # Success path: parse the standard unittest summary line.
            try:
                tests_run_regex_match = re.search(
                    r'Ran ([0-9]+) tests? in ([0-9\.]+)s', task.output)
                test_count = int(tests_run_regex_match.group(1))
                test_time = float(tests_run_regex_match.group(2))
                python_utils.PRINT(
                    'SUCCESS %s: %d tests (%.1f secs)' %
                    (spec.test_target, test_count, test_time))
            except Exception:
                python_utils.PRINT(
                    'An unexpected error occurred. '
                    'Task output:\n%s' % task.output)
        total_count += test_count
    python_utils.PRINT('')
    if total_count == 0:
        raise Exception('WARNING: No tests were run.')
    else:
        python_utils.PRINT('Ran %s test%s in %s test class%s.' % (
            total_count, '' if total_count == 1 else 's',
            len(tasks), '' if len(tasks) == 1 else 'es'))
        if total_errors or total_failures:
            python_utils.PRINT(
                '(%s ERRORS, %s FAILURES)' % (total_errors, total_failures))
        else:
            python_utils.PRINT('All tests passed.')
    # Exit non-zero (via exception) if anything went wrong.
    if task_execution_failed:
        raise Exception('Task execution failed.')
    elif total_errors or total_failures:
        raise Exception(
            '%s errors, %s failures' % (total_errors, total_failures))
# Entry point when invoked directly (normally via run_backend_tests.sh).
if __name__ == '__main__':
    main()
| 37.215686
| 79
| 0.601101
|
4a100ab293fad5563e5bbac3bcced2b4f0ec1f1e
| 12,955
|
py
|
Python
|
tb_rest_client/models/models_ce/edge_info.py
|
jernkuan/thingsboard-python-rest-client
|
3fb25272507494e6d494b27ca2380d3c543562e5
|
[
"Apache-2.0"
] | null | null | null |
tb_rest_client/models/models_ce/edge_info.py
|
jernkuan/thingsboard-python-rest-client
|
3fb25272507494e6d494b27ca2380d3c543562e5
|
[
"Apache-2.0"
] | null | null | null |
tb_rest_client/models/models_ce/edge_info.py
|
jernkuan/thingsboard-python-rest-client
|
3fb25272507494e6d494b27ca2380d3c543562e5
|
[
"Apache-2.0"
] | 1
|
2021-11-26T11:24:56.000Z
|
2021-11-26T11:24:56.000Z
|
# coding: utf-8
"""
ThingsBoard REST API
For instructions how to authorize requests please visit <a href='http://thingsboard.io/docs/reference/rest-api/'>REST API documentation page</a>. # noqa: E501
OpenAPI spec version: 2.0
Contact: info@thingsboard.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class EdgeInfo(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps python attribute name -> declared swagger type name.
    swagger_types = {
        'additional_info': 'str',
        'cloud_endpoint': 'str',
        'created_time': 'int',
        'customer_id': 'CustomerId',
        'customer_is_public': 'bool',
        'customer_title': 'str',
        'edge_license_key': 'str',
        'id': 'EdgeId',
        'label': 'str',
        'name': 'str',
        'root_rule_chain_id': 'RuleChainId',
        'routing_key': 'str',
        'secret': 'str',
        'tenant_id': 'TenantId',
        'type': 'str'
    }
    # Maps python attribute name -> JSON key in the API payload.
    attribute_map = {
        'additional_info': 'additionalInfo',
        'cloud_endpoint': 'cloudEndpoint',
        'created_time': 'createdTime',
        'customer_id': 'customerId',
        'customer_is_public': 'customerIsPublic',
        'customer_title': 'customerTitle',
        'edge_license_key': 'edgeLicenseKey',
        'id': 'id',
        'label': 'label',
        'name': 'name',
        'root_rule_chain_id': 'rootRuleChainId',
        'routing_key': 'routingKey',
        'secret': 'secret',
        'tenant_id': 'tenantId',
        'type': 'type'
    }
    def __init__(self, additional_info=None, cloud_endpoint=None, created_time=None, customer_id=None, customer_is_public=None, customer_title=None, edge_license_key=None, id=None, label=None, name=None, root_rule_chain_id=None, routing_key=None, secret=None, tenant_id=None, type=None):  # noqa: E501
        """EdgeInfo - a model defined in Swagger"""  # noqa: E501
        self._additional_info = None
        self._cloud_endpoint = None
        self._created_time = None
        self._customer_id = None
        self._customer_is_public = None
        self._customer_title = None
        self._edge_license_key = None
        self._id = None
        self._label = None
        self._name = None
        self._root_rule_chain_id = None
        self._routing_key = None
        self._secret = None
        self._tenant_id = None
        self._type = None
        self.discriminator = None
        # Route each non-None constructor argument through its property
        # setter so omitted fields remain None.
        if additional_info is not None:
            self.additional_info = additional_info
        if cloud_endpoint is not None:
            self.cloud_endpoint = cloud_endpoint
        if created_time is not None:
            self.created_time = created_time
        if customer_id is not None:
            self.customer_id = customer_id
        if customer_is_public is not None:
            self.customer_is_public = customer_is_public
        if customer_title is not None:
            self.customer_title = customer_title
        if edge_license_key is not None:
            self.edge_license_key = edge_license_key
        if id is not None:
            self.id = id
        if label is not None:
            self.label = label
        if name is not None:
            self.name = name
        if root_rule_chain_id is not None:
            self.root_rule_chain_id = root_rule_chain_id
        if routing_key is not None:
            self.routing_key = routing_key
        if secret is not None:
            self.secret = secret
        if tenant_id is not None:
            self.tenant_id = tenant_id
        if type is not None:
            self.type = type
@property
def additional_info(self):
"""Gets the additional_info of this EdgeInfo. # noqa: E501
:return: The additional_info of this EdgeInfo. # noqa: E501
:rtype: str
"""
return self._additional_info
@additional_info.setter
def additional_info(self, additional_info):
"""Sets the additional_info of this EdgeInfo.
:param additional_info: The additional_info of this EdgeInfo. # noqa: E501
:type: str
"""
self._additional_info = additional_info
@property
def cloud_endpoint(self):
        """Gets the cloud_endpoint of this EdgeInfo.  # noqa: E501

        :return: The cloud_endpoint of this EdgeInfo.  # noqa: E501
        :rtype: str
        """
        return self._cloud_endpoint
@cloud_endpoint.setter
def cloud_endpoint(self, cloud_endpoint):
        """Sets the cloud_endpoint of this EdgeInfo.

        :param cloud_endpoint: The cloud_endpoint of this EdgeInfo.  # noqa: E501
        :type: str
        """
        self._cloud_endpoint = cloud_endpoint
@property
def created_time(self):
        """Gets the created_time of this EdgeInfo.  # noqa: E501

        :return: The created_time of this EdgeInfo.  # noqa: E501
        :rtype: int
        """
        return self._created_time
@created_time.setter
def created_time(self, created_time):
        """Sets the created_time of this EdgeInfo.

        :param created_time: The created_time of this EdgeInfo.  # noqa: E501
        :type: int
        """
        self._created_time = created_time
@property
def customer_id(self):
        """Gets the customer_id of this EdgeInfo.  # noqa: E501

        :return: The customer_id of this EdgeInfo.  # noqa: E501
        :rtype: CustomerId
        """
        return self._customer_id
@customer_id.setter
def customer_id(self, customer_id):
        """Sets the customer_id of this EdgeInfo.

        :param customer_id: The customer_id of this EdgeInfo.  # noqa: E501
        :type: CustomerId
        """
        self._customer_id = customer_id
@property
def customer_is_public(self):
        """Gets the customer_is_public of this EdgeInfo.  # noqa: E501

        :return: The customer_is_public of this EdgeInfo.  # noqa: E501
        :rtype: bool
        """
        return self._customer_is_public
@customer_is_public.setter
def customer_is_public(self, customer_is_public):
        """Sets the customer_is_public of this EdgeInfo.

        :param customer_is_public: The customer_is_public of this EdgeInfo.  # noqa: E501
        :type: bool
        """
        self._customer_is_public = customer_is_public
@property
def customer_title(self):
        """Gets the customer_title of this EdgeInfo.  # noqa: E501

        :return: The customer_title of this EdgeInfo.  # noqa: E501
        :rtype: str
        """
        return self._customer_title
@customer_title.setter
def customer_title(self, customer_title):
        """Sets the customer_title of this EdgeInfo.

        :param customer_title: The customer_title of this EdgeInfo.  # noqa: E501
        :type: str
        """
        self._customer_title = customer_title
@property
def edge_license_key(self):
        """Gets the edge_license_key of this EdgeInfo.  # noqa: E501

        :return: The edge_license_key of this EdgeInfo.  # noqa: E501
        :rtype: str
        """
        return self._edge_license_key
@edge_license_key.setter
def edge_license_key(self, edge_license_key):
        """Sets the edge_license_key of this EdgeInfo.

        :param edge_license_key: The edge_license_key of this EdgeInfo.  # noqa: E501
        :type: str
        """
        self._edge_license_key = edge_license_key
@property
def id(self):
        """Gets the id of this EdgeInfo.  # noqa: E501

        :return: The id of this EdgeInfo.  # noqa: E501
        :rtype: EdgeId
        """
        return self._id
@id.setter
def id(self, id):
        """Sets the id of this EdgeInfo.

        :param id: The id of this EdgeInfo.  # noqa: E501
        :type: EdgeId
        """
        self._id = id
@property
def label(self):
        """Gets the label of this EdgeInfo.  # noqa: E501

        :return: The label of this EdgeInfo.  # noqa: E501
        :rtype: str
        """
        return self._label
@label.setter
def label(self, label):
        """Sets the label of this EdgeInfo.

        :param label: The label of this EdgeInfo.  # noqa: E501
        :type: str
        """
        self._label = label
@property
def name(self):
        """Gets the name of this EdgeInfo.  # noqa: E501

        :return: The name of this EdgeInfo.  # noqa: E501
        :rtype: str
        """
        return self._name
@name.setter
def name(self, name):
        """Sets the name of this EdgeInfo.

        :param name: The name of this EdgeInfo.  # noqa: E501
        :type: str
        """
        self._name = name
@property
def root_rule_chain_id(self):
        """Gets the root_rule_chain_id of this EdgeInfo.  # noqa: E501

        :return: The root_rule_chain_id of this EdgeInfo.  # noqa: E501
        :rtype: RuleChainId
        """
        return self._root_rule_chain_id
@root_rule_chain_id.setter
def root_rule_chain_id(self, root_rule_chain_id):
        """Sets the root_rule_chain_id of this EdgeInfo.

        :param root_rule_chain_id: The root_rule_chain_id of this EdgeInfo.  # noqa: E501
        :type: RuleChainId
        """
        self._root_rule_chain_id = root_rule_chain_id
@property
def routing_key(self):
        """Gets the routing_key of this EdgeInfo.  # noqa: E501

        :return: The routing_key of this EdgeInfo.  # noqa: E501
        :rtype: str
        """
        return self._routing_key
@routing_key.setter
def routing_key(self, routing_key):
        """Sets the routing_key of this EdgeInfo.

        :param routing_key: The routing_key of this EdgeInfo.  # noqa: E501
        :type: str
        """
        self._routing_key = routing_key
@property
def secret(self):
        """Gets the secret of this EdgeInfo.  # noqa: E501

        :return: The secret of this EdgeInfo.  # noqa: E501
        :rtype: str
        """
        return self._secret
@secret.setter
def secret(self, secret):
        """Sets the secret of this EdgeInfo.

        :param secret: The secret of this EdgeInfo.  # noqa: E501
        :type: str
        """
        self._secret = secret
@property
def tenant_id(self):
        """Gets the tenant_id of this EdgeInfo.  # noqa: E501

        :return: The tenant_id of this EdgeInfo.  # noqa: E501
        :rtype: TenantId
        """
        return self._tenant_id
@tenant_id.setter
def tenant_id(self, tenant_id):
        """Sets the tenant_id of this EdgeInfo.

        :param tenant_id: The tenant_id of this EdgeInfo.  # noqa: E501
        :type: TenantId
        """
        self._tenant_id = tenant_id
@property
def type(self):
        """Gets the type of this EdgeInfo.  # noqa: E501

        :return: The type of this EdgeInfo.  # noqa: E501
        :rtype: str
        """
        return self._type
@type.setter
def type(self, type):
        """Sets the type of this EdgeInfo.

        :param type: The type of this EdgeInfo.  # noqa: E501
        :type: str
        """
        self._type = type
def to_dict(self):
        """Returns the model properties as a dict"""
        def _plain(item):
            # Convert a nested model to its dict form; pass primitives through.
            return item.to_dict() if hasattr(item, "to_dict") else item

        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [_plain(element) for element in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: _plain(element) for key, element in value.items()}
            else:
                result[attr] = value
        # Generated models that subclass dict also expose their own items.
        if issubclass(EdgeInfo, dict):
            for key, value in self.items():
                result[key] = value
        return result
def to_str(self):
        """Returns the string representation of the model"""
        # pprint gives a stable, human-readable rendering of the dict form.
        return pprint.pformat(self.to_dict())
def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Equal iff the other object is an EdgeInfo with identical attributes.
        return isinstance(other, EdgeInfo) and self.__dict__ == other.__dict__
def __ne__(self, other):
        """Returns true if both objects are not equal"""
        # Explicit for Python 2 compatibility (the file uses six); negates __eq__.
        return not self == other
| 27.273684
| 301
| 0.590351
|
4a100cd790fdd43e05bd9aaf06b10a8ca4d34eb8
| 1,903
|
py
|
Python
|
utils/console.py
|
hndoss/EagleEye
|
4ce6925dee2f7c23f5b1357d7e440956bd5869e2
|
[
"WTFPL"
] | null | null | null |
utils/console.py
|
hndoss/EagleEye
|
4ce6925dee2f7c23f5b1357d7e440956bd5869e2
|
[
"WTFPL"
] | null | null | null |
utils/console.py
|
hndoss/EagleEye
|
4ce6925dee2f7c23f5b1357d7e440956bd5869e2
|
[
"WTFPL"
] | null | null | null |
from termcolor import colored
import os
import getpass
VER = "0.2"
def banner():
    """Clear the terminal and print the ASCII-art banner with version and user."""
    logo = """
    ███████╗ █████╗  ██████╗ ██╗     ███████╗    ███████╗██╗   ██╗███████╗
    ██╔════╝██╔══██╗██╔════╝ ██║     ██╔════╝    ██╔════╝╚██╗ ██╔╝██╔════╝
    █████╗  ███████║██║  ███╗██║     █████╗ Version █████╗   ╚████╔╝ █████╗
    ██╔══╝  ██╔══██║██║   ██║██║     ██╔══╝  {0}  ██╔══╝    ╚██╔╝  ██╔══╝
    ███████╗██║  ██║╚██████╔╝███████╗███████╗    ███████╗   ██║   ███████╗
    ╚══════╝╚═╝  ╚═╝ ╚═════╝ ╚══════╝╚══════╝    ╚══════╝   ╚═╝   ╚══════╝
    {1}, you have been activated
    """
    clear()
    user = colored(getpass.getuser(), 'red', attrs=['bold'])
    print(logo.format(VER, user))
def clear():
    """Clear the terminal screen, using the platform-appropriate command."""
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)
def section(name):
    """Print a section header: bold blue '::' followed by the bold name."""
    marker = colored("::", 'blue', attrs=['bold'])
    label = colored(name, attrs=['bold'])
    print("\n{} {}".format(marker, label))
def task(name):
    """Print a top-level task line: bold green '==>' followed by the bold name."""
    marker = colored("==>", 'green', attrs=['bold'])
    label = colored(name, attrs=['bold'])
    print('{} {}'.format(marker, label))
def subtask(name):
    """Print an indented sub-task line: bold blue '  ->' followed by the bold name."""
    marker = colored("  ->", 'blue', attrs=['bold'])
    label = colored(name, attrs=['bold'])
    print('{} {}'.format(marker, label))
def failure(name):
    """Print a top-level error line: bold red '==> ERROR:' followed by the bold name."""
    marker = colored("==> ERROR:", 'red', attrs=['bold'])
    label = colored(name, attrs=['bold'])
    print('{} {}'.format(marker, label))
def subfailure(name):
    """Print an indented error line: both the '  ->' marker and the name in bold red."""
    marker = colored("  ->", 'red', attrs=['bold'])
    label = colored(name, 'red', attrs=['bold'])
    print('{} {}'.format(marker, label))
def prompt(name):
    """Print a bold yellow '==>' prompt without a trailing newline (awaits input)."""
    marker = colored("==>", 'yellow', attrs=['bold'])
    label = colored(name, attrs=['bold'])
    print('{} {}'.format(marker, label), end="")
def subprompt(name):
    """Print an indented bold yellow '  ->' prompt without a trailing newline."""
    marker = colored("  ->", 'yellow', attrs=['bold'])
    label = colored(name, attrs=['bold'])
    print('{} {}'.format(marker, label), end="")
| 22.927711
| 111
| 0.347872
|
4a100d33d248681370edf8051a7f53eda3e067b4
| 6,262
|
py
|
Python
|
oceanbolt/com/drydock_v3/services/drydock_service/transports/base.py
|
oceanbolt/oceanbolt-python-sdk
|
c03c400fb7861a7918c6d18d39ad7a108a72baab
|
[
"MIT"
] | 8
|
2021-04-15T08:43:55.000Z
|
2021-12-21T09:23:58.000Z
|
oceanbolt/com/drydock_v3/services/drydock_service/transports/base.py
|
oceanbolt/oceanbolt-python-sdk
|
c03c400fb7861a7918c6d18d39ad7a108a72baab
|
[
"MIT"
] | null | null | null |
oceanbolt/com/drydock_v3/services/drydock_service/transports/base.py
|
oceanbolt/oceanbolt-python-sdk
|
c03c400fb7861a7918c6d18d39ad7a108a72baab
|
[
"MIT"
] | 2
|
2022-01-16T11:43:51.000Z
|
2022-03-24T19:26:44.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
from google import auth # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from oceanbolt.com.drydock_v3.types import service
# Resolve the installed distribution's version for the user-agent string sent
# with API requests; fall back to an empty ClientInfo when the package is not
# installed (e.g. running from a source checkout).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'oceanbolt-com-drydock',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class DrydockServiceTransport(abc.ABC):
    """Abstract transport class for DrydockService."""
    # OAuth scopes required by this API: none.
    AUTH_SCOPES = (
    )
    def __init__(
            self, *,
            host: str = 'api.oceanbolt.com',
            credentials: credentials.Credentials = None,
            credentials_file: typing.Optional[str] = None,
            scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
            quota_project_id: typing.Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]): The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scope (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ':' not in host:
            host += ':443'
        self._host = host
        # If no credentials are provided, then determine the appropriate
        # defaults. Passing both explicit credentials and a file is an error.
        if credentials and credentials_file:
            raise exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
        if credentials_file is not None:
            credentials, _ = auth.load_credentials_from_file(
                                credentials_file,
                                scopes=scopes,
                                quota_project_id=quota_project_id
                            )
        elif credentials is None:
            credentials, _ = auth.default(scopes=scopes, quota_project_id=quota_project_id)
        # Save the credentials.
        self._credentials = credentials
        # Lifted into its own function so it can be stubbed out during tests.
        self._prep_wrapped_messages(client_info)
    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods (retry/timeout/user-agent behavior).
        self._wrapped_methods = {
            self.get_dry_dock_stays: gapic_v1.method.wrap_method(
                self.get_dry_dock_stays,
                default_timeout=None,
                client_info=client_info,
            ),
            self.get_dry_dock_timeseries: gapic_v1.method.wrap_method(
                self.get_dry_dock_timeseries,
                default_timeout=None,
                client_info=client_info,
            ),
            self.get_dry_dock_web: gapic_v1.method.wrap_method(
                self.get_dry_dock_web,
                default_timeout=None,
                client_info=client_info,
            ),
            self.get_dry_dock_vessels: gapic_v1.method.wrap_method(
                self.get_dry_dock_vessels,
                default_timeout=None,
                client_info=client_info,
            ),
        }
    # The four RPC entry points below are abstract: concrete (sync or async)
    # transports must provide callables with these signatures.
    @property
    def get_dry_dock_stays(self) -> typing.Callable[
            [service.GetDryDockStaysRequest],
            typing.Union[
                service.GetDryDockStaysResponse,
                typing.Awaitable[service.GetDryDockStaysResponse]
            ]]:
        raise NotImplementedError()
    @property
    def get_dry_dock_timeseries(self) -> typing.Callable[
            [service.GetDryDockRequest],
            typing.Union[
                service.DryDockResponse,
                typing.Awaitable[service.DryDockResponse]
            ]]:
        raise NotImplementedError()
    @property
    def get_dry_dock_web(self) -> typing.Callable[
            [service.GetDryDockRequest],
            typing.Union[
                service.DryDockResponse,
                typing.Awaitable[service.DryDockResponse]
            ]]:
        raise NotImplementedError()
    @property
    def get_dry_dock_vessels(self) -> typing.Callable[
            [service.GetDryDockRequest],
            typing.Union[
                service.DryDockResponse,
                typing.Awaitable[service.DryDockResponse]
            ]]:
        raise NotImplementedError()
__all__ = (
'DrydockServiceTransport',
)
| 37.053254
| 115
| 0.625998
|
4a100de7bfedb1234abd4920da2668d72f3e8af5
| 13,988
|
py
|
Python
|
madminer/utils/interfaces/mg.py
|
alexander-held/madminer
|
c57989472bf42054821ef2357662b8b8e76b8afa
|
[
"MIT"
] | null | null | null |
madminer/utils/interfaces/mg.py
|
alexander-held/madminer
|
c57989472bf42054821ef2357662b8b8e76b8afa
|
[
"MIT"
] | null | null | null |
madminer/utils/interfaces/mg.py
|
alexander-held/madminer
|
c57989472bf42054821ef2357662b8b8e76b8afa
|
[
"MIT"
] | 1
|
2019-01-16T14:32:54.000Z
|
2019-01-16T14:32:54.000Z
|
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import shutil
import logging
from madminer.utils.various import call_command, make_file_executable, create_missing_folders
logger = logging.getLogger(__name__)
def generate_mg_process(
    mg_directory,
    temp_directory,
    proc_card_file,
    mg_process_directory,
    ufo_model_directory=None,
    log_file=None,
    initial_command=None,
):
    """
    Calls MadGraph to create the process folder.

    Parameters
    ----------
    mg_directory : str
        Path to the MadGraph 5 directory.

    temp_directory : str
        Path to a directory for temporary files.

    proc_card_file : str
        Path to the process card that tells MadGraph how to generate the process.

    mg_process_directory : str
        Path to the MG process directory.

    ufo_model_directory : str or None, optional
        Path to a UFO model that is not yet installed. It will be copied to the MG directory before the process card
        is executed. Default value: None.

    initial_command : str or None, optional
        Initial bash commands that have to be executed before MG is run (e.g. to load the correct virtual
        environment). Default value: None.

    log_file : str or None, optional
        Path to a log file in which the MadGraph output is saved. Default value: None.

    Returns
    -------
        None

    """
    # Preparations
    logger.info("Generating MadGraph process folder from %s at %s", proc_card_file, mg_process_directory)
    # Bug fix: os.path.dirname(None) raises TypeError, so only create a log
    # folder when a log file was actually requested.
    folders = [temp_directory, mg_process_directory]
    if log_file is not None:
        folders.append(os.path.dirname(log_file))
    create_missing_folders(folders)
    if ufo_model_directory is not None:
        copy_ufo_model(ufo_model_directory, mg_directory)
    # MG commands: copy the user's process card and append the output directive.
    temp_proc_card_file = temp_directory + "/generate.mg5"
    shutil.copyfile(proc_card_file, temp_proc_card_file)
    with open(temp_proc_card_file, "a") as myfile:
        myfile.write("\n\noutput " + mg_process_directory)
    # Call MG5, optionally prefixed by the user's setup command.
    if initial_command is None:
        initial_command = ""
    else:
        initial_command = initial_command + "; "
    _ = call_command(initial_command + mg_directory + "/bin/mg5_aMC " + temp_proc_card_file, log_file=log_file)
def prepare_run_mg_pythia(
    mg_process_directory,
    proc_card_filename_from_mgprocdir=None,
    run_card_file_from_mgprocdir=None,
    param_card_file_from_mgprocdir=None,
    reweight_card_file_from_mgprocdir=None,
    pythia8_card_file_from_mgprocdir=None,
    is_background=False,
    script_file_from_mgprocdir=None,
    initial_command=None,
    log_dir=None,
    log_file_from_logdir=None,
):
    """
    Prepares a bash script that will start the event generation.

    Parameters
    ----------
    mg_process_directory : str
        Path to the MG process directory.

    proc_card_filename_from_mgprocdir : str or None, optional
        Filename for the MG command card that will be generated, relative from mg_process_directory. If None, a
        default filename in the MG process directory will be chosen.

    run_card_file_from_mgprocdir : str or None, optional
        Path to the MadGraph run card, relative from mg_process_directory. If None, the card present in the process
        folder is used. Default value: None.

    param_card_file_from_mgprocdir : str or None, optional
        Path to the MadGraph param card, relative from mg_process_directory. If None, the card present in the process
        folder is used. Default value: None.

    reweight_card_file_from_mgprocdir : str or None, optional
        Path to the MadGraph reweight card, relative from mg_process_directory. If None, the card present in the
        process folder is used. Default value: None.

    pythia8_card_file_from_mgprocdir : str or None, optional
        Path to the MadGraph Pythia8 card, relative from mg_process_directory. If None, Pythia is not run. Default
        value: None.

    is_background : bool, optional
        Should be True for background processes, i.e. process in which the differential cross section does not
        depend on the parameters (and would be the same for all benchmarks). In this case, no reweighting is run,
        which can substantially speed up the event generation. Default value: False.

    script_file_from_mgprocdir : str or None, optional
        This sets where the shell script to run MG and Pythia is generated, relative from mg_process_directory. If
        None, a default filename in `mg_process_directory/madminer` is used. Default value: None.

    initial_command : str or None, optional
        Initial shell commands that have to be executed before MG is run (e.g. to load a virtual environment).
        Default value: None.

    log_dir : str or None, optional
        Log directory. Default value: None.

    log_file_from_logdir : str or None, optional
        Path to a log file in which the MadGraph output is saved, relative from the default log directory. Default
        value: None.

    Returns
    -------
    bash_script_call : str
        How to call this script.

    """
    # Preparations
    create_missing_folders([mg_process_directory])
    if log_dir is not None:
        create_missing_folders([log_dir])
    if proc_card_filename_from_mgprocdir is not None:
        create_missing_folders([os.path.dirname(mg_process_directory + "/" + proc_card_filename_from_mgprocdir)])
    # Prepare run...
    logger.info("Preparing script to run MadGraph and Pythia in %s", mg_process_directory)
    # Bash script can optionally provide MG path or process directory
    mg_directory_placeholder = "$mgdir"
    mg_process_directory_placeholder = "$mgprocdir"
    log_dir_placeholder = "$mmlogdir"
    placeholder_definition = "mgdir=$1\nmgprocdir=$2\nmmlogdir=$3"
    # Find a free filename for the process card. Bug fix: the absolute path
    # must be derived in BOTH branches -- previously proc_card_filename was
    # left undefined when proc_card_filename_from_mgprocdir was None,
    # causing a NameError at the open() below.
    if proc_card_filename_from_mgprocdir is None:
        for i in range(1000):
            proc_card_filename_from_mgprocdir = "/Cards/start_event_generation_{}.mg5".format(i)
            if not os.path.isfile(mg_process_directory + "/" + proc_card_filename_from_mgprocdir):
                break
    proc_card_filename = mg_process_directory + "/" + proc_card_filename_from_mgprocdir
    # Find a free filename for the generated shell script. Bug fix: keep the
    # relative path in sync -- previously the returned call string rendered the
    # literal "None" when script_file_from_mgprocdir was None.
    if script_file_from_mgprocdir is None:
        for i in range(1000):
            script_file_from_mgprocdir = "madminer/scripts/madminer_run_{}.sh".format(i)
            if not os.path.isfile(mg_process_directory + "/" + script_file_from_mgprocdir):
                break
    script_file = mg_process_directory + "/" + script_file_from_mgprocdir
    script_filename = os.path.basename(script_file)
    # Make sure the script's directory exists before open(script_file, "w").
    create_missing_folders([os.path.dirname(script_file)])
    if log_file_from_logdir is None:
        log_file_from_logdir = "/log.log"
    # MG commands
    shower_option = "OFF" if pythia8_card_file_from_mgprocdir is None else "Pythia8"
    reweight_option = "OFF" if is_background else "ON"
    mg_commands = """
launch {}
shower={}
detector=OFF
analysis=OFF
madspin=OFF
reweight={}
done
""".format(
        mg_process_directory_placeholder, shower_option, reweight_option
    )
    with open(proc_card_filename, "w") as file:
        file.write(mg_commands)
    # Initial commands
    if initial_command is None:
        initial_command = ""
    # Card copying commands
    copy_commands = ""
    if run_card_file_from_mgprocdir is not None:
        copy_commands += "cp {}/{} {}{}\n".format(
            mg_process_directory_placeholder,
            run_card_file_from_mgprocdir,
            mg_process_directory_placeholder,
            "/Cards/run_card.dat",
        )
    if param_card_file_from_mgprocdir is not None:
        copy_commands += "cp {}/{} {}{}\n".format(
            mg_process_directory_placeholder,
            param_card_file_from_mgprocdir,
            mg_process_directory_placeholder,
            "/Cards/param_card.dat",
        )
    if reweight_card_file_from_mgprocdir is not None and not is_background:
        copy_commands += "cp {}/{} {}{}\n".format(
            mg_process_directory_placeholder,
            reweight_card_file_from_mgprocdir,
            mg_process_directory_placeholder,
            "/Cards/reweight_card.dat",
        )
    if pythia8_card_file_from_mgprocdir is not None:
        copy_commands += "cp {}/{} {}{}\n".format(
            mg_process_directory_placeholder,
            pythia8_card_file_from_mgprocdir,
            mg_process_directory_placeholder,
            "/Cards/pythia8_card.dat",
        )
    # Replace environment variable in proc card
    replacement_command = """sed -e 's@\$mgprocdir@'"$mgprocdir"'@' {}/{} > {}/{}""".format(
        mg_process_directory_placeholder,
        proc_card_filename_from_mgprocdir,
        mg_process_directory_placeholder,
        "Cards/mg_commands.mg5",
    )
    # Put together script
    script = (
        "#!/bin/bash\n\n# Script generated by MadMiner\n\n# Usage: {} MG_directory MG_process_directory log_dir\n\n"
        + "{}\n\n{}\n\n{}\n{}\n\n{}/bin/mg5_aMC {}/{} > {}/{}\n"
    ).format(
        script_filename,
        initial_command,
        placeholder_definition,
        copy_commands,
        replacement_command,
        mg_directory_placeholder,
        mg_process_directory_placeholder,
        "Cards/mg_commands.mg5",
        log_dir_placeholder,
        log_file_from_logdir,
    )
    with open(script_file, "w") as file:
        file.write(script)
    make_file_executable(script_file)
    # How to call it from master script
    call_placeholder = "{}/{} {} {} {}".format(
        mg_process_directory_placeholder,
        script_file_from_mgprocdir,
        mg_directory_placeholder,
        mg_process_directory_placeholder,
        log_dir_placeholder,
    )
    return call_placeholder
def run_mg_pythia(
    mg_directory,
    mg_process_directory,
    proc_card_filename=None,
    run_card_file=None,
    param_card_file=None,
    reweight_card_file=None,
    pythia8_card_file=None,
    is_background=False,
    initial_command=None,
    log_file=None,
):
    """
    Calls MadGraph to generate events.

    Parameters
    ----------
    mg_directory : str
        Path to the MadGraph 5 base directory.

    mg_process_directory : str
        Path to the MG process directory.

    proc_card_filename : str or None, optional
        Filename for the MG command card that will be generated. If None, a default filename in the MG process
        directory will be chosen.

    run_card_file : str or None, optional
        Path to the MadGraph run card. If None, the card present in the process folder is used. Default value:
        None.

    param_card_file : str or None, optional
        Path to the MadGraph param card. If None, the card present in the process folder is used. Default value:
        None.

    reweight_card_file : str or None, optional
        Path to the MadGraph reweight card. If None, the card present in the process folder is used. Default value:
        None.

    pythia8_card_file : str or None, optional
        Path to the MadGraph Pythia8 card. If None, Pythia is not run. Default value: None.

    is_background : bool, optional
        Should be True for background processes, i.e. process in which the differential cross section does not
        depend on the parameters (and would be the same for all benchmarks). In this case, no reweighting is run,
        which can substantially speed up the event generation. Default value: False.

    initial_command : str or None, optional
        Initial shell commands that have to be executed before MG is run (e.g. to load a virtual environment).
        Default value: None.

    log_file : str or None, optional
        Path to a log file in which the MadGraph output is saved. Default value: None.

    Returns
    -------
        None

    """
    # Preparations. Bug fix: os.path.dirname(None) raises TypeError, so only
    # create a log folder when a log file was actually requested.
    folders = [mg_process_directory]
    if log_file is not None:
        folders.append(os.path.dirname(log_file))
    create_missing_folders(folders)
    if proc_card_filename is not None:
        create_missing_folders([os.path.dirname(proc_card_filename)])
    # Just run it already
    logger.info("Starting MadGraph and Pythia in %s", mg_process_directory)
    # Copy cards
    if run_card_file is not None:
        shutil.copyfile(run_card_file, mg_process_directory + "/Cards/run_card.dat")
    if param_card_file is not None:
        shutil.copyfile(param_card_file, mg_process_directory + "/Cards/param_card.dat")
    if reweight_card_file is not None and not is_background:
        shutil.copyfile(reweight_card_file, mg_process_directory + "/Cards/reweight_card.dat")
    if pythia8_card_file is not None:
        shutil.copyfile(pythia8_card_file, mg_process_directory + "/Cards/pythia8_card.dat")
    # Find a free filename for the generated process card
    if proc_card_filename is None:
        for i in range(1000):
            proc_card_filename = mg_process_directory + "/Cards/start_event_generation_{}.mg5".format(i)
            if not os.path.isfile(proc_card_filename):
                break
    # MG commands
    shower_option = "OFF" if pythia8_card_file is None else "Pythia8"
    reweight_option = "OFF" if is_background else "ON"
    mg_commands = """
launch {}
shower={}
detector=OFF
analysis=OFF
madspin=OFF
reweight={}
done
""".format(
        mg_process_directory, shower_option, reweight_option
    )
    with open(proc_card_filename, "w") as file:
        file.write(mg_commands)
    # Call MG5, optionally prefixed by the user's setup command
    if initial_command is None:
        initial_command = ""
    else:
        initial_command = initial_command + "; "
    _ = call_command(initial_command + mg_directory + "/bin/mg5_aMC " + proc_card_filename, log_file=log_file)
def copy_ufo_model(ufo_directory, mg_directory):
    """Copy a UFO model folder into MadGraph's models directory (no-op if it already exists)."""
    model_name = os.path.basename(ufo_directory)
    destination = mg_directory + "/models/" + model_name
    if not os.path.isdir(destination):
        shutil.copytree(ufo_directory, destination)
| 34.79602
| 116
| 0.686446
|
4a100dea27eebcf11cfaf07e3bceb461054e6caa
| 419
|
py
|
Python
|
common/utils.py
|
jeina7/genie
|
b19e520a49c00b3ec846ee6a3d2219ae4549689e
|
[
"MIT"
] | null | null | null |
common/utils.py
|
jeina7/genie
|
b19e520a49c00b3ec846ee6a3d2219ae4549689e
|
[
"MIT"
] | null | null | null |
common/utils.py
|
jeina7/genie
|
b19e520a49c00b3ec846ee6a3d2219ae4549689e
|
[
"MIT"
] | null | null | null |
import yaml
import os
phase = 'development'
# Directory containing this file; used to resolve config paths.
root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), './'))
# Load the BigQuery configuration. safe_load avoids executing arbitrary YAML
# tags on config data, and the context manager closes the file handle (the
# previous bare open() leaked it).
with open(os.path.join(root_path, 'config', 'bigquery.yaml'), 'r') as _config_file:
    bigquery_config = yaml.safe_load(_config_file)
__all__ = ['phase', 'root_path', 'bigquery_config']
# CARDINAL_NUM: numeric edition/iteration; `cardinal` is its English ordinal
# string. Generalized from the original if/else (which only special-cased 3)
# to handle 1/2/3 and the 11th-13th exceptions as well.
CARDINAL_NUM = 4
if CARDINAL_NUM % 100 in (11, 12, 13):
    cardinal = str(CARDINAL_NUM) + "th"
else:
    cardinal = str(CARDINAL_NUM) + {1: "st", 2: "nd", 3: "rd"}.get(CARDINAL_NUM % 10, "th")
| 16.115385
| 74
| 0.639618
|
4a100e6c521da58d42df7db74e3ac82d2d8ac7af
| 2,029
|
py
|
Python
|
wb/main/jobs/dev_cloud/profiling/handle_profiling_sockets_job.py
|
apaniukov/workbench
|
2f2653ecfd0143d2d53e33ad84379f13443fdfaa
|
[
"Apache-2.0"
] | 23
|
2022-03-17T12:24:09.000Z
|
2022-03-31T09:13:30.000Z
|
wb/main/jobs/dev_cloud/profiling/handle_profiling_sockets_job.py
|
apaniukov/workbench
|
2f2653ecfd0143d2d53e33ad84379f13443fdfaa
|
[
"Apache-2.0"
] | 18
|
2022-03-21T08:17:44.000Z
|
2022-03-30T12:42:30.000Z
|
wb/main/jobs/dev_cloud/profiling/handle_profiling_sockets_job.py
|
apaniukov/workbench
|
2f2653ecfd0143d2d53e33ad84379f13443fdfaa
|
[
"Apache-2.0"
] | 16
|
2022-03-17T12:24:14.000Z
|
2022-03-31T12:15:12.000Z
|
"""
OpenVINO DL Workbench
Class for handling sockets of profiling from DevCloud service
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from contextlib import closing
from wb.extensions_factories.database import get_db_session_for_celery
from wb.main.console_tool_wrapper.benchmark_app import BenchmarkConsoleOutputParser
from wb.main.enumerates import JobTypesEnum
from wb.main.jobs.dev_cloud.handle_dev_cloud_job_sockets_job import HandleDevCloudJobSocketsJob
from wb.main.jobs.interfaces.job_observers import ProfilingDBObserver
from wb.main.jobs.profiling.profiling_job_state import ProfilingJobStateSubject
from wb.main.models import ProfilingJobModel
class HandleDevCloudProfilingSocketsJob(HandleDevCloudJobSocketsJob):
    """Handles profiling socket messages from the DevCloud service.

    Specializes the generic DevCloud socket handler with the profiling job
    model, state subject, DB observer, and benchmark-app output parser.
    """
    job_type = JobTypesEnum.handle_dev_cloud_profiling_sockets_job
    _job_model_class = ProfilingJobModel
    _job_state_subject_class = ProfilingJobStateSubject
    _db_observer_class = ProfilingDBObserver
    # Parses benchmark_app console output streamed over the socket.
    _console_tool_output_parser = BenchmarkConsoleOutputParser
    # Annotations
    _job_state_subject: ProfilingJobStateSubject
    def _create_job_state_subject(self) -> ProfilingJobStateSubject:
        """Builds the state subject from the job row, using a short-lived DB session."""
        with closing(get_db_session_for_celery()) as session:
            job: ProfilingJobModel = self.get_job_model(session)
            return ProfilingJobStateSubject(job_id=self.job_id,
                                            num_single_inferences=job.num_single_inferences,
                                            model_path=job.xml_model_path)
| 46.113636
| 95
| 0.78758
|
4a100f663eb13000d11efb2d1778af537a3df6e2
| 9,899
|
py
|
Python
|
john_doe/cities/us/arkansas.py
|
xioren/JohnDoe
|
4bd16f394709cac246438c8ffd650b4b301cb2b7
|
[
"MIT"
] | null | null | null |
john_doe/cities/us/arkansas.py
|
xioren/JohnDoe
|
4bd16f394709cac246438c8ffd650b4b301cb2b7
|
[
"MIT"
] | null | null | null |
john_doe/cities/us/arkansas.py
|
xioren/JohnDoe
|
4bd16f394709cac246438c8ffd650b4b301cb2b7
|
[
"MIT"
] | null | null | null |
cities = [
'Adona',
'Alco',
'Alexander',
'Alicia',
'Alix',
'Alleene',
'Alma',
'Almyra',
'Alpena',
'Alpine',
'Altheimer',
'Altus',
'Amagon',
'Amity',
'Antoine',
'Arkadelphia',
'Arkansas City',
'Armorel',
'Ash Flat',
'Ashdown',
'Atkins',
'Aubrey',
'Augusta',
'Austin',
'Avoca',
'Bald Knob',
'Banks',
'Barling',
'Barton',
'Bassett',
'Bates',
'Batesville',
'Bauxite',
'Bay',
'Bearden',
'Beaver',
'Bee Branch',
'Beebe',
'Beech Grove',
'Beedeville',
'Beirne',
'Bella Vista',
'Belleville',
'Ben Lomond',
'Benton',
'Bentonville',
'Bergman',
'Berryville',
'Bexar',
'Big Flat',
'Bigelow',
'Biggers',
'Birdeye',
'Biscoe',
'Bismarck',
'Black Oak',
'Black Rock',
'Blevins',
'Blue Mountain',
'Bluff City',
'Bluffton',
'Blytheville',
'Board Camp',
'Boles',
'Bonnerdale',
'Bono',
'Booneville',
'Boswell',
'Bradford',
'Bradley',
'Branch',
'Brickeys',
'Briggsville',
'Brinkley',
'Brockwell',
'Brookland',
'Bryant',
'Buckner',
'Bull Shoals',
'Burdette',
'Cabot',
'Caddo Gap',
'Caldwell',
'Cale',
'Calico Rock',
'Calion',
'Camden',
'Camp',
'Canehill',
'Caraway',
'Carlisle',
'Carthage',
'Casa',
'Cash',
'Casscoe',
'Cave City',
'Cave Springs',
'Cecil',
'Cedarville',
'Center Ridge',
'Centerton',
'Centerville',
'Charleston',
'Charlotte',
'Cherokee Village',
'Cherry Valley',
'Chester',
'Chidester',
'Choctaw',
'Clarendon',
'Clarkedale',
'Clarkridge',
'Clarksville',
'Cleveland',
'Clinton',
'Coal Hill',
'College Station',
'Colt',
'Columbus',
'Combs',
'Compton',
'Concord',
'Conway',
'Cord',
'Corning',
'Cotter',
'Cotton Plant',
'Cove',
'Coy',
'Crawfordsville',
'Crocketts Bluff',
'Crossett',
'Crumrod',
'Curtis',
'Cushman',
'Damascus',
'Danville',
'Dardanelle',
'Datto',
'De Queen',
'De Valls Bluff',
'De Witt',
'Decatur',
'Deer',
'Delaplaine',
'Delaware',
'Delight',
'Dell',
'Dennard',
'Dermott',
'Des Arc',
'Desha',
'Diamond City',
'Diaz',
'Dierks',
'Doddridge',
'Dolph',
'Donaldson',
'Dover',
'Drasco',
'Driver',
'Dumas',
'Dyer',
'Dyess',
'Earle',
'Edgemont',
'Edmondson',
'Egypt',
'El Dorado',
'El Paso',
'Elaine',
'Elizabeth',
'Elkins',
'Elm Springs',
'Emerson',
'Emmet',
'England',
'Enola',
'Ethel',
'Etowah',
'Eudora',
'Eureka Springs',
'Evansville',
'Evening Shade',
'Everton',
'Fairfield Bay',
'Farmington',
'Fayetteville',
'Fifty Six',
'Fisher',
'Flippin',
'Floral',
'Fordyce',
'Foreman',
'Forrest City',
'Fort Smith',
'Fouke',
'Fountain Hill',
'Fox',
'Franklin',
'Frenchmans Bayou',
'Friendship',
'Fulton',
'Gamaliel',
'Garfield',
'Garland City',
'Garner',
'Gassville',
'Gateway',
'Genoa',
'Gentry',
'Gepp',
'Gilbert',
'Gillett',
'Gillham',
'Gilmore',
'Glencoe',
'Glenwood',
'Goodwin',
'Goshen',
'Gosnell',
'Gould',
'Grady',
'Grannis',
'Grapevine',
'Gravelly',
'Gravette',
'Green Forest',
'Greenbrier',
'Greenland',
'Greenway',
'Greenwood',
'Gregory',
'Griffithville',
'Grubbs',
'Guion',
'Gurdon',
'Guy',
'Hackett',
'Hagarville',
'Hamburg',
'Hampton',
'Hardy',
'Harrell',
'Harriet',
'Harrisburg',
'Harrison',
'Hartford',
'Hartman',
'Harvey',
'Hasty',
'Hatfield',
'Hattieville',
'Hatton',
'Havana',
'Haynes',
'Hazen',
'Heber Springs',
'Hector',
'Helena',
'Henderson',
'Henning',
'Hensley',
'Hermitage',
'Heth',
'Hickory Plains',
'Hickory Ridge',
'Higden',
'Higginson',
'Hindsville',
'Hiwasse',
'Holly Grove',
'Hope',
'Horatio',
'Horseshoe Bend',
'Hot Springs',
'Hot Springs National Park',
'Hot Springs Village',
'Houston',
'Howell',
'Hoxie',
'Hughes',
'Humnoke',
'Humphrey',
'Hunter',
'Huntington',
'Huntsville',
'Huttig',
'Ida',
'Imboden',
'Ivan',
'Jacksonport',
'Jacksonville',
'Jasper',
'Jefferson',
'Jennie',
'Jerome',
'Jersey',
'Jerusalem',
'Jessieville',
'Johnson',
'Joiner',
'Jones Mill',
'Jonesboro',
'Judsonia',
'Junction City',
'Keiser',
'Kensett',
'Keo',
'Kingsland',
'Kingston',
'Kirby',
'Knobel',
'Knoxville',
'La Grange',
'Lafe',
'Lake City',
'Lake Village',
'Lakeview',
'Lamar',
'Lambrook',
'Laneburg',
'Langley',
'Lavaca',
'Lawson',
'Leachville',
'Lead Hill',
'Leola',
'Lepanto',
'Leslie',
'Letona',
'Lewisville',
'Lexa',
'Light',
'Lincoln',
'Little Rock',
'Little Rock Air Force Base',
'Lockesburg',
'Locust Grove',
'London',
'Lonoke',
'Lonsdale',
'Louann',
'Lowell',
'Luxora',
'Lynn',
'Mabelvale',
'Madison',
'Magazine',
'Magness',
'Magnolia',
'Malvern',
'Mammoth Spring',
'Manila',
'Mansfield',
'Marble Falls',
'Marcella',
'Marianna',
'Marion',
'Marked Tree',
'Marmaduke',
'Marshall',
'Marvell',
'Maumelle',
'Mayflower',
'Maynard',
'Maysville',
'McCaskill',
'McCrory',
'McDougal',
'McGehee',
'McNeil',
'McRae',
'Melbourne',
'Mellwood',
'Mena',
'Menifee',
'Midland',
'Midway',
'Mineral Springs',
'Minturn',
'Moko',
'Monette',
'Monroe',
'Monticello',
'Montrose',
'Moro',
'Morrilton',
'Morrow',
'Moscow',
'Mount Holly',
'Mount Ida',
'Mount Judea',
'Mount Pleasant',
'Mount Vernon',
'Mountain Home',
'Mountain Pine',
'Mountain View',
'Mountainburg',
'Mulberry',
'Murfreesboro',
'Nashville',
'Natural Dam',
'New Blaine',
'New Edinburg',
'Newark',
'Newhope',
'Newport',
'Norfork',
'Norman',
'Norphlet',
'North Little Rock',
'O Kean',
'Oak Grove',
'Oakland',
'Oark',
'Oden',
'Ogden',
'Oil Trough',
'Okolona',
'Ola',
'Omaha',
'Oneida',
'Onia',
'Osceola',
'Oxford',
'Ozan',
'Ozark',
'Ozone',
'Palestine',
'Pangburn',
'Paragould',
'Paris',
'Parkdale',
'Parkin',
'Parks',
'Paron',
'Parthenon',
'Patterson',
'Pea Ridge',
'Peach Orchard',
'Pearcy',
'Peel',
'Pelsor',
'Pencil Bluff',
'Perry',
'Perryville',
'Pettigrew',
'Pickens',
'Piggott',
'Pindall',
'Pine Bluff',
'Pineville',
'Plainview',
'Pleasant Grove',
'Pleasant Plains',
'Plumerville',
'Pocahontas',
'Pollard',
'Ponca',
'Poplar Grove',
'Portia',
'Portland',
'Pottsville',
'Poughkeepsie',
'Powhatan',
'Poyen',
'Prairie Grove',
'Prattsville',
'Prescott',
'Prim',
'Proctor',
'Protem',
'Pyatt',
'Quitman',
'Ratcliff',
'Ravenden',
'Ravenden Springs',
'Rector',
'Redfield',
'Reydell',
'Reyno',
'Ripley',
'Rison',
'Rivervale',
'Roe',
'Rogers',
'Roland',
'Romance',
'Rose Bud',
'Rosie',
'Rosston',
'Rover',
'Royal',
'Rudy',
'Russell',
'Russellville',
'Saffell',
'Sage',
'Saint Charles',
'Saint Francis',
'Saint Joe',
'Saint Paul',
'Salado',
'Salem',
'Saratoga',
'Scotland',
'Scott',
'Scranton',
'Searcy',
'Sedgwick',
'Sheridan',
'Sherrill',
'Sherwood',
'Shirley',
'Sidney',
'Siloam Springs',
'Sims',
'Smackover',
'Smithville',
'Snow Lake',
'Solgohachia',
'Sparkman',
'Springdale',
'Springfield',
'Stamps',
'Star City',
'State University',
'Stephens',
'Story',
'Strawberry',
'Strong',
'Sturkie',
'Stuttgart',
'Subiaco',
'Success',
'Sulphur Rock',
'Sulphur Springs',
'Summers',
'Summit',
'Sweet Home',
'Swifton',
'Taylor',
'Texarkana',
'Theodosia',
'Thida',
'Thornton',
'Tichnor',
'Tillar',
'Tilly',
'Timbo',
'Tomato',
'Tontitown',
'Traskwood',
'Trumann',
'Tucker',
'Tuckerman',
'Tumbling Shoals',
'Tupelo',
'Turner',
'Turrell',
'Twist',
'Tyronza',
'Ulm',
'Umpire',
'Uniontown',
'Urbana',
'Valley Springs',
'Van Buren',
'Vandervoort',
'Vanndale',
'Vendor',
'Village',
'Vilonia',
'Viola',
'Violet Hill',
'Wabash',
'Wabbaseka',
'Walcott',
'Waldenburg',
'Waldo',
'Waldron',
'Walnut Ridge',
'Ward',
'Warm Springs',
'Warren',
'Washington',
'Watson',
'Weiner',
'Wesley',
'West Fork',
'West Helena',
'West Memphis',
'West Point',
'West Ridge',
'Western Grove',
'Wheatley',
'Whelen Springs',
'White Hall',
'Wickes',
'Wideman',
'Widener',
'Wilburn',
'Williford',
'Willisville',
'Wilmar',
'Wilmot',
'Wilson',
'Wilton',
'Winchester',
'Winslow',
'Winthrop',
'Wiseman',
'Witter',
'Witts Springs',
'Woodson',
'Wooster',
'Wright',
'Wrightsville',
'Wynne',
'Yellville',
'Yorktown'
]
| 15.8384
| 33
| 0.484291
|
4a101052d7314bff68636cd31ead6950d9c32199
| 17,609
|
py
|
Python
|
core/models/conduct.py
|
u-n-i-c-o-rn/jimi
|
bbd647fa9cd4326305a33a99122d8d8a2614967d
|
[
"Apache-2.0"
] | 1
|
2021-03-14T21:27:51.000Z
|
2021-03-14T21:27:51.000Z
|
core/models/conduct.py
|
u-n-i-c-o-rn/jimi
|
bbd647fa9cd4326305a33a99122d8d8a2614967d
|
[
"Apache-2.0"
] | null | null | null |
core/models/conduct.py
|
u-n-i-c-o-rn/jimi
|
bbd647fa9cd4326305a33a99122d8d8a2614967d
|
[
"Apache-2.0"
] | null | null | null |
import time
import copy
import traceback
import jimi
# Model Class
class _conduct(jimi.db._document):
name = str()
flow = list()
enabled = True
log = bool()
comment = str()
_dbCollection = jimi.db.db["conducts"]
def __init__(self):
# Cached lookups to limit reloading the same actions
jimi.cache.globalCache.newCache("actionCache")
jimi.cache.globalCache.newCache("triggerCache")
jimi.cache.globalCache.newCache("triggeredFlowTriggers")
jimi.cache.globalCache.newCache("triggeredFlowActions")
jimi.cache.globalCache.newCache("triggeredFlowFlows")
jimi.cache.globalCache.newCache("flowDict")
# Override parent new to include name var, parent class new run after class var update
def new(self,name=""):
# Confirming that the given name is not already in use
results = self.query(query={"name" : name})["results"]
if len(results) == 0:
# Run parent class function ( alternative to end decorator for the new function within a class )
result = super(_conduct, self).new()
if name == "":
self.name = self._id
else:
self.name = name
self.update(["name"])
return result
else:
return None
def setAttribute(self,attr,value,sessionData=None):
if attr == "name":
results = self.query(query={"name" : value, "_id" : { "$ne" : jimi.db.ObjectId(self._id) }})["results"]
if len(results) != 0:
return False
setattr(self,attr,value)
return True
# actionIDType=True uses actionID instead of triggerID
def triggerHandler(self,triggerID,data,actionIDType=False,flowIDType=False,flowDebugSession=None):
####################################
# Header #
####################################
if self.log:
startTime = 0
startTime = time.time()
jimi.audit._audit().add("conduct","trigger_start",{ "conduct_id" : self._id, "conduct_name" : self.name, "trigger_id" : triggerID })
data["persistentData"]["system"]["conduct"] = self
####################################
flowDict = jimi.cache.globalCache.get("flowDict",self._id,getFlowDict,self.flow)
uid = "{0}{1}".format(self._id,triggerID)
if actionIDType:
triggeredFlows = jimi.cache.globalCache.get("triggeredFlowActions",uid,getTriggeredFlowActions,self.flow,triggerID)
elif flowIDType:
triggeredFlows = jimi.cache.globalCache.get("triggeredFlowFlows",uid,getTriggeredFlowFlows,self.flow,triggerID)
else:
triggeredFlows = jimi.cache.globalCache.get("triggeredFlowTriggers",uid,getTriggeredFlowTriggers,self.flow,triggerID)
for triggeredFlow in triggeredFlows:
self.flowHandler(triggeredFlow,flowDict,data,flowDebugSession=flowDebugSession)
####################################
# Footer #
####################################
if self.log:
jimi.audit._audit().add("conduct","trigger_end",{ "conduct_id" : self._id, "conduct_name" : self.name, "trigger_id" : triggerID, "duration" : ( time.time() - startTime ) })
####################################
# Eval logic between links
def flowLogicEval(self,data,logicVar):
try:
if type(logicVar) is bool:
try:
if logicVar == data["flowData"]["action"]["result"]:
return True
except:
pass
elif type(logicVar) is int:
try:
if logicVar == data["flowData"]["action"]["rc"]:
return True
except:
pass
elif type(logicVar) is str:
if logicVar.startswith("if"):
if jimi.logic.ifEval(logicVar, { "data" : data["flowData"], "eventData" : data["eventData"], "conductData" : data["conductData"], "persistentData" : data["persistentData"]}):
return True
except Exception as e:
jimi.logging.debug("Error: Flow Logic Crashed. flowID={0}, error={1}".format(data["flowData"]["flow_id"],''.join(traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__))),-1)
try:
if data["persistentData"]["system"]["trigger"].failOnActionFailure:
raise jimi.exceptions.linkCrash(data["flowData"]["flow_id"],e)
except AttributeError:
pass
return False
def flowHandler(self,currentFlow,flowDict,data,flowDebugSession=None):
if flowDebugSession or "flowDebugSession" in data["persistentData"]["system"]:
if "flowDebugSession" in data["persistentData"]["system"]:
flowDebugSession = copy.deepcopy(data["persistentData"]["system"]["flowDebugSession"])
else:
data["persistentData"]["system"]["flowDebugSession"] = flowDebugSession
flowDebugSession["eventID"] = jimi.debug.flowDebugSession[flowDebugSession["sessionID"]].startEvent(data["flowData"]["trigger_name"],data["flowData"]["event"],data)
processQueue = []
data["flowData"]["conductID"] = self._id
data["flowData"]["action"] = { "result" : True, "rc" : 1337 }
flowObjectsUsed = []
codifyFlow = True if "classObject" in currentFlow else False
cpuSaver = jimi.helpers.cpuSaver()
while True:
if currentFlow:
flowObjectsUsed.append(currentFlow["flowID"])
if currentFlow["type"] == "trigger":
if not codifyFlow:
currentTrigger = jimi.cache.globalCache.get("triggerCache",currentFlow["triggerID"]+currentFlow["flowID"],getTrigger,currentFlow)[0]
else:
currentTrigger = currentFlow["classObject"]
# Logic and var defintion
triggerContinue = True
if currentTrigger.logicString:
if jimi.logic.ifEval(currentTrigger.logicString,{ "data" : data["flowData"], "eventData" : data["eventData"], "conductData" : data["conductData"], "persistentData" : data["persistentData"]}):
if currentTrigger.varDefinitions:
data["flowData"]["var"] = jimi.variable.varEval(currentTrigger.varDefinitions,data["flowData"]["var"],{ "data" : data["flowData"], "eventData" : data["eventData"], "conductData" : data["conductData"], "persistentData" : data["persistentData"]},0)
data["eventData"]["var"] = jimi.variable.varEval(currentTrigger.varDefinitions,data["eventData"]["var"],{ "data" : data["flowData"], "eventData" : data["eventData"], "conductData" : data["conductData"], "persistentData" : data["persistentData"]},1)
data["conductData"]["var"] = jimi.variable.varEval(currentTrigger.varDefinitions,data["conductData"]["var"],{ "data" : data["flowData"], "eventData" : data["eventData"], "conductData" : data["conductData"], "persistentData" : data["persistentData"]},2)
data["persistentData"]["var"] = jimi.variable.varEval(currentTrigger.varDefinitions,data["persistentData"]["var"],{ "data" : data["flowData"], "eventData" : data["eventData"], "conductData" : data["conductData"], "persistentData" : data["persistentData"]},3)
else:
triggerContinue = False
else:
if currentTrigger.varDefinitions:
data["flowData"]["var"] = jimi.variable.varEval(currentTrigger.varDefinitions,data["flowData"]["var"],{ "data" : data["flowData"], "eventData" : data["eventData"], "conductData" : data["conductData"], "persistentData" : data["persistentData"]},0)
data["eventData"]["var"] = jimi.variable.varEval(currentTrigger.varDefinitions,data["eventData"]["var"],{ "data" : data["flowData"], "eventData" : data["eventData"], "conductData" : data["conductData"], "persistentData" : data["persistentData"]},1)
data["conductData"]["var"] = jimi.variable.varEval(currentTrigger.varDefinitions,data["conductData"]["var"],{ "data" : data["flowData"], "eventData" : data["eventData"], "conductData" : data["conductData"], "persistentData" : data["persistentData"]},2)
data["persistentData"]["var"] = jimi.variable.varEval(currentTrigger.varDefinitions,data["persistentData"]["var"],{ "data" : data["flowData"], "eventData" : data["eventData"], "conductData" : data["conductData"], "persistentData" : data["persistentData"]},3)
# If logic has said yes or no logic defined then move onto actions
if triggerContinue == True:
passData = data
for nextFlow in currentFlow["next"]:
if passData == None:
passData = copyData(data)
if self.flowLogicEval(data,nextFlow["logic"]):
processQueue.append({ "flowID" : nextFlow["flowID"], "data" : passData })
passData = None
elif currentFlow["type"] == "action":
if not codifyFlow:
class_ = jimi.cache.globalCache.get("actionCache",currentFlow["actionID"]+currentFlow["flowID"],getAction,currentFlow)[0]
else:
class_ = currentFlow["classObject"]
if class_.enabled:
data["flowData"]["flow_id"] = currentFlow["flowID"]
if flowDebugSession:
flowDebugSession["actionID"] = jimi.debug.flowDebugSession[flowDebugSession["sessionID"]].startAction(flowDebugSession["eventID"],data["flowData"]["flow_id"],class_.name,copyData(data,copyEventData=True,copyConductData=True,copyPersistentData=True))
try:
data["flowData"]["action"] = class_.runHandler(data=data)
except Exception as e:
jimi.logging.debug("Error: Action Crashed. actionID={0}, actionName={1}, error={2}".format(class_._id,class_.name,''.join(traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__))),-1)
try:
if data["persistentData"]["system"]["trigger"].failOnActionFailure:
raise jimi.exceptions.actionCrash(class_._id,class_.name,e)
except AttributeError:
pass
if class_.systemCrashHandler:
jimi.exceptions.actionCrash(class_._id,class_.name,e)
data["flowData"]["action"] = { "result" : False, "rc" : -255, "error" : traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__) }
data["flowData"]["action"]["action_id"] = class_._id
data["flowData"]["action"]["action_name"] = class_.name
if flowDebugSession:
jimi.debug.flowDebugSession[flowDebugSession["sessionID"]].endAction(flowDebugSession["eventID"],flowDebugSession["actionID"],copyData(data,copyEventData=True,copyConductData=True,copyPersistentData=True))
passData = data
for nextFlow in currentFlow["next"]:
if passData == None:
passData = copyData(data)
if self.flowLogicEval(data,nextFlow["logic"]):
processQueue.append({ "flowID" : nextFlow["flowID"], "data" : passData })
passData = None
if len(processQueue) == 0:
break
else:
nextFlowID = processQueue[-1]["flowID"]
data = processQueue[-1]["data"]
processQueue.pop()
try:
currentFlow = flowDict[nextFlowID]
except KeyError:
currentFlow = None
# CPU saver
cpuSaver.tick()
# Post processing for all event postRun actions
if data["flowData"]["eventStats"]["last"]:
for flow in flowDict:
if flowDict[flow]["type"] == "action" and flowDict[flow]["flowID"] in flowObjectsUsed:
if not codifyFlow:
class_ = jimi.cache.globalCache.get("actionCache",flowDict[flow]["actionID"]+flowDict[flow]["flowID"],getAction,flowDict[flow],dontCheck=True)
else:
class_ = [flowDict[flow]["classObject"]]
if class_:
if len(class_) > 0:
class_ = class_[0]
class_.postRun()
if flowDebugSession:
jimi.debug.flowDebugSession[flowDebugSession["sessionID"]].endEvent(flowDebugSession["eventID"])
def dataTemplate(data=None,keepEvent=False):
if data != None and type(data) is dict:
try:
if "event" in data["flowData"] and keepEvent != True:
del data["flowData"]["event"]
if "var" not in data["flowData"]:
data["flowData"]["var"] = {}
if "plugin" not in data["flowData"]:
data["flowData"]["plugin"] = {}
except KeyError:
data["flowData"] = { "var" : {}, "plugin" : {} }
if "eventData" not in data:
data["eventData"] = { "var" : {}, "plugin" : {} }
else:
if "var" not in data["eventData"]:
data["eventData"]["var"] = {}
if "plugin" not in data["eventData"]:
data["eventData"]["plugin"] = {}
if "conductData" not in data:
data["conductData"] = { "var" : {}, "plugin" : {} }
else:
if "var" not in data["conductData"]:
data["conductData"]["var"] = {}
if "plugin" not in data["conductData"]:
data["conductData"]["plugin"] = {}
if "persistentData" not in data:
data["persistentData"] = { "system" : { "trigger" : None, "conduct" : None }, "plugin" : { }, "var" : {} }
else:
if "system" not in data["persistentData"]:
data["persistentData"] = { "system" : { "trigger" : None, "conduct" : None } }
if "plugin" not in data["persistentData"]:
data["persistentData"]["plugin"] = {}
if "var" not in data["persistentData"]:
data["persistentData"]["var"] = {}
else:
data = { "flowData" : { "var" : {}, "plugin" : {} }, "eventData" : { "var" : {}, "plugin" : {} }, "conductData" : { "var" : {}, "plugin" : {} }, "persistentData" : { "system" : { "trigger" : None, "conduct" : None }, "var" : {}, "plugin" : {} } }
return data
def copyData(data,copyEventData=False,copyConductData=False,copyPersistentData=False):
copyOfData = {}
dataTypes = ["flowData"]
if copyPersistentData:
dataTypes.append("persistentData")
else:
copyOfData["persistentData"] = data["persistentData"]
if copyConductData:
dataTypes.append("conductData")
else:
copyOfData["conductData"] = data["conductData"]
if copyEventData:
dataTypes.append("eventData")
else:
copyOfData["eventData"] = data["eventData"]
for dataType in dataTypes:
copyOfData[dataType] = data[dataType].copy()
if not copyOfData[dataType]["var"]:
copyOfData[dataType]["var"] = {}
else:
copyOfData[dataType]["var"] = copy.deepcopy(data[dataType]["var"])
if not copyOfData[dataType]["plugin"]:
copyOfData[dataType]["plugin"] = {}
else:
copyOfData[dataType]["plugin"] = copy.deepcopy(data[dataType]["plugin"])
return copyOfData
def getAction(match,sessionData,currentflow):
return jimi.action._action().getAsClass(id=currentflow["actionID"])
def getTrigger(match,sessionData,currentflow):
return jimi.trigger._trigger().getAsClass(id=currentflow["triggerID"])
def getTriggeredFlowTriggers(uid,sessionData,flowData,triggerID):
return [ x for x in flowData if "triggerID" in x and x["triggerID"] == triggerID and x["type"] == "trigger" ]
def getTriggeredFlowActions(uid,sessionData,flowData,actionID):
return [ x for x in flowData if "actionID" in x and x["actionID"] == actionID and x["type"] == "action" ]
def getTriggeredFlowFlows(uid,sessionData,flowData,flowID):
# prevent cache when running as testTrigger
try:
classObject = flowData[0]["classObject"]
return (False, [ x for x in flowData if "flowID" in x and x["flowID"] == flowID ])
except:
return [ x for x in flowData if "flowID" in x and x["flowID"] == flowID ]
def getFlowDict(uid,sessionData,flowData):
result = {}
for flow in flowData:
result[flow["flowID"]] = flow
# prevent cache when running as testTrigger
try:
classObject = flowData[0]["classObject"]
return (False, result)
except:
return result
| 56.079618
| 290
| 0.559089
|
4a1010610e9cac290bacd99f3087c09af90339e1
| 62,296
|
py
|
Python
|
src/biokbase/narrative/tests/test_jobcomm.py
|
charleshtrenholm/narrative
|
054b52da816e4fd34bf376eef2bd79b241605714
|
[
"MIT"
] | 13
|
2015-01-09T08:14:23.000Z
|
2020-10-03T14:55:28.000Z
|
src/biokbase/narrative/tests/test_jobcomm.py
|
charleshtrenholm/narrative
|
054b52da816e4fd34bf376eef2bd79b241605714
|
[
"MIT"
] | 2,006
|
2015-01-04T01:18:31.000Z
|
2022-03-31T21:08:22.000Z
|
src/biokbase/narrative/tests/test_jobcomm.py
|
charleshtrenholm/narrative
|
054b52da816e4fd34bf376eef2bd79b241605714
|
[
"MIT"
] | 78
|
2015-01-06T19:34:53.000Z
|
2020-11-04T20:37:14.000Z
|
import unittest
from unittest import mock
import os
import itertools
import re
from biokbase.narrative.exception_util import transform_job_exception
from biokbase.narrative.jobs.jobcomm import exc_to_msg
import biokbase.narrative.jobs.jobcomm
import biokbase.narrative.jobs.jobmanager
from biokbase.narrative.jobs.jobmanager import (
JOB_NOT_REG_ERR,
JOB_NOT_BATCH_ERR,
JOBS_TYPE_ERR,
JOBS_MISSING_FALSY_ERR,
get_error_output_state,
)
from biokbase.narrative.jobs.jobcomm import (
JobRequest,
JobComm,
JOB_NOT_PROVIDED_ERR,
JOBS_NOT_PROVIDED_ERR,
)
from biokbase.narrative.exception_util import (
NarrativeException,
JobIDException,
)
from .util import ConfigTests, validate_job_state
from .narrative_mock.mockcomm import MockComm
from .narrative_mock.mockclients import (
get_mock_client,
get_failing_mock_client,
MockClients,
)
from .test_job import (
JOB_COMPLETED,
JOB_CREATED,
JOB_RUNNING,
JOB_TERMINATED,
JOB_ERROR,
JOB_NOT_FOUND,
BATCH_PARENT,
BATCH_COMPLETED,
BATCH_TERMINATED,
BATCH_TERMINATED_RETRIED,
BATCH_ERROR_RETRIED,
BATCH_RETRY_COMPLETED,
BATCH_RETRY_RUNNING,
BATCH_RETRY_ERROR,
ALL_JOBS,
JOBS_TERMINALITY,
TERMINAL_JOBS,
ACTIVE_JOBS,
BATCH_CHILDREN,
get_test_job,
get_test_spec,
TEST_SPECS,
get_test_job_states,
get_cell_2_jobs,
)
from .test_jobmanager import get_test_job_info, get_test_job_infos
APP_NAME = "The Best App in the World"
EXP_ALL_STATE_IDS = ALL_JOBS # or ACTIVE_JOBS
def make_comm_msg(
msg_type: str, job_id_like, as_job_request: bool, content: dict = None
):
if type(job_id_like) is list:
job_id_key = "job_id_list"
else:
job_id_key = "job_id"
msg = {
"msg_id": "some_id",
"content": {"data": {"request_type": msg_type, job_id_key: job_id_like}},
}
if content is not None:
msg["content"]["data"].update(content)
if as_job_request:
return JobRequest(msg)
else:
return msg
def get_app_data(*args):
return {"info": {"name": APP_NAME}}
class JobCommTestCase(unittest.TestCase):
maxDiff = None
@classmethod
@mock.patch("biokbase.narrative.jobs.jobcomm.Comm", MockComm)
@mock.patch(
"biokbase.narrative.clients.get", get_mock_client
)
def setUpClass(cls):
cls.jm = biokbase.narrative.jobs.jobmanager.JobManager()
config = ConfigTests()
os.environ["KB_WORKSPACE_ID"] = config.get("jobs", "job_test_wsname")
cls.jc = biokbase.narrative.jobs.jobcomm.JobComm()
cls.jc._comm = MockComm()
@mock.patch(
"biokbase.narrative.clients.get", get_mock_client
)
def setUp(self):
self.jc._comm.clear_message_cache()
self.jc._jm.initialize_jobs()
self.jc.stop_job_status_loop()
self.job_states = get_test_job_states()
def check_error_message(self, source, input_, err):
"""
response when no input was submitted with a query
args:
input: the dict with job_id/job_id_list key and value
source: the request that was called
name: the type of exception
message: the error message
"""
msg = self.jc._comm.last_message
self.assertEqual(
{
"msg_type": "job_comm_error",
"content": {
"source": source,
**input_,
"name": type(err).__name__,
"message": str(err),
},
},
msg["data"],
)
# ---------------------
# Send comms methods
# ---------------------
def test_send_comm_msg_ok(self):
self.jc.send_comm_message("some_msg", {"foo": "bar"})
msg = self.jc._comm.last_message
self.assertDictEqual(
msg,
{
"content": None,
"data": {"content": {"foo": "bar"}, "msg_type": "some_msg"},
},
)
self.jc._comm.clear_message_cache()
def test_send_error_msg__JobRequest(self):
msg = make_comm_msg("bar", "aeaeae", True)
self.jc.send_error_message("foo", msg, {"extra": "field"})
msg = self.jc._comm.last_message
self.assertEqual(
{
"msg_type": "foo",
"content": {
"source": "bar",
"job_id": "aeaeae",
"extra": "field"
}
},
msg["data"]
)
def test_send_error_msg__dict(self):
msg = make_comm_msg("bar", "aeaeae", False)
self.jc.send_error_message("foo", msg, {"extra": "field"})
msg = self.jc._comm.last_message
self.assertEqual(
{
"msg_type": "foo",
"content": {
"source": "bar",
"job_id": "aeaeae",
"extra": "field"
}
},
msg["data"]
)
def test_send_error_msg__None(self):
self.jc.send_error_message("foo", None, {"extra": "field"})
msg = self.jc._comm.last_message
self.assertEqual(
{
"msg_type": "foo",
"content": {
"source": None,
"extra": "field"
}
},
msg["data"]
)
def test_send_error_msg__str(self):
source = "test_jobcomm"
self.jc.send_error_message("foo", source, {"extra": "field"})
msg = self.jc._comm.last_message
self.assertEqual(
{
"msg_type": "foo",
"content": {
"source": source,
"extra": "field"
}
},
msg["data"]
)
# ---------------------
# Requests
# ---------------------
def test_req_no_inputs__succeed(self):
msg = {
"msg_id": "some_id",
"content": {"data": {
"request_type": "all_status",
}},
}
self.jc._handle_comm_message(msg)
msg = self.jc._comm.last_message
self.assertEqual("job_status_all", msg["data"]["msg_type"])
def test_req_no_inputs__fail(self):
msg = {
"msg_id": "some_id",
"content": {"data": {
"request_type": "job_status_batch",
}},
}
err = JobIDException(JOB_NOT_PROVIDED_ERR)
with self.assertRaisesRegex(type(err), str(err)):
self.jc._handle_comm_message(msg)
self.check_error_message("job_status_batch", {}, err)
msg = {
"msg_id": "some_id",
"content": {"data": {
"request_type": "retry_job",
}},
}
err = JobIDException(JOBS_NOT_PROVIDED_ERR)
with self.assertRaisesRegex(type(err), str(err)):
self.jc._handle_comm_message(msg)
self.check_error_message("retry_job", {}, err)
# ---------------------
# Start job status loop
# ---------------------
@mock.patch(
"biokbase.narrative.clients.get", get_mock_client
)
def test_start_stop_job_status_loop(self):
self.assertFalse(self.jc._running_lookup_loop)
self.assertIsNone(self.jc._lookup_timer)
self.jc.start_job_status_loop()
msg = self.jc._comm.last_message
self.assertEqual(
{
"msg_type": "job_status_all",
"content": get_test_job_states(EXP_ALL_STATE_IDS),
},
msg["data"],
)
self.assertTrue(self.jc._running_lookup_loop)
self.assertIsNotNone(self.jc._lookup_timer)
self.jc.stop_job_status_loop()
self.assertFalse(self.jc._running_lookup_loop)
self.assertIsNone(self.jc._lookup_timer)
@mock.patch(
"biokbase.narrative.clients.get", get_mock_client
)
def test_start_job_status_loop__cell_ids(self):
cell_2_jobs = get_cell_2_jobs()
cell_ids = list(cell_2_jobs.keys())
# Iterate through all combinations of cell IDs
for combo_len in range(len(cell_ids) + 1):
for combo in itertools.combinations(cell_ids, combo_len):
combo = list(combo)
self.jm._running_jobs = {}
self.assertFalse(self.jc._running_lookup_loop)
self.assertIsNone(self.jc._lookup_timer)
self.jc.start_job_status_loop(init_jobs=True, cell_list=combo)
msg = self.jc._comm.last_message
self.assertEqual(
{
"msg_type": "job_status_all",
"content": get_test_job_states(
EXP_ALL_STATE_IDS
), # consult version history for when this was exp_job_ids
},
msg["data"],
)
self.assertTrue(self.jc._running_lookup_loop)
self.assertTrue(self.jc._lookup_timer)
self.jc.stop_job_status_loop()
self.assertFalse(self.jc._running_lookup_loop)
self.assertIsNone(self.jc._lookup_timer)
# ---------------------
# Lookup all job states
# ---------------------
@mock.patch(
"biokbase.narrative.clients.get", get_mock_client
)
def test_lookup_all_job_states__ok(self):
req = make_comm_msg("all_status", None, True)
states = self.jc._lookup_all_job_states(req)
msg = self.jc._comm.last_message
self.assertEqual(
{
"msg_type": "job_status_all",
"content": get_test_job_states(EXP_ALL_STATE_IDS),
},
msg["data"],
)
for job_id, state in states.items():
self.assertIsInstance(job_id, str)
validate_job_state(state)
# -----------------------
# Lookup single job state
# -----------------------
def test_lookup_job_state__1_ok(self):
output_states = self.jc.lookup_job_state(JOB_COMPLETED)
msg = self.jc._comm.last_message
self.assertEqual(
{
"msg_type": "job_status",
"content": output_states,
},
msg["data"],
)
for job_id, state in output_states.items():
self.assertEqual(self.job_states[job_id], state)
validate_job_state(state)
def test_lookup_job_state__no_job(self):
with self.assertRaisesRegex(JobIDException, re.escape(f"{JOBS_MISSING_FALSY_ERR}: {[None]}")):
self.jc.lookup_job_state(None)
# -----------------------
# Lookup select job states
# -----------------------
def test_lookup_job_states__1_ok(self):
job_id_list = [JOB_COMPLETED]
req = make_comm_msg("job_status", job_id_list, True)
output_states = self.jc._lookup_job_states(req)
msg = self.jc._comm.last_message
self.assertEqual(
{
"msg_type": "job_status",
"content": output_states,
},
msg["data"],
)
for job_id, state in output_states.items():
self.assertEqual(self.job_states[job_id], state)
validate_job_state(state)
@mock.patch(
"biokbase.narrative.clients.get", get_mock_client
)
def test_lookup_job_states__2_ok(self):
job_id_list = [JOB_COMPLETED, BATCH_PARENT]
req = make_comm_msg("job_status", job_id_list, True)
output_states = self.jc._lookup_job_states(req)
msg = self.jc._comm.last_message
self.assertEqual(
{
"msg_type": "job_status",
"content": output_states,
},
msg["data"],
)
for job_id, state in output_states.items():
self.assertEqual(self.job_states[job_id], state)
validate_job_state(state)
def test_lookup_job_states__no_job(self):
job_id_list = [None]
req = make_comm_msg("job_status", job_id_list, False)
err = JobIDException(JOBS_MISSING_FALSY_ERR, job_id_list)
with self.assertRaisesRegex(type(err), re.escape(str(err))):
self.jc._handle_comm_message(req)
self.check_error_message(
"job_status", {"job_id_list": job_id_list}, err
)
def test_lookup_job_states__ok_bad(self):
job_id_list = ["nope", JOB_COMPLETED]
req = make_comm_msg("job_status", job_id_list, True)
output_states = self.jc._lookup_job_states(req)
msg = self.jc._comm.last_message
self.assertEqual(
{"msg_type": "job_status", "content": output_states},
msg["data"],
)
for job_id, state in output_states.items():
if job_id in self.job_states:
self.assertEqual(self.job_states[job_id], state)
validate_job_state(state)
else:
self.assertEqual(get_error_output_state(job_id), state)
@mock.patch(
"biokbase.narrative.clients.get", get_mock_client
)
def test_lookup_job_states__ee2_error(self):
def mock_check_jobs(self, params):
raise Exception("Test exception")
job_id_list = ALL_JOBS
req = make_comm_msg("job_status", job_id_list, True)
with mock.patch.object(MockClients, "check_jobs", side_effect=mock_check_jobs):
self.jc._lookup_job_states(req)
msg = self.jc._comm.last_message
self.assertEqual(
{
"msg_type": "job_status",
"content": {
**get_test_job_states(TERMINAL_JOBS),
**{
job_id: get_error_output_state(job_id, "ee2_error")
for job_id in ACTIVE_JOBS
}
}
},
msg["data"]
)
# -----------------------
# Lookup batch job states
# -----------------------
@mock.patch(
"biokbase.narrative.clients.get", get_mock_client
)
def test_lookup_job_states_batch__ok(self):
job_id = BATCH_PARENT
req = make_comm_msg("job_status_batch", job_id, False)
self.jc._handle_comm_message(req)
msg = self.jc._comm.last_message
self.assertEqual(
{
"msg_type": "job_status",
"content": {
job_id: self.job_states[job_id]
for job_id in [BATCH_PARENT] + BATCH_CHILDREN
},
},
msg["data"],
)
def test_lookup_job_states_batch__dne(self):
job_id = JOB_NOT_FOUND
req = make_comm_msg("job_status_batch", job_id, False)
err = JobIDException(JOB_NOT_REG_ERR, job_id)
with self.assertRaisesRegex(type(err), re.escape(str(err))):
self.jc._handle_comm_message(req)
msg = self.jc._comm.peek_message(-2)
self.assertEqual(
{
"msg_type": "job_status",
"content": {JOB_NOT_FOUND: get_error_output_state(JOB_NOT_FOUND)},
},
msg["data"],
)
self.check_error_message(
"job_status_batch", {"job_id": job_id}, err
)
def test_lookup_job_states_batch__no_job(self):
job_id = None
req = make_comm_msg("job_status_batch", job_id, False)
err = JobIDException(JOB_NOT_REG_ERR, job_id)
with self.assertRaisesRegex(type(err), re.escape(str(err))):
self.jc._handle_comm_message(req)
self.check_error_message(
"job_status_batch", {"job_id": job_id}, err
)
def test_lookup_job_states_batch__not_batch(self):
job_id = JOB_CREATED
req = make_comm_msg("job_status_batch", job_id, False)
err = JobIDException(JOB_NOT_BATCH_ERR, job_id)
with self.assertRaisesRegex(type(err), re.escape(str(err))):
self.jc._handle_comm_message(req)
self.check_error_message(
"job_status_batch", {"job_id": job_id}, err
)
# -----------------------
# Lookup job info
# -----------------------
def test_lookup_job_info__ok(self):
job_id_list = ALL_JOBS
req = make_comm_msg("job_info", job_id_list, True)
self.jc._lookup_job_info(req)
msg = self.jc._comm.last_message
self.assertEqual(
{
"msg_type": "job_info",
"content": get_test_job_infos(job_id_list),
},
msg["data"],
)
def test_lookup_job_info__no_job(self):
job_id_list = [None]
req = make_comm_msg("job_info", job_id_list, False)
err = JobIDException(JOBS_MISSING_FALSY_ERR, job_id_list)
with self.assertRaisesRegex(type(err), re.escape(str(err))):
self.jc._handle_comm_message(req)
self.check_error_message(
"job_info", {"job_id_list": job_id_list}, err
)
def test_lookup_job_info__ok_bad(self):
job_id_list = [JOB_COMPLETED, JOB_NOT_FOUND]
req = make_comm_msg("job_info", job_id_list, True)
self.jc._lookup_job_info(req)
msg = self.jc._comm.last_message
self.assertEqual(
{
"msg_type": "job_info",
"content": {
JOB_COMPLETED: get_test_job_info(JOB_COMPLETED),
JOB_NOT_FOUND: "does_not_exist",
},
},
msg["data"],
)
# ------------
# Lookup batch job infos
# ------------
@mock.patch(
"biokbase.narrative.clients.get", get_mock_client
)
def test_lookup_job_info_batch__ok(self):
job_id = BATCH_PARENT
req = make_comm_msg("job_info_batch", job_id, True)
self.jc._lookup_job_info_batch(req)
msg = self.jc._comm.last_message
self.assertEqual(
{
"msg_type": "job_info",
"content": get_test_job_infos([BATCH_PARENT] + BATCH_CHILDREN),
},
msg["data"],
)
def test_lookup_job_info_batch__no_job(self):
job_id = None
req = make_comm_msg("job_info_batch", job_id, False)
err = JobIDException(JOB_NOT_REG_ERR, job_id)
with self.assertRaisesRegex(type(err), str(err)):
self.jc._handle_comm_message(req)
self.check_error_message("job_info_batch", {"job_id": job_id}, err)
def test_lookup_job_info_batch__dne(self):
job_id = JOB_NOT_FOUND
req = make_comm_msg("job_info_batch", job_id, False)
err = JobIDException(JOB_NOT_REG_ERR, job_id)
with self.assertRaisesRegex(type(err), str(err)):
self.jc._handle_comm_message(req)
self.check_error_message("job_info_batch", {"job_id": job_id}, err)
def test_lookup_job_info_batch__not_batch(self):
job_id = BATCH_COMPLETED
req = make_comm_msg("job_info_batch", job_id, False)
err = JobIDException(JOB_NOT_BATCH_ERR, job_id)
with self.assertRaisesRegex(type(err), str(err)):
self.jc._handle_comm_message(req)
self.check_error_message("job_info_batch", {"job_id": job_id}, err)
# ------------
# Cancel list of jobs
# ------------
@mock.patch(
"biokbase.narrative.clients.get", get_mock_client
)
def test_cancel_jobs__single_job_id_in(self):
job_id = JOB_RUNNING
req = make_comm_msg("cancel_job", job_id, False)
self.jc._handle_comm_message(req)
msg = self.jc._comm.last_message
self.assertEqual(
{
"msg_type": "job_status",
"content": {
JOB_RUNNING: self.job_states[JOB_RUNNING],
},
},
msg["data"],
)
@mock.patch(
"biokbase.narrative.clients.get", get_mock_client
)
def test_cancel_jobs__1_ok(self):
job_id_list = [JOB_RUNNING]
req = make_comm_msg("cancel_job", job_id_list, True)
self.jc._cancel_jobs(req)
msg = self.jc._comm.last_message
self.assertEqual(
{
"msg_type": "job_status",
"content": {
JOB_RUNNING: self.job_states[JOB_RUNNING],
},
},
msg["data"],
)
@mock.patch(
"biokbase.narrative.clients.get", get_mock_client
)
def test_cancel_jobs__2_ok(self):
job_id_list = [JOB_CREATED, JOB_RUNNING, None]
req = make_comm_msg("cancel_job", job_id_list, True)
self.jc._cancel_jobs(req)
msg = self.jc._comm.last_message
self.assertEqual(
{
"msg_type": "job_status",
"content": {
JOB_CREATED: self.job_states[JOB_CREATED],
JOB_RUNNING: self.job_states[JOB_RUNNING],
},
},
msg["data"],
)
def test_cancel_jobs__no_job(self):
job_id_list = None
# Create req manually because want job_id_list to be not list
req = {
"msg_id": "some_id",
"content": {"data": {"request_type": "cancel_job", "job_id_list": job_id_list}},
}
err = TypeError(JOBS_TYPE_ERR)
with self.assertRaisesRegex(type(err), str(err)):
self.jc._handle_comm_message(req)
job_id_list = [None, ""]
req = make_comm_msg("cancel_job", job_id_list, False)
err = JobIDException(JOBS_MISSING_FALSY_ERR, job_id_list)
with self.assertRaisesRegex(type(err), re.escape(str(err))):
self.jc._handle_comm_message(req)
self.check_error_message("cancel_job", {"job_id_list": job_id_list}, err)
@mock.patch(
"biokbase.narrative.clients.get", get_mock_client
)
def test_cancel_jobs__some_bad_jobs(self):
FAKE_JOB = "fake_job_id"
job_id_list = [
None,
JOB_NOT_FOUND,
JOB_NOT_FOUND,
"",
JOB_RUNNING,
JOB_CREATED,
FAKE_JOB,
]
req = make_comm_msg("cancel_job", job_id_list, True)
self.jc._cancel_jobs(req)
msg = self.jc._comm.last_message
self.assertEqual(
{
"msg_type": "job_status",
"content": {
JOB_RUNNING: self.job_states[JOB_RUNNING],
JOB_CREATED: self.job_states[JOB_CREATED],
JOB_NOT_FOUND: get_error_output_state(JOB_NOT_FOUND),
FAKE_JOB: get_error_output_state(FAKE_JOB),
},
},
msg["data"],
)
def test_cancel_jobs__all_bad_jobs(self):
FAKE_JOB = "fake_job_id"
job_id_list = [None, "", JOB_NOT_FOUND, JOB_NOT_FOUND, FAKE_JOB]
req = make_comm_msg("cancel_job", job_id_list, True)
self.jc._cancel_jobs(req)
msg = self.jc._comm.last_message
self.assertEqual(
{
"msg_type": "job_status",
"content": {
JOB_NOT_FOUND: get_error_output_state(JOB_NOT_FOUND),
FAKE_JOB: get_error_output_state(FAKE_JOB),
},
},
msg["data"],
)
@mock.patch(
"biokbase.narrative.clients.get",
get_failing_mock_client,
)
def test_cancel_jobs__failure(self):
job_id_list = [JOB_RUNNING]
req = make_comm_msg("cancel_job", job_id_list, False)
with self.assertRaises(NarrativeException) as e:
self.jc._handle_comm_message(req)
self.assertIn("Can't cancel job", str(e.exception))
msg = self.jc._comm.last_message
self.assertEqual("job_comm_error", msg["data"]["msg_type"])
self.assertEqual("cancel_job", msg["data"]["content"]["source"])
self.assertEqual(job_id_list, msg["data"]["content"]["job_id_list"])
self.assertEqual("Unable to cancel job", msg["data"]["content"]["error"])
# ------------
# Retry list of jobs
# ------------
@mock.patch(
"biokbase.narrative.clients.get", get_mock_client
)
def test_retry_jobs_1_ok(self):
job_id_list = [JOB_TERMINATED]
req = make_comm_msg("retry_job", job_id_list, True)
self.jc._retry_jobs(req)
msg = self.jc._comm.last_message
self.assertEqual(
{"job_id_list": [JOB_TERMINATED[::-1]]}, msg["data"]["content"]
)
self.assertEqual("new_job", msg["data"]["msg_type"])
@mock.patch(
"biokbase.narrative.clients.get", get_mock_client
)
def test_retry_jobs_2_ok(self):
job_id_list = [JOB_TERMINATED, JOB_ERROR, None]
req = make_comm_msg("retry_job", job_id_list, True)
self.jc._retry_jobs(req)
msg = self.jc._comm.last_message
self.assertEqual(
{"job_id_list": [JOB_TERMINATED[::-1], JOB_ERROR[::-1]]},
msg["data"]["content"],
)
self.assertEqual("new_job", msg["data"]["msg_type"])
def test_retry_jobs_no_job(self):
job_id_list = [None, ""]
req = make_comm_msg("retry_job", job_id_list, False)
err = JobIDException(JOBS_MISSING_FALSY_ERR, job_id_list)
with self.assertRaisesRegex(type(err), re.escape(str(err))):
self.jc._handle_comm_message(req)
self.check_error_message("retry_job", {"job_id_list": job_id_list}, err)
@mock.patch(
"biokbase.narrative.clients.get", get_mock_client
)
def test_retry_jobs_some_bad_jobs(self):
job_id_list = [JOB_TERMINATED, "nope", "no"]
req = make_comm_msg("retry_job", job_id_list, True)
self.jc._retry_jobs(req)
msg = self.jc._comm.last_message
self.assertEqual(
{"job_id_list": [JOB_TERMINATED[::-1]]}, msg["data"]["content"]
)
self.assertEqual("new_job", msg["data"]["msg_type"])
@mock.patch(
"biokbase.narrative.clients.get", get_mock_client
)
def test_retry_jobs_all_bad_jobs(self):
job_id_list = ["nope", "no"]
req = make_comm_msg("retry_job", job_id_list, True)
self.jc._retry_jobs(req)
msg = self.jc._comm.last_message
self.assertEqual({"job_id_list": []}, msg["data"]["content"])
self.assertEqual("new_job", msg["data"]["msg_type"])
@mock.patch(
"biokbase.narrative.clients.get",
get_failing_mock_client,
)
def test_retry_jobs_failure(self):
job_id_list = [JOB_COMPLETED, JOB_CREATED, JOB_TERMINATED]
req = make_comm_msg("retry_job", job_id_list, False)
with self.assertRaises(NarrativeException) as e:
self.jc._handle_comm_message(req)
self.assertIn("Jobs retry failed", str(e.exception))
msg = self.jc._comm.last_message
self.assertEqual("job_comm_error", msg["data"]["msg_type"])
self.assertEqual(job_id_list, msg["data"]["content"]["job_id_list"])
self.assertEqual("Unable to retry job(s)", msg["data"]["content"]["error"])
# -----------------
# Fetching job logs
# -----------------
    @mock.patch(
        "biokbase.narrative.clients.get", get_mock_client
    )
    def test_get_job_logs_ok(self):
        """Exercise log retrieval across a matrix of first_line/num_lines/latest
        combinations and verify the paging arithmetic of each reply."""
        job_id = JOB_COMPLETED
        lines_available = 100  # just for convenience if the mock changes
        # first_line, num_lines, latest, number of lines in output
        cases = [
            (0, 10, False, 10),
            (-100, 10, False, 10),
            (50, 20, False, 20),
            (0, 5000, False, lines_available),
            (0, None, False, lines_available),
            (80, None, False, 20),
            (0, 10, True, 10),
            (-100, 10, True, 10),
            (50, 20, True, 20),
            (0, 5000, True, lines_available),
            (0, None, True, lines_available),
            (80, None, True, lines_available),
        ]
        for c in cases:
            content = {"first_line": c[0], "num_lines": c[1], "latest": c[2]}
            req = make_comm_msg("job_logs", job_id, True, content)
            self.jc._get_job_logs(req)
            msg = self.jc._comm.last_message
            self.assertEqual(job_id, msg["data"]["content"]["job_id"])
            self.assertEqual(None, msg["data"]["content"]["batch_id"])
            self.assertEqual("job_logs", msg["data"]["msg_type"])
            self.assertEqual(lines_available, msg["data"]["content"]["max_lines"])
            self.assertEqual(c[3], len(msg["data"]["content"]["lines"]))
            self.assertEqual(c[2], msg["data"]["content"]["latest"])
            # Recompute the expected index of the first returned line:
            # a "latest" request with no line count ignores first_line ...
            first = 0 if c[1] is None and c[2] is True else c[0]
            n_lines = c[1] if c[1] else lines_available
            # ... negative offsets clamp to the start of the log ...
            if first < 0:
                first = 0
            # ... and "latest" counts back from the end of the log.
            if c[2]:
                first = lines_available - min(n_lines, lines_available)
            self.assertEqual(first, msg["data"]["content"]["first"])
            # Mock log lines embed their own index; none are error lines.
            for idx, line in enumerate(msg["data"]["content"]["lines"]):
                self.assertIn(str(first + idx), line["line"])
                self.assertEqual(0, line["is_error"])
@mock.patch(
"biokbase.narrative.clients.get",
get_failing_mock_client,
)
def test_get_job_logs_failure(self):
job_id = JOB_COMPLETED
req = make_comm_msg("job_logs", job_id, False)
with self.assertRaises(NarrativeException) as e:
self.jc._handle_comm_message(req)
self.assertIn("Can't get job logs", str(e.exception))
msg = self.jc._comm.last_message
self.assertEqual("job_comm_error", msg["data"]["msg_type"])
self.assertEqual("Unable to retrieve job logs", msg["data"]["content"]["error"])
def test_get_job_logs_no_job(self):
job_id = None
req = make_comm_msg("job_logs", job_id, False)
err = JobIDException(JOB_NOT_REG_ERR, job_id)
with self.assertRaisesRegex(type(err), str(err)):
self.jc._handle_comm_message(req)
self.check_error_message("job_logs", {"job_id": job_id}, err)
def test_get_job_logs_bad_job(self):
job_id = "bad_job"
req = make_comm_msg("job_logs", job_id, False)
err = JobIDException(JOB_NOT_REG_ERR, job_id)
with self.assertRaisesRegex(type(err), str(err)):
self.jc._handle_comm_message(req)
self.check_error_message("job_logs", {"job_id": job_id}, err)
# ------------------------
# Modify job update
# ------------------------
@mock.patch(
"biokbase.narrative.clients.get", get_mock_client
)
def test_modify_job_update__start__ok(self):
job_id_list = [JOB_COMPLETED, JOB_CREATED, BATCH_PARENT]
req = make_comm_msg("start_job_update", job_id_list, True)
self.jc._modify_job_updates(req)
msg = self.jc._comm.last_message
self.assertEqual(
{"msg_type": "job_status", "content": get_test_job_states(job_id_list)},
msg["data"],
)
for job_id in ALL_JOBS:
if job_id in job_id_list:
self.assertEqual(
self.jm._running_jobs[job_id]["refresh"],
int(not JOBS_TERMINALITY[job_id]) + 1,
)
else:
self.assertEqual(
self.jm._running_jobs[job_id]["refresh"],
int(not JOBS_TERMINALITY[job_id]),
)
self.assertTrue(self.jc._lookup_timer)
self.assertTrue(self.jc._running_lookup_loop)
@mock.patch(
"biokbase.narrative.clients.get", get_mock_client
)
def test_modify_job_update__stop__ok(self):
job_id_list = [JOB_COMPLETED, JOB_CREATED, BATCH_PARENT]
req = make_comm_msg("stop_job_update", job_id_list, True)
self.jc._modify_job_updates(req)
for job_id in ALL_JOBS:
if job_id in job_id_list:
self.assertEqual(
self.jm._running_jobs[job_id]["refresh"],
max(int(not JOBS_TERMINALITY[job_id]) - 1, 0),
)
else:
self.assertEqual(
self.jm._running_jobs[job_id]["refresh"],
int(not JOBS_TERMINALITY[job_id]),
)
self.assertIsNone(self.jc._lookup_timer)
self.assertFalse(self.jc._running_lookup_loop)
def test_modify_job_update__no_job(self):
job_id_list = [None]
req = make_comm_msg("start_job_update", job_id_list, False)
err = JobIDException(JOBS_MISSING_FALSY_ERR, job_id_list)
with self.assertRaisesRegex(type(err), re.escape(str(err))):
self.jc._handle_comm_message(req)
self.check_error_message("start_job_update", {"job_id_list": job_id_list}, err)
def test_modify_job_update__stop__ok_bad_job(self):
job_id_list = [JOB_COMPLETED]
req = make_comm_msg("stop_job_update", job_id_list + [JOB_NOT_FOUND], True)
self.jc._modify_job_updates(req)
for job_id in ALL_JOBS:
if job_id in job_id_list:
self.assertEqual(
self.jm._running_jobs[job_id]["refresh"],
max(int(not JOBS_TERMINALITY[job_id]) - 1, 0),
)
else:
self.assertEqual(
self.jm._running_jobs[job_id]["refresh"],
int(not JOBS_TERMINALITY[job_id]),
)
self.assertIsNone(self.jc._lookup_timer)
self.assertFalse(self.jc._running_lookup_loop)
@mock.patch(
"biokbase.narrative.clients.get", get_mock_client
)
def test_modify_job_update__stop__loop_still_running(self):
"""Lookup loop should not get stopped"""
self.jc.start_job_status_loop()
job_id_list = [JOB_COMPLETED, BATCH_PARENT, JOB_RUNNING]
req = make_comm_msg("stop_job_update", job_id_list, True)
self.jc._modify_job_updates(req)
for job_id in ALL_JOBS:
if job_id in job_id_list:
self.assertEqual(
self.jm._running_jobs[job_id]["refresh"],
max(int(not JOBS_TERMINALITY[job_id]) - 1, 0),
)
else:
self.assertEqual(
self.jm._running_jobs[job_id]["refresh"],
int(not JOBS_TERMINALITY[job_id]),
)
self.assertTrue(self.jc._lookup_timer)
self.assertTrue(self.jc._running_lookup_loop)
# ------------------------
# Modify job update batch
# ------------------------
@mock.patch(
"biokbase.narrative.clients.get", get_mock_client
)
def test_modify_job_update_batch__start__ok(self):
job_id = BATCH_PARENT
job_id_list = [BATCH_PARENT] + BATCH_CHILDREN
req = make_comm_msg("start_job_update_batch", job_id, True)
self.jc._modify_job_updates_batch(req)
msg = self.jc._comm.last_message
self.assertEqual(
{"msg_type": "job_status", "content": get_test_job_states(job_id_list)},
msg["data"],
)
for job_id in ALL_JOBS:
if job_id in job_id_list:
self.assertEqual(
self.jm._running_jobs[job_id]["refresh"],
int(not JOBS_TERMINALITY[job_id]) + 1,
)
else:
self.assertEqual(
self.jm._running_jobs[job_id]["refresh"],
int(not JOBS_TERMINALITY[job_id]),
)
self.assertTrue(self.jc._lookup_timer)
self.assertTrue(self.jc._running_lookup_loop)
@mock.patch(
"biokbase.narrative.clients.get", get_mock_client
)
def test_modify_job_update_batch__stop__ok(self):
job_id = BATCH_PARENT
job_id_list = [BATCH_PARENT] + BATCH_CHILDREN
req = make_comm_msg("stop_job_update_batch", job_id, True)
self.jc._modify_job_updates_batch(req)
for job_id in ALL_JOBS:
if job_id in job_id_list:
self.assertEqual(
self.jm._running_jobs[job_id]["refresh"],
max(int(not JOBS_TERMINALITY[job_id]) - 1, 0),
)
else:
self.assertEqual(
self.jm._running_jobs[job_id]["refresh"],
int(not JOBS_TERMINALITY[job_id]),
)
self.assertIsNone(self.jc._lookup_timer)
self.assertFalse(self.jc._running_lookup_loop)
def test_modify_job_update_batch__no_job(self):
job_id = None
req = make_comm_msg("start_job_update_batch", job_id, False)
err = JobIDException(JOB_NOT_REG_ERR, job_id)
with self.assertRaisesRegex(type(err), str(err)):
self.jc._handle_comm_message(req)
self.check_error_message("start_job_update_batch", {"job_id": job_id}, err)
def test_modify_job_update_batch__bad_job(self):
job_id = JOB_NOT_FOUND
req = make_comm_msg("start_job_update_batch", job_id, False)
err = JobIDException(JOB_NOT_REG_ERR, job_id)
with self.assertRaisesRegex(type(err), str(err)):
self.jc._handle_comm_message(req)
self.check_error_message("start_job_update_batch", {"job_id": job_id}, err)
def test_modify_job_update_batch__not_batch(self):
job_id = JOB_RUNNING
req = make_comm_msg("start_job_update_batch", job_id, False)
err = JobIDException(JOB_NOT_BATCH_ERR, job_id)
with self.assertRaisesRegex(type(err), str(err)):
self.jc._handle_comm_message(req)
self.check_error_message("start_job_update_batch", {"job_id": job_id}, err)
# ------------------------
# Handle bad comm messages
# ------------------------
def test_handle_comm_message_bad(self):
with self.assertRaises(ValueError) as e:
self.jc._handle_comm_message({"foo": "bar"})
self.assertIn("Improperly formatted job channel message!", str(e.exception))
with self.assertRaises(ValueError) as e:
self.jc._handle_comm_message({"content": {"data": {"request_type": None}}})
self.assertIn("Missing request type in job channel message!", str(e.exception))
def test_handle_comm_message_unknown(self):
unknown = "NotAJobRequest"
with self.assertRaises(ValueError) as e:
self.jc._handle_comm_message(
{"content": {"data": {"request_type": unknown}}}
)
self.assertIn(f"Unknown KBaseJobs message '{unknown}'", str(e.exception))
# From here, this test the ability for the _handle_comm_message function to
# deal with the various types of messages that will get passed to it. While
# the majority of the tests above are sent directly to the function to be
# tested, these just craft the message and pass it to the message handler.
@mock.patch("biokbase.narrative.clients.get", get_mock_client)
def test_handle_all_states_msg(self):
req = make_comm_msg("all_status", None, False)
self.jc._handle_comm_message(req)
msg = self.jc._comm.last_message
self.assertEqual(msg["data"]["msg_type"], "job_status_all")
states = msg["data"]["content"]
self.assertIsInstance(states, dict)
for job_id in states:
print("ALL JOB STATE TESTING")
print(states[job_id])
validate_job_state(states[job_id])
def test_handle_job_status_msg(self):
job_id = JOB_COMPLETED
req = make_comm_msg("job_status", job_id, False)
self.jc._handle_comm_message(req)
msg = self.jc._comm.last_message
self.assertEqual(msg["data"]["msg_type"], "job_status")
validate_job_state(msg["data"]["content"][JOB_COMPLETED])
@mock.patch(
"biokbase.narrative.clients.get", get_mock_client
)
def test_handle_job_info_msg(self):
job_id = JOB_COMPLETED
req = make_comm_msg("job_info", job_id, False)
self.jc._handle_comm_message(req)
msg = self.jc._comm.last_message
self.assertEqual(
{"msg_type": "job_info", "content": {job_id: get_test_job_info(job_id)}},
msg["data"],
)
@mock.patch(
"biokbase.narrative.clients.get", get_mock_client
)
def test_handle_job_status_batch_msg(self):
job_id = BATCH_PARENT
req = make_comm_msg("job_status_batch", job_id, False)
self.jc._handle_comm_message(req)
msg = self.jc._comm.last_message
self.assertEqual(
{
"msg_type": "job_status",
"content": get_test_job_states([BATCH_PARENT] + BATCH_CHILDREN),
},
msg["data"],
)
@mock.patch(
"biokbase.narrative.clients.get", get_mock_client
)
def test_handle_job_info_batch_msg(self):
job_id = BATCH_PARENT
req = make_comm_msg("job_info_batch", job_id, False)
self.jc._handle_comm_message(req)
msg = self.jc._comm.last_message
self.assertEqual(
{
"msg_type": "job_info",
"content": get_test_job_infos([BATCH_PARENT] + BATCH_CHILDREN),
},
msg["data"],
)
@mock.patch(
"biokbase.narrative.clients.get", get_mock_client
)
def test_handle_cancel_job_msg(self):
job_id = JOB_COMPLETED
req = make_comm_msg("cancel_job", job_id, False)
self.jc._handle_comm_message(req)
msg = self.jc._comm.last_message
self.assertEqual(msg["data"]["msg_type"], "job_status")
@mock.patch(
"biokbase.narrative.clients.get", get_mock_client
)
def test_handle_start_job_update_msg(self):
job_id_list = [JOB_CREATED, JOB_COMPLETED, BATCH_PARENT]
req = make_comm_msg("start_job_update", job_id_list, False)
self.jc._handle_comm_message(req)
msg = self.jc._comm.last_message
self.assertEqual(
{"msg_type": "job_status", "content": get_test_job_states(job_id_list)},
msg["data"],
)
for job_id in ALL_JOBS:
if job_id in job_id_list:
self.assertEqual(
self.jm._running_jobs[job_id]["refresh"],
int(not JOBS_TERMINALITY[job_id]) + 1,
)
else:
self.assertEqual(
self.jm._running_jobs[job_id]["refresh"],
int(not JOBS_TERMINALITY[job_id]),
)
self.assertTrue(self.jc._lookup_timer)
self.assertTrue(self.jc._running_lookup_loop)
@mock.patch(
"biokbase.narrative.clients.get", get_mock_client
)
def test_handle_stop_job_update_msg(self):
job_id_list = [JOB_CREATED, JOB_COMPLETED, BATCH_PARENT]
req = make_comm_msg("stop_job_update", job_id_list, False)
self.jc._handle_comm_message(req)
for job_id in ALL_JOBS:
if job_id in job_id_list:
self.assertEqual(
self.jm._running_jobs[job_id]["refresh"],
max(int(not JOBS_TERMINALITY[job_id]) - 1, 0),
)
else:
self.assertEqual(
self.jm._running_jobs[job_id]["refresh"],
int(not JOBS_TERMINALITY[job_id]),
)
self.assertIsNone(self.jc._lookup_timer)
self.assertFalse(self.jc._running_lookup_loop)
@mock.patch(
"biokbase.narrative.clients.get", get_mock_client
)
def test_handle_start_job_update_batch_msg(self):
job_id = BATCH_PARENT
job_id_list = [BATCH_PARENT] + BATCH_CHILDREN
req = make_comm_msg("start_job_update_batch", job_id, False)
self.jc._handle_comm_message(req)
msg = self.jc._comm.last_message
self.assertEqual(
{"msg_type": "job_status", "content": get_test_job_states(job_id_list)},
msg["data"],
)
for job_id in ALL_JOBS:
if job_id in job_id_list:
self.assertEqual(
self.jm._running_jobs[job_id]["refresh"],
int(not JOBS_TERMINALITY[job_id]) + 1,
)
else:
self.assertEqual(
self.jm._running_jobs[job_id]["refresh"],
int(not JOBS_TERMINALITY[job_id]),
)
self.assertTrue(self.jc._lookup_timer)
self.assertTrue(self.jc._running_lookup_loop)
@mock.patch(
"biokbase.narrative.clients.get", get_mock_client
)
def test_handle_stop_job_update_batch_msg(self):
job_id = BATCH_PARENT
job_id_list = [BATCH_PARENT] + BATCH_CHILDREN
req = make_comm_msg("stop_job_update_batch", job_id, False)
self.jc._handle_comm_message(req)
for job_id in ALL_JOBS:
if job_id in job_id_list:
self.assertEqual(
self.jm._running_jobs[job_id]["refresh"],
max(int(not JOBS_TERMINALITY[job_id]) - 1, 0),
)
else:
self.assertEqual(
self.jm._running_jobs[job_id]["refresh"],
int(not JOBS_TERMINALITY[job_id]),
)
self.assertIsNone(self.jc._lookup_timer)
self.assertFalse(self.jc._running_lookup_loop)
@mock.patch(
"biokbase.narrative.clients.get", get_mock_client
)
def test_handle_latest_job_logs_msg(self):
job_id = JOB_COMPLETED
req = make_comm_msg(
"job_logs", job_id, False, content={"num_lines": 10, "latest": True}
)
self.jc._handle_comm_message(req)
msg = self.jc._comm.last_message
self.assertEqual(msg["data"]["msg_type"], "job_logs")
self.assertEqual(msg["data"]["content"]["job_id"], job_id)
self.assertTrue(msg["data"]["content"]["latest"])
self.assertEqual(msg["data"]["content"]["first"], 90)
self.assertEqual(msg["data"]["content"]["max_lines"], 100)
self.assertEqual(len(msg["data"]["content"]["lines"]), 10)
@mock.patch(
"biokbase.narrative.clients.get", get_mock_client
)
def test_handle_job_logs_msg(self):
job_id = JOB_COMPLETED
req = make_comm_msg(
"job_logs", job_id, False, content={"num_lines": 10, "first_line": 0}
)
self.jc._handle_comm_message(req)
msg = self.jc._comm.last_message
self.assertEqual(msg["data"]["msg_type"], "job_logs")
self.assertEqual(msg["data"]["content"]["job_id"], job_id)
self.assertFalse(msg["data"]["content"]["latest"])
self.assertEqual(msg["data"]["content"]["first"], 0)
self.assertEqual(msg["data"]["content"]["max_lines"], 100)
self.assertEqual(len(msg["data"]["content"]["lines"]), 10)
@mock.patch(
"biokbase.narrative.clients.get", get_mock_client
)
def test_handle_cancel_job_msg_with_job_id_list(self):
job_id_list = [JOB_COMPLETED]
req = make_comm_msg("cancel_job", job_id_list, False)
self.jc._handle_comm_message(req)
msg = self.jc._comm.last_message
self.assertEqual(msg["data"]["msg_type"], "job_status")
class JobRequestTestCase(unittest.TestCase):
    """
    Test the JobRequest module.
    This makes sure that it knows what to do with ok requests, bad requests,
    etc.
    """
    def test_request_ok(self):
        """A well-formed message exposes msg_id/request; absent job IDs raise."""
        rq_msg = {
            "msg_id": "some_id",
            "content": {"data": {"request_type": "a_request"}},
        }
        rq = JobRequest(rq_msg)
        self.assertEqual(rq.msg_id, "some_id")
        self.assertEqual(rq.request, "a_request")
        # job_id / job_id_list are accessors that raise when not supplied.
        with self.assertRaisesRegex(JobIDException, JOB_NOT_PROVIDED_ERR):
            rq.job_id
        with self.assertRaisesRegex(JobIDException, JOBS_NOT_PROVIDED_ERR):
            rq.job_id_list
    def test_request_no_data(self):
        """A message without a content.data section is rejected."""
        rq_msg = {"msg_id": "some_id", "content": {}}
        with self.assertRaises(ValueError) as e:
            JobRequest(rq_msg)
        self.assertIn("Improperly formatted job channel message!", str(e.exception))
    def test_request_no_req(self):
        """A missing or None request_type is rejected."""
        rq_msg = {"msg_id": "some_id", "content": {"data": {"request_type": None}}}
        rq_msg2 = {"msg_id": "some_other_id", "content": {"data": {}}}
        for msg in [rq_msg, rq_msg2]:
            with self.assertRaises(ValueError) as e:
                JobRequest(msg)
            self.assertIn(
                "Missing request type in job channel message!", str(e.exception)
            )
    def test_request_both_inputs(self):
        """Supplying both job_id and job_id_list is ambiguous and rejected."""
        msg = {
            "msg_id": "some_id",
            "content": {"data": {
                "request_type": "job_status",
                "job_id": "ababab",
                "job_id_list": []
            }},
        }
        with self.assertRaisesRegex(ValueError, "Both job_id and job_id_list present"):
            JobRequest(msg)
    def test_request__no_input(self):
        """With neither ID field the request builds, but ID access raises."""
        msg = {
            "msg_id": "some_id",
            "content": {"data": {
                "request_type": "job_status",
            }},
        }
        req = JobRequest(msg)
        with self.assertRaisesRegex(JobIDException, JOB_NOT_PROVIDED_ERR):
            req.job_id
        with self.assertRaisesRegex(JobIDException, JOBS_NOT_PROVIDED_ERR):
            req.job_id_list
    def _check_rq_equal(self, rq0, rq1):
        # Field-by-field equality helper for two JobRequest instances.
        self.assertEqual(rq0.msg_id, rq1.msg_id)
        self.assertEqual(rq0.rq_data, rq1.rq_data)
        self.assertEqual(rq0.request, rq1.request)
        self.assertEqual(rq0.job_id, rq1.job_id)
    def test_convert_to_using_job_id_list(self):
        """A single job_id request converts into a one-element job_id_list."""
        rq_msg = make_comm_msg("a_request", "a", False)
        rq = JobRequest._convert_to_using_job_id_list(rq_msg)
        self.assertEqual(rq.request, "a_request")
        with self.assertRaisesRegex(JobIDException, JOB_NOT_PROVIDED_ERR):
            rq.job_id
        self.assertEqual(rq.job_id_list, ["a"])
    def test_split_request_by_job_id(self):
        """A job_id_list request splits into one request per job ID."""
        rq_msg = make_comm_msg("a_request", ["a", "b", "c"], False)
        rqa0 = make_comm_msg("a_request", "a", True)
        rqb0 = make_comm_msg("a_request", "b", True)
        rqc0 = make_comm_msg("a_request", "c", True)
        rqa1, rqb1, rqc1 = JobRequest._split_request_by_job_id(rq_msg)
        self._check_rq_equal(rqa0, rqa1)
        self._check_rq_equal(rqb0, rqb1)
        self._check_rq_equal(rqc0, rqc1)
    def test_translate_require_job_id(self):
        """Request types requiring job_id fan a list out into one request each."""
        rq_msg = make_comm_msg(JobRequest.REQUIRE_JOB_ID[0], "a", False)
        rqs = JobRequest.translate(rq_msg)
        self.assertEqual(len(rqs), 1)
        self.assertEqual(rqs[0].job_id, "a")
        with self.assertRaisesRegex(JobIDException, JOBS_NOT_PROVIDED_ERR):
            rqs[0].job_id_list
        rq_msg = make_comm_msg(JobRequest.REQUIRE_JOB_ID[0], ["a", "b"], False)
        rqs = JobRequest.translate(rq_msg)
        self.assertEqual(len(rqs), 2)
        self.assertEqual(rqs[0].job_id, "a")
        with self.assertRaisesRegex(JobIDException, JOBS_NOT_PROVIDED_ERR):
            rqs[0].job_id_list
        self.assertEqual(rqs[1].job_id, "b")
        with self.assertRaisesRegex(JobIDException, JOBS_NOT_PROVIDED_ERR):
            rqs[1].job_id_list
    def test_translate_require_job_id_list(self):
        """Request types requiring job_id_list wrap a single ID into a list."""
        rq_msg = make_comm_msg(JobRequest.REQUIRE_JOB_ID_LIST[0], "a", False)
        rqs = JobRequest.translate(rq_msg)
        self.assertEqual(len(rqs), 1)
        with self.assertRaisesRegex(JobIDException, JOB_NOT_PROVIDED_ERR):
            rqs[0].job_id
        self.assertEqual(rqs[0].job_id_list, ["a"])
        rq_msg = make_comm_msg(JobRequest.REQUIRE_JOB_ID_LIST[0], ["a", "b"], False)
        rqs = JobRequest.translate(rq_msg)
        self.assertEqual(len(rqs), 1)
        with self.assertRaisesRegex(JobIDException, JOB_NOT_PROVIDED_ERR):
            rqs[0].job_id
        self.assertEqual(rqs[0].job_id_list, ["a", "b"])
    def test_translate_doesnt_require_any_job_ids(self):
        """Request types with no ID requirement translate to a single request."""
        rq_msg = make_comm_msg("all_status", None, False)
        rqs = JobRequest.translate(rq_msg)
        self.assertEqual(len(rqs), 1)
    def test_translate_both_inputs(self):
        """translate() also rejects messages carrying both ID fields."""
        msg = {
            "msg_id": "some_id",
            "content": {"data": {
                "request_type": "job_status",
                "job_id": "ababab",
                "job_id_list": []
            }},
        }
        with self.assertRaisesRegex(ValueError, "Both job_id and job_id_list present"):
            JobRequest.translate(msg)
class exc_to_msgTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = None
cls.jc = JobComm()
cls.jc._comm = MockComm()
def setUp(self):
self.jc._comm.clear_message_cache()
    @staticmethod
    def foo(req, f):
        # Helper: run f() inside the exc_to_msg context manager for `req`.
        with exc_to_msg(req):
            f()
    @staticmethod
    def bar(req, f, indic):
        # Helper: run f() inside exc_to_msg, recording control flow in `indic`:
        # "A" = f raised (exception re-raised so exc_to_msg sees it),
        # "B" = f returned normally, "C" = exc_to_msg body completed.
        with exc_to_msg(req):
            try:
                f()
            except Exception:
                indic += ["A"]
                raise
            indic += ["B"]
        indic += ["C"]
def test_with_nested_try__raise(self):
job_id_list = [BATCH_TERMINATED, JOB_TERMINATED]
req_type = "stop_job_status_loop"
req = make_comm_msg(req_type, job_id_list, True)
message = (
"Casteism is the investment in keeping the hierarchy "
"as it is in order to maintain your own ranking, advantage, "
"privelige, or to elevate yourself above others or keep "
"others beneath you"
)
def f():
raise RuntimeError(message)
with self.assertRaisesRegex(RuntimeError, message):
f_var = []
self.bar(req, f, f_var)
self.assertEqual(["A"], f_var)
msg = self.jc._comm.last_message
self.assertEqual(
{
"msg_type": "job_comm_error",
"content": {
"source": req_type,
"job_id_list": job_id_list,
"name": "RuntimeError",
"message": message,
}
},
msg["data"]
)
def test_with_nested_try__succeed(self):
job_id_list = [BATCH_ERROR_RETRIED, JOB_RUNNING]
req_type = "cancel_job"
req = make_comm_msg(req_type, job_id_list, True)
message = (
"If the majority knew of the root of this evil, "
"then the road to its cure would not be long."
)
def f():
print(message)
f_var = []
self.bar(req, f, f_var)
self.assertEqual(["B", "C"], f_var)
msg = self.jc._comm.last_message
self.assertIsNone(msg)
def test_NarrativeException(self):
job_id_list = BATCH_CHILDREN
req_type = "start_job_status_loop"
req = make_comm_msg(req_type, job_id_list, True)
message = (
"In a similar experiment, conducted in Stanford in 1975 ... "
"Participants gave the dehumanized people twice the punishment "
"of the humanized ones and significantly more than the ones "
"they knew nothing about"
)
error = "Unable to perform this request"
def f():
raise transform_job_exception(Exception(message), error)
with self.assertRaisesRegex(NarrativeException, message):
self.foo(req, f)
msg = self.jc._comm.last_message
self.assertEqual(
{
"msg_type": "job_comm_error",
"content": {
"source": req_type,
"job_id_list": job_id_list,
# Below are from transform_job_exception
"name": "Exception",
"message": message,
"error": error,
"code": -1,
}
},
msg["data"]
)
def test_JobIDException(self):
job_id = BATCH_PARENT
req_type = "job_info_all"
req = make_comm_msg(req_type, job_id, True)
message = (
"Because even if I should speak, "
"no one would believe me. "
"And they would not believe me precisely because "
"they would know that what I said was true."
)
def f():
raise JobIDException(message, "a0a0a0")
with self.assertRaisesRegex(JobIDException, f"{message}: a0a0a0"):
self.foo(req, f)
msg = self.jc._comm.last_message
self.assertEqual(
{
"msg_type": "job_comm_error",
"content": {
"source": req_type,
"job_id": job_id,
"name": "JobIDException",
"message": f"{message}: a0a0a0",
}
},
msg["data"]
)
def test_ValueError(self):
job_id_list = [JOB_RUNNING, JOB_COMPLETED]
req_type = "job_status"
req = make_comm_msg(req_type, job_id_list, True)
message = (
"Caste is the granting or withholding of respect, status, "
"honor, attention, privileges, resources, benefit of the "
"doubt, and human kindness to someone on the basis of their "
"perceived rank or standing in the hierarchy"
)
def f():
raise ValueError(message)
with self.assertRaisesRegex(ValueError, message):
self.foo(req, f)
msg = self.jc._comm.last_message
self.assertEqual(
{
"msg_type": "job_comm_error",
"content": {
"source": req_type,
"job_id_list": job_id_list,
"name": "ValueError",
"message": message,
}
},
msg["data"]
)
def test_dict_req__no_err(self):
job_id = JOB_ERROR
req_type = "retry_job"
req = make_comm_msg(req_type, job_id, False)
message = (
"Casteism can mean seeking to keep those on your "
"disfavored rung from gaining on you, to curry the "
"favor and remain in the good graces of the dominant "
"caste, all of which serve to keep the structure intact."
)
def f():
print(message)
self.foo(req, f)
msg = self.jc._comm.last_message
self.assertIsNone(msg)
def test_dict_req__error_down_the_stack(self):
job_id = JOB_CREATED
req_type = "job_status"
req = make_comm_msg(req_type, job_id, False)
message = (
"Caste is the granting or witholding of "
"respect, status, honor, attention, priveliges, "
"resources, benefit of the doubt, and human "
"kindness to someone on the basis of their "
"perceived rank or standing in the hierarchy."
)
# Throw a few frames into stack
def f(i=5):
if i == 0:
raise ValueError(message)
else:
f(i - 1)
with self.assertRaisesRegex(ValueError, message):
self.foo(req, f)
msg = self.jc._comm.last_message
self.assertEqual(
{
"msg_type": "job_comm_error",
"content": {
"source": req_type,
"job_id": job_id,
"name": "ValueError",
"message": message,
}
},
msg["data"]
)
def test_dict_req__both_inputs(self):
job_id = JOB_CREATED
job_id_list = []
req_type = "job_status"
# can give it both job_id and job_id_list since it's not a JobRequest
req = make_comm_msg(req_type, job_id, False)
req["content"]["data"]["job_id_list"] = job_id_list
message = (
"What some people call racism could be seen as merely "
"one manifestation of the degree to which we have internalized "
"the larger American caste system."
)
def f():
raise ValueError(message)
with self.assertRaisesRegex(ValueError, message):
self.foo(req, f)
msg = self.jc._comm.last_message
self.assertEqual(
{
"msg_type": "job_comm_error",
"content": {
"source": req_type,
"job_id": job_id,
"job_id_list": job_id_list,
"name": "ValueError",
"message": message,
}
},
msg["data"]
)
def test_None_req(self):
source = None
message = "Hi"
err = ValueError(message)
def f():
raise err
with self.assertRaisesRegex(type(err), str(err)):
self.foo(source, f)
msg = self.jc._comm.last_message
self.assertEqual(
{
"msg_type": "job_comm_error",
"content": {
"source": source,
"name": "ValueError",
"message": message,
}
},
msg["data"]
)
def test_str_req(self):
source = "test_jobcomm"
message = "Hi"
err = ValueError(message)
def f():
raise err
with self.assertRaisesRegex(type(err), str(err)):
self.foo(source, f)
msg = self.jc._comm.last_message
self.assertEqual(
{
"msg_type": "job_comm_error",
"content": {
"source": source,
"name": "ValueError",
"message": message,
}
},
msg["data"]
)
| 35.822887
| 102
| 0.570518
|
4a101275b0bcb4402effa8fde80a3beb5034e6cf
| 2,918
|
py
|
Python
|
dsgrid/utils/utilities.py
|
dsgrid/dsgrid
|
a5eb4a0865b18fb5eb5fee838a37aa83f09e00d3
|
[
"BSD-3-Clause"
] | 4
|
2021-12-10T21:20:45.000Z
|
2022-01-05T00:27:53.000Z
|
dsgrid/utils/utilities.py
|
dsgrid/dsgrid
|
a5eb4a0865b18fb5eb5fee838a37aa83f09e00d3
|
[
"BSD-3-Clause"
] | 21
|
2021-11-04T18:39:37.000Z
|
2022-03-21T21:28:23.000Z
|
dsgrid/utils/utilities.py
|
dsgrid/dsgrid
|
a5eb4a0865b18fb5eb5fee838a37aa83f09e00d3
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Helpful utility functions for dsgrid
"""
import logging
import inspect
import json
import os
from enum import Enum
from prettytable import PrettyTable
try:
from IPython.display import display, HTML
from IPython import get_ipython
from ipykernel.zmqshell import ZMQInteractiveShell
_IPYTHON_INSTALLED = True
except ImportError:
_IPYTHON_INSTALLED = False
from dsgrid.exceptions import DSGJSONError
logger = logging.getLogger(__name__)
def safe_json_load(fpath):
    """Perform a json file load with better exception handling.

    Parameters
    ----------
    fpath : str
        Filepath to .json file.

    Returns
    -------
    dict
        Loaded json dictionary.

    Raises
    ------
    TypeError
        If ``fpath`` is not a string.
    DSGJSONError
        If the path does not end in ``.json``, does not exist, or contains
        invalid JSON.

    Examples
    --------
    >>> json_path = "./path_to_json.json"
    >>> safe_json_load(json_path)
    {key1: value1,
     key2: value2}
    """
    if not isinstance(fpath, str):
        raise TypeError("Filepath must be str to load json: {}".format(fpath))
    if not fpath.endswith(".json"):
        raise DSGJSONError("Filepath must end in .json to load json: {}".format(fpath))
    if not os.path.isfile(fpath):
        raise DSGJSONError("Could not find json file to load: {}".format(fpath))

    try:
        with open(fpath, "r") as f:
            return json.load(f)
    except json.decoder.JSONDecodeError as e:
        # Chain the original decode error so the root cause is preserved
        # (the previous version dropped it from the traceback context).
        emsg = 'JSON Error:\n{}\nCannot read json file: "{}"'.format(e, fpath)
        raise DSGJSONError(emsg) from e
def get_class_properties(cls):
    """Return the names of all ``property`` attributes of *cls*.

    Used to check against config keys.

    Returns
    -------
    list
        Property names, each of which should represent a valid
        config key/entry.
    """
    members = inspect.getmembers(cls)
    return [name for name, value in members if isinstance(value, property)]
def check_uniqueness(iterable, tag):
    """Raise ValueError on the first duplicate entry in *iterable*.

    Parameters
    ----------
    iterable : list | generator
    tag : str
        tag to add to the exception string
    """
    seen = set()
    for entry in iterable:
        if entry in seen:
            raise ValueError(f"duplicate {tag}: {entry}")
        seen.add(entry)
def list_enum_values(enum: Enum):
    """Return the ``value`` of every member of *enum*, in definition order."""
    return [member.value for member in enum]
def in_jupyter_notebook():
    """Returns True if the current interpreter is running in a Jupyter notebook.

    Returns
    -------
    bool
    """
    # Short-circuits to False when IPython is not installed; otherwise a
    # notebook kernel is identified by its ZMQInteractiveShell.
    return _IPYTHON_INSTALLED and isinstance(get_ipython(), ZMQInteractiveShell)
def display_table(table: PrettyTable):
    """Displays a table in an ASCII or HTML format as determined by the current interpreter.

    Parameters
    ----------
    table : PrettyTable
    """
    if not in_jupyter_notebook():
        print(table)
    else:
        display(HTML(table.get_html_string()))
| 21.93985
| 92
| 0.641535
|
4a1012c4f6515ccf32c600b3a8b1c754ac0bef28
| 886
|
py
|
Python
|
python_modules/dagster/dagster/utils/yaml_utils.py
|
bambielli-flex/dagster
|
30b75ba7c62fc536bc827f177c1dc6ba20f5ae20
|
[
"Apache-2.0"
] | 1
|
2019-07-15T17:34:04.000Z
|
2019-07-15T17:34:04.000Z
|
python_modules/dagster/dagster/utils/yaml_utils.py
|
bambielli-flex/dagster
|
30b75ba7c62fc536bc827f177c1dc6ba20f5ae20
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster/utils/yaml_utils.py
|
bambielli-flex/dagster
|
30b75ba7c62fc536bc827f177c1dc6ba20f5ae20
|
[
"Apache-2.0"
] | null | null | null |
import glob
import yaml
from dagster import check
from .merger import dict_merge
def load_yaml_from_globs(*globs):
    '''Load and merge the YAML files matching any of the given glob patterns.'''
    return load_yaml_from_glob_list([*globs])
def load_yaml_from_glob_list(glob_list):
    '''Expand each glob pattern, require at least one match, and merge the
    matching YAML files into a single dict.'''
    check.list_param(glob_list, 'glob_list', of_type=str)

    matched_files = []
    for pattern in glob_list:
        matched_files += glob.glob(pattern)

    check.invariant(matched_files, 'Config file(s) not found at path(s) {}'.format(glob_list))
    return merge_yamls(matched_files)
def merge_yamls(file_list):
    '''Load every YAML file in *file_list* and fold them into one dict via
    dict_merge. (Merge precedence between files is whatever dict_merge
    gives the accumulated result, which is passed as the second argument.)'''
    check.list_param(file_list, 'file_list', of_type=str)

    result = {}
    for path in file_list:
        loaded = load_yaml_from_path(path) or {}
        result = dict_merge(loaded, result)
    return result
def load_yaml_from_path(path):
    '''Parse the YAML file at *path* and return the resulting object.

    Uses yaml.safe_load: plain yaml.load without an explicit Loader is
    deprecated since PyYAML 5.1 (and a TypeError in PyYAML 6), and can
    construct arbitrary Python objects from untrusted input.
    '''
    check.str_param(path, 'path')
    with open(path, 'r') as ff:
        return yaml.safe_load(ff)
| 23.315789
| 95
| 0.724605
|
4a1015a13e94f62a15e47cc23fc254f0d7306f66
| 26,071
|
py
|
Python
|
gsm/utils.py
|
kernsuite-debian/gsm
|
a41e8b3ea26db94b3deb93bd413baa4a389fcaaa
|
[
"BSD-3-Clause"
] | null | null | null |
gsm/utils.py
|
kernsuite-debian/gsm
|
a41e8b3ea26db94b3deb93bd413baa4a389fcaaa
|
[
"BSD-3-Clause"
] | null | null | null |
gsm/utils.py
|
kernsuite-debian/gsm
|
a41e8b3ea26db94b3deb93bd413baa4a389fcaaa
|
[
"BSD-3-Clause"
] | null | null | null |
# LOFAR IMAGING PIPELINE
#
# BBS Source Catalogue List
# Bart Scheers, 2011
# L.H.A.Scheers@uva.nl
# ------------------------------------------------------------------------------
import sys, string, logging
import numpy as np
from pymonetdb.exceptions import Error as DBError
from gsm.exceptions import GSMException
from gsm.db.qf import queryfile
def expected_fluxes_in_fov(conn, basecat, ra_central, decl_central, fov_radius,
                           assoc_theta, bbsfile, flux_cutoff, patchname,
                           storespectraplots, deruiter_radius=3.717):
    """Prepare the input parameters for the cross-match query and run it.

    If the central ra, dec with the fov_radius cross the 0/360 meridian,
    the wrap-around ("q_across_ra0") variant of the query is used, with two
    RA intervals [ra_min1, ra_max1] and [ra_min2, ra_max2].

    deruiter_radius is the dimensionless association radius (see thesis
    Bart Scheers); if it is not positive it is read from the TKP config,
    falling back to 3.717.
    """
    DERUITER_R = deruiter_radius
    if DERUITER_R <= 0:
        try:
            from tkp.config import config
            DERUITER_R = config['source_association']['deruiter_radius']
            ##print "DERUITER_R =",DERUITER_R
        except Exception:
            # Narrowed from a bare "except:"; any failure to read the TKP
            # config falls back to the default radius.
            DERUITER_R = 3.717
    deRuiter_reduced = DERUITER_R / 3600.

    # Half-width of the RA interval covered by the FoV at this declination
    # (hoisted: alpha() was previously re-evaluated up to six times).
    ra_width = alpha(decl_central, fov_radius)

    if ra_central - ra_width < 0 and ra_central + ra_width > 360:
        # This combined case was previously tested AFTER the two single-wrap
        # branches and therefore unreachable; it must be checked first.
        raise BaseException("ra = %s > 360 degrees, not implemented yet"
                            % str(ra_central + ra_width))
    elif ra_central - ra_width < 0:
        # FoV wraps below RA 0: split into [ra_min1, 360] and [0, ra_max2].
        # np.float64 replaces the removed np.float alias (the no-wrap branch
        # below already used np.float64).
        ra_min1 = np.float64(ra_central - ra_width + 360.0)
        ra_max1 = np.float64(360.0)
        ra_min2 = np.float64(0.0)
        ra_max2 = np.float64(ra_central + ra_width)
        q = "q_across_ra0"
    elif ra_central + ra_width > 360:
        # FoV wraps above RA 360.
        ra_min1 = np.float64(ra_central - ra_width)
        ra_max1 = np.float64(360.0)
        ra_min2 = np.float64(0.0)
        ra_max2 = np.float64(ra_central + ra_width - 360)
        q = "q_across_ra0"
    else:
        ra_min = np.float64(ra_central - ra_width)
        ra_max = np.float64(ra_central + ra_width)
        q = "q0"

    if basecat == "TGSS":
        if q == "q0":
            qf = queryfile('db/sql/cm_tgss.sql')
            with open(qf, 'r') as f:
                qu = f.read()
            params = {'decl_central': decl_central
                     ,'ra_central': ra_central
                     ,'fov_radius': fov_radius
                     ,'assoc_theta': assoc_theta
                     ,'deRuiter_reduced': deRuiter_reduced
                     ,'tgss_flux_cutoff': flux_cutoff}
            query = qu % (params)
        else:
            qf = queryfile('db/sql/cm_wrap_tgss.sql')
            with open(qf, 'r') as f:
                qu = f.read()
            params = {'decl_central': decl_central
                     ,'ra_central': ra_central
                     ,'ra_min1': ra_min1
                     ,'ra_max1': ra_max1
                     ,'ra_min2': ra_min2
                     ,'ra_max2': ra_max2
                     ,'fov_radius': fov_radius
                     ,'assoc_theta': assoc_theta
                     ,'deRuiter_reduced': deRuiter_reduced
                     ,'tgss_flux_cutoff': flux_cutoff}
            query = qu % (params)
        expected_fluxes_in_fov_tgss(conn, query, bbsfile, storespectraplots, patchname='')
    else:
        if q == "q0":
            qf = queryfile('db/sql/cm_vlss.sql')
            with open(qf, 'r') as f:
                qu = f.read()
            params = {'izone_min': int(np.floor(decl_central - fov_radius))
                     ,'izone_max': int(np.floor(decl_central + fov_radius))
                     ,'idecl_min': np.float64(decl_central - fov_radius)
                     ,'idecl_max': np.float64(decl_central + fov_radius)
                     ,'ira_min': ra_min
                     ,'ira_max': ra_max
                     ,'ix': np.cos(np.radians(decl_central)) * np.cos(np.radians(ra_central))
                     ,'iy': np.cos(np.radians(decl_central)) * np.sin(np.radians(ra_central))
                     ,'iz': np.sin(np.radians(decl_central))
                     ,'cosradfov_radius': np.cos(np.radians(fov_radius))
                     ,'assoc_theta': assoc_theta
                     ,'deRuiter_reduced': deRuiter_reduced
                     ,'vlss_flux_cutoff': flux_cutoff}
            query = qu % (params)
        else:
            qf = queryfile('db/sql/cm_wrap_vlss.sql')
            with open(qf, 'r') as f:
                qu = f.read()
            params = {'decl_central': decl_central
                     ,'ra_central': ra_central
                     ,'ra_min1': ra_min1
                     ,'ra_max1': ra_max1
                     ,'ra_min2': ra_min2
                     ,'ra_max2': ra_max2
                     ,'fov_radius': fov_radius
                     ,'assoc_theta': assoc_theta
                     ,'deRuiter_reduced': deRuiter_reduced
                     ,'vlss_flux_cutoff': flux_cutoff}
            query = qu % (params)
        expected_fluxes_in_fov_vlss(conn, query, bbsfile, storespectraplots, patchname='')
def expected_fluxes_in_fov_vlss(conn, query, bbsfile, storespectraplots, patchname=''):
    # TODO: We should include TGSS in the search as well!
    """Search for VLSS, WENSS and NVSS sources that are in the given FoV.
    The FoV is set by its central position (ra_central, decl_central)
    out to a radius of fov_radius. The query looks for cross-matches
    around the sources, out to a radius of assoc_theta.
    All units are in degrees.
    deruiter_radius is a measure for the association uncertainty that takes
    position errors into account (see thesis Bart Scheers). If not given
    as a positive value, it is read from the TKP config file. If not
    available, it defaults to 3.717.
    The query returns all vlss sources (id) that are in the FoV.
    If so, the counterparts from other catalogues are returned as well
    (also their ids).
    If patchname is given, all sources get that patch name and the center of
    the patch is given central ra/dec. Its brightness is the summed flux.
    """
    status = True
    bbsrows = []
    totalFlux = 0.
    # This is dimensionless search radius that takes into account
    # the ra and decl difference between two sources weighted by
    # their positional errors.
    ## deRuiter_reduced = DERUITER_R/3600.
    try:
        cursor = conn.cursor()
        cursor.execute(query)
        # Transpose the result set: results[k] is the k-th SELECT column.
        results = zip(*cursor.fetchall())
        cursor.close()
        if len(results) == 0:
            raise GSMException("No sources found, so Sky Model File %s is not created" % (bbsfile,))
    except DBError, e:
        logging.warn("Failed on query nr %s; for reason %s" % (query, e))
        raise
    # Column unpacking: order must match the SELECT list in the SQL template.
    # The *_err and *_assoc_* columns are unused below but kept to document
    # the column layout.
    vlss_catsrcid = results[0]
    vlss_name = results[1]
    wenssm_catsrcid = results[2]
    wenssp_catsrcid = results[3]
    nvss_catsrcid = results[4]
    v_flux = results[5]
    wm_flux = results[6]
    wp_flux = results[7]
    n_flux = results[8]
    v_flux_err = results[9]
    wm_flux_err = results[10]
    wp_flux_err = results[11]
    n_flux_err = results[12]
    wm_assoc_distance_arcsec = results[13]
    wm_assoc_r = results[14]
    wp_assoc_distance_arcsec = results[15]
    wp_assoc_r = results[16]
    n_assoc_distance_arcsec = results[17]
    n_assoc_r = results[18]
    pa = results[19]
    major = results[20]
    minor = results[21]
    ra = results[22]
    decl = results[23]
    spectrumfiles = []
    # Check for duplicate vlss_names. This may arise when a VLSS source
    # is associated with one or more (genuine) counterparts.
    # Eg., if two NVSS sources are seen as counterparts
    # VLSS - WENSS - NVSS_1
    # VLSS - WENSS - NVSS_2
    # two rows will be added to the sky model, where the VLSS name
    # is postfixed with _0 and _1, resp.
    import collections
    items = collections.defaultdict(list)
    src_name = list(vlss_name)
    for i, item in enumerate(src_name):
        items[item].append(i)
    for item, locs in items.iteritems():
        if len(locs) > 1:
            #print "duplicates of", item, "at", locs
            for j in range(len(locs)):
                src_name[locs[j]] = src_name[locs[j]] + "_" + str(j)
    if len(results) != 0:
        for i in range(len(vlss_catsrcid)):
            ##print "\ni = ", i
            bbsrow = ""
            # Here we check the cases for the degree of the polynomial spectral index fit
            #print i, vlss_name[i],vlss_catsrcid[i], wenssm_catsrcid[i], wenssp_catsrcid[i], nvss_catsrcid[i]
            # Write the vlss name of the source (either postfixed or not)
            bbsrow += src_name[i] + ", "
            # According to Jess, only sources that have values for all
            # three are considered as GAUSSIAN
            if pa[i] is not None and major[i] is not None and minor[i] is not None:
                #print "Gaussian:", pa[i], major[i], minor[i]
                bbsrow += "GAUSSIAN, "
            else:
                #print "POINT"
                bbsrow += "POINT, "
            #print "ra = ", ra[i], "; decl = ", decl[i]
            #print "BBS ra = ", ra2bbshms(ra[i]), "; BBS decl = ", decl2bbsdms(decl[i])
            bbsrow += ra2bbshms(ra[i]) + ", " + decl2bbsdms(decl[i]) + ", "
            # Stokes I id default, so field is empty
            #bbsrow += ", "
            # Collect (log10(nu/60MHz), log10(flux)) pairs; the VLSS point
            # is always present since VLSS is the base catalogue here.
            lognu = []
            logflux = []
            lognu.append(np.log10(74.0/60.0))
            logflux.append(np.log10(v_flux[i]))
            if wenssm_catsrcid[i] is not None:
                lognu.append(np.log10(325.0/60.0))
                logflux.append(np.log10(wm_flux[i]))
            if wenssp_catsrcid[i] is not None:
                lognu.append(np.log10(352.0/60.0))
                logflux.append(np.log10(wp_flux[i]))
            if nvss_catsrcid[i] is not None:
                lognu.append(np.log10(1400.0/60.0))
                logflux.append(np.log10(n_flux[i]))
            f = ""
            for j in range(len(logflux)):
                f += str(10**logflux[j]) + "; "
            # TODO: We should include TGSS in the search as well!
            #print "%s:\tvlss = %s; wenssm = %s; wenssp = %s; nvss = %s" % (i, vlss_catsrcid[i], \
            #    wenssm_catsrcid[i], wenssp_catsrcid[i], nvss_catsrcid[i])
            #print "\tlognu = %s" % (lognu)
            #print "\tlogflux = %s" % (logflux)
            # Here we write the expected flux values at 60 MHz, and the fitted spectral index and
            # and curvature term
            if len(lognu) == 1:
                #print "Exp. flux:", 10**(np.log10(v_flux[i]) + 0.7 * np.log10(74.0/60.0))
                #print "Default -0.7"
                # Single point: extrapolate with the default -0.7 index.
                fluxrow = round(10**(np.log10(v_flux[i]) + 0.7 * np.log10(74.0/60.0)), 2)
                totalFlux += fluxrow
                bbsrow += str(fluxrow) + ", , , , , "
                bbsrow += "[-0.7]"
            elif len(lognu) == 2 or (len(lognu) == 3 and nvss_catsrcid[i] is None):
                #print "Do a 1-degree polynomial fit"
                # p has form : p(x) = p[0] + p[1]*x
                p = np.poly1d(np.polyfit(np.array(lognu), np.array(logflux), 1))
                #print p
                if storespectraplots == True:
                    spectrumfile = plotSpectrum(np.array(lognu), np.array(logflux), p, "vlss_%s.eps" % vlss_name[i])
                    spectrumfiles.append(spectrumfile)
                # Default reference frequency is reported, so we leave it empty here;
                # Catalogues just report on Stokes I, so others are empty.
                fluxrow = round(10**p[0], 4)
                totalFlux += fluxrow
                bbsrow += str(fluxrow) + ", , , , , "
                bbsrow += "[" + str(round(p[1], 4)) + "]"
            elif (len(lognu) == 3 and nvss_catsrcid[i] is not None) or len(lognu) == 4:
                #print "Do a 2-degree polynomial fit"
                # p has form : p(x) = p[0] + p[1]*x + p[2]*x**2
                p = np.poly1d(np.polyfit(np.array(lognu), np.array(logflux), 2))
                #print p
                if storespectraplots == True:
                    spectrumfile = plotSpectrum(np.array(lognu), np.array(logflux), p, "vlss_%s.eps" % vlss_name[i])
                    spectrumfiles.append(spectrumfile)
                # Default reference frequency is reported, so we leave it empty here
                # NOTE(review): unlike the two branches above, totalFlux is not
                # incremented here, so the patch flux omits these sources --
                # verify whether that is intended.
                bbsrow += str(round(10**p[0], 4)) + ", , , , , "
                bbsrow += "[" + str(round(p[1],4)) + ", " + str(round(p[2],4)) + "]"
            if pa[i] is not None and major[i] is not None and minor[i] is not None:
                # Gaussian source:
                bbsrow += ", " + str(round(major[i], 2)) + \
                          ", " + str(round(minor[i], 2)) + \
                          ", " + str(round(pa[i], 2))
            #print bbsrow
            bbsrows.append (bbsrow)

    if storespectraplots == True:
        print "Spectra available in:", spectrumfiles

    # Write the format line.
    # Optionally it contains a column containing the patch name.
    skymodel = open(bbsfile, 'w')
    header = "FORMAT = Name, Type, Ra, Dec, I, Q, U, V, ReferenceFrequency='60e6', SpectralIndex='[0.0]', MajorAxis, MinorAxis, Orientation"
    # Add fixed patch name to the header and add a line defining the patch.
    if len(patchname) > 0:
        header += ", patch=fixed'" + patchname + "'\n\n"
        header += "# the next line defines the patch\n"
        # NOTE(review): ra_central and decl_central are not defined in this
        # function's scope -- this line raises NameError whenever patchname
        # is non-empty. All callers in this module pass patchname=''.
        header += ',, ' + ra2bbshms(ra_central) + ', ' + decl2bbsdms(decl_central) + ', ' + str(totalFlux)
    header += "\n\n# the next lines define the sources\n"
    skymodel.write(header)
    for bbsrow in bbsrows:
        skymodel.write(bbsrow + '\n')
    skymodel.close()
    print "Sky model stored in source table: %s" % (bbsfile)
def expected_fluxes_in_fov_tgss(conn, query, bbsfile, storespectraplots=False, patchname=''):
    """Identical to _vlss. Base is TGSS
    """
    status = True
    bbsrows = []
    totalFlux = 0.
    try:
        cursor = conn.cursor()
        cursor.execute(query)
        # Transpose the result set: results[k] is the k-th SELECT column.
        results = zip(*cursor.fetchall())
        cursor.close()
        if len(results) == 0:
            raise GSMException("No sources found, so Sky Model File %s is not created" % (bbsfile,))
    except DBError, e:
        logging.warn("Failed on query nr %s; for reason %s" % (query, e))
        raise
    # Column unpacking: order must match the SELECT list in the SQL template.
    # The *_err and *_assoc_* columns are unused below but kept to document
    # the column layout.
    tgss_catsrcid = results[0]
    tgss_name = results[1]
    wenssm_catsrcid = results[2]
    wenssp_catsrcid = results[3]
    vlss_catsrcid = results[4]
    nvss_catsrcid = results[5]
    t_flux = results[6]
    wm_flux = results[7]
    wp_flux = results[8]
    v_flux = results[9]
    n_flux = results[10]
    t_flux_err = results[11]
    wm_flux_err = results[12]
    wp_flux_err = results[13]
    v_flux_err = results[14]
    n_flux_err = results[15]
    wm_assoc_distance_arcsec = results[16]
    wm_assoc_r = results[17]
    wp_assoc_distance_arcsec = results[18]
    wp_assoc_r = results[19]
    v_assoc_distance_arcsec = results[20]
    v_assoc_r = results[21]
    n_assoc_distance_arcsec = results[22]
    n_assoc_r = results[23]
    pa = results[24]
    major = results[25]
    minor = results[26]
    ra = results[27]
    decl = results[28]
    spectrumfiles = []
    # Check for duplicate tgss_names. This may arise when a VLSS source
    # is associated with one or more (genuine) counterparts.
    # Eg., if two NVSS sources are seen as counterparts
    # VLSS - WENSS - NVSS_1
    # VLSS - WENSS - NVSS_2
    # two rows will be added to the sky model, where the VLSS name
    # is postfixed with _0 and _1, resp.
    import collections
    items = collections.defaultdict(list)
    src_name = list(tgss_name)
    #storespectraplots=False
    for i, item in enumerate(src_name):
        items[item].append(i)
    for item, locs in items.iteritems():
        if len(locs) > 1:
            #print "duplicates of", item, "at", locs
            for j in range(len(locs)):
                src_name[locs[j]] = src_name[locs[j]] + "_" + str(j)
    if len(results) != 0:
        for i in range(len(tgss_catsrcid)):
            ##print "\ni = ", i
            bbsrow = ""
            # Here we check the cases for the degree of the polynomial spectral index fit
            # Write the vlss name of the source (either postfixed or not)
            bbsrow += src_name[i] + ", "
            # According to Jess, only sources that have values for all
            # three are considered as GAUSSIAN
            if pa[i] is not None and major[i] is not None and minor[i] is not None:
                #print "Gaussian:", pa[i], major[i], minor[i]
                bbsrow += "GAUSSIAN, "
            else:
                #print "POINT"
                bbsrow += "POINT, "
            #print "ra = ", ra[i], "; decl = ", decl[i]
            #print "BBS ra = ", ra2bbshms(ra[i]), "; BBS decl = ", decl2bbsdms(decl[i])
            bbsrow += ra2bbshms(ra[i]) + ", " + decl2bbsdms(decl[i]) + ", "
            # Stokes I id default, so filed is empty
            #bbsrow += ", "
            # Collect (log10(nu/60MHz), log10(flux)) pairs in order of
            # increasing frequency.
            lognu = []
            logflux = []
            if vlss_catsrcid[i] is not None:
                lognu.append(np.log10(73.8/60.0))
                logflux.append(np.log10(v_flux[i]))
            # tgss is never None, because it is the basecat.
            # It is kept in the sequence of increasing frequency ratio
            if tgss_catsrcid[i] is not None:
                lognu.append(np.log10(150.0/60.0))
                logflux.append(np.log10(t_flux[i]))
            if wenssm_catsrcid[i] is not None:
                lognu.append(np.log10(325.0/60.0))
                logflux.append(np.log10(wm_flux[i]))
            if wenssp_catsrcid[i] is not None:
                lognu.append(np.log10(352.0/60.0))
                logflux.append(np.log10(wp_flux[i]))
            if nvss_catsrcid[i] is not None:
                lognu.append(np.log10(1400.0/60.0))
                logflux.append(np.log10(n_flux[i]))
            f = ""
            for j in range(len(logflux)):
                f += str(10**logflux[j]) + "; "
            #print "%s:\tvlss = %s; tgss = %s; wenssm = %s; wenssp = %s; nvss = %s" % (i, vlss_catsrcid[i], \
            #    tgss_catsrcid[i], wenssm_catsrcid[i], wenssp_catsrcid[i], nvss_catsrcid[i])
            #print "\tlognu = %s" % (lognu)
            #print "\tlogflux = %s" % (logflux)
            # Here we write the expected flux values at 60 MHz and
            # the fitted spectral index, curvature and higher-order curvature terms
            #if len(lognu)>=4: pass #print "longlognu"
            if len(lognu) == 1:
                # p = p_0
                # Only the TGSS flux: extrapolate with the default -0.73 index.
                fluxrow = round(10**(np.log10(t_flux[i]) + 0.73 * np.log10(150.0/60.0)), 2)
                totalFlux += fluxrow
                bbsrow += str(fluxrow) + ", , , , , "
                bbsrow += "[-0.73]"
            elif len(lognu) == 2 or (len(lognu) == 3
                                     and wenssm_catsrcid[i] is not None
                                     and wenssp_catsrcid[i] is not None):
                # p = p_0 + p_1 x
                # p = np.poly1d(np.polyfit(np.array(lognu), np.array(logflux), 1))
                # NOTE(review): the wenss-pair conditions here and below seem
                # to treat the two WENSS bands as one degree of freedom --
                # confirm against the original fitting intent.
                p = np.poly1d(np.polyfit(np.array(lognu), np.array(logflux), 1))
                if storespectraplots:
                    spectrumfile = plotSpectrum(np.array(lognu), np.array(logflux), p, "tgss_%s.eps" % tgss_name[i])
                    spectrumfiles.append(spectrumfile)
                # Default reference frequency is reported, so we leave it empty here;
                # Catalogues just report on Stokes I, so others are empty.
                fluxrow = round(10**p[0], 4)
                totalFlux += fluxrow
                bbsrow += str(fluxrow) + ", , , , , "
                bbsrow += "[" + str(round(p[1], 4)) + "]"
            elif len(lognu) == 3 or (len(lognu) == 4
                                     and wenssm_catsrcid[i] is not None
                                     and wenssp_catsrcid[i] is not None):
                # p = p_0 + p_1 x + p_2 x^2
                # p = np.poly1d(np.polyfit(np.array(lognu), np.array(logflux), 2))
                p = np.poly1d(np.polyfit(np.array(lognu), np.array(logflux), 2))
                #print p
                if storespectraplots:
                    spectrumfile = plotSpectrum(np.array(lognu), np.array(logflux), p, "tgss_%s.eps" % tgss_name[i])
                    spectrumfiles.append(spectrumfile)
                # Default reference frequency is reported, so we leave it empty here
                # NOTE(review): totalFlux is not incremented in this branch or
                # the next, so a patch's summed flux would exclude these
                # sources -- verify whether that is intended.
                bbsrow += str(round(10**p[0], 4)) + ", , , , , "
                bbsrow += "[" + str(round(p[1],4)) + ", " + str(round(p[2],4)) + "]"
            elif len(lognu) == 4 or len(lognu) == 5:
                # p = p_0 + p_1 x + p_2 x^2 + p_3 x^3
                # p = np.poly1d(np.polyfit(np.array(lognu), np.array(logflux), 3))
                p = np.poly1d(np.polyfit(np.array(lognu), np.array(logflux), 3))
                #print p
                if storespectraplots:
                    spectrumfile = plotSpectrum(np.array(lognu), np.array(logflux), p, "tgss_%s.eps" % (tgss_name[i]))
                    spectrumfiles.append(spectrumfile)
                # Default reference frequency is reported, so we leave it empty here
                bbsrow += str(round(10**p[0], 4)) + ", , , , , "
                bbsrow += "[" + str(round(p[1],4)) + ", " + str(round(p[2],4)) + ", " + str(round(p[3],4)) + "]"
            if pa[i] is not None and major[i] is not None and minor[i] is not None:
                # Gaussian source:
                bbsrow += ", " + str(round(major[i], 2)) + ", " + str(round(minor[i], 2)) + ", " + str(round(pa[i], 2))
            #print bbsrow
            bbsrows.append (bbsrow)

    if storespectraplots:
        print "Spectra available in: %s" % (spectrumfiles)

    # Write the format line.
    # Optionally it contains a column containing the patch name.
    skymodel = open(bbsfile, 'w')
    header = "FORMAT = Name, Type, Ra, Dec, I, Q, U, V, ReferenceFrequency='60e6', SpectralIndex='[0.0]', MajorAxis, MinorAxis, Orientation"
    # Add fixed patch name to the header and add a line defining the patch.
    if len(patchname) > 0:
        header += ", patch=fixed'" + patchname + "'\n\n"
        header += "# the next line defines the patch\n"
        # NOTE(review): ra_central and decl_central are not defined in this
        # function's scope -- this line raises NameError whenever patchname
        # is non-empty. All callers in this module pass patchname=''.
        header += ',, ' + ra2bbshms(ra_central) + ', ' + decl2bbsdms(decl_central) + ', ' + str(totalFlux)
    header += "\n\n# the next lines define the sources\n"
    skymodel.write(header)
    for bbsrow in bbsrows:
        skymodel.write(bbsrow + '\n')
    skymodel.close()
    print "Sky model stored in source table:", bbsfile
def plotSpectrum(x, y, p, f):
    """Plot the catalogue points, the fitted polynomial and the extrapolated
    flux at the 60 MHz reference frequency (log nu/nu_0 = 0), then save the
    figure to file *f* and return *f*.

    x, y : arrays of log10(nu/60MHz) and log10(flux) values
    p    : np.poly1d fit evaluated/plotted over the frequency range
    f    : output filename (e.g. an .eps path)
    """
    import pylab
    expflux = "Exp. flux: " + str(round(10**p(0),3)) + " Jy"
    fig = pylab.figure()
    ax = fig.add_subplot(111)
    # Enlarge the tick labels on both axes.
    for i in range(len(ax.get_xticklabels())):
        ax.get_xticklabels()[i].set_size('x-large')
    for i in range(len(ax.get_yticklabels())):
        ax.get_yticklabels()[i].set_size('x-large')
    ax.set_xlabel(r'$\log \nu/\nu_0$', size='x-large')
    ax.set_ylabel('$\log S$', size='x-large')
    # Roughly between log10(30/60) and log10(1500/60)
    xp = np.linspace(-0.3, 1.5, 100)
    ax.plot(x, y, 'o', label='cat fluxes')
    # Black marker: the extrapolated flux at the reference frequency.
    ax.plot(0.0, p(0), 'o', color='k', label=expflux )
    ax.plot(xp, p(xp), linestyle='--', linewidth=2, label='fit')
    pylab.legend(numpoints=1, loc='best')
    pylab.grid(True)
    pylab.savefig(f, dpi=600)
    pylab.close()
    return f
def decl2bbsdms(d):
"""Based on function deg2dec Written by Enno Middelberg 2001
http://www.atnf.csiro.au/people/Enno.Middelberg/python/python.html
"""
deg = float(d)
sign = "+"
# test whether the input numbers are sane:
# if negative, store "-" in sign and continue calulation
# with positive value
if deg < 0:
sign = "-"
deg = deg * (-1)
#if deg > 180:
# logging.warn("%s: inputs may not exceed 180!" % deg)
# raise
#if deg > 90:
# print `deg`+" exceeds 90, will convert it to negative dec\n"
# deg=deg-90
# sign="-"
if deg < -90 or deg > 90:
logging.warn("%s: inputs may not exceed 90 degrees!" % deg)
hh = int(deg)
mm = int((deg - int(deg)) * 60)
ss = '%10.8f' % (((deg - int(deg)) * 60 - mm) * 60)
#print '\t'+sign+string.zfill(`hh`,2)+':'+string.zfill(`mm`,2)+':'+'%10.8f' % ss
#print '\t'+sign+string.zfill(`hh`,2)+' '+string.zfill(`mm`,2)+' '+'%10.8f' % ss
#print '\t'+sign+string.zfill(`hh`,2)+'h'+string.zfill(`mm`,2)+'m'+'%10.8fs\n' % ss
return sign + string.zfill(`hh`, 2) + '.' + string.zfill(`mm`, 2) + '.' + string.zfill(ss, 11)
def ra2bbshms(a):
    """Convert a right ascension in decimal degrees to BBS "hh:mm:ss.ssssssss"."""
    deg = float(a)
    # Sanity check: RA must lie in [0, 360]. (The previous message wrongly
    # said "90 degrees" -- copy-paste from the declination converter.)
    if deg < 0 or deg > 360:
        logging.warning("%s: inputs must be in the range 0 to 360 degrees!" % deg)
    hh = int(deg / 15)
    mm = int((deg - 15 * hh) * 4)
    ss = '%10.8f' % ((4 * deg - 60 * hh - mm) * 60)
    # str()/str.zfill replace the backquote-repr syntax and string.zfill,
    # both of which were removed in Python 3; output is byte-identical.
    return str(hh).zfill(2) + ':' + str(mm).zfill(2) + ':' + ss.zfill(11)
# To keep it consistent with the database sys.alpha function
# first arg is declination, second radius
def alpha(decl, theta):
    """Return the RA half-width (degrees) of a cone of radius *theta*
    centred at declination *decl* (both in degrees).

    Near the pole the RA range is unbounded, so 180 is returned.
    """
    if abs(decl) + theta > 89.9:
        return 180.0
    # Degree/radian conversions inlined from the module's degrees()/radians()
    # helpers, using the exact same expressions (bit-identical results).
    t = theta * np.pi / 180
    lo = (decl - theta) * np.pi / 180
    hi = (decl + theta) * np.pi / 180
    return abs(np.arctan(np.sin(t) / np.sqrt(abs(np.cos(lo) * np.cos(hi))))) * 180 / np.pi
def degrees(r):
    """Convert an angle from radians to degrees."""
    return (r * 180) / np.pi
def radians(d):
    """Convert an angle from degrees to radians."""
    return (d * np.pi) / 180
| 44.642123
| 140
| 0.545664
|
4a1017204308da16bba3ad1fe17f4c178115bdf7
| 16,699
|
py
|
Python
|
boost/libs/python/pyste/src/Pyste/GCCXMLParser.py
|
randolphwong/mcsema
|
eb5b376736e7f57ff0a61f7e4e5a436bbb874720
|
[
"BSD-3-Clause"
] | 18
|
2016-03-04T15:44:24.000Z
|
2021-12-31T11:06:25.000Z
|
boost/libs/python/pyste/src/Pyste/GCCXMLParser.py
|
randolphwong/mcsema
|
eb5b376736e7f57ff0a61f7e4e5a436bbb874720
|
[
"BSD-3-Clause"
] | 49
|
2016-02-29T17:59:52.000Z
|
2019-05-05T04:59:26.000Z
|
boost/libs/python/pyste/src/Pyste/GCCXMLParser.py
|
randolphwong/mcsema
|
eb5b376736e7f57ff0a61f7e4e5a436bbb874720
|
[
"BSD-3-Clause"
] | 9
|
2015-09-09T02:38:32.000Z
|
2021-01-30T00:24:24.000Z
|
# Copyright Bruno da Silva de Oliveira 2003. Use, modification and
# distribution is subject to the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
from declarations import *
try:
# try to use internal elementtree
from xml.etree.cElementTree import ElementTree
except ImportError:
# try to use cElementTree if avaiable
try:
from cElementTree import ElementTree
except ImportError:
# fall back to the normal elementtree
from elementtree.ElementTree import ElementTree
from xml.parsers.expat import ExpatError
from copy import deepcopy
from utils import enumerate
#==============================================================================
# Exceptions
#==============================================================================
class InvalidXMLError(Exception): pass  # input file is not XML / not a GCC_XML document
class ParserError(Exception): pass  # generic failure while interpreting the parsed elements
class InvalidContextError(ParserError): pass  # element references the invalid context id '_0' (gccxml bug)
#==============================================================================
# GCCXMLParser
#==============================================================================
class GCCXMLParser(object):
    'Parse a GCC_XML file and extract the top-level declarations.'
    # Tags that yield top-level declarations; other element kinds are parsed
    # lazily only when referenced by one of these.
    interested_tags = {'Class':0, 'Function':0, 'Variable':0, 'Enumeration':0}
    def Parse(self, filename):
        'Parse the gcc_xml file and populate self.declarations.'
        self.elements = self.GetElementsFromXML(filename)
        # high level declarations
        self.declarations = []
        self._names = {}
        # parse the elements
        for id in self.elements:
            element, decl = self.elements[id]
            if decl is None:
                try:
                    self.ParseElement(id, element)
                except InvalidContextError:
                    pass # ignore those nodes with invalid context
                         # (workaround gccxml bug)
    def Declarations(self):
        'Return the top-level declarations collected by Parse().'
        return self.declarations
    def AddDecl(self, decl):
        'Register a top-level declaration; same-name duplicates are flagged not unique.'
        if decl.FullName() in self._names:
            decl.is_unique= False
            for d in self.declarations:
                if d.FullName() == decl.FullName():
                    d.is_unique = False
        self._names[decl.FullName()] = 0
        self.declarations.append(decl)
    def ParseElement(self, id, element):
        'Dispatch to the Parse<Tag> handler for this element, or ParseUnknown.'
        method = 'Parse' + element.tag
        if hasattr(self, method):
            func = getattr(self, method)
            func(id, element)
        else:
            self.ParseUnknown(id, element)
    def GetElementsFromXML(self,filename):
        'Extracts a dictionary of elements from the gcc_xml file.'
        tree = ElementTree()
        try:
            tree.parse(filename)
        except ExpatError:
            raise InvalidXMLError, 'Not a XML file: %s' % filename
        root = tree.getroot()
        if root.tag != 'GCC_XML':
            raise InvalidXMLError, 'Not a valid GCC_XML file'
        # build a dictionary of id -> element, None
        elementlist = root.getchildren()
        elements = {}
        for element in elementlist:
            id = element.get('id')
            if id:
                elements[id] = element, None
        return elements
    def GetDecl(self, id):
        'Return the declaration for *id*, parsing the element lazily on first use.'
        if id not in self.elements:
            if id == '_0':
                # gccxml emits '_0' for contexts it could not resolve
                raise InvalidContextError, 'Invalid context found in the xml file.'
            else:
                msg = 'ID not found in elements: %s' % id
                raise ParserError, msg
        elem, decl = self.elements[id]
        if decl is None:
            self.ParseElement(id, elem)
            elem, decl = self.elements[id]
            if decl is None:
                raise ParserError, 'Could not parse element: %s' % elem.tag
        return decl
    def GetType(self, id):
        'Return a Type for *id*, honouring cv-qualifier letters embedded in the id.'
        def Check(id, feature):
            # NOTE(review): find() matches the first occurrence of the letter
            # anywhere in the id; presumably gccxml only appends qualifier
            # letters ('c'/'v'/'r') after the numeric part - confirm.
            pos = id.find(feature)
            if pos != -1:
                id = id[:pos] + id[pos+1:]
                return True, id
            else:
                return False, id
        const, id = Check(id, 'c')
        volatile, id = Check(id, 'v')
        restricted, id = Check(id, 'r')
        decl = self.GetDecl(id)
        if isinstance(decl, Type):
            # deep-copy so the qualifiers do not leak into the shared declaration
            res = deepcopy(decl)
            if const:
                res.const = const
            if volatile:
                res.volatile = volatile
            if restricted:
                res.restricted = restricted
        else:
            res = Type(decl.FullName(), const)
            res.volatile = volatile
            res.restricted = restricted
        return res
    def GetLocation(self, location):
        'Split a gccxml "fileid:line" location string into (filename, line).'
        file, line = location.split(':')
        file = self.GetDecl(file)
        return file, int(line)
    def Update(self, id, decl):
        'Store the parsed declaration back into the element table for *id*.'
        element, _ = self.elements[id]
        self.elements[id] = element, decl
    def ParseUnknown(self, id, element):
        # placeholder declaration for tags without a dedicated handler
        name = '__Unknown_Element_%s' % id
        decl = Unknown(name)
        self.Update(id, decl)
    def ParseNamespace(self, id, element):
        # namespaces are stored as fully-qualified strings, not objects
        namespace = element.get('name')
        context = element.get('context')
        if context:
            outer = self.GetDecl(context)
            if not outer.endswith('::'):
                outer += '::'
            namespace = outer + namespace
        if namespace.startswith('::'):
            namespace = namespace[2:]
        self.Update(id, namespace)
    def ParseFile(self, id, element):
        # files are represented by their name only
        filename = element.get('name')
        self.Update(id, filename)
    def ParseVariable(self, id, element):
        # in gcc_xml, a static Field is declared as a Variable, so we check
        # this and call the Field parser.
        context = self.GetDecl(element.get('context'))
        if isinstance(context, Class):
            self.ParseField(id, element)
            elem, decl = self.elements[id]
            decl.static = True
        else:
            namespace = context
            name = element.get('name')
            type_ = self.GetType(element.get('type'))
            location = self.GetLocation(element.get('location'))
            variable = Variable(type_, name, namespace)
            variable.location = location
            self.AddDecl(variable)
            self.Update(id, variable)
    def GetArguments(self, element):
        'Collect the argument Types (with default values) of a function-like element.'
        args = []
        for child in element:
            if child.tag == 'Argument':
                type = self.GetType(child.get('type'))
                type.default = child.get('default')
                args.append(type)
        return args
    def GetExceptions(self, exception_list):
        'Convert a space-separated id list into Types; None means no throw spec.'
        if exception_list is None:
            return None
        exceptions = []
        for t in exception_list.split():
            exceptions.append(self.GetType(t))
        return exceptions
    def ParseFunction(self, id, element, functionType=Function):
        '''functionType is used because a Operator is identical to a normal
        function, only the type of the function changes.'''
        name = element.get('name')
        returns = self.GetType(element.get('returns'))
        namespace = self.GetDecl(element.get('context'))
        location = self.GetLocation(element.get('location'))
        params = self.GetArguments(element)
        # NOTE(review): 'incomplete' is computed but never used here
        incomplete = bool(int(element.get('incomplete', 0)))
        throws = self.GetExceptions(element.get('throw', None))
        function = functionType(name, namespace, returns, params, throws)
        function.location = location
        self.AddDecl(function)
        self.Update(id, function)
    def ParseOperatorFunction(self, id, element):
        # free operator functions reuse the plain-function parser
        self.ParseFunction(id, element, Operator)
    def GetHierarchy(self, bases):
        '''Parses the string "bases" from the xml into a list of tuples of Base
        instances. The first tuple is the most direct inheritance, and then it
        goes up in the hierarchy.
        '''
        if bases is None:
            return []
        base_names = bases.split()
        this_level = []
        next_levels = []
        for base in base_names:
            # get the visibility
            split = base.split(':')
            if len(split) == 2:
                visib = split[0]
                base = split[1]
            else:
                visib = Scope.public
            decl = self.GetDecl(base)
            if not isinstance(decl, Class):
                # on windows, there are some classes which "bases" points to an
                # "Unimplemented" tag, but we are not interested in this classes
                # anyway
                continue
            base = Base(decl.FullName(), visib)
            this_level.append(base)
            # normalize with the other levels
            for index, level in enumerate(decl.hierarchy):
                if index < len(next_levels):
                    next_levels[index] = next_levels[index] + level
                else:
                    next_levels.append(level)
        hierarchy = []
        if this_level:
            hierarchy.append(tuple(this_level))
        if next_levels:
            hierarchy.extend(next_levels)
        return hierarchy
    def GetMembers(self, member_list):
        # members must be a string with the ids of the members
        if member_list is None:
            return []
        members = []
        for member in member_list.split():
            decl = self.GetDecl(member)
            if type(decl) in Class.ValidMemberTypes():
                members.append(decl)
        return members
    def ParseClass(self, id, element):
        'Parse a Class element into a Class or NestedClass declaration.'
        name = element.get('name')
        abstract = bool(int(element.get('abstract', '0')))
        location = self.GetLocation(element.get('location'))
        context = self.GetDecl(element.get('context'))
        incomplete = bool(int(element.get('incomplete', 0)))
        if isinstance(context, str):
            # context is a namespace string -> top-level class
            class_ = Class(name, context, [], abstract)
        else:
            # a nested class
            visib = element.get('access', Scope.public)
            class_ = NestedClass(
                name, context.FullName(), visib, [], abstract)
        class_.incomplete = incomplete
        # we have to add the declaration of the class before trying
        # to parse its members and bases, to avoid recursion.
        self.AddDecl(class_)
        class_.location = location
        self.Update(id, class_)
        # now we can get the members and the bases
        class_.hierarchy = self.GetHierarchy(element.get('bases'))
        if class_.hierarchy:
            class_.bases = class_.hierarchy[0]
        members = self.GetMembers(element.get('members'))
        for member in members:
            class_.AddMember(member)
    def ParseStruct(self, id, element):
        # structs are handled identically to classes
        self.ParseClass(id, element)
    # gccxml's spelling of 64-bit integer types -> boost typedef names
    FUNDAMENTAL_RENAME = {
        'long long int' : 'boost::int64_t',
        'long long unsigned int' : 'boost::uint64_t',
    }
    def ParseFundamentalType(self, id, element):
        name = element.get('name')
        name = self.FUNDAMENTAL_RENAME.get(name, name)
        type_ = FundamentalType(name)
        self.Update(id, type_)
    def ParseArrayType(self, id, element):
        type = self.GetType(element.get('type'))
        min = element.get('min')
        max = element.get('max')
        array = ArrayType(type.name, type.const, min, max)
        self.Update(id, array)
    def ParseReferenceType(self, id, element):
        type = self.GetType(element.get('type'))
        # function types are not expanded (same policy as ParsePointerType)
        expand = not isinstance(type, FunctionType)
        ref = ReferenceType(type.name, type.const, None, expand, type.suffix)
        self.Update(id, ref)
    def ParsePointerType(self, id, element):
        type = self.GetType(element.get('type'))
        expand = not isinstance(type, FunctionType)
        ref = PointerType(type.name, type.const, None, expand, type.suffix)
        self.Update(id, ref)
    def ParseFunctionType(self, id, element):
        result = self.GetType(element.get('returns'))
        args = self.GetArguments(element)
        func = FunctionType(result, args)
        self.Update(id, func)
    def ParseMethodType(self, id, element):
        class_ = self.GetDecl(element.get('basetype')).FullName()
        result = self.GetType(element.get('returns'))
        args = self.GetArguments(element)
        method = MethodType(result, args, class_)
        self.Update(id, method)
    def ParseField(self, id, element):
        'Parse a class data member (also reached via ParseVariable for statics).'
        name = element.get('name')
        visib = element.get('access', Scope.public)
        classname = self.GetDecl(element.get('context')).FullName()
        type_ = self.GetType(element.get('type'))
        static = bool(int(element.get('extern', '0')))
        location = self.GetLocation(element.get('location'))
        var = ClassVariable(type_, name, classname, visib, static)
        var.location = location
        self.Update(id, var)
    def ParseMethod(self, id, element, methodType=Method):
        'Parse a member function; methodType selects Method/ClassOperator/ConverterOperator.'
        name = element.get('name')
        result = self.GetType(element.get('returns'))
        classname = self.GetDecl(element.get('context')).FullName()
        visib = element.get('access', Scope.public)
        static = bool(int(element.get('static', '0')))
        virtual = bool(int(element.get('virtual', '0')))
        abstract = bool(int(element.get('pure_virtual', '0')))
        const = bool(int(element.get('const', '0')))
        location = self.GetLocation(element.get('location'))
        throws = self.GetExceptions(element.get('throw', None))
        params = self.GetArguments(element)
        method = methodType(
            name, classname, result, params, visib, virtual, abstract, static, const, throws)
        method.location = location
        self.Update(id, method)
    def ParseOperatorMethod(self, id, element):
        self.ParseMethod(id, element, ClassOperator)
    def ParseConstructor(self, id, element):
        name = element.get('name')
        visib = element.get('access', Scope.public)
        classname = self.GetDecl(element.get('context')).FullName()
        location = self.GetLocation(element.get('location'))
        params = self.GetArguments(element)
        # NOTE(review): 'artificial' (compiler-generated flag) is read but never used
        artificial = element.get('artificial', False)
        ctor = Constructor(name, classname, params, visib)
        ctor.location = location
        self.Update(id, ctor)
    def ParseDestructor(self, id, element):
        name = element.get('name')
        visib = element.get('access', Scope.public)
        classname = self.GetDecl(element.get('context')).FullName()
        virtual = bool(int(element.get('virtual', '0')))
        location = self.GetLocation(element.get('location'))
        des = Destructor(name, classname, visib, virtual)
        des.location = location
        self.Update(id, des)
    def ParseConverter(self, id, element):
        self.ParseMethod(id, element, ConverterOperator)
    def ParseTypedef(self, id, element):
        name = element.get('name')
        type = self.GetType(element.get('type'))
        context = self.GetDecl(element.get('context'))
        if isinstance(context, Class):
            context = context.FullName()
        typedef = Typedef(type, name, context)
        self.Update(id, typedef)
        self.AddDecl(typedef)
    def ParseEnumeration(self, id, element):
        name = element.get('name')
        location = self.GetLocation(element.get('location'))
        context = self.GetDecl(element.get('context'))
        incomplete = bool(int(element.get('incomplete', 0)))
        if isinstance(context, str):
            enum = Enumeration(name, context)
        else:
            visib = element.get('access', Scope.public)
            enum = ClassEnumeration(name, context.FullName(), visib)
        self.AddDecl(enum)
        enum.location = location
        # child EnumValue elements carry the enumerator name/value pairs
        for child in element:
            if child.tag == 'EnumValue':
                name = child.get('name')
                value = int(child.get('init'))
                enum.values[name] = value
        enum.incomplete = incomplete
        self.Update(id, enum)
def ParseDeclarations(filename):
    'Returns a list of the top declarations found in the gcc_xml file.'
    xml_parser = GCCXMLParser()
    xml_parser.Parse(filename)
    return xml_parser.Declarations()
if __name__ == '__main__':
    # ad-hoc manual test against a developer-local sample gccxml output
    ParseDeclarations(r'D:\Programming\Libraries\boost-cvs\boost\libs\python\pyste\example\test.xml')
| 34.862213
| 101
| 0.566441
|
4a1018acfd1fd0519e080d391a5926b974cd05ef
| 7,553
|
py
|
Python
|
densenetfinaltest.py
|
molyswu/Project
|
5f36d07fa979c47110e5e80404b5ba55b6899b5a
|
[
"Apache-2.0"
] | 35
|
2017-09-08T13:24:30.000Z
|
2022-03-01T01:22:01.000Z
|
densenetfinaltest.py
|
molyswu/Project
|
5f36d07fa979c47110e5e80404b5ba55b6899b5a
|
[
"Apache-2.0"
] | 2
|
2018-02-28T07:29:54.000Z
|
2019-04-29T10:37:29.000Z
|
densenetfinaltest.py
|
molyswu/Project
|
5f36d07fa979c47110e5e80404b5ba55b6899b5a
|
[
"Apache-2.0"
] | 17
|
2017-11-21T10:49:47.000Z
|
2021-03-19T08:16:40.000Z
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import numpy as np
import tensorflow as tf
def unpickle(file):
  """Load one pickled batch file (CIFAR-style) and return its dict.

  If the dict has a 'data' entry, the image rows are converted from
  channel-first (N, 3, 32, 32) layout to flattened HWC rows and rescaled
  to [0, 1) by dividing by 256.
  """
  import _pickle as cPickle
  # BUG FIX: use 'with' so the file handle is closed even if unpickling
  # raises; also avoid shadowing the builtin 'dict'.
  with open(file, 'rb') as fo:
    batch = cPickle.load(fo, encoding='latin1')
  if 'data' in batch:
    batch['data'] = batch['data'].reshape((-1, 3, 32, 32)).swapaxes(1, 3).swapaxes(1, 2).reshape(-1, 32*32*3) / 256.
  return batch
def load_data_one(f):
  """Load a single batch file and return its (data, labels) arrays."""
  batch = unpickle(f)
  images = batch['data']
  targets = batch['labels']
  print ("Loading %s: %d" % (f, len(images)))
  return images, targets
def load_data(files, data_dir, label_count):
  """Concatenate several batch files; labels come back one-hot encoded."""
  data, labels = load_data_one(data_dir + '/' + files[0])
  for name in files[1:]:
    extra_data, extra_labels = load_data_one(data_dir + '/' + name)
    data = np.append(data, extra_data, axis=0)
    labels = np.append(labels, extra_labels, axis=0)
  # one-hot encode: row i has a 1.0 in column labels[i]
  labels = np.array([ [ float(i == label) for i in range(label_count) ] for label in labels ])
  return data, labels
def run_in_batch_avg(session, tensors, batch_placeholders, feed_dict=None, batch_size=200):
  """Evaluate *tensors* over the feed in mini-batches and return, for each
  tensor, the batch-size-weighted average of its values.

  The entries of *feed_dict* keyed by *batch_placeholders* are sliced per
  batch; all other feed entries are passed through unchanged.
  """
  # BUG FIX: the original default was a mutable {} that the loop below
  # mutates, so placeholder bindings leaked across calls that omitted
  # feed_dict. Use the None-sentinel idiom instead.
  if feed_dict is None:
    feed_dict = {}
  totals = [ 0 ] * len(tensors)
  batch_tensors = [ (placeholder, feed_dict[ placeholder ]) for placeholder in batch_placeholders ]
  total_size = len(batch_tensors[0][1])
  batch_count = (total_size + batch_size - 1) // batch_size
  for batch_idx in range(batch_count):
    lo, hi = batch_idx * batch_size, (batch_idx + 1) * batch_size
    current_batch_size = None
    for (placeholder, tensor) in batch_tensors:
      feed_dict[placeholder] = tensor[lo:hi]
      current_batch_size = len(feed_dict[placeholder])
    values = session.run(tensors, feed_dict=feed_dict)
    # weight each batch result by its (possibly short final) batch size
    totals = [ t + v * current_batch_size for (t, v) in zip(totals, values) ]
  return [ t / float(total_size) for t in totals ]
def weight_variable(shape):
  """Create a trainable weight tensor, truncated-normal init with stddev 0.01."""
  init = tf.truncated_normal(shape, stddev=0.01)
  return tf.Variable(init)
def bias_variable(shape):
  """Create a trainable bias tensor initialized to the constant 0.01."""
  init = tf.constant(0.01, shape=shape)
  return tf.Variable(init)
def conv2d(input, in_features, out_features, kernel_size, with_bias=False):
  """Stride-1, SAME-padded 2D convolution; optionally adds a bias term."""
  kernel = weight_variable([ kernel_size, kernel_size, in_features, out_features ])
  out = tf.nn.conv2d(input, kernel, [ 1, 1, 1, 1 ], padding='SAME')
  if not with_bias:
    return out
  return out + bias_variable([ out_features ])
def batch_activ_conv(current, in_features, out_features, kernel_size, is_training, keep_prob):
  """Pre-activation composite layer: batch-norm -> ReLU -> conv -> dropout."""
  x = tf.contrib.layers.batch_norm(current, scale=True, is_training=is_training, updates_collections=None)
  x = tf.nn.relu(x)
  x = conv2d(x, in_features, out_features, kernel_size)
  return tf.nn.dropout(x, keep_prob)
def block(input, layers, in_features, growth, is_training, keep_prob):
  """Dense block: each layer's *growth* new feature maps are concatenated
  onto the running feature map along the channel axis."""
  x, features = input, in_features
  for _ in range(layers):
    new_maps = batch_activ_conv(x, features, growth, 3, is_training, keep_prob)
    x = tf.concat((x, new_maps), 3)
    features += growth
  return x, features
def avg_pool(input, s):
  """Non-overlapping s x s average pooling with VALID padding."""
  window = [ 1, s, s, 1 ]
  return tf.nn.avg_pool(input, window, window, 'VALID')
def run_model(data, image_dim, label_count, depth):
  """Build a DenseNet of the given depth, restore weights from
  'inq16100s.ckpt' and print cross-entropy/accuracy on the test split.

  NOTE(review): although the optimizer and training batches are built,
  no training step is ever executed - this function only evaluates.
  """
  weight_decay = 1e-4
  # each of the three dense blocks gets (depth - 4) / 3 layers
  layers = int((depth - 4) / 3)
  # --- graph inputs ---
  xs = tf.placeholder("float", shape=[None, image_dim])
  ys = tf.placeholder("float", shape=[None, label_count])
  lr = tf.placeholder("float", shape=[])
  keep_prob = tf.placeholder(tf.float32)
  is_training = tf.placeholder("bool", shape=[])
  # --- DenseNet body: initial conv, 3 dense blocks with 1x1-conv+pool transitions ---
  current = tf.reshape(xs, [ -1, 32, 32, 3 ])
  current = conv2d(current, 3, 16, 3)
  current, features = block(current, layers, 16, 12, is_training, keep_prob)
  current = batch_activ_conv(current, features, features, 1, is_training, keep_prob)
  current = avg_pool(current, 2)
  current, features = block(current, layers, features, 12, is_training, keep_prob)
  current = batch_activ_conv(current, features, features, 1, is_training, keep_prob)
  current = avg_pool(current, 2)
  current, features = block(current, layers, features, 12, is_training, keep_prob)
  current = tf.contrib.layers.batch_norm(current, scale=True, is_training=is_training, updates_collections=None)
  current = tf.nn.relu(current)
  # global 8x8 average pooling down to one value per feature map
  current = avg_pool(current, 8)
  final_dim = features
  current = tf.reshape(current, [ -1, final_dim ])
  # --- softmax classifier head ---
  Wfc = weight_variable([ final_dim, label_count ])
  bfc = bias_variable([ label_count ])
  ys_ = tf.nn.softmax( tf.matmul(current, Wfc) + bfc )
  # 1e-12 guards against log(0)
  cross_entropy = -tf.reduce_mean(ys * tf.log(ys_ + 1e-12))
  l2 = tf.add_n([tf.nn.l2_loss(var) for var in tf.trainable_variables()])
  train_step = tf.train.MomentumOptimizer(lr, 0.9, use_nesterov=True).minimize(cross_entropy + l2 * weight_decay)
  correct_prediction = tf.equal(tf.argmax(ys_, 1), tf.argmax(ys, 1))
  accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
  # collect all variables except the optimizer's Momentum slots, so the
  # Saver's variable set matches what the checkpoint contains
  parm_dict={}
  for k in tf.global_variables():
    if k not in tf.contrib.framework.get_variables_by_suffix('Momentum'):
      parm_dict[k.name[:-2]] = k
  session = tf.InteractiveSession()
  batch_size = 64
  learning_rate = 0.1
  session.run(tf.global_variables_initializer())
  saver = tf.train.Saver(parm_dict)
  # NOTE(review): the training batches below are prepared but never consumed
  train_data, train_labels = data['train_data'], data['train_labels']
  batch_count = int(len(train_data) / batch_size)
  batches_data = np.split(train_data[:batch_count * batch_size], batch_count)
  batches_labels = np.split(train_labels[:batch_count * batch_size], batch_count)
  print ("Batch per epoch: ", batch_count)
  # restore pretrained weights, then evaluate on the full test set
  saver.restore(session,'inq16100s.ckpt')
  test_results = run_in_batch_avg(session, [ cross_entropy, accuracy ], [ xs, ys ],
        feed_dict = { xs: data['test_data'], ys: data['test_labels'], is_training: False, keep_prob: 1. })
  print(test_results)
  # if test_results[1]>0.93: saver.save(session, 'densenetest_%d.ckpt' % test_results)
# ---- script entry: load CIFAR-style batches, shuffle the train split, evaluate ----
data_dir = 'data'
image_size = 32
image_dim = image_size * image_size * 3  # 32x32 RGB flattened
meta = unpickle(data_dir + '/batches.meta')
label_names = meta['label_names']
label_count = len(label_names)
train_files = [ 'data_batch_%d' % d for d in range(1, 6) ]
train_data, train_labels = load_data(train_files, data_dir, label_count)
# shuffle data and labels with the same random permutation
pi = np.random.permutation(len(train_data))
train_data, train_labels = train_data[pi], train_labels[pi]
test_data, test_labels = load_data([ 'test_batch' ], data_dir, label_count)
print ("Train:", np.shape(train_data), np.shape(train_labels))
print ("Test:", np.shape(test_data), np.shape(test_labels))
data = { 'train_data': train_data,
    'train_labels': train_labels,
    'test_data': test_data,
    'test_labels': test_labels }
# depth-40 DenseNet
run_model(data, image_dim, label_count, 40)
| 46.054878
| 133
| 0.617635
|
4a1018ce1f9deadba93be716e69faeccb6f65449
| 3,669
|
py
|
Python
|
data_loader/datasets_importer/random_prid.py
|
Luxios22/Dual_Norm
|
b404a03b15fc05749e0c648d9e46ffe70f6b2a80
|
[
"MIT"
] | null | null | null |
data_loader/datasets_importer/random_prid.py
|
Luxios22/Dual_Norm
|
b404a03b15fc05749e0c648d9e46ffe70f6b2a80
|
[
"MIT"
] | null | null | null |
data_loader/datasets_importer/random_prid.py
|
Luxios22/Dual_Norm
|
b404a03b15fc05749e0c648d9e46ffe70f6b2a80
|
[
"MIT"
] | null | null | null |
import numpy as np
import os
import glob
import re
from .BaseDataset import BaseImageDataset
class PRID_AB(BaseImageDataset):
    """
    A -> B
    # WARNING: single shot instead of multiple shot.
    pid >= 200 for unrelated gallery images.
    """
    dataset_dir = 'PRID2011'
    def __init__(self, cfg, verbose=True, **kwargs):
        super().__init__()
        # single-shot layout: cam_a supplies queries, cam_b the gallery
        self.dataset_dir = os.path.join(cfg.DATASETS.STORE_DIR, self.dataset_dir, 'single_shot')
        self.query_dir = os.path.join(self.dataset_dir, 'cam_a')
        self.gallery_dir = os.path.join(self.dataset_dir, 'cam_b')
        self._check_before_run()
        train = []  # evaluation-only dataset: no training split
        query = self._process_dir(self.query_dir)
        gallery = self._process_dir(self.gallery_dir)
        if verbose:
            print("=> PRID A->B Loaded")
            self.print_dataset_statistics(train, query, gallery)
        self.train = train
        self.query = query
        self.gallery = gallery
        self.num_train_pids, self.num_train_imgs, self.num_train_cams = self.get_imagedata_info(self.train)
        self.num_query_pids, self.num_query_imgs, self.num_query_cams = self.get_imagedata_info(self.query)
        self.num_gallery_pids, self.num_gallery_imgs, self.num_gallery_cams = self.get_imagedata_info(self.gallery)
    def _check_before_run(self):
        """Check if all files are available before going deeper"""
        if not os.path.exists(self.dataset_dir):
            raise RuntimeError("'{}' is not available".format(self.dataset_dir))
        if not os.path.exists(self.query_dir):
            raise RuntimeError("'{}' is not available".format(self.query_dir))
        if not os.path.exists(self.gallery_dir):
            raise RuntimeError("'{}' is not available".format(self.gallery_dir))
    def _process_dir(self, dir_path):
        """Collect (img_path, pid, camid) tuples from one camera directory."""
        domain = os.path.basename(dir_path)
        img_paths = glob.glob(os.path.join(dir_path, '*.png'))
        # NOTE(review): the dot before 'png' is an unescaped regex '.'
        pattern = re.compile(r'^person_([-\d]+).png')
        dataset = []
        for img_path in img_paths:
            pid = list(map(int, pattern.search(os.path.basename(img_path)).groups()))[0]
            camid = 0 if domain=='cam_a' else 1 # index starts from 0
            if domain=='cam_a':
                # cam_a: keep only pids <= 200 (shifted to 0-based below);
                # presumably these are the identities shared with cam_b - confirm
                if pid <= 200:
                    pid -=1
                    dataset.append((img_path, pid, camid))
            else:
                # cam_b: keep everything; higher pids act as gallery distractors
                pid -=1
                dataset.append((img_path, pid, camid))
        return dataset
class Random_PRID(BaseImageDataset):
    """PRID cam_a -> cam_b evaluation split with 100 randomly sampled query ids.

    Queries are drawn from the first 200 identities; the gallery keeps the
    sampled identities plus every image with pid >= 200.
    """
    def __init__(self, cfg, verbose=True):
        source = PRID_AB(cfg, verbose=False)
        # sample 100 of the 200 candidate query identities
        sampled = np.arange(200)
        np.random.shuffle(sampled)
        sampled = sampled[:100]
        self.train = []
        self.query = [entry for entry in source.query if entry[1] in sampled]
        self.gallery = [entry for entry in source.gallery
                        if entry[1] in sampled or entry[1] >= 200]
        if verbose:
            print("=> Random PRID Loaded")
            self.print_dataset_statistics(self.train, self.query, self.gallery)
        self.num_train_pids, self.num_train_imgs, self.num_train_cams = self.get_imagedata_info(self.train)
        self.num_query_pids, self.num_query_imgs, self.num_query_cams = self.get_imagedata_info(self.query)
        self.num_gallery_pids, self.num_gallery_imgs, self.num_gallery_cams = self.get_imagedata_info(self.gallery)
| 36.326733
| 116
| 0.605615
|
4a1018d3084d6c3371fab3686c9608ef0fd8a67e
| 887
|
py
|
Python
|
setup.py
|
nilsfast/turbines
|
c4269394a06f909868705b368861afaf69e8fe1e
|
[
"MIT"
] | 1
|
2022-02-09T09:10:08.000Z
|
2022-02-09T09:10:08.000Z
|
setup.py
|
nilsfast/turbines
|
c4269394a06f909868705b368861afaf69e8fe1e
|
[
"MIT"
] | null | null | null |
setup.py
|
nilsfast/turbines
|
c4269394a06f909868705b368861afaf69e8fe1e
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
# Packaging metadata for the 'turbines' web framework (src/ layout).
setup(
    name='turbines',
    packages=find_packages(where='src'), # discover packages under src/ (see package_dir)
    version='0.0.1',
    license='MIT',
    description='Simple web framework for Python (obviously)',
    author='Nils Fast',
    author_email='',
    url='https://github.com/nilsfast/turbines',
    keywords='web, framework, simple',
    install_requires=[],  # no runtime dependencies
    package_dir={'': 'src'},
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
)
| 30.586207
| 62
| 0.590755
|
4a1018ff29961a7a56ab75fa901774a94eecd2bb
| 167
|
py
|
Python
|
kokemomo/plugins/admin/__init__.py
|
Kokemomo/Kokemomo
|
614504dc49b2f509b25c9ec2229f4438db73bab7
|
[
"MIT"
] | 4
|
2016-06-12T13:19:23.000Z
|
2020-01-29T09:46:15.000Z
|
kokemomo/plugins/admin/__init__.py
|
Kokemomo/Kokemomo
|
614504dc49b2f509b25c9ec2229f4438db73bab7
|
[
"MIT"
] | 67
|
2015-09-10T04:28:33.000Z
|
2019-09-19T09:08:11.000Z
|
kokemomo/plugins/admin/__init__.py
|
Kokemomo/Kokemomo
|
614504dc49b2f509b25c9ec2229f4438db73bab7
|
[
"MIT"
] | 2
|
2016-06-13T11:20:42.000Z
|
2016-07-22T07:44:31.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from kokemomo.plugins.admin.controller.km_admin import KMAdmin
__author__ = 'hiroki'
# Initialize each controller (module-level singleton instance).
admin = KMAdmin()
| 15.181818
| 62
| 0.712575
|
4a101969796f8f85101b2e165550375c8761f404
| 12,952
|
py
|
Python
|
src/data_loaders/attribute_annotation_data_loader.py
|
shigashiyama/seikanlp_sematt
|
b192c2568dc27cba497245a334baaa175b73270c
|
[
"MIT"
] | null | null | null |
src/data_loaders/attribute_annotation_data_loader.py
|
shigashiyama/seikanlp_sematt
|
b192c2568dc27cba497245a334baaa175b73270c
|
[
"MIT"
] | null | null | null |
src/data_loaders/attribute_annotation_data_loader.py
|
shigashiyama/seikanlp_sematt
|
b192c2568dc27cba497245a334baaa175b73270c
|
[
"MIT"
] | null | null | null |
import sys
import constants, constants_sematt
import dictionary
from data_loaders import data_loader
from data_loaders.data_loader import DataLoader, Data, RestorableData
DELIM = ','
COLON = ':'
COLON_ALT = '^'
class AttributeAnnotationDataLoader(DataLoader):
def __init__(self,
token_index=1,
label_index=2,
attr_indexes=[],
attr_depths=[],
attr_chunking_flags=[],
attr_target_labelsets=[],
attr_delim=None,
lowercasing=False,
normalize_digits=True,
):
self.token_index = token_index
self.label_index = label_index
self.attr_indexes = attr_indexes
self.attr_depths = attr_depths
self.attr_chunking_flags = attr_chunking_flags
self.attr_target_labelsets = attr_target_labelsets
self.attr_delim = attr_delim
self.lowercasing = lowercasing
self.normalize_digits = normalize_digits
def load_gold_data(self, path, data_format=None, dic=None, train=True):
data, dic = self.load_gold_data_WL(path, dic, train)
return data, dic
def load_decode_data(self, path, data_format, dic=None):
if data_format == constants.WL_FORMAT:
data = self.load_decode_data_WL(path, dic)
else:
data = self.load_decode_data_SL(path, dic)
return data
def parse_commandline_input(self, line, dic):
attr_delim = self.attr_delim if self.attr_delim else constants.SL_ATTR_DELIM
num_attrs = len(self.attr_indexes)
get_unigram_id = dic.tables[constants.UNIGRAM].get_id
if constants.ATTR_LABEL(0) in dic.tables:
use_attr0 = True
get_attr0_id = dic.tables[constants.ATTR_LABEL(0)].get_id
else:
use_attr0 = False
get_attr0_id = None
org_arr = line.split(' ')
if use_attr0:
attr0_seq = [
elem.split(attr_delim)[self.attr_indexes[0]]
if attr_delim in elem else ''
for elem in org_arr]
org_attr0_seq = [
self.preprocess_attribute(attr, self.attr_depths[0], self.attr_target_labelsets[0])
for attr in attr0_seq]
org_attr0_seqs = [org_attr0_seq]
attr0_seq = [get_attr0_id(attr) for attr in org_attr0_seq]
attr0_seqs = [attr0_seq]
else:
org_attr0_seqs = []
attr0_seqs = []
org_token_seq = [elem.split(attr_delim)[0] for elem in org_arr]
org_token_seqs = [org_token_seq]
ptoken_seq = [self.preprocess_token(word) for word in org_token_seq]
uni_seq = [get_unigram_id(word) for word in ptoken_seq]
uni_seqs = [uni_seq]
inputs = [uni_seqs]
outputs = [attr0_seqs]
orgdata = [org_token_seqs, org_attr0_seqs]
return RestorableData(inputs, outputs, orgdata=orgdata)
def load_gold_data_WL(self, path, dic, train=True):
attr_delim = self.attr_delim if self.attr_delim else constants.WL_ATTR_DELIM
num_attrs = len(self.attr_indexes)
if not dic:
dic = init_dictionary(num_attrs=num_attrs)
get_unigram_id = dic.tables[constants.UNIGRAM].get_id
get_label_id = dic.tables[constants_sematt.SEM_LABEL].get_id
get_ith_attr_id = []
for i in range(num_attrs):
get_ith_attr_id.append(dic.tables[constants.ATTR_LABEL(i)].get_id)
token_seqs = []
label_seqs = [] # list of semantic attribute sequences
attr_seqs_list = [[] for i in range(num_attrs)]
ins_cnt = 0
word_clm = self.token_index
label_clm = self.label_index
with open(path) as f:
uni_seq = []
label_seq = []
attr_seq_list = [[] for i in range(num_attrs)]
for line in f:
line = self.normalize_input_line(line)
if len(line) == 0:
if len(uni_seq) > 0:
token_seqs.append(uni_seq)
uni_seq = []
label_seqs.append(label_seq)
label_seq = []
for i, attr_seq in enumerate(attr_seq_list):
if self.attr_chunking_flags[i]:
attr_seq = [get_ith_attr_id[i](attr, update=train) for attr in
data_loader.get_labelseq_BIOES(attr_seq)]
attr_seqs_list[i].append(attr_seq)
attr_seq_list = [[] for i in range(num_attrs)]
ins_cnt += 1
if ins_cnt % constants.NUM_FOR_REPORTING == 0:
print('Read', ins_cnt, 'sentences', file=sys.stderr)
continue
elif line[0] == constants.COMMENT_SYM:
continue
array = line.split(attr_delim)
token = self.preprocess_token(array[word_clm])
tlen = len(token)
attrs = [None] * max(num_attrs, 1)
if len(array) < 2 + num_attrs:
continue
for i in range(num_attrs):
attrs[i] = self.preprocess_attribute(
array[self.attr_indexes[i]], self.attr_depths[i], self.attr_target_labelsets[i])
attr_tmp = attrs[i] if self.attr_chunking_flags[i] else get_ith_attr_id[i](
attrs[i], update=train)
attr_seq_list[i].append(attr_tmp)
update_token = self.to_be_registered(token, train)
uni_seq.append(get_unigram_id(token, update=update_token))
label = array[label_clm] if len(array) > label_clm else constants.NONE_SYMBOL
if label == '':
label = constants.NONE_SYMBOL
if DELIM in label:
labels = label.split(DELIM)
label = labels[0]
if COLON in label: # ':' is used as a special character when reading/writing a txt-format model
label = label.replace(COLON, COLON_ALT)
label_seq.append(get_label_id(label, update=train))
# register last sentenece
if len(uni_seq) > 0:
token_seqs.append(uni_seq)
label_seqs.append(label_seq)
for i, attr_seq in enumerate(attr_seq_list):
if self.attr_chunking_flags[i]:
attr_seq = [get_ith_attr_id[i](attr, update=train) for attr in
data_loader.get_labelseq_BIOES(attr_seq)]
attr_seqs_list[i].append(attr_seq)
inputs = [token_seqs]
inputs.append(attr_seqs_list[0] if len(attr_seqs_list) > 0 else None)
outputs = [label_seqs]
return Data(inputs, outputs), dic
def load_decode_data_WL(self, path, dic):
attr_delim = self.attr_delim if self.attr_delim else constants.WL_ATTR_DELIM
num_attrs = len(self.attr_indexes)
get_unigram_id = dic.tables[constants.UNIGRAM].get_id
get_attr_id = dic.tables[constants.ATTR_LABEL(0)].get_id if num_attrs > 0 else None
org_token_seqs = []
org_attr_seqs = [] # second or later attribute is ignored
token_seqs = []
attr_seqs = []
ins_cnt = 0
word_clm = self.token_index
with open(path) as f:
org_token_seq = []
org_attr_seq = []
token_seq = []
attr_seq_list = []
for line in f:
line = self.normalize_input_line(line)
if len(line) == 0:
if len(token_seq) > 0:
org_token_seqs.append(org_token_seq)
org_token_seq = []
token_seqs.append(token_seq)
token_seq = []
if num_attrs > 0:
if self.attr_chunking_flags[0]:
org_attr_seq = [attr for attr in data_loader.get_labelseq_BIOES(attr_seq)]
org_attr_seqs.append(org_attr_seq)
attr_seq = [get_attr_id(attr) for attr in org_attr_seq]
attr_seqs.append(attr_seq)
org_attr_seq = []
attr_seq = []
ins_cnt += 1
if ins_cnt % constants.NUM_FOR_REPORTING == 0:
print('Read', ins_cnt, 'sentences', file=sys.stderr)
continue
elif line[0] == constants.COMMENT_SYM:
continue
array = line.split(attr_delim)
org_token = array[word_clm]
org_token_seq.append(org_token)
token_seq.append(get_unigram_id(self.preprocess_token(org_token)))
attrs = [None] * max(num_attrs, 1)
if num_attrs > 0:
attr = self.preprocess_attribute(
array[self.attr_indexes[0]], self.attr_depths[0], self.attr_target_labelsets[0])
org_attr_seq.append(attr)
# register last sentenece
if len(token_seq) > 0:
org_token_seqs.append(org_token_seq)
token_seqs.append(token_seq)
if num_attrs > 0:
if self.attr_chunking_flags[0]:
org_attr_seq = [attr for attr in data_loader.get_labelseq_BIOES(attr_seq)]
org_attr_seqs.append(org_attr_seq)
attr_seq = [get_attr_id(attr) for attr in org_attr_seq]
attr_seqs.append(attr_seq)
inputs = [token_seqs, None]
outputs = []
outputs.append(attr_seqs if num_attrs > 0 else None)
orgdata = [org_token_seqs]
orgdata.append(org_attr_seqs if num_attrs > 0 else None)
return RestorableData(inputs, outputs, orgdata=orgdata)
def load_decode_data_SL(self, path, dic):
    """Load one-sentence-per-line ("SL") decode data from *path*.

    Each line holds tokens separated by SL_TOKEN_DELIM; a token may carry
    attribute columns separated by *attr_delim*. Returns a RestorableData
    with token-id sequences and, when attributes are configured, id
    sequences for the first attribute (later attributes are ignored).
    """
    attr_delim = self.attr_delim if self.attr_delim else constants.SL_ATTR_DELIM
    num_attrs = len(self.attr_indexes)
    word_clm = self.token_index
    get_unigram_id = dic.tables[constants.UNIGRAM].get_id
    get_attr_id = dic.tables[constants.ATTR_LABEL(0)].get_id if num_attrs > 0 else None
    org_token_seqs = []
    org_attr_seqs = []  # second or later attribute is ignored
    token_seqs = []
    attr_seqs = []
    ins_cnt = 0
    with open(path) as f:
        for line in f:
            line = self.normalize_input_line(line)
            if len(line) <= 1:
                continue
            elif line[0] == constants.COMMENT_SYM:
                continue
            org_arr = line.split(constants.SL_TOKEN_DELIM)
            org_token_seq = [elem.split(attr_delim)[word_clm] for elem in org_arr]
            org_token_seqs.append(org_token_seq)
            token_seq = [get_unigram_id(self.preprocess_token(token)) for token in org_token_seq]
            token_seqs.append(token_seq)
            if num_attrs > 0:
                org_token_seq = [elem.split(attr_delim)[0] for elem in org_arr]
                org_attr_seq = [
                    self.preprocess_attribute(
                        elem.split(attr_delim)[self.attr_indexes[0]],
                        self.attr_depths[0], self.attr_target_labelsets[0])
                    for elem in org_arr]
                org_attr_seqs.append(org_attr_seq)
                attr_seq = [get_attr_id(attr) for attr in org_attr_seq]
                attr_seqs.append(attr_seq)
            ins_cnt += 1
            if ins_cnt % constants.NUM_FOR_REPORTING == 0:
                print('Read', ins_cnt, 'sentences', file=sys.stderr)
    inputs = [token_seqs, None]
    outputs = [attr_seqs if num_attrs > 0 else None]
    orgdata = [org_token_seqs, org_attr_seqs if num_attrs > 0 else None]
    return RestorableData(inputs, outputs, orgdata=orgdata)
def init_dictionary(num_attrs=0):
    """Create a Dictionary with unigram, semantic-label and attribute tables."""
    dic = dictionary.Dictionary()
    # unigram table, with an UNK fallback entry
    dic.create_table(constants.UNIGRAM)
    dic.tables[constants.UNIGRAM].set_unk(constants.UNK_SYMBOL)
    # semantic label
    dic.create_table(constants_sematt.SEM_LABEL)
    # one table per attribute column
    for i in range(num_attrs):
        dic.create_table(constants.ATTR_LABEL(i))
        # dic.tables[constants.ATTR_LABEL(i)].set_unk(constants.UNK_SYMBOL)
    return dic
| 37.760933
| 111
| 0.555435
|
4a101a5c8c176b7432f3b5f67b62be77493ab7c2
| 1,496
|
py
|
Python
|
preprocessing/convertcsv2geojson.py
|
ellyrath/MIDSVizFinalProject
|
6c07f819b558e490b8449535fea56561f30f272e
|
[
"MIT"
] | null | null | null |
preprocessing/convertcsv2geojson.py
|
ellyrath/MIDSVizFinalProject
|
6c07f819b558e490b8449535fea56561f30f272e
|
[
"MIT"
] | null | null | null |
preprocessing/convertcsv2geojson.py
|
ellyrath/MIDSVizFinalProject
|
6c07f819b558e490b8449535fea56561f30f272e
|
[
"MIT"
] | null | null | null |
import csv, json
import sys

from geojson import Feature, FeatureCollection, Point

# Convert a collision CSV (argv[1]) into a GeoJSON FeatureCollection (argv[2]).
# Column indexes below match the source export's fixed layout.
features = []
with open(sys.argv[1]) as csvfile:
    reader = csv.reader(csvfile, delimiter=',')
    rownum = 0
    for line_ in reader:
        if rownum == 0:
            # skip the header row
            rownum = 1
            continue
        latitude = line_[4]
        longitude = line_[5]
        temp = line_[29]
        prec = line_[30]
        street = line_[7]
        xstreet = line_[8]
        numinjured = line_[10]
        numkilled = line_[11]
        contributingfactor = line_[18]
        vehicle_type = line_[24]
        incidentDate = line_[0]
        incidentTime = line_[1]
        latitude, longitude = map(float, (latitude, longitude))
        features.append(
            Feature(
                geometry=Point((longitude, latitude)),
                properties={
                    'temp': temp,
                    'prec': prec,
                    'street': street,
                    'xstreet': xstreet,
                    'numinjured': numinjured,
                    'numkilled': numkilled,
                    'contributingfactor': contributingfactor,
                    'vehicletype': vehicle_type,
                    'incidentDate': incidentDate,
                    'incidentTime': incidentTime
                }
            )
        )

collection = FeatureCollection(features)
# Bug fix: the original used Python-2-only print statements
# ("print type(collection)"), a SyntaxError on Python 3. The function form
# below behaves identically on both interpreters.
print(type(collection))
with open(sys.argv[2], "w") as f:
    print("over here")
    f.write('%s' % collection)
| 29.92
| 63
| 0.509358
|
4a101b95a9f9b5b18ef9add61ed9b2d50a990060
| 288
|
py
|
Python
|
app/manage.py
|
giordan83/dockdj
|
8d57057b89cf2414e18e6cda733af944e32dbbe7
|
[
"MIT"
] | 64
|
2015-10-04T02:54:06.000Z
|
2021-03-30T04:02:47.000Z
|
app/manage.py
|
kodani/elasticdock
|
a413be1075a0b3c5ac77bb2686b97ac39ef5cb1d
|
[
"MIT"
] | 7
|
2015-10-05T15:11:34.000Z
|
2017-10-30T03:44:03.000Z
|
app/manage.py
|
kodani/elasticdock
|
a413be1075a0b3c5ac77bb2686b97ac39ef5cb1d
|
[
"MIT"
] | 15
|
2015-11-13T21:02:48.000Z
|
2019-10-01T08:06:01.000Z
|
#!/usr/bin/env python
"""Django's command line utility."""
import os
import sys

if __name__ == "__main__":
    # Fall back to the project settings unless the caller already chose some.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")
    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
| 22.153846
| 71
| 0.756944
|
4a101bb74cf1b1af872e58af10edf0946b8d8d64
| 2,064
|
py
|
Python
|
lib/constants.py
|
sebbASF/steve
|
14959c1861e421b642e08463c6ae2240ef3d39a4
|
[
"Apache-2.0"
] | 12
|
2016-02-06T08:59:56.000Z
|
2022-03-06T17:25:47.000Z
|
lib/constants.py
|
sebbASF/steve
|
14959c1861e421b642e08463c6ae2240ef3d39a4
|
[
"Apache-2.0"
] | 1
|
2021-11-02T12:01:56.000Z
|
2021-11-02T12:01:56.000Z
|
lib/constants.py
|
isabella232/steve-1
|
a642241836919199e590cd1b1c2a2f1560d4898a
|
[
"Apache-2.0"
] | 9
|
2016-03-08T16:17:44.000Z
|
2022-02-25T00:11:43.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Registered vote-type descriptors (dicts carrying at least a 'key' entry);
# starts empty and is extended at runtime via appendVote().
VOTE_TYPES = (
)
# Registered database-backend descriptors ({'id': ..., 'constructor': ...});
# starts empty and is extended at runtime via appendBackend().
DB_TYPES = (
)
def appendVote(*types):
    """Register new vote types, skipping any whose 'key' is already known."""
    global VOTE_TYPES
    for candidate in types:
        already_known = any(existing['key'] == candidate['key'] for existing in VOTE_TYPES)
        if not already_known:
            VOTE_TYPES += (candidate,)
def appendBackend(t, c):
    """Register database backend *t* with constructor *c*, unless already present."""
    global DB_TYPES
    if any(backend.get('id') == t for backend in DB_TYPES):
        return
    DB_TYPES += ({
        'id': t,
        'constructor': c
    },)
def initBackend(config):
    """Instantiate the configured database backend.

    Returns None when the database section is disabled; raises for an
    unknown "dbsys" value.
    """
    backend = None
    if config.has_option("database", "disabled") and config.get("database", "disabled") == "true":
        return
    dbtype = config.get("database", "dbsys")
    for candidate in DB_TYPES:
        if candidate.get('id') == dbtype:
            backend = candidate['constructor'](config)
            break
    if not backend:
        raise Exception("Unknown database backend: %s" % dbtype)
    return backend
# For vote types with N number of seats/spots, this value denotes
# the max number of useable types to display via the API
# NOTE(review): the consumer of this cap lives outside this file — confirm
# before changing the value.
MAX_NUM = 10
| 28.666667
| 98
| 0.622093
|
4a101c843b256d19167e87920a37c193c70dc85e
| 1,135
|
py
|
Python
|
mlmodels/model_tf/misc/tf_serving/16.celery-hadoop-flask-text-classification/classification.py
|
gitter-badger/mlmodels
|
f08cc9b6ec202d4ad25ecdda2f44487da387569d
|
[
"MIT"
] | 1
|
2022-03-11T07:57:48.000Z
|
2022-03-11T07:57:48.000Z
|
mlmodels/model_tf/misc/tf_serving/16.celery-hadoop-flask-text-classification/classification.py
|
whitetiger1002/mlmodels
|
f70f1da7434e8855eed50adc67b49cc169f2ea24
|
[
"MIT"
] | null | null | null |
mlmodels/model_tf/misc/tf_serving/16.celery-hadoop-flask-text-classification/classification.py
|
whitetiger1002/mlmodels
|
f70f1da7434e8855eed50adc67b49cc169f2ea24
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import json
import sys
import numpy as np
import tensorflow as tf
def load_graph(frozen_graph_filename):
    """Deserialize a frozen GraphDef file and import it into a new tf.Graph."""
    with tf.gfile.GFile(frozen_graph_filename, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def)
    return graph
# Stream sentences from stdin through the frozen sentiment model and print
# "<sentence>: <label>" per input line.
g = load_graph("frozen_model.pb")
label = ["negative", "positive"]
X = g.get_tensor_by_name("import/Placeholder:0")
Y = g.get_tensor_by_name("import/logits:0")
sess = tf.InteractiveSession(graph=g)
maxlen = 50
UNK = 3  # id used for out-of-vocabulary words

with open("dictionary-test.json", "r") as fopen:
    dic = json.load(fopen)

for line in sys.stdin:
    sentences = list(filter(None, line.split("\n")))
    # Right-align each (reversed) sentence into a fixed-width id matrix.
    x = np.zeros((len(sentences), maxlen))
    for i, sentence in enumerate(sentences):
        for no, k in enumerate(sentence.split()[:maxlen][::-1]):
            x[i, -1 - no] = dic.get(k, UNK)
    indices = np.argmax(sess.run(Y, feed_dict={X: x}), axis=1)
    for no, index in enumerate(indices):
        print("%s: %s" % (sentences[no], label[index]))
| 27.02381
| 64
| 0.651101
|
4a101caac4555d82c9d858f22bdc290f0174718d
| 1,968
|
py
|
Python
|
castle-dragonsnax.py
|
Arihant25/beginner-python-projects
|
43c6489b6973522246073f2187a682487f1684c1
|
[
"Unlicense"
] | 1
|
2020-06-11T06:10:08.000Z
|
2020-06-11T06:10:08.000Z
|
castle-dragonsnax.py
|
Arihant25/beginner-python-projects
|
43c6489b6973522246073f2187a682487f1684c1
|
[
"Unlicense"
] | 1
|
2021-07-16T03:50:09.000Z
|
2021-07-16T04:00:35.000Z
|
castle-dragonsnax.py
|
Arihant25/beginner-python-projects
|
43c6489b6973522246073f2187a682487f1684c1
|
[
"Unlicense"
] | 1
|
2021-07-16T03:28:39.000Z
|
2021-07-16T03:28:39.000Z
|
import random

# Four-door text adventure; loops until the player types EXIT.
menu_choice = "Nothing"
while menu_choice != "EXIT":
    print("You are in a dark room in a mysterious castle.")
    print("In front of you are four doors. You must choose one.")
    door = input("Choose 1, 2, 3 or 4...")
    if door == "1":
        print("You find a room full of treasure. You are rich!")
        print("GAME OVER, YOU WIN")
    elif door == "2":
        print("The door opens and an angry ogre hits you with his club.")
        print("GAME OVER, YOU LOSE.")
    elif door == "3":
        print("You go into the room and find a sleeping dragon.")
        print("You can either:")
        print("1) Try to steal some of the dragon's gold.")
        print("2) Try to sneak around the dragon to the exit.")
        dragon_action = input("Type 1 or 2...")
        if dragon_action == "1":
            print("The dragon wakes up and eats you. You are delicious.")
            print("GAME OVER, YOU LOSE.")
        elif dragon_action == "2":
            print("You sneak around the dragon and escape from the castle, blinking in the sunshine.")
            print("GAME OVER, YOU WIN")
        else:
            print("Sorry, you didn't enter 1 or 2!")
    elif door == "4":
        print("You enter a room with a sphinx.")
        print("It asks you to guess what number it is thinking of, between 1 and 10.")
        guess = int(input("What number do you choose?"))
        # 1-in-10 chance: the guess must match a fresh random draw.
        if guess == random.randint(1, 10):
            print("The sphinx hisses in disappointment. You guessed correctly.")
            print("It must let you go free.")
            print("GAME OVER, YOU WIN")
        else:
            print("The sphinx tells you that your guess is incorrect.")
            print("You are now its prisoner forever.")
            print("GAME OVER, YOU LOSE.")
    else:
        print("Sorry, you didn't enter 1, 2, 3 or 4!")
    menu_choice = input("Press return to play again, or type EXIT to leave.")
| 45.767442
| 102
| 0.594512
|
4a101e16cf4938fb1d8c9520b0bae4a4af1ef4ce
| 86
|
py
|
Python
|
chmod_monkey/__version__.py
|
Toilal/python-chmod-monkey
|
66cc5c441152845f1d28a8a10357d94da4815381
|
[
"MIT"
] | 4
|
2020-05-12T08:05:32.000Z
|
2022-03-14T02:41:19.000Z
|
chmod_monkey/__version__.py
|
Toilal/python-chmod-monkey
|
66cc5c441152845f1d28a8a10357d94da4815381
|
[
"MIT"
] | null | null | null |
chmod_monkey/__version__.py
|
Toilal/python-chmod-monkey
|
66cc5c441152845f1d28a8a10357d94da4815381
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# pragma: no cover
"""
Version
"""
# Single source of truth for the package version (PEP 440 string);
# ".dev0" marks an unreleased development build.
__version__ = '1.1.2.dev0'
| 12.285714
| 26
| 0.55814
|
4a101e82b21156acf5cc712672e7ef78648ce25f
| 1,419
|
py
|
Python
|
kas/__version__.py
|
texierp/kas
|
71b9c43472acefd7b516899e1da78a5c6853681b
|
[
"MIT"
] | null | null | null |
kas/__version__.py
|
texierp/kas
|
71b9c43472acefd7b516899e1da78a5c6853681b
|
[
"MIT"
] | null | null | null |
kas/__version__.py
|
texierp/kas
|
71b9c43472acefd7b516899e1da78a5c6853681b
|
[
"MIT"
] | null | null | null |
# kas - setup tool for bitbake based projects
#
# Copyright (c) Siemens AG, 2017-2018
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module contains the version of kas.
"""
__license__ = 'MIT'
__copyright__ = 'Copyright (c) Siemens AG, 2017-2018'
__version__ = '2.0'
# Please update docs/format-changelog.rst when changing the file version.
__file_version__ = 8
__compatible_file_version__ = 1
| 43
| 79
| 0.770261
|
4a101e8a2e4d722cd7d080671281ae305a97f064
| 1,312
|
py
|
Python
|
help/app/uploader.py
|
relarizky/flask-pre-order
|
d336c179414892a8fad4bdbe63c8cb5449921729
|
[
"MIT"
] | null | null | null |
help/app/uploader.py
|
relarizky/flask-pre-order
|
d336c179414892a8fad4bdbe63c8cb5449921729
|
[
"MIT"
] | null | null | null |
help/app/uploader.py
|
relarizky/flask-pre-order
|
d336c179414892a8fad4bdbe63c8cb5449921729
|
[
"MIT"
] | null | null | null |
# Author : Relarizky
# Github : https://github.com/relarizky
# File Name : help/app/uploader.py
# Last Modified : 01/27/21, 21:58 PM
# Copyright © Relarizky 2021
from typing import Union
from help.hash import create_md5
from werkzeug.utils import secure_filename
from werkzeug.datastructures import FileStorage
class FileUploader:
    """
    contains method as helper for uploading a file
    """

    @staticmethod
    def filter_extension(file_name: str) -> bool:
        """
        Return True when the file name ends in an accepted image extension.
        """
        ext = file_name.split(".")[-1].lower()
        return ext in ("jpg", "png", "jpeg")

    @staticmethod
    def create_file_name(file_name: str) -> str:
        """
        Return an MD5-hashed base name with the lower-cased extension kept.
        """
        # Bug fix: rsplit so names containing extra dots ("photo.final.jpg")
        # split into exactly two parts; the old split(".") produced three
        # values and the two-name unpack raised ValueError.
        file_name, file_ext = file_name.rsplit(".", 1)
        file_name = create_md5(file_name)
        return file_name + "." + file_ext.lower()

    def upload_file(self, file: "FileStorage", dir: str) -> Union[None, str]:
        """
        Validate and store *file* under *dir*; return the stored name,
        or None when the name is empty or the extension is rejected.
        """
        file_name = file.filename
        if file_name.__len__() == 0:
            return
        if not self.filter_extension(file_name):
            return
        file_name = secure_filename(self.create_file_name(file_name))
        file.save(dir + file_name)
        return file_name
| 23.017544
| 75
| 0.610518
|
4a101f9234aa1910415a6bc89ceb6f618802bf87
| 20,683
|
py
|
Python
|
moto/ec2/utils.py
|
Preskton/moto
|
c9c30b82867294030833cef292db167955bc8240
|
[
"Apache-2.0"
] | 1
|
2021-02-24T05:48:54.000Z
|
2021-02-24T05:48:54.000Z
|
moto/ec2/utils.py
|
Preskton/moto
|
c9c30b82867294030833cef292db167955bc8240
|
[
"Apache-2.0"
] | null | null | null |
moto/ec2/utils.py
|
Preskton/moto
|
c9c30b82867294030833cef292db167955bc8240
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
import base64
import hashlib
import fnmatch
import random
import re
import six
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
from moto.core import ACCOUNT_ID
from moto.iam import iam_backends
# Canonical id prefix for every EC2 resource type moto can mint (an instance
# id is "i-xxxxxxxx", a subnet "subnet-xxxxxxxx", ...). Used both to build new
# ids (random_id) and to recognise existing ones (get_prefix).
EC2_RESOURCE_TO_PREFIX = {
"customer-gateway": "cgw",
"dhcp-options": "dopt",
"flow-logs": "fl",
"image": "ami",
"instance": "i",
"internet-gateway": "igw",
"launch-template": "lt",
"nat-gateway": "nat",
"network-acl": "acl",
"network-acl-subnet-assoc": "aclassoc",
"network-interface": "eni",
"network-interface-attachment": "eni-attach",
"reserved-instance": "uuid4",
"route-table": "rtb",
"route-table-association": "rtbassoc",
"security-group": "sg",
"snapshot": "snap",
"spot-instance-request": "sir",
"spot-fleet-request": "sfr",
"subnet": "subnet",
"reservation": "r",
"volume": "vol",
"vpc": "vpc",
"vpc-cidr-association-id": "vpc-cidr-assoc",
"vpc-elastic-ip": "eipalloc",
"vpc-elastic-ip-association": "eipassoc",
"vpc-peering-connection": "pcx",
"vpn-connection": "vpn",
"vpn-gateway": "vgw",
"iam-instance-profile-association": "iip-assoc",
}
# Reverse lookup: id prefix -> resource type name.
EC2_PREFIX_TO_RESOURCE = dict((v, k) for (k, v) in EC2_RESOURCE_TO_PREFIX.items())
def random_resource_id(size=8):
    """Return *size* random lowercase hex characters."""
    hex_digits = list(range(10)) + ["a", "b", "c", "d", "e", "f"]
    return "".join(six.text_type(random.choice(hex_digits)) for _ in range(size))


def random_id(prefix="", size=8):
    """Return "<prefix>-<random hex>" with a *size*-character suffix."""
    return "-".join([prefix, random_resource_id(size)])
# One generator per EC2 resource type; each delegates to random_id() with the
# canonical prefix (and a longer suffix where AWS uses 17-character ids).
def random_ami_id():
    return random_id(EC2_RESOURCE_TO_PREFIX["image"])


def random_instance_id():
    return random_id(EC2_RESOURCE_TO_PREFIX["instance"], 17)


def random_reservation_id():
    return random_id(EC2_RESOURCE_TO_PREFIX["reservation"])


def random_security_group_id():
    return random_id(EC2_RESOURCE_TO_PREFIX["security-group"])


def random_flow_log_id():
    return random_id(EC2_RESOURCE_TO_PREFIX["flow-logs"])


def random_snapshot_id():
    return random_id(EC2_RESOURCE_TO_PREFIX["snapshot"])


def random_spot_request_id():
    return random_id(EC2_RESOURCE_TO_PREFIX["spot-instance-request"])


def random_spot_fleet_request_id():
    return random_id(EC2_RESOURCE_TO_PREFIX["spot-fleet-request"])


def random_subnet_id():
    return random_id(EC2_RESOURCE_TO_PREFIX["subnet"])


def random_subnet_association_id():
    return random_id(EC2_RESOURCE_TO_PREFIX["route-table-association"])


def random_network_acl_id():
    return random_id(EC2_RESOURCE_TO_PREFIX["network-acl"])


def random_network_acl_subnet_association_id():
    return random_id(EC2_RESOURCE_TO_PREFIX["network-acl-subnet-assoc"])


def random_vpn_gateway_id():
    return random_id(EC2_RESOURCE_TO_PREFIX["vpn-gateway"])


def random_vpn_connection_id():
    return random_id(EC2_RESOURCE_TO_PREFIX["vpn-connection"])


def random_customer_gateway_id():
    return random_id(EC2_RESOURCE_TO_PREFIX["customer-gateway"])


def random_volume_id():
    return random_id(EC2_RESOURCE_TO_PREFIX["volume"])


def random_vpc_id():
    return random_id(EC2_RESOURCE_TO_PREFIX["vpc"])


def random_vpc_cidr_association_id():
    return random_id(EC2_RESOURCE_TO_PREFIX["vpc-cidr-association-id"])


def random_vpc_peering_connection_id():
    return random_id(EC2_RESOURCE_TO_PREFIX["vpc-peering-connection"])


def random_eip_association_id():
    return random_id(EC2_RESOURCE_TO_PREFIX["vpc-elastic-ip-association"])


def random_internet_gateway_id():
    return random_id(EC2_RESOURCE_TO_PREFIX["internet-gateway"])


def random_route_table_id():
    return random_id(EC2_RESOURCE_TO_PREFIX["route-table"])


def random_eip_allocation_id():
    return random_id(EC2_RESOURCE_TO_PREFIX["vpc-elastic-ip"])


def random_dhcp_option_id():
    return random_id(EC2_RESOURCE_TO_PREFIX["dhcp-options"])


def random_eni_id():
    return random_id(EC2_RESOURCE_TO_PREFIX["network-interface"])


def random_eni_attach_id():
    return random_id(EC2_RESOURCE_TO_PREFIX["network-interface-attachment"])


def random_nat_gateway_id():
    return random_id(EC2_RESOURCE_TO_PREFIX["nat-gateway"], 17)


def random_launch_template_id():
    return random_id(EC2_RESOURCE_TO_PREFIX["launch-template"], 17)


def random_iam_instance_profile_association_id():
    return random_id(EC2_RESOURCE_TO_PREFIX["iam-instance-profile-association"])
def random_public_ip():
    """Random address in moto's fake public 54.214.0.0/16 block."""
    third = random.choice(range(255))
    fourth = random.choice(range(255))
    return "54.214.{0}.{1}".format(third, fourth)


def random_private_ip():
    """Random address under 10.0.0.0/8."""
    octets = [random.choice(range(255)) for _ in range(3)]
    return "10.{0}.{1}.{2}".format(*octets)


def random_ip():
    """Random loopback-space address (127.x.x.x)."""
    tail = [random.randint(0, 255) for _ in range(3)]
    return "127.{0}.{1}.{2}".format(*tail)


def randor_ipv4_cidr():
    """Random 10.0.x.y/16 CIDR string (historic misspelled name kept for callers)."""
    return "10.0.{}.{}/16".format(random.randint(0, 255), random.randint(0, 255))


def random_ipv6_cidr():
    """Random 2400:6500:xxxx:xxxx::/56 CIDR string."""
    return "2400:6500:{}:{}::/56".format(random_resource_id(4), random_resource_id(4))
def generate_route_id(route_table_id, cidr_block, ipv6_cidr_block=None):
    """Compose the internal route id "<table>~<cidr>".

    The IPv6 CIDR is used only when no IPv4 CIDR was supplied.
    """
    effective_cidr = ipv6_cidr_block if (ipv6_cidr_block and not cidr_block) else cidr_block
    return "%s~%s" % (route_table_id, effective_cidr)
def generate_vpc_end_point_id(vpc_id):
    """Derive a VPC endpoint id ("vpce-...") from the VPC's random suffix."""
    return "vpce-" + vpc_id[4:]
def create_dns_entries(service_name, vpc_endpoint_id):
    """Build the DNS entry dict (dns_name + hosted_zone_id) for a VPC endpoint."""
    suffix = random_resource_id(8)
    return {
        "dns_name": "{}-{}.{}".format(vpc_endpoint_id, suffix, service_name),
        "hosted_zone_id": random_resource_id(13).upper(),
    }
def split_route_id(route_id):
    """Split a "<table>~<cidr>" route id back into (route_table_id, cidr)."""
    parts = route_id.split("~")
    return parts[0], parts[1]
def dhcp_configuration_from_querystring(querystring, option="DhcpConfiguration"):
    """Collect "<option>.N.Key"/"<option>.N.Value.M" querystring parameters.

    For example {'DhcpConfiguration.1.Key': ['domain-name'],
    'DhcpConfiguration.1.Value.1': ['example.com'], ...} becomes
    {'domain-name': ['example.com'], ...}; each key's value list is gathered
    from consecutive ".Value.1", ".Value.2", ... entries.
    """
    key_pattern = re.compile("{0}.[0-9]+.Key".format(option), re.UNICODE)
    parsed = {}
    for qs_key, qs_value in querystring.items():
        if not key_pattern.match(qs_key):
            continue
        index = qs_key.split(".")[1]
        collected = []
        value_index = 1
        while True:
            candidate = "{0}.{1}.Value.{2}".format(option, index, value_index)
            if candidate not in querystring:
                break
            collected.extend(querystring[candidate])
            value_index += 1
        parsed[qs_value[0]] = collected
    return parsed
def filters_from_querystring(querystring_dict):
    """Parse "Filter.N.Name"/"Filter.N.Value.M" query parameters into a dict.

    "tag-key"/"tag-value" filter pairs are additionally folded into a
    "tag:<key>" entry, matching the EC2 API's tag-filter semantics.
    """
    response_values = {}
    last_tag_key = None
    # Bug fix: the previous pattern r"Filter.(\d).Name" only matched a single
    # index digit (and left the dots unescaped), so the 10th and later
    # filters were silently dropped.
    name_re = re.compile(r"Filter\.(\d+)\.Name")
    for key, value in sorted(querystring_dict.items()):
        match = name_re.search(key)
        if match:
            filter_index = match.groups()[0]
            value_prefix = "Filter.{0}.Value".format(filter_index)
            filter_values = [
                filter_value[0]
                for filter_key, filter_value in querystring_dict.items()
                if filter_key.startswith(value_prefix)
            ]
            if value[0] == "tag-key":
                last_tag_key = "tag:" + filter_values[0]
            elif last_tag_key and value[0] == "tag-value":
                response_values[last_tag_key] = filter_values
            response_values[value[0]] = filter_values
    return response_values
def dict_from_querystring(parameter, querystring_dict):
    """Group "<parameter>.N.<prop>" querystring keys into {N: {prop: value}}."""
    use_dict = {}
    # Bug fix: \d+ so indexes past 9 are parsed (the old \d dropped them),
    # and the separator dots are escaped so "." can't match arbitrary chars.
    param_re = re.compile(r"{0}\.(\d+)\.(\w+)".format(re.escape(parameter)))
    for key, value in querystring_dict.items():
        match = param_re.search(key)
        if match:
            index, prop = match.groups()
            use_dict.setdefault(index, {})[prop] = value[0]
    return use_dict
def get_object_value(obj, attr):
    """Resolve the dotted path *attr* against an object/dict/list graph.

    "owner_id" always resolves to the mock account id; list nodes return the
    first truthy match among their elements; unknown attributes yield None.
    """
    node = obj
    for part in attr.split("."):
        if part == "owner_id":
            return ACCOUNT_ID
        if hasattr(node, part):
            node = getattr(node, part)
        elif isinstance(node, dict):
            node = node[part]
        elif isinstance(node, list):
            for element in node:
                found = get_object_value(element, part)
                if found:
                    return found
        else:
            return None
    return node
def is_tag_filter(filter_name):
    """True for the tag-related filter names: "tag:<key>", "tag-key", "tag-value"."""
    return filter_name.startswith(("tag:", "tag-value", "tag-key"))
def get_obj_tag(obj, filter_name):
    """Value of the tag named by a "tag:<key>" filter, or None if absent."""
    tag_key = filter_name.replace("tag:", "", 1)
    tag_map = {tag["key"]: tag["value"] for tag in obj.get_tags()}
    return tag_map.get(tag_key)


def get_obj_tag_names(obj):
    """Set of all tag keys on *obj*."""
    return {tag["key"] for tag in obj.get_tags()}


def get_obj_tag_values(obj):
    """Set of all tag values on *obj*."""
    return {tag["value"] for tag in obj.get_tags()}
def tag_filter_matches(obj, filter_name, filter_values):
    """True when any of *obj*'s tags satisfies one of the glob-style values."""
    patterns = [re.compile(simple_aws_filter_to_re(v)) for v in filter_values]
    if filter_name == "tag-key":
        candidates = get_obj_tag_names(obj)
    elif filter_name == "tag-value":
        candidates = get_obj_tag_values(obj)
    elif filter_name.startswith("tag:"):
        # NOTE(review): matches against ALL tag values, not only the named
        # key's value — preserved as-is from the original.
        candidates = get_obj_tag_values(obj)
    else:
        candidates = [get_obj_tag(obj, filter_name) or ""]
    return any(pattern.match(candidate) for candidate in candidates for pattern in patterns)
# Maps DescribeInstances filter names to the dotted attribute paths that
# get_object_value() resolves on an instance (see passes_filter_dict).
filter_dict_attribute_mapping = {
"instance-state-name": "state",
"instance-id": "id",
"state-reason-code": "_state_reason.code",
"source-dest-check": "source_dest_check",
"vpc-id": "vpc_id",
"group-id": "security_groups.id",
"instance.group-id": "security_groups.id",
"instance.group-name": "security_groups.name",
"instance-type": "instance_type",
"private-ip-address": "private_ip",
"ip-address": "public_ip",
"availability-zone": "placement",
"architecture": "architecture",
"image-id": "image_id",
"network-interface.private-dns-name": "private_dns",
"private-dns-name": "private_dns",
"owner-id": "owner_id",
}
def passes_filter_dict(instance, filter_dict):
    """True when *instance* satisfies every filter in *filter_dict*.

    Raises NotImplementedError for filter names this mock doesn't support.
    """
    for filter_name, filter_values in filter_dict.items():
        attr_path = filter_dict_attribute_mapping.get(filter_name)
        if attr_path is not None:
            actual = get_object_value(instance, attr_path)
            if not instance_value_in_filter_values(actual, filter_values):
                return False
        elif is_tag_filter(filter_name):
            if not tag_filter_matches(instance, filter_name, filter_values):
                return False
        else:
            raise NotImplementedError(
                "Filter dicts have not been implemented in Moto for '%s' yet. Feel free to open an issue at https://github.com/spulec/moto/issues"
                % filter_name
            )
    return True
def instance_value_in_filter_values(instance_value, filter_values):
    """True if the value (or, for list values, any element) is in filter_values."""
    if isinstance(instance_value, list):
        return bool(set(filter_values) & set(instance_value))
    return instance_value in filter_values
def filter_reservations(reservations, filter_dict):
    """Keep reservations having at least one matching instance.

    Side effect (as in the original): kept reservations have their
    .instances list trimmed to the matching instances.
    """
    kept = []
    for reservation in reservations:
        matching = [
            instance
            for instance in reservation.instances
            if passes_filter_dict(instance, filter_dict)
        ]
        if matching:
            reservation.instances = matching
            kept.append(reservation)
    return kept
# Maps DescribeInternetGateways filter names to dotted attribute paths
# resolved by get_object_value() (see passes_igw_filter_dict).
filter_dict_igw_mapping = {
"attachment.vpc-id": "vpc.id",
"attachment.state": "attachment_state",
"internet-gateway-id": "id",
}
def passes_igw_filter_dict(igw, filter_dict):
    """True when internet gateway *igw* satisfies every filter in *filter_dict*.

    Raises NotImplementedError for unsupported filter names.
    """
    for filter_name, filter_values in filter_dict.items():
        if filter_name in filter_dict_igw_mapping:
            igw_attr = filter_dict_igw_mapping[filter_name]
            if get_object_value(igw, igw_attr) not in filter_values:
                return False
        elif is_tag_filter(filter_name):
            if not tag_filter_matches(igw, filter_name, filter_values):
                return False
        else:
            # Bug fix: the message and filter_name were previously passed as
            # two separate constructor arguments, so the '%s' placeholder was
            # never interpolated; format it like passes_filter_dict does.
            raise NotImplementedError(
                "Internet Gateway filter dicts have not been implemented in Moto for '%s' yet. Feel free to open an issue at https://github.com/spulec/moto/issues"
                % filter_name
            )
    return True
def filter_internet_gateways(igws, filter_dict):
    """Subset of *igws* that passes every filter in *filter_dict*."""
    return [igw for igw in igws if passes_igw_filter_dict(igw, filter_dict)]
def is_filter_matching(obj, filter, filter_value):
    """Ask *obj* for the filter's value and match it against *filter_value*.

    Strings are glob-matched (fnmatch); iterables pass on a subset/superset
    relation; anything unhashable falls back to a membership test.
    """
    value = obj.get_filter_value(filter)
    if not filter_value:
        return False
    if isinstance(value, six.string_types):
        patterns = filter_value if isinstance(filter_value, list) else [filter_value]
        return any(fnmatch.fnmatch(value, pattern) for pattern in patterns)
    try:
        value_set = set(value)
        return (value_set and value_set.issubset(filter_value)) or value_set.issuperset(
            filter_value
        )
    except TypeError:
        return value in filter_value
def generic_filter(filters, objects):
    """Apply every filter in *filters* to *objects*; no filters means no-op."""
    if filters:
        for filter_name, wanted in filters.items():
            objects = [
                candidate
                for candidate in objects
                if is_filter_matching(candidate, filter_name, wanted)
            ]
    return objects
def simple_aws_filter_to_re(filter_string):
    """Translate an AWS glob-style filter to a regex string.

    Backslash-escaped "\\*" and "\\?" are protected as literal characters
    before fnmatch.translate expands the remaining wildcards.
    """
    protected = filter_string.replace(r"\?", "[?]").replace(r"\*", "[*]")
    return fnmatch.translate(protected)
def random_key_pair():
    """Generate a 2048-bit RSA key pair.

    Returns a dict with the PEM-encoded private key ("material") and the
    MD5 fingerprint of the public key ("fingerprint").
    """
    key = rsa.generate_private_key(
        public_exponent=65537, key_size=2048, backend=default_backend()
    )
    pem = key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.NoEncryption(),
    )
    return {
        "fingerprint": rsa_public_key_fingerprint(key.public_key()),
        "material": pem.decode("ascii"),
    }
def get_prefix(resource_id):
    """Return the id's resource prefix ("vpc", "i", ...) or None if unknown."""
    prefix, _, remainder = resource_id.partition("-")
    # "eni-attach-..." shares the "eni" prefix with plain network interfaces.
    if prefix == EC2_RESOURCE_TO_PREFIX["network-interface"] and remainder.startswith("attach"):
        prefix = EC2_RESOURCE_TO_PREFIX["network-interface-attachment"]
    if prefix not in EC2_RESOURCE_TO_PREFIX.values():
        # Reserved instances carry a bare UUID4 hex string instead of a prefix.
        uuid4hex = re.compile(r"[0-9a-f]{12}4[0-9a-f]{3}[89ab][0-9a-f]{15}\Z", re.I)
        if uuid4hex.match(resource_id) is None:
            return None
        prefix = EC2_RESOURCE_TO_PREFIX["reserved-instance"]
    return prefix
def is_valid_resource_id(resource_id):
    """True when the id has a known prefix followed by 8 hex characters."""
    prefix = get_prefix(resource_id)
    if prefix not in EC2_RESOURCE_TO_PREFIX.values():
        return False
    return re.match(prefix + "-[0-9a-f]{8}", resource_id) is not None
def is_valid_cidr(cird):
    """True when *cird* is a dotted-quad IPv4 CIDR with a /0-/32 mask."""
    octet = r"([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])"
    mask = r"(\/(\d|[1-2]\d|3[0-2]))"
    pattern = r"^(" + octet + r"\.){3}" + octet + mask + "$"
    return re.match(pattern, cird) is not None
def generate_instance_identity_document(instance):
    """
    http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html

    Build the JSON-serialisable identity document normally served at
    http://169.254.169.254/latest/dynamic/instance-identity/document ;
    typically consumed by the amazon-ecs-agent when registering a new
    ContainerInstance.
    """
    availability_zone = instance.placement["AvailabilityZone"]
    return {
        "devPayProductCodes": None,
        "availabilityZone": availability_zone,
        "privateIp": instance.private_ip_address,
        "version": "2010-8-31",
        "region": availability_zone[:-1],  # AZ minus its trailing letter
        "instanceId": instance.id,
        "billingProducts": None,
        "instanceType": instance.instance_type,
        "accountId": "012345678910",
        "pendingTime": "2015-11-19T16:32:11Z",
        "imageId": instance.image_id,
        "kernelId": instance.kernel_id,
        "ramdiskId": instance.ramdisk_id,
        "architecture": instance.architecture,
    }
def rsa_public_key_parse(key_material):
    """Decode base64 OpenSSH key material; raise ValueError unless it's RSA."""
    # These imports take ~.5s; keep them local
    import sshpubkeys.exceptions
    from sshpubkeys.keys import SSHKey

    try:
        material = (
            key_material
            if isinstance(key_material, six.binary_type)
            else key_material.encode("ascii")
        )
        public_key = SSHKey(base64.b64decode(material).decode("ascii"))
    except (sshpubkeys.exceptions.InvalidKeyException, UnicodeDecodeError):
        raise ValueError("bad key")
    if not public_key.rsa:
        raise ValueError("bad key")
    return public_key.rsa
def rsa_public_key_fingerprint(rsa_public_key):
    """AWS-style MD5 fingerprint ("aa:bb:...") of the DER-encoded public key."""
    der = rsa_public_key.public_bytes(
        encoding=serialization.Encoding.DER,
        format=serialization.PublicFormat.SubjectPublicKeyInfo,
    )
    digest = hashlib.md5(der).hexdigest()
    # Join the 16 hex byte-pairs with colons.
    return ":".join(digest[i:i + 2] for i in range(0, len(digest), 2))
def filter_iam_instance_profile_associations(iam_instance_associations, filter_dict):
    """Filter associations by the optional "instance-id" and "state" entries.

    Each filter entry is a dict whose values() are the accepted strings;
    an empty/None filter_dict returns the input unchanged.
    """
    if not filter_dict:
        return iam_instance_associations
    wanted_instance_ids = filter_dict.get("instance-id")
    wanted_states = filter_dict.get("state")
    kept = []
    for association in iam_instance_associations:
        if wanted_instance_ids and association.instance.id not in wanted_instance_ids.values():
            continue
        if wanted_states and association.state not in wanted_states.values():
            continue
        kept.append(association)
    return kept
def filter_iam_instance_profiles(iam_instance_profile_arn, iam_instance_profile_name):
    """Resolve an IAM instance profile by ARN and/or name.

    When only one identifier is supplied, the profile found by that
    identifier is returned. When both are supplied, the ARN lookup wins,
    but only if both identifiers resolve to the same profile; otherwise
    None is returned. With neither identifier, returns None.
    """
    profile_by_name = (
        iam_backends["global"].get_instance_profile(iam_instance_profile_name)
        if iam_instance_profile_name
        else None
    )
    profile_by_arn = (
        iam_backends["global"].get_instance_profile_by_arn(iam_instance_profile_arn)
        if iam_instance_profile_arn
        else None
    )

    if iam_instance_profile_arn and iam_instance_profile_name:
        # Both identifiers given: they must agree; prefer the ARN result.
        return profile_by_arn if profile_by_name == profile_by_arn else None

    return profile_by_arn or profile_by_name
| 31.82
| 164
| 0.672436
|
4a101fa05af9993f3fee44299a65e9e40e288715
| 422
|
py
|
Python
|
ccpy/common.py
|
kindkaktus/CcPy
|
8df2e445b254cbb456e379950c495ef55f984b72
|
[
"BSD-3-Clause"
] | null | null | null |
ccpy/common.py
|
kindkaktus/CcPy
|
8df2e445b254cbb456e379950c495ef55f984b72
|
[
"BSD-3-Clause"
] | 1
|
2015-05-19T05:50:41.000Z
|
2015-06-01T12:31:52.000Z
|
ccpy/common.py
|
kindkaktus/CcPy
|
8df2e445b254cbb456e379950c495ef55f984b72
|
[
"BSD-3-Clause"
] | null | null | null |
#
# Andrei Korostelev <andrei at korostelev dot net>
#
# Before using this product in any way please read the license agreement.
# If you do not agree to the terms in this agreement you are not allowed
# to use this product or parts of it. You can read this license in the
# file named LICENSE.
#
"""
CcPy global variables
"""
ProductName = 'CcPy'
ProductVersion = '1.0.5'
DaemonName = 'ccpyd'
LoggerName = 'ccpyd'
| 23.444444
| 74
| 0.722749
|
4a10203d22b78d0dcea4103a9438f47814f260d2
| 1,884
|
py
|
Python
|
dewiki_functions.py
|
tonypdmtr/PlainTextWikipedia
|
7e6ec7ddaf2e9c7f6daaf0807fc611e1b86fedcf
|
[
"MIT"
] | 199
|
2020-11-24T21:47:06.000Z
|
2022-02-01T21:07:50.000Z
|
dewiki_functions.py
|
tonypdmtr/PlainTextWikipedia
|
7e6ec7ddaf2e9c7f6daaf0807fc611e1b86fedcf
|
[
"MIT"
] | 6
|
2021-04-06T17:27:18.000Z
|
2021-04-21T16:39:38.000Z
|
dewiki_functions.py
|
tonypdmtr/PlainTextWikipedia
|
7e6ec7ddaf2e9c7f6daaf0807fc611e1b86fedcf
|
[
"MIT"
] | 17
|
2020-11-24T21:47:03.000Z
|
2021-09-14T01:08:19.000Z
|
from threading import Thread
import json
import re
from html2text import html2text as htt
import wikitextparser as wtp
def dewiki(text):
    """Convert raw wiki markup to flat plain text.

    Strips wikitext markup, removes residual HTML, replaces escaped
    newlines, and collapses all whitespace runs to single spaces.
    """
    text = wtp.parse(text).plain_text()  # wikitext -> plaintext
    text = htt(text)  # remove any HTML
    # NOTE(review): this targets the literal two-character sequence
    # backslash-n (escaped newlines as stored in the dump), not actual
    # newline characters — confirm that is the intent.
    text = text.replace('\\n', ' ')
    # Raw string fixes the invalid '\s' escape sequence (a
    # DeprecationWarning, and a SyntaxWarning from Python 3.12).
    text = re.sub(r'\s+', ' ', text)
    return text
def analyze_chunk(text):
    """Extract title, id, and plain-text body from one raw <page> chunk.

    Returns a dict with 'title', 'text', and 'id' keys, or None when the
    chunk is a redirect, a disambiguation page, a namespaced page
    (title contains ':'), or cannot be parsed.
    """
    try:
        if '<redirect title="' in text:  # this is not the main article
            return None
        if '(disambiguation)' in text:  # this is not an article
            return None

        title = htt(text.split('<title>')[1].split('</title>')[0])
        if ':' in title:  # most articles with : in them are not articles we care about
            return None

        serial = text.split('<id>')[1].split('</id>')[0]
        body = text.split('</text')[0].split('<text')[1].split('>', maxsplit=1)[1]
        body = dewiki(body)
        return {'title': title.strip(), 'text': body.strip(), 'id': serial.strip()}
    except Exception as oops:
        # Best-effort parsing: log and skip malformed chunks.
        print(oops)
        return None
def save_article(article, savedir):
    """Parse one raw article chunk and write it to savedir as <id>.json.

    Chunks that analyze_chunk rejects (returns None for) are silently
    skipped.
    """
    doc = analyze_chunk(article)
    if not doc:
        return
    print('SAVING:', doc['title'])
    out_path = savedir + doc['id'] + '.json'
    with open(out_path, 'w', encoding='utf-8') as outfile:
        json.dump(doc, outfile, sort_keys=True, indent=1, ensure_ascii=False)
def process_file_text(filename, savedir):
    """Stream a Wikipedia XML dump and save each article via worker threads.

    Reads the file line by line, accumulating lines between <page> and
    </page> markers; each completed chunk is handed to save_article on
    its own thread.
    """
    chunk = ''
    with open(filename, 'r', encoding='utf-8') as infile:
        for line in infile:
            if '<page>' in line:
                chunk = ''  # a new article starts; discard anything buffered
            elif '</page>' in line:  # end of article
                # Hand the finished article off to a worker thread.
                Thread(target=save_article, args=(chunk, savedir)).start()
            else:
                chunk += line
| 34.888889
| 91
| 0.570064
|
4a10222e0295891ea117c70d9a6eea9c9be4eb5b
| 796
|
py
|
Python
|
manage.py
|
PuzzlehuntAtoZany/online-puzzle-hunt
|
9f91e6b211a7ec83a08f7cf9810cc0667d452a93
|
[
"MIT"
] | null | null | null |
manage.py
|
PuzzlehuntAtoZany/online-puzzle-hunt
|
9f91e6b211a7ec83a08f7cf9810cc0667d452a93
|
[
"MIT"
] | null | null | null |
manage.py
|
PuzzlehuntAtoZany/online-puzzle-hunt
|
9f91e6b211a7ec83a08f7cf9810cc0667d452a93
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Django command-line entry point for this project."""
import os
import sys


def _main():
    """Select the dev settings module and dispatch to Django's runner."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gph.settings.dev')
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions.
        try:
            import django  # noqa: F401
        except ImportError:
            raise ImportError(
                'Couldn\'t import Django. Are you sure it\'s installed and '
                'available on your PYTHONPATH environment variable? Did you '
                'forget to activate a virtual environment?'
            )
        raise
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    _main()
| 34.608696
| 77
| 0.638191
|
4a102331db860b40998406b10807103c8f9c30f8
| 591
|
py
|
Python
|
src/article/urls.py
|
metinonat/Django-Blog
|
d8e3f736bea36f56059bb1e6ad770ffb76a291a4
|
[
"MIT"
] | 1
|
2020-02-03T21:18:51.000Z
|
2020-02-03T21:18:51.000Z
|
src/article/urls.py
|
metinonat/Django-Blog
|
d8e3f736bea36f56059bb1e6ad770ffb76a291a4
|
[
"MIT"
] | null | null | null |
src/article/urls.py
|
metinonat/Django-Blog
|
d8e3f736bea36f56059bb1e6ad770ffb76a291a4
|
[
"MIT"
] | null | null | null |
from django.urls import path, include
from article.views import ArticleListView, ArticleCreateView, ArticleDetailView, ArticleUpdateView, ArticleDeleteView

# URL routes for the article app: list and create at the top level, plus
# per-article detail/update/delete keyed by the article's integer id.
# Keyword-argument spacing normalized to PEP 8 (no spaces around '=').
urlpatterns = [
    path('', ArticleListView.as_view(), name='articles'),
    path('create', ArticleCreateView.as_view(), name='article_create'),
    path('detail/<int:article_id>/', ArticleDetailView.as_view(), name='article_detail'),
    path('detail/<int:article_id>/update', ArticleUpdateView.as_view(), name='article_update'),
    path('detail/<int:article_id>/delete', ArticleDeleteView.as_view(), name='article_delete'),
]
| 59.1
| 117
| 0.751269
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.