max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
bin/asngen.py | kurtkeller/TA-asngen | 0 | 6622551 | #!/usr/bin/env python
from splunklib.searchcommands import dispatch, GeneratingCommand, Configuration, Option, validators
import sys
import os
import ConfigParser
from StringIO import StringIO
from zipfile import ZipFile
import urllib2
import re
import socket
import struct
@Configuration(type='reporting')
class ASNGenCommand(GeneratingCommand):
    """Splunk generating command that downloads the MaxMind GeoLite2 ASN
    CSV database and yields one event per network/ASN mapping.

    Python 2 only (uses urllib2, ConfigParser, StringIO).
    """

    def generate(self):
        """Yield dicts with keys 'ip' (CIDR), 'asn' and 'autonomous_system'.

        Raises Exception on configuration, download or zip errors.
        """
        proxies = {'http': None, 'https': None}
        maxmind = {'license_key': None}
        try:
            configparser = ConfigParser.ConfigParser()
            # first try to read the defaults (in case we are in a cluster with deployed config)
            configparser.read(os.path.join(os.getcwd(), '../default/asngen.conf'))
            # then try to read the overrides
            configparser.read(os.path.join(os.getcwd(), '../local/asngen.conf'))
            if configparser.has_section('proxies'):
                if configparser.has_option('proxies', 'https'):
                    if len(configparser.get('proxies', 'https')) > 0:
                        proxies['https'] = configparser.get('proxies', 'https')
            if configparser.has_section('maxmind'):
                if configparser.has_option('maxmind', 'license_key'):
                    if len(configparser.get('maxmind', 'license_key')) > 0:
                        maxmind['license_key'] = configparser.get('maxmind', 'license_key')
        except Exception:
            raise Exception("Error reading configuration. Please check your local asngen.conf file.")
        if proxies['https'] is not None:
            # Route the download through the configured HTTPS proxy.
            proxy = urllib2.ProxyHandler(proxies)
            opener = urllib2.build_opener(proxy)
            urllib2.install_opener(opener)
        if maxmind['license_key'] is None:
            raise Exception("maxmind license_key is required")
        try:
            link = "https://download.maxmind.com/app/geoip_download" + "?"
            link += "edition_id=GeoLite2-ASN-CSV" + "&"
            link += "license_key=" + maxmind['license_key'] + "&"
            link += "suffix=zip"
            url = urllib2.urlopen(link)
        except Exception:
            raise Exception("Please check app proxy settings and license_key.")
        if url.getcode() == 200:
            try:
                zipfile = ZipFile(StringIO(url.read()))
            except Exception:
                raise Exception("Invalid zip file")
        else:
            # BUG FIX: getcode() returns an int; concatenating it to a str
            # raised TypeError instead of the intended message.
            raise Exception("Received response: " + str(url.getcode()))
        for name in zipfile.namelist():
            # Parse CSV rows of the form network/prefix,asn,"AS name".
            entries = re.findall(r'^(\d+\.\d+\.\d+\.\d+)\/(\d+),(\d+),\"?([^\"\n]+)\"?', zipfile.open(name).read(), re.MULTILINE)
            for line in entries:
                yield {'ip': line[0] + "/" + line[1], 'asn': line[2], 'autonomous_system': line[3].decode('utf-8', 'ignore')}
dispatch(ASNGenCommand, sys.argv, sys.stdin, sys.stdout, __name__)
| #!/usr/bin/env python
from splunklib.searchcommands import dispatch, GeneratingCommand, Configuration, Option, validators
import sys
import os
import ConfigParser
from StringIO import StringIO
from zipfile import ZipFile
import urllib2
import re
import socket
import struct
@Configuration(type='reporting')
class ASNGenCommand(GeneratingCommand):
def generate(self):
proxies = {'http': None, 'https': None}
maxmind = {'license_key': None}
try:
configparser = ConfigParser.ConfigParser()
# first try to read the defaults (in case we are in a cluster with deployed config)
configparser.read(os.path.join(os.getcwd(), '../default/asngen.conf'))
# then try to read the overrides
configparser.read(os.path.join(os.getcwd(), '../local/asngen.conf'))
if configparser.has_section('proxies'):
if configparser.has_option('proxies', 'https'):
if len(configparser.get('proxies', 'https')) > 0:
proxies['https'] = configparser.get('proxies', 'https')
if configparser.has_section('maxmind'):
if configparser.has_option('maxmind', 'license_key'):
if len(configparser.get('maxmind', 'license_key')) > 0:
maxmind['license_key'] = configparser.get('maxmind', 'license_key')
except:
raise Exception("Error reading configuration. Please check your local asngen.conf file.")
if proxies['https'] is not None:
proxy = urllib2.ProxyHandler(proxies)
opener = urllib2.build_opener(proxy)
urllib2.install_opener(opener)
if maxmind['license_key'] is None:
raise Exception("maxmind license_key is required")
try:
link = "https://download.maxmind.com/app/geoip_download" + "?"
link += "edition_id=GeoLite2-ASN-CSV" + "&"
link += "license_key=" + maxmind['license_key'] + "&"
link += "suffix=zip"
url = urllib2.urlopen(link)
except:
raise Exception("Please check app proxy settings and license_key.")
if url.getcode()==200:
try:
zipfile = ZipFile(StringIO(url.read()))
except:
raise Exception("Invalid zip file")
else:
raise Exception("Received response: " + url.getcode())
for name in zipfile.namelist():
entries = re.findall(r'^(\d+\.\d+\.\d+\.\d+)\/(\d+),(\d+),\"?([^\"\n]+)\"?', zipfile.open(name).read(), re.MULTILINE)
for line in entries:
yield {'ip': line[0] + "/" + line[1], 'asn': line[2], 'autonomous_system': line[3].decode('utf-8', 'ignore')}
dispatch(ASNGenCommand, sys.argv, sys.stdin, sys.stdout, __name__)
| en | 0.887568 | #!/usr/bin/env python # first try to read the defaults (in case we are in a cluster with deployed config) # then try to read the overrides | 2.415957 | 2 |
src/google_foobar/P007_binary_bunnies/solution_01_tests.py | lakshmikanth-tesla/ProgrammingProblems | 1 | 6622552 | import unittest
from src.google_foobar.P007_binary_bunnies.solution_01 import answer
class TestSolution(unittest.TestCase):
    """Unit tests for `answer` from google_foobar P007 solution_01.

    NOTE(review): expected values are stated as *strings*; the semantics
    of `answer` (presumably a counting problem over the sequence) live in
    solution_01 and are not visible here — inferred from test data only.
    """

    def testcase_001(self):
        seq = [5, 9, 8, 2, 1]
        expected = '6'
        self.assertEqual(answer(seq), expected)

    def testcase_002(self):
        # Already sorted ascending -> exactly one result.
        seq = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected = '1'
        self.assertEqual(answer(seq), expected)

    def testcase_003(self):
        # Single element.
        seq = [5]
        expected = '1'
        self.assertEqual(answer(seq), expected)

    def testcase_004(self):
        # Two elements.
        seq = [5, 9]
        expected = '1'
        self.assertEqual(answer(seq), expected)

    def testcase_005(self):
        seq = [5, 9, 8, 2, 1, 10, 3]
        expected = '80'
        self.assertEqual(answer(seq), expected)

    def testcase_006(self):
        seq = [10, 8, 15, 6, 9, 4, 5]
        expected = '24'
        self.assertEqual(answer(seq), expected)

    def testcase_007(self):
        seq = [12, 6, 19, 15, 5]
        expected = '6'
        self.assertEqual(answer(seq), expected)

    def testcase_008(self):
        # Sorted ascending again -> '1'.
        seq = [44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64]
        expected = '1'
        self.assertEqual(answer(seq), expected)
if __name__ == '__main__':
unittest.main()
| import unittest
from src.google_foobar.P007_binary_bunnies.solution_01 import answer
class TestSolution(unittest.TestCase):
def testcase_001(self):
seq = [5, 9, 8, 2, 1]
expected = '6'
self.assertEqual(answer(seq), expected)
def testcase_002(self):
seq = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
expected = '1'
self.assertEqual(answer(seq), expected)
def testcase_003(self):
seq = [5]
expected = '1'
self.assertEqual(answer(seq), expected)
def testcase_004(self):
seq = [5, 9]
expected = '1'
self.assertEqual(answer(seq), expected)
def testcase_005(self):
seq = [5, 9, 8, 2, 1, 10, 3]
expected = '80'
self.assertEqual(answer(seq), expected)
def testcase_006(self):
seq = [10, 8, 15, 6, 9, 4, 5]
expected = '24'
self.assertEqual(answer(seq), expected)
def testcase_007(self):
seq = [12, 6, 19, 15, 5]
expected = '6'
self.assertEqual(answer(seq), expected)
def testcase_008(self):
seq = [44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64]
expected = '1'
self.assertEqual(answer(seq), expected)
if __name__ == '__main__':
unittest.main()
| none | 1 | 3.053319 | 3 | |
plugin_example/conversations/utils.py | maquinuz/flask-shop | 141 | 6622553 | <reponame>maquinuz/flask-shop
from .models import Conversation, Message
def get_message_count(user):
    """Return how many private-message conversations the given user has.

    :param user: The user object.
    """
    owned = Conversation.query.filter(Conversation.user_id == user.id)
    return owned.count()
def get_unread_count(user):
    """Return the number of unread conversations for the given user.

    :param user: The user object.
    """
    unread = Conversation.query.filter(
        Conversation.unread,
        Conversation.user_id == user.id,
    )
    return unread.count()
def get_latest_messages(user):
    """Return up to 99 unread conversations of the user, newest first.

    :param user: The user object.
    """
    unread = Conversation.query.filter(
        Conversation.unread,
        Conversation.user_id == user.id,
    )
    latest = unread.order_by(Conversation.id.desc()).limit(99)
    return latest.all()
| from .models import Conversation, Message
def get_message_count(user):
"""Returns the number of private messages of the given user.
:param user: The user object.
"""
return Conversation.query.filter(Conversation.user_id == user.id).count()
def get_unread_count(user):
"""Returns the unread message count for the given user.
:param user: The user object.
"""
return Conversation.query.filter(
Conversation.unread, Conversation.user_id == user.id
).count()
def get_latest_messages(user):
"""Returns all unread messages for the given user.
:param user: The user object.
"""
return (
Conversation.query.filter(Conversation.unread, Conversation.user_id == user.id)
.order_by(Conversation.id.desc())
.limit(99)
.all()
) | en | 0.717419 | Returns the number of private messages of the given user. :param user: The user object. Returns the unread message count for the given user. :param user: The user object. Returns all unread messages for the given user. :param user: The user object. | 2.651453 | 3 |
train.py | applejenny66/cdQA | 0 | 6622554 | # train.py
import pandas as pd
from ast import literal_eval
from cdqa.pipeline import QAPipeline
def traing():
    """Fit a cdQA question-answering pipeline and save the trained reader.

    Reads a custom corpus CSV (with a literal-eval'd 'paragraphs' column),
    fits a retriever, then fine-tunes a BERT reader on a SQuAD-like JSON
    dataset and dumps it to disk.

    NOTE(review): the CPU pipeline fitted via fit_retriever() is
    immediately replaced by a fresh GPU pipeline, so that retriever fit is
    discarded — confirm this is intentional.

    :return: the (GPU) QAPipeline after reader fine-tuning.
    """
    df = pd.read_csv('your-custom-corpus-here.csv', converters={'paragraphs': literal_eval})
    cdqa_pipeline = QAPipeline(reader='bert_qa_vCPU-sklearn.joblib')
    cdqa_pipeline.fit_retriever(df=df)
    # Replace with the GPU reader and fine-tune it on the QA dataset.
    cdqa_pipeline = QAPipeline(reader='bert_qa_vGPU-sklearn.joblib')
    cdqa_pipeline.fit_reader('path-to-custom-squad-like-dataset.json')
    cdqa_pipeline.dump_reader('path-to-save-bert-reader.joblib')
    return (cdqa_pipeline)
def predict(cdqa_pipeline, N):
    """Run three prediction variants for the same query and print each.

    :param cdqa_pipeline: a fitted cdQA QAPipeline.
    :param N: number of candidate answers for the n-best variant.
    """
    query = 'your question'
    simple = cdqa_pipeline.predict(query=query)
    n_best = cdqa_pipeline.predict(query=query, n_predictions=N)
    weight_best = cdqa_pipeline.predict(query=query, retriever_score_weight=0.35)
    print ("simple answer: ", simple)
    print ("n best answer: ", n_best)
    print ("weight answer: ", weight_best)
if __name__ == "__main__":
cuda_pipeline = traing()
predict(cuda_pipeline, N = 3)
| # train.py
import pandas as pd
from ast import literal_eval
from cdqa.pipeline import QAPipeline
def traing():
df = pd.read_csv('your-custom-corpus-here.csv', converters={'paragraphs': literal_eval})
cdqa_pipeline = QAPipeline(reader='bert_qa_vCPU-sklearn.joblib')
cdqa_pipeline.fit_retriever(df=df)
cdqa_pipeline = QAPipeline(reader='bert_qa_vGPU-sklearn.joblib')
cdqa_pipeline.fit_reader('path-to-custom-squad-like-dataset.json')
cdqa_pipeline.dump_reader('path-to-save-bert-reader.joblib')
return (cdqa_pipeline)
def predict(cdqa_pipeline, N):
simple = cdqa_pipeline.predict(query='your question')
n_best = cdqa_pipeline.predict(query='your question', n_predictions=N)
weight_best = cdqa_pipeline.predict(query='your question', retriever_score_weight=0.35)
print ("simple answer: ", simple)
print ("n best answer: ", n_best)
print ("weight answer: ", weight_best)
if __name__ == "__main__":
cuda_pipeline = traing()
predict(cuda_pipeline, N = 3)
| en | 0.481188 | # train.py | 2.835958 | 3 |
datcore-sdk/python/datcore_sdk/models/create_package_request.py | mguidon/aiohttp-dsm | 0 | 6622555 | # coding: utf-8
"""
Blackfynn Swagger
Swagger documentation for the Blackfynn api # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class CreatePackageRequest(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'parent': 'str',
'name': 'str',
'state': 'PackageState',
'package_type': 'PackageType',
'properties': 'list[GraphNodePropertyRO]',
'dataset': 'str',
'owner': 'str'
}
attribute_map = {
'parent': 'parent',
'name': 'name',
'state': 'state',
'package_type': 'packageType',
'properties': 'properties',
'dataset': 'dataset',
'owner': 'owner'
}
def __init__(self, parent=None, name=None, state=None, package_type=None, properties=None, dataset=None, owner=None): # noqa: E501
"""CreatePackageRequest - a model defined in OpenAPI""" # noqa: E501
self._parent = None
self._name = None
self._state = None
self._package_type = None
self._properties = None
self._dataset = None
self._owner = None
self.discriminator = None
if parent is not None:
self.parent = parent
self.name = name
if state is not None:
self.state = state
self.package_type = package_type
self.properties = properties
self.dataset = dataset
if owner is not None:
self.owner = owner
@property
def parent(self):
"""Gets the parent of this CreatePackageRequest. # noqa: E501
:return: The parent of this CreatePackageRequest. # noqa: E501
:rtype: str
"""
return self._parent
@parent.setter
def parent(self, parent):
"""Sets the parent of this CreatePackageRequest.
:param parent: The parent of this CreatePackageRequest. # noqa: E501
:type: str
"""
self._parent = parent
@property
def name(self):
"""Gets the name of this CreatePackageRequest. # noqa: E501
:return: The name of this CreatePackageRequest. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this CreatePackageRequest.
:param name: The name of this CreatePackageRequest. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def state(self):
"""Gets the state of this CreatePackageRequest. # noqa: E501
:return: The state of this CreatePackageRequest. # noqa: E501
:rtype: PackageState
"""
return self._state
@state.setter
def state(self, state):
"""Sets the state of this CreatePackageRequest.
:param state: The state of this CreatePackageRequest. # noqa: E501
:type: PackageState
"""
self._state = state
@property
def package_type(self):
"""Gets the package_type of this CreatePackageRequest. # noqa: E501
:return: The package_type of this CreatePackageRequest. # noqa: E501
:rtype: PackageType
"""
return self._package_type
@package_type.setter
def package_type(self, package_type):
"""Sets the package_type of this CreatePackageRequest.
:param package_type: The package_type of this CreatePackageRequest. # noqa: E501
:type: PackageType
"""
if package_type is None:
raise ValueError("Invalid value for `package_type`, must not be `None`") # noqa: E501
self._package_type = package_type
@property
def properties(self):
"""Gets the properties of this CreatePackageRequest. # noqa: E501
:return: The properties of this CreatePackageRequest. # noqa: E501
:rtype: list[GraphNodePropertyRO]
"""
return self._properties
@properties.setter
def properties(self, properties):
"""Sets the properties of this CreatePackageRequest.
:param properties: The properties of this CreatePackageRequest. # noqa: E501
:type: list[GraphNodePropertyRO]
"""
if properties is None:
raise ValueError("Invalid value for `properties`, must not be `None`") # noqa: E501
self._properties = properties
@property
def dataset(self):
"""Gets the dataset of this CreatePackageRequest. # noqa: E501
:return: The dataset of this CreatePackageRequest. # noqa: E501
:rtype: str
"""
return self._dataset
@dataset.setter
def dataset(self, dataset):
"""Sets the dataset of this CreatePackageRequest.
:param dataset: The dataset of this CreatePackageRequest. # noqa: E501
:type: str
"""
if dataset is None:
raise ValueError("Invalid value for `dataset`, must not be `None`") # noqa: E501
self._dataset = dataset
@property
def owner(self):
"""Gets the owner of this CreatePackageRequest. # noqa: E501
:return: The owner of this CreatePackageRequest. # noqa: E501
:rtype: str
"""
return self._owner
@owner.setter
def owner(self, owner):
"""Sets the owner of this CreatePackageRequest.
:param owner: The owner of this CreatePackageRequest. # noqa: E501
:type: str
"""
self._owner = owner
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreatePackageRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| # coding: utf-8
"""
Blackfynn Swagger
Swagger documentation for the Blackfynn api # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class CreatePackageRequest(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'parent': 'str',
'name': 'str',
'state': 'PackageState',
'package_type': 'PackageType',
'properties': 'list[GraphNodePropertyRO]',
'dataset': 'str',
'owner': 'str'
}
attribute_map = {
'parent': 'parent',
'name': 'name',
'state': 'state',
'package_type': 'packageType',
'properties': 'properties',
'dataset': 'dataset',
'owner': 'owner'
}
def __init__(self, parent=None, name=None, state=None, package_type=None, properties=None, dataset=None, owner=None): # noqa: E501
"""CreatePackageRequest - a model defined in OpenAPI""" # noqa: E501
self._parent = None
self._name = None
self._state = None
self._package_type = None
self._properties = None
self._dataset = None
self._owner = None
self.discriminator = None
if parent is not None:
self.parent = parent
self.name = name
if state is not None:
self.state = state
self.package_type = package_type
self.properties = properties
self.dataset = dataset
if owner is not None:
self.owner = owner
@property
def parent(self):
"""Gets the parent of this CreatePackageRequest. # noqa: E501
:return: The parent of this CreatePackageRequest. # noqa: E501
:rtype: str
"""
return self._parent
@parent.setter
def parent(self, parent):
"""Sets the parent of this CreatePackageRequest.
:param parent: The parent of this CreatePackageRequest. # noqa: E501
:type: str
"""
self._parent = parent
@property
def name(self):
"""Gets the name of this CreatePackageRequest. # noqa: E501
:return: The name of this CreatePackageRequest. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this CreatePackageRequest.
:param name: The name of this CreatePackageRequest. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def state(self):
"""Gets the state of this CreatePackageRequest. # noqa: E501
:return: The state of this CreatePackageRequest. # noqa: E501
:rtype: PackageState
"""
return self._state
@state.setter
def state(self, state):
"""Sets the state of this CreatePackageRequest.
:param state: The state of this CreatePackageRequest. # noqa: E501
:type: PackageState
"""
self._state = state
@property
def package_type(self):
"""Gets the package_type of this CreatePackageRequest. # noqa: E501
:return: The package_type of this CreatePackageRequest. # noqa: E501
:rtype: PackageType
"""
return self._package_type
@package_type.setter
def package_type(self, package_type):
"""Sets the package_type of this CreatePackageRequest.
:param package_type: The package_type of this CreatePackageRequest. # noqa: E501
:type: PackageType
"""
if package_type is None:
raise ValueError("Invalid value for `package_type`, must not be `None`") # noqa: E501
self._package_type = package_type
@property
def properties(self):
"""Gets the properties of this CreatePackageRequest. # noqa: E501
:return: The properties of this CreatePackageRequest. # noqa: E501
:rtype: list[GraphNodePropertyRO]
"""
return self._properties
@properties.setter
def properties(self, properties):
"""Sets the properties of this CreatePackageRequest.
:param properties: The properties of this CreatePackageRequest. # noqa: E501
:type: list[GraphNodePropertyRO]
"""
if properties is None:
raise ValueError("Invalid value for `properties`, must not be `None`") # noqa: E501
self._properties = properties
@property
def dataset(self):
"""Gets the dataset of this CreatePackageRequest. # noqa: E501
:return: The dataset of this CreatePackageRequest. # noqa: E501
:rtype: str
"""
return self._dataset
@dataset.setter
def dataset(self, dataset):
"""Sets the dataset of this CreatePackageRequest.
:param dataset: The dataset of this CreatePackageRequest. # noqa: E501
:type: str
"""
if dataset is None:
raise ValueError("Invalid value for `dataset`, must not be `None`") # noqa: E501
self._dataset = dataset
@property
def owner(self):
"""Gets the owner of this CreatePackageRequest. # noqa: E501
:return: The owner of this CreatePackageRequest. # noqa: E501
:rtype: str
"""
return self._owner
@owner.setter
def owner(self, owner):
"""Sets the owner of this CreatePackageRequest.
:param owner: The owner of this CreatePackageRequest. # noqa: E501
:type: str
"""
self._owner = owner
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreatePackageRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| en | 0.572739 | # coding: utf-8 Blackfynn Swagger Swagger documentation for the Blackfynn api # noqa: E501 OpenAPI spec version: 1.0.0 Generated by: https://openapi-generator.tech # noqa: F401 NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. # noqa: E501 CreatePackageRequest - a model defined in OpenAPI # noqa: E501 Gets the parent of this CreatePackageRequest. # noqa: E501 :return: The parent of this CreatePackageRequest. # noqa: E501 :rtype: str Sets the parent of this CreatePackageRequest. :param parent: The parent of this CreatePackageRequest. # noqa: E501 :type: str Gets the name of this CreatePackageRequest. # noqa: E501 :return: The name of this CreatePackageRequest. # noqa: E501 :rtype: str Sets the name of this CreatePackageRequest. :param name: The name of this CreatePackageRequest. # noqa: E501 :type: str # noqa: E501 Gets the state of this CreatePackageRequest. # noqa: E501 :return: The state of this CreatePackageRequest. # noqa: E501 :rtype: PackageState Sets the state of this CreatePackageRequest. :param state: The state of this CreatePackageRequest. # noqa: E501 :type: PackageState Gets the package_type of this CreatePackageRequest. # noqa: E501 :return: The package_type of this CreatePackageRequest. # noqa: E501 :rtype: PackageType Sets the package_type of this CreatePackageRequest. :param package_type: The package_type of this CreatePackageRequest. # noqa: E501 :type: PackageType # noqa: E501 Gets the properties of this CreatePackageRequest. # noqa: E501 :return: The properties of this CreatePackageRequest. # noqa: E501 :rtype: list[GraphNodePropertyRO] Sets the properties of this CreatePackageRequest. :param properties: The properties of this CreatePackageRequest. 
# noqa: E501 :type: list[GraphNodePropertyRO] # noqa: E501 Gets the dataset of this CreatePackageRequest. # noqa: E501 :return: The dataset of this CreatePackageRequest. # noqa: E501 :rtype: str Sets the dataset of this CreatePackageRequest. :param dataset: The dataset of this CreatePackageRequest. # noqa: E501 :type: str # noqa: E501 Gets the owner of this CreatePackageRequest. # noqa: E501 :return: The owner of this CreatePackageRequest. # noqa: E501 :rtype: str Sets the owner of this CreatePackageRequest. :param owner: The owner of this CreatePackageRequest. # noqa: E501 :type: str Returns the model properties as a dict Returns the string representation of the model For `print` and `pprint` Returns true if both objects are equal Returns true if both objects are not equal | 1.750516 | 2 |
Problemas/UT4/Ejercicio 3.1a/solucion.py | JuanFKurucz/TEOCOMP | 0 | 6622556 | <filename>Problemas/UT4/Ejercicio 3.1a/solucion.py
import random
import time
def minDeMaxOutput(matriz):
    """Return the minimum of the per-row maxima of *matriz*, printing
    instrumentation counts (comparisons, assignments) along the way.

    BUG FIX: the original incremented the comparison counter *inside* the
    `if` bodies, so every comparison whose condition was false went
    uncounted; the counter is now bumped once per comparison performed,
    before the branch.

    :param matriz: list of lists of numbers; returns +inf if empty.
    """
    comparaciones = 0
    asignaciones = 1          # min = inf
    min = float('inf')
    for lista in matriz:
        asignaciones += 2     # lista, max
        max = float('-inf')
        for i in lista:
            asignaciones += 1     # i
            comparaciones += 1    # i > max (counted whether or not it holds)
            if i > max:
                asignaciones += 1
                max = i
        comparaciones += 1        # max < min (counted whether or not it holds)
        if max < min:
            asignaciones += 1
            min = max
    print("Comparaciones: %i, Asignaciones: %i, Total: %i" % (comparaciones, asignaciones, (comparaciones + asignaciones)))
    return min
def minDeMax(matriz):
    """Return the smallest of the per-row maxima of *matriz*.

    Empty matrix yields +inf; an empty row contributes -inf, matching the
    sentinel-based original.
    """
    return min(
        (max(fila, default=float('-inf')) for fila in matriz),
        default=float('inf'),
    )
return min
def generarLista(cantidadListas, cantidadNumeros, min, max):
    """Build a cantidadListas x cantidadNumeros matrix of random integers
    drawn uniformly from the inclusive range [min, max].

    (Parameter names `min`/`max` shadow builtins; kept for interface
    compatibility.)
    """
    return [
        [random.randint(min, max) for _ in range(cantidadNumeros)]
        for _ in range(cantidadListas)
    ]
def prueba(listas, numeros):
    """Generate a random listas x numeros matrix and time minDeMax on it,
    printing the sizes, the result and the elapsed seconds."""
    print("Cantidad listas: %i" % listas)
    print("Cantidad numeros: %i" % numeros)
    matriz = generarLista(listas, numeros, -100, 100)
    print("Arranco")
    inicio = time.perf_counter()
    print(minDeMax(matriz))
    transcurrido = time.perf_counter() - inicio
    print("--- %s segundos ---" % (transcurrido))
for i in range(1,10):
prueba(200,50)
prueba(2000,50)
prueba(20000,50)
print("=============================================")
| <filename>Problemas/UT4/Ejercicio 3.1a/solucion.py
import random
import time
def minDeMaxOutput(matriz):
comparaciones = 0
asignaciones = 1
min = float('inf')
for lista in matriz:
comparaciones += 1
asignaciones += 2
max = float('-inf')
for i in lista:
asignaciones += 1
comparaciones += 1
if i>max:
asignaciones += 1
comparaciones += 1
max=i
if max<min:
asignaciones += 1
comparaciones += 1
min = max
print("Comparaciones: %i, Asignaciones: %i, Total: %i" % (comparaciones,asignaciones,(comparaciones+asignaciones)))
return min
def minDeMax(matriz):
min = float('inf')
for lista in matriz:
max = float('-inf')
for i in lista:
if i>max:
max=i
if max<min:
min = max
return min
def generarLista(cantidadListas,cantidadNumeros,min,max):
lista = []
for i in range(cantidadListas):
lista.append([])
for n in range(cantidadNumeros):
lista[i].append(random.randint(min, max))
return lista
def prueba(listas,numeros):
print("Cantidad listas: %i" % listas)
print("Cantidad numeros: %i" % numeros)
matriz = generarLista(listas,numeros,-100,100)
print("Arranco")
start = time.perf_counter()
print(minDeMax(matriz))
elapsed = time.perf_counter()
elapsed = elapsed - start
print("--- %s segundos ---" % (elapsed))
for i in range(1,10):
prueba(200,50)
prueba(2000,50)
prueba(20000,50)
print("=============================================")
| none | 1 | 3.550285 | 4 | |
rates/views.py | avara1986/currency-api | 0 | 6622557 | from django_filters import rest_framework as filters
from oauth2_provider.contrib.rest_framework import TokenHasReadWriteScope
from rest_framework import viewsets, permissions
from rest_framework.decorators import action
from rest_framework.response import Response
from rates.models import Rate, Currency
from rates.serializers import RateSerializer, RateSerializerVersion2
from rates.utils import time_weighted_rate
class MilestoneRangeFilter(filters.FilterSet):
    """django-filter FilterSet for Rate querysets.

    Exposes an inclusive date range (``start_date``/``end_date``) over the
    ``milestone`` field, an exact ``date_invested`` match on the same
    field, and an exact ``currency`` match.
    """
    # milestone >= start_date (inclusive lower bound)
    start_date = filters.DateFilter(field_name='milestone', lookup_expr=('gte'), )
    # milestone <= end_date (inclusive upper bound)
    end_date = filters.DateFilter(field_name='milestone', lookup_expr=('lte'))
    # exact match on milestone
    date_invested = filters.DateFilter(field_name='milestone')
    currency = filters.CharFilter(field_name='currency')

    class Meta:
        model = Rate
        fields = ['milestone']
# ViewSets define the view behavior.
class RateViewSet(viewsets.ModelViewSet):
queryset = Rate.objects.all()
serializer_class = RateSerializer
filter_backends = (filters.DjangoFilterBackend,)
filter_class = MilestoneRangeFilter
permission_classes = [permissions.IsAuthenticated, TokenHasReadWriteScope]
def get_serializer_class(self):
if self.request.version == 'v1':
return RateSerializer
return RateSerializerVersion2
@action(methods=['post'], detail=False)
def exchange(self, request):
currency_from = Currency.objects.get(code=request.data["origin_currency"])
currency_to = Currency.objects.get(code=request.data["target_currency"])
amount = request.data["amount"]
date_invested = request.data["date_invested"]
query_from = Rate.objects.all().f_currency(currency_from)
query_to = Rate.objects.all().f_currency(currency_to)
if date_invested:
query_from = query_from.f_milestone(date_invested)
query_to = query_to.f_milestone(date_invested)
query_from = query_from.order_by("-milestone").first()
query_to = query_to.order_by("-milestone").first()
rate = (query_to.amount / query_from.amount)
return Response({
"success": True,
"query": {
"from": currency_from.code,
"to": currency_to.code,
"amount": amount
},
"info": {
"rate": rate
},
"date": query_from.milestone,
"result": amount * rate
})
@action(methods=['post'], detail=False)
def time_weighted_rate(self, request):
currency_from = Currency.objects.get(code=request.data["origin_currency"])
currency_to = Currency.objects.get(code=request.data["target_currency"])
amount = request.data["amount"]
date_invested = request.data["date_invested"]
query_from = Rate.objects.all().f_currency(currency_from)
query_to = Rate.objects.all().f_currency(currency_to)
query_from = query_from.f_milestone(date_invested)
query_from = query_from.order_by("-milestone").first()
query_to = query_to.order_by("-milestone").first()
return Response({
"success": True,
"from": currency_from.code,
"to": currency_to.code,
"result": "%.3f" % (time_weighted_rate(amount, query_from.amount, query_to.amount) * 100)
})
@action(methods=['get'], detail=False, permission_classes=[permissions.IsAuthenticated])
def graph(self, request):
query = self.get_queryset().filter(currency=request.query_params["currency"].upper())
return Response([{"date": k["day"], "amount": k["wieght"]} for k in query.group_months()])
| from django_filters import rest_framework as filters
from oauth2_provider.contrib.rest_framework import TokenHasReadWriteScope
from rest_framework import viewsets, permissions
from rest_framework.decorators import action
from rest_framework.response import Response
from rates.models import Rate, Currency
from rates.serializers import RateSerializer, RateSerializerVersion2
from rates.utils import time_weighted_rate
class MilestoneRangeFilter(filters.FilterSet):
    """Query-parameter filters for Rate list endpoints.

    ``start_date`` / ``end_date`` bound the milestone date (inclusive,
    via gte/lte lookups), ``date_invested`` matches an exact milestone,
    and ``currency`` matches the currency field.
    """
    start_date = filters.DateFilter(field_name='milestone', lookup_expr=('gte'), )  # milestone >= start_date
    end_date = filters.DateFilter(field_name='milestone', lookup_expr=('lte'))  # milestone <= end_date
    date_invested = filters.DateFilter(field_name='milestone')  # exact milestone match
    currency = filters.CharFilter(field_name='currency')
    class Meta:
        model = Rate
        fields = ['milestone']
# ViewSets define the view behavior.
class RateViewSet(viewsets.ModelViewSet):
    """CRUD API for exchange rates plus conversion helper endpoints.

    The serializer is chosen per API version (v1 vs later), list results
    can be narrowed with MilestoneRangeFilter, and access requires an
    authenticated OAuth2 token with read/write scope.
    """
    queryset = Rate.objects.all()
    serializer_class = RateSerializer
    filter_backends = (filters.DjangoFilterBackend,)
    # NOTE(review): modern django-filter spells this `filterset_class`;
    # `filter_class` only works on older releases -- confirm the pinned version.
    filter_class = MilestoneRangeFilter
    permission_classes = [permissions.IsAuthenticated, TokenHasReadWriteScope]
    def get_serializer_class(self):
        """Return the serializer matching the request's API version."""
        if self.request.version == 'v1':
            return RateSerializer
        return RateSerializerVersion2
    @action(methods=['post'], detail=False)
    def exchange(self, request):
        """Convert ``amount`` from the origin to the target currency.

        POST body keys: origin_currency, target_currency, amount,
        date_invested.  When date_invested is truthy, both sides are
        limited to rates at or matching that milestone; otherwise the
        latest rate of each currency is used.
        """
        currency_from = Currency.objects.get(code=request.data["origin_currency"])
        currency_to = Currency.objects.get(code=request.data["target_currency"])
        amount = request.data["amount"]
        date_invested = request.data["date_invested"]
        # f_currency / f_milestone are custom queryset helpers on Rate.
        query_from = Rate.objects.all().f_currency(currency_from)
        query_to = Rate.objects.all().f_currency(currency_to)
        if date_invested:
            query_from = query_from.f_milestone(date_invested)
            query_to = query_to.f_milestone(date_invested)
        # Most recent matching Rate row for each side.
        query_from = query_from.order_by("-milestone").first()
        query_to = query_to.order_by("-milestone").first()
        # NOTE(review): raises AttributeError if either queryset is empty
        # (first() returns None) and ZeroDivisionError if the source rate
        # amount is 0 -- confirm inputs are always populated.
        rate = (query_to.amount / query_from.amount)
        return Response({
            "success": True,
            "query": {
                "from": currency_from.code,
                "to": currency_to.code,
                "amount": amount
            },
            "info": {
                "rate": rate
            },
            "date": query_from.milestone,
            "result": amount * rate
        })
    @action(methods=['post'], detail=False)
    def time_weighted_rate(self, request):
        """Return the time-weighted rate between two currencies as a percentage string.

        POST body keys: origin_currency, target_currency, amount,
        date_invested.  Delegates the math to the module-level
        ``time_weighted_rate`` helper (the unqualified call below resolves
        to the imported function, not to this method).
        """
        currency_from = Currency.objects.get(code=request.data["origin_currency"])
        currency_to = Currency.objects.get(code=request.data["target_currency"])
        amount = request.data["amount"]
        date_invested = request.data["date_invested"]
        query_from = Rate.objects.all().f_currency(currency_from)
        query_to = Rate.objects.all().f_currency(currency_to)
        # NOTE(review): only the origin side is restricted by date_invested;
        # the target side always uses its latest rate -- confirm intentional.
        query_from = query_from.f_milestone(date_invested)
        query_from = query_from.order_by("-milestone").first()
        query_to = query_to.order_by("-milestone").first()
        return Response({
            "success": True,
            "from": currency_from.code,
            "to": currency_to.code,
            "result": "%.3f" % (time_weighted_rate(amount, query_from.amount, query_to.amount) * 100)
        })
    @action(methods=['get'], detail=False, permission_classes=[permissions.IsAuthenticated])
    def graph(self, request):
        """Return per-month aggregates for one currency, shaped for charting.

        Query params: ``currency`` (currency code, upper-cased before the filter).
        """
        query = self.get_queryset().filter(currency=request.query_params["currency"].upper())
        # NOTE(review): "wieght" (sic) must match the key emitted by the custom
        # group_months() aggregation -- do not "fix" the spelling only here.
        return Response([{"date": k["day"], "amount": k["wieght"]} for k in query.group_months()])
| en | 0.531826 | # ViewSets define the view behavior. | 2.117395 | 2 |
backend/currency_exchanger/stocks/admin.py | norbertcyran/currency-exchanger | 0 | 6622558 | <reponame>norbertcyran/currency-exchanger<filename>backend/currency_exchanger/stocks/admin.py
from django.contrib import admin
from .models import Stock, StockTransfer
# Expose the stock models in the default Django admin with stock ModelAdmin.
admin.site.register(Stock)
admin.site.register(StockTransfer)
| from django.contrib import admin
from .models import Stock, StockTransfer
admin.site.register(Stock)
admin.site.register(StockTransfer) | none | 1 | 1.197983 | 1 | |
src/AxisPrediction/Validation.py | zmcx16/AxisTradeCult | 35 | 6622559 | from builtins import int
from enum import Enum
from numpy import NaN
import random
from sklearn import datasets
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.neighbors import *
from sklearn.svm import *
from sklearn.naive_bayes import *
from sklearn.neural_network import *
from sklearn.tree import *
from sklearn.gaussian_process import *
from sklearn import metrics
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import label_binarize
from sklearn.calibration import CalibratedClassifierCV
from imblearn.over_sampling import RandomOverSampler
from attr._make import validate
import Program.GlobalVar as gv
from CommonDef.DefStr import *
from Program.Common import *
from Program.ProcessData import *
from Statistics_TechIndicators.CalcStatistics import *
from AxisPrediction.Backtest import *
def ForwardingLeaveOneOutValidation(Data, Target, TrDataSize):
    """Walk-forward one-step-ahead validation of a direction classifier.

    For each test day, trains a fresh one-vs-rest DecisionTree on the
    preceding ``TrDataSize`` rows, predicts the single next row, feeds
    buy/sell signals into a Backtest, then reports the trade list,
    profit/ROI, a trade chart, and sensitivity/specificity over all
    predictions.

    Args:
        Data: feature DataFrame indexed by date (must contain the columns
            named by strClose/strHigh/strLow).
        Target: per-row labels in {-1, 0, 1}.
        TrDataSize: sliding training-window length in rows.
    """
    OriginalCash = 50000
    perShareSize = 100
    backtestParam = {BacktestParam.strStrategyParams:
                         {BacktestParam.strBuyStrategy: BacktestParam.BuyFixed
                          , BacktestParam.strSellStrategy: BacktestParam.SellAll
                          , BacktestParam.perShareSize: perShareSize}}
    backtest = Backtest(OriginalCash, False, backtestParam)
    ValidateResult = []
    # Number of one-day test steps after the initial training window.
    TsLength = len(Data.index) - TrDataSize
    print("TrData Size: {0}".format(TrDataSize))
    print("TsData Size: {0}".format(TsLength))
    for i in range(TsLength):
        # Sliding window: train on [i, TrDataSize+i), test on the next row.
        x_train = Data[i:TrDataSize+i]
        y_train = Target[i:TrDataSize+i]
        x_test = Data[TrDataSize+i:TrDataSize+i+1]
        y_test = Target[TrDataSize+i:TrDataSize+i+1]
        # Scaler is refit per window; the test row uses the same transform.
        scaler = preprocessing.StandardScaler().fit(x_train)
        x_train = scaler.transform(x_train)
        x_test = scaler.transform(x_test)
        # Binarize the output
        y_train = label_binarize(y_train, classes=[-1, 0, 1])
        y_test = label_binarize(y_test, classes=[-1, 0, 1])
        #clf= OneVsRestClassifier(KNeighborsClassifier(n_neighbors=5))
        #clf = OneVsRestClassifier(LinearSVC(random_state=0))
        #clf = OneVsRestClassifier(GaussianNB())
        #clf = OneVsRestClassifier(MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1))
        clf = OneVsRestClassifier(DecisionTreeClassifier(random_state=0))
        #clf = OneVsRestClassifier(GaussianProcessClassifier(kernel=1.0 * kernels.RBF(length_scale=1.0)))
        clf.fit(x_train, y_train)
        try:
            predict_prob = clf.predict_proba(x_test)
        except Exception as exc:
            #print('Generated an exception: %s' % exc)
            # Some estimators (e.g. LinearSVC) have no predict_proba.
            predict_prob = clf.decision_function(x_test)
        #for index in range(len(predict_prob)):
        #    print(predict_prob[index])
        # OvR clf.predict may produce [0 0 0] when prob equal: [0.4 0.4 0.2], we calculate by predict_proba
        class_labels = [-1, 0, 1]
        max_prob_index_list = predict_prob.argmax(axis=1)
        temp = []
        for x in range(len(max_prob_index_list)):
            temp.append(class_labels[max_prob_index_list[x]])
        # NOTE(review): positional `classes` argument is keyword-only in
        # recent scikit-learn releases -- confirm the pinned version.
        predict_result = label_binarize(temp, class_labels)
        date = Data.iloc[TrDataSize+i].name
        close = Data.iloc[TrDataSize+i][strClose]
        high = Data.iloc[TrDataSize+i][strHigh]
        low = Data.iloc[TrDataSize+i][strLow]
        params = {strDate: date, strClose: close, strHigh: high, strLow: low}
        # Column 0 is class -1, column 2 is class +1 (order of class_labels).
        # Presumably -1 encodes "buy" in this Target scheme -- verify encoding.
        if predict_result[0][0] == 1:
            backtest.RunStrategy(BacktestParam.BuySignal, BacktestParam.EasyStrategy, params)
        elif predict_result[0][2] == 1:
            backtest.RunStrategy(BacktestParam.SellSignal, BacktestParam.EasyStrategy, params)
        ValidateResult.append({strPredictVal: predict_result, strAnsVal: y_test, strPredictProbVal: predict_prob})
        if i%100 == 0:
            print("Training & Testing Model{0} finished".format(i))
    print("Training & Testing finished.")
    # Final liquidation reuses `params` from the last loop iteration;
    # assumes TsLength >= 1 (otherwise NameError) -- TODO confirm callers.
    backtest.RunStrategy(BacktestParam.SellSignal, BacktestParam.EasyStrategy, params)
    print("---Trade List----------------------")
    backtest.PrintTradeList()
    print("-----------------------------------")
    FinalCash = backtest.Cash
    Profit = FinalCash - OriginalCash
    print("Profit: {0}".format(Profit))
    print("ROI: {:.2%}".format(Profit/OriginalCash))
    backtest.PlotTradeChart(Data[TrDataSize:])
    #print(ValidateResult)
    # Stack all per-step predictions/answers/probabilities into one matrix each.
    total_result = {strPredictVal:ValidateResult[0][strPredictVal], strPredictProbVal:ValidateResult[0][strPredictProbVal], strAnsVal:ValidateResult[0][strAnsVal]}
    for index in range(1, len(ValidateResult)):
        total_result[strPredictVal] = numpy.concatenate((total_result[strPredictVal], ValidateResult[index][strPredictVal]), axis=0)
        total_result[strPredictProbVal] = numpy.concatenate((total_result[strPredictProbVal], ValidateResult[index][strPredictProbVal]), axis=0)
        total_result[strAnsVal] = numpy.concatenate((total_result[strAnsVal], ValidateResult[index][strAnsVal]), axis=0)
    #print(total_result)
    ShowSensitivitySpecificityForMultiLabels(total_result[strAnsVal], total_result[strPredictVal], total_result[strPredictProbVal], [1, 0, -1])
def ForwardingLeaveOneOutRandom(Data, Target, TrDataSize):
    """Baseline backtest that trades at random instead of using a model.

    Runs ``RandomCount`` independently seeded simulations over the test
    period (everything after the first ``TrDataSize`` rows).  Each day
    there is a 2% chance to buy and a 2% chance to sell; any open
    position is liquidated at the end of a run.  Prints per-run profit
    and the average profit/ROI across runs for comparison against the
    model-driven validation.

    Args:
        Data: feature DataFrame indexed by date (must contain the columns
            named by strClose/strHigh/strLow).
        Target: unused; kept so the signature matches
            ForwardingLeaveOneOutValidation.
        TrDataSize: number of leading rows excluded from trading.
    """
    OriginalCash = 50000
    perShareSize = 100
    RandomCount = 1000
    TotalProfit = 0

    def _day_params(index):
        # Price snapshot the strategies need for the row at `index`.
        row = Data.iloc[index]
        return {strDate: row.name, strClose: row[strClose],
                strHigh: row[strHigh], strLow: row[strLow]}

    for r_count in range(RandomCount):
        random.seed(r_count)
        backtestParam = {BacktestParam.strStrategyParams:
                             {BacktestParam.strBuyStrategy: BacktestParam.BuyFixed
                              , BacktestParam.strSellStrategy: BacktestParam.SellAll
                              , BacktestParam.perShareSize: perShareSize}}
        backtest = Backtest(OriginalCash, False, backtestParam)
        TsLength = len(Data.index) - TrDataSize
        # Reset per run: previously `params` leaked across seeded runs, so a
        # run with no random trades would liquidate with a stale (or, on the
        # very first run, undefined) day's prices.
        params = None
        for i in range(TsLength):
            r = random.randint(0, 99)
            if r >= 98:  # 2% chance: buy signal
                params = _day_params(TrDataSize + i)
                backtest.RunStrategy(BacktestParam.BuySignal, BacktestParam.EasyStrategy, params)
            elif r < 2:  # 2% chance: sell signal
                params = _day_params(TrDataSize + i)
                backtest.RunStrategy(BacktestParam.SellSignal, BacktestParam.EasyStrategy, params)
        # Liquidate at the last traded day's prices; nothing to do if the
        # run never traded.
        if params is not None:
            backtest.RunStrategy(BacktestParam.SellSignal, BacktestParam.EasyStrategy, params)
        FinalCash = backtest.Cash
        Profit = FinalCash - OriginalCash
        TotalProfit += Profit
        print("Profit_{0}: {1}".format(r_count, Profit))
    print("AvgProfit: {0}".format(TotalProfit/RandomCount))
    print("ROI: {:.2%}".format(TotalProfit/RandomCount/OriginalCash))
def RunValidation(Data, Target, type, param):
    """Dispatch a validation run to the handler registered for *type*.

    Unknown types are silently ignored, matching the original if/elif
    fall-through behavior.
    """
    dispatch = {
        ValidationType.ForwardingLeaveOneOut: ForwardingLeaveOneOutValidation,
        ValidationType.ForwardingLeaveOneOutRandom: ForwardingLeaveOneOutRandom,
    }
    handler = dispatch.get(type)
    if handler is not None:
        handler(Data, Target, param[ValidationType.TrDataSize])
class ValidationType(Enum):
    """Identifiers for the supported validation strategies.

    The string member ``TrDataSize`` doubles as the key expected in the
    ``param`` dict accepted by RunValidation.
    """
    ForwardingLeaveOneOut = 1
    ForwardingLeaveOneOutRandom = 999
    # ForwardingLeaveOneOut params
    TrDataSize = 'TrDataSize'
from enum import Enum
from numpy import NaN
import random
from sklearn import datasets
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.neighbors import *
from sklearn.svm import *
from sklearn.naive_bayes import *
from sklearn.neural_network import *
from sklearn.tree import *
from sklearn.gaussian_process import *
from sklearn import metrics
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import label_binarize
from sklearn.calibration import CalibratedClassifierCV
from imblearn.over_sampling import RandomOverSampler
from attr._make import validate
import Program.GlobalVar as gv
from CommonDef.DefStr import *
from Program.Common import *
from Program.ProcessData import *
from Statistics_TechIndicators.CalcStatistics import *
from AxisPrediction.Backtest import *
def ForwardingLeaveOneOutValidation(Data, Target, TrDataSize):
OriginalCash = 50000
perShareSize = 100
backtestParam = {BacktestParam.strStrategyParams:
{BacktestParam.strBuyStrategy: BacktestParam.BuyFixed
, BacktestParam.strSellStrategy: BacktestParam.SellAll
, BacktestParam.perShareSize: perShareSize}}
backtest = Backtest(OriginalCash, False, backtestParam)
ValidateResult = []
TsLength = len(Data.index) - TrDataSize
print("TrData Size: {0}".format(TrDataSize))
print("TsData Size: {0}".format(TsLength))
for i in range(TsLength):
x_train = Data[i:TrDataSize+i]
y_train = Target[i:TrDataSize+i]
x_test = Data[TrDataSize+i:TrDataSize+i+1]
y_test = Target[TrDataSize+i:TrDataSize+i+1]
scaler = preprocessing.StandardScaler().fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
# Binarize the output
y_train = label_binarize(y_train, classes=[-1, 0, 1])
y_test = label_binarize(y_test, classes=[-1, 0, 1])
#clf= OneVsRestClassifier(KNeighborsClassifier(n_neighbors=5))
#clf = OneVsRestClassifier(LinearSVC(random_state=0))
#clf = OneVsRestClassifier(GaussianNB())
#clf = OneVsRestClassifier(MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1))
clf = OneVsRestClassifier(DecisionTreeClassifier(random_state=0))
#clf = OneVsRestClassifier(GaussianProcessClassifier(kernel=1.0 * kernels.RBF(length_scale=1.0)))
clf.fit(x_train, y_train)
try:
predict_prob = clf.predict_proba(x_test)
except Exception as exc:
#print('Generated an exception: %s' % exc)
predict_prob = clf.decision_function(x_test)
#for index in range(len(predict_prob)):
# print(predict_prob[index])
# OvR clf.predict may produce [0 0 0] when prob equal: [0.4 0.4 0.2], we calculate by predict_proba
class_labels = [-1, 0, 1]
max_prob_index_list = predict_prob.argmax(axis=1)
temp = []
for x in range(len(max_prob_index_list)):
temp.append(class_labels[max_prob_index_list[x]])
predict_result = label_binarize(temp, class_labels)
date = Data.iloc[TrDataSize+i].name
close = Data.iloc[TrDataSize+i][strClose]
high = Data.iloc[TrDataSize+i][strHigh]
low = Data.iloc[TrDataSize+i][strLow]
params = {strDate: date, strClose: close, strHigh: high, strLow: low}
if predict_result[0][0] == 1:
backtest.RunStrategy(BacktestParam.BuySignal, BacktestParam.EasyStrategy, params)
elif predict_result[0][2] == 1:
backtest.RunStrategy(BacktestParam.SellSignal, BacktestParam.EasyStrategy, params)
ValidateResult.append({strPredictVal: predict_result, strAnsVal: y_test, strPredictProbVal: predict_prob})
if i%100 == 0:
print("Training & Testing Model{0} finished".format(i))
print("Training & Testing finished.")
backtest.RunStrategy(BacktestParam.SellSignal, BacktestParam.EasyStrategy, params)
print("---Trade List----------------------")
backtest.PrintTradeList()
print("-----------------------------------")
FinalCash = backtest.Cash
Profit = FinalCash - OriginalCash
print("Profit: {0}".format(Profit))
print("ROI: {:.2%}".format(Profit/OriginalCash))
backtest.PlotTradeChart(Data[TrDataSize:])
#print(ValidateResult)
total_result = {strPredictVal:ValidateResult[0][strPredictVal], strPredictProbVal:ValidateResult[0][strPredictProbVal], strAnsVal:ValidateResult[0][strAnsVal]}
for index in range(1, len(ValidateResult)):
total_result[strPredictVal] = numpy.concatenate((total_result[strPredictVal], ValidateResult[index][strPredictVal]), axis=0)
total_result[strPredictProbVal] = numpy.concatenate((total_result[strPredictProbVal], ValidateResult[index][strPredictProbVal]), axis=0)
total_result[strAnsVal] = numpy.concatenate((total_result[strAnsVal], ValidateResult[index][strAnsVal]), axis=0)
#print(total_result)
ShowSensitivitySpecificityForMultiLabels(total_result[strAnsVal], total_result[strPredictVal], total_result[strPredictProbVal], [1, 0, -1])
def ForwardingLeaveOneOutRandom(Data, Target, TrDataSize):
OriginalCash = 50000
perShareSize = 100
RandomCount = 1000
TotalProfit = 0
for r_count in range(RandomCount):
random.seed(r_count)
backtestParam = {BacktestParam.strStrategyParams:
{BacktestParam.strBuyStrategy: BacktestParam.BuyFixed
, BacktestParam.strSellStrategy: BacktestParam.SellAll
, BacktestParam.perShareSize: perShareSize}}
backtest = Backtest(OriginalCash, False, backtestParam)
TsLength = len(Data.index) - TrDataSize
for i in range(TsLength):
r = random.randint(0,99)
if r >= 98:
date = Data.iloc[TrDataSize+i].name
close = Data.iloc[TrDataSize+i][strClose]
high = Data.iloc[TrDataSize+i][strHigh]
low = Data.iloc[TrDataSize+i][strLow]
params = {strDate: date, strClose: close, strHigh: high, strLow: low}
backtest.RunStrategy(BacktestParam.BuySignal, BacktestParam.EasyStrategy, params)
elif r < 2:
date = Data.iloc[TrDataSize+i].name
close = Data.iloc[TrDataSize+i][strClose]
high = Data.iloc[TrDataSize+i][strHigh]
low = Data.iloc[TrDataSize+i][strLow]
params = {strDate: date, strClose: close, strHigh: high, strLow: low}
backtest.RunStrategy(BacktestParam.SellSignal, BacktestParam.EasyStrategy, params)
backtest.RunStrategy(BacktestParam.SellSignal, BacktestParam.EasyStrategy, params)
FinalCash = backtest.Cash
Profit = FinalCash - OriginalCash
TotalProfit += Profit
print("Profit_{0}: {1}".format(r_count, Profit))
print("AvgProfit: {0}".format(TotalProfit/RandomCount))
print("ROI: {:.2%}".format(TotalProfit/RandomCount/OriginalCash))
def RunValidation(Data, Target, type, param):
if ValidationType.ForwardingLeaveOneOut == type:
ForwardingLeaveOneOutValidation(Data, Target, param[ValidationType.TrDataSize])
elif ValidationType.ForwardingLeaveOneOutRandom == type:
ForwardingLeaveOneOutRandom(Data, Target, param[ValidationType.TrDataSize])
class ValidationType(Enum):
ForwardingLeaveOneOut = 1
ForwardingLeaveOneOutRandom = 999
# ForwardingLeaveOneOut params
TrDataSize = 'TrDataSize' | en | 0.277404 | # Binarize the output #clf= OneVsRestClassifier(KNeighborsClassifier(n_neighbors=5)) #clf = OneVsRestClassifier(LinearSVC(random_state=0)) #clf = OneVsRestClassifier(GaussianNB()) #clf = OneVsRestClassifier(MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1)) #clf = OneVsRestClassifier(GaussianProcessClassifier(kernel=1.0 * kernels.RBF(length_scale=1.0))) #print('Generated an exception: %s' % exc) #for index in range(len(predict_prob)): # print(predict_prob[index]) # OvR clf.predict may produce [0 0 0] when prob equal: [0.4 0.4 0.2], we calculate by predict_proba #print(ValidateResult) #print(total_result) # ForwardingLeaveOneOut params | 2.339751 | 2 |
src/predictor.py | reddelexc/cryptoping-trader | 12 | 6622560 | import os
import gc
import time
import joblib
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error, median_absolute_error
from datetime import datetime
from threading import RLock, Thread
from .util import PoolObject, yobit_err, form_traceback
from .constants import predictor_main_cols, predictor_target_col, predictor_dataset, \
predictor_dummy_cols, trained_model, learning_period
lock = RLock()
class Predictor(PoolObject):
    """Random-forest signal predictor with on-disk artifact persistence.

    Trained artifacts (fitted model, dummy-column index, training date,
    validation metrics) are persisted with joblib under ``trained_model``
    so a restarted process can load them and serve predictions
    immediately.  ``available`` is toggled under ``lock`` while artifacts
    are being written -- presumably so consumers skip the predictor
    mid-write (confirm against pool users).
    """
    def __init__(self):
        PoolObject.__init__(self)
        # Persisted artifacts; load_stuff() repopulates them if a trained
        # model already exists on disk.
        self.dummies = None
        self.model = None
        self.model_date = None
        self.metrics = None
        self.load_stuff()
        # Working DataFrames used during learn()/predict().
        self.data = None
        self.train_data = None
        self.val_data = None
        self.available = True
        print('predictor: started')
    def get_report(self):
        """Build a human-readable summary of the model date and metrics."""
        report = ' - Model last training date:\n'
        report += '   * {0}\n'.format(self.model_date)
        report += ' - Model metrics:\n'
        if self.metrics is None:
            report += '   * None\n'
            return report
        for k, v in self.metrics.items():
            if k == 'cols':
                # 'cols' is a comma-joined string; list one column per line.
                report += '   * {0}:\n'.format(k)
                for token in v.split(','):
                    report += '     > {0}\n'.format(token)
            else:
                report += '   * {0}: {1:.2f}\n'.format(k, v)
        return report
    def learn(self):
        """Full retraining pass: reload the dataset, then fit and persist."""
        self.read_and_prepare_data()
        self.train()
    def predict(self, signal):
        """Predict the target value for a single signal dict.

        The signal is wrapped in a one-row DataFrame, run through the same
        feature derivation as training, aligned to the persisted dummy
        columns, and scored with the persisted model.
        """
        self.data = pd.DataFrame(signal, index=[0])
        if self.model is None:
            self.load_stuff()
        self.read_and_prepare_data(to_predict=True)
        data_use_cols = self.data[predictor_main_cols]
        # reindex aligns the one-row frame to the training-time dummy
        # columns, filling unseen dummies with 0.
        data_dummied = data_use_cols.reindex(columns=self.dummies, fill_value=0)
        data_dummied.pop(predictor_target_col)
        x = data_dummied
        preds = self.model.predict(x)
        return preds[0]
    def read_and_prepare_data(self, to_predict=False):
        """Derive feature columns on self.data; optionally load and split it.

        When training (to_predict=False) the CSV dataset is loaded, rows
        without a '1h_max' are dropped, and only the most recent 75% is
        kept; afterwards the penultimate calendar day becomes val_data,
        everything before it train_data (the final day is excluded from
        both splits).  When predicting, self.data is assumed to already
        hold the one-row signal frame.
        """
        if not to_predict:
            self.data = pd.read_csv(predictor_dataset)
            self.data = self.data[self.data['1h_max'].notnull()]
            train_size = int(self.data.shape[0] * 0.75)
            self.data = self.data.iloc[-train_size:].reset_index(drop=True)
        # Calendar components of the signal timestamp as separate features.
        self.data['date'] = pd.to_datetime(self.data['date'], format='%Y-%m-%d %H:%M:%S')
        self.data['year'] = self.data['date'].apply(lambda d: d.year)
        self.data['month'] = self.data['date'].apply(lambda d: d.month)
        self.data['day'] = self.data['date'].apply(lambda d: d.day)
        self.data['hour'] = self.data['date'].apply(lambda d: d.hour)
        self.data['minute'] = self.data['date'].apply(lambda d: d.minute)
        self.data['exchange'] = self.data['exchange'].apply(yobit_err)
        # Percentage gain of each horizon's maximum over the entry price.
        self.data['1h_per'] = (self.data['1h_max'] / self.data['price_btc'] - 1) * 100
        self.data['6h_per'] = (self.data['6h_max'] / self.data['price_btc'] - 1) * 100
        self.data['24h_per'] = (self.data['24h_max'] / self.data['price_btc'] - 1) * 100
        self.data['48h_per'] = (self.data['48h_max'] / self.data['price_btc'] - 1) * 100
        self.data['7d_per'] = (self.data['7d_max'] / self.data['price_btc'] - 1) * 100
        if not to_predict:
            # Walk back over the last calendar day (discarded as incomplete)
            # and the one before it (used as the validation slice).
            last_index = self.data.shape[0] - 1
            last_day = self.data.iloc[-1]['day']
            while self.data.iloc[last_index]['day'] == last_day:
                last_index -= 1
            val_end_index = last_index + 1
            last_day = self.data.iloc[last_index]['day']
            while self.data.iloc[last_index]['day'] == last_day:
                last_index -= 1
            val_start_index = last_index + 1
            self.train_data = self.data.iloc[:val_start_index].reset_index(drop=True)
            self.val_data = self.data.iloc[val_start_index:val_end_index].reset_index(drop=True)
    def train(self):
        """Two-phase training.

        Phase 1 fits on train_data only, scores val_data and persists the
        resulting metrics.  Phase 2 refits on the full dataset and
        persists the production model.  Large intermediates are dropped
        and gc'd after each phase to limit memory.
        """
        train_data_use_cols = self.train_data[predictor_main_cols]
        val_data_use_cols = self.val_data[predictor_main_cols]
        train_data_dummied = pd.get_dummies(train_data_use_cols, columns=predictor_dummy_cols)
        # Align validation columns to the training dummies (unseen -> 0).
        val_data_dummied = val_data_use_cols.reindex(columns=train_data_dummied.columns, fill_value=0)
        train_y = train_data_dummied.pop(predictor_target_col)
        train_x = train_data_dummied
        test_y = val_data_dummied.pop(predictor_target_col)
        test_x = val_data_dummied
        self.pool['bot'].send(['Predictor: started training for metrics'])
        val_model = RandomForestRegressor(n_estimators=100, random_state=100)
        val_model.fit(train_x, train_y)
        self.metrics = Predictor.get_metrics(predictor_main_cols, test_y, val_model.predict(test_x))
        self.dump_metrics()
        self.train_data = None
        self.val_data = None
        gc.collect()
        self.pool['bot'].send(['Predictor: finished training for metrics'])
        data_use_cols = self.data[predictor_main_cols]
        data_dummied = pd.get_dummies(data_use_cols, columns=predictor_dummy_cols)
        # Remember the full dummy-column index for aligning predict() input.
        self.dummies = data_dummied.columns
        train_y = data_dummied.pop(predictor_target_col)
        train_x = data_dummied
        self.pool['bot'].send(['Predictor: started training for real'])
        model = RandomForestRegressor(n_estimators=100, random_state=100)
        model.fit(train_x, train_y)
        self.model = model
        self.model_date = datetime.utcnow()
        self.dump_stuff()
        # Freed here; predict() reloads from disk on demand.
        self.model = None
        self.dummies = None
        self.data = None
        gc.collect()
        self.pool['bot'].send(['Predictor: finished training for real'])
    def dump_stuff(self):
        """Persist dummies, model and model_date to the trained_model dir."""
        with lock:
            self.available = False
        if not os.path.exists(trained_model):
            os.makedirs(trained_model)
        joblib.dump(self.dummies, os.path.join(trained_model, 'dummies'))
        joblib.dump(self.model, os.path.join(trained_model, 'model'))
        joblib.dump(self.model_date, os.path.join(trained_model, 'model_date'))
        with lock:
            self.available = True
    def dump_metrics(self):
        """Persist the metrics dict to the trained_model dir."""
        with lock:
            self.available = False
        if not os.path.exists(trained_model):
            os.makedirs(trained_model)
        joblib.dump(self.metrics, os.path.join(trained_model, 'metrics'))
        with lock:
            self.available = True
    def load_stuff(self):
        """Reload persisted artifacts; no-op when nothing was saved yet.

        NOTE(review): assumes all four files exist together once the
        directory exists -- a partially written directory would raise.
        """
        if not os.path.exists(trained_model):
            return
        self.dummies = joblib.load(os.path.join(trained_model, 'dummies'))
        self.model = joblib.load(os.path.join(trained_model, 'model'))
        self.model_date = joblib.load(os.path.join(trained_model, 'model_date'))
        self.metrics = joblib.load(os.path.join(trained_model, 'metrics'))
    @staticmethod
    def get_metrics(cols, real, preds):
        """Compute accuracy-band and bias statistics for predictions.

        Args:
            cols: feature-column names (recorded for the report).
            real: actual target values as a pandas Series (uses .values).
            preds: model predictions as an array-like of the same length.

        Returns:
            Dict of summary statistics: central tendencies of real vs
            predicted, mean/median absolute deviation, the fraction of
            predictions within 1/5/10 points of the actual value, and the
            fraction predicting below vs at-or-above the actual value.
        """
        dev_1 = 0
        dev_5 = 0
        dev_10 = 0
        less_pred = 0
        more_pred = 0
        length = len(real)
        real = real.values
        for i in range(len(real)):
            if preds[i] >= real[i]:
                more_pred += 1
            if preds[i] < real[i]:
                less_pred += 1
            if abs(real[i] - preds[i]) <= 1:
                dev_1 += 1
            if abs(real[i] - preds[i]) <= 5:
                dev_5 += 1
            if abs(real[i] - preds[i]) <= 10:
                dev_10 += 1
        metrics = {
            'cols': ', '.join(cols),
            'real_mean': real.mean(),
            'real_median': np.median(real),
            'real_75_percentile': np.percentile(real, 75),
            'preds_mean': preds.mean(),
            'preds_median': np.median(preds),
            'preds_75_percentile': np.percentile(preds, 75),
            'mean_deviation': mean_absolute_error(real, preds),
            'median_deviation': median_absolute_error(real, preds),
            'deviation <= 1%': dev_1 / length,
            'deviation <= 5%': dev_5 / length,
            'deviation <= 10%': dev_10 / length,
            'pred < real': less_pred / length,
            'pred >= real': more_pred / length
        }
        return metrics
class PredictorLearnThread(Thread):
    """Background worker that periodically refreshes the dataset and retrains."""

    def __init__(self, predictor, client, bot):
        Thread.__init__(self)
        self.predictor = predictor
        self.client = client
        self.bot = bot

    def _cycle(self):
        # One full refresh-and-retrain pass, with progress reports to the bot.
        self.bot.send(['Updating dataset...'])
        self.client.update_dataset()
        self.bot.send(['Dataset updated'])
        self.bot.send(['Completing dataset...'])
        self.client.complete_dataset()
        self.bot.send(['Dataset completed'])
        self.predictor.learn()

    def run(self):
        # Loop forever; a failed cycle is reported but never kills the thread.
        while True:
            try:
                self._cycle()
            except Exception as exc:
                self.bot.send(['Something wrong happened:', form_traceback(exc)])
            time.sleep(learning_period)
| import os
import gc
import time
import joblib
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error, median_absolute_error
from datetime import datetime
from threading import RLock, Thread
from .util import PoolObject, yobit_err, form_traceback
from .constants import predictor_main_cols, predictor_target_col, predictor_dataset, \
predictor_dummy_cols, trained_model, learning_period
lock = RLock()
class Predictor(PoolObject):
def __init__(self):
PoolObject.__init__(self)
self.dummies = None
self.model = None
self.model_date = None
self.metrics = None
self.load_stuff()
self.data = None
self.train_data = None
self.val_data = None
self.available = True
print('predictor: started')
def get_report(self):
report = ' - Model last training date:\n'
report += ' * {0}\n'.format(self.model_date)
report += ' - Model metrics:\n'
if self.metrics is None:
report += ' * None\n'
return report
for k, v in self.metrics.items():
if k == 'cols':
report += ' * {0}:\n'.format(k)
for token in v.split(','):
report += ' > {0}\n'.format(token)
else:
report += ' * {0}: {1:.2f}\n'.format(k, v)
return report
def learn(self):
self.read_and_prepare_data()
self.train()
def predict(self, signal):
self.data = pd.DataFrame(signal, index=[0])
if self.model is None:
self.load_stuff()
self.read_and_prepare_data(to_predict=True)
data_use_cols = self.data[predictor_main_cols]
data_dummied = data_use_cols.reindex(columns=self.dummies, fill_value=0)
data_dummied.pop(predictor_target_col)
x = data_dummied
preds = self.model.predict(x)
return preds[0]
def read_and_prepare_data(self, to_predict=False):
if not to_predict:
self.data = pd.read_csv(predictor_dataset)
self.data = self.data[self.data['1h_max'].notnull()]
train_size = int(self.data.shape[0] * 0.75)
self.data = self.data.iloc[-train_size:].reset_index(drop=True)
self.data['date'] = pd.to_datetime(self.data['date'], format='%Y-%m-%d %H:%M:%S')
self.data['year'] = self.data['date'].apply(lambda d: d.year)
self.data['month'] = self.data['date'].apply(lambda d: d.month)
self.data['day'] = self.data['date'].apply(lambda d: d.day)
self.data['hour'] = self.data['date'].apply(lambda d: d.hour)
self.data['minute'] = self.data['date'].apply(lambda d: d.minute)
self.data['exchange'] = self.data['exchange'].apply(yobit_err)
self.data['1h_per'] = (self.data['1h_max'] / self.data['price_btc'] - 1) * 100
self.data['6h_per'] = (self.data['6h_max'] / self.data['price_btc'] - 1) * 100
self.data['24h_per'] = (self.data['24h_max'] / self.data['price_btc'] - 1) * 100
self.data['48h_per'] = (self.data['48h_max'] / self.data['price_btc'] - 1) * 100
self.data['7d_per'] = (self.data['7d_max'] / self.data['price_btc'] - 1) * 100
if not to_predict:
last_index = self.data.shape[0] - 1
last_day = self.data.iloc[-1]['day']
while self.data.iloc[last_index]['day'] == last_day:
last_index -= 1
val_end_index = last_index + 1
last_day = self.data.iloc[last_index]['day']
while self.data.iloc[last_index]['day'] == last_day:
last_index -= 1
val_start_index = last_index + 1
self.train_data = self.data.iloc[:val_start_index].reset_index(drop=True)
self.val_data = self.data.iloc[val_start_index:val_end_index].reset_index(drop=True)
def train(self):
train_data_use_cols = self.train_data[predictor_main_cols]
val_data_use_cols = self.val_data[predictor_main_cols]
train_data_dummied = pd.get_dummies(train_data_use_cols, columns=predictor_dummy_cols)
val_data_dummied = val_data_use_cols.reindex(columns=train_data_dummied.columns, fill_value=0)
train_y = train_data_dummied.pop(predictor_target_col)
train_x = train_data_dummied
test_y = val_data_dummied.pop(predictor_target_col)
test_x = val_data_dummied
self.pool['bot'].send(['Predictor: started training for metrics'])
val_model = RandomForestRegressor(n_estimators=100, random_state=100)
val_model.fit(train_x, train_y)
self.metrics = Predictor.get_metrics(predictor_main_cols, test_y, val_model.predict(test_x))
self.dump_metrics()
self.train_data = None
self.val_data = None
gc.collect()
self.pool['bot'].send(['Predictor: finished training for metrics'])
data_use_cols = self.data[predictor_main_cols]
data_dummied = pd.get_dummies(data_use_cols, columns=predictor_dummy_cols)
self.dummies = data_dummied.columns
train_y = data_dummied.pop(predictor_target_col)
train_x = data_dummied
self.pool['bot'].send(['Predictor: started training for real'])
model = RandomForestRegressor(n_estimators=100, random_state=100)
model.fit(train_x, train_y)
self.model = model
self.model_date = datetime.utcnow()
self.dump_stuff()
self.model = None
self.dummies = None
self.data = None
gc.collect()
self.pool['bot'].send(['Predictor: finished training for real'])
def dump_stuff(self):
with lock:
self.available = False
if not os.path.exists(trained_model):
os.makedirs(trained_model)
joblib.dump(self.dummies, os.path.join(trained_model, 'dummies'))
joblib.dump(self.model, os.path.join(trained_model, 'model'))
joblib.dump(self.model_date, os.path.join(trained_model, 'model_date'))
with lock:
self.available = True
def dump_metrics(self):
with lock:
self.available = False
if not os.path.exists(trained_model):
os.makedirs(trained_model)
joblib.dump(self.metrics, os.path.join(trained_model, 'metrics'))
with lock:
self.available = True
def load_stuff(self):
if not os.path.exists(trained_model):
return
self.dummies = joblib.load(os.path.join(trained_model, 'dummies'))
self.model = joblib.load(os.path.join(trained_model, 'model'))
self.model_date = joblib.load(os.path.join(trained_model, 'model_date'))
self.metrics = joblib.load(os.path.join(trained_model, 'metrics'))
@staticmethod
def get_metrics(cols, real, preds):
dev_1 = 0
dev_5 = 0
dev_10 = 0
less_pred = 0
more_pred = 0
length = len(real)
real = real.values
for i in range(len(real)):
if preds[i] >= real[i]:
more_pred += 1
if preds[i] < real[i]:
less_pred += 1
if abs(real[i] - preds[i]) <= 1:
dev_1 += 1
if abs(real[i] - preds[i]) <= 5:
dev_5 += 1
if abs(real[i] - preds[i]) <= 10:
dev_10 += 1
metrics = {
'cols': ', '.join(cols),
'real_mean': real.mean(),
'real_median': np.median(real),
'real_75_percentile': np.percentile(real, 75),
'preds_mean': preds.mean(),
'preds_median': np.median(preds),
'preds_75_percentile': np.percentile(preds, 75),
'mean_deviation': mean_absolute_error(real, preds),
'median_deviation': median_absolute_error(real, preds),
'deviation <= 1%': dev_1 / length,
'deviation <= 5%': dev_5 / length,
'deviation <= 10%': dev_10 / length,
'pred < real': less_pred / length,
'pred >= real': more_pred / length
}
return metrics
class PredictorLearnThread(Thread):
def __init__(self, predictor, client, bot):
Thread.__init__(self)
self.predictor = predictor
self.client = client
self.bot = bot
def run(self):
while True:
try:
self.bot.send(['Updating dataset...'])
self.client.update_dataset()
self.bot.send(['Dataset updated'])
self.bot.send(['Completing dataset...'])
self.client.complete_dataset()
self.bot.send(['Dataset completed'])
self.predictor.learn()
except Exception as exc:
self.bot.send(['Something wrong happened:', form_traceback(exc)])
time.sleep(learning_period)
| none | 1 | 2.146864 | 2 | |
aiida_vasp/workchains/tests/test_vasp_wc.py | DropD/aiida_vasp | 3 | 6622561 | <reponame>DropD/aiida_vasp<gh_stars>1-10
"""
Test submitting a VaspWorkChain.
This does not seem to work, for `submit` the daemon will not pick up the workchain
and `run` just seems to get stuck after a while.
"""
# pylint: disable=unused-import,wildcard-import,unused-wildcard-import,unused-argument,redefined-outer-name, import-outside-toplevel
from __future__ import print_function
import pytest
import numpy as np
from aiida.common.extendeddicts import AttributeDict
from aiida.manage.tests.pytest_fixtures import aiida_caplog
from aiida.plugins.factories import DataFactory
from aiida_vasp.utils.fixtures import *
from aiida_vasp.utils.fixtures.data import POTCAR_FAMILY_NAME, POTCAR_MAP
from aiida_vasp.utils.aiida_utils import get_data_node, aiida_version, cmp_version, create_authinfo
from aiida_vasp.utils.mock_code import MockRegistry
@pytest.mark.parametrize(['vasp_structure', 'vasp_kpoints'], [('str', 'mesh')], indirect=True)
def test_vasp_wc(fresh_aiida_env, run_vasp_process):
    """Test submitting only, not correctness, with mocked vasp code."""
    results, node = run_vasp_process(process_type='workchain')
    assert node.exit_status == 0
    # All three standard outputs must be attached to the workchain.
    for key in ('retrieved', 'misc', 'remote_folder'):
        assert key in results
    misc_dict = results['misc'].get_dict()
    assert misc_dict['maximum_stress'] == pytest.approx(22.8499295)
    assert misc_dict['total_energies']['energy_extrapolated'] == pytest.approx(-14.16209692)
@pytest.mark.parametrize(['vasp_structure', 'vasp_kpoints'], [('str', 'mesh')], indirect=True)
def test_vasp_wc_chgcar(fresh_aiida_env, run_vasp_process, aiida_caplog):
    """Test submitting only, not correctness, with mocked vasp code, test fetching and parsing of the CHGCAR content."""
    settings = {'ADDITIONAL_RETRIEVE_LIST': ['CHGCAR'], 'parser_settings': {'add_charge_density': True}}
    results, node = run_vasp_process(settings=settings, process_type='workchain')
    assert node.exit_status == 0
    assert 'charge_density' in results
    assert 'misc' in results
    # The mock CHGCAR holds the sequence 1..60 scaled by 32/343, laid out as
    # a (5, 4, 3) grid -- equal (to within literal rounding, well inside the
    # allclose tolerance) to the table this test previously carried inline.
    expected = np.arange(1, 61, dtype=float).reshape(5, 4, 3) * (32.0 / 343.0)
    charge_density = results['charge_density'].get_array('charge_density')
    assert np.allclose(charge_density, expected)
def upload_real_workchain(node, name):
    """
    Upload the workchain to the repository to make it work with mocking.

    Call this once after the REAL vasp calculation has run during the test.
    """
    registry = MockRegistry()
    print(registry.base_path)
    registry.upload_aiida_work(node, name)
def upload_real_pseudopotentials(path):
    """
    Upload real pseudopotentials for workchain test mock deposition.

    This function should be called once before the REAL vasp calculation is
    launched, to set up the correct POTCARs.

    :param path: filesystem path of the POTCAR family to upload.
    """
    # NOTE(review): rebinding the *imported* POTCAR_FAMILY_NAME only affects
    # this module's copy of the name, not aiida_vasp.utils.fixtures.data --
    # confirm that every consumer reads the name from this module.
    global POTCAR_FAMILY_NAME  # pylint: disable=global-statement
    POTCAR_FAMILY_NAME = 'TEMP'
    potcar_data_cls = DataFactory('vasp.potcar')
    potcar_data_cls.upload_potcar_family(path, 'TEMP', 'TEMP-REALPOTCARS', stop_if_existing=False, dry_run=False)
### COMPLEX WORKCHAIN TEST ###
def si_structure():
    """Build a one-atom Si structure on an FCC lattice, displaced off the origin."""
    structure_data = DataFactory('structure')
    alat = 3.9
    lattice = np.array([[.5, .5, 0], [0, .5, .5], [.5, 0, .5]]) * alat
    structure = structure_data(cell=lattice)
    # Single atom, slightly displaced along the first lattice direction.
    for frac_pos in ([0.1, 0.0, 0.0],):
        structure.append_atom(position=np.dot(frac_pos, lattice), symbols='Si')
    return structure
# TEST INPUT FOR AUTOMATIC correction of NELM.
# The workchain should finish in the second run, after the handler restarts
# the first (electronically unconverged) calculation.
INCAR_ELEC_CONV = {
    'encut': 240,
    'ismear': 0,
    'sigma': 0.1,
    'ediff': 1e-9,
    'nelm': 7,  # deliberately too few electronic steps to converge
    'ibrion': -1,
    'potim': 0.01,
    'nsw': -1,
    'isif': 3,
    # 'ediffg': -0.01
}

# Relaxation input whose first iteration does not reach ionic convergence,
# exercising the ionic-restart handler.
INCAR_IONIC_CONV = {
    'encut': 240,
    'ismear': 0,
    'sigma': 0.1,
    'ediff': 1e-9,
    'nelm': 15,
    'ibrion': 1,
    'potim': 0.1,
    'nsw': 5,
    'isif': 3,
}

# Parameters for test handling unfinished VASP. The first iteration was killed manually.
INCAR_IONIC_UNFINISHED = {
    'encut': 500,
    'ismear': 0,
    'isym': 0,
    'sigma': 0.1,
    'ediff': 1e-9,
    'nelm': 15,
    'ibrion': 1,
    'potim': 0.1,
    'nsw': 20,
    'isif': 3,
}
def setup_vasp_workchain(structure, incar, nkpts, code=None):
    """
    Setup the inputs for a VaspWorkChain.

    Returns an ``AttributeDict`` carrying the structure, INCAR parameters,
    a uniform k-point mesh, potential family/mapping, scheduler options and
    the code (the strict mock code when none is supplied).
    """
    from aiida.orm import Code

    inputs = AttributeDict()
    inputs.structure = structure
    inputs.parameters = get_data_node('dict', dict={'incar': incar})

    mesh = get_data_node('array.kpoints')
    mesh.set_kpoints_mesh((nkpts, nkpts, nkpts))
    inputs.kpoints = mesh

    inputs.potential_family = get_data_node('str', POTCAR_FAMILY_NAME)
    inputs.potential_mapping = get_data_node('dict', dict=POTCAR_MAP)

    scheduler_opts = {
        'withmpi': False,
        'queue_name': 'None',
        'resources': {
            'num_machines': 1,
            'num_mpiprocs_per_machine': 1
        },
        'max_wallclock_seconds': 3600
    }
    inputs.options = get_data_node('dict', dict=scheduler_opts)
    inputs.settings = get_data_node('dict', dict={'parser_settings': {'add_structure': True}})

    # Fall back to the strict mock code unless an explicit code is supplied.
    inputs.code = code if code is not None else Code.get_from_string('mock-vasp-strict@localhost')
    return inputs
def test_vasp_wc_nelm(fresh_aiida_env, potentials, mock_vasp_strict):
    """Test with mocked vasp code for handling electronic convergence issues.

    The first calculation breaches NELM (exit code 701); the workchain's
    handler restarts it and the second calculation converges (exit code 0).
    """
    from aiida.plugins import WorkflowFactory
    from aiida.engine import run
    from aiida.cmdline.utils.common import get_calcjob_report, get_workchain_report

    workchain = WorkflowFactory('vasp.vasp')

    mock_vasp_strict.store()
    create_authinfo(computer=mock_vasp_strict.computer, store=True)

    inputs = setup_vasp_workchain(si_structure(), INCAR_ELEC_CONV, 8)
    inputs.verbose = get_data_node('bool', True)
    results, node = run.get_node(workchain, **inputs)

    # Sort the called calculations by creation time once.
    called_nodes = list(node.called)
    called_nodes.sort(key=lambda x: x.ctime)

    # Dump diagnostics for the workchain and the first two calculations
    # (the duplicated per-child print blocks are now a single loop).
    print(get_workchain_report(node, 'DEBUG'))
    for child in called_nodes:
        print(get_calcjob_report(child))
    for child in called_nodes[:2]:
        print(child.get_object_content('INCAR'))
        print(child.get_object_content('POSCAR'))
        print(child.get_object_content('KPOINTS'))
        print(child.outputs.retrieved.get_object_content('vasp_output'))
        print(child.outputs.retrieved.list_object_names())
        print(child.outputs.misc.get_dict())
        print(child.exit_status)

    assert node.exit_status == 0
    assert 'retrieved' in results
    assert 'misc' in results
    assert 'remote_folder' in results
    assert results['misc']['total_energies']['energy_extrapolated'] == pytest.approx(-4.82467802)

    # First run breaches NELM (701); the handler's restart converges (0).
    assert called_nodes[0].exit_status == 701
    assert called_nodes[1].exit_status == 0
@pytest.mark.parametrize('incar,nkpts,exit_codes', [[INCAR_IONIC_CONV, 8, [702, 0]], [INCAR_IONIC_UNFINISHED, 16, [700, 0]]])
def test_vasp_wc_ionic_continue(fresh_aiida_env, potentials, mock_vasp_strict, incar, nkpts, exit_codes):
    """Test with mocked vasp code for handling ionic convergence issues.

    Parametrized over two failure modes of the first calculation: ionically
    unconverged (702) and manually killed / unfinished (700).  In both cases
    the handler's restart must finish cleanly (0).
    """
    from aiida.plugins import WorkflowFactory
    from aiida.engine import run

    workchain = WorkflowFactory('vasp.vasp')

    mock_vasp_strict.store()
    create_authinfo(computer=mock_vasp_strict.computer, store=True)

    inputs = setup_vasp_workchain(si_structure(), incar, nkpts)
    inputs.verbose = get_data_node('bool', True)
    # The test calculation contains NELM breaches during the relaxation - set to ignore them.
    inputs.handler_overrides = get_data_node('dict', dict={'ignore_nelm_breach_relax': True})

    results, node = run.get_node(workchain, **inputs)

    assert node.exit_status == 0
    assert 'retrieved' in results
    assert 'misc' in results
    assert 'remote_folder' in results
    assert results['misc']['run_status']['ionic_converged']

    # Sort the called nodes by creation time and check each restart's status.
    called_nodes = list(node.called)
    called_nodes.sort(key=lambda x: x.ctime)
    for idx, expected_code in enumerate(exit_codes):
        assert called_nodes[idx].exit_status == expected_code
def test_vasp_wc_ionic_magmom_carry(fresh_aiida_env, potentials, mock_vasp_strict):
    """Test that site magnetization is carried over between ionic restarts.

    (The original docstring was a copy-paste of the ionic-convergence test.)
    """
    from aiida.plugins import WorkflowFactory
    from aiida.engine import run

    workchain = WorkflowFactory('vasp.vasp')

    mock_vasp_strict.store()
    create_authinfo(computer=mock_vasp_strict.computer, store=True)

    # Spin-polarized relaxation so that site magnetization is produced.
    incar = dict(INCAR_IONIC_CONV)
    incar['ispin'] = 2
    incar['lorbit'] = 10
    incar['nupdown'] = 2

    inputs = setup_vasp_workchain(si_structure(), incar, 8)
    inputs.verbose = get_data_node('bool', True)
    # The test calculation contains NELM breaches during the relaxation - set to ignore them.
    inputs.handler_overrides = get_data_node('dict', dict={'ignore_nelm_breach_relax': True})
    inputs.settings = get_data_node('dict', dict={'parser_settings': {
        'add_structure': True,
        'add_site_magnetization': True,
    }})
    inputs.max_iterations = get_data_node('int', 2)

    _, node = run.get_node(workchain, **inputs)
    assert node.exit_status == 0

    called_nodes = list(node.called)
    called_nodes.sort(key=lambda x: x.ctime)
    # Check that the second calculation takes the magnetization of the first.
    assert called_nodes[1].inputs.parameters['magmom'] == [0.646]
| """
Test submitting a VaspWorkChain.
This does not seem to work, for `submit` the daemon will not pick up the workchain
and `run` just seems to get stuck after a while.
"""
# pylint: disable=unused-import,wildcard-import,unused-wildcard-import,unused-argument,redefined-outer-name, import-outside-toplevel
from __future__ import print_function
import pytest
import numpy as np
from aiida.common.extendeddicts import AttributeDict
from aiida.manage.tests.pytest_fixtures import aiida_caplog
from aiida.plugins.factories import DataFactory
from aiida_vasp.utils.fixtures import *
from aiida_vasp.utils.fixtures.data import POTCAR_FAMILY_NAME, POTCAR_MAP
from aiida_vasp.utils.aiida_utils import get_data_node, aiida_version, cmp_version, create_authinfo
from aiida_vasp.utils.mock_code import MockRegistry
@pytest.mark.parametrize(['vasp_structure', 'vasp_kpoints'], [('str', 'mesh')], indirect=True)
def test_vasp_wc(fresh_aiida_env, run_vasp_process):
"""Test submitting only, not correctness, with mocked vasp code."""
results, node = run_vasp_process(process_type='workchain')
assert node.exit_status == 0
assert 'retrieved' in results
assert 'misc' in results
assert 'remote_folder' in results
misc = results['misc'].get_dict()
assert misc['maximum_stress'] == pytest.approx(22.8499295)
assert misc['total_energies']['energy_extrapolated'] == pytest.approx(-14.16209692)
@pytest.mark.parametrize(['vasp_structure', 'vasp_kpoints'], [('str', 'mesh')], indirect=True)
def test_vasp_wc_chgcar(fresh_aiida_env, run_vasp_process, aiida_caplog):
"""Test submitting only, not correctness, with mocked vasp code, test fetching and parsing of the CHGCAR content."""
settings = {'ADDITIONAL_RETRIEVE_LIST': ['CHGCAR'], 'parser_settings': {'add_charge_density': True}}
results, node = run_vasp_process(settings=settings, process_type='workchain')
assert node.exit_status == 0
assert 'charge_density' in results
assert 'misc' in results
test_array = np.array([[[0.09329446, 0.18658892, 0.27988338], [0.37317784, 0.4664723, 0.55976676], [0.65306122, 0.74635569, 0.83965015],
[0.93294461, 1.02623907, 1.11953353]],
[[1.21282799, 1.30612245, 1.39941691], [1.49271137, 1.58600583, 1.67930029],
[1.77259475, 1.86588921, 1.95918367], [2.05247813, 2.14577259, 2.23906706]],
[[2.33236152, 2.42565598, 2.51895044], [2.6122449, 2.70553936, 2.79883382], [2.89212828, 2.98542274, 3.0787172],
[3.17201166, 3.26530612, 3.35860058]],
[[3.45189504, 3.5451895, 3.63848397], [3.73177843, 3.82507289, 3.91836735], [4.01166181, 4.10495627, 4.19825073],
[4.29154519, 4.38483965, 4.47813411]],
[[4.57142857, 4.66472303, 4.75801749], [4.85131195, 4.94460641, 5.03790087], [5.13119534, 5.2244898, 5.31778426],
[5.41107872, 5.50437318, 5.59766764]]])
charge_density = results['charge_density'].get_array('charge_density')
assert np.allclose(charge_density, test_array)
def upload_real_workchain(node, name):
"""
Upload the workchain to the repository to make it work with mocking
This function should be called once after the REAL vasp calculation is run during the test
"""
reg = MockRegistry()
print(reg.base_path)
reg.upload_aiida_work(node, name)
def upload_real_pseudopotentials(path):
"""
Upload real pseudopotentials for workchain test mock deposition
This function should be called once before the REAL vasp calculation is launch to setup the
correct POTCARs
"""
global POTCAR_FAMILY_NAME # pylint: disable=global-statement
POTCAR_FAMILY_NAME = 'TEMP'
potcar_data_cls = DataFactory('vasp.potcar')
potcar_data_cls.upload_potcar_family(path, 'TEMP', 'TEMP-REALPOTCARS', stop_if_existing=False, dry_run=False)
### COMPLEX WORKCHAIN TEST ###
def si_structure():
"""
Setup a silicon structure in a displaced FCC setting
"""
structure_data = DataFactory('structure')
alat = 3.9
lattice = np.array([[.5, .5, 0], [0, .5, .5], [.5, 0, .5]]) * alat
structure = structure_data(cell=lattice)
positions = [[0.1, 0.0, 0.0]]
for pos_direct in positions:
pos_cartesian = np.dot(pos_direct, lattice)
structure.append_atom(position=pos_cartesian, symbols='Si')
return structure
# TEST INPUT FOR AUTOMATIC correction of NELM
# calculation should finish in the second run where the calculation
INCAR_ELEC_CONV = {
'encut': 240,
'ismear': 0,
'sigma': 0.1,
'ediff': 1e-9,
'nelm': 7,
'ibrion': -1,
'potim': 0.01,
'nsw': -1,
'isif': 3,
# 'ediffg': -0.01
}
INCAR_IONIC_CONV = {
'encut': 240,
'ismear': 0,
'sigma': 0.1,
'ediff': 1e-9,
'nelm': 15,
'ibrion': 1,
'potim': 0.1,
'nsw': 5,
'isif': 3,
}
# Parameters for test handling unfinished VASP. The first iteration was killed manually.
INCAR_IONIC_UNFINISHED = {
'encut': 500,
'ismear': 0,
'isym': 0,
'sigma': 0.1,
'ediff': 1e-9,
'nelm': 15,
'ibrion': 1,
'potim': 0.1,
'nsw': 20,
'isif': 3,
}
def setup_vasp_workchain(structure, incar, nkpts, code=None):
"""
Setup the inputs for a VaspWorkChain.
"""
from aiida.orm import Code
inputs = AttributeDict()
inputs.structure = structure
inputs.parameters = get_data_node('dict', dict={'incar': incar})
kpoints = get_data_node('array.kpoints')
kpoints.set_kpoints_mesh((nkpts, nkpts, nkpts))
inputs.kpoints = kpoints
inputs.potential_family = get_data_node('str', POTCAR_FAMILY_NAME)
inputs.potential_mapping = get_data_node('dict', dict=POTCAR_MAP)
inputs.options = get_data_node('dict',
dict={
'withmpi': False,
'queue_name': 'None',
'resources': {
'num_machines': 1,
'num_mpiprocs_per_machine': 1
},
'max_wallclock_seconds': 3600
})
inputs.settings = get_data_node('dict', dict={'parser_settings': {'add_structure': True}})
# If code is not passed, use the mock code
if code is None:
mock = Code.get_from_string('mock-vasp-strict@localhost')
inputs.code = mock
else:
inputs.code = code
return inputs
def test_vasp_wc_nelm(fresh_aiida_env, potentials, mock_vasp_strict):
"""Test with mocked vasp code for handling electronic convergence issues"""
from aiida.orm import Code
from aiida.plugins import WorkflowFactory
from aiida.engine import run
from aiida.cmdline.utils.common import get_calcjob_report, get_workchain_report
workchain = WorkflowFactory('vasp.vasp')
mock_vasp_strict.store()
create_authinfo(computer=mock_vasp_strict.computer, store=True)
inputs = setup_vasp_workchain(si_structure(), INCAR_ELEC_CONV, 8)
inputs.verbose = get_data_node('bool', True)
results, node = run.get_node(workchain, **inputs)
called_nodes = list(node.called)
called_nodes.sort(key=lambda x: x.ctime)
print(get_workchain_report(node, 'DEBUG'))
for child in called_nodes:
print(get_calcjob_report(child))
child = called_nodes[0]
print(child.get_object_content('INCAR'))
print(child.get_object_content('POSCAR'))
print(child.get_object_content('KPOINTS'))
print(child.outputs.retrieved.get_object_content('vasp_output'))
print(child.outputs.retrieved.list_object_names())
print(child.outputs.misc.get_dict())
print(child.exit_status)
child = called_nodes[1]
print(child.get_object_content('INCAR'))
print(child.get_object_content('POSCAR'))
print(child.get_object_content('KPOINTS'))
print(child.outputs.retrieved.get_object_content('vasp_output'))
print(child.outputs.retrieved.list_object_names())
print(child.outputs.misc.get_dict())
print(child.exit_status)
assert node.exit_status == 0
assert 'retrieved' in results
assert 'misc' in results
assert 'remote_folder' in results
assert results['misc']['total_energies']['energy_extrapolated'] == pytest.approx(-4.82467802)
# Sort the called nodes by creation time
called_nodes = list(node.called)
called_nodes.sort(key=lambda x: x.ctime)
assert called_nodes[0].exit_status == 701
assert called_nodes[1].exit_status == 0
@pytest.mark.parametrize('incar,nkpts,exit_codes', [[INCAR_IONIC_CONV, 8, [702, 0]], [INCAR_IONIC_UNFINISHED, 16, [700, 0]]])
def test_vasp_wc_ionic_continue(fresh_aiida_env, potentials, mock_vasp_strict, incar, nkpts, exit_codes):
"""Test with mocked vasp code for handling ionic convergence issues"""
from aiida.orm import Code
from aiida.plugins import WorkflowFactory
from aiida.engine import run
workchain = WorkflowFactory('vasp.vasp')
mock_vasp_strict.store()
create_authinfo(computer=mock_vasp_strict.computer, store=True)
inputs = setup_vasp_workchain(si_structure(), incar, nkpts)
inputs.verbose = get_data_node('bool', True)
# The test calculation contain NELM breaches during the relaxation - set to ignore it.
inputs.handler_overrides = get_data_node('dict', dict={'ignore_nelm_breach_relax': True})
results, node = run.get_node(workchain, **inputs)
assert node.exit_status == 0
assert 'retrieved' in results
assert 'misc' in results
assert 'remote_folder' in results
assert results['misc']['run_status']['ionic_converged']
# Sort the called nodes by creation time
called_nodes = list(node.called)
called_nodes.sort(key=lambda x: x.ctime)
# Check the child status - here the first calculation is not finished but the second one is
for idx, code in enumerate(exit_codes):
assert called_nodes[idx].exit_status == code
def test_vasp_wc_ionic_magmom_carry(fresh_aiida_env, potentials, mock_vasp_strict):
"""Test with mocked vasp code for handling ionic convergence issues"""
from aiida.orm import Code
from aiida.plugins import WorkflowFactory
from aiida.engine import run
workchain = WorkflowFactory('vasp.vasp')
mock_vasp_strict.store()
create_authinfo(computer=mock_vasp_strict.computer, store=True)
incar = dict(INCAR_IONIC_CONV)
incar['ispin'] = 2
incar['lorbit'] = 10
incar['nupdown'] = 2
inputs = setup_vasp_workchain(si_structure(), incar, 8)
inputs.verbose = get_data_node('bool', True)
# The test calculation contain NELM breaches during the relaxation - set to ignore it.
inputs.handler_overrides = get_data_node('dict', dict={'ignore_nelm_breach_relax': True})
inputs.settings = get_data_node('dict', dict={'parser_settings': {
'add_structure': True,
'add_site_magnetization': True,
}})
inputs.max_iterations = get_data_node('int', 2)
_, node = run.get_node(workchain, **inputs)
assert node.exit_status == 0
called_nodes = list(node.called)
called_nodes.sort(key=lambda x: x.ctime)
# Check that the second node takes the magnetization of the first node
assert called_nodes[1].inputs.parameters['magmom'] == [0.646] | en | 0.862167 | Test submitting a VaspWorkChain. This does not seem to work, for `submit` the daemon will not pick up the workchain and `run` just seems to get stuck after a while. # pylint: disable=unused-import,wildcard-import,unused-wildcard-import,unused-argument,redefined-outer-name, import-outside-toplevel Test submitting only, not correctness, with mocked vasp code. Test submitting only, not correctness, with mocked vasp code, test fetching and parsing of the CHGCAR content. Upload the workchain to the repository to make it work with mocking This function should be called once after the REAL vasp calculation is run during the test Upload real pseudopotentials for workchain test mock deposition This function should be called once before the REAL vasp calculation is launch to setup the correct POTCARs # pylint: disable=global-statement ### COMPLEX WORKCHAIN TEST ### Setup a silicon structure in a displaced FCC setting # TEST INPUT FOR AUTOMATIC correction of NELM # calculation should finish in the second run where the calculation # 'ediffg': -0.01 # Parameters for test handling unfinished VASP. The first iteration was killed manually. Setup the inputs for a VaspWorkChain. # If code is not passed, use the mock code Test with mocked vasp code for handling electronic convergence issues # Sort the called nodes by creation time Test with mocked vasp code for handling ionic convergence issues # The test calculation contain NELM breaches during the relaxation - set to ignore it. # Sort the called nodes by creation time # Check the child status - here the first calculation is not finished but the second one is Test with mocked vasp code for handling ionic convergence issues # The test calculation contain NELM breaches during the relaxation - set to ignore it. # Check that the second node takes the magnetization of the first node | 1.729673 | 2 |
learndraw/draw.py | tydcg/pythonlearn | 0 | 6622562 | import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from mpl_toolkits.mplot3d import Axes3D
def drawLine():
    """Plot the straight line y = 2x + 1 over the interval [-1, 1]."""
    xs = np.linspace(-1, 1, 50)
    ys = 2 * xs + 1
    plt.figure()
    plt.plot(xs, ys)
def draw_scatter():
    """Scatter 1024 standard-normal points, coloured by their angle."""
    n = 1024
    xs = np.random.normal(0, 1, 1024)  # standard normal samples
    ys = np.random.normal(0, 1, 1024)
    colors = np.arctan2(xs, ys)  # for color
    plt.figure()
    plt.xlim(-1.5, 1.5)
    plt.ylim(-1.5, 1.5)
    plt.xticks(())
    plt.yticks(())
    plt.scatter(xs, ys, c=colors, alpha=0.65)
def draw_image():
    """Render a 255x255 RGB gradient where pixel (x, y) = (x, y, x)."""
    img = Image.new('RGB', (255, 255), "black")
    # putdata fills the image row by row (y outer, x inner) in one bulk
    # write, replacing the original slow per-pixel putpixel loop.
    img.putdata([(x, y, x) for y in range(img.size[1]) for x in range(img.size[0])])
    img.show()
def draw_image3D():
    """Plot z = sin(x) + cos(y) as a uniformly red 3-D surface."""
    fig = plt.figure()
    ax3 = plt.axes(projection='3d')
    # Build the surface grid.
    xx = np.arange(-5, 5, 0.5)
    yy = np.arange(-5, 5, 0.5)
    X, Y = np.meshgrid(xx, yy)
    Z = np.sin(X) + np.cos(Y)
    # BUG FIX: plot_surface() has no 'c' keyword (that is the scatter()
    # spelling) -- use color= for a uniform colour.  Swap in cmap='rainbow'
    # for a height-mapped colouring instead.
    ax3.plot_surface(X, Y, Z, color='r')
def draw_image3DMy():
    """Plot z = x + y over a 10x10 grid as a rainbow surface.

    NOTE(review): this definition is dead code -- it is shadowed by the
    second ``draw_image3DMy`` defined below, which is what the script
    actually calls.  Rename or remove one of the two.
    """
    fig = plt.figure()
    ax3 = plt.axes(projection='3d')
    # Build the 3-D data.
    x = np.linspace(1, 10, 10)
    y = np.linspace(1, 10, 10)
    z = []
    for i in range(x.size):
        z.append(x[i] + y[i])
    z = np.array(z)
    # Tile the 1-D z values into a 2-D grid matching the meshgrid axes.
    z, _ = np.meshgrid(z, y)
    x, y = np.meshgrid(x, y)
    # Plot the surface.
    ax3.plot_surface(x, y, z, cmap='rainbow')
def draw_image3DMy():
    """Plot z = x^2 * y^2 along the grid diagonal as a rainbow surface."""
    fig = plt.figure()
    ax3 = plt.axes(projection='3d')
    # Build the 3-D data: z follows the diagonal values x_i^2 * y_i^2.
    # (The original's unused ``m = []`` has been removed.)
    x = np.linspace(1, 10, 10)
    y = np.linspace(1, 10, 10)
    z = np.array([x[i] * x[i] * y[i] * y[i] for i in range(x.size)])
    # Tile the 1-D z values into a 2-D grid matching the meshgrid axes,
    # so every row of the surface repeats the same profile.
    z, _ = np.meshgrid(z, y)
    x, y = np.meshgrid(x, y)
    ax3.plot_surface(x, y, z, cmap='rainbow')
    # ax3.scatter(x, y, z, c='r')
# Script entry: earlier experiments are kept commented out; only the
# last-defined draw_image3DMy() (the z = x^2 * y^2 surface) is rendered.
# drawLine()
# draw_scatter()
# plt.show()
# draw_image()
# draw_image3D()
draw_image3DMy()
plt.show()
| import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from mpl_toolkits.mplot3d import Axes3D
def drawLine():
x = np.linspace(-1, 1, 50)
y = 2 * x + 1
plt.figure()
plt.plot(x, y)
def draw_scatter():
n = 1024
X = np.random.normal(0, 1, 1024) # 正态分布
Y = np.random.normal(0, 1, 1024)
T = np.arctan2(X, Y) # for color
plt.figure()
plt.xlim(-1.5, 1.5)
plt.ylim(-1.5, 1.5)
plt.xticks(())
plt.yticks(())
plt.scatter(X, Y, c=T, alpha=0.65)
def draw_image():
img = Image.new('RGB', (255, 255), "black")
for i in range(img.size[0]):
for j in range(img.size[1]):
img.putpixel((i, j), (i, j, i));
img.show()
def draw_image3D():
fig = plt.figure()
ax3 = plt.axes(projection='3d')
# 定义三维数据
xx = np.arange(-5, 5, 0.5)
yy = np.arange(-5, 5, 0.5)
X, Y = np.meshgrid(xx, yy)
Z = np.sin(X) + np.cos(Y)
# 作图
# ax3.plot_surface(X, Y, Z, cmap='rainbow')
ax3.plot_surface(X, Y, Z, c='r')
def draw_image3DMy():
fig = plt.figure()
ax3 = plt.axes(projection='3d')
# 定义三维数据
x = np.linspace(1, 10, 10)
y = np.linspace(1, 10, 10)
z = []
for i in range(x.size):
z.append(x[i] + y[i])
z = np.array(z)
z, _ = np.meshgrid(z, y)
x, y = np.meshgrid(x, y)
# 作图
ax3.plot_surface(x, y, z, cmap='rainbow')
def draw_image3DMy():
fig = plt.figure()
ax3 = plt.axes(projection='3d')
# 定义三维数据
x = np.linspace(1, 10, 10)
y = np.linspace(1, 10, 10)
z = []
m = []
for i in range(x.size):
z.append(x[i] * x[i] * y[i] * y[i])
z = np.array(z)
z, _ = np.meshgrid(z, y)
x, y = np.meshgrid(x, y)
# 作图
ax3.plot_surface(x, y, z, cmap='rainbow')
# ax3.scatter(x, y, z, c='r')
# drawLine()
# draw_scatter()
# plt.show()
# draw_image()
# draw_image3D()
draw_image3DMy()
plt.show()
| zh | 0.481809 | # 正态分布 # for color # 定义三维数据 # 作图 # ax3.plot_surface(X, Y, Z, cmap='rainbow') # 定义三维数据 # 作图 # 定义三维数据 # 作图 # ax3.scatter(x, y, z, c='r') # drawLine() # draw_scatter() # plt.show() # draw_image() # draw_image3D() | 2.932629 | 3 |
HW8/Vaulin/HW8.3.py | kolyasalubov/Lv-677.PythonCore | 0 | 6622563 | <reponame>kolyasalubov/Lv-677.PythonCore<filename>HW8/Vaulin/HW8.3.py
class Employee:
    """Simple employee record that tracks how many instances exist."""

    # Number of Employee instances created so far (class-level counter).
    count = 0

    def __init__(self, name, salary):
        Employee.count += 1
        self.name = name
        self.salary = salary

    @classmethod
    def counters(cls):
        """Print the running instance count."""
        # Idiom fix: the original used `return print(...)`, which always
        # returns None anyway -- just print.
        print(f"Employee count = {cls.count}")

    def total_members(self):
        """Print a one-line description of this employee."""
        print(f"This is {self.name}. And his/her salary is {self.salary}")
michal = Employee("Michal", 1800)
ivan = Employee("Ivan", 1800)
# print(ivan.total_members())
print(Employee.__base__)
print(Employee.__dict__)
print(Employee.__name__)
print(Employee.__module__)
print(Employee.__doc__) | class Employee:
count = 0
def __init__(self, name, salary):
Employee.count += 1
self.name = name
self.salary = salary
@classmethod
def counters(cls):
return print(f"Employee count = {cls.count}")
def total_members(self):
return print(f"This is {self.name}. And his/her salary is {self.salary}")
michal = Employee("Michal", 1800)
ivan = Employee("Ivan", 1800)
# print(ivan.total_members())
print(Employee.__base__)
print(Employee.__dict__)
print(Employee.__name__)
print(Employee.__module__)
print(Employee.__doc__) | en | 0.223698 | # print(ivan.total_members()) | 3.63554 | 4 |
salsa/utils/collect_files.py | clairekope/SALSA | 3 | 6622564 | from os import listdir
import yt
from astropy.table import QTable, vstack
from mpi4py import MPI
import pandas as pd
import numpy as np
def collect_files(directory, file_ext='.h5', key_words=None, black_list=None):
    """
    Finds and returns files in a given directory with a given extension,
    with optional key-word and black-list requirements.

    Parameters
    ----------
    directory : str
        Directory in which to search for files
    file_ext : str
        The file extension to look for (ie '.py', '.h5')
    key_words : list of str, optional
        Key words that must appear in the filename to be collected
    black_list : list of str, optional
        List of files to exclude from collection

    Returns
    -------
    files : list of str
        Filenames in the directory that pass all requirements for file
        extension, key words and black list.
    """
    # Avoid mutable default arguments ([]), a classic Python pitfall.
    key_words = [] if key_words is None else key_words
    black_list = [] if black_list is None else black_list
    return [
        f for f in listdir(directory)
        if file_ext in f
        and f not in black_list
        and all(word in f for word in key_words)
    ]
def check_file(file, key_words, black_list):
    """
    Check a file name against a black list and a set of required key words.

    Parameters
    ----------
    file : string
        Filename to check
    key_words : list
        Strings that must all appear in the file name
    black_list : list
        File names that are excluded outright

    Returns
    --------
    all_files_pass : bool
        True if ``file`` is not black-listed and contains every key word.
    """
    if file in black_list:
        return False
    # Every key word must appear somewhere in the file name.
    return all(word in file for word in key_words)
def check_rays(ray_dir, n_rays, fields, parallel=True):
    """
    Check if a directory already contains a given number of trident lrays and
    contains necessary fields.

    Parameters
    ----------
    ray_dir : str
        The path to the directory where rays are held
    n_rays : int
        The number of lrays that should be in the directory
    fields : list, str
        List of the fields needed in each light ray
    parallel : bool, optional
        When True, split the ray files across MPI ranks so each process
        only validates its own share.

    Returns
    --------
    ray_bool : bool
        `True` if there are `n_rays` in the `ray_dir` and each one contains
        necessary fields. If no rays are found then returns False. Otherwise if
        the rays found don't match requirements an error is raised (see below).

    Raises
    ------
    RuntimeError
        This is raised if there are a non-zero number of rays but they don't match
        the specified number of rays, don't contain the specified fields, or simply can't
        be opened.
    """
    ray_files = np.array(collect_files(ray_dir, key_words=['ray']))

    # Only validate contents when the count matches exactly.
    if len(ray_files) == n_rays:
        if parallel:
            comm = MPI.COMM_WORLD
            # Split the rays across MPI ranks; each rank checks its slice.
            ray_files_split = np.array_split(ray_files, comm.size)
            my_ray_files = ray_files_split[comm.rank]
        else:
            my_ray_files = ray_files

        # Check that the required fields are present in each ray.
        for rfile in my_ray_files:
            # An unreadable ray file is fatal: ask the caller to delete and rebuild.
            try:
                ray = yt.load(f"{ray_dir}/{rfile}")
            except yt.utilities.exceptions.YTOutputNotIdentified:
                print(f"Couldn't load {rfile}. Reconstructing rays")
                raise RuntimeError(f"Couldn't load {rfile}. Delete these rays so new ones can be constructed")

            # Each requested field must appear under the 'all' field type.
            for fld in fields:
                if ('all', fld) in ray.field_list:
                    pass
                else:
                    raise RuntimeError(f"{fld} not present in {rfile}. Either delete these rays so new ones can be constructed or remove this field")
        # All rays passed validation.
        return True

    else:
        # Zero rays is a normal "build them" signal; any other mismatch is fatal.
        if len(ray_files) == 0:
            print(f"No rays found, Constructing new ones")
            return False
        else:
            raise RuntimeError(f"found {len(ray_files)} rays instead of {n_rays}. Either delete rays or change number of rays to match")
def combine_astropy_files(directory, kw='ice', outfile=None):
    """Combine per-ray astropy tables in *directory* into a single table.

    Parameters
    ----------
    directory : str
        Directory holding the per-ray table files.
    kw : str
        Extra key word (besides 'ray') the file names must contain.
    outfile : str, optional
        If given, the combined table is written here; when no input files
        are found, a ``<stem>.out`` marker file is written instead.

    Returns
    -------
    astropy.table.QTable or None
        The stacked table, or None when no matching files exist.
    """
    files = collect_files(directory, key_words=['ray', kw])
    tables = [QTable.read(f"{directory}/{f}") for f in files]

    if tables:
        main_table = vstack(tables)
        if outfile is not None:
            main_table.write(outfile, overwrite=True)
    else:
        main_table = None
        # BUG FIX: the original unconditionally derived the marker path from
        # ``outfile`` here, crashing with AttributeError when outfile is None.
        if outfile is not None:
            out_err = outfile.split('.')[0] + ".out"
            # Leave a marker file so the pipeline can tell nothing was found
            # (context-managed so the handle is always closed).
            with open(out_err, 'w') as f:
                f.write(f"No files found in {directory} using key_words= ['ray', {kw}]")
    return main_table
def combine_pandas_files(directory, kw='ice', outfile=None):
    """Combine per-ray pandas HDF5 files in *directory* into one DataFrame.

    Parameters
    ----------
    directory : str
        Directory holding the per-ray ``.h5`` files.
    kw : str
        Extra key word (besides 'ray') the file names must contain.
    outfile : str, optional
        If given, the combined frame is written here; when no input files
        are found, a ``<stem>.out`` marker file is written instead.

    Returns
    -------
    pandas.DataFrame or None
        The concatenated frame, or None when no matching files exist.
    """
    files = collect_files(directory, key_words=['ray', kw])
    dfs = [pd.read_hdf(f"{directory}/{f}") for f in files]

    # BUG FIX: the original tested the undefined name ``tables`` (NameError)
    # and called the nonexistent DataFrame.write_hdf method.
    if dfs:
        main_table = pd.concat(dfs, ignore_index=True)
        if outfile is not None:
            # to_hdf requires an explicit key; 'data' is arbitrary --
            # confirm downstream readers use the same key.
            main_table.to_hdf(outfile, key='data', mode='w')
    else:
        main_table = None
        # Guard against outfile=None, mirroring combine_astropy_files.
        if outfile is not None:
            out_err = outfile.split('.')[0] + ".out"
            with open(out_err, 'w') as f:
                f.write(f"No files found in {directory} using key_words= ['ray', {kw}]")
    return main_table
| from os import listdir
import yt
from astropy.table import QTable, vstack
from mpi4py import MPI
import pandas as pd
import numpy as np
def collect_files(directory, file_ext='.h5', key_words=[], black_list=[]):
"""
Finds and returns files in a given directory with given extension. Optional
key_word and black_list requirements
Parameters
----------
directory : str
Directory in which to search for files
file_ext : str
The file extension to look for (ie '.py', '.h5')
key_words : list of str
Key words that must in the filename to be collected
black_list : list of str
list of files to exclude from collection
Returns
-------
files : list of str
List of filenames in the directory that pass all requirements for file
extension, keywords, black_list.
"""
all_files = listdir(directory)
# get np files only
files=[]
for f in all_files:
#check has file extension and not black listed
if file_ext in f and check_file(f, key_words, black_list):
files.append(f)
return files
def check_file(file, key_words, black_list):
"""
Check the file against a black list as well as check if it has keywords in
it.
Parameters
----------
file : string
filename to check
key_words : list
list of strings required to be in file name
black_list : list
list of files that are black listed, that file can't be called
Returns
--------
all_files_pass : bool
Returns True if file is not in black_list and contains all key_words
"""
#check if ile in black list
if file in black_list:
return False
else:
#check if keyword not in file
for k in key_words:
if k not in file:
return False
#return true if passes through
return True
def check_rays(ray_dir, n_rays, fields, parallel=True):
"""
Check if a directory already contains a given number of trident lrays and
contains necessary fields
Parameters
----------
ray_dir : str
The path to the directory where rays are held
n_rays : int
The number of lrays that should be in the directory
fields : list, str
List of the fields needed in each light ray
Returns
--------
ray_bool : bool
`True` if there are `n_rays` in the `ray_dir` and each one contains
necessary fields. If no rays are found then returns False. Otherwise if
the rays found don't match requirements an error is raised (see below).
Raises
------
RuntimeError
This is raised if there are a non-zero number of rays but they don't match
the specified number of rays, don't contain the specified fields, or simply can't
be opened.
"""
ray_files = np.array(collect_files(ray_dir, key_words=['ray']))
#check if correct number
if len(ray_files) == n_rays:
if parallel:
comm = MPI.COMM_WORLD
#split up rays across processes
ray_files_split = np.array_split(ray_files, comm.size)
my_ray_files = ray_files_split[comm.rank]
else:
my_ray_files = ray_files
# check if fields are in each ray
for rfile in my_ray_files:
#load ray file
try:
ray = yt.load(f"{ray_dir}/{rfile}")
except yt.utilities.exceptions.YTOutputNotIdentified:
print(f"Couldn't load {rfile}. Reconstructing rays")
raise RuntimeError(f"Couldn't load {rfile}. Delete these rays so new ones can be constructed")
# check each field is in ray
for fld in fields:
if ('all', fld) in ray.field_list:
pass
else:
raise RuntimeError(f"{fld} not present in {rfile}. Either delete these rays so new ones can be constructed or remove this field")
# all rays passed
return True
else:
if len(ray_files) == 0:
print(f"No rays found, Constructing new ones")
return False
else:
raise RuntimeError(f"found {len(ray_files)} rays instead of {n_rays}. Either delete rays or change number of rays to match")
def combine_astropy_files(directory, kw='ice', outfile=None):
#get files
files = collect_files(directory, key_words=['ray', kw])
tables = []
# open up tables
for f in files:
tables.append(QTable.read(f"{directory}/{f}"))
if len(tables) >0:
#combine tables
main_table = vstack(tables)
#write table
if outfile is not None:
main_table.write(outfile, overwrite=True)
else:
out_err = outfile.split('.')[0] + ".out"
#write out dummy
f= open(out_err, 'w')
f.write(f"No files found in {directory} using key_words= ['ray', {kw}]")
f.close()
main_table = None
return main_table
def combine_pandas_files(directory, kw='ice', outfile=None):
#get files
files = collect_files(directory, key_words=['ray', kw])
dfs = []
# open up tables
for f in files:
dfs.append(pd.read_hdf(f"{directory}/{f}"))
if len(tables) >0:
#combine tables
main_table = pd.concat(dfs, ignore_index=True)
#write table
if outfile is not None:
main_table.write_hdf(outfile, mode='w')
else:
out_err = outfile.split('.')[0] + ".out"
#write out dummy
f= open(out_err, 'w')
f.write(f"No files found in {directory} using key_words= ['ray', {kw}]")
f.close()
main_table = None
return main_table
| en | 0.776793 | Finds and returns files in a given directory with given extension. Optional key_word and black_list requirements Parameters ---------- directory : str Directory in which to search for files file_ext : str The file extension to look for (ie '.py', '.h5') key_words : list of str Key words that must in the filename to be collected black_list : list of str list of files to exclude from collection Returns ------- files : list of str List of filenames in the directory that pass all requirements for file extension, keywords, black_list. # get np files only #check has file extension and not black listed Check the file against a black list as well as check if it has keywords in it. Parameters ---------- file : string filename to check key_words : list list of strings required to be in file name black_list : list list of files that are black listed, that file can't be called Returns -------- all_files_pass : bool Returns True if file is not in black_list and contains all key_words #check if ile in black list #check if keyword not in file #return true if passes through Check if a directory already contains a given number of trident lrays and contains necessary fields Parameters ---------- ray_dir : str The path to the directory where rays are held n_rays : int The number of lrays that should be in the directory fields : list, str List of the fields needed in each light ray Returns -------- ray_bool : bool `True` if there are `n_rays` in the `ray_dir` and each one contains necessary fields. If no rays are found then returns False. Otherwise if the rays found don't match requirements an error is raised (see below). Raises ------ RuntimeError This is raised if there are a non-zero number of rays but they don't match the specified number of rays, don't contain the specified fields, or simply can't be opened. 
#check if correct number #split up rays across processes # check if fields are in each ray #load ray file # check each field is in ray # all rays passed #get files # open up tables #combine tables #write table #write out dummy #get files # open up tables #combine tables #write table #write out dummy | 2.848157 | 3 |
examples/hello/sling/ext/hello/__init__.py | slinghq/sling | 6 | 6622565 | __version__ = '0.1.0'
from sling import g, logger
import sling
class HelloResource(object):
def on_get(self, req, res):
logger.info('Saying hello as module...')
res.body = "Hello! I'm ExampleModule."
logger.info('Said hello as module!')
def test_function(name):
logger.info('Test function')
class HelloNameResource(object):
def on_get(self, req, res, name):
logger.info('Saying hello...')
test_function(name)
res.body = 'Hello, %s' % name
logger.info('Said hello!')
def install_module(app):
app.api.add_route('/hello', HelloResource())
app.api.add_route('/hello/{name}', HelloNameResource())
import command
| __version__ = '0.1.0'
from sling import g, logger
import sling
class HelloResource(object):
def on_get(self, req, res):
logger.info('Saying hello as module...')
res.body = "Hello! I'm ExampleModule."
logger.info('Said hello as module!')
def test_function(name):
logger.info('Test function')
class HelloNameResource(object):
def on_get(self, req, res, name):
logger.info('Saying hello...')
test_function(name)
res.body = 'Hello, %s' % name
logger.info('Said hello!')
def install_module(app):
app.api.add_route('/hello', HelloResource())
app.api.add_route('/hello/{name}', HelloNameResource())
import command
| none | 1 | 2.297276 | 2 | |
src/old/1.py | ytyaru/Python.DataClass.20210701133458 | 0 | 6622566 | #!/usr/bin/env python3
# coding: utf8
# https://docs.python.org/ja/3.7/library/dataclasses.html
# https://docs.python.org/ja/3/library/stdtypes.html
# https://www.python.org/dev/peps/pep-0484/
from dataclasses import dataclass, field
import typing
from typing import Any, Callable, Iterable, Tuple, Optional, List, Tuple, Dict, ClassVar, Union, TypeVar, NewType, Sequence, Mapping
from decimal import Decimal
from pathlib import Path
#import re
T = TypeVar('T', int, str)
Id = NewType('Id', int)
@dataclass
class MyData:
any: Any = None
name: str = ''
group: chr = 'A'
fix_data: bytes = b''
data: bytearray = b''
mem: memoryview = b''
age: int = 0
rate: float = 0.0
img: complex = 0j
dec: Decimal = Decimal(0.00)
is_dead: bool = False
r: range = range(0,9,1)
oi: Optional[int] = None
pattern: typing.re.Pattern = None
match: typing.re.Match = None
path: Path = None
l: List[str] = field(default_factory=list)
d: Dict[str, str] = field(default_factory=dict)
t: Tuple[int, float, str] = field(default_factory=tuple)
c: ClassVar[str] = 'クラス変数'
u: Union[int, str] = 'int | str どちらでもよい'
t: type = None
tv: T = None
id: Id = 0
s: Sequence[int] = field(default_factory=list)
m: Mapping[str, int] = field(default_factory=dict)
# d: dict = {}
# l: list = []
# t: tuple = (,)
# s: set = set([1,2,3,4,5])
# fs: frozenset = frozenset([1,2,3,4,5])
# gl: list[int] = []
# gd: dict[str, int] = {}
def intro(self) -> str: return f'My name is {self.name}.'
def wrap(self, func: Callable[[],None]) -> None:
print('start')
func()
print('end')
d = MyData()
d.name = 'Yamada'
d.img = 3+1j
d.age = 'a' # エラーにならない...
d.age = 100
print(f"{d.name}, {d.age}, {d.img}")
print(d.intro())
def myfunc(): print('myfunc()')
d.wrap(myfunc)
#def myfunc2(a): print('myfunc2()')
#d.wrap(myfunc2)
| #!/usr/bin/env python3
# coding: utf8
# https://docs.python.org/ja/3.7/library/dataclasses.html
# https://docs.python.org/ja/3/library/stdtypes.html
# https://www.python.org/dev/peps/pep-0484/
from dataclasses import dataclass, field
import typing
from typing import Any, Callable, Iterable, Tuple, Optional, List, Tuple, Dict, ClassVar, Union, TypeVar, NewType, Sequence, Mapping
from decimal import Decimal
from pathlib import Path
#import re
T = TypeVar('T', int, str)
Id = NewType('Id', int)
@dataclass
class MyData:
any: Any = None
name: str = ''
group: chr = 'A'
fix_data: bytes = b''
data: bytearray = b''
mem: memoryview = b''
age: int = 0
rate: float = 0.0
img: complex = 0j
dec: Decimal = Decimal(0.00)
is_dead: bool = False
r: range = range(0,9,1)
oi: Optional[int] = None
pattern: typing.re.Pattern = None
match: typing.re.Match = None
path: Path = None
l: List[str] = field(default_factory=list)
d: Dict[str, str] = field(default_factory=dict)
t: Tuple[int, float, str] = field(default_factory=tuple)
c: ClassVar[str] = 'クラス変数'
u: Union[int, str] = 'int | str どちらでもよい'
t: type = None
tv: T = None
id: Id = 0
s: Sequence[int] = field(default_factory=list)
m: Mapping[str, int] = field(default_factory=dict)
# d: dict = {}
# l: list = []
# t: tuple = (,)
# s: set = set([1,2,3,4,5])
# fs: frozenset = frozenset([1,2,3,4,5])
# gl: list[int] = []
# gd: dict[str, int] = {}
def intro(self) -> str: return f'My name is {self.name}.'
def wrap(self, func: Callable[[],None]) -> None:
print('start')
func()
print('end')
d = MyData()
d.name = 'Yamada'
d.img = 3+1j
d.age = 'a' # エラーにならない...
d.age = 100
print(f"{d.name}, {d.age}, {d.img}")
print(d.intro())
def myfunc(): print('myfunc()')
d.wrap(myfunc)
#def myfunc2(a): print('myfunc2()')
#d.wrap(myfunc2)
| en | 0.427376 | #!/usr/bin/env python3 # coding: utf8 # https://docs.python.org/ja/3.7/library/dataclasses.html # https://docs.python.org/ja/3/library/stdtypes.html # https://www.python.org/dev/peps/pep-0484/ #import re # d: dict = {} # l: list = [] # t: tuple = (,) # s: set = set([1,2,3,4,5]) # fs: frozenset = frozenset([1,2,3,4,5]) # gl: list[int] = [] # gd: dict[str, int] = {} # エラーにならない... #def myfunc2(a): print('myfunc2()') #d.wrap(myfunc2) | 2.781114 | 3 |
example/20NewsGroups/example.py | louisgeisler/Doc2Map | 14 | 6622567 | from Doc2Map import Doc2Map
from sklearn.datasets import fetch_20newsgroups
#Doc2Map.test_20newsgroups("test-learn", "all")
d2m = Doc2Map(speed="learn", ramification=22)
dataset = fetch_20newsgroups(subset='test', shuffle=True, random_state=42)
for i, (data, target) in enumerate(zip(dataset.data, dataset.target)):
d2m.add_text(data, str(i), target=target)
d2m.build()
d2m.display_tree()
d2m.display_simplified_tree()
d2m.plotly_interactive_map()
d2m.scatter()
d2m.interactive_map()
print("Fin") | from Doc2Map import Doc2Map
from sklearn.datasets import fetch_20newsgroups
#Doc2Map.test_20newsgroups("test-learn", "all")
d2m = Doc2Map(speed="learn", ramification=22)
dataset = fetch_20newsgroups(subset='test', shuffle=True, random_state=42)
for i, (data, target) in enumerate(zip(dataset.data, dataset.target)):
d2m.add_text(data, str(i), target=target)
d2m.build()
d2m.display_tree()
d2m.display_simplified_tree()
d2m.plotly_interactive_map()
d2m.scatter()
d2m.interactive_map()
print("Fin") | en | 0.206479 | #Doc2Map.test_20newsgroups("test-learn", "all") | 2.87031 | 3 |
Validation/RecoHI/python/selectSimTracks_cff.py | ckamtsikis/cmssw | 852 | 6622568 | import FWCore.ParameterSet.Config as cms
# Not actually functioning at the moment, use primaryChgSimTracks - <NAME>, 24/7/2013
findableSimTracks = cms.EDFilter("HitPixelLayersTPSelection",
src = cms.InputTag("mix","MergedTrackTruth"),
tripletSeedOnly = cms.bool(True),
chargedOnly = cms.bool(True),
signalOnly = cms.bool(False),
primaryOnly = cms.bool(True),
tpStatusBased = cms.bool(True), # for primary particle definition
ptMin = cms.double(2.0),
minHit = cms.int32(8),
minRapidity = cms.double(-2.5),
maxRapidity = cms.double(2.5),
tip = cms.double(3.5),
lip = cms.double(30.0),
pdgId = cms.vint32()
)
primaryChgSimTracks = cms.EDFilter("HitPixelLayersTPSelection",
src = cms.InputTag("mix","MergedTrackTruth"),
tripletSeedOnly = cms.bool(False),
chargedOnly = cms.bool(True),
signalOnly = cms.bool(False),
primaryOnly = cms.bool(True),
tpStatusBased = cms.bool(True),
ptMin = cms.double(0.1),
minHit = cms.int32(3),
minRapidity = cms.double(-2.5),
maxRapidity = cms.double(2.5),
tip = cms.double(3.5),
lip = cms.double(30.0),
pdgId = cms.vint32()
)
| import FWCore.ParameterSet.Config as cms
# Not actually functioning at the moment, use primaryChgSimTracks - <NAME>, 24/7/2013
findableSimTracks = cms.EDFilter("HitPixelLayersTPSelection",
src = cms.InputTag("mix","MergedTrackTruth"),
tripletSeedOnly = cms.bool(True),
chargedOnly = cms.bool(True),
signalOnly = cms.bool(False),
primaryOnly = cms.bool(True),
tpStatusBased = cms.bool(True), # for primary particle definition
ptMin = cms.double(2.0),
minHit = cms.int32(8),
minRapidity = cms.double(-2.5),
maxRapidity = cms.double(2.5),
tip = cms.double(3.5),
lip = cms.double(30.0),
pdgId = cms.vint32()
)
primaryChgSimTracks = cms.EDFilter("HitPixelLayersTPSelection",
src = cms.InputTag("mix","MergedTrackTruth"),
tripletSeedOnly = cms.bool(False),
chargedOnly = cms.bool(True),
signalOnly = cms.bool(False),
primaryOnly = cms.bool(True),
tpStatusBased = cms.bool(True),
ptMin = cms.double(0.1),
minHit = cms.int32(3),
minRapidity = cms.double(-2.5),
maxRapidity = cms.double(2.5),
tip = cms.double(3.5),
lip = cms.double(30.0),
pdgId = cms.vint32()
)
| en | 0.692675 | # Not actually functioning at the moment, use primaryChgSimTracks - <NAME>, 24/7/2013 # for primary particle definition | 1.002777 | 1 |
scripts/thr_hist_replica.py | dragonboy1994/libhotstuff | 0 | 6622569 | import sys
import re
import argparse
import numpy as np
from datetime import datetime, timedelta
def remove_outliers(x, outlierConstant = 1.5):
a = np.array(x)
upper_quartile = np.percentile(a, 75)
lower_quartile = np.percentile(a, 25)
IQR = (upper_quartile - lower_quartile) * outlierConstant
quartileSet = (lower_quartile - IQR, upper_quartile + IQR)
resultList = []
removedList = []
for y in a.tolist():
if y >= quartileSet[0] and y <= quartileSet[1]:
resultList.append(y)
else:
removedList.append(y)
return (resultList, removedList)
def str2datetime(s):
parts = s.split('.')
dt = datetime.strptime(parts[0], "%Y-%m-%d %H:%M:%S")
return dt.replace(microsecond=int(parts[1]))
def plot_thr(fname):
import matplotlib.pyplot as plt
x = range(len(values))
y = values
plt.xlabel(r"time")
plt.ylabel(r"tx/sec")
plt.plot(x, y)
plt.show()
plt.savefig(fname)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--interval', type=float, default=1, required=False)
parser.add_argument('--output', type=str, default="hist.png", required=False)
parser.add_argument('--plot', action='store_true')
args = parser.parse_args()
computation_pat = re.compile('([^[].*) \[hotstuff computation info\] ([0-9.]*)$')
cmd_pat = re.compile('([^[].*) \[hotstuff cmd info\] ([0-9.]*)$')
circle_pat = re.compile('([^[].*) the circle size is: ([0-9.]*)$')
interval = args.interval
cnt = 0
computation_lats = []
cmd_transfer_lats = []
timestamps = []
values = []
circle_sz = []
for line in sys.stdin:
computation_m = computation_pat.match(line)
if computation_m:
computation_lats.append(float(computation_m.group(2)))
cmd_m = cmd_pat.match(line)
if cmd_m:
cmd_transfer_lats.append(float(cmd_m.group(2)))
circle_m = circle_pat.match(line)
if circle_m:
circle_sz.append(float(circle_m.group(2)))
if len(computation_lats) == 0:
print("len(computation_lats == 0)")
else:
print("computation_lat = {:.3f}ms".format(sum(computation_lats) / len(computation_lats) * 1e3))
computation_lats, _ = remove_outliers(computation_lats)
print("computation_lat after remove_outliers = {:.3f}ms".format(sum(computation_lats) / len(computation_lats) * 1e3))
if len(cmd_transfer_lats) == 0:
print("len(cmd_transfer_lats == 0)")
else:
print("cmd_lat = {:.3f}ms".format(sum(cmd_transfer_lats) / len(cmd_transfer_lats) * 1e3))
cmd_transfer_lats, _ = remove_outliers(cmd_transfer_lats)
print("cmd_lat after remove_outliers = {:.3f}ms".format(sum(cmd_transfer_lats) / len(cmd_transfer_lats) * 1e3))
print("circle_size = {:.3f}".format(sum(circle_sz) / len(circle_sz)))
circle_sz, _ = remove_outliers(circle_sz)
print("circle_size after remove_outliers = {:.3f}".format(sum(circle_sz) / len(circle_sz)))
if args.plot:
plot_thr(args.output)
| import sys
import re
import argparse
import numpy as np
from datetime import datetime, timedelta
def remove_outliers(x, outlierConstant = 1.5):
a = np.array(x)
upper_quartile = np.percentile(a, 75)
lower_quartile = np.percentile(a, 25)
IQR = (upper_quartile - lower_quartile) * outlierConstant
quartileSet = (lower_quartile - IQR, upper_quartile + IQR)
resultList = []
removedList = []
for y in a.tolist():
if y >= quartileSet[0] and y <= quartileSet[1]:
resultList.append(y)
else:
removedList.append(y)
return (resultList, removedList)
def str2datetime(s):
parts = s.split('.')
dt = datetime.strptime(parts[0], "%Y-%m-%d %H:%M:%S")
return dt.replace(microsecond=int(parts[1]))
def plot_thr(fname):
import matplotlib.pyplot as plt
x = range(len(values))
y = values
plt.xlabel(r"time")
plt.ylabel(r"tx/sec")
plt.plot(x, y)
plt.show()
plt.savefig(fname)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--interval', type=float, default=1, required=False)
parser.add_argument('--output', type=str, default="hist.png", required=False)
parser.add_argument('--plot', action='store_true')
args = parser.parse_args()
computation_pat = re.compile('([^[].*) \[hotstuff computation info\] ([0-9.]*)$')
cmd_pat = re.compile('([^[].*) \[hotstuff cmd info\] ([0-9.]*)$')
circle_pat = re.compile('([^[].*) the circle size is: ([0-9.]*)$')
interval = args.interval
cnt = 0
computation_lats = []
cmd_transfer_lats = []
timestamps = []
values = []
circle_sz = []
for line in sys.stdin:
computation_m = computation_pat.match(line)
if computation_m:
computation_lats.append(float(computation_m.group(2)))
cmd_m = cmd_pat.match(line)
if cmd_m:
cmd_transfer_lats.append(float(cmd_m.group(2)))
circle_m = circle_pat.match(line)
if circle_m:
circle_sz.append(float(circle_m.group(2)))
if len(computation_lats) == 0:
print("len(computation_lats == 0)")
else:
print("computation_lat = {:.3f}ms".format(sum(computation_lats) / len(computation_lats) * 1e3))
computation_lats, _ = remove_outliers(computation_lats)
print("computation_lat after remove_outliers = {:.3f}ms".format(sum(computation_lats) / len(computation_lats) * 1e3))
if len(cmd_transfer_lats) == 0:
print("len(cmd_transfer_lats == 0)")
else:
print("cmd_lat = {:.3f}ms".format(sum(cmd_transfer_lats) / len(cmd_transfer_lats) * 1e3))
cmd_transfer_lats, _ = remove_outliers(cmd_transfer_lats)
print("cmd_lat after remove_outliers = {:.3f}ms".format(sum(cmd_transfer_lats) / len(cmd_transfer_lats) * 1e3))
print("circle_size = {:.3f}".format(sum(circle_sz) / len(circle_sz)))
circle_sz, _ = remove_outliers(circle_sz)
print("circle_size after remove_outliers = {:.3f}".format(sum(circle_sz) / len(circle_sz)))
if args.plot:
plot_thr(args.output)
| none | 1 | 2.770854 | 3 | |
pybb/migrations/0010_auto_20180130_0848.py | stPhoenix/projecttango | 0 | 6622570 | <reponame>stPhoenix/projecttango<gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-01-30 06:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pybb', '0009_account_accountdeletion_emailaddress_emailconfirmation_passwordexpiry_passwordhistory_signupcode_sig'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='autosubscribe',
field=models.BooleanField(default=False, help_text='Automatically subscribe to topics that you answer', verbose_name='Automatically subscribe'),
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-01-30 06:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pybb', '0009_account_accountdeletion_emailaddress_emailconfirmation_passwordexpiry_passwordhistory_signupcode_sig'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='autosubscribe',
field=models.BooleanField(default=False, help_text='Automatically subscribe to topics that you answer', verbose_name='Automatically subscribe'),
),
] | en | 0.630409 | # -*- coding: utf-8 -*- # Generated by Django 1.11.5 on 2018-01-30 06:48 | 1.486246 | 1 |
configuration.py | Adrian-Tamas/PythonLibaryFrontend | 0 | 6622571 | <gh_stars>0
import configparser
configuration = configparser.ConfigParser(allow_no_value=True)
configuration.read("config.ini")
backend_url = configuration.get("defaults", "backend_url")
| import configparser
configuration = configparser.ConfigParser(allow_no_value=True)
configuration.read("config.ini")
backend_url = configuration.get("defaults", "backend_url") | none | 1 | 1.704317 | 2 | |
djstripe/migrations/0028_auto_20180604_0609.py | ComFreight/cmft-stripe-integration | 0 | 6622572 | <gh_stars>0
# Generated by Django 2.0.5 on 2018-06-04 03:09
from django.db import migrations
import djstripe.fields
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('djstripe', '0027_auto_20180528_1111'),
]
operations = [
migrations.AlterField(
model_name='customer',
name='account_balance',
field=djstripe.fields.StripeIntegerField(help_text="Current balance, if any, being stored on the customer's account. If negative, the customer has credit to apply to the next invoice. If positive, the customer has an amount owed that will be added to thenext invoice. The balance does not refer to any unpaid invoices; it solely takes into account amounts that have yet to be successfullyapplied to any invoice. This balance is only taken into account for recurring billing purposes (i.e., subscriptions, invoices, invoice items)."),
),
migrations.AlterField(
model_name='payout',
name='statement_descriptor',
field=djstripe.fields.StripeCharField(blank=True, help_text="Extra information about a payout to be displayed on the user's bank statement.", max_length=255, null=True),
),
migrations.AlterField(
model_name='plan',
name='name',
field=djstripe.fields.StripeTextField(help_text='Name of the plan, to be displayed on invoices and in the web interface.', null=True),
),
migrations.AlterField(
model_name='source',
name='statement_descriptor',
field=djstripe.fields.StripeCharField(blank=True, help_text="Extra information about a source. This will appear on your customer's statement every time you charge the source.", max_length=255, null=True),
),
migrations.AlterField(
model_name='transfer',
name='destination_type',
field=djstripe.fields.StripeCharField(blank=True, help_text='The type of the transfer destination.', max_length=14, null=True),
),
]
| # Generated by Django 2.0.5 on 2018-06-04 03:09
from django.db import migrations
import djstripe.fields
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('djstripe', '0027_auto_20180528_1111'),
]
operations = [
migrations.AlterField(
model_name='customer',
name='account_balance',
field=djstripe.fields.StripeIntegerField(help_text="Current balance, if any, being stored on the customer's account. If negative, the customer has credit to apply to the next invoice. If positive, the customer has an amount owed that will be added to thenext invoice. The balance does not refer to any unpaid invoices; it solely takes into account amounts that have yet to be successfullyapplied to any invoice. This balance is only taken into account for recurring billing purposes (i.e., subscriptions, invoices, invoice items)."),
),
migrations.AlterField(
model_name='payout',
name='statement_descriptor',
field=djstripe.fields.StripeCharField(blank=True, help_text="Extra information about a payout to be displayed on the user's bank statement.", max_length=255, null=True),
),
migrations.AlterField(
model_name='plan',
name='name',
field=djstripe.fields.StripeTextField(help_text='Name of the plan, to be displayed on invoices and in the web interface.', null=True),
),
migrations.AlterField(
model_name='source',
name='statement_descriptor',
field=djstripe.fields.StripeCharField(blank=True, help_text="Extra information about a source. This will appear on your customer's statement every time you charge the source.", max_length=255, null=True),
),
migrations.AlterField(
model_name='transfer',
name='destination_type',
field=djstripe.fields.StripeCharField(blank=True, help_text='The type of the transfer destination.', max_length=14, null=True),
),
] | en | 0.671788 | # Generated by Django 2.0.5 on 2018-06-04 03:09 | 2.010469 | 2 |
publication/map_helper.py | ahaberlie/SVRIMG | 7 | 6622573 | <gh_stars>1-10
import cartopy
import cartopy.crs as ccrs
import cartopy.io.shapereader as shpreader
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap, BoundaryNorm
from matplotlib.patches import Patch
import matplotlib.patheffects as PathEffects
import numpy as np
def draw_box(lon, lat, tree, pmm_image, out_image, out_shape):
d, pos = tree.query([lon, lat], k=1, distance_upper_bound=.1)
mid = int((512 / 2) / 2)
y, x = np.unravel_index(pos, shape=out_shape)
y_ = y-mid
x_ = x-mid
z_pt = np.where(pmm_image>=1)
x_z = z_pt[1]
y_z = z_pt[0]
out_image[y_ + y_z, x_ + x_z] = pmm_image[y_z, x_z]
def plot_box_stats(ax, grid_count):
delta = 512000
y_0 = 860000
x_0 = -930000
l_y = 1250000
l_x = -930000
i = 1
for y in range(0, 5):
for x in range(0, 6):
txt = ax.text(x_0 + (x*delta), y_0 - (y*delta), "n={}".format(grid_count[i]), fontsize=14, zorder=8)
txt.set_path_effects([PathEffects.withStroke(linewidth=5, foreground='w')])
txt = ax.text(l_x + (x*delta), l_y - (y*delta), "{}".format(i), fontsize=14, zorder=8)
txt.set_path_effects([PathEffects.withStroke(linewidth=5, foreground='w')])
i += 1
return ax
def coords(gx, gy):
r"""Calculate x,y coordinates of each grid cell.
Parameters
----------
gx: numeric
x coordinates in meshgrid
gy: numeric
y coordinates in meshgrid
Returns
-------
(X, Y) ndarray
List of coordinates in meshgrid
"""
return np.vstack([gx.ravel(), gy.ravel()]).T
def radar_norm():
cmap = radar_colormap()
classes = np.array(list(range(0, 85, 5)))
norm = BoundaryNorm(classes, ncolors=cmap.N)
return cmap, norm
def radar_colormap():
nws_reflectivity_colors = [ "#ffffff", # 0
"#04e9e7", # 5
"#019ff4", # 10
"#0300f4", # 15
"#02fd02", # 20
"#01c501", # 25
"#008e00", # 30
"#fdf802", # 35
"#e5bc00", # 40
"#fd9500", # 45
"#fd0000", # 50
"#d40000", # 55
"#bc0000", # 60
"#f800fd", # 65
"#9854c6", # 70
"#4B0082", # 75
"#000000"]
cmap = ListedColormap(nws_reflectivity_colors)
return cmap
def draw_box_plot(ax, img):
cmap, norm = radar_norm()
mmp = ax.imshow(np.flipud(img), cmap=cmap, norm=norm)
ax.arrow(125.5, 119, 0, -0.5, head_width=10, head_length=15, fc='k', ec='k', zorder=10)
ax.text(120, 130, "N", fontsize=35, zorder=10)
plt.colorbar(mmp, ax=ax, shrink=0.35, pad=0.01)
ax.set_yticks(list(range(0, 153, 17)))
ax.set_yticklabels([ 0 , 64, 128 , 192, 256 , 320,
384 , 448, 512])
ax.set_xticks(list(range(0, 153, 17)))
ax.set_xticklabels([ 0 , 64, 128 , 192, 256 , 320,
384 , 448, 512])
ax.set_xlabel("km")
ax.set_ylabel("km")
ax.grid()
return ax
def draw_geography(ax, geo_data_dir='../data/geo'):
countries_shp = shpreader.natural_earth(resolution='50m',
category='cultural',
name='admin_0_countries')
for country, info in zip(shpreader.Reader(countries_shp).geometries(),
shpreader.Reader(countries_shp).records()):
if info.attributes['NAME_LONG'] != 'United States':
ax.add_geometries([country], ccrs.PlateCarree(),
facecolor='lightgrey', edgecolor='k', zorder=6)
lakes_shp = shpreader.natural_earth(resolution='50m',
category='physical',
name='lakes')
for lake, info in zip(shpreader.Reader(lakes_shp).geometries(),
shpreader.Reader(lakes_shp).records()):
name = info.attributes['name']
if name == 'Lake Superior' or name == 'Lake Michigan' or \
name == 'Lake Huron' or name == 'Lake Erie' or name == 'Lake Ontario':
ax.add_geometries([lake], ccrs.PlateCarree(),
facecolor='lightsteelblue', edgecolor='k', zorder=6)
ax.add_feature(cfeature.NaturalEarthFeature('physical', 'ocean', '50m', edgecolor='face',
facecolor='lightsteelblue'), zorder=6)
ax.add_feature(cfeature.NaturalEarthFeature('physical', 'coastline', '50m', edgecolor='face',
facecolor='None'), zorder=6)
shapename = geo_data_dir + "/grid512km_latlon.shp"
for grid, info in zip(shpreader.Reader(shapename).geometries(), shpreader.Reader(shapename).records()):
ax.add_geometries([grid], ccrs.PlateCarree(),
facecolor='white', edgecolor='k')
for grid, info in zip(shpreader.Reader(shapename).geometries(), shpreader.Reader(shapename).records()):
ax.add_geometries([grid], ccrs.PlateCarree(), linewidth=3,
facecolor='None', edgecolor='k', zorder=7)
shapename = 'admin_1_states_provinces_lakes_shp'
states_shp = shpreader.natural_earth(resolution='50m',
category='cultural', name=shapename)
for state, info in zip(shpreader.Reader(states_shp).geometries(), shpreader.Reader(states_shp).records()):
if info.attributes['admin'] == 'United States of America':
ax.add_geometries([state], ccrs.PlateCarree(),
facecolor='white', edgecolor='k')
for state, info in zip(shpreader.Reader(states_shp).geometries(), shpreader.Reader(states_shp).records()):
if info.attributes['admin'] == 'United States of America':
ax.add_geometries([state], ccrs.PlateCarree(),
facecolor='None', edgecolor='k', zorder=6)
return ax | import cartopy
import cartopy.crs as ccrs
import cartopy.io.shapereader as shpreader
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap, BoundaryNorm
from matplotlib.patches import Patch
import matplotlib.patheffects as PathEffects
import numpy as np
def draw_box(lon, lat, tree, pmm_image, out_image, out_shape):
d, pos = tree.query([lon, lat], k=1, distance_upper_bound=.1)
mid = int((512 / 2) / 2)
y, x = np.unravel_index(pos, shape=out_shape)
y_ = y-mid
x_ = x-mid
z_pt = np.where(pmm_image>=1)
x_z = z_pt[1]
y_z = z_pt[0]
out_image[y_ + y_z, x_ + x_z] = pmm_image[y_z, x_z]
def plot_box_stats(ax, grid_count):
delta = 512000
y_0 = 860000
x_0 = -930000
l_y = 1250000
l_x = -930000
i = 1
for y in range(0, 5):
for x in range(0, 6):
txt = ax.text(x_0 + (x*delta), y_0 - (y*delta), "n={}".format(grid_count[i]), fontsize=14, zorder=8)
txt.set_path_effects([PathEffects.withStroke(linewidth=5, foreground='w')])
txt = ax.text(l_x + (x*delta), l_y - (y*delta), "{}".format(i), fontsize=14, zorder=8)
txt.set_path_effects([PathEffects.withStroke(linewidth=5, foreground='w')])
i += 1
return ax
def coords(gx, gy):
r"""Calculate x,y coordinates of each grid cell.
Parameters
----------
gx: numeric
x coordinates in meshgrid
gy: numeric
y coordinates in meshgrid
Returns
-------
(X, Y) ndarray
List of coordinates in meshgrid
"""
return np.vstack([gx.ravel(), gy.ravel()]).T
def radar_norm():
cmap = radar_colormap()
classes = np.array(list(range(0, 85, 5)))
norm = BoundaryNorm(classes, ncolors=cmap.N)
return cmap, norm
def radar_colormap():
nws_reflectivity_colors = [ "#ffffff", # 0
"#04e9e7", # 5
"#019ff4", # 10
"#0300f4", # 15
"#02fd02", # 20
"#01c501", # 25
"#008e00", # 30
"#fdf802", # 35
"#e5bc00", # 40
"#fd9500", # 45
"#fd0000", # 50
"#d40000", # 55
"#bc0000", # 60
"#f800fd", # 65
"#9854c6", # 70
"#4B0082", # 75
"#000000"]
cmap = ListedColormap(nws_reflectivity_colors)
return cmap
def draw_box_plot(ax, img):
cmap, norm = radar_norm()
mmp = ax.imshow(np.flipud(img), cmap=cmap, norm=norm)
ax.arrow(125.5, 119, 0, -0.5, head_width=10, head_length=15, fc='k', ec='k', zorder=10)
ax.text(120, 130, "N", fontsize=35, zorder=10)
plt.colorbar(mmp, ax=ax, shrink=0.35, pad=0.01)
ax.set_yticks(list(range(0, 153, 17)))
ax.set_yticklabels([ 0 , 64, 128 , 192, 256 , 320,
384 , 448, 512])
ax.set_xticks(list(range(0, 153, 17)))
ax.set_xticklabels([ 0 , 64, 128 , 192, 256 , 320,
384 , 448, 512])
ax.set_xlabel("km")
ax.set_ylabel("km")
ax.grid()
return ax
def draw_geography(ax, geo_data_dir='../data/geo'):
countries_shp = shpreader.natural_earth(resolution='50m',
category='cultural',
name='admin_0_countries')
for country, info in zip(shpreader.Reader(countries_shp).geometries(),
shpreader.Reader(countries_shp).records()):
if info.attributes['NAME_LONG'] != 'United States':
ax.add_geometries([country], ccrs.PlateCarree(),
facecolor='lightgrey', edgecolor='k', zorder=6)
lakes_shp = shpreader.natural_earth(resolution='50m',
category='physical',
name='lakes')
for lake, info in zip(shpreader.Reader(lakes_shp).geometries(),
shpreader.Reader(lakes_shp).records()):
name = info.attributes['name']
if name == 'Lake Superior' or name == 'Lake Michigan' or \
name == 'Lake Huron' or name == 'Lake Erie' or name == 'Lake Ontario':
ax.add_geometries([lake], ccrs.PlateCarree(),
facecolor='lightsteelblue', edgecolor='k', zorder=6)
ax.add_feature(cfeature.NaturalEarthFeature('physical', 'ocean', '50m', edgecolor='face',
facecolor='lightsteelblue'), zorder=6)
ax.add_feature(cfeature.NaturalEarthFeature('physical', 'coastline', '50m', edgecolor='face',
facecolor='None'), zorder=6)
shapename = geo_data_dir + "/grid512km_latlon.shp"
for grid, info in zip(shpreader.Reader(shapename).geometries(), shpreader.Reader(shapename).records()):
ax.add_geometries([grid], ccrs.PlateCarree(),
facecolor='white', edgecolor='k')
for grid, info in zip(shpreader.Reader(shapename).geometries(), shpreader.Reader(shapename).records()):
ax.add_geometries([grid], ccrs.PlateCarree(), linewidth=3,
facecolor='None', edgecolor='k', zorder=7)
shapename = 'admin_1_states_provinces_lakes_shp'
states_shp = shpreader.natural_earth(resolution='50m',
category='cultural', name=shapename)
for state, info in zip(shpreader.Reader(states_shp).geometries(), shpreader.Reader(states_shp).records()):
if info.attributes['admin'] == 'United States of America':
ax.add_geometries([state], ccrs.PlateCarree(),
facecolor='white', edgecolor='k')
for state, info in zip(shpreader.Reader(states_shp).geometries(), shpreader.Reader(states_shp).records()):
if info.attributes['admin'] == 'United States of America':
ax.add_geometries([state], ccrs.PlateCarree(),
facecolor='None', edgecolor='k', zorder=6)
return ax | en | 0.321498 | Calculate x,y coordinates of each grid cell. Parameters ---------- gx: numeric x coordinates in meshgrid gy: numeric y coordinates in meshgrid Returns ------- (X, Y) ndarray List of coordinates in meshgrid # 0 # 5 # 10 # 15 # 20 # 25 # 30 # 35 # 40 # 45 # 50 # 55 # 60 # 65 # 70 # 75 | 2.114252 | 2 |
core/engine/validation_helper.py | M-Spencer-94/configNOW | 3 | 6622574 | <gh_stars>1-10
# ============================================================================
#
# Copyright (c) 2007-2010 Integral Technology Solutions Pty Ltd,
# All Rights Reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE BE
# LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# FOR FURTHER INFORMATION PLEASE SEE THE INTEGRAL TECHNOLOGY SOLUTIONS
# END USER LICENSE AGREEMENT (ELUA).
#
# ============================================================================
from org.apache.log4j import Logger, PropertyConfigurator
True=1
False=0
log=Logger.getLogger('validation')
def invalid_property_error_msg(propertyName, propertyValue):
log.error('Property ' + str(propertyName) + ' with value of ' + str(propertyValue) + ' is invalid.')
def required_property_error_msg(propertyName):
log.error('Required property ' + str(propertyName) + ' does not exist.')
def malformed_list_error_msg(propertyName):
log.error('Property list ' + str(propertyName) + ' is malformed.')
def invalid_boolean_error_msg(propertyName):
log.error('Boolean property ' + str(propertyName) + ' must have a value of true or false')
def invalid_number_error_msg(propertyName):
log.error('Property ' + str(propertyName) + ' must have a numerical value')
def validateBoolean(config, propertyName):
property=config.getProperty(propertyName)
if property:
if property.upper() != 'TRUE' and property.upper() != 'FALSE':
invalid_boolean_error_msg(propertyName)
return False
log.debug(propertyName + ' is valid')
return True
def validateNumber(config, propertyName):
property=config.getProperty(propertyName)
if property:
try:
int(property)
except ValueError:
invalid_number_error_msg(propertyName)
return False
log.debug(propertyName + ' is valid')
return True
def validateList(config, propertyName):
property=config.getProperty(propertyName)
if property:
items=property.split(',')
for item in items:
if len(item)==0:
malformed_list_error_msg(propertyName)
return False
return True
def listContainsValue(listProperty, value):
if listProperty and value:
list=listProperty.split(',')
for item in list:
if item==value:
return True
return False
def printHeader(text):
line=getLine(text)
log.debug(line)
log.debug(text)
log.debug(line)
def getLine(text):
count=len(text)
output = ''
for i in xrange(0,count):
output = output+'='
return output | # ============================================================================
#
# Copyright (c) 2007-2010 Integral Technology Solutions Pty Ltd,
# All Rights Reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE BE
# LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# FOR FURTHER INFORMATION PLEASE SEE THE INTEGRAL TECHNOLOGY SOLUTIONS
# END USER LICENSE AGREEMENT (ELUA).
#
# ============================================================================
from org.apache.log4j import Logger, PropertyConfigurator
True=1
False=0
log=Logger.getLogger('validation')
def invalid_property_error_msg(propertyName, propertyValue):
log.error('Property ' + str(propertyName) + ' with value of ' + str(propertyValue) + ' is invalid.')
def required_property_error_msg(propertyName):
log.error('Required property ' + str(propertyName) + ' does not exist.')
def malformed_list_error_msg(propertyName):
log.error('Property list ' + str(propertyName) + ' is malformed.')
def invalid_boolean_error_msg(propertyName):
log.error('Boolean property ' + str(propertyName) + ' must have a value of true or false')
def invalid_number_error_msg(propertyName):
log.error('Property ' + str(propertyName) + ' must have a numerical value')
def validateBoolean(config, propertyName):
property=config.getProperty(propertyName)
if property:
if property.upper() != 'TRUE' and property.upper() != 'FALSE':
invalid_boolean_error_msg(propertyName)
return False
log.debug(propertyName + ' is valid')
return True
def validateNumber(config, propertyName):
property=config.getProperty(propertyName)
if property:
try:
int(property)
except ValueError:
invalid_number_error_msg(propertyName)
return False
log.debug(propertyName + ' is valid')
return True
def validateList(config, propertyName):
property=config.getProperty(propertyName)
if property:
items=property.split(',')
for item in items:
if len(item)==0:
malformed_list_error_msg(propertyName)
return False
return True
def listContainsValue(listProperty, value):
if listProperty and value:
list=listProperty.split(',')
for item in list:
if item==value:
return True
return False
def printHeader(text):
line=getLine(text)
log.debug(line)
log.debug(text)
log.debug(line)
def getLine(text):
count=len(text)
output = ''
for i in xrange(0,count):
output = output+'='
return output | en | 0.513793 | # ============================================================================ # # Copyright (c) 2007-2010 Integral Technology Solutions Pty Ltd, # All Rights Reserved. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE BE # LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR # ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER # IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. # # FOR FURTHER INFORMATION PLEASE SEE THE INTEGRAL TECHNOLOGY SOLUTIONS # END USER LICENSE AGREEMENT (ELUA). # # ============================================================================ | 2.097093 | 2 |
remo/management/commands/savecurrentsensorvalue.py | onsendev/django_iot_workshop_sample | 0 | 6622575 | from django.core.management.base import BaseCommand
from remo.models import SensorValue
from remo.modules.api import NatureRemoApi
class Command(BaseCommand):
def handle(self, *args, **options):
# 追記
# センサーの値をAPIで取得し、SensorValueモデルを1件新たに保存
temperature, humidity, illuminate = NatureRemoApi().fetch_sensor_values()
sensor_value = SensorValue()
sensor_value.temperature = temperature
sensor_value.humidity = humidity
sensor_value.illumination = illuminate
sensor_value.save()
| from django.core.management.base import BaseCommand
from remo.models import SensorValue
from remo.modules.api import NatureRemoApi
class Command(BaseCommand):
def handle(self, *args, **options):
# 追記
# センサーの値をAPIで取得し、SensorValueモデルを1件新たに保存
temperature, humidity, illuminate = NatureRemoApi().fetch_sensor_values()
sensor_value = SensorValue()
sensor_value.temperature = temperature
sensor_value.humidity = humidity
sensor_value.illumination = illuminate
sensor_value.save()
| ja | 0.990657 | # 追記 # センサーの値をAPIで取得し、SensorValueモデルを1件新たに保存 | 1.8568 | 2 |
tools/typed/type_gen.py | andyc655/gunyah-hypervisor | 61 | 6622576 | #!/usr/bin/env python3
# © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
#
# 2019 Cog Systems Pty Ltd.
#
# SPDX-License-Identifier: BSD-3-Clause
from lark import Lark
from exceptions import RangeError, DSLError
from ir import TransformTypes
from abi import AArch64ABI
import argparse
import sys
import os
import subprocess
import inspect
import logging
import pickle
logger = logging.getLogger(__name__)
abi_classes = {
'aarch64': AArch64ABI,
}
def parse_dsl(parser, inputs, abi):
trees = []
for p in inputs:
text = p.read()
parse_tree = parser.parse(text)
cur_tree = TransformTypes(text).transform(parse_tree)
trees.append(cur_tree.get_intermediate_tree())
final_tree = trees.pop(0)
for t in trees:
final_tree.merge(t)
final_tree.update(abi_classes[abi]())
return final_tree
def apply_template(tree, template, public_only=False):
if template is None:
code = tree.gen_output(public_only=public_only)
else:
code = tree.apply_template(template, public_only=public_only)
return code
def main():
logging.basicConfig(
level=logging.INFO,
format="%(message)s",
)
__loc__ = os.path.realpath(
os.path.dirname(os.path.join(os.getcwd(), os.path.dirname(__file__))))
arg_parser = argparse.ArgumentParser()
mode_args = arg_parser.add_mutually_exclusive_group(required=True)
mode_args.add_argument('-P', '--dump-pickle', type=argparse.FileType('wb'),
help="Dump the IR to a Python pickle")
mode_args.add_argument("-o", "--output",
help="Output file (default stdout)",
type=argparse.FileType('w', encoding='utf-8'),
default=sys.stdout)
arg_parser.add_argument('-t', '--template',
type=argparse.FileType('r', encoding='utf-8'),
help="Template file used to generate output")
arg_parser.add_argument('--public', action='store_true',
help="Include only public API types")
arg_parser.add_argument('--traceback', action="store_true",
help="Print a full traceback if an error occurs")
arg_parser.add_argument("-a", "--abi", help="specify the target machine "
"compiler ABI name", choices=abi_classes.keys(),
required=True)
arg_parser.add_argument("-f", "--formatter",
help="specify clang-format to format the code")
arg_parser.add_argument("-d", "--deps", default=None,
type=argparse.FileType('w', encoding='utf-8'),
help="write implicit dependencies to a Makefile")
arg_parser.add_argument("input", metavar='INPUT', nargs="*",
type=argparse.FileType('r', encoding='utf-8'),
help="Input type DSL files to process")
arg_parser.add_argument('-p', '--load-pickle',
type=argparse.FileType('rb'),
help="Load the IR from a Python pickle")
options = arg_parser.parse_args()
# Calling sanity checks
if options.input and options.load_pickle:
logger.error("Cannot specify both inputs and --load-pickle")
arg_parser.print_usage()
sys.exit(1)
grammar_file = os.path.join(__loc__, 'grammars', 'typed_dsl.lark')
parser = Lark.open(grammar_file, 'start', propagate_positions=True)
if options.input:
try:
ir = parse_dsl(parser, options.input, options.abi)
except (DSLError, RangeError) as e:
if options.traceback:
import traceback
traceback.print_exc(file=sys.stderr)
else:
logger.error("Parse error", e)
sys.exit(1)
if options.dump_pickle:
pickle.dump(ir, options.dump_pickle, protocol=4)
elif options.load_pickle:
ir = pickle.load(options.load_pickle)
else:
logger.error("Must specify inputs or --load-pickle")
arg_parser.print_usage()
sys.exit(1)
if not options.dump_pickle:
result = apply_template(ir, options.template,
public_only=options.public)
if options.formatter:
ret = subprocess.run([options.formatter],
input=result.encode("utf-8"),
stdout=subprocess.PIPE)
result = ret.stdout.decode("utf-8")
if ret.returncode != 0:
logger.error("Error formatting output", result)
sys.exit(1)
options.output.write(result)
options.output.close()
if options.deps is not None:
deps = set()
deps.add(grammar_file)
if options.template is not None:
deps.add(options.template.name)
for m in sys.modules.values():
try:
f = inspect.getsourcefile(m)
except TypeError:
continue
if f is None:
continue
f = os.path.relpath(f)
if f.startswith('../'):
continue
deps.add(f)
# TODO: include Cheetah templates
if options.dump_pickle:
out_name = options.dump_pickle.name
else:
out_name = options.output.name
options.deps.write(out_name + ' : ')
options.deps.write(' '.join(sorted(deps)))
options.deps.write('\n')
options.deps.close()
if __name__ == '__main__':
main()
| #!/usr/bin/env python3
# © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
#
# 2019 Cog Systems Pty Ltd.
#
# SPDX-License-Identifier: BSD-3-Clause
from lark import Lark
from exceptions import RangeError, DSLError
from ir import TransformTypes
from abi import AArch64ABI
import argparse
import sys
import os
import subprocess
import inspect
import logging
import pickle
logger = logging.getLogger(__name__)
abi_classes = {
'aarch64': AArch64ABI,
}
def parse_dsl(parser, inputs, abi):
trees = []
for p in inputs:
text = p.read()
parse_tree = parser.parse(text)
cur_tree = TransformTypes(text).transform(parse_tree)
trees.append(cur_tree.get_intermediate_tree())
final_tree = trees.pop(0)
for t in trees:
final_tree.merge(t)
final_tree.update(abi_classes[abi]())
return final_tree
def apply_template(tree, template, public_only=False):
if template is None:
code = tree.gen_output(public_only=public_only)
else:
code = tree.apply_template(template, public_only=public_only)
return code
def main():
logging.basicConfig(
level=logging.INFO,
format="%(message)s",
)
__loc__ = os.path.realpath(
os.path.dirname(os.path.join(os.getcwd(), os.path.dirname(__file__))))
arg_parser = argparse.ArgumentParser()
mode_args = arg_parser.add_mutually_exclusive_group(required=True)
mode_args.add_argument('-P', '--dump-pickle', type=argparse.FileType('wb'),
help="Dump the IR to a Python pickle")
mode_args.add_argument("-o", "--output",
help="Output file (default stdout)",
type=argparse.FileType('w', encoding='utf-8'),
default=sys.stdout)
arg_parser.add_argument('-t', '--template',
type=argparse.FileType('r', encoding='utf-8'),
help="Template file used to generate output")
arg_parser.add_argument('--public', action='store_true',
help="Include only public API types")
arg_parser.add_argument('--traceback', action="store_true",
help="Print a full traceback if an error occurs")
arg_parser.add_argument("-a", "--abi", help="specify the target machine "
"compiler ABI name", choices=abi_classes.keys(),
required=True)
arg_parser.add_argument("-f", "--formatter",
help="specify clang-format to format the code")
arg_parser.add_argument("-d", "--deps", default=None,
type=argparse.FileType('w', encoding='utf-8'),
help="write implicit dependencies to a Makefile")
arg_parser.add_argument("input", metavar='INPUT', nargs="*",
type=argparse.FileType('r', encoding='utf-8'),
help="Input type DSL files to process")
arg_parser.add_argument('-p', '--load-pickle',
type=argparse.FileType('rb'),
help="Load the IR from a Python pickle")
options = arg_parser.parse_args()
# Calling sanity checks
if options.input and options.load_pickle:
logger.error("Cannot specify both inputs and --load-pickle")
arg_parser.print_usage()
sys.exit(1)
grammar_file = os.path.join(__loc__, 'grammars', 'typed_dsl.lark')
parser = Lark.open(grammar_file, 'start', propagate_positions=True)
if options.input:
try:
ir = parse_dsl(parser, options.input, options.abi)
except (DSLError, RangeError) as e:
if options.traceback:
import traceback
traceback.print_exc(file=sys.stderr)
else:
logger.error("Parse error", e)
sys.exit(1)
if options.dump_pickle:
pickle.dump(ir, options.dump_pickle, protocol=4)
elif options.load_pickle:
ir = pickle.load(options.load_pickle)
else:
logger.error("Must specify inputs or --load-pickle")
arg_parser.print_usage()
sys.exit(1)
if not options.dump_pickle:
result = apply_template(ir, options.template,
public_only=options.public)
if options.formatter:
ret = subprocess.run([options.formatter],
input=result.encode("utf-8"),
stdout=subprocess.PIPE)
result = ret.stdout.decode("utf-8")
if ret.returncode != 0:
logger.error("Error formatting output", result)
sys.exit(1)
options.output.write(result)
options.output.close()
if options.deps is not None:
deps = set()
deps.add(grammar_file)
if options.template is not None:
deps.add(options.template.name)
for m in sys.modules.values():
try:
f = inspect.getsourcefile(m)
except TypeError:
continue
if f is None:
continue
f = os.path.relpath(f)
if f.startswith('../'):
continue
deps.add(f)
# TODO: include Cheetah templates
if options.dump_pickle:
out_name = options.dump_pickle.name
else:
out_name = options.output.name
options.deps.write(out_name + ' : ')
options.deps.write(' '.join(sorted(deps)))
options.deps.write('\n')
options.deps.close()
if __name__ == '__main__':
main()
| en | 0.573487 | #!/usr/bin/env python3 # © 2021 Qualcomm Innovation Center, Inc. All rights reserved. # # 2019 Cog Systems Pty Ltd. # # SPDX-License-Identifier: BSD-3-Clause # Calling sanity checks # TODO: include Cheetah templates | 1.946167 | 2 |
ipytv/m3u_tools.py | Beer4Ever83/ipytv | 5 | 6622577 | <reponame>Beer4Ever83/ipytv<gh_stars>1-10
#!/usr/env/bin python3
import re
import urllib.parse
from typing import List
from ipytv import M3UPlaylist
from ipytv.channel import IPTVChannel, IPTVAttr
class M3UDoctor:
@staticmethod
def fix_split_quoted_string(m3u_rows: List) -> List:
"""
This covers the case of rows beginning with double quotes that belong to the previous row.
Example:
#EXTINF:-1 tvg-id="Cinema1
" tvg-name="Cinema1" group-title="Cinema",Cinema One
"""
fixed_m3u_rows: List = []
lines = len(m3u_rows)
index: int
for index in range(lines):
current_row: str = m3u_rows[index]
previous_row: str = m3u_rows[index-1]
if index > 0 and re.match(r"^\s*\"", current_row) and \
previous_row.startswith("#EXTINF:"):
fixed_m3u_rows.pop()
fixed_m3u_rows.append(previous_row.rstrip() + current_row.lstrip())
else:
fixed_m3u_rows.append(current_row)
return fixed_m3u_rows
class IPTVChannelDoctor:
@staticmethod
def urlencode_logo(channel: IPTVChannel) -> IPTVChannel:
"""
This covers the case of tvg-logo attributes not being correctly url-encoded.
Example (commas in the url):
tvg-logo="https://some.image.com/images/V1_UX182_CR0,0,182,268_AL_.jpg"
"""
new_channel = channel.copy()
logo = new_channel.attributes[IPTVAttr.TVG_LOGO.value]
new_channel.attributes[IPTVAttr.TVG_LOGO.value] = urllib.parse.quote(logo, safe=':/%')
return new_channel
@staticmethod
def sanitize_attributes(channel: IPTVChannel) -> IPTVChannel:
attr: str
new_channel = channel.copy()
for attr in channel.attributes.keys():
IPTVChannelDoctor.__sanitize_commas(new_channel, attr)
IPTVChannelDoctor.__attributes_to_lowercase(new_channel, attr)
return new_channel
@staticmethod
def __attributes_to_lowercase(channel: IPTVChannel, attribute_name: str):
"""
This covers the case of well-known attributes (i.e. the ones in IPTVAttr)
spelled wrongly.
Example:
tvg-ID="" (should be tvg-id="")
"""
try:
IPTVAttr(attribute_name)
except ValueError:
try:
key = IPTVAttr(attribute_name.lower()).value
value = channel.attributes[attribute_name]
del channel.attributes[attribute_name]
channel.attributes[key] = value
except ValueError:
# It seems not a well-known attribute, so we leave it untouched.
pass
@staticmethod
def __sanitize_commas(channel: IPTVChannel, attribute_name: str):
""""
This covers the case of attributes values containing a comma, which can confuse some
parsing libraries (not this one, though)
"""
if attribute_name == IPTVAttr.TVG_LOGO.value:
return
value: str = channel.attributes[attribute_name]
if "," in value:
value = value.replace(",", "_")
channel.attributes[attribute_name] = value
class M3UPlaylistDoctor:
@staticmethod
def urlencode_all_logos(playlist: M3UPlaylist):
"""
This makes sure that all logo URLs in the playlist are encoded correctly.
"""
new_playlist: M3UPlaylist = M3UPlaylist()
channel: IPTVChannel
for channel in playlist.list:
new_playlist.add_channel(IPTVChannelDoctor.urlencode_logo(channel))
return new_playlist
@staticmethod
def sanitize_all_attributes(playlist: M3UPlaylist):
"""
This makes sure that all well-known attributes in the playlist are spelled correctly
and that no commas appear in the attributes value.
"""
new_playlist: M3UPlaylist = M3UPlaylist()
channel: IPTVChannel
for channel in playlist.list:
new_playlist.add_channel(IPTVChannelDoctor.sanitize_attributes(channel))
return new_playlist
| #!/usr/env/bin python3
import re
import urllib.parse
from typing import List
from ipytv import M3UPlaylist
from ipytv.channel import IPTVChannel, IPTVAttr
class M3UDoctor:
@staticmethod
def fix_split_quoted_string(m3u_rows: List) -> List:
"""
This covers the case of rows beginning with double quotes that belong to the previous row.
Example:
#EXTINF:-1 tvg-id="Cinema1
" tvg-name="Cinema1" group-title="Cinema",Cinema One
"""
fixed_m3u_rows: List = []
lines = len(m3u_rows)
index: int
for index in range(lines):
current_row: str = m3u_rows[index]
previous_row: str = m3u_rows[index-1]
if index > 0 and re.match(r"^\s*\"", current_row) and \
previous_row.startswith("#EXTINF:"):
fixed_m3u_rows.pop()
fixed_m3u_rows.append(previous_row.rstrip() + current_row.lstrip())
else:
fixed_m3u_rows.append(current_row)
return fixed_m3u_rows
class IPTVChannelDoctor:
@staticmethod
def urlencode_logo(channel: IPTVChannel) -> IPTVChannel:
"""
This covers the case of tvg-logo attributes not being correctly url-encoded.
Example (commas in the url):
tvg-logo="https://some.image.com/images/V1_UX182_CR0,0,182,268_AL_.jpg"
"""
new_channel = channel.copy()
logo = new_channel.attributes[IPTVAttr.TVG_LOGO.value]
new_channel.attributes[IPTVAttr.TVG_LOGO.value] = urllib.parse.quote(logo, safe=':/%')
return new_channel
@staticmethod
def sanitize_attributes(channel: IPTVChannel) -> IPTVChannel:
attr: str
new_channel = channel.copy()
for attr in channel.attributes.keys():
IPTVChannelDoctor.__sanitize_commas(new_channel, attr)
IPTVChannelDoctor.__attributes_to_lowercase(new_channel, attr)
return new_channel
@staticmethod
def __attributes_to_lowercase(channel: IPTVChannel, attribute_name: str):
"""
This covers the case of well-known attributes (i.e. the ones in IPTVAttr)
spelled wrongly.
Example:
tvg-ID="" (should be tvg-id="")
"""
try:
IPTVAttr(attribute_name)
except ValueError:
try:
key = IPTVAttr(attribute_name.lower()).value
value = channel.attributes[attribute_name]
del channel.attributes[attribute_name]
channel.attributes[key] = value
except ValueError:
# It seems not a well-known attribute, so we leave it untouched.
pass
@staticmethod
def __sanitize_commas(channel: IPTVChannel, attribute_name: str):
""""
This covers the case of attributes values containing a comma, which can confuse some
parsing libraries (not this one, though)
"""
if attribute_name == IPTVAttr.TVG_LOGO.value:
return
value: str = channel.attributes[attribute_name]
if "," in value:
value = value.replace(",", "_")
channel.attributes[attribute_name] = value
class M3UPlaylistDoctor:
@staticmethod
def urlencode_all_logos(playlist: M3UPlaylist):
"""
This makes sure that all logo URLs in the playlist are encoded correctly.
"""
new_playlist: M3UPlaylist = M3UPlaylist()
channel: IPTVChannel
for channel in playlist.list:
new_playlist.add_channel(IPTVChannelDoctor.urlencode_logo(channel))
return new_playlist
@staticmethod
def sanitize_all_attributes(playlist: M3UPlaylist):
"""
This makes sure that all well-known attributes in the playlist are spelled correctly
and that no commas appear in the attributes value.
"""
new_playlist: M3UPlaylist = M3UPlaylist()
channel: IPTVChannel
for channel in playlist.list:
new_playlist.add_channel(IPTVChannelDoctor.sanitize_attributes(channel))
return new_playlist | en | 0.867304 | #!/usr/env/bin python3 This covers the case of rows beginning with double quotes that belong to the previous row. Example: #EXTINF:-1 tvg-id="Cinema1 " tvg-name="Cinema1" group-title="Cinema",Cinema One This covers the case of tvg-logo attributes not being correctly url-encoded. Example (commas in the url): tvg-logo="https://some.image.com/images/V1_UX182_CR0,0,182,268_AL_.jpg" This covers the case of well-known attributes (i.e. the ones in IPTVAttr) spelled wrongly. Example: tvg-ID="" (should be tvg-id="") # It seems not a well-known attribute, so we leave it untouched. " This covers the case of attributes values containing a comma, which can confuse some parsing libraries (not this one, though) This makes sure that all logo URLs in the playlist are encoded correctly. This makes sure that all well-known attributes in the playlist are spelled correctly and that no commas appear in the attributes value. | 3.150235 | 3 |
concurrency/tools.py | guesswh0/concurrency | 0 | 6622578 | <filename>concurrency/tools.py
import logging
import sys
import time
import uuid
from functools import wraps
def get_logger():
# stdout stream handler
out = logging.StreamHandler(sys.stdout)
out.setLevel(logging.INFO)
out.addFilter(lambda record: record.levelno <= logging.INFO)
out.setFormatter(logging.Formatter(
'[PID:%(process)5d]'
'[%(asctime)s.%(msecs).3d]'
# '[%(threadName)10s]'
' %(message)s',
'%H:%M:%S'
))
# stderr stream handler
err = logging.StreamHandler(sys.stderr)
err.setLevel(logging.WARNING)
err.addFilter(lambda record: record.levelno <= logging.WARNING)
err.setFormatter(logging.Formatter(
'[PID:%(process)5d]'
'[%(asctime)s.%(msecs).3d]'
' %(message)s',
'%H:%M:%S'
))
log = logging.getLogger()
log.setLevel(logging.NOTSET)
log.addHandler(out)
log.addHandler(err)
return log
# project default logger
logger = get_logger()
def timeit(fn):
def wrapper(*args, **kwargs):
logger.log(logging.WARNING, 'Starting ...')
t1 = time.perf_counter()
fn(*args, **kwargs)
t2 = time.perf_counter()
logger.log(logging.WARNING, f'Time to complete: {t2 - t1:.2f} seconds')
return wrapper
def async_timeit(coro):
async def wrapper(*args, **kwargs):
logger.log(logging.WARNING, 'Starting ...')
t1 = time.perf_counter()
await coro(*args, **kwargs)
t2 = time.perf_counter()
logger.log(logging.WARNING, f'Time to complete: {t2 - t1:.2f} seconds')
return wrapper
def _format_args(*args, **kwargs):
s1 = ','.join(str(arg) for arg in args)
s2 = ','.join(f'{key}={value}' for key, value in kwargs.items())
if not s2:
return s1
return ','.join((s1, s2))
def log_task(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
name = uuid.uuid4().hex[:5]
s = _format_args(*args, **kwargs)
logger.info(f"{fn.__name__}({s}): {name}")
t1 = time.perf_counter()
fn(*args, **kwargs)
t2 = time.perf_counter()
logger.info(f"{fn.__name__}({s}): {name}, done in {t2 - t1:.2f} seconds")
return wrapper
def log_async_task(coro):
@wraps(coro)
async def wrapper(*args, **kwargs):
name = uuid.uuid4().hex[:5]
s = _format_args(*args, **kwargs)
logger.info(f"{coro.__name__}({s}): {name}")
t1 = time.perf_counter()
await coro(*args, **kwargs)
t2 = time.perf_counter()
logger.info(f"{coro.__name__}({s}): {name}, done in {t2 - t1:.2f} seconds")
return wrapper
| <filename>concurrency/tools.py
import logging
import sys
import time
import uuid
from functools import wraps
def get_logger():
# stdout stream handler
out = logging.StreamHandler(sys.stdout)
out.setLevel(logging.INFO)
out.addFilter(lambda record: record.levelno <= logging.INFO)
out.setFormatter(logging.Formatter(
'[PID:%(process)5d]'
'[%(asctime)s.%(msecs).3d]'
# '[%(threadName)10s]'
' %(message)s',
'%H:%M:%S'
))
# stderr stream handler
err = logging.StreamHandler(sys.stderr)
err.setLevel(logging.WARNING)
err.addFilter(lambda record: record.levelno <= logging.WARNING)
err.setFormatter(logging.Formatter(
'[PID:%(process)5d]'
'[%(asctime)s.%(msecs).3d]'
' %(message)s',
'%H:%M:%S'
))
log = logging.getLogger()
log.setLevel(logging.NOTSET)
log.addHandler(out)
log.addHandler(err)
return log
# project default logger
logger = get_logger()
def timeit(fn):
def wrapper(*args, **kwargs):
logger.log(logging.WARNING, 'Starting ...')
t1 = time.perf_counter()
fn(*args, **kwargs)
t2 = time.perf_counter()
logger.log(logging.WARNING, f'Time to complete: {t2 - t1:.2f} seconds')
return wrapper
def async_timeit(coro):
async def wrapper(*args, **kwargs):
logger.log(logging.WARNING, 'Starting ...')
t1 = time.perf_counter()
await coro(*args, **kwargs)
t2 = time.perf_counter()
logger.log(logging.WARNING, f'Time to complete: {t2 - t1:.2f} seconds')
return wrapper
def _format_args(*args, **kwargs):
s1 = ','.join(str(arg) for arg in args)
s2 = ','.join(f'{key}={value}' for key, value in kwargs.items())
if not s2:
return s1
return ','.join((s1, s2))
def log_task(fn):
    """Decorate *fn* to log its call, args, and run time.

    Each invocation gets a short random tag so interleaved calls can be
    matched up in the log output.

    :param fn: The function to wrap
    :returns: The wrapping function
    """
    @wraps(fn)
    def wrapper(*args, **kwargs):
        # Short random tag to disambiguate concurrent invocations.
        name = uuid.uuid4().hex[:5]
        s = _format_args(*args, **kwargs)
        logger.info(f"{fn.__name__}({s}): {name}")
        t1 = time.perf_counter()
        # BUG FIX: the original discarded fn's return value, so every
        # decorated function silently returned None.
        result = fn(*args, **kwargs)
        t2 = time.perf_counter()
        logger.info(f"{fn.__name__}({s}): {name}, done in {t2 - t1:.2f} seconds")
        return result
    return wrapper
def log_async_task(coro):
    """Decorate coroutine *coro* to log its call, args, and run time.

    Each invocation gets a short random tag so interleaved concurrent
    calls can be matched up in the log output.

    :param coro: The coroutine function to wrap
    :returns: The wrapping coroutine function
    """
    @wraps(coro)
    async def wrapper(*args, **kwargs):
        # Short random tag to disambiguate concurrent invocations.
        name = uuid.uuid4().hex[:5]
        s = _format_args(*args, **kwargs)
        logger.info(f"{coro.__name__}({s}): {name}")
        t1 = time.perf_counter()
        # BUG FIX: the original discarded the awaited result, so every
        # decorated coroutine silently returned None.
        result = await coro(*args, **kwargs)
        t2 = time.perf_counter()
        logger.info(f"{coro.__name__}({s}): {name}, done in {t2 - t1:.2f} seconds")
        return result
    return wrapper
| en | 0.346739 | # stdout stream handler # '[%(threadName)10s]' # stderr stream handler # project default logger | 2.335464 | 2 |
cwmud/core/attributes.py | whutch/cwmud | 11 | 6622579 | # -*- coding: utf-8 -*-
"""Data collections and attributes."""
# Part of Clockwork MUD Server (https://github.com/whutch/cwmud)
# :copyright: (c) 2008 - 2017 <NAME>
# :license: MIT (https://github.com/whutch/cwmud/blob/master/LICENSE.txt)
from collections import abc
from .logs import get_logger
from .utils import class_name, joins
from .utils.exceptions import AlreadyExists
from .utils.mixins import HasWeaks, HasWeaksMeta
log = get_logger("attrs")
class _DataBlobMeta(HasWeaksMeta):
    """Metaclass for DataBlob: gives each subclass its own attr/blob
    registries and the decorators that populate them."""

    def __init__(cls, name, bases, namespace):
        super().__init__(name, bases, namespace)
        # Fresh per-class registries, filled in by the decorators below.
        cls._blobs = {}
        cls._attrs = {}

    def register_blob(cls, name):
        """Decorate a data blob to register it in this blob.

        :param str name: The name of the field to store the blob
        :returns None:
        :raises AlreadyExists: If the given name already exists as an attr
        :raises TypeError: If the supplied or decorated class is not a
                           subclass of DataBlob
        """
        if hasattr(cls, name):
            raise AlreadyExists(name, getattr(cls, name))

        def _inner(blob_class):
            if (not isinstance(blob_class, type)
                    or not issubclass(blob_class, DataBlob)):
                raise TypeError("must be subclass of DataBlob to register")
            cls._blobs[name] = blob_class
            # Read-only property that looks the blob instance up on the
            # owning DataBlob instance's _blobs mapping.
            setattr(cls, name, property(lambda s: s._blobs[name]))
            return blob_class

        return _inner

    def register_attr(cls, name):
        """Decorate an attribute to register it in this blob.

        :param str name: The name of the field to store the attribute
        :returns None:
        :raises AlreadyExists: If the given name already exists as an attr
        :raises TypeError: If the supplied or decorated class is not a
                           subclass of Attribute
        """
        if hasattr(cls, name):
            raise AlreadyExists(name, getattr(cls, name))

        def _inner(attr_class):
            if (not isinstance(attr_class, type)
                    or not issubclass(attr_class, Attribute)):
                raise TypeError("must be subclass of Attribute to register")
            cls._attrs[name] = attr_class
            getter = lambda s: s._get_attr_val(name)
            # NOTE(review): the conditional binds inside the lambda body
            # (`lambda s, v: (X if C else None)`), so read-only attrs still
            # get a setter that silently does nothing on assignment.  If the
            # intent was a write-protected property (AttributeError on
            # assignment), the conditional should select between a lambda
            # and None -- confirm before changing.
            setter = (lambda s, v: s._set_attr_val(name, v)
                      if not attr_class._read_only else None)
            setattr(cls, name, property(getter, setter))
            return attr_class

        return _inner
class DataBlob(HasWeaks, metaclass=_DataBlobMeta):
    """A collection of attributes and sub-blobs on an entity."""

    # These are overridden in the metaclass, I just put them here
    # to avoid a lot of unresolved reference errors in IDE introspection.
    _blobs = None
    _attrs = None

    def __init__(self, entity):
        """Build the blob, seeding attr defaults and instantiating sub-blobs.

        :param entity: The entity this blob belongs to (held weakly)
        """
        super().__init__()
        self._entity = entity
        self._attr_values = {}
        # Seed every registered attribute with its class-level default.
        for key, attr in self._attrs.items():
            self._attr_values[key] = attr.get_default(entity)
        # Copy the class registry so this instance maps names to blob
        # *instances* rather than the shared blob classes.
        self._blobs = self._blobs.copy()
        for key, blob in self._blobs.items():
            self._blobs[key] = blob(entity)

    @property
    def _entity(self):
        # Held weakly so the blob never keeps its entity alive.
        return self._get_weak("entity")

    @_entity.setter
    def _entity(self, new_entity):
        self._set_weak("entity", new_entity)

    def _get_attr_val(self, name):
        # Note: returns None (not Unset) for names that were never stored.
        return self._attr_values.get(name)

    def _set_attr_val(self, name, value, validate=True, raw=False):
        """Store an attribute value, running validate/finalize hooks.

        :param str name: The registered attribute name
        :param value: The new value (may be the Unset sentinel)
        :param bool validate: Whether to run the attribute's validate() hook
        :param bool raw: Whether to skip the attribute's finalize() hook
        :returns None:
        """
        attr = self._attrs[name]
        old_value = self._attr_values.get(name)
        entity = self._entity
        # Unset bypasses both hooks and is stored as-is.
        if value is not Unset:
            if validate:
                value = attr.validate(entity, value)
            if not raw:
                value = attr.finalize(entity, value)
        self._attr_values[name] = value
        # Flag the entity for persistence and fire the change hook.
        entity.dirty()
        attr.changed(entity, self, old_value, value)
        # Update entity caches (per-attr mapping of value -> entity set).
        cache = entity._caches.get(name)
        if cache:
            if old_value in cache:
                cache[old_value].discard(entity)
            if value not in cache:
                cache[value] = {entity}
            else:
                cache[value].add(entity)

    def _update(self, blob):
        """Merge this blob with another, replacing blobs and attrs.

        Sub-blobs and attrs on the given blob with take precedent over those
        existing on this blob.

        :param DataBlob blob: The blob to merge this blob with
        :returns None:
        """
        self._blobs.update(blob._blobs)
        self._attrs.update(blob._attrs)
        self._attr_values.update(blob._attr_values)

    def serialize(self):
        """Create a dict from this blob, sanitized and suitable for storage.

        All sub-blobs will in turn be serialized.

        :returns dict: The serialized data
        """
        data = {}
        for key, blob in self._blobs.items():
            data[key] = blob.serialize()
        for key, attr in self._attrs.items():
            # Blob names and attr names share one flat key namespace.
            if key in data:
                raise KeyError(joins("duplicate blob key:", key))
            value = self._attr_values.get(key)
            if value is Unset:
                # The Unset sentinel is persisted as the literal "unset".
                value = "unset"
            else:
                value = attr.serialize(self._entity, value)
            data[key] = value
        return data

    def deserialize(self, data):
        """Update this blob's data using values from a dict.

        All sub-blobs found will in turn be deserialized. Be careful where
        you deserialize data from, as it will be loaded raw and unvalidated.

        :param dict data: The data to deserialize
        :returns None:
        """
        for key, value in data.items():
            if key in self._attrs:
                if value == "unset":
                    value = Unset
                else:
                    value = self._attrs[key].deserialize(self._entity, value)
                # Raw and unvalidated on purpose: data is trusted storage.
                self._set_attr_val(key, value, validate=False, raw=True)
            elif key in self._blobs:
                self._blobs[key].deserialize(value)
            else:
                # Unknown keys are logged, not raised, so stale fields in
                # stored data do not break loading.
                log.warning(joins("Unused data while deserializing ",
                                  class_name(self), ": '", key, "':'",
                                  value, "'.", sep=""))
class _UnsetMeta(type):
def __repr__(cls):
return "<Unset>"
def __bool__(cls):
return False
# Sentinel used class-level (never instantiated): compared with `is`,
# falsy, and repr's as "<Unset>" via _UnsetMeta.
class Unset(metaclass=_UnsetMeta):
    """A unique value to note that an attribute hasn't been set."""
class Attribute:
    """A single attribute of an entity.

    These are templates for the behavior of an attribute, they will not be
    instantiated and as such have no instance-based variables.

    The value of `default` should not be set to a mutable type, as it will
    be passed by reference to all instantiated blobs and risks being changed
    elsewhere in the code.
    """

    # Class-level default handed to every blob at construction time.
    _default = Unset  # Do NOT use mutable types for this.
    # When True, register_attr generates a property whose setter does not
    # write (see _DataBlobMeta.register_attr).
    _read_only = False

    @classmethod
    def get_default(cls, entity):
        """Get the default value for this attribute.

        :param entity: The entity this attribute is on
        :returns: The default value
        """
        return cls._default

    @classmethod
    def validate(cls, entity, new_value):
        """Validate a value for this attribute.

        This will be called by the blob when setting the value for this
        attribute, override it to perform any checks or sanitation. This
        should either return a valid value for the attribute or raise an
        exception as to why the value is invalid.

        :param entity: The entity this attribute is on
        :param new_value: The potential value to validate
        :returns: The validated (and optionally sanitized) value
        """
        # Base implementation accepts everything.
        return new_value

    @classmethod
    def finalize(cls, entity, new_value):
        """Finalize the value for this attribute.

        This will be called by the blob when setting the value for this
        attribute, after validation; override it to perform any sanitation
        or transformation. The value should be considered valid.

        :param entity: The entity this attribute is on
        :param new_value: The new, validated value
        :returns: The finalized value
        """
        return new_value

    @classmethod
    def changed(cls, entity, blob, old_value, new_value):
        """Perform any actions necessary after this attribute's value changes.

        This will be called by the blob after the value of this attribute
        has changed, override it to do any necessary post-setter actions.

        :param entity: The entity this attribute is on
        :param DataBlob blob: The blob that changed
        :param old_value: The previous value
        :param new_value: The new value
        :returns None:
        """
        # Intentionally a no-op hook in the base class.

    @classmethod
    def serialize(cls, entity, value):
        """Serialize a value for this attribute that is suitable for storage.

        This will be called by the blob when serializing itself, override it
        to perform any necessary conversion or sanitation.

        :param entity: The entity this attribute is on
        :param value: The value to serialize
        :returns: The serialized value
        """
        return value

    @classmethod
    def deserialize(cls, entity, value):
        """Deserialize a value for this attribute from storage.

        This will be called by the blob when deserializing itself, override it
        to perform any necessary conversion or sanitation.

        :param entity: The entity this attribute is on
        :param value: The value to deserialize
        :returns: The deserialized value
        """
        return value
class MutableAttribute(Attribute):
    """A mutable attribute of an entity."""

    # Read-only at the property level: the proxy object itself is never
    # reassigned; mutation happens *through* the proxy instead.
    _read_only = True

    class Proxy:
        # Abstract placeholder: concrete subclasses (List/Dict/SetAttribute)
        # provide a real proxy; instantiating this base proxy is an error.
        def __init__(self, entity):
            raise NotImplementedError

    @classmethod
    def get_default(cls, entity):
        """Return a bound proxy instance for this mutable attribute.

        :param entity: The entity this attribute is on
        :returns: A bound proxy instance
        """
        return cls.Proxy(entity)
class ListAttribute(MutableAttribute):
    """An entity attribute that proxies a list."""

    class Proxy(abc.MutableSequence):
        # MutableSequence derives append/extend/pop/remove/etc. from the
        # five methods implemented here.

        def __init__(self, entity, items=()):
            self._items = list(items)
            self._entity = entity

        def __getitem__(self, index):
            return self._items[index]

        def __setitem__(self, index, value):
            self._items[index] = value
            # Every mutation flags the owning entity for persistence.
            self._entity.dirty()

        def __delitem__(self, index):
            del self._items[index]
            self._entity.dirty()

        def __len__(self):
            return len(self._items)

        def insert(self, index, value):
            self._items.insert(index, value)
            self._entity.dirty()
class DictAttribute(MutableAttribute):
    """An entity attribute that proxies a dictionary."""

    class Proxy(abc.MutableMapping):
        # MutableMapping derives get/setdefault/update/etc. from the five
        # methods implemented here.

        def __init__(self, entity, items=None):
            self._items = dict(items or {})
            self._entity = entity

        def __getitem__(self, key):
            return self._items[key]

        def __setitem__(self, key, value):
            self._items[key] = value
            # Every mutation flags the owning entity for persistence.
            self._entity.dirty()

        def __delitem__(self, key):
            del self._items[key]
            self._entity.dirty()

        def __len__(self):
            return len(self._items)

        def __iter__(self):
            return iter(self._items)
class SetAttribute(MutableAttribute):
    """An entity attribute that proxies a set."""

    class Proxy(abc.MutableSet):
        # MutableSet derives the set-algebra operators from the methods
        # implemented here.

        def __init__(self, entity, items=()):
            self._items = set(items)
            self._entity = entity

        def __len__(self):
            return len(self._items)

        def __iter__(self):
            return iter(self._items)

        def __contains__(self, value):
            return value in self._items

        def add(self, value):
            self._items.add(value)
            # Every mutation flags the owning entity for persistence.
            self._entity.dirty()

        def discard(self, value):
            self._items.discard(value)
            self._entity.dirty()
| # -*- coding: utf-8 -*-
"""Data collections and attributes."""
# Part of Clockwork MUD Server (https://github.com/whutch/cwmud)
# :copyright: (c) 2008 - 2017 <NAME>
# :license: MIT (https://github.com/whutch/cwmud/blob/master/LICENSE.txt)
from collections import abc
from .logs import get_logger
from .utils import class_name, joins
from .utils.exceptions import AlreadyExists
from .utils.mixins import HasWeaks, HasWeaksMeta
log = get_logger("attrs")
class _DataBlobMeta(HasWeaksMeta):
def __init__(cls, name, bases, namespace):
super().__init__(name, bases, namespace)
cls._blobs = {}
cls._attrs = {}
def register_blob(cls, name):
"""Decorate a data blob to register it in this blob.
:param str name: The name of the field to store the blob
:returns None:
:raises AlreadyExists: If the given name already exists as an attr
:raises TypeError: If the supplied or decorated class is not a
subclass of DataBlob
"""
if hasattr(cls, name):
raise AlreadyExists(name, getattr(cls, name))
def _inner(blob_class):
if (not isinstance(blob_class, type)
or not issubclass(blob_class, DataBlob)):
raise TypeError("must be subclass of DataBlob to register")
cls._blobs[name] = blob_class
setattr(cls, name, property(lambda s: s._blobs[name]))
return blob_class
return _inner
def register_attr(cls, name):
"""Decorate an attribute to register it in this blob.
:param str name: The name of the field to store the attribute
:returns None:
:raises AlreadyExists: If the given name already exists as an attr
:raises TypeError: If the supplied or decorated class is not a
subclass of Attribute
"""
if hasattr(cls, name):
raise AlreadyExists(name, getattr(cls, name))
def _inner(attr_class):
if (not isinstance(attr_class, type)
or not issubclass(attr_class, Attribute)):
raise TypeError("must be subclass of Attribute to register")
cls._attrs[name] = attr_class
getter = lambda s: s._get_attr_val(name)
setter = (lambda s, v: s._set_attr_val(name, v)
if not attr_class._read_only else None)
setattr(cls, name, property(getter, setter))
return attr_class
return _inner
class DataBlob(HasWeaks, metaclass=_DataBlobMeta):
"""A collection of attributes and sub-blobs on an entity."""
# These are overridden in the metaclass, I just put them here
# to avoid a lot of unresolved reference errors in IDE introspection.
_blobs = None
_attrs = None
def __init__(self, entity):
super().__init__()
self._entity = entity
self._attr_values = {}
for key, attr in self._attrs.items():
self._attr_values[key] = attr.get_default(entity)
self._blobs = self._blobs.copy()
for key, blob in self._blobs.items():
self._blobs[key] = blob(entity)
@property
def _entity(self):
return self._get_weak("entity")
@_entity.setter
def _entity(self, new_entity):
self._set_weak("entity", new_entity)
def _get_attr_val(self, name):
return self._attr_values.get(name)
def _set_attr_val(self, name, value, validate=True, raw=False):
attr = self._attrs[name]
old_value = self._attr_values.get(name)
entity = self._entity
if value is not Unset:
if validate:
value = attr.validate(entity, value)
if not raw:
value = attr.finalize(entity, value)
self._attr_values[name] = value
entity.dirty()
attr.changed(entity, self, old_value, value)
# Update entity caches.
cache = entity._caches.get(name)
if cache:
if old_value in cache:
cache[old_value].discard(entity)
if value not in cache:
cache[value] = {entity}
else:
cache[value].add(entity)
def _update(self, blob):
"""Merge this blob with another, replacing blobs and attrs.
Sub-blobs and attrs on the given blob with take precedent over those
existing on this blob.
:param DataBlob blob: The blob to merge this blob with
:returns None:
"""
self._blobs.update(blob._blobs)
self._attrs.update(blob._attrs)
self._attr_values.update(blob._attr_values)
def serialize(self):
"""Create a dict from this blob, sanitized and suitable for storage.
All sub-blobs will in turn be serialized.
:returns dict: The serialized data
"""
data = {}
for key, blob in self._blobs.items():
data[key] = blob.serialize()
for key, attr in self._attrs.items():
if key in data:
raise KeyError(joins("duplicate blob key:", key))
value = self._attr_values.get(key)
if value is Unset:
value = "unset"
else:
value = attr.serialize(self._entity, value)
data[key] = value
return data
def deserialize(self, data):
"""Update this blob's data using values from a dict.
All sub-blobs found will in turn be deserialized. Be careful where
you deserialize data from, as it will be loaded raw and unvalidated.
:param dict data: The data to deserialize
:returns None:
"""
for key, value in data.items():
if key in self._attrs:
if value == "unset":
value = Unset
else:
value = self._attrs[key].deserialize(self._entity, value)
self._set_attr_val(key, value, validate=False, raw=True)
elif key in self._blobs:
self._blobs[key].deserialize(value)
else:
log.warning(joins("Unused data while deserializing ",
class_name(self), ": '", key, "':'",
value, "'.", sep=""))
class _UnsetMeta(type):
def __repr__(cls):
return "<Unset>"
def __bool__(cls):
return False
class Unset(metaclass=_UnsetMeta):
"""A unique value to note that an attribute hasn't been set."""
class Attribute:
"""A single attribute of an entity.
These are templates for the behavior of an attribute, they will not be
instantiated and as such have no instance-based variables.
The value of `default` should not be set to a mutable type, as it will
be passed by reference to all instantiated blobs and risks being changed
elsewhere in the code.
"""
_default = Unset # Do NOT use mutable types for this.
_read_only = False
@classmethod
def get_default(cls, entity):
"""Get the default value for this attribute.
:param entity: The entity this attribute is on
:returns: The default value
"""
return cls._default
@classmethod
def validate(cls, entity, new_value):
"""Validate a value for this attribute.
This will be called by the blob when setting the value for this
attribute, override it to perform any checks or sanitation. This
should either return a valid value for the attribute or raise an
exception as to why the value is invalid.
:param entity: The entity this attribute is on
:param new_value: The potential value to validate
:returns: The validated (and optionally sanitized) value
"""
return new_value
@classmethod
def finalize(cls, entity, new_value):
"""Finalize the value for this attribute.
This will be called by the blob when setting the value for this
attribute, after validation; override it to perform any sanitation
or transformation. The value should be considered valid.
:param entity: The entity this attribute is on
:param new_value: The new, validated value
:returns: The finalized value
"""
return new_value
@classmethod
def changed(cls, entity, blob, old_value, new_value):
"""Perform any actions necessary after this attribute's value changes.
This will be called by the blob after the value of this attribute
has changed, override it to do any necessary post-setter actions.
:param entity: The entity this attribute is on
:param DataBlob blob: The blob that changed
:param old_value: The previous value
:param new_value: The new value
:returns None:
"""
@classmethod
def serialize(cls, entity, value):
"""Serialize a value for this attribute that is suitable for storage.
This will be called by the blob when serializing itself, override it
to perform any necessary conversion or sanitation.
:param entity: The entity this attribute is on
:param value: The value to serialize
:returns: The serialized value
"""
return value
@classmethod
def deserialize(cls, entity, value):
"""Deserialize a value for this attribute from storage.
This will be called by the blob when deserializing itself, override it
to perform any necessary conversion or sanitation.
:param entity: The entity this attribute is on
:param value: The value to deserialize
:returns: The deserialized value
"""
return value
class MutableAttribute(Attribute):
"""A mutable attribute of an entity."""
_read_only = True
class Proxy:
def __init__(self, entity):
raise NotImplementedError
@classmethod
def get_default(cls, entity):
"""Return a bound proxy instance for this mutable attribute.
:param entity: The entity this attribute is on
:returns: A bound proxy instance
"""
return cls.Proxy(entity)
class ListAttribute(MutableAttribute):
"""An entity attribute that proxies a list."""
class Proxy(abc.MutableSequence):
def __init__(self, entity, items=()):
self._items = list(items)
self._entity = entity
def __getitem__(self, index):
return self._items[index]
def __setitem__(self, index, value):
self._items[index] = value
self._entity.dirty()
def __delitem__(self, index):
del self._items[index]
self._entity.dirty()
def __len__(self):
return len(self._items)
def insert(self, index, value):
self._items.insert(index, value)
self._entity.dirty()
class DictAttribute(MutableAttribute):
"""An entity attribute that proxies a dictionary."""
class Proxy(abc.MutableMapping):
def __init__(self, entity, items=None):
self._items = dict(items or {})
self._entity = entity
def __getitem__(self, key):
return self._items[key]
def __setitem__(self, key, value):
self._items[key] = value
self._entity.dirty()
def __delitem__(self, key):
del self._items[key]
self._entity.dirty()
def __len__(self):
return len(self._items)
def __iter__(self):
return iter(self._items)
class SetAttribute(MutableAttribute):
"""An entity attribute that proxies a set."""
class Proxy(abc.MutableSet):
def __init__(self, entity, items=()):
self._items = set(items)
self._entity = entity
def __len__(self):
return len(self._items)
def __iter__(self):
return iter(self._items)
def __contains__(self, value):
return value in self._items
def add(self, value):
self._items.add(value)
self._entity.dirty()
def discard(self, value):
self._items.discard(value)
self._entity.dirty()
| en | 0.806196 | # -*- coding: utf-8 -*- Data collections and attributes. # Part of Clockwork MUD Server (https://github.com/whutch/cwmud) # :copyright: (c) 2008 - 2017 <NAME> # :license: MIT (https://github.com/whutch/cwmud/blob/master/LICENSE.txt) Decorate a data blob to register it in this blob. :param str name: The name of the field to store the blob :returns None: :raises AlreadyExists: If the given name already exists as an attr :raises TypeError: If the supplied or decorated class is not a subclass of DataBlob Decorate an attribute to register it in this blob. :param str name: The name of the field to store the attribute :returns None: :raises AlreadyExists: If the given name already exists as an attr :raises TypeError: If the supplied or decorated class is not a subclass of Attribute A collection of attributes and sub-blobs on an entity. # These are overridden in the metaclass, I just put them here # to avoid a lot of unresolved reference errors in IDE introspection. # Update entity caches. Merge this blob with another, replacing blobs and attrs. Sub-blobs and attrs on the given blob with take precedent over those existing on this blob. :param DataBlob blob: The blob to merge this blob with :returns None: Create a dict from this blob, sanitized and suitable for storage. All sub-blobs will in turn be serialized. :returns dict: The serialized data Update this blob's data using values from a dict. All sub-blobs found will in turn be deserialized. Be careful where you deserialize data from, as it will be loaded raw and unvalidated. :param dict data: The data to deserialize :returns None: A unique value to note that an attribute hasn't been set. A single attribute of an entity. These are templates for the behavior of an attribute, they will not be instantiated and as such have no instance-based variables. 
The value of `default` should not be set to a mutable type, as it will be passed by reference to all instantiated blobs and risks being changed elsewhere in the code. # Do NOT use mutable types for this. Get the default value for this attribute. :param entity: The entity this attribute is on :returns: The default value Validate a value for this attribute. This will be called by the blob when setting the value for this attribute, override it to perform any checks or sanitation. This should either return a valid value for the attribute or raise an exception as to why the value is invalid. :param entity: The entity this attribute is on :param new_value: The potential value to validate :returns: The validated (and optionally sanitized) value Finalize the value for this attribute. This will be called by the blob when setting the value for this attribute, after validation; override it to perform any sanitation or transformation. The value should be considered valid. :param entity: The entity this attribute is on :param new_value: The new, validated value :returns: The finalized value Perform any actions necessary after this attribute's value changes. This will be called by the blob after the value of this attribute has changed, override it to do any necessary post-setter actions. :param entity: The entity this attribute is on :param DataBlob blob: The blob that changed :param old_value: The previous value :param new_value: The new value :returns None: Serialize a value for this attribute that is suitable for storage. This will be called by the blob when serializing itself, override it to perform any necessary conversion or sanitation. :param entity: The entity this attribute is on :param value: The value to serialize :returns: The serialized value Deserialize a value for this attribute from storage. This will be called by the blob when deserializing itself, override it to perform any necessary conversion or sanitation. 
:param entity: The entity this attribute is on :param value: The value to deserialize :returns: The deserialized value A mutable attribute of an entity. Return a bound proxy instance for this mutable attribute. :param entity: The entity this attribute is on :returns: A bound proxy instance An entity attribute that proxies a list. An entity attribute that proxies a dictionary. An entity attribute that proxies a set. | 2.341714 | 2 |
train.py | HocRiser01/GEV-NN | 0 | 6622580 | <filename>train.py
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch.utils import data
import VAE
from config import config
import display
from construct import construct
from sklearn.svm import SVC
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
eps = 1e-8
def CEL(x, x_hat, eps=1e-8):
    """Softmax binary-cross-entropy reconstruction loss.

    Both tensors are squashed through a row-wise softmax, then a
    per-element binary cross entropy is averaged over all elements.

    :param x: target logits, shape (batch, features)
    :param x_hat: reconstructed logits, same shape as x
    :param eps: numerical floor inside the logs (default matches the
        module-level constant, kept as a parameter for testability)
    :returns: scalar tensor loss
    """
    sm = nn.Softmax(dim=1)
    x = sm(x)
    # BUG FIX: the original computed `x_hat = sm(x)`, i.e. the
    # reconstruction argument was ignored and the loss compared x with
    # itself, giving a constant (gradient-free w.r.t. x_hat) loss.
    x_hat = sm(x_hat)
    return -(x * torch.log(x_hat + eps)
             + (1 - x) * torch.log(1 - x_hat + eps)).sum() / x.nelement()
class sampleData(data.Dataset):
    """Minimal map-style dataset pairing features ``X`` with labels ``y``."""

    def __init__(self, X, y):
        self.X = X
        self.y = y

    def __len__(self):
        # Length is driven by the feature container.
        return len(self.X)

    def __getitem__(self, index):
        # Yield a (feature, label) pair for the requested row.
        return self.X[index], self.y[index]
class Discriminator(nn.Module):
    """Four-layer MLP critic: in_dim -> 250 -> 512 -> 128 -> 1."""

    def __init__(self, in_dim):
        super().__init__()
        # Layer names (fc1..fc4) are kept so saved state_dicts stay valid.
        self.fc1 = nn.Linear(in_dim, 250)
        self.fc2 = nn.Linear(250, 512)
        self.fc3 = nn.Linear(512, 128)
        self.fc4 = nn.Linear(128, 1)

    def forward(self, x):
        # Sigmoid activations on the hidden layers; raw score at the output.
        hidden = torch.sigmoid(self.fc1(x))
        hidden = torch.sigmoid(self.fc2(hidden))
        hidden = torch.sigmoid(self.fc3(hidden))
        return self.fc4(hidden)
def gradient_penalty(netD, real, fake):
    """WGAN-GP penalty: E[(||grad D(x~)||_2 - 1)^2] on interpolated samples.

    :param netD: the critic network
    :param real: batch of real samples, shape (batch, features)
    :param fake: batch of generated samples, same shape
    :returns: scalar penalty tensor
    """
    # Per-sample mixing coefficient, broadcast across the feature axis.
    alpha = torch.rand(real.shape[0], 1).expand(real.shape).to(device)
    mixed = alpha * real + (1 - alpha) * fake
    mixed = torch.autograd.Variable(mixed.to(device), requires_grad=True)
    scores = netD(mixed)
    grads = torch.autograd.grad(
        inputs=mixed,
        outputs=scores,
        grad_outputs=torch.ones(scores.shape).to(device),
        create_graph=True,
        retain_graph=True,
        only_inputs=True,
    )[0]
    # Penalise deviation of each sample's gradient norm from 1.
    return ((grads.norm(2, dim=1) - 1) ** 2).mean()
def AE(X_train, Y_train, X_test, Y_test, learning_rate, epochs, batch_size):
    """Adversarially train the VAE and return hidden codes for X_train.

    Each epoch alternates a VAE reconstruction/KL step on netG with a
    WGAN-GP critic step on netD, then fits an SVC on features produced by
    `construct` and logs train/test accuracies via `display`.

    :param X_train: training features (tensor, shape (n, d))
    :param Y_train: training labels
    :param X_test: test features
    :param Y_test: test labels
    :param learning_rate: Adam learning rate shared by netG and netD
    :param epochs: number of passes over the training data
    :param batch_size: minibatch size for the DataLoader
    :returns: hidden representations of X_train, concatenated batchwise
    """
    dataset = sampleData(X_train, Y_train)
    dataloader = data.DataLoader(dataset, batch_size = batch_size,
                                 shuffle = True, drop_last = False, **config.kwargs)
    # Generator is a VAE mapping d -> d; moved to the configured device.
    netG = VAE.net(X_train.shape[1], X_train.shape[1]).to(device)
    # NOTE(review): netD is never moved to `device`; this works on CPU but
    # would break with a CUDA device -- confirm.
    netD = Discriminator(X_train.shape[1])
    G_optimizer = torch.optim.Adam(netG.parameters(), lr = learning_rate)
    D_optimizer = torch.optim.Adam(netD.parameters(), lr = learning_rate)
    # Per-epoch train/test accuracies for the downstream SVC probe.
    A1 = []
    A2 = []
    for epoch in range(epochs):
        for step, [X, y] in enumerate(dataloader):
            # --- Generator (VAE) step: reconstruction + KL divergence ---
            x_hat , mu, log_var, h = netG(X)
            rec_loss = CEL(X, x_hat)
            kl_div = (-0.5 * (log_var + 1 - mu ** 2 - torch.exp(log_var))).sum() / X.shape[0]
            g_loss = rec_loss + kl_div
            G_optimizer.zero_grad()
            g_loss.backward()
            G_optimizer.step()
            # --- Critic step: real = netG(X), fake = netG(uniform noise) ---
            fake = torch.Tensor(X.shape).uniform_(-1, 1)
            fake, _, _, _ = netG(fake)
            fake_logits = netD(fake)
            real, _, _, _ = netG(X)
            real_logits = netD(real)
            fake_loss = CEL(fake_logits, torch.zeros_like(fake_logits)).mean()
            real_loss = CEL(real_logits, torch.ones_like(real_logits)).mean()
            # Gradient penalty weighted by the standard WGAN-GP factor 10.
            gp = gradient_penalty(netD, X, fake)
            d_loss = fake_loss + real_loss + 10. * gp
            D_optimizer.zero_grad()
            d_loss.backward()
            D_optimizer.step()
            # if step == 0:
            #     print(epoch, 'kl div:%.4f' % kl_div.item(), 'rec loss:%.4f' % rec_loss.item(), 'dloss:%.4f' % d_loss.item())
        # Checkpoint after every epoch (overwrites the same file).
        torch.save(netG.state_dict(), './result/netG-01.pt')
        # Probe the representation: fit an SVC on constructed features and
        # record train/test accuracy for this epoch.
        X1, index = construct(X_train, Y_train)
        X2, _ = construct(X_test, Y_test)
        y_output = SVC().fit(X1[:, index], Y_train).predict(X1[:, index])
        TP, TN, FP, FN, P, R, mF1, Acc, G_mean = display.Eval(Y_train, y_output)
        A1.append(Acc)
        y_output = SVC().fit(X1[:, index], Y_train).predict(X2[:, index])
        TP, TN, FP, FN, P, R, mF1, Acc, G_mean = display.Eval(Y_test, y_output)
        A2.append(Acc)
    display.Disp(A1, A2)
    torch.save(netG.state_dict(), './result/netG-01.pt')
    # Collect hidden codes for the whole training set, batch by batch.
    # NOTE(review): the dataloader shuffles, so these rows do NOT align
    # with the original X_train order -- confirm downstream use.
    for step, [X, y] in enumerate(dataloader):
        if (step == 0):
            x_hat, mu, log_var, hid = netG(X)
        else:
            x_hat, mu, log_var, h = netG(X)
            hid = torch.cat([hid, h], axis = 0)
    return hid
def train(X_train, Y_train, X_test, Y_test):
    """Run the adversarial autoencoder and return hidden features.

    Trains for 50 epochs with lr=0.001 and batch size 128, then returns
    the encoder's hidden representation of the training set as a numpy
    array (detached and moved to CPU).
    """
    hid = AE(X_train, Y_train, X_test, Y_test, learning_rate = 0.001, epochs = 50, batch_size = 128)
    # hid = minmaxscaler(hid)
    return hid.detach().cpu().numpy()
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch.utils import data
import VAE
from config import config
import display
from construct import construct
from sklearn.svm import SVC
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
eps = 1e-8
def CEL(x, x_hat):
sm = nn.Softmax(dim = 1)
x = sm(x)
x_hat = sm(x)
return -(x * torch.log(x_hat + eps) + (1 - x) * torch.log(1 - x_hat + eps)).sum() / x.nelement()
class sampleData(data.Dataset):
def __init__(self, X, y):
self.X = X
self.y = y
def __getitem__(self, index):
return self.X[index], self.y[index]
def __len__(self):
return len(self.X)
class Discriminator(nn.Module):
def __init__(self, in_dim):
super().__init__()
self.fc1 = nn.Linear(in_dim, 250)
self.fc2 = nn.Linear(250, 512)
self.fc3 = nn.Linear(512, 128)
self.fc4 = nn.Linear(128, 1)
def forward(self, x):
x = torch.sigmoid(self.fc1(x))
x = torch.sigmoid(self.fc2(x))
x = torch.sigmoid(self.fc3(x))
x = self.fc4(x)
return x
def gradient_penalty(netD, real, fake):
alpha = torch.rand(real.shape[0], 1).expand(real.shape).to(device)
interpolates = alpha * real + (1 - alpha) * fake
interpolates = torch.autograd.Variable(interpolates.to(device), requires_grad=True)
disc_interpolates = netD(interpolates)
grads = torch.autograd.grad(inputs = interpolates, outputs = disc_interpolates,
grad_outputs = torch.ones(disc_interpolates.shape).to(device),
create_graph = True, retain_graph = True,
only_inputs = True)[0]
gp = ((grads.norm(2, dim = 1) - 1) ** 2).mean()
return gp
def AE(X_train, Y_train, X_test, Y_test, learning_rate, epochs, batch_size):
dataset = sampleData(X_train, Y_train)
dataloader = data.DataLoader(dataset, batch_size = batch_size,
shuffle = True, drop_last = False, **config.kwargs)
netG = VAE.net(X_train.shape[1], X_train.shape[1]).to(device)
netD = Discriminator(X_train.shape[1])
G_optimizer = torch.optim.Adam(netG.parameters(), lr = learning_rate)
D_optimizer = torch.optim.Adam(netD.parameters(), lr = learning_rate)
A1 = []
A2 = []
for epoch in range(epochs):
for step, [X, y] in enumerate(dataloader):
x_hat , mu, log_var, h = netG(X)
rec_loss = CEL(X, x_hat)
kl_div = (-0.5 * (log_var + 1 - mu ** 2 - torch.exp(log_var))).sum() / X.shape[0]
g_loss = rec_loss + kl_div
G_optimizer.zero_grad()
g_loss.backward()
G_optimizer.step()
fake = torch.Tensor(X.shape).uniform_(-1, 1)
fake, _, _, _ = netG(fake)
fake_logits = netD(fake)
real, _, _, _ = netG(X)
real_logits = netD(real)
fake_loss = CEL(fake_logits, torch.zeros_like(fake_logits)).mean()
real_loss = CEL(real_logits, torch.ones_like(real_logits)).mean()
gp = gradient_penalty(netD, X, fake)
d_loss = fake_loss + real_loss + 10. * gp
D_optimizer.zero_grad()
d_loss.backward()
D_optimizer.step()
# if step == 0:
# print(epoch, 'kl div:%.4f' % kl_div.item(), 'rec loss:%.4f' % rec_loss.item(), 'dloss:%.4f' % d_loss.item())
torch.save(netG.state_dict(), './result/netG-01.pt')
X1, index = construct(X_train, Y_train)
X2, _ = construct(X_test, Y_test)
y_output = SVC().fit(X1[:, index], Y_train).predict(X1[:, index])
TP, TN, FP, FN, P, R, mF1, Acc, G_mean = display.Eval(Y_train, y_output)
A1.append(Acc)
y_output = SVC().fit(X1[:, index], Y_train).predict(X2[:, index])
TP, TN, FP, FN, P, R, mF1, Acc, G_mean = display.Eval(Y_test, y_output)
A2.append(Acc)
display.Disp(A1, A2)
torch.save(netG.state_dict(), './result/netG-01.pt')
for step, [X, y] in enumerate(dataloader):
if (step == 0):
x_hat, mu, log_var, hid = netG(X)
else:
x_hat, mu, log_var, h = netG(X)
hid = torch.cat([hid, h], axis = 0)
return hid
def train(X_train, Y_train, X_test, Y_test):
    """Train the auto-encoder with fixed hyper-parameters and return the
    learned hidden representation as a NumPy array on the CPU."""
    hidden = AE(X_train, Y_train, X_test, Y_test,
                learning_rate=0.001, epochs=50, batch_size=128)
    # hid = minmaxscaler(hid)
    return hidden.detach().cpu().numpy()
gs-engine/gse_api_server/app.py | gedge-platform/GEdge-Platform | 13 | 6622581 | from flask import Flask
from werkzeug.exceptions import HTTPException
from controller.service import service
from controller.service_mesh import service_mesh
from controller.service_schedulehint import service_schedulehint
from controller.hpa import hpa
from controller.template import template
from controller.vpa import vpa
from controller.limitrange import limitrange
from controller.imageinfo import imageinfo
from controller.node import node
from controller.namespace import namespace
from controller.utility import utility
# from tools.job import service_ip_collector
# from tools.scheduler import Scheduler
# sched = Scheduler()
# sched.add_schdule(service_ip_collector, 'interval', job_id='service_ip_collector', seconds=5, args=(1, ))
# Flask application; static assets are served from /static.
app = Flask(__name__, static_url_path='/static')
# One blueprint per resource family, all mounted under the /gse prefix.
app.register_blueprint(namespace, url_prefix='/gse/namespaces')
app.register_blueprint(node, url_prefix='/gse/nodes')
app.register_blueprint(service, url_prefix='/gse/services')
app.register_blueprint(template, url_prefix='/gse/templates')
app.register_blueprint(service_mesh, url_prefix='/gse/service-mesh')
app.register_blueprint(utility, url_prefix='/gse/utility')
app.register_blueprint(limitrange, url_prefix='/gse/limitrange')
app.register_blueprint(imageinfo, url_prefix='/gse/imageinfo')
app.register_blueprint(hpa, url_prefix='/gse/hpa')
app.register_blueprint(vpa, url_prefix='/gse/vpa')
app.register_blueprint(service_schedulehint, url_prefix='/gse/service/schedulehint')
@app.errorhandler(404)
def resource_not_found(error):
    """Return a JSON error body for 404s.

    Uses the exception's description when one was supplied, otherwise the
    generic "resource not found" message.
    """
    msg = {
        "error": {
            'code': 404,
            'name': error.name,
            'message': "resource not found"
        }
    }
    if error.description != "":
        msg["error"]["message"] = error.description
    return msg, 404


@app.errorhandler(HTTPException)
def handle_http_exception(error):
    """Generic JSON handler for every other werkzeug HTTPException.

    Renamed from ``resource_not_found`` — the original redefined the same
    function name twice, shadowing the 404 handler's module attribute.
    """
    msg = {
        "error": {
            'code': error.code,
            'name': error.name,
            'message': error.description
        }
    }
    return msg, error.code


if __name__ == '__main__':
    # Single entry point, placed AFTER all handler registrations.  The
    # original called the blocking app.run() *before* defining the
    # HTTPException handler (and then had a second, unreachable __main__
    # guard), so that handler was never registered in script mode.
    app.run(host='0.0.0.0', port=8887)
'''
apt-get install python3-pymysql
apt-get install python3-pandas
실행방법
gunicorn app:app --bind=0.0.0.0:8888 --daemon --reload
--daemon: 데몬 프로세스로 실행
--reload: 소스 변경시 재구동
'''
| from flask import Flask
from werkzeug.exceptions import HTTPException
from controller.service import service
from controller.service_mesh import service_mesh
from controller.service_schedulehint import service_schedulehint
from controller.hpa import hpa
from controller.template import template
from controller.vpa import vpa
from controller.limitrange import limitrange
from controller.imageinfo import imageinfo
from controller.node import node
from controller.namespace import namespace
from controller.utility import utility
# from tools.job import service_ip_collector
# from tools.scheduler import Scheduler
# sched = Scheduler()
# sched.add_schdule(service_ip_collector, 'interval', job_id='service_ip_collector', seconds=5, args=(1, ))
# Flask application; static assets are served from /static.
app = Flask(__name__, static_url_path='/static')
# One blueprint per resource family, all mounted under the /gse prefix.
app.register_blueprint(namespace, url_prefix='/gse/namespaces')
app.register_blueprint(node, url_prefix='/gse/nodes')
app.register_blueprint(service, url_prefix='/gse/services')
app.register_blueprint(template, url_prefix='/gse/templates')
app.register_blueprint(service_mesh, url_prefix='/gse/service-mesh')
app.register_blueprint(utility, url_prefix='/gse/utility')
app.register_blueprint(limitrange, url_prefix='/gse/limitrange')
app.register_blueprint(imageinfo, url_prefix='/gse/imageinfo')
app.register_blueprint(hpa, url_prefix='/gse/hpa')
app.register_blueprint(vpa, url_prefix='/gse/vpa')
app.register_blueprint(service_schedulehint, url_prefix='/gse/service/schedulehint')
@app.errorhandler(404)
def resource_not_found(error):
    """Return a JSON error body for 404s.

    Uses the exception's description when one was supplied, otherwise the
    generic "resource not found" message.
    """
    msg = {
        "error": {
            'code': 404,
            'name': error.name,
            'message': "resource not found"
        }
    }
    if error.description != "":
        msg["error"]["message"] = error.description
    return msg, 404


@app.errorhandler(HTTPException)
def handle_http_exception(error):
    """Generic JSON handler for every other werkzeug HTTPException.

    Renamed from ``resource_not_found`` — the original redefined the same
    function name twice, shadowing the 404 handler's module attribute.
    """
    msg = {
        "error": {
            'code': error.code,
            'name': error.name,
            'message': error.description
        }
    }
    return msg, error.code


if __name__ == '__main__':
    # Single entry point, placed AFTER all handler registrations.  The
    # original called the blocking app.run() *before* defining the
    # HTTPException handler (and then had a second, unreachable __main__
    # guard), so that handler was never registered in script mode.
    app.run(host='0.0.0.0', port=8887)
'''
apt-get install python3-pymysql
apt-get install python3-pandas
실행방법
gunicorn app:app --bind=0.0.0.0:8888 --daemon --reload
--daemon: 데몬 프로세스로 실행
--reload: 소스 변경시 재구동
'''
| ko | 0.222939 | # from tools.job import service_ip_collector # from tools.scheduler import Scheduler # sched = Scheduler() # sched.add_schdule(service_ip_collector, 'interval', job_id='service_ip_collector', seconds=5, args=(1, )) apt-get install python3-pymysql apt-get install python3-pandas 실행방법 gunicorn app:app --bind=0.0.0.0:8888 --daemon --reload --daemon: 데몬 프로세스로 실행 --reload: 소스 변경시 재구동 | 1.94745 | 2 |
produce/models.py | jmickela/stalkexchange | 0 | 6622582 | <gh_stars>0
from django.db import models
from django.utils.translation import ugettext as _
from django.conf import settings
class ProduceType(models.Model):
    """A kind of produce (e.g. a fruit or vegetable) that can be offered."""

    # Human-readable name shown in listings.
    title = models.CharField(_('Type'), max_length=255)
    description = models.TextField(_('Description'))
    # Optional illustrative photo.
    image = models.ImageField(_('Image'), blank=True)

    def __unicode__(self):
        # Python-2-era Django string representation.
        return self.title
class GardenItem(models.Model):
    """A batch of home-grown produce that a user offers for exchange."""

    # Rough quantity buckets; stored as spaced integers so that new levels
    # can be inserted between them without a data migration.
    QUANTITY_LITTLE = 0
    QUANTITY_LOT = 10
    quantity_choices = (
        (QUANTITY_LITTLE, _("A little")),
        (QUANTITY_LOT, _("A lot")),
    )
    SIZE_BIG = 0
    SIZE_MEDIUM = 1
    SIZE_SMALL = 2
    size_choices = (
        (SIZE_BIG, _('Big')),
        (SIZE_MEDIUM, _('Medium')),
        (SIZE_SMALL, _('Small'))
    )
    # Offering user; reverse accessor is user.garden_items.
    owner = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="garden_items")
    produce = models.ForeignKey(ProduceType)
    quantity = models.IntegerField(_('Quantity'), help_text=_('How many do you have?'), choices=quantity_choices)
    is_organic = models.BooleanField(_('Is Organic?'), default=False)
    size = models.IntegerField(_('Size'), choices=size_choices, help_text=_('How big is this item?'))
    description = models.TextField(_('Description'), help_text=_('Extra information...'), blank=True)
image = models.ImageField(_('Image'), blank=True) | from django.db import models
from django.utils.translation import ugettext as _
from django.conf import settings
class ProduceType(models.Model):
    """A kind of produce (e.g. a fruit or vegetable) that can be offered."""

    # Human-readable name shown in listings.
    title = models.CharField(_('Type'), max_length=255)
    description = models.TextField(_('Description'))
    # Optional illustrative photo.
    image = models.ImageField(_('Image'), blank=True)

    def __unicode__(self):
        # Python-2-era Django string representation.
        return self.title
class GardenItem(models.Model):
    """A batch of home-grown produce that a user offers for exchange."""

    # Rough quantity buckets; stored as spaced integers so that new levels
    # can be inserted between them without a data migration.
    QUANTITY_LITTLE = 0
    QUANTITY_LOT = 10
    quantity_choices = (
        (QUANTITY_LITTLE, _("A little")),
        (QUANTITY_LOT, _("A lot")),
    )
    SIZE_BIG = 0
    SIZE_MEDIUM = 1
    SIZE_SMALL = 2
    size_choices = (
        (SIZE_BIG, _('Big')),
        (SIZE_MEDIUM, _('Medium')),
        (SIZE_SMALL, _('Small'))
    )
    # Offering user; reverse accessor is user.garden_items.
    owner = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="garden_items")
    produce = models.ForeignKey(ProduceType)
    quantity = models.IntegerField(_('Quantity'), help_text=_('How many do you have?'), choices=quantity_choices)
    is_organic = models.BooleanField(_('Is Organic?'), default=False)
    size = models.IntegerField(_('Size'), choices=size_choices, help_text=_('How big is this item?'))
    description = models.TextField(_('Description'), help_text=_('Extra information...'), blank=True)
image = models.ImageField(_('Image'), blank=True) | none | 1 | 2.164297 | 2 | |
tests/snapshots/snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_BE-2011] 1.py | gour/holidata | 32 | 6622583 | <reponame>gour/holidata<filename>tests/snapshots/snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[de_BE-2011] 1.py
[
{
'date': '2011-01-01',
'description': 'Neujahr',
'locale': 'de-BE',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2011-04-24',
'description': 'Ostern',
'locale': 'de-BE',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2011-04-25',
'description': 'Ostermontag',
'locale': 'de-BE',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2011-05-01',
'description': 'Tag der Arbeit',
'locale': 'de-BE',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2011-06-02',
'description': '<NAME>',
'locale': 'de-BE',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2011-06-12',
'description': 'Pfingsten',
'locale': 'de-BE',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2011-06-13',
'description': 'Pfingstmontag',
'locale': 'de-BE',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2011-07-21',
'description': 'Nationalfeiertag',
'locale': 'de-BE',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2011-08-15',
'description': '<NAME>',
'locale': 'de-BE',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2011-11-01',
'description': 'Allerheiligen',
'locale': 'de-BE',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2011-11-11',
'description': 'Waffenstillstand',
'locale': 'de-BE',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2011-12-25',
'description': 'Weihnacht',
'locale': 'de-BE',
'notes': '',
'region': '',
'type': 'NRF'
}
] | 1.py
[
{
'date': '2011-01-01',
'description': 'Neujahr',
'locale': 'de-BE',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2011-04-24',
'description': 'Ostern',
'locale': 'de-BE',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2011-04-25',
'description': 'Ostermontag',
'locale': 'de-BE',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2011-05-01',
'description': 'Tag der Arbeit',
'locale': 'de-BE',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2011-06-02',
'description': '<NAME>',
'locale': 'de-BE',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2011-06-12',
'description': 'Pfingsten',
'locale': 'de-BE',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2011-06-13',
'description': 'Pfingstmontag',
'locale': 'de-BE',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2011-07-21',
'description': 'Nationalfeiertag',
'locale': 'de-BE',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2011-08-15',
'description': '<NAME>',
'locale': 'de-BE',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2011-11-01',
'description': 'Allerheiligen',
'locale': 'de-BE',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2011-11-11',
'description': 'Waffenstillstand',
'locale': 'de-BE',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2011-12-25',
'description': 'Weihnacht',
'locale': 'de-BE',
'notes': '',
'region': '',
'type': 'NRF'
}
] | none | 1 | 1.601555 | 2 | |
PythonM3/media.py | MiguelTeixeiraUFPB/PythonM3 | 0 | 6622584 | <filename>PythonM3/media.py
def media(*notas):
    """Print the average and the count of the given grades.

    Accepts any number of numeric grades.  Raises ZeroDivisionError when
    called with no arguments (behaviour unchanged from the original).

    Fixes: uses the built-in sum() instead of a manual accumulation loop,
    and reuses the cached count instead of recomputing len(notas).
    """
    tam = len(notas)   # number of grades, computed once
    soma = sum(notas)  # built-in sum replaces the manual loop
    print('A média de notas é ', soma / tam)
    print(f'A quantidade de notas é {tam}')


media(5, 5, 5)
| <filename>PythonM3/media.py
def media(*notas):
    """Print the average and the count of the given grades.

    Accepts any number of numeric grades.  Raises ZeroDivisionError when
    called with no arguments (behaviour unchanged from the original).

    Fixes: uses the built-in sum() instead of a manual accumulation loop,
    and reuses the cached count instead of recomputing len(notas).
    """
    tam = len(notas)   # number of grades, computed once
    soma = sum(notas)  # built-in sum replaces the manual loop
    print('A média de notas é ', soma / tam)
    print(f'A quantidade de notas é {tam}')


media(5, 5, 5)
| none | 1 | 3.111028 | 3 | |
examples/truncated_square_svd_approximation.py | melven/pitts | 2 | 6622585 | #!/usr/bin/env python3
""" Use TT-SVD to approximate a large square SVD """
__authors__ = ['<NAME> <<EMAIL>>']
__date__ = '2020-12-16'
import numpy as np
import pitts_py
import functools
import timeit
import argparse
def timer(func):
    """Decorator that prints the wall-clock runtime of ``func`` on every call."""
    @functools.wraps(func)
    def timed(*args, **kwargs):
        began = timeit.default_timer()
        result = func(*args, **kwargs)
        elapsed = timeit.default_timer() - began
        print(func.__name__, "wtime:", elapsed)
        return result
    return timed
@timer
def random_square_low_rank_matrix(n, r):
    """Build a random n x n rank-r matrix M = U @ diag(sigma) @ Vt.T."""
    # Decreasing singular values in (0, 1]; endpoint excluded so none are 0.
    sigma = np.linspace(1, 0, r, endpoint=False, dtype=np.float64)
    # Orthonormal factors via QR of random matrices (draw order preserved).
    U, _ = np.linalg.qr(np.random.rand(n, r).astype(dtype=np.float64))
    Vt, _ = np.linalg.qr(np.random.rand(n, r).astype(dtype=np.float64))
    return U @ np.diag(sigma) @ Vt.T, U, sigma, Vt
@timer
def numpy_svd(M):
    """Dense SVD of M via NumPy/LAPACK, with timing from the decorator."""
    left, vals, right = np.linalg.svd(M)
    return left, vals, right
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='approximate truncated square SVD of a 2^n x 2^n matrix by TT-SVD')
    parser.add_argument('d', type=int)
    parser.add_argument('r', type=int)
    parser.add_argument('rtt', type=int)
    parser.add_argument('nIter', type=int)
    args = parser.parse_args()
    pitts_py.initialize(True)
    # generate square random matrix of given rank
    n = 2**args.d
    M, U_ref, sigma_ref, Vt_ref = random_square_low_rank_matrix(n, args.r)
    print('Singular values:', sigma_ref)
    # Reference solution: dense SVD.
    U, sigma, Vt = numpy_svd(M)
    print('Computed singular values:', sigma[:args.r])
    err = np.linalg.norm(sigma[:args.r] - sigma_ref, ord=np.inf)
    print('Error:', err)
    # Copy M into a pitts MultiVector, viewed as (n*n/2, 2) column-major.
    Xm = pitts_py.MultiVector_double(n*n//2, 2)
    Xm_view = np.array(Xm, copy=False)
    Xm_view[...] = M.reshape((n*n//2, 2), order='F')
    Xm_view = None  # drop the view; fromDense may reuse the buffer
    dims = [2,]*args.d*2  # tensor-train dimensions: 2*d binary modes
    work = pitts_py.MultiVector_double()
    Xtt = pitts_py.fromDense(Xm, work, dims, rankTolerance=0.0001, maxRank=args.rtt)
    cores = [Xtt.getSubTensor(i) for i in range(len(dims))]
    # Sweep SVDs left-to-right through the first d cores; the singular
    # values remaining at core d approximate sigma(M).
    for i in range(args.d):
        r1 = cores[i].shape[0]
        # 2 = cores[i].shape[1]
        r2 = cores[i].shape[2]
        coreU, coreS, coreVt = np.linalg.svd(cores[i].reshape((r1*2, r2), order='F'), full_matrices=False)
        cores[i] = coreU.reshape((r1, 2, r2), order='F')
        r3 = cores[i+1].shape[2]
        tmp = cores[i+1].reshape((r2, 2*r3), order='F')
        # NOTE(review): coreVt is already V^H from np.linalg.svd, so coreVt.T
        # is V; confirm the extra transpose is intended given the order='F'
        # reshapes used throughout.
        if i+1 < args.d:
            tmp = np.diag(coreS) @ coreVt.T @ tmp
        else:
            sigmaTT = coreS  # singular-value estimates from the TT form
            tmp = coreVt.T @ tmp
        cores[i+1] = tmp.reshape((r2, 2, r3), order='F')
    print('TT-SVD computed singular values:', sigmaTT[:args.r])
    err = np.linalg.norm(sigmaTT[:args.r] - sigma_ref, ord=np.inf)
    print('Error:', err)
    # Contract the first d cores to recover the left singular vectors.
    UTT = np.ones((1, 1), dtype=np.float64)
    for i in range(args.d):
        r1 = cores[i].shape[0]
        # 2 = cores[i].shape[1]
        r2 = cores[i].shape[2]
        UTT = UTT @ cores[i].reshape((r1, 2*r2), order='F')
        UTT = UTT.reshape((UTT.size//r2, r2), order='F')
    # Overlap with the reference vectors (ideally +/- identity columns).
    print('UTT', UTT[:, :args.r].T @ U_ref)
    # Contract the remaining d cores for the right singular vectors.
    VTT = np.eye(len(sigmaTT), dtype=np.float64)
    for i in range(args.d, 2*args.d):
        r1 = cores[i].shape[0]
        # 2 = cores[i].shape[1]
        r2 = cores[i].shape[2]
        VTT = VTT @ cores[i].reshape((r1, 2*r2), order='F')
        VTT = VTT.reshape((VTT.size//r2, r2), order='F')
    VTT = VTT.reshape((len(sigmaTT), n), order='F')
    print('VTT', VTT[:args.r, :] @ Vt_ref)
    pitts_py.finalize(True)
| #!/usr/bin/env python3
""" Use TT-SVD to approximate a large square SVD """
__authors__ = ['<NAME> <<EMAIL>>']
__date__ = '2020-12-16'
import numpy as np
import pitts_py
import functools
import timeit
import argparse
def timer(func):
    """Decorator that prints the wall-clock runtime of ``func`` on every call."""
    @functools.wraps(func)
    def timed(*args, **kwargs):
        began = timeit.default_timer()
        result = func(*args, **kwargs)
        elapsed = timeit.default_timer() - began
        print(func.__name__, "wtime:", elapsed)
        return result
    return timed
@timer
def random_square_low_rank_matrix(n, r):
    """Build a random n x n rank-r matrix M = U @ diag(sigma) @ Vt.T."""
    # Decreasing singular values in (0, 1]; endpoint excluded so none are 0.
    sigma = np.linspace(1, 0, r, endpoint=False, dtype=np.float64)
    # Orthonormal factors via QR of random matrices (draw order preserved).
    U, _ = np.linalg.qr(np.random.rand(n, r).astype(dtype=np.float64))
    Vt, _ = np.linalg.qr(np.random.rand(n, r).astype(dtype=np.float64))
    return U @ np.diag(sigma) @ Vt.T, U, sigma, Vt
@timer
def numpy_svd(M):
    """Dense SVD of M via NumPy/LAPACK, with timing from the decorator."""
    left, vals, right = np.linalg.svd(M)
    return left, vals, right
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='approximate truncated square SVD of a 2^n x 2^n matrix by TT-SVD')
    parser.add_argument('d', type=int)
    parser.add_argument('r', type=int)
    parser.add_argument('rtt', type=int)
    parser.add_argument('nIter', type=int)
    args = parser.parse_args()
    pitts_py.initialize(True)
    # generate square random matrix of given rank
    n = 2**args.d
    M, U_ref, sigma_ref, Vt_ref = random_square_low_rank_matrix(n, args.r)
    print('Singular values:', sigma_ref)
    # Reference solution: dense SVD.
    U, sigma, Vt = numpy_svd(M)
    print('Computed singular values:', sigma[:args.r])
    err = np.linalg.norm(sigma[:args.r] - sigma_ref, ord=np.inf)
    print('Error:', err)
    # Copy M into a pitts MultiVector, viewed as (n*n/2, 2) column-major.
    Xm = pitts_py.MultiVector_double(n*n//2, 2)
    Xm_view = np.array(Xm, copy=False)
    Xm_view[...] = M.reshape((n*n//2, 2), order='F')
    Xm_view = None  # drop the view; fromDense may reuse the buffer
    dims = [2,]*args.d*2  # tensor-train dimensions: 2*d binary modes
    work = pitts_py.MultiVector_double()
    Xtt = pitts_py.fromDense(Xm, work, dims, rankTolerance=0.0001, maxRank=args.rtt)
    cores = [Xtt.getSubTensor(i) for i in range(len(dims))]
    # Sweep SVDs left-to-right through the first d cores; the singular
    # values remaining at core d approximate sigma(M).
    for i in range(args.d):
        r1 = cores[i].shape[0]
        # 2 = cores[i].shape[1]
        r2 = cores[i].shape[2]
        coreU, coreS, coreVt = np.linalg.svd(cores[i].reshape((r1*2, r2), order='F'), full_matrices=False)
        cores[i] = coreU.reshape((r1, 2, r2), order='F')
        r3 = cores[i+1].shape[2]
        tmp = cores[i+1].reshape((r2, 2*r3), order='F')
        # NOTE(review): coreVt is already V^H from np.linalg.svd, so coreVt.T
        # is V; confirm the extra transpose is intended given the order='F'
        # reshapes used throughout.
        if i+1 < args.d:
            tmp = np.diag(coreS) @ coreVt.T @ tmp
        else:
            sigmaTT = coreS  # singular-value estimates from the TT form
            tmp = coreVt.T @ tmp
        cores[i+1] = tmp.reshape((r2, 2, r3), order='F')
    print('TT-SVD computed singular values:', sigmaTT[:args.r])
    err = np.linalg.norm(sigmaTT[:args.r] - sigma_ref, ord=np.inf)
    print('Error:', err)
    # Contract the first d cores to recover the left singular vectors.
    UTT = np.ones((1, 1), dtype=np.float64)
    for i in range(args.d):
        r1 = cores[i].shape[0]
        # 2 = cores[i].shape[1]
        r2 = cores[i].shape[2]
        UTT = UTT @ cores[i].reshape((r1, 2*r2), order='F')
        UTT = UTT.reshape((UTT.size//r2, r2), order='F')
    # Overlap with the reference vectors (ideally +/- identity columns).
    print('UTT', UTT[:, :args.r].T @ U_ref)
    # Contract the remaining d cores for the right singular vectors.
    VTT = np.eye(len(sigmaTT), dtype=np.float64)
    for i in range(args.d, 2*args.d):
        r1 = cores[i].shape[0]
        # 2 = cores[i].shape[1]
        r2 = cores[i].shape[2]
        VTT = VTT @ cores[i].reshape((r1, 2*r2), order='F')
        VTT = VTT.reshape((VTT.size//r2, r2), order='F')
    VTT = VTT.reshape((len(sigmaTT), n), order='F')
    print('VTT', VTT[:args.r, :] @ Vt_ref)
    pitts_py.finalize(True)
| en | 0.686847 | #!/usr/bin/env python3 Use TT-SVD to approximate a large square SVD measure runtime of the decorated function # generate square random matrix of given rank # 2 = cores[i].shape[1] # 2 = cores[i].shape[1] # 2 = cores[i].shape[1] | 2.645765 | 3 |
data/test/python/2635cacebbff947f74c9baae1b26d2a5752c15b8present_team.py | harshp8l/deep-learning-lang-detection | 84 | 6622586 | from board import present_team
from django.views.generic import View
from whiteboard_web.observers.present_team_observer import PresentTeamObserver
from whiteboard_web.repositories.new_face_repository import NewFaceRepository
from whiteboard_web.repositories.team_repository import TeamRepository
class PresentTeamDetailView(View):
    """Read-only endpoint that renders the 'present team' board for one team."""

    @staticmethod
    def get(request, team_identifier):
        # URL captures arrive as strings; the domain layer expects an int.
        team_identifier = int(team_identifier)
        observer = PresentTeamObserver()
        team_repository = TeamRepository()
        new_face_repository = NewFaceRepository()
        # Wire the use-case (command pattern): repositories in, observer out.
        command = present_team(team_identifier=team_identifier,
                               team_repository=team_repository,
                               new_face_repository=new_face_repository,
                               observer=observer)
        command.execute()
return observer.response() | from board import present_team
from django.views.generic import View
from whiteboard_web.observers.present_team_observer import PresentTeamObserver
from whiteboard_web.repositories.new_face_repository import NewFaceRepository
from whiteboard_web.repositories.team_repository import TeamRepository
class PresentTeamDetailView(View):
    """Read-only endpoint that renders the 'present team' board for one team."""

    @staticmethod
    def get(request, team_identifier):
        # The domain command expects an integer identifier.
        team_id = int(team_identifier)
        result_observer = PresentTeamObserver()
        teams = TeamRepository()
        new_faces = NewFaceRepository()
        use_case = present_team(team_identifier=team_id,
                                team_repository=teams,
                                new_face_repository=new_faces,
                                observer=result_observer)
        use_case.execute()
        return result_observer.response()
benchmark.py | FahmidKader/Rail-Vision-System | 0 | 6622587 | <reponame>FahmidKader/Rail-Vision-System<gh_stars>0
#!/usr/bin/env python3
from time import time
import numpy as np
import cv2
def test(filename='rail.png'):
    """Detect straight lines in *filename* with both Hough transforms and
    display the results in separate windows; returns -1 when the image
    cannot be loaded."""
    start_time = time()
    # Loads an image
    src = cv2.imread(cv2.samples.findFile(filename), cv2.IMREAD_GRAYSCALE)
    # Check if image is loaded fine
    if src is None:
        print('Error opening image!')
        # print('Usage: hough_lines.py [image_name -- default ' + default_file + '] \n')
        return -1
    # Edge map feeds both Hough variants.
    dst = cv2.Canny(src, 50, 200, None, 3)
    # Copy edges to the images that will display the results in BGR)
    cdst = cv2.cvtColor(dst, cv2.COLOR_GRAY2BGR)
    cdstP = np.copy(cdst)
    # Standard transform: each hit is an infinite line in (rho, theta) form.
    lines = cv2.HoughLines(dst, 1, np.pi / 180, 150, None, 0, 0)
    if lines is not None:
        for i in range(0, len(lines)):
            rho = lines[i][0][0]
            theta = lines[i][0][1]
            a = np.cos(theta)
            b = np.sin(theta)
            x0 = a * rho
            y0 = b * rho
            # Two far-apart points on the line so the drawn segment spans the frame.
            pt1 = (int(x0 + 1000 * (-b)), int(y0 + 1000 * (a)))
            pt2 = (int(x0 - 1000 * (-b)), int(y0 - 1000 * (a)))
            cv2.line(cdst, pt1, pt2, (0, 0, 255), 3, cv2.LINE_AA)
    # Probabilistic transform: each hit is a finite segment (x1, y1, x2, y2).
    linesP = cv2.HoughLinesP(dst, 1, np.pi / 180, 50, None, 50, 10)
    if linesP is not None:
        for i in range(0, len(linesP)):
            l = linesP[i][0]
            cv2.line(cdstP, (l[0], l[1]), (l[2], l[3]), (0, 0, 255), 3, cv2.LINE_AA)
    cv2.imshow("Source", src)
    cv2.imshow("Detected Lines (in red) - Standard Hough Line Transform", cdst)
    cv2.imshow("Detected Lines (in red) - Probabilistic Line Transform", cdstP)
    # Reported rate covers everything before the blocking waitKey.
    print((time() - start_time) ** -1, 'Hz')
    cv2.waitKey()
def main():
    """Detect line segments in 'rail.png' and display them.

    Bug fixes vs. the original:
    * cv2.HoughLinesP returns finite segments as [x1, y1, x2, y2]; the old
      loop read the first two values as (rho, theta) and drew unrelated
      infinite lines.
    * Lines were drawn on `img`, but `output` (a copy taken *before*
      drawing) was displayed, so detections were never visible.
    """
    start = time()
    img = cv2.imread('rail.png')
    edges = cv2.Canny(img, 200, 110)
    lines = cv2.HoughLinesP(edges, 10, 0.1, 400)
    output = np.copy(img)
    if lines is not None:
        for i in range(0, len(lines)):
            x1, y1, x2, y2 = lines[i][0]
            # Draw on the copy that is actually shown below.
            cv2.line(output, (x1, y1), (x2, y2), (0, 0, 255), 3, cv2.LINE_AA)
    cv2.imshow('Edges', output)
    cv2.waitKey(0)
    print((time() - start) ** -1, 'Hz')
if __name__ == '__main__':
    # NOTE: runs the Hough-tutorial demo `test()`, not `main()`.
    test()
| #!/usr/bin/env python3
from time import time
import numpy as np
import cv2
def test(filename='rail.png'):
    """Detect straight lines in *filename* with both Hough transforms and
    display the results in separate windows; returns -1 when the image
    cannot be loaded."""
    start_time = time()
    # Loads an image
    src = cv2.imread(cv2.samples.findFile(filename), cv2.IMREAD_GRAYSCALE)
    # Check if image is loaded fine
    if src is None:
        print('Error opening image!')
        # print('Usage: hough_lines.py [image_name -- default ' + default_file + '] \n')
        return -1
    # Edge map feeds both Hough variants.
    dst = cv2.Canny(src, 50, 200, None, 3)
    # Copy edges to the images that will display the results in BGR)
    cdst = cv2.cvtColor(dst, cv2.COLOR_GRAY2BGR)
    cdstP = np.copy(cdst)
    # Standard transform: each hit is an infinite line in (rho, theta) form.
    lines = cv2.HoughLines(dst, 1, np.pi / 180, 150, None, 0, 0)
    if lines is not None:
        for i in range(0, len(lines)):
            rho = lines[i][0][0]
            theta = lines[i][0][1]
            a = np.cos(theta)
            b = np.sin(theta)
            x0 = a * rho
            y0 = b * rho
            # Two far-apart points on the line so the drawn segment spans the frame.
            pt1 = (int(x0 + 1000 * (-b)), int(y0 + 1000 * (a)))
            pt2 = (int(x0 - 1000 * (-b)), int(y0 - 1000 * (a)))
            cv2.line(cdst, pt1, pt2, (0, 0, 255), 3, cv2.LINE_AA)
    # Probabilistic transform: each hit is a finite segment (x1, y1, x2, y2).
    linesP = cv2.HoughLinesP(dst, 1, np.pi / 180, 50, None, 50, 10)
    if linesP is not None:
        for i in range(0, len(linesP)):
            l = linesP[i][0]
            cv2.line(cdstP, (l[0], l[1]), (l[2], l[3]), (0, 0, 255), 3, cv2.LINE_AA)
    cv2.imshow("Source", src)
    cv2.imshow("Detected Lines (in red) - Standard Hough Line Transform", cdst)
    cv2.imshow("Detected Lines (in red) - Probabilistic Line Transform", cdstP)
    # Reported rate covers everything before the blocking waitKey.
    print((time() - start_time) ** -1, 'Hz')
    cv2.waitKey()
def main():
    """Detect line segments in 'rail.png' and display them.

    Bug fixes vs. the original:
    * cv2.HoughLinesP returns finite segments as [x1, y1, x2, y2]; the old
      loop read the first two values as (rho, theta) and drew unrelated
      infinite lines.
    * Lines were drawn on `img`, but `output` (a copy taken *before*
      drawing) was displayed, so detections were never visible.
    """
    start = time()
    img = cv2.imread('rail.png')
    edges = cv2.Canny(img, 200, 110)
    lines = cv2.HoughLinesP(edges, 10, 0.1, 400)
    output = np.copy(img)
    if lines is not None:
        for i in range(0, len(lines)):
            x1, y1, x2, y2 = lines[i][0]
            # Draw on the copy that is actually shown below.
            cv2.line(output, (x1, y1), (x2, y2), (0, 0, 255), 3, cv2.LINE_AA)
    cv2.imshow('Edges', output)
    cv2.waitKey(0)
    print((time() - start) ** -1, 'Hz')
if __name__ == '__main__':
    # NOTE: runs the Hough-tutorial demo `test()`, not `main()`.
    test()
f5/bigip/tm/asm/policies/test/unit/test___init__.py | nghia-tran/f5-common-python | 272 | 6622588 | # Copyright 2015 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from f5.bigip import ManagementRoot
from f5.bigip.tm.asm import Asm
from f5.bigip.tm.asm.policies.parameters import Parameter
from f5.bigip.tm.asm.policies.parameters import Parameters_s
from f5.bigip.tm.asm.policies.parameters import ParametersCollection
from f5.bigip.tm.asm.policies.parameters import ParametersResource
from f5.bigip.tm.asm.policies.parameters import UrlParametersCollection
from f5.bigip.tm.asm.policies.parameters import UrlParametersResource
from f5.bigip.tm.asm.policies import Policy
from f5.bigip.tm.asm.policies.urls import Url
from f5.sdk_exception import MissingRequiredCreationParameter
from six import iterkeys
import mock
import pytest
@pytest.fixture
def FakeURL():
    """Url resource with a canned collection URI and a mocked parent."""
    parent = mock.MagicMock()
    fake_url = Url(parent)
    fake_url._meta_data['uri'] = (
        'https://192.168.1.1/mgmt/tm/asm/policies/'
        'Lx3553-321/urls/vIlmHUz1-CQx5yxDEuf0Rw')
    return fake_url
@pytest.fixture
def FakeUrlParameters():
    """UrlParametersCollection bound to a mocked policy on TMOS 11.6.0."""
    policy = mock.MagicMock()
    collection = UrlParametersCollection(policy)
    collection._meta_data['bigip'].tmos_version = '11.6.0'
    return collection
@pytest.fixture
def FakePolicy(fakeicontrolsession):
    """Policy resource hanging off a fake management-root session."""
    root = ManagementRoot('192.168.1.1', 'admin', 'admin')
    return Policy(Asm(root.tm))
def MakePolicy(fakeicontrolsession):
    """Helper (deliberately not a fixture): policy with a concrete URI set."""
    root = ManagementRoot('192.168.1.1', 'admin', 'admin')
    policy = root.tm.asm.policies_s.policy
    policy._meta_data['uri'] = (
        'https://192.168.1.1/mgmt/tm/asm/policies/Lx3553-321')
    return policy
@pytest.fixture
def FakePolicyParameters():
    """ParametersCollection bound to a mocked policy on TMOS 11.6.0."""
    policy = mock.MagicMock()
    collection = ParametersCollection(policy)
    collection._meta_data['bigip'].tmos_version = '11.6.0'
    return collection
class TestPolicy(object):
    """Unit tests for the ASM Policy organizing collection and resource."""

    def test_create_two(self, fakeicontrolsession):
        # Lazy attributes are cached: asking twice must yield the same object.
        b = ManagementRoot('192.168.1.1', 'admin', 'admin')
        t1 = b.tm.asm.policies_s.policy
        t2 = b.tm.asm.policies_s.policy
        assert t1 is t2

    def test_create_no_args(self, FakePolicy):
        # create() without its required parameters must refuse to run.
        with pytest.raises(MissingRequiredCreationParameter):
            FakePolicy.create()

    def test_collection(self, fakeicontrolsession):
        b = ManagementRoot('192.168.1.1', 'admin', 'admin')
        t = b.tm.asm.policies_s
        test_meta = t._meta_data['attribute_registry']
        test_meta2 = t._meta_data['allowed_lazy_attributes']
        kind = 'tm:asm:policies:policystate'
        assert kind in list(iterkeys(test_meta))
        assert Policy in test_meta2
        assert t._meta_data['object_has_stats'] is False

    def test_set_attr_reg_v11(self, fakeicontrolsession):
        # v11 sessions register the short blocking-settings kind.
        b = ManagementRoot('192.168.1.1', 'admin', 'admin')
        t1 = b.tm.asm.policies_s.policy
        v11kind = 'tm:asm:policies:blocking-settings'
        assert v11kind in t1._meta_data['attribute_registry'].keys()

    def test_set_attr_reg_v12(self, fakeicontrolsession_v12):
        # v12 sessions register the longer collection-state kind instead.
        b = ManagementRoot('192.168.1.1', 'admin', 'admin')
        t1 = b.tm.asm.policies_s.policy
        v12kind = 'tm:asm:policies:blocking-settings:blocking-settingcollectionstate'
        assert v12kind in t1._meta_data['attribute_registry'].keys()
class TestParameters_s(object):
    """Parameters_s must specialize based on its container's type."""

    def test_policycol_new(self, fakeicontrolsession):
        # A policy container yields the policy-level collection.
        collection = Parameters_s(MakePolicy(fakeicontrolsession))
        assert isinstance(collection, ParametersCollection)

    def test_urlcol_new(self, FakeURL):
        # A URL container yields the URL-level collection.
        collection = Parameters_s(FakeURL)
        assert isinstance(collection, UrlParametersCollection)
class TestParameter(object):
    """Parameter must specialize based on its parent collection's type."""

    def test_policyres_new(self, FakePolicyParameters):
        resource = Parameter(FakePolicyParameters)
        assert isinstance(resource, ParametersResource)

    def test_urlres_new(self, FakeUrlParameters):
        resource = Parameter(FakeUrlParameters)
        assert isinstance(resource, UrlParametersResource)
| # Copyright 2015 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from f5.bigip import ManagementRoot
from f5.bigip.tm.asm import Asm
from f5.bigip.tm.asm.policies.parameters import Parameter
from f5.bigip.tm.asm.policies.parameters import Parameters_s
from f5.bigip.tm.asm.policies.parameters import ParametersCollection
from f5.bigip.tm.asm.policies.parameters import ParametersResource
from f5.bigip.tm.asm.policies.parameters import UrlParametersCollection
from f5.bigip.tm.asm.policies.parameters import UrlParametersResource
from f5.bigip.tm.asm.policies import Policy
from f5.bigip.tm.asm.policies.urls import Url
from f5.sdk_exception import MissingRequiredCreationParameter
from six import iterkeys
import mock
import pytest
@pytest.fixture
def FakeURL():
    """Url resource with a canned collection URI and a mocked parent."""
    parent = mock.MagicMock()
    fake_url = Url(parent)
    fake_url._meta_data['uri'] = (
        'https://192.168.1.1/mgmt/tm/asm/policies/'
        'Lx3553-321/urls/vIlmHUz1-CQx5yxDEuf0Rw')
    return fake_url
@pytest.fixture
def FakeUrlParameters():
    """UrlParametersCollection bound to a mocked policy on TMOS 11.6.0."""
    policy = mock.MagicMock()
    collection = UrlParametersCollection(policy)
    collection._meta_data['bigip'].tmos_version = '11.6.0'
    return collection
@pytest.fixture
def FakePolicy(fakeicontrolsession):
    """Policy resource hanging off a fake management-root session."""
    root = ManagementRoot('192.168.1.1', 'admin', 'admin')
    return Policy(Asm(root.tm))
def MakePolicy(fakeicontrolsession):
    """Helper (deliberately not a fixture): policy with a concrete URI set."""
    root = ManagementRoot('192.168.1.1', 'admin', 'admin')
    policy = root.tm.asm.policies_s.policy
    policy._meta_data['uri'] = (
        'https://192.168.1.1/mgmt/tm/asm/policies/Lx3553-321')
    return policy
@pytest.fixture
def FakePolicyParameters():
    """ParametersCollection bound to a mocked policy on TMOS 11.6.0."""
    policy = mock.MagicMock()
    collection = ParametersCollection(policy)
    collection._meta_data['bigip'].tmos_version = '11.6.0'
    return collection
class TestPolicy(object):
    """Unit tests for the ASM Policy organizing collection and resource."""

    def test_create_two(self, fakeicontrolsession):
        # Lazy attributes are cached: asking twice must yield the same object.
        b = ManagementRoot('192.168.1.1', 'admin', 'admin')
        t1 = b.tm.asm.policies_s.policy
        t2 = b.tm.asm.policies_s.policy
        assert t1 is t2

    def test_create_no_args(self, FakePolicy):
        # create() without its required parameters must refuse to run.
        with pytest.raises(MissingRequiredCreationParameter):
            FakePolicy.create()

    def test_collection(self, fakeicontrolsession):
        b = ManagementRoot('192.168.1.1', 'admin', 'admin')
        t = b.tm.asm.policies_s
        test_meta = t._meta_data['attribute_registry']
        test_meta2 = t._meta_data['allowed_lazy_attributes']
        kind = 'tm:asm:policies:policystate'
        assert kind in list(iterkeys(test_meta))
        assert Policy in test_meta2
        assert t._meta_data['object_has_stats'] is False

    def test_set_attr_reg_v11(self, fakeicontrolsession):
        # v11 sessions register the short blocking-settings kind.
        b = ManagementRoot('192.168.1.1', 'admin', 'admin')
        t1 = b.tm.asm.policies_s.policy
        v11kind = 'tm:asm:policies:blocking-settings'
        assert v11kind in t1._meta_data['attribute_registry'].keys()

    def test_set_attr_reg_v12(self, fakeicontrolsession_v12):
        # v12 sessions register the longer collection-state kind instead.
        b = ManagementRoot('192.168.1.1', 'admin', 'admin')
        t1 = b.tm.asm.policies_s.policy
        v12kind = 'tm:asm:policies:blocking-settings:blocking-settingcollectionstate'
        assert v12kind in t1._meta_data['attribute_registry'].keys()
class TestParameters_s(object):
    """Parameters_s must specialize based on its container's type."""

    def test_policycol_new(self, fakeicontrolsession):
        # A policy container yields the policy-level collection.
        collection = Parameters_s(MakePolicy(fakeicontrolsession))
        assert isinstance(collection, ParametersCollection)

    def test_urlcol_new(self, FakeURL):
        # A URL container yields the URL-level collection.
        collection = Parameters_s(FakeURL)
        assert isinstance(collection, UrlParametersCollection)
class TestParameter(object):
    """Parameter must specialize based on its parent collection's type."""

    def test_policyres_new(self, FakePolicyParameters):
        resource = Parameter(FakePolicyParameters)
        assert isinstance(resource, ParametersResource)

    def test_urlres_new(self, FakeUrlParameters):
        resource = Parameter(FakeUrlParameters)
        assert isinstance(resource, UrlParametersResource)
| en | 0.850922 | # Copyright 2015 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # | 1.557538 | 2 |
validation.py | pupumao/HandwrittenGraphemeClassification | 0 | 6622589 | <reponame>pupumao/HandwrittenGraphemeClassification
from lib.dataset.dataietr import DataIter
from train_config import config
from lib.core.model.ShuffleNet_Series.ShuffleNetV2.network import ShuffleNetV2
from lib.core.model.semodel.SeResnet import se_resnet50
import torch
import time
import argparse
import sklearn.metrics
from tqdm import tqdm
import numpy as np
import os
# Pin evaluation to the first GPU before any CUDA context is created.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import cv2
from train_config import config as cfg

# Evaluate one sample at a time so per-sample predictions are easy to collect.
cfg.TRAIN.batch_size=1
# Validation iterator (training flag False) shared by vis() below.
ds = DataIter(cfg.DATA.root_path,cfg.DATA.val_txt_path,False)
def vis(model_path):
    """Evaluate a trained SE-ResNet50 checkpoint on the validation iterator ``ds``.

    Runs the model over every validation sample, collects argmax predictions and
    ground truth for the three classification heads, then prints the competition
    metric: macro recall per head, averaged with weights [2, 1, 1] (first head
    counts double).

    Args:
        model_path: path to a saved ``state_dict`` checkpoint file.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Build the model and load the checkpoint (strict=False tolerates
    # missing/unexpected keys in the checkpoint).
    model = se_resnet50()
    model.load_state_dict(torch.load(model_path, map_location=device), strict=False)
    model.to(device)
    model.eval()

    # Per-head accumulators: preds[h] / trues[h] for heads h = 0, 1, 2.
    preds = [[], [], []]
    trues = [[], [], []]
    with torch.no_grad():  # inference only: skip autograd bookkeeping
        for _ in tqdm(range(ds.size)):
            images, labels = ds()
            # labels[0] holds the three target class ids for the single sample.
            for head in range(3):
                trues[head].append(labels[0][head])
            images = torch.from_numpy(images).to(device)
            # The model returns one logit tensor per classification head.
            logits = model(images)
            for head, logit in enumerate(logits):
                prob = torch.softmax(logit, 1).cpu().detach().numpy()[0, :]
                preds[head].append(np.argmax(prob))

    # Macro recall per head; the final metric weights head 0 twice as much.
    scores = [
        sklearn.metrics.recall_score(trues[h], preds[h], average='macro')
        for h in range(3)
    ]
    final_score = np.average(scores, weights=[2, 1, 1])
    # Fixed format spec: '%5f' meant a minimum *width* of 5 (a no-op here);
    # '%.5f' prints 5 decimal places as intended.
    print('cu score is %.5f' % final_score)
def load_checkpoint(net, checkpoint, device):
    """Load a serialized state dict from ``checkpoint`` into ``net``.

    Uses strict key matching, so the checkpoint must contain exactly the
    parameters the network expects; ``map_location=device`` remaps tensors
    onto the target device while deserializing.
    """
    state_dict = torch.load(checkpoint, map_location=device)
    net.load_state_dict(state_dict, strict=True)
if __name__ == '__main__':
    # CLI entry point: evaluate the checkpoint passed via --model.
    arg_parser = argparse.ArgumentParser(description='Start train.')
    arg_parser.add_argument(
        '--model', dest='model', type=str, default=None,
        help='the model to use')
    cli_args = arg_parser.parse_args()
    vis(cli_args.model)
| from lib.dataset.dataietr import DataIter
from train_config import config
from lib.core.model.ShuffleNet_Series.ShuffleNetV2.network import ShuffleNetV2
from lib.core.model.semodel.SeResnet import se_resnet50
import torch
import time
import argparse
import sklearn.metrics
from tqdm import tqdm
import numpy as np
import os
# Pin evaluation to the first GPU before any CUDA context is created.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import cv2
from train_config import config as cfg

# Evaluate one sample at a time so per-sample predictions are easy to collect.
cfg.TRAIN.batch_size=1
# Validation iterator (training flag False) shared by vis() below.
ds = DataIter(cfg.DATA.root_path,cfg.DATA.val_txt_path,False)
def vis(model_path):
    """Evaluate a SE-ResNet50 checkpoint on the module-level ``ds`` iterator.

    Collects per-head predictions/targets, then prints the weighted
    macro-recall score (first head weighted 2x). Returns nothing.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    ###build model
    model=se_resnet50()
    # strict=False: tolerate missing/unexpected keys in the checkpoint.
    model.load_state_dict(torch.load(model_path, map_location=device), strict=False)
    model.to(device)
    model.eval()
    # Per-head prediction / ground-truth accumulators.
    cls1_pre_list=[]
    cls1_true_list = []
    cls2_pre_list = []
    cls2_true_list = []
    cls3_pre_list = []
    cls3_true_list = []
    for step in tqdm(range(ds.size)):
        images, labels = ds()
        # labels[0] holds the three target class ids for this sample.
        cls1_true_list.append(labels[0][0])
        cls2_true_list.append(labels[0][1])
        cls3_true_list.append(labels[0][2])
        # NOTE(review): img_show is only used by the commented debug code below.
        img_show = np.array(images)
        # img_show=np.transpose(img_show[0],axes=[1,2,0])
        images=torch.from_numpy(images)
        images=images.to(device)
        # NOTE(review): this timing value is never read.
        start=time.time()
        logit1, logit2, logit3 = model(images)
        # Softmax over the class dimension for each head.
        res1 = torch.softmax(logit1,1)
        res2 = torch.softmax(logit2,1)
        res3 = torch.softmax(logit3,1)
        # Take the single sample's probability distribution (batch size is 1).
        res1=res1.cpu().detach().numpy()[0,:]
        res2 = res2.cpu().detach().numpy()[0,:]
        res3 = res3.cpu().detach().numpy()[0,:]
        cls1_pre_list.append(np.argmax(res1))
        cls2_pre_list.append(np.argmax(res2))
        cls3_pre_list.append(np.argmax(res3))
        #print(res)
        # img_show=img_show.astype(np.uint8)
        #
        # img_show=cv2.cvtColor(img_show, cv2.COLOR_BGR2RGB)
        # cv2.imshow('tmp',img_show)
        # cv2.waitKey(0)
    # Macro recall per head; final metric weights head 1 twice as much.
    score1=sklearn.metrics.recall_score(
        cls1_true_list, cls1_pre_list, average='macro')
    score2 = sklearn.metrics.recall_score(
        cls2_true_list, cls2_pre_list, average='macro')
    score3 = sklearn.metrics.recall_score(
        cls3_true_list, cls3_pre_list, average='macro')
    final_score = np.average([score1,score2,score3], weights=[2, 1, 1])
    print('cu score is %5f'%final_score)
def load_checkpoint(net, checkpoint, device):
    """Load a serialized state dict from ``checkpoint`` into ``net`` (strict keys)."""
    # from collections import OrderedDict
    #
    # temp = OrderedDict()
    # if 'state_dict' in checkpoint:
    #     checkpoint = dict(checkpoint['state_dict'])
    # for k in checkpoint:
    #     k2 = 'module.'+k if not k.startswith('module.') else k
    #     temp[k2] = checkpoint[k]
    net.load_state_dict(torch.load(checkpoint,map_location=device), strict=True)
if __name__=='__main__':
    # CLI entry point: evaluate the checkpoint passed via --model.
    parser = argparse.ArgumentParser(description='Start train.')
    parser.add_argument('--model', dest='model', type=str, default=None, \
                        help='the model to use')
    args = parser.parse_args()
    vis(args.model)
bin/geometric-cli.py | Vikicsizmadia/ctp | 41 | 6622590 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import argparse
import multiprocessing
import numpy as np
import torch
from torch import nn, Tensor
import torch.nn.functional as F
from torch_geometric.data import Data as GeometricData, Batch
from ctp.util import make_batches
from ctp.clutrr import Data, Instance
from ctp.geometric import GraphAttentionNetwork
from ctp.geometric import GraphConvolutionalNetwork
from ctp.geometric import VecBaselineNetworkV1
from ctp.geometric import VecBaselineNetworkV2
from ctp.geometric import Seq2VecEncoderFactory
from typing import Dict, List, Tuple, Optional
import logging
# Logger named after the executing script.
logger = logging.getLogger(os.path.basename(sys.argv[0]))

# Wide, readable numpy printing; let torch use every available core.
np.set_printoptions(linewidth=256, precision=4, suppress=True, threshold=sys.maxsize)
torch.set_num_threads(multiprocessing.cpu_count())

# Example invocation:
# PYTHONPATH=. python3 ./bin/geometric-cli.py
#   --train data/clutrr-emnlp/data_089907f8/*train*
#   --test data/clutrr-emnlp/data_089907f8/*test*
def to_data(instance: Instance,
            relation_to_idx: Dict[str, int],
            test_relation_to_idx: Dict[str, int],
            nb_entities: int,
            is_predicate: bool,
            predicate_to_idx: Dict[str, int],
            relation_to_predicate: Dict[str, str],
            test_predicate_to_idx: Dict[str, int],
            device: Optional[torch.device] = None) -> Tuple[GeometricData, Tuple[int, int]]:
    """Convert one CLUTRR instance into a torch-geometric ``Data`` object.

    Returns the graph plus the (subject, object) node indices of the query
    target. Edge labels are relation ids, or the coarser predicate ids when
    ``is_predicate`` is set.
    """
    # Deterministic node numbering: sorted entity names -> 0..n-1.
    entities = sorted({name for (s, _, o) in instance.story for name in {s, o}})
    entity_to_idx = {name: idx for idx, name in enumerate(entities)}

    # Node "features" are just node ids, padded up to nb_entities for batching.
    node_ids = torch.arange(nb_entities, device=device).view(-1, 1)

    pairs = [(entity_to_idx[s], entity_to_idx[o]) for (s, _, o) in instance.story]
    edge_index = torch.tensor(list(zip(*pairs)), dtype=torch.long, device=device)

    if is_predicate is True:
        # Coarser predicate labels for both edges and the query target.
        edge_types = [predicate_to_idx[relation_to_predicate[p]] for (_, p, _) in instance.story]
        target_idx = test_predicate_to_idx[relation_to_predicate[instance.target[1]]]
    else:
        edge_types = [relation_to_idx[p] for (_, p, _) in instance.story]
        target_idx = test_relation_to_idx[instance.target[1]]

    y = torch.tensor([target_idx], device=device)
    edge_attr = torch.tensor(edge_types, dtype=torch.long, device=device).view(-1, 1)
    graph = GeometricData(x=node_ids, edge_index=edge_index, edge_attr=edge_attr, y=y)

    head, tail = instance.target[0], instance.target[2]
    return graph, (entity_to_idx[head], entity_to_idx[tail])
def to_batches(instances: List[Instance],
               batch_size: int,
               relation_to_idx: Dict[str, int],
               test_relation_to_idx: Dict[str, int],
               is_predicate: bool,
               predicate_to_idx: Dict[str, int],
               relation_to_predicate: Dict[str, str],
               test_predicate_to_idx: Dict[str, int],
               device: Optional[torch.device] = None) -> List[Tuple[Batch, List[int], Tensor, List[Instance]]]:
    """Group instances into torch-geometric batches for training/evaluation.

    Each element of the returned list is (Batch, per-graph slice sizes,
    target node-pair tensor of shape (batch, 1, 2), original instances).
    """
    nb_instances, res = len(instances), []
    batches = make_batches(nb_instances, batch_size)
    for batch_start, batch_end in batches:
        batch_instances = instances[batch_start:batch_end]
        # All graphs in the batch are padded to the largest entity count.
        max_nb_entities = max(i.nb_nodes for i in batch_instances)
        this_batch_size = len(batch_instances)
        batch_pairs = [
            to_data(i, relation_to_idx, test_relation_to_idx, max_nb_entities,
                    is_predicate, predicate_to_idx, relation_to_predicate, test_predicate_to_idx, device=device)
            for i in batch_instances
        ]
        batch_data: List[GeometricData] = [d for d, _ in batch_pairs]
        batch_targets: List[List[int]] = [[p[0], p[1]] for _, p in batch_pairs]
        # Largest node id used anywhere in the batch, plus one -> common slice size.
        max_node = max(i + 1 for b in batch_data for i in b.x[:, 0].cpu().numpy())
        batch = Batch.from_data_list(batch_data)
        slices = [max_node for _ in batch_data]
        # (subject, object) node-index pair per graph, shaped (batch, 1, 2).
        targets = torch.tensor(batch_targets, dtype=torch.long, device=device).view(this_batch_size, 1, 2)
        res += [(batch, slices, targets, batch_instances)]
    return res
def main(argv):
    """Train a graph model on CLUTRR and periodically evaluate on the test sets.

    Parses command-line flags, builds the dataset and the selected model
    ('gat', 'gcn', or a seq2vec baseline otherwise), then runs the training
    loop, printing the train loss each epoch and test accuracy at intervals.
    """
    argparser = argparse.ArgumentParser('Geometric CLUTRR', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    train_path = "data/clutrr-emnlp/data_test/64.csv"
    argparser.add_argument('--train', action='store', type=str, default=train_path)
    argparser.add_argument('--test', nargs='+', type=str, default=[])
    argparser.add_argument('--model', '-m', action='store', type=str, default='gat')
    # training params
    argparser.add_argument('--epochs', '-e', action='store', type=int, default=100)
    argparser.add_argument('--learning-rate', '-l', action='store', type=float, default=0.001)
    argparser.add_argument('--batch-size', '-b', action='store', type=int, default=100)
    argparser.add_argument('--embedding-size', '-k', action='store', type=int, default=100)
    argparser.add_argument('--edge-embedding-size', '-K', action='store', type=int, default=20)
    argparser.add_argument('--hidden-size', action='store', type=int, default=100)
    argparser.add_argument('--nb-filters', action='store', type=int, default=100)
    argparser.add_argument('--nb-heads', action='store', type=int, default=3)
    argparser.add_argument('--nb-rounds', action='store', type=int, default=3)
    argparser.add_argument('--nb-highway', action='store', type=int, default=2)
    argparser.add_argument('--seed', action='store', type=int, default=0)
    argparser.add_argument('--evaluate-every', '-V', action='store', type=int, default=1)
    argparser.add_argument('--v2', action='store_true', default=False)
    argparser.add_argument('--predicate', action='store_true', default=False)
    args = argparser.parse_args(argv)
    train_path = args.train
    test_paths = args.test
    model_name = args.model
    nb_epochs = args.epochs
    learning_rate = args.learning_rate
    batch_size = args.batch_size
    embedding_size = args.embedding_size
    edge_embedding_size = args.edge_embedding_size
    hidden_size = args.hidden_size
    nb_filters = args.nb_filters
    nb_heads = args.nb_heads
    nb_rounds = args.nb_rounds
    nb_highway = args.nb_highway
    seed = args.seed
    evaluate_every = args.evaluate_every
    is_v2 = args.v2
    is_predicate = args.predicate
    # Fix RNG seeds for reproducibility.
    np.random.seed(seed)
    torch.manual_seed(seed)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    logger.info(f'Device: {device}')
    if torch.cuda.is_available():
        torch.set_default_tensor_type(torch.cuda.FloatTensor)
    data = Data(train_path=train_path, test_paths=test_paths)
    entity_lst, _, relation_lst = data.entity_lst, data.predicate_lst, data.relation_lst
    predicate_lst = data.predicate_lst
    relation_to_predicate = data.relation_to_predicate
    # Fixed label set: the 18 family relations CLUTRR asks about.
    test_relation_lst = ["aunt", "brother", "daughter", "daughter-in-law", "father", "father-in-law", "granddaughter",
                         "grandfather", "grandmother", "grandson", "mother", "mother-in-law", "nephew", "niece",
                         "sister", "son", "son-in-law", "uncle"]
    test_predicate_lst = sorted({relation_to_predicate[r] for r in test_relation_lst})
    relation_to_idx = {r: i for i, r in enumerate(relation_lst)}
    test_relation_to_idx = {r: i for i, r in enumerate(test_relation_lst)}
    predicate_to_idx = {p: i for i, p in enumerate(predicate_lst)}
    test_predicate_to_idx = {p: i for i, p in enumerate(test_predicate_lst)}
    nb_nodes = len(entity_lst)
    nb_edge_types = len(relation_lst)
    nb_targets = len(test_relation_lst)
    # With --predicate, edges/targets use the coarser predicate classes.
    if is_predicate is True:
        nb_edge_types = len(predicate_lst)
        nb_targets = len(test_predicate_lst)
    nb_instances = len(data.train)
    batches = to_batches(data.train, batch_size=batch_size,
                         relation_to_idx=relation_to_idx,
                         test_relation_to_idx=test_relation_to_idx,
                         is_predicate=is_predicate,
                         predicate_to_idx=predicate_to_idx,
                         relation_to_predicate=relation_to_predicate,
                         test_predicate_to_idx=test_predicate_to_idx,
                         device=device)
    # Model selection: graph networks, or a seq2vec baseline for any other name.
    if model_name in {'gat'}:
        model = GraphAttentionNetwork(nb_nodes=nb_nodes, nb_edge_types=nb_edge_types, target_size=nb_targets,
                                      nb_heads=nb_heads, embedding_size=embedding_size,
                                      edge_embedding_size=edge_embedding_size, nb_rounds=nb_rounds)
    elif model_name in {'gcn'}:
        model = GraphConvolutionalNetwork(nb_nodes=nb_nodes, nb_edge_types=nb_edge_types, target_size=nb_targets,
                                          embedding_size=embedding_size, edge_embedding_size=edge_embedding_size,
                                          nb_rounds=nb_rounds)
    else:
        encoder_factory = Seq2VecEncoderFactory()
        encoder = encoder_factory.build(name=model_name, embedding_dim=embedding_size, hidden_size=hidden_size,
                                        num_filters=nb_filters, num_heads=nb_heads, num_highway=nb_highway)
        # NOTE(review): the baselines are given nb_targets as nb_edge_types — confirm intended.
        if is_v2 is False:
            model = VecBaselineNetworkV1(nb_nodes=nb_nodes, nb_edge_types=nb_targets, relation_lst=relation_lst,
                                         encoder=encoder, embedding_size=embedding_size)
        else:
            model = VecBaselineNetworkV2(nb_nodes=nb_nodes, nb_edge_types=nb_targets, relation_lst=relation_lst,
                                         encoder=encoder, embedding_size=embedding_size)
    model = model.to(device)
    params_lst = nn.ParameterList([p for p in model.parameters()])
    optimizer = torch.optim.Adam(params_lst, lr=learning_rate)

    def test(test_set) -> float:
        """Accuracy of the current model over one evaluation split."""
        correct = 0
        model.eval()
        test_batches = to_batches(test_set, batch_size=batch_size, relation_to_idx=relation_to_idx,
                                  test_relation_to_idx=test_relation_to_idx,
                                  is_predicate=is_predicate,
                                  predicate_to_idx=predicate_to_idx,
                                  relation_to_predicate=relation_to_predicate,
                                  test_predicate_to_idx=test_predicate_to_idx,
                                  device=device)
        for test_batch, test_slices, test_targets, test_instances in test_batches:
            test_logits = model(test_batch, test_slices, test_targets, test_instances)
            # argmax over the class dimension.
            test_predictions = test_logits.max(dim=1)[1]
            correct += test_predictions.eq(test_batch.y).sum().item()
        return correct / len(test_set)

    for epoch in range(1, nb_epochs + 1):
        loss_total = 0.0
        model.train()
        for batch, slices, targets, instances in batches:
            logits = model(batch, slices, targets, instances)
            assert logits.shape[1] == len(test_relation_lst if not is_predicate else test_predicate_lst)
            loss = F.cross_entropy(logits, batch.y, reduction='sum')
            loss_total += loss.item()
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
        # train_accuracy = test(data.train)
        print(f'Epoch: {epoch:03d}, Train Loss: {loss_total / nb_instances:.7f}')
        if epoch % evaluate_every == 0:
            for name in data.test:
                test_accuracy = test(data.test[name])
                print(f'Epoch: {epoch:03d}, Test Set: {name}, Accuracy: {test_accuracy:.7f}')
    logger.info("Training finished")
if __name__ == '__main__':
    # Debug-level logging to stdout; echo the exact command line for reproducibility.
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    print(' '.join(sys.argv))
    main(sys.argv[1:])
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import argparse
import multiprocessing
import numpy as np
import torch
from torch import nn, Tensor
import torch.nn.functional as F
from torch_geometric.data import Data as GeometricData, Batch
from ctp.util import make_batches
from ctp.clutrr import Data, Instance
from ctp.geometric import GraphAttentionNetwork
from ctp.geometric import GraphConvolutionalNetwork
from ctp.geometric import VecBaselineNetworkV1
from ctp.geometric import VecBaselineNetworkV2
from ctp.geometric import Seq2VecEncoderFactory
from typing import Dict, List, Tuple, Optional
import logging
# Logger named after the executing script.
logger = logging.getLogger(os.path.basename(sys.argv[0]))

# Wide, readable numpy printing; let torch use every available core.
np.set_printoptions(linewidth=256, precision=4, suppress=True, threshold=sys.maxsize)
torch.set_num_threads(multiprocessing.cpu_count())

# Example invocation:
# PYTHONPATH=. python3 ./bin/geometric-cli.py
#   --train data/clutrr-emnlp/data_089907f8/*train*
#   --test data/clutrr-emnlp/data_089907f8/*test*
def to_data(instance: Instance,
            relation_to_idx: Dict[str, int],
            test_relation_to_idx: Dict[str, int],
            nb_entities: int,
            is_predicate: bool,
            predicate_to_idx: Dict[str, int],
            relation_to_predicate: Dict[str, str],
            test_predicate_to_idx: Dict[str, int],
            device: Optional[torch.device] = None) -> Tuple[GeometricData, Tuple[int, int]]:
    """Turn one CLUTRR instance into a torch-geometric graph plus its query node pair."""
    # Deterministic node numbering: sorted entity names -> 0..n-1.
    entity_lst = sorted({x for t in instance.story for x in {t[0], t[2]}})
    entity_to_idx = {e: i for i, e in enumerate(entity_lst)}
    # Node "features" are just node ids, padded up to nb_entities for batching.
    x = torch.arange(nb_entities, device=device).view(-1, 1)
    edge_list = [(entity_to_idx[s], entity_to_idx[o]) for (s, _, o) in instance.story]
    edge_index = torch.tensor(list(zip(*edge_list)), dtype=torch.long, device=device)
    if is_predicate is True:
        # Coarser predicate labels for both edges and the query target.
        edge_types = [predicate_to_idx[relation_to_predicate[p]] for (_, p, _) in instance.story]
        y = torch.tensor([test_predicate_to_idx[relation_to_predicate[instance.target[1]]]], device=device)
    else:
        edge_types = [relation_to_idx[p] for (_, p, _) in instance.story]
        y = torch.tensor([test_relation_to_idx[instance.target[1]]], device=device)
    edge_attr = torch.tensor(edge_types, dtype=torch.long, device=device).view(-1, 1)
    res = GeometricData(x=x, edge_index=edge_index, edge_attr=edge_attr, y=y)
    # (subject, object) node indices of the query.
    target_pair = (entity_to_idx[instance.target[0]], entity_to_idx[instance.target[2]])
    return res, target_pair
def to_batches(instances: List[Instance],
               batch_size: int,
               relation_to_idx: Dict[str, int],
               test_relation_to_idx: Dict[str, int],
               is_predicate: bool,
               predicate_to_idx: Dict[str, int],
               relation_to_predicate: Dict[str, str],
               test_predicate_to_idx: Dict[str, int],
               device: Optional[torch.device] = None) -> List[Tuple[Batch, List[int], Tensor, List[Instance]]]:
    """Group instances into torch-geometric batches for training/evaluation.

    Each element of the returned list is (Batch, per-graph slice sizes,
    target node-pair tensor of shape (batch, 1, 2), original instances).
    """
    nb_instances, res = len(instances), []
    batches = make_batches(nb_instances, batch_size)
    for batch_start, batch_end in batches:
        batch_instances = instances[batch_start:batch_end]
        # All graphs in the batch are padded to the largest entity count.
        max_nb_entities = max(i.nb_nodes for i in batch_instances)
        this_batch_size = len(batch_instances)
        batch_pairs = [
            to_data(i, relation_to_idx, test_relation_to_idx, max_nb_entities,
                    is_predicate, predicate_to_idx, relation_to_predicate, test_predicate_to_idx, device=device)
            for i in batch_instances
        ]
        batch_data: List[GeometricData] = [d for d, _ in batch_pairs]
        batch_targets: List[List[int]] = [[p[0], p[1]] for _, p in batch_pairs]
        # Largest node id used anywhere in the batch, plus one -> common slice size.
        max_node = max(i + 1 for b in batch_data for i in b.x[:, 0].cpu().numpy())
        batch = Batch.from_data_list(batch_data)
        slices = [max_node for _ in batch_data]
        # (subject, object) node-index pair per graph, shaped (batch, 1, 2).
        targets = torch.tensor(batch_targets, dtype=torch.long, device=device).view(this_batch_size, 1, 2)
        res += [(batch, slices, targets, batch_instances)]
    return res
def main(argv):
    """Train a graph model on CLUTRR and periodically evaluate on the test sets.

    Parses command-line flags, builds the dataset and the selected model
    ('gat', 'gcn', or a seq2vec baseline otherwise), then runs the training
    loop, printing the train loss each epoch and test accuracy at intervals.
    """
    argparser = argparse.ArgumentParser('Geometric CLUTRR', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    train_path = "data/clutrr-emnlp/data_test/64.csv"
    argparser.add_argument('--train', action='store', type=str, default=train_path)
    argparser.add_argument('--test', nargs='+', type=str, default=[])
    argparser.add_argument('--model', '-m', action='store', type=str, default='gat')
    # training params
    argparser.add_argument('--epochs', '-e', action='store', type=int, default=100)
    argparser.add_argument('--learning-rate', '-l', action='store', type=float, default=0.001)
    argparser.add_argument('--batch-size', '-b', action='store', type=int, default=100)
    argparser.add_argument('--embedding-size', '-k', action='store', type=int, default=100)
    argparser.add_argument('--edge-embedding-size', '-K', action='store', type=int, default=20)
    argparser.add_argument('--hidden-size', action='store', type=int, default=100)
    argparser.add_argument('--nb-filters', action='store', type=int, default=100)
    argparser.add_argument('--nb-heads', action='store', type=int, default=3)
    argparser.add_argument('--nb-rounds', action='store', type=int, default=3)
    argparser.add_argument('--nb-highway', action='store', type=int, default=2)
    argparser.add_argument('--seed', action='store', type=int, default=0)
    argparser.add_argument('--evaluate-every', '-V', action='store', type=int, default=1)
    argparser.add_argument('--v2', action='store_true', default=False)
    argparser.add_argument('--predicate', action='store_true', default=False)
    args = argparser.parse_args(argv)
    train_path = args.train
    test_paths = args.test
    model_name = args.model
    nb_epochs = args.epochs
    learning_rate = args.learning_rate
    batch_size = args.batch_size
    embedding_size = args.embedding_size
    edge_embedding_size = args.edge_embedding_size
    hidden_size = args.hidden_size
    nb_filters = args.nb_filters
    nb_heads = args.nb_heads
    nb_rounds = args.nb_rounds
    nb_highway = args.nb_highway
    seed = args.seed
    evaluate_every = args.evaluate_every
    is_v2 = args.v2
    is_predicate = args.predicate
    # Fix RNG seeds for reproducibility.
    np.random.seed(seed)
    torch.manual_seed(seed)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    logger.info(f'Device: {device}')
    if torch.cuda.is_available():
        torch.set_default_tensor_type(torch.cuda.FloatTensor)
    data = Data(train_path=train_path, test_paths=test_paths)
    entity_lst, _, relation_lst = data.entity_lst, data.predicate_lst, data.relation_lst
    predicate_lst = data.predicate_lst
    relation_to_predicate = data.relation_to_predicate
    # Fixed label set: the 18 family relations CLUTRR asks about.
    test_relation_lst = ["aunt", "brother", "daughter", "daughter-in-law", "father", "father-in-law", "granddaughter",
                         "grandfather", "grandmother", "grandson", "mother", "mother-in-law", "nephew", "niece",
                         "sister", "son", "son-in-law", "uncle"]
    test_predicate_lst = sorted({relation_to_predicate[r] for r in test_relation_lst})
    relation_to_idx = {r: i for i, r in enumerate(relation_lst)}
    test_relation_to_idx = {r: i for i, r in enumerate(test_relation_lst)}
    predicate_to_idx = {p: i for i, p in enumerate(predicate_lst)}
    test_predicate_to_idx = {p: i for i, p in enumerate(test_predicate_lst)}
    nb_nodes = len(entity_lst)
    nb_edge_types = len(relation_lst)
    nb_targets = len(test_relation_lst)
    # With --predicate, edges/targets use the coarser predicate classes.
    if is_predicate is True:
        nb_edge_types = len(predicate_lst)
        nb_targets = len(test_predicate_lst)
    nb_instances = len(data.train)
    batches = to_batches(data.train, batch_size=batch_size,
                         relation_to_idx=relation_to_idx,
                         test_relation_to_idx=test_relation_to_idx,
                         is_predicate=is_predicate,
                         predicate_to_idx=predicate_to_idx,
                         relation_to_predicate=relation_to_predicate,
                         test_predicate_to_idx=test_predicate_to_idx,
                         device=device)
    # Model selection: graph networks, or a seq2vec baseline for any other name.
    if model_name in {'gat'}:
        model = GraphAttentionNetwork(nb_nodes=nb_nodes, nb_edge_types=nb_edge_types, target_size=nb_targets,
                                      nb_heads=nb_heads, embedding_size=embedding_size,
                                      edge_embedding_size=edge_embedding_size, nb_rounds=nb_rounds)
    elif model_name in {'gcn'}:
        model = GraphConvolutionalNetwork(nb_nodes=nb_nodes, nb_edge_types=nb_edge_types, target_size=nb_targets,
                                          embedding_size=embedding_size, edge_embedding_size=edge_embedding_size,
                                          nb_rounds=nb_rounds)
    else:
        encoder_factory = Seq2VecEncoderFactory()
        encoder = encoder_factory.build(name=model_name, embedding_dim=embedding_size, hidden_size=hidden_size,
                                        num_filters=nb_filters, num_heads=nb_heads, num_highway=nb_highway)
        # NOTE(review): the baselines are given nb_targets as nb_edge_types — confirm intended.
        if is_v2 is False:
            model = VecBaselineNetworkV1(nb_nodes=nb_nodes, nb_edge_types=nb_targets, relation_lst=relation_lst,
                                         encoder=encoder, embedding_size=embedding_size)
        else:
            model = VecBaselineNetworkV2(nb_nodes=nb_nodes, nb_edge_types=nb_targets, relation_lst=relation_lst,
                                         encoder=encoder, embedding_size=embedding_size)
    model = model.to(device)
    params_lst = nn.ParameterList([p for p in model.parameters()])
    optimizer = torch.optim.Adam(params_lst, lr=learning_rate)

    def test(test_set) -> float:
        """Accuracy of the current model over one evaluation split."""
        correct = 0
        model.eval()
        test_batches = to_batches(test_set, batch_size=batch_size, relation_to_idx=relation_to_idx,
                                  test_relation_to_idx=test_relation_to_idx,
                                  is_predicate=is_predicate,
                                  predicate_to_idx=predicate_to_idx,
                                  relation_to_predicate=relation_to_predicate,
                                  test_predicate_to_idx=test_predicate_to_idx,
                                  device=device)
        for test_batch, test_slices, test_targets, test_instances in test_batches:
            test_logits = model(test_batch, test_slices, test_targets, test_instances)
            # argmax over the class dimension.
            test_predictions = test_logits.max(dim=1)[1]
            correct += test_predictions.eq(test_batch.y).sum().item()
        return correct / len(test_set)

    for epoch in range(1, nb_epochs + 1):
        loss_total = 0.0
        model.train()
        for batch, slices, targets, instances in batches:
            logits = model(batch, slices, targets, instances)
            assert logits.shape[1] == len(test_relation_lst if not is_predicate else test_predicate_lst)
            loss = F.cross_entropy(logits, batch.y, reduction='sum')
            loss_total += loss.item()
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
        # train_accuracy = test(data.train)
        print(f'Epoch: {epoch:03d}, Train Loss: {loss_total / nb_instances:.7f}')
        if epoch % evaluate_every == 0:
            for name in data.test:
                test_accuracy = test(data.test[name])
                print(f'Epoch: {epoch:03d}, Test Set: {name}, Accuracy: {test_accuracy:.7f}')
    logger.info("Training finished")
if __name__ == '__main__':
    # Debug-level logging to stdout; echo the exact command line for reproducibility.
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    print(' '.join(sys.argv))
    main(sys.argv[1:])
| en | 0.373873 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- # PYTHONPATH=. python3 ./bin/geometric-cli.py # --train data/clutrr-emnlp/data_089907f8/*train* # --test data/clutrr-emnlp/data_089907f8/*test* # training params # train_accuracy = test(data.train) | 2.110676 | 2 |
RL/utils.py | lluissalord/sentiment_trader | 7 | 6622591 | <filename>RL/utils.py
""" Utils for RL agent """
import numpy as np
import collections
import os
import time
from absl import logging
from tensorflow import equal as tf_equal
from tensorflow import add as tf_add
from tensorflow.compat.v2 import summary
from gym import spaces
from tf_agents.environments import tf_py_environment, parallel_py_environment
from tf_agents.environments.gym_wrapper import GymWrapper
from tf_agents.eval import metric_utils
from tf_agents.utils import common
from tf_agents.drivers import dynamic_step_driver, dynamic_episode_driver
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.policies import random_tf_policy
from tf_agents.policies import policy_saver
from RL.stock_env import RLStocksEnv, REVENUE_REWARD, PRICE_REWARD
def generateSplitEnvs(
    train_df,
    valid_df,
    test_df,
    window_size,
    steps_per_episode,
    feature_columns,
    reward_type=REVENUE_REWARD,
    max_final_reward=1,
    max_step_reward=0,
    num_parallel_environments=1,
    position_as_observation=True,
    constant_step=False,
    is_training=True,
    seed=12345,
):
    """Create TF-Agents environments for the train, validation and test splits.

    All three splits share the same environment configuration; only the
    DataFrame, the seed and the training flag differ. Training environments
    always run with ``is_training=True`` and get seeds ``seed + i``.

    Returns:
        (tf_env, eval_tf_env, test_tf_env): TFPyEnvironment wrappers for
        train, validation and test respectively.
    """

    def _make_env(df, env_seed, training):
        # Factory for one configured, seeded and reset RLStocksEnv.
        env = RLStocksEnv(
            df=df,
            window_size=window_size,
            frame_bound=(window_size, len(df)),
            steps_per_episode=steps_per_episode,
            is_training=training,
            constant_step=constant_step,
            feature_columns=feature_columns,
            position_as_observation=position_as_observation,
            reward_type=reward_type,
            max_final_reward=max_final_reward,
            max_step_reward=max_step_reward,
        )
        env.seed(env_seed)
        env.reset()
        return env

    # Discrete action spaces must map to int32, otherwise evaluating the
    # ChosenActionHistogram metric raises an error.
    spec_dtype_map = {spaces.Discrete: np.int32}

    eval_env = _make_env(valid_df, seed, is_training)
    test_env = _make_env(test_df, seed, is_training)

    # One (optionally several, for parallel collection) training environment.
    tf_parallel_envs = [
        GymWrapper(_make_env(train_df, seed + i, True), spec_dtype_map=spec_dtype_map)
        for i in range(num_parallel_environments)
    ]

    # TODO: Implement Parallel Environment (need tf_agents.system.multiprocessing.enable_interactive_mode() added in github last updates)
    if num_parallel_environments != 1:
        tf_env = tf_py_environment.TFPyEnvironment(
            parallel_py_environment.ParallelPyEnvironment(tf_parallel_envs))
    else:
        tf_env = tf_py_environment.TFPyEnvironment(tf_parallel_envs[0])

    eval_tf_env = tf_py_environment.TFPyEnvironment(
        GymWrapper(eval_env, spec_dtype_map=spec_dtype_map))
    test_tf_env = tf_py_environment.TFPyEnvironment(
        GymWrapper(test_env, spec_dtype_map=spec_dtype_map))

    return tf_env, eval_tf_env, test_tf_env
class AgentEarlyStopping():
    """Early-stopping tracker for RL training, modelled on Keras' EarlyStopping.

    Call the instance with the latest computed metrics and the global step; it
    tracks the best value of ``monitor``, triggers registered checkpointers on
    improvement, and sets ``stop_training`` once ``patience`` iterations pass
    without improvement.
    """

    def __init__(self,
                 monitor='AverageReturn',
                 min_delta=0,
                 patience=0,
                 warmup=0,
                 verbose=0,
                 mode='max',
                 baseline=None):
        """Initialize an AgentEarlyStopping.

        Arguments:
            monitor: Quantity to be monitored.
            min_delta: Minimum change in the monitored quantity
                to qualify as an improvement, i.e. an absolute
                change of less than min_delta, will count as no
                improvement.
            patience: Number of iterations with no improvement
                after which training will be stopped.
            warmup: Number of iterations to wait till starts to
                take monitor quantity.
            verbose: verbosity mode.
            mode: One of `{"auto", "min", "max"}`. In `min` mode,
                training will stop when the quantity monitored has stopped
                decreasing; in `max` mode it will stop when the quantity
                monitored has stopped increasing; in `auto` mode, the
                direction is inferred from the name of the monitored quantity.
            baseline: Baseline value for the monitored quantity.
                Training will stop if the model doesn't show improvement over
                the baseline.
        """
        self.monitor = monitor
        self.patience = patience
        self.warmup = warmup
        self.verbose = verbose
        self.baseline = baseline
        self.min_delta = abs(min_delta)
        self.checkpointers = []

        if mode not in ['auto', 'min', 'max']:
            logging.warning('EarlyStopping mode %s is unknown, '
                            'fallback to auto mode.', mode)
            mode = 'auto'

        if mode == 'min':
            self.monitor_op = np.less
        elif mode == 'max':
            self.monitor_op = np.greater
        else:
            # 'auto': infer the direction from the metric name. Accuracy and
            # return metrics are maximized; anything else (e.g. losses) is
            # minimized. (Fixed: 'return' metrics were previously minimized.)
            if 'acc' in self.monitor or 'return' in self.monitor.lower():
                self.monitor_op = np.greater
            else:
                self.monitor_op = np.less

        # For minimized metrics, flip min_delta's sign so that a single test
        # `monitor_op(current - min_delta, best)` checks improvement either way.
        if self.monitor_op is np.greater:
            self.min_delta *= 1
        else:
            self.min_delta *= -1

        self.reset()

    def reset(self):
        """Reset all tracking state so the instance can be re-used."""
        self.wait = 0
        self._count = 0
        self.best_step = 0
        self.stopped_step = 0
        self.stop_training = False
        if self.baseline is not None:
            self.best = self.baseline
        else:
            # Worst possible starting value for the chosen direction.
            self.best = np.inf if self.monitor_op is np.less else -np.inf

    # TODO: Calculate a EWMA with alpha = 0.999 and calculate max buffer with
    # length = (log 0.01) / (log 0.999) (being 0.01 minimum weight)
    def __call__(self, computed_metrics, global_step):
        """Update tracking state with the latest metrics at ``global_step``."""
        current = self.get_monitor_value(computed_metrics)
        if current is None:
            return

        if self.warmup <= self._count:
            if self.monitor_op(current - self.min_delta, self.best):
                self.best = current
                self.best_step = global_step
                self.wait = 0
                logging.info(f'Saved best {self.monitor} = {self.best:.5f} on step {global_step}')
                # Persist the improved agent/policy state.
                for checkpointer in self.checkpointers:
                    checkpointer.save(global_step)
            else:
                self.wait += 1
                if self.wait >= self.patience:
                    self.stopped_step = global_step
                    self.stop_training = True
                    logging.info('Global step %05d: early stopping' %
                                 (self.stopped_step + 1))
        else:
            # Still warming up: ignore the metric for now.
            self._count += 1

    def add_checkpointer(self, checkpointer):
        """Register a checkpointer whose ``save(step)`` is called on improvement."""
        self.checkpointers.append(checkpointer)

    def get_monitor_value(self, computed_metrics):
        """Return the monitored metric as a numpy value, or None when missing.

        Fixed: the original called ``.numpy()`` on the result of ``.get()``
        before the missing-key check, raising AttributeError instead of
        logging the intended warning when the metric was absent.
        """
        computed_metrics = computed_metrics or {}
        monitor_value = computed_metrics.get(self.monitor)
        if monitor_value is None:
            logging.warning('Agent early stopping conditioned on metric `%s` '
                            'which is not available. Available metrics are: %s',
                            self.monitor, ','.join(list(computed_metrics.keys())))
            return None
        return monitor_value.numpy()
def evaluate(eval_metrics, eval_tf_env, eval_policy, num_eval_episodes, num_eval_seeds, global_step=None, eval_summary_writer=None, summary_prefix='Metrics', seed=12345):
    """Evaluate a policy on the evaluation environment.

    The evaluation is repeated ``num_eval_seeds`` times (re-seeding every
    sub-environment each pass) and the per-metric mean over the passes is
    returned as an OrderedDict. If both ``global_step`` and
    ``eval_summary_writer`` are given, each mean is also written as a
    TensorBoard scalar under ``summary_prefix``.

    NOTE(review): with num_eval_seeds == 0 the name ``results`` below is
    never bound and this function raises NameError — callers appear to
    always pass at least 1.
    """
    all_results = []
    # Calculate metrics for the number of seeds provided in order to get more accurate results
    for i in range(num_eval_seeds):
        for env in eval_tf_env.envs:
            env.seed(seed + i)
        # One final eval before exiting.
        results = metric_utils.eager_compute(
            eval_metrics,
            eval_tf_env,
            eval_policy,
            num_episodes=num_eval_episodes,
            train_step=global_step,
        )
        all_results.append(results)
    # Calculate mean of the resulting metrics
    # (initialised from the last pass; entries overwritten when averaging)
    mean_results = collections.OrderedDict(results)
    if num_eval_seeds > 1:
        for metric in mean_results:
            metric_sum = 0
            for result in all_results:
                metric_sum = tf_add(metric_sum, result[metric])
            mean_results[metric] = metric_sum / len(all_results)
    # Write on Tensorboard writer if provided
    if global_step and eval_summary_writer:
        with eval_summary_writer.as_default():
            for metric, value in mean_results.items():
                tag = common.join_scope(summary_prefix, metric)
                summary.scalar(name=tag, data=value, step=global_step)
    # Print out the results of the metrics
    log = ['{0} = {1}'.format(metric, value)
           for metric, value in mean_results.items()]
    logging.info('%s \n\t\t %s', '', '\n\t\t '.join(log))
    return mean_results
def train_eval(tf_agent, num_iterations, batch_size, tf_env, eval_tf_env, train_metrics, step_metrics, eval_metrics, global_step, steps_per_episode, num_parallel_environments, collect_per_iteration, train_steps_per_iteration, train_dir, saved_model_dir, eval_summary_writer, num_eval_episodes, num_eval_seeds=1, eval_metrics_callback=None, train_sequence_length=1, initial_collect_steps=1000, log_interval=100, eval_interval=400, policy_checkpoint_interval=400, train_checkpoint_interval=1200, rb_checkpoint_interval=2000, train_model=True, use_tf_functions=True, eval_early_stopping=False, seed=12345):
    """Train and periodically evaluate a TF Agent.

    Supports 'dqn_agent' (step-driven collection + replay dataset) and
    'ppo_agent' (episode-driven collection + gather_all); any other agent
    name raises NotImplementedError. Train/policy checkpoints and a
    SavedModel are written at their respective step intervals; training may
    be stopped early by an AgentEarlyStopping callback, in which case the
    best checkpoint is restored before returning.

    NOTE(review): when train_model is True, eval_metrics_callback must not
    be None — add_checkpointer is called on it unconditionally below.
    Confirm callers always pass a callback in that case.
    """
    # Define seed for each environment
    for i, env in enumerate(tf_env.envs):
        env.seed(seed + i)
    for i, env in enumerate(eval_tf_env.envs):
        env.seed(seed + i)
    tf_env.reset()
    eval_tf_env.reset()
    tf_agent.initialize()
    # Agent type (e.g. 'dqn_agent' / 'ppo_agent') selects driver + train_step.
    agent_name = tf_agent.__dict__['_name']
    # Define policies
    eval_policy = tf_agent.policy
    collect_policy = tf_agent.collect_policy
    # Define Replay Buffer
    replay_buffer_capacity = steps_per_episode * \
        collect_per_iteration // num_parallel_environments + 1
    replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
        data_spec=tf_agent.collect_data_spec,
        batch_size=num_parallel_environments,  # batch_size=tf_env.batch_size,
        max_length=replay_buffer_capacity)
    # Define Dynamic driver to go through the environment depending on the agent
    if train_model:
        if agent_name in ['dqn_agent']:
            collect_driver = dynamic_step_driver.DynamicStepDriver(
                tf_env,
                collect_policy,
                observers=[replay_buffer.add_batch] + train_metrics,
                num_steps=collect_per_iteration)
        elif agent_name in ['ppo_agent']:
            collect_driver = dynamic_episode_driver.DynamicEpisodeDriver(
                tf_env,
                collect_policy,
                observers=[replay_buffer.add_batch] + train_metrics,
                num_episodes=collect_per_iteration)
        else:
            raise NotImplementedError(
                f'{agent_name} agent not yet implemented')
    # Define Checkpointers for train and policy
    train_checkpointer = common.Checkpointer(
        ckpt_dir=train_dir,
        agent=tf_agent,
        global_step=global_step,
        metrics=metric_utils.MetricsGroup(train_metrics, 'train_metrics'))
    policy_checkpointer = common.Checkpointer(
        ckpt_dir=os.path.join(train_dir, 'policy'),
        policy=eval_policy,
        global_step=global_step)
    saved_model = policy_saver.PolicySaver(eval_policy, train_step=global_step)
    # rb_checkpointer = common.Checkpointer(
    #     ckpt_dir=os.path.join(train_dir, 'replay_buffer'),
    #     max_to_keep=1,
    #     replay_buffer=replay_buffer)
    policy_checkpointer.initialize_or_restore()  # TODO: To be tested
    train_checkpointer.initialize_or_restore()
    # rb_checkpointer.initialize_or_restore()
    if train_model:
        # NOTE(review): raises AttributeError if eval_metrics_callback is None.
        eval_metrics_callback.add_checkpointer(policy_checkpointer)
        eval_metrics_callback.add_checkpointer(train_checkpointer)
        # eval_metrics_callback.add_checkpointer(rb_checkpointer)
    # TODO: should they use autograph=False?? as in tf_agents/agents/ppo/examples/v2/train_eval_clip_agent.py
    if use_tf_functions:
        # To speed up collect use common.function.
        collect_driver.run = common.function(collect_driver.run)
        tf_agent.train = common.function(tf_agent.train)
    # Only run Replay buffer initialization if using one of the following agents
    if agent_name in ['dqn_agent']:
        initial_collect_policy = random_tf_policy.RandomTFPolicy(
            tf_env.time_step_spec(), tf_env.action_spec())
        # Collect initial replay data.
        logging.info(
            'Initializing replay buffer by collecting experience for %d steps with '
            'a random policy.', initial_collect_steps)
        dynamic_step_driver.DynamicStepDriver(
            tf_env,
            initial_collect_policy,
            observers=[replay_buffer.add_batch] + train_metrics,
            num_steps=initial_collect_steps).run()
    # num_eval_episodes = eval_tf_env.envs[0].frame_bound[-1] // eval_tf_env.envs[0].steps_per_episode
    logging.info(
        f'Initial eval metric'
    )
    results = evaluate(eval_metrics, eval_tf_env, eval_policy, num_eval_episodes,
                       num_eval_seeds, global_step, eval_summary_writer, summary_prefix='Metrics', seed=seed)
    if eval_early_stopping and not isinstance(eval_metrics_callback, AgentEarlyStopping):
        raise ValueError(
            'Cannot set eval_early_stopping without eval_metric_callback being Agent Early Stopping instance')
    # Once evaluate has been done call eval metrics callback
    if eval_metrics_callback is not None:
        eval_metrics_callback(results, global_step.numpy())
    # Initialize training variables
    time_step = None
    policy_state = collect_policy.get_initial_state(tf_env.batch_size)
    timed_at_step = global_step.numpy()
    collect_time = 0
    train_time = 0
    summary_time = 0
    # Define train_step and generate dataset if required
    if agent_name in ['dqn_agent']:
        # Dataset generates trajectories with shape [Bx2x...]
        logging.info(
            f'Dataset generates trajectories'
        )
        dataset = replay_buffer.as_dataset(
            num_parallel_calls=3,
            sample_batch_size=batch_size,
            # single_deterministic_pass=True,
            num_steps=train_sequence_length + 1).prefetch(3)
        iterator = iter(dataset)
        def train_step():
            # Sample one mini-batch of transitions from the replay buffer.
            experience, _ = next(iterator)
            return tf_agent.train(experience)
    elif agent_name in ['ppo_agent']:
        def train_step():
            # PPO trains on the whole on-policy batch collected this iteration.
            trajectories = replay_buffer.gather_all()
            return tf_agent.train(experience=trajectories)
    else:
        raise NotImplementedError(
            f'{agent_name} agent not yet implemented')
    if use_tf_functions:
        train_step = common.function(train_step)
    logging.info(
        f'Starting training...'
    )
    for _ in range(num_iterations):
        # Collect data
        start_time = time.time()
        if agent_name in ['dqn_agent']:
            time_step, policy_state = collect_driver.run(
                time_step=time_step,
                policy_state=policy_state,
            )
        elif agent_name in ['ppo_agent']:
            collect_driver.run()
        else:
            raise NotImplementedError(
                f'{agent_name} agent not yet implemented')
        collect_time += time.time() - start_time
        # Train on collected data
        start_time = time.time()
        for _ in range(train_steps_per_iteration):
            train_loss = train_step()
        train_time += time.time() - start_time
        # Write on Tensorboard the training results
        start_time = time.time()
        for train_metric in train_metrics:
            train_metric.tf_summaries(
                train_step=global_step, step_metrics=step_metrics)
        summary_time += time.time() - start_time
        # Print out metrics and reset variables
        if global_step.numpy() % log_interval == 0:
            logging.info('step = %d, loss = %f', global_step.numpy(),
                         train_loss.loss)
            steps_per_sec = (global_step.numpy() - timed_at_step) / \
                (train_time + collect_time + summary_time)
            logging.info('%.3f steps/sec', steps_per_sec)
            logging.info('collect_time = %.3f, train_time = %.3f, summary_time = %.3f', collect_time,
                         train_time, summary_time)
            summary.scalar(
                name='global_steps_per_sec', data=steps_per_sec, step=global_step)
            timed_at_step = global_step.numpy()
            collect_time = 0
            train_time = 0
            summary_time = 0
        # Save train checkpoint
        if global_step.numpy() % train_checkpoint_interval == 0:
            start_time = time.time()
            train_checkpointer.save(global_step=global_step.numpy())
            logging.info(
                f'Saving Train lasts: {time.time() - start_time:.3f} s'
            )
        # Save policy checkpoint
        if global_step.numpy() % policy_checkpoint_interval == 0:
            start_time = time.time()
            policy_checkpointer.save(global_step=global_step.numpy())
            saved_model_path = os.path.join(
                saved_model_dir, 'policy_' + ('%d' % global_step.numpy()).zfill(9))
            saved_model.save(saved_model_path)
            logging.info(
                f'Saving Policy lasts: {time.time() - start_time:.3f} s'
            )
        # if global_step.numpy() % rb_checkpoint_interval == 0:
        #     start_time = time.time()
        #     rb_checkpointer.save(global_step=global_step.numpy())
        #     logging.info(
        #         f'Saving Replay Buffer lasts: {time.time() - start_time:.3f} s'
        #     )
        # Evaluate on evaluation environment
        if global_step.numpy() % eval_interval == 0:
            start_time = time.time()
            results = evaluate(eval_metrics, eval_tf_env, eval_policy, num_eval_episodes,
                               num_eval_seeds, global_step, eval_summary_writer, summary_prefix='Metrics', seed=seed)
            if eval_metrics_callback is not None:
                eval_metrics_callback(results, global_step.numpy())
            logging.info(
                f'Calculate Evaluation lasts {time.time() - start_time:.3f} s'
            )
        # Stop training if EarlyStopping says so
        if eval_early_stopping and eval_metrics_callback.stop_training:
            logging.info(
                f'Training stopped due to Agent Early Stopping at step: {global_step.numpy()}'
            )
            logging.info(
                f'Best {eval_metrics_callback.monitor} was {eval_metrics_callback.best:.5f} at step {eval_metrics_callback.best_step}'
            )
            def loadBestCheckpoint(checkpointer, ckpt_dir=None):
                # Rebuild the checkpoint path so it points at the best step.
                latest_dir = checkpointer._manager.latest_checkpoint
                if latest_dir is not None:
                    best_dir = latest_dir.split('-')
                    best_dir[-1] = str(eval_metrics_callback.best_step)
                    best_dir = '-'.join(best_dir)
                elif ckpt_dir is not None:
                    best_dir = os.path.join(
                        ckpt_dir, f'ckpt-{eval_metrics_callback.best_step}')
                else:
                    raise ValueError(
                        'Checkpointer with previous checkpoints or ckpt_dir must be provided')
                # NOTE(review): this always restores policy_checkpointer and
                # ignores the `checkpointer` argument — the train checkpointer
                # is never actually restored below. Confirm whether
                # `checkpointer._checkpoint` was intended here.
                policy_checkpointer \
                    ._checkpoint \
                    .restore(best_dir)
            # Load policy with best evaluation metric according to EarlyStopping
            loadBestCheckpoint(
                policy_checkpointer, os.path.join(train_dir, 'policy'))
            loadBestCheckpoint(train_checkpointer, train_dir)
            # loadBestCheckpoint(rb_checkpointer, os.path.join(train_dir, 'replay_buffer'))
            eval_metrics_callback.reset()
            break
| <filename>RL/utils.py
""" Utils for RL agent """
import numpy as np
import collections
import os
import time
from absl import logging
from tensorflow import equal as tf_equal
from tensorflow import add as tf_add
from tensorflow.compat.v2 import summary
from gym import spaces
from tf_agents.environments import tf_py_environment, parallel_py_environment
from tf_agents.environments.gym_wrapper import GymWrapper
from tf_agents.eval import metric_utils
from tf_agents.utils import common
from tf_agents.drivers import dynamic_step_driver, dynamic_episode_driver
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.policies import random_tf_policy
from tf_agents.policies import policy_saver
from RL.stock_env import RLStocksEnv, REVENUE_REWARD, PRICE_REWARD
def generateSplitEnvs(
    train_df,
    valid_df,
    test_df,
    window_size,
    steps_per_episode,
    feature_columns,
    reward_type=REVENUE_REWARD,
    max_final_reward=1,
    max_step_reward=0,
    num_parallel_environments=1,
    position_as_observation=True,
    constant_step=False,
    is_training=True,
    seed=12345,
):
    """Build the train / validation / test environments from their DataFrames.

    Returns ``(tf_env, eval_tf_env, test_tf_env)`` as TFPyEnvironment
    instances; the training environment is replicated
    ``num_parallel_environments`` times, each replica seeded with an offset.
    """
    def _build_env(df, env_seed, training):
        # Shared construction recipe: the splits differ only in the source
        # DataFrame, the seed and the training flag.
        env = RLStocksEnv(
            df=df,
            window_size=window_size,
            frame_bound=(window_size, len(df)),
            steps_per_episode=steps_per_episode,
            is_training=training,
            constant_step=constant_step,
            feature_columns=feature_columns,
            position_as_observation=position_as_observation,
            reward_type=reward_type,
            max_final_reward=max_final_reward,
            max_step_reward=max_step_reward,
        )
        env.seed(env_seed)
        env.reset()
        return env
    eval_env = _build_env(valid_df, seed, is_training)
    test_env = _build_env(test_df, seed, is_training)
    # Otherwise raise error on evaluating ChosenActionHistogram metric
    spec_dtype_map = {spaces.Discrete: np.int32}
    wrapped_train_envs = [
        GymWrapper(_build_env(train_df, seed + idx, True),
                   spec_dtype_map=spec_dtype_map)
        for idx in range(num_parallel_environments)
    ]
    # TODO: Implement Parallel Environment (need tf_agents.system.multiprocessing.enable_interactive_mode() added in github last updates)
    if num_parallel_environments == 1:
        tf_env = tf_py_environment.TFPyEnvironment(wrapped_train_envs[0])
    else:
        tf_env = tf_py_environment.TFPyEnvironment(
            parallel_py_environment.ParallelPyEnvironment(wrapped_train_envs))
    eval_tf_env = tf_py_environment.TFPyEnvironment(
        GymWrapper(eval_env, spec_dtype_map=spec_dtype_map))
    test_tf_env = tf_py_environment.TFPyEnvironment(
        GymWrapper(test_env, spec_dtype_map=spec_dtype_map))
    return tf_env, eval_tf_env, test_tf_env
class AgentEarlyStopping():
    """Early stopping for TF-Agents training based on an evaluation metric.

    Instances are called as ``callback(computed_metrics, global_step)`` after
    each evaluation. Once the monitored metric has not improved for
    ``patience`` consecutive evaluations, ``stop_training`` is set. Every
    registered checkpointer is saved whenever a new best value is observed.
    """

    def __init__(self,
                 monitor='AverageReturn',
                 min_delta=0,
                 patience=0,
                 warmup=0,
                 verbose=0,
                 mode='max',
                 baseline=None):
        """Initialize an AgentEarlyStopping.

        Arguments:
            monitor: Quantity to be monitored.
            min_delta: Minimum change in the monitored quantity to qualify
                as an improvement, i.e. an absolute change of less than
                min_delta will count as no improvement.
            patience: Number of iterations with no improvement after which
                training will be stopped.
            warmup: Number of iterations to wait before starting to track
                the monitored quantity.
            verbose: verbosity mode (currently stored but unused).
            mode: One of `{"auto", "min", "max"}`. In `min` mode training
                stops when the quantity stops decreasing; in `max` mode when
                it stops increasing; in `auto` mode the direction is inferred
                from the monitored quantity's name.
            baseline: Baseline value for the monitored quantity. Training
                stops if the model shows no improvement over the baseline.
        """
        self.monitor = monitor
        self.patience = patience
        self.warmup = warmup
        self.verbose = verbose
        self.baseline = baseline
        self.min_delta = abs(min_delta)
        self.checkpointers = []
        if mode not in ['auto', 'min', 'max']:
            logging.warning('EarlyStopping mode %s is unknown, '
                            'fallback to auto mode.', mode)
            mode = 'auto'
        if mode == 'min':
            self.monitor_op = np.less
        elif mode == 'max':
            self.monitor_op = np.greater
        else:
            # Auto mode: infer the improvement direction from the name.
            # NOTE(review): 'return'-named metrics map to np.less here, which
            # would *minimize* AverageReturn in auto mode — looks inverted,
            # but the default mode='max' never reaches this branch. Confirm
            # before relying on mode='auto'.
            if 'acc' in self.monitor:
                self.monitor_op = np.greater
            elif 'return' in self.monitor.lower():
                self.monitor_op = np.less
            else:
                self.monitor_op = np.less
        # Fold the improvement direction into min_delta's sign so that
        # monitor_op(current - min_delta, best) tests for improvement.
        if self.monitor_op == np.greater:
            self.min_delta *= 1
        else:
            self.min_delta *= -1
        self.reset()

    def reset(self):
        """Clear all running state so the instance can be re-used."""
        self.wait = 0
        self._count = 0
        self.best_step = 0
        self.stopped_step = 0
        self.stop_training = False
        if self.baseline is not None:
            self.best = self.baseline
        else:
            # Worst possible starting value for the chosen direction.
            # (np.inf instead of np.Inf: the capitalized alias was removed
            # in NumPy 2.0.)
            self.best = np.inf if self.monitor_op == np.less else -np.inf

    # TODO: Calculate a EWMA with alpha = 0.999 and calculate max buffer with length = (log 0.01) / (log 0.999) (being 0.01 minimum weight)
    def __call__(self, computed_metrics, global_step):
        """Record one evaluation result and update the early-stopping state."""
        current = self.get_monitor_value(computed_metrics)
        if current is None:
            return
        # Only start tracking improvements once warmup iterations have passed.
        if self.warmup <= self._count:
            if self.monitor_op(current - self.min_delta, self.best):
                self.best = current
                self.best_step = global_step
                self.wait = 0
                logging.info(f'Saved best {self.monitor} = {self.best:.5f} on step {global_step}')
                # Persist every registered checkpointer at the new best step.
                for checkpointer in self.checkpointers:
                    checkpointer.save(global_step)
            else:
                self.wait += 1
                if self.wait >= self.patience:
                    self.stopped_step = global_step
                    self.stop_training = True
                    logging.info('Global step %05d: early stopping' %
                                 (self.stopped_step + 1))
        else:
            self._count += 1

    def add_checkpointer(self, checkpointer):
        """Register a checkpointer to be saved on every new best value."""
        self.checkpointers.append(checkpointer)

    def get_monitor_value(self, computed_metrics):
        """Return the monitored metric as a plain scalar, or None when absent.

        Bug fix: previously ``.numpy()`` was called before the None check,
        so a missing metric raised AttributeError and the warning below was
        unreachable.
        """
        computed_metrics = computed_metrics or {}
        metric = computed_metrics.get(self.monitor)
        if metric is None:
            logging.warning('Agent early stopping conditioned on metric `%s` '
                            'which is not available. Available metrics are: %s',
                            self.monitor, ','.join(list(computed_metrics.keys())))
            return None
        return metric.numpy()
def evaluate(eval_metrics, eval_tf_env, eval_policy, num_eval_episodes, num_eval_seeds, global_step=None, eval_summary_writer=None, summary_prefix='Metrics', seed=12345):
    """Run the policy on the evaluation environment and average the metrics.

    The evaluation is repeated ``num_eval_seeds`` times with offset seeds;
    the returned OrderedDict holds the per-metric mean across runs (or the
    single run's values when only one seed is used). Means are optionally
    written to ``eval_summary_writer`` and always logged.
    """
    per_seed_results = []
    for offset in range(num_eval_seeds):
        # Re-seed every sub-environment so each pass sees a distinct rollout.
        for env in eval_tf_env.envs:
            env.seed(seed + offset)
        results = metric_utils.eager_compute(
            eval_metrics,
            eval_tf_env,
            eval_policy,
            num_episodes=num_eval_episodes,
            train_step=global_step,
        )
        per_seed_results.append(results)
    # Start from the last run; with several seeds, replace each entry with
    # the mean across runs.
    mean_results = collections.OrderedDict(results)
    if num_eval_seeds > 1:
        for name in mean_results:
            total = 0
            for run in per_seed_results:
                total = tf_add(total, run[name])
            mean_results[name] = total / len(per_seed_results)
    # Emit TensorBoard scalars when a writer and step are available.
    if global_step and eval_summary_writer:
        with eval_summary_writer.as_default():
            for name, value in mean_results.items():
                summary.scalar(name=common.join_scope(summary_prefix, name),
                               data=value, step=global_step)
    # Log a human-readable summary of every metric.
    lines = ['{0} = {1}'.format(name, value)
             for name, value in mean_results.items()]
    logging.info('%s \n\t\t %s', '', '\n\t\t '.join(lines))
    return mean_results
def train_eval(tf_agent, num_iterations, batch_size, tf_env, eval_tf_env, train_metrics, step_metrics, eval_metrics, global_step, steps_per_episode, num_parallel_environments, collect_per_iteration, train_steps_per_iteration, train_dir, saved_model_dir, eval_summary_writer, num_eval_episodes, num_eval_seeds=1, eval_metrics_callback=None, train_sequence_length=1, initial_collect_steps=1000, log_interval=100, eval_interval=400, policy_checkpoint_interval=400, train_checkpoint_interval=1200, rb_checkpoint_interval=2000, train_model=True, use_tf_functions=True, eval_early_stopping=False, seed=12345):
    """Train and periodically evaluate a TF Agent.

    Supports 'dqn_agent' (step-driven collection + replay dataset) and
    'ppo_agent' (episode-driven collection + gather_all); any other agent
    name raises NotImplementedError. Train/policy checkpoints and a
    SavedModel are written at their respective step intervals; training may
    be stopped early by an AgentEarlyStopping callback, in which case the
    best checkpoint is restored before returning.

    NOTE(review): when train_model is True, eval_metrics_callback must not
    be None — add_checkpointer is called on it unconditionally below.
    Confirm callers always pass a callback in that case.
    """
    # Define seed for each environment
    for i, env in enumerate(tf_env.envs):
        env.seed(seed + i)
    for i, env in enumerate(eval_tf_env.envs):
        env.seed(seed + i)
    tf_env.reset()
    eval_tf_env.reset()
    tf_agent.initialize()
    # Agent type (e.g. 'dqn_agent' / 'ppo_agent') selects driver + train_step.
    agent_name = tf_agent.__dict__['_name']
    # Define policies
    eval_policy = tf_agent.policy
    collect_policy = tf_agent.collect_policy
    # Define Replay Buffer
    replay_buffer_capacity = steps_per_episode * \
        collect_per_iteration // num_parallel_environments + 1
    replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
        data_spec=tf_agent.collect_data_spec,
        batch_size=num_parallel_environments,  # batch_size=tf_env.batch_size,
        max_length=replay_buffer_capacity)
    # Define Dynamic driver to go through the environment depending on the agent
    if train_model:
        if agent_name in ['dqn_agent']:
            collect_driver = dynamic_step_driver.DynamicStepDriver(
                tf_env,
                collect_policy,
                observers=[replay_buffer.add_batch] + train_metrics,
                num_steps=collect_per_iteration)
        elif agent_name in ['ppo_agent']:
            collect_driver = dynamic_episode_driver.DynamicEpisodeDriver(
                tf_env,
                collect_policy,
                observers=[replay_buffer.add_batch] + train_metrics,
                num_episodes=collect_per_iteration)
        else:
            raise NotImplementedError(
                f'{agent_name} agent not yet implemented')
    # Define Checkpointers for train and policy
    train_checkpointer = common.Checkpointer(
        ckpt_dir=train_dir,
        agent=tf_agent,
        global_step=global_step,
        metrics=metric_utils.MetricsGroup(train_metrics, 'train_metrics'))
    policy_checkpointer = common.Checkpointer(
        ckpt_dir=os.path.join(train_dir, 'policy'),
        policy=eval_policy,
        global_step=global_step)
    saved_model = policy_saver.PolicySaver(eval_policy, train_step=global_step)
    # rb_checkpointer = common.Checkpointer(
    #     ckpt_dir=os.path.join(train_dir, 'replay_buffer'),
    #     max_to_keep=1,
    #     replay_buffer=replay_buffer)
    policy_checkpointer.initialize_or_restore()  # TODO: To be tested
    train_checkpointer.initialize_or_restore()
    # rb_checkpointer.initialize_or_restore()
    if train_model:
        # NOTE(review): raises AttributeError if eval_metrics_callback is None.
        eval_metrics_callback.add_checkpointer(policy_checkpointer)
        eval_metrics_callback.add_checkpointer(train_checkpointer)
        # eval_metrics_callback.add_checkpointer(rb_checkpointer)
    # TODO: should they use autograph=False?? as in tf_agents/agents/ppo/examples/v2/train_eval_clip_agent.py
    if use_tf_functions:
        # To speed up collect use common.function.
        collect_driver.run = common.function(collect_driver.run)
        tf_agent.train = common.function(tf_agent.train)
    # Only run Replay buffer initialization if using one of the following agents
    if agent_name in ['dqn_agent']:
        initial_collect_policy = random_tf_policy.RandomTFPolicy(
            tf_env.time_step_spec(), tf_env.action_spec())
        # Collect initial replay data.
        logging.info(
            'Initializing replay buffer by collecting experience for %d steps with '
            'a random policy.', initial_collect_steps)
        dynamic_step_driver.DynamicStepDriver(
            tf_env,
            initial_collect_policy,
            observers=[replay_buffer.add_batch] + train_metrics,
            num_steps=initial_collect_steps).run()
    # num_eval_episodes = eval_tf_env.envs[0].frame_bound[-1] // eval_tf_env.envs[0].steps_per_episode
    logging.info(
        f'Initial eval metric'
    )
    results = evaluate(eval_metrics, eval_tf_env, eval_policy, num_eval_episodes,
                       num_eval_seeds, global_step, eval_summary_writer, summary_prefix='Metrics', seed=seed)
    if eval_early_stopping and not isinstance(eval_metrics_callback, AgentEarlyStopping):
        raise ValueError(
            'Cannot set eval_early_stopping without eval_metric_callback being Agent Early Stopping instance')
    # Once evaluate has been done call eval metrics callback
    if eval_metrics_callback is not None:
        eval_metrics_callback(results, global_step.numpy())
    # Initialize training variables
    time_step = None
    policy_state = collect_policy.get_initial_state(tf_env.batch_size)
    timed_at_step = global_step.numpy()
    collect_time = 0
    train_time = 0
    summary_time = 0
    # Define train_step and generate dataset if required
    if agent_name in ['dqn_agent']:
        # Dataset generates trajectories with shape [Bx2x...]
        logging.info(
            f'Dataset generates trajectories'
        )
        dataset = replay_buffer.as_dataset(
            num_parallel_calls=3,
            sample_batch_size=batch_size,
            # single_deterministic_pass=True,
            num_steps=train_sequence_length + 1).prefetch(3)
        iterator = iter(dataset)
        def train_step():
            # Sample one mini-batch of transitions from the replay buffer.
            experience, _ = next(iterator)
            return tf_agent.train(experience)
    elif agent_name in ['ppo_agent']:
        def train_step():
            # PPO trains on the whole on-policy batch collected this iteration.
            trajectories = replay_buffer.gather_all()
            return tf_agent.train(experience=trajectories)
    else:
        raise NotImplementedError(
            f'{agent_name} agent not yet implemented')
    if use_tf_functions:
        train_step = common.function(train_step)
    logging.info(
        f'Starting training...'
    )
    for _ in range(num_iterations):
        # Collect data
        start_time = time.time()
        if agent_name in ['dqn_agent']:
            time_step, policy_state = collect_driver.run(
                time_step=time_step,
                policy_state=policy_state,
            )
        elif agent_name in ['ppo_agent']:
            collect_driver.run()
        else:
            raise NotImplementedError(
                f'{agent_name} agent not yet implemented')
        collect_time += time.time() - start_time
        # Train on collected data
        start_time = time.time()
        for _ in range(train_steps_per_iteration):
            train_loss = train_step()
        train_time += time.time() - start_time
        # Write on Tensorboard the training results
        start_time = time.time()
        for train_metric in train_metrics:
            train_metric.tf_summaries(
                train_step=global_step, step_metrics=step_metrics)
        summary_time += time.time() - start_time
        # Print out metrics and reset variables
        if global_step.numpy() % log_interval == 0:
            logging.info('step = %d, loss = %f', global_step.numpy(),
                         train_loss.loss)
            steps_per_sec = (global_step.numpy() - timed_at_step) / \
                (train_time + collect_time + summary_time)
            logging.info('%.3f steps/sec', steps_per_sec)
            logging.info('collect_time = %.3f, train_time = %.3f, summary_time = %.3f', collect_time,
                         train_time, summary_time)
            summary.scalar(
                name='global_steps_per_sec', data=steps_per_sec, step=global_step)
            timed_at_step = global_step.numpy()
            collect_time = 0
            train_time = 0
            summary_time = 0
        # Save train checkpoint
        if global_step.numpy() % train_checkpoint_interval == 0:
            start_time = time.time()
            train_checkpointer.save(global_step=global_step.numpy())
            logging.info(
                f'Saving Train lasts: {time.time() - start_time:.3f} s'
            )
        # Save policy checkpoint
        if global_step.numpy() % policy_checkpoint_interval == 0:
            start_time = time.time()
            policy_checkpointer.save(global_step=global_step.numpy())
            saved_model_path = os.path.join(
                saved_model_dir, 'policy_' + ('%d' % global_step.numpy()).zfill(9))
            saved_model.save(saved_model_path)
            logging.info(
                f'Saving Policy lasts: {time.time() - start_time:.3f} s'
            )
        # if global_step.numpy() % rb_checkpoint_interval == 0:
        #     start_time = time.time()
        #     rb_checkpointer.save(global_step=global_step.numpy())
        #     logging.info(
        #         f'Saving Replay Buffer lasts: {time.time() - start_time:.3f} s'
        #     )
        # Evaluate on evaluation environment
        if global_step.numpy() % eval_interval == 0:
            start_time = time.time()
            results = evaluate(eval_metrics, eval_tf_env, eval_policy, num_eval_episodes,
                               num_eval_seeds, global_step, eval_summary_writer, summary_prefix='Metrics', seed=seed)
            if eval_metrics_callback is not None:
                eval_metrics_callback(results, global_step.numpy())
            logging.info(
                f'Calculate Evaluation lasts {time.time() - start_time:.3f} s'
            )
        # Stop training if EarlyStopping says so
        if eval_early_stopping and eval_metrics_callback.stop_training:
            logging.info(
                f'Training stopped due to Agent Early Stopping at step: {global_step.numpy()}'
            )
            logging.info(
                f'Best {eval_metrics_callback.monitor} was {eval_metrics_callback.best:.5f} at step {eval_metrics_callback.best_step}'
            )
            def loadBestCheckpoint(checkpointer, ckpt_dir=None):
                # Rebuild the checkpoint path so it points at the best step.
                latest_dir = checkpointer._manager.latest_checkpoint
                if latest_dir is not None:
                    best_dir = latest_dir.split('-')
                    best_dir[-1] = str(eval_metrics_callback.best_step)
                    best_dir = '-'.join(best_dir)
                elif ckpt_dir is not None:
                    best_dir = os.path.join(
                        ckpt_dir, f'ckpt-{eval_metrics_callback.best_step}')
                else:
                    raise ValueError(
                        'Checkpointer with previous checkpoints or ckpt_dir must be provided')
                # NOTE(review): this always restores policy_checkpointer and
                # ignores the `checkpointer` argument — the train checkpointer
                # is never actually restored below. Confirm whether
                # `checkpointer._checkpoint` was intended here.
                policy_checkpointer \
                    ._checkpoint \
                    .restore(best_dir)
            # Load policy with best evaluation metric according to EarlyStopping
            loadBestCheckpoint(
                policy_checkpointer, os.path.join(train_dir, 'policy'))
            loadBestCheckpoint(train_checkpointer, train_dir)
            # loadBestCheckpoint(rb_checkpointer, os.path.join(train_dir, 'replay_buffer'))
            eval_metrics_callback.reset()
            break
| en | 0.749778 | Utils for RL agent Create environments for train, validation and test based on their respective DataFrames and properties # Otherwise raise error on evaluating ChosenActionHistogram metric # TODO: Implement Parallel Environment (need tf_agents.system.multiprocessing.enable_interactive_mode() added in github last updates) Initialize an AgentEarlyStopping. Arguments: monitor: Quantity to be monitored. min_delta: Minimum change in the monitored quantity to qualify as an improvement, i.e. an absolute change of less than min_delta, will count as no improvement. patience: Number of iterations with no improvement after which training will be stopped. warmup: Number of iterations to wait till starts to take monitor quantity. verbose: verbosity mode. mode: One of `{"auto", "min", "max"}`. In `min` mode, training will stop when the quantity monitored has stopped decreasing; in `max` mode it will stop when the quantity monitored has stopped increasing; in `auto` mode, the direction is automatically inferred from the name of the monitored quantity. baseline: Baseline value for the monitored quantity. Training will stop if the model doesn't show improvement over the baseline. # super(AgentEarlyStopping, self).__init__() # Allow instances to be re-used # TODO: Calculate a EWMA with alpha = 0.999 and calculate max buffer with length = (log 0.01) / (log 0.999) (being 0.01 minimum weight) Evaluate policy on the evaluation environment for the specified episodes and metrics # Calculate metrics for the number of seeds provided in order to get more accurated results # One final eval before exiting. 
# Calculate mean of the resulting metrics # Write on Tensorboard writer if provided # Print out the results of the metrics Train and evaluation function of a TF Agent given the properties provided # Define seed for each environment # Define policies # Define Replay Buffer # batch_size=tf_env.batch_size, # Define Dynamic driver to go through the environment depending on the agent # Define Checkpointers for train and policy # rb_checkpointer = common.Checkpointer( # ckpt_dir=os.path.join(train_dir, 'replay_buffer'), # max_to_keep=1, # replay_buffer=replay_buffer) # TODO: To be tested # rb_checkpointer.initialize_or_restore() # eval_metrics_callback.add_checkpointer(rb_checkpointer) # TODO: should they use autograph=False?? as in tf_agents/agents/ppo/examples/v2/train_eval_clip_agent.py # To speed up collect use common.function. # Only run Replay buffer initialization if using one of the following agents # Collect initial replay data. # num_eval_episodes = eval_tf_env.envs[0].frame_bound[-1] // eval_tf_env.envs[0].steps_per_episode # Once evaluate has been done call eval metrics callback # Initialize training variables # Define train_step and generate dataset if required # Dataset generates trajectories with shape [Bx2x...] # single_deterministic_pass=True, # Collect data # Train on collected data # Write on Tensorboard the training results # Print out metrics and reset variables # Save train checkpoint # Save policy checkpoint # if global_step.numpy() % rb_checkpoint_interval == 0: # start_time = time.time() # rb_checkpointer.save(global_step=global_step.numpy()) # logging.info( # f'Saving Replay Buffer lasts: {time.time() - start_time:.3f} s' # ) # Evaluate on evaluation environment # Stop training if EarlyStopping says so # Load policy with best evaluation metric according to EarlyStopping # loadBestCheckpoint(rb_checkpointer, os.path.join(train_dir, 'replay_buffer')) | 1.99867 | 2 |
www/oldhawaii_metadata/apps/api/utilities.py | oldhawaii/oldhawaii-metadata | 1 | 6622592 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import requests
class ResourceApiClient(object):
    """Thin JSON/REST client bound to a single resource collection.

    Methods return parsed JSON (or the affected resource's id) on success
    and ``None`` on any non-success HTTP status; ``delete`` returns a bool.
    """

    def __init__(self, base_url, resource_type):
        """Store the API root URL and the resource collection name."""
        super(ResourceApiClient, self).__init__()
        self.base_url = base_url
        self.resource_type = resource_type

    def endpoint(self):
        """Return the collection endpoint URL, with a trailing slash."""
        return '{0}/{1}/'.format(self.base_url, self.resource_type)

    def create(self, resource_as_json):
        """POST a new resource; return its '_id' on 201, else None."""
        headers = {'Content-Type': 'application/json'}
        r = requests.post(
            self.endpoint(),
            data=json.dumps(resource_as_json),
            headers=headers)
        if r.status_code == 201:
            return r.json()["_id"]
        return None

    def get_all(self, pagination_and_filters=None):
        """GET the whole collection; the optional query string is appended as-is."""
        # A trailing '?' is sent even without filters; servers ignore it and
        # keeping it preserves the original request format.
        url = "{0}?{1}".format(self.endpoint(), pagination_and_filters or '')
        r = requests.get(url)
        return r.json() if r.status_code == 200 else None

    def get_by_id(self, id):
        """GET a single resource by id; return parsed JSON on 200, else None."""
        url = '{0}{1}'.format(self.endpoint(), id)
        r = requests.get(url)
        return r.json() if r.status_code == 200 else None

    def update(self, resource_as_json):
        """PATCH an existing resource; return its '_id' on 200, else None.

        Bug fix: '_id' is popped from a shallow copy, so the caller's dict
        is no longer mutated by this call.
        """
        headers = {'Content-Type': 'application/json'}
        payload = dict(resource_as_json)
        url = '{0}{1}'.format(self.endpoint(), payload.pop('_id'))
        r = requests.patch(
            url,
            data=json.dumps(payload),
            headers=headers)
        if r.status_code == 200:
            return r.json()["_id"]
        return None

    def delete(self, id):
        """DELETE a resource by id; return True on 204, else False."""
        headers = {'Content-Type': 'application/json'}
        url = '{0}{1}'.format(self.endpoint(), id)
        r = requests.delete(url, headers=headers)
        return r.status_code == 204
# vim: filetype=python
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import requests
class ResourceApiClient(object):
def __init__(self, base_url, resource_type):
super(ResourceApiClient, self).__init__()
self.base_url = base_url
self.resource_type = resource_type
def endpoint(self):
return '{0}/{1}/'.format(self.base_url, self.resource_type)
def create(self, resource_as_json):
headers = {'Content-Type': 'application/json'}
r = requests.post(
self.endpoint(),
data=json.dumps(resource_as_json),
headers=headers)
if r.status_code == 201:
return r.json()["_id"]
else:
return None
def get_all(self, pagination_and_filters=None):
url = "{0}?{1}".format(self.endpoint(), pagination_and_filters or '')
r = requests.get(url)
if r.status_code == 200:
return r.json()
else:
return None
def get_by_id(self, id):
url = '{0}{1}'.format(self.endpoint(), id)
r = requests.get(url)
if r.status_code == 200:
return r.json()
else:
return None
def update(self, resource_as_json):
headers = {'Content-Type': 'application/json'}
url = '{0}{1}'.format(self.endpoint(), resource_as_json.pop('_id'))
r = requests.patch(
url,
data=json.dumps(resource_as_json),
headers=headers)
if r.status_code == 200:
return r.json()["_id"]
else:
return None
def delete(self, id):
headers = {'Content-Type': 'application/json'}
url = '{0}{1}'.format(self.endpoint(), id)
r = requests.delete(url, headers=headers)
return True if r.status_code == 204 else False
# vim: filetype=python
| en | 0.259407 | #!/usr/bin/env python # -*- coding: utf-8 -*- # vim: filetype=python | 2.474186 | 2 |
SprityBird/spritybird/python3.5/lib/python3.5/site-packages/openpyxl/writer/tests/test_lxml.py | MobileAnalytics/iPython-Framework | 4 | 6622593 | <reponame>MobileAnalytics/iPython-Framework<filename>SprityBird/spritybird/python3.5/lib/python3.5/site-packages/openpyxl/writer/tests/test_lxml.py
from __future__ import absolute_import
# Copyright (c) 2010-2016 openpyxl
# stdlib
import datetime
import decimal
from io import BytesIO
# package
from openpyxl import Workbook
from lxml.etree import xmlfile
# test imports
import pytest
from openpyxl.tests.helper import compare_xml
@pytest.fixture
def worksheet():
from openpyxl import Workbook
wb = Workbook()
return wb.active
@pytest.mark.lxml_required
@pytest.mark.parametrize("value, expected",
[
(9781231231230, """<c t="n" r="A1"><v>9781231231230</v></c>"""),
(decimal.Decimal('3.14'), """<c t="n" r="A1"><v>3.14</v></c>"""),
(1234567890, """<c t="n" r="A1"><v>1234567890</v></c>"""),
("=sum(1+1)", """<c r="A1"><f>sum(1+1)</f><v></v></c>"""),
(True, """<c t="b" r="A1"><v>1</v></c>"""),
("Hello", """<c t="s" r="A1"><v>0</v></c>"""),
("", """<c r="A1" t="s"></c>"""),
(None, """<c r="A1" t="n"></c>"""),
(datetime.date(2011, 12, 25), """<c r="A1" t="n" s="1"><v>40902</v></c>"""),
])
def test_write_cell(worksheet, value, expected):
from .. lxml_worksheet import write_cell
ws = worksheet
cell = ws['A1']
cell.value = value
out = BytesIO()
with xmlfile(out) as xf:
cell = ws['A1']
write_cell(xf, ws, cell, cell.has_style)
xml = out.getvalue()
diff = compare_xml(xml, expected)
assert diff is None, diff
@pytest.mark.lxml_required
def test_write_cell_string(worksheet):
from .. lxml_worksheet import write_cell
ws = worksheet
ws['A1'] = "Hello"
out = BytesIO()
with xmlfile(out) as xf:
write_cell(xf, ws, ws['A1'])
assert ws.parent.shared_strings == ["Hello"]
@pytest.fixture
def write_rows():
from .. lxml_worksheet import write_rows
return write_rows
@pytest.mark.lxml_required
def test_write_sheetdata(worksheet, write_rows):
ws = worksheet
ws['A1'] = 10
out = BytesIO()
with xmlfile(out) as xf:
write_rows(xf, ws)
xml = out.getvalue()
expected = """<sheetData><row r="1" spans="1:1"><c t="n" r="A1"><v>10</v></c></row></sheetData>"""
diff = compare_xml(xml, expected)
assert diff is None, diff
@pytest.mark.lxml_required
def test_write_formula(worksheet, write_rows):
ws = worksheet
ws['F1'] = 10
ws['F2'] = 32
ws['F3'] = '=F1+F2'
ws['A4'] = '=A1+A2+A3'
ws['B4'] = "=SUM(A10:A14*B10:B14)"
ws.formula_attributes['B4'] = {'t': 'array', 'ref': 'B4:B8'}
out = BytesIO()
with xmlfile(out) as xf:
write_rows(xf, ws)
xml = out.getvalue()
expected = """
<sheetData>
<row r="1" spans="1:6">
<c r="F1" t="n">
<v>10</v>
</c>
</row>
<row r="2" spans="1:6">
<c r="F2" t="n">
<v>32</v>
</c>
</row>
<row r="3" spans="1:6">
<c r="F3">
<f>F1+F2</f>
<v></v>
</c>
</row>
<row r="4" spans="1:6">
<c r="A4">
<f>A1+A2+A3</f>
<v></v>
</c>
<c r="B4">
<f ref="B4:B8" t="array">SUM(A10:A14*B10:B14)</f>
<v></v>
</c>
</row>
</sheetData>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
@pytest.mark.lxml_required
def test_row_height(worksheet, write_rows):
from openpyxl.worksheet.dimensions import RowDimension
ws = worksheet
ws['F1'] = 10
ws.row_dimensions[1] = RowDimension(ws, height=30)
ws.row_dimensions[2] = RowDimension(ws, height=30)
out = BytesIO()
with xmlfile(out) as xf:
write_rows(xf, ws)
xml = out.getvalue()
expected = """
<sheetData>
<row customHeight="1" ht="30" r="1" spans="1:6">
<c r="F1" t="n">
<v>10</v>
</c>
</row>
<row customHeight="1" ht="30" r="2" spans="1:6"></row>
</sheetData>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
| from __future__ import absolute_import
# Copyright (c) 2010-2016 openpyxl
# stdlib
import datetime
import decimal
from io import BytesIO
# package
from openpyxl import Workbook
from lxml.etree import xmlfile
# test imports
import pytest
from openpyxl.tests.helper import compare_xml
@pytest.fixture
def worksheet():
from openpyxl import Workbook
wb = Workbook()
return wb.active
@pytest.mark.lxml_required
@pytest.mark.parametrize("value, expected",
[
(9781231231230, """<c t="n" r="A1"><v>9781231231230</v></c>"""),
(decimal.Decimal('3.14'), """<c t="n" r="A1"><v>3.14</v></c>"""),
(1234567890, """<c t="n" r="A1"><v>1234567890</v></c>"""),
("=sum(1+1)", """<c r="A1"><f>sum(1+1)</f><v></v></c>"""),
(True, """<c t="b" r="A1"><v>1</v></c>"""),
("Hello", """<c t="s" r="A1"><v>0</v></c>"""),
("", """<c r="A1" t="s"></c>"""),
(None, """<c r="A1" t="n"></c>"""),
(datetime.date(2011, 12, 25), """<c r="A1" t="n" s="1"><v>40902</v></c>"""),
])
def test_write_cell(worksheet, value, expected):
from .. lxml_worksheet import write_cell
ws = worksheet
cell = ws['A1']
cell.value = value
out = BytesIO()
with xmlfile(out) as xf:
cell = ws['A1']
write_cell(xf, ws, cell, cell.has_style)
xml = out.getvalue()
diff = compare_xml(xml, expected)
assert diff is None, diff
@pytest.mark.lxml_required
def test_write_cell_string(worksheet):
from .. lxml_worksheet import write_cell
ws = worksheet
ws['A1'] = "Hello"
out = BytesIO()
with xmlfile(out) as xf:
write_cell(xf, ws, ws['A1'])
assert ws.parent.shared_strings == ["Hello"]
@pytest.fixture
def write_rows():
from .. lxml_worksheet import write_rows
return write_rows
@pytest.mark.lxml_required
def test_write_sheetdata(worksheet, write_rows):
ws = worksheet
ws['A1'] = 10
out = BytesIO()
with xmlfile(out) as xf:
write_rows(xf, ws)
xml = out.getvalue()
expected = """<sheetData><row r="1" spans="1:1"><c t="n" r="A1"><v>10</v></c></row></sheetData>"""
diff = compare_xml(xml, expected)
assert diff is None, diff
@pytest.mark.lxml_required
def test_write_formula(worksheet, write_rows):
ws = worksheet
ws['F1'] = 10
ws['F2'] = 32
ws['F3'] = '=F1+F2'
ws['A4'] = '=A1+A2+A3'
ws['B4'] = "=SUM(A10:A14*B10:B14)"
ws.formula_attributes['B4'] = {'t': 'array', 'ref': 'B4:B8'}
out = BytesIO()
with xmlfile(out) as xf:
write_rows(xf, ws)
xml = out.getvalue()
expected = """
<sheetData>
<row r="1" spans="1:6">
<c r="F1" t="n">
<v>10</v>
</c>
</row>
<row r="2" spans="1:6">
<c r="F2" t="n">
<v>32</v>
</c>
</row>
<row r="3" spans="1:6">
<c r="F3">
<f>F1+F2</f>
<v></v>
</c>
</row>
<row r="4" spans="1:6">
<c r="A4">
<f>A1+A2+A3</f>
<v></v>
</c>
<c r="B4">
<f ref="B4:B8" t="array">SUM(A10:A14*B10:B14)</f>
<v></v>
</c>
</row>
</sheetData>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
@pytest.mark.lxml_required
def test_row_height(worksheet, write_rows):
from openpyxl.worksheet.dimensions import RowDimension
ws = worksheet
ws['F1'] = 10
ws.row_dimensions[1] = RowDimension(ws, height=30)
ws.row_dimensions[2] = RowDimension(ws, height=30)
out = BytesIO()
with xmlfile(out) as xf:
write_rows(xf, ws)
xml = out.getvalue()
expected = """
<sheetData>
<row customHeight="1" ht="30" r="1" spans="1:6">
<c r="F1" t="n">
<v>10</v>
</c>
</row>
<row customHeight="1" ht="30" r="2" spans="1:6"></row>
</sheetData>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff | en | 0.178695 | # Copyright (c) 2010-2016 openpyxl # stdlib # package # test imports <c t="n" r="A1"><v>9781231231230</v></c> <c t="n" r="A1"><v>3.14</v></c> <c t="n" r="A1"><v>1234567890</v></c> <c r="A1"><f>sum(1+1)</f><v></v></c> <c t="b" r="A1"><v>1</v></c> <c t="s" r="A1"><v>0</v></c> <c r="A1" t="s"></c> <c r="A1" t="n"></c> <c r="A1" t="n" s="1"><v>40902</v></c> <sheetData><row r="1" spans="1:1"><c t="n" r="A1"><v>10</v></c></row></sheetData> <sheetData> <row r="1" spans="1:6"> <c r="F1" t="n"> <v>10</v> </c> </row> <row r="2" spans="1:6"> <c r="F2" t="n"> <v>32</v> </c> </row> <row r="3" spans="1:6"> <c r="F3"> <f>F1+F2</f> <v></v> </c> </row> <row r="4" spans="1:6"> <c r="A4"> <f>A1+A2+A3</f> <v></v> </c> <c r="B4"> <f ref="B4:B8" t="array">SUM(A10:A14*B10:B14)</f> <v></v> </c> </row> </sheetData> <sheetData> <row customHeight="1" ht="30" r="1" spans="1:6"> <c r="F1" t="n"> <v>10</v> </c> </row> <row customHeight="1" ht="30" r="2" spans="1:6"></row> </sheetData> | 2.153184 | 2 |
tests/controller/test_cdecontroller.py | aueb-wim/HBPMedical-QCtool | 8 | 6622594 | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import pytest
from mipqctool.controller.cdescontroller import CDEsController
JSON_PATH1 = 'tests/test_datasets/simple_dc_cdes.json'
JSON_PATH2 = 'tests/test_datasets/dementia_cdes_v3.json'
CDES1 = ['dataset', 'av45' , 'fdg', 'pib','minimentalstate', 'montrealcognitiveassessment',
'updrshy', 'updrstotal', 'agegroup', 'gender', 'handedness', 'subjectage', 'subjectageyears']
@pytest.mark.parametrize('jsonpath, csvpath', [
(JSON_PATH1, 'tests/test_datasets/cde_headers_only.csv')
])
def test_save_csv_headers_only(jsonpath, csvpath):
test = CDEsController.from_disc(jsonpath)
test.save_csv_headers_only(csvpath)
@pytest.mark.parametrize('jsonpath, result', [
(JSON_PATH1, CDES1)
])
def test_cde_names(jsonpath, result):
test = CDEsController.from_disc(jsonpath)
assert set(test.cde_headers) == set(result) | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import pytest
from mipqctool.controller.cdescontroller import CDEsController
JSON_PATH1 = 'tests/test_datasets/simple_dc_cdes.json'
JSON_PATH2 = 'tests/test_datasets/dementia_cdes_v3.json'
CDES1 = ['dataset', 'av45' , 'fdg', 'pib','minimentalstate', 'montrealcognitiveassessment',
'updrshy', 'updrstotal', 'agegroup', 'gender', 'handedness', 'subjectage', 'subjectageyears']
@pytest.mark.parametrize('jsonpath, csvpath', [
(JSON_PATH1, 'tests/test_datasets/cde_headers_only.csv')
])
def test_save_csv_headers_only(jsonpath, csvpath):
test = CDEsController.from_disc(jsonpath)
test.save_csv_headers_only(csvpath)
@pytest.mark.parametrize('jsonpath, result', [
(JSON_PATH1, CDES1)
])
def test_cde_names(jsonpath, result):
test = CDEsController.from_disc(jsonpath)
assert set(test.cde_headers) == set(result) | none | 1 | 2.063974 | 2 | |
experiment.py | fahadahaf/ZnFIR | 2 | 6622595 | import gensim
import numpy as np
import os
import pandas as pd
import pdb
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from argparse import ArgumentParser
from fastprogress import progress_bar
from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence
from random import randint
from sklearn import metrics
from torch.backends import cudnn
from torch.utils import data
from torch.utils.data import Dataset, DataLoader
from torch.autograd import Variable
from torch.utils.data.sampler import SubsetRandomSampler
from torch.autograd import Function # import Function to create custom activations
from torch.nn.parameter import Parameter # import Parameter to create custom activations with learnable parameters
from torch import optim # import optimizers for demonstrations
#local imports
from datasets import DatasetLoadAll, DatasetLazyLoad, DatasetEmbd
from extract_motifs import get_motif
from models import Basset, AttentionNet
from utils import get_params_dict
###########################################################################################################################
#--------------------------------------------Train and Evaluate Functions-------------------------------------------------#
###########################################################################################################################
def trainRegular(model, device, iterator, optimizer, criterion, useEmb=False):
model.train()
running_loss = 0.0
train_auc = []
for batch_idx, (headers, seqs, data, target) in enumerate(iterator):
#pdb.set_trace()
if useEmb:
data, target = data.to(device,dtype=torch.long), target.to(device,dtype=torch.long)
else:
data, target = data.to(device,dtype=torch.float), target.to(device,dtype=torch.long)
optimizer.zero_grad()
outputs = model(data)
loss = criterion(outputs, target)
#loss = F.binary_cross_entropy(outputs, target)
labels = target.cpu().numpy()
softmax = torch.nn.Softmax(dim=1)
pred = softmax(outputs)
pred = pred.cpu().detach().numpy()
#print(pred)
try:
train_auc.append(metrics.roc_auc_score(labels, pred[:,1]))
except:
train_auc.append(0.0)
loss.backward()
optimizer.step()
running_loss += loss.item()
#return outputs
return running_loss/len(iterator),train_auc
def evaluateRegular(net, device, iterator, criterion, out_dirc=None, getCNN=False, storeCNNout = False, getSeqs = False, useEmb=False):
#pdb.set_trace()
running_loss = 0.0
valid_auc = []
roc = np.asarray([[],[]]).T
per_batch_labelPreds = {}
per_batch_CNNoutput = {}
per_batch_testSeqs = {}
per_batch_info = {}
net.eval()
CNNlayer = net.layer1[0:3]
CNNlayer.eval()
with torch.no_grad():
for batch_idx, (headers, seqs, data, target) in enumerate(iterator):
if useEmb:
data, target = data.to(device,dtype=torch.long), target.to(device,dtype=torch.long)
else:
data, target = data.to(device,dtype=torch.float), target.to(device,dtype=torch.long)
# Model computations
outputs = net(data)
loss = criterion(outputs, target)
softmax = torch.nn.Softmax(dim=1)
labels=target.cpu().numpy()
pred = softmax(outputs)
pred=pred.cpu().detach().numpy()
label_pred = np.column_stack((labels,pred[:,1]))
per_batch_labelPreds[batch_idx] = label_pred
roc = np.row_stack((roc,label_pred))
try:
valid_auc.append(metrics.roc_auc_score(labels, pred[:,1]))
except:
valid_auc.append(0.0)
running_loss += loss.item()
if getCNN:
try: #if the network has an embedding layer (input must be embedded as well)
data = net.embedding(data)
outputCNN = CNNlayer(data.permute(0,2,1))
except:
outputCNN = CNNlayer(data)
if storeCNNout:
if not os.path.exists(out_dirc):
os.makedirs(out_dirc)
with open(out_dirc+'/CNNout_batch-'+str(batch_idx)+'.pckl','wb') as f:
pickle.dump(outputCNN.cpu().detach().numpy(),f)
per_batch_CNNoutput[batch_idx] = out_dirc+'/CNNout_batch-'+str(batch_idx)+'.pckl'
else:
per_batch_CNNoutput[batch_idx] = outputCNN.cpu().detach().numpy()
if getSeqs:
per_batch_testSeqs[batch_idx] = np.column_stack((headers,seqs))
labels = roc[:,0]
preds = roc[:,1]
valid_auc = metrics.roc_auc_score(labels,preds)
return running_loss/len(iterator),valid_auc,roc,per_batch_labelPreds,per_batch_CNNoutput,per_batch_testSeqs
###########################################################################################################################
#---------------------------------------------------------End-------------------------------------------------------------#
###########################################################################################################################
def get_indices(dataset_size, test_split, output_dir, shuffle_data=True, seed_val=100, mode='train'):
indices = list(range(dataset_size))
split_val = int(np.floor(test_split*dataset_size))
if shuffle_data:
np.random.seed(seed_val)
np.random.shuffle(indices)
train_indices, test_indices, valid_indices = indices[2*split_val:], indices[:split_val], indices[split_val:2*split_val]
#--save indices for later use, when testing for example---#
if mode=='train':
np.savetxt(output_dir+'/valid_indices.txt', valid_indices, fmt='%s')
np.savetxt(output_dir+'/test_indices.txt', test_indices, fmt='%s')
np.savetxt(output_dir+'/train_indices.txt', train_indices, fmt='%s')
else:
try:
valid_indices = np.loadtxt(output_dir+'/valid_indices.txt', dtype=int)
test_indices = np.loadtxt(output_dir+'/test_indices.txt', dtype=int)
train_indices = np.loadtxt(output_dir+'/train_indices.txt', dtype=int)
except:
print("Error! looks like you haven't trained the model yet. Rerun with --mode train.")
return train_indices, test_indices, valid_indices
def load_datasets(arg_space, use_embds, batchSize, kmer_len=None, embd_size=None, embd_window=None):
"""
Loads and processes the data.
"""
input_prefix = arg_space.inputprefix
output_dir = 'results/'+arg_space.directory
if not os.path.exists(output_dir):
os.makedirs(output_dir)
#save arguments to keep record
with open(output_dir+'/arguments.txt','w') as f:
f.writelines(str(arg_space))
test_split = arg_space.splitperc/100
if arg_space.verbose:
print("test/validation split val: %.2f"%test_split)
modelwv = None
if not use_embds:
if arg_space.deskLoad:
final_dataset = DatasetLazyLoad(input_prefix)
else:
final_dataset = DatasetLoadAll(input_prefix)
#train_indices, test_indices, valid_indices = get_indices(len(final_dataset), test_split, output_dir)
else:
w2v_path = arg_space.wvPath+'/' if arg_space.wvPath[-1]!='/' else arg_space.wvPath #'Word2Vec_Models/'
w2v_filename = 'Word2Vec_Model_kmerLen'+str(kmer_len)+'_win'+str(embd_window)+'_embSize'+str(embd_size)
modelwv = Word2Vec.load(w2v_path+w2v_filename)
data_all = DatasetLoadAll(input_prefix, for_embeddings=True)
final_dataset = pd.merge(data_all.df_seq_final, data_all.df, on='header')[['header','sequence',7]]
final_dataset = DatasetEmbd(final_dataset.values.tolist(), modelwv, kmer_len)
#train_indices, test_indices, valid_indices = get_indices(len(final_dataset), test_split, output_dir)
#pdb.set_trace()
train_indices, test_indices, valid_indices = get_indices(len(final_dataset), test_split, output_dir, mode=arg_space.mode)
train_sampler = SubsetRandomSampler(train_indices)
test_sampler = SubsetRandomSampler(test_indices)
valid_sampler = SubsetRandomSampler(valid_indices)
train_loader = DataLoader(final_dataset, batch_size = batchSize, sampler = train_sampler)
test_loader = DataLoader(final_dataset, batch_size = batchSize, sampler = test_sampler)
valid_loader = DataLoader(final_dataset, batch_size = batchSize, sampler = valid_sampler)
return train_loader, test_loader, valid_loader, modelwv, output_dir
def run_experiment(device, arg_space, params):
"""
Run the main experiment, that is, load the data and train-test the model and generate/store results.
Args:
device: (torch.device) Specifies the device (either gpu or cpu).
arg_space: ArgParser object containing all the user-specified arguments.
params: (dict) Dictionary of hyperparameters.
"""
net_type = arg_space.netType
num_labels = params['num_classes']
get_CNNout = params['get_CNNout']
get_sequences = params['get_seqs']
batch_size = params['batch_size']
max_epochs = params['num_epochs']
use_embds = arg_space.useEmbeddings
kmer_len, embd_size, embd_window = [None]*3
if use_embds:
kmer_len = params['embd_kmersize']
embd_size = params['embd_size']
embd_window = params['embd_window']
prefix = 'modelRes' #Using generic, not sure if we need it as an argument or part of the params dict
train_loader, test_loader, valid_loader, modelwv, output_dir = load_datasets(arg_space, use_embds, batch_size, kmer_len, embd_size, embd_window)
#print(params)
if net_type == 'basset':
if arg_space.verbose:
print("Using Basset-like model.")
net = Basset(params, wvmodel=modelwv, useEmbeddings=use_embds).to(device)
else:
if arg_space.verbose:
print("Using Attention-based model.")
net = AttentionNet(params, wvmodel=modelwv, useEmbeddings=use_embds, device=device).to(device)
criterion = nn.CrossEntropyLoss(reduction='mean')
optimizer = optim.Adam(net.parameters())
##-------Main train/test loop----------##
if arg_space.mode == 'train':
best_valid_loss = np.inf
best_valid_auc = np.inf
for epoch in progress_bar(range(1, max_epochs + 1)):
res_train = trainRegular(net, device, train_loader, optimizer, criterion, useEmb=use_embds)
res_valid = evaluateRegular(net, device, valid_loader, criterion, useEmb=use_embds)
res_train_auc = np.asarray(res_train[1]).mean()
res_train_loss = res_train[0]
res_valid_auc = np.asarray(res_valid[1]).mean()
res_valid_loss = res_valid[0]
if res_valid_loss < best_valid_loss:
best_valid_loss = res_valid_loss
best_valid_auc = res_valid_auc
if arg_space.verbose:
print("Best Validation Loss: %.3f and AUC: %.2f"%(best_valid_loss, best_valid_auc), "\n")
torch.save({'epoch': epoch,
'model_state_dict': net.state_dict(),
'optimizer_state_dict':optimizer.state_dict(),
'loss':res_valid_loss
},output_dir+'/'+prefix+'_model')
try:
checkpoint = torch.load(output_dir+'/'+prefix+'_model')
net.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epoch = checkpoint['epoch']
loss = checkpoint['loss']
except:
print("No pre-trained model found at %s! Please run with --mode set to train."%output_dir)
return
#print(net)
res_test = evaluateRegular(net, device, test_loader, criterion, output_dir+"/Stored_Values",
getCNN=get_CNNout, storeCNNout=arg_space.storeCNN, getSeqs=get_sequences, useEmb=use_embds)
test_loss = res_test[0]
labels = res_test[2][:,0]
preds = res_test[2][:,1]
auc_test = metrics.roc_auc_score(labels, preds)
if arg_space.verbose:
print("Test Loss: %.3f and AUC: %.2f"%(test_loss, auc_test), "\n")
auprc_test = metrics.average_precision_score(labels,preds)
some_res = [['Test_Loss','Test_AUC', 'Test_AUPRC']]
some_res.append([test_loss,auc_test,auprc_test])
#---Calculate roc and prc values---#
fpr,tpr,thresholds = metrics.roc_curve(labels,preds)
precision,recall,thresholdsPR = metrics.precision_recall_curve(labels,preds)
roc_dict = {'fpr':fpr, 'tpr':tpr, 'thresholds':thresholds}
prc_dict = {'precision':precision, 'recall':recall, 'thresholds':thresholdsPR}
#---Store results----#
with open(output_dir+'/'+prefix+'_roc.pckl','wb') as f:
pickle.dump(roc_dict,f)
with open(output_dir+'/'+prefix+'_prc.pckl','wb') as f:
pickle.dump(prc_dict,f)
np.savetxt(output_dir+'/'+prefix+'_results.txt',some_res,fmt='%s',delimiter='\t')
CNNWeights = net.layer1[0].weight.cpu().detach().numpy()
return res_test, CNNWeights
def motif_analysis(res_test, CNNWeights, argSpace, for_negative=False):
"""
Infer regulatory motifs by analyzing the first CNN layer filters.
Args:
res_test: (list) Returned by the experiment function after testing the model.
CNNWeights: (numpy.ndarray) Weights of the first CNN layer.
argSpace: The ArgParser object containing values of all the user-specificed arguments.
for_negative: (bool) Determines if the motif analysis is for the positive or negative set.
"""
output_dir = 'results/'+argSpace.directory
if not os.path.exists(output_dir):
print("Error! output directory doesn't exist.")
return
NumExamples = 0
pos_score_cutoff = argSpace.scoreCutoff
k = 0 #batch number
per_batch_labelPreds = res_test[3][k]
#per_batch_Embdoutput = res_test[5][k]
CNNoutput = res_test[4][k]
if argSpace.storeCNN:
with open(CNNoutput,'rb') as f:
CNNoutput = pickle.load(f)
Seqs = np.asarray(res_test[-1][k])
if for_negative:
tp_indices = [i for i in range(0,per_batch_labelPreds.shape[0]) if (per_batch_labelPreds[i][0]==0 and per_batch_labelPreds[i][1]<(1-pos_score_cutoff))]
else:
tp_indices = [i for i in range(0,per_batch_labelPreds.shape[0]) if (per_batch_labelPreds[i][0]==1 and per_batch_labelPreds[i][1]>(1-pos_score_cutoff))]
NumExamples += len(tp_indices)
CNNoutput = CNNoutput[tp_indices]
Seqs = Seqs[tp_indices]
for k in range(1,len(res_test[3])):
if argSpace.verbose:
print("batch number: ",k)
per_batch_labelPreds = res_test[3][k]
per_batch_CNNoutput = res_test[4][k]
if argSpace.storeCNN:
with open(per_batch_CNNoutput,'rb') as f:
per_batch_CNNoutput = pickle.load(f)
per_batch_seqs = np.asarray(res_test[-1][k])
if for_negative:
tp_indices = [i for i in range(0,per_batch_labelPreds.shape[0]) if (per_batch_labelPreds[i][0]==0 and per_batch_labelPreds[i][1]<(1-pos_score_cutoff))]
else:
tp_indices = [i for i in range(0,per_batch_labelPreds.shape[0]) if (per_batch_labelPreds[i][0]==1 and per_batch_labelPreds[i][1]>(1-pos_score_cutoff))]
NumExamples += len(tp_indices)
CNNoutput = np.concatenate((CNNoutput,per_batch_CNNoutput[tp_indices]),axis=0)
Seqs = np.concatenate((Seqs,per_batch_seqs[tp_indices]))
if argSpace.tfDatabase == None:
dbpath = '/s/jawar/h/nobackup/fahad/MEME_SUITE/motif_databases/CIS-BP/Homo_sapiens.meme'
else:
dbpath = argSpace.tfDatabase
if argSpace.tomtomPath == None:
tomtomPath = '/s/jawar/h/nobackup/fahad/MEME_SUITE/meme-5.0.3/src/tomtom'
else:
tomtomPath = argSpace.tomtomPath
if for_negative:
motif_dir = output_dir + '/Motif_Analysis_Negative'
else:
motif_dir = output_dir + '/Motif_Analysis'
get_motif(CNNWeights, CNNoutput, Seqs, dbpath, dir1 = motif_dir, embd=argSpace.useEmbeddings,
data='DNA', tomtom=tomtomPath, tomtompval=argSpace.tomtomPval, tomtomdist=argSpace.tomtomDist)
return motif_dir, NumExamples
| import gensim
import numpy as np
import os
import pandas as pd
import pdb
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from argparse import ArgumentParser
from fastprogress import progress_bar
from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence
from random import randint
from sklearn import metrics
from torch.backends import cudnn
from torch.utils import data
from torch.utils.data import Dataset, DataLoader
from torch.autograd import Variable
from torch.utils.data.sampler import SubsetRandomSampler
from torch.autograd import Function # import Function to create custom activations
from torch.nn.parameter import Parameter # import Parameter to create custom activations with learnable parameters
from torch import optim # import optimizers for demonstrations
#local imports
from datasets import DatasetLoadAll, DatasetLazyLoad, DatasetEmbd
from extract_motifs import get_motif
from models import Basset, AttentionNet
from utils import get_params_dict
###########################################################################################################################
#--------------------------------------------Train and Evaluate Functions-------------------------------------------------#
###########################################################################################################################
def trainRegular(model, device, iterator, optimizer, criterion, useEmb=False):
model.train()
running_loss = 0.0
train_auc = []
for batch_idx, (headers, seqs, data, target) in enumerate(iterator):
#pdb.set_trace()
if useEmb:
data, target = data.to(device,dtype=torch.long), target.to(device,dtype=torch.long)
else:
data, target = data.to(device,dtype=torch.float), target.to(device,dtype=torch.long)
optimizer.zero_grad()
outputs = model(data)
loss = criterion(outputs, target)
#loss = F.binary_cross_entropy(outputs, target)
labels = target.cpu().numpy()
softmax = torch.nn.Softmax(dim=1)
pred = softmax(outputs)
pred = pred.cpu().detach().numpy()
#print(pred)
try:
train_auc.append(metrics.roc_auc_score(labels, pred[:,1]))
except:
train_auc.append(0.0)
loss.backward()
optimizer.step()
running_loss += loss.item()
#return outputs
return running_loss/len(iterator),train_auc
def evaluateRegular(net, device, iterator, criterion, out_dirc=None, getCNN=False, storeCNNout = False, getSeqs = False, useEmb=False):
#pdb.set_trace()
running_loss = 0.0
valid_auc = []
roc = np.asarray([[],[]]).T
per_batch_labelPreds = {}
per_batch_CNNoutput = {}
per_batch_testSeqs = {}
per_batch_info = {}
net.eval()
CNNlayer = net.layer1[0:3]
CNNlayer.eval()
with torch.no_grad():
for batch_idx, (headers, seqs, data, target) in enumerate(iterator):
if useEmb:
data, target = data.to(device,dtype=torch.long), target.to(device,dtype=torch.long)
else:
data, target = data.to(device,dtype=torch.float), target.to(device,dtype=torch.long)
# Model computations
outputs = net(data)
loss = criterion(outputs, target)
softmax = torch.nn.Softmax(dim=1)
labels=target.cpu().numpy()
pred = softmax(outputs)
pred=pred.cpu().detach().numpy()
label_pred = np.column_stack((labels,pred[:,1]))
per_batch_labelPreds[batch_idx] = label_pred
roc = np.row_stack((roc,label_pred))
try:
valid_auc.append(metrics.roc_auc_score(labels, pred[:,1]))
except:
valid_auc.append(0.0)
running_loss += loss.item()
if getCNN:
try: #if the network has an embedding layer (input must be embedded as well)
data = net.embedding(data)
outputCNN = CNNlayer(data.permute(0,2,1))
except:
outputCNN = CNNlayer(data)
if storeCNNout:
if not os.path.exists(out_dirc):
os.makedirs(out_dirc)
with open(out_dirc+'/CNNout_batch-'+str(batch_idx)+'.pckl','wb') as f:
pickle.dump(outputCNN.cpu().detach().numpy(),f)
per_batch_CNNoutput[batch_idx] = out_dirc+'/CNNout_batch-'+str(batch_idx)+'.pckl'
else:
per_batch_CNNoutput[batch_idx] = outputCNN.cpu().detach().numpy()
if getSeqs:
per_batch_testSeqs[batch_idx] = np.column_stack((headers,seqs))
labels = roc[:,0]
preds = roc[:,1]
valid_auc = metrics.roc_auc_score(labels,preds)
return running_loss/len(iterator),valid_auc,roc,per_batch_labelPreds,per_batch_CNNoutput,per_batch_testSeqs
###########################################################################################################################
#---------------------------------------------------------End-------------------------------------------------------------#
###########################################################################################################################
def get_indices(dataset_size, test_split, output_dir, shuffle_data=True, seed_val=100, mode='train'):
indices = list(range(dataset_size))
split_val = int(np.floor(test_split*dataset_size))
if shuffle_data:
np.random.seed(seed_val)
np.random.shuffle(indices)
train_indices, test_indices, valid_indices = indices[2*split_val:], indices[:split_val], indices[split_val:2*split_val]
#--save indices for later use, when testing for example---#
if mode=='train':
np.savetxt(output_dir+'/valid_indices.txt', valid_indices, fmt='%s')
np.savetxt(output_dir+'/test_indices.txt', test_indices, fmt='%s')
np.savetxt(output_dir+'/train_indices.txt', train_indices, fmt='%s')
else:
try:
valid_indices = np.loadtxt(output_dir+'/valid_indices.txt', dtype=int)
test_indices = np.loadtxt(output_dir+'/test_indices.txt', dtype=int)
train_indices = np.loadtxt(output_dir+'/train_indices.txt', dtype=int)
except:
print("Error! looks like you haven't trained the model yet. Rerun with --mode train.")
return train_indices, test_indices, valid_indices
def load_datasets(arg_space, use_embds, batchSize, kmer_len=None, embd_size=None, embd_window=None):
    """
    Loads and processes the data.

    Args:
        arg_space: ArgParser object with the user-specified arguments
            (input prefix, output directory, split percentage, mode, etc.).
        use_embds: (bool) Use pre-trained word2vec k-mer embeddings instead
            of the default dataset encoding.
        batchSize: (int) Mini-batch size for all three DataLoaders.
        kmer_len, embd_size, embd_window: Word2Vec hyperparameters; only
            used (and required) when use_embds is True.

    Returns:
        (train_loader, test_loader, valid_loader, modelwv, output_dir) where
        modelwv is the loaded Word2Vec model (None when use_embds is False).
    """
    input_prefix = arg_space.inputprefix
    output_dir = 'results/'+arg_space.directory
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    #save arguments to keep record
    with open(output_dir+'/arguments.txt','w') as f:
        f.writelines(str(arg_space))
    test_split = arg_space.splitperc/100
    if arg_space.verbose:
        print("test/validation split val: %.2f"%test_split)
    modelwv = None
    if not use_embds:
        if arg_space.deskLoad:
            # deskLoad: name suggests examples are read from disk on demand
            # rather than loaded fully into memory -- confirm in DatasetLazyLoad.
            final_dataset = DatasetLazyLoad(input_prefix)
        else:
            final_dataset = DatasetLoadAll(input_prefix)
        #train_indices, test_indices, valid_indices = get_indices(len(final_dataset), test_split, output_dir)
    else:
        # Build the Word2Vec model filename from its hyperparameters and load it.
        w2v_path = arg_space.wvPath+'/' if arg_space.wvPath[-1]!='/' else arg_space.wvPath #'Word2Vec_Models/'
        w2v_filename = 'Word2Vec_Model_kmerLen'+str(kmer_len)+'_win'+str(embd_window)+'_embSize'+str(embd_size)
        modelwv = Word2Vec.load(w2v_path+w2v_filename)
        data_all = DatasetLoadAll(input_prefix, for_embeddings=True)
        # NOTE(review): column 7 of the merged dataframe is selected alongside
        # header/sequence -- presumably the label column; confirm against the
        # DatasetLoadAll dataframe layout.
        final_dataset = pd.merge(data_all.df_seq_final, data_all.df, on='header')[['header','sequence',7]]
        final_dataset = DatasetEmbd(final_dataset.values.tolist(), modelwv, kmer_len)
        #train_indices, test_indices, valid_indices = get_indices(len(final_dataset), test_split, output_dir)
    #pdb.set_trace()
    train_indices, test_indices, valid_indices = get_indices(len(final_dataset), test_split, output_dir, mode=arg_space.mode)
    # Each sampler restricts its loader to one disjoint index subset of the
    # same underlying dataset.
    train_sampler = SubsetRandomSampler(train_indices)
    test_sampler = SubsetRandomSampler(test_indices)
    valid_sampler = SubsetRandomSampler(valid_indices)
    train_loader = DataLoader(final_dataset, batch_size = batchSize, sampler = train_sampler)
    test_loader = DataLoader(final_dataset, batch_size = batchSize, sampler = test_sampler)
    valid_loader = DataLoader(final_dataset, batch_size = batchSize, sampler = valid_sampler)
    return train_loader, test_loader, valid_loader, modelwv, output_dir
def run_experiment(device, arg_space, params):
    """
    Run the main experiment, that is, load the data and train-test the model and generate/store results.
    Args:
        device: (torch.device) Specifies the device (either gpu or cpu).
        arg_space: ArgParser object containing all the user-specified arguments.
        params: (dict) Dictionary of hyperparameters.
    Returns:
        (res_test, CNNWeights): the evaluateRegular() output on the test
        split and the first CNN layer's weights as a numpy array, or None
        when no trained checkpoint could be loaded.
    """
    net_type = arg_space.netType
    # NOTE(review): num_labels is read from params but not used in this
    # function body.
    num_labels = params['num_classes']
    get_CNNout = params['get_CNNout']
    get_sequences = params['get_seqs']
    batch_size = params['batch_size']
    max_epochs = params['num_epochs']
    use_embds = arg_space.useEmbeddings
    kmer_len, embd_size, embd_window = [None]*3
    if use_embds:
        kmer_len = params['embd_kmersize']
        embd_size = params['embd_size']
        embd_window = params['embd_window']
    prefix = 'modelRes' #Using generic, not sure if we need it as an argument or part of the params dict
    train_loader, test_loader, valid_loader, modelwv, output_dir = load_datasets(arg_space, use_embds, batch_size, kmer_len, embd_size, embd_window)
    #print(params)
    if net_type == 'basset':
        if arg_space.verbose:
            print("Using Basset-like model.")
        net = Basset(params, wvmodel=modelwv, useEmbeddings=use_embds).to(device)
    else:
        if arg_space.verbose:
            print("Using Attention-based model.")
        net = AttentionNet(params, wvmodel=modelwv, useEmbeddings=use_embds, device=device).to(device)
    criterion = nn.CrossEntropyLoss(reduction='mean')
    optimizer = optim.Adam(net.parameters())
    ##-------Main train/test loop----------##
    if arg_space.mode == 'train':
        # Checkpoint the model whenever the validation loss improves; the
        # AUC recorded is the one at the best-loss epoch (the np.inf
        # initializer of best_valid_auc is a placeholder, never compared).
        best_valid_loss = np.inf
        best_valid_auc = np.inf
        for epoch in progress_bar(range(1, max_epochs + 1)):
            res_train = trainRegular(net, device, train_loader, optimizer, criterion, useEmb=use_embds)
            res_valid = evaluateRegular(net, device, valid_loader, criterion, useEmb=use_embds)
            res_train_auc = np.asarray(res_train[1]).mean()
            res_train_loss = res_train[0]
            res_valid_auc = np.asarray(res_valid[1]).mean()
            res_valid_loss = res_valid[0]
            if res_valid_loss < best_valid_loss:
                best_valid_loss = res_valid_loss
                best_valid_auc = res_valid_auc
                if arg_space.verbose:
                    print("Best Validation Loss: %.3f and AUC: %.2f"%(best_valid_loss, best_valid_auc), "\n")
                torch.save({'epoch': epoch,
                            'model_state_dict': net.state_dict(),
                            'optimizer_state_dict':optimizer.state_dict(),
                            'loss':res_valid_loss
                            },output_dir+'/'+prefix+'_model')
    # Reload the best checkpoint before testing (works for both train and
    # test modes, since train mode saved it above).
    try:
        checkpoint = torch.load(output_dir+'/'+prefix+'_model')
        net.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        epoch = checkpoint['epoch']
        loss = checkpoint['loss']
    # NOTE(review): the bare except also hides unexpected load failures
    # (corrupt file, version mismatch), not just a missing checkpoint.
    except:
        print("No pre-trained model found at %s! Please run with --mode set to train."%output_dir)
        return
    #print(net)
    res_test = evaluateRegular(net, device, test_loader, criterion, output_dir+"/Stored_Values",
                getCNN=get_CNNout, storeCNNout=arg_space.storeCNN, getSeqs=get_sequences, useEmb=use_embds)
    test_loss = res_test[0]
    # res_test[2] packs per-example (label, prediction) pairs column-wise.
    labels = res_test[2][:,0]
    preds = res_test[2][:,1]
    auc_test = metrics.roc_auc_score(labels, preds)
    if arg_space.verbose:
        print("Test Loss: %.3f and AUC: %.2f"%(test_loss, auc_test), "\n")
    auprc_test = metrics.average_precision_score(labels,preds)
    some_res = [['Test_Loss','Test_AUC', 'Test_AUPRC']]
    some_res.append([test_loss,auc_test,auprc_test])
    #---Calculate roc and prc values---#
    fpr,tpr,thresholds = metrics.roc_curve(labels,preds)
    precision,recall,thresholdsPR = metrics.precision_recall_curve(labels,preds)
    roc_dict = {'fpr':fpr, 'tpr':tpr, 'thresholds':thresholds}
    prc_dict = {'precision':precision, 'recall':recall, 'thresholds':thresholdsPR}
    #---Store results----#
    with open(output_dir+'/'+prefix+'_roc.pckl','wb') as f:
        pickle.dump(roc_dict,f)
    with open(output_dir+'/'+prefix+'_prc.pckl','wb') as f:
        pickle.dump(prc_dict,f)
    np.savetxt(output_dir+'/'+prefix+'_results.txt',some_res,fmt='%s',delimiter='\t')
    # First-layer convolutional filters, used later for motif analysis.
    CNNWeights = net.layer1[0].weight.cpu().detach().numpy()
    return res_test, CNNWeights
def motif_analysis(res_test, CNNWeights, argSpace, for_negative=False):
    """
    Infer regulatory motifs by analyzing the first CNN layer filters.
    Args:
        res_test: (list) Returned by the experiment function after testing the model.
        CNNWeights: (numpy.ndarray) Weights of the first CNN layer.
        argSpace: The ArgParser object containing values of all the user-specificed arguments.
        for_negative: (bool) Determines if the motif analysis is for the positive or negative set.
    Returns:
        (motif_dir, NumExamples): the motif-analysis output directory and the
        number of confidently-predicted examples used, or None when the
        results directory doesn't exist.
    """
    output_dir = 'results/'+argSpace.directory
    if not os.path.exists(output_dir):
        print("Error! output directory doesn't exist.")
        return
    NumExamples = 0
    pos_score_cutoff = argSpace.scoreCutoff
    k = 0 #batch number
    # Batch 0 is processed outside the loop below to initialize the
    # accumulator arrays (CNNoutput, Seqs); remaining batches are appended.
    per_batch_labelPreds = res_test[3][k]
    #per_batch_Embdoutput = res_test[5][k]
    CNNoutput = res_test[4][k]
    if argSpace.storeCNN:
        # With storeCNN, res_test[4] holds pickle file paths (written during
        # evaluation) rather than in-memory arrays -- load the array back.
        with open(CNNoutput,'rb') as f:
            CNNoutput = pickle.load(f)
    Seqs = np.asarray(res_test[-1][k])
    # Keep only confidently predicted examples: label-0 examples with a
    # prediction below (1 - scoreCutoff), or label-1 examples above it.
    if for_negative:
        tp_indices = [i for i in range(0,per_batch_labelPreds.shape[0]) if (per_batch_labelPreds[i][0]==0 and per_batch_labelPreds[i][1]<(1-pos_score_cutoff))]
    else:
        tp_indices = [i for i in range(0,per_batch_labelPreds.shape[0]) if (per_batch_labelPreds[i][0]==1 and per_batch_labelPreds[i][1]>(1-pos_score_cutoff))]
    NumExamples += len(tp_indices)
    CNNoutput = CNNoutput[tp_indices]
    Seqs = Seqs[tp_indices]
    # Remaining batches: same filtering, concatenated onto the accumulators.
    for k in range(1,len(res_test[3])):
        if argSpace.verbose:
            print("batch number: ",k)
        per_batch_labelPreds = res_test[3][k]
        per_batch_CNNoutput = res_test[4][k]
        if argSpace.storeCNN:
            with open(per_batch_CNNoutput,'rb') as f:
                per_batch_CNNoutput = pickle.load(f)
        per_batch_seqs = np.asarray(res_test[-1][k])
        if for_negative:
            tp_indices = [i for i in range(0,per_batch_labelPreds.shape[0]) if (per_batch_labelPreds[i][0]==0 and per_batch_labelPreds[i][1]<(1-pos_score_cutoff))]
        else:
            tp_indices = [i for i in range(0,per_batch_labelPreds.shape[0]) if (per_batch_labelPreds[i][0]==1 and per_batch_labelPreds[i][1]>(1-pos_score_cutoff))]
        NumExamples += len(tp_indices)
        CNNoutput = np.concatenate((CNNoutput,per_batch_CNNoutput[tp_indices]),axis=0)
        Seqs = np.concatenate((Seqs,per_batch_seqs[tp_indices]))
    # Fall back to hard-coded cluster paths when the user didn't supply the
    # TF database / tomtom locations. NOTE(review): these defaults are
    # machine-specific and will not exist on other systems.
    if argSpace.tfDatabase == None:
        dbpath = '/s/jawar/h/nobackup/fahad/MEME_SUITE/motif_databases/CIS-BP/Homo_sapiens.meme'
    else:
        dbpath = argSpace.tfDatabase
    if argSpace.tomtomPath == None:
        tomtomPath = '/s/jawar/h/nobackup/fahad/MEME_SUITE/meme-5.0.3/src/tomtom'
    else:
        tomtomPath = argSpace.tomtomPath
    if for_negative:
        motif_dir = output_dir + '/Motif_Analysis_Negative'
    else:
        motif_dir = output_dir + '/Motif_Analysis'
    get_motif(CNNWeights, CNNoutput, Seqs, dbpath, dir1 = motif_dir, embd=argSpace.useEmbeddings,
              data='DNA', tomtom=tomtomPath, tomtompval=argSpace.tomtomPval, tomtomdist=argSpace.tomtomDist)
    return motif_dir, NumExamples
| en | 0.319189 | # import Function to create custom activations # import Parameter to create custom activations with learnable parameters # import optimizers for demonstrations #local imports ########################################################################################################################### #--------------------------------------------Train and Evaluate Functions-------------------------------------------------# ########################################################################################################################### #pdb.set_trace() #loss = F.binary_cross_entropy(outputs, target) #print(pred) #return outputs #pdb.set_trace() # Model computations #if the network has an embedding layer (input must be embedded as well) ########################################################################################################################### #---------------------------------------------------------End-------------------------------------------------------------# ########################################################################################################################### #--save indices for later use, when testing for example---# Loads and processes the data. #save arguments to keep record #train_indices, test_indices, valid_indices = get_indices(len(final_dataset), test_split, output_dir) #'Word2Vec_Models/' #train_indices, test_indices, valid_indices = get_indices(len(final_dataset), test_split, output_dir) #pdb.set_trace() Run the main experiment, that is, load the data and train-test the model and generate/store results. Args: device: (torch.device) Specifies the device (either gpu or cpu). arg_space: ArgParser object containing all the user-specified arguments. params: (dict) Dictionary of hyperparameters. 
#Using generic, not sure if we need it as an argument or part of the params dict #print(params) ##-------Main train/test loop----------## #print(net) #---Calculate roc and prc values---# #---Store results----# Infer regulatory motifs by analyzing the first CNN layer filters. Args: res_test: (list) Returned by the experiment function after testing the model. CNNWeights: (numpy.ndarray) Weights of the first CNN layer. argSpace: The ArgParser object containing values of all the user-specificed arguments. for_negative: (bool) Determines if the motif analysis is for the positive or negative set. #batch number #per_batch_Embdoutput = res_test[5][k] | 1.808526 | 2 |
parser/parser.py | mapr-demos/stock-exchange | 0 | 6622596 | import sys
import os
def create_directory(dir_path):
    """Create dir_path if it doesn't exist yet; report the outcome either way.

    Python 2 code (print statements). NOTE(review): os.mkdir only creates
    the leaf directory -- the parent must already exist.
    """
    if not os.path.exists(dir_path):
        os.mkdir(dir_path)
        print "Directory " + dir_path + " Created "
    else:
        print "Directory " + dir_path + " already exists "
# CLI usage: argv[1] = output base directory, argv[2] = pipe-delimited
# input file. Rows are split into one file per stock symbol under
# <argv[1]>/stock/<SYMBOL>/.
create_directory(sys.argv[1])
dir_path = sys.argv[1] + "/stock"
create_directory(dir_path)
file_path = sys.argv[2]
with open(file_path) as inputFile:
    # The first line is skipped (presumably a header row -- confirm against
    # the input format).
    inputFile.readline()
    line = inputFile.readline()
    symbol = ''
    outputFile = None
    while line:
        parts = line.split('|')
        # An 'END' marker in the first field terminates the data section.
        if parts[0] != 'END':
            current_Symbol = parts[2]  # symbol is the 3rd pipe-delimited field
            if symbol != current_Symbol:
                # New symbol encountered: close the previous per-symbol file
                # and open a fresh one in its own directory.
                symbol = current_Symbol
                current_Symbol = current_Symbol.replace(" ", "_")
                print 'Symbol: ' + symbol
                if outputFile is not None and not outputFile.closed:
                    outputFile.close()
                create_directory(dir_path + "/" + current_Symbol)
                # NOTE(review): file_path[-8:] presumably carries a date
                # suffix from the input filename -- confirm the naming scheme.
                outputFile = open(dir_path + "/" + current_Symbol + "/" + current_Symbol + "_" + file_path[-8:] +
                                  ".txt", "w")
            outputFile.writelines(line)
        line = inputFile.readline()
    # NOTE(review): raises AttributeError if the input had no data rows
    # (outputFile is still None here).
    outputFile.close()
| import sys
import os
def create_directory(dir_path):
if not os.path.exists(dir_path):
os.mkdir(dir_path)
print "Directory " + dir_path + " Created "
else:
print "Directory " + dir_path + " already exists "
create_directory(sys.argv[1])
dir_path = sys.argv[1] + "/stock"
create_directory(dir_path)
file_path = sys.argv[2]
with open(file_path) as inputFile:
inputFile.readline()
line = inputFile.readline()
symbol = ''
outputFile = None
while line:
parts = line.split('|')
if parts[0] != 'END':
current_Symbol = parts[2]
if symbol != current_Symbol:
symbol = current_Symbol
current_Symbol = current_Symbol.replace(" ", "_")
print 'Symbol: ' + symbol
if outputFile is not None and not outputFile.closed:
outputFile.close()
create_directory(dir_path + "/" + current_Symbol)
outputFile = open(dir_path + "/" + current_Symbol + "/" + current_Symbol + "_" + file_path[-8:] +
".txt", "w")
outputFile.writelines(line)
line = inputFile.readline()
outputFile.close()
| none | 1 | 3.440932 | 3 | |
Youtube_videos/random_walk.py | aizardar/WhirlwindTourOfPython | 0 | 6622597 | import random
def random_walk(n):
    """Return the (x, y) position reached after an n-step random walk.

    Each step moves one unit north, south, east, or west (chosen uniformly
    at random), starting from the origin.
    """
    position = (0, 0)
    for _ in range(n):
        step_x, step_y = random.choice([(0, 1), (0, -1), (1, 0), (-1, 0)])
        position = (position[0] + step_x, position[1] + step_y)
    return position
# Demo: take five long walks and report the Manhattan distance travelled.
for _ in range(5):
    coords = random_walk(100000)
    manhattan = abs(coords[0]) + abs(coords[1])
    print(coords, " Distance from origin = ", manhattan)
| import random
def random_walk(n):
""" Return coordinates after 'n' block random walk."""
x,y = 0,0
for i in range(n):
(dx, dy) = random.choice([(0,1), (0,-1), (1,0), (-1,0)])
x += dx
y += dy
return (x, y)
for i in range(5):
walk = random_walk(100000)
print(walk, " Distance from origin = ", abs(walk[0]) + abs(walk[1]))
| en | 0.582464 | Return coordinates after 'n' block random walk. | 4.037637 | 4 |
selfservice-api/src/selfservice_api/resources/__init__.py | bcgov/BCSC-BPS | 2 | 6622598 | # Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exposes all of the resource endpoints mounted in Flask-Blueprint style.
Uses restplus namespaces to mount individual api endpoints into the service.
All services have 2 defaults sets of endpoints:
- ops
- meta
That are used to expose operational health information about the service, and meta information.
"""
from flask_jwt_oidc import AuthError
from flask_restplus import Api
from ..exceptions import BusinessException
from .contact_us import API as CONTACTUS_API
from .meta import API as META_API
from .oidc_config import API as OIDC_CONFIG_API
from .ops import API as OPS_API
from .project import API as PROJECT_API
from .project_audit import API as PROJECTAUDIT_API
from .scope_package import API as SCOPEPACKAGE_API
from .team import API as TEAM_API
from .technical_req import API as TECHNICALREQ_API
from .test_account import API as TESTACCOUNT_API
from .user import API as USER_API
from .values import API as VALUES_API
# This will add the Authorize button to the swagger docs
# oauth2 & openid may not yet be supported by restplus
# Swagger security scheme: clients pass their token verbatim in the
# 'Authorization' request header.
AUTHORIZATIONS = {
    'apikey': {
        'type': 'apiKey',
        'in': 'header',
        'name': 'Authorization'
    }
}
# Root restplus Api object; every namespace registered below is mounted
# under the /api/v1 prefix.
API = Api(
    title='BC Services Card Self Service API',
    version='1.0',
    description='The API for the BC Services Card Application',
    prefix='/api/v1',
    security=['apikey'],
    authorizations=AUTHORIZATIONS)
@API.errorhandler(BusinessException)
def handle_business_exception(error: BusinessException):
    """Render a BusinessException as a JSON error response with CORS headers."""
    body = {'message': error.error}
    cors_headers = {'Access-Control-Allow-Origin': '*'}
    return body, error.status_code, cors_headers
@API.errorhandler(AuthError)
def handle_auth_error(error: AuthError):
    """Handle Auth exception."""
    # Fixed copy-pasted docstring (previously said "Handle Business
    # exception"). All auth failures surface as a generic 'Access Denied'
    # with the status code supplied by flask_jwt_oidc.
    return {'message': 'Access Denied'}, error.status_code, {'Access-Control-Allow-Origin': '*'}
# Mount every endpoint namespace on the shared Api (served under /api/v1).
API.add_namespace(OPS_API, path='/ops')
API.add_namespace(META_API, path='/meta')
API.add_namespace(CONTACTUS_API, path='/contactus')
API.add_namespace(USER_API, path='/user')
API.add_namespace(PROJECT_API, path='/project/info')
API.add_namespace(TEAM_API, path='/project/<int:project_id>/team')
API.add_namespace(TECHNICALREQ_API, path='/project/<int:project_id>/technical-req')
API.add_namespace(OIDC_CONFIG_API, path='/project/<int:project_id>/oidc-config')
API.add_namespace(PROJECTAUDIT_API, path='/project/<int:project_id>/audit')
API.add_namespace(VALUES_API, path='/values')
API.add_namespace(SCOPEPACKAGE_API, path='/scope-package')
API.add_namespace(TESTACCOUNT_API, path='/test-account')
| # Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exposes all of the resource endpoints mounted in Flask-Blueprint style.
Uses restplus namespaces to mount individual api endpoints into the service.
All services have 2 defaults sets of endpoints:
- ops
- meta
That are used to expose operational health information about the service, and meta information.
"""
from flask_jwt_oidc import AuthError
from flask_restplus import Api
from ..exceptions import BusinessException
from .contact_us import API as CONTACTUS_API
from .meta import API as META_API
from .oidc_config import API as OIDC_CONFIG_API
from .ops import API as OPS_API
from .project import API as PROJECT_API
from .project_audit import API as PROJECTAUDIT_API
from .scope_package import API as SCOPEPACKAGE_API
from .team import API as TEAM_API
from .technical_req import API as TECHNICALREQ_API
from .test_account import API as TESTACCOUNT_API
from .user import API as USER_API
from .values import API as VALUES_API
# This will add the Authorize button to the swagger docs
# oauth2 & openid may not yet be supported by restplus
AUTHORIZATIONS = {
'apikey': {
'type': 'apiKey',
'in': 'header',
'name': 'Authorization'
}
}
API = Api(
title='BC Services Card Self Service API',
version='1.0',
description='The API for the BC Services Card Application',
prefix='/api/v1',
security=['apikey'],
authorizations=AUTHORIZATIONS)
@API.errorhandler(BusinessException)
def handle_business_exception(error: BusinessException):
"""Handle Business exception."""
return {'message': error.error}, error.status_code, {'Access-Control-Allow-Origin': '*'}
@API.errorhandler(AuthError)
def handle_auth_error(error: AuthError):
"""Handle Business exception."""
return {'message': 'Access Denied'}, error.status_code, {'Access-Control-Allow-Origin': '*'}
API.add_namespace(OPS_API, path='/ops')
API.add_namespace(META_API, path='/meta')
API.add_namespace(CONTACTUS_API, path='/contactus')
API.add_namespace(USER_API, path='/user')
API.add_namespace(PROJECT_API, path='/project/info')
API.add_namespace(TEAM_API, path='/project/<int:project_id>/team')
API.add_namespace(TECHNICALREQ_API, path='/project/<int:project_id>/technical-req')
API.add_namespace(OIDC_CONFIG_API, path='/project/<int:project_id>/oidc-config')
API.add_namespace(PROJECTAUDIT_API, path='/project/<int:project_id>/audit')
API.add_namespace(VALUES_API, path='/values')
API.add_namespace(SCOPEPACKAGE_API, path='/scope-package')
API.add_namespace(TESTACCOUNT_API, path='/test-account')
| en | 0.829993 | # Copyright © 2019 Province of British Columbia # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Exposes all of the resource endpoints mounted in Flask-Blueprint style. Uses restplus namespaces to mount individual api endpoints into the service. All services have 2 defaults sets of endpoints: - ops - meta That are used to expose operational health information about the service, and meta information. # This will add the Authorize button to the swagger docs # oauth2 & openid may not yet be supported by restplus Handle Business exception. Handle Business exception. | 1.76785 | 2 |
fabfile.py | giraldeau/giraldeau.github.io | 0 | 6622599 | <filename>fabfile.py
from fabric.api import local, run, env, lcd, cd, sudo
from fabric.operations import put
from fabric.context_managers import shell_env, prefix
from fabric.decorators import with_settings
# Fabric connection settings: deploy runs as 'fgiraldeau' on step.polymtl.ca.
env.hosts = ['step.polymtl.ca']
env.user = 'fgiraldeau'
homedir = '/home/etudiant/fgiraldeau/'
basedir = homedir + 'nova_html'  # remote git checkout updated by deploy()
def deploy():
    """Pull the latest master into the remote checkout at basedir."""
    with cd(basedir):
        run("git pull origin master")
from fabric.api import local, run, env, lcd, cd, sudo
from fabric.operations import put
from fabric.context_managers import shell_env, prefix
from fabric.decorators import with_settings
env.hosts = ['step.polymtl.ca']
env.user = 'fgiraldeau'
homedir = '/home/etudiant/fgiraldeau/'
basedir = homedir + 'nova_html'
def deploy():
with cd(basedir):
run("git pull origin master")
| none | 1 | 1.556825 | 2 | |
main.py | RainbowAsteroids/ColorBot | 0 | 6622600 | <filename>main.py
#https://discordapp.com/api/oauth2/authorize?client_id=592772548949835807&permissions=268503072&scope=bot
import discord, asyncio, json, random, os
os.chdir(os.path.dirname(os.path.realpath(__file__)))
client = discord.Client()
token = ""  # bot token intentionally blank here; fill in before running
ready = False #When the bot isn't ready, it should be False.
# guild_id -> {"main_role": id, "service_role": id}; populated by on_ready /
# on_guild_join. Stays None until on_ready has loaded guilds.json.
guilds = None
invite_url = "https://discordapp.com/api/oauth2/authorize?client_id=592772548949835807&permissions=268503072&scope=bot"
helptext = """```
!color is the prefix used
help - this text
leave - leaves this guild and cleans up
invite - bot invite url
```"""
async def permission(message, permission='manage_messages'):
    """Return True if the message author has the named channel permission;
    otherwise tell them off in-channel and return False.

    Args:
        message: The message whose author/channel are checked.
        permission: Attribute name on the resolved Permissions object,
            e.g. 'manage_messages' or 'manage_guild'.
    """
    # getattr instead of eval(): same attribute lookup without executing an
    # arbitrary string as code.
    if getattr(message.channel.permissions_for(message.author), permission):
        return True
    else:
        await message.channel.send(f'<@{message.author.id}> Hmm... seems like you don\'t have the "{permission}" permission, so I can\'t let you do that.')
        return False
async def leave(guild):
    # Clean up before leaving: delete the service role this bot created for
    # the guild (role id recorded in the global guilds map), then leave.
    await guild.get_role(guilds[guild.id]["service_role"]).delete()
    await guild.leave()
async def get_guilds():
    """Reconcile guilds.json with the guilds the client is actually in.

    Saved entries for guilds the bot left are dropped. Guilds the bot is in
    but has no saved state for are messaged and left, so re-inviting the bot
    triggers the normal on_guild_join setup. Returns the reconciled mapping.
    """
    #Grab the current guilds from the API and the saved guilds
    #Get rid of guilds the bot isn't in and leave the guilds we are in
    current_guilds = client.guilds
    current_guild_ids = [guild.id for guild in current_guilds]
    # NOTE(review): the file handle from open() is never closed explicitly.
    unmapped_saved_guilds = json.loads(open('guilds.json').read())
    saved_guilds = {}
    # JSON object keys are always strings; guild ids are ints everywhere else.
    for id in unmapped_saved_guilds: #Make the ids ints
        saved_guilds[int(id)] = unmapped_saved_guilds[id]
    # Snapshot copy so saved_guilds can be mutated while we iterate.
    saved_guilds_immutable = dict(saved_guilds) #To avoid runtime error, reference this dict only
    new_guilds = []
    for guild in current_guilds:
        if not guild.id in saved_guilds.keys():
            new_guilds.append(guild)
    for id in saved_guilds_immutable.keys():
        if not id in current_guild_ids:
            del saved_guilds[id]
    for guild in new_guilds:
        await guild.owner.send("In order to make sure the bot inviting process goes smoothly, it's important to re-invite me.\nhttps://discordapp.com/api/oauth2/authorize?client_id=592772548949835807&permissions=268503072&scope=bot")
        await guild.leave()
    return saved_guilds
async def update():
    """Persist the global guilds mapping to guilds.json."""
    # Context manager so the handle is flushed and closed deterministically
    # instead of relying on garbage collection of the anonymous file object.
    with open('guilds.json', 'w') as f:
        f.write(json.dumps(guilds))
def random_color():
    """Pick a uniformly random RGB color."""
    red, green, blue = (random.randrange(256) for _ in range(3))
    return discord.Color.from_rgb(red, green, blue)
@client.event
async def on_ready():
    """Startup: reconcile guild state, set presence, then loop forever
    recoloring each guild's service role every 5 seconds."""
    global ready
    global guilds
    ready = True
    print(f"Logged in as {client.user.name}")
    guilds = await get_guilds()
    await update()
    await client.change_presence(activity=discord.Game(name="!color help"))
    # Color-rotation loop: never returns. NOTE(review): if on_ready fires
    # again after a reconnect, a second copy of this loop starts -- worth
    # confirming/guarding. Also, the None check spins without awaiting.
    while True:
        if guilds == None: continue #Skip while that async function is running
        await asyncio.sleep(5)
        if ready:
            for id in guilds:
                guild = client.get_guild(id) # Grab the real guild
                await guild.get_role(guilds[id]["service_role"]).edit(color=random_color()) #Set random color
@client.event
async def on_disconnect():
    # Pause the color-rotation loop in on_ready until the client reconnects.
    global ready
    ready = False
# Discord user id allowed to shut the bot down with '!color close'.
# NOTE(review): the real id was redacted in this source ("2<PASSWORD>",
# which is not valid Python); restore it before deploying.
OWNER_ID = 0

@client.event
async def on_message(message):
    """Parse and dispatch '!color' commands (help, leave, invite, close)."""
    #Implement some command stuff here
    if message.content.lower().startswith('!color'):
        command = message.content.lower().split(' ')[1:]
        if len(command) == 0 or command[0] == 'help':
            await message.channel.send(helptext)
        elif command[0] == 'leave':
            # Requires the 'manage_guild' permission; permission() handles
            # the refusal message itself.
            if await permission(message, 'manage_guild'):
                await message.channel.send("Goodbye!")
                await leave(message.guild)
        elif command[0] == 'close':
            # Owner-only shutdown.
            if message.author.id == OWNER_ID:
                await client.close()
        elif command[0] == 'invite':
            await message.channel.send(f"<@{message.author.id}> {invite_url}")
        else:
            await message.channel.send(helptext)
@client.event
async def on_guild_join(guild):
    """On joining a guild: create the color-changing service role, record its
    id, DM setup info to members who can manage roles, and persist state."""
    await asyncio.sleep(1)
    info = '''Hello there <@{}>! {} is a bot that changes the user color of certain users after 5 seconds. It does this by editing the color of a service role to a random color. If you're wondering how roles work, read Discord's Role Management 101 (https://support.discordapp.com/hc/en-us/articles/214836687-Role-Management-101). I've made a role by the name of "Color Changing Role". Feel free to change the name of the role and/or any other properties. Just make sure to keep my highest role above the service role.'''
    main_role = guild.me.top_role
    service_role = await guild.create_role(
        reason=f"{client.user.name}'s service role",
        name="Color Changing Role")
    # Remember both role ids so the rotation loop in on_ready can recolor
    # the service role later.
    guilds[guild.id] = {
        "main_role":main_role.id,
        "service_role":service_role.id,
    }
    for member in guild.members:
        if member.guild_permissions.manage_roles:
            await member.send(info.format(member.id, client.user.name))
    await update()
@client.event
async def on_guild_remove(guild):
    """Forget a guild's saved role state when the bot is removed from it."""
    try:
        del guilds[guild.id]
    except (KeyError, TypeError):
        # KeyError: the guild was never recorded (e.g. left before setup
        # finished) -- previously uncaught and would crash this handler.
        # TypeError: guilds is still None because on_ready hasn't run yet.
        pass
    await update()
"""
@client.event
async def on_guild_role_update(_, __):
#Make sure the guild is in not_set_up
#Check if guild is set up
#If set up, remove from not_set_up
#If went from set up to not set up, then alert the owner
await asyncio.sleep(1)
guild = _.guild
if guilds[guild.id]["setup"]: return
main_role = (guilds[guild.id]["main_role"])
service_role = (guilds[guild.id]["service_role"])
if main_role == guild.roles[-1].id and service_role == guild.roles[-2].id:
guilds[guild.id]["setup"] = True
await guild.owner.send("Setup Complete! You can now move the roles as you wish. Just make sure my highest role is above the service role.")
await update()
"""
client.run(token) | <filename>main.py
#https://discordapp.com/api/oauth2/authorize?client_id=592772548949835807&permissions=268503072&scope=bot
import discord, asyncio, json, random, os
os.chdir(os.path.dirname(os.path.realpath(__file__)))
client = discord.Client()
token = ""
ready = False #When the bot isn't ready, it should be False.
guilds = None
invite_url = "https://discordapp.com/api/oauth2/authorize?client_id=592772548949835807&permissions=268503072&scope=bot"
helptext = """```
!color is the prefix used
help - this text
leave - leaves this guild and cleans up
invite - bot invite url
```"""
async def permission(message, permission='manage_messages'):
if eval(f"message.channel.permissions_for(message.author).{permission}"):
return True
else:
await message.channel.send(f'<@{message.author.id}> Hmm... seems like you don\'t have the "{permission}" permission, so I can\'t let you do that.')
return False
async def leave(guild):
await guild.get_role(guilds[guild.id]["service_role"]).delete()
await guild.leave()
async def get_guilds():
#Grab the current guilds from the API and the saved guilds
#Get rid of guilds the bot isn't in and leave the guilds we are in
current_guilds = client.guilds
current_guild_ids = [guild.id for guild in current_guilds]
unmapped_saved_guilds = json.loads(open('guilds.json').read())
saved_guilds = {}
for id in unmapped_saved_guilds: #Make the ids ints
saved_guilds[int(id)] = unmapped_saved_guilds[id]
saved_guilds_immutable = dict(saved_guilds) #To avoid runtime error, reference this dict only
new_guilds = []
for guild in current_guilds:
if not guild.id in saved_guilds.keys():
new_guilds.append(guild)
for id in saved_guilds_immutable.keys():
if not id in current_guild_ids:
del saved_guilds[id]
for guild in new_guilds:
await guild.owner.send("In order to make sure the bot inviting process goes smoothly, it's important to re-invite me.\nhttps://discordapp.com/api/oauth2/authorize?client_id=592772548949835807&permissions=268503072&scope=bot")
await guild.leave()
return saved_guilds
async def update():
#Update the JSON file
open('guilds.json','w').write(json.dumps(guilds))
def random_color():
return discord.Color.from_rgb(
random.randrange(256),
random.randrange(256),
random.randrange(256))
@client.event
async def on_ready():
global ready
global guilds
ready = True
print(f"Logged in as {client.user.name}")
guilds = await get_guilds()
await update()
await client.change_presence(activity=discord.Game(name="!color help"))
while True:
if guilds == None: continue #Skip while that async function is running
await asyncio.sleep(5)
if ready:
for id in guilds:
guild = client.get_guild(id) # Grab the real guild
await guild.get_role(guilds[id]["service_role"]).edit(color=random_color()) #Set random color
@client.event
async def on_disconnect():
global ready
ready = False
@client.event
async def on_message(message):
#Implement some command stuff here
if message.content.lower().startswith('!color'):
command = message.content.lower().split(' ')[1:]
if len(command) == 0 or command[0] == 'help':
await message.channel.send(helptext)
elif command[0] == 'leave':
if await permission(message, 'manage_guild'):
await message.channel.send("Goodbye!")
await leave(message.guild)
elif command[0] == 'close':
if message.author.id == 2<PASSWORD>:
await client.close()
elif command[0] == 'invite':
await message.channel.send(f"<@{message.author.id}> {invite_url}")
else:
await message.channel.send(helptext)
@client.event
async def on_guild_join(guild):
#Tell the owner how to setup bot
#Log the important roles to not_set_up
await asyncio.sleep(1)
info = '''Hello there <@{}>! {} is a bot that changes the user color of certain users after 5 seconds. It does this by editing the color of a service role to a random color. If you're wondering how roles work, read Discord's Role Management 101 (https://support.discordapp.com/hc/en-us/articles/214836687-Role-Management-101). I've made a role by the name of "Color Changing Role". Feel free to change the name of the role and/or any other properties. Just make sure to keep my highest role above the service role.'''
main_role = guild.me.top_role
service_role = await guild.create_role(
reason=f"{client.user.name}'s service role",
name="Color Changing Role")
guilds[guild.id] = {
"main_role":main_role.id,
"service_role":service_role.id,
}
for member in guild.members:
if member.guild_permissions.manage_roles:
await member.send(info.format(member.id, client.user.name))
await update()
@client.event
async def on_guild_remove(guild):
try:
del guilds[guild.id]
except TypeError:
pass
await update()
"""
@client.event
async def on_guild_role_update(_, __):
#Make sure the guild is in not_set_up
#Check if guild is set up
#If set up, remove from not_set_up
#If went from set up to not set up, then alert the owner
await asyncio.sleep(1)
guild = _.guild
if guilds[guild.id]["setup"]: return
main_role = (guilds[guild.id]["main_role"])
service_role = (guilds[guild.id]["service_role"])
if main_role == guild.roles[-1].id and service_role == guild.roles[-2].id:
guilds[guild.id]["setup"] = True
await guild.owner.send("Setup Complete! You can now move the roles as you wish. Just make sure my highest role is above the service role.")
await update()
"""
client.run(token) | en | 0.808826 | #https://discordapp.com/api/oauth2/authorize?client_id=592772548949835807&permissions=268503072&scope=bot #When the bot isn't ready, it should be False. ``` !color is the prefix used help - this text leave - leaves this guild and cleans up invite - bot invite url ``` #Grab the current guilds from the API and the saved guilds #Get rid of guilds the bot isn't in and leave the guilds we are in #Make the ids ints #To avoid runtime error, reference this dict only #Update the JSON file #Skip while that async function is running # Grab the real guild #Set random color #Implement some command stuff here #Tell the owner how to setup bot #Log the important roles to not_set_up Hello there <@{}>! {} is a bot that changes the user color of certain users after 5 seconds. It does this by editing the color of a service role to a random color. If you're wondering how roles work, read Discord's Role Management 101 (https://support.discordapp.com/hc/en-us/articles/214836687-Role-Management-101). I've made a role by the name of "Color Changing Role". Feel free to change the name of the role and/or any other properties. Just make sure to keep my highest role above the service role. @client.event async def on_guild_role_update(_, __): #Make sure the guild is in not_set_up #Check if guild is set up #If set up, remove from not_set_up #If went from set up to not set up, then alert the owner await asyncio.sleep(1) guild = _.guild if guilds[guild.id]["setup"]: return main_role = (guilds[guild.id]["main_role"]) service_role = (guilds[guild.id]["service_role"]) if main_role == guild.roles[-1].id and service_role == guild.roles[-2].id: guilds[guild.id]["setup"] = True await guild.owner.send("Setup Complete! You can now move the roles as you wish. Just make sure my highest role is above the service role.") await update() | 2.692407 | 3 |
src/database/exceptions.py | pockeleewout/DataCleaner-public | 1 | 6622601 |
class DataError(Exception):
"""Base error used by the Data class"""
pass
class VersionError(DataError):
"""Base error used by the DataVersion class"""
pass
class TableError(DataError):
"""Base error used by the DataTable class"""
pass
class JoinError(TableError):
"""Error returned when joining of tables fails"""
pass
|
class DataError(Exception):
"""Base error used by the Data class"""
pass
class VersionError(DataError):
"""Base error used by the DataVersion class"""
pass
class TableError(DataError):
"""Base error used by the DataTable class"""
pass
class JoinError(TableError):
"""Error returned when joining of tables fails"""
pass
| en | 0.821182 | Base error used by the Data class Base error used by the DataVersion class Base error used by the DataTable class Error returned when joining of tables fails | 2.349049 | 2 |
tests/bindings/python/test_validator.py | 0u812/libcellml | 0 | 6622602 | #
# Tests the Validator class bindings
#
import unittest
class ValidatorTestCase(unittest.TestCase):
def test_create_destroy(self):
from libcellml import Validator
x = Validator()
del(x)
y = Validator()
z = Validator(y)
del(y, z)
def test_inheritance(self):
import libcellml
from libcellml import Validator
# Test inheritance
x = Validator()
self.assertIsInstance(x, libcellml.Logger)
# Test access to inherited methods
self.assertIsNone(x.error(0))
self.assertIsNone(x.error(-1))
self.assertEqual(x.errorCount(), 0)
x.addError(libcellml.Error())
self.assertEqual(x.errorCount(), 1)
def test_validate_model(self):
import libcellml
from libcellml import Validator
# void validateModel(const ModelPtr &model)
v = Validator()
v.validateModel(libcellml.Model())
if __name__ == '__main__':
unittest.main()
| #
# Tests the Validator class bindings
#
import unittest
class ValidatorTestCase(unittest.TestCase):
def test_create_destroy(self):
from libcellml import Validator
x = Validator()
del(x)
y = Validator()
z = Validator(y)
del(y, z)
def test_inheritance(self):
import libcellml
from libcellml import Validator
# Test inheritance
x = Validator()
self.assertIsInstance(x, libcellml.Logger)
# Test access to inherited methods
self.assertIsNone(x.error(0))
self.assertIsNone(x.error(-1))
self.assertEqual(x.errorCount(), 0)
x.addError(libcellml.Error())
self.assertEqual(x.errorCount(), 1)
def test_validate_model(self):
import libcellml
from libcellml import Validator
# void validateModel(const ModelPtr &model)
v = Validator()
v.validateModel(libcellml.Model())
if __name__ == '__main__':
unittest.main()
| en | 0.52646 | # # Tests the Validator class bindings # # Test inheritance # Test access to inherited methods # void validateModel(const ModelPtr &model) | 2.90983 | 3 |
catalog/bindings/csw/time_reference_system.py | NIVANorge/s-enda-playground | 0 | 6622603 | from dataclasses import dataclass
from bindings.csw.abstract_time_reference_system_type import (
AbstractTimeReferenceSystemType,
)
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class TimeReferenceSystem(AbstractTimeReferenceSystemType):
"""
Abstract element serves primarily as the head of a substitution group for
temporal reference systems.
"""
class Meta:
name = "_TimeReferenceSystem"
namespace = "http://www.opengis.net/gml"
| from dataclasses import dataclass
from bindings.csw.abstract_time_reference_system_type import (
AbstractTimeReferenceSystemType,
)
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class TimeReferenceSystem(AbstractTimeReferenceSystemType):
"""
Abstract element serves primarily as the head of a substitution group for
temporal reference systems.
"""
class Meta:
name = "_TimeReferenceSystem"
namespace = "http://www.opengis.net/gml"
| en | 0.882262 | Abstract element serves primarily as the head of a substitution group for temporal reference systems. | 2.42829 | 2 |
Ekeopara_Praise/Phase 1/Python Basic 2/Day21 Tasks/Task1.py | CodedLadiesInnovateTech/-python-challenge-solutions | 6 | 6622604 | '''1. Write a Python program to compute and print sum of two given integers (more than or equal to zero).
If given integers or the sum have more than 80 digits, print "overflow".
Input first integer:
25
Input second integer:
22
Sum of the two integers: 47'''
def sum_integers(x, y):
if x >= 10 ** 80 or y >= 10 ** 80 or x + y >= 10 ** 80:
response = "Overflow!"
else:
response = x + y
return response
print(sum_integers(25, 22))
print(sum_integers(25**80, 22)) | '''1. Write a Python program to compute and print sum of two given integers (more than or equal to zero).
If given integers or the sum have more than 80 digits, print "overflow".
Input first integer:
25
Input second integer:
22
Sum of the two integers: 47'''
def sum_integers(x, y):
if x >= 10 ** 80 or y >= 10 ** 80 or x + y >= 10 ** 80:
response = "Overflow!"
else:
response = x + y
return response
print(sum_integers(25, 22))
print(sum_integers(25**80, 22)) | en | 0.684714 | 1. Write a Python program to compute and print sum of two given integers (more than or equal to zero). If given integers or the sum have more than 80 digits, print "overflow". Input first integer: 25 Input second integer: 22 Sum of the two integers: 47 | 4.236693 | 4 |
config.py | Jiuxe/mancala-league | 0 | 6622605 | import os
basedir = os.path.abspath(os.path.dirname(__file__))
LOG_FILE = os.path.join(basedir, 'log/app_logger.log')
CSRF_ENABLED = True
SECRET_KEY = '<KEY>'
DEBUG = True | import os
basedir = os.path.abspath(os.path.dirname(__file__))
LOG_FILE = os.path.join(basedir, 'log/app_logger.log')
CSRF_ENABLED = True
SECRET_KEY = '<KEY>'
DEBUG = True | none | 1 | 1.562431 | 2 | |
dsul/confidence_intervals/base.py | EdgarTeixeira/eul | 0 | 6622606 | <filename>dsul/confidence_intervals/base.py
from enum import Enum
from dataclasses import dataclass
class BootstrapCI(Enum):
percentile = 0
normal = 1
bca = 2
class IntervalType(Enum):
symmetric = 0
left = 1
right = 2
@dataclass
class ConfidenceInterval:
lower: float
upper: float
alpha: float
def contains(self, value: float) -> bool:
return self.lower <= value <= self.upper
def _get_alphas(alpha, interval_type: IntervalType):
if interval_type == IntervalType.symmetric:
return alpha / 2, 1 - alpha / 2
elif interval_type == IntervalType.left:
return alpha, 1.0
elif interval_type == IntervalType.right:
return 0.0, 1 - alpha
raise NotImplementedError("Invalid IntervalType")
| <filename>dsul/confidence_intervals/base.py
from enum import Enum
from dataclasses import dataclass
class BootstrapCI(Enum):
percentile = 0
normal = 1
bca = 2
class IntervalType(Enum):
symmetric = 0
left = 1
right = 2
@dataclass
class ConfidenceInterval:
lower: float
upper: float
alpha: float
def contains(self, value: float) -> bool:
return self.lower <= value <= self.upper
def _get_alphas(alpha, interval_type: IntervalType):
if interval_type == IntervalType.symmetric:
return alpha / 2, 1 - alpha / 2
elif interval_type == IntervalType.left:
return alpha, 1.0
elif interval_type == IntervalType.right:
return 0.0, 1 - alpha
raise NotImplementedError("Invalid IntervalType")
| none | 1 | 3.275355 | 3 | |
nally/core/layers/transport/udp/udp_packet.py | FreibergVlad/port-scanner | 0 | 6622607 | import struct
from nally.core.layers.packet import Packet
from nally.core.layers.transport.transport_layer_utils \
import TransportLayerUtils
from nally.core.utils.utils import Utils
class UdpPacket(Packet):
"""
Represents UDP (User Datagram Protocol) datagram
"""
UDP_HEADER_FORMAT = "!HHHH"
"""
Defines UDP header format:
* Source port field : 2 bytes
* Destination port field : 2 bytes
* Length field : 2 bytes
* Checksum field : 2 bytes
"""
UDP_HEADER_LENGTH_BYTES = 8
def __init__(
self,
dest_port: int,
source_port: int = 0,
):
"""
Initializes UDP packet instance
:param dest_port: Destination port field value,
integer in range [0; 65535]
:param source_port: Source port field value,
integer in range [0; 65535]. 0 by default
"""
super().__init__()
self.__dest_port = TransportLayerUtils.validate_port_num(dest_port)
self.__source_port = TransportLayerUtils.validate_port_num(source_port)
def to_bytes(self):
payload = self.raw_payload
length = self.UDP_HEADER_LENGTH_BYTES + len(payload)
header_fields = [self.__source_port, self.__dest_port, length, 0]
# allocate 20 bytes buffer to put header in
header_buffer = bytearray(self.UDP_HEADER_LENGTH_BYTES)
# pack header without checksum to the buffer
struct.pack_into(
self.UDP_HEADER_FORMAT,
header_buffer,
0,
*header_fields
)
# generate pseudo header using underlying IP packet
pseudo_header = TransportLayerUtils.get_pseudo_header(self, length)
# calculate checksum
checksum_bytes = Utils.calc_checksum(
pseudo_header + header_buffer + payload
)
# checksum takes 6-th and 7-th bytes of the header (counting from 0)
# see https://tools.ietf.org/html/rfc768 for more details
header_buffer[6] = checksum_bytes[0]
header_buffer[7] = checksum_bytes[1]
return TransportLayerUtils.validate_packet_length(
bytes(header_buffer) + payload
)
@staticmethod
def from_bytes(packet_bytes: bytes):
header_bytes = packet_bytes[:UdpPacket.UDP_HEADER_LENGTH_BYTES]
payload = packet_bytes[UdpPacket.UDP_HEADER_LENGTH_BYTES:]
header_fields = struct.unpack(
UdpPacket.UDP_HEADER_FORMAT,
header_bytes
)
source_port = header_fields[0]
dest_port = header_fields[1]
udp_header = UdpPacket(dest_port=dest_port, source_port=source_port)
return udp_header / payload if len(payload) > 0 else udp_header
def is_response(self, packet) -> bool:
if UdpPacket not in packet:
return False
udp_layer = packet[UdpPacket]
# check that destination and source ports are correct
if self.dest_port != udp_layer.source_port \
or self.source_port != udp_layer.dest_port:
return False
# here we know that 'self' is a valid response on UDP layer,
# now delegate further processing to the upper layer if one exists
return (
self.upper_layer.is_response(packet)
if self.upper_layer is not None
else True
)
@property
def dest_port(self) -> int:
return self.__dest_port
@property
def source_port(self) -> int:
return self.__source_port
@property
def length(self) -> int:
return self.UDP_HEADER_LENGTH_BYTES + len(self.raw_payload)
def __eq__(self, other: object) -> bool:
if isinstance(other, UdpPacket):
return self.dest_port == other.dest_port and \
self.source_port == other.source_port and \
self.upper_layer == other.upper_layer
def __str__(self) -> str:
return f"UDP(dst_port={self.dest_port}, " \
f"src_port={self.source_port}, " \
f"length={self.length})"
| import struct
from nally.core.layers.packet import Packet
from nally.core.layers.transport.transport_layer_utils \
import TransportLayerUtils
from nally.core.utils.utils import Utils
class UdpPacket(Packet):
"""
Represents UDP (User Datagram Protocol) datagram
"""
UDP_HEADER_FORMAT = "!HHHH"
"""
Defines UDP header format:
* Source port field : 2 bytes
* Destination port field : 2 bytes
* Length field : 2 bytes
* Checksum field : 2 bytes
"""
UDP_HEADER_LENGTH_BYTES = 8
def __init__(
self,
dest_port: int,
source_port: int = 0,
):
"""
Initializes UDP packet instance
:param dest_port: Destination port field value,
integer in range [0; 65535]
:param source_port: Source port field value,
integer in range [0; 65535]. 0 by default
"""
super().__init__()
self.__dest_port = TransportLayerUtils.validate_port_num(dest_port)
self.__source_port = TransportLayerUtils.validate_port_num(source_port)
def to_bytes(self):
payload = self.raw_payload
length = self.UDP_HEADER_LENGTH_BYTES + len(payload)
header_fields = [self.__source_port, self.__dest_port, length, 0]
# allocate 20 bytes buffer to put header in
header_buffer = bytearray(self.UDP_HEADER_LENGTH_BYTES)
# pack header without checksum to the buffer
struct.pack_into(
self.UDP_HEADER_FORMAT,
header_buffer,
0,
*header_fields
)
# generate pseudo header using underlying IP packet
pseudo_header = TransportLayerUtils.get_pseudo_header(self, length)
# calculate checksum
checksum_bytes = Utils.calc_checksum(
pseudo_header + header_buffer + payload
)
# checksum takes 6-th and 7-th bytes of the header (counting from 0)
# see https://tools.ietf.org/html/rfc768 for more details
header_buffer[6] = checksum_bytes[0]
header_buffer[7] = checksum_bytes[1]
return TransportLayerUtils.validate_packet_length(
bytes(header_buffer) + payload
)
@staticmethod
def from_bytes(packet_bytes: bytes):
header_bytes = packet_bytes[:UdpPacket.UDP_HEADER_LENGTH_BYTES]
payload = packet_bytes[UdpPacket.UDP_HEADER_LENGTH_BYTES:]
header_fields = struct.unpack(
UdpPacket.UDP_HEADER_FORMAT,
header_bytes
)
source_port = header_fields[0]
dest_port = header_fields[1]
udp_header = UdpPacket(dest_port=dest_port, source_port=source_port)
return udp_header / payload if len(payload) > 0 else udp_header
def is_response(self, packet) -> bool:
if UdpPacket not in packet:
return False
udp_layer = packet[UdpPacket]
# check that destination and source ports are correct
if self.dest_port != udp_layer.source_port \
or self.source_port != udp_layer.dest_port:
return False
# here we know that 'self' is a valid response on UDP layer,
# now delegate further processing to the upper layer if one exists
return (
self.upper_layer.is_response(packet)
if self.upper_layer is not None
else True
)
@property
def dest_port(self) -> int:
return self.__dest_port
@property
def source_port(self) -> int:
return self.__source_port
@property
def length(self) -> int:
return self.UDP_HEADER_LENGTH_BYTES + len(self.raw_payload)
def __eq__(self, other: object) -> bool:
if isinstance(other, UdpPacket):
return self.dest_port == other.dest_port and \
self.source_port == other.source_port and \
self.upper_layer == other.upper_layer
def __str__(self) -> str:
return f"UDP(dst_port={self.dest_port}, " \
f"src_port={self.source_port}, " \
f"length={self.length})"
| en | 0.664059 | Represents UDP (User Datagram Protocol) datagram Defines UDP header format: * Source port field : 2 bytes * Destination port field : 2 bytes * Length field : 2 bytes * Checksum field : 2 bytes Initializes UDP packet instance :param dest_port: Destination port field value, integer in range [0; 65535] :param source_port: Source port field value, integer in range [0; 65535]. 0 by default # allocate 20 bytes buffer to put header in # pack header without checksum to the buffer # generate pseudo header using underlying IP packet # calculate checksum # checksum takes 6-th and 7-th bytes of the header (counting from 0) # see https://tools.ietf.org/html/rfc768 for more details # check that destination and source ports are correct # here we know that 'self' is a valid response on UDP layer, # now delegate further processing to the upper layer if one exists | 2.666261 | 3 |
scraper.py | ElijahMwambazi/beforwardScraper | 0 | 6622608 | import os
import re
import time
import logging
from datetime import datetime
import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException, NoSuchElementException
print(
    "======================================= START ======================================="
)
print("Ensure all instances of 'Data.csv' are closed to prevent data lose.")
# Initialize logger.
# Only ERROR-level (and worse) records are appended to log.txt beside the script.
logging.basicConfig(
    filename="log.txt", level=logging.ERROR, format="%(asctime)s - %(message)s"
)
# logging.disable(logging.CRITICAL) # <====== REMEMBER TO COMMENT THIS.
# Get location of working directory.
working_dir = os.getcwd()
# Create empty list variables that will contain scraped data.
# Each list is one future CSV column; the scraping loop appends to all of
# them in lockstep, one entry per car, so their lengths stay equal.
parsed_data = []  # NOTE(review): never appended to or exported — looks dead; confirm before removing.
car_type = []
car_make = []
price = []
total_price = []
mileage = []
year = []
engine = []
transmition = []
location = []
model_code = []
engine_code = []
steering = []
color = []
fuel = []
drive = []
seats = []
doors = []
# Declare chromedriver options.
chrome_options = webdriver.ChromeOptions()
# Ask the user whether the browser window should be visible or headless.
window_vis = int(
    input(" Press 1: To show window.\n Press 2: To hide window.\n")
)  # Hide window option
# BUG FIX: the original test was `window_vis != (1 or 2)`; `(1 or 2)`
# evaluates to 1, so choosing option 2 was wrongly rejected and headless
# mode was unreachable.  Validate membership instead.
if window_vis not in (1, 2):
    raise Exception("You didn't pick either one of the specified options")
if window_vis == 2:
    chrome_options.add_argument("--headless")
# Stability / noise-reduction switches for the Chrome instance.  The tuple
# preserves the original one-call-per-line ordering exactly.
_CHROME_FLAGS = (
    "--no-zygote",
    "--mute-audio",
    "--no-sandbox",
    "--disable-gpu",
    "--log-level=3",
    "--disable-breakpad",
    "--disable-infobars",
    "--ignore-ssl-errors",
    "--disable-web-security",
    "--ignore-certificate-errors",
    "--allow-running-insecure-content",
    "--ignore-certificate-errors-spki-list",
    "--disable-features=VizDisplayCompositor",
)
for _chrome_flag in _CHROME_FLAGS:
    chrome_options.add_argument(_chrome_flag)
# Silence chromedriver's own console logging.
chrome_options.add_experimental_option("excludeSwitches", ["enable-logging"])
# Set desired capabilities to ignore SSL stuff.
# NOTE(review): the ``desired_capabilities=`` keyword was removed in
# Selenium 4; this code presumably targets Selenium 3 — confirm the
# pinned selenium version before upgrading.
desired_capabilities = chrome_options.to_capabilities()
desired_capabilities["acceptInsecureCerts"] = True
desired_capabilities["acceptSslCerts"] = True
# Set up Chromedriver.
url = r"https://www.beforward.jp//"
# chromedriver.exe is expected to sit next to this script.
driver_location = os.path.join(working_dir, r"chromedriver.exe")
driver = webdriver.Chrome(
    driver_location,
    options=chrome_options,
    desired_capabilities=desired_capabilities,
)
# Check for next Page function.
def check_next_page():
    """Return True when a '.pagination-next' control exists on the page.

    Uses the module-level ``driver``.  A page without the control makes
    Selenium raise ``NoSuchElementException``, which is mapped to False.
    """
    try:
        # BUG FIX: ``By.CSS_SELECTOR`` is a locator-strategy constant (the
        # string "css selector"), not a callable.  The original
        # ``By.CSS_SELECTOR(".pagination-next")`` raised TypeError on every
        # call; the strategy and selector must be two separate arguments.
        driver.find_element(By.CSS_SELECTOR, ".pagination-next")
        return True
    except NoSuchElementException:
        return False
# Getting Start datetime.
start_time = datetime.now()
# Run Chromedriver.
# Main scraping routine.  The whole session lives in one try-block so the
# finally-clause can always quit the driver and report the elapsed time.
# NOTE(review): the *_by_xpath / *_by_class_name / *_by_tag_name helpers
# were removed in Selenium 4 — presumably this targets Selenium 3; confirm.
print("Starting chromedriver")
try:
    driver.maximize_window()
    print("Loading URL")
    driver.get(url)
    # Scraper code.
    print("Starting scraping process")
    # Locate Shop by type element.
    shop_by_type = driver.find_element_by_xpath("//ul[@id='shop-by-type']")
    car_types = shop_by_type.find_elements_by_tag_name("li")  # Get car type elements.
    for item in car_types:  # Iterate through Each car type.
        print("Selecting car element")
        car_t = item.text
        # Strip digits, commas, parentheses and spaces from the label,
        # e.g. "SUV (1,234)" -> "SUV".
        car_final = re.sub("[(\d, )]", "", car_t)
        item.click()
        time.sleep(3)
        # Toggle 100 results button.
        print("Attempting to toggle result button")
        try:
            select = driver.find_element_by_xpath(
                "//div[@class='results results-bottom']//select[@name='view_cnt']"
            )
            select.find_element_by_css_selector(
                "div[class='results results-bottom'] option[value='100']"
            ).click()
            print("Result button toggled successfully")
        except (NoSuchElementException):
            print("Didn't find result button")
            pass
        page_count = 1
        # NOTE(review): ``check_next_page`` is compared, not called, so this
        # condition is always truthy and the loop only ends when an exception
        # escapes to the outer handlers — confirm whether that is intended.
        while check_next_page != False:
            time.sleep(2)
            # Delete login element.
            print(("Attempting to delete login element"))
            attempts = 0
            while attempts < 2:
                try:
                    driver.execute_script(
                        """var element = document.getElementsByClassName("stocklist-row login-banner-table")[0];
                    element.parentNode.removeChild(element);"""
                    )
                    print("Login element deleted")
                    attempts = 2
                except:
                    attempts += 1
            # NOTE(review): ``attempts`` equals 2 after success *and* after two
            # failures, so "Didn't find login element" also prints on success.
            if attempts == 2:
                print("Didn't find login element")
            else:
                pass
            # Get car row wrapper.
            time.sleep(2)
            print("Getting car row wrapper...")
            car_container = driver.find_element_by_xpath(
                "//table[@class='stocklist-row-wrap']"
            )
            # Get list of cars in stock within car row wrapper.
            print("Getting cars in stock row wrapper")
            cars = car_container.find_elements_by_class_name("stocklist-row")
            count = 0
            # Loop through cars in car wrapper.
            print(f"Attempting to scrape car type {car_final} data")
            for car in cars:
                try:
                    count += 1
                    # Rows 15 and 23 are skipped — presumably fixed banner/ad
                    # positions in the listing table; confirm against the page.
                    if (count == 15) or (count == 23):
                        pass
                    else:
                        car_type.append(car_final)
                        # Each field is fetched independently; when an element
                        # is missing, None is stored so all columns stay aligned.
                        try:
                            car_make.append(
                                car.find_element_by_class_name("make-model").text
                            )
                        except (NoSuchElementException):
                            car_make.append(None)
                        try:
                            price.append(car.find_element_by_class_name("price").text)
                        except (NoSuchElementException):
                            price.append(None)
                        try:
                            total_price.append(
                                car.find_element_by_class_name("total-price").text
                            )
                        except (NoSuchElementException):
                            total_price.append(None)
                        try:
                            mileage.append(
                                driver.find_element(
                                    By.CSS_SELECTOR,
                                    f".stocklist-row:nth-child({count}) .mileage > .val",
                                ).text
                            )
                        except (NoSuchElementException):
                            mileage.append(None)
                        try:
                            year.append(
                                driver.find_element(
                                    By.CSS_SELECTOR,
                                    f".stocklist-row:nth-child({count}) .year > .val",
                                ).text
                            )
                        except (NoSuchElementException):
                            year.append(None)
                        try:
                            engine.append(
                                driver.find_element(
                                    By.CSS_SELECTOR,
                                    f".stocklist-row:nth-child({count}) .engine > .val",
                                ).text
                            )
                        except (NoSuchElementException):
                            engine.append(None)
                        try:
                            transmition.append(
                                driver.find_element(
                                    By.CSS_SELECTOR,
                                    f".stocklist-row:nth-child({count}) .trans > .val",
                                ).text
                            )
                        except (NoSuchElementException):
                            transmition.append(None)
                        try:
                            location.append(
                                driver.find_element(
                                    By.CSS_SELECTOR,
                                    f".stocklist-row:nth-child({count}) .val > span",
                                ).text
                            )
                        except (NoSuchElementException):
                            location.append(None)
                        try:
                            model_code.append(
                                driver.find_element(
                                    By.CSS_SELECTOR,
                                    f".stocklist-row:nth-child({count}) tr:nth-child(1) > .td-1st",
                                ).text
                            )
                        except (NoSuchElementException):
                            model_code.append(None)
                        try:
                            engine_code.append(
                                driver.find_element(
                                    By.CSS_SELECTOR,
                                    f".stocklist-row:nth-child({count}) tr:nth-child(2) > .td-1st",
                                ).text
                            )
                        except (NoSuchElementException):
                            engine_code.append(None)
                        try:
                            steering.append(
                                driver.find_element(
                                    By.CSS_SELECTOR,
                                    f".stocklist-row:nth-child({count}) tr:nth-child(1) > .td-2nd",
                                ).text
                            )
                        except (NoSuchElementException):
                            steering.append(None)
                        try:
                            color.append(
                                driver.find_element(
                                    By.CSS_SELECTOR,
                                    f".stocklist-row:nth-child({count}) tr:nth-child(2) > .td-2nd",
                                ).text
                            )
                        except (NoSuchElementException):
                            color.append(None)
                        try:
                            fuel.append(
                                driver.find_element(
                                    By.CSS_SELECTOR,
                                    f".stocklist-row:nth-child({count}) tr:nth-child(1) > .td-3rd",
                                ).text
                            )
                        except (NoSuchElementException):
                            fuel.append(None)
                        try:
                            drive.append(
                                driver.find_element(
                                    By.CSS_SELECTOR,
                                    f".stocklist-row:nth-child({count}) tr:nth-child(2) > .td-3rd",
                                ).text
                            )
                        except (NoSuchElementException):
                            drive.append(None)
                        try:
                            seats.append(
                                driver.find_element(
                                    By.CSS_SELECTOR,
                                    f".stocklist-row:nth-child({count}) tr:nth-child(1) > .td-4th",
                                ).text
                            )
                        except (NoSuchElementException):
                            seats.append(None)
                        try:
                            doors.append(
                                driver.find_element(
                                    By.CSS_SELECTOR,
                                    f".stocklist-row:nth-child({count}) tr:nth-child(2) > .td-4th",
                                ).text
                            )
                        except (NoSuchElementException):
                            doors.append(None)
                        if count > 100:
                            pass
                        else:
                            print(
                                f"Done getting data for car number {count} of type {car_final} on page {page_count}"
                            )
                except Exception as e:
                    logging.error(e)
                    pass
            # Attempt to remove popup if present.
            attempts = 0
            while attempts < 2:
                try:
                    driver.execute_script(
                        """var element = document.getElementsByClassName("not-operation-popup active")[0];
                    element.parentNode.removeChild(element);"""
                    )
                    print("Found popup and removed it")
                    attempts = 2
                except (NoSuchElementException):
                    attempts += 1
                    pass
            print(f"Done scraping data for all {car_final} cars on page {page_count}\n")
            # Next page.
            attempts = 0
            while attempts < 3:
                try:
                    print("Attempting to go to next Page")
                    pagination = driver.find_element_by_class_name("results-pagination")
                    pagination.find_element_by_xpath(
                        "//a[normalize-space()='Next Page']"
                    ).click()
                    attempts = 3
                    time.sleep(2)
                except:
                    attempts += 1
                    pass
            page_count += 1
        print(f"Done scraping data for all {car_final}\n")
    print("Done Scrapping data for all cars\n")
except (TimeoutException):
    logging.error("Took too long to load webpage. Check your internet connection")
except Exception as e:
    logging.error(e)
    pass
finally:
    # Teardown window.
    end_time = datetime.now()  # Get end datetime.
    time_taken = end_time - start_time  # Get time taken.
    print("Terminating chromedriver...")
    driver.quit()
    # Print time taken.
    print("Started at: ", start_time)
    print("Finished at: ", end_time)
    print("Time taken: ", time_taken)
# Create data frame and add data to it.  Each key becomes one CSV column;
# the module-level lists are appended in lockstep per car, so they share
# the same length and satisfy the DataFrame constructor.
data = {
    "car type": car_type,
    "car make": car_make,
    "price": price,
    "total price": total_price,
    "mileage": mileage,
    # BUG FIX: the "year" column was scraped into the ``year`` list but
    # never exported, so the collected values were silently lost.
    "year": year,
    "engine": engine,
    "transmition": transmition,
    "location": location,
    "model code": model_code,
    "engine code": engine_code,
    "steering": steering,
    "color": color,
    "fuel": fuel,
    "drive": drive,
    "seats": seats,
    "doors": doors,
}
print("Creating data frame")
data_frame = pd.DataFrame(data)
print("Showing data frame head\n")
print(data_frame.head(10))
# Save dataframe to CSV file.
try:
    data_frame.to_csv(os.path.join(working_dir, "Data.csv"), index=False, header=True)
except Exception:  # was a bare except: don't swallow KeyboardInterrupt/SystemExit
    print("Unable to save csv file")
print("End of program.")
print(
    "======================================== END ========================================"
)
import re
import time
import logging
from datetime import datetime
import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException, NoSuchElementException
print(
    "======================================= START ======================================="
)
print("Ensure all instances of 'Data.csv' are closed to prevent data lose.")
# Initialize logger.
# Only ERROR-level (and worse) records are appended to log.txt beside the script.
logging.basicConfig(
    filename="log.txt", level=logging.ERROR, format="%(asctime)s - %(message)s"
)
# logging.disable(logging.CRITICAL) # <====== REMEMBER TO COMMENT THIS.
# Get location of working directory.
working_dir = os.getcwd()
# Create empty list variables that will contain scraped data.
# Each list is one future CSV column; the scraping loop appends to all of
# them in lockstep, one entry per car, so their lengths stay equal.
parsed_data = []  # NOTE(review): never appended to or exported — looks dead; confirm before removing.
car_type = []
car_make = []
price = []
total_price = []
mileage = []
year = []
engine = []
transmition = []
location = []
model_code = []
engine_code = []
steering = []
color = []
fuel = []
drive = []
seats = []
doors = []
# Declare chromedriver options.
chrome_options = webdriver.ChromeOptions()
# Ask the user whether the browser window should be visible or headless.
window_vis = int(
    input(" Press 1: To show window.\n Press 2: To hide window.\n")
)  # Hide window option
# BUG FIX: the original test was `window_vis != (1 or 2)`; `(1 or 2)`
# evaluates to 1, so choosing option 2 was wrongly rejected and headless
# mode was unreachable.  Validate membership instead.
if window_vis not in (1, 2):
    raise Exception("You didn't pick either one of the specified options")
if window_vis == 2:
    chrome_options.add_argument("--headless")
# Stability / noise-reduction switches for the Chrome instance.  The tuple
# preserves the original one-call-per-line ordering exactly.
_CHROME_FLAGS = (
    "--no-zygote",
    "--mute-audio",
    "--no-sandbox",
    "--disable-gpu",
    "--log-level=3",
    "--disable-breakpad",
    "--disable-infobars",
    "--ignore-ssl-errors",
    "--disable-web-security",
    "--ignore-certificate-errors",
    "--allow-running-insecure-content",
    "--ignore-certificate-errors-spki-list",
    "--disable-features=VizDisplayCompositor",
)
for _chrome_flag in _CHROME_FLAGS:
    chrome_options.add_argument(_chrome_flag)
# Silence chromedriver's own console logging.
chrome_options.add_experimental_option("excludeSwitches", ["enable-logging"])
# Set desired capabilities to ignore SSL stuff.
# NOTE(review): the ``desired_capabilities=`` keyword was removed in
# Selenium 4; this code presumably targets Selenium 3 — confirm the
# pinned selenium version before upgrading.
desired_capabilities = chrome_options.to_capabilities()
desired_capabilities["acceptInsecureCerts"] = True
desired_capabilities["acceptSslCerts"] = True
# Set up Chromedriver.
url = r"https://www.beforward.jp//"
# chromedriver.exe is expected to sit next to this script.
driver_location = os.path.join(working_dir, r"chromedriver.exe")
driver = webdriver.Chrome(
    driver_location,
    options=chrome_options,
    desired_capabilities=desired_capabilities,
)
# Check for next Page function.
def check_next_page():
    """Return True if the current page has a pagination 'next' control.

    Relies on the module-level Selenium ``driver``.
    """
    try:
        # BUG FIX: By.CSS_SELECTOR is a locator-strategy constant, not a
        # callable. The original `By.CSS_SELECTOR(".pagination-next")` raised
        # TypeError; the strategy and selector must be separate arguments.
        driver.find_element(By.CSS_SELECTOR, ".pagination-next")
        return True
    except NoSuchElementException:
        return False
# Getting Start datetime.
start_time = datetime.now()
# Run Chromedriver.
print("Starting chromedriver")
try:
driver.maximize_window()
print("Loading URL")
driver.get(url)
# Scraper code.
print("Starting scraping process")
# Locate Shop by type element.
shop_by_type = driver.find_element_by_xpath("//ul[@id='shop-by-type']")
car_types = shop_by_type.find_elements_by_tag_name("li") # Get car type elements.
for item in car_types: # Iterate through Each car type.
print("Selecting car element")
car_t = item.text
car_final = re.sub("[(\d, )]", "", car_t)
item.click()
time.sleep(3)
# Toggle 100 results button.
print("Attempting to toggle result button")
try:
select = driver.find_element_by_xpath(
"//div[@class='results results-bottom']//select[@name='view_cnt']"
)
select.find_element_by_css_selector(
"div[class='results results-bottom'] option[value='100']"
).click()
print("Result button toggled successfully")
except (NoSuchElementException):
print("Didn't find result button")
pass
page_count = 1
while check_next_page != False:
time.sleep(2)
# Delete login element.
print(("Attempting to delete login element"))
attempts = 0
while attempts < 2:
try:
driver.execute_script(
"""var element = document.getElementsByClassName("stocklist-row login-banner-table")[0];
element.parentNode.removeChild(element);"""
)
print("Login element deleted")
attempts = 2
except:
attempts += 1
if attempts == 2:
print("Didn't find login element")
else:
pass
# Get car row wrapper.
time.sleep(2)
print("Getting car row wrapper...")
car_container = driver.find_element_by_xpath(
"//table[@class='stocklist-row-wrap']"
)
# Get list of cars in stock within car row wrapper.
print("Getting cars in stock row wrapper")
cars = car_container.find_elements_by_class_name("stocklist-row")
count = 0
# Loop through cars in car wrapper.
print(f"Attempting to scrape car type {car_final} data")
for car in cars:
try:
count += 1
if (count == 15) or (count == 23):
pass
else:
car_type.append(car_final)
try:
car_make.append(
car.find_element_by_class_name("make-model").text
)
except (NoSuchElementException):
car_make.append(None)
try:
price.append(car.find_element_by_class_name("price").text)
except (NoSuchElementException):
price.append(None)
try:
total_price.append(
car.find_element_by_class_name("total-price").text
)
except (NoSuchElementException):
total_price.append(None)
try:
mileage.append(
driver.find_element(
By.CSS_SELECTOR,
f".stocklist-row:nth-child({count}) .mileage > .val",
).text
)
except (NoSuchElementException):
mileage.append(None)
try:
year.append(
driver.find_element(
By.CSS_SELECTOR,
f".stocklist-row:nth-child({count}) .year > .val",
).text
)
except (NoSuchElementException):
year.append(None)
try:
engine.append(
driver.find_element(
By.CSS_SELECTOR,
f".stocklist-row:nth-child({count}) .engine > .val",
).text
)
except (NoSuchElementException):
engine.append(None)
try:
transmition.append(
driver.find_element(
By.CSS_SELECTOR,
f".stocklist-row:nth-child({count}) .trans > .val",
).text
)
except (NoSuchElementException):
transmition.append(None)
try:
location.append(
driver.find_element(
By.CSS_SELECTOR,
f".stocklist-row:nth-child({count}) .val > span",
).text
)
except (NoSuchElementException):
location.append(None)
try:
model_code.append(
driver.find_element(
By.CSS_SELECTOR,
f".stocklist-row:nth-child({count}) tr:nth-child(1) > .td-1st",
).text
)
except (NoSuchElementException):
model_code.append(None)
try:
engine_code.append(
driver.find_element(
By.CSS_SELECTOR,
f".stocklist-row:nth-child({count}) tr:nth-child(2) > .td-1st",
).text
)
except (NoSuchElementException):
engine_code.append(None)
try:
steering.append(
driver.find_element(
By.CSS_SELECTOR,
f".stocklist-row:nth-child({count}) tr:nth-child(1) > .td-2nd",
).text
)
except (NoSuchElementException):
steering.append(None)
try:
color.append(
driver.find_element(
By.CSS_SELECTOR,
f".stocklist-row:nth-child({count}) tr:nth-child(2) > .td-2nd",
).text
)
except (NoSuchElementException):
color.append(None)
try:
fuel.append(
driver.find_element(
By.CSS_SELECTOR,
f".stocklist-row:nth-child({count}) tr:nth-child(1) > .td-3rd",
).text
)
except (NoSuchElementException):
fuel.append(None)
try:
drive.append(
driver.find_element(
By.CSS_SELECTOR,
f".stocklist-row:nth-child({count}) tr:nth-child(2) > .td-3rd",
).text
)
except (NoSuchElementException):
drive.append(None)
try:
seats.append(
driver.find_element(
By.CSS_SELECTOR,
f".stocklist-row:nth-child({count}) tr:nth-child(1) > .td-4th",
).text
)
except (NoSuchElementException):
seats.append(None)
try:
doors.append(
driver.find_element(
By.CSS_SELECTOR,
f".stocklist-row:nth-child({count}) tr:nth-child(2) > .td-4th",
).text
)
except (NoSuchElementException):
doors.append(None)
if count > 100:
pass
else:
print(
f"Done getting data for car number {count} of type {car_final} on page {page_count}"
)
except Exception as e:
logging.error(e)
pass
# Attempt to remove popup if present.
attempts = 0
while attempts < 2:
try:
driver.execute_script(
"""var element = document.getElementsByClassName("not-operation-popup active")[0];
element.parentNode.removeChild(element);"""
)
print("Found popup and removed it")
attempts = 2
except (NoSuchElementException):
attempts += 1
pass
print(f"Done scraping data for all {car_final} cars on page {page_count}\n")
# Next page.
attempts = 0
while attempts < 3:
try:
print("Attempting to go to next Page")
pagination = driver.find_element_by_class_name("results-pagination")
pagination.find_element_by_xpath(
"//a[normalize-space()='Next Page']"
).click()
attempts = 3
time.sleep(2)
except:
attempts += 1
pass
page_count += 1
print(f"Done scraping data for all {car_final}\n")
print("Done Scrapping data for all cars\n")
except (TimeoutException):
logging.error("Took too long to load webpage. Check your internet connection")
except Exception as e:
logging.error(e)
pass
finally:
# Teardown window.
end_time = datetime.now() # Get end datetime.
time_taken = end_time - start_time # Get time taken.
print("Terminating chromedriver...")
driver.quit()
# Print time taken.
print("Started at: ", start_time)
print("Finished at: ", end_time)
print("Time taken: ", time_taken)
# Create data frame and add data to it.
data = {
"car type": car_type,
"car make": car_make,
"price": price,
"total price": total_price,
"mileage": mileage,
"engine": engine,
"transmition": transmition,
"location": location,
"model code": model_code,
"engine code": engine_code,
"steering": steering,
"color": color,
"fuel": fuel,
"drive": drive,
"seats": seats,
"doors": doors,
}
print("Creating data frame")
data_frame = pd.DataFrame(data)
print("Showing data frame head\n")
print(data_frame.head(10))
# Save dataframe to CSV file.
try:
data_frame.to_csv(os.path.join(working_dir, "Data.csv"), index=False, header=True)
except:
print("Unable to save csv file")
print("End of program.")
print(
"======================================== END ========================================"
)
| en | 0.562079 | # Initialize logger. # logging.disable(logging.CRITICAL) # <====== REMEMBER TO COMMENT THIS. # Get location of workign directory. # Create empty list variables that will contain scraped data. # Declare chromedriver options. # Hide window option # Set desired capabilities to ignore SSL stuff. # Set up Chromedriver. # Check for next Page function. # Getting Start datetime. # Run Chromedriver. # Scraper code. # Locate Shop by type element. # Get car type elements. # Iterate through Each car type. # Toggle 100 results button. # Delete login element. var element = document.getElementsByClassName("stocklist-row login-banner-table")[0]; element.parentNode.removeChild(element); # Get car row wrapper. # Get list of cars in stock within car row wrapper. # Loop through cars in car wrapper. # Attempt to remove popup if present. var element = document.getElementsByClassName("not-operation-popup active")[0]; element.parentNode.removeChild(element); # Next page. # Teardown window. # Get end datetime. # Get time taken. # Print time taken. # Create data frame and add data to it. # Save dataframe to CSV file. | 2.949302 | 3 |
DerinOgrenme/KelimeUret/main.py | onselaydin/pytry | 0 | 6622609 | <reponame>onselaydin/pytry<filename>DerinOgrenme/KelimeUret/main.py
import os
import re
import string
import requests
import numpy as np
import collections
import random
import tensorflow as tf
import LSTM
min_word_freq = 5
rnn_size = 128
batch_size = 128
learning_rate = 0.001
training_seq_len = 50
embedding_size = rnn_size
prime_texts = ['ankara', 'kitap']
epochs = 10
data_dir = 'data'
data_file = 'eksidata.txt'
punctuation = string.punctuation
punctuation = ''.join([x for x in punctuation if x not in ['-', "'"]])
if not os.path.exists(data_dir):
os.makedirs(data_dir)
print('Data yükleniyor.')
if not os.path.isfile(os.path.join(data_dir, data_file)):
print('Data bulunamadı. İndiriliyor...')
data_url = 'https://raw.githubusercontent.com/hakanceb/eksi/master/eksidata.txt'
response = requests.get(data_url)
eksi_file = response.content
s_text = eksi_file.decode('utf-8')
s_text = s_text.replace('\r\n', '')
s_text = s_text.replace('\n', '')
with open(os.path.join(data_dir, data_file), 'w') as out_conn:
out_conn.write(s_text)
else:
with open(os.path.join(data_dir, data_file), 'r') as file_conn:
s_text = file_conn.read().replace('\n', '')
print('Text temizleniyor.')
s_text = re.sub(r'[{}]'.format(punctuation), ' ', s_text)
s_text = re.sub('\s+', ' ', s_text).strip().lower()
def build_vocab(text, min_word_freq):
    """Build word<->index lookup tables for *text*.

    Words are split on single spaces; only words occurring strictly more
    than ``min_word_freq`` times are kept. Index 0 is reserved for the
    'unknown' token, real words are numbered from 1 in first-seen order.
    Returns ``(index_to_word, word_to_index)``.
    """
    counts = collections.Counter(text.split(' '))
    frequent = [word for word, freq in counts.items() if freq > min_word_freq]
    word_to_index = {}
    for position, word in enumerate(frequent, start=1):
        word_to_index[word] = position
    word_to_index['unknown'] = 0
    index_to_word = {idx: word for word, idx in word_to_index.items()}
    return (index_to_word, word_to_index)
ix2vocab, vocab2ix = build_vocab(s_text, min_word_freq)
vocab_size = len(ix2vocab) + 1
print('Vocabulary Length = {}'.format(vocab_size))
assert (len(ix2vocab) == len(vocab2ix))
s_text_words = s_text.split(' ')
s_text_ix = []
for ix, x in enumerate(s_text_words):
try:
s_text_ix.append(vocab2ix[x])
except:
s_text_ix.append(0)
s_text_ix = np.array(s_text_ix)
lstm_model = LSTM.LSTM_Model(embedding_size, rnn_size, batch_size, learning_rate, training_seq_len, vocab_size)
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
test_lstm_model = LSTM.LSTM_Model(embedding_size, rnn_size, batch_size, learning_rate, training_seq_len, vocab_size,
infer_sample=True)
n_batches = int(len(s_text_ix)) / (batch_size * training_seq_len) + 1
batches = np.array_split(s_text_ix, n_batches)
batches = [np.resize(x, [batch_size, training_seq_len]) for x in batches]
sess = tf.Session()
sess.run(tf.global_variables_initializer())
iterations = 1
for epoch in range (epochs):
random.shuffle(batches)
targets = [np.roll(x, -1, axis=1) for x in batches]
print('Starting Epoch #{} of {}.'.format(epoch + 1, epochs))
state = sess.run(lstm_model.initial_state)
for ix, batch in enumerate(batches):
feed_dict_train = {lstm_model.x_data: batch, lstm_model.y_output: targets[ix]}
c, h = lstm_model.initial_state
feed_dict_train[c] = state.c
feed_dict_train[h] = state.h
[train_loss, state, _] = sess.run([lstm_model.loss, lstm_model.final_state, lstm_model.train_op],
feed_dict=feed_dict_train)
if iterations % 10 == 0:
summary_nums = (iterations, epoch + 1, ix + 1, n_batches + 1, train_loss)
print('Iteration: {}, Epoch: {}, Batch: {} out of {}, Loss: {:.2f}'.format(*summary_nums))
if iterations % 50 == 0:
for sample in prime_texts:
print(test_lstm_model.sample(sess, ix2vocab, vocab2ix, num=10, prime_text=sample))
iterations += 1
| import os
import re
import string
import requests
import numpy as np
import collections
import random
import tensorflow as tf
import LSTM
min_word_freq = 5
rnn_size = 128
batch_size = 128
learning_rate = 0.001
training_seq_len = 50
embedding_size = rnn_size
prime_texts = ['ankara', 'kitap']
epochs = 10
data_dir = 'data'
data_file = 'eksidata.txt'
punctuation = string.punctuation
punctuation = ''.join([x for x in punctuation if x not in ['-', "'"]])
if not os.path.exists(data_dir):
os.makedirs(data_dir)
print('Data yükleniyor.')
if not os.path.isfile(os.path.join(data_dir, data_file)):
print('Data bulunamadı. İndiriliyor...')
data_url = 'https://raw.githubusercontent.com/hakanceb/eksi/master/eksidata.txt'
response = requests.get(data_url)
eksi_file = response.content
s_text = eksi_file.decode('utf-8')
s_text = s_text.replace('\r\n', '')
s_text = s_text.replace('\n', '')
with open(os.path.join(data_dir, data_file), 'w') as out_conn:
out_conn.write(s_text)
else:
with open(os.path.join(data_dir, data_file), 'r') as file_conn:
s_text = file_conn.read().replace('\n', '')
print('Text temizleniyor.')
s_text = re.sub(r'[{}]'.format(punctuation), ' ', s_text)
s_text = re.sub('\s+', ' ', s_text).strip().lower()
def build_vocab(text, min_word_freq):
word_counts = collections.Counter(text.split(' '))
word_counts = {key: val for key, val in word_counts.items() if val > min_word_freq}
words = word_counts.keys()
vocab_to_ix_dict = {key: (ix + 1) for ix, key in enumerate(words)}
vocab_to_ix_dict['unknown'] = 0
ix_to_vocab_dict = {val: key for key, val in vocab_to_ix_dict.items()}
return (ix_to_vocab_dict, vocab_to_ix_dict)
ix2vocab, vocab2ix = build_vocab(s_text, min_word_freq)
vocab_size = len(ix2vocab) + 1
print('Vocabulary Length = {}'.format(vocab_size))
assert (len(ix2vocab) == len(vocab2ix))
s_text_words = s_text.split(' ')
s_text_ix = []
for ix, x in enumerate(s_text_words):
try:
s_text_ix.append(vocab2ix[x])
except:
s_text_ix.append(0)
s_text_ix = np.array(s_text_ix)
lstm_model = LSTM.LSTM_Model(embedding_size, rnn_size, batch_size, learning_rate, training_seq_len, vocab_size)
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
test_lstm_model = LSTM.LSTM_Model(embedding_size, rnn_size, batch_size, learning_rate, training_seq_len, vocab_size,
infer_sample=True)
n_batches = int(len(s_text_ix)) / (batch_size * training_seq_len) + 1
batches = np.array_split(s_text_ix, n_batches)
batches = [np.resize(x, [batch_size, training_seq_len]) for x in batches]
sess = tf.Session()
sess.run(tf.global_variables_initializer())
iterations = 1
for epoch in range (epochs):
random.shuffle(batches)
targets = [np.roll(x, -1, axis=1) for x in batches]
print('Starting Epoch #{} of {}.'.format(epoch + 1, epochs))
state = sess.run(lstm_model.initial_state)
for ix, batch in enumerate(batches):
feed_dict_train = {lstm_model.x_data: batch, lstm_model.y_output: targets[ix]}
c, h = lstm_model.initial_state
feed_dict_train[c] = state.c
feed_dict_train[h] = state.h
[train_loss, state, _] = sess.run([lstm_model.loss, lstm_model.final_state, lstm_model.train_op],
feed_dict=feed_dict_train)
if iterations % 10 == 0:
summary_nums = (iterations, epoch + 1, ix + 1, n_batches + 1, train_loss)
print('Iteration: {}, Epoch: {}, Batch: {} out of {}, Loss: {:.2f}'.format(*summary_nums))
if iterations % 50 == 0:
for sample in prime_texts:
print(test_lstm_model.sample(sess, ix2vocab, vocab2ix, num=10, prime_text=sample))
iterations += 1 | uk | 0.077952 | #{} of {}.'.format(epoch + 1, epochs)) | 2.552983 | 3 |
example.py | williamdean/DockerHubAdmin | 0 | 6622610 | #!/usr/bin/env python3
import DockerHubAdmin
import argparse
# Command-line interface: authenticate as an admin, then dispatch exactly one
# of the mutually exclusive team-management actions.
parser = argparse.ArgumentParser(description='Python admin interface to Docker Hub')
parser.add_argument('-u', '--username', help='Admin username for Docker Hub', required=True)
parser.add_argument('-p', '--password', help='Admin password for Docker Hub', required=True)
# BUG FIX: the help text for -o was a copy-paste of the password help.
parser.add_argument('-o', '--organization', help='Organization on Docker Hub', required=True)
parser.add_argument('--user', help='Non-Admin User Account on Docker Hub')
parser.add_argument('-t', '--team', help='A Docker Hub Team')
parser.add_argument('--adduser', help='Add account to a Docker Hub Team', action='store_true')
parser.add_argument('--rmuser', help='Remove account from a Docker Hub Team', action='store_true')
parser.add_argument('--listmembers', help='List members of a Docker Hub Team', action='store_true')
parser.add_argument('--listteams', help='List all teams within an org', action='store_true')
parser.add_argument('--finduserteams', help='List all teams an account is a member of within an org', action='store_true')
args = parser.parse_args()
hub = DockerHubAdmin.DockerHubAdmin(args.username, args.password)
if args.listteams:
    print(hub.listGroups(args.organization))
elif args.listmembers:
    print(hub.listMembers(args.organization, args.team))
elif args.finduserteams:
    print(hub.findUserGroups(args.organization, args.user))
elif args.adduser:
    hub.addUserGroup(args.organization, args.user, args.team)
elif args.rmuser:
    hub.removeUserGroup(args.organization, args.user, args.team)
import DockerHubAdmin
import argparse
parser = argparse.ArgumentParser(description='Python admin interface to Docker Hub')
parser.add_argument('-u','--username', help='Admin username for Docker Hub', required=True)
parser.add_argument('-p','--password', help='Admin password for Docker Hub', required=True)
parser.add_argument('-o','--organization', help='Password for Docker Hub', required=True)
parser.add_argument('--user', help='Non-Admin User Account on Docker Hub')
parser.add_argument('-t','--team', help='A Docker Hub Team')
parser.add_argument('--adduser', help='Add account to a Docker Hub Team', action='store_true')
parser.add_argument('--rmuser', help='Remove account from a Docker Hub Team', action='store_true')
parser.add_argument('--listmembers', help='List members of a Docker Hub Team', action='store_true')
parser.add_argument('--listteams', help='List all teams within an org', action='store_true')
parser.add_argument('--finduserteams', help='List all teams an account is a member of within an org', action='store_true')
args = parser.parse_args()
hub = DockerHubAdmin.DockerHubAdmin(args.username, args.password)
if args.listteams:
print(hub.listGroups(args.organization))
elif args.listmembers:
print(hub.listMembers(args.organization, args.team))
elif args.finduserteams:
print(hub.findUserGroups(args.organization, args.user))
elif args.adduser:
hub.addUserGroup(args.organization, args.user, args.team)
elif args.rmuser:
hub.removeUserGroup(args.organization, args.user, args.team) | fr | 0.221828 | #!/usr/bin/env python3 | 2.785575 | 3 |
code/frameworks/superresolution/train_sr_model.py | wukailu/EDSR-PyTorch | 0 | 6622611 | import sys
import os
sys.path = [os.getcwd()] + sys.path
print('current path is ', sys.path)
import model
print('path for model is ', os.path.realpath(sys.modules['model'].__file__))
from frameworks.superresolution.SRModel import load_model
from frameworks.classification.train_single_model import get_params, train_model, inference_statics
import utils.backend as backend
print('current backend is ', backend.name)
def prepare_params(params):
    """Normalize experiment parameters.

    Parses the raw params, adapts the dataset spec when the backbone is
    VDSR (which needs the 'input_large' dataset flag set), and fills in
    defaults for optional keys without overriding caller-supplied values.
    """
    from utils.tools import parse_params
    parsed = parse_params(params)
    if parsed['backbone'] == 'VDSR':
        datasets = parsed['datasets']
        if isinstance(datasets, dict):
            datasets['input_large'] = True
        else:
            # A bare dataset name: wrap it into a dict carrying the flag.
            parsed['datasets'] = {'name': datasets, 'input_large': True}
    defaults = {
        'metric': 'psnr255',
        'inference_statics': False,
        'skip_train': False,
        'save_model': False,
        'test_benchmark': False,
    }
    # Caller-supplied keys win over the defaults.
    return {**defaults, **parsed}
def test_SR_benchmark(test_model):
    """Evaluate *test_model* on the standard SR benchmark sets.

    Runs a single-GPU test pass over Set5/Set14/B100/Urban100 and logs the
    configured metric for each set via the backend.
    """
    from pytorch_lightning import Trainer
    from datasets import DataProvider
    trainer = Trainer(gpus=1)
    metric_key = test_model.params['metric']
    for bench_name in ('Set5', 'Set14', 'B100', 'Urban100'):
        provider = DataProvider({
            'name': bench_name,
            'test_only': True,
            'patch_size': test_model.params['dataset']['patch_size'],
            'ext': 'sep',
            'scale': test_model.params['scale'],
            'batch_size': 1,
        })
        results = trainer.test(test_dataloaders=provider.test_dl, model=test_model)
        backend.log_metric(bench_name + '_' + metric_key,
                           results[0]['test/' + metric_key])
if __name__ == "__main__":
params = get_params()
params = prepare_params(params)
print(params)
model = load_model(params)
if not params['skip_train']:
model = train_model(model, params, save_name='super_resolution', mode='max')
if params['test_benchmark']:
test_SR_benchmark(model)
if 'test_ssim' in params and params['test_ssim']:
model.params['metric'] = model.params['metric'].lower().replace('psnr', 'ssim')
model.metric = model.choose_metric()
test_SR_benchmark(model)
if params['inference_statics']:
inference_statics(model, batch_size=1)
| import sys
import os
sys.path = [os.getcwd()] + sys.path
print('current path is ', sys.path)
import model
print('path for model is ', os.path.realpath(sys.modules['model'].__file__))
from frameworks.superresolution.SRModel import load_model
from frameworks.classification.train_single_model import get_params, train_model, inference_statics
import utils.backend as backend
print('current backend is ', backend.name)
def prepare_params(params):
from utils.tools import parse_params
params = parse_params(params)
if params['backbone'] == 'VDSR':
if isinstance(params['datasets'], dict):
params['datasets']['input_large'] = True
else:
params['datasets'] = {'name': params['datasets'], 'input_large': True}
default_keys = {
'metric': 'psnr255',
'inference_statics': False,
'skip_train': False,
'save_model': False,
'test_benchmark': False,
}
params = {**default_keys, **params}
return params
def test_SR_benchmark(test_model):
from pytorch_lightning import Trainer
trainer = Trainer(gpus=1)
from datasets import DataProvider
benchmarks = ['Set5', 'Set14', 'B100', 'Urban100']
for d in benchmarks:
dataset_params = {
'name': d,
'test_only': True,
'patch_size': test_model.params['dataset']['patch_size'],
'ext': 'sep',
'scale': test_model.params['scale'],
"batch_size": 1,
}
provider = DataProvider(dataset_params)
ret = trainer.test(test_dataloaders=provider.test_dl, model=test_model)
backend.log_metric(d + '_' + test_model.params['metric'], ret[0]['test/' + test_model.params['metric']])
if __name__ == "__main__":
params = get_params()
params = prepare_params(params)
print(params)
model = load_model(params)
if not params['skip_train']:
model = train_model(model, params, save_name='super_resolution', mode='max')
if params['test_benchmark']:
test_SR_benchmark(model)
if 'test_ssim' in params and params['test_ssim']:
model.params['metric'] = model.params['metric'].lower().replace('psnr', 'ssim')
model.metric = model.choose_metric()
test_SR_benchmark(model)
if params['inference_statics']:
inference_statics(model, batch_size=1)
| none | 1 | 2.195712 | 2 | |
app/users/migrations/0002_organization_created_at_organization_updated_at.py | thevahidal/hoopoe-core | 5 | 6622612 | # Generated by Django 4.0 on 2021-12-23 16:09
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Add created_at and updated_at timestamp fields to Organization."""
    dependencies = [
        ('users', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='organization',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True),
            # preserve_default=False: the default used to backfill existing
            # rows during this migration is not kept on the model field.
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='organization',
            name='updated_at',
            field=models.DateTimeField(auto_now=True),
        ),
    ]
| # Generated by Django 4.0 on 2021-12-23 16:09
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='organization',
name='created_at',
field=models.DateTimeField(auto_now_add=True),
preserve_default=False,
),
migrations.AddField(
model_name='organization',
name='updated_at',
field=models.DateTimeField(auto_now=True),
),
]
| en | 0.902068 | # Generated by Django 4.0 on 2021-12-23 16:09 | 1.838587 | 2 |
models/multiphase/d2q9_csf/check.py | dlightbody/TCLB | 0 | 6622613 | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 5 14:13:06 2016
@author: mdzik
"""
from CLB import *
import matplotlib.pyplot as plt
import numpy as np
import bearded_octo_wookie.lbm as lbm
from sympy.plotting import *
from sympy import *
import scipy.optimize as so
init_printing()
n=Symbol('n')
W=Symbol('w')
n0=Symbol('n0')
phi = -tanh(2*(n-n0)*W) / 2.
lap = diff(phi,n,2) + diff(phi,n) / n
grad = diff(phi,n)
grad = diff(phi,n)
pprint( simplify(expand(( lap - diff(phi,n,2))/grad)) )
half = 64
Wnum = 0.125
array2mat = [{'ImmutableMatrix': np.matrix}, 'numpy']
laplace = lambdify([n,n0, W], lap, modules=array2mat)
phase = lambdify([n,n0, W], phi, modules=array2mat)
gradient = lambdify([n,n0, W], grad, modules=array2mat)
#
#
#X,Y = np.meshgrid(np.arange(256)-128,np.arange(256)-128)
#R = np.sqrt(X*X+Y*Y)
#R0 = 64
#PHI = phase(R,R0,0.25)
#z = -PHI*2.
#plt.plot( np.arctanh(z)[128,:] / 2. / 0.25, 'o' )
#plt.plot(R[128,:] - R0)
#plt.show()
#sfsdf
for c, fvti in [
#('k','/home/mdzik/projekty/TCLB/output/test1_2_VTK_P00_00001000.pvti'),
#('r','/home/mdzik/projekty/TCLB/output/test1_omega1_VTK_P00_00006000.pvti'),
#('r','/home/mdzik/projekty/TCLB/output/test1_VTK_P00_00002000.pvti') ,
('r','/home/mdzik/projekty/TCLB/output/test1_by_pf_VTK_P00_00006000.pvti')
]:
vti = VTIFile.VTIFile(fvti, True)
PhaseField = vti.get('PhaseField', vector=False)
#Curvature = vti.get('Curvature', vector=False)
X,Y = vti.getMeshGrid()
### find n00 and ww
(n00_l, ww_l, n00_r, ww_r), err = so.leastsq(lambda (C): -phase(X[half,:],C[0], C[1]) + phase(X[half,:],C[2],C[3]) -0.5 - PhaseField[half,:], (32., .25, 96., .25) )
X = X - (n00_l + n00_r) / 2.
Y = Y - half
R = np.sqrt(X**2 + Y**2)
(n00, ww), err = so.leastsq(lambda (C): phase(R,C[0], C[1])[half,:] - PhaseField[half,:], (n00_l, ww_l) )
print n00, ww
#ww = 0.025
#ww = 0.01
#plt.imshow( phase(R, n00, ww) - PhaseField )
#plt.colorbar()
#plt.show()
#plt.plot(phase(R, n00, ww)[half,:])
#plt.plot(PhaseField[half,:])
#plt.show()
#plt.plot( phase(R,n00, ww)[half,:] , 'o')
#plt.plot(PhaseField[half,:])
#plt.plot(R[half,:] - n00)
r_r0 = np.arctanh(-PhaseField * 2) /2 / Wnum
r_r0 = np.where(np.isnan(r_r0), 0, r_r0)
r_r0 = np.where(np.isinf(r_r0), 0, r_r0)
r_r0 = np.where(np.isneginf(r_r0), 0, r_r0)
#plt.plot(R[half,:] - n00)
#plt.plot(r_r0[half,:])
#plt.show()
print (n00_l, ww_l, n00_r, ww_r)
dn = 10
laplace2 = np.zeros_like(R)
grad2 = np.zeros_like(R)
grad2_X = np.zeros_like(R)
grad2_Y = np.zeros_like(R)
for i in range(9):
grad2_X = grad2_X + lbm.W[i] * lbm.e[i,0] * np.roll(np.roll(PhaseField,shift=-lbm.e[i,0],axis=0),shift=-lbm.e[i,1],axis=1) * 3.
grad2_Y = grad2_Y + lbm.W[i] * lbm.e[i,1] * np.roll(np.roll(PhaseField,shift=-lbm.e[i,0],axis=0),shift=-lbm.e[i,1],axis=1) * 3.
grad2 = np.sqrt(grad2_X**2 + grad2_Y**2)
grad2_inv = np.where( grad2 > 0, grad2 , 1)
grad2_inv = np.where( grad2 > 0, 1./grad2_inv , 0)
normal_X = np.where( grad2 > 0, grad2_X * grad2_inv, 0)
normal_Y = np.where( grad2 > 0, grad2_Y * grad2_inv, 0)
#plt.quiver(X.T,Y.T,normal_X, normal_Y, units='xy', scale=0.5, angles= 'xy')
#plt.imshow(PhaseField)
#lt.show()
dr = 0.001
rr0 = np.ones_like(R) * 25
xx = -X
yy = -Y
rt = np.sqrt(xx*xx + yy*yy)
#nx = xx / rt
#ny = yy / rt
nx = normal_X
ny = normal_Y
rr0 = np.ones_like(R)
for it in range(16):
#xx = nx * (r_r0+rr0)
#yy = ny * (r_r0+rr0)
xx = nx * (r_r0+rr0)
yy = ny * (r_r0+rr0)
#plt.plot( (R-np.sqrt(xx*xx+yy*yy))[half,:] , '-')
#plt.plot( (r_r0)[half,:] , 'o')
# plt.plot( normal_Y[half,:] , '-')
#plt.show()
f1 = np.zeros_like(R)
for i in range(9):
r_r0i = np.roll(np.roll(r_r0,shift=lbm.e[i,0],axis=0),shift=lbm.e[i,1],axis=1)
ri = np.sqrt( (lbm.e[i,0] - xx)**2 + (lbm.e[i,1] - yy)**2 )
f1 = f1 + ( r_r0i - ( ri - rr0 ) )
rr0 = rr0 + dr
xx = nx * (r_r0+rr0)
yy = ny * (r_r0+rr0)
f2 = np.zeros_like(R)
for i in range(9):
r_r0i = np.roll(np.roll(r_r0,shift=lbm.e[i,0],axis=0),shift=lbm.e[i,1],axis=1)
ri = np.sqrt( (lbm.e[i,0] - xx)**2 + (lbm.e[i,1] - yy)**2 )
f2 = f2 + ( r_r0i - ( ri - rr0 ) )
A = (f2 - f1) / dr
B = f2 - A * (rr0)
temp = - B / A
rr0 = temp#np.where( temp < 0, rr0 * 0.5, temp)
pme = rr0
pme = np.where(-(4 * PhaseField**2 - 1) < 0.1, 0, pme)
plt.plot(pme[half,:], 'wo')
plt.plot(pme[half,:], 'k-', lw=1)
#plt.plot(Curvature[half,:], 'k+')
plt.show()
#plt.imshow(np.where(np.absolute(R - n00) < 4, rr0, 0), interpolation='nearest')
# plt.colorbar()
# plt.show()
#
# laplace2 = PhaseField * (1./9 - 1.)
# for i in range(1,9):
# laplace2 = laplace2 + np.roll(np.roll(PhaseField,shift=-lbm.e[i,0],axis=0),shift=-lbm.e[i,1],axis=1) / 9.
#
#
#
# grad2 = np.sqrt(grad2_X**2 + grad2_Y**2)[half, :]
#
# p2 = PhaseField[half, :]**2
# grad_lengt = (1. - 4 * p2 ) * ww
# curvature = ( laplace2[half, :] - 2 * PhaseField[half, :] * (16 * p2 - 4. ) * ww**2 ) / grad_lengt
#
#
# rrA = np.where(np.absolute(R - n00) < 6, rr0, 0)
# plt.plot( rrA[half,:] , 'o', label="Circ")
#
# rr1 = np.where(np.absolute(R - n00) < 6, 1./curvature, 0)
# plt.plot( rr1[half,:] , 'o', label="Lap")
# plt.legend()
# plt.show()
#
#
#plt.plot(laplace2[half, n00-dn:n00+dn] , c+'o')
#plt.plot(laplace(R,n00, ww)[half, n00-dn:n00+dn], c+'-')
#plt.plot(laplace(R,n00, ww)[half, n00-dn:n00+dn] - laplace2[half, n00-dn:n00+dn] , c+'-')
#plt.plot(laplace2[half, n00-dn:n00+dn], c+'o')
#plt.plot( R[half, n00-dn:n00+dn], Curvature[half, n00-dn:n00+dn], c+'o')
#plt.plot( R[half, n00-dn:n00+dn], np.ones_like(curvature)[n00-dn:n00+dn] / R[half, n00-dn:n00+dn] )
#plt.plot((n00,n00), (0, 1./n00))
# plt.figure()
#
# plt.plot( curvature[n00-dn:n00+dn], 'o-' )
# plt.twinx()
# plt.plot(phase(R,n00, ww)[half, n00-dn:n00+dn], 'k')
# #plt.plot( np.ones_like(curvature)[n00-dn:n00+dn] * R[half, n00-dn:n00+dn] )
#
#
#
# plt.figure()
# plt.plot(grad2, 'o')
# plt.plot(grad_lengt)
#
# #plt.plot(grad_lengt )
#
# #dn = 10
# #plt.plot( curvature[n00-dn:n00+dn] )
# #plt.plot( np.ones_like(curvature)[n00-dn:n00+dn] * R[half, n00-dn:n00+dn] )
# ##plt.plot(grad_lengt, 'o-')
#
# plt.figure()
# plt.plot(phase(R,n00, ww)[half, :])
# plt.plot(PhaseField[half,:], 'o')
plt.show()
| # -*- coding: utf-8 -*-
"""
Created on Tue Apr 5 14:13:06 2016
@author: mdzik
"""
from CLB import *
import matplotlib.pyplot as plt
import numpy as np
import bearded_octo_wookie.lbm as lbm
from sympy.plotting import *
from sympy import *
import scipy.optimize as so
init_printing()
n=Symbol('n')
W=Symbol('w')
n0=Symbol('n0')
phi = -tanh(2*(n-n0)*W) / 2.
lap = diff(phi,n,2) + diff(phi,n) / n
grad = diff(phi,n)
grad = diff(phi,n)
pprint( simplify(expand(( lap - diff(phi,n,2))/grad)) )
half = 64
Wnum = 0.125
array2mat = [{'ImmutableMatrix': np.matrix}, 'numpy']
laplace = lambdify([n,n0, W], lap, modules=array2mat)
phase = lambdify([n,n0, W], phi, modules=array2mat)
gradient = lambdify([n,n0, W], grad, modules=array2mat)
#
#
#X,Y = np.meshgrid(np.arange(256)-128,np.arange(256)-128)
#R = np.sqrt(X*X+Y*Y)
#R0 = 64
#PHI = phase(R,R0,0.25)
#z = -PHI*2.
#plt.plot( np.arctanh(z)[128,:] / 2. / 0.25, 'o' )
#plt.plot(R[128,:] - R0)
#plt.show()
#sfsdf
# NOTE(review): Python 2 code — the `print x, y` statements and the
# tuple-unpacking lambdas below (`lambda (C): ...`) are SyntaxErrors on
# Python 3; port before reuse.
# For each enabled TCLB output file: fit the radial tanh profile, build
# LBM-stencil gradients/normals, then iterate a secant solve for the local
# interface radius and plot it along row `half`.
for c, fvti in [
    #('k','/home/mdzik/projekty/TCLB/output/test1_2_VTK_P00_00001000.pvti'),
    #('r','/home/mdzik/projekty/TCLB/output/test1_omega1_VTK_P00_00006000.pvti'),
    #('r','/home/mdzik/projekty/TCLB/output/test1_VTK_P00_00002000.pvti') ,
    ('r','/home/mdzik/projekty/TCLB/output/test1_by_pf_VTK_P00_00006000.pvti')
    ]:
    # Load the VTK image file and pull the phase field plus mesh coordinates.
    vti = VTIFile.VTIFile(fvti, True)
    PhaseField = vti.get('PhaseField', vector=False)
    #Curvature = vti.get('Curvature', vector=False)
    X,Y = vti.getMeshGrid()
    ### find n00 and ww
    # Least-squares fit of two opposing tanh profiles along row `half` to
    # locate the left/right interface positions (n00_l, n00_r) and widths.
    (n00_l, ww_l, n00_r, ww_r), err = so.leastsq(lambda (C): -phase(X[half,:],C[0], C[1]) + phase(X[half,:],C[2],C[3]) -0.5 - PhaseField[half,:], (32., .25, 96., .25) )
    # Re-centre coordinates on the droplet midpoint and fit one radial profile.
    X = X - (n00_l + n00_r) / 2.
    Y = Y - half
    R = np.sqrt(X**2 + Y**2)
    (n00, ww), err = so.leastsq(lambda (C): phase(R,C[0], C[1])[half,:] - PhaseField[half,:], (n00_l, ww_l) )
    print n00, ww
    #ww = 0.025
    #ww = 0.01
    #plt.imshow( phase(R, n00, ww) - PhaseField )
    #plt.colorbar()
    #plt.show()
    #plt.plot(phase(R, n00, ww)[half,:])
    #plt.plot(PhaseField[half,:])
    #plt.show()
    #plt.plot( phase(R,n00, ww)[half,:] , 'o')
    #plt.plot(PhaseField[half,:])
    #plt.plot(R[half,:] - n00)
    # Invert the tanh profile to a signed distance (r - r0) from the
    # interface; arctanh diverges at |phi| = 0.5, so mask NaN/inf cells to 0.
    r_r0 = np.arctanh(-PhaseField * 2) /2 / Wnum
    r_r0 = np.where(np.isnan(r_r0), 0, r_r0)
    r_r0 = np.where(np.isinf(r_r0), 0, r_r0)
    # NOTE(review): np.isinf already matches -inf, so this line is redundant.
    r_r0 = np.where(np.isneginf(r_r0), 0, r_r0)
    #plt.plot(R[half,:] - n00)
    #plt.plot(r_r0[half,:])
    #plt.show()
    print (n00_l, ww_l, n00_r, ww_r)
    dn = 10
    laplace2 = np.zeros_like(R)
    grad2 = np.zeros_like(R)
    grad2_X = np.zeros_like(R)
    grad2_Y = np.zeros_like(R)
    # Isotropic phase-field gradient built from the D2Q9 lattice stencil
    # (weights lbm.W, directions lbm.e); np.roll shifts gather neighbours.
    for i in range(9):
        grad2_X = grad2_X + lbm.W[i] * lbm.e[i,0] * np.roll(np.roll(PhaseField,shift=-lbm.e[i,0],axis=0),shift=-lbm.e[i,1],axis=1) * 3.
        grad2_Y = grad2_Y + lbm.W[i] * lbm.e[i,1] * np.roll(np.roll(PhaseField,shift=-lbm.e[i,0],axis=0),shift=-lbm.e[i,1],axis=1) * 3.
    grad2 = np.sqrt(grad2_X**2 + grad2_Y**2)
    # Unit normals with guarded division: zero-gradient cells map to 0.
    grad2_inv = np.where( grad2 > 0, grad2 , 1)
    grad2_inv = np.where( grad2 > 0, 1./grad2_inv , 0)
    normal_X = np.where( grad2 > 0, grad2_X * grad2_inv, 0)
    normal_Y = np.where( grad2 > 0, grad2_Y * grad2_inv, 0)
    #plt.quiver(X.T,Y.T,normal_X, normal_Y, units='xy', scale=0.5, angles= 'xy')
    #plt.imshow(PhaseField)
    #lt.show()
    # Secant iteration for the local radius rr0: the residual f compares each
    # neighbour's signed distance with the distance predicted by a circle of
    # radius rr0 centred along the local normal.
    dr = 0.001
    rr0 = np.ones_like(R) * 25
    xx = -X
    yy = -Y
    rt = np.sqrt(xx*xx + yy*yy)
    #nx = xx / rt
    #ny = yy / rt
    nx = normal_X
    ny = normal_Y
    # NOTE(review): this overwrites the "* 25" initial guess just above.
    rr0 = np.ones_like(R)
    for it in range(16):
        #xx = nx * (r_r0+rr0)
        #yy = ny * (r_r0+rr0)
        xx = nx * (r_r0+rr0)
        yy = ny * (r_r0+rr0)
        #plt.plot( (R-np.sqrt(xx*xx+yy*yy))[half,:] , '-')
        #plt.plot( (r_r0)[half,:] , 'o')
        # plt.plot( normal_Y[half,:] , '-')
        #plt.show()
        # Residual at the current radius estimate.
        f1 = np.zeros_like(R)
        for i in range(9):
            r_r0i = np.roll(np.roll(r_r0,shift=lbm.e[i,0],axis=0),shift=lbm.e[i,1],axis=1)
            ri = np.sqrt( (lbm.e[i,0] - xx)**2 + (lbm.e[i,1] - yy)**2 )
            f1 = f1 + ( r_r0i - ( ri - rr0 ) )
        # Residual at rr0 + dr to estimate df/drr0 by finite difference.
        rr0 = rr0 + dr
        xx = nx * (r_r0+rr0)
        yy = ny * (r_r0+rr0)
        f2 = np.zeros_like(R)
        for i in range(9):
            r_r0i = np.roll(np.roll(r_r0,shift=lbm.e[i,0],axis=0),shift=lbm.e[i,1],axis=1)
            ri = np.sqrt( (lbm.e[i,0] - xx)**2 + (lbm.e[i,1] - yy)**2 )
            f2 = f2 + ( r_r0i - ( ri - rr0 ) )
        # Linearize f ~= A*rr0 + B and jump to its root.
        A = (f2 - f1) / dr
        B = f2 - A * (rr0)
        temp = - B / A
        rr0 = temp#np.where( temp < 0, rr0 * 0.5, temp)
    # Zero the radius estimate away from the interface, then plot the cut.
    pme = rr0
    pme = np.where(-(4 * PhaseField**2 - 1) < 0.1, 0, pme)
    plt.plot(pme[half,:], 'wo')
    plt.plot(pme[half,:], 'k-', lw=1)
    #plt.plot(Curvature[half,:], 'k+')
    plt.show()
#plt.imshow(np.where(np.absolute(R - n00) < 4, rr0, 0), interpolation='nearest')
# plt.colorbar()
# plt.show()
#
# laplace2 = PhaseField * (1./9 - 1.)
# for i in range(1,9):
# laplace2 = laplace2 + np.roll(np.roll(PhaseField,shift=-lbm.e[i,0],axis=0),shift=-lbm.e[i,1],axis=1) / 9.
#
#
#
# grad2 = np.sqrt(grad2_X**2 + grad2_Y**2)[half, :]
#
# p2 = PhaseField[half, :]**2
# grad_lengt = (1. - 4 * p2 ) * ww
# curvature = ( laplace2[half, :] - 2 * PhaseField[half, :] * (16 * p2 - 4. ) * ww**2 ) / grad_lengt
#
#
# rrA = np.where(np.absolute(R - n00) < 6, rr0, 0)
# plt.plot( rrA[half,:] , 'o', label="Circ")
#
# rr1 = np.where(np.absolute(R - n00) < 6, 1./curvature, 0)
# plt.plot( rr1[half,:] , 'o', label="Lap")
# plt.legend()
# plt.show()
#
#
#plt.plot(laplace2[half, n00-dn:n00+dn] , c+'o')
#plt.plot(laplace(R,n00, ww)[half, n00-dn:n00+dn], c+'-')
#plt.plot(laplace(R,n00, ww)[half, n00-dn:n00+dn] - laplace2[half, n00-dn:n00+dn] , c+'-')
#plt.plot(laplace2[half, n00-dn:n00+dn], c+'o')
#plt.plot( R[half, n00-dn:n00+dn], Curvature[half, n00-dn:n00+dn], c+'o')
#plt.plot( R[half, n00-dn:n00+dn], np.ones_like(curvature)[n00-dn:n00+dn] / R[half, n00-dn:n00+dn] )
#plt.plot((n00,n00), (0, 1./n00))
# plt.figure()
#
# plt.plot( curvature[n00-dn:n00+dn], 'o-' )
# plt.twinx()
# plt.plot(phase(R,n00, ww)[half, n00-dn:n00+dn], 'k')
# #plt.plot( np.ones_like(curvature)[n00-dn:n00+dn] * R[half, n00-dn:n00+dn] )
#
#
#
# plt.figure()
# plt.plot(grad2, 'o')
# plt.plot(grad_lengt)
#
# #plt.plot(grad_lengt )
#
# #dn = 10
# #plt.plot( curvature[n00-dn:n00+dn] )
# #plt.plot( np.ones_like(curvature)[n00-dn:n00+dn] * R[half, n00-dn:n00+dn] )
# ##plt.plot(grad_lengt, 'o-')
#
# plt.figure()
# plt.plot(phase(R,n00, ww)[half, :])
# plt.plot(PhaseField[half,:], 'o')
plt.show()
| en | 0.216943 | # -*- coding: utf-8 -*- Created on Tue Apr 5 14:13:06 2016 @author: mdzik # # #X,Y = np.meshgrid(np.arange(256)-128,np.arange(256)-128) #R = np.sqrt(X*X+Y*Y) #R0 = 64 #PHI = phase(R,R0,0.25) #z = -PHI*2. #plt.plot( np.arctanh(z)[128,:] / 2. / 0.25, 'o' ) #plt.plot(R[128,:] - R0) #plt.show() #sfsdf #('k','/home/mdzik/projekty/TCLB/output/test1_2_VTK_P00_00001000.pvti'), #('r','/home/mdzik/projekty/TCLB/output/test1_omega1_VTK_P00_00006000.pvti'), #('r','/home/mdzik/projekty/TCLB/output/test1_VTK_P00_00002000.pvti') , #Curvature = vti.get('Curvature', vector=False) ### find n00 and ww #ww = 0.025 #ww = 0.01 #plt.imshow( phase(R, n00, ww) - PhaseField ) #plt.colorbar() #plt.show() #plt.plot(phase(R, n00, ww)[half,:]) #plt.plot(PhaseField[half,:]) #plt.show() #plt.plot( phase(R,n00, ww)[half,:] , 'o') #plt.plot(PhaseField[half,:]) #plt.plot(R[half,:] - n00) #plt.plot(R[half,:] - n00) #plt.plot(r_r0[half,:]) #plt.show() #plt.quiver(X.T,Y.T,normal_X, normal_Y, units='xy', scale=0.5, angles= 'xy') #plt.imshow(PhaseField) #lt.show() #nx = xx / rt #ny = yy / rt #xx = nx * (r_r0+rr0) #yy = ny * (r_r0+rr0) #plt.plot( (R-np.sqrt(xx*xx+yy*yy))[half,:] , '-') #plt.plot( (r_r0)[half,:] , 'o') # plt.plot( normal_Y[half,:] , '-') #plt.show() #np.where( temp < 0, rr0 * 0.5, temp) #plt.plot(Curvature[half,:], 'k+') #plt.imshow(np.where(np.absolute(R - n00) < 4, rr0, 0), interpolation='nearest') # plt.colorbar() # plt.show() # # laplace2 = PhaseField * (1./9 - 1.) # for i in range(1,9): # laplace2 = laplace2 + np.roll(np.roll(PhaseField,shift=-lbm.e[i,0],axis=0),shift=-lbm.e[i,1],axis=1) / 9. # # # # grad2 = np.sqrt(grad2_X**2 + grad2_Y**2)[half, :] # # p2 = PhaseField[half, :]**2 # grad_lengt = (1. - 4 * p2 ) * ww # curvature = ( laplace2[half, :] - 2 * PhaseField[half, :] * (16 * p2 - 4. 
) * ww**2 ) / grad_lengt # # # rrA = np.where(np.absolute(R - n00) < 6, rr0, 0) # plt.plot( rrA[half,:] , 'o', label="Circ") # # rr1 = np.where(np.absolute(R - n00) < 6, 1./curvature, 0) # plt.plot( rr1[half,:] , 'o', label="Lap") # plt.legend() # plt.show() # # #plt.plot(laplace2[half, n00-dn:n00+dn] , c+'o') #plt.plot(laplace(R,n00, ww)[half, n00-dn:n00+dn], c+'-') #plt.plot(laplace(R,n00, ww)[half, n00-dn:n00+dn] - laplace2[half, n00-dn:n00+dn] , c+'-') #plt.plot(laplace2[half, n00-dn:n00+dn], c+'o') #plt.plot( R[half, n00-dn:n00+dn], Curvature[half, n00-dn:n00+dn], c+'o') #plt.plot( R[half, n00-dn:n00+dn], np.ones_like(curvature)[n00-dn:n00+dn] / R[half, n00-dn:n00+dn] ) #plt.plot((n00,n00), (0, 1./n00)) # plt.figure() # # plt.plot( curvature[n00-dn:n00+dn], 'o-' ) # plt.twinx() # plt.plot(phase(R,n00, ww)[half, n00-dn:n00+dn], 'k') # #plt.plot( np.ones_like(curvature)[n00-dn:n00+dn] * R[half, n00-dn:n00+dn] ) # # # # plt.figure() # plt.plot(grad2, 'o') # plt.plot(grad_lengt) # # #plt.plot(grad_lengt ) # # #dn = 10 # #plt.plot( curvature[n00-dn:n00+dn] ) # #plt.plot( np.ones_like(curvature)[n00-dn:n00+dn] * R[half, n00-dn:n00+dn] ) # ##plt.plot(grad_lengt, 'o-') # # plt.figure() # plt.plot(phase(R,n00, ww)[half, :]) # plt.plot(PhaseField[half,:], 'o') | 2.24014 | 2 |
Detectors/UnmergingDetector.py | rothadamg/UPSITE | 1 | 6622614 | from SingleStageDetector import SingleStageDetector
from ExampleBuilders.UnmergingExampleBuilder import UnmergingExampleBuilder
from ExampleWriters.UnmergingExampleWriter import UnmergingExampleWriter
from Classifiers.SVMMultiClassClassifier import SVMMultiClassClassifier
from Evaluators.AveragingMultiClassEvaluator import AveragingMultiClassEvaluator
import itertools, sys, os
class UnmergingDetector(SingleStageDetector):
    """
    Makes valid argument combinations for BioNLP type events.

    Wires the unmerging example builder/writer together with an SVM
    multiclass classifier and an averaging multiclass evaluator on top of
    the generic single-stage detector pipeline.
    """
    def __init__(self):
        SingleStageDetector.__init__(self)
        self.exampleBuilder = UnmergingExampleBuilder
        self.exampleWriter = UnmergingExampleWriter()
        self.Classifier = SVMMultiClassClassifier
        self.evaluator = AveragingMultiClassEvaluator
        self.tag = "unmerging-"

    def buildExamples(self, model, datas, outputs, golds=[], exampleStyle=None, saveIdsToModel=False, parse=None):
        """Generate unmerging examples for each (data, output, gold) triple.

        datas/outputs/golds are parallel sequences; each data/gold entry may
        be a single corpus or a list of corpora (all appended to the same
        output file).  exampleStyle and parse default to the values stored
        in *model*.  (The mutable default golds=[] is only iterated, never
        mutated, so it is safe here.)
        """
        # Fall back to the configuration recorded in the model.
        if exampleStyle is None:  # fixed: was `== None`
            exampleStyle = model.getStr(self.tag+"example-style")
        if parse is None:  # fixed: was `== None`
            parse = self.getStr(self.tag+"parse", model)
        self.structureAnalyzer.load(model)
        self.exampleBuilder.structureAnalyzer = self.structureAnalyzer
        for data, output, gold in itertools.izip_longest(datas, outputs, golds, fillvalue=[]):
            print >> sys.stderr, "Example generation for", output
            # Normalize a single corpus to a one-element list.
            if not isinstance(data, (list, tuple)): data = [data]
            if not isinstance(gold, (list, tuple)): gold = [gold]
            append = False
            for dataSet, goldSet in itertools.izip_longest(data, gold, fillvalue=None):
                if goldSet is None:
                    goldSet = dataSet  # no gold available: use the data itself
                if dataSet is not None:
                    self.exampleBuilder.run(dataSet, output, parse, None, exampleStyle,
                                            model.get(self.tag+"ids.classes", True),
                                            model.get(self.tag+"ids.features", True),
                                            goldSet, append, saveIdsToModel,
                                            structureAnalyzer=self.structureAnalyzer)
                    append = True
        if saveIdsToModel:
            model.save()
from ExampleBuilders.UnmergingExampleBuilder import UnmergingExampleBuilder
from ExampleWriters.UnmergingExampleWriter import UnmergingExampleWriter
from Classifiers.SVMMultiClassClassifier import SVMMultiClassClassifier
from Evaluators.AveragingMultiClassEvaluator import AveragingMultiClassEvaluator
import itertools, sys, os
class UnmergingDetector(SingleStageDetector):
    """
    Makes valid argument combinations for BioNLP type events.

    Configures the single-stage pipeline with the unmerging example
    builder/writer, an SVM multiclass classifier and an averaging
    multiclass evaluator.  (Python 2 code: izip_longest, print-statement.)
    """
    def __init__(self):
        SingleStageDetector.__init__(self)
        self.exampleBuilder = UnmergingExampleBuilder
        self.exampleWriter = UnmergingExampleWriter()
        self.Classifier = SVMMultiClassClassifier
        self.evaluator = AveragingMultiClassEvaluator
        self.tag = "unmerging-"

    def buildExamples(self, model, datas, outputs, golds=[], exampleStyle=None, saveIdsToModel=False, parse=None):
        """Generate unmerging examples for each (data, output, gold) triple.

        datas/outputs/golds are parallel sequences; each data/gold entry may
        be a single corpus or a list of corpora, all appended to one output.
        exampleStyle/parse default to the values stored in *model*.
        NOTE(review): `== None` / `!= None` should be `is None` / `is not
        None`; the mutable default golds=[] is only iterated, so harmless.
        """
        if exampleStyle == None:
            exampleStyle = model.getStr(self.tag+"example-style")
        if parse == None:
            parse = self.getStr(self.tag+"parse", model)
        self.structureAnalyzer.load(model)
        self.exampleBuilder.structureAnalyzer = self.structureAnalyzer
        for data, output, gold in itertools.izip_longest(datas, outputs, golds, fillvalue=[]):
            print >> sys.stderr, "Example generation for", output
            # Normalize a single corpus to a one-element list.
            if not isinstance(data, (list, tuple)): data = [data]
            if not isinstance(gold, (list, tuple)): gold = [gold]
            append = False
            for dataSet, goldSet in itertools.izip_longest(data, gold, fillvalue=None):
                if goldSet == None:
                    goldSet = dataSet  # no gold available: use the data itself
                if dataSet != None:
                    self.exampleBuilder.run(dataSet, output, parse, None, exampleStyle, model.get(self.tag+"ids.classes",
                                            True), model.get(self.tag+"ids.features", True), goldSet, append, saveIdsToModel,
                                            structureAnalyzer=self.structureAnalyzer)
                    append = True
        if saveIdsToModel:
            model.save()
moisture_tracers/regrid_common.py | LSaffin/moisture_tracers | 0 | 6622615 | <reponame>LSaffin/moisture_tracers
"""Put all forecast data on a common grid
Usage:
regrid_common.py <path> <year> <month> <day> <target>
Arguments:
<path>
<year>
<month>
<day>
<target>
Options:
-h --help
Show this screen.
"""
import datetime
import docopt
import numpy as np
import iris
from iris.coords import DimCoord
from . import grey_zone_forecast
def main():
    """Command-line driver: regrid every lead time of a forecast onto the
    target grid and save one netCDF file per lead time.

    Arguments come from the module docstring via docopt; the target grid is
    read from the <target> cube file.
    """
    args = docopt.docopt(__doc__)
    start_time = datetime.datetime(
        year=int(args["<year>"]),
        month=int(args["<month>"]),
        day=int(args["<day>"]),
    )
    forecast = grey_zone_forecast(
        args["<path>"],
        start_time=start_time,
        lead_times=range(1, 48 + 1),
        grid=None,
    )
    target_cube = iris.load_cube(args["<target>"])
    # The regridding scheme is stateless across cubes: create it once
    # instead of once per lead time (hoisted loop invariant).
    regridder = iris.analysis.AreaWeighted()
    for cubes in forecast:
        print(forecast.lead_time)
        newcubes = iris.cube.CubeList()
        for cube in cubes:
            # Skip scalar/1-d cubes and raw coordinate fields.
            if cube.ndim > 1 and cube.name() not in ["longitude", "latitude"]:
                newcubes.append(cube.regrid(target_cube, regridder))
        iris.save(
            newcubes,
            "{}_T+{:02d}_common_grid.nc".format(
                forecast.start_time.strftime("%Y%m%dT%H%M"),
                int(forecast.lead_time.total_seconds() // 3600),
            ),
        )
def generate_common_grid(high_res_cube, low_res_cube):
    """Subset *low_res_cube* to the horizontal domain of *high_res_cube*.

    The result keeps the low-resolution grid spacing but covers only the
    high-resolution domain (points strictly inside its lon/lat limits).
    """
    lons = high_res_cube.coord("grid_longitude").points
    lats = high_res_cube.coord("grid_latitude").points
    lon_min, lon_max = lons[0], lons[-1]
    lat_min, lat_max = lats[0], lats[-1]
    domain = iris.Constraint(
        grid_longitude=lambda cell: lon_min < cell < lon_max,
        grid_latitude=lambda cell: lat_min < cell < lat_max,
    )
    return low_res_cube.extract(domain)
def generate_1km_grid(cube_500m, coarse_factor=2):
    """Build an empty template cube on a grid coarsened by *coarse_factor*
    from the 500 m forecast grid (default 2 -> 1 km).

    Fresh DimCoords are created (rather than subsetting the cube) so that
    guess_bounds() yields contiguous bounds suitable for area-weighted
    regridding.
    """
    src_lon = cube_500m.coord("grid_longitude")
    src_lat = cube_500m.coord("grid_latitude")

    def _coarsen(coord):
        # Average every `coarse_factor` points and rebuild the coordinate,
        # copying metadata from the source; bounds are guessed afterwards.
        new_coord = DimCoord(
            chop_coord(coord.points, coarse_factor),
            standard_name=coord.standard_name,
            units=coord.units,
            attributes=coord.attributes,
            coord_system=coord.coord_system,
            circular=coord.circular,
        )
        new_coord.guess_bounds()
        return new_coord

    lon = _coarsen(src_lon)
    lat = _coarsen(src_lat)
    # Zero-filled template cube on the coarsened (lat, lon) grid.
    return iris.cube.Cube(
        data=np.zeros([len(lat.points), len(lon.points)]),
        dim_coords_and_dims=[(lat, 0), (lon, 1)],
    )
def chop_coord(points, coarse_factor):
    """Average *points* in consecutive groups of *coarse_factor*.

    Trailing points that do not fill a complete group are discarded
    before averaging.
    """
    usable = len(points) - (len(points) % coarse_factor)
    trimmed = points[:usable]
    return np.mean(trimmed.reshape(-1, coarse_factor), axis=1)
if __name__ == "__main__":
main()
| """Put all forecast data on a common grid
Usage:
regrid_common.py <path> <year> <month> <day> <target>
Arguments:
<path>
<year>
<month>
<day>
<target>
Options:
-h --help
Show this screen.
"""
import datetime
import docopt
import numpy as np
import iris
from iris.coords import DimCoord
from . import grey_zone_forecast
def main():
    """Command-line driver: regrid every lead time of a forecast onto the
    target grid and save one netCDF file per lead time."""
    args = docopt.docopt(__doc__)
    start_time = datetime.datetime(
        year=int(args["<year>"]),
        month=int(args["<month>"]),
        day=int(args["<day>"]),
    )
    forecast = grey_zone_forecast(
        args["<path>"],
        start_time=start_time,
        lead_times=range(1, 48 + 1),
        grid=None,
    )
    target_cube = iris.load_cube(args["<target>"])
    for cubes in forecast:
        print(forecast.lead_time)
        # NOTE(review): AreaWeighted() is loop-invariant and could be
        # created once before the loop.
        regridder = iris.analysis.AreaWeighted()
        newcubes = iris.cube.CubeList()
        for cube in cubes:
            # Skip scalar/1-d cubes and raw coordinate fields.
            if cube.ndim > 1 and cube.name() not in ["longitude", "latitude"]:
                newcube = cube.regrid(target_cube, regridder)
                newcubes.append(newcube)
        iris.save(
            newcubes,
            "{}_T+{:02d}_common_grid.nc".format(
                forecast.start_time.strftime("%Y%m%dT%H%M"),
                int(forecast.lead_time.total_seconds() // 3600),
            ),
        )
def generate_common_grid(high_res_cube, low_res_cube):
    """We want a cube with the grid spacing of the low_res_cube but restricted to the
    domain of the high_res_cube
    """
    # Domain limits come from the high-resolution cube's grid coordinates.
    lon = high_res_cube.coord("grid_longitude").points
    lat = high_res_cube.coord("grid_latitude").points
    # Keep only low-res points strictly inside the high-res domain.
    common_grid_cube = low_res_cube.extract(
        iris.Constraint(
            grid_longitude=lambda x: lon[0] < x < lon[-1],
            grid_latitude=lambda x: lat[0] < x < lat[-1],
        )
    )
    return common_grid_cube
def generate_1km_grid(cube_500m, coarse_factor=2):
    """Generate a 1km grid from the 500m-resolution forecast"""
    # Need to recreate the coordinates as just subsetting the cube keeps the old
    # coordinate bounds which then don't allow area-weighted regridding because they
    # are not contiguous
    # Extract coordinates
    lon = cube_500m.coord("grid_longitude")
    lat = cube_500m.coord("grid_latitude")
    # Get the average coordinate of every n points, where n is the coarse graining
    # factor
    # Chop off where the domain doesn't divide into the coarse factor
    lon_points = chop_coord(lon.points, coarse_factor)
    lat_points = chop_coord(lat.points, coarse_factor)
    # Rebuild fresh DimCoords (metadata copied from the originals) so that
    # guess_bounds() below produces contiguous bounds.
    lon = DimCoord(
        lon_points,
        standard_name=lon.standard_name,
        units=lon.units,
        attributes=lon.attributes,
        coord_system=lon.coord_system,
        circular=lon.circular,
    )
    lat = DimCoord(
        lat_points,
        standard_name=lat.standard_name,
        units=lat.units,
        attributes=lat.attributes,
        coord_system=lat.coord_system,
        circular=lat.circular,
    )
    lon.guess_bounds()
    lat.guess_bounds()
    # Zero-filled template cube on the coarsened (lat, lon) grid.
    cube_1km_grid = iris.cube.Cube(
        data=np.zeros([len(lat.points), len(lon.points)]),
        dim_coords_and_dims=[(lat, 0), (lon, 1)],
    )
    return cube_1km_grid
def chop_coord(points, coarse_factor):
    """Block-average *points* in groups of *coarse_factor*, dropping any
    remainder that does not fill a complete group."""
    n_groups = len(points) // coarse_factor
    grouped = points[: n_groups * coarse_factor].reshape(n_groups, coarse_factor)
    return grouped.mean(axis=1)
if __name__ == "__main__":
main() | en | 0.830618 | Put all forecast data on a common grid Usage: regrid_common.py <path> <year> <month> <day> <target> Arguments: <path> <year> <month> <day> <target> Options: -h --help Show this screen. We want a cube with the grid spacing of the low_res_cube but restricted to the domain of the high_res_cube Generate a 1km grid from the 500m-resolution forecast # Need to recreate the coordinates as just subsetting the cube keeps the old # coordinate bounds which then don't allow area-weighted regridding because they # are not contiguous # Extract coordinates # Get the average coordinate of every n points, where n is the coarse graining # factor # Chop off where the domain doesn't divide into the coarse factor | 2.663635 | 3 |
programmirovanie/01_the_basics_of_programming/02_pascal/02/fib.py | uldash/stolyarov | 0 | 6622616 | <filename>programmirovanie/01_the_basics_of_programming/02_pascal/02/fib.py<gh_stars>0
#!/usr/bin/env python3
def fib(n):
    """Return the n-th Fibonacci number (1-indexed: fib(1) == fib(2) == 1).

    For n <= 2 the loop body never runs and 1 is returned, matching the
    original script's behaviour.
    """
    a, b = 1, 1
    for _ in range(n - 2):
        a, b = b, a + b
    return b

if __name__ == "__main__":
    # Prompt text preserved byte-for-byte from the original script.
    n = int(input("Номер элемента ряда Фибоначчи: "))
    print("Значение этого элемента:", fib(n))
| <filename>programmirovanie/01_the_basics_of_programming/02_pascal/02/fib.py<gh_stars>0
#!/usr/bin/env python3
# Iteratively compute the n-th Fibonacci number (1-indexed); for n <= 2 the
# loop never runs and the answer is 1.
fib_prev = 1
fib_curr = 1
n = int(input("Номер элемента ряда Фибоначчи: "))
for _ in range(n - 2):
    fib_prev, fib_curr = fib_curr, fib_prev + fib_curr
print("Значение этого элемента:", fib_curr)
| fr | 0.221828 | #!/usr/bin/env python3 | 3.872893 | 4 |
code/StoreProject/storeApp/models.py | nicolasiscoding/IEEEOpenCV2016 | 0 | 6622617 | from django.db import models
from django.core.exceptions import ValidationError
from django.core.validators import MinLengthValidator
#user entity
class User(models.Model):
    """Store-front user account (identity, credentials, contact details).

    The original defined one ``__unicode__`` per field; Python keeps only
    the *last* definition, so the effective representation was the
    username.  The dead redefinitions are collapsed into that single
    surviving method.
    """
    user_id = models.AutoField(primary_key=True, blank=False)
    user_firstname = models.CharField(max_length=50, blank=False)
    user_lastname = models.CharField(max_length=50, blank=False)
    # NOTE(review): stored as plain text; hash passwords before production use.
    password = models.CharField(
        max_length=20,
        blank=False,
        validators=[MinLengthValidator(8, "Your password must contain at least 8 characters.")],
    )
    user_address = models.CharField(max_length=50, blank=False)
    user_email = models.CharField(max_length=30, blank=False)
    username = models.CharField(max_length=50, blank=False)
    user_is_staff = models.BooleanField(default=True, blank=False)

    def __unicode__(self):
        # Matches the last (surviving) definition of the original class.
        return self.username
#supplier entity
class Supplier(models.Model):
    """Vendor that supplies products to the store."""
    supplier_id = models.AutoField(primary_key=True)
    supplier_name = models.CharField(max_length=100)

    def __unicode__(self):
        # The surviving definition in the original class (the earlier one,
        # returning the integer id, was shadowed).
        return self.supplier_name
#product = models.ManyToManyField(Product)
#order entity
class Order(models.Model):
    """A customer's order; ``orders`` is the owning User (despite the name)."""
    order_date = models.DateField()
    order_paid = models.BooleanField(default=False)
    orders = models.ForeignKey(User, editable=False, default=1)
    order_id = models.AutoField(primary_key=True)

    def __str__(self):
        return '%s' % (self.orders)

    def __unicode__(self):
        # The surviving original definition returned the raw integer
        # order_id; __unicode__ must return text, so format it.
        return '%s' % self.order_id
class Contains(models.Model):
    """Association row linking orders and products with a stock count."""
    stock = models.IntegerField()
    # NOTE(review): an M2M to Order with through='Product' looks inverted
    # (Product holds plain ForeignKeys) — confirm the intended schema.
    products = models.ManyToManyField(Order, through='Product')

    def __unicode__(self):
        # The surviving definition (the earlier one returning the raw
        # integer stock was shadowed).
        return '%s' % (self.products)
#product entity
class Product(models.Model):
    """Catalogue item with pricing, stock and supplier/order links.

    The original defined one ``__unicode__`` per field; only the last
    (returning the formatted ``contains`` link) survived, so the dead
    redefinitions and commented-out code are removed.
    """
    product_id = models.AutoField(primary_key=True)
    product_price = models.IntegerField()
    product_stock_quantity = models.IntegerField()
    product_description = models.CharField(max_length=400)
    product_active = models.BooleanField(default=False)
    orders = models.ForeignKey(Order, editable=False, default=1)
    product_name = models.CharField(max_length=50)
    supplies = models.ForeignKey(Supplier, editable=False, default=1)
    contains = models.ForeignKey(Contains, editable=False, default=1)

    def __unicode__(self):
        # Matches the last (surviving) definition of the original class.
        return '%s' % (self.contains)
| from django.db import models
from django.core.exceptions import ValidationError
from django.core.validators import MinLengthValidator
#user entity
class User(models.Model):
    """Store-front user account (identity, credentials, contact details).

    NOTE(review): every field below is followed by its own __unicode__;
    Python keeps only the *last* definition, so the effective string
    representation is ``self.username`` and the earlier ones are dead
    code.  Several of them also return non-string values.
    """
    user_id = models.AutoField(primary_key=True, blank = False)
    def __unicode__(self):
        return self.user_id
    user_firstname = models.CharField(max_length=50, blank = False)
    def __unicode__(self):
        return self.user_firstname
    user_lastname = models.CharField(max_length=50, blank = False)
    def __unicode__(self):
        return self.user_lastname
    # Minimum of 8 characters enforced by the validator below.
    password = models.CharField(max_length=20,
                                blank = False,
                                validators=[MinLengthValidator(8, "Your password must contain at least 8 characters.")],)
    def __unicode__(self):
        return self.password
    user_address = models.CharField(max_length=50, blank = False)
    def __unicode__(self):
        return self.user_address
    user_email = models.CharField(max_length=30, blank = False)
    def __unicode__(self):
        return self.user_email
    username = models.CharField(max_length=50, blank=False)
    def __unicode__(self):
        return self.username
    user_is_staff = models.BooleanField(default=True, blank = False)
    pass
#supplier entity
class Supplier(models.Model):
    """Vendor supplying products to the store.

    NOTE(review): the first __unicode__ (returning the integer id) is
    shadowed by the second, so the name is the effective representation.
    """
    supplier_id = models.AutoField(primary_key=True)
    def __unicode__(self):
        return self.supplier_id
    supplier_name = models.CharField(max_length=100)
    def __unicode__(self):
        return self.supplier_name
    pass
#product = models.ManyToManyField(Product)
#order entity
class Order(models.Model):
    """Customer order; ``orders`` is the owning User (despite the name).

    NOTE(review): the repeated __unicode__ defs shadow each other; the
    surviving one returns the raw integer ``order_id`` (not text).
    """
    order_date = models.DateField()
    def __unicode__(self):
        return self.order_date
    order_paid = models.BooleanField(default=False)
    def __unicode__(self):
        return self.order_paid
    orders = models.ForeignKey(User, editable = False, default = 1)
    def __str__(self):
        return '%s' % (self.orders)
    order_id = models.AutoField(primary_key=True)
    def __unicode__(self):
        return self.order_id
    pass
class Contains(models.Model):
    """Association row linking orders and products with a stock count.

    NOTE(review): the first __unicode__ is shadowed by the second; the M2M
    to Order with through='Product' looks inverted — confirm the schema.
    """
    stock = models.IntegerField()
    def __unicode__(self):
        return self.stock
    products = models.ManyToManyField(Order, through='Product')
    def __unicode__(self):
        return '%s' % (self.products)
#product entity
class Product(models.Model):
    """Catalogue item with price, stock and supplier/order links.

    NOTE(review): one __unicode__ per field — only the last definition
    (returning '%s' % self.contains) survives; the rest are dead code.
    """
    product_id = models.AutoField(primary_key=True)
    def __unicode__(self):
        return self.product_id
    product_price = models.IntegerField()
    def __unicode__(self):
        return self.product_price
    product_stock_quantity = models.IntegerField()
    def __unicode__(self):
        return self.product_stock_quantity
    product_description = models.CharField(max_length=400)
    def __unicode__(self):
        return self.product_description
    product_active = models.BooleanField(default=False)
    def __unicode__(self):
        return self.product_active
    #order = models.ManyToManyField(Order)
    #def __str__(self):
    #    return self.order
    orders = models.ForeignKey(Order, editable=False, default = 1)
    def __unicode__(self):
        return '%s' % (self.orders)
    product_name = models.CharField(max_length=50)
    def __unicode__(self):
        return self.product_name
    supplies = models.ForeignKey(Supplier, editable=False, default = 1)
    def __unicode__(self):
        return '%s' % (self.supplies)
    contains = models.ForeignKey(Contains, editable=False, default = 1)
    def __unicode__(self):
        return '%s' % (self.contains)
    pass
| en | 0.235996 | #user entity #supplier entity #product = models.ManyToManyField(Product) #order entity #product entity #order = models.ManyToManyField(Order) #def __str__(self): # return self.order | 2.43452 | 2 |
rudolph/rudolph.py | JohnCatn/rpi | 0 | 6622618 | #!/usr/bin/python3
import RPi.GPIO as GPIO
import time
from time import sleep
import pygame.mixer
# SETUP -- hardware/audio initialization; statement order matters (pins must
# be configured before PWM creation, mixer before loading the track).
MIN_DIST = 50.00 ## The distance from the sensor the LED should be brightest
MAX_DIST = 100.00 # The distance from the sensor the LED should be on
GPIO.setmode(GPIO.BCM) # Broadcom (BCM) GPIO numbering, not physical board pins
PIN_TRIGGER = 4
PIN_ECHO = 17
PIN_NOSE = 18
# Set up distance sensor (trigger/echo pair)
GPIO.setup(PIN_TRIGGER, GPIO.OUT)
GPIO.setup(PIN_ECHO, GPIO.IN)
GPIO.output(PIN_TRIGGER, GPIO.LOW)
# Set up nose LED
GPIO.setup(PIN_NOSE, GPIO.OUT)
nose_led = GPIO.PWM(PIN_NOSE, 100) # software PWM on the nose pin at 100 Hz
nose_led.start(0) # start white led on 0 percent duty cycle (off)
# now the fun starts, we'll vary the duty cycle to
# dim/brighten the led
# Set up Audio -- one-shot greeting state flags used by say_hello()
said_hello = False
playing = False
pygame.mixer.init(44100,-16,2,1024)
pygame.mixer.music.set_volume(1.0)
name = "rudolph.mp3"
pygame.mixer.music.load(name)
print("Loaded track - "+ str(name))
# Sets the brightness of the nose LED
def set_nose_brightness(dist_in_cm):
    """Map a distance in cm to nose-LED brightness.

    Full brightness inside MIN_DIST, fading linearly to 0% at MAX_DIST and
    off beyond it; anything in range also triggers the one-shot greeting.
    """
    if dist_in_cm >= MAX_DIST:
        # Nothing close enough: turn the LED off entirely.
        print("nothing in range - LED off")
        nose_led.ChangeDutyCycle(0)
        return
    say_hello()
    if dist_in_cm < MIN_DIST:
        nose_led.ChangeDutyCycle(100)
        return
    # Linear fade: 100% at MIN_DIST down to 0% at MAX_DIST.
    fraction = (dist_in_cm - MIN_DIST) / (MAX_DIST - MIN_DIST)
    duty_cycle = (1 - fraction) * 100
    print("Duty Cycle: ", duty_cycle)
    nose_led.ChangeDutyCycle(duty_cycle)
def say_hello():
    """Play the greeting track once; `said_hello` latches after first play."""
    global said_hello, playing
    if said_hello or playing:
        return
    playing = True
    pygame.mixer.music.play()
    said_hello = True
    playing = False
def distance():  # BUG FIX: the original `def distance()` was missing the colon
    """Trigger the ultrasonic sensor and return the distance in cm (2 dp)."""
    print ("Calculating distance")
    # 10 microsecond trigger pulse starts a measurement.
    GPIO.output(PIN_TRIGGER, GPIO.HIGH)
    sleep(0.00001)
    GPIO.output(PIN_TRIGGER, GPIO.LOW)
    # Defensive defaults so a missed echo edge cannot raise NameError
    # (the original left pulse_start_time unassigned until the loop ran).
    pulse_start_time = time.time()
    pulse_end_time = pulse_start_time
    # Echo goes high for the duration of the sound's round trip.
    while GPIO.input(PIN_ECHO) == 0:
        pulse_start_time = time.time()
    while GPIO.input(PIN_ECHO) == 1:
        pulse_end_time = time.time()
    pulse_duration = pulse_end_time - pulse_start_time
    # 17150 = speed of sound (34300 cm/s) halved for the round trip.
    return round(pulse_duration * 17150, 2)
if __name__ == '__main__':
    # INITIALIZE
    print ("Waiting for sensor to settle")
    sleep(2)
    try:
        while True:
            dist = distance()
            print ("Measured Distance = %.1f cm" % dist)
            # BUG FIX: the original passed the `distance` function object
            # instead of the measured value `dist`, which raises TypeError
            # on the `< MAX_DIST` comparison.
            set_nose_brightness(dist)
            # set_nose_brightness() already greets when something is in
            # range; the original's extra unconditional say_hello() fired
            # on the first loop pass regardless of distance, so it is gone.
            time.sleep(1)
    # Reset by pressing CTRL + C
    except KeyboardInterrupt:
        print("Measurement stopped by User")
        GPIO.cleanup()
| #!/usr/bin/python3
import RPi.GPIO as GPIO
import time
from time import sleep
import pygame.mixer
# SETUP -- hardware/audio init; keep the order (pins before PWM, mixer
# before loading the track).
MIN_DIST = 50.00 ## The distance from the sensor the LED should be brightest
MAX_DIST = 100.00 # The distance from the sensor the LED should be on
GPIO.setmode(GPIO.BCM) # Broadcom (BCM) numbering, not physical board pins
PIN_TRIGGER = 4
PIN_ECHO = 17
PIN_NOSE = 18
# Set up distance sensor (trigger/echo pair)
GPIO.setup(PIN_TRIGGER, GPIO.OUT)
GPIO.setup(PIN_ECHO, GPIO.IN)
GPIO.output(PIN_TRIGGER, GPIO.LOW)
# Set up nose LED
GPIO.setup(PIN_NOSE, GPIO.OUT)
nose_led = GPIO.PWM(PIN_NOSE, 100) # software PWM on the nose pin at 100 Hz
nose_led.start(0) # start white led on 0 percent duty cycle (off)
# now the fun starts, we'll vary the duty cycle to
# dim/brighten the led
# Set up Audio -- one-shot greeting flags used by say_hello()
said_hello = False
playing = False
pygame.mixer.init(44100,-16,2,1024)
pygame.mixer.music.set_volume(1.0)
name = "rudolph.mp3"
pygame.mixer.music.load(name)
print("Loaded track - "+ str(name))
# Sets the brightness of the nose LED
def set_nose_brightness(dist_in_cm):
    """Map a distance (cm) to nose-LED brightness.

    Full brightness inside MIN_DIST, linear fade to 0% at MAX_DIST, off
    beyond MAX_DIST; anything in range also triggers the greeting.
    """
    if dist_in_cm < MAX_DIST:
        say_hello()
        if dist_in_cm < MIN_DIST:
            nose_led.ChangeDutyCycle(100)
        else:
            # Value can be between 0 and 100, we want 100 when it is at the min distance so use percentage calc
            duty_cycle = (1 - ((dist_in_cm - MIN_DIST) / (MAX_DIST - MIN_DIST))) * 100
            print("Duty Cycle: ",duty_cycle)
            nose_led.ChangeDutyCycle(duty_cycle)
    else: # too far away turn LED Off
        print("nothing in range - LED off")
        nose_led.ChangeDutyCycle(0)
def say_hello():
    """Play the greeting track once; `said_hello` latches after first play."""
    global said_hello, playing
    if said_hello == False and playing == False:
        playing=True
        pygame.mixer.music.play()
        said_hello=True
        # NOTE(review): music.play() is asynchronous, so `playing` is
        # cleared immediately and never actually guards overlapping playback.
        playing=False
def distance():  # BUG FIX: the colon was missing (SyntaxError as written)
    """Measure and return the ultrasonic distance in cm, rounded to 2 dp."""
    print ("Calculating distance")
    # Emit a 10 us trigger pulse.
    GPIO.output(PIN_TRIGGER, GPIO.HIGH)
    sleep(0.00001)
    GPIO.output(PIN_TRIGGER, GPIO.LOW)
    # Initialize both timestamps so a missed edge cannot raise NameError.
    pulse_start_time = time.time()
    pulse_end_time = pulse_start_time
    while GPIO.input(PIN_ECHO) == 0:
        pulse_start_time = time.time()
    while GPIO.input(PIN_ECHO) == 1:
        pulse_end_time = time.time()
    pulse_duration = pulse_end_time - pulse_start_time
    # 17150 cm/s accounts for the round trip at the speed of sound.
    return round(pulse_duration * 17150, 2)
if __name__ == '__main__':
    # INITIALIZE
    print ("Waiting for sensor to settle")
    sleep(2)
    try:
        while True:
            dist = distance()
            print ("Measured Distance = %.1f cm" % dist)
            # BUG FIX: pass the measured value `dist`, not the `distance`
            # function object (comparing a function with MAX_DIST raises
            # TypeError).
            set_nose_brightness(dist)
            # The original also called say_hello() unconditionally here,
            # greeting regardless of distance; set_nose_brightness already
            # greets when something is in range, so that call is removed.
            time.sleep(1)
    # Reset by pressing CTRL + C
    except KeyboardInterrupt:
        print("Measurement stopped by User")
        GPIO.cleanup()
| en | 0.804408 | #!/usr/bin/python3 # SETUP ## The distance from the sensor the LED should be brightest # The distance from the sensor the LED should be on # Use board numbers # Set up distanc sensor # Set up nose LED # create object white for PWM on GPIO port 25 (pin 22) at 100 Hertz # start white led on 0 percent duty cycle (off) # now the fun starts, we'll vary the duty cycle to # dim/brighten the led # Set up Audio # Sets the brightness of the nose LED # Value can be between 0 and 100, we want 100 when it is at the min distance so use percentage calc # too far away turn LED Off # INITIALIZE # Reset by pressing CTRL + C | 3.292961 | 3 |
packaging/setup/plugins/ovirt-engine-common/base/core/reconfigure.py | hbraha/ovirt-engine | 347 | 6622619 | #
# ovirt-engine-setup -- ovirt engine setup
#
# Copyright oVirt Authors
# SPDX-License-Identifier: Apache-2.0
#
#
"""Reconfigure env plugin."""
import gettext
from otopi import constants as otopicons
from otopi import plugin
from otopi import util
from ovirt_engine_setup import constants as osetupcons
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine-setup')
@util.export
class Plugin(plugin.PluginBase):
    """Reconfigure env plugin.

    Registers the RECONFIGURE_OPTIONAL_COMPONENTS environment key and,
    during customization, resets the env keys of *disabled* optional
    components so they can be offered for configuration again.
    """
    def __init__(self, context):
        super(Plugin, self).__init__(context=context)
    @plugin.event(
        stage=plugin.Stages.STAGE_INIT,
        after=(
            otopicons.Stages.CORE_CONFIG_INIT,
        ),
    )
    def _init(self):
        # Default: do not reconfigure optional components unless requested.
        self.environment.setdefault(
            osetupcons.CoreEnv.RECONFIGURE_OPTIONAL_COMPONENTS,
            None
        )
    @plugin.event(
        stage=plugin.Stages.STAGE_CUSTOMIZATION,
        before=(
            osetupcons.Stages.DIALOG_TITLES_S_PRODUCT_OPTIONS,
        ),
    )
    def _customization(self):
        if self.environment[
            osetupcons.CoreEnv.RECONFIGURE_OPTIONAL_COMPONENTS
        ]:
            # Gather every constants class registered by the setup modules.
            consts = []
            for constobj in self.environment[
                osetupcons.CoreEnv.SETUP_ATTRS_MODULES
            ]:
                consts.extend(constobj.__dict__['__osetup_attrs__'])
            for c in consts:
                for k in c.__dict__.values():
                    # Env-key properties are tagged with __osetup_attrs__;
                    # only those marked reconfigurable are eligible.
                    if (
                        hasattr(k, '__osetup_attrs__') and
                        k.__osetup_attrs__['reconfigurable']
                    ):
                        # k is a property object; fget(None) yields the env
                        # key string (the getter ignores its instance).
                        k = k.fget(None)
                        if (
                            k in self.environment and
                            # We reset it only if it's disabled.
                            # Can't currently use this code to
                            # disable already-enabled components.
                            not self.environment[k]
                        ):
                            self.logger.debug(
                                'Resetting optional component env key {key} '
                                'old value was {val}'.format(
                                    key=k,
                                    val=self.environment[k],
                                )
                            )
                            # None means "not yet answered": the component
                            # will be asked about again.
                            self.environment[k] = None
# vim: expandtab tabstop=4 shiftwidth=4
| #
# ovirt-engine-setup -- ovirt engine setup
#
# Copyright oVirt Authors
# SPDX-License-Identifier: Apache-2.0
#
#
"""Reconfigure env plugin."""
import gettext
from otopi import constants as otopicons
from otopi import plugin
from otopi import util
from ovirt_engine_setup import constants as osetupcons
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine-setup')
@util.export
class Plugin(plugin.PluginBase):
"""Reconfigure env plugin."""
def __init__(self, context):
super(Plugin, self).__init__(context=context)
@plugin.event(
stage=plugin.Stages.STAGE_INIT,
after=(
otopicons.Stages.CORE_CONFIG_INIT,
),
)
def _init(self):
self.environment.setdefault(
osetupcons.CoreEnv.RECONFIGURE_OPTIONAL_COMPONENTS,
None
)
@plugin.event(
stage=plugin.Stages.STAGE_CUSTOMIZATION,
before=(
osetupcons.Stages.DIALOG_TITLES_S_PRODUCT_OPTIONS,
),
)
def _customization(self):
if self.environment[
osetupcons.CoreEnv.RECONFIGURE_OPTIONAL_COMPONENTS
]:
consts = []
for constobj in self.environment[
osetupcons.CoreEnv.SETUP_ATTRS_MODULES
]:
consts.extend(constobj.__dict__['__osetup_attrs__'])
for c in consts:
for k in c.__dict__.values():
if (
hasattr(k, '__osetup_attrs__') and
k.__osetup_attrs__['reconfigurable']
):
k = k.fget(None)
if (
k in self.environment and
# We reset it only if it's disabled.
# Can't currently use this code to
# disable already-enabled components.
not self.environment[k]
):
self.logger.debug(
'Resetting optional component env key {key} '
'old value was {val}'.format(
key=k,
val=self.environment[k],
)
)
self.environment[k] = None
# vim: expandtab tabstop=4 shiftwidth=4
| en | 0.551362 | # # ovirt-engine-setup -- ovirt engine setup # # Copyright oVirt Authors # SPDX-License-Identifier: Apache-2.0 # # Reconfigure env plugin. Reconfigure env plugin. # We reset it only if it's disabled. # Can't currently use this code to # disable already-enabled components. # vim: expandtab tabstop=4 shiftwidth=4 | 1.907053 | 2 |
saucelab_api_client/saucelab_api_client.py | Slamnlc/saucelab-api-client | 0 | 6622620 | <gh_stars>0
from saucelab_api_client.base_classes.accounts_api import Accounts
from saucelab_api_client.base_classes.insights_api import Insights
from saucelab_api_client.base_classes.insights_real_devices_api import InsightsRealDeviceApi
from saucelab_api_client.base_classes.job_api import JobsApi
from saucelab_api_client.base_classes.performance_api import PerformanceApi
from saucelab_api_client.base_classes.platform_api import Platform
from saucelab_api_client.base_classes.real_devices_api import RealDevices
from saucelab_api_client.base_classes.sauce_connect_api import SauceConnectApi
from saucelab_api_client.base_classes.storage_api import Storage
from saucelab_api_client.session import Session
class SauceLab(Session):
@property
def devices(self):
return RealDevices(self)
@property
def insights(self):
return Insights(self)
@property
def storage(self):
return Storage(self)
@property
def accounts(self):
return Accounts(self)
@property
def platform(self):
return Platform(self)
@property
def jobs(self):
return JobsApi(self)
@property
def performance(self):
return PerformanceApi(self)
@property
def sauce_connect(self):
return SauceConnectApi(self)
@property
def real_devices_insights(self):
return InsightsRealDeviceApi(self)
| from saucelab_api_client.base_classes.accounts_api import Accounts
from saucelab_api_client.base_classes.insights_api import Insights
from saucelab_api_client.base_classes.insights_real_devices_api import InsightsRealDeviceApi
from saucelab_api_client.base_classes.job_api import JobsApi
from saucelab_api_client.base_classes.performance_api import PerformanceApi
from saucelab_api_client.base_classes.platform_api import Platform
from saucelab_api_client.base_classes.real_devices_api import RealDevices
from saucelab_api_client.base_classes.sauce_connect_api import SauceConnectApi
from saucelab_api_client.base_classes.storage_api import Storage
from saucelab_api_client.session import Session
class SauceLab(Session):
@property
def devices(self):
return RealDevices(self)
@property
def insights(self):
return Insights(self)
@property
def storage(self):
return Storage(self)
@property
def accounts(self):
return Accounts(self)
@property
def platform(self):
return Platform(self)
@property
def jobs(self):
return JobsApi(self)
@property
def performance(self):
return PerformanceApi(self)
@property
def sauce_connect(self):
return SauceConnectApi(self)
@property
def real_devices_insights(self):
return InsightsRealDeviceApi(self) | none | 1 | 1.736375 | 2 | |
1101-1200/1153.string-transforms-into-another-string.py | guangxu-li/leetcode-in-python | 0 | 6622621 | <reponame>guangxu-li/leetcode-in-python
#
# @lc app=leetcode id=1153 lang=python3
#
# [1153] String Transforms Into Another String
#
# @lc code=start
class Solution:
def canConvert(self, str1: str, str2: str) -> bool:
mapping = dict()
return (
str1 == str2
or len(set(str2)) < 26
and all(mapping.setdefault(ch1, ch2) == ch2 for ch1, ch2 in zip(str1, str2))
)
# @lc code=end
| #
# @lc app=leetcode id=1153 lang=python3
#
# [1153] String Transforms Into Another String
#
# @lc code=start
class Solution:
def canConvert(self, str1: str, str2: str) -> bool:
mapping = dict()
return (
str1 == str2
or len(set(str2)) < 26
and all(mapping.setdefault(ch1, ch2) == ch2 for ch1, ch2 in zip(str1, str2))
)
# @lc code=end | en | 0.41734 | # # @lc app=leetcode id=1153 lang=python3 # # [1153] String Transforms Into Another String # # @lc code=start # @lc code=end | 3.586638 | 4 |
www/config_default.py | coder-zhuyu/create-python-web-mvc | 0 | 6622622 | # encoding: utf-8
configs = {
'debug': True,
'db': {
'host': '192.168.1.104',
'port': 3306,
'user': 'root',
'password': '<PASSWORD>',
'db': 'awesome'
},
'session': {
'secret': 'Awesome'
}
}
| # encoding: utf-8
configs = {
'debug': True,
'db': {
'host': '192.168.1.104',
'port': 3306,
'user': 'root',
'password': '<PASSWORD>',
'db': 'awesome'
},
'session': {
'secret': 'Awesome'
}
}
| en | 0.83829 | # encoding: utf-8 | 1.185223 | 1 |
cli/token.py | W-DEJONG/Id-manager | 1 | 6622623 | import time
import click
from flask import Blueprint
from manager.models import OAuth2Token, OAuth2AuthorizationCode, db
bp = Blueprint('token', __name__)
bp.cli.help = 'Token maintenance'
@bp.cli.command(name='list')
def token_list():
""" List access tokens """
tokens = OAuth2Token.query.all()
click.echo('%-37s %-20s %-20s %-8s %s' % ('client id', 'access token', 'scope', 'revoked', 'Expires'))
_show_line()
for token in tokens:
click.echo('%-37s %-20s %-20s %-8s %s' %
(token.client_id,
token.access_token[0:20],
token.scope[0:20],
token.revoked,
time.ctime(token.issued_at+token.expires_in)))
@bp.cli.command(name='cleanup')
def token_cleanup():
""" Cleanup outdated access tokens and authorization codes. """
OAuth2Token.query.filter(OAuth2Token.issued_at+OAuth2Token.expires_in < time.time()).delete()
OAuth2AuthorizationCode.query.filter(OAuth2AuthorizationCode.auth_time+600 < time.time()).delete()
db.session.commit()
def _show_line():
click.echo(str().ljust(120, '-'))
| import time
import click
from flask import Blueprint
from manager.models import OAuth2Token, OAuth2AuthorizationCode, db
bp = Blueprint('token', __name__)
bp.cli.help = 'Token maintenance'
@bp.cli.command(name='list')
def token_list():
""" List access tokens """
tokens = OAuth2Token.query.all()
click.echo('%-37s %-20s %-20s %-8s %s' % ('client id', 'access token', 'scope', 'revoked', 'Expires'))
_show_line()
for token in tokens:
click.echo('%-37s %-20s %-20s %-8s %s' %
(token.client_id,
token.access_token[0:20],
token.scope[0:20],
token.revoked,
time.ctime(token.issued_at+token.expires_in)))
@bp.cli.command(name='cleanup')
def token_cleanup():
""" Cleanup outdated access tokens and authorization codes. """
OAuth2Token.query.filter(OAuth2Token.issued_at+OAuth2Token.expires_in < time.time()).delete()
OAuth2AuthorizationCode.query.filter(OAuth2AuthorizationCode.auth_time+600 < time.time()).delete()
db.session.commit()
def _show_line():
click.echo(str().ljust(120, '-'))
| en | 0.775851 | List access tokens Cleanup outdated access tokens and authorization codes. | 2.536488 | 3 |
publishconf.py | ZoomQuiet/ZoomQuiet.io | 4 | 6622624 | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# This file is only used if you use `make publish` or
# explicitly specify it as your config file.
import os
import sys
sys.path.append(os.curdir)
from pelicanconf import *
SITEURL = 'https://blog.zoomquiet.io'
DISQUS_SITENAME = u"blogzoomquietio" #填入你的Shortname
DELETE_OUTPUT_DIRECTORY = None #因为嵌套仓库的原因,不能清除发布目录!
# Feed generation is usually not desired when developing
CATEGORY_FEED_ATOM = 'feeds/%s.atom.xml'
TRANSLATION_FEED_ATOM = None
FEED_ALL_RSS = None
CATEGORY_FEED_RSS= None
SOCIAL = SOCIAL + (('rss', SITEURL + '/' + FEED_ALL_ATOM),)
# Following items are often useful when publishing
#DISQUS_SITENAME = ""
#GOOGLE_ANALYTICS = ""
ARTICLE_URL = '{category}/{slug}.html'
ARTICLE_SAVE_AS = ARTICLE_URL
PAGE_URL = '{slug}.html'
PAGE_SAVE_AS = PAGE_URL
CATEGORY_URL = '{slug}/index.html'
CATEGORY_SAVE_AS = CATEGORY_URL
TAG_URL = 'tag/{slug}.html'
TAG_SAVE_AS = TAG_URL
TAGS_SAVE_AS = 'tag/index.html'
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
RELATIVE_URLS = False
| #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# This file is only used if you use `make publish` or
# explicitly specify it as your config file.
import os
import sys
sys.path.append(os.curdir)
from pelicanconf import *
SITEURL = 'https://blog.zoomquiet.io'
DISQUS_SITENAME = u"blogzoomquietio" #填入你的Shortname
DELETE_OUTPUT_DIRECTORY = None #因为嵌套仓库的原因,不能清除发布目录!
# Feed generation is usually not desired when developing
CATEGORY_FEED_ATOM = 'feeds/%s.atom.xml'
TRANSLATION_FEED_ATOM = None
FEED_ALL_RSS = None
CATEGORY_FEED_RSS= None
SOCIAL = SOCIAL + (('rss', SITEURL + '/' + FEED_ALL_ATOM),)
# Following items are often useful when publishing
#DISQUS_SITENAME = ""
#GOOGLE_ANALYTICS = ""
ARTICLE_URL = '{category}/{slug}.html'
ARTICLE_SAVE_AS = ARTICLE_URL
PAGE_URL = '{slug}.html'
PAGE_SAVE_AS = PAGE_URL
CATEGORY_URL = '{slug}/index.html'
CATEGORY_SAVE_AS = CATEGORY_URL
TAG_URL = 'tag/{slug}.html'
TAG_SAVE_AS = TAG_URL
TAGS_SAVE_AS = 'tag/index.html'
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
RELATIVE_URLS = False
| en | 0.7129 | #!/usr/bin/env python # -*- coding: utf-8 -*- # # This file is only used if you use `make publish` or # explicitly specify it as your config file. #填入你的Shortname #因为嵌套仓库的原因,不能清除发布目录! # Feed generation is usually not desired when developing # Following items are often useful when publishing #DISQUS_SITENAME = "" #GOOGLE_ANALYTICS = "" # Uncomment following line if you want document-relative URLs when developing #RELATIVE_URLS = True | 1.457088 | 1 |
q_learning.py | sherlockHSY/FlappyBird-RL | 0 | 6622625 | <gh_stars>0
import random
import pandas as pd
class QLearning(object):
def __init__(self, action_space, capacity=100000, learning_rate=0.7, reward_decay=0.95, e_greedy=0.9):
super().__init__()
self.lr_decay = 0.00003
self.gamma = reward_decay # discount factor 折扣因子
self.lr = learning_rate
self.epsilon = e_greedy
self.action_space = action_space
self.capacity = capacity
self.history_moves = []
self.q_table = {}
def check_state_exit(self, state):
if state != 'terminal':
if self.q_table.get(state) is None:
self.q_table[state] = [0,0]
def choose_action(self, state):
self.check_state_exit(state)
state_action = self.q_table[state]
# 在这个游戏场景下,选择动作为1 即小鸟往上飞的情况并不需要很多,所以当两个动作的价值相同时,我们更倾向于选择 0 动作
# In this environment, we are more inclined to choose action 0
action = 0 if state_action[0]>=state_action[1] else 1
# Eplsion-greedy的方法在这个环境下并不适用,该环境下不需要过多的探索
# Eplsion-greedy is not efficient or required for this agent and environment
# if random.random() < self.epsilon:
# # 选取Q值最大的动作,但当出现相同Q值的动作时,需在这些动作中随机选择
# if len(state_action.index(max(state_action))) == 1 :
# action = state_action.index(max(state_action))[0]
# else:
# # 但在这个游戏中,明显 0 动作要比 1 动作 更普遍,我们更倾向于选择0
# action = 0 if random.random()>0.1 else 1
# else:
# # choose random action
# action = random.choice(self.action_space)
# self.
return action
# 更新Q表 Update Q-table
def learn(self, s, a, r, s_,done):
self.check_state_exit(s_)
self.history_moves.append({
"s": s,
"a": a,
"r": r,
"s_": s_
})
# 正常单步更新 Q-learing is Single-step update
if s_ != 'terminal':
q_target = r + self.gamma * max(self.q_table[s_][0:2])
else:
q_target = r
self.q_table[s][a] = (1 - self.lr) * (self.q_table[s][a]) + \
self.lr * (q_target)
# 结束一局游戏后,额外更新Q表 Additional update of Q-table
if done:
history = list(reversed(self.history_moves))
# 小鸟如果撞到的是上方的障碍物,就让它最后的两个状态不要往上飞
# Flag if the bird died in the top pipe, don't flap if this is the case
high_death_flag = True if int(s.split("_")[1]) > 120 else False
t, last_flap = 0, True
for move in history:
t += 1
update_s,update_a, update_r,upadte_s_ = move["s"],move["a"],move["r"],move["s_"]
if t <=2 :
if t==2:
update_r = -1000
move["r"] = -1000
self.q_table[update_s][update_a] = (1 - self.lr) * (self.q_table[update_s][update_a]) + \
self.lr * (update_r + self.gamma *
max(self.q_table[upadte_s_][0:2]))
if update_a:
last_flap = False
elif (last_flap or high_death_flag) and update_a:
update_r = -1000
move["r"] = -1000
last_flap = False
high_death_flag = False
self.q_table[update_s][update_a] = (1 - self.lr) * (self.q_table[update_s][update_a]) + \
self.lr * (update_r + self.gamma *
max(self.q_table[upadte_s_][0:2]))
self.history_moves = []
# 调整学习率 decay learning rate
if self.lr > 0.1:
self.lr = max(self.lr - self.lr_decay, 0.1)
if len(self.q_table) == self.capacity:
print('-------Q-table already have {} data-------'.format(self.capacity))
def save_q_table(self, file_name):
df = pd.DataFrame(self.q_table)
df = df.T
path = 'data/'+file_name+'.csv'
df.to_csv(path)
print('Saving Q-table to file: {}'.format(path))
def load_q_table(self,file_path):
df = pd.read_csv(file_path)
for idx, data in df.iterrows():
state = data[0]
self.q_table[state] = [data['0'],data['1']]
print('Loading Q-table from trained data: {}'.format(file_path)) | import random
import pandas as pd
class QLearning(object):
def __init__(self, action_space, capacity=100000, learning_rate=0.7, reward_decay=0.95, e_greedy=0.9):
super().__init__()
self.lr_decay = 0.00003
self.gamma = reward_decay # discount factor 折扣因子
self.lr = learning_rate
self.epsilon = e_greedy
self.action_space = action_space
self.capacity = capacity
self.history_moves = []
self.q_table = {}
def check_state_exit(self, state):
if state != 'terminal':
if self.q_table.get(state) is None:
self.q_table[state] = [0,0]
def choose_action(self, state):
self.check_state_exit(state)
state_action = self.q_table[state]
# 在这个游戏场景下,选择动作为1 即小鸟往上飞的情况并不需要很多,所以当两个动作的价值相同时,我们更倾向于选择 0 动作
# In this environment, we are more inclined to choose action 0
action = 0 if state_action[0]>=state_action[1] else 1
# Eplsion-greedy的方法在这个环境下并不适用,该环境下不需要过多的探索
# Eplsion-greedy is not efficient or required for this agent and environment
# if random.random() < self.epsilon:
# # 选取Q值最大的动作,但当出现相同Q值的动作时,需在这些动作中随机选择
# if len(state_action.index(max(state_action))) == 1 :
# action = state_action.index(max(state_action))[0]
# else:
# # 但在这个游戏中,明显 0 动作要比 1 动作 更普遍,我们更倾向于选择0
# action = 0 if random.random()>0.1 else 1
# else:
# # choose random action
# action = random.choice(self.action_space)
# self.
return action
# 更新Q表 Update Q-table
def learn(self, s, a, r, s_,done):
self.check_state_exit(s_)
self.history_moves.append({
"s": s,
"a": a,
"r": r,
"s_": s_
})
# 正常单步更新 Q-learing is Single-step update
if s_ != 'terminal':
q_target = r + self.gamma * max(self.q_table[s_][0:2])
else:
q_target = r
self.q_table[s][a] = (1 - self.lr) * (self.q_table[s][a]) + \
self.lr * (q_target)
# 结束一局游戏后,额外更新Q表 Additional update of Q-table
if done:
history = list(reversed(self.history_moves))
# 小鸟如果撞到的是上方的障碍物,就让它最后的两个状态不要往上飞
# Flag if the bird died in the top pipe, don't flap if this is the case
high_death_flag = True if int(s.split("_")[1]) > 120 else False
t, last_flap = 0, True
for move in history:
t += 1
update_s,update_a, update_r,upadte_s_ = move["s"],move["a"],move["r"],move["s_"]
if t <=2 :
if t==2:
update_r = -1000
move["r"] = -1000
self.q_table[update_s][update_a] = (1 - self.lr) * (self.q_table[update_s][update_a]) + \
self.lr * (update_r + self.gamma *
max(self.q_table[upadte_s_][0:2]))
if update_a:
last_flap = False
elif (last_flap or high_death_flag) and update_a:
update_r = -1000
move["r"] = -1000
last_flap = False
high_death_flag = False
self.q_table[update_s][update_a] = (1 - self.lr) * (self.q_table[update_s][update_a]) + \
self.lr * (update_r + self.gamma *
max(self.q_table[upadte_s_][0:2]))
self.history_moves = []
# 调整学习率 decay learning rate
if self.lr > 0.1:
self.lr = max(self.lr - self.lr_decay, 0.1)
if len(self.q_table) == self.capacity:
print('-------Q-table already have {} data-------'.format(self.capacity))
def save_q_table(self, file_name):
df = pd.DataFrame(self.q_table)
df = df.T
path = 'data/'+file_name+'.csv'
df.to_csv(path)
print('Saving Q-table to file: {}'.format(path))
def load_q_table(self,file_path):
df = pd.read_csv(file_path)
for idx, data in df.iterrows():
state = data[0]
self.q_table[state] = [data['0'],data['1']]
print('Loading Q-table from trained data: {}'.format(file_path)) | zh | 0.524786 | # discount factor 折扣因子 # 在这个游戏场景下,选择动作为1 即小鸟往上飞的情况并不需要很多,所以当两个动作的价值相同时,我们更倾向于选择 0 动作 # In this environment, we are more inclined to choose action 0 # Eplsion-greedy的方法在这个环境下并不适用,该环境下不需要过多的探索 # Eplsion-greedy is not efficient or required for this agent and environment # if random.random() < self.epsilon: # # 选取Q值最大的动作,但当出现相同Q值的动作时,需在这些动作中随机选择 # if len(state_action.index(max(state_action))) == 1 : # action = state_action.index(max(state_action))[0] # else: # # 但在这个游戏中,明显 0 动作要比 1 动作 更普遍,我们更倾向于选择0 # action = 0 if random.random()>0.1 else 1 # else: # # choose random action # action = random.choice(self.action_space) # self. # 更新Q表 Update Q-table # 正常单步更新 Q-learing is Single-step update # 结束一局游戏后,额外更新Q表 Additional update of Q-table # 小鸟如果撞到的是上方的障碍物,就让它最后的两个状态不要往上飞 # Flag if the bird died in the top pipe, don't flap if this is the case # 调整学习率 decay learning rate | 3.411379 | 3 |
test.py | TechStudent11/CostumTk | 1 | 6622626 | from UI import Window
from tkinter import *
class MainWindow(Window):
def build(self):
Label(self.surface, text="Hello, World!").pack()
if __name__ == "__main__":
MainWindow(width=70).run() | from UI import Window
from tkinter import *
class MainWindow(Window):
def build(self):
Label(self.surface, text="Hello, World!").pack()
if __name__ == "__main__":
MainWindow(width=70).run() | none | 1 | 3.13006 | 3 | |
pythonlibs/mantis/fundamental/plugin/p_redis.py | adoggie/Tibet.6 | 22 | 6622627 | <filename>pythonlibs/mantis/fundamental/plugin/p_redis.py<gh_stars>10-100
#coding:utf-8
from mantis.fundamental.plugin.base import BasePlugin
from mantis.fundamental.redis.datasource import CacheManagerRedis
class RedisServiceFacet( BasePlugin):
def __init__(self,id):
BasePlugin.__init__(self,id,'redis')
pass
def init(self,cfgs):
self._cfgs = cfgs
CacheManagerRedis.instance().init(cfgs)
def open(self):
CacheManagerRedis.instance().open()
def close(self):
CacheManagerRedis.instance().close()
def getElement(self, name='default', category='backend'):
return CacheManagerRedis.instance().caches.get(name)
MainClass = RedisServiceFacet
__all__ = (MainClass,) | <filename>pythonlibs/mantis/fundamental/plugin/p_redis.py<gh_stars>10-100
#coding:utf-8
from mantis.fundamental.plugin.base import BasePlugin
from mantis.fundamental.redis.datasource import CacheManagerRedis
class RedisServiceFacet( BasePlugin):
def __init__(self,id):
BasePlugin.__init__(self,id,'redis')
pass
def init(self,cfgs):
self._cfgs = cfgs
CacheManagerRedis.instance().init(cfgs)
def open(self):
CacheManagerRedis.instance().open()
def close(self):
CacheManagerRedis.instance().close()
def getElement(self, name='default', category='backend'):
return CacheManagerRedis.instance().caches.get(name)
MainClass = RedisServiceFacet
__all__ = (MainClass,) | en | 0.795494 | #coding:utf-8 | 2.113079 | 2 |
wxPySTC_DocMap_v0.2.py | TSN-ADMIN/wxPySTC_DocMap | 4 | 6622628 | <filename>wxPySTC_DocMap_v0.2.py
import wx
import wx.stc as stc
##########################################################################################
##########################################################################################
# ZoneRectRounded = True
# ZoneRectRoundedRadius = 5
# ZoneRectLineColour = "#0000FF"
# ZoneRectLineStyle = 112
# ZoneRectLineWidth = 1
# ZoneFillColour = "#FFE7CE"
# ZoneFillAlpha = 64
# ZoneCentreLine = True
# ZoneCentreLineColour = RED
# ZoneCentreLineStyle = 101
# ZoneCentreLineWidth = 1
# ZoneCentreDot = True
# ZoneCentreDotColour = BLUE
# ZoneCentreDotRadius = 2
# ScrollNumLinesWheel = 10
# ScrollNumLinesEdge = 25
# ScrollFactorWheel = 11
# EdgeTextIndicator = True
# EdgeTextTop = " [ Top ] "
# EdgeTextBottom = " [ Bottom ] "
# EdgeTextFont = Courier New
# EdgeTextForeColour = BLUE
# EdgeTextBackColour = "#FFD5AA"
# CursorTypeNormal = 1
# CursorTypeHover = 19
# CursorTypeDrag = 6
# CursorTypeScroll = 24
# CursorTypeEdge = 11
# CursorTypeHoverShow = True
# CursorTypeDragShow = True
# TooltipHoverShow = True
# TooltipDragShow = True
# # use local 'cfg' for convenient short naming
# cfg = self.cfg['DocumentMap']
# # zone rectangle, outline and fill
# dc.SetPen(wx.Pen(cfg['ZoneRectLineColour'], cfg['ZoneRectLineWidth'], cfg['ZoneRectLineStyle']))
# clr = [int(cfg['ZoneFillColour'][i:i + 2], 16) for i in (1, 3, 5)] # (r, g, b)
# clr.append(cfg['ZoneFillAlpha']) # transparency -> (r, g, b, a)
# dc.SetBrush(wx.Brush(clr))
# if cfg['ZoneRectRounded']:
# dc.DrawRoundedRectangle(self.zone_rect, cfg['ZoneRectRoundedRadius'])
# else:
# dc.DrawRectangle(self.zone_rect) # WHEN USING wx.GraphicsContext: dc.DrawRectangle(*self.zone_rect)
# mid = self.zone_size[1] // 2
# # zone line, centered
# if cfg['ZoneCentreLine']:
# left = (0, self.zone_startPos[1] + mid)
# right = (self.zone_endPos[0], self.zone_endPos[1] - mid)
# dc.SetPen(wx.Pen(cfg['ZoneCentreLineColour'], cfg['ZoneCentreLineWidth'], cfg['ZoneCentreLineStyle'])) # , wx.PENSTYLE_DOT
# dc.DrawLine(left, right) # WHEN USING wx.GraphicsContext: dc.DrawLines((left, right))
# # zone dot, centered
# if cfg['ZoneCentreDot']:
# dc.SetPen(wx.Pen(cfg['ZoneCentreDotColour'], 1)) # , wx.PENSTYLE_DOT
# # dc.SetBrush(wx.Brush('BLUE'))
# dc.DrawCircle(self.zone_size[0] // 2, self.zone_startPos[1] + mid, cfg['ZoneCentreDotRadius'])
# # zone text, top/bottom indicator
# if cfg['EdgeTextIndicator']:
# txt = ''
# if self.top and self.bof:
# txt = cfg['EdgeTextTop']
# if self.bot and self.eof:
# txt = cfg['EdgeTextBottom']
# #FIX, cfg['DocumentMap']['Edge...'], fine tuning, EdgeTextPosition
# if txt:
# dc.SetBackgroundMode(wx.SOLID) # wx.TRANSPARENT
# dc.SetFont(wx.Font(10, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD, False, cfg['EdgeTextFont']))
# dc.SetTextForeground(cfg['EdgeTextForeColour'])
# dc.SetTextBackground(cfg['EdgeTextBackColour'])
# wid = dc.GetTextExtent(txt)[0] + 5
# dc.DrawText(txt, self.zone_rect[:2] + wx.Point(self.zone_size[0] - wid, 5))
##########################################################################################
##########################################################################################
class DragZone:
    """Draggable viewport marker painted over the document map.

    Owns a semi-transparent bitmap whose height mirrors the editor's
    visible region.  ``pos`` is the bitmap's top-left corner inside the
    map widget; ``shown`` is switched off while a ``wx.DragImage``
    temporarily stands in for the zone during a drag.
    """

    def __init__(self):
        self.bmp = None      # wx.Bitmap backing the zone, built by Create()
        self.pos = (0, 0)    # top-left corner within the map widget
        self.shown = True    # False while a drag image replaces the zone

    def Contains(self, pt):
        """Return True when point *pt* lies inside the zone rectangle."""
        return self.GetRect().Contains(pt)

    def GetRect(self):
        """Current bounding rectangle: position plus bitmap size."""
        return wx.Rect(self.pos, self.bmp.Size)

    def Draw(self, dc):
        """Blend the zone to half opacity, then blit it onto *dc*."""
        self.SetTransparency(0x80)
        dc.DrawBitmap(self.bmp, self.GetRect()[:2])

    # wx.DragImage has no native transparency support, so the alpha
    # channel is rewritten pixel by pixel instead.
    # See: https://github.com/wxWidgets/Phoenix/issues/378
    def SetTransparency(self, alpha=0x80):
        """Set every pixel's alpha channel of the zone bitmap to *alpha*."""
        img = self.bmp.ConvertToImage()
        if not img.HasAlpha():
            img.InitAlpha()
        for row in range(img.Height):
            for col in range(img.Width):
                img.SetAlpha(col, row, alpha)
        self.bmp = img.ConvertToBitmap()

    def Create(self, size):
        """(Re)build the zone bitmap at *size*.

        Draws the filled outline rectangle, a dotted centre line and a
        small centre dot into a fresh bitmap via a memory DC.  The
        requested size is clamped to a small minimum in each dimension.
        """
        floor = 3
        clamped = (max(floor, size[0]), max(floor, size[1]))
        # fresh bitmap selected into a memory DC for off-screen drawing
        self.bmp = wx.Bitmap(clamped)
        mdc = wx.MemoryDC(self.bmp)
        # zone surface: blue outline, pale orange fill
        mdc.SetPen(wx.Pen('BLUE', 1, wx.PENSTYLE_SOLID))
        mdc.SetBrush(wx.Brush('#FFE7CE'))
        mdc.DrawRectangle(0, 0, *clamped)
        rx, _, rw, rh = self.GetRect()
        centre_y = rh // 2
        # dotted red line across the vertical centre
        mdc.SetPen(wx.Pen('RED', 1, wx.PENSTYLE_DOT))
        mdc.DrawLine((rx, centre_y), (rw, centre_y))
        # small hollow blue dot at the midpoint
        mdc.SetPen(wx.Pen('BLUE', 1))
        mdc.SetBrush(wx.Brush('BLUE', wx.BRUSHSTYLE_TRANSPARENT))
        mdc.DrawCircle(rw // 2, centre_y, 2)
        mdc.SelectObject(wx.NullBitmap)
mdc.SelectObject(wx.NullBitmap)
class DocumentMap(stc.StyledTextCtrl):
    """Miniature, read-only second view of *doc* sharing its text buffer.

    Renders the whole document zoomed out and overlays a draggable
    :class:`DragZone` rectangle marking the editor's visible region.
    Clicking the map recenters the editor; dragging the zone scrolls it.
    """

    def __init__(self, parent, doc):
        super(DocumentMap, self).__init__(parent)
        self.parent = parent
        self.doc = doc  # the main DocumentEditor this map mirrors
        # create 2nd view for document (shared Scintilla document pointer)
        self.doc.AddRefDocument(self.doc.DocPointer)
        self.SetDocPointer(self.doc.DocPointer)
        self.dragImage = None  # wx.DragImage, live only while dragging
        self.dragShape = None  # the DragZone currently being dragged
        self.zone = DragZone()
        self.SetCursor(wx.Cursor(wx.CURSOR_ARROW))
        self.InitSTC()
        # wx.CallAfter(self.RefreshZone)
        self.parent.Bind(wx.EVT_SIZE, self.Size)
        self.doc.Bind(stc.EVT_STC_UPDATEUI, self.DocPosChanged)
        self.doc.Bind(stc.EVT_STC_ZOOM, lambda e: self.Refresh())
        self.Bind(stc.EVT_STC_PAINTED, self.Paint)
        self.Bind(wx.EVT_LEFT_DOWN, self.LeftDown)
        self.Bind(wx.EVT_LEFT_UP, self.LeftUp)
        self.Bind(wx.EVT_MOTION, self.Motion)
        # disable map text selection and mouse wheel
        self.Bind(wx.EVT_LEFT_DCLICK, lambda e: e.Skip)
        self.Bind(wx.EVT_MOUSEWHEEL, lambda e: e.SetWheelRotation(0))

    def InitSTC(self):
        """Configure the map view: zoomed out, no margins/scrollbars/caret."""
        self.SetZoom(-10)
        self.SetExtraAscent(0)
        self.SetExtraDescent(-1)
        self.SetDoubleBuffered(True) # ensure smooth zone drawing
        self.UsePopUp(False) # disable popup menu
        mlh = False # marker line background colour
        self.SetMarginWidth(0, 0)
        self.SetMarginWidth(1, 0 if mlh else 1)
        self.SetMarginWidth(2, 0)
        self.SetIndentationGuides(stc.STC_IV_NONE)
        # no scrollbars
        self.SetUseHorizontalScrollBar(False)
        self.SetUseVerticalScrollBar(False)
        self.SetScrollWidthTracking(False)
        # hide caret
        self.SetCaretWidth(0)
        # map is read-only; SetReadOnly acts on the shared document, so
        # re-enable writing for the editor view afterwards
        self.SetReadOnly(True)
        self.doc.SetReadOnly(False)
        # self.Enable(False)
        # self.SetCursor(wx.Cursor(wx.CURSOR_ARROW)) # sometimes text insert cursor shows
        #TODO, tightly coupled with DocumentEditor!
        self.doc.Styling(self)

    def Size(self, evt):
        """Fill the parent panel and keep the zone inside the map."""
        self.SetSize(self.parent.Size)
        self.RefreshZone()
        # keep zone inside map: if it sticks out the bottom, scroll the
        # map down one line and shift the zone up by one text line
        x, y, _, h = self.zone.GetRect()
        if y + h > self.ClientSize[1] - self.TextHeight(0):
            self.SetFirstVisibleLine(self.FirstVisibleLine + 1)
            self.zone.pos = (x, y - self.TextHeight(0))
        self.Refresh()

    def Paint(self, evt):
        """Rebuild the zone bitmap and draw it after Scintilla painted."""
        dc = wx.PaintDC(self)
        self.RefreshZone()
        if self.zone.shown:
            self.zone.Draw(dc)

    def LeftDown(self, evt):
        """Start a zone drag, or jump-scroll to the clicked map line."""
        pos = evt.Position
        # If drag zone was 'hit', then set that as the shape we're going to
        # drag around. Get our start position. Dragging has not yet started.
        if self.zone.Contains(pos):
            self.dragShape = self.zone
            self.dragStartPos = pos
            # prevent interfering with drag: stop editor UPDATEUI events
            # from repositioning the zone while we move it ourselves
            self.doc.Bind(stc.EVT_STC_UPDATEUI, None)
            return
        # center drag zone around clicked line
        self.CalcHeights()
        clicked_line = self.FirstVisibleLine - (self.zone_height // 2 - pos[1]) // self.TextHeight(0)
        top_y = clicked_line * self.GetDocScrollRatio()
        top_y = min(top_y, self.scroll_height)
        top_line = self.GetTopLine(top_y)
        self.SyncDoc(top_line, top_y)
        self.SyncMap(top_line, top_y)

    def LeftUp(self, evt):
        """Finish a zone drag: drop the drag image, re-show the zone."""
        self.SetToolTip('')
        if not self.dragImage or not self.dragShape:
            self.dragImage = None
            self.dragShape = None
            return
        # restore editor-driven zone positioning
        self.doc.Bind(stc.EVT_STC_UPDATEUI, self.DocPosChanged)
        # adjust mouse pointer position
        # self.WarpPointer(self.dragStartPos[0], evt.Position[1])
        # Hide the image, end dragging, and nuke out the drag image.
        self.dragImage.EndDrag()
        self.dragImage = None
        top_y = self.GetTopY(evt.Position[1])
        self.dragShape.pos = (0, top_y)
        self.dragShape.shown = True
        self.RefreshRect(self.zone.GetRect(), True)
        self.dragShape = None

    def Motion(self, evt):
        """Drive the drag: create the drag image on first move, then
        track the mouse, scrolling editor and map in step."""
        # Ignore mouse movement if we're not dragging.
        if not self.dragShape or not evt.Dragging() or not evt.LeftIsDown():
            return
        # if we have a shape, but haven't started dragging yet
        if self.dragShape and not self.dragImage:
            pos = evt.Position
            # refresh map area where the drag zone was so it will get erased.
            self.dragShape.shown = False
            self.RefreshRect(self.zone.GetRect(), True)
            self.Update()
            img = self.dragShape.bmp
            #TODO, mask zone surface colour
            # mask = wx.Mask(img, '#FFE7CE')
            # img.SetMask(mask)
            # img.SetMaskColour('#FFE7CE')
            #TODO,
            # img = wx.Bitmap.FromRGBA(img.Width, img.Height, 0xFF, 0xE7, 0xCE, 0xFF,)
            #TODO,
            self.dragImage = wx.DragImage(img, wx.Cursor(wx.CURSOR_HAND))
            # hotspot: click offset inside the zone, kept constant so the
            # zone doesn't jump under the pointer
            self.hotspot = self.dragStartPos - self.dragShape.pos
            self.dragImage.BeginDrag(self.hotspot, self, fullScreen=True)
            self.dragImage.Move(pos)
            self.dragImage.Show()
        # if we have shape and image then move drag zone
        elif self.dragShape and self.dragImage:
            self.CalcHeights()
            top_y = self.GetTopY(evt.Position[1])
            # align position with drag start
            pos = (self.dragStartPos[0], top_y + self.hotspot[1])
            top_line = self.GetTopLine(top_y)
            self.SyncDoc(top_line, top_y)
            self.SetFirstVisibleLine(top_line) # in document map
            # show line number during drag
            self.SetToolTip('Top Line: %7d' % (self.doc.FirstVisibleLine + 1))
            # adjust mouse pointer position
            self.WarpPointer(*pos)
            self.dragImage.Move(pos)
            self.dragImage.Show()

    def DocPosChanged(self, evt):
        """Editor moved/selected: mirror selection and zone onto the map."""
        # copy text selection to map
        self.SetSelection(*self.doc.GetSelection())
        self.CalcHeights()
        top_y = self.doc.FirstVisibleLine * self.GetDocScrollRatio()
        top_line = self.GetTopLine(top_y) + 1
        self.SyncMap(top_line, top_y)

    def CalcHeights(self):
        """Cache the pixel heights used by the scroll-ratio arithmetic."""
        # calculate document map height values
        txt_height = self.LineCount * self.TextHeight(0)
        self.clt_height = self.ClientSize[1]
        self.max_height = min(txt_height, self.clt_height)
        self.zone_height = self.zone.GetRect()[3]
        # max(.1, ...) keeps scroll_height non-zero for later divisions
        self.scroll_height = max(.1, self.max_height - self.zone_height)
        # print(txt_height, self.clt_height, self.max_height, self.zone_height, self.scroll_height)

    def GetDocScrollRatio(self):
        """Map pixels per editor scroll line; requires CalcHeights() first."""
        ratio = self.doc.LineCount - self.doc.LinesOnScreen()
        # prevent division by zero (whole document fits on screen)
        if ratio == 0:
            ratio = -1
        return self.scroll_height / ratio

    def GetTopLine(self, top_y):
        """Convert zone top *top_y* (pixels) to a map top line (float)."""
        top_line = top_y / self.scroll_height * (self.LineCount - self.LinesOnScreen())
        return top_line

    def GetTopY(self, posY):
        """Zone top Y for mouse *posY*, clamped to the scrollable range."""
        # drag zone's top Y coordinate
        top_y = self.zone.pos[1] + posY - self.dragStartPos[1]
        # prevent 'drag stutter' at top edge
        if top_y < 1:
            top_y = 0
        # adjust position when mouse released past top/bottom edge
        top_y = max(top_y, 0)
        top_y = min(top_y, self.scroll_height)
        return top_y

    def RefreshZone(self):
        """Rebuild the zone bitmap to match the editor's visible height."""
        self.zone.Create((self.ClientSize[0], self.doc.LinesOnScreen() * self.TextHeight(0)))

    def SyncDoc(self, top_line, top_y):
        """Scroll the editor to match map position (*top_line*, *top_y*)."""
        if self.max_height < self.clt_height:
            top_line = 0
        self.doc.SetFirstVisibleLine(top_line + top_y // self.TextHeight(0))

    def SyncMap(self, top_line, top_y):
        """Scroll the map and reposition the zone at pixel *top_y*."""
        self.RefreshRect(self.zone.GetRect(), True)
        # adjust map top line
        if top_line == 1:
            top_line = 0
        self.SetFirstVisibleLine(top_line)
        self.zone.pos = (0, top_y)
        self.Refresh()
class DocumentEditor(stc.StyledTextCtrl):
    """Main editing view of the demo.

    Also owns the shared Python lexer styling applied to both this
    editor and the :class:`DocumentMap` second view.
    """

    def __init__(self, parent):
        super(DocumentEditor, self).__init__(parent)
        self.parent = parent
        self.InitSTC()
        # track the hosting panel's size so the editor always fills it
        self.parent.Bind(wx.EVT_SIZE, self.Size)

    def InitSTC(self):
        """Configure margins and selection colours, then load the demo file."""
        self.SetMarginType(0, stc.STC_MARGIN_NUMBER) # 0: LINE numbers
        self.SetMarginWidth(0, 50)
        self.SetMarginType(3, stc.STC_MARGIN_TEXT) # 3: LEFT
        self.SetMarginLeft(4)
        self.SetSelForeground(True, '#FFFFFF')
        self.SetSelBackground(True, '#3399FF')
        self.SetSelAlpha(256)
        # raw string: '\S' in a plain literal is an invalid escape
        # sequence (SyntaxWarning since Python 3.6, a future error)
        self.LoadFile(r'.\SPyE - Copy.py')
        #TODO, optional: restore location
        # wx.CallAfter(self.SetFirstVisibleLine, 3500)
        self.Styling(self)

    def Styling(self, doc):
        """Apply the demo's Python lexer styles to *doc*.

        Called with the editor itself, and again by DocumentMap, so
        both views of the shared document are styled identically.
        Keys of STYLE are 'description|Scintilla token number'.
        """
        doc.StyleSetSpec(stc.STC_STYLE_DEFAULT, 'face:Courier New,size:10')
        doc.StyleSetBackground(stc.STC_STYLE_DEFAULT, '#E6F2FF')
        doc.StyleClearAll()  # reset all styles to default
        # example language: Python
        STYLE = {
            'Default|0': "fore:#000000,face:Courier New,size:10",
            'String single quoted|4': "fore:#CF0000,face:Courier New,size:10",
            'Class name|8': "fore:#0000FF,bold,underline,size:10",
            'Comment 1|12': "fore:#7F7F7F,size:10",
            'Comment 2|1': "fore:#007F00,face:Consolas,size:10",
            'Function name|9': "fore:#007F7F,bold,size:10",
            'Identifier|11': "fore:#0000FF,face:Courier New,size:10",
            'Number|2': "fore:#007F7F,size:10",
            'Operator|10': "fore:#D66100,bold,size:10",
            'String double quoted|3': "fore:#7F007F,face:Courier New,size:10",
            'String double quoted at EOL|13': "fore:#000000,face:Courier New,back:#E0C0E0,eol,size:10",
            'String triple single quotes|6': "fore:#7F0000,size:10",
            'String triple double quotes|7': "fore:#7F0000,size:10",
            'Keyword|5': "fore:#00007F,bold,size:10",
            'Keyword 2|14': "fore:#FF40FF,bold,size:10",
        }
        doc.SetLexer(stc.STC_LEX_PYTHON)
        # style code elements for current language
        for elem, sty in STYLE.items():
            _, tok = elem.split('|')
            doc.StyleSetSpec(int(tok), sty)

    def Size(self, evt):
        """Resize the editor to fill its parent panel and repaint."""
        self.SetSize(self.parent.Size)
        self.Refresh()
if __name__ == '__main__':
    # demo entry point: editor on the left pane, document map on the right
    app = wx.App(redirect=False)
    frame = wx.Frame(None, title="wxPython - wx.StyledTextCtrl - Document Map Demo", pos=(0, 0), size=(500, 1024))
    splitter = wx.SplitterWindow(frame)
    left_pane = wx.Panel(splitter)
    right_pane = wx.Panel(splitter)
    splitter.SplitVertically(left_pane, right_pane, -200)
    editor = DocumentEditor(left_pane)
    # the map is a second live view of the editor's document
    doc_map = DocumentMap(right_pane, editor)
    frame.Show()
    app.MainLoop()
| <filename>wxPySTC_DocMap_v0.2.py
import wx
import wx.stc as stc
##########################################################################################
##########################################################################################
# ZoneRectRounded = True
# ZoneRectRoundedRadius = 5
# ZoneRectLineColour = "#0000FF"
# ZoneRectLineStyle = 112
# ZoneRectLineWidth = 1
# ZoneFillColour = "#FFE7CE"
# ZoneFillAlpha = 64
# ZoneCentreLine = True
# ZoneCentreLineColour = RED
# ZoneCentreLineStyle = 101
# ZoneCentreLineWidth = 1
# ZoneCentreDot = True
# ZoneCentreDotColour = BLUE
# ZoneCentreDotRadius = 2
# ScrollNumLinesWheel = 10
# ScrollNumLinesEdge = 25
# ScrollFactorWheel = 11
# EdgeTextIndicator = True
# EdgeTextTop = " [ Top ] "
# EdgeTextBottom = " [ Bottom ] "
# EdgeTextFont = Courier New
# EdgeTextForeColour = BLUE
# EdgeTextBackColour = "#FFD5AA"
# CursorTypeNormal = 1
# CursorTypeHover = 19
# CursorTypeDrag = 6
# CursorTypeScroll = 24
# CursorTypeEdge = 11
# CursorTypeHoverShow = True
# CursorTypeDragShow = True
# TooltipHoverShow = True
# TooltipDragShow = True
# # use local 'cfg' for convenient short naming
# cfg = self.cfg['DocumentMap']
# # zone rectangle, outline and fill
# dc.SetPen(wx.Pen(cfg['ZoneRectLineColour'], cfg['ZoneRectLineWidth'], cfg['ZoneRectLineStyle']))
# clr = [int(cfg['ZoneFillColour'][i:i + 2], 16) for i in (1, 3, 5)] # (r, g, b)
# clr.append(cfg['ZoneFillAlpha']) # transparency -> (r, g, b, a)
# dc.SetBrush(wx.Brush(clr))
# if cfg['ZoneRectRounded']:
# dc.DrawRoundedRectangle(self.zone_rect, cfg['ZoneRectRoundedRadius'])
# else:
# dc.DrawRectangle(self.zone_rect) # WHEN USING wx.GraphicsContext: dc.DrawRectangle(*self.zone_rect)
# mid = self.zone_size[1] // 2
# # zone line, centered
# if cfg['ZoneCentreLine']:
# left = (0, self.zone_startPos[1] + mid)
# right = (self.zone_endPos[0], self.zone_endPos[1] - mid)
# dc.SetPen(wx.Pen(cfg['ZoneCentreLineColour'], cfg['ZoneCentreLineWidth'], cfg['ZoneCentreLineStyle'])) # , wx.PENSTYLE_DOT
# dc.DrawLine(left, right) # WHEN USING wx.GraphicsContext: dc.DrawLines((left, right))
# # zone dot, centered
# if cfg['ZoneCentreDot']:
# dc.SetPen(wx.Pen(cfg['ZoneCentreDotColour'], 1)) # , wx.PENSTYLE_DOT
# # dc.SetBrush(wx.Brush('BLUE'))
# dc.DrawCircle(self.zone_size[0] // 2, self.zone_startPos[1] + mid, cfg['ZoneCentreDotRadius'])
# # zone text, top/bottom indicator
# if cfg['EdgeTextIndicator']:
# txt = ''
# if self.top and self.bof:
# txt = cfg['EdgeTextTop']
# if self.bot and self.eof:
# txt = cfg['EdgeTextBottom']
# #FIX, cfg['DocumentMap']['Edge...'], fine tuning, EdgeTextPosition
# if txt:
# dc.SetBackgroundMode(wx.SOLID) # wx.TRANSPARENT
# dc.SetFont(wx.Font(10, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD, False, cfg['EdgeTextFont']))
# dc.SetTextForeground(cfg['EdgeTextForeColour'])
# dc.SetTextBackground(cfg['EdgeTextBackColour'])
# wid = dc.GetTextExtent(txt)[0] + 5
# dc.DrawText(txt, self.zone_rect[:2] + wx.Point(self.zone_size[0] - wid, 5))
##########################################################################################
##########################################################################################
class DragZone:
    """The draggable rectangle on the map that marks the editor viewport."""

    def __init__(self):
        self.bmp = None       # zone bitmap, built by Create()
        self.pos = (0, 0)     # top-left position within the map
        self.shown = True     # False while a wx.DragImage is in flight

    def Contains(self, pt):
        """Return True when point *pt* lies inside the zone rectangle."""
        return self.GetRect().Contains(pt)

    def GetRect(self):
        """Zone bounds as a wx.Rect at the current position."""
        return wx.Rect(self.pos, self.bmp.Size)

    def Draw(self, dc):
        """Blit the zone bitmap (half transparent) onto *dc*."""
        self.SetTransparency(0x80)
        dc.DrawBitmap(self.bmp, self.GetRect()[:2])

    # wx.DragImage transparency workaround:
    # https://github.com/wxWidgets/Phoenix/issues/378
    def SetTransparency(self, alpha=0x80):
        """Set every pixel of the zone bitmap to the same *alpha*."""
        img = self.bmp.ConvertToImage()
        if not img.HasAlpha():
            img.InitAlpha()
        # per-pixel loop; acceptable for a small zone bitmap
        for px in range(img.Width):
            for py in range(img.Height):
                img.SetAlpha(px, py, alpha)
        self.bmp = img.ConvertToBitmap()

    def Create(self, size):
        """(Re)draw the zone bitmap at *size*, clamped to a minimum."""
        floor = 3
        size = (max(floor, size[0]), max(floor, size[1]))
        # draw into a memory bitmap
        mdc = wx.MemoryDC()
        self.bmp = wx.Bitmap(size)
        mdc.SelectObject(self.bmp)
        # zone surface
        mdc.SetPen(wx.Pen('BLUE', 1, wx.PENSTYLE_SOLID))
        mdc.SetBrush(wx.Brush('#FFE7CE'))
        mdc.DrawRectangle(0, 0, *size)
        # centred horizontal line
        x, _, w, h = self.GetRect()
        mid = h // 2
        mdc.SetPen(wx.Pen('RED', 1, wx.PENSTYLE_DOT))
        mdc.DrawLine((x, mid), (w, mid))
        # centred dot
        mdc.SetPen(wx.Pen('BLUE', 1))
        mdc.SetBrush(wx.Brush('BLUE', wx.BRUSHSTYLE_TRANSPARENT))
        mdc.DrawCircle(w // 2, mid, 2)
        mdc.SelectObject(wx.NullBitmap)
class DocumentMap(stc.StyledTextCtrl):
    """Miniature second view of *doc* with a draggable viewport zone."""

    def __init__(self, parent, doc):
        super(DocumentMap, self).__init__(parent)
        self.parent = parent
        self.doc = doc
        # share the underlying Scintilla document: the map is a 2nd view
        self.doc.AddRefDocument(self.doc.DocPointer)
        self.SetDocPointer(self.doc.DocPointer)
        self.dragImage = None
        self.dragShape = None
        self.zone = DragZone()
        self.SetCursor(wx.Cursor(wx.CURSOR_ARROW))
        self.InitSTC()
        self.parent.Bind(wx.EVT_SIZE, self.Size)
        self.doc.Bind(stc.EVT_STC_UPDATEUI, self.DocPosChanged)
        self.doc.Bind(stc.EVT_STC_ZOOM, lambda e: self.Refresh())
        self.Bind(stc.EVT_STC_PAINTED, self.Paint)
        self.Bind(wx.EVT_LEFT_DOWN, self.LeftDown)
        self.Bind(wx.EVT_LEFT_UP, self.LeftUp)
        self.Bind(wx.EVT_MOTION, self.Motion)
        # swallow double-clicks and wheel events: the map itself must not
        # be edited or scrolled directly
        self.Bind(wx.EVT_LEFT_DCLICK, lambda e: e.Skip)
        self.Bind(wx.EVT_MOUSEWHEEL, lambda e: e.SetWheelRotation(0))

    def InitSTC(self):
        """One-time configuration of the map view."""
        self.SetZoom(-10)             # shrink text to map scale
        self.SetExtraAscent(0)
        self.SetExtraDescent(-1)
        self.SetDoubleBuffered(True)  # ensure smooth zone drawing
        self.UsePopUp(False)          # disable the popup menu
        mlh = False                   # marker line background colour
        self.SetMarginWidth(0, 0)
        self.SetMarginWidth(1, 0 if mlh else 1)
        self.SetMarginWidth(2, 0)
        self.SetIndentationGuides(stc.STC_IV_NONE)
        # the map has no scrollbars of its own
        self.SetUseHorizontalScrollBar(False)
        self.SetUseVerticalScrollBar(False)
        self.SetScrollWidthTracking(False)
        # read-only view without a visible caret
        self.SetCaretWidth(0)
        self.SetReadOnly(True)
        self.doc.SetReadOnly(False)
        # reuse the editor's styling so both views look alike
        # (NOTE: tightly coupled with DocumentEditor)
        self.doc.Styling(self)

    def Size(self, evt):
        self.SetSize(self.parent.Size)
        self.RefreshZone()
        # keep the zone inside the visible map area
        x, y, _, h = self.zone.GetRect()
        if y + h > self.ClientSize[1] - self.TextHeight(0):
            self.SetFirstVisibleLine(self.FirstVisibleLine + 1)
            self.zone.pos = (x, y - self.TextHeight(0))
        self.Refresh()

    def Paint(self, evt):
        dc = wx.PaintDC(self)
        self.RefreshZone()
        if self.zone.shown:
            self.zone.Draw(dc)

    def LeftDown(self, evt):
        pos = evt.Position
        # a hit on the drag zone starts a drag: remember the grab point
        # and detach the editor->map sync so it cannot interfere
        if self.zone.Contains(pos):
            self.dragShape = self.zone
            self.dragStartPos = pos
            # NOTE(review): Bind(..., None) to disconnect — Unbind() may
            # be the intended call; confirm against wxPython docs
            self.doc.Bind(stc.EVT_STC_UPDATEUI, None)
            return
        # click outside the zone: centre the zone around the clicked line
        self.CalcHeights()
        clicked_line = self.FirstVisibleLine - (self.zone_height // 2 - pos[1]) // self.TextHeight(0)
        top_y = clicked_line * self.GetDocScrollRatio()
        top_y = min(top_y, self.scroll_height)
        top_line = self.GetTopLine(top_y)
        self.SyncDoc(top_line, top_y)
        self.SyncMap(top_line, top_y)

    def LeftUp(self, evt):
        self.SetToolTip('')
        if not self.dragImage or not self.dragShape:
            self.dragImage = None
            self.dragShape = None
            return
        # re-attach the editor->map sync now that dragging is over
        self.doc.Bind(stc.EVT_STC_UPDATEUI, self.DocPosChanged)
        # end the drag and dispose of the drag image
        self.dragImage.EndDrag()
        self.dragImage = None
        top_y = self.GetTopY(evt.Position[1])
        self.dragShape.pos = (0, top_y)
        self.dragShape.shown = True
        self.RefreshRect(self.zone.GetRect(), True)
        self.dragShape = None

    def Motion(self, evt):
        # ignore movement unless a left-button drag is in progress
        if not self.dragShape or not evt.Dragging() or not evt.LeftIsDown():
            return
        if self.dragShape and not self.dragImage:
            # first motion event of a drag: create the drag image
            pos = evt.Position
            # erase the static zone so the drag image replaces it
            self.dragShape.shown = False
            self.RefreshRect(self.zone.GetRect(), True)
            self.Update()
            img = self.dragShape.bmp
            # TODO: masking the zone surface colour was attempted but is
            # not applied here
            self.dragImage = wx.DragImage(img, wx.Cursor(wx.CURSOR_HAND))
            self.hotspot = self.dragStartPos - self.dragShape.pos
            self.dragImage.BeginDrag(self.hotspot, self, fullScreen=True)
            self.dragImage.Move(pos)
            self.dragImage.Show()
        elif self.dragShape and self.dragImage:
            # subsequent motion: move the drag zone and sync the editor
            self.CalcHeights()
            top_y = self.GetTopY(evt.Position[1])
            # keep the pointer aligned with the drag start column
            pos = (self.dragStartPos[0], top_y + self.hotspot[1])
            top_line = self.GetTopLine(top_y)
            self.SyncDoc(top_line, top_y)
            self.SetFirstVisibleLine(top_line)  # in the document map
            # show the current editor top line while dragging
            self.SetToolTip('Top Line: %7d' % (self.doc.FirstVisibleLine + 1))
            # snap the mouse pointer to the zone position
            self.WarpPointer(*pos)
            self.dragImage.Move(pos)
            self.dragImage.Show()

    def DocPosChanged(self, evt):
        """Keep the map in step with the editor view."""
        # mirror the editor's text selection into the map
        self.SetSelection(*self.doc.GetSelection())
        self.CalcHeights()
        top_y = self.doc.FirstVisibleLine * self.GetDocScrollRatio()
        top_line = self.GetTopLine(top_y) + 1
        self.SyncMap(top_line, top_y)

    def CalcHeights(self):
        """Cache the vertical metrics used by the scroll mapping."""
        text_px = self.LineCount * self.TextHeight(0)
        self.clt_height = self.ClientSize[1]
        self.max_height = min(text_px, self.clt_height)
        self.zone_height = self.zone.GetRect()[3]
        # never zero: used as a divisor elsewhere
        self.scroll_height = max(0.1, self.max_height - self.zone_height)

    def GetDocScrollRatio(self):
        """Map pixels per editor scroll line."""
        scrollable = self.doc.LineCount - self.doc.LinesOnScreen()
        # avoid division by zero when the document fits on one screen
        if scrollable == 0:
            scrollable = -1
        return self.scroll_height / scrollable

    def GetTopLine(self, top_y):
        """Convert a map Y offset into the map's first-visible-line index."""
        return top_y / self.scroll_height * (self.LineCount - self.LinesOnScreen())

    def GetTopY(self, posY):
        """Clamped drag-zone top Y coordinate for mouse position *posY*."""
        top_y = self.zone.pos[1] + posY - self.dragStartPos[1]
        # snap to the top to prevent 'drag stutter' at the edge
        if top_y < 1:
            top_y = 0
        # clamp when the mouse is released past the top/bottom edge
        return min(max(top_y, 0), self.scroll_height)

    def RefreshZone(self):
        """Rebuild the zone bitmap to match the current viewport size."""
        self.zone.Create((self.ClientSize[0], self.doc.LinesOnScreen() * self.TextHeight(0)))

    def SyncDoc(self, top_line, top_y):
        """Scroll the editor to the given map position."""
        if self.max_height < self.clt_height:
            top_line = 0
        self.doc.SetFirstVisibleLine(top_line + top_y // self.TextHeight(0))

    def SyncMap(self, top_line, top_y):
        """Scroll the map and reposition the drag zone."""
        self.RefreshRect(self.zone.GetRect(), True)
        # map line 1 is normalised to line 0
        if top_line == 1:
            top_line = 0
        self.SetFirstVisibleLine(top_line)
        self.zone.pos = (0, top_y)
        self.Refresh()
class DocumentEditor(stc.StyledTextCtrl):
    """The main (full-size) editing view of the demo document."""

    # default demo file, kept for backward compatibility with the
    # original hard-coded path
    DEFAULT_FILE = '.\SPyE - Copy.py'

    def __init__(self, parent, filename=None):
        """Create the editor inside *parent* and load *filename*
        (defaults to the original demo file)."""
        super(DocumentEditor, self).__init__(parent)
        self.parent = parent
        # generalized: the file to load is now a parameter instead of a
        # hard-coded literal; the default preserves the old behaviour
        self.filename = self.DEFAULT_FILE if filename is None else filename
        self.InitSTC()
        self.parent.Bind(wx.EVT_SIZE, self.Size)

    def InitSTC(self):
        """Configure margins and selection colours, then load the file."""
        self.SetMarginType(0, stc.STC_MARGIN_NUMBER)  # margin 0: line numbers
        self.SetMarginWidth(0, 50)
        self.SetMarginType(3, stc.STC_MARGIN_TEXT)    # margin 3: left text
        self.SetMarginLeft(4)
        self.SetSelForeground(True, '#FFFFFF')
        self.SetSelBackground(True, '#3399FF')
        # 256 == STC_ALPHA_NOALPHA: draw the selection fully opaque
        self.SetSelAlpha(256)
        self.LoadFile(self.filename)
        self.Styling(self)

    def Styling(self, doc):
        """Apply the demo's Python colour scheme to *doc* (an STC view);
        also called by DocumentMap so both views look alike."""
        doc.StyleSetSpec(stc.STC_STYLE_DEFAULT, 'face:Courier New,size:10')
        doc.StyleSetBackground(stc.STC_STYLE_DEFAULT, '#E6F2FF')
        doc.StyleClearAll()  # propagate the default to every style slot
        # mapping: 'description|style number' -> Scintilla style spec
        STYLE = {
            'Default|0': "fore:#000000,face:Courier New,size:10",
            'String single quoted|4': "fore:#CF0000,face:Courier New,size:10",
            'Class name|8': "fore:#0000FF,bold,underline,size:10",
            'Comment 1|12': "fore:#7F7F7F,size:10",
            'Comment 2|1': "fore:#007F00,face:Consolas,size:10",
            'Function name|9': "fore:#007F7F,bold,size:10",
            'Identifier|11': "fore:#0000FF,face:Courier New,size:10",
            'Number|2': "fore:#007F7F,size:10",
            'Operator|10': "fore:#D66100,bold,size:10",
            'String double quoted|3': "fore:#7F007F,face:Courier New,size:10",
            'String double quoted at EOL|13': "fore:#000000,face:Courier New,back:#E0C0E0,eol,size:10",
            'String triple single quotes|6': "fore:#7F0000,size:10",
            'String triple double quotes|7': "fore:#7F0000,size:10",
            'Keyword|5': "fore:#00007F,bold,size:10",
            'Keyword 2|14': "fore:#FF40FF,bold,size:10",
        }
        doc.SetLexer(stc.STC_LEX_PYTHON)
        # style each code element for the current language
        for label, spec in STYLE.items():
            _, token = label.split('|')
            doc.StyleSetSpec(int(token), spec)

    def Size(self, evt):
        """Keep the editor sized to its parent panel."""
        self.SetSize(self.parent.Size)
        self.Refresh()
if __name__ == '__main__':
    # demo entry point: build a frame split into editor (left) and
    # document map (right)
    app = wx.App(redirect=False)
    main_frame = wx.Frame(None, title="wxPython - wx.StyledTextCtrl - Document Map Demo", pos=(0, 0), size=(500, 1024))
    split = wx.SplitterWindow(main_frame)
    pane_l = wx.Panel(split)
    pane_r = wx.Panel(split)
    split.SplitVertically(pane_l, pane_r, -200)
    editor = DocumentEditor(pane_l)
    # the map is a second live view of the editor's document
    map_view = DocumentMap(pane_r, editor)
    main_frame.Show()
    app.MainLoop()
| en | 0.44097 | ########################################################################################## ########################################################################################## # ZoneRectRounded = True # ZoneRectRoundedRadius = 5 # ZoneRectLineColour = "#0000FF" # ZoneRectLineStyle = 112 # ZoneRectLineWidth = 1 # ZoneFillColour = "#FFE7CE" # ZoneFillAlpha = 64 # ZoneCentreLine = True # ZoneCentreLineColour = RED # ZoneCentreLineStyle = 101 # ZoneCentreLineWidth = 1 # ZoneCentreDot = True # ZoneCentreDotColour = BLUE # ZoneCentreDotRadius = 2 # ScrollNumLinesWheel = 10 # ScrollNumLinesEdge = 25 # ScrollFactorWheel = 11 # EdgeTextIndicator = True # EdgeTextTop = " [ Top ] " # EdgeTextBottom = " [ Bottom ] " # EdgeTextFont = Courier New # EdgeTextForeColour = BLUE # EdgeTextBackColour = "#FFD5AA" # CursorTypeNormal = 1 # CursorTypeHover = 19 # CursorTypeDrag = 6 # CursorTypeScroll = 24 # CursorTypeEdge = 11 # CursorTypeHoverShow = True # CursorTypeDragShow = True # TooltipHoverShow = True # TooltipDragShow = True # # use local 'cfg' for convenient short naming # cfg = self.cfg['DocumentMap'] # # zone rectangle, outline and fill # dc.SetPen(wx.Pen(cfg['ZoneRectLineColour'], cfg['ZoneRectLineWidth'], cfg['ZoneRectLineStyle'])) # clr = [int(cfg['ZoneFillColour'][i:i + 2], 16) for i in (1, 3, 5)] # (r, g, b) # clr.append(cfg['ZoneFillAlpha']) # transparency -> (r, g, b, a) # dc.SetBrush(wx.Brush(clr)) # if cfg['ZoneRectRounded']: # dc.DrawRoundedRectangle(self.zone_rect, cfg['ZoneRectRoundedRadius']) # else: # dc.DrawRectangle(self.zone_rect) # WHEN USING wx.GraphicsContext: dc.DrawRectangle(*self.zone_rect) # mid = self.zone_size[1] // 2 # # zone line, centered # if cfg['ZoneCentreLine']: # left = (0, self.zone_startPos[1] + mid) # right = (self.zone_endPos[0], self.zone_endPos[1] - mid) # dc.SetPen(wx.Pen(cfg['ZoneCentreLineColour'], cfg['ZoneCentreLineWidth'], cfg['ZoneCentreLineStyle'])) # , wx.PENSTYLE_DOT # dc.DrawLine(left, right) # WHEN USING 
wx.GraphicsContext: dc.DrawLines((left, right)) # # zone dot, centered # if cfg['ZoneCentreDot']: # dc.SetPen(wx.Pen(cfg['ZoneCentreDotColour'], 1)) # , wx.PENSTYLE_DOT # # dc.SetBrush(wx.Brush('BLUE')) # dc.DrawCircle(self.zone_size[0] // 2, self.zone_startPos[1] + mid, cfg['ZoneCentreDotRadius']) # # zone text, top/bottom indicator # if cfg['EdgeTextIndicator']: # txt = '' # if self.top and self.bof: # txt = cfg['EdgeTextTop'] # if self.bot and self.eof: # txt = cfg['EdgeTextBottom'] # #FIX, cfg['DocumentMap']['Edge...'], fine tuning, EdgeTextPosition # if txt: # dc.SetBackgroundMode(wx.SOLID) # wx.TRANSPARENT # dc.SetFont(wx.Font(10, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD, False, cfg['EdgeTextFont'])) # dc.SetTextForeground(cfg['EdgeTextForeColour']) # dc.SetTextBackground(cfg['EdgeTextBackColour']) # wid = dc.GetTextExtent(txt)[0] + 5 # dc.DrawText(txt, self.zone_rect[:2] + wx.Point(self.zone_size[0] - wid, 5)) ########################################################################################## ########################################################################################## #INFO, wx.DragImage transparency- · Issue #378 · wxWidgets-Phoenix #INFO, URL=https://github.com/wxWidgets/Phoenix/issues/378 #INFO, URL=http://www.informit.com/articles/article.aspx?p=405047 #INFO, Drawing on Bitmaps with wxMemoryDC # limit zone size # self.pos = (0, max(0, self.pos[1])) # prepare memory bitmap for drawing # mdc.Clear() # zone surface # mdc.DrawRoundedRectangle(0, 0, *size, 5) # zone line, centered # zone dot, centered # # zone text, top/bottom indicator # txt = '' # if self.pos[1] <= 0: # txt = ' [ Top ] ' # if self.pos[1] > 300: # txt = ' [ Bottom ] ' # if txt: # mdc.SetBackgroundMode(wx.SOLID) # wx.TRANSPARENT # mdc.SetFont(wx.Font(10, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD, False, 'Courier New')) # mdc.SetTextForeground('BLUE') # mdc.SetTextBackground('#FFD5AA') # wid = mdc.GetTextExtent(txt)[0] + 5 # 
mdc.DrawText(txt, (0, 0) + wx.Point(size[0] - wid, 5)) # create 2nd view for document # wx.CallAfter(self.RefreshZone) # disable map text selection and mouse wheel # ensure smooth zone drawing # disable popup menu # marker line background colour # no scrollbars # hide caret # self.Enable(False) # self.SetCursor(wx.Cursor(wx.CURSOR_ARROW)) # sometimes text insert cursor shows #TODO, tightly coupled with DocumentEditor! # keep zone inside map # If drag zone was 'hit', then set that as the shape we're going to # drag around. Get our start position. Dragging has not yet started. # prevent interfering with drag # center drag zone around clicked line # adjust mouse pointer position # self.WarpPointer(self.dragStartPos[0], evt.Position[1]) # Hide the image, end dragging, and nuke out the drag image. # Ignore mouse movement if we're not dragging. # if we have a shape, but haven't started dragging yet # refresh map area where the drag zone was so it will get erased. #TODO, mask zone surface colour # mask = wx.Mask(img, '#FFE7CE') # img.SetMask(mask) # img.SetMaskColour('#FFE7CE') #TODO, # img = wx.Bitmap.FromRGBA(img.Width, img.Height, 0xFF, 0xE7, 0xCE, 0xFF,) #TODO, # if we have shape and image then move drag zone # align position with drag start # in document map # show line number during drag # adjust mouse pointer position # copy text selection to map # calculate document map height values # print(txt_height, self.clt_height, self.max_height, self.zone_height, self.scroll_height) # prevent division by zero # drag zone's top Y coordinate # prevent 'drag stutter' at top edge # adjust position when mouse released past top/bottom edge # adjust map top line # 0: LINE numbers # 3: LEFT #TODO, optional: restore location # wx.CallAfter(self.SetFirstVisibleLine, 3500) # reset all styles to default # example language: Python #000000,face:Courier New,size:10", #CF0000,face:Courier New,size:10", #0000FF,bold,underline,size:10", #7F7F7F,size:10", #007F00,face:Consolas,size:10", 
#007F7F,bold,size:10", #0000FF,face:Courier New,size:10", #007F7F,size:10", #D66100,bold,size:10", #7F007F,face:Courier New,size:10", #000000,face:Courier New,back:#E0C0E0,eol,size:10", #7F0000,size:10", #7F0000,size:10", #00007F,bold,size:10", #FF40FF,bold,size:10", # style code elements for current language # pnl = wx.Panel(frm, -1) # dcm = DocumentMap(pnl) #TODO, #, style=Styling) # 3rd parm: styling function, default: None?? # import wx.lib.mixins.inspection as inspection # wx.lib.inspection.InspectionTool().Show(selectObj=dcm, refreshTree=True) | 1.785053 | 2 |
puzzler/exact_cover_x2.py | tiwo/puzzler | 0 | 6622629 | <reponame>tiwo/puzzler<gh_stars>0
#!/usr/bin/env python
# $Id$
# Author: <NAME> <<EMAIL>>
# Copyright: (C) 1998-2015 by <NAME>;
# portions copyright 2010 by Ali Assaf
# License: GPL 2 (see __init__.py)
"""
An implementation of <NAME>'s 'Algorithm X' [1]_ for the generalized
exact cover problem [2]_ using a high-level native data structure technique
devised by Ali Assaf [3]_.
.. [1] http://en.wikipedia.org/wiki/Knuth%27s_Algorithm_X
.. [2] http://en.wikipedia.org/wiki/Exact_cover
.. [3] http://www.cs.mcgill.ca/~aassaf9/python/algorithm_x.html
"""
from pprint import pprint
# optional acceleration with Psyco
try:
import psyco
psyco.full()
except ImportError:
pass
class ExactCover(object):

    """
    Given a sparse matrix of 0s and 1s, find every set of rows containing
    exactly one 1 in each primary column (and at most one 1 in each secondary
    column).  See `load_matrix` for a description of the data structure.

    Uses the native approach to Knuth's Algorithm X.
    """

    def __init__(self, matrix=None, secondary=0, state=None):
        """
        Parameters:

        * `matrix` & `secondary`: see `self.load_matrix`.

        * `state`: a `puzzler.SessionState` object which stores the runtime
          state of this puzzle (we're resuming a previously interrupted
          puzzle), or None (no state, we're starting from the beginning).
        """
        # column name -> set of row indices whose row has a 1 in that column
        self.columns = None
        # set of secondary (optional) column names
        self.secondary_columns = None
        # row index -> list of column names that hold a 1 in that row
        self.rows = None
        self.solution = []
        self.num_solutions = 0
        self.num_searches = 0
        if state:
            # resume a previously interrupted search
            self.solution = state.solution
            self.num_solutions = state.num_solutions
            self.num_searches = state.num_searches
        if matrix:
            self.load_matrix(matrix, secondary)

    def load_matrix(self, matrix, secondary=0):
        """
        Convert and store the input `matrix` into `self.columns`,
        `self.secondary_columns`, and `self.rows`.

        The input `matrix` is a two-dimensional list of tuples:

        * Each row is a tuple of equal length.

        * The first row contains the column names: first the puzzle piece
          names, then the solution space coordinates.  For example::

              ('A', 'B', 'C', '0,0', '1,0', '0,1', '1,1')

        * The subsequent rows consist of 1 & 0 (True & False) values.  Each
          row contains a 1/True value in the column identifying the piece,
          and 1/True values in each column identifying the position.  There
          must be one row for each possible position of each puzzle piece.

        The `secondary` parameter is the number of secondary (rightmost)
        columns: columns which may, but need not, participate in the
        solution.
        """
        matrix_iter = iter(matrix)
        # use the built-in next() (works on Python 2.6+ and 3.x); the
        # previous `matrix_iter.next()` call was Python-2-only
        column_names = next(matrix_iter)
        self.secondary_columns = set(
            column_names[(len(column_names) - secondary):])
        self.columns = dict((j, set()) for j in column_names)
        # keep only the names of the columns holding a 1 in each row
        self.rows = [
            [column_names[j] for j in range(len(column_names)) if row[j]]
            for row in matrix_iter]
        # build the inverse mapping: column name -> set of row indices
        for (r, row) in enumerate(self.rows):
            for c in row:
                self.columns[c].add(r)

    def solve(self, level=0):
        """A generator that produces all solutions: Algorithm X."""
        # base case: every primary column is covered
        if not (set(self.columns) - self.secondary_columns):
            yield self.full_solution()
            return
        self.num_searches += 1
        # choose the primary column with the fewest candidate rows
        # (ties broken by column name, for determinism)
        _size, c = min((len(self.columns[column]), column)
                       for column in self.columns
                       if column not in self.secondary_columns)
        # Since `self.columns` is being modified, a copy must be made here.
        # `sorted()` is used instead of `list()` to get reproducible output.
        for r in sorted(self.columns[c]):
            if len(self.solution) > level:
                if self.solution[level] != r:
                    # skip rows already fully explored (resumed state)
                    continue
            else:
                self.solution.append(r)
            covered = self.cover(r)
            for s in self.solve(level + 1):
                yield s
            self.uncover(r, covered)
            self.solution.pop()

    def cover(self, r):
        """Remove row `r`'s columns, and every row conflicting with `r`,
        from the matrix; return the removed column sets for `uncover`."""
        columns = self.columns
        rows = self.rows
        covered = []
        for j in rows[r]:
            # remove every row that shares column `j` from all its other
            # columns
            for i in columns[j]:
                for k in rows[i]:
                    if k != j:
                        columns[k].remove(i)
            covered.append(self.columns.pop(j))
        return covered

    def uncover(self, r, covered):
        """Undo a `cover(r)` call, restoring the saved column sets
        (in reverse order of removal)."""
        columns = self.columns
        rows = self.rows
        for j in reversed(rows[r]):
            columns[j] = covered.pop()
            for i in columns[j]:
                for k in rows[i]:
                    if k != j:
                        columns[k].add(i)

    def full_solution(self):
        """
        Return an expanded representation (full row details) of a solution,
        based on the internal minimal representation (row indices).
        """
        return [sorted(self.rows[r]) for r in self.solution]

    def format_solution(self):
        """Return a simple formatted string representation of the solution."""
        self.num_solutions += 1
        solution = self.full_solution()
        parts = ['solution %i:' % self.num_solutions]
        for row in solution:
            parts.append(
                ' '.join(cell for cell in row
                         # omit secondary columns (intersections):
                         if not ((',' in cell) and (cell.endswith('i')))))
        return '\n'.join(parts)
# Self-test harness (Python 2 syntax).  The matrix below appears to be the
# standard 7-column example from Knuth's Dancing Links paper -- TODO confirm;
# it has exactly one exact cover: rows {A,D}, {C,E,F}, {B,G}.
if __name__ == '__main__':
print 'testing exact_cover_x2.py:\n'
# header row: column names; data rows: 1 marks membership in a column
matrix = [
'A B C D E F G'.split(),
[0, 0, 1, 0, 1, 1, 0],
[1, 0, 0, 1, 0, 0, 1],
[0, 1, 1, 0, 0, 1, 0],
[1, 0, 0, 1, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 1],
[0, 0, 0, 1, 1, 0, 1]]
puzzle = ExactCover(matrix)
# dump the internal representation for inspection
print 'columns ='
pprint(puzzle.columns)
print '\nrows ='
pprint(puzzle.rows)
# enumerate and print every solution, formatted and raw
for solution in puzzle.solve():
print '\n', puzzle.format_solution(), '\n'
print 'unformatted:\n', solution, '\n'
# search-effort statistic accumulated by solve()
print puzzle.num_searches, 'searches'
| #!/usr/bin/env python
# $Id$
# Author: <NAME> <<EMAIL>>
# Copyright: (C) 1998-2015 by <NAME>;
# portions copyright 2010 by Ali Assaf
# License: GPL 2 (see __init__.py)
"""
An implementation of <NAME>'s 'Algorithm X' [1]_ for the generalized
exact cover problem [2]_ using a high-level native data structure technique
devised by Ali Assaf [3]_.
.. [1] http://en.wikipedia.org/wiki/Knuth%27s_Algorithm_X
.. [2] http://en.wikipedia.org/wiki/Exact_cover
.. [3] http://www.cs.mcgill.ca/~aassaf9/python/algorithm_x.html
"""
from pprint import pprint
# optional acceleration with Psyco
try:
import psyco
psyco.full()
except ImportError:
pass
class ExactCover(object):
"""
Given a sparse matrix of 0s and 1s, find every set of rows containing
exactly one 1 in each primary column (and at most one 1 in each secondary
column). See `load_matrix` for a description of the data structure.
Uses the native approach to Knuth's Algorithm X.
"""
def __init__(self, matrix=None, secondary=0, state=None):
"""
Parameters:
* `matrix` & `secondary`: see `self.load_matrix`.
* `state`: a `puzzler.SessionState` object which stores the runtime
state of this puzzle (we're resuming a previously interrupted
puzzle), or None (no state, we're starting from the beginning).
"""
self.columns = None
"""A dictionary mapping column names to sets of row indices (the index
of each row which contains a 1/True for that column)."""
self.secondary_columns = None
"""A set of secondary column names."""
self.rows = None
"""A list of lists of column names. Each list represents one row of
the exact cover matrix: all the columns containing a 1/True."""
self.solution = []
self.num_solutions = 0
self.num_searches = 0
if state:
self.solution = state.solution
self.num_solutions = state.num_solutions
self.num_searches = state.num_searches
if matrix:
self.load_matrix(matrix, secondary)
def load_matrix(self, matrix, secondary=0):
"""
Convert and store the input `matrix` into `self.columns`,
`self.secondary_columns`, and `self.rows`.
The input `matrix` is a two-dimensional list of tuples:
* Each row is a tuple of equal length.
* The first row contains the column names: first the puzzle piece
names, then the solution space coordinates. For example::
('A', 'B', 'C', '0,0', '1,0', '0,1', '1,1')
* The subsequent rows consist of 1 & 0 (True & False) values. Each
row contains a 1/True value in the column identifying the piece, and
1/True values in each column identifying the position. There must
be one row for each possible position of each puzzle piece.
The `secondary` parameter is the number of secondary (rightmost)
columns: columns which may, but need not, participate in the solution.
"""
matrix_iter = iter(matrix)
column_names = matrix_iter.next()
self.secondary_columns = set(
column_names[(len(column_names) - secondary):])
self.columns = dict((j, set()) for j in column_names)
self.rows = [
[column_names[j] for j in range(len(column_names)) if row[j]]
for row in matrix_iter]
for (r, row) in enumerate(self.rows):
for c in row:
self.columns[c].add(r)
def solve(self, level=0):
"""A generator that produces all solutions: Algorithm X."""
if not (set(self.columns) - self.secondary_columns):
yield self.full_solution()
return
self.num_searches += 1
_size, c = min((len(self.columns[column]), column)
for column in self.columns
if column not in self.secondary_columns)
# Since `self.columns` is being modified, a copy must be made here.
# `sorted()` is used instead of `list()` to get reproducible output.
for r in sorted(self.columns[c]):
if len(self.solution) > level:
if self.solution[level] != r:
# skip rows already fully explored
continue
else:
self.solution.append(r)
covered = self.cover(r)
for s in self.solve(level+1):
yield s
self.uncover(r, covered)
self.solution.pop()
def cover(self, r):
    """Algorithm X "cover" step for row `r`.

    For every column used by row `r`: remove each *other* row that also
    uses that column from all of its remaining columns, then remove the
    column itself from `self.columns`.  Returns the popped row-index
    sets, in order, so `uncover` can restore them exactly.
    """
    columns = self.columns
    rows = self.rows
    covered = []
    for j in rows[r]:
        for i in columns[j]:
            # Remove conflicting row `i` from all its other columns.
            for k in rows[i]:
                if k != j:
                    columns[k].remove(i)
        # Remove column `j`, remembering its row set for `uncover`.
        covered.append(self.columns.pop(j))
    return covered
def uncover(self, r, covered):
    """Undo a matching `cover(r)` call.

    `covered` is the list returned by `cover(r)`.  Columns are restored
    in reverse order (and `covered` is consumed via `pop()`), which is
    the exact inverse of the removal order in `cover`.
    """
    columns = self.columns
    rows = self.rows
    for j in reversed(rows[r]):
        # Reinsert column `j` with its saved row set, then re-add the
        # conflicting rows to all their other columns.
        columns[j] = covered.pop()
        for i in columns[j]:
            for k in rows[i]:
                if k != j:
                    columns[k].add(i)
def full_solution(self):
    """
    Return an expanded representation (full row details) of a solution,
    based on the internal minimal representation (row indices).
    """
    expanded = []
    for row_index in self.solution:
        # Sort each row's column names for a stable, readable ordering.
        expanded.append(sorted(self.rows[row_index]))
    return expanded
def format_solution(self):
    """Return a simple formatted string representation of the solution."""
    self.num_solutions += 1
    lines = ['solution %i:' % self.num_solutions]
    for row in self.full_solution():
        # Secondary "intersection" cells -- coordinates (containing a
        # comma) whose name ends with 'i' -- are omitted from the output.
        kept = [cell for cell in row
                if not ((',' in cell) and (cell.endswith('i')))]
        lines.append(' '.join(kept))
    return '\n'.join(lines)
if __name__ == '__main__':
    # Self-test / demonstration.  NOTE: this module is Python 2 (print
    # statements); run it with a Python 2 interpreter.
    print 'testing exact_cover_x2.py:\n'
    # A small 0/1 exact-cover matrix: first row is the column names,
    # each following row marks the columns it covers.
    matrix = [
        'A B C D E F G'.split(),
        [0, 0, 1, 0, 1, 1, 0],
        [1, 0, 0, 1, 0, 0, 1],
        [0, 1, 1, 0, 0, 1, 0],
        [1, 0, 0, 1, 0, 0, 0],
        [0, 1, 0, 0, 0, 0, 1],
        [0, 0, 0, 1, 1, 0, 1]]
    puzzle = ExactCover(matrix)
    # Dump the internal representation built by load_matrix().
    print 'columns ='
    pprint(puzzle.columns)
    print '\nrows ='
    pprint(puzzle.rows)
    # Enumerate and display every exact cover of the matrix.
    for solution in puzzle.solve():
        print '\n', puzzle.format_solution(), '\n'
        print 'unformatted:\n', solution, '\n'
    print puzzle.num_searches, 'searches'
There must be one row for each possible position of each puzzle piece. The `secondary` parameter is the number of secondary (rightmost) columns: columns which may, but need not, participate in the solution. A generator that produces all solutions: Algorithm X. # Since `self.columns` is being modified, a copy must be made here. # `sorted()` is used instead of `list()` to get reproducible output. # skip rows already fully explored Return an expanded representation (full row details) of a solution, based on the internal minimal representation (row indices). Return a simple formatted string representation of the solution. # omit secondary columns (intersections): | 3.146862 | 3 |
cea/technologies/constants.py | pajotca/CityEnergyAnalyst | 1 | 6622630 | """
Constants used throughout the cea.technologies package.
History lesson: This is a first step at removing the `cea.globalvars.GlobalVariables` object.
"""
# Heat Exchangers
U_COOL = 2500.0 # W/m2K
U_HEAT = 2500.0 # W/m2K
DT_HEAT = 5.0 # K - pinch delta at design conditions
DT_COOL = 2.0 # K - pinch delta at design conditions
DT_INTERNAL_HEX = 2.0 # K - minimum difference between cold side outflow and hot side inflow temperatures
HEAT_EX_EFFECTIVENESS = 0.9 # assume starting value for heat exchanger effectiveness (exergy)
MAX_NODE_FLOW = 22.0 # kg/s
# Heat pump
HP_MAX_SIZE = 20.0E6 # max thermal design size [Wth]
HP_MIN_SIZE = 1.0E6 # min thermal design size [Wth]
HP_ETA_EX = 0.6 # exergetic efficiency of WSHP [L. Girardin et al., 2010]_
HP_DELTA_T_COND = 2.0 # pinch for condenser [K]
HP_DELTA_T_EVAP = 2.0 # pinch for evaporator [K]
HP_MAX_T_COND = 140 + 273.0 # max temperature at condenser [K]
HP_AUXRATIO = 0.83 # Wdot_comp / Wdot_total (circulating pumps)
# Substation data
ROUGHNESS = 0.02 / 1000 # roughness coefficient for heating network pipe in m (for a steel pipe, from Li &
NETWORK_DEPTH = 1 # m
# Initial Diameter guess
REDUCED_TIME_STEPS = 50 # number of time steps of maximum demand which are evaluated as an initial guess of the edge diameters
MAX_INITIAL_DIAMETER_ITERATIONS = 15 #number of initial guess iterations for pipe diameters
# Loop Network Diameter iterations
FULL_COOLING_SYSTEMS_LIST = ['cs_ahu', 'cs_aru', 'cs_scu', 'cs_data', 'cs_re']
FULL_HEATING_SYSTEMS_LIST = ['hs_ahu', 'hs_aru', 'hs_shu', 'hs_ww']
# Cogeneration (CCGT)
SPEC_VOLUME_STEAM = 0.0010 # m3/kg
# Storage tank
TANK_HEX_EFFECTIVENESS = 0.9 # assuming 90% effectiveness
| """
Constants used throughout the cea.technologies package.
History lesson: This is a first step at removing the `cea.globalvars.GlobalVariables` object.
"""
# Heat Exchangers
U_COOL = 2500.0 # W/m2K
U_HEAT = 2500.0 # W/m2K
DT_HEAT = 5.0 # K - pinch delta at design conditions
DT_COOL = 2.0 # K - pinch delta at design conditions
DT_INTERNAL_HEX = 2.0 # K - minimum difference between cold side outflow and hot side inflow temperatures
HEAT_EX_EFFECTIVENESS = 0.9 # assume starting value for heat exchanger effectiveness (exergy)
MAX_NODE_FLOW = 22.0 # kg/s
# Heat pump
HP_MAX_SIZE = 20.0E6 # max thermal design size [Wth]
HP_MIN_SIZE = 1.0E6 # min thermal design size [Wth]
HP_ETA_EX = 0.6 # exergetic efficiency of WSHP [L. Girardin et al., 2010]_
HP_DELTA_T_COND = 2.0 # pinch for condenser [K]
HP_DELTA_T_EVAP = 2.0 # pinch for evaporator [K]
HP_MAX_T_COND = 140 + 273.0 # max temperature at condenser [K]
HP_AUXRATIO = 0.83 # Wdot_comp / Wdot_total (circulating pumps)
# Substation data
ROUGHNESS = 0.02 / 1000 # roughness coefficient for heating network pipe in m (for a steel pipe, from Li &
NETWORK_DEPTH = 1 # m
# Initial Diameter guess
REDUCED_TIME_STEPS = 50 # number of time steps of maximum demand which are evaluated as an initial guess of the edge diameters
MAX_INITIAL_DIAMETER_ITERATIONS = 15 #number of initial guess iterations for pipe diameters
# Loop Network Diameter iterations
FULL_COOLING_SYSTEMS_LIST = ['cs_ahu', 'cs_aru', 'cs_scu', 'cs_data', 'cs_re']
FULL_HEATING_SYSTEMS_LIST = ['hs_ahu', 'hs_aru', 'hs_shu', 'hs_ww']
# Cogeneration (CCGT)
SPEC_VOLUME_STEAM = 0.0010 # m3/kg
# Storage tank
TANK_HEX_EFFECTIVENESS = 0.9 # assuming 90% effectiveness
| en | 0.797818 | Constants used throughout the cea.technologies package. History lesson: This is a first step at removing the `cea.globalvars.GlobalVariables` object. # Heat Exchangers # W/m2K # W/m2K # K - pinch delta at design conditions # K - pinch delta at design conditions # K - minimum difference between cold side outflow and hot side inflow temperatures # assume starting value for heat exchanger effectiveness (exergy) # kg/s # Heat pump # max thermal design size [Wth] # min thermal design size [Wth] # exergetic efficiency of WSHP [L. Girardin et al., 2010]_ # pinch for condenser [K] # pinch for evaporator [K] # max temperature at condenser [K] # Wdot_comp / Wdot_total (circulating pumps) # Substation data # roughness coefficient for heating network pipe in m (for a steel pipe, from Li & # m # Initial Diameter guess # number of time steps of maximum demand which are evaluated as an initial guess of the edge diameters #number of initial guess iterations for pipe diameters # Loop Network Diameter iterations # Cogeneration (CCGT) # m3/kg # Storage tank # assuming 90% effectiveness | 1.762762 | 2 |
airbyte-integrations/connectors/source-slack-singer/source_slack_singer/__init__.py | rajatariya21/airbyte | 2 | 6622631 | from .source import SourceSlackSinger
__all__ = ["SourceSlackSinger"]
| from .source import SourceSlackSinger
__all__ = ["SourceSlackSinger"]
| none | 1 | 1.037758 | 1 | |
Elements/ElemLineBase.py | hjabird/XFEM_Boundary_Cooling_Solver | 0 | 6622632 | <reponame>hjabird/XFEM_Boundary_Cooling_Solver<filename>Elements/ElemLineBase.py
# -*- coding: utf-8 -*-
"""
@author: <NAME>
@copyright Copyright 2016, <NAME>
@licence: MIT
@status: alpha
"""
import numpy as np
from .ElemBaseClass import ElemBaseClass
class ElemLineBase(ElemBaseClass):
    """ Base class for 1D line element

        (-1)------(1)

    Class adds nd() and gen_gp()
    """

    @staticmethod
    def nd():
        # Number of parametric dimensions of a line element.
        return 1;

    def gen_gp(self, gauss_order):
        """Return 1D Gauss-Legendre quadrature data for `gauss_order[0]`.

        `gauss_order` must be a 1-element tuple (one order per parametric
        dimension); delegates to `gauss_legendre_1D` on the base class.
        """
        # gauss_order should be 1 element tuple.
        assert(len(gauss_order) == 1)
        return self.gauss_legendre_1D(gauss_order[0])
"""
@author: <NAME>
@copyright Copyright 2016, <NAME>
@licence: MIT
@status: alpha
"""
import numpy as np
from .ElemBaseClass import ElemBaseClass
class ElemLineBase(ElemBaseClass):
""" Base class for 1D line element
(-1)------(1)
Class adds nd() and gen_gp()
"""
@staticmethod
def nd():
return 1;
def gen_gp(self, gauss_order):
# gauss_order should be 1 element tuple.
assert(len(gauss_order) == 1)
return self.gauss_legendre_1D(gauss_order[0]) | en | 0.588346 | # -*- coding: utf-8 -*- @author: <NAME>
@copyright Copyright 2016, <NAME>
@licence: MIT
@status: alpha Base class for 1D line element
(-1)------(1)
Class adds nd() and gen_gp() # gauss_order should be 1 element tuple. | 2.777748 | 3 |
src/posts/sitemaps.py | pratik-devkota/notesewa | 0 | 6622633 | # core django imports
from django.contrib.sitemaps import Sitemap
# app imports
from posts.models import Post
class PostSiteMap(Sitemap):
    """
    A sitemap class indicating the objects, change frequency, and
    various pages of our website for better search engine indexing
    """
    # Crawler hints: expected update cadence and relative page importance.
    changefreq = 'weekly'
    priority = 0.9

    def items(self):
        """
        Returns the queryset of objects to include in our sitemap
        """
        # `published` is presumably a custom manager that filters to
        # published posts -- see posts.models.
        return Post.published.all()

    def lastmod(self, obj):
        """Return the last-modification timestamp of a single post."""
        return obj.modified
| # core django imports
from django.contrib.sitemaps import Sitemap
# app imports
from posts.models import Post
class PostSiteMap(Sitemap):
"""
A sitemap class indicating the objects, change frequency, and
various pages of our website for better search engine indexing
"""
changefreq = 'weekly'
priority = 0.9
def items(self):
"""
Returns the queryset of objects to include in our sitemap
"""
return Post.published.all()
def lastmod(self, obj):
return obj.modified
| en | 0.852053 | # core django imports # app imports A sitemap class indicating the objects, change frequency, and various pages of our website for better search engine indexing Returns the queryset of objects to include in our sitemap | 2.214101 | 2 |
auth-center/App/api/user_source/user_list.py | Basic-Components/auth-center | 1 | 6622634 | """
操作User这张表,可以查看,添加或者删除一条记录,修改需要对应的其他resource
"""
import uuid
import datetime
import peewee
from sanic.views import HTTPMethodView
from sanic.response import json
from App.model import User, Role
from App.decorator import authorized, role_check
class UserListSource(HTTPMethodView):
    """Bulk operations on the ``User`` table.

    GET lists users (optionally filtered by username), POST bulk-creates
    users, DELETE bulk-removes users.  Modifying a single record is
    handled by other resources.
    """
    # Every handler below requires an authorised caller with a valid role.
    decorators = [role_check(),authorized()]

    async def get(self, request):
        """List all users, or check whether a given username exists.

        Optional query parameter ``name``: when present, return only the
        user whose ``username`` equals ``name``; respond with status 400
        (message "找不到用户", i.e. "user not found") when no such user
        exists.
        """
        name = request.args.get("name")
        if name:
            try:
                user = await User.get(User.username == name)
            # NOTE(review): bare `except` also swallows unrelated errors;
            # catching peewee's DoesNotExist would be safer.
            except:
                return json({"message":"找不到用户"},400)
            else:
                users = [user]
        else:
            users = await User.select()
        # Serialise each user together with the names of its roles.
        return json({"userlist": [
            {"_id": str(user._id),
             "username": user.username,
             "main_email":user.main_email,
             "roles": [i.service_name for i in await user.roles]} for user in users]
        })

    async def post(self, request):
        """Bulk-insert new users via ``insert_many``.

        The JSON body must contain a list named ``users``; each element
        must provide ``username``, ``password`` and ``main_email``.
        """
        try:
            # Fail fast with a usage message when the payload is malformed.
            request.json["users"]
        except:
            return json({"message":"需要传入一个名为users的列表,每个元素包含username和password和main_email"},500)
        # Server-side fields: a fresh UUID `_id` and the creation time.
        iq = User.insert_many([{"_id": uuid.uuid4(),
                                "username": i["username"],
                                'password':i['password'],
                                "main_email":i['main_email'],
                                "ctime":datetime.datetime.now()
                                } for i in request.json["users"]])
        try:
            result = await iq.execute()
        except peewee.IntegrityError as pe:
            # Unique-constraint violation: some user data already exists.
            return json({"message":"用户数据已存在"},400)
        except Exception as e:
            return json({"message":"数据库错误","error":str(e)},500)
        else:
            if result:
                return json({
                    "result": True
                })
            else:
                return json({
                    "result": False
                })

    async def delete(self, request):
        """Bulk-delete the users whose ``_id`` appears in ``users``.

        The JSON body must contain a list named ``users`` holding user
        ``_id`` values.
        """
        try:
            _ids = request.json["users"]
        except:
            return json({"message":"需要传入一个名为users的列表,每个元素为user的_id"},400)
        # peewee: `<<` is the SQL `IN` operator.
        dq = User.delete().where(User._id << _ids)
        try:
            result = await dq.execute()
            # NOTE(review): leftover debug print -- consider removing.
            print(result)
        except Exception as e:
            return json({"message":"数据库错误","error":str(e)},500)
        else:
            if result:
                return json({
                    "result": True
                })
            else:
                return json({
                    "result": False
                })
| """
操作User这张表,可以查看,添加或者删除一条记录,修改需要对应的其他resource
"""
import uuid
import datetime
import peewee
from sanic.views import HTTPMethodView
from sanic.response import json
from App.model import User, Role
from App.decorator import authorized, role_check
class UserListSource(HTTPMethodView):
"""操作User这张表
"""
decorators = [role_check(),authorized()]
async def get(self, request):
"""直接查看User中的全部内容,可以添加参数name查看username为name的用户是否存在
"""
name = request.args.get("name")
if name:
try:
user = await User.get(User.username == name)
except:
return json({"message":"找不到用户"},400)
else:
users = [user]
else:
users = await User.select()
return json({"userlist": [
{"_id": str(user._id),
"username": user.username,
"main_email":user.main_email,
"roles": [i.service_name for i in await user.roles]} for user in users]
})
async def post(self, request):
"""为User表批量添加新的成员,使用inser_many,传入的必须为一个名为users的列表,每个元素包含username和password和main_email
"""
try:
request.json["users"]
except:
return json({"message":"需要传入一个名为users的列表,每个元素包含username和password和main_email"},500)
iq = User.insert_many([{"_id": uuid.uuid4(),
"username": i["username"],
'password':i['password'],
"main_email":i['main_email'],
"ctime":datetime.datetime.now()
} for i in request.json["users"]])
try:
result = await iq.execute()
except peewee.IntegrityError as pe:
return json({"message":"用户数据已存在"},400)
except Exception as e:
return json({"message":"数据库错误","error":str(e)},500)
else:
if result:
return json({
"result": True
})
else:
return json({
"result": False
})
async def delete(self, request):
"""在User表中删除_id在users的用户,users传入的是一串_id列表
"""
try:
_ids = request.json["users"]
except:
return json({"message":"需要传入一个名为users的列表,每个元素为user的_id"},400)
dq = User.delete().where(User._id << _ids)
try:
result = await dq.execute()
print(result)
except Exception as e:
return json({"message":"数据库错误","error":str(e)},500)
else:
if result:
return json({
"result": True
})
else:
return json({
"result": False
})
| zh | 0.931657 | 操作User这张表,可以查看,添加或者删除一条记录,修改需要对应的其他resource 操作User这张表 直接查看User中的全部内容,可以添加参数name查看username为name的用户是否存在 为User表批量添加新的成员,使用inser_many,传入的必须为一个名为users的列表,每个元素包含username和password和main_email 在User表中删除_id在users的用户,users传入的是一串_id列表 | 2.422012 | 2 |
create_dummies.py | InsaneFirebat/SM_Savestates | 0 | 6622635 | #!/usr/bin/env python
import sys
import os
if sys.argv[1] == "":
print("create_dummies.py <00_file> <ff_file>")
sys.exit()
else:
zero_name = sys.argv[1]
ff_name = sys.argv[2]
fo_z = open(os.path.dirname(os.path.realpath(__file__)) + "/" + zero_name, "wb")
fo_f = open(os.path.dirname(os.path.realpath(__file__)) + "/" + ff_name, "wb")
fo_z.write(bytearray([0x00] * 1024 * 1024 * 4))
fo_f.write(bytearray([0xff] * 1024 * 1024 * 4))
fo_z.close()
fo_f.close()
| #!/usr/bin/env python
import sys
import os
if sys.argv[1] == "":
print("create_dummies.py <00_file> <ff_file>")
sys.exit()
else:
zero_name = sys.argv[1]
ff_name = sys.argv[2]
fo_z = open(os.path.dirname(os.path.realpath(__file__)) + "/" + zero_name, "wb")
fo_f = open(os.path.dirname(os.path.realpath(__file__)) + "/" + ff_name, "wb")
fo_z.write(bytearray([0x00] * 1024 * 1024 * 4))
fo_f.write(bytearray([0xff] * 1024 * 1024 * 4))
fo_z.close()
fo_f.close()
| ru | 0.26433 | #!/usr/bin/env python | 2.585008 | 3 |
src/gurobi_cases/dense.py | bingrao/cache_managment | 0 | 6622636 | # This example formulates and solves the following simple QP model:
#
# minimize x + y + x^2 + x*y + y^2 + y*z + z^2
# subject to x + 2 y + 3 z >= 4
# x + y >= 1
# x, y, z non-negative
#
# The example illustrates the use of dense matrices to store A and Q
# (and dense vectors for the other relevant data). We don't recommend
# that you use dense matrices, but this example may be helpful if you
# already have your data in this format.
import sys
import gurobipy as gp
from gurobipy import GRB
import numpy as np
import scipy.sparse as sp
def dense_optimize():
    """Build and solve the QP of the module header with dense data.

    minimize    x + y + x^2 + x*y + y^2 + y*z + z^2
    subject to  x + 2 y + 3 z >= 4
                x + y >= 1
                x, y, z >= 0

    Returns:
        (success, solution): ``success`` is True when Gurobi reports an
        optimal solution; ``solution`` is the list ``[x, y, z]``.
    """
    # Put model data into dense matrices
    c = [1, 1, 0]
    # Upper-triangular Q encodes x^2 + x*y + y^2 + y*z + z^2.
    Q = [[1, 1, 0], [0, 1, 1], [0, 0, 1]]
    A = [[1, 2, 3], [1, 1, 0]]
    sense = [GRB.GREATER_EQUAL, GRB.GREATER_EQUAL]
    rhs = [4, 1]
    lb = [0, 0, 0]
    ub = [GRB.INFINITY, GRB.INFINITY, GRB.INFINITY]
    vtype = [GRB.CONTINUOUS, GRB.CONTINUOUS, GRB.CONTINUOUS]
    solution = [0] * 3
    rows = 2
    cols = 3

    # Optimize
    model = gp.Model()

    # Add variables to model
    vars = []
    for j in range(cols):
        vars.append(model.addVar(lb=lb[j], ub=ub[j], vtype=vtype[j]))

    # Populate A matrix: one linear constraint per row of A.
    for i in range(rows):
        expr = gp.LinExpr()
        for j in range(cols):
            if A[i][j] != 0:
                expr += A[i][j]*vars[j]
        model.addConstr(expr, sense[i], rhs[i])

    # Populate objective: quadratic terms from Q, linear terms from c.
    obj = gp.QuadExpr()
    for i in range(cols):
        for j in range(cols):
            if Q[i][j] != 0:
                obj += Q[i][j]*vars[i]*vars[j]
    for j in range(cols):
        if c[j] != 0:
            obj += c[j]*vars[j]
    model.setObjective(obj)

    # Solve
    model.optimize()

    # Write model to a file
    # model.write('dense.lp')

    if model.status == GRB.OPTIMAL:
        # Copy the optimal variable values out of the model.
        x = model.getAttr('x', vars)
        for i in range(cols):
            solution[i] = x[i]
        return True, solution
    else:
        return False, solution
def dense_optimize_v2():
    """Build and solve the same QP as ``dense_optimize`` via the matrix API.

    minimize    x + y + x^2 + x*y + y^2 + y*z + z^2
    subject to  x + 2 y + 3 z >= 4
                x + y >= 1
                x, y, z >= 0

    Returns:
        (success, solution): ``success`` is True when Gurobi reports an
        optimal solution; ``solution`` is the list ``[x, y, z]``.
    """
    solution = [0] * 3

    # Optimize
    model = gp.Model()
    xyz = model.addMVar(shape=3, lb=0.0, ub=GRB.INFINITY, vtype=GRB.CONTINUOUS, name="xyz")
    x = xyz.vararr[0]
    y = xyz.vararr[1]
    z = xyz.vararr[2]

    # Build (sparse) constraint matrix; rows 2-4 encode x, y, z >= 0.
    data = np.array([1.0, 2.0, 3.0, 1.0, 1.0, 1.0, 1.0, 1.0])
    row = np.array([0, 0, 0, 1, 1, 2, 3, 4])
    col = np.array([0, 1, 2, 0, 1, 0, 1, 2])
    A = sp.csr_matrix((data, (row, col)), shape=(5, 3))

    # Build rhs vector
    rhs = np.array([4.0, 1.0, 0.0, 0.0, 0.0])

    # Add constraints
    model.addConstr(A @ xyz >= rhs, name="c")

    # Populate objective.  BUGFIX: the `x*y` cross term required by the
    # model stated in the module header was missing here.
    obj = gp.QuadExpr()
    obj += x + y + x * x + x * y + y * y + y * z + z * z
    model.setObjective(obj)

    # Solve
    model.optimize()

    if model.status == GRB.OPTIMAL:
        # BUGFIX: the original called `model.getAttr('x', vars)` where
        # `vars` was the *builtin* function, not the model's variables.
        # Read the optimal values directly from the MVar instead.
        vals = xyz.X
        for i in range(3):
            solution[i] = vals[i]
        return True, solution
    return False, solution
if __name__ == "__main__":
    # Solve the dense formulation and report the optimal point.
    success, sol = dense_optimize()
    if success:
        print('x: %g, y: %g, z: %g' % (sol[0], sol[1], sol[2]))
    # The matrix-API variant is kept available but disabled by default:
    # print("*********************************************************")
    # success, sol = dense_optimize_v2()
    # if success:
    #     print('x: %g, y: %g, z: %g' % (sol[0], sol[1], sol[2]))
#
# minimize x + y + x^2 + x*y + y^2 + y*z + z^2
# subject to x + 2 y + 3 z >= 4
# x + y >= 1
# x, y, z non-negative
#
# The example illustrates the use of dense matrices to store A and Q
# (and dense vectors for the other relevant data). We don't recommend
# that you use dense matrices, but this example may be helpful if you
# already have your data in this format.
import sys
import gurobipy as gp
from gurobipy import GRB
import numpy as np
import scipy.sparse as sp
def dense_optimize():
# Put model data into dense matrices
c = [1, 1, 0]
Q = [[1, 1, 0], [0, 1, 1], [0, 0, 1]]
A = [[1, 2, 3], [1, 1, 0]]
sense = [GRB.GREATER_EQUAL, GRB.GREATER_EQUAL]
rhs = [4, 1]
lb = [0, 0, 0]
ub = [GRB.INFINITY, GRB.INFINITY, GRB.INFINITY]
vtype = [GRB.CONTINUOUS, GRB.CONTINUOUS, GRB.CONTINUOUS]
solution = [0] * 3
rows = 2
cols = 3
# Optimize
model = gp.Model()
# Add variables to model
vars = []
for j in range(cols):
vars.append(model.addVar(lb=lb[j], ub=ub[j], vtype=vtype[j]))
# Populate A matrix
for i in range(rows):
expr = gp.LinExpr()
for j in range(cols):
if A[i][j] != 0:
expr += A[i][j]*vars[j]
model.addConstr(expr, sense[i], rhs[i])
# Populate objective
obj = gp.QuadExpr()
for i in range(cols):
for j in range(cols):
if Q[i][j] != 0:
obj += Q[i][j]*vars[i]*vars[j]
for j in range(cols):
if c[j] != 0:
obj += c[j]*vars[j]
model.setObjective(obj)
# Solve
model.optimize()
# Write model to a file
# model.write('dense.lp')
if model.status == GRB.OPTIMAL:
x = model.getAttr('x', vars)
for i in range(cols):
solution[i] = x[i]
return True, solution
else:
return False, solution
def dense_optimize_v2():
solution = [0] * 3
# Optimize
model = gp.Model()
xyz = model.addMVar(shape=3, lb=0.0, ub=GRB.INFINITY, vtype=GRB.CONTINUOUS, name="xyz")
x = xyz.vararr[0]
y = xyz.vararr[1]
z = xyz.vararr[2]
# Build (sparse) constraint matrix
data = np.array([1.0, 2.0, 3.0, 1.0, 1.0, 1.0, 1.0, 1.0])
row = np.array([0, 0, 0, 1, 1, 2, 3, 4])
col = np.array([0, 1, 2, 0, 1, 0, 1, 2])
A = sp.csr_matrix((data, (row, col)), shape=(5, 3))
# Build rhs vector
rhs = np.array([4.0, 1.0, 0.0, 0.0, 0.0])
# Add constraints
model.addConstr(A @ xyz >= rhs, name="c")
# Populate objective
obj = gp.QuadExpr()
obj += x + y + x * x + y * y + y * z + z * z
model.setObjective(obj)
# Solve
model.optimize()
# Write model to a file
# model.write('dense.lp')
if model.status == GRB.OPTIMAL:
x = model.getAttr('x', vars)
for i in range(3):
solution[i] = x[i]
return True, solution
else:
return False, solution
if __name__ == "__main__":
success, sol = dense_optimize()
if success:
print('x: %g, y: %g, z: %g' % (sol[0], sol[1], sol[2]))
# print("*********************************************************")
# success, sol = dense_optimize_v2()
# if success:
# print('x: %g, y: %g, z: %g' % (sol[0], sol[1], sol[2])) | en | 0.675378 | # This example formulates and solves the following simple QP model: # # minimize x + y + x^2 + x*y + y^2 + y*z + z^2 # subject to x + 2 y + 3 z >= 4 # x + y >= 1 # x, y, z non-negative # # The example illustrates the use of dense matrices to store A and Q # (and dense vectors for the other relevant data). We don't recommend # that you use dense matrices, but this example may be helpful if you # already have your data in this format. # Put model data into dense matrices # Optimize # Add variables to model # Populate A matrix # Populate objective # Solve # Write model to a file # model.write('dense.lp') # Optimize # Build (sparse) constraint matrix # Build rhs vector # Add constraints # Populate objective # Solve # Write model to a file # model.write('dense.lp') # print("*********************************************************") # success, sol = dense_optimize_v2() # if success: # print('x: %g, y: %g, z: %g' % (sol[0], sol[1], sol[2])) | 2.845248 | 3 |
OSX-replace-chars-in-iPhoto.py | bitounu/shitz | 0 | 6622637 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import EasyDialogs
import os
def gdzie_zapisac():
# ask for destination folder
dir = EasyDialogs.AskFolder(
message='Select destinatin folder',
defaultLocation=os.getcwd(),
wanted=unicode,
)
return dir
def napraw_nazwy(album):
# loop for ZNAKI table
ZNAKI = [',', '.', '\\', '/', '+', ':', ' ']
for s in ZNAKI:
album = album.replace(s, '_')
return album
dir = gdzie_zapisac()
iPAlbumCount = os.getenv('iPAlbumCount')
for i in range(int(iPAlbumCount)):
ALBUM_x = 'iPAlbumName_' + str(i) # variable name for particular album
ALBUM = os.getenv(ALBUM_x) # link to its value with eval()
ALBUM = napraw_nazwy(ALBUM) # replacing characters in albums name
# ALBUMPATH = os.path.join(dir,ALBUM) # path to folder
ALBUMPATH = dir + '/' + ALBUM.decode('utf-8')
print "ALBUMPATH = " + ALBUMPATH.encode('utf-8')
os.mkdir(ALBUMPATH) # create folder
| #!/usr/bin/python
# -*- coding: utf-8 -*-
import EasyDialogs
import os
def gdzie_zapisac():
# ask for destination folder
dir = EasyDialogs.AskFolder(
message='Select destinatin folder',
defaultLocation=os.getcwd(),
wanted=unicode,
)
return dir
def napraw_nazwy(album):
# loop for ZNAKI table
ZNAKI = [',', '.', '\\', '/', '+', ':', ' ']
for s in ZNAKI:
album = album.replace(s, '_')
return album
dir = gdzie_zapisac()
iPAlbumCount = os.getenv('iPAlbumCount')
for i in range(int(iPAlbumCount)):
ALBUM_x = 'iPAlbumName_' + str(i) # variable name for particular album
ALBUM = os.getenv(ALBUM_x) # link to its value with eval()
ALBUM = napraw_nazwy(ALBUM) # replacing characters in albums name
# ALBUMPATH = os.path.join(dir,ALBUM) # path to folder
ALBUMPATH = dir + '/' + ALBUM.decode('utf-8')
print "ALBUMPATH = " + ALBUMPATH.encode('utf-8')
os.mkdir(ALBUMPATH) # create folder
| en | 0.748161 | #!/usr/bin/python # -*- coding: utf-8 -*- # ask for destination folder # loop for ZNAKI table # variable name for particular album # link to its value with eval() # replacing characters in albums name # ALBUMPATH = os.path.join(dir,ALBUM) # path to folder # create folder | 3.04568 | 3 |
src/medigan/constants.py | RichardObi/medigan | 2 | 6622638 | # -*- coding: utf-8 -*-
# ! /usr/bin/env python
"""Global constants of the medigan library
.. codeauthor:: <NAME> <<EMAIL>>
.. codeauthor:: <NAME> <<EMAIL>>
"""
""" Static link to the config of medigan. Note: To add a model, please create pull request in this github repo. """
CONFIG_FILE_URL = "https://raw.githubusercontent.com/RichardObi/medigan-models/main/global.json"
""" Folder path that will be created to locally store the config file. """
CONFIG_FILE_FOLDER = "config"
""" Name and extensions of config file. """
CONFIG_FILE_NAME_AND_EXTENSION = "global.json"
""" The key under which the execution dictionary of a model is nested in the config file. """
CONFIG_FILE_KEY_EXECUTION = 'execution'
""" The key under which the selection dictionary of a model is nested in the config file. """
CONFIG_FILE_KEY_SELECTION = 'selection'
""" The key under which the description dictionary of a model is nested in the config file. """
CONFIG_FILE_KEY_DESCRIPTION = 'description'
""" Below the selection dict, the key under which the performance dictionary of a model is nested in the config file. """
CONFIG_FILE_KEY_PERFORMANCE = 'performance'
""" Below the execution dict, the key under which the dependencies dictionary of a model is nested in the config file. """
CONFIG_FILE_KEY_DEPENDENCIES = "dependencies"
""" Below the execution dict, the key under which the package link of a model is present in the config file.
Note: The model packages are per convention stored on Zenodo where they retrieve a static DOI avoiding security issues
due to static non-modifiable content on Zenodo. Zenodo also helps to maintain clarity of who the owners and contributors
of each generative model (and its IP) in medigan are. """
CONFIG_FILE_KEY_PACKAGE_LINK = "package_link"
""" Below the execution dict, the key under which the extension of a model is present in the config file. """
CONFIG_FILE_KEY_MODEL_EXTENSION = "extension"
""" Below the execution dict, the key under which the package_name of a model is present in the config file. """
CONFIG_FILE_KEY_PACKAGE_NAME = "package_name"
""" Below the execution dict, the key under which the package_name of a model is present in the config file. """
CONFIG_FILE_KEY_GENERATOR = "generator"
""" Below the execution dict, the key under which a model's generator's is present in the config file. """
CONFIG_FILE_KEY_GENERATOR_NAME = "name"
""" Below the execution dict, the key under which a model's image_size is present in the config file. """
CONFIG_FILE_KEY_IMAGE_SIZE = "image_size"
""" Below the execution dict, the key under which a model's name is present in the config file. """
CONFIG_FILE_KEY_MODEL_NAME = "model_name"
""" Below the execution dict, the key under which a nested dict with info on the model's generate() function is present. """
CONFIG_FILE_KEY_GENERATE = "generate_method"
""" Below the execution dict, the key under which the exact name of a model's generate() function is present. """
CONFIG_FILE_KEY_GENERATE_NAME = "name"
""" Below the execution dict, the key under which a nested dict with info on the arguments of a model's generate() function is present. """
CONFIG_FILE_KEY_GENERATE_ARGS = "args"
""" Below the execution dict, the key under which an array of mandatory base arguments of any model's generate() function is present. """
CONFIG_FILE_KEY_GENERATE_ARGS_BASE = "base"
""" Below the execution dict, the key under which a nested dict of key-value pairs of model specific custom arguments of a model's generate() function are present. """
CONFIG_FILE_KEY_GENERATE_ARGS_CUSTOM = "custom"
""" Below the execution dict, the key under which the model_file argument value of any model's generate() function is present. """
CONFIG_FILE_KEY_GENERATE_ARGS_MODEL_FILE = "model_file"
""" Below the execution dict, the key under which the num_samples argument value of any model's generate() function is present. """
CONFIG_FILE_KEY_GENERATE_ARGS_NUM_SAMPLES = "num_samples"
""" Below the execution dict, the key under which the output_path argument value of any model's generate() function is present. """
CONFIG_FILE_KEY_GENERATE_ARGS_OUTPUT_PATH = "output_path"
""" Below the execution dict, the key under which the save images boolean flag argument value of any model's generate() function is present. """
CONFIG_FILE_KEY_GENERATE_ARGS_SAVE_IMAGES = "save_images"
""" The filetype of any of the generative model's python packages after download and before unpacking. """
PACKAGE_EXTENSION = ".zip"
""" The string describing a model's unique id in medigan's data structures. """
MODEL_ID = 'model_id'
""" The default path to a folder under which the outputs of the medigan package (i.e. generated samples) are stored. """
DEFAULT_OUTPUT_FOLDER = "output"
# -*- coding: utf-8 -*-
# ! /usr/bin/env python
"""Global constants of the medigan library

.. codeauthor:: <NAME> <<EMAIL>>
.. codeauthor:: <NAME> <<EMAIL>>
"""

""" Static link to the config of medigan. Note: To add a model, please create pull request in this github repo. """
CONFIG_FILE_URL = "https://raw.githubusercontent.com/RichardObi/medigan-models/main/global.json"

""" Folder path that will be created to locally store the config file. """
CONFIG_FILE_FOLDER = "config"

""" Name and extensions of config file. """
CONFIG_FILE_NAME_AND_EXTENSION = "global.json"

""" The key under which the execution dictionary of a model is nested in the config file. """
CONFIG_FILE_KEY_EXECUTION = "execution"

""" The key under which the selection dictionary of a model is nested in the config file. """
CONFIG_FILE_KEY_SELECTION = "selection"

""" The key under which the description dictionary of a model is nested in the config file. """
CONFIG_FILE_KEY_DESCRIPTION = "description"

""" Below the selection dict, the key under which the performance dictionary of a model is nested in the config file. """
CONFIG_FILE_KEY_PERFORMANCE = "performance"

""" Below the execution dict, the key under which the dependencies dictionary of a model is nested in the config file. """
CONFIG_FILE_KEY_DEPENDENCIES = "dependencies"

""" Below the execution dict, the key under which the package link of a model is present in the config file.
Note: The model packages are per convention stored on Zenodo where they retrieve a static DOI avoiding security issues
due to static non-modifiable content on Zenodo. Zenodo also helps to maintain clarity of who the owners and contributors
of each generative model (and its IP) in medigan are. """
CONFIG_FILE_KEY_PACKAGE_LINK = "package_link"

""" Below the execution dict, the key under which the extension of a model is present in the config file. """
CONFIG_FILE_KEY_MODEL_EXTENSION = "extension"

""" Below the execution dict, the key under which the package_name of a model is present in the config file. """
CONFIG_FILE_KEY_PACKAGE_NAME = "package_name"

""" Below the execution dict, the key under which the generator dict of a model is present in the config file. """
CONFIG_FILE_KEY_GENERATOR = "generator"

""" Below the execution dict, the key under which a model's generator's name is present in the config file. """
CONFIG_FILE_KEY_GENERATOR_NAME = "name"

""" Below the execution dict, the key under which a model's image_size is present in the config file. """
CONFIG_FILE_KEY_IMAGE_SIZE = "image_size"

""" Below the execution dict, the key under which a model's name is present in the config file. """
CONFIG_FILE_KEY_MODEL_NAME = "model_name"

""" Below the execution dict, the key under which a nested dict with info on the model's generate() function is present. """
CONFIG_FILE_KEY_GENERATE = "generate_method"

""" Below the execution dict, the key under which the exact name of a model's generate() function is present. """
CONFIG_FILE_KEY_GENERATE_NAME = "name"

""" Below the execution dict, the key under which a nested dict with info on the arguments of a model's generate() function is present. """
CONFIG_FILE_KEY_GENERATE_ARGS = "args"

""" Below the execution dict, the key under which an array of mandatory base arguments of any model's generate() function is present. """
CONFIG_FILE_KEY_GENERATE_ARGS_BASE = "base"

""" Below the execution dict, the key under which a nested dict of key-value pairs of model specific custom arguments of a model's generate() function are present. """
CONFIG_FILE_KEY_GENERATE_ARGS_CUSTOM = "custom"

""" Below the execution dict, the key under which the model_file argument value of any model's generate() function is present. """
CONFIG_FILE_KEY_GENERATE_ARGS_MODEL_FILE = "model_file"

""" Below the execution dict, the key under which the num_samples argument value of any model's generate() function is present. """
CONFIG_FILE_KEY_GENERATE_ARGS_NUM_SAMPLES = "num_samples"

""" Below the execution dict, the key under which the output_path argument value of any model's generate() function is present. """
CONFIG_FILE_KEY_GENERATE_ARGS_OUTPUT_PATH = "output_path"

""" Below the execution dict, the key under which the save images boolean flag argument value of any model's generate() function is present. """
CONFIG_FILE_KEY_GENERATE_ARGS_SAVE_IMAGES = "save_images"

""" The filetype of any of the generative model's python packages after download and before unpacking. """
PACKAGE_EXTENSION = ".zip"

""" The string describing a model's unique id in medigan's data structures. """
MODEL_ID = "model_id"

""" The default path to a folder under which the outputs of the medigan package (i.e. generated samples) are stored. """
DEFAULT_OUTPUT_FOLDER = "output"
| en | 0.902822 | # -*- coding: utf-8 -*- # ! /usr/bin/env python Global constants of the medigan library .. codeauthor:: <NAME> <<EMAIL>> .. codeauthor:: <NAME> <<EMAIL>> Static link to the config of medigan. Note: To add a model, please create pull request in this github repo. Folder path that will be created to locally store the config file. Name and extensions of config file. The key under which the execution dictionary of a model is nested in the config file. The key under which the selection dictionary of a model is nested in the config file. The key under which the description dictionary of a model is nested in the config file. Below the selection dict, the key under which the performance dictionary of a model is nested in the config file. Below the execution dict, the key under which the dependencies dictionary of a model is nested in the config file. Below the execution dict, the key under which the package link of a model is present in the config file. Note: The model packages are per convention stored on Zenodo where they retrieve a static DOI avoiding security issues due to static non-modifiable content on Zenodo. Zenodo also helps to maintain clarity of who the owners and contributors of each generative model (and its IP) in medigan are. Below the execution dict, the key under which the extension of a model is present in the config file. Below the execution dict, the key under which the package_name of a model is present in the config file. Below the execution dict, the key under which the package_name of a model is present in the config file. Below the execution dict, the key under which a model's generator's is present in the config file. Below the execution dict, the key under which a model's image_size is present in the config file. Below the execution dict, the key under which a model's name is present in the config file. Below the execution dict, the key under which a nested dict with info on the model's generate() function is present. 
Below the execution dict, the key under which the exact name of a model's generate() function is present. Below the execution dict, the key under which a nested dict with info on the arguments of a model's generate() function is present. Below the execution dict, the key under which an array of mandatory base arguments of any model's generate() function is present. Below the execution dict, the key under which a nested dict of key-value pairs of model specific custom arguments of a model's generate() function are present. Below the execution dict, the key under which the model_file argument value of any model's generate() function is present. Below the execution dict, the key under which the num_samples argument value of any model's generate() function is present. Below the execution dict, the key under which the output_path argument value of any model's generate() function is present. Below the execution dict, the key under which the save images boolean flag argument value of any model's generate() function is present. The filetype of any of the generative model's python packages after download and before unpacking. The string describing a model's unique id in medigan's data structures. The default path to a folder under which the outputs of the medigan package (i.e. generated samples) are stored. | 1.824188 | 2 |
dbtsdk/Dbt.py | Nilpo/dbt-sdk-python | 1 | 6622639 | <reponame>Nilpo/dbt-sdk-python
#!/usr/bin/env python3
"""
DBT API Client SDK for DPT API v2
Copyright (c) 2018 <NAME> (https://robdunham.info)
This software is available under the MIT license. See http://opensource.org/licenses/MIT for more info.
Documentation for DBT API calls is located at http://www.digitalbibleplatform.com/docs/
This is a Python fork of the DBT API Client PHP SDK (dbt-sdk-php)
https://bitbucket.org/faithcomesbyhearing/dbt-sdk-php
Source: https://github.com/Nilpo/dbt-sdk-python.git
"""
from typing import Dict, Optional
from urllib.request import urlopen
from urllib.parse import urlencode
import urllib.error
import json
import time
class Dbt:
    """Client for the Digital Bible Platform (DBT) API, version 2.

    Each ``get_*`` method wraps one DBT endpoint.  What the wrappers return
    depends on the ``response_type`` chosen at construction time:

    * ``'json'`` (default) -- the raw JSON text returned by the API
    * ``'array'``          -- the JSON decoded into Python objects
    * ``'url'``            -- only the request URL (no HTTP call is made)
    """

    # Default API endpoint, used when no explicit ``api_uri`` is supplied.
    _api_endpoint: str = 'http://dbt.io'

    # API version sent with every request.
    _api_version: str = '2'

    def __init__(self, application_key: str, api_uri: str = None, reply: str = None,
                 response_type: str = None, echo: str = None, callback: str = None) -> None:
        """
        :param application_key: The identity of the app in the form of an application key
        :param api_uri: URL to use instead of the default endpoint
        :param reply: reply protocol
        :param response_type: return type of the wrapper methods
            (json [default] | array [Python objects] | url [only returns the API URL])
        :param echo: [true|false] whether or not to echo the call parameters
        :param callback: function name to use for a JSONP reply
        """
        # Base URI against which every GET request is built.
        self._api_uri: str = api_uri or self._api_endpoint

        # Parameters shared by every API call.  Entries whose value is None
        # are stripped out later, in _get_api_uri().
        self._dbt_params: Dict[str, str] = {
            'v': self._api_version,
            'key': application_key,
            'reply': reply or 'json',
            'callback': callback,
            'echo': echo
        }

        # Select the response formatter.  The previous implementation relied
        # on a bare ``except`` to survive ``response_type is None``; test the
        # value explicitly instead.  Substring matching is kept for backward
        # compatibility with callers passing e.g. 'python array'.
        if response_type and 'array' in response_type:
            self._response = '_get_list_response'
        elif response_type and 'url' in response_type:
            self._response = '_get_api_uri'
        else:
            self._response = '_get_json_response'

    @property
    def response(self) -> str:
        """Name of the bound method used to format API responses.

        Public so that a user can change the response type after
        initialization (mostly for debugging).
        """
        return self._response

    @response.setter
    def response(self, value: str) -> None:
        self._response = value

    def __getattr__(self, item):
        # Only called when normal attribute lookup fails.  The previous
        # implementation delegated to __getitem__, which looked the name up
        # with getattr() and looped straight back here, recursing forever
        # for unknown names.  Raise AttributeError as the protocol requires.
        raise AttributeError(item)

    def __getitem__(self, item):
        # Dict-style access to methods, e.g. ``dbt['get_api_version']``.
        return getattr(self, item)

    def _request(self, resource_group: str, resource: str, params: Dict[str, str]):
        """Dispatch one API call through the configured response formatter.

        Replaces the three inconsistent dispatch spellings previously used
        (``self[self._response]``, ``getattr(self, self._response, None)``
        and ``self.__getattribute__``) with a single code path.
        """
        return getattr(self, self._response)(resource_group, resource, params)

    def _get_list_response(self, resource_group: str, resource: str, params: Dict[str, str]) -> Optional[Dict]:
        """Query the DBT API and decode the JSON reply into Python objects.

        :param resource_group: api resource group to call
        :param resource: api resource to call
        :param params: resource group resource method params
        :return: decoded API reply, or None when the call produced no data
        """
        feed = self._get_json_response(resource_group, resource, params)
        if feed is None:
            # HTTP layer returned nothing; json.loads(None) would raise TypeError.
            return None
        # ``or None`` keeps the historical contract of mapping empty/falsy
        # replies to None.
        return json.loads(feed) or None

    def _get_json_response(self, resource_group: str, resource: str, params: Dict[str, str]) -> Optional[str]:
        """Query the DBT API and return the raw response body.

        :param resource_group: api resource group to call
        :param resource: api resource to call
        :param params: resource group resource method params
        :return: JSON text returned by the API, or None
        :raises urllib.error.HTTPError: when the server answers with an HTTP error
        """
        feed = None
        uri = self._get_api_uri(resource_group, resource, params) or None
        if uri is not None:
            try:
                with urlopen(uri) as response:
                    feed = response.read().decode()
            except urllib.error.HTTPError as e:
                # Log the failure, then let the caller decide how to handle it.
                print(e)
                raise
        return feed

    def _get_api_uri(self, resource_group: str, resource: str, params: Dict[str, str]) -> str:
        """Build the full API URL for a single call.

        :param resource_group: api resource group to call
        :param resource: api resource to call
        :param params: resource group resource method params
        :return: API endpoint URL
        """
        # Per-call parameters override the shared defaults.
        request_params = dict(self._dbt_params, **params)
        # Drop entries without a value so they do not appear in the query string.
        request_params = {k: v for k, v in request_params.items() if v is not None}
        query_string = urlencode(request_params)
        return self._api_uri + '/' + resource_group + '/' + resource + '?' + query_string

    def get_api_version(self) -> Optional[str]:
        """Wrapper method for /api/apiversion call"""
        return self._request('api', 'apiversion', {})

    def get_api_reply(self) -> Optional[str]:
        """Wrapper method for /api/reply call"""
        return self._request('api', 'reply', {})

    def get_audio_location(self, protocol: str = '') -> Optional[str]:
        """Wrapper method for /audio/location call

        :param protocol: Allows the caller to filter out servers that do not support a specified protocol (e.g http, https, rtmp, rtmp-amazon)
        """
        params = {'protocol': protocol}
        return self._request('audio', 'location', params)

    def get_audio_path(self, dam_id: str, book_id: str = None, chapter_id: str = None) -> Optional[str]:
        """Wrapper method for /audio/path call

        :param dam_id: DAM ID of volume
        :param book_id: book id of the book to get chapters for
        :param chapter_id: chapter id of the chapter to get audio for
        """
        params = {
            'dam_id': dam_id,
            'book_id': book_id,
            'chapter_id': chapter_id
        }
        return self._request('audio', 'path', params)

    def get_audio_zippath(self, dam_id: str) -> Optional[str]:
        """Wrapper method for /audio/zippath call

        :param dam_id: DAM ID of volume
        """
        params = {'dam_id': dam_id}
        return self._request('audio', 'zippath', params)

    def get_verse_start(self, dam_id: str, book_id: str, chapter_id: str) -> Optional[str]:
        """Wrapper method for /audio/versestart call

        :param dam_id: DAM ID of volume
        :param book_id: book id of the book to get chapters for
        :param chapter_id: chapter id of the chapter to get audio for
        """
        # NOTE: the API expects OSIS naming for this endpoint, hence the
        # different parameter keys compared to the other audio calls.
        params = {
            'dam_id': dam_id,
            'osis_code': book_id,
            'chapter_number': chapter_id
        }
        return self._request('audio', 'versestart', params)

    def get_library_language(self, code: str = None, name: str = None, sort_by: str = None,
                             full_word: str = None, family_only: str = None) -> Optional[str]:
        """Wrapper method for /library/language call

        :param code: language code on which to filter
        :param name: language name in either native language or English on which to filter
        :param sort_by: [code|name|english]
        :param full_word: [true|false] interpret name as full words only
        :param family_only: [true|false] return only language families
        """
        params = {
            'code': code,
            'name': name,
            'full_word': full_word,
            'family_only': family_only,
            'sort_by': sort_by
        }
        return self._request('library', 'language', params)

    def get_library_version(self, code: str = None, name: str = None, sort_by: str = None) -> Optional[str]:
        """Wrapper method for /library/version call

        :param code: language code on which to filter
        :param name: language name in either native language or English on which to filter
        :param sort_by: [code|name|english]
        """
        params = {
            'code': code,
            'name': name,
            'sort_by': sort_by
        }
        return self._request('library', 'version', params)

    def get_library_volume(self, dam_id: str = None, fcbh_id: str = None, media: str = None,
                           delivery: str = None, language: str = None, language_code: str = None,
                           version_code: str = None, updated: float = None, status: str = None,
                           expired: str = None, org_id: int = None, full_word: str = None,
                           language_family_code: str = None) -> Optional[str]:
        """Wrapper method for /library/volume call

        :param dam_id: DAM ID of volume
        :param fcbh_id:
        :param media: [text|audio|video] the format of languages the caller is interested in. All by default.
        :param delivery: [streaming|download|mobile|any|none] a criteria for approved delivery method. 'any' means any of the supported methods (this list may change over time). 'none' means assets that are not approved for any of the supported methods. All returned by default.
        :param language: Filter the versions returned to a specified language. For example return all the 'English' volumes.
        :param language_code: Filter the volumes returned to a specified language code. For example return all the 'eng' volumes.
        :param version_code: Filter the volumes returned to a specified version code. For example return all the 'ESV' volumes.
        :param updated: unix timestamp (UTC) to restrict volumes returned only if they were modified since the specified time
        :param status: publishing status of volume
        :param expired: [true|false] whether or not the volume is expired
        :param org_id: Id of organization to which volume belongs
        :param full_word: [true|false] interpret name as full words only
        :param language_family_code: Filter the volumes returned to a specified language code for language family
        """
        params = {
            'dam_id': dam_id,
            'fcbh_id': fcbh_id,
            'media': media,
            'delivery': delivery,
            'language': language,
            'full_word': full_word,
            'language_code': language_code,
            'language_family_code': language_family_code,
            'version_code': version_code,
            'updated': updated,
            'status': status,
            'expired': expired,
            'organization_id': org_id
        }
        return self._request('library', 'volume', params)

    def get_library_volumelanguage(self, root: str = None, language_code: str = None, media: str = None,
                                   delivery: str = None, status: str = None, org_id: int = None,
                                   full_word: str = None) -> Optional[str]:
        """Wrapper method for /library/volumelanguage call

        :param root: the language name root. Can be used to restrict the response to only languages that start with 'Quechua' for example
        :param language_code: (optional) 3 letter language code
        :param media: [text|audio|both] the format of languages the caller is interested in. All are returned by default.
        :param delivery: [streaming|download|mobile|any|none] a criteria for approved delivery method. All returned by default.
        :param status:
        :param org_id:
        :param full_word: [true|false] interpret the name as full words only
        """
        params = {
            'root': root,
            'language_code': language_code,
            'media': media,
            'delivery': delivery,
            'status': status,
            'organization_id': org_id,
            'full_word': full_word
        }
        return self._request('library', 'volumelanguage', params)

    def get_library_volumelanguagefamily(self, root: str = None, language_code: str = None, media: str = None,
                                         delivery: str = None, status: str = None, org_id: int = None,
                                         full_word: str = None) -> Optional[str]:
        """Wrapper method for /library/volumelanguagefamily call

        :param root: the language name root. Can be used to restrict the response to only languages that start with 'Quechua' for example
        :param language_code: (optional) 3 letter language code
        :param media: [text|audio|both] the format of languages the caller is interested in. All are returned by default.
        :param delivery: [streaming|download|mobile|any|none] a criteria for approved delivery method. All returned by default.
        :param status:
        :param org_id:
        :param full_word: [true|false] interpret the name as full words only
        """
        params = {
            'root': root,
            'language_code': language_code,
            'media': media,
            'delivery': delivery,
            'status': status,
            'organization_id': org_id,
            'full_word': full_word
        }
        return self._request('library', 'volumelanguagefamily', params)

    def get_library_bookorder(self, dam_id: str) -> Optional[str]:
        """Wrapper method for /library/bookorder call

        :param dam_id: DAM ID of a volume
        """
        params = {
            'dam_id': dam_id
        }
        return self._request('library', 'bookorder', params)

    def get_library_book(self, dam_id: str) -> Optional[str]:
        """Wrapper method for /library/book

        :param dam_id: DAM ID of a volume
        """
        params = {
            'dam_id': dam_id
        }
        return self._request('library', 'book', params)

    def get_library_bookname(self, language_code: str) -> Optional[str]:
        """Wrapper method for /library/bookname call

        :param language_code: language code for book names
        """
        params = {
            'language_code': language_code
        }
        return self._request('library', 'bookname', params)

    def get_library_chapter(self, dam_id: str, book_id: str = None) -> Optional[str]:
        """Wrapper method for /library/chapter call

        :param dam_id: DAM ID of volume
        :param book_id: id of the book to get chapters for
        """
        params = {
            'dam_id': dam_id,
            'book_id': book_id
        }
        return self._request('library', 'chapter', params)

    def get_library_verseinfo(self, dam_id: str, book_id: str = None, chapter_id: int = None,
                              verse_start: int = None, verse_end: int = None) -> Optional[str]:
        """Wrapper method for /library/verseinfo call

        :param dam_id: DAM ID of volume
        :param book_id: id of the book to get text for
        :param chapter_id: id of the chapter to get text for
        :param verse_start: id of the verse to get text for (starting position)
        :param verse_end: id of the verse to get text for (ending position)
        """
        params = {
            'dam_id': dam_id,
            'book_id': book_id,
            'chapter_id': chapter_id,
            'verse_start': verse_start,
            'verse_end': verse_end
        }
        return self._request('library', 'verseinfo', params)

    def get_library_numbers(self, language_code: str, start: int, end: int) -> Optional[str]:
        """Wrapper method for /library/numbers call

        :param language_code: language code for book names
        :param start: first number for series of consecutive numbers returned
        :param end: last number for series of consecutive numbers returned
        """
        params = {
            'language_code': language_code,
            'start': start,
            'end': end
        }
        return self._request('library', 'numbers', params)

    def get_library_metadata(self, dam_id: str = None, org_id: int = None) -> Optional[str]:
        """Wrapper method for /library/metadata

        :param dam_id: DAM ID of volume
        :param org_id: ID for organization by which to filter
        """
        params = {
            'dam_id': dam_id,
            'organization_id': org_id
        }
        return self._request('library', 'metadata', params)

    def get_library_asset(self, dam_id: str = None) -> Optional[str]:
        """Wrapper method for /library/asset call

        :param dam_id: DAM ID of volume
        """
        params = {
            'dam_id': dam_id
        }
        return self._request('library', 'asset', params)

    def get_library_organization(self, org_name: str = None, org_id: int = None) -> Optional[str]:
        """Wrapper method for /library/organization call

        :param org_name: name of organization
        :param org_id: ID for organization by which to filter
        """
        params = {
            'name': org_name,
            'id': org_id
        }
        return self._request('library', 'organization', params)

    def get_text_verse(self, dam_id: str, book_id: str = None, chapter_id: int = None,
                       verse_start: int = None, verse_end: int = None, markup: str = None) -> Optional[str]:
        """Wrapper method for /text/verse call

        :param dam_id: DAM ID of volume
        :param book_id: id of the book to get text for
        :param chapter_id: id of the chapter to get text for
        :param verse_start: id of the verse to get text for (starting position)
        :param verse_end: id of the verse to get text for (ending position)
        :param markup: If specified returns the verse text in a variety of standarized formats. Current options include OSIS, and native (the default DBT format).
        """
        params = {
            'dam_id': dam_id,
            'book_id': book_id,
            'chapter_id': chapter_id,
            'verse_start': verse_start,
            'verse_end': verse_end,
            'markup': markup
        }
        return self._request('text', 'verse', params)

    def get_text_search(self, dam_id: str, query: str, book_id: str = None,
                        offset: int = None, limit: int = None) -> Optional[str]:
        """Wrapper method for /text/search call

        :param dam_id: DAM ID of volume
        :param query: The text the caller wishes to search for in the specified text
        :param book_id: The book ID to limit the search to
        :param offset: The offset for the set of results to return to start from
        :param limit: The number of results to return. Default is 50.
        """
        params = {
            'dam_id': dam_id,
            'query': query,
            'book_id': book_id,
            'offset': offset,
            'limit': limit
        }
        return self._request('text', 'search', params)

    def get_text_searchgroup(self, dam_id: str, query: str) -> Optional[str]:
        """Wrapper method for /text/searchgroup call

        :param dam_id: DAM ID of volume
        :param query: The text the caller wishes to search for in the specified text
        """
        params = {
            'dam_id': dam_id,
            'query': query
        }
        return self._request('text', 'searchgroup', params)

    def get_video_jesusfilm(self, dam_id: str, encoding: str, book_id: str = None,
                            chapter_id: int = None, verse_id: int = None) -> Optional[str]:
        """Wrapper method for /video/jesusfilm call

        :param dam_id: DAM ID of volume
        :param encoding: The encoding to request. Either mp4 or m3u8.
        :param book_id: book ID of the book to get text for
        :param chapter_id: chapter ID of the chapter to get text for
        :param verse_id: Verse id ID request
        """
        params = {
            'dam_id': dam_id,
            'encoding': encoding,
            'book_id': book_id,
            'chapter_id': chapter_id,
            'verse_id': verse_id
        }
        return self._request('video', 'jesusfilm', params)

    def get_video_videopath(self, dam_id: str, encoding: str = 'mp4', resolution: str = 'lo',
                            segment_order: int = None, book_id: str = None, chapter_id: int = None,
                            verse_id: int = None) -> Optional[str]:
        """Wrapper method for /video/videopath call

        :param dam_id: DAM ID of volume
        :param encoding: The encoding to request. Either mp4 or m3u8.
        :param resolution: The video resolution: lo, med, or hi
        :param segment_order: The segment order to retrieve
        :param book_id: book ID of the book to get text for
        :param chapter_id: chapter ID of the chapter to get text for
        :param verse_id: verse ID to request
        """
        params = {
            'dam_id': dam_id,
            'encoding': encoding,
            'resolution': resolution,
            'segment_order': segment_order,
            'book_id': book_id,
            'chapter_id': chapter_id,
            'verse_id': verse_id
        }
        return self._request('video', 'videopath', params)
| #!/usr/bin/env python3
"""
DBT API Client SDK for DPT API v2
Copyright (c) 2018 <NAME> (https://robdunham.info)
This software is available under the MIT license. See http://opensource.org/licenses/MIT for more info.
Documentation for DBT API calls is located at http://www.digitalbibleplatform.com/docs/
This is a Python fork of the DBT API Client PHP SDK (dbt-sdk-php)
https://bitbucket.org/faithcomesbyhearing/dbt-sdk-php
Source: https://github.com/Nilpo/dbt-sdk-python.git
"""
from typing import Dict, Optional
from urllib.request import urlopen
from urllib.parse import urlencode
import urllib.error
import json
import time
class Dbt:
"""DBT Class"""
# Configuration
_api_endpoint: str = 'http://dbt.io'
# API Version
_api_version: str = '2'
# Pointer to method that returns the response format constructed for the object
# made public so that a user can change response type after initialization (mostly for debugging)
response: str
def __init__(self, application_key: str, api_uri: str = None, reply: str = None, response_type: str = None, echo: str = None, callback: str = None) -> None:
"""
:param application_key: The identity of the app in the form of an application key
:param api_uri: URL to use instead of default url
:param reply: reply protocol
:param response_type: return type of function (json[default]| list[python List]|url[only returns api url])
:param echo: [true|false] whether or not to echo the call parameters
:param callback: function name to use for JSONP reply
"""
# URI to which to GET
self._api_uri: str = api_uri or self._api_endpoint
# Params which are shared on every API call
self._dbt_params: Dict[str, str] = {
'v': self._api_version,
'key': application_key,
'reply': reply or 'json',
'callback': callback,
'echo': echo
}
try :
if 'array' in response_type:
self._response = '_get_list_response'
elif 'url' in response_type:
self._response = '_get_api_uri'
else:
self._response = '_get_json_response'
except:
self._response = '_get_json_response'
def __getattr__(self, item):
return self[item]
def __getitem__(self, item):
return getattr(self, item)
def _get_list_response(self, resource_group: str, resource: str, params: Dict[str, str]) -> Optional[Dict]:
"""Imports a JSON api response to a Python List to be used by the server.
:param resource_group: api resource group to call
:param resource: api resource to call
:param params: resource group resource method params
:return: return from API as Python List or None
"""
# feed = self.get_json_response(resource_group, resource, params)
# if feed is not None:
# return json.loads(feed)
# else:
# return None
return json.loads(self._get_json_response(resource_group, resource, params)) or None
def _get_json_response(self, resource_group: str, resource: str, params: Dict[str, str]) -> Optional[str]:
"""Queries DBT API and returns the response in JSON format.
:param resource_group: api resource group to call
:param resource: api resource to call
:param params: resource group resource method params
:return: JSON return from API or None
"""
feed = None
uri = self._get_api_uri(resource_group, resource, params) or None
if uri is not None:
try:
with urlopen(uri) as response:
feed = response.read().decode()
except urllib.error.HTTPError as e:
print(e)
raise
return feed
def _get_api_uri(self, resource_group: str, resource: str, params: Dict[str, str]) -> str:
"""Builds a specific API call URL depending on passed parameters
:param resource_group: api resource group to call
:param resource: api resource to call
:param params: resource group resource method params
:return: API endpoint URL
"""
# request_params = dict(self._dbt_params)
# request_params.update(params)
request_params = dict(self._dbt_params, **params)
# remove keys with empty values
request_params = {k: v for k, v in request_params.items() if v is not None}
query_string = urlencode(request_params)
return self._api_uri + '/' + resource_group + '/' + resource + '?' + query_string
def get_api_version(self) -> Optional[str]:
"""Wrapper method for /api/apiversion call"""
return self[self._response]('api', 'apiversion', {})
def get_api_reply(self) -> Optional[str]:
"""Wrapper method for /api/reply call"""
return self[self._response]('api', 'reply', {})
def get_audio_location(self, protocol: str = '') -> Optional[str]:
"""Wrapper method for /audio/location call
:param protocol: Allows the caller to filter out servers that do not support a specified protocol (e.g http, https, rtmp, rtmp-amazon)
"""
params = {'protocol': protocol}
return self[self._response]('audio', 'location', params)
def get_audio_path(self, dam_id: str, book_id: str = None, chapter_id: str = None) -> Optional[str]:
"""Wrapper method for /audio/path call
:param dam_id: DAM ID of volume
:param book_id: book id of the book to get chapters for
:param chapter_id: chapter id of the chapter to get audio for
"""
params = {
'dam_id': dam_id,
'book_id': book_id,
'chapter_id': chapter_id
}
return self[self._response]('audio', 'path', params)
def get_audio_zippath(self, dam_id: str) -> Optional[str]:
"""Wrapper method for /audio/zippath call
:param dam_id: DAM ID of volume
"""
params = {'dam_id': dam_id}
return self[self._response]('audio', 'zippath', params)
def get_verse_start(self, dam_id: str, book_id: str, chapter_id: str) -> Optional[str]:
"""Wrapper method for /audio/versestart call
:param dam_id: DAM ID of volume
:param book_id: book id of the book to get chapters for
:param chapter_id: chapter id of the chapter to get audio for
"""
params = {
'dam_id': dam_id,
'osis_code': book_id,
'chapter_number': chapter_id
}
return self[self._response]('audio', 'versestart', params)
def get_library_language(self, code: str = None, name: str = None, sort_by: str = None, full_word: str = None, family_only: str = None) -> Optional[str]:
"""Wrapper method for /library/language call
:param code: language code on which to filter
:param name: language name in either native language or English on which to filter
:param sort_by: [code|name|english]
:param full_word: [true|false] interpret name: as full words only
:param family_only: [true|false] return only language families
"""
params = {
'code': code,
'name': name,
'full_word': full_word,
'family_only': family_only,
'sort_by': sort_by
}
# return self[self._response]('library', 'language', params)
return getattr(self, self._response, None)('library', 'language', params)
def get_library_version(self, code: str = None, name: str = None, sort_by: str = None) -> Optional[str]:
"""Wrapper method for /library/version call
:param code: language code on which to filter
:param name: language name in either native language or English on which to filter
:param sort_by: [code|name|english]
"""
params = {
'code': code,
'name': name,
'sort_by': sort_by
}
# return self[self._response]('library', 'version', params)
return self.__getattribute__(self._response)('library', 'version', params)
def get_library_volume(self, dam_id: str = None, fcbh_id: str = None, media: str = None, delivery: str = None, language: str = None, language_code: str = None, version_code: str = None, updated: time.time = None, status: str = None, expired: str = None, org_id: int = None, full_word: str = None, language_family_code: str = None) -> Optional[str]:
"""Wrapper method for /library/volume call
:param dam_id: DAM ID of volume
:param fcbh_id:
:param media: [text|audio|video] the format of languages the caller is interested in. All by default.
:param delivery: [streaming|download|mobile|any|none] a criteria for approved delivery method. 'any' means any of the supported methods (this list may change over time). 'none' means assets that are not approved for any of the supported methods. All returned by default.
:param language: Filter the versions returned to a specified language. For example return all the 'English' volumes.
:param language_code: Filter the volumes returned to a specified language code. For example return all the 'eng' volumes.
:param version_code: Filter the volumes returned to a specified version code. For example return all the 'ESV' volumes.
:param updated: This is a unix timestamp in UTC to restrict volumes returned only if they were modified since the specified time.
:param status: publishing status of volume
:param expired: [true|false] whether or not the volume is expired
:param org_id: Id of organization to which volume belongs
:param full_word: [true|false] interpret name: as full words only
:param language_family_code: Filter the volumes returned to a specified language code for language family
"""
params = {
'dam_id': dam_id,
'fcbh_id': fcbh_id,
'media': media,
'delivery': delivery,
'language': language,
'full_word': full_word,
'language_code': language_code,
'language_family_code': language_family_code,
'version_code': version_code,
'updated': updated,
'status': status,
'expired': expired,
'organization_id': org_id
}
return self[self._response]('library', 'volume', params)
def get_library_volumelanguage(self, root: str = None, language_code: str = None, media: str = None, delivery: str = None, status: str = None, org_id: int = None, full_word: str = None) -> Optional[str]:
"""Wrapper method for /library/volumelanguage call
:param root: the language name root. Can be used to restrict the response to only languages that start with 'Quechua' for example
:param language_code: (optional) 3 letter language code
:param media: [text|audio|both] the format of languages the caller is interested in. This specifies if you want languages available in text or languages available in audio or only languages available in both. All are returned by default.
:param delivery: [streaming|download|mobile|any|none] a criteria for approved delivery method. 'any' means any of the supported methods (this list may change over time). 'none' means assets that are not approved for any of the supported methods. All returned by default.
:param status:
:param org_id:
:param full_word: [true|false] interpret $name as full words only
"""
params = {
'root': root,
'language_code': language_code,
'media': media,
'delivery': delivery,
'status': status,
'organization_id': org_id,
'full_word': full_word
}
return self[self._response]('library', 'volumelanguage', params)
def get_library_volumelanguagefamily(self, root: str = None, language_code: str = None, media: str = None, delivery: str = None, status: str = None, org_id: int = None, full_word: str = None) -> Optional[str]:
"""Wrapper method for /library/volumelanguagefamily call
:param root: the language name root. Can be used to restrict the response to only languages that start with 'Quechua' for example
:param language_code: (optional) 3 letter language code
:param media: [text|audio|both] the format of languages the caller is interested in. This specifies if you want languages available in text or languages available in audio or only languages available in both. All are returned by default.
:param delivery: [streaming|download|mobile|any|none] a criteria for approved delivery method. 'any' means any of the supported methods (this list may change over time). 'none' means assets that are not approved for any of the supported methods. All returned by default.
:param status:
:param org_id:
:param full_word: [true|false] interpret $name as full words only
"""
params = {
'root': root,
'language_code': language_code,
'media': media,
'delivery': delivery,
'status': status,
'organization_id': org_id,
'full_word': full_word
}
return self[self._response]('library', 'volumelanguagefamily', params)
def get_library_bookorder(self, dam_id: str) -> Optional[str]:
"""Wrapper method for /library/bookorder call
:param dam_id: DAM ID of a volume
"""
params = {
'dam_id': dam_id
}
return self[self._response]('library', 'bookorder', params)
def get_library_book(self, dam_id: str) -> Optional[str]:
"""Wrapper method for /library/book
:param dam_id: DAM ID of a volume
"""
params = {
'dam_id': dam_id
}
return self[self._response]('library', 'book', params)
def get_library_bookname(self, language_code: str) -> Optional[str]:
"""Wrapper method for /library/bookname call
:param language_code: language code for book names
"""
params = {
'language_code': language_code
}
return self[self._response]('library', 'bookname', params)
def get_library_chapter(self, dam_id: str, book_id: str = None) -> Optional[str]:
"""Wrapper method for /library/chapter call
:param dam_id: DAM ID of volume
:param book_id: id of the book to get chapters for
"""
params = {
'dam_id': dam_id,
'book_id': book_id
}
return self[self._response]('library', 'chapter', params)
def get_library_verseinfo(self, dam_id: str, book_id: str = None, chapter_id: int = None, verse_start: int = None, verse_end: int = None) -> Optional[str]:
"""Wrapper method for /library/verseinfo call
:param dam_id: DAM ID of volume
:param book_id: id of the book to get text for
:param chapter_id: id of the chapter to get text for
:param verse_start: id of the verse to get text for (starting position)
:param verse_end: id of the verse to get text for (ending position)
"""
params = {
'dam_id': dam_id,
'book_id': book_id,
'chapter_id': chapter_id,
'verse_start': verse_start,
'verse_end': verse_end
}
return self[self._response]('library', 'verseinfo', params)
def get_library_numbers(self, language_code: str, start: int, end: int) -> Optional[str]:
"""Wrapper method for /library/numbers call
:param language_code: language code for book names
:param start: first number for series of consecutive numbers returned
:param end: last number for series of consecutive numbers returned
"""
params = {
'language_code': language_code,
'start': start,
'end': end
}
return self[self._response]('library', 'numbers', params)
def get_library_metadata(self, dam_id: str = None, org_id: int = None) -> Optional[str]:
"""Wrapper method for /library/metadata
:param dam_id: DAM ID of volume
:param org_id: ID for organization by which to filter
"""
params = {
'dam_id': dam_id,
'organization_id': org_id
}
return self[self._response]('library', 'metadata', params)
def get_library_asset(self, dam_id: str = None) -> Optional[str]:
"""Wrapper method for /library/asset call
:param dam_id: DAM ID of volume
"""
params = {
'dam_id': dam_id
}
return self[self._response]('library', 'asset', params)
def get_library_organization(self, org_name: str = None, org_id: int = None) -> Optional[str]:
"""Wrapper method for /library/organization call
:param org_name: name of organization
:param org_id: ID for organization by which to filter
"""
params = {
'name': org_name,
'id': org_id
}
return self[self._response]('library', 'organization', params)
def get_text_verse(self, dam_id: str, book_id: str = None, chapter_id: int = None, verse_start: int = None, verse_end: int = None, markup: str = None) -> Optional[str]:
"""Wrapper method for /text/verse call
:param dam_id: DAM ID of volume
:param book_id: id of the book to get text for
:param chapter_id: id of the chapter to get text for
:param verse_start: id of the verse to get text for (starting position)
:param verse_end: id of the verse to get text for (ending position)
:param markup: If specified returns the verse text in a variety of standarized formats. Current options include OSIS, and native (the default DBT format).
"""
params = {
'dam_id': dam_id,
'book_id': book_id,
'chapter_id': chapter_id,
'verse_start': verse_start,
'verse_end': verse_end,
'markup': markup
}
return self[self._response]('text', 'verse', params)
def get_text_search(self, dam_id: str, query: str, book_id: str = None, offset: int = None, limit: int = None) -> Optional[str]:
"""Wrapper method for /text/search call
:param dam_id: DAM ID of volume
:param query: The text the caller wishes to search for in the specified text
:param book_id: The book ID to limit the search to
:param offset: The offset for the set of results to return to start from
:param limit: The number of results to return. Default is 50.
"""
params = {
'dam_id': dam_id,
'query': query,
'book_id': book_id,
'offset': offset,
'limit': limit
}
return self[self._response]('text', 'search', params)
def get_text_searchgroup(self, dam_id: str, query: str) -> Optional[str]:
"""Wrapper method for /text/searchgroup call
:param dam_id: DAM ID of volume
:param query: The text the caller wishes to search for in the specified text
"""
params = {
'dam_id': dam_id,
'query': query
}
return self[self._response]('text', 'searchgroup', params)
def get_video_jesusfilm(self, dam_id: str, encoding: str, book_id: str = None, chapter_id: int = None, verse_id: int = None) -> Optional[str]:
"""Wrapper method for /video/jesusfilm call
:param dam_id: DAM ID of volume
:param encoding: The encoding to request. Either mp4 or m3u8.
:param book_id: book ID of the book to get text for
:param chapter_id: chapter ID of the chapter to get text for
:param verse_id: Verse id ID request
"""
params = {
'dam_id': dam_id,
'encoding': encoding,
'book_id': book_id,
'chapter_id': chapter_id,
'verse_id': verse_id
}
return self[self._response]('video', 'jesusfilm', params)
def get_video_videopath(self, dam_id: str, encoding: str = 'mp4', resolution: str = 'lo', segment_order: int = None, book_id: str = None, chapter_id: int = None, verse_id: int = None) -> Optional[str]:
"""Wrapper method for /video/videopath call
:param dam_id: DAM ID of volume (null to use default from the class init)
:param encoding: The encoding to request. Either mp4 or m3u8.
:param resolution: The video resolution: lo, med, or hi
:param segment_order: The segment order to retrieve
:param book_id: book ID of the book to get text for
:param chapter_id: chapter ID of the chapter to get text for
:param verse_id: verse ID to request
"""
params = {
'dam_id': dam_id,
'encoding': encoding,
'resolution': resolution,
'segment_order': segment_order,
'book_id': book_id,
'chapter_id': chapter_id,
'verse_id': verse_id
}
return self[self._response]('video', 'videopath', params) | en | 0.747297 | #!/usr/bin/env python3 DBT API Client SDK for DPT API v2 Copyright (c) 2018 <NAME> (https://robdunham.info) This software is available under the MIT license. See http://opensource.org/licenses/MIT for more info. Documentation for DBT API calls is located at http://www.digitalbibleplatform.com/docs/ This is a Python fork of the DBT API Client PHP SDK (dbt-sdk-php) https://bitbucket.org/faithcomesbyhearing/dbt-sdk-php Source: https://github.com/Nilpo/dbt-sdk-python.git DBT Class # Configuration # API Version # Pointer to method that returns the response format constructed for the object # made public so that a user can change response type after initialization (mostly for debugging) :param application_key: The identity of the app in the form of an application key :param api_uri: URL to use instead of default url :param reply: reply protocol :param response_type: return type of function (json[default]| list[python List]|url[only returns api url]) :param echo: [true|false] whether or not to echo the call parameters :param callback: function name to use for JSONP reply # URI to which to GET # Params which are shared on every API call Imports a JSON api response to a Python List to be used by the server. :param resource_group: api resource group to call :param resource: api resource to call :param params: resource group resource method params :return: return from API as Python List or None # feed = self.get_json_response(resource_group, resource, params) # if feed is not None: # return json.loads(feed) # else: # return None Queries DBT API and returns the response in JSON format. 
:param resource_group: api resource group to call :param resource: api resource to call :param params: resource group resource method params :return: JSON return from API or None Builds a specific API call URL depending on passed parameters :param resource_group: api resource group to call :param resource: api resource to call :param params: resource group resource method params :return: API endpoint URL # request_params = dict(self._dbt_params) # request_params.update(params) # remove keys with empty values Wrapper method for /api/apiversion call Wrapper method for /api/reply call Wrapper method for /audio/location call :param protocol: Allows the caller to filter out servers that do not support a specified protocol (e.g http, https, rtmp, rtmp-amazon) Wrapper method for /audio/path call :param dam_id: DAM ID of volume :param book_id: book id of the book to get chapters for :param chapter_id: chapter id of the chapter to get audio for Wrapper method for /audio/zippath call :param dam_id: DAM ID of volume Wrapper method for /audio/versestart call :param dam_id: DAM ID of volume :param book_id: book id of the book to get chapters for :param chapter_id: chapter id of the chapter to get audio for Wrapper method for /library/language call :param code: language code on which to filter :param name: language name in either native language or English on which to filter :param sort_by: [code|name|english] :param full_word: [true|false] interpret name: as full words only :param family_only: [true|false] return only language families # return self[self._response]('library', 'language', params) Wrapper method for /library/version call :param code: language code on which to filter :param name: language name in either native language or English on which to filter :param sort_by: [code|name|english] # return self[self._response]('library', 'version', params) Wrapper method for /library/volume call :param dam_id: DAM ID of volume :param fcbh_id: :param media: [text|audio|video] 
the format of languages the caller is interested in. All by default. :param delivery: [streaming|download|mobile|any|none] a criteria for approved delivery method. 'any' means any of the supported methods (this list may change over time). 'none' means assets that are not approved for any of the supported methods. All returned by default. :param language: Filter the versions returned to a specified language. For example return all the 'English' volumes. :param language_code: Filter the volumes returned to a specified language code. For example return all the 'eng' volumes. :param version_code: Filter the volumes returned to a specified version code. For example return all the 'ESV' volumes. :param updated: This is a unix timestamp in UTC to restrict volumes returned only if they were modified since the specified time. :param status: publishing status of volume :param expired: [true|false] whether or not the volume is expired :param org_id: Id of organization to which volume belongs :param full_word: [true|false] interpret name: as full words only :param language_family_code: Filter the volumes returned to a specified language code for language family Wrapper method for /library/volumelanguage call :param root: the language name root. Can be used to restrict the response to only languages that start with 'Quechua' for example :param language_code: (optional) 3 letter language code :param media: [text|audio|both] the format of languages the caller is interested in. This specifies if you want languages available in text or languages available in audio or only languages available in both. All are returned by default. :param delivery: [streaming|download|mobile|any|none] a criteria for approved delivery method. 'any' means any of the supported methods (this list may change over time). 'none' means assets that are not approved for any of the supported methods. All returned by default. 
:param status: :param org_id: :param full_word: [true|false] interpret $name as full words only Wrapper method for /library/volumelanguagefamily call :param root: the language name root. Can be used to restrict the response to only languages that start with 'Quechua' for example :param language_code: (optional) 3 letter language code :param media: [text|audio|both] the format of languages the caller is interested in. This specifies if you want languages available in text or languages available in audio or only languages available in both. All are returned by default. :param delivery: [streaming|download|mobile|any|none] a criteria for approved delivery method. 'any' means any of the supported methods (this list may change over time). 'none' means assets that are not approved for any of the supported methods. All returned by default. :param status: :param org_id: :param full_word: [true|false] interpret $name as full words only Wrapper method for /library/bookorder call :param dam_id: DAM ID of a volume Wrapper method for /library/book :param dam_id: DAM ID of a volume Wrapper method for /library/bookname call :param language_code: language code for book names Wrapper method for /library/chapter call :param dam_id: DAM ID of volume :param book_id: id of the book to get chapters for Wrapper method for /library/verseinfo call :param dam_id: DAM ID of volume :param book_id: id of the book to get text for :param chapter_id: id of the chapter to get text for :param verse_start: id of the verse to get text for (starting position) :param verse_end: id of the verse to get text for (ending position) Wrapper method for /library/numbers call :param language_code: language code for book names :param start: first number for series of consecutive numbers returned :param end: last number for series of consecutive numbers returned Wrapper method for /library/metadata :param dam_id: DAM ID of volume :param org_id: ID for organization by which to filter Wrapper method for 
/library/asset call :param dam_id: DAM ID of volume Wrapper method for /library/organization call :param org_name: name of organization :param org_id: ID for organization by which to filter Wrapper method for /text/verse call :param dam_id: DAM ID of volume :param book_id: id of the book to get text for :param chapter_id: id of the chapter to get text for :param verse_start: id of the verse to get text for (starting position) :param verse_end: id of the verse to get text for (ending position) :param markup: If specified returns the verse text in a variety of standarized formats. Current options include OSIS, and native (the default DBT format). Wrapper method for /text/search call :param dam_id: DAM ID of volume :param query: The text the caller wishes to search for in the specified text :param book_id: The book ID to limit the search to :param offset: The offset for the set of results to return to start from :param limit: The number of results to return. Default is 50. Wrapper method for /text/searchgroup call :param dam_id: DAM ID of volume :param query: The text the caller wishes to search for in the specified text Wrapper method for /video/jesusfilm call :param dam_id: DAM ID of volume :param encoding: The encoding to request. Either mp4 or m3u8. :param book_id: book ID of the book to get text for :param chapter_id: chapter ID of the chapter to get text for :param verse_id: Verse id ID request Wrapper method for /video/videopath call :param dam_id: DAM ID of volume (null to use default from the class init) :param encoding: The encoding to request. Either mp4 or m3u8. :param resolution: The video resolution: lo, med, or hi :param segment_order: The segment order to retrieve :param book_id: book ID of the book to get text for :param chapter_id: chapter ID of the chapter to get text for :param verse_id: verse ID to request | 2.364858 | 2 |
example/app.py | zhzLuke96/Yoi | 0 | 6622640 | <gh_stars>0
from werkzeug.wrappers import Request, Response
@Request.application
def application(environ,request):
# from pprint import pprint;pprint(environ)
name = request.args.get("name","PyCon")
return Response([f'<h1>hello {name}!</h1>'])
if __name__ == '__main__':
from wsgiref.simple_server import make_server
# httpd = make_server("127.0.0.1", 8000, app)
httpd = make_server("127.0.0.1", 8000, application)
httpd.serve_forever()
| from werkzeug.wrappers import Request, Response
@Request.application
def application(environ,request):
# from pprint import pprint;pprint(environ)
name = request.args.get("name","PyCon")
return Response([f'<h1>hello {name}!</h1>'])
if __name__ == '__main__':
from wsgiref.simple_server import make_server
# httpd = make_server("127.0.0.1", 8000, app)
httpd = make_server("127.0.0.1", 8000, application)
httpd.serve_forever() | en | 0.289905 | # from pprint import pprint;pprint(environ) # httpd = make_server("127.0.0.1", 8000, app) | 2.260515 | 2 |
typhon/objects/signals.py | monte-language/typhon | 63 | 6622641 | from rpython.rlib.rarithmetic import intmask
from typhon.atoms import getAtom
from typhon.autohelp import autohelp, method
from typhon.objects.root import Object
from typhon.objects.collections.maps import EMPTY_MAP
from typhon.objects.data import IntObject
from typhon.ruv import (alloc_signal, free, SignalStart, SignalStop,
stashSignal, unstashSignal, unstashingSignal)
RUN_1 = getAtom(u"run", 1)
def _signalCB(signal, signum):
with unstashingSignal(signal) as (vat, handle):
vat.sendOnly(handle._target, RUN_1, [IntObject(intmask(signum))],
EMPTY_MAP)
@autohelp
class SignalHandle(Object):
def __init__(self, signum, target, vat):
self._signum = signum
self._target = target
self._vat = vat
self._signal = alloc_signal(vat.uv_loop)
SignalStart(self._signal, _signalCB, self._signum)
stashSignal(self._signal, (vat, self))
@method("Void")
def disarm(self):
unstashSignal(self._signal)
SignalStop(self._signal)
free(self._signal)
| from rpython.rlib.rarithmetic import intmask
from typhon.atoms import getAtom
from typhon.autohelp import autohelp, method
from typhon.objects.root import Object
from typhon.objects.collections.maps import EMPTY_MAP
from typhon.objects.data import IntObject
from typhon.ruv import (alloc_signal, free, SignalStart, SignalStop,
stashSignal, unstashSignal, unstashingSignal)
RUN_1 = getAtom(u"run", 1)
def _signalCB(signal, signum):
with unstashingSignal(signal) as (vat, handle):
vat.sendOnly(handle._target, RUN_1, [IntObject(intmask(signum))],
EMPTY_MAP)
@autohelp
class SignalHandle(Object):
def __init__(self, signum, target, vat):
self._signum = signum
self._target = target
self._vat = vat
self._signal = alloc_signal(vat.uv_loop)
SignalStart(self._signal, _signalCB, self._signum)
stashSignal(self._signal, (vat, self))
@method("Void")
def disarm(self):
unstashSignal(self._signal)
SignalStop(self._signal)
free(self._signal)
| none | 1 | 2.082384 | 2 | |
ando/tools/generator/tests/test_AnDOGenerator.py | catalystneuro/AnDO | 1 | 6622642 | <reponame>catalystneuro/AnDO<filename>ando/tools/generator/tests/test_AnDOGenerator.py
import unittest
from ando.tools.generator.AnDOGenerator import *
from ando.tools.generator.tests.utils import *
class Test_AnDOData(unittest.TestCase):
def setUp(self):
test_dir = Path(initialize_test_directory(clean=True))
self.sub_id = 'sub5'
self.ses_id = 'ses1'
self.tasks = None
self.runs = None
sources = test_dir / 'sources'
sources.mkdir()
project = test_dir / 'project-A'
project.mkdir()
self.basedir = project
d = AnDOData(self.sub_id, self.ses_id)
d.basedir = project
self.ando_data = d
prefix = f'sub-{self.sub_id}_ses-{self.ses_id}'
self.test_data_files = [sources / (prefix + '_ephy.nix'),
sources / (prefix + '_ephy.nwb')]
self.test_mdata_files = [sources / 'dataset_description.json',
sources / (prefix + '_probes.tsv'),
sources / (prefix + '_contacts.json')]
for f in self.test_mdata_files + self.test_data_files:
f.touch()
def test_get_data_folder(self):
df = self.ando_data.get_data_folder()
self.assertTrue(df)
df_abs = self.ando_data.get_data_folder('absolute')
df_local = self.ando_data.get_data_folder('local')
self.assertTrue(df_local)
self.assertTrue(str(df_abs).endswith(str(df_local)))
def test_generate_structure(self):
self.ando_data.generate_structure()
df = self.ando_data.get_data_folder()
self.assertTrue(df.exists())
def test_data_files(self):
self.ando_data.generate_structure()
self.ando_data.register_data_files(*self.test_data_files)
self.ando_data.generate_data_files()
for f in self.test_data_files:
self.assertTrue((self.ando_data.basedir / f).exists())
def test_metadata_files(self):
self.ando_data.generate_structure()
self.ando_data.register_metadata_files(*self.test_mdata_files)
self.ando_data.generate_metadata_files()
prefix = 'sub-sub5_ses-ses1'
for f in [prefix + '_probes.tsv', prefix + '_contacts.json']:
self.assertTrue((self.ando_data.get_data_folder() / f).exists())
self.assertTrue((self.basedir / 'dataset_description.json').exists())
def tearDown(self):
initialize_test_directory(clean=True)
class Test_ReadCsv(unittest.TestCase):
def setUp(self):
csv_filename = generate_simple_csv_file()
self.csv_file = csv_filename
def test_read_csv(self):
df = extract_structure_from_csv(self.csv_file)
expected_headers = ['sub_id', 'ses_id']
self.assertListEqual(expected_headers, list(df))
class Test_GenerateStruct(unittest.TestCase):
def setUp(self):
initialize_test_directory(clean=True)
csv_filename = generate_simple_csv_file()
self.csv_file = csv_filename
def test_generate_example_structure(self):
generate_Struct(self.csv_file, test_directory)
# extract all paths that exist in the test directory
existing_paths = [p[0] for p in os.walk(test_directory)]
# find path that is corresponding to each line of the csv file
with open(self.csv_file) as f:
header = f.readline()
# iterate through sessions
for line in f.readlines():
found_path = False
for existing_path in existing_paths:
if all(key in existing_path for key in line.strip().split(',')):
found_path = True
break
if not found_path:
print(line.strip().split(','))
self.assertTrue(found_path)
def doCleanups(self):
initialize_test_directory(clean=True)
if __name__ == '__main__':
unittest.main()
| import unittest
from ando.tools.generator.AnDOGenerator import *
from ando.tools.generator.tests.utils import *
class Test_AnDOData(unittest.TestCase):
def setUp(self):
test_dir = Path(initialize_test_directory(clean=True))
self.sub_id = 'sub5'
self.ses_id = 'ses1'
self.tasks = None
self.runs = None
sources = test_dir / 'sources'
sources.mkdir()
project = test_dir / 'project-A'
project.mkdir()
self.basedir = project
d = AnDOData(self.sub_id, self.ses_id)
d.basedir = project
self.ando_data = d
prefix = f'sub-{self.sub_id}_ses-{self.ses_id}'
self.test_data_files = [sources / (prefix + '_ephy.nix'),
sources / (prefix + '_ephy.nwb')]
self.test_mdata_files = [sources / 'dataset_description.json',
sources / (prefix + '_probes.tsv'),
sources / (prefix + '_contacts.json')]
for f in self.test_mdata_files + self.test_data_files:
f.touch()
def test_get_data_folder(self):
df = self.ando_data.get_data_folder()
self.assertTrue(df)
df_abs = self.ando_data.get_data_folder('absolute')
df_local = self.ando_data.get_data_folder('local')
self.assertTrue(df_local)
self.assertTrue(str(df_abs).endswith(str(df_local)))
def test_generate_structure(self):
self.ando_data.generate_structure()
df = self.ando_data.get_data_folder()
self.assertTrue(df.exists())
def test_data_files(self):
self.ando_data.generate_structure()
self.ando_data.register_data_files(*self.test_data_files)
self.ando_data.generate_data_files()
for f in self.test_data_files:
self.assertTrue((self.ando_data.basedir / f).exists())
def test_metadata_files(self):
self.ando_data.generate_structure()
self.ando_data.register_metadata_files(*self.test_mdata_files)
self.ando_data.generate_metadata_files()
prefix = 'sub-sub5_ses-ses1'
for f in [prefix + '_probes.tsv', prefix + '_contacts.json']:
self.assertTrue((self.ando_data.get_data_folder() / f).exists())
self.assertTrue((self.basedir / 'dataset_description.json').exists())
def tearDown(self):
initialize_test_directory(clean=True)
class Test_ReadCsv(unittest.TestCase):
def setUp(self):
csv_filename = generate_simple_csv_file()
self.csv_file = csv_filename
def test_read_csv(self):
df = extract_structure_from_csv(self.csv_file)
expected_headers = ['sub_id', 'ses_id']
self.assertListEqual(expected_headers, list(df))
class Test_GenerateStruct(unittest.TestCase):
def setUp(self):
initialize_test_directory(clean=True)
csv_filename = generate_simple_csv_file()
self.csv_file = csv_filename
def test_generate_example_structure(self):
generate_Struct(self.csv_file, test_directory)
# extract all paths that exist in the test directory
existing_paths = [p[0] for p in os.walk(test_directory)]
# find path that is corresponding to each line of the csv file
with open(self.csv_file) as f:
header = f.readline()
# iterate through sessions
for line in f.readlines():
found_path = False
for existing_path in existing_paths:
if all(key in existing_path for key in line.strip().split(',')):
found_path = True
break
if not found_path:
print(line.strip().split(','))
self.assertTrue(found_path)
def doCleanups(self):
initialize_test_directory(clean=True)
if __name__ == '__main__':
unittest.main() | en | 0.91793 | # extract all paths that exist in the test directory # find path that is corresponding to each line of the csv file # iterate through sessions | 2.461265 | 2 |
git.py | miguelvelezmj25/fabfiles | 1 | 6622643 | <reponame>miguelvelezmj25/fabfiles
from common import *
def git_clone(repo, options=None, name=None, dir="."):
with cd(dir):
command = 'git clone '
if options is not None:
command += '{} '.format(options)
command += '{} '.format(repo)
if name is not None:
command += '{} '.format(name)
run(command)
def git_remote_update(dir='.'):
with cd(dir):
run('git remote update')
def git_status(dir='.'):
with cd(dir):
run('git status')
def git_pull(dir='.'):
with cd(dir):
run('git pull')
def git_checkout(options='', dir='.'):
with cd(dir):
run('git checkout {}'.format(options))
def git_submodule(options='', dir='.'):
with cd(dir):
run('git submodule {}'.format(options))
| from common import *
def git_clone(repo, options=None, name=None, dir="."):
with cd(dir):
command = 'git clone '
if options is not None:
command += '{} '.format(options)
command += '{} '.format(repo)
if name is not None:
command += '{} '.format(name)
run(command)
def git_remote_update(dir='.'):
with cd(dir):
run('git remote update')
def git_status(dir='.'):
with cd(dir):
run('git status')
def git_pull(dir='.'):
with cd(dir):
run('git pull')
def git_checkout(options='', dir='.'):
with cd(dir):
run('git checkout {}'.format(options))
def git_submodule(options='', dir='.'):
with cd(dir):
run('git submodule {}'.format(options)) | none | 1 | 2.468868 | 2 | |
preprocess.py | freesinger/readmission_prediction | 4 | 6622644 | import numpy as np
import pandas as pd
import scipy.stats as sp
# file path
DATA_DIR = "./data"
ORI_DATA_PATH = DATA_DIR + "/diabetic_data.csv"
MAP_PATH = DATA_DIR + "/IDs_mapping.csv"
OUTPUT_DATA_PATH = DATA_DIR + "/preprocessed_data.csv"
# load data
dataframe_ori = pd.read_csv(ORI_DATA_PATH)
NUM_RECORDS = dataframe_ori.shape[0]
NUM_FEATURE = dataframe_ori.shape[1]
# make a copy of the dataframe for preprocessing
df = dataframe_ori.copy(deep=True)
# Drop features
df = df.drop(['weight', 'payer_code', 'medical_specialty', 'examide', 'citoglipton'], axis=1)
# drop bad data with 3 '?' in diag
drop_ID = set(df[(df['diag_1'] == '?') & (df['diag_2'] == '?') & (df['diag_3'] == '?')].index)
# drop died patient data which 'discharge_disposition_id' == 11 | 19 | 20 | 21 indicates 'Expired'
drop_ID = drop_ID.union(set(df[(df['discharge_disposition_id'] == 11) | (df['discharge_disposition_id'] == 19) | \
(df['discharge_disposition_id'] == 20) | (df['discharge_disposition_id'] == 21)].index))
# drop 3 data with 'Unknown/Invalid' gender
drop_ID = drop_ID.union(df['gender'][df['gender'] == 'Unknown/Invalid'].index)
new_ID = list(set(df.index) - set(drop_ID))
df = df.iloc[new_ID]
# process readmitted data
df['readmitted'] = df['readmitted'].replace('>30', 2)
df['readmitted'] = df['readmitted'].replace('<30', 1)
df['readmitted'] = df['readmitted'].replace('NO', 0)
# cnt0, cnt1, cnt2 = 0, 0, 0
'''
for i in df['readmitted']:
if i == 0:
cnt0 += 1
if i == 1:
cnt1 += 1
else:
cnt2 += 1
print(cnt0, cnt1, cnt2)
'''
# 53208 11357 88753
# calculate change times through 23 kinds of medicines
# high change times refer to higher prob to readmit
# 'num_med_changed' to counts medicine change
print('\n--Medicine related--')
medicine = ['metformin', 'repaglinide', 'nateglinide', 'chlorpropamide', 'glimepiride', 'glipizide', 'glyburide',
'pioglitazone', 'rosiglitazone', 'acarbose', 'miglitol', 'insulin', 'glyburide-metformin', 'tolazamide',
'metformin-pioglitazone', 'metformin-rosiglitazone', 'glimepiride-pioglitazone', 'glipizide-metformin',
'troglitazone', 'tolbutamide', 'acetohexamide']
for med in medicine:
tmp = med + 'temp'
df[tmp] = df[med].apply(lambda x: 1 if (x == 'Down' or x == 'Up') else 0)
# two new feature
df['num_med_changed'] = 0
for med in medicine:
tmp = med + 'temp'
df['num_med_changed'] += df[tmp]
del df[tmp]
for i in medicine:
df[i] = df[i].replace('Steady', 1)
df[i] = df[i].replace('No', 0)
df[i] = df[i].replace('Up', 1)
df[i] = df[i].replace('Down', 1)
df['num_med_taken'] = 0
for med in medicine:
print(med)
df['num_med_taken'] = df['num_med_taken'] + df[med]
# encode race
df['race'] = df['race'].replace('Asian', 0)
df['race'] = df['race'].replace('AfricanAmerican', 1)
df['race'] = df['race'].replace('Caucasian', 2)
df['race'] = df['race'].replace('Hispanic', 3)
df['race'] = df['race'].replace('Other', 4)
df['race'] = df['race'].replace('?', 4)
# map
df['A1Cresult'] = df['A1Cresult'].replace('None', -99) # -1 -> -99
df['A1Cresult'] = df['A1Cresult'].replace('>8', 1)
df['A1Cresult'] = df['A1Cresult'].replace('>7', 1)
df['A1Cresult'] = df['A1Cresult'].replace('Norm', 0)
df['max_glu_serum'] = df['max_glu_serum'].replace('>200', 1)
df['max_glu_serum'] = df['max_glu_serum'].replace('>300', 1)
df['max_glu_serum'] = df['max_glu_serum'].replace('Norm', 0)
df['max_glu_serum'] = df['max_glu_serum'].replace('None', -99) # -1 -> -99
df['change'] = df['change'].replace('No', 0)
df['change'] = df['change'].replace("Ch", 1)
df['gender'] = df['gender'].replace('Male', 1)
df['gender'] = df['gender'].replace('Female', 0)
df['diabetesMed'] = df['diabetesMed'].replace('Yes', 1)
df['diabetesMed'] = df['diabetesMed'].replace('No', 0)
print('diabetesMed end')
age_dict = {'[0-10)': 5, '[10-20)': 15, '[20-30)': 25, '[30-40)': 35, '[40-50)': 45, '[50-60)': 55, '[60-70)': 65,
'[70-80)': 75, '[80-90)': 85, '[90-100)': 95}
df['age'] = df.age.map(age_dict)
df['age'] = df['age'].astype('int64')
print('age end')
# simplify
# admission_type_id : [2, 7] -> 1, [6, 8] -> 5
a, b = [2, 7], [6, 8]
for i in a:
df['admission_type_id'] = df['admission_type_id'].replace(i, 1)
for j in b:
df['admission_type_id'] = df['admission_type_id'].replace(j, 5)
# discharge_disposition_id : [6, 8, 9, 13] -> 1, [3, 4, 5, 14, 22, 23, 24] -> 2,
# [12, 15, 16, 17] -> 10, [19, 20, 21] -> 11, [25, 26] -> 18
a, b, c, d, e = [6, 8, 9, 13], [3, 4, 5, 14, 22, 23, 24], [12, 15, 16, 17], \
[19, 20, 21], [25, 26]
for i in a:
df['discharge_disposition_id'] = df['discharge_disposition_id'].replace(i, 1)
for j in b:
df['discharge_disposition_id'] = df['discharge_disposition_id'].replace(j, 2)
for k in c:
df['discharge_disposition_id'] = df['discharge_disposition_id'].replace(k, 10)
# data of died patients have been dropped
# for p in d:
# df['discharge_disposition_id'] = df['discharge_disposition_id'].replace(p, 11)
for q in e:
df['discharge_disposition_id'] = df['discharge_disposition_id'].replace(q, 18)
# admission_source_id : [3, 2] -> 1, [5, 6, 10, 22, 25] -> 4,
# [15, 17, 20, 21] -> 9, [13, 14] -> 11
a, b, c, d = [3, 2], [5, 6, 10, 22, 25], [15, 17, 20, 21], [13, 14]
for i in a:
df['admission_source_id'] = df['admission_source_id'].replace(i, 1)
for j in b:
df['admission_source_id'] = df['admission_source_id'].replace(j, 4)
for k in c:
df['admission_source_id'] = df['admission_source_id'].replace(k, 9)
for p in d:
df['admission_source_id'] = df['admission_source_id'].replace(p, 11)
print('id end')
# Classify Diagnoses by ICD-9
df.loc[df['diag_1'].str.contains('V', na=False), ['diag_1']] = 0
df.loc[df['diag_1'].str.contains('E', na=False), ['diag_1']] = 0
df['diag_1'] = df['diag_1'].replace('?', -1)
df['diag_1'] = pd.to_numeric(df['diag_1'], errors='coerce')
for index, row in df.iterrows():
if (row['diag_1'] >= 1 and row['diag_1'] <= 139):
df.loc[index, 'diag_1'] = 1
elif (row['diag_1'] >= 140 and row['diag_1'] <= 239):
df.loc[index, 'diag_1'] = 2
elif (row['diag_1'] >= 240 and row['diag_1'] <= 279):
df.loc[index, 'diag_1'] = 3
elif (row['diag_1'] >= 280 and row['diag_1'] <= 289):
df.loc[index, 'diag_1'] = 4
elif (row['diag_1'] >= 290 and row['diag_1'] <= 319):
df.loc[index, 'diag_1'] = 5
elif (row['diag_1'] >= 320 and row['diag_1'] <= 389):
df.loc[index, 'diag_1'] = 6
elif (row['diag_1'] >= 390 and row['diag_1'] <= 459):
df.loc[index, 'diag_1'] = 7
elif (row['diag_1'] >= 460 and row['diag_1'] <= 519):
df.loc[index, 'diag_1'] = 8
elif (row['diag_1'] >= 520 and row['diag_1'] <= 579):
df.loc[index, 'diag_1'] = 9
elif (row['diag_1'] >= 580 and row['diag_1'] <= 629):
df.loc[index, 'diag_1'] = 10
elif (row['diag_1'] >= 630 and row['diag_1'] <= 679):
df.loc[index, 'diag_1'] = 11
elif (row['diag_1'] >= 680 and row['diag_1'] <= 709):
df.loc[index, 'diag_1'] = 12
elif (row['diag_1'] >= 710 and row['diag_1'] <= 739):
df.loc[index, 'diag_1'] = 13
elif (row['diag_1'] >= 740 and row['diag_1'] <= 759):
df.loc[index, 'diag_1'] = 14
elif (row['diag_1'] >= 760 and row['diag_1'] <= 779):
df.loc[index, 'diag_1'] = 15
elif (row['diag_1'] >= 780 and row['diag_1'] <= 799):
df.loc[index, 'diag_1'] = 16
elif (row['diag_1'] >= 800 and row['diag_1'] <= 999):
df.loc[index, 'diag_1'] = 17
print('diag_1 end')
df.loc[df['diag_2'].str.contains('V', na=False), ['diag_2']] = 0
df.loc[df['diag_2'].str.contains('E', na=False), ['diag_2']] = 0
df['diag_2'] = df['diag_2'].replace('?', -1)
df['diag_2'] = pd.to_numeric(df['diag_2'], errors='coerce')
for index, row in df.iterrows():
if (row['diag_2'] >= 1 and row['diag_2'] <= 139):
df.loc[index, 'diag_2'] = 1
elif (row['diag_2'] >= 140 and row['diag_2'] <= 239):
df.loc[index, 'diag_2'] = 2
elif (row['diag_2'] >= 240 and row['diag_2'] <= 279):
df.loc[index, 'diag_2'] = 3
elif (row['diag_2'] >= 280 and row['diag_2'] <= 289):
df.loc[index, 'diag_2'] = 4
elif (row['diag_2'] >= 290 and row['diag_2'] <= 319):
df.loc[index, 'diag_2'] = 5
elif (row['diag_2'] >= 320 and row['diag_2'] <= 389):
df.loc[index, 'diag_2'] = 6
elif (row['diag_2'] >= 390 and row['diag_2'] <= 459):
df.loc[index, 'diag_2'] = 7
elif (row['diag_2'] >= 460 and row['diag_2'] <= 519):
df.loc[index, 'diag_2'] = 8
elif (row['diag_2'] >= 520 and row['diag_2'] <= 579):
df.loc[index, 'diag_2'] = 9
elif (row['diag_2'] >= 580 and row['diag_2'] <= 629):
df.loc[index, 'diag_2'] = 10
elif (row['diag_2'] >= 630 and row['diag_2'] <= 679):
df.loc[index, 'diag_2'] = 11
elif (row['diag_2'] >= 680 and row['diag_2'] <= 709):
df.loc[index, 'diag_2'] = 12
elif (row['diag_2'] >= 710 and row['diag_2'] <= 739):
df.loc[index, 'diag_2'] = 13
elif (row['diag_2'] >= 740 and row['diag_2'] <= 759):
df.loc[index, 'diag_2'] = 14
elif (row['diag_2'] >= 760 and row['diag_2'] <= 779):
df.loc[index, 'diag_2'] = 15
elif (row['diag_2'] >= 780 and row['diag_2'] <= 799):
df.loc[index, 'diag_2'] = 16
elif (row['diag_2'] >= 800 and row['diag_2'] <= 999):
df.loc[index, 'diag_2'] = 17
print('diag_2 end')
df.loc[df['diag_3'].str.contains('V', na=False), ['diag_3']] = 0
df.loc[df['diag_3'].str.contains('E', na=False), ['diag_3']] = 0
df['diag_3'] = df['diag_3'].replace('?', -1)
df['diag_3'] = pd.to_numeric(df['diag_3'], errors='coerce')
for index, row in df.iterrows():
if (row['diag_3'] >= 1 and row['diag_3'] <= 139):
df.loc[index, 'diag_3'] = 1
elif (row['diag_3'] >= 140 and row['diag_3'] <= 239):
df.loc[index, 'diag_3'] = 2
elif (row['diag_3'] >= 240 and row['diag_3'] <= 279):
df.loc[index, 'diag_3'] = 3
elif (row['diag_3'] >= 280 and row['diag_3'] <= 289):
df.loc[index, 'diag_3'] = 4
elif (row['diag_3'] >= 290 and row['diag_3'] <= 319):
df.loc[index, 'diag_3'] = 5
elif (row['diag_3'] >= 320 and row['diag_3'] <= 389):
df.loc[index, 'diag_3'] = 6
elif (row['diag_3'] >= 390 and row['diag_3'] <= 459):
df.loc[index, 'diag_3'] = 7
elif (row['diag_3'] >= 460 and row['diag_3'] <= 519):
df.loc[index, 'diag_3'] = 8
elif (row['diag_3'] >= 520 and row['diag_3'] <= 579):
df.loc[index, 'diag_3'] = 9
elif (row['diag_3'] >= 580 and row['diag_3'] <= 629):
df.loc[index, 'diag_3'] = 10
elif (row['diag_3'] >= 630 and row['diag_3'] <= 679):
df.loc[index, 'diag_3'] = 11
elif (row['diag_3'] >= 680 and row['diag_3'] <= 709):
df.loc[index, 'diag_3'] = 12
elif (row['diag_3'] >= 710 and row['diag_3'] <= 739):
df.loc[index, 'diag_3'] = 13
elif (row['diag_3'] >= 740 and row['diag_3'] <= 759):
df.loc[index, 'diag_3'] = 14
elif (row['diag_3'] >= 760 and row['diag_3'] <= 779):
df.loc[index, 'diag_3'] = 15
elif (row['diag_3'] >= 780 and row['diag_3'] <= 799):
df.loc[index, 'diag_3'] = 16
elif (row['diag_3'] >= 800 and row['diag_3'] <= 999):
df.loc[index, 'diag_3'] = 17
print('diag_3 end')
# df['new_1'] = df['num_medications'] * df['time_in_hospital']
# # df['add_feature_2'] = df['change'] * df['num_medications']
# df['new_3'] = df['age'] * df['number_diagnoses']
print('diag end')
def standardize(raw_data):
return ((raw_data - np.mean(raw_data, axis=0)) / np.std(raw_data, axis=0))
numerics = ['race', 'age', 'time_in_hospital', 'num_medications', 'number_diagnoses',
'num_med_changed', 'num_med_taken', 'number_inpatient', 'number_outpatient', 'number_emergency',
'num_procedures', 'num_lab_procedures']
df[numerics] = standardize(df[numerics])
df = df[(np.abs(sp.stats.zscore(df[numerics])) < 3).all(axis=1)]
print('begin out')
print(OUTPUT_DATA_PATH)
df.to_csv(OUTPUT_DATA_PATH)
| import numpy as np
import pandas as pd
import scipy.stats as sp
# file path
DATA_DIR = "./data"
ORI_DATA_PATH = DATA_DIR + "/diabetic_data.csv"
MAP_PATH = DATA_DIR + "/IDs_mapping.csv"
OUTPUT_DATA_PATH = DATA_DIR + "/preprocessed_data.csv"
# load data
dataframe_ori = pd.read_csv(ORI_DATA_PATH)
NUM_RECORDS = dataframe_ori.shape[0]
NUM_FEATURE = dataframe_ori.shape[1]
# make a copy of the dataframe for preprocessing
df = dataframe_ori.copy(deep=True)
# Drop features
df = df.drop(['weight', 'payer_code', 'medical_specialty', 'examide', 'citoglipton'], axis=1)
# drop bad data with 3 '?' in diag
drop_ID = set(df[(df['diag_1'] == '?') & (df['diag_2'] == '?') & (df['diag_3'] == '?')].index)
# drop died patient data which 'discharge_disposition_id' == 11 | 19 | 20 | 21 indicates 'Expired'
drop_ID = drop_ID.union(set(df[(df['discharge_disposition_id'] == 11) | (df['discharge_disposition_id'] == 19) | \
(df['discharge_disposition_id'] == 20) | (df['discharge_disposition_id'] == 21)].index))
# drop 3 data with 'Unknown/Invalid' gender
drop_ID = drop_ID.union(df['gender'][df['gender'] == 'Unknown/Invalid'].index)
new_ID = list(set(df.index) - set(drop_ID))
df = df.iloc[new_ID]
# process readmitted data
df['readmitted'] = df['readmitted'].replace('>30', 2)
df['readmitted'] = df['readmitted'].replace('<30', 1)
df['readmitted'] = df['readmitted'].replace('NO', 0)
# cnt0, cnt1, cnt2 = 0, 0, 0
'''
for i in df['readmitted']:
if i == 0:
cnt0 += 1
if i == 1:
cnt1 += 1
else:
cnt2 += 1
print(cnt0, cnt1, cnt2)
'''
# 53208 11357 88753
# calculate change times through 23 kinds of medicines
# high change times refer to higher prob to readmit
# 'num_med_changed' to counts medicine change
print('\n--Medicine related--')
medicine = ['metformin', 'repaglinide', 'nateglinide', 'chlorpropamide', 'glimepiride', 'glipizide', 'glyburide',
'pioglitazone', 'rosiglitazone', 'acarbose', 'miglitol', 'insulin', 'glyburide-metformin', 'tolazamide',
'metformin-pioglitazone', 'metformin-rosiglitazone', 'glimepiride-pioglitazone', 'glipizide-metformin',
'troglitazone', 'tolbutamide', 'acetohexamide']
for med in medicine:
tmp = med + 'temp'
df[tmp] = df[med].apply(lambda x: 1 if (x == 'Down' or x == 'Up') else 0)
# two new feature
df['num_med_changed'] = 0
for med in medicine:
tmp = med + 'temp'
df['num_med_changed'] += df[tmp]
del df[tmp]
for i in medicine:
df[i] = df[i].replace('Steady', 1)
df[i] = df[i].replace('No', 0)
df[i] = df[i].replace('Up', 1)
df[i] = df[i].replace('Down', 1)
df['num_med_taken'] = 0
for med in medicine:
print(med)
df['num_med_taken'] = df['num_med_taken'] + df[med]
# encode race
df['race'] = df['race'].replace('Asian', 0)
df['race'] = df['race'].replace('AfricanAmerican', 1)
df['race'] = df['race'].replace('Caucasian', 2)
df['race'] = df['race'].replace('Hispanic', 3)
df['race'] = df['race'].replace('Other', 4)
df['race'] = df['race'].replace('?', 4)
# map
df['A1Cresult'] = df['A1Cresult'].replace('None', -99) # -1 -> -99
df['A1Cresult'] = df['A1Cresult'].replace('>8', 1)
df['A1Cresult'] = df['A1Cresult'].replace('>7', 1)
df['A1Cresult'] = df['A1Cresult'].replace('Norm', 0)
df['max_glu_serum'] = df['max_glu_serum'].replace('>200', 1)
df['max_glu_serum'] = df['max_glu_serum'].replace('>300', 1)
df['max_glu_serum'] = df['max_glu_serum'].replace('Norm', 0)
df['max_glu_serum'] = df['max_glu_serum'].replace('None', -99) # -1 -> -99
df['change'] = df['change'].replace('No', 0)
df['change'] = df['change'].replace("Ch", 1)
df['gender'] = df['gender'].replace('Male', 1)
df['gender'] = df['gender'].replace('Female', 0)
df['diabetesMed'] = df['diabetesMed'].replace('Yes', 1)
df['diabetesMed'] = df['diabetesMed'].replace('No', 0)
print('diabetesMed end')
age_dict = {'[0-10)': 5, '[10-20)': 15, '[20-30)': 25, '[30-40)': 35, '[40-50)': 45, '[50-60)': 55, '[60-70)': 65,
'[70-80)': 75, '[80-90)': 85, '[90-100)': 95}
df['age'] = df.age.map(age_dict)
df['age'] = df['age'].astype('int64')
print('age end')
# simplify
# admission_type_id : [2, 7] -> 1, [6, 8] -> 5
a, b = [2, 7], [6, 8]
for i in a:
df['admission_type_id'] = df['admission_type_id'].replace(i, 1)
for j in b:
df['admission_type_id'] = df['admission_type_id'].replace(j, 5)
# discharge_disposition_id : [6, 8, 9, 13] -> 1, [3, 4, 5, 14, 22, 23, 24] -> 2,
# [12, 15, 16, 17] -> 10, [19, 20, 21] -> 11, [25, 26] -> 18
a, b, c, d, e = [6, 8, 9, 13], [3, 4, 5, 14, 22, 23, 24], [12, 15, 16, 17], \
[19, 20, 21], [25, 26]
for i in a:
df['discharge_disposition_id'] = df['discharge_disposition_id'].replace(i, 1)
for j in b:
df['discharge_disposition_id'] = df['discharge_disposition_id'].replace(j, 2)
for k in c:
df['discharge_disposition_id'] = df['discharge_disposition_id'].replace(k, 10)
# data of died patients have been dropped
# for p in d:
# df['discharge_disposition_id'] = df['discharge_disposition_id'].replace(p, 11)
for q in e:
df['discharge_disposition_id'] = df['discharge_disposition_id'].replace(q, 18)
# admission_source_id : [3, 2] -> 1, [5, 6, 10, 22, 25] -> 4,
# [15, 17, 20, 21] -> 9, [13, 14] -> 11
a, b, c, d = [3, 2], [5, 6, 10, 22, 25], [15, 17, 20, 21], [13, 14]
for i in a:
df['admission_source_id'] = df['admission_source_id'].replace(i, 1)
for j in b:
df['admission_source_id'] = df['admission_source_id'].replace(j, 4)
for k in c:
df['admission_source_id'] = df['admission_source_id'].replace(k, 9)
for p in d:
df['admission_source_id'] = df['admission_source_id'].replace(p, 11)
print('id end')
# Classify Diagnoses by ICD-9
df.loc[df['diag_1'].str.contains('V', na=False), ['diag_1']] = 0
df.loc[df['diag_1'].str.contains('E', na=False), ['diag_1']] = 0
df['diag_1'] = df['diag_1'].replace('?', -1)
df['diag_1'] = pd.to_numeric(df['diag_1'], errors='coerce')
for index, row in df.iterrows():
if (row['diag_1'] >= 1 and row['diag_1'] <= 139):
df.loc[index, 'diag_1'] = 1
elif (row['diag_1'] >= 140 and row['diag_1'] <= 239):
df.loc[index, 'diag_1'] = 2
elif (row['diag_1'] >= 240 and row['diag_1'] <= 279):
df.loc[index, 'diag_1'] = 3
elif (row['diag_1'] >= 280 and row['diag_1'] <= 289):
df.loc[index, 'diag_1'] = 4
elif (row['diag_1'] >= 290 and row['diag_1'] <= 319):
df.loc[index, 'diag_1'] = 5
elif (row['diag_1'] >= 320 and row['diag_1'] <= 389):
df.loc[index, 'diag_1'] = 6
elif (row['diag_1'] >= 390 and row['diag_1'] <= 459):
df.loc[index, 'diag_1'] = 7
elif (row['diag_1'] >= 460 and row['diag_1'] <= 519):
df.loc[index, 'diag_1'] = 8
elif (row['diag_1'] >= 520 and row['diag_1'] <= 579):
df.loc[index, 'diag_1'] = 9
elif (row['diag_1'] >= 580 and row['diag_1'] <= 629):
df.loc[index, 'diag_1'] = 10
elif (row['diag_1'] >= 630 and row['diag_1'] <= 679):
df.loc[index, 'diag_1'] = 11
elif (row['diag_1'] >= 680 and row['diag_1'] <= 709):
df.loc[index, 'diag_1'] = 12
elif (row['diag_1'] >= 710 and row['diag_1'] <= 739):
df.loc[index, 'diag_1'] = 13
elif (row['diag_1'] >= 740 and row['diag_1'] <= 759):
df.loc[index, 'diag_1'] = 14
elif (row['diag_1'] >= 760 and row['diag_1'] <= 779):
df.loc[index, 'diag_1'] = 15
elif (row['diag_1'] >= 780 and row['diag_1'] <= 799):
df.loc[index, 'diag_1'] = 16
elif (row['diag_1'] >= 800 and row['diag_1'] <= 999):
df.loc[index, 'diag_1'] = 17
print('diag_1 end')
df.loc[df['diag_2'].str.contains('V', na=False), ['diag_2']] = 0
df.loc[df['diag_2'].str.contains('E', na=False), ['diag_2']] = 0
df['diag_2'] = df['diag_2'].replace('?', -1)
df['diag_2'] = pd.to_numeric(df['diag_2'], errors='coerce')
for index, row in df.iterrows():
if (row['diag_2'] >= 1 and row['diag_2'] <= 139):
df.loc[index, 'diag_2'] = 1
elif (row['diag_2'] >= 140 and row['diag_2'] <= 239):
df.loc[index, 'diag_2'] = 2
elif (row['diag_2'] >= 240 and row['diag_2'] <= 279):
df.loc[index, 'diag_2'] = 3
elif (row['diag_2'] >= 280 and row['diag_2'] <= 289):
df.loc[index, 'diag_2'] = 4
elif (row['diag_2'] >= 290 and row['diag_2'] <= 319):
df.loc[index, 'diag_2'] = 5
elif (row['diag_2'] >= 320 and row['diag_2'] <= 389):
df.loc[index, 'diag_2'] = 6
elif (row['diag_2'] >= 390 and row['diag_2'] <= 459):
df.loc[index, 'diag_2'] = 7
elif (row['diag_2'] >= 460 and row['diag_2'] <= 519):
df.loc[index, 'diag_2'] = 8
elif (row['diag_2'] >= 520 and row['diag_2'] <= 579):
df.loc[index, 'diag_2'] = 9
elif (row['diag_2'] >= 580 and row['diag_2'] <= 629):
df.loc[index, 'diag_2'] = 10
elif (row['diag_2'] >= 630 and row['diag_2'] <= 679):
df.loc[index, 'diag_2'] = 11
elif (row['diag_2'] >= 680 and row['diag_2'] <= 709):
df.loc[index, 'diag_2'] = 12
elif (row['diag_2'] >= 710 and row['diag_2'] <= 739):
df.loc[index, 'diag_2'] = 13
elif (row['diag_2'] >= 740 and row['diag_2'] <= 759):
df.loc[index, 'diag_2'] = 14
elif (row['diag_2'] >= 760 and row['diag_2'] <= 779):
df.loc[index, 'diag_2'] = 15
elif (row['diag_2'] >= 780 and row['diag_2'] <= 799):
df.loc[index, 'diag_2'] = 16
elif (row['diag_2'] >= 800 and row['diag_2'] <= 999):
df.loc[index, 'diag_2'] = 17
print('diag_2 end')
df.loc[df['diag_3'].str.contains('V', na=False), ['diag_3']] = 0
df.loc[df['diag_3'].str.contains('E', na=False), ['diag_3']] = 0
df['diag_3'] = df['diag_3'].replace('?', -1)
df['diag_3'] = pd.to_numeric(df['diag_3'], errors='coerce')
for index, row in df.iterrows():
if (row['diag_3'] >= 1 and row['diag_3'] <= 139):
df.loc[index, 'diag_3'] = 1
elif (row['diag_3'] >= 140 and row['diag_3'] <= 239):
df.loc[index, 'diag_3'] = 2
elif (row['diag_3'] >= 240 and row['diag_3'] <= 279):
df.loc[index, 'diag_3'] = 3
elif (row['diag_3'] >= 280 and row['diag_3'] <= 289):
df.loc[index, 'diag_3'] = 4
elif (row['diag_3'] >= 290 and row['diag_3'] <= 319):
df.loc[index, 'diag_3'] = 5
elif (row['diag_3'] >= 320 and row['diag_3'] <= 389):
df.loc[index, 'diag_3'] = 6
elif (row['diag_3'] >= 390 and row['diag_3'] <= 459):
df.loc[index, 'diag_3'] = 7
elif (row['diag_3'] >= 460 and row['diag_3'] <= 519):
df.loc[index, 'diag_3'] = 8
elif (row['diag_3'] >= 520 and row['diag_3'] <= 579):
df.loc[index, 'diag_3'] = 9
elif (row['diag_3'] >= 580 and row['diag_3'] <= 629):
df.loc[index, 'diag_3'] = 10
elif (row['diag_3'] >= 630 and row['diag_3'] <= 679):
df.loc[index, 'diag_3'] = 11
elif (row['diag_3'] >= 680 and row['diag_3'] <= 709):
df.loc[index, 'diag_3'] = 12
elif (row['diag_3'] >= 710 and row['diag_3'] <= 739):
df.loc[index, 'diag_3'] = 13
elif (row['diag_3'] >= 740 and row['diag_3'] <= 759):
df.loc[index, 'diag_3'] = 14
elif (row['diag_3'] >= 760 and row['diag_3'] <= 779):
df.loc[index, 'diag_3'] = 15
elif (row['diag_3'] >= 780 and row['diag_3'] <= 799):
df.loc[index, 'diag_3'] = 16
elif (row['diag_3'] >= 800 and row['diag_3'] <= 999):
df.loc[index, 'diag_3'] = 17
print('diag_3 end')
# df['new_1'] = df['num_medications'] * df['time_in_hospital']
# # df['add_feature_2'] = df['change'] * df['num_medications']
# df['new_3'] = df['age'] * df['number_diagnoses']
print('diag end')
def standardize(raw_data):
return ((raw_data - np.mean(raw_data, axis=0)) / np.std(raw_data, axis=0))
numerics = ['race', 'age', 'time_in_hospital', 'num_medications', 'number_diagnoses',
'num_med_changed', 'num_med_taken', 'number_inpatient', 'number_outpatient', 'number_emergency',
'num_procedures', 'num_lab_procedures']
df[numerics] = standardize(df[numerics])
df = df[(np.abs(sp.stats.zscore(df[numerics])) < 3).all(axis=1)]
print('begin out')
print(OUTPUT_DATA_PATH)
df.to_csv(OUTPUT_DATA_PATH)
| en | 0.765105 | # file path # load data # make a copy of the dataframe for preprocessing # Drop features # drop bad data with 3 '?' in diag # drop died patient data which 'discharge_disposition_id' == 11 | 19 | 20 | 21 indicates 'Expired' # drop 3 data with 'Unknown/Invalid' gender # process readmitted data # cnt0, cnt1, cnt2 = 0, 0, 0 for i in df['readmitted']: if i == 0: cnt0 += 1 if i == 1: cnt1 += 1 else: cnt2 += 1 print(cnt0, cnt1, cnt2) # 53208 11357 88753 # calculate change times through 23 kinds of medicines # high change times refer to higher prob to readmit # 'num_med_changed' to counts medicine change # two new feature # encode race # map # -1 -> -99 # -1 -> -99 # simplify # admission_type_id : [2, 7] -> 1, [6, 8] -> 5 # discharge_disposition_id : [6, 8, 9, 13] -> 1, [3, 4, 5, 14, 22, 23, 24] -> 2, # [12, 15, 16, 17] -> 10, [19, 20, 21] -> 11, [25, 26] -> 18 # data of died patients have been dropped # for p in d: # df['discharge_disposition_id'] = df['discharge_disposition_id'].replace(p, 11) # admission_source_id : [3, 2] -> 1, [5, 6, 10, 22, 25] -> 4, # [15, 17, 20, 21] -> 9, [13, 14] -> 11 # Classify Diagnoses by ICD-9 # df['new_1'] = df['num_medications'] * df['time_in_hospital'] # # df['add_feature_2'] = df['change'] * df['num_medications'] # df['new_3'] = df['age'] * df['number_diagnoses'] | 3.015893 | 3 |
jazzband/content.py | tipabu/jazzband-website | 0 | 6622645 | <filename>jazzband/content.py
import babel.dates
from flask import (
Blueprint,
Response,
current_app,
render_template,
redirect,
request,
url_for,
send_from_directory,
safe_join,
)
from flask_flatpages import FlatPages
from flask_login import current_user
from pyatom import AtomFeed
from .assets import styles
from .decorators import templated
from .utils import full_url
content = Blueprint("content", __name__)
about_pages = FlatPages(name="about")
news_pages = FlatPages(name="news")
@content.app_template_filter()
def format_datetime(value):
return babel.dates.format_datetime(value)
@content.route("/join")
def join():
return redirect(url_for("account.join"))
@content.route("/security")
def security():
return redirect("/about/contact#security")
@content.route("/docs", defaults={"path": "index"})
@content.route("/docs/<path:path>")
def docs(path):
"Just a redirect from the old URL"
return redirect(url_for("content.about", path=path))
@content.route("/about", defaults={"path": "index"})
@content.route("/about/<path:path>")
def about(path):
page = about_pages.get_or_404(path)
template = "layouts/%s.html" % page.meta.get("layout", "about")
return render_template(template, page=page)
@content.route("/news/feed")
def news_feed():
feed = AtomFeed(
"Jazzband News Feed", feed_url=request.url, url=request.url_root, generator=None
)
for page in news_pages:
if page.path == "index":
continue
published = page.meta.get("published", None)
updated = page.meta.get("updated", published)
summary = page.meta.get("summary", None)
feed.add(
title=page.meta["title"],
content=str(page.html),
content_type="html",
summary=summary,
summary_type="text",
author=page.meta.get("author", None),
url=full_url(url_for("content.news", path=page.path)),
updated=updated,
published=published,
)
return Response(feed.to_string(), mimetype="application/atom+xml")
@content.route("/news", defaults={"path": "index"})
@content.route("/news/<path:path>")
def news(path):
page = news_pages.get_or_404(path)
template = "layouts/%s.html" % page.meta.get("layout", "news_detail")
return render_template(template, page=page)
@content.route("/")
@templated()
def index():
if current_user.is_authenticated:
return redirect(url_for("account.dashboard"))
return {}
@content.route("/static/css/styles.css")
def styles_css():
urls = styles.urls()
return redirect(urls[0])
@content.route("/favicon.ico")
def favicon():
filename = "favicon.ico"
cache_timeout = current_app.get_send_file_max_age(filename)
favicon_path = safe_join(current_app.static_folder, "favicons")
return send_from_directory(
favicon_path,
filename,
mimetype="image/vnd.microsoft.icon",
cache_timeout=cache_timeout,
)
| <filename>jazzband/content.py
import babel.dates
from flask import (
Blueprint,
Response,
current_app,
render_template,
redirect,
request,
url_for,
send_from_directory,
safe_join,
)
from flask_flatpages import FlatPages
from flask_login import current_user
from pyatom import AtomFeed
from .assets import styles
from .decorators import templated
from .utils import full_url
content = Blueprint("content", __name__)
about_pages = FlatPages(name="about")
news_pages = FlatPages(name="news")
@content.app_template_filter()
def format_datetime(value):
return babel.dates.format_datetime(value)
@content.route("/join")
def join():
return redirect(url_for("account.join"))
@content.route("/security")
def security():
return redirect("/about/contact#security")
@content.route("/docs", defaults={"path": "index"})
@content.route("/docs/<path:path>")
def docs(path):
"Just a redirect from the old URL"
return redirect(url_for("content.about", path=path))
@content.route("/about", defaults={"path": "index"})
@content.route("/about/<path:path>")
def about(path):
page = about_pages.get_or_404(path)
template = "layouts/%s.html" % page.meta.get("layout", "about")
return render_template(template, page=page)
@content.route("/news/feed")
def news_feed():
feed = AtomFeed(
"Jazzband News Feed", feed_url=request.url, url=request.url_root, generator=None
)
for page in news_pages:
if page.path == "index":
continue
published = page.meta.get("published", None)
updated = page.meta.get("updated", published)
summary = page.meta.get("summary", None)
feed.add(
title=page.meta["title"],
content=str(page.html),
content_type="html",
summary=summary,
summary_type="text",
author=page.meta.get("author", None),
url=full_url(url_for("content.news", path=page.path)),
updated=updated,
published=published,
)
return Response(feed.to_string(), mimetype="application/atom+xml")
@content.route("/news", defaults={"path": "index"})
@content.route("/news/<path:path>")
def news(path):
page = news_pages.get_or_404(path)
template = "layouts/%s.html" % page.meta.get("layout", "news_detail")
return render_template(template, page=page)
@content.route("/")
@templated()
def index():
if current_user.is_authenticated:
return redirect(url_for("account.dashboard"))
return {}
@content.route("/static/css/styles.css")
def styles_css():
urls = styles.urls()
return redirect(urls[0])
@content.route("/favicon.ico")
def favicon():
filename = "favicon.ico"
cache_timeout = current_app.get_send_file_max_age(filename)
favicon_path = safe_join(current_app.static_folder, "favicons")
return send_from_directory(
favicon_path,
filename,
mimetype="image/vnd.microsoft.icon",
cache_timeout=cache_timeout,
)
| es | 0.481087 | #security") | 2.085305 | 2 |
RTplzrunBlog/InputOutput/2446.py | lkc263/Algorithm_Study_Python | 0 | 6622646 | n = int(input())
cnt_idx = 0
for idx in range(n, 0, -1):
print(" " * cnt_idx + "*" * (2 * idx - 1))
cnt_idx += 1
cnt_idx -= 1
for idx in range(2, n + 1):
cnt_idx -= 1
print(" " * cnt_idx + "*" * (2 * idx - 1))
| n = int(input())
cnt_idx = 0
for idx in range(n, 0, -1):
print(" " * cnt_idx + "*" * (2 * idx - 1))
cnt_idx += 1
cnt_idx -= 1
for idx in range(2, n + 1):
cnt_idx -= 1
print(" " * cnt_idx + "*" * (2 * idx - 1))
| none | 1 | 3.46062 | 3 | |
Day_16.py | iamakkkhil/DailyCoding | 8 | 6622647 | """
DAY 16 : Different Operations on Matrices.
https://www.geeksforgeeks.org/different-operation-matrices/
QUESTION : Perform Addititon, Subtraction and Multiplication on given Matrices.
"""
def add(matrix1, matrix2, n1, m1, n2, m2):
# n = no of rows
# m = no of columns
add = []
if n1 == n2 and m1 == m2:
for i in range(n1):
inner = []
for j in range(m1):
sum_ = matrix1[i][j] + matrix2[i][j]
inner.append(sum_)
add.append(inner)
print(f'Addition is : {add}')
else:
print(f'Addition not possible')
def sub(matrix1, matrix2, n1, m1, n2, m2):
# n = no of rows
# m = no of columns
sub = []
if n1 == n2 and m1 == m2:
for i in range(n1):
inner = []
for j in range(m1):
sum_ = matrix1[i][j] - matrix2[i][j]
inner.append(sum_)
sub.append(inner)
print(f'Subtraction is : {sub}')
else:
print(f'Subtraction not possible')
def multiply(matrix1, matrix2, n1, m1, n2, m2):
# n = no of rows
# m = no of columns
multiply = []
if m1 == n2:
for i in range(len(matrix1)):
inner = []
sum = 0
for j in range(len(matrix2[0])):
sum = 0
for k in range(len(matrix2)):
sum += matrix1[i][k] * matrix2[k][j]
inner.append(sum)
multiply.append(inner)
print(f'Multiplication is : {multiply}')
else:
print(f'Multiplication not possible')
matrix1 = [[1,2], [3,4], [5,6]]
matrix2 = [[5,6,7], [7,8,9]]
add(matrix1, matrix2, 2,3,3,2)
| """
DAY 16 : Different Operations on Matrices.
https://www.geeksforgeeks.org/different-operation-matrices/
QUESTION : Perform Addititon, Subtraction and Multiplication on given Matrices.
"""
def add(matrix1, matrix2, n1, m1, n2, m2):
# n = no of rows
# m = no of columns
add = []
if n1 == n2 and m1 == m2:
for i in range(n1):
inner = []
for j in range(m1):
sum_ = matrix1[i][j] + matrix2[i][j]
inner.append(sum_)
add.append(inner)
print(f'Addition is : {add}')
else:
print(f'Addition not possible')
def sub(matrix1, matrix2, n1, m1, n2, m2):
# n = no of rows
# m = no of columns
sub = []
if n1 == n2 and m1 == m2:
for i in range(n1):
inner = []
for j in range(m1):
sum_ = matrix1[i][j] - matrix2[i][j]
inner.append(sum_)
sub.append(inner)
print(f'Subtraction is : {sub}')
else:
print(f'Subtraction not possible')
def multiply(matrix1, matrix2, n1, m1, n2, m2):
    """Print the matrix product of matrix1 and matrix2.

    The product exists only when matrix1 has as many columns (m1) as
    matrix2 has rows (n2); otherwise a failure message is printed.
    """
    if m1 != n2:
        # Plain string since there are no placeholders.
        print('Multiplication not possible')
        return
    rows = len(matrix1)
    cols = len(matrix2[0])
    inner = len(matrix2)
    result = []
    for i in range(rows):
        row = []
        for j in range(cols):
            # Entry (i, j) = dot product of row i and column j.  `total`
            # replaces the old accumulator that shadowed builtin sum(),
            # and the dead `sum = 0` before the j-loop is gone.
            total = 0
            for k in range(inner):
                total += matrix1[i][k] * matrix2[k][j]
            row.append(total)
        result.append(row)
    print(f'Multiplication is : {result}')
# matrix1 has 3 rows and 2 columns; matrix2 has 2 rows and 3 columns.
matrix1 = [[1, 2], [3, 4], [5, 6]]
matrix2 = [[5, 6, 7], [7, 8, 9]]
# Fixed: the shape arguments were swapped (2,3,3,2).  The correct shapes
# are (3, 2) and (2, 3); addition remains impossible for them, so the
# printed message is unchanged.
add(matrix1, matrix2, 3, 2, 2, 3)
| en | 0.67482 | DAY 16 : Different Operations on Matrices.
https://www.geeksforgeeks.org/different-operation-matrices/
QUESTION : Perform Addititon, Subtraction and Multiplication on given Matrices. # n = no of rows # m = no of columns # n = no of rows # m = no of columns # n = no of rows # m = no of columns | 4.004066 | 4 |
run_segmentation.py | cansik/DPT | 1 | 6622648 | """Compute segmentation maps for images in the input folder.
"""
import os
import glob
import cv2
import argparse
import numpy as np
import torch
import torch.nn.functional as F
import tqdm
import util.io
from util.io import get_images_in_path
from torchvision.transforms import Compose
from dpt.models import DPTSegmentationModel
from dpt.transforms import Resize, NormalizeImage, PrepareForNet
def run(input_path, output_path, model_path, model_type="dpt_hybrid", optimize=True):
    """Run segmentation network
    Args:
        input_path (str): path to input folder
        output_path (str): path to output folder
        model_path (str): path to saved model
        model_type (str): DPT variant, "dpt_large" or "dpt_hybrid"
        optimize (bool): if True and CUDA is available, run the model in
            half precision with channels-last memory layout

    NOTE(review): besides its parameters this function also reads the
    module-level ``args`` namespace (mask / mask_background / blur /
    threshold) parsed under ``__main__`` -- it is not usable as a
    standalone library call without it.
    """
    print("initialize")

    # select device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("device: %s" % device)

    # Target network input size; the Resize transform below keeps aspect
    # ratio and snaps dimensions to multiples of 32 around this value.
    net_w = net_h = 480

    # load network: 150 output classes (ADE20K label set), backbone
    # chosen per model type
    if model_type == "dpt_large":
        model = DPTSegmentationModel(
            150,
            path=model_path,
            backbone="vitl16_384",
        )
    elif model_type == "dpt_hybrid":
        model = DPTSegmentationModel(
            150,
            path=model_path,
            backbone="vitb_rn50_384",
        )
    else:
        assert (
            False
        ), f"model_type '{model_type}' not implemented, use: --model_type [dpt_large|dpt_hybrid]"

    # Preprocessing pipeline: resize, normalize to [-1, 1], pack arrays
    # for the network.
    transform = Compose(
        [
            Resize(
                net_w,
                net_h,
                resize_target=None,
                keep_aspect_ratio=True,
                ensure_multiple_of=32,
                resize_method="minimal",
                image_interpolation_method=cv2.INTER_CUBIC,
            ),
            NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
            PrepareForNet(),
        ]
    )

    model.eval()

    if optimize == True and device == torch.device("cuda"):
        # channels-last memory format + fp16 for faster CUDA inference
        model = model.to(memory_format=torch.channels_last)
        model = model.half()
    model.to(device)

    # get input
    img_names = get_images_in_path(input_path)
    num_images = len(img_names)  # unused except by the commented-out progress print below

    # create output folder
    os.makedirs(output_path, exist_ok=True)

    print("start processing")
    with tqdm.tqdm(total=len(img_names)) as prog:
        for ind, img_name in enumerate(img_names):
            # print("  processing {} ({}/{})".format(img_name, ind + 1, num_images))
            # input
            img = util.io.read_image(img_name)
            img_input = transform({"image": img})["image"]

            # compute
            with torch.no_grad():
                sample = torch.from_numpy(img_input).to(device).unsqueeze(0)
                if optimize == True and device == torch.device("cuda"):
                    # mirror the model's memory format / precision
                    sample = sample.to(memory_format=torch.channels_last)
                    sample = sample.half()
                out = model.forward(sample)
                # Upsample class logits back to the original image size.
                prediction = torch.nn.functional.interpolate(
                    out, size=img.shape[:2], mode="bicubic", align_corners=False
                )
                # argmax over the class dimension; +1 shifts to 1-based
                # ADE20K label ids
                prediction = torch.argmax(prediction, dim=1) + 1
                prediction = prediction.squeeze().cpu().numpy()

            # output path without extension; the writers append their own
            filename = os.path.join(
                output_path, os.path.splitext(os.path.basename(img_name))[0]
            )
            if args.mask is not None:
                # Keep only pixels predicted as the requested class id.
                filtered_predictions = prediction == args.mask
                # apply mask in opencv (0/255 uint8 mask image)
                cv_mask = (filtered_predictions * 255).astype(np.uint8)
                # blur mask as a preprocess step to grow the masked area,
                # then re-binarize so the mask stays strictly 0/255
                if args.blur > 0:
                    cv2.blur(cv_mask, (args.blur, args.blur), dst=cv_mask)
                    cv2.threshold(cv_mask, 1, 255, cv2.THRESH_BINARY, dst=cv_mask)
                cv_image = cv2.imread(img_name)
                output_image = cv2.bitwise_and(cv_image, cv_image, mask=cv_mask)
                # paint everything outside the mask with the chosen color
                output_image[cv_mask == 0] = args.mask_background
                if args.threshold:
                    # collapse the result to a pure black/white mask image
                    gray = cv2.cvtColor(output_image, cv2.COLOR_RGB2GRAY)
                    _, output_image = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)
                cv2.imwrite("%s.png" % filename, output_image)
                # cv2.imwrite("%s.png" % filename, cv_mask)
            else:
                # standard path: blended segmentation overlay image
                util.io.write_segm_img(filename, img, prediction, alpha=0.5)
            prog.update()

    print("finished")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-i", "--input_path", default="input", help="folder with input images"
)
parser.add_argument(
"-o", "--output_path", default="output_semseg", help="folder for output images"
)
parser.add_argument(
"-m",
"--model_weights",
default=None,
help="path to the trained weights of model",
)
# 'vit_large', 'vit_hybrid'
parser.add_argument("-t", "--model_type", default="dpt_hybrid", help="model type")
parser.add_argument("--optimize", dest="optimize", action="store_true")
parser.add_argument("--no-optimize", dest="optimize", action="store_false")
parser.set_defaults(optimize=True)
parser.add_argument("--mask", default=None, type=int, help="create masks of these ADE20K classes")
parser.add_argument("--mask-background", default=[0, 0, 0], type=int, nargs=3,
metavar=("r", "g", "b"), help="Background color of the mask")
parser.add_argument("--threshold", action="store_true", help="Threshold the image to create a black-white mask.")
parser.add_argument("--blur", default=-1, type=int, help="mask blur factor to increase area")
args = parser.parse_args()
default_models = {
"dpt_large": "weights/dpt_large-ade20k-b12dca68.pt",
"dpt_hybrid": "weights/dpt_hybrid-ade20k-53898607.pt",
}
if args.model_weights is None:
args.model_weights = default_models[args.model_type]
# set torch options
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
# compute segmentation maps
run(
args.input_path,
args.output_path,
args.model_weights,
args.model_type,
args.optimize,
)
| """Compute segmentation maps for images in the input folder.
"""
import os
import glob
import cv2
import argparse
import numpy as np
import torch
import torch.nn.functional as F
import tqdm
import util.io
from util.io import get_images_in_path
from torchvision.transforms import Compose
from dpt.models import DPTSegmentationModel
from dpt.transforms import Resize, NormalizeImage, PrepareForNet
def run(input_path, output_path, model_path, model_type="dpt_hybrid", optimize=True):
    """Run segmentation network
    Args:
        input_path (str): path to input folder
        output_path (str): path to output folder
        model_path (str): path to saved model
        model_type (str): DPT variant, "dpt_large" or "dpt_hybrid"
        optimize (bool): if True and CUDA is available, run the model in
            half precision with channels-last memory layout

    NOTE(review): besides its parameters this function also reads the
    module-level ``args`` namespace (mask / mask_background / blur /
    threshold) parsed under ``__main__`` -- it is not usable as a
    standalone library call without it.
    """
    print("initialize")

    # select device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("device: %s" % device)

    # Target network input size; the Resize transform below keeps aspect
    # ratio and snaps dimensions to multiples of 32 around this value.
    net_w = net_h = 480

    # load network: 150 output classes (ADE20K label set), backbone
    # chosen per model type
    if model_type == "dpt_large":
        model = DPTSegmentationModel(
            150,
            path=model_path,
            backbone="vitl16_384",
        )
    elif model_type == "dpt_hybrid":
        model = DPTSegmentationModel(
            150,
            path=model_path,
            backbone="vitb_rn50_384",
        )
    else:
        assert (
            False
        ), f"model_type '{model_type}' not implemented, use: --model_type [dpt_large|dpt_hybrid]"

    # Preprocessing pipeline: resize, normalize to [-1, 1], pack arrays
    # for the network.
    transform = Compose(
        [
            Resize(
                net_w,
                net_h,
                resize_target=None,
                keep_aspect_ratio=True,
                ensure_multiple_of=32,
                resize_method="minimal",
                image_interpolation_method=cv2.INTER_CUBIC,
            ),
            NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
            PrepareForNet(),
        ]
    )

    model.eval()

    if optimize == True and device == torch.device("cuda"):
        # channels-last memory format + fp16 for faster CUDA inference
        model = model.to(memory_format=torch.channels_last)
        model = model.half()
    model.to(device)

    # get input
    img_names = get_images_in_path(input_path)
    num_images = len(img_names)  # unused except by the commented-out progress print below

    # create output folder
    os.makedirs(output_path, exist_ok=True)

    print("start processing")
    with tqdm.tqdm(total=len(img_names)) as prog:
        for ind, img_name in enumerate(img_names):
            # print("  processing {} ({}/{})".format(img_name, ind + 1, num_images))
            # input
            img = util.io.read_image(img_name)
            img_input = transform({"image": img})["image"]

            # compute
            with torch.no_grad():
                sample = torch.from_numpy(img_input).to(device).unsqueeze(0)
                if optimize == True and device == torch.device("cuda"):
                    # mirror the model's memory format / precision
                    sample = sample.to(memory_format=torch.channels_last)
                    sample = sample.half()
                out = model.forward(sample)
                # Upsample class logits back to the original image size.
                prediction = torch.nn.functional.interpolate(
                    out, size=img.shape[:2], mode="bicubic", align_corners=False
                )
                # argmax over the class dimension; +1 shifts to 1-based
                # ADE20K label ids
                prediction = torch.argmax(prediction, dim=1) + 1
                prediction = prediction.squeeze().cpu().numpy()

            # output path without extension; the writers append their own
            filename = os.path.join(
                output_path, os.path.splitext(os.path.basename(img_name))[0]
            )
            if args.mask is not None:
                # Keep only pixels predicted as the requested class id.
                filtered_predictions = prediction == args.mask
                # apply mask in opencv (0/255 uint8 mask image)
                cv_mask = (filtered_predictions * 255).astype(np.uint8)
                # blur mask as a preprocess step to grow the masked area,
                # then re-binarize so the mask stays strictly 0/255
                if args.blur > 0:
                    cv2.blur(cv_mask, (args.blur, args.blur), dst=cv_mask)
                    cv2.threshold(cv_mask, 1, 255, cv2.THRESH_BINARY, dst=cv_mask)
                cv_image = cv2.imread(img_name)
                output_image = cv2.bitwise_and(cv_image, cv_image, mask=cv_mask)
                # paint everything outside the mask with the chosen color
                output_image[cv_mask == 0] = args.mask_background
                if args.threshold:
                    # collapse the result to a pure black/white mask image
                    gray = cv2.cvtColor(output_image, cv2.COLOR_RGB2GRAY)
                    _, output_image = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)
                cv2.imwrite("%s.png" % filename, output_image)
                # cv2.imwrite("%s.png" % filename, cv_mask)
            else:
                # standard path: blended segmentation overlay image
                util.io.write_segm_img(filename, img, prediction, alpha=0.5)
            prog.update()

    print("finished")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-i", "--input_path", default="input", help="folder with input images"
)
parser.add_argument(
"-o", "--output_path", default="output_semseg", help="folder for output images"
)
parser.add_argument(
"-m",
"--model_weights",
default=None,
help="path to the trained weights of model",
)
# 'vit_large', 'vit_hybrid'
parser.add_argument("-t", "--model_type", default="dpt_hybrid", help="model type")
parser.add_argument("--optimize", dest="optimize", action="store_true")
parser.add_argument("--no-optimize", dest="optimize", action="store_false")
parser.set_defaults(optimize=True)
parser.add_argument("--mask", default=None, type=int, help="create masks of these ADE20K classes")
parser.add_argument("--mask-background", default=[0, 0, 0], type=int, nargs=3,
metavar=("r", "g", "b"), help="Background color of the mask")
parser.add_argument("--threshold", action="store_true", help="Threshold the image to create a black-white mask.")
parser.add_argument("--blur", default=-1, type=int, help="mask blur factor to increase area")
args = parser.parse_args()
default_models = {
"dpt_large": "weights/dpt_large-ade20k-b12dca68.pt",
"dpt_hybrid": "weights/dpt_hybrid-ade20k-53898607.pt",
}
if args.model_weights is None:
args.model_weights = default_models[args.model_type]
# set torch options
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
# compute segmentation maps
run(
args.input_path,
args.output_path,
args.model_weights,
args.model_type,
args.optimize,
)
| en | 0.487574 | Compute segmentation maps for images in the input folder. Run segmentation network Args: input_path (str): path to input folder output_path (str): path to output folder model_path (str): path to saved model # select device # load network # get input # create output folder # print(" processing {} ({}/{})".format(img_name, ind + 1, num_images)) # input # compute # output # apply mask in opencv # blur mask as a preprocess step # cv2.imwrite("%s.png" % filename, cv_mask) # 'vit_large', 'vit_hybrid' # set torch options # compute segmentation maps | 2.408037 | 2 |
ex054.py | ezequielwish/Python3 | 1 | 6622649 | <reponame>ezequielwish/Python3
# Read the birth year of seven people; at the end, report how many are
# already adults (18 or older) and how many are still minors.
from datetime import datetime

# Current calendar year; ages are approximated as year - birth year
# (month/day are not taken into account).
year = datetime.now().year
majors = 0  # count of people aged 18 or more
minors = 0  # count of people under 18
for people in range(1, 8):
    # Portuguese prompt: "Birth year of the Nth person".
    # NOTE(review): '°' is the degree sign; 'ª' (ordinal) is likely the
    # intended character -- confirm before changing the runtime string.
    birth = int(input(f'Data de nascimento da {people}° pessoa: '))
    age = year - birth
    if age >= 18:
        majors += 1
    else:
        minors += 1
# Portuguese output labels: "Adults:" / "Minors:".
print('Maiores de idade:', majors)
print('Menores de idade:', minors)
| # Crie um programa que leia o ano de nascimento de sete pessoas. No final,
# Report how many of seven people (identified by birth year) have
# reached adulthood (18+) and how many are still minors.
from datetime import datetime

year = datetime.now().year  # current year; age ~= year - birth year
majors = 0  # 18 or older
minors = 0  # under 18
for people in range(1, 8):
    # Portuguese prompt: "Birth year of the Nth person".
    birth = int(input(f'Data de nascimento da {people}° pessoa: '))
    age = year - birth
    if age >= 18:
        majors += 1
    else:
        minors += 1
# Portuguese label: "Adults:".
print('Maiores de idade:', majors)
print('Menores de idade:', minors) | pt | 0.998296 | # Crie um programa que leia o ano de nascimento de sete pessoas. No final, # mostre quantas pessoas ainda não atingiram a maioridade e quantas já são maiores. | 4.198824 | 4 |
new-component.py | syncush/react-boilerplate-cli | 1 | 6622650 | <gh_stars>1-10
import sys
import os

# Placeholder tokens that get substituted inside the template files.
name_string = "$componentName"
styles_extention_token = "$styleExtention"

# Templates live next to this script, not in the caller's working dir.
dir_path = os.path.dirname(os.path.realpath(__file__))
template_dir = os.path.join(dir_path, "templates")
components_dir = os.path.join(template_dir, "components")

cwd = os.getcwd()
# The component name is taken from the last CLI argument.
input_name = sys.argv[-1]
component_dir = os.path.join(cwd, input_name)
if os.path.exists(component_dir):
    # Refuse to overwrite an existing component directory.
    print("Component already exists")
    exit()
os.makedirs(component_dir)
def create_file(template_file_path, new_file_name, spaces, styleExtention="css"):
    """Instantiate one template into the new component directory.

    Reads the template, rewrites its indentation units to ``spaces``,
    substitutes the component-name and style-extension placeholders, and
    writes the result as ``new_file_name`` under the module-level
    ``component_dir``.

    Args:
        template_file_path: path of the template file to read.
        new_file_name: file name to create inside the component dir.
        spaces: indentation string replacing each template indent unit.
        styleExtention: stylesheet extension substituted into templates.
    """
    # Context managers close both files even when read/write raises;
    # the previous version leaked the handles on any exception.
    # NOTE(review): templates assumed to use 4-space indent units --
    # display-mangled in review source; confirm against the templates.
    with open(template_file_path, "r") as template_file:
        content = template_file.read()
    content = content.replace("    ", spaces)
    content = content.replace(name_string, input_name)
    content = content.replace(styles_extention_token, styleExtention)
    with open(os.path.join(component_dir, new_file_name), "w+") as new_file:
        new_file.write(content)
args_set = set(sys.argv)

# Indentation style for generated files: 4 spaces by default, 2 with
# --2spaces.  NOTE(review): default assumed to be four spaces -- the
# review source is whitespace-mangled; confirm.
spaces = "    "
if "--2spaces" in args_set:
    spaces = " " * 2

# Stylesheet flavor: plain CSS unless --sass/--less is given
# (--less wins if both flags are present, since it is checked last).
styles_extention = 'css'
if '--sass' in args_set:
    styles_extention = 'scss'
if '--less' in args_set:
    styles_extention = 'less'

# --ts switches both the component and the index file extensions.
react_file_extention = 'jsx'
index_file_extention = 'js'
if '--ts' in args_set:
    react_file_extention = 'tsx'
    index_file_extention = 'ts'

# Always emit the index (re-export) file and the stylesheet.
create_file(os.path.join(template_dir, "index.js.template"), "index." + index_file_extention, spaces)
create_file(os.path.join(template_dir, "style.css.template"), input_name + '.' + styles_extention, spaces, styles_extention)

# Component flavor: class component by default; --hooks / --func /
# --function / --ts pick alternative templates (first match wins).
component_template_name = "class"
if "--hooks" in args_set:
    component_template_name = "hooks"
elif "--func" in args_set or "--function" in args_set:
    component_template_name = "func"
elif "--ts" in args_set:
    component_template_name = "ts_class"
template_file_path = os.path.join(components_dir,
                                  component_template_name + ".template")
create_file(template_file_path, input_name + '.' + react_file_extention, spaces, styles_extention)

# Optional Storybook story file.
if "--story" in args_set:
    story_file_path = os.path.join(template_dir, "stories.js.template")
    create_file(story_file_path, 'stories.' + index_file_extention, spaces)
| import sys
import os

# Placeholder tokens substituted inside the template files.
name_string = "$componentName"
styles_extention_token = "$styleExtention"

# Resolve template directories relative to this script's location.
dir_path = os.path.dirname(os.path.realpath(__file__))
template_dir = os.path.join(dir_path, "templates")
components_dir = os.path.join(template_dir, "components")

cwd = os.getcwd()
# Component name comes from the last command-line argument.
input_name = sys.argv[-1]
component_dir = os.path.join(cwd, input_name)
if os.path.exists(component_dir):
    # Never overwrite an existing component.
    print("Component already exists")
    exit()
os.makedirs(component_dir)
def create_file(template_file_path, new_file_name, spaces, styleExtention="css"):
    """Render one template into the freshly created component directory.

    The template's indent units are rewritten to ``spaces`` and the
    ``$componentName`` / ``$styleExtention`` placeholders are replaced
    before the result is written as ``new_file_name`` under the
    module-level ``component_dir``.
    """
    # `with` guarantees both handles are closed even if read/write
    # raises; the original version leaked them on any exception.
    # NOTE(review): 4-space indent unit assumed (review source is
    # whitespace-mangled); confirm against the template files.
    with open(template_file_path, "r") as template_file:
        rendered = (
            template_file.read()
            .replace("    ", spaces)
            .replace(name_string, input_name)
            .replace(styles_extention_token, styleExtention)
        )
    with open(os.path.join(component_dir, new_file_name), "w+") as new_file:
        new_file.write(rendered)
args_set = set(sys.argv)

# Generated-file indentation: 4 spaces by default, 2 with --2spaces.
# NOTE(review): default assumed four spaces -- source is whitespace-
# mangled; confirm.
spaces = "    "
if "--2spaces" in args_set:
    spaces = " " * 2

# Stylesheet flavor; --less overrides --sass because it is checked last.
styles_extention = 'css'
if '--sass' in args_set:
    styles_extention = 'scss'
if '--less' in args_set:
    styles_extention = 'less'

# --ts switches both component and index extensions to TypeScript.
react_file_extention = 'jsx'
index_file_extention = 'js'
if '--ts' in args_set:
    react_file_extention = 'tsx'
    index_file_extention = 'ts'

# Emit the index (re-export) file and the stylesheet for the component.
create_file(os.path.join(template_dir, "index.js.template"), "index." + index_file_extention, spaces)
create_file(os.path.join(template_dir, "style.css.template"), input_name + '.' + styles_extention, spaces, styles_extention)

# Component flavor template; flags are checked in priority order.
component_template_name = "class"
if "--hooks" in args_set:
    component_template_name = "hooks"
elif "--func" in args_set or "--function" in args_set:
    component_template_name = "func"
elif "--ts" in args_set:
    component_template_name = "ts_class"
template_file_path = os.path.join(components_dir,
                                  component_template_name + ".template")
create_file(template_file_path, input_name + '.' + react_file_extention, spaces, styles_extention)
if "--story" in args_set:
story_file_path = os.path.join(template_dir, "stories.js.template")
create_file(story_file_path, 'stories.' + index_file_extention, spaces) | none | 1 | 2.545078 | 3 |