hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6ccf32f517955fcbf679927cd5f7f3e4219cdd8f | 6,138 | py | Python | scionlab/tests/test_certificates.py | andreatulimiero/scionlab | d6063ca4fbc5cbf153287e1b4b98cdf8d507f6ba | [
"Apache-2.0"
] | null | null | null | scionlab/tests/test_certificates.py | andreatulimiero/scionlab | d6063ca4fbc5cbf153287e1b4b98cdf8d507f6ba | [
"Apache-2.0"
] | null | null | null | scionlab/tests/test_certificates.py | andreatulimiero/scionlab | d6063ca4fbc5cbf153287e1b4b98cdf8d507f6ba | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from django.test import TestCase
from scionlab.models.core import ISD, AS
from scionlab.tests import utils
from lib.errors import SCIONVerificationError
class TRCAndCoreASCertificateTestsSimple(TestCase):
def test_empty_isd(self):
isd = ISD.objects.create(isd_id=1, label='empty')
# No TRC set unless explicitly created. This ISD is invalid either way!
isd.init_trc_and_certificates()
self.assertEqual(isd.trc['CoreASes'], {})
self.assertEqual(isd.trc['Signatures'], {})
self.assertEqual(isd.trc_priv_keys, {})
def test_create_delete_create(self):
isd = ISD.objects.create(isd_id=1, label='one')
as1_id = 'ffaa:0:0101'
as1_ia = '%i-%s' % (isd.isd_id, as1_id)
AS.objects.create(isd, as1_id, is_core=True)
trc_v1 = utils.check_trc_and_certs(self, 1, {as1_ia}, expected_version=1)
AS.objects.filter(as_id=as1_id).delete()
isd.refresh_from_db()
trc_v2 = utils.check_trc_and_certs(self, 1, {}, expected_version=2, prev_trc=trc_v1)
AS.objects.create(isd, as1_id, is_core=True)
utils.check_trc_and_certs(self, 1, {as1_ia}, expected_version=3, prev_trc=trc_v2)
def test_random_mutations(self):
NUM_MUTATIONS = 66
random.seed(5)
isd_id = 1
def make_as_id(i):
return "ffaa:0:%.4x" % i
def ia(as_id):
return "%i-%s" % (isd_id, as_id)
ISD.objects.create(isd_id=isd_id, label='some')
prev_trc = None
expected_version = 1
expected_set = set()
for i in range(NUM_MUTATIONS):
if not expected_set or random.getrandbits(1):
# add one: i has not been used yet
as_id = make_as_id(i)
expected_set.add(as_id)
AS.objects.create(ISD.objects.get(isd_id=isd_id), as_id, is_core=True)
else:
                as_id = random.sample(sorted(expected_set), 1)[0]  # sort first: sampling a set directly was removed in Python 3.11
expected_set.remove(as_id)
# Let's test both ways:
if random.getrandbits(1):
AS.objects.filter(as_id=as_id).delete()
else:
AS.objects.get(as_id=as_id).delete()
trc = utils.check_trc_and_certs(self,
isd_id,
{ia(as_id) for as_id in expected_set},
expected_version=expected_version,
prev_trc=prev_trc)
expected_version += 1
prev_trc = trc
class TRCAndCoreASCertificateTestsISD19(TestCase):
fixtures = ['testdata']
isd19_core_ases = ['19-ffaa:0:1301', '19-ffaa:0:1302']
def test_create_initial(self):
isd = ISD.objects.get(isd_id=19)
_reset_trc_and_certificates(isd)
self.assertEqual(isd.trc, None)
self.assertEqual(isd.trc_priv_keys, None)
isd.init_trc_and_certificates()
utils.check_trc_and_certs(self, 19, self.isd19_core_ases, expected_version=1)
def test_create_update(self):
isd = ISD.objects.get(isd_id=19)
trc_v1 = utils.check_trc(self, isd, self.isd19_core_ases, expected_version=1)
utils.check_core_as_certs(self, isd)
isd.update_trc_and_core_certificates()
trc_v2 = utils.check_trc(self, isd, self.isd19_core_ases, expected_version=2)
trc_v2.verify(trc_v1)
utils.check_core_as_certs(self, isd)
# XXX: this might have to be fixed if the grace period is set up. Sleep?
isd.update_trc_and_core_certificates()
trc_v3 = utils.check_trc(self, isd, self.isd19_core_ases, expected_version=3)
trc_v3.verify(trc_v2)
utils.check_core_as_certs(self, isd)
with self.assertRaises(SCIONVerificationError):
trc_v3.verify(trc_v1)
def test_update_single_cert(self):
isd = ISD.objects.get(isd_id=19)
as_ = isd.ases.filter(is_core=False).first()
# Generate fresh cert with same keys
original_certificate_chain = as_.certificate_chain
as_.generate_certificate_chain()
as_.save()
utils.check_cert_chain(self, as_, isd.trc)
self.assertEqual(as_.certificate_chain['0']['Version'],
original_certificate_chain['0']['Version'] + 1)
# Update keys and generate cert
as_.update_keys()
utils.check_cert_chain(self, as_, isd.trc)
self.assertEqual(as_.certificate_chain['0']['Version'],
original_certificate_chain['0']['Version'] + 2)
def test_update_core_cert(self):
isd = ISD.objects.get(isd_id=19)
trc_v1 = utils.check_trc(self, isd, self.isd19_core_ases, expected_version=1)
AS.update_core_as_keys(isd.ases.filter(is_core=True))
trc_v2 = utils.check_trc(self, isd, self.isd19_core_ases, expected_version=2)
trc_v2.verify(trc_v1)
utils.check_core_as_certs(self, isd)
# Certs for non-core ASes not updated; will be updated as new TRC is disseminated
for as_ in isd.ases.filter(is_core=False).iterator():
self.assertEqual(
as_.certificate_chain['0']['TRCVersion'],
trc_v1.version
)
def _reset_trc_and_certificates(isd):
isd.trc = None
isd.trc_priv_keys = None
isd.save()
for as_ in isd.ases.iterator():
as_.certificate_chain = None
as_.core_certificate = None
as_.save()
| 35.275862 | 92 | 0.631313 | 847 | 6,138 | 4.31405 | 0.220779 | 0.030651 | 0.035577 | 0.027915 | 0.435413 | 0.372195 | 0.301587 | 0.252326 | 0.244937 | 0.198686 | 0 | 0.024994 | 0.269958 | 6,138 | 173 | 93 | 35.479769 | 0.790449 | 0.144347 | 0 | 0.252252 | 0 | 0 | 0.026955 | 0 | 0 | 0 | 0 | 0 | 0.081081 | 1 | 0.09009 | false | 0 | 0.045045 | 0.018018 | 0.189189 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6ccfef2e1ab4ccf5e76fbf6c645bae7f3e9a2201 | 9,994 | py | Python | python/swagger_client/models/vehicle_type.py | byung90/graphhopper | 443891657e20cd1393675b1d4a1b9f38c691cdff | [
"Apache-2.0"
] | null | null | null | python/swagger_client/models/vehicle_type.py | byung90/graphhopper | 443891657e20cd1393675b1d4a1b9f38c691cdff | [
"Apache-2.0"
] | null | null | null | python/swagger_client/models/vehicle_type.py | byung90/graphhopper | 443891657e20cd1393675b1d4a1b9f38c691cdff | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
GraphHopper Directions API
You use the GraphHopper Directions API to add route planning, navigation and route optimization to your software. E.g. the Routing API has turn instructions and elevation data and the Route Optimization API solves your logistic problems and supports various constraints like time window and capacity restrictions. Also it is possible to get all distances between all locations with our fast Matrix API. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class VehicleType(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'type_id': 'str',
'profile': 'str',
'capacity': 'list[int]',
'speed_factor': 'float',
'service_time_factor': 'float',
'cost_per_meter': 'float',
'cost_per_second': 'float',
'cost_per_activation': 'float'
}
attribute_map = {
'type_id': 'type_id',
'profile': 'profile',
'capacity': 'capacity',
'speed_factor': 'speed_factor',
'service_time_factor': 'service_time_factor',
'cost_per_meter': 'cost_per_meter',
'cost_per_second': 'cost_per_second',
'cost_per_activation': 'cost_per_activation'
}
def __init__(self, type_id=None, profile=None, capacity=None, speed_factor=None, service_time_factor=None, cost_per_meter=None, cost_per_second=None, cost_per_activation=None): # noqa: E501
"""VehicleType - a model defined in Swagger""" # noqa: E501
self._type_id = None
self._profile = None
self._capacity = None
self._speed_factor = None
self._service_time_factor = None
self._cost_per_meter = None
self._cost_per_second = None
self._cost_per_activation = None
self.discriminator = None
if type_id is not None:
self.type_id = type_id
if profile is not None:
self.profile = profile
if capacity is not None:
self.capacity = capacity
if speed_factor is not None:
self.speed_factor = speed_factor
if service_time_factor is not None:
self.service_time_factor = service_time_factor
if cost_per_meter is not None:
self.cost_per_meter = cost_per_meter
if cost_per_second is not None:
self.cost_per_second = cost_per_second
if cost_per_activation is not None:
self.cost_per_activation = cost_per_activation
@property
def type_id(self):
"""Gets the type_id of this VehicleType. # noqa: E501
Unique identifier for the vehicle type # noqa: E501
:return: The type_id of this VehicleType. # noqa: E501
:rtype: str
"""
return self._type_id
@type_id.setter
def type_id(self, type_id):
"""Sets the type_id of this VehicleType.
Unique identifier for the vehicle type # noqa: E501
:param type_id: The type_id of this VehicleType. # noqa: E501
:type: str
"""
self._type_id = type_id
@property
def profile(self):
"""Gets the profile of this VehicleType. # noqa: E501
Profile of vehicle type # noqa: E501
:return: The profile of this VehicleType. # noqa: E501
:rtype: str
"""
return self._profile
@profile.setter
def profile(self, profile):
"""Sets the profile of this VehicleType.
Profile of vehicle type # noqa: E501
:param profile: The profile of this VehicleType. # noqa: E501
:type: str
"""
allowed_values = ["car", "bike", "foot", "hike", "mtb", "racingbike", "scooter", "truck", "small_truck"] # noqa: E501
if profile not in allowed_values:
raise ValueError(
"Invalid value for `profile` ({0}), must be one of {1}" # noqa: E501
.format(profile, allowed_values)
)
self._profile = profile
@property
def capacity(self):
"""Gets the capacity of this VehicleType. # noqa: E501
array of capacity dimensions # noqa: E501
:return: The capacity of this VehicleType. # noqa: E501
:rtype: list[int]
"""
return self._capacity
@capacity.setter
def capacity(self, capacity):
"""Sets the capacity of this VehicleType.
array of capacity dimensions # noqa: E501
:param capacity: The capacity of this VehicleType. # noqa: E501
:type: list[int]
"""
self._capacity = capacity
@property
def speed_factor(self):
"""Gets the speed_factor of this VehicleType. # noqa: E501
speed_factor of vehicle type # noqa: E501
:return: The speed_factor of this VehicleType. # noqa: E501
:rtype: float
"""
return self._speed_factor
@speed_factor.setter
def speed_factor(self, speed_factor):
"""Sets the speed_factor of this VehicleType.
speed_factor of vehicle type # noqa: E501
:param speed_factor: The speed_factor of this VehicleType. # noqa: E501
:type: float
"""
self._speed_factor = speed_factor
@property
def service_time_factor(self):
"""Gets the service_time_factor of this VehicleType. # noqa: E501
service time factor of vehicle type # noqa: E501
:return: The service_time_factor of this VehicleType. # noqa: E501
:rtype: float
"""
return self._service_time_factor
@service_time_factor.setter
def service_time_factor(self, service_time_factor):
"""Sets the service_time_factor of this VehicleType.
service time factor of vehicle type # noqa: E501
:param service_time_factor: The service_time_factor of this VehicleType. # noqa: E501
:type: float
"""
self._service_time_factor = service_time_factor
@property
def cost_per_meter(self):
"""Gets the cost_per_meter of this VehicleType. # noqa: E501
cost parameter per distance unit, here meter is used # noqa: E501
:return: The cost_per_meter of this VehicleType. # noqa: E501
:rtype: float
"""
return self._cost_per_meter
@cost_per_meter.setter
def cost_per_meter(self, cost_per_meter):
"""Sets the cost_per_meter of this VehicleType.
cost parameter per distance unit, here meter is used # noqa: E501
:param cost_per_meter: The cost_per_meter of this VehicleType. # noqa: E501
:type: float
"""
self._cost_per_meter = cost_per_meter
@property
def cost_per_second(self):
"""Gets the cost_per_second of this VehicleType. # noqa: E501
cost parameter per time unit, here second is used # noqa: E501
:return: The cost_per_second of this VehicleType. # noqa: E501
:rtype: float
"""
return self._cost_per_second
@cost_per_second.setter
def cost_per_second(self, cost_per_second):
"""Sets the cost_per_second of this VehicleType.
cost parameter per time unit, here second is used # noqa: E501
:param cost_per_second: The cost_per_second of this VehicleType. # noqa: E501
:type: float
"""
self._cost_per_second = cost_per_second
@property
def cost_per_activation(self):
"""Gets the cost_per_activation of this VehicleType. # noqa: E501
cost parameter vehicle activation, i.e. fixed costs per vehicle # noqa: E501
:return: The cost_per_activation of this VehicleType. # noqa: E501
:rtype: float
"""
return self._cost_per_activation
@cost_per_activation.setter
def cost_per_activation(self, cost_per_activation):
"""Sets the cost_per_activation of this VehicleType.
cost parameter vehicle activation, i.e. fixed costs per vehicle # noqa: E501
:param cost_per_activation: The cost_per_activation of this VehicleType. # noqa: E501
:type: float
"""
self._cost_per_activation = cost_per_activation
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, VehicleType):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
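# Minimal usage sketch (illustrative; not part of the generated code):
#   vt = VehicleType(type_id='my_van', profile='car', capacity=[100],
#                    cost_per_meter=0.01)
#   print(vt.to_dict())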
| 31.526814 | 420 | 0.61667 | 1,244 | 9,994 | 4.737138 | 0.150322 | 0.071271 | 0.092313 | 0.085525 | 0.554896 | 0.471237 | 0.421178 | 0.284236 | 0.214152 | 0.133548 | 0 | 0.02103 | 0.30058 | 9,994 | 316 | 421 | 31.626582 | 0.822031 | 0.396938 | 0 | 0.074627 | 0 | 0 | 0.09218 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.164179 | false | 0 | 0.022388 | 0 | 0.313433 | 0.014925 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cd00de5c8d59f007ac8ba764e45305035fd1e13 | 1,029 | py | Python | python/linguagem-de-programacao/estrutura/exercicio_1.py | BrenoPremoli/Faculdade | dcf5d106b10453224e37af830a53b2214af1d2ee | [
"MIT"
] | 1 | 2022-03-19T22:50:12.000Z | 2022-03-19T22:50:12.000Z | python/linguagem-de-programacao/estrutura/exercicio_1.py | BrenoPremoli/Faculdade | dcf5d106b10453224e37af830a53b2214af1d2ee | [
"MIT"
] | null | null | null | python/linguagem-de-programacao/estrutura/exercicio_1.py | BrenoPremoli/Faculdade | dcf5d106b10453224e37af830a53b2214af1d2ee | [
"MIT"
] | null | null | null | # 1. Elabore uma estrutura para representar um produto (código, nome, preço). Aplique 10% de aumento no preço do produto e apresente.
class TipoProduto:
codigo = 0
nome = ''
preco = 0.0
def main():
p1 = TipoProduto()
p1.codigo = int ( input('Cadastre o código do produto: '))
    p1.nome = input('Cadastre o nome do produto: ')
p1.preco = float ( input('Cadastre o preço do produto R$ '))
print(f'Código: {p1.codigo} \tNome: {p1.nome} \tPreço R$ {p1.preco:.2f}')
p1.preco = p1.preco + p1.preco * 10 / 100
print(f'Código: {p1.codigo} \tNome: {p1.nome} \tPreço R$ {p1.preco:.2f}')
p2 = TipoProduto()
p2.codigo = int ( input('Cadastre o código do produto: '))
    p2.nome = input('Cadastre o nome do produto: ')
p2.preco = float ( input('Cadastre o preço do produto R$ '))
print(f'Código: {p2.codigo} \tNome: {p2.nome} \tPreço R$ {p2.preco:.2f}')
p2.preco = p2.preco + p2.preco * 10 / 100
print(f'Código: {p2.codigo} \tNome: {p2.nome} \tPreço R$ {p2.preco:.2f}')
main()
| 42.875 | 133 | 0.623907 | 158 | 1,029 | 4.063291 | 0.259494 | 0.098131 | 0.087227 | 0.068536 | 0.660436 | 0.660436 | 0.629283 | 0.53271 | 0.41433 | 0.41433 | 0 | 0.059041 | 0.209913 | 1,029 | 24 | 134 | 42.875 | 0.730627 | 0.127308 | 0 | 0.2 | 0 | 0.2 | 0.479376 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0 | 0 | 0.25 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
6cd1eedeb6f3bee8fa8a7be3d35140384637a572 | 568 | py | Python | tests/algorithms/gd/test_adamax.py | vishalbelsare/neupy | 684313cdaddcad326f2169384fb15ec3aa29d991 | [
"MIT"
] | null | null | null | tests/algorithms/gd/test_adamax.py | vishalbelsare/neupy | 684313cdaddcad326f2169384fb15ec3aa29d991 | [
"MIT"
] | null | null | null | tests/algorithms/gd/test_adamax.py | vishalbelsare/neupy | 684313cdaddcad326f2169384fb15ec3aa29d991 | [
"MIT"
] | null | null | null | from neupy import algorithms
from data import simple_classification
from base import BaseTestCase
class AdamaxTestCase(BaseTestCase):
def test_simple_adamax(self):
x_train, _, y_train, _ = simple_classification()
mnet = algorithms.Adamax(
(10, 20, 1),
step=.01,
batch_size='full',
verbose=False,
epsilon=1e-8,
beta1=0.9,
beta2=0.999,
)
mnet.train(x_train, y_train, epochs=100)
self.assertAlmostEqual(0.038, mnet.errors.last(), places=3)
| 27.047619 | 67 | 0.598592 | 66 | 568 | 4.984848 | 0.681818 | 0.121581 | 0.042553 | 0.072948 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.063131 | 0.302817 | 568 | 20 | 68 | 28.4 | 0.767677 | 0 | 0 | 0 | 0 | 0 | 0.007042 | 0 | 0 | 0 | 0 | 0 | 0.058824 | 1 | 0.058824 | false | 0 | 0.176471 | 0 | 0.294118 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cd379fefb41f2c248ad774a6765389d5cc95be5 | 5,363 | py | Python | src/sources/house_of_tartan.py | thetartan/tartan-database | 55adcebed4c68c717fbf714d7619da95da6a70db | [
"MIT"
] | 7 | 2016-11-11T06:49:44.000Z | 2021-10-13T03:45:46.000Z | src/sources/house_of_tartan.py | kravets-levko/tartan-database | 55adcebed4c68c717fbf714d7619da95da6a70db | [
"MIT"
] | null | null | null | src/sources/house_of_tartan.py | kravets-levko/tartan-database | 55adcebed4c68c717fbf714d7619da95da6a70db | [
"MIT"
] | 1 | 2018-04-27T08:42:48.000Z | 2018-04-27T08:42:48.000Z | from ..core import Source, log, utils
import re
import json
import requests
re_extract_ids = re.compile(
'onclick="Frm\(\'([0-9]+)\'\)"',
re.IGNORECASE
)
# 'a'..'z'
catalogue_index = [chr(i) for i in range(ord('a'), ord('z') + 1)]
re_extract_attr = re.compile(
'<div class="(title|ftr-hdr|ftr-txt|ftr-cpy)">(.*?)</div>',
re.IGNORECASE | re.DOTALL)
re_extract_pattern = re.compile(
'Tartan\.setup\((".*")\);',
re.IGNORECASE | re.DOTALL)
re_normalize_palette = re.compile(
'([a-z]+)=([0-9a-f]{6})([a-z\s()]*)[;,]',
re.IGNORECASE | re.DOTALL
)
re_normalize_threadcount = re.compile(
'^([a-z]+)([0-9]+([a-z]+[0-9]+)+[a-z]+)([0-9]+)$',
re.IGNORECASE
)
attr_map = {
'title': 'name',
'ftr-hdr': 'overview',
'ftr-txt': 'comment',
'ftr-cpy': 'copyright',
}
def normalize_palette(value):
    # list() is needed under Python 3: map() returns an iterator, which
    # supports neither len() nor append()
    result = list(map(
        lambda v: v[0].upper() + '#' + v[1].upper() + ' ' + v[2],
        re_normalize_palette.findall(value)
    ))
    if len(result) > 0:
        result.append('')
    return '; '.join(result).strip()
def normalize_threadcount(value, reflect=False):
result = re.sub('\s', '', value).strip('.').split('.')
if reflect:
result = map(
lambda v: re.sub(re_normalize_threadcount, '\\1/\\2/\\4', v),
result
)
result = map(
lambda v: re.sub('([0-9]+)', '\\1 ', v).strip(),
result
)
return ' // '.join(filter(len, result)).upper()
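# Hand-traced examples (illustrative sett strings):
#   normalize_threadcount('K4 R24 K24 Y4')             -> 'K4 R24 K24 Y4'
#   normalize_threadcount('K4R24K24Y4', reflect=True)  -> 'K/4 R24 K24 Y/4'
# With reflect=True, the leading and trailing pivot counts are marked with '/'.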
class HouseOfTartan(Source):
id = 'house_of_tartan'
name = 'House of Tartan'
folders = [
'index',
'grabbed'
]
headers = [
('origin_id', 'Origin ID', 'string'),
('category', 'Category', 'string'),
('name', 'Name', 'string'),
('palette', 'Palette', 'string'),
('threadcount', 'Threadcount', 'string'),
('overview', 'Overview', 'string'),
('comment', 'Comment', 'string'),
('copyright', 'Copyright', 'string'),
('origin_url', 'Origin URL', 'string'),
]
resourceAdditionalAttributes = {
'attributes': {
'id': 'origin_id',
'name': 'name',
'category': [
{'fields': 'category'},
{'join': ';'},
{'split': ';'},
'trim',
'filter',
'unique',
{'sort': 'asc'},
],
'description': [
{'fields': ['overview', 'comment', 'copyright']},
'filter',
],
'url': 'origin_url',
'sett': [
{'fields': ['palette', 'threadcount']},
'filter',
{'join': '\n'},
],
'palette': 'palette',
'threadcount': 'threadcount',
'overview': 'overview',
'comment': 'comment',
'copyright': 'copyright',
}
}
host = 'http://www.house-of-tartan.scotland.net'
url = 'http://www.house-of-tartan.scotland.net/'
def get_items(self):
result = []
for letter in catalogue_index:
url = self.host + '/house/' + letter + '.asp'
log.message('Loading ' + letter + '... ', suffix='')
resp = requests.get(url)
log.http_status(resp.status_code, resp.reason, suffix='')
if resp.status_code == 200:
self.file_put('index/' + letter + '.html', resp.content)
ids = re_extract_ids.findall(resp.content)
result += ids
log.message(
' found: ' + log.BOLD + str(len(ids)) + log.END + ' ID(s)',
suffix=''
)
log.newline()
return sorted(map(int, list(set(result))))
def retrieve(self, item):
url = self.host + '/house/TartanViewjs.asp'
params = {'colr': 'Def', 'tnam': item}
log.message('Loading ' + str(item), suffix='... ')
resp = requests.get(url, params=params)
log.http_status(resp.status_code, resp.reason)
return self.process_retrieved(
resp, 'grabbed/' + str(item).zfill(6) + '.html'
)
def extract_items(self, item, context):
log.message('Parsing ' + str(item) + '...')
result = {}
data = self.file_get('grabbed/' + str(item).zfill(6) + '.html')
data = data.decode('utf-8')
attributes = re_extract_attr.findall(data)
for attr in attributes:
result[attr_map[attr[0]]] = utils.cleanup(attr[1])
# Parse category
result['category'] = utils.parse_category_from_name(result['name'])
if result['category'] == '':
result['category'] = 'Other'
result['origin_id'] = str(item)
result['origin_url'] = self.host + '/house/TartanViewjs.asp' + \
'?colr=Def&tnam=' + str(item)
# Parse pattern components
pattern = re_extract_pattern.search(data)
if pattern:
pattern = json.loads('[' + utils.cleanup(pattern.group(1)) + ']')
result['threadcount'] = normalize_threadcount(
pattern[0],
# P - Pivoting, R - Repeating, W - ? (but also repeat)
pattern[2] == 'P')
result['palette'] = normalize_palette(pattern[1])
return [result]
| 28.833333 | 79 | 0.495059 | 550 | 5,363 | 4.738182 | 0.28 | 0.020721 | 0.004605 | 0.023024 | 0.17383 | 0.13891 | 0.056792 | 0.033001 | 0 | 0 | 0 | 0.009259 | 0.315309 | 5,363 | 185 | 80 | 28.989189 | 0.700436 | 0.018833 | 0 | 0.102041 | 0 | 0.006803 | 0.211337 | 0.039186 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034014 | false | 0 | 0.027211 | 0 | 0.14966 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cd386ab342045a23e42596447462146553393d0 | 7,543 | py | Python | valet/utils/core/communicate.py | sadmicrowave/valet | 39724c3f2a49b2253e89044af2b103e3e89d4cd8 | [
"MIT"
] | null | null | null | valet/utils/core/communicate.py | sadmicrowave/valet | 39724c3f2a49b2253e89044af2b103e3e89d4cd8 | [
"MIT"
] | null | null | null | valet/utils/core/communicate.py | sadmicrowave/valet | 39724c3f2a49b2253e89044af2b103e3e89d4cd8 | [
"MIT"
] | null | null | null | ###########################################################################
#
## @file communicate.py
#
###########################################################################
import os, sys, textwrap, subprocess, queue
from . import communicate_pb2_grpc
#from collections.abc import Iterable
from ..canonical.colors import Colors
from ..io.speech.speak import Speak
from ..io.speech.fragments.greeting import Greeting
from ..io.speech.fragments.salutation import Salutation
class Communicate :
"""###########################################################################
A wrapper for Valet to communicate with written or vocalized speech
providing one callable class which handles the delegation of output
context through cli and other i/o registered devices
###########################################################################
"""
#######################################################################
#
## Class level variables
#
#######################################################################
formatted_output = None
listen = True
colors = Colors
###################################################################
#
## Set the parts of speech modules accessible to Valet, allowing
# accessors from the main valet.py to import Speech without
# directly importing all parts of speech
#
###################################################################
greeting = Greeting ( )
salutation = Salutation ( )
def __init__ ( self, valet=None, grpcserver=None, communicationservicer=None, ) :
"""#######################################################################
Initialize communication manager to handle and interact with
calls for output and inputs.
@valet / instance of valet itself to interact with main object
#######################################################################
"""
		_ = self.__configure ( valet, grpcserver, communicationservicer, )
def say ( self, *args, ) -> bool :
"""#######################################################################
Say provides an additional wrapper to invoke both cli write &
vocalized speech with .vocalize - only if vocalization is enabled
within the config file main. Processes list of strings to output
or single string (which gets put into a list)
*args / str|list > string or list of values to print as output
/ from Valet - passed as multiple arguments or single arg
/ list
#######################################################################
"""
for arg in args or ( ) :
item = arg
# Make item into a Queue type if not already
			if not isinstance ( item, queue.Queue, ) :
item = queue.Queue ( )
item.put ( arg )
while not item.empty ( ) :
_current = item.get_nowait ( )
if self.speak.enabled :
self.speak.vocalize ( _current, )
self.__write ( _current, )
return True
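	# Usage sketch (illustrative, assumes a fully configured instance ``c``):
	#   c.say('Hello there')          # writes to the console; vocalizes if enabled
	#   c.say('One', 'Two', 'Three')  # multiple arguments are emitted in order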
def __write ( self, output, _console=None, ) -> bool :
"""#######################################################################
Write the provided output to the provided / configured device
output - this is a print wrapper
@output / str > string value to print as output from Valet
Returns / bool > signifying completion
#######################################################################
"""
( _console or self.console ).fill (
'%s %s%s' % ( self.colors.OKBLUE, output, self.colors.ENDC )
)
return True
def __configure ( self, valet=None, grpcserver=None, communicationservicer=None, ) -> bool :
"""#######################################################################
Perform any setup needed for the class to do its job.
@valet / instance of valet itself to interact with main object
@grpcserver / grpc instance > protobuf server instance for thread management
Returns / bool > signifying completion
#######################################################################
"""
self.valet = valet
if grpcserver :
self.communicationservicer = communicationservicer
communicate_pb2_grpc.add_CommunicateServicer_to_server ( self, grpcserver, )
# super ( Communicate, self, ).__init__ ( )
# self.target = self.say
# self.daemon = daemon
# self.name = self.__class__.__name__
#self.pipeline = queue.Queue ( maxsize= int ( valet.ini.pool['valet']['main']['concurrency_slots'] ), )
self.name = valet.ini.pool['valet']['attributes']['name']
###################################################################
#
## Setup the speech engine to lookup phrases and speech partials
# to return during speech / writing through the communicate
# property
#
###################################################################
		self.speak = Speak ( valet, )
###################################################################
#
## Setup the console properties for the cli and written i/o
#
###################################################################
rows, columns = subprocess.check_output ( ['stty', 'size'] ).split ( )
self.console = textwrap.TextWrapper ( initial_indent = '%s> @%s ⇾' % ( self.colors.OKBLUE, self.name ),
width = int ( columns ),
replace_whitespace = False,
)
return True
# def say ( self, ) :
# """#######################################################################
#
# Say provides an additional wrapper to invoke both cli write &
# vocalized speech with .vocalize - only if vocalization is enabled
# within the config file main. Processes list of strings to output
# or single string (which gets put into a list)
#
# #######################################################################
# """
#
# while True :
#
# try :
#
# # Extract the next item from the communication pipeline
# corpus = self.pipeline.get ( timeout=1, )
#
# if corpus :
#
# self.valet.log.debug ( 'Communicate Corpus Received : %s' % corpus, )
#
# self.__saymapper ( corpus, )
# corpus.task_done ( )
#
#
# except queue.Empty :
#
# continue
#
#
#
# def _saymapper ( self, corpus, ) -> bool :
# """#######################################################################
#
# Iterate over items in communication pipeline item if iterable, and process
# each item as a standalone communication event
#
# @output / str > string value to print as output from Valet
#
# Returns / bool > signifying completion
#
# #######################################################################
# """
#
# if isinstance ( corpus, Iterable ) and not isinstance ( corpus, str ) :
#
# for _corpus in corpus :
#
# self.__saymapper ( _corpus, )
#
# else :
#
# ###################################################################
# #
# ## Write to the stdout window unless otherwise expressed
# #
# ###################################################################
# self._write ( corpus, )
#
# ###################################################################
# #
# ## Only send to speech module to vocalize the corpus if the speak
# # service/functionality is enabled
# #
# ###################################################################
# if self.speak.enabled :
#
# self.speak.vocalize ( corpus, )
#
#
# return True
| 29.464844 | 108 | 0.463343 | 652 | 7,543 | 5.282209 | 0.338957 | 0.013066 | 0.010453 | 0.013066 | 0.24158 | 0.24158 | 0.234611 | 0.184088 | 0.184088 | 0.184088 | 0 | 0.000502 | 0.207477 | 7,543 | 255 | 109 | 29.580392 | 0.575443 | 0.552698 | 0 | 0.069767 | 0 | 0 | 0.021586 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.093023 | false | 0 | 0.139535 | 0 | 0.44186 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cd432537cf723bb57e727d757ab1c0b4f8e3c03 | 3,335 | py | Python | app/routes/dog_routes.py | Kbhlee2121/flasky | 1ddf91cb5cfcc17aa082cfee0b28e30896adfea8 | [
"MIT"
] | null | null | null | app/routes/dog_routes.py | Kbhlee2121/flasky | 1ddf91cb5cfcc17aa082cfee0b28e30896adfea8 | [
"MIT"
] | null | null | null | app/routes/dog_routes.py | Kbhlee2121/flasky | 1ddf91cb5cfcc17aa082cfee0b28e30896adfea8 | [
"MIT"
] | null | null | null | from app import db
from app.models.dog import Dog
from flask import Blueprint, jsonify, make_response, request, abort
import random
dog_bp = Blueprint("dog", __name__, url_prefix="/dogs")
# Helper Functions
def valid_int(number, parameter_type):
try:
number = int(number)
except:
# abort(make_response({"error": "parameter_type must be an int"}, 400))
abort(400, {"error": f"{parameter_type} must be an int"})
def get_dog_from_id(dog_id):
valid_int(dog_id, "dog_id")
# .get_or_404 is an alternative to using if dog is None
return Dog.query.get_or_404(dog_id, description ="{dog not found}")
@dog_bp.route("/<dog_id>/add_chip", methods=["PATCH"])
def add_chip_to_dog(dog_id):
dog = get_dog_from_id(dog_id)
chip = str(random.randint(1000,9999))
dog.chip = chip
db.session.commit()
return (dog.to_dict())
# Create dog and get all dogs
@dog_bp.route("", methods=["GET"])
def read_all_dogs():
age_query = request.args.get("age")
older_query = request.args.get("older")
sort_query = request.args.get("sort")
# Multiple params: dogs?age=2&sort=asc
dogs = Dog.query
if age_query:
valid_int(age_query, "age")
dogs = Dog.query.filter_by(age=age_query)
# query: dogs?older=2
elif older_query:
valid_int(older_query, "older")
dogs = dogs.filter(Dog.age > older_query)
# dogs?sort=asc
if sort_query == "asc":
dogs = dogs.order_by(Dog.age.asc())
elif sort_query == "desc":
dogs = dogs.order_by(Dog.age.desc())
else:
dogs = Dog.query.all()
dogs_response = []
for dog in dogs:
dogs_response.append(
dog.to_dict()
)
return jsonify(dogs_response)
@dog_bp.route("", methods =["POST"])
def create_dog():
request_body = request.get_json()
# request method takes the JSON in the HTTP request and gives us a python dict
if "name" not in request_body or "breed" not in request_body or "age" not in request_body:
return {"error": "incomplete request body"}, 400
new_dog = Dog (
name = request_body["name"],
breed = request_body["breed"],
age = request_body["age"]
)
db.session.add(new_dog)
db.session.commit()
return make_response(new_dog.to_dict(), 201)
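# Example request against the create endpoint (hypothetical local dev server;
# illustrative only):
#   curl -X POST http://localhost:5000/dogs \
#        -H 'Content-Type: application/json' \
#        -d '{"name": "Rex", "breed": "Labrador", "age": 3}'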
# Routes
@dog_bp.route("/<dog_id>/formalize", methods=["PATCH"])
def formalize_dog(dog_id):
dog = get_dog_from_id(dog_id)
dog.name = f"Mx. {dog.name}"
    db.session.commit()  # persist the rename
return jsonify(f"Why hello, {dog.name}!")
@dog_bp.route("/<dog_id>", methods=["GET"])
def get_dog(dog_id):
dog = get_dog_from_id(dog_id)
return dog.to_dict()
@dog_bp.route("/<dog_id>", methods=["PATCH"])
def update_dog(dog_id):
dog = get_dog_from_id(dog_id)
request_body = request.get_json()
if "name" in request_body:
dog.name = request_body["name"]
if "breed" in request_body:
dog.breed = request_body["breed"]
if "age" in request_body:
dog.age = request_body["age"]
db.session.commit()
return jsonify(dog.to_dict())
@dog_bp.route("/<dog_id>", methods =["DELETE"])
def delete_dog(dog_id):
dog = get_dog_from_id(dog_id)
db.session.delete(dog)
db.session.commit()
return jsonify(dog.to_dict(), 204)
| 27.791667 | 94 | 0.647376 | 504 | 3,335 | 4.049603 | 0.210317 | 0.046546 | 0.024008 | 0.035277 | 0.320431 | 0.215091 | 0.130818 | 0.130818 | 0.098971 | 0.068594 | 0 | 0.011796 | 0.211994 | 3,335 | 119 | 95 | 28.02521 | 0.76484 | 0.111844 | 0 | 0.130952 | 0 | 0 | 0.101356 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0 | 0.047619 | 0 | 0.261905 | 0.02381 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cd7906a50836e3522d33376e9729ff7ca341359 | 403 | py | Python | sorting/bubble_sort.py | rpg711/Interview-Prep | 2d12a11738d4c709bc593dcfdbf54d9f92141d61 | [
"MIT"
] | null | null | null | sorting/bubble_sort.py | rpg711/Interview-Prep | 2d12a11738d4c709bc593dcfdbf54d9f92141d61 | [
"MIT"
] | null | null | null | sorting/bubble_sort.py | rpg711/Interview-Prep | 2d12a11738d4c709bc593dcfdbf54d9f92141d61 | [
"MIT"
] | null | null | null | def bubble_sort(A, compare_func):
if len(A) <= 1:
return
swapoccurred = True
while swapoccurred:
swapoccurred = False
for i in range(len(A) - 1):
if compare_func(A[i], A[i+1]):
A[i], A[i+1] = A[i+1], A[i]
swapoccurred = True
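# Note: compare_func defines the *swap* condition, so ``lambda a, b: a < b``
# sorts in descending order and ``lambda a, b: a > b`` sorts ascending.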
if __name__ == '__main__':
A = [5,1,4,2,8]
bubble_sort(A, lambda a,b: a < b)
    print(A)
| 26.866667 | 43 | 0.503722 | 63 | 403 | 3.031746 | 0.428571 | 0.062827 | 0.04712 | 0.062827 | 0.078534 | 0.062827 | 0.062827 | 0 | 0 | 0 | 0 | 0.037879 | 0.344913 | 403 | 15 | 44 | 26.866667 | 0.685606 | 0 | 0 | 0.142857 | 0 | 0 | 0.019802 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0 | 0 | 0.142857 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
6cd861724dfed64644d3ee4472a173f378bc5c7a | 16,863 | py | Python | goblin/session.py | brean/goblin | c61177de7f0e59661fbe74531ea4cb58558a1f31 | [
"Apache-2.0"
] | null | null | null | goblin/session.py | brean/goblin | c61177de7f0e59661fbe74531ea4cb58558a1f31 | [
"Apache-2.0"
] | null | null | null | goblin/session.py | brean/goblin | c61177de7f0e59661fbe74531ea4cb58558a1f31 | [
"Apache-2.0"
] | null | null | null | """Main OGM API classes and constructors"""
import asyncio
import collections
import logging
import weakref
import aiogremlin
from aiogremlin.driver.protocol import Message
from aiogremlin.driver.resultset import ResultSet
from aiogremlin.process.graph_traversal import __
from gremlin_python.process.traversal import T
from gremlin_python.driver.remote_connection import RemoteTraversal
from gremlin_python.process.traversal import Binding, Cardinality, Traverser
from gremlin_python.structure.graph import Edge, Vertex
from goblin import exception, mapper
from goblin.element import GenericEdge, GenericVertex, VertexProperty
from goblin.manager import VertexPropertyManager
logger = logging.getLogger(__name__)
def bindprop(element_class, ogm_name, val, *, binding=None):
"""
Helper function for binding ogm properties/values to corresponding db
properties/values for traversals.
:param goblin.element.Element element_class: User defined element class
:param str ogm_name: Name of property as defined in the ogm
:param val: The property value
:param str binding: The binding for val (optional)
:returns: tuple object ('db_property_name', ('binding(if passed)', val))
"""
db_name = getattr(element_class, ogm_name, ogm_name)
_, data_type = element_class.__mapping__.ogm_properties[ogm_name]
val = data_type.to_db(val)
if binding:
val = (binding, val)
return db_name, val
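# Usage sketch (illustrative; ``Person`` is a hypothetical user-defined
# vertex class with a ``name`` property):
#
#   db_name, value = bindprop(Person, 'name', 'Leif', binding='v1')
#   traversal = session.g.V().has(db_name, value)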
class Session:
"""
Provides the main API for interacting with the database. Does not
    necessarily correspond to a database session. Don't instantiate directly,
instead use :py:meth:`Goblin.session<goblin.app.Goblin.session>`.
:param goblin.app.Goblin app:
:param aiogremlin.driver.connection.Connection conn:
"""
def __init__(self, app, remote_connection, get_hashable_id):
self._app = app
self._remote_connection = remote_connection
self._loop = self._app._loop
self._use_session = False
self._pending = collections.deque()
self._current = dict()
self._get_hashable_id = get_hashable_id
self._graph = aiogremlin.Graph()
@property
def graph(self):
return self._graph
@property
def app(self):
return self._app
@property
def remote_connection(self):
return self._remote_connection
@property
def current(self):
return self._current
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
self.close()
def close(self):
"""
"""
self._remote_connection = None
self._app = None
# Traversal API
@property
def g(self):
"""
Get a simple traversal source.
:returns:
`gremlin_python.process.GraphTraversalSource`
object
"""
return self.traversal()
@property
def _g(self):
"""
        Traversal source for internal use. Uses underlying conn. Doesn't
        trigger complex deserialization.
"""
return self.graph.traversal().withRemote(self.remote_connection)
def traversal(self, element_class=None):
"""
Generate a traversal using a user defined element class as a
starting point.
:param goblin.element.Element element_class: An optional element
class that will dictate the element type (vertex/edge) as well as
the label for the traversal source
:returns: `aiogremlin.process.graph_traversal.AsyncGraphTraversal`
"""
traversal = self.graph.traversal().withRemote(self)
if element_class:
label = element_class.__mapping__.label
if element_class.__type__ == 'vertex':
traversal = traversal.V()
if element_class.__type__ == 'edge':
traversal = traversal.E()
traversal = traversal.hasLabel(label)
return traversal
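    # Usage sketch (illustrative, given a hypothetical ``Person`` element
    # class registered with the app):
    #   people = await session.traversal(Person).toList()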
async def submit(self, bytecode):
"""
        Submit a query to the Gremlin Server.
:param str gremlin: Gremlin script to submit to server.
:param dict bindings: A mapping of bindings for Gremlin script.
:returns:
`gremlin_python.driver.remove_connection.RemoteTraversal`
object
"""
await self.flush()
remote_traversal = await self.remote_connection.submit(bytecode)
traversers = remote_traversal.traversers
side_effects = remote_traversal.side_effects
result_set = ResultSet(traversers.request_id, traversers._timeout,
self._loop)
self._loop.create_task(self._receive(traversers, result_set))
return RemoteTraversal(result_set, side_effects)
async def _receive(self, traversers, result_set):
try:
async for result in traversers:
result = await self._deserialize_result(result)
msg = Message(200, result, '')
result_set.queue_result(msg)
except Exception as e:
msg = Message(500, None, e.args[0])
result_set.queue_result(msg)
finally:
result_set.queue_result(None)
async def _deserialize_result(self, result):
if isinstance(result, Traverser):
bulk = result.bulk
obj = result.object
if isinstance(obj, (Vertex, Edge)):
hashable_id = self._get_hashable_id(obj.id)
current = self.current.get(hashable_id, None)
if isinstance(obj, Vertex):
# why doesn't this come in on the vertex?
label = await self._g.V(obj.id).label().next()
if not current:
current = self.app.vertices.get(label, GenericVertex)()
props = await self._get_vertex_properties(obj.id, label)
if isinstance(obj, Edge):
props = await self._g.E(obj.id).valueMap(True).next()
if not current:
current = self.app.edges.get(
props.get(T.label), GenericEdge)()
current.source = GenericVertex()
current.target = GenericVertex()
element = current.__mapping__.mapper_func(obj, props, current)
self.current[hashable_id] = element
return Traverser(element, bulk)
else:
return result
        # Recursively deserialize nested results (must be awaited, since
        # _deserialize_result is a coroutine)
        elif isinstance(result, dict):
            for key in result:
                result[key] = await self._deserialize_result(result[key])
            return result
        elif isinstance(result, list):
            return [await self._deserialize_result(item) for item in result]
else:
return result
async def _get_vertex_properties(self, vid, label):
projection = self._g.V(vid).properties() \
.project('id', 'key', 'value', 'meta') \
.by(__.id()).by(__.key()).by(__.value()) \
.by(__.valueMap())
props = await projection.toList()
new_props = {'label': label, 'id': vid}
for prop in props:
key = prop['key']
val = prop['value']
meta = prop['meta']
new_props.setdefault(key, [])
if meta:
meta['key'] = key
meta['value'] = val
meta['id'] = prop['id']
val = meta
new_props[key].append(val)
return new_props
# Creation API
def add(self, *elements):
"""
Add elements to session pending queue.
:param goblin.element.Element elements: Elements to be added
"""
for elem in elements:
self._pending.append(elem)
async def flush(self):
"""
Issue creation/update queries to database for all elements in the
session pending queue.
"""
while self._pending:
elem = self._pending.popleft()
await self.save(elem)
async def remove_vertex(self, vertex):
"""
Remove a vertex from the db.
:param goblin.element.Vertex vertex: Vertex to be removed
"""
traversal = self._g.V(Binding('vid', vertex.id)).drop()
result = await self._simple_traversal(traversal, vertex)
hashable_id = self._get_hashable_id(vertex.id)
if hashable_id in self.current:
vertex = self.current.pop(hashable_id)
else:
msg = 'Vertex {} does not belong to this session obj {}'.format(
vertex, self)
logger.warning(msg)
del vertex
return result
async def remove_edge(self, edge):
"""
Remove an edge from the db.
:param goblin.element.Edge edge: Element to be removed
"""
eid = edge.id
if isinstance(eid, dict):
eid = Binding('eid', edge.id)
traversal = self._g.E(eid).drop()
result = await self._simple_traversal(traversal, edge)
hashable_id = self._get_hashable_id(edge.id)
if hashable_id in self.current:
edge = self.current.pop(hashable_id)
else:
msg = 'Edge {} does not belong to this session obj {}'.format(
edge, self)
logger.warning(msg)
del edge
return result
async def save(self, elem):
"""
Save an element to the db.
:param goblin.element.Element element: Vertex or Edge to be saved
:returns: :py:class:`Element<goblin.element.Element>` object
"""
if elem.__type__ == 'vertex':
result = await self.save_vertex(elem)
elif elem.__type__ == 'edge':
result = await self.save_edge(elem)
else:
raise exception.ElementError("Unknown element type: {}".format(
elem.__type__))
return result
async def save_vertex(self, vertex):
"""
Save a vertex to the db.
:param goblin.element.Vertex element: Vertex to be saved
:returns: :py:class:`Vertex<goblin.element.Vertex>` object
"""
result = await self._save_element(
vertex, self._check_vertex, self._add_vertex, self._update_vertex)
hashable_id = self._get_hashable_id(result.id)
self.current[hashable_id] = result
return result
async def save_edge(self, edge):
"""
Save an edge to the db.
:param goblin.element.Edge element: Edge to be saved
:returns: :py:class:`Edge<goblin.element.Edge>` object
"""
if not (hasattr(edge, 'source') and hasattr(edge, 'target')):
raise exception.ElementError(
"Edges require both source/target vertices")
result = await self._save_element(edge, self._check_edge,
self._add_edge, self._update_edge)
hashable_id = self._get_hashable_id(result.id)
self.current[hashable_id] = result
return result
async def get_vertex(self, vertex):
"""
Get a vertex from the db. Vertex must have id.
:param goblin.element.Vertex element: Vertex to be retrieved
:returns: :py:class:`Vertex<goblin.element.Vertex>` | None
"""
return await self.g.V(Binding('vid', vertex.id)).next()
async def get_edge(self, edge):
"""
Get a edge from the db. Edge must have id.
:param goblin.element.Edge element: Edge to be retrieved
:returns: :py:class:`Edge<goblin.element.Edge>` | None
"""
eid = edge.id
if isinstance(eid, dict):
eid = Binding('eid', edge.id)
return await self.g.E(eid).next()
async def _update_vertex(self, vertex):
"""
Update a vertex, generally to change/remove property values.
:param goblin.element.Vertex vertex: Vertex to be updated
:returns: :py:class:`Vertex<goblin.element.Vertex>` object
"""
props = mapper.map_props_to_db(vertex, vertex.__mapping__)
traversal = self._g.V(Binding('vid', vertex.id))
return await self._update_vertex_properties(vertex, traversal, props)
async def _update_edge(self, edge):
"""
Update an edge, generally to change/remove property values.
:param goblin.element.Edge edge: Edge to be updated
:returns: :py:class:`Edge<goblin.element.Edge>` object
"""
props = mapper.map_props_to_db(edge, edge.__mapping__)
eid = edge.id
if isinstance(eid, dict):
eid = Binding('eid', edge.id)
traversal = self._g.E(eid)
return await self._update_edge_properties(edge, traversal, props)
    # Private helper methods for the creation API
async def _simple_traversal(self, traversal, element):
elem = await traversal.next()
if elem:
if element.__type__ == 'vertex':
# Look into this
label = await self._g.V(elem.id).label().next()
props = await self._get_vertex_properties(elem.id, label)
elif element.__type__ == 'edge':
props = await self._g.E(elem.id).valueMap(True).next()
elem = element.__mapping__.mapper_func(elem, props, element)
return elem
async def _save_element(self, elem, check_func, create_func, update_func):
if hasattr(elem, 'id'):
exists = await check_func(elem)
if not exists:
result = await create_func(elem)
else:
result = await update_func(elem)
else:
result = await create_func(elem)
return result
async def _add_vertex(self, vertex):
"""Convenience function for generating crud traversals."""
props = mapper.map_props_to_db(vertex, vertex.__mapping__)
traversal = self._g.addV(vertex.__mapping__.label)
return await self._add_properties(traversal, props, vertex)
async def _add_edge(self, edge):
"""Convenience function for generating crud traversals."""
props = mapper.map_props_to_db(edge, edge.__mapping__)
traversal = self._g.V(Binding('sid', edge.source.id))
traversal = traversal.addE(edge.__mapping__._label)
traversal = traversal.to(__.V(Binding('tid', edge.target.id)))
return await self._add_properties(traversal, props, edge)
async def _check_vertex(self, vertex):
"""Used to check for existence, does not update session vertex"""
msg = await self._g.V(Binding('vid', vertex.id)).next()
return msg
async def _check_edge(self, edge):
"""Used to check for existence, does not update session edge"""
eid = edge.id
if isinstance(eid, dict):
eid = Binding('eid', edge.id)
return await self._g.E(eid).next()
async def _update_vertex_properties(self, vertex, traversal, props):
await self._g.V(vertex.id).properties().drop().iterate()
return await self._add_properties(traversal, props, vertex)
async def _update_edge_properties(self, edge, traversal, props):
await self._g.E(edge.id).properties().drop().iterate()
return await self._add_properties(traversal, props, edge)
async def _add_properties(self, traversal, props, elem):
binding = 0
for card, db_name, val, metaprops in props:
if not metaprops:
metaprops = {}
if val is not None:
key = ('k' + str(binding), db_name)
val = ('v' + str(binding), val)
if card:
# Maybe use a dict here as a translator
if card == Cardinality.list_:
card = Cardinality.list_
elif card == Cardinality.set_:
card = Cardinality.set_
else:
card = Cardinality.single
metas = [
j
for i in zip(metaprops.keys(), metaprops.values())
for j in i
]
traversal = traversal.property(card, key, val, *metas)
else:
metas = [
j
for i in zip(metaprops.keys(), metaprops.values())
for j in i
]
traversal = traversal.property(key, val, *metas)
binding += 1
return await self._simple_traversal(traversal, elem)
| 36.032051 | 79 | 0.592896 | 1,921 | 16,863 | 5.022384 | 0.141593 | 0.027052 | 0.022388 | 0.014511 | 0.350228 | 0.298404 | 0.250933 | 0.201907 | 0.148839 | 0.11619 | 0 | 0.000777 | 0.313349 | 16,863 | 467 | 80 | 36.109208 | 0.832455 | 0.094289 | 0 | 0.221453 | 0 | 0 | 0.022 | 0 | 0 | 0 | 0 | 0.002141 | 0 | 1 | 0.038062 | false | 0 | 0.051903 | 0.013841 | 0.211073 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cd8d4430b71d1605ae8e8e1181821f85d3d6750 | 8,108 | py | Python | routes/fa12_route.py | hicetnunc2000/- | 11810bb682a5800b5ca079a4e803519297c9db55 | [
"MIT"
] | 11 | 2020-10-27T16:36:07.000Z | 2021-07-11T15:38:03.000Z | routes/fa12_route.py | hicetnunc2000/tezos-storage-flask | 11810bb682a5800b5ca079a4e803519297c9db55 | [
"MIT"
] | 2 | 2020-11-19T20:28:28.000Z | 2020-12-08T20:32:43.000Z | routes/fa12_route.py | hicetnunc2000/tezos-storage-flask | 11810bb682a5800b5ca079a4e803519297c9db55 | [
"MIT"
] | 5 | 2021-03-06T02:06:39.000Z | 2021-11-12T21:53:32.000Z | # @crzypatchwork
from flask import Blueprint, request, session
from pytezos import Contract
from pytezos import pytezos
from pytezos.operation.result import OperationResult
from flask import Flask
from flask_restx import fields, Resource, Api, Namespace
from controllers.validate import Validate
import distutils.util
import requests
import urllib
import json
pytz = pytezos  # module-level fallback client; endpoints rebind pytz via v.load_keystore() when no session key exists
OperationResult = OperationResult
v = Validate()
api = Namespace('fa12', description='publish and other entrypoints')
@api.route('/publish')
@api.doc(params={
'forge': 'boolean',
'admin': 'admin tz address',
'total_supply': 'total supply of tokens'
})
class publish_fa12(Resource):
def post(self):
payload = v.read_requests(request)
        sess = v.read_session(session)
        if sess is False:
            pytz = v.load_keystore()
        if payload['forge'] == True:
            pass
contract = Contract.from_file('./smart_contracts/fa12.tz')
op = pytz.origination(script=contract.script(storage={'ledger': {
}, 'admin': payload['admin'], 'paused': False, 'totalSupply': payload['total_supply']})).autofill().sign().inject(_async=False, num_blocks_wait=2)
return OperationResult.originated_contracts(op)
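# Illustrative request payload for /publish (keys as documented above; the
# address value is a placeholder):
#   {"forge": false, "admin": "tz1...", "total_supply": 1000}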
@api.route('/transfer')
@api.doc(params={
'forge': 'boolean',
'contract': 'fa12 contract address',
'from': 'public key hash',
'to': 'public key hash',
'value': 'amount (nat)'
})
class transfer_fa12(Resource):
def post(self):
try:
payload = v.read_requests(request)
sess = v.read_session(session)
            if sess is False:
                pytz = v.load_keystore()
            if payload['forge'] == True:
                pass
            ci = pytz.contract(payload['contract'])
            r = ci.transfer({"from": payload['from'], "to": payload['to'],
                             "value": int(payload['value'])}).inject()
return r
except:
return 500
@api.route('/approve')
@api.doc(params={
'forge': 'boolean',
'contract': 'fa12 KT contract address',
'spender': 'tz address',
'value': 'value (nat)'
})
class approve_fa12(Resource):
def post(self):
try:
payload = v.read_requests(request)
sess = v.read_session(session)
if sess is False:
pytz = v.load_keystore()
            if payload['forge'] == True:
                pass
            ci = pytz.contract(payload['contract'])
r = ci.approve(
{"spender": payload['spender'], "value": int(payload['value'])}).inject()
return r
except:
return 500
@api.route('/get_allowance')
@api.doc(params={
'forge': 'boolean',
'contract': 'fa12 KT contract address',
'owner': 'tz address',
'spender': 'tz address'
# 'contract_2': 'response view KT address'
})
class get_allowance_fa12(Resource):
def post(self):
payload = v.read_requests(request)
sess = v.read_session(session)
if sess is False:
pytz = v.load_keystore()
if payload['forge'] == True:
pass
ci = pytz.contract(payload['contract'])
        j = {}
        j['owner'] = payload['owner']
        # Allowance is read from the owner's ledger entry (assumption based on
        # the get_balance handler below, which uses ci.big_map_get(owner)):
        j['approvals'] = {
            payload['spender']: ci.big_map_get(
                payload['owner'])['approvals'][payload['spender']]
        }
        return j
# Maintenance view
@api.route("/get_balance")
@api.doc(params={
'forge': 'boolean',
'contract': 'fa12 KT contract address',
'owner': 'tz address'
# 'contract_1': 'callback KT address'
})
# "contract_1": $contract (nat)
class get_balance_fa12(Resource):
def post(self):
try:
payload = v.read_requests(request)
sess = v.read_session(session)
if sess is False:
pytz = v.load_keystore()
if payload['forge'] == True:
pass
ci = pytz.contract(payload['contract'])
j = {}
j['owner'] = payload['owner']
j['balance'] = ci.big_map_get(payload['owner'])['balance']
return j
except:
return 500
# Maintenance view
@api.route('/get_total_supply')
@api.doc(params={
'forge': 'boolean',
'contract': 'fa12 KT contract address'
})
class get_total_supply_fa12(Resource):
def post(self):
try:
payload = v.read_requests(request)
sess = v.read_session(session)
if sess is False:
pytz = v.load_keystore()
if payload['forge'] == True:
pass
ci = pytz.contract(payload['contract'])
r = {}
aux = ci.storage()
r['total_supply'] = aux['totalSupply']
return r
except:
return 500
@api.route('/set_pause')
@api.doc(params={
'forge': 'boolean',
"contract": "fa12 KT contract address",
"bool": "boolean True or False"
})
class set_pause_fa12(Resource):
def post(self):
try:
payload = v.read_requests(request)
sess = v.read_session(session)
if sess is False:
pytz = v.load_keystore()
if payload['forge'] == True:
pass
ci = pytz.contract(payload['contract'])
r = ci.setPause(
bool(distutils.util.strtobool(payload['bool']))).inject()
return r
except:
return 500
@api.route('/set_administrator')
@api.doc(params={
'forge': 'boolean',
'contract': 'fa12 KT contract address',
'adm': 'tz address'
})
class set_administrator_fa12(Resource):
def post(self):
try:
payload = v.read_requests(request)
sess = v.read_session(session)
if sess is False:
pytz = v.load_keystore()
if payload['forge'] == True:
pass
ci = pytz.contract(payload['contract'])
r = ci.setAdministrator(payload['adm']).inject()
return r
except:
return 500
# Maintenance
# Get administrator
@api.route('/get_administrator')
@api.doc(params={
'forge': 'boolean',
    'contract': 'fa12 KT contract address'
})
class get_administrator_fa12(Resource):
def post(self):
try:
payload = v.read_requests(request)
sess = v.read_session(session)
if sess is False:
pytz = v.load_keystore()
if payload['forge'] == True:
pass
ci = pytz.contract(payload['contract'])
r = {}
aux = ci.storage()
r['admin'] = aux['admin']
return r
except:
return 500
@api.route('/mint')
@api.doc(params={
'forge': 'boolean',
'contract': 'fa12 kt address',
'to': 'tz address destination',
'value': 'nat'
})
class mint_fa12(Resource):
def post(self):
try:
payload = v.read_requests(request)
sess = v.read_session(session)
if sess is False:
pytz = v.load_keystore()
            print(payload)
            if payload['forge'] == True:
                pass
ci = pytz.contract(payload['contract'])
r = ci.mint(
{"to": payload['to'], "value": int(payload['value'])}).inject()
return r
except:
return 500
@api.route('/burn')
@api.doc(params={
'forge': 'boolean',
'contract': 'fa12 kt address',
'from': 'tz address',
'value': 'nat'
})
class burn_fa12(Resource):
def post(self):
try:
payload = v.read_requests(request)
sess = v.read_session(session)
if sess is False:
pytz = v.load_keystore()
if payload['forge'] == True:
pass
ci = pytz.contract(payload['contract'])
r = ci.burn(
{"from": payload['from'], "value": int(payload['value'])}).inject()
return r
except:
return 500
| 24.947692 | 154 | 0.540824 | 872 | 8,108 | 4.942661 | 0.142202 | 0.025522 | 0.030626 | 0.043387 | 0.640835 | 0.62993 | 0.592575 | 0.569838 | 0.553132 | 0.523202 | 0 | 0.01402 | 0.322644 | 8,108 | 324 | 155 | 25.024691 | 0.770757 | 0.03663 | 0 | 0.681633 | 0 | 0 | 0.160062 | 0.003206 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044898 | false | 0.040816 | 0.044898 | 0 | 0.216327 | 0.004082 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cd9f0184447f45b3373feb9a260b5d46e898c01 | 7,760 | py | Python | tests/test_annotate.py | BookLaugh/serialization | a3ff87aa2cd5b3322daee7ebee1e025783438b46 | [
"MIT"
] | 12 | 2016-03-01T15:04:08.000Z | 2020-11-23T14:49:32.000Z | tests/test_annotate.py | BookLaugh/serialization | a3ff87aa2cd5b3322daee7ebee1e025783438b46 | [
"MIT"
] | 1 | 2020-11-23T15:31:24.000Z | 2020-11-23T19:46:50.000Z | tests/test_annotate.py | BookLaugh/serialization | a3ff87aa2cd5b3322daee7ebee1e025783438b46 | [
"MIT"
] | 2 | 2020-11-23T14:45:38.000Z | 2020-11-23T17:03:48.000Z | # F3AT - Flumotion Asynchronous Autonomous Agent Toolkit
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# See "LICENSE.GPL" in the source distribution for more information.
# Headers in this file shall remain intact.
# -*- coding: utf-8 -*-
# -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
from __future__ import absolute_import
from future.utils import PY3
from six import iterkeys
from serialization import annotate
class GoodDummy(annotate.Annotable):
'''Reference for the following test.'''
annotate.injectClassCallback("dummy", 2, "good_method")
@classmethod
def good_method(cls):
pass
def accompany(accompaniment):
'''Method decorator'''
def decorator(method):
def get_accompaniment(self, *args, **kwargs):
            # Create a method for the accompaniment
return self.name + " wants " + accompaniment
# Inject the new method in the class
annotate.injectAttribute("accompany", 3,
accompaniment, get_accompaniment)
# Inject the original method with a new name
annotate.injectAttribute("accompany", 3,
"original_" + method.__name__, method)
def wrapper(self, *args, **kwargs):
            # Wrap a method call and add an accompaniment to its result
result = method(self, *args, **kwargs)
return result + " and " + accompaniment
# Call the class to register the decorator
annotate.injectClassCallback("accompany", 3, "_decorator",
accompaniment, method, wrapper)
return wrapper
return decorator
def shop(animal, status):
'''Class annotation. Create a getter method'''
def getter(self):
return self.name + " " + animal + " is " + status
annotate.injectAttribute("shop", 3, "get_" + animal, getter)
return status
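# For example, shop("parrot", "dead") below injects a get_parrot() method on
# the class, returning "<name> parrot is dead" (exercised in testAnnotations).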
class Annotated(annotate.Annotable):
class_init = False
obj_init = False
accompaniments = {}
# Annotations
shop("parrot", "dead")
shop("slug", "mute")
@classmethod
def __class__init__(cls, name, bases, dct):
cls.class_init = True
@classmethod
def _decorator(cls, accompaniment, old, new):
cls.accompaniments[accompaniment] = (old, new)
def __init__(self, name):
self.obj_init = True
self.name = name
@accompany("beans")
def spam(self, kind):
return self.name + " like " + kind + " spam"
@accompany("eggs")
def bacon(self, kind):
return self.name + " like " + kind + " bacon"
try:
bad_annotation_method_fail = False
class BadDummy(annotate.Annotable):
'''Reference for the following test.'''
annotate.injectClassCallback("dummy", 2, "wrong_method")
@classmethod
def good_method(cls):
pass
except annotate.AnnotationError:
bad_annotation_method_fail = True
def mixin(fun):
annotate.injectClassCallback("mixin", 3, "_register", fun)
return fun
class MixinTestBase(annotate.Annotable):
values = None
@classmethod
def __class__init__(cls, name, bases, dct):
values = dict()
for base in [cls] + list(bases):
parent_values = getattr(base, "values", None)
if parent_values:
values.update(parent_values)
cls.values = values
@classmethod
def _register(cls, value):
if cls.values is None:
cls.values = dict()
assert value not in cls.values, "Values are: %r" % (cls.values, )
cls.values[value] = cls
@mixin
def first_annotation(self):
pass
class MixinTestMixin(object):
@mixin
def mixin_annotation(self):
pass
@mixin
def overloaded_annotation(self):
'''this is to test overloading annotated methods'''
class MixinTestDummy(MixinTestBase, MixinTestMixin):
@mixin
def second_annotation(self):
pass
@mixin
def overloaded_annotation(self):
'''this is to test overloading annotated methods'''
class TestAnnotation(object):
def test_mix_in(self):
assert (
set(iterkeys(MixinTestBase.values)) ==
set([_get_func(MixinTestBase.first_annotation)])
)
assert (
set(iterkeys(MixinTestDummy.values)) ==
set([
_get_func(MixinTestBase.first_annotation),
_get_func(MixinTestDummy.second_annotation),
_get_func(MixinTestDummy.overloaded_annotation),
_get_func(MixinTestMixin.mixin_annotation),
_get_func(MixinTestMixin.overloaded_annotation),
])
)
# now check that the _register call has been done with correct cls
# as the parameter
assert not hasattr(MixinTestMixin, "values")
def get_cls(fun):
return MixinTestDummy.values.get(_get_func(fun))
assert (
MixinTestBase ==
get_cls(MixinTestBase.first_annotation))
assert (
MixinTestDummy ==
get_cls(MixinTestDummy.second_annotation))
assert (
MixinTestDummy ==
get_cls(MixinTestDummy.overloaded_annotation))
assert (
MixinTestDummy ==
get_cls(MixinTestMixin.overloaded_annotation))
assert (
MixinTestDummy ==
get_cls(MixinTestMixin.mixin_annotation))
def testMetaErrors(self):
assert bad_annotation_method_fail
def testInitialization(self):
assert Annotated.class_init
assert not Annotated.obj_init
obj = Annotated("Monthy")
assert obj.class_init
assert obj.obj_init
def testAnnotations(self):
assert hasattr(Annotated, "get_parrot")
assert hasattr(Annotated, "get_slug")
obj = Annotated("Monthy")
assert hasattr(obj, "get_parrot")
assert hasattr(obj, "get_slug")
assert "Monthy parrot is dead" == obj.get_parrot()
assert "Monthy slug is mute" == obj.get_slug()
def testDecorator(self):
assert hasattr(Annotated, "spam")
assert hasattr(Annotated, "bacon")
assert hasattr(Annotated, "original_spam")
assert hasattr(Annotated, "original_bacon")
assert hasattr(Annotated, "beans")
assert hasattr(Annotated, "eggs")
assert "beans" in Annotated.accompaniments
assert "eggs" in Annotated.accompaniments
obj = Annotated("Monthy")
assert "Monthy like a lot of spam and beans" == obj.spam("a lot of")
assert "Monthy like so much bacon and eggs" == obj.bacon("so much")
assert "Monthy like a lot of spam" == obj.original_spam("a lot of")
assert "Monthy like so much bacon" == obj.original_bacon("so much")
assert "Monthy wants beans" == obj.beans()
assert "Monthy wants eggs" == obj.eggs()
def _get_func(f):
if PY3:
return f
else:
return f.__func__
| 28.955224 | 76 | 0.633763 | 872 | 7,760 | 5.511468 | 0.269495 | 0.02705 | 0.036621 | 0.027466 | 0.229713 | 0.217228 | 0.184769 | 0.102788 | 0.086975 | 0.086975 | 0 | 0.006039 | 0.274485 | 7,760 | 267 | 77 | 29.06367 | 0.847602 | 0.194072 | 0 | 0.231707 | 0 | 0 | 0.084907 | 0 | 0 | 0 | 0 | 0 | 0.207317 | 1 | 0.170732 | false | 0.030488 | 0.02439 | 0.030488 | 0.335366 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cdfe4e21850718998221c09a4f2c81fea03da87 | 2,138 | py | Python | brew_thermometer/aws_iot_reporter.py | ksletmoe/brew_thermometer | 740cee8e237d401ae0f5d97bd1f4916969365b6b | [
"BSD-3-Clause"
] | null | null | null | brew_thermometer/aws_iot_reporter.py | ksletmoe/brew_thermometer | 740cee8e237d401ae0f5d97bd1f4916969365b6b | [
"BSD-3-Clause"
] | null | null | null | brew_thermometer/aws_iot_reporter.py | ksletmoe/brew_thermometer | 740cee8e237d401ae0f5d97bd1f4916969365b6b | [
"BSD-3-Clause"
] | null | null | null | from paho.mqtt import client as mqtt
import ssl
from brew_thermometer.errors import ReporterError
import json
class AwsIotReporter:
def __init__(self, config_hash, logger):
self._logger = logger.getChild("AwsIotReporter")
self._broker_host = config_hash["host"]
self._broker_port = config_hash["port"]
self._topic = config_hash["topic_name"]
self._ca_cert_path = config_hash["certificate_authority_cert_file_path"]
self._cert_file_path = config_hash["cert_file_path"]
self._private_key_path = config_hash["private_key_path"]
self._client = mqtt.Client()
self._client.on_connect = self._on_connect
self._client.on_disconnect = self._on_disconnect
self._connected = False
def publish_payload(self, payload_hash):
payload_str = json.dumps(payload_hash)
self._ensure_connection()
res, _ = self._client.publish(self._topic, payload=payload_str, qos=1)
if res == mqtt.MQTT_ERR_SUCCESS:
return True
else:
self._logger.error("Error publishing message '%s': %s", payload_str, str(res))
return False
def _ensure_connection(self):
if self._connected is False:
self._client.tls_set(self._ca_cert_path, certfile=self._cert_file_path, keyfile=self._private_key_path,
cert_reqs=ssl.CERT_REQUIRED, tls_version=ssl.PROTOCOL_TLSv1_2, ciphers=None)
self._client.connect(self._broker_host, self._broker_port, keepalive=60)
self._client.loop_start()
self._connected = True
def _on_connect(self, client, userdata, flags, rc):
if rc != 0:
raise ReporterError("Error connecting to AWS IOT service: {0}".format(str(rc)))
else:
self._logger.info("Connected: %s", str(rc))
def _on_disconnect(self, client, userdata, rc):
if rc != 0: # unexpected disconnect
self._logger.error("Unexpected AWS IOT service disconnect: %s -- Reconnecting...", str(rc))
else:
self._logger.info("Disconnected: %s", str(rc))
| 40.339623 | 115 | 0.658559 | 268 | 2,138 | 4.902985 | 0.335821 | 0.068493 | 0.03653 | 0.027397 | 0.035008 | 0.035008 | 0 | 0 | 0 | 0 | 0 | 0.004917 | 0.239008 | 2,138 | 52 | 116 | 41.115385 | 0.802704 | 0.009822 | 0 | 0.116279 | 0 | 0 | 0.12299 | 0.017029 | 0 | 0 | 0 | 0 | 0 | 1 | 0.116279 | false | 0 | 0.093023 | 0 | 0.27907 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6ce2af899c72663acbd5323a33647b4be50b34f9 | 4,905 | py | Python | tests/test_utils_hyper_params_factory.py | gcattan/pyRiemann-qiskit | a53f2f891f4d8726b97ed8f0baaf89f86b5ea731 | [
"BSD-3-Clause"
] | 7 | 2022-01-10T19:19:59.000Z | 2022-02-21T20:13:24.000Z | tests/test_utils_hyper_params_factory.py | toncho11/pyRiemann-qiskit | 93c11801127ef8d80c9e94ea0f31549a6b863238 | [
"BSD-3-Clause"
] | 28 | 2021-09-27T11:53:29.000Z | 2022-03-29T08:39:55.000Z | tests/test_utils_hyper_params_factory.py | toncho11/pyRiemann-qiskit | 93c11801127ef8d80c9e94ea0f31549a6b863238 | [
"BSD-3-Clause"
] | 2 | 2021-09-25T17:04:23.000Z | 2022-02-07T16:34:20.000Z | import pytest
from pyriemann_qiskit.utils.hyper_params_factory import (gen_zz_feature_map,
gen_two_local, gates,
get_spsa)
class TestGenZZFeatureMapParams:
@pytest.mark.parametrize(
'entanglement', ['full', 'linear', 'circular', 'sca']
)
def test_entangl_strings(self, entanglement):
"""Test gen_zz_feature_map with different
string options of entanglement
"""
n_features = 2
feature_map = gen_zz_feature_map(entanglement=entanglement)(n_features)
assert isinstance(feature_map.parameters, set)
def test_entangl_idx(self, get_pauli_z_linear_entangl_idx):
"""Test gen_zz_feature_map with valid indices value"""
n_features, reps = 2, 2
indices = get_pauli_z_linear_entangl_idx(reps, n_features)
feature_map_handle = gen_zz_feature_map(reps=reps,
entanglement=indices)
feature_map = feature_map_handle(n_features)
assert isinstance(feature_map.parameters, set)
def test_entangl_handle(self, get_pauli_z_linear_entangl_handle):
"""Test gen_zz_feature_map with a valid callable"""
n_features = 2
indices = get_pauli_z_linear_entangl_handle(n_features)
feature_map = gen_zz_feature_map(entanglement=indices)(n_features)
assert isinstance(feature_map.parameters, set)
def test_entangl_invalid_value(self):
"""Test gen_zz_feature_map with uncorrect value"""
n_features = 2
feature_map = gen_zz_feature_map(entanglement="invalid")(n_features)
with pytest.raises(ValueError):
feature_map.parameters
class TestTwoLocalParams:
def test_default(self):
"""Test default values of gen_zz_feature_map"""
n_features = 2
two_local_handle = gen_two_local()
two_local = two_local_handle(n_features)
assert two_local._num_qubits == n_features
assert len(two_local._rotation_blocks) == 2
assert len(two_local._entanglement_blocks) == 1
@pytest.mark.parametrize('rotation_blocks', gates)
@pytest.mark.parametrize('entanglement_blocks', gates)
def test_strings(self, rotation_blocks, entanglement_blocks):
"""Test gen_two_local with different string options"""
n_features = 2
two_local_handle = \
gen_two_local(rotation_blocks=rotation_blocks,
entanglement_blocks=entanglement_blocks)
two_local = two_local_handle(n_features)
assert isinstance(two_local._rotation_blocks, list)
assert isinstance(two_local._entanglement_blocks, list)
def test_local_list(self):
"""Test gen_two_local with a list as rotation
and entanglement blocks
"""
n_features = 2
rotation_blocks = ['cx', 'cz']
entanglement_blocks = ['rx', 'rz']
two_local_handle = \
gen_two_local(rotation_blocks=rotation_blocks,
entanglement_blocks=entanglement_blocks)
two_local = two_local_handle(n_features)
assert isinstance(two_local._rotation_blocks, list)
assert isinstance(two_local._entanglement_blocks, list)
def test_invalid_string(self):
"""Test gen_two_local with invalid strings option"""
rotation_blocks = 'invalid'
entanglement_blocks = 'invalid'
with pytest.raises(ValueError):
gen_two_local(rotation_blocks=rotation_blocks,
entanglement_blocks=entanglement_blocks)
def test_invalid_list(self):
"""Test gen_two_local with invalid strings option"""
rotation_blocks = ['invalid', 'invalid']
entanglement_blocks = ['invalid', 'invalid']
with pytest.raises(ValueError):
gen_two_local(rotation_blocks=rotation_blocks,
entanglement_blocks=entanglement_blocks)
class TestGetSPSAParams:
def test_default(self):
"""Test to create spsa with default parameters"""
spsa = get_spsa()
assert spsa._parameters[4] == 4.0
assert spsa._maxiter == 40
assert spsa._skip_calibration
def test_auto_calibration(self):
"""Test to create spsa with all none control parameters"""
spsa = get_spsa(c=(None, None, None, None, None))
for i in range(5):
# Should use qiskit default values
assert spsa._parameters[i] is not None
assert not spsa._skip_calibration
def test_custom(self):
"""Test to create spsa with custom parameters"""
spsa = get_spsa(max_trials=100, c=(0.0, 1.0, 2.0, 3.0, 4.0))
for i in range(5):
assert spsa._parameters[i] == i
assert spsa._skip_calibration
assert spsa._maxiter == 100
| 41.218487 | 79 | 0.651376 | 578 | 4,905 | 5.179931 | 0.178201 | 0.069472 | 0.04008 | 0.0501 | 0.546092 | 0.500334 | 0.422178 | 0.379426 | 0.367067 | 0.352037 | 0 | 0.009197 | 0.268502 | 4,905 | 118 | 80 | 41.567797 | 0.825251 | 0.12946 | 0 | 0.406977 | 0 | 0 | 0.029758 | 0 | 0 | 0 | 0 | 0 | 0.209302 | 1 | 0.139535 | false | 0 | 0.023256 | 0 | 0.197674 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6ce76a463b6a97c5763ca5f9e8e9046e6b614846 | 13,344 | py | Python | bin/kerdenSOM.py | leschzinerlab/myami-3.2-freeHand | 974b8a48245222de0d9cfb0f433533487ecce60d | [
"MIT"
] | null | null | null | bin/kerdenSOM.py | leschzinerlab/myami-3.2-freeHand | 974b8a48245222de0d9cfb0f433533487ecce60d | [
"MIT"
] | null | null | null | bin/kerdenSOM.py | leschzinerlab/myami-3.2-freeHand | 974b8a48245222de0d9cfb0f433533487ecce60d | [
"MIT"
] | 1 | 2019-09-05T20:58:37.000Z | 2019-09-05T20:58:37.000Z | #!/bin/python
"""
Kernel Probability Density Estimator Self-Organizing Map
"""
# python
import re
import os
import sys
import glob
import time
import numpy
import shutil
import subprocess
# appion
from appionlib import appionScript
from appionlib import apXmipp
from appionlib import apDisplay
from appionlib import appiondata
from appionlib import apEMAN
from appionlib import apFile
from appionlib import apProject
from appionlib import apFourier
from appionlib import apImagicFile
from appionlib import apImage
#======================
#======================
class kerdenSOMScript(appionScript.AppionScript):
#======================
def setupParserOptions(self):
self.parser.add_option("-a", "--alignid", dest="alignstackid", type="int",
help="Alignment stack id", metavar="#")
self.parser.add_option("-m", "--maskrad", dest="maskrad", type="float",
help="Mask radius in Angstroms", metavar="#")
self.parser.add_option("-x", "--xdim", dest="xdim", type="int", default=4,
help="X dimension", metavar="#")
self.parser.add_option("-y", "--ydim", dest="ydim", type="int", default=3,
help="Y dimension", metavar="#")
self.parser.add_option("--numpart", dest="numpart", type="int",
help="Number of particles, default all in stack", metavar="#")
self.convergemodes = ( "normal", "fast", "slow" )
self.parser.add_option("--converge", dest="converge",
help="Convergence criteria mode", metavar="MODE",
type="choice", choices=self.convergemodes, default="normal" )
#======================
def checkConflicts(self):
if self.params['alignstackid'] is None:
apDisplay.printError("Please enter an aligned stack id, e.g. --alignstackid=4")
if self.params['numpart'] is None:
alignstackdata = appiondata.ApAlignStackData.direct_query(self.params['alignstackid'])
self.params['numpart'] = alignstackdata['num_particles']
        if self.params['xdim'] > 16 or self.params['ydim'] > 16:
            apDisplay.printError("Dimensions must be 16 or less")
#======================
def setRunDir(self):
self.alignstackdata = appiondata.ApAlignStackData.direct_query(self.params['alignstackid'])
path = self.alignstackdata['path']['path']
uppath = os.path.abspath(os.path.join(path, ".."))
self.params['rundir'] = os.path.join(uppath, self.params['runname'])
#======================
def insertKerDenSOM(self, binned=None):
### Preliminary data
projectid = apProject.getProjectIdFromAlignStackId(self.params['alignstackid'])
alignstackdata = appiondata.ApAlignStackData.direct_query(self.params['alignstackid'])
numclass = self.params['xdim']*self.params['ydim']
pathdata = appiondata.ApPathData(path=os.path.abspath(self.params['rundir']))
### KerDen SOM Params object
kerdenq = appiondata.ApKerDenSOMParamsData()
kerdenq['mask_diam'] = 2.0*self.params['maskrad']
kerdenq['x_dimension'] = self.params['xdim']
kerdenq['y_dimension'] = self.params['ydim']
kerdenq['convergence'] = self.params['converge']
kerdenq['run_seconds'] = time.time()-self.t0
### Align Analysis Run object
analysisq = appiondata.ApAlignAnalysisRunData()
analysisq['runname'] = self.params['runname']
analysisq['path'] = pathdata
analysisq['description'] = self.params['description']
analysisq['alignstack'] = alignstackdata
analysisq['hidden'] = False
### linked through cluster not analysis
#analysisq['kerdenparams'] = kerdenq
### Clustering Run object
clusterrunq = appiondata.ApClusteringRunData()
clusterrunq['runname'] = self.params['runname']
clusterrunq['description'] = self.params['description']
# what if we binned the aligned stack to get the new one
if binned is None:
boxsize = alignstackdata['boxsize']
pixelsize = alignstackdata['pixelsize']
else:
boxsize = alignstackdata['boxsize'] / binned
pixelsize = alignstackdata['pixelsize'] * binned
clusterrunq['boxsize'] = boxsize
clusterrunq['pixelsize'] = pixelsize
clusterrunq['num_particles'] = self.params['numpart']
clusterrunq['alignstack'] = alignstackdata
clusterrunq['analysisrun'] = analysisq
clusterrunq['kerdenparams'] = kerdenq
### Clustering Stack object
clusterstackq = appiondata.ApClusteringStackData()
clusterstackq['avg_imagicfile'] = "kerdenstack"+self.timestamp+".hed"
clusterstackq['num_classes'] = numclass
clusterstackq['clusterrun'] = clusterrunq
clusterstackq['path'] = pathdata
clusterstackq['hidden'] = False
imagicfile = os.path.join(self.params['rundir'], clusterstackq['avg_imagicfile'])
if not os.path.isfile(imagicfile):
apDisplay.printError("could not find average stack file: "+imagicfile)
### looping over clusters
apDisplay.printColor("Inserting particle classification data, please wait", "cyan")
for i in range(numclass):
classnum = i+1
classroot = "%s.%d"% (self.timestamp, classnum-1)
classdocfile = os.path.join(self.params['rundir'], classroot)
partlist = self.readClassDocFile(classdocfile)
### Clustering Particle object
clusterrefq = appiondata.ApClusteringReferenceData()
clusterrefq['refnum'] = classnum
clusterrefq['avg_mrcfile'] = classroot+".mrc"
clusterrefq['clusterrun'] = clusterrunq
clusterrefq['path'] = pathdata
clusterrefq['num_particles'] = len(partlist)
clusterrefq['ssnr_resolution'] = self.cluster_resolution[i]
### looping over particles
sys.stderr.write(".")
for partnum in partlist:
alignpartdata = self.getAlignParticleData(partnum, alignstackdata)
### Clustering Particle objects
clusterpartq = appiondata.ApClusteringParticleData()
clusterpartq['clusterstack'] = clusterstackq
clusterpartq['alignparticle'] = alignpartdata
clusterpartq['partnum'] = partnum
clusterpartq['refnum'] = classnum
clusterpartq['clusterreference'] = clusterrefq
### finally we can insert parameters
if self.params['commit'] is True:
clusterpartq.insert()
#=====================
def getAlignParticleData(self, partnum, alignstackdata):
alignpartq = appiondata.ApAlignParticleData()
alignpartq['alignstack'] = alignstackdata
alignpartq['partnum'] = partnum
alignparts = alignpartq.query(results=1)
return alignparts[0]
#=====================
def readClassDocFile(self, docfile):
if not os.path.isfile(docfile):
return []
partlist = []
f = open(docfile, 'r')
for line in f:
sline = line.strip()
if re.match("[0-9]+", sline):
# numbers start at zero
partnum = int(sline)+1
partlist.append(partnum)
f.close()
if not partlist:
return []
partlist.sort()
return partlist
#======================
def runKerdenSOM(self, indata):
"""
From http://xmipp.cnb.csic.es/twiki/bin/view/Xmipp/KerDenSOM
KerDenSOM stands for "Kernel Probability Density Estimator Self-Organizing Map".
It maps a set of high dimensional input vectors into a two-dimensional grid.
"""
apDisplay.printMsg("Running KerDen SOM")
outstamp = os.path.join(self.params['rundir'], self.timestamp)
kerdencmd = ( "xmipp_classify_kerdensom -verb 1 -i %s -o %s -xdim %d -ydim %d -saveclusters "%
(indata, outstamp, self.params['xdim'], self.params['ydim'])
)
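        # With the defaults above, the resulting command looks like
        # (illustrative paths and dimensions):
        #   xmipp_classify_kerdensom -verb 1 -i stack.data
        #       -o <rundir>/<timestamp> -xdim 4 -ydim 3 -saveclusters -eps 1e-7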
### convergence criteria
if self.params['converge'] == "fast":
kerdencmd += " -eps 1e-5 "
elif self.params['converge'] == "slow":
kerdencmd += " -eps 1e-9 "
else:
kerdencmd += " -eps 1e-7 "
apDisplay.printColor(kerdencmd, "cyan")
proc = subprocess.Popen(kerdencmd, shell=True)
proc.wait()
time.sleep(1)
return
#======================
def fileId(self, fname):
ext = os.path.splitext(fname)[1]
num = int(ext[1:])
return num
#======================
def sortFile(self, a, b):
if self.fileId(a) > self.fileId(b):
return 1
return -1
#======================
def createMontageByEMAN(self):
self.cluster_resolution = []
apDisplay.printMsg("Converting files")
### create crappy files
emancmd = ( "proc2d "+self.instack+" crap.mrc first=0 last=0 mask=1" )
apEMAN.executeEmanCmd(emancmd, showcmd=False, verbose=False)
emancmd = ( "proc2d crap.mrc crap.png" )
apEMAN.executeEmanCmd(emancmd, showcmd=False, verbose=False)
files = glob.glob(self.timestamp+".[0-9]*")
files.sort(self.sortFile)
montagecmd = ("montage -geometry +4+4 -tile %dx%d "%(self.params['xdim'], self.params['ydim']))
stackname = "kerdenstack"+self.timestamp+".hed"
count = 0
numclass = self.params['xdim']*self.params['ydim']
i = 0
for listname in files:
i += 1
apDisplay.printMsg("%d of %d classes"%(i,len(files)))
#listname = self.timestamp+str(i)
if not os.path.isfile(listname) or apFile.fileSize(listname) < 1:
### create a ghost particle
emancmd = ( "proc2d crap.mrc "+stackname+" " )
sys.stderr.write("skipping "+listname+"\n")
apEMAN.executeEmanCmd(emancmd, showcmd=False, verbose=False)
### create png
shutil.copy("crap.png", listname+".png")
else:
### average particles
emancmd = ("proc2d %s %s list=%s average"%
(self.instack, stackname, listname))
apEMAN.executeEmanCmd(emancmd, showcmd=True, verbose=False)
### create mrc
emancmd = ("proc2d %s %s first=%d last=%d"%
(stackname, listname+".mrc", count, count))
apEMAN.executeEmanCmd(emancmd, showcmd=False, verbose=False)
### create png
emancmd = ("proc2d %s %s"%
(listname+".mrc", listname+".png"))
apEMAN.executeEmanCmd(emancmd, showcmd=False, verbose=False)
                ### FIX ME: for now fill self.cluster_resolution with None, although it
                ### should be possible to calculate it if the particle list exists, like in createMontageInMemory
self.cluster_resolution.append(None)
montagecmd += listname+".png "
count +=1
montagecmd += "montage.png"
apEMAN.executeEmanCmd(montagecmd, showcmd=True, verbose=False)
time.sleep(1)
apFile.removeFile("crap.mrc")
apFile.removeFile("crap.png")
apFile.removeFilePattern(self.timestamp+".*.png")
#======================
def readListFile(self, listfile):
partlist = []
f = open(listfile, "r")
for line in f:
sline = line.strip()
if re.match("[0-9]+$", sline):
partnum = int(sline)+1
partlist.append(partnum)
f.close()
return partlist
#======================
def createMontageInMemory(self, apix):
self.cluster_resolution = []
apDisplay.printMsg("Converting files")
### Set binning of images
boxsize = apImagicFile.getBoxsize(self.instack)
bin = 1
while boxsize/bin > 200:
bin+=1
binboxsize = boxsize/bin
### create averages
files = glob.glob(self.timestamp+".[0-9]*")
files.sort(self.sortFile)
montage = []
montagepngs = []
i = 0
for listname in files:
i += 1
apDisplay.printMsg("%d of %d classes"%(i,len(files)))
pngfile = listname+".png"
if not os.path.isfile(listname) or apFile.fileSize(listname) < 1:
### create a ghost particle
sys.stderr.write("skipping "+listname+"\n")
blank = numpy.ones((binboxsize, binboxsize), dtype=numpy.float32)
### add to montage stack
montage.append(blank)
self.cluster_resolution.append(None)
### create png
apImage.arrayToPng(blank, pngfile)
else:
### read particle list
partlist = self.readListFile(listname)
### average particles
partdatalist = apImagicFile.readParticleListFromStack(self.instack, partlist, boxsize, msg=False)
partdataarray = numpy.asarray(partdatalist)
finaldata = partdataarray.mean(0)
if bin > 1:
finaldata = apImage.binImg(finaldata, bin)
### add to montage stack
montage.append(finaldata)
res = apFourier.spectralSNR(partdatalist, apix)
self.cluster_resolution.append(res)
### create png
apImage.arrayToPng(finaldata, pngfile)
### check for png file
if os.path.isfile(pngfile):
montagepngs.append(pngfile)
else:
apDisplay.printError("failed to create montage")
stackname = "kerdenstack"+self.timestamp+".hed"
apImagicFile.writeImagic(montage, stackname)
### create montage
montagecmd = ("montage -geometry +4+4 -tile %dx%d "%(self.params['xdim'], self.params['ydim']))
for monpng in montagepngs:
montagecmd += monpng+" "
montagecmd += "montage.png"
apEMAN.executeEmanCmd(montagecmd, showcmd=True, verbose=False)
time.sleep(1)
apFile.removeFilePattern(self.timestamp+".*.png")
return bin
#======================
def start(self):
aligndata = appiondata.ApAlignStackData.direct_query(self.params['alignstackid'])
boxsize = aligndata['boxsize']
apix = aligndata['pixelsize']
maskpixrad = self.params['maskrad']/apix
if maskpixrad*2 > boxsize-2:
apDisplay.printError("Mask radius is too big for boxsize: %d > %d"%(maskpixrad*2,boxsize-2))
apDisplay.printMsg("Mask radius and boxsize: %.1f < %d"%(maskpixrad*2,boxsize-2))
self.instack = os.path.join(aligndata['path']['path'], aligndata['imagicfile'])
outdata = "stack.data"
apXmipp.convertStackToXmippData(self.instack, outdata, maskpixrad,
boxsize, numpart=self.params['numpart']-1)
self.runKerdenSOM(outdata)
if apFile.stackSize(self.instack) > 3.0*(1024**3):
# Big stacks use eman
self.createMontageByEMAN()
binned = None
else:
binned = self.createMontageInMemory(apix)
self.insertKerDenSOM(binned=binned)
apFile.removeFile(outdata)
apFile.removeFilePattern("*.cod")
#======================
#======================
if __name__ == '__main__':
kerdenSOM = kerdenSOMScript()
kerdenSOM.start()
kerdenSOM.close()
| 34.040816 | 101 | 0.684502 | 1,510 | 13,344 | 6.023179 | 0.249007 | 0.04398 | 0.020891 | 0.012534 | 0.255965 | 0.215393 | 0.182628 | 0.139967 | 0.104013 | 0.081363 | 0 | 0.007478 | 0.148231 | 13,344 | 391 | 102 | 34.127877 | 0.792715 | 0.119529 | 0 | 0.234043 | 0 | 0.003546 | 0.164987 | 0.002068 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046099 | false | 0 | 0.06383 | 0 | 0.148936 | 0.046099 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6ce79e7fd96bdac62db00f64aee93bac8d10eccd | 1,941 | py | Python | storagetransfer/nearline_request_test.py | InstantDomain/python-docs-samples | f8e293c722998b269da38b7fe11b98aae8932b8f | [
"Apache-2.0"
] | 1 | 2022-01-30T07:11:54.000Z | 2022-01-30T07:11:54.000Z | storagetransfer/nearline_request_test.py | InstantDomain/python-docs-samples | f8e293c722998b269da38b7fe11b98aae8932b8f | [
"Apache-2.0"
] | 1 | 2021-12-13T05:15:00.000Z | 2021-12-13T05:15:00.000Z | storagetransfer/nearline_request_test.py | FFHixio/python-docs-samples | b39441b3ca0a7b27e9c141e9b43e78e729105573 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import backoff
from google.api_core.exceptions import RetryError
from google.cloud.storage import Bucket
from googleapiclient.errors import HttpError
import nearline_request
import nearline_request_apiary
@backoff.on_exception(backoff.expo, (RetryError,), max_time=60)
def test_nearline_request(
capsys, project_id: str, source_bucket: Bucket,
destination_bucket: Bucket, job_description_unique: str):
nearline_request.create_daily_nearline_30_day_migration(
project_id=project_id,
description=job_description_unique,
source_bucket=source_bucket.name,
sink_bucket=destination_bucket.name,
start_date=datetime.utcnow()
)
out, _ = capsys.readouterr()
assert "Created transferJob" in out
@backoff.on_exception(backoff.expo, (HttpError,), max_time=60)
def test_nearline_request_apiary(
capsys, project_id: str, source_bucket: Bucket,
destination_bucket: Bucket, job_description_unique: str):
nearline_request_apiary.main(
description=job_description_unique,
project_id=project_id,
start_date=datetime.utcnow(),
start_time=datetime.utcnow(),
source_bucket=source_bucket.name,
sink_bucket=destination_bucket.name
)
out, _ = capsys.readouterr()
assert "Returned transferJob" in out
| 32.35 | 74 | 0.751159 | 252 | 1,941 | 5.579365 | 0.448413 | 0.042674 | 0.065434 | 0.02276 | 0.307255 | 0.266003 | 0.266003 | 0.221906 | 0.221906 | 0.221906 | 0 | 0.008772 | 0.177743 | 1,941 | 59 | 75 | 32.898305 | 0.87218 | 0.281298 | 0 | 0.352941 | 0 | 0 | 0.02822 | 0 | 0 | 0 | 0 | 0 | 0.058824 | 1 | 0.058824 | false | 0 | 0.205882 | 0 | 0.264706 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6ce9ce2dd0e46784853652492cb563b90869ae67 | 8,688 | py | Python | gamestate-changes/change_statistics/other/plotChangeTypeStatistics.py | phylib/MinecraftNDN-RAFNET19 | c7bfa7962707af367fafe9d879bc63637c06aec7 | [
"MIT"
] | 1 | 2020-05-18T15:55:09.000Z | 2020-05-18T15:55:09.000Z | gamestate-changes/change_statistics/other/plotChangeTypeStatistics.py | phylib/MinecraftNDN-RAFNET19 | c7bfa7962707af367fafe9d879bc63637c06aec7 | [
"MIT"
] | null | null | null | gamestate-changes/change_statistics/other/plotChangeTypeStatistics.py | phylib/MinecraftNDN-RAFNET19 | c7bfa7962707af367fafe9d879bc63637c06aec7 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import pandas as pd
class ChangeTypeStatisticPlot():
def __init__(self):
self.reset()
def reset(self):
self.intervals = {
"changedChunksAll":[],
"changedChunksBlock":[],
"changedChunksBlockEntity":[],
"changedChunksEntity":[],
"changedSectionsAll":[],
"changedSectionsBlock":[],
"changedSectionsBlockEntity":[],
"changedSectionsEntity":[],
"changedBlocks":[],
"changedBlockEntities":[],
"changedEntities":[],
"bytesAll_min":[],
"bytesBlock_min":[],
"bytesBlockEntity_min":[],
"bytesEntity_min":[],
"bytesAll_mid": [],
"bytesBlock_mid": [],
"bytesBlockEntity_mid": [],
"bytesEntity_mid": [],
"bytesAll_max": [],
"bytesBlock_max": [],
"bytesBlockEntity_max": [],
"bytesEntity_max": [],
"changedSectionsPerChangedChunk": [],
"changedBlocksPerChangedSection": []
}
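    # Note: importIntervalData below assumes each entry of intervalStatList is
    # an indexable sequence of at least 23 values, in the column order of the
    # keys above; this expected shape is inferred from the index positions it
    # unpacks.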
def importIntervalData(self, intervalStatList):
self.reset()
for intervalStats in intervalStatList:
self.intervals["changedChunksAll"].append(intervalStats[0])
self.intervals["changedChunksBlock"].append(intervalStats[1])
self.intervals["changedChunksBlockEntity"].append(intervalStats[2])
self.intervals["changedChunksEntity"].append(intervalStats[3])
self.intervals["changedSectionsAll"].append(intervalStats[4])
self.intervals["changedSectionsBlock"].append(intervalStats[5])
self.intervals["changedSectionsBlockEntity"].append(intervalStats[6])
self.intervals["changedSectionsEntity"].append(intervalStats[7])
self.intervals["changedBlocks"].append(intervalStats[8])
self.intervals["changedBlockEntities"].append(intervalStats[9])
self.intervals["changedEntities"].append(intervalStats[10])
self.intervals["bytesAll_min"].append(intervalStats[11])
self.intervals["bytesBlock_min"].append(intervalStats[12])
self.intervals["bytesBlockEntity_min"].append(intervalStats[13])
self.intervals["bytesEntity_min"].append(intervalStats[14])
self.intervals["bytesAll_mid"].append(intervalStats[15])
self.intervals["bytesBlock_mid"].append(intervalStats[16])
self.intervals["bytesBlockEntity_mid"].append(intervalStats[17])
self.intervals["bytesEntity_mid"].append(intervalStats[18])
self.intervals["bytesAll_max"].append(intervalStats[19])
self.intervals["bytesBlock_max"].append(intervalStats[20])
self.intervals["bytesBlockEntity_max"].append(intervalStats[21])
self.intervals["bytesEntity_max"].append(intervalStats[22])
# place to calculate additional stuff
# number of changed sections per changed chunk
ratio = 0 if self.intervals["changedChunksAll"][-1] == 0 else self.intervals["changedSectionsAll"][-1] / self.intervals["changedChunksAll"][-1]
self.intervals["changedSectionsPerChangedChunk"].append(ratio)
# number of changed blocks per changed section
ratio = 0 if self.intervals['changedSectionsAll'][-1] == 0 else self.intervals['changedBlocks'][-1] / self.intervals['changedSectionsAll'][-1]
self.intervals['changedBlocksPerChangedSection'].append(ratio)
self.data_frame = pd.DataFrame(self.intervals)
def plot(self, intervalStatList, intervalLength):
self.importIntervalData(intervalStatList)
self.intervalLength = str(intervalLength)
# let pandas describe the data_frame
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
print(self.data_frame.describe())
self.data_frame.describe().to_csv("plotChangeTypeStatistics_" + self.intervalLength + "s.csv")
# changed chunks
self.showBoxplot(self.data_frame, ["changedChunksAll"], "changedChunksAll")
self.showBoxplot(self.data_frame, ["changedChunksBlock"], "changedChunksBlock")
self.showBoxplot(self.data_frame, ["changedChunksBlockEntity"], "changedChunksBlockEntity")
self.showBoxplot(self.data_frame, ["changedChunksEntity"], "changedChunksEntity")
self.showBoxplots(self.data_frame, ["changedChunksBlock","changedChunksBlockEntity","changedChunksEntity"], "ChangedChunks", False)
# changed sections
self.showBoxplot(self.data_frame, ["changedSectionsAll"], "changedSectionsAll")
self.showBoxplot(self.data_frame, ["changedSectionsBlock"], "changedSectionsBlock")
self.showBoxplot(self.data_frame, ["changedSectionsBlockEntity"], "changedSectionsBlockEntity")
self.showBoxplot(self.data_frame, ["changedSectionsEntity"], "changedSectionsEntity")
self.showBoxplots(self.data_frame, ["changedSectionsBlock", "changedSectionsBlockEntity", "changedSectionsEntity"], "ChangedSections", False)
# changed sections per changed chunk
self.showBoxplot(self.data_frame, ["changedSectionsPerChangedChunk"], "changedSectionsPerChangedChunk")
# changed blocks, blockEntities, Entities
self.showBoxplot(self.data_frame, ["changedBlocks"], "changedBlocks")
self.showBoxplot(self.data_frame, ["changedBlockEntities"], "changedBlockEntities")
self.showBoxplot(self.data_frame, ["changedEntities"], "changedEntities")
self.showBoxplots(self.data_frame, ["changedBlocks", "changedBlockEntities", "changedEntities"], "Changed Blocks/BlockEntities/Entities", False)
# changed sections per changed chunk
self.showBoxplot(self.data_frame, ["changedBlocksPerChangedSection"], "changedBlocksPerChangedSection")
# bytes changes (min)
self.showBoxplot(self.data_frame, ["bytesAll_min"], "bytesAll_min")
self.showBoxplot(self.data_frame, ["bytesBlock_min"], "bytesBlock_min")
self.showBoxplot(self.data_frame, ["bytesBlockEntity_min"], "bytesBlockEntity_min")
self.showBoxplot(self.data_frame, ["bytesEntity_min"], "bytesEntity_min")
self.showBoxplots(self.data_frame, ["bytesBlock_min", "bytesBlockEntity_min", "bytesEntity_min"], "#Bytes changed (min)", False)
# bytes changes (mid)
self.showBoxplot(self.data_frame, ["bytesAll_mid"], "bytesAll_mid")
self.showBoxplot(self.data_frame, ["bytesBlock_mid"], "bytesBlock_mid")
self.showBoxplot(self.data_frame, ["bytesBlockEntity_mid"], "bytesBlockEntity_mid")
self.showBoxplot(self.data_frame, ["bytesEntity_mid"], "bytesEntity_mid")
self.showBoxplots(self.data_frame, ["bytesBlock_mid", "bytesBlockEntity_mid", "bytesEntity_mid"], "#Bytes changed (mid)", False)
# bytes changes (max)
self.showBoxplot(self.data_frame, ["bytesAll_max"], "bytesAll_max")
self.showBoxplot(self.data_frame, ["bytesBlock_max"], "bytesBlock_max")
self.showBoxplot(self.data_frame, ["bytesBlockEntity_max"], "bytesBlockEntity_max")
self.showBoxplot(self.data_frame, ["bytesEntity_max"], "bytesEntity_max")
self.showBoxplots(self.data_frame, ["bytesBlock_max", "bytesBlockEntity_max", "bytesEntity_max"], "#Bytes changed (max)", False)
self.showBoxplots(self.data_frame, ["bytesAll_min","bytesAll_mid","bytesAll_max", "bytesBlock_min","bytesBlock_mid", "bytesBlock_max"
, "bytesBlockEntity_min","bytesBlockEntity_mid", "bytesBlockEntity_max", "bytesEntity_min","bytesEntity_mid", "bytesEntity_max"],
"#Bytes changed (min, mid, max)", False)
# TODO: check bytes calculation
def showBoxplot(self, data_frame, keys, title):
f, (ax1, ax2) = plt.subplots(1, 2, sharey=False)
data_frame.boxplot(column=keys, ax=ax1, return_type='axes')
plt.suptitle(title + (" ("+ str(self.intervalLength) + "s interval)"))
ax1.set_title('With outliers')
data_frame.boxplot(column=keys, ax=ax2, return_type='axes', showfliers=False)
ax2.set_title('Without outliers')
plt.show(block=True)
def showBoxplots(self, data_frame, keys, title, showFliers):
f, ax1 = plt.subplots()
data_frame.boxplot(column=keys, ax=ax1, return_type='axes', showfliers=showFliers)
plt.suptitle(title + (" ("+ str(self.intervalLength) + "s interval)"))
ax1.set_title('With outliers' if showFliers else 'Without outliers')
plt.show(block=True)
| 52.97561 | 155 | 0.674839 | 787 | 8,688 | 7.290978 | 0.174079 | 0.064308 | 0.083827 | 0.108749 | 0.349948 | 0.252178 | 0.061694 | 0.061694 | 0.061694 | 0.061694 | 0 | 0.00813 | 0.193025 | 8,688 | 163 | 156 | 53.300614 | 0.810298 | 0.04512 | 0 | 0.05042 | 0 | 0 | 0.329993 | 0.078363 | 0 | 0 | 0 | 0.006135 | 0 | 1 | 0.05042 | false | 0 | 0.042017 | 0 | 0.10084 | 0.008403 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cea43f71d85af41272392dee594ce4cf3b7017f | 4,221 | py | Python | 2017/17_spinlock.py | pchudzik/adventofcode | e1d6521621f6ca90f9dc53cf3d1ed5b8c5c2b7d1 | [
"MIT"
] | null | null | null | 2017/17_spinlock.py | pchudzik/adventofcode | e1d6521621f6ca90f9dc53cf3d1ed5b8c5c2b7d1 | [
"MIT"
] | null | null | null | 2017/17_spinlock.py | pchudzik/adventofcode | e1d6521621f6ca90f9dc53cf3d1ed5b8c5c2b7d1 | [
"MIT"
] | null | null | null | """
--- Day 17: Spinlock ---
Suddenly, whirling in the distance, you notice what looks like a massive, pixelated hurricane: a deadly spinlock. This
spinlock isn't just consuming computing power, but memory, too; vast, digital mountains are being ripped from the ground
and consumed by the vortex.
If you don't move quickly, fixing that printer will be the least of your problems.
This spinlock's algorithm is simple but efficient, quickly consuming everything in its path. It starts with a circular
buffer containing only the value 0, which it marks as the current position. It then steps forward through the circular
buffer some number of steps (your puzzle input) before inserting the first new value, 1, after the value it stopped on.
The inserted value becomes the current position. Then, it steps forward from there the same number of steps, and
wherever it stops, inserts after it the second new value, 2, and uses that as the new current position again.
It repeats this process of stepping forward, inserting a new value, and using the location of the inserted value as the
new current position a total of 2017 times, inserting 2017 as its final operation, and ending with a total of 2018
values (including 0) in the circular buffer.
For example, if the spinlock were to step 3 times per insert, the circular buffer would begin to evolve like this (using
parentheses to mark the current position after each iteration of the algorithm):
* (0), the initial state before any insertions.
* 0 (1): the spinlock steps forward three times (0, 0, 0), and then inserts the first value, 1, after it. 1 becomes the
current position.
* 0 (2) 1: the spinlock steps forward three times (0, 1, 0), and then inserts the second value, 2, after it. 2 becomes the
current position.
* 0 2 (3) 1: the spinlock steps forward three times (1, 0, 2), and then inserts the third value, 3, after it. 3 becomes
the current position.
And so on:
0 2 (4) 3 1
0 (5) 2 4 3 1
0 5 2 4 3 (6) 1
0 5 (7) 2 4 3 6 1
0 5 7 2 4 3 (8) 6 1
0 (9) 5 7 2 4 3 8 6 1
Eventually, after 2017 insertions, the section of the circular buffer near the last insertion looks like this:
1512 1134 151 (2017) 638 1513 851
Perhaps, if you can identify the value that will ultimately be after the last value written (2017), you can
short-circuit the spinlock. In this example, that would be 638.
What is the value after 2017 in your completed circular buffer?
Your puzzle answer was 204.
--- Part Two ---
The spinlock does not short-circuit. Instead, it gets more angry. At least, you assume that's what happened; it's
spinning significantly faster than it was a moment ago.
You have good news and bad news.
The good news is that you have improved calculations for how to stop the spinlock. They indicate that you actually need
to identify the value after 0 in the current state of the circular buffer.
The bad news is that while you were determining this, the spinlock has just finished inserting its fifty millionth value
(50000000).
What is the value after 0 the moment 50000000 is inserted?
Your puzzle answer was 28954211.
Both parts of this puzzle are complete! They provide two gold stars: **
At this point, you should return to your Advent calendar and try another puzzle.
Your puzzle input was 380.
"""
import collections
class Buffer:
def __init__(self, step_size):
self.step_size = step_size
self.status = collections.deque([0])
self.iteration = 0
def __next__(self):
self.iteration += 1
self.status.rotate(-self.step_size)
self.status.append(self.iteration)
return self.status[0]
def value_after(self, number):
index = self.status.index(number)
return self.status[index + 1 if index + 1 < len(self.status) else 0]
def part_1(puzzle):
buffer = Buffer(puzzle)
last = -1
for i in range(2017):
last = next(buffer)
return last
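# Sanity check from the worked example in the puzzle text above: with a step
# size of 3, the value after 2017 in the completed buffer is 638, i.e.
# part_1(3) == 638.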
def part_2(puzzle):
buffer = Buffer(puzzle)
for i in range(50_000_000):
next(buffer)
return buffer.value_after(0)
if __name__ == "__main__":
puzzle = 380
print(f"part 1: {part_1(puzzle)}")
print(f"part 2: {part_2(puzzle)}")
| 35.470588 | 122 | 0.725421 | 713 | 4,221 | 4.255259 | 0.336606 | 0.039552 | 0.035597 | 0.03296 | 0.105471 | 0.06592 | 0.048121 | 0.036915 | 0.010218 | 0.006262 | 0 | 0.057409 | 0.211798 | 4,221 | 118 | 123 | 35.771186 | 0.854524 | 0.784174 | 0 | 0.068966 | 0 | 0 | 0.062016 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.172414 | false | 0 | 0.034483 | 0 | 0.37931 | 0.068966 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cebb53641c4070903587094a84cbef7d3c86b94 | 1,191 | py | Python | pymeera/utils/support.py | norecces/pymeera | 14cfff6797b0bda231980d9329b480b2c178c803 | [
"MIT"
] | null | null | null | pymeera/utils/support.py | norecces/pymeera | 14cfff6797b0bda231980d9329b480b2c178c803 | [
"MIT"
] | null | null | null | pymeera/utils/support.py | norecces/pymeera | 14cfff6797b0bda231980d9329b480b2c178c803 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals, division
def hash_index(v, group):
"""
Hash values to store hierarchical index
:param v: index value
:param group: variables from which index was derived
:return: str
v = [1, 2]
    group = ['q1', 'q2']
return 'q1::1__q2::2'
"""
if not isinstance(v, (list, tuple)):
_hash = list(zip(group, [v]))
else:
_hash = list(zip(group, v))
return '__'.join(list(map(lambda x: '%s::%s' % (x[0], x[1]), _hash)))
def unhash_index(_hash):
"""
Decode hased value to tuple
:param _hash: str, hash_index result
:return: tuple of tuples
hash = 'q1::1__q2::2'
    return (('q1', '1'), ('q2', '2'))
"""
try:
return tuple(map(lambda x: tuple(x.split('::')), _hash.split('__')))
    except Exception:
        # Log the offending value; callers get None for a malformed hash.
        print(_hash)
        return None
def equlize_size(lst):
"""
Equalize size of incoming iterables
    :param lst: 2-dimensional iterables
:return: tuple of tuples
"""
max_size = max(map(lambda x: len(x), lst))
ret = []
for v in lst:
ret.append(tuple(v) + (('', ''),) * (max_size - len(v)))
return tuple(ret)
| 19.85 | 76 | 0.565911 | 163 | 1,191 | 3.969325 | 0.417178 | 0.068006 | 0.023184 | 0.027821 | 0.089645 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022989 | 0.269521 | 1,191 | 59 | 77 | 20.186441 | 0.72069 | 0.373636 | 0 | 0 | 0 | 0 | 0.01849 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.055556 | 0 | 0.388889 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cedb3413a7ff2267fed6a64b3ccc1e501d735c0 | 6,186 | py | Python | generators/gen_gp.py | Limacon101/EA_PODI_Library | 6dee7210cd05a6112c919928fccadbd8857d7f10 | [
"Apache-2.0"
] | null | null | null | generators/gen_gp.py | Limacon101/EA_PODI_Library | 6dee7210cd05a6112c919928fccadbd8857d7f10 | [
"Apache-2.0"
] | null | null | null | generators/gen_gp.py | Limacon101/EA_PODI_Library | 6dee7210cd05a6112c919928fccadbd8857d7f10 | [
"Apache-2.0"
] | null | null | null | """
Problem generator that creates random solutions for a Genetic Programming
problem -- symbolic regression. Briefly, this problem involves estimating
the (polynomial) line that best fits a set of data points.
For testing purposes, these data points are generated from a test function,
but (random) data points could be used. Solutions are represented using a
binary tree data structure.
Attributes:
max_nodes (int): Maximum number of leaf nodes
operators ([string]): Mathematical operators used to estimate the formula
constant_range ([int]): Range of constants used in estimating the formula
input_var (char): Input variable symbol (Recommended not to change)
input_func (function): Function used to get data points
input_range ([int]): Range of input data points (x-axis)
zero_div_penalty (float): Solution penalty for division by zero error
"""
import sys
import parser
import math
from core import ops, solver
from algorithms import algo_ea
max_nodes = 10
operators = ['+', '-', '*']
constant_range = [0, 5]
input_var = 'X'
# input_func = lambda x: x**2 + 2
# input_func = lambda x: x**3 + x*x + 2*x - 5
input_func = lambda x: x**4 + x*(x+2) - 5
input_range = [-5, 5]
zero_div_penalty = 1000
class BNode:
"""
Simple binary data structure used to (recursively) represent a GP solution
"""
def __init__(self, val, left=None, right=None):
self.value = val
self.left = left
self.right = right
def __str__(self):
return get_formula(self)
@staticmethod
def print_nodes(root, level=0):
print(level, root.value)
if type(root.left) == BNode:
BNode.print_nodes(root.left, level + 1)
else:
print(level + 1, root.left)
if type(root.right) == BNode:
BNode.print_nodes(root.right, level + 1)
else:
print(level + 1, root.right)
def create_solution(t=None):
"""
Creates a (pseudo) random GP solution.
:param t: Trace object
:return: Solution
"""
root = BNode(_random_from_list(operators, t))
# Contains all nodes that have empty leaves (left, right = None)
has_leaves = [root]
# Populates tree with operators, constants and inputs
for i in range(max_nodes // 2):
if len(has_leaves) == 0:
break
node = has_leaves.pop(t.randrange(0, len(has_leaves)))
node.left = new_node = _choose_node_type(3, 2, 1, t)
if type(new_node) == BNode:
has_leaves.append(node.left)
node.right = new_node = _choose_node_type(3, 2, 1, t)
if type(new_node) == BNode:
has_leaves.append(node.right)
# For any remaining leaves that are operators, complete with constants and inputs
for node in has_leaves:
node.left = _choose_node_type(0, 1, 1, t)
node.right = _choose_node_type(0, 1, 1, t)
# BNode.print_nodes(root)
# print(get_formula(root))
return root
def _choose_node_type(w_operator, w_constant, w_input, t):
"""
Choose a random node (from operators, constants and input variables)
:param w_operator: Weighting of choosing an operator
:param w_constant: Weighting of choosing a constant
:param w_input: Weighting of choosing an input
:param t: Trace object
:return: An operator, constant or input variable
"""
w_sum = w_operator + w_constant + w_input
rb = t.random()
# print('Chose:', rb)
r = rb * w_sum
# r = random.uniform(0, w_sum)
if r < w_operator:
return BNode(_random_from_list(operators, t))
elif r < w_operator + w_constant:
return _random_constant(t)
else:
return input_var
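# Worked example (not in the original source): with weights (3, 2, 1) and
# t.random() returning 0.6, r = 0.6 * 6 = 3.6, which falls in [3, 5), so a
# constant node is returned.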
# Replacement for random.choice for the time being
def _random_from_list(l, t):
rb = t.randrange(0, len(l))
# print('Chose from List:', rb)
t_ind = rb
return l[t_ind]
def _random_constant(t):
rb = t.randint(constant_range[0], constant_range[1])
# print('chose constant:', rb)
return rb
def fitness(root):
"""
Get the fitness of a GP solution. Calculated by summing the differences
between the data points and the solution's corresponding outputs over
the input range.
:param root: A solution (A BNode tree)
:return: Fitness value
"""
formula = get_formula(root)
total_deviation = 0
# compile() replaces the deprecated 'parser' module (removed in Python 3.10)
code = compile(formula, '<string>', 'eval')
penalty = 0
for i in range(input_range[0], input_range[1]):
X = i
try:
total_deviation += abs(eval(code) - input_func(i)) # Experiment with rms
except ZeroDivisionError:
total_deviation += 0.001 # Prevent total_deviation finishing at 0
penalty += zero_div_penalty
# total_deviation = math.sqrt(total_deviation / (input_range[1] - input_range[0]))
return total_deviation + penalty
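# Worked example (not in the original source), using the module defaults:
# for the candidate formula "(X*X)" the deviation at X = -5 is
# |25 - input_func(-5)| = |25 - 635| = 610; fitness() sums such deviations
# over input_range, plus zero_div_penalty for each division-by-zero error.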
def get_formula(root):
"""
Returns the solution in a string format
"""
if type(root) is not BNode:
return str(root)
else:
return "(" + get_formula(root.left) + str(root.value) + get_formula(root.right) + ")"
if __name__ == '__main__':
# Optimise GP problem using data points from the input_func lambda function
# using an evolutionary algorithm
print("Genetic Programming -- Symbolic Regression")
print("\nInput data points:")
for i in range(input_range[0], input_range[1]):
print("(", i, ",", input_func(i), ")", end=' ', sep='')
print()
# Run optimisation process:
gen = sys.modules[__name__]
alg = algo_ea
alg_params = solver.AlgoParams(select=ops.select_tournament,
crossover=ops.crossover_one_point,
mutate=ops.mutate_trace_gauss,
generations=50,
pop_size=30,
mutation_rate=0.1,
minimising=True)
s = solver.Solver(gen, alg, alg_params).solve()
print("\nOptimisation result and fitness:")
print(s)
| 30.776119 | 93 | 0.623828 | 825 | 6,186 | 4.512727 | 0.282424 | 0.024174 | 0.018802 | 0.012893 | 0.141553 | 0.102605 | 0.074134 | 0.051034 | 0.051034 | 0.051034 | 0 | 0.013677 | 0.279017 | 6,186 | 200 | 94 | 30.93 | 0.821076 | 0.396541 | 0 | 0.08 | 0 | 0 | 0.031781 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09 | false | 0 | 0.05 | 0.01 | 0.25 | 0.12 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cee58845ea7ead95f7ea366265201b50364a581 | 48,355 | py | Python | jenkins_jobs_addons/views.py | itaborda/jenkins-job-builder-addons | 63c5bbf9e1b1c70a12ea397f001b2a13ffae27cf | [
"Apache-2.0"
] | null | null | null | jenkins_jobs_addons/views.py | itaborda/jenkins-job-builder-addons | 63c5bbf9e1b1c70a12ea397f001b2a13ffae27cf | [
"Apache-2.0"
] | null | null | null | jenkins_jobs_addons/views.py | itaborda/jenkins-job-builder-addons | 63c5bbf9e1b1c70a12ea397f001b2a13ffae27cf | [
"Apache-2.0"
] | null | null | null | """
Views show job status.
**Component**: views
:Macro: views
:Entry Point: jenkins_jobs.views
"""
import logging
import xml.etree.ElementTree as XML
import jenkins_jobs.modules.base
import jenkins_jobs.modules.helpers as helpers
logger = logging.getLogger(__name__)
def all_view(parser, xml_parent, data):
"""
All view
:arg bool filter-executors: only those build executors that could
execute the jobs in this view will be shown.
:arg bool filter-queue: only jobs in this view will be shown in the queue.
:arg bool folder: Whether or not this view is in a folder.
Example:
.. literalinclude:: /../tests/views/fixtures/all_view.yaml
"""
view = XML.SubElement(xml_parent, 'hudson.model.AllView')
XML.SubElement(view, 'name').text = 'All'
parent = data.get('parent', False)
if parent:
clazz = 'com.cloudbees.hudson.plugins.folder.Folder'
if ("nested-view" == parent):
clazz = 'hudson.plugins.nested_view.NestedView'
elif ("list-view" == parent):
clazz = 'hudson.model.ListView'
owner_attrs = dict()
owner_attrs['class'] = clazz
owner_attrs['reference'] = '../../..'
XML.SubElement(view, 'owner', attrib=owner_attrs)
logger.debug("Read parent '{0}' to '{1}'".format(parent, data.get('name')))
executors = data.get('filter-executors', False)
XML.SubElement(view, 'filterExecutors').text = str(executors).lower()
queue = data.get('filter-queue', False)
XML.SubElement(view, 'filterQueue').text = str(queue).lower()
properties_attributes = dict()
properties_attributes['class'] = 'hudson.model.View$PropertyList'
XML.SubElement(view, 'properties', attrib=properties_attributes)
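# Illustrative sketch (not part of the upstream module): with an empty data
# dict, all_view() above appends roughly the following XML to xml_parent:
#
#   <hudson.model.AllView>
#     <name>All</name>
#     <filterExecutors>false</filterExecutors>
#     <filterQueue>false</filterQueue>
#     <properties class="hudson.model.View$PropertyList"/>
#   </hudson.model.AllView>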
def workflow_pipeline_view(parser, xml_parent, data):
"""
Workflow Pipeline View requires the Jenkins `Delivery Pipeline Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Delivery+Pipeline+Plugin>`_
:arg bool filter-executors: only those build executors that could
execute the jobs in this view will be shown.
:arg bool filter-queue: only jobs in this view will be shown in the queue.
:arg bool folder: Whether or not this view is in a folder.
:arg str name: The name of this view.
:arg dict components: The components (jobs) for this pipeline:
* **name** (str): Name of the pipeline, usually the name of the
component or product.
* **first-job** (str): First job in the pipeline. Usually the
build/compile job. The build number/build
display name will be used as the version in
later tasks or stages. If using folders, it
should be a full path to the job.
:arg int number-of-pipelines: Number of pipeline instances shown for each
pipeline.
:arg int number-of-columns: Number of columns used for showing pipelines.
Useful for multiple components in the view to
show them beside each other.
:arg int sorting: How to sort the pipeline in the view.
Only applicable for several pipelines.
Can be sorted by latest activity or by name.
:arg int update-interval: How often the view will be updated, in seconds.
:arg bool allow-pipeline-start: Start a new pipeline build.
:arg bool show-changes: Show SCM change log for the first job in the
pipeline. If a repository browser is configured, a link to the change
will be created in the repository browser.
:arg list regexp-first-jobs: Find Jenkins jobs matching a regular
expression, e.g. ``^build-(.+?)-project``.
Example:
.. literalinclude:: /../tests/views/fixtures/delivery_pipeline.yaml
"""
delivery_pipeline = 'se.diabol.jenkins.workflow.WorkflowPipelineView'
view = XML.SubElement(xml_parent, delivery_pipeline)
parent = data.get('parent', False)
if parent:
clazz = 'com.cloudbees.hudson.plugins.folder.Folder'
if ("nested-view" == parent):
clazz = 'hudson.plugins.nested_view.NestedView'
elif ("list-view" == parent):
clazz = 'hudson.model.ListView'
owner_attrs = dict()
owner_attrs['class'] = clazz
owner_attrs['reference'] = '../../..'
XML.SubElement(view, 'owner', attrib=owner_attrs)
logger.debug("Read parent '{0}' to '{1}'".format(parent, data.get('name')))
XML.SubElement(view, 'name').text = data.get('name')
executors = data.get('filter-executors', False)
XML.SubElement(view, 'filterExecutors').text = str(executors).lower()
queue = data.get('filter-queue', False)
XML.SubElement(view, 'filterQueue').text = str(queue).lower()
properties_attributes = dict()
properties_attributes['class'] = 'hudson.model.View$PropertyList'
XML.SubElement(view, 'properties', attrib=properties_attributes)
xml_components = XML.SubElement(view, 'componentSpecs')
components = data.get('components', [])
for component in components:
spec_class = "se.diabol.jenkins.workflow."\
"WorkflowPipelineView_-ComponentSpec"
component_spec = XML.SubElement(xml_components, spec_class)
name = component.get('name')
XML.SubElement(component_spec, 'name').text = name
first_job = component.get('job')
XML.SubElement(component_spec, 'job').text = first_job
number_of_pipelines = str(data.get('number-of-pipelines', 3))
XML.SubElement(
view, 'noOfPipelines').text = number_of_pipelines
number_of_columns = str(data.get('number-of-columns', 1))
XML.SubElement(view, 'noOfColumns').text = number_of_columns
sorting_options = ['none', 'Name', 'LatestActivity']
sorting = data.get('sorting', 'none')
if sorting not in sorting_options:
raise ValueError('sorting must be one of {} '.format(sorting_options))
if sorting == 'none':
XML.SubElement(view, 'sorting').text = 'none'
else:
XML.SubElement(
view, 'sorting'
).text = 'se.diabol.jenkins.pipeline.sort.{}Comparator'.format(sorting)
update_interval = str(data.get('update-interval', 1))
XML.SubElement(view, 'updateInterval').text = update_interval
show_changes = str(data.get('show-changes', False)).lower()
XML.SubElement(view, 'showChanges').text = str(show_changes).lower()
pipeline_start = str(data.get('allow-pipeline-start', False)).lower()
XML.SubElement(view, 'allowPipelineStart').text = pipeline_start
def delivery_pipeline_view(parser, xml_parent, data):
"""
Delivery Pipeline View requires the Jenkins `Delivery Pipeline Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Delivery+Pipeline+Plugin>`_
:arg bool filter-executors: only those build executors that could
execute the jobs in this view will be shown.
:arg bool filter-queue: only jobs in this view will be shown in the queue.
:arg bool folder: Whether or not this view is in a folder.
:arg str name: The name of this view.
:arg dict components: The components (jobs) for this pipeline:
* **name** (str): Name of the pipeline, usually the name of the
component or product.
* **first-job** (str): First job in the pipeline. Usually the
build/compile job. The build number/build
display name will be used as the version in
later tasks or stages. If using folders, it
should be a full path to the job.
:arg int number-of-pipelines: Number of pipeline instances shown for each
pipeline.
:arg bool show-aggregated-pipeline: Show an aggregated view where each
stage shows the latest version being
executed.
:arg int number-of-columns: Number of columns used for showing pipelines.
Useful for multiple components in the view to
show them beside each other.
:arg int sorting: How to sort the pipeline in the view.
Only applicable for several pipelines.
Can be sorted by latest activity or by name.
:arg int update-interval: How often the view will be updated, in seconds.
:arg bool allow-pipeline-start: Start a new pipeline build.
:arg bool allow-manual-triggers: If a task is manual (Build other projects
(manual step) from Build Pipeline Plugin, show a button.
:arg bool allow-rebuild: Rebuild a task.
:arg str show-avatars: Show avatar pictures instead of names of the people
involved in a pipeline instance. Use the `Avatar Plugin
<https://wiki.jenkins-ci.org/display/JENKINS/Avatar+Plugin>`_
or the `Gravatar Plugin
<https://wiki.jenkins-ci.org/display/JENKINS/Gravatar+plugin>`_ or
similar to set avatar pictures for contributors.
:arg bool show-changes: Show SCM change log for the first job in the
pipeline. If a repository browser is configured, a link to the change
will be created in the repository browser.
:arg bool show-description: Show build description connected to a task.
:arg bool show-promotions: Show promotions from the `Promoted Builds
Plugin
<https://wiki.jenkins-ci.org/display/JENKINS/Promoted+Builds+Plugin>`_.
:arg bool show-total-buildtime: Show total build time of a pipeline.
If there are multiple routes in a pipeline, total build time is
calculated as the sum of the build times in the longest route.
:arg str css-url: Possibility to override CSS for the normal view.
Enter the full url to the custom CSS.
:arg str fullscreen-css-url: Possibility to override CSS for the
fullscreen view. Enter the full url to the custom CSS.
:arg list regexp-first-jobs: Find Jenkins jobs matching a regular
expression, e.g. ``^build-(.+?)-project``.
Example:
.. literalinclude:: /../tests/views/fixtures/delivery_pipeline.yaml
"""
delivery_pipeline = 'se.diabol.jenkins.pipeline.DeliveryPipelineView'
view = XML.SubElement(xml_parent, delivery_pipeline)
parent = data.get('parent', False)
if parent:
clazz = 'com.cloudbees.hudson.plugins.folder.Folder'
if ("nested-view" == parent):
clazz = 'hudson.plugins.nested_view.NestedView'
elif ("list-view" == parent):
clazz = 'hudson.model.ListView'
owner_attrs = dict()
owner_attrs['class'] = clazz
owner_attrs['reference'] = '../../..'
XML.SubElement(view, 'owner', attrib=owner_attrs)
logger.debug("Read parent '{0}' to '{1}'".format(parent, data.get('name')))
XML.SubElement(view, 'name').text = data.get('name')
executors = data.get('filter-executors', False)
XML.SubElement(view, 'filterExecutors').text = str(executors).lower()
queue = data.get('filter-queue', False)
XML.SubElement(view, 'filterQueue').text = str(queue).lower()
properties_attributes = dict()
properties_attributes['class'] = 'hudson.model.View$PropertyList'
XML.SubElement(view, 'properties', attrib=properties_attributes)
xml_components = XML.SubElement(view, 'componentSpecs')
components = data.get('components', [])
for component in components:
spec_class = "se.diabol.jenkins.pipeline."\
"DeliveryPipelineView_-ComponentSpec"
component_spec = XML.SubElement(xml_components, spec_class)
name = component.get('name')
XML.SubElement(component_spec, 'name').text = name
first_job = component.get('first-job')
XML.SubElement(component_spec, 'firstJob').text = first_job
number_of_pipelines = str(data.get('number-of-pipelines', 3))
XML.SubElement(
view, 'noOfPipelines').text = number_of_pipelines
aggregated_pipeline_raw = data.get('show-aggregated-pipeline', False)
aggregated_pipeline = str(aggregated_pipeline_raw).lower()
XML.SubElement(view, 'showAggregatedPipeline').text = aggregated_pipeline
number_of_columns = str(data.get('number-of-columns', 1))
XML.SubElement(view, 'noOfColumns').text = number_of_columns
sorting_options = ['none', 'Name', 'LatestActivity']
sorting = data.get('sorting', 'none')
if sorting not in sorting_options:
raise ValueError('sorting must be one of {} '.format(sorting_options))
if sorting == 'none':
XML.SubElement(view, 'sorting').text = 'none'
else:
XML.SubElement(
view, 'sorting'
).text = 'se.diabol.jenkins.pipeline.sort.{}Comparator'.format(sorting)
show_avatars = data.get('show-avatars', False)
XML.SubElement(view, 'showAvatars').text = str(show_avatars).lower()
update_interval = str(data.get('update-interval', 1))
XML.SubElement(view, 'updateInterval').text = update_interval
show_changes = str(data.get('show-changes', False)).lower()
XML.SubElement(view, 'showChanges').text = str(show_changes).lower()
manual_triggers = str(data.get('allow-manual-triggers', False)).lower()
XML.SubElement(view, 'allowManualTriggers').text = manual_triggers
total_build_time = str(data.get('show-total-buildtime', False)).lower()
XML.SubElement(view, 'showTotalBuildTime').text = total_build_time
allow_rebuild = str(data.get('allow-rebuild', False)).lower()
XML.SubElement(view, 'allowRebuild').text = allow_rebuild
pipeline_start = str(data.get('allow-pipeline-start', False)).lower()
XML.SubElement(view, 'allowPipelineStart').text = pipeline_start
show_description = str(data.get('show-description', False)).lower()
XML.SubElement(view, 'showDescription').text = show_description
show_promotions = str(data.get('show-promotions', False)).lower()
XML.SubElement(view, 'showPromotions').text = show_promotions
xml_jobs = XML.SubElement(view, 'regexpFirstJobs')
jobs = data.get('regexp-first-jobs', [])
for job in jobs:
xml_job = XML.SubElement(xml_jobs, 'se.diabol.jenkins.pipeline.'
'DeliveryPipelineView_-RegExpSpec')
XML.SubElement(xml_job, 'regexp').text = job
XML.SubElement(view, 'fullScreenCss').text = data.get('fullscreen-css-url')
XML.SubElement(view, 'embeddedCss').text = data.get('css-url')
def build_pipeline_view(parser, xml_parent, data):
"""
Build Pipeline View requires the Jenkins `Build Pipeline Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Build+Pipeline+Plugin>`_
:arg bool filter-executors: only those build executors that could
execute the jobs in this view will be shown.
:arg bool filter-queue: only jobs in this view will be shown in the queue.
:arg bool folder: Whether or not this view is in a folder.
:arg str name: The name of this view.
:arg str first-job: Select the initial or parent Job in the build
pipeline view.
:arg int display-number-of-builds: Select the number of build pipelines to
display in the view.
:arg str build-view-title: The title of this view.
:arg str console-output-link-style: One of the following:
* **This Window**
* **New Window**
* **Light Box** (default)
:arg bool trigger-only-latest-job: Select this option to restrict the
display of a Trigger button to only the most recent successful build
pipelines. This option will also limit retries to just unsuccessful
builds of the most recent build pipelines.
* **True**: Only the most recent successful builds displayed on the
view will have a manual trigger button for the next build
in the pipeline.
* **False**: All successful builds displayed on the view will have a
manual trigger button for the next build in the pipeline.
:arg bool always-allow-manual-trigger: Select this option if you want to
be able to execute again a successful pipeline step. If the build is
parameterized, this will re-execute the step using the same parameter
values that were used when it was previously executed.
:arg bool start-with-parameters: Select this option if you want to
show the pipeline definition header in the pipeline view. If this option
is not selected, then a pipeline that has never been run will not show
any details about its jobs and appear like a blank form. Job details will
only appear after the pipeline has been run at least once.
:arg bool show-pipeline-parameters-in-header: Select this option if you
want to display the parameters used to run the latest successful job
in the pipeline's project headers.
:arg bool show-pipeline-parameters: Select this option if you want to
display the parameters used to run the first job in each pipeline's
revision box.
:arg int refresh-frequency: Frequency, in seconds, at which the Build
Pipeline Plugin updates the build cards.
:arg str css-url: Link to override style sheet
Example:
.. literalinclude:: /../tests/views/fixtures/build_pipeline_view.yaml
"""
build_pipeline = 'au.com.centrumsystems.hudson.plugin.'\
'buildpipeline.BuildPipelineView'
view = XML.SubElement(xml_parent, build_pipeline)
parent = data.get('parent', False)
if parent:
clazz = 'com.cloudbees.hudson.plugins.folder.Folder'
if ("nested-view" == parent):
clazz = 'hudson.plugins.nested_view.NestedView'
elif ("list-view" == parent):
clazz = 'hudson.model.ListView'
owner_attrs = dict()
owner_attrs['class'] = clazz
owner_attrs['reference'] = '../../..'
XML.SubElement(view, 'owner', attrib=owner_attrs)
logger.debug("Read parent '{0}' to '{1}'".format(parent, data.get('name')))
XML.SubElement(view, 'name').text = data.get('name')
executors = data.get('filter-executors', False)
XML.SubElement(view, 'filterExecutors').text = str(executors).lower()
queue = data.get('filter-queue', False)
XML.SubElement(view, 'filterQueue').text = str(queue).lower()
properties_attributes = dict()
properties_attributes['class'] = 'hudson.model.View$PropertyList'
XML.SubElement(view, 'properties', attrib=properties_attributes)
grid_attrs = dict()
grid_attrs['class'] = 'au.com.centrumsystems.hudson.plugin.buildpipeline.'\
'DownstreamProjectGridBuilder'
grid = XML.SubElement(view, 'gridBuilder', attrib=grid_attrs)
first_job = data.get('first-job', None)
XML.SubElement(grid, 'firstJob').text = first_job
display_number_of_builds = str(data.get('display-number-of-builds', 10))
XML.SubElement(view, 'noOfDisplayedBuilds').text = display_number_of_builds
build_view_title = data.get('build-view-title')
XML.SubElement(view, 'buildViewTitle').text = build_view_title
console_output_links = ['This Window', 'New Window', 'Light Box']
console_output_link_style = data.get(
'console-output-link-style', 'Light Box')
if console_output_link_style not in console_output_links:
raise ValueError('console-output-link-style must '
'be one of {}'.format(console_output_links))
XML.SubElement(
view, 'consoleOutputLinkStyle'
).text = console_output_link_style
XML.SubElement(view, 'cssUrl').text = data.get('css-url')
job = XML.SubElement(view, 'triggerOnlyLatestJob')
job.text = str(data.get('trigger-only-latest-job', False)).lower()
manual_trigger = data.get('always-allow-manual-trigger', False)
manual_trigger = str(manual_trigger).lower()
XML.SubElement(
view, 'alwaysAllowManualTrigger'
).text = manual_trigger
params = str(data.get('show-pipeline-parameters', False)).lower()
XML.SubElement(view, 'showPipelineParameters').text = params
headers_raw = data.get('show-pipeline-parameters-in-header', False)
headers = str(headers_raw).lower()
XML.SubElement(
view, 'showPipelineParametersInHeaders'
).text = headers
start_with_params = str(data.get('start-with-parameters', False)).lower()
XML.SubElement(
view, 'startsWithParameters'
).text = start_with_params
refresh_freq = data.get('refresh-frequency', 3)
XML.SubElement(view, 'refreshFrequency').text = str(refresh_freq)
show_def_raw = data.get('show-pipeline-definition-in-headers', False)
show_def = str(show_def_raw).lower()
XML.SubElement(view, 'showPipelineDefinitionHeader').text = show_def
def nested_view(parser, xml_parent, data):
"""
Nested View requires the Jenkins `Nested View Plugin.
<https://wiki.jenkins-ci.org/display/JENKINS/Nested+View+Plugin>`_
:arg str name: The name of this view.
:arg str description: A description of this view.
:arg bool filter-executors: only those build executors that could
execute the jobs in this view will be shown.
:arg bool filter-queue: only jobs in this view will be shown in the queue.
:arg bool folder: Whether or not this view is in a folder.
:arg str default-view: Name of the sub-view to show by default.
:arg list columns: Columns to show (defaults to status and weather);
an ``extra-build-parameter`` mapping is also supported, as handled below.
"""
COLUMN_DICT = {
'status': 'hudson.views.StatusColumn',
'weather': 'hudson.views.WeatherColumn',
}
DEFAULT_COLUMNS = ['status', 'weather']
delivery_pipeline = 'hudson.plugins.nested_view.NestedView'
view = XML.SubElement(xml_parent, delivery_pipeline)
XML.SubElement(view, 'name').text = data.get('name')
XML.SubElement(view, 'description').text = data.get('description', None)
parent = data.get('parent', False)
if parent:
clazz = 'com.cloudbees.hudson.plugins.folder.Folder'
if ("nested-view" == parent):
clazz = 'hudson.plugins.nested_view.NestedView'
elif ("list-view" == parent):
clazz = 'hudson.model.ListView'
owner_attrs = dict()
owner_attrs['class'] = clazz
owner_attrs['reference'] = '../../..'
XML.SubElement(view, 'owner', attrib=owner_attrs)
logger.debug("Read parent '{0}' to '{1}'".format(parent, data.get('name')))
executors = data.get('filter-executors', False)
XML.SubElement(view, 'filterExecutors').text = str(executors).lower()
queue = data.get('filter-queue', False)
XML.SubElement(view, 'filterQueue').text = str(queue).lower()
defaultView = data.get('default-view', None)
XML.SubElement(view, 'defaultView').text = defaultView
XML.SubElement(view, 'views')
c_xml = None
if ("nested-view" == parent):
c_xml = XML.SubElement(view, 'columns')
c_xml = XML.SubElement(c_xml, 'columns')
else:
c_xml = XML.SubElement(view, 'columns')
columns = data.get('columns', DEFAULT_COLUMNS)
for column in columns:
if isinstance(column, dict):
if 'extra-build-parameter' in column:
p_name = column['extra-build-parameter']
x = XML.SubElement(
c_xml,
'jenkins.plugins.extracolumns.BuildParametersColumn',
plugin='extra-columns'
)
x.append(XML.fromstring(
'<singlePara>true</singlePara>'))
x.append(XML.fromstring(
'<parameterName>%s</parameterName>' % p_name))
else:
if column in COLUMN_DICT:
if isinstance(COLUMN_DICT[column], list):
x = XML.SubElement(c_xml, COLUMN_DICT[column][0][0],
**COLUMN_DICT[column][0][1])
for tag in COLUMN_DICT[column][1:]:
x.append(XML.fromstring(tag))
else:
XML.SubElement(c_xml, COLUMN_DICT[column])
def sublist_view(parser, xml_parent, data):
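"""
List view: renders a ``hudson.model.ListView`` supporting the common
name / description / filter-executors / filter-queue mapping plus the
``job-name``, ``job-filters``, ``columns``, ``regex``, ``recurse`` and
``status-filter`` options handled below.
"""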
COLUMN_DICT = {
'status': 'hudson.views.StatusColumn',
'weather': 'hudson.views.WeatherColumn',
'job': 'hudson.views.JobColumn',
'last-success': 'hudson.views.LastSuccessColumn',
'last-failure': 'hudson.views.LastFailureColumn',
'last-duration': 'hudson.views.LastDurationColumn',
'build-button': 'hudson.views.BuildButtonColumn',
'last-stable': 'hudson.views.LastStableColumn',
'robot-list': 'hudson.plugins.robot.view.RobotListViewColumn',
'find-bugs': 'hudson.plugins.findbugs.FindBugsColumn',
'jacoco': 'hudson.plugins.jacococoveragecolumn.JaCoCoColumn',
'git-branch': 'hudson.plugins.git.GitBranchSpecifierColumn',
'schedule-build':
'org.jenkinsci.plugins.schedulebuild.ScheduleBuildButtonColumn',
'priority-sorter': 'jenkins.advancedqueue.PrioritySorterJobColumn',
'build-filter': 'hudson.views.BuildFilterColumn',
'desc': 'jenkins.branch.DescriptionColumn',
'policy-violations':
'com.sonatype.insight.ci.hudson.QualityColumn '
'plugin="sonatype-clm-ci"',
'member-graph-view':
'com.barchart.jenkins.cascade.GraphViewColumn '
'plugin="maven-release-cascade"',
'extra-tests-total': [
['jenkins.plugins.extracolumns.TestResultColumn',
{'plugin': 'extra-columns'}],
'<testResultFormat>2</testResultFormat>'],
'extra-tests-failed': [
['jenkins.plugins.extracolumns.TestResultColumn',
{'plugin': 'extra-columns'}],
'<testResultFormat>3</testResultFormat>'],
'extra-tests-passed': [
['jenkins.plugins.extracolumns.TestResultColumn',
{'plugin': 'extra-columns'}],
'<testResultFormat>4</testResultFormat>'],
'extra-tests-skipped': [
['jenkins.plugins.extracolumns.TestResultColumn',
{'plugin': 'extra-columns'}],
'<testResultFormat>5</testResultFormat>'],
'extra-tests-format-0': [
['jenkins.plugins.extracolumns.TestResultColumn',
{'plugin': 'extra-columns'}],
'<testResultFormat>0</testResultFormat>'],
'extra-tests-format-1': [
['jenkins.plugins.extracolumns.TestResultColumn',
{'plugin': 'extra-columns'}],
'<testResultFormat>1</testResultFormat>'],
'extra-build-description': [
['jenkins.plugins.extracolumns.BuildDescriptionColumn',
{'plugin': 'extra-columns'}],
'<columnWidth>3</columnWidth>', '<forceWidth>false</forceWidth>'],
'extra-build-parameters': [
['jenkins.plugins.extracolumns.BuildParametersColumn',
{'plugin': 'extra-columns'}],
'<singlePara>false</singlePara>', '<parameterName/>'],
'extra-last-user-name':
'jenkins.plugins.extracolumns.UserNameColumn'
' plugin="extra-columns"',
'extra-last-output':
'jenkins.plugins.extracolumns.LastBuildConsoleColumn'
' plugin="extra-columns"',
'extra-workspace-link':
'jenkins.plugins.extracolumns.WorkspaceColumn '
'plugin="extra-columns"',
'extra-configure-button':
'jenkins.plugins.extracolumns.ConfigureProjectColumn'
' plugin="extra-columns"',
}
DEFAULT_COLUMNS = ['status', 'weather', 'job', 'last-success',
'last-failure', 'last-duration', 'build-button']
#view = XML.Element('hudson.model.ListView')
view = XML.SubElement(xml_parent, 'hudson.model.ListView')
mapping = [
('name', 'name', None),
('description', 'description', ''),
('filter-executors', 'filterExecutors', False),
('filter-queue', 'filterQueue', False),
]
helpers.convert_mapping_to_xml(view, data, mapping, fail_required=True)
parent = data.get('parent', False)
if parent:
clazz = 'com.cloudbees.hudson.plugins.folder.Folder'
if ("nested-view" == parent):
clazz = 'hudson.plugins.nested_view.NestedView'
elif ("list-view" == parent):
clazz = 'hudson.model.ListView'
owner_attrs = dict()
owner_attrs['class'] = clazz
owner_attrs['reference'] = '../../..'
XML.SubElement(view, 'owner', attrib=owner_attrs)
logger.debug("Read parent '{0}' to '{1}'".format(parent, data.get('name')))
XML.SubElement(view, 'properties',
{'class': 'hudson.model.View$PropertyList'})
jn_xml = XML.SubElement(view, 'jobNames')
jobnames = data.get('job-name', None)
XML.SubElement(
jn_xml,
'comparator', {
'class': 'hudson.util.CaseInsensitiveComparator'
}
)
if jobnames is not None:
# Job names must be sorted in the xml
jobnames = sorted(jobnames, key=str.lower)
for jobname in jobnames:
XML.SubElement(jn_xml, 'string').text = str(jobname)
job_filter_xml = XML.SubElement(view, 'jobFilters')
jobfilters = data.get('job-filters', [])
for jobfilter in jobfilters:
if jobfilter == 'most-recent':
mr_xml = XML.SubElement(job_filter_xml,
'hudson.views.MostRecentJobsFilter')
mr_xml.set('plugin', 'view-job-filters')
mr_data = jobfilters.get('most-recent')
mapping = [
('max-to-include', 'maxToInclude', '0'),
('check-start-time', 'checkStartTime', False),
]
helpers.convert_mapping_to_xml(mr_xml, mr_data, mapping,
fail_required=True)
if jobfilter == 'build-duration':
bd_xml = XML.SubElement(job_filter_xml,
'hudson.views.BuildDurationFilter')
bd_xml.set('plugin', 'view-job-filters')
bd_data = jobfilters.get('build-duration')
mapping = [
('match-type', 'includeExcludeTypeString',
'includeMatched'),
('build-duration-type', 'buildCountTypeString', 'Latest'),
('amount-type', 'amountTypeString', 'Hours'),
('amount', 'amount', '0'),
('less-than', 'lessThan', True),
('build-duration-minutes', 'buildDurationMinutes', '0'),
]
helpers.convert_mapping_to_xml(bd_xml, bd_data, mapping,
fail_required=True)
if jobfilter == 'build-trend':
bt_xml = XML.SubElement(job_filter_xml,
'hudson.views.BuildTrendFilter')
bt_xml.set('plugin', 'view-job-filters')
bt_data = jobfilters.get('build-trend')
mapping = [
('match-type', 'includeExcludeTypeString',
'includeMatched'),
('build-trend-type', 'buildCountTypeString', 'Latest'),
('amount-type', 'amountTypeString', 'Hours'),
('amount', 'amount', '0'),
('status', 'statusTypeString', 'Completed'),
]
helpers.convert_mapping_to_xml(bt_xml, bt_data, mapping,
fail_required=True)
if jobfilter == 'job-status':
js_xml = XML.SubElement(job_filter_xml,
'hudson.views.JobStatusFilter')
js_xml.set('plugin', 'view-job-filters')
js_data = jobfilters.get('job-status')
mapping = [
('match-type', 'includeExcludeTypeString',
'includeMatched'),
('unstable', 'unstable', False),
('failed', 'failed', False),
('aborted', 'aborted', False),
('disabled', 'disabled', False),
('stable', 'stable', False),
]
helpers.convert_mapping_to_xml(js_xml, js_data, mapping,
fail_required=True)
if jobfilter == 'upstream-downstream':
ud_xml = XML.SubElement(
job_filter_xml,
'hudson.views.UpstreamDownstreamJobsFilter'
)
ud_xml.set('plugin', 'view-job-filters')
ud_data = jobfilters.get('upstream-downstream')
mapping = [
('include-upstream', 'includeUpstream',
False),
('include-downstream', 'includeDownstream', False),
('recursive', 'recursive', False),
('exclude-originals', 'excludeOriginals', False),
]
helpers.convert_mapping_to_xml(ud_xml, ud_data, mapping,
fail_required=True)
if jobfilter == 'fallback':
fb_xml = XML.SubElement(
job_filter_xml,
'hudson.views.AddRemoveFallbackFilter'
)
fb_xml.set('plugin', 'view-job-filters')
fb_data = jobfilters.get('fallback')
mapping = [
('fallback-type', 'fallbackTypeString',
'REMOVE_ALL_IF_ALL_INCLUDED'),
('fallback-type', 'fallbackType',
'REMOVE_ALL_IF_ALL_INCLUDED'),
]
helpers.convert_mapping_to_xml(fb_xml, fb_data, mapping,
fail_required=True)
if jobfilter == 'build-status':
bs_xml = XML.SubElement(job_filter_xml,
'hudson.views.BuildStatusFilter')
bs_xml.set('plugin', 'view-job-filters')
bs_data = jobfilters.get('build-status')
mapping = [
('match-type', 'includeExcludeTypeString',
'includeMatched'),
('never-built', 'neverBuilt', False),
('building', 'building', False),
('in-build-queue', 'inBuildQueue', False),
]
helpers.convert_mapping_to_xml(bs_xml, bs_data, mapping,
fail_required=True)
if jobfilter == 'user-relevence':
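# 'relevence' (sic) is kept as-is: it is the YAML key this module matches on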
ur_xml = XML.SubElement(job_filter_xml,
'hudson.views.UserRelevanceFilter')
ur_xml.set('plugin', 'view-job-filters')
ur_data = jobfilters.get('user-relevence')
mapping = [
('match-type', 'includeExcludeTypeString',
'includeMatched'),
('build-count', 'buildCountTypeString', 'AtLeastOne'),
('amount-type', 'amountTypeString', 'Hours'),
('amount', 'amount', '0'),
('match-user-id', 'matchUserId', False),
('match-user-fullname', 'matchUserFullName', False),
('ignore-case', 'ignoreCase', False),
('ignore-whitespace', 'ignoreWhitespace', False),
('ignore-non-alphaNumeric', 'ignoreNonAlphaNumeric',
False),
('match-builder', 'matchBuilder', False),
('match-email', 'matchEmail', False),
('match-scm-changes', 'matchScmChanges', False),
]
helpers.convert_mapping_to_xml(ur_xml, ur_data, mapping,
fail_required=True)
if jobfilter == 'regex-job':
rj_xml = XML.SubElement(job_filter_xml,
'hudson.views.RegExJobFilter')
rj_xml.set('plugin', 'view-job-filters')
rj_data = jobfilters.get('regex-job')
mapping = [
('match-type', 'includeExcludeTypeString',
'includeMatched'),
('regex-name', 'valueTypeString', ''),
('regex', 'regex', ''),
]
helpers.convert_mapping_to_xml(rj_xml, rj_data, mapping,
fail_required=True)
if jobfilter == 'job-type':
jt_xml = XML.SubElement(job_filter_xml,
'hudson.views.JobTypeFilter')
jt_xml.set('plugin', 'view-job-filters')
jt_data = jobfilters.get('job-type')
mapping = [
('match-type', 'includeExcludeTypeString',
'includeMatched'),
('job-type', 'jobType', 'hudson.model.FreeStyleProject'),
]
helpers.convert_mapping_to_xml(jt_xml, jt_data, mapping,
fail_required=True)
if jobfilter == 'parameter':
pr_xml = XML.SubElement(job_filter_xml,
'hudson.views.ParameterFilter')
pr_xml.set('plugin', 'view-job-filters')
pr_data = jobfilters.get('parameter')
mapping = [
('match-type', 'includeExcludeTypeString',
'includeMatched'),
('name', 'nameRegex', ''),
('value', 'valueRegex', ''),
('description', 'descriptionRegex', ''),
('use-default', 'useDefaultValue', False),
('match-builds-in-progress', 'matchBuildsInProgress',
False),
('match-all-builds', 'matchAllBuilds', False),
('max-builds-to-match', 'maxBuildsToMatch', 0),
]
helpers.convert_mapping_to_xml(pr_xml, pr_data, mapping,
fail_required=True)
if jobfilter == 'other-views':
ov_xml = XML.SubElement(job_filter_xml,
'hudson.views.OtherViewsFilter')
ov_xml.set('plugin', 'view-job-filters')
ov_data = jobfilters.get('other-views')
mapping = [
('match-type', 'includeExcludeTypeString',
'includeMatched'),
('view-name', 'otherViewName',
'<select a view other than this one>'),
]
helpers.convert_mapping_to_xml(ov_xml, ov_data, mapping,
fail_required=True)
if jobfilter == 'scm':
st_xml = XML.SubElement(job_filter_xml,
'hudson.views.ScmTypeFilter')
st_xml.set('plugin', 'view-job-filters')
st_data = jobfilters.get('scm')
mapping = [
('match-type', 'includeExcludeTypeString',
'includeMatched'),
('scm-type', 'scmType', 'hudson.scm.NullSCM'),
]
helpers.convert_mapping_to_xml(st_xml, st_data, mapping,
fail_required=True)
if jobfilter == 'secured-job':
sj_xml = XML.SubElement(job_filter_xml,
'hudson.views.SecuredJobsFilter')
sj_xml.set('plugin', 'view-job-filters')
sj_data = jobfilters.get('secured-job')
mapping = [
('match-type', 'includeExcludeTypeString',
'includeMatched'),
]
helpers.convert_mapping_to_xml(sj_xml, sj_data, mapping,
fail_required=True)
if jobfilter == 'user-permissions':
up_xml = XML.SubElement(job_filter_xml,
'hudson.views.SecurityFilter')
up_xml.set('plugin', 'view-job-filters')
up_data = jobfilters.get('user-permissions')
mapping = [
('match-type', 'includeExcludeTypeString',
'includeMatched'),
('configure', 'configure', False),
('build', 'build', False),
('workspace', 'workspace', False),
('permission-check', 'permissionCheckType',
'MustMatchAll'),
]
helpers.convert_mapping_to_xml(up_xml, up_data, mapping,
fail_required=True)
if jobfilter == 'unclassified':
uc_xml = XML.SubElement(job_filter_xml,
'hudson.views.UnclassifiedJobsFilter')
uc_xml.set('plugin', 'view-job-filters')
uc_data = jobfilters.get('unclassified')
mapping = [
('match-type', 'includeExcludeTypeString',
'includeMatched'),
]
helpers.convert_mapping_to_xml(uc_xml, uc_data, mapping,
fail_required=True)
c_xml = None
if ("nested-view" == parent):
c_xml = XML.SubElement(view, 'columns')
c_xml = XML.SubElement(c_xml, 'columns')
else:
c_xml = XML.SubElement(view, 'columns')
columns = data.get('columns', DEFAULT_COLUMNS)
for column in columns:
if isinstance(column, dict):
if 'extra-build-parameter' in column:
p_name = column['extra-build-parameter']
x = XML.SubElement(
c_xml,
'jenkins.plugins.extracolumns.BuildParametersColumn',
plugin='extra-columns'
)
x.append(XML.fromstring(
'<singlePara>true</singlePara>'))
x.append(XML.fromstring(
'<parameterName>%s</parameterName>' % p_name))
else:
if column in COLUMN_DICT:
if isinstance(COLUMN_DICT[column], list):
x = XML.SubElement(c_xml, COLUMN_DICT[column][0][0],
**COLUMN_DICT[column][0][1])
for tag in COLUMN_DICT[column][1:]:
x.append(XML.fromstring(tag))
else:
XML.SubElement(c_xml, COLUMN_DICT[column])
mapping = [
('regex', 'includeRegex', None),
('recurse', 'recurse', False),
('status-filter', 'statusFilter', None),
]
helpers.convert_mapping_to_xml(
view, data, mapping, fail_required=False)
class NestedRoot(jenkins_jobs.modules.base.Base):
sequence = 0
def root_xml(self, data):
COLUMN_DICT = {
'status': 'hudson.views.StatusColumn',
'weather': 'hudson.views.WeatherColumn',
}
DEFAULT_COLUMNS = ['status', 'weather']
root = XML.Element('hudson.plugins.nested_view.NestedView',
{'plugin': 'nested-view'})
XML.SubElement(root, 'name').text = data.get('name')
XML.SubElement(root, 'description').text = data.get('description', None)
executors = data.get('filter-executors', False)
XML.SubElement(root, 'filterExecutors').text = str(executors).lower()
queue = data.get('filter-queue', False)
XML.SubElement(root, 'filterQueue').text = str(queue).lower()
defaultView = data.get('default-view', None)
XML.SubElement(root, 'defaultView').text = defaultView
XML.SubElement(root, 'views')
c_xml = XML.SubElement(root, 'columns')
c_xml = XML.SubElement(c_xml, 'columns')
columns = data.get('columns', DEFAULT_COLUMNS)
for column in columns:
if isinstance(column, dict):
if 'extra-build-parameter' in column:
p_name = column['extra-build-parameter']
x = XML.SubElement(
c_xml,
'jenkins.plugins.extracolumns.BuildParametersColumn',
plugin='extra-columns'
)
x.append(XML.fromstring(
'<singlePara>true</singlePara>'))
x.append(XML.fromstring(
'<parameterName>%s</parameterName>' % p_name))
else:
if column in COLUMN_DICT:
if isinstance(COLUMN_DICT[column], list):
x = XML.SubElement(c_xml, COLUMN_DICT[column][0][0],
**COLUMN_DICT[column][0][1])
for tag in COLUMN_DICT[column][1:]:
x.append(XML.fromstring(tag))
else:
XML.SubElement(c_xml, COLUMN_DICT[column])
return root
class Views(jenkins_jobs.modules.base.Base):
sequence = 20
component_type = 'view'
component_list_type = 'views'
def gen_xml(self, xml_parent, data):
views = XML.SubElement(xml_parent, 'views')
for view in data.get('views', []):
if isinstance(view, dict):
template_name, view_data = next(iter(view.items()))
if not isinstance(view_data, dict):
view_data = {}
else:
continue
view_data = self.registry.parser._applyDefaults(view_data)
views_data = []
template = self.registry.parser._getViewTemplate(template_name)
if template:
d = type(view_data)(view_data)
d['views'] = []
views_data = self.registry.parser._expandYamlForTemplateView(d,
template)
else:
view_data['name'] = self.registry.parser._getfullname(view_data)
logger.debug("Expanding view '{0}'".format(view_data['name']))
self.registry.parser._formatDescription(view_data)
#self.registry.parser.views.append(view)
views_data.append(view_data)
for v_data in views_data:
dict_view = {}
dict_view[v_data.get('view-type', template_name)] = v_data
logger.debug("reading view node on '{0}' - '{1}'".format(
v_data.get('view-type', template_name), dict_view))
self.registry.dispatch('view', views, dict_view)
| 41.829585 | 80 | 0.599586 | 5,250 | 48,355 | 5.42419 | 0.112571 | 0.05752 | 0.043579 | 0.014538 | 0.705517 | 0.654809 | 0.605998 | 0.582646 | 0.524985 | 0.524985 | 0 | 0.001646 | 0.283735 | 48,355 | 1,155 | 81 | 41.865801 | 0.820557 | 0.227588 | 0 | 0.48374 | 0 | 0 | 0.290456 | 0.14007 | 0 | 0 | 0 | 0 | 0 | 1 | 0.01084 | false | 0.001355 | 0.00542 | 0 | 0.025745 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6ceef45fd00a4fefffef3511012a9cb07586edc9 | 992 | py | Python | 1029.Two-City-Scheduling.py | mickey0524/leetcode | 6bedeb6ff29b02a97178cca464c5fd639951801f | [
"MIT"
] | 18 | 2018-07-14T12:45:37.000Z | 2022-03-26T14:51:04.000Z | 1029.Two-City-Scheduling.py | mickey0524/leetcode | 6bedeb6ff29b02a97178cca464c5fd639951801f | [
"MIT"
] | null | null | null | 1029.Two-City-Scheduling.py | mickey0524/leetcode | 6bedeb6ff29b02a97178cca464c5fd639951801f | [
"MIT"
] | 3 | 2019-05-29T04:09:22.000Z | 2021-06-07T23:37:46.000Z | # https://leetcode.com/problems/two-city-scheduling/
# Easy (51.19%)
# Total Accepted: 3,913
# Total Submissions: 7,644
import math
from functools import cmp_to_key  # Python 3: list.sort() no longer accepts cmp=
class Solution(object):
def twoCitySchedCost(self, costs):
"""
:type costs: List[List[int]]
:rtype: int
"""
def comp(c1, c2):
return int(math.fabs(c2[0] - c2[1]) - math.fabs(c1[0] - c1[1]))
costs.sort(key=cmp_to_key(comp))
length = len(costs)
threshold = length // 2  # integer division; the input length is even
A, B, res = 0, 0, 0
for i in range(length):
if costs[i][0] <= costs[i][1]:
if A < threshold:
res += costs[i][0]
A += 1
else:
res += costs[i][1]
B += 1
else:
if B < threshold:
res += costs[i][1]
B += 1
else:
res += costs[i][0]
A += 1
return res
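# Worked example: costs = [[10, 20], [30, 200], [400, 50], [30, 20]] sorts by
# descending |costA - costB| to [[400, 50], [30, 200], [10, 20], [30, 20]];
# the greedy then sends two people to each city for a minimum total of
# 50 + 30 + 10 + 20 = 110 (the expected LeetCode answer).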
| 24.8 | 75 | 0.403226 | 115 | 992 | 3.478261 | 0.452174 | 0.09 | 0.09 | 0.09 | 0.15 | 0.14 | 0.08 | 0 | 0 | 0 | 0 | 0.067669 | 0.46371 | 992 | 39 | 76 | 25.435897 | 0.684211 | 0.157258 | 0 | 0.44 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.04 | 0.04 | 0.24 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cef0b375dc416be2c958051d5eef16f02884782 | 1,238 | py | Python | restler/checkers/checker_log.py | mkleshchenok/restler-fuzzer | 1bd7bc68a6c4de997e9fda9a9db5ffb0504b864c | [
"MIT"
] | 1,539 | 2020-11-16T19:20:55.000Z | 2022-03-30T16:36:49.000Z | restler/checkers/checker_log.py | mkleshchenok/restler-fuzzer | 1bd7bc68a6c4de997e9fda9a9db5ffb0504b864c | [
"MIT"
] | 282 | 2020-11-17T04:53:38.000Z | 2022-03-31T13:16:25.000Z | restler/checkers/checker_log.py | mkleshchenok/restler-fuzzer | 1bd7bc68a6c4de997e9fda9a9db5ffb0504b864c | [
"MIT"
] | 171 | 2020-11-16T21:55:59.000Z | 2022-03-28T12:56:26.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from __future__ import print_function
import os
import threading
import utils.logger as logger
class CheckerLog(object):
def __init__(self, checker_name):
""" Creates a log file of a specified name
@param checker_name: The name of the checker that this log is for
@type checker_name: Str
"""
self._checker_name = checker_name
thread_id = threading.current_thread().ident
self._log_path = os.path.join(logger.LOGS_DIR, f'{self._checker_name}.{thread_id!s}.txt')
# Create the log directory if it does not exist yet; the directory,
# not the log file itself, is what os.makedirs needs to create.
log_dir = os.path.dirname(self._log_path)
if not os.path.exists(log_dir):
try:
os.makedirs(log_dir)
except OSError:
pass
def checker_print(self, msg, print_to_network_log=True):
""" Prints message to the checker log file
@param msg: The message to print.
@type msg: Str
"""
msg = logger.remove_tokens_from_logs(msg)
with open(self._log_path, "a+", encoding='utf-8') as log_file:
print(msg, file=log_file)
if print_to_network_log:
logger.raw_network_logging(self._checker_name + ' ' + msg)
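# Minimal usage sketch (not in the original file; assumes utils.logger is
# importable and logger.LOGS_DIR points at a writable directory):
#
#   log = CheckerLog('example_checker')  # hypothetical checker name
#   log.checker_print('starting checker', print_to_network_log=False)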
| 30.95 | 97 | 0.641357 | 168 | 1,238 | 4.458333 | 0.452381 | 0.102804 | 0.080107 | 0.050734 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001104 | 0.268174 | 1,238 | 39 | 98 | 31.74359 | 0.825607 | 0.234249 | 0 | 0 | 0 | 0 | 0.052154 | 0.043084 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.2 | 0 | 0.4 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cf28d611ee9dcd8d53d1a651cec5d7de16772e8 | 907 | py | Python | menpofit/lucaskanade/appearance/base.py | trigeorgis/menpofit | 742f4d1aeeb822a615d88ac499df40009b05795f | [
"BSD-3-Clause"
] | 1 | 2015-07-26T18:33:56.000Z | 2015-07-26T18:33:56.000Z | menpofit/lucaskanade/appearance/base.py | ersisimou/menpofit | 55ec53205ba31fd42ca054b2ce07590490decb8c | [
"BSD-3-Clause"
] | null | null | null | menpofit/lucaskanade/appearance/base.py | ersisimou/menpofit | 55ec53205ba31fd42ca054b2ce07590490decb8c | [
"BSD-3-Clause"
] | null | null | null | from menpofit.lucaskanade.residual import SSD
from menpofit.lucaskanade.base import LucasKanade
class AppearanceLucasKanade(LucasKanade):
def __init__(self, model, transform, eps=10**-6):
# Note that the only supported residual for Appearance LK is SSD.
# This is because, in general, we don't know how to take the appropriate
# derivatives for arbitrary residuals with (for instance) a project out
# AAM.
# See https://github.com/menpo/menpo/issues/130 for details.
super(AppearanceLucasKanade, self).__init__(SSD(),
transform, eps=eps)
# in appearance alignment, target image is aligned to appearance model
self.appearance_model = model
# by default, template is assigned to mean appearance
self.template = model.mean()
# pre-compute
self._set_up()
| 41.227273 | 80 | 0.654906 | 108 | 907 | 5.398148 | 0.638889 | 0.041166 | 0.078902 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009105 | 0.273429 | 907 | 21 | 81 | 43.190476 | 0.875569 | 0.442117 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.222222 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cf4283b40f2711fae51f7a72680c8eec927755d | 7,672 | py | Python | scrapy_httpcache/policy/rfc2616.py | nyov/scrapy-httpcache | dda4d7b3dda0f0df945d02235740c705bccc3d8a | [
"BSD-3-Clause"
] | 1 | 2021-07-26T17:49:59.000Z | 2021-07-26T17:49:59.000Z | scrapy_httpcache/policy/rfc2616.py | nyov/scrapy-httpcache | dda4d7b3dda0f0df945d02235740c705bccc3d8a | [
"BSD-3-Clause"
] | null | null | null | scrapy_httpcache/policy/rfc2616.py | nyov/scrapy-httpcache | dda4d7b3dda0f0df945d02235740c705bccc3d8a | [
"BSD-3-Clause"
] | null | null | null | from time import time
from weakref import WeakKeyDictionary
from scrapy.http import Response
from scrapy.utils.httpobj import urlparse_cached
from .base import CachePolicy, parse_cachecontrol, rfc1123_to_epoch
class RFC2616Policy(CachePolicy):
""" Cache Policy following RFC 2616, implementing browser-like
caching behavior.
"""
MAXAGE = 3600 * 24 * 365 # one year
def __init__(self, settings):
super(RFC2616Policy, self).__init__(settings)
self._cc_parsed = WeakKeyDictionary()
def _parse_cachecontrol(self, r):
if r not in self._cc_parsed:
cch = r.headers.get(b'Cache-Control', b'')
parsed = parse_cachecontrol(cch)
if isinstance(r, Response):
for key in self.ignore_response_cache_controls:
parsed.pop(key, None)
self._cc_parsed[r] = parsed
return self._cc_parsed[r]
def should_cache_request(self, request):
if urlparse_cached(request).scheme in self.ignore_schemes:
return False
cc = self._parse_cachecontrol(request)
# obey user-agent directive "Cache-Control: no-store"
if b'no-store' in cc:
return False
# Any other is eligible for caching
return True
def should_cache_response(self, response, request):
# Respect any locally ignored HTTP codes (like DummyPolicy)
# TODO: add testcase and enable this
#if response.status in self.ignore_http_codes:
# return False
# What is cacheable - https://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec14.9.1
# Response cacheability - https://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec13.4
# Status code 206 is not included because the cache cannot deal with partial content
cc = self._parse_cachecontrol(response)
# obey directive "Cache-Control: no-store"
if b'no-store' in cc:
return False
# Never cache 304 (Not Modified) responses
elif response.status == 304:
return False
# Cache unconditionally if configured to do so
elif self.always_store:
return True
# Any hint on response expiration is good
elif b'max-age' in cc or b'Expires' in response.headers:
return True
# Firefox falls back to a one-year expiration for these statuses if none is set
elif response.status in (300, 301, 308):
return True
# Other statuses without expiration requires at least one validator
elif response.status in (200, 203, 401):
return b'Last-Modified' in response.headers or b'ETag' in response.headers
# Anything else is probably not eligible for caching; it makes no sense
# to cache responses that contain no expiration info and cannot be
# revalidated
else:
return False
def is_cached_response_fresh(self, cachedresponse, request):
cc = self._parse_cachecontrol(cachedresponse)
ccreq = self._parse_cachecontrol(request)
if b'no-cache' in cc or b'no-cache' in ccreq:
return False
now = time()
freshnesslifetime = self._compute_freshness_lifetime(cachedresponse, request, now)
currentage = self._compute_current_age(cachedresponse, request, now)
reqmaxage = self._get_max_age(ccreq)
if reqmaxage is not None:
freshnesslifetime = min(freshnesslifetime, reqmaxage)
if currentage < freshnesslifetime:
return True
if b'max-stale' in ccreq and b'must-revalidate' not in cc:
# From RFC2616: "Indicates that the client is willing to
# accept a response that has exceeded its expiration time.
# If max-stale is assigned a value, then the client is
# willing to accept a response that has exceeded its
# expiration time by no more than the specified number of
# seconds. If no value is assigned to max-stale, then the
# client is willing to accept a stale response of any age."
staleage = ccreq[b'max-stale']
if staleage is None:
return True
try:
if currentage < freshnesslifetime + max(0, int(staleage)):
return True
except ValueError:
pass
# Cached response is stale, try to set validators if any
self._set_conditional_validators(request, cachedresponse)
return False
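# Worked example (hypothetical values, not in the original source):
# freshnesslifetime = 300s, currentage = 360s, the request carries
# "Cache-Control: max-stale=120" and the cached response has no
# must-revalidate -> 360 < 300 + 120, so the stale cached response is
# still treated as fresh.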
def is_cached_response_valid(self, cachedresponse, response, request):
# Use the cached response if the new response is a server error,
# as long as the old response didn't specify must-revalidate.
if response.status >= 500:
cc = self._parse_cachecontrol(cachedresponse)
if b'must-revalidate' not in cc:
return True
# Use the cached response if the server says it hasn't changed.
return response.status == 304
def _set_conditional_validators(self, request, cachedresponse):
if b'Last-Modified' in cachedresponse.headers:
request.headers[b'If-Modified-Since'] = cachedresponse.headers[b'Last-Modified']
if b'ETag' in cachedresponse.headers:
request.headers[b'If-None-Match'] = cachedresponse.headers[b'ETag']
def _get_max_age(self, cc):
try:
return max(0, int(cc[b'max-age']))
except (KeyError, ValueError):
return None
def _compute_freshness_lifetime(self, response, request, now):
# Reference nsHttpResponseHead::ComputeFreshnessLifetime
# https://dxr.mozilla.org/mozilla-central/source/netwerk/protocol/http/nsHttpResponseHead.cpp#706
cc = self._parse_cachecontrol(response)
maxage = self._get_max_age(cc)
if maxage is not None:
return maxage
# Parse date header or synthesize it if none exists
date = rfc1123_to_epoch(response.headers.get(b'Date')) or now
# Try HTTP/1.0 Expires header
if b'Expires' in response.headers:
expires = rfc1123_to_epoch(response.headers[b'Expires'])
# When parsing Expires header fails RFC 2616 section 14.21 says we
# should treat this as an expiration time in the past.
return max(0, expires - date) if expires else 0
# Fallback to heuristic using last-modified header
# This is not in RFC but on Firefox caching implementation
lastmodified = rfc1123_to_epoch(response.headers.get(b'Last-Modified'))
if lastmodified and lastmodified <= date:
return (date - lastmodified) / 10
# This request can be cached indefinitely
if response.status in (300, 301, 308):
return self.MAXAGE
# Insufficient information to compute freshness lifetime
return 0
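# Worked example (hypothetical headers, not in the original source):
# Date: t, Last-Modified: t - 10h, no max-age or Expires -> the heuristic
# above yields a freshness lifetime of 10h / 10 = 1h.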
def _compute_current_age(self, response, request, now):
# Reference nsHttpResponseHead::ComputeCurrentAge
# https://dxr.mozilla.org/mozilla-central/source/netwerk/protocol/http/nsHttpResponseHead.cpp#658
currentage = 0
# If Date header is not set we assume it is a fast connection, and
# clock is in sync with the server
date = rfc1123_to_epoch(response.headers.get(b'Date')) or now
if now > date:
currentage = now - date
if b'Age' in response.headers:
try:
age = int(response.headers[b'Age'])
currentage = max(currentage, age)
except ValueError:
pass
return currentage
| 41.247312 | 105 | 0.643639 | 954 | 7,672 | 5.083857 | 0.275681 | 0.030928 | 0.025979 | 0.023711 | 0.258763 | 0.214433 | 0.162474 | 0.126392 | 0.119175 | 0.100619 | 0 | 0.025571 | 0.286366 | 7,672 | 185 | 106 | 41.47027 | 0.860274 | 0.318431 | 0 | 0.275229 | 0 | 0 | 0.045683 | 0 | 0 | 0 | 0 | 0.005405 | 0 | 1 | 0.091743 | false | 0.018349 | 0.045872 | 0 | 0.394495 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cf42f6f443c43a7f39d1f54ae917b2beaa8ef3a | 4,608 | py | Python | stub-generator/matrix_analytics_stub_generator/kotlin.py | matrix-org/matrix-analytics-events | a0687ca6fbdb7258543d49b99fb88b9201e900b0 | [
"Apache-2.0"
] | 4 | 2022-01-14T17:39:29.000Z | 2022-02-23T20:46:48.000Z | stub-generator/matrix_analytics_stub_generator/kotlin.py | matrix-org/matrix-analytics-events | a0687ca6fbdb7258543d49b99fb88b9201e900b0 | [
"Apache-2.0"
] | 25 | 2021-11-25T22:47:58.000Z | 2022-02-24T14:27:43.000Z | stub-generator/matrix_analytics_stub_generator/kotlin.py | matrix-org/matrix-analytics-events | a0687ca6fbdb7258543d49b99fb88b9201e900b0 | [
"Apache-2.0"
] | 1 | 2021-11-24T09:50:20.000Z | 2021-11-24T09:50:20.000Z | from .schema import Schema, is_mobile_screen_event, first_letter_up, split_text
def compute_kotlin(schema: Schema) -> str:
"""Compute the output for Kotlin."""
is_screen = is_mobile_screen_event(schema.klass)
result = """/*
* Copyright (c) 2021 New Vector Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package im.vector.app.features.analytics.plan
"""
if is_screen:
itf = "VectorAnalyticsScreen"
elif schema.event_name:
itf = "VectorAnalyticsEvent"
else:
itf = ""
if itf:
result += f"import im.vector.app.features.analytics.itf.{itf}\n\n"
result += (
f"// GENERATED FILE, DO NOT EDIT. FOR MORE INFORMATION VISIT\n"
f"// https://github.com/matrix-org/matrix-analytics-events/\n\n"
f"/**\n"
f"{split_text(' * ', schema.data.get('description'))}\n"
f" */\n"
f"data class {schema.klass}(\n"
)
for member in schema.members:
if member.description:
result += f" /**\n"
result += f"{split_text(' * ', member.description)}\n"
result += f" */\n"
if member.required:
defaultValue = ""
else:
defaultValue = "? = null"
result += " "
if member.type == "string":
if member.enum:
result += f"val {member.name}: {first_letter_up(member.name)}"
else:
result += f"val {member.name}: String"
elif member.type == "number":
result += f"val {member.name}: Double"
elif member.type == "integer":
result += f"val {member.name}: Int"
elif member.type == "boolean":
result += f"val {member.name}: Boolean"
else:
raise Exception(f"Not handled yet: {member.type}")
result += f"{defaultValue},\n"
if itf:
result += f") : {itf} " + "{\n"
else:
result += ") {\n"
isFirstEnum = True
for enum in schema.enums:
result += "\n"
result += f" enum class {enum.name} " + "{\n"
enum.values.sort()
for value in enum.values:
if value.description:
if not isFirstEnum:
result += "\n"
result += f" /**\n"
result += f"{split_text(' * ', value.description)}\n"
result += f" */\n"
result += f" {value.name},\n"
isFirstEnum = False
result += " }\n"
if is_screen:
result += "\n"
result += " override fun getName() = screenName.name\n"
elif schema.event_name:
result += "\n"
result += f' override fun getName() = "{schema.event_name}"\n'
result += "\n"
if not schema.members:
result += " override fun getProperties(): Map<String, Any>? = null\n"
else:
if itf:
result += " override fun getProperties(): Map<String, Any>? {\n"
else:
result += " fun getProperties(): Map<String, Any>? {\n"
result += " return mutableMapOf<String, Any>().apply {\n"
for member in schema.members:
if member.name == "screenName" and is_screen:
continue
if member.required:
if member.enum:
result += f' put("{member.name}", {member.name}.name)\n'
else:
result += f' put("{member.name}", {member.name})\n'
else:
if member.enum:
result += ' %s?.let { put("%s", it.name) }\n' % (
member.name,
member.name,
)
else:
result += ' %s?.let { put("%s", it) }\n' % (
member.name,
member.name,
)
result += " }.takeIf { it.isNotEmpty() }\n"
result += " }\n"
result += "}\n"
return result
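# --- Illustrative note (assumption, not part of the original file): for a
# hypothetical schema whose class and event name are both "Error", with one
# required string member "context", compute_kotlin() above would emit Kotlin
# along these lines:
#
#   data class Error(
#       val context: String,
#   ) : VectorAnalyticsEvent {
#
#       override fun getName() = "Error"
#
#       override fun getProperties(): Map<String, Any>? {
#           return mutableMapOf<String, Any>().apply {
#               put("context", context)
#           }.takeIf { it.isNotEmpty() }
#       }
#   }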
| 34.646617 | 87 | 0.501085 | 503 | 4,608 | 4.54672 | 0.304175 | 0.061216 | 0.031482 | 0.03498 | 0.25798 | 0.147355 | 0.112812 | 0.028859 | 0 | 0 | 0 | 0.00273 | 0.364149 | 4,608 | 132 | 88 | 34.909091 | 0.777816 | 0.00651 | 0 | 0.34188 | 0 | 0 | 0.433946 | 0.055774 | 0 | 0 | 0 | 0 | 0 | 1 | 0.008547 | false | 0 | 0.017094 | 0 | 0.034188 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cf4a417a39cb86d2babede76f44ea6d7a3ec33e | 15,103 | py | Python | hokudai_furima/product/views.py | TetsuFe/hokuma | b981a52b3bf8d7268bf791c5827bbe8af90afef6 | [
"MIT"
] | 1 | 2021-02-13T03:51:42.000Z | 2021-02-13T03:51:42.000Z | hokudai_furima/product/views.py | TetsuFe/hokuma | b981a52b3bf8d7268bf791c5827bbe8af90afef6 | [
"MIT"
] | null | null | null | hokudai_furima/product/views.py | TetsuFe/hokuma | b981a52b3bf8d7268bf791c5827bbe8af90afef6 | [
"MIT"
] | 1 | 2021-09-18T09:25:48.000Z | 2021-09-18T09:25:48.000Z | from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from .models import Product, Category
from .forms import ProductForm, ProductImageForm
from django.contrib import messages
from django.conf import settings
from hokudai_furima.chat.models import Talk, Chat
from hokudai_furima.chat.forms import TalkForm
from django.http import HttpResponse
from functools import reduce
import os
from versatileimagefield.placeholder import OnDiscPlaceholderImage
from hokudai_furima.account.models import User
from hokudai_furima.notification.models import Notification
from django.contrib.auth.decorators import login_required
from django.urls import reverse
from django.utils.datastructures import MultiValueDict
import re
from hokudai_furima.todo_list.models import ReportToRecieveTodo, RatingTodo
from rules.contrib.views import permission_required
from .emails import send_decided_buyer_email, send_rating_other_email, send_want_your_product_email, send_cancel_your_product_email
from hokudai_furima.core.decorators import site_rules_confirm_required
from hokudai_furima.core.utils import is_object_form_and_imageforms_valid
from .utils import get_public_product_list
def get_product_by_pk(request, pk):
return get_object_or_404(Product, pk=pk)
def get_product_by_pk_for_chat(request, product_pk, wanting_user_pk):
return get_object_or_404(Product, pk=product_pk)
def get_product_by_product_pk(request, product_pk):
return get_object_or_404(Product, pk=product_pk)
def product_list(request):
    products = Product.objects.filter(published_date__lte=timezone.now()).order_by('published_date')
return render(request, 'product/product_list.html', {'products': products})
@site_rules_confirm_required
@login_required
def create_product(request):
if request.method == "POST":
product_image_forms = []
for i, _file in enumerate(request.FILES.getlist('image')):
product_image_forms.append(ProductImageForm(i, request.POST, {'image':_file}))
product_form = ProductForm(request.POST)
if is_object_form_and_imageforms_valid(product_form, product_image_forms):
product = product_form.save(commit=False)
product.seller = request.user
product.save()
for product_image_form in product_image_forms:
product_image = product_image_form.save(commit=False)
product_image.product = product
product_image.save()
product.productimage_set.add(product_image)
product.save()
messages.success(request, '出品に成功しました')
response = redirect('product:product_details', pk=product.pk)
response['location'] += '?is_redirect_from_created_product=true'
return response
else:
product_form = ProductForm()
product_image_forms = [ProductImageForm(_i) for _i in range(4)]
return render(request, 'product/create_product.html', {'product_form': product_form, 'product_image_forms': product_image_forms})
def make_product_image_forms(request):
product_image_forms = []
for i, _file in enumerate(request.FILES.getlist('image')):
product_image_forms.append(ProductImageForm(i, request.POST, {'image':_file}))
return product_image_forms
def get_posted_product_images(request):
posted_images = request.FILES.getlist('image')
return posted_images
@site_rules_confirm_required
@login_required
def update_product(request, product_pk):
product = get_object_or_404(Product, pk=product_pk)
if product.is_sold:
return render(request, 'product/cant_update_sold_product.html', {'product_name': product.title, 'product_pk': product.pk})
product_seller_id = product.seller.id
if product_seller_id != request.user.id:
return HttpResponse('invalid request')
else:
if request.method == "POST":
product_form = ProductForm(request.POST, instance=product)
product_image_forms = make_product_image_forms(request)
if is_object_form_and_imageforms_valid(product_form, product_image_forms):
product = product_form.save(commit=False)
product.seller = request.user
product.save()
changed_image_flags = [request.POST['image_'+str(i)+'_exists'] for i in range(4)]
changed_flag_1_length = len([_ for _ in changed_image_flags if _ == '1'])
before_product_images = [bpi for bpi in product.productimage_set.all()]
posted_images = get_posted_product_images(request)
posted_image_index = 0
for image_form_index, flag in enumerate(changed_image_flags):
if flag == '1':
if posted_image_index < len(posted_images):
if image_form_index < len(before_product_images):
product_image = before_product_images[image_form_index]
product_image.image = product_image_forms[posted_image_index].save(commit=False).image
product_image.product = product
product_image.update()
posted_image_index += 1
else:
product_image = product_image_forms[posted_image_index].save(commit=False)
product_image.product = product
product_image.save()
product.productimage_set.add(product_image)
product.save()
posted_image_index += 1
elif flag == '2':
before_product_image = before_product_images[image_form_index]
before_product_image.delete()
messages.success(request, '商品情報を更新しました')
return redirect('product:product_details', pk=product.pk)
product_form = ProductForm(instance=product)
product_image_forms = []
product_images = product.productimage_set.all()
product_image_thumbnail_urls = [product_image.thumbnail_url for product_image in product_images]
for _i in range(4):
if _i < len(product_images):
product_image_forms.append(ProductImageForm(_i, instance=product_images[_i]))
else:
product_image_forms.append(ProductImageForm(_i))
return render(request, 'product/update_product.html', {'product_form': product_form, 'product_image_forms': product_image_forms, 'product':product, 'product_image_thumbnail_urls': product_image_thumbnail_urls, 'placeholder_image_number_list': range(len(product_image_thumbnail_urls), 4)})
@permission_required('products.can_access', fn=get_product_by_pk, raise_exception=True)
def product_details(request, pk):
product = get_object_or_404(Product, pk=pk)
if request.user.pk != product.seller.pk:
product.increment_watched_count()
wanting_users = product.wanting_users.all()
ogp_image = product.productimage_set.first()
if ogp_image:
ogp_image_url = ogp_image.thumbnail_url
else:
ogp_image_url = None
if request.user.is_authenticated:
if request.user == product.seller:
chatting_users = list(map(lambda x:x.product_wanting_user, Chat.objects.filter(product=product)))
chatting_but_not_wanting_users = [user for user in chatting_users if user not in wanting_users]
contexts = {'product': product, 'wanting_users': wanting_users, 'chatting_but_not_wanting_users': chatting_but_not_wanting_users, 'ogp_image_url': ogp_image_url}
print(request.GET)
is_redirect_from_created_product = request.GET.get('is_redirect_from_created_product')
if is_redirect_from_created_product:
contexts['is_redirect_from_created_product'] = is_redirect_from_created_product
print(contexts)
return render(request, 'product/product_details.html', contexts)
else:
return render(request, 'product/product_details.html', {'product': product, 'wanting_users': wanting_users, 'ogp_image_url': ogp_image_url})
return render(request, 'product/product_details.html', {'product': product, 'ogp_image_url': ogp_image_url})
@site_rules_confirm_required
@login_required
@permission_required('products.can_access', fn=get_product_by_pk, raise_exception=True)
def want_product(request, pk):
if request.method == 'POST':
wanting_user = request.user
product = get_object_or_404(Product, pk=pk)
product.wanting_users.add(wanting_user)
product.save()
relative_url = reverse('product:product_details', kwargs={'pk': product.pk})
notification = Notification(reciever=product.seller, message=wanting_user.username+'さんが「'+product.title+'」の購入を希望しました。', relative_url=relative_url)
notification.save()
send_want_your_product_email(pk, wanting_user.pk, product.seller.email)
messages.success(request, '購入希望が送信されました')
return redirect('product:product_details', pk=product.pk)
else:
return HttpResponse('can\'t accept GET request')
@site_rules_confirm_required
@login_required
@permission_required('products.can_access', fn=get_product_by_pk, raise_exception=True)
def cancel_want_product(request, pk):
product = get_object_or_404(Product, pk=pk)
product.wanting_users.remove(request.user)
messages.success(request, '購入希望をキャンセルしました')
send_cancel_your_product_email(pk, request.user.pk, product.seller.email)
return redirect('product:product_details', pk=product.pk)
@site_rules_confirm_required
@permission_required('products.can_access', fn=get_product_by_pk_for_chat, raise_exception=True)
@login_required
def product_direct_chat(request, product_pk, wanting_user_pk):
wanting_user = get_object_or_404(User, pk=wanting_user_pk)
product = get_object_or_404(Product, pk=product_pk)
if (request.user == wanting_user and request.user != product.seller) or (request.user == product.seller and request.user != wanting_user):
chat = Chat.objects.filter(product=product, product_wanting_user=wanting_user, product_seller=product.seller)
if chat.exists():
talks = chat[0].talk_set.all().order_by('created_date')
else:
            # If no chat room exists yet, create a new one (it is not saved
            # here; it is only persisted once the first talk is posted).
            # Anyone other than the seller may freely open a chat room.
            # The seller may only open one when the other user has asked to buy.
if request.user != product.seller or wanting_user in product.wanting_users.all():
chat = Chat(product=product, product_wanting_user=wanting_user, product_seller=product.seller, created_date=timezone.now())
talks = []
else:
return HttpResponse('invalid request')
if request.user == product.seller:
talk_reciever_id = wanting_user.id
else:
talk_reciever_id = product.seller.id
talk_form = TalkForm()
return render(request, 'product/product_direct_chat.html', {'product': product, 'form': talk_form, 'talks':talks, 'wanting_user': wanting_user, 'chat': chat, 'talk_reciever_id': talk_reciever_id})
else:
return HttpResponse('invalid request')
@site_rules_confirm_required
@login_required
def decide_to_sell(request, product_pk, wanting_user_pk):
wanting_user = get_object_or_404(User, pk=wanting_user_pk)
product = get_object_or_404(Product, pk=product_pk)
if request.user == product.seller:
product.is_sold = True
product.sold_date = timezone.now()
product.buyer = wanting_user
product.update()
relative_url = reverse('product:product_direct_chat', kwargs={'product_pk': product.pk, 'wanting_user_pk': wanting_user.pk})
notification = Notification(reciever=wanting_user, message=request.user.username+'が「'+product.title+'」をあなたに販売することを確定しました。チャットで出品者と取引方法を確認し合ってください', relative_url=relative_url)
notification.save()
todo = ReportToRecieveTodo(user=product.buyer, relative_url=relative_url, product=product)
todo.set_template_message()
todo.save()
send_decided_buyer_email(product_pk, wanting_user_pk, wanting_user.email)
messages.success(request, '購入者を決定しました。チャットで購入者と話し合いの上、商品と料金の受け渡し方法を決定してください。このサイト上での決済はできませんのでご注意ください。')
return redirect('product:product_details', pk=product.pk)
else:
return HttpResponse('invalid request')
@site_rules_confirm_required
@permission_required('products.can_access', fn=get_product_by_product_pk, raise_exception=True)
@login_required
def complete_to_recieve(request, product_pk):
product = get_object_or_404(Product, pk=product_pk)
if request.user == product.buyer:
rating_relative_url = reverse('rating:post_rating', kwargs={'product_pk': product.pk})
notification = Notification(reciever=request.user, message=product.seller.username+'との間での「'+product.title+'」の受け渡しの完了を確認しました。最後に出品者を評価してください。', relative_url=rating_relative_url)
notification.save()
notification = Notification(reciever=product.seller, message=request.user.username+'との間での「'+product.title+'」の受け渡しの完了を確認しました。最後に購入者を評価してください。', relative_url=rating_relative_url)
notification.save()
report_to_recieve_todo = ReportToRecieveTodo.objects.get(user=product.buyer, product=product)
report_to_recieve_todo.done()
report_to_recieve_todo.update()
seller_rating_todo = RatingTodo(user=product.seller, relative_url=rating_relative_url, product=product)
seller_rating_todo.set_template_message()
seller_rating_todo.save()
buyer_rating_todo = RatingTodo(user=product.buyer, relative_url=rating_relative_url, product=product)
buyer_rating_todo.set_template_message()
buyer_rating_todo.save()
send_rating_other_email(product_pk, product.seller.username, product.buyer.email)
send_rating_other_email(product_pk, product.buyer.username, product.seller.email)
messages.success(request, '商品の受け取り処理が完了しました。最後に出品者を評価してください。')
return redirect('rating:post_rating', product_pk=product.pk)
else:
return HttpResponse('invalid request')
def category_details(request, pk):
category = get_object_or_404(Category, pk=pk)
category_parent_chain = [category]
temp_parent_category = category.parent
while(temp_parent_category):
category_parent_chain.append(temp_parent_category)
temp_parent_category = temp_parent_category.parent
category_parent_chain.reverse()
category_products = get_public_product_list(category.product_category_products.all().order_by('-id'))
child_categories = category.children.all()
return render(request, 'product/category_details.html', {'category': category, 'category_parent_chain': category_parent_chain, 'child_categories': child_categories, 'category_products': category_products})
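# --- Illustrative note (names are made up): for a category tree
# Books > Comics > Manga, requesting the details page for "Manga" makes the
# while-loop above walk the parent links and, after reverse(), produce
# category_parent_chain == [Books, Comics, Manga] (root first), which the
# template can render as breadcrumbs.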
| 51.19661 | 296 | 0.716877 | 1,821 | 15,103 | 5.606809 | 0.120813 | 0.050539 | 0.034966 | 0.019197 | 0.556709 | 0.421743 | 0.339471 | 0.273066 | 0.24094 | 0.222331 | 0 | 0.004432 | 0.193207 | 15,103 | 294 | 297 | 51.370748 | 0.833484 | 0.007548 | 0 | 0.339921 | 0 | 0.011858 | 0.102836 | 0.05699 | 0 | 0 | 0 | 0 | 0 | 1 | 0.059289 | false | 0 | 0.094862 | 0.011858 | 0.256917 | 0.007905 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cf7c0d7c5e6384a67c09a07fd8d29914a976217 | 911 | py | Python | netbox/virtualization/forms/bulk_create.py | cybarox/netbox | ea197eff5f4fe925bb354d1375912decd81752bd | [
"Apache-2.0"
] | 4,994 | 2019-07-01T13:15:44.000Z | 2022-03-31T19:55:45.000Z | netbox/virtualization/forms/bulk_create.py | cybarox/netbox | ea197eff5f4fe925bb354d1375912decd81752bd | [
"Apache-2.0"
] | 4,045 | 2019-07-01T14:24:09.000Z | 2022-03-31T16:07:39.000Z | netbox/virtualization/forms/bulk_create.py | cybarox/netbox | ea197eff5f4fe925bb354d1375912decd81752bd | [
"Apache-2.0"
] | 1,225 | 2019-07-01T15:34:03.000Z | 2022-03-31T16:47:09.000Z | from django import forms
from utilities.forms import BootstrapMixin, ExpandableNameField, form_from_model
from virtualization.models import VMInterface, VirtualMachine
__all__ = (
'VMInterfaceBulkCreateForm',
)
class VirtualMachineBulkAddComponentForm(BootstrapMixin, forms.Form):
pk = forms.ModelMultipleChoiceField(
queryset=VirtualMachine.objects.all(),
widget=forms.MultipleHiddenInput()
)
name_pattern = ExpandableNameField(
label='Name'
)
def clean_tags(self):
# Because we're feeding TagField data (on the bulk edit form) to another TagField (on the model form), we
# must first convert the list of tags to a string.
return ','.join(self.cleaned_data.get('tags'))
class VMInterfaceBulkCreateForm(
form_from_model(VMInterface, ['enabled', 'mtu', 'description', 'tags']),
VirtualMachineBulkAddComponentForm
):
pass
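# --- Illustrative note (an assumption about form_from_model's behavior): it
# appears to build a plain Form class whose fields mirror the named model
# fields, so the composed class above behaves roughly like:
#
# class VMInterfaceBulkCreateForm(VirtualMachineBulkAddComponentForm):
#     enabled = forms.BooleanField(required=False)
#     mtu = forms.IntegerField(required=False)
#     description = forms.CharField(required=False)
#     tags = TagField(required=False)  # hypothetical field type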
| 29.387097 | 113 | 0.726674 | 93 | 911 | 7 | 0.612903 | 0.024578 | 0.039939 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.18551 | 911 | 30 | 114 | 30.366667 | 0.877358 | 0.16685 | 0 | 0 | 0 | 0 | 0.078042 | 0.033069 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0.047619 | 0.142857 | 0.047619 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cf9a05ce6bc3958bcc674a40c30311812b35c86 | 10,373 | py | Python | example/test/L8_myClass.py | Michael8968/skulpt | 15956a60398fac92ee1dab25bf661ffc003b2eaf | [
"MIT"
] | 2 | 2021-12-18T06:34:26.000Z | 2022-01-05T05:08:47.000Z | example/test/L8_myClass.py | Michael8968/skulpt | 15956a60398fac92ee1dab25bf661ffc003b2eaf | [
"MIT"
] | null | null | null | example/test/L8_myClass.py | Michael8968/skulpt | 15956a60398fac92ee1dab25bf661ffc003b2eaf | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#!@Time :2018/4/11 17:14
#!@Author :@liuweqia
#!@File :class.py
import pygame,random
from pygame.locals import *
from pygame import Rect
class Arrow(pygame.sprite.Sprite):
def __init__(self, x, y, property=0):
pygame.sprite.Sprite.__init__(self)
# self.target_surface = target
self.x = x
self.y = y
self.Img = []
if property:
ff = pygame.image.load('lesson8/img/fireArrow.png').convert_alpha()
self.Img.append(ff.subsurface(Rect(0,0,48,15)))
self.Img.append(ff.subsurface(Rect(0,0,48,15)))
self.Img.append(ff.subsurface(Rect(0,0,48,15)))
else:
ii = pygame.image.load('lesson8/img/iceArrow.png').convert_alpha()
self.Img.append(ii.subsurface(Rect(0,0,48,15)))
self.Img.append(ii.subsurface(Rect(0,0,48,15)))
self.Img.append(ii.subsurface(Rect(0,0,48,15)))
self.image = self.Img[0]
self.last_time = 0
self.property = property
self.frame = 0
self.old_frame = -1
self.rect = Rect(self.x,self.y,48,15)
self.rect.left = x
self.rect.top = y
#self.mask = pygame.mask.from_surface(self.image)
def update(self, current_time, rate = 300):
#self.x += 10
#self.rect = Rect(self.x,self.y,48,15)
self.rect.top = self.y
self.rect.left += 10
self.rect = Rect(self.rect.left,self.rect.top,48,15)
if current_time > self.last_time + rate:
self.frame += 1
if self.frame >= 3:
self.frame = 0
self.last_time = current_time
if self.frame != self.old_frame:
self.image = self.Img[self.frame]
#self.mask = pygame.mask.from_surface(self.image)
self.old_frame = self.frame
if self.rect.left > 1000:
self.kill()
class Fire(pygame.sprite.Sprite):
def __init__(self, x,y,huo,bing,property = 0):
pygame.sprite.Sprite.__init__(self)
# self.target_surface = target
self.x = x
self.y = y
self.img = []
self.last_time = 0
if property == 0:
for i in range(3):
self.img.append(huo[i])
else:
self.img.append(bing)
self.image = self.img[0]
self.rect = Rect(self.x,self.y, 30,26)
self.property = property
self.frame = 0
self.old_frame = -1
def update(self, current_time, rate = 300):
self.x -= 5
self.rect = Rect(self.x,self.y, 30,26)
if self.property == 1:
self.image = self.img[0]
else:
if current_time > self.last_time + rate:
self.frame += 1
if self.frame >= 3:
self.frame = 0
self.last_time = current_time
if self.frame != self.old_frame:
self.image = self.img[self.frame]
self.old_frame = self.frame
if self.rect.left <= -20:
self.kill()
class Rock(pygame.sprite.Sprite):
def __init__(self, x, y, width, height):
pygame.sprite.Sprite.__init__(self)
self.x = x
self.y = y
self.image = pygame.Surface((width,height))
self.rect = Rect(self.x,self.y,width,height)
#print(self.rect)
def update(self, current_time):
pass
class Long(pygame.sprite.Sprite):
def __init__(self, x, y,group):
pygame.sprite.Sprite.__init__(self)
# self.target_surface = target
self.x = x
self.y = y
self.group = group
self.hp = 100
self.huoimg = []
self.bingimg = []
ll1 = pygame.image.load('lesson8/img/long11.png').convert_alpha()
ll1 = pygame.transform.smoothscale(ll1, (200,210))
ll2 = pygame.image.load('lesson8/img/long22.png').convert_alpha()
ll2 = pygame.transform.smoothscale(ll2, (200,210))
ll3 = pygame.image.load('lesson8/img/long33.png').convert_alpha()
ll3 = pygame.transform.smoothscale(ll3, (200,210))
ll4 = pygame.image.load('lesson8/img/long44.png').convert_alpha()
ll4 = pygame.transform.smoothscale(ll4, (200, 210))
ll5 = pygame.image.load('lesson8/img/long55.png').convert_alpha()
ll5 = pygame.transform.smoothscale(ll5, (200, 210))
ll6 = pygame.image.load('lesson8/img/long66.png').convert_alpha()
ll6 = pygame.transform.smoothscale(ll6, (200, 210))
self.huoimg.append(ll1)
self.huoimg.append(ll2)
self.huoimg.append(ll1)
self.huoimg.append(ll2)
self.huoimg.append(ll1)
self.huoimg.append(ll3)
self.bingimg.append(ll4)
self.bingimg.append(ll5)
self.bingimg.append(ll4)
self.bingimg.append(ll5)
self.bingimg.append(ll4)
self.bingimg.append(ll6)
#self.img.append(ll2)
self.image = ll1
self.rect = Rect(self.x, self.y, 200, 210)
self.last_time = 0
self.frame = 0
self.old_frame = -1
self.ff = None
self.fireGroup = []
self.property = 0
self.change = 0
        # Create the fire and ice sprites that the dragon breathes
self.huo = []
huo1 = pygame.image.load('lesson8/img/huo1.png').convert_alpha()
huo1 = pygame.transform.smoothscale(huo1, (30,26))
self.huo.append(huo1)
huo2 = pygame.image.load('lesson8/img/huo2.png').convert_alpha()
huo2 = pygame.transform.smoothscale(huo2, (30,26))
self.huo.append(huo2)
huo3 = pygame.image.load('lesson8/img/huo3.png').convert_alpha()
huo3 = pygame.transform.smoothscale(huo3, (30,26))
self.huo.append(huo3)
self.bing = pygame.image.load('lesson8/img/bing1.png').convert_alpha()
self.bing = pygame.transform.smoothscale(self.bing, (30,26))
def update(self, current_time, rate = 300):
global fireGroup
self.rect = Rect(self.x, self.y, 200, 210)
if current_time > self.last_time + rate:
self.change += 1
self.frame += 1
if self.frame == 5:
if self.property == 0:
self.ff = Fire(self.x, self.y+100,self.huo,self.bing)
else:
self.ff = Fire(self.x, self.y+100,self.huo,self.bing, 1)
#self.property = 1
#print(self.ff.rect)
self.ff.add(self.group)
self.fireGroup.append(self.ff)
if self.frame >= 6:
self.y = random.randint(0,420)
self.frame = 0
self.last_time = current_time
if self.change >= 20:
if self.property == 0:
self.property = 1
else:
self.property = 0
self.change = 0
if self.frame != self.old_frame:
if self.property == 0:
self.image = self.huoimg[self.frame]
else:
self.image = self.bingimg[self.frame]
self.old_frame = self.frame
class Hero(pygame.sprite.Sprite):
def __init__(self, x, y):
pygame.sprite.Sprite.__init__(self)
self.x = x
self.y = y
self.rect = Rect(self.x, self.y, 62,90)
self.last_time = 1
self.frame = 0
self.old_frame = -1
self.direction = 0
self.right = []
self.gravity = 3
self.jumping = False
self.air = False
self.jump_vel = -15
self.player_start_y = self.y
self.moveLeft = True
self.moveRight = True
self.moving = False
self.shoot = False
self.shootTime = 0
self.hp = 100
for i in range(4):
aa = pygame.image.load('lesson8/img/' + str(4-i)*2 + '.png').convert_alpha()
self.right.append(aa)
self.left = []
for i in range(4):
aa = pygame.image.load('lesson8/img/' + str(4-i)*2 + '.png').convert_alpha()
aa = pygame.transform.flip(aa,1,0)
self.left.append(aa)
self.shootImg = []
aa = pygame.image.load('lesson8/img/she.png').convert_alpha()
#aa = pygame.transform.smoothscale(aa, (79,90))
self.shootImg.append(aa)
self.shootImg.append(aa)
self.up = pygame.image.load('lesson8/img/11.png').convert_alpha()
self.stop = pygame.image.load('lesson8/img/stand.png').convert_alpha()
self.image = self.stop
def update(self, current_time, rate = 20):
RIGHT = 0
LEFT = 1
UP = 2
STOP = 3
self.x = self.rect.x
self.y = self.rect.y
if current_time > self.last_time + rate:
self.frame += 1
if self.frame >= 4:
self.frame = 0
#self.shoot = False
self.last_time = current_time
if self.frame != self.old_frame:
#print(self.shoot)
if self.shoot:
self.shootTime += 1
self.image = self.shootImg[self.frame%2]
if self.shootTime == 4:
self.shootTime = 0
self.shoot = False
elif self.direction == RIGHT:# and self.moveRight:
#self.image = self.right[self.frame]
#self.x += 5
if self.air:
self.image = self.up
elif not self.moving:
self.image = self.stop
elif not self.air:
self.image = self.right[self.frame]
#elif not self.moving:
# self.image = self.stop
elif self.direction == LEFT:# and self.moveLeft:
#self.image = self.left[self.frame]
#self.x -= 5
if self.air:
self.image = pygame.transform.flip(self.up ,1 ,0)
elif not self.moving:
self.image = pygame.transform.flip(self.stop, 1, 0)
elif not self.air:
self.image = self.left[self.frame]
elif self.direction == UP:
self.image = self.up
elif self.direction == STOP:
#self.jump = 0
self.image = self.stop
self.old_frame = self.frame
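# --- Illustrative sketch (not in the original file): Rock is the only class
# here that needs no image assets, so it can be exercised without a display.
if __name__ == '__main__':
    pygame.init()
    rock = Rock(10, 20, 30, 40)
    # Rock stores its position and size directly in a pygame Rect.
    assert rock.rect == Rect(10, 20, 30, 40)
    print(rock.rect)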
| 34.576667 | 88 | 0.535139 | 1,328 | 10,373 | 4.103163 | 0.112952 | 0.056157 | 0.045329 | 0.068636 | 0.620297 | 0.504496 | 0.432189 | 0.404478 | 0.346853 | 0.287759 | 0 | 0.048277 | 0.337029 | 10,373 | 299 | 89 | 34.692308 | 0.744074 | 0.066422 | 0 | 0.485477 | 0 | 0 | 0.036458 | 0.023097 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041494 | false | 0.004149 | 0.012448 | 0 | 0.074689 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cf9c6386316374c888d01ac85367c3d97ca4259 | 324 | py | Python | app/utils/uuid.py | kanson1996/IIMS | 4612e3f4ce7b3f6c49a46e26112aad8254bc0592 | [
"MIT"
] | null | null | null | app/utils/uuid.py | kanson1996/IIMS | 4612e3f4ce7b3f6c49a46e26112aad8254bc0592 | [
"MIT"
] | null | null | null | app/utils/uuid.py | kanson1996/IIMS | 4612e3f4ce7b3f6c49a46e26112aad8254bc0592 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Created by Kanson on 2020/1/12 16:03.
"""
import random
import datetime
def tid_maker():
    # Timestamp-based id: epoch seconds followed by three random digits.
    a = str(int(datetime.datetime.now().timestamp()))
    # Keep the digits as a string so leading zeros survive and the id
    # always has a fixed length (int() would strip them).
    b = ''.join(str(random.randint(0, 9)) for _ in range(3))
    return a + b
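# Example (illustrative values): tid_maker() -> '1578812583042', i.e. a
# 10-digit epoch-seconds prefix followed by three random digits.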
| 18 | 72 | 0.552469 | 52 | 324 | 3.423077 | 0.730769 | 0.022472 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.066116 | 0.253086 | 324 | 17 | 73 | 19.058824 | 0.669421 | 0.237654 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.222222 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cfb5bd5a18d50f9f80e29de3630c1d8c65d9055 | 1,194 | py | Python | GolVe_Classification/GolVe+lr/make_glove_data.py | majingliang/machine_learning | cd70e3a07bd1f0803ebcffebca565e70aff96de8 | [
"MIT"
] | 1 | 2019-09-29T13:36:29.000Z | 2019-09-29T13:36:29.000Z | GolVe_Classification/GolVe+lr/make_glove_data.py | yummydeli/machine_learning | 54471182ac21ef0eee26557a7bd6f3a3dc3a09bd | [
"MIT"
] | null | null | null | GolVe_Classification/GolVe+lr/make_glove_data.py | yummydeli/machine_learning | 54471182ac21ef0eee26557a7bd6f3a3dc3a09bd | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import pickle
train_comment_path = '/Users/slade/Documents/YMM/Code/UCGPCG/src/jobs/terror_recognition/train_model/new_model/comment_cutwords.csv'
train_comment_data = pd.read_csv(open(train_comment_path, 'rU'), header=0)
train_comment_data = train_comment_data.sort_values(['label'], ascending=False)
for i in range(train_comment_data.shape[0]):
if i == 0:
content = train_comment_data.iloc[i, 1]
else:
contt = str(train_comment_data.iloc[i, 1])
content += contt
f = open(
'/Users/slade/Documents/YMM/Code/UCGPCG/src/jobs/terror_recognition/train_model/new_model/model_data/content.txt',
'w')
f.write(content)
f.close()
# run sh demo.sh
# follow the bolg content
vector_path = '/Users/slade/glove/vectors.txt'
with open(vector_path, 'r') as file1:
vocab_emb = {}
for line in file1.readlines():
row = line.strip().split(' ')
        vocab_emb[row[0]] = [float(x) for x in row[1:]]  # float() is safer and faster than eval() for numeric strings
with open(
'/Users/slade/Documents/YMM/Code/UCGPCG/src/jobs/terror_recognition/train_model/new_model/model_data/vocab_emb.dat',
'wb') as f:
pickle.dump(vocab_emb, f, pickle.HIGHEST_PROTOCOL)
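# --- Illustrative sketch (not in the original file): the saved embeddings
# can be restored later, e.g. for the classifier stage.
def load_vocab_emb(path):
    with open(path, 'rb') as f:
        return pickle.load(f)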
| 31.421053 | 132 | 0.70938 | 186 | 1,194 | 4.354839 | 0.419355 | 0.118519 | 0.118519 | 0.081481 | 0.360494 | 0.360494 | 0.306173 | 0.306173 | 0.306173 | 0.306173 | 0 | 0.00892 | 0.154941 | 1,194 | 37 | 133 | 32.27027 | 0.793855 | 0.031826 | 0 | 0 | 0 | 0.111111 | 0.325239 | 0.314831 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cfb63a64bae5ca66fd31a9408f58acd538f1711 | 4,215 | py | Python | 2021/Practice/Solution.py | Ashwin-op/Google-HashCode-2020 | 05b385ce9454673ecd85fe9461aa1ffde515873d | [
"MIT"
] | 5 | 2020-03-10T08:24:27.000Z | 2020-07-10T15:25:08.000Z | 2021/Practice/Solution.py | Ashwin-op/Google-HashCode-2020 | 05b385ce9454673ecd85fe9461aa1ffde515873d | [
"MIT"
] | null | null | null | 2021/Practice/Solution.py | Ashwin-op/Google-HashCode-2020 | 05b385ce9454673ecd85fe9461aa1ffde515873d | [
"MIT"
] | 1 | 2020-06-29T04:29:55.000Z | 2020-06-29T04:29:55.000Z | import operator
import os
from tqdm import tqdm
PART_SIZE = 100
def solve2(teams, pizzas):
if len(pizzas) < 1:
return
for team in tqdm(teams):
if len(pizzas) < 1:
break
maxScore = 0
maxScoreIdx = 0
for i, pizza in enumerate(pizzas[0:PART_SIZE]):
score = team.calcSc(pizza)
if score > maxScore:
maxScore = score
maxScoreIdx = i
team.add(pizzas[maxScoreIdx])
pizzas.pop(maxScoreIdx)
def solve(teams, pizzas):
if len(pizzas) < 1:
return
for team in tqdm(teams):
if len(pizzas) < 1:
break
for i in range(team.cap):
maxScore = 0
maxScoreIdx = 0
for i, pizza in enumerate(pizzas[0:PART_SIZE]):
score = team.calcSc(pizza)
if score > maxScore:
maxScore = score
maxScoreIdx = i
team.add(pizzas[maxScoreIdx])
pizzas.pop(maxScoreIdx)
class Team():
def __init__(self, cap):
self.cap = cap
self.pizzas = []
self.ings = set()
# @staticmethod
# def calcScore(pizzas):
# uniq = set.union( [p.ings for p in pizzas] )
# total = 0
# for p in pizzas:
# total += p.count
# return len(uniq) / total
def calcSc(self, pizza):
comm = self.ings.intersection(pizza.ings)
uniq = self.ings.union(pizza.ings)
total = pizza.count
for p in self.pizzas:
total += p.count
sc = len(uniq) - len(comm) # (len(uniq)**2)/ (total)
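        # e.g. self.ings == {'a', 'b'} and pizza.ings == {'b', 'c'} gives
        # uniq == {'a', 'b', 'c'} and comm == {'b'}, so sc == 3 - 1 == 2:
        # new ingredients are rewarded, overlap is penalized.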
return sc
def add(self, pizza):
assert 1 + len(self.pizzas) <= self.cap
self.pizzas.append(pizza)
assert pizza.selected == False
pizza.selected = True
self.ings = self.ings.union(pizza.ings)
def __repr__(self):
return '[{}-{}-{}]'.format(self.cap, [p.index for p in self.pizzas], self.ings)
@property
def is_full(self):
return len(self.pizzas) == self.cap
class Pizza(object):
def __init__(self, index, ings):
self.index = index
self.ings = set(ings)
self.count = len(self.ings)
self.selected = False
self.score = {}
def __repr__(self):
return '[id:{}-len:{}-{}]'.format(self.index, self.count, self.ings)
def readF(filename):
f = open(filename)
nPizza, n2, n3, n4 = [int(x) for x in f.readline().split(' ')[0:4]]
pizzaL = []
teamL2, teamL3, teamL4 = [], [], []
unqPizza = set()
total = 0
for i in range(nPizza):
ings = f.readline().replace('\n', '').split(' ')[1:]
pizzaL.append(
Pizza(i, ings)
)
unqPizza = unqPizza.union(pizzaL[-1].ings)
total += len(pizzaL[-1].ings)
for i in range(n2):
teamL2.append(Team(2))
for i in range(n3):
teamL3.append(Team(3))
for i in range(n4):
teamL4.append(Team(4))
print('------', filename)
print('Avg Ings:', total/len(pizzaL))
print('Total Pizza:', nPizza)
print('Nums:', n2, n3, n4)
print('Unique ings:', len(unqPizza))
print('Total Cap:', n2*2 + n3*3 + n4*4)
return nPizza, n2, n3, n4, pizzaL, teamL2, teamL3, teamL4
def outF(filename, teamL2, teamL3, teamL4):
f = open(filename, 'w+')
nLine = 0
for team in teamL4+teamL3+teamL2:
if team.is_full:
nLine += 1
f.write(str(nLine) + '\n')
for team in teamL4+teamL3+teamL2:
if team.is_full:
s = ' '.join([str(p.index) for p in team.pizzas])
f.write('{} {}\n'.format(team.cap, s))
f.close()
def solveAll(filename):
nPizza, n2, n3, n4, pizzaL, teamL2, teamL3, teamL4 = readF(filename)
pizzaLSorted = sorted(
pizzaL, key=operator.attrgetter('count'), reverse=True)
solve(teamL4, pizzaLSorted)
solve(teamL3, pizzaLSorted)
solve(teamL2, pizzaLSorted)
outF(filename.replace('data/', '')+'.out', teamL2, teamL3, teamL4)
solveAll("./Input/b_little_bit_of_everything.in")
solveAll("./Input/c_many_ingredients.in")
solveAll("./Input/d_many_pizzas.in")
solveAll("./Input/e_many_teams.in")
| 27.019231 | 87 | 0.554686 | 535 | 4,215 | 4.31028 | 0.213084 | 0.031223 | 0.01301 | 0.023851 | 0.344319 | 0.259324 | 0.259324 | 0.259324 | 0.228101 | 0.228101 | 0 | 0.02585 | 0.302491 | 4,215 | 155 | 88 | 27.193548 | 0.758503 | 0.046975 | 0 | 0.285714 | 0 | 0 | 0.055888 | 0.028194 | 0 | 0 | 0 | 0 | 0.016807 | 1 | 0.10084 | false | 0 | 0.02521 | 0.02521 | 0.201681 | 0.05042 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6cfc58827e6770ccfeb2db535c0a4dcb9fd69bd3 | 4,524 | py | Python | day7aventcal2019.py | Capoaleman/Advent-of-code-2019 | 979259698113ed166453bc5eee843317e12fa622 | [
"MIT"
] | null | null | null | day7aventcal2019.py | Capoaleman/Advent-of-code-2019 | 979259698113ed166453bc5eee843317e12fa622 | [
"MIT"
] | null | null | null | day7aventcal2019.py | Capoaleman/Advent-of-code-2019 | 979259698113ed166453bc5eee843317e12fa622 | [
"MIT"
] | null | null | null | # Advent of Code 2019
# challenge day 7
# https://adventofcode.com/2019/day/7
from itertools import permutations
def grav_asis_prog(arr, entrada, output, i=0):
flag = True
while i <= len(arr):
# Add
if arr[i] % 100 == 1:
par1, par2, par3 = arr[i+1:i+4]
mod_1par = arr[i] // 100 % 10
mod_2par = arr[i] // 1000 % 10
x1 = par1 if mod_1par == 1 else arr[par1]
x2 = par2 if mod_2par == 1 else arr[par2]
arr[par3] = x1+x2
i += 4
# Multiply
elif arr[i] % 100 == 2:
par1, par2, par3 = arr[i+1:i+4]
mod_1par = arr[i] // 100 % 10
mod_2par = arr[i] // 1000 % 10
x1 = par1 if mod_1par == 1 else arr[par1]
x2 = par2 if mod_2par == 1 else arr[par2]
arr[par3] = x1*x2
i += 4
# get input
elif arr[i] % 100 == 3:
par1 = arr[i+1]
if flag:
arr[par1] = entrada
flag = False
else:
arr[par1] = output
i += 2
# set output
elif arr[i] % 100 == 4:
par1 = arr[i+1]
resul = par1 if (arr[i] // 100 % 10) == 1 else arr[par1]
i += 2
# return for the 2nd part of the challenge
return resul, True, i
# Jump-if-True
elif arr[i] % 100 == 5:
mod_1par = arr[i] // 100 % 10
mod_2par = arr[i] // 1000 % 10
par1, par2 = arr[i+1:i+3]
x1 = par1 if mod_1par == 1 else arr[par1]
x2 = par2 if mod_2par == 1 else arr[par2]
i = x2 if x1 != 0 else i+3
# Jump-if-False
elif arr[i] % 100 == 6:
mod_1par = arr[i] // 100 % 10
mod_2par = arr[i] // 1000 % 10
par1, par2 = arr[i+1:i+3]
x1 = par1 if mod_1par == 1 else arr[par1]
x2 = par2 if mod_2par == 1 else arr[par2]
i = x2 if x1 == 0 else i+3
# less than
elif arr[i] % 100 == 7:
par1, par2, pos = arr[i+1:i+4]
mod_1par = arr[i] // 100 % 10
mod_2par = arr[i] // 1000 % 10
x1 = par1 if mod_1par == 1 else arr[par1]
x2 = par2 if mod_2par == 1 else arr[par2]
arr[pos] = 1 if x1 < x2 else 0
i += 4
# equals
elif arr[i] % 100 == 8:
par1, par2, pos = arr[i+1:i+4]
mod_1par = arr[i] // 100 % 10
mod_2par = arr[i] // 1000 % 10
x1 = par1 if mod_1par == 1 else arr[par1]
x2 = par2 if mod_2par == 1 else arr[par2]
arr[pos] = 1 if x1 == x2 else 0
i += 4
elif arr[i] == 99:
return output, False, 0
else:
raise ValueError("Unknown instruction {}".format(arr[i]))
    return resul, True, i  # keep the return shape consistent with the other exits
if __name__ == "__main__":
f = open("./aventcal2019/day 7/inputday7.txt", "r")
input_arr = list(map(int, f.readline().split(",")))
# part 2 of the challenge
arr_out = []
for item in permutations([5, 6, 7, 8, 9], 5):
output = 0
arr_A = input_arr.copy()
arr_B = input_arr.copy()
arr_C = input_arr.copy()
arr_D = input_arr.copy()
arr_E = input_arr.copy()
output, flag, i_a = grav_asis_prog(arr_A, item[0], output)
output, flag, i_b = grav_asis_prog(arr_B, item[1], output)
output, flag, i_c = grav_asis_prog(arr_C, item[2], output)
output, flag, i_d = grav_asis_prog(arr_D, item[3], output)
output, flag, i_e = grav_asis_prog(arr_E, item[4], output)
while flag:
output, flag, i_a = grav_asis_prog(arr_A, output, output, i_a)
output, flag, i_b = grav_asis_prog(arr_B, output, output, i_b)
output, flag, i_c = grav_asis_prog(arr_C, output, output, i_c)
output, flag, i_d = grav_asis_prog(arr_D, output, output, i_d)
output, flag, i_e = grav_asis_prog(arr_E, output, output, i_e)
if not flag:
arr_out.append(output)
print(max(arr_out))
# Part 1 of the challenge
total_output = []
for item in permutations([0, 1, 2, 3, 4], 5):
output = 0
for phase in item:
            # grav_asis_prog returns (output, flag, pc); only the output is
            # needed here, and each run gets a fresh copy of the program so
            # one amplifier's writes don't leak into the next.
            output, _, _ = grav_asis_prog(input_arr.copy(), phase, output)
total_output.append(output)
print(max(total_output))
| 37.7 | 75 | 0.482538 | 666 | 4,524 | 3.129129 | 0.15015 | 0.059501 | 0.050384 | 0.079175 | 0.479846 | 0.479846 | 0.479846 | 0.479846 | 0.479846 | 0.345489 | 0 | 0.102828 | 0.398099 | 4,524 | 119 | 76 | 38.016807 | 0.662505 | 0.052608 | 0 | 0.42 | 0 | 0 | 0.015896 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.01 | false | 0 | 0.01 | 0 | 0.05 | 0.02 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f00027a30f6f8e9207dc52114439d0a9cb6f1a7 | 1,349 | py | Python | belt.py | zignig/cqparts_bucket | 9707b0948a9dd1ed514e03c291a3b96fddc4a22d | [
"Apache-2.0"
] | 10 | 2018-09-18T08:09:02.000Z | 2022-03-18T06:24:22.000Z | belt.py | zignig/cqparts-bucket | 9707b0948a9dd1ed514e03c291a3b96fddc4a22d | [
"Apache-2.0"
] | 1 | 2018-08-09T01:57:32.000Z | 2018-08-09T01:57:32.000Z | belt.py | zignig/cqparts-bucket | 9707b0948a9dd1ed514e03c291a3b96fddc4a22d | [
"Apache-2.0"
] | 1 | 2018-12-07T20:14:04.000Z | 2018-12-07T20:14:04.000Z | import cadquery as cq
import cqparts
from cadquery import Solid
from cqparts.params import *
from cqparts.display import render_props, display
from cqparts.constraint import Fixed, Coincident
from cqparts.constraint import Mate
from cqparts.utils.geometry import CoordSystem
class Belt(cqparts.Part):
# Parameters
rad = PositiveFloat(10)
spacing = PositiveFloat(100)
belt_width = PositiveFloat(5)
belt_thickness = PositiveFloat(1)
# default appearance
_render = render_props(template="red")
def profile(self):
p = cq.Workplane("XZ").rect(self.belt_width, self.belt_thickness)
return p
def make(self):
outer = self.profile().extrude(self.spacing).translate((0, 0, -self.rad))
p2 = (
self.profile()
.revolve(180, (2, self.rad), (1, self.rad))
.translate((0, 0, -self.rad))
)
outer = outer.union(p2)
p3 = self.profile().extrude(self.spacing).translate((0, 0, self.rad))
outer = outer.union(p3)
p4 = (
self.profile()
.revolve(180, (-2, self.rad), (1, self.rad))
.translate((0, -self.spacing, -self.rad))
)
outer = outer.union(p4)
return outer
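# --- Illustrative note (derived from make() above): the belt loop is built
# from two straight extruded runs of the rectangular profile joined by two
# 180-degree revolved end caps, closing a loop around two pulley centers
# `spacing` apart with bend radius `rad`.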
if __name__ == "__main__":
from cqparts.display import display
B = Belt()
display(B)
| 26.98 | 81 | 0.618977 | 164 | 1,349 | 5 | 0.359756 | 0.068293 | 0.040244 | 0.054878 | 0.302439 | 0.27561 | 0.27561 | 0.27561 | 0.229268 | 0.229268 | 0 | 0.02997 | 0.257969 | 1,349 | 49 | 82 | 27.530612 | 0.789211 | 0.021497 | 0 | 0.052632 | 0 | 0 | 0.009871 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.236842 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f00cc4632a0795ede88a3a5eecaae3f9f488916 | 1,497 | py | Python | app/book/serializers.py | albamr09/django-api | e50d694f1551ea49bc74916f2e5993f47a8c60db | [
"MIT"
] | null | null | null | app/book/serializers.py | albamr09/django-api | e50d694f1551ea49bc74916f2e5993f47a8c60db | [
"MIT"
] | null | null | null | app/book/serializers.py | albamr09/django-api | e50d694f1551ea49bc74916f2e5993f47a8c60db | [
"MIT"
] | null | null | null | from rest_framework import serializers
from core.models import Tag, Author, Book
class TagSerializer(serializers.ModelSerializer):
"""Serializer for tag objects"""
class Meta:
model = Tag
fields = ('id', 'name')
read_only_fields = ('id',)
class AuthorSerializer(serializers.ModelSerializer):
"""Serializer for author objects"""
class Meta:
model = Author
fields = ('id', 'name')
read_only_fields = ('id',)
class BookSerializer(serializers.ModelSerializer):
"""Serialize a book"""
authors = serializers.PrimaryKeyRelatedField(
many=True,
queryset=Author.objects.all()
)
tags = serializers.PrimaryKeyRelatedField(
many=True,
queryset=Tag.objects.all()
)
class Meta:
model = Book
fields = (
'id', 'title', 'pages', 'year', 'tags', 'authors',
'price', 'link'
)
read_only_fields = ('id',)
class BookDetailSerializer(BookSerializer):
"""Serialize a book detail"""
# Serialize the author attribute with the Author serializer
authors = AuthorSerializer(many=True, read_only=True)
# Serialize the tag attribute with the Tag serializer
tags = TagSerializer(many=True, read_only=True)
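# --- Illustrative example (assumption, field values are made up): a book
# rendered through BookDetailSerializer would look roughly like
# {
#     "id": 1, "title": "Example", "pages": 100, "year": 2020,
#     "tags": [{"id": 3, "name": "fiction"}],
#     "authors": [{"id": 2, "name": "Jane Doe"}],
#     "price": 10, "link": "https://example.com"
# }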
class BookImageSerializer(serializers.ModelSerializer):
"""Serializer for uploading images to books"""
class Meta:
model = Book
fields = ('id', 'image')
read_only_fields = ('id',)
| 25.372881 | 63 | 0.633935 | 150 | 1,497 | 6.253333 | 0.333333 | 0.06823 | 0.059701 | 0.06823 | 0.295309 | 0.1258 | 0.070362 | 0.070362 | 0 | 0 | 0 | 0 | 0.252505 | 1,497 | 58 | 64 | 25.810345 | 0.838248 | 0.166333 | 0 | 0.388889 | 0 | 0 | 0.051597 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.055556 | 0 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f01cf09bb6250224e89c048f306a4cb48f65b6f | 436 | py | Python | bot.py | ahnaf-zamil/discord-bot-live-stream-1 | 10eae9e7407748f253ab46d41ebb2c68782d62ba | [
"MIT"
] | null | null | null | bot.py | ahnaf-zamil/discord-bot-live-stream-1 | 10eae9e7407748f253ab46d41ebb2c68782d62ba | [
"MIT"
] | null | null | null | bot.py | ahnaf-zamil/discord-bot-live-stream-1 | 10eae9e7407748f253ab46d41ebb2c68782d62ba | [
"MIT"
] | 1 | 2021-04-13T03:23:31.000Z | 2021-04-13T03:23:31.000Z | import discord
from discord.ext import commands
from config import TOKEN
import os
bot = commands.Bot(command_prefix=">>")
@bot.event
async def on_ready():
print("Ready")
@bot.command()
async def ping(ctx):
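    # Invoked in chat as ">>ping"; replies with e.g. "**Latency:** 42.0 ms"
    # (the number shown is illustrative).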
latency = round(bot.latency * 1000, 2) # 364.98 ms
await ctx.send(f"**Latency:** {latency} ms")
for i in os.listdir("./cogs"):
if i.endswith(".py"):
bot.load_extension(f"cogs.{i[:-3]}")
bot.run(TOKEN)
| 20.761905 | 54 | 0.658257 | 68 | 436 | 4.176471 | 0.602941 | 0.070423 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.030303 | 0.167431 | 436 | 21 | 55 | 20.761905 | 0.752066 | 0.020642 | 0 | 0 | 0 | 0 | 0.126761 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f02aa7f566f68a9f19eb1b1f409e1dc8ed853d5 | 3,335 | py | Python | functions/cloudfront.py | brayest/api-control | d5a8a69ac2b50bec564a63de95fce3f4a1f9b859 | [
"Apache-2.0"
] | null | null | null | functions/cloudfront.py | brayest/api-control | d5a8a69ac2b50bec564a63de95fce3f4a1f9b859 | [
"Apache-2.0"
] | null | null | null | functions/cloudfront.py | brayest/api-control | d5a8a69ac2b50bec564a63de95fce3f4a1f9b859 | [
"Apache-2.0"
] | null | null | null | import os
import json
import boto3
import botocore
from datetime import date
def lambda_handler(event, context):
dynamodb = boto3.resource('dynamodb', region_name = 'us-east-1')
dynamoTable = "oneconnect-jenkins-dev"
print(event)
request = event['Records'][0]['cf']['request']
headers = request['headers']
queryString = request['querystring']
url = request['uri']
origin = request['origin']
customer_id = request['headers']['client'][0]['value'] if 'client' in request['headers'] else ""
    parameters = {
        # CloudFront lowercases header names in Lambda@Edge events, so the
        # custom headers must be looked up with lowercase keys.
        "customer_id": request['headers']['client'][0]['value'] if 'client' in request['headers'] else "",
        "fiToken": request['headers']['fitoken'][0]['value'] if 'fitoken' in request['headers'] else "",
        "appToken": request['headers']['apptoken'][0]['value'] if 'apptoken' in request['headers'] else "",
    }
processData = Get(dynamodb, dynamoTable, parameters)
print(processData)
if processData['Item'] != "NULL":
request['headers']['host'][0]['value'] = processData['Item']['URL']
request['origin']['custom']['domainName'] = processData['Item']['URL']
else:
print("Unable to retrieve custom URL, processing default")
print(request)
return request
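# --- Editorial note (assumption inferred from the code): each item in the
# DynamoDB table is keyed by CUSTOMER_ID/NAME and is expected to carry a
# 'URL' attribute, which lambda_handler swaps in as the origin domain, e.g.
# {'CUSTOMER_ID': 'acme', 'NAME': 'acme', 'URL': 'api.acme.example.com'}.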
def Get(dynamodb, dynamoTable, parameters):
table = dynamodb.Table(dynamoTable)
time = date.today().strftime("%d/%m/%Y %H:%M:%S")
if parameters['customer_id']:
print("Getting client: {}".format(parameters['customer_id']))
else:
result = {
"HTTPStatusCode": "501",
"date": time,
"RequestId": parameters,
"ResponseMessage": "Not enough parameters {}".format(parameters['customer_id']),
"Item": "NULL"
}
return result
try:
response = table.get_item(
Key={
'CUSTOMER_ID': parameters['customer_id'],
'NAME': parameters['customer_id']
}
)
ResponseMessage = "{} has been retrieved succesfully".format(parameters['customer_id'])
print(response)
if 'Item' in response:
result = {
"HTTPStatusCode": response['ResponseMetadata']['HTTPStatusCode'],
"date": response['ResponseMetadata']['HTTPHeaders']['date'],
"RequestId": response['ResponseMetadata']['RequestId'],
"ResponseMessage": ResponseMessage,
"Parameters": parameters,
"Item": response['Item']
}
else:
result = {
"HTTPStatusCode": response['ResponseMetadata']['HTTPStatusCode'],
"date": response['ResponseMetadata']['HTTPHeaders']['date'],
"RequestId": response['ResponseMetadata']['RequestId'],
"ResponseMessage": ResponseMessage,
"Parameters": parameters,
"Item": "NULL"
}
return result
except botocore.exceptions.ClientError as e:
print(e)
result = {
"HTTPStatusCode": "502",
"date": time,
"RequestId": parameters,
"ResponseMessage": "Unable to retrieve {}".format(parameters['customer_id']),
"Item": "NULL"
}
        return result
| 33.686869 | 120 | 0.564018 | 291 | 3,335 | 6.419244 | 0.309278 | 0.082441 | 0.085653 | 0.042827 | 0.373662 | 0.328694 | 0.328694 | 0.328694 | 0.279443 | 0.279443 | 0 | 0.006268 | 0.282459 | 3,335 | 99 | 121 | 33.686869 | 0.774342 | 0 | 0 | 0.333333 | 0 | 0 | 0.285971 | 0.006595 | 0 | 0 | 0 | 0 | 0 | 1 | 0.024691 | false | 0 | 0.061728 | 0 | 0.135802 | 0.08642 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f0427d0e9802767838aefea49771e89e6832915 | 1,041 | py | Python | test/dom/ext/test_memory.py | jkloth/pyxml | de15b3ad0fe095f6ce36b1c5ad7438046aae8d3d | [
"MIT"
] | 2 | 2018-05-28T23:01:20.000Z | 2018-05-29T03:59:38.000Z | test/dom/ext/test_memory.py | jkloth/pyxml | de15b3ad0fe095f6ce36b1c5ad7438046aae8d3d | [
"MIT"
] | null | null | null | test/dom/ext/test_memory.py | jkloth/pyxml | de15b3ad0fe095f6ce36b1c5ad7438046aae8d3d | [
"MIT"
] | null | null | null | import Cyclops,sys
from xml.dom.ext.reader import Sax2
from xml.dom import ext
def test():
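    # Exercises DOM creation, mutation, serialization and release so that
    # the Cyclops cycle finder driving this script can report any reference
    # cycles that would leak memory.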
data = sys.stdin.read()
doc = Sax2.FromXml(data)
b1 = doc.createElementNS("http://foo.com","foo:branch")
c1 = doc.createElementNS("http://foo.com","foo:child1")
c2 = doc.createElementNS("http://foo.com","foo:child2")
b1.setAttributeNS("http://foo.com","foo:a1","value-1")
a1 = b1.getAttributeNodeNS("http://foo.com","a1")
a1.value = "This shouldn't leak"
b1.appendChild(c1)
b1.appendChild(c2)
doc.documentElement.appendChild(b1)
r1 = doc.createElementNS("http://foo.com","foo:replace")
doc.documentElement.replaceChild(r1,b1)
b1.removeChild(c2)
import cStringIO
s = cStringIO.StringIO()
import xml.dom.ext
xml.dom.ext.Print(doc, stream = s)
ext.ReleaseNode(doc)
ext.ReleaseNode(b1)
doc = Sax2.FromXml(data)
ext.ReleaseNode(doc)
if __name__ == '__main__':
cy = Cyclops.CycleFinder()
cy.run(test)
cy.find_cycles()
cy.show_cycles()
| 21.244898 | 60 | 0.652257 | 141 | 1,041 | 4.744681 | 0.397163 | 0.06278 | 0.089686 | 0.09716 | 0.185351 | 0.185351 | 0 | 0 | 0 | 0 | 0 | 0.030624 | 0.184438 | 1,041 | 48 | 61 | 21.6875 | 0.757362 | 0 | 0 | 0.129032 | 0 | 0 | 0.160423 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032258 | false | 0 | 0.16129 | 0 | 0.193548 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f0637f31c936135c46b83977efac7d7ce44ca3c | 20,957 | py | Python | dataset.py | BalinLin/attention-target-detection | fd69aaddc07256ebef5036f668c3cf8cbbe4a923 | [
"MIT"
] | 1 | 2022-02-09T18:45:09.000Z | 2022-02-09T18:45:09.000Z | dataset.py | BalinLin/attention-target-detection | fd69aaddc07256ebef5036f668c3cf8cbbe4a923 | [
"MIT"
] | null | null | null | dataset.py | BalinLin/attention-target-detection | fd69aaddc07256ebef5036f668c3cf8cbbe4a923 | [
"MIT"
] | null | null | null | import torch
from torch.utils.data.dataset import Dataset
from torchvision import transforms
import torchvision.transforms.functional as TF
import numpy as np
from PIL import Image, ImageFilter, ImageDraw
import pandas as pd
import matplotlib as mpl
mpl.use('Agg')
from matplotlib import cm
import matplotlib.pyplot as plt
from scipy.misc import imresize
import os
import glob
import csv
from utils import imutils
from utils import myutils
from config import *
import warnings
warnings.simplefilter(action='ignore')
class GazeFollow(Dataset):
def __init__(self, data_dir, csv_path, transform, input_size=input_resolution, output_size=output_resolution,
test=False, imshow=False):
if test:
column_names = ['path', 'idx', 'body_bbox_x', 'body_bbox_y', 'body_bbox_w', 'body_bbox_h', 'eye_x', 'eye_y',
'gaze_x', 'gaze_y', 'bbox_x_min', 'bbox_y_min', 'bbox_x_max', 'bbox_y_max', 'meta']
df = pd.read_csv(csv_path, sep=',', names=column_names, index_col=False, encoding="utf-8-sig")
            # use ['path', 'eye_x'] as a pair key; one pair can have many labels with different (gaze_x, gaze_y) values
df = df[['path', 'eye_x', 'eye_y', 'gaze_x', 'gaze_y', 'bbox_x_min', 'bbox_y_min', 'bbox_x_max',
'bbox_y_max']].groupby(['path', 'eye_x'])
self.keys = list(df.groups.keys()) # ['path', 'eye_x'] pair key
self.X_test = df
self.length = len(self.keys)
else:
column_names = ['path', 'idx', 'body_bbox_x', 'body_bbox_y', 'body_bbox_w', 'body_bbox_h', 'eye_x', 'eye_y',
'gaze_x', 'gaze_y', 'bbox_x_min', 'bbox_y_min', 'bbox_x_max', 'bbox_y_max', 'inout', 'meta']
df = pd.read_csv(csv_path, sep=',', names=column_names, index_col=False, encoding="utf-8-sig")
df = df[df['inout'] != -1] # only use "in" or "out "gaze. (-1 is invalid, 0 is out gaze)
df.reset_index(inplace=True)
self.y_train = df[['bbox_x_min', 'bbox_y_min', 'bbox_x_max', 'bbox_y_max', 'eye_x', 'eye_y', 'gaze_x',
'gaze_y', 'inout']]
self.X_train = df['path']
self.length = len(df)
self.data_dir = data_dir
self.transform = transform
self.test = test
self.input_size = input_size
self.output_size = output_size
self.imshow = imshow
def __getitem__(self, index):
        if self.test:
            g = self.X_test.get_group(self.keys[index])  # label of ['path', 'eye_x'] pair
            cont_gaze = []
            for i, row in g.iterrows():
                path = row['path']
                x_min = row['bbox_x_min']
                y_min = row['bbox_y_min']
                x_max = row['bbox_x_max']
                y_max = row['bbox_y_max']
                eye_x = row['eye_x']
                eye_y = row['eye_y']
                gaze_x = row['gaze_x']
                gaze_y = row['gaze_y']
                cont_gaze.append([gaze_x, gaze_y])  # all ground-truth gaze points are stacked up
            for j in range(len(cont_gaze), 20):
                cont_gaze.append([-1, -1])  # pad dummy gaze to match size for batch processing
            cont_gaze = torch.FloatTensor(cont_gaze)
            gaze_inside = True  # always consider test samples as inside
        else:
            path = self.X_train.iloc[index]
            x_min, y_min, x_max, y_max, eye_x, eye_y, gaze_x, gaze_y, inout = self.y_train.iloc[index]
            gaze_inside = bool(inout)

        # expand face bbox a bit
        k = 0.1
        x_min -= k * abs(x_max - x_min)
        y_min -= k * abs(y_max - y_min)
        x_max += k * abs(x_max - x_min)
        y_max += k * abs(y_max - y_min)

        img = Image.open(os.path.join(self.data_dir, path))
        img = img.convert('RGB')
        width, height = img.size
        x_min, y_min, x_max, y_max = map(float, [x_min, y_min, x_max, y_max])  # cast coordinates to float
        if self.imshow:
            img.save("origin_img.jpg")

        if self.test:
            imsize = torch.IntTensor([width, height])
        else:
            ## data augmentation
            # Jitter (expansion-only) bounding box size
            if np.random.random_sample() <= 0.5:
                k = np.random.random_sample() * 0.2
                x_min -= k * abs(x_max - x_min)
                y_min -= k * abs(y_max - y_min)
                x_max += k * abs(x_max - x_min)
                y_max += k * abs(y_max - y_min)

            # Random Crop
            if np.random.random_sample() <= 0.5:
                # Calculate the minimum valid range of the crop that doesn't exclude the face and the gaze target
                crop_x_min = np.min([gaze_x * width, x_min, x_max])
                crop_y_min = np.min([gaze_y * height, y_min, y_max])
                crop_x_max = np.max([gaze_x * width, x_min, x_max])
                crop_y_max = np.max([gaze_y * height, y_min, y_max])

                # Randomly select a top-left corner
                if crop_x_min >= 0:
                    crop_x_min = np.random.uniform(0, crop_x_min)
                if crop_y_min >= 0:
                    crop_y_min = np.random.uniform(0, crop_y_min)

                # Find the range of valid crop width and height starting from (crop_x_min, crop_y_min)
                crop_width_min = crop_x_max - crop_x_min
                crop_height_min = crop_y_max - crop_y_min
                crop_width_max = width - crop_x_min
                crop_height_max = height - crop_y_min
                # Randomly select a width and a height
                crop_width = np.random.uniform(crop_width_min, crop_width_max)
                crop_height = np.random.uniform(crop_height_min, crop_height_max)

                # Crop it (https://pytorch.org/vision/master/_modules/torchvision/transforms/functional.html)
                img = TF.crop(img, crop_y_min, crop_x_min, crop_height, crop_width)

                # Record the crop's (x, y) offset
                offset_x, offset_y = crop_x_min, crop_y_min

                # convert coordinates into the cropped frame
                x_min, y_min, x_max, y_max = x_min - offset_x, y_min - offset_y, x_max - offset_x, y_max - offset_y
                # if gaze_inside:
                gaze_x, gaze_y = (gaze_x * width - offset_x) / float(crop_width), \
                                 (gaze_y * height - offset_y) / float(crop_height)
                # else:
                #     gaze_x = -1; gaze_y = -1

                width, height = crop_width, crop_height

            # Random flip
            if np.random.random_sample() <= 0.5:
                img = img.transpose(Image.FLIP_LEFT_RIGHT)
                x_max_2 = width - x_min
                x_min_2 = width - x_max
                x_max = x_max_2
                x_min = x_min_2
                gaze_x = 1 - gaze_x

            # Random color change
            if np.random.random_sample() <= 0.5:
                img = TF.adjust_brightness(img, brightness_factor=np.random.uniform(0.5, 1.5))
                img = TF.adjust_contrast(img, contrast_factor=np.random.uniform(0.5, 1.5))
                img = TF.adjust_saturation(img, saturation_factor=np.random.uniform(0, 1.5))

        # get the head position channel
        head_channel = imutils.get_head_box_channel(x_min, y_min, x_max, y_max, width, height,
                                                    resolution=self.input_size, coordconv=False).unsqueeze(0)

        # Crop the face
        face = img.crop((int(x_min), int(y_min), int(x_max), int(y_max)))

        if self.imshow:
            img.save("img_aug.jpg")
            face.save('face_aug.jpg')

        if self.transform is not None:
            img = self.transform(img)
            face = self.transform(face)

        # generate the heatmap used for deconv prediction
        gaze_heatmap = torch.zeros(self.output_size, self.output_size)  # set the size of the output
        if self.test:  # aggregated heatmap
            # NOTE: torch.max(gaze_heatmap) ~= 0.1
            num_valid = 0
            for gaze_x, gaze_y in cont_gaze:
                if gaze_x != -1:
                    num_valid += 1
                    gaze_heatmap = imutils.draw_labelmap(gaze_heatmap, [gaze_x * self.output_size, gaze_y * self.output_size],
                                                         3,
                                                         type='Gaussian')
            gaze_heatmap /= num_valid
        else:
            # NOTE: torch.max(gaze_heatmap) = 1
            # if gaze_inside:
            gaze_heatmap = imutils.draw_labelmap(gaze_heatmap, [gaze_x * self.output_size, gaze_y * self.output_size],
                                                 3,
                                                 type='Gaussian')

        if self.imshow:
            fig = plt.figure(111)
            img = 255 - imutils.unnorm(img.numpy()) * 255
            img = np.clip(img, 0, 255)
            plt.imshow(np.transpose(img, (1, 2, 0)))
            plt.imshow(imresize(gaze_heatmap, (self.input_size, self.input_size)), cmap='jet', alpha=0.3)
            plt.imshow(imresize(1 - head_channel.squeeze(0), (self.input_size, self.input_size)), alpha=0.2)
            plt.savefig('viz_aug.png')

        if self.test:
            return img, face, head_channel, gaze_heatmap, cont_gaze, imsize, path
        else:
            return img, face, head_channel, gaze_heatmap, path, gaze_inside

    def __len__(self):
        return self.length
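# Minimal usage sketch (added for illustration; not part of the original file). The class
# name `GazeFollow` and the constructor arguments are assumptions about how this dataset
# is instantiated elsewhere in the project:
#
#     from torch.utils.data import DataLoader
#     train_dataset = GazeFollow(gazefollow_train_data, gazefollow_train_label,
#                                transform, input_size=224, output_size=64)
#     train_loader = DataLoader(train_dataset, batch_size=48, shuffle=True, num_workers=8)
#     for img, face, head_channel, gaze_heatmap, path, gaze_inside in train_loader:
#         pass  # forward pass / loss computation goes here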
class VideoAttTarget_video(Dataset):
    def __init__(self, data_dir, annotation_dir, transform, input_size=input_resolution, output_size=output_resolution,
                 test=False, imshow=False, seq_len_limit=400):
        shows = glob.glob(os.path.join(annotation_dir, '*'))
        self.all_sequence_paths = []
        for s in shows:
            sequence_annotations = glob.glob(os.path.join(s, '*', '*.txt'))
            self.all_sequence_paths.extend(sequence_annotations)
        self.data_dir = data_dir
        self.transform = transform
        self.input_size = input_size
        self.output_size = output_size
        self.test = test
        self.imshow = imshow
        self.length = len(self.all_sequence_paths)
        self.seq_len_limit = seq_len_limit

    def __getitem__(self, index):
        sequence_path = self.all_sequence_paths[index]
        df = pd.read_csv(sequence_path, header=None, index_col=False,
                         names=['path', 'xmin', 'ymin', 'xmax', 'ymax', 'gazex', 'gazey'])
        show_name = sequence_path.split('/')[-3]
        clip = sequence_path.split('/')[-2]
        seq_len = len(df.index)

        # moving-average smoothing; the window size should be an odd number
        window_size = 11
        df['xmin'] = myutils.smooth_by_conv(window_size, df, 'xmin')
        df['ymin'] = myutils.smooth_by_conv(window_size, df, 'ymin')
        df['xmax'] = myutils.smooth_by_conv(window_size, df, 'xmax')
        df['ymax'] = myutils.smooth_by_conv(window_size, df, 'ymax')

        if not self.test:
            # conditions for data augmentation
            cond_jitter = np.random.random_sample()
            cond_flip = np.random.random_sample()
            cond_color = np.random.random_sample()
            if cond_color < 0.5:
                n1 = np.random.uniform(0.5, 1.5)
                n2 = np.random.uniform(0.5, 1.5)
                n3 = np.random.uniform(0.5, 1.5)
            cond_crop = np.random.random_sample()

            # if longer than seq_len_limit, cut the sequence down to the limit with the start index randomly sampled
            if seq_len > self.seq_len_limit:
                sampled_ind = np.random.randint(0, seq_len - self.seq_len_limit)
                seq_len = self.seq_len_limit
            else:
                sampled_ind = 0

            if cond_crop < 0.5:
                sliced_x_min = df['xmin'].iloc[sampled_ind:sampled_ind+seq_len]
                sliced_x_max = df['xmax'].iloc[sampled_ind:sampled_ind+seq_len]
                sliced_y_min = df['ymin'].iloc[sampled_ind:sampled_ind+seq_len]
                sliced_y_max = df['ymax'].iloc[sampled_ind:sampled_ind+seq_len]

                sliced_gaze_x = df['gazex'].iloc[sampled_ind:sampled_ind+seq_len]
                sliced_gaze_y = df['gazey'].iloc[sampled_ind:sampled_ind+seq_len]

                check_sum = sliced_gaze_x.sum() + sliced_gaze_y.sum()
                all_outside = check_sum == -2*seq_len

                # Calculate the minimum valid range of the crop that doesn't exclude the face and the gaze target
                if all_outside:
                    crop_x_min = np.min([sliced_x_min.min(), sliced_x_max.min()])
                    crop_y_min = np.min([sliced_y_min.min(), sliced_y_max.min()])
                    crop_x_max = np.max([sliced_x_min.max(), sliced_x_max.max()])
                    crop_y_max = np.max([sliced_y_min.max(), sliced_y_max.max()])
                else:
                    crop_x_min = np.min([sliced_gaze_x.min(), sliced_x_min.min(), sliced_x_max.min()])
                    crop_y_min = np.min([sliced_gaze_y.min(), sliced_y_min.min(), sliced_y_max.min()])
                    crop_x_max = np.max([sliced_gaze_x.max(), sliced_x_min.max(), sliced_x_max.max()])
                    crop_y_max = np.max([sliced_gaze_y.max(), sliced_y_min.max(), sliced_y_max.max()])

                # Randomly select a top-left corner
                if crop_x_min >= 0:
                    crop_x_min = np.random.uniform(0, crop_x_min)
                if crop_y_min >= 0:
                    crop_y_min = np.random.uniform(0, crop_y_min)

                # Get image size
                path = os.path.join(self.data_dir, show_name, clip, df['path'].iloc[0])
                img = Image.open(path)
                img = img.convert('RGB')
                width, height = img.size

                # Find the range of valid crop width and height starting from (crop_x_min, crop_y_min)
                crop_width_min = crop_x_max - crop_x_min
                crop_height_min = crop_y_max - crop_y_min
                crop_width_max = width - crop_x_min
                crop_height_max = height - crop_y_min
                # Randomly select a width and a height
                crop_width = np.random.uniform(crop_width_min, crop_width_max)
                crop_height = np.random.uniform(crop_height_min, crop_height_max)
        else:
            sampled_ind = 0

        faces, images, head_channels, heatmaps, paths, gazes, imsizes, gaze_inouts = [], [], [], [], [], [], [], []
        index_tracker = -1
        for i, row in df.iterrows():
            index_tracker = index_tracker + 1
            if not self.test:
                if index_tracker < sampled_ind or index_tracker >= (sampled_ind + self.seq_len_limit):
                    continue

            face_x1 = row['xmin']  # note: already in image coordinates
            face_y1 = row['ymin']  # note: already in image coordinates
            face_x2 = row['xmax']  # note: already in image coordinates
            face_y2 = row['ymax']  # note: already in image coordinates
            gaze_x = row['gazex']  # note: already in image coordinates
            gaze_y = row['gazey']  # note: already in image coordinates
            impath = os.path.join(self.data_dir, show_name, clip, row['path'])
            img = Image.open(impath)
            img = img.convert('RGB')
            width, height = img.size
            imsize = torch.FloatTensor([width, height])
            # imsizes.append(imsize)

            face_x1, face_y1, face_x2, face_y2 = map(float, [face_x1, face_y1, face_x2, face_y2])
            gaze_x, gaze_y = map(float, [gaze_x, gaze_y])
            if gaze_x == -1 and gaze_y == -1:
                gaze_inside = False
            else:
                if gaze_x < 0:  # move a gaze point that was slightly outside the image back in
                    gaze_x = 0
                if gaze_y < 0:
                    gaze_y = 0
                gaze_inside = True

            if not self.test:
                ## data augmentation
                # Jitter (expansion-only) bounding box size.
                if cond_jitter < 0.5:
                    k = cond_jitter * 0.1
                    face_x1 -= k * abs(face_x2 - face_x1)
                    face_y1 -= k * abs(face_y2 - face_y1)
                    face_x2 += k * abs(face_x2 - face_x1)
                    face_y2 += k * abs(face_y2 - face_y1)
                    face_x1 = np.clip(face_x1, 0, width)
                    face_x2 = np.clip(face_x2, 0, width)
                    face_y1 = np.clip(face_y1, 0, height)
                    face_y2 = np.clip(face_y2, 0, height)

                # Random Crop
                if cond_crop < 0.5:
                    # Crop it
                    img = TF.crop(img, crop_y_min, crop_x_min, crop_height, crop_width)

                    # Record the crop's (x, y) offset
                    offset_x, offset_y = crop_x_min, crop_y_min

                    # convert coordinates into the cropped frame
                    face_x1, face_y1, face_x2, face_y2 = face_x1 - offset_x, face_y1 - offset_y, face_x2 - offset_x, face_y2 - offset_y
                    if gaze_inside:
                        gaze_x, gaze_y = (gaze_x - offset_x), \
                                         (gaze_y - offset_y)
                    else:
                        gaze_x = -1; gaze_y = -1

                    width, height = crop_width, crop_height

                # Flip?
                if cond_flip < 0.5:
                    img = img.transpose(Image.FLIP_LEFT_RIGHT)
                    x_max_2 = width - face_x1
                    x_min_2 = width - face_x2
                    face_x2 = x_max_2
                    face_x1 = x_min_2
                    if gaze_x != -1 and gaze_y != -1:
                        gaze_x = width - gaze_x

                # Random color change
                if cond_color < 0.5:
                    img = TF.adjust_brightness(img, brightness_factor=n1)
                    img = TF.adjust_contrast(img, contrast_factor=n2)
                    img = TF.adjust_saturation(img, saturation_factor=n3)

            # Face crop
            face = img.copy().crop((int(face_x1), int(face_y1), int(face_x2), int(face_y2)))

            # Head channel image
            head_channel = imutils.get_head_box_channel(face_x1, face_y1, face_x2, face_y2, width, height,
                                                        resolution=self.input_size, coordconv=False).unsqueeze(0)
            if self.transform is not None:
                img = self.transform(img)
                face = self.transform(face)

            # Deconv output
            if gaze_inside:
                gaze_x /= float(width)  # fractional gaze
                gaze_y /= float(height)
                gaze_heatmap = torch.zeros(self.output_size, self.output_size)  # set the size of the output
                gaze_map = imutils.draw_labelmap(gaze_heatmap, [gaze_x * self.output_size, gaze_y * self.output_size],
                                                 3,
                                                 type='Gaussian')
                gazes.append(torch.FloatTensor([gaze_x, gaze_y]))
            else:
                gaze_map = torch.zeros(self.output_size, self.output_size)
                gazes.append(torch.FloatTensor([-1, -1]))

            faces.append(face)
            images.append(img)
            head_channels.append(head_channel)
            heatmaps.append(gaze_map)
            gaze_inouts.append(torch.FloatTensor([int(gaze_inside)]))

        if self.imshow:
            for i in range(len(faces)):
                fig = plt.figure(111)
                img = 255 - imutils.unnorm(images[i].numpy()) * 255
                img = np.clip(img, 0, 255)
                plt.imshow(np.transpose(img, (1, 2, 0)))
                plt.imshow(imresize(heatmaps[i], (self.input_size, self.input_size)), cmap='jet', alpha=0.3)
                plt.imshow(imresize(1 - head_channels[i].squeeze(0), (self.input_size, self.input_size)), alpha=0.2)
                plt.savefig(os.path.join('debug', 'viz_%d_inout=%d.png' % (i, gaze_inouts[i])))
                plt.close('all')

        faces = torch.stack(faces)
        images = torch.stack(images)
        head_channels = torch.stack(head_channels)
        heatmaps = torch.stack(heatmaps)
        gazes = torch.stack(gazes)
        gaze_inouts = torch.stack(gaze_inouts)
        # imsizes = torch.stack(imsizes)
        # print(faces.shape, images.shape, head_channels.shape, heatmaps.shape)

        if self.test:
            return images, faces, head_channels, heatmaps, gazes, gaze_inouts
        else:  # train
            return images, faces, head_channels, heatmaps, gaze_inouts

    def __len__(self):
        return self.length
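# Usage sketch (added for illustration; not part of the original file). Because each item
# is a whole, variable-length frame sequence already stacked into tensors, a DataLoader
# over this dataset would typically use batch_size=1 or a custom collate_fn — this is an
# assumption about the training script, which lives elsewhere:
#
#     from torch.utils.data import DataLoader
#     video_dataset = VideoAttTarget_video(data_dir, annotation_dir, transform,
#                                          test=False, seq_len_limit=400)
#     loader = DataLoader(video_dataset, batch_size=1, shuffle=True)
#     for images, faces, head_channels, heatmaps, gaze_inouts in loader:
#         images = images.squeeze(0)  # drop the dummy batch dimension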
| 46.059341 | 135 | 0.549602 | 2,811 | 20,957 | 3.820704 | 0.11704 | 0.019367 | 0.014153 | 0.013035 | 0.601397 | 0.560335 | 0.51108 | 0.451862 | 0.418343 | 0.35 | 0 | 0.016034 | 0.345278 | 20,957 | 454 | 136 | 46.160793 | 0.766708 | 0.109987 | 0 | 0.368421 | 0 | 0 | 0.039281 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017544 | false | 0 | 0.052632 | 0.005848 | 0.093567 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f068949b8d532400d0c62a6e21fcf94df34dbea | 1,678 | py | Python | src/bob/handlers.py | goubertbrent/oca-backend | b9f59cc02568aecb55d4b54aec05245790ea25fd | [
"Apache-2.0"
] | null | null | null | src/bob/handlers.py | goubertbrent/oca-backend | b9f59cc02568aecb55d4b54aec05245790ea25fd | [
"Apache-2.0"
] | null | null | null | src/bob/handlers.py | goubertbrent/oca-backend | b9f59cc02568aecb55d4b54aec05245790ea25fd | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import httplib
import json
import logging
import webapp2
from solution_server_settings import get_solution_server_settings
from bob.bizz import set_ios_app_id
from rogerthat.bizz.app import AppDoesNotExistException
def _validate_request(handler):
    solution_server_settings = get_solution_server_settings()
    secret = handler.request.headers.get("X-BOB-SECRET")
    if not solution_server_settings.jenkins_incoming_secret:
        logging.error("jenkins_incoming_secret is not set yet")
        handler.abort(401)
    if secret != solution_server_settings.jenkins_incoming_secret:
        handler.abort(401)


class SetIosAppIdHandler(webapp2.RequestHandler):
    def post(self):
        _validate_request(self)
        data = json.loads(self.request.body)
        app_id = data['app_id']
        ios_app_id = data['ios_app_id']
        ios_dev_team = data['ios_dev_team']
        try:
            set_ios_app_id(app_id, ios_app_id, ios_dev_team)
        except AppDoesNotExistException:
            self.abort(httplib.NOT_FOUND)
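# Minimal wiring sketch (added for illustration; not in the original file): how a handler
# like this might be exposed in a webapp2 WSGI application. The route path is an
# assumption made purely for the example.
#
#     app = webapp2.WSGIApplication([
#         ('/bob/set_ios_app_id', SetIosAppIdHandler),
#     ], debug=False)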
| 33.56 | 74 | 0.740167 | 235 | 1,678 | 5.085106 | 0.485106 | 0.033473 | 0.11046 | 0.026778 | 0.117155 | 0.102092 | 0 | 0 | 0 | 0 | 0 | 0.013848 | 0.18236 | 1,678 | 49 | 75 | 34.244898 | 0.857143 | 0.361144 | 0 | 0.076923 | 0 | 0 | 0.073934 | 0.021801 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.269231 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f08b75d36a9d33cd574cf93abd2c6e9f072616b | 5,531 | py | Python | src/the_tale/the_tale/linguistics/migrations/0001_initial.py | devapromix/the-tale | 2a10efd3270734f8cf482b4cfbc5353ef8f0494c | [
"BSD-3-Clause"
] | 1 | 2020-04-02T11:51:20.000Z | 2020-04-02T11:51:20.000Z | src/the_tale/the_tale/linguistics/migrations/0001_initial.py | devapromix/the-tale | 2a10efd3270734f8cf482b4cfbc5353ef8f0494c | [
"BSD-3-Clause"
] | null | null | null | src/the_tale/the_tale/linguistics/migrations/0001_initial.py | devapromix/the-tale | 2a10efd3270734f8cf482b4cfbc5353ef8f0494c | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from django.db import models, migrations
import rels.django
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Contribution',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
                ('type', rels.django.RelationIntegerField(default=0, db_index=True)),
                ('source', rels.django.RelationIntegerField(db_index=True)),
                ('entity_id', models.BigIntegerField(db_index=True)),
                ('account', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Restriction',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
                ('name', models.CharField(max_length=128)),
                ('group', rels.django.RelationIntegerField(db_index=True)),
                ('external_id', models.BigIntegerField(db_index=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Template',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now_add=True)),
                ('raw_template', models.TextField(db_index=True)),
                ('data', models.TextField()),
                ('state', rels.django.RelationIntegerField(db_index=True)),
                ('key', rels.django.RelationIntegerField(db_index=True)),
                ('errors_status', rels.django.RelationIntegerField(default=0, db_index=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to=settings.AUTH_USER_MODEL, null=True)),
                ('parent', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, blank=True, to='linguistics.Template', unique=True)),
            ],
            options={
                'permissions': (('moderate_template', '\u041c\u043e\u0436\u0435\u0442 \u043c\u043e\u0434\u0435\u0440\u0438\u0440\u043e\u0432\u0430\u0442\u044c \u0448\u0430\u0431\u043b\u043e\u043d\u044b \u0444\u0440\u0430\u0437'),),
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='TemplateRestriction',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('variable', models.CharField(max_length=32, db_index=True)),
                ('restriction', models.ForeignKey(to='linguistics.Restriction')),
                ('template', models.ForeignKey(to='linguistics.Template')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Word',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now_add=True)),
                ('normal_form', models.CharField(max_length=64)),
                ('forms', models.TextField()),
                ('state', rels.django.RelationIntegerField(db_index=True)),
                ('type', rels.django.RelationIntegerField(db_index=True)),
                ('used_in_ingame_templates', models.IntegerField(default=0)),
                ('used_in_onreview_templates', models.IntegerField(default=0)),
                ('used_in_status', rels.django.RelationIntegerField(default=2, db_index=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to=settings.AUTH_USER_MODEL, null=True)),
                ('parent', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, blank=True, to='linguistics.Word', unique=True)),
            ],
            options={
                'permissions': (('moderate_word', '\u041c\u043e\u0436\u0435\u0442 \u043c\u043e\u0434\u0435\u0440\u0438\u0440\u043e\u0432\u0430\u0442\u044c \u0441\u043b\u043e\u0432\u0430'),),
            },
            bases=(models.Model,),
        ),
        migrations.AlterUniqueTogether(
            name='word',
            unique_together=set([('normal_form', 'type', 'state')]),
        ),
        migrations.AlterUniqueTogether(
            name='templaterestriction',
            unique_together=set([('restriction', 'template', 'variable')]),
        ),
        migrations.AlterUniqueTogether(
            name='restriction',
            unique_together=set([('group', 'external_id')]),
        ),
        migrations.AlterUniqueTogether(
            name='contribution',
            unique_together=set([('type', 'account', 'entity_id')]),
        ),
    ]
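# Illustrative note (not part of the generated migration): a migration file like this is
# applied through Django's management commands rather than executed directly, e.g.:
#
#     python manage.py migrate linguistics 0001_initial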
| 48.946903 | 231 | 0.590309 | 539 | 5,531 | 5.890538 | 0.215213 | 0.033071 | 0.051969 | 0.047244 | 0.660472 | 0.615118 | 0.538583 | 0.455433 | 0.428032 | 0.389606 | 0 | 0.04797 | 0.265052 | 5,531 | 112 | 232 | 49.383929 | 0.733087 | 0.003797 | 0 | 0.504762 | 0 | 0.019048 | 0.167393 | 0.06772 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.038095 | 0 | 0.066667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f08da1b252077f73e7d95a8c812bb528e37fbec | 1,197 | py | Python | exec_analyze.py | hibibol/PriLog_web | d15a8111424e3b3b5bd8d786ef8bb8949c9c8d90 | [
"MIT"
] | null | null | null | exec_analyze.py | hibibol/PriLog_web | d15a8111424e3b3b5bd8d786ef8bb8949c9c8d90 | [
"MIT"
] | null | null | null | exec_analyze.py | hibibol/PriLog_web | d15a8111424e3b3b5bd8d786ef8bb8949c9c8d90 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import sys
import analyze as al
import common as cm
import app as ap
def do_analyze():
    # Exit if the required argument is missing
    if len(sys.argv) < 2 or not sys.argv[1]:
        return
    youtube_url = sys.argv[1]

    # Extract the video ID
    youtube_id = al.get_youtube_id(youtube_url)
    if not youtube_id:
        return

    queue_path = ap.queue_dir + str(youtube_id)
    pending_path = ap.pending_dir + str(youtube_id)
    cached = cm.cache_check(youtube_id)
    # Do not re-analyze anything except 3xx cache entries older than 5 minutes
    if cached:
        cm.clear_path(queue_path)
        cm.clear_path(pending_path)
        return

    # Search for / validate the YouTube video
    path, title, length, thumbnail, url_result = al.search(youtube_id)
    if url_result % 100 // 10 == 2:
        cm.save_cache(youtube_id, title, False, False, False, False, False, url_result)
    else:
        # Analyze the timeline (TL)
        time_line, time_line_enemy, time_data, total_damage, debuff_value, damages, analyze_result = al.analyze_movie(path)
        # Save to cache
        cm.save_cache(youtube_id, title, time_line, time_line_enemy, False, total_damage, debuff_value, damages, analyze_result)

    cm.clear_path(queue_path)
    cm.clear_path(pending_path)


if __name__ == "__main__":
    do_analyze()
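# Example invocation (added for illustration; the URL is a placeholder):
#
#     python exec_analyze.py "https://www.youtube.com/watch?v=XXXXXXXXXXX"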
| 25.468085 | 128 | 0.674185 | 169 | 1,197 | 4.455621 | 0.372781 | 0.10757 | 0.058433 | 0.039841 | 0.345286 | 0.289509 | 0.223108 | 0.111554 | 0.111554 | 0.111554 | 0 | 0.01197 | 0.232247 | 1,197 | 46 | 129 | 26.021739 | 0.807399 | 0.077694 | 0 | 0.25 | 0 | 0 | 0.007299 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.142857 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f09f3b54f59153ab1824409e91b4c34b9e4168e | 857 | py | Python | scrubcam/lora.py | icr-ctl/scrubcam | 4ee1b20d2b68558ccd6ab408eae12e467079f25e | [
"MIT"
] | 2 | 2021-11-18T17:02:26.000Z | 2022-01-18T18:53:45.000Z | scrubcam/lora.py | icr-ctl/scrubcam | 4ee1b20d2b68558ccd6ab408eae12e467079f25e | [
"MIT"
] | 8 | 2021-11-07T20:44:21.000Z | 2022-03-11T23:41:11.000Z | scrubcam/lora.py | icr-ctl/scrubcam | 4ee1b20d2b68558ccd6ab408eae12e467079f25e | [
"MIT"
] | null | null | null | import busio
from digitalio import DigitalInOut
import board
import adafruit_rfm9x
class LoRaSender():
    """Manages sending messages on LoRa radio

    Specifically interfaces with the RFM9x module that we are
    currently connecting to the ScrubCam as part of the Adafruit
    Radio+OLED Bonnet.
    """

    def __init__(self):
        CS = DigitalInOut(board.CE1)
        RESET = DigitalInOut(board.D25)
        spi = busio.SPI(board.SCK, MOSI=board.MOSI, MISO=board.MISO)
        self.rfm9x = adafruit_rfm9x.RFM9x(spi, CS, RESET, 915.0)
        self.rfm9x.tx_power = 23

    def send(self, data_strg):
        """Send a string on LoRa radio

        Parameters
        ----------
        data_strg : string
            Data to send over LoRa
        """
        send_data = bytes(f'{data_strg}\r\n', 'utf-8')
        self.rfm9x.send(send_data)
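# Usage sketch (added for illustration; not part of the original module). Running this
# requires the RFM9x bonnet to actually be wired as above, so it only executes when the
# module is invoked directly:
if __name__ == '__main__':
    sender = LoRaSender()
    sender.send('hello from scrubcam')  # the payload string is an arbitrary example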
| 24.485714 | 68 | 0.631272 | 113 | 857 | 4.681416 | 0.548673 | 0.05104 | 0.041588 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.0272 | 0.270712 | 857 | 34 | 69 | 25.205882 | 0.8192 | 0.322054 | 0 | 0 | 0 | 0 | 0.039216 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.285714 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f0cb8deae5b4ddff9e533d2eb1bff8765df92fd | 719 | py | Python | src/toil/test/docs/scripts/tutorial_managing.py | jeffrey856/toil | 2447cece3b0fae6f103322b3d9e4ff3bd7a9f7bf | [
"Apache-2.0"
] | null | null | null | src/toil/test/docs/scripts/tutorial_managing.py | jeffrey856/toil | 2447cece3b0fae6f103322b3d9e4ff3bd7a9f7bf | [
"Apache-2.0"
] | null | null | null | src/toil/test/docs/scripts/tutorial_managing.py | jeffrey856/toil | 2447cece3b0fae6f103322b3d9e4ff3bd7a9f7bf | [
"Apache-2.0"
] | null | null | null | from toil.common import Toil
from toil.job import Job
class LocalFileStoreJob(Job):
    def run(self, fileStore):
        # self.tempDir will always contain the name of a directory within the allocated disk space reserved for the job
        scratchDir = self.tempDir
        # Similarly create a temporary file.
        scratchFile = fileStore.getLocalTempFile()


if __name__ == "__main__":
    options = Job.Runner.getDefaultOptions("./toilWorkflowRun")
    options.logLevel = "INFO"
    options.clean = "always"

    # Create an instance of LocalFileStoreJob which will have at least 2 gigabytes of storage space.
    j = LocalFileStoreJob(disk="2G")

    # Run the workflow
    with Toil(options) as toil:
        toil.start(j)
| 32.681818 | 119 | 0.702364 | 91 | 719 | 5.461538 | 0.637363 | 0.032193 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003565 | 0.21975 | 719 | 22 | 120 | 32.681818 | 0.882353 | 0.33936 | 0 | 0 | 0 | 0 | 0.078556 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.153846 | 0 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f0e6d94143fc7d74f115a60ca4ceb0ad9baa180 | 2,527 | py | Python | PTO-yelp/LM/dataloaders/yelp.py | LegendTianjin/Point-Then-Operate | a6b0818343bc34c468738ab91ecea89dd03a9535 | [
"Apache-2.0"
] | 50 | 2019-06-06T05:30:32.000Z | 2021-11-18T07:24:36.000Z | PTO-yelp/LM/dataloaders/yelp.py | lancopku/Point-Then-Operate | 1c04ec326b52fc65f97f5610a6f16f6e938d583e | [
"Apache-2.0"
] | 2 | 2019-08-30T09:49:26.000Z | 2020-01-17T04:20:53.000Z | PTO-yelp/LM/dataloaders/yelp.py | ChenWu98/Point-Then-Operate | a6b0818343bc34c468738ab91ecea89dd03a9535 | [
"Apache-2.0"
] | 7 | 2019-06-17T06:20:47.000Z | 2020-10-26T03:19:44.000Z | from torch.utils import data
import torch
import os
from collections import defaultdict
import numpy as np
from utils.vocab import Vocabulary, build_vocab
import random
from nltk import word_tokenize
from LM.lm_config import Config
config = Config()
class Yelp(object):
    """The Yelp dataset."""

    def __init__(self, mode, noisy_for_train, sentiment, direction):
        self.mode = mode
        self.root = os.path.join('../data', 'yelp')
        voc_f = os.path.join(self.root, 'yelp.vocab')
        self.noisy = self.mode == 'train' and noisy_for_train

        # Load data from domain 0 and domain 1.
        path = os.path.join(self.root, 'sentiment.{}.{}'.format(mode, sentiment))

        # Load vocabulary.
        print('----- Loading vocab -----')
        self.vocab = Vocabulary(voc_f)
        print('vocabulary size:', self.vocab.size)
        self.pad = self.vocab.word2id['<pad>']
        self.go = self.vocab.word2id['<go>']
        self.eos = self.vocab.word2id['<eos>']
        self.unk = self.vocab.word2id['<unk>']

        # Tokenize file content
        with open(path, 'r') as f:
            ids = []
            for line in f:
                words = ['<go>'] + line.split() + ['<eos>']
                if direction == 'forward':
                    pass
                elif direction == 'backward':
                    words.reverse()
                else:
                    raise ValueError()
                for word in words:
                    ids.append(self.vocab.word2id[word] if word in self.vocab.word2id else self.unk)
        self.ids = torch.LongTensor(ids)  # (very_long, )
        self.ids = batchify(self.ids, config.batch_size, config)  # shape = (, batch_size)


def makeup(_x, n):
    x = []
    for i in range(n):
        x.append(_x[i % len(_x)])
    return x


def noise(x, unk, word_drop=0.0, k=3):
    n = len(x)
    for i in range(n):
        if random.random() < word_drop:
            x[i] = unk
    # slight shuffle such that |sigma[i]-i| <= k
    sigma = (np.arange(n) + (k+1) * np.random.rand(n)).argsort()
    return [x[sigma[i]] for i in range(n)]
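# Illustrative example (added; not in the original file): `noise` applies word dropout and
# then a local shuffle in which each token moves at most k positions. The exact output
# below is only one possible outcome, since the function is random:
#
#     >>> tokens = ['the', 'food', 'was', 'really', 'good']
#     >>> noise(tokens[:], unk='<unk>', word_drop=0.1, k=3)
#     ['food', 'the', 'was', 'good', 'really']  # for example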
def batchify(data, bsz, args):
    # Work out how cleanly we can divide the dataset into bsz parts.
    nbatch = data.size(0) // bsz
    # Trim off any extra elements that wouldn't cleanly fit (remainders).
    data = data.narrow(0, 0, nbatch * bsz)
    # Evenly divide the data across the bsz batches.
    data = data.view(bsz, -1).t().contiguous()
    if args.gpu:
        data = data.cuda()
    return data
| 31.987342 | 135 | 0.549602 | 344 | 2,527 | 4.159884 | 0.369186 | 0.050314 | 0.067086 | 0.023061 | 0.051712 | 0.018169 | 0 | 0 | 0 | 0 | 0 | 0.008909 | 0.289276 | 2,527 | 79 | 101 | 31.987342 | 0.787862 | 0.139691 | 0 | 0.035088 | 0 | 0 | 0.058306 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.070175 | false | 0.017544 | 0.157895 | 0 | 0.298246 | 0.035088 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f0f96d2aabdded6e0c0f1238414633dcb1bdc2c | 770 | py | Python | spug_api/apps/account/models.py | lucasaytt/arena_platform | 16a8c682e71d90b62c746da126cafc8e6444fe5f | [
"MIT"
] | null | null | null | spug_api/apps/account/models.py | lucasaytt/arena_platform | 16a8c682e71d90b62c746da126cafc8e6444fe5f | [
"MIT"
] | null | null | null | spug_api/apps/account/models.py | lucasaytt/arena_platform | 16a8c682e71d90b62c746da126cafc8e6444fe5f | [
"MIT"
] | null | null | null | from public import db
from libs.model import ModelMixin
from sqlalchemy import text, func
from flask_login import UserMixin, login_manager
class User(db.Model, ModelMixin, UserMixin):
    __tablename__ = 'account_users'

    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(50), unique=True, nullable=False)
    is_supper = db.Column(db.Boolean, default=False)
    is_active = db.Column(db.Boolean, default=True)
    access_token = db.Column(db.String(32))
    token_expired = db.Column(db.Integer)
    create_time = db.Column(db.DateTime, server_default=func.now(), comment='创建时间')
    update_time = db.Column(db.DateTime, server_default=text('CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP'),
                            comment='修改时间')
| 42.777778 | 110 | 0.724675 | 106 | 770 | 5.09434 | 0.490566 | 0.118519 | 0.148148 | 0.062963 | 0.218519 | 0.12963 | 0.12963 | 0 | 0 | 0 | 0 | 0.006221 | 0.164935 | 770 | 18 | 111 | 42.777778 | 0.833593 | 0 | 0 | 0 | 0 | 0 | 0.085603 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.266667 | 0 | 0.933333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f1288ee8767cffcef0b23927489a5e97f96cee5 | 1,639 | py | Python | adjust_n_clusters.py | Ting-Wu0701/Clustering-Ensemble | 2583fcfbacce6c2ffb1314802cdba7a73d5f6271 | [
"Apache-2.0"
] | 2 | 2021-03-16T01:41:57.000Z | 2021-04-17T01:44:39.000Z | adjust_n_clusters.py | Ting-Wu0701/Clustering-Ensemble | 2583fcfbacce6c2ffb1314802cdba7a73d5f6271 | [
"Apache-2.0"
] | null | null | null | adjust_n_clusters.py | Ting-Wu0701/Clustering-Ensemble | 2583fcfbacce6c2ffb1314802cdba7a73d5f6271 | [
"Apache-2.0"
] | 1 | 2021-03-16T01:41:28.000Z | 2021-03-16T01:41:28.000Z | """
When the number of clusters in the obtained pre-structure is larger than the actual number
of clusters, the clusters in the pre-structure need to be adjusted (merged).
Here we use the k-means algorithm to merge clusters with each other.
"""
from sklearn.cluster import KMeans
import numpy as np
import pandas as pd

# Assume the cluster-core set consists of samples 1, 3, 4 and 5
core = [0, 2, 3, 4]
# Assume the cluster-halo set consists of samples 2 and 6
halo = [1, 5]
# If the halo samples end up labelled [1, 2], the labelling is correct
# Assume the core structure is [1, 3, 4, 2]
core_structure = [1, 3, 4, 2]
pre_structure = [1, 3, 4, 2, 1, 2]

# Combine the core, halo and pre-structure
core1_halo1 = core + halo
print(core1_halo1)
core1_halo1 = pd.DataFrame(core1_halo1)
pre_structure1 = pd.DataFrame(pre_structure)
table = pd.concat((core1_halo1, pre_structure1), axis=1)
table.columns = list(['num_sample', 'label'])
print(table)

x1 = [100, 100]
x2 = [95, 95]
x3 = [70, 70]
x4 = [50, 53]
x5 = [30, 30]
x6 = [20, 20]
X = np.vstack((x1, x3, x4, x5, x2, x6))
# table1 is the combined matrix: column 1 is the sample index, column 2 the corresponding
# class label, and the remaining columns hold the sample data
table1 = np.hstack((table, X))
# table2 drops the sample index: column 1 is the class label, the rest is the sample data
table2 = table1[:, 1:table1.shape[1]]
table2 = pd.DataFrame(table2)
print(table2)
combine = table2.groupby(table2.loc[:, 0])
# print(combine.mean())
combine1 = np.array(combine.mean())
print(combine1)
combine2 = pd.DataFrame(combine1)


# Cluster into k groups with k-means
def kmeans1(data, k):
    iteration = 50
    data_zs = 1.0 * (data - data.mean()) / data.std()  # standardize the data
    model = KMeans(n_clusters=k, max_iter=iteration)  # split into k clusters
    model.fit(data_zs)  # run the clustering
    # output the original data together with its cluster label
    r = pd.concat([data, pd.Series(model.labels_, index=data.index)], axis=1)
    return r


result = kmeans1(combine2, 2)
print(result)
table2 = np.array(table2)
for i in range(0, table2.shape[0]):
    if table2[i, 0] == 1 or table2[i, 0] == 3:
        table2[i, 0] = 0
    else:
        table2[i, 0] = 1
print(table2)
| 24.102941 | 77 | 0.680903 | 254 | 1,639 | 4.330709 | 0.452756 | 0.009091 | 0.029091 | 0.010909 | 0.023636 | 0 | 0 | 0 | 0 | 0 | 0 | 0.09104 | 0.155583 | 1,639 | 67 | 78 | 24.462687 | 0.703757 | 0.201342 | 0 | 0.044444 | 0 | 0 | 0.011646 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022222 | false | 0 | 0.066667 | 0 | 0.111111 | 0.133333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f146a59cdcde0391b1020058df14e6cdb41cb50 | 7,545 | py | Python | helper.py | szilaga/investment_ml | c27fe06239dc7c56fabb876c950d60378fe9c8b2 | [
"RSA-MD"
] | null | null | null | helper.py | szilaga/investment_ml | c27fe06239dc7c56fabb876c950d60378fe9c8b2 | [
"RSA-MD"
] | null | null | null | helper.py | szilaga/investment_ml | c27fe06239dc7c56fabb876c950d60378fe9c8b2 | [
"RSA-MD"
] | null | null | null | from pandas_datareader import data as pdr
import pandas as pd
import numpy as np
from datetime import timedelta
from get_all_tickers import get_tickers as gt
import ipywidgets as widgets
import requests
class Helper:

    def getParameters(self):
        forecast = widgets.Text(
            value='7;14;30',
            placeholder='Type something',
            description='Forecasts:',
            disabled=False
        )
        tickers = widgets.Text(
            value='GOOG;NOK',
            placeholder='Type something',
            description='Tickers:',
            disabled=False
        )
        start_date = widgets.DatePicker(
            description='Start Date:',
            disabled=False
        )
        end_date = widgets.DatePicker(
            description='End Date:',
            disabled=False,
            value=pd.Timestamp.today(tz=None))

        r_std = widgets.IntSlider(value=30, min=0, description='Rolling std:')
        r_sma1 = widgets.IntSlider(value=30, min=0, description='SMA 1:')
        r_sma2 = widgets.IntSlider(value=50, min=0, description='SMA 2:')
        r_rsi = widgets.IntSlider(value=14, min=0, description='RSI:')

        return r_std, r_sma1, r_sma2, r_rsi, start_date, end_date, forecast, tickers

    #### Helper functions
    def get_Tickers_Yahoo(self):
        '''
        Queries all available tickers from Yahoo
        :return: list of ticker symbols
        '''
        return gt.get_tickers()

    def get_Alphavantage(self, ticker, key):
        '''
        Queries data from the Alpha Vantage API
        :param ticker: symbol of the stock
        :param key: API key used to query the data
        :return: Dataframe
        '''
        API_URL = "https://www.alphavantage.co/query"
        data = {
            "function": "TIME_SERIES_DAILY",
            "symbol": "sth",
            "apikey": "key",
        }
        data.update(symbol=ticker, apikey=key)
        response = requests.get(API_URL, data)
        response_json = response.json()  # maybe redundant
        data = pd.DataFrame.from_dict(response_json['Time Series (Daily)'], orient='index').sort_index(axis=1)
        data = data.rename(columns={'1. open': 'Open', '2. high': 'High', '3. low': 'Low', '4. close': 'Close',
                                    '5. adjusted close': 'AdjClose', '6. volume': 'Volume'})
        data = data[['Open', 'High', 'Low', 'Close']]
        return data

    def get_Data_Yahoo(self, stock_code, start_date, end_date):
        '''
        Get data from yfinance
        https://pypi.org/project/yfinance/
        :param stock_code:
        :param start_date:
        :param end_date:
        :return: Dataframe
        '''
        # Data fetching
        return pdr.get_data_yahoo(stock_code, start_date, end_date)

    def cleanDataFrame(self, df):
        '''
        Clean the dataframe: front-fill values for missing dates
        :return: Dataframe
        '''
        # create a dataframe covering the entire timespan
        df_time = pd.DataFrame(index=pd.date_range(start=df.index[0], end=df.index[-1]))
        # join with the extracted dataframe
        df_time = df_time.join(df, how='left')
        # perform the front-fill
        df_time.ffill(axis=0, inplace=True)
        return df_time

    def get_nullValues_ext(self, df, axis=0):
        '''
        Counts the number of NaN values per column
        and the total number of NaN values.
        Note: axis = 0 -> row wise; axis = 1 -> column wise
        '''
        return df.isnull().sum(axis=axis).to_frame()

    def normalize(self, df):
        '''
        normalize all values of a dataframe between 0 and 1
        :param df:
        :return: Dataframe, max, min
        '''
        # normalize values
        return (df - df.min()) / (df.max() - df.min()), df.max(), df.min()

    def denormalize(self, df, max, min):
        '''
        recreate the dataframe as it was before normalization
        :param df:
        :param max:
        :param min:
        :return: Dataframe
        '''
        return df * (max - min) + min
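    # Illustrative round trip (added; not part of the original class):
    #
    #     >>> h = Helper()
    #     >>> norm, hi, lo = h.normalize(df)          # scale every column to [0, 1]
    #     >>> restored = h.denormalize(norm, hi, lo)  # recovers the original df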
    def normalize_price(self, data):
        '''
        returns the normalized price
        :return: Dataframe
        '''
        n_price = data / data.iloc[0]
        return pd.DataFrame(n_price)

    def slice_forward(self, df, sample):
        '''
        forward slice of the dataframe by sample days
        :return: Dataframe
        '''
        days = timedelta(sample)
        start_date = df.index[0]
        end_date = df.index[0] + days
        return self.slice_df(df, start=start_date, end=end_date)

    def slice_backward(self, df, sample):
        '''
        backward slice of the dataframe by sample days
        :return: Dataframe
        '''
        days = timedelta(sample)
        start_date = df.index[-1] - days
        return self.slice_df(df, start=start_date, end=df.index[-1])

    def slice_df(self, df, start='2021-01-01', end='2021-06-04'):
        '''
        slices the dataframe along the y-axis,
        i.e. the rows (dates)
        :return: Dataframe
        '''
        return df.loc[start:end]

    def nth_root(self, num, root):
        '''
        calculate the nth root of a number
        :return: float
        '''
        return num ** (1 / root)

    def get_SplitData(self, df, train_pct):
        '''
        split the dataset into train and test data
        :param train_pct:
        :return: Dataframes: x_train, y_train, x_test, y_test, train, test
        '''
        # train_pct is the percentage for the training dataset
        train_pt = int(df.shape[0] * train_pct)
        # extract the train dataset
        train = df.iloc[:train_pt, :]
        # extract the test dataset
        test = df.iloc[train_pt:, :]
        x_train = train.iloc[:, :-1]
        y_train = train.iloc[:, -1]
        x_test = test.iloc[:, :-1]
        y_test = test.iloc[:, -1]
        return x_train, y_train, x_test, y_test, train, test

    def get_SplitData_(self, df, n_forecast, train_pct, ticker):
        '''
        Different approach: split the data into train and test datasets
        :param n_forecast:
        :param train_pct:
        :param ticker:
        :return: Dataframes
        '''
        # train_pct is the percentage for the training dataset
        train_pt = int(df.shape[0] * train_pct)
        # extract the train dataset
        train = df.iloc[:train_pt, :]
        # extract the test dataset
        test = df.iloc[train_pt:, :]
        test = self.slice_forward(test, n_forecast)
        x_train = train.iloc[:, 1:]
        y_train = train[ticker]
        x_test = test.iloc[:, 1:]
        y_test = test[ticker]
        return x_train, y_train, x_test, y_test, train, test

    def set_shift(self, data, forecast, column):
        '''
        Shifts the stock price by the forecast horizon,
        i.e. the number of days ahead the stock shall be predicted
        :param data:
        :param forecast:
        :param column:
        :return: Dataframe
        '''
        # extract the price forecast
        shift = np.array(data[column].shift(periods=-forecast, axis=0))[:-forecast]
        # copy the index into a column
        data['date'] = data.index
        # shift the date column
        data['date'] = data['date'].shift(periods=-forecast, axis=0)
        # cut the last rows
        data.drop(data.tail(forecast).index, inplace=True)
        # set column to index
        # data = data.set_index('date')
        # drop the date column
        data.drop('date', axis=1, inplace=True)
        # add the forecast values
        data['{}_shift'.format(column)] = shift
        return data
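# Usage sketch (added for illustration; the ticker, dates and split ratio are arbitrary
# examples, and running it requires network access for the Yahoo download):
if __name__ == '__main__':
    h = Helper()
    prices = h.get_Data_Yahoo('GOOG', '2021-01-01', '2021-06-01')
    prices = h.cleanDataFrame(prices)
    x_train, y_train, x_test, y_test, train, test = h.get_SplitData(prices, train_pct=0.8)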
| 29.244186 | 111 | 0.570577 | 918 | 7,545 | 4.570806 | 0.252723 | 0.021449 | 0.014299 | 0.012393 | 0.223785 | 0.202574 | 0.1847 | 0.166587 | 0.142755 | 0.119161 | 0 | 0.013757 | 0.315971 | 7,545 | 257 | 112 | 29.357977 | 0.799264 | 0.261498 | 0 | 0.158879 | 0 | 0 | 0.072209 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.149533 | false | 0 | 0.065421 | 0 | 0.373832 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f151973048e707c76e9ac8951cabfb49a90e449 | 10,592 | py | Python | pos_tagger.py | aldoram5/NLP-Utils | 41b8f78c91ba32e44caace0f04142ffef96021e2 | [
"MIT"
] | null | null | null | pos_tagger.py | aldoram5/NLP-Utils | 41b8f78c91ba32e44caace0f04142ffef96021e2 | [
"MIT"
] | null | null | null | pos_tagger.py | aldoram5/NLP-Utils | 41b8f78c91ba32e44caace0f04142ffef96021e2 | [
"MIT"
] | null | null | null | #
# This Python script is a combination of various scripts and a pickle found at
# https://github.com/sloria/textblob-aptagger which are licensed under
# the MIT License https://github.com/sloria/textblob-aptagger/blob/dev/LICENSE
# for those scripts: Copyright 2013 Matthew Honnibal
#
# more in detail explanation of how it works can be found at
# https://explosion.ai/blog/part-of-speech-pos-tagger-in-python
#
# minor modifications where made so it works as some sort of module
#
import pickle
import os
import random
import logging
from utils import string_utils
from collections import defaultdict
class AveragedPerceptron(object):

    '''An averaged perceptron, as implemented by Matthew Honnibal.

    See more implementation details here:
        http://honnibal.wordpress.com/2013/09/11/a-good-part-of-speechpos-tagger-in-about-200-lines-of-python/
    '''

    def __init__(self):
        # Each feature gets its own weight vector, so weights is a dict-of-dicts
        self.weights = {}
        self.classes = set()
        # The accumulated values, for the averaging. These will be keyed by
        # feature/class tuples
        self._totals = defaultdict(int)
        # The last time the feature was changed, for the averaging. Also
        # keyed by feature/class tuples
        # (tstamps is short for timestamps)
        self._tstamps = defaultdict(int)
        # Number of instances seen
        self.i = 0

    def predict(self, features):
        '''Dot-product the features and current weights and return the best label.'''
        scores = defaultdict(float)
        for feat, value in features.items():
            if feat not in self.weights or value == 0:
                continue
            weights = self.weights[feat]
            for label, weight in weights.items():
                scores[label] += value * weight
        # Do a secondary alphabetic sort, for stability
        return max(self.classes, key=lambda label: (scores[label], label))

    def update(self, truth, guess, features):
        '''Update the feature weights.'''
        def upd_feat(c, f, w, v):
            param = (f, c)
            self._totals[param] += (self.i - self._tstamps[param]) * w
            self._tstamps[param] = self.i
            self.weights[f][c] = w + v

        self.i += 1
        if truth == guess:
            return None
        for f in features:
            weights = self.weights.setdefault(f, {})
            upd_feat(truth, f, weights.get(truth, 0.0), 1.0)
            upd_feat(guess, f, weights.get(guess, 0.0), -1.0)
        return None

    def average_weights(self):
        '''Average weights from all iterations.'''
        for feat, weights in self.weights.items():
            new_feat_weights = {}
            for clas, weight in weights.items():
                param = (feat, clas)
                total = self._totals[param]
                total += (self.i - self._tstamps[param]) * weight
                averaged = round(total / float(self.i), 3)
                if averaged:
                    new_feat_weights[clas] = averaged
            self.weights[feat] = new_feat_weights
        return None

    def save(self, path):
        '''Save the pickled model weights.'''
        return pickle.dump(dict(self.weights), open(path, 'wb'))  # pickle needs a binary file handle

    def load(self, path):
        '''Load the pickled model weights.'''
        self.weights = pickle.load(open(path, 'rb'))  # read in binary mode
        return None


def train(nr_iter, examples):
    '''Return an averaged perceptron model trained on ``examples`` for
    ``nr_iter`` iterations.
    '''
    model = AveragedPerceptron()
    for i in range(nr_iter):
        random.shuffle(examples)
        for features, class_ in examples:
            guess = model.predict(features)  # predict() already returns the best label
            if guess != class_:
                model.update(class_, guess, features)
    model.average_weights()
    return model


PICKLE = "tagger.pickle"


def tokenize(text, include_punc=False):
    '''Return a list of word tokens.

    :param text: string of text.
    :param include_punc: (optional) whether to include punctuation as separate tokens. Defaults to False.
    '''
    tokens = text.split()
    if include_punc:
        return tokens
    else:
        # Return each word token
        # Strips punctuation unless the word comes from a contraction
        # e.g. "Let's" => ["Let", "'s"]
        # e.g. "Can't" => ["Ca", "n't"]
        # e.g. "home." => ['home']
        return [word if word.startswith("'") else string_utils.strip_punc(word, all=False)
                for word in tokens if string_utils.strip_punc(word, all=False)]
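# Example behaviour (added for illustration; the exact output depends on
# string_utils.strip_punc, which is defined elsewhere in this project):
#
#     >>> tokenize("home. sweet home!")
#     ['home', 'sweet', 'home']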
class PerceptronTagger():

    '''Greedy Averaged Perceptron tagger, as implemented by Matthew Honnibal.

    See more implementation details here:
        http://honnibal.wordpress.com/2013/09/11/a-good-part-of-speechpos-tagger-in-about-200-lines-of-python/

    :param load: Load the pickled model upon instantiation.
    '''

    START = ['-START-', '-START2-']
    END = ['-END-', '-END2-']
    AP_MODEL_LOC = os.path.join(os.path.dirname(__file__), PICKLE)

    def __init__(self, load=True, base_dir=None):
        self.model = AveragedPerceptron()
        self.tagdict = {}
        self.classes = set()
        if load:
            self.load(self.AP_MODEL_LOC if base_dir is None else os.path.join(base_dir, PICKLE))

    def tag(self, corpus, use_tokens=True):
        '''Tags a string `corpus`.'''
        # Assume untokenized corpus has \n between sentences and ' ' between words
        w_split = tokenize if use_tokens else lambda s: s.split()

        def split_sents(corpus):
            yield w_split(corpus)

        prev, prev2 = self.START
        tokens = []
        for words in split_sents(corpus):
            context = self.START + [self._normalize(w) for w in words] + self.END
            for i, word in enumerate(words):
                tag = self.tagdict.get(word)
                if not tag:
                    features = self._get_features(i, word, context, prev, prev2)
                    tag = self.model.predict(features)
                tokens.append((word, tag))
                prev2 = prev
                prev = tag
        return tokens

    def train(self, sentences, save_loc=None, nr_iter=5):
        '''Train a model from sentences, and save it at ``save_loc``. ``nr_iter``
        controls the number of Perceptron training iterations.

        :param sentences: A list of (words, tags) tuples.
        :param save_loc: If not ``None``, saves a pickled model in this location.
        :param nr_iter: Number of training iterations.
        '''
        self._make_tagdict(sentences)
        self.model.classes = self.classes
        for iter_ in range(nr_iter):
            c = 0
            n = 0
            for words, tags in sentences:
                prev, prev2 = self.START
                context = self.START + [self._normalize(w) for w in words] \
                          + self.END
                for i, word in enumerate(words):
                    guess = self.tagdict.get(word)
                    if not guess:
                        feats = self._get_features(i, word, context, prev, prev2)
                        guess = self.model.predict(feats)
                        self.model.update(tags[i], guess, feats)
                    prev2 = prev
                    prev = guess
                    c += guess == tags[i]
                    n += 1
            random.shuffle(sentences)
            logging.info("Iter {0}: {1}/{2}={3}".format(iter_, c, n, _pc(c, n)))
        self.model.average_weights()
        # Pickle as a binary file
        if save_loc is not None:
            pickle.dump((self.model.weights, self.tagdict, self.classes),
                        open(save_loc, 'wb'), -1)
        return None

    def load(self, loc):
        '''Load a pickled model.'''
        try:
            w_td_c = pickle.load(open(loc, 'rb'))
        except IOError:
            msg = ("Missing trontagger.pickle file.")
            raise Exception(msg)
        self.model.weights, self.tagdict, self.classes = w_td_c
        self.model.classes = self.classes
        return None

    def _normalize(self, word):
        '''Normalization used in pre-processing.

        - All words are lower cased
        - Digits in the range 1800-2100 are represented as !YEAR;
        - Other digits are represented as !DIGITS

        :rtype: str
        '''
        if '-' in word and word[0] != '-':
            return '!HYPHEN'
        elif word.isdigit() and len(word) == 4:
            return '!YEAR'
        elif word[0].isdigit():
            return '!DIGITS'
        else:
            return word.lower()

    def _get_features(self, i, word, context, prev, prev2):
        '''Map tokens into a feature representation, implemented as a
        {hashable: float} dict. If the features change, a new model must be
        trained.
        '''
        def add(name, *args):
            features[' '.join((name,) + tuple(args))] += 1

        i += len(self.START)
        features = defaultdict(int)
        # It's useful to have a constant feature, which acts sort of like a prior
        add('bias')
        add('i suffix', word[-3:])
        add('i pref1', word[0])
        add('i-1 tag', prev)
        add('i-2 tag', prev2)
        add('i tag+i-2 tag', prev, prev2)
        add('i word', context[i])
        add('i-1 tag+i word', prev, context[i])
        add('i-1 word', context[i-1])
        add('i-1 suffix', context[i-1][-3:])
        add('i-2 word', context[i-2])
        add('i+1 word', context[i+1])
        add('i+1 suffix', context[i+1][-3:])
        add('i+2 word', context[i+2])
        return features

    def _make_tagdict(self, sentences):
        '''Make a tag dictionary for single-tag words.'''
        counts = defaultdict(lambda: defaultdict(int))
        for words, tags in sentences:
            for word, tag in zip(words, tags):
                counts[word][tag] += 1
                self.classes.add(tag)
        freq_thresh = 20
        ambiguity_thresh = 0.97
        for word, tag_freqs in counts.items():
            tag, mode = max(tag_freqs.items(), key=lambda item: item[1])
            n = sum(tag_freqs.values())
            # Don't add rare words to the tag dictionary
            # Only add quite unambiguous words
            if n >= freq_thresh and (float(mode) / n) >= ambiguity_thresh:
                self.tagdict[word] = tag


def _pc(n, d):
    return (float(n) / d) * 100
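# Usage sketch (added for illustration): this requires the pretrained "tagger.pickle"
# to sit next to this file, as AP_MODEL_LOC expects, so it only runs when invoked directly.
if __name__ == '__main__':
    tagger = PerceptronTagger()
    print(tagger.tag('Simple is better than complex .'))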
| 36.524138 | 110 | 0.57213 | 1,352 | 10,592 | 4.411982 | 0.247041 | 0.008718 | 0.005029 | 0.011065 | 0.18575 | 0.146521 | 0.126739 | 0.103269 | 0.091199 | 0.091199 | 0 | 0.014044 | 0.314294 | 10,592 | 289 | 111 | 36.650519 | 0.807242 | 0.270204 | 0 | 0.122905 | 0 | 0 | 0.031744 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.106145 | false | 0 | 0.03352 | 0.005587 | 0.268156 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f1523e42854f087d407db9159bffbe7608c4089 | 3,374 | py | Python | cocoa_folder/mutualfriends/scripts/generate_inverse_lexicon_data.py | s-akanksha/DialoGraph_ICLR21 | d5bbc10b2623c9f84d21a99a5e54e7dcfdfb1bcc | [
"Apache-2.0"
] | 12 | 2021-03-17T05:15:33.000Z | 2022-01-19T06:09:21.000Z | cocoa_folder/mutualfriends/scripts/generate_inverse_lexicon_data.py | s-akanksha/DialoGraph_ICLR21 | d5bbc10b2623c9f84d21a99a5e54e7dcfdfb1bcc | [
"Apache-2.0"
] | 2 | 2021-05-25T07:28:46.000Z | 2022-02-11T01:54:43.000Z | cocoa_folder/mutualfriends/scripts/generate_inverse_lexicon_data.py | s-akanksha/DialoGraph_ICLR21 | d5bbc10b2623c9f84d21a99a5e54e7dcfdfb1bcc | [
"Apache-2.0"
] | 4 | 2021-10-11T03:39:38.000Z | 2022-02-01T23:58:50.000Z | import argparse
import json
import re
import sys
sys.path.append("..")
from src.core.lexicon import Lexicon, add_lexicon_arguments
from src.core.schema import Schema
from stop_words import get_stop_words
from src.core.event import Event
from src.model.vocab import is_entity
from src.model.preprocess import Preprocessor
from src.core.dataset import Example
"""
Generate data for building inverse lexicon by
running regular lexicon on transcripts. Data outputted
should be of form:
<entity \t <span> \t <type>
for each entity linked by lexicon
"""
if __name__ == "__main__":
parser = argparse.ArgumentParser("arguments for.core.testing lexicon")
parser.add_argument("--schema", type=str, help="path to schema to use")
parser.add_argument("--ranker-data", type=str, help="path to train data")
parser.add_argument("--annotated-examples-path", help="Json of annotated examples", type=str)
parser.add_argument("--scenarios-json", help="Json of scenario information", type=str)
parser.add_argument("--transcripts", help="Json file of all transcripts collected")
parser.add_argument("--output", help="Output path")
add_lexicon_arguments(parser)
args = parser.parse_args()
path = args.schema
schema = Schema(path)
re_pattern = r"[\w*\']+|[(\w*&)]+|[\w]+|\.|\(|\)|\\|\"|\/|;|\#|\$|\%|\@|\{|\}|\:"
lexicon = Lexicon(schema, learned_lex=False, entity_ranker=None, scenarios_json=args.scenarios_json, stop_words=args.stop_words)
with open(args.annotated_examples_path, "r") as f:
annotated_examples = json.load(f)
with open(args.transcripts, "r") as f:
examples = json.load(f)
if not args.output:
fout = open("../../data/inverse_lexicon_data.txt", "w")
else:
fout = open(args.output, 'w')
# Process annotated examples
for ex in annotated_examples:
scenario_uuid = ex["scenario_uuid"]
for e in ex["events"]:
msg_data = e["data"]
action = e["action"]
agent = e["agent"]
if action == "message":
raw_tokens = re.findall(re_pattern, msg_data)
lower_raw_tokens = [r.lower() for r in raw_tokens]
_, candidate_annotation = lexicon.link_entity(lower_raw_tokens, return_entities=True, agent=agent, uuid=scenario_uuid)
for c in candidate_annotation:
# Entity, Span, Type
fout.write(c[1][0] + "\t" + c[0] + "\t" + c[1][1] + "\n")
preprocessor = Preprocessor(schema, lexicon, 'canonical', 'canonical', 'canonical', False)
for raw in examples:
ex = Example.from_dict(None, raw)
kbs = ex.scenario.kbs
mentioned_entities = set()
for i, event in enumerate(ex.events):
if event.action == 'message':
utterance = preprocessor.process_event(event, kbs[event.agent], mentioned_entities)
# Skip empty utterances
if utterance:
utterance = utterance[0]
for token in utterance:
if is_entity(token):
span, entity = token
entity, type_ = entity
# Entity, Span, Type
fout.write(entity + "\t" + span + "\t" + type_ + "\n")
fout.close()
| 36.27957 | 134 | 0.61144 | 414 | 3,374 | 4.838164 | 0.299517 | 0.020969 | 0.050924 | 0.011982 | 0.07988 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002406 | 0.260818 | 3,374 | 92 | 135 | 36.673913 | 0.800722 | 0.025489 | 0 | 0 | 0 | 0.032258 | 0.14022 | 0.031736 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.177419 | 0 | 0.177419 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f1760aab4578c4b1571e99fb2068552c3c6cb17 | 562 | py | Python | forms/job.py | jakobadam/iftek-paas | 787f1baf881f934a8073702605c2aa9253a17650 | [
"MIT"
] | null | null | null | forms/job.py | jakobadam/iftek-paas | 787f1baf881f934a8073702605c2aa9253a17650 | [
"MIT"
] | null | null | null | forms/job.py | jakobadam/iftek-paas | 787f1baf881f934a8073702605c2aa9253a17650 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (c) 2011, Cabo Communications A/S
# All rights reserved.
#
import models
from flaskext import wtf
class JobForm(wtf.Form):
url = wtf.html5.URLField('URL', [
wtf.Required('Hmm, husk du skal at indtaste en URL'),
wtf.URL('Hey, URLen er ikke gyldig!')
])
hour = wtf.IntegerField(u'Kald URL denne time', [
wtf.Required(u'Indtast timen hvor jobbet skal køres'),
wtf.NumberRange(min=0, max=23, message='Indtast et tal fra 0 til 23')
])
submit = wtf.SubmitField('Opret')
| 21.615385 | 77 | 0.631673 | 80 | 562 | 4.4375 | 0.7625 | 0.050704 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027778 | 0.231317 | 562 | 25 | 78 | 22.48 | 0.793981 | 0.153025 | 0 | 0.166667 | 0 | 0 | 0.323404 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f17ef59602730d8c0bb836e44c516b50c316c8c | 5,157 | py | Python | src/ufoRig.py | kateliev/ufoRig | bd32cf868cc28c822f1351e9c509853e8a0ec7c7 | [
"MIT"
] | null | null | null | src/ufoRig.py | kateliev/ufoRig | bd32cf868cc28c822f1351e9c509853e8a0ec7c7 | [
"MIT"
] | null | null | null | src/ufoRig.py | kateliev/ufoRig | bd32cf868cc28c822f1351e9c509853e8a0ec7c7 | [
"MIT"
] | null | null | null | # SCRIPT: ufoRig
# DESCRIPTION: A GUI based low level tool for editing
# DESCRIPTION: Unified Font Object (.UFO) files
# -----------------------------------------------------------
# (C) Vassil Kateliev, 2021 (http://www.kateliev.com)
# ------------------------------------------------------------
# https://github.com/kateliev
# - Dependencies ---------------------------------------------
import os
import sys
import pathlib
import json
import plistlib
import xml.etree.ElementTree as ET
from lib import widgets
from PyQt5 import QtCore, QtGui, QtWidgets
# - Init ----------------------------------------------------
app_name, app_version = 'ufoRig', '1.38'
# - Config --------------------------------------------------
cfg_file_open_formats = 'UFO Designspace (*.designspace);; UFO Plist (*.plist);; UFO (*.ufo);;'
# - Dialogs and Main -----------------------------------------
class main_ufoRig(QtWidgets.QMainWindow):
    def __init__(self):
        super(main_ufoRig, self).__init__()

        # - Init
        self.setTabPosition(QtCore.Qt.TopDockWidgetArea, QtWidgets.QTabWidget.North)
        self.setDockOptions(QtWidgets.QMainWindow.ForceTabbedDocks)

        # -- Status bar
        self.status_bar = QtWidgets.QStatusBar()
        self.setStatusBar(self.status_bar)

        # -- Tab widget
        self.wgt_tabs = QtWidgets.QTabWidget()
        self.wgt_tabs.setTabsClosable(True)
        self.wgt_tabs.tabCloseRequested.connect(lambda index: self.wgt_tabs.removeTab(index))

        # -- Central Widget
        self.setCentralWidget(self.wgt_tabs)

        # - Menu bar
        self.menu_file = QtWidgets.QMenu('File', self)

        # -- Actions
        act_data_open_file = QtWidgets.QAction('Open', self)
        act_data_open_folder = QtWidgets.QAction('Open UFO', self)
        act_data_save_file = QtWidgets.QAction('Save', self)

        act_data_open_file.triggered.connect(self.file_open)
        act_data_open_folder.triggered.connect(self.folder_open)
        act_data_save_file.triggered.connect(self.file_save)

        self.menu_file.addAction(act_data_open_file)
        self.menu_file.addAction(act_data_open_folder)
        self.menu_file.addAction(act_data_save_file)

        # -- Set Menu
        self.menuBar().addMenu(self.menu_file)

        # - Set
        self.setWindowTitle('%s %s' % (app_name, app_version))
        self.setGeometry(300, 100, 900, 720)

    # - Docks ----------------------------------------------
    def __park_docks(self):
        all_docks = self.findChildren(QtWidgets.QDockWidget)
        for dock in all_docks[1:]:
            self.tabifyDockWidget(all_docks[0], dock)

    # - File IO ---------------------------------------------
    # -- Classes Reader
    def file_save(self):
        curr_path = pathlib.Path(__file__).parent.absolute()

        # - Get data from current active tab
        curr_tab = self.wgt_tabs.widget(self.wgt_tabs.currentIndex())

        if curr_tab.file_type == '.designspace':
            export_file = QtWidgets.QFileDialog.getSaveFileName(self, 'Save file', str(curr_path), 'UFO Designspace (*.designspace)')
            curr_data = curr_tab.trw_explorer.get_tree()

            if len(export_file[0]):
                with open(export_file[0], 'wb') as exportFile:
                    curr_data.write(exportFile, encoding='utf-8', xml_declaration=True)

        elif curr_tab.file_type == '.plist':
            export_file = QtWidgets.QFileDialog.getSaveFileName(self, 'Save file', str(curr_path), 'UFO (*.plist)')
            curr_data = curr_tab.trw_explorer.get_tree()

            if len(export_file[0]):
                with open(export_file[0], 'wb') as exportFile:
                    plistlib.dump(curr_data[1], exportFile)

        self.status_bar.showMessage('File Saved: {}'.format(export_file[0]))

    def file_open(self):
        curr_path = pathlib.Path(__file__).parent.absolute()

        import_file = QtWidgets.QFileDialog.getOpenFileName(self, 'Open file', str(curr_path), cfg_file_open_formats)

        if len(import_file[0]):
            if '.designspace' in import_file[0]:
                file_tree = ET.parse(import_file[0])
                tab_caption = os.path.split(import_file[0])[1]
                self.wgt_tabs.addTab(widgets.wgt_designspace_manager(file_tree, self.status_bar), tab_caption)

            if '.plist' in import_file[0]:
                with open(import_file[0], 'rb') as plist_file:
                    file_tree = plistlib.load(plist_file)
                tab_caption = os.path.split(import_file[0])[1]
                self.wgt_tabs.addTab(widgets.wgt_plist_manager((tab_caption, file_tree), self.status_bar), tab_caption)

            self.status_bar.showMessage('File Loaded: {}'.format(import_file[0]))

    def folder_open(self):
        curr_path = pathlib.Path(__file__).parent.absolute()
        import_folder = QtWidgets.QFileDialog.getExistingDirectory(self, 'Open UFO', str(curr_path))
        collect_ufo_plist = list(pathlib.Path(import_folder).rglob('*.plist'))

        if len(collect_ufo_plist):
            tab_caption = os.path.split(import_folder)[1]
            data_tree = []

            for import_file in collect_ufo_plist:
                with open(import_file, 'rb') as plist_file:
                    file_tree = plistlib.load(plist_file)
                data_tree.append((import_file.name, file_tree))

            if len(data_tree):
                self.wgt_tabs.addTab(widgets.wgt_plist_manager(data_tree, self.status_bar), tab_caption)

            self.status_bar.showMessage('Loaded: {}'.format(import_folder))


# - Run -----------------------------
main_app = QtWidgets.QApplication(sys.argv)
main_dialog = main_ufoRig()
main_dialog.show()
main_app.exec_()
| 35.321918 | 124 | 0.674229 | 665 | 5,157 | 4.959399 | 0.258647 | 0.019709 | 0.033354 | 0.019406 | 0.317768 | 0.291086 | 0.274409 | 0.244391 | 0.216495 | 0.216495 | 0 | 0.008859 | 0.124491 | 5,157 | 145 | 125 | 35.565517 | 0.721595 | 0.165406 | 0 | 0.151163 | 0 | 0 | 0.064999 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05814 | false | 0 | 0.27907 | 0 | 0.348837 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f194ca308c8c0faee1fc38bb910852f3f743fc1 | 7,290 | py | Python | covid_flask_app.py | Spooks075/course-work-1 | ae2c90930ba313ff05917df0684fc732896ad038 | [
"MIT"
] | null | null | null | covid_flask_app.py | Spooks075/course-work-1 | ae2c90930ba313ff05917df0684fc732896ad038 | [
"MIT"
] | null | null | null | covid_flask_app.py | Spooks075/course-work-1 | ae2c90930ba313ff05917df0684fc732896ad038 | [
"MIT"
] | null | null | null | '''
Description:
    Displays local and national covid data, and covid news articles
    with links to those articles, plus scheduled updates.
    Takes input data and then uses it to schedule updates.
    Updates data when scheduled.
'''
import json
import sched
import time
import logging
from typing import Callable
from datetime import datetime
from flask import Flask, render_template
from flask import request
from covid_news_handling import call_articles
from covid_data_handler import call_function
FORMAT = '%(levelname)s: %(asctime)s: %(message)s'
logging.basicConfig(filename='log_file.log', format=FORMAT, level=logging.INFO)
# gets settings from config file
with open('config.json', encoding="utf-8") as f:
    config_data = json.load(f)
# global list for API data
Data = call_function()
# global list for API news articles
Articles = call_articles()
# global list for scheduled updates
Update_list = []
app = Flask(__name__, template_folder='template')
s = sched.scheduler(time.time,time.sleep)
@app.route("/")
def home():
'''
Description:
Function that renders image, title, covid data, news articles and updates
as well as checking schedualing
Arguments:
None
Returns:
render_template {string} : values input into the HTML
'''
#checks if any schedualed updates are due when the page refreshes
s.run(blocking=False)
return render_template('template.html',
image = "catimage.jpg",
title = "Covid Data and News Board",
location = config_data["local location"],
nation_location = config_data["nation location"],
local_7day_infections = Data[0],
national_7day_infections = Data[1],
hospital_cases = Data[2],
deaths_total = Data[3],
news_articles = Articles[0:4],
updates = Update_list
)
def update_function(up_news:str,up_data:str) -> Callable:
'''
Description:
Function that updates covid data and news and checks if update
should be scheduled again or not
Arguments:
up_news {string} : states whether or not to update data
up_data {string} : states whether or not to update news
Returns:
home {function} : runs home function
'''
logging.info('SCHEDULED UPDATE RAN')
global Data
global Articles
if up_data == 'true':
logging.info('COVID DATA UPDATED')
Data = call_function()
if up_news == 'true':
logging.info('COVID ARTICLES UPDATED')
Articles = call_articles()
#goes through each item in update list
for item in Update_list:
if (item['sched update'] not in s.queue) and (item['repeat'] == 'true'):
#if update has been run and is supposed to repeat
sched_update = s.enter(seconds(item['input time']),
1,update_function,(item['covid data'],item['covid news']))
item['sched update'] = sched_update
logging.info('UPDATE RE SCHEDULED')
elif (item['sched update'] not in s.queue) and (item['repeat'] == 'false'):
#if update has been run but is not supposed to repeat
Update_list.remove(item)
logging.info('UPDATE REMOVED FROM LIST')
return home()
def seconds(input_time:str) -> int:
'''
Description:
Function that calculates the number of seconds till schedualed
update should run
Arguments:
input_time {string} : the input time of when the update should run
Returns:
time_till {int} : number of seconds unti update should run
'''
#and returns seconds until that time
now = datetime.now()
hour = int(now.strftime("%H"))
minute = int(now.strftime("%M"))
second = int(now.strftime("%S"))
#takes current hour away from input hour
hour = int(input_time[0:2]) - hour
#takes current minute away from input
minute = int(input_time[3:]) - minute
#if minute is less than 0 then update is tomorrow
if minute < 0:
minute += 60
hour += 24
#if hour less than 0 then update is for tomorrow so
elif hour < 0:
hour += 24
#if seconds till update is 0 then update is the same time tommorow so
if (((hour*60)+minute)*60) == 0:
hour += 24
#coverts hours and minutes into seconds
time_till = (((hour*60)+minute)*60)+second
logging.info('TIME TILL UPDATE CALCULATED')
return time_till
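
# Worked example (added; the times are hypothetical): if the current time is
# 14:50:30 and input_time is "14:45", then hour = 14 - 14 = 0 and
# minute = 45 - 50 = -5. Borrowing gives minute = 55, hour = -1, and the day
# wrap gives hour = 23, so time_till = ((23 * 60) + 55) * 60 + 30 = 86,130
# seconds, i.e. roughly 14:45 tomorrow (the trailing "+ second" keeps the
# original author's rounding).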
@app.route("/index",methods = ["GET"])
def close_button() -> Callable:
'''
Description:
Function that runs when a cross button is pressed that removes updates
and news articles when their cross is clicked as well as creating and update
and adding it to the update list when create update is pressed
Arguments:
None
Returns:
home {function} : runs home function'''
s.run(blocking=False)
#when cross is pressed on articles
if request.args.get("notif"):
#gets the title of article that thr cross was clicked
title = request.args.get("notif")
for news in Articles:
#goes through articals till title matches then removes that article from list
if news['title'] == title:
Articles.remove(news)
logging.info('ARTICLE REMOVED')
#when the update button is pressed
if request.args.get('update'):
input_time = request.args.get('update')
name = request.args.get('two')
#converts toggle buttons to true or false strings
if request.args.get('repeat') is not None:
repeat = 'true'
else:
repeat = 'false'
if request.args.get('covid-data') is not None:
covid_data = 'true'
else:
covid_data = 'false'
if request.args.get('news') is not None:
covid_news = 'true'
else:
covid_news = 'false'
#schedules an update with input specifications
sched_update = s.enter(seconds(input_time),1,update_function,(covid_data, covid_news))
#creates dictionary with updates specifications
update_dict = {'title':'name: '+name,
'content':'time: '+input_time
+' updating covid data: '+str(covid_data)
+' updating covid news: '+str(covid_news)
+' repeating: '+str(repeat),
'repeat' : repeat,
'input time': input_time,
'covid data': covid_data,
'covid news':covid_news,
'sched update' : sched_update
}
#adds dictionary to list of updates
Update_list.append(update_dict)
logging.info('UPDATE SCHEDULED')
#if update cross is clicked
if request.args.get('update_item'):
#gets title of update cross clicked
title = request.args.get("update_item")
for items in Update_list:
#gets title of update cross clicked
if items['title'] == title:
Update_list.remove(items)
for updates in s.queue:
#finds update in scheduled updated and cancels it
if updates[4] == items['title']:
s.cancel(updates)
logging.info('UPDATED CANCELED')
return home()
if __name__ == "__main__":
app.run(debug=True)
| 28.700787 | 94 | 0.631962 | 944 | 7,290 | 4.791314 | 0.23411 | 0.029847 | 0.030953 | 0.021225 | 0.130223 | 0.068981 | 0.031395 | 0.017245 | 0.017245 | 0.017245 | 0 | 0.006989 | 0.2738 | 7,290 | 253 | 95 | 28.814229 | 0.847374 | 0.338683 | 0 | 0.115702 | 0 | 0 | 0.148701 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033058 | false | 0 | 0.082645 | 0 | 0.14876 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f1ab0c0e4bda659a77a776c2e10f4a4ca0c2b90 | 10,454 | py | Python | benchmarks/bench_cuda.py | abitrolly/numba-benchmark | 4bea9c23276fd0399df26452d19f13810a6496c7 | [
"BSD-2-Clause"
] | 6 | 2015-10-19T09:18:50.000Z | 2021-11-29T10:37:10.000Z | benchmarks/bench_cuda.py | abitrolly/numba-benchmark | 4bea9c23276fd0399df26452d19f13810a6496c7 | [
"BSD-2-Clause"
] | 9 | 2015-03-03T09:50:56.000Z | 2021-10-13T08:34:06.000Z | benchmarks/bench_cuda.py | abitrolly/numba-benchmark | 4bea9c23276fd0399df26452d19f13810a6496c7 | [
"BSD-2-Clause"
] | 7 | 2015-09-09T17:38:11.000Z | 2021-09-24T15:06:19.000Z | """
Benchmarks for the CUDA backend.
"""
from __future__ import division
import math
import sys
import numpy as np
def _jit_setup1():
    from numba import cuda, float32, float64

    def addmul(x, y, out):
        i = cuda.threadIdx.x + cuda.blockIdx.x * cuda.blockDim.x
        if i >= x.shape[0]:
            return
        out[i] = x[i] + y[i] * math.fabs(x[i])

    addmul_f32 = cuda.jit()(addmul)
    addmul_f64 = cuda.jit()(addmul)

    def no_op():
        pass

    # N-body simulation. We actually only run the step which computes the
    # accelerations from the positions and weights of the bodies (updating
    # speeds and positions is relatively uninteresting).
    # CUDA version adapted from http://http.developer.nvidia.com/GPUGems3/gpugems3_ch31.html

    eps_2 = np.float32(1e-6)
    zero = np.float32(0.0)
    one = np.float32(1.0)

    @cuda.jit(device=True, inline=True)
    def body_body_interaction(xi, yi, xj, yj, wj, axi, ayi):
        """
        Compute the influence of body j on the acceleration of body i.
        """
        rx = xj - xi
        ry = yj - yi
        sqr_dist = rx * rx + ry * ry + eps_2
        sixth_dist = sqr_dist * sqr_dist * sqr_dist
        inv_dist_cube = one / math.sqrt(sixth_dist)
        s = wj * inv_dist_cube
        axi += rx * s
        ayi += ry * s
        return axi, ayi

    @cuda.jit(device=True, inline=True)
    def tile_calculation(xi, yi, axi, ayi, positions, weights):
        """
        Compute the contribution of this block's tile to the acceleration
        of body i.
        """
        for j in range(cuda.blockDim.x):
            xj = positions[j, 0]
            yj = positions[j, 1]
            wj = weights[j]
            axi, ayi = body_body_interaction(xi, yi, xj, yj, wj, axi, ayi)
        return axi, ayi

    globals().update(locals())
tile_size = 128

# Don't JIT this function at the top-level as it breaks until Numba 0.16.
def calculate_forces(positions, weights, accelerations):
    """
    Calculate accelerations produced on all bodies by mutual gravitational
    forces.
    """
    sh_positions = cuda.shared.array((tile_size, 2), float32)
    sh_weights = cuda.shared.array(tile_size, float32)
    i = cuda.grid(1)
    axi = 0.0
    ayi = 0.0
    xi = positions[i, 0]
    yi = positions[i, 1]
    for j in range(0, len(weights), tile_size):
        index = (j // tile_size) * cuda.blockDim.x + cuda.threadIdx.x
        sh_index = cuda.threadIdx.x
        sh_positions[sh_index, 0] = positions[index, 0]
        sh_positions[sh_index, 1] = positions[index, 1]
        sh_weights[sh_index] = weights[index]
        cuda.syncthreads()
        axi, ayi = tile_calculation(xi, yi, axi, ayi,
                                    sh_positions, sh_weights)
        cuda.syncthreads()
    accelerations[i, 0] = axi
    accelerations[i, 1] = ayi
class NBodyCUDARunner:

    def __init__(self, positions, weights):
        self.calculate_forces = cuda.jit(
            argtypes=(float32[:, :], float32[:], float32[:, :])
        )(calculate_forces)
        self.accelerations = np.zeros_like(positions)
        self.n_bodies = len(weights)
        self.stream = cuda.stream()
        self.d_pos = cuda.to_device(positions, self.stream)
        self.d_wei = cuda.to_device(weights, self.stream)
        self.d_acc = cuda.to_device(self.accelerations, self.stream)
        self.stream.synchronize()

    def run(self):
        blockdim = tile_size
        griddim = int(math.ceil(self.n_bodies / blockdim))
        self.calculate_forces[griddim, blockdim, self.stream](
            self.d_pos, self.d_wei, self.d_acc)
        self.stream.synchronize()

    def results(self):
        self.d_acc.copy_to_host(self.accelerations, self.stream)
        self.stream.synchronize()
        return self.accelerations

def run_cpu_nbody(positions, weights):
    accelerations = np.zeros_like(positions)
    n = weights.size
    for j in range(n):
        # Compute influence of j'th body on all bodies
        r = positions[j] - positions
        rx = r[:, 0]
        ry = r[:, 1]
        sqr_dist = rx * rx + ry * ry + eps_2
        sixth_dist = sqr_dist * sqr_dist * sqr_dist
        inv_dist_cube = one / np.sqrt(sixth_dist)
        s = weights[j] * inv_dist_cube
        accelerations += (r.transpose() * s).transpose()
    return accelerations

def make_nbody_samples(n_bodies):
    positions = np.random.RandomState(0).uniform(-1.0, 1.0, (n_bodies, 2))
    weights = np.random.RandomState(0).uniform(1.0, 2.0, n_bodies)
    return positions.astype(np.float32), weights.astype(np.float32)
# Taken from numba.cuda.tests.cudapy.test_blackscholes
N = 16384
RISKFREE = 0.02
VOLATILITY = 0.30
A1 = 0.31938153
A2 = -0.356563782
A3 = 1.781477937
A4 = -1.821255978
A5 = 1.330274429
RSQRT2PI = 0.39894228040143267793994605993438
callResultGold = np.zeros(N)
putResultGold = np.zeros(N)
stockPrice = np.random.RandomState(0).uniform(5.0, 30.0, N)
optionStrike = np.random.RandomState(1).uniform(1.0, 100.0, N)
optionYears = np.random.RandomState(2).uniform(0.25, 10.0, N)
args = (callResultGold, putResultGold, stockPrice, optionStrike,
        optionYears, RISKFREE, VOLATILITY)
def _jit_setup2():
    from numba import cuda

    @cuda.jit(device=True, inline=True)
    def cnd_cuda(d):
        K = 1.0 / (1.0 + 0.2316419 * math.fabs(d))
        ret_val = (RSQRT2PI * math.exp(-0.5 * d * d) *
                   (K * (A1 + K * (A2 + K * (A3 + K * (A4 + K * A5))))))
        if d > 0:
            ret_val = 1.0 - ret_val
        return ret_val

    @cuda.jit()
    def black_scholes_cuda(callResult, putResult, S, X, T, R, V):
        i = cuda.threadIdx.x + cuda.blockIdx.x * cuda.blockDim.x
        if i >= S.shape[0]:
            return
        sqrtT = math.sqrt(T[i])
        d1 = (math.log(S[i] / X[i]) + (R + 0.5 * V * V) * T[i]) / (V * sqrtT)
        d2 = d1 - V * sqrtT
        cndd1 = cnd_cuda(d1)
        cndd2 = cnd_cuda(d2)
        expRT = math.exp((-1. * R) * T[i])
        callResult[i] = (S[i] * cndd1 - X[i] * expRT * cndd2)
        putResult[i] = (X[i] * expRT * (1.0 - cndd2) - S[i] * (1.0 - cndd1))

    globals().update(locals())
class Synthetic:
    """
    Micro-Benchmarks.
    """

    n = 4 * 256 * 1024

    def setup(self):
        self.no_op = cuda.jit(argtypes=())(no_op)
        self.stream = cuda.stream()
        self.f32 = np.zeros(self.n, dtype=np.float32)
        self.d_f32 = cuda.to_device(self.f32, self.stream)
        self.f64 = np.zeros(self.n, dtype=np.float64)
        self.d_f64 = cuda.to_device(self.f64, self.stream)
        self.sum_reduce = cuda.reduce(lambda x, y: x + y)
        self.res_f32 = cuda.to_device(np.zeros(1, dtype=np.float32))
        self.res_f64 = cuda.to_device(np.zeros(1, dtype=np.float64))
        self.stream.synchronize()

    def time_addmul_f32(self):
        blockdim = 512, 1
        griddim = int(math.ceil(float(self.n) / blockdim[0])), 1
        for i in range(10):
            addmul_f32[griddim, blockdim, self.stream](
                self.d_f32, self.d_f32, self.d_f32)
        self.stream.synchronize()

    def time_addmul_f64(self):
        blockdim = 512, 1
        griddim = int(math.ceil(float(self.n) / blockdim[0])), 1
        for i in range(10):
            addmul_f64[griddim, blockdim, self.stream](
                self.d_f64, self.d_f64, self.d_f64)
        self.stream.synchronize()

    def time_run_empty_kernel(self):
        self.no_op[1, 1, self.stream]()
        self.stream.synchronize()

    def time_reduce_f32(self):
        self.sum_reduce(self.d_f32, res=self.res_f32, stream=self.stream)
        self.stream.synchronize()

    def time_reduce_f64(self):
        self.sum_reduce(self.d_f64, res=self.res_f64, stream=self.stream)
        self.stream.synchronize()

class BlackScholes:

    def setup(self):
        self.stream = cuda.stream()
        self.d_callResult = cuda.to_device(callResultGold, self.stream)
        self.d_putResult = cuda.to_device(putResultGold, self.stream)
        self.d_stockPrice = cuda.to_device(stockPrice, self.stream)
        self.d_optionStrike = cuda.to_device(optionStrike, self.stream)
        self.d_optionYears = cuda.to_device(optionYears, self.stream)
        self.stream.synchronize()

    def time_blackscholes(self):
        blockdim = 512, 1
        griddim = int(math.ceil(float(N) / blockdim[0])), 1
        for i in range(10):
            black_scholes_cuda[griddim, blockdim, self.stream](
                self.d_callResult, self.d_putResult,
                self.d_stockPrice, self.d_optionStrike, self.d_optionYears,
                RISKFREE, VOLATILITY)
        self.stream.synchronize()

class NBody:
    n_bodies = 4096

    def setup(self):
        # Sanity check our implementation
        p, w = make_nbody_samples(tile_size * 2)
        runner = NBodyCUDARunner(p, w)
        runner.run()
        cuda_res = runner.results()
        cpu_res = run_cpu_nbody(p, w)
        assert np.allclose(cuda_res, cpu_res, 1e-4), (cuda_res, cpu_res)
        # Make actual benchmark samples and prepare data transfer
        self.positions, self.weights = make_nbody_samples(self.n_bodies)
        self.runner = NBodyCUDARunner(self.positions, self.weights)

    def time_cpu_nbody(self):
        run_cpu_nbody(self.positions, self.weights)

    def time_cuda_nbody(self):
        self.runner.run()

class DataTransfer:

    def setup(self):
        self.stream = cuda.stream()
        self.small_data = np.zeros(512, dtype=np.float64)
        self.large_data = np.zeros(512 * 1024, dtype=np.float64)
        self.d_small_data = cuda.to_device(self.small_data, self.stream)
        self.d_large_data = cuda.to_device(self.large_data, self.stream)
        self.stream.synchronize()

    def time_transfer_to_gpu_small(self):
        for i in range(10):
            cuda.to_device(self.small_data, self.stream)
        self.stream.synchronize()

    def time_transfer_to_gpu_large(self):
        for i in range(10):
            cuda.to_device(self.large_data, self.stream)
        self.stream.synchronize()

    def time_transfer_from_gpu_small(self):
        for i in range(10):
            self.d_small_data.copy_to_host(self.small_data, self.stream)
        self.stream.synchronize()

    def time_transfer_from_gpu_large(self):
        for i in range(10):
            self.d_large_data.copy_to_host(self.large_data, self.stream)
        self.stream.synchronize()

def setup():
    _jit_setup1()
    _jit_setup2()
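
# Added note: these classes follow the airspeed velocity (asv) benchmark
# convention -- setup() runs untimed before each benchmark and every time_*
# method is timed. A typical invocation (assumed, from the repository root):
#
#   asv run --bench bench_cuda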
| 31.969419 | 92 | 0.619093 | 1,471 | 10,454 | 4.25085 | 0.171992 | 0.070366 | 0.053734 | 0.049896 | 0.361906 | 0.314249 | 0.243883 | 0.189829 | 0.14537 | 0.121542 | 0 | 0.046962 | 0.258561 | 10,454 | 326 | 93 | 32.067485 | 0.759773 | 0.076908 | 0 | 0.226496 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004274 | 1 | 0.132479 | false | 0.004274 | 0.025641 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f203eaaafb7603c5af08d8251f48283e8b44249 | 2,161 | py | Python | tests/item_test.py | eraustud/supermarket_exercise | 33ea35875f07694fa982c475bded6da1319b0624 | [
"Apache-2.0"
] | null | null | null | tests/item_test.py | eraustud/supermarket_exercise | 33ea35875f07694fa982c475bded6da1319b0624 | [
"Apache-2.0"
] | null | null | null | tests/item_test.py | eraustud/supermarket_exercise | 33ea35875f07694fa982c475bded6da1319b0624 | [
"Apache-2.0"
] | null | null | null | import unittest
from src.supermarket import *
from test_helpers import *
import sqlite3
class TestItem(unittest.TestCase):
    def setup(self):
        self.database_path = 'example.db'
        init_empty_database(self.database_path)
        database.Database.database_path = self.database_path

    def teardown(self):
        kill_database(self.database_path)

    def test_constructor(self):
        test_item = item.Item('1983', 'toothbrush', 199)
        self.assertNotEqual(test_item, None)

    def test_create_product(self):
        self.setup()
        toothbrush = item.Item('1983', 'toothbrush', 199)
        toothbrush.create_product()  # save to database
        # fetch from database directly (don't contaminate this test with the read_product method)
        connection = sqlite3.connect(self.database_path)
        cursor = connection.cursor()
        cursor.execute('SELECT * FROM products WHERE SKU=(?)', ('1983',))
        result = cursor.fetchall()
        connection.commit()
        connection.close()
        # make sure the created product has the expected properties
        self.assertEqual(len(result), 1)
        record = result[0]
        self.assertEqual(record[1], toothbrush.sku)
        self.assertEqual(record[2], toothbrush.name)
        self.assertEqual(record[3], toothbrush.price)
        self.teardown()

    def test_read_product(self):
        self.setup()
        # create database record directly (don't contaminate this test with the create_product method)
        connection = sqlite3.connect(self.database_path)
        cursor = connection.cursor()
        sku = '1983'
        item_args = (sku, 'toothbrush', 199)
        cursor.execute('INSERT INTO products(SKU, name, price) VALUES(?, ?, ?)', item_args)
        connection.commit()
        connection.close()
        # check that the product record read has the expected properties
        toothbrush = item.Item.read_product(sku)
        self.assertEqual(toothbrush.sku, sku)
        self.assertEqual(toothbrush.name, 'toothbrush')
        self.assertEqual(toothbrush.price, 199)
        self.teardown()

if __name__ == "__main__":
    unittest.main() | 33.246154 | 66 | 0.65988 | 245 | 2,161 | 5.689796 | 0.318367 | 0.060258 | 0.068867 | 0.034433 | 0.197991 | 0.162123 | 0.162123 | 0.162123 | 0.107604 | 0.107604 | 0 | 0.021818 | 0.236465 | 2,161 | 65 | 103 | 33.246154 | 0.82303 | 0.147154 | 0 | 0.26087 | 0 | 0 | 0.089227 | 0 | 0 | 0 | 0 | 0 | 0.173913 | 1 | 0.108696 | false | 0 | 0.086957 | 0 | 0.217391 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
unittest.main() | 33.246154 | 102 | 0.65988 | 245 | 2,161 | 5.689796 | 0.318367 | 0.060258 | 0.068867 | 0.034433 | 0.197991 | 0.162123 | 0.162123 | 0.162123 | 0.107604 | 0.107604 | 0 | 0.021818 | 0.236465 | 2,161 | 65 | 103 | 33.246154 | 0.82303 | 0.147154 | 0 | 0.26087 | 0 | 0 | 0.089227 | 0 | 0 | 0 | 0 | 0 | 0.173913 | 1 | 0.108696 | false | 0 | 0.086957 | 0 | 0.217391 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f2138c601fef6d54882fdc4e74d4f8bba613f99 | 1,515 | py | Python | backend-flask-sqlalchemy/exemplo_tech_store/test.py | santiagosilas/dweb20192-vuejs | d6b322c075682247ca2e1eb70c9c64d27d741fa2 | [
"MIT"
] | 1 | 2020-11-12T15:27:20.000Z | 2020-11-12T15:27:20.000Z | cap_07_flask_sqlalchemy/exemplo_tech_store/test.py | santiagosilas/BasicoFlaskDevWeb | de7b952427e453b365c84a7f26882174d5cb13ae | [
"MIT"
] | 36 | 2019-12-05T10:39:07.000Z | 2022-02-27T10:34:55.000Z | backend-flask-sqlalchemy/exemplo_tech_store/test.py | santiagosilas/dweb20192-vuejs | d6b322c075682247ca2e1eb70c9c64d27d741fa2 | [
"MIT"
] | null | null | null | from TechApp import db, models
from TechApp.models import Loja
import unittest
class TechAppTestCase(unittest.TestCase):
    def setUp(self):
        # create the database
        db.drop_all()
        db.create_all()

    def test_obter_lojas(self):
        db.session.add(Loja('Macavi'))
        db.session.add(Loja('Casas Bahia'))
        db.session.commit()
        lojas = models.obter_lojas()
        self.assertEqual(len(lojas), 2)
        self.assertEqual(lojas, Loja.query.all())

    def test_inserir_loja(self):
        db.drop_all()
        db.create_all()
        count1 = len(Loja.query.filter_by(titulo='Leleo').all())
        models.inserir_loja(titulo='Leleo')
        count2 = len(Loja.query.filter_by(titulo='Leleo').all())
        loja = Loja.query.filter_by(titulo='zxc').first()
        self.assertEqual(loja, None)
        loja = Loja.query.filter_by(titulo='Leleo').first()
        self.assertEqual(loja.titulo, 'Leleo')
        self.assertNotEqual(count1, count2)
        self.assertEqual(count1 + 1, count2)

    def test_atualizar_loja(self):
        loja = Loja('Loja X')
        db.session.add(loja)
        db.session.commit()
        models.atualizar_loja(loja.id, 'novo nome')
        self.assertEqual(loja.titulo, 'novo nome')

    def test_remover_loja(self):
        loja = Loja('Loja X')
        db.session.add(loja)
        db.session.commit()
        models.remover_loja(id=loja.id)
        self.assertEqual(Loja.query.get(loja.id), None)

if __name__ == '__main__':
    unittest.main() | 30.918367 | 49 | 0.631683 | 194 | 1,515 | 4.78866 | 0.273196 | 0.067815 | 0.051668 | 0.068891 | 0.304629 | 0.304629 | 0.198062 | 0.198062 | 0.124865 | 0.124865 | 0 | 0.006867 | 0.231023 | 1,515 | 49 | 67 | 30.918367 | 0.790558 | 0.012541 | 0 | 0.275 | 0 | 0 | 0.055518 | 0 | 0 | 0 | 0 | 0 | 0.2 | 1 | 0.125 | false | 0 | 0.075 | 0 | 0.225 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
unittest.main() | 30.918367 | 66 | 0.631683 | 194 | 1,515 | 4.78866 | 0.273196 | 0.067815 | 0.051668 | 0.068891 | 0.304629 | 0.304629 | 0.198062 | 0.198062 | 0.124865 | 0.124865 | 0 | 0.006867 | 0.231023 | 1,515 | 49 | 67 | 30.918367 | 0.790558 | 0.012541 | 0 | 0.275 | 0 | 0 | 0.055518 | 0 | 0 | 0 | 0 | 0 | 0.2 | 1 | 0.125 | false | 0 | 0.075 | 0 | 0.225 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f25b927331a31d4410e202039b6adfe5e308a0e | 14,959 | py | Python | src/vision_transformer.py | sail-sg/mugs | 7652939078b256dc9aadc09c67d1e310253d49b9 | [
"Apache-2.0"
] | 7 | 2022-03-29T09:58:42.000Z | 2022-03-31T10:46:09.000Z | src/vision_transformer.py | sail-sg/mugs | 7652939078b256dc9aadc09c67d1e310253d49b9 | [
"Apache-2.0"
] | null | null | null | src/vision_transformer.py | sail-sg/mugs | 7652939078b256dc9aadc09c67d1e310253d49b9 | [
"Apache-2.0"
] | 1 | 2022-03-30T07:28:28.000Z | 2022-03-30T07:28:28.000Z | # Copyright 2022 Garena Online Private Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
ViT backbones, including ViT-small, ViT-base, ViT-large
Mostly copy-paste from timm library.
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
import math
import warnings  # bug fix: warnings.warn is used below but was never imported
from functools import partial

import torch
import torch.nn as nn
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
    # Cut & paste from PyTorch official master until it's in a few official releases - RW
    # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
    def norm_cdf(x):
        # Computes standard normal cumulative distribution function
        return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0

    if (mean < a - 2 * std) or (mean > b + 2 * std):
        warnings.warn(
            "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
            "The distribution of values may be incorrect.",
            stacklevel=2,
        )

    with torch.no_grad():
        # Values are generated by using a truncated uniform distribution and
        # then using the inverse CDF for the normal distribution.
        # Get upper and lower cdf values
        lower = norm_cdf((a - mean) / std)
        upper = norm_cdf((b - mean) / std)

        # Uniformly fill tensor with values from [l, u], then translate to
        # [2l-1, 2u-1].
        tensor.uniform_(2 * lower - 1, 2 * upper - 1)

        # Use inverse cdf transform for normal distribution to get truncated
        # standard normal
        tensor.erfinv_()

        # Transform to proper mean, std
        tensor.mul_(std * math.sqrt(2.0))
        tensor.add_(mean)

        # Clamp to ensure it's in the proper range
        tensor.clamp_(min=a, max=b)
        return tensor

def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
    # type: (torch.tensor, float, float, float, float) -> torch.tensor
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
def drop_path(x, drop_prob: float = 0.0, training: bool = False):
    """
    Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
    """
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1 - drop_prob
    shape = (x.shape[0],) + (1,) * (
        x.ndim - 1
    )  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
    random_tensor.floor_()  # binarize
    output = x.div(keep_prob) * random_tensor
    return output
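
# Quick numerical illustration (added; not part of the original file): with
# drop_prob=0.2 and training=True, roughly 20% of the samples in a batch have
# their residual branch zeroed, and survivors are scaled by 1/keep_prob so the
# expected value is unchanged:
#
#   x = torch.ones(4, 3)
#   out = drop_path(x, drop_prob=0.2, training=True)
#   # each row of `out` is either all zeros or all 1/0.8 = 1.25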
class DropPath(nn.Module):
    """
    Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
    """

    def __init__(self, drop_prob=None):
        super(DropPath, self).__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        return drop_path(x, self.drop_prob, self.training)

class Mlp(nn.Module):
    """
    MLP module in ViT
    """

    def __init__(
        self,
        in_features,
        hidden_features=None,
        out_features=None,
        act_layer=nn.GELU,
        drop=0.0,
    ):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x
class Attention(nn.Module):
    """
    Attention module in ViT
    """

    def __init__(
        self,
        dim,
        num_heads=8,
        qkv_bias=False,
        qk_scale=None,
        attn_drop=0.0,
        proj_drop=0.0,
    ):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads)
        reshaped_qkv = qkv.permute(2, 0, 3, 1, 4)
        q, k, v = reshaped_qkv[0], reshaped_qkv[1], reshaped_qkv[2]

        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x, attn
class Block(nn.Module):
    """
    ViT block, including Attention, MLP, etc.
    """

    def __init__(
        self,
        dim,
        num_heads,
        mlp_ratio=4.0,
        qkv_bias=False,
        qk_scale=None,
        drop=0.0,
        attn_drop=0.0,
        drop_path=0.0,
        act_layer=nn.GELU,
        norm_layer=nn.LayerNorm,
    ):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            attn_drop=attn_drop,
            proj_drop=drop,
        )
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(
            in_features=dim,
            hidden_features=mlp_hidden_dim,
            act_layer=act_layer,
            drop=drop,
        )

    def forward(self, x, return_attention=False):
        y, attn = self.attn(self.norm1(x))
        if return_attention:
            return attn
        x = x + self.drop_path(y)
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x
class PatchEmbed(nn.Module):
    """Image to Patch Embedding"""

    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        num_patches = (img_size // patch_size) * (img_size // patch_size)
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = num_patches
        self.proj = nn.Conv2d(
            in_chans, embed_dim, kernel_size=patch_size, stride=patch_size
        )

    def forward(self, x):
        x = self.proj(x).flatten(2).transpose(1, 2)
        return x
class VisionTransformer(nn.Module):
    """Vision Transformer"""

    def __init__(
        self,
        img_size=[224, 224],
        patch_size=16,
        in_chans=3,
        num_classes=0,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4.0,
        qkv_bias=False,
        qk_scale=None,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        norm_layer=nn.LayerNorm,
        num_relation_blocks=0,
        **kwargs
    ):
        super().__init__()
        self.num_features = self.embed_dim = embed_dim
        self.patch_size = patch_size
        self.num_classes = num_classes
        self.depth = depth
        self.patch_embed = PatchEmbed(
            img_size=img_size[0],
            patch_size=patch_size,
            in_chans=in_chans,
            embed_dim=embed_dim,
        )
        num_patches = self.patch_embed.num_patches
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
        trunc_normal_(self.pos_embed, std=0.02)
        self.pos_drop = nn.Dropout(p=drop_rate)

        dpr = [
            x.item() for x in torch.linspace(0, drop_path_rate, depth)
        ]  # stochastic depth decay rule
        self.blocks = nn.ModuleList(
            [
                Block(
                    dim=embed_dim,
                    num_heads=num_heads,
                    mlp_ratio=mlp_ratio,
                    qkv_bias=qkv_bias,
                    qk_scale=qk_scale,
                    drop=drop_rate,
                    attn_drop=attn_drop_rate,
                    drop_path=dpr[i],
                    norm_layer=norm_layer,
                )
                for i in range(depth)
            ]
        )
        self.norm = norm_layer(embed_dim)

        # Classifier head
        self.head = (
            nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
        )

        self.num_relation_blocks = num_relation_blocks
        if num_relation_blocks > 0:
            self.relation_blocks = nn.ModuleList(
                [
                    Block(
                        dim=embed_dim,
                        num_heads=num_heads,
                        mlp_ratio=mlp_ratio,
                        qkv_bias=qkv_bias,
                        qk_scale=qk_scale,
                        drop=drop_rate,
                        attn_drop=attn_drop_rate,
                        drop_path=dpr[i],
                        norm_layer=norm_layer,
                    )
                    for i in range(int(num_relation_blocks))
                ]
            )

        trunc_normal_(self.cls_token, std=0.02)
        self.apply(self._init_weights)

    def add_pos_emb_for_cls_token(self):
        pe_cls_token = torch.zeros([1, 1, self.embed_dim], dtype=torch.float32)
        self.pos_embed = nn.Parameter(torch.cat([pe_cls_token, self.pos_embed], dim=1))
        self.pos_embed.requires_grad = False

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def interpolate_pos_encoding(self, x, w, h):
        npatch = x.shape[1] - 1
        N = self.pos_embed.shape[1] - 1
        if npatch == N and w == h:
            return self.pos_embed
        class_pos_embed = self.pos_embed[:, 0]
        patch_pos_embed = self.pos_embed[:, 1:]
        dim = x.shape[-1]
        w0 = w // self.patch_embed.patch_size
        h0 = h // self.patch_embed.patch_size
        # we add a small number to avoid floating point error in the interpolation
        # see discussion at https://github.com/facebookresearch/dino/issues/8
        w0, h0 = w0 + 0.1, h0 + 0.1
        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed.reshape(
                1, int(math.sqrt(N)), int(math.sqrt(N)), dim
            ).permute(0, 3, 1, 2),
            scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
            mode="bicubic",
        )
        assert (
            int(w0) == patch_pos_embed.shape[-2]
            and int(h0) == patch_pos_embed.shape[-1]
        )
        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
        return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)

    def prepare_tokens(self, x):
        B, nc, w, h = x.shape
        x = self.patch_embed(x)  # patch linear embedding

        # add the [CLS] token to the embed patch tokens
        cls_tokens = self.cls_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)

        # add positional encoding to each token
        x = x + self.interpolate_pos_encoding(x, w, h)
        return self.pos_drop(x)

    def forward(self, x, return_all=False, local_group_memory_inputs=None, **kwargs):
        x = self.prepare_tokens(x)
        for blk in self.blocks:
            x = blk(x)
        if self.num_relation_blocks > 0:
            mem = local_group_memory_inputs.get("mem")
            if mem is not None:
                m, _ = mem(x.mean(1))
                rx = torch.cat((x.mean(1).unsqueeze(1), m), dim=1)
            else:
                rx = x
            for i, blk in enumerate(self.relation_blocks):
                rx = blk(rx)
            relation_out = self.norm(rx[:, 0])
        x = self.norm(x)
        if self.num_classes > 0:
            return self.head(x[:, 0])
        if return_all:
            return x, relation_out
        else:
            return x[:, 0], relation_out

    def forward_knn(self, x):
        x = self.prepare_tokens(x)
        for blk in self.blocks:
            x = blk(x)
        x = self.norm(x)
        return x[:, 0]

    def get_last_selfattention(self, x):
        x = self.prepare_tokens(x)
        for i, blk in enumerate(self.blocks):
            if i < len(self.blocks) - 1:
                x = blk(x)
            else:
                # return attention of the last block
                return blk(x, return_attention=True)

    def get_intermediate_layers(self, x, n=1):
        x = self.prepare_tokens(x)
        # we return the output tokens from the `n` last blocks
        output = []
        for i, blk in enumerate(self.blocks):
            x = blk(x)
            if len(self.blocks) - i <= n:
                output.append(self.norm(x))
        return output

    def get_num_layers(self):
        return self.depth

    @torch.jit.ignore
    def no_weight_decay(self):
        return {"pos_embed", "cls_token"}
def vit_tiny(patch_size=16, **kwargs):
    model = VisionTransformer(
        patch_size=patch_size,
        embed_dim=192,
        depth=12,
        num_heads=3,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs
    )
    return model

def vit_small(patch_size=16, **kwargs):
    model = VisionTransformer(
        patch_size=patch_size,
        embed_dim=384,
        depth=12,
        num_heads=6,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs
    )
    return model

def vit_base(patch_size=16, **kwargs):
    model = VisionTransformer(
        patch_size=patch_size,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs
    )
    return model

def vit_large(patch_size=16, **kwargs):
    model = VisionTransformer(
        patch_size=patch_size,
        embed_dim=1024,
        depth=24,
        num_heads=16,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs
    )
    return model
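
# Minimal usage sketch (added; not part of the original module). forward() is
# written around Mugs' relation blocks and memory inputs, so for a plain
# feature pass forward_knn() is the simplest entry point:
#
#   model = vit_small()
#   feats = model.forward_knn(torch.randn(2, 3, 224, 224))  # (2, 384) CLS features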
| 30.404472 | 95 | 0.569891 | 2,035 | 14,959 | 3.987715 | 0.17887 | 0.028835 | 0.009612 | 0.015527 | 0.278004 | 0.224769 | 0.189772 | 0.167837 | 0.160444 | 0.152557 | 0 | 0.024812 | 0.323752 | 14,959 | 491 | 96 | 30.466395 | 0.777382 | 0.145999 | 0 | 0.357143 | 0 | 0 | 0.010596 | 0.00174 | 0 | 0 | 0 | 0 | 0.002747 | 1 | 0.07967 | false | 0 | 0.010989 | 0.013736 | 0.178571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f28f7fa6f337b07ffe36d2b5fe9306bf0454618 | 3,024 | py | Python | btc_payment/views.py | davit-gh/ecommerce | 2dd2f31fdb4e037e5a5daaa380ee11ed5a83d8e9 | [
"MIT"
] | 22 | 2018-05-11T03:03:25.000Z | 2022-02-24T17:11:20.000Z | btc_payment/views.py | morshedmasud/ecommerce | e3798f116b26f24a1a4a40dc3bd8497ba1539de1 | [
"MIT"
] | 14 | 2018-05-14T02:08:57.000Z | 2022-03-11T23:20:39.000Z | btc_payment/views.py | morshedmasud/ecommerce | e3798f116b26f24a1a4a40dc3bd8497ba1539de1 | [
"MIT"
] | 5 | 2018-11-11T11:17:11.000Z | 2022-01-17T13:11:24.000Z | import datetime, pytz
from django.shortcuts import get_object_or_404
from django.http import HttpResponse
from django.http import HttpResponseServerError
from django.http import HttpResponseBadRequest
from django.urls import reverse
from blockchain.blockexplorer import get_address
from blockchain.v2.receive import receive
from mezzanine.conf import settings
from .models import BtcInvoice, BtcInvoicePayment
from .models import BtcPendingInvoicePayment
def _create_callback_url(request, invoice_id, secret):
    relative_url = reverse("payment_handler", args=[invoice_id, secret])
    callback_url = request.build_absolute_uri(relative_url)
    return callback_url

def create_handler(request, order_total, btc_total):
    """
    Create a new bitcoin address if the latest created one has received funds.
    Otherwise use the latest address. The aim is to avoid the 'address gap' issue.
    invoice_id should be something unique for the transaction.
    """
    invoice_id = str(request.cart.id)
    recv = BtcInvoice.objects.latest()
    address = recv.address
    received = get_address(address).total_received
    if received > 0:
        callback_url = _create_callback_url(
            request, invoice_id, settings.SECRET_KEY
        )
        recv = receive(settings.XPUB, callback_url, settings.API_KEY)
        address = recv.address
        invoice = BtcInvoice(
            invoice_id=invoice_id,
            price_in_usd=order_total,
            price_in_btc=btc_total,
            address=address
        )
        invoice.save()
    else:
        recv.price_in_usd = order_total
        recv.price_in_btc = btc_total
        recv.added_time = datetime.datetime.now(pytz.utc)
        recv.save()
        invoice_id = recv.invoice_id
    return (address, invoice_id)

def payment_handler(request, invoice_id, secret):
    """Handle the response from blockchain.info."""
    address = request.GET.get('address')
    confirmations = request.GET.get('confirmations')
    tx_hash = request.GET.get('transaction_hash')
    value = int(request.GET.get('value'))
    order = get_object_or_404(BtcInvoice, invoice_id=invoice_id)
    if address != order.address:
        return HttpResponseBadRequest('Incorrect Receiving Address')
    if secret != settings.SECRET_KEY:
        return HttpResponseBadRequest('Invalid secret')
    if int(confirmations) >= 4:
        pay = BtcInvoicePayment(
            transaction_hash=tx_hash,
            value=value,
            invoice=order
        )
        pay.save()
        obj = get_object_or_404(BtcPendingInvoicePayment, invoice_id=invoice_id)
        obj.delete()
        return HttpResponse('*ok*', content_type='text/plain')
    else:
        pending, created = BtcPendingInvoicePayment.objects.get_or_create(
            invoice_id=invoice_id,
            transaction_hash=tx_hash,
            value=value,
        )
        return HttpResponse('Waiting for confirmations')
    # should never reach here! (both branches above return)
    return HttpResponseServerError('Something went wrong')
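
# URL wiring sketch (added; not part of this file -- the urls.py shape is an
# assumption, though the route name matches the reverse() call above).
# blockchain.info posts back to payment_handler on every transaction and
# stops retrying once the view returns the literal body '*ok*':
#
#   # urls.py
#   # path('btc/callback/<invoice_id>/<secret>/', views.payment_handler,
#   #      name='payment_handler'),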
| 35.576471 | 80 | 0.700066 | 356 | 3,024 | 5.741573 | 0.325843 | 0.074853 | 0.031311 | 0.035225 | 0.127202 | 0.062622 | 0 | 0 | 0 | 0 | 0 | 0.005098 | 0.221561 | 3,024 | 84 | 81 | 36 | 0.863212 | 0.091601 | 0 | 0.147059 | 0 | 0 | 0.057395 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044118 | false | 0 | 0.161765 | 0 | 0.308824 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f2c444d7136697719e38631114d31c237cd7861 | 656 | py | Python | python-algorithm/leetcode/problem_599.py | isudox/leetcode-solution | 60085e64deaf396a171367affc94b18114565c43 | [
"MIT"
] | 5 | 2017-06-11T09:19:34.000Z | 2019-01-16T16:58:31.000Z | python-algorithm/leetcode/problem_599.py | isudox/leetcode-solution | 60085e64deaf396a171367affc94b18114565c43 | [
"MIT"
] | null | null | null | python-algorithm/leetcode/problem_599.py | isudox/leetcode-solution | 60085e64deaf396a171367affc94b18114565c43 | [
"MIT"
] | 1 | 2019-03-02T15:50:43.000Z | 2019-03-02T15:50:43.000Z | """599. Minimum Index Sum of Two Lists
https://leetcode.com/problems/minimum-index-sum-of-two-lists/
"""
from typing import List
class Solution:
def findRestaurant(self, list1: List[str], list2: List[str]) -> List[str]:
ans = []
idx_sum = 2000
store = dict()
for i, v in enumerate(list2):
store[v] = i
for i, v in enumerate(list1):
if v in store:
tmp_sum = i + store[v]
if tmp_sum < idx_sum:
idx_sum = tmp_sum
ans = [v]
elif tmp_sum == idx_sum:
ans.append(v)
return ans
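
# Illustrative example (added; values taken from the problem statement):
#   list1 = ["Shogun", "Tapioca Express", "Burger King", "KFC"]
#   list2 = ["KFC", "Shogun", "Burger King"]
#   Solution().findRestaurant(list1, list2)  # -> ["Shogun"] (index sum 0 + 1 = 1)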
| 28.521739 | 78 | 0.501524 | 85 | 656 | 3.776471 | 0.447059 | 0.074766 | 0.084112 | 0.105919 | 0.255452 | 0.155763 | 0 | 0 | 0 | 0 | 0 | 0.027569 | 0.391768 | 656 | 22 | 79 | 29.818182 | 0.776942 | 0.147866 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.058824 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f2f7ca7d678d2afb51d07068b7522de5b5d1b3d | 1,009 | py | Python | blackcat/test/test_post_processor.py | pluralsight/BlackCat | b071fef95f9d8162696556bb25ceb5b208d150cf | [
"Apache-2.0"
] | 4 | 2019-11-21T20:35:39.000Z | 2022-01-11T18:40:55.000Z | blackcat/test/test_post_processor.py | Plazmaz/BlackCat | b071fef95f9d8162696556bb25ceb5b208d150cf | [
"Apache-2.0"
] | null | null | null | blackcat/test/test_post_processor.py | Plazmaz/BlackCat | b071fef95f9d8162696556bb25ceb5b208d150cf | [
"Apache-2.0"
] | 2 | 2020-03-06T19:42:20.000Z | 2021-06-29T12:14:24.000Z | from blackcat.postprocessing.post_processor import BlackCatPostProcessor
from blackcat.postprocessing.steps.trim_to_relevant_data import PostStepFlattenRelevantData
class TestPostProcessor(object):
    TEST_OBJ = {
        'node': {
            'securityVulnerability': {'test-field': True},
            'dismisser': 'Nobody',
            'dismissedAt': 'Never',
            'dismissReason': 'None of your business',
            'vulnerableManifestPath': 'package.json'
        }
    }

    def test_post_processor(self):
        BlackCatPostProcessor.add_post_step(PostStepFlattenRelevantData())
        out = BlackCatPostProcessor.run(1, 'test_org', 'https://www.google.com', self.TEST_OBJ)
        assert out == {
            'test-field': True,
            'dismisser': 'Nobody',
            'dismissed_at': 'Never',
            'dismissed_reason': 'None of your business',
            'manifest_file': 'package.json',
            'run_id': 1,
            'repo_url': 'https://www.google.com'
        }
| 36.035714 | 95 | 0.611497 | 91 | 1,009 | 6.604396 | 0.604396 | 0.039933 | 0.086522 | 0.073211 | 0.093178 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002703 | 0.266601 | 1,009 | 27 | 96 | 37.37037 | 0.809459 | 0 | 0 | 0.083333 | 0 | 0 | 0.301288 | 0.042616 | 0 | 0 | 0 | 0 | 0.041667 | 1 | 0.041667 | false | 0 | 0.083333 | 0 | 0.208333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f330abfc4ce7d2b28f1acc8b042f2e7489fb0bb | 1,881 | py | Python | foreshadow/concrete/internals/tfidf.py | adithyabsk/foreshadow | ca2e927c396ae0d61923b287d6e32e142f3ba96f | [
"Apache-2.0"
] | 25 | 2018-07-26T17:30:31.000Z | 2021-02-23T22:54:01.000Z | foreshadow/concrete/internals/tfidf.py | adithyabsk/foreshadow | ca2e927c396ae0d61923b287d6e32e142f3ba96f | [
"Apache-2.0"
] | 150 | 2018-11-02T18:09:12.000Z | 2020-05-15T01:01:35.000Z | foreshadow/concrete/internals/tfidf.py | adithyabsk/foreshadow | ca2e927c396ae0d61923b287d6e32e142f3ba96f | [
"Apache-2.0"
] | 1 | 2019-02-20T22:24:00.000Z | 2019-02-20T22:24:00.000Z | """FixedTfidfVectorizer."""
import numpy as np
from sklearn.feature_extraction.text import (
    TfidfVectorizer as SklearnTfidfVectorizer,
    VectorizerMixin,
)
from sklearn.utils import check_array
from foreshadow.base import BaseEstimator
from foreshadow.wrapper import pandas_wrap
@pandas_wrap
class FixedTfidfVectorizer(BaseEstimator, VectorizerMixin):
"""Fix TfidfVectorizer input format to fit transformer standard."""
def __init__(self, **kwargs):
self.encoder = SklearnTfidfVectorizer(**kwargs)
def fit(self, X, y=None):
"""Fit the TfidfVectorizer.
Args:
X: iterable
y (optional): iterable
Returns:
self
"""
X = check_array(
X, accept_sparse=True, dtype=None, force_all_finite=False
).ravel()
self.encoder.fit(X, y)
return self
def transform(self, X):
"""Transform using the fit TfidfVectorizer.
Args:
X: iterable
Returns:
array-like
"""
X = check_array(
X, accept_sparse=True, dtype=None, force_all_finite=False
).ravel()
return self.encoder.transform(X)
def fit_transform(self, X, y=None):
"""Fit and transform in one step.
Args:
X: iterable
y: labels
Returns:
(array-like) Transformed samples
"""
X = check_array(
X, accept_sparse=True, dtype=None, force_all_finite=False
).ravel()
return self.encoder.fit_transform(X, y)
def inverse_transform(self, X):
"""Transform encoding back to original encoding.
Args:
X: iterable
Returns:
iterable: Inverted transformed samples
"""
return np.array([list(i) for i in self.encoder.inverse_transform(X)])
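
# Minimal usage sketch (added; not part of the original module). The exact
# accepted input types depend on what pandas_wrap adapts, so this is an
# assumption rather than the library's documented contract:
#
#   vec = FixedTfidfVectorizer()
#   docs = ["the cat sat", "the dog barked"]
#   mat = vec.fit_transform(docs)   # tf-idf matrix
#   vec.inverse_transform(mat)      # tokens present in each document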
| 23.5125 | 77 | 0.599149 | 202 | 1,881 | 5.460396 | 0.351485 | 0.049864 | 0.047144 | 0.032638 | 0.220308 | 0.196736 | 0.196736 | 0.196736 | 0.196736 | 0.196736 | 0 | 0 | 0.310473 | 1,881 | 79 | 78 | 23.810127 | 0.850424 | 0.267411 | 0 | 0.3 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.166667 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f351210093652493854833e64bf843a7091de0b | 771 | py | Python | base/middleware.py | mhhabib/Django-custom-registration | 97c30d25e1efc08e515dcad1b02f084eb1996894 | [
"MIT"
] | null | null | null | base/middleware.py | mhhabib/Django-custom-registration | 97c30d25e1efc08e515dcad1b02f084eb1996894 | [
"MIT"
] | null | null | null | base/middleware.py | mhhabib/Django-custom-registration | 97c30d25e1efc08e515dcad1b02f084eb1996894 | [
"MIT"
] | null | null | null | from django.contrib.auth import logout
import datetime
from settings import SESSION_IDLE_TIMEOUT
class SessionIdleTimeout(object):
    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        if request.user.is_authenticated:
            current_datetime = datetime.datetime.now()
            if 'last_active_time' in request.session:
                idle_period = (current_datetime -
                               request.session['last_active_time']).seconds
                if idle_period > SESSION_IDLE_TIMEOUT:
                    # bug fix: django.contrib.auth.logout() takes only the
                    # request object; the original also passed a template
                    # path, which raises a TypeError
                    logout(request)
            request.session['last_active_time'] = current_datetime
        response = self.get_response(request)
        return response
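
# To enable this middleware (standard Django wiring; the project layout is
# assumed), list it in settings.py after the authentication middleware and
# define the timeout in seconds:
#
#   MIDDLEWARE = [
#       ...,
#       'django.contrib.auth.middleware.AuthenticationMiddleware',
#       'base.middleware.SessionIdleTimeout',
#   ]
#   SESSION_IDLE_TIMEOUT = 600  # ten minutes
#
# Caveat: Django's default JSON session serializer cannot store datetime
# objects, so a plain timestamp (e.g. time.time()) may be a safer value for
# 'last_active_time'.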
| 35.045455 | 75 | 0.652399 | 83 | 771 | 5.722892 | 0.445783 | 0.092632 | 0.094737 | 0.096842 | 0.117895 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.274968 | 771 | 21 | 76 | 36.714286 | 0.849732 | 0 | 0 | 0 | 0 | 0 | 0.081712 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.176471 | 0 | 0.411765 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f3782e517f1dea43774318f5bb70c4ddf3ecdf0 | 3,828 | py | Python | Line-1/main.py | Manu-Fraile/Network-Representation-Learning | d84414c144cc6146d406e606ed5be8120d4244a9 | [
"MIT"
] | null | null | null | Line-1/main.py | Manu-Fraile/Network-Representation-Learning | d84414c144cc6146d406e606ed5be8120d4244a9 | [
"MIT"
] | null | null | null | Line-1/main.py | Manu-Fraile/Network-Representation-Learning | d84414c144cc6146d406e606ed5be8120d4244a9 | [
"MIT"
] | null | null | null | # Network representation learning with Line algorithm
# Author: Sebastian Haslebacher 2021-12-22
import networkx as nx # https://networkx.org/documentation/stable/tutorial.html
import numpy as np
import random
import argparse
import pickle
class Sampler:
"""
Maintains data-structure for negative sampling.
"""
def __init__(self, G):
self.G = G
self.nodes = []
for node in G.nodes:
deg = (int)(np.ceil(np.power(G.degree[node], 3/4)))
for _ in range(deg):
self.nodes.append(node)
random.shuffle(self.nodes)
self.current = 0
def draw(self, K):
samples = []
for k in range(K):
if(self.current == len(self.nodes)):
random.shuffle(self.nodes)
self.current = 0
samples.append(self.nodes[self.current])
self.current += 1
return samples
def sig(x):
    """
    Sigmoid-function.
    """
    return 1 / (1 + np.exp(-x))
def line_first_order(G, timesteps, K, d, eps=0.1):
    """
    G: networkx graph
    timesteps: number of optimisation steps
    K: number of negative edges for every positive edge
    d: dimensionality of embeddings
    """
    E = {}
    for node in G.nodes:
        E[node] = np.random.rand(d) - 0.5
    edges = [(e[0], e[1]) for e in G.edges]
    sampler = Sampler(G)

    # asynchronous SGD to learn embeddings
    count = 0
    N = timesteps
    while count < N:
        random.shuffle(edges)
        for u, v in edges:
            neg_nbrs = sampler.draw(K)
            # update according to the (positive) edge
            del_u = E[v] * (1 - sig(np.dot(E[v], E[u])))
            del_v = E[u] * (1 - sig(np.dot(E[v], E[u])))
            # updates for negative edges
            for nbr in neg_nbrs:
                del_u -= E[nbr] * (1 - sig(-np.dot(E[nbr], E[u]))) / K
                del_v -= E[nbr] * (1 - sig(-np.dot(E[v], E[nbr]))) / K
            # only update one of the two endpoints
            coin = random.randint(0, 1)
            if coin:
                E[u] += eps * del_u
            else:
                E[v] += eps * del_v
            if (count + 1) % 10000 == 0:
                print('Completed %d out of %d optimisation steps' % (count + 1, N))
            count += 1
            if count >= N:
                break
    return E
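
# Consuming the learned embeddings (added sketch; the names are illustrative):
# first-order LINE models observed links directly, so the endpoints of an
# edge should end up with a high inner product / cosine similarity.
#
#   E = line_first_order(G, timesteps=100000, K=5, d=128)
#   u, v = list(G.edges)[0]
#   cos = np.dot(E[u], E[v]) / (np.linalg.norm(E[u]) * np.linalg.norm(E[v]))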
def main(args):
    # https://networkx.org/documentation/stable/reference/readwrite/gpickle.html
    G = nx.read_gpickle(args.graph_path)
    print('Loaded graph with %d nodes and %d edges' % (len(G.nodes), len(G.edges)))
    print('Start to learn Line-1 embeddings')
    E = line_first_order(G, args.timesteps, args.K, args.d)
    with open(args.output_path, 'wb') as handle:
        pickle.dump(E, handle)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='learn network representations with Line')
# command-line arguments
parser.add_argument('graph_path', type=str,
help='path to pickle-file of networkx graph', action='store')
parser.add_argument('output_path', type=str,
help='path to output file where represenations are stored', action='store')
parser.add_argument('--seed', dest='seed', type=int,
help='fix random seeds', action='store', default=1)
parser.add_argument('-T', dest='timesteps', type=int,
help='number of optimisation steps', action='store', default=1)
parser.add_argument('-K', dest='K', type=int,
help='number of negative samples for every edge', action='store', default=5)
parser.add_argument('-d', dest='d', type=int,
help='dimensionality of embeddings', action='store', default=128)
args = parser.parse_args()
random.seed(args.seed)
np.random.seed(args.seed)
main(args) | 33.286957 | 91 | 0.576019 | 522 | 3,828 | 4.153257 | 0.300766 | 0.024908 | 0.047048 | 0.016605 | 0.199262 | 0.109779 | 0.090406 | 0.011993 | 0 | 0 | 0 | 0.016171 | 0.289185 | 3,828 | 115 | 92 | 33.286957 | 0.780595 | 0.155695 | 0 | 0.075949 | 0 | 0 | 0.138933 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.063291 | false | 0 | 0.063291 | 0 | 0.177215 | 0.037975 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f384e2e63d619a327ec19ad8701a5dc65afe3e5 | 777 | py | Python | src/main.py | Jogius/devolo-reader | 2601b17d9b2dc2ab18756b2dbdc919229544df93 | [
"MIT"
] | null | null | null | src/main.py | Jogius/devolo-reader | 2601b17d9b2dc2ab18756b2dbdc919229544df93 | [
"MIT"
] | null | null | null | src/main.py | Jogius/devolo-reader | 2601b17d9b2dc2ab18756b2dbdc919229544df93 | [
"MIT"
] | null | null | null | from sys import argv
if len(argv) < 3:
exit(1)
device_id = argv[1]
sensor_type = argv[2]
import logging
import os
from dotenv import load_dotenv
from devolo_home_control_api.homecontrol import HomeControl
from devolo_home_control_api.mydevolo import Mydevolo
load_dotenv()
logging.disable()
mydevolo = Mydevolo()
mydevolo.user = os.getenv("DEVOLO_USER")
mydevolo.password = os.getenv("DEVOLO_PASSWORD")
gateway_id = mydevolo.get_gateway_ids()[0]
with HomeControl(gateway_id=gateway_id, mydevolo_instance=mydevolo) as homecontrol :
if device_id not in homecontrol.devices:
exit(1)
device = homecontrol.devices.get(device_id)
sensor = device.get_property("multi_level_sensor")
for s in sensor:
if s.sensor_type == sensor_type:
print(s.value)
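
# Example invocation (added; the device ID below is a placeholder, not a real
# one). Expects DEVOLO_USER and DEVOLO_PASSWORD in a .env file next to the
# script:
#
#   python main.py hdm:ZWave:ABCDEF01/2 temperature
#
# Prints the current value of the matching multi-level sensor, or nothing if
# no sensor of that type exists on the device.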
| 21.583333 | 84 | 0.76834 | 114 | 777 | 5.017544 | 0.394737 | 0.041958 | 0.038462 | 0.073427 | 0.083916 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008982 | 0.140283 | 777 | 35 | 85 | 22.2 | 0.847305 | 0 | 0 | 0.083333 | 0 | 0 | 0.056628 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.041667 | 0.25 | 0 | 0.25 | 0.041667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f391f0367b52d2be2418d658beed188949eecf8 | 779 | py | Python | setup.py | asanka9/singlish | 0ebed2a0524f2b8e206e2a61eaec18d70d6e3086 | [
"MIT"
] | null | null | null | setup.py | asanka9/singlish | 0ebed2a0524f2b8e206e2a61eaec18d70d6e3086 | [
"MIT"
] | null | null | null | setup.py | asanka9/singlish | 0ebed2a0524f2b8e206e2a61eaec18d70d6e3086 | [
"MIT"
] | null | null | null | import setuptools
classifiers = [
    'Development Status :: 5 - Production/Stable',
    'Intended Audience :: Education',
    'Operating System :: Microsoft :: Windows :: Windows 10',
    'License :: OSI Approved :: MIT License',
    'Programming Language :: Python :: 3'
]

with open('README.md', 'r') as fh:
    long_description = fh.read()

setuptools.setup(
    name="singlish",  # Replace with your own username
    version="0.0.1",
    author="asanka9",
    description="A language processing tool for Singlish",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/asanka9/singlish",
    packages=setuptools.find_packages(),
    install_requires=[''],
    classifiers=classifiers,
    python_requires='>=3.6'
) | 26.862069 | 59 | 0.686778 | 87 | 779 | 6.045977 | 0.724138 | 0.114068 | 0.072243 | 0.114068 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017107 | 0.174583 | 779 | 29 | 60 | 26.862069 | 0.800933 | 0.038511 | 0 | 0 | 0 | 0 | 0.430481 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.043478 | 0 | 0.043478 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f3aee08921868b3c16d3ab5f75aec2bc6c2c29f | 1,424 | py | Python | CreateScopes.py | InsightSSG/CiscoSecureWorkload | 075403f2b2688f2216fa76889ad2e60d6b228e07 | [
"Unlicense"
] | null | null | null | CreateScopes.py | InsightSSG/CiscoSecureWorkload | 075403f2b2688f2216fa76889ad2e60d6b228e07 | [
"Unlicense"
] | null | null | null | CreateScopes.py | InsightSSG/CiscoSecureWorkload | 075403f2b2688f2216fa76889ad2e60d6b228e07 | [
"Unlicense"
] | 1 | 2022-02-09T14:17:17.000Z | 2022-02-09T14:17:17.000Z | import json
from csv import reader
from tetpyclient import RestClient
# Tetration IP address
API_ENDPOINT = "https://x.x.x.x"
csvName = "apps.csv"

restclient = RestClient(API_ENDPOINT, credentials_file='credentials.json', verify=False)

# Scope IDs that are used most frequently; an ID can be found in the GUI by clicking the scope and viewing the ID in the URI
defaultScope = "5b058fbe755f023c58c5a256"
testScope = ""

# Open CSV file
with open(csvName, 'r') as read_obj:
    csv_reader = reader(read_obj)
    header = next(csv_reader)
    # Check that the file is not empty
    if header is not None:
        # Iterate over each row after the header in the csv.
        # Creates a scope named after the second column of each row, with the query value
        # taken from the first column; the parent scope ID must also be set.
        # The parent scope ID can be found in the GUI by clicking the parent scope; the ID is in the URI.
        for row in csv_reader:
            req_payload = {
                "short_name": row[1],
                "short_query": {
                    "type": "contains",
                    "field": "host_name",
                    "value": row[0]
                },
                "parent_app_scope_id": defaultScope
            }
            resp = restclient.post('/app_scopes', json_body=json.dumps(req_payload))
            # print(resp.text)  ## If needed ##
            # print(row[0], row[1])  ## Used to print test rows to verify before pushing ##
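
# Expected CSV layout (added for clarity, inferred from the loop above):
# a header row, then one scope per row with the host_name query value in
# column 1 and the scope name in column 2, e.g.
#
#   query_value,scope_name
#   web-prod,Production Web Servers
#   db-prod,Production Databases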
| 40.685714 | 145 | 0.639747 | 205 | 1,424 | 4.356098 | 0.473171 | 0.033595 | 0.006719 | 0.026876 | 0.073908 | 0.073908 | 0.073908 | 0.073908 | 0.073908 | 0.073908 | 0 | 0.02027 | 0.272472 | 1,424 | 34 | 146 | 41.882353 | 0.841699 | 0.391854 | 0 | 0 | 0 | 0 | 0.171967 | 0.028269 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.130435 | 0 | 0.130435 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f3b55ac7f32bd3ddaf3293cdb9606d38fbb2474 | 2,893 | py | Python | hamgr/hamgr/logger.py | platform9/pf9-ha | 7d64f9fe6b72fb4c1e5ed5d23e372a62c9e218a8 | [
"Apache-2.0"
] | 11 | 2016-09-06T09:59:29.000Z | 2021-10-02T07:24:07.000Z | hamgr/hamgr/logger.py | platform9/pf9-ha | 7d64f9fe6b72fb4c1e5ed5d23e372a62c9e218a8 | [
"Apache-2.0"
] | 5 | 2017-10-16T06:47:14.000Z | 2020-07-06T07:20:13.000Z | hamgr/hamgr/logger.py | platform9/pf9-ha | 7d64f9fe6b72fb4c1e5ed5d23e372a62c9e218a8 | [
"Apache-2.0"
] | 3 | 2016-09-01T06:20:51.000Z | 2017-10-16T02:27:07.000Z | # Copyright (c) 2019 Platform9 Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import logging.config
import logging.handlers
import hamgr
from os.path import exists
from os.path import dirname
from os import makedirs
from shared.constants import ROOT_LOGGER
from six.moves.configparser import ConfigParser
def setup_root_logger(conf=None):
if conf is None:
conf = ConfigParser()
if exists(hamgr.DEFAULT_CONF_FILE):
with open(hamgr.DEFAULT_CONF_FILE) as fp:
conf.readfp(fp)
log_file = hamgr.DEFAULT_LOG_FILE
if conf.has_option("log", "location"):
log_file = conf.get("log", "location")
log_level = hamgr.DEFAULT_LOG_LEVEL
if conf.has_option("log", "level"):
log_level = conf.get("log", "level")
log_mode = 'a'
log_rotate_count = hamgr.DEFAULT_ROTATE_COUNT
if conf.has_option("log", "rotate_count"):
log_rotate_count = conf.get("log", "rotate_count")
log_rotate_size = hamgr.DEFAULT_ROTATE_SIZE
if conf.has_option("log", "rotate_size"):
log_rotate_size = conf.get("log", "rotate_size")
log_format = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s')
logger = logging.getLogger(ROOT_LOGGER)
logger.setLevel(log_level)
    # To mitigate the drawback of the Linux built-in log rotation, which runs
    # just once a day, let the RotatingFileHandler rotate the log; the
    # built-in rotation will still do daily clean-up and archiving.
    # Make sure the log directory exists before the handler tries to open
    # the log file.
    try:
        if dirname(log_file) != '' and not exists(dirname(log_file)):
            makedirs(dirname(log_file))
    except Exception as e:
        logger.exception(e)
        raise
    handler = logging.handlers.RotatingFileHandler(log_file,
                                                   mode=log_mode,
                                                   maxBytes=int(log_rotate_size),
                                                   backupCount=int(log_rotate_count))
    handler.setLevel(log_level)
    handler.setFormatter(log_format)
    for hdl in logger.handlers:
        logger.removeHandler(hdl)
    logger.addHandler(handler)
logger.info('root logger created : name - %s, level - %s, file - %s, '
'size - %s, backups - %s', ROOT_LOGGER, str(log_level),
log_file, str(log_rotate_size), str(log_rotate_count))
return logger
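# A minimal usage sketch (assumes a config file with a [log] section whose
# option names match the ones read above; not part of the original module):
#   from hamgr.logger import setup_root_logger
#   log = setup_root_logger()
#   log.info('service starting')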
| 38.065789 | 88 | 0.665054 | 389 | 2,893 | 4.799486 | 0.375321 | 0.057847 | 0.044992 | 0.032137 | 0.06481 | 0.02571 | 0 | 0 | 0 | 0 | 0 | 0.004117 | 0.244383 | 2,893 | 75 | 89 | 38.573333 | 0.849954 | 0.26132 | 0 | 0 | 0 | 0 | 0.104717 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02 | false | 0 | 0.18 | 0 | 0.22 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f3fe129f4caca377cee59996a7e8818c5d8030b | 2,440 | py | Python | tveebot_organizer/filter.py | tveebot/organizer | 8c0aca7e22a34f91968f961cfc0e5195b69a7242 | [
"MIT"
] | null | null | null | tveebot_organizer/filter.py | tveebot/organizer | 8c0aca7e22a34f91968f961cfc0e5195b69a7242 | [
"MIT"
] | null | null | null | tveebot_organizer/filter.py | tveebot/organizer | 8c0aca7e22a34f91968f961cfc0e5195b69a7242 | [
"MIT"
] | null | null | null | from pathlib import Path
from typing import Optional
class Filter:
"""
The filter is one of the sub-components of the *Organizer*. An organizer is associated with a
single filter. The organizer relies on the filter to filter out files that do not correspond
    to episode files. Thus, it calls the filter every time it starts to organize something.
The filter includes a single method *find_episode_file()* which takes a path. This method is
able to handle both files and directories. If the input is a directory, it looks for the
episode file inside that directory and ignores all other files.
"""
# Supported video file extensions
video_extensions = {'.mkv', '.mp4', '.avi', '.m4p', '.m4v'}
def find_episode_file(self, path: Path) -> Optional[Path]:
"""
Finds the episode file corresponding to the given *path* and returns it.
The 'path' argument may be a file or a directory.
If *path* is a file, then it assumes this file must be the episode file.
If *path* is a directory, then it looks for the largest video file inside the directory
and considers that to be the episode file.
If the episode file does not correspond to a video file, then it returns None. Returning
None indicates the filter was not able to find an episode file for the given *path*.
        :raises ValueError: if *path* is neither a file nor a directory
"""
if path.is_file():
if self.is_video_file(path):
episode_file = path
else:
episode_file = None
elif path.is_dir():
try:
# Look for the biggest video file inside the directory
episode_file = max([file for file in path.iterdir() if self.is_video_file(file)],
key=lambda p: p.stat().st_size)
except ValueError:
# The directory is empty
episode_file = None
else:
raise ValueError(f"path '{path}' is neither a file or a directory")
return episode_file
@staticmethod
def is_video_file(path: Path) -> bool:
"""
        Determines whether or not *path* corresponds to a video file.
:return: True if *path* is a video file and False if otherwise.
"""
return path.is_file() and path.suffix.lower() in Filter.video_extensions
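# A minimal usage sketch (the path below is hypothetical):
#   from pathlib import Path
#   episode = Filter().find_episode_file(Path('downloads/Show.S01E01'))
#   if episode is not None:
#       print(f"episode file: {episode}")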
| 39.354839 | 97 | 0.635246 | 346 | 2,440 | 4.419075 | 0.343931 | 0.093525 | 0.045782 | 0.015697 | 0.141923 | 0.060824 | 0.060824 | 0.060824 | 0 | 0 | 0 | 0.001753 | 0.29877 | 2,440 | 61 | 98 | 40 | 0.891876 | 0.544672 | 0 | 0.181818 | 0 | 0 | 0.068536 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.090909 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f40be08084a7777ee0f686ac1b38e4ced24077a | 3,243 | py | Python | ooobuild/lo/text/text_column.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/text/text_column.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/text/text_column.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Struct Class
# This is an auto-generated file produced by Cheetah
# Namespace: com.sun.star.text
# Libre Office Version: 7.3
from ooo.oenv.env_const import UNO_NONE
import typing
class TextColumn(object):
"""
Struct Class
defines a single text column.
See Also:
`API TextColumn <https://api.libreoffice.org/docs/idl/ref/structcom_1_1sun_1_1star_1_1text_1_1TextColumn.html>`_
"""
__ooo_ns__: str = 'com.sun.star.text'
__ooo_full_ns__: str = 'com.sun.star.text.TextColumn'
__ooo_type_name__: str = 'struct'
typeName: str = 'com.sun.star.text.TextColumn'
"""Literal Constant ``com.sun.star.text.TextColumn``"""
def __init__(self, Width: typing.Optional[int] = 0, LeftMargin: typing.Optional[int] = 0, RightMargin: typing.Optional[int] = 0) -> None:
"""
Constructor
Arguments:
Width (int, optional): Width value.
LeftMargin (int, optional): LeftMargin value.
RightMargin (int, optional): RightMargin value.
"""
super().__init__()
if isinstance(Width, TextColumn):
oth: TextColumn = Width
self.Width = oth.Width
self.LeftMargin = oth.LeftMargin
self.RightMargin = oth.RightMargin
return
kargs = {
"Width": Width,
"LeftMargin": LeftMargin,
"RightMargin": RightMargin,
}
self._init(**kargs)
def _init(self, **kwargs) -> None:
self._width = kwargs["Width"]
self._left_margin = kwargs["LeftMargin"]
self._right_margin = kwargs["RightMargin"]
@property
def Width(self) -> int:
"""
contains the relative width of the column, including both margins.
        Width is not a metric value; it is relative to the sum of the widths of all columns.
"""
return self._width
@Width.setter
def Width(self, value: int) -> None:
self._width = value
@property
def LeftMargin(self) -> int:
"""
contains the left margin of the column.
This is a metric value.
"""
return self._left_margin
@LeftMargin.setter
def LeftMargin(self, value: int) -> None:
self._left_margin = value
@property
def RightMargin(self) -> int:
"""
contains the right margin of the column.
This is a metric value.
"""
return self._right_margin
@RightMargin.setter
def RightMargin(self, value: int) -> None:
self._right_margin = value
__all__ = ['TextColumn']
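# A minimal construction sketch (the values are illustrative assumptions):
#   col = TextColumn(Width=1000, LeftMargin=100, RightMargin=100)
#   assert col.Width == 1000 and col.LeftMargin == 100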
| 28.955357 | 141 | 0.626272 | 396 | 3,243 | 4.984848 | 0.388889 | 0.030395 | 0.025329 | 0.035461 | 0.12614 | 0.083587 | 0.045593 | 0.045593 | 0.045593 | 0.045593 | 0 | 0.009354 | 0.274746 | 3,243 | 111 | 142 | 29.216216 | 0.829932 | 0.407956 | 0 | 0.068182 | 0 | 0 | 0.085662 | 0.034022 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.045455 | 0 | 0.431818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f44dcb36faf44616df69046b0b6b98feb411427 | 115,957 | py | Python | tests/pipeline/test_quarters_estimates.py | Hertz-and-Alpha/zipline-reloaded | eea4a2ccfc03d7fa4946defcb3cdf23469e4ae39 | [
"Apache-2.0"
] | 254 | 2021-03-29T16:18:39.000Z | 2022-03-31T22:06:01.000Z | tests/pipeline/test_quarters_estimates.py | Hertz-and-Alpha/zipline-reloaded | eea4a2ccfc03d7fa4946defcb3cdf23469e4ae39 | [
"Apache-2.0"
] | 52 | 2021-04-06T01:46:24.000Z | 2022-03-29T20:54:19.000Z | tests/pipeline/test_quarters_estimates.py | Hertz-and-Alpha/zipline-reloaded | eea4a2ccfc03d7fa4946defcb3cdf23469e4ae39 | [
"Apache-2.0"
] | 53 | 2021-04-05T14:43:29.000Z | 2022-03-31T22:06:04.000Z | from datetime import timedelta
from functools import partial
import itertools
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
from zipline.pipeline.loaders.earnings_estimates import (
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import pytest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
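# A minimal usage sketch (illustrative): the factory parameterizes the dataset
# by how many announcements out we want, so QuartersEstimates(1) yields a
# DataSet for the next/previous quarter, depending on the loader in use.
#   dataset = QuartersEstimates(1)
#   dataset.estimate.latest  # usable as a Pipeline term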
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date, sids, tuples, end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples, columns=[SID_FIELD_NAME, "estimate", "knowledge_date"])
df = df.pivot_table(
columns=SID_FIELD_NAME, values="estimate", index="knowledge_date", dropna=False
)
df = df.reindex(pd.date_range(start_date, end_date))
# Index name is lost during reindex.
df.index = df.index.rename("knowledge_date")
df["at_date"] = end_date.tz_localize("utc")
df = df.set_index(["at_date", df.index.tz_localize("utc")]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
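# A minimal usage sketch (sids, values, and dates below are illustrative):
#   expected = create_expected_df_for_factor_compute(
#       pd.Timestamp('2015-01-05'),                # start_date
#       [0, 10],                                   # sids
#       [(0, 100.0, pd.Timestamp('2015-01-06')),   # (sid, estimate, knowledge_date)
#        (10, 110.0, pd.Timestamp('2015-01-07'))],
#       pd.Timestamp('2015-01-09'),                # end_date
#   )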
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
    # Short window defined in order for tests to run faster.
START_DATE = pd.Timestamp("2014-12-28", tz="utc")
END_DATE = pd.Timestamp("2015-02-04", tz="utc")
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError("make_loader")
@classmethod
def make_events(cls):
raise NotImplementedError("make_events")
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days,
self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
"s" + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(
cls.events, {column.name: val for column, val in cls.columns.items()}
)
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
    Tests
    ------
    test_load_one_day()
        Tests that a Pipeline run over a single day returns the expected
        values for each of the dataset's columns.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: "event_date",
MultipleColumnsEstimates.fiscal_quarter: "fiscal_quarter",
MultipleColumnsEstimates.fiscal_year: "fiscal_year",
MultipleColumnsEstimates.estimate1: "estimate1",
MultipleColumnsEstimates.estimate2: "estimate2",
}
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate1": [1.0, 2.0],
"estimate2": [3.0, 4.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def make_expected_out(cls):
raise NotImplementedError("make_expected_out")
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp("2015-01-15", tz="utc"),
end_date=pd.Timestamp("2015-01-15", tz="utc"),
)
        assert_frame_equal(results.sort_index(axis=1), self.expected_out.sort_index(axis=1))
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests the values returned by the previous quarter loader for a
    single-day Pipeline run.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-10"),
"estimate1": 1.0,
"estimate2": 3.0,
FISCAL_QUARTER_FIELD_NAME: 1.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests the values returned by the next quarter loader for a
    single-day Pipeline run.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-20"),
"estimate1": 2.0,
"estimate2": 4.0,
FISCAL_QUARTER_FIELD_NAME: 2.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
dummy_df = pd.DataFrame(
{SID_FIELD_NAME: 0},
columns=[
SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
"estimate",
],
index=[0],
)
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {
c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1, bad_dataset2, good_dataset)
for c in dataset.columns
}
p = Pipeline(columns)
err_msg = (
r"Passed invalid number of quarters -[0-9],-[0-9]; "
r"must pass a number of quarters >= 0"
)
with pytest.raises(ValueError, match=err_msg):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with pytest.raises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = [
"split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof",
]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(
itertools.product(
(
NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
),
)
)
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
with pytest.raises(ValueError):
loader(
dummy_df,
{column.name: val for column, val in columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"),
)
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp("2015-01-28", tz="utc")
q1_knowledge_dates = [
pd.Timestamp("2015-01-01"),
pd.Timestamp("2015-01-04"),
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-11"),
]
q2_knowledge_dates = [
pd.Timestamp("2015-01-14"),
pd.Timestamp("2015-01-17"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-23"),
]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
] # One day late
q2_release_dates = [
pd.Timestamp("2015-01-25"), # One day early
pd.Timestamp("2015-01-26"),
]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates + cls.q2_knowledge_dates, 4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (
q1e1 < q1e2
and q2e1 < q2e2
# All estimates are < Q2's event, so just constrain Q1
# estimates.
and q1e1 < cls.q1_release_dates[0]
and q1e2 < cls.q1_release_dates[0]
):
sid_estimates.append(
cls.create_estimates_df(q1e1, q1e2, q2e1, q2e2, sid)
)
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates + sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-13"), pd.Timestamp("2015-01-26")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-26"),
],
"estimate": [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid,
}
)
@classmethod
def create_estimates_df(cls, q1e1, q1e2, q2e1, q2e2, sid):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
"estimate": [0.1, 0.2, 0.3, 0.4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
}
)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert sid_estimates.isnull().all().all()
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[
self.get_expected_estimate(
q1_knowledge[
q1_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
q2_knowledge[
q2_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
date.tz_localize(None),
).set_index([[date]])
for date in sid_estimates.index
],
axis=0,
)
sid_estimates.index = all_expected.index.copy()
assert_equal(all_expected[sid_estimates.columns], sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
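    # Worked example (dates taken from the fixtures above): on 2015-01-12, if
    # our latest Q1 knowledge still says the release happens on 2015-01-13,
    # Q1's row is the expected "next" estimate; once Q1's event has passed,
    # Q2's row becomes the expected estimate as long as its event date is
    # still in the future.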
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q2_knowledge.iloc[-1:]
elif (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
Runs a Pipeline that calculate which estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate": [1.0, 2.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(
columns=[cls.columns[col] + "1" for col in cls.columns]
+ [cls.columns[col] + "2" for col in cls.columns],
index=cls.trading_days,
)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ("1", "2")
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(expected[expected_name])
else:
expected[expected_name] = expected[expected_name].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge(
[
{c.name + "1": c.latest for c in dataset1.columns},
{c.name + "2": c.latest for c in dataset2.columns},
]
)
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + "1" for col in self.columns]
q2_columns = [col.name + "2" for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(
sorted(np.array(q1_columns + q2_columns)), sorted(results.columns.values)
)
assert_equal(
self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1),
)
class NextEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp(
"2015-01-11", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp("2015-01-11", tz="UTC") : pd.Timestamp(
"2015-01-20", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ["estimate", "event_date"]:
expected.loc[
pd.Timestamp("2015-01-06", tz="UTC") : pd.Timestamp(
"2015-01-10", tz="UTC"
),
col_name + "2",
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-09", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 2
expected.loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 3
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_YEAR_FIELD_NAME + "2",
] = 2015
return expected
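    # Worked example of the fills above: with Q1's event on 2015-01-10,
    # "2 quarters out" means FQ2 until that event passes, then FQ3 from
    # 2015-01-12 through Q2's event on 2015-01-20; the fiscal year stays
    # 2015 throughout.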
class PreviousEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp(
"2015-01-19", tz="UTC"
)
] = cls.events[raw_name].iloc[0]
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ["estimate", "event_date"]:
expected[col_name + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[col_name].iloc[0]
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 4
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 2014
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 1
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 2015
return expected
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
]
* 2,
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-20"),
],
"estimate": [11.0, 12.0, 21.0] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6,
}
)
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError("assert_compute")
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=pd.Timestamp("2015-01-13", tz="utc"),
# last event date we have
end_date=pd.Timestamp("2015-01-14", tz="utc"),
)
class PreviousVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
class WithEstimateWindows(WithEstimates):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
    Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
A dictionary mapping to the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp("2015-02-10", tz="utc")
window_test_start_date = pd.Timestamp("2015-01-05")
critical_dates = [
pd.Timestamp("2015-01-09", tz="utc"),
pd.Timestamp("2015-01-15", tz="utc"),
pd.Timestamp("2015-01-20", tz="utc"),
pd.Timestamp("2015-01-26", tz="utc"),
pd.Timestamp("2015-02-05", tz="utc"),
pd.Timestamp("2015-02-10", tz="utc"),
]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-02-10"),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp("2015-01-18"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-04-01"),
],
"estimate": [100.0, 101.0] + [200.0, 201.0] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
}
)
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-15"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-02-05"),
pd.Timestamp("2015-02-05"),
],
"estimate": [110.0, 111.0] + [310.0, 311.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10,
}
)
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-07"),
cls.window_test_start_date,
pd.Timestamp("2015-01-17"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
],
"estimate": [120.0, 121.0] + [220.0, 221.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20,
}
)
concatted = pd.concat(
[sid_0_timeline, sid_10_timeline, sid_20_timeline]
).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [
sid for i in range(len(sids) - 1) for sid in range(sids[i], sids[i + 1])
] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids(),
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(
self, start_date, num_announcements_out
):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date)
- self.trading_days.get_loc(self.window_test_start_date)
+ 1
)
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = (
timelines[num_announcements_out]
.loc[today]
.reindex(trading_days[: today_idx + 1])
.values
)
timeline_start_idx = len(today_timeline) - window_len
assert_almost_equal(estimate, today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp("2015-02-10", tz="utc"),
)
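    # Worked example of window_len (illustrative): with start_date 2015-01-20
    # and window_test_start_date 2015-01-05, the factor window covers every
    # trading session from 2015-01-05 through 2015-01-20, so each day's
    # computation sees the full overwrite history since the first knowledge
    # date.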
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-20"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-21"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 111, pd.Timestamp("2015-01-22")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-01-22", "2015-02-04")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-02-05", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 201, pd.Timestamp("2015-02-10")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 221, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_previous = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-02-09")
]
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-02-10")),
(10, np.NaN, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
)
]
)
return {1: oneq_previous, 2: twoq_previous}
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-09"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp("2015-01-20")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-20"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-01-22")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 310, pd.Timestamp("2015-01-09")),
(10, 311, pd.Timestamp("2015-01-15")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-23", "2015-02-05")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-02-06", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(0, 201, pd.Timestamp("2015-02-10")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-11")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-16")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-01-20"),
)
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-02-10")
]
)
return {1: oneq_next, 2: twoq_next}
class WithSplitAdjustedWindows(WithEstimateWindows):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
    Pipeline with an estimates loader over differently-sized windows and with
    split adjustments.
"""
split_adjusted_asof_date = pd.Timestamp("2015-01-14")
@classmethod
def make_events(cls):
# Add an extra sid that has a release before the split-asof-date in
# order to test that we're reversing splits correctly in the previous
# case (without an overwrite) and in the next case (with an overwrite).
sid_30 = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-09"),
# For Q2, we want it to start early enough
# that we can have several adjustments before
# the end of the first quarter so that we
# can test un-adjusting & readjusting with an
# overwrite.
cls.window_test_start_date,
                    # We want the Q2 event date to be far enough past
# the split-asof-date that we can have
# several splits and can make sure that they
# are applied correctly.
pd.Timestamp("2015-01-20"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
],
"estimate": [130.0, 131.0, 230.0, 231.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 30,
}
)
# An extra sid to test no splits before the split-adjusted-asof-date.
# We want an event before and after the split-adjusted-asof-date &
# timestamps for data points also before and after
        # split-adjusted-asof-date (but also before the split dates, so that
# we can test that splits actually get applied at the correct times).
sid_40 = pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-15")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-02-10"),
],
"estimate": [140.0, 240.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 40,
}
)
# An extra sid to test all splits before the
# split-adjusted-asof-date. All timestamps should be before that date
# so that we have cases where we un-apply and re-apply splits.
sid_50 = pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-12")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-02-10"),
],
"estimate": [150.0, 250.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 50,
}
)
return pd.concat(
[
# Slightly hacky, but want to make sure we're using the same
# events as WithEstimateWindows.
cls.__base__.make_events(),
sid_30,
sid_40,
sid_50,
]
)
@classmethod
def make_splits_data(cls):
# For sid 0, we want to apply a series of splits before and after the
        # split-adjusted-asof-date as well as between quarters (for the
# previous case, where we won't see any values until after the event
# happens).
sid_0_splits = pd.DataFrame(
{
SID_FIELD_NAME: 0,
"ratio": (-1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100),
"effective_date": (
pd.Timestamp("2014-01-01"), # Filter out
# Split before Q1 event & after first estimate
pd.Timestamp("2015-01-07"),
# Split before Q1 event
pd.Timestamp("2015-01-09"),
# Split before Q1 event
pd.Timestamp("2015-01-13"),
# Split before Q1 event
pd.Timestamp("2015-01-15"),
# Split before Q1 event
pd.Timestamp("2015-01-18"),
# Split after Q1 event and before Q2 event
pd.Timestamp("2015-01-30"),
# Filter out - this is after our date index
pd.Timestamp("2016-01-01"),
),
}
)
sid_10_splits = pd.DataFrame(
{
SID_FIELD_NAME: 10,
"ratio": (0.2, 0.3),
"effective_date": (
# We want a split before the first estimate and before the
# split-adjusted-asof-date but within our calendar index so
# that we can test that the split is NEVER applied.
pd.Timestamp("2015-01-07"),
# Apply a single split before Q1 event.
pd.Timestamp("2015-01-20"),
),
}
)
# We want a sid with split dates that collide with another sid (0) to
# make sure splits are correctly applied for both sids.
sid_20_splits = pd.DataFrame(
{
SID_FIELD_NAME: 20,
"ratio": (
0.4,
0.5,
0.6,
0.7,
0.8,
0.9,
),
"effective_date": (
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-15"),
pd.Timestamp("2015-01-18"),
pd.Timestamp("2015-01-30"),
),
}
)
# This sid has event dates that are shifted back so that we can test
# cases where an event occurs before the split-asof-date.
sid_30_splits = pd.DataFrame(
{
SID_FIELD_NAME: 30,
"ratio": (8, 9, 10, 11, 12),
"effective_date": (
# Split before the event and before the
# split-asof-date.
pd.Timestamp("2015-01-07"),
# Split on date of event but before the
# split-asof-date.
pd.Timestamp("2015-01-09"),
# Split after the event, but before the
# split-asof-date.
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-15"),
pd.Timestamp("2015-01-18"),
),
}
)
# No splits for a sid before the split-adjusted-asof-date.
sid_40_splits = pd.DataFrame(
{
SID_FIELD_NAME: 40,
"ratio": (13, 14),
"effective_date": (
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-22"),
),
}
)
# No splits for a sid after the split-adjusted-asof-date.
sid_50_splits = pd.DataFrame(
{
SID_FIELD_NAME: 50,
"ratio": (15, 16),
"effective_date": (
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
),
}
)
return pd.concat(
[
sid_0_splits,
sid_10_splits,
sid_20_splits,
sid_30_splits,
sid_40_splits,
sid_50_splits,
]
)
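    # Worked example of the un-adjust/re-adjust arithmetic exercised by the
    # expected timelines below: sid 50 has splits of 15 and 16 before the
    # split-adjusted-asof-date, so its raw estimate of 150.0 appears as
    # 150 * 1/15 * 1/16 before the first split date, as 150 * 1/16 on
    # 2015-01-13, and as 150.0 from 2015-01-14 onward.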
class PreviousWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=["estimate"],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
# Undo all adjustments that haven't happened yet.
(30, 131 * 1 / 10, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150 * 1 / 15 * 1 / 16, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-12")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0 * 1 / 16, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-13"),
),
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-14"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131 * 11, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-15", "2015-01-16")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-20", "2015-01-21")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 111 * 0.3, pd.Timestamp("2015-01-22")),
(20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-22", "2015-01-29")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101 * 7, pd.Timestamp("2015-01-20")),
(10, 111 * 0.3, pd.Timestamp("2015-01-22")),
(20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-30", "2015-02-04")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101 * 7, pd.Timestamp("2015-01-20")),
(10, 311 * 0.3, pd.Timestamp("2015-02-05")),
(20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-02-05", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 201, pd.Timestamp("2015-02-10")),
(10, 311 * 0.3, pd.Timestamp("2015-02-05")),
(20, 221 * 0.8 * 0.9, pd.Timestamp("2015-02-10")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 240.0 * 13 * 14, pd.Timestamp("2015-02-10")),
(50, 250.0, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_previous = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-19")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131 * 11 * 12, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-01-20", "2015-02-09")
]
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 101 * 7, pd.Timestamp("2015-02-10")),
(10, np.NaN, pd.Timestamp("2015-02-05")),
(20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-02-10")),
(30, 131 * 11 * 12, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-02-10")),
(50, 150.0, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
)
]
)
return {1: oneq_previous, 2: twoq_previous}
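# NextWithSplitAdjustedWindows mirrors the previous-quarter class above but
# surfaces the *next* (upcoming) quarter's estimate, so its expected
# timelines also carry pre-event values for each sid.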
class NextWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=["estimate"],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100 * 1 / 4, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(20, 120 * 5 / 3, cls.window_test_start_date),
(20, 121 * 5 / 3, pd.Timestamp("2015-01-07")),
(30, 130 * 1 / 10, cls.window_test_start_date),
(30, 131 * 1 / 10, pd.Timestamp("2015-01-09")),
(40, 140, pd.Timestamp("2015-01-09")),
(50, 150.0 * 1 / 15 * 1 / 16, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-09"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 100 * 1 / 4, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120 * 5 / 3, cls.window_test_start_date),
(20, 121 * 5 / 3, pd.Timestamp("2015-01-07")),
(30, 230 * 1 / 10, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp("2015-01-10")),
(50, 250.0 * 1 / 15 * 1 / 16, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-12"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp("2015-01-10")),
(50, 250.0 * 1 / 16, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-13"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp("2015-01-10")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-14"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100 * 5, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120 * 0.7, cls.window_test_start_date),
(20, 121 * 0.7, pd.Timestamp("2015-01-07")),
(30, 230 * 11, cls.window_test_start_date),
(40, 240, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
end_date,
)
for end_date in pd.date_range("2015-01-15", "2015-01-16")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 100 * 5 * 6, cls.window_test_start_date),
(0, 101, pd.Timestamp("2015-01-20")),
(10, 110 * 0.3, pd.Timestamp("2015-01-09")),
(10, 111 * 0.3, pd.Timestamp("2015-01-12")),
(20, 120 * 0.7 * 0.8, cls.window_test_start_date),
(20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-07")),
(30, 230 * 11 * 12, cls.window_test_start_date),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 240 * 13, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-20"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6, pd.Timestamp("2015-01-12")),
(10, 110 * 0.3, pd.Timestamp("2015-01-09")),
(10, 111 * 0.3, pd.Timestamp("2015-01-12")),
(20, 220 * 0.7 * 0.8, cls.window_test_start_date),
(20, 221 * 0.8, pd.Timestamp("2015-01-17")),
(40, 240 * 13, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-21"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6, pd.Timestamp("2015-01-12")),
(10, 110 * 0.3, pd.Timestamp("2015-01-09")),
(10, 111 * 0.3, pd.Timestamp("2015-01-12")),
(20, 220 * 0.7 * 0.8, cls.window_test_start_date),
(20, 221 * 0.8, pd.Timestamp("2015-01-17")),
(40, 240 * 13 * 14, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-22"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6, pd.Timestamp("2015-01-12")),
(10, 310 * 0.3, pd.Timestamp("2015-01-09")),
(10, 311 * 0.3, pd.Timestamp("2015-01-15")),
(20, 220 * 0.7 * 0.8, cls.window_test_start_date),
(20, 221 * 0.8, pd.Timestamp("2015-01-17")),
(40, 240 * 13 * 14, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
end_date,
)
for end_date in pd.date_range("2015-01-23", "2015-01-29")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6 * 7, pd.Timestamp("2015-01-12")),
(10, 310 * 0.3, pd.Timestamp("2015-01-09")),
(10, 311 * 0.3, pd.Timestamp("2015-01-15")),
(20, 220 * 0.7 * 0.8 * 0.9, cls.window_test_start_date),
(20, 221 * 0.8 * 0.9, pd.Timestamp("2015-01-17")),
(40, 240 * 13 * 14, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
end_date,
)
for end_date in pd.date_range("2015-01-30", "2015-02-05")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6 * 7, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220 * 0.7 * 0.8 * 0.9, cls.window_test_start_date),
(20, 221 * 0.8 * 0.9, pd.Timestamp("2015-01-17")),
(40, 240 * 13 * 14, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
end_date,
)
for end_date in pd.date_range("2015-02-06", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6 * 7, pd.Timestamp("2015-01-12")),
(0, 201, pd.Timestamp("2015-02-10")),
(10, np.NaN, cls.window_test_start_date),
(20, 220 * 0.7 * 0.8 * 0.9, cls.window_test_start_date),
(20, 221 * 0.8 * 0.9, pd.Timestamp("2015-01-17")),
(40, 240 * 13 * 14, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220 * 5 / 3, cls.window_test_start_date),
(30, 230 * 1 / 10, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date),
(50, np.NaN, cls.window_test_start_date),
],
pd.Timestamp("2015-01-09"),
)
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 1 / 4, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220 * 5 / 3, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date),
],
pd.Timestamp("2015-01-12"),
)
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-13", "2015-01-14")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220 * 0.7, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-15", "2015-01-16")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220 * 0.7 * 0.8, cls.window_test_start_date),
(20, 221 * 0.8, pd.Timestamp("2015-01-17")),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date),
],
pd.Timestamp("2015-01-20"),
)
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-02-10")
]
)
return {1: oneq_next, 2: twoq_next}
class WithSplitAdjustedMultipleEstimateColumns(WithEstimates):
"""
ZiplineTestCase mixin for having multiple estimate columns that are
split-adjusted to make sure that adjustments are applied correctly.
Attributes
----------
test_start_date : pd.Timestamp
The start date of the test.
test_end_date : pd.Timestamp
        The end date of the test.
split_adjusted_asof : pd.Timestamp
The split-adjusted-asof-date of the data used in the test, to be used
to create all loaders of test classes that subclass this mixin.
Methods
-------
make_expected_timelines_1q_out -> dict[pd.Timestamp -> dict[str ->
np.array]]
The expected array of results for each date of the date range for
each column. Only for 1 quarter out.
make_expected_timelines_2q_out -> dict[pd.Timestamp -> dict[str ->
np.array]]
The expected array of results for each date of the date range. For 2
quarters out, so only for the column that is requested to be loaded
with 2 quarters out.
Tests
-----
test_adjustments_with_multiple_adjusted_columns
Tests that if you have multiple columns, we still split-adjust
correctly.
test_multiple_datasets_different_num_announcements
Tests that if you have multiple datasets that ask for a different
number of quarters out, and each asks for a different estimates column,
we still split-adjust correctly.
"""
END_DATE = pd.Timestamp("2015-02-10", tz="utc")
test_start_date = pd.Timestamp("2015-01-06", tz="utc")
test_end_date = pd.Timestamp("2015-01-12", tz="utc")
split_adjusted_asof = pd.Timestamp("2015-01-08")
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: "event_date",
MultipleColumnsEstimates.fiscal_quarter: "fiscal_quarter",
MultipleColumnsEstimates.fiscal_year: "fiscal_year",
MultipleColumnsEstimates.estimate1: "estimate1",
MultipleColumnsEstimates.estimate2: "estimate2",
}
@classmethod
def make_events(cls):
sid_0_events = pd.DataFrame(
{
# We only want a stale KD here so that adjustments
# will be applied.
TS_FIELD_NAME: [pd.Timestamp("2015-01-05"), pd.Timestamp("2015-01-05")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
],
"estimate1": [1100.0, 1200.0],
"estimate2": [2100.0, 2200.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
}
)
# This is just an extra sid to make sure that we apply adjustments
# correctly for multiple columns when we have multiple sids.
sid_1_events = pd.DataFrame(
{
# We only want a stale KD here so that adjustments
# will be applied.
TS_FIELD_NAME: [pd.Timestamp("2015-01-05"), pd.Timestamp("2015-01-05")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-08"),
pd.Timestamp("2015-01-11"),
],
"estimate1": [1110.0, 1210.0],
"estimate2": [2110.0, 2210.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 1,
}
)
return pd.concat([sid_0_events, sid_1_events])
@classmethod
def make_splits_data(cls):
sid_0_splits = pd.DataFrame(
{
SID_FIELD_NAME: 0,
"ratio": (0.3, 3.0),
"effective_date": (
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-09"),
),
}
)
sid_1_splits = pd.DataFrame(
{
SID_FIELD_NAME: 1,
"ratio": (0.4, 4.0),
"effective_date": (
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-09"),
),
}
)
return pd.concat([sid_0_splits, sid_1_splits])
@classmethod
def make_expected_timelines_1q_out(cls):
return {}
@classmethod
def make_expected_timelines_2q_out(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithSplitAdjustedMultipleEstimateColumns, cls).init_class_fixtures()
cls.timelines_1q_out = cls.make_expected_timelines_1q_out()
cls.timelines_2q_out = cls.make_expected_timelines_2q_out()
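    # Both tests below use the same pattern: the CustomFactor's compute()
    # asserts against the precomputed timeline for `today`, so simply running
    # the pipeline exercises every window and fails on any mismatch.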
def test_adjustments_with_multiple_adjusted_columns(self):
dataset = MultipleColumnsQuartersEstimates(1)
timelines = self.timelines_1q_out
window_len = 3
class SomeFactor(CustomFactor):
inputs = [dataset.estimate1, dataset.estimate2]
window_length = window_len
def compute(self, today, assets, out, estimate1, estimate2):
assert_almost_equal(estimate1, timelines[today]["estimate1"])
assert_almost_equal(estimate2, timelines[today]["estimate2"])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=self.test_start_date,
# last event date we have
end_date=self.test_end_date,
)
def test_multiple_datasets_different_num_announcements(self):
dataset1 = MultipleColumnsQuartersEstimates(1)
dataset2 = MultipleColumnsQuartersEstimates(2)
timelines_1q_out = self.timelines_1q_out
timelines_2q_out = self.timelines_2q_out
window_len = 3
class SomeFactor1(CustomFactor):
inputs = [dataset1.estimate1]
window_length = window_len
def compute(self, today, assets, out, estimate1):
assert_almost_equal(estimate1, timelines_1q_out[today]["estimate1"])
class SomeFactor2(CustomFactor):
inputs = [dataset2.estimate2]
window_length = window_len
def compute(self, today, assets, out, estimate2):
assert_almost_equal(estimate2, timelines_2q_out[today]["estimate2"])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est1": SomeFactor1(), "est2": SomeFactor2()}),
start_date=self.test_start_date,
# last event date we have
end_date=self.test_end_date,
)
class PreviousWithSplitAdjustedMultipleEstimateColumns(
WithSplitAdjustedMultipleEstimateColumns, ZiplineTestCase
):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=["estimate1", "estimate2"],
split_adjusted_asof=cls.split_adjusted_asof,
)
@classmethod
def make_expected_timelines_1q_out(cls):
return {
pd.Timestamp("2015-01-06", tz="utc"): {
"estimate1": np.array([[np.NaN, np.NaN]] * 3),
"estimate2": np.array([[np.NaN, np.NaN]] * 3),
},
pd.Timestamp("2015-01-07", tz="utc"): {
"estimate1": np.array([[np.NaN, np.NaN]] * 3),
"estimate2": np.array([[np.NaN, np.NaN]] * 3),
},
pd.Timestamp("2015-01-08", tz="utc"): {
"estimate1": np.array([[np.NaN, np.NaN]] * 2 + [[np.NaN, 1110.0]]),
"estimate2": np.array([[np.NaN, np.NaN]] * 2 + [[np.NaN, 2110.0]]),
},
pd.Timestamp("2015-01-09", tz="utc"): {
"estimate1": np.array(
[[np.NaN, np.NaN]]
+ [[np.NaN, 1110.0 * 4]]
+ [[1100 * 3.0, 1110.0 * 4]]
),
"estimate2": np.array(
[[np.NaN, np.NaN]]
+ [[np.NaN, 2110.0 * 4]]
+ [[2100 * 3.0, 2110.0 * 4]]
),
},
pd.Timestamp("2015-01-12", tz="utc"): {
"estimate1": np.array(
[[np.NaN, np.NaN]] * 2 + [[1200 * 3.0, 1210.0 * 4]]
),
"estimate2": np.array(
[[np.NaN, np.NaN]] * 2 + [[2200 * 3.0, 2210.0 * 4]]
),
},
}
@classmethod
def make_expected_timelines_2q_out(cls):
return {
pd.Timestamp("2015-01-06", tz="utc"): {
"estimate2": np.array([[np.NaN, np.NaN]] * 3)
},
pd.Timestamp("2015-01-07", tz="utc"): {
"estimate2": np.array([[np.NaN, np.NaN]] * 3)
},
pd.Timestamp("2015-01-08", tz="utc"): {
"estimate2": np.array([[np.NaN, np.NaN]] * 3)
},
pd.Timestamp("2015-01-09", tz="utc"): {
"estimate2": np.array([[np.NaN, np.NaN]] * 3)
},
pd.Timestamp("2015-01-12", tz="utc"): {
"estimate2": np.array(
[[np.NaN, np.NaN]] * 2 + [[2100 * 3.0, 2110.0 * 4]]
)
},
}
class NextWithSplitAdjustedMultipleEstimateColumns(
WithSplitAdjustedMultipleEstimateColumns, ZiplineTestCase
):
@classmethod
def make_loader(cls, events, columns):
return NextSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=["estimate1", "estimate2"],
split_adjusted_asof=cls.split_adjusted_asof,
)
@classmethod
def make_expected_timelines_1q_out(cls):
return {
pd.Timestamp("2015-01-06", tz="utc"): {
"estimate1": np.array(
[[np.NaN, np.NaN]] + [[1100.0 * 1 / 0.3, 1110.0 * 1 / 0.4]] * 2
),
"estimate2": np.array(
[[np.NaN, np.NaN]] + [[2100.0 * 1 / 0.3, 2110.0 * 1 / 0.4]] * 2
),
},
pd.Timestamp("2015-01-07", tz="utc"): {
"estimate1": np.array([[1100.0, 1110.0]] * 3),
"estimate2": np.array([[2100.0, 2110.0]] * 3),
},
pd.Timestamp("2015-01-08", tz="utc"): {
"estimate1": np.array([[1100.0, 1110.0]] * 3),
"estimate2": np.array([[2100.0, 2110.0]] * 3),
},
pd.Timestamp("2015-01-09", tz="utc"): {
"estimate1": np.array([[1100 * 3.0, 1210.0 * 4]] * 3),
"estimate2": np.array([[2100 * 3.0, 2210.0 * 4]] * 3),
},
pd.Timestamp("2015-01-12", tz="utc"): {
"estimate1": np.array([[1200 * 3.0, np.NaN]] * 3),
"estimate2": np.array([[2200 * 3.0, np.NaN]] * 3),
},
}
@classmethod
def make_expected_timelines_2q_out(cls):
return {
pd.Timestamp("2015-01-06", tz="utc"): {
"estimate2": np.array(
[[np.NaN, np.NaN]] + [[2200 * 1 / 0.3, 2210.0 * 1 / 0.4]] * 2
)
},
pd.Timestamp("2015-01-07", tz="utc"): {
"estimate2": np.array([[2200.0, 2210.0]] * 3)
},
pd.Timestamp("2015-01-08", tz="utc"): {
"estimate2": np.array([[2200, 2210.0]] * 3)
},
pd.Timestamp("2015-01-09", tz="utc"): {
"estimate2": np.array([[2200 * 3.0, np.NaN]] * 3)
},
pd.Timestamp("2015-01-12", tz="utc"): {
"estimate2": np.array([[np.NaN, np.NaN]] * 3)
},
}
class WithAdjustmentBoundaries(WithEstimates):
"""
ZiplineTestCase mixin providing class-level attributes, methods,
and a test to make sure that when the split-adjusted-asof-date is not
strictly within the date index, we can still apply adjustments correctly.
Attributes
----------
split_adjusted_before_start : pd.Timestamp
A split-adjusted-asof-date before the start date of the test.
split_adjusted_after_end : pd.Timestamp
        A split-adjusted-asof-date after the end date of the test.
split_adjusted_asof_dates : list of tuples of pd.Timestamp
All the split-adjusted-asof-dates over which we want to parameterize
the test.
Methods
-------
make_expected_out -> dict[pd.Timestamp -> pd.DataFrame]
A dictionary of the expected output of the pipeline at each of the
dates of interest.
"""
START_DATE = pd.Timestamp("2015-01-04", tz="utc")
# We want to run the pipeline starting from `START_DATE`, but the
# pipeline results will start from the next day, which is
# `test_start_date`.
test_start_date = pd.Timestamp("2015-01-05", tz="UTC")
END_DATE = test_end_date = pd.Timestamp("2015-01-12", tz="utc")
split_adjusted_before_start = test_start_date - timedelta(days=1)
split_adjusted_after_end = test_end_date + timedelta(days=1)
# Must parametrize over this because there can only be 1 such date for
# each set of data.
split_adjusted_asof_dates = [
(test_start_date,),
(test_end_date,),
(split_adjusted_before_start,),
(split_adjusted_after_end,),
]
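    # With the dates above, the parameterized cases are 2015-01-05 (start),
    # 2015-01-12 (end), 2015-01-04 (before start), and 2015-01-13 (after end).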
@classmethod
def init_class_fixtures(cls):
super(WithAdjustmentBoundaries, cls).init_class_fixtures()
cls.s0 = cls.asset_finder.retrieve_asset(0)
cls.s1 = cls.asset_finder.retrieve_asset(1)
cls.s2 = cls.asset_finder.retrieve_asset(2)
cls.s3 = cls.asset_finder.retrieve_asset(3)
cls.s4 = cls.asset_finder.retrieve_asset(4)
cls.expected = cls.make_expected_out()
@classmethod
def make_events(cls):
# We can create a sid for each configuration of dates for KDs, events,
# and splits. For this test we don't care about overwrites so we only
# test 1 quarter.
sid_0_timeline = pd.DataFrame(
{
# KD on first date of index
TS_FIELD_NAME: cls.test_start_date,
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-09"),
"estimate": 10.0,
FISCAL_QUARTER_FIELD_NAME: 1,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
},
index=[0],
)
sid_1_timeline = pd.DataFrame(
{
TS_FIELD_NAME: cls.test_start_date,
# event date on first date of index
EVENT_DATE_FIELD_NAME: cls.test_start_date,
"estimate": 11.0,
FISCAL_QUARTER_FIELD_NAME: 1,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 1,
},
index=[0],
)
sid_2_timeline = pd.DataFrame(
{
                # KD on last date of index
TS_FIELD_NAME: cls.test_end_date,
EVENT_DATE_FIELD_NAME: cls.test_end_date + timedelta(days=1),
"estimate": 12.0,
FISCAL_QUARTER_FIELD_NAME: 1,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 2,
},
index=[0],
)
sid_3_timeline = pd.DataFrame(
{
TS_FIELD_NAME: cls.test_end_date - timedelta(days=1),
EVENT_DATE_FIELD_NAME: cls.test_end_date,
"estimate": 13.0,
FISCAL_QUARTER_FIELD_NAME: 1,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 3,
},
index=[0],
)
# KD and event date don't fall on date index boundaries
sid_4_timeline = pd.DataFrame(
{
TS_FIELD_NAME: cls.test_end_date - timedelta(days=1),
EVENT_DATE_FIELD_NAME: cls.test_end_date - timedelta(days=1),
"estimate": 14.0,
FISCAL_QUARTER_FIELD_NAME: 1,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 4,
},
index=[0],
)
return pd.concat(
[
sid_0_timeline,
sid_1_timeline,
sid_2_timeline,
sid_3_timeline,
sid_4_timeline,
]
)
@classmethod
def make_splits_data(cls):
        # Here we want splits whose effective dates collide with the sids'
        # KD/event dates at the index boundaries.
sid_0_splits = pd.DataFrame(
{
SID_FIELD_NAME: 0,
"ratio": 0.10,
"effective_date": cls.test_start_date,
},
index=[0],
)
sid_1_splits = pd.DataFrame(
{
SID_FIELD_NAME: 1,
"ratio": 0.11,
"effective_date": cls.test_start_date,
},
index=[0],
)
sid_2_splits = pd.DataFrame(
{
SID_FIELD_NAME: 2,
"ratio": 0.12,
"effective_date": cls.test_end_date,
},
index=[0],
)
sid_3_splits = pd.DataFrame(
{
SID_FIELD_NAME: 3,
"ratio": 0.13,
"effective_date": cls.test_end_date,
},
index=[0],
)
# We want 2 splits here - at the starting boundary and at the end
# boundary - while there is no collision with KD/event date for the
# sid.
sid_4_splits = pd.DataFrame(
{
SID_FIELD_NAME: 4,
"ratio": (0.14, 0.15),
"effective_date": (cls.test_start_date, cls.test_end_date),
}
)
return pd.concat(
[sid_0_splits, sid_1_splits, sid_2_splits, sid_3_splits, sid_4_splits]
)
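    # `make_loader` in the subclasses returns a functools.partial that binds
    # everything except `split_adjusted_asof`, so each parameterized case can
    # build a loader for its own asof date.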
@parameterized.expand(split_adjusted_asof_dates)
def test_boundaries(self, split_date):
dataset = QuartersEstimates(1)
loader = self.loader(split_adjusted_asof=split_date)
engine = self.make_engine(loader)
result = engine.run_pipeline(
Pipeline({"estimate": dataset.estimate.latest}),
start_date=self.trading_days[0],
# last event date we have
end_date=self.trading_days[-1],
)
expected = self.expected[split_date]
assert_frame_equal(result, expected, check_names=False)
@classmethod
def make_expected_out(cls):
return {}
class PreviousWithAdjustmentBoundaries(WithAdjustmentBoundaries, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return partial(
PreviousSplitAdjustedEarningsEstimatesLoader,
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=["estimate"],
)
@classmethod
def make_expected_out(cls):
split_adjusted_at_start_boundary = (
pd.concat(
[
pd.DataFrame(
{
SID_FIELD_NAME: cls.s0,
"estimate": np.NaN,
},
index=pd.date_range(
cls.test_start_date,
pd.Timestamp("2015-01-08", tz="UTC"),
tz="utc",
),
),
pd.DataFrame(
{
SID_FIELD_NAME: cls.s0,
"estimate": 10.0,
},
index=pd.date_range(
pd.Timestamp("2015-01-09", tz="UTC"),
cls.test_end_date,
tz="utc",
),
),
pd.DataFrame(
{
SID_FIELD_NAME: cls.s1,
"estimate": 11.0,
},
index=pd.date_range(
cls.test_start_date, cls.test_end_date, tz="utc"
),
),
pd.DataFrame(
{SID_FIELD_NAME: cls.s2, "estimate": np.NaN},
index=pd.date_range(
cls.test_start_date, cls.test_end_date, tz="utc"
),
),
pd.DataFrame(
{SID_FIELD_NAME: cls.s3, "estimate": np.NaN},
index=pd.date_range(
cls.test_start_date,
cls.test_end_date - timedelta(1),
tz="utc",
),
),
pd.DataFrame(
{SID_FIELD_NAME: cls.s3, "estimate": 13.0 * 0.13},
index=pd.date_range(
cls.test_end_date, cls.test_end_date, tz="utc"
),
),
pd.DataFrame(
{SID_FIELD_NAME: cls.s4, "estimate": np.NaN},
index=pd.date_range(
cls.test_start_date,
cls.test_end_date - timedelta(2),
tz="utc",
),
),
pd.DataFrame(
{SID_FIELD_NAME: cls.s4, "estimate": 14.0 * 0.15},
index=pd.date_range(
cls.test_end_date - timedelta(1),
cls.test_end_date,
tz="utc",
),
),
]
)
.set_index(SID_FIELD_NAME, append=True)
.unstack(SID_FIELD_NAME)
.reindex(cls.trading_days)
.stack(SID_FIELD_NAME, dropna=False)
)
split_adjusted_at_end_boundary = (
pd.concat(
[
pd.DataFrame(
{
SID_FIELD_NAME: cls.s0,
"estimate": np.NaN,
},
index=pd.date_range(
cls.test_start_date,
pd.Timestamp("2015-01-08", tz="UTC"),
tz="utc",
),
),
pd.DataFrame(
{
SID_FIELD_NAME: cls.s0,
"estimate": 10.0,
},
index=pd.date_range(
pd.Timestamp("2015-01-09", tz="UTC"),
cls.test_end_date,
tz="utc",
),
),
pd.DataFrame(
{
SID_FIELD_NAME: cls.s1,
"estimate": 11.0,
},
index=pd.date_range(
cls.test_start_date, cls.test_end_date, tz="utc"
),
),
pd.DataFrame(
{SID_FIELD_NAME: cls.s2, "estimate": np.NaN},
index=pd.date_range(
cls.test_start_date, cls.test_end_date, tz="utc"
),
),
pd.DataFrame(
{SID_FIELD_NAME: cls.s3, "estimate": np.NaN},
index=pd.date_range(
cls.test_start_date,
cls.test_end_date - timedelta(1),
tz="utc",
),
),
pd.DataFrame(
{SID_FIELD_NAME: cls.s3, "estimate": 13.0},
index=pd.date_range(
cls.test_end_date, cls.test_end_date, tz="utc"
),
),
pd.DataFrame(
{SID_FIELD_NAME: cls.s4, "estimate": np.NaN},
index=pd.date_range(
cls.test_start_date,
cls.test_end_date - timedelta(2),
tz="utc",
),
),
pd.DataFrame(
{SID_FIELD_NAME: cls.s4, "estimate": 14.0},
index=pd.date_range(
cls.test_end_date - timedelta(1),
cls.test_end_date,
tz="utc",
),
),
]
)
.set_index(SID_FIELD_NAME, append=True)
.unstack(SID_FIELD_NAME)
.reindex(cls.trading_days)
.stack(SID_FIELD_NAME, dropna=False)
)
split_adjusted_before_start_boundary = split_adjusted_at_start_boundary
split_adjusted_after_end_boundary = split_adjusted_at_end_boundary
return {
cls.test_start_date: split_adjusted_at_start_boundary,
cls.split_adjusted_before_start: split_adjusted_before_start_boundary,
cls.test_end_date: split_adjusted_at_end_boundary,
cls.split_adjusted_after_end: split_adjusted_after_end_boundary,
}
class NextWithAdjustmentBoundaries(WithAdjustmentBoundaries, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return partial(
NextSplitAdjustedEarningsEstimatesLoader,
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=["estimate"],
)
@classmethod
def make_expected_out(cls):
split_adjusted_at_start_boundary = (
pd.concat(
[
pd.DataFrame(
{
SID_FIELD_NAME: cls.s0,
"estimate": 10,
},
index=pd.date_range(
cls.test_start_date,
pd.Timestamp("2015-01-09", tz="UTC"),
tz="utc",
),
),
pd.DataFrame(
{
SID_FIELD_NAME: cls.s1,
"estimate": 11.0,
},
index=pd.date_range(
cls.test_start_date, cls.test_start_date, tz="utc"
),
),
pd.DataFrame(
{
SID_FIELD_NAME: cls.s2,
"estimate": 12.0,
},
index=pd.date_range(
cls.test_end_date, cls.test_end_date, tz="utc"
),
),
pd.DataFrame(
{
SID_FIELD_NAME: cls.s3,
"estimate": 13.0 * 0.13,
},
index=pd.date_range(
cls.test_end_date - timedelta(1),
cls.test_end_date,
tz="utc",
),
),
pd.DataFrame(
{
SID_FIELD_NAME: cls.s4,
"estimate": 14.0,
},
index=pd.date_range(
cls.test_end_date - timedelta(1),
cls.test_end_date - timedelta(1),
tz="utc",
),
),
]
)
.set_index(SID_FIELD_NAME, append=True)
.unstack(SID_FIELD_NAME)
.reindex(cls.trading_days)
.stack(SID_FIELD_NAME, dropna=False)
)
split_adjusted_at_end_boundary = (
pd.concat(
[
pd.DataFrame(
{
SID_FIELD_NAME: cls.s0,
"estimate": 10,
},
index=pd.date_range(
cls.test_start_date,
pd.Timestamp("2015-01-09", tz="UTC"),
tz="utc",
),
),
pd.DataFrame(
{
SID_FIELD_NAME: cls.s1,
"estimate": 11.0,
},
index=pd.date_range(
cls.test_start_date, cls.test_start_date, tz="utc"
),
),
pd.DataFrame(
{
SID_FIELD_NAME: cls.s2,
"estimate": 12.0,
},
index=pd.date_range(
cls.test_end_date, cls.test_end_date, tz="utc"
),
),
pd.DataFrame(
{
SID_FIELD_NAME: cls.s3,
"estimate": 13.0,
},
index=pd.date_range(
cls.test_end_date - timedelta(1),
cls.test_end_date,
tz="utc",
),
),
pd.DataFrame(
{
SID_FIELD_NAME: cls.s4,
"estimate": 14.0,
},
index=pd.date_range(
cls.test_end_date - timedelta(1),
cls.test_end_date - timedelta(1),
tz="utc",
),
),
]
)
.set_index(SID_FIELD_NAME, append=True)
.unstack(SID_FIELD_NAME)
.reindex(cls.trading_days)
.stack(SID_FIELD_NAME, dropna=False)
)
split_adjusted_before_start_boundary = split_adjusted_at_start_boundary
split_adjusted_after_end_boundary = split_adjusted_at_end_boundary
return {
cls.test_start_date: split_adjusted_at_start_boundary,
cls.split_adjusted_before_start: split_adjusted_before_start_boundary,
cls.test_end_date: split_adjusted_at_end_boundary,
cls.split_adjusted_after_end: split_adjusted_after_end_boundary,
}
class TestQuarterShift:
"""
This tests, in isolation, quarter calculation logic for shifting quarters
backwards/forwards from a starting point.
"""
def test_quarter_normalization(self):
input_yrs = pd.Series(range(2011, 2015), dtype=np.int64)
input_qtrs = pd.Series(range(1, 5), dtype=np.int64)
result_years, result_quarters = split_normalized_quarters(
normalize_quarters(input_yrs, input_qtrs)
)
        # Can't use assert_series_equal here with check_names=False
        # because that still fails due to name differences.
        # TODO: with pandas >= 1.0, assert_series_equal seems to work fine.
assert_equal(input_yrs, result_years)
assert_equal(input_qtrs, result_quarters)
| 39.121795 | 88 | 0.491881 | 12,330 | 115,957 | 4.434144 | 0.058151 | 0.076253 | 0.100415 | 0.100744 | 0.694273 | 0.651565 | 0.605601 | 0.578604 | 0.561557 | 0.529823 | 0 | 0.088953 | 0.40609 | 115,957 | 2,963 | 89 | 39.134998 | 0.704928 | 0.139491 | 0 | 0.596635 | 0 | 0 | 0.062233 | 0.000518 | 0 | 0 | 0 | 0.000338 | 0.012942 | 1 | 0.040984 | false | 0.001726 | 0.008628 | 0.016825 | 0.112597 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f451c92b7dac95c814165b7df6b2c381a7e95ca | 1,407 | py | Python | python/463.island-perimeter.py | fengbaoheng/leetcode | 2b6ec9adea383503acc23622ca5623161f7ca520 | [
"MIT"
] | 1 | 2019-04-11T12:34:55.000Z | 2019-04-11T12:34:55.000Z | python/463.island-perimeter.py | fengbaoheng/leetcode | 2b6ec9adea383503acc23622ca5623161f7ca520 | [
"MIT"
] | null | null | null | python/463.island-perimeter.py | fengbaoheng/leetcode | 2b6ec9adea383503acc23622ca5623161f7ca520 | [
"MIT"
] | null | null | null | #
# @lc app=leetcode.cn id=463 lang=python3
#
# [463] 岛屿的周长
#
# https://leetcode-cn.com/problems/island-perimeter/description/
#
# algorithms
# Easy (59.76%)
# Total Accepted: 5.4K
# Total Submissions: 9K
# Testcase Example: '[[0,1,0,0],[1,1,1,0],[0,1,0,0],[1,1,0,0]]'
#
# You are given a 2D grid map containing 0s and 1s, where 1 represents land
# and 0 represents water.
#
# Grid cells are connected horizontally and vertically (not diagonally). The
# grid is completely surrounded by water, and it contains exactly one island
# (that is, one or more connected land cells).
#
# The island has no "lakes" (a "lake" is water inside the island that is not
# connected to the water surrounding it). Each cell is a unit square. The
# grid is rectangular, and its width and height do not exceed 100. Compute
# the perimeter of the island.
#
#
#
# Example:
#
# Input:
# [[0,1,0,0],
#  [1,1,1,0],
#  [0,1,0,0],
#  [1,1,0,0]]
#
# Output: 16
#
# Explanation: the perimeter is the 16 yellow edges in the image below (image
# not included here):
#
#
#
#
#
from typing import List
class Solution:
    # For each land cell, just count how many of its four sides face water
    # or the grid boundary.
def islandPerimeter(self, grid: List[List[int]]) -> int:
        if not grid or not grid[0]:
            return 0
        row_count = len(grid)
        col_count = len(grid[0])
        count = 0
        for r in range(row_count):
            for c in range(col_count):
                if grid[r][c] == 1:
                    # A side contributes to the perimeter when it touches the
                    # grid boundary or a water cell.
                    up = 1 if r == 0 or grid[r - 1][c] == 0 else 0
                    down = 1 if r == row_count - 1 or grid[r + 1][c] == 0 else 0
                    left = 1 if c == 0 or grid[r][c - 1] == 0 else 0
                    right = 1 if c == col_count - 1 or grid[r][c + 1] == 0 else 0
                    count += up + down + left + right
        return count
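
if __name__ == "__main__":
    # Local smoke test using the example from the problem statement.
    grid = [[0, 1, 0, 0], [1, 1, 1, 0], [0, 1, 0, 0], [1, 1, 0, 0]]
    assert Solution().islandPerimeter(grid) == 16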
| 22.333333 | 85 | 0.510306 | 211 | 1,407 | 3.388626 | 0.421801 | 0.027972 | 0.029371 | 0.022378 | 0.137063 | 0.12028 | 0.103497 | 0.083916 | 0 | 0 | 0 | 0.086681 | 0.327647 | 1,407 | 62 | 86 | 22.693548 | 0.665962 | 0.394456 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.055556 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f45923bf32c54dc1d821bc697cfd96aa4870fdd | 3,411 | py | Python | MarkerMotionSimulation/simMarkMotionField.py | CMURoboTouch/Taxim | d067cc63892fab8de736a1d3f449d01368b32205 | [
"MIT"
] | 19 | 2021-10-04T22:14:23.000Z | 2022-03-28T08:12:33.000Z | MarkerMotionSimulation/simMarkMotionField.py | radhen/Taxim | d067cc63892fab8de736a1d3f449d01368b32205 | [
"MIT"
] | null | null | null | MarkerMotionSimulation/simMarkMotionField.py | radhen/Taxim | d067cc63892fab8de736a1d3f449d01368b32205 | [
"MIT"
] | 3 | 2021-12-23T00:39:20.000Z | 2022-02-02T13:22:46.000Z | import matplotlib.pyplot as plt
import numpy as np
import os
from os import path as osp
import argparse
import sys
sys.path.append("..")
import Basics.sensorParams as psp
from compose.dataLoader import dataLoader
from compose.superposition import SuperPosition, fill_blank, cropMap
parser = argparse.ArgumentParser()
parser.add_argument("-obj", nargs='?', default='square',
help="Name of Object to be tested, supported_objects_list = [square, cylinder6]")
parser.add_argument('-dx', default = 0.0, type=float, help='Shear load on X axis.')
parser.add_argument('-dy', default = 0.0, type=float, help='Shear load on Y axis.')
parser.add_argument('-dz', default = 1.0, type=float, help='Shear load on Z axis.')
args = parser.parse_args()
def getDomeHeightMap(filePath, obj, press_depth, domeMap):
"""
get the height map & contact mask from the object and gelpad model
obj: object's point cloud
press_depth: in millimeter
domeMap: gelpad model
return:
zq: height map with contact
contact_mask
"""
# read in the object's model
objPath = osp.join(filePath,obj)
f = open(objPath)
lines = f.readlines()
verts_num = int(lines[3].split(' ')[-1])
verts_lines = lines[10:10 + verts_num]
vertices = np.array([list(map(float, l.strip().split(' '))) for l in verts_lines])
heightMap = np.zeros((psp.d,psp.d))
cx = np.mean(vertices[:,0])
cy = np.mean(vertices[:,1])
uu = ((vertices[:,0] - cx)/psp.pixmm + psp.d//2).astype(int)
vv = ((vertices[:,1] - cy)/psp.pixmm + psp.d//2).astype(int)
mask_u = np.logical_and(uu > 0, uu < psp.d)
mask_v = np.logical_and(vv > 0, vv < psp.d)
mask_z = vertices[:,2] > 0.2
mask_map = mask_u & mask_v & mask_z
heightMap[vv[mask_map],uu[mask_map]] = vertices[mask_map][:,2]/psp.pixmm
max_o = np.max(heightMap)
heightMap -= max_o
pressing_height_pix = press_depth/psp.pixmm
gel_map = heightMap+pressing_height_pix
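    # A pixel is in contact wherever the pressed object's surface rises above
    # the gel dome model.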
contact_mask = (gel_map > domeMap)
zq = np.zeros((psp.d,psp.d))
zq[contact_mask] = gel_map[contact_mask] - domeMap[contact_mask]
return zq, contact_mask
if __name__ == "__main__":
# calibration file
data_folder = osp.join("..", "calibs", "femCalib.npz")
    superposer = SuperPosition(data_folder)
# compose
filePath = osp.join('..', 'data', 'objects')
obj = args.obj+'.ply'
local_deform = np.array([args.dx, args.dy, args.dz])
press_depth = local_deform[2]
domeMap = np.load(osp.join('..', 'calibs', 'dome_gel.npy'))
gel_map, contact_mask = getDomeHeightMap(filePath, obj, press_depth, domeMap)
    resultMap = superposer.compose_sparse(local_deform, gel_map, contact_mask)
#### for visualization/saving the results ###
compose_savePath = osp.join('..', 'results', args.obj+'_compose.jpg')
plt.figure(1)
plt.subplot(311)
fig = plt.imshow(fill_blank(resultMap[0,:,:]), cmap='RdBu')
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
plt.subplot(312)
fig = plt.imshow(fill_blank(resultMap[1,:,:]), cmap='RdBu')
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
plt.subplot(313)
fig = plt.imshow(fill_blank(resultMap[2,:,:]), cmap='RdBu')
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
# plt.show()
plt.savefig(compose_savePath)
| 35.164948 | 101 | 0.668426 | 501 | 3,411 | 4.389222 | 0.313373 | 0.04502 | 0.027285 | 0.0191 | 0.259209 | 0.259209 | 0.16462 | 0.133242 | 0.133242 | 0.103229 | 0 | 0.013899 | 0.177367 | 3,411 | 96 | 102 | 35.53125 | 0.769779 | 0.085605 | 0 | 0.089552 | 0 | 0 | 0.084066 | 0.007168 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014925 | false | 0 | 0.134328 | 0 | 0.164179 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f46438dbabd7f10447a58a987f3988c1715e25a | 3,285 | py | Python | ImageCaptioning/reader.py | GT-AcerZhang/MyImageCaptioningModel | 83ccda0fb2b542d5c6693270247435f68a242629 | [
"Apache-2.0"
] | 2 | 2020-09-09T16:33:47.000Z | 2021-02-27T17:58:52.000Z | ImageCaptioning/reader.py | GT-AcerZhang/MyImageCaptioningModel | 83ccda0fb2b542d5c6693270247435f68a242629 | [
"Apache-2.0"
] | 1 | 2020-06-12T12:11:59.000Z | 2020-06-12T12:11:59.000Z | ImageCaptioning/reader.py | GT-AcerZhang/MyImageCaptioningModel | 83ccda0fb2b542d5c6693270247435f68a242629 | [
"Apache-2.0"
] | 1 | 2021-03-05T11:07:17.000Z | 2021-03-05T11:07:17.000Z | from paddle import fluid
import numpy as np
import os
from PIL import Image
import config
from tools import hdf5_manager
_image_mean = np.array(config.dc['ImageMean'], dtype='float32').reshape((3, 1, 1))
_image_std = np.array(config.dc['ImageStd'], dtype='float32').reshape((3, 1, 1))
def process_image(img):
if not isinstance(img, Image.Image):
        raise ValueError('image should be a PIL Image, got {}'.format(type(img)))
shape = config.dc['ImageShape']
img = img.resize(shape, Image.ANTIALIAS)
img = np.array(img, dtype='float32')
if len(img.shape) != 3:
return None
img = img.transpose((2, 0, 1)) / 255
img -= _image_mean
img /= _image_std
return img
def read_image(path):
return Image.open(path)
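# Example (hypothetical path): process_image(read_image("imgs/cat.jpg"))
# returns a (3, H, W) float32 array normalized by the configured mean/std,
# or None if the decoded image is not 3-channel.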
class DataReader:
_word2index = None
_index2word = None
_hdf5 = None
def init_hdf5(self):
DataReader._hdf5 = hdf5_manager.Hdf5Manager()
DataReader._hdf5.load_database(config.data['H5Path'])
DataReader._hdf5.load_name2idx(config.dc['H5Name2Idx'])
def get_reader(self, batch_size=None, mode='train'):
if DataReader._hdf5 is None:
self.init_hdf5()
def h5_reader(x):
img = DataReader._hdf5.read(x)
return img.astype('float32')
return self._get_reader(h5_reader, batch_size, mode)
def _get_reader(self, processor, batch_size=None, mode='train'):
if mode not in ['train', 'dev', 'test']:
            raise ValueError("DataReader does not support mode '{}'".format(mode))
if mode == 'train':
captions, sentence_len = \
np.load(os.path.join(config.dc['DictPath'], 'train_cap.npy'), allow_pickle=True)
else:
path = os.path.join(config.dc['DictPath'], 'dev_data.npy' if mode == 'dev' else 'eval_data.npy')
files, files2cap = np.load(path, allow_pickle=True)
def reader_train():
for name, cap in captions:
img = processor(name)
caption = np.array(cap, dtype='int64')
yield img, caption
def reader_infer():
for name in files:
img = processor(name)
caption = files2cap[name]
yield img, caption
rd = reader_train if mode == 'train' else reader_infer
rd = rd if batch_size is None else fluid.io.batch(rd, batch_size)
rd = rd if mode == 'train' else fluid.io.buffered(rd, config.train['data_loader_capacity'])
return rd
@property
def word_index(self):
if DataReader._word2index is None:
word_index, index_word = \
np.load(os.path.join(config.dc['DictPath'], 'word_dict.npy'), allow_pickle=True)
DataReader._word2index = word_index
DataReader._index2word = index_word
return DataReader._word2index
@property
def index_word(self):
if DataReader._index2word is None:
word_index, index_word = \
np.load(os.path.join(config.dc['DictPath'], 'word_dict.npy'), allow_pickle=True)
DataReader._word2index = word_index
DataReader._index2word = index_word
return DataReader._index2word
if __name__ == "__main__":
dr = DataReader()
print(len(dr.index_word))
| 32.85 | 108 | 0.617656 | 414 | 3,285 | 4.710145 | 0.268116 | 0.032821 | 0.020513 | 0.032821 | 0.236923 | 0.236923 | 0.17641 | 0.17641 | 0.16 | 0.16 | 0 | 0.021559 | 0.265753 | 3,285 | 99 | 109 | 33.181818 | 0.786899 | 0 | 0 | 0.177215 | 0 | 0 | 0.086149 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.126582 | false | 0 | 0.075949 | 0.012658 | 0.35443 | 0.012658 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f497ba3c59044df0c76a764ab13eed689b22d42 | 2,791 | py | Python | getData.py | SamGalanakis/SimilarSummonersGraph | 26bb6448e8bbf3c7398fdc3638998d0998cfc25e | [
"MIT"
] | null | null | null | getData.py | SamGalanakis/SimilarSummonersGraph | 26bb6448e8bbf3c7398fdc3638998d0998cfc25e | [
"MIT"
] | 5 | 2021-06-08T22:16:33.000Z | 2022-03-12T00:46:02.000Z | getData.py | SamGalanakis/SimilarSummonersGraph | 26bb6448e8bbf3c7398fdc3638998d0998cfc25e | [
"MIT"
] | null | null | null | import cassiopeia as cass
from cassiopeia import Summoner
import csv
import collections
import pandas as pd
with open("api_key.txt","r") as f:
api_key=f.read()
#settings
settings = cass.get_default_config()
settings["pipeline"]["RiotAPI"]["api_key"]=api_key
settings["logging"]["print_calls"]=False
cass.apply_settings(settings)
cass.set_default_region("EUW")
#use previously collected data to avoid duplicates
dfSoFar = pd.read_csv("mainData.csv")
seenSummoners=set(dfSoFar["summoner"]) # Use set for efficient set membership checking of summoner names, significantly faster than list
seedName = dfSoFar["summoner"].iloc[-1]
summoner_seed = cass.get_summoner(name=seedName,region="EUW")
#sort alphabetically for column labels
try:
columnLabels = [cm.champion.name for cm in summoner_seed.champion_masteries]
except:
seedName = dfSoFar["summoner"].iloc[-2]
summoner_seed = cass.get_summoner(name=seedName,region="EUW")
columnLabels = [cm.champion.name for cm in summoner_seed.champion_masteries]
columnLabels = sorted(columnLabels)
columnLabels.insert(0,"summoner")
with open('data//lastCollection.csv', mode='w',encoding="utf-8",newline='') as dataCSV:
dataCSVWriter = csv.writer(dataCSV, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
dataCSVWriter.writerow(columnLabels)
summoner=summoner_seed
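    # Crawl strategy: walk the current summoner's match history, record the
    # champion-mastery vector of every unseen participant, then continue the
    # crawl from the most recently seen participant.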
while True:
print(summoner.name)
        for match in summoner.match_history:
try:
participants = match.participants
except:
continue
for participant in participants:
summonerFromHist = participant.summoner
summonerFromHistName = summonerFromHist.name
if not summonerFromHistName in seenSummoners: #make sure not taking duplicate summoners
seenSummoners.add(summonerFromHistName)
try:
sortedMasteries= sorted(summonerFromHist.champion_masteries,key = lambda x: x.champion.name)
sortedPoints = [cm.points for cm in sortedMasteries]
except:
continue
csvRow = sortedPoints
csvRow.insert(0,summonerFromHistName)
try:
dataCSVWriter.writerow(csvRow)
except:
print(f"Failed to write to csv for summoner {summonerFromHistName}") #rare api call fails for some summoners
summoner = participants[-1].summoner #get summoner for next run
| 32.08046 | 136 | 0.640272 | 289 | 2,791 | 6.103806 | 0.435986 | 0.034014 | 0.011905 | 0.030612 | 0.124717 | 0.124717 | 0.124717 | 0.124717 | 0.124717 | 0.070295 | 0 | 0.004453 | 0.275887 | 2,791 | 86 | 137 | 32.453488 | 0.868382 | 0.10498 | 0 | 0.254545 | 0 | 0 | 0.078313 | 0.018474 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.109091 | 0 | 0.109091 | 0.054545 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f49ad55c69ff7b0e8aedc0c69d313b374a69ec7 | 8,161 | py | Python | src/shell/analysis/type.py | Frky/scat | 551c3a155dd3804d78d6efb96ecdace430bd6473 | [
"MIT"
] | 77 | 2015-10-10T19:59:47.000Z | 2021-12-10T06:59:35.000Z | src/shell/analysis/type.py | Frky/scat | 551c3a155dd3804d78d6efb96ecdace430bd6473 | [
"MIT"
] | 19 | 2017-02-23T15:59:01.000Z | 2017-06-14T11:41:36.000Z | src/shell/analysis/type.py | Frky/scat | 551c3a155dd3804d78d6efb96ecdace430bd6473 | [
"MIT"
] | 12 | 2015-12-08T17:57:49.000Z | 2021-04-16T07:42:31.000Z | #-*- coding: utf-8 -*-
from datetime import datetime
import re
from analysis import Analysis
from src.shell.parser.type import TypeLogParser
class TypeAnalysis(Analysis):
def __init__(self, pgm, logfile, data=None):
Analysis.__init__(self, pgm, logfile)
self.data = data
if data == None:
self.protos = None
else:
self.protos = data.protos
def parse_log(self):
self.log = TypeLogParser(self.logfile)
def print_general_info(self):
if self.data is None:
Analysis.print_general_info(self)
else:
Analysis.print_general_info_with_data(self, self.data)
def check_function(self, fname, args, proto, undef_as_int = False):
ret_fp, ret_fn = 0, 0
param_fp, param_fn = 0, 0
ret_tot, param_tot = 1, 0
real_ret = self.get_one(proto[0])
# Eliminate problems due to arity detection
# on retval
if (real_ret == "VOID" and args[0] != "VOID") or \
(real_ret != "VOID" and args[0] == "VOID"):
return (0,0,0,0,0,0)
if self.check_one(proto[0], args[0], undef_as_int):
            pass  # return type inferred correctly; nothing to count
elif real_ret == 'ADDR':
ret_fn += 1
else:
ret_fp += 1
ar = min(len(args), len(proto))
for ref, inf in zip(proto[1:ar], args[1:ar]):
if ref == "...":
break
param_tot += 1
real_param = self.get_one(ref)
if self.check_one(ref, inf, undef_as_int):
pass
elif real_param == 'ADDR':
param_fn += 1
else:
param_fp += 1
return (param_fp, param_fn, param_tot, ret_fp, ret_fn, ret_tot)
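    # Convention above: a missed ADDR (pointer/array) inference counts as a
    # false negative, while any other mismatch counts as a false positive.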
def get_one(self, ref):
if '*' in ref or '[' in ref:
return 'ADDR'
elif ref == 'float' or ref == 'double':
return 'FLOAT'
elif ref == 'void':
return 'VOID'
else:
return 'INT'
def check_one(self, ref, inf, undef_as_int):
if inf == 'UNDEF':
if undef_as_int:
inf = 'INT'
else:
return False
if '*' in ref or '[' in ref:
return inf.startswith('ADDR')
elif ref == 'float' or ref == 'double':
return inf.startswith('FLOAT')
elif ref == 'void':
return inf.startswith('VOID')
else:
return inf.startswith('INT')
def args_str(self, img, imgaddr, fn, args):
line = ""
if len(args) == 0:
line += "void "
else:
endidx = args[0].find("(")
if endidx == -1:
line += args[0].lower()
else:
line += args[0][0:endidx].lower()
line += " "
if fn == "":
line += '[{}@{}]'.format(img, hex(imgaddr))
else:
line += fn
line += "("
if len(args) == 1:
line += "void"
if not (args is None or len(args) == 1):
for i, arg in enumerate(args[1:]):
endidx = arg.find("(")
if endidx == -1:
line += arg.lower()
else:
line += arg[0:endidx].lower()
if i != len(args) - 2:
line += ", "
line += ");"
return line
def pp_data_type(self, type):
if '*' in type or '[' in type:
return 'addr '
elif type == 'void':
return 'void '
elif type == 'float' or type == 'double':
return 'float'
else:
return 'int '
def pp_inferred_type(self, type):
idx = type.find("(")
if idx == -1:
return type.lower().ljust(5)
else:
return type[:idx].lower().ljust(5)
def display(self):
for function, args in self.log.get():
img, imgaddr, fn = function.split(":")
imgaddr = int(imgaddr)
print(self.args_str(img, imgaddr, fn, args))
print("")
self.print_general_info()
def accuracy(self, get=False, verbose=True, log=None, empty_time=0.0, nopin_time=0.0):
if verbose:
self.print_general_info()
print("")
without_name = 0
variadic = 0
pseudo_functions = 0
not_found = 0
return_ok = 0
return_total = 0
return_fp = 0
return_fn = 0
params_ok = 0
params_total = 0
param_fp = 0
param_fn = 0
for function, args in self.log.get():
fn = function.split(":")[-1]
if fn == "":
without_name += 1
continue
elif self.is_pseudo_function(fn):
pseudo_functions += 1
continue
elif fn not in self.protos.keys():
not_found += 1
continue
proto = self.protos[fn]
if self.is_variadic(proto):
variadic += 1
continue
pfp, pfn, ptot, rfp, rfn, rtot = self.check_function(fn, args, proto, undef_as_int = True)
params_ok += (ptot - pfn - pfp)
params_total += ptot
return_ok += (rtot - rfn - rfp)
return_total += rtot
return_fp += rfp
return_fn += rfn
param_fp += pfp
param_fn += pfn
if verbose:
print("Ignored")
print("| Without name: {0}".format(without_name))
print("| Variadic: {0}".format(variadic))
print("| Pseudo-Functions: {0}".format(pseudo_functions))
print("- Not in binary/source: {0}".format(not_found))
print("")
print("Accuracy of inference")
print("| Params Ok/Total tested: {0}/{1}".format(params_ok, params_total))
print("| Return Ok/Total tested: {0}/{1}".format(return_ok, return_total))
print("| Ratio params: {0:.2f}%".format(self.ratio(params_ok, params_total)))
print("- Ratio return: {0:.2f}%".format(self.ratio(return_ok, return_total)))
if log is not None:
params = self.log.get_params()
with open(log, "a") as f:
f.write("{}:{}:{}:{}:{}:{}:{}:{}:{}:{}:{}:{}:{}\n".format(
self.pgm,
params["MIN_VALS"],
params["MAX_VALS"],
params["ADDR_THRESHOLD"],
param_fp,
param_fn,
params_total,
return_fp,
return_fn,
return_total,
self.log.time(),
empty_time,
nopin_time,
))
if get:
return (params_ok, return_ok, param_fp, param_fn, return_fp, return_fn, params_total, return_total)
def mismatch(self):
self.print_general_info()
print("")
for function, args in self.log.get():
img, imgaddr, fname = function.split(":")
imgaddr = int(imgaddr)
if fname == "" or fname not in self.protos.keys():
continue
proto = self.protos[fname]
if self.is_variadic(proto):
continue
res = self.check_function(fname, args, proto, True)
if res[0] + res[1] == 0 and res[3] + res[4] == 0:
continue
print("[{}@{}] {}".format(img, hex(imgaddr), fname))
print(" {} -> {}".format(", ".join(proto[1:]), proto[0]))
print("Expected: {} -> {}".format(
", ".join(map(self.pp_data_type, proto[1:])),
self.pp_data_type(proto[0])))
print("Got: {} -> {}".format(
", ".join(map(self.pp_inferred_type, args[1:])),
self.pp_inferred_type(args[0])))
| 32.256917 | 111 | 0.463669 | 915 | 8,161 | 3.981421 | 0.159563 | 0.00549 | 0.026352 | 0.015372 | 0.228109 | 0.080977 | 0.069448 | 0.03843 | 0.020313 | 0 | 0 | 0.017754 | 0.406445 | 8,161 | 252 | 112 | 32.384921 | 0.73431 | 0.008945 | 0 | 0.224299 | 0 | 0 | 0.072736 | 0.004948 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056075 | false | 0.004673 | 0.018692 | 0 | 0.168224 | 0.116822 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f4c3bd669ebe4b7d15081f8ab2ae582b7d993d4 | 1,998 | py | Python | home/views.py | Vlad-404/phot-portfolio | ff3fba512645c0781755bf5b6f7cc455f09b3c5b | [
"BSD-Source-Code"
] | 1 | 2021-01-05T15:52:19.000Z | 2021-01-05T15:52:19.000Z | home/views.py | Vlad-404/phot-portfolio | ff3fba512645c0781755bf5b6f7cc455f09b3c5b | [
"BSD-Source-Code"
] | null | null | null | home/views.py | Vlad-404/phot-portfolio | ff3fba512645c0781755bf5b6f7cc455f09b3c5b | [
"BSD-Source-Code"
] | 1 | 2021-01-05T18:44:47.000Z | 2021-01-05T18:44:47.000Z | from django.shortcuts import render
from .models import Categories, SocialMedia
from django.contrib import messages
from django.core.mail import send_mail
from django.conf import settings
from django.template.loader import render_to_string
# Create your views here.
def index(request):
media_links = SocialMedia.objects.all()
all_categories = Categories.objects.all()
    # If the user submitted the contact form
if request.method == 'POST':
my_email = settings.DEFAULT_FROM_EMAIL
# gets the data
contact_name = request.POST['contact-name']
contact_email = request.POST['contact-email']
contact_message = request.POST['contact-message']
# Handles sending of confirmation emails
subject = render_to_string(
'home/contact_emails/contact_email_subject.txt',
{'contact_name': contact_name})
body = render_to_string(
'home/contact_emails/contact_email_body.txt',
{
'contact_email': contact_email,
'contact_message': contact_message,
'contact_name': contact_name
})
send_mail(
subject,
body,
contact_email,
[my_email]
)
# displays confirmation message and renders the page
messages.success(request, (
            f'Thank you {contact_name}. Your message was sent successfully \
and will be answered as soon as possible'))
context = {
'page_title': 'Welcome',
'categories': all_categories,
'media_links': media_links,
'contact_name': contact_name
}
return render(request, 'home/index.html', context)
else:
context = {
'page_title': 'Welcome',
'categories': all_categories,
'media_links': media_links
}
# base index.html view
return render(request, 'home/index.html', context)
| 32.225806 | 87 | 0.608609 | 212 | 1,998 | 5.533019 | 0.372642 | 0.084399 | 0.061381 | 0.056266 | 0.252344 | 0.252344 | 0.252344 | 0.185848 | 0.112532 | 0.112532 | 0 | 0 | 0.307808 | 1,998 | 61 | 88 | 32.754098 | 0.848156 | 0.087087 | 0 | 0.212766 | 0 | 0 | 0.165658 | 0.047881 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021277 | false | 0 | 0.12766 | 0 | 0.191489 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f4da4c17f296bfd5e72a4b2eabc50c5ce43b646 | 4,448 | py | Python | api/app.py | nikmons/screwdriver | 9e59ea5f6338eb8377475f139318c203b4c1a118 | [
"MIT"
] | 1 | 2018-10-29T07:08:44.000Z | 2018-10-29T07:08:44.000Z | api/app.py | nikmons/screwdriver | 9e59ea5f6338eb8377475f139318c203b4c1a118 | [
"MIT"
] | 49 | 2018-11-06T07:10:41.000Z | 2019-01-14T12:48:08.000Z | api/app.py | nikmons/screwdriver | 9e59ea5f6338eb8377475f139318c203b4c1a118 | [
"MIT"
] | 2 | 2019-01-12T10:13:23.000Z | 2019-01-12T10:18:04.000Z | #!api/api.py
import os
import datetime
from flask import Flask, jsonify, abort, make_response, render_template
from flask_restful import Api, Resource, reqparse, fields, marshal
from flask_jwt_extended import (
JWTManager, jwt_required, create_access_token,
get_jwt_identity, create_refresh_token,
jwt_refresh_token_required, get_raw_jwt
)
from flasgger import Swagger, swag_from
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
from dotenv import load_dotenv
load_dotenv(verbose=True)
app = Flask(__name__, static_url_path="")
app.secret_key = os.getenv("FLASK_SECRET_KEY") # Load from env var
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = os.getenv("SQLALCHEMY_TRACK_MODIFICATIONS")
app.config["SQLALCHEMY_DATABASE_URI"] = os.getenv("DATABASE_URL")
app.config["ENVIRONMENT"] = os.getenv("ENV")
app.config["CSRF_ENABLED"] = True
app.config["SWAGGER"] = {"title":"Swagger JWT Authentication App", "uiversion":3}
app.config['JWT_BLACKLIST_ENABLED'] = True
app.config['JWT_BLACKLIST_TOKEN_CHECKS'] = ['access', 'refresh']
app.config['PROPAGATE_EXCEPTIONS'] = True
app.config['CORS_HEADERS'] = 'Content-Type'
swagger_template={
"openapi": "2.0.0",
"info": {
"title": "Scewdriver API (JWT Auth)",
"version": "1.0",
},
"securityDefinitions": {
"Bearer":{
"type": "apiKey",
"name": "Authorization",
"in": "header"
}
},
"produces": [
"application/json",
],
"security": [
{"Bearer": "[]"}
]
}
swagger = Swagger(app, template=swagger_template)
api = Api(app)
db = SQLAlchemy(app)
jwt = JWTManager(app)
cors = CORS(app, resources={r"/api/issues/findByTrackNum" : {"origins":"*"}})
blacklist = set()
@jwt.token_in_blacklist_loader
def check_if_token_in_blacklist(decrypted_token):
jti = decrypted_token['jti']
return jti in blacklist
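# Note: the blacklist is an in-process set(), so token revocations only last
# for the lifetime of this worker process.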
from resources.employee_list import EmployeeListAPI
from resources.employee import EmployeeAPI
from resources.device_list import DeviceListAPI
from resources.device import DeviceAPI
from resources.customer_list import CustomerListAPI
from resources.customer import CustomerAPI
from resources.login import LoginAPI
from resources.logout import LogoutAPI
from resources.problems_list import ProblemListAPI
from resources.states_list import StatesListAPI
from resources.issue_list import IssueListAPI
from resources.role import RoleAPI
from resources.role_list import RoleListAPI
from resources.employee_roles import EmployeeRolesAPI
from resources.user_issues_list import MyIssuesListAPI
from resources.issue import IssueAPI
from resources.timeline_list import IssueTimelineAPI
from resources.issue_by_tracknum import IssueFindByTrackNumAPI
from resources.statistics import StatisticsAPI
import models
api.add_resource(LoginAPI, '/api/login', endpoint='login')
api.add_resource(LogoutAPI, '/api/logout', endpoint='logout')
api.add_resource(DeviceListAPI, '/api/devices', endpoint='devices')
api.add_resource(DeviceAPI, '/api/devices/<int:id>', endpoint='device')
api.add_resource(CustomerListAPI, '/api/customers', endpoint='customers')
api.add_resource(CustomerAPI, '/api/customers/<int:id>', endpoint='customer')
api.add_resource(EmployeeListAPI, '/api/employees', endpoint='employees')
api.add_resource(EmployeeAPI, '/api/employees/<int:id>', endpoint='employee')
api.add_resource(ProblemListAPI, '/api/problems', endpoint='problems')
api.add_resource(StatesListAPI, '/api/states', endpoint='states')
api.add_resource(IssueListAPI, '/api/issues', endpoint='issues')
api.add_resource(RoleListAPI, '/api/roles', endpoint='roles')
api.add_resource(RoleAPI, '/api/roles/<int:id>', endpoint='role')
api.add_resource(EmployeeRolesAPI, '/api/employees/<int:id>/roles', endpoint='employee_roles')
api.add_resource(MyIssuesListAPI, '/api/myissues', endpoint='user_issues')
api.add_resource(IssueAPI, '/api/issues/<int:id>', endpoint='issue')
api.add_resource(IssueTimelineAPI, '/api/myissues/<int:id>/timeline', endpoint='issue_timeline')
api.add_resource(IssueFindByTrackNumAPI, '/api/issues/findByTrackNum', endpoint='issue_bytracknum')
api.add_resource(StatisticsAPI, '/api/statistics', endpoint='statistics')
@app.route("/")
@app.route("/index")
def index():
return render_template("index.html")
if __name__ == '__main__':
app.run(debug=True)
| 39.017544 | 99 | 0.748426 | 538 | 4,448 | 5.996283 | 0.27881 | 0.076565 | 0.082455 | 0.012399 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001537 | 0.122527 | 4,448 | 113 | 100 | 39.362832 | 0.825006 | 0.012815 | 0 | 0 | 0 | 0 | 0.226299 | 0.070419 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02 | false | 0 | 0.29 | 0.01 | 0.33 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f522a02ea86c80287eeccc84c1ab9a8aac4202d | 3,034 | py | Python | main.py | JoseNL27/SimplePoblationSim | 61dfce818296958107890363bbdef04d0fd4a3dd | [
"CC0-1.0"
] | 1 | 2021-04-18T13:28:55.000Z | 2021-04-18T13:28:55.000Z | main.py | JoseNL27/SimplePoblationSim | 61dfce818296958107890363bbdef04d0fd4a3dd | [
"CC0-1.0"
] | null | null | null | main.py | JoseNL27/SimplePoblationSim | 61dfce818296958107890363bbdef04d0fd4a3dd | [
"CC0-1.0"
] | null | null | null | #A very simple poblation simulator made to study what happen when a society reaches the food-consumption limit.
#Made by ElBarto27. Feel free to reproduce this script giving the correspondent credits.
from scipy.stats import norm
import random
#Opening and clearing the file where the code will write how many people are alive each day.
file = open("poblation.txt","r+")
file.truncate(0)
file.close()
peopleDictionary = []
x = 0
y = 0
startingPob = 10
#Setting up the class.
class Person():
def __init__(self):
        self.age = int((norm.rvs(size=1,loc=0.5,scale=0.15)[0]*10).round(0)) #Using a Gaussian distribution to randomize the starting age of each gen0 member.
        self.death = False #Obviously each member will start alive.
        self.hunger = 1 #Defining the starting hunger for each member.
def start(): #Function that adds the gen0 individuals to the list.
for x in range(0,startingPob):
person = Person()
peopleDictionary.append(person)
def day(): #Function for the daily routine.
if len([person for person in peopleDictionary if person.death == False]) > 500: #It sets the food limit.
food = 400
    else: #If the food limit isn't reached, there'll be food for 75% of the population.
        food = int(len([person for person in peopleDictionary if person.death == False])*0.75)
    for person in [person for person in peopleDictionary if person.death == False]: #Runs each living member's daily routine.
#print("#",peopleDictionary.index(person))
if person.hunger >= 2 and food > 0:
person.hunger = person.hunger - 2
food = food - 1
if person.hunger <= 1 and len([person for person in peopleDictionary if person.death == False]) > 1 and person.age in range (2,8):
bornRate = random.randint(0,100)
if bornRate < 56:
newBorn()
person.age += 1
person.hunger += 1
if person.age > 10:
person.death = True
peopleDictionary.remove(person)
if person.hunger > 5:
person.death = True
peopleDictionary.remove(person)
def newBorn():
person = Person()
peopleDictionary.append(person)
person.age = 0
start()
for y in range(0,300):
day()
print("DAY", y)
print("|||",len([person for person in peopleDictionary if person.death == False]))
saveFile1 = open("poblation.txt", "a")
write1 = str(len([person for person in peopleDictionary if person.death == False])) + "\n"
saveFile1.write(write1)
saveFile1.close()
| 44.617647 | 185 | 0.560976 | 364 | 3,034 | 4.67033 | 0.373626 | 0.047059 | 0.045294 | 0.06 | 0.286471 | 0.239412 | 0.188824 | 0.188824 | 0.188824 | 0.158824 | 0 | 0.031504 | 0.351351 | 3,034 | 67 | 186 | 45.283582 | 0.831301 | 0.244891 | 0 | 0.150943 | 0 | 0 | 0.016742 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075472 | false | 0 | 0.037736 | 0 | 0.132075 | 0.037736 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f5b2564937faaac3a6c4255ab14be6315e51e60 | 13,147 | py | Python | src/models/train_model copy 2.py | gummz/cell | a741ca4900a11f1080b7572ac969f765e5ac2ffd | [
"MIT"
] | null | null | null | src/models/train_model copy 2.py | gummz/cell | a741ca4900a11f1080b7572ac969f765e5ac2ffd | [
"MIT"
] | null | null | null | src/models/train_model copy 2.py | gummz/cell | a741ca4900a11f1080b7572ac969f765e5ac2ffd | [
"MIT"
] | null | null | null | # from skimage.io import imread
import datetime
import os
import pickle
import sys
from os import mkdir
# from torchsummary import summary
from os.path import join
from time import time
import cv2
import matplotlib.pyplot as plt
import numpy as np
import src.models.utils.utils as utils
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
import torchvision
from PIL import Image
from src.data.constants import (CV2_CONNECTED_ALGORITHM, DATA_DIR, IMG_DIR,
MASK_DIR, MEDIAN_FILTER_KERNEL,
NUMBER_CONNECTIVITY, SIMPLE_THRESHOLD)
from src.models.utils import transforms as T
from src.models.utils.model import get_instance_segmentation_model
from torch import optim
from torch.cuda.amp import GradScaler, autocast
from torch.optim.lr_scheduler import ReduceLROnPlateau, StepLR
from torchvision.utils import draw_bounding_boxes, draw_segmentation_masks
from BetaCellDataset import BetaCellDataset, get_dataloaders, print_unique
# import torch.optim as optim
# import torchvision
# Environment variable for memory management
alloc_conf = 'PYTORCH_CUDA_ALLOC_CONF'
try:
print(alloc_conf, os.environ[alloc_conf])
except KeyError:
print(alloc_conf, 'not found')
conn = NUMBER_CONNECTIVITY
algo = CV2_CONNECTED_ALGORITHM
kernel = MEDIAN_FILTER_KERNEL
threshold = SIMPLE_THRESHOLD
device = torch.device(
'cuda') if torch.cuda.is_available() else torch.device('cpu')
print(f'Running on {device}.')
# Set working directory to file location
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
# our dataset has two classes only - background and person
num_classes = 2
# get the model using our helper function
model = get_instance_segmentation_model(pretrained=True)
# move model to the right device
model.to(device)
# Get data
# Size of image
size = 1024
batch_size = 8 # 1024, 8; 128, 16
data_tr, data_val = get_dataloaders(
batch_size=batch_size, num_workers=2, resize=size)
# Unique identifier for newly saved objects
now = datetime.datetime.now()
time_str = f'{now.day:02d}_{now.month:02d}_{now.hour}H_{now.minute}M_{now.second}S'
save = f'interim/run_{time_str}'
def train(model, opt, epochs, data_tr, data_val, time_str):
'''Train'''
print(f'Training has begun for model: {time_str}')
scheduler = ReduceLROnPlateau(opt, threshold=0.01, verbose=True)
log_every = 1 # How often to print out losses
save_every = 10 # How often to save model
scaler = GradScaler()
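    # Mixed-precision training: the forward pass runs under autocast, and
    # GradScaler rescales the loss so fp16 gradients do not underflow before
    # scaler.step(opt) applies them.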
loss_list = ['loss_mask', 'loss_rpn_box_reg',
'loss_classifier', 'loss_objectness']
# loss_classifier, loss_objectness
# TODO: remove loss_classifier from loss_list
# and also objectness? if we don't care about detecting
# objects, maybe the faint ones will be caught as well.
x_val, y_val = next(iter(data_val))
# Transforms
scale_jitter = T.ScaleJitter((size / 2, size / 2), scale_range=[0.7, 1.5])
transforms_list = [T.RandomIoUCrop(), scale_jitter]
transforms = T.Compose(transforms_list)
tot_train_losses = []
tot_val_losses = []
for i, epoch in enumerate(range(epochs)):
tic = time()
print(f'\n* Epoch {epoch+1}/{epochs}')
train_loss = 0
model.train() # train mode
for j, (x_batch, y_batch) in enumerate(data_tr):
with autocast():
print_unique(x_batch[0].cpu(), 'before to(device)')
x_batch = [x.to(device) for x in x_batch]
print_unique(x_batch[0].cpu(), 'after to(device)')
y_batch = [{k: v.to(device) for k, v in t.items()}
for t in y_batch]
# TODO: Invalid box coordinates
# "Found invalid box [68.75, 7.03125, 68.75, 7.8125]"
# print_unique(x_batch[0].cpu(), 'before transforms')
# Transforms that can't run parallel in dataloader
# need to be performed here
for (x, y) in zip(x_batch, y_batch):
x, y = transforms_list[0](x.squeeze(0), y)
print(np.unique(y['boxes'].cpu()), 'after crop')
x, y = transforms_list[1](x.squeeze(0), y)
print(np.unique(y['boxes'].cpu()), 'after scale jitter')
x_batch = torch.stack(x_batch)
print_unique(x_batch[0].cpu(), 'after transforms')
x_batch.to(device)
# print(x_batch.shape)
# set parameter gradients to zero
opt.zero_grad(set_to_none=True)
# forward
# for i, image in enumerate(y_batch):
# print(f'Image {i}:')
# print(image['boxes'], '\n\n')
print_unique(x_batch[0].cpu(), 'before forward pass')
Y_pred = model(x_batch, y_batch)
# {'loss_classifier': tensor(0.8181, device='cuda:0', grad_fn=<NllLossBackward0>), 'loss_box_reg': tensor(0.0933, device='cuda:0', grad_fn=<DivBackward0>), 'loss_mask': tensor(1.2863, device='cuda:0',
# grad_fn=<BinaryCrossEntropyWithLogitsBackward0>), 'loss_objectness': tensor(8.1862, device='cuda:0',
# grad_fn=<BinaryCrossEntropyWithLogitsBackward0>), 'loss_rpn_box_reg': tensor(0.9159, device='cuda:0', grad_fn=<DivBackward0>)}
# print(Y_pred)
# sys.exit()
# sum(loss for loss in Y_pred.values())
# print(Y_pred)
# print(type(Y_pred.values()))
# Select only losses of interest
losses = [value for loss, value in Y_pred.items()
if loss in loss_list]
losses = sum(losses)
# losses = Y_pred['loss_mask']
# print(losses)
# losses.backward()
# opt.step()
scaler.scale(losses).backward()
scaler.step(opt)
scaler.update()
# calculate metrics to show the user
train_loss += float(losses / len(data_tr))
toc = time()
tot_train_losses.append(train_loss)
time_print = f'''Training loss: {train_loss: .3f};
Time: {(toc-tic)/60: .1f} minutes'''
if i % log_every == 0:
print(time_print)
# Validation
with torch.no_grad(), autocast():
x_val = [x.to(device) for x in x_val]
y_val = [{k: v.to(device) for k, v in t.items()}
for t in y_val]
# print(model(X_val), '\n'*5)
output = model(x_val, y_val) # losses
# float(sum(loss for loss in output.values()))
val_losses = [value for loss, value
in output.items() if loss in loss_list]
val_losses = sum(val_losses)
# val_losses = output['loss_mask']
scheduler.step(val_losses)
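            # ReduceLROnPlateau lowers the learning rate once validation loss stops improving.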
# Get current learning rate
if i % log_every == 0:
# print(f'Current learning rate: {scheduler}')
print(f'Validation loss: {val_losses:.3f}')
tot_val_losses.append(float(val_losses))
# Save progress every 50 epochs
if i % save_every == 0:
# Make folder unique to this run in order to save model and loss
try:
mkdir(save)
except FileExistsError:
pass
pickle.dump(model, open(join(save, f'model_{time_str}.pkl'), 'wb'))
if i == save_every:
# Visualize the masks generated by opencv
# for debugging purposes
dataset = BetaCellDataset(DATA_DIR)
img, target = dataset[500]
plt.subplot(1, 2, 1)
plt.imshow(img, cmap='viridis')
plt.subplot(1, 2, 2)
# Values in target['masks'] are either 0 or 1
# so multiply by 255 for image pixel values
plotted = torch.sum(target['masks'], dim=0) * 255
plt.imshow(plotted, cmap='gray')
plt.savefig(join(save, 'opencv_mask.jpg'))
# model.eval()
# y_hat = model(x_val)
# y_hat = [y['masks'] for y in y_hat]
# y_hat = [item.squeeze(1) for item in y_hat]
# y_hat = torch.cat(y_hat, dim=0) # .detach().cpu()
# print('yhat', y_hat.shape)
# for x in x_val:
# print('first', x.shape)
# x = np.array(x.cpu())
# x = cv2.normalize(x, x, alpha=0, beta=255,
# dtype=cv2.CV_8UC1, norm_type=cv2.NORM_MINMAX)
# print('after normalize', x.shape)
# # x = np.expand_dims(x, 0)
# # print('after expand', x.shape)
# x = cv2.cvtColor(x, cv2.COLOR_GRAY2RGB)
# print('after cvtcolor', x.shape)
# x = torch.tensor(x)
# print(x.shape)
# print(x)
# y_hat = draw_segmentation_masks(x, y_hat)
# print('yhat_val', y_hat.shape)
# # y_batch = [{k: v.to(device) for k, v in t.items()}
# # for t in y_batch]
# for k in range(batch_size):
# plt.subplot(2, batch_size, k+1)
# plt.imshow(np.rollaxis(x_val[k].numpy(), 0, 3), cmap='gray')
# plt.title('Real')
# plt.axis('off')
# plt.subplot(2, batch_size, k+batch_size+1)
# plt.imshow(y_hat[k], cmap='gray')
# plt.title('Output')
# plt.axis('off')
# plt.suptitle('%d / %d - loss: %f' % (epoch+1, epochs, val_losses))
# plt.show()
# plt.savefig(join(save, 'training_{i}.jpg'))
# pickle.dump([tot_train_losses, tot_val_losses], open('loss.pkl', 'wb'))
return tot_train_losses, tot_val_losses
def predict(model, data):
'''Predict'''
model.eval() # testing mode
Y_pred = [F.sigmoid(model(X_batch.to(device))) for X_batch, _ in data]
return np.array(Y_pred)
def bce_loss(y_real, y_pred):
'''bce_loss'''
return torch.mean(y_pred - y_real * y_pred
+ torch.log(1 + torch.exp(-y_pred)))
# and a learning rate scheduler which decreases the learning rate by
# 10x every 3 epochs
# lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
# step_size=3,
# gamma=0.1)
params = [p for p in model.parameters() if p.requires_grad]
# Start with learning rate of just 1
# due to decreasing learning rate on plateau
# num_epochs = 3
# learning_rates = [1] # [1e-8, 1e-6, 1e-4, 1e-2, 0.1, 1]
# optimizers = ['SGD', 'Adam']
# weight_decays = [1e-8, 1e-2, 0.1, 1]
# def grid_search(model, params, data_tr, data_val, train, num_epochs, learning_rates, optimizers, weight_decays):
# '''Grid search for hyperparameters'''
# losses = []
# for learning_rate in learning_rates:
# for weight_decay in weight_decays:
# for name in optimizers:
# if name == 'SGD':
# opt = torch.optim.SGD(params, lr=learning_rate,
# momentum=0.9, weight_decay=weight_decay)
# else:
# opt = optim.Adam(params, lr=learning_rate,
# weight_decay=weight_decay)
# print('____________________________________')
# print('Training:')
# grid = f'lr={learning_rate}, wd={weight_decay}, opt={name}'
# print(grid)
# loss = train(model, opt, num_epochs, data_tr, data_val)
# losses.append([loss, grid])
# return np.array(losses)
# losses = grid_search(model, params, data_tr, data_val, train, num_epochs, learning_rates, optimizers, weight_decays)
# losses = grid_search(model, params, data_tr, data_val, train,
# num_epochs, learning_rates, optimizers, weight_decays)
num_epochs = 50 # 500
lr = 0.00001 # 0.0001
wd = 0.001 # 0.001
opt = optim.Adam(params, lr=lr, weight_decay=wd, betas=[0.9, 0.99])
losses = train(model, opt, num_epochs, data_tr, data_val, time_str)
losses = np.array(losses).T
description = f'''{time_str}\n
Learning rate: {lr}\n
Weight decay: {wd}\n
Optimizer: {opt}\n
'''
# Special note that is saved as the name of a file with
# a name which is the value of the string `special_mark`
special_mark = ''
if special_mark:
    # np.savetxt expects array-like rows, so plain strings are written with open()/write() instead.
    with open(join(save, f'{special_mark}_{time_str}.txt'), 'w') as mark_file:
        mark_file.write(special_mark)
with open(join(save, f'descr_{time_str}.txt'), 'w') as descr_file:
    descr_file.write(description)
np.savetxt(join(save, f'losses_{time_str}.csv'), losses)
pickle.dump(model, open(join(save, f'model_{time_str}.pkl'), 'wb'))
pickle.dump(np.array([]), open(join(save, f'loss_{losses[1][-1]:.3f}.pkl'), 'wb'))
plt.subplot(121)
plt.plot(losses[0])
plt.title('Training loss')
plt.xlabel('Epoch')
plt.ylabel('Total loss')
plt.subplot(122)
plt.plot(losses[1])
plt.title('Validation loss')
plt.xlabel('Epoch')
plt.ylabel('Total loss')
| 35.436658 | 216 | 0.589412 | 1,763 | 13,147 | 4.212706 | 0.229722 | 0.012926 | 0.009425 | 0.012253 | 0.204389 | 0.183924 | 0.142992 | 0.10004 | 0.090346 | 0.072304 | 0 | 0.023781 | 0.289952 | 13,147 | 370 | 217 | 35.532432 | 0.771826 | 0.400624 | 0 | 0.060241 | 0 | 0.006024 | 0.112909 | 0.024861 | 0 | 0 | 0 | 0.002703 | 0 | 1 | 0.018072 | false | 0.012048 | 0.150602 | 0 | 0.186747 | 0.090361 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f5d4a00dd65eaeb976bd195b852ee952ce9f96b | 7,060 | py | Python | scripts/vsYaneuraOu_onnx.py | SakodaShintaro/Miacis | af3508076660cc6e19186f17fa436499e32164f5 | [
"BSD-3-Clause"
] | 10 | 2019-05-14T12:54:49.000Z | 2022-02-28T12:02:52.000Z | scripts/vsYaneuraOu_onnx.py | SakodaShintaro/Miacis | af3508076660cc6e19186f17fa436499e32164f5 | [
"BSD-3-Clause"
] | null | null | null | scripts/vsYaneuraOu_onnx.py | SakodaShintaro/Miacis | af3508076660cc6e19186f17fa436499e32164f5 | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/env python3
import os
import sys
# Import Ayane
script_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(script_dir + "/../../Ayane/source")
import shogi.Ayane as ayane
# Import the other required modules
import time
import glob
from natsort import natsorted
from collections import defaultdict
import argparse
from calc_elo_rate import calc_elo_rate
parser = argparse.ArgumentParser()
parser.add_argument("onnx_path", type=str)
parser.add_argument("--time1", type=int, default=1000)
parser.add_argument("--time2", type=int, default=1000)
parser.add_argument("--NodesLimit", type=int, default=0)
parser.add_argument("--game_num", type=int, default=1000)
parser.add_argument("--init_model_step", type=int, default=0)
parser.add_argument("--option", type=str, default=None)
parser.add_argument("--parameters", type=(lambda x: list(map(int, x.split()))))
parser.add_argument("--Suisho", action="store_true")
parser.add_argument("--total_num", type=(lambda x: list(map(int, x.split()))), default=[0, 0, 0])
parser.add_argument("--onnx", action="store_true")
args = parser.parse_args()
# Number of games (must be even, since each side plays both colors)
assert args.game_num % 2 == 0
# Hash size (shared by both engines)
hash_size = 2048
# Constants representing win, draw, and lose results
WIN = 0
DRAW = 1
LOSE = 2
# Dictionary converting Ayane's game results into the constants above
result_converter = {ayane.GameResult.BLACK_WIN: WIN,
                    ayane.GameResult.WHITE_WIN: LOSE,
                    ayane.GameResult.DRAW: DRAW,
                    ayane.GameResult.MAX_MOVES: DRAW}
# Create the server instance
server = ayane.AyaneruServer()
# Server settings
server.error_print = True
server.set_time_setting(f"byoyomi1p {args.time1} byoyomi2p {args.time2}")
server.moves_to_draw = 320
# YaneuraOu engine settings
server.engines[1].set_engine_options({"USI_Ponder": "false",
"NodesLimit": args.NodesLimit,
"USI_Hash": hash_size,
"BookMoves": 0,
"NetworkDelay": 0,
"NetworkDelay2": 0
})
if args.Suisho:
server.engines[1].connect(script_dir + "/../../Suisho/bin/YaneuraOu-by-gcc")
else:
server.engines[1].connect(script_dir + "/../../YaneuraOu/bin/YaneuraOu-by-gcc")
# Evaluate the {prefix}_{step}.model files in the current directory
curr_path = os.getcwd()
# Make sure the directory name ends with "/"
if curr_path[-1] != "/":
    curr_path += "/"
# Open the file that the results are written to
f = open(curr_path + "result.txt", mode="a")
f.write(f"\ntime1 = {args.time1}, time2 = {args.time2}, NodesLimit = {args.NodesLimit}\n")
model_name = f"{curr_path}{args.onnx_path}"
print(f"model_name = {model_name}")
if args.option is None:
    # Prepare Miacis
    print("Starting to prepare Miacis")
server.engines[0].set_engine_options({"random_turn": 30,
"print_interval": 10000000,
"USI_Hash": hash_size,
"model_name": model_name,
"use_calibration_cache": "false",
"use_fp16": "true"})
binary_suffix = None
if "sca" in model_name:
binary_suffix = "scalar"
elif "cat" in model_name:
binary_suffix = "categorical"
elif "onnx" in model_name:
binary_suffix = "dlshogi"
else:
print("unknown model_name")
exit()
server.engines[0].connect(f"{script_dir}/../src/cmake-build-release/Miacis_shogi_{binary_suffix}")
    # Initialize the results tally
    total_num = args.total_num
    # Only the first run should be initialized from the argument, so reset it to [0, 0, 0] here
    args.total_num = [0, 0, 0]
    # Initialize the set of game records
    sfens = defaultdict(int)
    # Miacis plays first (black) when i is even
    for i in range(sum(total_num), args.game_num):
        # Play one game
        server.game_start()
        while not server.game_result.is_gameover():
            time.sleep(1)
        # Check for duplicates
        if sfens[server.sfen] > 0:
            # Do not record the result if the same game record was generated twice
            print("\nDuplicate:", server.sfen)
        else:
            # Record the result
            result = result_converter[server.game_result]
            total_num[result if not server.flip_turn else LOSE - result] += 1
            sfens[server.sfen] += 1
        # Format the results so far as a string
        winning_rate = (total_num[WIN] + 0.5 * total_num[DRAW]) / sum(total_num)
        elo_rate = calc_elo_rate(winning_rate)
        result_str = f"{total_num[WIN]:3d} wins {total_num[DRAW]:3d} draws {total_num[LOSE]:3d} losses  win rate {100 * winning_rate:4.1f}%  relative rating {elo_rate:6.1f}"
        sys.stdout.write("\033[2K\033[G")
        print(result_str, end="\n" if i == args.game_num - 1 else "")
        sys.stdout.flush()
        # Flip which side plays first
        server.flip_turn = not server.flip_turn
        # Write to the results file
        f.write(result_str + "\n")
        f.flush()
else:
    # Search over the given parameter values
    for parameter in args.parameters:
        # Prepare Miacis (note: the original referenced an undefined `model_names`; the module-level `model_name` is used instead)
        server.engines[0].set_engine_options({"random_turn": 30,
                                              "print_interval": 10000000,
                                              "USI_Hash": hash_size,
                                              args.option: parameter,
                                              "model_name": model_name})
        binary_suffix = None
        if "sca" in model_name:
            binary_suffix = "scalar"
        elif "cat" in model_name:
            binary_suffix = "categorical"
        elif "onnx" in model_name:
            binary_suffix = "dlshogi"
        else:
            print("unknown model_name")
            exit()
        server.engines[0].connect(f"{script_dir}/../src/cmake-build-release/Miacis_shogi_{binary_suffix}")
        # Initialize the results tally
        total_num = args.total_num
        # Only the first run should be initialized from the argument, so reset it to [0, 0, 0] here
        args.total_num = [0, 0, 0]
        # Initialize the set of game records
        sfens = defaultdict(int)
        # Miacis plays first (black) when i is even
        for i in range(sum(total_num), args.game_num):
            # Play one game
            server.game_start()
            while not server.game_result.is_gameover():
                time.sleep(1)
            # Check for duplicates
            if sfens[server.sfen] > 0:
                # Do not record the result if the same game record was generated twice
                print("\nDuplicate:", server.sfen)
            else:
                # Record the result
                result = result_converter[server.game_result]
                total_num[result if not server.flip_turn else LOSE - result] += 1
                sfens[server.sfen] += 1
            # Format the results so far as a string
            winning_rate = (total_num[WIN] + 0.5 * total_num[DRAW]) / sum(total_num)
            elo_rate = calc_elo_rate(winning_rate)
            result_str = f"{args.option}={parameter:7d} {total_num[WIN]:3d} wins {total_num[DRAW]:3d} draws {total_num[LOSE]:3d} losses  win rate {100 * winning_rate:4.1f}%  relative rating {elo_rate:6.1f}"
            sys.stdout.write("\033[2K\033[G")
            print(result_str, end="\n" if i == args.game_num - 1 else "")
            sys.stdout.flush()
            # Flip which side plays first
            server.flip_turn = not server.flip_turn
            # Write to the results file
            f.write(result_str + "\n")
            f.flush()
server.terminate()
| 33.459716 | 176 | 0.57847 | 843 | 7,060 | 4.652432 | 0.24911 | 0.046915 | 0.04768 | 0.017338 | 0.594085 | 0.58822 | 0.568332 | 0.475268 | 0.461499 | 0.461499 | 0 | 0.02833 | 0.295042 | 7,060 | 210 | 177 | 33.619048 | 0.759695 | 0.077479 | 0 | 0.489051 | 0 | 0.021898 | 0.178842 | 0.057606 | 0 | 0 | 0 | 0 | 0.007299 | 1 | 0 | false | 0 | 0.065693 | 0 | 0.065693 | 0.080292 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f5ddea40c81c51e855e2a7038a29cfb3b27c7d8 | 624 | py | Python | tests/snmp/test_snmp_queue.py | jerry-chang3300/sonic-mgmt | 9814959583e491997678f8ad7dc763e408340b77 | [
"Apache-2.0"
] | null | null | null | tests/snmp/test_snmp_queue.py | jerry-chang3300/sonic-mgmt | 9814959583e491997678f8ad7dc763e408340b77 | [
"Apache-2.0"
] | null | null | null | tests/snmp/test_snmp_queue.py | jerry-chang3300/sonic-mgmt | 9814959583e491997678f8ad7dc763e408340b77 | [
"Apache-2.0"
] | null | null | null | import pytest
from ansible_host import AnsibleHost
def test_snmp_queues(ansible_adhoc, duthost, creds, collect_techsupport):
lhost = AnsibleHost(ansible_adhoc, 'localhost', True)
hostip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host']
snmp_facts = lhost.snmp_facts(host=hostip, version="v2c", community=creds["snmp_rocommunity"])['ansible_facts']
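    # Walk the interface MIBs gathered over SNMP v2c; every Ethernet interface is
    # expected to expose per-queue counters.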
for k, v in snmp_facts['snmp_interfaces'].items():
if "Ethernet" in v['description']:
            if 'queues' not in v:  # dict.has_key() was removed in Python 3
pytest.fail("port %s does not have queue counters" % v['name'])
| 41.6 | 115 | 0.708333 | 82 | 624 | 5.195122 | 0.609756 | 0.06338 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001912 | 0.161859 | 624 | 14 | 116 | 44.571429 | 0.81262 | 0 | 0 | 0 | 0 | 0 | 0.240385 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.2 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f5e20aa784c2b63c5f46d520cc79ba30f3d2c88 | 2,193 | py | Python | go_OCR.py | matmill5/KenBatcherPP-OCR | a3c6c4e7cdeb21d62e21f4ce7a1d995d22c65947 | [
"MIT"
] | 8 | 2020-03-05T00:52:41.000Z | 2021-05-22T22:35:55.000Z | go_OCR.py | matmill5/KenBatcherPP-OCR | a3c6c4e7cdeb21d62e21f4ce7a1d995d22c65947 | [
"MIT"
] | null | null | null | go_OCR.py | matmill5/KenBatcherPP-OCR | a3c6c4e7cdeb21d62e21f4ce7a1d995d22c65947 | [
"MIT"
] | 4 | 2020-01-29T10:11:25.000Z | 2021-05-22T22:36:02.000Z | # Project Title: Kenneth E. Batcher Historical Work OCR
# Project Description: Project to conduct OCR on the historical work of PP-father Kenneth E. Batcher
# Author: Matthew E. Miller
# Date: 02/01/2020 10:38:31
# Medium: https://medium.com/@matthew_earl_miller (where this is being published)
# Github: https://github.com/matmill5
# Linkedin: https://www.linkedin.com/in/matthew-miller-engineer/
# StackOverflow: https://stackoverflow.com/users/11937169/matthew-e-miller?tab=profile
# (c) Copyright by Matthew E. Miller
import pytesseract
from PIL import Image
import sys
from pdf2image import convert_from_path
import os
import io
# Note: Don't run this on my development laptop, will take up too much storage space with all of the image files.
# Note: Run this on my desktop machine.
# Note: The command below is good for setting the pytesseract path, if that becomes an issue.
# pytesseract.pytesseract.tesseract_cmd = r'C:\Users\Matthew\AppData\Local\Tesseract-OCR\tesseract.exe'
# For each pdf in the pdfs directory, convert the pdf to a jpg, do ocr on the jpg, and print the results to a results_filename[0:20].txt
for pdf in os.listdir('pdfs'):
    pdf_path = 'pdfs/' + str(pdf)  # os.listdir returns bare filenames, so prepend the directory
output_filename = "results_" + pdf_path.split('/')[-1].replace('.pdf','')[0:20] + ".txt"
pages = convert_from_path(pdf_path)
pg_cntr = 1
sub_dir = str("images/" + pdf_path.split('/')[-1].replace('.pdf','')[0:20] + "/")
if not os.path.exists(sub_dir):
os.makedirs(sub_dir)
for page in pages:
# if pg_cntr <= 20:
filename = "pg_"+str(pg_cntr)+'_'+pdf_path.split('/')[-1].replace('.pdf','.jpg')
page.save(sub_dir+filename)
        with io.open(output_filename, 'a+', encoding='utf8') as f:
            # unicode() is Python 2 only; in Python 3 these are already unicode strings.
            f.write("======================================================== PAGE " + str(pg_cntr) + " ========================================================\n")
            f.write(pytesseract.image_to_string(sub_dir+filename)+"\n")
            f.write("======================================================== ========================= ========================================================\n")
pg_cntr = pg_cntr + 1
| 47.673913 | 173 | 0.608755 | 301 | 2,193 | 4.332226 | 0.448505 | 0.027607 | 0.032209 | 0.029908 | 0.057515 | 0.057515 | 0.039877 | 0.039877 | 0 | 0 | 0 | 0.022162 | 0.156407 | 2,193 | 45 | 174 | 48.733333 | 0.682703 | 0.458276 | 0 | 0 | 0 | 0 | 0.27094 | 0.216239 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.272727 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f5eaf32b8e63b4925b030dc531531d8c2e8560a | 6,272 | py | Python | Lib/site-packages/auth/CAS/REST/service.py | srimannaarayana/boxtestpy | 181332102ccbb49dd50ddb31ca875cb9d3adcc2d | [
"bzip2-1.0.6"
] | 55 | 2015-10-16T01:14:33.000Z | 2021-07-07T15:32:04.000Z | Lib/site-packages/auth/CAS/REST/service.py | srimannaarayana/boxtestpy | 181332102ccbb49dd50ddb31ca875cb9d3adcc2d | [
"bzip2-1.0.6"
] | 8 | 2015-10-27T00:54:29.000Z | 2019-03-11T09:01:03.000Z | Lib/site-packages/auth/CAS/REST/service.py | srimannaarayana/boxtestpy | 181332102ccbb49dd50ddb31ca875cb9d3adcc2d | [
"bzip2-1.0.6"
] | 12 | 2015-10-16T14:11:35.000Z | 2021-08-11T07:20:17.000Z |
#try:
# import eventlet
# eventlet.monkey_patch()
#except:
# pass
import falcon
import json
try:
import ujson as json
except ImportError:
pass
from auth.CAS.authorization import Authorization
class AuthComponent(object):
def process_request(self, req, resp):
"""Process the request before routing it.
Args:
req: Request object that will eventually be
routed to an on_* responder method.
resp: Response object that will be routed to
the on_* responder.
"""
def process_resource(self, req, resp, resource, params):
"""Process the request after routing.
Note:
This method is only called when the request matches
a route to a resource.
Args:
req: Request object that will be passed to the
routed responder.
resp: Response object that will be passed to the
responder.
resource: Resource object to which the request was
routed.
params: A dict-like object representing any additional
params derived from the route's URI template fields,
that will be passed to the resource's responder
method as keyword arguments.
"""
def process_response(self, req, resp, resource):
"""Post-processing of the response (after routing).
Args:
req: Request object.
resp: Response object.
resource: Resource object to which the request was
routed. May be None if no route was found
for the request.
"""
if isinstance(resp.body, dict):
try:
resp.body = json.dumps(resp.body)
            except TypeError:  # json.dumps raises TypeError for unserializable bodies
                resp.status = falcon.HTTP_500
def stringify(req, resp):
"""
dumps all valid jsons
This is the latest after hook
"""
if isinstance(resp.body, dict):
try:
resp.body = json.dumps(resp.body)
        except TypeError:  # json.dumps raises TypeError for unserializable bodies
            resp.status = falcon.HTTP_500
class Ping:
def on_get(self, req, resp):
"""Handles GET requests"""
resp.body = {'message':'PONG'}
class Membership:
def on_get(self, req, resp, client, user, group):
cas = Authorization(client)
resp.body={'result':False}
if cas.has_membership(user, group):
resp.body={'result':True}
def on_post(self, req, resp, client, user, group):
cas = Authorization(client)
resp.body={'result':False}
if cas.add_membership(user, group):
resp.body={'result':True}
def on_delete(self, req, resp, client, user, group):
cas = Authorization(client)
resp.body={'result':False}
if cas.del_membership(user, group):
resp.body={'result':True}
class Permission:
def on_get(self, req, resp, client, group, name):
cas = Authorization(client)
resp.body={'result':False}
if cas.has_permission(group, name):
resp.body={'result':True}
def on_post(self, req, resp, client, group, name):
cas = Authorization(client)
resp.body={'result':False}
if cas.add_permission(group, name):
resp.body={'result':True}
def on_delete(self, req, resp, client, group, name):
cas = Authorization(client)
resp.body={'result':False}
if cas.del_permission(group, name):
resp.body={'result':True}
class UserPermission:
def on_get(self, req, resp, client, user, name):
cas = Authorization(client)
resp.body={'result':False}
if cas.user_has_permission(user,name):
resp.body={'result':True}
class GetUserPermissions:
def on_get(self, req, resp, client, user):
cas = Authorization(client)
resp.body = {'results': cas.get_user_permissions(user)}
class GetRolePermissions:
def on_get(self, req, resp, client, role):
cas = Authorization(client)
resp.body = {'results': cas.get_permissions(role)}
class GetRoleMembers:
def on_get(self, req, resp, client, role):
cas = Authorization(client)
resp.body = {'result': cas.get_role_members(role)}
class GetUserRoles:
def on_get(self, req, resp, client, user):
cas = Authorization(client)
resp.body = {'result': cas.get_user_roles(user)}
class ListRoles:
def on_get(self, req, resp, client):
cas = Authorization(client)
resp.body = {'result':cas.roles}
class WhichRolesCan:
def on_get(self, req, resp, client, name):
cas = Authorization(client)
resp.body = {'result':cas.which_roles_can(name)}
class WhichUsersCan:
def on_get(self, req, resp, client, name):
cas = Authorization(client)
resp.body = {'result':cas.which_users_can(name)}
class Role:
def on_post(self, req, resp, client, role):
cas = Authorization(client)
resp.body={'result':False}
if cas.add_role(role):
resp.body={'result':True}
def on_delete(self, req, resp, client, group):
cas = Authorization(client)
resp.body={'result':False}
if cas.del_role(group):
resp.body={'result':True}
api = falcon.API(middleware=[AuthComponent()])
api.add_route('/ping', Ping())
api.add_route('/api/membership/{client}/{user}/{group}', Membership()) ## POST DELETE GET
api.add_route('/api/permission/{client}/{group}/{name}', Permission()) ## POST DELETE GET
api.add_route('/api/has_permission/{client}/{user}/{name}', UserPermission()) ## GET
api.add_route('/api/user_permissions/{client}/{user}', GetUserPermissions()) ## GET
api.add_route('/api/role_permissions/{client}/{role}', GetRolePermissions()) ## GET
api.add_route('/api/user_roles/{client}/{user}', GetUserRoles()) ## GET
api.add_route('/api/members/{client}/{role}', GetRoleMembers()) ## GET
api.add_route('/api/role/{client}/{role}', Role()) ## POST DELETE
api.add_route('/api/roles/{client}', ListRoles()) ## GET
api.add_route('/api/which_roles_can/{client}/{name}', WhichRolesCan()) ## GET
api.add_route('/api/which_users_can/{client}/{name}', WhichUsersCan()) ## GET
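# falcon.API returns a WSGI callable, so this module can be served by any WSGI
# server, e.g. `gunicorn service:api` (assuming this file is deployed as service.py).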
| 30.153846 | 90 | 0.612883 | 768 | 6,272 | 4.919271 | 0.164063 | 0.067761 | 0.08523 | 0.071996 | 0.580466 | 0.568555 | 0.493912 | 0.424299 | 0.402859 | 0.357332 | 0 | 0.001294 | 0.260842 | 6,272 | 207 | 91 | 30.299517 | 0.813632 | 0.196429 | 0 | 0.443478 | 0 | 0 | 0.113148 | 0.073746 | 0 | 0 | 0 | 0 | 0 | 1 | 0.182609 | false | 0.008696 | 0.043478 | 0 | 0.33913 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f61732eda6d7dac5bbeb9ede36117e8a4a6794b | 2,895 | py | Python | sktime/classification/interval_based/tests/test_stsf.py | tombh/sktime | 53df0b9ed9d1fd800539165c414cc5611bcc56b3 | [
"BSD-3-Clause"
] | 1 | 2020-06-02T22:24:44.000Z | 2020-06-02T22:24:44.000Z | sktime/classification/interval_based/tests/test_stsf.py | abhishek-parashar/sktime | 1dfce6b41c2acdb576acfc04b09d11bf115c92d1 | [
"BSD-3-Clause"
] | 1 | 2020-11-20T13:51:20.000Z | 2020-11-20T13:51:20.000Z | sktime/classification/interval_based/tests/test_stsf.py | abhishek-parashar/sktime | 1dfce6b41c2acdb576acfc04b09d11bf115c92d1 | [
"BSD-3-Clause"
] | 3 | 2020-10-18T04:54:30.000Z | 2021-02-15T18:04:18.000Z | # -*- coding: utf-8 -*-
import numpy as np
from sklearn.model_selection import train_test_split
from sktime.classification.interval_based import SupervisedTimeSeriesForest
from sktime.datasets import load_gunpoint, load_italy_power_demand
def test_y_proba_on_gunpoint():
X, y = load_gunpoint(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.1, random_state=42
)
stsf = SupervisedTimeSeriesForest(random_state=42, n_estimators=20)
stsf.fit(X_train, y_train)
actual = stsf.predict_proba(X_test)
np.testing.assert_array_equal(actual, expected)
def test_stsf_on_power_demand():
# load power demand data
X_train, y_train = load_italy_power_demand(split="train", return_X_y=True)
X_test, y_test = load_italy_power_demand(split="test", return_X_y=True)
indices = np.random.RandomState(0).permutation(100)
# train STSF
stsf = SupervisedTimeSeriesForest(random_state=0, n_estimators=50)
stsf.fit(X_train, y_train)
score = stsf.score(X_test.iloc[indices], y_test[indices])
assert score >= 0.92
# expected y_proba
expected = np.array(
[
[
0.95,
0.05,
],
[
1.0,
0.0,
],
[
0.95,
0.05,
],
[
0.9,
0.1,
],
[
0.0,
1.0,
],
[
0.9,
0.1,
],
[
0.05,
0.95,
],
[
1.0,
0.0,
],
[
1.0,
0.0,
],
[
0.2,
0.8,
],
[
0.85,
0.15,
],
[
1.0,
0.0,
],
[
1.0,
0.0,
],
[
0.15,
0.85,
],
[
1.0,
0.0,
],
[
0.8,
0.2,
],
[
1.0,
0.0,
],
[
0.95,
0.05,
],
[
0.05,
0.95,
],
[
0.0,
1.0,
],
]
)
# def print_array(array):
# print('[')
# for sub_array in array:
# print('[')
# for value in sub_array:
# print(value.astype(str), end='')
# print(', ')
# print('],')
# print(']')
#
#
# if __name__ == "__main__":
# X, y = load_gunpoint(return_X_y=True)
# X_train, X_test, y_train, y_test = train_test_split(
# X, y, test_size=0.1, random_state=42
# )
# estimator = SupervisedTimeSeriesForest(random_state=42, n_estimators=20)
# estimator.fit(X_train, y_train)
# probas = estimator.predict_proba(X_test)
# print_array(probas)
| 20.531915 | 78 | 0.453541 | 332 | 2,895 | 3.698795 | 0.23494 | 0.035831 | 0.029316 | 0.022801 | 0.367264 | 0.289902 | 0.258958 | 0.174267 | 0.174267 | 0.141694 | 0 | 0.072673 | 0.42487 | 2,895 | 140 | 79 | 20.678571 | 0.664865 | 0.225561 | 0 | 0.590476 | 0 | 0 | 0.00406 | 0 | 0 | 0 | 0 | 0 | 0.019048 | 1 | 0.019048 | false | 0 | 0.038095 | 0 | 0.057143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f64abde377bef464d72b82d8d6a693e8b5b44e8 | 5,883 | py | Python | epidag/fitting/alg/ga.py | TimeWz667/PyEpiDAG | b337922fa7748ec2245e1dc63e032c00a773f0d5 | [
"MIT"
] | 2 | 2019-11-23T08:01:43.000Z | 2019-11-27T22:17:34.000Z | epidag/fitting/alg/ga.py | TimeWz667/PyEpiDAG | b337922fa7748ec2245e1dc63e032c00a773f0d5 | [
"MIT"
] | null | null | null | epidag/fitting/alg/ga.py | TimeWz667/PyEpiDAG | b337922fa7748ec2245e1dc63e032c00a773f0d5 | [
"MIT"
] | null | null | null | from epidag.fitting.alg.fitter import EvolutionaryFitter
from epidag.fitting.alg.genetic import *
from epidag.util import resample
class GA(EvolutionaryFitter):
DefaultParameters = {
'n_population': 100,
'n_generation': 20,
'p_mutation': 0.1,
'p_crossover': 0.1,
'target': 'MLE'
}
def __init__(self, model):
EvolutionaryFitter.__init__(self, model)
self.Population = list()
self.p_mutation = GA.DefaultParameters['p_mutation']
self.p_crossover = GA.DefaultParameters['p_crossover']
        self.n_cycle = GA.DefaultParameters['n_generation']  # DefaultParameters has no 'n_cycle' key
self.Moveable = self.Model.get_movable_nodes()
self.Mutators = list()
self.Crossover = AverageCrossover([d['Name'] for d in self.Moveable])
self.Target = GA.DefaultParameters['target']
for d in self.Moveable:
loci, lo, up = d['Name'], d['Lower'], d['Upper']
            if d['Type'] == 'Double':
                self.Mutators.append(DoubleMutator(loci, lo, up))
            elif d['Type'] == 'Integer':
                self.Mutators.append(IntegerMutator(loci, lo, up))
            elif d['Type'] == 'Binary':
                self.Mutators.append(BinaryMutator(loci))
self.Series = list()
self.Generation = 0
self.Stay = 0
self.MaxFitness = -float('inf')
self.MeanFitness = -float('inf')
def initialise(self):
self.Population = list()
self.Series = list()
self.Generation = 0
self.MaxFitness = -float('inf')
self.MeanFitness = -float('inf')
def fit(self, niter, **kwargs):
self.info('Initialising')
if 'n_cycle' in kwargs:
self.n_cycle = int(kwargs['n_cycle'])
if 'p_mutation' in kwargs:
self.p_mutation = kwargs['p_mutation']
if 'p_crossover' in kwargs:
self.p_mutation = kwargs['p_crossover']
if 'target' in kwargs:
self.Target = 'MLE' if kwargs['target'] == 'MLE' else 'MAP'
self.initialise()
self.__genesis(niter)
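        # Each generation below: recombine pairs (crossover), perturb genes
        # (mutation), resample the population by fitness (selection), then
        # update the elite and stop early once the best fitness stalls.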
for i in range(self.n_cycle):
self.Generation += 1
self.__crossover()
self.__mutation()
self.__selection()
self.__find_elitism()
if self.__termination():
break
self.info('Finished')
def update(self, n, **kwargs):
self.info('Updating')
self.Stay = 0
for i in range(n):
self.Generation += 1
self.__crossover()
self.__mutation()
self.__selection()
self.__find_elitism()
if self.__termination():
break
self.info('Finished')
def __genesis(self, n):
for _ in range(n):
p = self.Model.sample_prior()
p.LogLikelihood = self.Model.evaluate_likelihood(p)
self.Population.append(p)
def __crossover(self):
pop = self.Population
n = len(pop)
sel = rd.binomial(1, self.p_crossover, int(n / 2))
for i, s in enumerate(sel):
if s:
p1, p2 = self.Crossover.crossover(pop[i * 2], pop[i * 2 + 1], self.Model.BN)
self.Model.evaluate_prior(p1)
self.Model.evaluate_prior(p2)
p1.LogLikelihood = self.Model.evaluate_likelihood(p1)
p2.LogLikelihood = self.Model.evaluate_likelihood(p2)
pop[i * 2], pop[i * 2 + 1] = p1, p2
def __mutation(self):
for node, mut in zip(self.Moveable, self.Mutators):
i = node['Name']
vs = [gene[i] for gene in self.Population]
mut.set_scale(vs)
pop = self.Population
n = len(pop)
sel = rd.binomial(1, self.p_mutation, n)
for i, s in enumerate(sel):
if s:
p = pop[i] = pop[i].clone()
loc = dict()
for mut in self.Mutators:
loc[mut.Name] = mut.proposal(p[mut.Name])
p.impulse(loc, self.Model.BN)
self.Model.evaluate_prior(p)
p.LogLikelihood = self.Model.evaluate_likelihood(p)
def __selection(self):
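        # Resample the population with weights from the log-posterior (MAP) or
        # log-likelihood (MLE); survivors are cloned so duplicates evolve independently.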
for p in self.Population:
            if p.LogLikelihood == 0:
p.LogLikelihood = self.Model.evaluate_likelihood(p)
if self.Target == 'MAP':
wts = [p.LogPosterior for p in self.Population]
else:
wts = [p.LogLikelihood for p in self.Population]
pop, mean = resample(wts, self.Population)
self.Population = [p.clone() for p in pop]
self.MeanFitness = mean
def __find_elitism(self):
if self.Target == 'MAP':
self.BestFit = max(self.Population, key=lambda x: x.LogPosterior)
else:
self.BestFit = max(self.Population, key=lambda x: x.LogLikelihood)
fitness = self.BestFit.LogPosterior if self.Target == 'MAP' else self.BestFit.LogLikelihood
if fitness == self.MaxFitness:
self.Stay += 1
self.MaxFitness = fitness
self.Series.append({
'Generation': self.Generation,
'Max fitness': self.MaxFitness,
'Mean fitness': self.MeanFitness
})
self.info('Generation: {}, Mean fitness: {:.2E}, Max fitness: {:.2E}'.format(
self.Generation, self.MeanFitness, self.MaxFitness))
def __termination(self):
if self.Stay > 5:
return True
def summarise_fitness(self):
print('Model:', self.Model)
print('Target:', self.Target)
print('Best fitting', self.BestFit)
print('Max fitness', self.MaxFitness)
| 34.810651 | 100 | 0.540881 | 658 | 5,883 | 4.712766 | 0.194529 | 0.043534 | 0.043857 | 0.048371 | 0.344728 | 0.287972 | 0.287972 | 0.168978 | 0.154789 | 0.129636 | 0 | 0.010072 | 0.341832 | 5,883 | 168 | 101 | 35.017857 | 0.790806 | 0 | 0 | 0.302817 | 0 | 0 | 0.070166 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.077465 | false | 0 | 0.021127 | 0 | 0.119718 | 0.028169 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f6bf303889821547c65beb66af4566c417918d0 | 1,430 | py | Python | scheduled_tasks/get_short_volume.py | lunarnautics/Stocksera | 09c114f588e95be28068af88c525565fdb98f92b | [
"MIT"
] | null | null | null | scheduled_tasks/get_short_volume.py | lunarnautics/Stocksera | 09c114f588e95be28068af88c525565fdb98f92b | [
"MIT"
] | null | null | null | scheduled_tasks/get_short_volume.py | lunarnautics/Stocksera | 09c114f588e95be28068af88c525565fdb98f92b | [
"MIT"
] | null | null | null | import os
import sys
import sqlite3
import yfinance as yf
import pandas as pd  # explicit imports for pd.read_html and datetime.strptime,
from datetime import datetime  # in case helpers does not re-export them
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from scheduled_tasks.get_popular_tickers import full_ticker_list
from helpers import *
conn = sqlite3.connect(r"database/database.db", check_same_thread=False)
db = conn.cursor()
def short_volume(symbol):
"""
Get short volume data from http://shortvolumes.com
Parameters
----------
symbol: str
ticker symbol (e.g: AAPL)
"""
url = "http://shortvolumes.com/?t={}".format(symbol)
table = pd.read_html(url)
print("-" * 100)
print(f"Getting short volume data for {symbol} now ...")
try:
shorted_vol_daily = table[3].iloc[2:]
ticker = yf.Ticker(symbol)
history = ticker.history(period="1mo", interval="1d")
for index, row in shorted_vol_daily.iterrows():
date = datetime.strptime(row[0], "%Y-%m-%d")
close_price = round(history.loc[date]["Close"], 2)
db.execute("INSERT OR IGNORE INTO short_volume VALUES (?, ?, ?, ?, ?, ?)",
(symbol, row[0], close_price, row[1], row[2], row[3]))
conn.commit()
print("Short volume data for {} collected successfully!".format(symbol))
except IndexError:
print("Short volume data for {} not found!".format(symbol))
if __name__ == '__main__':
for i in full_ticker_list():
short_volume(i)
| 30.425532 | 86 | 0.620979 | 187 | 1,430 | 4.582888 | 0.550802 | 0.089848 | 0.070012 | 0.063011 | 0.053676 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013587 | 0.227972 | 1,430 | 46 | 87 | 31.086957 | 0.762681 | 0.07972 | 0 | 0 | 0 | 0 | 0.207944 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0 | 0.2 | 0 | 0.233333 | 0.133333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f6dac5ae82b1ce12288b5d7a2879b4ebd3b8dde | 521 | py | Python | invetronic/wsgi.py | jejimenez/invetronic | 999d58bc0224b6056b16d4e54fefcc81a22e334c | [
"MIT"
] | null | null | null | invetronic/wsgi.py | jejimenez/invetronic | 999d58bc0224b6056b16d4e54fefcc81a22e334c | [
"MIT"
] | null | null | null | invetronic/wsgi.py | jejimenez/invetronic | 999d58bc0224b6056b16d4e54fefcc81a22e334c | [
"MIT"
] | null | null | null | """
WSGI config for invetronic project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os, sys
from django.core.wsgi import get_wsgi_application
path = '/srv/www/vhosts/dummy-host.example.com/invetronic'
if path not in sys.path:
sys.path.append(path)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "invetronic.settings")
application = get_wsgi_application()
| 21.708333 | 78 | 0.761996 | 76 | 521 | 5.144737 | 0.684211 | 0.051151 | 0.092072 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006579 | 0.12476 | 521 | 23 | 79 | 22.652174 | 0.850877 | 0.416507 | 0 | 0 | 0 | 0 | 0.304054 | 0.239865 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.285714 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f6de3b19ebc7fc96fbc744700209694000cf876 | 665 | py | Python | data/data/process.py | CarryChang/EasyUse_FastApi | 094c8d9dc8fdb3cf8bf935fa22abcfc86fe678fe | [
"Apache-2.0"
] | 9 | 2020-02-23T02:11:19.000Z | 2022-01-05T09:20:01.000Z | data/data/process.py | CarryChang/EasyUse_FastApi | 094c8d9dc8fdb3cf8bf935fa22abcfc86fe678fe | [
"Apache-2.0"
] | 1 | 2020-11-27T09:08:14.000Z | 2020-11-27T09:08:14.000Z | data/data/process.py | CarryChang/Tetx-CNN-BaseLine | 35a375cc48a91c2248eeae948b9377d2735f1743 | [
"Apache-2.0"
] | 2 | 2020-06-08T11:46:41.000Z | 2020-07-23T09:03:14.000Z | #!/user/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2020/2/13 0013 20:08
# @Author : CarryChang
# @Software: PyCharm
# @email: coolcahng@gmail.com
# @web :CarryChang.top
path_list = ['neg_all.txt', 'pos_all.txt']
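# Merge both files into one "label<TAB>text" training file: positive reviews are
# labelled '5' and negative reviews '1'.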
train_all = open('train_all.txt', 'w', encoding='utf-8')
for path in path_list:
if 'pos' in path:
with open(path, 'r', encoding='utf-8') as file:
for content in file:
train_all.write('5'+'\t'+content.strip() + '\n')
else:
with open(path, 'r', encoding='utf-8') as file:
for content in file:
train_all.write('1' + '\t' + content.strip() + '\n')
train_all.close()
| 33.25 | 68 | 0.572932 | 98 | 665 | 3.795918 | 0.510204 | 0.107527 | 0.096774 | 0.069892 | 0.322581 | 0.322581 | 0.322581 | 0.322581 | 0.322581 | 0.322581 | 0 | 0.043478 | 0.239098 | 665 | 19 | 69 | 35 | 0.6917 | 0.249624 | 0 | 0.333333 | 0 | 0 | 0.134146 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f6f0bb875094707baeb0ff0fada95870451fa64 | 7,456 | py | Python | moto/cloudtrail/responses.py | symroe/moto | 4e106995af6f2820273528fca8a4e9ee288690a5 | [
"Apache-2.0"
] | null | null | null | moto/cloudtrail/responses.py | symroe/moto | 4e106995af6f2820273528fca8a4e9ee288690a5 | [
"Apache-2.0"
] | 1 | 2022-03-07T07:39:03.000Z | 2022-03-07T07:39:03.000Z | moto/cloudtrail/responses.py | symroe/moto | 4e106995af6f2820273528fca8a4e9ee288690a5 | [
"Apache-2.0"
] | null | null | null | """Handles incoming cloudtrail requests, invokes methods, returns responses."""
import json
from moto.core.responses import BaseResponse
from .models import cloudtrail_backends
from .exceptions import InvalidParameterCombinationException
class CloudTrailResponse(BaseResponse):
"""Handler for CloudTrail requests and responses."""
@property
def cloudtrail_backend(self):
"""Return backend instance specific for this region."""
return cloudtrail_backends[self.region]
def create_trail(self):
name = self._get_param("Name")
bucket_name = self._get_param("S3BucketName")
is_global = self._get_bool_param("IncludeGlobalServiceEvents", True)
is_multi_region = self._get_bool_param("IsMultiRegionTrail", False)
if not is_global and is_multi_region:
raise InvalidParameterCombinationException(
"Multi-Region trail must include global service events."
)
s3_key_prefix = self._get_param("S3KeyPrefix")
sns_topic_name = self._get_param("SnsTopicName")
log_validation = self._get_bool_param("EnableLogFileValidation", False)
is_org_trail = self._get_bool_param("IsOrganizationTrail", False)
cw_log_group_arn = self._get_param("CloudWatchLogsLogGroupArn")
cw_role_arn = self._get_param("CloudWatchLogsRoleArn")
kms_key_id = self._get_param("KmsKeyId")
tags_list = self._get_param("TagsList", [])
trail = self.cloudtrail_backend.create_trail(
name,
bucket_name,
s3_key_prefix,
sns_topic_name,
is_global,
is_multi_region,
log_validation,
is_org_trail,
cw_log_group_arn,
cw_role_arn,
kms_key_id,
tags_list,
)
return json.dumps(trail.description())
def get_trail(self):
name = self._get_param("Name")
trail = self.cloudtrail_backend.get_trail(name)
return json.dumps({"Trail": trail.description()})
def get_trail_status(self):
name = self._get_param("Name")
status = self.cloudtrail_backend.get_trail_status(name)
return json.dumps(status.description())
def describe_trails(self):
include_shadow_trails = self._get_bool_param("includeShadowTrails", True)
trails = self.cloudtrail_backend.describe_trails(include_shadow_trails)
return json.dumps(
{"trailList": [t.description(include_region=True) for t in trails]}
)
def list_trails(self):
all_trails = self.cloudtrail_backend.list_trails()
return json.dumps({"Trails": [t.short() for t in all_trails]})
def start_logging(self):
name = self._get_param("Name")
self.cloudtrail_backend.start_logging(name)
return json.dumps({})
def stop_logging(self):
name = self._get_param("Name")
self.cloudtrail_backend.stop_logging(name)
return json.dumps({})
def delete_trail(self):
name = self._get_param("Name")
self.cloudtrail_backend.delete_trail(name)
return json.dumps({})
def update_trail(self):
name = self._get_param("Name")
s3_bucket_name = self._get_param("S3BucketName")
s3_key_prefix = self._get_param("S3KeyPrefix")
sns_topic_name = self._get_param("SnsTopicName")
include_global_service_events = self._get_param("IncludeGlobalServiceEvents")
is_multi_region_trail = self._get_param("IsMultiRegionTrail")
enable_log_file_validation = self._get_param("EnableLogFileValidation")
is_organization_trail = self._get_param("IsOrganizationTrail")
cw_log_group_arn = self._get_param("CloudWatchLogsLogGroupArn")
cw_role_arn = self._get_param("CloudWatchLogsRoleArn")
kms_key_id = self._get_param("KmsKeyId")
trail = self.cloudtrail_backend.update_trail(
name=name,
s3_bucket_name=s3_bucket_name,
s3_key_prefix=s3_key_prefix,
sns_topic_name=sns_topic_name,
include_global_service_events=include_global_service_events,
is_multi_region_trail=is_multi_region_trail,
enable_log_file_validation=enable_log_file_validation,
is_organization_trail=is_organization_trail,
cw_log_group_arn=cw_log_group_arn,
cw_role_arn=cw_role_arn,
kms_key_id=kms_key_id,
)
return json.dumps(trail.description())
def put_event_selectors(self):
params = json.loads(self.body)
trail_name = params.get("TrailName")
event_selectors = params.get("EventSelectors")
advanced_event_selectors = params.get("AdvancedEventSelectors")
(
trail_arn,
event_selectors,
advanced_event_selectors,
) = self.cloudtrail_backend.put_event_selectors(
trail_name=trail_name,
event_selectors=event_selectors,
advanced_event_selectors=advanced_event_selectors,
)
return json.dumps(
dict(
TrailARN=trail_arn,
EventSelectors=event_selectors,
AdvancedEventSelectors=advanced_event_selectors,
)
)
def get_event_selectors(self):
params = json.loads(self.body)
trail_name = params.get("TrailName")
(
trail_arn,
event_selectors,
advanced_event_selectors,
) = self.cloudtrail_backend.get_event_selectors(trail_name=trail_name)
return json.dumps(
dict(
TrailARN=trail_arn,
EventSelectors=event_selectors,
AdvancedEventSelectors=advanced_event_selectors,
)
)
def add_tags(self):
params = json.loads(self.body)
resource_id = params.get("ResourceId")
tags_list = params.get("TagsList")
self.cloudtrail_backend.add_tags(resource_id=resource_id, tags_list=tags_list)
return json.dumps(dict())
def remove_tags(self):
resource_id = self._get_param("ResourceId")
tags_list = self._get_param("TagsList")
self.cloudtrail_backend.remove_tags(
resource_id=resource_id, tags_list=tags_list
)
return json.dumps(dict())
def list_tags(self):
params = json.loads(self.body)
resource_id_list = params.get("ResourceIdList")
resource_tag_list = self.cloudtrail_backend.list_tags(
resource_id_list=resource_id_list
)
return json.dumps(dict(ResourceTagList=resource_tag_list))
def put_insight_selectors(self):
trail_name = self._get_param("TrailName")
insight_selectors = self._get_param("InsightSelectors")
trail_arn, insight_selectors = self.cloudtrail_backend.put_insight_selectors(
trail_name=trail_name, insight_selectors=insight_selectors
)
return json.dumps(dict(TrailARN=trail_arn, InsightSelectors=insight_selectors))
def get_insight_selectors(self):
trail_name = self._get_param("TrailName")
trail_arn, insight_selectors = self.cloudtrail_backend.get_insight_selectors(
trail_name=trail_name
)
resp = {"TrailARN": trail_arn}
if insight_selectors:
resp["InsightSelectors"] = insight_selectors
return json.dumps(resp)
| 39.036649 | 87 | 0.663358 | 826 | 7,456 | 5.594431 | 0.145278 | 0.051504 | 0.075308 | 0.045012 | 0.522181 | 0.454447 | 0.351655 | 0.297338 | 0.285869 | 0.236745 | 0 | 0.002149 | 0.251207 | 7,456 | 190 | 88 | 39.242105 | 0.825542 | 0.0228 | 0 | 0.293413 | 0 | 0 | 0.088171 | 0.029161 | 0 | 0 | 0 | 0 | 0 | 1 | 0.101796 | false | 0 | 0.023952 | 0 | 0.233533 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f701bc08c1c4dfe533065e4d0ee3002ac64f361 | 1,661 | py | Python | c++basic/DATA_STRUCTURES/prime_mult.py | SayanGhoshBDA/code-backup | 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | [
"MIT"
] | 16 | 2018-11-26T08:39:42.000Z | 2019-05-08T10:09:52.000Z | c++basic/DATA_STRUCTURES/prime_mult.py | SayanGhoshBDA/code-backup | 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | [
"MIT"
] | 8 | 2020-05-04T06:29:26.000Z | 2022-02-12T05:33:16.000Z | c++basic/DATA_STRUCTURES/prime_mult.py | SayanGhoshBDA/code-backup | 8b6135facc0e598e9686b2e8eb2d69dd68198b80 | [
"MIT"
] | 5 | 2020-02-11T16:02:21.000Z | 2021-02-05T07:48:30.000Z | from multiprocessing.pool import ThreadPool
import datetime
import numpy as np
import math
def __is_divisible__(a,b):
if a%b == 0:
return 1
return 0
def isPrime_n(a):
#is prime noob
k=0
for i in range(2,a-1):
if __is_divisible__(a,i) == 1:
k=1
break
if k==0:
return 1
else:
return 0
def isPrime_g(a):
#is prime good
k=0
for i in range(2,int(a/2)+1):
if __is_divisible__(a,i) == 1:
k=1
break
if k==0:
return 1
else:
return 0
def isPrime_b(a):
# is prime best
k=0
for i in range(2,int(math.sqrt(a)+1)):
if __is_divisible__(a,i) == 1:
k=1
break
if k==0:
return 1
else:
return 0
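# The three variants above differ only in the trial-division bound: a-1 (naive),
# a/2, and sqrt(a). sqrt(a) suffices because any factor pair (p, q) of a
# satisfies min(p, q) <= sqrt(a).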
IS_PRIME = True
class mult_prime:
global IS_PRIME
def __is_divisible__(self,a,b):
if a%b == 0:
IS_PRIME = False
def method_prime(method_tr):
    st = datetime.datetime.now()
    for i in np.arange(100):
        k = method_tr(i) and i
        if k > 0:
            print(k,end=" ")
    # elapsed time is now minus start; the original subtracted in the wrong
    # order and printed the microseconds of a negative timedelta
    print("\nMicro-Seconds : ",(datetime.datetime.now()-st).microseconds)
def multi_prime(a):
    global IS_PRIME
    IS_PRIME = True  # reset the shared flag for each call
    arr = np.arange(2,int(math.sqrt(a)+1))
    #print(arr)
    # imap_unordered expects (func, iterable); the original passed the integer
    # 'a' as the iterable and 'arr' as the chunksize, on an unbound method.
    # Mapping a bound method over (a, divisor) pairs runs the checks eagerly.
    pool = ThreadPool(30)
    pool.starmap(mult_prime().__is_divisible__, [(a, b) for b in arr])
    pool.close()
    if IS_PRIME == True:
        print(a)
if __name__ == "__main__":
#print(__is_divisible__(5,2))
print("noob:",end="")
method_prime(isPrime_n)
print("good:",end="")
method_prime(isPrime_g)
print("best:",end="")
method_prime(isPrime_b)
print()
#multi_prime(10)
pass | 20.506173 | 73 | 0.5587 | 255 | 1,661 | 3.384314 | 0.25098 | 0.06489 | 0.069525 | 0.059096 | 0.282735 | 0.282735 | 0.24102 | 0.224797 | 0.1854 | 0.1854 | 0 | 0.036028 | 0.314871 | 1,661 | 81 | 74 | 20.506173 | 0.72232 | 0.05599 | 0 | 0.454545 | 0 | 0 | 0.026871 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.106061 | false | 0.015152 | 0.060606 | 0 | 0.30303 | 0.106061 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f74ee1b09a2b7f62368df7e3837e037cb648a61 | 11,277 | py | Python | cloud_functions/big_query.py | aerosense-ai/data-gateway | 019b8e4a114e16d363a3167171a457cefdbf004f | [
"Apache-2.0"
] | null | null | null | cloud_functions/big_query.py | aerosense-ai/data-gateway | 019b8e4a114e16d363a3167171a457cefdbf004f | [
"Apache-2.0"
] | 34 | 2021-12-20T14:51:57.000Z | 2022-03-30T16:47:04.000Z | cloud_functions/big_query.py | aerosense-ai/data-gateway | 019b8e4a114e16d363a3167171a457cefdbf004f | [
"Apache-2.0"
] | null | null | null | import copy
import datetime
import json
import logging
import uuid
from blake3 import blake3
from google.cloud import bigquery
from exceptions import (
ConfigurationAlreadyExists,
InstallationWithSameNameAlreadyExists,
SensorTypeWithSameReferenceAlreadyExists,
)
logger = logging.getLogger(__name__)
SENSOR_NAME_MAPPING = {
"Mics": "microphone",
"Baros_P": "barometer",
"Baros_T": "barometer_thermometer",
"Diff_Baros": "differential_barometer",
"Acc": "accelerometer",
"Gyro": "gyroscope",
"Mag": "magnetometer",
"Analog Vbat": "battery_voltmeter",
"Constat": "connection_statistics",
}
class BigQueryDataset:
"""A wrapper for the Google BigQuery client for adding sensor data for an installation to a BigQuery dataset.
:param str project_name: name of Google Cloud project the BigQuery dataset belongs to
:param str dataset_name: name of the BigQuery dataset
:return None:
"""
def __init__(self, project_name, dataset_name):
self.client = bigquery.Client()
self.dataset_id = f"{project_name}.{dataset_name}"
self.table_names = {
"configuration": f"{self.dataset_id}.configuration",
"installation": f"{self.dataset_id}.installation",
"sensor_type": f"{self.dataset_id}.sensor_type",
"sensor_data": f"{self.dataset_id}.sensor_data",
"microphone_data": f"{self.dataset_id}.microphone_data",
}
def add_sensor_data(self, data, configuration_id, installation_reference, label=None):
"""Insert sensor data into the dataset for the given configuration and installation references.
:param dict data: data from the sensors - the keys are the sensor names and the values are samples in the form of lists of lists
:param str configuration_id: the UUID of the configuration used to produce the given data
:param str installation_reference: the reference (name) of the installation that produced the data
:param str|None label: an optional label relevant to the given data
:raise ValueError: if the insertion fails
:return None:
"""
rows = []
for sensor_name, samples in data.items():
sensor_type_reference = SENSOR_NAME_MAPPING[sensor_name]
for sample in samples:
rows.append(
{
"datetime": datetime.datetime.fromtimestamp(sample[0]),
"sensor_type_reference": sensor_type_reference,
"sensor_value": sample[1:],
"configuration_id": configuration_id,
"installation_reference": installation_reference,
"label": label,
}
)
errors = self.client.insert_rows(table=self.client.get_table(self.table_names["sensor_data"]), rows=rows)
if errors:
raise ValueError(errors)
logger.info("Uploaded %d samples of sensor data to BigQuery dataset %r.", len(rows), self.dataset_id)
def record_microphone_data_location_and_metadata(
self,
path,
project_name,
configuration_id,
installation_reference,
label=None,
):
"""Record the file location and metadata for a window of microphone data.
:param str path: the Google Cloud Storage path to the microphone data
:param str project_name: the name of the project the storage bucket belongs to
:param str configuration_id: the UUID of the configuration used to produce the data
:param str installation_reference: the reference for the installation that produced the data
:param str|None label: the label applied to the gateway session that produced the data
:raise ValueError: if the addition fails
:return None:
"""
errors = self.client.insert_rows(
table=self.client.get_table(self.table_names["microphone_data"]),
rows=[
{
"path": path,
"project_name": project_name,
"configuration_id": configuration_id,
"installation_reference": installation_reference,
"label": label,
}
],
)
if errors:
raise ValueError(errors)
logger.info("Added microphone data location and metadata to BigQuery dataset %r.", self.dataset_id)
def add_sensor_type(self, name, reference, description=None, measuring_unit=None, metadata=None):
"""Add a new sensor type to the BigQuery dataset. The sensor name is slugified on receipt.
:param str name: the name of the new sensor
:param str reference: the reference name for the sensor (usually slugified)
:param str|None description: a description of what the sensor is and does
:param str|None measuring_unit: the unit the sensor measures its relevant quantity in
:param dict|None metadata: any useful metadata about the sensor e.g. sensitivities
:raise ValueError: if the addition fails
:return None:
"""
sensor_type_already_exists = self._get_field_if_exists(
table_name=self.table_names["sensor_type"],
field_name="reference",
comparison_field_name="reference",
value=reference,
)
if sensor_type_already_exists:
raise SensorTypeWithSameReferenceAlreadyExists(
f"A sensor type with the reference {reference!r} already exists."
)
if not isinstance(metadata, str):
metadata = json.dumps(metadata or {})
errors = self.client.insert_rows(
table=self.client.get_table(self.table_names["sensor_type"]),
rows=[
{
"reference": reference,
"name": name,
"description": description,
"unit": measuring_unit,
"metadata": metadata,
}
],
)
if errors:
raise ValueError(errors)
logger.info("Added new sensor %r to BigQuery dataset %r.", reference, self.dataset_id)
def add_installation(self, reference, turbine_id, blade_id, hardware_version, sensor_coordinates, location=None):
"""Add a new installation to the BigQuery dataset.
:param str reference: the name to give to the installation
:param str turbine_id:
:param str blade_id:
:param str hardware_version: the version of the sensor hardware at this installation
:param dict sensor_coordinates: sensor name mapped to an array of (x, y, r) coordinates for each individual sensor
:param str|None location: the geographical location of the installation in WKT format if relevant (it may not be if it's a wind tunnel which could be set up anywhere)
:raise cloud_functions.exceptions.InstallationWithSameNameAlreadyExists: if an installation with the given name already exists
:raise ValueError: if the addition fails
:return None:
"""
installation_already_exists = self._get_field_if_exists(
table_name=self.table_names["installation"],
field_name="reference",
comparison_field_name="reference",
value=reference,
)
if installation_already_exists:
raise InstallationWithSameNameAlreadyExists(
f"An installation with the reference {reference!r} already exists."
)
errors = self.client.insert_rows(
table=self.client.get_table(self.table_names["installation"]),
rows=[
{
"reference": reference,
"turbine_id": turbine_id,
"blade_id": blade_id,
"hardware_version": hardware_version,
"sensor_coordinates": json.dumps(sensor_coordinates),
"location": location,
}
],
)
if errors:
raise ValueError(errors)
logger.info("Added new installation %r to BigQuery dataset %r.", reference, self.dataset_id)
def add_configuration(self, configuration):
"""Add a configuration to the BigQuery dataset.
:param dict configuration: the configuration to add
:raise cloud_functions.exceptions.ConfigurationAlreadyExists: if an identical configuration already exists in the dataset or the write operation fails; this error includes the UUID of the existing configuration as an argument
:raise ValueError: if the addition fails
:return str: UUID of the configuration
"""
configuration = copy.deepcopy(configuration)
# Installation data is stored in a separate column, so pop it before the next step.
installation_data = configuration.pop("installation_data")
software_configuration_json = json.dumps(configuration)
software_configuration_hash = blake3(software_configuration_json.encode()).hexdigest()
configuration_id = self._get_field_if_exists(
table_name=self.table_names["configuration"],
field_name="id",
comparison_field_name="software_configuration_hash",
value=software_configuration_hash,
)
if configuration_id:
raise ConfigurationAlreadyExists(
f"An identical configuration already exists in the database with UUID {configuration_id}.",
configuration_id,
)
configuration_id = str(uuid.uuid4())
installation_data_json = json.dumps(installation_data)
installation_data_hash = blake3(installation_data_json.encode()).hexdigest()
errors = self.client.insert_rows(
table=self.client.get_table(self.table_names["configuration"]),
rows=[
{
"id": configuration_id,
"software_configuration": software_configuration_json,
"software_configuration_hash": software_configuration_hash,
"installation_data": installation_data_json,
"installation_data_hash": installation_data_hash,
}
],
)
if errors:
raise ValueError(errors)
logger.info("Added configuration %r to BigQuery dataset %r.", configuration_id, self.dataset_id)
return configuration_id
def _get_field_if_exists(self, table_name, field_name, comparison_field_name, value):
"""Get the value of the given field for the row of the given table for which the comparison field has the
given value.
:param str table_name:
:param str field_name:
:param str comparison_field_name:
:param any value:
:return str|None:
"""
result = list(
self.client.query(
f"SELECT {field_name} FROM `{table_name}` WHERE `{comparison_field_name}`='{value}' LIMIT 1"
).result()
)
if result:
return getattr(result[0], field_name)
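# Illustrative usage only; the project/dataset names and sample values below
# are made up:
#   dataset = BigQueryDataset(project_name="my-project", dataset_name="greta")
#   dataset.add_sensor_data(
#       data={"Baros_P": [[1640000000.0, 101.3]]},
#       configuration_id="11111111-2222-3333-4444-555555555555",
#       installation_reference="test-installation",
#   )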
| 40.131673 | 233 | 0.632704 | 1,237 | 11,277 | 5.591754 | 0.175424 | 0.025445 | 0.020674 | 0.01012 | 0.32543 | 0.263843 | 0.250831 | 0.203123 | 0.171751 | 0.158161 | 0 | 0.001129 | 0.292808 | 11,277 | 280 | 234 | 40.275 | 0.866207 | 0.289527 | 0 | 0.217143 | 0 | 0 | 0.199658 | 0.058343 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.045714 | 0 | 0.102857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f771c23c9697d84a96b2b27ef3d9f1ad26ac3be | 3,067 | py | Python | fri/parameter_searcher.py | lpfann/fri | 518e3b1419b0993272b9507d0dc01eee60afe26a | [
"MIT"
] | 8 | 2017-04-26T13:47:51.000Z | 2020-10-22T00:25:09.000Z | fri/parameter_searcher.py | lpfann/fri | 518e3b1419b0993272b9507d0dc01eee60afe26a | [
"MIT"
] | 77 | 2018-02-26T23:18:58.000Z | 2022-03-01T13:10:09.000Z | fri/parameter_searcher.py | lpfann/fri | 518e3b1419b0993272b9507d0dc01eee60afe26a | [
"MIT"
] | 3 | 2019-04-26T10:17:10.000Z | 2020-04-03T08:51:39.000Z | """
In this module we use hyperparameter search to find the parameters needed in our model.
Depending on the input model we sample parameters from a random distribution.
The sampling rate can be increased.
The model with the best internally defined accuracy is picked.
To increase robustness we use cross validation.
"""
import warnings
from sklearn.exceptions import FitFailedWarning
warnings.filterwarnings(action="ignore", category=FitFailedWarning)
from pprint import pprint
from typing import Tuple
import numpy as np
from sklearn.model_selection import RandomizedSearchCV
from fri.model.base_initmodel import InitModel
def find_best_model(
model_template: InitModel,
hyperparameters: dict,
data: Tuple[np.ndarray, np.ndarray],
random_state: np.random.RandomState,
n_iter: int,
n_jobs: int,
verbose: int = 0,
lupi_features=None,
kwargs: dict = None,
) -> Tuple[InitModel, float]:
"""
Search function which wraps `sklearns` `RandomizedSearchCV` function.
We use distributions and parameters defined in the `model_template`.
Parameters
----------
model_template : InitModel
A model template which is used to fit data.
hyperparameters : dict
Dictionary of hyperparameters.
data : tuple
Tuple of data (X,y)
random_state : RandomState
numpy RandomState object
n_iter : int
Amount of search samples.
n_jobs : int
Allows multiprocessing with `n_jobs` threads.
verbose : int
Allows verbose output when `verbose>0`.
lupi_features : int
Amount of lupi_features
kwargs : dict
        Placeholder, dict to pass into fit functions.
    Returns
    -------
    best_model, best_score : Tuple[InitModel, float]
        The refitted best estimator and its score on the input data.
    """
    # guard against the default of None, which cannot be compared to an int
    if lupi_features is not None and lupi_features > 0:
model = model_template(lupi_features=lupi_features)
else:
model = model_template()
scorer, metric = model.make_scorer()
if scorer is None:
refit = True
else:
refit = metric
searcher = RandomizedSearchCV(
model,
hyperparameters,
scoring=scorer,
random_state=random_state,
refit=refit,
cv=3,
n_iter=n_iter,
n_jobs=n_jobs,
error_score=np.nan,
verbose=verbose,
)
X, y = data
# Ignore warnings for extremely bad model_state (when precision=0)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
searcher.fit(X, y)
best_model: InitModel = searcher.best_estimator_
best_score = best_model.score(X, y)
if verbose > 0:
print("*" * 20, "Best found baseline model", "*" * 20)
pprint(best_model)
print("score: ", best_score)
for k, v in best_model.constraints.items():
pprint((f"{k}: {v}"))
for k, v in best_model.model_state.items():
if hasattr(v, "shape"):
pprint((f"{k}: shape {v.shape}"))
else:
if "slack" in k:
continue
pprint((f"{k}: {v}"))
print("*" * 30)
return best_model, best_score
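# Minimal usage sketch; "SomeModel" and the parameter distribution are
# placeholders, not part of this package's public API:
#   from scipy.stats import loguniform
#   best_model, best_score = find_best_model(
#       model_template=SomeModel,                  # an InitModel subclass
#       hyperparameters={"C": loguniform(1e-3, 1e3)},
#       data=(X, y),
#       random_state=np.random.RandomState(0),
#       n_iter=50,
#       n_jobs=1,
#       lupi_features=0,
#   )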
| 28.933962 | 86 | 0.637757 | 372 | 3,067 | 5.139785 | 0.36828 | 0.03295 | 0.028243 | 0.007322 | 0.016736 | 0.016736 | 0 | 0 | 0 | 0 | 0 | 0.005413 | 0.277144 | 3,067 | 105 | 87 | 29.209524 | 0.857014 | 0.338441 | 0 | 0.081967 | 0 | 0 | 0.048896 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016393 | false | 0 | 0.114754 | 0 | 0.147541 | 0.131148 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f799372036198fbf2f4c84b3580b761577152f6 | 1,077 | py | Python | src/tests/test_alembic.py | yifengyou/learn-pagure | e54ba955368918c92ad2be6347b53bb2c24a228c | [
"Unlicense"
] | null | null | null | src/tests/test_alembic.py | yifengyou/learn-pagure | e54ba955368918c92ad2be6347b53bb2c24a228c | [
"Unlicense"
] | null | null | null | src/tests/test_alembic.py | yifengyou/learn-pagure | e54ba955368918c92ad2be6347b53bb2c24a228c | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
"""
(c) 2017 - Copyright Red Hat Inc
Authors:
Pierre-Yves Chibon <pingou@pingoured.fr>
"""
from __future__ import unicode_literals, absolute_import
import os
import subprocess
import unittest
import six
REPO_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
class TestAlembic(unittest.TestCase):
"""This test class contains tests pertaining to alembic."""
def test_alembic_history(self):
"""Enforce a linear alembic history.
        This test runs the `alembic-3 history | grep ' (head), '` command
        and ensures it returns only one line.
"""
proc1 = subprocess.Popen(
["alembic-3", "history"], cwd=REPO_PATH, stdout=subprocess.PIPE
)
proc2 = subprocess.Popen(
["grep", " (head), "], stdin=proc1.stdout, stdout=subprocess.PIPE
)
stdout = proc2.communicate()[0]
stdout = stdout.strip().decode("utf-8").split("\n")
self.assertEqual(len(stdout), 1)
if __name__ == "__main__":
unittest.main(verbosity=2)
| 22.914894 | 77 | 0.632312 | 128 | 1,077 | 5.148438 | 0.640625 | 0.027314 | 0.060698 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016888 | 0.230269 | 1,077 | 46 | 78 | 23.413043 | 0.778046 | 0.277623 | 0 | 0 | 0 | 0 | 0.062928 | 0 | 0 | 0 | 0 | 0 | 0.052632 | 1 | 0.052632 | false | 0 | 0.263158 | 0 | 0.368421 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f7c5b8ae437b84529e4ceb74be74fe56162a39d | 792 | py | Python | data-processing/pwpp.py | tjwixtrom/verification | 00b42da31502a06388f826c3de359d07997496dc | [
"BSD-3-Clause"
] | 2 | 2020-03-24T20:30:11.000Z | 2021-08-18T03:11:26.000Z | data-processing/pwpp.py | tjwixtrom/wrf-scripts | 00b42da31502a06388f826c3de359d07997496dc | [
"BSD-3-Clause"
] | 3 | 2018-07-25T16:33:09.000Z | 2018-08-23T14:57:08.000Z | data-processing/pwpp.py | tjwixtrom/wrf-scripts | 00b42da31502a06388f826c3de359d07997496dc | [
"BSD-3-Clause"
] | null | null | null | #!/home/twixtrom/miniconda3/envs/wrfpost/bin/python
import sys
from PWPP import wrfpost
import numpy as np
from metpy.units import units
# import xarray as xr
infile = sys.argv[1]
outfile = sys.argv[2]
variables = ['temp',
'dewpt',
'uwnd',
'vwnd',
'wwnd',
'avor',
'height',
'temp_2m',
'dewpt_2m',
'mslp',
'q_2m',
'u_10m',
'v_10m',
'timestep_pcp',
'UH',
'cape',
'refl'
]
plevs = np.array([1000, 925, 850, 700, 500, 300, 250]) * units('hPa')
# chunks = {'Time': 1}
wrfpost(infile, outfile, variables, plevs=plevs, compression=True, complevel=4,
format='NETCDF4')
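# Run from the command line with input and output paths (placeholders):
#   python pwpp.py wrfout_d01_2016-01-01.nc wrfpost_2016-01-01.nc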
| 24 | 79 | 0.482323 | 85 | 792 | 4.423529 | 0.705882 | 0.058511 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.071138 | 0.378788 | 792 | 32 | 80 | 24.75 | 0.693089 | 0.114899 | 0 | 0 | 0 | 0 | 0.137536 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.148148 | 0 | 0.148148 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f7c75b19ac12c78114fee1f62eaa9cce9c12914 | 32,575 | py | Python | gui/routes_neighbours_IF_all.py | ejwillemse/mcarptif_gui | 3af127b75c3de483be22f5741d2c96a87dd58d6e | [
"MIT"
] | 1 | 2020-02-27T04:08:48.000Z | 2020-02-27T04:08:48.000Z | gui/routes_neighbours_IF_all.py | ejwillemse/mcarptif_gui | 3af127b75c3de483be22f5741d2c96a87dd58d6e | [
"MIT"
] | 2 | 2021-06-08T20:36:04.000Z | 2022-01-13T01:50:59.000Z | gui/routes_neighbours_IF_all.py | ejwillemse/mcarptif_gui | 3af127b75c3de483be22f5741d2c96a87dd58d6e | [
"MIT"
] | null | null | null | '''
Created on 05 May 2010
Local-search neighbourhoods for arc routing: remove-, insert- and exchange-
moves of single and double arcs, within one route and between routes (the
multi-route variants respect vehicle capacity and the maximum trip length).
@author: ejwillemse
'''
from copy import deepcopy
#===============================================================================
#
#===============================================================================
class SinlgeRouteModifications(object):
def __init__(self, info):
self.info = info
self.SP = self.info.spDistanceD
self.Inv = self.info.invArcD
def singleRemove1route(self, routeI, position):
arcRemoved = routeI[position]
costChange = self.SP[routeI[position-1]][routeI[position+1]] - self.SP[routeI[position-1]][routeI[position]] - self.SP[routeI[position]][routeI[position+1]]
modifiedRoute = routeI[:]
del modifiedRoute[position]
return(modifiedRoute, arcRemoved, costChange)
def singleInsert1Route(self, routeI, arcInsert, insertPosistion):
i = insertPosistion
costChange = self.SP[routeI[i-1]][arcInsert] + self.SP[arcInsert][routeI[i]] - self.SP[routeI[i-1]][routeI[i]]
modifiedRoute = routeI[:]
modifiedRoute[i:i] = [arcInsert]
return(modifiedRoute, costChange)
def findBestInversionArcInsert(self, routeI, arcRemoved, position):
modifiedRoute1 = routeI[:]
j = position
(modifiedRoute2, costChange2) = self.singleInsert1Route(modifiedRoute1, arcRemoved, j)
if self.Inv[arcRemoved]:
(modifiedRoute3, costChange3) = self.singleInsert1Route(modifiedRoute1, self.Inv[arcRemoved], j)
if costChange3 < costChange2: modifiedRoute2, costChange2 = modifiedRoute3[:], costChange3
return(modifiedRoute2, costChange2)
    def singleBestInsert1Route(self, routeI, arcInsert, depots = True, tabuPositions = None):
        nPositions = len(routeI)
        bestCostChange = 1e300000
        # guard against the default of None, which is not a container, and
        # initialise the returns so they are defined even if no position is valid
        if tabuPositions is None: tabuPositions = []
        bestInsertPosition = None
        bestModifiedRoute = routeI[:]
        if depots == True:
            for i in range(1,nPositions-1):
                if i not in tabuPositions:
                    costChange = self.SP[routeI[i-1]][arcInsert] + self.SP[arcInsert][routeI[i]] - self.SP[routeI[i-1]][routeI[i]]
                    if costChange < bestCostChange:
                        bestCostChange = costChange
                        bestInsertPosition = i
                        bestModifiedRoute = routeI[:]
                        bestModifiedRoute[i:i] = [arcInsert]
        return(bestModifiedRoute, bestCostChange, bestInsertPosition)
def doubleRemove1route(self, routeI, position):
twoArcsRemoved = (routeI[position],routeI[position+1])
costChange = self.SP[routeI[position-1]][routeI[position+2]] - self.SP[routeI[position-1]][routeI[position]] - self.SP[routeI[position]][routeI[position+1]] - self.SP[routeI[position+1]][routeI[position+2]]
modifiedRoute = routeI[:]
del modifiedRoute[position+1]
del modifiedRoute[position]
return(modifiedRoute, twoArcsRemoved, costChange)
def doubleInsert1Route(self, routeI, twoArcsInsert, insertPosistion):
i = insertPosistion
(arc1, arc2) = twoArcsInsert
costChange = self.SP[routeI[i-1]][arc1] + self.SP[arc1][arc2] + self.SP[arc2][routeI[i]] - self.SP[routeI[i-1]][routeI[i]]
modifiedRoute = routeI[:]
modifiedRoute[i:i] = [arc1, arc2]
return(modifiedRoute, costChange)
    def findBestInversionDoubleArcInsert(self, routeI, twoArcsRemoved, position):
        (arcs1, arcs2) = twoArcsRemoved
        modifiedRoute1 = routeI[:]
        j = position
        # evaluate both orderings of the arc pair plus every inversion
        # combination, keeping the cheapest insertion
        (modifiedRoute2, costChange2) = self.doubleInsert1Route(modifiedRoute1, (arcs1, arcs2), j)
        # the original evaluated (arcs1, arcs2) twice; the second call must
        # test the reversed ordering (arcs2, arcs1)
        (modifiedRoute2a, costChange2a) = self.doubleInsert1Route(modifiedRoute1, (arcs2, arcs1), j)
        if costChange2a < costChange2: modifiedRoute2, costChange2 = modifiedRoute2a, costChange2a
        if self.Inv[arcs1]:
            (modifiedRoute3, costChange3) = self.doubleInsert1Route(modifiedRoute1, (self.Inv[arcs1], arcs2), j)
            if costChange3 < costChange2: modifiedRoute2, costChange2 = modifiedRoute3, costChange3
            (modifiedRoute3a, costChange3a) = self.doubleInsert1Route(modifiedRoute1, (arcs2, self.Inv[arcs1]), j)
            if costChange3a < costChange2: modifiedRoute2, costChange2 = modifiedRoute3a, costChange3a
        if self.Inv[arcs2]:
            (modifiedRoute4, costChange4) = self.doubleInsert1Route(modifiedRoute1, (arcs1, self.Inv[arcs2]), j)
            if costChange4 < costChange2: modifiedRoute2, costChange2 = modifiedRoute4, costChange4
            (modifiedRoute4a, costChange4a) = self.doubleInsert1Route(modifiedRoute1, (self.Inv[arcs2], arcs1), j)
            if costChange4a < costChange2: modifiedRoute2, costChange2 = modifiedRoute4a, costChange4a
        if (self.Inv[arcs1]!=None) & (self.Inv[arcs2]!=None):
            (modifiedRoute5, costChange5) = self.doubleInsert1Route(modifiedRoute1, (self.Inv[arcs1], self.Inv[arcs2]), j)
            if costChange5 < costChange2: modifiedRoute2, costChange2 = modifiedRoute5, costChange5
            (modifiedRoute5a, costChange5a) = self.doubleInsert1Route(modifiedRoute1, (self.Inv[arcs2],self.Inv[arcs1]), j)
            if costChange5a < costChange2: modifiedRoute2, costChange2 = modifiedRoute5a, costChange5a
        return(modifiedRoute2, costChange2)
    def doubleBestInsert1Route(self, routeI, twoArcsInsert, depots = True, tabuPositions = None):
        nPositions = len(routeI)
        bestCostChange = 1e300000
        [arc1, arc2] = twoArcsInsert
        # same guards as in singleBestInsert1Route
        if tabuPositions is None: tabuPositions = []
        bestInsertPosition = None
        bestModifiedRoute = routeI[:]
        if depots == True:
            for i in range(1,nPositions-1):
                if (i not in tabuPositions) & (i+1 not in tabuPositions):
                    costChange = self.SP[routeI[i-1]][arc1] + self.SP[arc1][arc2] + self.SP[arc2][routeI[i]] - self.SP[routeI[i-1]][routeI[i]]
                    if costChange < bestCostChange:
                        bestCostChange = costChange
                        bestInsertPosition = i
                        bestModifiedRoute = routeI[:]
                        bestModifiedRoute[i:i] = [arc1, arc2]
        return(bestModifiedRoute, bestCostChange, bestInsertPosition)
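# Toy illustration of the cost bookkeeping above (made-up shortest-path
# distances, not from any benchmark instance): with SP = {0: {5: 2, 7: 4},
# 5: {7: 3}} and routeI = [0, 5, 7], singleRemove1route(routeI, 1) gives
# costChange = SP[0][7] - SP[0][5] - SP[5][7] = 4 - 2 - 3 = -1, i.e. the
# deadheading cost drops by 1 when arc 5 is taken out of the route.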
#===============================================================================
#
#===============================================================================
class SingleRouteRemoveInsertProcedure(SinlgeRouteModifications):
def __init__(self, info):
SinlgeRouteModifications.__init__(self, info)
self.info = info
self.SP = self.info.spDistanceD
self.Inv = self.info.invArcD
self.bestChange = 1e300000
self.bestNeighbour = [[self.bestChange]]
def removeInsertAllArcs(self, routeI, routeP = None):
nArcs = len(routeI)
        # this list (also in the sibling methods) is returned for API
        # compatibility but never populated; only the incumbent best move is
        # tracked on self.bestNeighbour
        removalInsertionNeighbourhood = []
for i in range(1,nArcs-1):
(modifiedRoute1, arcRemoved, costChange1) = self.singleRemove1route(routeI, i)
insertPosition = range(1, nArcs-1)
del insertPosition[i-1]
for j in insertPosition:
(modifiedRoute2, costChange2) = self.findBestInversionArcInsert(modifiedRoute1, arcRemoved, j)
if i < j:tabusTemp = [i-1,i,i+1,j,j+1]
else:tabusTemp = [i-1,i,i+1,j-1,j]
neighbour = {'routes':routeP,'pos':(i,j),'arc':arcRemoved,'costDelta':costChange1+costChange2,'loadDelta':(),'serviceDelta':(),'modifiedRoutes':modifiedRoute2,'tabus':deepcopy(tabusTemp)}
if (costChange1+costChange2 < self.bestChange):
self.bestChange = costChange1+costChange2
self.bestNeighbour = (costChange1+costChange2,neighbour,'RemoveInsertAllArcs','oneRoute')
return(removalInsertionNeighbourhood)
def exchangeAllArcs(self, routeI, routeP = None):
nArcs = len(routeI)
removalInsertionNeighbourhood = []
for i in range(1,nArcs-3):
(modifiedRoute1, arcRemoved1, costChange1) = self.singleRemove1route(routeI, i)
for j in range(i+2,nArcs-1):
(modifiedRoute2, arcRemoved2, costChange2) = self.singleRemove1route(modifiedRoute1, j-1)
(modifiedRoute2, costChange3) = self.findBestInversionArcInsert(modifiedRoute2, arcRemoved1, j-1)
(modifiedRoute2, costChange4) = self.findBestInversionArcInsert(modifiedRoute2, arcRemoved2, i)
neighbour = {'routes':routeP,'pos':(i,j),'arc':(arcRemoved1,arcRemoved2),'costDelta':costChange1+costChange2+costChange3+costChange4,'loadDelta':(),'serviceDelta':(),'modifiedRoutes':modifiedRoute2,'tabus':[i-1,i,i+1,j-1,j,j+1]}
if (costChange1+costChange2+costChange3+costChange4 < self.bestChange):
self.bestChange = costChange1+costChange2+costChange3+costChange4
self.bestNeighbour = (costChange1+costChange2+costChange3+costChange4,neighbour,'ExchangeAllArcs','oneRoute')
return(removalInsertionNeighbourhood)
def removeInsertAllDoubleArcs(self, routeI, routeP = None):
nArcs = len(routeI)
removalInsertionNeighbourhood = []
for i in range(1,nArcs-2):
(modifiedRoute1, twoArcsRemoved, costChange1) = self.doubleRemove1route(routeI, i)
insertPosition = range(1, nArcs-2)
del insertPosition[i-1]
for j in insertPosition:
if i < j:(modifiedRoute2, costChange2) = self.findBestInversionDoubleArcInsert(modifiedRoute1, twoArcsRemoved, j-1)
else:(modifiedRoute2, costChange2) = self.findBestInversionDoubleArcInsert(modifiedRoute1, twoArcsRemoved, j)
if i < j:tabusTemp = [i-1,i,i+1,i+2,j,j+1]
else:tabusTemp = [i-1,i,i+1,i+2,j-1,j]
neighbour = {'routes':routeP,'pos':(i,j),'arc':twoArcsRemoved,'costDelta':costChange1+costChange2,'loadDelta':(),'serviceDelta':(),'modifiedRoutes':modifiedRoute2,'tabus':deepcopy(tabusTemp)}
if (costChange1+costChange2 < self.bestChange):
self.bestChange = costChange1+costChange2
self.bestNeighbour = (costChange1+costChange2,neighbour,'RemoveInsertAllDoubleArcs','oneRoute')
return(removalInsertionNeighbourhood)
def exchangeAllDoubleArcs(self, routeI, routeP = None):
nArcs = len(routeI)
removalInsertionNeighbourhood = []
for i in range(1,nArcs-4):
(modifiedRoute1, twoArcsRemoved1, costChange1) = self.doubleRemove1route(routeI, i)
for j in range(i+3,nArcs-2):
(modifiedRoute2, twoArcsRemoved2, costChange2) = self.doubleRemove1route(modifiedRoute1, j-2)
(modifiedRoute2, costChange3) = self.findBestInversionDoubleArcInsert(modifiedRoute2, twoArcsRemoved1, j-2)
(modifiedRoute2, costChange4) = self.findBestInversionDoubleArcInsert(modifiedRoute2, twoArcsRemoved2, i)
neighbour = {'routes':routeP,'pos':(i,j),'arc':(twoArcsRemoved1,twoArcsRemoved2),'costDelta':costChange1+costChange2+costChange3+costChange4,'loadDelta':(),'serviceDelta':(),'modifiedRoutes':modifiedRoute2,'tabus':[i-1,i,i+1,i+2,j-1,j,j+1,j+2]}
if (costChange1+costChange2+costChange3+costChange4 < self.bestChange):
self.bestChange = costChange1+costChange2+costChange3+costChange4
self.bestNeighbour = (costChange1+costChange2+costChange3+costChange4,neighbour,'ExchangeAllDoubleArcs','oneRoute')
return(removalInsertionNeighbourhood)
def generateAllNeighbourhoodsSingleRoutes(self, route,routeP=None):
self.removeInsertAllArcs(route,routeP)
self.exchangeAllArcs(route,routeP)
self.removeInsertAllDoubleArcs(route,routeP)
self.exchangeAllDoubleArcs(route,routeP)
def generateAllRoutesNeighbourhoodsSingleRoutes(self, routes):
for routeP in range(len(routes)):
self.generateAllNeighbourhoodsSingleRoutes(routes[routeP], routeP)
#===============================================================================
#
#===============================================================================
class MultipleRouteRemoveInsertProcedure(SinlgeRouteModifications):
def __init__(self, info):
SinlgeRouteModifications.__init__(self, info)
self.info = info
self.capacity = info.capacity
self.SP = self.info.spDistanceD
self.Inv = self.info.invArcD
self.demandD = self.info.demandD
self.serviceD = self.info.serveCostD
self.bestChange = 1e300000
self.bestNeighbour = [[self.bestChange]]
self.maxTrip = self.info.maxTrip
def singleArcExchangeDeltas(self,arcRemovedI,arcRemovedJ,costsChange):
loadI = self.demandD[arcRemovedI]
loadJ = self.demandD[arcRemovedJ]
serviceCostI = self.serviceD[arcRemovedI]
serviceCostJ = self.serviceD[arcRemovedJ]
(costChangeI1, costChangeI2, costChangeJ1, costChangeJ2) = costsChange
costIdelta = costChangeI1 + costChangeI2
costJdelta = costChangeJ1 + costChangeJ2
loadIdelta = -loadI + loadJ
loadJdelta = loadI - loadJ
serviceCostIdelta = -serviceCostI + serviceCostJ
serviceCostJdelta = serviceCostI - serviceCostJ
Deltas = (costIdelta, costJdelta, loadIdelta, loadJdelta, serviceCostIdelta, serviceCostJdelta)
return(Deltas)
def doubleArcExchangeDeltas(self, twoArcsRemovedI, twoArcsRemovedJ, costsChange):
loadI = self.demandD[twoArcsRemovedI[0]] + self.demandD[twoArcsRemovedI[1]]
loadJ = self.demandD[twoArcsRemovedJ[0]] + self.demandD[twoArcsRemovedJ[1]]
serviceCostI = self.serviceD[twoArcsRemovedI[0]] + self.serviceD[twoArcsRemovedI[1]]
serviceCostJ = self.serviceD[twoArcsRemovedJ[0]] + self.serviceD[twoArcsRemovedJ[1]]
(costChangeI1, costChangeI2, costChangeJ1, costChangeJ2) = costsChange
costIdelta = costChangeI1 + costChangeI2
costJdelta = costChangeJ1 + costChangeJ2
loadIdelta = -loadI + loadJ
loadJdelta = loadI - loadJ
serviceCostIdelta = -serviceCostI + serviceCostJ
serviceCostJdelta = serviceCostI - serviceCostJ
Deltas = (costIdelta, costJdelta, loadIdelta, loadJdelta, serviceCostIdelta, serviceCostJdelta)
return(Deltas)
def removeInsertAllArcsRouteAllRoutes(self, routes, loads):
nRoutes = len(routes)
removalInsertionNeighbourhood = []
for routeIpos in range(nRoutes):
routeI = routes[routeIpos]
nRoutesJRange = range(nRoutes)
del nRoutesJRange[routeIpos]
nArcsI = len(routeI)
actualI = self.routeCostIndex[routeIpos][0]
for i in range(1,nArcsI-1):
(modifiedRouteI, arcRemoved, costChangeI) = self.singleRemove1route(routeI, i)
loadI = self.demandD[arcRemoved]
serviceCostI = self.serviceD[arcRemoved]
for routeJpos in nRoutesJRange:
actualJ = self.routeCostIndex[routeJpos][0]
costJ = self.tripCosts[actualJ]
routeJ = routes[routeJpos]
nArcsJ = len(routeJ)
for j in range(1, nArcsJ):
(modifiedRouteJ, costChangeJ) = self.findBestInversionArcInsert(routeJ, arcRemoved, j)
neighbour = {'routes':(routeIpos,routeJpos),'pos':(i,j),'arc':arcRemoved,'costDelta':(costChangeI,costChangeJ),'loadDelta':(-loadI,loadI),'serviceDelta':(-serviceCostI,serviceCostI),'modifiedRoutes':(modifiedRouteI,modifiedRouteJ),'tabus':{'I':[i-1,i,i+1],'J':[j-1,j]}}
if (actualI == actualJ) & (costChangeI+costChangeJ < self.bestChange) & (loads[routeJpos] + loadI <= self.capacity):
self.bestChange = costChangeI+costChangeJ
self.bestNeighbour = (costChangeI+costChangeJ,neighbour,'RemoveInsertAllArcsAllRoutes','twoRoutes')
elif ((costChangeI+costChangeJ) < self.bestChange) & (loads[routeJpos] + loadI <= self.capacity) & (costJ + costChangeJ + serviceCostI <= self.maxTrip):
self.bestChange = costChangeI+costChangeJ
self.bestNeighbour = (costChangeI+costChangeJ,neighbour,'RemoveInsertAllArcsAllRoutes','twoRoutes')
return(removalInsertionNeighbourhood)
def exchangeAllArcsAllRoutes(self, routes, loads):
nRoutes = len(routes)
removalInsertionNeighbourhood = []
for routeIpos in range(nRoutes-1):
routeI = routes[routeIpos]
nRoutesJRange = range(routeIpos+1,nRoutes)
nArcsI = len(routeI)
actualI = self.routeCostIndex[routeIpos][0]
costI = self.tripCosts[actualI]
for i in range(1,nArcsI-1):
(modifiedRouteIa, arcRemovedI, costChangeI1) = self.singleRemove1route(routeI, i)
for routeJpos in nRoutesJRange:
actualJ = self.routeCostIndex[routeJpos][0]
costJ = self.tripCosts[actualJ]
routeJ = routes[routeJpos]
nArcsJ = len(routeJ)
for j in range(1, nArcsJ-1):
(modifiedRouteJ, arcRemovedJ, costChangeJ1) = self.singleRemove1route(routeJ, j)
(modifiedRouteI, costChangeI2) = self.findBestInversionArcInsert(modifiedRouteIa, arcRemovedJ, i)
(modifiedRouteJ, costChangeJ2) = self.findBestInversionArcInsert(modifiedRouteJ, arcRemovedI, j)
costsChange = (costChangeI1, costChangeI2, costChangeJ1, costChangeJ2)
Deltas = self.singleArcExchangeDeltas(arcRemovedI,arcRemovedJ,costsChange)
(costIdelta, costJdelta, loadIdelta, loadJdelta, serviceCostIdelta, serviceCostJdelta) = Deltas
neighbour = {'routes':(routeIpos,routeJpos),'pos':(i,j),'arc':(arcRemovedI,arcRemovedI),'costDelta':(costIdelta,costJdelta),'loadDelta':(loadIdelta,loadJdelta),'serviceDelta':(serviceCostIdelta,serviceCostJdelta),'modifiedRoutes':(modifiedRouteI,modifiedRouteJ),'tabus':{'I':[i-1,i,i+1],'J':[j-1,j,j+1]}}
if (actualI == actualJ) & (costIdelta+costJdelta < self.bestChange) & (loads[routeIpos] + loadIdelta <= self.capacity) & (loads[routeJpos] + loadJdelta <= self.capacity):
self.bestChange = costIdelta+costJdelta
self.bestNeighbour = (costIdelta+costJdelta,neighbour,{'I':[i-1,i,i+1],'J':[j-1,j,j+1]},'ExchangeAllArcsAllRoutes','twoRoutes')
elif (costIdelta+costJdelta < self.bestChange) & (loads[routeIpos] + loadIdelta <= self.capacity) & (loads[routeJpos] + loadJdelta <= self.capacity) & (costI + costIdelta + serviceCostIdelta <= self.maxTrip) & (costJ + costJdelta + serviceCostJdelta <= self.maxTrip):
self.bestChange = costIdelta+costJdelta
self.bestNeighbour = (costIdelta+costJdelta,neighbour,{'I':[i-1,i,i+1],'J':[j-1,j,j+1]},'ExchangeAllArcsAllRoutes','twoRoutes')
return(removalInsertionNeighbourhood)
def removeInsertAllDoubleArcsAllRoutes(self, routes, loads):
nRoutes = len(routes)
removalInsertionNeighbourhood = []
for routeIpos in range(nRoutes):
routeI = routes[routeIpos]
nRoutesJRange = range(nRoutes)
del nRoutesJRange[routeIpos]
nArcsI = len(routeI)
actualI = self.routeCostIndex[routeIpos][0]
for i in range(1,nArcsI-2):
(modifiedRouteI, twoArcsRemoved, costChangeI) = self.doubleRemove1route(routeI, i)
loadI = self.demandD[twoArcsRemoved[0]] + self.demandD[twoArcsRemoved[1]]
serviceCostI = self.serviceD[twoArcsRemoved[0]] + self.serviceD[twoArcsRemoved[1]]
for routeJpos in nRoutesJRange:
actualJ = self.routeCostIndex[routeJpos][0]
costJ = self.tripCosts[actualJ]
routeJ = routes[routeJpos]
nArcsJ = len(routeJ)
for j in range(1, nArcsJ):
(modifiedRouteJ, costChangeJ) = self.findBestInversionDoubleArcInsert(routeJ, twoArcsRemoved, j)
neighbour = {'routes':(routeIpos,routeJpos),'pos':(i,j),'arc':twoArcsRemoved,'costDelta':(costChangeI,costChangeJ),'loadDelta':(-loadI,loadI),'serviceDelta':(-serviceCostI,serviceCostI),'modifiedRoutes':(modifiedRouteI,modifiedRouteJ),'tabus':{'I':[i-1,i,i+1,i+2],'J':[j-1,j]}}
if (actualI == actualJ) & (costChangeI+costChangeJ < self.bestChange) & (loads[routeJpos] + loadI <= self.capacity):
self.bestChange = costChangeI+costChangeJ
self.bestNeighbour = (costChangeI+costChangeJ,neighbour,'RemoveInsertAllDoubleArcsAllRoutes','twoRoutes')
elif (costChangeI+costChangeJ < self.bestChange) & (loads[routeJpos] + loadI <= self.capacity) & (costJ + costChangeJ + serviceCostI <= self.maxTrip):
self.bestChange = costChangeI+costChangeJ
self.bestNeighbour = (costChangeI+costChangeJ,neighbour,'RemoveInsertAllDoubleArcsAllRoutes','twoRoutes')
return(removalInsertionNeighbourhood)
def exchangeAllDoubleArcsAllRoutes(self, routes, loads):
nRoutes = len(routes)
removalInsertionNeighbourhood = []
for routeIpos in range(nRoutes-1):
routeI = routes[routeIpos]
nRoutesJRange = range(routeIpos+1,nRoutes)
nArcsI = len(routeI)
actualI = self.routeCostIndex[routeIpos][0]
costI = self.tripCosts[actualI]
for i in range(1,nArcsI-2):
(modifiedRouteIa, twoArcsRemovedI, costChangeI1) = self.doubleRemove1route(routeI, i)
for routeJpos in nRoutesJRange:
routeJ = routes[routeJpos]
nArcsJ = len(routeJ)
routeJ = routes[routeJpos]
nArcsJ = len(routeJ)
actualJ = self.routeCostIndex[routeJpos][0]
costJ = self.tripCosts[actualJ]
for j in range(1, nArcsJ-2):
(modifiedRouteJ, twoArcsRemovedJ, costChangeJ1) = self.doubleRemove1route(routeJ, j)
(modifiedRouteI, costChangeI2) = self.findBestInversionDoubleArcInsert(modifiedRouteIa, twoArcsRemovedJ, i)
(modifiedRouteJ, costChangeJ2) = self.findBestInversionDoubleArcInsert(modifiedRouteJ, twoArcsRemovedI, j)
costsChange = (costChangeI1, costChangeI2, costChangeJ1, costChangeJ2)
Deltas = self.doubleArcExchangeDeltas(twoArcsRemovedI,twoArcsRemovedJ,costsChange)
(costIdelta, costJdelta, loadIdelta, loadJdelta, serviceCostIdelta, serviceCostJdelta) = Deltas
neighbour = {'routes':(routeIpos,routeJpos),'pos':(i,j),'arc':(twoArcsRemovedI,twoArcsRemovedJ),'costDelta':(costIdelta,costJdelta),'loadDelta':(loadIdelta,loadJdelta),'serviceDelta':(serviceCostIdelta,serviceCostJdelta),'modifiedRoutes':(modifiedRouteI,modifiedRouteJ),'tabus':{'I':[i-1,i,i+1,i+2],'J':[j-1,j,j+1,j+2]}}
if (actualI == actualJ) & (costIdelta+costJdelta < self.bestChange) & (loads[routeIpos] + loadIdelta <= self.capacity) & (loads[routeJpos] + loadJdelta <= self.capacity):
self.bestChange = costIdelta+costJdelta
self.bestNeighbour = (costIdelta+costJdelta,neighbour,{'I':[i-1,i,i+1,i+2],'J':[j-1,j,j+1,j+2]},'ExchangeAllDoubleArcsAllRoutes','twoRoutes')
elif (costIdelta+costJdelta < self.bestChange) & (loads[routeIpos] + loadIdelta <= self.capacity) & (loads[routeJpos] + loadJdelta <= self.capacity) & (costI + costIdelta + serviceCostIdelta <= self.maxTrip) & (costJ + costJdelta + serviceCostJdelta <= self.maxTrip):
self.bestChange = costIdelta+costJdelta
self.bestNeighbour = (costIdelta+costJdelta,neighbour,{'I':[i-1,i,i+1,i+2],'J':[j-1,j,j+1,j+2]},'ExchangeAllDoubleArcsAllRoutes','twoRoutes')
return(removalInsertionNeighbourhood)
def generateAllNeighbourhoodsMulitRoutes(self, routes, loads, tripCosts, routeCostIndex):
self.tripCosts = tripCosts
self.routeCostIndex = routeCostIndex
self.removeInsertAllArcsRouteAllRoutes(routes, loads)
self.exchangeAllArcsAllRoutes(routes, loads)
self.removeInsertAllDoubleArcsAllRoutes(routes, loads)
self.exchangeAllDoubleArcsAllRoutes(routes, loads)
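# Sketch of the expected call sequence (routes/loads/tripCosts/routeCostIndex
# are placeholders for the caller's own bookkeeping structures):
#   RIPij = MultipleRouteRemoveInsertProcedure(info)
#   RIPij.generateAllNeighbourhoodsMulitRoutes(routes, loads, tripCosts, routeCostIndex)
#   bestDelta, bestMove = RIPij.bestNeighbour[0], RIPij.bestNeighbour[1]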
#===============================================================================
#
#===============================================================================
def unitTestRemoveInsert(info, solutionOld):
import testSolutions
er = testSolutions.testSolution(None, solutionOld, info)
er.testReportSolution()
RIP = SingleRouteRemoveInsertProcedure(info)
solution = solutionOld.copy()
while True:
routeI = solution[1]['Solution']
best = []
removalInsertionNeighbourhood1 = RIP.removeInsertAllDoubleArcs(routeI)
best += removalInsertionNeighbourhood1
removalInsertionNeighbourhood2 = RIP.removeInsertAllArcs(routeI)
best += removalInsertionNeighbourhood2
removalInsertionNeighbourhood3 = RIP.exchangeAllArcs(routeI)
best += removalInsertionNeighbourhood3
removalInsertionNeighbourhood4 = RIP.exchangeAllDoubleArcs(routeI)
best += removalInsertionNeighbourhood4
bestNeighbour = min(best)
print(bestNeighbour[0],bestNeighbour[-1])
if bestNeighbour[0] >= 0: break
solution[1]['Solution'] = bestNeighbour[1][2]
solution[1]['Cost'] = solution[1]['Cost'] + bestNeighbour[0]
solution['Total cost'] = solution['Total cost'] + bestNeighbour[0]
def unitTestRemoveInsert2(info, solutionOld):
import testSolutions
import time
er = testSolutions.testSolution(None, solutionOld, info)
er.testReportSolution()
RIP = SingleRouteRemoveInsertProcedure(info)
solution = deepcopy(solutionOld)
while True:
routeI = solution[1]['Solution']
t1 = time.clock()
removalInsertionNeighbourhood1 = RIP.removeInsertAllArcs(routeI)
e1 = time.clock() - t1
t2 = time.clock()
removalInsertionNeighbourhood2 = RIP.removeInsertAllDoubleArcs(routeI)
e2 = time.clock() - t2
t3 = time.clock()
removalInsertionNeighbourhood3 = RIP.exchangeAllArcs(routeI)
e3 = time.clock() - t3
t4 = time.clock()
removalInsertionNeighbourhood4 = RIP.exchangeAllDoubleArcs(routeI)
e4 = time.clock() - t4
break
print(len(removalInsertionNeighbourhood1))
print((len(routeI)-2)*(len(routeI)-3))
print(min(removalInsertionNeighbourhood1)[0])
print(e1)
print('')
print(len(removalInsertionNeighbourhood2))
print((len(routeI)-3)*(len(routeI)-4))
print(min(removalInsertionNeighbourhood2)[0])
print(e2)
print('')
print(len(removalInsertionNeighbourhood3))
print((len(routeI)-3)*(len(routeI)-4)/2)
print(min(removalInsertionNeighbourhood3)[0])
print(e3)
print('')
print(len(removalInsertionNeighbourhood4))
print((len(routeI)-5)*(len(routeI)-6)/2)
print(min(removalInsertionNeighbourhood4)[0])
print(e4)
print('')
for neighbour in removalInsertionNeighbourhood4[0:5]:
solution = deepcopy(solutionOld)
bestNeighbour = neighbour
solution[1]['Solution'] = bestNeighbour[1][2]
solution[1]['Cost'] += bestNeighbour[0] #This is not working!!!!!
solution['Total cost'] += bestNeighbour[0]
er = testSolutions.testSolution(None, solution, info)
er.checkFeasibility()
print(er.errorReport['Exceptions']['Total exception'],bestNeighbour[0])
def unitTestRemoveInsertAllRoutes(info, solutionOld):
print('unitTestRemoveInsertAllRoutes')
print('')
import testSolutions
import time
er = testSolutions.testSolution(None, solutionOld, info)
er.testReportSolution()
RIPij = MultipleRouteRemoveInsertProcedure(info)
solution = deepcopy(solutionOld)
while True:
routeI = solution[1]['Solution']
routeJ = solution[2]['Solution']
routes = [routeI,routeJ]
t1 = time.clock()
removalInsertionNeighbourhood1 = RIPij.removeInsertAllArcsRouteAllRoutes(routes)
e1 = time.clock() - t1
t2 = time.clock()
removalInsertionNeighbourhood2 = RIPij.removeInsertAllDoubleArcsAllRoutes(routes)
e2 = time.clock() - t2
t3 = time.clock()
removalInsertionNeighbourhood3 = RIPij.exchangeAllArcsAllRoutes(routes)
e3 = time.clock() - t3
t4 = time.clock()
removalInsertionNeighbourhood4 = RIPij.exchangeAllDoubleArcsAllRoutes(routes)
e4 = time.clock() - t4
break
print(len(removalInsertionNeighbourhood1))
print(2*(len(routeI)-2)*(len(routeJ)-2))
print(min(removalInsertionNeighbourhood1)[0])
print(e1)
print('')
print(len(removalInsertionNeighbourhood2))
print((len(routeI)-3)*(len(routeJ)-2))
print(min(removalInsertionNeighbourhood2)[0])
print(e2)
print('')
print(len(removalInsertionNeighbourhood3))
print((len(routeI)-2)*(len(routeJ)-2))
print(min(removalInsertionNeighbourhood3)[0])
print(e3)
print('')
print(len(removalInsertionNeighbourhood4))
print((len(routeI)-3)*(len(routeJ)-4))
print(min(removalInsertionNeighbourhood4)[0])
print(e4)
print('')
#print(solutionOld)
for neighbour in removalInsertionNeighbourhood4:
solution = deepcopy(solutionOld)
bestNeighbour = neighbour
modifications = bestNeighbour[1]
(routeI,routeJ) = modifications['routes']
routeI += 1
routeJ += 1
(solution[routeI]['Solution'],solution[routeJ]['Solution']) = modifications['modifiedRoutes']
(costIdelta, costJdelta) = modifications['costDelta']
(serviceIdelta, serviceJdelta) = modifications['serviceDelta']
solution[routeI]['Cost'] += costIdelta + serviceIdelta
solution[routeJ]['Cost'] += costJdelta + serviceJdelta
(loadIdelta, loadJdelta) = modifications['loadDelta']
solution[routeI]['Load'] += loadIdelta
solution[routeJ]['Load'] += loadJdelta
solution['Total cost'] += bestNeighbour[0]
er = testSolutions.testSolution(None, solution, info)
er.checkFeasibility()
print(er.errorReport['Exceptions']['Total exception'],costIdelta,costJdelta,solution[routeI]['Cost']+solution[routeJ]['Cost'],solution['Total cost'])
if er.errorReport['Exceptions']['Total exception']:
er.testReportSolution()
print(solution)
if __name__ == "__main__":
import cPickle
import LancommeARPconversions3 as LARP
fileName1 = 'lpr_ProblemInfo/Lpr-a-01_pickled.txt'
fileName2 = 'lpr_ProblemInfo/Lpr-b-01_pickled.txt'
fileName3 = 'lpr_ProblemInfo/Lpr-c-01_pickled.txt'
info1 = LARP.ReadProblemData(fileName1)
info2 = LARP.ReadProblemData(fileName2)
info3 = LARP.ReadProblemData(fileName3)
s1 = open('lpr_Solutions/Lpr-a-01_Init_sol.txt')
s2 = open('lpr_Solutions/Lpr-b-01_Init_sol.txt')
s3 = open('lpr_Solutions/Lpr-c-01_Init_sol.txt')
solution1 = cPickle.load(s1)
solution2 = cPickle.load(s2)
solution3 = cPickle.load(s3)
# unitTestRemoveInsert(info1, solution1)
# unitTestRemoveInsert2(info1, solution1)
# unitTestRemoveInsertIJ(info3, solution3)
unitTestRemoveInsertAllRoutes(info3, solution3)
| 58.799639 | 345 | 0.626984 | 2,737 | 32,575 | 7.446474 | 0.089879 | 0.003827 | 0.003238 | 0.002944 | 0.610863 | 0.575438 | 0.543791 | 0.516363 | 0.473824 | 0.4488 | 0 | 0.025359 | 0.244605 | 32,575 | 553 | 346 | 58.905967 | 0.802902 | 0.026124 | 0 | 0.563008 | 0 | 0 | 0.046393 | 0.016695 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054878 | false | 0 | 0.01626 | 0 | 0.077236 | 0.093496 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f7daa0048e77d81249f36f7b52f95cdad281cfb | 1,054 | py | Python | wb_common/gsm.py | contactless/wb-common | 401a1bafa1ccf401b79360f577c267128f135db3 | [
"MIT"
] | null | null | null | wb_common/gsm.py | contactless/wb-common | 401a1bafa1ccf401b79360f577c267128f135db3 | [
"MIT"
] | null | null | null | wb_common/gsm.py | contactless/wb-common | 401a1bafa1ccf401b79360f577c267128f135db3 | [
"MIT"
] | 1 | 2021-11-04T09:01:45.000Z | 2021-11-04T09:01:45.000Z | # coding: utf-8
from __future__ import print_function
import os
import subprocess
def gsm_decode(hexstr):
return os.popen('echo %s | xxd -r -ps | iconv -f=UTF-16BE -t=UTF-8' % hexstr).read()
def init_gsm():
retcode = subprocess.call("wb-gsm restart_if_broken", shell=True)
if retcode != 0:
raise RuntimeError("gsm init failed")
def init_baudrate():
retcode = subprocess.call("wb-gsm init_baud", shell=True)
if retcode != 0:
raise RuntimeError("gsm init baudrate failed")
def gsm_get_imei():
proc = subprocess.Popen("wb-gsm imei", shell=True, stdout=subprocess.PIPE)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
raise RuntimeError("get imei failed")
    # decode so callers receive a text string under Python 3 as well
    return stdout.strip().decode()
def split_imei(imei):
imei = str(imei)
if not imei.isdigit():
raise RuntimeError("imei is not a numerical")
if len(imei) != 15:
raise RuntimeError("wrong imei len")
prefix = imei[:8]
sn = imei[8:14]
crc = imei[14]
return int(prefix), int(sn), int(crc)
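# Worked example with a commonly used sample IMEI:
#   split_imei('490154203237518') -> (49015420, 323751, 8)
# i.e. an 8-digit prefix, a 6-digit serial number and a single check digit.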
| 25.095238 | 88 | 0.652751 | 150 | 1,054 | 4.493333 | 0.433333 | 0.126113 | 0.080119 | 0.068249 | 0.204748 | 0.127596 | 0.127596 | 0.127596 | 0.127596 | 0 | 0 | 0.018182 | 0.217268 | 1,054 | 41 | 89 | 25.707317 | 0.798788 | 0.012334 | 0 | 0.068966 | 0 | 0.034483 | 0.183831 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.172414 | false | 0 | 0.103448 | 0.034483 | 0.37931 | 0.034483 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f8113b4bbb8eca18371b717a2cd4d8f7bc38c2f | 25,128 | py | Python | iucn_sim/misc/transition_rates.py | tobiashofmann88/iucn_extinction_simulator | 9953be13637fbc9c5ec629700dc1d4ee9ad8225c | [
"MIT"
] | 11 | 2020-06-18T11:34:47.000Z | 2021-07-25T17:38:52.000Z | iucn_sim/misc/transition_rates.py | tobiashofmann88/iucn_extinction_simulator | 9953be13637fbc9c5ec629700dc1d4ee9ad8225c | [
"MIT"
] | null | null | null | iucn_sim/misc/transition_rates.py | tobiashofmann88/iucn_extinction_simulator | 9953be13637fbc9c5ec629700dc1d4ee9ad8225c | [
"MIT"
] | 2 | 2020-02-05T19:00:33.000Z | 2021-08-09T21:07:45.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
MCMC-estimation of status transition rates from IUCN record
Created on Mon Oct 28 14:43:44 2019
@author: Tobias Andermann (tobias.andermann@bioenv.gu.se)
"""
import numpy as np
np.set_printoptions(suppress=True)
import pandas as pd
import os,sys
import datetime
from scipy.optimize import curve_fit
import warnings
import iucn_sim.functions as cust_func
# get extinction probs_________________________________________________________
def p_e_year(years,p_e):
pe_year = 1-(1-float(p_e))**(1/years)
return pe_year
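# Worked example (illustrative numbers): a 50% extinction probability over
# 100 years corresponds to a constant annual extinction probability of
#   p_e_year(100, 0.5) = 1 - (1 - 0.5)**(1/100) ~ 0.0069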
def update_multiplier(q,d=1.1):
u = np.random.uniform(0,1)
l = 2*np.log(d)
m = np.exp(l*(u-.5))
new_q = q * m
return new_q, np.log(m)
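# This is a standard log-scale multiplier proposal: m is drawn log-uniformly
# from [1/d, d], so the default d=1.1 rescales the current rate by at most
# about +/-10% per move; the returned log(m) is the Hastings correction that
# sample_rate_mcmc adds to the log acceptance ratio below.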
def sample_rate_mcmc(count, tot_time, n_samples = 1, n_gen = 100000,burnin = 1000):
def get_loglik(count, dT, rate):
return np.log(rate)*count - dT*rate
post_samples = []
q = 0.01
likA = get_loglik(count,tot_time,q)
for i in range(n_gen):
new_q, hast = update_multiplier(q)
lik = get_loglik(count,tot_time,new_q)
if lik-likA + hast >= np.log(np.random.random()):
q = new_q
likA = lik
if i > burnin and i % 10==0:
post_samples.append(q)
sampled_rates = np.random.choice(post_samples,n_samples,replace=False)
return sampled_rates
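# Usage sketch with hypothetical counts: 10 posterior samples of a transition
# rate for a status change observed 4 times over 250 pooled assessment-years;
# the samples scatter around the maximum-likelihood rate 4/250 = 0.016.
#   rates = sample_rate_mcmc(4, 250.0, n_samples=10)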
def power_function(x,a,b):
    # power function y = a * x**b; used with curve_fit for the species-specific
    # regression of extinction probabilities against generation length
y = float(a)*x**float(b)
return y
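# Sketch of fitting this power law with the imported curve_fit; x and y are
# placeholders for generation lengths and the corresponding extinction
# probabilities:
#   popt, pcov = curve_fit(power_function, x, y)
#   a, b = popt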
def make_empty_rate_df(species_list,rate_columns,status_label):
rate_df = pd.DataFrame(np.zeros((len(species_list),rate_columns+1)))
rate_df.columns = ['species']+ ['%s_p_ext_%i'%(status_label,i) for i in np.arange(0,rate_columns)]
rate_df.species = species_list
return rate_df
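# e.g. make_empty_rate_df(['sp A', 'sp B'], 3, 'CR') returns a two-row frame
# with columns ['species', 'CR_p_ext_0', 'CR_p_ext_1', 'CR_p_ext_2'] and the
# probability columns initialised to zero.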
def add_arguments(parser):
parser.add_argument(
'--species_data',
required=True,
metavar='<path>',
help="File containing species list and current IUCN status of species, as well as generation length (GL) data estimates if available. GL data is only used for '--extinction_probs_mode 0' ('species_data.txt' output from get_iucn_data function).",
)
parser.add_argument(
'--iucn_history',
required=True,
metavar='<path>',
help="File containing IUCN history of the reference group for transition rate estimation ('*_iucn_history.txt' output of get_iucn_data function)."
)
parser.add_argument(
'--outdir',
required=True,
metavar='<path>',
help="Provide path to outdir where results will be saved."
)
parser.add_argument(
'--extinction_probs_mode',
default=0,
metavar='N',
help="Set to '0' to use the critE EX mode to determine extinction probabilities for each status (e.g. Mooers et al, 2008 approach). Set to '1' to use empirical EX mode, based on the recorded extinction in the IUCN history of the reference group (e.g. Monroe et al, 2019 approach). GL data can only be used in the critE EX mode ('0')."
)
parser.add_argument(
'--possibly_extinct_list',
default=0,
metavar='<path>',
help="File containing list of taxa that are likely extinct, but that are listed as extant in IUCN, including the year of their assessment as possibly extinct ('possibly_extinct_reference_taxa.txt' output from get_iucn_data function). These species will then be modeled as extinct by the esimate_rates function, which will effect the estimated extinction probabilities when chosing `--extinction_probs_mode 1`",
)
parser.add_argument(
'--species_specific_regression',
action='store_true',
help='Enables species-specific regression fitting to model LC, NT, and VU extinction probabilities. Only applicable with --extinction_probs_mode 0 (critE mode) and if GL is provided.',
default=False
)
parser.add_argument(
'--rate_samples',
default=100,
metavar='N',
help="How many rates to sample from the posterior transition rate estimates. These rates will be used to populate transition rate q-matrices for downstream simulations. Later on you can still chose to run more simulation replicates than the here specified number of produced transition rate q-matrices, in which case the `run_sim` function will randomely resample from the available q-matrices (default=100, this is ususally sufficient, larger numbers can lead to very high output file size volumes)."
)
parser.add_argument(
'--n_gen',
default=100000,
metavar='N',
help="Number of generations for MCMC for transition rate estimation (default=100000)."
)
parser.add_argument(
'--burnin',
default=1000,
metavar='N',
help="Burn-in for MCMC for transition rate estimation (default=1000)."
)
parser.add_argument(
'--seed',
default=None,
help="Set random seed for the MCMC."
)
def main(args):
# get user input___________________________________________________________
input_data = args.species_data
iucn_history = args.iucn_history
outdir = args.outdir
try:
extinction_probs_mode = int(args.extinction_probs_mode)
except:
        print('\nInvalid extinction_probs_mode provided. Please choose between the currently available options 0 or 1')
quit()
possibly_extinct_list = args.possibly_extinct_list
n_rep = int(args.rate_samples)
n_gen = int(args.n_gen)
burnin = int(args.burnin)
if not os.path.exists(outdir):
os.makedirs(outdir)
seed = args.seed
try:
random_seed = False
seed = int(seed)
except:
seed = np.random.randint(999999999)
random_seed = True
np.random.seed(seed)
np.savetxt(os.path.join(outdir,'starting_seed.txt'),np.array([seed]),fmt='%i')
# get input data
species_data_input = pd.read_csv(input_data,sep='\t',header=None).dropna()
invalid_status_taxa = species_data_input[~species_data_input.iloc[:,1].isin(['LC','NT','VU','EN','CR','DD','NE'])]
if len(invalid_status_taxa)>0:
print('\nFound invalid IUCN statuses:',list(invalid_status_taxa[1].values),'\n\nMake sure that the second column of your --species_data input contains the current IUCN status of your target species, which must be one of the following valid extant statuses: LC, NT, VU, EN, CR, DD, NE')
        # if this affects only a minority of taxa, continue after removing them
if len(invalid_status_taxa)/len(species_data_input) < 0.5:
print('\nAutomatically dropping the following taxa because of invalid IUCN status information:', list(invalid_status_taxa[0].values))
species_data_input = species_data_input[species_data_input.iloc[:,1].isin(['LC','NT','VU','EN','CR','DD','NE'])]
else:
quit('\nPlease fix your species_data input file. Check presence of current IUCN status information and column order.')
# get the list of species
species_list = species_data_input.iloc[:,0].values.astype(str)
# replace underscores in species name in case they are present
species_list = np.array([i.replace('_',' ') for i in species_list])
# Check if all species names are binomial
for species in species_list:
if len(species.split(' ')) != 2:
            print('ERROR','*'*50,'\nABORTED: All species names provided under the --species_data flag must be binomial! Found non-binomial name:\n%s\n'%species,'*'*50)
quit()
# get the current IUCN status of all species
current_status = species_data_input.iloc[:,1].values.astype(str)
# get GL data if provided
gl_data_available = False
if species_data_input.shape[1] > 2:
gl_matrix = species_data_input.iloc[:,2:].values
gl_data_available = True
#__________________________________________________________________________
# process the IUCN history data____________________________________________
iucn_start_year = 2001  # start year of the IUCN 3.1 standard for categories
current_year = datetime.datetime.now().year
master_stat_time_df = pd.DataFrame(columns=['species'] + list(np.arange(iucn_start_year, current_year + 1).astype(str)))
statuses_through_time = pd.read_csv(iucn_history, delimiter='\t')
target_columns = [column for column in master_stat_time_df.columns if column in statuses_through_time.columns]
master_stat_time_df[target_columns] = statuses_through_time[target_columns]
# treat EW (extinct in the wild) as EX
master_stat_time_df.replace('EW', 'EX', inplace=True)
# replace occurrences of NR (not recognized) with NaN
master_stat_time_df.replace('NR', np.nan, inplace=True)
# clean and sort the df
master_stat_time_df = master_stat_time_df.sort_values(by='species')
master_stat_time_df = master_stat_time_df.drop_duplicates()
master_stat_time_df.index = np.arange(len(master_stat_time_df))
# set the assessment at the current year to NE for species without any assessments
na_row_indices = np.where(master_stat_time_df.iloc[:, 1:].T.isnull().all().values)
for index in na_row_indices:
    master_stat_time_df.iloc[index, -1] = 'NE'
# if a possibly_extinct_list is provided, set the status of those taxa to extinct from the provided year onwards
if possibly_extinct_list:
    pex_data = pd.read_csv(possibly_extinct_list, sep='\t')
    pex_species_list = pex_data.iloc[:, 0].values.astype(str)
    pex_year = pex_data.iloc[:, 1].values.astype(int)
    column_names = master_stat_time_df.columns.values
    row_names = master_stat_time_df.species.values
    for i, species in enumerate(pex_species_list):
        row_index = np.where(row_names == species)[0][0]
        assessment_year = pex_year[i]
        column_index = np.where(column_names == str(assessment_year))[0][0]
        master_stat_time_df.iloc[row_index, column_index:] = 'EX'
# extract the most recent valid status for each taxon
valid_status_dict, most_recent_status_dict, status_series, taxon_series = cust_func.extract_valid_statuses(master_stat_time_df)
# extinction prob mode 0: remove all currently extinct taxa
if extinction_probs_mode == 0:
    ext_indices = np.array([num for num, i in enumerate(most_recent_status_dict.keys()) if most_recent_status_dict[i] == 'EX'])
    master_stat_time_df = master_stat_time_df.drop(ext_indices)
    master_stat_time_df.index = np.arange(len(master_stat_time_df))
    # replace any occurrence of 'EX' as a past status with NaN to avoid problems when counting transition types (treating these assessments as invalid)
    master_stat_time_df.replace('EX', np.nan, inplace=True)
# extinction prob mode 1: remove only taxa that have been extinct all along, keeping those with a recorded transition to extinct within the time frame
elif extinction_probs_mode == 1:
    ext_indices = np.array([num for num, i in enumerate(master_stat_time_df.iloc[:, 1:].values.astype(str)) if 'EX' in np.unique(i) and len(np.unique(i)) == 2])
    master_stat_time_df = master_stat_time_df.drop(ext_indices)
    master_stat_time_df.index = np.arange(len(master_stat_time_df))
# write the formatted IUCN history df to file
master_stat_time_df.to_csv(os.path.join(outdir, 'formatted_iucn_history_reference_taxa.txt'), sep='\t')
# extract the most recent valid status for each taxon (again, after the removals above)
valid_status_dict, most_recent_status_dict, status_series, taxon_series = cust_func.extract_valid_statuses(master_stat_time_df)
# count the current status distribution
unique, counts = np.unique(status_series, return_counts=True)
print('\nCurrent IUCN status distribution in reference group:', dict(zip(unique, counts)))
# count how often each type of status change occurs
change_type_dict = cust_func.count_status_changes(master_stat_time_df, valid_status_dict)
print('Summing up years spent in each category ...')
years_in_each_category = cust_func.get_years_spent_in_each_category(master_stat_time_df, valid_status_dict)
# write the status change data to file
final_years_count_array = np.array([list(years_in_each_category.keys()), list(years_in_each_category.values())]).T
np.savetxt(os.path.join(outdir, 'years_spent_in_each_category.txt'), final_years_count_array, fmt='%s\t%s')
change_type_dict_array = np.array([list(change_type_dict.keys()), list(change_type_dict.values())]).T
np.savetxt(os.path.join(outdir, 'change_type_dict.txt'), change_type_dict_array, fmt='%s\t%s')
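# Hedged sketch: `cust_func.extract_valid_statuses` is defined elsewhere and not
# shown in this file. Judging from how it is called above, a minimal stand-in
# would collect the valid (non-NaN) assessments per taxon plus the most recent
# one; all details below are assumptions about the hidden implementation.
def extract_valid_statuses_sketch(status_df):
    valid_status_dict, most_recent_status_dict = {}, {}
    for _, row in status_df.iterrows():
        # NaN gaps are floats, valid assessments are strings such as 'LC' or 'EN'
        statuses = [s for s in row.values[1:] if isinstance(s, str)]
        valid_status_dict[row.species] = statuses
        most_recent_status_dict[row.species] = statuses[-1] if statuses else None
    status_series = list(most_recent_status_dict.values())
    taxon_series = list(most_recent_status_dict.keys())
    return valid_status_dict, most_recent_status_dict, status_series, taxon_series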
#__________________________________________________________________________
# sample transition rates for all types of changes_________________________
if extinction_probs_mode == 0:
    status_change_count_df = pd.DataFrame(data=np.zeros([6, 6]).astype(int), index=['LC', 'NT', 'VU', 'EN', 'CR', 'DD'], columns=['LC', 'NT', 'VU', 'EN', 'CR', 'DD'])
elif extinction_probs_mode == 1:
    status_change_count_df = pd.DataFrame(data=np.zeros([7, 7]).astype(int), index=['LC', 'NT', 'VU', 'EN', 'CR', 'DD', 'EX'], columns=['LC', 'NT', 'VU', 'EN', 'CR', 'DD', 'EX'])
for status_change in change_type_dict.keys():
    states = status_change.split('->')
    original_state = states[0]
    new_state = states[1]
    count = change_type_dict[status_change]
    status_change_count_df.loc[original_state, new_state] = count
status_change_count_df.to_csv(os.path.join(outdir, 'status_change_counts.txt'), sep='\t', index=True)
print('Counted the following transition occurrences in the IUCN history of the reference group:')
print(status_change_count_df)
if not random_seed:
    print('Running MCMC with user-set starting seed %i ...' % seed)
else:
    print('Running MCMC with randomly generated starting seed %i ...' % seed)
rate_columns = ['rate_%i' % i for i in np.arange(0, n_rep)]
sampled_rates_df = pd.DataFrame(columns=['status_change'] + rate_columns)
for status_a in status_change_count_df.columns:
    row = status_change_count_df.loc[status_a]
    for status_b in row.index.values:
        if not status_a == status_b:
            count = row[status_b]
            total_time = years_in_each_category[status_a]
            rates = sample_rate_mcmc(count, total_time, n_samples=n_rep, n_gen=n_gen, burnin=burnin)
            new_row = pd.DataFrame(data=[['%s->%s' % (status_a, status_b)] + list(rates)], columns=['status_change'] + rate_columns)
            # DataFrame.append was removed in pandas 2.0; pd.concat is the drop-in replacement
            sampled_rates_df = pd.concat([sampled_rates_df, new_row], ignore_index=True)
sampled_rates_df[rate_columns] = sampled_rates_df[rate_columns].apply(pd.to_numeric)
sampled_rates_df.to_csv(os.path.join(outdir, 'sampled_status_change_rates.txt'), sep='\t', index=False, float_format='%.8f')
print('Sampled %i rates from the MCMC posterior for each transition type.' % n_rep)
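# Hedged sketch: `sample_rate_mcmc` is imported from elsewhere and not shown in
# this file. Given how it is called above (a transition count over a total
# exposure time in years), a minimal stand-in is a Metropolis-Hastings sampler
# for a Poisson rate; the proposal mechanism and prior below are assumptions,
# not the original implementation.
def sample_rate_mcmc_sketch(count, tot_time, n_samples=100, n_gen=100000, burnin=1000):
    def log_post(rate):
        # Poisson log-likelihood of `count` events in `tot_time` years (flat prior on rate > 0)
        return count * np.log(rate) - rate * tot_time
    rate = max(count, 1) / tot_time  # start at (or near) the maximum-likelihood estimate
    chain = []
    for gen in range(n_gen):
        proposal = rate * np.exp(np.random.uniform(-0.5, 0.5))  # multiplicative sliding window
        hastings = np.log(proposal) - np.log(rate)  # corrects for the asymmetric proposal
        if np.log(np.random.random()) < log_post(proposal) - log_post(rate) + hastings:
            rate = proposal
        if gen >= burnin:
            chain.append(rate)
    # thin the post-burnin chain down to n_samples roughly equidistant draws
    keep = np.linspace(0, len(chain) - 1, n_samples).astype(int)
    return np.array(chain)[keep]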
#__________________________________________________________________________
# if mode 0, calculate extinction probabilities for EN and CR with GL data_
if extinction_probs_mode == 0:
    # calculate yearly extinction risks for category EN
    if gl_data_available:
        dims = gl_matrix.shape[1]
        en_risks = []
        for gl_array in gl_matrix:
            if dims == 1:
                gl_array = np.array(gl_array)
            # IUCN criterion E for EN: p(ex) >= 0.2 within max(20 years, 5 generations), capped at 100 years
            en_risks_species = p_e_year(np.minimum(np.maximum([20] * len(gl_array), 5 * gl_array), 100), 0.2)
            # replace all NaN values with the standard EN extinction risk (NaN != NaN selects the NaN entries)
            n_nan = len(en_risks_species[en_risks_species != en_risks_species])
            en_risks_species[en_risks_species != en_risks_species] = [p_e_year(20, 0.2)] * n_nan
            en_risks.append(en_risks_species)
        en_risks = np.array(en_risks)
    else:
        print('Warning: No generation length (GL) data found. Extinction risks for statuses EN and CR are calculated without using GL data.')
        dims = 1
        en_risks = np.array([[p_e_year(20, 0.2)]] * len(species_list))
    en_risks_df = make_empty_rate_df(species_list, dims, 'EN')
    en_risks_df.iloc[:, 1:] = en_risks
    en_risks_df.to_csv(os.path.join(outdir, 'en_extinction_risks_all_species.txt'), sep='\t', index=False, float_format='%.12f')
    # calculate yearly extinction risks for category CR
    if gl_data_available:
        dims = gl_matrix.shape[1]
        cr_risks = []
        for gl_array in gl_matrix:
            if dims == 1:
                gl_array = np.array(gl_array)
            # IUCN criterion E for CR: p(ex) >= 0.5 within max(10 years, 3 generations), capped at 100 years
            cr_risks_species = p_e_year(np.minimum(np.maximum([10] * len(gl_array), 3 * gl_array), 100), 0.5)
            # replace all NaN values with the standard CR extinction risk
            n_nan = len(cr_risks_species[cr_risks_species != cr_risks_species])
            cr_risks_species[cr_risks_species != cr_risks_species] = [p_e_year(10, 0.5)] * n_nan
            cr_risks.append(cr_risks_species)
        cr_risks = np.array(cr_risks)
    else:
        dims = 1
        cr_risks = np.array([[p_e_year(10, 0.5)]] * len(species_list))
    cr_risks_df = make_empty_rate_df(species_list, dims, 'CR')
    cr_risks_df.iloc[:, 1:] = cr_risks
    cr_risks_df.to_csv(os.path.join(outdir, 'cr_extinction_risks_all_species.txt'), sep='\t', index=False, float_format='%.12f')
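    # Hedged sketch: `p_e_year` is defined elsewhere in this script/package. From
    # its use above, it plausibly converts a cumulative extinction probability
    # p_ext over t years into a constant yearly probability, i.e. it solves
    # (1 - p_year)**t = 1 - p_ext:
    #
    #     def p_e_year(t, p_ext):
    #         return 1 - (1 - p_ext) ** (1 / np.asarray(t, dtype=float))
    #
    # This is an assumption about the hidden implementation, not the original code.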
    if args.species_specific_regression:
        # regress the LC, NT, and VU extinction probabilities from the EN and CR risks
        print('Fitting species-specific regression function to determine LC, NT, and VU extinction probabilities ...')
        vu_risks_df = make_empty_rate_df(species_list, dims, 'VU')
        nt_risks_df = make_empty_rate_df(species_list, dims, 'NT')
        lc_risks_df = make_empty_rate_df(species_list, dims, 'LC')
        for i, species in enumerate(cr_risks_df.species.values):
            en_risks = en_risks_df.iloc[i, 1:].values
            cr_risks = cr_risks_df.iloc[i, 1:].values
            vu_risks = []
            nt_risks = []
            lc_risks = []
            for j, _ in enumerate(en_risks):
                en_prob = en_risks[j]
                cr_prob = cr_risks[j]
                # the threat categories are coded as x = 1 (LC) through 5 (CR)
                x = [4., 5.]
                y = [en_prob, cr_prob]
                # fit the power function to the two data points of each species (EN and CR risk)
                with warnings.catch_warnings():
                    # suppress the warning curve_fit raises when fitting a function to only
                    # 2 points: "OptimizeWarning: Covariance of the parameters could not be estimated"
                    warnings.filterwarnings("ignore")
                    a_b = curve_fit(power_function, x, y)
                # extract the fitted values for a and b
                a = a_b[0][0]
                b = a_b[0][1]
                # get the yearly extinction probabilities for LC, NT, and VU
                p_year_LC = power_function(1, a, b)
                p_year_NT = power_function(2, a, b)
                p_year_VU = power_function(3, a, b)
                vu_risks.append(p_year_VU)
                nt_risks.append(p_year_NT)
                lc_risks.append(p_year_LC)
            vu_risks_df.iloc[vu_risks_df[vu_risks_df.species == species].index.values[0], 1:] = np.array(vu_risks)
            nt_risks_df.iloc[nt_risks_df[nt_risks_df.species == species].index.values[0], 1:] = np.array(nt_risks)
            lc_risks_df.iloc[lc_risks_df[lc_risks_df.species == species].index.values[0], 1:] = np.array(lc_risks)
        vu_risks_df.to_csv(os.path.join(outdir, 'vu_extinction_risks_all_species.txt'), sep='\t', index=False, float_format='%.12f')
        nt_risks_df.to_csv(os.path.join(outdir, 'nt_extinction_risks_all_species.txt'), sep='\t', index=False, float_format='%.12f')
        lc_risks_df.to_csv(os.path.join(outdir, 'lc_extinction_risks_all_species.txt'), sep='\t', index=False, float_format='%.12f')
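# Hedged sketch: `power_function` is imported from elsewhere. The two-point fit
# above (x = 4 for EN, x = 5 for CR on the threat scale) suggests a simple power
# law; the exact form below is an assumption about the hidden implementation.
def power_function_sketch(x, a, b):
    return a * x ** b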
#__________________________________________________________________________
# populate q-matrices______________________________________________________
print("\nPopulating species-specific q-matrices ...")
sampled_rates_df.index = sampled_rates_df.status_change.values
if extinction_probs_mode == 0:
    transition_rates = sampled_rates_df.iloc[:, 1:]
    # randomly sample the CR and EN extinction probs to be used in the q-matrices
    if n_rep <= dims:
        sample_columns = np.random.choice(np.arange(dims), size=n_rep, replace=False)
    else:
        # there are only as many CR and EN p(ex) estimates as provided GL values, so
        # some may have to be resampled (while making sure all are present at least once)
        sample_columns1 = np.random.choice(np.arange(dims), size=dims, replace=False)
        sample_columns2 = np.random.choice(np.arange(dims), size=(n_rep - dims), replace=True)
        sample_columns = np.concatenate([sample_columns1, sample_columns2])
    # get the corresponding EN and CR extinction-risk columns
    cr_risks_selection = cr_risks_df.iloc[:, 1:].values[:, sample_columns]
    en_risks_selection = en_risks_df.iloc[:, 1:].values[:, sample_columns]
    # same for the VU, NT, and LC categories if species_specific_regression is activated
    if args.species_specific_regression:
        vu_risks_selection = vu_risks_df.iloc[:, 1:].values[:, sample_columns]
        nt_risks_selection = nt_risks_df.iloc[:, 1:].values[:, sample_columns]
        lc_risks_selection = lc_risks_df.iloc[:, 1:].values[:, sample_columns]
elif extinction_probs_mode == 1:
    # split the sampled rates into extinction probabilities (transitions ending in EX)
    # and the 30 transition rates among the six living states
    target_keys = [i for i in sampled_rates_df.status_change.values if i[-2:] == 'EX']
    ex_probs = sampled_rates_df[sampled_rates_df.status_change.isin(target_keys)].iloc[:-1, 1:].values.T
    transition_rates = sampled_rates_df[~sampled_rates_df.status_change.isin(target_keys)].iloc[:30, 1:]
for i in np.arange(n_rep):
    rates_i = transition_rates.iloc[:, i]
    sys.stdout.write('\rProgress: %i %%' % int(((i + 1) / n_rep) * 100))
    # for each replicate i, create a list of q-matrices, one for each species
    if extinction_probs_mode == 0:
        cr_risks_rep = cr_risks_selection[:, i]
        en_risks_rep = en_risks_selection[:, i]
        if args.species_specific_regression:
            vu_risks_rep = vu_risks_selection[:, i]
            nt_risks_rep = nt_risks_selection[:, i]
            lc_risks_rep = lc_risks_selection[:, i]
        q_matrix_list_i = []
        for j, __ in enumerate(species_list):
            en_risk = en_risks_rep[j]
            cr_risk = cr_risks_rep[j]
            if args.species_specific_regression:
                lc_nt_vu = [lc_risks_rep[j], nt_risks_rep[j], vu_risks_rep[j]]
            else:
                # default category-specific yearly extinction probabilities for LC, NT,
                # and VU, calculated from the IUCN definition of each category
                lc_nt_vu = [0.000000155728, 0.000041551152, 0.001053050310]
            status_specific_p_e = np.array(lc_nt_vu + [en_risk, cr_risk])
            q_matrix = cust_func.qmatrix(rates_i, status_specific_p_e)
            q_matrix_list_i.append([q_matrix])
    elif extinction_probs_mode == 1:
        # in this mode the extinction probabilities are shared by all species
        status_specific_p_e = ex_probs[i]
        q_matrix = cust_func.qmatrix(rates_i, status_specific_p_e)
        q_matrix_list_i = []
        for spec in species_list:
            q_matrix_list_i.append([q_matrix])
    q_matrix_list_i_copy = q_matrix_list_i.copy()
    if i == 0:
        qmatrix_list_dict = dict(zip(list(species_list), q_matrix_list_i_copy)).copy()
    else:
        # note: the comprehension index is the species index, not the replicate index i
        update_dict = [qmatrix_list_dict[species].append(q_matrix_list_i_copy[spec_num][0]) for spec_num, species in enumerate(list(species_list))]
print('\n')
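# Hedged sketch: `cust_func.qmatrix` is not shown in this file. From its use
# above it plausibly builds a generator (Q-) matrix over the six living states
# [LC, NT, VU, EN, CR, DD] from the 30 sampled off-diagonal transition rates,
# with the status-specific extinction probabilities carried alongside for the
# simulation step. The row-wise rate ordering below is an assumption.
def qmatrix_sketch(rates, status_specific_p_e, n_states=6):
    rates = np.asarray(rates, dtype=float)
    q = np.zeros((n_states, n_states))
    k = 0
    for a in range(n_states):
        for b in range(n_states):
            if a != b:
                q[a, b] = rates[k]
                k += 1
    np.fill_diagonal(q, -q.sum(axis=1))  # rows of a generator matrix sum to zero
    return q, np.asarray(status_specific_p_e)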
#__________________________________________________________________________
# get transition rates for DD______________________________________________
dd_changes = []
dd_rates = []
for row_id, change_type in enumerate(transition_rates.index.values):
    states = change_type.split('->')
    if states[0] == 'DD':
        dd_changes.append('-'.join(states))
        rates = transition_rates[transition_rates.index == change_type].values
        dd_rates.append(rates[0])
# normalize the DD rates into probabilities of moving from DD into each other state
dd_probs = dd_rates / sum(np.array(dd_rates))
#__________________________________________________________________________
# finally, write all the compiled info to a pickle file____________________
species_specific_data = [[species, current_status[i], qmatrix_list_dict[species]] for i, species in enumerate(species_list)]
final_output_data = [species_specific_data, dd_probs]
cust_func.save_obj(final_output_data, os.path.join(outdir, 'simulation_input_data.pkl'))
#__________________________________________________________________________
# Source: AmEl_Tourism/application/routes/actions/admin/user_actions.py
# Repo: Amirelkanov/AmEl_Tourism (license: Apache-2.0)
# -*- coding: utf-8 -*-
""" User related actions """
from flask import Blueprint, render_template, redirect, abort
from ....data.users import User
from ....extensions.init_models import db
from ....extensions.is_admin_decorator import is_user_admin
# User actions Blueprint registration
user_actions = Blueprint('user_actions', __name__)
@user_actions.route('/users', endpoint='users')
@user_actions.route('/users/sort_by/<string:sort_by_element>/'
'is_reversed=<int:is_reversed>', endpoint='users')
@is_user_admin
def users(sort_by_element: str = 'id', is_reversed: int = 0):
""" Showing page with list of users
:param sort_by_element: element to sort the user table by
:param is_reversed: whether sorting is reversed
"""
sorting_elem: str = User.__dict__.get(sort_by_element, 'id')
order_by_arg = db.desc(sorting_elem) if is_reversed else sorting_elem
return render_template('Admin/users.html', title='Список пользователей',
columns_name=User.user_columns_name,
sort_by_element=sort_by_element,
is_reversed=(is_reversed, int(not is_reversed)),
list_of_users=User.query.order_by(
order_by_arg).all())
@user_actions.route('/admin_setting/<string:action>/<int:user_id>')
@is_user_admin
def admin_setting(action: str, user_id: int):
""" Setting user role (admin / not admin)
:param action: add admin / do not add
:param user_id: user id in the database
"""
user = User.query.filter(User.id == user_id).first()
if user and action in ['add', 'remove']:
# Adding and committing changes to the DB
user.is_admin = True if action == 'add' else False
db.session.add(user)
db.session.commit()
return redirect('/users')
abort(404)
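
# Hedged sketch: `is_user_admin` lives in ....extensions.is_admin_decorator and
# is not shown here. A minimal implementation consistent with how it is used
# above could look like the following; the reliance on flask_login's
# current_user is an assumption about how this app tracks sessions.
from functools import wraps

from flask_login import current_user


def is_user_admin_sketch(view):
    @wraps(view)
    def wrapped(*args, **kwargs):
        # reject anonymous users and authenticated non-admins alike
        if not (current_user.is_authenticated and getattr(current_user, 'is_admin', False)):
            abort(403)
        return view(*args, **kwargs)
    return wrapped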
# Source: models/eligibility_trace_tf/ai/legacy/maczikasz/tf/ai_self_tf.py
# Repo: qLience/AI-Pump-for-Underfloor-Heating-systems (license: MIT)
import shutil
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from future.utils import lmap
HIDDEN_LAYER_SIZE = 30
class Dqn():
def __init__(self, input_size, nb_action, gamma):
try:
shutil.rmtree("train/")
except OSError:
print("")
self.reward_window = []
self.gamma = gamma
self.last_action = 0
self.last_state = np.zeros(input_size)
self.num_action = nb_action
self.input_tensor = tf.placeholder(shape=[None, input_size], dtype=tf.float32)
self.fc1 = slim.fully_connected(inputs=self.input_tensor, num_outputs=30, activation_fn=tf.nn.relu, scope="fc1")
self.fc2 = slim.fully_connected(inputs=self.fc1, num_outputs=30, activation_fn=tf.nn.relu, scope="fc2")
self.q = slim.fully_connected(inputs=self.fc2, num_outputs=nb_action, activation_fn=None, scope="q")
self.softmax = slim.softmax(self.q * 70, scope="softmax")
slim.summary.tensor_summary("softmax", self.softmax)
self.chosen_action = tf.argmax(self.softmax, axis=1)
self.action = tf.placeholder(shape=[300], dtype=tf.int32)
self.target = tf.placeholder(shape=[300], dtype=tf.float32)
self.hot = slim.one_hot_encoding(self.action, self.num_action, scope="one_hot")
self.predictions = tf.reduce_sum(self.hot * self.q, axis=1)
self.loss = tf.reduce_sum((self.predictions - self.target) ** 2)
self.optimizer = slim.train.AdamOptimizer()
self.training = slim.learning.create_train_op(total_loss=self.loss, optimizer=self.optimizer,
summarize_gradients=True)
sess = tf.Session()
self.sess = sess
self.ctr = 0
self.summary_op = slim.summary.merge_all()
self.train_writer = tf.summary.FileWriter('train/', sess.graph)
init = tf.global_variables_initializer()
self.saver = tf.train.Saver()
self.sess.run(init)
def calculate_transition_reward(self, transition):
def decay_reward(tup):
r, i = tup
return self.gamma ** i * r
return sum(lmap(decay_reward, zip(lmap(lambda t: t.reward, reversed(transition[:-1])), range(transition.n - 1))))
def learn_from_transitions(self, transitions):
states = np.array(lmap(lambda transition: transition[0].state, transitions))
next_stateQs = self.sess.run(self.q, feed_dict={
self.input_tensor: np.array(lmap(lambda transition: transition[-1].next_state, transitions))})
rewards = np.array(lmap(self.calculate_transition_reward, transitions))
actions = np.array(lmap(lambda transition: transition[0].action.index, transitions))
next_max_qs = next_stateQs.max(1)
target = ((self.gamma ** len(transitions)) * next_max_qs) + rewards
predictions, loss, training, summary = self.sess.run(
[self.predictions, self.loss, self.training, self.summary_op],
feed_dict={
self.input_tensor: states,
self.action: actions,
self.target: target})
self.train_writer.add_summary(summary)
def update(self, new_signal):
q_orig, softmax, action_value = self.sess.run([self.q, self.softmax, self.chosen_action],
feed_dict={self.input_tensor: [new_signal]})
action = action_value[0]
return action
def append_reward(self, reward):
self.reward_window.append(reward)
def score(self):
return sum(self.reward_window) / len(self.reward_window) + 1.
def save(self, filename):
self.saver.save(self.sess, filename)
def load(self, filepath):
if os.path.exists(filepath):
print("===>> Loading checkpoint ...")
filepath, _ = os.path.splitext(filepath)
self.saver = tf.train.import_meta_graph(filepath + ".meta")
# dir, filename = os.path.split(filepath)
self.saver.restore(self.sess, filepath)
print("Loaded checkpoint")
else:
print("Nothing to load")
# Source: jetblack-auth/src/jetblack_auth/server.py
# Repo: rob-blackbourn/scratch-python (license: MIT)
import bareasgi_jinja2
from easydict import EasyDict as edict
import jinja2
import pkg_resources
import yaml
from .yaml import initialize_types
from .auth_controller import AuthController
from .auth_service import AuthService
from .jwt_authentication import JwtAuthentication
def make_app(port: int) -> Application:
initialize_types()
with open(pkg_resources.resource_filename('jetblack_auth', 'config.yml'), 'rt') as fp:
config = edict(yaml.load(fp))
templates_folder = pkg_resources.resource_filename('jetblack_auth', 'templates')
app = Application()
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(templates_folder),
autoescape=jinja2.select_autoescape(['html', 'xml']),
enable_async=True
)
bareasgi_jinja2.add_jinja2(app, env)
# cookie_name = 'jetblack-auth'
# secret = 'trustno1'
# token_expiry = timedelta(hours=1)
# login_expiry = timedelta(days=1)
# domain = 'jetblack.net'
# auth_host = '127.0.0.1'
# path_prefix = '/auth'
token_renewal_path = config.path_prefix + config.token_renewal_path
auth_service = AuthService()
authenticator = JwtAuthentication(config.cookie_name, config.secret, config.auth_host, port, token_renewal_path)
auth_controller = AuthController(
config.path_prefix,
config.cookie_name,
config.token_expiry,
config.login_expiry,
config.domain,
config.secret,
config.auth_service,
authenticator)
auth_controller.add_routes(app)
return app
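
# Hedged usage sketch (uvicorn and the port are illustrative assumptions; any
# ASGI server should work, since bareasgi Applications are plain ASGI apps):
#
#     import uvicorn
#     app = make_app(10101)
#     uvicorn.run(app, host='127.0.0.1', port=10101)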
# Source: appoppy/zernike_mask.py
# Repo: lbusoni/appoppy (license: MIT)
import poppy
from poppy.optics import AnalyticImagePlaneElement
from poppy.poppy_core import Wavefront, PlaneType, BaseWavefront
from astropy import units as u
class ZernikeMaskWFS(object):
def __init__(self,
name='Zernike Mask WFS',
radius_in_arcsec=0.010,
phase_delay=np.pi / 2):
self.name = name
self._radius_in_arcsec = radius_in_arcsec
self._phase_delay = phase_delay
def add_to_system(self, osys, index):
osys.add_pupil(poppy.FQPM_FFT_aligner(), index=index)
osys.add_image(index=index + 1)
osys.add_image(ZernikeMask(
name=self.name,
phase_delay=self._phase_delay,
radius=self._radius_in_arcsec),
index=index + 2)
osys.add_pupil(poppy.FQPM_FFT_aligner(direction='backward'),
index=index + 3)
class ZernikeMask(AnalyticImagePlaneElement):
""" Defines a Zernike Mask field stop with an inner region of a given _radius and phase delay
Parameters
------------
name : string
Descriptive name
phase_delay : float, default=pi/2
phase delay of the inner region in radians
radius : float, default=1.0
Radius of the phase delayed region in arcsec
"""
def __init__(self,
name="unnamed pinhole field stop",
phase_delay=np.pi / 2,
radius=1.0,
**kwargs):
AnalyticImagePlaneElement.__init__(self, **kwargs)
self.name = name
self._phase_delay = phase_delay
self._radius = radius
self._default_display_size = 10 * u.arcsec # radius_outer
def get_opd(self, wave):
""" Compute the OPD [m] appropriate for a Zernike Mask
"""
if not isinstance(wave, Wavefront): # pragma: no cover
raise ValueError("get_opd must be called with "
"a Wavefront to define the spacing")
assert (wave.planetype == PlaneType.image)
print(wave)
if isinstance(wave, BaseWavefront):
wavelength = wave.wavelength
else:
wavelength = wave
radians2meter = wavelength.to(u.meter).value / (2. * np.pi)
y, x = self.get_coordinates(wave)
r = np.sqrt(x ** 2 + y ** 2)
opd = np.zeros(wave.shape, dtype=np.float)
w_inside = np.where(r <= self._radius)
opd[w_inside] = self._phase_delay * radians2meter
return opd
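
# Hedged usage sketch (aperture size, wavelength, and plane index below are
# illustrative assumptions): ZernikeMaskWFS.add_to_system inserts four planes
# (FQPM FFT aligner, intermediate image, the Zernike mask, backward aligner)
# into an existing poppy OpticalSystem.
#
#     osys = poppy.OpticalSystem()
#     osys.add_pupil(poppy.CircularAperture(radius=4.1))  # an 8.2 m class pupil
#     ZernikeMaskWFS(radius_in_arcsec=0.010).add_to_system(osys, index=1)
#     psf = osys.calc_psf(wavelength=2.2e-6)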