Dataset schema (one row per file; nullable columns may contain ⌀/null):

| column | type | range / classes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 1 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 239 |
| max_stars_repo_name | string | length 5 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 239 |
| max_issues_repo_name | string | length 5 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 239 |
| max_forks_repo_name | string | length 5 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 1 to 1.03M |
| avg_line_length | float64 | 1 to 958k |
| max_line_length | int64 | 1 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
hexsha: 794a73620f5d067b364940326f363e8479ab68b5 | size: 6,697 | ext: py | lang: Python
max_stars/max_issues/max_forks: src/stratis_cli/_actions/_formatting.py | lleshchi/stratis-cli @ 6e847d918d075dfd9548c66dd37a9b96b71e8f7d | ["Apache-2.0"] | count: null
content:
# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Formatting for tables.
"""
# isort: STDLIB
import sys
from .._errors import StratisCliPropertyNotFoundError
# If the wcwidth package is not available, the wcswidth function will not
# be available either. In that case, use the standard function len where
# wcswidth would otherwise be used. Since len counts the number of
# _characters_ in a string rather than its width in cells, text containing
# characters that occupy more or less than one cell will, in the general
# case, not be properly aligned in the column output. The wcwidth package
# may not be available in every distribution due to the non-local nature of
# its installation mechanism, which builds functions dynamically from
# tables made available online at www.unicode.org.
# Disable coverage for the conditional import. We do not want our coverage
# result to depend on whether wcwidth is available, as our tests might be
# run, and succeed, either with or without it.
try:
from wcwidth import wcswidth
maybe_wcswidth = wcswidth # pragma: no cover
except ImportError: # pragma: no cover
maybe_wcswidth = len
# placeholder for tables where a desired value was not obtained from stratisd
# when the value should be supported.
TABLE_FAILURE_STRING = "FAILURE"
def fetch_property(object_type, props, name, to_repr):
"""
Get a representation of a property fetched through FetchProperties interface
:param object_type: string representation of object type implementing FetchProperties
:type object_type: str
:param props: dictionary of property names mapped to values
:type props: dict of strs to (bool, object)
:param name: the name of the property
:type name: str
:param to_repr: function expecting one object argument to convert to some type
:type to_repr: function(object) -> object
:returns: object produced by to_repr or None
:raises StratisCliPropertyNotFoundError:
"""
# Disable coverage for failure of the engine to successfully get a value
    # or for a property not existing for a specified key. We cannot force the
    # engine error easily and should not force it in these CLI tests. A KeyError
# can only be raised if there is a bug in the code or if the version of
# stratisd being run is not compatible with the version of the CLI being
# tested. We expect to avoid those conditions, and choose not to test for
# them.
try:
(success, variant) = props[name]
if not success:
return None # pragma: no cover
return to_repr(variant)
except KeyError: # pragma: no cover
raise StratisCliPropertyNotFoundError(object_type, name)
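# A usage sketch for fetch_property (the property names and values below are
# illustrative assumptions, not actual stratisd properties):
#
#   props = {"TotalPhysicalSize": (True, "8589934592")}
#   fetch_property("pool", props, "TotalPhysicalSize", str)  # -> "8589934592"
#
#   props = {"TotalPhysicalUsed": (False, "engine error")}
#   fetch_property("pool", props, "TotalPhysicalUsed", str)  # -> None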
def _get_column_len(column_width, entry_len, entry_width):
"""
From the desired column width in cells and the item to be printed,
calculate the required number of characters to pass to the format method.
In order to get the correct width in chars it is necessary to subtract
the number of cells above 1 (or add the number of cells below 1) that
an individual character occupies.
:param int column_width: the column width, in cells
:param int entry_len: the entry len, in characters
:param int entry_width: the entry width, in cells
:returns: the column width in characters
Note that if wcswidth has defaulted to len,
entry_width == entry_len, so the result is always column_width.
Precondition: entry_width != -1
(equivalently, entry has no unprintable characters)
"""
return column_width - (entry_width - entry_len)
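# Worked example (illustrative): a fullwidth character such as "ｗ" has
# entry_len == 1 but entry_width == 2. With column_width == 10 this gives
# 10 - (2 - 1) == 9, so format() pads to 9 characters, which render as the
# intended 10 cells.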
def _print_row(file, row, row_widths, column_widths, column_alignments):
"""
Print a single row in a table. The row might be the header row, or
a row of data items.
:param file: file to print to
:param list row: the list of items to print
:param list row_widths: the list of wcswidth for the row
:param list column_widths: corresponding list of column widths
:param list column_alignments: corresponding list of column alignment specs
Precondition: len(row) == len(column_widths) == len(alignment)
Precondition: no elements of row have unprintable characters
"""
entries = []
for index, entry in enumerate(row):
column_len = _get_column_len(
column_widths[index], len(entry), row_widths[index]
)
entries.append(
"{0:{align}{width}}".format(
entry, align=column_alignments[index], width=column_len
)
)
print(" ".join(entries), end="", file=file)
def print_table(column_headings, row_entries, alignment, file=sys.stdout):
"""
Given the column headings and the row_entries, print a table.
Align according to alignment specification and always pad with 2 spaces.
:param column_headings: the column headings
:type column_headings: list of str
:param row_entries: a list of the row entries
:type row_entries: list of list of str
:param alignment: the alignment indicator for each key, '<', '>', '^', '='
:type alignment: list of str
:param file: file to print too
:type file: writeable stream
Precondition: len(column_headings) == len(alignment) == len of each entry
in row_entries.
Precondition: all(wcswidth(h) != -1 for h in column_headings)
all(wcswidth(i) != -1 for row in rows for item in row)
(i.e., no items to be printed contain unprintable characters)
"""
column_widths = [0] * len(column_headings)
cell_widths = []
    # The column header row is treated like any other row, so insert it into rows.
row_entries.insert(0, column_headings)
for row_index, row in enumerate(row_entries):
cell_widths.append([])
for column_index, cell in enumerate(row):
cell_width = maybe_wcswidth(cell)
cell_widths[row_index].append(cell_width)
column_widths[column_index] = max(column_widths[column_index], cell_width)
for row, row_widths in zip(row_entries, cell_widths):
_print_row(file, row, row_widths, column_widths, alignment)
print(file=file)
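# A minimal usage sketch (the table contents are illustrative assumptions):
#
#   print_table(
#       ["POOL", "SIZE"],
#       [["mypool", "32 GiB"], ["データ", "8 GiB"]],
#       ["<", ">"],
#   )
#
# With wcwidth installed, the double-width characters in "データ" count as six
# cells rather than three characters, so the columns stay aligned; with the
# len fallback they would drift.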
avg_line_length: 39.863095 | max_line_length: 89 | alphanum_fraction: 0.712259

hexsha: 794a752312a17bd3f67c43b7bf61de444163206c | size: 2,234 | ext: py | lang: Python
max_stars: azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/azure_reachability_report_parameters_py3.py | JonathanGailliez/azure-sdk-for-python @ f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | ["MIT"] | count: 1 | 2022-03-30T22:39:15.000Z to 2022-03-30T22:39:15.000Z
max_issues: (same path/repo) | count: 54 | 2016-03-25T17:25:01.000Z to 2018-10-22T17:27:54.000Z
max_forks: (same path/repo) | count: 2 | 2017-01-20T18:25:46.000Z to 2017-05-12T21:31:47.000Z
content:
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AzureReachabilityReportParameters(Model):
"""Geographic and time constraints for Azure reachability report.
All required parameters must be populated in order to send to Azure.
:param provider_location: Required.
:type provider_location:
~azure.mgmt.network.v2017_09_01.models.AzureReachabilityReportLocation
:param providers: List of Internet service providers.
:type providers: list[str]
:param azure_locations: Optional Azure regions to scope the query to.
:type azure_locations: list[str]
:param start_time: Required. The start time for the Azure reachability
report.
:type start_time: datetime
:param end_time: Required. The end time for the Azure reachability report.
:type end_time: datetime
"""
_validation = {
'provider_location': {'required': True},
'start_time': {'required': True},
'end_time': {'required': True},
}
_attribute_map = {
'provider_location': {'key': 'providerLocation', 'type': 'AzureReachabilityReportLocation'},
'providers': {'key': 'providers', 'type': '[str]'},
'azure_locations': {'key': 'azureLocations', 'type': '[str]'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
}
def __init__(self, *, provider_location, start_time, end_time, providers=None, azure_locations=None, **kwargs) -> None:
super(AzureReachabilityReportParameters, self).__init__(**kwargs)
self.provider_location = provider_location
self.providers = providers
self.azure_locations = azure_locations
self.start_time = start_time
self.end_time = end_time
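# A construction sketch (values are illustrative; AzureReachabilityReportLocation
# is the companion model referenced in the docstring above, and its 'country'
# field is an assumption here):
#
#   import datetime
#   from azure.mgmt.network.v2017_09_01.models import AzureReachabilityReportLocation
#
#   parameters = AzureReachabilityReportParameters(
#       provider_location=AzureReachabilityReportLocation(country='United States'),
#       providers=['Contoso ISP'],
#       start_time=datetime.datetime(2017, 9, 1),
#       end_time=datetime.datetime(2017, 9, 10),
#   )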
avg_line_length: 40.618182 | max_line_length: 123 | alphanum_fraction: 0.645927

hexsha: 794a7776eed9a561853be3aa1ac2058be78d9548 | size: 3,263 | ext: py | lang: Python
max_stars: tests/test_passportelementerrorfrontside.py | ehsanbarkhordar/botcup @ 4e45c3df2dceb8afe3833c0e89813fa9493295ed | ["MIT"] | count: 1 | 2019-10-22T03:46:17.000Z to 2019-10-22T03:46:17.000Z
max_issues: python-telegram-bot/tests/test_passportelementerrorfrontside.py | shyguy-ry/paddingCheckBot @ d0a60cc2f397b9b8e4d60bdea699a94beaff2ea1 | ["Apache-2.0"] | count: null
max_forks: (same as max_issues) | count: null
content:
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2018
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import pytest
from telegram import PassportElementErrorFrontSide, PassportElementErrorSelfie
@pytest.fixture(scope='class')
def passport_element_error_front_side():
return PassportElementErrorFrontSide(TestPassportElementErrorFrontSide.type,
TestPassportElementErrorFrontSide.file_hash,
TestPassportElementErrorFrontSide.message)
class TestPassportElementErrorFrontSide(object):
source = 'front_side'
type = 'test_type'
file_hash = 'file_hash'
message = 'Error message'
def test_expected_values(self, passport_element_error_front_side):
assert passport_element_error_front_side.source == self.source
assert passport_element_error_front_side.type == self.type
assert passport_element_error_front_side.file_hash == self.file_hash
assert passport_element_error_front_side.message == self.message
def test_to_dict(self, passport_element_error_front_side):
passport_element_error_front_side_dict = passport_element_error_front_side.to_dict()
assert isinstance(passport_element_error_front_side_dict, dict)
assert (passport_element_error_front_side_dict['source']
== passport_element_error_front_side.source)
assert (passport_element_error_front_side_dict['type']
== passport_element_error_front_side.type)
assert (passport_element_error_front_side_dict['file_hash']
== passport_element_error_front_side.file_hash)
assert (passport_element_error_front_side_dict['message']
== passport_element_error_front_side.message)
def test_equality(self):
a = PassportElementErrorFrontSide(self.type, self.file_hash, self.message)
b = PassportElementErrorFrontSide(self.type, self.file_hash, self.message)
c = PassportElementErrorFrontSide(self.type, '', '')
d = PassportElementErrorFrontSide('', self.file_hash, '')
e = PassportElementErrorFrontSide('', '', self.message)
f = PassportElementErrorSelfie(self.type, self.file_hash, self.message)
assert a == b
assert hash(a) == hash(b)
assert a is not b
assert a != c
assert hash(a) != hash(c)
assert a != d
assert hash(a) != hash(d)
assert a != e
assert hash(a) != hash(e)
assert a != f
assert hash(a) != hash(f)
avg_line_length: 40.7875 | max_line_length: 92 | alphanum_fraction: 0.708857

hexsha: 794a77a793fbaa20f2ce0c65a5b80adfe30c9548 | size: 2,005 | ext: py | lang: Python
max_stars: QiskitTestProject/methods.py | chicknmilk/Quantum-Phase-Estimation @ 91c83a13155d7e5fee869a5a874d4edf713fc7a5 | ["MIT"] | count: 2 | 2021-07-27T21:06:23.000Z to 2021-07-27T21:28:59.000Z
max_issues: (same path/repo) | count: null
max_forks: (same path/repo) | count: null
content:
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit.circuit.library import QFT
from qiskit import execute
from qiskit import Aer
from qiskit.test.mock import FakeMontreal
from qiskit import IBMQ
import math
def QPE(oracle, circuit, register, ancilla):
# initialize registers
circuit.h(register)
circuit.x(ancilla)
# repeatedly apply oracle
for i in range(len(register)):
for j in range(2 ** i):
oracle(circuit, register[i], ancilla)
# inverse QFT
qft_dagger(circuit, len(register))
def QPEmeasure(oracle, circuit, register, ancilla, classical_register, simulation=True):
# call QPE subroutine
QPE(oracle, circuit, register, ancilla)
    circuit.measure(register, classical_register)
# print the circuit diagram
# print(circuit)
# initialize output
result = None
# run circuit on simulator/quantum computer
if simulation:
        # change the simulator by switching which of the two lines below is uncommented
simulator = Aer.get_backend('aer_simulator')
# simulator = FakeMontreal()
        job = execute(circuit, simulator, shots=1024)
        result = job.result()
else:
provider = IBMQ.load_account()
backend = provider.get_backend('ibmq_santiago')
run = execute(circuit, backend, shots=1024)
result = run.result()
# retrieve data and return estimated theta
counts = result.get_counts(circuit)
ret = []
for(measured_state, count) in counts.items():
for i in range(count):
ret.append(int(measured_state, 2) / (2 ** len(register)))
return ret
def qft_dagger(qc, n):
"""n-qubit QFTdagger the first n qubits in circ"""
# Don't forget the Swaps!
for qubit in range(n//2):
qc.swap(qubit, n-qubit-1)
for j in range(n):
for m in range(j):
qc.cp(-math.pi/float(2**(j-m)), m, j)
qc.h(j)
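# Usage sketch (the controlled-T oracle and register sizes below are
# assumptions, not part of this project). T has eigenvalue exp(2*pi*i/8) on
# |1>, the state QPE prepares in the ancilla, so the returned estimates
# should cluster near theta = 0.125:
#
#   def t_oracle(circuit, control, ancilla):
#       circuit.cp(math.pi / 4, control, ancilla[0])
#
#   register = QuantumRegister(3, 'count')
#   ancilla = QuantumRegister(1, 'psi')
#   classical = ClassicalRegister(3, 'c')
#   circuit = QuantumCircuit(register, ancilla, classical)
#   thetas = QPEmeasure(t_oracle, circuit, register, ancilla, classical)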
avg_line_length: 27.465753 | max_line_length: 97 | alphanum_fraction: 0.654364

hexsha: 794a77e58de3b05d706997ba75f9dbe0f3491c2d | size: 3,835 | ext: py | lang: Python
max_stars/max_issues/max_forks: fsdk/analysis/emotion/compare.py | luigivieira/fsdk @ 3d3b04bcde5f72c95044caa637114368f9818979 | ["MIT"] | count: null
content:
#!/usr/bin/env python
#
# This file is part of the Fun SDK (fsdk) project. The complete source code is
# available at https://github.com/luigivieira/fsdk.
#
# Copyright (c) 2016-2017, Luiz Carlos Vieira (http://www.luiz.vieira.nom.br)
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
#sns.set_style("whitegrid")
#---------------------------------------------
def main(argv):
"""
Main entry of this script.
Parameters
------
argv: list of str
Arguments received from the command line.
"""
expected = np.genfromtxt('ratings.csv', dtype='str',
delimiter=',', skip_header=1)
predicted = np.genfromtxt('detected.csv', dtype='str',
delimiter=',', skip_header=1)
rights = 0
wrongs = 0
fails = 0
for fileName, label in expected:
idx = np.argwhere(predicted[:, 0] == fileName)
if len(idx):
i = idx[0][0]
expectedLabel = int(label)
#predictedLabel = int(float(predicted[i, 1]))
values = predicted[i, 1:].astype(float).tolist()
predictedLabel = values.index(max(values))
print(values)
print(predictedLabel)
print('{}: {} x {}'.format(fileName, expectedLabel, predictedLabel))
if expectedLabel == predictedLabel:
rights += 1
else:
wrongs += 1
else:
fails += 1
print('\n')
print('Rights: {}'.format(rights))
print('Wrongs: {}'.format(wrongs))
print('Fails: {}'.format(fails))
    return 0

    # NOTE: the code below is unreachable (it follows the return statement
    # above) and references arrays (ann, det, fne, fpo) that are never
    # defined in this function; it appears to be leftover plotting code from
    # a blink-detector evaluation.
    s = [50 for i in range(5000)]
fig = plt.figure()
ann = plt.scatter(ann[:, 0], ann[:, 1], c='g', marker='o', s=s,
label='Manually annotated blinks')
det = plt.scatter(det[:, 0], det[:, 1], c='b', marker='o', s=s,
label='Automatically detected blinks')
fne = plt.scatter(fne[:, 0], fne[:, 1], c='g', marker='v', s=s,
label='False negatives')
fpo = plt.scatter(fpo[:, 0], fpo[:, 1], c='b', marker='^', s=s,
label='False positives')
plt.xlim([0, 5001])
plt.xticks([i for i in range(0, 5001, 1000)])
plt.ylim([0, 0.6])
plt.xlabel('Frame number', fontsize=15)
plt.yticks([])
plt.legend(handles=[ann, det, fne, fpo], fontsize=10)
mng = plt.get_current_fig_manager()
mng.window.state('zoomed')
plt.suptitle('Evaluation of the Blink Detector', fontsize=30)
plt.show()
#---------------------------------------------
# namespace verification for invoking main
#---------------------------------------------
if __name__ == '__main__':
main(sys.argv[1:])
avg_line_length: 32.777778 | max_line_length: 80 | alphanum_fraction: 0.597653

hexsha: 794a78a5dd34a5bdf88e1a47e0b099962cd27858 | size: 333 | ext: py | lang: Python
max_stars/max_issues/max_forks: pythonclub/club/urls.py | janainfanger/itc240-webapp-1 @ 0e9f9b0e12d82bf49ed1996ef2ed4ea21d3adc23 | ["Apache-2.0"] | count: null
content:
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index' ),
path('resources/', views.resources, name='resources'),
path('getmeetings/', views.getmeetings, name='getmeetings'),
    path('meetingdetail/<int:id>', views.meetingdetail, name='details')  # the <int:id> converter captures the meeting id from the URL; see the sketch below
]
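# A matching view signature sketch (hypothetical; the real implementation
# lives in club/views.py, and the Meeting model name is an assumption):
#
#   def meetingdetail(request, id):
#       meeting = Meeting.objects.get(pk=id)
#       return render(request, 'club/meetingdetail.html', {'meeting': meeting})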
avg_line_length: 37 | max_line_length: 111 | alphanum_fraction: 0.72973

hexsha: 794a78d84f5f5179166166f4db3e60df1cbd9273 | size: 412 | ext: py | lang: Python
max_stars/max_issues/max_forks: buzzbnb/base/models/category.py | AmidBidee/buzzbnb @ 632b44096229a9e346f57e918bc8d6f2d777e143 | ["CC0-1.0"] | count: null
content:
from django.db import models
from django.urls import reverse
import datetime
class Categorie(models.Model):
name = models.CharField(max_length=50)
slug = models.SlugField(unique=True, primary_key=True)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('categorie-detail', kwargs={'slug': self.slug})
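# Usage sketch (values are illustrative). Because slug is the primary key,
# both lookups and URL reversal go through it:
#
#   cat = Categorie.objects.create(name='Beach houses', slug='beach-houses')
#   cat.get_absolute_url()
#   # -> whatever reverse('categorie-detail', kwargs={'slug': 'beach-houses'})
#   #    resolves to under the project's URLconf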
avg_line_length: 25.75 | max_line_length: 70 | alphanum_fraction: 0.686893

hexsha: 794a7abce96ceb98a7dc0701ce36b6a5c26898b4 | size: 7,271 | ext: py | lang: Python
max_stars/max_issues/max_forks: tests/functional/clients/scripts/standalone.py | Vijay-P/anchore-engine @ 660a0bf10c56d16f894919209c51ec7a12081e9b | ["Apache-2.0"] | count: null
content:
"""
This script is a wrapper to allow the code in
anchore_engine.clients.localanchore_standalone work without the need for
a running anchore-engine service(s). More specifically, it sets everything up
needed for the `analyze_image()` function, and it does so *for testing
purposes*.
By default, it uses the `centos:centos8` image from dockerhub, but this can be
altered to use other images as well. There are currently a few lacking pieces
in the implementation, like re-using a manifest if present or trying to keep
all downloaded files/layers from skopeo.
This is *not* a substitute for the `anchore-manager analyze` command that produces
a tarball for analyzing.
"""
import json
import os
import pprint
import shutil
import subprocess
from os.path import abspath, dirname, join
from uuid import uuid4
import click
from anchore_engine.clients.localanchore_standalone import analyze_image
current = dirname(abspath(__file__))
top_level = dirname(dirname(dirname(dirname(current))))
cache_path = join(top_level, ".cache")
def call(command, stop_on_error=False):
click.echo("Running command: %s" % " ".join(command))
process = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
)
returncode = process.wait()
if returncode != 0:
msg = "command returned non-zero exit status: %s" % returncode
click.echo(msg)
if stop_on_error:
raise SystemExit(returncode)
def run(command):
click.echo("Running command: %s" % " ".join(command))
process = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
close_fds=True,
)
stdout_stream = process.stdout.read()
stderr_stream = process.stderr.read()
returncode = process.wait()
if not isinstance(stdout_stream, str):
stdout_stream = stdout_stream.decode("utf-8")
if not isinstance(stderr_stream, str):
stderr_stream = stderr_stream.decode("utf-8")
stdout = stdout_stream.splitlines()
stderr = stderr_stream.splitlines()
if returncode != 0:
for line in stdout + stderr:
click.echo(">>> %s" % line)
click.echo("Command failed with non-zero exit status: %s" % returncode)
return stdout, stderr, returncode
def get_manifest(resource, destination):
"""
This optional helper should be preferred but `docker manifest` is an
experimental feature and thus CircleCI does not support it with
remote-docker.
See https://docs.docker.com/engine/reference/commandline/manifest_inspect/
If the `manifest inspect` call fails, it will load the previously loaded
manifest from `scripts/manifests/` using the `resource` as the identifier
for the JSON file.
:resource: The full URI to use to get the manifest, with a sha digest
    :destination: Absolute path to use to save the manifest
"""
command = ["docker", "manifest", "inspect", resource]
manifest, stderr, code = run(command)
if code != 0:
manifests_dir = join(current, "manifests")
json_path = join(manifests_dir, "%s.json" % resource)
with open(json_path, "r") as _f:
json_manifest = _f.read()
manifest = json.loads(json_manifest)
else:
json_manifest = "".join(manifest)
manifest = json.loads(json_manifest)
with open(destination, "w") as save_path:
json.dump(manifest, save_path)
click.echo("Saved manifest to: %s" % destination)
# This returns the actual JSON, not the dict version, because the analyzer
# really wants to deal with JSON directly
return json_manifest
def analyze(registry, manifest, repo, digest, tag, work_dir, localconfig):
userId = None # Not used at all in analyze_image
image_record = {
"dockerfile_mode": "actual", # XXX no idea
"image_detail": [
{ # always picks the first one
"registry": registry,
"repo": repo,
"imageDigest": digest,
"tag": tag,
"imageId": "XXX", # XXX no idea
"dockerfile": None,
}
],
"imageDigest": "some sha256 - this seems repeated?", # XXX
}
_localconfig = {"service_dir": join(work_dir, "service_dir")}
if localconfig:
_localconfig.update(localconfig)
localconfig = _localconfig
click.echo("Starting the analyze process...")
image_report, manifest = analyze_image(
userId,
manifest,
image_record,
work_dir,
localconfig,
use_cache_dir=join(work_dir, "cache_dir"),
)
click.echo("Completed analyze process. Saving results...")
result_python = join(work_dir, "result.py")
with open(result_python, "w") as python_file:
python_file.write("result = ")
python_file.write(pprint.pformat(image_report))
click.echo("Saved the results of the analyzer to %s" % result_python)
def create_directories(work_dir):
"""
Create a set of directories needed to save the data, skip creation if they
are there
"""
os.makedirs(work_dir, exist_ok=True)
if work_dir == cache_path:
work_dir = join(cache_path, str(uuid4())[:8])
os.makedirs(work_dir, exist_ok=True)
service_dir = "service_dir"
sub_directories = [service_dir, "cache_dir"]
for _dir in sub_directories:
os.makedirs(join(work_dir, _dir), exist_ok=True)
# add analyzer config file
current_path = os.path.dirname(os.path.realpath(__file__))
config_source = join(current_path, "analyzer_config.yaml")
config_dest = join(work_dir, service_dir, "analyzer_config.yaml")
shutil.copyfile(config_source, config_dest)
# if work_dir changed, return it so that it can be re-used
return work_dir
@click.command()
@click.option(
"--registry",
default="docker.io",
help="TLD of a registry, like docker.io",
show_default=True,
)
@click.option(
"--repo",
default="centos",
help='Repo name, like "centos" (official ones) or "anchore/enterprise" for accounts',
show_default=True,
)
@click.option(
"--digest",
default="sha256:85313b812ad747dd19cf18078795b576cc4ae9cd2ca2ccccd7b5c12722b2effd",
help="The image digest as shown in the registry",
show_default=True,
)
@click.option(
"--tag",
default="centos8",
help="The tag for the given container",
show_default=True,
)
@click.option(
"--work-dir",
type=click.Path(exists=False),
default=cache_path,
help="Path to place images and other files",
show_default=True,
)
def _main(registry, repo, digest, tag, work_dir):
main(registry, repo, digest, tag, work_dir)
def main(
registry=None,
repo=None,
digest=None,
tag=None,
work_dir=None,
localconfig=None,
**kw
):
# Re-assign work_dir in case it is using the cache, which gets computed
# dynamically
work_dir = create_directories(work_dir)
resource = "%s@%s" % (repo, digest)
manifest = get_manifest(resource, join(work_dir, "manifest.json"))
analyze(registry, manifest, repo, digest, tag, work_dir, localconfig)
if __name__ == "__main__":
_main()
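# Example invocation sketch (the work dir is illustrative; the remaining
# defaults come from the click options above):
#
#   python standalone.py --repo centos --tag centos8 --work-dir /tmp/analysis
#
# This fetches the manifest for docker.io/centos at the default digest, runs
# analyze_image() against it, and writes result.py under the work dir.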
avg_line_length: 31.07265 | max_line_length: 89 | alphanum_fraction: 0.669647

hexsha: 794a7cd4b1a000c184bc367ecec280b25f118abe | size: 19,183 | ext: py | lang: Python
max_stars/max_issues/max_forks: bin/ft_transformer.py | allbits/rtdl @ 65cf8edc360343d77723b83b07d76fb9f94c2168 | ["Apache-2.0"] | count: null
content:
# %%
import math
import typing as ty
from pathlib import Path
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as nn_init
import zero
from torch import Tensor
import lib
# %%
class Tokenizer(nn.Module):
category_offsets: ty.Optional[Tensor]
def __init__(
self,
d_numerical: int,
categories: ty.Optional[ty.List[int]],
d_token: int,
bias: bool,
) -> None:
super().__init__()
if categories is None:
d_bias = d_numerical
self.category_offsets = None
self.category_embeddings = None
else:
d_bias = d_numerical + len(categories)
category_offsets = torch.tensor([0] + categories[:-1]).cumsum(0)
self.register_buffer('category_offsets', category_offsets)
self.category_embeddings = nn.Embedding(sum(categories), d_token)
nn_init.kaiming_uniform_(self.category_embeddings.weight, a=math.sqrt(5))
print(f'{self.category_embeddings.weight.shape=}')
# take [CLS] token into account
self.weight = nn.Parameter(Tensor(d_numerical + 1, d_token))
self.bias = nn.Parameter(Tensor(d_bias, d_token)) if bias else None
# The initialization is inspired by nn.Linear
nn_init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
nn_init.kaiming_uniform_(self.bias, a=math.sqrt(5))
@property
def n_tokens(self) -> int:
return len(self.weight) + (
0 if self.category_offsets is None else len(self.category_offsets)
)
def forward(self, x_num: Tensor, x_cat: ty.Optional[Tensor]) -> Tensor:
x_some = x_num if x_cat is None else x_cat
assert x_some is not None
x_num = torch.cat(
[torch.ones(len(x_some), 1, device=x_some.device)] # [CLS]
+ ([] if x_num is None else [x_num]),
dim=1,
)
x = self.weight[None] * x_num[:, :, None]
if x_cat is not None:
x = torch.cat(
[x, self.category_embeddings(x_cat + self.category_offsets[None])],
dim=1,
)
if self.bias is not None:
bias = torch.cat(
[
torch.zeros(1, self.bias.shape[1], device=x.device),
self.bias,
]
)
x = x + bias[None]
return x
class MultiheadAttention(nn.Module):
def __init__(
self, d: int, n_heads: int, dropout: float, initialization: str
) -> None:
if n_heads > 1:
assert d % n_heads == 0
assert initialization in ['xavier', 'kaiming']
super().__init__()
self.W_q = nn.Linear(d, d)
self.W_k = nn.Linear(d, d)
self.W_v = nn.Linear(d, d)
self.W_out = nn.Linear(d, d) if n_heads > 1 else None
self.n_heads = n_heads
self.dropout = nn.Dropout(dropout) if dropout else None
for m in [self.W_q, self.W_k, self.W_v]:
if initialization == 'xavier' and (n_heads > 1 or m is not self.W_v):
# gain is needed since W_qkv is represented with 3 separate layers
nn_init.xavier_uniform_(m.weight, gain=1 / math.sqrt(2))
nn_init.zeros_(m.bias)
if self.W_out is not None:
nn_init.zeros_(self.W_out.bias)
def _reshape(self, x: Tensor) -> Tensor:
batch_size, n_tokens, d = x.shape
d_head = d // self.n_heads
return (
x.reshape(batch_size, n_tokens, self.n_heads, d_head)
.transpose(1, 2)
.reshape(batch_size * self.n_heads, n_tokens, d_head)
)
def forward(
self,
x_q: Tensor,
x_kv: Tensor,
key_compression: ty.Optional[nn.Linear],
value_compression: ty.Optional[nn.Linear],
) -> Tensor:
q, k, v = self.W_q(x_q), self.W_k(x_kv), self.W_v(x_kv)
for tensor in [q, k, v]:
assert tensor.shape[-1] % self.n_heads == 0
if key_compression is not None:
assert value_compression is not None
k = key_compression(k.transpose(1, 2)).transpose(1, 2)
v = value_compression(v.transpose(1, 2)).transpose(1, 2)
else:
assert value_compression is None
batch_size = len(q)
d_head_key = k.shape[-1] // self.n_heads
d_head_value = v.shape[-1] // self.n_heads
n_q_tokens = q.shape[1]
q = self._reshape(q)
k = self._reshape(k)
attention = F.softmax(q @ k.transpose(1, 2) / math.sqrt(d_head_key), dim=-1)
if self.dropout is not None:
attention = self.dropout(attention)
x = attention @ self._reshape(v)
x = (
x.reshape(batch_size, self.n_heads, n_q_tokens, d_head_value)
.transpose(1, 2)
.reshape(batch_size, n_q_tokens, self.n_heads * d_head_value)
)
if self.W_out is not None:
x = self.W_out(x)
return x
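# Shape sketch for the attention above (illustrative numbers, no key/value
# compression): with batch_size=32, n_tokens=10, d=64 and n_heads=8, q, k and
# v are each (32, 10, 64); _reshape maps them to (256, 10, 8); attention is
# (256, 10, 10); the final reshape restores (32, 10, 64) before the optional
# W_out projection.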
class Transformer(nn.Module):
"""Transformer.
References:
- https://pytorch.org/docs/stable/generated/torch.nn.Transformer.html
- https://github.com/facebookresearch/pytext/tree/master/pytext/models/representations/transformer
- https://github.com/pytorch/fairseq/blob/1bba712622b8ae4efb3eb793a8a40da386fe11d0/examples/linformer/linformer_src/modules/multihead_linear_attention.py#L19
"""
def __init__(
self,
*,
# tokenizer
d_numerical: int,
categories: ty.Optional[ty.List[int]],
token_bias: bool,
# transformer
n_layers: int,
d_token: int,
n_heads: int,
d_ffn_factor: float,
attention_dropout: float,
ffn_dropout: float,
residual_dropout: float,
activation: str,
prenormalization: bool,
initialization: str,
# linformer
kv_compression: ty.Optional[float],
kv_compression_sharing: ty.Optional[str],
#
d_out: int,
) -> None:
assert (kv_compression is None) ^ (kv_compression_sharing is not None)
super().__init__()
self.tokenizer = Tokenizer(d_numerical, categories, d_token, token_bias)
n_tokens = self.tokenizer.n_tokens
def make_kv_compression():
assert kv_compression
compression = nn.Linear(
n_tokens, int(n_tokens * kv_compression), bias=False
)
if initialization == 'xavier':
nn_init.xavier_uniform_(compression.weight)
return compression
self.shared_kv_compression = (
make_kv_compression()
if kv_compression and kv_compression_sharing == 'layerwise'
else None
)
def make_normalization():
return nn.LayerNorm(d_token)
d_hidden = int(d_token * d_ffn_factor)
self.layers = nn.ModuleList([])
for layer_idx in range(n_layers):
layer = nn.ModuleDict(
{
'attention': MultiheadAttention(
d_token, n_heads, attention_dropout, initialization
),
'linear0': nn.Linear(
d_token, d_hidden * (2 if activation.endswith('glu') else 1)
),
'linear1': nn.Linear(d_hidden, d_token),
'norm1': make_normalization(),
}
)
if not prenormalization or layer_idx:
layer['norm0'] = make_normalization()
if kv_compression and self.shared_kv_compression is None:
layer['key_compression'] = make_kv_compression()
if kv_compression_sharing == 'headwise':
layer['value_compression'] = make_kv_compression()
else:
assert kv_compression_sharing == 'key-value'
self.layers.append(layer)
self.activation = lib.get_activation_fn(activation)
self.last_activation = lib.get_nonglu_activation_fn(activation)
self.prenormalization = prenormalization
self.last_normalization = make_normalization() if prenormalization else None
self.ffn_dropout = ffn_dropout
self.residual_dropout = residual_dropout
self.head = nn.Linear(d_token, d_out)
def _get_kv_compressions(self, layer):
return (
(self.shared_kv_compression, self.shared_kv_compression)
if self.shared_kv_compression is not None
else (layer['key_compression'], layer['value_compression'])
if 'key_compression' in layer and 'value_compression' in layer
else (layer['key_compression'], layer['key_compression'])
if 'key_compression' in layer
else (None, None)
)
def _start_residual(self, x, layer, norm_idx):
x_residual = x
if self.prenormalization:
norm_key = f'norm{norm_idx}'
if norm_key in layer:
x_residual = layer[norm_key](x_residual)
return x_residual
def _end_residual(self, x, x_residual, layer, norm_idx):
if self.residual_dropout:
x_residual = F.dropout(x_residual, self.residual_dropout, self.training)
x = x + x_residual
if not self.prenormalization:
x = layer[f'norm{norm_idx}'](x)
return x
def forward(self, x_num: Tensor, x_cat: ty.Optional[Tensor]) -> Tensor:
x = self.tokenizer(x_num, x_cat)
for layer_idx, layer in enumerate(self.layers):
is_last_layer = layer_idx + 1 == len(self.layers)
layer = ty.cast(ty.Dict[str, nn.Module], layer)
x_residual = self._start_residual(x, layer, 0)
x_residual = layer['attention'](
# for the last attention, it is enough to process only [CLS]
(x_residual[:, :1] if is_last_layer else x_residual),
x_residual,
*self._get_kv_compressions(layer),
)
if is_last_layer:
x = x[:, : x_residual.shape[1]]
x = self._end_residual(x, x_residual, layer, 0)
x_residual = self._start_residual(x, layer, 1)
x_residual = layer['linear0'](x_residual)
x_residual = self.activation(x_residual)
if self.ffn_dropout:
x_residual = F.dropout(x_residual, self.ffn_dropout, self.training)
x_residual = layer['linear1'](x_residual)
x = self._end_residual(x, x_residual, layer, 1)
assert x.shape[1] == 1
x = x[:, 0]
if self.last_normalization is not None:
x = self.last_normalization(x)
x = self.last_activation(x)
x = self.head(x)
x = x.squeeze(-1)
return x
# %%
if __name__ == "__main__":
args, output = lib.load_config()
args['model'].setdefault('token_bias', True)
args['model'].setdefault('kv_compression', None)
args['model'].setdefault('kv_compression_sharing', None)
# %%
zero.set_randomness(args['seed'])
dataset_dir = lib.get_path(args['data']['path'])
stats: ty.Dict[str, ty.Any] = {
'dataset': dataset_dir.name,
'algorithm': Path(__file__).stem,
**lib.load_json(output / 'stats.json'),
}
timer = zero.Timer()
timer.run()
D = lib.Dataset.from_dir(dataset_dir)
X = D.build_X(
normalization=args['data'].get('normalization'),
num_nan_policy='mean',
cat_nan_policy='new',
cat_policy=args['data'].get('cat_policy', 'indices'),
cat_min_frequency=args['data'].get('cat_min_frequency', 0.0),
seed=args['seed'],
)
if not isinstance(X, tuple):
X = (X, None)
zero.set_randomness(args['seed'])
Y, y_info = D.build_y(args['data'].get('y_policy'))
lib.dump_pickle(y_info, output / 'y_info.pickle')
X = tuple(None if x is None else lib.to_tensors(x) for x in X)
Y = lib.to_tensors(Y)
device = lib.get_device()
if device.type != 'cpu':
X = tuple(
None if x is None else {k: v.to(device) for k, v in x.items()} for x in X
)
Y_device = {k: v.to(device) for k, v in Y.items()}
else:
Y_device = Y
X_num, X_cat = X
del X
if not D.is_multiclass:
Y_device = {k: v.float() for k, v in Y_device.items()}
    np.save(dataset_dir / 'categories.npy', lib.get_categories(X_cat))
train_size = D.size(lib.TRAIN)
batch_size = args['training']['batch_size']
epoch_size = stats['epoch_size'] = math.ceil(train_size / batch_size)
eval_batch_size = args['training']['eval_batch_size']
chunk_size = None
loss_fn = (
F.binary_cross_entropy_with_logits
if D.is_binclass
else F.cross_entropy
if D.is_multiclass
else F.mse_loss
)
model = Transformer(
d_numerical=0 if X_num is None else X_num['train'].shape[1],
categories=lib.get_categories(X_cat),
d_out=D.info['n_classes'] if D.is_multiclass else 1,
**args['model'],
).to(device)
if torch.cuda.device_count() > 1: # type: ignore[code]
print('Using nn.DataParallel')
model = nn.DataParallel(model)
stats['n_parameters'] = lib.get_n_parameters(model)
def needs_wd(name):
return all(x not in name for x in ['tokenizer', '.norm', '.bias'])
for x in ['tokenizer', '.norm', '.bias']:
assert any(x in a for a in (b[0] for b in model.named_parameters()))
parameters_with_wd = [v for k, v in model.named_parameters() if needs_wd(k)]
parameters_without_wd = [v for k, v in model.named_parameters() if not needs_wd(k)]
optimizer = lib.make_optimizer(
args['training']['optimizer'],
(
[
{'params': parameters_with_wd},
{'params': parameters_without_wd, 'weight_decay': 0.0},
]
),
args['training']['lr'],
args['training']['weight_decay'],
)
stream = zero.Stream(lib.IndexLoader(train_size, batch_size, True, device))
progress = zero.ProgressTracker(args['training']['patience'])
training_log = {lib.TRAIN: [], lib.VAL: [], lib.TEST: []}
timer = zero.Timer()
checkpoint_path = output / 'checkpoint.pt'
def print_epoch_info():
print(f'\n>>> Epoch {stream.epoch} | {lib.format_seconds(timer())} | {output}')
print(
' | '.join(
f'{k} = {v}'
for k, v in {
'lr': lib.get_lr(optimizer),
'batch_size': batch_size,
'chunk_size': chunk_size,
'epoch_size': stats['epoch_size'],
'n_parameters': stats['n_parameters'],
}.items()
)
)
def apply_model(part, idx):
return model(
None if X_num is None else X_num[part][idx],
None if X_cat is None else X_cat[part][idx],
)
@torch.no_grad()
def evaluate(parts):
global eval_batch_size
model.eval()
metrics = {}
predictions = {}
for part in parts:
while eval_batch_size:
try:
predictions[part] = (
torch.cat(
[
apply_model(part, idx)
for idx in lib.IndexLoader(
D.size(part), eval_batch_size, False, device
)
]
)
.cpu()
.numpy()
)
except RuntimeError as err:
if not lib.is_oom_exception(err):
raise
eval_batch_size //= 2
print('New eval batch size:', eval_batch_size)
stats['eval_batch_size'] = eval_batch_size
else:
break
if not eval_batch_size:
                raise RuntimeError('Not enough memory even for eval_batch_size=1')
metrics[part] = lib.calculate_metrics(
D.info['task_type'],
Y[part].numpy(), # type: ignore[code]
predictions[part], # type: ignore[code]
'logits',
y_info,
)
for part, part_metrics in metrics.items():
print(f'[{part:<5}]', lib.make_summary(part_metrics))
return metrics, predictions
def save_checkpoint(final):
torch.save(
{
'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
'stream': stream.state_dict(),
'random_state': zero.get_random_state(),
**{
x: globals()[x]
for x in [
'progress',
'stats',
'timer',
'training_log',
]
},
},
checkpoint_path,
)
lib.dump_stats(stats, output, final)
lib.backup_output(output)
# %%
timer.run()
for epoch in stream.epochs(args['training']['n_epochs']):
print_epoch_info()
model.train()
epoch_losses = []
for batch_idx in epoch:
loss, new_chunk_size = lib.train_with_auto_virtual_batch(
optimizer,
loss_fn,
lambda x: (apply_model(lib.TRAIN, x), Y_device[lib.TRAIN][x]),
batch_idx,
chunk_size or batch_size,
)
epoch_losses.append(loss.detach())
if new_chunk_size and new_chunk_size < (chunk_size or batch_size):
stats['chunk_size'] = chunk_size = new_chunk_size
print('New chunk size:', chunk_size)
epoch_losses = torch.stack(epoch_losses).tolist()
training_log[lib.TRAIN].extend(epoch_losses)
print(f'[{lib.TRAIN}] loss = {round(sum(epoch_losses) / len(epoch_losses), 3)}')
metrics, predictions = evaluate([lib.VAL, lib.TEST])
for k, v in metrics.items():
training_log[k].append(v)
progress.update(metrics[lib.VAL]['score'])
if progress.success:
print('New best epoch!')
stats['best_epoch'] = stream.epoch
stats['metrics'] = metrics
save_checkpoint(False)
for k, v in predictions.items():
np.save(output / f'p_{k}.npy', v)
elif progress.fail:
break
# %%
print('\nRunning the final evaluation...')
model.load_state_dict(torch.load(checkpoint_path)['model'])
stats['metrics'], predictions = evaluate(lib.PARTS)
for k, v in predictions.items():
np.save(output / f'p_{k}.npy', v)
stats['time'] = lib.format_seconds(timer())
save_checkpoint(True)
print('Done!')
avg_line_length: 35.856075 | max_line_length: 161 | alphanum_fraction: 0.557577

hexsha: 794a7cddb068ad32b73c32ac3a102ff39a0b8268 | size: 2,014 | ext: py | lang: Python
max_stars: configs/FDDB/kl/cfgs_res50_fddb_kl_v5.py | chisyliu/RotationDetection @ 6f2bd55a51a6de0bcd0959a85977682511fd440d | ["Apache-2.0"] | count: 2 | 2022-03-05T09:55:49.000Z to 2022-03-05T10:12:51.000Z
max_issues: configs/FDDB/kl/cfgs_res50_fddb_kl_v5.py | ZhangRan24/RotationDetection @ 85791a4ec944bb0b14b8721193477eb0f582e981 | ["Apache-2.0"] | count: null
max_forks: (same as max_issues) | count: null
content:
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import numpy as np
from alpharotate.utils.pretrain_zoo import PretrainModelZoo
from configs._base_.models.retinanet_r50_fpn import *
from configs._base_.datasets.dota_detection import *
from configs._base_.schedules.schedule_1x import *
# schedule
BATCH_SIZE = 1
GPU_GROUP = "0,1,2"
NUM_GPU = len(GPU_GROUP.strip().split(','))
LR = 1e-3
SAVE_WEIGHTS_INTE = 2000 * 2
DECAY_EPOCH = [8, 11, 20]
MAX_EPOCH = 12
WARM_EPOCH = 1 / 16.
DECAY_STEP = np.array(DECAY_EPOCH, np.int32) * SAVE_WEIGHTS_INTE
MAX_ITERATION = SAVE_WEIGHTS_INTE * MAX_EPOCH
WARM_SETP = int(WARM_EPOCH * SAVE_WEIGHTS_INTE)
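# Worked out: SAVE_WEIGHTS_INTE = 4000, so DECAY_STEP = [32000, 44000, 80000],
# MAX_ITERATION = 48000 and WARM_SETP = 250 (the third decay step lies beyond
# MAX_ITERATION and is never reached in a 12-epoch run).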
# dataset
DATASET_NAME = 'FDDB'
CLASS_NUM = 1
# model
# backbone
pretrain_zoo = PretrainModelZoo()
PRETRAINED_CKPT = pretrain_zoo.pretrain_weight_path(NET_NAME, ROOT_PATH)
TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')
# bbox head
NUM_SUBNET_CONV = 4
LEVEL = ['P3', 'P4', 'P5', 'P6', 'P7']
BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]
ANCHOR_STRIDE = [8, 16, 32, 64, 128]
ANCHOR_SCALES = [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]
ANCHOR_RATIOS = [1, 1 / 1.5, 1.5]
# loss
CLS_WEIGHT = 1.0
REG_WEIGHT = 2.0
REG_LOSS_MODE = 3 # KLD loss
KL_TAU = 1.0
KL_FUNC = 1 # 0: sqrt 1: log
# eval
USE_07_METRIC = False
VERSION = 'RetinaNet_FDDB_KL_2x_20211107'
"""
RetinaNet-H + kl (fix bug)
2007
cls : face|| Recall: 0.9800275482093664 || Precison: 0.5909468438538206|| AP: 0.9086022298878069
F1:0.9599106140657935 P:0.9806034482758621 R:0.9400826446280992
mAP is : 0.9086022298878069
2012
cls : face|| Recall: 0.9800275482093664 || Precison: 0.5909468438538206|| AP: 0.975114766801636
F1:0.9599106140657935 P:0.9806034482758621 R:0.9400826446280992
mAP is : 0.975114766801636
AP50:95=0.6801196958756109
0.975114766801636 0.9666442174651226 0.9540159408453797 0.9323120706611414 0.9064024625303708
0.8532868702313012 0.6871896787782661 0.4220118463759825 0.10333811036980581 0.000880994697104151
"""
avg_line_length: 27.972222 | max_line_length: 101 | alphanum_fraction: 0.750248

hexsha: 794a7d5f7d8127455f9a24184e3e051180a0f513 | size: 523 | ext: py | lang: Python
max_stars/max_issues/max_forks: tests/test_user.py | Kevson102/Pitches @ a1c41b83edf687135edf35dc068696ea6b7bf824 | ["MIT"] | count: null
content:
import unittest
from app.models import User
class UserModelTest(unittest.TestCase):
def setUp(self):
self.new_user = User(password = 'phoenix')
def test_password_setter(self):
self.assertTrue(self.new_user.pass_secure is not None)
def test_no_access_password(self):
with self.assertRaises(AttributeError):
self.new_user.password
def test_password_verification(self):
self.assertTrue(self.new_user.verify_password('phoenix'))
# if __name__ == '__main__':
# unittest.main()
avg_line_length: 26.15 | max_line_length: 61 | alphanum_fraction: 0.730402

hexsha: 794a7e01412b8bba33c761c355fc928e28c8c8b0 | size: 13,185 | ext: py | lang: Python
max_stars/max_issues/max_forks: aiida_quantumespresso/parsers/cp.py | lbotsch/aiida-quantumespresso @ fe75c80cecb61113641366961ced8ed5a03cf896 | ["MIT"] | count: null
content:
# -*- coding: utf-8 -*-
from distutils.version import LooseVersion
import numpy
from aiida.common import NotExistent
from aiida.orm import Dict, TrajectoryData
from qe_tools.constants import bohr_to_ang, hartree_to_ev, timeau_to_sec
from aiida_quantumespresso.parsers.parse_raw.cp import parse_cp_raw_output, parse_cp_traj_stanzas
from .base import Parser
class CpParser(Parser):
"""This class is the implementation of the Parser class for Cp."""
def parse(self, **kwargs):
"""Receives in input a dictionary of retrieved nodes.
Does all the logic here.
"""
try:
out_folder = self.retrieved
except NotExistent:
return self.exit(self.exit_codes.ERROR_NO_RETRIEVED_FOLDER)
# check what is inside the folder
list_of_files = out_folder._repository.list_object_names()
# options.metadata become attributes like this:
stdout_filename = self.node.get_attribute('output_filename')
# at least the stdout should exist
if stdout_filename not in list_of_files:
return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_READ)
# This should match 1 file
xml_files = [xml_file for xml_file in self.node.process_class.xml_filenames if xml_file in list_of_files]
if not xml_files:
return self.exit(self.exit_codes.ERROR_MISSING_XML_FILE)
elif len(xml_files) > 1:
return self.exit(self.exit_codes.ERROR_OUTPUT_XML_MULTIPLE)
if self.node.process_class._FILE_XML_PRINT_COUNTER_BASENAME not in list_of_files:
self.logger.error('We could not find the print counter file in the output')
# TODO: Add an error for this counter
return self.exit(self.exit_codes.ERROR_MISSING_XML_FILE)
# Let's pass file handlers to this function
out_dict, _raw_successful = parse_cp_raw_output(
out_folder.open(stdout_filename), out_folder.open(xml_files[0]),
out_folder.open(self.node.process_class._FILE_XML_PRINT_COUNTER_BASENAME)
)
# parse the trajectory. Units in Angstrom, picoseconds and eV.
        # append everything to the temporary dictionary raw_trajectory
raw_trajectory = {}
evp_keys = [
'electronic_kinetic_energy', 'cell_temperature', 'ionic_temperature', 'scf_total_energy', 'enthalpy',
'enthalpy_plus_kinetic', 'energy_constant_motion', 'volume', 'pressure'
]
        # Now prepare the reordering, as files in the xml are ordered
reordering = self._generate_sites_ordering(out_dict['species'], out_dict['atoms'])
pos_filename = '{}.{}'.format(self.node.process_class._PREFIX, 'pos')
if pos_filename not in list_of_files:
return self.exit(self.exit_codes.ERROR_READING_POS_FILE)
trajectories = [
('positions', 'pos', bohr_to_ang, out_dict['number_of_atoms']),
('cells', 'cel', bohr_to_ang, 3),
('velocities', 'vel', bohr_to_ang / timeau_to_sec * 10**12, out_dict['number_of_atoms']),
]
for name, extension, scale, elements in trajectories:
try:
with out_folder.open('{}.{}'.format(self.node.process_class._PREFIX, extension)) as datafile:
data = [l.split() for l in datafile]
# POSITIONS stored in angstrom
traj_data = parse_cp_traj_stanzas(
num_elements=elements, splitlines=data, prepend_name='{}_traj'.format(name), rescale=scale
)
# here initialize the dictionary. If the parsing of positions fails, though, I don't have anything
# out of the CP dynamics. Therefore, the calculation status is set to FAILED.
if extension != 'cel':
raw_trajectory['{}_ordered'.format(name)
] = self._get_reordered_array(traj_data['{}_traj_data'.format(name)], reordering)
else:
raw_trajectory['cells'] = numpy.array(traj_data['cells_traj_data'])
if extension == 'pos':
raw_trajectory['times'] = numpy.array(traj_data['{}_traj_times'.format(name)])
except IOError:
out_dict['warnings'].append('Unable to open the {} file... skipping.'.format(extension.upper()))
# =============== EVP trajectory ============================
try:
            matrix = numpy.genfromtxt(out_folder.open('{}.evp'.format(self.node.process_class._PREFIX)))
# there might be a different format if the matrix has one row only
try:
matrix.shape[1]
except IndexError:
matrix = numpy.array(numpy.matrix(matrix))
if LooseVersion(out_dict['creator_version']) > LooseVersion('5.1'):
# Between version 5.1 and 5.1.1, someone decided to change
# the .evp output format, without any way to know that this
# happened... SVN commit 11158.
# I here use the version number to parse, plus some
# heuristics to check that I'm doing the right thing
#print "New version"
raw_trajectory['steps'] = numpy.array(matrix[:, 0], dtype=int)
raw_trajectory['evp_times'] = matrix[:, 1] # TPS, ps
raw_trajectory['electronic_kinetic_energy'] = matrix[:, 2] * hartree_to_ev # EKINC, eV
raw_trajectory['cell_temperature'] = matrix[:, 3] # TEMPH, K
raw_trajectory['ionic_temperature'] = matrix[:, 4] # TEMPP, K
raw_trajectory['scf_total_energy'] = matrix[:, 5] * hartree_to_ev # ETOT, eV
raw_trajectory['enthalpy'] = matrix[:, 6] * hartree_to_ev # ENTHAL, eV
raw_trajectory['enthalpy_plus_kinetic'] = matrix[:, 7] * hartree_to_ev # ECONS, eV
raw_trajectory['energy_constant_motion'] = matrix[:, 8] * hartree_to_ev # ECONT, eV
raw_trajectory['volume'] = matrix[:, 9] * (bohr_to_ang**3) # volume, angstrom^3
raw_trajectory['pressure'] = matrix[:, 10] # out_press, GPa
else:
#print "Old version"
raw_trajectory['steps'] = numpy.array(matrix[:, 0], dtype=int)
raw_trajectory['electronic_kinetic_energy'] = matrix[:, 1] * hartree_to_ev # EKINC, eV
raw_trajectory['cell_temperature'] = matrix[:, 2] # TEMPH, K
raw_trajectory['ionic_temperature'] = matrix[:, 3] # TEMPP, K
raw_trajectory['scf_total_energy'] = matrix[:, 4] * hartree_to_ev # ETOT, eV
raw_trajectory['enthalpy'] = matrix[:, 5] * hartree_to_ev # ENTHAL, eV
raw_trajectory['enthalpy_plus_kinetic'] = matrix[:, 6] * hartree_to_ev # ECONS, eV
raw_trajectory['energy_constant_motion'] = matrix[:, 7] * hartree_to_ev # ECONT, eV
raw_trajectory['volume'] = matrix[:, 8] * (bohr_to_ang**3) # volume, angstrom^3
raw_trajectory['pressure'] = matrix[:, 9] # out_press, GPa
raw_trajectory['evp_times'] = matrix[:, 10] # TPS, ps
            # Heuristics to understand if it's correct.
# A better heuristics could also try to fix possible issues
# (in new versions of QE, it's possible to recompile it with
# the __OLD_FORMAT flag to get back the old version format...)
# but I won't do it, as there may be also other columns swapped.
# Better to stop and ask the user to check what's going on.
max_time_difference = abs(numpy.array(raw_trajectory['times']) -
numpy.array(raw_trajectory['evp_times'])).max()
if max_time_difference > 1.e-4: # It is typically ~1.e-7 due to roundoff errors
# If there is a large discrepancy
# it means there is something very weird going on...
return self.exit(self.exit_codes.ERROR_READING_TRAJECTORY_DATA)
# Delete evp_times in any case, it's a duplicate of 'times'
del raw_trajectory['evp_times']
except IOError:
out_dict['warnings'].append('Unable to open the EVP file... skipping.')
# get the symbols from the input
# TODO: I should have kinds in TrajectoryData
input_structure = self.node.inputs.structure
raw_trajectory['symbols'] = [str(i.kind_name) for i in input_structure.sites]
traj = TrajectoryData()
traj.set_trajectory(
stepids=raw_trajectory['steps'],
cells=raw_trajectory['cells'],
symbols=raw_trajectory['symbols'],
positions=raw_trajectory['positions_ordered'],
times=raw_trajectory['times'],
velocities=raw_trajectory['velocities_ordered'],
)
for this_name in evp_keys:
try:
traj.set_array(this_name, raw_trajectory[this_name])
except KeyError:
# Some columns may have not been parsed, skip
pass
self.out('output_trajectory', traj)
# Remove big dictionaries that would be redundant
# For atoms and cell, there is a small possibility that nothing is parsed
# but then probably nothing moved.
try:
del out_dict['atoms']
except KeyError:
pass
try:
del out_dict['cell']
except KeyError:
pass
try:
del out_dict['ions_positions_stau']
except KeyError:
pass
try:
del out_dict['ions_positions_svel']
except KeyError:
pass
try:
del out_dict['ions_positions_taui']
except KeyError:
pass
# This should not be needed
try:
del out_dict['atoms_index_list']
except KeyError:
pass
# This should be already in the input
try:
del out_dict['atoms_if_pos_list']
except KeyError:
pass
#
try:
del out_dict['ions_positions_force']
except KeyError:
pass
# convert the dictionary into an AiiDA object
output_params = Dict(dict=out_dict)
self.out('output_parameters', output_params)
def get_linkname_trajectory(self):
"""Returns the name of the link to the output_structure (None if not present)"""
return 'output_trajectory'
def _generate_sites_ordering(self, raw_species, raw_atoms):
"""take the positions of xml and from file.pos of the LAST step and compare them."""
# Examples in the comments are for species [Ba, O, Ti]
# and atoms [Ba, Ti, O, O, O]
# Dictionary to associate the species name to the idx
# Example: {'Ba': 1, 'O': 2, 'Ti': 3}
species_dict = {name: idx for idx, name in zip(raw_species['index'], raw_species['type'])}
# List of the indices of the specie associated to each atom,
# in the order specified in input
# Example: (1,3,2,2,2)
atoms_species_idx = [species_dict[a[0]] for a in raw_atoms]
# I also attach the current position; important to convert to a list
# Otherwise the iterator can be looped on only once!
# Example: ((0,1),(1,3),(2,2),(3,2),(4,2))
ref_atom_list = list(enumerate(atoms_species_idx))
new_order_tmp = []
# I reorder the atoms, first by specie, then in their order
# This is the order used in output by CP!!
# Example: ((0,1),(2,2),(3,2),(4,2),(1,3))
for specie_idx in sorted(raw_species['index']):
for elem in ref_atom_list:
if elem[1] == specie_idx:
new_order_tmp.append(elem)
# This is the new order that is printed in CP:
# e.g. reordering[2] is the index of the atom, in the input
# list of atoms, that is printed in position 2 (0-based, so the
# third atom) in the CP output files.
# Example: [0,2,3,4,1]
reordering = [_[0] for _ in new_order_tmp]
# I now need the inverse reordering, to put back in place
# from the output ordering to the input one!
# Example: [0,4,1,2,3]
# Because in the final list (Ba, O, O, O, Ti)
# the first atom Ba in the input is atom 0 in the CP output (the first),
# the second atom Ti in the input is atom 4 (the fifth) in the CP output,
# and so on
sorted_indexed_reordering = sorted([(_[1], _[0]) for _ in enumerate(reordering)])
reordering_inverse = [_[1] for _ in sorted_indexed_reordering]
return reordering_inverse
def _get_reordered_list(self, origlist, reordering):
"""Given a list to reorder, a list of integer positions with the new order, return the reordered list."""
return [origlist[e] for e in reordering]
def _get_reordered_array(self, _input, reordering):
return numpy.array([self._get_reordered_list(i, reordering) for i in _input])
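# Usage sketch for the Ba/O/Ti example above (values are illustrative):
#   reordering_inverse = [0, 4, 1, 2, 3]
#   self._get_reordered_list(['Ba', 'O', 'O', 'O', 'Ti'], reordering_inverse)
#   # -> ['Ba', 'Ti', 'O', 'O', 'O'], i.e. back to the input ordering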
| 48.653137
| 116
| 0.607509
|
794a7e2c0152e327e393c8bec790bc48a5eb8e76
| 49899
|
py
|
Python
|
tensorflow/python/eager/benchmarks_test.py
|
drezap/tensorflow
|
df6853ac5b6dbeefa814e67041fc4744c1528629
|
[
"Apache-2.0"
] | 2
|
2020-02-20T15:42:41.000Z
|
2020-03-06T09:38:56.000Z
|
tensorflow/python/eager/benchmarks_test.py
|
drezap/tensorflow
|
df6853ac5b6dbeefa814e67041fc4744c1528629
|
[
"Apache-2.0"
] | 2
|
2021-08-25T16:09:48.000Z
|
2022-02-10T02:18:35.000Z
|
tensorflow/python/eager/benchmarks_test.py
|
Chiranjeevi731/Machine-Learning
|
0ec3c78e0a34f224ee041b54a24717f77d3246fa
|
[
"Apache-2.0"
] | 1
|
2021-08-29T09:37:58.000Z
|
2021-08-29T09:37:58.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Benchmarks for low-level eager execution primitives.
To run CPU benchmarks:
bazel run -c opt benchmarks_test -- --benchmarks=.
To run GPU benchmarks:
bazel run --config=cuda -c opt --copt="-mavx" benchmarks_test -- \
--benchmarks=.
To run a subset of benchmarks, use the --benchmarks flag.
--benchmarks: the list of benchmarks to run. The specified value is interpreted
as a regular expression and any benchmark whose name contains a partial match
to the regular expression is executed.
e.g. --benchmarks=".*matmul*." will run all matmul related benchmarks.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python import pywrap_tfe
from tensorflow.python.eager import backprop # pylint: disable=unused-import
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import def_function
from tensorflow.python.eager import forwardprop
from tensorflow.python.eager import function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.util import tf_inspect
CPU = "/device:CPU:0"
GPU = "/device:GPU:0"
GLOBAL_TEST_VALUE = None
def c_tfe_py_fastpath_execute(a,
b,
transpose_a=False,
transpose_b=False,
name=None):
ctx = context.context()
assert ctx.executing_eagerly(
), "The prototype doesn't contain C code for graph construction"
try:
return pywrap_tfe.TFE_Py_FastPathExecute(ctx._handle, ctx.device_name,
"MatMul", name, ctx.op_callbacks,
a, b, "transpose_a", transpose_a,
"transpose_b", transpose_b)
except core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
six.raise_from(core._status_to_exception(e.code, message), None)
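# Hypothetical usage sketch (eager mode only, per the assert above):
#   a = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
#   product = c_tfe_py_fastpath_execute(a, a, transpose_b=True)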
def run_benchmark(func, num_iters, execution_mode=None):
ctx = context.context()
with context.execution_mode(execution_mode):
# call func to warm up
func()
if execution_mode == context.ASYNC:
ctx.executor.wait()
start = time.time()
for _ in xrange(num_iters):
func()
if execution_mode == context.ASYNC:
ctx.executor.wait()
end = time.time()
return end - start
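# Minimal usage sketch (illustrative): time 1000 synchronous iterations.
#   elapsed = run_benchmark(lambda: math_ops.multiply(2.0, 3.0), 1000)
#   print("mean us/iter:", elapsed * 1e6 / 1000)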
class MicroBenchmarks(test.Benchmark):
def __init__(self):
# TODO(b/153054118): Add tf.RandomUniform
if not context.is_tfrt_enabled():
# used for multiply benchmarks
self._m_2 = random_ops.random_uniform([2])
# used for matmul benchmarks
self._m_2_by_2 = random_ops.random_uniform((2, 2))
self._m_100_by_784 = random_ops.random_uniform((100, 784))
self._num_iters_2_by_2 = 30000
self._num_iters_100_by_784 = 30000
def _get_benchmark_name(self):
"""Mostly copied from benchmark.py _get_name()."""
stack = tf_inspect.stack()
name = None
for frame in stack[::-1]:
f_locals = frame[0].f_locals
f_self = f_locals.get("self", None)
if isinstance(f_self, test.Benchmark):
name = frame[3] # Get the method name
# This is a hack to get around the fact that some methods might have a
# disable_tfrt decorator around them. In that case a function called
# 'decorated' wraps the real called function underneath and so we
# peek one deeper into the stack to get the real name.
if name == "decorated":
continue
else:
break
if name is None:
raise ValueError("Unable to determine calling Benchmark function.")
if context.is_tfrt_enabled():
name = name + "_tfrt"
return name
def _run(self, func, num_iters, execution_mode=None):
total_time = run_benchmark(func, num_iters, execution_mode)
mean_us = total_time * 1e6 / num_iters
benchmark_name = self._get_benchmark_name()
self.report_benchmark(
iters=num_iters,
wall_time=mean_us,
extras={
"examples_per_sec":
float("{0:.3f}".format(num_iters / total_time)),
"us_per_example":
float("{0:.3f}".format(total_time * 1e6 / num_iters))
},
name=benchmark_name)
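# Illustrative numbers: 30000 iters in 0.3 s reports wall_time=10.0
# (the mean us per iter), examples_per_sec=100000.0, us_per_example=10.0.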
def benchmark_create_np_array(self):
func = lambda: np.array([3.0])
self._run(func, 30000)
def _benchmark_create_tensor(self, value, dtype, device):
"""Benchmark overheads of creating a Tensor object."""
ctx = context.context()
if device == GPU:
# Warmup the GPU
ops.EagerTensor(value, device=device)
def func():
ops.EagerTensor(value, device=device, dtype=dtype)
self._run(func, 30000)
def _benchmark_create_constant(self, value, dtype, cached=True):
global GLOBAL_TEST_VALUE
GLOBAL_TEST_VALUE = value
def cached_func():
constant_op.constant(value, dtype=dtype)
def uncached_func():
global GLOBAL_TEST_VALUE
GLOBAL_TEST_VALUE += 1
constant_op.constant(GLOBAL_TEST_VALUE, dtype=dtype)
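# The global increment above defeats constant caching, so uncached_func
# measures fresh tensor creation on every call, while cached_func keeps
# hitting the cache for the same Python value.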
func = cached_func if cached else uncached_func
with ops.device("GPU:0" if context.num_gpus() else "CPU:0"):
for _ in range(1000):
func() # Warmup.
self._run(func, 3000)
@test_util.disable_tfrt("Scalars are not handled correctly")
def benchmark_create_float_constant(self):
self._benchmark_create_constant(42.0, dtype=None)
@test_util.disable_tfrt("Scalars are not handled correctly")
def benchmark_create_float_constant_uncached(self):
self._benchmark_create_constant(42.0, dtype=None, cached=False)
@test_util.disable_tfrt("Scalars are not handled correctly")
def benchmark_create_int32_constant(self):
if context.num_gpus():
return # int32 constants are always allocated on CPU.
self._benchmark_create_constant(42, dtype=dtypes.int32)
@test_util.disable_tfrt("Scalars are not handled correctly")
def benchmark_create_int32_constant_uncached(self):
if context.num_gpus():
return # int32 constants are always allocated on CPU.
self._benchmark_create_constant(42, dtype=dtypes.int32, cached=False)
def _benchmark_add(self, a, b):
def func():
return memoryview(math_ops.add_v2(a, b))
with ops.device("GPU:0" if context.num_gpus() else "CPU:0"):
for _ in range(1000):
func() # Warmup.
self._run(func, 30000)
@test_util.disable_tfrt("Scalars are not handled correctly")
def benchmark_add_float_scalars(self):
self._benchmark_add(42.0, 24.0)
@test_util.disable_tfrt("Scalars are not handled correctly")
def benchmark_add_int32_scalars(self):
self._benchmark_add(42, 24)
@test_util.disable_tfrt("Scalars are not handled correctly")
def benchmark_add_float_scalar_tensor(self):
tensor_a = constant_op.constant(42.0)
tensor_b = constant_op.constant(24.0)
self._benchmark_add(tensor_a, tensor_b)
@test_util.disable_tfrt("Scalars are not handled correctly")
def benchmark_add_int32_scalar_tensor(self):
tensor_a = constant_op.constant(42)
tensor_b = constant_op.constant(24)
self._benchmark_add(tensor_a, tensor_b)
def benchmark_add_float_dense_tensor(self):
tensor_a = constant_op.constant([[42.0, 42.0], [42.0, 42.0]])
tensor_b = constant_op.constant([[24.0, 24.0], [24.0, 24.0]])
self._benchmark_add(tensor_a, tensor_b)
def benchmark_add_int32_dense_tensor(self):
tensor_a = constant_op.constant([[42, 42], [42, 42]])
tensor_b = constant_op.constant([[24, 24], [24, 24]])
self._benchmark_add(tensor_a, tensor_b)
@test_util.disable_tfrt("convert_to_tensor not handled")
def benchmark_create_float_tensor_from_list_CPU(self):
self._benchmark_create_tensor([[3.0]], dtypes.float32.as_datatype_enum, CPU)
@test_util.disable_tfrt("convert_to_tensor not handled")
def benchmark_create_float_tensor_from_np_array_CPU(self):
self._benchmark_create_tensor(
np.array([[3.0]], dtype=np.float32), dtypes.float32.as_datatype_enum,
CPU)
@test_util.disable_tfrt("convert_to_tensor not handled")
def benchmark_create_int32_tensor_from_list_CPU(self):
self._benchmark_create_tensor([[3]], dtypes.int32.as_datatype_enum, CPU)
@test_util.disable_tfrt("convert_to_tensor not handled")
def benchmark_create_int32_tensor_from_np_array_CPU(self):
self._benchmark_create_tensor(
np.array([[3]], dtype=np.int32), dtypes.int32.as_datatype_enum, CPU)
@test_util.disable_tfrt("no gpu support")
def benchmark_create_float_tensor_from_list_GPU(self):
if not context.num_gpus():
return
self._benchmark_create_tensor([[3.0]], dtypes.float32.as_datatype_enum, GPU)
@test_util.disable_tfrt("no gpu support")
def benchmark_create_float_tensor_from_np_array_GPU(self):
if not context.num_gpus():
return
self._benchmark_create_tensor(
np.array([[3.0]], dtype=np.float32), dtypes.float32.as_datatype_enum,
GPU)
@test_util.disable_tfrt("no gpu support")
def benchmark_create_int32_tensor_from_list_GPU(self):
# int32's are kept on host memory even when executing on GPU.
if not context.num_gpus():
return
self._benchmark_create_tensor([[3]], dtypes.int32.as_datatype_enum, GPU)
@test_util.disable_tfrt("no gpu support")
def benchmark_create_int32_tensor_from_np_array_GPU(self):
# int32's are kept on host memory even when executing on GPU.
if not context.num_gpus():
return
self._benchmark_create_tensor(
np.array([[3]], dtype=np.int32), dtypes.int32.as_datatype_enum, GPU)
@test_util.disable_tfrt("strided slice not supported")
def benchmark_index_tensor_with_literal(self):
func = lambda: constant_op.constant([3.0])[0]
self._run(func, 30000)
@test_util.disable_tfrt("strided slice not supported")
def benchmark_index_tensor_with_tensor(self):
func = lambda idx=constant_op.constant(0): constant_op.constant([3.0])[idx]
self._run(func, 30000)
@test_util.disable_tfrt("strided slice not supported")
def benchmark_index_tensor_with_np_array(self):
func = lambda idx=np.array(0): constant_op.constant([3.0])[idx]
self._run(func, 30000)
def _benchmark_np_multiply(self, m, num_iters):
a = m.cpu().numpy()
func = lambda: a * a
self._run(func, num_iters)
def _benchmark_tf_multiply(self, m, num_iters):
func = lambda: m * m
self._run(func, num_iters)
def _benchmark_tf_multiply_op(self, m, num_iters):
func = lambda: math_ops.multiply(m, m)
self._run(func, num_iters)
@test_util.disable_tfrt("random ops not supported")
def benchmark_np_multiply(self):
self._benchmark_np_multiply(self._m_2, 30000)
@test_util.disable_tfrt("random ops not supported")
def benchmark_tf_multiply_CPU(self):
with context.device(CPU):
m = self._m_2.cpu()
self._benchmark_tf_multiply(m, 30000)
@test_util.disable_tfrt("random ops not supported")
def benchmark_tf_multiply_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2.gpu()
self._benchmark_tf_multiply(m, 30000)
@test_util.disable_tfrt("random ops not supported")
def benchmark_tf_multiply_op_CPU(self):
with context.device(CPU):
m = self._m_2.cpu()
self._benchmark_tf_multiply_op(m, 30000)
@test_util.disable_tfrt("random ops not supported")
def benchmark_tf_multiply_op_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2.gpu()
self._benchmark_tf_multiply_op(m, 30000)
@test_util.disable_tfrt("random ops not supported")
def benchmark_tf_identity(self):
m = self._m_2
self._run(lambda: gen_array_ops.identity(m), 30000)
@test_util.disable_tfrt("identity not supported")
def benchmark_slowpath_tf_identity(self):
self._run(lambda: gen_array_ops.identity(1), 30000)
@test_util.disable_tfrt("random ops not supported")
def benchmark_tfe_py_execute_identity(self):
m = self._m_2
ctx_handle = context.context()._handle
attrs = ("T", self._m_2.dtype.as_datatype_enum)
inputs = [m]
def f():
pywrap_tfe.TFE_Py_Execute(ctx_handle, None, "Identity", inputs, attrs, 1)
self._run(f, 30000)
@test_util.disable_tfrt("identity not supported")
def benchmark_tf_gradient_function_identity(self):
with context.device(CPU):
m = gen_array_ops.identity(self._m_2)
self._run(
lambda: backprop.gradients_function(gen_array_ops.identity, [0])(m),
30000)
@test_util.disable_tfrt("identity not supported")
def benchmark_tf_gradient_forward_identity(self):
with backprop.GradientTape() as tape:
m = self._m_2
tape.watch(m)
self._run(lambda: gen_array_ops.identity(m), 30000)
@test_util.disable_tfrt("gradients not supported")
def benchmark_tf_gradient_tape_push_pop(self):
def f():
with backprop.GradientTape():
pass
self._run(f, 30000)
@test_util.disable_tfrt("gradients not supported")
def benchmark_tf_gradient_function_no_op(self):
with context.device(CPU):
m = gen_array_ops.identity(self._m_2)
self._run(lambda: backprop.gradients_function(lambda x: x, [0])(m), 30000)
def _benchmark_np_matmul(self, m, transpose_b, num_iters):
a = m.cpu().numpy()
b = a.T if transpose_b else a
func = lambda: np.dot(a, b)
self._run(func, num_iters)
def _benchmark_tf_matmul(self, m, transpose_b, num_iters,
execution_mode=None):
func = lambda: math_ops.matmul(m, m, transpose_b=transpose_b)
self._run(func, num_iters, execution_mode=execution_mode)
def _benchmark_gen_math_ops_matmul(self, m, transpose_b, num_iters):
def func():
gen_math_ops.mat_mul(m, m, transpose_b=transpose_b)
self._run(func, num_iters)
def _benchmark_tfe_py_fastpath_execute_matmul(self, m, transpose_b,
num_iters):
def func():
c_tfe_py_fastpath_execute(m, m, transpose_b=transpose_b)
self._run(func, num_iters)
def _benchmark_tfe_py_execute_matmul(self, m, transpose_b, num_iters):
inputs = [m, m]
# pylint: disable=protected-access
ctx_handle = context.context()._handle
# pylint: enable=protected-access
device = context.context().device_name
attrs = ("transpose_a", False, "transpose_b", transpose_b, "T",
m.dtype.as_datatype_enum)
def func():
pywrap_tfe.TFE_Py_Execute(ctx_handle, device, "MatMul", inputs, attrs, 1)
self._run(func, num_iters)
def _benchmark_defun_matmul(self,
m,
transpose_b,
num_iters,
execution_mode=None):
f = function.defun(math_ops.matmul)
func = lambda: f(m, m, transpose_b=transpose_b)
self._run(func, num_iters, execution_mode=execution_mode)
def _benchmark_nested_defun_matmul(self, m, transpose_b, num_iters):
inner = function.defun(math_ops.matmul)
@function.defun
def outer(a, b, c, transpose_b):
return math_ops.matmul(inner(a, b, transpose_b=transpose_b), c)
func = lambda: outer(m, m, m, transpose_b=transpose_b)
# Warmup before benchmark
for _ in range(1000):
func()
self._run(func, num_iters)
def _benchmark_defun_matmul_forward_backward(self,
m,
transpose_b,
num_iters,
execution_mode=None):
f = function.defun(math_ops.matmul)
def func():
with backprop.GradientTape() as gt:
gt.watch(m)
y = f(m, m, transpose_b=transpose_b)
_ = gt.gradient(y, m)
self._run(func, num_iters, execution_mode=execution_mode)
def _benchmark_read_variable(self, m, num_iters):
self._run(m.value, num_iters)
def _benchmark_matmul_read_variable(self, m, num_iters):
self._benchmark_gen_math_ops_matmul(
m, transpose_b=False, num_iters=num_iters)
def _benchmark_matmul_read_variable_with_tape(self, m, num_iters):
with backprop.GradientTape() as tape:
tape.watch(m)
self._benchmark_gen_math_ops_matmul(
m, transpose_b=False, num_iters=num_iters)
def _benchmark_read_variable_with_tape(self, m, num_iters):
with backprop.GradientTape() as tape:
tape.watch(m)
self._run(m.value, num_iters)
# Benchmarks for A^2, A of dimension 2 by 2.
@test_util.disable_tfrt("random ops not supported")
def benchmark_np_matmul_2_by_2(self):
self._benchmark_np_matmul(
self._m_2_by_2, transpose_b=False, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("random ops not supported")
def benchmark_tf_matmul_2_by_2_CPU(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_tf_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("random ops not supported")
def benchmark_tf_matmul_2_by_2_CPU_async(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_tf_matmul(
m,
transpose_b=False,
num_iters=self._num_iters_2_by_2,
execution_mode=context.ASYNC)
@test_util.disable_tfrt("random ops not supported")
def benchmark_gen_math_ops_matmul_2_by_2_CPU(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_gen_math_ops_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("random ops not supported")
def benchmark_tfe_py_fastpath_execute_matmul_2_by_2_CPU(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_tfe_py_fastpath_execute_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("random ops not supported")
def benchmark_tfe_py_execute_matmul_2_by_2_CPU(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_tfe_py_execute_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("random ops not supported")
def benchmark_defun_matmul_2_by_2_CPU(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_defun_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("random ops not supported")
def benchmark_defun_matmul_2_by_2_CPU_async(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_defun_matmul(
m,
transpose_b=False,
num_iters=self._num_iters_2_by_2,
execution_mode=context.ASYNC)
@test_util.disable_tfrt("random ops not supported")
def benchmark_defun_matmul_forward_backward_2_by_2_CPU(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_defun_matmul_forward_backward(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("random ops not supported")
def benchmark_defun_matmul_forward_backward_2_by_2_CPU_async(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_defun_matmul_forward_backward(
m,
transpose_b=False,
num_iters=self._num_iters_2_by_2,
execution_mode=context.ASYNC)
@test_util.disable_tfrt("random ops not supported")
def benchmark_tf_matmul_2_by_2_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2_by_2.gpu()
self._benchmark_tf_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("random ops not supported")
def benchmark_tf_matmul_2_by_2_GPU_async(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2_by_2.gpu()
self._benchmark_tf_matmul(
m,
transpose_b=False,
num_iters=self._num_iters_2_by_2,
execution_mode=context.ASYNC)
@test_util.disable_tfrt("random ops not supported")
def benchmark_gen_math_ops_matmul_2_by_2_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2_by_2.gpu()
self._benchmark_gen_math_ops_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("random ops not supported")
def benchmark_tfe_py_execute_matmul_2_by_2_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2_by_2.gpu()
self._benchmark_tfe_py_execute_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("random ops not supported")
def benchmark_defun_matmul_2_by_2_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2_by_2.gpu()
self._benchmark_defun_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("random ops not supported")
def benchmark_defun_matmul_2_by_2_GPU_async(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2_by_2.gpu()
self._benchmark_defun_matmul(
m,
transpose_b=False,
num_iters=self._num_iters_2_by_2,
execution_mode=context.ASYNC)
@test_util.disable_tfrt("random ops not supported")
def benchmark_nested_defun_matmul_2_by_2(self):
m = self._m_2_by_2.cpu()
self._benchmark_nested_defun_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
# Benchmarks for AA.T, A of dimension 100 by 784.
@test_util.disable_tfrt("random ops not supported")
def benchmark_np_matmul_100_by_784(self):
self._benchmark_np_matmul(
self._m_100_by_784,
transpose_b=True,
num_iters=self._num_iters_100_by_784)
@test_util.disable_tfrt("random ops not supported")
def benchmark_tf_matmul_100_by_784_CPU(self):
with context.device(CPU):
m = self._m_100_by_784.cpu()
self._benchmark_tf_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
@test_util.disable_tfrt("random ops not supported")
def benchmark_tf_matmul_100_by_784_CPU_async(self):
with context.device(CPU):
m = self._m_100_by_784.cpu()
self._benchmark_tf_matmul(
m,
transpose_b=True,
num_iters=self._num_iters_100_by_784,
execution_mode=context.ASYNC)
@test_util.disable_tfrt("random ops not supported")
def benchmark_gen_math_ops_matmul_100_by_784_CPU(self):
with context.device(CPU):
m = self._m_100_by_784.cpu()
self._benchmark_gen_math_ops_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
@test_util.disable_tfrt("random ops not supported")
def benchmark_tfe_py_fastpath_execute_matmul_100_by_784_CPU(self):
with context.device(CPU):
m = self._m_100_by_784.cpu()
self._benchmark_tfe_py_fastpath_execute_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
@test_util.disable_tfrt("random ops not supported")
def benchmark_tfe_py_execute_matmul_100_by_784_CPU(self):
with context.device(CPU):
m = self._m_100_by_784.cpu()
self._benchmark_tfe_py_execute_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
@test_util.disable_tfrt("random ops not supported")
def benchmark_defun_matmul_100_by_784_CPU(self):
with context.device(CPU):
m = self._m_100_by_784.cpu()
self._benchmark_defun_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
@test_util.disable_tfrt("random ops not supported")
def benchmark_tf_matmul_100_by_784_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_100_by_784.gpu()
self._benchmark_tf_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
@test_util.disable_tfrt("random ops not supported")
def benchmark_tf_matmul_100_by_784_GPU_async(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_100_by_784.gpu()
self._benchmark_tf_matmul(
m,
transpose_b=True,
num_iters=self._num_iters_100_by_784,
execution_mode=context.ASYNC)
@test_util.disable_tfrt("random ops not supported")
def benchmark_gen_math_ops_matmul_100_by_784_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_100_by_784.gpu()
self._benchmark_gen_math_ops_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
@test_util.disable_tfrt("random ops not supported")
def benchmark_tfe_py_execute_matmul_100_by_784_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_100_by_784.gpu()
self._benchmark_tfe_py_execute_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
@test_util.disable_tfrt("random ops not supported")
def benchmark_defun_matmul_100_by_784_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_100_by_784.gpu()
self._benchmark_defun_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
@test_util.disable_tfrt("random ops not supported")
def benchmark_nested_defun_matmul_100_by_784(self):
m = self._m_100_by_784.gpu()
self._benchmark_nested_defun_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def _benchmark_forwardprop_matmul_CPU(self, shape):
with ops.device(CPU):
m = random_ops.random_uniform(shape).cpu()
tangent = random_ops.random_uniform(shape).cpu()
def func():
with forwardprop.ForwardAccumulator(m, tangent) as acc:
result = math_ops.matmul(m, m, transpose_b=True)
return result, acc.jvp(result)
# Warmup before benchmark
for _ in range(100):
func()
self._run(func, 3000)
def _benchmark_forwardprop_in_defun_matmul_CPU(self, shape):
with ops.device(CPU):
@def_function.function
def compiled_function(x, tangent):
with forwardprop.ForwardAccumulator(x, tangent) as acc:
result = math_ops.matmul(x, x, transpose_b=True)
return result, acc.jvp(result)
m = random_ops.random_uniform(shape).cpu()
tangent = random_ops.random_uniform(shape).cpu()
func = lambda: compiled_function(m, tangent)
# Warmup before benchmark
for _ in range(100):
func()
self._run(func, 3000)
def _benchmark_forwardprop_in_defun_of_defun_matmul_CPU(self, shape):
with ops.device(CPU):
matmul = def_function.function(math_ops.matmul)
@def_function.function()
def compiled_function(x, tangent):
with forwardprop.ForwardAccumulator(x, tangent) as acc:
result = matmul(x, x, transpose_b=True)
return result, acc.jvp(result)
m = random_ops.random_uniform(shape).cpu()
tangent = random_ops.random_uniform(shape).cpu()
func = lambda: compiled_function(m, tangent)
# Warmup before benchmark
for _ in range(100):
func()
self._run(func, 3000)
def _benchmark_forwardprop_of_defun_matmul_CPU(self, shape):
with ops.device(CPU):
m = random_ops.random_uniform(shape).cpu()
tangent = random_ops.random_uniform(shape).cpu()
matmul = def_function.function(math_ops.matmul)
def func():
with forwardprop.ForwardAccumulator(m, tangent) as acc:
result = matmul(m, m, transpose_b=True)
return result, acc.jvp(result)
# Warmup before benchmark
for _ in range(100):
func()
self._run(func, 3000)
@test_util.disable_tfrt("random ops not supported")
def benchmark_forwardprop_matmul_256_by_2096_CPU(self):
self._benchmark_forwardprop_matmul_CPU(shape=(256, 2096))
@test_util.disable_tfrt("random ops not supported")
def benchmark_forwardprop_in_defun_matmul_256_by_2096_CPU(self):
self._benchmark_forwardprop_in_defun_matmul_CPU(shape=(256, 2096))
@test_util.disable_tfrt("random ops not supported")
def benchmark_forwardprop_in_defun_of_defun_matmul_256_by_2096_CPU(self):
self._benchmark_forwardprop_in_defun_of_defun_matmul_CPU(shape=(256, 2096))
@test_util.disable_tfrt("random ops not supported")
def benchmark_forwardprop_of_defun_matmul_256_by_2096_CPU(self):
self._benchmark_forwardprop_of_defun_matmul_CPU(shape=(256, 2096))
@test_util.disable_tfrt("random ops not supported")
def benchmark_forwardprop_matmul_100_by_784_CPU(self):
self._benchmark_forwardprop_matmul_CPU(shape=(100, 784))
@test_util.disable_tfrt("random ops not supported")
def benchmark_forwardprop_in_defun_matmul_100_by_784_CPU(self):
self._benchmark_forwardprop_in_defun_matmul_CPU(shape=(100, 784))
@test_util.disable_tfrt("random ops not supported")
def benchmark_forwardprop_in_defun_of_defun_matmul_100_by_784_CPU(self):
self._benchmark_forwardprop_in_defun_of_defun_matmul_CPU(shape=(100, 784))
@test_util.disable_tfrt("random ops not supported")
def benchmark_forwardprop_of_defun_matmul_100_by_784_CPU(self):
self._benchmark_forwardprop_of_defun_matmul_CPU(shape=(100, 784))
def _benchmark_tf_reduce_logsumexp(self,
device=CPU,
execution_mode=None,
defunc=False):
with context.device(device):
x = constant_op.constant([[1, 0.], [0., 0.]])
if defunc:
reduce_func = def_function.function(math_ops.reduce_logsumexp)
func = lambda: reduce_func(x)
else:
func = lambda: math_ops.reduce_logsumexp(x)
self._run(func, 3000, execution_mode=execution_mode)
@test_util.disable_tfrt("reduce logsumexp not supported")
def benchmark_tf_reduce_logsumexp_CPU(self):
self._benchmark_tf_reduce_logsumexp()
@test_util.disable_tfrt("reduce logsumexp not supported")
def benchmark_tf_reduce_logsumexp_CPU_async(self):
self._benchmark_tf_reduce_logsumexp(execution_mode=context.ASYNC)
@test_util.disable_tfrt("reduce logsumexp not supported")
def benchmark_tf_reduce_logsumexp_GPU(self):
self._benchmark_tf_reduce_logsumexp(device=GPU)
@test_util.disable_tfrt("reduce logsumexp not supported")
def benchmark_tf_reduce_logsumexp_GPU_async(self):
self._benchmark_tf_reduce_logsumexp(device=GPU,
execution_mode=context.ASYNC)
@test_util.disable_tfrt("reduce logsumexp not supported")
def benchmark_tf_reduce_logsumexp_CPU_defunc(self):
self._benchmark_tf_reduce_logsumexp(defunc=True)
@test_util.disable_tfrt("reduce logsumexp not supported")
def benchmark_tf_reduce_logsumexp_CPU_async_defun(self):
self._benchmark_tf_reduce_logsumexp(
execution_mode=context.ASYNC, defunc=True)
@test_util.disable_tfrt("reduce logsumexp not supported")
def benchmark_tf_reduce_logsumexp_GPU_defun(self):
self._benchmark_tf_reduce_logsumexp(device=GPU, defunc=True)
@test_util.disable_tfrt("reduce logsumexp not supported")
def benchmark_tf_reduce_logsumexp_GPU_async_defun(self):
self._benchmark_tf_reduce_logsumexp(
device=GPU, execution_mode=context.ASYNC, defunc=True)
def _benchmark_tf_tensordot(self, device=CPU, execution_mode=None):
with context.device(device):
a = array_ops.ones((2, 2))
b = array_ops.ones((2, 2))
func = lambda: math_ops.tensordot(a, b, [[1], [0]])
self._run(func, 30000, execution_mode=execution_mode)
@test_util.disable_tfrt("tensordot not supported")
def benchmark_tf_tensordot_CPU(self):
self._benchmark_tf_tensordot()
@test_util.disable_tfrt("tensordot not supported")
def benchmark_tf_tensordot_CPU_async(self):
self._benchmark_tf_tensordot(execution_mode=context.ASYNC)
@test_util.disable_tfrt("tensordot not supported")
def benchmark_tf_tensordot_GPU(self):
self._benchmark_tf_tensordot(device=GPU)
@test_util.disable_tfrt("tensordot not supported")
def benchmark_tf_tensordot_GPU_async(self):
self._benchmark_tf_tensordot(device=GPU, execution_mode=context.ASYNC)
def _benchmark_tf_zeros(self, shape, dtype, device=CPU):
with context.device(device):
func = lambda: array_ops.zeros(shape, dtype)
self._run(func, 3000)
@test_util.disable_tfrt("context.device not supported")
def benchmark_tf_zeros_2_by_2_float32_CPU(self):
self._benchmark_tf_zeros((2, 2), dtypes.float32)
@test_util.disable_tfrt("context.device not supported")
def benchmark_tf_zeros_2_by_2_bool_CPU(self):
self._benchmark_tf_zeros((2, 2), dtypes.bool)
@test_util.disable_tfrt("context.device not supported")
def benchmark_tf_zeros_2_by_2_string_CPU(self):
self._benchmark_tf_zeros((2, 2), dtypes.string)
@test_util.disable_tfrt("context.device not supported")
def benchmark_tf_zeros_2_by_2_float32_GPU(self):
self._benchmark_tf_zeros((2, 2), dtypes.float32, device=GPU)
@test_util.disable_tfrt("context.device not supported")
def benchmark_tf_zeros_2_by_2_bool_GPU(self):
self._benchmark_tf_zeros((2, 2), dtypes.bool, device=GPU)
@test_util.disable_tfrt("context.device not supported")
def benchmark_tf_zeros_30_by_30_float32_CPU(self):
self._benchmark_tf_zeros((30, 30), dtypes.float32)
@test_util.disable_tfrt("context.device not supported")
def benchmark_tf_zeros_30_by_30_bool_CPU(self):
self._benchmark_tf_zeros((30, 30), dtypes.bool)
@test_util.disable_tfrt("context.device not supported")
def benchmark_tf_zeros_30_by_30_string_CPU(self):
self._benchmark_tf_zeros((30, 30), dtypes.string)
@test_util.disable_tfrt("context.device not supported")
def benchmark_tf_zeros_30_by_30_float32_GPU(self):
self._benchmark_tf_zeros((30, 30), dtypes.float32, device=GPU)
@test_util.disable_tfrt("context.device not supported")
def benchmark_tf_zeros_30_by_30_bool_GPU(self):
self._benchmark_tf_zeros((30, 30), dtypes.bool, device=GPU)
@test_util.disable_tfrt("context.device not supported")
def benchmark_tf_zeros_100_by_100_float32_CPU(self):
self._benchmark_tf_zeros((100, 100), dtypes.float32)
@test_util.disable_tfrt("context.device not supported")
def benchmark_tf_zeros_100_by_100_bool_CPU(self):
self._benchmark_tf_zeros((100, 100), dtypes.bool)
@test_util.disable_tfrt("context.device not supported")
def benchmark_tf_zeros_100_by_100_string_CPU(self):
self._benchmark_tf_zeros((100, 100), dtypes.string)
@test_util.disable_tfrt("context.device not supported")
def benchmark_tf_zeros_100_by_100_float32_GPU(self):
self._benchmark_tf_zeros((100, 100), dtypes.float32, device=GPU)
@test_util.disable_tfrt("context.device not supported")
def benchmark_tf_zeros_100_by_100_bool_GPU(self):
self._benchmark_tf_zeros((100, 100), dtypes.bool, device=GPU)
def _benchmark_tf_zeros_like(self, m, device=CPU):
with context.device(device):
func = lambda: array_ops.zeros_like(m)
self._run(func, 3000)
@test_util.disable_tfrt("random ops not supported")
def benchmark_tf_zeros_like_CPU(self):
self._benchmark_tf_zeros_like(self._m_2_by_2)
@test_util.disable_tfrt("random ops not supported")
def benchmark_tf_zeros_like_GPU(self):
self._benchmark_tf_zeros_like(self._m_2_by_2, device=GPU)
@test_util.disable_tfrt("random ops not supported")
def benchmark_tf_zeros_like_variable_CPU(self):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
self._benchmark_tf_zeros_like(m)
@test_util.disable_tfrt("random ops not supported")
def benchmark_tf_zeros_like_variable_GPU(self):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
self._benchmark_tf_zeros_like(m, device=GPU)
@test_util.disable_tfrt("random ops not supported")
def _benchmark_tf_random_uniform_2_by_2(self,
shape=(2, 2),
dtype=dtypes.int32,
device=CPU):
with context.device(device):
def func():
return random_ops.random_uniform(shape, maxval=3, dtype=dtype)
self._run(func, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("random ops not supported")
def benchmark_tf_random_uniform_2_by_2_integer_CPU(self):
self._benchmark_tf_random_uniform_2_by_2()
@test_util.disable_tfrt("random ops not supported")
def benchmark_tf_random_uniform_2_by_2_integer_GPU(self):
self._benchmark_tf_random_uniform_2_by_2(device=GPU)
@test_util.disable_tfrt("random ops not supported")
def benchmark_tf_random_uniform_2_by_2_float_CPU(self):
self._benchmark_tf_random_uniform_2_by_2(dtype=dtypes.float32)
@test_util.disable_tfrt("random ops not supported")
def benchmark_tf_random_uniform_2_by_2_float_GPU(self):
self._benchmark_tf_random_uniform_2_by_2(
dtype=dtypes.float32, device=GPU)
@test_util.disable_tfrt("random ops not supported")
def benchmark_tf_random_uniform_2_by_2_default_setting_CPU(self):
with context.device(CPU):
func = lambda: random_ops.random_uniform((2, 2))
self._run(func, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("random ops not supported")
def benchmark_tf_random_uniform_2_by_2_default_setting_GPU(self):
with context.device(GPU):
func = lambda: random_ops.random_uniform((2, 2))
self._run(func, num_iters=self._num_iters_2_by_2)
def _benchmark_tf_dropout_2_by_2(self,
is_rate_tensor=True,
noise_shape=None,
device=CPU):
if is_rate_tensor:
rate = constant_op.constant(0.5, dtype=dtypes.float32)
else:
rate = 0.5
with context.device(device):
def func():
return nn_ops.dropout(
self._m_2_by_2, rate=rate, noise_shape=noise_shape)
self._run(func, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("random ops not supported")
def benchmark_tf_dropout_scalar_rate_2_by_2_CPU(self):
self._benchmark_tf_dropout_2_by_2(is_rate_tensor=False)
@test_util.disable_tfrt("random ops not supported")
def benchmark_tf_dropout_scalar_rate_2_by_2_GPU(self):
self._benchmark_tf_dropout_2_by_2(is_rate_tensor=False, device=GPU)
@test_util.disable_tfrt("random ops not supported")
def benchmark_tf_dropout_2_by_2_CPU(self):
self._benchmark_tf_dropout_2_by_2()
@test_util.disable_tfrt("random ops not supported")
def benchmark_tf_dropout_2_by_2_GPU(self):
self._benchmark_tf_dropout_2_by_2(device=GPU)
def _benchmark_transpose(self,
m,
num_iters,
perm=None,
conjugate=False,
execution_mode=None):
func = lambda: array_ops.transpose(m, perm, conjugate)
self._run(func, num_iters, execution_mode=execution_mode)
@test_util.disable_tfrt("random ops not supported")
def benchmark_tf_transpose_2_by_2_CPU(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_transpose(m, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("random ops not supported")
def benchmark_tf_transpose_2_by_2_GPU(self):
with context.device(GPU):
m = self._m_2_by_2.gpu()
self._benchmark_transpose(m, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("random ops not supported")
def benchmark_tf_transpose_variable_2_by_2_CPU(self):
with context.device(CPU):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
self._benchmark_transpose(m, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("random ops not supported")
def benchmark_tf_transpose_variable_2_by_2_GPU(self):
with context.device(GPU):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
self._benchmark_transpose(m, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("defun not supported")
def benchmark_defun_without_signature(self):
def func(t1, t2, t3, t4, t5, t6, t7, t8):
del t1, t2, t3, t4, t5, t6, t7, t8
return None
defined = function.defun(func)
t = constant_op.constant(0.0)
cache_computation = lambda: defined(t, t, t, t, t, t, t, t)
self._run(cache_computation, 30000)
@test_util.disable_tfrt("defun not supported")
def benchmark_defun_without_signature_and_with_kwargs(self):
def func(t1, t2, t3, t4, t5, t6, t7, t8):
del t1, t2, t3, t4, t5, t6, t7, t8
return None
defined = function.defun(func)
t = constant_op.constant(0.0)
def cache_computation():
return defined(t1=t, t2=t, t3=t, t4=t, t5=t, t6=t, t7=t, t8=t)
self._run(cache_computation, 30000)
@test_util.disable_tfrt("defun not supported")
def benchmark_defun_with_signature(self):
def func(t1, t2, t3, t4, t5, t6, t7, t8):
del t1, t2, t3, t4, t5, t6, t7, t8
return None
defined = function.defun(
func, input_signature=[tensor_spec.TensorSpec([], dtypes.float32)] * 8)
t = constant_op.constant(0.0)
signature_computation = lambda: defined(t, t, t, t, t, t, t, t)
self._run(signature_computation, 30000)
@test_util.disable_tfrt("defun not supported")
def benchmark_defun_with_signature_and_kwargs(self):
def func(t1, t2, t3, t4, t5, t6, t7, t8):
del t1, t2, t3, t4, t5, t6, t7, t8
return None
defined = function.defun(
func, input_signature=[tensor_spec.TensorSpec([], dtypes.float32)] * 8)
t = constant_op.constant(0.0)
def signature_computation():
return defined(t1=t, t2=t, t3=t, t4=t, t5=t, t6=t, t7=t, t8=t)
self._run(signature_computation, 30000)
@test_util.disable_tfrt("random ops not supported")
def benchmark_matmul_read_variable_op_2_by_2_CPU(self):
with context.device(CPU):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
self._benchmark_matmul_read_variable(m, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("random ops not supported")
def benchmark_matmul_read_variable_op_with_tape_2_by_2_CPU(self):
with context.device(CPU):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
self._benchmark_matmul_read_variable_with_tape(
m, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("random ops not supported")
def benchmark_read_variable_op_2_by_2_CPU(self):
with context.device(CPU):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
self._benchmark_read_variable(m, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("random ops not supported")
def benchmark_read_variable_op_2_by_2_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2.gpu())
self._benchmark_read_variable(m, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("random ops not supported")
def benchmark_read_variable_op_with_tape_2_by_2_CPU(self):
with context.device(CPU):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
self._benchmark_read_variable_with_tape(
m, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("random ops not supported")
def benchmark_read_variable_op_with_tape_2_by_2_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2.gpu())
self._benchmark_read_variable_with_tape(
m, num_iters=self._num_iters_2_by_2)
@test_util.disable_tfrt("Scan, loops need fallback")
def benchmarkScan(self):
elems = math_ops.range(1600)
def scan():
return functional_ops.scan(
lambda a, x: a + x, elems, parallel_iterations=1)
self._run(scan, 100)
@test_util.disable_tfrt("Scan, loops need fallback")
def benchmarkScanDefun(self):
elems = math_ops.range(1600)
@function.defun
def scan():
return functional_ops.scan(
lambda a, x: a + x, elems, parallel_iterations=1)
self._run(scan, 100)
@test_util.disable_tfrt("add not supported, only add_v2")
def benchmark_fastpath_conversion_type_inference(self):
c = constant_op.constant(1., dtype=dtypes.float32)
def fn():
return gen_math_ops.add(c, 1)
self._run(fn, 10000)
@test_util.disable_tfrt("convert to tensor not supported")
def benchmark_convert_3x_list_to_tensor(self):
xs = [1, 2, 3]
self._run(lambda: ops.convert_to_tensor(xs), 1000)
@test_util.disable_tfrt("convert to tensor not supported")
def benchmark_convert_3x_array_to_tensor(self):
xs = np.array([1, 2, 3], dtype=np.int32)
self._run(lambda: ops.convert_to_tensor(xs), 1000)
def benchmark_constant_40x2_list_to_tensor(self):
xs = [[0] * 2] * 40
self._run(lambda: constant_op.constant(xs), 1000)
@test_util.disable_tfrt("convert to tensor not supported")
def benchmark_constant_40x2_array_to_tensor(self):
xs = np.array([[0] * 2] * 40, dtype=np.int32)
self._run(lambda: constant_op.constant(xs), 1000)
def benchmark_constant_40x_list_of_2x_arrays_to_tensor(self):
xs = [np.array([0] * 2, dtype=np.int32)] * 40
self._run(lambda: constant_op.constant(xs), 1000)
def benchmark_constant_20x20x20_double_list_to_float32_tensor(self):
xs = [[[np.linspace(0, 1, 21).tolist()] * 20] * 20]
self._run(lambda: constant_op.constant(xs, dtype=dtypes.float32), 10000)
def benchmark_constant_20x20x20_double_list_to_float64_tensor(self):
xs = [[[np.linspace(0, 1, 21).tolist()] * 20] * 20]
self._run(lambda: constant_op.constant(xs, dtype=dtypes.float64), 10000)
@test_util.disable_tfrt("tf.fill not supported")
def benchmark_list_of_zeros_to_np_array(self):
values = []
for _ in range(1000):
values.append(array_ops.zeros(shape=(1000,)))
self._run(lambda: np.array([x.numpy() for x in values]), 1000)
def _benchmarkFunctionWithResourceInputs(self, num_resources, num_iters):
@def_function.function
def add_all(*args):
return math_ops.add_n(*args)
with context.device(CPU):
resources = []
for _ in range(num_resources):
resources.append(resource_variable_ops.ResourceVariable(self._m_2))
self._run(lambda: add_all(resources), num_iters)
@test_util.disable_tfrt("Random uniform needs fallback")
def benchmarkFunctionWithFiveResourceInputs(self):
self._benchmarkFunctionWithResourceInputs(5, 1000)
@test_util.disable_tfrt("Random uniform needs fallback")
def benchmarkFunctionWithFiveHundredResourceInputs(self):
self._benchmarkFunctionWithResourceInputs(500, 100)
def _benchmarkResourceReadsInCondInInnerFunc(self, var_count):
rvars = []
for _ in range(var_count):
rvars.append(resource_variable_ops.ResourceVariable(1.0))
# Note: We want to benchmark the graph building time so we intentionally
# add this outer function so that the tf.function gets retraced every time.
def benchmark_fn():
@def_function.function
def fn_with_many_reads():
@def_function.function
def fn_with_many_reads_inner():
def then_branch():
return math_ops.add_n(rvars)
def else_branch():
return 0.
return control_flow_ops.cond(
constant_op.constant(True), then_branch, else_branch)
return fn_with_many_reads_inner()
return fn_with_many_reads()
with context.device(CPU):
self._run(benchmark_fn, 10)
@test_util.disable_tfrt("VarHandleOp needs fallback")
def benchmarkTenThousandResourceReadsInCondInInnerFunc(self):
self._benchmarkResourceReadsInCondInInnerFunc(10000)
@test_util.disable_tfrt("VarHandleOp needs fallback")
def benchmarkHundredResourceReadsInCondInInnerFunc(self):
self._benchmarkResourceReadsInCondInInnerFunc(100)
@test_util.disable_tfrt("VarHandleOp needs fallback")
def benchmarkTenResourceReadsInCondInInnerFunc(self):
self._benchmarkResourceReadsInCondInInnerFunc(10)
if __name__ == "__main__":
test.main()
| 36.798673
| 80
| 0.716147
|
794a7e96a00e7f9dadbeae58340c3b963fe3aa9d
| 667
|
py
|
Python
|
api/tacticalrmm/agents/management/commands/show_outdated_agents.py
|
MikroT/tacticalrmm
|
f0b7e515b6d71c6d86454947e567f5908ae636ce
|
[
"MIT"
] | null | null | null |
api/tacticalrmm/agents/management/commands/show_outdated_agents.py
|
MikroT/tacticalrmm
|
f0b7e515b6d71c6d86454947e567f5908ae636ce
|
[
"MIT"
] | null | null | null |
api/tacticalrmm/agents/management/commands/show_outdated_agents.py
|
MikroT/tacticalrmm
|
f0b7e515b6d71c6d86454947e567f5908ae636ce
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.core.management.base import BaseCommand
from agents.models import Agent
from tacticalrmm.constants import AGENT_STATUS_ONLINE, ONLINE_AGENTS
class Command(BaseCommand):
help = "Shows online agents that are not on the latest version"
def handle(self, *args, **kwargs):
only = ONLINE_AGENTS + ("hostname",)
q = Agent.objects.exclude(version=settings.LATEST_AGENT_VER).only(*only)
agents = [i for i in q if i.status == AGENT_STATUS_ONLINE]
for agent in agents:
self.stdout.write(
self.style.SUCCESS(f"{agent.hostname} - v{agent.version}")
)
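# Example output line (hostname/version are illustrative):
#   server01 - v2.0.0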
| 35.105263
| 80
| 0.685157
|
794a7efe123a0044e11433f6e0e7a3164b27bc8e
| 966
|
py
|
Python
|
qiskit/qasm/node/reset.py
|
lerongil/qiskit-terra
|
a25af2a2378bc3d4f5ec73b948d048d1b707454c
|
[
"Apache-2.0"
] | 22
|
2019-08-15T04:39:15.000Z
|
2022-03-06T05:17:04.000Z
|
qiskit/qasm/node/reset.py
|
lerongil/qiskit-terra
|
a25af2a2378bc3d4f5ec73b948d048d1b707454c
|
[
"Apache-2.0"
] | 4
|
2019-05-13T15:28:46.000Z
|
2019-12-19T20:47:02.000Z
|
qiskit/qasm/node/reset.py
|
lerongil/qiskit-terra
|
a25af2a2378bc3d4f5ec73b948d048d1b707454c
|
[
"Apache-2.0"
] | 9
|
2019-09-05T05:33:00.000Z
|
2021-10-09T16:04:53.000Z
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Node for an OPENQASM reset statement."""
from .node import Node
class Reset(Node):
"""Node for an OPENQASM reset statement.
children[0] is a primary node (id or indexedid)
"""
def __init__(self, children):
"""Create the reset node."""
super().__init__('reset', children, None)
def qasm(self, prec=15):
"""Return the corresponding OPENQASM string."""
return "reset " + self.children[0].qasm(prec) + ";"
| 29.272727
| 77
| 0.681159
|
794a7f09cfc93e4df37cd204ea0953d64e12ec53
| 89
|
py
|
Python
|
openapi_client_generator/info.py
|
ppentchev/openapi-client-generator
|
a058af4ec28a1e53809273a662fb8cba0157695e
|
[
"MIT"
] | 6
|
2021-01-16T21:36:43.000Z
|
2022-01-24T10:21:14.000Z
|
openapi_client_generator/info.py
|
ppentchev/openapi-client-generator
|
a058af4ec28a1e53809273a662fb8cba0157695e
|
[
"MIT"
] | 11
|
2021-01-17T00:05:44.000Z
|
2022-01-23T16:13:56.000Z
|
openapi_client_generator/info.py
|
ppentchev/openapi-client-generator
|
a058af4ec28a1e53809273a662fb8cba0157695e
|
[
"MIT"
] | 2
|
2021-11-06T00:07:34.000Z
|
2022-01-14T01:35:06.000Z
|
DISTRIBUTION_NAME = 'openapi-client-generator'
PACKAGE_NAME = 'openapi_client_generator'
| 29.666667
| 46
| 0.842697
|
794a7f737a701c04fb59b66e98428fc389ee3973
| 754
|
py
|
Python
|
train.py
|
KopfKrieg/ReinLife
|
560e1495319e2b68d74285a60007cde6c1a27a18
|
[
"MIT"
] | 44
|
2020-05-05T14:50:38.000Z
|
2022-03-26T21:01:17.000Z
|
train.py
|
KopfKrieg/ReinLife
|
560e1495319e2b68d74285a60007cde6c1a27a18
|
[
"MIT"
] | 1
|
2022-01-08T21:38:44.000Z
|
2022-01-08T21:38:44.000Z
|
train.py
|
KopfKrieg/ReinLife
|
560e1495319e2b68d74285a60007cde6c1a27a18
|
[
"MIT"
] | 8
|
2020-05-06T05:51:43.000Z
|
2021-08-10T02:02:17.000Z
|
from ReinLife.Models import D3QN, DQN, PERD3QN, PERDQN, PPO
from ReinLife.Helpers import trainer
n_episodes = 15_000
brains = [DQN(train_freq=20, max_epi=n_episodes),
D3QN(train_freq=20),
PERDQN(train_freq=20),
PERD3QN(train_freq=20),
PPO(train_freq=20)]
trainer(brains, n_episodes=n_episodes, update_interval=300, width=30, height=30, max_agents=100,
visualize_results=True, print_results=False, google_colab=False, render=False, static_families=True,
training=True, save=True, limit_reproduction=False, incentivize_killing=True)
# To do:
# * Choose genes that are not alive to give them a fighting chance
# * Update tester.py such that it does not include the button press anymore
| 41.888889
| 108
| 0.724138
|
794a810e8919e29d8b9ca461e9543c553ca51cb2
| 9099
|
py
|
Python
|
library/ntc_install_os.py
|
itdependsnetworks/ntc-ansible
|
f691ef9275333f14c63bf38faacd516d6556e5cf
|
[
"Apache-2.0"
] | null | null | null |
library/ntc_install_os.py
|
itdependsnetworks/ntc-ansible
|
f691ef9275333f14c63bf38faacd516d6556e5cf
|
[
"Apache-2.0"
] | null | null | null |
library/ntc_install_os.py
|
itdependsnetworks/ntc-ansible
|
f691ef9275333f14c63bf38faacd516d6556e5cf
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2015 Jason Edelman <jason@networktocode.com>
# Network to Code, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DOCUMENTATION = '''
---
module: ntc_install_os
short_description: Install an operating system by setting the boot options like boot image and kickstart image.
description:
- Set boot options like boot image and kickstart image.
- Supported platforms include Cisco Nexus switches with NX-API, Cisco IOS switches or routers, Arista switches with eAPI.
notes:
- Do not include full file paths, just the name of the file(s) stored on the top level flash directory.
- You must know if your platform supports taking a kickstart image as a parameter. If supplied but not supported, errors may occur.
- It may be useful to use this module in conjunction with ntc_file_copy and ntc_reboot.
- With NXOS devices, this module attempts to install the software immediately, which may trigger a reboot.
- With NXOS devices, the install process may take up to 10 minutes, especially if the device reboots.
- Tested on Nexus 3000, 5000, 9000.
- In check mode, the module tells you if the current boot images are set to the desired images.
author: Jason Edelman (@jedelman8)
version_added: 1.9.2
requirements:
- pyntc
options:
platform:
description:
- Switch platform
required: true
choices: ['cisco_nxos_nxapi', 'arista_eos_eapi', 'cisco_ios_ssh']
system_image_file:
description:
- Name of the system (or combined) image file on flash.
required: true
kickstart_image_file:
description:
- Name of the kickstart image file on flash.
required: false
default: null
host:
description:
- Hostname or IP address of switch.
required: true
username:
description:
- Username used to login to the target device
required: true
password:
description:
- Password used to login to the target device
required: true
secret:
description:
- Enable secret for devices connecting over SSH.
required: false
transport:
description:
- Transport protocol for API-based devices.
required: false
default: null
choices: ['http', 'https']
port:
description:
- TCP/UDP port to connect to target device. If omitted, standard port numbers will be used:
80 for HTTP; 443 for HTTPS; 22 for SSH.
required: false
default: null
ntc_host:
description:
- The name of a host as specified in an NTC configuration file.
required: false
default: null
ntc_conf_file:
description:
- The path to a local NTC configuration file. If omitted, and ntc_host is specified,
the system will look for a file given by the path in the environment variable PYNTC_CONF,
and then in the users home directory for a file called .ntc.conf.
required: false
default: null
'''
EXAMPLES = '''
- ntc_install_os:
ntc_host: n9k1
system_image_file: n9000-dk9.6.1.2.I3.1.bin
- ntc_install_os:
ntc_host: n3k1
system_image_file: n3000-uk9.6.0.2.U6.5.bin
kickstart_image_file: n3000-uk9-kickstart.6.0.2.U6.5.bin
- ntc_install_os:
ntc_host: c2801
system_image_file: c2800nm-adventerprisek9_ivs_li-mz.151-3.T4.bin
'''
RETURN = '''
install_state:
returned: always
type: dictionary
sample: {
"kick": "n5000-uk9-kickstart.7.2.1.N1.1.bin",
"sys": "n5000-uk9.7.2.1.N1.1.bin",
"status": "This is the log of last installation.\n
Continuing with installation process, please wait.\n
The login will be disabled until the installation is completed.\n
Performing supervisor state verification. \n
SUCCESS\n
Supervisor non-disruptive upgrade successful.\n
Install has been successful.\n",
}
'''
import time
try:
HAS_PYNTC = True
from pyntc import ntc_device, ntc_device_by_name
except ImportError:
HAS_PYNTC = False
PLATFORM_NXAPI = 'cisco_nxos_nxapi'
PLATFORM_IOS = 'cisco_ios_ssh'
PLATFORM_EAPI = 'arista_eos_eapi'
PLATFORM_JUNOS = 'juniper_junos_netconf'
def already_set(current_boot_options, system_image_file, kickstart_image_file):
return current_boot_options.get('sys') == system_image_file \
and current_boot_options.get('kick') == kickstart_image_file
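# Example (illustrative values): both images must match the current boot options.
#   already_set({'sys': 'n9000-dk9.bin', 'kick': None}, 'n9000-dk9.bin', None)
#   # -> True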
def main():
module = AnsibleModule(
argument_spec=dict(
platform=dict(choices=[PLATFORM_NXAPI, PLATFORM_IOS, PLATFORM_EAPI, PLATFORM_JUNOS],
required=False),
host=dict(required=False),
username=dict(required=False, type='str'),
password=dict(required=False, type='str'),
secret=dict(required=False),
transport=dict(required=False, choices=['http', 'https']),
port=dict(required=False, type='int'),
ntc_host=dict(required=False),
ntc_conf_file=dict(required=False),
system_image_file=dict(required=True),
kickstart_image_file=dict(required=False),
),
mutually_exclusive=[['host', 'ntc_host'],
['ntc_host', 'secret'],
['ntc_host', 'transport'],
['ntc_host', 'port'],
['ntc_conf_file', 'secret'],
['ntc_conf_file', 'transport'],
['ntc_conf_file', 'port'],
],
required_one_of=[['host', 'ntc_host']],
required_together=[['host', 'username', 'password', 'platform']],
supports_check_mode=True
)
if not HAS_PYNTC:
module.fail_json(msg='pyntc Python library not found.')
platform = module.params['platform']
host = module.params['host']
username = module.params['username']
password = module.params['password']
ntc_host = module.params['ntc_host']
ntc_conf_file = module.params['ntc_conf_file']
transport = module.params['transport']
port = module.params['port']
secret = module.params['secret']
if ntc_host is not None:
device = ntc_device_by_name(ntc_host, ntc_conf_file)
else:
kwargs = {}
if transport is not None:
kwargs['transport'] = transport
if port is not None:
kwargs['port'] = port
if secret is not None:
kwargs['secret'] = secret
device_type = platform
device = ntc_device(device_type, host, username, password, **kwargs)
if device.device_type == PLATFORM_JUNOS:
module.fail_json(msg='Install OS for Juniper not supported.')
system_image_file = module.params['system_image_file']
kickstart_image_file = module.params['kickstart_image_file']
if kickstart_image_file == 'null':
kickstart_image_file = None
device.open()
current_boot_options = device.get_boot_options()
changed = False
if not already_set(current_boot_options, system_image_file, kickstart_image_file):
changed = True
    if not module.check_mode and changed:
if device.device_type == 'nxos':
timeout = 600
device.set_timeout(timeout)
try:
start_time = time.time()
device.set_boot_options(system_image_file, kickstart=kickstart_image_file)
            except Exception:
                # the device may drop the connection while it reboots;
                # fall through and poll get_boot_options() below
                pass
elapsed_time = time.time() - start_time
device.set_timeout(30)
try:
install_state = device.get_boot_options()
            except Exception:
install_state = {}
while elapsed_time < timeout and not install_state:
try:
install_state = device.get_boot_options()
                except Exception:
time.sleep(10)
elapsed_time += 10
else:
device.set_boot_options(system_image_file, kickstart=kickstart_image_file)
install_state = device.get_boot_options()
if not already_set(install_state, system_image_file, kickstart_image_file):
module.fail_json(msg='Install not successful', install_state=install_state)
else:
install_state = current_boot_options
device.close()
module.exit_json(changed=changed, install_state=install_state)
from ansible.module_utils.basic import *
if __name__ == '__main__':
    main()
| 35.822835
| 135
| 0.639631
|
794a81a90e210c9c1ce81fe2f5fd45daef8545a6
| 261
|
py
|
Python
|
src/saturnv_api/setup.py
|
epkaz93/saturnv
|
b8a2c61bb0e833f2e31698050113038bab3ca5a4
|
[
"MIT"
] | 1
|
2022-03-12T07:38:09.000Z
|
2022-03-12T07:38:09.000Z
|
src/saturnv_api/setup.py
|
epkaz93/saturnv
|
b8a2c61bb0e833f2e31698050113038bab3ca5a4
|
[
"MIT"
] | null | null | null |
src/saturnv_api/setup.py
|
epkaz93/saturnv
|
b8a2c61bb0e833f2e31698050113038bab3ca5a4
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_namespace_packages
setup(
name='saturnv_api',
packages=find_namespace_packages(include=['saturnv.api']),
    install_requires=[
'six',
'SQLAlchemy',
'psycopg2',
'psutil',
'rez'
]
)
| 18.642857
| 62
| 0.590038
|
794a8209a635006733a8b49cf7578ff255a66983
| 1,545
|
py
|
Python
|
project/core/db/base.py
|
p141592/simple_fastapi
|
f0ee4340b7904586cba62bc6a38e4ad9bfa6c37b
|
[
"MIT"
] | 2
|
2020-04-21T02:20:46.000Z
|
2020-08-20T21:26:06.000Z
|
project/core/db/base.py
|
p141592/simple_fastapi
|
f0ee4340b7904586cba62bc6a38e4ad9bfa6c37b
|
[
"MIT"
] | 1
|
2021-12-01T06:41:57.000Z
|
2021-12-01T06:41:57.000Z
|
project/core/db/base.py
|
p141592/simple_fastapi
|
f0ee4340b7904586cba62bc6a38e4ad9bfa6c37b
|
[
"MIT"
] | null | null | null |
import sqlalchemy as s
from pydantic_sqlalchemy import sqlalchemy_to_pydantic
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from contextlib import contextmanager
from pydantic import BaseConfig
from core.settings import settings
engine = create_engine(
settings.DB_DSN, echo=True, connect_args={"check_same_thread": False}
)
Base = declarative_base()
Session = sessionmaker(bind=engine)  # build the session factory once, not per call
@contextmanager
def session_scope():
    """Provide a transactional scope around a series of operations."""
    session = Session()
try:
yield session
session.commit()
except:
session.rollback()
raise
finally:
session.close()
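# Illustrative usage:
#   with session_scope() as session:
#       session.add(instance)  # committed on success, rolled back on error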
class PydanticConfig(BaseConfig):
orm_mode = True
class BaseDBModel(Base):
__abstract__ = True
_model = None
readable_field = "id"
@declared_attr
def __tablename__(cls) -> str:
return cls.__name__.lower()
id = s.Column(s.Integer, primary_key=True, unique=True, autoincrement=True)
@classmethod
def model(cls, **kwargs):
return sqlalchemy_to_pydantic(cls, config=PydanticConfig, **kwargs)
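    # e.g. Schema = ConcreteModel.model() yields a Pydantic model with orm_mode
    # enabled, mirroring the table's columns (ConcreteModel is an illustrative name).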
def __repr__(self):
return f"<{self.__class__.__name__}: {getattr(self, self.readable_field)}>"
class BaseDBHandbook(BaseDBModel):
__abstract__ = True
readable_field = "title"
title = s.Column(s.String)
key = s.Column(s.String, nullable=False, unique=True)
| 24.52381
| 83
| 0.721683
|
794a821168f32d004fb5f1f1e366e4b4044653e6
| 337
|
py
|
Python
|
ecosystems/cedar_settings.py
|
stewardshiptools/stewardshiptools
|
ee5d27e7b0d5d4947f34ad02bdf63a06ad0a5c3e
|
[
"MIT"
] | null | null | null |
ecosystems/cedar_settings.py
|
stewardshiptools/stewardshiptools
|
ee5d27e7b0d5d4947f34ad02bdf63a06ad0a5c3e
|
[
"MIT"
] | 11
|
2020-03-24T15:29:46.000Z
|
2022-03-11T23:14:48.000Z
|
ecosystems/cedar_settings.py
|
stewardshiptools/stewardshiptools
|
ee5d27e7b0d5d4947f34ad02bdf63a06ad0a5c3e
|
[
"MIT"
] | null | null | null |
from cedar_settings.default_settings import default_settings
default_settings['ecosystems_project_code_prefix'] = ('text', '#ECO-PRJ-')
# see DEV PRJ settings for an example of how to use this.
default_settings['ecosystems_project_misc_textareas'] = ('text',
"""notes|Notes""")
| 42.125
| 75
| 0.646884
|
794a83286f2ff607191648298d8f857fac82a749
| 8,202
|
py
|
Python
|
grader.py
|
petef4/payg
|
3a83e093abb985a71cbfd6ab94fd258471550df7
|
[
"MIT"
] | 1
|
2021-09-02T22:03:52.000Z
|
2021-09-02T22:03:52.000Z
|
grader.py
|
petef4/payg
|
3a83e093abb985a71cbfd6ab94fd258471550df7
|
[
"MIT"
] | 2
|
2021-03-24T17:33:47.000Z
|
2021-03-24T17:33:48.000Z
|
grader.py
|
petef4/payg
|
3a83e093abb985a71cbfd6ab94fd258471550df7
|
[
"MIT"
] | null | null | null |
import re
class Grader:
def __init__(self, grading):
self.grading = grading
def grade(self, data):
"""Amend data with scores, keys (for sorting columns) and grades.
Scores are determined for most columns. Sort keys start with the column
score but then may draw on other columns so that secondary sorting
works. Sort keys are effectively alphabetic when scoring does not
apply.
"""
operator_scores = dict(
(name, index) for (index, name) in enumerate(
sorted(set(r['operator'] for r in data))))
plan_scores = dict(
(name, index) for (index, name) in enumerate(
sorted(set(r['plan'] for r in data))))
network_scores = dict(
(name, index) for (index, name) in enumerate(
sorted(set(r['network'] for r in data))))
MB_per_day = self.grading['data']['MB / day']
for row in data:
operator = operator_scores[row['operator']]
plan = plan_scores[row['plan']]
row['operator.key'] = 100 * operator + plan
row['plan.key'] = 100 * plan + operator
row['min_same.score'] = same = decipence(row['min_same'])
row['min_other.score'] = other = decipence(row['min_other'])
row['min_land.score'] = land = decipence(row['min_land'])
row['min_same.key'] = 1000000 * same + 1000 * other + land
row['min_other.key'] = 1000000 * other + 1000 * land + same
row['min_land.key'] = 1000000 * land + 1000 * other + same
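            # The weighted sums above act as lexicographic keys: the leading
            # column dominates while the other two only break ties.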
row['charge_min.key'] = row['charge_min.score'] = (
max((same, other, land))
if row['charge_min'] == '[1 min]'
else decipence(row['charge_min']))
row['bill_per.key'] = row['bill_per.score'] = bill_per_score(
row['bill_per'])
row['sms_same.score'] = same = decipence(row['sms_same'])
row['sms_other.score'] = other = decipence(row['sms_other'])
row['sms_same.key'] = 1000 * same + other
row['sms_other.key'] = 1000 * other + same
row['voicemail.key'] = row['voicemail.score'] = voicemail_score(
row['voicemail'])
row['08x.key'] = row['08x.score'] = decipence(row['08x'])
row['mms.key'] = row['mms.score'] = decipence(row['mms'])
row['4G.key'] = row['4G.score'] = yes_no_score(row['4G'])
row['tether.key'] = row['tether.score'] = yes_no_score(
row['tether'])
row['data.key'] = row['data.score'] = data_score(MB_per_day,
row['data'])
network = network_scores[row['network']]
row['network.key'] = 10000 * network + 100 * operator + plan
row['checked.key'] = (
10000 * int(row['checked'].replace('-', '')) + 100 * operator +
plan)
grades = {}
for col_score, score in row.items():
if not col_score.endswith('.score'):
continue
col = col_score[:-6]
if row[col] in ('?', 'n/a') or (
col == 'data' and (row[col].startswith('Add-ons') or
row[col].startswith('Bundles'))):
grades[col + '.grade'] = 'na'
else:
for g in self.grading:
if col.startswith(g):
break
else:
g = None
if g:
for grade in ['good', 'okay', 'poor']:
if score <= self.grading[g][grade]:
grades[col + '.grade'] = grade
break
else:
grades[col + '.grade'] = 'bad'
row.update(grades)
def decipence(pence):
"""Convert pence or pounds into decipence, taking note of special values.
Decipence are ints.
"""
if isinstance(pence, str):
if pence.endswith('p'):
pence = float(pence[:-1])
elif pence.startswith('£'):
pence = float(pence[1:]) * 100
elif pence == 'Free':
pence = 0
elif pence in ('?', 'n/a'):
pence = 99.9
else:
raise ValueError('String format not handled: ' + pence)
return int(pence * 10 + 0.5)
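# Illustrative conversions: decipence('12.5p') -> 125, decipence('£1.50') -> 1500,
# decipence('Free') -> 0, decipence('?') -> 999 (so unknown values sort last).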
def bill_per_score(bill_per):
if bill_per == 'sec':
return 100
elif bill_per in ('min', '[min]'):
return 200
    elif bill_per == 'day':
return 400
else:
raise ValueError('String format not handled: ' + bill_per)
def voicemail_score(voicemail):
try:
pence = voicemail.split('/')[0].rstrip() # discard ' / per call'
except AttributeError: # voicemail is an int
pence = voicemail
return decipence(pence)
YES_NO = {
'Yes': 100,
'Soon': 150,
'No': 200,
'[No]': 200,
'n/a': None,
'?': None}
def yes_no_score(yes_no):
try:
return YES_NO[yes_no]
except KeyError:
raise ValueError('String format not handled: ' + yes_no)
_FLT = r'\d+\.?\d*'
FLOAT = '(' + _FLT + ')'
MONEY = '(' + _FLT + 'p|£' + _FLT + ')'
DAY_PER_MONTH = 30 # month tends to be shorthand for 30 days, not calendar
N_MB = '(' + _FLT + ')MB'
def dip_per_MB(MB_per_day, data):
match = re.match(MONEY + ' / MB', data)
if match:
return MB_per_day * decipence(match.group(1))
return None
def dip_per_nMB(MB_per_day, data):
match = re.match(MONEY + ' / ' + N_MB, data)
if match:
return MB_per_day * decipence(match.group(1)) / float(match.group(2))
return None
def dip_per_day(MB_per_day, data):
match = re.match(
MONEY + ' / day for ' + FLOAT + ' MB then ' + MONEY + ' / MB',
data)
if match:
chunk = 1
else:
match = re.match(
MONEY + ' / day for ' + FLOAT + ' MB then ' + MONEY + ' / ' +
FLOAT + ' MB',
data)
if match:
chunk = float(match.group(4))
else:
return None
price = decipence(match.group(1))
allowance = float(match.group(2))
if MB_per_day == 0:
return 0
elif MB_per_day <= allowance:
return price
extra, part = divmod(MB_per_day - allowance, chunk)
if part > 0:
extra += 1
return price + extra * decipence(match.group(3))
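# Worked example: with MB_per_day=30, '25p / day for 25 MB then 5p / MB' yields
# 250 + 5 * 50 = 500 decipence (5 extra MB beyond the allowance at 50 decipence each).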
def dip_per_MB_some_free_per_day(MB_per_day, data):
match = re.match(FLOAT + ' MB / day free then ' + MONEY + ' / MB', data)
if not match:
return None
free = float(match.group(1))
if MB_per_day < free:
return 0
return (MB_per_day - free) * decipence(match.group(2))
def dip_per_MB_some_free_per_month(MB_per_day, data):
match = re.match(FLOAT + ' MB / month free then ' + MONEY + ' / MB', data)
if not match:
return None
free = float(match.group(1)) / DAY_PER_MONTH
if MB_per_day < free:
return 0
return (MB_per_day - free) * decipence(match.group(2))
def dip_per_MB_capped(MB_per_day, data):
match = re.match(MONEY + ' / MB capped at ' + MONEY + ' / day', data)
if not match:
return None
price = MB_per_day * decipence(match.group(1))
return min(price, decipence(match.group(2)))
def dip_addons_only(MB_per_day, data):
match = re.match(
'(?:Add-on|Bundle)s start from ' + MONEY + ' / ' + FLOAT + ' (?:GB|MB)', data)
if match:
if MB_per_day == 0:
return 0
else:
return decipence(match.group(1))
return None
data_scorers = [
dip_per_MB_some_free_per_day,
dip_per_MB_some_free_per_month,
dip_per_MB_capped,
dip_per_MB, # must come later than dip_per_MB_capped
dip_per_nMB,
dip_per_day,
dip_addons_only]
def data_score(MB_per_day, data):
for fn in data_scorers:
score = fn(MB_per_day, data)
if score is not None:
return score
raise ValueError('String format not handled: ' + data)
| 34.317992
| 86
| 0.528164
|
794a83a35335a231168ec73248bb23a4abb622c3
| 740
|
py
|
Python
|
onadata/apps/eventlog/migrations/0015_auto_20170906_1211.py
|
awemulya/fieldsight-kobocat
|
f302d084e30fb637d43ec638c701e01a3dddc721
|
[
"BSD-2-Clause"
] | 38
|
2017-02-28T05:39:40.000Z
|
2019-01-16T04:39:04.000Z
|
onadata/apps/eventlog/migrations/0015_auto_20170906_1211.py
|
awemulya/fieldsightt
|
f302d084e30fb637d43ec638c701e01a3dddc721
|
[
"BSD-2-Clause"
] | 20
|
2017-04-27T09:14:27.000Z
|
2019-01-17T06:35:52.000Z
|
onadata/apps/eventlog/migrations/0015_auto_20170906_1211.py
|
awemulya/fieldsightt
|
f302d084e30fb637d43ec638c701e01a3dddc721
|
[
"BSD-2-Clause"
] | 5
|
2017-02-22T12:25:19.000Z
|
2019-01-15T11:16:40.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
('eventlog', '0014_auto_20170901_1159'),
]
operations = [
migrations.AddField(
model_name='fieldsightlog',
name='extra_content_type',
field=models.ForeignKey(related_name='notify_object', blank=True, to='contenttypes.ContentType', null=True),
),
migrations.AddField(
model_name='fieldsightlog',
name='extra_object_id',
field=models.CharField(max_length=255, null=True, blank=True),
),
]
| 28.461538
| 120
| 0.632432
|
794a83d6df80a000a6904ffc29448c3ca3e19ee9
| 25,998
|
py
|
Python
|
dql/cli.py
|
stevearc/dql
|
6ed2cfcd16d5b077c1613a0ff219bcedfcaf5539
|
[
"MIT"
] | 136
|
2016-09-30T15:08:08.000Z
|
2022-01-04T07:44:54.000Z
|
dql/cli.py
|
stevearc/dql
|
6ed2cfcd16d5b077c1613a0ff219bcedfcaf5539
|
[
"MIT"
] | 38
|
2016-10-01T05:33:04.000Z
|
2021-12-23T23:20:27.000Z
|
dql/cli.py
|
stevearc/dql
|
6ed2cfcd16d5b077c1613a0ff219bcedfcaf5539
|
[
"MIT"
] | 11
|
2016-09-16T16:29:05.000Z
|
2020-08-27T23:41:03.000Z
|
""" Interative DQL client """
import cmd
import functools
import json
import os
import random
import shlex
import subprocess
from builtins import input
from collections import OrderedDict
from contextlib import contextmanager
from fnmatch import fnmatch
from typing import Any, Callable, ContextManager, Dict, Optional, Tuple
import botocore
from pyparsing import ParseException
from rich.panel import Panel
from rich.syntax import Syntax
from rich.traceback import install
from .engine import FragmentEngine
from .exceptions import EngineRuntimeError
from .help import (
ALTER,
ANALYZE,
CREATE,
DELETE,
DROP,
DUMP,
EXPLAIN,
INSERT,
LOAD,
OPTIONS,
SCAN,
SELECT,
UPDATE,
)
from .history import HistoryManager
from .monitor import Monitor
from .output import (
ColumnFormat,
ExpandedFormat,
JsonFormat,
SmartBuffer,
SmartFormat,
console,
less_display,
stdout_display,
)
from .throttle import TableLimits
# From http://docs.aws.amazon.com/general/latest/gr/rande.html#ddb_region
REGIONS = [
"us-east-1",
"us-west-2",
"us-west-1",
"eu-west-1",
"eu-central-1",
"ap-southeast-1",
"ap-southeast-2",
"ap-northeast-1",
"sa-east-1",
]
NO_DEFAULT = object()
DISPLAYS = {"stdout": stdout_display, "less": less_display}
FORMATTERS = {
"smart": SmartFormat,
"expanded": ExpandedFormat,
"column": ColumnFormat,
"json": JsonFormat,
}
DEFAULT_CONFIG = {
"width": "auto",
"pagesize": "auto",
"display": "stdout",
"format": "smart",
"allow_select_scan": False,
"lossy_json_float": True,
"_throttle": {},
}
# Installing the rich traceback handler for un-handled errors.
install()
def indent(string, prefix=" "):
""" Indent a paragraph of text """
return "\n".join([prefix + line for line in string.split("\n")])
def prompt(msg, default=NO_DEFAULT, validate=None):
""" Prompt user for input """
while True:
response = input(msg + " ").strip()
if not response:
if default is NO_DEFAULT:
continue
return default
if validate is None or validate(response):
return response
def promptyn(msg: str, default: Optional[bool] = None) -> bool:
"""
Display a blocking prompt until the user confirms.
Case is disregarded for prompt input.
User can input one of: `["y", "n", "yes", "no"]`
Example:
--------
promptyn("This is a message. Do you want to do stuff?", True)
# will print with a default True, capitalizes Y.
"This is a message. Do you want to do stuff? (Y/n)"
promptyn("This is a message. Do you want to do stuff?", False)
# will print with a default False, capitalizes N.
"This is a message. Do you want to do stuff? (y/N)"
"""
while True:
yes = "Y" if default else "y"
if default or default is None:
no = "n"
else:
no = "N"
confirm = prompt("%s [%s/%s]" % (msg, yes, no), "").lower()
if confirm in ("y", "yes"):
return True
elif confirm in ("n", "no"):
return False
elif not confirm and default is not None:
return default
def repl_command(fxn):
"""
Decorator for cmd methods
Parses arguments from the arg string and passes them to the method as *args
and **kwargs.
"""
@functools.wraps(fxn)
def wrapper(self, arglist):
"""Wraps the command method"""
args = []
kwargs = {}
if arglist:
for arg in shlex.split(arglist):
if "=" in arg:
split = arg.split("=", 1)
kwargs[split[0]] = split[1]
else:
args.append(arg)
return fxn(self, *args, **kwargs)
return wrapper
def get_enum_key(key, choices):
""" Get an enum by prefix or equality """
if key in choices:
return key
keys = [k for k in choices if k.startswith(key)]
if len(keys) == 1:
return keys[0]
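# e.g. get_enum_key('exp', FORMATTERS) -> 'expanded'; an ambiguous or unknown
# prefix falls through and returns None.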
@contextmanager
def exception_handler(engine):
""" It is a context manager which can handle exceptions and deal with them. """
try:
yield
except KeyboardInterrupt:
spooky_season = [":skull:", ":vampire:", ":zombie:", ":jack-o-lantern:"]
console.print(random.choice(spooky_season))
except botocore.exceptions.BotoCoreError as e:
console.log("BotoCoreError: ", e)
except ParseException as e:
console.log("Engine: ParseException")
syntax = Syntax(
engine.pformat_exc(e),
"sql",
theme="monokai",
line_numbers=True,
word_wrap=True,
)
console.print(Panel(syntax, title="Engine Details", expand=False))
except EngineRuntimeError as e:
console.log(e)
except SyntaxError as e:
console.log(e)
except Exception:
console.print_exception()
class DQLClient(cmd.Cmd):
"""
Interactive commandline interface.
Attributes
----------
running : bool
True while session is active, False after quitting
engine : :class:`dql.engine.FragmentEngine`
"""
running = False
conf: Dict
engine: FragmentEngine
formatter = None
display: Any
session = None
_conf_dir: str
_local_endpoint: Optional[Tuple[str, int]] = None
throttle: TableLimits
# When True, will not output status messages from queries (i.e. "table created").
# Used with --command
_silent: bool = False
history_manager: HistoryManager = HistoryManager()
def initialize(
self,
region: str = "us-west-1",
        host: Optional[str] = None,
port: int = 8000,
config_dir: Optional[str] = None,
session: Optional[Any] = None,
) -> None:
""" Set up the repl for execution. """
self.history_manager.try_to_load_history()
try:
import readline
import rlcompleter
except ImportError:
# Windows doesn't have readline, so gracefully ignore.
pass
else:
# Mac OS X readline compatibility from http://stackoverflow.com/a/7116997
if "libedit" in readline.__doc__:
readline.parse_and_bind("bind ^I rl_complete")
else:
readline.parse_and_bind("tab: complete")
# Tab-complete names with a '-' in them
delims = set(readline.get_completer_delims())
if "-" in delims:
delims.remove("-")
readline.set_completer_delims("".join(delims))
self._conf_dir = config_dir or os.path.join(
os.environ.get("HOME", "."), ".config"
)
self.session = session
self.engine = FragmentEngine()
self.engine.caution_callback = self.caution_callback
kwargs = {}
if host is not None:
self._local_endpoint = (host, port)
# If we don't pass these in we might get a missing credentials error
kwargs["access_key"] = ""
kwargs["secret_key"] = ""
self.engine.connect(
region,
session=session,
host=host,
port=port,
is_secure=(host is None),
**kwargs
)
self.conf = self.load_config()
for key, value in DEFAULT_CONFIG.items():
self.conf.setdefault(key, value)
self.display = DISPLAYS[self.conf["display"]]
self.throttle = TableLimits()
self.throttle.load(self.conf["_throttle"])
def start(self):
""" Start running the interactive session (blocking) """
self.running = True
while self.running:
self.update_prompt()
with exception_handler(self.engine):
self.cmdloop()
self.engine.reset()
def postcmd(self, stop, line):
self.update_prompt()
return stop
def update_prompt(self):
""" Update the prompt """
prefix = ""
if self._local_endpoint is not None:
prefix += "(%s:%d) " % self._local_endpoint
prefix += self.engine.region
if self.engine.partial:
self.prompt = len(prefix) * " " + "> "
else:
self.prompt = prefix + "> "
def do_shell(self, arglist):
""" Run a shell command """
proc = subprocess.Popen(
shlex.split(arglist), stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
print(proc.communicate()[0])
def caution_callback(self, action):
"""
Prompt user for manual continue when doing write operation on all items
in a table
"""
msg = "This will run %s on all items in the table! Continue?" % action
return promptyn(msg, False)
def save_config(self):
""" Save the conf file """
if not os.path.exists(self._conf_dir):
os.makedirs(self._conf_dir)
conf_file = os.path.join(self._conf_dir, "dql.json")
with open(conf_file, "w") as ofile:
json.dump(self.conf, ofile, indent=2)
def load_config(self):
""" Load your configuration settings from a file """
conf_file = os.path.join(self._conf_dir, "dql.json")
if not os.path.exists(conf_file):
return {}
with open(conf_file, "r") as ifile:
return json.load(ifile)
@repl_command
def do_opt(self, *_args, **kwargs):
""" Get and set options """
args = list(_args)
if not args:
largest = 0
keys = [key for key in self.conf if not key.startswith("_")]
for key in keys:
largest = max(largest, len(key))
for key in keys:
print("%s : %s" % (key.rjust(largest), self.conf[key]))
return
option = args.pop(0)
if not args and not kwargs:
method = getattr(self, "getopt_" + option, None)
if method is None:
self.getopt_default(option)
else:
method()
else:
method = getattr(self, "opt_" + option, None)
if method is None:
print("Unrecognized option %r" % option)
else:
method(*args, **kwargs)
self.save_config()
def help_opt(self):
""" Print the help text for options """
print(OPTIONS)
def getopt_default(self, option):
""" Default method to get an option """
if option not in self.conf:
print("Unrecognized option %r" % option)
return
print("%s: %s" % (option, self.conf[option]))
def complete_opt(self, text, line, begidx, endidx):
""" Autocomplete for options """
tokens = line.split()
if len(tokens) == 1:
if text:
return
else:
option = ""
else:
option = tokens[1]
if len(tokens) == 1 or (len(tokens) == 2 and text):
return [
name[4:] + " " for name in dir(self) if name.startswith("opt_" + text)
]
method = getattr(self, "complete_opt_" + option, None)
if method is not None:
return method(text, line, begidx, endidx) # pylint: disable=E1102
def opt_width(self, width):
""" Set width of output ('auto' will auto-detect terminal width) """
if width != "auto":
width = int(width)
self.conf["width"] = width
def complete_opt_width(self, *_):
""" Autocomplete for width option """
return ["auto"]
def opt_pagesize(self, pagesize):
""" Get or set the page size of the query output """
if pagesize != "auto":
pagesize = int(pagesize)
self.conf["pagesize"] = pagesize
def complete_opt_pagesize(self, *_):
""" Autocomplete for pagesize option """
return ["auto"]
def _print_enum_opt(self, option, choices):
""" Helper for enum options """
for key in choices:
if key == self.conf[option]:
print("* %s" % key)
else:
print(" %s" % key)
def opt_display(self, display):
""" Set value for display option """
key = get_enum_key(display, DISPLAYS)
if key is not None:
self.conf["display"] = key
self.display = DISPLAYS[key]
print("Set display %r" % key)
else:
print("Unknown display %r" % display)
def getopt_display(self):
""" Get value for display option """
self._print_enum_opt("display", DISPLAYS)
def complete_opt_display(self, text, *_):
""" Autocomplete for display option """
return [t + " " for t in DISPLAYS if t.startswith(text)]
def opt_format(self, fmt):
""" Set value for format option """
key = get_enum_key(fmt, FORMATTERS)
if key is not None:
self.conf["format"] = key
print("Set format %r" % key)
else:
print("Unknown format %r" % fmt)
def getopt_format(self):
""" Get value for format option """
self._print_enum_opt("format", FORMATTERS)
def complete_opt_format(self, text, *_):
""" Autocomplete for format option """
return [t + " " for t in FORMATTERS if t.startswith(text)]
def opt_allow_select_scan(self, allow):
""" Set option allow_select_scan """
allow = allow.lower() in ("true", "t", "yes", "y")
self.conf["allow_select_scan"] = allow
self.engine.allow_select_scan = allow
def complete_opt_allow_select_scan(self, text, *_):
""" Autocomplete for allow_select_scan option """
return [t for t in ("true", "false", "yes", "no") if t.startswith(text.lower())]
def opt_lossy_json_float(self, lossy):
""" Set option lossy_json_float """
lossy = lossy.lower() in ("true", "t", "yes", "y")
self.conf["lossy_json_float"] = lossy
def complete_opt_lossy_json_float(self, text, *_):
""" Autocomplete for lossy_json_float option """
return [t for t in ("true", "false", "yes", "no") if t.startswith(text.lower())]
@repl_command
def do_watch(self, *args):
""" Watch Dynamo tables consumed capacity """
tables = set()
if not self.engine.cached_descriptions:
self.engine.describe_all()
all_tables = list(self.engine.cached_descriptions)
for arg in args:
candidates = set((t for t in all_tables if fnmatch(t, arg)))
tables.update(candidates)
monitor = Monitor(self.engine, sorted(tables))
monitor.start()
def complete_watch(self, text, *_):
""" Autocomplete for watch """
return [t + " " for t in self.engine.cached_descriptions if t.startswith(text)]
@repl_command
def do_file(self, filename):
""" Read and execute a .dql file """
with open(filename, "r") as infile:
self._run_cmd(infile.read())
def complete_file(self, text, line, *_):
""" Autocomplete DQL file lookup """
leading = line[len("file ") :]
curpath = os.path.join(os.path.curdir, leading)
def isdql(parent, filename):
""" Check if a file is .dql or a dir """
return not filename.startswith(".") and (
os.path.isdir(os.path.join(parent, filename))
or filename.lower().endswith(".dql")
)
def addslash(path):
""" Append a slash if a file is a directory """
if path.lower().endswith(".dql"):
return path + " "
else:
return path + "/"
if not os.path.exists(curpath) or not os.path.isdir(curpath):
curpath = os.path.dirname(curpath)
return [
addslash(f)
for f in os.listdir(curpath)
if f.startswith(text) and isdql(curpath, f)
]
@repl_command
    def do_ls(self, table: Optional[str] = None) -> None:
""" List all tables or print details of one table """
if table is None:
table_descriptions = self.engine.describe_all()
else:
tables = list(self.engine.connection.list_tables())
filtered = [t for t in tables if fnmatch(t, table)]
if len(filtered) == 1:
print(
self.engine.describe(
filtered[0], refresh=True, metrics=True
).pformat()
)
return
elif len(filtered) == 0:
raise EngineRuntimeError("Table %r not found" % table)
else:
table_descriptions = [self.engine.describe(t, True) for t in filtered]
fields = OrderedDict(
[
("Name", "name"),
("Status", "status"),
("Read", "total_read_throughput"),
("Write", "total_write_throughput"),
]
)
# Calculate max width of all items for each column
sizes = [
1
+ max([len(str(getattr(t, f))) for t in table_descriptions] + [len(title)])
for title, f in fields.items()
]
# Print the header
for size, title in zip(sizes, fields):
print(title.ljust(size), end="")
print()
# Print each table row
for row_table in table_descriptions:
for size, field in zip(sizes, fields.values()):
print(str(getattr(row_table, field)).ljust(size), end="")
print()
def complete_ls(self, text, *_):
""" Autocomplete for ls """
return [t + " " for t in self.engine.cached_descriptions if t.startswith(text)]
@repl_command
def do_local(self, host="localhost", port=8000):
"""
Connect to a local DynamoDB instance. Use 'local off' to disable.
> local
> local host=localhost port=8001
> local off
"""
port = int(port)
if host == "off":
self._local_endpoint = None
else:
self._local_endpoint = (host, port)
self.onecmd("use %s" % self.engine.region)
@repl_command
def do_use(self, region):
"""
Switch the AWS region
> use us-west-1
> use us-east-1
"""
if self._local_endpoint is not None:
host, port = self._local_endpoint # pylint: disable=W0633
self.engine.connect(
region,
session=self.session,
host=host,
port=port,
is_secure=False,
access_key="",
secret_key="",
)
else:
self.engine.connect(region, session=self.session)
def complete_use(self, text, *_):
""" Autocomplete for use """
return [t + " " for t in REGIONS if t.startswith(text)]
@repl_command
def do_throttle(self, *_args):
"""
Set the allowed consumed throughput for DQL.
# Set the total allowed throughput across all tables
> throttle 1000 100
# Set the default allowed throughput per-table/index
> throttle default 40% 20%
# Set the allowed throughput on a table
> throttle mytable 10 10
# Set the allowed throughput on a global index
> throttle mytable myindex 40 6
see also: unthrottle
"""
args = list(_args)
if not args:
print(self.throttle)
return
if len(args) < 2:
return self.onecmd("help throttle")
args, read, write = args[:-2], args[-2], args[-1]
if len(args) == 2:
tablename, indexname = args # pylint: disable=W0632
self.throttle.set_index_limit(tablename, indexname, read, write)
elif len(args) == 1:
tablename = args[0]
if tablename == "default":
self.throttle.set_default_limit(read, write)
elif tablename == "total":
self.throttle.set_total_limit(read, write)
else:
self.throttle.set_table_limit(tablename, read, write)
elif not args:
self.throttle.set_total_limit(read, write)
else:
return self.onecmd("help throttle")
self.conf["_throttle"] = self.throttle.save()
self.save_config()
@repl_command
def do_unthrottle(self, *args):
"""
Remove the throughput limits for DQL that were set with 'throttle'
Examples:
---------
# Remove all limits
> unthrottle
# Remove the limit on total allowed throughput
> unthrottle total
# Remove the default limit
> unthrottle default
# Remove the limit on a table
> unthrottle mytable
# Remove the limit on a global index
> unthrottle mytable myindex
"""
if not args:
if promptyn("Are you sure you want to clear all throttles?"):
self.throttle.load({})
elif len(args) == 1:
tablename = args[0]
if tablename == "total":
self.throttle.set_total_limit()
elif tablename == "default":
self.throttle.set_default_limit()
else:
self.throttle.set_table_limit(tablename)
elif len(args) == 2:
tablename, indexname = args
self.throttle.set_index_limit(tablename, indexname)
else:
self.onecmd("help unthrottle")
self.conf["_throttle"] = self.throttle.save()
self.save_config()
def default(self, command):
""" This is an override of super class method. """
self._run_cmd(command)
def completedefault(self, text, line, *_):
""" Autocomplete table names in queries """
tokens = line.split()
try:
before = tokens[-2]
complete = before.lower() in ("from", "update", "table", "into")
if tokens[0].lower() == "dump":
complete = True
if complete:
return [
t + " "
for t in self.engine.cached_descriptions
if t.startswith(text)
]
        except IndexError:  # fewer than two tokens on the line
pass
def _run_cmd(self, command):
""" Run a DQL command """
if self.throttle:
tables = self.engine.describe_all(False)
limiter = self.throttle.get_limiter(tables)
else:
limiter = None
self.engine.rate_limit = limiter
results = self.engine.execute(command)
if results is None:
pass
elif isinstance(results, str):
if not self._silent:
print(results)
else:
with self.display() as ostream:
formatter = FORMATTERS[self.conf["format"]](
results,
ostream,
pagesize=self.conf["pagesize"],
width=self.conf["width"],
lossy_json_float=self.conf["lossy_json_float"],
)
formatter.display()
print_count = 0
        total = None
        for (cmd_fragment, capacity) in self.engine.consumed_capacities:
            # start from None so we don't have to assume the capacity type's zero value
            total = capacity if total is None else total + capacity
print(cmd_fragment)
print(indent(str(capacity)))
print_count += 1
if print_count > 1:
print("TOTAL")
print(indent(str(total)))
@repl_command
def do_EOF(self): # pylint: disable=C0103
"""Exit"""
return self.onecmd("exit")
@repl_command
def do_exit(self):
"""Exit"""
self.running = False
print()
self.history_manager.try_to_write_history()
return True
def run_command(
self, command: str, use_json: bool = False, raise_exceptions: bool = False
) -> None:
""" Run a command passed in from the command line with -c """
self.display = DISPLAYS["stdout"]
self.conf["pagesize"] = 0
if use_json:
self.conf["format"] = "json"
self._silent = True
if raise_exceptions:
self.onecmd(command)
else:
with exception_handler(self.engine):
self.onecmd(command)
def emptyline(self):
self.default("")
def help_help(self):
"""Print the help text for help"""
print("List commands or print details about a command")
def help_alter(self):
""" Print the help text for ALTER """
print(ALTER)
def help_analyze(self):
""" Print the help text for ANALYZE """
print(ANALYZE)
def help_create(self):
""" Print the help text for CREATE """
print(CREATE)
def help_delete(self):
""" Print the help text for DELETE """
print(DELETE)
def help_drop(self):
""" Print the help text for DROP """
print(DROP)
def help_dump(self):
""" Print the help text for DUMP """
print(DUMP)
def help_explain(self):
""" Print the help text for EXPLAIN """
print(EXPLAIN)
def help_insert(self):
""" Print the help text for INSERT """
print(INSERT)
def help_load(self):
""" Print the help text for LOAD """
print(LOAD)
def help_scan(self):
""" Print the help text for SCAN """
print(SCAN)
def help_select(self):
""" Print the help text for SELECT """
print(SELECT)
def help_update(self):
""" Print the help text for UPDATE """
print(UPDATE)
| 30.986889
| 88
| 0.552504
|
794a84f6a8114eb656a7faa0dfc13cf173af736b
| 387
|
py
|
Python
|
test1/test1/wsgi.py
|
StanislavDanilov/interview
|
4b113c6415cc8f18c6d0b18d40949032a8ccd8eb
|
[
"MIT"
] | null | null | null |
test1/test1/wsgi.py
|
StanislavDanilov/interview
|
4b113c6415cc8f18c6d0b18d40949032a8ccd8eb
|
[
"MIT"
] | null | null | null |
test1/test1/wsgi.py
|
StanislavDanilov/interview
|
4b113c6415cc8f18c6d0b18d40949032a8ccd8eb
|
[
"MIT"
] | null | null | null |
"""
WSGI config for test1 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test1.settings')
application = get_wsgi_application()
| 22.764706
| 78
| 0.782946
|
794a8587323e82ba32c12c9024ec7ffbd0fb1c47
| 2,705
|
py
|
Python
|
Assignment1-Deauth/main.py
|
pushqin/cyber_wifi_defence
|
190ef231f387efe01201dd350c0a2094a70d1dd2
|
[
"Apache-2.0"
] | null | null | null |
Assignment1-Deauth/main.py
|
pushqin/cyber_wifi_defence
|
190ef231f387efe01201dd350c0a2094a70d1dd2
|
[
"Apache-2.0"
] | null | null | null |
Assignment1-Deauth/main.py
|
pushqin/cyber_wifi_defence
|
190ef231f387efe01201dd350c0a2094a70d1dd2
|
[
"Apache-2.0"
] | null | null | null |
import os
import inquirer
import deauth_client
from find_ap_connected_stations import FindAccessPointConnectedStations
from scan_acess_points import ScanAccessPoints
def select_ap_bssid(values):
"""
    Allows the user to select an access point from a list.
Parameters
----------
values : Dictionary
        BSSIDs of all discovered access points
"""
questions = [
inquirer.List('ap_bssid', message="Select BSSID of the access point", choices=values),
]
return inquirer.prompt(questions)["ap_bssid"]
def select_bssids_for_deauth(values):
"""
    Allows the user to select the station on which to perform the deauth attack.
Parameters
----------
values : Dictionary
        mapping of discovered station BSSIDs to the access point BSSID
"""
questions = [
inquirer.List('both_bssid', message="Select target station BSSID to deauth", choices=values),
]
target_bssid = inquirer.prompt(questions)["both_bssid"]
    # values maps each station BSSID to the AP BSSID, so both are returned for deauth
return dict(target_bssid=target_bssid, ap_bssid=values[target_bssid])
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="A python script for sending deauthentication frames")
parser.add_argument(
"--card_iface", help="Network card interface name from iwconfig ,default 'wlan1'", default='wlan1')
parser.add_argument("--monitor_iface",
help="Desired monitor interface name ,default 'wlan0mon'", default='wlan0mon')
parser.add_argument(
"--sniff_time",
help="Amount of time each sniffing stage(find access points and find access point connected stations) should be executed ,default 60",
default=60)
# Get arguments
args = parser.parse_args()
card_iface = args.card_iface
monitor_iface = args.monitor_iface
sniff_time = int(args.sniff_time)
# Setup wireless network interface to monitor mode
os.system(f"iw dev {card_iface} interface add {monitor_iface} type monitor")
os.system(f"ifconfig {monitor_iface} up")
# Find access points
scan_access_points_task = ScanAccessPoints(monitor_iface)
scan_access_points_task.sniffAction(sniff_time)
ap_bssid = select_ap_bssid(scan_access_points_task.access_points)
# Find connected stations of access point
find_ap_connected_stations_task = FindAccessPointConnectedStations(monitor_iface)
find_ap_connected_stations_task.sniffAction(ap_bssid, sniff_time)
response = select_bssids_for_deauth(find_ap_connected_stations_task.stations)
# Execute deauth
deauth_client.deauth(response["target_bssid"], response["ap_bssid"], monitor_iface)
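# Illustrative invocation (interface names are environment-specific):
#   sudo python main.py --card_iface wlan1 --monitor_iface wlan0mon --sniff_time 30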
| 33.8125
| 142
| 0.726063
|
794a85aa1d62626726a7ef3a11bd5ec22c240186
| 5,288
|
py
|
Python
|
test/cpython/test_cmd_line.py
|
aisk/pyston
|
ac69cfef0621dbc8901175e84fa2b5cb5781a646
|
[
"BSD-2-Clause",
"Apache-2.0"
] | 1
|
2020-02-06T14:28:45.000Z
|
2020-02-06T14:28:45.000Z
|
test/cpython/test_cmd_line.py
|
aisk/pyston
|
ac69cfef0621dbc8901175e84fa2b5cb5781a646
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
test/cpython/test_cmd_line.py
|
aisk/pyston
|
ac69cfef0621dbc8901175e84fa2b5cb5781a646
|
[
"BSD-2-Clause",
"Apache-2.0"
] | 1
|
2020-02-06T14:29:00.000Z
|
2020-02-06T14:29:00.000Z
|
# expected: fail
# Tests invocation of the interpreter with various command line arguments
# All tests are executed with environment variables ignored
# See test_cmd_line_script.py for testing of script execution
import test.test_support
import sys
import unittest
from test.script_helper import (
assert_python_ok, assert_python_failure, spawn_python, kill_python,
python_exit_code
)
class CmdLineTest(unittest.TestCase):
def start_python(self, *args):
p = spawn_python(*args)
return kill_python(p)
def exit_code(self, *args):
return python_exit_code(*args)
def test_directories(self):
self.assertNotEqual(self.exit_code('.'), 0)
self.assertNotEqual(self.exit_code('< .'), 0)
def verify_valid_flag(self, cmd_line):
data = self.start_python(cmd_line)
self.assertTrue(data == '' or data.endswith('\n'))
self.assertNotIn('Traceback', data)
def test_optimize(self):
self.verify_valid_flag('-O')
self.verify_valid_flag('-OO')
def test_q(self):
self.verify_valid_flag('-Qold')
self.verify_valid_flag('-Qnew')
self.verify_valid_flag('-Qwarn')
self.verify_valid_flag('-Qwarnall')
def test_site_flag(self):
self.verify_valid_flag('-S')
def test_usage(self):
self.assertIn('usage', self.start_python('-h'))
def test_version(self):
version = 'Python %d.%d' % sys.version_info[:2]
self.assertTrue(self.start_python('-V').startswith(version))
def test_run_module(self):
# Test expected operation of the '-m' switch
# Switch needs an argument
self.assertNotEqual(self.exit_code('-m'), 0)
# Check we get an error for a nonexistent module
self.assertNotEqual(
self.exit_code('-m', 'fnord43520xyz'),
0)
# Check the runpy module also gives an error for
# a nonexistent module
self.assertNotEqual(
self.exit_code('-m', 'runpy', 'fnord43520xyz'),
0)
# All good if module is located and run successfully
self.assertEqual(
self.exit_code('-m', 'timeit', '-n', '1'),
0)
def test_run_module_bug1764407(self):
# -m and -i need to play well together
# Runs the timeit module and checks the __main__
# namespace has been populated appropriately
p = spawn_python('-i', '-m', 'timeit', '-n', '1')
p.stdin.write('Timer\n')
p.stdin.write('exit()\n')
data = kill_python(p)
self.assertTrue(data.startswith('1 loop'))
self.assertIn('__main__.Timer', data)
def test_run_code(self):
# Test expected operation of the '-c' switch
# Switch needs an argument
self.assertNotEqual(self.exit_code('-c'), 0)
# Check we get an error for an uncaught exception
self.assertNotEqual(
self.exit_code('-c', 'raise Exception'),
0)
# All good if execution is successful
self.assertEqual(
self.exit_code('-c', 'pass'),
0)
def test_hash_randomization(self):
# Verify that -R enables hash randomization:
self.verify_valid_flag('-R')
hashes = []
for i in range(2):
code = 'print(hash("spam"))'
data = self.start_python('-R', '-c', code)
hashes.append(data)
self.assertNotEqual(hashes[0], hashes[1])
# Verify that sys.flags contains hash_randomization
code = 'import sys; print sys.flags'
data = self.start_python('-R', '-c', code)
self.assertTrue('hash_randomization=1' in data)
def test_del___main__(self):
# Issue #15001: PyRun_SimpleFileExFlags() did crash because it kept a
# borrowed reference to the dict of __main__ module and later modify
# the dict whereas the module was destroyed
filename = test.test_support.TESTFN
self.addCleanup(test.test_support.unlink, filename)
with open(filename, "w") as script:
print >>script, "import sys"
print >>script, "del sys.modules['__main__']"
assert_python_ok(filename)
def test_unknown_options(self):
rc, out, err = assert_python_failure('-E', '-z')
self.assertIn(b'Unknown option: -z', err)
self.assertEqual(err.splitlines().count(b'Unknown option: -z'), 1)
self.assertEqual(b'', out)
# Add "without='-E'" to prevent _assert_python to append -E
# to env_vars and change the output of stderr
rc, out, err = assert_python_failure('-z', without='-E')
self.assertIn(b'Unknown option: -z', err)
self.assertEqual(err.splitlines().count(b'Unknown option: -z'), 1)
self.assertEqual(b'', out)
rc, out, err = assert_python_failure('-a', '-z', without='-E')
self.assertIn(b'Unknown option: -a', err)
# only the first unknown option is reported
self.assertNotIn(b'Unknown option: -z', err)
self.assertEqual(err.splitlines().count(b'Unknown option: -a'), 1)
self.assertEqual(b'', out)
def test_main():
test.test_support.run_unittest(CmdLineTest)
test.test_support.reap_children()
if __name__ == "__main__":
test_main()
| 36.468966
| 77
| 0.625946
|
794a85fe28c64c05a8d8e9a58dec10a8c9f6bf69
| 93,004
|
py
|
Python
|
toontown/toon/DistributedToon.py
|
CrankySupertoon01/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 1
|
2021-02-13T22:40:50.000Z
|
2021-02-13T22:40:50.000Z
|
toontown/toon/DistributedToon.py
|
CrankySupertoonArchive/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 1
|
2018-07-28T20:07:04.000Z
|
2018-07-30T18:28:34.000Z
|
toontown/toon/DistributedToon.py
|
CrankySupertoonArchive/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 2
|
2019-12-02T01:39:10.000Z
|
2021-02-13T22:41:00.000Z
|
import copy
from direct.controls.GravityWalker import GravityWalker
from direct.directnotify import DirectNotifyGlobal
from direct.distributed import DistributedObject
from direct.distributed import DistributedSmoothNode
from direct.distributed.ClockDelta import *
from direct.distributed.MsgTypes import *
from direct.fsm import ClassicFSM
from direct.interval.IntervalGlobal import Sequence, Wait, Func, Parallel, SoundInterval
from direct.showbase import PythonUtil
from direct.task.Task import Task
import operator
from panda3d.core import *
import random
import time
import Experience
import InventoryNew
import TTEmote
import Toon
from otp.ai.MagicWordGlobal import *
from otp.avatar import Avatar, DistributedAvatar
from otp.avatar import DistributedPlayer
from otp.chat import TalkAssistant, ChatUtil
from otp.otpbase import OTPGlobals
from otp.otpbase import OTPLocalizer
from otp.speedchat import SCDecoders
from toontown.catalog import CatalogItem
from toontown.catalog import CatalogItemList
from toontown.chat import ResistanceChat
from toontown.chat import ToonChatGarbler
from otp.nametag.NametagConstants import *
from otp.margins.WhisperPopup import *
from toontown.coghq import CogDisguiseGlobals
from toontown.distributed import DelayDelete
from toontown.distributed.DelayDeletable import DelayDeletable
from toontown.effects.ScavengerHuntEffects import *
from toontown.estate import DistributedGagTree
from toontown.estate import FlowerBasket
from toontown.estate import FlowerCollection
from toontown.estate import GardenDropGame
from toontown.estate import GardenGlobals
from toontown.fishing import FishCollection
from toontown.fishing import FishTank
from toontown.friends import FriendHandle
from toontown.golf import GolfGlobals
from toontown.hood import ZoneUtil
from otp.nametag import NametagGroup
from otp.nametag.NametagGroup import *
from toontown.parties import PartyGlobals
from toontown.parties.InviteInfo import InviteInfo
from toontown.parties.PartyGlobals import InviteStatus, PartyStatus
from toontown.parties.PartyInfo import PartyInfo
from toontown.parties.PartyReplyInfo import PartyReplyInfoBase
from toontown.parties.SimpleMailBase import SimpleMailBase
from toontown.shtiker.OptionsPage import speedChatStyles
from toontown.speedchat import TTSCDecoders
from toontown.suit import SuitDNA
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownGlobals
from toontown.battle import BattleParticles
if base.wantKarts:
from toontown.racing.KartDNA import *
class DistributedToon(DistributedPlayer.DistributedPlayer, Toon.Toon, DistributedSmoothNode.DistributedSmoothNode, DelayDeletable):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedToon')
partyNotify = DirectNotifyGlobal.directNotify.newCategory('DistributedToon_Party')
chatGarbler = ToonChatGarbler.ToonChatGarbler()
def __init__(self, cr, bFake = False):
try:
self.DistributedToon_initialized
return
except:
self.DistributedToon_initialized = 1
DistributedPlayer.DistributedPlayer.__init__(self, cr)
Toon.Toon.__init__(self)
DistributedSmoothNode.DistributedSmoothNode.__init__(self, cr)
self.bFake = bFake
self.kart = None
self.trophyScore = 0
self.trophyStar = None
self.trophyStarSpeed = 0
self.NPCFriendsDict = {}
self.earnedExperience = None
self.track = None
self.effect = None
self.maxCarry = 0
self.disguisePageFlag = 0
self.sosPageFlag = 0
self.disguisePage = None
self.sosPage = None
self.gardenPage = None
self.emoteAccess = [0] * 27
self.cogTypes = [0, 0, 0, 0]
self.cogLevels = [0, 0, 0, 0]
self.cogParts = [0, 0, 0, 0]
self.cogMerits = [0, 0, 0, 0]
self.savedCheesyEffect = ToontownGlobals.CENormal
self.savedCheesyHoodId = 0
self.savedCheesyExpireTime = 0
if hasattr(base, 'wantPets') and base.wantPets:
self.petTrickPhrases = []
self.customMessages = []
self.resistanceMessages = []
self.cogSummonsEarned = []
self.catalogNotify = ToontownGlobals.NoItems
self.mailboxNotify = ToontownGlobals.NoItems
self.simpleMailNotify = ToontownGlobals.NoItems
self.inviteMailNotify = ToontownGlobals.NoItems
self.catalogScheduleCurrentWeek = 0
self.catalogScheduleNextTime = 0
self.monthlyCatalog = CatalogItemList.CatalogItemList()
self.weeklyCatalog = CatalogItemList.CatalogItemList()
self.backCatalog = CatalogItemList.CatalogItemList()
self.onOrder = CatalogItemList.CatalogItemList(store=CatalogItem.Customization | CatalogItem.DeliveryDate)
self.onGiftOrder = CatalogItemList.CatalogItemList(store=CatalogItem.Customization | CatalogItem.DeliveryDate)
self.mailboxContents = CatalogItemList.CatalogItemList(store=CatalogItem.Customization)
self.awardMailboxContents = CatalogItemList.CatalogItemList(store=CatalogItem.Customization)
self.onAwardOrder = CatalogItemList.CatalogItemList(store=CatalogItem.Customization | CatalogItem.DeliveryDate)
self.splash = None
self.tossTrack = None
self.pieTracks = {}
self.splatTracks = {}
self.lastTossedPie = 0
self.clothesTopsList = []
self.clothesBottomsList = []
self.hatList = []
self.glassesList = []
self.backpackList = []
self.shoesList = []
self.tunnelTrack = None
self.tunnelPivotPos = [-14, -6, 0]
self.tunnelCenterOffset = 9.0
self.tunnelCenterInfluence = 0.6
self.pivotAngle = 90 + 45
self.houseId = 0
self.money = 0
self.bankMoney = 0
self.maxMoney = 0
self.maxBankMoney = 0
self.emblems = [0, 0]
self.petId = 0
self.petTutorialDone = False
self.fishBingoTutorialDone = False
self.fishBingoMarkTutorialDone = False
self.accessories = []
if base.wantKarts:
self.kartDNA = [-1] * getNumFields()
self.flowerCollection = None
self.shovel = 0
self.shovelSkill = 0
self.shovelModel = None
self.wateringCan = 0
self.wateringCanSkill = 0
self.wateringCanModel = None
self.gardenSpecials = []
self.unlimitedSwing = 0
self.soundSequenceList = []
self.boardingParty = None
self.__currentDialogue = None
self.mail = None
self.invites = []
self.hostedParties = []
self.partiesInvitedTo = []
self.partyReplyInfoBases = []
self.buffs = []
self.redeemedCodes = []
self.ignored = []
self.reported = []
self.trueFriends = []
self.specialInventory = [0, 0, 0, 0, 0]
def disable(self):
for soundSequence in self.soundSequenceList:
soundSequence.finish()
self.soundSequenceList = []
if self.boardingParty:
self.boardingParty.demandDrop()
self.boardingParty = None
self.ignore('clientCleanup')
self.stopAnimations()
self.clearCheesyEffect()
self.stopBlink()
self.stopSmooth()
self.stopLookAroundNow()
self.setGhostMode(0)
if self.track != None:
self.track.finish()
DelayDelete.cleanupDelayDeletes(self.track)
self.track = None
if self.effect != None:
self.effect.destroy()
self.effect = None
if self.splash != None:
self.splash.destroy()
self.splash = None
if self.emote != None:
self.emote.finish()
self.emote = None
self.cleanupPies()
if self.isDisguised:
self.takeOffSuit()
if self.tunnelTrack:
self.tunnelTrack.finish()
self.tunnelTrack = None
self.setTrophyScore(0)
if self.doId in self.cr.toons:
del self.cr.toons[self.doId]
DistributedPlayer.DistributedPlayer.disable(self)
def delete(self):
try:
self.DistributedToon_deleted
except:
self.DistributedToon_deleted = 1
DistributedPlayer.DistributedPlayer.delete(self)
Toon.Toon.delete(self)
DistributedSmoothNode.DistributedSmoothNode.delete(self)
def generate(self):
DistributedPlayer.DistributedPlayer.generate(self)
DistributedSmoothNode.DistributedSmoothNode.generate(self)
self.cr.toons[self.doId] = self
if base.cr.trophyManager != None:
base.cr.trophyManager.d_requestTrophyScore()
self.startBlink()
self.startSmooth()
self.accept('clientCleanup', self._handleClientCleanup)
def announceGenerate(self):
DistributedPlayer.DistributedPlayer.announceGenerate(self)
if self.animFSM.getCurrentState().getName() == 'off':
self.setAnimState('neutral')
def _handleClientCleanup(self):
if self.track != None:
DelayDelete.cleanupDelayDeletes(self.track)
def setDNAString(self, dnaString):
Toon.Toon.setDNAString(self, dnaString)
def setAdminAccess(self, access):
DistributedPlayer.DistributedPlayer.setAdminAccess(self, access)
self.removeGMIcon()
if self.isAdmin():
self.setGMIcon(access)
def setDNA(self, dna):
oldHat = self.getHat()
oldGlasses = self.getGlasses()
oldBackpack = self.getBackpack()
oldShoes = self.getShoes()
self.setHat(0, 0, 0)
self.setGlasses(0, 0, 0)
self.setBackpack(0, 0, 0)
self.setShoes(0, 0, 0)
Toon.Toon.setDNA(self, dna)
self.setHat(*oldHat)
self.setGlasses(*oldGlasses)
self.setBackpack(*oldBackpack)
self.setShoes(*oldShoes)
def setHat(self, idx, textureIdx, colorIdx):
Toon.Toon.setHat(self, idx, textureIdx, colorIdx)
def setGlasses(self, idx, textureIdx, colorIdx):
Toon.Toon.setGlasses(self, idx, textureIdx, colorIdx)
def setBackpack(self, idx, textureIdx, colorIdx):
Toon.Toon.setBackpack(self, idx, textureIdx, colorIdx)
def setShoes(self, idx, textureIdx, colorIdx):
Toon.Toon.setShoes(self, idx, textureIdx, colorIdx)
def setExperience(self, experience):
self.experience = Experience.Experience(experience, self)
if self.inventory:
self.inventory.updateGUI()
def setInventory(self, inventoryNetString):
if not self.inventory:
self.inventory = InventoryNew.InventoryNew(self, inventoryNetString)
self.inventory.updateInvString(inventoryNetString)
def setLastHood(self, lastHood):
self.lastHood = lastHood
def setBattleId(self, battleId):
self.battleId = battleId
messenger.send('ToonBattleIdUpdate', [self.doId])
def b_setSCToontask(self, taskId, toNpcId, toonProgress, msgIndex):
self.setSCToontask(taskId, toNpcId, toonProgress, msgIndex)
self.d_setSCToontask(taskId, toNpcId, toonProgress, msgIndex)
return None
def d_setSCToontask(self, taskId, toNpcId, toonProgress, msgIndex):
messenger.send('wakeup')
self.sendUpdate('setSCToontask', [taskId,
toNpcId,
toonProgress,
msgIndex])
def setSCToontask(self, taskId, toNpcId, toonProgress, msgIndex):
if base.localAvatar.isIgnored(self.doId):
return
chatString = TTSCDecoders.decodeTTSCToontaskMsg(taskId, toNpcId, toonProgress, msgIndex)
if chatString:
self.setChatAbsolute(chatString, CFSpeech | CFQuicktalker | CFTimeout)
def sendLogSuspiciousEvent(self, msg):
localAvatar.sendUpdate('logSuspiciousEvent', ['%s for %s' % (msg, self.doId)])
def d_reqSCResistance(self, msgIndex):
messenger.send('wakeup')
nearbyPlayers = self.getNearbyPlayers(ResistanceChat.EFFECT_RADIUS)
self.sendUpdate('reqSCResistance', [msgIndex, nearbyPlayers])
def getNearbyPlayers(self, radius, includeSelf = True):
nearbyToons = []
toonIds = self.cr.getObjectsOfExactClass(DistributedToon)
for toonId, toon in toonIds.items():
if toon is not self:
dist = toon.getDistance(self)
if dist < radius:
nearbyToons.append(toonId)
if includeSelf:
nearbyToons.append(self.doId)
return nearbyToons
def setSCResistance(self, msgIndex, nearbyToons = []):
chatString = TTSCDecoders.decodeTTSCResistanceMsg(msgIndex)
if chatString:
self.setChatAbsolute(chatString, CFSpeech | CFTimeout)
ResistanceChat.doEffect(msgIndex, self, nearbyToons)
def d_battleSOS(self, sendToId):
self.cr.ttsFriendsManager.d_battleSOS(sendToId)
def battleSOS(self, requesterId):
avatar = base.cr.identifyAvatar(requesterId)
if isinstance(avatar, (DistributedToon, FriendHandle.FriendHandle)):
self.setSystemMessage(requesterId,
TTLocalizer.MovieSOSWhisperHelp % avatar.getName(),
whisperType=WTBattleSOS
)
elif avatar:
self.notify.warning('got battleSOS from non-toon %s' % requesterId)
def getDialogueArray(self, *args):
if hasattr(self, 'animalSound'):
dialogueArrays = [
Toon.DogDialogueArray,
Toon.CatDialogueArray,
Toon.HorseDialogueArray,
Toon.MouseDialogueArray,
Toon.RabbitDialogueArray,
Toon.DuckDialogueArray,
Toon.MonkeyDialogueArray,
Toon.BearDialogueArray,
Toon.PigDialogueArray,
]
return dialogueArrays[self.animalSound]
return Toon.Toon.getDialogueArray(self, *args)
def setDefaultShard(self, shard):
self.defaultShard = shard
def setDefaultZone(self, zoneId):
if not ZoneUtil.getCanonicalHoodId(zoneId) in ToontownGlobals.hoodNameMap:
self.defaultZone = ToontownGlobals.ToontownCentral
return
if ZoneUtil.getCanonicalHoodId(zoneId) == ToontownGlobals.FunnyFarm:
self.defaultZone = ToontownGlobals.ToontownCentral
return
if self.getHp() <= 0 and zoneId in ToontownGlobals.HQToSafezone:
self.defaultZone = ToontownGlobals.HQToSafezone[zoneId]
return
self.defaultZone = zoneId
def __starSpin1(self, task):
now = globalClock.getFrameTime()
r = now * 90 % 360.0
self.trophyStar1.setH(r)
return Task.cont
def isAvFriend(self, avId):
return base.cr.isFriend(avId)
def setTalkWhisper(self, avId, chat):
if not base.cr.chatAgent.verifyMessage(chat):
return
if not localAvatar.acceptingNonFriendWhispers:
if not self.isAvFriend(avId):
return
if base.localAvatar.isIgnored(avId):
return
if base.localAvatar.sleepFlag == 1:
if not base.cr.identifyAvatar(avId) == base.localAvatar:
base.cr.ttsFriendsManager.d_sleepAutoReply(avId)
if base.whiteList:
chat = base.whiteList.processThroughAll(chat, self.chatGarbler)
self.displayTalkWhisper(avId, chat)
def setSleepAutoReply(self, fromId):
pass
def _isValidWhisperSource(self, source):
return isinstance(source, (DistributedToon, FriendHandle.FriendHandle))
def setWhisperSCEmoteFrom(self, fromId, emoteId):
handle = base.cr.identifyAvatar(fromId)
if handle == None:
return
if not self._isValidWhisperSource(handle):
self.notify.warning('setWhisperSCEmoteFrom non-toon %s' % fromId)
return
if not localAvatar.acceptingNonFriendWhispers:
if not self.isAvFriend(fromId):
return
if base.localAvatar.isIgnored(fromId):
return
if base.localAvatar.sleepFlag == 1:
if not handle == base.localAvatar:
base.cr.ttsFriendsManager.d_sleepAutoReply(fromId)
chatString = SCDecoders.decodeSCEmoteWhisperMsg(emoteId, handle.getName())
if chatString:
self.displayWhisper(fromId, chatString, WTEmote)
base.talkAssistant.receiveAvatarWhisperSpeedChat(TalkAssistant.SPEEDCHAT_EMOTE, emoteId, fromId)
return
def setWhisperSCFrom(self, fromId, msgIndex):
handle = base.cr.identifyAvatar(fromId)
        if handle is None:
return
if not self._isValidWhisperSource(handle):
self.notify.warning('setWhisperSCFrom non-toon %s' % fromId)
return
if not localAvatar.acceptingNonFriendWhispers:
if not self.isAvFriend(fromId):
return
if base.localAvatar.isIgnored(fromId):
return
if base.localAvatar.sleepFlag == 1:
if not handle == base.localAvatar:
base.cr.ttsFriendsManager.d_sleepAutoReply(fromId)
chatString = SCDecoders.decodeSCStaticTextMsg(msgIndex)
if chatString:
self.displayWhisper(fromId, chatString, WTNormal)
base.talkAssistant.receiveAvatarWhisperSpeedChat(TalkAssistant.SPEEDCHAT_NORMAL, msgIndex, fromId)
return
def setWhisperSCCustomFrom(self, fromId, msgIndex):
handle = base.cr.identifyAvatar(fromId)
        if handle is None:
return
if not localAvatar.acceptingNonFriendWhispers:
if not self.isAvFriend(fromId):
return
return DistributedPlayer.DistributedPlayer.setWhisperSCCustomFrom(self, fromId, msgIndex)
def whisperSCToontaskTo(self, taskId, toNpcId, toonProgress, msgIndex, sendToId):
messenger.send('wakeup')
base.cr.ttsFriendsManager.d_whisperSCToontaskTo(sendToId, taskId,
toNpcId, toonProgress, msgIndex
)
def setWhisperSCToontaskFrom(self, fromId, taskId, toNpcId, toonProgress, msgIndex):
sender = base.cr.identifyAvatar(fromId)
if sender is None:
return
if not localAvatar.acceptingNonFriendWhispers:
if not self.isAvFriend(fromId):
return
if base.localAvatar.isIgnored(fromId):
return
chatString = TTSCDecoders.decodeTTSCToontaskMsg(taskId, toNpcId, toonProgress, msgIndex)
if chatString:
self.displayWhisper(fromId, chatString, WTNormal)
def getNPCFriendsDict(self):
return self.NPCFriendsDict
def getNPCFriendCount(self, npcId):
return self.NPCFriendsDict.get(npcId, 0)
def setNPCFriendsDict(self, NPCFriendsList):
NPCFriendsDict = {}
for friendPair in NPCFriendsList:
NPCFriendsDict[friendPair[0]] = friendPair[1]
self.NPCFriendsDict = NPCFriendsDict
def setHatList(self, clothesList):
self.hatList = clothesList
def getHatList(self):
return self.hatList
def setGlassesList(self, clothesList):
self.glassesList = clothesList
def getGlassesList(self):
return self.glassesList
def setBackpackList(self, clothesList):
self.backpackList = clothesList
def getBackpackList(self):
return self.backpackList
def setShoesList(self, clothesList):
self.shoesList = clothesList
def getShoesList(self):
return self.shoesList
def isTrunkFull(self, extraAccessories = 0):
numAccessories = (len(self.hatList) + len(self.glassesList) + len(self.backpackList) + len(self.shoesList)) / 3
return numAccessories + extraAccessories >= ToontownGlobals.MaxAccessories
def setMaxClothes(self, max):
self.maxClothes = max
def getMaxClothes(self):
return self.maxClothes
def getClothesTopsList(self):
return self.clothesTopsList
def setClothesTopsList(self, clothesList):
self.clothesTopsList = clothesList
def getClothesBottomsList(self):
return self.clothesBottomsList
def setClothesBottomsList(self, clothesList):
self.clothesBottomsList = clothesList
def catalogGenClothes(self, avId):
if avId == self.doId:
self.generateToonClothes()
self.loop('neutral')
def catalogGenAccessories(self, avId):
if avId == self.doId:
self.generateToonAccessories()
self.loop('neutral')
def isClosetFull(self, extraClothes = 0):
numClothes = len(self.clothesTopsList) / 4 + len(self.clothesBottomsList) / 2
return numClothes + extraClothes >= self.maxClothes
def setMaxHp(self, hitPoints):
DistributedPlayer.DistributedPlayer.setMaxHp(self, hitPoints)
if self.inventory:
self.inventory.updateGUI()
def died(self):
messenger.send(self.uniqueName('died'))
if self.isLocal():
target_sz = ZoneUtil.getSafeZoneId(self.defaultZone)
place = self.cr.playGame.getPlace()
if place and place.fsm:
place.fsm.request('died', [{'loader': ZoneUtil.getLoaderName(target_sz),
'where': ZoneUtil.getWhereName(target_sz, 1),
'how': 'teleportIn',
'hoodId': target_sz,
'zoneId': target_sz,
'shardId': None,
'avId': -1,
'battle': 1}])
return
def setHoodsVisited(self, hoods):
self.hoodsVisited = hoods
if ToontownGlobals.SellbotHQ in hoods or ToontownGlobals.CashbotHQ in hoods or ToontownGlobals.LawbotHQ in hoods or ToontownGlobals.BossbotHQ in hoods:
self.setDisguisePageFlag(1)
def wrtReparentTo(self, parent):
DistributedSmoothNode.DistributedSmoothNode.wrtReparentTo(self, parent)
def setTutorialAck(self, tutorialAck):
self.tutorialAck = tutorialAck
def getTutorialAck(self):
return self.tutorialAck
def setEarnedExperience(self, earnedExp):
self.earnedExperience = earnedExp
def b_setTunnelIn(self, endX, tunnelOrigin):
timestamp = globalClockDelta.getFrameNetworkTime()
pos = tunnelOrigin.getPos(render)
h = tunnelOrigin.getH(render)
self.setTunnelIn(timestamp, endX, pos[0], pos[1], pos[2], h)
self.d_setTunnelIn(timestamp, endX, pos[0], pos[1], pos[2], h)
def d_setTunnelIn(self, timestamp, endX, x, y, z, h):
self.sendUpdate('setTunnelIn', [timestamp,
endX,
x,
y,
z,
h])
def setTunnelIn(self, timestamp, endX, x, y, z, h):
t = globalClockDelta.networkToLocalTime(timestamp)
self.handleTunnelIn(t, endX, x, y, z, h)
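    # Builds the interval that walks a toon through a tunnel: the toon is
    # parented to a pivot node, swung through self.pivotAngle, then lerped
    # straight ahead at ToonForwardSpeed to its final position.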
def getTunnelInToonTrack(self, endX, tunnelOrigin):
pivotNode = tunnelOrigin.attachNewNode(self.uniqueName('pivotNode'))
pivotNode.setPos(*self.tunnelPivotPos)
pivotNode.setHpr(0, 0, 0)
pivotY = pivotNode.getY(tunnelOrigin)
endY = 5.0
straightLerpDur = abs(endY - pivotY) / ToontownGlobals.ToonForwardSpeed
pivotDur = 2.0
pivotLerpDur = pivotDur * (90.0 / self.pivotAngle)
self.reparentTo(pivotNode)
self.setPos(0, 0, 0)
self.setX(tunnelOrigin, endX)
targetX = self.getX()
self.setX(self.tunnelCenterOffset + (targetX - self.tunnelCenterOffset) * (1.0 - self.tunnelCenterInfluence))
self.setHpr(tunnelOrigin, 0, 0, 0)
pivotNode.setH(-self.pivotAngle)
return Sequence(Wait(0.8), Parallel(LerpHprInterval(pivotNode, pivotDur, hpr=Point3(0, 0, 0), name=self.uniqueName('tunnelInPivot')), Sequence(Wait(pivotDur - pivotLerpDur), LerpPosInterval(self, pivotLerpDur, pos=Point3(targetX, 0, 0), name=self.uniqueName('tunnelInPivotLerpPos')))), Func(self.wrtReparentTo, render), Func(pivotNode.removeNode), LerpPosInterval(self, straightLerpDur, pos=Point3(endX, endY, 0.1), other=tunnelOrigin, name=self.uniqueName('tunnelInStraightLerp')))
def handleTunnelIn(self, startTime, endX, x, y, z, h):
self.stopSmooth()
tunnelOrigin = render.attachNewNode('tunnelOrigin')
tunnelOrigin.setPosHpr(x, y, z, h, 0, 0)
if self.tunnelTrack:
self.tunnelTrack.finish()
self.tunnelTrack = Sequence(self.getTunnelInToonTrack(endX, tunnelOrigin), Func(tunnelOrigin.removeNode), Func(self.startSmooth))
tOffset = globalClock.getFrameTime() - (startTime + self.smoother.getDelay())
if tOffset < 0.0:
self.tunnelTrack = Sequence(Wait(-tOffset), self.tunnelTrack)
self.tunnelTrack.start()
else:
self.tunnelTrack.start(tOffset)
def b_setTunnelOut(self, startX, startY, tunnelOrigin):
timestamp = globalClockDelta.getFrameNetworkTime()
pos = tunnelOrigin.getPos(render)
h = tunnelOrigin.getH(render)
self.setTunnelOut(timestamp, startX, startY, pos[0], pos[1], pos[2], h)
self.d_setTunnelOut(timestamp, startX, startY, pos[0], pos[1], pos[2], h)
def d_setTunnelOut(self, timestamp, startX, startY, x, y, z, h):
self.sendUpdate('setTunnelOut', [timestamp,
startX,
startY,
x,
y,
z,
h])
def setTunnelOut(self, timestamp, startX, startY, x, y, z, h):
t = globalClockDelta.networkToLocalTime(timestamp)
self.handleTunnelOut(t, startX, startY, x, y, z, h)
def getTunnelOutToonTrack(self, startX, startY, tunnelOrigin):
startPos = self.getPos(tunnelOrigin)
startHpr = self.getHpr(tunnelOrigin)
reducedAvH = PythonUtil.fitDestAngle2Src(startHpr[0], 180)
pivotNode = tunnelOrigin.attachNewNode(self.uniqueName('pivotNode'))
pivotNode.setPos(*self.tunnelPivotPos)
pivotNode.setHpr(0, 0, 0)
pivotY = pivotNode.getY(tunnelOrigin)
straightLerpDur = abs(startY - pivotY) / ToontownGlobals.ToonForwardSpeed
pivotDur = 2.0
pivotLerpDur = pivotDur * (90.0 / self.pivotAngle)
def getTargetPos(self = self):
pos = self.getPos()
return Point3(self.tunnelCenterOffset + (pos[0] - self.tunnelCenterOffset) * (1.0 - self.tunnelCenterInfluence), pos[1], pos[2])
return Sequence(Parallel(LerpPosInterval(self, straightLerpDur, pos=Point3(startX, pivotY, 0.1), startPos=startPos, other=tunnelOrigin, name=self.uniqueName('tunnelOutStraightLerp')), LerpHprInterval(self, straightLerpDur * 0.8, hpr=Point3(reducedAvH, 0, 0), startHpr=startHpr, other=tunnelOrigin, name=self.uniqueName('tunnelOutStraightLerpHpr'))), Func(self.wrtReparentTo, pivotNode), Parallel(LerpHprInterval(pivotNode, pivotDur, hpr=Point3(-self.pivotAngle, 0, 0), name=self.uniqueName('tunnelOutPivot')), LerpPosInterval(self, pivotLerpDur, pos=getTargetPos, name=self.uniqueName('tunnelOutPivotLerpPos'))), Func(self.wrtReparentTo, render), Func(pivotNode.removeNode))
def handleTunnelOut(self, startTime, startX, startY, x, y, z, h):
tunnelOrigin = render.attachNewNode('tunnelOrigin')
tunnelOrigin.setPosHpr(x, y, z, h, 0, 0)
if self.tunnelTrack:
self.tunnelTrack.finish()
self.tunnelTrack = Sequence(Func(self.stopSmooth), self.getTunnelOutToonTrack(startX, startY, tunnelOrigin), Func(self.detachNode), Func(tunnelOrigin.removeNode))
tOffset = globalClock.getFrameTime() - (startTime + self.smoother.getDelay())
if tOffset < 0.0:
self.tunnelTrack = Sequence(Wait(-tOffset), self.tunnelTrack)
self.tunnelTrack.start()
else:
self.tunnelTrack.start(tOffset)
def enterTeleportOut(self, *args, **kw):
Toon.Toon.enterTeleportOut(self, *args, **kw)
if self.track:
self.track.delayDelete = DelayDelete.DelayDelete(self, 'enterTeleportOut')
def exitTeleportOut(self):
        if self.track is not None:
DelayDelete.cleanupDelayDeletes(self.track)
Toon.Toon.exitTeleportOut(self)
return
def b_setAnimState(self, animName, animMultiplier = 1.0, callback = None, extraArgs = []):
self.d_setAnimState(animName, animMultiplier, None, extraArgs)
self.setAnimState(animName, animMultiplier, None, None, callback, extraArgs)
return
def d_setAnimState(self, animName, animMultiplier = 1.0, timestamp = None, extraArgs = []):
timestamp = globalClockDelta.getFrameNetworkTime()
self.sendUpdate('setAnimState', [animName, animMultiplier, timestamp])
def setAnimState(self, animName, animMultiplier = 1.0, timestamp = None, animType = None, callback = None, extraArgs = []):
if not animName or animName == 'None':
return
        if timestamp is None:
ts = 0.0
else:
ts = globalClockDelta.localElapsedTime(timestamp)
if base.config.GetBool('check-invalid-anims', True):
if animMultiplier > 1.0 and animName in ['neutral']:
animMultiplier = 1.0
if self.animFSM.getStateNamed(animName):
self.animFSM.request(animName, [animMultiplier,
ts,
callback,
extraArgs])
self.cleanupPieInHand()
return
def b_setEmoteState(self, animIndex, animMultiplier):
self.setEmoteState(animIndex, animMultiplier)
self.d_setEmoteState(animIndex, animMultiplier)
def d_setEmoteState(self, animIndex, animMultiplier):
timestamp = globalClockDelta.getFrameNetworkTime()
self.sendUpdate('setEmoteState', [animIndex, animMultiplier, timestamp])
def setEmoteState(self, animIndex, animMultiplier, timestamp = None):
if animIndex == TTEmote.EmoteClear:
return
        if timestamp is None:
ts = 0.0
else:
ts = globalClockDelta.localElapsedTime(timestamp)
callback = None
extraArgs = []
extraArgs.insert(0, animIndex)
self.doEmote(animIndex, animMultiplier, ts, callback, extraArgs)
return
def setCogStatus(self, cogStatusList):
self.cogs = cogStatusList
def setCogCount(self, cogCountList):
self.cogCounts = cogCountList
if hasattr(self, 'suitPage'):
self.suitPage.updatePage()
def setCogRadar(self, radar):
self.cogRadar = radar
if hasattr(self, 'suitPage'):
self.suitPage.updateCogRadarButtons(radar)
def setBuildingRadar(self, radar):
self.buildingRadar = radar
if hasattr(self, 'suitPage'):
self.suitPage.updateBuildingRadarButtons(radar)
def setCogTypes(self, types):
self.cogTypes = types
if self.disguisePage:
self.disguisePage.updatePage()
def setCogLevels(self, levels):
self.cogLevels = levels
if self.disguisePage:
self.disguisePage.updatePage()
def getCogLevels(self):
return self.cogLevels
def setCogParts(self, parts):
self.cogParts = parts
messenger.send(self.uniqueName('cogMeritsChange'))
if self.disguisePage:
self.disguisePage.updatePage()
def getCogParts(self):
return self.cogParts
def setCogMerits(self, merits):
self.cogMerits = merits
messenger.send(self.uniqueName('cogMeritsChange'))
if self.disguisePage:
self.disguisePage.updatePage()
def getCogMerits(self):
return self.cogMerits
    def readyForPromotion(self, dept):
        # Compare this toon's own merits against the total required; the
        # original read from base.localAvatar, which is only correct when
        # called on the local toon.
        merits = self.cogMerits[dept]
        totalMerits = CogDisguiseGlobals.getTotalMerits(self, dept)
        if merits >= totalMerits:
            return 1
        else:
            return 0
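    # A cogIndex of -1 means no disguise; otherwise the full suit for that
    # department is worn if every part is owned, else a rental suit.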
def setCogIndex(self, index):
self.cogIndex = index
if self.cogIndex == -1:
if self.isDisguised:
self.takeOffSuit()
else:
parts = self.getCogParts()
if CogDisguiseGlobals.isSuitComplete(parts, index):
cogIndex = self.cogTypes[index] + SuitDNA.suitsPerDept * index
cog = SuitDNA.suitHeadTypes[cogIndex]
self.putOnSuit(cog)
else:
self.putOnSuit(index, rental=True)
def isCog(self):
if self.cogIndex == -1:
return 0
else:
return 1
def setDisguisePageFlag(self, flag):
if flag and hasattr(self, 'book'):
self.loadDisguisePages()
self.disguisePageFlag = flag
def setSosPageFlag(self, flag):
if flag and hasattr(self, 'book'):
self.loadSosPages()
self.sosPageFlag = flag
def setFishCollection(self, genusList, speciesList, weightList):
self.fishCollection = FishCollection.FishCollection()
self.fishCollection.makeFromNetLists(genusList, speciesList, weightList)
def getFishCollection(self):
return self.fishCollection
def setMaxFishTank(self, maxTank):
self.maxFishTank = maxTank
def getMaxFishTank(self):
return self.maxFishTank
def setFishTank(self, genusList, speciesList, weightList):
self.fishTank = FishTank.FishTank()
self.fishTank.makeFromNetLists(genusList, speciesList, weightList)
messenger.send(self.uniqueName('fishTankChange'))
def getFishTank(self):
return self.fishTank
def isFishTankFull(self):
return len(self.fishTank) >= self.maxFishTank
def setFishingRod(self, rodId):
self.fishingRod = rodId
if self == base.localAvatar:
messenger.send('refreshFishingRod')
def getFishingRod(self):
return self.fishingRod
def setMaxFishingRod(self, rodId):
self.maxFishingRod = rodId
if self == base.localAvatar:
messenger.send('refreshFishingRod')
def getMaxFishingRod(self):
return self.maxFishingRod
def requestFishingRod(self, rodId):
if not 0 <= rodId <= self.maxFishingRod:
return
self.sendUpdate('requestFishingRod', [rodId])
def setFishingTrophies(self, trophyList):
self.fishingTrophies = trophyList
def getFishingTrophies(self):
return self.fishingTrophies
def setQuests(self, flattenedQuests):
questList = []
questLen = 5
for i in xrange(0, len(flattenedQuests), questLen):
questList.append(flattenedQuests[i:i + questLen])
self.quests = questList
if self == base.localAvatar:
messenger.send('questsChanged')
def setQuestCarryLimit(self, limit):
self.questCarryLimit = limit
if self == base.localAvatar:
messenger.send('questsChanged')
def getQuestCarryLimit(self):
return self.questCarryLimit
def d_requestDeleteQuest(self, questDesc):
self.sendUpdate('requestDeleteQuest', [list(questDesc)])
def setMaxCarry(self, maxCarry):
self.maxCarry = maxCarry
if self.inventory:
self.inventory.updateGUI()
def getMaxCarry(self):
return self.maxCarry
def setCheesyEffect(self, effect, hoodId, expireTime):
self.savedCheesyEffect = effect
self.savedCheesyHoodId = hoodId
self.savedCheesyExpireTime = expireTime
if self == base.localAvatar:
self.notify.debug('setCheesyEffect(%s, %s, %s)' % (effect, hoodId, expireTime))
if effect != ToontownGlobals.CENormal:
serverTime = time.time() + self.cr.getServerDelta()
duration = expireTime * 60 - serverTime
if duration < 0:
self.notify.debug('effect should have expired %s ago.' % PythonUtil.formatElapsedSeconds(-duration))
else:
self.notify.debug('effect will expire in %s.' % PythonUtil.formatElapsedSeconds(duration))
if self.activeState == DistributedObject.ESGenerated:
self.reconsiderCheesyEffect(lerpTime=0.5)
else:
self.reconsiderCheesyEffect()
def reconsiderCheesyEffect(self, lerpTime = 0):
effect = self.savedCheesyEffect
hoodId = self.savedCheesyHoodId
if not self.cr.areCheesyEffectsAllowed():
effect = ToontownGlobals.CENormal
if hoodId != 0:
            try:
                currentHoodId = base.cr.playGame.hood.id
            except AttributeError:
                # playGame may not have a hood loaded yet
                currentHoodId = None
if hoodId == 1:
if currentHoodId == ToontownGlobals.ToontownCentral:
effect = ToontownGlobals.CENormal
elif currentHoodId != None and currentHoodId != hoodId:
effect = ToontownGlobals.CENormal
if self.ghostMode:
effect = ToontownGlobals.CEGhost
self.applyCheesyEffect(effect, lerpTime=lerpTime)
return
def setGhostMode(self, flag):
if self.ghostMode != flag:
self.ghostMode = flag
if not hasattr(self, 'cr'):
return
if self.activeState <= DistributedObject.ESDisabled:
self.notify.debug('not applying cheesy effect to disabled Toon')
elif self.activeState == DistributedObject.ESGenerating:
self.reconsiderCheesyEffect()
elif self.activeState == DistributedObject.ESGenerated:
self.reconsiderCheesyEffect(lerpTime=0.5)
else:
self.notify.warning('unknown activeState: %s' % self.activeState)
self.showNametag2d()
self.showNametag3d()
if hasattr(self, 'collNode'):
if self.ghostMode:
self.collNode.setCollideMask(ToontownGlobals.GhostBitmask)
else:
self.collNode.setCollideMask(ToontownGlobals.WallBitmask | ToontownGlobals.PieBitmask)
if self.isLocal():
if self.ghostMode:
self.useGhostControls()
else:
self.useWalkControls()
if hasattr(base, 'wantPets') and base.wantPets:
def setPetTrickPhrases(self, petTricks):
self.petTrickPhrases = petTricks
if self.isLocal():
messenger.send('petTrickPhrasesChanged')
def setCustomMessages(self, customMessages):
self.customMessages = customMessages
if self.isLocal():
messenger.send('customMessagesChanged')
def setResistanceMessages(self, resistanceMessages):
self.resistanceMessages = resistanceMessages
if self.isLocal():
messenger.send('resistanceMessagesChanged')
def getResistanceMessageCharges(self, textId):
msgs = self.resistanceMessages
for i in xrange(len(msgs)):
if msgs[i][0] == textId:
return msgs[i][1]
return 0
def setCatalogSchedule(self, currentWeek, nextTime):
self.catalogScheduleCurrentWeek = currentWeek
self.catalogScheduleNextTime = nextTime
if self.isLocal():
self.notify.debug('setCatalogSchedule(%s, %s)' % (currentWeek, nextTime))
if nextTime:
serverTime = time.time() + self.cr.getServerDelta()
duration = nextTime * 60 - serverTime
self.notify.debug('next catalog in %s.' % PythonUtil.formatElapsedSeconds(duration))
def setCatalog(self, monthlyCatalog, weeklyCatalog, backCatalog):
self.monthlyCatalog = CatalogItemList.CatalogItemList(monthlyCatalog)
self.weeklyCatalog = CatalogItemList.CatalogItemList(weeklyCatalog)
self.backCatalog = CatalogItemList.CatalogItemList(backCatalog)
if config.GetBool('want-house-types', False):
from toontown.catalog import CatalogHouseItem
self.backCatalog.extend(CatalogHouseItem.getAllHouses())
if self.catalogNotify == ToontownGlobals.NewItems:
self.catalogNotify = ToontownGlobals.OldItems
def setCatalogNotify(self, catalogNotify, mailboxNotify):
if len(self.weeklyCatalog) + len(self.monthlyCatalog) == 0:
catalogNotify = ToontownGlobals.NoItems
if len(self.mailboxContents) == 0:
mailboxNotify = ToontownGlobals.NoItems
self.catalogNotify = catalogNotify
self.mailboxNotify = mailboxNotify
if self.isLocal():
self.gotCatalogNotify = 1
self.refreshOnscreenButtons()
def setDeliverySchedule(self, onOrder):
self.onOrder = CatalogItemList.CatalogItemList(onOrder, store=CatalogItem.Customization | CatalogItem.DeliveryDate)
if self == base.localAvatar:
nextTime = self.onOrder.getNextDeliveryDate()
if nextTime != None:
serverTime = time.time() + self.cr.getServerDelta()
duration = nextTime * 60 - serverTime
self.notify.debug('next delivery in %s.' % PythonUtil.formatElapsedSeconds(duration))
messenger.send('setDeliverySchedule-%s' % self.doId)
return
def setMailboxContents(self, mailboxContents):
self.mailboxContents = CatalogItemList.CatalogItemList(mailboxContents, store=CatalogItem.Customization)
messenger.send('setMailboxContents-%s' % self.doId)
def setAwardSchedule(self, onOrder):
self.onAwardOrder = CatalogItemList.CatalogItemList(onOrder, store=CatalogItem.Customization | CatalogItem.DeliveryDate)
if self == base.localAvatar:
nextTime = self.onAwardOrder.getNextDeliveryDate()
if nextTime != None:
serverTime = time.time() + self.cr.getServerDelta()
duration = nextTime * 60 - serverTime
self.notify.debug('next delivery in %s.' % PythonUtil.formatElapsedSeconds(duration))
messenger.send('setAwardSchedule-%s' % self.doId)
return
def setAwardMailboxContents(self, awardMailboxContents):
self.notify.debug('Setting awardMailboxContents to %s.' % awardMailboxContents)
self.awardMailboxContents = CatalogItemList.CatalogItemList(awardMailboxContents, store=CatalogItem.Customization)
self.notify.debug('awardMailboxContents is %s.' % self.awardMailboxContents)
messenger.send('setAwardMailboxContents-%s' % self.doId)
def setAwardNotify(self, awardNotify):
self.notify.debug('setAwardNotify( %s )' % awardNotify)
self.awardNotify = awardNotify
if self.isLocal():
self.gotCatalogNotify = 1
self.refreshOnscreenButtons()
def setGiftSchedule(self, onGiftOrder):
self.onGiftOrder = CatalogItemList.CatalogItemList(onGiftOrder, store=CatalogItem.Customization | CatalogItem.DeliveryDate)
if self == base.localAvatar:
nextTime = self.onGiftOrder.getNextDeliveryDate()
if nextTime != None:
serverTime = time.time() + self.cr.getServerDelta()
duration = nextTime * 60 - serverTime
self.notify.debug('next delivery in %s.' % PythonUtil.formatElapsedSeconds(duration))
return
def playSplashEffect(self, x, y, z):
if localAvatar.zoneId not in [ToontownGlobals.DonaldsDock, ToontownGlobals.OutdoorZone] and (not hasattr(localAvatar, 'inEstate') or localAvatar.inEstate != 1):
if random.random() < 0.1:
self.sendLogSuspiciousEvent('AvatarHackWarning! playing hacked splash effect')
return
from toontown.effects import Splash
        if self.splash is None:
self.splash = Splash.Splash(render)
self.splash.setPos(x, y, z)
self.splash.setScale(2)
self.splash.play()
place = base.cr.playGame.getPlace()
if place:
if hasattr(place.loader, 'submergeSound'):
base.playSfx(place.loader.submergeSound, node=self)
return
def d_playSplashEffect(self, x, y, z):
self.playSplashEffect(x, y, z)
self.sendUpdate('playSplashEffect', [x, y, z])
def setTrackAccess(self, trackArray):
self.trackArray = trackArray
if self.inventory:
self.inventory.updateGUI()
def getTrackAccess(self):
return self.trackArray
def hasTrackAccess(self, track):
return self.trackArray[track]
def setTrackProgress(self, trackId, progress):
self.trackProgressId = trackId
self.trackProgress = progress
if hasattr(self, 'trackPage'):
self.trackPage.updatePage()
def getTrackProgress(self):
return [self.trackProgressId, self.trackProgress]
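    # Unpacks the trackProgress bitmask into a list of 0/1 digits,
    # least-significant bit first.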
def getTrackProgressAsArray(self, maxLength = 15):
shifts = map(operator.rshift, maxLength * [self.trackProgress], range(maxLength - 1, -1, -1))
digits = map(operator.mod, shifts, maxLength * [2])
digits.reverse()
return digits
def setTeleportAccess(self, teleportZoneArray):
self.teleportZoneArray = teleportZoneArray
def getTeleportAccess(self):
return self.teleportZoneArray
def hasTeleportAccess(self, zoneId):
return zoneId in self.teleportZoneArray
def setScavengerHunt(self, scavengerHuntArray):
self.scavengerHuntArray = scavengerHuntArray
def getScavengerHunt(self):
return self.scavengerHuntArray
def setQuestHistory(self, questList):
self.questHistory = questList
def getQuestHistory(self):
return self.questHistory
def setRewardHistory(self, rewardTier, rewardList):
self.rewardTier = rewardTier
self.rewardHistory = rewardList
def getRewardHistory(self):
return (self.rewardTier, self.rewardHistory)
def doSmoothTask(self, task):
self.smoother.computeAndApplySmoothPosHpr(self, self)
self.setSpeed(self.smoother.getSmoothForwardVelocity(), self.smoother.getSmoothRotationalVelocity())
return Task.cont
def d_setParent(self, parentToken):
DistributedSmoothNode.DistributedSmoothNode.d_setParent(self, parentToken)
def setEmoteAccess(self, bits):
        if bits[26]:
            # drop the entry at index 26; list.remove() would delete the
            # first element with that value rather than the 26th slot
            del bits[26]
self.emoteAccess = bits
if self == base.localAvatar:
messenger.send('emotesChanged')
def b_setHouseId(self, id):
self.setHouseId(id)
self.d_setHouseId(id)
def d_setHouseId(self, id):
self.sendUpdate('setHouseId', [id])
def setHouseId(self, id):
self.houseId = id
def getHouseId(self):
return self.houseId
    def b_setSpeedChatStyleIndex(self, index):
        realIndexToSend = 0
        if isinstance(index, int) and 0 <= index < len(speedChatStyles):
            realIndexToSend = index
        self.setSpeedChatStyleIndex(realIndexToSend)
        self.d_setSpeedChatStyleIndex(realIndexToSend)
    def d_setSpeedChatStyleIndex(self, index):
        realIndexToSend = 0
        if isinstance(index, int) and 0 <= index < len(speedChatStyles):
            realIndexToSend = index
        self.sendUpdate('setSpeedChatStyleIndex', [realIndexToSend])
    def setSpeedChatStyleIndex(self, index):
        realIndexToUse = 0
        if isinstance(index, int) and 0 <= index < len(speedChatStyles):
            realIndexToUse = index
        else:
            base.cr.centralLogger.writeClientEvent('Hacker victim setSpeedChatStyleIndex invalid attacking toon = %d' % self.doId)
        self.speedChatStyleIndex = realIndexToUse
nameKey, arrowColor, rolloverColor, frameColor = speedChatStyles[realIndexToUse]
self.nametag.setQtColor(VBase4(frameColor[0], frameColor[1], frameColor[2], 1))
if self.isLocal():
messenger.send('SpeedChatStyleChange', [])
def getSpeedChatStyleIndex(self):
return self.speedChatStyleIndex
def setMaxMoney(self, maxMoney):
self.maxMoney = maxMoney
def getMaxMoney(self):
return self.maxMoney
def setMoney(self, money):
if money != self.money:
self.money = money
messenger.send(self.uniqueName('moneyChange'), [self.money])
def getMoney(self):
return self.money
def setMaxBankMoney(self, maxMoney):
self.maxBankMoney = maxMoney
def getMaxBankMoney(self):
return self.maxBankMoney
def setBankMoney(self, money):
self.bankMoney = money
messenger.send(self.uniqueName('bankMoneyChange'), [self.bankMoney])
def getBankMoney(self):
return self.bankMoney
def getTotalMoney(self):
return self.getBankMoney() + self.getMoney()
def takeMoney(self, money):
self.sendUpdate('takeMoney', [money])
def setEmblems(self, emblems):
if self.emblems != emblems:
self.emblems = emblems
messenger.send(self.uniqueName('emblemsChange'), [self.emblems])
def getEmblems(self):
return self.emblems
def isEnoughEmblemsToBuy(self, itemEmblemPrices):
for emblemIndex, emblemPrice in enumerate(itemEmblemPrices):
if emblemIndex >= len(self.emblems):
return False
if self.emblems[emblemIndex] < emblemPrice:
return False
return True
def isEnoughMoneyAndEmblemsToBuy(self, moneyPrice, itemEmblemPrices):
if self.getTotalMoney() < moneyPrice:
return False
for emblemIndex, emblemPrice in enumerate(itemEmblemPrices):
if emblemIndex >= len(self.emblems):
return False
if self.emblems[emblemIndex] < emblemPrice:
return False
return True
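    # The pie methods below replay remote throw events: timestamp32 is
    # converted to a local elapsed time minus the smoothing delay, and a
    # negative offset is absorbed by prepending a Wait to the interval.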
def presentPie(self, x, y, z, h, timestamp32):
if self.numPies <= 0:
return
lastTossTrack = Sequence()
if self.tossTrack:
lastTossTrack = self.tossTrack
ts = globalClockDelta.localElapsedTime(timestamp32, bits=32)
ts -= self.smoother.getDelay()
ival = self.getPresentPieInterval(x, y, z, h)
if ts > 0:
startTime = ts
lastTossTrack.finish()
else:
ival = Sequence(Wait(-ts), ival)
lastTossTrack.finish()
startTime = 0
ival = Sequence(ival)
ival.start(startTime)
self.tossTrack = ival
return
def tossPie(self, x, y, z, h, sequence, power, throwType, timestamp32):
if self.numPies <= 0:
return
if self.numPies != ToontownGlobals.FullPies:
self.setNumPies(self.numPies - 1)
self.lastTossedPie = globalClock.getFrameTime()
lastTossTrack = Sequence()
if self.tossTrack:
lastTossTrack = self.tossTrack
lastPieTrack = Sequence()
if sequence in self.pieTracks:
lastPieTrack = self.pieTracks[sequence]
del self.pieTracks[sequence]
ts = globalClockDelta.localElapsedTime(timestamp32, bits=32)
ts -= self.smoother.getDelay()
toss, pie, flyPie = self.getTossPieInterval(x, y, z, h, power, throwType)
if ts > 0:
startTime = ts
lastTossTrack.finish()
lastPieTrack.finish()
else:
toss = Sequence(Wait(-ts), toss)
pie = Sequence(Wait(-ts), pie)
lastTossTrack.finish()
lastPieTrack.finish()
startTime = 0
self.tossTrack = toss
toss.start(startTime)
pie = Sequence(pie, Func(self.pieFinishedFlying, sequence))
self.pieTracks[sequence] = pie
pie.start(startTime)
def pieFinishedFlying(self, sequence):
if sequence in self.pieTracks:
del self.pieTracks[sequence]
def pieFinishedSplatting(self, sequence):
if sequence in self.splatTracks:
del self.splatTracks[sequence]
def pieSplat(self, x, y, z, sequence, pieCode, timestamp32):
if self.isLocal():
return
elapsed = globalClock.getFrameTime() - self.lastTossedPie
if elapsed > 30:
return
lastPieTrack = Sequence()
if sequence in self.pieTracks:
lastPieTrack = self.pieTracks[sequence]
del self.pieTracks[sequence]
if sequence in self.splatTracks:
lastSplatTrack = self.splatTracks[sequence]
del self.splatTracks[sequence]
lastSplatTrack.finish()
ts = globalClockDelta.localElapsedTime(timestamp32, bits=32)
ts -= self.smoother.getDelay()
splat = self.getPieSplatInterval(x, y, z, pieCode)
splat = Sequence(Func(messenger.send, 'pieSplat', [self, pieCode]), splat)
if ts > 0:
startTime = ts
lastPieTrack.finish()
else:
splat = Sequence(Wait(-ts), splat)
startTime = 0
splat = Sequence(splat, Func(self.pieFinishedSplatting, sequence))
self.splatTracks[sequence] = splat
splat.start(startTime)
def cleanupPies(self):
for track in self.pieTracks.values():
track.finish()
self.pieTracks = {}
for track in self.splatTracks.values():
track.finish()
self.splatTracks = {}
self.cleanupPieInHand()
def cleanupPieInHand(self):
if self.tossTrack:
self.tossTrack.finish()
self.tossTrack = None
self.cleanupPieModel()
def setNumPies(self, numPies):
self.numPies = numPies
if self.isLocal():
self.updatePieButton()
if numPies == 0:
self.interruptPie()
def setPieType(self, pieType):
self.pieType = pieType
if self.isLocal():
self.updatePieButton()
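    # Attaches a star to the nametag once trophyScore crosses the
    # TrophyStarLevels thresholds; higher tiers change its color and scale,
    # and the second threshold of each pair makes it spin via __starSpin.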
def setTrophyScore(self, score):
self.trophyScore = score
        if self.trophyStar is not None:
self.trophyStar.removeNode()
self.trophyStar = None
if self.trophyStarSpeed != 0:
taskMgr.remove(self.uniqueName('starSpin'))
self.trophyStarSpeed = 0
if self.trophyScore >= ToontownGlobals.TrophyStarLevels[4]:
self.trophyStar = loader.loadModel('phase_3.5/models/gui/name_star')
np = NodePath(self.nametag.getNameIcon())
self.trophyStar.reparentTo(np)
self.trophyStar.setScale(2)
self.trophyStar.setColor(ToontownGlobals.TrophyStarColors[4])
self.trophyStarSpeed = 15
if self.trophyScore >= ToontownGlobals.TrophyStarLevels[5]:
taskMgr.add(self.__starSpin, self.uniqueName('starSpin'))
elif self.trophyScore >= ToontownGlobals.TrophyStarLevels[2]:
self.trophyStar = loader.loadModel('phase_3.5/models/gui/name_star')
np = NodePath(self.nametag.getNameIcon())
self.trophyStar.reparentTo(np)
self.trophyStar.setScale(1.5)
self.trophyStar.setColor(ToontownGlobals.TrophyStarColors[2])
self.trophyStarSpeed = 10
if self.trophyScore >= ToontownGlobals.TrophyStarLevels[3]:
taskMgr.add(self.__starSpin, self.uniqueName('starSpin'))
elif self.trophyScore >= ToontownGlobals.TrophyStarLevels[0]:
self.trophyStar = loader.loadModel('phase_3.5/models/gui/name_star')
np = NodePath(self.nametag.getNameIcon())
self.trophyStar.reparentTo(np)
self.trophyStar.setScale(1.5)
self.trophyStar.setColor(ToontownGlobals.TrophyStarColors[0])
self.trophyStarSpeed = 8
if self.trophyScore >= ToontownGlobals.TrophyStarLevels[1]:
taskMgr.add(self.__starSpin, self.uniqueName('starSpin'))
self.setHeadPositions()
def __starSpin(self, task):
now = globalClock.getFrameTime()
r = now * self.trophyStarSpeed % 360.0
self.trophyStar.setR(r)
return Task.cont
    def getZoneId(self):
        place = base.cr.playGame.getPlace()
        if place:
            return place.getZoneId()
        return None
def getRequestID(self):
return CLIENT_GET_AVATAR_DETAILS
def announceBingo(self):
self.setChatAbsolute(TTLocalizer.FishBingoBingo, CFSpeech | CFTimeout)
def squish(self, damage):
if self == base.localAvatar:
base.cr.playGame.getPlace().fsm.request('squished')
self.stunToon()
self.setZ(self.getZ(render) + 0.025)
def d_squish(self, damage):
self.sendUpdate('squish', [damage])
def b_squish(self, damage):
if not self.isStunned:
self.squish(damage)
self.d_squish(damage)
self.playDialogueForString('!')
def getShadowJoint(self):
return Toon.Toon.getShadowJoint(self)
if base.wantKarts:
def hasKart(self):
return self.kartDNA[KartDNA.bodyType] != -1
def getKartDNA(self):
return self.kartDNA
def setTickets(self, numTickets):
self.tickets = numTickets
def getTickets(self):
return self.tickets
def getAccessoryByType(self, accType):
return self.kartDNA[accType]
def setCurrentKart(self, avId):
self.kartId = avId
def releaseKart(self):
self.kartId = None
return
def setKartBodyType(self, bodyType):
self.kartDNA[KartDNA.bodyType] = bodyType
def getKartBodyType(self):
return self.kartDNA[KartDNA.bodyType]
def setKartBodyColor(self, bodyColor):
self.kartDNA[KartDNA.bodyColor] = bodyColor
def getKartBodyColor(self):
return self.kartDNA[KartDNA.bodyColor]
def setKartAccessoryColor(self, accColor):
self.kartDNA[KartDNA.accColor] = accColor
def getKartAccessoryColor(self):
return self.kartDNA[KartDNA.accColor]
def setKartEngineBlockType(self, ebType):
self.kartDNA[KartDNA.ebType] = ebType
def getKartEngineBlockType(self):
return self.kartDNA[KartDNA.ebType]
def setKartSpoilerType(self, spType):
self.kartDNA[KartDNA.spType] = spType
def getKartSpoilerType(self):
return self.kartDNA[KartDNA.spType]
def setKartFrontWheelWellType(self, fwwType):
self.kartDNA[KartDNA.fwwType] = fwwType
def getKartFrontWheelWellType(self):
return self.kartDNA[KartDNA.fwwType]
def setKartBackWheelWellType(self, bwwType):
self.kartDNA[KartDNA.bwwType] = bwwType
def getKartBackWheelWellType(self):
return self.kartDNA[KartDNA.bwwType]
def setKartRimType(self, rimsType):
self.kartDNA[KartDNA.rimsType] = rimsType
def setKartDecalType(self, decalType):
self.kartDNA[KartDNA.decalType] = decalType
def getKartDecalType(self):
return self.kartDNA[KartDNA.decalType]
def getKartRimType(self):
return self.kartDNA[KartDNA.rimsType]
def setKartAccessoriesOwned(self, accessories):
while len(accessories) < 16:
accessories.append(-1)
self.accessories = accessories
        def getKartAccessoriesOwned(self):
            # filter out InvalidEntry placeholders without mutating self.accessories
            return [acc for acc in self.accessories if acc != InvalidEntry]
def requestKartDNAFieldUpdate(self, dnaField, fieldValue):
self.notify.debug('requestKartDNAFieldUpdate - dnaField %s, fieldValue %s' % (dnaField, fieldValue))
self.sendUpdate('updateKartDNAField', [dnaField, fieldValue])
def requestAddOwnedAccessory(self, accessoryId):
            self.notify.debug('requestAddOwnedAccessory - purchased accessory %s' % accessoryId)
self.sendUpdate('addOwnedAccessory', [accessoryId])
def requestRemoveOwnedAccessory(self, accessoryId):
            self.notify.debug('requestRemoveOwnedAccessory - removed accessory %s' % accessoryId)
self.sendUpdate('removeOwnedAccessory', [accessoryId])
def setKartingTrophies(self, trophyList):
self.kartingTrophies = trophyList
def getKartingTrophies(self):
return self.kartingTrophies
def setKartingHistory(self, history):
self.kartingHistory = history
def getKartingHistory(self):
return self.kartingHistory
def setKartingPersonalBest(self, bestTimes):
self.kartingPersonalBest = bestTimes
def getKartingPersonalBest(self):
return self.kartingPersonalBest
def setKartingPersonalBest2(self, bestTimes2):
self.kartingPersonalBest2 = bestTimes2
def getKartingPersonalBest2(self):
return self.kartingPersonalBest2
def getKartingPersonalBestAll(self):
return self.kartingPersonalBest + self.kartingPersonalBest2
if hasattr(base, 'wantPets') and base.wantPets:
def setPetId(self, petId):
self.petId = petId
if self.isLocal():
base.cr.addPetToFriendsMap()
def getPetId(self):
return self.petId
def hasPet(self):
return self.petId != 0
def b_setPetTutorialDone(self, done):
self.d_setPetTutorialDone(done)
self.setPetTutorialDone(done)
def d_setPetTutorialDone(self, done):
self.sendUpdate('setPetTutorialDone', [done])
def setPetTutorialDone(self, done):
self.petTutorialDone = done
def b_setFishBingoTutorialDone(self, done):
self.d_setFishBingoTutorialDone(done)
self.setFishBingoTutorialDone(done)
def d_setFishBingoTutorialDone(self, done):
self.sendUpdate('setFishBingoTutorialDone', [done])
def setFishBingoTutorialDone(self, done):
self.fishBingoTutorialDone = done
def b_setFishBingoMarkTutorialDone(self, done):
self.d_setFishBingoMarkTutorialDone(done)
self.setFishBingoMarkTutorialDone(done)
def d_setFishBingoMarkTutorialDone(self, done):
self.sendUpdate('setFishBingoMarkTutorialDone', [done])
def setFishBingoMarkTutorialDone(self, done):
self.fishBingoMarkTutorialDone = done
def b_setPetMovie(self, petId, flag):
self.d_setPetMovie(petId, flag)
self.setPetMovie(petId, flag)
def d_setPetMovie(self, petId, flag):
self.sendUpdate('setPetMovie', [petId, flag])
def setPetMovie(self, petId, flag):
pass
def lookupPetDNA(self):
if self.petId and not self.petDNA:
from toontown.pets import PetDetail
PetDetail.PetDetail(self.petId, self.__petDetailsLoaded)
def __petDetailsLoaded(self, pet):
self.petDNA = pet.style
def trickOrTreatTargetMet(self, beanAmount):
if self.effect:
self.effect.stop()
self.effect = TrickOrTreatTargetEffect(beanAmount)
self.effect.play()
def winterCarolingTargetMet(self, beanAmount):
if self.effect:
self.effect.stop()
self.effect = WinterCarolingEffect(beanAmount)
self.effect.play()
def d_reqCogSummons(self, type, suitIndex):
self.sendUpdate('reqCogSummons', [type, suitIndex])
def cogSummonsResponse(self, returnCode, suitIndex, doId):
messenger.send('cog-summons-response', [returnCode, suitIndex, doId])
def setCogSummonsEarned(self, cogSummonsEarned):
self.cogSummonsEarned = cogSummonsEarned
def getCogSummonsEarned(self):
return self.cogSummonsEarned
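    # Each cogSummonsEarned entry is a per-suit bitmask: 1=building,
    # 2=invasion, 4=cogdo, 8=skelinvasion, 16=waiterinvasion, 32=v2invasion.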
def hasCogSummons(self, suitIndex, type = None):
summons = self.getCogSummonsEarned()
curSetting = summons[suitIndex]
if type == 'building':
return curSetting & 1
elif type == 'invasion':
return curSetting & 2
elif type == 'cogdo':
return curSetting & 4
elif type == 'skelinvasion':
return curSetting & 8
elif type == 'waiterinvasion':
return curSetting & 16
elif type == 'v2invasion':
return curSetting & 32
return curSetting
def setFlowerCollection(self, speciesList, varietyList):
self.flowerCollection = FlowerCollection.FlowerCollection()
self.flowerCollection.makeFromNetLists(speciesList, varietyList)
def getFlowerCollection(self):
return self.flowerCollection
def setMaxFlowerBasket(self, maxFlowerBasket):
self.maxFlowerBasket = maxFlowerBasket
def getMaxFlowerBasket(self):
return self.maxFlowerBasket
def isFlowerBasketFull(self):
return len(self.flowerBasket) >= self.maxFlowerBasket
def setFlowerBasket(self, speciesList, varietyList):
self.flowerBasket = FlowerBasket.FlowerBasket()
self.flowerBasket.makeFromNetLists(speciesList, varietyList)
messenger.send('flowerBasketUpdated')
def getFlowerBasket(self):
return self.flowerBasket
def setShovel(self, shovelId):
self.shovel = shovelId
def attachShovel(self):
self.shovelModel = self.getShovelModel()
self.shovelModel.reparentTo(self.rightHand)
return self.shovelModel
def detachShovel(self):
if self.shovelModel:
self.shovelModel.removeNode()
def getShovelModel(self):
shovels = loader.loadModel('phase_5.5/models/estate/shovels')
shovelId = ['A',
'B',
'C',
'D'][self.shovel]
shovel = shovels.find('**/shovel' + shovelId)
shovel.setH(-90)
shovel.setP(216)
shovel.setX(0.2)
shovel.detachNode()
shovels.removeNode()
return shovel
def setShovelSkill(self, skillLevel):
self.shovelSkill = skillLevel
def getBoxCapability(self):
return GardenGlobals.getShovelPower(self.shovel, self.shovelSkill)
def setWateringCan(self, wateringCanId):
self.wateringCan = wateringCanId
def attachWateringCan(self):
self.wateringCanModel = self.getWateringCanModel()
self.wateringCanModel.reparentTo(self.rightHand)
return self.wateringCanModel
def detachWateringCan(self):
if self.wateringCanModel:
self.wateringCanModel.removeNode()
def getWateringCanModel(self):
scalePosHprsTable = ((0.25, 0.1, 0, 0.2, -90, -125, -45),
(0.2, 0.0, 0.25, 0.2, -90, -125, -45),
(0.2, 0.2, 0.1, 0.2, -90, -125, -45),
(0.2, 0.0, 0.25, 0.2, -90, -125, -45))
cans = loader.loadModel('phase_5.5/models/estate/watering_cans')
canId = ['A',
'B',
'C',
'D'][self.wateringCan]
can = cans.find('**/water_can' + canId)
can.setScale(scalePosHprsTable[self.wateringCan][0])
can.setPos(scalePosHprsTable[self.wateringCan][1], scalePosHprsTable[self.wateringCan][2], scalePosHprsTable[self.wateringCan][3])
can.setHpr(scalePosHprsTable[self.wateringCan][4], scalePosHprsTable[self.wateringCan][5], scalePosHprsTable[self.wateringCan][6])
can.detachNode()
cans.removeNode()
        if hasattr(base, 'rwc'):
            if base.rwc:
                if hasattr(self, 'wateringCan2'):
                    self.wateringCan2.removeNode()
                self.wateringCan2 = can.copyTo(self.rightHand)
            elif hasattr(self, 'wateringCan2'):
                # guard: wateringCan2 only exists after a previous copy was made
                self.wateringCan2.removeNode()
return can
def setWateringCanSkill(self, skillLevel):
self.wateringCanSkill = skillLevel
def setGardenSpecials(self, specials):
self.gardenSpecials = specials
if hasattr(self, 'gardenPage') and self.gardenPage:
self.gardenPage.updatePage()
def getGardenSpecials(self):
return self.gardenSpecials
def getMyTrees(self):
treeDict = self.cr.getObjectsOfClass(DistributedGagTree.DistributedGagTree)
trees = []
for tree in treeDict.values():
if tree.getOwnerId() == self.doId:
trees.append(tree)
if not trees:
pass
return trees
def isTreePlanted(self, track, level):
trees = self.getMyTrees()
for tree in trees:
if tree.gagTrack == track and tree.gagLevel == level:
return True
return False
def doIHaveRequiredTrees(self, track, level):
trees = self.getMyTrees()
trackAndLevelList = []
for tree in trees:
trackAndLevelList.append((tree.gagTrack, tree.gagLevel))
haveRequired = True
for curLevel in xrange(level):
testTuple = (track, curLevel)
if testTuple not in trackAndLevelList:
haveRequired = False
break
return haveRequired
def setTrackBonusLevel(self, trackArray):
self.trackBonusLevel = trackArray
if self.inventory:
self.inventory.updateGUI()
    def getTrackBonusLevel(self, track = None):
        if track is None:
            return self.trackBonusLevel
        return self.trackBonusLevel[track]
def checkGagBonus(self, track, level):
trackBonus = self.getTrackBonusLevel(track)
return trackBonus >= level
def setGardenTrophies(self, trophyList):
self.gardenTrophies = trophyList
def getGardenTrophies(self):
return self.gardenTrophies
def useSpecialResponse(self, returnCode):
messenger.send('use-special-response', [returnCode])
def setGardenStarted(self, bStarted):
self.gardenStarted = bStarted
def getGardenStarted(self):
return self.gardenStarted
def sendToGolfCourse(self, zoneId):
hoodId = self.cr.playGame.hood.hoodId
golfRequest = {'loader': 'safeZoneLoader',
'where': 'golfcourse',
'how': 'teleportIn',
'hoodId': hoodId,
'zoneId': zoneId,
'shardId': None,
'avId': -1}
base.cr.playGame.getPlace().requestLeave(golfRequest)
return
def getGolfTrophies(self):
return self.golfTrophies
def getGolfCups(self):
return self.golfCups
def setGolfHistory(self, history):
self.golfHistory = history
self.golfTrophies = GolfGlobals.calcTrophyListFromHistory(self.golfHistory)
self.golfCups = GolfGlobals.calcCupListFromHistory(self.golfHistory)
if hasattr(self, 'book'):
self.addGolfPage()
def getGolfHistory(self):
return self.golfHistory
def hasPlayedGolf(self):
retval = False
for historyValue in self.golfHistory:
if historyValue:
retval = True
break
return retval
def setPackedGolfHoleBest(self, packedHoleBest):
unpacked = GolfGlobals.unpackGolfHoleBest(packedHoleBest)
self.setGolfHoleBest(unpacked)
def setGolfHoleBest(self, holeBest):
self.golfHoleBest = holeBest
def getGolfHoleBest(self):
return self.golfHoleBest
def setGolfCourseBest(self, courseBest):
self.golfCourseBest = courseBest
def getGolfCourseBest(self):
return self.golfCourseBest
def setUnlimitedSwing(self, unlimitedSwing):
self.unlimitedSwing = unlimitedSwing
def getUnlimitedSwing(self):
return self.unlimitedSwing
def getPinkSlips(self):
return self.specialInventory[0]
def getCrateKeys(self):
return self.specialInventory[1]
def setSpecialInventory(self, specialInventory):
self.specialInventory = specialInventory
def getSpecialInventory(self):
return self.specialInventory
    def setDisplayName(self, name):
        # parameter renamed from 'str', which shadowed the builtin
        if not self.isDisguised:
            self.setFancyNametag(name=name)
        else:
            self.removeFancyNametag()
        Avatar.Avatar.setDisplayName(self, name)
def setFancyNametag(self, name=None):
if name is None:
name = self.getName()
if self.getNametagStyle() >= len(TTLocalizer.NametagFonts):
self.nametag.setFont(ToontownGlobals.getToonFont())
else:
self.nametag.setFont(ToontownGlobals.getNametagFont(self.getNametagStyle()))
Avatar.Avatar.setDisplayName(self, name)
def removeFancyNametag(self):
self.nametag.clearShadow()
def getNametagStyle(self):
if hasattr(self, 'nametagStyle'):
return self.nametagStyle
else:
return 0
def setNametagStyle(self, nametagStyle):
if hasattr(self, 'gmToonLockStyle') and self.gmToonLockStyle:
return
if base.config.GetBool('want-nametag-avids', 0):
nametagStyle = 0
self.nametagStyle = nametagStyle
self.setDisplayName(self.getName())
def getNametagStyles(self):
return self.nametagStyles
def setNametagStyles(self, nametagStyles):
self.nametagStyles = nametagStyles
if self == base.localAvatar:
messenger.send('refreshNametagStyle')
def requestNametagStyle(self, nametagStyle):
if nametagStyle not in self.nametagStyles:
return
self.sendUpdate('requestNametagStyle', [nametagStyle])
def getAvIdName(self):
return '%s\n%s' % (self.getName(), self.doId)
def playCurrentDialogue(self, dialogue, chatFlags, interrupt = 1):
if interrupt and self.__currentDialogue is not None:
self.__currentDialogue.stop()
self.__currentDialogue = dialogue
if dialogue:
base.playSfx(dialogue, node=self)
elif chatFlags & CFSpeech != 0:
if self.nametag.getNumChatPages() > 0:
self.playDialogueForString(self.nametag.getChat())
if self.soundChatBubble != None:
base.playSfx(self.soundChatBubble, node=self)
elif self.nametag.getChatStomp():
self.playDialogueForString(self.nametag.getStompText(), self.nametag.getStompDelay())
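    # Chooses a dialogue sound from the chat text: punctuation picks the
    # type (special/exclamation/question/statement) and the string length
    # falls into one of four length buckets.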
def playDialogueForString(self, chatString, delay = 0.0):
if len(chatString) == 0:
return
searchString = chatString.lower()
if searchString.find(OTPLocalizer.DialogSpecial) >= 0:
type = 'special'
elif searchString.find(OTPLocalizer.DialogExclamation) >= 0:
type = 'exclamation'
elif searchString.find(OTPLocalizer.DialogQuestion) >= 0:
type = 'question'
elif random.randint(0, 1):
type = 'statementA'
else:
type = 'statementB'
stringLength = len(chatString)
if stringLength <= OTPLocalizer.DialogLength1:
length = 1
elif stringLength <= OTPLocalizer.DialogLength2:
length = 2
elif stringLength <= OTPLocalizer.DialogLength3:
length = 3
else:
length = 4
self.playDialogue(type, length, delay)
def playDialogue(self, type, length, delay = 0.0):
dialogueArray = self.getDialogueArray()
        if dialogueArray is None:
return
sfxIndex = None
if type == 'statementA' or type == 'statementB':
if length == 1:
sfxIndex = 0
elif length == 2:
sfxIndex = 1
elif length >= 3:
sfxIndex = 2
elif type == 'question':
sfxIndex = 3
elif type == 'exclamation':
sfxIndex = 4
elif type == 'special':
sfxIndex = 5
else:
            self.notify.error('unrecognized dialogue type: %s' % type)
if sfxIndex != None and sfxIndex < len(dialogueArray) and dialogueArray[sfxIndex] != None:
soundSequence = Sequence(Wait(delay), SoundInterval(dialogueArray[sfxIndex], node=None, listenerNode=base.localAvatar, loop=0, volume=1.0))
self.soundSequenceList.append(soundSequence)
soundSequence.start()
self.cleanUpSoundList()
return
def cleanUpSoundList(self):
removeList = []
for soundSequence in self.soundSequenceList:
if soundSequence.isStopped():
removeList.append(soundSequence)
for soundSequence in removeList:
self.soundSequenceList.remove(soundSequence)
def setChatAbsolute(self, chatString, chatFlags, dialogue = None, interrupt = 1, quiet = 0):
DistributedAvatar.DistributedAvatar.setChatAbsolute(self, chatString, chatFlags, dialogue, interrupt)
def displayTalk(self, chatString):
flags = CFSpeech | CFTimeout
if ChatUtil.isThought(chatString):
flags = CFThought
chatString = ChatUtil.removeThoughtPrefix(chatString)
self.nametag.setChat(chatString, flags)
if base.toonChatSounds:
self.playCurrentDialogue(None, flags, interrupt=1)
def setMail(self, mail):
DistributedToon.partyNotify.debug('setMail called with %d mail items' % len(mail))
self.mail = []
for i in xrange(len(mail)):
oneMailItem = mail[i]
newMail = SimpleMailBase(*oneMailItem)
self.mail.append(newMail)
def setSimpleMailNotify(self, simpleMailNotify):
DistributedToon.partyNotify.debug('setSimpleMailNotify( %s )' % simpleMailNotify)
self.simpleMailNotify = simpleMailNotify
if self.isLocal():
self.gotCatalogNotify = 1
self.refreshOnscreenButtons()
def setInviteMailNotify(self, inviteMailNotify):
DistributedToon.partyNotify.debug('setInviteMailNotify( %s )' % inviteMailNotify)
self.inviteMailNotify = inviteMailNotify
if self.isLocal():
self.gotCatalogNotify = 1
self.refreshOnscreenButtons()
def setInvites(self, invites):
DistributedToon.partyNotify.debug('setInvites called passing in %d invites.' % len(invites))
self.invites = []
for i in xrange(len(invites)):
oneInvite = invites[i]
newInvite = InviteInfo(*oneInvite)
self.invites.append(newInvite)
def updateInviteMailNotify(self):
invitesInMailbox = self.getInvitesToShowInMailbox()
newInvites = 0
readButNotRepliedInvites = 0
for invite in invitesInMailbox:
if invite.status == PartyGlobals.InviteStatus.NotRead:
newInvites += 1
elif invite.status == PartyGlobals.InviteStatus.ReadButNotReplied:
readButNotRepliedInvites += 1
if newInvites:
self.setInviteMailNotify(ToontownGlobals.NewItems)
elif readButNotRepliedInvites:
self.setInviteMailNotify(ToontownGlobals.OldItems)
else:
self.setInviteMailNotify(ToontownGlobals.NoItems)
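    # Filters invites down to the ones worth showing: replied invites,
    # cancelled parties and parties that have already ended are skipped.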
def getInvitesToShowInMailbox(self):
result = []
for invite in self.invites:
appendInvite = True
if invite.status == InviteStatus.Accepted or invite.status == InviteStatus.Rejected:
appendInvite = False
if appendInvite:
partyInfo = self.getOnePartyInvitedTo(invite.partyId)
if not partyInfo:
appendInvite = False
if appendInvite:
if partyInfo.status == PartyGlobals.PartyStatus.Cancelled:
appendInvite = False
if appendInvite:
endDate = partyInfo.endTime.date()
curDate = base.cr.toontownTimeManager.getCurServerDateTime().date()
if endDate < curDate:
appendInvite = False
if appendInvite:
result.append(invite)
return result
def getNumInvitesToShowInMailbox(self):
result = len(self.getInvitesToShowInMailbox())
return result
def setHostedParties(self, hostedParties):
DistributedToon.partyNotify.debug('setHostedParties called passing in %d parties.' % len(hostedParties))
self.hostedParties = []
for i in xrange(len(hostedParties)):
hostedInfo = hostedParties[i]
newParty = PartyInfo(*hostedInfo)
self.hostedParties.append(newParty)
def setPartiesInvitedTo(self, partiesInvitedTo):
DistributedToon.partyNotify.debug('setPartiesInvitedTo called passing in %d parties.' % len(partiesInvitedTo))
self.partiesInvitedTo = []
for i in xrange(len(partiesInvitedTo)):
partyInfo = partiesInvitedTo[i]
newParty = PartyInfo(*partyInfo)
self.partiesInvitedTo.append(newParty)
self.updateInviteMailNotify()
def getOnePartyInvitedTo(self, partyId):
result = None
for i in xrange(len(self.partiesInvitedTo)):
partyInfo = self.partiesInvitedTo[i]
if partyInfo.partyId == partyId:
result = partyInfo
break
return result
def getInviteForPartyId(self, partyId):
result = None
for invite in self.invites:
if invite.partyId == partyId:
result = invite
break
return result
def setPartyReplies(self, replies):
DistributedToon.partyNotify.debug('setPartyReplies called passing in %d parties.' % len(replies))
self.partyReplyInfoBases = []
for i in xrange(len(replies)):
partyReply = replies[i]
repliesForOneParty = PartyReplyInfoBase(*partyReply)
self.partyReplyInfoBases.append(repliesForOneParty)
def setPartyCanStart(self, partyId):
DistributedToon.partyNotify.debug('setPartyCanStart called passing in partyId=%s' % partyId)
for partyInfo in self.hostedParties:
if partyInfo.partyId == partyId:
partyInfo.status = PartyGlobals.PartyStatus.CanStart
from toontown.shtiker import EventsPage
if hasattr(self, 'eventsPage') and base.localAvatar.book.entered and base.localAvatar.book.isOnPage(self.eventsPage) and self.eventsPage.getMode() == EventsPage.EventsPage_Host:
base.localAvatar.eventsPage.loadHostedPartyInfo()
self.setSystemMessage(0, TTLocalizer.PartyCanStart, WTSystem)
def setPartyStatus(self, partyId, newStatus):
        DistributedToon.partyNotify.debug('setPartyStatus called passing in partyId=%s status=%s' % (partyId, newStatus))
found = False
for partyInfo in self.hostedParties:
if partyInfo.partyId == partyId:
partyInfo.status = newStatus
found = True
break
for partyInfo in self.partiesInvitedTo:
if partyInfo.partyId == partyId:
partyInfo.status = newStatus
found = True
from toontown.shtiker import EventsPage
if hasattr(self, 'eventsPage') and base.localAvatar.book.entered and base.localAvatar.book.isOnPage(self.eventsPage) and self.eventsPage.getMode() == EventsPage.EventsPage_Invited:
base.localAvatar.eventsPage.loadInvitations()
if newStatus == PartyStatus.Started and hasattr(self, 'setSystemMessage'):
invite = self.getInviteForPartyId(partyId)
if invite:
name = ' '
host = base.cr.identifyAvatar(partyInfo.hostId)
if host:
name = host.getName()
if invite.status == InviteStatus.Accepted:
displayStr = TTLocalizer.PartyHasStartedAcceptedInvite % TTLocalizer.GetPossesive(name)
else:
displayStr = TTLocalizer.PartyHasStartedNotAcceptedInvite % TTLocalizer.GetPossesive(name)
self.setSystemMessage(partyInfo.hostId, displayStr, WTSystem)
break
if not found:
self.notify.warning("setPartyCanStart can't find partyId=% status=%d" % (partyId, newStatus))
    def announcePartyStarted(self, partyId):
        DistributedToon.partyNotify.debug('announcePartyStarted')
        # the early return below deliberately disables the whisper
        # notifications; the code after it is retained for reference
        return
        for partyReplyInfo in self.partyReplyInfoBases:
            if partyReplyInfo.partyId == partyId:
                for singleReply in partyReplyInfo.replies:
                    toonId = singleReply.inviteeId
                    if base.cr.isFriend(toonId) and base.cr.isFriendOnline(toonId):
                        # both branches of the original if/else sent the same message
                        self.whisperSCTo(5302, toonId)
def updateInvite(self, inviteKey, newStatus):
DistributedToon.partyNotify.debug('updateInvite( inviteKey=%d, newStatus=%s )' % (inviteKey, InviteStatus.getString(newStatus)))
for invite in self.invites:
if invite.inviteKey == inviteKey:
invite.status = newStatus
self.updateInviteMailNotify()
break
def updateReply(self, partyId, inviteeId, newStatus):
DistributedToon.partyNotify.debug('updateReply( partyId=%d, inviteeId=%d, newStatus=%s )' % (partyId, inviteeId, InviteStatus.getString(newStatus)))
for partyReplyInfoBase in self.partyReplyInfoBases:
if partyReplyInfoBase.partyId == partyId:
for reply in partyReplyInfoBase.replies:
if reply.inviteeId == inviteeId:
reply.status = newStatus
break
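    # Heals the toon by hpGained, clamped to maxHp, and shows floating hp
    # text for the amount actually gained.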
def toonUp(self, hpGained, hasInteractivePropBonus = False):
        if self.hp is None or hpGained < 0:
return
oldHp = self.hp
if self.hp + hpGained <= 0:
self.hp += hpGained
else:
self.hp = min(max(self.hp, 0) + hpGained, self.maxHp)
hpGained = self.hp - max(oldHp, 0)
if hpGained > 0:
self.showHpText(hpGained, hasInteractivePropBonus=hasInteractivePropBonus)
self.hpChange(quietly=0)
return
def showHpText(self, number, bonus = 0, scale = 1, hasInteractivePropBonus = False):
if self.HpTextEnabled and not self.ghostMode:
if number != 0:
if self.hpText:
self.hideHpText()
self.HpTextGenerator.setFont(OTPGlobals.getSignFont())
if number < 0:
self.HpTextGenerator.setText(str(number))
else:
hpGainedStr = '+' + str(number)
if hasInteractivePropBonus:
hpGainedStr += '\n' + TTLocalizer.InteractivePropTrackBonusTerms[0]
self.HpTextGenerator.setText(hpGainedStr)
self.HpTextGenerator.clearShadow()
self.HpTextGenerator.setAlign(TextNode.ACenter)
if bonus == 1:
r = 1.0
g = 1.0
b = 0
a = 1
elif bonus == 2:
r = 1.0
g = 0.5
b = 0
a = 1
elif number < 0:
r = 0.9
g = 0
b = 0
a = 1
else:
r = 0
g = 0.9
b = 0
a = 1
self.HpTextGenerator.setTextColor(r, g, b, a)
self.hpTextNode = self.HpTextGenerator.generate()
self.hpText = self.attachNewNode(self.hpTextNode)
self.hpText.setScale(scale)
self.hpText.setBillboardPointEye()
self.hpText.setBin('fixed', 100)
self.hpText.setPos(0, 0, self.height / 2)
seq = Sequence(self.hpText.posInterval(1.0, Point3(0, 0, self.height + 1.5), blendType='easeOut'), Wait(0.85), self.hpText.colorInterval(0.1, Vec4(r, g, b, 0)), Func(self.hideHpText))
seq.start()
def setAnimalSound(self, index):
self.animalSound = index
def setBuffs(self, buffs):
self.buffs = buffs
self.applyBuffs()
def setRedeemedCodes(self, redeemedCodes):
self.redeemedCodes = redeemedCodes
def b_setIgnored(self, ignored):
self.setIgnored(ignored)
self.d_setIgnored(ignored)
def setIgnored(self, ignored):
self.ignored = ignored
def d_setIgnored(self, ignored):
self.sendUpdate('setIgnored', [ignored])
def isIgnored(self, doId):
return doId in self.ignored
def addIgnore(self, doId):
if not self.isIgnored(doId):
self.ignored.append(doId)
self.d_setIgnored(self.ignored)
def removeIgnore(self, doId):
if self.isIgnored(doId):
self.ignored.remove(doId)
self.d_setIgnored(self.ignored)
def setReported(self, reported):
self.reported = reported
def isReported(self, doId):
return doId in self.reported
def addReport(self, doId):
if not self.isReported(doId):
self.reported.append(doId)
def setFriendsList(self, friendsList):
DistributedPlayer.DistributedPlayer.setFriendsList(self, friendsList)
messenger.send('friendsListChanged')
Toon.reconsiderAllToonsUnderstandable()
def setTrueFriends(self, trueFriends):
self.trueFriends = trueFriends
Toon.reconsiderAllToonsUnderstandable()
messenger.send('friendsListChanged')
def isTrueFriends(self, doId):
return base.cr.wantTrueFriends() and doId in self.trueFriends
def applyBuffs(self):
for id, timestamp in enumerate(self.buffs):
if id == ToontownGlobals.BMovementSpeed:
if not timestamp:
return
if self.zoneId is None:
return
if ZoneUtil.isDynamicZone(self.zoneId):
return
if ZoneUtil.getWhereName(self.zoneId, True) not in ('playground', 'street', 'toonInterior', 'cogHQExterior', 'factoryExterior'):
return
self.controlManager.setSpeeds(
ToontownGlobals.ToonForwardSpeed * ToontownGlobals.BMovementSpeedMultiplier,
ToontownGlobals.ToonJumpForce,
ToontownGlobals.ToonReverseSpeed * ToontownGlobals.BMovementSpeedMultiplier,
ToontownGlobals.ToonRotateSpeed * ToontownGlobals.BMovementSpeedMultiplier)
def setStats(self, stats):
self.stats = stats
if self == base.localAvatar:
messenger.send('refreshStats')
def getStats(self):
return self.stats
def getStat(self, index):
return self.stats[index]
def wipeStats(self):
self.sendUpdate('wipeStats')
@magicWord(category=CATEGORY_COMMUNITY_MANAGER)
def globalTeleport():
"""
Activates the global teleport cheat.
"""
invoker = spellbook.getInvoker()
invoker.sendUpdate('setTeleportOverride', [1])
invoker.setTeleportAccess(list(ToontownGlobals.HoodsForTeleportAll))
return 'Global teleport has been activated.'
@magicWord(category=CATEGORY_ADMINISTRATOR, types=[int])
def zone(zoneId):
"""
Changes the invoker's zone ID.
"""
base.cr.sendSetZoneMsg(zoneId, [zoneId])
return 'You have been moved to zone %d.' % zoneId
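# In-game usage sketch (assuming the client's usual magic-word prefix "~"):
#   ~zone 2000
#   ~globalTeleport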
@magicWord(category=CATEGORY_ADMINISTRATOR)
def blackCat():
"""
Ask the black cat manager to turn you into a cat.
"""
base.cr.blackCatMgr.requestBlackCatTransformation()
@magicWord(category=CATEGORY_COMMUNITY_MANAGER)
def toggleGM():
invoker = spellbook.getInvoker()
if invoker.gmIcon:
invoker.setWantAdminTag(False)
invoker.removeGMIcon()
        invoker.setNametagName()  # was: setName(invoker.getName())
else:
invoker.setWantAdminTag(True)
invoker.setGMIcon(invoker.getAdminAccess())
        invoker.setNametagName()  # was: setName(invoker.getName())
@magicWord(category=CATEGORY_COMMUNITY_MANAGER, types=[str])
def showParticle(name):
"""
Shows a particle.
"""
particle = BattleParticles.createParticleEffect(name)
if particle:
particle.start(spellbook.getTarget())
return 'Successfully started particle!'
return 'Particle %s does not exist.' % name
| 37.39606
| 682
| 0.637059
|
794a86ad1b19068fe88b6ace9cb058dee06b1383
| 1,548
|
py
|
Python
|
Course I/Алгоритмы Python/Part1/семинары/pract5/task2.py
|
GeorgiyDemo/FA
|
641a29d088904302f5f2164c9b3e1f1c813849ec
|
[
"WTFPL"
] | 27
|
2019-08-18T20:54:27.000Z
|
2022-02-22T02:39:45.000Z
|
Course I/Алгоритмы Python/Part1/семинары/pract5/task2.py
|
GeorgiyDemo/FA
|
641a29d088904302f5f2164c9b3e1f1c813849ec
|
[
"WTFPL"
] | 217
|
2019-09-22T14:43:25.000Z
|
2022-03-30T13:49:18.000Z
|
Course I/Алгоритмы Python/Part1/семинары/pract5/task2.py
|
GeorgiyDemo/FA
|
641a29d088904302f5f2164c9b3e1f1c813849ec
|
[
"WTFPL"
] | 42
|
2019-09-18T11:36:28.000Z
|
2022-03-19T18:43:00.000Z
|
"""
Реализовать создание, запись, чтение и удаление файла с данными о пользователе.
пользователь выбирает действие самостоятельно, а так же указывает путь к размещению файла.
"""
import os
class FileProcessing:
def __init__(self):
select_d = {
"1": self.file_add,
"2": self.file_remove,
"3": self.file_read,
"4": self.file_write,
}
self.file_name = input("Введите название файла для записи -> ")
input_str = ""
while input_str != "0":
            input_str = input(
                "1. Create file\n2. Delete file\n3. Read from file\n4. Write to file\n0. Exit\n-> "
            )
if input_str in select_d:
select_d[input_str]()
elif input_str != "0":
print("Нет введёного пункта меню")
def file_add(self):
f = open(self.file_name, "w")
f.close()
def file_remove(self):
os.remove(self.file_name)
def file_write(self):
"""
Запись исходного выражения в файл
"""
user_info = input("Введите строку для записи -> ")
with open(self.file_name, "w") as f:
f.write(user_info)
def file_read(self):
"""
Чтение файла self.file_name
"""
try:
with open(self.file_name, "r") as f:
print(f.read())
except FileNotFoundError:
print("Ошибка чтения файла. Файла не существует")
if __name__ == "__main__":
FileProcessing()
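# Usage sketch: running this module starts an interactive menu loop; entering
# "1" creates the file, "4" writes a line to it, "3" prints its contents back,
# "2" deletes it, and "0" exits.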
| 26.689655
| 108
| 0.552326
|
794a874f39d0e04a0a1e09083feb6bcc7e5b79aa
| 1,075
|
py
|
Python
|
hackerearth/Algorithms/Benny and Segments/solution.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | 4
|
2020-07-24T01:59:50.000Z
|
2021-07-24T15:14:08.000Z
|
hackerearth/Algorithms/Benny and Segments/solution.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | null | null | null |
hackerearth/Algorithms/Benny and Segments/solution.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | null | null | null |
"""
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
t = int(input())
for _ in range(t):
n, l = map(int, input().strip().split())
a = []
for _ in range(n):
xl, xr = map(int, input().strip().split())
if xr - xl <= l: # 1 ≤ Xl ≤ Xr ≤ 10^6
a.append((xl, xr))
a.sort()
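    # Greedy check: for each candidate left endpoint a[i][0], extend coverage
    # with overlapping segments until the point a[i][0] + l is reached exactly;
    # segments longer than l were already filtered out above.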
ln = len(a)
    for i in range(ln):
max_right = a[i][0] + l
curr_right = a[i][1]
if curr_right == max_right:
print('Yes')
break
elif curr_right > max_right:
continue
else:
for j in range(i + 1, ln):
if a[j][0] <= curr_right and a[j][1] <= max_right:
curr_right = max(curr_right, a[j][1])
if curr_right == max_right:
print('Yes')
break
else:
print('No')
| 28.289474
| 94
| 0.484651
|
794a87eac30404525fcf9dfdaf9b2d888681dcb2
| 2,574
|
py
|
Python
|
demos/test_GP_active.py
|
tsilifis/quinoa
|
cc01e942e0453ad4ec21da6223731745ec543371
|
[
"MIT"
] | 1
|
2020-01-31T02:42:20.000Z
|
2020-01-31T02:42:20.000Z
|
demos/test_GP_active.py
|
tsilifis/quinoa
|
cc01e942e0453ad4ec21da6223731745ec543371
|
[
"MIT"
] | null | null | null |
demos/test_GP_active.py
|
tsilifis/quinoa
|
cc01e942e0453ad4ec21da6223731745ec543371
|
[
"MIT"
] | null | null | null |
import numpy as np
import quinoa as qu
import matplotlib.pyplot as plt
import GPy as gpy
from scipy import linalg
#np.random.seed(31051985)
X = np.random.normal(scale = 1, size = (2,1))
#X = np.array([0, 1.]).reshape((2,1))
Y = np.sin(X)# + 0.1 * np.random.normal(size = (3,1))
kern = qu.RBF(1, 1, 4)
ker = gpy.kern.RBF(1, 1, 1)
m = gpy.models.GPRegression(X, Y, ker)
gp = qu.GP(X, Y, kern)
x = np.linspace(-4., 4., 100).reshape(100,1)
f, var = gp.predict(x)
#x0 = np.array([np.random.normal( size = (2,))]).reshape((2,1))
#fig = plt.figure(tight_layout = True)
#ax = fig.add_subplot(111)
#ax.plot(x, f, '-')
#ax.fill_between(x[:,0], f - 2*np.sqrt(np.diag(var)), f + 2*np.sqrt(np.diag(var)), alpha = 0.5)
#ax.plot(X[:,0], Y[:,0], 'x')
#ax.set_xlim([-4, 4])
#plt.show()
#m.optimize(messages = True)
#print '-' * 30
#print m.kern.lengthscale[0], m.kern.variance[0], m.likelihood.gaussian_variance()[0]
#print '-' * 30
#m.plot()
#plt.show()
#print gp._kern._iso
gp.optimize()
f, var = gp.predict(x)
fig1 = plt.figure()
ax2 = fig1.add_subplot(111)
ax2.plot(x, f, '-')
ax2.fill_between(x[:,0], f - 2*np.sqrt(np.diag(var)), f + 2*np.sqrt(np.diag(var)), alpha = 0.5)
ax2.plot(X[:,0], Y[:,0], 'x')
plt.show()
#
N_points = 15
sig = np.zeros(N_points)
sig_noise = np.zeros(N_points)
ell = np.zeros(N_points)
sig[0] = gp._kern._var
sig_noise[0] = gp._noise_var
ell[0] = gp._kern._lengthscale[0]
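# Active-learning loop: repeatedly query the input where the GP's predictive
# variance is largest (gp.argmaxvar()), evaluate sin() there, refit the GP on
# the augmented data, and record the re-optimized hyperparameters.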
for i in range(N_points-1):
x_new = gp.argmaxvar()
    print('New design :' + str(x_new))
    print(x_new.shape)
y_new = np.sin(x_new)# + 0.1 * np.random.normal(size = (1,1))
X = np.vstack([X, x_new])
Y = np.vstack([Y, y_new])
gp_new = qu.GP(X, Y, kern)
gp_new.optimize()
#gp_new._kern._lengthscale
sig[i+1] = gp_new._kern._var
sig_noise[i+1] = gp_new._noise_var
ell[i+1] = gp_new._kern._lengthscale[0]
gp = gp_new
#print gp._log_marginal_likelihood
#print m._log_marginal_likelihood
x = np.linspace(np.min([x.min(), x_new[0]]), np.max([x.max(), x_new[0]]), 100).reshape(100,1)
f, var = gp_new.predict(x)
fig1 = plt.figure()
ax2 = fig1.add_subplot(111)
ax2.plot(x, f, '-')
ax2.fill_between(x[:,0], f - 2*np.sqrt(np.diag(var)), f + 2*np.sqrt(np.diag(var)), alpha = 0.5)
ax2.plot(X[:,0], Y[:,0], 'x')
plt.show()
np.save('sig.npy', sig)
np.save('sig_noise.npy', sig_noise)
np.save('ell.npy', ell)
np.save('X.npy', X)
np.save('Y.npy', Y)
#print gp.log_marginal_likelihood(np.array([m.kern.lengthscale[0], m.kern.variance[0], m.likelihood.gaussian_variance()[0]]))
| 26
| 125
| 0.637141
|
794a881c2b9fa070a844e9d52d866808566e8671
| 5,056
|
py
|
Python
|
src/pythonfinder/models/windows.py
|
awkimball/pythonfinder
|
d7af08e07cbb1d79f078d8c0b6aab80d320e7b74
|
[
"MIT"
] | 34
|
2018-06-10T19:09:42.000Z
|
2022-02-25T21:34:08.000Z
|
src/pythonfinder/models/windows.py
|
awkimball/pythonfinder
|
d7af08e07cbb1d79f078d8c0b6aab80d320e7b74
|
[
"MIT"
] | 92
|
2018-07-08T00:03:47.000Z
|
2022-02-21T14:25:50.000Z
|
src/pythonfinder/models/windows.py
|
awkimball/pythonfinder
|
d7af08e07cbb1d79f078d8c0b6aab80d320e7b74
|
[
"MIT"
] | 17
|
2018-10-12T08:35:20.000Z
|
2022-03-09T16:51:10.000Z
|
# -*- coding=utf-8 -*-
from __future__ import absolute_import, print_function
import operator
from collections import defaultdict
import attr
from ..environment import MYPY_RUNNING
from ..exceptions import InvalidPythonVersion
from ..utils import ensure_path
from .mixins import BaseFinder
from .path import PathEntry
from .python import PythonVersion, VersionMap
if MYPY_RUNNING:
from typing import DefaultDict, Tuple, List, Optional, Union, TypeVar, Type, Any
FinderType = TypeVar("FinderType")
@attr.s
class WindowsFinder(BaseFinder):
paths = attr.ib(default=attr.Factory(list), type=list)
version_list = attr.ib(default=attr.Factory(list), type=list)
_versions = attr.ib() # type: DefaultDict[Tuple, PathEntry]
_pythons = attr.ib() # type: DefaultDict[str, PathEntry]
def find_all_python_versions(
self,
major=None, # type: Optional[Union[str, int]]
minor=None, # type: Optional[int]
patch=None, # type: Optional[int]
pre=None, # type: Optional[bool]
dev=None, # type: Optional[bool]
arch=None, # type: Optional[str]
name=None, # type: Optional[str]
):
        # type: (...) -> List[PathEntry]
version_matcher = operator.methodcaller(
"matches", major, minor, patch, pre, dev, arch, python_name=name
)
pythons = [py for py in self.version_list if version_matcher(py)]
version_sort = operator.attrgetter("version_sort")
return [
c.comes_from for c in sorted(pythons, key=version_sort, reverse=True)
if c.comes_from
]
def find_python_version(
self,
major=None, # type: Optional[Union[str, int]]
minor=None, # type: Optional[int]
patch=None, # type: Optional[int]
pre=None, # type: Optional[bool]
dev=None, # type: Optional[bool]
arch=None, # type: Optional[str]
name=None, # type: Optional[str]
):
# type: (...) -> Optional[PathEntry]
return next(
iter(
v
for v in self.find_all_python_versions(
major=major,
minor=minor,
patch=patch,
pre=pre,
dev=dev,
arch=arch,
name=name,
)
),
None,
)
@_versions.default
def get_versions(self):
# type: () -> DefaultDict[Tuple, PathEntry]
versions = defaultdict(PathEntry) # type: DefaultDict[Tuple, PathEntry]
from pythonfinder._vendor.pep514tools import environment as pep514env
env_versions = pep514env.findall()
path = None
for version_object in env_versions:
install_path = getattr(version_object.info, "install_path", None)
name = getattr(version_object, "tag", None)
company = getattr(version_object, "company", None)
if install_path is None:
continue
try:
path = ensure_path(install_path.__getattr__(""))
except AttributeError:
continue
if not path.exists():
continue
try:
py_version = PythonVersion.from_windows_launcher(
version_object, name=name, company=company
)
except (InvalidPythonVersion, AttributeError):
continue
if py_version is None:
continue
self.version_list.append(py_version)
python_path = (
py_version.comes_from.path
if py_version.comes_from
else py_version.executable
)
python_kwargs = {python_path: py_version} if python_path is not None else {}
base_dir = PathEntry.create(
path, is_root=True, only_python=True, pythons=python_kwargs
)
versions[py_version.version_tuple[:5]] = base_dir
self.paths.append(base_dir)
return versions
@property
def versions(self):
# type: () -> DefaultDict[Tuple, PathEntry]
if not self._versions:
self._versions = self.get_versions()
return self._versions
@_pythons.default
def get_pythons(self):
# type: () -> DefaultDict[str, PathEntry]
pythons = defaultdict() # type: DefaultDict[str, PathEntry]
        for version in self.version_list:
            # Versions without a concrete source entry cannot be mapped to a path.
            if version.comes_from is None:
                continue
            _path = ensure_path(version.comes_from.path)
            pythons[_path.as_posix()] = version.comes_from
return pythons
@property
def pythons(self):
# type: () -> DefaultDict[str, PathEntry]
return self._pythons
@pythons.setter
def pythons(self, value):
# type: (DefaultDict[str, PathEntry]) -> None
self._pythons = value
@classmethod
def create(cls, *args, **kwargs):
# type: (Type[FinderType], Any, Any) -> FinderType
return cls()
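# Example (hypothetical usage): locate the newest CPython 3 registered via
# the PEP 514 registry entries that get_versions() enumerates.
#   finder = WindowsFinder.create()
#   best_match = finder.find_python_version(major=3)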
| 33.932886
| 88
| 0.58307
|
794a886cbcac419c244d5345f4c774d6c473dc7c
| 3,838
|
py
|
Python
|
converter.py
|
coloz/IC2BA
|
ee5d55b36db31184d6465328f3a4a6a47199d36e
|
[
"Apache-2.0"
] | 3
|
2021-01-13T07:58:46.000Z
|
2022-01-04T09:30:53.000Z
|
converter.py
|
coloz/IC2BA
|
ee5d55b36db31184d6465328f3a4a6a47199d36e
|
[
"Apache-2.0"
] | null | null | null |
converter.py
|
coloz/IC2BA
|
ee5d55b36db31184d6465328f3a4a6a47199d36e
|
[
"Apache-2.0"
] | 1
|
2021-01-13T07:58:48.000Z
|
2021-01-13T07:58:48.000Z
|
'''
2017.2.5 chenlvzhou
'''
from PIL import Image
import argparse
def load_image(filename, target_width, target_height):
"""
Loads an image, resized it to the target dimensions and returns it's data.
"""
image = Image.open(filename, 'r')
image = image.resize((target_width, target_height), Image.NEAREST)
image_data = image.load()
return image.size[0], image.size[1], image_data
def get_pixel_intensity(pixel, invert=False, max_value=255):
"""
Gets the average intensity of a pixel.
"""
intensity = 0
# Pixel is multi channel
if type(pixel) is list or type(pixel) is tuple:
for channel_intensity in pixel:
intensity += channel_intensity
intensity /= len(pixel)
# Pixel is single channel
elif type(pixel) is int or type(pixel) is float:
intensity = pixel
# Pixel is magic
else:
raise ValueError('Not a clue what format the pixel data is: ' + str(type(pixel)))
if invert:
return max_value - intensity
else:
return intensity
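# e.g. get_pixel_intensity((255, 0, 0)) returns the channel mean 85
# (85.0 under Python 3's true division); with invert=True it returns 255 - 85 = 170.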
def get_average_pixel_intensity(width, height, pixel_data, invert):
"""
Gets the average intensity over all pixels.
"""
avg_intensity = 0
for x_idx in range(0, width):
for y_idx in range(0, height):
avg_intensity += get_pixel_intensity(pixel_data[x_idx, y_idx], invert)
avg_intensity = avg_intensity / (width * height)
return avg_intensity
def reverse_bit(dat):
res = 0
for i in range(0, 8, 1):
res = res << 1
res = res | (dat & 1)
dat = dat >> 1
return res
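# e.g. reverse_bit(0b00000001) == 0b10000000 (1 -> 128): the eight bits of
# `dat` are emitted in reverse order, as needed for big-endian bit packing.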
def output_image_c_array(width, height, pixel_data, crossover, invert, endian):
    """
    Outputs the data in a C bitmap array format.
    """
    print('//http://clz.me\nconst unsigned char arduino[] = {')
    for y_idx in range(0, height):
        next_line = ''
        next_value = 0
        for x_idx in range(0, width):
            if get_pixel_intensity(pixel_data[x_idx, y_idx], invert) > crossover:
                next_value += 2 ** (7 - (x_idx % 8))
            # Flush a completed byte, or the final (possibly partial) byte of the row.
            if x_idx % 8 == 7 or x_idx == width - 1:
                if endian == 'big':
                    next_value = reverse_bit(next_value)
                next_line += str('0x%0.2X' % next_value).lower() + ","
                next_value = 0
        print(next_line)
    print('};')
def convert(params):
"""
Runs an image conversion.
"""
width, height, image_data = load_image(params.image, params.width, params.height)
if params.threshold == 0:
crossover_intensity = get_average_pixel_intensity(width, height, image_data, params.invert)
else:
crossover_intensity = params.threshold
output_image_c_array(width, height, image_data, crossover_intensity, params.invert, params.endian)
def run():
"""
Gets parameters and runs conversion.
"""
parser = argparse.ArgumentParser(description='Convert a bitmap image to a C array for LCD / OLED')
parser.add_argument(
'-i', '--invert',
action='store_true',
help='Invert image intensity')
parser.add_argument(
'-t','--threshold',
default=0,
type=int,
help='BW pixel intensity threshold')
parser.add_argument(
'--width',
default=128,
type=int,
help='Width of the output image')
parser.add_argument(
'--height',
default=64,
type=int,
help='Height of the output image')
parser.add_argument(
'-f', '--image',
type=str,
help='Image file to convert')
parser.add_argument(
'-e', '--endian',
default='big',
type=str,
help='Byte order')
params = parser.parse_args()
convert(params)
if __name__ == '__main__':
run()
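# Example invocation (hypothetical file names):
#   python converter.py -f logo.png --width 128 --height 64 -t 128 > logo_array.c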
| 25.25
| 102
| 0.600573
|
794a8974433bd0a52593c15f034d0639762c450e
| 106,699
|
py
|
Python
|
py-tnt/pytnt/shipmentresponseout.py
|
QwadwoNyamekye/purplship-carriers
|
ce34e3054de246e3d85ddf6928b607193d061ae2
|
[
"MIT"
] | null | null | null |
py-tnt/pytnt/shipmentresponseout.py
|
QwadwoNyamekye/purplship-carriers
|
ce34e3054de246e3d85ddf6928b607193d061ae2
|
[
"MIT"
] | null | null | null |
py-tnt/pytnt/shipmentresponseout.py
|
QwadwoNyamekye/purplship-carriers
|
ce34e3054de246e3d85ddf6928b607193d061ae2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated Thu Jun 14 23:10:43 2018 by generateDS.py version 2.29.14.
# Python 3.6.5 (default, May 19 2018, 11:27:13) [GCC 4.2.1 Compatible Apple LLVM 9.0.0 (clang-900.0.39.2)]
#
# Command line options:
# ('--no-namespace-defs', '')
# ('-o', '../pytnt/shipmentresponseout.py')
#
# Command line arguments:
# ././shipmentresponseout.xsd
#
# Command line:
# /Users/danielkobina/Documents/Open/bin/generateDS --no-namespace-defs -o "../pytnt/shipmentresponseout.py" ././shipmentresponseout.xsd
#
# Current working directory (os.getcwd()):
# schemas
#
import sys
import re as re_
import base64
import datetime as datetime_
import warnings as warnings_
try:
from lxml import etree as etree_
except ImportError:
from xml.etree import ElementTree as etree_
Validate_simpletypes_ = True
if sys.version_info.major == 2:
BaseStrType_ = basestring
else:
BaseStrType_ = str
def parsexml_(infile, parser=None, **kwargs):
if parser is None:
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
try:
parser = etree_.ETCompatXMLParser()
except AttributeError:
# fallback to xml.etree
parser = etree_.XMLParser()
doc = etree_.parse(infile, parser=parser, **kwargs)
return doc
def parsexmlstring_(instring, parser=None, **kwargs):
if parser is None:
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
try:
parser = etree_.ETCompatXMLParser()
except AttributeError:
# fallback to xml.etree
parser = etree_.XMLParser()
element = etree_.fromstring(instring, parser=parser, **kwargs)
return element
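# Example (sketch): parse a raw response string and populate the generated
# `document` class defined below (`response_text` is a placeholder name):
#   root = parsexmlstring_(response_text)
#   doc = document.factory().build(root)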
#
# Namespace prefix definition table (and other attributes, too)
#
# The module generatedsnamespaces, if it is importable, must contain
# a dictionary named GeneratedsNamespaceDefs. This Python dictionary
# should map element type names (strings) to XML schema namespace prefix
# definitions. The export method for any class for which there is
# a namespace prefix definition, will export that definition in the
# XML representation of that element. See the export method of
# any generated element type class for a example of the use of this
# table.
# A sample table is:
#
# # File: generatedsnamespaces.py
#
# GenerateDSNamespaceDefs = {
# "ElementtypeA": "http://www.xxx.com/namespaceA",
# "ElementtypeB": "http://www.xxx.com/namespaceB",
# }
#
try:
from generatedsnamespaces import GenerateDSNamespaceDefs as GenerateDSNamespaceDefs_
except ImportError:
GenerateDSNamespaceDefs_ = {}
#
# The root super-class for element type classes
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError as exp:
class GeneratedsSuper(object):
tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')
class _FixedOffsetTZ(datetime_.tzinfo):
def __init__(self, offset, name):
self.__offset = datetime_.timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return None
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_validate_string(self, input_data, node=None, input_name=''):
if not input_data:
return ''
else:
return input_data
def gds_format_base64(self, input_data, input_name=''):
return base64.b64encode(input_data)
def gds_validate_base64(self, input_data, node=None, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_validate_integer(self, input_data, node=None, input_name=''):
return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_integer_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
int(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of integers')
return values
def gds_format_float(self, input_data, input_name=''):
return ('%.15f' % input_data).rstrip('0')
def gds_validate_float(self, input_data, node=None, input_name=''):
return input_data
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_float_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of floats')
return values
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
def gds_validate_double(self, input_data, node=None, input_name=''):
return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_double_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of doubles')
return values
def gds_format_boolean(self, input_data, input_name=''):
return ('%s' % input_data).lower()
def gds_validate_boolean(self, input_data, node=None, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_boolean_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
raise_parse_error(
node,
'Requires sequence of booleans '
'("true", "1", "false", "0")')
return values
def gds_validate_datetime(self, input_data, node=None, input_name=''):
return input_data
def gds_format_datetime(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
@classmethod
def gds_parse_datetime(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
time_parts = input_data.split('.')
if len(time_parts) > 1:
micro_seconds = int(float('0.' + time_parts[1]) * 1000000)
input_data = '%s.%s' % (time_parts[0], micro_seconds, )
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt
def gds_validate_date(self, input_data, node=None, input_name=''):
return input_data
def gds_format_date(self, input_data, input_name=''):
_svalue = '%04d-%02d-%02d' % (
input_data.year,
input_data.month,
input_data.day,
)
try:
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(
hours, minutes)
except AttributeError:
pass
return _svalue
@classmethod
def gds_parse_date(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
dt = datetime_.datetime.strptime(input_data, '%Y-%m-%d')
dt = dt.replace(tzinfo=tz)
return dt.date()
def gds_validate_time(self, input_data, node=None, input_name=''):
return input_data
def gds_format_time(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%02d:%02d:%02d' % (
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%02d:%02d:%02d.%s' % (
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
def gds_validate_simple_patterns(self, patterns, target):
# pat is a list of lists of strings/patterns. We should:
# - AND the outer elements
# - OR the inner elements
found1 = True
for patterns1 in patterns:
found2 = False
for patterns2 in patterns1:
if re_.search(patterns2, target) is not None:
found2 = True
break
if not found2:
found1 = False
break
return found1
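        # e.g. patterns=[['^[0-9]+$', '^[a-f]+$'], ['^.{2}$']] accepts a target
        # matching (all digits OR all lowercase hex) AND exactly two characters:
        # inner lists are OR-ed together, the outer list is AND-ed.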
@classmethod
def gds_parse_time(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
if len(input_data.split('.')) > 1:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt.time()
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
def get_class_obj_(self, node, default_class=None):
class_obj1 = default_class
if 'xsi' in node.nsmap:
classname = node.get('{%s}type' % node.nsmap['xsi'])
if classname is not None:
names = classname.split(':')
if len(names) == 2:
classname = names[1]
class_obj2 = globals().get(classname)
if class_obj2 is not None:
class_obj1 = class_obj2
return class_obj1
def gds_build_any(self, node, type_name=None):
return None
@classmethod
def gds_reverse_node_mapping(cls, mapping):
            return dict(((v, k) for k, v in mapping.items()))
@staticmethod
def gds_encode(instring):
if sys.version_info.major == 2:
return instring.encode(ExternalEncoding)
else:
return instring
@staticmethod
def convert_unicode(instring):
if isinstance(instring, str):
result = quote_xml(instring)
elif sys.version_info.major == 2 and isinstance(instring, unicode):
result = quote_xml(instring).encode('utf8')
else:
result = GeneratedsSuper.gds_encode(str(instring))
return result
def __eq__(self, other):
if type(self) != type(other):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
def getSubclassFromModule_(module, class_):
'''Get the subclass of a class from a specific module.'''
name = class_.__name__ + 'Sub'
if hasattr(module, name):
return getattr(module, name)
else:
return None
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = 'utf-8'
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
CDATA_pattern_ = re_.compile(r"<!\[CDATA\[.*?\]\]>", re_.DOTALL)
# Change this to redirect the generated superclass module to use a
# specific subclass module.
CurrentSubclassModule_ = None
#
# Support/utility functions.
#
def showIndent(outfile, level, pretty_print=True):
if pretty_print:
for idx in range(level):
outfile.write(' ')
def quote_xml(inStr):
"Escape markup chars, but do not modify CDATA sections."
if not inStr:
return ''
s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr)
s2 = ''
pos = 0
matchobjects = CDATA_pattern_.finditer(s1)
for mo in matchobjects:
s3 = s1[pos:mo.start()]
s2 += quote_xml_aux(s3)
s2 += s1[mo.start():mo.end()]
pos = mo.end()
s3 = s1[pos:]
s2 += quote_xml_aux(s3)
return s2
def quote_xml_aux(inStr):
    s1 = inStr.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    return s1
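# e.g. quote_xml('a < b & c') -> 'a &lt; b &amp; c', while CDATA sections such
# as '<![CDATA[raw <markup>]]>' pass through quote_xml() unmodified.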
def quote_attrib(inStr):
    s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    if '"' in s1:
        if "'" in s1:
            s1 = '"%s"' % s1.replace('"', "&quot;")
        else:
            s1 = "'%s'" % s1
    else:
        s1 = '"%s"' % s1
    return s1
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
def get_all_text_(node):
if node.text is not None:
text = node.text
else:
text = ''
for child in node:
if child.tail is not None:
text += child.tail
return text
def find_attr_value_(attr_name, node):
attrs = node.attrib
attr_parts = attr_name.split(':')
value = None
if len(attr_parts) == 1:
value = attrs.get(attr_name)
elif len(attr_parts) == 2:
prefix, name = attr_parts
namespace = node.nsmap.get(prefix)
if namespace is not None:
value = attrs.get('{%s}%s' % (namespace, name, ))
return value
class GDSParseError(Exception):
pass
def raise_parse_error(node, msg):
msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, )
raise GDSParseError(msg)
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
TypeBase64 = 8
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, outfile, level, name, namespace,
pretty_print=True):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(
outfile, level, namespace, name,
pretty_print=pretty_print)
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
outfile.write('<%s>%s</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
outfile.write('<%s>%d</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
outfile.write('<%s>%f</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
outfile.write('<%s>%g</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeBase64:
outfile.write('<%s>%s</%s>' % (
self.name,
base64.b64encode(self.value),
self.name))
def to_etree(self, element):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
if len(element) > 0:
if element[-1].tail is None:
element[-1].tail = self.value
else:
element[-1].tail += self.value
else:
if element.text is None:
element.text = self.value
else:
element.text += self.value
elif self.category == MixedContainer.CategorySimple:
subelement = etree_.SubElement(
element, '%s' % self.name)
subelement.text = self.to_etree_simple()
else: # category == MixedContainer.CategoryComplex
self.value.to_etree(element)
def to_etree_simple(self):
if self.content_type == MixedContainer.TypeString:
text = self.value
elif (self.content_type == MixedContainer.TypeInteger or
self.content_type == MixedContainer.TypeBoolean):
text = '%d' % self.value
elif (self.content_type == MixedContainer.TypeFloat or
self.content_type == MixedContainer.TypeDecimal):
text = '%f' % self.value
elif self.content_type == MixedContainer.TypeDouble:
text = '%g' % self.value
elif self.content_type == MixedContainer.TypeBase64:
text = '%s' % base64.b64encode(self.value)
return text
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type,
self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type,
self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s",\n' % (
self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
class MemberSpec_(object):
def __init__(self, name='', data_type='', container=0,
optional=0, child_attrs=None, choice=None):
self.name = name
self.data_type = data_type
self.container = container
self.child_attrs = child_attrs
self.choice = choice
self.optional = optional
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
def get_data_type_chain(self): return self.data_type
def get_data_type(self):
if isinstance(self.data_type, list):
if len(self.data_type) > 0:
return self.data_type[-1]
else:
return 'xs:string'
else:
return self.data_type
def set_container(self, container): self.container = container
def get_container(self): return self.container
def set_child_attrs(self, child_attrs): self.child_attrs = child_attrs
def get_child_attrs(self): return self.child_attrs
def set_choice(self, choice): self.choice = choice
def get_choice(self): return self.choice
def set_optional(self, optional): self.optional = optional
def get_optional(self): return self.optional
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class BOOK(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, CONSIGNMENT=None):
self.original_tagname_ = None
if CONSIGNMENT is None:
self.CONSIGNMENT = []
else:
self.CONSIGNMENT = CONSIGNMENT
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, BOOK)
if subclass is not None:
return subclass(*args_, **kwargs_)
if BOOK.subclass:
return BOOK.subclass(*args_, **kwargs_)
else:
return BOOK(*args_, **kwargs_)
factory = staticmethod(factory)
def get_CONSIGNMENT(self): return self.CONSIGNMENT
def set_CONSIGNMENT(self, CONSIGNMENT): self.CONSIGNMENT = CONSIGNMENT
def add_CONSIGNMENT(self, value): self.CONSIGNMENT.append(value)
def insert_CONSIGNMENT_at(self, index, value): self.CONSIGNMENT.insert(index, value)
def replace_CONSIGNMENT_at(self, index, value): self.CONSIGNMENT[index] = value
def hasContent_(self):
if (
self.CONSIGNMENT
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='BOOK', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('BOOK')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='BOOK')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='BOOK', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='BOOK'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='BOOK', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for CONSIGNMENT_ in self.CONSIGNMENT:
CONSIGNMENT_.export(outfile, level, namespace_, name_='CONSIGNMENT', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'CONSIGNMENT':
obj_ = CONSIGNMENT.factory()
obj_.build(child_)
self.CONSIGNMENT.append(obj_)
obj_.original_tagname_ = 'CONSIGNMENT'
# end class BOOK
class CONNOTE(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, ERROR=None, valueOf_=None, mixedclass_=None, content_=None):
self.original_tagname_ = None
if ERROR is None:
self.ERROR = []
else:
self.ERROR = ERROR
self.valueOf_ = valueOf_
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
else:
self.mixedclass_ = mixedclass_
if content_ is None:
self.content_ = []
else:
self.content_ = content_
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, CONNOTE)
if subclass is not None:
return subclass(*args_, **kwargs_)
if CONNOTE.subclass:
return CONNOTE.subclass(*args_, **kwargs_)
else:
return CONNOTE(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ERROR(self): return self.ERROR
def set_ERROR(self, ERROR): self.ERROR = ERROR
def add_ERROR(self, value): self.ERROR.append(value)
def insert_ERROR_at(self, index, value): self.ERROR.insert(index, value)
def replace_ERROR_at(self, index, value): self.ERROR[index] = value
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
self.ERROR or
(1 if type(self.valueOf_) in [int,float] else self.valueOf_)
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='CONNOTE', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('CONNOTE')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='CONNOTE')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='CONNOTE', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='CONNOTE'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='CONNOTE', fromsubclass_=False, pretty_print=True):
if not fromsubclass_:
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for ERROR_ in self.ERROR:
ERROR_.export(outfile, level, namespace_, name_='ERROR', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
self.valueOf_ = get_all_text_(node)
if node.text is not None:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', node.text)
self.content_.append(obj_)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'ERROR':
obj_ = ERROR.factory()
obj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'ERROR', obj_)
self.content_.append(obj_)
if hasattr(self, 'add_ERROR'):
self.add_ERROR(obj_.value)
elif hasattr(self, 'set_ERROR'):
self.set_ERROR(obj_.value)
if not fromsubclass_ and child_.tail is not None:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.tail)
self.content_.append(obj_)
# end class CONNOTE
class CONSIGNMENT(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, CONREF=None, CONNUMBER=None, SUCCESS=None, FIRSTTIMETRADER=None, BOOKINGREF=None):
self.original_tagname_ = None
self.CONREF = CONREF
self.CONNUMBER = CONNUMBER
self.SUCCESS = SUCCESS
self.FIRSTTIMETRADER = FIRSTTIMETRADER
self.BOOKINGREF = BOOKINGREF
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, CONSIGNMENT)
if subclass is not None:
return subclass(*args_, **kwargs_)
if CONSIGNMENT.subclass:
return CONSIGNMENT.subclass(*args_, **kwargs_)
else:
return CONSIGNMENT(*args_, **kwargs_)
factory = staticmethod(factory)
def get_CONREF(self): return self.CONREF
def set_CONREF(self, CONREF): self.CONREF = CONREF
def get_CONNUMBER(self): return self.CONNUMBER
def set_CONNUMBER(self, CONNUMBER): self.CONNUMBER = CONNUMBER
def get_SUCCESS(self): return self.SUCCESS
def set_SUCCESS(self, SUCCESS): self.SUCCESS = SUCCESS
def get_FIRSTTIMETRADER(self): return self.FIRSTTIMETRADER
def set_FIRSTTIMETRADER(self, FIRSTTIMETRADER): self.FIRSTTIMETRADER = FIRSTTIMETRADER
def get_BOOKINGREF(self): return self.BOOKINGREF
def set_BOOKINGREF(self, BOOKINGREF): self.BOOKINGREF = BOOKINGREF
def hasContent_(self):
if (
self.CONREF is not None or
self.CONNUMBER is not None or
self.SUCCESS is not None or
self.FIRSTTIMETRADER is not None or
self.BOOKINGREF is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='CONSIGNMENT', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('CONSIGNMENT')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='CONSIGNMENT')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='CONSIGNMENT', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='CONSIGNMENT'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='CONSIGNMENT', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.CONREF is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<CONREF>%s</CONREF>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.CONREF), input_name='CONREF')), eol_))
if self.CONNUMBER is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<CONNUMBER>%s</CONNUMBER>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.CONNUMBER), input_name='CONNUMBER')), eol_))
if self.SUCCESS is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<SUCCESS>%s</SUCCESS>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.SUCCESS), input_name='SUCCESS')), eol_))
if self.FIRSTTIMETRADER is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<FIRSTTIMETRADER>%s</FIRSTTIMETRADER>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.FIRSTTIMETRADER), input_name='FIRSTTIMETRADER')), eol_))
if self.BOOKINGREF is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<BOOKINGREF>%s</BOOKINGREF>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.BOOKINGREF), input_name='BOOKINGREF')), eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'CONREF':
CONREF_ = child_.text
CONREF_ = self.gds_validate_string(CONREF_, node, 'CONREF')
self.CONREF = CONREF_
elif nodeName_ == 'CONNUMBER':
CONNUMBER_ = child_.text
CONNUMBER_ = self.gds_validate_string(CONNUMBER_, node, 'CONNUMBER')
self.CONNUMBER = CONNUMBER_
elif nodeName_ == 'SUCCESS':
SUCCESS_ = child_.text
SUCCESS_ = self.gds_validate_string(SUCCESS_, node, 'SUCCESS')
self.SUCCESS = SUCCESS_
elif nodeName_ == 'FIRSTTIMETRADER':
FIRSTTIMETRADER_ = child_.text
FIRSTTIMETRADER_ = self.gds_validate_string(FIRSTTIMETRADER_, node, 'FIRSTTIMETRADER')
self.FIRSTTIMETRADER = FIRSTTIMETRADER_
elif nodeName_ == 'BOOKINGREF':
BOOKINGREF_ = child_.text
BOOKINGREF_ = self.gds_validate_string(BOOKINGREF_, node, 'BOOKINGREF')
self.BOOKINGREF = BOOKINGREF_
# end class CONSIGNMENT
class CREATE(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, CONREF=None, CONNUMBER=None, SUCCESS=None):
self.original_tagname_ = None
self.CONREF = CONREF
self.CONNUMBER = CONNUMBER
self.SUCCESS = SUCCESS
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, CREATE)
if subclass is not None:
return subclass(*args_, **kwargs_)
if CREATE.subclass:
return CREATE.subclass(*args_, **kwargs_)
else:
return CREATE(*args_, **kwargs_)
factory = staticmethod(factory)
def get_CONREF(self): return self.CONREF
def set_CONREF(self, CONREF): self.CONREF = CONREF
def get_CONNUMBER(self): return self.CONNUMBER
def set_CONNUMBER(self, CONNUMBER): self.CONNUMBER = CONNUMBER
def get_SUCCESS(self): return self.SUCCESS
def set_SUCCESS(self, SUCCESS): self.SUCCESS = SUCCESS
def hasContent_(self):
if (
self.CONREF is not None or
self.CONNUMBER is not None or
self.SUCCESS is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='CREATE', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('CREATE')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='CREATE')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='CREATE', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='CREATE'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='CREATE', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.CONREF is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<CONREF>%s</CONREF>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.CONREF), input_name='CONREF')), eol_))
if self.CONNUMBER is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<CONNUMBER>%s</CONNUMBER>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.CONNUMBER), input_name='CONNUMBER')), eol_))
if self.SUCCESS is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<SUCCESS>%s</SUCCESS>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.SUCCESS), input_name='SUCCESS')), eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'CONREF':
CONREF_ = child_.text
CONREF_ = self.gds_validate_string(CONREF_, node, 'CONREF')
self.CONREF = CONREF_
elif nodeName_ == 'CONNUMBER':
CONNUMBER_ = child_.text
CONNUMBER_ = self.gds_validate_string(CONNUMBER_, node, 'CONNUMBER')
self.CONNUMBER = CONNUMBER_
elif nodeName_ == 'SUCCESS':
SUCCESS_ = child_.text
SUCCESS_ = self.gds_validate_string(SUCCESS_, node, 'SUCCESS')
self.SUCCESS = SUCCESS_
# end class CREATE
class document(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, GROUPCODE=None, CREATE=None, RATE=None, BOOK=None, SHIP=None, PRINT=None, ERROR=None):
self.original_tagname_ = None
self.GROUPCODE = GROUPCODE
if CREATE is None:
self.CREATE = []
else:
self.CREATE = CREATE
self.RATE = RATE
self.BOOK = BOOK
self.SHIP = SHIP
self.PRINT = PRINT
if ERROR is None:
self.ERROR = []
else:
self.ERROR = ERROR
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, document)
if subclass is not None:
return subclass(*args_, **kwargs_)
if document.subclass:
return document.subclass(*args_, **kwargs_)
else:
return document(*args_, **kwargs_)
factory = staticmethod(factory)
def get_GROUPCODE(self): return self.GROUPCODE
def set_GROUPCODE(self, GROUPCODE): self.GROUPCODE = GROUPCODE
def get_CREATE(self): return self.CREATE
def set_CREATE(self, CREATE): self.CREATE = CREATE
def add_CREATE(self, value): self.CREATE.append(value)
def insert_CREATE_at(self, index, value): self.CREATE.insert(index, value)
def replace_CREATE_at(self, index, value): self.CREATE[index] = value
def get_RATE(self): return self.RATE
def set_RATE(self, RATE): self.RATE = RATE
def get_BOOK(self): return self.BOOK
def set_BOOK(self, BOOK): self.BOOK = BOOK
def get_SHIP(self): return self.SHIP
def set_SHIP(self, SHIP): self.SHIP = SHIP
def get_PRINT(self): return self.PRINT
def set_PRINT(self, PRINT): self.PRINT = PRINT
def get_ERROR(self): return self.ERROR
def set_ERROR(self, ERROR): self.ERROR = ERROR
def add_ERROR(self, value): self.ERROR.append(value)
def insert_ERROR_at(self, index, value): self.ERROR.insert(index, value)
def replace_ERROR_at(self, index, value): self.ERROR[index] = value
def hasContent_(self):
if (
self.GROUPCODE is not None or
self.CREATE or
self.RATE is not None or
self.BOOK is not None or
self.SHIP is not None or
self.PRINT is not None or
self.ERROR
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='document', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('document')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='document')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='document', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='document'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='document', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.GROUPCODE is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<GROUPCODE>%s</GROUPCODE>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.GROUPCODE), input_name='GROUPCODE')), eol_))
for CREATE_ in self.CREATE:
CREATE_.export(outfile, level, namespace_, name_='CREATE', pretty_print=pretty_print)
if self.RATE is not None:
self.RATE.export(outfile, level, namespace_, name_='RATE', pretty_print=pretty_print)
if self.BOOK is not None:
self.BOOK.export(outfile, level, namespace_, name_='BOOK', pretty_print=pretty_print)
if self.SHIP is not None:
self.SHIP.export(outfile, level, namespace_, name_='SHIP', pretty_print=pretty_print)
if self.PRINT is not None:
self.PRINT.export(outfile, level, namespace_, name_='PRINT', pretty_print=pretty_print)
for ERROR_ in self.ERROR:
ERROR_.export(outfile, level, namespace_, name_='ERROR', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'GROUPCODE':
GROUPCODE_ = child_.text
GROUPCODE_ = self.gds_validate_string(GROUPCODE_, node, 'GROUPCODE')
self.GROUPCODE = GROUPCODE_
elif nodeName_ == 'CREATE':
obj_ = CREATE.factory()
obj_.build(child_)
self.CREATE.append(obj_)
obj_.original_tagname_ = 'CREATE'
elif nodeName_ == 'RATE':
obj_ = RATE.factory()
obj_.build(child_)
self.RATE = obj_
obj_.original_tagname_ = 'RATE'
elif nodeName_ == 'BOOK':
obj_ = BOOK.factory()
obj_.build(child_)
self.BOOK = obj_
obj_.original_tagname_ = 'BOOK'
elif nodeName_ == 'SHIP':
obj_ = SHIP.factory()
obj_.build(child_)
self.SHIP = obj_
obj_.original_tagname_ = 'SHIP'
elif nodeName_ == 'PRINT':
obj_ = PRINT.factory()
obj_.build(child_)
self.PRINT = obj_
obj_.original_tagname_ = 'PRINT'
elif nodeName_ == 'ERROR':
obj_ = ERROR.factory()
obj_.build(child_)
self.ERROR.append(obj_)
obj_.original_tagname_ = 'ERROR'
# end class document
class ERROR(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, CODE=None, DESCRIPTION=None, SOURCE=None):
self.original_tagname_ = None
self.CODE = CODE
self.DESCRIPTION = DESCRIPTION
self.SOURCE = SOURCE
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, ERROR)
if subclass is not None:
return subclass(*args_, **kwargs_)
if ERROR.subclass:
return ERROR.subclass(*args_, **kwargs_)
else:
return ERROR(*args_, **kwargs_)
factory = staticmethod(factory)
def get_CODE(self): return self.CODE
def set_CODE(self, CODE): self.CODE = CODE
def get_DESCRIPTION(self): return self.DESCRIPTION
def set_DESCRIPTION(self, DESCRIPTION): self.DESCRIPTION = DESCRIPTION
def get_SOURCE(self): return self.SOURCE
def set_SOURCE(self, SOURCE): self.SOURCE = SOURCE
def hasContent_(self):
if (
self.CODE is not None or
self.DESCRIPTION is not None or
self.SOURCE is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='ERROR', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('ERROR')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ERROR')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='ERROR', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='ERROR'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='ERROR', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.CODE is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<CODE>%s</CODE>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.CODE), input_name='CODE')), eol_))
if self.DESCRIPTION is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<DESCRIPTION>%s</DESCRIPTION>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.DESCRIPTION), input_name='DESCRIPTION')), eol_))
if self.SOURCE is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<SOURCE>%s</SOURCE>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.SOURCE), input_name='SOURCE')), eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'CODE':
CODE_ = child_.text
CODE_ = self.gds_validate_string(CODE_, node, 'CODE')
self.CODE = CODE_
elif nodeName_ == 'DESCRIPTION':
DESCRIPTION_ = child_.text
DESCRIPTION_ = self.gds_validate_string(DESCRIPTION_, node, 'DESCRIPTION')
self.DESCRIPTION = DESCRIPTION_
elif nodeName_ == 'SOURCE':
SOURCE_ = child_.text
SOURCE_ = self.gds_validate_string(SOURCE_, node, 'SOURCE')
self.SOURCE = SOURCE_
# end class ERROR
class INVOICE(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, ERROR=None, valueOf_=None, mixedclass_=None, content_=None):
self.original_tagname_ = None
if ERROR is None:
self.ERROR = []
else:
self.ERROR = ERROR
self.valueOf_ = valueOf_
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
else:
self.mixedclass_ = mixedclass_
if content_ is None:
self.content_ = []
else:
self.content_ = content_
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, INVOICE)
if subclass is not None:
return subclass(*args_, **kwargs_)
if INVOICE.subclass:
return INVOICE.subclass(*args_, **kwargs_)
else:
return INVOICE(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ERROR(self): return self.ERROR
def set_ERROR(self, ERROR): self.ERROR = ERROR
def add_ERROR(self, value): self.ERROR.append(value)
def insert_ERROR_at(self, index, value): self.ERROR.insert(index, value)
def replace_ERROR_at(self, index, value): self.ERROR[index] = value
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
self.ERROR or
(1 if type(self.valueOf_) in [int,float] else self.valueOf_)
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='INVOICE', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('INVOICE')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='INVOICE')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='INVOICE', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='INVOICE'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='INVOICE', fromsubclass_=False, pretty_print=True):
if not fromsubclass_:
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for ERROR_ in self.ERROR:
ERROR_.export(outfile, level, namespace_, name_='ERROR', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
self.valueOf_ = get_all_text_(node)
if node.text is not None:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', node.text)
self.content_.append(obj_)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'ERROR':
obj_ = ERROR.factory()
obj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'ERROR', obj_)
self.content_.append(obj_)
if hasattr(self, 'add_ERROR'):
self.add_ERROR(obj_.value)
elif hasattr(self, 'set_ERROR'):
self.set_ERROR(obj_.value)
if not fromsubclass_ and child_.tail is not None:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.tail)
self.content_.append(obj_)
# end class INVOICE
class LABEL(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, ERROR=None, valueOf_=None, mixedclass_=None, content_=None):
self.original_tagname_ = None
if ERROR is None:
self.ERROR = []
else:
self.ERROR = ERROR
self.valueOf_ = valueOf_
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
else:
self.mixedclass_ = mixedclass_
if content_ is None:
self.content_ = []
else:
self.content_ = content_
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, LABEL)
if subclass is not None:
return subclass(*args_, **kwargs_)
if LABEL.subclass:
return LABEL.subclass(*args_, **kwargs_)
else:
return LABEL(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ERROR(self): return self.ERROR
def set_ERROR(self, ERROR): self.ERROR = ERROR
def add_ERROR(self, value): self.ERROR.append(value)
def insert_ERROR_at(self, index, value): self.ERROR.insert(index, value)
def replace_ERROR_at(self, index, value): self.ERROR[index] = value
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
self.ERROR or
(1 if type(self.valueOf_) in [int,float] else self.valueOf_)
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='LABEL', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('LABEL')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='LABEL')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='LABEL', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='LABEL'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='LABEL', fromsubclass_=False, pretty_print=True):
if not fromsubclass_:
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for ERROR_ in self.ERROR:
ERROR_.export(outfile, level, namespace_, name_='ERROR', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
self.valueOf_ = get_all_text_(node)
if node.text is not None:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', node.text)
self.content_.append(obj_)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'ERROR':
obj_ = ERROR.factory()
obj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'ERROR', obj_)
self.content_.append(obj_)
if hasattr(self, 'add_ERROR'):
self.add_ERROR(obj_.value)
elif hasattr(self, 'set_ERROR'):
self.set_ERROR(obj_.value)
if not fromsubclass_ and child_.tail is not None:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.tail)
self.content_.append(obj_)
# end class LABEL
class MANIFEST(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, ERROR=None, valueOf_=None, mixedclass_=None, content_=None):
self.original_tagname_ = None
if ERROR is None:
self.ERROR = []
else:
self.ERROR = ERROR
self.valueOf_ = valueOf_
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
else:
self.mixedclass_ = mixedclass_
if content_ is None:
self.content_ = []
else:
self.content_ = content_
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, MANIFEST)
if subclass is not None:
return subclass(*args_, **kwargs_)
if MANIFEST.subclass:
return MANIFEST.subclass(*args_, **kwargs_)
else:
return MANIFEST(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ERROR(self): return self.ERROR
def set_ERROR(self, ERROR): self.ERROR = ERROR
def add_ERROR(self, value): self.ERROR.append(value)
def insert_ERROR_at(self, index, value): self.ERROR.insert(index, value)
def replace_ERROR_at(self, index, value): self.ERROR[index] = value
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
self.ERROR or
(1 if type(self.valueOf_) in [int,float] else self.valueOf_)
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='MANIFEST', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('MANIFEST')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='MANIFEST')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='MANIFEST', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='MANIFEST'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='MANIFEST', fromsubclass_=False, pretty_print=True):
if not fromsubclass_:
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for ERROR_ in self.ERROR:
ERROR_.export(outfile, level, namespace_, name_='ERROR', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
self.valueOf_ = get_all_text_(node)
if node.text is not None:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', node.text)
self.content_.append(obj_)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'ERROR':
obj_ = ERROR.factory()
obj_.build(child_)
obj_ = self.mixedclass_(MixedContainer.CategoryComplex,
MixedContainer.TypeNone, 'ERROR', obj_)
self.content_.append(obj_)
if hasattr(self, 'add_ERROR'):
self.add_ERROR(obj_.value)
elif hasattr(self, 'set_ERROR'):
self.set_ERROR(obj_.value)
if not fromsubclass_ and child_.tail is not None:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.tail)
self.content_.append(obj_)
# end class MANIFEST
class parse_error(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, error_reason=None, error_line=None, error_linepos=None, error_srcText=None):
self.original_tagname_ = None
self.error_reason = error_reason
self.error_line = error_line
self.error_linepos = error_linepos
self.error_srcText = error_srcText
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, parse_error)
if subclass is not None:
return subclass(*args_, **kwargs_)
if parse_error.subclass:
return parse_error.subclass(*args_, **kwargs_)
else:
return parse_error(*args_, **kwargs_)
factory = staticmethod(factory)
def get_error_reason(self): return self.error_reason
def set_error_reason(self, error_reason): self.error_reason = error_reason
def get_error_line(self): return self.error_line
def set_error_line(self, error_line): self.error_line = error_line
def get_error_linepos(self): return self.error_linepos
def set_error_linepos(self, error_linepos): self.error_linepos = error_linepos
def get_error_srcText(self): return self.error_srcText
def set_error_srcText(self, error_srcText): self.error_srcText = error_srcText
def hasContent_(self):
if (
self.error_reason is not None or
self.error_line is not None or
self.error_linepos is not None or
self.error_srcText is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='parse_error', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('parse_error')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='parse_error')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='parse_error', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='parse_error'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='parse_error', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.error_reason is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<error_reason>%s</error_reason>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.error_reason), input_name='error_reason')), eol_))
if self.error_line is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<error_line>%s</error_line>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.error_line), input_name='error_line')), eol_))
if self.error_linepos is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<error_linepos>%s</error_linepos>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.error_linepos), input_name='error_linepos')), eol_))
if self.error_srcText is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<error_srcText>%s</error_srcText>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.error_srcText), input_name='error_srcText')), eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'error_reason':
error_reason_ = child_.text
error_reason_ = self.gds_validate_string(error_reason_, node, 'error_reason')
self.error_reason = error_reason_
elif nodeName_ == 'error_line':
error_line_ = child_.text
error_line_ = self.gds_validate_string(error_line_, node, 'error_line')
self.error_line = error_line_
elif nodeName_ == 'error_linepos':
error_linepos_ = child_.text
error_linepos_ = self.gds_validate_string(error_linepos_, node, 'error_linepos')
self.error_linepos = error_linepos_
elif nodeName_ == 'error_srcText':
error_srcText_ = child_.text
error_srcText_ = self.gds_validate_string(error_srcText_, node, 'error_srcText')
self.error_srcText = error_srcText_
# end class parse_error
class PRICE(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, RATEID=None, SERVICE=None, SERVICEDESC=None, OPTION=None, OPTIONDESC=None, CURRENCY=None, RATE=None, RESULT=None):
self.original_tagname_ = None
self.RATEID = RATEID
self.SERVICE = SERVICE
self.SERVICEDESC = SERVICEDESC
self.OPTION = OPTION
self.OPTIONDESC = OPTIONDESC
self.CURRENCY = CURRENCY
self.RATE = RATE
self.RESULT = RESULT
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, PRICE)
if subclass is not None:
return subclass(*args_, **kwargs_)
if PRICE.subclass:
return PRICE.subclass(*args_, **kwargs_)
else:
return PRICE(*args_, **kwargs_)
factory = staticmethod(factory)
def get_RATEID(self): return self.RATEID
def set_RATEID(self, RATEID): self.RATEID = RATEID
def get_SERVICE(self): return self.SERVICE
def set_SERVICE(self, SERVICE): self.SERVICE = SERVICE
def get_SERVICEDESC(self): return self.SERVICEDESC
def set_SERVICEDESC(self, SERVICEDESC): self.SERVICEDESC = SERVICEDESC
def get_OPTION(self): return self.OPTION
def set_OPTION(self, OPTION): self.OPTION = OPTION
def get_OPTIONDESC(self): return self.OPTIONDESC
def set_OPTIONDESC(self, OPTIONDESC): self.OPTIONDESC = OPTIONDESC
def get_CURRENCY(self): return self.CURRENCY
def set_CURRENCY(self, CURRENCY): self.CURRENCY = CURRENCY
def get_RATE(self): return self.RATE
def set_RATE(self, RATE): self.RATE = RATE
def get_RESULT(self): return self.RESULT
def set_RESULT(self, RESULT): self.RESULT = RESULT
def hasContent_(self):
if (
self.RATEID is not None or
self.SERVICE is not None or
self.SERVICEDESC is not None or
self.OPTION is not None or
self.OPTIONDESC is not None or
self.CURRENCY is not None or
self.RATE is not None or
self.RESULT is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='PRICE', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('PRICE')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='PRICE')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='PRICE', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='PRICE'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='PRICE', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.RATEID is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<RATEID>%s</RATEID>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.RATEID), input_name='RATEID')), eol_))
if self.SERVICE is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<SERVICE>%s</SERVICE>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.SERVICE), input_name='SERVICE')), eol_))
if self.SERVICEDESC is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<SERVICEDESC>%s</SERVICEDESC>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.SERVICEDESC), input_name='SERVICEDESC')), eol_))
if self.OPTION is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<OPTION>%s</OPTION>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.OPTION), input_name='OPTION')), eol_))
if self.OPTIONDESC is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<OPTIONDESC>%s</OPTIONDESC>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.OPTIONDESC), input_name='OPTIONDESC')), eol_))
if self.CURRENCY is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<CURRENCY>%s</CURRENCY>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.CURRENCY), input_name='CURRENCY')), eol_))
if self.RATE is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<RATE>%s</RATE>%s' % (self.gds_format_float(self.RATE, input_name='RATE'), eol_))
if self.RESULT is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<RESULT>%s</RESULT>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.RESULT), input_name='RESULT')), eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'RATEID':
RATEID_ = child_.text
RATEID_ = self.gds_validate_string(RATEID_, node, 'RATEID')
self.RATEID = RATEID_
elif nodeName_ == 'SERVICE':
SERVICE_ = child_.text
SERVICE_ = self.gds_validate_string(SERVICE_, node, 'SERVICE')
self.SERVICE = SERVICE_
elif nodeName_ == 'SERVICEDESC':
SERVICEDESC_ = child_.text
SERVICEDESC_ = self.gds_validate_string(SERVICEDESC_, node, 'SERVICEDESC')
self.SERVICEDESC = SERVICEDESC_
elif nodeName_ == 'OPTION':
OPTION_ = child_.text
OPTION_ = self.gds_validate_string(OPTION_, node, 'OPTION')
self.OPTION = OPTION_
elif nodeName_ == 'OPTIONDESC':
OPTIONDESC_ = child_.text
OPTIONDESC_ = self.gds_validate_string(OPTIONDESC_, node, 'OPTIONDESC')
self.OPTIONDESC = OPTIONDESC_
elif nodeName_ == 'CURRENCY':
CURRENCY_ = child_.text
CURRENCY_ = self.gds_validate_string(CURRENCY_, node, 'CURRENCY')
self.CURRENCY = CURRENCY_
elif nodeName_ == 'RATE':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'RATE')
self.RATE = fval_
elif nodeName_ == 'RESULT':
RESULT_ = child_.text
RESULT_ = self.gds_validate_string(RESULT_, node, 'RESULT')
self.RESULT = RESULT_
# end class PRICE
class PRINT(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, ERROR=None, CONNOTE=None, LABEL=None, MANIFEST=None, INVOICE=None, EMAILTO=None, EMAILFROM=None):
self.original_tagname_ = None
if ERROR is None:
self.ERROR = []
else:
self.ERROR = ERROR
self.CONNOTE = CONNOTE
self.LABEL = LABEL
self.MANIFEST = MANIFEST
self.INVOICE = INVOICE
self.EMAILTO = EMAILTO
self.EMAILFROM = EMAILFROM
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, PRINT)
if subclass is not None:
return subclass(*args_, **kwargs_)
if PRINT.subclass:
return PRINT.subclass(*args_, **kwargs_)
else:
return PRINT(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ERROR(self): return self.ERROR
def set_ERROR(self, ERROR): self.ERROR = ERROR
def add_ERROR(self, value): self.ERROR.append(value)
def insert_ERROR_at(self, index, value): self.ERROR.insert(index, value)
def replace_ERROR_at(self, index, value): self.ERROR[index] = value
def get_CONNOTE(self): return self.CONNOTE
def set_CONNOTE(self, CONNOTE): self.CONNOTE = CONNOTE
def get_LABEL(self): return self.LABEL
def set_LABEL(self, LABEL): self.LABEL = LABEL
def get_MANIFEST(self): return self.MANIFEST
def set_MANIFEST(self, MANIFEST): self.MANIFEST = MANIFEST
def get_INVOICE(self): return self.INVOICE
def set_INVOICE(self, INVOICE): self.INVOICE = INVOICE
def get_EMAILTO(self): return self.EMAILTO
def set_EMAILTO(self, EMAILTO): self.EMAILTO = EMAILTO
def get_EMAILFROM(self): return self.EMAILFROM
def set_EMAILFROM(self, EMAILFROM): self.EMAILFROM = EMAILFROM
def hasContent_(self):
if (
self.ERROR or
self.CONNOTE is not None or
self.LABEL is not None or
self.MANIFEST is not None or
self.INVOICE is not None or
self.EMAILTO is not None or
self.EMAILFROM is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='PRINT', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('PRINT')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='PRINT')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='PRINT', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='PRINT'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='PRINT', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for ERROR_ in self.ERROR:
ERROR_.export(outfile, level, namespace_, name_='ERROR', pretty_print=pretty_print)
if self.CONNOTE is not None:
self.CONNOTE.export(outfile, level, namespace_, name_='CONNOTE', pretty_print=pretty_print)
if self.LABEL is not None:
self.LABEL.export(outfile, level, namespace_, name_='LABEL', pretty_print=pretty_print)
if self.MANIFEST is not None:
self.MANIFEST.export(outfile, level, namespace_, name_='MANIFEST', pretty_print=pretty_print)
if self.INVOICE is not None:
self.INVOICE.export(outfile, level, namespace_, name_='INVOICE', pretty_print=pretty_print)
if self.EMAILTO is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<EMAILTO>%s</EMAILTO>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.EMAILTO), input_name='EMAILTO')), eol_))
if self.EMAILFROM is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<EMAILFROM>%s</EMAILFROM>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.EMAILFROM), input_name='EMAILFROM')), eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'ERROR':
obj_ = ERROR.factory()
obj_.build(child_)
self.ERROR.append(obj_)
obj_.original_tagname_ = 'ERROR'
elif nodeName_ == 'CONNOTE':
obj_ = CONNOTE.factory()
obj_.build(child_)
self.CONNOTE = obj_
obj_.original_tagname_ = 'CONNOTE'
elif nodeName_ == 'LABEL':
obj_ = LABEL.factory()
obj_.build(child_)
self.LABEL = obj_
obj_.original_tagname_ = 'LABEL'
elif nodeName_ == 'MANIFEST':
obj_ = MANIFEST.factory()
obj_.build(child_)
self.MANIFEST = obj_
obj_.original_tagname_ = 'MANIFEST'
elif nodeName_ == 'INVOICE':
obj_ = INVOICE.factory()
obj_.build(child_)
self.INVOICE = obj_
obj_.original_tagname_ = 'INVOICE'
elif nodeName_ == 'EMAILTO':
EMAILTO_ = child_.text
EMAILTO_ = self.gds_validate_string(EMAILTO_, node, 'EMAILTO')
self.EMAILTO = EMAILTO_
elif nodeName_ == 'EMAILFROM':
EMAILFROM_ = child_.text
EMAILFROM_ = self.gds_validate_string(EMAILFROM_, node, 'EMAILFROM')
self.EMAILFROM = EMAILFROM_
# end class PRINT
class RATE(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, ERROR=None, PRICE=None):
self.original_tagname_ = None
if ERROR is None:
self.ERROR = []
else:
self.ERROR = ERROR
if PRICE is None:
self.PRICE = []
else:
self.PRICE = PRICE
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, RATE)
if subclass is not None:
return subclass(*args_, **kwargs_)
if RATE.subclass:
return RATE.subclass(*args_, **kwargs_)
else:
return RATE(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ERROR(self): return self.ERROR
def set_ERROR(self, ERROR): self.ERROR = ERROR
def add_ERROR(self, value): self.ERROR.append(value)
def insert_ERROR_at(self, index, value): self.ERROR.insert(index, value)
def replace_ERROR_at(self, index, value): self.ERROR[index] = value
def get_PRICE(self): return self.PRICE
def set_PRICE(self, PRICE): self.PRICE = PRICE
def add_PRICE(self, value): self.PRICE.append(value)
def insert_PRICE_at(self, index, value): self.PRICE.insert(index, value)
def replace_PRICE_at(self, index, value): self.PRICE[index] = value
def hasContent_(self):
if (
self.ERROR or
self.PRICE
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='RATE', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('RATE')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='RATE')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='RATE', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='RATE'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='RATE', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for ERROR_ in self.ERROR:
ERROR_.export(outfile, level, namespace_, name_='ERROR', pretty_print=pretty_print)
for PRICE_ in self.PRICE:
PRICE_.export(outfile, level, namespace_, name_='PRICE', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'ERROR':
obj_ = ERROR.factory()
obj_.build(child_)
self.ERROR.append(obj_)
obj_.original_tagname_ = 'ERROR'
elif nodeName_ == 'PRICE':
obj_ = PRICE.factory()
obj_.build(child_)
self.PRICE.append(obj_)
obj_.original_tagname_ = 'PRICE'
# end class RATE
class runtime_error(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, error_reason=None, error_srcText=None):
self.original_tagname_ = None
self.error_reason = error_reason
self.error_srcText = error_srcText
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, runtime_error)
if subclass is not None:
return subclass(*args_, **kwargs_)
if runtime_error.subclass:
return runtime_error.subclass(*args_, **kwargs_)
else:
return runtime_error(*args_, **kwargs_)
factory = staticmethod(factory)
def get_error_reason(self): return self.error_reason
def set_error_reason(self, error_reason): self.error_reason = error_reason
def get_error_srcText(self): return self.error_srcText
def set_error_srcText(self, error_srcText): self.error_srcText = error_srcText
def hasContent_(self):
if (
self.error_reason is not None or
self.error_srcText is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='runtime_error', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('runtime_error')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='runtime_error')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='runtime_error', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='runtime_error'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='runtime_error', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.error_reason is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<error_reason>%s</error_reason>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.error_reason), input_name='error_reason')), eol_))
if self.error_srcText is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<error_srcText>%s</error_srcText>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.error_srcText), input_name='error_srcText')), eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'error_reason':
error_reason_ = child_.text
error_reason_ = self.gds_validate_string(error_reason_, node, 'error_reason')
self.error_reason = error_reason_
elif nodeName_ == 'error_srcText':
error_srcText_ = child_.text
error_srcText_ = self.gds_validate_string(error_srcText_, node, 'error_srcText')
self.error_srcText = error_srcText_
# end class runtime_error
class SHIP(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, ERROR=None, CONSIGNMENT=None):
self.original_tagname_ = None
if ERROR is None:
self.ERROR = []
else:
self.ERROR = ERROR
if CONSIGNMENT is None:
self.CONSIGNMENT = []
else:
self.CONSIGNMENT = CONSIGNMENT
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, SHIP)
if subclass is not None:
return subclass(*args_, **kwargs_)
if SHIP.subclass:
return SHIP.subclass(*args_, **kwargs_)
else:
return SHIP(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ERROR(self): return self.ERROR
def set_ERROR(self, ERROR): self.ERROR = ERROR
def add_ERROR(self, value): self.ERROR.append(value)
def insert_ERROR_at(self, index, value): self.ERROR.insert(index, value)
def replace_ERROR_at(self, index, value): self.ERROR[index] = value
def get_CONSIGNMENT(self): return self.CONSIGNMENT
def set_CONSIGNMENT(self, CONSIGNMENT): self.CONSIGNMENT = CONSIGNMENT
def add_CONSIGNMENT(self, value): self.CONSIGNMENT.append(value)
def insert_CONSIGNMENT_at(self, index, value): self.CONSIGNMENT.insert(index, value)
def replace_CONSIGNMENT_at(self, index, value): self.CONSIGNMENT[index] = value
def hasContent_(self):
if (
self.ERROR or
self.CONSIGNMENT
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='SHIP', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('SHIP')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='SHIP')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='SHIP', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='SHIP'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='SHIP', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for ERROR_ in self.ERROR:
ERROR_.export(outfile, level, namespace_, name_='ERROR', pretty_print=pretty_print)
for CONSIGNMENT_ in self.CONSIGNMENT:
CONSIGNMENT_.export(outfile, level, namespace_, name_='CONSIGNMENT', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'ERROR':
obj_ = ERROR.factory()
obj_.build(child_)
self.ERROR.append(obj_)
obj_.original_tagname_ = 'ERROR'
elif nodeName_ == 'CONSIGNMENT':
obj_ = CONSIGNMENT.factory()
obj_.build(child_)
self.CONSIGNMENT.append(obj_)
obj_.original_tagname_ = 'CONSIGNMENT'
# end class SHIP
GDSClassesMapping = {
}
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
print(USAGE_TEXT)
sys.exit(1)
def get_root_tag(node):
tag = Tag_pattern_.match(node.tag).groups()[-1]
rootClass = GDSClassesMapping.get(tag)
if rootClass is None:
rootClass = globals().get(tag)
return tag, rootClass
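# A minimal sketch (illustrative only) of the dispatch above; `rootNode` is
# assumed to come from parsexml_ as in parse() below:
#
#     tag, cls = get_root_tag(rootNode)    # e.g. ('SHIP', <class 'SHIP'>)
#     obj = cls.factory().build(rootNode)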
def parse(inFileName, silence=False):
parser = None
doc = parsexml_(inFileName, parser)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'BOOK'
rootClass = BOOK
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
if not silence:
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(
sys.stdout, 0, name_=rootTag,
namespacedef_='',
pretty_print=True)
return rootObj
def parseEtree(inFileName, silence=False):
parser = None
doc = parsexml_(inFileName, parser)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'BOOK'
rootClass = BOOK
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
mapping = {}
rootElement = rootObj.to_etree(None, name_=rootTag, mapping_=mapping)
reverse_mapping = rootObj.gds_reverse_node_mapping(mapping)
if not silence:
content = etree_.tostring(
rootElement, pretty_print=True,
xml_declaration=True, encoding="utf-8")
sys.stdout.write(content.decode("utf-8"))
sys.stdout.write('\n')
return rootObj, rootElement, mapping, reverse_mapping
def parseString(inString, silence=False):
'''Parse a string, create the object tree, and export it.
Arguments:
- inString -- A string. This XML fragment should not start
with an XML declaration containing an encoding.
- silence -- A boolean. If False, export the object.
Returns -- The root object in the tree.
'''
parser = None
rootNode = parsexmlstring_(inString, parser)
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'BOOK'
rootClass = BOOK
rootObj = rootClass.factory()
rootObj.build(rootNode)
if not silence:
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(
sys.stdout, 0, name_=rootTag,
namespacedef_='')
return rootObj
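# A minimal usage sketch for parseString (assumption: `xml_payload` is a
# response document without an XML declaration naming an encoding):
#
#     xml_payload = '<document><ERROR><CODE>42</CODE></ERROR></document>'
#     root = parseString(xml_payload, silence=True)
#     for error in root.get_ERROR():
#         print(error.get_CODE())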
def parseLiteral(inFileName, silence=False):
parser = None
doc = parsexml_(inFileName, parser)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'BOOK'
rootClass = BOOK
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
if not silence:
sys.stdout.write('#from shipmentresponseout import *\n\n')
sys.stdout.write('import shipmentresponseout as model_\n\n')
sys.stdout.write('rootObj = model_.rootClass(\n')
rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
sys.stdout.write(')\n')
return rootObj
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
__all__ = [
"BOOK",
"CONNOTE",
"CONSIGNMENT",
"CREATE",
"ERROR",
"INVOICE",
"LABEL",
"MANIFEST",
"PRICE",
"PRINT",
"RATE",
"SHIP",
"document",
"parse_error",
"runtime_error"
]
# --- app/utils/class_schedule/__init__.py (Hansybx/guohe3, Apache-2.0) ---
"""
-*- coding: utf-8 -*-
Time : 2019/7/12 16:53
Author : Hansybx
"""
# --- homeassistant/components/homematicip_cloud/climate.py (petewill/home-assistant, Apache-2.0) ---
"""Support for HomematicIP Cloud climate devices."""
import logging
from homematicip.aio.device import AsyncHeatingThermostat, AsyncHeatingThermostatCompact
from homematicip.aio.group import AsyncHeatingGroup
from homematicip.aio.home import AsyncHome
from homematicip.base.enums import AbsenceType
from homematicip.functionalHomes import IndoorClimateHome
from homeassistant.components.climate import ClimateDevice
from homeassistant.components.climate.const import (
HVAC_MODE_AUTO,
HVAC_MODE_HEAT,
PRESET_AWAY,
PRESET_BOOST,
PRESET_ECO,
PRESET_NONE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS
from homeassistant.core import HomeAssistant
from . import DOMAIN as HMIPC_DOMAIN, HMIPC_HAPID, HomematicipGenericDevice
_LOGGER = logging.getLogger(__name__)
HMIP_AUTOMATIC_CM = "AUTOMATIC"
HMIP_MANUAL_CM = "MANUAL"
HMIP_ECO_CM = "ECO"
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the HomematicIP Cloud climate devices."""
pass
async def async_setup_entry(
hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the HomematicIP climate from a config entry."""
home = hass.data[HMIPC_DOMAIN][config_entry.data[HMIPC_HAPID]].home
devices = []
for device in home.groups:
if isinstance(device, AsyncHeatingGroup):
devices.append(HomematicipHeatingGroup(home, device))
if devices:
async_add_entities(devices)
class HomematicipHeatingGroup(HomematicipGenericDevice, ClimateDevice):
"""Representation of a HomematicIP heating group."""
def __init__(self, home: AsyncHome, device) -> None:
"""Initialize heating group."""
device.modelType = "Group-Heating"
self._simple_heating = None
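# Groups that do not report a temperature of their own fall back to the
# valve temperature of the first thermostat (see current_temperature below).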
if device.actualTemperature is None:
self._simple_heating = _get_first_heating_thermostat(device)
super().__init__(home, device)
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_PRESET_MODE | SUPPORT_TARGET_TEMPERATURE
@property
def target_temperature(self) -> float:
"""Return the temperature we try to reach."""
return self._device.setPointTemperature
@property
def current_temperature(self) -> float:
"""Return the current temperature."""
if self._simple_heating:
return self._simple_heating.valveActualTemperature
return self._device.actualTemperature
@property
def current_humidity(self) -> int:
"""Return the current humidity."""
return self._device.humidity
@property
def hvac_mode(self) -> str:
"""Return hvac operation ie. heat, cool mode.
Need to be one of HVAC_MODE_*.
"""
if self._device.boostMode:
return HVAC_MODE_AUTO
if self._device.controlMode == HMIP_MANUAL_CM:
return HVAC_MODE_HEAT
return HVAC_MODE_AUTO
@property
def hvac_modes(self):
"""Return the list of available hvac operation modes.
Needs to be a subset of HVAC_MODES.
"""
return [HVAC_MODE_AUTO, HVAC_MODE_HEAT]
@property
def preset_mode(self):
"""Return the current preset mode, e.g., home, away, temp.
Requires SUPPORT_PRESET_MODE.
"""
if self._device.boostMode:
return PRESET_BOOST
if self._device.controlMode == HMIP_ECO_CM:
absence_type = self._home.get_functionalHome(IndoorClimateHome).absenceType
if absence_type == AbsenceType.VACATION:
return PRESET_AWAY
if absence_type in [
AbsenceType.PERIOD,
AbsenceType.PERMANENT,
AbsenceType.PARTY,
]:
return PRESET_ECO
return PRESET_NONE
@property
def preset_modes(self):
"""Return a list of available preset modes.
Requires SUPPORT_PRESET_MODE.
"""
return [PRESET_NONE, PRESET_BOOST]
@property
def min_temp(self) -> float:
"""Return the minimum temperature."""
return self._device.minTemperature
@property
def max_temp(self) -> float:
"""Return the maximum temperature."""
return self._device.maxTemperature
async def async_set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
await self._device.set_point_temperature(temperature)
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
if hvac_mode == HVAC_MODE_AUTO:
await self._device.set_control_mode(HMIP_AUTOMATIC_CM)
else:
await self._device.set_control_mode(HMIP_MANUAL_CM)
async def async_set_preset_mode(self, preset_mode: str) -> None:
"""Set new preset mode."""
if self._device.boostMode and preset_mode != PRESET_BOOST:
await self._device.set_boost(False)
if preset_mode == PRESET_BOOST:
await self._device.set_boost()
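# Illustrative call sequence (hedged; `group` stands for an entity created in
# async_setup_entry above):
#
#     await group.async_set_preset_mode(PRESET_BOOST)  # turn boost on
#     await group.async_set_preset_mode(PRESET_NONE)   # leave boost again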
def _get_first_heating_thermostat(heating_group: AsyncHeatingGroup):
"""Return the first HeatingThermostat from a HeatingGroup."""
for device in heating_group.devices:
if isinstance(device, (AsyncHeatingThermostat, AsyncHeatingThermostatCompact)):
return device
return None
# --- ppapi/ppapi_tests_mojo.gyp (hefen1/chromium, BSD-3-Clause) ---
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'includes': [
'ppapi_nacl_test_common.gypi',
],
'targets': [
{
'target_name': 'ppapi_tests_mojo',
'type': 'none',
'variables': {
'nexe_target': 'ppapi_tests_mojo',
# Only the pnacl toolchain can be used with mojo dependencies
# currently.
'build_newlib': 0,
'build_glibc': 0,
'build_pnacl_newlib': 1,
# TODO(teravest): Build a translated nexe as well.
'nexe_destination_dir': 'test_data/ppapi/tests/mojo',
'sources': [
'tests/mojo/test_mojo.cc',
'tests/mojo/test_mojo.h',
# Test support files.
'tests/test_case.cc',
'tests/test_case.h',
'tests/test_utils.cc',
'tests/test_utils.h',
'tests/testing_instance.cc',
'tests/testing_instance.h',
],
'link_flags': [
'-lmojo',
'-limc_syscalls',
'-lppapi_cpp',
'-lppapi',
],
},
'dependencies': [
'../mojo/mojo_nacl_untrusted.gyp:libmojo',
'../mojo/mojo_nacl.gyp:monacl_codegen',
'../native_client/src/untrusted/nacl/nacl.gyp:imc_syscalls_lib',
'../third_party/mojo/mojo_public.gyp:mojo_system_placeholder',
'native_client/native_client.gyp:ppapi_lib',
'ppapi_nacl.gyp:ppapi_cpp_lib',
],
},
],
}
# --- examples/example_dates_range_str.py (jeff00seattle/pyfortified-datetime, MIT) ---
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
from pprintpp import pprint
import datetime as dt
import pyfortified_datetime
start_dt = dt.date(2015, 12, 20)
end_dt = dt.date(2016, 1, 11)
print(type(pyfortified_datetime.dates_range(start_dt, end_dt)))
for day in pyfortified_datetime.dates_range(start_dt, end_dt):
    pprint(day.strftime("%Y-%m-%d"))
# --- dataclasses_avroschema/fields.py (slawak/dataclasses-avroschema, MIT) ---
import abc
import collections
import dataclasses
import datetime
import json
import typing
import uuid
from collections import OrderedDict
import inflect
from dataclasses_avroschema import schema_generator, types, utils
p = inflect.engine()
BOOLEAN = "boolean"
NULL = "null"
INT = "int"
FLOAT = "float"
LONG = "long"
BYTES = "bytes"
STRING = "string"
ARRAY = "array"
ENUM = "enum"
MAP = "map"
FIXED = "fixed"
DATE = "date"
TIME_MILLIS = "time-millis"
TIMESTAMP_MILLIS = "timestamp-millis"
UUID = "uuid"
LOGICAL_DATE = {"type": INT, "logicalType": DATE}
LOGICAL_TIME = {"type": INT, "logicalType": TIME_MILLIS}
LOGICAL_DATETIME = {"type": LONG, "logicalType": TIMESTAMP_MILLIS}
LOGICAL_UUID = {"type": STRING, "logicalType": UUID}
PYTHON_TYPE_TO_AVRO = {
bool: BOOLEAN,
type(None): NULL,
int: INT,
float: FLOAT,
bytes: BYTES,
str: STRING,
list: {"type": ARRAY},
tuple: {"type": ENUM},
dict: {"type": MAP},
types.Fixed: {"type": FIXED},
datetime.date: {"type": INT, "logicalType": DATE},
datetime.time: {"type": INT, "logicalType": TIME_MILLIS},
datetime.datetime: {"type": LONG, "logicalType": TIMESTAMP_MILLIS},
uuid.uuid4: {"type": STRING, "logicalType": UUID},
}
# excluding tuple because is a container
PYTHON_INMUTABLE_TYPES = (str, int, bool, float, bytes, type(None))
PYTHON_PRIMITIVE_CONTAINERS = (list, tuple, dict)
PYTHON_LOGICAL_TYPES = (datetime.date, datetime.time, datetime.datetime, uuid.uuid4)
PYTHON_PRIMITIVE_TYPES = PYTHON_INMUTABLE_TYPES + PYTHON_PRIMITIVE_CONTAINERS
PRIMITIVE_AND_LOGICAL_TYPES = PYTHON_INMUTABLE_TYPES + PYTHON_LOGICAL_TYPES
PythonPrimitiveTypes = typing.Union[str, int, bool, float, list, tuple, dict]
@dataclasses.dataclass
class BaseField:
avro_type: typing.ClassVar
name: str
type: typing.Any # store the python primitive type
default: typing.Any = dataclasses.MISSING
metadata: typing.Dict = dataclasses.MISSING
@staticmethod
def _get_self_reference_type(a_type):
internal_type = a_type.__args__[0]
return internal_type.__forward_arg__
@staticmethod
def get_singular_name(name):
singular = p.singular_noun(name)
if singular:
return singular
return name
def get_metadata(self) -> typing.List[typing.Tuple[str, str]]:
meta_data_for_template = []
try:
metadata = dict(self.metadata)
for name, value in metadata.items():
meta_data_for_template.append((name, value))
except (ValueError, TypeError):
return meta_data_for_template
return meta_data_for_template
def render(self) -> OrderedDict:
"""
        Render the field based on the Avro field.
        At least it will have name and type.
        returns:
            OrderedDict(
                ("name", "a name"),
                ("type", "a type"),
                ("default", "default value")
            )
        The default key is optional.
        If self.type is:
            * list, the OrderedDict will contain the key items inside type
            * tuple, the OrderedDict will contain the key symbols inside type
            * dict, the OrderedDict will contain the key values inside type
"""
template = OrderedDict(
[("name", self.name), ("type", self.get_avro_type())] + self.get_metadata()
)
default = self.get_default_value()
if default is not None:
template["default"] = default
return template
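    # A hypothetical rendering sketch (field name invented for illustration,
    # using the StringField subclass defined below):
    #   StringField(name="title", type=str, default="hi", metadata={}).render()
    #   -> OrderedDict([("name", "title"), ("type", ["string", "null"]), ("default", "hi")])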
def get_default_value(self):
if self.default is not dataclasses.MISSING:
if self.default is None:
return NULL
if self.validate_default():
return self.default
def validate_default(self):
msg = f"Invalid default type. Default should be {self.type}"
assert isinstance(self.default, self.type), msg
return True
def to_json(self) -> str:
return json.dumps(self.render(), indent=2)
def to_dict(self) -> dict:
return json.loads(self.to_json())
@abc.abstractmethod
def get_avro_type(self):
... # pragma: no cover
class InmutableField(BaseField):
def get_avro_type(self) -> PythonPrimitiveTypes:
if self.default is not dataclasses.MISSING:
if self.default is not None:
return [self.avro_type, NULL]
# means that default value is None
return [NULL, self.avro_type]
return self.avro_type
@dataclasses.dataclass
class StringField(InmutableField):
avro_type: typing.ClassVar = STRING
@dataclasses.dataclass
class IntegerField(InmutableField):
avro_type: typing.ClassVar = INT
@dataclasses.dataclass
class BooleanField(InmutableField):
avro_type: typing.ClassVar = BOOLEAN
@dataclasses.dataclass
class FloatField(InmutableField):
avro_type: typing.ClassVar = FLOAT
@dataclasses.dataclass
class BytesField(InmutableField):
avro_type: typing.ClassVar = BYTES
@dataclasses.dataclass
class NoneField(InmutableField):
avro_type: typing.ClassVar = NULL
@dataclasses.dataclass
class ContainerField(BaseField):
def get_avro_type(self) -> PythonPrimitiveTypes:
avro_type = self.avro_type
avro_type["name"] = self.get_singular_name(self.name)
return avro_type
@dataclasses.dataclass
class TupleField(ContainerField):
symbols: typing.Any = None
default_factory: typing.Any = None
def __post_init__(self):
self.generate_symbols()
@property
def avro_type(self) -> typing.Dict:
return {"type": ENUM, "symbols": self.symbols}
def get_default_value(self):
return
def generate_symbols(self):
self.symbols = list(self.default)
@dataclasses.dataclass
class ListField(ContainerField):
items_type: typing.Any = None
default_factory: typing.Any = None
def __post_init__(self):
self.generate_items_type()
@property
def avro_type(self) -> typing.Dict:
return {"type": ARRAY, "items": self.items_type}
def get_default_value(self):
if self.default is not dataclasses.MISSING:
if self.default is None:
return []
elif self.default_factory not in (dataclasses.MISSING, None):
# expecting a callable
default = self.default_factory()
assert isinstance(
default, list
), f"List is required as default for field {self.name}"
logical_classes = LOGICAL_TYPES_FIELDS_CLASSES.keys()
return [
LOGICAL_TYPES_FIELDS_CLASSES[type(item)].to_logical_type(item)
if type(item) in logical_classes
else item
for item in default
]
def generate_items_type(self):
# because avro can have only one type, we take the first one
items_type = self.type.__args__[0]
if items_type in PRIMITIVE_AND_LOGICAL_TYPES:
klass = PRIMITIVE_LOGICAL_TYPES_FIELDS_CLASSES[items_type]
self.items_type = klass.avro_type
elif utils.is_self_referenced(items_type):
            # Checking for a self reference. Maybe it is a typing.ForwardRef
self.items_type = self._get_self_reference_type(items_type)
elif utils.is_union(items_type):
self.items_type = UnionField.generate_union(
items_type.__args__,
default=self.default,
default_factory=self.default_factory,
)
else:
# Is Avro Record Type
self.items_type = schema_generator.SchemaGenerator(
items_type
).avro_schema_to_python()
@dataclasses.dataclass
class DictField(ContainerField):
default_factory: typing.Any = None
values_type: typing.Any = None
def __post_init__(self):
self.generate_values_type()
@property
def avro_type(self) -> typing.Dict:
return {"type": MAP, "values": self.values_type}
def get_default_value(self):
if self.default is not dataclasses.MISSING:
if self.default is None:
return {}
elif self.default_factory not in (dataclasses.MISSING, None):
            # expecting a callable
default = self.default_factory()
assert isinstance(
default, dict
), f"Dict is required as default for field {self.name}"
logical_classes = LOGICAL_TYPES_FIELDS_CLASSES.keys()
return {
key: LOGICAL_TYPES_FIELDS_CLASSES[type(value)].to_logical_type(value)
if type(value) in logical_classes
else value
for key, value in default.items()
}
def generate_values_type(self):
"""
Process typing.Dict. Avro assumes that the key of a map is always a string,
so we take the second argument to determine the value type
"""
values_type = self.type.__args__[1]
if values_type in PRIMITIVE_AND_LOGICAL_TYPES:
klass = PRIMITIVE_LOGICAL_TYPES_FIELDS_CLASSES[values_type]
self.values_type = klass.avro_type
elif utils.is_self_referenced(values_type):
            # Checking for a self reference. Maybe it is a typing.ForwardRef
self.values_type = self._get_self_reference_type(values_type)
else:
self.values_type = schema_generator.SchemaGenerator(
values_type
).avro_schema_to_python()
@dataclasses.dataclass
class UnionField(BaseField):
default_factory: typing.Any = dataclasses.MISSING
def get_avro_type(self):
elements = self.type.__args__
return self.generate_union(
elements, default=self.default, default_factory=self.default_factory
)
@staticmethod
def generate_union(
elements: typing.List,
default: typing.Any = None,
default_factory: typing.Callable = dataclasses.MISSING,
):
"""
Generate union.
Arguments:
elements (typing.List): List of python types
default (typing.Any): Default value
            default_factory (typing.Callable): Callable to get the default value for
a list or dict type
Returns:
typing.List: List of avro types
"""
unions = []
for element in elements:
if element in PRIMITIVE_AND_LOGICAL_TYPES:
klass = PRIMITIVE_LOGICAL_TYPES_FIELDS_CLASSES[element]
union_element = klass.avro_type
else:
union_element = schema_generator.SchemaGenerator(
element
).avro_schema_to_python()
unions.append(union_element)
if default is None and default_factory is dataclasses.MISSING:
unions.insert(0, NULL)
return unions
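    # A minimal sketch of the expected output (illustrative only):
    #   UnionField.generate_union([str, int], default=None)
    #   -> ["null", "string", "int"]  # NULL is prepended because default is None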
def get_default_value(self):
if self.default is not dataclasses.MISSING:
if self.default is None:
return NULL
elif self.default_factory not in (dataclasses.MISSING, None):
            # expecting a callable
default = self.default_factory()
assert isinstance(
default, (dict, list)
), f"Dict or List is required as default for field {self.name}"
return default
@dataclasses.dataclass
class FixedField(BaseField):
def get_avro_type(self):
avro_type = {
"type": FIXED,
"name": self.get_singular_name(self.name),
"size": int(self.default.size),
}
if self.default.namespace is not None:
avro_type["namespace"] = self.default.namespace
if self.default.aliases is not None:
avro_type["aliases"] = self.default.aliases
return avro_type
def get_default_value(self):
return
@dataclasses.dataclass
class SelfReferenceField(BaseField):
def get_avro_type(self):
return self._get_self_reference_type(self.type)
def get_default_value(self):
return
class LogicalTypeField(BaseField):
def get_avro_type(self):
return self.avro_type
@dataclasses.dataclass
class DateField(LogicalTypeField):
"""
The date logical type represents a date within the calendar,
with no reference to a particular time zone or time of day.
A date logical type annotates an Avro int, where the int stores
the number of days from the unix epoch, 1 January 1970 (ISO calendar).
"""
avro_type: typing.ClassVar = {"type": INT, "logicalType": DATE}
def get_default_value(self):
if self.default is not dataclasses.MISSING:
if self.default is None:
return NULL
if self.validate_default():
                # Convert to datetime and get the number of days
return self.to_logical_type(self.default)
@staticmethod
def to_logical_type(date):
"""
        Convert to datetime and return the number of days
        from the unix epoch, 1 January 1970 (ISO calendar),
        for a given date
Arguments:
date (datetime.date)
Returns:
int
"""
date_time = datetime.datetime.combine(date, datetime.datetime.min.time())
ts = (date_time - datetime.datetime(1970, 1, 1)).total_seconds()
return int(ts / (3600 * 24))
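    # A worked example (illustrative): one day after the epoch maps to 1.
    #   DateField.to_logical_type(datetime.date(1970, 1, 2))  # -> 1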
@dataclasses.dataclass
class TimeField(LogicalTypeField):
"""
The time-millis logical type represents a time of day,
with no reference to a particular calendar,
time zone or date, with a precision of one millisecond.
A time-millis logical type annotates an Avro int,
where the int stores the number of milliseconds after midnight, 00:00:00.000.
"""
avro_type: typing.ClassVar = {"type": INT, "logicalType": TIME_MILLIS}
def get_default_value(self):
if self.default is not dataclasses.MISSING:
if self.default is None:
return NULL
if self.validate_default():
return self.to_logical_type(self.default)
@staticmethod
def to_logical_type(time):
"""
Returns the number of milliseconds after midnight, 00:00:00.000
for a given time object
Arguments:
time (datetime.time)
Returns:
int
"""
hour, minutes, seconds, microseconds = (
time.hour,
time.minute,
time.second,
time.microsecond,
)
return int(
(((hour * 60 + minutes) * 60 + seconds) * 1000) + (microseconds / 1000)
)
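    # A worked example (illustrative): 00:01:30 is 90000 ms after midnight.
    #   TimeField.to_logical_type(datetime.time(0, 1, 30))  # -> 90000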
@dataclasses.dataclass
class DatetimeField(LogicalTypeField):
"""
The timestamp-millis logical type represents an instant on the global timeline,
independent of a particular time zone or calendar, with a precision of one millisecond.
A timestamp-millis logical type annotates an Avro long,
where the long stores the number of milliseconds from the unix epoch,
1 January 1970 00:00:00.000 UTC.
"""
avro_type: typing.ClassVar = {"type": LONG, "logicalType": TIMESTAMP_MILLIS}
def get_default_value(self):
if self.default is not dataclasses.MISSING:
if self.default is None:
return NULL
if self.validate_default():
return self.to_logical_type(self.default)
@staticmethod
def to_logical_type(date_time):
"""
Returns the number of milliseconds from the unix epoch,
1 January 1970 00:00:00.000 UTC for a given datetime
Arguments:
date_time (datetime.datetime)
Returns:
float
"""
ts = (date_time - datetime.datetime(1970, 1, 1)).total_seconds()
return ts * 1000
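    # A worked example (illustrative): one second after the epoch is 1000.0 ms.
    #   DatetimeField.to_logical_type(datetime.datetime(1970, 1, 1, 0, 0, 1))  # -> 1000.0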
@dataclasses.dataclass
class UUIDField(LogicalTypeField):
avro_type: typing.ClassVar = {"type": STRING, "logicalType": UUID}
def get_default_value(self):
if self.default is not dataclasses.MISSING:
if self.default is None:
return NULL
if self.validate_default():
return self.to_logical_type(self.default)
def validate_default(self):
msg = f"Invalid default type. Default should be {str} or {uuid.UUID}"
assert isinstance(self.default, (str, uuid.UUID)), msg
return True
@staticmethod
def to_logical_type(uuid4):
return str(uuid4)
@dataclasses.dataclass
class RecordField(BaseField):
def get_avro_type(self):
return schema_generator.SchemaGenerator(self.type).avro_schema_to_python()
INMUTABLE_FIELDS_CLASSES = {
bool: BooleanField,
int: IntegerField,
float: FloatField,
bytes: BytesField,
str: StringField,
type(None): NoneField,
}
CONTAINER_FIELDS_CLASSES = {
tuple: TupleField,
list: ListField,
collections.abc.Sequence: ListField,
collections.abc.MutableSequence: ListField,
dict: DictField,
collections.abc.Mapping: DictField,
collections.abc.MutableMapping: DictField,
typing.Union: UnionField,
}
LOGICAL_TYPES_FIELDS_CLASSES = {
datetime.date: DateField,
datetime.time: TimeField,
datetime.datetime: DatetimeField,
uuid.uuid4: UUIDField,
uuid.UUID: UUIDField,
}
PRIMITIVE_LOGICAL_TYPES_FIELDS_CLASSES = {
**INMUTABLE_FIELDS_CLASSES,
**LOGICAL_TYPES_FIELDS_CLASSES,
types.Fixed: FixedField,
}
FieldType = typing.Union[
StringField,
BooleanField,
FloatField,
BytesField,
NoneField,
TupleField,
ListField,
DictField,
UnionField,
FixedField,
SelfReferenceField,
LogicalTypeField,
DateField,
TimeField,
DatetimeField,
UUIDField,
RecordField,
]
def field_factory(
name: str,
native_type: typing.Any,
default: typing.Any = dataclasses.MISSING,
default_factory: typing.Any = dataclasses.MISSING,
metadata: typing.Dict = dataclasses.MISSING,
) -> FieldType:
if native_type in PYTHON_INMUTABLE_TYPES:
klass = INMUTABLE_FIELDS_CLASSES[native_type]
return klass(name=name, type=native_type, default=default, metadata=metadata)
elif utils.is_self_referenced(native_type):
return SelfReferenceField(
name=name, type=native_type, default=default, metadata=metadata
)
elif native_type is types.Fixed:
return FixedField(
name=name, type=native_type, default=default, metadata=metadata
)
elif isinstance(native_type, typing._GenericAlias):
origin = native_type.__origin__
if origin not in (
tuple,
list,
dict,
typing.Union,
collections.abc.Sequence,
collections.abc.MutableSequence,
collections.abc.Mapping,
collections.abc.MutableMapping,
):
raise ValueError(
f"""
Invalid Type for field {name}. Accepted types are list, tuple, dict or typing.Union
"""
)
klass = CONTAINER_FIELDS_CLASSES[origin]
return klass(
name=name,
type=native_type,
default=default,
default_factory=default_factory,
metadata=metadata,
)
elif native_type in PYTHON_LOGICAL_TYPES:
klass = LOGICAL_TYPES_FIELDS_CLASSES[native_type]
return klass(name=name, type=native_type, default=default, metadata=metadata)
else:
return RecordField(
name=name, type=native_type, default=default, metadata=metadata
)
Field = field_factory
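# A minimal usage sketch (hypothetical field names, illustrative only):
#   Field("age", int, default=18)    # -> IntegerField(name="age", type=int, default=18, ...)
#   Field("tags", typing.List[str])  # -> ListField whose items_type resolves to "string"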
| 28.74818
| 99
| 0.63757
|
794a8ceb3ffab46a07f10ab701e066980aeaa1d4
| 17,175
|
py
|
Python
|
srunner/scenarios/open_scenario.py
|
auto-bwcx-me/scenario_runner
|
acf26639f97ec2ef0df121106f248285ec754eb9
|
[
"MIT"
] | null | null | null |
srunner/scenarios/open_scenario.py
|
auto-bwcx-me/scenario_runner
|
acf26639f97ec2ef0df121106f248285ec754eb9
|
[
"MIT"
] | null | null | null |
srunner/scenarios/open_scenario.py
|
auto-bwcx-me/scenario_runner
|
acf26639f97ec2ef0df121106f248285ec754eb9
|
[
"MIT"
] | 1
|
2020-12-22T09:38:13.000Z
|
2020-12-22T09:38:13.000Z
|
#!/usr/bin/env python
# Copyright (c) 2019-2020 Intel Corporation
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
Basic scenario class using the OpenSCENARIO definition
"""
import py_trees
from srunner.scenariomanager.scenarioatomics.atomic_behaviors import SetOSCInitSpeed
from srunner.scenariomanager.timer import GameTime
from srunner.scenarios.basic_scenario import BasicScenario
from srunner.tools.openscenario_parser import OpenScenarioParser
from srunner.tools.py_trees_port import Decorator, oneshot_behavior
def repeatable_behavior(behaviour, name=None):
"""
This behaviour allows a composite with oneshot ancestors to run multiple
times, resetting the oneshot variables after each execution
"""
if not name:
name = behaviour.name
clear_descendant_variables = ClearBlackboardVariablesStartingWith(
name="Clear Descendant Variables of {}".format(name),
variable_name_beginning=get_py_tree_path(behaviour) + ">"
)
# If it's a sequence, don't double-nest it in a redundant manner
if isinstance(behaviour, py_trees.composites.Sequence):
behaviour.add_child(clear_descendant_variables)
sequence = behaviour
else:
sequence = py_trees.composites.Sequence(name="RepeatableBehaviour")
sequence.add_children([behaviour, clear_descendant_variables])
return sequence
class ClearBlackboardVariablesStartingWith(py_trees.behaviours.Success):
"""
Clear the values starting with the specified string from the blackboard.
Args:
name (:obj:`str`): name of the behaviour
variable_name_beginning (:obj:`str`): beginning of the names of variable to clear
"""
def __init__(self,
name="Clear Blackboard Variable Starting With",
variable_name_beginning="dummy",
):
super(ClearBlackboardVariablesStartingWith, self).__init__(name)
self.variable_name_beginning = variable_name_beginning
def initialise(self):
"""
Delete the variables from the blackboard.
"""
blackboard_variables = [key for key, _ in py_trees.blackboard.__dict__.items(
) if key.startswith(self.variable_name_beginning)]
for variable in blackboard_variables:
delattr(py_trees.blackboard, variable)
class StoryElementStatusToBlackboard(Decorator):
"""
Reflect the status of the decorator's child story element to the blackboard.
Args:
child: the child behaviour or subtree
story_element_type: the element type [act,scene,maneuver,event,action]
element_name: the story element's name attribute
"""
def __init__(
self,
child,
story_element_type,
element_name
):
super(StoryElementStatusToBlackboard, self).__init__(name=child.name, child=child)
self.story_element_type = story_element_type
self.element_name = element_name
self.blackboard = py_trees.blackboard.Blackboard()
def initialise(self):
"""
        Record the element's start time on the blackboard
"""
self.blackboard.set(
name="({}){}-{}".format(self.story_element_type.upper(),
self.element_name, "START"),
value=GameTime.get_time(),
overwrite=True
)
def update(self):
"""
Reflect the decorated child's status
Returns: the decorated child's status
"""
return self.decorated.status
def terminate(self, new_status):
"""
Terminate and mark Blackboard entry with END
"""
# Report whether we ended with End or Cancel
# If we were ended or cancelled, our state will be INVALID and
        # we will have an ancestor (a parallel SUCCESS_ON_ALL) with a successful child/children.
        # It's possible we ENDed AND CANCELled if both condition groups were true simultaneously.
        # NOTE: 'py_trees.common.Status.INVALID' is the status of a behaviour which was terminated by a parent
rules = []
if new_status == py_trees.common.Status.INVALID:
# We were terminated from above unnaturally
            # Figure out if we were ended or cancelled
terminating_ancestor = self.parent
while terminating_ancestor.status == py_trees.common.Status.INVALID:
terminating_ancestor = terminating_ancestor.parent
            # We have found an ancestor which was not terminated by a parent
# Check what caused it to terminate its children
if terminating_ancestor.status == py_trees.common.Status.SUCCESS:
successful_children = [
child.name
for child
in terminating_ancestor.children
if child.status == py_trees.common.Status.SUCCESS]
if "EndConditions" in successful_children:
rules.append("END")
if "CancelConditions" in successful_children:
rules.append("CANCEL")
# END is the default status unless we have a more detailed one
rules = rules or ["END"]
for rule in rules:
self.blackboard.set(
name="({}){}-{}".format(self.story_element_type.upper(),
self.element_name, rule),
value=GameTime.get_time(),
overwrite=True
)
def get_py_tree_path(behaviour):
"""
Accept a behaviour/composite and return a string representation of its full path
"""
path = ""
target = behaviour
while True:
path = "{}>{}".format(target.name, path)
target = target.parent
if not target:
break
path = path[:-1]
return path
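# A hypothetical example: for a behaviour named "leaf" whose parent is "seq",
# whose parent in turn is the root "root", the returned path is "root>seq>leaf".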
class OpenScenario(BasicScenario):
"""
Implementation of the OpenSCENARIO scenario
"""
def __init__(self, world, ego_vehicles, config, config_file, debug_mode=False, criteria_enable=True, timeout=300):
"""
Setup all relevant parameters and create scenario
"""
self.config = config
self.target = None
self.route = None
self.config_file = config_file
# Timeout of scenario in seconds
self.timeout = timeout
super(OpenScenario, self).__init__("OpenScenario", ego_vehicles=ego_vehicles, config=config,
world=world, debug_mode=debug_mode,
terminate_on_failure=False, criteria_enable=criteria_enable)
def _create_init_behavior(self):
init_behavior = None
# set initial speed
for actor in self.config.other_actors:
if actor.speed > 0:
rolename = actor.rolename
init_speed = actor.speed
for carla_actor in self.other_actors:
if 'role_name' in carla_actor.attributes and carla_actor.attributes['role_name'] == rolename:
init_behavior = py_trees.composites.Parallel(
policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ALL, name="InitBehaviour")
set_init_speed = SetOSCInitSpeed(actor, init_speed)
init_behavior.add_child(set_init_speed)
return init_behavior
def _create_behavior(self):
"""
        Build the behavior tree for the OpenSCENARIO story
"""
story_behavior = py_trees.composites.Parallel(
policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ALL, name="Story")
joint_actor_list = self.other_actors + self.ego_vehicles
for act in self.config.story.iter("Act"):
act_sequence = py_trees.composites.Sequence(
name="Act StartConditions and behaviours")
start_conditions = py_trees.composites.Parallel(
policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE, name="StartConditions Group")
parallel_behavior = py_trees.composites.Parallel(
policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE, name="Maneuver + EndConditions Group")
parallel_sequences = py_trees.composites.Parallel(
policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ALL, name="Maneuvers")
for sequence in act.iter("Sequence"):
sequence_behavior = py_trees.composites.Sequence(name="Seq:" + sequence.attrib.get('name'))
repetitions = sequence.attrib.get('numberOfExecutions', 1)
actor_ids = []
for actor in sequence.iter("Actors"):
for entity in actor.iter("Entity"):
for k, _ in enumerate(joint_actor_list):
if entity.attrib.get('name', None) == joint_actor_list[k].attributes['role_name']:
actor_ids.append(k)
break
single_sequence_iteration = py_trees.composites.Parallel(
policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ALL, name=sequence.attrib.get('name'))
for maneuver in sequence.iter("Maneuver"):
maneuver_parallel = py_trees.composites.Parallel(
policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ALL,
name="Maneuver " + maneuver.attrib.get('name'))
for event in maneuver.iter("Event"):
event_sequence = py_trees.composites.Sequence(
name="Event " + event.attrib.get('name'))
parallel_actions = py_trees.composites.Parallel(
policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ALL, name="Actions")
for child in event.iter():
if child.tag == "Action":
for actor_id in actor_ids:
maneuver_behavior = OpenScenarioParser.convert_maneuver_to_atomic(
child, joint_actor_list[actor_id])
maneuver_behavior = StoryElementStatusToBlackboard(
maneuver_behavior, "ACTION", child.attrib.get('name'))
parallel_actions.add_child(
oneshot_behavior(variable_name=get_py_tree_path(maneuver_behavior),
behaviour=maneuver_behavior))
if child.tag == "StartConditions":
# There is always one StartConditions block per Event
parallel_condition_groups = self._create_condition_container(
child, "Parallel Condition Groups")
event_sequence.add_child(
parallel_condition_groups)
parallel_actions = StoryElementStatusToBlackboard(
parallel_actions, "EVENT", event.attrib.get('name'))
event_sequence.add_child(parallel_actions)
maneuver_parallel.add_child(
oneshot_behavior(variable_name=get_py_tree_path(event_sequence), behaviour=event_sequence))
maneuver_parallel = StoryElementStatusToBlackboard(
maneuver_parallel, "MANEUVER", maneuver.attrib.get('name'))
single_sequence_iteration.add_child(
oneshot_behavior(variable_name=get_py_tree_path(maneuver_parallel),
behaviour=maneuver_parallel))
# OpenSCENARIO refers to Sequences as Scenes in this instance
single_sequence_iteration = StoryElementStatusToBlackboard(
single_sequence_iteration, "SCENE", sequence.attrib.get('name'))
single_sequence_iteration = repeatable_behavior(
single_sequence_iteration)
for _ in range(int(repetitions)):
sequence_behavior.add_child(single_sequence_iteration)
if sequence_behavior.children:
parallel_sequences.add_child(
oneshot_behavior(variable_name=get_py_tree_path(sequence_behavior),
behaviour=sequence_behavior))
if parallel_sequences.children:
parallel_sequences = StoryElementStatusToBlackboard(
parallel_sequences, "ACT", act.attrib.get('name'))
parallel_behavior.add_child(parallel_sequences)
for conditions in act.iter("Conditions"):
for start_condition in conditions.iter("Start"):
parallel_start_criteria = self._create_condition_container(
start_condition, "StartConditions", oneshot=True)
if parallel_start_criteria.children:
start_conditions.add_child(parallel_start_criteria)
for end_condition in conditions.iter("End"):
parallel_end_criteria = self._create_condition_container(
end_condition, "EndConditions")
if parallel_end_criteria.children:
parallel_behavior.add_child(parallel_end_criteria)
for cancel_condition in conditions.iter("Cancel"):
parallel_cancel_criteria = self._create_condition_container(
cancel_condition, "CancelConditions")
if parallel_cancel_criteria.children:
parallel_behavior.add_child(parallel_cancel_criteria)
if start_conditions.children:
act_sequence.add_child(start_conditions)
if parallel_behavior.children:
act_sequence.add_child(parallel_behavior)
if act_sequence.children:
story_behavior.add_child(act_sequence)
# Build behavior tree
behavior = py_trees.composites.Parallel(
policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ALL, name="behavior")
init_behavior = self._create_init_behavior()
if init_behavior is not None:
behavior.add_child(oneshot_behavior(variable_name=get_py_tree_path(init_behavior), behaviour=init_behavior))
behavior.add_child(story_behavior)
return behavior
def _create_condition_container(self, node, name='Conditions Group', oneshot=False):
"""
This is a generic function to handle conditions utilising ConditionGroups
Each ConditionGroup is represented as a Sequence of Conditions
The ConditionGroups are grouped under a SUCCESS_ON_ONE Parallel
If oneshot is set to True, oneshot_behaviour will be applied to conditions
"""
parallel_condition_groups = py_trees.composites.Parallel(name,
policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE)
for condition_group in node.iter("ConditionGroup"):
condition_group_sequence = py_trees.composites.Sequence(
name="Condition Group")
for condition in condition_group.iter("Condition"):
criterion = OpenScenarioParser.convert_condition_to_atomic(
condition, self.other_actors + self.ego_vehicles)
if oneshot:
criterion = oneshot_behavior(variable_name=get_py_tree_path(criterion), behaviour=criterion)
condition_group_sequence.add_child(criterion)
if condition_group_sequence.children:
parallel_condition_groups.add_child(condition_group_sequence)
return parallel_condition_groups
def _create_test_criteria(self):
"""
A list of all test criteria will be created that is later used
        in the parallel behavior tree.
"""
parallel_criteria = py_trees.composites.Parallel("EndConditions (Criteria Group)",
policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE)
criteria = []
for endcondition in self.config.storyboard.iter("EndConditions"):
for condition in endcondition.iter("Condition"):
if condition.attrib.get('name').startswith('criteria_'):
condition.set('name', condition.attrib.get('name')[9:])
criteria.append(condition)
for condition in criteria:
criterion = OpenScenarioParser.convert_condition_to_atomic(
condition, self.ego_vehicles)
parallel_criteria.add_child(criterion)
return parallel_criteria
def __del__(self):
"""
Remove all actors upon deletion
"""
self.remove_all_actors()
| 43.925831
| 120
| 0.612402
|
794a8d0fc3822d6fef339cfadb3b3b6c1a15d1b8
| 12,015
|
py
|
Python
|
autokeras/nn/model_trainer.py
|
wpsliu123/AUTOKERAS
|
172fb3cf705126e4c3d86b41292463e30ecf3c15
|
[
"MIT"
] | null | null | null |
autokeras/nn/model_trainer.py
|
wpsliu123/AUTOKERAS
|
172fb3cf705126e4c3d86b41292463e30ecf3c15
|
[
"MIT"
] | null | null | null |
autokeras/nn/model_trainer.py
|
wpsliu123/AUTOKERAS
|
172fb3cf705126e4c3d86b41292463e30ecf3c15
|
[
"MIT"
] | null | null | null |
import os
import abc
import sys
from copy import deepcopy
from functools import reduce
import numpy as np
import torch
from torchvision import utils as vutils
from tqdm.autonotebook import tqdm
from autokeras.constant import Constant
from autokeras.utils import get_device
class ModelTrainerBase(abc.ABC):
def __init__(self,
loss_function,
train_data,
test_data=None,
metric=None,
verbose=False):
self.device = get_device()
self.metric = metric
self.verbose = verbose
self.loss_function = loss_function
self.train_loader = train_data
self.test_loader = test_data
@abc.abstractmethod
def train_model(self,
max_iter_num=Constant.MAX_ITER_NUM,
max_no_improvement_num=Constant.MAX_NO_IMPROVEMENT_NUM):
pass
class ModelTrainer(ModelTrainerBase):
"""A class that is used to train the model.
This class can train a Pytorch model with the given data loaders.
The metric, loss_function, and model must be compatible with each other.
Please see the details in the Attributes.
Attributes:
device: A string. Indicating the device to use. 'cuda' or 'cpu'.
model: An instance of Pytorch Module. The model that will be trained.
train_loader: Training data wrapped in batches in Pytorch Dataloader.
test_loader: Testing data wrapped in batches in Pytorch Dataloader.
loss_function: A function with two parameters (prediction, target).
There is no specific requirement for the types of the parameters,
as long as they are compatible with the model and the data loaders.
The prediction should be the output of the model for a batch.
The target should be a batch of targets packed in the data loaders.
        optimizer: The Pytorch Adam optimizer used to train the model.
early_stop: An instance of class EarlyStop.
metric: It should be a subclass of class autokeras.metric.Metric.
In the compute(prediction, target) function, prediction and targets are
all numpy arrays converted from the output of the model and the targets packed in the data loaders.
verbose: Verbosity mode.
"""
def __init__(self, model, path, **kwargs):
"""Init the ModelTrainer with `model`, `x_train`, `y_train`, `x_test`, `y_test`, `verbose`"""
super().__init__(**kwargs)
self.model = model
self.model.to(self.device)
self.optimizer = None
self.early_stop = None
self.current_epoch = 0
self.current_metric_value = 0
self.temp_model_path = os.path.join(path, 'temp_model')
def train_model(self,
max_iter_num=None,
max_no_improvement_num=None):
"""Train the model.
Args:
max_iter_num: An integer. The maximum number of epochs to train the model.
The training will stop when this number is reached.
max_no_improvement_num: An integer. The maximum number of epochs when the loss value doesn't decrease.
The training will stop when this number is reached.
"""
if max_iter_num is None:
max_iter_num = Constant.MAX_ITER_NUM
if max_no_improvement_num is None:
max_no_improvement_num = Constant.MAX_NO_IMPROVEMENT_NUM
self.early_stop = EarlyStop(max_no_improvement_num)
self.early_stop.on_train_begin()
test_metric_value_list = []
test_loss_list = []
self.optimizer = torch.optim.Adam(self.model.parameters())
for epoch in range(max_iter_num):
self._train()
test_loss, metric_value = self._test()
self.current_metric_value = metric_value
test_metric_value_list.append(metric_value)
test_loss_list.append(test_loss)
decreasing = self.early_stop.on_epoch_end(test_loss)
if self.early_stop.no_improvement_count == 0:
self._save_model()
if not decreasing:
if self.verbose:
print('\nNo loss decrease after {} epochs.\n'.format(max_no_improvement_num))
self._load_model()
break
last_num = min(max_no_improvement_num, max_iter_num)
return (sum(test_loss_list[-last_num:]) / last_num,
sum(test_metric_value_list[-last_num:]) / last_num)
def _train(self):
self.model.train()
loader = self.train_loader
self.current_epoch += 1
if self.verbose:
progress_bar = tqdm(total=len(loader),
desc='Epoch-'
+ str(self.current_epoch)
+ ', Current Metric - '
+ str(self.current_metric_value),
file=sys.stdout,
leave=False,
ncols=100,
position=0,
unit=' batch')
else:
progress_bar = None
for batch_idx, (inputs, targets) in enumerate(deepcopy(loader)):
inputs, targets = inputs.to(self.device), targets.to(self.device)
self.optimizer.zero_grad()
outputs = self.model(inputs)
loss = self.loss_function(outputs, targets)
loss.backward()
self.optimizer.step()
if self.verbose:
if batch_idx % 10 == 0:
progress_bar.update(10)
if self.verbose:
progress_bar.close()
def _test(self):
self.model.eval()
test_loss = 0
all_targets = []
all_predicted = []
loader = self.test_loader
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(deepcopy(loader)):
inputs, targets = inputs.to(self.device), targets.to(self.device)
outputs = self.model(inputs)
# cast tensor to float
test_loss += float(self.loss_function(outputs, targets))
all_predicted.append(outputs.cpu().numpy())
all_targets.append(targets.cpu().numpy())
all_predicted = reduce(lambda x, y: np.concatenate((x, y)), all_predicted)
all_targets = reduce(lambda x, y: np.concatenate((x, y)), all_targets)
return test_loss, self.metric.compute(all_predicted, all_targets)
def _save_model(self):
torch.save(self.model.state_dict(), self.temp_model_path)
def _load_model(self):
self.model.load_state_dict(torch.load(self.temp_model_path))
class GANModelTrainer(ModelTrainerBase):
def __init__(self,
g_model,
d_model,
train_data,
loss_function,
verbose,
gen_training_result=None):
"""Init the ModelTrainer with `model`, `x_train`, `y_train`, `x_test`, `y_test`, `verbose`"""
super().__init__(loss_function, train_data, verbose=verbose)
self.d_model = d_model
self.g_model = g_model
self.d_model.to(self.device)
self.g_model.to(self.device)
self.outf = None
self.out_size = 0
if gen_training_result is not None:
self.outf, self.out_size = gen_training_result
self.sample_noise = torch.randn(self.out_size,
self.g_model.nz,
1, 1, device=self.device)
self.optimizer_d = None
self.optimizer_g = None
def train_model(self,
max_iter_num=Constant.MAX_ITER_NUM,
max_no_improvement_num=Constant.MAX_NO_IMPROVEMENT_NUM):
self.optimizer_d = torch.optim.Adam(self.d_model.parameters())
self.optimizer_g = torch.optim.Adam(self.g_model.parameters())
if self.verbose:
progress_bar = tqdm(total=max_iter_num,
desc=' Model ',
file=sys.stdout,
ncols=75,
position=1,
unit=' epoch')
else:
progress_bar = None
for epoch in range(max_iter_num):
self._train(epoch)
if self.verbose:
progress_bar.update(1)
if self.verbose:
progress_bar.close()
def _train(self, epoch):
# put model into train mode
self.d_model.train()
# TODO: why?
cp_loader = deepcopy(self.train_loader)
if self.verbose:
progress_bar = tqdm(total=len(cp_loader),
desc='Current Epoch',
file=sys.stdout,
leave=False,
ncols=75,
position=0,
unit=' Batch')
else:
progress_bar = None
real_label = 1
fake_label = 0
for batch_idx, inputs in enumerate(cp_loader):
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
# train with real
self.optimizer_d.zero_grad()
inputs = inputs.to(self.device)
batch_size = inputs.size(0)
outputs = self.d_model(inputs)
label = torch.full((batch_size,), real_label, device=self.device)
loss_d_real = self.loss_function(outputs, label)
loss_d_real.backward()
# train with fake
noise = torch.randn((batch_size, self.g_model.nz, 1, 1,), device=self.device)
fake_outputs = self.g_model(noise)
label.fill_(fake_label)
outputs = self.d_model(fake_outputs.detach())
loss_g_fake = self.loss_function(outputs, label)
loss_g_fake.backward()
self.optimizer_d.step()
# (2) Update G network: maximize log(D(G(z)))
self.g_model.zero_grad()
label.fill_(real_label)
outputs = self.d_model(fake_outputs)
loss_g = self.loss_function(outputs, label)
loss_g.backward()
self.optimizer_g.step()
if self.verbose:
if batch_idx % 10 == 0:
progress_bar.update(10)
if self.outf is not None and batch_idx % 100 == 0:
fake = self.g_model(self.sample_noise)
vutils.save_image(
fake.detach(),
'%s/fake_samples_epoch_%03d.png' % (self.outf, epoch),
normalize=True)
if self.verbose:
progress_bar.close()
class EarlyStop:
def __init__(self, max_no_improvement_num=Constant.MAX_NO_IMPROVEMENT_NUM, min_loss_dec=Constant.MIN_LOSS_DEC):
super().__init__()
self.training_losses = []
self.minimum_loss = None
self.no_improvement_count = 0
self._max_no_improvement_num = max_no_improvement_num
self._done = False
self._min_loss_dec = min_loss_dec
def on_train_begin(self):
self.training_losses = []
self.no_improvement_count = 0
self._done = False
self.minimum_loss = float('inf')
def on_epoch_end(self, loss):
self.training_losses.append(loss)
if self._done and loss > (self.minimum_loss - self._min_loss_dec):
return False
if loss > (self.minimum_loss - self._min_loss_dec):
self.no_improvement_count += 1
else:
self.no_improvement_count = 0
self.minimum_loss = loss
if self.no_improvement_count > self._max_no_improvement_num:
self._done = True
return True
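# A minimal usage sketch (illustrative only; assumes Constant.MIN_LOSS_DEC is a
# small threshold):
#   stop = EarlyStop(max_no_improvement_num=2)
#   stop.on_train_begin()
#   for loss in [1.0, 0.9, 0.95, 0.96, 0.97, 0.98]:
#       if not stop.on_epoch_end(loss):
#           break  # stops once the loss has failed to improve often enough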
| 38.883495
| 115
| 0.57603
|
794a8d7983042049c8f1997afc63b91a188f8dba
| 2,790
|
py
|
Python
|
airflow/npmjs_dags1/npmjs_static_22.py
|
Yanivmd/maloss
|
af85ac202668da88d0b4a885386a1e56703e37c8
|
[
"MIT"
] | 1
|
2022-01-29T16:13:06.000Z
|
2022-01-29T16:13:06.000Z
|
airflow/npmjs_dags1/npmjs_static_22.py
|
Yanivmd/maloss
|
af85ac202668da88d0b4a885386a1e56703e37c8
|
[
"MIT"
] | null | null | null |
airflow/npmjs_dags1/npmjs_static_22.py
|
Yanivmd/maloss
|
af85ac202668da88d0b4a885386a1e56703e37c8
|
[
"MIT"
] | 1
|
2022-01-29T16:13:07.000Z
|
2022-01-29T16:13:07.000Z
|
import re
import pickle
import logging
import networkx
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from datetime import datetime, timedelta
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': datetime(2019, 1, 1),
'email': ['airflow@example.com'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=5),
# 'queue': 'bash_queue',
# 'pool': 'backfill',
'priority_weight': 10,
# 'end_date': datetime(2016, 1, 1),
}
# don't auto-schedule the dag
# https://airflow.readthedocs.io/en/stable/scheduler.html
dag = DAG('npmjs_static_22', default_args=default_args, schedule_interval=None)
# periodically run the dag
# dag = DAG('tutorial', default_args=default_args, schedule_interval=timedelta(days=1))
# load dep_tree for packages, relative to AIRFLOW_HOME
npmjs_dep_path = "./dags/npmjs.with_stats.dep_graph_22.pickle"
dep_tree = pickle.load(open(npmjs_dep_path, "rb"))
logging.info("loaded dep_tree with %d nodes", dep_tree.number_of_nodes())
def get_sanitized_pkgname(pkg_name):
invalid_name = re.compile(r'[^a-zA-Z0-9_.-]')
pkg_name = re.sub(invalid_name, '..', pkg_name)
return pkg_name
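# A worked example (illustrative): every character outside [a-zA-Z0-9_.-] is
# replaced with '..', so a scoped name like "@scope/pkg" becomes "..scope..pkg".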
def get_bash_op(pkg_name, dag, configpath='/home/maloss/config/astgen_javascript_smt.config', cache_dir='/home/maloss/metadata', outdir='/home/maloss/result'):
return BashOperator(
task_id=get_sanitized_pkgname(pkg_name=pkg_name),
execution_timeout=timedelta(hours=2),
bash_command='cd /home/maloss/src/ && python main.py astfilter --ignore_dep_version -n %s -c %s -d %s -o %s -l javascript' % (pkg_name, configpath, cache_dir, outdir),
dag=dag)
# all analysis jobs
# get all leaves
# https://networkx.github.io/documentation/latest/reference/algorithms/generated/networkx.algorithms.simple_paths.all_simple_paths.html
# leaves = (v for v, d in dep_tree.out_degree() if d == 0)
pkg2op = {}
for pkg in dep_tree.nodes():
pkg = str(pkg)
dep_pkgs = list(dep_tree.successors(pkg))
logging.debug("%s has %d dep_pkgs", pkg, len(dep_pkgs))
if not get_sanitized_pkgname(pkg_name=pkg):
continue
if pkg not in pkg2op:
pkg2op[pkg] = get_bash_op(pkg_name=pkg, dag=dag)
else:
continue
pkg_task = pkg2op[pkg]
dep_tasks = set()
for dep_pkg in dep_pkgs:
dep_pkg = str(dep_pkg)
# avoid cycles
if dep_pkg == pkg or not get_sanitized_pkgname(pkg_name=dep_pkg):
continue
if dep_pkg not in pkg2op:
pkg2op[dep_pkg] = get_bash_op(pkg_name=dep_pkg, dag=dag)
dep_tasks.add(pkg2op[dep_pkg])
# default trigger rule is all_success
# use all_done instead
pkg_task << list(dep_tasks)
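# A dependency-wiring sketch (hypothetical package names): if package "a"
# depends on "b" and "c" in dep_tree, then `task_a << [task_b, task_c]`
# makes b and c upstream, so a's analysis runs only after both finish.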
| 34.875
| 175
| 0.699283
|
794a8dd17a6ce4ecc2a0998beff72261f7572ba1
| 8,500
|
py
|
Python
|
typ.py
|
tomkren/TFGPy
|
24c46dac95fd4bc28a9e3cd1d988a6050266c069
|
[
"MIT"
] | 3
|
2017-04-28T11:04:49.000Z
|
2021-08-23T18:20:29.000Z
|
typ.py
|
tomkren/TFGPy
|
24c46dac95fd4bc28a9e3cd1d988a6050266c069
|
[
"MIT"
] | 7
|
2017-06-13T15:58:36.000Z
|
2017-07-03T10:07:04.000Z
|
typ.py
|
tomkren/TFGPy
|
24c46dac95fd4bc28a9e3cd1d988a6050266c069
|
[
"MIT"
] | null | null | null |
from collections import OrderedDict
from collections import namedtuple
from functools import reduce
import sub as sub_m
import utils
from utils import make_enum_table
FreshResult = namedtuple('FreshResult', ['typ', 'n'])
class Typ:
def apply_mini_sub(self, key, typ):
raise NotImplementedError
def __eq__(self, other):
if self is other:
return True
if type(other) != type(self):
return False
return self._eq_content(other)
def _eq_content(self, other):
raise NotImplementedError
def __hash__(self):
raise NotImplementedError
def gather_leaves(self, pred, make_new):
"""
:param make_new: constructor for a set-like structure,
        which needs to have an update method.
"""
raise NotImplementedError
def get_sub_keys(self):
# TODO skolem-ready
return self.get_vars()
def get_vars(self):
return self.gather_leaves(
lambda leaf: isinstance(leaf, TypVar),
lambda *args: set(args)
)
def get_next_var_id(self, acc):
raise NotImplementedError
def freshen_vars(self, n) -> FreshResult:
return FreshResult(*self._freshen_vars_acc(n, {}))
def _freshen_vars_acc(self, n, table):
raise NotImplementedError
def contains_var(self, var):
raise NotImplementedError
def apply_sub(self, sub):
raise NotImplementedError
def apply_sub_fun(self, sub_fun):
raise NotImplementedError
def skolemize(self):
acc = {}
skolemized = self._skolemize_acc(acc)
return skolemized, sub_m.Sub(acc)
def _skolemize_acc(self, acc):
raise NotImplementedError
class TypVar(Typ):
def __init__(self, name):
self.name = name
def apply_mini_sub(self, key, typ):
if self == key:
return typ
return self
def _eq_content(self, other):
return self.name == other.name
def contains_var(self, var):
return self == var
def __hash__(self):
return hash(repr(self))
def __repr__(self):
return "TypVar(%s)" % (repr(self.name))
    # TODO do this properly, the same way __eq__ is done properly !!!
def __lt__(self, other):
return str(self.name) < str(other.name)
def gather_leaves(self, pred, make_new):
if pred(self):
return make_new(self)
return make_new()
def get_next_var_id(self, acc=0):
if isinstance(self.name, int):
return max(acc, self.name + 1)
return acc
def _freshen_vars_acc(self, n, table):
new_var = table.get(self, None)
if new_var is None:
new_var = TypVar(n)
table[self] = new_var
n += 1
return new_var, n
def apply_sub(self, sub):
if self in sub.table:
return sub.table[self]
return self
def apply_sub_fun(self, sub_fun):
return sub_fun(self)
def __str__(self):
return "$%s" % self.name
def _skolemize_acc(self, acc):
ret = TypSkolem(self.name)
acc[ret] = self
return ret
class TypSymbol(Typ):
def __init__(self, name):
self.name = name
def apply_mini_sub(self, *args):
return self
def _eq_content(self, other):
return self.name == other.name
def contains_var(self, var):
return False
def __hash__(self):
return hash(repr(self))
def __repr__(self):
return "TypSymbol(%s)" % (repr(self.name))
def __str__(self):
return str(self.name)
def gather_leaves(self, pred, make_new):
if pred(self):
return make_new(self)
return make_new()
def get_next_var_id(self, acc=0):
return acc
def _freshen_vars_acc(self, n, table):
return self, n
def apply_sub(self, sub):
return self
def apply_sub_fun(self, sub_fun):
return self
def _skolemize_acc(self, acc):
return self
class TypSkolem(TypSymbol):
def __repr__(self):
return "TypSkolem(%s)" % (repr(self.name))
def __str__(self):
return "_%s" % self.name
def apply_sub(self, sub):
if self in sub.table:
return sub.table[self]
return self
def apply_sub_fun(self, sub_fun):
return sub_fun(self)
def get_next_var_id(self, acc=0):
if isinstance(self.name, int):
return max(acc, self.name + 1)
return acc
T_ARROW = TypSymbol('->')
T_INTERNAL_PAIR = TypSymbol('_P_')
INTERNAL_PAIR_CONSTRUCTOR_SYM = '_p_'
class TypTerm(Typ):
@staticmethod
def make_arrow(left, right):
return TypTerm((T_ARROW, left, right))
@staticmethod
def make_internal_pair(a, b):
return TypTerm((T_INTERNAL_PAIR, a, b))
@staticmethod
def make_internal_tuple(xs):
assert len(xs) > 0
return reduce(lambda x, y: TypTerm.make_internal_pair(y, x), xs[::-1])
@staticmethod
def is_internal_pair_typ(typ):
return isinstance(typ, TypTerm) and \
len(typ.arguments) == 3 and \
typ.arguments[0] == T_INTERNAL_PAIR
@staticmethod
def split_internal_pair_typ(typ):
return typ.arguments[1], typ.arguments[2]
def __init__(self, arguments):
assert isinstance(arguments, tuple)
assert arguments
self.arguments = arguments
if is_fun_type(self):
assert len(arguments) == 3
def apply_mini_sub(self, *args):
return TypTerm(tuple(a.apply_mini_sub(*args) for a in self.arguments))
def _eq_content(self, other):
return self.arguments == other.arguments
def contains_var(self, var):
return any(a.contains_var(var) for a in self.arguments)
def __hash__(self):
return hash(self.arguments)
def __repr__(self):
return "TypTerm(%s)" % (repr(self.arguments))
def __str__(self):
if is_fun_type(self):
op, l, r = self.arguments
return "(%s %s %s)" % (l, op, r)
return "(%s)" % " ".join(str(a) for a in self.arguments)
def gather_leaves(self, pred, make_new):
return utils.update_union((a.gather_leaves(pred, make_new) for a in self.arguments),
make_new())
def get_next_var_id(self, acc=0):
return max(a.get_next_var_id(acc) for a in self.arguments)
def _freshen_vars_acc(self, n, table):
new_arguments = []
for a in self.arguments:
new_term, n = a._freshen_vars_acc(n, table)
new_arguments.append(new_term)
return TypTerm(tuple(new_arguments)), n
def apply_sub(self, sub):
# TODO measure speedup
children = tuple(a.apply_sub(sub) for a in self.arguments)
for c, a in zip(children, self.arguments):
if id(c) != id(a):
return TypTerm(children)
return self
def apply_sub_fun(self, sub_fun):
# TODO measure speedup
children = tuple(a.apply_sub_fun(sub_fun) for a in self.arguments)
for c, a in zip(children, self.arguments):
if id(c) != id(a):
return TypTerm(children)
return self
def _skolemize_acc(self, acc):
# TODO if apply_sub is more efficient with id checks => apply it here as well
return TypTerm(tuple(a._skolemize_acc(acc) for a in self.arguments))
def is_fun_type(typ):
return isinstance(typ, TypTerm) and typ.arguments[0] == T_ARROW
def split_fun_type(typ: TypTerm):
assert is_fun_type(typ)
return typ.arguments[1], typ.arguments[2]
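# A minimal sketch (illustrative only): function types print infix, e.g.
#   str(TypTerm.make_arrow(TypVar(0), TypSymbol('Int')))  # -> '($0 -> Int)'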
def fresh(t_fresh: Typ, t_avoid: Typ, n):
n1 = t_avoid.get_next_var_id(n)
n2 = t_fresh.get_next_var_id(n1)
return t_fresh.freshen_vars(n2)
def new_var(typ: Typ, n):
n1 = typ.get_next_var_id(n)
return TypVar(n1), n1 + 1
def make_norm_bijection(typ):
# TODO SKOLEM
ordered_vars = typ.gather_leaves(
lambda leaf: isinstance(leaf, TypVar),
lambda *args: OrderedDict((a, True) for a in args)
)
proto_table = make_enum_table(ordered_vars.keys(), TypVar)
table, rev_table = utils.construct_bijection(proto_table)
return sub_m.Sub(table), sub_m.Sub(rev_table)
T_INTERNAL_PAIR_CONSTRUCTOR = TypTerm.make_arrow(TypVar(0), TypTerm.make_arrow(TypVar(1), TypTerm.make_internal_pair(TypVar(0), TypVar(1))))
| 25.993884
| 140
| 0.613412
|
794a8f101e031a66942802d854ebdd49c3c43a41
| 1,525
|
py
|
Python
|
scrapy/tutorial/tutorial/spiders/quotes.py
|
amirongit/notecode
|
3ce453531eaf4674d9a15e59510fa8dea77704fd
|
[
"Unlicense"
] | null | null | null |
scrapy/tutorial/tutorial/spiders/quotes.py
|
amirongit/notecode
|
3ce453531eaf4674d9a15e59510fa8dea77704fd
|
[
"Unlicense"
] | null | null | null |
scrapy/tutorial/tutorial/spiders/quotes.py
|
amirongit/notecode
|
3ce453531eaf4674d9a15e59510fa8dea77704fd
|
[
"Unlicense"
] | null | null | null |
import scrapy
# scrapy uses spiders to scrape information from websites
# they must define the initial requests to make
class QuotesSpider(scrapy.Spider):
# identifies the spider and must be unique within a project
name = 'quotes'
# start_requests method should return an iterable of Request objects which
# the spider will begin to crawl from
# scrapy will schedule these Request objects and for each of their
# Response objects the callback method will be called given the object
    # note the trailing underscore: this copy is disabled on purpose so that
    # the start_urls default implementation described below is used instead
    def start_requests_(self):
urls = [
'http://quotes.toscrape.com/page/1/',
'http://quotes.toscrape.com/page/2/',
]
for url in urls:
yield scrapy.Request(url=url, callback=self.parse)
# start_requests method also has a default implementation which uses
# start_urls class attribute to generate the initial iterable of Request
    # objects; the class attribute could be a list of urls
# the default callback method for the generated Request objects will be
# parse
start_urls = [
'http://quotes.toscrape.com/page/1/',
'http://quotes.toscrape.com/page/2/',
]
# the method which is called to handle the response of each request made
# always passed a TextResponse object
def parse(self, response):
page = response.url.split('/')[-2]
filename = f'quotes-{page}.html'
with open(filename, 'wb') as f:
f.write(response.body)
self.log(f'Saved file {filename}.')
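# From the project root this spider can be run with the standard Scrapy CLI:
#   scrapy crawl quotes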
| 38.125
| 78
| 0.672131
|
794a8f5744a3e7c9fe35b61b5a4b90df09367658
| 2,784
|
py
|
Python
|
netbox_agent/vendors/supermicro.py
|
z0nker/netbox-agent
|
c306b1226e37e32015440a9974fc0d444145cbd2
|
[
"Apache-2.0"
] | null | null | null |
netbox_agent/vendors/supermicro.py
|
z0nker/netbox-agent
|
c306b1226e37e32015440a9974fc0d444145cbd2
|
[
"Apache-2.0"
] | null | null | null |
netbox_agent/vendors/supermicro.py
|
z0nker/netbox-agent
|
c306b1226e37e32015440a9974fc0d444145cbd2
|
[
"Apache-2.0"
] | null | null | null |
from netbox_agent.location import Slot
from netbox_agent.server import ServerBase
class SupermicroHost(ServerBase):
"""
    Supermicro DMI data can be messed up: the values depend on the vendor
    setting them correctly, and end users cannot
    change them without buying a license from Supermicro.
There are 3 serial numbers in the system
1) System - this is used for the chassis information.
2) Baseboard - this is used for the blade.
3) Chassis - this is ignored.
"""
def __init__(self, *args, **kwargs):
super(SupermicroHost, self).__init__(*args, **kwargs)
self.manufacturer = 'Supermicro'
def is_blade(self):
product_name = self.get_product_name()
# Blades
blade = product_name.startswith('SBI')
blade |= product_name.startswith('SBA')
# Twin
blade |= 'TR-' in product_name
# BigTwin
blade |= 'BT-' in product_name
# Microcloud
blade |= product_name.startswith('SYS-5039')
blade |= product_name.startswith('SYS-5038')
return blade
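    # Illustrative checks (hypothetical product names):
    #   'SBI-7128RG-F' -> True  (startswith 'SBI')
    #   'SYS-5019S-M'  -> False (matches none of the blade patterns)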
def get_blade_slot(self):
if self.is_blade():
# Some Supermicro servers don't report the slot in dmidecode
# let's use a regex
slot = Slot()
return slot.get()
        # No Supermicro on hand to verify other cases
return None
def get_service_tag(self):
return self.system[0]['Serial Number'].strip()
def get_product_name(self):
return self.system[0]['Product Name'].strip()
def get_chassis(self):
if self.is_blade():
return self.chassis[0]['Product Name'].strip()
return self.get_product_name()
def get_chassis_service_tag(self):
if self.is_blade():
return self.chassis[0]['Serial Number'].strip()
return self.get_service_tag()
def get_chassis_name(self):
if not self.is_blade():
return None
return 'Chassis {}'.format(self.get_chassis_service_tag())
def get_expansion_product(self):
"""
        Get the extension slot, which is on an even slot number
        next to the compute slot, which is on an odd slot number.
        Only one model of GPU extension card is known to do this.
"""
raise NotImplementedError
def is_expansion_slot(self, server):
"""
        Return True if it's an extension slot, based on the name
"""
raise NotImplementedError
def get_blade_expansion_slot(self):
"""
        Expansion slots are always at the compute bay number + 1
"""
raise NotImplementedError
def own_expansion_slot(self):
"""
        Say whether the device can host an extension card based
on the product name
"""
pass
| 29.935484
| 72
| 0.61602
|
794a8f7c8895dbd3d1b2399f9d26fab4726a7ed4
| 7,996
|
py
|
Python
|
Chapter 06/tic_tac_toe/tic_tac_toe_moves.py
|
makwakwa/Tensor_flow
|
47c240d7fb9842f36ef2a51514dfce62bb973b6a
|
[
"MIT"
] | null | null | null |
Chapter 06/tic_tac_toe/tic_tac_toe_moves.py
|
makwakwa/Tensor_flow
|
47c240d7fb9842f36ef2a51514dfce62bb973b6a
|
[
"MIT"
] | null | null | null |
Chapter 06/tic_tac_toe/tic_tac_toe_moves.py
|
makwakwa/Tensor_flow
|
47c240d7fb9842f36ef2a51514dfce62bb973b6a
|
[
"MIT"
] | 1
|
2020-01-27T17:54:09.000Z
|
2020-01-27T17:54:09.000Z
|
# Learning Optimal Tic-Tac-Toe Moves via a Neural Network
#---------------------------------------
#
# We will build a one-hidden layer neural network
# to predict tic-tac-toe optimal moves. This will
# be accomplished by loading a small list of board
# positions with the optimal play response in a csv
# and then applying two random board transformations.
#
# We then train the neural network on the board + response
#
import tensorflow as tf
import matplotlib.pyplot as plt
import csv
import random
import numpy as np
from tensorflow.python.framework import ops
ops.reset_default_graph()
# X = 1
# O = -1
# empty = 0
# response on 1-9 grid for placement of next '1'
# For example, the 'test_board' is:
#
# O | - | -
# -----------------
# X | O | O
# -----------------
# - | - | X
#
# board above = [-1, 0, 0, 1, -1, -1, 0, 0, 1]
# Optimal response would be position 6, where
# the position numbers are:
#
# 0 | 1 | 2
# -----------------
# 3 | 4 | 5
# -----------------
# 6 | 7 | 8
batch_size = 50
symmetry = ['rotate180', 'rotate90', 'rotate270', 'flip_v', 'flip_h']
# Print a board
def print_board(board):
symbols = ['O',' ','X']
board_plus1 = [int(x) + 1 for x in board]
print(' ' + symbols[board_plus1[0]] + ' | ' + symbols[board_plus1[1]] + ' | ' + symbols[board_plus1[2]])
print('___________')
print(' ' + symbols[board_plus1[3]] + ' | ' + symbols[board_plus1[4]] + ' | ' + symbols[board_plus1[5]])
print('___________')
print(' ' + symbols[board_plus1[6]] + ' | ' + symbols[board_plus1[7]] + ' | ' + symbols[board_plus1[8]])
## Given a board, a response, and a transformation, get the new board+response
def get_symmetry(board, response, transformation):
'''
:param board: list of integers 9 long:
opposing mark = -1
friendly mark = 1
empty space = 0
    :param response: index (0-8) of the optimal move for the given board
    :param transformation: one of five transformations on a board:
'rotate180', 'rotate90', 'rotate270', 'flip_v', 'flip_h'
:return: tuple: (new_board, new_response)
'''
if transformation == 'rotate180':
new_response = 8 - response
return(board[::-1], new_response)
elif transformation == 'rotate90':
new_response = [6, 3, 0, 7, 4, 1, 8, 5, 2].index(response)
tuple_board = list(zip(*[board[6:9], board[3:6], board[0:3]]))
return([value for item in tuple_board for value in item], new_response)
elif transformation == 'rotate270':
new_response = [2, 5, 8, 1, 4, 7, 0, 3, 6].index(response)
tuple_board = list(zip(*[board[0:3], board[3:6], board[6:9]]))[::-1]
return([value for item in tuple_board for value in item], new_response)
elif transformation == 'flip_v':
new_response = [6, 7, 8, 3, 4, 5, 0, 1, 2].index(response)
return(board[6:9] + board[3:6] + board[0:3], new_response)
elif transformation == 'flip_h': # flip_h = rotate180, then flip_v
new_response = [2, 1, 0, 5, 4, 3, 8, 7, 6].index(response)
new_board = board[::-1]
return(new_board[6:9] + new_board[3:6] + new_board[0:3], new_response)
else:
        raise ValueError('Method not implemented.')
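# A worked example (illustrative): rotating 180 degrees reverses the board list
# and mirrors the response index around the centre cell:
#   get_symmetry([-1, 0, 0, 1, -1, -1, 0, 0, 1], 6, 'rotate180')
#   -> ([1, 0, 0, -1, -1, 1, 0, 0, -1], 2)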
## Read in board move csv file
def get_moves_from_csv(csv_file):
'''
:param csv_file: csv file location containing the boards w/ responses
:return: moves: list of moves with index of best response
'''
moves = []
with open(csv_file, 'rt') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
moves.append(([int(x) for x in row[0:9]],int(row[9])))
return(moves)
## Get random board with optimal move
def get_rand_move(moves, rand_transforms=2):
'''
:param moves: list of the boards w/responses
:param rand_transforms: how many random transforms performed on each
:return: (board, response), board is a list of 9 integers, response is 1 int
'''
(board, response) = random.choice(moves)
possible_transforms = ['rotate90', 'rotate180', 'rotate270', 'flip_v', 'flip_h']
for i in range(rand_transforms):
random_transform = random.choice(possible_transforms)
(board, response) = get_symmetry(board, response, random_transform)
return(board, response)
# Initialize our graph session
sess = tf.Session()
# Get list of optimal moves w/ responses
moves = get_moves_from_csv('base_tic_tac_toe_moves.csv')
# Create a train set:
train_length = 500
train_set = []
for t in range(train_length):
train_set.append(get_rand_move(moves))
# To see if the network learns anything new, we will remove
# all instances of the board [-1, 0, 0, 1, -1, -1, 0, 0, 1],
# for which the optimal response will be the index '6'. We will
# test this at the end.
test_board = [-1, 0, 0, 1, -1, -1, 0, 0, 1]
train_set = [x for x in train_set if x[0] != test_board]
def init_weights(shape):
return(tf.Variable(tf.random_normal(shape)))
def model(X, A1, A2, bias1, bias2):
layer1 = tf.nn.sigmoid(tf.add(tf.matmul(X, A1), bias1))
layer2 = tf.add(tf.matmul(layer1, A2), bias2)
    return(layer2)  # note that we don't take the softmax at the end because our cost fn does that for us
X = tf.placeholder(dtype=tf.float32, shape=[None, 9])
Y = tf.placeholder(dtype=tf.int32, shape=[None])
A1 = init_weights([9, 81])
bias1 = init_weights([81])
A2 = init_weights([81, 9])
bias2 = init_weights([9])
model_output = model(X, A1, A2, bias1, bias2)
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=model_output, labels=Y))
train_step = tf.train.GradientDescentOptimizer(0.025).minimize(loss)
prediction = tf.argmax(model_output, 1)
init = tf.global_variables_initializer()
sess.run(init)
loss_vec = []
for i in range(10000):
rand_indices = np.random.choice(range(len(train_set)), batch_size, replace=False)
    batch_data = [train_set[idx] for idx in rand_indices]
x_input = [x[0] for x in batch_data]
y_target = np.array([y[1] for y in batch_data])
sess.run(train_step, feed_dict={X: x_input, Y: y_target})
temp_loss = sess.run(loss, feed_dict={X: x_input, Y: y_target})
loss_vec.append(temp_loss)
if i%500==0:
print('iteration ' + str(i) + ' Loss: ' + str(temp_loss))
# Print loss
plt.plot(loss_vec, 'k-', label='Loss')
plt.title('Loss (Cross-Entropy) per Generation')
plt.xlabel('Generation')
plt.ylabel('Loss')
plt.show()
# Make Prediction:
test_boards = [test_board]
feed_dict = {X: test_boards}
logits = sess.run(model_output, feed_dict=feed_dict)
predictions = sess.run(prediction, feed_dict=feed_dict)
print(predictions)
# Declare function to check for win
def check(board):
wins = [[0,1,2], [3,4,5], [6,7,8], [0,3,6], [1,4,7], [2,5,8], [0,4,8], [2,4,6]]
for i in range(len(wins)):
if board[wins[i][0]]==board[wins[i][1]]==board[wins[i][2]]==1.:
return(1)
elif board[wins[i][0]]==board[wins[i][1]]==board[wins[i][2]]==-1.:
return(1)
return(0)
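# Illustrative examples (editor's addition, not in the original script):
# check() returns 1 for any completed row, column, or diagonal by either
# player, and 0 otherwise, e.g.
# assert check([1, 1, 1, 0, 0, 0, 0, 0, 0]) == 1   # top-row win
# assert check([1, -1, 1, 0, 0, 0, 0, 0, 0]) == 0  # no completed line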
# Let's play against our model
game_tracker = [0., 0., 0., 0., 0., 0., 0., 0., 0.]
win_logical = False
num_moves = 0
while not win_logical:
player_index = input('Input index of your move (0-8): ')
num_moves += 1
# Add player move to game
game_tracker[int(player_index)] = 1.
# Get model's move by first getting all the logits for each index
[potential_moves] = sess.run(model_output, feed_dict={X: [game_tracker]})
# Now find allowed moves (where game tracker values = 0.0)
allowed_moves = [ix for ix,x in enumerate(game_tracker) if x==0.0]
# Find best move by taking argmax of logits if they are in allowed moves
model_move = np.argmax([x if ix in allowed_moves else -999.0 for ix,x in enumerate(potential_moves)])
# Add model move to game
game_tracker[int(model_move)] = -1.
print('Model has moved')
print_board(game_tracker)
# Now check for win or too many moves
if check(game_tracker)==1 or num_moves>=5:
print('Game Over!')
win_logical = True
| 34.465517
| 108
| 0.639195
|
794a90202181ebcd32aefc5337eac52153b277e0
| 10,012
|
py
|
Python
|
conftest.py
|
nghia-tran/f5-common-python
|
acb23a6e5830a119b460c19a578654113419f5c3
|
[
"Apache-2.0"
] | 272
|
2016-02-23T06:05:44.000Z
|
2022-02-20T02:09:32.000Z
|
conftest.py
|
nghia-tran/f5-common-python
|
acb23a6e5830a119b460c19a578654113419f5c3
|
[
"Apache-2.0"
] | 1,103
|
2016-02-11T17:48:03.000Z
|
2022-02-15T17:13:37.000Z
|
conftest.py
|
nghia-tran/f5-common-python
|
acb23a6e5830a119b460c19a578654113419f5c3
|
[
"Apache-2.0"
] | 167
|
2016-02-11T17:48:21.000Z
|
2022-01-17T20:13:05.000Z
|
# Copyright 2015-2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from f5.sdk_exception import UnsupportedOperation
from f5.utils.testutils.registrytools import register_device
from icontrol.session import iControlRESTSession
import logging
import mock
import os
import pytest
import requests
from six import itervalues
from tempfile import NamedTemporaryFile
logger = logging.getLogger()
logger.setLevel(logging.WARNING)
requests.packages.urllib3.disable_warnings()
@pytest.fixture
def fakeicontrolsession(monkeypatch):
class Response(object):
def json(self):
return {'selfLink': 'https://localhost/mgmt/tm/sys?ver=11.6.0'}
fakesessionclass = mock.create_autospec(iControlRESTSession, spec_set=True)
fakesessioninstance = mock.create_autospec(iControlRESTSession('A', 'B'),
spec_set=True)
fakesessioninstance.get =\
mock.MagicMock(return_value=Response())
fakesessionclass.return_value = fakesessioninstance
monkeypatch.setattr('f5.bigip.iControlRESTSession', fakesessionclass)
@pytest.fixture
def fakeicontrolsessionv12(monkeypatch):
class Response(object):
def json(self):
return {'selfLink': 'https://localhost/mgmt/tm/sys?ver=12.0.0'}
fakesessionclass = mock.create_autospec(iControlRESTSession, spec_set=True)
fakesessioninstance = mock.create_autospec(
iControlRESTSession('A', 'B'),
spec_set=True
)
fakesessioninstance.get = mock.MagicMock(return_value=Response())
fakesessionclass.return_value = fakesessioninstance
monkeypatch.setattr('f5.bigip.iControlRESTSession', fakesessionclass)
@pytest.fixture
def fakeicontrolsessionfactory(monkeypatch):
class Response(object):
def __init__(self, **json_keys):
if 'selfLink' not in json_keys:
json_keys['selfLink'] =\
'https://localhost/mgmt/tm/sys?ver=11.6.0'
self.params = json_keys
def json(self):
return self.params
def _session_factory(**json_keys):
fakesessionclass = mock.create_autospec(iControlRESTSession,
spec_set=True)
fakesessioninstance =\
mock.create_autospec(iControlRESTSession('A', 'B'), spec_set=True)
fakesessioninstance.get =\
mock.MagicMock(return_value=Response(**json_keys))
fakesessionclass.return_value = fakesessioninstance
monkeypatch.setattr('f5.bigip.iControlRESTSession', fakesessionclass)
return _session_factory
@pytest.fixture(scope='function')
def responsivesessionfactory(monkeypatch):
class Response(object):
def __init__(self, http_code, **json_keys):
if 'selfLink' not in json_keys:
json_keys['selfLink'] = 'https://localhost/mgmt/tm/sys?ver=12.1.0'
self.params = json_keys
self.status_code = http_code
def json(self):
return self.params
def _session_factory(http_code, **json_keys):
fakesessionclass = mock.create_autospec(
iControlRESTSession,
spec_set=True
)
fakesessioninstance = mock.create_autospec(iControlRESTSession('A', 'B'), spec_set=True)
fakesessioninstance.get = mock.MagicMock(return_value=Response(http_code, **json_keys))
fakesessioninstance.delete = mock.MagicMock(return_value=Response(http_code))
fakesessioninstance.patch = mock.MagicMock(return_value=Response(http_code, **json_keys))
fakesessioninstance.put = mock.MagicMock(return_value=Response(http_code, **json_keys))
fakesessioninstance.post = mock.MagicMock(return_value=Response(http_code, **json_keys))
fakesessionclass.return_value = fakesessioninstance
monkeypatch.setattr('f5.bigip.iControlRESTSession', fakesessionclass)
return _session_factory
@pytest.fixture
def fakeicontrolsession_v12(monkeypatch):
class Response(object):
def json(self):
return {'selfLink': 'https://localhost/mgmt/tm/sys?ver=12.1.0'}
fakesessionclass = mock.create_autospec(iControlRESTSession, spec_set=True)
fakesessioninstance =\
mock.create_autospec(iControlRESTSession('A', 'B'), spec_set=True)
fakesessioninstance.get = mock.MagicMock(return_value=Response())
fakesessionclass.return_value = fakesessioninstance
monkeypatch.setattr('f5.bigip.iControlRESTSession', fakesessionclass)
@pytest.fixture
def fakeiwficontrolsession(monkeypatch):
class Response(object):
def json(self):
return {
'version': '2.1.0',
'selfLink': 'https://localhost/shared/identified-devices/config/device-info' # NOQA
}
fakesessionclass = mock.create_autospec(iControlRESTSession, spec_set=True)
fakesessioninstance = mock.create_autospec(iControlRESTSession('A', 'B'),
spec_set=True)
fakesessioninstance.get = \
mock.MagicMock(return_value=Response())
fakesessionclass.return_value = fakesessioninstance
monkeypatch.setattr('f5.iworkflow.iControlRESTSession', fakesessionclass)
@pytest.fixture
def fakeiwficontrolsessionfactory(monkeypatch):
class Response(object):
def __init__(self, **json_keys):
if 'selfLink' not in json_keys:
json_keys['selfLink'] = \
'https://localhost/shared/identified-devices/config/device-info' # NOQA
self.params = json_keys
def json(self):
return self.params
def _session_factory(**json_keys):
fakesessionclass = mock.create_autospec(iControlRESTSession,
spec_set=True)
fakesessioninstance = \
mock.create_autospec(iControlRESTSession('A', 'B'), spec_set=True)
fakesessioninstance.get = \
mock.MagicMock(return_value=Response(**json_keys))
fakesessionclass.return_value = fakesessioninstance
monkeypatch.setattr(
'f5.iworkflow.iControlRESTSession', fakesessionclass
)
return _session_factory
@pytest.fixture
def NAT(bigip):
n = bigip.ltm.nats.nat
return n
@pytest.fixture
def USER(bigip):
n = bigip.auth.users.user
return n
def _delete_pools_members(mgmt_root, pool_records):
for pr in pool_records:
if mgmt_root.tm.ltm.pools.pool.exists(partition=pr.partition, name=pr.name):
pool_inst = mgmt_root.tm.ltm.pools.pool.load(partition=pr.partition, name=pr.name)
members_list = pool_inst.members_s.get_collection()
pool_inst.delete()
for mem_inst in members_list:
mem_inst.delete()
@pytest.fixture
def pool_factory():
def _setup_boilerplate(mgmt_root, request, pool_records):
_delete_pools_members(mgmt_root, pool_records)
pool_registry = {}
members_registry = {}
for pr in pool_records:
pool_registry[pr.name] =\
mgmt_root.tm.ltm.pools.pool.create(partition=pr.partition, name=pr.name)
if pr.memberconfigs != (tuple(),):
members_collection = pool_registry[pr.name].members_s
for memconf in pr.memberconfigs:
members_registry[memconf.memname] =\
members_collection.members\
.create(partition=memconf.mempartition,
name=memconf.memname)
def deleter():
for member_instance in list(itervalues(members_registry)):
member_instance.delete()
for pool_instance in list(itervalues(pool_registry)):
pool_instance.delete()
request.addfinalizer(deleter)
return pool_registry, members_registry
return _setup_boilerplate
@pytest.fixture(scope='module')
def setup_device_snapshot(request, mgmt_root):
'''Snapshot the device to manage objects created by tests.
    Snapshot the device before a test runs and again after, then remove
    objects that persist after the suite runs.
'''
before_snapshot = register_device(mgmt_root)
def teardown():
after_snapshot = register_device(mgmt_root)
diff = set(after_snapshot) - set(before_snapshot)
for item in diff:
try:
after_snapshot[item].delete()
except UnsupportedOperation:
pass
request.addfinalizer(teardown)
return before_snapshot
@pytest.fixture
def IFILE(mgmt_root):
ntf = NamedTemporaryFile(delete=False)
ntf_basename = os.path.basename(ntf.name)
    ntf.write(b'this is a test file')
ntf.seek(0)
mgmt_root.shared.file_transfer.uploads.upload_file(ntf.name)
tpath_name = 'file:/var/config/rest/downloads/{0}'.format(ntf_basename)
i = mgmt_root.tm.sys.file.ifiles.ifile.create(name=ntf_basename,
sourcePath=tpath_name)
return i
@pytest.fixture
def DATAGROUP(mgmt_root):
ntf = NamedTemporaryFile(delete=False)
ntf_basename = os.path.basename(ntf.name)
ntf.write('"name1" := "value1",')
ntf.seek(0)
mgmt_root.shared.file_transfer.uploads.upload_file(ntf.name)
tpath_name = 'file:/var/config/rest/downloads/{0}'.format(ntf_basename)
dg = mgmt_root.tm.sys.file.data_groups.data_group.create(
name=ntf_basename, type='string', sourcePath=tpath_name)
return dg
| 36.407273
| 100
| 0.675989
|
794a902e941e4c6c1d96d781a512d2f94af8d91f
| 7,919
|
py
|
Python
|
homeassistant/components/smartthings/light.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 3
|
2021-11-22T22:37:43.000Z
|
2022-03-17T00:55:28.000Z
|
homeassistant/components/smartthings/light.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 25
|
2021-11-24T06:24:10.000Z
|
2022-03-31T06:23:06.000Z
|
homeassistant/components/smartthings/light.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 3
|
2022-01-02T18:49:54.000Z
|
2022-01-25T02:03:54.000Z
|
"""Support for lights through the SmartThings cloud API."""
from __future__ import annotations
import asyncio
from collections.abc import Sequence
from pysmartthings import Capability
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
ATTR_TRANSITION,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_TRANSITION,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
import homeassistant.util.color as color_util
from . import SmartThingsEntity
from .const import DATA_BROKERS, DOMAIN
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Add lights for a config entry."""
broker = hass.data[DOMAIN][DATA_BROKERS][config_entry.entry_id]
async_add_entities(
[
SmartThingsLight(device)
for device in broker.devices.values()
if broker.any_assigned(device.device_id, "light")
],
True,
)
def get_capabilities(capabilities: Sequence[str]) -> Sequence[str] | None:
"""Return all capabilities supported if minimum required are present."""
supported = [
Capability.switch,
Capability.switch_level,
Capability.color_control,
Capability.color_temperature,
]
# Must be able to be turned on/off.
if Capability.switch not in capabilities:
return None
# Must have one of these
light_capabilities = [
Capability.color_control,
Capability.color_temperature,
Capability.switch_level,
]
if any(capability in capabilities for capability in light_capabilities):
return supported
return None
def convert_scale(value, value_scale, target_scale, round_digits=4):
"""Convert a value to a different scale."""
return round(value * target_scale / value_scale, round_digits)
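# Editor's note (illustrative, not part of the original module): for example,
# a Home Assistant brightness of 128 on the 0-255 scale maps to
# convert_scale(128, 255, 100) == 50.1961 on the SmartThings 0-100 scale.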
class SmartThingsLight(SmartThingsEntity, LightEntity):
"""Define a SmartThings Light."""
def __init__(self, device):
"""Initialize a SmartThingsLight."""
super().__init__(device)
self._brightness = None
self._color_temp = None
self._hs_color = None
self._supported_features = self._determine_features()
def _determine_features(self):
"""Get features supported by the device."""
features = 0
# Brightness and transition
if Capability.switch_level in self._device.capabilities:
features |= SUPPORT_BRIGHTNESS | SUPPORT_TRANSITION
# Color Temperature
if Capability.color_temperature in self._device.capabilities:
features |= SUPPORT_COLOR_TEMP
# Color
if Capability.color_control in self._device.capabilities:
features |= SUPPORT_COLOR
return features
async def async_turn_on(self, **kwargs) -> None:
"""Turn the light on."""
tasks = []
# Color temperature
if self._supported_features & SUPPORT_COLOR_TEMP and ATTR_COLOR_TEMP in kwargs:
tasks.append(self.async_set_color_temp(kwargs[ATTR_COLOR_TEMP]))
# Color
if self._supported_features & SUPPORT_COLOR and ATTR_HS_COLOR in kwargs:
tasks.append(self.async_set_color(kwargs[ATTR_HS_COLOR]))
if tasks:
# Set temp/color first
await asyncio.gather(*tasks)
# Switch/brightness/transition
if self._supported_features & SUPPORT_BRIGHTNESS and ATTR_BRIGHTNESS in kwargs:
await self.async_set_level(
kwargs[ATTR_BRIGHTNESS], kwargs.get(ATTR_TRANSITION, 0)
)
else:
await self._device.switch_on(set_status=True)
# State is set optimistically in the commands above, therefore update
# the entity state ahead of receiving the confirming push updates
self.async_schedule_update_ha_state(True)
async def async_turn_off(self, **kwargs) -> None:
"""Turn the light off."""
# Switch/transition
if self._supported_features & SUPPORT_TRANSITION and ATTR_TRANSITION in kwargs:
await self.async_set_level(0, int(kwargs[ATTR_TRANSITION]))
else:
await self._device.switch_off(set_status=True)
# State is set optimistically in the commands above, therefore update
# the entity state ahead of receiving the confirming push updates
self.async_schedule_update_ha_state(True)
async def async_update(self):
"""Update entity attributes when the device status has changed."""
# Brightness and transition
if self._supported_features & SUPPORT_BRIGHTNESS:
self._brightness = int(
convert_scale(self._device.status.level, 100, 255, 0)
)
# Color Temperature
if self._supported_features & SUPPORT_COLOR_TEMP:
self._color_temp = color_util.color_temperature_kelvin_to_mired(
self._device.status.color_temperature
)
# Color
if self._supported_features & SUPPORT_COLOR:
self._hs_color = (
convert_scale(self._device.status.hue, 100, 360),
self._device.status.saturation,
)
async def async_set_color(self, hs_color):
"""Set the color of the device."""
hue = convert_scale(float(hs_color[0]), 360, 100)
hue = max(min(hue, 100.0), 0.0)
saturation = max(min(float(hs_color[1]), 100.0), 0.0)
await self._device.set_color(hue, saturation, set_status=True)
async def async_set_color_temp(self, value: float):
"""Set the color temperature of the device."""
kelvin = color_util.color_temperature_mired_to_kelvin(value)
kelvin = max(min(kelvin, 30000), 1)
await self._device.set_color_temperature(kelvin, set_status=True)
async def async_set_level(self, brightness: int, transition: int):
"""Set the brightness of the light over transition."""
level = int(convert_scale(brightness, 255, 100, 0))
# Due to rounding, set level to 1 (one) so we don't inadvertently
# turn off the light when a low brightness is set.
level = 1 if level == 0 and brightness > 0 else level
level = max(min(level, 100), 0)
duration = int(transition)
await self._device.set_level(level, duration, set_status=True)
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def color_temp(self):
"""Return the CT color value in mireds."""
return self._color_temp
@property
def hs_color(self):
"""Return the hue and saturation color value [float, float]."""
return self._hs_color
@property
def is_on(self) -> bool:
"""Return true if light is on."""
return self._device.status.switch
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
# SmartThings does not expose this attribute, instead it's
# implemented within each device-type handler. This value is the
# lowest kelvin found supported across 20+ handlers.
return 500 # 2000K
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
# SmartThings does not expose this attribute, instead it's
# implemented within each device-type handler. This value is the
# highest kelvin found supported across 20+ handlers.
return 111 # 9000K
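    # Editor's note (illustrative): mireds = 1,000,000 / kelvin, so the
    # 2000 K floor above corresponds to 1e6 / 2000 = 500 mireds and the
    # 9000 K ceiling to 1e6 / 9000 ~= 111 mireds.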
@property
def supported_features(self) -> int:
"""Flag supported features."""
return self._supported_features
| 36.325688
| 87
| 0.667382
|
794a91bff956a6b30f890f72112824acceb80294
| 17,860
|
py
|
Python
|
test/functional/feature_bip68_sequence.py
|
CoinBitCore/temp
|
a2ebefc861aa979b441286a203d574197459b0d6
|
[
"MIT"
] | null | null | null |
test/functional/feature_bip68_sequence.py
|
CoinBitCore/temp
|
a2ebefc861aa979b441286a203d574197459b0d6
|
[
"MIT"
] | null | null | null |
test/functional/feature_bip68_sequence.py
|
CoinBitCore/temp
|
a2ebefc861aa979b441286a203d574197459b0d6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Coinbit Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP68 implementation."""
from test_framework.test_framework import CoinbitTestFramework
from test_framework.util import *
from test_framework.blocktools import *
SEQUENCE_LOCKTIME_DISABLE_FLAG = (1<<31)
SEQUENCE_LOCKTIME_TYPE_FLAG = (1<<22) # this means use time (0 means height)
SEQUENCE_LOCKTIME_GRANULARITY = 9 # this is a bit-shift
SEQUENCE_LOCKTIME_MASK = 0x0000ffff
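# Editor's note (illustrative): when SEQUENCE_LOCKTIME_TYPE_FLAG is set, the
# masked sequence value counts 512-second units, e.g. a masked value of 3
# encodes a relative lock of 3 << SEQUENCE_LOCKTIME_GRANULARITY = 1536 seconds.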
# RPC error for non-BIP68 final transactions
NOT_FINAL_ERROR = "64: non-BIP68-final"
class BIP68Test(CoinbitTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [[], ["-acceptnonstdtxn=0"]]
def run_test(self):
self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
# Generate some coins
self.nodes[0].generate(110)
self.log.info("Running test disable flag")
self.test_disable_flag()
self.log.info("Running test sequence-lock-confirmed-inputs")
self.test_sequence_lock_confirmed_inputs()
self.log.info("Running test sequence-lock-unconfirmed-inputs")
self.test_sequence_lock_unconfirmed_inputs()
self.log.info("Running test BIP68 not consensus before versionbits activation")
self.test_bip68_not_consensus()
self.log.info("Activating BIP68 (and 112/113)")
self.activateCSV()
self.log.info("Verifying nVersion=2 transactions are standard.")
self.log.info("Note that nVersion=2 transactions are always standard (independent of BIP68 activation status).")
self.test_version2_relay()
self.log.info("Passed")
# Test that BIP68 is not in effect if tx version is 1, or if
# the first sequence bit is set.
def test_disable_flag(self):
# Create some unconfirmed inputs
new_addr = self.nodes[0].getnewaddress()
self.nodes[0].sendtoaddress(new_addr, 2) # send 2 CTB
utxos = self.nodes[0].listunspent(0, 0)
assert(len(utxos) > 0)
utxo = utxos[0]
tx1 = CTransaction()
value = int(satoshi_round(utxo["amount"] - self.relayfee)*COIN)
# Check that the disable flag disables relative locktime.
# If sequence locks were used, this would require 1 block for the
# input to mature.
sequence_value = SEQUENCE_LOCKTIME_DISABLE_FLAG | 1
tx1.vin = [CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), nSequence=sequence_value)]
tx1.vout = [CTxOut(value, CScript([b'a']))]
tx1_signed = self.nodes[0].signrawtransaction(ToHex(tx1))["hex"]
tx1_id = self.nodes[0].sendrawtransaction(tx1_signed)
tx1_id = int(tx1_id, 16)
# This transaction will enable sequence-locks, so this transaction should
# fail
tx2 = CTransaction()
tx2.nVersion = 2
sequence_value = sequence_value & 0x7fffffff
tx2.vin = [CTxIn(COutPoint(tx1_id, 0), nSequence=sequence_value)]
tx2.vout = [CTxOut(int(value-self.relayfee*COIN), CScript([b'a']))]
tx2.rehash()
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx2))
# Setting the version back down to 1 should disable the sequence lock,
# so this should be accepted.
tx2.nVersion = 1
self.nodes[0].sendrawtransaction(ToHex(tx2))
# Calculate the median time past of a prior block ("confirmations" before
# the current tip).
def get_median_time_past(self, confirmations):
block_hash = self.nodes[0].getblockhash(self.nodes[0].getblockcount()-confirmations)
return self.nodes[0].getblockheader(block_hash)["mediantime"]
# Test that sequence locks are respected for transactions spending confirmed inputs.
def test_sequence_lock_confirmed_inputs(self):
# Create lots of confirmed utxos, and use them to generate lots of random
# transactions.
max_outputs = 50
addresses = []
while len(addresses) < max_outputs:
addresses.append(self.nodes[0].getnewaddress())
        import random
        while len(self.nodes[0].listunspent()) < 200:
            random.shuffle(addresses)
num_outputs = random.randint(1, max_outputs)
outputs = {}
for i in range(num_outputs):
outputs[addresses[i]] = random.randint(1, 20)*0.01
self.nodes[0].sendmany("", outputs)
self.nodes[0].generate(1)
utxos = self.nodes[0].listunspent()
# Try creating a lot of random transactions.
# Each time, choose a random number of inputs, and randomly set
# some of those inputs to be sequence locked (and randomly choose
# between height/time locking). Small random chance of making the locks
# all pass.
for i in range(400):
# Randomly choose up to 10 inputs
num_inputs = random.randint(1, 10)
random.shuffle(utxos)
# Track whether any sequence locks used should fail
should_pass = True
# Track whether this transaction was built with sequence locks
using_sequence_locks = False
tx = CTransaction()
tx.nVersion = 2
value = 0
for j in range(num_inputs):
sequence_value = 0xfffffffe # this disables sequence locks
# 50% chance we enable sequence locks
if random.randint(0,1):
using_sequence_locks = True
# 10% of the time, make the input sequence value pass
input_will_pass = (random.randint(1,10) == 1)
sequence_value = utxos[j]["confirmations"]
if not input_will_pass:
sequence_value += 1
should_pass = False
# Figure out what the median-time-past was for the confirmed input
# Note that if an input has N confirmations, we're going back N blocks
# from the tip so that we're looking up MTP of the block
# PRIOR to the one the input appears in, as per the BIP68 spec.
orig_time = self.get_median_time_past(utxos[j]["confirmations"])
cur_time = self.get_median_time_past(0) # MTP of the tip
# can only timelock this input if it's not too old -- otherwise use height
can_time_lock = True
if ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) >= SEQUENCE_LOCKTIME_MASK:
can_time_lock = False
# if time-lockable, then 50% chance we make this a time lock
if random.randint(0,1) and can_time_lock:
# Find first time-lock value that fails, or latest one that succeeds
time_delta = sequence_value << SEQUENCE_LOCKTIME_GRANULARITY
if input_will_pass and time_delta > cur_time - orig_time:
sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)
elif (not input_will_pass and time_delta <= cur_time - orig_time):
sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)+1
sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG
tx.vin.append(CTxIn(COutPoint(int(utxos[j]["txid"], 16), utxos[j]["vout"]), nSequence=sequence_value))
value += utxos[j]["amount"]*COIN
# Overestimate the size of the tx - signatures should be less than 120 bytes, and leave 50 for the output
tx_size = len(ToHex(tx))//2 + 120*num_inputs + 50
tx.vout.append(CTxOut(int(value-self.relayfee*tx_size*COIN/1000), CScript([b'a'])))
rawtx = self.nodes[0].signrawtransaction(ToHex(tx))["hex"]
if (using_sequence_locks and not should_pass):
# This transaction should be rejected
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, rawtx)
else:
# This raw transaction should be accepted
self.nodes[0].sendrawtransaction(rawtx)
utxos = self.nodes[0].listunspent()
# Test that sequence locks on unconfirmed inputs must have nSequence
# height or time of 0 to be accepted.
# Then test that BIP68-invalid transactions are removed from the mempool
# after a reorg.
def test_sequence_lock_unconfirmed_inputs(self):
# Store height so we can easily reset the chain at the end of the test
cur_height = self.nodes[0].getblockcount()
# Create a mempool tx.
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2)
tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid))
tx1.rehash()
# Anyone-can-spend mempool tx.
# Sequence lock of 0 should pass.
tx2 = CTransaction()
tx2.nVersion = 2
tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)]
tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
tx2_raw = self.nodes[0].signrawtransaction(ToHex(tx2))["hex"]
tx2 = FromHex(tx2, tx2_raw)
tx2.rehash()
self.nodes[0].sendrawtransaction(tx2_raw)
# Create a spend of the 0th output of orig_tx with a sequence lock
# of 1, and test what happens when submitting.
# orig_tx.vout[0] must be an anyone-can-spend output
def test_nonzero_locks(orig_tx, node, relayfee, use_height_lock):
sequence_value = 1
if not use_height_lock:
sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG
tx = CTransaction()
tx.nVersion = 2
tx.vin = [CTxIn(COutPoint(orig_tx.sha256, 0), nSequence=sequence_value)]
tx.vout = [CTxOut(int(orig_tx.vout[0].nValue - relayfee*COIN), CScript([b'a']))]
tx.rehash()
if (orig_tx.hash in node.getrawmempool()):
# sendrawtransaction should fail if the tx is in the mempool
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, node.sendrawtransaction, ToHex(tx))
else:
# sendrawtransaction should succeed if the tx is not in the mempool
node.sendrawtransaction(ToHex(tx))
return tx
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True)
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
# Now mine some blocks, but make sure tx2 doesn't get mined.
# Use prioritisetransaction to lower the effective feerate to 0
self.nodes[0].prioritisetransaction(txid=tx2.hash, fee_delta=int(-self.relayfee*COIN))
cur_time = int(time.time())
for i in range(10):
self.nodes[0].setmocktime(cur_time + 600)
self.nodes[0].generate(1)
cur_time += 600
assert(tx2.hash in self.nodes[0].getrawmempool())
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True)
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
# Mine tx2, and then try again
self.nodes[0].prioritisetransaction(txid=tx2.hash, fee_delta=int(self.relayfee*COIN))
# Advance the time on the node so that we can test timelocks
self.nodes[0].setmocktime(cur_time+600)
self.nodes[0].generate(1)
assert(tx2.hash not in self.nodes[0].getrawmempool())
# Now that tx2 is not in the mempool, a sequence locked spend should
# succeed
tx3 = test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
assert(tx3.hash in self.nodes[0].getrawmempool())
self.nodes[0].generate(1)
assert(tx3.hash not in self.nodes[0].getrawmempool())
# One more test, this time using height locks
tx4 = test_nonzero_locks(tx3, self.nodes[0], self.relayfee, use_height_lock=True)
assert(tx4.hash in self.nodes[0].getrawmempool())
# Now try combining confirmed and unconfirmed inputs
tx5 = test_nonzero_locks(tx4, self.nodes[0], self.relayfee, use_height_lock=True)
assert(tx5.hash not in self.nodes[0].getrawmempool())
utxos = self.nodes[0].listunspent()
tx5.vin.append(CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["vout"]), nSequence=1))
tx5.vout[0].nValue += int(utxos[0]["amount"]*COIN)
raw_tx5 = self.nodes[0].signrawtransaction(ToHex(tx5))["hex"]
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, raw_tx5)
# Test mempool-BIP68 consistency after reorg
#
# State of the transactions in the last blocks:
# ... -> [ tx2 ] -> [ tx3 ]
# tip-1 tip
# And currently tx4 is in the mempool.
#
# If we invalidate the tip, tx3 should get added to the mempool, causing
# tx4 to be removed (fails sequence-lock).
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
assert(tx4.hash not in self.nodes[0].getrawmempool())
assert(tx3.hash in self.nodes[0].getrawmempool())
# Now mine 2 empty blocks to reorg out the current tip (labeled tip-1 in
# diagram above).
# This would cause tx2 to be added back to the mempool, which in turn causes
# tx3 to be removed.
tip = int(self.nodes[0].getblockhash(self.nodes[0].getblockcount()-1), 16)
height = self.nodes[0].getblockcount()
for i in range(2):
block = create_block(tip, create_coinbase(height), cur_time)
block.nVersion = 3
block.rehash()
block.solve()
tip = block.sha256
height += 1
self.nodes[0].submitblock(ToHex(block))
cur_time += 1
mempool = self.nodes[0].getrawmempool()
assert(tx3.hash not in mempool)
assert(tx2.hash in mempool)
# Reset the chain and get rid of the mocktimed-blocks
self.nodes[0].setmocktime(0)
self.nodes[0].invalidateblock(self.nodes[0].getblockhash(cur_height+1))
self.nodes[0].generate(10)
# Make sure that BIP68 isn't being used to validate blocks, prior to
# versionbits activation. If more blocks are mined prior to this test
# being run, then it's possible the test has activated the soft fork, and
# this test should be moved to run earlier, or deleted.
def test_bip68_not_consensus(self):
assert(get_bip9_status(self.nodes[0], 'csv')['status'] != 'active')
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2)
tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid))
tx1.rehash()
# Make an anyone-can-spend transaction
tx2 = CTransaction()
tx2.nVersion = 1
tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)]
tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
# sign tx2
tx2_raw = self.nodes[0].signrawtransaction(ToHex(tx2))["hex"]
tx2 = FromHex(tx2, tx2_raw)
tx2.rehash()
self.nodes[0].sendrawtransaction(ToHex(tx2))
# Now make an invalid spend of tx2 according to BIP68
sequence_value = 100 # 100 block relative locktime
tx3 = CTransaction()
tx3.nVersion = 2
tx3.vin = [CTxIn(COutPoint(tx2.sha256, 0), nSequence=sequence_value)]
tx3.vout = [CTxOut(int(tx2.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
tx3.rehash()
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx3))
# make a block that violates bip68; ensure that the tip updates
tip = int(self.nodes[0].getbestblockhash(), 16)
block = create_block(tip, create_coinbase(self.nodes[0].getblockcount()+1))
block.nVersion = 3
block.vtx.extend([tx1, tx2, tx3])
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
add_witness_commitment(block)
block.solve()
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
def activateCSV(self):
# activation should happen at block height 432 (3 periods)
# getblockchaininfo will show CSV as active at block 431 (144 * 3 -1) since it's returning whether CSV is active for the next block.
min_activation_height = 432
height = self.nodes[0].getblockcount()
assert_greater_than(min_activation_height - height, 2)
self.nodes[0].generate(min_activation_height - height - 2)
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], "locked_in")
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], "active")
sync_blocks(self.nodes)
# Use self.nodes[1] to test that version 2 transactions are standard.
def test_version2_relay(self):
inputs = [ ]
outputs = { self.nodes[1].getnewaddress() : 1.0 }
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
rawtxfund = self.nodes[1].fundrawtransaction(rawtx)['hex']
tx = FromHex(CTransaction(), rawtxfund)
tx.nVersion = 2
tx_signed = self.nodes[1].signrawtransaction(ToHex(tx))["hex"]
self.nodes[1].sendrawtransaction(tx_signed)
if __name__ == '__main__':
BIP68Test().main()
| 45.10101
| 140
| 0.633203
|
794a9253531c046b1d9703ca3b95ad7db35f3681
| 164
|
py
|
Python
|
4_MATRIX/i.py
|
clauvm/project_map_reduce
|
a7b4847cad195965fe939698b747bd495f8ea30b
|
[
"MIT"
] | null | null | null |
4_MATRIX/i.py
|
clauvm/project_map_reduce
|
a7b4847cad195965fe939698b747bd495f8ea30b
|
[
"MIT"
] | null | null | null |
4_MATRIX/i.py
|
clauvm/project_map_reduce
|
a7b4847cad195965fe939698b747bd495f8ea30b
|
[
"MIT"
] | null | null | null |
import numpy as np
A = np.random.rand(1000,50)
B = np.random.rand(50,2000)
np.savetxt('A.txt',A)
np.savetxt('B.txt',B)
C = np.dot(A,B)
np.savetxt('C.txt',C)
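# Editor's sanity check (illustrative addition, not in the original script):
# a (1000, 50) matrix multiplied by a (50, 2000) matrix yields a (1000, 2000)
# result, which the map-reduce output can be validated against.
assert C.shape == (1000, 2000)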
| 10.933333
| 27
| 0.628049
|
794a93283571e2d79e3ee4dcad10dfbd510cbc2a
| 1,276
|
py
|
Python
|
benchmarks/benchmarks/small_ramses.py
|
fzmolina/yt
|
59fbfc2f7e5eea7bf2517fa4bd53113372f01a7c
|
[
"BSD-3-Clause-Clear"
] | 1
|
2021-11-29T21:59:06.000Z
|
2021-11-29T21:59:06.000Z
|
benchmarks/benchmarks/small_ramses.py
|
bkhamesra/yt-EinsteinToolkit
|
576bf88b5cd706fd577c513c23b1db07ec5f4cd2
|
[
"BSD-3-Clause-Clear"
] | 1
|
2016-04-05T22:30:14.000Z
|
2016-04-05T22:30:14.000Z
|
benchmarks/benchmarks/small_ramses.py
|
bkhamesra/yt-EinsteinToolkit
|
576bf88b5cd706fd577c513c23b1db07ec5f4cd2
|
[
"BSD-3-Clause-Clear"
] | 1
|
2020-12-05T05:51:09.000Z
|
2020-12-05T05:51:09.000Z
|
import numpy as np
import yt
class SmallRAMSESSuite:
dsname = "output_00080/info_00080.txt"
def setup(self):
self.ds = yt.load(self.dsname)
def time_all_particles(self):
dd = self.ds.all_data()
dd["all", "particle_velocity_x"]
dd["all", "particle_velocity_y"]
dd["all", "particle_velocity_z"]
def time_all_particles_derived(self):
dd = self.ds.all_data()
dd["all", "particle_velocity_magnitude"]
def time_gas_read(self):
dd = self.ds.all_data()
dd["gas", "density"]
def time_gas_derived(self):
dd = self.ds.all_data()
dd["gas", "velocity_magnitude"]
def time_project_unweight(self):
proj = self.ds.proj("density", 0)
def time_project_weight(self):
proj = self.ds.proj("density", 0, "density")
def time_particle_quantities(self):
dd = self.ds.all_data()
dd.quantities.extrema("particle_mass")
dd.quantities.extrema("particle_velocity_magnitude")
dd.quantities.extrema(["particle_velocity_%s" % ax for ax in 'xyz'])
    def time_gas_quantities(self):
dd = self.ds.all_data()
dd.quantities.extrema("density")
dd.quantities.extrema(["velocity_x", "velocity_y", "velocity_z"])
| 29.674419
| 76
| 0.634013
|
794a9362073ce7ba7ad0d79a22848e39234f52ca
| 757
|
py
|
Python
|
python/projeto06/comentarios/models.py
|
WilliamDeveloper/udemy_cursos
|
f592bafbe3d2a5d631458f8c42151c880aadef17
|
[
"MIT"
] | null | null | null |
python/projeto06/comentarios/models.py
|
WilliamDeveloper/udemy_cursos
|
f592bafbe3d2a5d631458f8c42151c880aadef17
|
[
"MIT"
] | null | null | null |
python/projeto06/comentarios/models.py
|
WilliamDeveloper/udemy_cursos
|
f592bafbe3d2a5d631458f8c42151c880aadef17
|
[
"MIT"
] | null | null | null |
from django.db import models
from posts.models import Post
from django.contrib.auth.models import User
from django.utils import timezone
# Create your models here.
class Comentario(models.Model):
nome_comentario = models.CharField(max_length=150, verbose_name='Nome')
email_comentario = models.EmailField(verbose_name='Email')
comentario = models.TextField(verbose_name='Comentário')
post_comentario = models.ForeignKey(Post, on_delete=models.CASCADE)
usuario_comentario = models.ForeignKey(User, on_delete=models.DO_NOTHING, blank=True, null=True)
data_comentario = models.DateTimeField(default=timezone.now)
publicado_comentario = models.BooleanField(default=False)
def __str__(self):
return self.nome_comentario
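# Illustrative usage sketch (editor's addition, not part of the original
# module); assumes an existing Post instance `post` and User instance `user`:
#
# comentario = Comentario.objects.create(
#     nome_comentario='Maria',
#     email_comentario='maria@example.com',
#     comentario='Ótimo post!',
#     post_comentario=post,
#     usuario_comentario=user,
# )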
| 42.055556
| 100
| 0.784676
|
794a9401375db1c1203aad0a2f707bc2092f4953
| 4,283
|
py
|
Python
|
samples/nx-config-nd.py
|
jonnyrocks/nxtoolkit
|
f03872793a31613c3b7498a8d65cfac3080aeee0
|
[
"Apache-2.0"
] | 44
|
2015-09-01T19:26:21.000Z
|
2022-03-24T18:06:44.000Z
|
samples/nx-config-nd.py
|
jonnyrocks/nxtoolkit
|
f03872793a31613c3b7498a8d65cfac3080aeee0
|
[
"Apache-2.0"
] | 10
|
2015-09-18T13:40:41.000Z
|
2018-08-13T19:39:24.000Z
|
samples/nx-config-nd.py
|
jonnyrocks/nxtoolkit
|
f03872793a31613c3b7498a8d65cfac3080aeee0
|
[
"Apache-2.0"
] | 37
|
2015-09-01T19:28:22.000Z
|
2020-07-08T19:57:40.000Z
|
#!/usr/bin/env python
################################################################################
# #
# Copyright (c) 2015 Cisco Systems #
# All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT #
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #
# License for the specific language governing permissions and limitations #
# under the License. #
# #
################################################################################
"""
Simple application that logs on to the Switch and Configure Neighbor Discovery
"""
import sys
import nxtoolkit.nxtoolkit as NX
def main():
"""
Main execution routine
:return: None
"""
# Take login credentials from the command line if provided
# Otherwise, take them from your environment variables file ~/.profile
description = '''Simple application that logs on to the
Switch and Configure Neighbor Discovery.'''
creds = NX.Credentials('switch', description)
args = creds.get()
''' Login to Switch '''
session = NX.Session(args.url, args.login, args.password)
resp = session.login()
if not resp.ok:
print('%% Could not login to Switch')
sys.exit(0)
nd = NX.ND() # Create ND instance
nd_int = NX.NdInterface('vlan123')
nd_int.disable_redirect()
nd_int.set_ra_interval('600')
nd_int.set_prefix('2000::/12', '100', '99')
nd.add(nd_int)
    print(nd.get_json())
''' Push ND configuration to the switch '''
resp = session.push_to_switch(nd.get_url(), nd.get_json())
if not resp.ok:
        print(resp.text)
        print('Could not push to Switch')
exit(0)
# Uncomment below lines to delete nd configuration of specific interface
'''
nd_int = NX.NdInterface('vlan123')
resp = session.delete(nd_int.get_url())
if not resp.ok:
print('%% Could not delete from Switch')
sys.exit(0)
'''
template = "{0:20} {1:20} {2:20}"
print template.format("Interface/Vlan", "Ra Interval",
"Redirection State")
print template.format("===============", "===============",
"===============")
nd_data = NX.ND.get(session)
    for intf in nd_data.interfaces:
        print(template.format(intf.id, intf.ra_interval, intf.redirect_st))
        for prefix in intf.prefixes:
            print("Prefix Address:%s\tlifetime:%s\tpreferred lifetime:%s"
                  % (prefix.address, prefix.lifetime, prefix.pref_lifetime))
        print("\n")
# Uncomment below lines to get specific interface details
'''
int_data = NX.NdInterface.get(session, 'vlan123')
print template.format("Interface/Vlan", "Ra Interval",
"Redirection State")
print template.format("===============", "===============",
"===============")
print template.format(int_data.id, int_data.ra_interval,
int_data.redirect_st)
for prefix in int_data.prefixes:
print ("Prefix Address:%s\tlifetime:%s\tpreferred lifetime:%s"
% (prefix.address, prefix.lifetime, prefix.pref_lifetime))
'''
if __name__ == '__main__':
main()
| 41.582524
| 80
| 0.487742
|
794a942639f14f9a72001793afbef61090fcf3a0
| 3,938
|
py
|
Python
|
electrum_dash/scripts/bruteforce_pw.py
|
PanderMusubi/electrum-dash
|
02cd655c57adba276bc17084bd4d1a105fb10bfe
|
[
"MIT"
] | 51
|
2018-01-09T01:44:49.000Z
|
2022-02-22T13:28:43.000Z
|
electrum_dash/scripts/bruteforce_pw.py
|
PanderMusubi/electrum-dash
|
02cd655c57adba276bc17084bd4d1a105fb10bfe
|
[
"MIT"
] | 152
|
2017-07-11T16:24:56.000Z
|
2022-03-21T13:29:39.000Z
|
electrum_dash/scripts/bruteforce_pw.py
|
PanderMusubi/electrum-dash
|
02cd655c57adba276bc17084bd4d1a105fb10bfe
|
[
"MIT"
] | 67
|
2018-01-11T23:54:50.000Z
|
2022-03-29T21:59:23.000Z
|
#!/usr/bin/env python3
#
# This script is just a demonstration of how one could go about bruteforcing
# an Electrum wallet file password. As it is pure Python and runs on the CPU,
# it is horribly slow. It could be changed to utilise multiple threads,
# but any serious attempt would need at least GPU acceleration.
#
# There are two main types of password encryption that need to be disambiguated
# for Electrum wallets:
# (1) keystore-encryption: The wallet file itself is mostly plaintext (json),
# only the Bitcoin private keys themselves are encrypted.
# (e.g. seed words, xprv are encrypted; addresses are not)
# Even in memory (at runtime), the private keys are typically
# stored encrypted, and only when needed the user is prompted
# for their password to decrypt the keys briefly.
# (2) storage-encryption: The file itself is encrypted. When opened in a text editor,
# it is base64 ascii text. Normally storage-encrypted wallets
# also have keystore-encryption (unless they don't have private keys).
# Storage-encryption was introduced in Electrum 2.8, keystore-encryption predates that.
# Newly created wallets in modern Electrum have storage-encryption enabled by default.
#
# Storage encryption uses a stronger KDF than keystore-encryption.
# As is, this script can test around ~1000 passwords per second for storage-encryption.
import sys
from string import digits, ascii_uppercase, ascii_lowercase
from itertools import product
from typing import Callable
from functools import partial
from electrum_dash.wallet import Wallet, Abstract_Wallet
from electrum_dash.storage import WalletStorage
from electrum_dash.wallet_db import WalletDB
from electrum_dash.simple_config import SimpleConfig
from electrum_dash.util import InvalidPassword
ALLOWED_CHARS = digits + ascii_uppercase + ascii_lowercase
MAX_PASSWORD_LEN = 12
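# Editor's note (illustrative): with 62 allowed characters and passwords of up
# to 12 characters, the search space is sum(62**n for n in range(1, 13)),
# roughly 3.3e21 candidates -- far beyond ~1000 tests per second in practice.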
def test_password_for_storage_encryption(storage: WalletStorage, password: str) -> bool:
try:
storage.decrypt(password)
except InvalidPassword:
return False
else:
return True
def test_password_for_keystore_encryption(wallet: Abstract_Wallet, password: str) -> bool:
try:
wallet.check_password(password)
except InvalidPassword:
return False
else:
return True
def bruteforce_loop(test_password: Callable[[str], bool]) -> str:
num_tested = 0
for pw_len in range(1, MAX_PASSWORD_LEN + 1):
for pw_tuple in product(ALLOWED_CHARS, repeat=pw_len):
password = "".join(pw_tuple)
if test_password(password):
return password
num_tested += 1
if num_tested % 5000 == 0:
print(f"> tested {num_tested} passwords so far... most recently tried: {password!r}")
if __name__ == '__main__':
if len(sys.argv) < 2:
print("ERROR. usage: bruteforce_pw.py <path_to_wallet_file>")
sys.exit(1)
path = sys.argv[1]
config = SimpleConfig()
storage = WalletStorage(path)
if not storage.file_exists():
print(f"ERROR. wallet file not found at path: {path}")
sys.exit(1)
if storage.is_encrypted():
test_password = partial(test_password_for_storage_encryption, storage)
print(f"wallet found: with storage encryption.")
else:
db = WalletDB(storage.read(), manual_upgrades=True)
wallet = Wallet(db, storage, config=config)
if not wallet.has_password():
print("wallet found but it is not encrypted.")
sys.exit(0)
test_password = partial(test_password_for_keystore_encryption, wallet)
print(f"wallet found: with keystore encryption.")
password = bruteforce_loop(test_password)
print(f"====================")
print(f"password found: {password}")
| 40.597938
| 101
| 0.686897
|
794a9554850163441270b62bfe591a2c43987fd4
| 671
|
py
|
Python
|
1 File Python Problems/FizzBuzz/FizzBuzz.py
|
TheRealDougie1/SmallCodeProjects
|
0b04a6b96b8700dad808a54b78e435274627f173
|
[
"MIT"
] | null | null | null |
1 File Python Problems/FizzBuzz/FizzBuzz.py
|
TheRealDougie1/SmallCodeProjects
|
0b04a6b96b8700dad808a54b78e435274627f173
|
[
"MIT"
] | 4
|
2020-12-07T18:13:06.000Z
|
2020-12-08T18:41:25.000Z
|
1 File Python Problems/FizzBuzz/FizzBuzz.py
|
TheRealDougie1/SmallCodeProjects
|
0b04a6b96b8700dad808a54b78e435274627f173
|
[
"MIT"
] | null | null | null |
"""
Basic Problem Summary:
For 100 numbers:
If the number is divisible by 3, print Fizz.
If the number is divisible by 5, print Buzz
If the number is divisible by both 3 and 5, print FizzBuzz
Otherwise, just print the number :)
Example Output: 1,2,Fizz,4,Buzz,Fizz,7,8,Fizz,Buzz
"""
def FizzBuzz():
for number in range(1,101):
if number % 3 == 0 and number % 5 == 0:
print("FizzBuzz")
continue
if number % 3 == 0:
print("Fizz")
continue
if number % 5 == 0:
print("Buzz")
continue
print(number)
if __name__ == '__main__':
FizzBuzz()
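# Illustrative alternative (editor's addition, not in the original file): the
# same logic as a single expression, using the fact that numbers divisible by
# both 3 and 5 are exactly the multiples of 15:
#
# print("\n".join("FizzBuzz" if n % 15 == 0 else
#                 "Fizz" if n % 3 == 0 else
#                 "Buzz" if n % 5 == 0 else str(n)
#                 for n in range(1, 101)))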
| 25.807692
| 62
| 0.558867
|
794a95ace00732377d5566144faf41ebb7a303a0
| 19,976
|
py
|
Python
|
configs/common/Options.py
|
AMHM/gem5-PhD
|
c1af9c50dcb3569f2d9bba9364de560260832d09
|
[
"BSD-3-Clause"
] | 1
|
2020-09-08T13:35:28.000Z
|
2020-09-08T13:35:28.000Z
|
configs/common/Options.py
|
AMHM/gem5-PhD
|
c1af9c50dcb3569f2d9bba9364de560260832d09
|
[
"BSD-3-Clause"
] | null | null | null |
configs/common/Options.py
|
AMHM/gem5-PhD
|
c1af9c50dcb3569f2d9bba9364de560260832d09
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Lisa Hsu
import m5
from m5.defines import buildEnv
from m5.objects import *
from common.Benchmarks import *
from common import CpuConfig
from common import MemConfig
from common import PlatformConfig
def _listCpuTypes(option, opt, value, parser):
CpuConfig.print_cpu_list()
sys.exit(0)
def _listMemTypes(option, opt, value, parser):
MemConfig.print_mem_list()
sys.exit(0)
def _listPlatformTypes(option, opt, value, parser):
PlatformConfig.print_platform_list()
sys.exit(0)
# Add the very basic options that work also in the case of the no ISA
# being used, and consequently no CPUs, but rather various types of
# testers and traffic generators.
def addNoISAOptions(parser):
parser.add_option("-n", "--num-cpus", type="int", default=1)
parser.add_option("--sys-voltage", action="store", type="string",
default='1.0V',
help = """Top-level voltage for blocks running at system
power supply""")
parser.add_option("--sys-clock", action="store", type="string",
default='1GHz',
help = """Top-level clock for blocks running at system
speed""")
# Memory Options
parser.add_option("--list-mem-types",
action="callback", callback=_listMemTypes,
help="List available memory types")
parser.add_option("--mem-type", type="choice", default="DDR3_1600_x64",
choices=MemConfig.mem_names(),
help = "type of memory to use")
parser.add_option("--mem-channels", type="int", default=1,
help = "number of memory channels")
parser.add_option("--mem-ranks", type="int", default=None,
help = "number of memory ranks per channel")
parser.add_option("--mem-size", action="store", type="string",
default="512MB",
help="Specify the physical memory size (single memory)")
parser.add_option("--memchecker", action="store_true")
# Cache Options
parser.add_option("--external-memory-system", type="string",
help="use external ports of this port_type for caches")
parser.add_option("--tlm-memory", type="string",
help="use external port for SystemC TLM cosimulation")
parser.add_option("--caches", action="store_true")
parser.add_option("--l2cache", action="store_true")
parser.add_option("--num-dirs", type="int", default=1)
parser.add_option("--num-l2caches", type="int", default=1)
parser.add_option("--num-l3caches", type="int", default=1)
parser.add_option("--l1d_size", type="string", default="64kB")
parser.add_option("--l1i_size", type="string", default="32kB")
parser.add_option("--l2_size", type="string", default="2MB")
parser.add_option("--l3_size", type="string", default="16MB")
parser.add_option("--l1d_assoc", type="int", default=2)
parser.add_option("--l1i_assoc", type="int", default=2)
parser.add_option("--l2_assoc", type="int", default=8)
parser.add_option("--l3_assoc", type="int", default=16)
parser.add_option("--cacheline_size", type="int", default=64)
parser.add_option("--cache_set_data", type="string", default="")
parser.add_option("--sttmram", type="string", default="")
# Enable Ruby
parser.add_option("--ruby", action="store_true")
# Run duration options
parser.add_option("-m", "--abs-max-tick", type="int", default=m5.MaxTick,
metavar="TICKS", help="Run to absolute simulated tick "
"specified including ticks from a restored checkpoint")
parser.add_option("--rel-max-tick", type="int", default=None,
metavar="TICKS", help="Simulate for specified number of"
" ticks relative to the simulation start tick (e.g. if "
"restoring a checkpoint)")
parser.add_option("--maxtime", type="float", default=None,
help="Run to the specified absolute simulated time in "
"seconds")
# Add common options that assume a non-NULL ISA.
def addCommonOptions(parser):
# start by adding the base options that do not assume an ISA
addNoISAOptions(parser)
# system options
parser.add_option("--list-cpu-types",
action="callback", callback=_listCpuTypes,
help="List available CPU types")
parser.add_option("--cpu-type", type="choice", default="atomic",
choices=CpuConfig.cpu_names(),
help = "type of cpu to run with")
parser.add_option("--checker", action="store_true");
parser.add_option("--cpu-clock", action="store", type="string",
default='2GHz',
help="Clock for blocks running at CPU speed")
parser.add_option("--smt", action="store_true", default=False,
help = """
Only used if multiple programs are specified. If true,
then the number of threads per cpu is same as the
number of programs.""")
parser.add_option("--elastic-trace-en", action="store_true",
help="""Enable capture of data dependency and instruction
fetch traces using elastic trace probe.""")
# Trace file paths input to trace probe in a capture simulation and input
# to Trace CPU in a replay simulation
parser.add_option("--inst-trace-file", action="store", type="string",
help="""Instruction fetch trace file input to
Elastic Trace probe in a capture simulation and
Trace CPU in a replay simulation""", default="")
parser.add_option("--data-trace-file", action="store", type="string",
help="""Data dependency trace file input to
Elastic Trace probe in a capture simulation and
Trace CPU in a replay simulation""", default="")
parser.add_option("-l", "--lpae", action="store_true")
parser.add_option("-V", "--virtualisation", action="store_true")
parser.add_option("--fastmem", action="store_true")
# dist-gem5 options
parser.add_option("--dist", action="store_true",
help="Parallel distributed gem5 simulation.")
parser.add_option("--dist-sync-on-pseudo-op", action="store_true",
help="Use a pseudo-op to start dist-gem5 synchronization.")
parser.add_option("--is-switch", action="store_true",
help="Select the network switch simulator process for a"\
"distributed gem5 run")
parser.add_option("--dist-rank", default=0, action="store", type="int",
help="Rank of this system within the dist gem5 run.")
parser.add_option("--dist-size", default=0, action="store", type="int",
help="Number of gem5 processes within the dist gem5 run.")
parser.add_option("--dist-server-name",
default="127.0.0.1",
action="store", type="string",
help="Name of the message server host\nDEFAULT: localhost")
parser.add_option("--dist-server-port",
default=2200,
action="store", type="int",
help="Message server listen port\nDEFAULT: 2200")
parser.add_option("--dist-sync-repeat",
default="0us",
action="store", type="string",
help="Repeat interval for synchronisation barriers among dist-gem5 processes\nDEFAULT: --ethernet-linkdelay")
parser.add_option("--dist-sync-start",
default="5200000000000t",
action="store", type="string",
help="Time to schedule the first dist synchronisation barrier\nDEFAULT:5200000000000t")
parser.add_option("--ethernet-linkspeed", default="10Gbps",
action="store", type="string",
help="Link speed in bps\nDEFAULT: 10Gbps")
parser.add_option("--ethernet-linkdelay", default="10us",
action="store", type="string",
help="Link delay in seconds\nDEFAULT: 10us")
# Run duration options
parser.add_option("-I", "--maxinsts", action="store", type="int",
default=None, help="""Total number of instructions to
simulate (default: run forever)""")
parser.add_option("--work-item-id", action="store", type="int",
help="the specific work id for exit & checkpointing")
parser.add_option("--num-work-ids", action="store", type="int",
help="Number of distinct work item types")
parser.add_option("--work-begin-cpu-id-exit", action="store", type="int",
help="exit when work starts on the specified cpu")
parser.add_option("--work-end-exit-count", action="store", type="int",
help="exit at specified work end count")
parser.add_option("--work-begin-exit-count", action="store", type="int",
help="exit at specified work begin count")
parser.add_option("--init-param", action="store", type="int", default=0,
help="""Parameter available in simulation with m5
initparam""")
parser.add_option("--initialize-only", action="store_true", default=False,
help="""Exit after initialization. Do not simulate time.
Useful when gem5 is run as a library.""")
# Simpoint options
parser.add_option("--simpoint-profile", action="store_true",
help="Enable basic block profiling for SimPoints")
parser.add_option("--simpoint-interval", type="int", default=10000000,
help="SimPoint interval in num of instructions")
parser.add_option("--take-simpoint-checkpoints", action="store", type="string",
help="<simpoint file,weight file,interval-length,warmup-length>")
parser.add_option("--restore-simpoint-checkpoint", action="store_true",
help="restore from a simpoint checkpoint taken with " +
"--take-simpoint-checkpoints")
# Checkpointing options
###Note that performing checkpointing via python script files will override
###checkpoint instructions built into binaries.
parser.add_option("--take-checkpoints", action="store", type="string",
help="<M,N> take checkpoints at tick M and every N ticks thereafter")
parser.add_option("--max-checkpoints", action="store", type="int",
help="the maximum number of checkpoints to drop", default=5)
parser.add_option("--checkpoint-dir", action="store", type="string",
help="Place all checkpoints in this absolute directory")
parser.add_option("-r", "--checkpoint-restore", action="store", type="int",
help="restore from checkpoint <N>")
parser.add_option("--checkpoint-at-end", action="store_true",
help="take a checkpoint at end of run")
parser.add_option("--work-begin-checkpoint-count", action="store", type="int",
help="checkpoint at specified work begin count")
parser.add_option("--work-end-checkpoint-count", action="store", type="int",
help="checkpoint at specified work end count")
parser.add_option("--work-cpus-checkpoint-count", action="store", type="int",
help="checkpoint and exit when active cpu count is reached")
parser.add_option("--restore-with-cpu", action="store", type="choice",
default="atomic", choices=CpuConfig.cpu_names(),
help = "cpu type for restoring from a checkpoint")
# CPU Switching - default switch model goes from a checkpoint
# to a timing simple CPU with caches to warm up, then to detailed CPU for
# data measurement
parser.add_option("--repeat-switch", action="store", type="int",
default=None,
help="switch back and forth between CPUs with period <N>")
parser.add_option("-s", "--standard-switch", action="store", type="int",
default=None,
help="switch from timing to Detailed CPU after warmup period of <N>")
parser.add_option("-p", "--prog-interval", type="str",
help="CPU Progress Interval")
# Fastforwarding and simpoint related materials
parser.add_option("-W", "--warmup-insts", action="store", type="int",
default=None,
help="Warmup period in total instructions (requires --standard-switch)")
parser.add_option("--bench", action="store", type="string", default=None,
help="base names for --take-checkpoint and --checkpoint-restore")
parser.add_option("-F", "--fast-forward", action="store", type="string",
default=None,
help="Number of instructions to fast forward before switching")
parser.add_option("-S", "--simpoint", action="store_true", default=False,
help="""Use workload simpoints as an instruction offset for
--checkpoint-restore or --take-checkpoint.""")
parser.add_option("--at-instruction", action="store_true", default=False,
help="""Treat value of --checkpoint-restore or --take-checkpoint as a
number of instructions.""")
parser.add_option("--spec-input", default="ref", type="choice",
choices=["ref", "test", "train", "smred", "mdred",
"lgred"],
help="Input set size for SPEC CPU2000 benchmarks.")
parser.add_option("--arm-iset", default="arm", type="choice",
choices=["arm", "thumb", "aarch64"],
help="ARM instruction set.")
def addSEOptions(parser):
# Benchmark options
parser.add_option("-c", "--cmd", default="",
help="The binary to run in syscall emulation mode.")
parser.add_option("-o", "--options", default="",
help="""The options to pass to the binary, use " "
around the entire string""")
parser.add_option("-e", "--env", default="",
help="Initialize workload environment from text file.")
parser.add_option("-i", "--input", default="",
help="Read stdin from a file.")
parser.add_option("--output", default="",
help="Redirect stdout to a file.")
parser.add_option("--errout", default="",
help="Redirect stderr to a file.")
def addFSOptions(parser):
from FSConfig import os_types
# Simulation options
parser.add_option("--timesync", action="store_true",
help="Prevent simulated time from getting ahead of real time")
# System options
parser.add_option("--kernel", action="store", type="string")
parser.add_option("--os-type", action="store", type="choice",
choices=os_types[buildEnv['TARGET_ISA']], default="linux",
help="Specifies type of OS to boot")
parser.add_option("--script", action="store", type="string")
parser.add_option("--frame-capture", action="store_true",
help="Stores changed frame buffers from the VNC server to compressed "\
"files in the gem5 output directory")
if buildEnv['TARGET_ISA'] == "arm":
parser.add_option("--bare-metal", action="store_true",
help="Provide the raw system without the linux specific bits")
parser.add_option("--list-machine-types",
action="callback", callback=_listPlatformTypes,
help="List available platform types")
parser.add_option("--machine-type", action="store", type="choice",
choices=PlatformConfig.platform_names(),
default="VExpress_EMM")
parser.add_option("--dtb-filename", action="store", type="string",
help="Specifies device tree blob file to use with device-tree-"\
"enabled kernels")
parser.add_option("--enable-context-switch-stats-dump", \
action="store_true", help="Enable stats dump at context "\
"switches and dump tasks file (required for Streamline)")
# Benchmark options
parser.add_option("--dual", action="store_true",
help="Simulate two systems attached with an ethernet link")
parser.add_option("-b", "--benchmark", action="store", type="string",
dest="benchmark",
help="Specify the benchmark to run. Available benchmarks: %s"\
% DefinedBenchmarks)
# Metafile options
parser.add_option("--etherdump", action="store", type="string", dest="etherdump",
help="Specify the filename to dump a pcap capture of the" \
"ethernet traffic")
# Disk Image Options
parser.add_option("--disk-image", action="store", type="string", default=None,
help="Path to the disk image to use.")
parser.add_option("--root-device", action="store", type="string", default=None,
help="OS device name for root partition")
# Command line options
parser.add_option("--command-line", action="store", type="string",
default=None,
help="Template for the kernel command line.")
parser.add_option("--command-line-file", action="store",
default=None, type="string",
help="File with a template for the kernel command line")
| 53.55496
| 131
| 0.621846
|
794a960100de33c9c7bb87c0c9b32b283a91d6a4
| 5,753
|
py
|
Python
|
utils/helpers.py
|
dgriffiths3/mononet3d
|
4505234025efaee727d4bb4bb3f013fee8dc65b4
|
[
"MIT"
] | 9
|
2020-08-24T17:48:33.000Z
|
2021-01-29T17:26:51.000Z
|
utils/helpers.py
|
dgriffiths3/mononet3d
|
4505234025efaee727d4bb4bb3f013fee8dc65b4
|
[
"MIT"
] | 1
|
2021-01-15T10:51:06.000Z
|
2021-01-21T10:39:15.000Z
|
utils/helpers.py
|
dgriffiths3/mononet3d
|
4505234025efaee727d4bb4bb3f013fee8dc65b4
|
[
"MIT"
] | 1
|
2021-02-18T14:06:16.000Z
|
2021-02-18T14:06:16.000Z
|
import os
import numpy as np
import progressbar
import pyvista as pv
import toml
import tensorflow as tf
import colorsys
class EvalProgBar():
def __init__(self):
self.widgets = [progressbar.FormatLabel('')]
self.bar = progressbar.ProgressBar(widgets=self.widgets)
self.bar.start(max_value=progressbar.UnknownLength)
def update(self, step, metrics):
self.widgets[0] = progressbar.FormatLabel(
'step: {}, AP: {:}, mAP: {:.2f}'.format(step, metrics[0], metrics[1])
)
self.bar.update(step, True)
def progbar(n):
bar = progressbar.ProgressBar(
maxval=n,
widgets=[
progressbar.Bar('=', '[', ']'), ' ',
progressbar.Percentage(), ' | ',
progressbar.SimpleProgress(), ' | ',
progressbar.AdaptiveETA()
]
)
return bar
def colormap(n_classes):
vals = np.linspace(0, 1, n_classes)
return np.array([colorsys.hsv_to_rgb(c, 0.8, 0.8) for c in vals])
def dump_config(cfg):
save_dir = os.path.join('./logs/{}'.format(cfg['std']['log_code']))
if not os.path.isdir(save_dir): os.makedirs(save_dir)
f = open(os.path.join(save_dir, 'config.toml'), "w")
s = toml.dumps(cfg)
f.write(s)
f.close()
def euc_dist(a, b):
return np.sqrt(np.sum((a - b)**2))
def bounding_box(pc):
bbox = [
np.min(pc[:, 0]), np.min(pc[:, 1]), np.min(pc[:, 2]),
np.max(pc[:, 0]), np.max(pc[:, 1]), np.max(pc[:, 2])
]
return np.array(bbox)
def bbox_overlap(pc_a, pc_b):
bbox_a = bounding_box(pc_a)
bbox_b = bounding_box(pc_b)
if (
bbox_a[3] >= bbox_b[0] and bbox_b[3] >= bbox_a[0] and
bbox_a[4] >= bbox_b[1] and bbox_b[4] >= bbox_a[1] and
bbox_a[5] >= bbox_b[2] and bbox_b[5] >= bbox_a[2]
):
overlap = True
else:
overlap = False
return overlap
def nonaa_box(b, theta, axis=1):
pts = np.array([
[b[0], b[1], b[2]],
[b[3], b[1], b[2]],
[b[0], b[1], b[5]],
[b[3], b[1], b[5]],
[b[0], b[4], b[2]],
[b[3], b[4], b[2]],
[b[0], b[4], b[5]],
[b[3], b[4], b[5]]
])
return rotate_euler(pts, theta, axis)
def make_lines(pts):
lines = [
pv.Line(pts[0], pts[1]), pv.Line(pts[1], pts[3]), pv.Line(pts[3], pts[2]), pv.Line(pts[2], pts[0]),
pv.Line(pts[0], pts[4]), pv.Line(pts[1], pts[5]), pv.Line(pts[3], pts[7]), pv.Line(pts[2], pts[6]),
pv.Line(pts[4], pts[6]), pv.Line(pts[6], pts[7]), pv.Line(pts[7], pts[5]), pv.Line(pts[5], pts[4])
]
return lines
def rotate_euler(pts, theta, axis=2, center=None):
    """Rotate `pts` by `theta` radians about the given axis (0=x, 1=y,
    2=z), around `center` (defaults to the centroid of `pts`)."""
c = np.cos(theta)
s = np.sin(theta)
if axis == 0:
R = np.array([[1., 0., 0.], [0., c, -s], [0., s, c]])
elif axis == 1:
R = np.array([[c, 0., s], [0., 1., 0.], [-s, 0., c]])
elif axis == 2:
R = np.array([[c, -s, 0.], [s, c, 0.], [0., 0., 1.]])
mean = np.mean(pts, axis=0) if center is None else center
    pts = pts - mean  # copy, so the caller's array is not shifted in place
pts = np.einsum('ij,kj->ki', R, pts)
pts += mean
return pts
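# Quick sanity check for rotate_euler, using hypothetical points: a
# 90-degree rotation about the z axis maps the x unit vector onto y.
def _rotate_euler_example():
    pts = np.array([[1., 0., 0.]])
    out = rotate_euler(pts, np.pi / 2, axis=2, center=np.zeros(3))
    return out  # approximately [[0., 1., 0.]]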
def get_fixed_pts(in_pts, n_pts):
out_pts = np.zeros((n_pts, 3))
ret = True
if in_pts.shape[0] == 0:
ret = False
elif in_pts.shape[0] < n_pts:
out_pts[0:in_pts.shape[0]] = in_pts
s_idx = np.arange(n_pts)
np.random.shuffle(s_idx)
out_pts = out_pts[s_idx]
else:
s_idx = np.arange(in_pts.shape[0])
np.random.shuffle(s_idx)
out_pts = in_pts[s_idx[0:n_pts]]
return ret, out_pts
def iou(a, b):
xx1 = np.maximum(a[0], b[0])
yy1 = np.maximum(a[1], b[1])
zz1 = np.maximum(a[2], b[2])
xx2 = np.minimum(a[3], b[3])
yy2 = np.minimum(a[4], b[4])
zz2 = np.minimum(a[5], b[5])
w = np.maximum(0.0, xx2 - xx1)
h = np.maximum(0.0, yy2 - yy1)
d = np.maximum(0.0, zz2 - zz1)
inter = w * h * d
area_a = (a[3] - a[0]) * (a[4] - a[1]) * (a[5] - a[2])
area_b = (b[3] - b[0]) * (b[4] - b[1]) * (b[5] - b[2])
return inter / float(area_a + area_b - inter)
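# Worked example for iou with hypothetical unit cubes: the boxes share
# half a cube of volume, so IoU = 0.5 / (1 + 1 - 0.5) = 1/3.
def _iou_example():
    a = np.array([0., 0., 0., 1., 1., 1.])
    b = np.array([0.5, 0., 0., 1.5, 1., 1.])
    return iou(a, b)  # ~0.333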
def nms(boxes, scores, max_out=100, iou_thresh=0.25):
"""
Code adapted from : https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/nms/py_cpu_nms.py
"""
x1 = boxes[:, 0]
y1 = boxes[:, 1]
z1 = boxes[:, 2]
x2 = boxes[:, 3]
y2 = boxes[:, 4]
z2 = boxes[:, 5]
keep_inds = []
if scores.shape[0] > 0:
order = np.argsort(-scores, axis=0)
areas = (x2 - x1) * (y2 - y1) * (z2 - z1)
num_in = 0
while order.size > 0:
if num_in == max_out: break
i = order[0]
keep_inds.append(i[0])
num_in += 1
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
zz1 = np.maximum(z1[i], z1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
zz2 = np.minimum(z2[i], z2[order[1:]])
w = np.maximum(0.0, xx2 - xx1)
h = np.maximum(0.0, yy2 - yy1)
d = np.maximum(0.0, zz2 - zz1)
inter = w * h * d
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= iou_thresh)[0]
order = order[inds + 1]
return keep_inds
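# Usage sketch for nms with hypothetical detections; scores are shaped
# (N, 1) to match the `i[0]` indexing in the loop above.
def _nms_example():
    boxes = np.array([[0.00, 0., 0., 1.00, 1., 1.],
                      [0.05, 0., 0., 1.05, 1., 1.],   # near-duplicate of box 0
                      [5.00, 5., 5., 6.00, 6., 6.]])  # disjoint box
    scores = np.array([[0.9], [0.8], [0.7]])
    return nms(boxes, scores, iou_thresh=0.25)  # -> [0, 2]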
def kitti_scenenet_to_oabb(center, attr):
extent = attr[:, :3]
theta = np.arctan2(attr[:, 4], attr[:, 3])
x_mins = center[:, 0, None] - (extent[:, 2, None]/2.)
x_maxs = center[:, 0, None] + (extent[:, 2, None]/2.)
y_mins = center[:, 1, None]
y_maxs = center[:, 1, None] - (extent[:, 0, None])
z_mins = center[:, 2, None] - (extent[:, 1, None]/2.)
z_maxs = center[:, 2, None] + (extent[:, 1, None]/2.)
boxes = np.concatenate(
[x_mins, y_mins, z_mins, x_maxs, y_maxs, z_maxs]
, 1)
nonaa_boxes = np.zeros((boxes.shape[0], 8, 3))
for i, b in enumerate(boxes):
nonaa_boxes[i] = nonaa_box(b, theta[i])
return nonaa_boxes
| 20.620072
| 101
| 0.58665
|
794a96be12ba6650ac283118aefe6a48f04659f5
| 12,121
|
py
|
Python
|
sdks/python/apache_beam/runners/direct/direct_runner.py
|
acidburn0zzz/beam
|
92540d0ecd98125e4f6fe13917dca46a77af52f0
|
[
"Apache-2.0"
] | 3
|
2018-12-04T14:44:37.000Z
|
2021-07-07T09:23:54.000Z
|
sdks/python/apache_beam/runners/direct/direct_runner.py
|
acidburn0zzz/beam
|
92540d0ecd98125e4f6fe13917dca46a77af52f0
|
[
"Apache-2.0"
] | 13
|
2019-11-13T03:56:36.000Z
|
2021-12-14T21:12:07.000Z
|
sdks/python/apache_beam/runners/direct/direct_runner.py
|
permutive/beam
|
13d7c5c54daacd4536b533c072f72e80e5e86c78
|
[
"Apache-2.0"
] | 1
|
2019-02-07T19:20:22.000Z
|
2019-02-07T19:20:22.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""DirectRunner, executing on the local machine.
The DirectRunner is a runner implementation that executes the entire
graph of transformations belonging to a pipeline on the local machine.
"""
from __future__ import absolute_import
import collections
import logging
from google.protobuf import wrappers_pb2
import apache_beam as beam
from apache_beam import typehints
from apache_beam.metrics.execution import MetricsEnvironment
from apache_beam.options.pipeline_options import DirectOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.value_provider import RuntimeValueProvider
from apache_beam.pvalue import PCollection
from apache_beam.runners.direct.bundle_factory import BundleFactory
from apache_beam.runners.runner import PipelineResult
from apache_beam.runners.runner import PipelineRunner
from apache_beam.runners.runner import PipelineState
from apache_beam.runners.runner import PValueCache
from apache_beam.transforms.core import _GroupAlsoByWindow
from apache_beam.transforms.core import _GroupByKeyOnly
from apache_beam.transforms.ptransform import PTransform
__all__ = ['DirectRunner']
# Type variables.
K = typehints.TypeVariable('K')
V = typehints.TypeVariable('V')
@typehints.with_input_types(typehints.KV[K, V])
@typehints.with_output_types(typehints.KV[K, typehints.Iterable[V]])
class _StreamingGroupByKeyOnly(_GroupByKeyOnly):
"""Streaming GroupByKeyOnly placeholder for overriding in DirectRunner."""
urn = "direct_runner:streaming_gbko:v0.1"
# These are needed due to apply overloads.
def to_runner_api_parameter(self, unused_context):
return _StreamingGroupByKeyOnly.urn, None
@PTransform.register_urn(urn, None)
def from_runner_api_parameter(unused_payload, unused_context):
return _StreamingGroupByKeyOnly()
@typehints.with_input_types(typehints.KV[K, typehints.Iterable[V]])
@typehints.with_output_types(typehints.KV[K, typehints.Iterable[V]])
class _StreamingGroupAlsoByWindow(_GroupAlsoByWindow):
"""Streaming GroupAlsoByWindow placeholder for overriding in DirectRunner."""
urn = "direct_runner:streaming_gabw:v0.1"
# These are needed due to apply overloads.
def to_runner_api_parameter(self, context):
return (
_StreamingGroupAlsoByWindow.urn,
wrappers_pb2.BytesValue(value=context.windowing_strategies.get_id(
self.windowing)))
@PTransform.register_urn(urn, wrappers_pb2.BytesValue)
def from_runner_api_parameter(payload, context):
return _StreamingGroupAlsoByWindow(
context.windowing_strategies.get_by_id(payload.value))
class DirectRunner(PipelineRunner):
"""Executes a single pipeline on the local machine."""
# A list of PTransformOverride objects to be applied before running a pipeline
# using DirectRunner.
# Currently this only works for overrides where the input and output types do
# not change.
# For internal SDK use only. This should not be updated by Beam pipeline
# authors.
_PTRANSFORM_OVERRIDES = []
def __init__(self):
self._cache = None
def apply_CombinePerKey(self, transform, pcoll):
    # TODO: Move imports to top. The Pipeline <-> Runner dependency causes
    # problems with resolving imports when they are at the top.
# pylint: disable=wrong-import-position
from apache_beam.runners.direct.helper_transforms import LiftedCombinePerKey
try:
return pcoll | LiftedCombinePerKey(
transform.fn, transform.args, transform.kwargs)
except NotImplementedError:
return transform.expand(pcoll)
def apply__GroupByKeyOnly(self, transform, pcoll):
if (transform.__class__ == _GroupByKeyOnly and
pcoll.pipeline._options.view_as(StandardOptions).streaming):
# Use specialized streaming implementation, if requested.
type_hints = transform.get_type_hints()
return pcoll | (_StreamingGroupByKeyOnly()
.with_input_types(*type_hints.input_types[0])
.with_output_types(*type_hints.output_types[0]))
return transform.expand(pcoll)
def apply__GroupAlsoByWindow(self, transform, pcoll):
if (transform.__class__ == _GroupAlsoByWindow and
pcoll.pipeline._options.view_as(StandardOptions).streaming):
# Use specialized streaming implementation, if requested.
type_hints = transform.get_type_hints()
return pcoll | (_StreamingGroupAlsoByWindow(transform.windowing)
.with_input_types(*type_hints.input_types[0])
.with_output_types(*type_hints.output_types[0]))
return transform.expand(pcoll)
def apply_ReadStringsFromPubSub(self, transform, pcoll):
try:
from google.cloud import pubsub as unused_pubsub
except ImportError:
raise ImportError('Google Cloud PubSub not available, please install '
'apache_beam[gcp]')
# Execute this as a native transform.
output = PCollection(pcoll.pipeline)
output.element_type = unicode
return output
def apply_WriteStringsToPubSub(self, transform, pcoll):
try:
from google.cloud import pubsub
except ImportError:
raise ImportError('Google Cloud PubSub not available, please install '
'apache_beam[gcp]')
project = transform._sink.project
topic_name = transform._sink.topic_name
class DirectWriteToPubSub(beam.DoFn):
_topic = None
def __init__(self, project, topic_name):
self.project = project
self.topic_name = topic_name
def start_bundle(self):
if self._topic is None:
self._topic = pubsub.Client(project=self.project).topic(
self.topic_name)
self._buffer = []
def process(self, elem):
self._buffer.append(elem.encode('utf-8'))
if len(self._buffer) >= 100:
self._flush()
def finish_bundle(self):
self._flush()
def _flush(self):
if self._buffer:
with self._topic.batch() as batch:
for datum in self._buffer:
batch.publish(datum)
self._buffer = []
output = pcoll | beam.ParDo(DirectWriteToPubSub(project, topic_name))
output.element_type = unicode
return output
def run(self, pipeline):
"""Execute the entire pipeline and returns an DirectPipelineResult."""
# Performing configured PTransform overrides.
pipeline.replace_all(DirectRunner._PTRANSFORM_OVERRIDES)
    # TODO: Move imports to top. The Pipeline <-> Runner dependency causes
    # problems with resolving imports when they are at the top.
# pylint: disable=wrong-import-position
from apache_beam.runners.direct.consumer_tracking_pipeline_visitor import \
ConsumerTrackingPipelineVisitor
from apache_beam.runners.direct.evaluation_context import EvaluationContext
from apache_beam.runners.direct.executor import Executor
from apache_beam.runners.direct.transform_evaluator import \
TransformEvaluatorRegistry
MetricsEnvironment.set_metrics_supported(True)
logging.info('Running pipeline with DirectRunner.')
self.consumer_tracking_visitor = ConsumerTrackingPipelineVisitor()
pipeline.visit(self.consumer_tracking_visitor)
evaluation_context = EvaluationContext(
pipeline._options,
BundleFactory(stacked=pipeline._options.view_as(DirectOptions)
.direct_runner_use_stacked_bundle),
self.consumer_tracking_visitor.root_transforms,
self.consumer_tracking_visitor.value_to_consumers,
self.consumer_tracking_visitor.step_names,
self.consumer_tracking_visitor.views)
evaluation_context.use_pvalue_cache(self._cache)
executor = Executor(self.consumer_tracking_visitor.value_to_consumers,
TransformEvaluatorRegistry(evaluation_context),
evaluation_context)
# DirectRunner does not support injecting
# PipelineOptions values at runtime
RuntimeValueProvider.set_runtime_options({})
# Start the executor. This is a non-blocking call, it will start the
# execution in background threads and return.
executor.start(self.consumer_tracking_visitor.root_transforms)
result = DirectPipelineResult(executor, evaluation_context)
if self._cache:
# We are running in eager mode, block until the pipeline execution
# completes in order to have full results in the cache.
result.wait_until_finish()
self._cache.finalize()
return result
@property
def cache(self):
if not self._cache:
self._cache = BufferingInMemoryCache()
return self._cache.pvalue_cache
class BufferingInMemoryCache(object):
"""PValueCache wrapper for buffering bundles until a PValue is fully computed.
BufferingInMemoryCache keeps an in memory cache of
(applied_ptransform, tag) tuples. It accepts appending to existing cache
entries until it is finalized. finalize() will make all the existing cached
  entries visible to the underlying PValueCache in their entirety, clean the in
memory cache and stop accepting new cache entries.
"""
def __init__(self):
self._cache = collections.defaultdict(list)
self._pvalue_cache = PValueCache()
self._finalized = False
@property
def pvalue_cache(self):
return self._pvalue_cache
def append(self, applied_ptransform, tag, elements):
assert not self._finalized
assert elements is not None
self._cache[(applied_ptransform, tag)].extend(elements)
def finalize(self):
"""Make buffered cache elements visible to the underlying PValueCache."""
assert not self._finalized
for key, value in self._cache.iteritems():
applied_ptransform, tag = key
self._pvalue_cache.cache_output(applied_ptransform, tag, value)
self._cache = None
class DirectPipelineResult(PipelineResult):
"""A DirectPipelineResult provides access to info about a pipeline."""
def __init__(self, executor, evaluation_context):
super(DirectPipelineResult, self).__init__(PipelineState.RUNNING)
self._executor = executor
self._evaluation_context = evaluation_context
def __del__(self):
if self._state == PipelineState.RUNNING:
logging.warning(
'The DirectPipelineResult is being garbage-collected while the '
'DirectRunner is still running the corresponding pipeline. This may '
'lead to incomplete execution of the pipeline if the main thread '
'exits before pipeline completion. Consider using '
'result.wait_until_finish() to wait for completion of pipeline '
'execution.')
def _is_in_terminal_state(self):
return self._state is not PipelineState.RUNNING
def wait_until_finish(self, duration=None):
if not self._is_in_terminal_state():
if duration:
raise NotImplementedError(
'DirectRunner does not support duration argument.')
try:
self._executor.await_completion()
self._state = PipelineState.DONE
except: # pylint: disable=broad-except
self._state = PipelineState.FAILED
raise
return self._state
def aggregated_values(self, aggregator_or_name):
return self._evaluation_context.get_aggregator_values(aggregator_or_name)
def metrics(self):
return self._evaluation_context.metrics()
class EagerRunner(DirectRunner):
is_eager = True
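# A minimal sketch of driving the runner above, assuming the
# (Python 2-era) Beam API this module targets; the element values are
# hypothetical.
def _direct_runner_example():
  p = beam.Pipeline(runner=DirectRunner())
  _ = (p
       | beam.Create([('a', 1), ('a', 2), ('b', 3)])
       | beam.GroupByKey())
  result = p.run()
  result.wait_until_finish()
  return result.state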
| 37.760125
| 80
| 0.742678
|
794a97b7185b91f8d5a500a2cad6bebfbdd81a38
| 1,477
|
py
|
Python
|
tests/conftest.py
|
epics-containers/ibek
|
0c021e0b3221051a2dea263e8d907bcace243084
|
[
"Apache-2.0"
] | 5
|
2021-09-08T11:03:29.000Z
|
2022-02-28T22:59:17.000Z
|
tests/conftest.py
|
epics-containers/ibek
|
0c021e0b3221051a2dea263e8d907bcace243084
|
[
"Apache-2.0"
] | 7
|
2021-09-08T18:21:42.000Z
|
2022-03-30T12:07:40.000Z
|
tests/conftest.py
|
epics-containers/ibek
|
0c021e0b3221051a2dea263e8d907bcace243084
|
[
"Apache-2.0"
] | null | null | null |
from pathlib import Path
from pytest import fixture
from ruamel.yaml import YAML
from ibek.ioc import clear_entity_classes, make_entity_classes
from ibek.support import Support
def get_support(samples: Path, yaml_file: str) -> Support:
"""
Get a support object from the sample YAML directory
"""
# load from file
d = YAML(typ="safe").load(samples / "yaml" / f"{yaml_file}")
# create a support object from that dict
support = Support.deserialize(d)
return support
@fixture
def samples():
return Path(__file__).parent / "samples"
@fixture
def pmac_support(samples):
return get_support(samples, "pmac.ibek.yaml")
@fixture
def pmac_classes(pmac_support):
# clear the entity classes to make sure there's nothing left
clear_entity_classes()
# make entity subclasses for everything defined in it
namespace = make_entity_classes(pmac_support)
# return the namespace so that callers have access to all of the
# generated dataclasses
return namespace
@fixture
def epics_support(samples):
return get_support(samples, "epics.ibek.yaml")
@fixture
def epics_classes(epics_support):
# clear the entity classes to make sure there's nothing left
clear_entity_classes()
# make entity subclasses for everything defined in it
namespace = make_entity_classes(epics_support)
# return the namespace so that callers have access to all of the
# generated dataclasses
return namespace
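# A sketch of how a test might consume the fixtures above; the test
# body is hypothetical (real tests live alongside this conftest).
def test_pmac_classes_namespace(pmac_classes):
    assert pmac_classes is not None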
| 24.616667
| 68
| 0.735274
|
794a9a39f9b4ab873a7413b54047aec8e0b8021d
| 1,927
|
py
|
Python
|
data_processing/dataentities.py
|
stbman/reactivegumi
|
b85edcbbcdc65f2d95ceb7bc490da0ff78c0cf5e
|
[
"MIT"
] | null | null | null |
data_processing/dataentities.py
|
stbman/reactivegumi
|
b85edcbbcdc65f2d95ceb7bc490da0ff78c0cf5e
|
[
"MIT"
] | null | null | null |
data_processing/dataentities.py
|
stbman/reactivegumi
|
b85edcbbcdc65f2d95ceb7bc490da0ff78c0cf5e
|
[
"MIT"
] | null | null | null |
"""
Test script to process entities
"""
import pandas
import numpy as np
filename = 'groups_102523307031776_23-05-2018-15-02-44.tsv'
df = pandas.read_csv(filename, sep='\t')
df['Timestamp String'] = df['Timestamp']
df['Timestamp'] = pandas.to_datetime(df['Timestamp'])
df['User Content'] = df['User Content'].replace(np.nan, '')
df['Link Content'] = df['Link Content'].replace(np.nan, '')
df['keywords'] = df['keywords'].replace(np.nan, '')
### Global
keyword_dict = {}
keyword_list = list(df['keywords'].values)
for keyword_group in keyword_list:
keyword_group_array = keyword_group.split(',')
for keyword in keyword_group_array:
if keyword in keyword_dict:
keyword_dict[keyword] = keyword_dict[keyword] + 1
else:
keyword_dict[keyword] = 1
keyword_sorted = sorted(keyword_dict.items(), key=lambda x: x[1], reverse=True)
# Get top n
n = 5
for i in range(0, n):
keyword = keyword_sorted[i][0]
count = keyword_sorted[i][1]
df_keyword = df[df['keywords'].str.contains(keyword)]
unique_people = df_keyword['Profile Name'].value_counts()
unique_people_string = ""
for idx in unique_people.index:
unique_people_string += idx + " (" + str(unique_people[idx]) + " posts), "
### People Centric
# Get people's entities
df_profile_groups = df.groupby('Profile Name')['keywords'].agg(lambda keywords: ''.join(keywords))
for idx in df_profile_groups.index:
profile_name = idx
profile_keywords = df_profile_groups[idx]
profile_keywords_dict = {}
profile_keywords_array = profile_keywords.split(',')
for keyword in profile_keywords_array:
if keyword in profile_keywords_dict:
profile_keywords_dict[keyword] = profile_keywords_dict[keyword] + 1
else:
profile_keywords_dict[keyword] = 1
profile_keywords_sorted = sorted(profile_keywords_dict.items(), key=lambda x: x[1], reverse=True)
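# The frequency counting above (global and per profile) can be written
# more compactly with collections.Counter; a sketch over the same data:
from collections import Counter
keyword_counter = Counter(
    k for group in keyword_list for k in group.split(','))
# keyword_counter.most_common(n) mirrors keyword_sorted[:n]
# (tie ordering may differ).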
| 31.590164
| 101
| 0.689673
|
794a9c40409addb1eb8cd14830361178095df064
| 2,481
|
py
|
Python
|
configs/types/contribution_types.py
|
iPr0ger/mdr-fastapi
|
5ac3c4dc254969caa095c01ef629add1c5b72c49
|
[
"MIT"
] | null | null | null |
configs/types/contribution_types.py
|
iPr0ger/mdr-fastapi
|
5ac3c4dc254969caa095c01ef629add1c5b72c49
|
[
"MIT"
] | null | null | null |
configs/types/contribution_types.py
|
iPr0ger/mdr-fastapi
|
5ac3c4dc254969caa095c01ef629add1c5b72c49
|
[
"MIT"
] | null | null | null |
CONTRIBUTION_TYPES = (
{
"id": 11,
"name": "Creator"
},
{
"id": 12,
"name": "Contact person"
},
{
"id": 13,
"name": "Data collector"
},
{
"id": 14,
"name": "Data curator"
},
{
"id": 15,
"name": "Data manager"
},
{
"id": 16,
"name": "Distributor"
},
{
"id": 17,
"name": "Editor"
},
{
"id": 18,
"name": "Hosting Institution"
},
{
"id": 19,
"name": "Producer"
},
{
"id": 20,
"name": "Project leader"
},
{
"id": 21,
"name": "Project manager"
},
{
"id": 22,
"name": "Project member"
},
{
"id": 23,
"name": "Registration agency"
},
{
"id": 24,
"name": "Registration authority"
},
{
"id": 25,
"name": "Related person"
},
{
"id": 26,
"name": "Researcher"
},
{
"id": 27,
"name": "Research group"
},
{
"id": 28,
"name": "Rights holder"
},
{
"id": 29,
"name": "Sponsor"
},
{
"id": 30,
"name": "Supervisor"
},
{
"id": 31,
"name": "Work package leader"
},
{
"id": 51,
"name": "Study lead"
},
{
"id": 52,
"name": "CT site principal investigator"
},
{
"id": 53,
"name": "Clinical study manager"
},
{
"id": 54,
"name": "Trial sponsor"
},
{
"id": 55,
"name": "Sponsor contact"
},
{
"id": 56,
"name": "Public contact"
},
{
"id": 57,
"name": "Recruitment contact"
},
{
"id": 58,
"name": "Study funder"
},
{
"id": 59,
"name": "Funder contact"
},
{
"id": 60,
"name": "Independent monitoring committee member"
},
{
"id": 61,
"name": "Medicinal product supplier"
},
{
"id": 62,
"name": "Medical device supplier"
},
{
"id": 63,
"name": "Logistics support organisation"
},
{
"id": 64,
"name": "Scientific support organisation"
},
{
"id": 65,
"name": "Central laboratory"
},
{
"id": 66,
"name": "Central imaging facility"
},
{
"id": 67,
"name": "Clinical organisation"
},
{
"id": 68,
"name": "Clinical site"
},
{
"id": 69,
"name": "Collaborating organisation"
},
{
"id": 70,
"name": "Sponsor-investigator"
},
{
"id": 71,
"name": "Results contact"
},
{
"id": 72,
"name": "Research group member"
},
{
"id": 90,
"name": "Other"
},
{
"id": 0,
"name": "Not yet known"
}
)
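# A small lookup sketch built on the table above; the ids queried here
# are taken from the entries themselves.
CONTRIBUTION_TYPE_BY_ID = {
    entry["id"]: entry["name"] for entry in CONTRIBUTION_TYPES
}
assert CONTRIBUTION_TYPE_BY_ID[11] == "Creator"
assert CONTRIBUTION_TYPE_BY_ID[0] == "Not yet known"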
| 13.557377
| 53
| 0.433293
|
794a9c5e226966d45d3659d4c22161da565ae1be
| 2,703
|
py
|
Python
|
pytorch_lightning/plugins/training_type/ddp2.py
|
lemairecarl/pytorch-lightning
|
85304d4672a9ed24a16f7f5b2abaa34148ab86f4
|
[
"Apache-2.0"
] | null | null | null |
pytorch_lightning/plugins/training_type/ddp2.py
|
lemairecarl/pytorch-lightning
|
85304d4672a9ed24a16f7f5b2abaa34148ab86f4
|
[
"Apache-2.0"
] | null | null | null |
pytorch_lightning/plugins/training_type/ddp2.py
|
lemairecarl/pytorch-lightning
|
85304d4672a9ed24a16f7f5b2abaa34148ab86f4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from pytorch_lightning.plugins.training_type.ddp import DDPStrategy
from pytorch_lightning.utilities.apply_func import apply_to_collection
from pytorch_lightning.utilities.enums import _StrategyType
from pytorch_lightning.utilities.types import _METRIC_COLLECTION
class DDP2Strategy(DDPStrategy):
"""DDP2 behaves like DP in one node, but synchronization across nodes behaves like in DDP."""
distributed_backend = _StrategyType.DDP2
@property
def global_rank(self) -> int:
return self.node_rank
@property
def world_size(self) -> int:
return self.num_nodes
def reduce(self, collection: _METRIC_COLLECTION, *args, **kwargs) -> _METRIC_COLLECTION:
"""Reduces a collection of tensors from all processes. It can be applied to just a single tensor. In DDP2,
the reduction here is only across local devices within the node.
Args:
collection: The collection of tensors to sync and reduce.
*args: ignored for DDP2
**kwargs: ignored for DDP2
Return:
            The reduced tensor values, or the input unchanged if it was not (and did not contain) a tensor.
"""
def mean(t: torch.Tensor) -> torch.Tensor:
original_dtype = t.dtype
return t.float().mean().to(original_dtype)
return apply_to_collection(collection, torch.Tensor, mean)
@property
def root_device(self):
return self.parallel_devices[0]
def model_to_device(self):
# no need to do anything when model is wrapped in torch.nn.DataParallel
pass
@property
def distributed_sampler_kwargs(self):
distributed_sampler_kwargs = dict(num_replicas=self.num_nodes, rank=self.global_rank)
return distributed_sampler_kwargs
@property
def _is_single_process_single_device(self) -> bool:
return False
def set_world_ranks(self) -> None:
if self.cluster_environment is None:
return
self.cluster_environment.set_global_rank(self.node_rank)
self.cluster_environment.set_world_size(self.num_nodes)
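# A standalone sketch of the reduce() semantics above: tensors inside a
# metric collection are mean-reduced, non-tensors pass through
# untouched. The metric values are hypothetical.
def _reduce_example():
    metrics = {"loss": torch.tensor([1.0, 3.0]), "step": 7}
    def mean(t: torch.Tensor) -> torch.Tensor:
        return t.float().mean().to(t.dtype)
    # -> {"loss": tensor(2.), "step": 7}
    return apply_to_collection(metrics, torch.Tensor, mean)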
| 35.565789
| 114
| 0.715131
|
794a9c75ef92ad0e6c4b5024bfcf2bb234a97376
| 16,170
|
py
|
Python
|
circus/tests/support.py
|
sphuber/circus
|
da45a601001954082ed270d130922d9ce46b983c
|
[
"Apache-2.0"
] | null | null | null |
circus/tests/support.py
|
sphuber/circus
|
da45a601001954082ed270d130922d9ce46b983c
|
[
"Apache-2.0"
] | null | null | null |
circus/tests/support.py
|
sphuber/circus
|
da45a601001954082ed270d130922d9ce46b983c
|
[
"Apache-2.0"
] | null | null | null |
from tempfile import mkstemp, mkdtemp
import os
import signal
import sys
from time import time, sleep
from collections import defaultdict
import cProfile
import pstats
import shutil
import functools
import multiprocessing
import socket
try:
import sysconfig
DEBUG = sysconfig.get_config_var('Py_DEBUG') == 1
except ImportError:
    # py2.6, we don't really care about that flag here
# since no one will run Python --with-pydebug in 2.6
DEBUG = 0
try:
from unittest import skip, skipIf, TestCase, TestSuite, findTestCases
except ImportError:
from unittest2 import skip, skipIf, TestCase, TestSuite # NOQA
from unittest2 import findTestCases # NOQA
from tornado.testing import AsyncTestCase
from zmq.eventloop import ioloop
import mock
import tornado
from circus import get_arbiter
from circus.client import AsyncCircusClient, make_message
from circus.util import DEFAULT_ENDPOINT_DEALER, DEFAULT_ENDPOINT_SUB
from circus.util import tornado_sleep, ConflictError
from circus.util import IS_WINDOWS
from circus.watcher import Watcher
ioloop.install()
if 'ASYNC_TEST_TIMEOUT' not in os.environ:
os.environ['ASYNC_TEST_TIMEOUT'] = '30'
class EasyTestSuite(TestSuite):
def __init__(self, name):
try:
super(EasyTestSuite, self).__init__(
findTestCases(sys.modules[name]))
except KeyError:
pass
PYTHON = sys.executable
# Script used to sleep for a specified amount of seconds.
# Should be used instead of the 'sleep' command for
# compatibility
SLEEP = PYTHON + " -c 'import time;time.sleep(%d)'"
def get_ioloop():
from zmq.eventloop.ioloop import ZMQPoller
from zmq.eventloop.ioloop import ZMQError, ETERM
from tornado.ioloop import PollIOLoop
class DebugPoller(ZMQPoller):
def __init__(self):
super(DebugPoller, self).__init__()
self._fds = []
def register(self, fd, events):
if fd not in self._fds:
self._fds.append(fd)
return self._poller.register(fd, self._map_events(events))
def modify(self, fd, events):
if fd not in self._fds:
self._fds.append(fd)
return self._poller.modify(fd, self._map_events(events))
def unregister(self, fd):
if fd in self._fds:
self._fds.remove(fd)
return self._poller.unregister(fd)
def poll(self, timeout):
"""
            #737 - For some reason the poller issues events for
            nonexistent FDs, usually with big ints. We have not yet found
            the reason for this behavior, which happens only during the
            tests, but filtering those events out makes everything work
            fine.
"""
z_events = self._poller.poll(1000*timeout)
return [(fd, self._remap_events(evt)) for fd, evt in z_events
if fd in self._fds]
class DebugLoop(PollIOLoop):
def initialize(self, **kwargs):
PollIOLoop.initialize(self, impl=DebugPoller(), **kwargs)
def handle_callback_exception(self, callback):
exc_type, exc_value, tb = sys.exc_info()
raise exc_value
@staticmethod
def instance():
PollIOLoop.configure(DebugLoop)
return PollIOLoop.instance()
def start(self):
try:
super(DebugLoop, self).start()
except ZMQError as e:
if e.errno == ETERM:
# quietly return on ETERM
pass
else:
raise e
from tornado import ioloop
ioloop.IOLoop.configure(DebugLoop)
return ioloop.IOLoop.instance()
def get_available_port():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(("", 0))
return s.getsockname()[1]
finally:
s.close()
class MockWatcher(Watcher):
def start(self):
self.started = True
def spawn_process(self):
self.processes[1] = 'dummy'
class TestCircus(AsyncTestCase):
arbiter_factory = get_arbiter
arbiters = []
def setUp(self):
super(TestCircus, self).setUp()
self.files = []
self.dirs = []
self.tmpfiles = []
self._clients = {}
self.plugins = []
@property
def cli(self):
if self.arbiters == []:
# nothing is running
raise Exception("nothing is running")
endpoint = self.arbiters[-1].endpoint
if endpoint in self._clients:
return self._clients[endpoint]
cli = AsyncCircusClient(endpoint=endpoint)
self._clients[endpoint] = cli
return cli
def _stop_clients(self):
for client in self._clients.values():
client.stop()
self._clients.clear()
def get_new_ioloop(self):
return get_ioloop()
def tearDown(self):
for file in self.files + self.tmpfiles:
try:
os.remove(file)
except OSError:
pass
for dir in self.dirs:
try:
shutil.rmtree(dir)
except OSError:
pass
self._stop_clients()
for plugin in self.plugins:
plugin.stop()
for arbiter in self.arbiters:
if arbiter.running:
try:
arbiter.stop()
except ConflictError:
pass
self.arbiters = []
super(TestCircus, self).tearDown()
def make_plugin(self, klass, endpoint=DEFAULT_ENDPOINT_DEALER,
sub=DEFAULT_ENDPOINT_SUB, check_delay=1,
**config):
config['active'] = True
plugin = klass(endpoint, sub, check_delay, None, **config)
self.plugins.append(plugin)
return plugin
@tornado.gen.coroutine
def start_arbiter(self, cmd='support.run_process',
stdout_stream=None, debug=True, **kw):
testfile, arbiter = self._create_circus(
cmd, stdout_stream=stdout_stream,
debug=debug, use_async=True, **kw)
self.test_file = testfile
self.arbiter = arbiter
self.arbiters.append(arbiter)
yield self.arbiter.start()
@tornado.gen.coroutine
def stop_arbiter(self):
for watcher in self.arbiter.iter_watchers():
yield self.arbiter.rm_watcher(watcher.name)
yield self.arbiter._emergency_stop()
@tornado.gen.coroutine
def status(self, cmd, **props):
resp = yield self.call(cmd, **props)
raise tornado.gen.Return(resp.get('status'))
@tornado.gen.coroutine
def numwatchers(self, cmd, **props):
resp = yield self.call(cmd, waiting=True, **props)
raise tornado.gen.Return(resp.get('numprocesses'))
@tornado.gen.coroutine
def numprocesses(self, cmd, **props):
resp = yield self.call(cmd, waiting=True, **props)
raise tornado.gen.Return(resp.get('numprocesses'))
@tornado.gen.coroutine
def pids(self):
resp = yield self.call('list', name='test')
raise tornado.gen.Return(resp.get('pids'))
def get_tmpdir(self):
dir_ = mkdtemp()
self.dirs.append(dir_)
return dir_
def get_tmpfile(self, content=None):
fd, file = mkstemp()
os.close(fd)
self.tmpfiles.append(file)
if content is not None:
with open(file, 'w') as f:
f.write(content)
return file
@classmethod
def _create_circus(cls, callable_path, plugins=None, stats=False,
use_async=False, arbiter_kw=None, **kw):
fd, testfile = mkstemp()
os.close(fd)
wdir = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.realpath(__file__))))
args = ['circus/tests/generic.py', callable_path, testfile]
worker = {'cmd': PYTHON, 'args': args, 'working_dir': wdir,
'name': 'test', 'graceful_timeout': 2}
worker.update(kw)
if not arbiter_kw:
arbiter_kw = {}
debug = arbiter_kw['debug'] = kw.get('debug',
arbiter_kw.get('debug', False))
# -1 => no periodic callback to manage_watchers by default
arbiter_kw['check_delay'] = kw.get('check_delay',
arbiter_kw.get('check_delay', -1))
_gp = get_available_port
arbiter_kw['controller'] = "tcp://127.0.0.1:%d" % _gp()
arbiter_kw['pubsub_endpoint'] = "tcp://127.0.0.1:%d" % _gp()
arbiter_kw['multicast_endpoint'] = "udp://237.219.251.97:12027"
if stats:
arbiter_kw['statsd'] = True
arbiter_kw['stats_endpoint'] = "tcp://127.0.0.1:%d" % _gp()
arbiter_kw['statsd_close_outputs'] = not debug
if use_async:
arbiter_kw['background'] = False
arbiter_kw['loop'] = get_ioloop()
else:
arbiter_kw['background'] = True
arbiter = cls.arbiter_factory([worker], plugins=plugins, **arbiter_kw)
cls.arbiters.append(arbiter)
return testfile, arbiter
@tornado.gen.coroutine
def _stop_runners(self):
for arbiter in self.arbiters:
yield arbiter.stop()
self.arbiters = []
@tornado.gen.coroutine
def call(self, _cmd, **props):
msg = make_message(_cmd, **props)
resp = yield self.cli.call(msg)
raise tornado.gen.Return(resp)
def profile(func):
"""Can be used to dump profile stats"""
def _profile(*args, **kw):
profiler = cProfile.Profile()
try:
return profiler.runcall(func, *args, **kw)
finally:
pstats.Stats(profiler).sort_stats('time').print_stats(30)
return _profile
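# Usage sketch for the profile decorator above; the workload is
# hypothetical and exists only to produce profiler output.
@profile
def _profiled_workload():
    return sum(i * i for i in range(100000))
# Calling _profiled_workload() prints the 30 hottest entries by time.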
class Process(object):
def __init__(self, testfile):
self.testfile = testfile
# init signal handling
if IS_WINDOWS:
signal.signal(signal.SIGABRT, self.handle_quit)
signal.signal(signal.SIGTERM, self.handle_quit)
signal.signal(signal.SIGINT, self.handle_quit)
signal.signal(signal.SIGILL, self.handle_quit)
signal.signal(signal.SIGBREAK, self.handle_quit)
else:
signal.signal(signal.SIGQUIT, self.handle_quit)
signal.signal(signal.SIGTERM, self.handle_quit)
signal.signal(signal.SIGINT, self.handle_quit)
signal.signal(signal.SIGCHLD, self.handle_chld)
self.alive = True
def _write(self, msg):
with open(self.testfile, 'a+') as f:
f.write(msg)
def handle_quit(self, *args):
self._write('QUIT')
self.alive = False
def handle_chld(self, *args):
self._write('CHLD')
return
def run(self):
self._write('START')
while self.alive:
sleep(0.1)
self._write('STOP')
def run_process(test_file):
process = Process(test_file)
process.run()
return 1
def has_gevent():
try:
import gevent # NOQA
return True
except ImportError:
return False
def has_circusweb():
try:
import circusweb # NOQA
return True
except ImportError:
return False
class TimeoutException(Exception):
pass
def poll_for_callable(func, *args, **kwargs):
"""Replay to update the status during timeout seconds."""
timeout = 5
if 'timeout' in kwargs:
timeout = kwargs.pop('timeout')
start = time()
last_exception = None
while time() - start < timeout:
try:
func_args = []
for arg in args:
if callable(arg):
func_args.append(arg())
else:
func_args.append(arg)
func(*func_args)
except AssertionError as e:
last_exception = e
sleep(0.1)
else:
return True
raise last_exception or AssertionError('No exception triggered yet')
def poll_for(filename, needles, timeout=5):
"""Poll a file for a given string.
Raises a TimeoutException if the string isn't found after timeout seconds
of polling.
"""
if isinstance(needles, str):
needles = [needles]
start = time()
needle = content = None
while time() - start < timeout:
with open(filename) as f:
content = f.read()
for needle in needles:
if needle in content:
return True
# When using gevent this will make sure the redirector greenlets are
# scheduled.
sleep(0.1)
raise TimeoutException('Timeout polling "%s" for "%s". Content: %s' % (
filename, needle, content))
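# Usage sketch for poll_for, with hypothetical file contents: the call
# returns as soon as the needle shows up in the polled file.
def _poll_for_example(tmpfile):
    with open(tmpfile, 'w') as f:
        f.write('START')
    return poll_for(tmpfile, 'START', timeout=1)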
@tornado.gen.coroutine
def async_poll_for(filename, needles, timeout=5):
"""Async version of poll_for
"""
if isinstance(needles, str):
needles = [needles]
start = time()
needle = content = None
while time() - start < timeout:
with open(filename) as f:
content = f.read()
for needle in needles:
if needle in content:
raise tornado.gen.Return(True)
yield tornado_sleep(0.1)
raise TimeoutException('Timeout polling "%s" for "%s". Content: %s' % (
filename, needle, content))
def truncate_file(filename):
"""Truncate a file (empty it)."""
open(filename, 'w').close() # opening as 'w' overwrites the file
def run_plugin(klass, config, plugin_info_callback=None, duration=300,
endpoint=DEFAULT_ENDPOINT_DEALER,
pubsub_endpoint=DEFAULT_ENDPOINT_SUB):
check_delay = 1
ssh_server = None
class _Statsd(object):
gauges = []
increments = defaultdict(int)
def gauge(self, name, value):
self.gauges.append((name, value))
def increment(self, name):
self.increments[name] += 1
def stop(self):
pass
_statsd = _Statsd()
plugin = klass(endpoint, pubsub_endpoint, check_delay, ssh_server,
**config)
# make sure we close the existing statsd client
if hasattr(plugin, 'statsd'):
plugin.statsd.stop()
plugin.statsd = _statsd
deadline = time() + (duration / 1000.)
plugin.loop.add_timeout(deadline, plugin.stop)
plugin.start()
try:
if plugin_info_callback:
plugin_info_callback(plugin)
finally:
plugin.stop()
return _statsd
@tornado.gen.coroutine
def async_run_plugin(klass, config, plugin_info_callback, duration=300,
endpoint=DEFAULT_ENDPOINT_DEALER,
pubsub_endpoint=DEFAULT_ENDPOINT_SUB):
queue = multiprocessing.Queue()
plugin_info_callback = functools.partial(plugin_info_callback, queue)
circusctl_process = multiprocessing.Process(
target=run_plugin,
args=(klass, config, plugin_info_callback, duration,
endpoint, pubsub_endpoint))
circusctl_process.start()
while queue.empty():
yield tornado_sleep(.1)
result = queue.get()
raise tornado.gen.Return(result)
class FakeProcess(object):
def __init__(self, pid, status, started=1, age=1):
self.status = status
self.pid = pid
self.started = started
self.age = age
self.stopping = False
def is_alive(self):
return True
def stop(self):
pass
class MagicMockFuture(mock.MagicMock, tornado.concurrent.Future):
def cancel(self):
return False
def cancelled(self):
return False
def running(self):
return False
def done(self):
return True
def result(self, timeout=None):
return None
def exception(self, timeout=None):
return None
def add_done_callback(self, fn):
fn(self)
def set_result(self, result):
pass
def set_exception(self, exception):
pass
def __del__(self):
# Don't try to print non-consumed exceptions
pass
| 27.927461
| 78
| 0.598083
|
794a9d511d3d68a9ef4b9ce3d827d7b6f763868b
| 4,298
|
py
|
Python
|
gamestonk_terminal/mutual_funds/yfinance_view.py
|
i2infinity/GamestonkTerminal
|
abf79a5249930e5a9f5d2a1c4ba64590888ecef5
|
[
"MIT"
] | null | null | null |
gamestonk_terminal/mutual_funds/yfinance_view.py
|
i2infinity/GamestonkTerminal
|
abf79a5249930e5a9f5d2a1c4ba64590888ecef5
|
[
"MIT"
] | 1
|
2022-03-29T13:45:05.000Z
|
2022-03-29T13:45:05.000Z
|
gamestonk_terminal/mutual_funds/yfinance_view.py
|
i2infinity/GamestonkTerminal
|
abf79a5249930e5a9f5d2a1c4ba64590888ecef5
|
[
"MIT"
] | 1
|
2021-06-20T02:42:40.000Z
|
2021-06-20T02:42:40.000Z
|
"""Yahoo Finance Mutual Fund Model"""
__docformat__ = "numpy"
import os
import pandas as pd
import matplotlib.pyplot as plt
from rich.console import Console
from gamestonk_terminal.helper_funcs import (
rich_table_from_df,
export_data,
plot_autoscale,
)
from gamestonk_terminal.mutual_funds import yfinance_model
from gamestonk_terminal import feature_flags as gtff
from gamestonk_terminal.config_plot import PLOT_DPI
console = Console()
def display_sector(fund: str, min_pct_to_display: float = 5, export: str = ""):
"""Display sector weightings for fund
Parameters
----------
fund: str
Fund symbol
min_pct_to_display: float
Minimum percentage to display sector
export: str
Type of format to export data
"""
sector_weights = yfinance_model.get_information(fund)
if "sectorWeightings" not in sector_weights.keys():
console.print(
f"Sector Weights are not found f for {fund}. Either the symbol is incorrect or there is an issue "
"in pulling from yahoo.\n"
)
return
sector_weights = sector_weights["sectorWeightings"]
weights = {}
for weight in sector_weights:
weights.update(weight)
df_weight = pd.DataFrame.from_dict(weights, orient="index")
if df_weight.empty:
console.print("No sector data found.\n")
df_weight = df_weight.apply(lambda x: round(100 * x, 3))
df_weight.columns = ["Weight"]
df_weight.sort_values(by="Weight", inplace=True, ascending=False)
df_weight.index = [
"Real Estate" if x == "realestate" else x.replace("_", " ").title()
for x in df_weight.index
]
if gtff.USE_TABULATE_DF:
console.print(
rich_table_from_df(
df_weight,
show_index=True,
index_name="Sector",
headers=["Weight (%)"],
title=f"[bold]{fund.upper()} Sector Weightings[/bold] ",
)
)
else:
console.print(df_weight.to_string())
console.print("\n")
main_holdings = df_weight[df_weight.Weight > min_pct_to_display].to_dict()[
df_weight.columns[0]
]
if len(main_holdings) < len(df_weight):
main_holdings["Others"] = 100 - sum(main_holdings.values())
legend, values = zip(*main_holdings.items())
leg = [f"{le}\n{round(v, 2)}%" for le, v in zip(legend, values)]
fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
ax.pie(
values,
labels=leg,
wedgeprops={"linewidth": 0.5, "edgecolor": "white"},
labeldistance=1.05,
startangle=90,
)
ax.set_title(f"Sector holdings of {fund.upper()}")
fig.tight_layout()
if gtff.USE_ION:
plt.ion()
plt.show()
export_data(export, os.path.dirname(os.path.abspath(__file__)), "sector", df_weight)
def display_equity(fund: str):
"""Display equity holdings for fund
Parameters
----------
fund: str
Fund symbol
"""
title_map = {
"priceToCashflow": "Price To Cash Flow",
"priceToSales": "Price To Sales",
"priceToBookCat": "Price To Book Cat",
"priceToEarningsCat": "Price To Earnings Cat",
"medianMarketCapCat": "Median Market Cap Cat",
"threeYearEarningsGrowthCat": "3Yr Earnings Growth Cat",
"threeYearEarningsGrowth": "3Y Earnings Growth",
"medianMarketCap": "Median Market Cap",
"priceToEarnings": "Price To Earnings",
"priceToBook": "Price To Book",
"priceToSalesCat": "Price To Sales Cat",
"priceToCashflowCat": "Price To Cashflow Cat",
}
equity_hold = yfinance_model.get_information(fund)["equityHoldings"]
df_weight = pd.DataFrame.from_dict(equity_hold, orient="index")
df_weight = df_weight.apply(lambda x: round(100 * x, 3))
df_weight.index = df_weight.index.map(title_map)
if gtff.USE_TABULATE_DF:
console.print(
rich_table_from_df(
df_weight,
show_index=True,
index_name="Equity",
headers=["Holding"],
title=f"[bold]{fund.upper()} Equity Holdings[/bold] ",
)
)
else:
console.print(df_weight.to_string())
console.print("\n")
| 32.315789
| 110
| 0.625407
|
794a9dbe5db931440a3183f9362c48991ad07713
| 2,232
|
py
|
Python
|
src/w3s_pg_002.py
|
antalpeti/Python-Tutorial
|
157d631f4c5c888aa8d961a56fe0c6542a433321
|
[
"MIT"
] | null | null | null |
src/w3s_pg_002.py
|
antalpeti/Python-Tutorial
|
157d631f4c5c888aa8d961a56fe0c6542a433321
|
[
"MIT"
] | 2
|
2019-10-31T10:26:53.000Z
|
2019-10-31T10:27:16.000Z
|
src/w3s_pg_002.py
|
antalpeti/Python-Tutorial
|
157d631f4c5c888aa8d961a56fe0c6542a433321
|
[
"MIT"
] | null | null | null |
x = str("s1")
y = str(2)
z = str(3.0)
print(x)
print(y)
print(z)
x = float(1)
y = float(2.8)
z = float("3")
w = float("4.2")
print(x)
print(y)
print(z)
print(w)
x = int(1)
y = int(2.8)
z = int("3")
print(x)
print(y)
print(z)
import random
print(random.randrange(1,10))
x = 1 # int
y = 2.8 # float
z = 1j # complex
#convert from int to float:
a = float(x)
#convert from float to int:
b = int(y)
#convert from int to complex:
c = complex(x)
print(a)
print(b)
print(c)
print(type(a))
print(type(b))
print(type(c))
x = 3+5j
y = 5j
z = -5j
print(type(x))
print(type(y))
print(type(z))
x = 35e3
y = 12E4
z = -87.7e100
print(type(x))
print(type(y))
print(type(z))
x = 1.10
y = 1.0
z = -35.59
print(type(x))
print(type(y))
print(type(z))
x = 1
y = 35656222554887711
z = -3255522
print(type(x))
print(type(y))
print(type(z))
x = 1
y = 2.8
z = 1j
print(type(x))
print(type(y))
print(type(z))
x = memoryview(bytes(5))
#display x:
print(x)
#display the data type of x:
print(type(x))
x = bytearray(5)
#display x:
print(x)
#display the data type of x:
print(type(x))
x = bytes(5)
#display x:
print(x)
#display the data type of x:
print(type(x))
x = bool(5)
#display x:
print(x)
#display the data type of x:
print(type(x))
x = frozenset(("apple", "banana", "cherry"))
#display x:
print(x)
#display the data type of x:
print(type(x))
x = set(("apple", "banana", "cherry"))
#display x:
print(x)
#display the data type of x:
print(type(x))
x = dict(name="John", age=36)
#display x:
print(x)
#display the data type of x:
print(type(x))
x = range(6)
#display x:
print(x)
#display the data type of x:
print(type(x))
x = tuple(("apple", "banana", "cherry"))
#display x:
print(x)
#display the data type of x:
print(type(x))
x = list(("apple", "banana", "cherry"))
#display x:
print(x)
#display the data type of x:
print(type(x))
x = complex(1j)
#display x:
print(x)
#display the data type of x:
print(type(x))
x = float(20.5)
#display x:
print(x)
#display the data type of x:
print(type(x))
x = int(20)
#display x:
print(x)
#display the data type of x:
print(type(x))
x = str("Hello World")
#display x:
print(x)
#display the data type of x:
print(type(x))
| 10.054054
| 44
| 0.613351
|
794a9f081bfe4b2f76627dbcf6b4edfb2872a28e
| 31,274
|
bzl
|
Python
|
apple/internal/watchos_rules.bzl
|
specto-dev/rules_apple
|
9ee34a4793b5906eba5e5b948c4339c74e29e797
|
[
"Apache-2.0"
] | null | null | null |
apple/internal/watchos_rules.bzl
|
specto-dev/rules_apple
|
9ee34a4793b5906eba5e5b948c4339c74e29e797
|
[
"Apache-2.0"
] | null | null | null |
apple/internal/watchos_rules.bzl
|
specto-dev/rules_apple
|
9ee34a4793b5906eba5e5b948c4339c74e29e797
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of watchOS rules."""
load(
"@build_bazel_apple_support//lib:xcode_support.bzl",
"xcode_support",
)
load(
"@build_bazel_rules_apple//apple/internal:apple_product_type.bzl",
"apple_product_type",
)
load(
"@build_bazel_rules_apple//apple/internal:bundling_support.bzl",
"bundling_support",
)
load(
"@build_bazel_rules_apple//apple/internal:codesigning_support.bzl",
"codesigning_support",
)
load(
"@build_bazel_rules_apple//apple/internal:entitlements_support.bzl",
"entitlements_support",
)
load(
"@build_bazel_rules_apple//apple/internal:features_support.bzl",
"features_support",
)
load(
"@build_bazel_rules_apple//apple/internal:linking_support.bzl",
"linking_support",
)
load(
"@build_bazel_rules_apple//apple/internal:outputs.bzl",
"outputs",
)
load(
"@build_bazel_rules_apple//apple/internal:partials.bzl",
"partials",
)
load(
"@build_bazel_rules_apple//apple/internal:platform_support.bzl",
"platform_support",
)
load(
"@build_bazel_rules_apple//apple/internal:processor.bzl",
"processor",
)
load(
"@build_bazel_rules_apple//apple/internal:rule_factory.bzl",
"rule_factory",
)
load(
"@build_bazel_rules_apple//apple/internal:rule_support.bzl",
"rule_support",
)
load(
"@build_bazel_rules_apple//apple/internal:stub_support.bzl",
"stub_support",
)
load(
"@build_bazel_rules_apple//apple/internal/aspects:swift_static_framework_aspect.bzl",
"SwiftStaticFrameworkInfo",
)
load(
"@build_bazel_rules_apple//apple:providers.bzl",
"AppleSupportToolchainInfo",
"IosFrameworkBundleInfo",
"WatchosApplicationBundleInfo",
"WatchosExtensionBundleInfo",
"WatchosStaticFrameworkBundleInfo",
)
load(
"@build_bazel_rules_apple//apple/internal/aspects:swift_dynamic_framework_aspect.bzl",
"SwiftDynamicFrameworkInfo",
)
load(
"@build_bazel_rules_swift//swift:swift.bzl",
"SwiftInfo",
)
def _watchos_dynamic_framework_impl(ctx):
"""Experimental implementation of watchos_dynamic_framework."""
# This rule should only have one swift_library dependency. This means len(ctx.attr.deps) should be 2
    # because of the swift_runtime_linkopts dep that comes with the swift_library
swiftdeps = [x for x in ctx.attr.deps if SwiftInfo in x]
if len(swiftdeps) != 1 or len(ctx.attr.deps) > 2:
fail(
"""\
error: Swift dynamic frameworks expect a single swift_library dependency.
""",
)
binary_target = [deps for deps in ctx.attr.deps if deps.label.name.endswith("swift_runtime_linkopts")][0]
extra_linkopts = []
if ctx.attr.extension_safe:
extra_linkopts.append("-fapplication-extension")
link_result = linking_support.register_linking_action(
ctx,
extra_linkopts = extra_linkopts,
stamp = ctx.attr.stamp,
)
binary_artifact = link_result.binary_provider.binary
debug_outputs_provider = link_result.debug_outputs_provider
actions = ctx.actions
apple_toolchain_info = ctx.attr._toolchain[AppleSupportToolchainInfo]
bin_root_path = ctx.bin_dir.path
bundle_id = ctx.attr.bundle_id
bundle_name, bundle_extension = bundling_support.bundle_full_name_from_rule_ctx(ctx)
entitlements = entitlements_support.entitlements(
entitlements_attr = getattr(ctx.attr, "entitlements", None),
entitlements_file = getattr(ctx.file, "entitlements", None),
)
executable_name = bundling_support.executable_name(ctx)
features = features_support.compute_enabled_features(
requested_features = ctx.features,
unsupported_features = ctx.disabled_features,
)
label = ctx.label
platform_prerequisites = platform_support.platform_prerequisites_from_rule_ctx(ctx)
predeclared_outputs = ctx.outputs
rule_descriptor = rule_support.rule_descriptor(ctx)
signed_frameworks = []
if getattr(ctx.file, "provisioning_profile", None):
signed_frameworks = [
bundle_name + rule_descriptor.bundle_extension,
]
archive_for_embedding = outputs.archive_for_embedding(
actions = actions,
bundle_name = bundle_name,
bundle_extension = bundle_extension,
executable_name = executable_name,
label_name = label.name,
rule_descriptor = rule_descriptor,
platform_prerequisites = platform_prerequisites,
predeclared_outputs = predeclared_outputs,
)
processor_partials = [
partials.apple_bundle_info_partial(
actions = actions,
bundle_extension = bundle_extension,
bundle_id = bundle_id,
bundle_name = bundle_name,
executable_name = executable_name,
entitlements = entitlements,
label_name = label.name,
platform_prerequisites = platform_prerequisites,
predeclared_outputs = predeclared_outputs,
product_type = rule_descriptor.product_type,
),
partials.binary_partial(
actions = actions,
binary_artifact = binary_artifact,
executable_name = executable_name,
label_name = label.name,
),
partials.bitcode_symbols_partial(
actions = actions,
binary_artifact = binary_artifact,
debug_outputs_provider = debug_outputs_provider,
dependency_targets = ctx.attr.frameworks,
label_name = label.name,
platform_prerequisites = platform_prerequisites,
),
partials.clang_rt_dylibs_partial(
actions = actions,
apple_toolchain_info = apple_toolchain_info,
binary_artifact = binary_artifact,
features = features,
label_name = label.name,
platform_prerequisites = platform_prerequisites,
),
partials.debug_symbols_partial(
actions = actions,
bin_root_path = bin_root_path,
bundle_extension = bundle_extension,
bundle_name = bundle_name,
debug_dependencies = ctx.attr.frameworks,
debug_outputs_provider = debug_outputs_provider,
dsym_info_plist_template = apple_toolchain_info.dsym_info_plist_template,
executable_name = executable_name,
platform_prerequisites = platform_prerequisites,
rule_label = label,
),
partials.embedded_bundles_partial(
frameworks = [archive_for_embedding],
embeddable_targets = ctx.attr.frameworks,
platform_prerequisites = platform_prerequisites,
signed_frameworks = depset(signed_frameworks),
),
partials.extension_safe_validation_partial(
is_extension_safe = ctx.attr.extension_safe,
rule_label = label,
targets_to_validate = ctx.attr.frameworks,
),
partials.framework_provider_partial(
actions = actions,
bin_root_path = bin_root_path,
binary_provider = link_result.binary_provider,
bundle_name = bundle_name,
bundle_only = False,
rule_label = label,
),
partials.resources_partial(
actions = actions,
apple_toolchain_info = apple_toolchain_info,
bundle_extension = bundle_extension,
bundle_id = bundle_id,
bundle_name = bundle_name,
environment_plist = ctx.file._environment_plist,
executable_name = executable_name,
launch_storyboard = None,
platform_prerequisites = platform_prerequisites,
plist_attrs = ["infoplists"],
rule_attrs = ctx.attr,
rule_descriptor = rule_descriptor,
rule_label = label,
targets_to_avoid = ctx.attr.frameworks,
top_level_attrs = ["resources"],
version_keys_required = False,
),
partials.swift_dylibs_partial(
actions = actions,
apple_toolchain_info = apple_toolchain_info,
binary_artifact = binary_artifact,
dependency_targets = ctx.attr.frameworks,
label_name = label.name,
platform_prerequisites = platform_prerequisites,
),
partials.swift_dynamic_framework_partial(
actions = actions,
bundle_name = bundle_name,
label_name = label.name,
swift_dynamic_framework_info = binary_target[SwiftDynamicFrameworkInfo],
),
]
processor_result = processor.process(
actions = actions,
apple_toolchain_info = apple_toolchain_info,
bundle_extension = bundle_extension,
bundle_name = bundle_name,
codesignopts = codesigning_support.codesignopts_from_rule_ctx(ctx),
entitlements = entitlements,
executable_name = executable_name,
ipa_post_processor = ctx.executable.ipa_post_processor,
partials = processor_partials,
platform_prerequisites = platform_prerequisites,
predeclared_outputs = predeclared_outputs,
process_and_sign_template = apple_toolchain_info.process_and_sign_template,
provisioning_profile = getattr(ctx.file, "provisioning_profile", None),
rule_descriptor = rule_descriptor,
rule_label = label,
)
providers = processor_result.providers
additional_providers = []
for provider in providers:
if type(provider) == "AppleDynamicFramework":
# Make the ObjC provider using the framework_files depset found
# in the AppleDynamicFramework provider. This is to make the
# watchos_dynamic_framework usable as a dependency in swift_library
objc_provider = apple_common.new_objc_provider(
dynamic_framework_file = provider.framework_files
)
additional_providers.append(objc_provider)
providers.extend(additional_providers)
return [
DefaultInfo(files = processor_result.output_files),
IosFrameworkBundleInfo(),
OutputGroupInfo(
**outputs.merge_output_groups(
link_result.output_groups,
processor_result.output_groups,
)
),
] + providers
def _watchos_application_impl(ctx):
"""Implementation of watchos_application."""
top_level_attrs = [
"app_icons",
"storyboards",
"strings",
"resources",
]
actions = ctx.actions
apple_toolchain_info = ctx.attr._toolchain[AppleSupportToolchainInfo]
bin_root_path = ctx.bin_dir.path
bundle_id = ctx.attr.bundle_id
bundle_name, bundle_extension = bundling_support.bundle_full_name_from_rule_ctx(ctx)
executable_name = bundling_support.executable_name(ctx)
entitlements = entitlements_support.entitlements(
entitlements_attr = getattr(ctx.attr, "entitlements", None),
entitlements_file = getattr(ctx.file, "entitlements", None),
)
features = features_support.compute_enabled_features(
requested_features = ctx.features,
unsupported_features = ctx.disabled_features,
)
label = ctx.label
platform_prerequisites = platform_support.platform_prerequisites_from_rule_ctx(ctx)
predeclared_outputs = ctx.outputs
rule_descriptor = rule_support.rule_descriptor(ctx)
binary_artifact = stub_support.create_stub_binary(
actions = actions,
platform_prerequisites = platform_prerequisites,
rule_label = label,
xcode_stub_path = rule_descriptor.stub_binary_path,
)
bundle_verification_targets = [
struct(
target = ctx.attr.extension,
parent_bundle_id_reference = [
"NSExtension",
"NSExtensionAttributes",
"WKAppBundleIdentifier",
],
),
]
archive = outputs.archive(
actions = actions,
bundle_extension = bundle_extension,
bundle_name = bundle_name,
platform_prerequisites = platform_prerequisites,
predeclared_outputs = predeclared_outputs,
)
processor_partials = [
partials.apple_bundle_info_partial(
actions = actions,
bundle_extension = bundle_extension,
bundle_name = bundle_name,
executable_name = executable_name,
bundle_id = bundle_id,
entitlements = entitlements,
label_name = label.name,
platform_prerequisites = platform_prerequisites,
predeclared_outputs = predeclared_outputs,
product_type = rule_descriptor.product_type,
),
partials.binary_partial(
actions = actions,
binary_artifact = binary_artifact,
executable_name = executable_name,
label_name = label.name,
),
partials.bitcode_symbols_partial(
actions = actions,
dependency_targets = [ctx.attr.extension],
label_name = label.name,
platform_prerequisites = platform_prerequisites,
),
partials.clang_rt_dylibs_partial(
actions = actions,
apple_toolchain_info = apple_toolchain_info,
binary_artifact = binary_artifact,
features = features,
label_name = label.name,
platform_prerequisites = platform_prerequisites,
),
partials.debug_symbols_partial(
actions = actions,
bin_root_path = bin_root_path,
bundle_extension = bundle_extension,
bundle_name = bundle_name,
debug_dependencies = [ctx.attr.extension],
dsym_info_plist_template = apple_toolchain_info.dsym_info_plist_template,
executable_name = executable_name,
platform_prerequisites = platform_prerequisites,
rule_label = label,
),
partials.embedded_bundles_partial(
bundle_embedded_bundles = True,
embeddable_targets = [ctx.attr.extension],
platform_prerequisites = platform_prerequisites,
watch_bundles = [archive],
),
partials.resources_partial(
actions = actions,
apple_toolchain_info = apple_toolchain_info,
bundle_extension = bundle_extension,
bundle_id = bundle_id,
bundle_name = bundle_name,
executable_name = executable_name,
bundle_verification_targets = bundle_verification_targets,
environment_plist = ctx.file._environment_plist,
launch_storyboard = None,
platform_prerequisites = platform_prerequisites,
plist_attrs = ["infoplists"],
rule_attrs = ctx.attr,
rule_descriptor = rule_descriptor,
rule_label = label,
top_level_attrs = top_level_attrs,
),
partials.swift_dylibs_partial(
actions = actions,
apple_toolchain_info = apple_toolchain_info,
binary_artifact = binary_artifact,
bundle_dylibs = True,
dependency_targets = [ctx.attr.extension],
label_name = label.name,
platform_prerequisites = platform_prerequisites,
),
partials.watchos_stub_partial(
actions = actions,
binary_artifact = binary_artifact,
label_name = label.name,
),
]
if platform_prerequisites.platform.is_device:
processor_partials.append(
partials.provisioning_profile_partial(
actions = actions,
profile_artifact = ctx.file.provisioning_profile,
rule_label = label,
),
)
processor_result = processor.process(
actions = actions,
apple_toolchain_info = apple_toolchain_info,
bundle_extension = bundle_extension,
bundle_name = bundle_name,
entitlements = entitlements,
executable_name = executable_name,
ipa_post_processor = ctx.executable.ipa_post_processor,
partials = processor_partials,
platform_prerequisites = platform_prerequisites,
predeclared_outputs = predeclared_outputs,
process_and_sign_template = apple_toolchain_info.process_and_sign_template,
provisioning_profile = getattr(ctx.file, "provisioning_profile", None),
rule_descriptor = rule_descriptor,
rule_label = label,
)
return [
DefaultInfo(
files = processor_result.output_files,
),
OutputGroupInfo(**processor_result.output_groups),
WatchosApplicationBundleInfo(),
] + processor_result.providers
def _watchos_extension_impl(ctx):
"""Implementation of watchos_extension."""
top_level_attrs = [
"app_icons",
"strings",
"resources",
]
# Xcode 11 requires this flag to be passed to the linker, but it is not accepted by earlier
# versions.
# TODO(min(Xcode) >= 11): Make this unconditional when the minimum supported Xcode is Xcode 11.
xcode_config = ctx.attr._xcode_config[apple_common.XcodeVersionConfig]
if xcode_support.is_xcode_at_least_version(xcode_config, "11"):
extra_linkopts = ["-e", "_WKExtensionMain"]
# This is required when building with watchOS SDK 6.0 or higher but with a minimum
# deployment version lower than 6.0. See
# https://developer.apple.com/documentation/xcode_release_notes/xcode_11_release_notes.
minimum_os = apple_common.dotted_version(ctx.attr.minimum_os_version)
if minimum_os < apple_common.dotted_version("6.0"):
extra_linkopts.append(
# The linker will search for this library relative to sysroot, which will already
# be the watchOS SDK directory.
#
# This is a force-load (unlike Xcode, which uses a standard `-l`) because we can't
# easily control where it appears in the link order relative to WatchKit.framework
# (where this symbol also lives, in watchOS 6+), so we need to guarantee that the
# linker doesn't skip the static library's implementation of `WKExtensionMain` if
# it already resolved the symbol from the framework.
"-Wl,-force_load,/usr/lib/libWKExtensionMainLegacy.a",
)
else:
extra_linkopts = []
link_result = linking_support.register_linking_action(
ctx,
extra_linkopts = extra_linkopts,
stamp = ctx.attr.stamp,
)
binary_artifact = link_result.binary_provider.binary
debug_outputs_provider = link_result.debug_outputs_provider
actions = ctx.actions
apple_toolchain_info = ctx.attr._toolchain[AppleSupportToolchainInfo]
bin_root_path = ctx.bin_dir.path
bundle_id = ctx.attr.bundle_id
bundle_name, bundle_extension = bundling_support.bundle_full_name_from_rule_ctx(ctx)
executable_name = bundling_support.executable_name(ctx)
entitlements = entitlements_support.entitlements(
entitlements_attr = getattr(ctx.attr, "entitlements", None),
entitlements_file = getattr(ctx.file, "entitlements", None),
)
features = features_support.compute_enabled_features(
requested_features = ctx.features,
unsupported_features = ctx.disabled_features,
)
label = ctx.label
platform_prerequisites = platform_support.platform_prerequisites_from_rule_ctx(ctx)
predeclared_outputs = ctx.outputs
rule_descriptor = rule_support.rule_descriptor(ctx)
archive = outputs.archive(
actions = actions,
bundle_extension = bundle_extension,
bundle_name = bundle_name,
platform_prerequisites = platform_prerequisites,
predeclared_outputs = predeclared_outputs,
)
bundle_verification_targets = [struct(target = ext) for ext in ctx.attr.extensions]
processor_partials = [
partials.apple_bundle_info_partial(
actions = actions,
bundle_extension = bundle_extension,
bundle_name = bundle_name,
executable_name = executable_name,
bundle_id = bundle_id,
entitlements = entitlements,
label_name = label.name,
platform_prerequisites = platform_prerequisites,
predeclared_outputs = predeclared_outputs,
product_type = rule_descriptor.product_type,
),
partials.binary_partial(
actions = actions,
binary_artifact = binary_artifact,
executable_name = executable_name,
label_name = ctx.label.name,
),
partials.bitcode_symbols_partial(
actions = actions,
binary_artifact = binary_artifact,
debug_outputs_provider = debug_outputs_provider,
dependency_targets = ctx.attr.extensions,
label_name = label.name,
platform_prerequisites = platform_prerequisites,
),
partials.clang_rt_dylibs_partial(
actions = actions,
apple_toolchain_info = apple_toolchain_info,
binary_artifact = binary_artifact,
features = features,
label_name = label.name,
platform_prerequisites = platform_prerequisites,
),
partials.debug_symbols_partial(
actions = actions,
bin_root_path = bin_root_path,
bundle_extension = bundle_extension,
bundle_name = bundle_name,
debug_dependencies = ctx.attr.extensions,
debug_outputs_provider = debug_outputs_provider,
dsym_info_plist_template = apple_toolchain_info.dsym_info_plist_template,
executable_name = executable_name,
platform_prerequisites = platform_prerequisites,
rule_label = label,
),
partials.embedded_bundles_partial(
bundle_embedded_bundles = True,
platform_prerequisites = platform_prerequisites,
embeddable_targets = ctx.attr.extensions,
plugins = [archive],
),
# Following guidance of the watchOS 2 migration guide's recommendations for placement of a
# framework, scoping dynamic frameworks only to the watch extension bundles:
# https://developer.apple.com/library/archive/documentation/General/Conceptual/AppleWatch2TransitionGuide/ConfiguretheXcodeProject.html
partials.framework_import_partial(
actions = actions,
apple_toolchain_info = apple_toolchain_info,
label_name = label.name,
platform_prerequisites = platform_prerequisites,
provisioning_profile = getattr(ctx.file, "provisioning_profile", None),
rule_descriptor = rule_descriptor,
targets = ctx.attr.deps,
),
partials.resources_partial(
actions = actions,
apple_toolchain_info = apple_toolchain_info,
bundle_extension = bundle_extension,
bundle_verification_targets = bundle_verification_targets,
bundle_id = bundle_id,
bundle_name = bundle_name,
environment_plist = ctx.file._environment_plist,
executable_name = executable_name,
launch_storyboard = None,
platform_prerequisites = platform_prerequisites,
plist_attrs = ["infoplists"],
rule_attrs = ctx.attr,
rule_descriptor = rule_descriptor,
rule_label = label,
top_level_attrs = top_level_attrs,
),
partials.swift_dylibs_partial(
actions = actions,
apple_toolchain_info = apple_toolchain_info,
binary_artifact = binary_artifact,
label_name = label.name,
dependency_targets = ctx.attr.extensions,
platform_prerequisites = platform_prerequisites,
),
]
if platform_prerequisites.platform.is_device:
processor_partials.append(
partials.provisioning_profile_partial(
actions = actions,
profile_artifact = ctx.file.provisioning_profile,
rule_label = label,
),
)
processor_result = processor.process(
actions = actions,
apple_toolchain_info = apple_toolchain_info,
bundle_extension = bundle_extension,
bundle_name = bundle_name,
codesignopts = codesigning_support.codesignopts_from_rule_ctx(ctx),
entitlements = entitlements,
executable_name = executable_name,
ipa_post_processor = ctx.executable.ipa_post_processor,
partials = processor_partials,
platform_prerequisites = platform_prerequisites,
predeclared_outputs = predeclared_outputs,
process_and_sign_template = apple_toolchain_info.process_and_sign_template,
provisioning_profile = getattr(ctx.file, "provisioning_profile", None),
rule_descriptor = rule_descriptor,
rule_label = label,
)
return [
DefaultInfo(
files = processor_result.output_files,
),
OutputGroupInfo(
**outputs.merge_output_groups(
link_result.output_groups,
processor_result.output_groups,
)
),
WatchosExtensionBundleInfo(),
] + processor_result.providers
def _watchos_static_framework_impl(ctx):
"""Implementation of watchos_static_framework."""
binary_target = ctx.attr.deps[0]
binary_artifact = binary_target[apple_common.AppleStaticLibrary].archive
actions = ctx.actions
apple_toolchain_info = ctx.attr._toolchain[AppleSupportToolchainInfo]
bundle_name, bundle_extension = bundling_support.bundle_full_name_from_rule_ctx(ctx)
executable_name = bundling_support.executable_name(ctx)
entitlements = entitlements_support.entitlements(
entitlements_attr = getattr(ctx.attr, "entitlements", None),
entitlements_file = getattr(ctx.file, "entitlements", None),
)
label = ctx.label
platform_prerequisites = platform_support.platform_prerequisites_from_rule_ctx(ctx)
predeclared_outputs = ctx.outputs
rule_descriptor = rule_support.rule_descriptor(ctx)
processor_partials = [
partials.apple_bundle_info_partial(
actions = actions,
bundle_extension = bundle_extension,
bundle_name = bundle_name,
executable_name = executable_name,
entitlements = entitlements,
label_name = label.name,
platform_prerequisites = platform_prerequisites,
predeclared_outputs = predeclared_outputs,
product_type = rule_descriptor.product_type,
),
partials.binary_partial(
actions = actions,
binary_artifact = binary_artifact,
executable_name = executable_name,
label_name = label.name,
),
]
    # If there are any Swift dependencies on the static framework rule, treat it as a Swift static
    # framework.
if SwiftStaticFrameworkInfo in binary_target:
processor_partials.append(
partials.swift_static_framework_partial(
actions = actions,
bundle_name = bundle_name,
label_name = label.name,
swift_static_framework_info = binary_target[SwiftStaticFrameworkInfo],
),
)
else:
processor_partials.append(
partials.static_framework_header_modulemap_partial(
actions = actions,
binary_objc_provider = binary_target[apple_common.Objc],
bundle_name = bundle_name,
hdrs = ctx.files.hdrs,
label_name = label.name,
umbrella_header = ctx.file.umbrella_header,
),
)
if not ctx.attr.exclude_resources:
rule_descriptor = rule_support.rule_descriptor(ctx)
processor_partials.append(partials.resources_partial(
actions = actions,
apple_toolchain_info = apple_toolchain_info,
bundle_extension = bundle_extension,
bundle_name = bundle_name,
environment_plist = ctx.file._environment_plist,
executable_name = executable_name,
launch_storyboard = None,
platform_prerequisites = platform_prerequisites,
rule_attrs = ctx.attr,
rule_descriptor = rule_descriptor,
rule_label = label,
))
processor_result = processor.process(
actions = actions,
apple_toolchain_info = apple_toolchain_info,
bundle_extension = bundle_extension,
bundle_name = bundle_name,
codesignopts = codesigning_support.codesignopts_from_rule_ctx(ctx),
entitlements = entitlements,
executable_name = executable_name,
ipa_post_processor = ctx.executable.ipa_post_processor,
partials = processor_partials,
platform_prerequisites = platform_prerequisites,
predeclared_outputs = predeclared_outputs,
process_and_sign_template = apple_toolchain_info.process_and_sign_template,
provisioning_profile = getattr(ctx.file, "provisioning_profile", None),
rule_descriptor = rule_descriptor,
rule_label = label,
)
return [
DefaultInfo(files = processor_result.output_files),
WatchosStaticFrameworkBundleInfo(),
] + processor_result.providers
watchos_application = rule_factory.create_apple_bundling_rule(
implementation = _watchos_application_impl,
platform_type = "watchos",
product_type = apple_product_type.watch2_application,
    doc = "Builds and bundles a watchOS Application.",
)
watchos_extension = rule_factory.create_apple_bundling_rule(
implementation = _watchos_extension_impl,
platform_type = "watchos",
product_type = apple_product_type.watch2_extension,
    doc = "Builds and bundles a watchOS Extension.",
)
watchos_dynamic_framework = rule_factory.create_apple_bundling_rule(
implementation = _watchos_dynamic_framework_impl,
platform_type = "watchos",
product_type = apple_product_type.framework,
doc = "Builds and bundles a watchOS dynamic framework that is consumable by Xcode.",
)
watchos_static_framework = rule_factory.create_apple_bundling_rule(
implementation = _watchos_static_framework_impl,
platform_type = "watchos",
product_type = apple_product_type.static_framework,
doc = "Builds and bundles a watchOS Static Framework.",
)
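# A minimal usage sketch of the rules above (hypothetical BUILD file; the
# target names, bundle identifiers, and versions are illustrative, not taken
# from this repository):
#
#   watchos_extension(
#       name = "WatchExtension",
#       bundle_id = "com.example.watchapp.extension",
#       infoplists = ["WatchExtension-Info.plist"],
#       minimum_os_version = "6.0",
#       deps = [":WatchExtensionSources"],
#   )
#
#   watchos_application(
#       name = "WatchApp",
#       bundle_id = "com.example.watchapp",
#       extension = ":WatchExtension",
#       infoplists = ["WatchApp-Info.plist"],
#       minimum_os_version = "6.0",
#   )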
| 39.043695
| 143
| 0.668447
|
794a9f286c0c7410dd73ddbff3edfe9d2fbb3d55
| 2,957
|
py
|
Python
|
tests/fields/test_input.py
|
hiporox/strawberry-graphql-django
|
eb2924a35fc503a5fb1fb73c4188dc6bdc90c58e
|
[
"MIT"
] | 96
|
2021-03-11T05:50:55.000Z
|
2022-03-30T09:14:09.000Z
|
tests/fields/test_input.py
|
hiporox/strawberry-graphql-django
|
eb2924a35fc503a5fb1fb73c4188dc6bdc90c58e
|
[
"MIT"
] | 92
|
2021-03-11T13:28:31.000Z
|
2022-03-31T15:20:19.000Z
|
tests/fields/test_input.py
|
hiporox/strawberry-graphql-django
|
eb2924a35fc503a5fb1fb73c4188dc6bdc90c58e
|
[
"MIT"
] | 22
|
2021-03-17T18:41:48.000Z
|
2022-03-18T12:11:51.000Z
|
import strawberry
from django.db import models
from strawberry.type import StrawberryOptional
import strawberry_django
from strawberry_django import auto
class InputFieldsModel(models.Model):
mandatory = models.IntegerField()
default = models.IntegerField(default=1)
blank = models.IntegerField(blank=True)
null = models.IntegerField(null=True)
null_boolean = models.NullBooleanField()
def test_input_type():
@strawberry_django.input(InputFieldsModel)
class InputType:
id: auto
mandatory: auto
default: auto
blank: auto
null: auto
null_boolean: auto
assert [(f.name, f.type) for f in InputType._type_definition.fields] == [
("id", StrawberryOptional(strawberry.ID)),
("mandatory", int),
("default", StrawberryOptional(int)),
("blank", StrawberryOptional(int)),
("null", StrawberryOptional(int)),
("null_boolean", StrawberryOptional(bool)),
]
def test_input_type_for_partial_update():
@strawberry_django.input(InputFieldsModel, partial=True)
class InputType:
id: auto
mandatory: auto
default: auto
blank: auto
null: auto
null_boolean: auto
assert [(f.name, f.type) for f in InputType._type_definition.fields] == [
("id", StrawberryOptional(strawberry.ID)),
("mandatory", StrawberryOptional(int)),
("default", StrawberryOptional(int)),
("blank", StrawberryOptional(int)),
("null", StrawberryOptional(int)),
("null_boolean", StrawberryOptional(bool)),
]
def test_input_type_basic():
from .. import models
@strawberry_django.input(models.User)
class UserInput:
name: auto
assert [(f.name, f.type) for f in UserInput._type_definition.fields] == [
("name", str),
]
def test_partial_input_type():
from .. import models
@strawberry_django.input(models.User, partial=True)
class UserPartialInput:
name: auto
assert [(f.name, f.type) for f in UserPartialInput._type_definition.fields] == [
("name", StrawberryOptional(str)),
]
def test_partial_input_type_inheritance():
from .. import models
@strawberry_django.input(models.User)
class UserInput:
name: auto
@strawberry_django.input(models.User, partial=True)
class UserPartialInput(UserInput):
pass
assert [(f.name, f.type) for f in UserPartialInput._type_definition.fields] == [
("name", StrawberryOptional(str)),
]
def test_input_type_inheritance_from_type():
from .. import models
@strawberry_django.type(models.User)
class User:
id: auto
name: auto
@strawberry_django.input(models.User)
class UserInput(User):
pass
assert [(f.name, f.type) for f in UserInput._type_definition.fields] == [
("id", StrawberryOptional(strawberry.ID)),
("name", str),
]
| 26.168142
| 84
| 0.655056
|
794aa15c74cd776523eb7c0d7f541a52521fa9d9
| 656
|
py
|
Python
|
scripts/mongio.py
|
msirprism/cryptoview
|
9ad8196100c0031c5d7e2c3de58cc1aadfa2bb7d
|
[
"MIT"
] | 224
|
2017-12-25T06:53:52.000Z
|
2022-02-09T03:35:34.000Z
|
scripts/mongio.py
|
msirprism/cryptoview
|
9ad8196100c0031c5d7e2c3de58cc1aadfa2bb7d
|
[
"MIT"
] | 9
|
2017-12-29T19:29:33.000Z
|
2018-05-10T22:38:06.000Z
|
scripts/mongio.py
|
msirprism/cryptoview
|
9ad8196100c0031c5d7e2c3de58cc1aadfa2bb7d
|
[
"MIT"
] | 72
|
2017-12-25T09:46:12.000Z
|
2021-12-30T06:08:59.000Z
|
import pymongo
import json
import sys
# Local Files
sys.path.append("..")
from scripts import settings
client = pymongo.MongoClient(settings.mongo_server, settings.mongo_id)
db = client[settings.mongo_client]
db.authenticate(settings.mongo_user, settings.mongo_pass)
def save(account, datatype, data):
d = db.positions.find_one({'account': account})
if d is not None:
d[datatype] = json.dumps(data)
db.positions.save(d)
else:
db.positions.insert_one({'account': account, datatype: json.dumps(data)})
def load(account, datatype):
d = db.positions.find_one({'account': account})
return json.loads(d[datatype])
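# A minimal usage sketch; the account name and payload below are illustrative,
# and settings must supply real connection values for this to run.
if __name__ == "__main__":
    save("demo-account", "positions", {"BTC": 1.5, "ETH": 10.0})
    print(load("demo-account", "positions"))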
| 27.333333
| 81
| 0.713415
|
794aa17038cd6b459b611130a61641236d63f13d
| 6,365
|
py
|
Python
|
python/paddle/fluid/tests/unittests/dygraph_to_static/test_save_inference_model.py
|
RangeKing/Paddle
|
2d87300809ae75d76f5b0b457d8112cb88dc3e27
|
[
"Apache-2.0"
] | 8
|
2016-08-15T07:02:27.000Z
|
2016-08-24T09:34:00.000Z
|
python/paddle/fluid/tests/unittests/dygraph_to_static/test_save_inference_model.py
|
RangeKing/Paddle
|
2d87300809ae75d76f5b0b457d8112cb88dc3e27
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/tests/unittests/dygraph_to_static/test_save_inference_model.py
|
RangeKing/Paddle
|
2d87300809ae75d76f5b0b457d8112cb88dc3e27
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import tempfile
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph.dygraph_to_static import ProgramTranslator
from paddle.fluid.dygraph.jit import declarative
from paddle.fluid.dygraph.dygraph_to_static.partial_program import partial_program_from
from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX, INFER_PARAMS_INFO_SUFFIX
SEED = 2020
np.random.seed(SEED)
place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace()
program_translator = ProgramTranslator()
class SimpleFcLayer(fluid.dygraph.Layer):
def __init__(self, fc_size):
super(SimpleFcLayer, self).__init__()
self._linear = fluid.dygraph.Linear(fc_size, fc_size)
@declarative
def forward(self, x):
y = self._linear(x)
z = self._linear(y)
out = fluid.layers.mean(z)
return out, y
class TestDyToStaticSaveInferenceModel(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def test_save_inference_model(self):
fc_size = 20
x_data = np.random.random((fc_size, fc_size)).astype('float32')
with fluid.dygraph.guard(place):
fluid.default_startup_program().random_seed = SEED
fluid.default_main_program().random_seed = SEED
x = fluid.dygraph.to_variable(x_data)
layer = SimpleFcLayer(fc_size)
adam = fluid.optimizer.SGD(learning_rate=0.1,
parameter_list=layer.parameters())
for i in range(5):
loss, pred = layer(x)
loss.backward()
adam.minimize(loss)
layer.clear_gradients()
# test for saving model in dygraph.guard
infer_model_prefix = os.path.join(
self.temp_dir.name, "test_dy2stat_inference_in_guard/model")
infer_model_dir = os.path.join(self.temp_dir.name,
"test_dy2stat_inference_in_guard")
fluid.dygraph.jit.save(
layer=layer,
path=infer_model_prefix,
input_spec=[x],
output_spec=[pred])
# Check the correctness of the inference
dygraph_out, _ = layer(x)
self.check_save_inference_model(layer, [x_data], dygraph_out.numpy())
self.check_save_inference_model(
layer, [x_data], dygraph_out.numpy(), fetch=[loss])
self.check_save_inference_model(
layer, [x_data], dygraph_out.numpy(), feed=[x])
def check_save_inference_model(self,
model,
inputs,
gt_out,
feed=None,
fetch=None):
expected_persistable_vars = set([p.name for p in model.parameters()])
infer_model_prefix = os.path.join(self.temp_dir.name,
"test_dy2stat_inference/model")
infer_model_dir = os.path.join(self.temp_dir.name,
"test_dy2stat_inference")
model_filename = "model" + INFER_MODEL_SUFFIX
params_filename = "model" + INFER_PARAMS_SUFFIX
fluid.dygraph.jit.save(
layer=model,
path=infer_model_prefix,
input_spec=feed if feed else None,
output_spec=fetch if fetch else None)
# Check the correctness of the inference
infer_out = self.load_and_run_inference(infer_model_dir, model_filename,
params_filename, inputs)
self.assertTrue(np.allclose(gt_out, infer_out))
def load_and_run_inference(self, model_path, model_filename,
params_filename, inputs):
paddle.enable_static()
exe = fluid.Executor(place)
[inference_program, feed_target_names,
fetch_targets] = fluid.io.load_inference_model(
dirname=model_path,
executor=exe,
model_filename=model_filename,
params_filename=params_filename)
results = exe.run(inference_program,
feed=dict(zip(feed_target_names, inputs)),
fetch_list=fetch_targets)
return np.array(results[0])
class TestPartialProgramRaiseError(unittest.TestCase):
def test_param_type(self):
program_translator = ProgramTranslator()
program_translator.enable(True)
x_data = np.random.random((20, 20)).astype('float32')
with fluid.dygraph.guard(fluid.CPUPlace()):
net = SimpleFcLayer(20)
x = fluid.dygraph.to_variable(x_data)
out = net(x)
program_cache = net.forward.program_cache
_, (concrete_program, _) = program_cache.last()
params = concrete_program.parameters
concrete_program.parameters = params[0]
# TypeError: Type of self._params should be list or tuple,
# but received <class 'paddle.fluid.framework.ParamBase'>.
with self.assertRaises(TypeError):
partial_program_from(concrete_program)
params[0] = "linear.w.0"
concrete_program.parameters = params
# TypeError: Type of self._params[0] should be framework.ParamBase,
# but received <type 'str'>.
with self.assertRaises(TypeError):
partial_program_from(concrete_program)
if __name__ == '__main__':
unittest.main()
| 38.113772
| 101
| 0.623095
|
794aa24767cb47ca8dae04f759e9042668004db0
| 18,608
|
py
|
Python
|
all code (not organized)/Database Load one hot semantic of sorts.py
|
TaylorChris2/Virtuoso
|
87a3d59141172d5daff0ae4725b843351b52fe63
|
[
"Apache-2.0"
] | null | null | null |
all code (not organized)/Database Load one hot semantic of sorts.py
|
TaylorChris2/Virtuoso
|
87a3d59141172d5daff0ae4725b843351b52fe63
|
[
"Apache-2.0"
] | null | null | null |
all code (not organized)/Database Load one hot semantic of sorts.py
|
TaylorChris2/Virtuoso
|
87a3d59141172d5daff0ae4725b843351b52fe63
|
[
"Apache-2.0"
] | null | null | null |
import sounddevice as sd
from scipy.signal import istft
from scipy.signal import stft
import librosa
import librosa.display
import midi
import skimage.transform
import numpy as np
import os
import h5py
import time
import matplotlib.pyplot as plt
start_time = time.time()
def load_midi_violin(path):
note_events = []
mid = midi.read_midifile(path)
##getting only the note data
for n,track in enumerate(mid):
note_events.append([])
for event in track:
if "NoteOnEvent" in str(event):
note_events[n].append(event)
elif "NoteOffEvent" in str(event):
event.data[1] = 0
note_events[n].append(event)
##deleting empty tracks
only_notes = []
for n,track in enumerate(note_events):
if len(track)>0:
only_notes.append(track)
##getting track length
track_lengths = []
for n,track in enumerate(only_notes):
track_lengths.append(0)
for event in track:
track_lengths[n] += event.tick
track_length = max(track_lengths)
##creating the actual track array and filling with empties
track_array = []
for i in range(0,track_length):
        track_array.append([[0.,0.,0.,0.],[1.,1.,1.,1.],[0.,0.,0.,0.]])##four-channel lists for pitch, articulation, and note-start duration
track_array = np.stack(track_array)
##filling in the track array with real note data
for track in only_notes:
current_tick = 0
for n,event in enumerate(track):
current_tick += event.tick
if event.data[1] == 100:##every note start
for i in range(current_tick,current_tick+track[n+1].tick):
for slot in range(0,4):
if track_array[i][0][slot] == 0:
track_array[i][0][slot] = event.data[0]
working_slot = slot
break
for slot in range(0,4):
if track_array[current_tick][2][slot] == 0:
track_array[current_tick][2][slot] = track[n+1].tick
break
for i in range(0,int(track[n+1].tick/4)):
track_array[current_tick+i][1][working_slot] = i/int(track[n+1].tick/4)
track_array[current_tick+track[n+1].tick-i-1][1][working_slot] = i/int(track[n+1].tick/4)
return track_array
def separate_sets(midis, mels, set_size):
midi_sets = []
mel_sets = []
loop = 0
current_set = -1
num_sets = len(midis)
while True:
if loop % set_size == 0:
midi_sets.append([])
mel_sets.append([])
current_set += 1
midi_sets[current_set].append(midis[loop])
mel_sets[current_set].append(mels[loop])
loop += 1
if loop >= num_sets:
break
return midi_sets, mel_sets
def save_data_set(set_, save_path, save_name):
if os.path.exists(os.path.join(save_path, save_name)+".h5"):
os.remove(os.path.join(save_path, save_name)+".h5")
hdf5_store = h5py.File(os.path.join(save_path, save_name)+".h5", "a")
hdf5_store.create_dataset("all_data", data = set_, compression="gzip")
def split_train_val_test(set_):
total = len(set_)
train_end_val_beginning = round(0.7 * total)
val_end_test_beginning = round(0.85 * total)
train_images = set_[:train_end_val_beginning]
val_images = set_[train_end_val_beginning:val_end_test_beginning]
test_images = set_[val_end_test_beginning:]
return train_images, val_images, test_images
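# For example, with 100 segments the slices above give a 70/15/15 split:
# round(0.7*100) == 70 and round(0.85*100) == 85, so training takes items
# 0..69, validation 70..84, and testing 85..99.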
def make_wave(freq, duration, sample_rate = 22050):
wave = [i/((sample_rate/(2*np.pi))/freq) for i in range(0, int(duration))]
wave = np.stack(wave)
wave = np.cos(wave)
'''
sd.play(wave,sample_rate)
cont = input("...")
'''
return wave
def load_array(path):
h5f = h5py.File(path,'r')
array = h5f['all_data'][:]
h5f.close()
return array
def save_array(array, path):
while True:
try:
if os.path.exists(path):
os.remove(path)
hdf5_store = h5py.File(path, "a")
hdf5_store.create_dataset("all_data", data = array, compression="gzip")
break
except:
pass
def midi_2_specgram(midi, length_factor):
Fs = 22050
N = 2048
w = np.hamming(N)
ov = N - Fs // 1000
midi_wave = np.zeros((4,int(midi.shape[0]*length_factor)))
articulation_factors = midi[:,1]
articulation_factors = skimage.transform.rescale(articulation_factors, (length_factor, 1))
print("articulation factors shape:",articulation_factors.shape)
    print("midi wave shape:",midi_wave.shape)
last_print = 0
for channel in range(0,4):
print("channel:",channel)
for i,note in enumerate(midi):
if note[0,channel]>0: ## pitch
try:
if note[2,channel] > 0: ## every note start
if i-last_print > 10000:
print(i,note[2,channel])
last_print = i
freq = 440*(2**((note[0,channel]-69)/12))
wave = make_wave(freq, note[2,channel]*length_factor, 22050)
for j,value in enumerate(wave):
midi_wave[channel,int(i*length_factor)+j]=wave[j]*articulation_factors[int(i*length_factor)+j,channel]
except Exception as e:
print(e)
                    print(last_print, i)
cont = input("...")
_,_,first_channel = stft(midi_wave[0],nfft=N,fs=Fs,window=w,nperseg=None,noverlap=ov)
specgram = np.zeros((first_channel.shape[0],first_channel.shape[1]))
added_wave = midi_wave[0].copy()
added_wave[:] = 0
for channel in midi_wave:
added_wave += channel
added_wave = added_wave*0.1/np.max(added_wave)
_,_,specgram = stft(added_wave,nfft=N,fs=Fs,window=w,nperseg=None,noverlap=ov)
specgram = np.real(specgram)
specgram[specgram < 0] = 0
specgram = librosa.amplitude_to_db(specgram, top_db=None)
return specgram
set_size = 48
path = "C:/Users/JiangQin/Documents/python/Music Composition Project/Music data/violin/cut data 2"
save_folder_path = "C:/Users/JiangQin/Documents/python/Music Composition Project/Music data/violin/Midis and Mels for Machine Learning"
frequency_clip_midi = 512 ##number of frequency bins to keep
frequency_clip_wav = 512 ##number of frequency bins to keep
time_split = 8192 ##milliseconds
midis = []
wavs = []
sets = 0
sets_ = []
start_index = 0
for set_ in os.listdir(path):
sets_.append(set_)
print(sets_)
for set_num in range(start_index, len(sets_)):
print("\n"+sets_[set_num]+"\n")
found_wav = False
found_mid = False
for file in os.listdir(os.path.join(path,sets_[set_num])):
if file.endswith(".wav") and not found_wav:
y,sr = librosa.load(os.path.join(os.path.join(path,sets_[set_num]), file))
y = y*0.1/np.max(y)
wav_length = y.shape[0]
Fs = 22050
N = 2048
w = np.hamming(N)
ov = N - Fs // 1000
f,t,specgram = stft(y,nfft=N,fs=Fs,window=w,nperseg=None,noverlap=ov)
specgram = np.real(specgram)
specgram[specgram < 0] = 0
specgram = librosa.amplitude_to_db(specgram, top_db=None)
wav_specgram = []
for i in range(0,frequency_clip_wav):
wav_specgram.append(specgram[i])
wav_specgram = np.stack(wav_specgram)
wav_specgram += 100
wav_specgram = wav_specgram/100
print(wav_specgram.shape)
'''
##test for converting back and playing
converted_back = wav_specgram*10.1
converted_back-=10
converted_back = 10**converted_back
converted_back = converted_back*0.1/np.max(converted_back)
decoded = []
for freq in converted_back:
decoded.append(freq)
decoded.append(np.zeros(converted_back.shape[1]))
decoded = np.stack(decoded)
t,back = istft(decoded,nfft=N,fs=Fs,window=w,nperseg=None,noverlap=ov)
cont = "a"
while cont == "a":
sd.play(back,22050)
cont = input("...")
sd.play(y,22050)
cont = input("...")
'''
#wav_specgram = 10**wav_specgram
print(np.max(wav_specgram))
print(np.min(wav_specgram))
'''
extent = [0,8192,0,1024]
fig = plt.figure(figsize=(16, 12))
ax = fig.add_subplot(111)
im = ax.imshow(wav_specgram, extent=extent, origin='lower')
plt.show()
'''
found_wav = True
print("Loaded wave file.")
elif file.endswith("mid") and not found_mid:
midi_array = load_midi_violin(os.path.join(os.path.join(path,sets_[set_num]), file))
print("1st channel max:",np.max(midi_array[:,0,0]))
print("2nd channel max:",np.max(midi_array[:,0,1]))
print("3rd channel max:",np.max(midi_array[:,0,2]))
print("4th channel max:",np.max(midi_array[:,0,3]))
print(midi_array.shape)
found_mid = True
print("Loaded midi file.")
if not found_wav or not found_mid:
print("Data incomplete. Failed to load: " + os.path.join(path,sets_[set_num]))
else:
sets+=1
rescale_factor = (wav_specgram.shape[1]/midi_array.shape[0])
length_factor = (wav_length/midi_array.shape[0])
specgram = midi_2_specgram(midi_array, length_factor)
'''
t,back = istft(specgram,nfft=N,fs=Fs,window=w,nperseg=None,noverlap=ov)
sd.play(back,22050)
cont = input("...")
'''
midi_specgram = []
for i in range(0,frequency_clip_midi):
midi_specgram.append(specgram[i])
midi_specgram = np.stack(midi_specgram)
midi_specgram += 100
midi_specgram = midi_specgram/100
if np.min(midi_specgram) < 0 or np.min(wav_specgram) < 0:
print("\n\nNOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO\n\n")
print("midi shape:",midi_specgram.shape)
print(np.max(midi_specgram))
print(np.min(midi_specgram))
timef_midi = np.transpose(midi_specgram)
timef_wav = np.transpose(wav_specgram)
#timef_wav = timef_wav-timef_midi
#timef_wav += 1
#timef_wav = timef_wav/2
timef_wav = np.round(timef_wav,1)
print("\n\n",np.unique(timef_wav),"\n\n")
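        # Vectorized one-hot encoding of the quantized levels: channel k is
        # set wherever the rounded magnitude equals k/10, with everything at
        # or above 0.7 collapsed into the top channel (the commented-out loop
        # below is the equivalent element-wise version).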
timef_wav_onehot = np.stack([timef_wav,timef_wav,timef_wav,timef_wav,timef_wav,timef_wav,timef_wav,timef_wav],axis=2)
timef_wav_onehot[:] = 0
timef_wav_onehot[:,:,0][timef_wav == 0] = 1
timef_wav_onehot[:,:,1][timef_wav == 0.1] = 1
timef_wav_onehot[:,:,2][timef_wav == 0.2] = 1
timef_wav_onehot[:,:,3][timef_wav == 0.3] = 1
timef_wav_onehot[:,:,4][timef_wav == 0.4] = 1
timef_wav_onehot[:,:,5][timef_wav == 0.5] = 1
timef_wav_onehot[:,:,6][timef_wav == 0.6] = 1
        timef_wav_onehot[:,:,7][timef_wav >= 0.7] = 1
'''
for i,moment in enumerate(timef_wav):
for j,freq in enumerate(moment):
if freq == 0:
timef_wav_onehot[i][j][0] = 1
elif freq == 0.1:
timef_wav_onehot[i][j][1] = 1
elif freq == 0.2:
timef_wav_onehot[i][j][2] = 1
elif freq == 0.3:
timef_wav_onehot[i][j][3] = 1
elif freq == 0.4:
timef_wav_onehot[i][j][4] = 1
elif freq == 0.5:
timef_wav_onehot[i][j][5] = 1
elif freq == 0.6:
timef_wav_onehot[i][j][6] = 1
elif freq == 0.7:
timef_wav_onehot[i][j][7] = 1
elif freq > 0.7:
timef_wav_onehot[i][j][7] = 1
'''
#timef_wav[timef_midi == 0] = 0
print("specgram shapes:", timef_midi.shape,timef_wav.shape)
print(np.max(timef_wav))
print(np.min(timef_wav))
print("Converted to spectrogram.")
delete_last = False
print("Split wav spectrograms.")
index = 0
segments = []
while True:
start = index*time_split
end = (index+1)*time_split
if np.array(timef_midi[start:end]).shape[0] == 0:
break
segments.append(np.array(timef_midi[start:end]))
index += 1
##padding the ending
if segments[-1].shape[0] > 3000:
padding_amt = time_split-segments[-1].shape[0]
padding = np.zeros((padding_amt, segments[-1].shape[1]))
new_last = []
for time_ in segments[-1]:
new_last.append(time_)
for pad in padding:
#print("pad",pad)
new_last.append(pad)
segments[-1] = np.stack(new_last)
else:
print(segments[-1].shape)
del segments[-1]
delete_last = True
for segment in segments:
midis.append(segment)
index = 0
segments = []
while True:
start = index*time_split
end = (index+1)*time_split
if np.array(timef_wav_onehot[start:end]).shape[0] == 0:
break
segments.append(np.array(timef_wav_onehot[start:end]))
index += 1
if not delete_last:
padding_amt = time_split-segments[-1].shape[0]
padding = np.zeros((padding_amt, segments[-1].shape[1], 8))
padding[:,:,0] = 1
new_last = []
for time_ in segments[-1]:
new_last.append(time_)
for pad in padding:
new_last.append(pad)
segments[-1] = np.stack(new_last)
else:
print("DELETING LAST, LESS THAN 3 SECONDS LONG")
del segments[-1]
delete_last = True
for segment in segments:
wavs.append(segment)
print("Split midi spectrograms.")
print("Loaded in" ,len(segments), "sets in", int((time.time() - start_time)/60), "minutes and",
int(((time.time() - start_time) % 60)+1), "seconds.")
'''
for n, wav in enumerate(wavs):
print(wav.shape)
print(wav.dtype)
fig = plt.figure()
#fig.subplots_adjust(hspace=0.4, wspace=0.4)
ax = fig.add_subplot(1, 2, 1)
ax.imshow(wav)
ax = fig.add_subplot(1, 2, 2)
ax.imshow(midis[n])
plt.show()
'''
##playing the wavs for testing, not needed for data loading
decoded = []
converted_back_midi = np.transpose(timef_midi)
decoded = []
for freq in converted_back_midi:
decoded.append(freq)
for i in range(0,(1025-frequency_clip_midi)):
decoded.append(np.zeros(converted_back_midi.shape[1]))
decoded = np.stack(decoded)
decoded = (decoded*100)-100
decoded = librosa.db_to_amplitude(decoded)
print(decoded.shape)
t,back = istft(decoded,nfft=N,fs=Fs,window=w,nperseg=None,noverlap=ov)
back = back*0.1/np.max(back)
print(back[-1])
sd.play(back,22050)
time.sleep(5)
#cont = input("...;")
'''
converted_back_wav = np.transpose(timef_wav)*2
converted_back_wav = (converted_back_wav-1) + converted_back_midi
'''
converted_back_wav = np.transpose(np.argmax(wavs[-1],axis=2)/10)
converted_back_wav = converted_back_wav > 0.2
print("converted shape:",converted_back_wav.shape)
decoded = []
for freq in converted_back_wav:
decoded.append(freq)
for i in range(0,(1025-frequency_clip_wav)):
decoded.append(np.zeros(converted_back_wav.shape[1]))
decoded = np.stack(decoded)
decoded = (decoded*100)-100
decoded = librosa.db_to_amplitude(decoded)
t,back = istft(decoded,nfft=N,fs=Fs,window=w,nperseg=None,noverlap=ov)
back = back*0.1/np.max(back)
print(back[-1])
sd.play(back,22050)
print("Loaded in" ,len(midis),len(wavs), "sets from", sets, "folders in", int((time.time() - start_time)/60), "minutes and",
int(((time.time() - start_time) % 60)+1), "seconds.")
midi_sets, wav_sets = separate_sets(midis, wavs, set_size)
start_time = time.time()
print("\nSaving loaded data in: " + save_folder_path + "...")
if not os.path.exists(save_folder_path):
os.makedirs(save_folder_path)
for n, set_ in enumerate(midi_sets):
train_midis, val_midis, test_midis = split_train_val_test(set_)
save_data_set(train_midis, save_folder_path, "Train Midis "+str(n))
save_data_set(val_midis, save_folder_path, "Val Midis "+str(n))
save_data_set(test_midis, save_folder_path, "Test Midis "+str(n))
print("Finished saving midis. Proceeding to save wavs...")
for n, set_ in enumerate(wav_sets):
train_wavs, val_wavs, test_wavs = split_train_val_test(set_)
save_data_set(train_wavs, save_folder_path, "Train Wavs "+str(n))
save_data_set(val_wavs, save_folder_path, "Val Wavs "+str(n))
save_data_set(test_wavs, save_folder_path, "Test Wavs "+str(n))
print("Finished saving wavs.")
print("\nAll data finished saving in", int((time.time() - start_time)/60), "minutes and ",
int(((time.time() - start_time) % 60)+1), "seconds.")
| 36.994036
| 136
| 0.551
|
794aa27d55496924767cfd42e99241c88171cf0c
| 2,492
|
py
|
Python
|
game/memobrowser.py
|
AmkG/gearhead-caramel
|
0238378295a09b4b33adb2ec0854fa06b0ad7b1b
|
[
"Apache-2.0"
] | null | null | null |
game/memobrowser.py
|
AmkG/gearhead-caramel
|
0238378295a09b4b33adb2ec0854fa06b0ad7b1b
|
[
"Apache-2.0"
] | null | null | null |
game/memobrowser.py
|
AmkG/gearhead-caramel
|
0238378295a09b4b33adb2ec0854fa06b0ad7b1b
|
[
"Apache-2.0"
] | null | null | null |
import pbge
import pygame
class MemoBrowser(object):
def __init__(self,camp):
self.camp = camp
self.text_area = pbge.frects.Frect(-200,-100,400,200)
self.memos = [p.memo for p in camp.active_plots() if p.memo]
if not self.memos:
self.memos = ["<<No memos.>>"]
self.memo_n = 0
self.keep_browsing = True
bfbuttonsprite = pbge.image.Image('sys_bfarrows.png',80,32)
self.prev_button = pbge.widgets.ButtonWidget(-200,116,80,32,bfbuttonsprite,0,on_click=self.prev_memo)
self.next_button = pbge.widgets.ButtonWidget(120,116,80,32,bfbuttonsprite,1,on_click=self.next_memo)
closebuttonsprite = pbge.image.Image('sys_closeicon.png',13,14)
self.close_button = pbge.widgets.ButtonWidget(200,-112,13,14,closebuttonsprite,0,on_click=self.close_browser)
def render( self ):
pbge.my_state.view()
myrect = self.text_area.get_rect()
pbge.default_border.render(myrect)
pbge.draw_text(pbge.my_state.medium_font,self.memos[self.memo_n],myrect)
def close_browser(self,button=None,ev=None):
self.keep_browsing = False
def prev_memo(self,button=None,ev=None):
self.memo_n -= 1
if self.memo_n < 0:
self.memo_n = len(self.memos)-1
def next_memo(self,button=None,ev=None):
self.memo_n += 1
if self.memo_n >= len(self.memos):
self.memo_n = 0
def update(self,ev):
        # ev is a pygame event.
if ev.type == pbge.TIMEREVENT:
self.render()
pbge.my_state.do_flip()
elif ev.type == pygame.KEYDOWN:
if ev.key == pygame.K_LEFT:
self.prev_memo()
elif ev.key == pygame.K_ESCAPE:
self.keep_browsing = False
def activate(self):
pbge.my_state.widgets += [self.prev_button,self.next_button,self.close_button]
def deactivate(self):
pbge.my_state.widgets.remove(self.prev_button)
pbge.my_state.widgets.remove(self.next_button)
pbge.my_state.widgets.remove(self.close_button)
def __call__(self):
self.activate()
while self.keep_browsing:
gdi = pbge.wait_event()
self.update(gdi)
self.deactivate()
    @classmethod
    def browse(cls, camp):
        # Run the memo browser UI modally until the player closes it.
        myui = cls(camp)
        myui()
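# A minimal usage sketch (assumes a live campaign object and an initialized
# pbge display state, as set up elsewhere in the game):
#
#   MemoBrowser.browse(camp)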
| 35.6
| 117
| 0.628812
|
794aa310245a161204649cae29e759c39aff78e0
| 627
|
py
|
Python
|
recursive_count_items/recursive_count_items.py
|
Lumexralph/python-algorithm-datastructures
|
5108cbc19c6cb650e72a95e5fa0c69be2a3354ee
|
[
"MIT"
] | null | null | null |
recursive_count_items/recursive_count_items.py
|
Lumexralph/python-algorithm-datastructures
|
5108cbc19c6cb650e72a95e5fa0c69be2a3354ee
|
[
"MIT"
] | null | null | null |
recursive_count_items/recursive_count_items.py
|
Lumexralph/python-algorithm-datastructures
|
5108cbc19c6cb650e72a95e5fa0c69be2a3354ee
|
[
"MIT"
] | 1
|
2019-06-11T00:02:10.000Z
|
2019-06-11T00:02:10.000Z
|
def count_items_in_list(items):
"""function to recursively count items in a list
without using the len() function
Args:
items(list): an array of items
    Returns:
        int: the count of items in a list
        str: an error message when the input is not a list/array
    """
if not isinstance(items, list):
return "data not a list/array"
try:
# remove first item from the list
# it reduces the size of the list
# make a copy
copy_of_items = items[:]
copy_of_items.pop(0)
return 1 + count_items_in_list(copy_of_items)
except IndexError:
# the base case or index when there's no
# element left in the list, means it is empty
return 0
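if __name__ == "__main__":
    # A small usage sketch: three items unwind as 1 + 1 + 1 + 0 == 3.
    print(count_items_in_list(["a", "b", "c"]))  # 3
    print(count_items_in_list("not a list"))     # data not a list/array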
| 20.9
| 50
| 0.671451
|
794aa3e6e9edb544472bbcbde0cfbf0f3f15f9a1
| 2,403
|
py
|
Python
|
share/qt/extract_strings_qt.py
|
fujicoin/fujicoin-0.20.0
|
2bed6d064ce44ada8bf3263fc2138029b78a8011
|
[
"MIT"
] | null | null | null |
share/qt/extract_strings_qt.py
|
fujicoin/fujicoin-0.20.0
|
2bed6d064ce44ada8bf3263fc2138029b78a8011
|
[
"MIT"
] | null | null | null |
share/qt/extract_strings_qt.py
|
fujicoin/fujicoin-0.20.0
|
2bed6d064ce44ada8bf3263fc2138029b78a8011
|
[
"MIT"
] | 1
|
2021-07-18T11:40:12.000Z
|
2021-07-18T11:40:12.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2012-2019 The Fujicoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import operator
import os
import sys
OUT_CPP="qt/fujicoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
sys.exit(1)
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out.decode('utf-8'))
f = open(OUT_CPP, 'w', encoding="utf8")
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings_qt.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *fujicoin_strings[] = {\n')
f.write('QT_TRANSLATE_NOOP("fujicoin-core", "%s"),\n' % (os.getenv('COPYRIGHT_HOLDERS'),))
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("fujicoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
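# A minimal sketch of the parse_po() helper above on an illustrative snippet
# (run separately from this script, which expects source files as arguments):
#
#   sample = 'msgid "hello"\nmsgstr ""\n'
#   parse_po(sample)  # -> [(['"hello"'], ['""'])]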
| 27.94186
| 105
| 0.629213
|
794aa41bef7cc9154fa183231c97277d53309e87
| 1,320
|
py
|
Python
|
hathor/p2p/netfilter/rule.py
|
mbnunes/hathor-core
|
e5e0d4a627341e2a37ee46db5c9354ddb7f8dfb8
|
[
"Apache-2.0"
] | 51
|
2019-12-28T03:33:27.000Z
|
2022-03-10T14:03:03.000Z
|
hathor/p2p/netfilter/rule.py
|
mbnunes/hathor-core
|
e5e0d4a627341e2a37ee46db5c9354ddb7f8dfb8
|
[
"Apache-2.0"
] | 316
|
2019-09-10T09:20:05.000Z
|
2022-03-31T20:18:56.000Z
|
hathor/p2p/netfilter/rule.py
|
jansegre/hathor-core
|
22b3de6be2518e7a0797edbf0e4f6eb1cf28d6fd
|
[
"Apache-2.0"
] | 19
|
2020-01-04T00:13:18.000Z
|
2022-02-08T21:18:46.000Z
|
# Copyright 2021 Hathor Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from hathor.p2p.netfilter.chain import NetfilterChain
from hathor.p2p.netfilter.context import NetfilterContext
from hathor.p2p.netfilter.matches import NetfilterMatch
from hathor.p2p.netfilter.target import NetfilterTarget
class NetfilterRule:
"""Rule that has a match and a target."""
def __init__(self, match: 'NetfilterMatch', target: 'NetfilterTarget'):
self.chain: Optional['NetfilterChain'] = None
self.match = match
self.target = target
def get_target_if_match(self, context: 'NetfilterContext') -> Optional['NetfilterTarget']:
if not self.match.match(context):
return None
return self.target
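# A minimal usage sketch with stand-in objects (my_match, my_target, and
# context are hypothetical NetfilterMatch/NetfilterTarget/NetfilterContext
# instances):
#
#   rule = NetfilterRule(match=my_match, target=my_target)
#   target = rule.get_target_if_match(context)  # the target, or None on no match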
| 37.714286
| 94
| 0.739394
|
794aa4445d46e27f0eae59e03a75fd56cc056dba
| 1,753
|
py
|
Python
|
kwanmath/geodesy.py
|
kwan3217/kwanmath
|
c43f8209324cdb0c673b969b41b06d49c9d46e71
|
[
"BSD-3-Clause"
] | null | null | null |
kwanmath/geodesy.py
|
kwan3217/kwanmath
|
c43f8209324cdb0c673b969b41b06d49c9d46e71
|
[
"BSD-3-Clause"
] | null | null | null |
kwanmath/geodesy.py
|
kwan3217/kwanmath
|
c43f8209324cdb0c673b969b41b06d49c9d46e71
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Geodesy and gravity calculations
"""
from .vector import vdecomp, vlength, vcomp, rv
import numpy as np
def xyz2llr(sv):
"""
Calculate spherical coordinates of state
:param sv: State vector, can be stack
:return: tuple of (lon,lat,r). Each will be an array iff sv is a stack
"""
x,y,z=vdecomp(rv(sv))
r=vlength(rv(sv))
lat=np.arcsin(z/r)
lon=np.arctan2(y,x)
return(lon,lat,r)
def llr2xyz(*,latd,lond,r):
x=r*np.cos(np.radians(latd))*np.cos(np.radians(lond))
y=r*np.cos(np.radians(latd))*np.sin(np.radians(lond))
z=r*np.sin(np.radians(latd))
return vcomp((x,y,z))
def aJ2(svf,*,j2,gm,re):
"""
J2 gravity acceleration
:param svf: State vector in an inertial equatorial frame (frozen frame is designed to meet this requirement)
    :return: J2 acceleration in (distance units implied by svf)/(time units implied by constants)**2, in the same frame as svf
    The keyword constants j2, gm, and re must have distance units consistent
    with svf, and time units consistent with each other.
    Only the position part of the state is used, but the velocity part *should* have time units consistent
    with the constants. Time units follow those of the constants, completely ignoring those implied
    by the velocity part.
"""
r=vlength(rv(svf))
coef=-3*j2*gm*re**2/(2*r**5)
x,y,z=vdecomp(rv(svf))
j2x=x*(1-5*z**2/r**2)
j2y=y*(1-5*z**2/r**2)
j2z=z*(3-5*z**2/r**2)
return (coef*vcomp((j2x,j2y,j2z)))
def aTwoBody(svi,*,gm):
"""
Two-body gravity acceleration
:param rv: Position vector in an inertial frame
:return: Two-body acceleration in (distance units implied by rv)/s**2
"""
return -gm*rv(svi)/vlength(rv(svi))**3
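if __name__ == "__main__":
    # Worked numeric check of the two-body formula above, a = -gm*r/|r|**3,
    # using illustrative Earth constants in km and s; this is a standalone
    # sketch with plain numpy rather than the stack-aware .vector helpers.
    gm_earth = 398600.4418                        # km**3/s**2
    r_vec = np.array([7000.0, 0.0, 0.0])          # km
    a_vec = -gm_earth * r_vec / np.linalg.norm(r_vec)**3
    print(a_vec)  # ~[-8.13e-3, 0, 0] km/s**2, pointing back toward the origin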
| 32.462963
| 122
| 0.663434
|
794aa4be77b7073fc8f4a1ad438ae9a13480151f
| 7,957
|
py
|
Python
|
src/graphql/type/directives.py
|
KingDarBoja/graphql-core
|
22970e94f1016e813848fc0ab5d1e7ab9ad612e4
|
[
"MIT"
] | null | null | null |
src/graphql/type/directives.py
|
KingDarBoja/graphql-core
|
22970e94f1016e813848fc0ab5d1e7ab9ad612e4
|
[
"MIT"
] | null | null | null |
src/graphql/type/directives.py
|
KingDarBoja/graphql-core
|
22970e94f1016e813848fc0ab5d1e7ab9ad612e4
|
[
"MIT"
] | null | null | null |
from typing import Any, Collection, Dict, List, Optional, cast
from ..language import ast, DirectiveLocation
from ..pyutils import inspect, is_description, FrozenList
from .definition import GraphQLArgument, GraphQLInputType, GraphQLNonNull, is_input_type
from .scalars import GraphQLBoolean, GraphQLString
__all__ = [
"is_directive",
"assert_directive",
"is_specified_directive",
"specified_directives",
"GraphQLDirective",
"GraphQLIncludeDirective",
"GraphQLSkipDirective",
"GraphQLDeprecatedDirective",
"GraphQLSpecifiedByDirective",
"DirectiveLocation",
"DEFAULT_DEPRECATION_REASON",
]
class GraphQLDirective:
"""GraphQL Directive
Directives are used by the GraphQL runtime as a way of modifying execution behavior.
Type system creators will usually not create these directly.
"""
name: str
locations: List[DirectiveLocation]
is_repeatable: bool
args: Dict[str, GraphQLArgument]
description: Optional[str]
extensions: Optional[Dict[str, Any]]
ast_node: Optional[ast.DirectiveDefinitionNode]
def __init__(
self,
name: str,
locations: Collection[DirectiveLocation],
args: Optional[Dict[str, GraphQLArgument]] = None,
is_repeatable: bool = False,
description: Optional[str] = None,
extensions: Optional[Dict[str, Any]] = None,
ast_node: Optional[ast.DirectiveDefinitionNode] = None,
) -> None:
if not name:
raise TypeError("Directive must be named.")
elif not isinstance(name, str):
raise TypeError("The directive name must be a string.")
try:
locations = [
value
if isinstance(value, DirectiveLocation)
else DirectiveLocation[cast(str, value)]
for value in locations
]
except (KeyError, TypeError):
raise TypeError(
f"{name} locations must be specified"
" as a collection of DirectiveLocation enum values."
)
if args is None:
args = {}
elif not isinstance(args, dict) or not all(
isinstance(key, str) for key in args
):
raise TypeError(f"{name} args must be a dict with argument names as keys.")
elif not all(
isinstance(value, GraphQLArgument) or is_input_type(value)
for value in args.values()
):
raise TypeError(
f"{name} args must be GraphQLArgument or input type objects."
)
else:
args = {
name: value
if isinstance(value, GraphQLArgument)
else GraphQLArgument(cast(GraphQLInputType, value))
for name, value in args.items()
}
if not isinstance(is_repeatable, bool):
raise TypeError(f"{name} is_repeatable flag must be True or False.")
if ast_node and not isinstance(ast_node, ast.DirectiveDefinitionNode):
raise TypeError(f"{name} AST node must be a DirectiveDefinitionNode.")
if description is not None and not is_description(description):
raise TypeError(f"{name} description must be a string.")
if extensions is not None and (
not isinstance(extensions, dict)
or not all(isinstance(key, str) for key in extensions)
):
raise TypeError(f"{name} extensions must be a dictionary with string keys.")
self.name = name
self.locations = locations
self.args = args
self.is_repeatable = is_repeatable
self.description = description
self.extensions = extensions
self.ast_node = ast_node
def __str__(self) -> str:
return f"@{self.name}"
def __repr__(self) -> str:
return f"<{self.__class__.__name__}({self})>"
def __eq__(self, other: Any) -> bool:
return self is other or (
isinstance(other, GraphQLDirective)
and self.name == other.name
and self.locations == other.locations
and self.args == other.args
and self.is_repeatable == other.is_repeatable
and self.description == other.description
and self.extensions == other.extensions
)
def to_kwargs(self) -> Dict[str, Any]:
return dict(
name=self.name,
locations=self.locations,
args=self.args,
is_repeatable=self.is_repeatable,
description=self.description,
extensions=self.extensions,
ast_node=self.ast_node,
)
def is_directive(directive: Any) -> bool:
"""Test if the given value is a GraphQL directive."""
return isinstance(directive, GraphQLDirective)
def assert_directive(directive: Any) -> GraphQLDirective:
if not is_directive(directive):
raise TypeError(f"Expected {inspect(directive)} to be a GraphQL directive.")
return cast(GraphQLDirective, directive)
# Used to conditionally include fields or fragments.
GraphQLIncludeDirective = GraphQLDirective(
name="include",
locations=[
DirectiveLocation.FIELD,
DirectiveLocation.FRAGMENT_SPREAD,
DirectiveLocation.INLINE_FRAGMENT,
],
args={
"if": GraphQLArgument(
GraphQLNonNull(GraphQLBoolean), description="Included when true."
)
},
description="Directs the executor to include this field or fragment"
" only when the `if` argument is true.",
)
# Used to conditionally skip (exclude) fields or fragments:
GraphQLSkipDirective = GraphQLDirective(
name="skip",
locations=[
DirectiveLocation.FIELD,
DirectiveLocation.FRAGMENT_SPREAD,
DirectiveLocation.INLINE_FRAGMENT,
],
args={
"if": GraphQLArgument(
GraphQLNonNull(GraphQLBoolean), description="Skipped when true."
)
},
description="Directs the executor to skip this field or fragment"
" when the `if` argument is true.",
)
# Constant string used for default reason for a deprecation:
DEFAULT_DEPRECATION_REASON = "No longer supported"
# Used to declare element of a GraphQL schema as deprecated:
GraphQLDeprecatedDirective = GraphQLDirective(
name="deprecated",
locations=[DirectiveLocation.FIELD_DEFINITION, DirectiveLocation.ENUM_VALUE],
args={
"reason": GraphQLArgument(
GraphQLString,
description="Explains why this element was deprecated,"
" usually also including a suggestion for how to access"
" supported similar data."
" Formatted using the Markdown syntax, as specified by"
" [CommonMark](https://commonmark.org/).",
default_value=DEFAULT_DEPRECATION_REASON,
)
},
description="Marks an element of a GraphQL schema as no longer supported.",
)
# Used to provide a URL for specifying the behaviour of custom scalar definitions:
GraphQLSpecifiedByDirective = GraphQLDirective(
name="specifiedBy",
locations=[DirectiveLocation.SCALAR],
args={
"url": GraphQLArgument(
GraphQLNonNull(GraphQLString),
description="The URL that specifies the behaviour of this scalar.",
)
},
description="Exposes a URL that specifies the behaviour of this scalar.",
)
specified_directives: FrozenList[GraphQLDirective] = FrozenList(
[
GraphQLIncludeDirective,
GraphQLSkipDirective,
GraphQLDeprecatedDirective,
GraphQLSpecifiedByDirective,
]
)
specified_directives.__doc__ = """The full list of specified directives."""
def is_specified_directive(directive: GraphQLDirective) -> bool:
"""Check whether the given directive is one of the specified directives."""
return any(
specified_directive.name == directive.name
for specified_directive in specified_directives
)
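if __name__ == "__main__":
    # Sketch of defining a custom directive with the class above. The
    # @cacheControl name, its argument, and its locations are made-up
    # illustration values; GraphQLInt comes from the sibling scalars module.
    from graphql.type.scalars import GraphQLInt

    GraphQLCacheControlDirective = GraphQLDirective(
        name="cacheControl",
        locations=[DirectiveLocation.FIELD_DEFINITION, DirectiveLocation.OBJECT],
        args={
            "maxAge": GraphQLArgument(
                GraphQLInt, description="Maximum cache age in seconds."
            )
        },
        description="Controls server-side caching for a field or type.",
    )

    assert is_directive(GraphQLCacheControlDirective)
    assert not is_specified_directive(GraphQLCacheControlDirective)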
| 34.445887
| 88
| 0.646852
|
794aa519b39aec3d57d2b9c13f3564b32aa35383
| 2,716
|
py
|
Python
|
reproject/utils.py
|
barentsen/reproject
|
af164f0cabc3ec772c6d2a03e58eef65060bfb52
|
[
"BSD-3-Clause"
] | null | null | null |
reproject/utils.py
|
barentsen/reproject
|
af164f0cabc3ec772c6d2a03e58eef65060bfb52
|
[
"BSD-3-Clause"
] | null | null | null |
reproject/utils.py
|
barentsen/reproject
|
af164f0cabc3ec772c6d2a03e58eef65060bfb52
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from astropy.io import fits
from astropy.io.fits import CompImageHDU, HDUList, Header, ImageHDU, PrimaryHDU
from astropy.wcs import WCS
from astropy.wcs.wcsapi import BaseHighLevelWCS
def parse_input_data(input_data, hdu_in=None):
"""
Parse input data to return a Numpy array and WCS object.
"""
if isinstance(input_data, str):
return parse_input_data(fits.open(input_data), hdu_in=hdu_in)
elif isinstance(input_data, HDUList):
if hdu_in is None:
if len(input_data) > 1:
raise ValueError("More than one HDU is present, please specify HDU to use with ``hdu_in=`` option")
else:
hdu_in = 0
return parse_input_data(input_data[hdu_in])
elif isinstance(input_data, (PrimaryHDU, ImageHDU, CompImageHDU)):
return input_data.data, WCS(input_data.header)
elif isinstance(input_data, tuple) and isinstance(input_data[0], np.ndarray):
if isinstance(input_data[1], Header):
return input_data[0], WCS(input_data[1])
else:
return input_data
else:
raise TypeError("input_data should either be an HDU object or a tuple of (array, WCS) or (array, Header)")
def parse_output_projection(output_projection, shape_out=None, output_array=None):
if shape_out is None:
if output_array is not None:
shape_out = output_array.shape
elif shape_out is not None and output_array is not None:
if shape_out != output_array.shape:
raise ValueError("shape_out does not match shape of output_array")
if isinstance(output_projection, Header):
wcs_out = WCS(output_projection)
try:
shape_out = [output_projection['NAXIS{0}'.format(i + 1)] for i in range(output_projection['NAXIS'])][::-1]
except KeyError:
if shape_out is None:
raise ValueError("Need to specify shape since output header does not contain complete shape information")
elif isinstance(output_projection, BaseHighLevelWCS):
wcs_out = output_projection
if shape_out is None:
raise ValueError("Need to specify shape when specifying output_projection as WCS object")
elif isinstance(output_projection, str):
hdu_list = fits.open(output_projection)
shape_out = hdu_list[0].data.shape
header = hdu_list[0].header
wcs_out = WCS(header)
hdu_list.close()
else:
raise TypeError('output_projection should either be a Header, a WCS object, or a filename')
if len(shape_out) == 0:
raise ValueError("The shape of the output image should not be an empty tuple")
return wcs_out, shape_out
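if __name__ == "__main__":
    # Usage sketch with made-up inputs: a blank 2x2 array and a trivial 2-D WCS.
    data_in = np.zeros((2, 2))
    wcs_in = WCS(naxis=2)
    # An (array, WCS) tuple is passed through unchanged.
    array, wcs = parse_input_data((data_in, wcs_in))
    # An explicit shape is required when the projection is given as a WCS object.
    wcs_out, shape_out = parse_output_projection(wcs_in, shape_out=(2, 2))
    print(shape_out)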
| 41.784615
| 121
| 0.678203
|
794aa55b7b9af4ce05e4960e812cc0d6353eb77b
| 26,582
|
py
|
Python
|
tests/core/tests/indexes.py
|
infoxchange/django-haystack
|
5c2dec0da28846eec87a3a5c6166a1734d4245ab
|
[
"BSD-3-Clause"
] | null | null | null |
tests/core/tests/indexes.py
|
infoxchange/django-haystack
|
5c2dec0da28846eec87a3a5c6166a1734d4245ab
|
[
"BSD-3-Clause"
] | null | null | null |
tests/core/tests/indexes.py
|
infoxchange/django-haystack
|
5c2dec0da28846eec87a3a5c6166a1734d4245ab
|
[
"BSD-3-Clause"
] | null | null | null |
import datetime
from threading import Thread
import time
from django.test import TestCase
from django.utils.six.moves import queue
from haystack import connections, connection_router
from haystack.exceptions import SearchFieldError
from haystack import indexes
from haystack.utils.loading import UnifiedIndex
from core.models import MockModel, AThirdMockModel, AFifthMockModel
class BadSearchIndex1(indexes.SearchIndex, indexes.Indexable):
author = indexes.CharField(model_attr='author')
pub_date = indexes.DateTimeField(model_attr='pub_date')
def get_model(self):
return MockModel
class BadSearchIndex2(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
content2 = indexes.CharField(document=True, use_template=True)
author = indexes.CharField(model_attr='author')
pub_date = indexes.DateTimeField(model_attr='pub_date')
def get_model(self):
return MockModel
class GoodMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
author = indexes.CharField(model_attr='author')
pub_date = indexes.DateTimeField(model_attr='pub_date')
extra = indexes.CharField(indexed=False, use_template=True)
def get_model(self):
return MockModel
# For testing inheritance...
class AltGoodMockSearchIndex(GoodMockSearchIndex, indexes.Indexable):
additional = indexes.CharField(model_attr='author')
def get_model(self):
return MockModel
class GoodCustomMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
author = indexes.CharField(model_attr='author', faceted=True)
pub_date = indexes.DateTimeField(model_attr='pub_date', faceted=True)
extra = indexes.CharField(indexed=False, use_template=True)
hello = indexes.CharField(model_attr='hello')
def prepare(self, obj):
super(GoodCustomMockSearchIndex, self).prepare(obj)
self.prepared_data['whee'] = 'Custom preparation.'
return self.prepared_data
def prepare_author(self, obj):
return "Hi, I'm %s" % self.prepared_data['author']
def load_all_queryset(self):
return self.get_model()._default_manager.filter(id__gt=1)
def get_model(self):
return MockModel
def index_queryset(self, using=None):
return MockModel.objects.all()
def read_queryset(self, using=None):
return MockModel.objects.filter(author__in=['daniel1', 'daniel3'])
def build_queryset(self, start_date=None, end_date=None):
return MockModel.objects.filter(author__in=['daniel1', 'daniel3'])
class GoodNullableMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
author = indexes.CharField(model_attr='author', null=True, faceted=True)
def get_model(self):
return MockModel
class GoodOverriddenFieldNameMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True, index_fieldname='more_content')
author = indexes.CharField(model_attr='author', index_fieldname='name_s')
hello = indexes.CharField(model_attr='hello')
def get_model(self):
return MockModel
class GoodFacetedMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
author = indexes.CharField(model_attr='author')
author_foo = indexes.FacetCharField(facet_for='author')
pub_date = indexes.DateTimeField(model_attr='pub_date')
pub_date_exact = indexes.FacetDateTimeField(facet_for='pub_date')
def get_model(self):
return MockModel
def prepare_author(self, obj):
return "Hi, I'm %s" % self.prepared_data['author']
def prepare_pub_date_exact(self, obj):
return "2010-10-26T01:54:32"
class MROFieldsSearchIndexA(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, model_attr='test_a')
def get_model(self):
return MockModel
class MROFieldsSearchIndexB(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, model_attr='test_b')
def get_model(self):
return MockModel
class MROFieldsSearchChild(MROFieldsSearchIndexA, MROFieldsSearchIndexB):
pass
class SearchIndexTestCase(TestCase):
def setUp(self):
super(SearchIndexTestCase, self).setUp()
self.sb = connections['default'].get_backend()
self.mi = GoodMockSearchIndex()
self.cmi = GoodCustomMockSearchIndex()
self.cnmi = GoodNullableMockSearchIndex()
self.gfmsi = GoodFacetedMockSearchIndex()
# Fake the unified index.
self.old_unified_index = connections['default']._index
self.ui = UnifiedIndex()
self.ui.build(indexes=[self.mi])
connections['default']._index = self.ui
self.sample_docs = {
u'core.mockmodel.1': {
'text': u'Indexed!\n1',
'django_id': u'1',
'django_ct': u'core.mockmodel',
'extra': u'Stored!\n1',
'author': u'daniel1',
'pub_date': datetime.datetime(2009, 3, 17, 6, 0),
'id': u'core.mockmodel.1'
},
u'core.mockmodel.2': {
'text': u'Indexed!\n2',
'django_id': u'2',
'django_ct': u'core.mockmodel',
'extra': u'Stored!\n2',
'author': u'daniel2',
'pub_date': datetime.datetime(2009, 3, 17, 7, 0),
'id': u'core.mockmodel.2'
},
u'core.mockmodel.3': {
'text': u'Indexed!\n3',
'django_id': u'3',
'django_ct': u'core.mockmodel',
'extra': u'Stored!\n3',
'author': u'daniel3',
'pub_date': datetime.datetime(2009, 3, 17, 8, 0),
'id': u'core.mockmodel.3'
}
}
def tearDown(self):
connections['default']._index = self.old_unified_index
super(SearchIndexTestCase, self).tearDown()
def test_no_contentfield_present(self):
self.assertRaises(SearchFieldError, BadSearchIndex1)
def test_too_many_contentfields_present(self):
self.assertRaises(SearchFieldError, BadSearchIndex2)
def test_contentfield_present(self):
try:
mi = GoodMockSearchIndex()
except:
self.fail()
def test_proper_fields(self):
self.assertEqual(len(self.mi.fields), 4)
self.assertTrue('text' in self.mi.fields)
self.assertTrue(isinstance(self.mi.fields['text'], indexes.CharField))
self.assertTrue('author' in self.mi.fields)
self.assertTrue(isinstance(self.mi.fields['author'], indexes.CharField))
self.assertTrue('pub_date' in self.mi.fields)
self.assertTrue(isinstance(self.mi.fields['pub_date'], indexes.DateTimeField))
self.assertTrue('extra' in self.mi.fields)
self.assertTrue(isinstance(self.mi.fields['extra'], indexes.CharField))
self.assertEqual(len(self.cmi.fields), 7)
self.assertTrue('text' in self.cmi.fields)
self.assertTrue(isinstance(self.cmi.fields['text'], indexes.CharField))
self.assertTrue('author' in self.cmi.fields)
self.assertTrue(isinstance(self.cmi.fields['author'], indexes.CharField))
self.assertTrue('author_exact' in self.cmi.fields)
self.assertTrue(isinstance(self.cmi.fields['author_exact'], indexes.FacetCharField))
self.assertTrue('pub_date' in self.cmi.fields)
self.assertTrue(isinstance(self.cmi.fields['pub_date'], indexes.DateTimeField))
self.assertTrue('pub_date_exact' in self.cmi.fields)
self.assertTrue(isinstance(self.cmi.fields['pub_date_exact'], indexes.FacetDateTimeField))
self.assertTrue('extra' in self.cmi.fields)
self.assertTrue(isinstance(self.cmi.fields['extra'], indexes.CharField))
self.assertTrue('hello' in self.cmi.fields)
        self.assertTrue(isinstance(self.cmi.fields['hello'], indexes.CharField))
def test_index_queryset(self):
self.assertEqual(len(self.cmi.index_queryset()), 3)
def test_read_queryset(self):
self.assertEqual(len(self.cmi.read_queryset()), 2)
def test_build_queryset(self):
# The custom SearchIndex.build_queryset returns the same records as
# the read_queryset
self.assertEqual(len(self.cmi.build_queryset()), 2)
# Store a reference to the original method
old_guf = self.mi.__class__.get_updated_field
self.mi.__class__.get_updated_field = lambda self: 'pub_date'
        # With an updated field, we should get filtered results
sd = datetime.datetime(2009, 3, 17, 7, 0)
self.assertEqual(len(self.mi.build_queryset(start_date=sd)), 2)
ed = datetime.datetime(2009, 3, 17, 7, 59)
self.assertEqual(len(self.mi.build_queryset(end_date=ed)), 2)
sd = datetime.datetime(2009, 3, 17, 6, 0)
ed = datetime.datetime(2009, 3, 17, 6, 59)
self.assertEqual(len(self.mi.build_queryset(start_date=sd,
end_date=ed)), 1)
# Remove the updated field for the next test
del self.mi.__class__.get_updated_field
# The default should return all 3 even if we specify a start date
# because there is no updated field specified
self.assertEqual(len(self.mi.build_queryset(start_date=sd)), 3)
# Restore the original attribute
self.mi.__class__.get_updated_field = old_guf
def test_prepare(self):
mock = MockModel()
mock.pk = 20
mock.author = 'daniel%s' % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
self.assertEqual(len(self.mi.prepare(mock)), 7)
self.assertEqual(sorted(self.mi.prepare(mock).keys()), ['author', 'django_ct', 'django_id', 'extra', 'id', 'pub_date', 'text'])
def test_custom_prepare(self):
mock = MockModel()
mock.pk = 20
mock.author = 'daniel%s' % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
self.assertEqual(len(self.cmi.prepare(mock)), 11)
self.assertEqual(sorted(self.cmi.prepare(mock).keys()), ['author', 'author_exact', 'django_ct', 'django_id', 'extra', 'hello', 'id', 'pub_date', 'pub_date_exact', 'text', 'whee'])
self.assertEqual(len(self.cmi.full_prepare(mock)), 11)
self.assertEqual(sorted(self.cmi.full_prepare(mock).keys()), ['author', 'author_exact', 'django_ct', 'django_id', 'extra', 'hello', 'id', 'pub_date', 'pub_date_exact', 'text', 'whee'])
def test_thread_safety(self):
# This is a regression. ``SearchIndex`` used to write to
# ``self.prepared_data``, which would leak between threads if things
# went too fast.
exceptions = []
def threaded_prepare(index_queue, index, model):
try:
index.queue = index_queue
prepped = index.prepare(model)
except Exception as e:
exceptions.append(e)
raise
class ThreadedSearchIndex(GoodMockSearchIndex):
def prepare_author(self, obj):
if obj.pk == 20:
time.sleep(0.1)
else:
time.sleep(0.5)
index_queue.put(self.prepared_data['author'])
return self.prepared_data['author']
tmi = ThreadedSearchIndex()
index_queue = queue.Queue()
mock_1 = MockModel()
mock_1.pk = 20
mock_1.author = 'foo'
mock_1.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
mock_2 = MockModel()
mock_2.pk = 21
mock_2.author = 'daniel%s' % mock_2.id
mock_2.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
th1 = Thread(target=threaded_prepare, args=(index_queue, tmi, mock_1))
th2 = Thread(target=threaded_prepare, args=(index_queue, tmi, mock_2))
th1.start()
th2.start()
th1.join()
th2.join()
mock_1_result = index_queue.get()
mock_2_result = index_queue.get()
self.assertEqual(mock_1_result, u'foo')
self.assertEqual(mock_2_result, u'daniel21')
def test_custom_prepare_author(self):
mock = MockModel()
mock.pk = 20
mock.author = 'daniel%s' % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
self.assertEqual(len(self.cmi.prepare(mock)), 11)
self.assertEqual(sorted(self.cmi.prepare(mock).keys()), ['author', 'author_exact', 'django_ct', 'django_id', 'extra', 'hello', 'id', 'pub_date', 'pub_date_exact', 'text', 'whee'])
self.assertEqual(len(self.cmi.full_prepare(mock)), 11)
self.assertEqual(sorted(self.cmi.full_prepare(mock).keys()), ['author', 'author_exact', 'django_ct', 'django_id', 'extra', 'hello', 'id', 'pub_date', 'pub_date_exact', 'text', 'whee'])
self.assertEqual(self.cmi.prepared_data['author'], "Hi, I'm daniel20")
self.assertEqual(self.cmi.prepared_data['author_exact'], "Hi, I'm daniel20")
def test_custom_model_attr(self):
mock = MockModel()
mock.pk = 20
mock.author = 'daniel%s' % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
self.assertEqual(len(self.cmi.prepare(mock)), 11)
self.assertEqual(sorted(self.cmi.prepare(mock).keys()), ['author', 'author_exact', 'django_ct', 'django_id', 'extra', 'hello', 'id', 'pub_date', 'pub_date_exact', 'text', 'whee'])
self.assertEqual(len(self.cmi.full_prepare(mock)), 11)
self.assertEqual(sorted(self.cmi.full_prepare(mock).keys()), ['author', 'author_exact', 'django_ct', 'django_id', 'extra', 'hello', 'id', 'pub_date', 'pub_date_exact', 'text', 'whee'])
self.assertEqual(self.cmi.prepared_data['hello'], u'World!')
def test_custom_index_fieldname(self):
mock = MockModel()
mock.pk = 20
mock.author = 'daniel%s' % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
cofnmi = GoodOverriddenFieldNameMockSearchIndex()
self.assertEqual(len(cofnmi.prepare(mock)), 6)
self.assertEqual(sorted(cofnmi.prepare(mock).keys()), ['django_ct', 'django_id', 'hello', 'id', 'more_content', 'name_s'])
self.assertEqual(cofnmi.prepared_data['name_s'], u'daniel20')
self.assertEqual(cofnmi.get_content_field(), 'more_content')
def test_get_content_field(self):
self.assertEqual(self.mi.get_content_field(), 'text')
def test_update(self):
self.sb.clear()
self.assertEqual(self.sb.search('*')['hits'], 0)
self.mi.update()
self.assertEqual(self.sb.search('*')['hits'], 3)
self.sb.clear()
def test_update_object(self):
self.sb.clear()
self.assertEqual(self.sb.search('*')['hits'], 0)
mock = MockModel()
mock.pk = 20
mock.author = 'daniel%s' % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
self.mi.update_object(mock)
self.assertEqual([(res.content_type(), res.pk) for res in self.sb.search('*')['results']], [(u'core.mockmodel', u'20')])
self.sb.clear()
def test_remove_object(self):
self.mi.update()
self.assertEqual(self.sb.search('*')['hits'], 3)
mock = MockModel()
mock.pk = 20
mock.author = 'daniel%s' % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
self.mi.update_object(mock)
self.assertEqual(self.sb.search('*')['hits'], 4)
self.mi.remove_object(mock)
self.assertEqual([(res.content_type(), res.pk) for res in self.sb.search('*')['results']], [(u'core.mockmodel', u'1'), (u'core.mockmodel', u'2'), (u'core.mockmodel', u'3')])
# Put it back so we can test passing kwargs.
mock = MockModel()
mock.pk = 20
mock.author = 'daniel%s' % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
self.mi.update_object(mock)
self.assertEqual(self.sb.search('*')['hits'], 4)
self.mi.remove_object(mock, commit=False)
self.assertEqual([(res.content_type(), res.pk) for res in self.sb.search('*')['results']], [(u'core.mockmodel', u'1'), (u'core.mockmodel', u'2'), (u'core.mockmodel', u'3'), (u'core.mockmodel', u'20')])
self.sb.clear()
def test_clear(self):
self.mi.update()
self.assertEqual(self.sb.search('*')['hits'], 3)
self.mi.clear()
self.assertEqual(self.sb.search('*')['hits'], 0)
def test_reindex(self):
self.mi.reindex()
self.assertEqual([(res.content_type(), res.pk) for res in self.sb.search('*')['results']], [(u'core.mockmodel', u'1'), (u'core.mockmodel', u'2'), (u'core.mockmodel', u'3')])
self.sb.clear()
def test_inheritance(self):
try:
agmi = AltGoodMockSearchIndex()
except:
self.fail()
self.assertEqual(len(agmi.fields), 5)
self.assertTrue('text' in agmi.fields)
self.assertTrue(isinstance(agmi.fields['text'], indexes.CharField))
self.assertTrue('author' in agmi.fields)
self.assertTrue(isinstance(agmi.fields['author'], indexes.CharField))
self.assertTrue('pub_date' in agmi.fields)
self.assertTrue(isinstance(agmi.fields['pub_date'], indexes.DateTimeField))
self.assertTrue('extra' in agmi.fields)
self.assertTrue(isinstance(agmi.fields['extra'], indexes.CharField))
self.assertTrue('additional' in agmi.fields)
self.assertTrue(isinstance(agmi.fields['additional'], indexes.CharField))
def test_proper_field_resolution(self):
mrofsc = MROFieldsSearchChild()
mock = MockModel()
mock.pk = 20
mock.author = 'daniel%s' % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
mock.test_a = 'This is A'
mock.test_b = 'This is B'
self.assertEqual(len(mrofsc.fields), 1)
prepped_data = mrofsc.prepare(mock)
self.assertEqual(len(prepped_data), 4)
self.assertEqual(prepped_data['text'], 'This is A')
def test_load_all_queryset(self):
self.assertEqual([obj.id for obj in self.cmi.load_all_queryset()], [2, 3])
def test_nullable(self):
mock = MockModel()
mock.pk = 20
mock.author = None
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
prepared_data = self.cnmi.prepare(mock)
self.assertEqual(len(prepared_data), 6)
self.assertEqual(sorted(prepared_data.keys()), ['author', 'author_exact', 'django_ct', 'django_id', 'id', 'text'])
prepared_data = self.cnmi.full_prepare(mock)
self.assertEqual(len(prepared_data), 4)
self.assertEqual(sorted(prepared_data.keys()), ['django_ct', 'django_id', 'id', 'text'])
def test_custom_facet_fields(self):
mock = MockModel()
mock.pk = 20
mock.author = 'daniel'
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
prepared_data = self.gfmsi.prepare(mock)
self.assertEqual(len(prepared_data), 8)
self.assertEqual(sorted(prepared_data.keys()), ['author', 'author_foo', 'django_ct', 'django_id', 'id', 'pub_date', 'pub_date_exact', 'text'])
prepared_data = self.gfmsi.full_prepare(mock)
self.assertEqual(len(prepared_data), 8)
self.assertEqual(sorted(prepared_data.keys()), ['author', 'author_foo', 'django_ct', 'django_id', 'id', 'pub_date', 'pub_date_exact', 'text'])
self.assertEqual(prepared_data['author_foo'], u"Hi, I'm daniel")
self.assertEqual(prepared_data['pub_date_exact'], '2010-10-26T01:54:32')
class BasicModelSearchIndex(indexes.ModelSearchIndex, indexes.Indexable):
class Meta:
model = MockModel
class FieldsModelSearchIndex(indexes.ModelSearchIndex, indexes.Indexable):
class Meta:
model = MockModel
fields = ['author', 'pub_date']
class ExcludesModelSearchIndex(indexes.ModelSearchIndex, indexes.Indexable):
class Meta:
model = MockModel
excludes = ['author', 'foo']
class FieldsWithOverrideModelSearchIndex(indexes.ModelSearchIndex, indexes.Indexable):
foo = indexes.IntegerField(model_attr='foo')
class Meta:
model = MockModel
fields = ['author', 'foo']
def get_index_fieldname(self, f):
if f.name == 'author':
return 'author_bar'
else:
return f.name
class YetAnotherBasicModelSearchIndex(indexes.ModelSearchIndex, indexes.Indexable):
text = indexes.CharField(document=True)
class Meta:
model = AThirdMockModel
class GhettoAFifthMockModelSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True)
def get_model(self):
return AFifthMockModel
def index_queryset(self, using=None):
        # Index everything.
return self.get_model().objects.complete_set()
def read_queryset(self, using=None):
return self.get_model().objects.all()
class ReadQuerySetTestSearchIndex(indexes.SearchIndex, indexes.Indexable):
author = indexes.CharField(model_attr='author', document=True)
def get_model(self):
return AFifthMockModel
def read_queryset(self, using=None):
return self.get_model().objects.complete_set()
class TextReadQuerySetTestSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(model_attr='author', document=True)
def get_model(self):
return AFifthMockModel
def read_queryset(self, using=None):
return self.get_model().objects.complete_set()
class ModelSearchIndexTestCase(TestCase):
def setUp(self):
super(ModelSearchIndexTestCase, self).setUp()
self.sb = connections['default'].get_backend()
self.bmsi = BasicModelSearchIndex()
self.fmsi = FieldsModelSearchIndex()
self.emsi = ExcludesModelSearchIndex()
self.fwomsi = FieldsWithOverrideModelSearchIndex()
self.yabmsi = YetAnotherBasicModelSearchIndex()
def test_basic(self):
self.assertEqual(len(self.bmsi.fields), 4)
self.assertTrue('foo' in self.bmsi.fields)
self.assertTrue(isinstance(self.bmsi.fields['foo'], indexes.CharField))
self.assertEqual(self.bmsi.fields['foo'].null, False)
self.assertEqual(self.bmsi.fields['foo'].index_fieldname, 'foo')
self.assertTrue('author' in self.bmsi.fields)
self.assertTrue(isinstance(self.bmsi.fields['author'], indexes.CharField))
self.assertEqual(self.bmsi.fields['author'].null, False)
self.assertTrue('pub_date' in self.bmsi.fields)
self.assertTrue(isinstance(self.bmsi.fields['pub_date'], indexes.DateTimeField))
self.assertTrue(isinstance(self.bmsi.fields['pub_date'].default, datetime.datetime))
self.assertTrue('text' in self.bmsi.fields)
self.assertTrue(isinstance(self.bmsi.fields['text'], indexes.CharField))
self.assertEqual(self.bmsi.fields['text'].document, True)
self.assertEqual(self.bmsi.fields['text'].use_template, True)
def test_fields(self):
self.assertEqual(len(self.fmsi.fields), 3)
self.assertTrue('author' in self.fmsi.fields)
self.assertTrue(isinstance(self.fmsi.fields['author'], indexes.CharField))
self.assertTrue('pub_date' in self.fmsi.fields)
self.assertTrue(isinstance(self.fmsi.fields['pub_date'], indexes.DateTimeField))
self.assertTrue('text' in self.fmsi.fields)
self.assertTrue(isinstance(self.fmsi.fields['text'], indexes.CharField))
def test_excludes(self):
self.assertEqual(len(self.emsi.fields), 2)
self.assertTrue('pub_date' in self.emsi.fields)
self.assertTrue(isinstance(self.emsi.fields['pub_date'], indexes.DateTimeField))
self.assertTrue('text' in self.emsi.fields)
self.assertTrue(isinstance(self.emsi.fields['text'], indexes.CharField))
def test_fields_with_override(self):
self.assertEqual(len(self.fwomsi.fields), 3)
self.assertTrue('author' in self.fwomsi.fields)
self.assertTrue(isinstance(self.fwomsi.fields['author'], indexes.CharField))
self.assertTrue('foo' in self.fwomsi.fields)
self.assertTrue(isinstance(self.fwomsi.fields['foo'], indexes.IntegerField))
self.assertTrue('text' in self.fwomsi.fields)
self.assertTrue(isinstance(self.fwomsi.fields['text'], indexes.CharField))
def test_overriding_field_name_with_get_index_fieldname(self):
self.assertTrue(self.fwomsi.fields['foo'].index_fieldname, 'foo')
self.assertTrue(self.fwomsi.fields['author'].index_fieldname, 'author_bar')
def test_float_integer_fields(self):
self.assertEqual(len(self.yabmsi.fields), 5)
self.assertEqual(sorted(self.yabmsi.fields.keys()), ['author', 'average_delay', 'pub_date', 'text', 'view_count'])
self.assertTrue('author' in self.yabmsi.fields)
self.assertTrue(isinstance(self.yabmsi.fields['author'], indexes.CharField))
self.assertEqual(self.yabmsi.fields['author'].null, False)
self.assertTrue('pub_date' in self.yabmsi.fields)
self.assertTrue(isinstance(self.yabmsi.fields['pub_date'], indexes.DateTimeField))
self.assertTrue(isinstance(self.yabmsi.fields['pub_date'].default, datetime.datetime))
self.assertTrue('text' in self.yabmsi.fields)
self.assertTrue(isinstance(self.yabmsi.fields['text'], indexes.CharField))
self.assertEqual(self.yabmsi.fields['text'].document, True)
self.assertEqual(self.yabmsi.fields['text'].use_template, False)
self.assertTrue('view_count' in self.yabmsi.fields)
self.assertTrue(isinstance(self.yabmsi.fields['view_count'], indexes.IntegerField))
self.assertEqual(self.yabmsi.fields['view_count'].null, False)
self.assertEqual(self.yabmsi.fields['view_count'].index_fieldname, 'view_count')
self.assertTrue('average_delay' in self.yabmsi.fields)
self.assertTrue(isinstance(self.yabmsi.fields['average_delay'], indexes.FloatField))
self.assertEqual(self.yabmsi.fields['average_delay'].null, False)
self.assertEqual(self.yabmsi.fields['average_delay'].index_fieldname, 'average_delay')
| 41.212403
| 209
| 0.657964
|
794aa58362bc9838495c7facb590ce593449435b
| 9,893
|
py
|
Python
|
src/webapp/azext_webapp/create_util.py
|
j-martens/azure-cli-extensions
|
3d4854205b0f0d882f688cfa12383d14506c2e35
|
[
"MIT"
] | 1
|
2019-05-10T19:58:09.000Z
|
2019-05-10T19:58:09.000Z
|
src/webapp/azext_webapp/create_util.py
|
j-martens/azure-cli-extensions
|
3d4854205b0f0d882f688cfa12383d14506c2e35
|
[
"MIT"
] | null | null | null |
src/webapp/azext_webapp/create_util.py
|
j-martens/azure-cli-extensions
|
3d4854205b0f0d882f688cfa12383d14506c2e35
|
[
"MIT"
] | 1
|
2021-07-28T14:50:54.000Z
|
2021-07-28T14:50:54.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import zipfile
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.mgmt.resource.resources.models import ResourceGroup
from ._constants import (
NETCORE_VERSION_DEFAULT,
NETCORE_VERSIONS,
NODE_VERSION_DEFAULT,
NODE_VERSIONS,
NETCORE_RUNTIME_NAME,
NODE_RUNTIME_NAME,
DOTNET_RUNTIME_NAME,
DOTNET_VERSION_DEFAULT,
DOTNET_VERSIONS,
STATIC_RUNTIME_NAME,
PYTHON_RUNTIME_NAME,
PYTHON_VERSION_DEFAULT)
def _resource_client_factory(cli_ctx, **_):
from azure.cli.core.profiles import ResourceType
return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
def web_client_factory(cli_ctx, **_):
from azure.mgmt.web import WebSiteManagementClient
return get_mgmt_service_client(cli_ctx, WebSiteManagementClient)
def zip_contents_from_dir(dirPath, lang):
relroot = os.path.abspath(os.path.join(dirPath, os.pardir))
path_and_file = os.path.splitdrive(dirPath)[1]
file_val = os.path.split(path_and_file)[1]
zip_file_path = relroot + os.path.sep + file_val + ".zip"
abs_src = os.path.abspath(dirPath)
with zipfile.ZipFile("{}".format(zip_file_path), "w", zipfile.ZIP_DEFLATED) as zf:
for dirname, subdirs, files in os.walk(dirPath):
# skip node_modules folder for Node apps,
            # since zip_deployment will perform the build operation
if lang.lower() == NODE_RUNTIME_NAME and 'node_modules' in subdirs:
subdirs.remove('node_modules')
elif lang.lower() == NETCORE_RUNTIME_NAME:
if 'bin' in subdirs:
subdirs.remove('bin')
elif 'obj' in subdirs:
subdirs.remove('obj')
for filename in files:
absname = os.path.abspath(os.path.join(dirname, filename))
arcname = absname[len(abs_src) + 1:]
zf.write(absname, arcname)
return zip_file_path
def get_runtime_version_details(file_path, lang_name):
version_detected = None
version_to_create = None
if lang_name.lower() == NETCORE_RUNTIME_NAME:
# method returns list in DESC, pick the first
version_detected = parse_netcore_version(file_path)[0]
version_to_create = detect_netcore_version_tocreate(version_detected)
elif lang_name.lower() == DOTNET_RUNTIME_NAME:
# method returns list in DESC, pick the first
version_detected = parse_dotnet_version(file_path)
version_to_create = detect_dotnet_version_tocreate(version_detected)
elif lang_name.lower() == NODE_RUNTIME_NAME:
version_detected = parse_node_version(file_path)[0]
version_to_create = detect_node_version_tocreate(version_detected)
elif lang_name.lower() == PYTHON_RUNTIME_NAME:
version_detected = "-"
version_to_create = PYTHON_VERSION_DEFAULT
elif lang_name.lower() == STATIC_RUNTIME_NAME:
version_detected = "-"
version_to_create = "-"
return {'detected': version_detected, 'to_create': version_to_create}
def create_resource_group(cmd, rg_name, location):
rcf = _resource_client_factory(cmd.cli_ctx)
rg_params = ResourceGroup(location=location)
return rcf.resource_groups.create_or_update(rg_name, rg_params)
def check_resource_group_exists(cmd, rg_name):
rcf = _resource_client_factory(cmd.cli_ctx)
return rcf.resource_groups.check_existence(rg_name)
def check_resource_group_supports_os(cmd, rg_name, is_linux):
# get all appservice plans from RG
client = web_client_factory(cmd.cli_ctx)
plans = list(client.app_service_plans.list_by_resource_group(rg_name))
for item in plans:
# for Linux if an app with reserved==False exists, ASP doesn't support Linux
if is_linux and not item.reserved:
return False
elif not is_linux and item.reserved:
return False
return True
def check_if_asp_exists(cmd, rg_name, asp_name, location):
# get all appservice plans from RG
client = web_client_factory(cmd.cli_ctx)
for item in list(client.app_service_plans.list_by_resource_group(rg_name)):
if item.name == asp_name and (item.location.replace(" ", "").lower() == location or item.location == location):
return True
return False
def check_app_exists(cmd, rg_name, app_name):
client = web_client_factory(cmd.cli_ctx)
for item in list(client.web_apps.list_by_resource_group(rg_name)):
if item.name == app_name:
return True
return False
# pylint:disable=unexpected-keyword-arg
def get_lang_from_content(src_path):
import glob
# NODE: package.json should exist in the application root dir
# NETCORE & DOTNET: *.csproj should exist in the application dir
# NETCORE: <TargetFramework>netcoreapp2.0</TargetFramework>
# DOTNET: <TargetFrameworkVersion>v4.5.2</TargetFrameworkVersion>
runtime_details_dict = dict.fromkeys(['language', 'file_loc', 'default_sku'])
package_json_file = os.path.join(src_path, 'package.json')
package_python_file = glob.glob("**/*.py", recursive=True)
package_netlang_glob = glob.glob("**/*.csproj", recursive=True)
static_html_file = glob.glob("**/*.html", recursive=True)
if os.path.isfile(package_json_file):
runtime_details_dict['language'] = NODE_RUNTIME_NAME
runtime_details_dict['file_loc'] = package_json_file
runtime_details_dict['default_sku'] = 'B1'
elif package_python_file:
runtime_details_dict['language'] = PYTHON_RUNTIME_NAME
        runtime_details_dict['file_loc'] = os.path.join(src_path, package_python_file[0])
runtime_details_dict['default_sku'] = 'B1'
elif package_netlang_glob:
package_netcore_file = os.path.join(src_path, package_netlang_glob[0])
runtime_lang = detect_dotnet_lang(package_netcore_file)
runtime_details_dict['language'] = runtime_lang
runtime_details_dict['file_loc'] = package_netcore_file
runtime_details_dict['default_sku'] = 'F1'
elif static_html_file:
runtime_details_dict['language'] = STATIC_RUNTIME_NAME
runtime_details_dict['file_loc'] = static_html_file[0]
runtime_details_dict['default_sku'] = 'F1'
return runtime_details_dict
def detect_dotnet_lang(csproj_path):
import xml.etree.ElementTree as ET
import re
parsed_file = ET.parse(csproj_path)
root = parsed_file.getroot()
version_lang = ''
for target_ver in root.iter('TargetFramework'):
version_lang = re.sub(r'([^a-zA-Z\s]+?)', '', target_ver.text)
if 'netcore' in version_lang.lower():
return NETCORE_RUNTIME_NAME
return DOTNET_RUNTIME_NAME
def parse_dotnet_version(file_path):
version_detected = ['4.7']
try:
from xml.dom import minidom
import re
xmldoc = minidom.parse(file_path)
framework_ver = xmldoc.getElementsByTagName('TargetFrameworkVersion')
target_ver = framework_ver[0].firstChild.data
        non_decimal = re.compile(r'[^\d.]+')
        if target_ver is not None:
            # strip the non-numeric prefix from the version value (e.g. the 'v' in 'v4.5.2')
            c = non_decimal.sub('', target_ver)
            # reduce the version to '4.5' from '4.5.2'
            version_detected = c[:3]
except: # pylint: disable=bare-except
version_detected = version_detected[0]
return version_detected
def parse_netcore_version(file_path):
import xml.etree.ElementTree as ET
import re
version_detected = ['0.0']
parsed_file = ET.parse(file_path)
root = parsed_file.getroot()
for target_ver in root.iter('TargetFramework'):
version_detected = re.findall(r"\d+\.\d+", target_ver.text)
# incase of multiple versions detected, return list in descending order
version_detected = sorted(version_detected, key=float, reverse=True)
return version_detected
def parse_node_version(file_path):
import json
import re
with open(file_path) as data_file:
data = []
for d in find_key_in_json(json.load(data_file), 'node'):
non_decimal = re.compile(r'[^\d.]+')
# remove the string ~ or > that sometimes exists in version value
c = non_decimal.sub('', d)
# reduce the version to '6.0' from '6.0.0'
data.append(c[:3])
version_detected = sorted(data, key=float, reverse=True)
return version_detected or ['0.0']
def detect_netcore_version_tocreate(detected_ver):
if detected_ver in NETCORE_VERSIONS:
return detected_ver
return NETCORE_VERSION_DEFAULT
def detect_dotnet_version_tocreate(detected_ver):
min_ver = DOTNET_VERSIONS[0]
if detected_ver in DOTNET_VERSIONS:
return detected_ver
elif detected_ver < min_ver:
return min_ver
return DOTNET_VERSION_DEFAULT
def detect_node_version_tocreate(detected_ver):
if detected_ver in NODE_VERSIONS:
return detected_ver
# get major version & get the closest version from supported list
major_ver = float(detected_ver.split('.')[0])
if major_ver < 4:
return NODE_VERSION_DEFAULT
elif major_ver >= 4 and major_ver < 6:
return '4.5'
elif major_ver >= 6 and major_ver < 8:
return '6.9'
return NODE_VERSION_DEFAULT
def find_key_in_json(json_data, key):
for k, v in json_data.items():
if key in k:
yield v
elif isinstance(v, dict):
for id_val in find_key_in_json(v, key):
yield id_val
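if __name__ == '__main__':
    # Usage sketch: detect the project language, then pick a runtime version.
    # Assumes the current working directory is the project root, since the
    # glob calls in get_lang_from_content search relative to the cwd.
    details = get_lang_from_content('.')
    if details['language'] and details['language'] != STATIC_RUNTIME_NAME:
        versions = get_runtime_version_details(details['file_loc'], details['language'])
        print(details['language'], versions['detected'], versions['to_create'])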
| 39.257937
| 119
| 0.685333
|
794aa5f8b2d7af6be29f554bc47c72f6dc3fcb3f
| 4,671
|
py
|
Python
|
Jalase5/main.py
|
ashkanjalaliQ/Maadanchi
|
acb2ec9ecdb1e20398454c942cce166f92facda6
|
[
"MIT"
] | null | null | null |
Jalase5/main.py
|
ashkanjalaliQ/Maadanchi
|
acb2ec9ecdb1e20398454c942cce166f92facda6
|
[
"MIT"
] | null | null | null |
Jalase5/main.py
|
ashkanjalaliQ/Maadanchi
|
acb2ec9ecdb1e20398454c942cce166f92facda6
|
[
"MIT"
] | null | null | null |
from urllib.request import urlopen
from bs4 import BeautifulSoup
import pandas as pd
def divider_tag(tag):
tag = tag[tag.find('>') + 1:tag.find('</')]
if len(tag) == 0:
tag = '-'
return tag
def attach(names):
    # Join a list of names into a single comma-separated string; '-' if empty.
    if not names:
        return '-'
    result = ''
    if len(names) != 1:
        for i in range(len(names)):
            if i != len(names) - 1:
                result += names[i] + ', '
            else:
                return result + names[i]
    return names[0]
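# Quick self-check of the string helpers above (illustrative inputs).
assert divider_tag('<h1>Catan</h1>') == 'Catan'
assert attach(['Klaus Teuber']) == 'Klaus Teuber'
assert attach(['Alice', 'Bob', 'Carol']) == 'Alice, Bob, Carol'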
def scraping(website):
html = urlopen(website)
bs = BeautifulSoup(html, "html.parser")
data = {
'Game Name' : '',
'Score' : '',
'Published date' : '',
'author' : '',
'Vote' : '',
'Number of Players' : '',
'Age range' : '',
'Game time' : '',
'Favorite' : '',
'Want it' : '',
'Own it' : '',
'Follow' : '',
'Played it' : '',
'Heart it' : ''
}
data['Game Name'] = divider_tag(str(bs.findAll('h1')))
data['Score'] = divider_tag(str(bs.findAll('span', {'class' : 'average'})))
data['Published date'] = str(bs.findAll('div', {'class' : 'meta'}))[:str(bs.findAll('div', {'class' : 'meta'})).find('<div class="game-tags">')].strip().split(':')[-1]
author = bs.findAll('a', {'rel' : 'tag'})
author_name = []
for i in range(len(author)):
if 'https://boardgaming.com/publishers/' in str(author[i].attrs['href']):
author_name.append(divider_tag(str(author[i])))
data['author'] = attach(author_name)
data['Vote'] = divider_tag(str(bs.findAll('div', {'class' : 'votes'})))
data['Number of Players'] = divider_tag(str(bs.findAll('div', {'id' : 'detail-icon-players'})))
data['Age range'] = divider_tag(str(bs.findAll('div', {'id' : 'detail-icon-age'})))
data['Game time'] = divider_tag(str(bs.findAll('div', {'id' : 'detail-icon-time'})))
other_info = str(bs.findAll('span', {'class' : 'stat'})).split(',')
data['Own it'] = divider_tag(other_info[0])
data['Want it'] = divider_tag(other_info[1])
data['Favorite'] = divider_tag(other_info[2])
data['Heart it'] = divider_tag(other_info[3])
data['Played it'] = divider_tag(other_info[4])
data['Follow'] = divider_tag(other_info[5])
return data
def Link_extractor(page_link):
html = urlopen(page_link)
bs = BeautifulSoup(html, "html.parser")
link = bs.findAll('a')
links = []
for i in range(len(link)):
#link[i] = str(link[i])
if 'boardgaming.com/games/card-games/' in str(link[i]) or 'boardgaming.com/games/board-games/' in str(link[i]) or 'boardgaming.com/games/' in str(link[i]):
if 'href' in str(link[i]) and 'title' in str(link[i]):
if not 'class' in str(link[i]) and not 'img' in str(link[i]):
links.append(link[i].attrs['href'])
#print(link[i].attrs['href'])
return links
html = urlopen('https://boardgaming.com/category/games/board-games')
bs = BeautifulSoup(html, "html.parser")
pages = int(str(bs.findAll('div', {'class' : 'pagination'}))[str(bs.findAll('div', {'class' : 'pagination'})).find('Page 1 of') + 10 : str(bs.findAll('div', {'class' : 'pagination'})).find('Page 1 of') + 13])
print(str(pages) + ' Pages')
info = [
['Game Name'],
['Score'],
['Published date'],
['author'],
['Vote'],
['Number of Players'],
['Age range'],
['Game time'],
['Own it'],
['Want it'],
['Favorite'],
['Heart it'],
['Played it'],
['Follow']
]
# NOTE: only result page 29 (index 28) is scraped here; use range(pages) for a full run
for i in range(28, 29):
links = Link_extractor('https://boardgaming.com/category/games/board-games/page/' + str(i + 1))
print('Page ' + str(i + 1) + ' Started')
for link in links:
link_data = scraping(link)
for j in range(len(info)):
info[j].append(link_data[info[j][0]])
#print(info)
print('Page ' + str(i + 1) + ' Completed!')
#print(info)
for i in range(len(info)):
info[i] = info[i][1:]
data = {'Game Name': info[0],
'Score': info[1],
'Published date': info[2],
'author': info[3],
'Vote': info[4],
'Number of Players': info[5],
'Age range': info[6],
'Game time': info[7],
'Own it': info[8],
'Want it': info[9],
'Favorite': info[10],
'Heart it': info[11],
'Played it': info[12],
'Follow': info[13],
}
df = pd.DataFrame(data)
df.to_csv('export2.csv', index=False)
print('File Saved!')
| 33.364286
| 208
| 0.519375
|
794aa66456a1484843d7f5cc75537fdd1e040e13
| 7,028
|
py
|
Python
|
GRsync.py
|
clyang/GRsync
|
4d72a2260eeb0d0e8bd467e8aa8d44a13ef29e26
|
[
"MIT"
] | 41
|
2015-12-09T01:50:18.000Z
|
2021-12-09T16:21:22.000Z
|
GRsync.py
|
clyang/GRsync
|
4d72a2260eeb0d0e8bd467e8aa8d44a13ef29e26
|
[
"MIT"
] | 5
|
2019-06-18T11:16:20.000Z
|
2022-01-16T01:07:03.000Z
|
GRsync.py
|
clyang/GRsync
|
4d72a2260eeb0d0e8bd467e8aa8d44a13ef29e26
|
[
"MIT"
] | 6
|
2018-12-31T03:46:54.000Z
|
2021-10-18T12:37:21.000Z
|
#!/usr/bin/python -u
# -*- coding: utf-8 -*-
import urllib2
import sys
import json
import argparse
from argparse import RawTextHelpFormatter
import socket
import re
import os
# remember the ending "/"
# eg: PHOTO_DEST_DIR = "/home/user/photos/"
PHOTO_DEST_DIR = ""
# GR_HOST is FIXED. DO NOT CHANGE!!
GR_HOST = "http://192.168.0.1/"
PHOTO_LIST_URI = "v1/photos"
GR_PROPS = "v1/props"
STARTDIR = ""
STARTFILE = ""
SUPPORT_DEVICE = ['RICOH GR II', 'RICOH GR III']
DEVICE = "RICOH GR II"
def getDeviceModel():
req = urllib2.Request(GR_HOST + GR_PROPS)
try:
resp = urllib2.urlopen(req)
data = resp.read()
props = json.loads(data)
if props['errCode'] != 200:
print "Error code: %d, Error message: %s" % (photoDict['errCode'], photoDict['errMsg'])
sys.exit(1)
else:
return props['model']
except urllib2.URLError, e:
print "Unable to fetch device props from device"
sys.exit(1)
def getBatteryLevel():
req = urllib2.Request(GR_HOST + GR_PROPS)
try:
resp = urllib2.urlopen(req)
data = resp.read()
props = json.loads(data)
if props['errCode'] != 200:
print "Error code: %d, Error message: %s" % (photoDict['errCode'], photoDict['errMsg'])
sys.exit(1)
else:
return props['battery']
except urllib2.URLError, e:
print "Unable to fetch device props from %s" % DEVICE
sys.exit(1)
def getPhotoList():
req = urllib2.Request(GR_HOST + PHOTO_LIST_URI)
try:
resp = urllib2.urlopen(req)
data = resp.read()
photoDict = json.loads(data)
if photoDict['errCode'] != 200:
print "Error code: %d, Error message: %s" % (photoDict['errCode'], photoDict['errMsg'])
sys.exit(1)
else:
photoList = []
for dic in photoDict['dirs']:
# check if this directory already exist in local PHOTO_DEST_DIR
# if not, create one
if not os.path.isdir(PHOTO_DEST_DIR+dic['name']):
os.makedirs(PHOTO_DEST_DIR+dic['name'])
# generate the full photo list
for file in dic['files']:
photoList.append("%s/%s" % (dic['name'], file ))
return photoList
except urllib2.URLError, e:
print "Unable to fetch photo list from %s" % DEVICE
sys.exit(1)
def getLocalFiles():
fileList = []
for (dir, _, files) in os.walk(PHOTO_DEST_DIR):
for f in files:
fileList.append(os.path.join(dir, f).replace(PHOTO_DEST_DIR, ""))
return fileList
def fetchPhoto(photouri):
try:
        # GR II serves photos from the site root; GR III nests them under v1/photos
        if DEVICE == 'RICOH GR II':
            f = urllib2.urlopen(GR_HOST+photouri)
        else:
            f = urllib2.urlopen(GR_HOST+PHOTO_LIST_URI+'/'+photouri)
with open(PHOTO_DEST_DIR+photouri, "wb") as localfile:
localfile.write(f.read())
return True
except urllib2.URLError, e:
return False
def shutdownGR():
req = urllib2.Request("http://192.168.0.1/v1/device/finish")
req.add_header('Content-Type', 'application/json')
response = urllib2.urlopen(req, "{}")
def downloadPhotos(isAll):
print "Fetching photo list from %s ..." % DEVICE
photoLists = getPhotoList()
localFiles = getLocalFiles()
count = 0
if isAll == True:
totalPhoto = len(photoLists)
else:
starturi = "%s/%s" % (STARTDIR, STARTFILE)
if starturi not in photoLists:
print "Unable to find %s in Ricoh %s" % (starturi, DEVICE)
sys.exit(1)
else:
while True:
if photoLists[0] != starturi:
photoLists.pop(0)
else:
totalPhoto = len(photoLists)
break
print "Start to download photos ..."
while True:
if not photoLists:
print "\nAll photos are downloaded."
shutdownGR()
break
else:
photouri = photoLists.pop(0)
count += 1
if photouri in localFiles:
print "(%d/%d) Skip %s, already have it on local drive!!" % (count, totalPhoto, photouri)
else:
print "(%d/%d) Downloading %s now ... " % (count, totalPhoto, photouri),
if fetchPhoto(photouri) == True:
print "done!!"
else:
print "*** FAILED ***"
if __name__ == "__main__":
# set connection timeout to 2 seconds
socket.setdefaulttimeout(2)
# setting up argument parser
parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter, description='''
GRsync is a handy Python script, which allows you to sync photos from Ricoh GR
II or III via Wifi. It has been tested on Mac OS X and Ubuntu. It should be able to
run on any platform that has a Python environment.
It automatically checks whether photos already exist on your local drive. Duplicate
photos are skipped, so only the photos you are missing are downloaded.
Simple usage - Download ALL photos from Ricoh GR II or III:
./GRsync -a
Advanced usage - Download photos after specific directory and file:
./GRsync -d 100RICOH -f R0000005.JPG
All photos after 100RICOH/R0000005.JPG will be downloaded, including all
following directories (eg. 101RICOH, 102RICOH)
''')
parser.add_argument("-a", "--all", action="store_true", help="Download all photos")
parser.add_argument("-d", "--dir", help="Assign directory (eg. -d 100RICOH). MUST use with -f")
parser.add_argument("-f", "--file", help="Start to download photos from specific file \n(eg. -f R0000005.JPG). MUST use with -d")
model = getDeviceModel()
if model not in SUPPORT_DEVICE:
print "Your source device '%s' is unknown or not supported!" % model
sys.exit(1)
else:
DEVICE = model
if getBatteryLevel() < 15:
print "Your battery level is less than 15%, please charge it before sync operation!"
sys.exit(1)
if parser.parse_args().all == True and parser.parse_args().dir is None and parser.parse_args().file is None:
downloadPhotos(isAll=True)
elif not (parser.parse_args().dir is None) and not (parser.parse_args().file is None) and parser.parse_args().all == False:
match = re.match(r"^[1-9]\d\dRICOH$", parser.parse_args().dir)
if match:
STARTDIR = parser.parse_args().dir
else:
print "Incorrect directory name. It should be something like 100RICOH"
sys.exit(1)
match = re.match(r"^R0\d{6}\.JPG$", parser.parse_args().file)
if match:
STARTFILE = parser.parse_args().file
else:
print "Incorrect file name. It should be something like R0999999.JPG. (all in CAPITAL)"
sys.exit(1)
downloadPhotos(isAll=False)
else:
parser.print_help()
| 34.792079
| 133
| 0.593341
|
794aa825d89ec7b3bb9112c16836e448686573f9
| 8,230
|
py
|
Python
|
smac/utils/io/input_reader.py
|
brenting/SMAC3
|
f628d8b83f9f1803054d6e39bce7a51ab033dff1
|
[
"BSD-3-Clause"
] | 51
|
2019-02-01T19:43:37.000Z
|
2022-03-16T09:07:03.000Z
|
smac/utils/io/input_reader.py
|
brenting/SMAC3
|
f628d8b83f9f1803054d6e39bce7a51ab033dff1
|
[
"BSD-3-Clause"
] | 2
|
2019-02-23T18:54:22.000Z
|
2019-11-09T01:30:32.000Z
|
smac/utils/io/input_reader.py
|
brenting/SMAC3
|
f628d8b83f9f1803054d6e39bce7a51ab033dff1
|
[
"BSD-3-Clause"
] | 35
|
2019-02-08T02:00:31.000Z
|
2022-03-01T23:17:00.000Z
|
import numpy as np
from smac.configspace import pcs
__author__ = "Marius Lindauer"
__copyright__ = "Copyright 2015, ML4AAD"
__license__ = "3-clause BSD"
__maintainer__ = "Marius Lindauer"
__email__ = "lindauer@cs.uni-freiburg.de"
__version__ = "0.0.1"
class InputReader(object):
"""Reading all input files for SMAC (scenario file, instance files, ...)
Note: Most of this code was taken from the pysmac repository.
We copy it here because we don't want smac3 to depend
on an earlier version!
"""
def __init__(self):
pass
def read_scenario_file(self, fn: str):
"""Encapsulates read_scenario_file of pysmac
Parameters
----------
fn: string
File name of scenario file
Returns
-------
dict : dictionary
(key, value) pairs are (variable name, variable value)
"""
        # translate the different option names to a canonical name
# kept for backwards-compatibility
scenario_option_names = {'algo-exec': 'algo',
'algoExec': 'algo',
'algo': 'algo',
'algo-exec-dir': 'execdir',
'exec-dir': 'execdir',
'execDir': 'execdir',
'execdir': 'execdir',
'algo-deterministic': 'deterministic',
'deterministic': 'deterministic',
'paramFile': 'paramfile',
'pcs-file': 'paramfile',
'param-file': 'paramfile',
'paramfile': 'paramfile',
'run-obj': 'run_obj',
'run-objective': 'run_obj',
'runObj': 'run_obj',
'run_obj': 'run_obj',
'overall_obj': 'overall_obj',
'intra-obj': 'overall_obj',
'intra-instance-obj': 'overall_obj',
'overall-obj': 'overall_obj',
'intraInstanceObj': 'overall_obj',
'overallObj': 'overall_obj',
'intra_instance_obj': 'overall_obj',
'cost-for-crash': 'cost_for_crash',
'cost_for_crash': 'cost_for_crash',
'algo-cutoff-time': 'cutoff_time',
'target-run-cputime-limit': 'cutoff_time',
'target_run_cputime_limit': 'cutoff_time',
'cutoff-time': 'cutoff_time',
'cutoffTime': 'cutoff_time',
'cutoff_time': 'cutoff_time',
'memory-limit': 'memory_limit',
'memory_limit': 'memory_limit',
'cputime-limit': 'tuner_timeout',
'cputime_limit': 'tuner_timeout',
'tunertime-limit': 'tuner_timeout',
'tuner-timeout': 'tuner_timeout',
'tunerTimeout': 'tuner_timeout',
'tuner_timeout': 'tuner_timeout',
'wallclock-limit': 'wallclock_limit',
'runtime-limit': 'wallclock_limit',
'runtimeLimit': 'wallclock_limit',
'wallClockLimit': 'wallclock_limit',
'wallclock_limit': 'wallclock_limit',
'output-dir': 'output_dir',
'outputDirectory': 'output_dir',
'outdir': 'output_dir',
'output_dir': 'output_dir',
'instances': 'instance_file',
'instance-file': 'instance_file',
'instance-dir': 'instance_file',
'instanceFile': 'instance_file',
'instance_file': 'instance_file',
'i': 'instance_file',
'instance_seed_file': 'instance_file',
'test-instances': 'test_instance_file',
'test-instance-file': 'test_instance_file',
'test-instance-dir': 'test_instance_file',
'testInstanceFile': 'test_instance_file',
'test_instance_file': 'test_instance_file',
'test_instance_seed_file': 'test_instance_file',
'feature-file': 'feature_file',
'instanceFeatureFile': 'feature_file',
'feature_file': 'feature_file',
'runcount-limit': 'runcount_limit',
'runcount_limit': 'runcount_limit',
'totalNumRunsLimit': 'runcount_limit',
'numRunsLimit': 'runcount_limit',
'numberOfRunsLimit': 'runcount_limit',
'initial-incumbent': 'initial_incumbent',
'initial_incumbent': 'initial_incumbent'
}
scenario_dict = {}
with open(fn, 'r') as fh:
for line in fh:
line = line.replace("\n", "").strip(" ")
# remove comments
if line.find("#") > -1:
line = line[:line.find("#")]
# skip empty lines
if line == "":
continue
if "=" in line:
tmp = line.split("=")
tmp = [' '.join(s.split()) for s in tmp]
else:
tmp = line.split()
scenario_dict[
scenario_option_names.get(tmp[0], tmp[0])] = " ".join(tmp[1:])
        return scenario_dict
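    # Illustrative sketch (not part of the original file): how one scenario
    # line is normalised by the mapping above, assuming a file that contains
    # the single line "algo-exec = python wrapper.py":
    #   InputReader().read_scenario_file("scenario.txt")
    #   # -> {"algo": "python wrapper.py"}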
def read_instance_file(self, fn: str):
"""Encapsulates read_instances_file of pysmac
Parameters
----------
fn: string
File name of instance file
Returns
-------
instances: list
Each element is a list where the first element is the
instance name followed by additional
information for the specific instance.
"""
with open(fn, 'r') as fh:
instance_names = fh.readlines()
        return [s.strip().split() for s in instance_names]
def read_instance_features_file(self, fn: str):
"""Encapsulates read_instances_file of pysmac
Parameters
----------
fn: string
File name of instance feature file
Returns
-------
features: tuple
first entry is a list of the feature names,
second one is a dict with 'instance name' -
'numpy array containing the features' key-value pairs
"""
instances = {}
with open(fn, 'r') as fh:
lines = fh.readlines()
for line in lines[1:]:
tmp = line.strip().split(",")
instances[tmp[0]] = np.array(tmp[1:], dtype=np.double)
return [f.strip() for f in lines[0].rstrip("\n").split(",")[1:]], instances
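    # Illustrative feature file (hypothetical content):
    #   INSTANCE_NAME,feat1,feat2
    #   inst_a,0.5,1.0
    # -> (["feat1", "feat2"], {"inst_a": np.array([0.5, 1.0])})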
def read_pcs_file(self, fn: str):
"""Encapsulates generating configuration space object
Parameters
----------
fn: string
File name of pcs file
Returns
-------
ConfigSpace: ConfigSpace
"""
space = pcs.read(fn)
return space
| 43.544974
| 83
| 0.431106
|
794aa8d1274014142632265c55a066dbe8483572
| 717
|
py
|
Python
|
iexfinance/tests/data_apis/test_time_series.py
|
dirtyFox27/iexfinance
|
cae6745fbee2246b010629ab05c62ffbce2c1c9c
|
[
"Apache-2.0"
] | null | null | null |
iexfinance/tests/data_apis/test_time_series.py
|
dirtyFox27/iexfinance
|
cae6745fbee2246b010629ab05c62ffbce2c1c9c
|
[
"Apache-2.0"
] | null | null | null |
iexfinance/tests/data_apis/test_time_series.py
|
dirtyFox27/iexfinance
|
cae6745fbee2246b010629ab05c62ffbce2c1c9c
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
import pytest
from iexfinance.data_apis import get_time_series
class TestTimeSeries(object):
@pytest.mark.xfail(reason="Endpoint not working in sandbox environment")
def test_all_series(self):
data = get_time_series()
assert isinstance(data, list)
@pytest.mark.xfail(reason="Endpoint not working in sandbox environment")
def test_all_series_pandas(self):
data = get_time_series(output_format="pandas")
assert isinstance(data, pd.DataFrame)
def test_params(self):
data = get_time_series("REPORTED_FINANCIALS", "AAPL", last=1)
assert isinstance(data, pd.DataFrame)
assert isinstance(data.columns, pd.DatetimeIndex)
| 28.68
| 76
| 0.719665
|
794aa953761c1fecfdb974275e3060c739ce305a
| 538
|
py
|
Python
|
tests/conftest.py
|
izm51/obniz-python-sdk
|
40a738b5fe2c0a415cdc09f46d28c143982bfb07
|
[
"MIT"
] | 11
|
2019-03-22T12:02:11.000Z
|
2021-01-21T04:57:18.000Z
|
tests/conftest.py
|
izm51/obniz-python-sdk
|
40a738b5fe2c0a415cdc09f46d28c143982bfb07
|
[
"MIT"
] | 5
|
2019-03-02T08:28:25.000Z
|
2021-02-02T22:06:37.000Z
|
tests/conftest.py
|
izm51/obniz-python-sdk
|
40a738b5fe2c0a415cdc09f46d28c143982bfb07
|
[
"MIT"
] | 3
|
2019-07-20T06:55:09.000Z
|
2019-12-04T05:05:00.000Z
|
import pytest
from .utils import release_obnize, setup_obniz, receive_json, assert_send
@pytest.fixture(scope="function")
def obniz(mocker):
obniz = setup_obniz(mocker)
receive_json(obniz, [{'ws': {'ready': True, 'obniz': {'hw': 'obnizb1', 'firmware': '2.0.2'}}}])
assert_send(obniz, [{'ws': {'reset_obniz_on_ws_disconnection': True}}])
yield obniz
release_obnize(obniz)
@pytest.fixture(scope="function")
def uninitialized_obniz(mocker):
obniz = setup_obniz(mocker)
yield obniz
release_obnize(obniz)
| 25.619048
| 99
| 0.700743
|
794aa958b96d5b4ea31e2d5ef46d2117e8ed00e0
| 3,794
|
py
|
Python
|
mapa_cidadao/mapa_cidadao/core/migrations/0001_initial.py
|
zokis/mapa_do_cidadao
|
abfbf79c5a1fcc3ee812e3f3f768c65a425e917c
|
[
"MIT"
] | 1
|
2015-06-15T13:44:55.000Z
|
2015-06-15T13:44:55.000Z
|
mapa_cidadao/mapa_cidadao/core/migrations/0001_initial.py
|
zokis/mapa_do_cidadao
|
abfbf79c5a1fcc3ee812e3f3f768c65a425e917c
|
[
"MIT"
] | 1
|
2015-08-14T18:39:02.000Z
|
2015-08-14T18:39:02.000Z
|
mapa_cidadao/mapa_cidadao/core/migrations/0001_initial.py
|
zokis/mapa_do_cidadao
|
abfbf79c5a1fcc3ee812e3f3f768c65a425e917c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
import django.contrib.gis.db.models.fields
from django.conf import settings
import mapa_cidadao.core.models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Categoria',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('nome', models.CharField(max_length=20, verbose_name=b'nome')),
('descricao', models.CharField(max_length=200, verbose_name='Descri\xe7\xe3o')),
('marker', models.FileField(null=True, upload_to=mapa_cidadao.core.models.MarkerRename(b'/avatars'), blank=True)),
('estilo', jsonfield.fields.JSONField(default=b'{"graphicHeight": 32, "externalGraphic": "https://cdn2.iconfinder.com/data/icons/snipicons/500/map-marker-32.png", "graphicWidth": 32}', null=True, blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Ocorrencia',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('ponto', django.contrib.gis.db.models.fields.PointField(srid=900913, null=True, verbose_name=b'ponto', blank=True)),
('status', models.SmallIntegerField(default=1, verbose_name=b'status', choices=[(1, 'Aberto'), (2, 'Reaberto'), (3, 'Resolvido'), (4, 'Inapropriado'), (5, 'Spam')])),
('titulo', models.CharField(max_length=120, verbose_name='T\xedtulo')),
('descricao', models.TextField(null=True, verbose_name='Descri\xe7\xe3o', blank=True)),
('date_add', models.DateTimeField(auto_now_add=True, null=True)),
('categoria', models.ForeignKey(to='core.Categoria')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Spam',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('contagem', models.IntegerField(default=0)),
('ocorrencia', models.ForeignKey(to='core.Ocorrencia')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Veto',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date_add', models.DateTimeField(auto_now_add=True)),
('ocorrencia', models.ForeignKey(to='core.Ocorrencia')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('date_add',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Voto',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date_add', models.DateTimeField(auto_now_add=True)),
('ocorrencia', models.ForeignKey(to='core.Ocorrencia')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('date_add',),
},
bases=(models.Model,),
),
]
| 44.635294
| 225
| 0.568793
|
794aa95cc6dc2a75d3de366d2806414e2ef2d68c
| 176
|
py
|
Python
|
examples/_scripts/maria_muster/q2.py
|
lebalz/easy_template
|
8d19ebf9a3cd723511ad8ee1877a5066c7d3b4d0
|
[
"MIT"
] | null | null | null |
examples/_scripts/maria_muster/q2.py
|
lebalz/easy_template
|
8d19ebf9a3cd723511ad8ee1877a5066c7d3b4d0
|
[
"MIT"
] | null | null | null |
examples/_scripts/maria_muster/q2.py
|
lebalz/easy_template
|
8d19ebf9a3cd723511ad8ee1877a5066c7d3b4d0
|
[
"MIT"
] | null | null | null |
def collatz(num):
    if num % 2 == 0:
        return num // 2
    return (3 * num) + 1
res = []
zahl = 9
while zahl > 1:
zahl = collatz(zahl)
res.append(zahl)
print(res)
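# With the fixes above, starting from zahl = 9 this prints
# [28, 14, 7, 22, 11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1]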
| 13.538462
| 24
| 0.539773
|
794aa997dec029c7dfc7f4b20e9e837f3ea85a99
| 8,861
|
py
|
Python
|
tensorflow_asr/featurizers/methods/gammatone.py
|
Thumb-Technologies/TensorFlowASR
|
37ea12af04a8f2c13f75a617f4aa4331f95ce945
|
[
"Apache-2.0"
] | 1
|
2021-11-30T16:04:53.000Z
|
2021-11-30T16:04:53.000Z
|
tensorflow_asr/featurizers/methods/gammatone.py
|
xbsdsongnan/TensorFlowASR
|
130124ccaf23fabe3e7a6f138d9403a7c0946ef3
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_asr/featurizers/methods/gammatone.py
|
xbsdsongnan/TensorFlowASR
|
130124ccaf23fabe3e7a6f138d9403a7c0946ef3
|
[
"Apache-2.0"
] | 1
|
2021-08-17T14:53:33.000Z
|
2021-08-17T14:53:33.000Z
|
# Copyright 2020 Huy Le Nguyen (@usimarit) and Huy Phan (@pquochuy)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This code is inspired from https://github.com/detly/gammatone """
import numpy as np
import tensorflow as tf
from ...utils.shape_util import shape_list
pi = tf.constant(np.pi, dtype=tf.complex64)
DEFAULT_FILTER_NUM = 100
DEFAULT_LOW_FREQ = 100
DEFAULT_HIGH_FREQ = 44100 / 4
def fft_weights(
nfft,
fs,
nfilts,
width,
fmin,
fmax,
maxlen):
"""
:param nfft: the source FFT size
    :param fs: sampling rate (Hz)
:param nfilts: the number of output bands required (default 64)
:param width: the constant width of each band in Bark (default 1)
:param fmin: lower limit of frequencies (Hz)
:param fmax: upper limit of frequencies (Hz)
:param maxlen: number of bins to truncate the rows to
:return: a tuple `weights`, `gain` with the calculated weight matrices and
gain vectors
Generate a matrix of weights to combine FFT bins into Gammatone bins.
Note about `maxlen` parameter: While wts has nfft columns, the second half
are all zero. Hence, aud spectrum is::
fft2gammatonemx(nfft,sr)*abs(fft(xincols,nfft))
`maxlen` truncates the rows to this many bins.
| (c) 2004-2009 Dan Ellis dpwe@ee.columbia.edu based on rastamat/audspec.m
| (c) 2012 Jason Heeris (Python implementation)
"""
ucirc = tf.exp(1j * 2 * pi * tf.cast(tf.range(0, nfft / 2 + 1),
tf.complex64) / nfft)[None, ...]
# Common ERB filter code factored out
cf_array = erb_space(fmin, fmax, nfilts)[::-1]
    erb_filters = make_erb_filters(fs, cf_array, width)
    A11 = erb_filters[1]
    A12 = erb_filters[2]
    A13 = erb_filters[3]
    A14 = erb_filters[4]
    B2 = erb_filters[8]
    gain = erb_filters[9]
# _, A11, A12, A13, A14, _, _, _, B2, gain =
A11, A12, A13, A14 = A11[..., None], A12[..., None], A13[..., None], A14[..., None]
r = tf.cast(tf.sqrt(B2), tf.complex64)
theta = 2 * pi * cf_array / fs
pole = (r * tf.exp(1j * theta))[..., None]
GTord = 4
weights = (
tf.abs(ucirc + A11 * fs) * tf.abs(ucirc + A12 * fs)
* tf.abs(ucirc + A13 * fs) * tf.abs(ucirc + A14 * fs)
* tf.abs(fs * (pole - ucirc) * (tf.math.conj(pole) - ucirc)) ** (-GTord)
/ tf.cast(gain[..., None], tf.float32)
)
weights = tf.pad(weights, [[0, 0], [0, nfft - shape_list(weights)[-1]]])
weights = weights[:, 0:int(maxlen)]
return tf.transpose(weights, perm=[1, 0])
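# Usage sketch (hypothetical parameter values, for illustration only):
#   wts = fft_weights(nfft=512, fs=16000, nfilts=64, width=1.0,
#                     fmin=100, fmax=8000, maxlen=512 // 2 + 1)
#   # wts has shape (maxlen, nfilts): one weight column per gammatone band.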
def erb_point(low_freq, high_freq, fraction):
"""
Calculates a single point on an ERB scale between ``low_freq`` and
``high_freq``, determined by ``fraction``. When ``fraction`` is ``1``,
``low_freq`` will be returned. When ``fraction`` is ``0``, ``high_freq``
will be returned.
``fraction`` can actually be outside the range ``[0, 1]``, which in general
isn't very meaningful, but might be useful when ``fraction`` is rounded a
little above or below ``[0, 1]`` (eg. for plot axis labels).
"""
# Change the following three parameters if you wish to use a different ERB
# scale. Must change in MakeERBCoeffs too.
# TODO: Factor these parameters out
ear_q = 9.26449 # Glasberg and Moore Parameters
min_bw = 24.7
# All of the following expressions are derived in Apple TR #35, "An
# Efficient Implementation of the Patterson-Holdsworth Cochlear Filter
# Bank." See pages 33-34.
erb_point = (
-ear_q * min_bw
+ tf.exp(
fraction * (
-tf.math.log(high_freq + ear_q * min_bw)
+ tf.math.log(low_freq + ear_q * min_bw)
)
) *
(high_freq + ear_q * min_bw)
)
return tf.cast(erb_point, tf.complex64)
def erb_space(
low_freq=DEFAULT_LOW_FREQ,
high_freq=DEFAULT_HIGH_FREQ,
num=DEFAULT_FILTER_NUM):
"""
This function computes an array of ``num`` frequencies uniformly spaced
between ``high_freq`` and ``low_freq`` on an ERB scale.
For a definition of ERB, see Moore, B. C. J., and Glasberg, B. R. (1983).
"Suggested formulae for calculating auditory-filter bandwidths and
excitation patterns," J. Acoust. Soc. Am. 74, 750-753.
"""
return erb_point(
low_freq,
high_freq,
tf.range(1, num + 1, dtype=tf.float32) / num
)
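# Example (illustrative, not in the original module):
#   cfs = erb_space(low_freq=100, high_freq=8000, num=10)
#   # ten centre frequencies, uniformly spaced on the ERB scale from
#   # high_freq down towards low_freq, returned as complex64 values.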
def make_erb_filters(fs, centre_freqs, width=1.0):
"""
This function computes the filter coefficients for a bank of
    Gammatone filters. These filters were defined by Patterson and Holdsworth for
simulating the cochlea.
The result is returned as a :class:`ERBCoeffArray`. Each row of the
filter arrays contains the coefficients for four second order filters. The
transfer function for these four filters share the same denominator (poles)
but have different numerators (zeros). All of these coefficients are
    packed into one vector that the ERBFilterBank can take apart to implement
the filter.
The filter bank contains "numChannels" channels that extend from
half the sampling rate (fs) to "lowFreq". Alternatively, if the numChannels
input argument is a vector, then the values of this vector are taken to be
the center frequency of each desired filter. (The lowFreq argument is
ignored in this case.)
Note this implementation fixes a problem in the original code by
computing four separate second order filters. This avoids a big problem with
round off errors in cases of very small cfs (100Hz) and large sample rates
(44kHz). The problem is caused by roundoff error when a number of poles are
combined, all very close to the unit circle. Small errors in the eigth order
coefficient, are multiplied when the eigth root is taken to give the pole
location. These small errors lead to poles outside the unit circle and
instability. Thanks to Julius Smith for leading me to the proper
explanation.
Execute the following code to evaluate the frequency response of a 10
channel filterbank::
fcoefs = MakeERBFilters(16000,10,100);
y = ERBFilterBank([1 zeros(1,511)], fcoefs);
resp = 20*log10(abs(fft(y')));
freqScale = (0:511)/512*16000;
semilogx(freqScale(1:255),resp(1:255,:));
axis([100 16000 -60 0])
xlabel('Frequency (Hz)'); ylabel('Filter Response (dB)');
| Rewritten by Malcolm Slaney@Interval. June 11, 1998.
| (c) 1998 Interval Research Corporation
|
| (c) 2012 Jason Heeris (Python implementation)
"""
T = 1 / fs
    # Change the following three parameters if you wish to use a different
# ERB scale. Must change in ERBSpace too.
# TODO: factor these out
ear_q = 9.26449 # Glasberg and Moore Parameters
min_bw = 24.7
order = 1
erb = width * ((centre_freqs / ear_q) ** order + min_bw ** order) ** (1 / order)
B = 1.019 * 2 * pi * erb
arg = 2 * centre_freqs * pi * T
vec = tf.exp(2j * arg)
A0 = T
A2 = 0
B0 = 1
B1 = -2 * tf.cos(arg) / tf.exp(B * T)
B2 = tf.exp(-2 * B * T)
rt_pos = tf.cast(tf.sqrt(3 + 2 ** 1.5), tf.complex64)
rt_neg = tf.cast(tf.sqrt(3 - 2 ** 1.5), tf.complex64)
common = -T * tf.exp(-(B * T))
# TODO: This could be simplified to a matrix calculation involving the
# constant first term and the alternating rt_pos/rt_neg and +/-1 second
# terms
k11 = tf.cos(arg) + rt_pos * tf.sin(arg)
k12 = tf.cos(arg) - rt_pos * tf.sin(arg)
k13 = tf.cos(arg) + rt_neg * tf.sin(arg)
k14 = tf.cos(arg) - rt_neg * tf.sin(arg)
A11 = common * k11
A12 = common * k12
A13 = common * k13
A14 = common * k14
gain_arg = tf.exp(1j * arg - B * T)
gain = tf.cast(tf.abs(
(vec - gain_arg * k11)
* (vec - gain_arg * k12)
* (vec - gain_arg * k13)
* (vec - gain_arg * k14)
* (T * tf.exp(B * T)
/ (-1 / tf.exp(B * T) + 1 + vec * (1 - tf.exp(B * T)))
)**4
), tf.complex64)
allfilts = tf.ones_like(centre_freqs, dtype=tf.complex64)
fcoefs = tf.stack([
A0 * allfilts, A11, A12, A13, A14, A2 * allfilts,
B0 * allfilts, B1, B2,
gain
], axis=1)
return tf.transpose(fcoefs, perm=[1, 0])
| 34.885827
| 87
| 0.633111
|
794aa9982a87cbd5cdadce0a3df9adcef69fea18
| 7,772
|
py
|
Python
|
util/reggen/data.py
|
joshuisken/opentitan
|
d4d5d2f3bb87af5ca2f2787af5bc19bee04ffc6a
|
[
"Apache-2.0"
] | 1
|
2021-12-10T17:05:45.000Z
|
2021-12-10T17:05:45.000Z
|
util/reggen/data.py
|
abignail/opentitan
|
920b48f3da14f90661b5b52d1f9de19a1ba3f012
|
[
"Apache-2.0"
] | null | null | null |
util/reggen/data.py
|
abignail/opentitan
|
920b48f3da14f90661b5b52d1f9de19a1ba3f012
|
[
"Apache-2.0"
] | 1
|
2021-03-25T15:48:48.000Z
|
2021-03-25T15:48:48.000Z
|
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
from .field_enums import HwAccess, SwAccess, SwRdAccess, SwWrAccess
# helper function that strips the trailing number from a name
# TODO: this is a workaround, should solve this in validate.py
def _get_basename(name):
outname = ""
for (k, c) in enumerate(name[::-1]):
if not str.isdigit(c):
return name[0:len(name) - k]
return ""
class Field():
"""Field in a register.
Field class contains necessary info to generate RTL code.
It has two additional (tool generated) fields, swrdaccess and swwraccess,
which represent read and write type. This makes RTL generation code simpler.
"""
name = "" # required
msb = 31 # required
lsb = 0 # required
resval = 0 # optional
swaccess = SwAccess.NONE # optional
swrdaccess = SwRdAccess.NONE
swwraccess = SwWrAccess.NONE
hwaccess = HwAccess.HRO
hwqe = False
hwre = False
hwext = False
tags = []
def __init__(self):
self.name = "" # required
self.msb = 31 # required
self.lsb = 0 # required
self.resval = 0 # optional
self.swaccess = SwAccess.NONE # optional
self.swrdaccess = SwRdAccess.NONE
self.swwraccess = SwWrAccess.NONE
self.hwaccess = HwAccess.HRO
self.hwqe = False
self.hwre = False
self.hwext = False
self.tags = []
def get_n_bits(self, bittype=["q"]):
n_bits = 0
if "q" in bittype and self.hwaccess in [HwAccess.HRW, HwAccess.HRO]:
n_bits += self.msb - self.lsb + 1
if "d" in bittype and self.hwaccess in [HwAccess.HRW, HwAccess.HWO]:
n_bits += self.msb - self.lsb + 1
if "qe" in bittype and self.hwaccess in [HwAccess.HRW, HwAccess.HRO]:
n_bits += self.hwqe
if "re" in bittype and self.hwaccess in [HwAccess.HRW, HwAccess.HRO]:
n_bits += self.hwre
if "de" in bittype and self.hwaccess in [HwAccess.HRW, HwAccess.HWO]:
n_bits += not self.hwext
return n_bits
def get_fields_flat(self):
return [self]
def get_basename(self):
return _get_basename(self.name)
class Reg():
name = ""
offset = 0
hwqe = False
hwre = False
hwext = False # External register
resval = 0
dvrights = "RO" # Used by UVM REG only
regwen = ""
fields = []
width = 0 # indicate register size
ishomog = 0
tags = []
def __init__(self, name=""):
self.name = name
self.offset = 0
self.hwqe = False
self.hwre = False
self.hwext = False # External register
self.resval = 0
self.dvrights = "RO" # Used by UVM REG only
self.regwen = ""
self.fields = []
self.width = 0
self.ishomog = 0
self.tags = []
def is_multi_reg(self):
"""Returns true if this is a multireg"""
return False
def get_n_bits(self, bittype=["q"]):
"""Returns number of bits in this register (including all multiregs and
fields). By default this function counts read data bits (bittype "q"),
but other bits such as "d", qe", "re", "de" can be counted as well by
specifying them in the bittype list argument.
"""
n_bits = 0
for f in self.fields:
n_bits += f.get_n_bits(bittype)
return n_bits
def get_fields_flat(self):
"""Returns a flat list of all the fields in this register"""
fields = []
for f in self.fields:
fields += f.get_fields_flat()
return fields
def get_field_flat(self, linear_idx):
"""Returns a specific field at a linear index position in
the flattened field list"""
fields_flat = self.get_fields_flat()
return fields_flat[linear_idx]
def get_n_fields_flat(self):
"""Returns the number of fields contained in the flat field list"""
return len(self.get_fields_flat())
def get_regs_flat(self):
"""Returns a flat list containing all
registers and subregisters"""
if isinstance(self.fields[0], Field):
return [self]
else:
regs = []
for r in self.fields:
regs += r.get_regs_flat()
return regs
def get_reg_flat(self, linear_index):
"""Returns a specific register at a linear index position in
        the flattened register list"""
regs_flat = self.get_regs_flat()
return regs_flat[linear_index]
def get_n_regs_flat(self):
"""Returns the number of registers contained in
the flat register list"""
return len(self.get_regs_flat())
def get_nested_dims(self):
"""Recursively get dimensions of nested registers (outputs a list)"""
# return length of flattened field array if this is a regular register,
# or if this is the last multiregister level in a nested multiregister
if not isinstance(self, MultiReg):
dims = [len(self.get_fields_flat())]
        elif isinstance(self, MultiReg) and \
not isinstance(self.fields[0], MultiReg):
if self.ishomog:
dims = [len(self.get_fields_flat())]
else:
dims = [len(self.fields)]
else:
# nested multiregister case
dims = [len(self.fields)] + self.fields[0].get_nested_dims()
return dims
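    # Illustrative sketch: a plain register whose fields flatten to N entries
    # returns [N]; nested multiregisters prepend one dimension per level.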
def get_nested_params(self):
"""Recursively get parameters of nested registers (outputs a list)"""
params = []
if isinstance(self, MultiReg):
params += [self.param]
if isinstance(self.fields[0], MultiReg):
params += self.fields[0].get_nested_params()
return params
def get_basename(self):
return _get_basename(self.name)
class MultiReg(Reg):
param = ""
def __init__(self, name):
Reg.__init__(self, name)
self.param = ""
def is_multi_reg(self):
"""Returns true if this is a multireg"""
return True
class Window():
base_addr = 0
limit_addr = 0
n_bits = 0
tags = []
def __init__(self):
self.base_addr = 0
self.limit_addr = 0
self.n_bits = 0
self.tags = []
class Block():
width = 32
addr_width = 12
base_addr = 0
name = ""
regs = []
wins = []
blocks = []
params = []
tags = []
def __init__(self):
self.width = 32
self.addr_width = 12
self.base_addr = 0
self.name = ""
self.regs = []
self.wins = []
self.blocks = []
self.params = []
self.tags = []
def get_regs_flat(self):
"""Returns flattened register list
"""
regs = []
for r in self.regs:
regs += r.get_regs_flat()
return regs
def get_n_bits(self, bittype=["q"]):
"""Returns number of bits in this block (including all multiregs and
fields). By default this function counts read data bits (bittype "q"),
but other bits such as "d", qe", "re", "de" can be counted as well by
specifying them in the bittype list argument.
"""
n_bits = 0
for r in self.regs:
n_bits += r.get_n_bits(bittype)
return n_bits
def get_n_regs_flat(self):
return len(self.get_regs_flat())
def contains_multiregs(self):
"""Returns true if there are multiregs in this block
"""
for r in self.regs:
if isinstance(r, MultiReg):
return True
return False
| 29.777778
| 80
| 0.584148
|
794aaa6ddbec7beb255acbf73bd9af55b4e66946
| 22,198
|
py
|
Python
|
airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams.py
|
dsdorazio/airbyte
|
078c6604a499c5ab68beb403d80b3068e6bef4ab
|
[
"MIT"
] | null | null | null |
airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams.py
|
dsdorazio/airbyte
|
078c6604a499c5ab68beb403d80b3068e6bef4ab
|
[
"MIT"
] | null | null | null |
airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams.py
|
dsdorazio/airbyte
|
078c6604a499c5ab68beb403d80b3068e6bef4ab
|
[
"MIT"
] | null | null | null |
#
# MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import time
import urllib.parse as urlparse
from abc import ABC
from collections import deque
from datetime import datetime
from typing import Any, Iterable, Iterator, List, Mapping, MutableMapping, Optional, Sequence, Union
import backoff
import pendulum
from airbyte_cdk.models import SyncMode
from airbyte_cdk.sources.streams import Stream
from airbyte_cdk.sources.streams.core import package_name_from_class
from airbyte_cdk.sources.utils.schema_helpers import ResourceSchemaLoader
from cached_property import cached_property
from facebook_business.adobjects.adreportrun import AdReportRun
from facebook_business.api import FacebookAdsApiBatch, FacebookRequest, FacebookResponse
from facebook_business.exceptions import FacebookRequestError
from source_facebook_marketing.api import API
from .common import FacebookAPIException, JobTimeoutException, batch, deep_merge, retry_pattern
backoff_policy = retry_pattern(backoff.expo, FacebookRequestError, max_tries=5, factor=5)
def remove_params_from_url(url: str, params: List[str]) -> str:
"""
Parses a URL and removes the query parameters specified in params
:param url: URL
:param params: list of query parameters
:return: URL with params removed
"""
parsed = urlparse.urlparse(url)
query = urlparse.parse_qs(parsed.query, keep_blank_values=True)
filtered = dict((k, v) for k, v in query.items() if k not in params)
return urlparse.urlunparse(
[parsed.scheme, parsed.netloc, parsed.path, parsed.params, urlparse.urlencode(filtered, doseq=True), parsed.fragment]
)
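# Example (illustrative URL, for demonstration only):
#   remove_params_from_url("https://x.test/img?a=1&_nc_hash=abc", ["_nc_hash"])
#   # -> "https://x.test/img?a=1"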
class FBMarketingStream(Stream, ABC):
"""Base stream class"""
primary_key = "id"
page_size = 100
enable_deleted = False
entity_prefix = None
def __init__(self, api: API, include_deleted: bool = False, **kwargs):
super().__init__(**kwargs)
self._api = api
self._include_deleted = include_deleted if self.enable_deleted else False
@cached_property
def fields(self) -> List[str]:
"""List of fields that we want to query, for now just all properties from stream's schema"""
return list(self.get_json_schema().get("properties", {}).keys())
@backoff_policy
def execute_in_batch(self, requests: Iterable[FacebookRequest]) -> Sequence[MutableMapping[str, Any]]:
"""Execute list of requests in batches"""
records = []
def success(response: FacebookResponse):
records.append(response.json())
def failure(response: FacebookResponse):
raise response.error()
api_batch: FacebookAdsApiBatch = self._api.api.new_batch()
for request in requests:
api_batch.add_request(request, success=success, failure=failure)
retry_batch = api_batch.execute()
if retry_batch:
raise FacebookAPIException(f"Batch has failed {len(retry_batch)} requests")
return records
def read_records(
self,
sync_mode: SyncMode,
cursor_field: List[str] = None,
stream_slice: Mapping[str, Any] = None,
stream_state: Mapping[str, Any] = None,
) -> Iterable[Mapping[str, Any]]:
"""Main read method used by CDK"""
for record in self._read_records(params=self.request_params(stream_state=stream_state)):
yield self.transform(self._extend_record(record, fields=self.fields))
def transform(self, record: Mapping[str, Any]) -> Mapping[str, Any]:
"""
        Use this method to update field types in the record according to the schema.
"""
schema = self.get_json_schema()
self.convert_to_schema_types(record, schema["properties"])
return record
def get_python_type(self, _types: Union[list, str]) -> tuple:
"""Converts types from schema to python types. Examples:
- `["string", "null"]` will be converted to `(str,)`
- `["array", "string", "null"]` will be converted to `(list, str,)`
- `"boolean"` will be converted to `(bool,)`
"""
types_mapping = {
"string": str,
"number": float,
"integer": int,
"object": dict,
"array": list,
"boolean": bool,
}
if isinstance(_types, list):
return tuple([types_mapping[t] for t in _types if t != "null"])
return (types_mapping[_types],)
def convert_to_schema_types(self, record: Mapping[str, Any], schema: Mapping[str, Any]):
"""
        Converts values in the record to the appropriate types from the schema. For example, say we have a
        `reach` value that has `number` type in the schema because it is, well, a number, but the API returns
        `reach` as a string. This function fixes that and converts the `reach` value from `string` to `number`.
        The same applies to all fields and all types from the schema.
"""
if not schema:
return
for key, value in record.items():
if key not in schema:
continue
if isinstance(value, dict):
self.convert_to_schema_types(record=value, schema=schema[key].get("properties", {}))
elif isinstance(value, list) and "items" in schema[key]:
for record_list_item in value:
if list in self.get_python_type(schema[key]["items"]["type"]):
# TODO Currently we don't have support for list of lists.
pass
elif dict in self.get_python_type(schema[key]["items"]["type"]):
self.convert_to_schema_types(record=record_list_item, schema=schema[key]["items"]["properties"])
elif not isinstance(record_list_item, self.get_python_type(schema[key]["items"]["type"])):
record[key] = self.get_python_type(schema[key]["items"]["type"])[0](record_list_item)
elif not isinstance(value, self.get_python_type(schema[key]["type"])):
record[key] = self.get_python_type(schema[key]["type"])[0](value)
def _read_records(self, params: Mapping[str, Any]) -> Iterable:
"""Wrapper around query to backoff errors.
        We provide a default implementation because read_records can still be overridden, so this method is not mandatory.
"""
return []
@backoff_policy
def _extend_record(self, obj: Any, **kwargs):
"""Wrapper around api_get to backoff errors"""
return obj.api_get(**kwargs).export_all_data()
def request_params(self, **kwargs) -> MutableMapping[str, Any]:
"""Parameters that should be passed to query_records method"""
params = {"limit": self.page_size}
if self._include_deleted:
params.update(self._filter_all_statuses())
return params
def _filter_all_statuses(self) -> MutableMapping[str, Any]:
"""Filter that covers all possible statuses thus including deleted/archived records"""
filt_values = [
"active",
"archived",
"completed",
"limited",
"not_delivering",
"deleted",
"not_published",
"pending_review",
"permanently_deleted",
"recently_completed",
"recently_rejected",
"rejected",
"scheduled",
"inactive",
]
return {
"filtering": [
{"field": f"{self.entity_prefix}.delivery_info", "operator": "IN", "value": filt_values},
],
}
class FBMarketingIncrementalStream(FBMarketingStream, ABC):
cursor_field = "updated_time"
def __init__(self, start_date: datetime, **kwargs):
super().__init__(**kwargs)
self._start_date = pendulum.instance(start_date)
def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]):
"""Update stream state from latest record"""
potentially_new_records_in_the_past = self._include_deleted and not current_stream_state.get("include_deleted", False)
record_value = latest_record[self.cursor_field]
state_value = current_stream_state.get(self.cursor_field) or record_value
max_cursor = max(pendulum.parse(state_value), pendulum.parse(record_value))
if potentially_new_records_in_the_past:
max_cursor = record_value
return {
self.cursor_field: str(max_cursor),
"include_deleted": self._include_deleted,
}
def request_params(self, stream_state: Mapping[str, Any], **kwargs) -> MutableMapping[str, Any]:
"""Include state filter"""
params = super().request_params(**kwargs)
params = deep_merge(params, self._state_filter(stream_state=stream_state or {}))
return params
def _state_filter(self, stream_state: Mapping[str, Any]) -> Mapping[str, Any]:
"""Additional filters associated with state if any set"""
state_value = stream_state.get(self.cursor_field)
filter_value = self._start_date if not state_value else pendulum.parse(state_value)
potentially_new_records_in_the_past = self._include_deleted and not stream_state.get("include_deleted", False)
if potentially_new_records_in_the_past:
self.logger.info(f"Ignoring bookmark for {self.name} because of enabled `include_deleted` option")
filter_value = self._start_date
return {
"filtering": [
{
"field": f"{self.entity_prefix}.{self.cursor_field}",
"operator": "GREATER_THAN",
"value": filter_value.int_timestamp,
},
],
}
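    # Illustrative example (hypothetical cursor value): for the Ads stream
    # (entity_prefix "ad") with a stored cursor of 2021-01-01T00:00:00Z this
    # returns {"filtering": [{"field": "ad.updated_time",
    #                         "operator": "GREATER_THAN", "value": 1609459200}]}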
class AdCreatives(FBMarketingStream):
"""AdCreative is append only stream
doc: https://developers.facebook.com/docs/marketing-api/reference/ad-creative
"""
entity_prefix = "adcreative"
batch_size = 50
def read_records(
self,
sync_mode: SyncMode,
cursor_field: List[str] = None,
stream_slice: Mapping[str, Any] = None,
stream_state: Mapping[str, Any] = None,
) -> Iterable[Mapping[str, Any]]:
"""Read records using batch API"""
records = self._read_records(params=self.request_params(stream_state=stream_state))
requests = [record.api_get(fields=self.fields, pending=True) for record in records]
for requests_batch in batch(requests, size=self.batch_size):
for record in self.execute_in_batch(requests_batch):
yield self.clear_urls(record)
@staticmethod
def clear_urls(record: MutableMapping[str, Any]) -> MutableMapping[str, Any]:
"""Some URLs has random values, these values doesn't affect validity of URLs, but breaks SAT"""
thumbnail_url = record.get("thumbnail_url")
if thumbnail_url:
record["thumbnail_url"] = remove_params_from_url(thumbnail_url, ["_nc_hash", "d"])
return record
@backoff_policy
def _read_records(self, params: Mapping[str, Any]) -> Iterator:
return self._api.account.get_ad_creatives(params=params)
class Ads(FBMarketingIncrementalStream):
"""doc: https://developers.facebook.com/docs/marketing-api/reference/adgroup"""
entity_prefix = "ad"
enable_deleted = True
@backoff_policy
def _read_records(self, params: Mapping[str, Any]):
return self._api.account.get_ads(params=params, fields=[self.cursor_field])
class AdSets(FBMarketingIncrementalStream):
"""doc: https://developers.facebook.com/docs/marketing-api/reference/ad-campaign"""
entity_prefix = "adset"
enable_deleted = True
@backoff_policy
def _read_records(self, params: Mapping[str, Any]):
return self._api.account.get_ad_sets(params=params)
class Campaigns(FBMarketingIncrementalStream):
"""doc: https://developers.facebook.com/docs/marketing-api/reference/ad-campaign-group"""
entity_prefix = "campaign"
enable_deleted = True
@backoff_policy
def _read_records(self, params: Mapping[str, Any]):
return self._api.account.get_campaigns(params=params)
class AdsInsights(FBMarketingIncrementalStream):
"""doc: https://developers.facebook.com/docs/marketing-api/insights"""
cursor_field = "date_start"
primary_key = None
ALL_ACTION_ATTRIBUTION_WINDOWS = [
"1d_click",
"7d_click",
"28d_click",
"1d_view",
"7d_view",
"28d_view",
]
ALL_ACTION_BREAKDOWNS = [
"action_type",
"action_target_id",
"action_destination",
]
MAX_WAIT_TO_START = pendulum.duration(minutes=5)
MAX_WAIT_TO_FINISH = pendulum.duration(minutes=30)
MAX_ASYNC_SLEEP = pendulum.duration(minutes=5)
MAX_ASYNC_JOBS = 10
INSIGHTS_RETENTION_PERIOD = pendulum.duration(days=37 * 30)
action_breakdowns = ALL_ACTION_BREAKDOWNS
level = "ad"
action_attribution_windows = ALL_ACTION_ATTRIBUTION_WINDOWS
time_increment = 1
breakdowns = []
def __init__(self, buffer_days, days_per_job, **kwargs):
super().__init__(**kwargs)
self.lookback_window = pendulum.duration(days=buffer_days)
self._days_per_job = days_per_job
def read_records(
self,
sync_mode: SyncMode,
cursor_field: List[str] = None,
stream_slice: Mapping[str, Any] = None,
stream_state: Mapping[str, Any] = None,
) -> Iterable[Mapping[str, Any]]:
"""Waits for current job to finish (slice) and yield its result"""
result = self.wait_for_job(stream_slice["job"])
        # because we query `lookback_window` days before the actual cursor we might get records older than the cursor
for obj in result.get_result():
yield self.transform(obj.export_all_data())
def stream_slices(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Optional[Mapping[str, Any]]]:
"""Slice by date periods and schedule async job for each period, run at most MAX_ASYNC_JOBS jobs at the same time.
This solution for Async was chosen because:
1. we should commit state after each successful job
2. we should run as many job as possible before checking for result
3. we shouldn't proceed to consumption of the next job before previous succeed
"""
stream_state = stream_state or {}
running_jobs = deque()
date_ranges = list(self._date_ranges(stream_state=stream_state))
for params in date_ranges:
params = deep_merge(params, self.request_params(stream_state=stream_state))
job = self._create_insights_job(params)
running_jobs.append(job)
if len(running_jobs) >= self.MAX_ASYNC_JOBS:
yield {"job": running_jobs.popleft()}
while running_jobs:
yield {"job": running_jobs.popleft()}
@backoff_policy
def wait_for_job(self, job) -> AdReportRun:
factor = 2
start_time = pendulum.now()
sleep_seconds = factor
while True:
job = job.api_get()
job_progress_pct = job["async_percent_completion"]
job_id = job["report_run_id"]
self.logger.info(f"ReportRunId {job_id} is {job_progress_pct}% complete ({job['async_status']})")
runtime = pendulum.now() - start_time
if job["async_status"] == "Job Completed":
return job
elif job["async_status"] == "Job Failed":
raise JobTimeoutException(f"AdReportRun {job} failed after {runtime.in_seconds()} seconds.")
elif job["async_status"] == "Job Skipped":
raise JobTimeoutException(f"AdReportRun {job} skipped after {runtime.in_seconds()} seconds.")
if runtime > self.MAX_WAIT_TO_START and job_progress_pct == 0:
raise JobTimeoutException(
f"AdReportRun {job} did not start after {runtime.in_seconds()} seconds."
f" This is an intermittent error which may be fixed by retrying the job. Aborting."
)
elif runtime > self.MAX_WAIT_TO_FINISH:
raise JobTimeoutException(
f"AdReportRun {job} did not finish after {runtime.in_seconds()} seconds."
f" This is an intermittent error which may be fixed by retrying the job. Aborting."
)
self.logger.info(f"Sleeping {sleep_seconds} seconds while waiting for AdReportRun: {job_id} to complete")
time.sleep(sleep_seconds)
if sleep_seconds < self.MAX_ASYNC_SLEEP.in_seconds():
sleep_seconds *= factor
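        # Polling cadence sketch: sleeps double between status checks
        # (2s, 4s, 8s, ...) and stop doubling once they pass MAX_ASYNC_SLEEP.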
def request_params(self, stream_state: Mapping[str, Any], **kwargs) -> MutableMapping[str, Any]:
params = super().request_params(stream_state=stream_state, **kwargs)
params = deep_merge(
params,
{
"level": self.level,
"action_breakdowns": self.action_breakdowns,
"breakdowns": self.breakdowns,
"fields": self.fields,
"time_increment": self.time_increment,
"action_attribution_windows": self.action_attribution_windows,
},
)
return params
def _state_filter(self, stream_state: Mapping[str, Any]) -> Mapping[str, Any]:
"""Works differently for insights, so remove it"""
return {}
def get_json_schema(self) -> Mapping[str, Any]:
"""Add fields from breakdowns to the stream schema
:return: A dict of the JSON schema representing this stream.
"""
schema = ResourceSchemaLoader(package_name_from_class(self.__class__)).get_schema("ads_insights")
schema["properties"].update(self._schema_for_breakdowns())
return schema
@cached_property
def fields(self) -> List[str]:
"""List of fields that we want to query, for now just all properties from stream's schema"""
schema = ResourceSchemaLoader(package_name_from_class(self.__class__)).get_schema("ads_insights")
return list(schema.get("properties", {}).keys())
def _schema_for_breakdowns(self) -> Mapping[str, Any]:
"""Breakdown fields and their type"""
schemas = {
"age": {"type": ["null", "integer", "string"]},
"gender": {"type": ["null", "string"]},
"country": {"type": ["null", "string"]},
"dma": {"type": ["null", "string"]},
"region": {"type": ["null", "string"]},
"impression_device": {"type": ["null", "string"]},
"placement": {"type": ["null", "string"]},
"platform_position": {"type": ["null", "string"]},
"publisher_platform": {"type": ["null", "string"]},
}
breakdowns = self.breakdowns[:]
if "platform_position" in breakdowns:
breakdowns.append("placement")
        return {breakdown: schemas[breakdown] for breakdown in breakdowns}
def _date_ranges(self, stream_state: Mapping[str, Any]) -> Iterator[dict]:
"""Iterate over period between start_date/state and now
Notes: Facebook freezes insight data 28 days after it was generated, which means that all data
from the past 28 days may have changed since we last emitted it, so we retrieve it again.
"""
state_value = stream_state.get(self.cursor_field)
if state_value:
start_date = pendulum.parse(state_value) - self.lookback_window
else:
start_date = self._start_date
end_date = pendulum.now()
start_date = max(end_date - self.INSIGHTS_RETENTION_PERIOD, start_date)
for since in pendulum.period(start_date, end_date).range("days", self._days_per_job):
until = min(since.add(days=self._days_per_job - 1), end_date) # -1 because time_range is inclusive
yield {
"time_range": {"since": since.to_date_string(), "until": until.to_date_string()},
}
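    # Example slice (hypothetical dates): with _days_per_job=7 and a start of
    # 2021-01-01, the first yielded range is
    #   {"time_range": {"since": "2021-01-01", "until": "2021-01-07"}}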
@backoff_policy
def _create_insights_job(self, params) -> AdReportRun:
job = self._api.account.get_insights(params=params, is_async=True)
job_id = job["report_run_id"]
time_range = params["time_range"]
self.logger.info(f"Created AdReportRun: {job_id} to sync insights {time_range} with breakdown {self.breakdowns}")
return job
class AdsInsightsAgeAndGender(AdsInsights):
breakdowns = ["age", "gender"]
class AdsInsightsCountry(AdsInsights):
breakdowns = ["country"]
class AdsInsightsRegion(AdsInsights):
breakdowns = ["region"]
class AdsInsightsDma(AdsInsights):
breakdowns = ["dma"]
class AdsInsightsPlatformAndDevice(AdsInsights):
breakdowns = ["publisher_platform", "platform_position", "impression_device"]
action_breakdowns = ["action_type"] # FB Async Job fails for unknown reason if we set other breakdowns
class AdsInsightsActionType(AdsInsights):
breakdowns = []
action_breakdowns = ["action_type"]
| 40.655678
| 126
| 0.652401
|
794aabcd0e8d4ad6c6d3d6e609ac48a9b7a0f6f1
| 7,004
|
py
|
Python
|
vodloader.py
|
retrontology/vodloader
|
6b9b62854f164f3a7bb0619fc1950fb841274581
|
[
"MIT"
] | 5
|
2021-05-20T00:54:48.000Z
|
2022-01-23T01:18:32.000Z
|
vodloader.py
|
retrontology/vodloader
|
6b9b62854f164f3a7bb0619fc1950fb841274581
|
[
"MIT"
] | 2
|
2021-05-08T23:01:41.000Z
|
2021-05-30T08:51:10.000Z
|
vodloader.py
|
retrontology/vodloader
|
6b9b62854f164f3a7bb0619fc1950fb841274581
|
[
"MIT"
] | 2
|
2021-05-08T22:57:42.000Z
|
2021-06-19T16:52:41.000Z
|
from twitchAPI.types import VideoType, EventSubSubscriptionConflict, EventSubSubscriptionTimeout, EventSubSubscriptionError
from time import sleep
from threading import Thread
import logging
from vodloader_video import vodloader_video
from vodloader_status import vodloader_status
from youtube_uploader import YouTubeOverQuota, youtube_uploader
import datetime
import pytz
import os
class vodloader(object):
def __init__(self, sl, channel, twitch, webhook, twitch_config, yt_json, download_dir, keep=False, upload=True, sort=True, quota_pause=True, tz=pytz.timezone("America/Chicago")):
self.streamlink = sl
self.end = False
self.channel = channel
self.logger = logging.getLogger(f'vodloader.{self.channel}')
self.logger.info(f'Setting up vodloader for {self.channel}')
self.tz = tz
self.download_dir = download_dir
self.keep = keep
self.twitch = twitch
self.webhook = webhook
self.upload = upload
self.quota_pause = quota_pause
if self.upload:
self.uploader = youtube_uploader(self, yt_json, twitch_config['youtube_param'], sort)
if self.uploader.sort:
self.uploader.sort_playlist_by_timestamp(twitch_config['youtube_param']['playlistId'])
else:
self.uploader = None
self.user_id = self.get_user_id()
self.status = vodloader_status(self.user_id)
self.sync_status()
self.get_live()
self.webhook_subscribe()
if 'chapters' in twitch_config and twitch_config['chapters'] != "":
self.chapters_type = twitch_config['chapters']
else:
self.chapters_type = False
if 'quality' in twitch_config and twitch_config['quality'] != "":
self.quality = twitch_config['quality']
else:
self.quality = 'best'
if 'backlog' in twitch_config and twitch_config['backlog']:
self.backlog = twitch_config['backlog']
else:
self.backlog = False
if self.backlog:
self.backlog_process = Thread(target=self.backlog_buffload, args=(), daemon=True)
self.backlog_process.start()
def __del__(self):
self.webhook_unsubscribe()
async def callback_online(self, data: dict):
if not self.live:
self.live = True
self.logger.info(f'{self.channel} has gone live!')
data = self.twitch.get_streams(user_id=self.user_id)['data'][0]
url = 'https://www.twitch.tv/' + self.channel
self.livestream = vodloader_video(self, url, data, backlog=False, quality=self.quality)
async def callback_offline(self, data: dict):
self.live = False
self.logger.info(f'{self.channel} has gone offline')
async def callback_channel_update(self, data:dict):
if self.live:
data = data['event']
if self.livestream.chapters.get_current_game() != data["category_name"]:
self.logger.info(f'{self.channel} has changed game to {data["category_name"]}')
if self.livestream.chapters.get_current_title() != data["title"]:
self.logger.info(f'{self.channel} has changed their title to {data["title"]}')
self.livestream.chapters.append(data['category_name'], data['title'])
def get_live(self):
data = self.twitch.get_streams(user_id=self.user_id)
if not data['data']:
self.live = False
elif data['data'][0]['type'] == 'live':
self.live = True
else:
self.live = False
return self.live
def webhook_unsubscribe(self):
if self.webhook_uuid:
success = set()
for uuid in self.webhook_uuid:
success.add(self.webhook.unsubscribe_topic(uuid))
self.webhook_uuid = None
self.logger.info(f'Unsubscribed from eventsub for {self.channel}')
return all(success)
else:
return True
def webhook_subscribe(self):
try:
online_uuid = self.webhook.listen_stream_online(self.user_id, self.callback_online)
offline_uuid = self.webhook.listen_stream_offline(self.user_id, self.callback_offline)
channel_update_uuid = self.webhook.listen_channel_update(self.user_id, self.callback_channel_update)
self.webhook_uuid = {online_uuid, offline_uuid, channel_update_uuid}
self.logger.info(f'Subscribed to eventsub for {self.channel}')
except (EventSubSubscriptionConflict, EventSubSubscriptionTimeout, EventSubSubscriptionError) as e:
self.logger.error(e)
self.webhook_uuid = None
def get_user_id(self):
user_info = self.twitch.get_users(logins=[self.channel])
return user_info['data'][0]['id']
def sync_status(self):
ids = []
for id in self.status.copy():
if self.status[id] == False:
if not os.path.isfile(os.path.join(self.download_dir, f'{id}.ts')):
self.status.pop(id)
try:
for video in self.uploader.get_channel_videos():
if video['tvid']:
if video['part'] and video['part'] > 1:
ids.append(f'{video["tvid"]}p{video["part"]}')
else:
ids.append(str(video['tvid']))
for id in self.status.copy():
if not id in ids and self.status[id] == True:
self.status.pop(id)
for id in ids:
self.status[id] = True
self.logger.debug('Status synced with YouTube uploads')
except YouTubeOverQuota:
self.logger.error("YouTube quota is exceeded, can't sync status")
self.status.save()
def get_twitch_videos(self, video_type=VideoType.ARCHIVE):
cursor = None
videos = []
while True:
data = self.twitch.get_videos(user_id=self.user_id, first=100, after=cursor)
for video in data['data']:
if video['type'] == video_type:
videos.append(video)
if not 'cursor' in data['pagination']:
break
else:
cursor = data['pagination']['cursor']
return videos
def backlog_buffload(self):
videos = self.get_twitch_videos()
videos.sort(reverse=False, key=lambda x: datetime.datetime.strptime((x['created_at']), '%Y-%m-%dT%H:%M:%SZ'))
for video in videos:
if self.end: exit()
if self.uploader.pause and self.quota_pause:
self.logger.info('Pausing backlog processing until YouTube quota is refreshed')
while self.uploader.pause:
sleep(10)
self.backlog_video = vodloader_video(self, video['url'], video, backlog=True, quality=self.quality)
self.backlog_video.thread.join()
| 43.234568
| 182
| 0.611508
|
794aac901a4f7be1f96bd0bf07eb907bb4569410
| 8,098
|
py
|
Python
|
Python/libraries/datatypes-timex-expression/datatypes_timex_expression/timex_helpers.py
|
inloco/Recognizers-Text
|
9f4ac7cd4170fe39e48ccf52c028877e7c421e60
|
[
"MIT"
] | 1
|
2019-01-03T16:41:29.000Z
|
2019-01-03T16:41:29.000Z
|
Python/libraries/datatypes-timex-expression/datatypes_timex_expression/timex_helpers.py
|
inloco/Recognizers-Text
|
9f4ac7cd4170fe39e48ccf52c028877e7c421e60
|
[
"MIT"
] | 76
|
2018-11-09T18:19:44.000Z
|
2019-08-20T20:29:53.000Z
|
Python/libraries/datatypes-timex-expression/datatypes_timex_expression/timex_helpers.py
|
inloco/Recognizers-Text
|
9f4ac7cd4170fe39e48ccf52c028877e7c421e60
|
[
"MIT"
] | 6
|
2017-05-04T17:24:59.000Z
|
2019-07-23T15:48:44.000Z
|
from datetime import date, timedelta, datetime
from math import floor
from .timex_inference import TimexInference
from .date_range import DateRange
from .time import Time
from .time_range import TimeRange
from .timex_constants import Constants
from .timex_range import TimexRange
class TimexHelpers:
@staticmethod
def expand_datetime_range(timex):
from datatypes_timex_expression import Timex
types = timex.types if len(timex.types) != 0 else TimexInference.infer(timex)
if Constants.TIMEX_TYPES_DURATION in types:
start = TimexHelpers.clone_datetime(timex)
duration = TimexHelpers.clone_duration(timex)
return TimexRange(start, TimexHelpers.timex_datetime_add(start, duration), duration)
else:
if timex.year is not None:
start = Timex()
start.year = timex.year
result = TimexRange(start, Timex())
if timex.month is not None:
result.start.month = timex.month
result.start.day_of_month = 1
result.end.year = timex.year
result.end.month = timex.month + 1
result.end.day_of_month = 1
else:
result.start.month = 1
result.start.day_of_month = 1
result.end.year = timex.year + 1
result.end.month = 1
result.end.day_of_month = 1
return result
return TimexRange(Timex(), Timex())
@staticmethod
def expand_time_range(timex):
from datatypes_timex_expression import TimexCreator
from datatypes_timex_expression import Timex
if not (Constants.TIMEX_TYPES_TIMERANGE in timex.types):
raise TypeError
if timex.part_of_day is not None:
if timex.part_of_day == 'DT':
timex = Timex(TimexCreator.DAYTIME)
elif timex.part_of_day == 'MO':
timex = Timex(TimexCreator.MORNING)
elif timex.part_of_day == 'AF':
timex = Timex(TimexCreator.AFTERNOON)
elif timex.part_of_day == 'EV':
timex = Timex(TimexCreator.EVENING)
elif timex.part_of_day == 'NI':
timex = Timex(TimexCreator.NIGHT)
else:
raise TypeError
start = Timex(hour=timex.hour, minute=timex.minute, second=timex.second)
duration = TimexHelpers.clone_duration(timex)
return TimexRange(start, TimexHelpers.add_time(start, duration))
@staticmethod
def timex_date_add(start, duration):
from datatypes_timex_expression import Timex
duration_days = duration.days
if duration.days is None and duration.weeks is not None:
duration_days = 7 * duration.weeks
if start.day_of_week:
end = start.clone()
if duration.days:
end.day_of_week += duration.days
return end
if start.month is not None and start.day_of_month is not None:
if duration_days:
if start.year:
d = date(start.year, start.month, start.day_of_month)
d = d + timedelta(days=int(duration_days))
result = Timex()
result.year = d.year
result.month = d.month
result.day_of_month = d.day
return result
else:
d = date(2001, start.month, start.day_of_month)
d = d + timedelta(int(duration_days))
result = Timex()
result.month = d.month
result.day_of_month = d.day
return result
if duration.years:
if start.year:
result = Timex()
result.year = start.year + duration.years
result.month = start.month
result.day_of_month = start.day_of_month
return result
        if duration.months:
if start.month:
result = Timex()
result.year = start.year
result.month = start.month + duration.months
result.day_of_month = start.day_of_month
return result
return start
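    # Illustrative example: adding a one-day duration (days=1) to a timex with
    # month=5, day_of_month=29 yields month=5, day_of_month=30 (the year 2001
    # is used internally when the timex carries no year).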
@staticmethod
def timex_time_add(start, duration):
if duration.hours is not None:
result = start.clone()
result.hour = result.hour + int(duration.hours)
if result.hour > 23:
days = floor(result.hour / 24)
hour = result.hour % 24
result.hour = hour
                if result.year is not None and result.month is not None and result.day_of_month is not None:
d = datetime(result.year, result.month, result.day_of_month, 0, 0, 0)
d = d + timedelta(days=float(days))
result.year = d.year
result.month = d.month
result.day_of_month = d.day
return result
if result.day_of_week is not None:
result.day_of_week += int(days)
return result
return result
if duration.minutes is not None:
result = start.clone()
result.minute += int(duration.minutes)
            if result.minute > 59:
result.hour = result.hour + 1
result.minute = 0
return result
return start
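    # Illustrative example: a start of hour=23 plus a duration of hours=2
    # gives hour=1 and, when a full date is present, rolls the date forward
    # by one day.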
@staticmethod
def timex_datetime_add(start, duration):
a = TimexHelpers.timex_date_add(start, duration)
b = TimexHelpers.timex_time_add(a, duration)
return b
@staticmethod
def date_from_timex(timex):
return date(
timex.year if timex.year is not None else 2001,
timex.month if timex.month is not None else 1,
timex.day_of_month if timex.day_of_month is not None else 1
)
@staticmethod
def time_from_timex(timex):
return Time(
timex.hour if timex.hour is not None else 0,
timex.minute if timex.minute is not None else 0,
timex.second if timex.second is not None else 0)
@staticmethod
def daterange_from_timex(timex):
expanded = TimexHelpers.expand_datetime_range(timex)
return DateRange(
TimexHelpers.date_from_timex(expanded.start),
TimexHelpers.date_from_timex(expanded.end))
@staticmethod
def timerange_from_timex(timex):
expanded = TimexHelpers.expand_time_range(timex)
return TimeRange(
TimexHelpers.time_from_timex(expanded.start),
TimexHelpers.time_from_timex(expanded.end))
@staticmethod
def add_time(start, duration):
from datatypes_timex_expression import Timex
result = Timex()
result.hour = start.hour + (duration.hours if duration.hours is not None else 0)
        result.minute = start.minute + (duration.minutes if duration.minutes is not None else 0)
        result.second = start.second + (duration.seconds if duration.seconds is not None else 0)
return result
@staticmethod
def clone_datetime(timex):
result = timex.clone()
result.years = None
result.months = None
result.weeks = None
result.days = None
result.hours = None
result.minutes = None
result.seconds = None
return result
@staticmethod
def clone_duration(timex):
result = timex.clone()
result.year = None
result.month = None
result.day_of_month = None
result.day_of_week = None
result.week_of_year = None
result.week_of_month = None
result.season = None
result.hour = None
result.minute = None
result.second = None
result.weekend = None
result.part_of_day = None
return result
| 35.674009
| 96
| 0.57187
|
794aadb9a06ac5d31f40133fa95070269163088b
| 2,107
|
py
|
Python
|
venv/Lib/site-packages/pyrogram/raw/base/stats/broadcast_stats.py
|
iamgeorgiy/heroku-userbot
|
5a92417d16f8ead949d88cb38da213fc2da5d3a4
|
[
"Apache-2.0"
] | null | null | null |
venv/Lib/site-packages/pyrogram/raw/base/stats/broadcast_stats.py
|
iamgeorgiy/heroku-userbot
|
5a92417d16f8ead949d88cb38da213fc2da5d3a4
|
[
"Apache-2.0"
] | null | null | null |
venv/Lib/site-packages/pyrogram/raw/base/stats/broadcast_stats.py
|
iamgeorgiy/heroku-userbot
|
5a92417d16f8ead949d88cb38da213fc2da5d3a4
|
[
"Apache-2.0"
] | null | null | null |
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2020 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
from typing import Union
from pyrogram import raw
from pyrogram.raw.core import TLObject
BroadcastStats = Union[raw.types.stats.BroadcastStats]
# noinspection PyRedeclaration
class BroadcastStats: # type: ignore
"""This base type has 1 constructor available.
Constructors:
.. hlist::
:columns: 2
- :obj:`stats.BroadcastStats <pyrogram.raw.types.stats.BroadcastStats>`
See Also:
This object can be returned by 1 method:
.. hlist::
:columns: 2
- :obj:`stats.GetBroadcastStats <pyrogram.raw.functions.stats.GetBroadcastStats>`
"""
QUALNAME = "pyrogram.raw.base.stats.BroadcastStats"
def __init__(self):
raise TypeError("Base types can only be used for type checking purposes: "
"you tried to use a base type instance as argument, "
"but you need to instantiate one of its constructors instead. "
"More info: https://docs.pyrogram.org/telegram/base/broadcast-stats")
| 36.327586
| 93
| 0.639298
|
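Since BroadcastStats is a bare base type, the only way to obtain an instance is through the raw method named in the docstring above. A minimal sketch, assuming an already-authorized session named "my_account" and admin rights on a placeholder channel "my_channel"; resolve_peer followed by an explicit InputChannel is the usual pattern for raw calls.

# Sketch only: session name, channel username and admin rights are assumptions.
import asyncio
from pyrogram import Client, raw

async def main():
    async with Client("my_account") as app:
        peer = await app.resolve_peer("my_channel")
        channel = raw.types.InputChannel(
            channel_id=peer.channel_id, access_hash=peer.access_hash
        )
        # Client.send() executes raw functions in Pyrogram 1.x;
        # it was renamed to Client.invoke() in 2.x.
        stats = await app.send(raw.functions.stats.GetBroadcastStats(channel=channel))
        print(stats)  # a raw.types.stats.BroadcastStats instance

asyncio.run(main())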
794aae6b7df9a9130c935ce7b01d01575297e946
| 3,287
|
py
|
Python
|
blender/2.79/scripts/addons/io_scene_ms3d/__init__.py
|
uzairakbar/bpy2.79
|
3a3e0004ac6783c4e4b89d939e4432de99026a85
|
[
"MIT"
] | 2
|
2019-11-27T09:05:42.000Z
|
2020-02-20T01:25:23.000Z
|
io_scene_ms3d/__init__.py
|
1-MillionParanoidTterabytes/blender-addons-master
|
acc8fc23a38e6e89099c3e5079bea31ce85da06a
|
[
"Unlicense"
] | null | null | null |
io_scene_ms3d/__init__.py
|
1-MillionParanoidTterabytes/blender-addons-master
|
acc8fc23a38e6e89099c3e5079bea31ce85da06a
|
[
"Unlicense"
] | 4
|
2020-02-19T20:02:26.000Z
|
2022-02-11T18:47:56.000Z
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
bl_info = {
"name": "MilkShape3D MS3D format (.ms3d)",
"description": "Import / Export MilkShape3D MS3D files "
"(conform with MilkShape3D v1.8.4)",
"author": "Alexander Nussbaumer",
"version": (2, 72, 2),
"blender": (2, 72, 2),
"location": "File > Import & File > Export",
"warning": "",
"wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/"
"Scripts/Import-Export/MilkShape3D_MS3D",
"category": "Import-Export",
}
###############################################################################
#234567890123456789012345678901234567890123456789012345678901234567890123456789
#--------1---------2---------3---------4---------5---------6---------7---------
# ##### BEGIN COPYRIGHT BLOCK #####
#
# initial script copyright (c)2011-2013 Alexander Nussbaumer
#
# ##### END COPYRIGHT BLOCK #####
# To support reload properly, try to access a package var,
# if it's there, reload everything
if 'bpy' in locals():
import importlib
if 'io_scene_ms3d.ms3d_ui' in locals():
importlib.reload(io_scene_ms3d.ms3d_ui)
else:
    from io_scene_ms3d import ms3d_ui  # bind the module itself for register()/unregister()
    from io_scene_ms3d.ms3d_ui import (
Ms3dImportOperator,
Ms3dExportOperator,
)
#import blender stuff
from bpy.utils import (
register_module,
unregister_module,
)
from bpy.types import (
INFO_MT_file_export,
INFO_MT_file_import,
)
###############################################################################
# registration
def register():
####################
# F8 - key
import importlib
importlib.reload(ms3d_ui)
# F8 - key
####################
ms3d_ui.register()
register_module(__name__)
INFO_MT_file_export.append(Ms3dExportOperator.menu_func)
INFO_MT_file_import.append(Ms3dImportOperator.menu_func)
def unregister():
ms3d_ui.unregister()
unregister_module(__name__)
INFO_MT_file_export.remove(Ms3dExportOperator.menu_func)
INFO_MT_file_import.remove(Ms3dImportOperator.menu_func)
###############################################################################
# global entry point
if (__name__ == "__main__"):
register()
###############################################################################
#234567890123456789012345678901234567890123456789012345678901234567890123456789
#--------1---------2---------3---------4---------5---------6---------7---------
# ##### END OF FILE #####
| 30.719626
| 79
| 0.584119
|
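register() above hangs a menu_func from each operator onto Blender's 2.7x import/export menus. The operators' real callbacks live in ms3d_ui and are not shown in this file; purely to illustrate the shape that INFO_MT_file_export.append expects, a minimal menu callback looks like the following (the operator id and label are placeholders, not the addon's actual values).

# Illustrative Blender 2.7x menu callback; not the addon's own code.
def menu_func_export(self, context):
    # `self` is the menu being drawn; the string must match the bl_idname
    # of a registered export operator.
    self.layout.operator("export_scene.some_format", text="Some Format (.ext)")

# Hooked up exactly the way the addon does it:
#   INFO_MT_file_export.append(menu_func_export)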
794aaeaaef6ab4dcd98d135a4fdaae0c4ca6aaae
| 1,003
|
py
|
Python
|
labeling.py
|
qwerlarlgus/Object_tracking
|
1ef0eded171d2d432cfdcfbeb5b794a6546bf116
|
[
"MIT"
] | null | null | null |
labeling.py
|
qwerlarlgus/Object_tracking
|
1ef0eded171d2d432cfdcfbeb5b794a6546bf116
|
[
"MIT"
] | null | null | null |
labeling.py
|
qwerlarlgus/Object_tracking
|
1ef0eded171d2d432cfdcfbeb5b794a6546bf116
|
[
"MIT"
] | null | null | null |
import sys
import random
import numpy as np
import cv2
src = cv2.imread('crop0.jpg', cv2.IMREAD_GRAYSCALE)
if src is None:
print('Image load failed!')
sys.exit()
h, w = src.shape[:2]
dst1 = np.zeros((h, w, 3), np.uint8)
dst2 = np.zeros((h, w, 3), np.uint8)
# preprocessing
src = cv2.blur(src, (3, 3))
_, src_bin = cv2.threshold(src, 0, 255, cv2.THRESH_OTSU)
# connected-component labeling
cnt, labels, stats, centroids = cv2.connectedComponentsWithStats(src_bin)
for i in range(1, cnt):
c = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
dst1[labels == i] = c
# contour detection
contours, _ = cv2.findContours(src_bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
for i in range(len(contours)):
c = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
cv2.drawContours(dst2, contours, i, c, 1)
cv2.imshow('src', src)
cv2.imshow('src_bin', src_bin)
cv2.imshow('dst1', dst1)
cv2.imshow('dst2', dst2)
cv2.waitKey()
cv2.destroyAllWindows()
| 25.075
| 81
| 0.65005
|
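The stats and centroids arrays returned by cv2.connectedComponentsWithStats go unused in labeling.py. As a sketch of what they hold, the lines below (meant to be appended to the script above, before cv2.waitKey) draw each component's bounding box and centroid; the 20-pixel area threshold is an arbitrary illustration value.

# Row i of `stats` is [x, y, width, height, area]; row i of `centroids` is (cx, cy).
dst3 = np.zeros((h, w, 3), np.uint8)
for i in range(1, cnt):  # label 0 is the background
    x, y, bw, bh, area = stats[i]
    if area < 20:        # skip tiny noise blobs (illustrative threshold)
        continue
    cv2.rectangle(dst3, (x, y), (x + bw, y + bh), (0, 255, 255), 1)
    cx, cy = centroids[i]
    cv2.circle(dst3, (int(cx), int(cy)), 2, (0, 0, 255), -1)
cv2.imshow('dst3', dst3)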
794aaee4046557600d02440f4e9cd1b8a8f1ff95
| 2,292
|
py
|
Python
|
redis-srv.py
|
M-O-P-D/crims
|
19ca9ac3701bd6725e01ee29a2bfb429775d79da
|
[
"MIT"
] | null | null | null |
redis-srv.py
|
M-O-P-D/crims
|
19ca9ac3701bd6725e01ee29a2bfb429775d79da
|
[
"MIT"
] | 12
|
2020-11-11T11:24:05.000Z
|
2021-03-11T11:43:43.000Z
|
redis-srv.py
|
M-O-P-D/crims
|
19ca9ac3701bd6725e01ee29a2bfb429775d79da
|
[
"MIT"
] | null | null | null |
#redis-srv.py
import redis
import pandas as pd
import pickle
import json
from datetime import date
# requires non-python dependency redis-server
# sudo apt install redis-server
# follow instructions here: https://www.digitalocean.com/community/tutorials/how-to-install-and-secure-redis-on-ubuntu-18-04
import neworder as no
class RedisDemoModel(no.Model):
def __init__(self, force, start_year, start_month, end_year, end_month):
super().__init__(no.CalendarTimeline(date(start_year, start_month, 1), date(end_year, end_month, 1), 1, "m", 1), no.MonteCarlo.nondeterministic_stream)
self.force = force
self.len = 2
pubsub.subscribe("crime_rate")
def step(self):
data = pd.DataFrame(data = { "force": [self.force]*self.len, "date": [self.timeline().time()]*self.len, "value": self.mc().ustream(self.len)})
# send some data
cache.publish("crime_data", pickle.dumps(data))
# wait for response...
for m in pubsub.listen():
no.log(m)
if m["type"] == "message" and m["channel"] == b"crime_rate":
        # adjust amount of data to produce according to feedback from upstream model
self.len = json.loads(m["data"])
break
def checkpoint(self):
# send done signal (NB (int)0 gets serialised as b"0" i.e. a string (?))
cache.publish("crime_model_result", json.dumps({"status": 0}))
pubsub.unsubscribe("crime_rate")
cache = redis.StrictRedis(host='localhost', port=6379)
pubsub = cache.pubsub()
pubsub.subscribe("crime_model_init")
# m = RedisDemoModel("West Yorkshire", 2020, 1, 2021, 1)
# no.run(m)
#def run_model(force, start_year, start_month, duration_months):
# listen for model requests
for m in pubsub.listen():
no.log(m)
if m["type"] == "message" and m["channel"] == b"crime_model_init":
params = json.loads(m["data"])
m = RedisDemoModel(params["force"], params["start_year"], params["start_month"], params["end_year"], params["end_month"])
no.run(m)
# # # or json? "unpickling can execute code"
# # cache.set("crimes", pickle.dumps(df))
# for m in range(3):
# print(m)
# # send crime data
# cache.publish("crime_data", pickle.dumps(df))
# # wait for response
# m = next(pubsub.listen())
# if m["type"] == "message":
# print(m["data"])
# else:
# print(m)
| 28.296296
| 155
| 0.671466
|
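The pub/sub protocol in redis-srv.py is half of a pair: the model publishes pickled frames on crime_data and blocks until something answers on crime_rate, and it starts only when crime_model_init arrives. A minimal sketch of the upstream side, under the assumption that the channel names and JSON/pickle encodings are exactly as in redis-srv.py (the project's real upstream model is not shown here):

# upstream-sketch.py -- illustrative counterpart, not part of the repo.
import json
import pickle
import redis

cache = redis.StrictRedis(host='localhost', port=6379)
pubsub = cache.pubsub()
pubsub.subscribe("crime_data", "crime_model_result")

# Kick off a model run (field names mirror RedisDemoModel.__init__).
cache.publish("crime_model_init", json.dumps({
    "force": "West Yorkshire",
    "start_year": 2020, "start_month": 1,
    "end_year": 2021, "end_month": 1,
}))

for m in pubsub.listen():
    if m["type"] != "message":
        continue
    if m["channel"] == b"crime_data":
        df = pickle.loads(m["data"])  # trusted source only: unpickling runs code
        print(df)
        cache.publish("crime_rate", json.dumps(3))  # ask for 3 rows next step
    elif m["channel"] == b"crime_model_result":
        break  # model signalled completion from its checkpoint()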
794aaf7234b3ee90332fb39568eb198808b5e94f
| 5,348
|
py
|
Python
|
model_zoo/official/nlp/bert/scripts/ascend_distributed_launcher/run_distribute_pretrain.py
|
Rossil2012/mindspore
|
8a20b5d784b3fec6d32e058581ec56ec553a06a0
|
[
"Apache-2.0"
] | 1
|
2021-04-23T06:35:18.000Z
|
2021-04-23T06:35:18.000Z
|
model_zoo/official/nlp/bert/scripts/ascend_distributed_launcher/run_distribute_pretrain.py
|
nudt-eddie/mindspore
|
55372b41fdfae6d2b88d7078971e06d537f6c558
|
[
"Apache-2.0"
] | null | null | null |
model_zoo/official/nlp/bert/scripts/ascend_distributed_launcher/run_distribute_pretrain.py
|
nudt-eddie/mindspore
|
55372b41fdfae6d2b88d7078971e06d537f6c558
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""distribute pretrain script"""
import os
import json
import configparser
import multiprocessing
from argparse import ArgumentParser
def parse_args():
"""
    parse args.
Args:
Returns:
args.
Examples:
>>> parse_args()
"""
parser = ArgumentParser(description="mindspore distributed training")
parser.add_argument("--run_script_dir", type=str, default="",
help="Run script path, it is better to use absolute path")
parser.add_argument("--hyper_parameter_config_dir", type=str, default="",
help="Hyper Parameter config path, it is better to use absolute path")
parser.add_argument("--data_dir", type=str, default="",
help="Data path, it is better to use absolute path")
parser.add_argument("--hccl_config_dir", type=str, default="",
help="Hccl config path, it is better to use absolute path")
args = parser.parse_args()
return args
def distribute_pretrain():
"""
    distribute pretrain scripts. The number of D chips can be allocated automatically
    based on the device_num set in the hccl config file; you do not need to specify it.
"""
print("start", __file__)
args = parse_args()
run_script = args.run_script_dir
data_dir = args.data_dir
cf = configparser.ConfigParser()
cf.read(args.hyper_parameter_config_dir)
cfg = dict(cf.items("config"))
print("hccl_config_dir:", args.hccl_config_dir)
os.environ['RANK_TABLE_FILE'] = args.hccl_config_dir
cores = multiprocessing.cpu_count()
print("the number of logical core:", cores)
# get device_ips
device_ips = {}
with open('/etc/hccn.conf', 'r') as fin:
for hccn_item in fin.readlines():
if hccn_item.strip().startswith('address_'):
device_id, device_ip = hccn_item.split('=')
device_id = device_id.split('_')[1]
device_ips[device_id] = device_ip.strip()
with open(args.hccl_config_dir, "r", encoding="utf-8") as fin:
hccl_config = json.loads(fin.read())
rank_size = 0
for server in hccl_config["server_list"]:
rank_size += len(server["device"])
if server["device"][0]["device_ip"] in device_ips.values():
this_server = server
os.environ['RANK_SIZE'] = str(rank_size)
print("total rank size:", rank_size)
print("this server rank size:", len(this_server["device"]))
avg_core_per_rank = int(int(cores) / len(this_server["device"]))
core_gap = avg_core_per_rank - 1
print("avg_core_per_rank:", avg_core_per_rank)
count = 0
for instance in this_server["device"]:
device_id = instance["device_id"]
rank_id = instance["rank_id"]
print("\nstart training for rank " + str(rank_id) + ", device " + str(device_id) + ":")
print("rank_id:", rank_id)
print("device_id:", device_id)
start = count * int(avg_core_per_rank)
count += 1
end = start + core_gap
cmdopt = str(start) + "-" + str(end)
os.environ["DEVICE_ID"] = device_id
os.environ["RANK_ID"] = rank_id
os.environ["DEPLOY_MODE"] = "0"
os.environ["GE_USE_STATIC_MEMORY"] = "1"
os.system("rm -rf LOG" + str(device_id))
os.system("mkdir ./LOG" + str(device_id))
os.system("cp *.py ./LOG" + str(device_id))
os.system("mkdir -p ./LOG" + str(device_id) + "/ms_log")
os.system("env > ./LOG" + str(device_id) + "/env.log")
cur_dir = os.getcwd()
os.environ["GLOG_log_dir"] = cur_dir + "/LOG" + str(device_id) + "/ms_log"
os.environ["GLOG_logtostderr"] = "0"
print("core_nums:", cmdopt)
print("epoch_size:", str(cfg['epoch_size']))
print("data_dir:", data_dir)
print("log_file_dir: " + cur_dir + "/LOG" + str(device_id) + "/pretraining_log.txt")
os.chdir(cur_dir + "/LOG" + str(device_id))
cmd = 'taskset -c ' + cmdopt + ' nohup python ' + run_script + " "
opt = " ".join(["--" + key + "=" + str(cfg[key]) for key in cfg.keys()])
if ('device_id' in opt) or ('device_num' in opt) or ('data_dir' in opt):
raise ValueError("hyper_parameter_config.ini can not setting 'device_id',"
" 'device_num' or 'data_dir'! ")
cmd += opt
cmd += " --data_dir=" + data_dir
cmd += ' --device_id=' + str(device_id) + ' --device_num=' \
+ str(rank_size) + ' >./pretraining_log.txt 2>&1 &'
os.system(cmd)
os.chdir(cur_dir)
if __name__ == "__main__":
distribute_pretrain()
| 37.398601
| 95
| 0.6092
|
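The script above derives RANK_SIZE and the local device list purely from the hccl config JSON: it sums the device entries across server_list and picks the server whose first device_ip also appears in /etc/hccn.conf. A minimal config consistent with that parsing logic might look like the following (addresses and ids are placeholders; real rank-table files carry additional fields):

# Placeholder rank-table; only the fields read by distribute_pretrain() are shown.
# Note the values are strings, since the script exports them via os.environ.
hccl_config_example = {
    "server_list": [
        {
            "device": [
                {"device_id": "0", "device_ip": "192.168.100.101", "rank_id": "0"},
                {"device_id": "1", "device_ip": "192.168.101.101", "rank_id": "1"}
            ]
        }
    ]
}
# rank_size = 2 here, and this server is selected when one of the device_ips
# also appears as an address_<n>= entry in /etc/hccn.conf.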
794ab257259a261ababb4dfd5261e0d7a7ddf359
| 5,736
|
py
|
Python
|
superset/queries/saved_queries/api.py
|
piyush-singhal/incubator-superset
|
cf4edf73a6ad1a61e7a65f05b6e4201db3a4b6e4
|
[
"Apache-2.0"
] | 1
|
2020-10-24T06:54:41.000Z
|
2020-10-24T06:54:41.000Z
|
superset/queries/saved_queries/api.py
|
piyush-singhal/incubator-superset
|
cf4edf73a6ad1a61e7a65f05b6e4201db3a4b6e4
|
[
"Apache-2.0"
] | 1
|
2021-02-23T16:45:47.000Z
|
2021-04-24T23:15:03.000Z
|
superset/queries/saved_queries/api.py
|
piyush-singhal/incubator-superset
|
cf4edf73a6ad1a61e7a65f05b6e4201db3a4b6e4
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
from typing import Any
from flask import g, Response
from flask_appbuilder.api import expose, protect, rison, safe
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_babel import ngettext
from superset.constants import RouteMethod
from superset.databases.filters import DatabaseFilter
from superset.models.sql_lab import SavedQuery
from superset.queries.saved_queries.commands.bulk_delete import (
BulkDeleteSavedQueryCommand,
)
from superset.queries.saved_queries.commands.exceptions import (
SavedQueryBulkDeleteFailedError,
SavedQueryNotFoundError,
)
from superset.queries.saved_queries.filters import (
SavedQueryAllTextFilter,
SavedQueryFavoriteFilter,
SavedQueryFilter,
)
from superset.queries.saved_queries.schemas import (
get_delete_ids_schema,
openapi_spec_methods_override,
)
from superset.views.base_api import BaseSupersetModelRestApi, statsd_metrics
logger = logging.getLogger(__name__)
class SavedQueryRestApi(BaseSupersetModelRestApi):
datamodel = SQLAInterface(SavedQuery)
include_route_methods = RouteMethod.REST_MODEL_VIEW_CRUD_SET | {
RouteMethod.RELATED,
RouteMethod.DISTINCT,
"bulk_delete", # not using RouteMethod since locally defined
}
class_permission_name = "SavedQueryView"
resource_name = "saved_query"
allow_browser_login = True
base_filters = [["id", SavedQueryFilter, lambda: []]]
show_columns = [
"created_by.first_name",
"created_by.id",
"created_by.last_name",
"database.database_name",
"database.id",
"description",
"id",
"label",
"schema",
"sql",
"sql_tables",
]
list_columns = [
"changed_on_delta_humanized",
"created_on",
"created_by.first_name",
"created_by.id",
"created_by.last_name",
"database.database_name",
"database.id",
"db_id",
"description",
"id",
"label",
"schema",
"sql",
"sql_tables",
"rows",
"last_run_delta_humanized",
]
add_columns = ["db_id", "description", "label", "schema", "sql"]
edit_columns = add_columns
order_columns = [
"schema",
"label",
"description",
"sql",
"rows",
"created_by.first_name",
"database.database_name",
"created_on",
"changed_on_delta_humanized",
"last_run_delta_humanized",
]
search_columns = ["id", "database", "label", "schema"]
search_filters = {
"id": [SavedQueryFavoriteFilter],
"label": [SavedQueryAllTextFilter],
}
apispec_parameter_schemas = {
"get_delete_ids_schema": get_delete_ids_schema,
}
openapi_spec_tag = "Queries"
openapi_spec_methods = openapi_spec_methods_override
related_field_filters = {
"database": "database_name",
}
filter_rel_fields = {"database": [["id", DatabaseFilter, lambda: []]]}
allowed_rel_fields = {"database"}
allowed_distinct_fields = {"schema"}
def pre_add(self, item: SavedQuery) -> None:
item.user = g.user
def pre_update(self, item: SavedQuery) -> None:
self.pre_add(item)
@expose("/", methods=["DELETE"])
@protect()
@safe
@statsd_metrics
@rison(get_delete_ids_schema)
def bulk_delete(self, **kwargs: Any) -> Response:
"""Delete bulk Saved Queries
---
delete:
description: >-
Deletes multiple saved queries in a bulk operation.
parameters:
- in: query
name: q
content:
application/json:
schema:
$ref: '#/components/schemas/get_delete_ids_schema'
responses:
200:
description: Saved queries bulk delete
content:
application/json:
schema:
type: object
properties:
message:
type: string
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
item_ids = kwargs["rison"]
try:
BulkDeleteSavedQueryCommand(g.user, item_ids).run()
return self.response(
200,
message=ngettext(
"Deleted %(num)d saved query",
"Deleted %(num)d saved queries",
num=len(item_ids),
),
)
except SavedQueryNotFoundError:
return self.response_404()
except SavedQueryBulkDeleteFailedError as ex:
return self.response_422(message=str(ex))
| 30.83871
| 76
| 0.621688
|
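The q parameter in the bulk_delete docstring above is rison-encoded, so a bulk delete of saved queries 1, 2 and 3 goes out as q=!(1,2,3). A hedged sketch using requests against a local Superset, assuming an access token from the /security/login API (the /api/v1/saved_query/ prefix follows from resource_name):

# Sketch only: host, credentials and ids are placeholders.
import requests

base = "http://localhost:8088/api/v1"
token = requests.post(f"{base}/security/login", json={
    "username": "admin", "password": "admin",
    "provider": "db", "refresh": True,
}).json()["access_token"]

resp = requests.delete(
    f"{base}/saved_query/",
    params={"q": "!(1,2,3)"},  # rison-encoded list of ids
    headers={"Authorization": f"Bearer {token}"},
    # Some deployments additionally require an X-CSRFToken header here.
)
print(resp.status_code, resp.json())  # 200 -> {"message": "Deleted 3 saved queries"}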
794ab2746411301365665cbacaae817feb3617db
| 1,000
|
py
|
Python
|
NoteBooks/Curso de Python/Python/Algoritmos/Sorting/Bubble_Sort.py
|
Alejandro-sin/Learning_Notebooks
|
161d6bed4c7b1d171b45f61c0cc6fa91e9894aad
|
[
"MIT"
] | 1
|
2021-02-26T13:12:22.000Z
|
2021-02-26T13:12:22.000Z
|
NoteBooks/Curso de Python/Python/Algoritmos/Sorting/Bubble_Sort.py
|
Alejandro-sin/Learning_Notebooks
|
161d6bed4c7b1d171b45f61c0cc6fa91e9894aad
|
[
"MIT"
] | null | null | null |
NoteBooks/Curso de Python/Python/Algoritmos/Sorting/Bubble_Sort.py
|
Alejandro-sin/Learning_Notebooks
|
161d6bed4c7b1d171b45f61c0cc6fa91e9894aad
|
[
"MIT"
] | null | null | null |
'''
Easy to understand and implement.
Performance: O(n**2).
The first loop counts down from the last index to the first;
the second loop compares neighboring elements.
'''
# Bubble sort algorithm
def bubbleSort(dataset):
# TODO: start with the array length and decrement each time
for i in range(len(dataset)-1,0,-1):
for j in range(i):
            # compare neighboring elements
            if dataset[j] > dataset[j+1]:
                # temporarily store this value so it can be moved afterwards
                temp = dataset[j]
                # take the neighbor's value; this is where the swap happens
                dataset[j] = dataset[j+1]
                # hand over the value we stored temporarily
                dataset[j+1] = temp
print("Current state: ", dataset)
def main():
list1 = [6, 20, 8, 19, 56, 23, 87, 41, 49, 53]
bubbleSort(list1)
print("Result: ", list1)
if __name__ == "__main__":
main()
| 23.255814
| 79
| 0.601
|
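A standard refinement of the algorithm described above: track whether any swap happened during a pass and stop early when none did, which makes the best case O(n) on already-sorted input. A sketch, not part of the original assignment:

def bubble_sort_early_exit(dataset):
    # Same double loop as above, but bail out once a full pass makes no swaps.
    for i in range(len(dataset) - 1, 0, -1):
        swapped = False
        for j in range(i):
            if dataset[j] > dataset[j + 1]:
                dataset[j], dataset[j + 1] = dataset[j + 1], dataset[j]
                swapped = True
        if not swapped:
            break  # already sorted

data = [6, 20, 8, 19, 56, 23, 87, 41, 49, 53]
bubble_sort_early_exit(data)
print(data)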
794ab2a0426e4976d5ee0d2905f226d72bf2360a
| 190
|
py
|
Python
|
Week 6/1.py
|
ShruKin/Python-Assignments
|
f6b5d8b2e23f0d6f68d7acced43a1830c6ecebf3
|
[
"MIT"
] | null | null | null |
Week 6/1.py
|
ShruKin/Python-Assignments
|
f6b5d8b2e23f0d6f68d7acced43a1830c6ecebf3
|
[
"MIT"
] | 1
|
2019-11-16T08:40:15.000Z
|
2019-11-16T08:40:15.000Z
|
Week 6/1.py
|
ShruKin/Python-Assignments
|
f6b5d8b2e23f0d6f68d7acced43a1830c6ecebf3
|
[
"MIT"
] | null | null | null |
print("Kinjal Raykarmakar\nSec: CSE2H\tRoll: 29\n")
tup = [('item1', '12.20'), ('item2', '15.10'), ('item3', '24.50')]
tup.sort(key = lambda x: float(x[1]), reverse = True)
print(tup)
| 31.666667
| 68
| 0.589474
|
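The same ordering without mutating the list, for comparison: sorted() takes the identical key and reverse arguments and returns a new list.

# Non-mutating equivalent of the in-place sort above.
items = [('item1', '12.20'), ('item2', '15.10'), ('item3', '24.50')]
by_price_desc = sorted(items, key=lambda x: float(x[1]), reverse=True)
print(by_price_desc)  # [('item3', '24.50'), ('item2', '15.10'), ('item1', '12.20')]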
794ab2e34056e597458fcc0bfbbbdf828ee82ab6
| 3,744
|
py
|
Python
|
project/settings.py
|
lsmiley/django-ex-ess-tools
|
42ac288e67b1ec28c1acc206762c3f3dadb1dba9
|
[
"CC0-1.0"
] | null | null | null |
project/settings.py
|
lsmiley/django-ex-ess-tools
|
42ac288e67b1ec28c1acc206762c3f3dadb1dba9
|
[
"CC0-1.0"
] | null | null | null |
project/settings.py
|
lsmiley/django-ex-ess-tools
|
42ac288e67b1ec28c1acc206762c3f3dadb1dba9
|
[
"CC0-1.0"
] | null | null | null |
"""
Django settings for this project.
Generated by 'django-admin startproject' using Django 1.11.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# The SECRET_KEY is provided via an environment variable in OpenShift
SECRET_KEY = os.getenv(
'DJANGO_SECRET_KEY',
# safe value used for development when DJANGO_SECRET_KEY might not be set
'9e4@&tw46$l31)zrqe3wi+-slqm(ruvz&se0^%9#6(_w3ui!c0'
)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'debug_toolbar',
'welcome',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# from . import database
#
# DATABASES = {
# 'default': database.config()
# }
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
INTERNAL_IPS = ['127.0.0.1']
| 26
| 91
| 0.703259
|
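The SECRET_KEY pattern in the settings above (os.getenv with a development fallback) means production deployments only export the variable; the file itself never changes. A tiny sketch of the behavior, with placeholder values:

# Placeholder values; demonstrates the os.getenv fallback used in settings.py.
import os

os.environ["DJANGO_SECRET_KEY"] = "example-not-for-production"
assert os.getenv("DJANGO_SECRET_KEY", "dev-fallback") == "example-not-for-production"

del os.environ["DJANGO_SECRET_KEY"]
assert os.getenv("DJANGO_SECRET_KEY", "dev-fallback") == "dev-fallback"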