hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0bf33e21f5e815eb68a81d49aa320e1e5ede530a | 2,488 | py | Python | arvestust/serializers/tests/image.py | lehvitus/arvestust | 2d508317b744eaf12a643a398ff95723893a046a | [
"BSD-3-Clause"
] | 1 | 2021-09-17T23:45:27.000Z | 2021-09-17T23:45:27.000Z | arvestust/serializers/tests/image.py | lehvitus/arvestust | 2d508317b744eaf12a643a398ff95723893a046a | [
"BSD-3-Clause"
] | 3 | 2020-07-25T05:40:54.000Z | 2020-08-11T04:01:19.000Z | arvestust/serializers/tests/image.py | lehvitus/arvestust | 2d508317b744eaf12a643a398ff95723893a046a | [
"BSD-3-Clause"
] | null | null | null | from rest_framework import status
from rest_framework.test import APIClient, APITestCase
from django.contrib.auth import get_user_model
from ..image import Image
class ImageTestCase(APITestCase):
    """Exercises the ``/v1/images`` REST endpoints.

    Unauthenticated requests must be rejected with 403; an authenticated
    user may create, list, retrieve, update and delete images.
    """

    # Shared API client used by every test method.
    client = APIClient()

    def setUp(self):
        """Point each test at the images endpoint."""
        self.namespace = '/v1/images'

    @classmethod
    def setUpTestData(cls):
        # Two users: one to act, one as a bystander.
        user_model = get_user_model()
        cls.alice = user_model.objects.create(
            username="alice", email="alice@example.org"
        )
        cls.bob = user_model.objects.create(
            username="bob", email="bob@example.org"
        )
        # Image fixtures (left as placeholders, as in the original suite):
        # cls.image1 = Image.objects.create(...)
        # cls.image2 = Image.objects.create(...)
        # cls.image3 = Image.objects.create(...)

    # -- Authentication is required ---------------------------------------

    def test_must_authenticate_to_read_images(self):
        response = self.client.get(self.namespace)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_must_authenticate_to_create_images(self):
        response = self.client.post(self.namespace)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    # -- Allowed requests --------------------------------------------------

    def test_create_image(self):
        self.client.force_authenticate(user=self.alice)
        response = self.client.post(self.namespace, data={})
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_list_image(self):
        self.client.force_authenticate(user=self.alice)
        response = self.client.get(self.namespace)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_retrieve_image(self):
        self.client.force_authenticate(user=self.alice)
        response = self.client.get(self.namespace + '/1')
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_update_image(self):
        self.client.force_authenticate(user=self.alice)
        response = self.client.patch(self.namespace + '/1', data={})
        self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)

    def test_delete_image(self):
        self.client.force_authenticate(user=self.alice)
        response = self.client.delete(self.namespace + '/1')
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
| 33.173333 | 96 | 0.639068 |
e300b5b421daedb7935a6390064343dcb8001cd0 | 5,011 | py | Python | pymatgen/phasediagram/tests/test_pdanalyzer.py | ctoher/pymatgen | 54df358f61fbe60417e90850811b75c1a9e2e230 | [
"MIT"
] | 1 | 2015-05-18T14:31:20.000Z | 2015-05-18T14:31:20.000Z | pymatgen/phasediagram/tests/test_pdanalyzer.py | ctoher/pymatgen | 54df358f61fbe60417e90850811b75c1a9e2e230 | [
"MIT"
] | null | null | null | pymatgen/phasediagram/tests/test_pdanalyzer.py | ctoher/pymatgen | 54df358f61fbe60417e90850811b75c1a9e2e230 | [
"MIT"
] | null | null | null | # coding: utf-8
from __future__ import unicode_literals
import unittest
import os
from numbers import Number
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import Element
from pymatgen.phasediagram.pdmaker import PhaseDiagram
from pymatgen.phasediagram.pdanalyzer import PDAnalyzer
from pymatgen.phasediagram.entries import PDEntryIO, PDEntry
class PDAnalyzerTest(unittest.TestCase):
    """Tests for PDAnalyzer, built from the bundled phase-diagram CSV fixture."""

    def setUp(self):
        # Build the phase diagram from the CSV of entries that ships next to
        # this test module, then wrap it in the analyzer under test.
        module_dir = os.path.dirname(os.path.abspath(__file__))
        (elements, entries) = PDEntryIO.from_csv(os.path.join(module_dir,
                                                 "pdentries_test.csv"))
        self.pd = PhaseDiagram(entries)
        self.analyzer = PDAnalyzer(self.pd)

    def test_get_e_above_hull(self):
        # Stable entries sit on the hull (energy above hull ~ 0); every other
        # entry must be a non-negative numeric distance above it.
        for entry in self.pd.stable_entries:
            self.assertLess(self.analyzer.get_e_above_hull(entry), 1e-11,
                            "Stable entries should have e above hull of zero!")
        for entry in self.pd.all_entries:
            if entry not in self.pd.stable_entries:
                e_ah = self.analyzer.get_e_above_hull(entry)
                self.assertGreaterEqual(e_ah, 0)
                self.assertTrue(isinstance(e_ah, Number))

    def test_get_equilibrium_reaction_energy(self):
        for entry in self.pd.stable_entries:
            self.assertLessEqual(
                self.analyzer.get_equilibrium_reaction_energy(entry), 0,
                "Stable entries should have negative equilibrium reaction energy!")

    def test_get_decomposition(self):
        # A stable composition decomposes to itself only; any composition
        # decomposes into at most `dim` phases (one per component).
        for entry in self.pd.stable_entries:
            self.assertEqual(len(self.analyzer.get_decomposition(entry.composition)), 1,
                             "Stable composition should have only 1 decomposition!")
        dim = len(self.pd.elements)
        for entry in self.pd.all_entries:
            ndecomp = len(self.analyzer.get_decomposition(entry.composition))
            self.assertTrue(ndecomp > 0 and ndecomp <= dim,
                            "The number of decomposition phases can at most be equal to the number of components.")
        #Just to test decomp for a ficitious composition
        ansdict = {entry.composition.formula: amt
                   for entry, amt in
                   self.analyzer.get_decomposition(Composition("Li3Fe7O11")).items()}
        expected_ans = {"Fe2 O2": 0.0952380952380949,
                        "Li1 Fe1 O2": 0.5714285714285714,
                        "Fe6 O8": 0.33333333333333393}
        for k, v in expected_ans.items():
            self.assertAlmostEqual(ansdict[k], v)

    def test_get_transition_chempots(self):
        # There can be at most one transition chemical potential per facet.
        for el in self.pd.elements:
            self.assertLessEqual(len(self.analyzer.get_transition_chempots(el)),
                                 len(self.pd.facets))

    def test_get_element_profile(self):
        for el in self.pd.elements:
            for entry in self.pd.stable_entries:
                if not (entry.composition.is_element):
                    self.assertLessEqual(len(self.analyzer.get_element_profile(el, entry.composition)),
                                         len(self.pd.facets))

    def test_get_get_chempot_range_map(self):
        # Range map over all elements except Fe is expected to have 10 entries
        # for this fixture.
        elements = [el for el in self.pd.elements if el.symbol != "Fe"]
        self.assertEqual(len(self.analyzer.get_chempot_range_map(elements)), 10)

    def test_getmu_vertices_stability_phase(self):
        # LiFeO2's stability region against mu_O should have six vertices; one
        # of them must lie near the reference chemical potentials below.
        results = self.analyzer.getmu_vertices_stability_phase(Composition("LiFeO2"), Element("O"))
        self.assertAlmostEqual(len(results), 6)
        test_equality = False
        for c in results:
            if abs(c[Element("O")]+7.115) < 1e-2 and abs(c[Element("Fe")]+6.596) < 1e-2 and \
                    abs(c[Element("Li")]+3.931) < 1e-2:
                test_equality = True
        self.assertTrue(test_equality,"there is an expected vertex missing in the list")

    def test_getmu_range_stability_phase(self):
        results = self.analyzer.get_chempot_range_stability_phase(
            Composition("LiFeO2"), Element("O"))
        self.assertAlmostEqual(results[Element("O")][1], -4.4501812249999997)
        self.assertAlmostEqual(results[Element("Fe")][0], -6.5961470999999996)
        self.assertAlmostEqual(results[Element("Li")][0], -3.6250022625000007)

    def test_get_hull_energy(self):
        # Hull energy of a stable composition equals that entry's energy;
        # for the fractional composition it equals the energy per atom.
        for entry in self.pd.stable_entries:
            h_e = self.analyzer.get_hull_energy(entry.composition)
            self.assertAlmostEqual(h_e, entry.energy)
            n_h_e = self.analyzer.get_hull_energy(entry.composition.fractional_composition)
            self.assertAlmostEqual(n_h_e, entry.energy_per_atom)

    def test_1d_pd(self):
        # A degenerate one-element diagram still supports decomposition queries.
        entry = PDEntry('H', 0)
        pd = PhaseDiagram([entry])
        pda = PDAnalyzer(pd)
        decomp, e = pda.get_decomp_and_e_above_hull(PDEntry('H', 1))
        self.assertAlmostEqual(e, 1)
        self.assertAlmostEqual(decomp[entry], 1.0)
# Allow running this test module directly with `python test_pdanalyzer.py`.
if __name__ == '__main__':
    unittest.main()
| 43.95614 | 115 | 0.643784 |
0fbcd8f556a88ae2a109df3d7c1bf8555a42491a | 2,773 | py | Python | Banking-Inferences./code.py | arshee2403/ga-learner-dsmp-repo | f79d26ceaf38952e975404984f7d14d585727c15 | [
"MIT"
] | 2 | 2019-10-12T16:05:04.000Z | 2020-01-22T16:41:58.000Z | Banking-Inferences./code.py | arshee2403/ga-learner-dsmp-repo | f79d26ceaf38952e975404984f7d14d585727c15 | [
"MIT"
] | null | null | null | Banking-Inferences./code.py | arshee2403/ga-learner-dsmp-repo | f79d26ceaf38952e975404984f7d14d585727c15 | [
"MIT"
] | null | null | null | # --------------
import pandas as pd
import scipy.stats as stats
import math
import numpy as np
import warnings
warnings.filterwarnings('ignore')

# Sample size for the installment confidence-interval estimate.
sample_size=2000

# Z critical score.
# NOTE(review): stats.norm.ppf(q=0.95) is the one-sided 95% critical value
# (~1.645); a two-sided 95% interval would use q=0.975 (~1.96) -- confirm
# which convention the exercise expects before changing it.
z_critical = stats.norm.ppf(q = 0.95)

# path [File location variable]
# `path` is injected by the grading platform before this script runs.

#Code starts here
# Draw a fixed-seed sample, build a confidence interval for the mean
# installment, and check whether the population mean falls inside it.
data = pd.read_csv(path)
data_sample = data.sample(n=sample_size,random_state=0)
sample_mean = data_sample['installment'].mean()
sample_std = data_sample['installment'].std()
margin_of_error = z_critical*(sample_std/np.sqrt(sample_size))
confidence_interval= []
confidence_interval.append(sample_mean-margin_of_error)
confidence_interval.append(sample_mean+margin_of_error)
print(confidence_interval)
true_mean = data['installment'].mean()
print(true_mean)
if true_mean>confidence_interval[0] and true_mean<confidence_interval[1]:
    print('Yes')
else:
    print('NO')


# --------------
import matplotlib.pyplot as plt
import numpy as np

# Different sample sizes to take.
sample_size=np.array([20,50,100])

#Code starts here
# Central Limit Theorem demo: for each sample size, plot the distribution of
# 1000 sample means of the installment column (one subplot per sample size).
fig,axes = plt.subplots(nrows = 3 , ncols = 1)
for i in range(len(sample_size)):
    m=[]
    for j in range(1000):
        data_sample = data['installment'].sample(n=sample_size[i])
        m.append(data_sample.mean())
    mean_series = pd.Series(m)
    print(mean_series)
    axes[i].plot(mean_series)


# --------------
#Importing header files
from statsmodels.stats.weightstats import ztest

#Code starts here
# Clean the interest-rate column ("12.5%" -> 0.125), then run a one-sided
# z-test: are small-business loan rates higher than the overall mean rate?
data['int.rate'] = (data['int.rate'].str.rstrip('%')).astype(float)
data['int.rate'] = data['int.rate']/100
z_statistic,p_value = ztest(data[data['purpose']=='small_business']['int.rate'],value=data['int.rate'].mean(),alternative='larger')
if p_value<0.05:
    print('Reject')
else:
    print('Accept')


# --------------
#Importing header files
from statsmodels.stats.weightstats import ztest

#Code starts here
# Two-sample z-test: do installment amounts differ between borrowers who did
# not pay back and those who did?
z_statistic,p_value = ztest(data[data['paid.back.loan']=='No']['installment'],data[data['paid.back.loan']=='Yes']['installment'])
if p_value<0.05:
    print('Reject')
else:
    print('Accept')


# --------------
#Importing header files
from scipy.stats import chi2_contingency

#Critical value
critical_value = stats.chi2.ppf(q = 0.95, # Find the critical value for 95% confidence*
                      df = 6)                 # Df = number of variable categories(in purpose) - 1

#Code starts here
# Chi-square test of independence between loan purpose and repayment status,
# using an observed contingency table of purpose counts per repayment class.
yes = data[data['paid.back.loan']=='Yes']['purpose'].value_counts()
no = data[data['paid.back.loan']=='No']['purpose'].value_counts()
observed = pd.concat([yes.transpose(),no.transpose()],axis=1,keys=['Yes','No'])
chi2, p, dof, ex = chi2_contingency(observed)
if chi2>critical_value:
    print('Reject')
else:
    print('Accept')
| 26.160377 | 132 | 0.670754 |
29684ff87d37655a0314a1763ed497e49f442f63 | 3,583 | py | Python | app/account/forms.py | abhisuri97/mhealth | 2d74c9c8298d097f86090e3365652d858c682ffa | [
"MIT"
] | 2 | 2020-01-19T11:39:26.000Z | 2020-02-11T12:19:46.000Z | app/account/forms.py | abhisuri97/mhealth | 2d74c9c8298d097f86090e3365652d858c682ffa | [
"MIT"
] | 2 | 2018-02-15T13:42:52.000Z | 2021-06-02T03:45:33.000Z | app/account/forms.py | abhisuri97/mhealth | 2d74c9c8298d097f86090e3365652d858c682ffa | [
"MIT"
] | 2 | 2020-10-20T05:42:42.000Z | 2022-03-18T13:27:35.000Z | from flask import url_for
from flask_wtf import Form
from wtforms import ValidationError
from wtforms.fields import (BooleanField, PasswordField, StringField,
SubmitField)
from wtforms.fields.html5 import EmailField
from wtforms.validators import Email, EqualTo, InputRequired, Length
from ..models import User
class LoginForm(Form):
    """Sign-in form: email + password, with a "remember me" option."""

    email = EmailField(
        'Email', validators=[InputRequired(), Length(1, 64), Email()])
    password = PasswordField('Password', validators=[InputRequired()])
    remember_me = BooleanField('Keep me logged in')
    submit = SubmitField('Log in')
class RegistrationForm(Form):
    """New-account form: name, email, and a double-entry password."""

    first_name = StringField(
        'First name', validators=[InputRequired(), Length(1, 64)])
    last_name = StringField(
        'Last name', validators=[InputRequired(), Length(1, 64)])
    email = EmailField(
        'Email', validators=[InputRequired(), Length(1, 64), Email()])
    password = PasswordField(
        'Password',
        validators=[
            InputRequired(), EqualTo('password2', 'Passwords must match')
        ])
    password2 = PasswordField('Confirm password', validators=[InputRequired()])
    submit = SubmitField('Register')

    def validate_email(self, field):
        # WTForms calls validate_<fieldname> hooks automatically; reject
        # addresses that already have an account and point at the login page.
        if User.query.filter_by(email=field.data).first():
            raise ValidationError('Email already registered. (Did you mean to '
                                  '<a href="{}">log in</a> instead?)'
                                  .format(url_for('account.login')))
class RequestResetPasswordForm(Form):
    """Step 1 of password reset: ask for the account's email address."""

    email = EmailField(
        'Email', validators=[InputRequired(), Length(1, 64), Email()])
    submit = SubmitField('Reset password')

    # We don't validate the email address so we don't confirm to attackers
    # that an account with the given email exists.
class ResetPasswordForm(Form):
    """Step 2 of password reset: the account email plus the new password (twice)."""

    email = EmailField(
        'Email', validators=[InputRequired(), Length(1, 64), Email()])
    new_password = PasswordField(
        'New password',
        validators=[
            InputRequired(), EqualTo('new_password2', 'Passwords must match.')
        ])
    new_password2 = PasswordField(
        'Confirm new password', validators=[InputRequired()])
    submit = SubmitField('Reset password')

    def validate_email(self, field):
        # A reset is only meaningful for an existing account, so reject
        # unknown addresses here.
        if User.query.filter_by(email=field.data).first() is None:
            raise ValidationError('Unknown email address.')
class CreatePasswordForm(Form):
    """Set an initial password (entered twice); no old password is required."""

    password = PasswordField(
        'Password',
        validators=[
            InputRequired(), EqualTo('password2', 'Passwords must match.')
        ])
    password2 = PasswordField(
        'Confirm new password', validators=[InputRequired()])
    submit = SubmitField('Set password')
class ChangePasswordForm(Form):
    """Change password for a logged-in user: old password plus new (twice)."""

    old_password = PasswordField('Old password', validators=[InputRequired()])
    new_password = PasswordField(
        'New password',
        validators=[
            InputRequired(), EqualTo('new_password2', 'Passwords must match.')
        ])
    new_password2 = PasswordField(
        'Confirm new password', validators=[InputRequired()])
    submit = SubmitField('Update password')
class ChangeEmailForm(Form):
    """Change the account email; the current password must be supplied."""

    email = EmailField(
        'New email', validators=[InputRequired(), Length(1, 64), Email()])
    password = PasswordField('Password', validators=[InputRequired()])
    submit = SubmitField('Update email')

    def validate_email(self, field):
        # The new address must not already belong to an existing account.
        if User.query.filter_by(email=field.data).first():
            raise ValidationError('Email already registered.')
| 35.83 | 79 | 0.653084 |
3be67eb9fcb7657c9a1a93535e6db87a450314d8 | 893 | py | Python | pcg_gazebo/parsers/sdf/empty.py | TForce1/pcg_gazebo | 9ff88016b7b6903236484958ca7c6ed9f8ffb346 | [
"ECL-2.0",
"Apache-2.0"
] | 40 | 2020-02-04T18:16:49.000Z | 2022-02-22T11:36:34.000Z | pcg_gazebo/parsers/sdf/empty.py | awesomebytes/pcg_gazebo | 4f335dd460ef7c771f1df78b46a92fad4a62cedc | [
"ECL-2.0",
"Apache-2.0"
] | 75 | 2020-01-23T13:40:50.000Z | 2022-02-09T07:26:01.000Z | pcg_gazebo/parsers/sdf/empty.py | GimpelZhang/gazebo_world_generator | eb7215499d0ddc972d804c988fadab1969579b1b | [
"ECL-2.0",
"Apache-2.0"
] | 18 | 2020-09-10T06:35:41.000Z | 2022-02-20T19:08:17.000Z | # Copyright (c) 2019 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..types import XMLString
class Empty(XMLString):
    """SDF ``<empty>`` element: a tag that never carries any content."""

    _NAME = 'empty'
    _TYPE = 'sdf'

    def __init__(self):
        # An <empty> element is always initialized with an empty string value.
        super(Empty, self).__init__('')

    def _set_value(self, value):
        # Intentionally a no-op: <empty> ignores any value assigned to it.
        pass
| 31.892857 | 74 | 0.730123 |
83fad4a92050cd1dc8644bb1ecc11f08cf1e1244 | 11,989 | py | Python | arroyo/synchronized.py | horpto/arroyo | 53410f292ff7a26170c6feacee3a514541d0b4a8 | [
"Apache-2.0"
] | 11 | 2021-07-13T17:20:49.000Z | 2022-03-25T05:46:59.000Z | arroyo/synchronized.py | horpto/arroyo | 53410f292ff7a26170c6feacee3a514541d0b4a8 | [
"Apache-2.0"
] | 12 | 2021-06-15T20:04:36.000Z | 2022-03-31T23:43:30.000Z | arroyo/synchronized.py | horpto/arroyo | 53410f292ff7a26170c6feacee3a514541d0b4a8 | [
"Apache-2.0"
] | 1 | 2022-01-28T15:44:03.000Z | 2022-01-28T15:44:03.000Z | import logging
from dataclasses import dataclass
from datetime import datetime
from threading import Event
from typing import Callable, Mapping, MutableMapping, Optional, Sequence, Set
from arroyo.backends.abstract import Consumer
from arroyo.backends.kafka import KafkaPayload
from arroyo.errors import ConsumerError, EndOfPartition
from arroyo.types import Message, Partition, Position, Topic, TPayload
from arroyo.utils.codecs import Codec
from arroyo.utils.concurrent import Synchronized, execute
logger = logging.getLogger(__name__)
DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"
@dataclass(frozen=True)
class Commit:
    """One committed offset, as published to (or read from) the commit log topic."""

    __slots__ = ["group", "partition", "offset", "orig_message_ts"]

    group: str  # consumer group that committed the offset
    partition: Partition  # partition the offset applies to
    offset: int  # the committed offset itself
    orig_message_ts: Optional[datetime]  # timestamp of the original message, if known
class CommitCodec(Codec[KafkaPayload, Commit]):
    """Serializes ``Commit`` records to/from the commit-log Kafka payload format.

    The payload key is ``"<topic>:<partition-index>:<group>"``, the value is
    the committed offset rendered as a decimal string, and the original
    message timestamp travels in the ``orig_message_ts`` header.
    """

    def encode(self, value: Commit) -> KafkaPayload:
        # The header format requires a concrete timestamp; commits without
        # one cannot be encoded.
        assert value.orig_message_ts is not None
        return KafkaPayload(
            f"{value.partition.topic.name}:{value.partition.index}:{value.group}".encode(
                "utf-8"
            ),
            f"{value.offset}".encode("utf-8"),
            [
                (
                    "orig_message_ts",
                    datetime.strftime(value.orig_message_ts, DATETIME_FORMAT).encode(
                        "utf-8"
                    ),
                )
            ],
        )

    def decode(self, value: KafkaPayload) -> Commit:
        """Parse a commit-log payload back into a ``Commit``.

        Raises ``TypeError`` if the payload key or value is not ``bytes``.
        """
        key = value.key
        if not isinstance(key, bytes):
            raise TypeError("payload key must be a bytes object")

        val = value.value
        if not isinstance(val, bytes):
            raise TypeError("payload value must be a bytes object")

        headers = {k: v for (k, v) in value.headers}
        try:
            orig_message_ts: Optional[datetime] = datetime.strptime(
                headers["orig_message_ts"].decode("utf-8"), DATETIME_FORMAT
            )
        except KeyError:
            # Older producers did not attach the timestamp header.
            orig_message_ts = None

        # Split into exactly three fields.  maxsplit=2 (rather than 3) keeps
        # any ":" characters inside the consumer group name attached to the
        # group field; maxsplit=3 would produce a fourth fragment for such
        # keys and raise ValueError on unpacking.
        topic_name, partition_index, group = key.decode("utf-8").split(":", 2)
        offset = int(val.decode("utf-8"))

        return Commit(
            group,
            Partition(Topic(topic_name), int(partition_index)),
            offset,
            orig_message_ts,
        )


commit_codec = CommitCodec()
class SynchronizedConsumer(Consumer[TPayload]):
    """
    This class implements a consumer that can only consume messages that
    have already been consumed and committed by one or more other consumer
    groups.

    The consumer groups that are being "followed" are required to publish
    their offsets to a shared commit log topic. The advancement of the
    offsets for these consumer groups in the commit log topic controls
    whether or not the local consumer is allowed to consume messages from its
    assigned partitions. (This commit log topic works similarly to/was
    inspired by/essentially duplicates the contents of the Kafka built-in
    ``__consumer_offsets`` topic, which seems to be intended to be a private
    API of the Kafka system based on the lack of external documentation.)

    It is important to note that since the local consumer is only allowed
    to consume messages that have been consumed and committed by all of the
    members of the referenced consumer groups, this consumer can only consume
    messages as fast as the slowest consumer (or in other words, the most
    latent or lagging consumer) for each partition. If one of these consumers
    stops consuming messages entirely, this consumer will also stop making
    progress in those partitions.
    """

    def __init__(
        self,
        consumer: Consumer[TPayload],
        commit_log_consumer: Consumer[KafkaPayload],
        commit_log_topic: Topic,
        commit_log_groups: Set[str],
    ) -> None:
        self.__consumer = consumer
        self.__commit_log_consumer = commit_log_consumer
        self.__commit_log_topic = commit_log_topic
        self.__commit_log_groups = commit_log_groups

        # Latest observed committed offset per followed group, per partition.
        # Wrapped in Synchronized because it is written by the commit log
        # worker thread and read from poll().
        self.__remote_offsets: Synchronized[
            Mapping[str, MutableMapping[Partition, int]]
        ] = Synchronized({group: {} for group in commit_log_groups})

        self.__commit_log_worker_stop_requested = Event()
        self.__commit_log_worker_subscription_received = Event()
        # Background task that tails the commit log topic (see
        # __run_commit_log_worker below).
        self.__commit_log_worker = execute(self.__run_commit_log_worker)

        logger.debug("Waiting for commit log consumer to receieve assignment...")
        while not self.__commit_log_worker_subscription_received.wait(0.1):
            # Check to make sure we're not waiting for an event that will never
            # happen if the commit log consumer has crashed.
            if not self.__commit_log_worker.running():
                self.__commit_log_worker.result()
        else:
            logger.debug("Commit log consumer has started.")

        # The set of partitions that have been paused by the caller/user. This
        # takes precedence over whether or not the partition should be paused
        # due to offset synchronization.
        self.__paused: Set[Partition] = set()

    def __run_commit_log_worker(self) -> None:
        # Runs in the background: tails the commit log topic and records each
        # followed group's committed offsets into __remote_offsets.
        # TODO: This needs to roll back to the initial offset.
        # TODO: This needs to ensure that it is subscribed to all partitions.

        def assignment_callback(offsets: Mapping[Partition, int]) -> None:
            logger.debug("Commit log consumer received assignment: %r", offsets)
            self.__commit_log_worker_subscription_received.set()

        self.__commit_log_consumer.subscribe(
            [self.__commit_log_topic], on_assign=assignment_callback
        )

        while not self.__commit_log_worker_stop_requested.is_set():
            try:
                message = self.__commit_log_consumer.poll(0.1)
            except EndOfPartition:
                continue

            if message is None:
                continue

            commit = commit_codec.decode(message.payload)
            if commit.group not in self.__commit_log_groups:
                continue

            with self.__remote_offsets.get() as remote_offsets:
                # NOTE: This will store data about partitions that are not
                # actually part of the subscription or assignment. This
                # approach (potentially) requires more memory and locking
                # overhead (due to writing state for partitions that are not
                # subscribed or assigned), but amortizes the cost of the
                # initial load of the topic and makes the implementation
                # quite a bit simpler.
                remote_offsets[commit.group][commit.partition] = commit.offset

        self.__commit_log_consumer.close()

    def __check_commit_log_worker_running(self) -> None:
        # Surface a crash or unexpected exit of the background worker as a
        # RuntimeError in the calling (polling) thread.
        if not self.closed and not self.__commit_log_worker.running():
            try:
                self.__commit_log_worker.result()
            except Exception as e:
                raise RuntimeError("commit log consumer thread crashed") from e
            else:
                raise RuntimeError("commit log consumer thread unexpectedly exited")

    def subscribe(
        self,
        topics: Sequence[Topic],
        on_assign: Optional[Callable[[Mapping[Partition, int]], None]] = None,
        on_revoke: Optional[Callable[[Sequence[Partition]], None]] = None,
    ) -> None:
        """Subscribe the wrapped consumer, clearing user pauses on (re)assignment
        and revocation before delegating to the caller's callbacks."""

        def assignment_callback(offsets: Mapping[Partition, int]) -> None:
            for partition in offsets:
                self.__paused.discard(partition)

            if on_assign is not None:
                on_assign(offsets)

        def revocation_callback(partitions: Sequence[Partition]) -> None:
            for partition in partitions:
                self.__paused.discard(partition)

            if on_revoke is not None:
                on_revoke(partitions)

        return self.__consumer.subscribe(
            topics, on_assign=assignment_callback, on_revoke=revocation_callback
        )

    def unsubscribe(self) -> None:
        return self.__consumer.unsubscribe()

    def poll(self, timeout: Optional[float] = None) -> Optional[Message[TPayload]]:
        """Poll the wrapped consumer, withholding any message the followed
        groups have not yet committed past (the partition is paused and the
        offset rolled back instead)."""
        self.__check_commit_log_worker_running()

        # Resume any partitions that can be resumed (where the local
        # offset is less than the remote offset.)
        resume_candidates = set(self.__consumer.paused()) - self.__paused
        if resume_candidates:
            local_offsets = self.tell()
            resume_partitions = []

            with self.__remote_offsets.get() as remote_offsets:
                for partition in resume_candidates:
                    # The effective remote offset is the minimum across all
                    # followed groups (the most lagging one wins).
                    remote_offset = min(
                        (
                            offsets.get(partition, 0)
                            for offsets in remote_offsets.values()
                        ),
                        default=0,
                    )
                    if remote_offset > local_offsets[partition]:
                        resume_partitions.append(partition)

            if resume_partitions:
                self.__consumer.resume(resume_partitions)

        # We don't need to explicitly handle ``EndOfPartition`` here -- even if
        # we receive the next message before the leader, we will roll back our
        # offsets and wait for the leader to advance.
        message = self.__consumer.poll(timeout)
        if message is None:
            return None

        with self.__remote_offsets.get() as remote_offsets:
            remote_offset = min(
                (
                    offsets.get(message.partition, 0)
                    for offsets in remote_offsets.values()
                ),
                default=0,
            )

        # Check to make sure the message does not exceed the remote offset. If
        # it does, pause the partition and seek back to the message offset.
        if message.offset >= remote_offset:
            self.__consumer.pause([message.partition])
            self.__consumer.seek({message.partition: message.offset})
            return None

        return message

    def pause(self, partitions: Sequence[Partition]) -> None:
        """Record a user-requested pause; user pauses take precedence over
        synchronization-driven resumes."""
        if self.closed:
            raise RuntimeError("consumer is closed")

        if set(partitions) - self.tell().keys():
            raise ConsumerError("cannot pause unassigned partitions")

        for partition in partitions:
            self.__paused.add(partition)

        self.__consumer.pause(partitions)

    def resume(self, partitions: Sequence[Partition]) -> None:
        """Lift a user pause; the underlying resume happens on the next poll."""
        if self.closed:
            raise RuntimeError("consumer is closed")

        if set(partitions) - self.tell().keys():
            raise ConsumerError("cannot resume unassigned partitions")

        # Partitions are not actually resumed by the inner consumer immediately
        # upon calling this method. Instead, any partitions that are able to be
        # resumed will be resumed at the start of the next ``poll`` call.
        for partition in partitions:
            self.__paused.discard(partition)

    def paused(self) -> Sequence[Partition]:
        # Only user-requested pauses are reported, not synchronization pauses.
        return [*self.__paused]

    def tell(self) -> Mapping[Partition, int]:
        return self.__consumer.tell()

    def seek(self, offsets: Mapping[Partition, int]) -> None:
        return self.__consumer.seek(offsets)

    def stage_positions(self, positions: Mapping[Partition, Position]) -> None:
        return self.__consumer.stage_positions(positions)

    def commit_positions(self) -> Mapping[Partition, Position]:
        return self.__consumer.commit_positions()

    def close(self, timeout: Optional[float] = None) -> None:
        # TODO: Be careful to ensure there are not any deadlock conditions
        # here. Should this actually wait for the commit log worker?
        self.__commit_log_worker_stop_requested.set()
        return self.__consumer.close(timeout)

    @property
    def closed(self) -> bool:
        return self.__consumer.closed
| 38.674194 | 89 | 0.638502 |
afdbc1c04f9de0aa381aa3418b3fafd59992ae25 | 1,092 | py | Python | db_test.py | fga-eps-mds/2020-2-eps-mds-Parlamentaqui-db-G6 | fb15975c3f32b297bf9ae53e8f4bd29f1150f92e | [
"MIT"
] | null | null | null | db_test.py | fga-eps-mds/2020-2-eps-mds-Parlamentaqui-db-G6 | fb15975c3f32b297bf9ae53e8f4bd29f1150f92e | [
"MIT"
] | null | null | null | db_test.py | fga-eps-mds/2020-2-eps-mds-Parlamentaqui-db-G6 | fb15975c3f32b297bf9ae53e8f4bd29f1150f92e | [
"MIT"
] | null | null | null | import os
from mongoengine import *
from dotenv import load_dotenv
# Load MongoDB connection settings from the environment (.env file).
load_dotenv()

DB_USERNAME = os.getenv('DB_USERNAME')
DB_PASSWORD = os.getenv('DB_PASSWORD')
DB_HOST = os.getenv('DB_HOST')
DB_PORT = os.getenv('DB_PORT')
DB_NAME = os.getenv('DB_NAME')

# Connect via mongoengine, authenticating against the admin database.
connect(DB_NAME, host=f'mongodb://{DB_USERNAME}:{DB_PASSWORD}@{DB_HOST}:{DB_PORT}/{DB_NAME}?authSource=admin')
class Deputy(Document):
    """MongoDB document describing a deputy and their legislature terms."""

    # Externally assigned deputy id, used as the primary key.
    id = IntField(primary_key=True)
    name = StringField(required=True)
    photo_url = StringField()
    initial_legislature_id = IntField(required=True)
    final_legislature_id = IntField()
    initial_legislature_year = IntField(required=True)
    final_legislature_year = IntField()
    last_activity_date = DateTimeField()
    full_name = StringField()
    sex = StringField()
    email = StringField()
    birth_date = DateTimeField()
    death_date = DateTimeField()
    federative_unity = StringField()
    party = StringField()
    instagram_username = StringField()
    twitter_username = StringField()
    facebook_username = StringField()
# Smoke test: print the names of all deputies currently stored.
for item in Deputy.objects:
    print(item.name)
dd6f1340db901f0b753ce5561d5fce44a8870f4c | 4,843 | py | Python | projects/Unstructured Supplemenrary Service Data/ussdtim.py | PickCot/LR_3 | 1f32222e3e13429a36162888612d8d1bb4b85bf2 | [
"MIT"
] | null | null | null | projects/Unstructured Supplemenrary Service Data/ussdtim.py | PickCot/LR_3 | 1f32222e3e13429a36162888612d8d1bb4b85bf2 | [
"MIT"
] | null | null | null | projects/Unstructured Supplemenrary Service Data/ussdtim.py | PickCot/LR_3 | 1f32222e3e13429a36162888612d8d1bb4b85bf2 | [
"MIT"
] | null | null | null | import time
import sys
print('Welcome To fastrack USSD Banking Project...')
time.sleep(8)

# Menu of supported banks, shown when transferring funds to other banks.
bank_list="""
1. Access Bank
2. Fidelity Bank
3. Guarantee Trust Bank
4. Heritage Bank
5. Polaris Bank
6. Stanbic IBTC
7. Unity Bank
8. Wema Bank
"""
# Last generated BVN; populated by BVN_checker() and read by open_acct().
gen_bvn = " "
def BVN_checker():
    """Generate a 5-digit BVN string.

    Stores the value in the module-level ``gen_bvn`` (read by ``open_acct``)
    and also returns it for convenience.  The previous implementation built
    the digits from ``range(5)``, so every "generated" BVN was the constant
    "01234"; random digits are drawn instead.
    """
    import random  # local import keeps the module's import block unchanged

    global gen_bvn
    gen_bvn = "".join(str(random.randint(0, 9)) for _ in range(5))
    return gen_bvn
def open_acct():
    """Interactive account-opening flow: collect names and sex, issue a BVN."""
    global gen_bvn
    print("Welcome to our online Account opening services.")
    print("loading...")
    # Collect the applicant's details in order: first name, second name,
    # sex, then the freshly generated BVN.
    first = input("Enter your first name:")
    second = input("Enter your second name:")
    applicant_sex = input("Enter sex [M/F]:")
    BVN_checker()
    record = [first, second, applicant_sex, gen_bvn]
    # Round-trip through a single space-joined string (note: a name that
    # itself contains spaces shifts the fields, exactly as before).
    fields = " ".join(record).split(" ")
    print(fields[0] + " " + fields[1])
    print(fields[2])
    print("Your bvn is :" + fields[3])
    print("1. Press # to go back to options menu\n2. Press * to exit")
    choice = input(":")
    if choice == '#':
        options_menu()
    else:
        sys.exit()
    exit()
def upgrade_migrate():
    """Upgrade/Migrate sub-menu: 1 = upgrade, 2 = migrate, # = main menu."""
    print("Welcome to our online Upgrade/Migration services.\n 1. Ugrade\n 2. Migrate")
    print("press # is go back to the Main Menu.")
    selection = input("Enter preferred Choice:")
    if selection in ("1", "2"):
        time.sleep(5)
        print("Upgrading..." if selection == "1" else "Migrating...")
        exit()
    elif selection == "#":
        options_menu()
    else:
        sys.exit()
def balance ( ):
    # Account-balance flow: ask for the 4-digit PIN, then "look up" the balance.
    print("ACCOUNT\tBALANCE\n CHECKER")
    print("press # is go back to the Main Menu.")
    pin=input("Enter your 4 digit pin:")
    # isdigit( ) is used to check for digits within a str while the nested if is used to make sure the user inputs 4 digits.
    ###```i am to put the pin trial in a while loop```###REMINDER!!!
    if len(pin)!=4:
        print("Make sure its a 4digit pin.")
        time.sleep(5)
        # NOTE(review): retries via recursion -- repeated bad input grows the
        # call stack; the REMINDER above about using a while loop still stands.
        balance( )
    else:
        if pin.isdigit( ):
            time.sleep(5)
            print("Loading...")
            exit( )
        elif pin== "#":
            # NOTE(review): unreachable -- "#" has length 1, so the len check
            # above re-prompts before this branch can ever run.
            options_menu( )
        else:
            time.sleep(15)
            print("wrong pin")
            sys.exit( )
def transf( ):
    # Transfer menu: 1 = transfer to self, 2 = transfer to another number.
    print("1. Transfer self\n2. Transfer others")
    print("press # is go back to the Main Menu.")
    trnsf=input(":")
    if trnsf == "#" :
        options_menu( )
    elif trnsf == "1":
        time.sleep(5)
        print("Sending...")
        exit( )
    elif trnsf=="2":
        time.sleep(5)
        num=int(input("Enter receivers mobile number:"))
        print("Transferring to",num)
        exit( )
    else:
        # Anything else is invalid; classify it for a tailored message.
        if trnsf.isdigit( )!= True:
            time.sleep(5)
            print("Not an option")
            sys.exit( )
        elif trnsf.isdigit( ) and len(trnsf)>2:
            time.sleep( 5)
            # NOTE(review): "wrong password." looks like a copy/paste slip --
            # this branch handles an over-long menu choice, not a password.
            print("wrong password.")
            sys.exit( )
        else:
            time.sleep(10)
            print("An error has occurred")
            sys.exit( )
def funds():
    """Prompt for a recipient bank and account number, then simulate sending.

    "#" returns to the options menu, "*" asks about another transaction,
    any other input terminates the program.
    """
    time.sleep(3)
    # NOTE(review): bank_list is expected to be defined at module level.
    print(bank_list)
    bnk = input("Select receipients Bank:")
    # Fixed user-facing typo: "Entet" -> "Enter".
    acc_num = input("Enter account number:")
    print("Sending to", acc_num)
    # Renamed from `hash`, which shadowed the builtin hash().
    nav = input("1.Press # to go back to options menu\n2. Press * to go exit.")
    if nav == "#":
        options_menu()
    elif nav == "*":
        exit()  # module-level exit(): asks about another transaction
    else:
        sys.exit()
#--------------------------------------------------
###i'm yet to catch an error for non -digit and more than one digit###REMINDER!!! #-#------------------------------------------------------
# This is the function for options.
def options_menu():
    """Print the main options menu and dispatch to the chosen service.

    Any input that is not a key of the dispatch table terminates the program.
    """
    print("1. Open Account\n2. Upgrade/Migrate\n3. Balance\n4. Transfer\n5. Funds")
    select_options = {
        '1': open_acct,
        '2': upgrade_migrate,
        '3': balance,
        '4': transf,
        '5': funds,
    }
    choice = input("Enter an option:")
    # Single .get() lookup instead of a truthiness test followed by a
    # second [] lookup (also correct even if a handler were falsy).
    action = select_options.get(choice)
    if action is not None:
        action()
    else:
        sys.exit()
# This is the function which prompts the user as to whether the user wishes to continue or stop transaction.
def exit():
    """Ask whether the user wants another transaction.

    "N" terminates, "#" returns to the options menu, anything else restarts
    the login flow. NOTE: this module-level exit() deliberately shadows the
    builtin exit().
    """
    # Renamed the local from `exit` (which shadowed this very function)
    # to `choice`.
    choice = input("Do you wish to make another transaction [Y/N] :")
    if choice == "N":
        sys.exit()
    elif choice == "#":
        options_menu()
    else:
        log_in()
# This is the function for logging using the fast code *919#
def log_in():
    """Prompt for the USSD fast code (*919#) with at most three attempts.

    On a correct code the options menu is shown; once all three attempts
    fail, an error message is printed. Unexpected exceptions terminate the
    program.
    """
    try:
        attempts = 0
        while attempts < 3:
            attempts += 1
            ussd = input("ENTER USSD:")
            if ussd != "*919#":
                print("please re-enter USSD ...")
            else:
                print("Welcome to our online services how may we help you")
                options_menu()
                exit()  # module-level exit(): asks about another transaction
                # BUG FIX: without this break the loop kept prompting after a
                # successful session and the while/else branch below ran too.
                break
        else:
            # Runs only when the loop exhausted all attempts without a break.
            time.sleep(10)
            print("checking discrepancies...")
            time.sleep(5)
            print("An error has occurred.")
    except Exception:  # narrowed from a bare except so Ctrl-C still works
        sys.exit()
# Script entry point: start the USSD login loop.
log_in( )
f6bfb48bc92e60e34d2379c5ca38d3fe3ed8f667 | 23,530 | py | Python | utils/geoutils.py | thinkingmachines/geoai-immap | 99634812602db6ceec9f55bfbacbd917eec2c407 | [
"MIT"
] | 23 | 2020-08-03T06:27:12.000Z | 2022-03-07T17:44:37.000Z | utils/geoutils.py | thinkingmachines/geoai-immap | 99634812602db6ceec9f55bfbacbd917eec2c407 | [
"MIT"
] | null | null | null | utils/geoutils.py | thinkingmachines/geoai-immap | 99634812602db6ceec9f55bfbacbd917eec2c407 | [
"MIT"
] | 5 | 2020-08-07T03:52:24.000Z | 2022-01-19T15:44:09.000Z | import os
import json
import itertools
import numpy as np
import pandas as pd
from tqdm import tqdm
from pathlib import Path
import subprocess
import matplotlib.pyplot as plt
import geopandas as gpd
import rasterio as rio
from rasterio.windows import (
Window,
transform
)
from rasterio import features
import rasterio.mask
from rasterio.plot import show
from fiona.crs import to_string
GRID_ID = 1
def write_indices(area_dict, area, indices_dir):
    """Compute derived spectral indices for every image of *area* and write
    each year out as a 10-band GeoTIFF.

    For each yearly image in ``area_dict[area]['images']`` the raw bands are
    read, the ten derived indices (NDVI, NDBI, SAVI, MNDWI, UI, NBI, BRBA,
    NBAI, MBI, BAEI) are computed, and the result is written to
    ``indices_dir`` as ``indices_<area>_<year>.tif``; the path is appended
    to ``area_dict[area]['indices']``.

    Args:
        area_dict (dict) : Python dictionary containing the file paths per area
        area (str) : The area of interest (AOI)
        indices_dir (str) : Output directory (expects a trailing separator)

    Returns:
        area_dict (dict) : The input dictionary with the new index paths added
    """
    subdata = {}
    image_list = area_dict[area]['images']

    # Iterate over each year
    for image_file in tqdm(image_list, total=len(image_list)):
        year = image_file.split('_')[-1].split('.')[0]

        # Read each band into a flat array keyed "B1".."Bn"
        src = rio.open(image_file)
        for band_idx in range(src.count):
            band = src.read(band_idx + 1).ravel()
            subdata['B{}'.format(band_idx + 1)] = band

        # Get derived indices
        subdata["ndvi"] = ndvi(subdata)
        subdata["ndbi"] = ndbi(subdata)
        subdata["savi"] = savi(subdata)
        subdata["mndwi"] = mndwi(subdata)
        subdata["ui"] = ui(subdata)
        subdata["nbi"] = nbi(subdata)
        subdata["brba"] = brba(subdata)
        subdata["nbai"] = nbai(subdata)
        subdata["mbi"] = mbi(subdata)
        subdata["baei"] = baei(subdata)

        # Reshape every entry (bands and indices) back to the raster grid
        for index in subdata:
            subdata[index] = subdata[index].reshape(
                (src.height, src.width)
            ).astype(np.float64)

        output_file = indices_dir + 'indices_' + area + '_' + year + '.tif'
        area_dict[area]['indices'].append(output_file)

        # Single metadata dict; the original assigned src.meta twice and set
        # "count" twice (1 then 10) -- ten index bands are written below.
        out_meta = src.meta
        out_meta.update({
            "driver": "GTiff",
            "height": src.height,
            "width": src.width,
            "count": 10,
            'nodata': -1,
            "dtype": np.float64
        })

        # rio.open (not rasterio.open): the module is imported as `rio`; the
        # bare `rasterio` name only resolved via the `import rasterio.mask`
        # side effect.
        with rio.open(output_file, 'w', **out_meta, compress='deflate') as dst:
            dst.write(subdata["ndvi"], 1)
            dst.write(subdata["ndbi"], 2)
            dst.write(subdata["savi"], 3)
            dst.write(subdata["mndwi"], 4)
            dst.write(subdata["ui"], 5)
            dst.write(subdata["nbi"], 6)
            dst.write(subdata["brba"], 7)
            dst.write(subdata["nbai"], 8)
            dst.write(subdata["mbi"], 9)
            dst.write(subdata["baei"], 10)

    return area_dict
def save_predictions_window(pred, image_src, output_file, window, tfm):
    """
    Saves the predictions as a TIFF file, using image_src as reference.

    Args:
        pred (numpy array) : The array containing the predictions
        image_src (str) : Path to the source image to be used as a reference file
        output_file (str) : Path of the single-band GeoTIFF to write
        window (rasterio.windows.Window) : Window whose height/width shape the output
        tfm : Affine transform georeferencing the window

    Returns:
        None
    """

    with rio.open(image_src) as src:
        # Reshape the flat prediction vector back onto the window's grid
        out_image = np.array(pred).reshape(
            (window.height,window.width)
        )
        out_meta = src.meta
        out_meta.update({
            "driver": "GTiff",
            "height": window.height,
            "width": window.width,
            "count": 1,
            'nodata': -1,
            "dtype": np.float64,
            "transform": tfm
        })

    with rio.open(output_file, "w", **out_meta, compress='deflate') as dest:
        dest.write(out_image, 1)
def rename_ind_cols(df):
    """
    Renames columns according to column names used by model
    """
    ind_dict = {
        'I1': "ndvi",
        'I2': "ndbi",
        'I3': "savi",
        'I4': "mndwi",
        'I5': "ui",
        'I6': "nbi",
        'I7': "brba",
        'I8': "nbai",
        'I9': "mbi",
        'I10': "baei",
    }

    # Map each column containing an "I" from its index code (e.g. "I1_2016")
    # to the human-readable index name (e.g. "ndvi_2016").
    renaming = {}
    for col in (c for c in df.columns if 'I' in c):
        prefix = col.split('_')[0]
        renaming[col] = col.replace(prefix, ind_dict[prefix])

    return df.rename(columns=renaming)
def get_rasters_merged(
    raster_file1,
    raster_file2,
    output_file,
    tmp_dir,
    grid_blocks=5
):
    """Merge two aligned rasters window-by-window, keeping the per-pixel maximum.

    The merge runs over grid_blocks x grid_blocks windows to bound memory
    use; per-window tiles go to tmp_dir and are stitched into output_file.

    Args:
        raster_file1 (str) : Path to the first raster (also the georeference source)
        raster_file2 (str) : Path to the second raster
        output_file (str) : Path of the stitched output raster
        tmp_dir (str) : Scratch directory for per-window tiles
        grid_blocks (int) : Number of window blocks per side

    Returns:
        None
    """
    # Clear leftovers from a previous run so stitch() only sees fresh tiles
    p = Path(tmp_dir)
    tmp_files = [str(f) for f in list(p.glob('tmp*.tif'))]
    for f in tmp_files:
        os.remove(f)

    windows = make_windows(raster_file1, grid_blocks = grid_blocks)

    pbar = tqdm(enumerate(windows), total=len(windows))
    for idx, window in pbar:
        raster1 = rio.open(raster_file1).read(1, window=window)
        raster2 = rio.open(raster_file2).read(1, window=window)
        # Element-wise maximum implements the merge
        result = np.maximum(raster1, raster2)

        # Save
        image_src = raster_file1
        tmp_file = tmp_dir + 'tmp{}.tif'.format(idx)
        tfm = transform(window, transform = rio.open(image_src).transform)
        save_predictions_window(result, image_src, tmp_file, window, tfm)

    stitch(output_file, tmp_dir)
def get_preds_windowing(
    area,
    area_dict,
    model,
    tmp_dir,
    best_features,
    output,
    grid_blocks=5,
    threshold=0
):
    """Score *area* with *model* window-by-window and stitch a prediction raster.

    Per window: band and index columns are read, renamed to the model's
    feature names, cleaned (inf -> 0, NaN -> 0), scored with
    model.predict_proba, thresholded, and written as a temporary tile.
    Pixels whose feature row sums to zero are marked nodata (-1). All tiles
    are finally merged into *output*.

    Args:
        area (str) : Area key into area_dict
        area_dict (dict) : File paths per area (images/indices)
        model : Fitted classifier exposing predict_proba
        tmp_dir (str) : Scratch directory for per-window tiles
        best_features (list) : Ordered feature column names for the model
        output (str) : Path of the stitched output raster
        grid_blocks (int) : Number of window blocks per side
        threshold (float) : Probabilities below this are zeroed (0 disables)
    """
    # Delete tmp files from previous run
    if Path(output).is_file():
        os.remove(output)

    p = Path(tmp_dir)
    tmp_files = [str(f) for f in list(p.glob('tmp*.tif'))]
    for f in tmp_files:
        os.remove(f)

    # Read bands
    src_file = area_dict[area]['images'][0]
    windows = make_windows(src_file, grid_blocks = grid_blocks)

    pbar = tqdm(enumerate(windows), total=len(windows))
    for idx, window in pbar:
        pbar.set_description('Processing {}...'.format(area))
        df_bands = read_bands_window(area_dict, area, window=window)
        df_inds = read_inds_window(area_dict, area, window=window)
        df_test = pd.concat((df_bands, df_inds), axis = 1)
        df_test = rename_ind_cols(df_test)
        df_test = df_test.replace([np.inf, -np.inf], 0)

        # Prediction
        X_test = df_test[best_features].fillna(0)
        # Feature rows that are all zero correspond to no-data pixels
        all_zeroes = (X_test.iloc[:, :-1].sum(axis=1) == 0)
        data = X_test
        features = best_features  # NOTE(review): unused local -- kept as-is

        # Prettify Tiff
        preds = model.predict_proba(data)[:, 1]
        if threshold > 0:
            preds[(preds < threshold)] = 0
        preds[all_zeroes] = -1

        # Save
        image_src = src_file
        output_file = tmp_dir + 'tmp{}.tif'.format(idx)
        tfm = transform(window, transform = rio.open(src_file).transform)
        save_predictions_window(preds, image_src, output_file, window, tfm)

    #print('Saving to {}...'.format(output))
    stitch(output, tmp_dir)
def stitch(output_file, tmp_dir):
    """
    Merges all tmp*.tif tiles in tmp_dir into a single raster with gdal_merge.py.
    Source: https://gis.stackexchange.com/questions/230553/merging-all-tiles-from-one-directory-using-gdal

    A small shell script is written to tmp_dir (it activates the "ee" conda
    environment first, which gdal_merge.py requires here) and executed.

    Args:
        output_file (str) : The output filepath
        tmp_dir (str) : Path to temporary directory

    Returns:
        subprocess.CompletedProcess : the result of running the stitch script
    """
    p = Path(tmp_dir)
    file_list = [str(f) for f in list(p.glob('tmp*.tif'))]
    files_string = " ".join(file_list)

    command = "gdal_merge.py -n -1 -a_nodata -1 -o {} -of gtiff ".format(output_file) + files_string
    text = '''
    # set conda env for these commands - took me 3h to figure out
    eval "$(conda shell.bash hook)"
    conda activate ee
    {}
    '''.format(command)

    # Context manager replaces the bare open()/write()/close() triple so the
    # script is guaranteed flushed and closed before it is executed.
    with open(tmp_dir + "stitch.sh", "w") as f:
        f.write(text)

    result = subprocess.run('sh ' + tmp_dir + 'stitch.sh', shell = True, stdout=subprocess.PIPE)
    return result
def read_inds_window(area_dict, area, window):
    """
    Reads the precomputed index bands for each yearly index raster of *area*,
    restricted to *window*.

    Args:
        area_dict (dict) : Python dictionary containing the file paths per area
        area (str) : The area of interest (AOI)
        window (rasterio.windows.Window) : Window to read from each raster

    Returns:
        data (pd.DataFrame) : One row per pixel in the window; columns are
                              "I<i>_<year>" for every index band of every
                              year. NaNs are preserved (fillna is disabled).
    """

    data = []
    image_list = area_dict[area]['indices']

    # Iterate over each year
    for image_file in image_list:
        year = image_file.split('_')[-1].split('.')[0]

        # Read each band
        subdata = dict()
        raster = rio.open(image_file)
        for band_idx in range(raster.count):
            band = raster.read(band_idx+1, window=window).ravel()
            subdata['I{}'.format(band_idx+1)] = band

        # Cast to pandas subdataframe
        subdata = pd.DataFrame(subdata)#.fillna(0)
        subdata.columns = [
            column + '_' + str(year)
            for column in subdata.columns
        ]
        data.append(subdata)
        # Free the per-year frame before reading the next raster
        del subdata

    data = pd.concat(data, axis=1)

    return data
def read_bands_window(area_dict, area, window):
    """
    Reads the raw spectral bands for each yearly image of *area*, restricted
    to *window*.

    Args:
        area_dict (dict) : Python dictionary containing the file paths per area
        area (str) : The area of interest (AOI)
        window (rasterio.windows.Window) : Window to read from each raster

    Returns:
        data (pd.DataFrame) : One row per pixel in the window; columns are
                              "B<i>_<year>" for every band of every year,
                              with NaNs filled with 0.
    """

    data = []
    image_list = area_dict[area]['images']

    # Iterate over each year
    for image_file in image_list:
        year = image_file.split('_')[-1].split('.')[0]

        # Read each band
        subdata = dict()
        raster = rio.open(image_file)
        for band_idx in range(raster.count):
            band = raster.read(band_idx+1, window=window).ravel()
            subdata['B{}'.format(band_idx+1)] = band

        # Cast to pandas subdataframe
        subdata = pd.DataFrame(subdata).fillna(0)
        subdata.columns = [
            column + '_' + str(year)
            for column in subdata.columns
        ]
        data.append(subdata)
        # Free the per-year frame before reading the next raster
        del subdata

    data = pd.concat(data, axis=1)

    return data
def make_windows(image_file, grid_blocks=5):
    """Split the extent of *image_file* into a grid of rasterio Windows.

    Args:
        image_file (str) : Path to the reference raster
        grid_blocks (int) : Blocks per side; the result has grid_blocks**2
            windows in row-major order

    Returns:
        list of rasterio.windows.Window
    """
    raster = rio.open(image_file)
    src_height, src_width = raster.shape
    height, width = int(src_height / grid_blocks), int(src_width / grid_blocks)

    # Removed unused locals (subdata, grid_cnt) and iterate the grid
    # positions directly instead of indexing a materialized list.
    windows = []
    for i, j in itertools.product(range(grid_blocks), range(grid_blocks)):
        row_start, row_stop = i * height, (i + 1) * height
        col_start, col_stop = j * width, (j + 1) * width
        windows.append(Window.from_slices((row_start, row_stop), (col_start, col_stop)))

    return windows
def ndvi(b):
    """Normalized Difference Vegetation Index from bands B8 and B4."""
    num = b["B8"] - b["B4"]
    den = b["B8"] + b["B4"]
    return num / den
def ndbi(b):
    """Normalized Difference Built-up Index from bands B11 and B9."""
    num = b["B11"] - b["B9"]
    den = b["B11"] + b["B9"]
    return num / den
def savi(b):
    """Soil-Adjusted Vegetation Index from bands B9 and B4 (L = 0.5)."""
    num = b["B9"] - b["B4"]
    den = b["B9"] + b["B4"] + 0.5
    return 1.5 * num / den
def mndwi(b):
    """Modified Normalized Difference Water Index from bands B3 and B11."""
    num = b["B3"] - b["B11"]
    den = b["B3"] + b["B11"]
    return num / den
def ui(b):
    """Urban Index from bands B7 and B5."""
    num = b["B7"] - b["B5"]
    den = b["B7"] + b["B5"]
    return num / den
def nbi(b):
    """New Built-up Index: B4 * B11 / B9."""
    product = b["B4"] * b["B11"]
    return product / b["B9"]
def brba(b):
    """Band Ratio for Built-up Area: B4 / B11."""
    ratio = b["B4"] / b["B11"]
    return ratio
def nbai(b):
    """Normalized Built-up Area Index from bands B11, B12 and B3."""
    ratio = b["B12"] / b["B3"]
    return (b["B11"] - ratio) / (b["B11"] + ratio)
def mbi(b):
    """Modified Built-up Index from bands B12, B4 and B9."""
    num = b["B12"] * b["B4"] - b["B9"] ** 2
    den = b["B4"] + b["B9"] + b["B12"]
    return num / den
def baei(b):
    """Built-up Area Extraction Index from bands B4, B3 and B11."""
    num = b["B4"] + 0.3
    den = b["B3"] + b["B11"]
    return num / den
def ibi(b):
    """
    Calculates the index-based building index (IBI).
    Source: https://stats.stackexchange.com/questions/178626/how-to-normalize-data-between-1-and-1

    Args:
        b (dict or pd.DataFrame) : Mapping that already contains the derived
            "ndbi", "savi" and "mndwi" arrays

    Returns:
        numpy array : IBI values, winsorized at the t/2 and 1 - t/2 quantiles
    """
    # Threshold (fraction of the distribution clipped, split over both tails)
    t = 0.05

    # Normalize each component to (-1,1) via min-max scaling
    ndbi_t, savi_t, mndwi_t = b["ndbi"], b["savi"], b["mndwi"] #ndbi(), savi(), mndwi()
    ndbi_n = 2 * (ndbi_t - ndbi_t.min()) / (ndbi_t.max() - ndbi_t.min()) - 1
    savi_n = 2 * (savi_t - savi_t.min()) / (savi_t.max() - savi_t.min()) - 1
    mndwi_n = 2 * (mndwi_t - mndwi_t.min()) / (mndwi_t.max() - mndwi_t.min()) - 1

    # Remove outliers: clamp to the [t/2, 1 - t/2] quantile cutoffs
    temp = (ndbi_n - (savi_n + mndwi_n) / 2) / (ndbi_n + (savi_n + mndwi_n) / 2)
    vv = pd.DataFrame({"col": temp.reshape(-1, 1)[:, 0]})
    cutoffs = list(vv["col"].quantile([t / 2, 1 - t / 2]))
    temp[temp <= cutoffs[0]] = cutoffs[0]
    temp[temp >= cutoffs[1]] = cutoffs[1]

    return temp
def save_predictions(pred, image_src, output_file):
    """
    Saves the predictions as a TIFF file, based on a reference (source) image.

    Args:
        pred (numpy array) : The array containing the predictions
        image_src (str) : Path to the source image to be used as a reference file
        output_file (str) : Path of the single-band GeoTIFF to write

    Returns:
        None
    """

    with rio.open(image_src) as src:
        # Reshape the flat prediction vector back onto the full source grid
        out_image = np.array(pred).reshape(
            (src.height,src.width)
        )
        out_meta = src.meta
        out_meta.update({
            "driver": "GTiff",
            "height": src.height,
            "width": src.width,
            "count": 1,
            'nodata': -1,
            "dtype": np.float64
        })

    with rio.open(output_file, "w", **out_meta, compress='deflate') as dest:
        dest.write(out_image, 1)
def read_bands(area_dict, area):
    """
    Reads the bands for each image of each area and calculates the derived indices.

    Args:
        area_dict (dict) : Python dictionary containing the file paths per area
        area (str) : The area of interest (AOI)

    Returns:
        data (pd.DataFrame) : The resulting pandas dataframe containing the raw spectral
                              bands ("B<i>_<year>") and derived indices
                              ("<index>_<year>"), NaNs filled with 0
    """

    data = []
    image_list = area_dict[area]['images']

    # Iterate over each year
    for image_file in image_list:
        year = image_file.split('_')[-1].split('.')[0]

        # Read each band into a flat array keyed "B1".."Bn"
        subdata = dict()
        raster = rio.open(image_file)
        for band_idx in range(raster.count):
            band = raster.read(band_idx+1).ravel()
            subdata['B{}'.format(band_idx+1)] = band

        # Get derived indices
        subdata["ndvi"] = ndvi(subdata)
        subdata["ndbi"] = ndbi(subdata)
        subdata["savi"] = savi(subdata)
        subdata["mndwi"] = mndwi(subdata)
        subdata["ui"] = ui(subdata)
        subdata["nbi"] = nbi(subdata)
        subdata["brba"] = brba(subdata)
        subdata["nbai"] = nbai(subdata)
        subdata["mbi"] = mbi(subdata)
        subdata["baei"] = baei(subdata)

        # Cast to pandas subdataframe
        subdata = pd.DataFrame(subdata).fillna(0)
        subdata.columns = [
            column + '_' + str(year)
            for column in subdata.columns
        ]
        data.append(subdata)
        # Free the per-year frame before reading the next raster
        del subdata

    data = pd.concat(data, axis=1)

    return data
def generate_training_data(area_dict):
    """
    Generates training data consisting of pixels as data points. The script obtains the
    raw spectrals bands and calculates the derived indices for each year (2016-2020)
    for each area. The resulting dataframe also contains a column containing the target
    label and a column indicating the area of each pixel.

    Args:
        area_dict (dict) : Python dictionary containing the file paths per area

    Returns:
        data (pd.DataFrame) : The resulting pandas dataframe containing the training data
        area_code (dict) : A Python dictionary containing the numerical codes for each area
                           e.g. {'maicao': 0, 'riohacha': 1, 'uribia': 2}
    """

    data = []
    area_code = {}

    for idx, area in enumerate(area_dict):
        print('Reading {}...'.format(area))

        # Read positive target mask (band 1 = class value, band 2 = grid uid)
        pos = rio.open(area_dict[area]['pos_mask_tiff'])
        pos_mask = pos.read(1).ravel()
        pos_grid = pos.read(2).ravel()

        # Read negative mask
        neg = rio.open(area_dict[area]['neg_mask_tiff'])
        neg_mask = neg.read(1).ravel()
        neg_grid = neg.read(2).ravel()

        # Get sum of postive and negative mask
        # (masks do not overlap per pixel, so the sum combines both labelings)
        mask = pos_mask + neg_mask
        grid = pos_grid + neg_grid

        # Read bands
        subdata = read_bands(area_dict, area)
        subdata['target'] = mask
        subdata['uid'] = grid
        subdata['area'] = idx
        area_code[area] = idx

        # Get non-zero rows: drop pixels with no band data or no label
        subdata = subdata[subdata.iloc[:, :-3].values.sum(axis=1) != 0]
        subdata = subdata[subdata['target'] != 0]
        data.append(subdata)

    # Concatenate all areas
    data = pd.concat(data)

    return data, area_code
def get_filepaths(areas, images_dir, indices_dir, pos_mask_dir='', neg_mask_dir=''):
    """
    Returns a dictionary containing the image filepaths for each area.

    Args:
        areas (list) : Python list of strings of the areas of interests (AOIs)
                       e.g. ['maicao', 'riohacha', 'uribia']
        images_dir (str) : Directory scanned for image files (substring match on area)
        indices_dir (str) : Directory scanned for index files (substring match on area)
        pos_mask_dir (str) : Prefix for the positive-mask GPKG path
        neg_mask_dir (str) : Prefix for the negative-mask GPKG path

    Returns:
        dict : area -> {"pos_mask_gpkg", "neg_mask_gpkg", "images", "indices"}
    """
    area_dict = {}
    for area in areas:
        area_dict[area] = {
            "pos_mask_gpkg": "{}{}_pos.gpkg".format(pos_mask_dir, area),
            "neg_mask_gpkg": "{}{}_neg.gpkg".format(neg_mask_dir, area),
            # Any filename containing the area name is picked up, sorted
            "images": sorted(
                images_dir + f for f in os.listdir(images_dir) if area in f
            ),
            "indices": sorted(
                indices_dir + f for f in os.listdir(indices_dir) if area in f
            ),
        }

    return area_dict
def explode(gdf):
    """
    Explodes a geodataframe
    Source: https://gist.github.com/mhweber/cf36bb4e09df9deee5eb54dc6be74d26

    Will explode muti-part geometries into single geometries.

    Args:
        gdf (gpd.GeoDataFrame) : Input geodataframe with multi-geometries

    Returns:
        gdf (gpd.GeoDataFrame) : Exploded geodataframe with a new index
                                 and two new columns: level_0 and level_1
    """

    gs = gdf.explode()
    gdf2 = gs.reset_index().rename(columns={0: "geometry"})
    # Drop "class" before the merge so it is not duplicated from the original
    if 'class' in gdf2.columns:
        gdf2 = gdf2.drop("class", axis=1)
    # Re-attach the original attribute columns to every exploded part
    gdf_out = gdf2.merge(
        gdf.drop("geometry", axis=1), left_on="level_0", right_index=True
    )
    gdf_out = gdf_out.set_index(["level_0", "level_1"]).set_geometry("geometry")
    gdf_out.crs = gdf.crs
    return gdf_out
def generate_mask(tiff_file, shape_file, output_file, plot=False):
    """
    Generates a segmentation mask for one TIFF image.

    Writes a two-band uint16 GeoTIFF: band 1 holds the class value per pixel,
    band 2 holds a globally unique polygon id (driven by the module-level
    GRID_ID counter, which this function advances).

    Args:
        tiff_file (str) : Path to reference TIFF file
        shape_file (str) : Path to shapefile
        output_file (str) : Path to output file
        plot (bool) : Whether to plot the vector, source and mask side by side

    Returns:
        tuple : (masks, grids, values) -- the class raster, the polygon-id
                raster, and the class-name -> value mapping
    """
    global GRID_ID

    src = rio.open(tiff_file)
    raw = gpd.read_file(shape_file).dropna()
    gdf = explode(raw)

    # Class values start at 2; "Informal settlement" is pinned to 1
    values = {}
    if "class" in gdf.columns:
        unique_classes = sorted(gdf["class"].unique())
        values = {value: x + 2 for x, value in enumerate(unique_classes)}
        values["Informal settlement"] = 1

    value = 1.0
    masks, grids = [], []
    for index, (idx, x) in enumerate(gdf.iterrows()):
        if "class" in x: value = values[x["class"]]
        # Round-trip through GeoJSON to get a plain geometry mapping
        gdf_json = json.loads(gpd.GeoDataFrame(x).T.to_json())
        feature = [gdf_json["features"][0]["geometry"]][0]
        masks.append((feature, value))
        grids.append((feature, GRID_ID))
        GRID_ID += 1

    masks = rio.features.rasterize(
        ((g, v) for (g, v) in masks), out_shape=src.shape, transform=src.transform
    ).astype(rio.uint16)
    grids = rio.features.rasterize(
        ((g, v) for (g, v) in grids), out_shape=src.shape, transform=src.transform
    ).astype(rio.uint16)

    out_meta = src.meta.copy()
    out_meta["count"] = 2
    out_meta["nodata"] = 0
    out_meta["dtype"] = rio.uint16
    out_meta["compress"] = "deflate"

    with rio.open(output_file, "w", **out_meta) as dst:
        dst.write(masks, indexes=1)
        dst.write(grids, indexes=2)

    if plot:
        f, ax = plt.subplots(1, 3, figsize=(15, 15))
        gdf.plot(ax=ax[0])
        rio.plot.show(src, ax=ax[1], adjust=None)
        rio.plot.show(masks, ax=ax[2], adjust=None)
        ax[0].set_title("Vector File")
        ax[1].set_title("TIFF")
        ax[2].set_title("Masked")
        plt.show()

    return masks, grids, values
def get_pos_raster_mask(area_dict, plot=False):
    """
    Converts positive vector label files (GPKG) to raster masks (TIFF)

    Args:
        area_dict (dict) : Python dictionary containing the file paths per area
        plot (bool) : Whether to plot each generated mask

    Returns:
        area_dict (dict) : The input area_dict with a new entry "pos_mask_tiff"
                           containing the file path of the generated TIFF file.
    """

    for area, value in area_dict.items():
        # Get filepaths; the first yearly image serves as the georeference
        tiff_file = value["images"][0]
        shape_file = value["pos_mask_gpkg"]
        target_file = shape_file.replace("gpkg", "tiff")

        # Generate masks
        generate_mask(
            tiff_file=tiff_file,
            shape_file=shape_file,
            output_file=target_file,
            plot=plot,
        )

        # Set filepath of raster mask in the area dictionary
        area_dict[area]["pos_mask_tiff"] = target_file
    return area_dict
def get_neg_raster_mask(area_dict, plot=False):
    """
    Converts negative vector label files (GPKG) to raster masks (TIFF)

    Args:
        area_dict (dict) : Python dictionary containing the file paths per area
        plot (bool) : Whether to plot each generated mask

    Returns:
        tuple : (area_dict, target_dict) -- area_dict gains a "neg_mask_tiff"
                entry per processed area; target_dict is the class-name ->
                value mapping of the last processed area ({} if no negative
                mask file existed).
    """
    # BUG FIX: target_dict was only assigned inside the if-branch, so the
    # final return raised NameError when no negative mask file existed.
    target_dict = {}

    for area, value in area_dict.items():
        # Get filepaths; the first yearly image serves as the georeference
        tiff_file = value["images"][0]
        shape_file = value["neg_mask_gpkg"]
        target_file = shape_file.replace("gpkg", "tiff")

        if os.path.isfile(shape_file):
            # Generate masks (an unused gpd.read_file() call was removed here)
            _, _, target_dict = generate_mask(
                tiff_file=tiff_file,
                shape_file=shape_file,
                output_file=target_file,
                plot=plot,
            )

            # Set filepath of raster mask in the area dictionary
            area_dict[area]["neg_mask_tiff"] = target_file

    return area_dict, target_dict
5dfdedc0c75bea8fa20ec48c23371c77d7b535da | 4,406 | py | Python | russe/measures/word2vec/evaluate/utils.py | nlpub/russe-evaluation | bc2cf2504fabed650561399716c9a74ae64a2300 | [
"MIT"
] | 36 | 2015-01-17T05:37:17.000Z | 2022-01-25T12:01:14.000Z | russe/measures/word2vec/evaluate/utils.py | nlpub/russe-evaluation | bc2cf2504fabed650561399716c9a74ae64a2300 | [
"MIT"
] | 3 | 2015-01-16T18:13:49.000Z | 2015-06-02T21:17:55.000Z | russe/measures/word2vec/evaluate/utils.py | nlpub/russe-evaluation | bc2cf2504fabed650561399716c9a74ae64a2300 | [
"MIT"
] | 14 | 2015-01-16T17:43:07.000Z | 2021-04-01T19:42:45.000Z | import logging
from numpy import zeros, dtype, float32 as REAL, \
fromstring
from gensim.models.word2vec import Vocab
from gensim import utils
from gensim.models.word2vec import Word2Vec
logger = logging.getLogger("gensim.models.word2vec")
def load_vectors(fvec):
    """Load binary word2vec vectors from `fvec` via load_word2vec_format."""
    # return gs.models.Word2Vec.load_word2vec_format(fvec,binary=True)
    return load_word2vec_format(fvec, binary=True)
def load_word2vec_format(fname, fvocab=None, binary=False, norm_only=True, encoding='utf8'):
    """
    Load the input-hidden weight matrix from the original C word2vec-tool format.

    Note that the information stored in the file is incomplete (the binary tree is missing),
    so while you can query for word similarity etc., you cannot continue training
    with a model loaded this way.

    `binary` is a boolean indicating whether the data is in binary word2vec format.
    `norm_only` is a boolean indicating whether to only store normalised word2vec vectors in memory.
    Word counts are read from `fvocab` filename, if set (this is the file generated
    by `-save-vocab` flag of the original C tool).

    If you trained the C model using non-utf8 encoding for words, specify that
    encoding in `encoding`.

    NOTE: this module is Python 2 code (`xrange`, `except Exc, e` syntax).
    """
    counts = None
    if fvocab is not None:
        # Optional vocabulary file: one "word count" pair per line
        logger.info("loading word counts from %s" % (fvocab))
        counts = {}
        with utils.smart_open(fvocab) as fin:
            for line in fin:
                word, count = utils.to_unicode(line).strip().split()
                counts[word] = int(count)

    logger.info("loading projection weights from %s" % (fname))
    with utils.smart_open(fname) as fin:
        header = utils.to_unicode(fin.readline(), encoding=encoding)
        vocab_size, vector_size = map(int, header.split())  # throws for invalid file format
        result = Word2Vec(size=vector_size)
        result.syn0 = zeros((vocab_size, vector_size), dtype=REAL)
        if binary:
            # Binary layout: "<word><space><vector_size * sizeof(REAL) bytes>"
            binary_len = dtype(REAL).itemsize * vector_size
            for line_no in xrange(vocab_size):
                # mixed text and binary: read text first, then binary
                word = []
                while True:
                    ch = fin.read(1)
                    if ch == b' ':
                        break
                    if ch != b'\n':  # ignore newlines in front of words (some binary files have)
                        word.append(ch)
                try:
                    word = utils.to_unicode(b''.join(word), encoding=encoding)
                except UnicodeDecodeError, e:
                    logger.warning("Couldn't convert whole word to unicode: trying to convert first %d bytes only ..." % e.start)
                    word = utils.to_unicode(b''.join(word[:e.start]), encoding=encoding)
                    logger.warning("... first %d bytes converted to '%s'" % (e.start, word))

                # Fall back to a rank-based pseudo-count when no vocab file given
                if counts is None:
                    result.vocab[word] = Vocab(index=line_no, count=vocab_size - line_no)
                elif word in counts:
                    result.vocab[word] = Vocab(index=line_no, count=counts[word])
                else:
                    logger.warning("vocabulary file is incomplete")
                    result.vocab[word] = Vocab(index=line_no, count=None)
                result.index2word.append(word)
                result.syn0[line_no] = fromstring(fin.read(binary_len), dtype=REAL)
        else:
            # Text layout: "<word> <v1> <v2> ..." per line
            for line_no, line in enumerate(fin):
                parts = utils.to_unicode(line[:-1], encoding=encoding).split(" ")
                if len(parts) != vector_size + 1:
                    raise ValueError("invalid vector on line %s (is this really the text format?)" % (line_no))
                word, weights = parts[0], list(map(REAL, parts[1:]))
                if counts is None:
                    result.vocab[word] = Vocab(index=line_no, count=vocab_size - line_no)
                elif word in counts:
                    result.vocab[word] = Vocab(index=line_no, count=counts[word])
                else:
                    logger.warning("vocabulary file is incomplete")
                    result.vocab[word] = Vocab(index=line_no, count=None)
                result.index2word.append(word)
                result.syn0[line_no] = weights
    logger.info("loaded %s matrix from %s" % (result.syn0.shape, fname))
    result.init_sims(norm_only)
    return result
57acee92e260fc763ee67f5922ed7ee1b9f83447 | 899 | py | Python | SScriptCompiler/examples/thresholdcounter/thresholdCounter_composite.py | alklasil/SScript | de4481bf96e79b9ee157e266ea9fe8b1bfb3701e | [
"MIT"
] | null | null | null | SScriptCompiler/examples/thresholdcounter/thresholdCounter_composite.py | alklasil/SScript | de4481bf96e79b9ee157e266ea9fe8b1bfb3701e | [
"MIT"
] | 8 | 2018-03-10T19:20:43.000Z | 2018-04-30T18:11:17.000Z | SScriptCompiler/examples/thresholdcounter/thresholdCounter_composite.py | alklasil/SScript | de4481bf96e79b9ee157e266ea9fe8b1bfb3701e | [
"MIT"
] | null | null | null | import sys
# SSCript
from src.SComposite import SComposite
# programData
from examples.thresholdcounter.thresholdCounter import get_programData as thresholdCounter
from examples.logger import get_programData as logger
argv = sys.argv[1:]
def get_compositeData(argv=argv):
    """Build the composite program description consumed by SComposite.

    The "logger" entry reuses the variable pairs and confs of the
    "thresholdcounter" entry by name.

    Args:
        argv: command-line arguments forwarded to the thresholdCounter
            program-data builder (defaults to the module-level sys.argv[1:]).

    Returns:
        dict: mapping of program name to its configuration.
    """
    # TODO: rm logger, add requestStingGenerator related programData
    # (current compositeData was used for testing)
    compositeData = {
        "thresholdcounter": {
            'programData': thresholdCounter(argv)
        },
        "logger": {
            'programData': logger(),
            'variableNameValuePairs': 'thresholdcounter',
            'confs': 'thresholdcounter'
        }
    }
    return compositeData
def main(argv=argv):
    """Compile the composite SScript program built from the composite data."""
    composite = SComposite(get_compositeData(argv))
    composite.compile()
if __name__ == "__main__":
# execute only if run as a script
main(argv)
| 20.906977 | 90 | 0.671858 |
11d4ce34c05deecf276e914506060e88992d708c | 867 | py | Python | cla_frontend/apps/status/smoketests/__init__.py | farrepa/cla_frontend | a789ad96cf91daf755784e3a5ed11350a85bddf6 | [
"MIT"
] | null | null | null | cla_frontend/apps/status/smoketests/__init__.py | farrepa/cla_frontend | a789ad96cf91daf755784e3a5ed11350a85bddf6 | [
"MIT"
] | null | null | null | cla_frontend/apps/status/smoketests/__init__.py | farrepa/cla_frontend | a789ad96cf91daf755784e3a5ed11350a85bddf6 | [
"MIT"
] | null | null | null | class SmokeTestFail(Exception):
pass
class SmokeTestRegistry(object):
def __init__(self):
self.tests = {}
def register(self, sequence, name):
def decorator(fn):
self.tests[name] = {"sequence": sequence, "test": fn}
return fn
return decorator
def __iter__(self):
def seq(key):
return self.tests[key]["sequence"]
for name in sorted(self.tests, key=seq):
yield name, self.tests[name]["test"]
def execute(self):
for name, test in iter(self):
status = True
message = ""
try:
test()
except SmokeTestFail as fail:
status = False
message = str(fail)
yield {"name": name, "status": status, "message": message}
smoketests = SmokeTestRegistry()
| 24.083333 | 70 | 0.534025 |
8b3159fa4d2d19c80215e627f172a8577220789e | 49,303 | py | Python | title_maker_pro/datasets.py | umair-akbar/this-word-does-not-exist | 3cfb04c96c3647fa67f9b72df4d085520d7b5d2a | [
"MIT"
] | 3 | 2020-10-22T03:06:51.000Z | 2020-10-22T10:19:12.000Z | title_maker_pro/datasets.py | umair-akbar/this-word-does-not-exist | 3cfb04c96c3647fa67f9b72df4d085520d7b5d2a | [
"MIT"
] | null | null | null | title_maker_pro/datasets.py | umair-akbar/this-word-does-not-exist | 3cfb04c96c3647fa67f9b72df4d085520d7b5d2a | [
"MIT"
] | null | null | null | from dataclasses import dataclass
import os
import logging
import pickle
import hashlib
import itertools
import title_maker_pro.custom_modeling_utils as custom_modeling_utils
import title_maker_pro.dictionary_definition as dictionary_definition
import re
import torch
import random
import stanza
import time
import sys
from collections import Counter
from torch.utils.data import Dataset
from tqdm.auto import tqdm
from transformers import PreTrainedTokenizer
from typing import NamedTuple, List, Optional
from io import StringIO
logger = logging.getLogger(__name__)
# Mapping from OED part-of-speech labels to the Universal POS tags they may
# correspond to. Every value is a tuple so membership tests are exact: the
# original "verb", "adverb" and "conjunction" entries were bare parenthesized
# strings (missing trailing comma), which made `x in oed_to_upos[pos]` do
# substring matching instead of tuple membership.
oed_to_upos = {
    "exclamation": ("NOUN", "PROPN", "VERB", "NUM", "SYM", "X"),
    "abbreviation": ("X", "NOUN", "PROPN", "NUM", "SYM"),
    "noun": ("NOUN", "PROPN"),
    "adjective adverb": ("ADJ", "ADVERB"),
    "adjective": ("ADJ",),
    "verb": ("VERB",),
    "adverb": ("ADVERB",),
    "prefix": ("ADJ", "ADVERB"),
    "conjunction": ("AUX",),
    "pronoun": ("INTJ", "NOUN"),
}
def _access_zero_assert(item):
if len(item) != 1:
raise RuntimeError("Expected length 1 in item")
return item[0]
def _read_in_chunks(stream, chunk_size=1 * 1024 * 1024):
while True:
data = stream.read(chunk_size)
if not data:
break
yield data
@dataclass
class GeneratedWord:
    """A generated dictionary entry: the word plus its generated metadata."""

    word: str
    pos: Optional[str]
    topic: Optional[str]
    definition: str
    example: Optional[str]
    decoded: Optional[str]
    decoded_tokens: Optional[List[int]]

    @classmethod
    def print_words(cls, words, f=sys.stdout):
        """Pretty-print *words* to stream *f* in a dictionary-like layout."""
        for word in words:
            header = [word.word]
            if word.pos:
                header.append(f"/{word.pos}/")
            if word.topic:
                header.append(f"[{word.topic}]")
            print(" ".join(header), file=f)

            # The " |n| " marker flags entries generated without an example
            no_example_marker = " |n| " if word.example is None else ""
            print(f"\t{word.definition}{no_example_marker}", file=f)

            if word.example:
                cleaned_example = word.example.replace("\n", "\n\t")
                print(f"\t\"{cleaned_example}\"", file=f)
            print("----------------", file=f)
print("----------------", file=f)
@dataclass
class GeneratedWordCandidate:
    """A candidate GeneratedWord paired with its quality score."""

    # NOTE(review): higher presumably means better -- confirm the ordering
    # convention at call sites.
    score: float
    candidate: GeneratedWord
class Blacklist:
def __init__(self, blacklist_set):
self.blacklist_set = blacklist_set
def merge(self, other):
self.blacklist_set |= other.blacklist_set
return self
def contains(self, word, recursive=True):
word = word.strip().lower()
return (
word in self.blacklist_set
or re.sub(r"('s|s|ing|')$", "", word) in self.blacklist_set
or (recursive and all(self.contains(e, recursive=False) for e in word.split()))
or (recursive and all(self.contains(e, recursive=False) for e in word.split("-")))
)
def collapse_hyphens(self):
self.blacklist_set |= {"".join(e.split()) for e in self.blacklist_set}
self.blacklist_set |= {"".join(e.split("-")) for e in self.blacklist_set}
@classmethod
def load(cls, path):
with open(path, "rb") as f:
return cls(pickle.load(f))
@classmethod
def from_text_lines(cls, stream):
return cls(set(e.strip().lower() for e in stream))
@classmethod
def from_text_stream(cls, stream, min_threshold=3, chunk_size=1024 * 1024, use_gpu=False, sample_rate=1.0):
cnt = Counter()
for i, chunk in enumerate(_read_in_chunks(stream, chunk_size)):
if sample_rate != 1.0 and random.random() > sample_rate:
continue
pipe = stanza.Pipeline(lang="en", processors="tokenize", use_gpu=use_gpu)
res = pipe(chunk)
two_prev = None
prev = None
for w in res.iter_words():
cnt[w.text.lower()] += 1
if prev:
cnt[f"{prev} {w.text}".lower()] += 1
if two_prev:
cnt[f"{two_prev} {prev} {w.text}".lower()] += 1
two_prev = prev
prev = w.text
ret = cls(set(k for k, v in cnt.items() if v > min_threshold))
return ret
@classmethod
def from_parsed_dictionary(cls, path):
blacklist = set(
(
x.lower()
for x in itertools.chain.from_iterable([e.word] + e.derivatives for e in pickle.load(open(path, "rb")))
)
)
return cls(blacklist)
@classmethod
def from_urban_dictionary(cls, path, loaded=None):
class RenamingUnpickler(pickle.Unpickler):
def find_class(self, module, name):
if module == 'urban_dictionary_scraper':
module = 'title_maker_pro.urban_dictionary_scraper'
return super().find_class(module, name)
with open(path, "rb") as f:
ordered_dict = (loaded or RenamingUnpickler(f).load())
blacklist = set(
(
x.word.lower()
for x in itertools.chain.from_iterable(e.definitions for e in ordered_dict.values())
)
)
return cls(blacklist)
def dump(self, path):
with open(path, "wb") as f:
pickle.dump(self.blacklist_set, f, protocol=pickle.HIGHEST_PROTOCOL)
    def __len__(self):
        # Number of distinct blacklisted entries.
        return len(self.blacklist_set)
def _len_range_overlap(x, y):
start = max(x[0], y[0])
end = min(x[-1], y[-1]) + 1
return max(0, end - start)
def _split_range(splits, split_idx):
splits_tensor = torch.tensor(splits)
sum_splits = torch.cumsum(splits_tensor, 0)
if sum_splits[-1] != 1.0:
raise RuntimeError(f"Splits must sum to 1 (actual: {sum_splits[-1]})")
elif split_idx >= len(sum_splits):
raise RuntimeError(f"Invalid split index {split_idx} (must be less than {len(sum_splits)})")
if split_idx == 0:
start_range = 0.0
else:
start_range = sum_splits[split_idx - 1]
end_range = sum_splits[split_idx]
return (start_range, end_range)
def _in_split_range(split_range, randomizer_str):
start_range, end_range = split_range
val = int(hashlib.md5(randomizer_str.encode("utf-8")).hexdigest(), 16,) % 100000 / 100000
return (val >= start_range and val < end_range).item()
def _cache_path(class_name, base_directory, filename, **keys):
path = [class_name]
for k, v in keys.items():
if isinstance(v, str):
path.append(f"{k}-{v}")
continue
try:
path.append(f"{k}-{'-'.join(str(e) for e in iter(v))}")
continue
except TypeError:
pass
path.append(f"{k}-{str(v)}")
path.append(filename)
return os.path.join(base_directory, "__".join(path))
class TokenGroup(NamedTuple):
    """A (separator, payload) pair of token-id lists used to assemble one training example."""

    # NOTE(review): the mutable list defaults are shared across every instance that
    # omits them; safe only as long as callers never mutate these lists in place.
    separator: List[int] = []
    payload: List[int] = []
    # When True the whole group is dropped (rather than truncated) if it does not fit.
    remove_if_truncated: bool = False
def _join_and_truncate(
max_len: int, begin_tokens: List[int], token_groups: List[TokenGroup], end_tokens: List[int], min_append_size=5,
):
if len(begin_tokens) + len(end_tokens) > max_len:
raise RuntimeError("Length is too small for required tokens")
running_max_len = max_len - len(begin_tokens) - len(end_tokens)
ret = [begin_tokens]
for token_group in token_groups:
if len(token_group.separator) + len(token_group.payload) > running_max_len:
if token_group.remove_if_truncated:
break
if running_max_len - len(token_group.separator) - len(token_group.payload) < min_append_size:
break
ret.append(token_group.separator)
running_max_len -= len(token_group.separator)
ret.append(token_group.payload[:running_max_len])
running_max_len = 0
break
else:
ret.append(token_group.separator)
ret.append(token_group.payload)
running_max_len -= len(token_group.separator) + len(token_group.payload)
ret.append(end_tokens)
return list(itertools.chain.from_iterable(ret))
class SpecialTokens:
    """Sentinel tokens that delimit the fields of a serialized dictionary entry."""

    BOS_TOKEN = "<|bod|>"
    EOS_TOKEN = "<|eod|>"
    PAD = "<|pad|>"
    DEFINITION_SEP = "<|bd|>"
    EXAMPLE_SEP = "<|be|>"
    POS_SEP = "<|pos|>"
    TOPIC_SEP = "<|bto|>"

    @classmethod
    def special_tokens_dict(cls):
        """Return the mapping expected by ``tokenizer.add_special_tokens``."""
        additional = [cls.DEFINITION_SEP, cls.EXAMPLE_SEP, cls.POS_SEP, cls.TOPIC_SEP]
        return {
            "bos_token": cls.BOS_TOKEN,
            "eos_token": cls.EOS_TOKEN,
            "pad_token": cls.PAD,
            "additional_special_tokens": additional,
        }
@dataclass
class GenerationStats:
    """Counters describing one ``generate_words`` run (filter reasons, timings).

    ``viable_candidates`` is attached dynamically by the generators; it is not a
    dataclass field.
    """

    num_iterations: int = 0
    num_items_considered: int = 0
    num_failed_match: int = 0
    num_blacklist_filtered: int = 0
    num_seen_filtered: int = 0
    num_proper_noun_filtered: int = 0
    num_example_missing: int = 0
    num_user_filtered: int = 0
    num_returned: int = 0
    num_example_pos_match_failed: int = 0
    num_example_missing_title: int = 0
    num_short_definitions: int = 0
    wall_time: float = 0.0
    wall_stanza_time: float = 0.0

    def __str__(self):
        """One-line summary: each counter as fraction-of-considered@absolute-count."""
        # Guard against ZeroDivisionError when nothing has been considered yet
        # (all counters are 0 in that case, so the ratios stay correct).
        denominator = max(1, self.num_items_considered)
        return (
            f"iterations={self.num_iterations} time={self.wall_time} stanza_time={self.wall_stanza_time} | "
            + ", ".join(
                f"{k} {v / denominator:.2f}@{v}"
                for k, v in (
                    ("items_considered", self.num_items_considered),
                    ("failed_match", self.num_failed_match),
                    ("blacklist_filtered", self.num_blacklist_filtered),
                    ("seen_filtered", self.num_seen_filtered),
                    ("proper_noun_filtered", self.num_proper_noun_filtered),
                    ("example_missing", self.num_example_missing),
                    ("short_definitions", self.num_short_definitions),
                    ("example_missing_title", self.num_example_missing_title),
                    ("example_pos_match_failed", self.num_example_pos_match_failed),
                    ("user_filtered", self.num_user_filtered),
                    ("returned", self.num_returned),
                )
            )
        )
class ParsedDictionaryDefinitionDataset(Dataset):
    """LM dataset over parsed dictionary entries, plus helpers to sample new words."""

    @classmethod
    def _split_re(cls):
        """Regex splitting a generated string into title/pos/topic/definition/example groups."""
        split_re_pat = (
            f"^{re.escape(SpecialTokens.BOS_TOKEN)}(?P<title>.+?)"
            f"(?:{re.escape(SpecialTokens.POS_SEP)}(?P<pos>.+?))?"
            f"(?:{re.escape(SpecialTokens.TOPIC_SEP)}(?P<topic>.+?))?"
            f"{re.escape(SpecialTokens.DEFINITION_SEP)}(?P<definition>.+?)"
            f"(?:{re.escape(SpecialTokens.EXAMPLE_SEP)}(?P<example>.+?))*"
            f"{re.escape(SpecialTokens.EOS_TOKEN)}"
        )
        split_re = re.compile(split_re_pat, flags=re.MULTILINE | re.DOTALL)
        return split_re

    @classmethod
    def approx_pos(cls, nlp, sentence, lookup_idx, lookup_len):
        """Guess the UPOS tag for the character span [lookup_idx, lookup_idx+lookup_len).

        Runs the stanza pipeline *nlp* over *sentence* and returns the tag whose
        words overlap the span by the most characters.

        Raises:
            RuntimeError: if stanza does not report character offsets.
        """
        start_end_re = re.compile(r"start_char=(\d+)\|end_char=(\d+)")
        doc = nlp(sentence)
        uposes = Counter()
        for sentence in doc.sentences:
            for word in sentence.words:
                m = start_end_re.match(word.misc)
                if not m:
                    raise RuntimeError("Unable to extract start and end positions!")
                start_char = int(m.group(1))
                end_char = int(m.group(2))
                # Weight each tag by how many characters of the word overlap the span.
                uposes[word.upos] += _len_range_overlap(
                    (lookup_idx, lookup_idx + lookup_len - 1), (start_char, end_char - 1)
                )
        ((tag, _),) = uposes.most_common(1)
        return tag

    @classmethod
    def evaluate_creativity(cls, tokenizer, model, blacklist, num_to_generate, batch_size, max_length):
        """Sample words and report how many parse and how many are novel (not blacklisted)."""
        input = tokenizer.encode(SpecialTokens.BOS_TOKEN, return_tensors="pt").to(model.device)
        split_re = cls._split_re()
        num_generated = 0
        num_failed_match = 0
        num_succeeded_match = 0
        num_blacklisted = 0
        for i in tqdm(list(range(0, num_to_generate, batch_size)), desc="Evaluating Creativity"):
            generated = model.generate(
                input,
                max_length=max_length,
                num_return_sequences=batch_size,
                do_sample=True,
                pad_token_id=tokenizer.pad_token_id,
                bos_token_id=tokenizer.bos_token_id,
                eos_token_id=tokenizer.eos_token_id,
            )
            for i in range(generated.size()[0]):
                sentence_tokens = generated[i, :].tolist()
                num_generated += 1
                decoded = tokenizer.decode(sentence_tokens)
                m = split_re.match(decoded)
                if not m:
                    num_failed_match += 1
                    continue
                num_succeeded_match += 1
                title = m.group("title")
                if blacklist and blacklist.contains(title):
                    num_blacklisted += 1
        return {
            "creative_words": 1 - (num_blacklisted / max(num_succeeded_match, 1)),
            "nonconforming": num_failed_match / num_generated,
        }

    @classmethod
    def generate_words(
        cls,
        tokenizer,
        model,
        prefix=SpecialTokens.BOS_TOKEN,
        num=100,
        max_iterations=10,
        generation_args={},
        blacklist=None,
        example_title_match=True,
        example_match_pos_pipeline=None,
        dedupe_titles=True,
        user_filter=None,
        filter_proper_nouns=False,
        use_custom_generate=True,
        min_definition_words=3,
    ):
        """Sample up to *num* novel dictionary words from *model*.

        Candidates are filtered (blacklist, duplicates, missing/short fields,
        POS-vs-example mismatch, *user_filter*); near-misses are kept in
        ``stats.viable_candidates`` with a quality score.

        Returns:
            (words, stats): a list of GeneratedWord and the GenerationStats for the run.
        """
        start = time.time()
        viable_candidates = []
        ret = []
        num_iteration = 0
        if isinstance(prefix, str):
            input = tokenizer.encode(prefix, return_tensors="pt").to(model.device)
        else:
            input = torch.tensor([prefix], dtype=torch.long).to(model.device)
        pos_sep_id = _access_zero_assert(tokenizer.encode(SpecialTokens.POS_SEP))
        example_sep_id = _access_zero_assert(tokenizer.encode(SpecialTokens.EXAMPLE_SEP))
        topic_sep_id = _access_zero_assert(tokenizer.encode(SpecialTokens.TOPIC_SEP))
        definition_sep_id = _access_zero_assert(tokenizer.encode(SpecialTokens.DEFINITION_SEP))
        split_re = cls._split_re()
        seen_titles = set()
        stats = GenerationStats()
        t = tqdm(total=num)
        while len(ret) < num and num_iteration < max_iterations:
            num_iteration += 1
            stats.num_iterations += 1
            if not use_custom_generate:
                generated = model.generate(
                    input,
                    pad_token_id=tokenizer.pad_token_id,
                    bos_token_id=tokenizer.bos_token_id,
                    eos_token_id=tokenizer.eos_token_id,
                    **generation_args,
                )
            else:

                def partial_generation_transform(input_ids, tokens_to_add):
                    # Early-terminate any sequence whose title turns out blacklisted,
                    # and force an example separator before EOS so every generated
                    # word carries an example.
                    for i in range(tokens_to_add.size()[0]):
                        if blacklist and tokens_to_add[i] in (pos_sep_id, topic_sep_id, definition_sep_id):
                            word = tokenizer.decode(input_ids[i, :][1:])
                            if blacklist.contains(word):
                                tokens_to_add[i] = tokenizer.eos_token_id
                        elif tokens_to_add[i] == tokenizer.eos_token_id:
                            example_token_idxs = input_ids[i, :] == example_sep_id
                            if example_token_idxs.max() == 0:
                                tokens_to_add[i] = example_sep_id
                    return tokens_to_add

                generated = custom_modeling_utils.custom_generate(
                    model,
                    input,
                    pad_token_id=tokenizer.pad_token_id,
                    bos_token_id=tokenizer.bos_token_id,
                    eos_token_id=tokenizer.eos_token_id,
                    partial_generation_transform=partial_generation_transform,
                    **generation_args,
                )
            for i in range(generated.size()[0]):
                if len(ret) >= num:
                    break
                # Keep the near-miss list bounded.
                viable_candidates = viable_candidates[:1000]
                stats.num_items_considered += 1
                sentence_tokens = generated[i, :].tolist()
                decoded = tokenizer.decode(sentence_tokens)
                m = split_re.match(decoded)
                if not m:
                    stats.num_failed_match += 1
                    continue
                title = m.group("title")
                definition = m.group("definition")
                topic = m.group("topic")
                pos = m.group("pos")
                example = m.group("example")
                generated_word = GeneratedWord(
                    word=title and title.strip(),
                    definition=definition and definition.strip(),
                    example=example and example.strip(),
                    pos=pos and pos.strip(),
                    topic=topic and topic.strip(),
                    decoded=decoded,
                    decoded_tokens=sentence_tokens,
                )
                if blacklist and blacklist.contains(title):
                    stats.num_blacklist_filtered += 1
                    continue
                if dedupe_titles and title.strip().lower() in seen_titles:
                    stats.num_seen_filtered += 1
                    continue
                if filter_proper_nouns and title.strip()[:1].isupper():
                    stats.num_proper_noun_filtered += 1
                    continue
                if not example or not example.strip():
                    stats.num_example_missing += 1
                    viable_candidates.append(GeneratedWordCandidate(0.0, generated_word))
                    continue
                if len(definition.split()) < min_definition_words:
                    stats.num_short_definitions += 1
                    viable_candidates.append(GeneratedWordCandidate(0.2, generated_word))
                    continue
                # The example must mention the (singular-ish) title; optionally its
                # POS tag there must agree with the labeled POS.
                t_rstrip = title.strip().lower().rstrip("s")
                l_example = example.lower()
                try:
                    start_title_idx = l_example.index(t_rstrip)
                    if pos and example_match_pos_pipeline:
                        pos_removed = re.sub(r"\[.*\]", "", pos).strip()
                        pos_removed = re.sub(r"plural", "", pos_removed).strip()
                        start_stanza = time.time()
                        pos_guess = cls.approx_pos(
                            example_match_pos_pipeline, l_example, start_title_idx, len(t_rstrip)
                        )
                        stats.wall_stanza_time += time.time() - start_stanza
                        if pos_removed not in oed_to_upos:
                            # logger.warn is a deprecated alias of logger.warning.
                            logger.warning(f"No UPOS mapping for {pos_removed} - {title} in '{example}'': {pos_guess}")
                            stats.num_example_pos_match_failed += 1
                            viable_candidates.append(GeneratedWordCandidate(0.9, generated_word))
                            continue
                        elif pos_guess not in oed_to_upos[pos_removed]:
                            stats.num_example_pos_match_failed += 1
                            viable_candidates.append(GeneratedWordCandidate(1.0, generated_word))
                            continue
                except ValueError:
                    stats.num_example_missing_title += 1
                    viable_candidates.append(GeneratedWordCandidate(0.5, generated_word))
                    continue
                if user_filter and not user_filter(generated_word):
                    stats.num_user_filtered += 1
                    continue
                else:
                    t.update()
                    ret.append(generated_word)
                    seen_titles.add(generated_word.word.lower())
        stats.num_returned = len(ret)
        stats.viable_candidates = viable_candidates
        stats.wall_time = time.time() - start
        return ret[:num], stats

    def _make_examples(self, tokenizer, entry: dictionary_definition.Entry):
        """Tokenize one dictionary entry into training examples (one per non-reference definition)."""
        examples = []
        for sense in entry.senses:
            for definition in sense.definitions:
                if isinstance(definition, dictionary_definition.ReferenceDefinition):
                    continue
                token_groups = []
                token_groups.append(TokenGroup(separator=[], payload=tokenizer.encode(entry.word)))
                if sense.pos:
                    if definition.pos_modifier:
                        payload = tokenizer.encode(f"{sense.pos} {definition.pos_modifier}")
                    else:
                        payload = tokenizer.encode(sense.pos)
                    token_groups.append(TokenGroup(separator=self.pos_sep_ids, payload=payload))
                if definition.topic:
                    token_groups.append(
                        TokenGroup(separator=self.topic_sep_ids, payload=tokenizer.encode(definition.topic),)
                    )
                token_groups.append(
                    TokenGroup(
                        separator=self.definition_sep_ids, payload=tokenizer.encode(definition.definition.rstrip(". ")),
                    )
                )
                # Examples are optional filler: drop them rather than truncate.
                for example in definition.examples:
                    token_groups.append(
                        TokenGroup(
                            separator=self.example_sep_ids, payload=tokenizer.encode(example), remove_if_truncated=True,
                        )
                    )
                example = _join_and_truncate(
                    max_len=self.max_len,
                    begin_tokens=self.bos_token_ids,
                    end_tokens=self.eos_token_ids,
                    token_groups=token_groups,
                )
                assert (
                    len(example) <= self.max_len
                ), f"Example should be less than max length: {len(example)} Vs. {self.max_len}"
                examples.append(example)
        return examples

    def __init__(self, tokenizer: PreTrainedTokenizer, args, file_path: str, splits=(1.0,), split_idx=0):
        """Load (or build and cache) tokenized examples from the pickled entries at *file_path*.

        *splits*/*split_idx* select a deterministic, word-hash based slice of the data.
        Note: the default is ``(1.0,)`` — the original ``(1.0)`` was a bare float and
        crashed ``_split_range`` (indexing a 0-dim tensor).
        """
        self.max_len = min(tokenizer.max_len_single_sentence, args.block_size)
        self.bos_token_ids = tokenizer.encode(SpecialTokens.BOS_TOKEN)
        self.eos_token_ids = tokenizer.encode(SpecialTokens.EOS_TOKEN)
        self.pos_sep_ids = tokenizer.encode(SpecialTokens.POS_SEP)
        self.definition_sep_ids = tokenizer.encode(SpecialTokens.DEFINITION_SEP)
        self.example_sep_ids = tokenizer.encode(SpecialTokens.EXAMPLE_SEP)
        self.topic_sep_ids = tokenizer.encode(SpecialTokens.TOPIC_SEP)
        assert os.path.isfile(file_path) or os.path.islink(file_path)
        directory, filename = os.path.split(file_path)
        cached_features_file = _cache_path(
            self.__class__.__name__,
            directory,
            filename,
            model_type=args.model_type,
            splits=splits,
            split_idx=split_idx,
            max_len=self.max_len,
        )
        if os.path.exists(cached_features_file) and not args.overwrite_cache:
            logger.info("Loading features from cached file %s", cached_features_file)
            with open(cached_features_file, "rb") as handle:
                self.examples = pickle.load(handle)
            # f-prefix was missing in the original, which logged the literal braces.
            logger.info(f"Loaded {len(self.examples)} features")
        else:
            logger.info(
                f"Cache at {cached_features_file} not found... creating features from dataset file at %s", directory,
            )
            self.examples = []
            split_range = _split_range(splits, split_idx)
            with open(file_path, "rb") as f:
                entries = pickle.load(f)
                for entry in entries:
                    if _in_split_range(split_range, entry.word):
                        self.examples.extend(self._make_examples(tokenizer, entry))
            logger.info(f"Saving {len(self.examples)} features into cached file {cached_features_file}")
            with open(cached_features_file, "wb") as handle:
                pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)

    def __len__(self):
        # Number of tokenized training examples.
        return len(self.examples)

    def __getitem__(self, item):
        # Return one example as a long tensor for the collator.
        return torch.tensor(self.examples[item], dtype=torch.long)
class InverseParsedDictionaryDefinitionDataset(Dataset):
    """Inverse dataset: the model sees a definition first and must produce the word."""

    @classmethod
    def _split_re(cls):
        """Regex splitting a generated string into definition/title/pos/topic/example groups.

        (The classmethod's first parameter was misleadingly named ``self`` in the
        original; renamed to ``cls`` for consistency.)
        """
        split_re_pat = (
            f"^{re.escape(SpecialTokens.BOS_TOKEN)}(?P<definition>.+?)"
            f"{re.escape(SpecialTokens.DEFINITION_SEP)}(?P<title>.+?)"
            f"(?:{re.escape(SpecialTokens.POS_SEP)}(?P<pos>.+?))?"
            f"(?:{re.escape(SpecialTokens.TOPIC_SEP)}(?P<topic>.+?))?"
            f"(?:{re.escape(SpecialTokens.EXAMPLE_SEP)}(?P<example>.+?))*"
            f"{re.escape(SpecialTokens.EOS_TOKEN)}"
        )
        split_re = re.compile(split_re_pat, flags=re.MULTILINE | re.DOTALL)
        return split_re

    @classmethod
    def generate_words(
        cls,
        tokenizer,
        model,
        prefix=SpecialTokens.BOS_TOKEN,
        num=100,
        max_iterations=10,
        generation_args={},
        blacklist=(),
        dedupe_titles=True,
        user_filter=None,
    ):
        """Sample up to *num* words for a given definition prefix.

        Returns:
            (words, None): a list of GeneratedWord; no stats are collected here.
        """
        ret = []
        num_iteration = 0
        if isinstance(prefix, str):
            input = tokenizer.encode(prefix, return_tensors="pt").to(model.device)
        else:
            input = torch.tensor([prefix], dtype=torch.long).to(model.device)
        split_re = cls._split_re()
        seen_titles = set()
        t = tqdm(total=num)
        while len(ret) < num and num_iteration < max_iterations:
            num_iteration += 1
            generated = model.generate(
                input,
                pad_token_id=tokenizer.pad_token_id,
                bos_token_id=tokenizer.bos_token_id,
                eos_token_id=tokenizer.eos_token_id,
                **generation_args,
            )
            for i in range(generated.size()[0]):
                if len(ret) >= num:
                    break
                sentence_tokens = generated[i, :].tolist()
                decoded = tokenizer.decode(sentence_tokens)
                m = split_re.match(decoded)
                if not m:
                    continue
                title = m.group("title")
                definition = m.group("definition")
                topic = m.group("topic")
                pos = m.group("pos")
                example = m.group("example")
                generated_word = GeneratedWord(
                    word=title and title.strip(),
                    definition=definition and definition.strip(),
                    example=example and example.strip(),
                    pos=pos and pos.strip(),
                    topic=topic and topic.strip(),
                    decoded=decoded,
                    decoded_tokens=sentence_tokens,
                )
                if blacklist and blacklist.contains(title):
                    continue
                if dedupe_titles and title.strip().lower() in seen_titles:
                    continue
                if user_filter and not user_filter(generated_word):
                    continue
                else:
                    ret.append(generated_word)
                    seen_titles.add(generated_word.word.lower())
                    t.update()
        return ret[:num], None

    def _make_examples(self, tokenizer, entry: dictionary_definition.Entry):
        """Tokenize one entry into examples with the definition FIRST and the word after."""
        examples = []
        for sense in entry.senses:
            for definition in sense.definitions:
                if isinstance(definition, dictionary_definition.ReferenceDefinition):
                    continue
                token_groups = []
                token_groups.append(
                    TokenGroup(separator=[], payload=tokenizer.encode(definition.definition.rstrip(". ")))
                )
                token_groups.append(TokenGroup(separator=self.definition_sep_ids, payload=tokenizer.encode(entry.word)))
                if sense.pos:
                    if definition.pos_modifier:
                        payload = tokenizer.encode(f"{sense.pos} {definition.pos_modifier}")
                    else:
                        payload = tokenizer.encode(sense.pos)
                    token_groups.append(TokenGroup(separator=self.pos_sep_ids, payload=payload))
                if definition.topic:
                    token_groups.append(
                        TokenGroup(separator=self.topic_sep_ids, payload=tokenizer.encode(definition.topic),)
                    )
                # Examples are optional filler: drop them rather than truncate.
                for example in definition.examples:
                    token_groups.append(
                        TokenGroup(
                            separator=self.example_sep_ids, payload=tokenizer.encode(example), remove_if_truncated=True,
                        )
                    )
                example = _join_and_truncate(
                    max_len=self.max_len,
                    begin_tokens=self.bos_token_ids,
                    end_tokens=self.eos_token_ids,
                    token_groups=token_groups,
                )
                assert (
                    len(example) <= self.max_len
                ), f"Example should be less than max length: {len(example)} Vs. {self.max_len}"
                examples.append(example)
        return examples

    def __init__(self, tokenizer: PreTrainedTokenizer, args, file_path: str, splits=(1.0,), split_idx=0):
        """Load (or build and cache) tokenized examples from the pickled entries at *file_path*.

        Note: the default is ``(1.0,)`` — the original ``(1.0)`` was a bare float and
        crashed ``_split_range`` (indexing a 0-dim tensor).
        """
        self.max_len = min(tokenizer.max_len_single_sentence, args.block_size)
        self.bos_token_ids = tokenizer.encode(SpecialTokens.BOS_TOKEN)
        self.eos_token_ids = tokenizer.encode(SpecialTokens.EOS_TOKEN)
        self.pos_sep_ids = tokenizer.encode(SpecialTokens.POS_SEP)
        self.definition_sep_ids = tokenizer.encode(SpecialTokens.DEFINITION_SEP)
        self.example_sep_ids = tokenizer.encode(SpecialTokens.EXAMPLE_SEP)
        self.topic_sep_ids = tokenizer.encode(SpecialTokens.TOPIC_SEP)
        assert os.path.isfile(file_path) or os.path.islink(file_path)
        directory, filename = os.path.split(file_path)
        cached_features_file = _cache_path(
            self.__class__.__name__,
            directory,
            filename,
            model_type=args.model_type,
            splits=splits,
            split_idx=split_idx,
            max_len=self.max_len,
        )
        if os.path.exists(cached_features_file) and not args.overwrite_cache:
            logger.info("Loading features from cached file %s", cached_features_file)
            with open(cached_features_file, "rb") as handle:
                self.examples = pickle.load(handle)
            # f-prefix was missing in the original, which logged the literal braces.
            logger.info(f"Loaded {len(self.examples)} features")
        else:
            logger.info(
                f"Cache at {cached_features_file} not found... creating features from dataset file at %s", directory,
            )
            self.examples = []
            split_range = _split_range(splits, split_idx)
            with open(file_path, "rb") as f:
                entries = pickle.load(f)
                for entry in entries:
                    if _in_split_range(split_range, entry.word):
                        self.examples.extend(self._make_examples(tokenizer, entry))
            logger.info(f"Saving {len(self.examples)} features into cached file {cached_features_file}")
            with open(cached_features_file, "wb") as handle:
                pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)

    def __len__(self):
        # Number of tokenized training examples.
        return len(self.examples)

    def __getitem__(self, item):
        # Return one example as a long tensor for the collator.
        return torch.tensor(self.examples[item], dtype=torch.long)
class BinaryDictionaryDefinitionDataset(Dataset):
    """LM dataset over definitions extracted from an Apple binary dictionary."""

    @classmethod
    def title_tokenization(cls, title):
        """Wrap *title* in the <title> sentinel tags used during training."""
        return f"<title>{title}</title>"

    def _make_example(self, tokenizer, definition):
        """Tokenize one DictionaryDefinition into a training example, truncated to max_len.

        Instance method rather than the original @classmethod: ``max_len`` is only
        ever set as an instance attribute in ``__init__``, so the former
        ``cls.max_len`` raised AttributeError.

        Raises:
            RuntimeError: if the entry text cannot be matched/stripped.
        """
        max_len = self.max_len
        # Strip the leading title (with optional sense number and |pronunciation|).
        m = re.match(r"\s*" + re.escape(definition.title) + r"\d*\s*(\|[^|]*\|)?\s*", definition.entry_str,)
        if m:
            trainable_entry = definition.entry_str[m.span()[1] :].strip()
            if not trainable_entry:
                raise RuntimeError(f"Bad entry for {definition.title}: '{definition.entry_str}'")
        else:
            raise RuntimeError(f"Couldn't match {definition.title} on '{definition.entry_str}'")
        tokenized_title = [tokenizer.bos_token_id] + tokenizer.convert_tokens_to_ids(
            tokenizer.tokenize(self.title_tokenization(definition.title))
        )
        tokenized_entry = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(trainable_entry))
        if len(tokenized_title) + len(tokenized_entry) > max_len:
            # logger.warn is a deprecated alias of logger.warning.
            logger.warning(f"Truncating long entry for '{definition.title}' (entry is {len(tokenized_entry)})")
        all_tokenized = (tokenized_title + tokenized_entry)[:max_len]
        example = tokenizer.build_inputs_with_special_tokens(all_tokenized)
        assert len(example) == len(all_tokenized), "If this fails our tokenizer is weird"
        return example

    def __init__(
        self, tokenizer: PreTrainedTokenizer, args, file_path: str, splits=(1.0,), split_idx=0,
    ):
        """Load (or build and cache) tokenized examples from the Apple dictionary at *file_path*.

        Note: the default is ``(1.0,)`` — the original ``(1.0)`` was a bare float and
        crashed ``_split_range`` (indexing a 0-dim tensor).
        """
        assert os.path.isfile(file_path) or os.path.islink(file_path)
        self.max_len = min(tokenizer.max_len_single_sentence, args.block_size)
        directory, filename = os.path.split(file_path)
        cached_features_file = _cache_path(
            self.__class__.__name__,
            directory,
            filename,
            model_type=args.model_type,
            splits=splits,
            split_idx=split_idx,
            max_len=self.max_len,
        )
        if os.path.exists(cached_features_file) and not args.overwrite_cache:
            logger.info("Loading features from cached file %s", cached_features_file)
            with open(cached_features_file, "rb") as handle:
                self.examples = pickle.load(handle)
            # f-prefix was missing in the original, which logged the literal braces.
            logger.info(f"Loaded {len(self.examples)} features")
        else:
            logger.info("Creating features from dataset file at %s", directory)
            split_range = _split_range(splits, split_idx)
            self.examples = []
            with open(file_path, "rb") as f:
                for dd in dictionary_definition.DictionaryDefinition.gen_from_apple_dictionary(f):
                    if _in_split_range(split_range, dd.title):
                        self.examples.append(self._make_example(tokenizer, dd))
            logger.info(f"Saving {len(self.examples)} features into cached file {cached_features_file}")
            with open(cached_features_file, "wb") as handle:
                pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)

    def __len__(self):
        # Number of tokenized training examples.
        return len(self.examples)

    def __getitem__(self, item):
        # Return one example as a long tensor for the collator.
        return torch.tensor(self.examples[item], dtype=torch.long)
class UrbanDictionaryDataset(Dataset):
    """LM dataset over scraped urban-dictionary words (title / definition / example)."""

    @classmethod
    def _split_re(cls):
        """Regex splitting a generated string into title/definition/example groups."""
        split_re_pat = (
            f"^{re.escape(SpecialTokens.BOS_TOKEN)}(?P<title>.+?)"
            f"{re.escape(SpecialTokens.DEFINITION_SEP)}(?P<definition>.+?)"
            f"{re.escape(SpecialTokens.EXAMPLE_SEP)}(?P<example>.+?)"
            f"{re.escape(SpecialTokens.EOS_TOKEN)}"
        )
        split_re = re.compile(split_re_pat, flags=re.MULTILINE | re.DOTALL)
        return split_re

    @classmethod
    def generate_words(
        cls,
        tokenizer,
        model,
        prefix=SpecialTokens.BOS_TOKEN,
        num=100,
        max_iterations=10,
        generation_args={},
        blacklist=None,
        example_title_match=True,
        example_match_pos_pipeline=None,
        dedupe_titles=True,
        user_filter=None,
        filter_proper_nouns=False,
        use_custom_generate=True,
        min_definition_words=3,
    ):
        """Sample up to *num* novel urban-dictionary-style words from *model*.

        Candidates are filtered (blacklist, duplicates, missing/short fields, example
        not mentioning the title, *user_filter*); near-misses are kept in
        ``stats.viable_candidates`` with a quality score.

        Returns:
            (words, stats): a list of GeneratedWord and the GenerationStats for the run.
        """
        start = time.time()
        viable_candidates = []
        ret = []
        num_iteration = 0
        if isinstance(prefix, str):
            input = tokenizer.encode(prefix, return_tensors="pt").to(model.device)
        else:
            input = torch.tensor([prefix], dtype=torch.long).to(model.device)
        pos_sep_id = _access_zero_assert(tokenizer.encode(SpecialTokens.POS_SEP))
        example_sep_id = _access_zero_assert(tokenizer.encode(SpecialTokens.EXAMPLE_SEP))
        topic_sep_id = _access_zero_assert(tokenizer.encode(SpecialTokens.TOPIC_SEP))
        definition_sep_id = _access_zero_assert(tokenizer.encode(SpecialTokens.DEFINITION_SEP))
        split_re = cls._split_re()
        seen_titles = set()
        stats = GenerationStats()
        t = tqdm(total=num)
        while len(ret) < num and num_iteration < max_iterations:
            num_iteration += 1
            stats.num_iterations += 1
            if not use_custom_generate:
                generated = model.generate(
                    input,
                    pad_token_id=tokenizer.pad_token_id,
                    bos_token_id=tokenizer.bos_token_id,
                    eos_token_id=tokenizer.eos_token_id,
                    **generation_args,
                )
            else:

                def partial_generation_transform(input_ids, tokens_to_add):
                    # Early-terminate any sequence whose title turns out blacklisted,
                    # and force an example separator before EOS so every generated
                    # word carries an example.
                    for i in range(tokens_to_add.size()[0]):
                        if blacklist and tokens_to_add[i] in (pos_sep_id, topic_sep_id, definition_sep_id):
                            word = tokenizer.decode(input_ids[i, :][1:])
                            if blacklist.contains(word):
                                tokens_to_add[i] = tokenizer.eos_token_id
                        elif tokens_to_add[i] == tokenizer.eos_token_id:
                            example_token_idxs = input_ids[i, :] == example_sep_id
                            if example_token_idxs.max() == 0:
                                tokens_to_add[i] = example_sep_id
                    return tokens_to_add

                generated = custom_modeling_utils.custom_generate(
                    model,
                    input,
                    pad_token_id=tokenizer.pad_token_id,
                    bos_token_id=tokenizer.bos_token_id,
                    eos_token_id=tokenizer.eos_token_id,
                    partial_generation_transform=partial_generation_transform,
                    **generation_args,
                )
            for i in range(generated.size()[0]):
                if len(ret) >= num:
                    break
                # Keep the near-miss list bounded.
                viable_candidates = viable_candidates[:1000]
                stats.num_items_considered += 1
                sentence_tokens = generated[i, :].tolist()
                decoded = tokenizer.decode(sentence_tokens)
                m = split_re.match(decoded)
                if not m:
                    stats.num_failed_match += 1
                    continue
                title = m.group("title")
                definition = m.group("definition")
                example = m.group("example")
                generated_word = GeneratedWord(
                    word=title and title.strip(),
                    definition=definition and definition.strip(),
                    example=example and example.strip(),
                    pos=None,
                    topic=None,
                    decoded=decoded,
                    decoded_tokens=sentence_tokens,
                )
                if blacklist and blacklist.contains(title):
                    stats.num_blacklist_filtered += 1
                    continue
                if dedupe_titles and title.strip().lower() in seen_titles:
                    stats.num_seen_filtered += 1
                    continue
                if filter_proper_nouns and title.strip()[:1].isupper():
                    stats.num_proper_noun_filtered += 1
                    continue
                if not example or not example.strip():
                    stats.num_example_missing += 1
                    viable_candidates.append(GeneratedWordCandidate(0.0, generated_word))
                    continue
                if len(definition.split()) < min_definition_words:
                    stats.num_short_definitions += 1
                    viable_candidates.append(GeneratedWordCandidate(0.2, generated_word))
                    continue
                # The example must mention the (singular-ish) title somewhere.
                t_rstrip = title.strip().lower().rstrip("s")
                l_example = example.lower()
                try:
                    l_example.index(t_rstrip)
                except ValueError:
                    stats.num_example_missing_title += 1
                    viable_candidates.append(GeneratedWordCandidate(0.5, generated_word))
                    continue
                if user_filter and not user_filter(generated_word):
                    stats.num_user_filtered += 1
                    continue
                else:
                    t.update()
                    ret.append(generated_word)
                    seen_titles.add(generated_word.word.lower())
        stats.num_returned = len(ret)
        stats.viable_candidates = viable_candidates
        stats.wall_time = time.time() - start
        return ret[:num], stats

    def _make_examples(self, tokenizer, word):
        """Tokenize one scraped word into training examples, one per definition.

        NOTE(review): uses ``definition.examples[0]`` — assumes every definition has
        at least one example; confirm the scraper guarantees this upstream.
        """
        examples = []
        for definition in word.definitions:
            example = _join_and_truncate(
                max_len=self.max_len,
                begin_tokens=self.bos_token_ids,
                end_tokens=self.eos_token_ids,
                token_groups=[
                    TokenGroup(separator=[], payload=tokenizer.encode(definition.word)),
                    TokenGroup(separator=self.definition_sep_ids, payload=tokenizer.encode(definition.meaning),),
                    TokenGroup(separator=self.example_sep_ids, payload=tokenizer.encode(definition.examples[0]),),
                ],
            )
            assert (
                len(example) <= self.max_len
            ), f"Example should be less than max length: {len(example)} Vs. {self.max_len}"
            examples.append(example)
        return examples

    def __init__(
        self, tokenizer: PreTrainedTokenizer, args, file_path: str, splits=(1.0,), split_idx=0,
    ):
        """Load (or build and cache) tokenized examples from the pickled scrape at *file_path*.

        Note: the default is ``(1.0,)`` — the original ``(1.0)`` was a bare float and
        crashed ``_split_range`` (indexing a 0-dim tensor).
        """
        self.max_len = min(tokenizer.max_len_single_sentence, args.block_size)
        self.bos_token_ids = tokenizer.encode(SpecialTokens.BOS_TOKEN)
        self.eos_token_ids = tokenizer.encode(SpecialTokens.EOS_TOKEN)
        self.definition_sep_ids = tokenizer.encode(SpecialTokens.DEFINITION_SEP)
        self.example_sep_ids = tokenizer.encode(SpecialTokens.EXAMPLE_SEP)
        assert os.path.isfile(file_path) or os.path.islink(file_path)
        directory, filename = os.path.split(file_path)
        cached_features_file = _cache_path(
            self.__class__.__name__,
            directory,
            filename,
            model_type=args.model_type,
            splits=splits,
            split_idx=split_idx,
            max_len=self.max_len,
        )
        if os.path.exists(cached_features_file) and not args.overwrite_cache:
            logger.info("Loading features from cached file %s", cached_features_file)
            with open(cached_features_file, "rb") as handle:
                self.examples = pickle.load(handle)
            # f-prefix was missing in the original, which logged the literal braces.
            logger.info(f"Loaded {len(self.examples)} features")
        else:
            logger.info(
                f"Cache at {cached_features_file} not found... creating features from dataset file at %s", directory,
            )
            self.examples = []
            split_range = _split_range(splits, split_idx)
            with open(file_path, "rb") as f:
                words = list(pickle.load(f).values())
                for word in words:
                    if _in_split_range(split_range, word.title):
                        self.examples.extend(self._make_examples(tokenizer, word))
            logger.info(f"Saving {len(self.examples)} features into cached file {cached_features_file}")
            with open(cached_features_file, "wb") as handle:
                pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)

    def __len__(self):
        # Number of tokenized training examples.
        return len(self.examples)

    def __getitem__(self, item):
        # Return one example as a long tensor for the collator.
        return torch.tensor(self.examples[item], dtype=torch.long)
@dataclass
class WikiArticle:
    """A single wiki article: its page title and raw body text."""

    title: str
    text: str
class WikiArticleTitleDataset(Dataset):
@classmethod
def title_tokenization(cls, title):
return f"<bot>{title}<eot>"
@classmethod
def refine_wikitext(cls, istream, limit=None):
last_blank = False
title_matcher = re.compile(r"^[\s]*= ([^=]*) =[\s]*$")
last_title = None
article_text = StringIO()
for i, line in enumerate(istream):
m = title_matcher.match(line)
if m and last_blank:
title = m.group(1)
if last_title is not None:
yield WikiArticle(title=last_title, text=article_text.getvalue())
last_title = title
article_text = StringIO()
else:
cleaned_line = re.sub(re.escape(last_title), "TITLE", line, flags=re.IGNORECASE) if last_title else line
article_text.write(cleaned_line)
last_blank = re.match(r"^\s*$", line)
if limit and i > limit:
break
yield WikiArticle(title=last_title, text=article_text.getvalue())
@classmethod
def generate_text_dataset(cls, istream, ostream, offset=0, stride=1024, limit=None):
def _output_range(article, start, end):
text = article.text[start:end]
spaces = list(re.compile(r"\s+").finditer(text))
if spaces:
replace_idx = spaces[-1].span()[0]
ostream.write(text[:replace_idx])
ostream.write(cls.title_tokenization(article.title))
ostream.write(text[replace_idx:])
else:
ostream.write(text)
ostream.write(cls.title_tokenization(article.title))
for article in cls.refine_wikitext(istream, limit=limit):
if offset > 0:
_output_range(article, 0, offset)
for i in range(offset, len(article.text), stride):
_output_range(article, i, i + stride)
@staticmethod
def _make_example(tokenizer, text_tokens, title_tokens):
example = tokenizer.build_inputs_with_special_tokens(text_tokens + title_tokens)
start_title_idx = next(i for i in reversed(range(len(example))) if example[i] == title_tokens[0])
end_title_idx = start_title_idx + len(title_tokens)
bool_mask = [bool(i > start_title_idx and i < end_title_idx) for i in range(len(example))]
return (example, bool_mask)
def __init__(self, tokenizer: PreTrainedTokenizer, args, file_path: str, block_size=512):
assert os.path.isfile(file_path)
block_size = block_size - (tokenizer.max_len - tokenizer.max_len_single_sentence)
directory, filename = os.path.split(file_path)
cached_features_file = os.path.join(
directory, args.model_type + "_cached_lm_" + str(block_size) + "_" + filename,
)
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
with open(cached_features_file, "rb") as handle:
self.examples = pickle.load(handle)
else:
logger.info("Creating features from dataset file at %s", directory)
self.examples = []
with open(file_path, encoding="utf-8") as f:
for article in self.refine_wikitext(f):
tokenized_title = tokenizer.convert_tokens_to_ids(
tokenizer.tokenize(self.title_tokenization(article.title))
)
tokenized_article_text = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(article.text))
article_block_size = block_size - len(tokenized_title)
for i in range(0, len(tokenized_article_text) - article_block_size + 1, article_block_size,):
self.examples.append(
self._make_example(
tokenizer, tokenized_article_text[i : (i + article_block_size)], tokenized_title,
)
)
# Note that we are loosing the last truncated example here for the sake of simplicity (no padding)
# If your dataset is small, first you should loook for a bigger one :-) and second you
# can change this behavior by adding (model specific) padding.
logger.info("Saving features into cached file %s", cached_features_file)
with open(cached_features_file, "wb") as handle:
pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)
def __len__(self):
    """Return how many cached examples this dataset holds."""
    example_count = len(self.examples)
    return example_count
def __getitem__(self, item):
    """Fetch example *item* as a (token-id LongTensor, title-mask BoolTensor) pair."""
    example = self.examples[item]
    token_ids = torch.tensor(example[0], dtype=torch.long)
    title_mask = torch.tensor(example[1], dtype=torch.bool)
    return (token_ids, title_mask)
| 38.427903 | 120 | 0.576233 |
077cb5379209167093305d9c720b2214782a1bc8 | 68,717 | py | Python | tensorflow/core/protobuf/eager_service_pb2.py | alexeygrigorev/tensorflow-protobuf | 9863a9281eb6caa9be73128c03906d990639208c | [
"Apache-2.0"
] | 7 | 2020-12-28T02:53:05.000Z | 2022-03-23T05:45:03.000Z | tensorflow/core/protobuf/eager_service_pb2.py | alexeygrigorev/tensorflow-protobuf | 9863a9281eb6caa9be73128c03906d990639208c | [
"Apache-2.0"
] | 1 | 2021-01-27T16:06:16.000Z | 2021-01-27T19:43:38.000Z | tensorflow/core/protobuf/eager_service_pb2.py | alexeygrigorev/tensorflow-protobuf | 9863a9281eb6caa9be73128c03906d990639208c | [
"Apache-2.0"
] | 1 | 2021-02-11T11:46:01.000Z | 2021-02-11T11:46:01.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/core/protobuf/eager_service.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorflow.core.framework import attr_value_pb2 as tensorflow_dot_core_dot_framework_dot_attr__value__pb2
from tensorflow.core.framework import device_attributes_pb2 as tensorflow_dot_core_dot_framework_dot_device__attributes__pb2
from tensorflow.core.framework import function_pb2 as tensorflow_dot_core_dot_framework_dot_function__pb2
from tensorflow.core.framework import tensor_pb2 as tensorflow_dot_core_dot_framework_dot_tensor__pb2
from tensorflow.core.framework import tensor_shape_pb2 as tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2
from tensorflow.core.framework import versions_pb2 as tensorflow_dot_core_dot_framework_dot_versions__pb2
from tensorflow.core.protobuf import remote_tensor_handle_pb2 as tensorflow_dot_core_dot_protobuf_dot_remote__tensor__handle__pb2
from tensorflow.core.protobuf import tensorflow_server_pb2 as tensorflow_dot_core_dot_protobuf_dot_tensorflow__server__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/core/protobuf/eager_service.proto',
package='tensorflow.eager',
syntax='proto3',
serialized_options=b'ZHgithub.com/tensorflow/tensorflow/tensorflow/go/core/core_protos_go_proto',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n,tensorflow/core/protobuf/eager_service.proto\x12\x10tensorflow.eager\x1a*tensorflow/core/framework/attr_value.proto\x1a\x31tensorflow/core/framework/device_attributes.proto\x1a(tensorflow/core/framework/function.proto\x1a&tensorflow/core/framework/tensor.proto\x1a,tensorflow/core/framework/tensor_shape.proto\x1a(tensorflow/core/framework/versions.proto\x1a\x33tensorflow/core/protobuf/remote_tensor_handle.proto\x1a\x30tensorflow/core/protobuf/tensorflow_server.proto\"\xca\x03\n\tOperation\x12\n\n\x02id\x18\x01 \x01(\x03\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x34\n\top_inputs\x18\n \x03(\x0b\x32!.tensorflow.eager.Operation.Input\x12\x16\n\x0e\x63ontrol_op_ids\x18\x04 \x03(\x03\x12\x35\n\x05\x61ttrs\x18\x05 \x03(\x0b\x32&.tensorflow.eager.Operation.AttrsEntry\x12\x0e\n\x06\x64\x65vice\x18\x06 \x01(\t\x12\x1d\n\x15is_component_function\x18\x07 \x01(\x08\x12\x14\n\x0c\x66unc_step_id\x18\x08 \x01(\x03\x12\x13\n\x0bis_function\x18\t \x01(\x08\x1ay\n\x05Input\x12=\n\rremote_handle\x18\x01 \x01(\x0b\x32$.tensorflow.eager.RemoteTensorHandleH\x00\x12)\n\x06tensor\x18\x02 \x01(\x0b\x32\x17.tensorflow.TensorProtoH\x00\x42\x06\n\x04item\x1a\x43\n\nAttrsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12$\n\x05value\x18\x02 \x01(\x0b\x32\x15.tensorflow.AttrValue:\x02\x38\x01J\x04\x08\x03\x10\x04\"\xe0\x03\n\tQueueItem\x12@\n\x10handle_to_decref\x18\x01 \x01(\x0b\x32$.tensorflow.eager.RemoteTensorHandleH\x00\x12\x30\n\toperation\x18\x02 \x01(\x0b\x32\x1b.tensorflow.eager.OperationH\x00\x12\x35\n\x0bsend_tensor\x18\x03 \x01(\x0b\x32\x1e.tensorflow.eager.SendTensorOpH\x00\x12\x41\n\x11register_function\x18\x04 \x01(\x0b\x32$.tensorflow.eager.RegisterFunctionOpH\x00\x12?\n\x10\x63leanup_function\x18\x05 \x01(\x0b\x32#.tensorflow.eager.CleanupFunctionOpH\x00\x12X\n\x1fsync_remote_executor_for_stream\x18\x06 \x01(\x0b\x32-.tensorflow.eager.SyncRemoteExecutorForStreamH\x00\x12\x42\n\x12send_packed_handle\x18\x07 
\x01(\x0b\x32$.tensorflow.eager.SendPackedHandleOpH\x00\x42\x06\n\x04item\"e\n\rQueueResponse\x12+\n\x05shape\x18\x01 \x03(\x0b\x32\x1c.tensorflow.TensorShapeProto\x12\'\n\x06tensor\x18\x02 \x03(\x0b\x32\x17.tensorflow.TensorProto\"\xb4\x02\n\x14\x43reateContextRequest\x12)\n\nserver_def\x18\x01 \x01(\x0b\x32\x15.tensorflow.ServerDef\x12\r\n\x05\x61sync\x18\x02 \x01(\x08\x12\x17\n\x0fkeep_alive_secs\x18\x03 \x01(\x03\x12+\n\x0bversion_def\x18\x04 \x01(\x0b\x32\x16.tensorflow.VersionDef\x12?\n\x19\x63luster_device_attributes\x18\x06 \x03(\x0b\x32\x1c.tensorflow.DeviceAttributes\x12\x12\n\ncontext_id\x18\x07 \x01(\x06\x12\x17\n\x0f\x63ontext_view_id\x18\x08 \x01(\x06\x12(\n lazy_copy_remote_function_inputs\x18\t \x01(\x08J\x04\x08\x05\x10\x06\"V\n\x15\x43reateContextResponse\x12\x37\n\x11\x64\x65vice_attributes\x18\x02 \x03(\x0b\x32\x1c.tensorflow.DeviceAttributesJ\x04\x08\x01\x10\x02\"\xaf\x01\n\x14UpdateContextRequest\x12)\n\nserver_def\x18\x01 \x01(\x0b\x32\x15.tensorflow.ServerDef\x12?\n\x19\x63luster_device_attributes\x18\x02 \x03(\x0b\x32\x1c.tensorflow.DeviceAttributes\x12\x12\n\ncontext_id\x18\x03 \x01(\x06\x12\x17\n\x0f\x63ontext_view_id\x18\x04 \x01(\x06\"P\n\x15UpdateContextResponse\x12\x37\n\x11\x64\x65vice_attributes\x18\x01 \x03(\x0b\x32\x1c.tensorflow.DeviceAttributes\"P\n\x0e\x45nqueueRequest\x12\x12\n\ncontext_id\x18\x01 \x01(\x06\x12*\n\x05queue\x18\x03 \x03(\x0b\x32\x1b.tensorflow.eager.QueueItem\"J\n\x0f\x45nqueueResponse\x12\x37\n\x0equeue_response\x18\x01 \x03(\x0b\x32\x1f.tensorflow.eager.QueueResponse\"9\n\x14WaitQueueDoneRequest\x12\x12\n\ncontext_id\x18\x01 \x01(\x06\x12\r\n\x05op_id\x18\x02 \x03(\x03\"\x17\n\x15WaitQueueDoneResponse\"a\n\x1bRunComponentFunctionRequest\x12\x12\n\ncontext_id\x18\x01 \x01(\x06\x12.\n\toperation\x18\x02 \x01(\x0b\x32\x1b.tensorflow.eager.Operation\"t\n\x1cRunComponentFunctionResponse\x12+\n\x05shape\x18\x01 \x03(\x0b\x32\x1c.tensorflow.TensorShapeProto\x12\'\n\x06tensor\x18\x02 
\x03(\x0b\x32\x17.tensorflow.TensorProto\"&\n\x10KeepAliveRequest\x12\x12\n\ncontext_id\x18\x01 \x01(\x06\",\n\x11KeepAliveResponse\x12\x17\n\x0f\x63ontext_view_id\x18\x01 \x01(\x06\"B\n\x13\x43loseContextRequest\x12\x12\n\ncontext_id\x18\x01 \x01(\x06\x12\x17\n\x0f\x63ontext_view_id\x18\x02 \x01(\x06\"\x16\n\x14\x43loseContextResponse\"\x93\x01\n\x12RegisterFunctionOp\x12-\n\x0c\x66unction_def\x18\x01 \x01(\x0b\x32\x17.tensorflow.FunctionDef\x12\x1d\n\x15is_component_function\x18\x02 \x01(\x08\x12/\n\x07library\x18\x03 \x01(\x0b\x32\x1e.tensorflow.FunctionDefLibrary\"$\n\x11\x43leanupFunctionOp\x12\x0f\n\x07step_id\x18\x01 \x01(\x03\"\x1d\n\x1bSyncRemoteExecutorForStream\"\\\n\x0cSendTensorOp\x12\r\n\x05op_id\x18\x01 \x01(\x03\x12(\n\x07tensors\x18\x02 \x03(\x0b\x32\x17.tensorflow.TensorProto\x12\x13\n\x0b\x64\x65vice_name\x18\x03 \x01(\t\"\xe6\x02\n\x12SendPackedHandleOp\x12\r\n\x05op_id\x18\x01 \x01(\x03\x12<\n\x07handles\x18\x02 \x03(\x0b\x32+.tensorflow.eager.SendPackedHandleOp.Handle\x12\x13\n\x0b\x64\x65vice_name\x18\x03 \x01(\t\x1aL\n\x11LocalTensorHandle\x12\'\n\x06tensor\x18\x01 \x01(\x0b\x32\x17.tensorflow.TensorProto\x12\x0e\n\x06\x64\x65vice\x18\x02 \x01(\t\x1a\x9f\x01\n\x06Handle\x12N\n\x0clocal_handle\x18\x01 \x01(\x0b\x32\x36.tensorflow.eager.SendPackedHandleOp.LocalTensorHandleH\x00\x12=\n\rremote_handle\x18\x02 \x01(\x0b\x32$.tensorflow.eager.RemoteTensorHandleH\x00\x42\x06\n\x04item2\x8d\x06\n\x0c\x45\x61gerService\x12`\n\rCreateContext\x12&.tensorflow.eager.CreateContextRequest\x1a\'.tensorflow.eager.CreateContextResponse\x12`\n\rUpdateContext\x12&.tensorflow.eager.UpdateContextRequest\x1a\'.tensorflow.eager.UpdateContextResponse\x12N\n\x07\x45nqueue\x12 .tensorflow.eager.EnqueueRequest\x1a!.tensorflow.eager.EnqueueResponse\x12[\n\x10StreamingEnqueue\x12 
.tensorflow.eager.EnqueueRequest\x1a!.tensorflow.eager.EnqueueResponse(\x01\x30\x01\x12`\n\rWaitQueueDone\x12&.tensorflow.eager.WaitQueueDoneRequest\x1a\'.tensorflow.eager.WaitQueueDoneResponse\x12u\n\x14RunComponentFunction\x12-.tensorflow.eager.RunComponentFunctionRequest\x1a..tensorflow.eager.RunComponentFunctionResponse\x12T\n\tKeepAlive\x12\".tensorflow.eager.KeepAliveRequest\x1a#.tensorflow.eager.KeepAliveResponse\x12]\n\x0c\x43loseContext\x12%.tensorflow.eager.CloseContextRequest\x1a&.tensorflow.eager.CloseContextResponseBJZHgithub.com/tensorflow/tensorflow/tensorflow/go/core/core_protos_go_protob\x06proto3'
,
dependencies=[tensorflow_dot_core_dot_framework_dot_attr__value__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_device__attributes__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_function__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_tensor__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_versions__pb2.DESCRIPTOR,tensorflow_dot_core_dot_protobuf_dot_remote__tensor__handle__pb2.DESCRIPTOR,tensorflow_dot_core_dot_protobuf_dot_tensorflow__server__pb2.DESCRIPTOR,])
_OPERATION_INPUT = _descriptor.Descriptor(
name='Input',
full_name='tensorflow.eager.Operation.Input',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='remote_handle', full_name='tensorflow.eager.Operation.Input.remote_handle', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tensor', full_name='tensorflow.eager.Operation.Input.tensor', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='item', full_name='tensorflow.eager.Operation.Input.item',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=697,
serialized_end=818,
)
_OPERATION_ATTRSENTRY = _descriptor.Descriptor(
name='AttrsEntry',
full_name='tensorflow.eager.Operation.AttrsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='tensorflow.eager.Operation.AttrsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='tensorflow.eager.Operation.AttrsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=820,
serialized_end=887,
)
_OPERATION = _descriptor.Descriptor(
name='Operation',
full_name='tensorflow.eager.Operation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='tensorflow.eager.Operation.id', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='tensorflow.eager.Operation.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='op_inputs', full_name='tensorflow.eager.Operation.op_inputs', index=2,
number=10, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='control_op_ids', full_name='tensorflow.eager.Operation.control_op_ids', index=3,
number=4, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='attrs', full_name='tensorflow.eager.Operation.attrs', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='device', full_name='tensorflow.eager.Operation.device', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='is_component_function', full_name='tensorflow.eager.Operation.is_component_function', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='func_step_id', full_name='tensorflow.eager.Operation.func_step_id', index=7,
number=8, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='is_function', full_name='tensorflow.eager.Operation.is_function', index=8,
number=9, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_OPERATION_INPUT, _OPERATION_ATTRSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=435,
serialized_end=893,
)
_QUEUEITEM = _descriptor.Descriptor(
name='QueueItem',
full_name='tensorflow.eager.QueueItem',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='handle_to_decref', full_name='tensorflow.eager.QueueItem.handle_to_decref', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='operation', full_name='tensorflow.eager.QueueItem.operation', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='send_tensor', full_name='tensorflow.eager.QueueItem.send_tensor', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='register_function', full_name='tensorflow.eager.QueueItem.register_function', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cleanup_function', full_name='tensorflow.eager.QueueItem.cleanup_function', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='sync_remote_executor_for_stream', full_name='tensorflow.eager.QueueItem.sync_remote_executor_for_stream', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='send_packed_handle', full_name='tensorflow.eager.QueueItem.send_packed_handle', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='item', full_name='tensorflow.eager.QueueItem.item',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=896,
serialized_end=1376,
)
_QUEUERESPONSE = _descriptor.Descriptor(
name='QueueResponse',
full_name='tensorflow.eager.QueueResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='shape', full_name='tensorflow.eager.QueueResponse.shape', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tensor', full_name='tensorflow.eager.QueueResponse.tensor', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1378,
serialized_end=1479,
)
_CREATECONTEXTREQUEST = _descriptor.Descriptor(
name='CreateContextRequest',
full_name='tensorflow.eager.CreateContextRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='server_def', full_name='tensorflow.eager.CreateContextRequest.server_def', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='async', full_name='tensorflow.eager.CreateContextRequest.async', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='keep_alive_secs', full_name='tensorflow.eager.CreateContextRequest.keep_alive_secs', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='version_def', full_name='tensorflow.eager.CreateContextRequest.version_def', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cluster_device_attributes', full_name='tensorflow.eager.CreateContextRequest.cluster_device_attributes', index=4,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='context_id', full_name='tensorflow.eager.CreateContextRequest.context_id', index=5,
number=7, type=6, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='context_view_id', full_name='tensorflow.eager.CreateContextRequest.context_view_id', index=6,
number=8, type=6, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='lazy_copy_remote_function_inputs', full_name='tensorflow.eager.CreateContextRequest.lazy_copy_remote_function_inputs', index=7,
number=9, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1482,
serialized_end=1790,
)
_CREATECONTEXTRESPONSE = _descriptor.Descriptor(
name='CreateContextResponse',
full_name='tensorflow.eager.CreateContextResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='device_attributes', full_name='tensorflow.eager.CreateContextResponse.device_attributes', index=0,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1792,
serialized_end=1878,
)
_UPDATECONTEXTREQUEST = _descriptor.Descriptor(
name='UpdateContextRequest',
full_name='tensorflow.eager.UpdateContextRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='server_def', full_name='tensorflow.eager.UpdateContextRequest.server_def', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cluster_device_attributes', full_name='tensorflow.eager.UpdateContextRequest.cluster_device_attributes', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='context_id', full_name='tensorflow.eager.UpdateContextRequest.context_id', index=2,
number=3, type=6, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='context_view_id', full_name='tensorflow.eager.UpdateContextRequest.context_view_id', index=3,
number=4, type=6, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1881,
serialized_end=2056,
)
_UPDATECONTEXTRESPONSE = _descriptor.Descriptor(
name='UpdateContextResponse',
full_name='tensorflow.eager.UpdateContextResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='device_attributes', full_name='tensorflow.eager.UpdateContextResponse.device_attributes', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2058,
serialized_end=2138,
)
_ENQUEUEREQUEST = _descriptor.Descriptor(
name='EnqueueRequest',
full_name='tensorflow.eager.EnqueueRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='context_id', full_name='tensorflow.eager.EnqueueRequest.context_id', index=0,
number=1, type=6, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='queue', full_name='tensorflow.eager.EnqueueRequest.queue', index=1,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2140,
serialized_end=2220,
)
_ENQUEUERESPONSE = _descriptor.Descriptor(
name='EnqueueResponse',
full_name='tensorflow.eager.EnqueueResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='queue_response', full_name='tensorflow.eager.EnqueueResponse.queue_response', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2222,
serialized_end=2296,
)
_WAITQUEUEDONEREQUEST = _descriptor.Descriptor(
name='WaitQueueDoneRequest',
full_name='tensorflow.eager.WaitQueueDoneRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='context_id', full_name='tensorflow.eager.WaitQueueDoneRequest.context_id', index=0,
number=1, type=6, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='op_id', full_name='tensorflow.eager.WaitQueueDoneRequest.op_id', index=1,
number=2, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2298,
serialized_end=2355,
)
# ---------------------------------------------------------------------------
# Machine-generated message Descriptors (protoc output -- note the
# "@@protoc_insertion_point" markers further down this module).  Each
# Descriptor mirrors one message in tensorflow/core/protobuf/eager_service.proto;
# the serialized_start/serialized_end offsets index into the file's serialized
# FileDescriptorProto.  Do not edit by hand: regenerate from the .proto.
# ---------------------------------------------------------------------------

# tensorflow.eager.WaitQueueDoneResponse: an empty (field-less) message.
_WAITQUEUEDONERESPONSE = _descriptor.Descriptor(
  name='WaitQueueDoneResponse',
  full_name='tensorflow.eager.WaitQueueDoneResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2357,
  serialized_end=2380,
)
# tensorflow.eager.RunComponentFunctionRequest: context_id (fixed64) plus the
# Operation message describing the component function to run.
_RUNCOMPONENTFUNCTIONREQUEST = _descriptor.Descriptor(
  name='RunComponentFunctionRequest',
  full_name='tensorflow.eager.RunComponentFunctionRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='context_id', full_name='tensorflow.eager.RunComponentFunctionRequest.context_id', index=0,
      number=1, type=6, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='operation', full_name='tensorflow.eager.RunComponentFunctionRequest.operation', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2382,
  serialized_end=2479,
)
# tensorflow.eager.RunComponentFunctionResponse: repeated shape/tensor results.
_RUNCOMPONENTFUNCTIONRESPONSE = _descriptor.Descriptor(
  name='RunComponentFunctionResponse',
  full_name='tensorflow.eager.RunComponentFunctionResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='shape', full_name='tensorflow.eager.RunComponentFunctionResponse.shape', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='tensor', full_name='tensorflow.eager.RunComponentFunctionResponse.tensor', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2481,
  serialized_end=2597,
)
# tensorflow.eager.KeepAliveRequest: just the fixed64 context_id to keep alive.
_KEEPALIVEREQUEST = _descriptor.Descriptor(
  name='KeepAliveRequest',
  full_name='tensorflow.eager.KeepAliveRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='context_id', full_name='tensorflow.eager.KeepAliveRequest.context_id', index=0,
      number=1, type=6, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2599,
  serialized_end=2637,
)
# tensorflow.eager.KeepAliveResponse: the server's current context_view_id.
_KEEPALIVERESPONSE = _descriptor.Descriptor(
  name='KeepAliveResponse',
  full_name='tensorflow.eager.KeepAliveResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='context_view_id', full_name='tensorflow.eager.KeepAliveResponse.context_view_id', index=0,
      number=1, type=6, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2639,
  serialized_end=2683,
)
# tensorflow.eager.CloseContextRequest: context_id + context_view_id to close.
_CLOSECONTEXTREQUEST = _descriptor.Descriptor(
  name='CloseContextRequest',
  full_name='tensorflow.eager.CloseContextRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='context_id', full_name='tensorflow.eager.CloseContextRequest.context_id', index=0,
      number=1, type=6, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='context_view_id', full_name='tensorflow.eager.CloseContextRequest.context_view_id', index=1,
      number=2, type=6, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2685,
  serialized_end=2751,
)
# tensorflow.eager.CloseContextResponse: empty acknowledgement message.
_CLOSECONTEXTRESPONSE = _descriptor.Descriptor(
  name='CloseContextResponse',
  full_name='tensorflow.eager.CloseContextResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2753,
  serialized_end=2775,
)
# tensorflow.eager.RegisterFunctionOp: function_def + is_component_function
# flag + the FunctionDefLibrary needed by the function.
_REGISTERFUNCTIONOP = _descriptor.Descriptor(
  name='RegisterFunctionOp',
  full_name='tensorflow.eager.RegisterFunctionOp',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='function_def', full_name='tensorflow.eager.RegisterFunctionOp.function_def', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='is_component_function', full_name='tensorflow.eager.RegisterFunctionOp.is_component_function', index=1,
      number=2, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='library', full_name='tensorflow.eager.RegisterFunctionOp.library', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2778,
  serialized_end=2925,
)
# tensorflow.eager.CleanupFunctionOp: the int64 step_id whose resources to free.
_CLEANUPFUNCTIONOP = _descriptor.Descriptor(
  name='CleanupFunctionOp',
  full_name='tensorflow.eager.CleanupFunctionOp',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='step_id', full_name='tensorflow.eager.CleanupFunctionOp.step_id', index=0,
      number=1, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2927,
  serialized_end=2963,
)
# tensorflow.eager.SyncRemoteExecutorForStream: empty marker message.
_SYNCREMOTEEXECUTORFORSTREAM = _descriptor.Descriptor(
  name='SyncRemoteExecutorForStream',
  full_name='tensorflow.eager.SyncRemoteExecutorForStream',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2965,
  serialized_end=2994,
)
# tensorflow.eager.SendTensorOp: op_id + repeated tensors + target device name.
_SENDTENSOROP = _descriptor.Descriptor(
  name='SendTensorOp',
  full_name='tensorflow.eager.SendTensorOp',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='op_id', full_name='tensorflow.eager.SendTensorOp.op_id', index=0,
      number=1, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='tensors', full_name='tensorflow.eager.SendTensorOp.tensors', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='device_name', full_name='tensorflow.eager.SendTensorOp.device_name', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2996,
  serialized_end=3088,
)
# Nested tensorflow.eager.SendPackedHandleOp.LocalTensorHandle: tensor + device.
_SENDPACKEDHANDLEOP_LOCALTENSORHANDLE = _descriptor.Descriptor(
  name='LocalTensorHandle',
  full_name='tensorflow.eager.SendPackedHandleOp.LocalTensorHandle',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='tensor', full_name='tensorflow.eager.SendPackedHandleOp.LocalTensorHandle.tensor', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='device', full_name='tensorflow.eager.SendPackedHandleOp.LocalTensorHandle.device', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3211,
  serialized_end=3287,
)
# Nested tensorflow.eager.SendPackedHandleOp.Handle: oneof 'item' holding either
# a local_handle (LocalTensorHandle) or a remote_handle (RemoteTensorHandle).
_SENDPACKEDHANDLEOP_HANDLE = _descriptor.Descriptor(
  name='Handle',
  full_name='tensorflow.eager.SendPackedHandleOp.Handle',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='local_handle', full_name='tensorflow.eager.SendPackedHandleOp.Handle.local_handle', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='remote_handle', full_name='tensorflow.eager.SendPackedHandleOp.Handle.remote_handle', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
    _descriptor.OneofDescriptor(
      name='item', full_name='tensorflow.eager.SendPackedHandleOp.Handle.item',
      index=0, containing_type=None,
      create_key=_descriptor._internal_create_key,
    fields=[]),
  ],
  serialized_start=3290,
  serialized_end=3449,
)
# tensorflow.eager.SendPackedHandleOp: op_id + repeated Handle + device name;
# LocalTensorHandle and Handle above are registered here as nested types.
_SENDPACKEDHANDLEOP = _descriptor.Descriptor(
  name='SendPackedHandleOp',
  full_name='tensorflow.eager.SendPackedHandleOp',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='op_id', full_name='tensorflow.eager.SendPackedHandleOp.op_id', index=0,
      number=1, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='handles', full_name='tensorflow.eager.SendPackedHandleOp.handles', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='device_name', full_name='tensorflow.eager.SendPackedHandleOp.device_name', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[_SENDPACKEDHANDLEOP_LOCALTENSORHANDLE, _SENDPACKEDHANDLEOP_HANDLE, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3091,
  serialized_end=3449,
)
# ---------------------------------------------------------------------------
# Generated cross-reference wiring: patch message_type / containing_type /
# containing_oneof links between the Descriptors above (protoc emits these
# after all Descriptors exist so forward references resolve), then register
# every message type on the file DESCRIPTOR.  Do not edit by hand.
# ---------------------------------------------------------------------------
_OPERATION_INPUT.fields_by_name['remote_handle'].message_type = tensorflow_dot_core_dot_protobuf_dot_remote__tensor__handle__pb2._REMOTETENSORHANDLE
_OPERATION_INPUT.fields_by_name['tensor'].message_type = tensorflow_dot_core_dot_framework_dot_tensor__pb2._TENSORPROTO
_OPERATION_INPUT.containing_type = _OPERATION
# Operation.Input oneof 'item': remote_handle | tensor.
_OPERATION_INPUT.oneofs_by_name['item'].fields.append(
  _OPERATION_INPUT.fields_by_name['remote_handle'])
_OPERATION_INPUT.fields_by_name['remote_handle'].containing_oneof = _OPERATION_INPUT.oneofs_by_name['item']
_OPERATION_INPUT.oneofs_by_name['item'].fields.append(
  _OPERATION_INPUT.fields_by_name['tensor'])
_OPERATION_INPUT.fields_by_name['tensor'].containing_oneof = _OPERATION_INPUT.oneofs_by_name['item']
_OPERATION_ATTRSENTRY.fields_by_name['value'].message_type = tensorflow_dot_core_dot_framework_dot_attr__value__pb2._ATTRVALUE
_OPERATION_ATTRSENTRY.containing_type = _OPERATION
_OPERATION.fields_by_name['op_inputs'].message_type = _OPERATION_INPUT
_OPERATION.fields_by_name['attrs'].message_type = _OPERATION_ATTRSENTRY
_QUEUEITEM.fields_by_name['handle_to_decref'].message_type = tensorflow_dot_core_dot_protobuf_dot_remote__tensor__handle__pb2._REMOTETENSORHANDLE
_QUEUEITEM.fields_by_name['operation'].message_type = _OPERATION
_QUEUEITEM.fields_by_name['send_tensor'].message_type = _SENDTENSOROP
_QUEUEITEM.fields_by_name['register_function'].message_type = _REGISTERFUNCTIONOP
_QUEUEITEM.fields_by_name['cleanup_function'].message_type = _CLEANUPFUNCTIONOP
_QUEUEITEM.fields_by_name['sync_remote_executor_for_stream'].message_type = _SYNCREMOTEEXECUTORFORSTREAM
_QUEUEITEM.fields_by_name['send_packed_handle'].message_type = _SENDPACKEDHANDLEOP
# QueueItem oneof 'item': one of the seven queue-able operation kinds.
_QUEUEITEM.oneofs_by_name['item'].fields.append(
  _QUEUEITEM.fields_by_name['handle_to_decref'])
_QUEUEITEM.fields_by_name['handle_to_decref'].containing_oneof = _QUEUEITEM.oneofs_by_name['item']
_QUEUEITEM.oneofs_by_name['item'].fields.append(
  _QUEUEITEM.fields_by_name['operation'])
_QUEUEITEM.fields_by_name['operation'].containing_oneof = _QUEUEITEM.oneofs_by_name['item']
_QUEUEITEM.oneofs_by_name['item'].fields.append(
  _QUEUEITEM.fields_by_name['send_tensor'])
_QUEUEITEM.fields_by_name['send_tensor'].containing_oneof = _QUEUEITEM.oneofs_by_name['item']
_QUEUEITEM.oneofs_by_name['item'].fields.append(
  _QUEUEITEM.fields_by_name['register_function'])
_QUEUEITEM.fields_by_name['register_function'].containing_oneof = _QUEUEITEM.oneofs_by_name['item']
_QUEUEITEM.oneofs_by_name['item'].fields.append(
  _QUEUEITEM.fields_by_name['cleanup_function'])
_QUEUEITEM.fields_by_name['cleanup_function'].containing_oneof = _QUEUEITEM.oneofs_by_name['item']
_QUEUEITEM.oneofs_by_name['item'].fields.append(
  _QUEUEITEM.fields_by_name['sync_remote_executor_for_stream'])
_QUEUEITEM.fields_by_name['sync_remote_executor_for_stream'].containing_oneof = _QUEUEITEM.oneofs_by_name['item']
_QUEUEITEM.oneofs_by_name['item'].fields.append(
  _QUEUEITEM.fields_by_name['send_packed_handle'])
_QUEUEITEM.fields_by_name['send_packed_handle'].containing_oneof = _QUEUEITEM.oneofs_by_name['item']
_QUEUERESPONSE.fields_by_name['shape'].message_type = tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2._TENSORSHAPEPROTO
_QUEUERESPONSE.fields_by_name['tensor'].message_type = tensorflow_dot_core_dot_framework_dot_tensor__pb2._TENSORPROTO
_CREATECONTEXTREQUEST.fields_by_name['server_def'].message_type = tensorflow_dot_core_dot_protobuf_dot_tensorflow__server__pb2._SERVERDEF
_CREATECONTEXTREQUEST.fields_by_name['version_def'].message_type = tensorflow_dot_core_dot_framework_dot_versions__pb2._VERSIONDEF
_CREATECONTEXTREQUEST.fields_by_name['cluster_device_attributes'].message_type = tensorflow_dot_core_dot_framework_dot_device__attributes__pb2._DEVICEATTRIBUTES
_CREATECONTEXTRESPONSE.fields_by_name['device_attributes'].message_type = tensorflow_dot_core_dot_framework_dot_device__attributes__pb2._DEVICEATTRIBUTES
_UPDATECONTEXTREQUEST.fields_by_name['server_def'].message_type = tensorflow_dot_core_dot_protobuf_dot_tensorflow__server__pb2._SERVERDEF
_UPDATECONTEXTREQUEST.fields_by_name['cluster_device_attributes'].message_type = tensorflow_dot_core_dot_framework_dot_device__attributes__pb2._DEVICEATTRIBUTES
_UPDATECONTEXTRESPONSE.fields_by_name['device_attributes'].message_type = tensorflow_dot_core_dot_framework_dot_device__attributes__pb2._DEVICEATTRIBUTES
_ENQUEUEREQUEST.fields_by_name['queue'].message_type = _QUEUEITEM
_ENQUEUERESPONSE.fields_by_name['queue_response'].message_type = _QUEUERESPONSE
_RUNCOMPONENTFUNCTIONREQUEST.fields_by_name['operation'].message_type = _OPERATION
_RUNCOMPONENTFUNCTIONRESPONSE.fields_by_name['shape'].message_type = tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2._TENSORSHAPEPROTO
_RUNCOMPONENTFUNCTIONRESPONSE.fields_by_name['tensor'].message_type = tensorflow_dot_core_dot_framework_dot_tensor__pb2._TENSORPROTO
_REGISTERFUNCTIONOP.fields_by_name['function_def'].message_type = tensorflow_dot_core_dot_framework_dot_function__pb2._FUNCTIONDEF
_REGISTERFUNCTIONOP.fields_by_name['library'].message_type = tensorflow_dot_core_dot_framework_dot_function__pb2._FUNCTIONDEFLIBRARY
_SENDTENSOROP.fields_by_name['tensors'].message_type = tensorflow_dot_core_dot_framework_dot_tensor__pb2._TENSORPROTO
_SENDPACKEDHANDLEOP_LOCALTENSORHANDLE.fields_by_name['tensor'].message_type = tensorflow_dot_core_dot_framework_dot_tensor__pb2._TENSORPROTO
_SENDPACKEDHANDLEOP_LOCALTENSORHANDLE.containing_type = _SENDPACKEDHANDLEOP
_SENDPACKEDHANDLEOP_HANDLE.fields_by_name['local_handle'].message_type = _SENDPACKEDHANDLEOP_LOCALTENSORHANDLE
_SENDPACKEDHANDLEOP_HANDLE.fields_by_name['remote_handle'].message_type = tensorflow_dot_core_dot_protobuf_dot_remote__tensor__handle__pb2._REMOTETENSORHANDLE
_SENDPACKEDHANDLEOP_HANDLE.containing_type = _SENDPACKEDHANDLEOP
# SendPackedHandleOp.Handle oneof 'item': local_handle | remote_handle.
_SENDPACKEDHANDLEOP_HANDLE.oneofs_by_name['item'].fields.append(
  _SENDPACKEDHANDLEOP_HANDLE.fields_by_name['local_handle'])
_SENDPACKEDHANDLEOP_HANDLE.fields_by_name['local_handle'].containing_oneof = _SENDPACKEDHANDLEOP_HANDLE.oneofs_by_name['item']
_SENDPACKEDHANDLEOP_HANDLE.oneofs_by_name['item'].fields.append(
  _SENDPACKEDHANDLEOP_HANDLE.fields_by_name['remote_handle'])
_SENDPACKEDHANDLEOP_HANDLE.fields_by_name['remote_handle'].containing_oneof = _SENDPACKEDHANDLEOP_HANDLE.oneofs_by_name['item']
_SENDPACKEDHANDLEOP.fields_by_name['handles'].message_type = _SENDPACKEDHANDLEOP_HANDLE
DESCRIPTOR.message_types_by_name['Operation'] = _OPERATION
DESCRIPTOR.message_types_by_name['QueueItem'] = _QUEUEITEM
DESCRIPTOR.message_types_by_name['QueueResponse'] = _QUEUERESPONSE
DESCRIPTOR.message_types_by_name['CreateContextRequest'] = _CREATECONTEXTREQUEST
DESCRIPTOR.message_types_by_name['CreateContextResponse'] = _CREATECONTEXTRESPONSE
DESCRIPTOR.message_types_by_name['UpdateContextRequest'] = _UPDATECONTEXTREQUEST
DESCRIPTOR.message_types_by_name['UpdateContextResponse'] = _UPDATECONTEXTRESPONSE
DESCRIPTOR.message_types_by_name['EnqueueRequest'] = _ENQUEUEREQUEST
DESCRIPTOR.message_types_by_name['EnqueueResponse'] = _ENQUEUERESPONSE
DESCRIPTOR.message_types_by_name['WaitQueueDoneRequest'] = _WAITQUEUEDONEREQUEST
DESCRIPTOR.message_types_by_name['WaitQueueDoneResponse'] = _WAITQUEUEDONERESPONSE
DESCRIPTOR.message_types_by_name['RunComponentFunctionRequest'] = _RUNCOMPONENTFUNCTIONREQUEST
DESCRIPTOR.message_types_by_name['RunComponentFunctionResponse'] = _RUNCOMPONENTFUNCTIONRESPONSE
DESCRIPTOR.message_types_by_name['KeepAliveRequest'] = _KEEPALIVEREQUEST
DESCRIPTOR.message_types_by_name['KeepAliveResponse'] = _KEEPALIVERESPONSE
DESCRIPTOR.message_types_by_name['CloseContextRequest'] = _CLOSECONTEXTREQUEST
DESCRIPTOR.message_types_by_name['CloseContextResponse'] = _CLOSECONTEXTRESPONSE
DESCRIPTOR.message_types_by_name['RegisterFunctionOp'] = _REGISTERFUNCTIONOP
DESCRIPTOR.message_types_by_name['CleanupFunctionOp'] = _CLEANUPFUNCTIONOP
DESCRIPTOR.message_types_by_name['SyncRemoteExecutorForStream'] = _SYNCREMOTEEXECUTORFORSTREAM
DESCRIPTOR.message_types_by_name['SendTensorOp'] = _SENDTENSOROP
DESCRIPTOR.message_types_by_name['SendPackedHandleOp'] = _SENDPACKEDHANDLEOP
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# ---------------------------------------------------------------------------
# Generated concrete message classes: each is synthesized at import time from
# its Descriptor via _reflection.GeneratedProtocolMessageType and registered
# with the default symbol database.  Do not edit by hand.
# ---------------------------------------------------------------------------
Operation = _reflection.GeneratedProtocolMessageType('Operation', (_message.Message,), {
  'Input' : _reflection.GeneratedProtocolMessageType('Input', (_message.Message,), {
    'DESCRIPTOR' : _OPERATION_INPUT,
    '__module__' : 'tensorflow.core.protobuf.eager_service_pb2'
    # @@protoc_insertion_point(class_scope:tensorflow.eager.Operation.Input)
    })
  ,
  'AttrsEntry' : _reflection.GeneratedProtocolMessageType('AttrsEntry', (_message.Message,), {
    'DESCRIPTOR' : _OPERATION_ATTRSENTRY,
    '__module__' : 'tensorflow.core.protobuf.eager_service_pb2'
    # @@protoc_insertion_point(class_scope:tensorflow.eager.Operation.AttrsEntry)
    })
  ,
  'DESCRIPTOR' : _OPERATION,
  '__module__' : 'tensorflow.core.protobuf.eager_service_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.eager.Operation)
  })
_sym_db.RegisterMessage(Operation)
_sym_db.RegisterMessage(Operation.Input)
_sym_db.RegisterMessage(Operation.AttrsEntry)
QueueItem = _reflection.GeneratedProtocolMessageType('QueueItem', (_message.Message,), {
  'DESCRIPTOR' : _QUEUEITEM,
  '__module__' : 'tensorflow.core.protobuf.eager_service_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.eager.QueueItem)
  })
_sym_db.RegisterMessage(QueueItem)
QueueResponse = _reflection.GeneratedProtocolMessageType('QueueResponse', (_message.Message,), {
  'DESCRIPTOR' : _QUEUERESPONSE,
  '__module__' : 'tensorflow.core.protobuf.eager_service_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.eager.QueueResponse)
  })
_sym_db.RegisterMessage(QueueResponse)
CreateContextRequest = _reflection.GeneratedProtocolMessageType('CreateContextRequest', (_message.Message,), {
  'DESCRIPTOR' : _CREATECONTEXTREQUEST,
  '__module__' : 'tensorflow.core.protobuf.eager_service_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.eager.CreateContextRequest)
  })
_sym_db.RegisterMessage(CreateContextRequest)
CreateContextResponse = _reflection.GeneratedProtocolMessageType('CreateContextResponse', (_message.Message,), {
  'DESCRIPTOR' : _CREATECONTEXTRESPONSE,
  '__module__' : 'tensorflow.core.protobuf.eager_service_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.eager.CreateContextResponse)
  })
_sym_db.RegisterMessage(CreateContextResponse)
UpdateContextRequest = _reflection.GeneratedProtocolMessageType('UpdateContextRequest', (_message.Message,), {
  'DESCRIPTOR' : _UPDATECONTEXTREQUEST,
  '__module__' : 'tensorflow.core.protobuf.eager_service_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.eager.UpdateContextRequest)
  })
_sym_db.RegisterMessage(UpdateContextRequest)
UpdateContextResponse = _reflection.GeneratedProtocolMessageType('UpdateContextResponse', (_message.Message,), {
  'DESCRIPTOR' : _UPDATECONTEXTRESPONSE,
  '__module__' : 'tensorflow.core.protobuf.eager_service_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.eager.UpdateContextResponse)
  })
_sym_db.RegisterMessage(UpdateContextResponse)
EnqueueRequest = _reflection.GeneratedProtocolMessageType('EnqueueRequest', (_message.Message,), {
  'DESCRIPTOR' : _ENQUEUEREQUEST,
  '__module__' : 'tensorflow.core.protobuf.eager_service_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.eager.EnqueueRequest)
  })
_sym_db.RegisterMessage(EnqueueRequest)
EnqueueResponse = _reflection.GeneratedProtocolMessageType('EnqueueResponse', (_message.Message,), {
  'DESCRIPTOR' : _ENQUEUERESPONSE,
  '__module__' : 'tensorflow.core.protobuf.eager_service_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.eager.EnqueueResponse)
  })
_sym_db.RegisterMessage(EnqueueResponse)
WaitQueueDoneRequest = _reflection.GeneratedProtocolMessageType('WaitQueueDoneRequest', (_message.Message,), {
  'DESCRIPTOR' : _WAITQUEUEDONEREQUEST,
  '__module__' : 'tensorflow.core.protobuf.eager_service_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.eager.WaitQueueDoneRequest)
  })
_sym_db.RegisterMessage(WaitQueueDoneRequest)
WaitQueueDoneResponse = _reflection.GeneratedProtocolMessageType('WaitQueueDoneResponse', (_message.Message,), {
  'DESCRIPTOR' : _WAITQUEUEDONERESPONSE,
  '__module__' : 'tensorflow.core.protobuf.eager_service_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.eager.WaitQueueDoneResponse)
  })
_sym_db.RegisterMessage(WaitQueueDoneResponse)
RunComponentFunctionRequest = _reflection.GeneratedProtocolMessageType('RunComponentFunctionRequest', (_message.Message,), {
  'DESCRIPTOR' : _RUNCOMPONENTFUNCTIONREQUEST,
  '__module__' : 'tensorflow.core.protobuf.eager_service_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.eager.RunComponentFunctionRequest)
  })
_sym_db.RegisterMessage(RunComponentFunctionRequest)
RunComponentFunctionResponse = _reflection.GeneratedProtocolMessageType('RunComponentFunctionResponse', (_message.Message,), {
  'DESCRIPTOR' : _RUNCOMPONENTFUNCTIONRESPONSE,
  '__module__' : 'tensorflow.core.protobuf.eager_service_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.eager.RunComponentFunctionResponse)
  })
_sym_db.RegisterMessage(RunComponentFunctionResponse)
KeepAliveRequest = _reflection.GeneratedProtocolMessageType('KeepAliveRequest', (_message.Message,), {
  'DESCRIPTOR' : _KEEPALIVEREQUEST,
  '__module__' : 'tensorflow.core.protobuf.eager_service_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.eager.KeepAliveRequest)
  })
_sym_db.RegisterMessage(KeepAliveRequest)
KeepAliveResponse = _reflection.GeneratedProtocolMessageType('KeepAliveResponse', (_message.Message,), {
  'DESCRIPTOR' : _KEEPALIVERESPONSE,
  '__module__' : 'tensorflow.core.protobuf.eager_service_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.eager.KeepAliveResponse)
  })
_sym_db.RegisterMessage(KeepAliveResponse)
CloseContextRequest = _reflection.GeneratedProtocolMessageType('CloseContextRequest', (_message.Message,), {
  'DESCRIPTOR' : _CLOSECONTEXTREQUEST,
  '__module__' : 'tensorflow.core.protobuf.eager_service_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.eager.CloseContextRequest)
  })
_sym_db.RegisterMessage(CloseContextRequest)
CloseContextResponse = _reflection.GeneratedProtocolMessageType('CloseContextResponse', (_message.Message,), {
  'DESCRIPTOR' : _CLOSECONTEXTRESPONSE,
  '__module__' : 'tensorflow.core.protobuf.eager_service_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.eager.CloseContextResponse)
  })
_sym_db.RegisterMessage(CloseContextResponse)
RegisterFunctionOp = _reflection.GeneratedProtocolMessageType('RegisterFunctionOp', (_message.Message,), {
  'DESCRIPTOR' : _REGISTERFUNCTIONOP,
  '__module__' : 'tensorflow.core.protobuf.eager_service_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.eager.RegisterFunctionOp)
  })
_sym_db.RegisterMessage(RegisterFunctionOp)
CleanupFunctionOp = _reflection.GeneratedProtocolMessageType('CleanupFunctionOp', (_message.Message,), {
  'DESCRIPTOR' : _CLEANUPFUNCTIONOP,
  '__module__' : 'tensorflow.core.protobuf.eager_service_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.eager.CleanupFunctionOp)
  })
_sym_db.RegisterMessage(CleanupFunctionOp)
SyncRemoteExecutorForStream = _reflection.GeneratedProtocolMessageType('SyncRemoteExecutorForStream', (_message.Message,), {
  'DESCRIPTOR' : _SYNCREMOTEEXECUTORFORSTREAM,
  '__module__' : 'tensorflow.core.protobuf.eager_service_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.eager.SyncRemoteExecutorForStream)
  })
_sym_db.RegisterMessage(SyncRemoteExecutorForStream)
SendTensorOp = _reflection.GeneratedProtocolMessageType('SendTensorOp', (_message.Message,), {
  'DESCRIPTOR' : _SENDTENSOROP,
  '__module__' : 'tensorflow.core.protobuf.eager_service_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.eager.SendTensorOp)
  })
_sym_db.RegisterMessage(SendTensorOp)
SendPackedHandleOp = _reflection.GeneratedProtocolMessageType('SendPackedHandleOp', (_message.Message,), {
  'LocalTensorHandle' : _reflection.GeneratedProtocolMessageType('LocalTensorHandle', (_message.Message,), {
    'DESCRIPTOR' : _SENDPACKEDHANDLEOP_LOCALTENSORHANDLE,
    '__module__' : 'tensorflow.core.protobuf.eager_service_pb2'
    # @@protoc_insertion_point(class_scope:tensorflow.eager.SendPackedHandleOp.LocalTensorHandle)
    })
  ,
  'Handle' : _reflection.GeneratedProtocolMessageType('Handle', (_message.Message,), {
    'DESCRIPTOR' : _SENDPACKEDHANDLEOP_HANDLE,
    '__module__' : 'tensorflow.core.protobuf.eager_service_pb2'
    # @@protoc_insertion_point(class_scope:tensorflow.eager.SendPackedHandleOp.Handle)
    })
  ,
  'DESCRIPTOR' : _SENDPACKEDHANDLEOP,
  '__module__' : 'tensorflow.core.protobuf.eager_service_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.eager.SendPackedHandleOp)
  })
_sym_db.RegisterMessage(SendPackedHandleOp)
_sym_db.RegisterMessage(SendPackedHandleOp.LocalTensorHandle)
_sym_db.RegisterMessage(SendPackedHandleOp.Handle)
# Clear the cached serialized options on the file and the map-entry descriptor.
DESCRIPTOR._options = None
_OPERATION_ATTRSENTRY._options = None
# ---------------------------------------------------------------------------
# Generated ServiceDescriptor for the tensorflow.eager.EagerService RPC
# service: eight methods pairing the request/response Descriptors defined
# above.  Do not edit by hand; regenerate from the .proto.
# ---------------------------------------------------------------------------
_EAGERSERVICE = _descriptor.ServiceDescriptor(
  name='EagerService',
  full_name='tensorflow.eager.EagerService',
  file=DESCRIPTOR,
  index=0,
  serialized_options=None,
  create_key=_descriptor._internal_create_key,
  serialized_start=3452,
  serialized_end=4233,
  methods=[
  _descriptor.MethodDescriptor(
    name='CreateContext',
    full_name='tensorflow.eager.EagerService.CreateContext',
    index=0,
    containing_service=None,
    input_type=_CREATECONTEXTREQUEST,
    output_type=_CREATECONTEXTRESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='UpdateContext',
    full_name='tensorflow.eager.EagerService.UpdateContext',
    index=1,
    containing_service=None,
    input_type=_UPDATECONTEXTREQUEST,
    output_type=_UPDATECONTEXTRESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='Enqueue',
    full_name='tensorflow.eager.EagerService.Enqueue',
    index=2,
    containing_service=None,
    input_type=_ENQUEUEREQUEST,
    output_type=_ENQUEUERESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='StreamingEnqueue',
    full_name='tensorflow.eager.EagerService.StreamingEnqueue',
    index=3,
    containing_service=None,
    input_type=_ENQUEUEREQUEST,
    output_type=_ENQUEUERESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='WaitQueueDone',
    full_name='tensorflow.eager.EagerService.WaitQueueDone',
    index=4,
    containing_service=None,
    input_type=_WAITQUEUEDONEREQUEST,
    output_type=_WAITQUEUEDONERESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='RunComponentFunction',
    full_name='tensorflow.eager.EagerService.RunComponentFunction',
    index=5,
    containing_service=None,
    input_type=_RUNCOMPONENTFUNCTIONREQUEST,
    output_type=_RUNCOMPONENTFUNCTIONRESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='KeepAlive',
    full_name='tensorflow.eager.EagerService.KeepAlive',
    index=6,
    containing_service=None,
    input_type=_KEEPALIVEREQUEST,
    output_type=_KEEPALIVERESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='CloseContext',
    full_name='tensorflow.eager.EagerService.CloseContext',
    index=7,
    containing_service=None,
    input_type=_CLOSECONTEXTREQUEST,
    output_type=_CLOSECONTEXTRESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
])
_sym_db.RegisterServiceDescriptor(_EAGERSERVICE)
DESCRIPTOR.services_by_name['EagerService'] = _EAGERSERVICE
# @@protoc_insertion_point(module_scope)
| 45.268116 | 6,328 | 0.783867 |
2e90cc315d8d974df2f079feb28f844aa8bc88de | 2,387 | py | Python | src/m3_practice_fixing_errors.py | smitham-3/02-ObjectsFunctionsAndMethods-201930 | 7862b5fa8921b45b8341c8d7c9de3a513c56d4a2 | [
"MIT"
] | null | null | null | src/m3_practice_fixing_errors.py | smitham-3/02-ObjectsFunctionsAndMethods-201930 | 7862b5fa8921b45b8341c8d7c9de3a513c56d4a2 | [
"MIT"
] | null | null | null | src/m3_practice_fixing_errors.py | smitham-3/02-ObjectsFunctionsAndMethods-201930 | 7862b5fa8921b45b8341c8d7c9de3a513c56d4a2 | [
"MIT"
] | null | null | null | """
This module lets you practice correcting SYNTAX (notation) errors.
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Aaron Wilkin, their colleagues, and Alex Smith.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
###############################################################################
# DONE: 2.
# Locate the syntax (notation) errors in this file
# by looking for red underlines.
#
# For each error, try to make sense of its message.
# -- Hover and/or expand as needed -- make sure you see the message!
#
# Then fix the errors, one by one. IMPORTANT:
# -- Fixing one error may bring up additional errors
# (after a few seconds or when you run or save the module).
# -- Each time, fix the error that is nearest the TOP of the module.
# -- Often the SOURCE of the error may be on the line
# just BEFORE the line with a red underline.
# -- New errors may appear during the RUN of the module.
#
# Finish by RUNNING the corrected program
# and making sure that it RUNS CORRECTLY.
# That is, make sure that (per the doc-strings) the program
# prints two calculated values and makes a SimpleTurtle do some things.
#
# When finished, COMMIT-and-PUSH your work, as always.
#
###############################################################################
import rosegraphics as rg
import math
def main():
    """Demo driver: run the math printout, then the turtle drawing."""
    for demo in (print_math, turtle_fun):
        demo()
def print_math():
    """Print the cosine and sine of pi, demonstrating the math module."""
    cosine_of_pi = math.cos(math.pi)
    print(cosine_of_pi)
    sine_of_pi = math.sin(math.pi)
    print('The sine of PI is', sine_of_pi)
def turtle_fun():
    """
    Open a TurtleWindow, make a SimpleTurtle draw (blue pen, yellow-filled
    circle, then a forward run), and close the window on a mouse click.
    """
    window = rg.TurtleWindow()
    turtle = rg.SimpleTurtle()
    turtle.pen = rg.Pen('blue', 30)
    turtle.paint_bucket = rg.PaintBucket('yellow')
    turtle.backward(3 * (47 + 16))
    turtle.begin_fill()
    turtle.draw_circle(25)
    turtle.end_fill()
    turtle.forward(200)
    window.close_on_mouse_click()
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# -----------------------------------------------------------------------------
main()  # runs at import time; course starter files deliberately omit a __main__ guard
| 32.256757 | 79 | 0.578132 |
82856000107db08a56b7a1945af129be29b72b48 | 4,432 | py | Python | kb_utils.py | yangshao/vcr | 88513d6958d93bd7845d532d5b83744a678fc980 | [
"MIT"
] | null | null | null | kb_utils.py | yangshao/vcr | 88513d6958d93bd7845d532d5b83744a678fc980 | [
"MIT"
] | null | null | null | kb_utils.py | yangshao/vcr | 88513d6958d93bd7845d532d5b83744a678fc980 | [
"MIT"
] | null | null | null | import json
import os
from collections import defaultdict
img_folder = '/mnt/home/yangshao/vcr/vcr1/vcr1images'
data_folder = '/mnt/home/yangshao/vcr/action_data/'
train_file = 'aug_train.jsonl'
val_file = 'aug_val.jsonl'
test_file = 'aug_test.jsonl'
srl_train_file = 'aug_train_srl.jsonl'
srl_val_file = 'aug_val_srl.jsonl'
srl_test_file = 'aug_test_srl_jsonl'
train_file_action_analysis = 'aug_train_action_vos.jsonl'
val_file_action_analysis = 'aug_val_action_vos.jsonl'
test_file_action_analysis = 'aug_test_action_vos.jsonl'
OBJS, QUESTION, ANSWER, ANS_LABEL, RATIONALE, AUG_RATIONALE, \
RATIONALE_LABEL, PRED, META, IMG_FN, BOX, WIDTH, HEIGHT, NAME, AUG_RATIONALE_LABEL = ['objects', 'question',
'answer_choices', 'answer_label', 'aug_rationales',
'rationale_choices', 'rationale_label',
'crf_pred_label', 'metadata_fn',
'img_fn', 'boxes', 'width', 'height', 'names',
'aug_answer_rationale_indexes']
GENDER_NEUTRAL_NAMES = ['Casey', 'Riley', 'Jessie', 'Jackie', 'Avery', 'Jaime', 'Peyton', 'Kerry', 'Jody', 'Kendall',
'Peyton', 'Skyler', 'Frankie', 'Pat', 'Quinn']
def load_jsonl(file_path):
    """Read a JSON-Lines file and return its records as a list of objects."""
    with open(file_path, 'r') as infile:
        return [json.loads(line) for line in infile]
def replace_obj_name(answer, objects):
    """Replace object-index lists in a tokenized sentence with object names.

    :param answer: sequence of tokens; a token is either a string (kept as-is)
        or a list of int indices into *objects*.
    :param objects: list of object name strings, indexed by the int tokens.
    :return: new list where each index-list token becomes the matching names
        joined with ' and ' (e.g. [0, 2] -> 'person and dog').

    Fixes: uses isinstance() instead of the `type(e) == list` anti-pattern and
    builds the joined string with a generator instead of a throwaway list.
    """
    result = []
    for token in answer:
        if isinstance(token, list):
            result.append(' and '.join(objects[idx] for idx in token))
        else:
            result.append(token)
    return result
def get_data(data_folder, train_file):
    """Load the JSON-Lines file *train_file* located under *data_folder*."""
    return load_jsonl(os.path.join(data_folder, train_file))
def collect_persons(sample):
    """Assign a stable gender-neutral name to every 'person' object index
    appearing in the sample's question, answer choices, or rationale choices.

    :param sample: VCR record; ``sample[OBJS]`` maps object index -> type
        string, and the sentences contain lists of int object indices.
    :return: dict mapping person object index -> name drawn cyclically from
        GENDER_NEUTRAL_NAMES, in first-encounter order (deterministic).

    Fixes: removes the unused ``temp_l`` accumulator, replaces the
    factory-less ``defaultdict()`` (which behaves like a plain dict anyway)
    with a dict, and builds the sentence list declaratively.
    """
    obj_to_type = sample[OBJS]
    # Question first, then every answer and rationale choice, so that the
    # name assignment order is reproducible across calls.
    all_sents = [sample[QUESTION]] + list(sample[ANSWER]) + list(sample[RATIONALE])
    person_dic = {}
    cur_person_idx = 0
    for sent in all_sents:
        for tok in sent:
            if not isinstance(tok, list):
                continue
            for int_name in tok:
                if obj_to_type[int_name] == 'person' and int_name not in person_dic:
                    person_dic[int_name] = GENDER_NEUTRAL_NAMES[cur_person_idx]
                    # Wrap around when there are more persons than names.
                    cur_person_idx = (cur_person_idx + 1) % len(GENDER_NEUTRAL_NAMES)
    return person_dic
def normalize_sentence(sent, sample, person_idx_dic):
    """Flatten a tokenized VCR sentence into a plain string.

    Object-index lists are replaced by object type names (or, for persons,
    by the names assigned in *person_idx_dic*) joined with 'and'. Also
    builds a mapping from token positions in the flattened sentence back to
    positions in the same sentence without the inserted 'and' tokens; the
    inserted 'and' tokens map to -1.

    :param sent: list of tokens (strings or lists of int object indices).
    :param sample: VCR record providing ``sample[OBJS]`` (index -> type).
    :param person_idx_dic: person object index -> assigned name.
    :return: tuple (normalized sentence string, new-to-old index list).
    """
    obj_to_type = sample[OBJS]
    new_sent = []      # tokens including the inserted 'and' separators
    sent_wo_and = []   # the same tokens without the inserted 'and'
    for tok in sent:
        if isinstance(tok, list):
            temp_l = []
            for int_name in tok:
                obj_type = obj_to_type[int_name]
                if(obj_type!='person'):
                    temp_l.append(obj_type)
                    sent_wo_and.append(obj_type)
                else:
                    temp_l.append(person_idx_dic[int_name])
                    sent_wo_and.append(person_idx_dic[int_name])
            # Joining then splitting re-tokenizes multi-word object names.
            new_sent.extend(' and '.join(temp_l).split())
        else:
            tok = tok.strip().split()
            new_sent.extend(tok)
            sent_wo_and.extend(tok)
    # Align the two token streams; inserted 'and' tokens get index -1.
    new2old_idx = [0 for i in range(len(new_sent))]
    i = 0
    j = 0
    # NOTE(review): if a matched token ever fails to align (e.g. an object
    # name that itself contains the word 'and'), neither branch advances and
    # this loop would spin forever -- confirm inputs guarantee alignment.
    while(j<len(new_sent)):
        if(new_sent[j]==sent_wo_and[i]):
            new2old_idx[j] = i
            i += 1
            j += 1
        elif(new_sent[j] == 'and'):
            new2old_idx[j] = -1
            j += 1
    return ' '.join(new_sent), new2old_idx
def write_to_file(samples, path):
    """Serialize *samples* to *path* as JSON Lines (one object per line)."""
    with open(path, 'w') as outfile:
        outfile.writelines(json.dumps(sample) + '\n' for sample in samples)
def write_lists(lists, path):
    """Write each inner list of *lists* to *path* as one space-separated line."""
    with open(path, 'w') as outfile:
        for row in lists:
            outfile.write(' '.join(str(item) for item in row) + '\n')
2cfe77eb54c96c4281e2a24618988ced5d0c23f4 | 4,832 | py | Python | trafpy/benchmarker/versions/benchmark_v001/config.py | cwfparsonson/trafpy | 23b27abb2352990522b21dc1b14f0310abf84a17 | [
"Apache-2.0"
] | 4 | 2020-08-28T18:24:11.000Z | 2020-11-13T07:26:18.000Z | trafpy/benchmarker/versions/benchmark_v001/config.py | cwfparsonson/trafpy | 23b27abb2352990522b21dc1b14f0310abf84a17 | [
"Apache-2.0"
] | 4 | 2020-09-14T11:31:09.000Z | 2020-09-21T16:00:20.000Z | trafpy/benchmarker/versions/benchmark_v001/config.py | cwfparsonson/trafpy | 23b27abb2352990522b21dc1b14f0310abf84a17 | [
"Apache-2.0"
] | null | null | null | import trafpy
from trafpy.generator.src.dists import node_dists
from trafpy.generator.src.dists import val_dists
from trafpy.generator.src.dists import plot_dists
import os
import glob
import copy
import importlib
import json
def get_default_benchmark_names():
    '''Return the sorted names of the benchmarks bundled with TrafPy v001.'''
    benchmarks_dir = os.path.dirname(trafpy.__file__) + '/benchmarker/versions/benchmark_v001/benchmarks/'
    module_files = glob.glob(benchmarks_dir + '*.py')
    return sorted(os.path.basename(f).split('.')[0] for f in module_files)
def plot_benchmark_dists(benchmark_names, fontsize=20, time_units='\u03BCs', info_units='B'):
    '''Plots dist info of all benchmark(s).

    For each named benchmark, loads its previously saved distributions
    (node, interarrival time, flow size, num ops) via DefaultBenchmark,
    draws value/node-distribution figures, and returns the figures plus the
    loaded distributions and the random variables used for plotting.

    e.g. benchmark_names = ['uniform', 'university']

    :param benchmark_names: iterable of benchmark names; each must be one of
        get_default_benchmark_names().
    :param fontsize: font size passed through to the plotting helpers.
    :param time_units: unit label for interarrival-time axes (default: μs).
    :param info_units: unit label for flow-size axes (default: bytes).
    :return: tuple (plots, dists, plotted_rand_vars), each a nested dict
        keyed by benchmark name then dist name.
    '''
    # Validate all names up front so a typo fails before any heavy loading.
    for benchmark in benchmark_names:
        if benchmark not in get_default_benchmark_names():
            raise Exception('Benchmark \'{}\' not recognised. Must be one of: {}'.format(benchmark, get_default_benchmark_names()))
    # load dists
    dist_names = ['node_dist', 'interarrival_time_dist', 'flow_size_dist', 'num_ops_dist']
    dists = {benchmark: {dist_name: None for dist_name in dist_names} for benchmark in benchmark_names}
    plotted_rand_vars = copy.deepcopy(dists)
    plots = copy.deepcopy(dists)
    for benchmark in benchmark_names:
        print('\n~* {} *~'.format(benchmark))
        # import benchmark class and instantiate benchmark object
        benchmark_module = importlib.import_module('trafpy.benchmarker.versions.benchmark_v001.benchmarks.{}'.format(benchmark))
        b = benchmark_module.DefaultBenchmark(benchmark_name=benchmark, benchmark_version='v001', load_prev_dists=True)
        for dist_name in dist_names:
            # load dist
            dists[benchmark][dist_name], path = b.load_dist(benchmark, dist_name)
            # Saved dists may be JSON-encoded strings; decode in place.
            if type(dists[benchmark][dist_name]) is str:
                dists[benchmark][dist_name] = json.loads(dists[benchmark][dist_name])
            # check loaded dist successfully
            if dists[benchmark][dist_name] is None:
                print('Dist {} for benchmark {} not found in {}. Ensure dist is named as one of {}, and that dist has been saved in correct location.'.format(dist_name, benchmark, path, get_default_benchmark_names()))
            num_demands = max(len(dists[benchmark]['node_dist']) * 1000, 200000)  # estimate appropriate number of rand vars to gen
            if dist_name in ['flow_size_dist', 'interarrival_time_dist']:
                # remove str keys
                dists[benchmark][dist_name] = {float(key): val for key, val in dists[benchmark][dist_name].items()}
                # generate random variables from dist to plot
                rand_vars = val_dists.gen_rand_vars_from_discretised_dist(unique_vars=list(dists[benchmark][dist_name].keys()),
                                                                          probabilities=list(dists[benchmark][dist_name].values()),
                                                                          num_demands=num_demands)
                plotted_rand_vars[benchmark][dist_name] = rand_vars
                if all(prob == list(dists[benchmark][dist_name].values())[0] for prob in dists[benchmark][dist_name].values()):
                    # uniform dist, do not plot logscale
                    logscale = False
                else:
                    logscale = True
            # NOTE(review): block nesting below reconstructed from a
            # whitespace-mangled copy -- confirm against the original file.
            if dist_name == 'flow_size_dist':
                fig = plot_dists.plot_val_dist(rand_vars, show_fig=True, figsize=(6.2, 4), use_scientific_notation_yaxis=True, plot_horizontally=False, logscale=logscale, num_bins=20, rand_var_name='Flow Size ({})'.format(info_units), font_size=fontsize)
                plots[benchmark][dist_name] = fig
            elif dist_name == 'interarrival_time_dist':
                fig = plot_dists.plot_val_dist(rand_vars, show_fig=True, figsize=(6.2, 4), use_scientific_notation_yaxis=True, plot_horizontally=False, logscale=logscale, num_bins=20, rand_var_name='Interarrival Time ({})'.format(time_units), font_size=fontsize)
                plots[benchmark][dist_name] = fig
            elif dist_name == 'node_dist':
                fig = plot_dists.plot_node_dist(dists[benchmark][dist_name],
                                                chord_edge_width_range=[1,25],
                                                chord_edge_display_threshold=0.35,
                                                font_size=fontsize,
                                                show_fig=True)  # 0.475
                plots[benchmark][dist_name] = fig
            else:
                print('Unrecognised dist_name {}'.format(dist_name))
    return plots, dists, plotted_rand_vars
| 49.814433 | 266 | 0.624586 |
7291fc13e690d2e062a50270227e6995038fc9db | 2,334 | py | Python | scripts/helper_scripts/rename_history_items/rename_hist_items.py | bgruening/tools-artbio | 2a17d184b8591d872acada56b742569590fe1a4b | [
"MIT"
] | 12 | 2015-09-13T13:29:58.000Z | 2021-09-24T09:01:40.000Z | scripts/helper_scripts/rename_history_items/rename_hist_items.py | bgruening/tools-artbio | 2a17d184b8591d872acada56b742569590fe1a4b | [
"MIT"
] | 210 | 2015-08-31T14:04:58.000Z | 2022-03-23T08:52:03.000Z | scripts/helper_scripts/rename_history_items/rename_hist_items.py | bgruening/tools-artbio | 2a17d184b8591d872acada56b742569590fe1a4b | [
"MIT"
] | 16 | 2015-08-31T13:15:11.000Z | 2021-09-24T09:03:22.000Z | #!/usr/bin/env python2.7
from bioblend.galaxy import GalaxyInstance
import requests
import datetime
import argparse
requests.packages.urllib3.disable_warnings()
def parse_args():
    """Build and parse the command-line arguments for this Python 2.7 script.

    NOTE(review): ``type=file`` relies on the Python 2 ``file`` builtin and
    would raise NameError under Python 3 -- confirm before porting
    (argparse.FileType('r') would be the py3 equivalent).
    """
    args = argparse.ArgumentParser(description="Rename history items using a tabular file." + "\n" +
                                   "Example usage: python rename_hist_items.py -url misssissippi.snv.jussieu.fr \
                                   -key $your_api_key -hid $your_history_id -table $your_tabular_file \n \
                                   See test-data/sample_table.tab for an example file.")
    args.add_argument("-url", "--galaxy_url", required=True, help="url of galaxy instance")
    args.add_argument("-key", "--api_key", required=True, help="api key for galaxy instance")
    args.add_argument("-hid", "--history_id", required=True, help="History id of hitory containing files to be renamed")
    args.add_argument("-table", "--rename_table", required=True, type=file,
                      help="tab-seperated file with first column current filename,\
                      and second column the desired name")
    return args.parse_args()
def return_datetime(string_representation):
    """
    Parse an ISO-like 'YYYY-MM-DDTHH:MM:SS.ffffff' string into a datetime.
    Currently unused; kept for finding the last-modified history.
    """
    date_part, time_part = string_representation.split('T')
    combined = '%s %s' % (date_part, time_part)
    return datetime.datetime.strptime(combined, "%Y-%m-%d %H:%M:%S.%f")
def get_rename_list(rename_table):
    """Parse a two-column tab-separated file into (old_name, new_name) tuples."""
    pairs = []
    for line in rename_table:
        columns = line.split('\t')
        pairs.append((columns[0], columns[1].strip()))
    return pairs
def get_instance(url, api_key):
    """Return a bioblend GalaxyInstance for the given server URL and API key."""
    return GalaxyInstance(url, api_key)
def get_name_id_d(gi, hid):
    """Map each dataset name in history *hid* to its dataset id."""
    datasets = gi.histories.show_history(hid, contents=True)
    return dict((dataset[u'name'], dataset[u'id']) for dataset in datasets)
def update_names(gi, hid, rename_list, name_id_d):
    """Rename each dataset in *rename_list* via the Galaxy API."""
    for current_name, desired_name in rename_list:
        gi.histories.update_dataset(history_id=hid,
                                    dataset_id=name_id_d[current_name],
                                    name=desired_name)
def main():
    """Entry point: parse CLI args, connect to Galaxy, rename the datasets."""
    args = parse_args()
    history_id = args.history_id
    renames = get_rename_list(args.rename_table)
    galaxy = get_instance(args.galaxy_url, args.api_key)
    name_to_id = get_name_id_d(galaxy, history_id)
    update_names(galaxy, history_id, renames, name_to_id)

if __name__ == "__main__":
    main()
| 37.645161 | 120 | 0.67138 |
298a3cd40487e6ffe1b5c0527c1e764d515d92f1 | 147 | py | Python | elastipy/aggregation/__init__.py | defgsus/elastipy | c1144ab39fa70571ba0e02ccf41d380a8a1bd730 | [
"Apache-2.0"
] | 1 | 2021-02-17T17:50:28.000Z | 2021-02-17T17:50:28.000Z | elastipy/aggregation/__init__.py | defgsus/elastipy | c1144ab39fa70571ba0e02ccf41d380a8a1bd730 | [
"Apache-2.0"
] | 2 | 2021-03-29T02:09:41.000Z | 2022-03-01T20:09:48.000Z | elastipy/aggregation/__init__.py | netzkolchose/elastipy | c1144ab39fa70571ba0e02ccf41d380a8a1bd730 | [
"Apache-2.0"
] | null | null | null | from .aggregation import Aggregation, AggregationInterface, factory
# just make them parsed, no need to expose
from . import special as _special
| 24.5 | 67 | 0.802721 |
10d20cf6ce77c6eb123e82ee741d2309f3d89efe | 8,078 | py | Python | src/aprl/policies/wrappers.py | fkamrani/adversarial-policies | 53e129c2083f6557ddc18dbb39e4e633a2d7ab9b | [
"MIT"
] | 211 | 2019-02-22T08:07:25.000Z | 2022-03-14T10:44:20.000Z | src/aprl/policies/wrappers.py | fkamrani/adversarial-policies | 53e129c2083f6557ddc18dbb39e4e633a2d7ab9b | [
"MIT"
] | 51 | 2019-02-08T01:39:49.000Z | 2022-02-15T21:21:46.000Z | src/aprl/policies/wrappers.py | fkamrani/adversarial-policies | 53e129c2083f6557ddc18dbb39e4e633a2d7ab9b | [
"MIT"
] | 41 | 2019-04-23T05:01:49.000Z | 2022-03-16T06:51:19.000Z | from typing import List, Optional, Sequence, Tuple, TypeVar
import numpy as np
from stable_baselines.common.base_class import BaseRLModel
from aprl.policies import base
class NoisyAgentWrapper(base.ModelWrapper):
    def __init__(self, model: BaseRLModel, noise_annealer, noise_type: str = "gaussian"):
        """
        Wrap an agent so that noise is mixed into every action it predicts.

        :param model: the agent to wrap.
        :param noise_annealer: zero-argument callable returning the current
            noise parameter (e.g. Annealer.get_value) -- presumably decreased
            over time so the adversarial policy performs well on a normal victim.
        :param noise_type: which noise family the parameter controls.
            Current options are [gaussian].
        """
        super().__init__(model=model)
        self.noise_annealer = noise_annealer
        self.noise_generator = self._get_noise_generator(noise_type)

    @staticmethod
    def _get_noise_generator(noise_type):
        """Return a (scale, shape) -> noise sampler; KeyError if unsupported."""
        samplers = {"gaussian": lambda x, size: np.random.normal(scale=x, size=size)}
        return samplers[noise_type]

    def log_callback(self, logger):
        """Record the current annealed noise parameter."""
        logger.logkv("shaping/victim_noise", self.noise_annealer())

    def predict(self, observation, state=None, mask=None, deterministic=False):
        """Delegate to the wrapped model, then perturb its actions multiplicatively."""
        actions, states = self.model.predict(observation, state, mask, deterministic)
        scale = self.noise_annealer()
        noise = self.noise_generator(scale, actions.shape)
        return actions * (1 + noise), states
T = TypeVar("T")
def _array_mask_assign(arr: List[T], mask: Sequence[bool], vals: Optional[List[T]]) -> List[T]:
"""Assign val to indices of `arr` that are True in `mask`.
:param arr: a Python list.
:param mask: a list of boolean values of the same length as `arr`.
:param vals: value to assign.
:return A copy of `arr` with masked values updated to `val`.
"""
if vals is None:
vals = [None] * sum(mask)
arr = list(arr)
inds = np.arange(len(arr))[mask]
for i, v in zip(inds, vals):
arr[i] = v
return arr
def _standardize_state(
state_arr: Sequence[np.ndarray], mask: Sequence[bool], filler_shape: Optional[Tuple[int, ...]]
) -> Optional[np.ndarray]:
"""Replaces values in state_arr[env_mask] with a filler value.
The returned value should have entries of a consistent type, suitable to pass to a policy.
The input `state_arr` may contain entries produced by different policies, which may include
`None` values and NumPy arrays of various shapes.
:param state_arr: The state from the previous timestep.
:param mask: Mask of indices to replace with filler values. These should be environments
the policy does not control -- so it does not matter what output it produces.
:param filler_shape: The shape of the value to fill in.
:return `None` if `filler_shape` is None, otherwise `state_arr` with appropriate entries
masked by the filler value.
"""
if filler_shape is None:
# If the policy is stateless, it should take a `None` state entry
return None
# The policy is stateful, and expects entries of shape inferred_state_shape.
num_env = len(state_arr)
standardized_arr = np.zeros(shape=(num_env,) + filler_shape)
if np.any(mask):
# Copy over values from state_arr in mask. The others are OK to leave as zero:
# we'll ignore actions predicted in those indices anyway.
to_copy = np.array(state_arr)[mask] # extract subset
to_copy = np.stack(to_copy) # ensure it is a 2D array
standardized_arr[mask] = to_copy
return standardized_arr
class MultiPolicyWrapper(base.ModelWrapper):
    """Combines multiple policies into a single policy.

    Each policy executes for the entirety of an episode, and then a new policy is randomly
    selected from the list of policies.

    WARNING: Only suitable for inference, not for training!"""

    def __init__(self, policies: Sequence[BaseRLModel], num_envs: int):
        """Creates MultiPolicyWrapper.

        :param policies: The underlying policies to execute. All must share
            the same action and observation spaces (asserted below).
        :param num_envs: The number of environments to execute in parallel.
        """
        super().__init__(policies[0])
        self.policies = policies
        self.action_space = self.policies[0].action_space
        self.obs_space = self.policies[0].observation_space
        for p in self.policies:
            err_txt = "All policies must have the same {} space"
            assert p.action_space == self.action_space, err_txt.format("action")
            assert p.observation_space == self.obs_space, err_txt.format("obs")
        # Strictly we do not need `num_envs`, but it is convenient to have it so we can
        # construct an appropriate sized `self.current_env_policies` at initialization.
        self.num_envs = num_envs
        # current_env_policies[i] is the policy driving environment i for its
        # current episode; inferred_state_shapes[i] caches the state shape
        # policy i returns (None until first observed, or for stateless policies).
        self.current_env_policies = np.random.choice(self.policies, size=self.num_envs)
        self.inferred_state_shapes = [None] * len(policies)

    def predict(self, observation, state=None, mask=None, deterministic=False):
        """Predict actions for all environments, dispatching each environment
        to its currently assigned policy.

        :param observation: batched observations, one per environment.
        :param state: per-environment recurrent state from the previous call
            (may mix entries from different policies), or None on first call.
        :param mask: per-environment episode-reset flags; reset environments
            get a freshly drawn policy.
        :param deterministic: passed through to each policy's predict.
        :return: (actions array, per-environment state list).
        """
        self._reset_current_policies(mask)
        policy_actions = np.zeros(
            (self.num_envs,) + self.action_space.shape, dtype=self.action_space.dtype
        )
        new_state_array = [None] * self.num_envs
        for i, policy in enumerate(self.policies):
            env_mask = np.array([el == policy for el in self.current_env_policies])
            if not np.any(env_mask):
                # If this policy isn't active for any environments, don't run predict on it
                continue
            if state is None:
                # If it's the first training step, and the global state is None, just pass that
                # through to policies without standardizing, because stateful policies can accept
                # a single None as input, but not an array with None values
                standardized_state = None
            else:
                # Otherwise, fill in values for places where env_mask is False, i.e. that belong
                # to other policies. Also fill in values if the environment has just been reset
                # (mask is True), as the state may have originated from a different policy.
                #
                # Note initially we do not know what shape stateful policies expect, so we default
                # to `None`, which is always OK at the first time step. Inferred state shapes will
                # be set for stateful policies as soon as they return a state vector.
                retain = env_mask & ~np.array(mask)
                standardized_state = _standardize_state(
                    state, mask=retain, filler_shape=self.inferred_state_shapes[i]
                )
            predicted_actions, new_states = policy.predict(
                observation, state=standardized_state, mask=mask, deterministic=deterministic
            )
            if new_states is not None and self.inferred_state_shapes[i] is None:
                # If this is a policy that returns state, and its current inferred state
                # is None, update the inferred state value to this shape
                self.inferred_state_shapes[i] = new_states.shape[1:]
            # A policy must be consistently stateless, or consistently return
            # states of the shape inferred above.
            assert (
                new_states is None and self.inferred_state_shapes[i] is None
            ) or new_states.shape[1:] == self.inferred_state_shapes[i]
            # Only the entries this policy controls are copied into the output.
            policy_actions[env_mask] = predicted_actions[env_mask]
            new_state_array = _array_mask_assign(new_state_array, env_mask, new_states)
        return policy_actions, new_state_array

    def _reset_current_policies(self, mask):
        """Draw a fresh random policy for every environment whose episode ended."""
        num_done = sum(mask)
        self.current_env_policies[mask] = np.random.choice(self.policies, size=num_done)

    def close(self):
        """Close every wrapped policy's TensorFlow session, if it has one."""
        for policy in self.policies:
            if policy.sess is not None:
                policy.sess.close()
| 44.384615 | 98 | 0.666997 |
bb6589d1e6a898e1cb730cdd53b677b60db42db2 | 6,405 | py | Python | pycoin/tx/script/vm.py | Udala/docforever | 03eb20e4909c6204ff71503b30181a3ace1918c1 | [
"MIT"
] | 6 | 2016-04-20T20:55:04.000Z | 2021-03-12T21:21:02.000Z | pycoin/tx/script/vm.py | Udala/docforever | 03eb20e4909c6204ff71503b30181a3ace1918c1 | [
"MIT"
] | 1 | 2016-07-08T00:43:06.000Z | 2016-07-24T15:45:39.000Z | pycoin/tx/script/vm.py | Udala/docforever | 03eb20e4909c6204ff71503b30181a3ace1918c1 | [
"MIT"
] | 3 | 2016-04-04T17:51:20.000Z | 2017-02-21T09:03:40.000Z | # -*- coding: utf-8 -*-
"""
Parse, stream, create, sign and verify Bitcoin transactions as Tx structures.
The MIT License (MIT)
Copyright (c) 2013 by Richard Kiss
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import logging
from ...intbytes import byte_to_int, int_to_bytes
from . import opcodes
from . import ScriptError
from .check_signature import op_checksig, op_checkmultisig
from .microcode import MICROCODE_LOOKUP, VCH_TRUE, VCH_FALSE
from .tools import get_opcode, bin_script
logger = logging.getLogger(__name__)

# Opcodes whose "*VERIFY" variants pop the result after the base operation
# and fail the script unless it equals VCH_TRUE.
VERIFY_OPS = frozenset((opcodes.OPCODE_TO_INT[s] for s in (
    "OP_NUMEQUALVERIFY OP_EQUALVERIFY OP_CHECKSIGVERIFY OP_VERIFY OP_CHECKMULTISIGVERIFY".split())))

# Opcodes disabled in Bitcoin script; encountering one aborts evaluation.
INVALID_OPCODE_VALUES = frozenset((opcodes.OPCODE_TO_INT[s] for s in (
    "OP_CAT OP_SUBSTR OP_LEFT OP_RIGHT OP_INVERT OP_AND OP_OR OP_XOR OP_2MUL OP_2DIV OP_MUL "
    "OP_DIV OP_MOD OP_LSHIFT OP_RSHIFT".split())))
def eval_script(script, signature_for_hash_type_f, expected_hash_type=None, stack=[],
                disallow_long_scripts=True):
    """Execute a Bitcoin script, mutating *stack* in place.

    :param script: serialized script bytes.
    :param signature_for_hash_type_f: callable producing the signature hash
        for a given hash type (forwarded to the checksig operations).
    :param expected_hash_type: if not None, required signature hash type.
    :param stack: initial value stack. NOTE(review): mutable default
        argument -- the same list is shared across calls that omit it;
        confirm callers always pass an explicit stack.
    :param disallow_long_scripts: reject scripts longer than 10000 bytes.
    :return: True if execution finished with a non-empty stack, else False
        (any exception during evaluation is logged and reported as False).
    """
    altstack = []
    if disallow_long_scripts and len(script) > 10000:
        return False
    pc = 0
    begin_code_hash = pc
    if_condition = None  # or True or False
    # TODO: set op_count
    # op_count = 0
    try:
        while pc < len(script):
            opcode, data, pc = get_opcode(script, pc)
            if len(data) > 0:
                # Push opcodes carry their payload in `data`.
                stack.append(data)
                continue
            # deal with if_condition first
            if if_condition is not None:
                # TODO: fix IF (which doesn't properly nest)
                if opcode == opcodes.OP_ELSE:
                    if_condition = not if_condition
                    continue
                if opcode == opcodes.OP_ENDIF:
                    if_condition = None
                    continue
                if not if_condition:
                    # Inside a false branch: skip the opcode entirely.
                    continue
            if opcode in (opcodes.OP_IF, opcodes.OP_NOTIF):
                if_condition = (stack.pop() == VCH_TRUE)
                continue
            if opcode == opcodes.OP_CODESEPARATOR:
                # Checksig ops hash only the script from this point on.
                begin_code_hash = pc - 1
                continue
            if opcode in INVALID_OPCODE_VALUES:
                raise ScriptError("invalid opcode %s at %d" % (opcodes.INT_TO_OPCODE[opcode], pc-1))
            if opcode in MICROCODE_LOOKUP:
                # Simple stack-manipulation/arithmetic ops are table-driven.
                MICROCODE_LOOKUP[opcode](stack)
                if opcode in VERIFY_OPS:
                    v = stack.pop()
                    if v != VCH_TRUE:
                        raise ScriptError("VERIFY failed at %d" % (pc-1))
                continue
            if opcode == opcodes.OP_TOALTSTACK:
                altstack.append(stack.pop())
                continue
            if opcode == opcodes.OP_FROMALTSTACK:
                stack.append(altstack.pop())
                continue
            if opcode >= opcodes.OP_1NEGATE and opcode <= opcodes.OP_16:
                # Small-integer push opcodes encode the value in the opcode itself.
                stack.append(int_to_bytes(opcode + 1 - opcodes.OP_1))
                continue
            if opcode in (opcodes.OP_ELSE, opcodes.OP_ENDIF):
                raise ScriptError("%s without OP_IF" % opcodes.INT_TO_OPCODE[opcode])
            if opcode in (opcodes.OP_CHECKSIG, opcodes.OP_CHECKSIGVERIFY):
                # Subset of script starting at the most recent codeseparator
                op_checksig(stack, signature_for_hash_type_f, expected_hash_type, script[begin_code_hash:])
                if opcode == opcodes.OP_CHECKSIGVERIFY:
                    if stack.pop() != VCH_TRUE:
                        raise ScriptError("VERIFY failed at %d" % (pc-1))
                continue
            if opcode == opcodes.OP_CHECKMULTISIG:
                # Subset of script starting at the most recent codeseparator
                op_checkmultisig(
                    stack, signature_for_hash_type_f, expected_hash_type, script[begin_code_hash:])
                continue
            # BRAIN DAMAGE -- does it always get down here for each verify op? I think not
            if opcode in VERIFY_OPS:
                v = stack.pop()
                if v != VCH_TRUE:
                    # NOTE(review): "%d" % pc-1 applies % before -, so this
                    # raises TypeError (caught below, reported as failure)
                    # instead of formatting pc-1 -- confirm intended.
                    raise ScriptError("VERIFY failed at %d" % pc-1)
            logger.error("can't execute opcode %s", opcode)
    except Exception:
        logger.exception("script failed for unknown reason")
        return False
    return len(stack) != 0
def verify_script(script_signature, script_public_key, signature_for_hash_type_f, expected_hash_type=None):
    """Verify a scriptSig/scriptPubKey pair, including pay-to-script-hash.

    Runs the signature script and then the public-key script on a shared
    stack; for P2SH outputs it additionally re-verifies the embedded redeem
    script with the remaining stack items as its inputs.

    :return: True when both scripts evaluate successfully and leave a truthy
        top-of-stack value, else False.
    """
    stack = []
    # P2SH pattern: OP_HASH160 <20-byte hash> OP_EQUAL (23 bytes total).
    is_p2h = (len(script_public_key) == 23 and byte_to_int(script_public_key[0]) == opcodes.OP_HASH160
              and byte_to_int(script_public_key[-1]) == opcodes.OP_EQUAL)
    if not eval_script(script_signature, signature_for_hash_type_f, expected_hash_type, stack):
        logger.debug("script_signature did not evaluate")
        return False
    if is_p2h:
        # The last pushed item is the serialized redeem script; the items
        # before it become that script's inputs.
        signatures, alt_script_public_key = stack[:-1], stack[-1]
        alt_script_signature = bin_script(signatures)
    if not eval_script(script_public_key, signature_for_hash_type_f, expected_hash_type, stack):
        logger.debug("script_public_key did not evaluate")
        return False
    if is_p2h and stack[-1] == VCH_TRUE:
        # Hash matched: recurse with the redeem script as the new scriptPubKey.
        return verify_script(alt_script_signature, alt_script_public_key,
                             signature_for_hash_type_f, expected_hash_type=expected_hash_type)
    return stack[-1] != VCH_FALSE
| 38.353293 | 107 | 0.645433 |
efb46995035b1b9f5b14c94f234499f528b30ae7 | 401 | py | Python | setup.py | acwikla-novela/test_release | 86cb65a412fa529e68a22b4fa12779eb791cf353 | [
"Apache-2.0"
] | null | null | null | setup.py | acwikla-novela/test_release | 86cb65a412fa529e68a22b4fa12779eb791cf353 | [
"Apache-2.0"
] | null | null | null | setup.py | acwikla-novela/test_release | 86cb65a412fa529e68a22b4fa12779eb791cf353 | [
"Apache-2.0"
] | null | null | null | from setuptools import setup, find_packages
version_file = open('VERSION')
version = version_file.read().strip()
setup(
name='test_release',
version=version,
author='Aleksander Cwikla',
url="https://github.com/acwikla-novela/test_release",
packages=find_packages(),
description='Testing auto-release',
platforms='Posix; MacOS X; Windows',
python_requires='==3.7.4',
)
| 26.733333 | 57 | 0.705736 |
00afa4a7f73d2ea32f8399e151eeb6022357d952 | 1,287 | py | Python | pages/product_page.py | pkuptcov/stepik-selenium-final | a150ab1c79adeed3346b48b296f210b826140ad1 | [
"Apache-2.0"
] | null | null | null | pages/product_page.py | pkuptcov/stepik-selenium-final | a150ab1c79adeed3346b48b296f210b826140ad1 | [
"Apache-2.0"
] | 1 | 2021-06-02T00:44:19.000Z | 2021-06-02T00:44:19.000Z | pages/product_page.py | pkuptcov/stepik-selenium-final | a150ab1c79adeed3346b48b296f210b826140ad1 | [
"Apache-2.0"
] | null | null | null | from .base_page import BasePage
from .locators import ProductPageLocator
class ProductPage(BasePage):
def add_to_cart(self):
add_to_cart_button = self.browser.find_element(*ProductPageLocator.ADD_TO_BASKET_BUTTON)
add_to_cart_button.click()
def should_be_right_title(self):
product_title = self.browser.find_element(*ProductPageLocator.PRODUCT_TITLE).text
success_message = self.browser.find_element(*ProductPageLocator.SUCCESS_MESSAGE).text
assert f'{product_title} был добавлен в вашу корзину.' == success_message, "Product title doesn't match product name added"
def should_be_right_price(self):
product_price = self.browser.find_element(*ProductPageLocator.PRODUCT_PRICE).text
basket_total = self.browser.find_element(*ProductPageLocator.BASKET_TOTAL).text
assert product_price in basket_total, "Product price isn't equal to basket price"
def should_not_be_success_message(self):
assert self.is_not_element_present(*ProductPageLocator.SUCCESS_MESSAGE), \
"Success message is presented, but should not be"
def should_not_be_success_message_wait(self):
assert self.is_disappeared(*ProductPageLocator.SUCCESS_MESSAGE), "Success message is presented, but should not be" | 51.48 | 131 | 0.769231 |
f55c922babfffef1526f4dc8ebbc157de7085f2c | 1,703 | py | Python | sdk/servicebus/azure-mgmt-servicebus/azure/mgmt/servicebus/models/tracked_resource_py3.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z | sdk/servicebus/azure-mgmt-servicebus/azure/mgmt/servicebus/models/tracked_resource_py3.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 226 | 2019-07-24T07:57:21.000Z | 2019-10-15T01:07:24.000Z | sdk/servicebus/azure-mgmt-servicebus/azure/mgmt/servicebus/models/tracked_resource_py3.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource_py3 import Resource
class TrackedResource(Resource):
    """The Resource definition.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id
    :vartype id: str
    :ivar name: Resource name
    :vartype name: str
    :ivar type: Resource type
    :vartype type: str
    :param location: Required. The Geo-location where the resource lives
    :type location: str
    :param tags: Resource tags
    :type tags: dict[str, str]
    """

    # msrest validation metadata: read-only fields are populated by the
    # server; `location` must be supplied by the caller.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
    }

    # Maps Python attribute names to wire-format JSON keys and msrest types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(self, *, location: str, tags=None, **kwargs) -> None:
        super(TrackedResource, self).__init__(**kwargs)
        self.location = location
        self.tags = tags
23cbfd731e4ece5306a6e0475419ff14bf517de4 | 970 | py | Python | newnnfw/tools/tflitefile_tool/tflite/ArgMaxOptions.py | kosslab-kr/Tizen-NN-Framework | 132fc98ed57e4b19ad1f4cb258ad79fa9df1db7a | [
"Apache-2.0"
] | 8 | 2018-09-10T01:32:26.000Z | 2020-05-13T06:05:40.000Z | newnnfw/tools/tflitefile_tool/tflite/ArgMaxOptions.py | kosslab-kr/Tizen-NN-Framework | 132fc98ed57e4b19ad1f4cb258ad79fa9df1db7a | [
"Apache-2.0"
] | 28 | 2018-09-10T05:01:09.000Z | 2021-03-04T10:07:12.000Z | newnnfw/tools/tflitefile_tool/tflite/ArgMaxOptions.py | kosslab-kr/Tizen-NN-Framework | 132fc98ed57e4b19ad1f4cb258ad79fa9df1db7a | [
"Apache-2.0"
] | 4 | 2018-09-13T04:16:08.000Z | 2018-12-03T07:34:44.000Z | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite
import flatbuffers
class ArgMaxOptions(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsArgMaxOptions(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = ArgMaxOptions()
x.Init(buf, n + offset)
return x
# ArgMaxOptions
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# ArgMaxOptions
def OutputType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return 0
def ArgMaxOptionsStart(builder):
builder.StartObject(1)
def ArgMaxOptionsAddOutputType(builder, outputType):
builder.PrependInt8Slot(0, outputType, 0)
def ArgMaxOptionsEnd(builder):
return builder.EndObject()
| 24.25 | 87 | 0.685567 |
f9f81e47dabd551b186b5d1c3dd96bfeccb9d921 | 8,857 | py | Python | flexget/tests/test_misc.py | sillygreen89/Flexget | 60f24ab0dda7b94c87ba43451921c50c3cef391f | [
"MIT"
] | null | null | null | flexget/tests/test_misc.py | sillygreen89/Flexget | 60f24ab0dda7b94c87ba43451921c50c3cef391f | [
"MIT"
] | null | null | null | flexget/tests/test_misc.py | sillygreen89/Flexget | 60f24ab0dda7b94c87ba43451921c50c3cef391f | [
"MIT"
] | null | null | null | from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
import os
import stat
import pytest
from flexget.entry import EntryUnicodeError, Entry
class TestDisableBuiltins(object):
"""
A quick hack: tests the `disable` plugin by checking whether the built-in
`seen` filtering (which would normally reject duplicates) is switched off.
"""
# NOTE(review): YAML indentation in this config string was lost by the
# dataset dump; structure must be restored before the config is usable.
config = """
tasks:
test:
mock:
- {title: 'dupe1', url: 'http://localhost/dupe', 'imdb_score': 5}
- {title: 'dupe2', url: 'http://localhost/dupe', 'imdb_score': 5}
accept_all: yes
disable: builtins
test2:
mock:
- {title: 'dupe1', url: 'http://localhost/dupe', 'imdb_score': 5, description: 'http://www.imdb.com/title/tt0409459/'}
- {title: 'dupe2', url: 'http://localhost/dupe', 'imdb_score': 5}
accept_all: yes
disable:
- seen
- cli_config
"""
def test_disable_builtins(self, execute_task):
# Execute the task once, then we'll make sure seen plugin isn't rejecting on future executions
execute_task('test')
task = execute_task('test')
# Same URL twice: with `seen` disabled both entries must stay accepted.
assert task.find_entry('accepted', title='dupe1') and task.find_entry('accepted', title='dupe2'), \
'disable is not working?'
task = execute_task('test2')
assert task.find_entry(title='dupe1').accepted and task.find_entry('accepted', title='dupe2'), \
'disable is not working?'
@pytest.mark.online
class TestInputHtml(object):
# Smoke test for the `html` input plugin; requires network access.
config = """
tasks:
test:
html: http://download.flexget.com/
"""
def test_parsing(self, execute_task):
task = execute_task('test')
# Any non-empty entry list means the HTML page was fetched and parsed.
assert task.entries, 'did not produce entries'
class TestPriority(object):
# Verifies plugin_priority ordering: with `set` running before `quality`,
# the quality requirement is satisfied (test); with the opposite ordering
# the hdtv entry is rejected (test2).
# NOTE(review): the two task configs read identically here because the
# dump stripped YAML indentation; upstream they differ in priority layout.
config = """
tasks:
test:
mock:
- {title: 'Smoke hdtv'}
accept_all: yes
set:
quality: 720p
quality: 720p
plugin_priority:
set: 3
quality: 2
accept_all: 1
test2:
mock:
- {title: 'Smoke hdtv'}
accept_all: yes
set:
quality: 720p
quality: 720p
plugin_priority:
set: 3
quality: 2
accept_all: 1
"""
def test_smoke(self, execute_task):
task = execute_task('test')
assert task.accepted, 'set plugin should have changed quality before quality plugin was run'
task = execute_task('test2')
assert task.rejected, 'quality plugin should have rejected Smoke as hdtv'
class TestImmortal(object):
# An entry flagged `immortal` must survive a reject-everything regexp.
config = """
tasks:
test:
mock:
- {title: 'title1', immortal: yes}
- {title: 'title2'}
regexp:
reject:
- .*
"""
def test_immortal(self, execute_task):
task = execute_task('test')
assert task.find_entry(title='title1'), 'rejected immortal entry'
assert not task.find_entry(title='title2'), 'did not reject mortal'
@pytest.mark.online
class TestDownload(object):
# Downloads a real file from GitHub and checks the on-disk permissions
# honor the process umask. Requires network access.
config = """
tasks:
test:
mock:
- title: README
url: https://github.com/Flexget/Flexget/raw/master/README.rst
filename: flexget_test_data
accept_all: true
download:
path: __tmp__
fail_html: no
"""
def test_download(self, execute_task, tmpdir):
# NOTE: what the hell is .obj and where it comes from?
# Re: seems to come from python mimetype detection in download plugin ...
# Re Re: Implemented in such way that extension does not matter?
# A little convoluted, but you have to set the umask in order to have
# the current value returned to you
curr_umask = os.umask(0)
os.umask(curr_umask)
# executes task and downloads the file
task = execute_task('test')
assert task.entries[0]['location'], 'location missing?'
testfile = task.entries[0]['location']
assert os.path.exists(testfile), 'download file does not exists'
testfile_stat = os.stat(testfile)
# New files should get mode 0666 minus the umask bits.
modes_equal = 0o666 - curr_umask == stat.S_IMODE(testfile_stat.st_mode)
assert modes_equal, 'download file mode not honoring umask'
class TestEntryUnicodeError(object):
# Assigning undecodable bytes to an Entry field must raise EntryUnicodeError.
def test_encoding(self):
e = Entry('title', 'url')
with pytest.raises(EntryUnicodeError):
e['invalid'] = b'\x8e'
class TestFilterRequireField(object):
# `require_field` must reject entries missing the named field and leave
# entries that have it untouched.
config = """
tasks:
test:
mock:
- {title: 'Taken[2008]DvDrip[Eng]-FOO', imdb_url: 'http://www.imdb.com/title/tt0936501/'}
- {title: 'ASDFASDFASDF'}
require_field: imdb_url
test2:
mock:
- {title: 'Entry.S01E05.720p', series_name: 'Entry'}
- {title: 'Entry2.is.a.Movie'}
require_field: series_name
"""
def test_field_required(self, execute_task):
task = execute_task('test')
assert not task.find_entry('rejected', title='Taken[2008]DvDrip[Eng]-FOO'), \
'Taken should NOT have been rejected'
assert task.find_entry('rejected', title='ASDFASDFASDF'), \
'ASDFASDFASDF should have been rejected'
task = execute_task('test2')
assert not task.find_entry('rejected', title='Entry.S01E05.720p'), \
'Entry should NOT have been rejected'
assert task.find_entry('rejected', title='Entry2.is.a.Movie'), \
'Entry2 should have been rejected'
class TestHtmlUtils(object):
# NOTE(review): the HTML-entity literals in these asserts appear to have
# been unescaped by the dataset dump (e.g. '&lt;3' rendered as '<3');
# confirm against the upstream flexget test before relying on them.
def test_decode_html(self):
"""utils decode_html"""
from flexget.utils.tools import decode_html
assert decode_html('<3') == u'<3'
assert decode_html('─') == u'\u2500'
@pytest.mark.skip(reason='FAILS - DISABLED')
def test_encode_html(self):
"""utils encode_html (FAILS - DISABLED)"""
# why this does not encode < ?
from flexget.utils.tools import encode_html
print(encode_html('<3'))
assert encode_html('<3') == '<3'
class TestSetPlugin(object):
# Exercises the `set` plugin: plain values, Jinja templates (including
# failed renders), non-string values, and lazy field evaluation.
config = """
templates:
global:
accept_all: yes
tasks:
test:
mock:
- {title: 'Entry 1'}
set:
thefield: TheValue
otherfield: 3.0
test_jinja:
mock:
- {title: 'Entry 1', series_name: 'Value'}
- {title: 'Entry 2'}
set:
field: 'The {{ series_name|upper }}'
otherfield: '{% if series_name is not defined %}no series{% endif %}'
alu: '{{ series_name|re_search(".l.") }}'
test_non_string:
mock:
- title: Entry 1
set:
bool: False
int: 42
test_lazy:
mock:
- title: Entry 1
set:
a: "the {{title}}"
test_lazy_err:
mock:
- title: Entry 1
set:
title: "{{ao"
other: "{{eaeou}"
"""
def test_set(self, execute_task):
task = execute_task('test')
entry = task.find_entry('entries', title='Entry 1')
assert entry['thefield'] == 'TheValue'
assert entry['otherfield'] == 3.0
def test_jinja(self, execute_task):
task = execute_task('test_jinja')
entry = task.find_entry('entries', title='Entry 1')
assert entry['field'] == 'The VALUE'
assert entry['otherfield'] == ''
assert entry['alu'] == 'alu'
# Entry 2 has no series_name, so the template render fails.
entry = task.find_entry('entries', title='Entry 2')
assert entry['field'] is None, \
'`field` should be None when jinja rendering fails'
assert entry['otherfield'] == 'no series'
def test_non_string(self, execute_task):
task = execute_task('test_non_string')
entry = task.find_entry('entries', title='Entry 1')
assert entry['bool'] is False
assert entry['int'] == 42
def test_lazy(self, execute_task):
task = execute_task('test_lazy')
entry = task.find_entry('entries', title='Entry 1')
# The field stays lazy until first access, then renders from the title.
assert entry.is_lazy('a')
assert entry['a'] == 'the Entry 1'
def test_lazy_err(self, execute_task):
task = execute_task('test_lazy_err')
entry = task.find_entry('entries', title='Entry 1')
assert entry['title'] == 'Entry 1', 'should fall back to original value when template fails'
assert entry['other'] is None
| 32.682657 | 132 | 0.557638 |
ebff2262ca92c36667248cf73d1b5df1fb4dcb5f | 3,887 | py | Python | src/viztracer/flamegraph.py | dummyindex/viztracer | 73d7b9a1cdcd8f91ad6ef9dd522b6be94de22b4d | [
"Apache-2.0"
] | 1,798 | 2020-08-13T05:10:54.000Z | 2022-03-31T16:43:17.000Z | src/viztracer/flamegraph.py | dummyindex/viztracer | 73d7b9a1cdcd8f91ad6ef9dd522b6be94de22b4d | [
"Apache-2.0"
] | 185 | 2020-08-15T15:37:42.000Z | 2022-03-31T19:31:23.000Z | src/viztracer/flamegraph.py | dummyindex/viztracer | 73d7b9a1cdcd8f91ad6ef9dd522b6be94de22b4d | [
"Apache-2.0"
] | 100 | 2020-08-20T06:11:20.000Z | 2022-03-29T03:29:00.000Z | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/gaogaotiantian/viztracer/blob/master/NOTICE.txt
import queue
from typing import Any, Dict, List, Optional, Tuple
from .functree import FuncTree, FuncTreeNode
class _FlameNode:
def __init__(self, parent: Optional["_FlameNode"], name: str):
self.name: str = name
self.value: float = 0
self.count: int = 0
self.parent: Optional["_FlameNode"] = parent
self.children: Dict[str, "_FlameNode"] = {}
def get_child(self, child: FuncTreeNode) -> None:
if child.fullname not in self.children:
self.children[child.fullname] = _FlameNode(self, child.fullname)
self.children[child.fullname].value += child.end - child.start
self.children[child.fullname].count += 1
for grandchild in child.children:
self.children[child.fullname].get_child(grandchild)
class _FlameTree:
def __init__(self, func_tree: FuncTree):
self.root: _FlameNode = _FlameNode(None, "__root__")
self.parse(func_tree)
def parse(self, func_tree: FuncTree):
self.root = _FlameNode(None, "__root__")
for child in func_tree.root.children:
self.root.get_child(child)
class FlameGraph:
def __init__(self, trace_data: Optional[Dict[str, Any]] = None):
self.trees: Dict[str, _FlameTree] = {}
if trace_data:
self.parse(trace_data)
def parse(self, trace_data: Dict[str, Any]) -> None:
func_trees: Dict[str, FuncTree] = {}
for data in trace_data["traceEvents"]:
key = "p{}_t{}".format(data["pid"], data["tid"])
if key in func_trees:
tree = func_trees[key]
else:
tree = FuncTree(data["pid"], data["tid"])
func_trees[key] = tree
if data["ph"] == "X":
tree.add_event(data)
for key, tree in func_trees.items():
self.trees[key] = _FlameTree(tree)
def dump_to_perfetto(self) -> List[Dict[str, Any]]:
"""
reformat data to what perfetto likes
private _functionProfileDetails?: FunctionProfileDetails[]
export interface FunctionProfileDetails {
name?: string;
flamegraph?: CallsiteInfo[];
expandedCallsite?: CallsiteInfo;
expandedId?: number;
}
export interface CallsiteInfo {
id: number;
parentId: number;
depth: number;
name?: string;
totalSize: number;
selfSize: number;
mapping: string;
merged: boolean;
highlighted: boolean;
}
"""
ret = []
for name, tree in self.trees.items():
q: queue.Queue[Tuple[_FlameNode, int, int]] = queue.Queue()
for child in tree.root.children.values():
q.put((child, -1, 0))
if q.empty():
continue
flamegraph = []
idx = 0
while not q.empty():
node, parent, depth = q.get()
flamegraph.append({
"id": idx,
"parentId": parent,
"depth": depth,
"name": node.name,
"totalSize": node.value,
"selfSize": node.value - sum((n.value for n in node.children.values())),
"mapping": f"{node.count}",
"merged": False,
"highlighted": False
})
for n in node.children.values():
q.put((n, idx, depth + 1))
idx += 1
detail = {
"name": name,
"flamegraph": flamegraph
}
ret.append(detail)
return ret
| 33.508621 | 92 | 0.538204 |
813e8f0a5bf45e76190b2d5bdd282810d922e899 | 902 | py | Python | tests/programs/import_variants/some_package/__init__.py | augustand/Nuitka | b7b9dd50b60505a309f430ce17cad36fb7d75048 | [
"Apache-2.0"
] | null | null | null | tests/programs/import_variants/some_package/__init__.py | augustand/Nuitka | b7b9dd50b60505a309f430ce17cad36fb7d75048 | [
"Apache-2.0"
] | null | null | null | tests/programs/import_variants/some_package/__init__.py | augustand/Nuitka | b7b9dd50b60505a309f430ce17cad36fb7d75048 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Import-time side effects: announce where this package was loaded from
# (normalizing .pyc back to .py) and its package search path.
print( "*** some_package: Coming from '%s'" % __file__.replace(".pyc", ".py") )
print( "*** some_package: Path is '%s'" % __path__ )
| 45.1 | 79 | 0.699557 |
8c0192ed4db6ea4b01822326284716395822ee8d | 3,502 | py | Python | handler/FileHandler.py | ByungjunKim/teanaps | 84eb0a1dd2c2b568105182a9bce2b0a588a97720 | [
"Apache-2.0"
] | 93 | 2019-06-01T18:14:17.000Z | 2022-03-19T01:50:32.000Z | handler/FileHandler.py | ByungjunKim/teanaps | 84eb0a1dd2c2b568105182a9bce2b0a588a97720 | [
"Apache-2.0"
] | 5 | 2020-10-30T15:34:55.000Z | 2021-09-25T06:02:52.000Z | handler/FileHandler.py | ByungjunKim/teanaps | 84eb0a1dd2c2b568105182a9bce2b0a588a97720 | [
"Apache-2.0"
] | 12 | 2019-12-19T08:09:38.000Z | 2022-03-15T07:30:51.000Z | from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfdocument import PDFDocument
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfinterp import resolve1
import docx2txt
from pptx import Presentation
import io
import pickle
class FileHandler():
    """Utility for pickling data, reading/writing delimited text files, and
    extracting plain text from PDF, DOCX and PPTX documents.

    The converter methods rely on pdfminer, docx2txt and python-pptx being
    importable at module level.
    """

    def __init__(self):
        # Stateless helper; nothing to initialize.
        pass

    def save_data(self, file_name, data):
        """Pickle *data* to *file_name* using the highest protocol (-1)."""
        with open(file_name, "wb") as f:
            pickle.dump(data, f, -1)

    def load_data(self, file_name):
        """Load and return a pickled object from *file_name*."""
        with open(file_name, "rb") as f:
            return pickle.load(f)

    def save_txt(self, file_name, line_list, encoding="utf-8", separator="\t"):
        """Write *line_list* to a text file, one entry per line.

        Each entry may be a plain string or a sequence of columns; columns
        are joined with *separator*, and embedded newlines are flattened to
        spaces so every entry stays on a single line.
        """
        with open(file_name, "w", encoding=encoding) as f:
            for line in line_list:
                # BUG FIX: the original tested ``type(line) == "str"`` (a type
                # object against a string literal), which is always False, so
                # plain strings fell into the column branch and were split
                # into individual characters.
                if isinstance(line, str):
                    f.write(line.replace("\n", " "))
                else:
                    # join() avoids the original trailing-separator-plus-strip
                    # dance, which also stripped meaningful edge whitespace.
                    f.write(separator.join(
                        str(col).replace("\n", " ") for col in line))
                f.write("\n")

    def load_txt(self, file_name, encoding="utf-8", separator="\t"):
        """Read a delimited text file written by :meth:`save_txt`.

        Returns a list of rows, each row a list of column strings.
        (Also fixes a leak: the original never closed the file handle.)
        """
        with open(file_name, encoding=encoding) as f:
            return [line.replace("\n", "").split(separator) for line in f]

    def pdf_converter(self, input_filename, output_filename):
        """Extract all page text from a PDF into *output_filename*,
        collapsing blank lines."""
        with open(input_filename, "rb") as input_file:
            pdf_parser = PDFParser(input_file)
            pdf_document = PDFDocument(pdf_parser)
            page_count = range(resolve1(pdf_document.catalog["Pages"])["Count"])
            string_io = io.StringIO()
            # An empty range is falsy; pdfminer treats an empty set as "all pages".
            page_number_set = set(page_count) if page_count else set()
            resource_manager = PDFResourceManager()
            converter = TextConverter(resource_manager, string_io,
                                      laparams=LAParams())
            page_interpreter = PDFPageInterpreter(resource_manager, converter)
            try:
                for page in PDFPage.get_pages(input_file, page_number_set,
                                              caching=True,
                                              check_extractable=True):
                    page_interpreter.process_page(page)
                # utf-8 for consistency with the docx/pptx converters
                # (the original relied on the platform default encoding).
                with open(output_filename, "w", encoding="utf-8") as output_file:
                    output_file.write(string_io.getvalue().replace("\n\n", "\n"))
            finally:
                converter.close()

    def docx_converter(self, input_filename, output_filename):
        """Extract text from a DOCX into *output_filename*."""
        with open(input_filename, "rb") as input_file, \
                open(output_filename, "w", encoding="utf-8") as output_file:
            output_file.write(docx2txt.process(input_file).replace("\n\n", "\n"))

    def pptx_converter(self, input_filename, output_filename):
        """Extract all text-frame text from a PPTX into *output_filename*,
        one line per paragraph."""
        with open(input_filename, "rb") as input_file, \
                open(output_filename, "w", encoding="utf-8") as output_file:
            pptx = Presentation(input_file)
            contents = ""
            for slide in pptx.slides:
                for shape in slide.shapes:
                    if not shape.has_text_frame:
                        continue
                    for paragraph in shape.text_frame.paragraphs:
                        for run in paragraph.runs:
                            contents += run.text
                        # Newline per paragraph (dump lost the original
                        # indentation; this matches the per-paragraph layout).
                        contents += "\n"
            output_file.write(contents.replace("\n\n", "\n"))
3013a5c8ea7149c89cdf4448110d5369f8d4c033 | 8,970 | py | Python | tests/test_server.py | iluxonchik/mercedes-benz-io-challenge-2018-sinfo | 92dfb8f43aed6c230688005412a3b3cef72f44cd | [
"MIT"
] | 2 | 2018-09-12T11:52:39.000Z | 2019-06-19T05:44:23.000Z | tests/test_server.py | iluxonchik/mercedes-benz-io-challenge-2018-sinfo | 92dfb8f43aed6c230688005412a3b3cef72f44cd | [
"MIT"
] | null | null | null | tests/test_server.py | iluxonchik/mercedes-benz-io-challenge-2018-sinfo | 92dfb8f43aed6c230688005412a3b3cef72f44cd | [
"MIT"
] | null | null | null | """Tests for the HTTP server."""
import unittest
import threading
import json
import urllib.request
import urllib.parse
from urllib.request import Request, urlopen
from urllib.error import HTTPError
import datetime
import uuid
from http.server import HTTPServer
from unittest.mock import patch
from mbio.server.endpoint import Endpoint
from mbio.server.server import Server
# Deterministic UUIDs fed to the patched uuid.uuid4 (see test_create_booking).
MOCKED_UUIDS = ['136fbb51-8a06-42fd-b839-d01ab87e2c6c', '136fbb51-8a06-42fd-b839-c01ab87e2c6b',
'132fbb51-8a06-42fd-b839-c01ab87e2c6c']
# HTTPServer variant whose shutdown() also closes the listening socket,
# so repeated setUp/tearDown cycles can rebind the same port.
class TestHTTPServer(HTTPServer):
def shutdown(self):
super(TestHTTPServer, self).shutdown()
self.server_close()
# datetime.datetime stand-in pinning today() to a fixed instant, so
# createdAt/cancelledAt fields in responses are predictable.
class MockedDateTime(datetime.datetime):
MOCKED_DATE_VALUE = datetime.datetime(2018, 10, 3, 19, 22, 19, 92)
@classmethod
def today(cls):
return cls.MOCKED_DATE_VALUE
class RESTServerTestCase(unittest.TestCase):
# End-to-end tests: every test talks HTTP over localhost to a live Server
# instance running on a background thread (started in setUp).
SERVER_PORT = 1234
@classmethod
def setUpClass(cls):
# Freeze "now" for the whole test run (see MockedDateTime above).
datetime.datetime = MockedDateTime
def setUp(self):
Server.DATASET_PATH = './tests/resources/dataset_full.json'
server_address = ('', RESTServerTestCase.SERVER_PORT)
self.httpd = TestHTTPServer(server_address, Server)
self.thr = threading.Thread(target=self.httpd.serve_forever)
self.thr.start()
def tearDown(self):
# Stop serving and join the worker thread so the port can be rebound.
self.httpd.shutdown()
self.thr.join()
def test_server_crashes_on_invalid_dataset(self):
# A bogus dataset path must make Server() raise during construction.
with self.assertRaises(Exception):
Server.td = None
Server.DATASET_PATH = 'Still Dre Day'
Server()
def test_server_error_get_on_post(self):
expected = {
"error": "This endpoint only supports the POST method."
}
res = self._get_request('{}'.format(Endpoint.BOOKINGS_CREATE))
obtained = json.loads(res)
self.assertEqual(expected, obtained)
def test_server_error_post_on_get(self):
expected = {
"error": "This endpoint only supports the GET method."
}
res = self._post_request('{}'.format(Endpoint.VEHICLES), {'a':'b'})
obtained = json.loads(res)
self.assertEqual(expected, obtained)
def test_unknown_endpoint_get_error(self):
expected = {
"error": "Unknown endpoint."
}
res = self._get_request('{}'.format('/the-documentary/album/'))
obtained = json.loads(res)
self.assertEqual(expected, obtained)
def test_unknown_endpoint_post_error(self):
expected = {
"error": "Unknown endpoint."
}
res = self._post_request('{}'.format('/the-the-chronic/album/'), {})
obtained = json.loads(res)
self.assertEqual(expected, obtained)
def test_unknown_endpoint_put_error(self):
expected = {
"error": "Unknown endpoint."
}
res = self._put_request('{}'.format('/the-the-chronic/album/'), {})
obtained = json.loads(res)
self.assertEqual(expected, obtained)
def test_get_vehicles(self):
# Expected payloads live as JSON fixtures under tests/resources.
EXPECTED_JSON_FILE_PATH = './tests/resources/expected_all_vehicles.json'
with open(EXPECTED_JSON_FILE_PATH, 'r') as f:
expected = json.load(f)
res = self._get_request('{}'.format(Endpoint.VEHICLES))
obtained = json.loads(res)
self.assertEqual(expected, obtained)
def test_get_specific_vehicle(self):
EXPECTED_JSON_FILE_PATH = './tests/resources/expected_specific_vehicle.json'
with open(EXPECTED_JSON_FILE_PATH, 'r') as f:
expected = json.load(f)
res = self._get_request('{}?model=E&fuel=electric&transmission=auto&dealer=846679bd-5831-4286-969b-056e9c89d74c'.format(Endpoint.VEHICLES))
obtained = json.loads(res)
self.assertEqual(expected, obtained)
@patch.object(uuid, 'uuid4', side_effect=MOCKED_UUIDS)
def test_create_booking(self, uuid):
# uuid4 is patched so the generated booking id is MOCKED_UUIDS[0].
data = {
"first_name": "Jayceon",
"last_name": "Taylor",
"vehicle_id": "136fbb51-8a06-42fd-b839-c01ab87e2c6c",
"pickup_date": "2019-04-08T10:30:00"
}
res = self._post_request('{}'.format(Endpoint.BOOKINGS_CREATE), data)
obtained = json.loads(res)
expected = {
"id": MOCKED_UUIDS[0],
"firstName": "Jayceon",
"lastName": "Taylor",
"vehicleId": "136fbb51-8a06-42fd-b839-c01ab87e2c6c",
"pickupDate": "2019-04-08T10:30:00",
"createdAt": MockedDateTime.MOCKED_DATE_VALUE.isoformat(),
}
self.assertEqual(expected, obtained)
def test_create_booking_invalid_date(self):
expected = {
"error": "2019-04-08a10:30:00 is not a valid ISO date format"
}
data = {
"first_name": "Jayceon",
"last_name": "Taylor",
"vehicle_id": "136fbb51-8a06-42fd-b839-c01ab87e2c6c",
"pickup_date": "2019-04-08a10:30:00"
}
res = self._post_request('{}'.format(Endpoint.BOOKINGS_CREATE), data)
obtained = json.loads(res)
self.assertEqual(expected, obtained)
def test_cancel_booking(self):
data = {
"booking_id": "b00d3e76-9605-49c7-910b-03b51679f6d6",
"reason": "On a scale from one to ten I'm a nine, with two M's"
}
res = self._put_request('{}'.format(Endpoint.BOOKINGS_CANCEL), data)
obtained = json.loads(res)
expected = {
"id": "b00d3e76-9605-49c7-910b-03b51679f6d6",
"firstName": "Joanna",
"lastName": "Walker",
"vehicleId": "875f00fa-9f67-44ea-bb26-75ff375fdd3f",
"pickupDate": "2018-03-03T10:30:00",
"createdAt": "2018-02-26T08:42:46.298",
"cancelledAt": MockedDateTime.MOCKED_DATE_VALUE.isoformat(),
"cancelledReason": "On a scale from one to ten I'm a nine, with two M's"
}
self.assertEqual(expected, obtained)
def test_dealers_in_polygon(self):
EXPECTED_JSON_FILE_PATH = './tests/resources/dealer_mb_lisboa.json'
with open(EXPECTED_JSON_FILE_PATH, 'r') as f:
expected = json.load(f)
# Polygon vertices as [latitude, longitude] pairs covering Portugal.
data = {
"coordinates": [
[42.203891, -9.525033],
[36.800254, -9.349252],
[37.203849, -5.899545],
[42.268963, -6.031381]
],
"model": "E",
"fuel": "gasoline",
"transmission":"manual"
}
res = self._post_request('{}'.format(Endpoint.DEALERS_IN_POLYGON), data)
obtained = json.loads(res)
self.assertEqual(expected, obtained)
def test_get_closest_dealer(self):
url = '{}?latitude=38.187787&longitude=-8.104157&model=amg&fuel=gasoline&transmission=manual'.format(Endpoint.DEALER_CLOSEST)
EXPECTED_JSON_FILE_PATH = './tests/resources/dealer_mb_albufeira.json'
with open(EXPECTED_JSON_FILE_PATH, 'r') as f:
expected = json.load(f)
res = self._get_request(url)
obtained = json.loads(res)
self.assertEqual(expected, obtained)
def test_get_closest_dealer_list(self):
url = '{}?latitude=38.187787&longitude=-8.104157&model=amg&fuel=gasoline'.format(Endpoint.DEALERS_CLOSEST_LIST)
EXPECTED_JSON_FILE_PATH = './tests/resources/closest_dealer_list.json'
with open(EXPECTED_JSON_FILE_PATH, 'r') as f:
expected = json.load(f)
res = self._get_request(url)
obtained = json.loads(res)
self.assertEqual(expected, obtained)
def _get_request(self, endpoint):
# Returns the response body text; HTTP error bodies are returned too.
url = 'http://localhost:{}{}'.format(RESTServerTestCase.SERVER_PORT, endpoint)
req = urllib.request.Request(url)
try:
with urllib.request.urlopen(req) as response:
json_res = response.read().decode()
except HTTPError as e:
json_res = e.read().decode()
return json_res
def _post_request(self, endpoint, data):
# POSTs *data* as JSON; returns the response body text.
url = 'http://localhost:{}{}'.format(RESTServerTestCase.SERVER_PORT, endpoint)
try:
request = Request(url, json.dumps(data).encode())
json_res = urlopen(request).read().decode()
except HTTPError as e:
json_res = e.read().decode()
return json_res
def _put_request(self, endpoint, data):
# PUTs *data* as JSON; returns the response body text.
url = 'http://localhost:{}{}'.format(RESTServerTestCase.SERVER_PORT, endpoint)
try:
request = Request(url, json.dumps(data).encode(), method='PUT')
json_res = urlopen(request).read().decode()
except HTTPError as e:
json_res = e.read().decode()
return json_res
| 33.345725 | 147 | 0.601226 |
eade41eaa22875b2da4c32ccb7004643694d1510 | 388 | py | Python | python/athena/gpu_links/TanhLink.py | DMALab/TSplit | 8f86f987163aa06521bfeeb174616eb4a0a81b47 | [
"Apache-2.0"
] | 2 | 2021-05-29T11:18:14.000Z | 2021-09-09T14:29:21.000Z | python/athena/gpu_links/TanhLink.py | DMALab/TSplit | 8f86f987163aa06521bfeeb174616eb4a0a81b47 | [
"Apache-2.0"
] | null | null | null | python/athena/gpu_links/TanhLink.py | DMALab/TSplit | 8f86f987163aa06521bfeeb174616eb4a0a81b47 | [
"Apache-2.0"
] | 1 | 2021-05-01T16:34:37.000Z | 2021-05-01T16:34:37.000Z | from __future__ import absolute_import
import ctypes
from .._base import _LIB
from .. import ndarray as _nd
# Dispatch tanh to the native library: reads in_arr, writes the result into
# out_arr (presumably elementwise with matching shapes -- not checked here;
# TODO confirm against DLGpuTanh's C signature).
def tanh(in_arr, out_arr, stream = None, profiler = None):
assert isinstance(in_arr, _nd.NDArray)
assert isinstance(out_arr, _nd.NDArray)
# Optional CUDA stream / profiler are passed through as NULL when absent.
_LIB.DLGpuTanh(in_arr.handle, out_arr.handle, stream.handle if stream else None, ctypes.byref(profiler) if profiler else None)
e9f87dd230c65ea553130718b384bd20c7810ccf | 2,131 | py | Python | Analog_Inputs_for_Raspberry_Pi_Using_the_MCP3008/Analog_Inputs_for_Raspberry_Pi_Using_the_MCP3008.py | joewalk102/Adafruit_Learning_System_Guides | 2bda607f8c433c661a2d9d40b4db4fd132334c9a | [
"MIT"
] | 665 | 2017-09-27T21:20:14.000Z | 2022-03-31T09:09:25.000Z | Analog_Inputs_for_Raspberry_Pi_Using_the_MCP3008/Analog_Inputs_for_Raspberry_Pi_Using_the_MCP3008.py | joewalk102/Adafruit_Learning_System_Guides | 2bda607f8c433c661a2d9d40b4db4fd132334c9a | [
"MIT"
] | 641 | 2017-10-03T19:46:37.000Z | 2022-03-30T18:28:46.000Z | Analog_Inputs_for_Raspberry_Pi_Using_the_MCP3008/Analog_Inputs_for_Raspberry_Pi_Using_the_MCP3008.py | joewalk102/Adafruit_Learning_System_Guides | 2bda607f8c433c661a2d9d40b4db4fd132334c9a | [
"MIT"
] | 734 | 2017-10-02T22:47:38.000Z | 2022-03-30T14:03:51.000Z | import os
import time
import busio
import digitalio
import board
import adafruit_mcp3xxx.mcp3008 as MCP
from adafruit_mcp3xxx.analog_in import AnalogIn
# Hardware setup (Raspberry Pi): SPI bus + MCP3008 ADC with a potentiometer
# wired to channel 0.
# create the spi bus
spi = busio.SPI(clock=board.SCK, MISO=board.MISO, MOSI=board.MOSI)
# create the cs (chip select)
cs = digitalio.DigitalInOut(board.D22)
# create the mcp object
mcp = MCP.MCP3008(spi, cs)
# create an analog input channel on pin 0
chan0 = AnalogIn(mcp, MCP.P0)
# One-shot readout to confirm the wiring works.
print('Raw ADC Value: ', chan0.value)
print('ADC Voltage: ' + str(chan0.voltage) + 'V')
last_read = 0 # this keeps track of the last potentiometer value
tolerance = 250 # to keep from being jittery we'll only change
# volume when the pot has moved a significant amount
# on a 16-bit ADC
def remap_range(value, left_min, left_max, right_min, right_max):
    """Linearly map *value* from [left_min, left_max] onto
    [right_min, right_max], truncating the result to an int."""
    source_width = left_max - left_min
    target_width = right_max - right_min
    # Normalize into a 0..1 fraction of the source range.
    fraction = int(value - left_min) / int(source_width)
    # Scale the fraction into the target range (int() truncates).
    return int(right_min + fraction * target_width)
# Poll the pot twice a second; when it has moved beyond `tolerance`,
# map the 16-bit reading to 0-100% and push it to ALSA via amixer.
# NOTE(review): indentation was lost in this dump; in the upstream guide
# the `if trim_pot_changed:` body extends through `last_read = trim_pot`.
while True:
# we'll assume that the pot didn't move
trim_pot_changed = False
# read the analog pin
trim_pot = chan0.value
# how much has it changed since the last read?
pot_adjust = abs(trim_pot - last_read)
if pot_adjust > tolerance:
trim_pot_changed = True
if trim_pot_changed:
# convert 16bit adc0 (0-65535) trim pot read into 0-100 volume level
set_volume = remap_range(trim_pot, 0, 65535, 0, 100)
# set OS volume playback volume
print('Volume = {volume}%' .format(volume = set_volume))
set_vol_cmd = 'sudo amixer cset numid=1 -- {volume}% > /dev/null' \
.format(volume = set_volume)
os.system(set_vol_cmd)
# save the potentiometer reading for the next loop
last_read = trim_pot
# hang out and do nothing for a half second
time.sleep(0.5)
| 30.884058 | 76 | 0.678085 |
99c210fbbcca1a6b05754a63611ead07f3ce31f6 | 588 | py | Python | test/unit/module/helpers/test_create_rules.py | tomislacker/cfn-python-lint | f209ddfef9bcc1a005adfebcfcc16220b18deddb | [
"MIT-0"
] | 1,134 | 2019-03-02T14:58:34.000Z | 2021-05-15T00:57:16.000Z | test/unit/module/helpers/test_create_rules.py | tomislacker/cfn-python-lint | f209ddfef9bcc1a005adfebcfcc16220b18deddb | [
"MIT-0"
] | 1,122 | 2019-03-03T04:27:15.000Z | 2021-05-14T20:51:16.000Z | test/unit/module/helpers/test_create_rules.py | tomislacker/cfn-python-lint | f209ddfef9bcc1a005adfebcfcc16220b18deddb | [
"MIT-0"
] | 297 | 2019-03-11T09:56:57.000Z | 2021-05-14T16:41:19.000Z | """
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
import os
from test.testlib.testcase import BaseTestCase
from cfnlint.helpers import create_rules
from cfnlint.rules import CloudFormationLintRule
class TestCreateRules(BaseTestCase):
"""Test creating rules from a module."""
def testBase(self):
# create_rules(Base) should instantiate every CloudFormationLintRule
# declared in the templates.Base module, including rule E1001.
from cfnlint.rules.templates import Base
rules = create_rules(Base)
self.assertTrue(all(isinstance(r, CloudFormationLintRule) for r in rules))
self.assertTrue('E1001' in (r.id for r in rules))
| 29.4 | 82 | 0.738095 |
0b3586803380dc9efc203451fa11315c5c7b81da | 901 | py | Python | Tutorials/Statistics/Binomial_Distribution1.py | vinayvinu500/Hackerrank | e185ae9d3c7dc5cd661761142e436f5df6a3f0f1 | [
"MIT"
] | null | null | null | Tutorials/Statistics/Binomial_Distribution1.py | vinayvinu500/Hackerrank | e185ae9d3c7dc5cd661761142e436f5df6a3f0f1 | [
"MIT"
] | null | null | null | Tutorials/Statistics/Binomial_Distribution1.py | vinayvinu500/Hackerrank | e185ae9d3c7dc5cd661761142e436f5df6a3f0f1 | [
"MIT"
] | null | null | null | # Given that
b:float = 1.09
g:int = 1
n = 6 # exactly 6 childrens
p = b/100 # having of probability of success having a boy
q = 1- p # having of probability of success having not a boy
# print(p,q)
def fact(x, memo={}):
    """Return x! (with x! defined as 1 for any x <= 1), caching results.

    The shared mutable default is kept deliberately as the cross-call memo
    cache (backward compatible with callers passing their own dict), but
    the computation is now iterative, so large x no longer hits Python's
    recursion limit as the original recursive version did.
    """
    if x <= 1:
        return 1
    if x in memo:
        return memo[x]
    result = 1
    for i in range(2, x + 1):
        result *= i
    memo[x] = result
    return result
def bin_coe(n, x):
    """Return the binomial coefficient C(n, x) = n! / (x! * (n - x)!).

    Uses math.comb for exact integer arithmetic instead of dividing
    factorials (and no longer depends on the sibling fact() helper);
    the result is converted to float to preserve the original return
    type. For x > n this now correctly yields 0.0, and negative
    arguments raise ValueError instead of returning garbage.
    """
    from math import comb  # local import keeps this fix self-contained
    return float(comb(n, x))
def pmf(n, x, p):
    """Binomial probability mass function: P(X = x) for n independent
    trials with per-trial success probability p."""
    success_part = p ** x
    failure_part = (1 - p) ** (n - x)
    return bin_coe(n, x) * success_part * failure_part
# P(X < 3) = P(x=1) + p(x=2)
# NOTE(review): P(X < 3) should also include the P(X=0) term; as written,
# px3 = 1 - px2 silently folds P(X=0) into the ">= 3" tail.
px2 = pmf(n,1,p) + pmf(n,2,p)
# P(X >= 3) = 1 - P(X<3)
px3 = 1 - px2
print(round(px3,3))
# P(X >= 3) = P(X=3) + P(X=4) + P(X=5) + P(X=6)
# Direct tail sum; differs from px3 by exactly the P(X=0) term noted above.
px3456 = pmf(n,3,p) + pmf(n,4,p) + pmf(n,5,p) + pmf(n,6,p)
print(px3456) # wrong answer
1e742928e8c3a41282a202a71d8bff4fc9925f40 | 480 | py | Python | Practice(Beginner)/FLOW002.py | 8Bit1Byte/Codechef-Solutions | a79d64042da04e007c5101d3c784a843df01f852 | [
"MIT"
] | 2 | 2021-05-24T11:20:46.000Z | 2021-06-18T12:21:43.000Z | Practice(Beginner)/FLOW002.py | 8Bit1Byte/CodechefSolutions | a79d64042da04e007c5101d3c784a843df01f852 | [
"MIT"
] | null | null | null | Practice(Beginner)/FLOW002.py | 8Bit1Byte/CodechefSolutions | a79d64042da04e007c5101d3c784a843df01f852 | [
"MIT"
] | null | null | null | '''
Problem Name: Find Remainder
Problem Code: FLOW002
Problem Type: https://www.codechef.com/problems/school
Problem Link: https://www.codechef.com/problems/FLOW002
Solution Link: https://www.codechef.com/viewsolution/46835115
'''
from sys import stdin
def main(t):
    """Read *t* lines of "a b" from stdin and print a % b for each."""
    remaining = t
    while remaining:
        a, b = map(int, stdin.readline().split())
        print(a % b)
        remaining -= 1
# Script entry point: the first stdin line is the test-case count T.
if __name__ == '__main__':
t = int(stdin.readline())
main(t)
7eb4ce2c76205834ac9d3dea4e4a9c1b89e5a0fd | 15,494 | py | Python | magenta/models/performance_rnn/performance_model.py | hologerry/magenta | c08c17a548f97a3f5d294a010c28ea2803718d6f | [
"Apache-2.0"
] | null | null | null | magenta/models/performance_rnn/performance_model.py | hologerry/magenta | c08c17a548f97a3f5d294a010c28ea2803718d6f | [
"Apache-2.0"
] | null | null | null | magenta/models/performance_rnn/performance_model.py | hologerry/magenta | c08c17a548f97a3f5d294a010c28ea2803718d6f | [
"Apache-2.0"
] | 1 | 2021-09-09T15:30:36.000Z | 2021-09-09T15:30:36.000Z | # Copyright 2020 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Performance RNN model."""
import collections
import functools
import magenta
from magenta.models.shared import events_rnn_model
from magenta.music.performance_lib import PerformanceEvent
from tensorflow.contrib import training as contrib_training # noqa
# State for constructing a time-varying control sequence. Keeps track of the
# current event position and time step in the generated performance, to allow
# the control sequence to vary with clock time.
#   current_perf_index: index of the first performance event not yet scanned.
#   current_perf_step: clock time (in steps) accumulated up to that index.
PerformanceControlState = collections.namedtuple(
    'PerformanceControlState', ['current_perf_index', 'current_perf_step'])
class PerformanceRnnModel(events_rnn_model.EventSequenceRnnModel):
  """Class for RNN performance generation models."""

  def generate_performance(
      self, num_steps, primer_sequence, temperature=1.0, beam_size=1,
      branch_factor=1, steps_per_iteration=1, control_signal_fns=None,
      disable_conditioning_fn=None):
    """Generate a performance track continuing the given primer.

    Args:
      num_steps: Integer length in steps of the final track, primer included.
      primer_sequence: A Performance object used to seed generation.
      temperature: Float divisor applied to the logits before the softmax;
          values above 1.0 make output more random, below 1.0 less random.
      beam_size: Integer beam size for beam-search generation.
      branch_factor: Integer beam-search branch factor.
      steps_per_iteration: Integer number of steps per beam-search iteration.
      control_signal_fns: Optional list of functions mapping a time step to
          the desired control value; None means no control signals.
      disable_conditioning_fn: Optional function mapping a time step to
          whether conditioning should be disabled; None when there is no
          conditioning or it is not optional.

    Returns:
      The generated Performance object, beginning with the provided primer.
    """
    # Default to unconditioned generation.
    control_events = None
    control_state = None
    extend_control_events_callback = None
    if control_signal_fns:
      # Seed the control sequence with the signal values at time step 0.
      initial_control = tuple(fn(0) for fn in control_signal_fns)
      if disable_conditioning_fn is not None:
        initial_control = (disable_conditioning_fn(0), initial_control)
      control_events = [initial_control]
      control_state = PerformanceControlState(
          current_perf_index=0, current_perf_step=0)
      # Grows the control sequence in lockstep with generation.
      extend_control_events_callback = functools.partial(
          _extend_control_events, control_signal_fns, disable_conditioning_fn)
    return self._generate_events(
        num_steps, primer_sequence, temperature, beam_size, branch_factor,
        steps_per_iteration, control_events=control_events,
        control_state=control_state,
        extend_control_events_callback=extend_control_events_callback)

  def performance_log_likelihood(self, sequence, control_values,
                                 disable_conditioning):
    """Evaluate the log likelihood of a performance under this model.

    Args:
      sequence: The Performance object to score.
      control_values: List of (single) values, one per control signal; falsy
          when the model is unconditioned.
      disable_conditioning: Whether optional conditioning is disabled; None
          when there is no conditioning or it is not optional.

    Returns:
      The log likelihood of `sequence` under this model.
    """
    control_events = None
    if control_values:
      # The same control tuple is applied to every event of the sequence.
      fixed_control = tuple(control_values)
      if disable_conditioning is not None:
        fixed_control = (disable_conditioning, fixed_control)
      control_events = [fixed_control] * len(sequence)
    return self._evaluate_log_likelihood(
        [sequence], control_events=control_events)[0]
def _extend_control_events(control_signal_fns, disable_conditioning_fn,
                           control_events, performance, control_state):
  """Extend a control sequence one event past the generated performance.

  Appends one more control tuple to `control_events` so that it is one event
  longer than `performance`, allowing the next performance event to be
  generated conditionally. Meant to be used as the
  `extend_control_events_callback` of `EventSequenceRnnModel._generate_events`.

  Args:
    control_signal_fns: List of functions mapping a time step to the desired
        control value, or None if not using control signals.
    disable_conditioning_fn: Function mapping a time step to whether
        conditioning should be disabled, or None when there is no
        conditioning or it is not optional.
    control_events: The control sequence (list) to extend in place.
    performance: The Performance being generated.
    control_state: PerformanceControlState recording how far into
        `performance` was already scanned, so the total elapsed step count is
        not recomputed from scratch on every call.

  Returns:
    A PerformanceControlState positioned one step past the end of the
    generated performance.
  """
  index = control_state.current_perf_index
  step = control_state.current_perf_step
  # Consume any events generated since the previous call, accumulating clock
  # time from the TIME_SHIFT events encountered along the way.
  while index < len(performance):
    event = performance[index]
    if event.event_type == PerformanceEvent.TIME_SHIFT:
      step += event.event_value
    index += 1
  next_control = tuple(fn(step) for fn in control_signal_fns)
  if disable_conditioning_fn is not None:
    next_control = (disable_conditioning_fn(step), next_control)
  control_events.append(next_control)
  return PerformanceControlState(
      current_perf_index=index, current_perf_step=step)
class PerformanceRnnConfig(events_rnn_model.EventSequenceRnnConfig):
  """Stores a configuration for a Performance RNN.

  Attributes:
    num_velocity_bins: Number of velocity bins to use; 0 disables velocity.
    control_signals: List of PerformanceControlSignal objects used for
        conditioning, or None for an unconditioned model.
    optional_conditioning: If True, conditioning can be switched off via a
        flag carried in the conditioning input.
    note_performance: Whether the note-performance event encoding is in use.
  """

  def __init__(self, details, encoder_decoder, hparams, num_velocity_bins=0,
               control_signals=None, optional_conditioning=False,
               note_performance=False):
    if control_signals is not None:
      # Combine the per-signal encoders and place them in front of the event
      # encoder so control values are fed in alongside the events.
      control_encoder = magenta.music.MultipleEventSequenceEncoder(
          [signal.encoder for signal in control_signals])
      if optional_conditioning:
        control_encoder = magenta.music.OptionalEventSequenceEncoder(
            control_encoder)
      encoder_decoder = magenta.music.ConditionalEventSequenceEncoderDecoder(
          control_encoder, encoder_decoder)
    super().__init__(details, encoder_decoder, hparams)
    self.num_velocity_bins = num_velocity_bins
    self.control_signals = control_signals
    self.optional_conditioning = optional_conditioning
    self.note_performance = note_performance
# Registry of ready-made Performance RNN configurations, keyed by config name.
# All entries share the same hyperparameters (3x512 LSTM, batch 64) and differ
# only in event encoding, velocity handling and optional conditioning signals.
default_configs = {
    # Baseline: one-hot performance events, no velocity.
    'performance':
        PerformanceRnnConfig(
            magenta.music.protobuf.generator_pb2.GeneratorDetails(
                id='performance', description='Performance RNN'),
            magenta.music.OneHotEventSequenceEncoderDecoder(
                magenta.music.PerformanceOneHotEncoding()),
            contrib_training.HParams(
                batch_size=64,
                rnn_layer_sizes=[512, 512, 512],
                dropout_keep_prob=1.0,
                clip_norm=3,
                learning_rate=0.001)),
    # Adds 32 velocity bins to the baseline encoding.
    'performance_with_dynamics':
        PerformanceRnnConfig(
            magenta.music.protobuf.generator_pb2.GeneratorDetails(
                id='performance_with_dynamics',
                description='Performance RNN with dynamics'),
            magenta.music.OneHotEventSequenceEncoderDecoder(
                magenta.music.PerformanceOneHotEncoding(num_velocity_bins=32)),
            contrib_training.HParams(
                batch_size=64,
                rnn_layer_sizes=[512, 512, 512],
                dropout_keep_prob=1.0,
                clip_norm=3,
                learning_rate=0.001),
            num_velocity_bins=32),
    # Same encoding as above but with compact (index-based) model input.
    # NOTE(review): the GeneratorDetails id duplicates
    # 'performance_with_dynamics' rather than matching this dict key —
    # possibly intentional; confirm before relying on the id.
    'performance_with_dynamics_compact':
        PerformanceRnnConfig(
            magenta.music.protobuf.generator_pb2.GeneratorDetails(
                id='performance_with_dynamics',
                description='Performance RNN with dynamics (compact input)'),
            magenta.music.OneHotIndexEventSequenceEncoderDecoder(
                magenta.music.PerformanceOneHotEncoding(num_velocity_bins=32)),
            contrib_training.HParams(
                batch_size=64,
                rnn_layer_sizes=[512, 512, 512],
                dropout_keep_prob=1.0,
                clip_norm=3,
                learning_rate=0.001),
            num_velocity_bins=32),
    # Modulo event encoding with velocity.
    'performance_with_dynamics_and_modulo_encoding':
        PerformanceRnnConfig(
            magenta.music.protobuf.generator_pb2.GeneratorDetails(
                id='performance_with_dynamics_and_modulo_encoding',
                description='Performance RNN with dynamics and modulo encoding'
            ),
            magenta.music.ModuloPerformanceEventSequenceEncoderDecoder(
                num_velocity_bins=32),
            contrib_training.HParams(
                batch_size=64,
                rnn_layer_sizes=[512, 512, 512],
                dropout_keep_prob=1.0,
                clip_norm=3,
                learning_rate=0.001),
            num_velocity_bins=32),
    # Note-performance encoding; the only entry with note_performance=True.
    'performance_with_dynamics_and_note_encoding':
        PerformanceRnnConfig(
            magenta.music.protobuf.generator_pb2.GeneratorDetails(
                id='performance_with_dynamics_and_note_encoding',
                description='Performance RNN with dynamics and note encoding'),
            magenta.music.NotePerformanceEventSequenceEncoderDecoder(
                num_velocity_bins=32),
            contrib_training.HParams(
                batch_size=64,
                rnn_layer_sizes=[512, 512, 512],
                dropout_keep_prob=1.0,
                clip_norm=3,
                learning_rate=0.001),
            num_velocity_bins=32,
            note_performance=True),
    # Conditioned on note density over a 3-second window.
    'density_conditioned_performance_with_dynamics':
        PerformanceRnnConfig(
            magenta.music.protobuf.generator_pb2.GeneratorDetails(
                id='density_conditioned_performance_with_dynamics',
                description='Note-density-conditioned Performance RNN + dynamics'
            ),
            magenta.music.OneHotEventSequenceEncoderDecoder(
                magenta.music.PerformanceOneHotEncoding(num_velocity_bins=32)),
            contrib_training.HParams(
                batch_size=64,
                rnn_layer_sizes=[512, 512, 512],
                dropout_keep_prob=1.0,
                clip_norm=3,
                learning_rate=0.001),
            num_velocity_bins=32,
            control_signals=[
                magenta.music.NoteDensityPerformanceControlSignal(
                    window_size_seconds=3.0,
                    density_bin_ranges=[1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0])
            ]),
    # Conditioned on a pitch-class histogram over a 5-second window.
    'pitch_conditioned_performance_with_dynamics':
        PerformanceRnnConfig(
            magenta.music.protobuf.generator_pb2.GeneratorDetails(
                id='pitch_conditioned_performance_with_dynamics',
                description='Pitch-histogram-conditioned Performance RNN'),
            magenta.music.OneHotEventSequenceEncoderDecoder(
                magenta.music.PerformanceOneHotEncoding(num_velocity_bins=32)),
            contrib_training.HParams(
                batch_size=64,
                rnn_layer_sizes=[512, 512, 512],
                dropout_keep_prob=1.0,
                clip_norm=3,
                learning_rate=0.001),
            num_velocity_bins=32,
            control_signals=[
                magenta.music.PitchHistogramPerformanceControlSignal(
                    window_size_seconds=5.0)
            ]),
    # Conditioned on both density and pitch histogram.
    'multiconditioned_performance_with_dynamics':
        PerformanceRnnConfig(
            magenta.music.protobuf.generator_pb2.GeneratorDetails(
                id='multiconditioned_performance_with_dynamics',
                description='Density- and pitch-conditioned Performance RNN'),
            magenta.music.OneHotEventSequenceEncoderDecoder(
                magenta.music.PerformanceOneHotEncoding(num_velocity_bins=32)),
            contrib_training.HParams(
                batch_size=64,
                rnn_layer_sizes=[512, 512, 512],
                dropout_keep_prob=1.0,
                clip_norm=3,
                learning_rate=0.001),
            num_velocity_bins=32,
            control_signals=[
                magenta.music.NoteDensityPerformanceControlSignal(
                    window_size_seconds=3.0,
                    density_bin_ranges=[1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0]),
                magenta.music.PitchHistogramPerformanceControlSignal(
                    window_size_seconds=5.0)
            ]),
    # As above, but conditioning can be toggled off at generation time.
    'optional_multiconditioned_performance_with_dynamics':
        PerformanceRnnConfig(
            magenta.music.protobuf.generator_pb2.GeneratorDetails(
                id='optional_multiconditioned_performance_with_dynamics',
                description='Optionally multiconditioned Performance RNN'),
            magenta.music.OneHotEventSequenceEncoderDecoder(
                magenta.music.PerformanceOneHotEncoding(num_velocity_bins=32)),
            contrib_training.HParams(
                batch_size=64,
                rnn_layer_sizes=[512, 512, 512],
                dropout_keep_prob=1.0,
                clip_norm=3,
                learning_rate=0.001),
            num_velocity_bins=32,
            control_signals=[
                magenta.music.NoteDensityPerformanceControlSignal(
                    window_size_seconds=3.0,
                    density_bin_ranges=[1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0]),
                magenta.music.PitchHistogramPerformanceControlSignal(
                    window_size_seconds=5.0)
            ],
            optional_conditioning=True)
}
| 45.570588 | 84 | 0.652898 |
5801924aaa5b9882239f0450e2a31c3a03cfcef5 | 3,364 | py | Python | migrations/versions/0319_create_complaint_template.py | department-of-veterans-affairs/notification-api | 698bc98d8e78a13a0b2cfc432cfc718ff1016b06 | [
"MIT"
] | 10 | 2020-05-04T14:11:06.000Z | 2022-02-22T19:06:36.000Z | migrations/versions/0319_create_complaint_template.py | department-of-veterans-affairs/notification-api | 698bc98d8e78a13a0b2cfc432cfc718ff1016b06 | [
"MIT"
] | 554 | 2020-05-07T21:56:24.000Z | 2022-03-31T23:04:51.000Z | migrations/versions/0319_create_complaint_template.py | department-of-veterans-affairs/notification-api | 698bc98d8e78a13a0b2cfc432cfc718ff1016b06 | [
"MIT"
] | 4 | 2020-08-27T16:43:29.000Z | 2021-02-17T22:17:27.000Z | """
Revision ID: 0319_create_complaint_template
Revises: 0318_remove_custom_email_from
Create Date: 2021-03-11 11:15:00
"""
from datetime import datetime
from alembic import op
from flask import current_app
from app.models import EMAIL_TYPE, NORMAL
# Alembic revision identifiers.
revision = '0319_create_complaint_template'
down_revision = '0318_remove_custom_email_from'

# All values come from trusted application config, not user input; they are
# interpolated into the SQL statements below.
user_id = current_app.config['NOTIFY_USER_ID']
service_id = current_app.config['NOTIFY_SERVICE_ID']
complaint_template_id = current_app.config['EMAIL_COMPLAINT_TEMPLATE_ID']
complaint_template_name = 'received email complaint'
complaint_template_subject = 'received email complaint for ((notification_id))'
def upgrade():
    """Create the 'received email complaint' template.

    Inserts matching rows into templates, templates_history and
    template_redacted so the template is immediately usable.
    """
    # Email body; the ((placeholders)) are substituted at send time.
    content = """An email has been marked as spam. Here is the complaint info:
    \t notification_id: ((notification_id))
    \t service_name: ((service_name))
    \t template_name: ((template_name))
    \t complaint_id: ((complaint_id))
    \t complaint_type: ((complaint_type))
    \t complaint_date: ((complaint_date))
    """

    # NOTE(review): values are f-string-interpolated directly into SQL; this
    # is acceptable only because every value originates from trusted config
    # (see the module-level constants), never from user input.
    template_history_insert = f"""INSERT INTO templates_history (id, name, template_type, created_at,
                                                    content, archived, service_id, hidden,
                                                    subject, created_by_id, process_type, version)
                            VALUES ('{complaint_template_id}', '{complaint_template_name}', '{EMAIL_TYPE}', '{datetime.utcnow()}',
                            '{content}', False, '{service_id}', '{False}',
                            '{complaint_template_subject}', '{user_id}', '{NORMAL}', 1)
                            """

    template_insert = f"""INSERT INTO templates (id, name, template_type, created_at,
                                                    content, archived, service_id, hidden,
                                                    subject, created_by_id, process_type, version)
                            VALUES ('{complaint_template_id}', '{complaint_template_name}', '{EMAIL_TYPE}', '{datetime.utcnow()}',
                            '{content}', False, '{service_id}', '{False}',
                            '{complaint_template_subject}', '{user_id}', '{NORMAL}', 1)
                            """

    op.get_bind()
    op.execute(template_history_insert)
    op.execute(template_insert)

    # If you are copying this migration, please remember about an insert to TemplateRedacted,
    # which was not originally included here either by mistake or because it was before TemplateRedacted existed
    op.execute(
        f"""
        INSERT INTO template_redacted (template_id, redact_personalisation, updated_at, updated_by_id)
        VALUES ('{complaint_template_id}', '{False}', '{datetime.utcnow()}', '{user_id}')
        ;
        """
    )
def downgrade():
    """Delete the complaint template and every row that references it."""
    # templates/templates_history key on ``id``; the other tables reference
    # the template through ``template_id``. Deletion order respects FKs.
    targets = (
        ('notifications', 'template_id'),
        ('jobs', 'template_id'),
        ('template_redacted', 'template_id'),
        ('templates_history', 'id'),
        ('templates', 'id'),
    )
    for table, column in targets:
        op.execute(f"delete from {table} where {column} = '{complaint_template_id}'")
| 43.688312 | 135 | 0.611177 |
3a69ca2db912d045933eff108a7217a5de5b7367 | 2,072 | py | Python | tests/unit/visualization/test_dag_viewer.py | brendanhasz/drainpype | a183acec7cae1ef9fde260868e2b021516a8cd7f | [
"MIT"
] | 2 | 2021-03-03T12:11:24.000Z | 2021-03-18T15:09:52.000Z | tests/unit/visualization/test_dag_viewer.py | brendanhasz/pipedown | a183acec7cae1ef9fde260868e2b021516a8cd7f | [
"MIT"
] | null | null | null | tests/unit/visualization/test_dag_viewer.py | brendanhasz/pipedown | a183acec7cae1ef9fde260868e2b021516a8cd7f | [
"MIT"
] | null | null | null | from pipedown.dag import DAG
from pipedown.nodes.base import Model, Node
from pipedown.visualization.dag_viewer import get_dag_viewer_html
class MyNode(Node):
    """This is the docstring for my node!

    More info here.
    And even more info here.

    Additional Features
    -------------------
    AnotherFeature : dtype
        Some description
        and another line which should be on the same line
    SomeOtherFeature : other_dtype
        Some more description

    Another section
    ---------------
    AnotherFeature : dtype
        Some description
        and another line which should be on the same line
    SomeOtherFeature : other_dtype
        Some more description
    """

    # Identity pass-through. The docstring above is deliberate fixture data —
    # presumably rendered by the DAG viewer into the HTML; do not reword it.
    def run(self, X):
        return X
class MyModel(Model):
    """This is the docstring for my node!

    More info here.
    And even more info here.
    """

    # Source link for this node type — presumably shown by the DAG viewer;
    # confirm against pipedown's renderer.
    CODE_URL = (
        "http://github.com/brendanhasz/pipedown/blob/main/"
        "src/pipedown/nodes/base/model.py"
    )

    # Trivial model fixture: fitting is a no-op and prediction returns the
    # first column of the input frame.
    def fit(self, X, y):
        pass

    def predict(self, X):
        return X.iloc[:, 0]
class MyDAG(DAG):
    """This is the docstring for my DAG!

    Lots of high-level info.
    And even more info here.
    """

    # Source link for the DAG class — presumably shown by the viewer.
    CODE_URL = (
        "http://github.com/brendanhasz/pipedown/blob/main/"
        "src/pipedown/dag/dag.py"
    )

    # Six named nodes: four plain nodes and two models at the leaves.
    def nodes(self):
        return {
            "a": MyNode(),
            "b": MyNode(),
            "c": MyNode(),
            "d": MyNode(),
            "e": MyModel(),
            "f": MyModel(),
        }

    # Edge map: each key lists its input node(s) — c joins a and b, d follows
    # c, and d fans out to both models e and f (per pipedown's convention;
    # confirm direction against the DAG base class).
    def edges(self):
        return {
            "c": ["a", "b"],
            "d": "c",
            "e": "d",
            "f": "d",
        }
def test_dag_viewer():
    """Smoke test: rendering the sample DAG yields an HTML string."""
    rendered = get_dag_viewer_html(MyDAG())
    assert isinstance(rendered, str)
if __name__ == "__main__":
    # Manual inspection path: build the sample DAG, render it, and write the
    # HTML to disk so it can be opened in a browser.
    dag = MyDAG()

    # Get the html
    html = get_dag_viewer_html(dag)

    # Save the html
    with open("test_dag_html.html", "w") as fid:
        fid.write(html)
| 19.185185 | 65 | 0.550193 |
91537fa443c27ef2060c00a874f8394c6837d8d6 | 1,578 | py | Python | stock_entry/migrations/0001_initial.py | JuanFariasGit/crm | 5ab322644cb988a916bf6b5e8ee79322a40f2601 | [
"MIT"
] | null | null | null | stock_entry/migrations/0001_initial.py | JuanFariasGit/crm | 5ab322644cb988a916bf6b5e8ee79322a40f2601 | [
"MIT"
] | null | null | null | stock_entry/migrations/0001_initial.py | JuanFariasGit/crm | 5ab322644cb988a916bf6b5e8ee79322a40f2601 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.7 on 2021-07-27 22:25
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial migration for the stock_entry app: creates the StockEntry table
    # with FKs to product, provider and the (swappable) user model.

    initial = True

    dependencies = [
        ('provider', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('product', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='StockEntry',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('purchase_date', models.DateField()),
                # expiration_date is optional (blank/null allowed).
                ('expiration_date', models.DateField(blank=True, null=True)),
                ('quantity', models.IntegerField()),
                ('cost_unit', models.DecimalField(decimal_places=2, max_digits=8)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='product.product')),
                ('provider', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='provider.provider')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Stock Entry',
                # NOTE(review): plural form is identical to the singular —
                # possibly intentional; confirm.
                'verbose_name_plural': 'Stock Entry',
                'db_table': 'stock_entry',
            },
        ),
    ]
| 39.45 | 118 | 0.602662 |
6478aea2fd07ea20b16e716f91f9a8cd885cac4e | 143,167 | py | Python | src/syft/lib/torch/allowlist.py | chinmayshah99/PySyft | c26c7c9478df37da7d0327a67a5987c2dfd91cbe | [
"MIT"
] | 1 | 2020-12-22T17:22:13.000Z | 2020-12-22T17:22:13.000Z | src/syft/lib/torch/allowlist.py | chinmayshah99/PySyft | c26c7c9478df37da7d0327a67a5987c2dfd91cbe | [
"MIT"
] | null | null | null | src/syft/lib/torch/allowlist.py | chinmayshah99/PySyft | c26c7c9478df37da7d0327a67a5987c2dfd91cbe | [
"MIT"
] | null | null | null | # stdlib
from typing import Dict
from typing import Union
# syft relative
from ..misc.union import UnionGenerator
# Maps a fully-qualified torch path to the fully-qualified name of its return
# type. A dict value is also allowed by the annotation — presumably for
# entries with extra metadata (e.g. version constraints); confirm downstream.
allowlist: Dict[str, Union[str, Dict[str, str]]] = {}  # (path: str, return_type:type)
# --------------------------------------------------------------------------------------
# SECTION - Tensor methods which are intentionally disabled
# --------------------------------------------------------------------------------------
# SECTION - Tensor methods which are insecure
# allowlist["torch.Tensor.__array__"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor.__array_priority__"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor.__array_wrap__"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor.__bool__"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor.__class__"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor.__contains__"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor.__deepcopy__"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor.__delattr__"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor.__delitem__"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor.__dict__"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor.__dir__"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor.__doc__"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor.__format__"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor.__getattribute__"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor.__init__"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor.__init_subclass__"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor.__len__"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST YET - talk to TRASK
# allowlist["torch.Tensor.__module__"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor.__new__"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor.__reduce__"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor.__reduce_ex__"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor.__repr__"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor.__setattr__"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor.__setstate__"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor.__sizeof__"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST YET - talk to TRASK
# allowlist["torch.Tensor.__str__"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor.__subclasshook__"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor.__weakref__"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor._backward_hooks"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor._base"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor._cdata"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor._coalesced_"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor._dimI"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor._dimV"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor._grad"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor._grad_fn"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor.grad_fn"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor._indices"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor._is_view"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor._make_subclass"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor._nnz"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor._update_names"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor._values"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor.dtype"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST YET - talk to TRASK
# allowlist["torch.Tensor.has_names"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor.record_stream"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST#
# allowlist["torch.Tensor.register_hook"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor.share_memory_"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor.storage"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor.storage_offset"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor.storage_type"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor.where"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# SECTION - Torch functions which are insecure
# allowlist["torch.where"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.storage"] = SECURITY WARNING: DO NOT ADD TO ALLOW LIST
# SECTION - Tensor methods which have serde issues
# allowlist["torch.Tensor.to_dense"] = SERDE WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor.to_mkldnn"] = SERDE WARNING: DO NOT ADD TO ALLOW LIST
# allowlist["torch.Tensor.to_sparse"] = SERDE WARNING: DO NOT ADD TO ALLOW LIST
# --------------------------------------------------------------------------------------
# SECTION - Tensor methods which are tested
# --------------------------------------------------------------------------------------
# SECTION - The capital Tensor constructors
# Each typed constructor (FloatTensor, IntTensor, ...) is declared to return a
# plain torch.Tensor.
allowlist["torch.Tensor"] = "torch.Tensor"
allowlist["torch.BFloat16Tensor"] = "torch.Tensor"
allowlist["torch.BoolTensor"] = "torch.Tensor"
allowlist["torch.ByteTensor"] = "torch.Tensor"
allowlist["torch.CharTensor"] = "torch.Tensor"
allowlist["torch.DoubleTensor"] = "torch.Tensor"
allowlist["torch.FloatTensor"] = "torch.Tensor"
allowlist["torch.HalfTensor"] = "torch.Tensor"
allowlist["torch.IntTensor"] = "torch.Tensor"
allowlist["torch.LongTensor"] = "torch.Tensor"
allowlist["torch.ShortTensor"] = "torch.Tensor"
# SECTION - Tensor methods
allowlist["torch.Tensor.__abs__"] = "torch.Tensor"
allowlist["torch.Tensor.__add__"] = "torch.Tensor"
allowlist["torch.Tensor.__and__"] = "torch.Tensor"
allowlist["torch.Tensor.__eq__"] = "torch.Tensor"
allowlist["torch.Tensor.__float__"] = "syft.lib.python.Float"
allowlist["torch.Tensor.__ge__"] = "torch.Tensor"
allowlist["torch.Tensor.__getitem__"] = "torch.Tensor"
allowlist["torch.Tensor.__gt__"] = "torch.Tensor"
allowlist["torch.Tensor.__hash__"] = "syft.lib.python.Int"
allowlist["torch.Tensor.__iadd__"] = "torch.Tensor"
allowlist["torch.Tensor.__iand__"] = "torch.Tensor"
allowlist["torch.Tensor.__idiv__"] = "torch.Tensor"
allowlist["torch.Tensor.__ilshift__"] = "torch.Tensor"
allowlist["torch.Tensor.__imul__"] = "torch.Tensor"
allowlist["torch.Tensor.__index__"] = "torch.Tensor"
allowlist["torch.Tensor.__int__"] = "syft.lib.python.Int"
allowlist["torch.Tensor.__invert__"] = "torch.Tensor"
allowlist["torch.Tensor.__ior__"] = "torch.Tensor"
allowlist["torch.Tensor.__ipow__"] = "torch.Tensor" # none implemented in 1.5.1
allowlist["torch.Tensor.__irshift__"] = "torch.Tensor"
allowlist["torch.Tensor.__isub__"] = "torch.Tensor"
allowlist["torch.Tensor.__itruediv__"] = "torch.Tensor"
allowlist["torch.Tensor.__ixor__"] = "torch.Tensor"
allowlist["torch.Tensor.__le__"] = "torch.Tensor"
allowlist["torch.Tensor.__long__"] = "syft.lib.python.Int"
allowlist["torch.Tensor.__lshift__"] = "torch.Tensor"
allowlist["torch.Tensor.__lt__"] = "torch.Tensor"
allowlist["torch.Tensor.__matmul__"] = "torch.Tensor"
allowlist["torch.Tensor.__mod__"] = "torch.Tensor"
allowlist["torch.Tensor.__mul__"] = "torch.Tensor"
allowlist["torch.Tensor.__ne__"] = "torch.Tensor"
allowlist["torch.Tensor.__neg__"] = "torch.Tensor"
allowlist["torch.Tensor.__nonzero__"] = "torch.Tensor"
allowlist["torch.Tensor.__or__"] = "torch.Tensor"
allowlist["torch.Tensor.__pow__"] = "torch.Tensor"
allowlist["torch.Tensor.__radd__"] = "torch.Tensor"
allowlist["torch.Tensor.__rdiv__"] = "torch.Tensor"
allowlist["torch.Tensor.__reversed__"] = "torch.Tensor"
allowlist["torch.Tensor.__rmul__"] = "torch.Tensor"
allowlist["torch.Tensor.__rpow__"] = "torch.Tensor"
allowlist["torch.Tensor.__rshift__"] = "torch.Tensor"
allowlist["torch.Tensor.__rsub__"] = "torch.Tensor"
allowlist["torch.Tensor.__rtruediv__"] = "torch.Tensor"
allowlist["torch.Tensor.__setitem__"] = "syft.lib.python._SyNone"
allowlist["torch.Tensor.__sub__"] = "torch.Tensor"
allowlist["torch.Tensor.__truediv__"] = "torch.Tensor"
allowlist["torch.Tensor.__xor__"] = "torch.Tensor"
allowlist["torch.Tensor._version"] = "syft.lib.python.Int"
allowlist["torch.Tensor.abs_"] = "torch.Tensor"
allowlist["torch.Tensor.abs"] = "torch.Tensor"
allowlist["torch.Tensor.acos_"] = "torch.Tensor"
allowlist["torch.Tensor.acos"] = "torch.Tensor"
allowlist["torch.Tensor.add_"] = "torch.Tensor"
allowlist["torch.Tensor.add"] = "torch.Tensor"
allowlist["torch.Tensor.addbmm_"] = "torch.Tensor"
allowlist["torch.Tensor.addbmm"] = "torch.Tensor"
allowlist["torch.Tensor.addcdiv_"] = "torch.Tensor"
allowlist["torch.Tensor.addcdiv"] = "torch.Tensor"
allowlist["torch.Tensor.addcmul_"] = "torch.Tensor"
allowlist["torch.Tensor.addcmul"] = "torch.Tensor"
allowlist["torch.Tensor.addmm_"] = "torch.Tensor"
allowlist["torch.Tensor.addmm"] = "torch.Tensor"
allowlist["torch.Tensor.addmv_"] = "torch.Tensor"
allowlist["torch.Tensor.addmv"] = "torch.Tensor"
allowlist["torch.Tensor.addr_"] = "torch.Tensor"
allowlist["torch.Tensor.addr"] = "torch.Tensor"
allowlist["torch.Tensor.all"] = "torch.Tensor"
allowlist["torch.Tensor.allclose"] = "syft.lib.python.Bool"
allowlist["torch.Tensor.angle"] = "torch.Tensor"
allowlist["torch.Tensor.any"] = "torch.Tensor"
allowlist["torch.Tensor.argmax"] = "torch.Tensor"
allowlist["torch.Tensor.argmin"] = "torch.Tensor"
allowlist["torch.Tensor.argsort"] = "torch.Tensor"
allowlist["torch.Tensor.as_strided_"] = "torch.Tensor"
allowlist["torch.Tensor.as_strided"] = "torch.Tensor"
allowlist["torch.Tensor.asin_"] = "torch.Tensor"
allowlist["torch.Tensor.asin"] = "torch.Tensor"
allowlist["torch.Tensor.atan_"] = "torch.Tensor"
allowlist["torch.Tensor.atan"] = "torch.Tensor"
allowlist["torch.Tensor.atan2_"] = "torch.Tensor"
allowlist["torch.Tensor.atan2"] = "torch.Tensor"
allowlist["torch.Tensor.backward"] = "syft.lib.python._SyNone"
allowlist["torch.Tensor.baddbmm_"] = "torch.Tensor"
allowlist["torch.Tensor.baddbmm"] = "torch.Tensor"
allowlist["torch.Tensor.bernoulli_"] = "torch.Tensor"
allowlist["torch.Tensor.bernoulli"] = "torch.Tensor"
allowlist["torch.Tensor.bfloat16"] = "torch.Tensor"
allowlist["torch.Tensor.bincount"] = "torch.Tensor"
# Tensor methods whose remote result is wrapped as the type named on the
# right-hand side.  "# Tuple not List" marks methods whose real return is a
# tuple, approximated here by syft's List wrapper.
allowlist["torch.Tensor.bitwise_not_"] = "torch.Tensor"
allowlist["torch.Tensor.bitwise_not"] = "torch.Tensor"
allowlist["torch.Tensor.bitwise_xor_"] = "torch.Tensor"
allowlist["torch.Tensor.bitwise_xor"] = "torch.Tensor"
allowlist["torch.Tensor.bmm"] = "torch.Tensor"
allowlist["torch.Tensor.bool"] = "torch.Tensor"
allowlist["torch.Tensor.byte"] = "torch.Tensor"
allowlist["torch.Tensor.cauchy_"] = "torch.Tensor"
allowlist["torch.Tensor.ceil_"] = "torch.Tensor"
allowlist["torch.Tensor.ceil"] = "torch.Tensor"
allowlist["torch.Tensor.char"] = "torch.Tensor"
allowlist["torch.Tensor.cholesky_inverse"] = "torch.Tensor"
allowlist["torch.Tensor.cholesky_solve"] = "torch.Tensor"
allowlist["torch.Tensor.cholesky"] = "torch.Tensor"
allowlist["torch.Tensor.chunk"] = "syft.lib.python.List" # Tuple not List
allowlist["torch.Tensor.clamp_"] = "torch.Tensor"
allowlist["torch.Tensor.clamp_max_"] = "torch.Tensor"
allowlist["torch.Tensor.clamp_max"] = "torch.Tensor"
allowlist["torch.Tensor.clamp_min_"] = "torch.Tensor"
allowlist["torch.Tensor.clamp_min"] = "torch.Tensor"
allowlist["torch.Tensor.clamp"] = "torch.Tensor"
allowlist["torch.Tensor.clone"] = "torch.Tensor"
allowlist["torch.Tensor.coalesce"] = "torch.Tensor"
allowlist["torch.Tensor.conj"] = "torch.Tensor"
allowlist["torch.Tensor.contiguous"] = "torch.Tensor"
allowlist["torch.Tensor.copy_"] = "torch.Tensor"
allowlist["torch.Tensor.cos_"] = "torch.Tensor"
allowlist["torch.Tensor.cos"] = "torch.Tensor"
allowlist["torch.Tensor.cosh_"] = "torch.Tensor"
allowlist["torch.Tensor.cosh"] = "torch.Tensor"
allowlist["torch.Tensor.cpu"] = "torch.Tensor"
allowlist["torch.Tensor.cross"] = "torch.Tensor"
allowlist["torch.Tensor.cuda"] = "torch.Tensor"
allowlist["torch.Tensor.cummax"] = "syft.lib.python.ValuesIndices"
allowlist["torch.Tensor.cummin"] = "syft.lib.python.ValuesIndices"
allowlist["torch.Tensor.cumprod"] = "torch.Tensor"
allowlist["torch.Tensor.cumsum"] = "torch.Tensor"
allowlist["torch.Tensor.data_ptr"] = "syft.lib.python.Int"
allowlist["torch.Tensor.data"] = "torch.Tensor"
# NOTE(review): dense_dim() returns a plain int in torch — "torch.Tensor"
# here looks suspect; confirm against the torch API before relying on it.
allowlist["torch.Tensor.dense_dim"] = "torch.Tensor"
allowlist["torch.Tensor.dequantize"] = "torch.Tensor"
allowlist["torch.Tensor.det"] = "torch.Tensor"
allowlist["torch.Tensor.detach"] = "torch.Tensor"
allowlist["torch.Tensor.diag_embed"] = "torch.Tensor"
allowlist["torch.Tensor.diag"] = "torch.Tensor"
allowlist["torch.Tensor.diagflat"] = "torch.Tensor"
allowlist["torch.Tensor.diagonal"] = "torch.Tensor"
allowlist["torch.Tensor.digamma_"] = "torch.Tensor"
allowlist["torch.Tensor.digamma"] = "torch.Tensor"
# BUGFIX: Tensor.dim() returns a plain Python int (it is an alias of
# ndimension, which this file maps to syft.lib.python.Int); wrapping the
# result as "torch.Tensor" mis-typed the returned pointer.
allowlist["torch.Tensor.dim"] = "syft.lib.python.Int"
allowlist["torch.Tensor.dist"] = "torch.Tensor"
allowlist["torch.Tensor.dot"] = "torch.Tensor"
allowlist["torch.Tensor.double"] = "torch.Tensor"
allowlist["torch.Tensor.eig"] = "syft.lib.python.ValuesIndices"
allowlist["torch.Tensor.element_size"] = "syft.lib.python.Int"
allowlist["torch.Tensor.eq_"] = "torch.Tensor"
allowlist["torch.Tensor.eq"] = "torch.Tensor"
allowlist["torch.Tensor.equal"] = "syft.lib.python.Bool"
allowlist["torch.Tensor.erf_"] = "torch.Tensor"
allowlist["torch.Tensor.erf"] = "torch.Tensor"
allowlist["torch.Tensor.erfc_"] = "torch.Tensor"
allowlist["torch.Tensor.erfc"] = "torch.Tensor"
allowlist["torch.Tensor.erfinv_"] = "torch.Tensor"
allowlist["torch.Tensor.erfinv"] = "torch.Tensor"
allowlist["torch.Tensor.exp_"] = "torch.Tensor"
allowlist["torch.Tensor.exp"] = "torch.Tensor"
allowlist["torch.Tensor.expand_as"] = "torch.Tensor"
allowlist["torch.Tensor.expand"] = "torch.Tensor"
allowlist["torch.Tensor.expm1_"] = "torch.Tensor"
allowlist["torch.Tensor.expm1"] = "torch.Tensor"
allowlist["torch.Tensor.exponential_"] = "torch.Tensor"
allowlist["torch.Tensor.fft"] = "torch.Tensor"
allowlist["torch.Tensor.fill_"] = "torch.Tensor"
allowlist["torch.Tensor.fill_diagonal_"] = "torch.Tensor"
allowlist["torch.Tensor.flatten"] = "torch.Tensor"
allowlist["torch.Tensor.flip"] = "torch.Tensor"
allowlist["torch.Tensor.float"] = "torch.Tensor"
allowlist["torch.Tensor.floor_"] = "torch.Tensor"
allowlist["torch.Tensor.floor"] = "torch.Tensor"
allowlist["torch.Tensor.fmod_"] = "torch.Tensor"
allowlist["torch.Tensor.fmod"] = "torch.Tensor"
allowlist["torch.Tensor.frac_"] = "torch.Tensor"
allowlist["torch.Tensor.frac"] = "torch.Tensor"
allowlist["torch.Tensor.gather"] = "torch.Tensor"
allowlist["torch.Tensor.ge_"] = "torch.Tensor"
allowlist["torch.Tensor.ge"] = "torch.Tensor"
allowlist["torch.Tensor.geometric_"] = "torch.Tensor"
allowlist["torch.Tensor.geqrf"] = "syft.lib.python.ValuesIndices"
allowlist["torch.Tensor.ger"] = "torch.Tensor"
allowlist["torch.Tensor.get_device"] = "syft.lib.python.Int"
allowlist["torch.Tensor.gt_"] = "torch.Tensor"
allowlist["torch.Tensor.gt"] = "torch.Tensor"
allowlist["torch.Tensor.half"] = "torch.Tensor"
allowlist["torch.Tensor.hardshrink"] = "torch.Tensor"
allowlist["torch.Tensor.histc"] = "torch.Tensor"
allowlist["torch.Tensor.ifft"] = "torch.Tensor"
allowlist["torch.Tensor.index_add_"] = "torch.Tensor"
allowlist["torch.Tensor.index_add"] = "torch.Tensor"
allowlist["torch.Tensor.index_copy_"] = "torch.Tensor"
allowlist["torch.Tensor.index_copy"] = "torch.Tensor"
allowlist["torch.Tensor.index_fill_"] = "torch.Tensor"
allowlist["torch.Tensor.index_fill"] = "torch.Tensor"
allowlist["torch.Tensor.index_put_"] = "torch.Tensor"
allowlist["torch.Tensor.index_put"] = "torch.Tensor"
allowlist["torch.Tensor.index_select"] = "torch.Tensor"
allowlist["torch.Tensor.indices"] = "torch.Tensor"
allowlist["torch.Tensor.int_repr"] = "torch.Tensor"
allowlist["torch.Tensor.int"] = "torch.Tensor"
allowlist["torch.Tensor.inverse"] = "torch.Tensor"
allowlist["torch.Tensor.irfft"] = "torch.Tensor"
# Predicates: all of the is_* queries below return a plain bool remotely.
allowlist["torch.Tensor.is_coalesced"] = "syft.lib.python.Bool"
allowlist["torch.Tensor.is_complex"] = "syft.lib.python.Bool"
allowlist["torch.Tensor.is_contiguous"] = "syft.lib.python.Bool"
allowlist["torch.Tensor.is_cuda"] = "syft.lib.python.Bool"
allowlist["torch.Tensor.is_distributed"] = "syft.lib.python.Bool"
allowlist["torch.Tensor.is_floating_point"] = "syft.lib.python.Bool"
allowlist["torch.Tensor.is_leaf"] = "syft.lib.python.Bool"
allowlist["torch.Tensor.is_mkldnn"] = "syft.lib.python.Bool"
allowlist["torch.Tensor.is_nonzero"] = "syft.lib.python.Bool"
allowlist["torch.Tensor.is_pinned"] = "syft.lib.python.Bool"
allowlist["torch.Tensor.is_quantized"] = "syft.lib.python.Bool"
allowlist["torch.Tensor.is_same_size"] = "syft.lib.python.Bool"
allowlist["torch.Tensor.is_set_to"] = "syft.lib.python.Bool"
allowlist["torch.Tensor.is_shared"] = "syft.lib.python.Bool"
allowlist["torch.Tensor.is_signed"] = "syft.lib.python.Bool"
allowlist["torch.Tensor.is_sparse"] = "syft.lib.python.Bool"
allowlist["torch.Tensor.isclose"] = "torch.Tensor"
# item() unwraps a 0-dim tensor; the concrete return type depends on dtype.
allowlist["torch.Tensor.item"] = UnionGenerator[
    "syft.lib.python.Int", "syft.lib.python.Float", "syft.lib.python.Bool"
]
allowlist["torch.Tensor.kthvalue"] = "syft.lib.python.ValuesIndices"
allowlist["torch.Tensor.le_"] = "torch.Tensor"
allowlist["torch.Tensor.le"] = "torch.Tensor"
allowlist["torch.Tensor.lerp_"] = "torch.Tensor"
allowlist["torch.Tensor.lerp"] = "torch.Tensor"
allowlist["torch.Tensor.lgamma_"] = "torch.Tensor"
allowlist["torch.Tensor.lgamma"] = "torch.Tensor"
allowlist["torch.Tensor.log_"] = "torch.Tensor"
allowlist["torch.Tensor.log_normal_"] = "torch.Tensor"
allowlist["torch.Tensor.log_softmax"] = "torch.Tensor"
allowlist["torch.Tensor.log"] = "torch.Tensor"
allowlist["torch.Tensor.log10_"] = "torch.Tensor"
allowlist["torch.Tensor.log10"] = "torch.Tensor"
allowlist["torch.Tensor.log1p_"] = "torch.Tensor"
allowlist["torch.Tensor.log1p"] = "torch.Tensor"
allowlist["torch.Tensor.log2_"] = "torch.Tensor"
allowlist["torch.Tensor.log2"] = "torch.Tensor"
allowlist["torch.Tensor.logdet"] = "torch.Tensor"
allowlist["torch.Tensor.logical_not_"] = "torch.Tensor"
allowlist["torch.Tensor.logical_not"] = "torch.Tensor"
allowlist["torch.Tensor.logical_xor_"] = "torch.Tensor"
allowlist["torch.Tensor.logical_xor"] = "torch.Tensor"
allowlist["torch.Tensor.logsumexp"] = "torch.Tensor"
allowlist["torch.Tensor.long"] = "torch.Tensor"
allowlist["torch.Tensor.lstsq"] = "syft.lib.python.ValuesIndices"
allowlist["torch.Tensor.lt_"] = "torch.Tensor"
allowlist["torch.Tensor.lt"] = "torch.Tensor"
allowlist["torch.Tensor.lu_solve"] = "torch.Tensor"
allowlist["torch.Tensor.lu"] = "syft.lib.python.List" # Tuple not List
allowlist["torch.Tensor.masked_fill_"] = "torch.Tensor"
allowlist["torch.Tensor.masked_fill"] = "torch.Tensor"
allowlist["torch.Tensor.masked_scatter_"] = "torch.Tensor"
allowlist["torch.Tensor.masked_scatter"] = "torch.Tensor"
allowlist["torch.Tensor.masked_select"] = "torch.Tensor"
allowlist["torch.Tensor.matmul"] = "torch.Tensor"
allowlist["torch.Tensor.matrix_power"] = "torch.Tensor"
# max/median/min: the overload without a dim returns a scalar, the overload
# with a dim returns a (values, indices) pair — hence the Union.
allowlist["torch.Tensor.max"] = UnionGenerator[
    "syft.lib.python.Bool",
    "syft.lib.python.Float",
    "syft.lib.python.Int",
    "syft.lib.python.ValuesIndices",
]
allowlist["torch.Tensor.mean"] = "torch.Tensor"
allowlist["torch.Tensor.median"] = UnionGenerator[
    "syft.lib.python.Bool",
    "syft.lib.python.Float",
    "syft.lib.python.Int",
    "syft.lib.python.ValuesIndices",
]
allowlist["torch.Tensor.min"] = UnionGenerator[
    "syft.lib.python.Bool",
    "syft.lib.python.Float",
    "syft.lib.python.Int",
    "syft.lib.python.ValuesIndices",
]
allowlist["torch.Tensor.mm"] = "torch.Tensor"
allowlist["torch.Tensor.mode"] = "syft.lib.python.ValuesIndices"
allowlist["torch.Tensor.mul_"] = "torch.Tensor"
allowlist["torch.Tensor.mul"] = "torch.Tensor"
allowlist["torch.Tensor.multinomial"] = "torch.Tensor"
allowlist["torch.Tensor.mv"] = "torch.Tensor"
allowlist["torch.Tensor.mvlgamma_"] = "torch.Tensor"
allowlist["torch.Tensor.mvlgamma"] = "torch.Tensor"
allowlist["torch.Tensor.narrow_copy"] = "torch.Tensor"
allowlist["torch.Tensor.narrow"] = "torch.Tensor"
allowlist["torch.Tensor.ndim"] = "syft.lib.python.Int"
allowlist["torch.Tensor.ndimension"] = "syft.lib.python.Int"
allowlist["torch.Tensor.ne_"] = "torch.Tensor"
allowlist["torch.Tensor.ne"] = "torch.Tensor"
allowlist["torch.Tensor.neg_"] = "torch.Tensor"
allowlist["torch.Tensor.neg"] = "torch.Tensor"
allowlist["torch.Tensor.nelement"] = "syft.lib.python.Int" # is this INSECURE???
allowlist["torch.Tensor.new_empty"] = "torch.Tensor"
allowlist["torch.Tensor.new_full"] = "torch.Tensor"
allowlist["torch.Tensor.new_ones"] = "torch.Tensor"
allowlist["torch.Tensor.new_tensor"] = "torch.Tensor"
allowlist["torch.Tensor.new_zeros"] = "torch.Tensor"
allowlist["torch.Tensor.new"] = "torch.Tensor"
allowlist["torch.Tensor.nonzero"] = "torch.Tensor"
allowlist["torch.Tensor.norm"] = "torch.Tensor"
allowlist["torch.Tensor.normal_"] = "torch.Tensor"
allowlist["torch.Tensor.numel"] = "syft.lib.python.Int" # is this INSECURE???
allowlist["torch.Tensor.orgqr"] = "torch.Tensor"
allowlist["torch.Tensor.ormqr"] = "torch.Tensor"
allowlist["torch.Tensor.output_nr"] = "syft.lib.python.Int"
allowlist["torch.Tensor.permute"] = "torch.Tensor"
allowlist["torch.Tensor.pin_memory"] = "torch.Tensor"
allowlist["torch.Tensor.pinverse"] = "torch.Tensor"
allowlist["torch.Tensor.polygamma_"] = "torch.Tensor"
allowlist["torch.Tensor.polygamma"] = "torch.Tensor"
allowlist["torch.Tensor.pow_"] = "torch.Tensor"
allowlist["torch.Tensor.pow"] = "torch.Tensor"
allowlist["torch.Tensor.prelu"] = "torch.Tensor"
allowlist["torch.Tensor.prod"] = "torch.Tensor"
allowlist["torch.Tensor.put_"] = "torch.Tensor"
allowlist["torch.Tensor.q_per_channel_axis"] = "syft.lib.python.Int"
allowlist["torch.Tensor.q_per_channel_scales"] = "torch.Tensor"
allowlist["torch.Tensor.q_per_channel_zero_points"] = "torch.Tensor"
allowlist["torch.Tensor.q_scale"] = "syft.lib.python.Float"
allowlist["torch.Tensor.q_zero_point"] = "syft.lib.python.Int"
allowlist["torch.Tensor.qr"] = "syft.lib.python.ValuesIndices"
allowlist["torch.Tensor.random_"] = "torch.Tensor"
allowlist["torch.Tensor.reciprocal_"] = "torch.Tensor"
allowlist["torch.Tensor.reciprocal"] = "torch.Tensor"
allowlist["torch.Tensor.relu_"] = "torch.Tensor"
allowlist["torch.Tensor.relu"] = "torch.Tensor"
allowlist["torch.Tensor.renorm_"] = "torch.Tensor"
allowlist["torch.Tensor.renorm"] = "torch.Tensor"
allowlist["torch.Tensor.repeat_interleave"] = "torch.Tensor"
allowlist["torch.Tensor.repeat"] = "torch.Tensor"
allowlist["torch.Tensor.requires_grad_"] = "torch.Tensor"
allowlist["torch.Tensor.requires_grad"] = "syft.lib.python.Bool"
allowlist["torch.Tensor.reshape_as"] = "torch.Tensor"
allowlist["torch.Tensor.reshape"] = "torch.Tensor"
allowlist["torch.Tensor.resize_"] = "torch.Tensor"
allowlist["torch.Tensor.resize_as_"] = "torch.Tensor"
allowlist["torch.Tensor.resize_as"] = "torch.Tensor"
allowlist["torch.Tensor.resize"] = "torch.Tensor"
allowlist["torch.Tensor.retain_grad"] = "syft.lib.python._SyNone"
allowlist["torch.Tensor.rfft"] = "torch.Tensor"
allowlist["torch.Tensor.roll"] = "torch.Tensor"
allowlist["torch.Tensor.rot90"] = "torch.Tensor"
allowlist["torch.Tensor.round_"] = "torch.Tensor"
allowlist["torch.Tensor.round"] = "torch.Tensor"
allowlist["torch.Tensor.rsqrt_"] = "torch.Tensor"
allowlist["torch.Tensor.rsqrt"] = "torch.Tensor"
allowlist["torch.Tensor.scatter_"] = "torch.Tensor"
allowlist["torch.Tensor.scatter_add_"] = "torch.Tensor"
allowlist["torch.Tensor.scatter_add"] = "torch.Tensor"
allowlist["torch.Tensor.scatter"] = "torch.Tensor"
allowlist["torch.Tensor.select"] = "torch.Tensor"
allowlist["torch.Tensor.set_"] = "torch.Tensor"
allowlist["torch.Tensor.short"] = "torch.Tensor"
allowlist["torch.Tensor.sigmoid_"] = "torch.Tensor"
allowlist["torch.Tensor.sigmoid"] = "torch.Tensor"
allowlist["torch.Tensor.sign_"] = "torch.Tensor"
allowlist["torch.Tensor.sign"] = "torch.Tensor"
allowlist["torch.Tensor.sin_"] = "torch.Tensor"
allowlist["torch.Tensor.sin"] = "torch.Tensor"
allowlist["torch.Tensor.sinh_"] = "torch.Tensor"
allowlist["torch.Tensor.sinh"] = "torch.Tensor"
allowlist["torch.Tensor.slogdet"] = "syft.lib.python.ValuesIndices"
allowlist["torch.Tensor.softmax"] = "torch.Tensor"
allowlist["torch.Tensor.solve"] = "syft.lib.python.ValuesIndices"
allowlist["torch.Tensor.sort"] = "syft.lib.python.ValuesIndices"
allowlist["torch.Tensor.split_with_sizes"] = "syft.lib.python.List" # Tuple not List
allowlist["torch.Tensor.split"] = "syft.lib.python.List" # Tuple not List
allowlist["torch.Tensor.sqrt_"] = "torch.Tensor"
allowlist["torch.Tensor.sqrt"] = "torch.Tensor"
allowlist["torch.Tensor.squeeze_"] = "torch.Tensor"
allowlist["torch.Tensor.squeeze"] = "torch.Tensor"
allowlist["torch.Tensor.std"] = "torch.Tensor"
allowlist["torch.Tensor.stft"] = "torch.Tensor"
# stride() returns a tuple of ints, or a single int when a dim is given.
allowlist["torch.Tensor.stride"] = UnionGenerator[ # Tuple not List
    "syft.lib.python.List", "syft.lib.python.Int"
]
allowlist["torch.Tensor.sub_"] = "torch.Tensor"
allowlist["torch.Tensor.sub"] = "torch.Tensor"
allowlist["torch.Tensor.sum_to_size"] = "torch.Tensor"
allowlist["torch.Tensor.sum"] = "torch.Tensor"
allowlist["torch.Tensor.svd"] = "syft.lib.python.ValuesIndices"
allowlist["torch.Tensor.symeig"] = "syft.lib.python.ValuesIndices"
allowlist["torch.Tensor.t_"] = "torch.Tensor"
# NOTE: "torch.Tensor.t" was previously registered twice with the same
# value; the redundant duplicate assignment has been removed (no behavior
# change — same key, same value).
allowlist["torch.Tensor.t"] = "torch.Tensor"
allowlist["torch.Tensor.T"] = "torch.Tensor"
allowlist["torch.Tensor.take"] = "torch.Tensor"
allowlist["torch.Tensor.tan_"] = "torch.Tensor"
allowlist["torch.Tensor.tan"] = "torch.Tensor"
allowlist["torch.Tensor.tanh_"] = "torch.Tensor"
allowlist["torch.Tensor.tanh"] = "torch.Tensor"
allowlist["torch.Tensor.to"] = "torch.Tensor"
allowlist["torch.Tensor.tolist"] = "syft.lib.python.List"
allowlist["torch.Tensor.topk"] = "syft.lib.python.ValuesIndices"
allowlist["torch.Tensor.trace"] = "torch.Tensor"
allowlist["torch.Tensor.transpose_"] = "torch.Tensor"
allowlist["torch.Tensor.transpose"] = "torch.Tensor"
allowlist["torch.Tensor.triangular_solve"] = "syft.lib.python.ValuesIndices"
allowlist["torch.Tensor.tril_"] = "torch.Tensor"
allowlist["torch.Tensor.tril"] = "torch.Tensor"
allowlist["torch.Tensor.triu_"] = "torch.Tensor"
allowlist["torch.Tensor.triu"] = "torch.Tensor"
allowlist["torch.Tensor.trunc_"] = "torch.Tensor"
allowlist["torch.Tensor.trunc"] = "torch.Tensor"
allowlist["torch.Tensor.type_as"] = "torch.Tensor"
# type() with no arguments returns the dtype/class name as a string.
allowlist["torch.Tensor.type"] = "syft.lib.python.String"
allowlist["torch.Tensor.unbind"] = "syft.lib.python.List" # Tuple not List
allowlist["torch.Tensor.unfold"] = "torch.Tensor"
allowlist["torch.Tensor.uniform_"] = "torch.Tensor"
allowlist["torch.Tensor.unique_consecutive"] = "torch.Tensor"
allowlist["torch.Tensor.unique"] = "torch.Tensor"
allowlist["torch.Tensor.unsqueeze_"] = "torch.Tensor"
allowlist["torch.Tensor.unsqueeze"] = "torch.Tensor"
allowlist["torch.Tensor.var"] = "torch.Tensor"
allowlist["torch.Tensor.view_as"] = "torch.Tensor"
allowlist["torch.Tensor.view"] = "torch.Tensor"
allowlist["torch.Tensor.zero_"] = "torch.Tensor"
# --------------------------------------------------------------------------------------
# SECTION - Tensor methods with special version requirements
# --------------------------------------------------------------------------------------
# SECTION - Tensor methods since 1.5.0
# Every 1.5.0-gated method shares the same return type and version gate, so
# they are registered in one order-preserving pass.  The division/remainder
# family exists in 1.4.0 as well but causes a fatal exception on non-floats
# there, which is why those entries carry the 1.5.0 gate too.
for _method_name in (
    "__div__",  # exists in 1.4.0 but causes fatal exception on non floats
    "__floordiv__",  # exists in 1.4.0 but causes fatal exception on non floats
    "__rfloordiv__",  # exists in 1.4.0 but causes fatal exception on non floats
    "bitwise_and",
    "bitwise_and_",
    "bitwise_or",
    "bitwise_or_",
    "div",  # exists in 1.4.0 but causes fatal exception on non floats
    "div_",  # exists in 1.4.0 but causes fatal exception on non floats
    "floor_divide",
    "floor_divide_",
    "logical_and",
    "logical_and_",
    "logical_or",
    "logical_or_",
    "remainder",  # exists in 1.4.0 but causes fatal exception on non floats
    "remainder_",  # exists in 1.4.0 but causes fatal exception on non floats
    "square",
    "square_",
    "true_divide",
    "true_divide_",
):
    # A fresh dict per entry, exactly as the original literal assignments did.
    allowlist["torch.Tensor." + _method_name] = {
        "return_type": "torch.Tensor",
        "min_version": "1.5.0",
    }
del _method_name
# SECTION - Tensor methods since 1.5.1
# Gated to 1.5.1 (unlike the rest of the floordiv family, which is 1.5.0).
allowlist["torch.Tensor.__ifloordiv__"] = {
    "return_type": "torch.Tensor",
    "min_version": "1.5.1",
}
# SECTION - Tensor methods since 1.6.0
# is_meta is the only 1.6.0 addition that yields a bool; it is registered
# separately from the tensor-returning bulk below.
allowlist["torch.Tensor.is_meta"] = {
    "return_type": "syft.lib.python.Bool",
    "min_version": "1.6.0",
}
# The remaining 1.6.0 additions all return a tensor — register them in one
# order-preserving pass with identical version metadata.
for _method_name in (
    "absolute_",
    "absolute",
    "acosh_",
    "acosh",
    "asinh_",
    "asinh",
    "atanh_",
    "atanh",
    "deg2rad_",
    "deg2rad",
    "fliplr",
    "flipud",
    "isfinite",
    "isinf",
    "isnan",
    "logaddexp",
    "logaddexp2",
    "logcumsumexp",
    "rad2deg_",
    "rad2deg",
    "istft",
):
    allowlist["torch.Tensor." + _method_name] = {
        "return_type": "torch.Tensor",
        "min_version": "1.6.0",
    }
del _method_name
# SECTION - Tensor methods since 1.7.0
# __complex__ is the only 1.7.0 addition in this run with a non-tensor
# return; it is registered on its own.
allowlist["torch.Tensor.__complex__"] = {
    "return_type": "syft.lib.python.Complex",
    "min_version": "1.7.0",
}
# All other 1.7.0 additions here return a tensor — register them in one
# order-preserving pass with identical version metadata.
for _method_name in (
    "amax",
    "amin",
    "arccos",
    "arccos_",
    "arccosh",
    "arccosh_",
    "arcsin",
    "arcsin_",
    "arcsinh",
    "arcsinh_",
    "arctan",
    "arctan_",
    "arctanh",
    "arctanh_",
    "clip",
    "clip_",
    "count_nonzero",
    "divide",
    "divide_",
    "exp2",
    "exp2_",
    "fix",
    "fix_",
    "gcd",
    "gcd_",
    "greater",
    "greater_",
    "greater_equal",
    "greater_equal_",
    "heaviside",
    "heaviside_",
    "hypot",
    "hypot_",
    "i0",
    "i0_",
    "isneginf",
    "isposinf",
    "isreal",
    "lcm",
    "lcm_",
    "less",
    "less_",
    "less_equal",
    "less_equal_",
    "logit",
    "logit_",
    "maximum",
    "minimum",
    "matrix_exp",
    "multiply",
    "multiply_",
    "nanquantile",
    "nansum",
    "negative",
    "negative_",
    "nextafter",
    "nextafter_",
    "outer",
    "quantile",
    "sgn",
    "sgn_",
    "signbit",
    "subtract",
    "subtract_",
):
    allowlist["torch.Tensor." + _method_name] = {
        "return_type": "torch.Tensor",
        "min_version": "1.7.0",
    }
del _method_name
allowlist["torch.Tensor.unsafe_chunk"] = {
    "return_type": "syft.lib.python.List",
    "min_version": "1.7.0",
}
# BUGFIX: unsafe_split returns a sequence of tensors, exactly like split,
# unsafe_chunk and unsafe_split_with_sizes (all mapped to List in this
# file); "torch.Tensor" mis-typed the returned pointer.
allowlist["torch.Tensor.unsafe_split"] = {
    "return_type": "syft.lib.python.List",  # Tuple not List
    "min_version": "1.7.0",
}
allowlist["torch.Tensor.vdot"] = {
    "return_type": "torch.Tensor",
    "min_version": "1.7.0",
}
allowlist["torch.Tensor.movedim"] = {
    "return_type": "torch.Tensor",
    "min_version": "1.7.0",
}
allowlist["torch.Tensor.unsafe_split_with_sizes"] = {
    "return_type": "syft.lib.python.List",  # Tuple not List
    "min_version": "1.7.0",
}
# --------------------------------------------------------------------------------------
# SECTION - Tensor methods which are incomplete or untested but enabled
# --------------------------------------------------------------------------------------
# Enabled despite being flagged as incomplete/untested in this section.
allowlist["torch.Tensor.device"] = "torch.device"
allowlist["torch.Tensor.detach_"] = "torch.Tensor"
allowlist["torch.Tensor.grad"] = "torch.Tensor"  # need an example with grad
# --------------------------------------------------------------------------------------
# SECTION - Tensor methods with specific issues or require a special test combination
# --------------------------------------------------------------------------------------
# allowlist["torch.layout"] = "torch.layout" # requires protobuf serialization
# allowlist["torch.Tensor.layout"] = "torch.layout" # requires torch layout
# allowlist["torch.Size"] = "torch.Size" # requires protobuf serialization
# allowlist["torch.Tensor.size"] = "torch.Size" # requires torch.Size
# allowlist["torch.Tensor.shape"] = "torch.Size" # requires torch.Size
# allowlist["torch.Tensor.__iter__"] = "unknown" # How to handle return iterator?
# allowlist["torch.Tensor.imag"] = "torch.Tensor" # requires dtype complex
# allowlist["torch.Tensor.real"] = "torch.Tensor" # requires dtype complex
# allowlist["torch.Tensor.qscheme"] = "unknown" # requires quantized backend
# --------------------------------------------------------------------------------------
# SECTION - Tensor methods which require named tensors
# --------------------------------------------------------------------------------------
# allowlist["torch.Tensor.unflatten"] = "torch.Tensor" # named tensors
# allowlist["torch.Tensor.refine_names"] = "torch.Tensor" # named tensors
# allowlist["torch.Tensor.rename_"] = "torch.Tensor" # named tensors
# allowlist["torch.Tensor.rename"] = "torch.Tensor" # named tensors
# allowlist["torch.Tensor.align_as"] = "torch.Tensor" # named tensors
# allowlist["torch.Tensor.align_to"] = "torch.Tensor" # named tensors
# allowlist["torch.Tensor.name"] = "Optional[str]" # requires named tensors and Optional
# allowlist["torch.Tensor.names"] = "Tuple[str]" # requires named tensors and Tuple
# allowlist["torch.Tensor.__torch_function__"] = "unknown" # 1.7.0 # probably wont work
# --------------------------------------------------------------------------------------
# SECTION - Tensor methods which require classes or callables or external libs
# --------------------------------------------------------------------------------------
# allowlist["torch.Tensor.apply_"] = "torch.Tensor" # requires a callable
# allowlist["torch.Tensor.as_subclass"] = "torch.Tensor" # requires a subclass
# allowlist["torch.Tensor.map_"] = "unknown" # requires a callable
# allowlist["torch.Tensor.map2_"] = "unknown" # requires a callable
# allowlist["torch.Tensor.numpy"] = "numpy.ndarray" # requires numpy.ndarray
# allowlist["torch.Tensor.reinforce"] = "unknown" # requires reinforce
# --------------------------------------------------------------------------------------
# SECTION - Tensor methods which require sparse
# --------------------------------------------------------------------------------------
# allowlist["torch.Tensor.smm"] = "unknown" # requires sparse tensors
# allowlist["torch.Tensor.sparse_dim"] = "unknown" # requires sparse tensors
# allowlist["torch.Tensor.sparse_mask"] = "unknown" # requires sparse tensors
# allowlist["torch.Tensor.sspaddmm"] = "torch.Tensor" # requires sparse tensors
# allowlist["torch.Tensor.sparse_resize_"] = "unknown" # requires sparse tensors
# allowlist["torch.Tensor.sparse_resize_and_clear_"] = "unknown" # requires sparse
# allowlist["torch.Tensor.values"] = "unknown" # requires sparse tensors
# --------------------------------------------------------------------------------------
# SECTION - Torch functions enabled as torch.Tensor methods above
# --------------------------------------------------------------------------------------
# torch-level counterparts of the Tensor methods enabled earlier: each key is a
# free function (e.g. torch.abs(t)) and the value is the pointer return type.
# Non-Tensor returns are mapped onto syft wrapper types:
#   syft.lib.python.Bool / Int / Float  - plain python scalars
#   syft.lib.python.List                - tuple returns (serialized as a List)
#   syft.lib.python.ValuesIndices       - named-tuple style (values, indices) results
allowlist["torch.abs_"] = "torch.Tensor"
allowlist["torch.abs"] = "torch.Tensor"
allowlist["torch.acos_"] = "torch.Tensor"
allowlist["torch.acos"] = "torch.Tensor"
allowlist["torch.add"] = "torch.Tensor"
allowlist["torch.addbmm"] = "torch.Tensor"
allowlist["torch.addcdiv"] = "torch.Tensor"
allowlist["torch.addcmul"] = "torch.Tensor"
allowlist["torch.addmm"] = "torch.Tensor"
allowlist["torch.addmv_"] = "torch.Tensor"
allowlist["torch.addmv"] = "torch.Tensor"
allowlist["torch.addr"] = "torch.Tensor"
allowlist["torch.all"] = "torch.Tensor"
allowlist["torch.allclose"] = "syft.lib.python.Bool"
allowlist["torch.angle"] = "torch.Tensor"
allowlist["torch.any"] = "torch.Tensor"
allowlist["torch.argmax"] = "torch.Tensor"
allowlist["torch.argmin"] = "torch.Tensor"
allowlist["torch.argsort"] = "torch.Tensor"
allowlist["torch.as_strided_"] = "torch.Tensor"
allowlist["torch.as_strided"] = "torch.Tensor"
allowlist["torch.asin_"] = "torch.Tensor"
allowlist["torch.asin"] = "torch.Tensor"
allowlist["torch.atan_"] = "torch.Tensor"
allowlist["torch.atan"] = "torch.Tensor"
allowlist["torch.atan2"] = "torch.Tensor"
allowlist["torch.baddbmm"] = "torch.Tensor"
allowlist["torch.bernoulli"] = "torch.Tensor"
allowlist["torch.bitwise_and"] = "torch.Tensor"
allowlist["torch.bitwise_not"] = "torch.Tensor"
allowlist["torch.bitwise_or"] = "torch.Tensor"
allowlist["torch.bitwise_xor"] = "torch.Tensor"
allowlist["torch.bmm"] = "torch.Tensor"
allowlist["torch.ceil_"] = "torch.Tensor"
allowlist["torch.ceil"] = "torch.Tensor"
allowlist["torch.cholesky_inverse"] = "torch.Tensor"
allowlist["torch.cholesky_solve"] = "torch.Tensor"
allowlist["torch.cholesky"] = "torch.Tensor"
allowlist["torch.chunk"] = "syft.lib.python.List"  # Tuple not List
allowlist["torch.clamp_"] = "torch.Tensor"
allowlist["torch.clamp_max_"] = "torch.Tensor"
allowlist["torch.clamp_max"] = "torch.Tensor"
allowlist["torch.clamp_min_"] = "torch.Tensor"
allowlist["torch.clamp_min"] = "torch.Tensor"
allowlist["torch.clamp"] = "torch.Tensor"
allowlist["torch.clone"] = "torch.Tensor"
allowlist["torch.conj"] = "torch.Tensor"
allowlist["torch.cos_"] = "torch.Tensor"
allowlist["torch.cos"] = "torch.Tensor"
allowlist["torch.cosh_"] = "torch.Tensor"
allowlist["torch.cosh"] = "torch.Tensor"
allowlist["torch.cross"] = "torch.Tensor"
allowlist["torch.cummax"] = "syft.lib.python.ValuesIndices"
allowlist["torch.cummin"] = "syft.lib.python.ValuesIndices"
allowlist["torch.cumprod"] = "torch.Tensor"
allowlist["torch.cumsum"] = "torch.Tensor"
allowlist["torch.dequantize"] = "torch.Tensor"
allowlist["torch.det"] = "torch.Tensor"
allowlist["torch.detach"] = "torch.Tensor"
allowlist["torch.diag_embed"] = "torch.Tensor"
allowlist["torch.diag"] = "torch.Tensor"
allowlist["torch.diagflat"] = "torch.Tensor"
allowlist["torch.diagonal"] = "torch.Tensor"
allowlist["torch.digamma"] = "torch.Tensor"
allowlist["torch.dist"] = "torch.Tensor"
allowlist["torch.div"] = "torch.Tensor"
allowlist["torch.dot"] = "torch.Tensor"
allowlist["torch.eig"] = "syft.lib.python.ValuesIndices"
allowlist["torch.eq"] = "torch.Tensor"
allowlist["torch.equal"] = "syft.lib.python.Bool"
allowlist["torch.erf_"] = "torch.Tensor"
allowlist["torch.erf"] = "torch.Tensor"
allowlist["torch.erfc_"] = "torch.Tensor"
allowlist["torch.erfc"] = "torch.Tensor"
allowlist["torch.erfinv"] = "torch.Tensor"
allowlist["torch.exp_"] = "torch.Tensor"
allowlist["torch.exp"] = "torch.Tensor"
allowlist["torch.expm1_"] = "torch.Tensor"
allowlist["torch.expm1"] = "torch.Tensor"
allowlist["torch.fft"] = "torch.Tensor"
allowlist["torch.fill_"] = "torch.Tensor"
allowlist["torch.flatten"] = "torch.Tensor"
allowlist["torch.flip"] = "torch.Tensor"
allowlist["torch.floor_"] = "torch.Tensor"
allowlist["torch.floor_divide"] = "torch.Tensor"
allowlist["torch.floor"] = "torch.Tensor"
allowlist["torch.fmod"] = "torch.Tensor"
allowlist["torch.frac_"] = "torch.Tensor"
allowlist["torch.frac"] = "torch.Tensor"
allowlist["torch.gather"] = "torch.Tensor"
allowlist["torch.ge"] = "torch.Tensor"
allowlist["torch.geqrf"] = "syft.lib.python.ValuesIndices"
allowlist["torch.ger"] = "torch.Tensor"
allowlist["torch.get_device"] = "syft.lib.python.Int"
allowlist["torch.gt"] = "torch.Tensor"
allowlist["torch.hardshrink"] = "torch.Tensor"
allowlist["torch.histc"] = "torch.Tensor"
allowlist["torch.ifft"] = "torch.Tensor"
allowlist["torch.index_add"] = "torch.Tensor"
allowlist["torch.index_copy"] = "torch.Tensor"
allowlist["torch.index_fill"] = "torch.Tensor"
allowlist["torch.index_put_"] = "torch.Tensor"
allowlist["torch.index_put"] = "torch.Tensor"
allowlist["torch.index_select"] = "torch.Tensor"
allowlist["torch.int_repr"] = "torch.Tensor"
allowlist["torch.inverse"] = "torch.Tensor"
allowlist["torch.irfft"] = "torch.Tensor"
# predicates about the tensor itself return python bools, not tensors
allowlist["torch.is_complex"] = "syft.lib.python.Bool"
allowlist["torch.is_distributed"] = "syft.lib.python.Bool"
allowlist["torch.is_floating_point"] = "syft.lib.python.Bool"
allowlist["torch.is_nonzero"] = "syft.lib.python.Bool"
allowlist["torch.is_same_size"] = "syft.lib.python.Bool"
allowlist["torch.is_signed"] = "syft.lib.python.Bool"
allowlist["torch.isclose"] = "torch.Tensor"
allowlist["torch.kthvalue"] = "syft.lib.python.ValuesIndices"
allowlist["torch.le"] = "torch.Tensor"
allowlist["torch.lerp"] = "torch.Tensor"
allowlist["torch.lgamma"] = "torch.Tensor"
allowlist["torch.log_"] = "torch.Tensor"
allowlist["torch.log_softmax"] = "torch.Tensor"
allowlist["torch.log"] = "torch.Tensor"
allowlist["torch.log10_"] = "torch.Tensor"
allowlist["torch.log10"] = "torch.Tensor"
allowlist["torch.log1p_"] = "torch.Tensor"
allowlist["torch.log1p"] = "torch.Tensor"
allowlist["torch.log2_"] = "torch.Tensor"
allowlist["torch.log2"] = "torch.Tensor"
allowlist["torch.logdet"] = "torch.Tensor"
allowlist["torch.logical_and"] = "torch.Tensor"
allowlist["torch.logical_not"] = "torch.Tensor"
allowlist["torch.logical_or"] = "torch.Tensor"
allowlist["torch.logical_xor"] = "torch.Tensor"
allowlist["torch.logsumexp"] = "torch.Tensor"
allowlist["torch.lstsq"] = "syft.lib.python.ValuesIndices"
allowlist["torch.lt"] = "torch.Tensor"
allowlist["torch.lu_solve"] = "torch.Tensor"
allowlist["torch.lu"] = "syft.lib.python.List"  # Tuple not List
allowlist["torch.masked_fill"] = "torch.Tensor"
allowlist["torch.masked_scatter"] = "torch.Tensor"
allowlist["torch.masked_select"] = "torch.Tensor"
allowlist["torch.matmul"] = "torch.Tensor"
allowlist["torch.matrix_power"] = "torch.Tensor"
allowlist["torch.mean"] = "torch.Tensor"
allowlist["torch.mm"] = "torch.Tensor"
allowlist["torch.mode"] = "syft.lib.python.ValuesIndices"
allowlist["torch.mul"] = "torch.Tensor"
allowlist["torch.multinomial"] = "torch.Tensor"
allowlist["torch.mv"] = "torch.Tensor"
allowlist["torch.mvlgamma"] = "torch.Tensor"
allowlist["torch.narrow"] = "torch.Tensor"
allowlist["torch.ne"] = "torch.Tensor"
allowlist["torch.neg_"] = "torch.Tensor"
allowlist["torch.neg"] = "torch.Tensor"
allowlist["torch.nonzero"] = "torch.Tensor"
allowlist["torch.norm"] = "torch.Tensor"
allowlist["torch.orgqr"] = "torch.Tensor"
allowlist["torch.ormqr"] = "torch.Tensor"
allowlist["torch.pinverse"] = "torch.Tensor"
allowlist["torch.polygamma"] = "torch.Tensor"
allowlist["torch.pow"] = "torch.Tensor"
allowlist["torch.prelu"] = "torch.Tensor"
# quantization introspection helpers return python scalars
allowlist["torch.q_per_channel_axis"] = "syft.lib.python.Int"
allowlist["torch.q_per_channel_scales"] = "torch.Tensor"
allowlist["torch.q_per_channel_zero_points"] = "torch.Tensor"
allowlist["torch.q_scale"] = "syft.lib.python.Float"
allowlist["torch.q_zero_point"] = "syft.lib.python.Int"
allowlist["torch.qr"] = "syft.lib.python.ValuesIndices"
allowlist["torch.reciprocal_"] = "torch.Tensor"
allowlist["torch.reciprocal"] = "torch.Tensor"
allowlist["torch.relu_"] = "torch.Tensor"
allowlist["torch.relu"] = "torch.Tensor"
allowlist["torch.remainder"] = "torch.Tensor"
allowlist["torch.renorm"] = "torch.Tensor"
allowlist["torch.repeat_interleave"] = "torch.Tensor"
allowlist["torch.reshape"] = "torch.Tensor"
allowlist["torch.resize_as_"] = "torch.Tensor"
allowlist["torch.rfft"] = "torch.Tensor"
allowlist["torch.roll"] = "torch.Tensor"
allowlist["torch.rot90"] = "torch.Tensor"
allowlist["torch.round_"] = "torch.Tensor"
allowlist["torch.round"] = "torch.Tensor"
allowlist["torch.rsqrt_"] = "torch.Tensor"
allowlist["torch.rsqrt"] = "torch.Tensor"
allowlist["torch.scatter_add"] = "torch.Tensor"
allowlist["torch.scatter"] = "torch.Tensor"
allowlist["torch.select"] = "torch.Tensor"
allowlist["torch.sigmoid_"] = "torch.Tensor"
allowlist["torch.sigmoid"] = "torch.Tensor"
allowlist["torch.sign"] = "torch.Tensor"
allowlist["torch.sin_"] = "torch.Tensor"
allowlist["torch.sin"] = "torch.Tensor"
allowlist["torch.sinh_"] = "torch.Tensor"
allowlist["torch.sinh"] = "torch.Tensor"
allowlist["torch.slogdet"] = "syft.lib.python.ValuesIndices"
allowlist["torch.softmax"] = "torch.Tensor"
allowlist["torch.solve"] = "syft.lib.python.ValuesIndices"
allowlist["torch.sort"] = "syft.lib.python.ValuesIndices"
allowlist["torch.split_with_sizes"] = "syft.lib.python.List"  # Tuple not List
allowlist["torch.split"] = "syft.lib.python.List"  # Tuple not List
allowlist["torch.sqrt_"] = "torch.Tensor"
allowlist["torch.sqrt"] = "torch.Tensor"
allowlist["torch.square_"] = "torch.Tensor"
allowlist["torch.square"] = "torch.Tensor"
allowlist["torch.squeeze"] = "torch.Tensor"
allowlist["torch.std"] = "torch.Tensor"
allowlist["torch.stft"] = "torch.Tensor"
allowlist["torch.sub"] = "torch.Tensor"
allowlist["torch.sum"] = "torch.Tensor"
allowlist["torch.svd"] = "syft.lib.python.ValuesIndices"
allowlist["torch.symeig"] = "syft.lib.python.ValuesIndices"
allowlist["torch.t"] = "torch.Tensor"
allowlist["torch.take"] = "torch.Tensor"
allowlist["torch.tan_"] = "torch.Tensor"
allowlist["torch.tan"] = "torch.Tensor"
allowlist["torch.tanh_"] = "torch.Tensor"
allowlist["torch.tanh"] = "torch.Tensor"
allowlist["torch.topk"] = "syft.lib.python.ValuesIndices"
allowlist["torch.trace"] = "torch.Tensor"
allowlist["torch.transpose"] = "torch.Tensor"
allowlist["torch.triangular_solve"] = "syft.lib.python.ValuesIndices"
allowlist["torch.tril"] = "torch.Tensor"
allowlist["torch.triu"] = "torch.Tensor"
allowlist["torch.true_divide"] = "torch.Tensor"
allowlist["torch.trunc_"] = "torch.Tensor"
allowlist["torch.trunc"] = "torch.Tensor"
allowlist["torch.unique_consecutive"] = "torch.Tensor"
allowlist["torch.unique"] = "torch.Tensor"
allowlist["torch.unsqueeze"] = "torch.Tensor"
allowlist["torch.var"] = "torch.Tensor"
# NOTE: the unversioned `allowlist["torch.unsafe_chunk"]` entry that used to
# live here was redundant and misleading: torch.unsafe_chunk only exists from
# torch 1.7.0, and the key is re-registered in the 1.7.0 section below with
# {"return_type": "syft.lib.python.List", "min_version": "1.7.0"}, which
# overwrites this value anyway. Removing it avoids advertising an API that is
# absent on older torch versions.
# SECTION - Tensor functions since 1.6.0
# Every function added in torch 1.6.0 below returns a Tensor, so the entries
# are generated uniformly (a fresh version-gate dict per key, in the same
# insertion order the hand-written entries used).
for _method in (
    "torch.absolute",
    "torch.acosh_",
    "torch.acosh",
    "torch.asinh_",
    "torch.asinh",
    "torch.atanh_",
    "torch.atanh",
    "torch.deg2rad_",
    "torch.deg2rad",
    "torch.fliplr",
    "torch.flipud",
    "torch.isfinite",
    "torch.isinf",
    "torch.isnan",
    "torch.logaddexp",
    "torch.logaddexp2",
    "torch.logcumsumexp",
    "torch.rad2deg_",
    "torch.rad2deg",
    "torch.istft",
):
    allowlist[_method] = {
        "return_type": "torch.Tensor",
        "min_version": "1.6.0",
    }
del _method
# SECTION - Tensor functions since 1.7.0
# Each entry is gated on torch >= 1.7.0. Chunk/split style ops return a tuple
# of tensors, which syft models as syft.lib.python.List.
for _method, _return_type in (
    ("torch.amax", "torch.Tensor"),
    ("torch.amin", "torch.Tensor"),
    ("torch.arccos", "torch.Tensor"),
    ("torch.arccos_", "torch.Tensor"),
    ("torch.arccosh", "torch.Tensor"),
    ("torch.arccosh_", "torch.Tensor"),
    ("torch.arcsin", "torch.Tensor"),
    ("torch.arcsin_", "torch.Tensor"),
    ("torch.arcsinh", "torch.Tensor"),
    ("torch.arcsinh_", "torch.Tensor"),
    ("torch.arctan", "torch.Tensor"),
    ("torch.arctan_", "torch.Tensor"),
    ("torch.arctanh", "torch.Tensor"),
    ("torch.arctanh_", "torch.Tensor"),
    ("torch.clip", "torch.Tensor"),
    ("torch.clip_", "torch.Tensor"),
    ("torch.count_nonzero", "torch.Tensor"),
    ("torch.divide", "torch.Tensor"),
    ("torch.exp2", "torch.Tensor"),
    ("torch.exp2_", "torch.Tensor"),
    ("torch.fix", "torch.Tensor"),
    ("torch.fix_", "torch.Tensor"),
    ("torch.gcd", "torch.Tensor"),
    ("torch.gcd_", "torch.Tensor"),
    ("torch.greater", "torch.Tensor"),
    ("torch.greater_equal", "torch.Tensor"),
    ("torch.heaviside", "torch.Tensor"),
    ("torch.hypot", "torch.Tensor"),
    ("torch.i0", "torch.Tensor"),
    ("torch.i0_", "torch.Tensor"),
    ("torch.isneginf", "torch.Tensor"),
    ("torch.isposinf", "torch.Tensor"),
    ("torch.isreal", "torch.Tensor"),
    ("torch.lcm", "torch.Tensor"),
    ("torch.lcm_", "torch.Tensor"),
    ("torch.less", "torch.Tensor"),
    ("torch.less_equal", "torch.Tensor"),
    ("torch.logit", "torch.Tensor"),
    ("torch.logit_", "torch.Tensor"),
    ("torch.maximum", "torch.Tensor"),
    ("torch.minimum", "torch.Tensor"),
    ("torch.matrix_exp", "torch.Tensor"),
    ("torch.multiply", "torch.Tensor"),
    ("torch.nanquantile", "torch.Tensor"),
    ("torch.nansum", "torch.Tensor"),
    ("torch.negative", "torch.Tensor"),
    ("torch.negative_", "torch.Tensor"),
    ("torch.nextafter", "torch.Tensor"),
    ("torch.outer", "torch.Tensor"),
    ("torch.quantile", "torch.Tensor"),
    ("torch.sgn", "torch.Tensor"),
    ("torch.signbit", "torch.Tensor"),
    ("torch.subtract", "torch.Tensor"),
    ("torch.unsafe_chunk", "syft.lib.python.List"),  # Tuple not List
    ("torch.unsafe_split", "torch.Tensor"),
    ("torch.vdot", "torch.Tensor"),
    ("torch.movedim", "torch.Tensor"),
    ("torch.unsafe_split_with_sizes", "syft.lib.python.List"),  # Tuple not List
):
    allowlist[_method] = {
        "return_type": _return_type,
        "min_version": "1.7.0",
    }
del _method, _return_type
# --------------------------------------------------------------------------------------
# SECTION - Torch functions not enabled yet
# --------------------------------------------------------------------------------------
# allowlist["torch.zero_"] = "torch.Tensor"
# allowlist["torch.detach_"] = "torch.Tensor"
# allowlist["torch.device"] = "torch.Tensor"
# allowlist["torch.imag"] = "torch.Tensor"
# allowlist["torch.layout"] = "torch.Tensor"
# allowlist["torch.max"] = "torch.Tensor"
# allowlist["torch.median"] = "torch.Tensor"
# allowlist["torch.min"] = "torch.Tensor"
# allowlist["torch.name"] = "torch.Tensor"
# allowlist["torch.not_equal"] = "torch.Tensor"
# allowlist["torch.qscheme"] = "torch.Tensor"
# allowlist["torch.real"] = "torch.Tensor"
# allowlist["torch.smm"] = "torch.Tensor"
# allowlist["torch.sspaddmm"] = "torch.Tensor"
# --------------------------------------------------------------------------------------
# SECTION - Torch functions used in the fast tests: $ pytest -m fast
# --------------------------------------------------------------------------------------
# Entries exercised by the fast test suite. Insertion order matters: the
# torch.device class entry must be registered before its attributes.
for _path, _return_type in (
    ("torch.cuda.is_available", "syft.lib.python.Bool"),
    ("torch.device", "torch.device"),  # warning this must come before the attrs
    ("torch.device.index", "syft.lib.python.Int"),
    ("torch.device.type", "syft.lib.python.String"),
    ("torch.random.initial_seed", "syft.lib.python.Int"),
    ("torch.zeros_like", "torch.Tensor"),
):
    allowlist[_path] = _return_type
del _path, _return_type
# --------------------------------------------------------------------------------------
# SECTION - Torch functions which are enabled but supported above on torch.Tensor
# --------------------------------------------------------------------------------------
# SECTION - Parameter methods
# torch.nn.Parameter is a subclass of torch.Tensor
# However, we still need the constructor Class to be listed here. Everything else is
# automatically added in create_torch_ast function by doing:
# method = method.replace("torch.Tensor.", "torch.nn.Parameter.")
# allowlist["torch.nn.Parameter"] = "torch.nn.Parameter"
# Misc
allowlist["torch.manual_seed"] = "torch.Generator"
allowlist["torch.Generator"] = "torch.Generator"
allowlist["torch.Generator.get_state"] = "torch.Tensor"
allowlist["torch.Generator.set_state"] = "torch.Generator"
# NOTE: a duplicate "torch.exp" entry was removed here; it is already
# registered (with the same "torch.Tensor" return type) in the torch
# function section above.
# Modules
# Module, Conv2d, Dropout2d and Linear share the same module protocol, so the
# entries are generated uniformly in the original insertion order. Dropout2d's
# state_dict/load_state_dict were never allowlisted, so they are skipped.
for _module in (
    "torch.nn.Module",
    "torch.nn.Conv2d",
    "torch.nn.Dropout2d",
    "torch.nn.Linear",
):
    allowlist[_module] = _module
    allowlist[_module + ".__call__"] = "torch.Tensor"
    allowlist[_module + ".parameters"] = "syft.lib.python.List"
    allowlist[_module + ".train"] = _module
    allowlist[_module + ".cuda"] = _module
    allowlist[_module + ".cpu"] = _module
    if _module != "torch.nn.Dropout2d":
        allowlist[_module + ".state_dict"] = "syft.lib.python.collections.OrderedDict"
        # load_state_dict really returns torch.nn.modules.module._IncompatibleKeys,
        # which syft models as _SyNone
        allowlist[_module + ".load_state_dict"] = "syft.lib.python._SyNone"
    allowlist[_module + ".extra_repr"] = "syft.lib.python.String"
del _module
# DataLoader
allowlist["torch.utils.data.DataLoader"] = "torch.utils.data.DataLoader"
allowlist["torch.utils.data.DataLoader.__iter__"] = "syft.lib.python.Iterator"
allowlist["torch.utils.data.DataLoader.__len__"] = "syft.lib.python.Int"
# Functional
allowlist["torch.nn.functional.relu"] = "torch.Tensor"
allowlist["torch.nn.functional.max_pool2d"] = "torch.Tensor"
allowlist["torch.nn.functional.log_softmax"] = "torch.Tensor"
# NOTE: a duplicate "torch.flatten" entry was removed here; it is already
# registered with the same return type in the torch function section above.
# Optimizers
# Each optimizer class is allowlisted for construction, together with
# zero_grad/step which return None (modelled as syft _SyNone).
for _opt_name in (
    "ASGD",
    "Adadelta",
    "Adagrad",
    "Adam",
    "AdamW",
    "Adamax",
    "LBFGS",
    "Optimizer",
    "RMSprop",
    "Rprop",
    "SGD",
    "SparseAdam",
):
    _opt_path = "torch.optim." + _opt_name
    allowlist[_opt_path] = _opt_path
    allowlist[_opt_path + ".zero_grad"] = "syft.lib.python._SyNone"
    allowlist[_opt_path + ".step"] = "syft.lib.python._SyNone"
del _opt_name, _opt_path
# Scheduler / Autograd / Sequential entries, registered in a single ordered
# pass so the insertion order of the hand-written version is preserved.
for _path, _return_type in (
    # Scheduler
    ("torch.optim.lr_scheduler.StepLR", "torch.optim.lr_scheduler.StepLR"),
    ("torch.optim.lr_scheduler.StepLR.step", "syft.lib.python._SyNone"),
    # Autograd: the no_grad context manager plus its context protocol methods
    ("torch.no_grad", "torch.autograd.grad_mode.no_grad"),
    ("torch.autograd.grad_mode.no_grad", "torch.autograd.grad_mode.no_grad"),
    ("torch.autograd.grad_mode.no_grad.__enter__", "syft.lib.python._SyNone"),
    ("torch.autograd.grad_mode.no_grad.__exit__", "syft.lib.python._SyNone"),
    # Sequential container
    # NOTE(review): unlike Conv2d/Linear above, Sequential.cpu/.cuda/.train
    # are mapped to _SyNone rather than the module type — confirm intended.
    ("torch.nn.Sequential", "torch.nn.Sequential"),
    ("torch.nn.Sequential.cpu", "syft.lib.python._SyNone"),
    ("torch.nn.Sequential.cuda", "syft.lib.python._SyNone"),
    ("torch.nn.Sequential.parameters", "syft.lib.python.List"),
    ("torch.nn.Sequential.train", "syft.lib.python._SyNone"),
    ("torch.nn.Sequential.eval", "syft.lib.python._SyNone"),
    ("torch.nn.Sequential.__call__", "torch.Tensor"),
):
    allowlist[_path] = _return_type
del _path, _return_type
# Loss Functions
# Functional loss helpers all return the loss tensor.
for _loss_fn in (
    "cosine_embedding_loss",
    "ctc_loss",
    "hinge_embedding_loss",
    "l1_loss",
    "margin_ranking_loss",
    "mse_loss",
    "multi_margin_loss",
    "multilabel_margin_loss",
    "multilabel_soft_margin_loss",
    "nll_loss",
    "poisson_nll_loss",
    "smooth_l1_loss",
    "soft_margin_loss",
    "triplet_margin_loss",
):
    allowlist["torch.nn.functional." + _loss_fn] = "torch.Tensor"
del _loss_fn
# Loss modules: allow construction plus __call__, which yields the loss tensor.
for _loss_cls in (
    "torch.nn.AdaptiveLogSoftmaxWithLoss",
    "torch.nn.BCELoss",
    "torch.nn.BCEWithLogitsLoss",
    "torch.nn.CTCLoss",
    "torch.nn.CrossEntropyLoss",
    "torch.nn.CosineEmbeddingLoss",
    "torch.nn.HingeEmbeddingLoss",
    "torch.nn.KLDivLoss",
    "torch.nn.L1Loss",
    "torch.nn.MSELoss",
    "torch.nn.MarginRankingLoss",
    "torch.nn.MultiLabelMarginLoss",
    "torch.nn.MultiLabelSoftMarginLoss",
    "torch.nn.MultiMarginLoss",
    "torch.nn.NLLLoss",
    "torch.nn.NLLLoss2d",
    "torch.nn.PoissonNLLLoss",
    "torch.nn.SmoothL1Loss",
    "torch.nn.SoftMarginLoss",
    "torch.nn.TripletMarginLoss",
):
    allowlist[_loss_cls] = _loss_cls
    allowlist[_loss_cls + ".__call__"] = "torch.Tensor"
del _loss_cls
# Layer Classes
# Every pooling / dropout layer below exposes the same module protocol, so the
# nine allowlist entries per class are generated uniformly, keeping the same
# per-class key order the hand-written entries used.
for _layer in (
    "torch.nn.AdaptiveAvgPool1d",
    "torch.nn.AdaptiveAvgPool2d",
    "torch.nn.AdaptiveAvgPool3d",
    "torch.nn.AdaptiveMaxPool1d",
    "torch.nn.AdaptiveMaxPool2d",
    "torch.nn.AdaptiveMaxPool3d",
    "torch.nn.AlphaDropout",
    "torch.nn.AvgPool1d",
    "torch.nn.AvgPool2d",
):
    allowlist[_layer] = _layer
    allowlist[_layer + ".__call__"] = "torch.Tensor"
    allowlist[_layer + ".parameters"] = "syft.lib.python.List"
    allowlist[_layer + ".train"] = _layer
    allowlist[_layer + ".cuda"] = _layer
    allowlist[_layer + ".cpu"] = _layer
    allowlist[_layer + ".state_dict"] = "syft.lib.python.collections.OrderedDict"
    allowlist[_layer + ".load_state_dict"] = "syft.lib.python._SyNone"
    allowlist[_layer + ".extra_repr"] = "syft.lib.python.String"
del _layer
allowlist["torch.nn.AvgPool3d"] = "torch.nn.AvgPool3d"
allowlist["torch.nn.AvgPool3d.__call__"] = "torch.Tensor"
allowlist["torch.nn.AvgPool3d.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.AvgPool3d.train"] = "torch.nn.AvgPool3d"
allowlist["torch.nn.AvgPool3d.cuda"] = "torch.nn.AvgPool3d"
allowlist["torch.nn.AvgPool3d.cpu"] = "torch.nn.AvgPool3d"
allowlist["torch.nn.AvgPool3d.state_dict"] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.AvgPool3d.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.AvgPool3d.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.BatchNorm1d"] = "torch.nn.BatchNorm1d"
allowlist["torch.nn.BatchNorm1d.__call__"] = "torch.Tensor"
allowlist["torch.nn.BatchNorm1d.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.BatchNorm1d.train"] = "torch.nn.BatchNorm1d"
allowlist["torch.nn.BatchNorm1d.cuda"] = "torch.nn.BatchNorm1d"
allowlist["torch.nn.BatchNorm1d.cpu"] = "torch.nn.BatchNorm1d"
allowlist["torch.nn.BatchNorm1d.state_dict"] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.BatchNorm1d.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.BatchNorm1d.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.BatchNorm2d"] = "torch.nn.BatchNorm2d"
allowlist["torch.nn.BatchNorm2d.__call__"] = "torch.Tensor"
allowlist["torch.nn.BatchNorm2d.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.BatchNorm2d.train"] = "torch.nn.BatchNorm2d"
allowlist["torch.nn.BatchNorm2d.cuda"] = "torch.nn.BatchNorm2d"
allowlist["torch.nn.BatchNorm2d.cpu"] = "torch.nn.BatchNorm2d"
allowlist["torch.nn.BatchNorm2d.state_dict"] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.BatchNorm2d.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.BatchNorm2d.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.BatchNorm3d"] = "torch.nn.BatchNorm3d"
allowlist["torch.nn.BatchNorm3d.__call__"] = "torch.Tensor"
allowlist["torch.nn.BatchNorm3d.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.BatchNorm3d.train"] = "torch.nn.BatchNorm3d"
allowlist["torch.nn.BatchNorm3d.cuda"] = "torch.nn.BatchNorm3d"
allowlist["torch.nn.BatchNorm3d.cpu"] = "torch.nn.BatchNorm3d"
allowlist["torch.nn.BatchNorm3d.state_dict"] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.BatchNorm3d.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.BatchNorm3d.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.Bilinear"] = "torch.nn.Bilinear"
allowlist["torch.nn.Bilinear.__call__"] = "torch.Tensor"
allowlist["torch.nn.Bilinear.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.Bilinear.train"] = "torch.nn.Bilinear"
allowlist["torch.nn.Bilinear.cuda"] = "torch.nn.Bilinear"
allowlist["torch.nn.Bilinear.cpu"] = "torch.nn.Bilinear"
allowlist["torch.nn.Bilinear.state_dict"] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.Bilinear.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.Bilinear.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.CELU"] = "torch.nn.CELU"
allowlist["torch.nn.CELU.__call__"] = "torch.Tensor"
allowlist["torch.nn.CELU.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.CELU.train"] = "torch.nn.CELU"
allowlist["torch.nn.CELU.cuda"] = "torch.nn.CELU"
allowlist["torch.nn.CELU.cpu"] = "torch.nn.CELU"
allowlist["torch.nn.CELU.state_dict"] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.CELU.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.CELU.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.ConstantPad1d"] = "torch.nn.ConstantPad1d"
allowlist["torch.nn.ConstantPad1d.__call__"] = "torch.Tensor"
allowlist["torch.nn.ConstantPad1d.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.ConstantPad1d.train"] = "torch.nn.ConstantPad1d"
allowlist["torch.nn.ConstantPad1d.cuda"] = "torch.nn.ConstantPad1d"
allowlist["torch.nn.ConstantPad1d.cpu"] = "torch.nn.ConstantPad1d"
allowlist[
"torch.nn.ConstantPad1d.state_dict"
] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.ConstantPad1d.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.ConstantPad1d.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.ConstantPad2d"] = "torch.nn.ConstantPad2d"
allowlist["torch.nn.ConstantPad2d.__call__"] = "torch.Tensor"
allowlist["torch.nn.ConstantPad2d.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.ConstantPad2d.train"] = "torch.nn.ConstantPad2d"
allowlist["torch.nn.ConstantPad2d.cuda"] = "torch.nn.ConstantPad2d"
allowlist["torch.nn.ConstantPad2d.cpu"] = "torch.nn.ConstantPad2d"
allowlist[
"torch.nn.ConstantPad2d.state_dict"
] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.ConstantPad2d.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.ConstantPad2d.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.ConstantPad3d"] = "torch.nn.ConstantPad3d"
allowlist["torch.nn.ConstantPad3d.__call__"] = "torch.Tensor"
allowlist["torch.nn.ConstantPad3d.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.ConstantPad3d.train"] = "torch.nn.ConstantPad3d"
allowlist["torch.nn.ConstantPad3d.cuda"] = "torch.nn.ConstantPad3d"
allowlist["torch.nn.ConstantPad3d.cpu"] = "torch.nn.ConstantPad3d"
allowlist[
"torch.nn.ConstantPad3d.state_dict"
] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.ConstantPad3d.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.ConstantPad3d.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.Container"] = "torch.nn.Container"
allowlist["torch.nn.Container.__call__"] = "torch.Tensor"
allowlist["torch.nn.Container.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.Container.train"] = "torch.nn.Container"
allowlist["torch.nn.Container.cuda"] = "torch.nn.Container"
allowlist["torch.nn.Container.cpu"] = "torch.nn.Container"
allowlist["torch.nn.Container.state_dict"] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.Container.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.Container.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.Conv1d"] = "torch.nn.Conv1d"
allowlist["torch.nn.Conv1d.__call__"] = "torch.Tensor"
allowlist["torch.nn.Conv1d.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.Conv1d.train"] = "torch.nn.Conv1d"
allowlist["torch.nn.Conv1d.cuda"] = "torch.nn.Conv1d"
allowlist["torch.nn.Conv1d.cpu"] = "torch.nn.Conv1d"
allowlist["torch.nn.Conv1d.state_dict"] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.Conv1d.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.Conv1d.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.Conv2d"] = "torch.nn.Conv2d"
allowlist["torch.nn.Conv2d.__call__"] = "torch.Tensor"
allowlist["torch.nn.Conv2d.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.Conv2d.train"] = "torch.nn.Conv2d"
allowlist["torch.nn.Conv2d.cuda"] = "torch.nn.Conv2d"
allowlist["torch.nn.Conv2d.cpu"] = "torch.nn.Conv2d"
allowlist["torch.nn.Conv2d.state_dict"] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.Conv2d.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.Conv2d.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.Conv3d"] = "torch.nn.Conv3d"
allowlist["torch.nn.Conv3d.__call__"] = "torch.Tensor"
allowlist["torch.nn.Conv3d.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.Conv3d.train"] = "torch.nn.Conv3d"
allowlist["torch.nn.Conv3d.cuda"] = "torch.nn.Conv3d"
allowlist["torch.nn.Conv3d.cpu"] = "torch.nn.Conv3d"
allowlist["torch.nn.Conv3d.state_dict"] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.Conv3d.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.Conv3d.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.ConvTranspose1d"] = "torch.nn.ConvTranspose1d"
allowlist["torch.nn.ConvTranspose1d.__call__"] = "torch.Tensor"
allowlist["torch.nn.ConvTranspose1d.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.ConvTranspose1d.train"] = "torch.nn.ConvTranspose1d"
allowlist["torch.nn.ConvTranspose1d.cuda"] = "torch.nn.ConvTranspose1d"
allowlist["torch.nn.ConvTranspose1d.cpu"] = "torch.nn.ConvTranspose1d"
allowlist[
"torch.nn.ConvTranspose1d.state_dict"
] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.ConvTranspose1d.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.ConvTranspose1d.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.ConvTranspose2d"] = "torch.nn.ConvTranspose2d"
allowlist["torch.nn.ConvTranspose2d.__call__"] = "torch.Tensor"
allowlist["torch.nn.ConvTranspose2d.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.ConvTranspose2d.train"] = "torch.nn.ConvTranspose2d"
allowlist["torch.nn.ConvTranspose2d.cuda"] = "torch.nn.ConvTranspose2d"
allowlist["torch.nn.ConvTranspose2d.cpu"] = "torch.nn.ConvTranspose2d"
allowlist[
"torch.nn.ConvTranspose2d.state_dict"
] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.ConvTranspose2d.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.ConvTranspose2d.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.ConvTranspose3d"] = "torch.nn.ConvTranspose3d"
allowlist["torch.nn.ConvTranspose3d.__call__"] = "torch.Tensor"
allowlist["torch.nn.ConvTranspose3d.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.ConvTranspose3d.train"] = "torch.nn.ConvTranspose3d"
allowlist["torch.nn.ConvTranspose3d.cuda"] = "torch.nn.ConvTranspose3d"
allowlist["torch.nn.ConvTranspose3d.cpu"] = "torch.nn.ConvTranspose3d"
allowlist[
"torch.nn.ConvTranspose3d.state_dict"
] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.ConvTranspose3d.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.ConvTranspose3d.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.CosineSimilarity"] = "torch.nn.CosineSimilarity"
allowlist["torch.nn.CosineSimilarity.__call__"] = "torch.Tensor"
allowlist["torch.nn.CosineSimilarity.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.CosineSimilarity.train"] = "torch.nn.CosineSimilarity"
allowlist["torch.nn.CosineSimilarity.cuda"] = "torch.nn.CosineSimilarity"
allowlist["torch.nn.CosineSimilarity.cpu"] = "torch.nn.CosineSimilarity"
allowlist[
"torch.nn.CosineSimilarity.state_dict"
] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.CosineSimilarity.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.CosineSimilarity.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.CrossMapLRN2d"] = "torch.nn.CrossMapLRN2d"
allowlist["torch.nn.CrossMapLRN2d.__call__"] = "torch.Tensor"
allowlist["torch.nn.CrossMapLRN2d.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.CrossMapLRN2d.train"] = "torch.nn.CrossMapLRN2d"
allowlist["torch.nn.CrossMapLRN2d.cuda"] = "torch.nn.CrossMapLRN2d"
allowlist["torch.nn.CrossMapLRN2d.cpu"] = "torch.nn.CrossMapLRN2d"
allowlist[
"torch.nn.CrossMapLRN2d.state_dict"
] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.CrossMapLRN2d.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.CrossMapLRN2d.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.DataParallel"] = "torch.nn.DataParallel"
allowlist["torch.nn.DataParallel.__call__"] = "torch.Tensor"
allowlist["torch.nn.DataParallel.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.DataParallel.train"] = "torch.nn.DataParallel"
allowlist["torch.nn.DataParallel.cuda"] = "torch.nn.DataParallel"
allowlist["torch.nn.DataParallel.cpu"] = "torch.nn.DataParallel"
allowlist[
"torch.nn.DataParallel.state_dict"
] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.DataParallel.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.DataParallel.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.Dropout"] = "torch.nn.Dropout"
allowlist["torch.nn.Dropout.__call__"] = "torch.Tensor"
allowlist["torch.nn.Dropout.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.Dropout.train"] = "torch.nn.Dropout"
allowlist["torch.nn.Dropout.cuda"] = "torch.nn.Dropout"
allowlist["torch.nn.Dropout.cpu"] = "torch.nn.Dropout"
allowlist["torch.nn.Dropout.state_dict"] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.Dropout.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.Dropout.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.Dropout2d"] = "torch.nn.Dropout2d"
allowlist["torch.nn.Dropout2d.__call__"] = "torch.Tensor"
allowlist["torch.nn.Dropout2d.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.Dropout2d.train"] = "torch.nn.Dropout2d"
allowlist["torch.nn.Dropout2d.cuda"] = "torch.nn.Dropout2d"
allowlist["torch.nn.Dropout2d.cpu"] = "torch.nn.Dropout2d"
allowlist["torch.nn.Dropout2d.state_dict"] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.Dropout2d.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.Dropout2d.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.Dropout3d"] = "torch.nn.Dropout3d"
allowlist["torch.nn.Dropout3d.__call__"] = "torch.Tensor"
allowlist["torch.nn.Dropout3d.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.Dropout3d.train"] = "torch.nn.Dropout3d"
allowlist["torch.nn.Dropout3d.cuda"] = "torch.nn.Dropout3d"
allowlist["torch.nn.Dropout3d.cpu"] = "torch.nn.Dropout3d"
allowlist["torch.nn.Dropout3d.state_dict"] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.Dropout3d.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.Dropout3d.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.ELU"] = "torch.nn.ELU"
allowlist["torch.nn.ELU.__call__"] = "torch.Tensor"
allowlist["torch.nn.ELU.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.ELU.train"] = "torch.nn.ELU"
allowlist["torch.nn.ELU.cuda"] = "torch.nn.ELU"
allowlist["torch.nn.ELU.cpu"] = "torch.nn.ELU"
allowlist["torch.nn.ELU.state_dict"] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.ELU.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.ELU.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.Embedding"] = "torch.nn.Embedding"
allowlist["torch.nn.Embedding.__call__"] = "torch.Tensor"
allowlist["torch.nn.Embedding.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.Embedding.train"] = "torch.nn.Embedding"
allowlist["torch.nn.Embedding.cuda"] = "torch.nn.Embedding"
allowlist["torch.nn.Embedding.cpu"] = "torch.nn.Embedding"
allowlist["torch.nn.Embedding.state_dict"] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.Embedding.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.Embedding.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.EmbeddingBag"] = "torch.nn.EmbeddingBag"
allowlist["torch.nn.EmbeddingBag.__call__"] = "torch.Tensor"
allowlist["torch.nn.EmbeddingBag.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.EmbeddingBag.train"] = "torch.nn.EmbeddingBag"
allowlist["torch.nn.EmbeddingBag.cuda"] = "torch.nn.EmbeddingBag"
allowlist["torch.nn.EmbeddingBag.cpu"] = "torch.nn.EmbeddingBag"
allowlist[
"torch.nn.EmbeddingBag.state_dict"
] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.EmbeddingBag.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.EmbeddingBag.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.FeatureAlphaDropout"] = "torch.nn.FeatureAlphaDropout"
allowlist["torch.nn.FeatureAlphaDropout.__call__"] = "torch.Tensor"
allowlist["torch.nn.FeatureAlphaDropout.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.FeatureAlphaDropout.train"] = "torch.nn.FeatureAlphaDropout"
allowlist["torch.nn.FeatureAlphaDropout.cuda"] = "torch.nn.FeatureAlphaDropout"
allowlist["torch.nn.FeatureAlphaDropout.cpu"] = "torch.nn.FeatureAlphaDropout"
allowlist[
"torch.nn.FeatureAlphaDropout.state_dict"
] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.FeatureAlphaDropout.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.FeatureAlphaDropout.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.Flatten"] = "torch.nn.Flatten"
allowlist["torch.nn.Flatten.__call__"] = "torch.Tensor"
allowlist["torch.nn.Flatten.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.Flatten.train"] = "torch.nn.Flatten"
allowlist["torch.nn.Flatten.cuda"] = "torch.nn.Flatten"
allowlist["torch.nn.Flatten.cpu"] = "torch.nn.Flatten"
allowlist["torch.nn.Flatten.state_dict"] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.Flatten.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.Flatten.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.Fold"] = "torch.nn.Fold"
allowlist["torch.nn.Fold.__call__"] = "torch.Tensor"
allowlist["torch.nn.Fold.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.Fold.train"] = "torch.nn.Fold"
allowlist["torch.nn.Fold.cuda"] = "torch.nn.Fold"
allowlist["torch.nn.Fold.cpu"] = "torch.nn.Fold"
allowlist["torch.nn.Fold.state_dict"] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.Fold.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.Fold.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.FractionalMaxPool2d"] = "torch.nn.FractionalMaxPool2d"
allowlist["torch.nn.FractionalMaxPool2d.__call__"] = "torch.Tensor"
allowlist["torch.nn.FractionalMaxPool2d.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.FractionalMaxPool2d.train"] = "torch.nn.FractionalMaxPool2d"
allowlist["torch.nn.FractionalMaxPool2d.cuda"] = "torch.nn.FractionalMaxPool2d"
allowlist["torch.nn.FractionalMaxPool2d.cpu"] = "torch.nn.FractionalMaxPool2d"
allowlist[
"torch.nn.FractionalMaxPool2d.state_dict"
] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.FractionalMaxPool2d.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.FractionalMaxPool2d.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.FractionalMaxPool3d"] = "torch.nn.FractionalMaxPool3d"
allowlist["torch.nn.FractionalMaxPool3d.__call__"] = "torch.Tensor"
allowlist["torch.nn.FractionalMaxPool3d.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.FractionalMaxPool3d.train"] = "torch.nn.FractionalMaxPool3d"
allowlist["torch.nn.FractionalMaxPool3d.cuda"] = "torch.nn.FractionalMaxPool3d"
allowlist["torch.nn.FractionalMaxPool3d.cpu"] = "torch.nn.FractionalMaxPool3d"
allowlist[
"torch.nn.FractionalMaxPool3d.state_dict"
] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.FractionalMaxPool3d.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.FractionalMaxPool3d.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.GELU"] = "torch.nn.GELU"
allowlist["torch.nn.GELU.__call__"] = "torch.Tensor"
allowlist["torch.nn.GELU.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.GELU.train"] = "torch.nn.GELU"
allowlist["torch.nn.GELU.cuda"] = "torch.nn.GELU"
allowlist["torch.nn.GELU.cpu"] = "torch.nn.GELU"
allowlist["torch.nn.GELU.state_dict"] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.GELU.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.GELU.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.GLU"] = "torch.nn.GLU"
allowlist["torch.nn.GLU.__call__"] = "torch.Tensor"
allowlist["torch.nn.GLU.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.GLU.train"] = "torch.nn.GLU"
allowlist["torch.nn.GLU.cuda"] = "torch.nn.GLU"
allowlist["torch.nn.GLU.cpu"] = "torch.nn.GLU"
allowlist["torch.nn.GLU.state_dict"] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.GLU.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.GLU.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.GRU"] = "torch.nn.GRU"
allowlist["torch.nn.GRU.__call__"] = "torch.Tensor"
allowlist["torch.nn.GRU.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.GRU.train"] = "torch.nn.GRU"
allowlist["torch.nn.GRU.cuda"] = "torch.nn.GRU"
allowlist["torch.nn.GRU.cpu"] = "torch.nn.GRU"
allowlist["torch.nn.GRU.state_dict"] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.GRU.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.GRU.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.GRUCell"] = "torch.nn.GRUCell"
allowlist["torch.nn.GRUCell.__call__"] = "torch.Tensor"
allowlist["torch.nn.GRUCell.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.GRUCell.train"] = "torch.nn.GRUCell"
allowlist["torch.nn.GRUCell.cuda"] = "torch.nn.GRUCell"
allowlist["torch.nn.GRUCell.cpu"] = "torch.nn.GRUCell"
allowlist["torch.nn.GRUCell.state_dict"] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.GRUCell.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.GRUCell.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.GroupNorm"] = "torch.nn.GroupNorm"
allowlist["torch.nn.GroupNorm.__call__"] = "torch.Tensor"
allowlist["torch.nn.GroupNorm.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.GroupNorm.train"] = "torch.nn.GroupNorm"
allowlist["torch.nn.GroupNorm.cuda"] = "torch.nn.GroupNorm"
allowlist["torch.nn.GroupNorm.cpu"] = "torch.nn.GroupNorm"
allowlist["torch.nn.GroupNorm.state_dict"] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.GroupNorm.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.GroupNorm.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.Hardshrink"] = "torch.nn.Hardshrink"
allowlist["torch.nn.Hardshrink.__call__"] = "torch.Tensor"
allowlist["torch.nn.Hardshrink.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.Hardshrink.train"] = "torch.nn.Hardshrink"
allowlist["torch.nn.Hardshrink.cuda"] = "torch.nn.Hardshrink"
allowlist["torch.nn.Hardshrink.cpu"] = "torch.nn.Hardshrink"
allowlist["torch.nn.Hardshrink.state_dict"] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.Hardshrink.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.Hardshrink.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.Hardsigmoid"] = "torch.nn.Hardsigmoid"
allowlist["torch.nn.Hardsigmoid.__call__"] = "torch.Tensor"
allowlist["torch.nn.Hardsigmoid.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.Hardsigmoid.train"] = "torch.nn.Hardsigmoid"
allowlist["torch.nn.Hardsigmoid.cuda"] = "torch.nn.Hardsigmoid"
allowlist["torch.nn.Hardsigmoid.cpu"] = "torch.nn.Hardsigmoid"
allowlist["torch.nn.Hardsigmoid.state_dict"] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.Hardsigmoid.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.Hardsigmoid.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.Hardswish"] = { # exists in # 1.6.0 +
"return_type": "torch.nn.Hardswish",
"min_version": "1.6.0",
}
allowlist["torch.nn.Hardswish.__call__"] = { # exists in # 1.6.0 +
"return_type": "torch.Tensor",
"min_version": "1.6.0",
}
allowlist["torch.nn.Hardswish.parameters"] = { # exists in # 1.6.0 +
"return_type": "syft.lib.python.List",
"min_version": "1.6.0",
}
allowlist["torch.nn.Hardswish.train"] = { # exists in # 1.6.0 +
"return_type": "torch.nn.Hardswish",
"min_version": "1.6.0",
}
allowlist["torch.nn.Hardswish.cuda"] = { # exists in # 1.6.0 +
"return_type": "torch.nn.Hardswish",
"min_version": "1.6.0",
}
allowlist["torch.nn.Hardswish.cpu"] = { # exists in # 1.6.0 +
"return_type": "torch.nn.Hardswish",
"min_version": "1.6.0",
}
allowlist["torch.nn.Hardswish.state_dict"] = { # exists in # 1.6.0 +
"return_type": "syft.lib.python.collections.OrderedDict",
"min_version": "1.6.0",
}
allowlist["torch.nn.Hardswish.load_state_dict"] = { # exists in # 1.6.0 +
"return_type": "syft.lib.python._SyNone",
"min_version": "1.6.0",
}
allowlist["torch.nn.Hardswish.extra_repr"] = { # exists in # 1.6.0 +
"return_type": "syft.lib.python.String",
"min_version": "1.6.0",
}
allowlist["torch.nn.Hardtanh"] = "torch.nn.Hardtanh"
allowlist["torch.nn.Hardtanh.__call__"] = "torch.Tensor"
allowlist["torch.nn.Hardtanh.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.Hardtanh.train"] = "torch.nn.Hardtanh"
allowlist["torch.nn.Hardtanh.cuda"] = "torch.nn.Hardtanh"
allowlist["torch.nn.Hardtanh.cpu"] = "torch.nn.Hardtanh"
allowlist["torch.nn.Hardtanh.state_dict"] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.Hardtanh.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.Hardtanh.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.Identity"] = "torch.nn.Identity"
allowlist["torch.nn.Identity.__call__"] = "torch.Tensor"
allowlist["torch.nn.Identity.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.Identity.train"] = "torch.nn.Identity"
allowlist["torch.nn.Identity.cuda"] = "torch.nn.Identity"
allowlist["torch.nn.Identity.cpu"] = "torch.nn.Identity"
allowlist["torch.nn.Identity.state_dict"] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.Identity.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.Identity.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.InstanceNorm1d"] = "torch.nn.InstanceNorm1d"
allowlist["torch.nn.InstanceNorm1d.__call__"] = "torch.Tensor"
allowlist["torch.nn.InstanceNorm1d.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.InstanceNorm1d.train"] = "torch.nn.InstanceNorm1d"
allowlist["torch.nn.InstanceNorm1d.cuda"] = "torch.nn.InstanceNorm1d"
allowlist["torch.nn.InstanceNorm1d.cpu"] = "torch.nn.InstanceNorm1d"
allowlist[
"torch.nn.InstanceNorm1d.state_dict"
] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.InstanceNorm1d.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.InstanceNorm1d.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.InstanceNorm2d"] = "torch.nn.InstanceNorm2d"
allowlist["torch.nn.InstanceNorm2d.__call__"] = "torch.Tensor"
allowlist["torch.nn.InstanceNorm2d.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.InstanceNorm2d.train"] = "torch.nn.InstanceNorm2d"
allowlist["torch.nn.InstanceNorm2d.cuda"] = "torch.nn.InstanceNorm2d"
allowlist["torch.nn.InstanceNorm2d.cpu"] = "torch.nn.InstanceNorm2d"
allowlist[
"torch.nn.InstanceNorm2d.state_dict"
] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.InstanceNorm2d.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.InstanceNorm2d.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.InstanceNorm3d"] = "torch.nn.InstanceNorm3d"
allowlist["torch.nn.InstanceNorm3d.__call__"] = "torch.Tensor"
allowlist["torch.nn.InstanceNorm3d.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.InstanceNorm3d.train"] = "torch.nn.InstanceNorm3d"
allowlist["torch.nn.InstanceNorm3d.cuda"] = "torch.nn.InstanceNorm3d"
allowlist["torch.nn.InstanceNorm3d.cpu"] = "torch.nn.InstanceNorm3d"
allowlist[
"torch.nn.InstanceNorm3d.state_dict"
] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.InstanceNorm3d.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.InstanceNorm3d.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.LPPool1d"] = "torch.nn.LPPool1d"
allowlist["torch.nn.LPPool1d.__call__"] = "torch.Tensor"
allowlist["torch.nn.LPPool1d.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.LPPool1d.train"] = "torch.nn.LPPool1d"
allowlist["torch.nn.LPPool1d.cuda"] = "torch.nn.LPPool1d"
allowlist["torch.nn.LPPool1d.cpu"] = "torch.nn.LPPool1d"
allowlist["torch.nn.LPPool1d.state_dict"] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.LPPool1d.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.LPPool1d.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.LPPool2d"] = "torch.nn.LPPool2d"
allowlist["torch.nn.LPPool2d.__call__"] = "torch.Tensor"
allowlist["torch.nn.LPPool2d.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.LPPool2d.train"] = "torch.nn.LPPool2d"
allowlist["torch.nn.LPPool2d.cuda"] = "torch.nn.LPPool2d"
allowlist["torch.nn.LPPool2d.cpu"] = "torch.nn.LPPool2d"
allowlist["torch.nn.LPPool2d.state_dict"] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.LPPool2d.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.LPPool2d.extra_repr"] = "syft.lib.python.String"
allowlist["torch.nn.LSTM"] = "torch.nn.LSTM"
allowlist["torch.nn.LSTM.__call__"] = "torch.Tensor"
allowlist["torch.nn.LSTM.parameters"] = "syft.lib.python.List"
allowlist["torch.nn.LSTM.train"] = "torch.nn.LSTM"
allowlist["torch.nn.LSTM.cuda"] = "torch.nn.LSTM"
allowlist["torch.nn.LSTM.cpu"] = "torch.nn.LSTM"
allowlist["torch.nn.LSTM.state_dict"] = "syft.lib.python.collections.OrderedDict"
allowlist["torch.nn.LSTM.load_state_dict"] = "syft.lib.python._SyNone"
allowlist["torch.nn.LSTM.extra_repr"] = "syft.lib.python.String"
# torch.nn module classes whose allowlisted API surface is identical.
# Every class below exposes the same nine entries, so they are registered
# data-driven instead of being copy-pasted ~530 lines long.
#
# NOTE: dict insertion order is preserved (Python 3.7+); iterating the list
# in order and writing the nine keys per class reproduces the exact same
# `allowlist` contents and ordering as the previous hand-written listing.
_NN_MODULES_WITH_STANDARD_API = [
    "torch.nn.LSTMCell",
    "torch.nn.LayerNorm",
    "torch.nn.LeakyReLU",
    "torch.nn.Linear",
    "torch.nn.LocalResponseNorm",
    "torch.nn.LogSigmoid",
    "torch.nn.LogSoftmax",
    "torch.nn.MaxPool1d",
    "torch.nn.MaxPool2d",
    "torch.nn.MaxPool3d",
    "torch.nn.MaxUnpool1d",
    "torch.nn.MaxUnpool2d",
    "torch.nn.MaxUnpool3d",
    "torch.nn.Module",
    "torch.nn.ModuleDict",
    "torch.nn.ModuleList",
    "torch.nn.MultiheadAttention",
    "torch.nn.PReLU",
    "torch.nn.PairwiseDistance",
    "torch.nn.PixelShuffle",
    "torch.nn.RNN",
    "torch.nn.RNNBase",
    "torch.nn.RNNCell",
    "torch.nn.RNNCellBase",
    "torch.nn.RReLU",
    "torch.nn.ReLU",
    "torch.nn.ReLU6",
    "torch.nn.ReflectionPad1d",
    "torch.nn.ReflectionPad2d",
    "torch.nn.ReplicationPad1d",
    "torch.nn.ReplicationPad2d",
    "torch.nn.ReplicationPad3d",
    "torch.nn.SELU",
    "torch.nn.Sequential",
    "torch.nn.Sigmoid",
    "torch.nn.Softmax",
    "torch.nn.Softmax2d",
    "torch.nn.Softmin",
    "torch.nn.Softplus",
    "torch.nn.Softshrink",
    "torch.nn.Softsign",
    "torch.nn.SyncBatchNorm",
    "torch.nn.Tanh",
    "torch.nn.Tanhshrink",
    "torch.nn.Threshold",
    "torch.nn.Transformer",
    "torch.nn.TransformerDecoder",
    "torch.nn.TransformerDecoderLayer",
    "torch.nn.TransformerEncoder",
    "torch.nn.TransformerEncoderLayer",
    "torch.nn.Unfold",
    "torch.nn.Upsample",
    "torch.nn.UpsamplingBilinear2d",
    "torch.nn.UpsamplingNearest2d",
    "torch.nn.ZeroPad2d",
]

for _module_path in _NN_MODULES_WITH_STANDARD_API:
    # Constructor, train(), cuda() and cpu() all return the module itself.
    allowlist[_module_path] = _module_path
    allowlist[f"{_module_path}.__call__"] = "torch.Tensor"
    allowlist[f"{_module_path}.parameters"] = "syft.lib.python.List"
    allowlist[f"{_module_path}.train"] = _module_path
    allowlist[f"{_module_path}.cuda"] = _module_path
    allowlist[f"{_module_path}.cpu"] = _module_path
    allowlist[f"{_module_path}.state_dict"] = "syft.lib.python.collections.OrderedDict"
    allowlist[f"{_module_path}.load_state_dict"] = "syft.lib.python._SyNone"
    allowlist[f"{_module_path}.extra_repr"] = "syft.lib.python.String"
| 46.986216 | 103 | 0.726613 |
234774891575cd4943eda51bfd5b09e44c3c1773 | 553 | py | Python | source/interprocedural_analyses/taint/test/integration/breadcrumbs.py | rcramblit/pyre-check | fa15d2021ab1bf5b52aae8c09d1520e2b7bcf6c9 | [
"MIT"
] | 1 | 2021-05-17T23:13:52.000Z | 2021-05-17T23:13:52.000Z | source/interprocedural_analyses/taint/test/integration/breadcrumbs.py | rcramblit/pyre-check | fa15d2021ab1bf5b52aae8c09d1520e2b7bcf6c9 | [
"MIT"
] | 1 | 2021-08-16T12:11:37.000Z | 2021-08-16T12:11:37.000Z | source/interprocedural_analyses/taint/test/integration/breadcrumbs.py | rcramblit/pyre-check | fa15d2021ab1bf5b52aae8c09d1520e2b7bcf6c9 | [
"MIT"
] | 1 | 2021-02-20T13:09:30.000Z | 2021-02-20T13:09:30.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from builtins import __test_sink, __test_source
def int_source() -> int:
return __test_source()
def float_source() -> float:
return __test_source()
def bool_source() -> bool:
return __test_source()
def int_parameter(x, y: int):
__test_sink(y)
def float_parameter(x, y: float):
__test_sink(y)
def bool_parameter(x, y: bool):
__test_sink(y)
| 17.83871 | 65 | 0.708861 |
2c6404bc2552a750eb092819d832d9181d04bbb6 | 645 | py | Python | firmware/hook_generic_deploy_keys_readable.py | prothen/teensy_middleware | 984ed0a9845a374c3ae51fe5f51509053bda0243 | [
"MIT"
] | 1 | 2021-05-04T12:06:04.000Z | 2021-05-04T12:06:04.000Z | firmware/hook_generic_deploy_keys_readable.py | prothen/teensy_middleware | 984ed0a9845a374c3ae51fe5f51509053bda0243 | [
"MIT"
] | 1 | 2021-05-06T07:17:18.000Z | 2021-05-06T07:17:18.000Z | firmware/hook_generic_deploy_keys_readable.py | prothen/teensy_middleware | 984ed0a9845a374c3ae51fe5f51509053bda0243 | [
"MIT"
] | 1 | 2021-05-04T12:06:29.000Z | 2021-05-04T12:06:29.000Z | #!/usr/bin/env python
#
#
# Author: Philipp Rothenhaeusler
# Smart Mobility Lab, KTH
# Stockholm 2020
#
import os
import sys
import time
import subprocess
# Redirect output to log file since stdoutput is procssed by platformio build system
sys.stdout = open("./log/generic_deploy_key_setup.log", "w")
print('Executing middleware hook...')
if __name__ == "__main__":
cmds= ("chmod 600 $st_DIR/resources/deploy_keys/*")
ps = subprocess.Popen(('bash', "../resources/shells/generic_command", cmds), stdout=sys.stdout, stderr=sys.stdout)
time.sleep(1)
ps.wait()
sys.stdout = sys.__stdout__
sys.exit(0)
| 26.875 | 118 | 0.691473 |
6ee41516d6431041a322f6c690fb8ba1cd36a104 | 17,798 | py | Python | mmhuman3d/data/data_converters/humman.py | ykk648/mmhuman3d | 26af92bcf6abbe1855e1a8a48308621410f9c047 | [
"Apache-2.0"
] | 472 | 2021-12-03T03:12:55.000Z | 2022-03-31T01:33:13.000Z | mmhuman3d/data/data_converters/humman.py | ykk648/mmhuman3d | 26af92bcf6abbe1855e1a8a48308621410f9c047 | [
"Apache-2.0"
] | 127 | 2021-12-03T05:00:14.000Z | 2022-03-31T13:47:33.000Z | mmhuman3d/data/data_converters/humman.py | ykk648/mmhuman3d | 26af92bcf6abbe1855e1a8a48308621410f9c047 | [
"Apache-2.0"
] | 37 | 2021-12-03T03:23:22.000Z | 2022-03-31T08:41:58.000Z | import glob
import os
import numpy as np
import torch
from tqdm import tqdm
from mmhuman3d.core.cameras import build_cameras
from mmhuman3d.core.conventions.keypoints_mapping import (
convert_kps,
get_keypoint_idx,
get_keypoint_num,
)
from mmhuman3d.data.data_structures import SMCReader
from mmhuman3d.data.data_structures.human_data import HumanData
from mmhuman3d.models.builder import build_body_model
from .base_converter import BaseModeConverter
from .builder import DATA_CONVERTERS
@DATA_CONVERTERS.register_module()
class HuMManConverter(BaseModeConverter):
"""A mysterious dataset that will be announced soon."""
ACCEPTED_MODES = ['test', 'train']
def __init__(self, *args, **kwargs):
super(HuMManConverter, self).__init__(*args, **kwargs)
self.skip_no_iphone = True
self.skip_no_keypoints3d = True
self.downsample_ratio = 10 # uniformly sampling
self.keypoints2d_humman_mask = None
self.keypoints3d_humman_mask = None
self.keypoint_convention_humman = 'coco_wholebody'
self.num_keypoints_humman = \
get_keypoint_num(self.keypoint_convention_humman)
self.keypoint_convention_smpl = 'smpl_54'
self.num_keypoints_smpl = \
get_keypoint_num(self.keypoint_convention_smpl)
self.left_hip_idx_humman = get_keypoint_idx(
'left_hip', convention=self.keypoint_convention_humman)
self.right_hip_idx_humman = get_keypoint_idx(
'right_hip', convention=self.keypoint_convention_humman)
self.root_idx_smpl = get_keypoint_idx(
'pelvis_extra', convention=self.keypoint_convention_smpl)
self.device = torch.device(
'cuda') if torch.cuda.is_available() else torch.device('cpu')
# Body model used for keypoint computation
self.smpl = build_body_model(
dict(
type='SMPL',
keypoint_src='smpl_54',
keypoint_dst=self.keypoint_convention_smpl,
model_path='data/body_models/smpl',
extra_joints_regressor='data/body_models/J_regressor_extra.npy'
)).to(self.device)
# Body model used for pelvis computation in SMCReader
self.smpl_smc = build_body_model(
dict(
type='SMPL',
gender='neutral',
num_betas=10,
keypoint_src='smpl_45',
keypoint_dst='smpl_45',
model_path='data/body_models/smpl',
batch_size=1,
)).to(self.device)
def _derive_keypoints(self, global_orient, body_pose, betas, transl,
focal_length, image_size, camera_center):
"""Get SMPL-derived keypoints."""
camera = build_cameras(
dict(
type='PerspectiveCameras',
convention='opencv',
in_ndc=False,
focal_length=torch.tensor(focal_length,
dtype=torch.float).reshape(1, 2),
image_size=torch.tensor(image_size,
dtype=torch.float).reshape(1, 2),
principal_point=torch.tensor(camera_center,
dtype=torch.float).reshape(
1, 2))).to(self.device)
output = self.smpl(
global_orient=torch.tensor(global_orient, device=self.device),
body_pose=torch.tensor(body_pose, device=self.device),
betas=torch.tensor(betas, device=self.device),
transl=torch.tensor(transl, device=self.device),
return_joints=True)
keypoints3d = output['joints']
keypoints2d_xyd = camera.transform_points_screen(keypoints3d)
keypoints2d = keypoints2d_xyd[..., :2]
keypoints3d = keypoints3d.cpu().numpy()
keypoints2d = keypoints2d.cpu().numpy()
# root align
keypoints3d = keypoints3d - keypoints3d[:, [self.root_idx_smpl], :]
return {'keypoints2d': keypoints2d, 'keypoints3d': keypoints3d}
def _make_human_data(
self,
smpl,
image_path,
image_id,
bbox_xywh,
keypoints2d_smpl,
keypoints3d_smpl,
keypoints2d_humman,
keypoints3d_humman,
):
# use HumanData to store all data
human_data = HumanData()
# frames
num_frames = len(image_path)
# downsample idx
selected_inds = np.arange(len(image_path))
selected_inds = selected_inds[::self.downsample_ratio]
smpl['global_orient'] = np.concatenate(
smpl['global_orient'], axis=0).reshape(-1, 3)[selected_inds]
smpl['body_pose'] = np.concatenate(
smpl['body_pose'], axis=0).reshape(-1, 23, 3)[selected_inds]
smpl['betas'] = np.concatenate(
smpl['betas'], axis=0).reshape(-1, 10)[selected_inds]
smpl['transl'] = np.concatenate(
smpl['transl'], axis=0).reshape(-1, 3)[selected_inds]
human_data['smpl'] = smpl
# Save derived keypoints as ground truth
# 2D SMPL keypoints
keypoints2d_smpl = np.concatenate(
keypoints2d_smpl, axis=0).reshape(num_frames,
self.num_keypoints_smpl, 2)
keypoints2d_smpl, keypoints2d_smpl_mask = convert_kps(
keypoints2d_smpl,
src=self.keypoint_convention_smpl,
dst='human_data')
keypoints2d_smpl = np.concatenate(
[keypoints2d_smpl,
np.ones([*keypoints2d_smpl.shape[:2], 1])],
axis=-1)
human_data['keypoints2d'] = keypoints2d_smpl[selected_inds]
human_data['keypoints2d_mask'] = keypoints2d_smpl_mask
# 3D SMPL keypoints
keypoints3d_smpl = np.concatenate(
keypoints3d_smpl, axis=0).reshape(num_frames,
self.num_keypoints_smpl, 3)
keypoints3d_smpl, keypoints3d_smpl_mask = convert_kps(
keypoints3d_smpl,
src=self.keypoint_convention_smpl,
dst='human_data')
keypoints3d_smpl = np.concatenate(
[keypoints3d_smpl,
np.ones([*keypoints3d_smpl.shape[:2], 1])],
axis=-1)
human_data['keypoints3d'] = keypoints3d_smpl[selected_inds]
human_data['keypoints3d_mask'] = keypoints3d_smpl_mask
# Save HuMMan keypoints
# 2D HuMMan Keypoints
keypoints2d_humman = np.concatenate(
keypoints2d_humman, axis=0).reshape(num_frames,
self.num_keypoints_humman, 3)
keypoints2d_humman, keypoints2d_humman_mask = convert_kps(
keypoints2d_humman,
mask=self.keypoints2d_humman_mask,
src=self.keypoint_convention_humman,
dst='human_data')
human_data['keypoints2d_humman'] = keypoints2d_humman[selected_inds]
human_data['keypoints2d_humman_mask'] = keypoints2d_humman_mask
# 3D HuMMan Keypoints
keypoints3d_humman = np.concatenate(
keypoints3d_humman, axis=0).reshape(num_frames,
self.num_keypoints_humman, 4)
keypoints3d_humman, keypoints3d_humman_mask = convert_kps(
keypoints3d_humman,
mask=self.keypoints3d_humman_mask,
src=self.keypoint_convention_humman,
dst='human_data')
human_data['keypoints3d_humman'] = keypoints3d_humman[selected_inds]
human_data['keypoints3d_humman_mask'] = keypoints3d_humman_mask
# Save bboxes
bbox_xywh = np.array(bbox_xywh).reshape((num_frames, 4))
bbox_xywh = np.concatenate(
[bbox_xywh, np.ones([num_frames, 1])], axis=-1)
assert bbox_xywh.shape == (num_frames, 5)
human_data['bbox_xywh'] = bbox_xywh[selected_inds]
# Save other attributes
human_data['image_path'] = [image_path[i] for i in selected_inds]
human_data['image_id'] = [image_id[i] for i in selected_inds]
human_data['config'] = 'humman'
human_data.compress_keypoints_by_mask()
return human_data
def convert_by_mode(self, dataset_path: str, out_path: str,
mode: str) -> dict:
"""
Args:
dataset_path (str): Path to directory where raw images and
annotations are stored.
out_path (str): Path to directory to save preprocessed npz file
mode (str): Mode in accepted modes
Returns:
dict:
A dict containing keys image_path, bbox_xywh, keypoints2d,
keypoints2d_mask stored in HumanData() format
"""
kinect_smpl = {}
kinect_smpl['body_pose'] = []
kinect_smpl['global_orient'] = []
kinect_smpl['betas'] = []
kinect_smpl['transl'] = []
iphone_smpl = {}
iphone_smpl['body_pose'] = []
iphone_smpl['global_orient'] = []
iphone_smpl['betas'] = []
iphone_smpl['transl'] = []
# structs we use
kinect_image_path_, kinect_image_id_, kinect_bbox_xywh_, \
kinect_keypoints2d_smpl_, kinect_keypoints3d_smpl_, \
kinect_keypoints2d_humman_, kinect_keypoints3d_humman_ = \
[], [], [], [], [], [], []
iphone_image_path_, iphone_image_id_, iphone_bbox_xywh_, \
iphone_keypoints2d_smpl_, iphone_keypoints3d_smpl_, \
iphone_keypoints2d_humman_, iphone_keypoints3d_humman_ = \
[], [], [], [], [], [], []
ann_paths = sorted(glob.glob(os.path.join(dataset_path, '*.smc')))
with open(os.path.join(dataset_path, f'{mode}.txt'), 'r') as f:
split = set(f.read().splitlines())
for ann_path in tqdm(ann_paths):
if os.path.basename(ann_path) not in split:
continue
try:
smc_reader = SMCReader(ann_path, body_model=self.smpl_smc)
except OSError:
print(f'Unable to load {ann_path}.')
continue
if self.skip_no_keypoints3d and not smc_reader.keypoint_exists:
continue
if self.skip_no_iphone and not smc_reader.iphone_exists:
continue
num_kinect = smc_reader.get_num_kinect()
num_iphone = smc_reader.get_num_iphone()
num_frames = smc_reader.get_kinect_num_frames()
device_list = [('Kinect', i) for i in range(num_kinect)] + \
[('iPhone', i) for i in range(num_iphone)]
assert len(device_list) == num_kinect + num_iphone
for device, device_id in device_list:
assert device in {
'Kinect', 'iPhone'
}, f'Undefined device: {device}, ' \
f'should be "Kinect" or "iPhone"'
if device == 'Kinect':
image_id_ = kinect_image_id_
image_path_ = kinect_image_path_
bbox_xywh_ = kinect_bbox_xywh_
keypoints2d_humman_ = kinect_keypoints2d_humman_
keypoints3d_humman_ = kinect_keypoints3d_humman_
keypoints2d_smpl_ = kinect_keypoints2d_smpl_
keypoints3d_smpl_ = kinect_keypoints3d_smpl_
smpl_ = kinect_smpl
width, height = \
smc_reader.get_kinect_color_resolution(device_id)
intrinsics = smc_reader.get_kinect_color_intrinsics(
device_id)
fx, fy = intrinsics[0, 0], intrinsics[1, 1]
cx, cy = intrinsics[0, 2], intrinsics[1, 2]
focal_length = (fx, fy)
camera_center = (cx, cy) # xy
image_size = (height, width) # (height, width)
else:
image_id_ = iphone_image_id_
image_path_ = iphone_image_path_
bbox_xywh_ = iphone_bbox_xywh_
keypoints2d_humman_ = iphone_keypoints2d_humman_
keypoints3d_humman_ = iphone_keypoints3d_humman_
keypoints2d_smpl_ = iphone_keypoints2d_smpl_
keypoints3d_smpl_ = iphone_keypoints3d_smpl_
smpl_ = iphone_smpl
width, height = smc_reader.get_iphone_color_resolution()
intrinsics = smc_reader.get_iphone_intrinsics()
fx, fy = intrinsics[0, 0], intrinsics[1, 1]
cx, cy = intrinsics[0, 2], intrinsics[1, 2]
focal_length = (fx, fy)
camera_center = (cx, cy) # xy
image_size = (height, width) # (height, width)
assert device_id >= 0, f'Negative device id: {device_id}'
keypoint_convention_humman = \
smc_reader.get_keypoints_convention()
assert self.keypoint_convention_humman == \
keypoint_convention_humman
# get keypoints2d (all frames)
keypoints2d_humman, keypoints2d_humman_mask = \
smc_reader.get_keypoints2d(device, device_id)
if self.keypoints2d_humman_mask is None:
self.keypoints2d_humman_mask = keypoints2d_humman_mask
assert np.allclose(self.keypoints2d_humman_mask,
keypoints2d_humman_mask)
keypoints2d_humman_.append(keypoints2d_humman)
# get keypoints3d (all frames)
keypoints3d_humman, keypoints3d_humman_mask = \
smc_reader.get_keypoints3d(device, device_id)
if self.keypoints3d_humman_mask is None:
self.keypoints3d_humman_mask = keypoints3d_humman_mask
assert np.allclose(self.keypoints3d_humman_mask,
keypoints3d_humman_mask)
# root-align keypoints3d
left_hip_keypoints = \
keypoints3d_humman[:, [self.left_hip_idx_humman], :3]
right_hip_keypoints = \
keypoints3d_humman[:, [self.right_hip_idx_humman], :3]
root_keypoints = \
(left_hip_keypoints + right_hip_keypoints) / 2.0
keypoints3d_humman[..., :3] = \
keypoints3d_humman[..., :3] - root_keypoints
keypoints3d_humman_.append(keypoints3d_humman)
# get smpl (all frames)
smpl_dict = smc_reader.get_smpl(device, device_id)
smpl_['body_pose'].append(smpl_dict['body_pose'])
smpl_['global_orient'].append(smpl_dict['global_orient'])
smpl_['transl'].append(smpl_dict['transl'])
# expand betas
betas_expanded = np.tile(smpl_dict['betas'],
num_frames).reshape(-1, 10)
smpl_['betas'].append(betas_expanded)
# get keypoints derived from SMPL and use them as supervision
smpl_keypoints = self._derive_keypoints(
**smpl_dict,
focal_length=focal_length,
image_size=image_size,
camera_center=camera_center)
keypoints2d_smpl = smpl_keypoints['keypoints2d']
keypoints3d_smpl = smpl_keypoints['keypoints3d']
keypoints2d_smpl_.append(keypoints2d_smpl)
keypoints3d_smpl_.append(keypoints3d_smpl)
# compute bbox from keypoints2d
for kp2d in keypoints2d_smpl:
assert kp2d.shape == (self.num_keypoints_smpl, 2)
xs, ys = kp2d[:, 0], kp2d[:, 1]
xmin = max(np.min(xs), 0)
xmax = min(np.max(xs), width - 1)
ymin = max(np.min(ys), 0)
ymax = min(np.max(ys), height - 1)
bbox_xyxy = [xmin, ymin, xmax, ymax]
bbox_xyxy = self._bbox_expand(bbox_xyxy, scale_factor=1.2)
bbox_xywh = self._xyxy2xywh(bbox_xyxy)
bbox_xywh_.append(bbox_xywh)
# get image paths (smc paths)
image_path = os.path.basename(ann_path)
for frame_id in range(num_frames):
image_id = (device, device_id, frame_id)
image_id_.append(image_id)
image_path_.append(image_path)
os.makedirs(out_path, exist_ok=True)
# make kinect human data
kinect_human_data = self._make_human_data(
kinect_smpl, kinect_image_path_, kinect_image_id_,
kinect_bbox_xywh_, kinect_keypoints2d_smpl_,
kinect_keypoints3d_smpl_, kinect_keypoints2d_humman_,
kinect_keypoints3d_humman_)
file_name = f'humman_{mode}_kinect_ds{self.downsample_ratio}_smpl.npz'
out_file = os.path.join(out_path, file_name)
kinect_human_data.dump(out_file)
# make iphone human data
iphone_human_data = self._make_human_data(
iphone_smpl, iphone_image_path_, iphone_image_id_,
iphone_bbox_xywh_, iphone_keypoints2d_smpl_,
iphone_keypoints3d_smpl_, iphone_keypoints2d_humman_,
iphone_keypoints3d_humman_)
file_name = f'humman_{mode}_iphone_ds{self.downsample_ratio}_smpl.npz'
out_file = os.path.join(out_path, file_name)
iphone_human_data.dump(out_file)
| 41.487179 | 79 | 0.584616 |
2f497b696b9ccdfa5ad15c1d8f2516f140b4332b | 5,124 | py | Python | video_annotator/utils.py | theopsall/video_annotator | cfd33d9a0a6efa332c5bf5fe078936d5fea17d9b | [
"MIT"
] | 3 | 2020-10-29T11:20:07.000Z | 2021-01-08T14:45:31.000Z | video_annotator/utils.py | theopsall/video_annotator | cfd33d9a0a6efa332c5bf5fe078936d5fea17d9b | [
"MIT"
] | 4 | 2020-10-29T14:04:59.000Z | 2020-11-01T19:57:26.000Z | video_annotator/utils.py | theopsall/video_annotator | cfd33d9a0a6efa332c5bf5fe078936d5fea17d9b | [
"MIT"
] | 1 | 2021-02-08T19:49:31.000Z | 2021-02-08T19:49:31.000Z | """
Helper functions
"""
import os
import pandas as pd
import random
from video_annotator.config import VIDEOS, ANNOTATED, USERS
def get_users() -> list:
"""
Get Users from user file
Returns:
list: List of registered users
"""
with open(USERS, 'r') as f:
users = [line.split('\n')[0] for line in f.readlines()]
return users
def add_user(user) -> None:
"""
Insert user to the database
Args:
user (str): Name of the user to be added
"""
with open(USERS, 'a') as f:
f.write(user + '\n')
def add_video(nickname, video_name) -> None:
"""
Add video to the user annotated log file
Args:
nickname (str): Name of the current user annotator
video_name (str): Name of the video that have been annotated
"""
user = os.path.join(ANNOTATED, nickname + '.txt')
with open(user, 'a') as f:
f.write(video_name + '\n')
def make_annotation_file(user) -> None:
"""
Make annotation file containing the annotated videos name by the current user
Args:
user (str): Name of the current user annotator
"""
user_path = os.path.join(ANNOTATED, user)
with open(user_path + '.txt', 'w') as f:
pass
def make_annotation_directory(user) -> None:
"""
Make annotation directory of the user
Args:
user (str): Name of the current user annotator
"""
user_path = os.path.join(ANNOTATED, user)
os.mkdir(user_path)
def get_videos(directory=VIDEOS) -> list:
"""
Crawling videos from Videos subdirectories
Args:
directory (str) : The directory to crawl, default is set to the Videos from static directory
Returns:
tree (list) : A list with all the filepaths
"""
tree = []
subdirs = [folder[0] for folder in os.walk(directory)]
for subdir in subdirs:
files = next(os.walk(subdir))[2]
for _file in files:
tree.append(os.path.join(subdir, _file))
return tree
def num_videos() -> int:
"""
Get the total number of the videos in the database
Returns:
int: Number of the total videos in the database
"""
return len(get_videos())
def annotated(nickname) -> list:
"""
Get the annotated video names of the current user
Args:
nickname (str): User name of the current annotator
Returns:
list: List of the video names
"""
name = os.path.join(ANNOTATED, nickname + '.txt')
return read_txt(name)
def num_annotated(nickname) -> int:
"""
Total number of annotated videos from the current nickname
Args:
nickname (str): User name annotator
Returns:
int: Number of total annotated videos
"""
return len(annotated(nickname))
def read_txt(path) -> list:
"""
Reading the txt file line by line
Args:
path (str): Path name of the txt file to read
Returns:
list: List of the data from the text file
"""
with open(path, 'r') as f:
data = [line.split('\n')[0] for line in f.readlines()]
return data
def get_difference(nickname) -> list:
"""
Get the between the total videos and the annotated videos of the current user.
Args:
nickname (str): User name of the current user
Returns:
list: List of the videos that have not been annotated from the current user
"""
videos = [vid.split(os.sep)[-2:] for vid in get_videos()]
videos = [os.path.join(*vid) for vid in videos]
diff = list(set(videos) - set(annotated(nickname)))
return diff
def get_random_video(diff) -> str:
"""
Get a random video to be annotated
Args:
diff (list): List of the videos that have not been annotated from the current user
Returns:
str: File name of the random video to be annotated
"""
return random.choice(diff)
def add_annotation(user, video, data) -> None:
"""
Save annotations to the csv file for the current user.
Args:
user (str): User name of the current user
video (str): Video name to be annotated
data (list): List of the annotated timestamps for the specific video
"""
dirname = os.path.join(ANNOTATED, user)
dst_class = video.split(os.sep)[0]
dst_dir = os.path.join(dirname, dst_class)
if not os.path.exists(dst_dir):
print(dst_dir)
os.makedirs(dst_dir)
video_path = os.path.join(dst_dir, video.split(os.sep)[1] + '.csv')
df = pd.DataFrame(data=data, columns=['Start Minutes', 'Start Seconds', 'End Minutes', 'End Seconds'])
df.to_csv(video_path, index=False)
def create_directories():
"""
Check if the directories exists, otherwise it creates the VIDEOS and ANNOTATED directories.
"""
if not os.path.isdir(ANNOTATED):
os.mkdir(ANNOTATED)
if not os.path.isdir(VIDEOS):
os.mkdir(VIDEOS)
| 25.62 | 107 | 0.600507 |
a8ebf5a00134fba6abb722a5320966bc6398d635 | 4,690 | py | Python | python_scripts/old_unsorted/original_pipeline/createDatabase.py | jfnavarro/st_misc | bb8c1f2c4f05343f6dd5cc8b8cd8f405d825bd31 | [
"MIT"
] | null | null | null | python_scripts/old_unsorted/original_pipeline/createDatabase.py | jfnavarro/st_misc | bb8c1f2c4f05343f6dd5cc8b8cd8f405d825bd31 | [
"MIT"
] | null | null | null | python_scripts/old_unsorted/original_pipeline/createDatabase.py | jfnavarro/st_misc | bb8c1f2c4f05343f6dd5cc8b8cd8f405d825bd31 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# @Created by Jose Fernandez
""" Complete definition here
"""
import sys
import os
import json
def usage():
print "Usage:"
print "createDatabase.py read_with_barcodes lane expName"
print "This scripts generates json files containing the reads and the transcripts that mapped to barcodes"
def getNameToIdMap(NameToIdFile):
''' Parse the output from the barcode mapper,
VERY CAREFULL, the order of columns has to be like this = Name,chromosome,gene,barcode,x,y,Qul,Read
'''
nameToId = dict()
inF = open(NameToIdFile,'r')
for line in inF:
cols = line.split()
if(len(cols) == 8):
gene = str(cols[2].replace("Gene:",""))
clear_name = str(cols[0].replace("@",""))
chromosome = str(cols[1].replace("Chr:",""))
barcode = str(cols[3])
x = int(cols[4])
y = int(cols[5])
qual = str(cols[6])
seq = str(cols[7])
nameToId[clear_name] = [barcode,x,y,qual,seq,gene]
else:
sys.stderr.write("Error: parse Name to Id file, wrong number of columns " + str(len(cols)) + " \n")
sys.exit(1)
inF.close()
return nameToId
def getIdMap(nameToId):
"""create a map of barcode -> gene -> transcripts
"""
idMap = dict()
for name in nameToId.keys(): ## go trough the reads that have barcodes mapped
gene = nameToId[name][5] ## get the gene name
Id = nameToId[name][0] ## get the barcode
if Id in idMap: # if barcode is already added
if gene in idMap[Id]: # if the gene is the same, increase hits and append reads and qualities and names
idMap[Id][gene][0] += 1
idMap[Id][gene][4].append(nameToId[name][3])
idMap[Id][gene][5].append(nameToId[name][4])
idMap[Id][gene][6].append(name)
else: # create new record for the gene
idMap[Id][gene] = [1,nameToId[name][0],nameToId[name][1],nameToId[name][2],
[nameToId[name][3]], [nameToId[name][4]], [name] ]
else: # create new record for barcode and gene
idMap[Id] = dict()
idMap[Id][gene] = [1,nameToId[name][0],nameToId[name][1],nameToId[name][2],
[nameToId[name][3]], [nameToId[name][4]], [name] ]
return idMap
def main(NameToIdFile,collName,dbName):
if(not os.path.isfile(NameToIdFile)):
sys.stderr.write("Error, one of the input file/s not present")
sys.exit()
nameToId = getNameToIdMap(NameToIdFile)
idMap = getIdMap(nameToId)
filename = dbName + "_" + collName + "_barcodes.json"
filenameReads = dbName + "_" + collName + "_reads.json"
filehandler = open(filename, "w")
filehandlerReads = open(filenameReads, "w")
total_record = 0
unique_genes = set()
unique_barcodes = set()
total_barcodes = 0
for Id in idMap.keys(): # for each barcode
for g in idMap[Id].keys(): # for each gene
x = int(idMap[Id][g][2])
y = int(idMap[Id][g][3])
hits = int(idMap[Id][g][0])
#add json line with the barcode information
jsonLine = json.dumps({"barcode":Id,"gene":g,"x":x,"y":y,"hits":hits})
filehandler.write(jsonLine + "\n")
#get the reads that mapped to the barcode
for qula, read, name in zip(idMap[Id][g][4], idMap[Id][g][5], idMap[Id][g][6]):
#add json line with the raw reads information
jsonLine2 = json.dumps({"name":str(name),"read":str(read),"quality":str(qula),"barcode":Id,"gene":g})
filehandlerReads.write(jsonLine2 + "\n")
unique_genes.add(str(g))
unique_barcodes.add(str(Id))
total_record += 1
total_barcodes += int(idMap[Id][g][0])
if(total_record == 0):
sys.stderr.write("Error: the number of transcripts present is 0\n")
sys.exit(1)
print "Number of Transcripts with Barcode present : " + str(total_barcodes)
print "Number of unique events present : " + str(total_record)
print "Number of unique Barcodes present : " + str(len(unique_barcodes))
print "Number of unique Genes present : " + str(len(unique_genes))
filehandler.close()
filehandlerReads.close()
if __name__ == "__main__":
if len(sys.argv) == 4:
main(sys.argv[1],sys.argv[2],sys.argv[3])
else:
usage()
sys.exit(1)
| 36.640625 | 117 | 0.559488 |
73f52c12b9ecfd36af75d61d327f251f50753f01 | 18,636 | py | Python | pymcfost/parameters.py | j-f-gonzalez/pymcfost | 6ffbe0b7f7fc1d874875c86aa65b89ebd129f33f | [
"MIT"
] | 1 | 2021-03-16T23:53:41.000Z | 2021-03-16T23:53:41.000Z | pymcfost/parameters.py | smaret/pymcfost | a1f49f6da6ad4aebbaa01fd357f8106a32360d11 | [
"MIT"
] | null | null | null | pymcfost/parameters.py | smaret/pymcfost | a1f49f6da6ad4aebbaa01fd357f8106a32360d11 | [
"MIT"
] | null | null | null | import glob
import numpy as np
def _word_to_bool(word):
"""convert a string to boolean according the first 2 characters."""
_accepted_bool_prefixes = ("T", ".T")
return word.upper().startswith(_accepted_bool_prefixes)
class Photons:
pass
class Wavelengths:
pass
class Physics:
pass
class Dust:
component = []
pass
class DustComponent:
pass
class Grid:
pass
class Map:
pass
class Zone:
dust = []
pass
class Mol:
molecule = []
pass
class Molecule:
pass
class Star:
pass
class Simu:
version = float()
pass
class Params:
simu = Simu()
phot = Photons()
wavelengths = Wavelengths()
map = Map()
grid = Grid()
zones = []
mol = Mol()
stars = []
_minimum_version = 3.0
def __init__(self, filename=None, **kwargs):
self.filename = filename
self._read(**kwargs)
def _read(self):
with open(self.filename, mode="rt") as file:
f = []
# Reading file and removing comments
for line in file:
# Skipping comments and empty lines
if (not line.startswith("#")) and (len(line.strip()) > 0):
f += [line]
f = iter(f)
# -- Version of the parameter file --
line = next(f).split()
self.simu.version = float(line[0])
if self.simu.version < self._minimum_version - 1e-3:
print("Parameter file version is ", self.simu.version)
raise Exception(
'Parameter file version must be at least {ver:.2f}'.format(
ver=self._minimum_version
)
)
# -- Number of photon packages --
# to support float notations (e.g. "1.28e8" or "64000.0"),
# we read as float but convert to int
line = next(f).split()
self.phot.nphot_T = int(float(line[0]))
line = next(f).split()
self.phot.nphot_SED = int(float(line[0]))
line = next(f).split()
self.phot.nphot_image = int(float(line[0]))
# -- Wavelengths --
line = next(f).split()
self.wavelengths.n_wl = int(line[0])
self.wavelengths.wl_min = float(line[1])
self.wavelengths.wl_max = float(line[2])
line = next(f).split()
self.simu.compute_T = _word_to_bool(line[0])
self.simu.compute_SED = _word_to_bool(line[1])
self.simu.use_default_wl = _word_to_bool(line[2])
line = next(f).split()
self.wavelengths.file = line[0]
line = next(f).split()
self.simu.separate_contrib = _word_to_bool(line[0])
self.simu.separate_pola = _word_to_bool(line[1])
# -- Grid --
line = next(f).split()
self.grid.type = int(line[0])
line = next(f).split()
self.grid.n_rad = int(line[0])
self.grid.nz = int(line[1])
self.grid.n_az = int(line[2])
self.grid.n_rad_in = int(line[3])
# -- Maps --
line = next(f).split()
self.map.nx = int(line[0])
self.map.ny = int(line[1])
self.map.size = float(line[2])
line = next(f).split()
self.map.RT_imin = float(line[0])
self.map.RT_imax = float(line[1])
self.map.RT_ntheta = int(line[2])
self.map.lRT_centered = _word_to_bool(line[3])
line = next(f).split()
self.map.RT_az_min = float(line[0])
self.map.RT_az_max = float(line[1])
self.map.RT_n_az = int(line[2])
line = next(f).split()
self.map.distance = float(line[0])
line = next(f).split()
self.map.PA = float(line[0])
# -- Scattering method --
line = next(f).split()
self.simu.scattering_method = int(line[0])
line = next(f).split()
self.simu.phase_function_method = int(line[0])
# -- Symetries --
line = next(f).split()
self.simu.image_symmetry = _word_to_bool(line[0])
line = next(f).split()
self.simu.central_symmetry = _word_to_bool(line[0])
line = next(f).split()
self.simu.axial_symmetry = _word_to_bool(line[0])
# -- Disk physics --
line = next(f).split()
self.simu.dust_settling_type = int(line[0])
self.simu.dust_settling_exp = float(line[1])
self.simu.a_settling = float(line[2])
line = next(f).split()
self.simu.radial_migration = _word_to_bool(line[0])
line = next(f).split()
self.simu.dust_sublimation = _word_to_bool(line[0])
line = next(f).split()
self.simu.hydrostatic_eq = _word_to_bool(line[0])
line = next(f).split()
self.simu.viscous_heating = _word_to_bool(line[0])
self.simu.viscosity = float(line[1])
# -- Number of zones --
line = next(f).split()
n_zones = int(line[0])
self.simu.n_zones = n_zones
# -- Density structure --
z = Zone()
for k in range(n_zones):
self.zones.append(z)
line = next(f).split()
self.zones[k].geometry = int(line[0])
line = next(f).split()
self.zones[k].dust_mass = float(line[0])
self.zones[k].gas_to_dust_ratio = float(line[1])
line = next(f).split()
self.zones[k].h0 = float(line[0])
self.zones[k].Rref = float(line[1])
self.zones[k].vertical_exp = float(line[2])
line = next(f).split()
self.zones[k].Rin = float(line[0])
self.zones[k].edge = float(line[1])
self.zones[k].Rout = float(line[2])
self.zones[k].Rc = float(line[3])
line = next(f).split()
self.zones[k].flaring_exp = float(line[0])
line = next(f).split()
self.zones[k].surface_density_exp = float(line[0])
self.zones[k].m_gamma_exp = float(line[1])
# -- Grain properties --
d = Dust
for k in range(n_zones):
line = next(f).split()
n_species = int(line[0])
self.zones[k].n_species = n_species
for j in range(n_species):
self.zones[k].dust.append(d)
line = next(f).split()
self.zones[k].dust[j].type = line[0]
n_components = int(line[1])
self.zones[k].dust[j].n_components = n_components
self.zones[k].dust[j].mixing_rule = int(line[2])
self.zones[k].dust[j].porosity = float(line[3])
self.zones[k].dust[j].mass_fraction = float(line[4])
self.zones[k].dust[j].DHS_Vmax = float(line[5])
c = DustComponent()
for l in range(n_components):
self.zones[k].dust[j].component.append(c)
line = next(f).split()
self.zones[k].dust[j].component[l].file = line[0]
self.zones[k].dust[j].component[l].volume_fraction = float(line[1])
line = next(f).split()
self.zones[k].dust[j].heating_method = int(line[0])
line = next(f).split()
self.zones[k].dust[j].amin = float(line[0])
self.zones[k].dust[j].amax = float(line[1])
self.zones[k].dust[j].aexp = float(line[2])
self.zones[k].dust[j].n_grains = int(line[3])
# -- Molecular settings --
line = next(f).split()
self.mol.compute_pop = _word_to_bool(line[0])
self.mol.compute_pop_accurate = _word_to_bool(line[1])
self.mol.LTE = _word_to_bool(line[2])
self.mol.profile_width = float(line[3])
line = next(f).split()
self.mol.v_turb = float(line[0])
line = next(f).split()
n_mol = int(line[0])
self.mol.n_mol = n_mol
m = Molecule()
for k in range(n_mol):
self.mol.molecule.append(m)
line = next(f).split()
self.mol.molecule[k].file = line[0]
self.mol.molecule[k].level_max = int(line[1])
line = next(f).split()
self.mol.molecule[k].v_max = float(line[0])
self.mol.molecule[k].nv = int(line[1])
line = next(f).split()
self.mol.molecule[k].cst_abundance = _word_to_bool(line[0])
self.mol.molecule[k].abundance = line[1]
self.mol.molecule[k].abundance_file = line[2]
line = next(f).split()
self.mol.molecule[k].ray_tracing = _word_to_bool(line[0])
nTrans = int(line[1])
self.mol.molecule[k].n_trans = nTrans
line = next(f).split()
self.mol.molecule[k].transitions = list(
map(int, line[0:nTrans])
) # convert list of str to int
# -- Star properties --
line = next(f).split()
n_stars = int(line[0])
self.simu.n_stars = n_stars
s = Star()
for k in range(n_stars):
self.stars.append(s)
line = next(f).split()
self.stars[k].Teff = float(line[0])
self.stars[k].R = float(line[1])
self.stars[k].M = float(line[2])
self.stars[k].x = float(line[3])
self.stars[k].y = float(line[4])
self.stars[k].z = float(line[5])
self.stars[k].is_bb = _word_to_bool(line[6])
line = next(f).split()
self.stars[k].file = line[0]
line = next(f).split()
self.stars[k].fUV = float(line[0])
self.stars[k].slope_UV = float(line[1])
# -- Command line options --
for line in f:
if (len(line) > 0):
line = line.split()
if (len(line) > 0): # we test again in case there were only spaces
if (line[0] == "Executed"):
self.options = " ".join(line[6:])
if (line[0] == "sha"):
self.mcfost_sha = line[2]
def __str__(self):
    """Return this parameter set formatted as an MCFOST v3.0 parameter file.

    The blocks are emitted in the order MCFOST expects: photon packets,
    wavelengths, grid, maps, scattering method, symmetries, disk physics,
    zones, grain properties, molecular RT settings and star properties.

    BUGFIX: the star line previously wrote ``stars[k].x`` twice and never
    emitted the ``z`` coordinate (the reader fills ``stars[k].z`` from the
    sixth column); the sixth value is now ``stars[k].z``.
    """
    # -- Photon packets --
    txt = f"""3.0 mcfost version\n
#-- Number of photon packages --
{self.phot.nphot_T:<10.5g} nbr_photons_eq_th : T computation
{self.phot.nphot_SED:<10.5g} nbr_photons_lambda : SED computation
{self.phot.nphot_image:<10.5g} nbr_photons_image : images computation\n\n"""
    # -- Wavelengths --
    txt += f"""#-- Wavelength --
{self.wavelengths.n_wl:<4d} {self.wavelengths.wl_min:<5.1f} {self.wavelengths.wl_max:<7g} n_lambda, lambda_min, lambda_max [microns]
{self.simu.compute_T} {self.simu.compute_SED} {self.simu.use_default_wl} compute temperature?, compute sed?, use default wavelength grid ?
{self.wavelengths.file} wavelength file (if previous parameter is F)
{self.simu.separate_contrib} {self.simu.separate_pola} separation of different contributions?, stokes parameters?\n\n"""
    # -- Grid --
    txt += f"""#-- Grid geometry and size --
{self.grid.type:>1d} 1 = cylindrical, 2 = spherical
{self.grid.n_rad} {self.grid.nz} {self.grid.n_az} {self.grid.n_rad_in} n_rad (log distribution), nz (or n_theta), n_az, n_rad_in\n\n"""
    # -- Maps --
    txt += f"""#-- Maps --
{self.map.nx} {self.map.ny} {self.map.size:5.1f} grid (nx,ny), size [au]
{self.map.RT_imin:<4.1f} {self.map.RT_imax:<4.1f} {self.map.RT_ntheta:>2d} {self.map.lRT_centered} RT: imin, imax, n_incl, centered ?
{self.map.RT_az_min:<4.1f} {self.map.RT_az_max:<4.1f} {self.map.RT_n_az:>2d} RT: az_min, az_max, n_az
{self.map.distance:<6.2f} distance (pc)
{self.map.PA:<6.2f} disk PA\n\n"""
    # -- Scattering method --
    txt += f"""#-- Scattering method --
{self.simu.scattering_method} 0=auto, 1=grain prop, 2=cell prop
{self.simu.phase_function_method} 1=Mie, 2=hg (2 implies the loss of polarizarion)\n\n"""
    # -- Symetries --
    txt += f"""#-- Symmetries --
{self.simu.image_symmetry} image symmetry
{self.simu.central_symmetry} central symmetry
{self.simu.axial_symmetry} axial symmetry (important only if N_phi > 1)\n\n"""
    # -- Disk physics --
    txt += f"""#Disk physics
{self.simu.dust_settling_type} {self.simu.dust_settling_exp:<6.2f} {self.simu.a_settling:<6.2f} dust_settling (0=no settling, 1=parametric, 2=Dubrulle, 3=Fromang), exp_strat, a_strat (for parametric settling)
{self.simu.radial_migration} dust radial migration
{self.simu.dust_sublimation} sublimate dust
{self.simu.hydrostatic_eq} hydrostatic equilibrium
{self.simu.viscous_heating} {self.simu.viscosity:4.1g} viscous heating, alpha_viscosity\n\n"""
    # -- Number of zones --
    txt += f"""#-- Number of zones -- 1 zone = 1 density structure + corresponding grain properties
{self.simu.n_zones}\n\n"""
    # -- Density structure: one paragraph per zone --
    txt += f"#-- Density structure --\n"
    for k in range(self.simu.n_zones):
        txt += f""" {self.zones[k].geometry} zone type : 1 = disk, 2 = tapered-edge disk, 3 = envelope, 4 = debris disk, 5 = wall
{self.zones[k].dust_mass:<10.2e} {self.zones[k].gas_to_dust_ratio:<5.1f} dust mass, gas-to-dust mass ratio
{self.zones[k].h0:<5.1f} {self.zones[k].Rref:<6.1f} {self.zones[k].vertical_exp:<6.1f} scale height, reference radius (AU), unused for envelope, vertical profile exponent (only for debris disk)
{self.zones[k].Rin:<6.1f} {self.zones[k].edge:<6.1f} {self.zones[k].Rout:<6.1f} {self.zones[k].Rc:<6.1f} Rin, edge, Rout, Rc (AU) Rc is only used for tappered-edge & debris disks (Rout set to 8*Rc if Rout==0)
{self.zones[k].flaring_exp:<8.3f} flaring exponent, unused for envelope
{self.zones[k].surface_density_exp} {self.zones[k].m_gamma_exp} surface density exponent (or -gamma for tappered-edge disk or volume density for envelope), usually < 0, -gamma_exp (or alpha_in & alpha_out for debris disk)\n\n"""
    txt += f"\n"
    # -- Grain properties: nested per zone, per species, per component --
    txt += f"#-- Grain properties --\n"
    for k in range(self.simu.n_zones):
        txt += (
            f" {self.zones[k].n_species} Number of species\n"
        )
        for j in range(self.zones[k].n_species):
            txt += f" Mie {self.zones[k].dust[j].n_components} {self.zones[k].dust[j].mixing_rule} {self.zones[k].dust[j].porosity:<5.2f} {self.zones[k].dust[j].mass_fraction:<5.2f} {self.zones[k].dust[j].DHS_Vmax} Grain type (Mie or DHS), N_components, mixing rule (1 = EMT or 2 = coating), porosity, mass fraction, Vmax (for DHS)\n"
            for l in range(self.zones[k].dust[j].n_components):
                txt += f" {self.zones[k].dust[j].component[l].file} {self.zones[k].dust[j].component[l].volume_fraction} Optical indices file, volume fraction\n"
            txt += f""" {self.zones[k].dust[j].heating_method} Heating method : 1 = RE + LTE, 2 = RE + NLTE, 3 = NRE
{self.zones[k].dust[j].amin} {self.zones[k].dust[j].amax} {self.zones[k].dust[j].aexp} {self.zones[k].dust[j].n_grains} amin, amax, aexp, nbr_grains\n\n"""
    # -- Molecular settings --
    txt += f"""#-- Molecular RT settings --
{self.mol.compute_pop} {self.mol.compute_pop_accurate} {self.mol.LTE} {self.mol.profile_width} lpop, laccurate_pop, LTE, profile width
{self.mol.v_turb} v_turb [km/s]
{self.mol.n_mol} nmol\n"""
    for k in range(self.mol.n_mol):
        txt += f""" {self.mol.molecule[k].file} {self.mol.molecule[k].level_max} molecular data filename, level_max
{self.mol.molecule[k].v_max} {self.mol.molecule[k].nv} vmax (km.s-1), n_speed
{self.mol.molecule[k].cst_abundance} {self.mol.molecule[k].abundance} {self.mol.molecule[k].abundance_file} cst molecule abundance ?, abundance, abundance file
{self.mol.molecule[k].ray_tracing} {self.mol.molecule[k].n_trans} ray tracing ?, number of lines in ray-tracing\n """
        for j in range(self.mol.molecule[k].n_trans):
            txt += f" {self.mol.molecule[k].transitions[j]}"
        txt += f" transition numbers\n"
    txt += f"\n"
    # -- Star properties --
    txt += f"""#-- Star properties --
{self.simu.n_stars} Number of stars\n"""
    for k in range(self.simu.n_stars):
        # sixth column is the z coordinate (was erroneously x before)
        txt += f""" {self.stars[k].Teff} {self.stars[k].R} {self.stars[k].M} {self.stars[k].x} {self.stars[k].y} {self.stars[k].z} {self.stars[k].is_bb} Temp, radius (solar radius),M (solar mass),x,y,z (AU), is a blackbody?
{self.stars[k].file}
{self.stars[k].fUV} {self.stars[k].slope_UV} fUV, slope_UV\n"""
    return txt
def writeto(self, outname):
    """Render this parameter set with ``str`` and save it to *outname*."""
    rendered = str(self)
    with open(outname, mode="wt") as out_file:
        out_file.write(rendered)
def calc_inclinations(self):
    """Compute the inclination(s) used for ray-traced SEDs and images.

    Returns ``self.map.RT_imin`` directly for a single inclination;
    otherwise the inclinations are spaced uniformly in cos(i) between
    imin and imax (bin centers when ``lRT_centered`` is set, bin edges
    otherwise) and returned in degrees.
    """
    n_incl = self.map.RT_ntheta
    if n_incl == 1:
        return self.map.RT_imin
    cos_min, cos_max = np.cos(np.deg2rad([self.map.RT_imin, self.map.RT_imax]))
    if self.map.lRT_centered:
        # midpoints of n_incl equal-width bins in cos(i)
        frac = (np.arange(n_incl) + 0.5) / n_incl
    else:
        # n_incl points including both endpoints
        frac = (np.arange(n_incl)) / (n_incl - 1)
    return np.rad2deg(np.arccos(cos_min + frac * (cos_max - cos_min)))
def find_parameter_file(directory="./"):
    """Locate the single MCFOST parameter file (``*.par*``) in *directory*.

    :param directory: directory to search (non-recursive)
    :returns: path of the unique matching file
    :raises ValueError: if zero or more than one candidate is found
    """
    # renamed from ``list`` to avoid shadowing the builtin
    candidates = glob.glob(directory + "/*.par*")
    if len(candidates) == 1:
        return candidates[0]
    elif len(candidates) > 1:
        raise ValueError("Multiple parameter files found in " + directory)
    else:
        raise ValueError("No parameter files found in " + directory)
| 38.345679 | 343 | 0.544698 |
097f3c2ed1050e52ea9a1554541b4dc064bf9fcf | 18,753 | py | Python | pipedream/datasources/maps_labelnoise.py | rainerkelz/ICASSP18 | 7445eab24ab4c78e425c8c9c41fbba1a75696223 | [
"MIT"
] | null | null | null | pipedream/datasources/maps_labelnoise.py | rainerkelz/ICASSP18 | 7445eab24ab4c78e425c8c9c41fbba1a75696223 | [
"MIT"
] | null | null | null | pipedream/datasources/maps_labelnoise.py | rainerkelz/ICASSP18 | 7445eab24ab4c78e425c8c9c41fbba1a75696223 | [
"MIT"
] | null | null | null | from .. import utils
import madmom.audio.spectrogram as mmspec
from madmom.utils import midi
from itertools import cycle
from scipy.signal import convolve
import numpy as np
import os
from functools import partial
# Identifiers of the MAPS piano synthesizers / recording conditions.
# Filenames in the dataset start with one of these prefixes, which is how
# the stratified samplers below group recordings by instrument.
synthnames = [
    'ENSTDkCl',
    'ENSTDkAm',
    'StbgTGd2',
    'SptkBGCl',
    'SptkBGAm',
    'AkPnStgb',
    'AkPnCGdD',
    'AkPnBsdf',
    'AkPnBcht'
]
def no_weighting(w, *args):
    """Identity weighting: return the weight column *w* unchanged.

    Extra positional arguments (the note's start/end frames) are ignored.
    """
    return w
def get_no_weighting():
    """Return the identity weighting function (no per-note weighting)."""
    return no_weighting
def nopp(raw):
    """Identity postprocessing: return the raw weight matrix unchanged."""
    return raw
def get_postprocess_none():
    """Return the identity postprocessing function (no weight smoothing)."""
    return nopp
def two_hump(start_weight, between_weight, end_weight, w, start, end):
    """Write a three-level weight profile for one note into *w* in place.

    ``w[start]`` receives ``start_weight``, the interior frames
    ``start+1 .. end-1`` receive ``between_weight`` and ``w[end]``
    receives ``end_weight``.  The (mutated) array is also returned.

    NOTE(review): writes at index ``end`` itself, one frame past the
    exclusive note end used elsewhere — confirm this is intended and that
    ``end < len(w)`` always holds.
    """
    interior = slice(start + 1, end)
    w[interior] = between_weight
    w[start] = start_weight
    w[end] = end_weight
    return w
def get_two_hump_weighting_function(start_weight, between_weight, end_weight):
    """Return a ``two_hump`` weighting with the three levels pre-bound."""
    return partial(two_hump, start_weight, between_weight, end_weight)
def exponential_weighting(start_weight, end_weight, factor, w, start, end):
    """Fill ``w[start:end]`` in place with an exponentially decaying ramp.

    The values are ``1 + factor * exp(-t)`` with ``t`` linearly spaced
    from ``start_weight`` to ``end_weight`` over the note's frames.
    Returns the (mutated) array.
    """
    ramp = np.linspace(start_weight, end_weight, end - start)
    w[start:end] = 1 + np.exp(-ramp) * factor
    return w
def get_exponential_weighting(start_weight=0, end_weight=3, factor=1):
    """Return an ``exponential_weighting`` with its shape pre-bound."""
    return partial(exponential_weighting, start_weight, end_weight, factor)
def phase_index(w, start, end):
    """Overwrite ``w[start:end]`` with 1-based frame indices in place.

    After the call the note's frames hold 1, 2, ..., end-start; the
    (mutated) array is returned.
    """
    note_length = end - start
    w[start:end] = np.arange(note_length) + 1
    return w
# this is only used for error analysis so far, to answer questions
# such as: in which phase of the note did we make the most errors?
# (it does not make a lot of sense for anything else)
def get_phase_indexing_function():
    """Return the ``phase_index`` function (used only for error analysis)."""
    return phase_index
def get_postprocess_smooth_weighting(kernel=np.array([1, 1, 1, 0, 0])):
    """Return a postprocessing function that smooths a weight matrix.

    The returned closure convolves each of the 88 note columns with
    ``kernel`` (``mode='same'``), rescales the result so its maximum is 1
    (skipped if the maximum is 0), and adds 1.

    NOTE(review): ``kernel`` is a mutable (ndarray) default argument;
    the closure only reads it, so this is safe unless a caller mutates
    the array it passed in.
    """
    def smooth_weights(raw):
        w = np.zeros_like(raw)
        # Python 2: xrange
        for note in xrange(w.shape[1]):
            w[:, note] = convolve(raw[:, note], kernel, mode='same')
        # moved weight normalizing to here, so each postprocessing function can opt out ...
        wmax = w.max()
        if wmax > 0:
            w /= wmax
        w += 1
        return w
    return smooth_weights
# this is the one that works very well
def midi_to_groundtruth_a(midifile, dt, n_frames, weighting, postprocess_weighting):
    """Rasterize a MIDI file into framewise labels and per-note weights.

    Variant "a": note boundaries are *rounded* to the nearest frame
    (per the comment above, the variant that works very well).

    :param midifile: path of the MIDI file
    :param dt: frame period in seconds (1 / fps of the spectrogram)
    :param n_frames: number of spectrogram frames the labels must cover
    :param weighting: optional callable ``(w_col, start, end) -> w_col``
        applied to each note's weight column
    :param postprocess_weighting: optional callable applied to the whole
        weight matrix afterwards
    :returns: ``(y, w)`` — int8 piano roll and float32 weight matrix,
        both of shape (n_frames, 88)
    """
    pattern = midi.MIDIFile.from_file(midifile)
    y = np.zeros((n_frames, 88)).astype(np.int8)
    w = np.ones((n_frames, 88)).astype(np.float32)
    for onset, _pitch, duration, velocity, _channel in pattern.notes():
        pitch = int(_pitch)
        frame_start = int(np.round(onset / dt))
        frame_end = int(np.round((onset + duration) / dt))
        # map MIDI pitch to piano-key column 0..87 (88 keys, offset 21)
        label = pitch - 21
        y[frame_start:frame_end, label] = 1
        if weighting is not None:
            w[:, label] = weighting(w[:, label], frame_start, frame_end)
    if postprocess_weighting is not None:
        w = postprocess_weighting(w)
    return y, w
# this one uses ceil instead of round
def midi_to_groundtruth_b(midifile, dt, n_frames, weighting, postprocess_weighting):
    """Like :func:`midi_to_groundtruth_a`, but boundaries use ``ceil``.

    Variant "b": both frame_start and frame_end are rounded *up*.
    Returns ``(y, w)`` of shape (n_frames, 88).
    """
    pattern = midi.MIDIFile.from_file(midifile)
    y = np.zeros((n_frames, 88)).astype(np.int8)
    w = np.ones((n_frames, 88)).astype(np.float32)
    for onset, _pitch, duration, velocity, _channel in pattern.notes():
        pitch = int(_pitch)
        frame_start = int(np.ceil(onset / dt))
        frame_end = int(np.ceil((onset + duration) / dt))
        # piano-key column 0..87 (MIDI pitch offset 21)
        label = pitch - 21
        y[frame_start:frame_end, label] = 1
        if weighting is not None:
            w[:, label] = weighting(w[:, label], frame_start, frame_end)
    if postprocess_weighting is not None:
        w = postprocess_weighting(w)
    return y, w
# this one uses floor instead of round
def midi_to_groundtruth_c(midifile, dt, n_frames, weighting, postprocess_weighting):
    """Like :func:`midi_to_groundtruth_a`, but boundaries use ``floor``.

    Variant "c": both frame_start and frame_end are rounded *down*.
    Returns ``(y, w)`` of shape (n_frames, 88).
    """
    pattern = midi.MIDIFile.from_file(midifile)
    y = np.zeros((n_frames, 88)).astype(np.int8)
    w = np.ones((n_frames, 88)).astype(np.float32)
    for onset, _pitch, duration, velocity, _channel in pattern.notes():
        pitch = int(_pitch)
        frame_start = int(np.floor(onset / dt))
        frame_end = int(np.floor((onset + duration) / dt))
        # piano-key column 0..87 (MIDI pitch offset 21)
        label = pitch - 21
        y[frame_start:frame_end, label] = 1
        if weighting is not None:
            w[:, label] = weighting(w[:, label], frame_start, frame_end)
    if postprocess_weighting is not None:
        w = postprocess_weighting(w)
    return y, w
# this is the one that produced the ISMIR2016 results (systematic error)
def midi_to_groundtruth_d(midifile, dt, n_frames, weighting, postprocess_weighting):
    """Variant "d": the ISMIR2016 rasterization (systematic error).

    The note end is computed as ``ceil(onset/dt) + ceil(duration/dt)``
    instead of rounding the absolute offset time, which introduces the
    systematic error mentioned in the comment above.
    Returns ``(piano_roll, w)`` of shape (n_frames, 88).
    """
    pattern = midi.MIDIFile.from_file(midifile)
    notes = pattern.notes()
    # 109 - 21 == 88 piano keys
    piano_roll = np.zeros((n_frames, 109 - 21), dtype=np.int8)
    w = np.ones((n_frames, 88)).astype(np.float32)
    for n in notes:
        onset = int(np.ceil(n[0] / dt))
        end = onset + int(np.ceil(n[2] / dt))
        label = int(n[1] - 21)
        piano_roll[onset:end, label] = 1
        if weighting is not None:
            w[:, label] = weighting(w[:, label], onset, end)
    if postprocess_weighting is not None:
        w = postprocess_weighting(w)
    return piano_roll, w
# this is one that randomly shifts the whole note 1 frame
def midi_to_groundtruth_e(rng, midifile, dt, n_frames, weighting, postprocess_weighting):
    """Variant "e": like _a, but each whole note is randomly shifted.

    After rounding, every note is moved by a random offset drawn from
    {-1, 0, +1} frames (same shift for start and end), clipped to the
    valid frame range.  ``rng`` supplies the randomness.
    Returns ``(y, w)`` of shape (n_frames, 88).
    """
    pattern = midi.MIDIFile.from_file(midifile)
    y = np.zeros((n_frames, 88)).astype(np.int8)
    w = np.ones((n_frames, 88)).astype(np.float32)
    for onset, _pitch, duration, velocity, _channel in pattern.notes():
        pitch = int(_pitch)
        frame_start = int(np.round(onset / dt))
        frame_end = int(np.round((onset + duration) / dt))
        label = pitch - 21
        # randomly shift whole note
        shift = rng.randint(-1, 2)
        frame_start = max(0, frame_start + shift)
        frame_end = min(n_frames, frame_end + shift)
        y[frame_start:frame_end, label] = 1
        if weighting is not None:
            w[:, label] = weighting(w[:, label], frame_start, frame_end)
    if postprocess_weighting is not None:
        w = postprocess_weighting(w)
    return y, w
# this is one that randomly shifts the start and end
def midi_to_groundtruth_f(rng, midifile, dt, n_frames, weighting, postprocess_weighting):
    """Variant "f": like _e, but start and end are shifted independently.

    Each boundary gets its own random offset from {-1, 0, +1} frames,
    clipped to the valid frame range, so note lengths may change by up
    to two frames.  Returns ``(y, w)`` of shape (n_frames, 88).
    """
    pattern = midi.MIDIFile.from_file(midifile)
    y = np.zeros((n_frames, 88)).astype(np.int8)
    w = np.ones((n_frames, 88)).astype(np.float32)
    for onset, _pitch, duration, velocity, _channel in pattern.notes():
        pitch = int(_pitch)
        frame_start = int(np.round(onset / dt))
        frame_end = int(np.round((onset + duration) / dt))
        label = pitch - 21
        # randomly shift start and end separately
        shift_start = rng.randint(-1, 2)
        frame_start = max(0, frame_start + shift_start)
        shift_end = rng.randint(-1, 2)
        frame_end = min(n_frames, frame_end + shift_end)
        y[frame_start:frame_end, label] = 1
        if weighting is not None:
            w[:, label] = weighting(w[:, label], frame_start, frame_end)
    if postprocess_weighting is not None:
        w = postprocess_weighting(w)
    return y, w
def spec_notes_weight_from_file(basedir, filename, _audio_options, midi_to_groundtruth, weighting, postprocess_weighting):
    """Load one recording: spectrogram plus aligned labels and weights.

    Reads ``<basedir>/<filename>.flac`` and the matching ``.mid``,
    computes the spectrogram per ``_audio_options`` and rasterizes the
    MIDI via the given ``midi_to_groundtruth`` variant.

    :returns: ``(x, y, w)`` — spectrogram, piano roll and weight matrix,
        all with len(x) frames
    """
    audiofilename = os.path.join(basedir, filename + '.flac')
    midifilename = os.path.join(basedir, filename + '.mid')
    spec_type, audio_options = utils.canonicalize_audio_options(_audio_options, mmspec)
    # it's necessary to cast this to np.array, b/c the madmom-class holds references to wayyy too much memory ...
    x = np.array(spec_type(audiofilename, **audio_options))
    y, w = midi_to_groundtruth(midifilename, 1. / audio_options['fps'], x.shape[0], weighting, postprocess_weighting)
    return x, y, w
def random_log_filtered_mono_from_file(basedir, filename, audio_options, midi_to_groundtruth, samplesize, x_contextsize, y_contextsize, weighting, postprocess_weighting, rng, finite):
    """Yield random ``(x, y, w, index)`` frame samples from one recording.

    The spectrogram/labels are optionally wrapped in context windows,
    then frame indices are drawn via
    ``utils.indices_without_replacement`` (endless when ``finite`` is
    False) and the corresponding slices are yielded one by one.
    """
    x, y, w = spec_notes_weight_from_file(basedir, filename, audio_options, midi_to_groundtruth, weighting, postprocess_weighting)
    if x_contextsize > 0:
        x = utils.Context2D4D(x, x_contextsize)
    if y_contextsize > 0:
        y = utils.Context2D4D(y, y_contextsize)
        w = utils.Context2D4D(w, y_contextsize)
    indices = utils.indices_without_replacement(np.arange(0, len(x)), samplesize, rng, finite=finite)
    for idx in indices:
        yield x[idx], y[idx], w[idx], idx
def uniformly_random_sample(basedir, foldname, audio_options, midi_to_groundtruth, batchsize, x_contextsize, y_contextsize, weighting, postprocess_weighting, rng):
    """Yield endless minibatches sampled uniformly over the fold's files.

    For each entry in a batch, a file is chosen uniformly at random and a
    single random frame is drawn from that file's infinite generator.
    Yields ``(x_batch, y_batch, w_batch, frame_indices, filenames)``.
    (Python 2 code: uses ``xrange``.)
    """
    filenames = open(foldname, 'r').readlines()
    filenames = [f.strip() for f in filenames]
    specs = []
    for filename in filenames:
        specs.append(
            (
                filename,
                random_log_filtered_mono_from_file(
                    basedir,
                    filename,
                    audio_options,
                    midi_to_groundtruth,
                    1,
                    x_contextsize,
                    y_contextsize,
                    weighting,
                    postprocess_weighting,
                    rng=rng,
                    finite=False
                )
            )
        )
    while True:
        x_batch = []
        y_batch = []
        i_batch = []
        w_batch = []
        f_batch = []
        for bi in xrange(batchsize):
            # choose any file from specs
            fi = rng.randint(0, len(specs))
            filename, xywi_it = specs[fi]
            x, y, w, i = next(xywi_it)
            x_batch.append(x)
            y_batch.append(y)
            w_batch.append(w)
            i_batch.append(i)
            f_batch.append(filename)
        yield np.vstack(x_batch), np.vstack(y_batch), np.vstack(w_batch), np.hstack(i_batch), f_batch
def stratified_random_sample(basedir, foldname, audio_options, midi_to_groundtruth, batchsize, x_contextsize, y_contextsize, weighting, postprocess_weighting, rng):
    """Yield endless minibatches stratified by synthesizer.

    Fold files are grouped by the synthesizer prefix of their filename;
    each batch draws round-robin from a freshly shuffled synthesizer
    order, so all instruments are (approximately) equally represented.
    Yields ``(x_batch, y_batch, w_batch, frame_indices, filenames)``.
    (Python 2 code: uses ``xrange`` and ``dict.iteritems``.)
    """
    filenames = open(foldname, 'r').readlines()
    filenames = [f.strip() for f in filenames]
    present_synthnames = set()
    for filename in filenames:
        # all synthnames have equal length, so slice by the first one
        synthname = filename[0:len(synthnames[0])]
        present_synthnames.add(synthname)
    # IMPORTANT:
    # we do away with this kind-of artificial restriction,
    # set n_samples_per_label to 1
    # and sample round-robin style from the synthnames
    # n_samples_per_label, rest = divmod(batchsize, len(present_synthnames))
    # if rest != 0:
    #     raise RuntimeError('batchsize not divisible by len(present_synthnames)={}'.format(len(present_synthnames)))
    synthname_to_specs = {synthname: list() for synthname in present_synthnames}
    for filename in filenames:
        synthname = filename[0:len(synthnames[0])]
        synthname_to_specs[synthname].append(
            (
                filename,
                random_log_filtered_mono_from_file(
                    basedir,
                    filename,
                    audio_options,
                    midi_to_groundtruth,
                    1,
                    x_contextsize,
                    y_contextsize,
                    weighting,
                    postprocess_weighting,
                    rng=rng,
                    finite=False
                )
            )
        )
    cycling_synthname_to_specs = {synthname: cycle(v) for synthname, v in synthname_to_specs.iteritems()}
    while True:
        x_batch = []
        y_batch = []
        i_batch = []
        w_batch = []
        f_batch = []
        # IMPORTANT:
        # this way, if the batchsize is *not* an integer-multiple of the number of len(present_synthnames),
        # we randomize, which synthesizer will be under/over represented in each batch
        random_synthnames = list(present_synthnames)
        rng.shuffle(random_synthnames)
        cycling_synthnames = cycle(random_synthnames)
        for bi in xrange(batchsize):
            synthname = next(cycling_synthnames)
            filename, xywi_it = next(cycling_synthname_to_specs[synthname])
            x, y, w, i = next(xywi_it)
            x_batch.append(x)
            y_batch.append(y)
            w_batch.append(w)
            i_batch.append(i)
            f_batch.append(filename)
        yield np.vstack(x_batch), np.vstack(y_batch), np.vstack(w_batch), np.hstack(i_batch), f_batch
def get_fold_iterator(
        basedir,
        foldname,
        audio_options,
        midi_to_groundtruth,
        batchsize,
        x_contextsize,
        y_contextsize,
        weighting=None,
        postprocess_weighting=None):
    """Return an endless generator of sequential per-epoch iterators.

    All fold files are loaded once; each yielded inner iterator walks
    every file in order, emitting cycling batches of ``batchsize``
    frames.  The inner iterator yields 4-tuples ``(x, y, i, f)`` when
    ``weighting`` is None, else 5-tuples ``(x, y, w, i, f)``.
    (Python 2 code: ``print`` statement, ``xrange``, ``indices.next()``.)
    """
    filenames = open(foldname, 'r').readlines()
    filenames = [f.strip() for f in filenames]
    # extract all the filenames from the foldfile
    labelled_specs = []
    for filename in filenames:
        x, y, w = spec_notes_weight_from_file(
            basedir,
            filename,
            audio_options,
            midi_to_groundtruth,
            weighting=weighting,
            postprocess_weighting=postprocess_weighting
        )
        if x_contextsize > 0:
            x = utils.Context2D4D(x, x_contextsize)
        if y_contextsize > 0:
            y = utils.Context2D4D(y, y_contextsize)
            w = utils.Context2D4D(w, y_contextsize)
        labelled_specs.append((filename, (x, y, w)))
    n_samples = 0
    for f, (x, y, w) in labelled_specs:
        n_samples += len(x)
    print 'fold_iterator (n_samples={})'.format(n_samples)
    def it():
        if weighting is None:
            for f, (x, y, w) in labelled_specs:
                n_batches = (len(x) // batchsize) + 1
                indices = cycle(xrange(0, len(x)))
                for i_batch in (np.array([indices.next() for _ in xrange(batchsize)]) for b in xrange(n_batches)):
                    yield x[i_batch], y[i_batch], i_batch, f
        else:
            # TODO: very hacky ...
            # the return values change, if weighting is used ...
            # this should *all* be dictionaries ...
            for f, (x, y, w) in labelled_specs:
                n_batches = (len(x) // batchsize) + 1
                indices = cycle(xrange(0, len(x)))
                for i_batch in (np.array([indices.next() for _ in xrange(batchsize)]) for b in xrange(n_batches)):
                    yield x[i_batch], y[i_batch], w[i_batch], i_batch, f
    # each time 'next' is called, yield a new generator that iterates over the (cached!) mem-mapped spectrograms
    while True:
        yield it()
def fully_stratified_random_sample(basedir, foldname, audio_options, midi_to_groundtruth, batchsize, x_contextsize, y_contextsize, weighting, postprocess_weighting, rng):
    """Yield endless minibatches drawn via ``utils.ReverseNoteIndex``.

    All fold files are loaded once; sampling of (piece, frame) pairs is
    delegated to ``ReverseNoteIndex.sample()`` (presumably balancing
    label/note occurrences — see ``utils``; TODO confirm).
    Yields ``(x_batch, y_batch, w_batch, frame_indices, filenames)``.
    (Python 2 code: uses ``xrange``.)
    """
    filenames = open(foldname, 'r').readlines()
    filenames = [f.strip() for f in filenames]
    pieces = []
    note_index = utils.ReverseNoteIndex()
    for piece_index, filename in enumerate(filenames):
        x, y, w = spec_notes_weight_from_file(basedir, filename, audio_options, midi_to_groundtruth, weighting, postprocess_weighting)
        note_index.update(piece_index, y.astype(np.int8))
        if x_contextsize > 0:
            x = utils.Context2D4D(x, x_contextsize)
        if y_contextsize > 0:
            y = utils.Context2D4D(y, y_contextsize)
            w = utils.Context2D4D(w, y_contextsize)
        pieces.append((x, y, w, filename))
    while True:
        x_batch = []
        y_batch = []
        i_batch = []
        w_batch = []
        f_batch = []
        for bi in xrange(batchsize):
            piece_index, time_index = note_index.sample()
            x, y, w, f = pieces[piece_index]
            x_batch.append(x[time_index])
            y_batch.append(y[time_index])
            w_batch.append(w[time_index])
            i_batch.append(time_index)
            f_batch.append(f)
        yield np.vstack(x_batch), np.vstack(y_batch), np.vstack(w_batch), np.hstack(i_batch), f_batch
def single_note_stratified_random_sample(basedir, foldname, audio_options, midi_to_groundtruth, batchsize, x_contextsize, y_contextsize, weighting, postprocess_weighting, rng):
    """Like :func:`fully_stratified_random_sample`, but via
    ``utils.ReverseSingleNoteIndex`` (single-note stratification — see
    ``utils`` for the exact sampling scheme).

    Yields ``(x_batch, y_batch, w_batch, frame_indices, filenames)``.
    (Python 2 code: uses ``xrange``.)
    """
    filenames = open(foldname, 'r').readlines()
    filenames = [f.strip() for f in filenames]
    pieces = []
    note_index = utils.ReverseSingleNoteIndex()
    for piece_index, filename in enumerate(filenames):
        x, y, w = spec_notes_weight_from_file(basedir, filename, audio_options, midi_to_groundtruth, weighting, postprocess_weighting)
        note_index.update(piece_index, y.astype(np.int8))
        if x_contextsize > 0:
            x = utils.Context2D4D(x, x_contextsize)
        if y_contextsize > 0:
            y = utils.Context2D4D(y, y_contextsize)
            w = utils.Context2D4D(w, y_contextsize)
        pieces.append((x, y, w, filename))
    while True:
        x_batch = []
        y_batch = []
        i_batch = []
        w_batch = []
        f_batch = []
        for bi in xrange(batchsize):
            piece_index, time_index = note_index.sample()
            x, y, w, f = pieces[piece_index]
            x_batch.append(x[time_index])
            y_batch.append(y[time_index])
            w_batch.append(w[time_index])
            i_batch.append(time_index)
            f_batch.append(f)
        yield np.vstack(x_batch), np.vstack(y_batch), np.vstack(w_batch), np.hstack(i_batch), f_batch
def diverse_random_sample_for_single_note(basedir, foldname, audio_options, midi_to_groundtruth, batchsize, x_contextsize, y_contextsize, note, weighting, postprocess_weighting, rng):
    """Yield endless minibatches biased towards one particular note.

    Sampling is delegated to
    ``ReverseSingleNoteIndex.sample_single_note(note, ratio=0.5)``, i.e.
    (per the inline comment) roughly half of the drawn frames contain
    ``note``.  Yields ``(x_batch, y_batch, w_batch, frame_indices,
    filenames)``.  (Python 2 code: uses ``xrange``.)
    """
    filenames = open(foldname, 'r').readlines()
    filenames = [f.strip() for f in filenames]
    pieces = []
    note_index = utils.ReverseSingleNoteIndex()
    for piece_index, filename in enumerate(filenames):
        x, y, w = spec_notes_weight_from_file(basedir, filename, audio_options, midi_to_groundtruth, weighting, postprocess_weighting)
        note_index.update(piece_index, y.astype(np.int8))
        if x_contextsize > 0:
            x = utils.Context2D4D(x, x_contextsize)
        if y_contextsize > 0:
            y = utils.Context2D4D(y, y_contextsize)
            w = utils.Context2D4D(w, y_contextsize)
        pieces.append((x, y, w, filename))
    while True:
        x_batch = []
        y_batch = []
        i_batch = []
        w_batch = []
        f_batch = []
        for bi in xrange(batchsize):
            # half of the time, we'll draw this note?
            piece_index, time_index = note_index.sample_single_note(note, ratio=0.5)
            x, y, w, f = pieces[piece_index]
            x_batch.append(x[time_index])
            y_batch.append(y[time_index])
            w_batch.append(w[time_index])
            i_batch.append(time_index)
            f_batch.append(f)
        yield np.vstack(x_batch), np.vstack(y_batch), np.vstack(w_batch), np.hstack(i_batch), f_batch
| 34.727778 | 183 | 0.62774 |
4012af62093e6076315429211125f0c71fa924b9 | 2,943 | py | Python | polypheny/errors.py | polypheny/Polypheny-Connector-Python | 7425eb01d76209783898a01b7a75e3923e911e67 | [
"Apache-2.0"
] | 3 | 2021-10-31T17:34:47.000Z | 2021-11-01T13:31:11.000Z | polypheny/errors.py | polypheny/Polypheny-Connector-Python | 7425eb01d76209783898a01b7a75e3923e911e67 | [
"Apache-2.0"
] | null | null | null | polypheny/errors.py | polypheny/Polypheny-Connector-Python | 7425eb01d76209783898a01b7a75e3923e911e67 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019-2021 The Polypheny Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# According to DB API 2.0: https://www.python.org/dev/peps/pep-0249/#exceptions
# Public exception hierarchy exported by this module (DB API 2.0, PEP 249).
__all__ = [
    'Warning', 'Error', 'InterfaceError', 'DatabaseError', 'DataError',
    'OperationalError', 'IntegrityError', 'InternalError',
    'ProgrammingError', 'NotSupportedError',
]

# Root of the hierarchy; DB API 2.0 allows deriving from the standard
# Exception class.
_StandardError = Exception
class Warning(_StandardError):
    """Not yet used by this package; only defined for compatibility
    with DB API 2.0."""
class Error(_StandardError):
    """Base class of every error exception raised by this package.

    Catching ``Error`` therefore catches all package errors with a single
    except clause.  The constructor arguments are stored, in order, in
    ``self.args`` and exposed through the read-only properties below.
    """

    def __init__(self, message, code=None, sqlstate=None, cause=None):
        super(_StandardError, self).__init__(message, code, sqlstate, cause)

    @property
    def message(self):
        msg, _code, _state, _cause = self.args
        return msg

    @property
    def code(self):
        _msg, code, _state, _cause = self.args
        return code

    @property
    def sqlstate(self):
        _msg, _code, state, _cause = self.args
        return state

    @property
    def cause(self):
        _msg, _code, _state, cause = self.args
        return cause
class InterfaceError(Error):
    """Exception raised for errors that are related to the database
    interface (this driver) rather than the database itself."""
class DatabaseError(Error):
    """Exception raised for errors that are related to the database;
    base class of all server-side error exceptions."""
class DataError(DatabaseError):
    """Exception raised for errors that are due to problems with the
    processed data, like division by zero, numeric value out of range,
    etc."""
class OperationalError(DatabaseError):
    """Raised for errors that are related to the database's operation and not
    necessarily under the control of the programmer, e.g. an unexpected
    disconnect occurs, the data source name is not found, a transaction could
    not be processed, a memory allocation error occurred during
    processing, etc."""
class IntegrityError(DatabaseError):
    """Raised when the relational integrity of the database is affected,
    e.g. a foreign key check fails."""
class InternalError(DatabaseError):
    """Raised when the database encounters an internal problem."""
class ProgrammingError(DatabaseError):
    """Raised for programming errors, e.g. table not found, syntax error, etc."""
class NotSupportedError(DatabaseError):
    """Raised when using an API or feature that is not supported by the
    database."""
| 30.65625 | 107 | 0.717975 |
d26d17dd4266bf3b468cc3c7e1e6e100914b0a3c | 2,344 | py | Python | utils-test-app/utils_test_app/tests/test_helpers.py | buahaha/allianceauth-app-utils | 7e0b319c0ac9ff3f4a05635a1dcb017d4e09c8a6 | [
"MIT"
] | null | null | null | utils-test-app/utils_test_app/tests/test_helpers.py | buahaha/allianceauth-app-utils | 7e0b319c0ac9ff3f4a05635a1dcb017d4e09c8a6 | [
"MIT"
] | null | null | null | utils-test-app/utils_test_app/tests/test_helpers.py | buahaha/allianceauth-app-utils | 7e0b319c0ac9ff3f4a05635a1dcb017d4e09c8a6 | [
"MIT"
] | null | null | null | from time import time
from unittest.mock import patch
from django.core.cache import cache
from django.test import TestCase
from app_utils.helpers import humanize_number, throttle
class TestFormatisk(TestCase):
    """Unit tests for ``app_utils.helpers.humanize_number``."""

    def test_should_return_formatted_string_from_number_1(self):
        # when
        result = humanize_number(1260000000)
        # then
        self.assertEqual(result, "1.3b")

    def test_should_return_formatted_string_from_number_2(self):
        # when
        result = humanize_number(123456789)
        # then
        self.assertEqual(result, "123.5m")

    def test_should_return_formatted_string_from_string(self):
        # numeric strings are accepted too
        # when
        result = humanize_number("1234567890")
        # then
        self.assertEqual(result, "1.2b")

    def test_should_raise_value_error_when_type_invalid(self):
        # when/then
        with self.assertRaises(ValueError):
            humanize_number("invalid")

    def test_should_use_custom_magnitude(self):
        # forcing magnitude "b" on a sub-billion value
        # when
        result = humanize_number(123456789, "b")
        # then
        self.assertEqual(result, "0.1b")

    def test_should_format_with_custom_precision(self):
        # when
        result = humanize_number("1234567890", precision=3)
        # then
        self.assertEqual(result, "1.235b")
def my_func():
    """Dummy function spied on (via ``@patch(..., wraps=my_func)``) in
    TestThrottle's tests of ``throttle()``."""
    return "dummy"
@patch(f"{__package__}.test_helpers.my_func", wraps=my_func)
class TestThrottle(TestCase):
    """Unit tests for ``app_utils.helpers.throttle``.

    The class decorator injects a spy wrapping ``my_func`` into every
    test method as ``spy_my_func``.
    """

    def setUp(self) -> None:
        # throttle() keeps its state in the (Django) cache
        cache.clear()

    def test_should_run_once(self, spy_my_func):
        # when
        result = throttle(my_func, "test-1", timeout=60)
        # then
        self.assertEqual(spy_my_func.call_count, 1)
        self.assertEqual(result, "dummy")

    def test_should_run_twice_only(self, spy_my_func):
        # hammer throttle() for ~1.1s with a 1s timeout -> two executions
        # given
        start = time()
        # when
        while time() < start + 1.1:
            throttle(my_func, "test-1", timeout=1)
        # then
        self.assertEqual(spy_my_func.call_count, 2)

    def test_should_once_per_context_id(self, spy_my_func):
        # each context id is throttled independently
        # when
        throttle(my_func, "test-1", timeout=60)
        throttle(my_func, "test-2", timeout=60)
        throttle(my_func, "test-1", timeout=60)
        throttle(my_func, "test-2", timeout=60)
        # then
        self.assertEqual(spy_my_func.call_count, 2)
ab6b24149c4446b8b6d0ac546775798925ed75ee | 415 | py | Python | panoptes_aggregation/tests/reducer_tests/test_sw_variant_reducer.py | amyrebecca/aggregation-for-caesar | 5f0d884932312010f9caeb8ebfcfe358f490e41f | [
"Apache-2.0"
] | null | null | null | panoptes_aggregation/tests/reducer_tests/test_sw_variant_reducer.py | amyrebecca/aggregation-for-caesar | 5f0d884932312010f9caeb8ebfcfe358f490e41f | [
"Apache-2.0"
] | null | null | null | panoptes_aggregation/tests/reducer_tests/test_sw_variant_reducer.py | amyrebecca/aggregation-for-caesar | 5f0d884932312010f9caeb8ebfcfe358f490e41f | [
"Apache-2.0"
] | null | null | null | from panoptes_aggregation import reducers
from .base_test_class import ReducerTestNoProcessing
extracted_data = [
{'variants': ['a', 'b']},
{},
{'variants': ['c']}
]
reduced_data = {
'variants': [
'a',
'b',
'c'
]
}
TestSWVariantsReducer = ReducerTestNoProcessing(
reducers.sw_variant_reducer,
extracted_data,
reduced_data,
'Test SW variants reducer'
)
| 17.291667 | 52 | 0.624096 |
a5e82d5cf02e2e05ce4c5873f8ee20b112e1f5e1 | 406 | py | Python | energylenserver/fields.py | manaswis/energylensplus | dee76dfd4a9948b906acd3e77cf28900744ef19a | [
"Apache-2.0"
] | null | null | null | energylenserver/fields.py | manaswis/energylensplus | dee76dfd4a9948b906acd3e77cf28900744ef19a | [
"Apache-2.0"
] | null | null | null | energylenserver/fields.py | manaswis/energylensplus | dee76dfd4a9948b906acd3e77cf28900744ef19a | [
"Apache-2.0"
] | null | null | null | from django.db.models import fields
# from south.modelsinspector import add_introspection_rules
class BigAutoField(fields.AutoField):
    """AutoField variant stored as a 64-bit auto-incrementing integer on
    MySQL backends."""

    def db_type(self, connection):
        # use BIGINT only when the backend module looks like MySQL;
        # otherwise fall back to the default AutoField column type
        if 'mysql' in connection.__class__.__module__:
            return 'bigint AUTO_INCREMENT'
        return super(BigAutoField, self).db_type(connection)
# add_introspection_rules([], ["^energylenserver\.fields\.BigAutoField"])
| 31.230769 | 73 | 0.746305 |
5c35e6274a01773c6409b6bfd6b6db2197a10a8e | 1,441 | py | Python | caffe2/python/helpers/control_ops.py | kickers18/caffe2 | f69232c9237174f8e272c0fc59d3e28af52842f2 | [
"Apache-2.0"
] | 585 | 2015-08-10T02:48:52.000Z | 2021-12-01T08:46:59.000Z | caffe2/python/helpers/control_ops.py | mingzhe09088/caffe2 | 8f41717c46d214aaf62b53e5b3b9b308b5b8db91 | [
"Apache-2.0"
] | 27 | 2018-04-14T06:44:22.000Z | 2018-08-01T18:02:39.000Z | caffe2/python/helpers/control_ops.py | mingzhe09088/caffe2 | 8f41717c46d214aaf62b53e5b3b9b308b5b8db91 | [
"Apache-2.0"
] | 183 | 2015-08-10T02:49:04.000Z | 2021-12-01T08:47:13.000Z | # Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package control_ops
# Module caffe2.python.helpers.control_ops
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python.control_ops_util import add_if_op, add_while_op
def cond(model, cond_blob, external_blobs, then_model, else_model=None):
    """Add an If operator to ``model.net`` that executes ``then_model``
    when ``cond_blob`` holds true, and ``else_model`` (if given) otherwise."""
    else_net = else_model.net if else_model else None
    add_if_op(model.net, cond_blob, external_blobs, then_model.net, else_net)
def loop(model, cond_blob, external_blobs, loop_model, cond_model=None):
    """Add a While operator to ``model.net`` that repeats ``loop_model``
    while ``cond_blob`` holds true (``cond_model``, if given, recomputes
    the condition between iterations)."""
    cond_net = cond_model.net if cond_model else None
    add_while_op(model.net, cond_blob, external_blobs, loop_model.net, cond_net)
| 32.75 | 78 | 0.693963 |
30e95d5fb76732d8283d34db12bb9b2ccb33ef11 | 5,664 | py | Python | qa/rpc-tests/bind_test.py | zahidaliayub/straks | c55ada5e0f3749c4f7f710e55aade2a442effd6a | [
"MIT"
] | 67 | 2017-11-21T01:32:30.000Z | 2022-03-17T10:17:53.000Z | qa/rpc-tests/bind_test.py | zahidaliayub/straks | c55ada5e0f3749c4f7f710e55aade2a442effd6a | [
"MIT"
] | 22 | 2017-11-21T15:43:59.000Z | 2019-06-19T12:40:30.000Z | qa/rpc-tests/bind_test.py | zahidaliayub/straks | c55ada5e0f3749c4f7f710e55aade2a442effd6a | [
"MIT"
] | 52 | 2017-11-21T06:05:34.000Z | 2022-03-17T10:18:47.000Z | #!/usr/bin/env python
# Copyright (c) 2014 The Straks Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Test for -rpcbind, as well as -rpcallowip and -rpcconnect
# Add python-straksrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-straksrpc"))
import json
import shutil
import subprocess
import tempfile
import traceback
from straksrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
from netutil import *
def run_bind_test(tmpdir, allow_ips, connect_to, addresses, expected):
    '''
    Launch one node with the requested -rpcallowip / -rpcbind parameters,
    connect to it, and check that the set of addresses the process really
    bound equals `expected` (a list of (address, port) pairs).
    '''
    want = set((addr_to_hex(host), port) for (host, port) in expected)
    args = ['-disablewallet', '-nolisten']
    if allow_ips:
        args.extend('-rpcallowip=' + ip for ip in allow_ips)
    args.extend('-rpcbind=' + addr for addr in addresses)
    nodes = start_nodes(1, tmpdir, [args], connect_to)
    try:
        pid = straksd_processes[0].pid
        assert_equal(set(get_bind_addrs(pid)), want)
    finally:
        stop_nodes(nodes)
        wait_straksds()
def run_allowip_test(tmpdir, allow_ips, rpchost):
    '''
    Launch one node restricted by -rpcallowip and issue a getinfo RPC
    against it at `rpchost` (a non-localhost address).
    '''
    args = ['-disablewallet', '-nolisten']
    args += ['-rpcallowip=' + ip for ip in allow_ips]
    nodes = start_nodes(1, tmpdir, [args])
    try:
        # Reach the node through the non-loopback interface.
        url = "http://rt:rt@%s:%d" % (rpchost, START_RPC_PORT,)
        node = AuthServiceProxy(url)
        node.getinfo()
    finally:
        node = None  # drop the reference so the connection is collected and closed
        stop_nodes(nodes)
        wait_straksds()
def run_test(tmpdir):
    """Exercise -rpcbind/-rpcallowip combinations against `tmpdir` datadirs.

    Linux-only: the bound-address checks rely on OS-specific network
    statistics queries in netutil.
    """
    # sys.platform is 'linux2' on Python 2 but 'linux' on Python 3; accept both.
    assert(sys.platform.startswith('linux'))
    # find the first non-loopback interface for testing
    non_loopback_ip = None
    for name,ip in all_interfaces():
        if ip != '127.0.0.1':
            non_loopback_ip = ip
            break
    if non_loopback_ip is None:
        assert(not 'This test requires at least one non-loopback IPv4 interface')
    print("Using interface %s for testing" % non_loopback_ip)
    # check default without rpcallowip (IPv4 and IPv6 localhost)
    run_bind_test(tmpdir, None, '127.0.0.1', [],
        [('127.0.0.1', 11100), ('::1', 11100)])
    # check default with rpcallowip (IPv6 any)
    run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', [],
        [('::0', 11100)])
    # check only IPv4 localhost (explicit)
    run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
        [('127.0.0.1', START_RPC_PORT)])
    # check only IPv4 localhost (explicit) with alternative port
    run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
        [('127.0.0.1', 32171)])
    # check only IPv4 localhost (explicit) with multiple alternative ports on same host
    run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
        [('127.0.0.1', 32171), ('127.0.0.1', 32172)])
    # check only IPv6 localhost (explicit)
    run_bind_test(tmpdir, ['[::1]'], '[::1]', ['[::1]'],
        [('::1', 11100)])
    # check both IPv4 and IPv6 localhost (explicit)
    run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
        [('127.0.0.1', START_RPC_PORT), ('::1', START_RPC_PORT)])
    # check only non-loopback interface
    run_bind_test(tmpdir, [non_loopback_ip], non_loopback_ip, [non_loopback_ip],
        [(non_loopback_ip, START_RPC_PORT)])
    # Check that with invalid rpcallowip, we are denied
    run_allowip_test(tmpdir, [non_loopback_ip], non_loopback_ip)
    try:
        run_allowip_test(tmpdir, ['1.1.1.1'], non_loopback_ip)
        assert(not 'Connection not denied by rpcallowip as expected')
    except ValueError:
        pass
def main():
    """Parse command-line options, run the bind/allowip tests, and exit
    with status 0 on success or 1 on failure."""
    import optparse
    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
                      help="Leave straksds and test.* datadir on exit or error")
    parser.add_option("--srcdir", dest="srcdir", default="../../src",
                      help="Source directory containing straksd/straks-cli (default: %default%)")
    parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
                      help="Root directory for datadirs")
    (options, args) = parser.parse_args()
    # Make the freshly-built binaries take precedence on PATH.
    os.environ['PATH'] = options.srcdir + ":" + os.environ['PATH']
    check_json_precision()
    success = False
    try:
        print("Initializing test directory " + options.tmpdir)
        if not os.path.isdir(options.tmpdir):
            os.makedirs(options.tmpdir)
        initialize_chain(options.tmpdir)
        run_test(options.tmpdir)
        success = True
    except AssertionError as e:
        # str(e) works on Python 2 and 3; e.message was removed in Python 3.
        print("Assertion failed: " + str(e))
    except Exception as e:
        print("Unexpected exception caught during testing: " + str(e))
        traceback.print_tb(sys.exc_info()[2])
    if not options.nocleanup:
        print("Cleaning up")
        wait_straksds()
        shutil.rmtree(options.tmpdir)
    if success:
        print("Tests successful")
        sys.exit(0)
    else:
        print("Failed")
        sys.exit(1)
# Script entry point.
if __name__ == '__main__':
    main()
| 37.019608 | 110 | 0.644244 |
cdd4509c219207bcaa7a94f6c333f636e8e7e9e5 | 38,568 | py | Python | client/batteryclient/api/users_api.py | martinjrobins/battery-api | c416bdf487f3d41901c7155245ffce7330bdc496 | [
"BSD-3-Clause"
] | 2 | 2021-11-11T11:03:22.000Z | 2022-01-25T19:29:52.000Z | client/batteryclient/api/users_api.py | martinjrobins/battery-api | c416bdf487f3d41901c7155245ffce7330bdc496 | [
"BSD-3-Clause"
] | null | null | null | client/batteryclient/api/users_api.py | martinjrobins/battery-api | c416bdf487f3d41901c7155245ffce7330bdc496 | [
"BSD-3-Clause"
] | null | null | null | """
Battery Data API
A standard API for accessing battery experiment datasets and metadata # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: martin.robinson@cs.ox.ac.uk
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from batteryclient.api_client import ApiClient, Endpoint as _Endpoint
from batteryclient.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from batteryclient.model.cell import Cell
from batteryclient.model.dataset import Dataset
from batteryclient.model.equipment import Equipment
from batteryclient.model.user import User
class UsersApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def __get_cell(
self,
id,
**kwargs
):
"""get information on a single cell # noqa: E501
Returns information on a cell with the given id. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_cell(id, async_req=True)
>>> result = thread.get()
Args:
id (int): Numeric ID of the resource to get
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Cell
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
return self.call_with_http_info(**kwargs)
self.get_cell = _Endpoint(
settings={
'response_type': (Cell,),
'auth': [
'bearerAuth'
],
'endpoint_path': '/cell/{id}',
'operation_id': 'get_cell',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'id',
],
'required': [
'id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(int,),
},
'attribute_map': {
'id': 'id',
},
'location_map': {
'id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__get_cell
)
def __get_cells(
self,
**kwargs
):
"""gets information on all cells # noqa: E501
Returns information on all cells. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_cells(async_req=True)
>>> result = thread.get()
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[Cell]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.call_with_http_info(**kwargs)
self.get_cells = _Endpoint(
settings={
'response_type': ([Cell],),
'auth': [
'bearerAuth'
],
'endpoint_path': '/cell',
'operation_id': 'get_cells',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__get_cells
)
def __get_column(
self,
id,
column_id,
**kwargs
):
"""gets a single column of data from a dataset # noqa: E501
Returns a column of a dataset as a binary blob. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_column(id, column_id, async_req=True)
>>> result = thread.get()
Args:
id (int): Numeric ID of the resource to get
column_id (int): Numeric ID of the column to get
Keyword Args:
precision (str): Whether to format the response as a single (32-bit) or double (64-bit) array . [optional] if omitted the server will use the default value of "single"
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
file_type
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
kwargs['column_id'] = \
column_id
return self.call_with_http_info(**kwargs)
self.get_column = _Endpoint(
settings={
'response_type': (file_type,),
'auth': [
'bearerAuth'
],
'endpoint_path': '/dataset/{id}/{column_id}',
'operation_id': 'get_column',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'id',
'column_id',
'precision',
],
'required': [
'id',
'column_id',
],
'nullable': [
],
'enum': [
'precision',
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
('precision',): {
"SINGLE": "single",
"DOUBLE": "double"
},
},
'openapi_types': {
'id':
(int,),
'column_id':
(int,),
'precision':
(str,),
},
'attribute_map': {
'id': 'id',
'column_id': 'column_id',
'precision': 'precision',
},
'location_map': {
'id': 'path',
'column_id': 'path',
'precision': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/octet-stream'
],
'content_type': [],
},
api_client=api_client,
callable=__get_column
)
def __get_dataset(
self,
id,
**kwargs
):
"""get information on a single dataset # noqa: E501
Returns metadata on all the dataset corresponding to the given id. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_dataset(id, async_req=True)
>>> result = thread.get()
Args:
id (int): Numeric ID of the resource to get
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Dataset
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
return self.call_with_http_info(**kwargs)
self.get_dataset = _Endpoint(
settings={
'response_type': (Dataset,),
'auth': [
'bearerAuth'
],
'endpoint_path': '/dataset/{id}',
'operation_id': 'get_dataset',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'id',
],
'required': [
'id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(int,),
},
'attribute_map': {
'id': 'id',
},
'location_map': {
'id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__get_dataset
)
def __get_datasets(
self,
**kwargs
):
"""gets information on all datasets # noqa: E501
Returns metadata on all the datasets. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_datasets(async_req=True)
>>> result = thread.get()
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[Dataset]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.call_with_http_info(**kwargs)
self.get_datasets = _Endpoint(
settings={
'response_type': ([Dataset],),
'auth': [
'bearerAuth'
],
'endpoint_path': '/dataset',
'operation_id': 'get_datasets',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__get_datasets
)
def __get_equipment(
self,
id,
**kwargs
):
"""get information on a single item of test equipment # noqa: E501
Returns information on the test equipment with the given id. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_equipment(id, async_req=True)
>>> result = thread.get()
Args:
id (int): Numeric ID of the resource to get
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Equipment
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
return self.call_with_http_info(**kwargs)
self.get_equipment = _Endpoint(
settings={
'response_type': (Equipment,),
'auth': [
'bearerAuth'
],
'endpoint_path': '/equipment/{id}',
'operation_id': 'get_equipment',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'id',
],
'required': [
'id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(int,),
},
'attribute_map': {
'id': 'id',
},
'location_map': {
'id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__get_equipment
)
def __get_equipments(
self,
**kwargs
):
"""gets information on all recorded test equipment # noqa: E501
Returns information on all test equipment. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_equipments(async_req=True)
>>> result = thread.get()
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[Equipment]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.call_with_http_info(**kwargs)
self.get_equipments = _Endpoint(
settings={
'response_type': ([Equipment],),
'auth': [
'bearerAuth'
],
'endpoint_path': '/equipment',
'operation_id': 'get_equipments',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__get_equipments
)
def __get_user(
self,
id,
**kwargs
):
"""get information on a single user # noqa: E501
Returns information on a user with the given id. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_user(id, async_req=True)
>>> result = thread.get()
Args:
id (int): Numeric ID of the resource to get
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
User
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
return self.call_with_http_info(**kwargs)
self.get_user = _Endpoint(
settings={
'response_type': (User,),
'auth': [
'bearerAuth'
],
'endpoint_path': '/user/{id}',
'operation_id': 'get_user',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'id',
],
'required': [
'id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(int,),
},
'attribute_map': {
'id': 'id',
},
'location_map': {
'id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__get_user
)
def __get_users(
self,
**kwargs
):
"""gets information on all users # noqa: E501
Returns information on all users. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_users(async_req=True)
>>> result = thread.get()
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[User]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.call_with_http_info(**kwargs)
self.get_users = _Endpoint(
settings={
'response_type': ([User],),
'auth': [
'bearerAuth'
],
'endpoint_path': '/user',
'operation_id': 'get_users',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__get_users
)
| 35.481141 | 183 | 0.450347 |
534e2edfa2ca593d0dc6aa4e539a4ebf229936b5 | 4,633 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_11_01/operations/_service_tags_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_11_01/operations/_service_tags_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_11_01/operations/_service_tags_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from typing import Any, Callable, Dict, Generic, Optional, TypeVar
    T = TypeVar('T')
    # Signature of the optional ``cls`` callback operations accept to
    # post-process the raw pipeline response before it is returned.
    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ServiceTagsOperations(object):
    """ServiceTagsOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2020_11_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client used to build and send the HTTP requests.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        location,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.ServiceTagsListResult"
        """Gets a list of service tag information resources.
        :param location: The location that will be used as a reference for version (not as a filter
         based on location, you will get the list of service tags with prefix details across all regions
         but limited to the cloud that your subscription belongs to).
        :type location: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ServiceTagsListResult, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_11_01.models.ServiceTagsListResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Optional hook allowing the caller to transform the raw response.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ServiceTagsListResult"]
        # Map well-known auth/404/409 statuses to typed exceptions; callers may extend it.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        accept = "application/json"
        # Construct URL from the metadata template attached to this method below.
        url = self.list.metadata['url']  # type: ignore
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Send the GET request through the (non-streaming) client pipeline.
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ServiceTagsListResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/serviceTags'}  # type: ignore
| 44.980583 | 139 | 0.680553 |
e51ef8faee980bcce4816a4cb223fef5db9102bb | 8,652 | py | Python | main_app/models.py | AlexGeniusMan/ARTWAY-PROJECT | 0430cf2359e3b78ef4eb25466e5871ab2ff2bfcd | [
"Apache-2.0"
] | 1 | 2020-12-12T13:07:34.000Z | 2020-12-12T13:07:34.000Z | main_app/models.py | AlexGeniusMan/ARTWAY-PROJECT | 0430cf2359e3b78ef4eb25466e5871ab2ff2bfcd | [
"Apache-2.0"
] | null | null | null | main_app/models.py | AlexGeniusMan/ARTWAY-PROJECT | 0430cf2359e3b78ef4eb25466e5871ab2ff2bfcd | [
"Apache-2.0"
] | null | null | null | from django.contrib.auth.models import AbstractUser
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from django.db import models
import qrcode
# from io import BytesIO
# from django.core.files import File
from PIL import Image
class Ticket(models.Model):
    """A museum admission ticket, identified by its token and optionally
    backed by a printable PDF."""

    token = models.CharField(_("Токен"), max_length=30)
    created_at = models.DateTimeField(_("Время создания"), default=timezone.now)
    pdf = models.FileField(_("PDF"), upload_to='tickets', blank=True)
    museum = models.ForeignKey(
        'Museum',
        on_delete=models.CASCADE,
        verbose_name='Музей',
        related_name='tickets',
        null=True,
    )

    class Meta:
        verbose_name = 'Билет'
        verbose_name_plural = 'Билеты'

    def __str__(self):
        return 'Билет №{}'.format(self.id)
class Artifact(models.Model):
    # A museum exhibit with up to five photos, audio tracks and named links,
    # plus an auto-generated QR code pointing at its public page.
    name = models.CharField(_("Название"), max_length=100)
    img_1 = models.ImageField(_("Фотография #1"), null=True, upload_to='artifacts/photos', blank=True)
    img_2 = models.ImageField(_("Фотография #2"), null=True, upload_to='artifacts/photos', blank=True)
    img_3 = models.ImageField(_("Фотография #3"), null=True, upload_to='artifacts/photos', blank=True)
    img_4 = models.ImageField(_("Фотография #4"), null=True, upload_to='artifacts/photos', blank=True)
    img_5 = models.ImageField(_("Фотография #5"), null=True, upload_to='artifacts/photos', blank=True)
    audio_1 = models.FileField(_("Аудио #1"), upload_to='artifacts/audios', blank=True)
    audio_2 = models.FileField(_("Аудио #2"), upload_to='artifacts/audios', blank=True)
    audio_3 = models.FileField(_("Аудио #3"), upload_to='artifacts/audios', blank=True)
    audio_4 = models.FileField(_("Аудио #4"), upload_to='artifacts/audios', blank=True)
    audio_5 = models.FileField(_("Аудио #5"), upload_to='artifacts/audios', blank=True)
    link_name_1 = models.CharField(_("Название ссылки #1"), max_length=50, blank=True)
    link_name_2 = models.CharField(_("Название ссылки #2"), max_length=50, blank=True)
    link_name_3 = models.CharField(_("Название ссылки #3"), max_length=50, blank=True)
    link_name_4 = models.CharField(_("Название ссылки #4"), max_length=50, blank=True)
    link_name_5 = models.CharField(_("Название ссылки #5"), max_length=50, blank=True)
    link_value_1 = models.CharField(_("Ссылка #1"), max_length=1000, blank=True)
    link_value_2 = models.CharField(_("Ссылка #2"), max_length=1000, blank=True)
    link_value_3 = models.CharField(_("Ссылка #3"), max_length=1000, blank=True)
    link_value_4 = models.CharField(_("Ссылка #4"), max_length=1000, blank=True)
    link_value_5 = models.CharField(_("Ссылка #5"), max_length=1000, blank=True)
    description = models.TextField(_("Описание"), max_length=10000, blank=True)
    qr_code = models.ImageField(_('QR code'), upload_to='artifacts/qrs', blank=True)
    # Plain integer holding the id of the artifact shown above this one
    # (ordering), not a ForeignKey.
    prev = models.IntegerField(_("Экспонат выше"), null=True, blank=True)
    hall = models.ForeignKey('Hall', on_delete=models.CASCADE, verbose_name='Зал',
                             related_name='artifacts', null=True)
    class Meta:
        verbose_name = 'Экспонат'
        verbose_name_plural = 'Экспонаты'
    def __str__(self):
        return f'{self.name} ({self.id})'
    @classmethod
    def create(cls, name, description, hall, prev,
               img_1,
               img_2,
               img_3,
               img_4,
               img_5,
               audio_1,
               audio_2,
               audio_3,
               audio_4,
               audio_5,
               link_name_1,
               link_name_2,
               link_name_3,
               link_name_4,
               link_name_5,
               link_value_1,
               link_value_2,
               link_value_3,
               link_value_4,
               link_value_5,
               ):
        # Alternate constructor: builds an unsaved instance; the caller is
        # responsible for calling save().
        artifact = cls(name=name, description=description, hall=hall, prev=prev,
                       img_1=img_1,
                       img_2=img_2,
                       img_3=img_3,
                       img_4=img_4,
                       img_5=img_5,
                       audio_1=audio_1,
                       audio_2=audio_2,
                       audio_3=audio_3,
                       audio_4=audio_4,
                       audio_5=audio_5,
                       link_name_1=link_name_1,
                       link_name_2=link_name_2,
                       link_name_3=link_name_3,
                       link_name_4=link_name_4,
                       link_name_5=link_name_5,
                       link_value_1=link_value_1,
                       link_value_2=link_value_2,
                       link_value_3=link_value_3,
                       link_value_4=link_value_4,
                       link_value_5=link_value_5,
                       )
        return artifact
    def save(self, *args, **kwargs):
        # First save assigns self.id, which the QR payload and filename need;
        # the second save at the end persists the qr_code path.
        super().save(*args, **kwargs)
        qr = qrcode.QRCode(version=1, box_size=15, border=2)
        # NOTE(review): public URL is hard-coded here — consider deriving it
        # from settings/Sites instead.
        qr.add_data('https://devgang.ru/artifacts/' + str(self.id))
        qr.make(fit=True)
        img = qr.make_image(fill='black', back_color='white')
        # Paste the QR onto a fixed 500x500 white canvas before encoding JPEG.
        canvas = Image.new('RGB', (500, 500), 'white')
        canvas.paste(img)
        fname = f'qr_{self.id}.jpeg'
        # buffer = BytesIO()
        # NOTE(review): writes relative to the process CWD; assumes MEDIA_ROOT
        # is ./media — TODO confirm.
        canvas.save('media/artifacts/qrs/' + fname, 'jpeg')
        canvas.close()
        self.qr_code = 'artifacts/qrs/' + fname
        # self.qr_code.save(fname, File(buffer), save=False)
        super().save(*args, **kwargs)
    # def save(self, *args, **kwargs):
    #     super().save(*args, **kwargs)
    #     print(self.img_1)
    #     im1 = Image.open(self.img_1)
    #     fname = f'{self.id}-mini.jpeg'
    #     print(fname)
    #     im1.save('media/Products/' + fname, "JPEG", quality=10)
    #     self.img_mini = 'Products/' + fname
    #     print(self.img_mini)
    #     super().save(*args, **kwargs)
# class ArtifactLink(models.Model):
# name = models.CharField(_("Название ссылки"), max_length=50)
# link = models.CharField(_("Ссылка"), max_length=1000)
# artifact = models.ForeignKey('Artifact', on_delete=models.CASCADE, verbose_name='Экспонат',
# related_name='links', null=True, blank=True)
#
# class Meta:
# verbose_name = 'Ссылка'
# verbose_name_plural = 'Ссылки'
class Hall(models.Model):
    """An exhibition hall inside a location."""

    name = models.CharField(_("Название"), max_length=100)
    # Plain id of the hall listed above this one (manual ordering).
    prev = models.IntegerField(_("Зал выше"), null=True, blank=True)
    location = models.ForeignKey(
        'Location',
        on_delete=models.CASCADE,
        verbose_name='Локация',
        related_name='halls',
        null=True,
    )

    class Meta:
        verbose_name = 'Зал'
        verbose_name_plural = 'Залы'

    def __str__(self):
        return '{} ({})'.format(self.name, self.id)
class Location(models.Model):
    """A physical site (building/branch) belonging to a museum."""

    name = models.CharField(_("Название"), max_length=100)
    # Plain id of the location listed above this one (manual ordering).
    prev = models.IntegerField(_("Локация выше"), null=True, blank=True)
    museum = models.ForeignKey(
        'Museum',
        on_delete=models.CASCADE,
        verbose_name='Музей',
        related_name='locations',
        null=True,
    )

    class Meta:
        verbose_name = 'Локация'
        verbose_name_plural = 'Локации'

    def __str__(self):
        return '{} ({})'.format(self.name, self.id)
class Museum(models.Model):
    """Top-level tenant: a museum with its tickets, locations and staff."""

    name = models.CharField(_("Название"), max_length=100)
    img = models.ImageField(_("Фотография"), null=True, upload_to='museums', blank=True)
    description = models.TextField(_("Описание"), max_length=10000, blank=True)
    # How long an issued ticket stays valid (units defined by the consumer).
    ticket_lifetime = models.IntegerField(_("Время действия билета"), default=3)

    class Meta:
        verbose_name = 'Музей'
        verbose_name_plural = 'Музеи'

    def __str__(self):
        return self.name
class User(AbstractUser):
    """Custom user with a full Cyrillic name and an optional museum link."""

    # Application roles as (stored value, human-readable label) pairs.
    ROLES = (
        ('service_super_admin', 'Супер-админ сервиса'),
        ('museum_super_admin', 'Супер-админ музея'),
        ('museum_admin', 'Админ музея'),
        ('museum_cashier', 'Кассир музея'),
    )
    last_name = models.CharField(_("Фамилия"), max_length=50)
    first_name = models.CharField(_("Имя"), max_length=50)
    middle_name = models.CharField(_("Отчество"), max_length=50, blank=True)
    museum = models.ForeignKey(
        'Museum',
        on_delete=models.CASCADE,
        verbose_name='Музей',
        related_name='admins',
        null=True,
        blank=True,
    )
    # Fields prompted for by createsuperuser in addition to the username.
    REQUIRED_FIELDS = ['email', 'last_name', 'first_name', 'middle_name']

    class Meta:
        verbose_name = 'Пользователь'
        verbose_name_plural = 'Пользователи'

    def __str__(self):
        return ' '.join((self.last_name, self.first_name, self.middle_name))
| 38.453333 | 102 | 0.606334 |
f909fbd3181609db35efaa7231c2d220af282e14 | 393 | py | Python | profiles/asgi.py | Arun1508/profiel-reat-api-learn | cf5bb772cc2aa27cc52874f3199cf7cc6a6cbacc | [
"MIT"
] | null | null | null | profiles/asgi.py | Arun1508/profiel-reat-api-learn | cf5bb772cc2aa27cc52874f3199cf7cc6a6cbacc | [
"MIT"
] | null | null | null | profiles/asgi.py | Arun1508/profiel-reat-api-learn | cf5bb772cc2aa27cc52874f3199cf7cc6a6cbacc | [
"MIT"
] | null | null | null | """
ASGI config for profiles project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the settings module (unless already set) before the
# application object is constructed.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'profiles.settings')
# Module-level ASGI callable that servers (uvicorn/daphne) import.
application = get_asgi_application()
| 23.117647 | 78 | 0.78626 |
1486894440727a9f1d7479ecb3a8072860770fae | 839 | py | Python | labscript_c_extensions/runviewer/__init__.py | rpanderson/labscript-c-extensions | 55be41e177f8527c4157e00f9052ee55f7fa20e7 | [
"BSD-2-Clause"
] | null | null | null | labscript_c_extensions/runviewer/__init__.py | rpanderson/labscript-c-extensions | 55be41e177f8527c4157e00f9052ee55f7fa20e7 | [
"BSD-2-Clause"
] | 2 | 2020-06-01T08:59:24.000Z | 2021-11-19T11:55:20.000Z | labscript_c_extensions/runviewer/__init__.py | rpanderson/labscript-c-extensions | 55be41e177f8527c4157e00f9052ee55f7fa20e7 | [
"BSD-2-Clause"
] | 3 | 2020-06-01T08:23:04.000Z | 2021-06-17T16:56:47.000Z | #####################################################################
# #
# /runviewer/__init__.py #
# #
# Copyright 2013, Monash University #
# #
# This file is part of the program labscript-c-extensions, in the #
# labscript suite (see http://labscriptsuite.org), and is licensed #
# under the Simplified BSD License. See the license.txt file in the #
# root of the project for the full license. #
# #
##################################################################### | 69.916667 | 69 | 0.281287 |
fd7d0aa3e4cc81a6ae1ee8e13ec28da6d77dc1ea | 174 | py | Python | integration/setup.py | ashishknitcs/jiva | 8ac5ebe5ba6194b5d823d3626a49805c325ee77e | [
"Apache-2.0"
] | 21 | 2017-01-06T13:57:48.000Z | 2019-07-08T18:23:45.000Z | integration/setup.py | ashishknitcs/jiva | 8ac5ebe5ba6194b5d823d3626a49805c325ee77e | [
"Apache-2.0"
] | 30 | 2016-11-15T15:51:42.000Z | 2020-06-09T06:04:22.000Z | integration/setup.py | ashishknitcs/jiva | 8ac5ebe5ba6194b5d823d3626a49805c325ee77e | [
"Apache-2.0"
] | 22 | 2016-11-14T09:29:04.000Z | 2018-10-29T17:55:54.000Z | from distutils.core import setup
setup(
name='Longhorn Integration Tests',
version='0.1',
packages=[
'core',
'data',
],
license='ASL 2.0',
)
| 14.5 | 38 | 0.563218 |
e133b337357cd432456bf3f49a3debd966b67179 | 1,407 | py | Python | MangAdventure/middleware.py | a-mere-peasant/MangAdventure | afbcdb5ab68bfc801550c8383568f7265e70b5ab | [
"MIT"
] | null | null | null | MangAdventure/middleware.py | a-mere-peasant/MangAdventure | afbcdb5ab68bfc801550c8383568f7265e70b5ab | [
"MIT"
] | null | null | null | MangAdventure/middleware.py | a-mere-peasant/MangAdventure | afbcdb5ab68bfc801550c8383568f7265e70b5ab | [
"MIT"
] | null | null | null | from re import MULTILINE, findall, search
class XPBMiddleware:
    """Stamps every response with ``X-Powered-By: MangAdventure`` unless
    the header is already present."""

    def __init__(self, get_response):
        # Next middleware/view in the chain.
        self.get_response = get_response

    def __call__(self, request):
        resp = self.get_response(request)
        resp.setdefault('X-Powered-By', 'MangAdventure')
        return resp
class PreloadMiddleware:
    """Mirrors ``rel="preload"`` tags found in HTML responses as an HTTP
    ``Link`` header so clients can start fetching the resources early."""

    # Matches <link>/<script>/<img> tags that carry a rel="...preload..." attribute.
    _PATTERN = r'(<(link|script|img)[^>]+?rel="[^>]*?preload[^>]*?"[^>]*?/?>)'

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        response = self.get_response(request)
        # Only HTML bodies are scanned; everything else passes through untouched.
        if 'text/html' not in response['Content-Type']:
            return response
        body = str(response.content)
        header_values = []
        for match in findall(self._PATTERN, body, MULTILINE):
            src = self._get_link_src(match)
            as_attr = search(r'as="(.+?)"', match[0])
            if src is not None and as_attr is not None:
                header_values.append(
                    '<{}>; as={}; rel=preload'.format(src.group(1), as_attr.group(1))
                )
        if header_values:
            response['Link'] = ', '.join(header_values)
        return response

    def _get_link_src(self, link):
        # <link> tags reference their target via href=, the others via src=.
        attr = r'href="(.+?)"' if link[1] == 'link' else r'src="(.+?)"'
        return search(attr, link[0])
# Public names exported by this module.
__all__ = ['XPBMiddleware', 'PreloadMiddleware']
| 29.3125 | 65 | 0.558635 |
911e2382deba2517a5121cf7bd5a2772e252ba3d | 5,637 | py | Python | merkle.py | mellowcroc/merkle-tree | 386d265a8fb7ef10f69722826297968493a76810 | [
"MIT"
] | null | null | null | merkle.py | mellowcroc/merkle-tree | 386d265a8fb7ef10f69722826297968493a76810 | [
"MIT"
] | null | null | null | merkle.py | mellowcroc/merkle-tree | 386d265a8fb7ef10f69722826297968493a76810 | [
"MIT"
] | null | null | null | import sys
import hashlib
import math
import numpy as np
if sys.version_info < (3, 6):
import sha3
class Node:
def __init__(self, value, left_child=None, right_child=None):
self.value = value
self.left_child = left_child
self.right_child = right_child
self.parent = None
def __lt__(self, other):
return self.value < other.value
def setParent(self, parent):
self.parent = parent
class MerkleTree:
    # Merkle tree over SHA3-224 hex digests. Leaves are sorted in reverse
    # order and, at every level, the lexicographically larger hash is placed
    # on the left before hashing the concatenated pair — verify() relies on
    # the same normalization. NOTE: the print() calls throughout are debug
    # output left in by the author.
    def __init__(self):
        # Sorted leaf Node list; set by create_from_data*().
        self.leafNodes = None
        # Root Node of the built tree; set by traverse().
        self.root = None
    def create_from_data_list(self, dataList):
        # Build the tree from a list of strings (one leaf per string).
        self.leafNodes = self.create_leaf_nodes_from_data_list(dataList)
        self.traverse(self.leafNodes, None)
    def create_from_data(self, data, parseSize):
        # Build the tree from one blob split into parseSize-sized chunks.
        self.leafNodes = self.create_leaf_nodes_from_data(data, parseSize)
        self.traverse(self.leafNodes, None)
    def create_leaf_nodes_from_data(self, data, parse_size):
        # assumes ``data`` is a bytes-like object (passed to sha3.update as-is)
        leaf_nodes = []
        data_size = len(data)
        index = 0
        while index < data_size:
            sha3 = hashlib.sha3_224()
            # Last chunk may be shorter than parse_size.
            parseData = data[index : min(index + parse_size, data_size)]
            sha3.update(parseData)
            leaf_nodes.append(Node(sha3.hexdigest()))
            index += parse_size
        # Reverse-sorted so pairing is deterministic regardless of input order.
        leaf_nodes.sort(reverse=True)
        return leaf_nodes
    def create_leaf_nodes_from_data_list(self, data_list):
        # assumes ``data_list`` holds str items (each is .encode()d)
        leaf_nodes = []
        for data in data_list:
            sha3 = hashlib.sha3_224()
            sha3.update(data.encode())
            leaf_nodes.append(Node(sha3.hexdigest()))
        leaf_nodes.sort(reverse=True)
        return leaf_nodes
    def print_pre_order(self, node):
        # Debug dump: node, then left subtree, then right subtree.
        print("\nvalue: ", node.value)
        print("parent: ", node.parent.value if node.parent != None else node.parent)
        if node.left_child != None: self.print_pre_order(node.left_child)
        if node.right_child != None: self.print_pre_order(node.right_child)
    def print_in_order(self, node):
        # Debug dump: left subtree, node, right subtree.
        if node.left_child != None: self.print_in_order(node.left_child)
        print("\nvalue: ", node.value)
        print("parent: ", node.parent.value if node.parent != None else node.parent)
        if node.right_child != None: self.print_in_order(node.right_child)
    def print_post_order(self, node):
        # Debug dump: left subtree, right subtree, node.
        if node.left_child != None: self.print_post_order(node.left_child)
        if node.right_child != None: self.print_post_order(node.right_child)
        print("\nvalue: ", node.value)
        print("parent: ", node.parent.value if node.parent != None else node.parent)
    def traverse(self, nodes, oddNode):
        # Recursively hash adjacent pairs into a parent level until a single
        # root remains. ``oddNode`` carries a leftover node from an odd-sized
        # level down to the next level that can absorb it.
        print("traverse")
        print("len(nodes): ", len(nodes))
        if oddNode != None:
            print("oddNode: ", oddNode.value)
        else:
            print("oddNode: ", oddNode)
        if len(nodes) == 1 and oddNode == None :
            self.root = nodes[0]
            return self.root
        if len(nodes) % 2 == 1:
            if oddNode != None:
                # Re-insert the carried node to make the level even.
                nodes.append(oddNode)
                oddNode = None
            else:
                # Carry the trailing node to a later level.
                oddNode = nodes.pop()
        parents = []
        for i in range(int(len(nodes) / 2)):
            left_child = nodes[i * 2]
            right_child = nodes[i * 2 + 1]
            print("left_child.value: ", left_child.value)
            print("right_child.value: ", right_child.value)
            # Normalize pair order: the larger hash always goes on the left.
            if left_child.value < right_child.value: left_child, right_child = right_child, left_child
            print("left_child.value: ", left_child.value)
            print("right_child.value: ", right_child.value)
            sha3 = hashlib.sha3_224()
            sha3.update((left_child.value + right_child.value).encode())
            print("parent.value: ", sha3.hexdigest())
            parent = Node(sha3.hexdigest(), left_child, right_child)
            parents.append(parent)
            left_child.parent = parent
            right_child.parent = parent
        return self.traverse(parents, oddNode)
    def getProofs(self, leafData):
        # Return the sibling-hash path (Merkle proof) for a leaf's raw string
        # data, or None (implicitly) if the leaf is not in the tree.
        sha3 = hashlib.sha3_224()
        sha3.update(leafData.encode())
        leafNode = self.search(self.root, sha3.hexdigest())
        if leafNode == None:
            print("could not find given leaf")
            return
        siblings = []
        self.getSibling(leafNode, siblings)
        print("siblings len: ", len(siblings))
        for i in range(len(siblings)):
            print("i: ", siblings[i])
        return siblings
    def getSibling(self, node, siblings):
        # Walk upward, appending the sibling hash at each level.
        parent = node.parent
        if parent == None:
            return
        if node == parent.left_child:
            siblings.append(parent.right_child.value)
        else:
            siblings.append(parent.left_child.value)
        self.getSibling(parent, siblings)
    def search(self, node, value):
        # Depth-first lookup of a node by hash value; returns None if absent.
        if node == None:
            return None
        if node.value == value:
            return node
        leftResult = self.search(node.left_child, value)
        if leftResult != None: return leftResult
        rightResult = self.search(node.right_child, value)
        if rightResult != None: return rightResult
    def verify(self, root, data, siblings):
        # Recompute the root from leaf data plus a proof, applying the same
        # larger-hash-first ordering used in traverse().
        sha3 = hashlib.sha3_224()
        sha3.update(data.encode())
        # NOTE: ``hash`` shadows the builtin within this method.
        hash = sha3.hexdigest()
        for siblingHash in siblings:
            if hash < siblingHash: hash, siblingHash = siblingHash, hash
            sha3 = hashlib.sha3_224()
            sha3.update((hash + siblingHash).encode())
            hash = sha3.hexdigest()
        if root == hash: return True
        return False
| 35.012422 | 102 | 0.599078 |
aab3c9d65b213353870d44b1612631945c28150f | 1,033 | py | Python | utils/cluster_validity_index.py | ClayLiu/Soft-ClusteringAlgorithms | 38dc8c2ac610f996c79760de00631840c784029c | [
"MIT"
] | null | null | null | utils/cluster_validity_index.py | ClayLiu/Soft-ClusteringAlgorithms | 38dc8c2ac610f996c79760de00631840c784029c | [
"MIT"
] | null | null | null | utils/cluster_validity_index.py | ClayLiu/Soft-ClusteringAlgorithms | 38dc8c2ac610f996c79760de00631840c784029c | [
"MIT"
] | 1 | 2020-08-10T15:48:46.000Z | 2020-08-10T15:48:46.000Z | import numpy as np
import pandas as pd
from sklearn import metrics
def get_cluster(U: np.ndarray) -> np.ndarray:
    """Hard-assign each sample (column of the membership matrix ``U``)
    to the cluster (row) with the largest membership value."""
    return U.argmax(axis=0)
def NMI(y : np.ndarray, label : np.ndarray) -> float:
    """Normalized mutual information between prediction ``y`` and ground truth ``label``."""
    return metrics.normalized_mutual_info_score(label, y)
def AMI(y : np.ndarray, label : np.ndarray) -> float:
    """Adjusted mutual information between prediction ``y`` and ground truth ``label``."""
    return metrics.adjusted_mutual_info_score(label, y)
def ARI(y : np.ndarray, label : np.ndarray) -> float:
    """Adjusted Rand index between prediction ``y`` and ground truth ``label``."""
    return metrics.adjusted_rand_score(label, y)
def ACC(y : np.ndarray, label : np.ndarray) -> float:
    """Plain accuracy of ``y`` against ``label`` (labels must already be aligned)."""
    return metrics.accuracy_score(label, y)
# Index functions and their display names, kept in matching order.
f_list = [NMI, AMI, ARI, ACC]
indexs_name_list = ['NMI', 'AMI', 'ARI', 'ACC']
def get_all_indices(y : np.ndarray, label : np.ndarray) -> pd.Series:
    """Evaluate every clustering validity index in ``f_list`` for the
    predicted assignment ``y`` against the ground truth ``label``.

    Returns a pandas Series indexed by ``indexs_name_list``
    ('NMI', 'AMI', 'ARI', 'ACC'), in matching order. (The original
    annotation claimed ``np.ndarray``; a Series was always returned.)
    """
    # Iterate f_list itself instead of a hard-coded range(4) so the function
    # stays correct if an index is added to or removed from the table.
    values = [index_fn(y, label) for index_fn in f_list]
    return pd.Series(data=values, index=indexs_name_list)
if __name__ == '__main__':
    # Smoke test: a degenerate all-ones labelling vs a half/half split.
    a = [1] * 20
    b = [0] * 10 + [1] * 10
    print(a)
    print(b)
    print(NMI(a, b))
    print(AMI(a, b))
c8388a6e9771b9578ffc64e005772954331f8a9d | 675 | py | Python | misc/rit.py | dw/acid | 3aabb3940f23c052ed7a009cff5d84cc50b099fb | [
"Apache-2.0"
] | 15 | 2015-09-24T03:57:49.000Z | 2020-08-25T22:44:20.000Z | misc/rit.py | dw/acid | 3aabb3940f23c052ed7a009cff5d84cc50b099fb | [
"Apache-2.0"
] | 2 | 2015-06-21T02:06:20.000Z | 2019-11-14T14:02:39.000Z | misc/rit.py | dw/acid | 3aabb3940f23c052ed7a009cff5d84cc50b099fb | [
"Apache-2.0"
] | 1 | 2019-09-11T03:13:52.000Z | 2019-09-11T03:13:52.000Z |
# Tickle old reverse iteration bug
import acid.keylib
import acid.engines
import acid._iterators
le = acid.engines.ListEngine()
le.put(acid.keylib.packs(('a', 'b'), 'z'), 'b')
le.put(acid.keylib.packs(('b', 'c'), 'z'), 'b')
it = acid._iterators.BasicIterator(le, 'z')
print
print 'it.keys:', it.keys
print 'it.data:', `it.data`
print
it.reverse()
print 'here'
res = next(it)
print 'there'
print 'res:', res
print 'res.keys:', res.keys
print 'res.data:', `res.data`
print
res = next(it)
print 'res:', res
print 'res.keys:', res.keys
print 'res.data:', `res.data`
print
res = next(it)
print 'res:', res
print 'res.keys:', res.keys
print 'res.data:', `res.data`
print
| 16.463415 | 47 | 0.659259 |
5862b95a4944f7d17c2030d945355191a2122715 | 1,156 | py | Python | knodle_experiments/old/majority_bert.py | knodle/knodle-experiments | 0a635f3a181432901509b59511ef9d8f8ffb3187 | [
"Apache-2.0"
] | null | null | null | knodle_experiments/old/majority_bert.py | knodle/knodle-experiments | 0a635f3a181432901509b59511ef9d8f8ffb3187 | [
"Apache-2.0"
] | 1 | 2021-05-18T08:56:39.000Z | 2021-05-18T08:56:39.000Z | knodle_experiments/old/majority_bert.py | knodle/knodle-experiments | 0a635f3a181432901509b59511ef9d8f8ffb3187 | [
"Apache-2.0"
] | null | null | null | from typing import Dict
from transformers import AutoModelForSequenceClassification, AdamW
from knodle.trainer.baseline.majority_config import MajorityConfig
from knodle.trainer.baseline.bert import MajorityBertTrainer
def get_majority_bert_trainer(
    train_x, train_rule_matches_z, mapping_rules_labels_t, config: Dict
) -> MajorityBertTrainer:
    """Build a MajorityBertTrainer around a pretrained DistilBERT
    sequence classifier.

    (The previous docstring mentioned logistic regression /
    SimpleDsModelTrainer, which did not match this implementation.)

    :param train_x: model input features for training.
    :param train_rule_matches_z: rule-match matrix Z for the training data.
    :param mapping_rules_labels_t: rule-to-label mapping matrix T.
    :param config: dict with a 'hyp_params' sub-dict providing
        'learning_rate', 'batch_size', 'num_epochs' and
        'filter_empty_labels'.
    :return: a configured, not-yet-run MajorityBertTrainer.
    """
    model = AutoModelForSequenceClassification.from_pretrained('distilbert-base-uncased')
    model.train()
    custom_model_config = MajorityConfig(
        model=model,
        optimizer_=AdamW(model.parameters(), lr=config.get('hyp_params').get('learning_rate')),
        batch_size=config.get('hyp_params').get('batch_size'),
        epochs=config.get('hyp_params').get('num_epochs'),
        filter_non_labelled=config.get("hyp_params").get("filter_empty_labels")
    )
    trainer = MajorityBertTrainer(
        model,
        mapping_rules_labels_t=mapping_rules_labels_t,
        model_input_x=train_x,
        rule_matches_z=train_rule_matches_z,
        trainer_config=custom_model_config,
    )
    return trainer
| 35.030303 | 95 | 0.747405 |
1fd8a9dec63a664d74f0c12f128c70eba5754ea9 | 8,036 | py | Python | python/kfserving/test/test_storage.py | titoeb/kfserving | b072a76842b57e904dbdf46a136474a22051500d | [
"Apache-2.0"
] | null | null | null | python/kfserving/test/test_storage.py | titoeb/kfserving | b072a76842b57e904dbdf46a136474a22051500d | [
"Apache-2.0"
] | null | null | null | python/kfserving/test/test_storage.py | titoeb/kfserving | b072a76842b57e904dbdf46a136474a22051500d | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import tempfile
import binascii
import unittest.mock as mock
import botocore
import kfserving
import pytest
# Dotted module path used as the mock.patch target for storage backends.
STORAGE_MODULE = 'kfserving.storage'
# *.tar.gz contains a single empty file model.pth
FILE_TAR_GZ_RAW = binascii.unhexlify('1f8b0800bac550600003cbcd4f49cdd12b28c960a01d3030303033315100d1e666a660dac008c28'
                                     '701054313a090a189919981998281a1b1b1a1118382010ddd0407a5c525894540a754656466e464e'
                                     '2560754969686c71ca83fe0f4281805a360140c7200009f7e1bb400060000')
# *.zip contains a single empty file model.pth
FILE_ZIP_RAW = binascii.unhexlify('504b030414000800080035b67052000000000000000000000000090020006d6f64656c2e70746855540'
                                  'd000786c5506086c5506086c5506075780b000104f501000004140000000300504b07080000000002000'
                                  '00000000000504b0102140314000800080035b6705200000000020000000000000009002000000000000'
                                  '0000000a481000000006d6f64656c2e70746855540d000786c5506086c5506086c5506075780b000104f'
                                  '50100000414000000504b0506000000000100010057000000590000000000')
def test_storage_local_path():
    """file:// URIs resolve to the local path with the scheme stripped."""
    abs_path = 'file:///'
    relative_path = 'file://.'
    assert kfserving.Storage.download(abs_path) == abs_path.replace("file://", "", 1)
    assert kfserving.Storage.download(relative_path) == relative_path.replace("file://", "", 1)
def test_storage_local_path_exception():
    """A file:// URI pointing at a nonexistent path raises."""
    not_exist_path = 'file:///some/random/path'
    with pytest.raises(Exception):
        kfserving.Storage.download(not_exist_path)
def test_no_prefix_local_path():
    """Scheme-less paths (absolute or relative) are returned unchanged."""
    abs_path = '/'
    relative_path = '.'
    assert kfserving.Storage.download(abs_path) == abs_path
    assert kfserving.Storage.download(relative_path) == relative_path
class MockHttpResponse(object):
    """Minimal stand-in for ``requests.Response``: exposes ``status_code``,
    a ``raw`` byte stream and a ``Content-Type`` header, and supports use
    as a context manager (``with requests.get(...) as r:``)."""

    def __init__(self, status_code=404, raw=b'', content_type=''):
        self.status_code = status_code
        self.raw = io.BytesIO(raw)
        self.headers = {'Content-Type': content_type}

    def __enter__(self):
        return self

    def __exit__(self, ex_type, ex_val, traceback):
        pass
@mock.patch('requests.get', return_value=MockHttpResponse(status_code=200, content_type='application/octet-stream'))
def test_http_uri_path(_):
    """Plain http URIs (with and without query string) download into out_dir."""
    http_uri = 'http://foo.bar/model.joblib'
    http_with_query_uri = 'http://foo.bar/model.joblib?foo=bar'
    out_dir = '.'
    assert kfserving.Storage.download(http_uri, out_dir=out_dir) == out_dir
    assert kfserving.Storage.download(http_with_query_uri, out_dir=out_dir) == out_dir
    os.remove('./model.joblib')
@mock.patch('requests.get', return_value=MockHttpResponse(status_code=200, content_type='application/octet-stream'))
def test_https_uri_path(_):
    """Plain https URIs (with and without query string) download into out_dir."""
    https_uri = 'https://foo.bar/model.joblib'
    https_with_query_uri = 'https://foo.bar/model.joblib?foo=bar'
    out_dir = '.'
    assert kfserving.Storage.download(https_uri, out_dir=out_dir) == out_dir
    assert kfserving.Storage.download(https_with_query_uri, out_dir=out_dir) == out_dir
    os.remove('./model.joblib')
@mock.patch('requests.get', return_value=MockHttpResponse(status_code=200, content_type='application/x-tar',
                                                          raw=FILE_TAR_GZ_RAW))
def test_http_uri_path_with_tar_gz(_):
    """A .tar.gz download is unpacked; the contained model.pth appears in out_dir."""
    with tempfile.TemporaryDirectory() as out_dir:
        https_uri = 'https://foo.bar/model.tar.gz'
        assert kfserving.Storage.download(https_uri, out_dir=out_dir) == out_dir
        assert os.path.exists(os.path.join(out_dir, 'model.pth'))
@mock.patch('requests.get', return_value=MockHttpResponse(status_code=200, content_type='application/x-tar',
                                                          raw=FILE_TAR_GZ_RAW))
def test_http_uri_path_with_tar_gz_query_params(_):
    """A .tar.gz URI keeps working when a query string is appended."""
    with tempfile.TemporaryDirectory() as out_dir:
        https_with_query_uri = 'https://foo.bar/model.tar.gz?foo=bar'
        assert kfserving.Storage.download(https_with_query_uri, out_dir=out_dir) == out_dir
        assert os.path.exists(os.path.join(out_dir, 'model.pth'))
@mock.patch('requests.get', return_value=MockHttpResponse(status_code=200, content_type='application/zip',
                                                          raw=FILE_ZIP_RAW))
def test_http_uri_path_with_zip(_):
    """A .zip download is unpacked; the contained model.pth appears in out_dir."""
    with tempfile.TemporaryDirectory() as out_dir:
        https_uri = 'https://foo.bar/model.zip'
        assert kfserving.Storage.download(https_uri, out_dir=out_dir) == out_dir
        assert os.path.exists(os.path.join(out_dir, 'model.pth'))
@mock.patch('requests.get', return_value=MockHttpResponse(status_code=200, content_type='application/zip',
                                                          raw=FILE_ZIP_RAW))
def test_http_uri_path_with_zip_query_params(_):
    """A .zip URI keeps working when a query string is appended."""
    with tempfile.TemporaryDirectory() as out_dir:
        https_with_query_uri = 'https://foo.bar/model.zip?foo=bar'
        assert kfserving.Storage.download(https_with_query_uri, out_dir=out_dir) == out_dir
        assert os.path.exists(os.path.join(out_dir, 'model.pth'))
@mock.patch('requests.get', return_value=MockHttpResponse(status_code=404))
def test_nonexistent_uri(_):
    """An HTTP 404 response surfaces as a RuntimeError."""
    non_existent_uri = 'https://theabyss.net/model.joblib'
    with pytest.raises(RuntimeError):
        kfserving.Storage.download(non_existent_uri)
@mock.patch('requests.get', return_value=MockHttpResponse(status_code=200))
def test_uri_no_filename(_):
    """A URI ending in '/' (no filename component) raises ValueError."""
    bad_uri = 'https://foo.bar/test/'
    with pytest.raises(ValueError):
        kfserving.Storage.download(bad_uri)
@mock.patch('requests.get', return_value=MockHttpResponse(status_code=200, content_type='text/html'))
def test_html_content_type(_):
    """A text/html response (e.g. an error page) is rejected with RuntimeError."""
    bad_uri = 'https://some.site.com/test.model'
    with pytest.raises(RuntimeError):
        kfserving.Storage.download(bad_uri)
@mock.patch(STORAGE_MODULE + '.storage')
def test_mock_gcs(mock_storage):
    """gs:// download succeeds against a mocked GCS client yielding one blob."""
    gcs_path = 'gs://foo/bar'
    mock_obj = mock.MagicMock()
    mock_obj.name = 'mock.object'
    mock_storage.Client().bucket().list_blobs().__iter__.return_value = [mock_obj]
    assert kfserving.Storage.download(gcs_path)
def test_storage_blob_exception():
    """An Azure blob URL without valid contents/credentials raises."""
    blob_path = 'https://accountname.blob.core.windows.net/container/some/blob/'
    with pytest.raises(Exception):
        kfserving.Storage.download(blob_path)
@mock.patch(STORAGE_MODULE + '.boto3')
def test_storage_s3_exception(mock_boto3):
    """Errors from the boto3 resource layer propagate out of download()."""
    path = 's3://foo/bar'
    # Create mock client
    mock_s3_resource = mock.MagicMock()
    mock_s3_resource.Bucket.side_effect = Exception()
    mock_boto3.resource.return_value = mock_s3_resource
    with pytest.raises(Exception):
        kfserving.Storage.download(path)
@mock.patch(STORAGE_MODULE + '.boto3')
@mock.patch('urllib3.PoolManager')
def test_no_permission_buckets(mock_connection, mock_boto3):
    """A ClientError from S3 (no credentials / private bucket) propagates."""
    bad_s3_path = "s3://random/path"
    # Access private buckets without credentials
    mock_s3_resource = mock.MagicMock()
    mock_s3_bucket = mock.MagicMock()
    mock_s3_bucket.objects.filter.return_value = [mock.MagicMock()]
    mock_s3_bucket.objects.filter.side_effect = botocore.exceptions.ClientError(
        {}, "GetObject"
    )
    mock_s3_resource.Bucket.return_value = mock_s3_bucket
    mock_boto3.resource.return_value = mock_s3_resource
    with pytest.raises(botocore.exceptions.ClientError):
        kfserving.Storage.download(bad_s3_path)
| 40.791878 | 120 | 0.715032 |
30e7065ceb31a72c640c01ec467538659a249a7c | 752 | py | Python | setup.py | acoomans/prvsn | af6b313c2e779ae4e3a9cdba0b1c3a1f4b4c085e | [
"BSD-2-Clause"
] | null | null | null | setup.py | acoomans/prvsn | af6b313c2e779ae4e3a9cdba0b1c3a1f4b4c085e | [
"BSD-2-Clause"
] | null | null | null | setup.py | acoomans/prvsn | af6b313c2e779ae4e3a9cdba0b1c3a1f4b4c085e | [
"BSD-2-Clause"
] | null | null | null | from setuptools import setup, find_packages
from os import path

# Resolve sibling files relative to this script so builds work from any CWD.
here = path.abspath(path.dirname(__file__))

with open(path.join(here, 'README.md')) as f:
    long_description = f.read()
with open(path.join(here, 'requirements.txt')) as f:
    # One requirement per line; hand setuptools an explicit list rather
    # than the raw file contents.
    requirements = f.read().splitlines()

setup(
    name='prvsn',
    # Versions must be strings; the bare float 0.3 relied on implicit
    # coercion by setuptools.
    version='0.3',
    author='Arnaud Coomans',
    author_email='hello@acoomans.com',
    description='A simple provisioning tool',
    long_description=long_description,
    url='https://github.com/acoomans/prvsn',
    license='BSD',
    platforms='any',
    keywords=[
        'provision',
    ],
    install_requires=requirements,
    scripts=['scripts/prvsn'],
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    test_suite='tests',
)
| 25.066667 | 65 | 0.672872 |
c0048de3b7fdd69ad0be2fe15ce77a7051be9bb1 | 7,723 | py | Python | 06rnn_attention/03_decode_attention.py | sotuken2021/s | 2b37fdcaa60c1f5ac492edddd4a0960882fbc0aa | [
"MIT"
] | 41 | 2021-05-19T15:04:19.000Z | 2022-02-11T15:24:58.000Z | 06rnn_attention/03_decode_attention.py | sotuken2021/s | 2b37fdcaa60c1f5ac492edddd4a0960882fbc0aa | [
"MIT"
] | 1 | 2021-07-19T08:59:36.000Z | 2021-07-25T07:54:29.000Z | 06rnn_attention/03_decode_attention.py | sotuken2021/s | 2b37fdcaa60c1f5ac492edddd4a0960882fbc0aa | [
"MIT"
] | 14 | 2021-05-21T05:47:22.000Z | 2022-03-31T09:11:32.000Z | # -*- coding: utf-8 -*-
#
# Decoding with an RNN Attention Encoder-Decoder model.
#
# Modules needed for the PyTorch-based processing
import torch
from torch.utils.data import DataLoader
# Import the Dataset class implemented for this project
from my_dataset import SequenceDataset
# Numerical computation module (numpy)
import numpy as np
# Import the model definition
from my_model import MyE2EModel
# JSON input/output
import json
# os and sys modules
import os
import sys
#
# Main routine
#
if __name__ == "__main__":
#
# --- settings: begin ---
#
# Token unit
# phone: phonemes, kana: kana, char: characters
unit = 'phone'
# Experiment directory
exp_dir = './exp_train_small'
# Directory containing the evaluation features (feats.scp)
feat_dir_test = '../01compute_features/fbank/test'
# Feature list file for the evaluation data
feat_scp_test = os.path.join(feat_dir_test, 'feats.scp')
# Label file for the evaluation data
label_test = os.path.join(exp_dir, 'data', unit, 'label_test')
# Token list
token_list_path = os.path.join(exp_dir, 'data', unit,
'token_list')
# Directory holding the trained model
model_dir = os.path.join(exp_dir, unit+'_model_attention')
# File with feature mean/std computed on the training data
mean_std_file = os.path.join(model_dir, 'mean_std.txt')
# Trained model file
model_file = os.path.join(model_dir, 'best_model.pt')
# Directory for the decoding results
output_dir = os.path.join(model_dir, 'decode_test')
# Output files for hypotheses and references
hypothesis_file = os.path.join(output_dir, 'hypothesis.txt')
reference_file = os.path.join(output_dir, 'reference.txt')
# Config file written at training time
config_file = os.path.join(model_dir, 'config.json')
# Number of utterances per mini-batch
batch_size = 10
#
# --- settings: end ---
#
# Load the training-time configuration
with open(config_file, mode='r') as f:
config = json.load(f)
# Apply the loaded settings
# Encoder settings
# Number of hidden layers
enc_num_layers = config['enc_num_layers']
# Per-layer sub-sampling factors
enc_sub_sample = config['enc_sub_sample']
# RNN type (LSTM or GRU)
enc_rnn_type = config['enc_rnn_type']
# Hidden layer dimensionality
enc_hidden_dim = config['enc_hidden_dim']
# Projection layer dimensionality
enc_projection_dim = config['enc_projection_dim']
# Whether the encoder RNN is bidirectional (True = yes)
enc_bidirectional = config['enc_bidirectional']
# Attention / decoder settings
# Number of decoder RNN layers
dec_num_layers = config['dec_num_layers']
# Decoder RNN dimensionality
dec_hidden_dim = config['dec_hidden_dim']
# Attention dimensionality
att_hidden_dim = config['att_hidden_dim']
# Filter size used by LocationAwareAttention
att_filter_size = config['att_filter_size']
# Number of filters used by LocationAwareAttention
att_filter_num = config['att_filter_num']
# Temperature used by LocationAwareAttention
att_temperature = config['att_temperature']
# Create the output directory if it does not exist
os.makedirs(output_dir, exist_ok=True)
# Load the feature mean / standard deviation file
with open(mean_std_file, mode='r') as f:
# Read all lines
lines = f.readlines()
# Line 1 (0-based) holds the mean vector,
# line 3 holds the standard deviation vector
mean_line = lines[1]
std_line = lines[3]
# Split on whitespace
feat_mean = mean_line.split()
feat_std = std_line.split()
# Convert to numpy arrays
feat_mean = np.array(feat_mean,
dtype=np.float32)
feat_std = np.array(feat_std,
dtype=np.float32)
# Feature dimensionality
feat_dim = np.size(feat_mean)
# Read the token list into a dictionary;
# index 0 is reserved for blank
# (blank is not actually used by this program)
token_list = {0: '<blank>'}
with open(token_list_path, mode='r') as f:
# Process one line at a time
for line in f:
# Split the line on spaces
# into a list
parts = line.split()
# parts[0] is the token, parts[1] its ID
token_list[int(parts[1])] = parts[0]
# Append the <eos> token at the end of the unit list
eos_id = len(token_list)
token_list[eos_id] = '<eos>'
# This program treats <sos> and <eos>
# as the same token
sos_id = eos_id
# Number of tokens (including blank)
num_tokens = len(token_list)
# Build the neural network model;
# input size is the feature dimensionality,
# output size is the number of tokens
model = MyE2EModel(dim_in=feat_dim,
dim_enc_hid=enc_hidden_dim,
dim_enc_proj=enc_projection_dim,
dim_dec_hid=dec_hidden_dim,
dim_out=num_tokens,
dim_att=att_hidden_dim,
att_filter_size=att_filter_size,
att_filter_num=att_filter_num,
sos_id=sos_id,
att_temperature=att_temperature,
enc_num_layers=enc_num_layers,
dec_num_layers=dec_num_layers,
enc_bidirectional=enc_bidirectional,
enc_sub_sample=enc_sub_sample,
enc_rnn_type=enc_rnn_type)
# Load the trained model parameters
model.load_state_dict(torch.load(model_file))
# Build the dataset over the evaluation data
test_dataset = SequenceDataset(feat_scp_test,
label_test,
feat_mean,
feat_std)
# DataLoader over the evaluation data
test_loader = DataLoader(test_dataset,
batch_size=batch_size,
shuffle=False,
num_workers=4)
# Place the model parameters on the GPU when CUDA is
# available, otherwise on the CPU
if torch.cuda.is_available():
device = torch.device('cuda')
else:
device = torch.device('cpu')
model = model.to(device)
# Put the model in evaluation mode
model.eval()
# Decode while writing hypotheses and reference labels
# to their output files
with open(hypothesis_file, mode='w') as hyp_file, \
open(reference_file, mode='w') as ref_file:
# Pull one mini-batch at a time from the evaluation
# DataLoader and process it, repeating until every
# mini-batch has been handled.
# Each mini-batch carries:
# acoustic features, labels, frame counts,
# label lengths, and utterance IDs
for (features, labels, feat_lens,
label_lens, utt_ids) in test_loader:
# PackedSequence requires the mini-batch to be
# sorted by frame length in descending order,
# so sort it here
sorted_lens, indices = \
torch.sort(feat_lens.view(-1),
dim=0,
descending=True)
features = features[indices]
feat_lens = sorted_lens
# Move the data to the GPU when CUDA is
# available, otherwise keep it on the CPU
features = features.to(device)
# Compute the model output (forward pass)
outputs, out_lens = model(features, feat_lens)
# Process each utterance in the batch
for n in range(outputs.size(0)):
# Outputs are sorted by frame length;
# recover the element index that maps
# back to the original data order
idx = torch.nonzero(indices==n,
as_tuple=False).view(-1)[0]
# Greedy decoder output at each step
_, hyp_per_step = torch.max(outputs[idx], 1)
# Convert to a numpy array
hyp_per_step = hyp_per_step.cpu().numpy()
# Build the recognized token sequence
hypothesis = []
for m in hyp_per_step[:out_lens[idx]]:
if m == eos_id:
break
hypothesis.append(token_list[m])
# Build the reference token sequence
reference = []
for m in labels[n][:label_lens[n]].cpu().numpy():
reference.append(token_list[m])
# Write the results
# (' '.join() turns the list into a
# space-separated string)
hyp_file.write('%s %s\n' \
% (utt_ids[n], ' '.join(hypothesis)))
ref_file.write('%s %s\n' \
% (utt_ids[n], ' '.join(reference)))
| 28.603704 | 66 | 0.569856 |
5011a801b3cd48e0015b064b7ee87928e42ee720 | 3,636 | py | Python | pkgs/ops-pkg/src/genie/libs/ops/static_routing/nxos/static_routing.py | kecorbin/genielibs | 5d3951b8911013691822e73e9c3d0f557ca10f43 | [
"Apache-2.0"
] | null | null | null | pkgs/ops-pkg/src/genie/libs/ops/static_routing/nxos/static_routing.py | kecorbin/genielibs | 5d3951b8911013691822e73e9c3d0f557ca10f43 | [
"Apache-2.0"
] | null | null | null | pkgs/ops-pkg/src/genie/libs/ops/static_routing/nxos/static_routing.py | kecorbin/genielibs | 5d3951b8911013691822e73e9c3d0f557ca10f43 | [
"Apache-2.0"
] | null | null | null | # Genie package
from genie.ops.base import Base
# genie.libs
from genie.libs.parser.nxos.show_static_routing import ShowIpStaticRoute,\
ShowIpv6StaticRoute
class StaticRoute(Base):
'''StaticRouting Ops Object

Maps NX-OS "show ip(v6) static-route" parser output into the
genie ops info structure via add_leaf src/dest key templates.
'''
def learn(self):
'''Learn StaticRouting object

Populates self.info from ShowIpStaticRoute (IPv4) and
ShowIpv6StaticRoute (IPv6), then finalizes with self.make().
'''
# new StaticRouting structure
# Place holder to make it more readable
##############################################
#### Ipv4 ##########
# vrf
# ipv4
# route
# next_hop
# outgoing_interface
# next_hop_vrf N/A
# tag N/A
# track N/A
# preference N/A
# next_hop_list
# next_hop_vrf N/A
# tag N/A
# track N/A
# preference N/A
# Regex-keyed template shared by every leaf below.
src_static_routing_route = '[vrf][(?P<vrf>.*)][address_family][(?P<af>.*)]' \
'[routes][(?P<route>.*)]'
dest_static_routing_route = 'info' + src_static_routing_route
self.add_leaf(cmd=ShowIpStaticRoute,
src=src_static_routing_route + '[route]',
dest=dest_static_routing_route + '[route]'
)
# Per-interface next-hop keys (IPv4).
src_static_routing_intf = src_static_routing_route +'[next_hop][outgoing_interface][(?P<intf>.*)]'
dest_static_routing_intf = 'info' + src_static_routing_intf
req_key =['outgoing_interface','active']
for key in req_key:
self.add_leaf(cmd=ShowIpStaticRoute,
src=src_static_routing_intf + '[{}]'.format(key),
dest=dest_static_routing_intf + '[{}]'.format(key))
# Indexed next-hop-list keys (IPv4).
src_static_routing_hop = src_static_routing_route +'[next_hop][next_hop_list][(?P<index>.*)]'
dest_static_routing_hop = 'info' + src_static_routing_hop
req_key = ['index', 'active', 'next_hop', 'outgoing_interface']
for key in req_key:
self.add_leaf(cmd=ShowIpStaticRoute,
src=src_static_routing_hop + '[{}]'.format(key),
dest=dest_static_routing_hop + '[{}]'.format(key))
##############################################
#### Ipv6 ##########
# vrf
# ipv6
# route
# next_hop
# outgoing_interface
# tag N/A
# active N/A
# track N/A
# next_hop_list
# tag N/A
# track N/A
# active N/A
self.add_leaf(cmd=ShowIpv6StaticRoute,
src=src_static_routing_route + '[route]',
dest=dest_static_routing_route + '[route]'
)
# Per-interface next-hop keys (IPv6).
req_key = ['outgoing_interface', 'next_hop_vrf' ,'preference']
for key in req_key:
self.add_leaf(cmd=ShowIpv6StaticRoute,
src=src_static_routing_intf + '[{}]'.format(key),
dest=dest_static_routing_intf + '[{}]'.format(key))
# Indexed next-hop-list keys (IPv6).
req_key = ['index', 'next_hop', 'outgoing_interface', 'next_hop_vrf', 'preference']
for key in req_key:
self.add_leaf(cmd=ShowIpv6StaticRoute,
src=src_static_routing_hop + '[{}]'.format(key),
dest=dest_static_routing_hop + '[{}]'.format(key))
self.make(final_call=True) | 37.484536 | 106 | 0.476623 |
a6da670965ba2390ccafaf9a6b8e637981c477b7 | 5,199 | py | Python | Cogs/Translate.py | MrAngelDo6pa/MedBotS | 89e19d831507e20d0898114502967b2ad8ecf957 | [
"MIT"
] | 1 | 2021-04-10T19:03:02.000Z | 2021-04-10T19:03:02.000Z | Cogs/Translate.py | flagersgit/CorpBot.py | 33444a8b1a699a0185e6ac7ab235929e6fdb2413 | [
"MIT"
] | null | null | null | Cogs/Translate.py | flagersgit/CorpBot.py | 33444a8b1a699a0185e6ac7ab235929e6fdb2413 | [
"MIT"
] | null | null | null | import functools, asyncio, discord, json, os, string, googletrans
from Cogs import Nullify, DisplayName, Message
from discord.ext import commands
def setup(bot):
    """Register the Translate cog, wiring in the Settings cog dependency."""
    settings_cog = bot.get_cog("Settings")
    bot.add_cog(Translate(bot, settings_cog))
# Requires the googletrans module be installed (imported above)
class Translate(commands.Cog):
"""Discord cog exposing Google-Translate-backed commands:
langlist, detectlang, and tr."""
def __init__(self, bot, settings):
self.bot = bot
self.settings = settings
# googletrans Translator pinned to the public API endpoint.
self.translator = googletrans.Translator(service_urls=["translate.googleapis.com"])
# Rebind the module-level names to the live cog instances.
global Utils, DisplayName
Utils = self.bot.get_cog("Utils")
DisplayName = self.bot.get_cog("DisplayName")
@commands.command(pass_context=True)
async def langlist(self, ctx):
"""Lists available languages."""
description = ""
# One "**Name** - code" line per supported language.
for lang in googletrans.LANGCODES:
description += "**{}** - {}\n".format(string.capwords(lang), googletrans.LANGCODES[lang])
await Message.EmbedText(
title="Language List",
force_pm=True,
description=description,
color=ctx.author
).send(ctx)
@commands.command(pass_context=True)
async def detectlang(self, ctx, *, text):
"""Reports the detected language and certainty of the passed text."""
if text == None: return await ctx.send("Usage: `{}detectlang [text to identify]`".format(ctx.prefix))
# detect() is blocking, so run it in an executor thread.
lang_detect = await self.bot.loop.run_in_executor(None, self.translator.detect, text)
await Message.EmbedText(
title="Detected Language",
description="Detected **{}** ({}) with {:.0%} confidence.".format(
string.capwords(googletrans.LANGUAGES.get(lang_detect.lang.lower(),"Martian?")),
lang_detect.lang.lower(),
lang_detect.confidence
),
color=ctx.author
).send(ctx)
@commands.command(pass_context=True)
async def tr(self, ctx, *, translate = None):
"""Translate some stuff! Takes a phrase, the from language identifier (optional), and the to language identifier.
To see a number of potential language identifiers, use the langlist command.
Example Translation:
$tr Hello there, how are you? en es
Would translate from English to Spanish resulting in:
¿Hola como estás?
If you do not specify the from language, Google translate will attempt to automatically determine it."""
usage = "Usage: `{}tr [words] [from code (optional)] [to code]`".format(ctx.prefix)
if translate == None: return await ctx.send(usage)
word_list = translate.split(" ")
if len(word_list) < 2: return await ctx.send(usage)
# Last word is the target code; the one before it may be a source code.
to_lang = word_list[len(word_list)-1]
from_lang = word_list[len(word_list)-2] if len(word_list) >= 3 else ""
# Get the from language name from the passed code
from_lang_name = googletrans.LANGUAGES.get(from_lang.lower(),None)
# Get the to language name from the passed code
to_lang_name = googletrans.LANGUAGES.get(to_lang.lower(),None)
if not to_lang_name: # No dice on the language :(
return await Message.EmbedText(
title="Something went wrong...",
description="I couldn't find that language!",
color=ctx.author
).send(ctx)
# Join the phrase, dropping one or two trailing language codes
# depending on whether a valid source code was supplied.
trans = " ".join(word_list[:-2] if from_lang_name else word_list[:-1])
# If our from_lang_name is None, we need to auto-detect it
if not from_lang_name:
from_output = await self.bot.loop.run_in_executor(None, self.translator.detect, trans)
from_lang = from_output.lang
from_lang_name = googletrans.LANGUAGES.get(from_lang,"Unknown")
# Run the blocking translate call in an executor thread.
result_output = await self.bot.loop.run_in_executor(None, functools.partial(self.translator.translate, trans, dest=to_lang, src=from_lang))
result = result_output.text
# Explore the results!
if not result:
await Message.EmbedText(
title="Something went wrong...",
description="I wasn't able to translate that!",
color=ctx.author
).send(ctx)
return
if result == trans:
# We got back what we put in...
await Message.EmbedText(
title="Something went wrong...",
description="The text returned from Google was the same as the text put in. Either the translation failed - or you were translating from/to the same language (en -> en)",
color=ctx.author
).send(ctx)
return
await Message.EmbedText(
title="{}, your translation is:".format(DisplayName.name(ctx.author)),
force_pm=True,
color=ctx.author,
description=result,
footer="{} --> {} - Powered by Google Translate".format(string.capwords(from_lang_name), string.capwords(to_lang_name))
).send(ctx)
| 43.689076 | 187 | 0.616657 |
d776be97dd53db0eb83441d49c0b7fe1d3177c97 | 1,395 | py | Python | genre/toolbox/cam_bp/cam_bp/functions/get_surface_mask.py | wagnew3/Amodal-3D-Reconstruction-for-Robotic-Manipulationvia-Stability-and-Connectivity--Release | f55c6b0fac44d9d749e7804d99169a39d30c2111 | [
"MIT"
] | null | null | null | genre/toolbox/cam_bp/cam_bp/functions/get_surface_mask.py | wagnew3/Amodal-3D-Reconstruction-for-Robotic-Manipulationvia-Stability-and-Connectivity--Release | f55c6b0fac44d9d749e7804d99169a39d30c2111 | [
"MIT"
] | null | null | null | genre/toolbox/cam_bp/cam_bp/functions/get_surface_mask.py | wagnew3/Amodal-3D-Reconstruction-for-Robotic-Manipulationvia-Stability-and-Connectivity--Release | f55c6b0fac44d9d749e7804d99169a39d30c2111 | [
"MIT"
] | null | null | null | import torch
import numpy as np
from .._ext import cam_bp_lib
from cffi import FFI
ffi = FFI()
def get_vox_surface_cnt(depth_t, fl, cam_dist, res=128):
    """Back-project depth maps into a res^3 voxel hit-count grid.

    `depth_t` is an (N, C, H, W) CUDA tensor; `fl` and `cam_dist` are
    (N, C) CUDA tensors of focal lengths and camera distances.
    """
    n, c = depth_t.size(0), depth_t.size(1)
    # Validate shapes and device placement before calling the CUDA extension.
    assert depth_t.dim() == 4
    assert fl.dim() == 2 and fl.size(1) == c
    assert cam_dist.dim() == 2 and cam_dist.size(1) == c
    assert cam_dist.size(0) == n
    assert fl.size(0) == n
    assert depth_t.is_cuda
    assert fl.is_cuda
    assert cam_dist.is_cuda
    cnt = depth_t.new(n, c, res, res, res).zero_()
    # TDF buffer initialised to one voxel width (1 / res).
    tdf = depth_t.new(n, c, res, res, res).zero_() + 1 / res
    cam_bp_lib.back_projection_forward(depth_t, cam_dist, fl, tdf, cnt)
    return cnt
def get_surface_mask(depth_t, fl=784.4645406, cam_dist=2.0, res=128):
    """Return (surface occupancy, mask) voxel grids for a batch of depth maps.

    Scalar `fl` / `cam_dist` values are broadcast to per-sample (N, C) CUDA
    tensors before back-projection. `depth_t` is an (N, C, H, W) CUDA tensor.
    """
    n = depth_t.size(0)
    nc = depth_t.size(1)
    # isinstance() instead of `type(x) == float` (idiomatic type check);
    # promote scalar camera parameters to (N, C) tensors.
    if isinstance(fl, float):
        fl = torch.FloatTensor(n, nc).cuda().fill_(fl)
    if isinstance(cam_dist, float):
        cam_dist = torch.FloatTensor(n, nc).cuda().fill_(cam_dist)
    cnt = get_vox_surface_cnt(depth_t, fl, cam_dist, res)
    mask = cnt.new(n, nc, res, res, res).zero_()
    cam_bp_lib.get_surface_mask(depth_t, cam_dist, fl, cnt, mask)
    # Clamp hit counts to [0, 1] to obtain a binary surface volume.
    surface_vox = torch.clamp(cnt, min=0.0, max=1.0)
    return surface_vox, mask
| 34.02439 | 71 | 0.643011 |
fe94eb8808e4fe3ce20d216607ced6a35f46ccea | 16,192 | py | Python | eval.py | waiting-gy/Caltech_Pedestrian | bd57a85a5fd4965616fe52f20a990abe8e28dda8 | [
"MIT"
] | null | null | null | eval.py | waiting-gy/Caltech_Pedestrian | bd57a85a5fd4965616fe52f20a990abe8e28dda8 | [
"MIT"
] | null | null | null | eval.py | waiting-gy/Caltech_Pedestrian | bd57a85a5fd4965616fe52f20a990abe8e28dda8 | [
"MIT"
] | null | null | null | """Adapted from:
@longcw faster_rcnn_pytorch: https://github.com/longcw/faster_rcnn_pytorch
@rbgirshick py-faster-rcnn https://github.com/rbgirshick/py-faster-rcnn
Licensed under The MIT License [see LICENSE for details]
"""
from __future__ import print_function
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from data import VOC_ROOT, VOCAnnotationTransform, VOCDetection, BaseTransform
from data import VOC_CLASSES as labelmap
import torch.utils.data as data
from ssd import build_ssd
import sys
import os
import time
import argparse
import numpy as np
import pickle
import cv2
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
def str2bool(v):
    """Interpret common truthy strings ("yes", "true", "t", "1") as True."""
    truthy = ("yes", "true", "t", "1")
    return v.lower() in truthy
# Command-line interface and evaluation-wide configuration.
parser = argparse.ArgumentParser(
description='Single Shot MultiBox Detector Evaluation')
#parser.add_argument('--trained_model',
#default='weights/ssd300_mAP_77.43_v2.pth', type=str,
#help='Trained state_dict file path to open')
parser.add_argument('--trained_model',
default='weights/VOC.pth', type=str,
help='Trained state_dict file path to open')
parser.add_argument('--save_folder', default='eval/', type=str,
help='File path to save results')
parser.add_argument('--confidence_threshold', default=0.01, type=float,
help='Detection confidence threshold')
parser.add_argument('--top_k', default=5, type=int,
help='Further restrict the number of predictions to parse')
parser.add_argument('--cuda', default=False, type=str2bool,
help='Use cuda to train model')
parser.add_argument('--voc_root', default=VOC_ROOT,
help='Location of VOC root directory')
parser.add_argument('--cleanup', default=True, type=str2bool,
help='Cleanup and remove results files following eval')
args = parser.parse_args()
if not os.path.exists(args.save_folder):
os.mkdir(args.save_folder)
# Select the default tensor type once, so later allocations follow --cuda.
if torch.cuda.is_available():
if args.cuda:
torch.set_default_tensor_type('torch.cuda.FloatTensor')
if not args.cuda:
print("WARNING: It looks like you have a CUDA device, but aren't using \
CUDA. Run with --cuda for optimal eval speed.")
torch.set_default_tensor_type('torch.FloatTensor')
else:
torch.set_default_tensor_type('torch.FloatTensor')
# VOC2007 layout: printf-style templates filled with image/set identifiers.
annopath = os.path.join(args.voc_root, 'VOC2007', 'Annotations', '%s.xml')
imgpath = os.path.join(args.voc_root, 'VOC2007', 'JPEGImages', '%s.jpg')
imgsetpath = os.path.join(args.voc_root, 'VOC2007', 'ImageSets',
'Main', '{:s}.txt')
YEAR = '2007'
devkit_path = args.voc_root + 'VOC' + YEAR
# Per-channel BGR pixel means subtracted by BaseTransform.
dataset_mean = (104, 117, 123)
set_type = 'test'
class Timer(object):
    """A simple timer accumulating total/average wall-clock durations."""

    def __init__(self):
        self.total_time = 0.
        self.calls = 0
        self.start_time = 0.
        self.diff = 0.
        self.average_time = 0.

    def tic(self):
        # time.time rather than time.clock: clock does not
        # normalize for multithreading.
        self.start_time = time.time()

    def toc(self, average=True):
        """Stop timing; return the running average (or this call's diff)."""
        self.diff = time.time() - self.start_time
        self.total_time += self.diff
        self.calls += 1
        self.average_time = self.total_time / self.calls
        return self.average_time if average else self.diff
def parse_rec(filename):
    """Parse a PASCAL VOC xml annotation file into a list of object dicts."""
    tree = ET.parse(filename)
    objects = []
    for obj in tree.findall('object'):
        bbox = obj.find('bndbox')
        record = {
            'name': obj.find('name').text,
            'pose': obj.find('pose').text,
            'truncated': int(obj.find('truncated').text),
            'difficult': int(obj.find('difficult').text),
            # VOC boxes are 1-based; convert to 0-based pixel coordinates.
            'bbox': [int(bbox.find('xmin').text) - 1,
                     int(bbox.find('ymin').text) - 1,
                     int(bbox.find('xmax').text) - 1,
                     int(bbox.find('ymax').text) - 1],
        }
        objects.append(record)
    return objects
def get_output_dir(name, phase):
    """Return the directory where experimental artifacts are placed.
    If the directory does not exist, it is created.
    A canonical path is built using the name from an imdb and a network
    (if not None).
    """
    out_dir = os.path.join(name, phase)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    return out_dir
def get_voc_results_file_template(image_set, cls):
    """Per-class results path, e.g.
    VOCdevkit/VOC2007/results/det_test_aeroplane.txt (dir created on demand)."""
    results_dir = os.path.join(devkit_path, 'results')
    if not os.path.exists(results_dir):
        os.makedirs(results_dir)
    filename = 'det_' + image_set + '_%s.txt' % (cls)
    return os.path.join(results_dir, filename)
def write_voc_results_file(all_boxes, dataset):
    """Dump detections to the per-class VOC results files.

    all_boxes[class_index][image_index] is either an empty list or an
    (N, 5) array of [x1, y1, x2, y2, score] rows.
    """
    for cls_ind, cls in enumerate(labelmap):
        print('Writing {:s} VOC results file'.format(cls))
        filename = get_voc_results_file_template(set_type, cls)
        with open(filename, 'wt') as f:
            for im_ind, index in enumerate(dataset.ids):
                dets = all_boxes[cls_ind+1][im_ind]
                # `dets` may be a numpy array; comparing it to [] with ==
                # is elementwise and unreliable, so test its length instead.
                if len(dets) == 0:
                    continue
                # the VOCdevkit expects 1-based indices
                for k in range(dets.shape[0]):
                    f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
                            format(index[1], dets[k, -1],
                                   dets[k, 0] + 1, dets[k, 1] + 1,
                                   dets[k, 2] + 1, dets[k, 3] + 1))
def do_python_eval(output_dir='output', use_07=True):
"""Run voc_eval for every class, print per-class AP and mean AP,
and pickle each class's precision/recall curve into output_dir."""
cachedir = os.path.join(devkit_path, 'annotations_cache')
aps = []
# The PASCAL VOC metric changed in 2010
use_07_metric = use_07
print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
for i, cls in enumerate(labelmap):
filename = get_voc_results_file_template(set_type, cls)
rec, prec, ap = voc_eval(
filename, annopath, imgsetpath.format(set_type), cls, cachedir,
ovthresh=0.5, use_07_metric=use_07_metric)
aps += [ap]
print('AP for {} = {:.4f}'.format(cls, ap))
# Persist the PR curve for offline plotting.
with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
print('Mean AP = {:.4f}'.format(np.mean(aps)))
print('~~~~~~~~')
print('Results:')
for ap in aps:
print('{:.3f}'.format(ap))
print('{:.3f}'.format(np.mean(aps)))
print('~~~~~~~~')
print('')
print('--------------------------------------------------------------')
print('Results computed with the **unofficial** Python eval code.')
print('Results should be very close to the official MATLAB eval code.')
print('--------------------------------------------------------------')
def voc_ap(rec, prec, use_07_metric=True):
    """ ap = voc_ap(rec, prec, [use_07_metric])
    Compute VOC AP given precision and recall.
    If use_07_metric is true, uses the
    VOC 07 11 point method (default:True).
    """
    if use_07_metric:
        # 11-point interpolation: average the max precision at
        # recall thresholds 0.0, 0.1, ..., 1.0.
        ap = 0.
        for threshold in np.arange(0., 1.1, 0.1):
            reachable = rec >= threshold
            p = np.max(prec[reachable]) if np.sum(reachable) > 0 else 0
            ap += p / 11.
        return ap
    # Exact area under the precision envelope, with sentinels appended.
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))
    # Make precision monotonically non-increasing from right to left.
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    # Sum (delta recall) * precision wherever recall changes.
    changed = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[changed + 1] - mrec[changed]) * mpre[changed + 1])
def voc_eval(detpath,
annopath,
imagesetfile,
classname,
cachedir,
ovthresh=0.5,
use_07_metric=True):
"""rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
[ovthresh],
[use_07_metric])
Top level function that does the PASCAL VOC evaluation.
detpath: Path to detections
detpath.format(classname) should produce the detection results file.
annopath: Path to annotations
annopath.format(imagename) should be the xml annotations file.
imagesetfile: Text file containing the list of images, one image per line.
classname: Category name (duh)
cachedir: Directory for caching the annotations
[ovthresh]: Overlap threshold (default = 0.5)
[use_07_metric]: Whether to use VOC07's 11 point AP computation
(default True)
"""
# assumes detections are in detpath.format(classname)
# assumes annotations are in annopath.format(imagename)
# assumes imagesetfile is a text file with each line an image name
# cachedir caches the annotations in a pickle file
# first load gt
if not os.path.isdir(cachedir):
os.mkdir(cachedir)
cachefile = os.path.join(cachedir, 'annots.pkl')
# read list of images
with open(imagesetfile, 'r') as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
if not os.path.isfile(cachefile):
# load annots
recs = {}
for i, imagename in enumerate(imagenames):
recs[imagename] = parse_rec(annopath % (imagename))
if i % 100 == 0:
print('Reading annotation for {:d}/{:d}'.format(
i + 1, len(imagenames)))
# save
print('Saving cached annotations to {:s}'.format(cachefile))
with open(cachefile, 'wb') as f:
pickle.dump(recs, f)
else:
# load
with open(cachefile, 'rb') as f:
recs = pickle.load(f)
# extract gt objects for this class
class_recs = {}
npos = 0
for imagename in imagenames:
R = [obj for obj in recs[imagename] if obj['name'] == classname]
bbox = np.array([x['bbox'] for x in R])
# NOTE(review): np.bool was removed in numpy>=1.24; bool or np.bool_
# is the modern spelling — confirm the pinned numpy version.
difficult = np.array([x['difficult'] for x in R]).astype(np.bool)
det = [False] * len(R)
# Difficult objects are excluded from the positive count.
npos = npos + sum(~difficult)
class_recs[imagename] = {'bbox': bbox,
'difficult': difficult,
'det': det}
# read dets
detfile = detpath.format(classname)
with open(detfile, 'r') as f:
lines = f.readlines()
# NOTE(review): `any(lines) == 1` compares a bool to 1; it works but
# a plain `if lines:` would express the intent directly.
if any(lines) == 1:
# Each line: image_id confidence x1 y1 x2 y2
splitlines = [x.strip().split(' ') for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
# sort by confidence
sorted_ind = np.argsort(-confidence)
sorted_scores = np.sort(-confidence)
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
if BBGT.size > 0:
# compute overlaps
# intersection
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin, 0.)
ih = np.maximum(iymax - iymin, 0.)
inters = iw * ih
# union = area(det) + area(gt) - intersection
uni = ((bb[2] - bb[0]) * (bb[3] - bb[1]) +
(BBGT[:, 2] - BBGT[:, 0]) *
(BBGT[:, 3] - BBGT[:, 1]) - inters)
overlaps = inters / uni
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
if not R['difficult'][jmax]:
if not R['det'][jmax]:
tp[d] = 1.
R['det'][jmax] = 1
else:
# Duplicate detection of an already-matched gt box.
fp[d] = 1.
else:
fp[d] = 1.
# compute precision recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
else:
# No detections for this class: sentinel values.
rec = -1.
prec = -1.
ap = -1.
return rec, prec, ap
def test_net(save_folder, net, cuda, dataset, transform, top_k,
im_size=300, thresh=0.05):
"""Run the SSD net over every image in `dataset`, collect per-class
detections, pickle them, and run the VOC evaluation.

NOTE(review): the `cuda` parameter is never used — the body reads the
module-level `args.cuda` instead; confirm whether that is intentional.
"""
num_images = len(dataset)
# all detections are collected into:
# all_boxes[cls][image] = N x 5 array of detections in
# (x1, y1, x2, y2, score)
# all_boxes = [[[] for _ in range(num_images)]
# for _ in range(len(labelmap)+1)]
all_boxes = [[[] for _ in range(num_images)]
for _ in range(2)]
# timers
_t = {'im_detect': Timer(), 'misc': Timer()}
#output_dir = get_output_dir('ssd300_120000', set_type)
output_dir = get_output_dir('VOC', set_type)
det_file = os.path.join(output_dir, 'detections.pkl')
for i in range(num_images):
im, gt, h, w = dataset.pull_item(i)
x = Variable(im.unsqueeze(0))
if args.cuda:
x = x.cuda()
_t['im_detect'].tic()
detections = net(x).data
detect_time = _t['im_detect'].toc(average=False)
# skip j = 0, because it's the background class
for j in range(1, detections.size(1)):
dets = detections[0, j, :]
# Keep only rows whose score is positive.
mask = dets[:, 0].gt(0.).expand(5, dets.size(0)).t()
dets = torch.masked_select(dets, mask).view(-1, 5)
if dets.size(0) == 0:
continue
# Scale normalized box coordinates back to image pixels.
boxes = dets[:, 1:]
boxes[:, 0] *= w
boxes[:, 2] *= w
boxes[:, 1] *= h
boxes[:, 3] *= h
scores = dets[:, 0].cpu().numpy()
cls_dets = np.hstack((boxes.cpu().numpy(),
scores[:, np.newaxis])).astype(np.float32,
copy=False)
all_boxes[j][i] = cls_dets
print('im_detect: {:d}/{:d} {:.3f}s'.format(i + 1,
num_images, detect_time))
with open(det_file, 'wb') as f:
pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
print('Evaluating detections')
evaluate_detections(all_boxes, output_dir, dataset)
def evaluate_detections(box_list, output_dir, dataset):
"""Write the per-class VOC result files for `box_list`, then run the
python AP evaluation into `output_dir`."""
write_voc_results_file(box_list, dataset)
do_python_eval(output_dir)
if __name__ == '__main__':
# load net
#num_classes = len(labelmap) + 1 # +1 for background
# Hard-coded to 2 here: background + a single foreground class.
num_classes = 2
net = build_ssd('test', 300, num_classes) # initialize SSD
net.load_state_dict(torch.load(args.trained_model))
net.eval()
print('Finished loading model!')
# load data
dataset = VOCDetection(args.voc_root, [('2007', set_type)],
BaseTransform(300, dataset_mean),
VOCAnnotationTransform())
if args.cuda:
net = net.cuda()
cudnn.benchmark = True
# evaluation
test_net(args.save_folder, net, args.cuda, dataset,
BaseTransform(net.size, dataset_mean), args.top_k, 300,
thresh=args.confidence_threshold)
| 36.304933 | 80 | 0.561141 |
ca39e8154b73f2617241da15eaa9766c2304b075 | 689 | py | Python | problems/leetcode/product-of-array-except-self.py | AravindVasudev/datastructures-and-algorithms | d2a45ddff1d13a599e889e02ae00e9c197bf8b0f | [
"MIT"
] | 1 | 2022-01-08T11:01:45.000Z | 2022-01-08T11:01:45.000Z | problems/leetcode/product-of-array-except-self.py | AravindVasudev/datastructures-and-algorithms | d2a45ddff1d13a599e889e02ae00e9c197bf8b0f | [
"MIT"
] | null | null | null | problems/leetcode/product-of-array-except-self.py | AravindVasudev/datastructures-and-algorithms | d2a45ddff1d13a599e889e02ae00e9c197bf8b0f | [
"MIT"
] | null | null | null | # https://leetcode.com/problems/product-of-array-except-self/
class Solution:
    def productExceptSelf(self, nums: List[int]) -> List[int]:
        """Return, for each index i, the product of all nums except nums[i].

        Two sweeps, no division: result[i] first accumulates the product of
        everything left of i, then a right-to-left pass multiplies in the
        product of everything right of i. O(n) time, O(1) extra space beyond
        the output (the two temporary arrays of the original are folded away).
        """
        N = len(nums)
        result = [1] * N
        # Left pass: result[i] = product of nums[0..i-1].
        for i in range(1, N):
            result[i] = result[i - 1] * nums[i - 1]
        # Right pass: fold in the product of nums[i+1..N-1].
        right = 1
        for i in range(N - 1, -1, -1):
            result[i] *= right
            right *= nums[i]
        return result
| 31.318182 | 65 | 0.512337 |
014b235ed621e0464d56a6fe5643129a737c2251 | 1,499 | py | Python | download_models.py | SumanthRH/Learning-to-See-Moving-Objects-in-the-Dark | 14eeac3b0d8fee70c52d2f77fc8e3864321833be | [
"MIT"
] | 119 | 2019-10-31T18:52:21.000Z | 2022-03-31T06:11:17.000Z | download_models.py | SumanthRH/Learning-to-See-Moving-Objects-in-the-Dark | 14eeac3b0d8fee70c52d2f77fc8e3864321833be | [
"MIT"
] | 14 | 2019-11-19T13:44:33.000Z | 2022-02-10T00:34:40.000Z | download_models.py | SumanthRH/Learning-to-See-Moving-Objects-in-the-Dark | 14eeac3b0d8fee70c52d2f77fc8e3864321833be | [
"MIT"
] | 18 | 2019-11-01T03:02:47.000Z | 2021-03-04T08:08:54.000Z | import os
import requests
from config import CHECKPOINT_DIR
def download_file_from_google_drive(id, destination):
    # Download a file from Google Drive by file id, streaming it to
    # `destination` on disk.  Large files make Drive answer with a
    # "can't virus-scan" interstitial instead of the content; in that case
    # the confirm token found in the cookies is replayed in a second
    # request to obtain the real download.
    # NOTE(review): `id` shadows the `id` builtin — kept as-is for
    # backwards compatibility with existing callers.
    URL = "https://docs.google.com/uc?export=download"
    session = requests.Session()
    # stream=True defers body download so we can write it out in chunks.
    response = session.get(URL, params = { 'id' : id }, stream = True)
    token = get_confirm_token(response)
    if token:
        # Second request with the confirm token bypasses the interstitial.
        params = { 'id' : id, 'confirm' : token }
        response = session.get(URL, params = params, stream = True)
    save_response_content(response, destination)
def get_confirm_token(response):
    """Return Google Drive's 'download_warning' cookie value, or None.

    Drive sets a cookie whose name starts with ``download_warning`` when it
    shows the large-file interstitial; its value is the confirm token.
    """
    matches = (
        value
        for name, value in response.cookies.items()
        if name.startswith('download_warning')
    )
    return next(matches, None)
def save_response_content(response, destination):
    """Stream the body of `response` into the file at `destination`."""
    chunk_size = 32768  # 32 KiB per read keeps memory usage flat
    with open(destination, "wb") as out:
        for piece in response.iter_content(chunk_size):
            # Empty chunks are keep-alives, not data — skip them.
            if piece:
                out.write(piece)
# Script entry: fetch the four TensorFlow checkpoint artifacts from Google
# Drive into CHECKPOINT_DIR (imported from the project's config module).
if not os.path.isdir(CHECKPOINT_DIR):
    os.makedirs(CHECKPOINT_DIR)
# NOTE(review): "Dowloading" is misspelled in the user-facing message.
print('Dowloading Trained Model (63Mb)...')
# Each artifact lives in its own Drive file; the ids are hard-coded.
download_file_from_google_drive('1yXeEh2zbP4NQ9ogOO-r7GO9pdrV7yXzr', CHECKPOINT_DIR + '/checkpoint')
download_file_from_google_drive('1yl3mMkvXBZf19XoM38lmDyUuJkYt-Rgb', CHECKPOINT_DIR + '/model.ckpt.index')
download_file_from_google_drive('1YQP0zzbkGH-EaqU3eMIX6MpWB7dD3l6l', CHECKPOINT_DIR + '/model.ckpt.meta')
download_file_from_google_drive('1YbiBNm2iIRuSm4Jb3xSVJ5UrJspIE9cw', CHECKPOINT_DIR + '/model.ckpt.data-00000-of-00001')
print('Done.') | 34.068182 | 120 | 0.724483 |
940c4c93831f568d3dcd33c4a77b04636b19ffc3 | 43,534 | py | Python | sklearn/model_selection/_search_successive_halving.py | jlopezNEU/scikit-learn | 593495eebc3c2f2ffdb244036adf57fab707a47d | [
"BSD-3-Clause"
] | 2 | 2022-03-16T17:33:38.000Z | 2022-03-17T11:50:21.000Z | sklearn/model_selection/_search_successive_halving.py | jlopezNEU/scikit-learn | 593495eebc3c2f2ffdb244036adf57fab707a47d | [
"BSD-3-Clause"
] | 10 | 2022-02-21T20:10:33.000Z | 2022-03-26T16:31:33.000Z | sklearn/model_selection/_search_successive_halving.py | jlopezNEU/scikit-learn | 593495eebc3c2f2ffdb244036adf57fab707a47d | [
"BSD-3-Clause"
] | 1 | 2020-02-16T05:40:12.000Z | 2020-02-16T05:40:12.000Z | from copy import deepcopy
from math import ceil, floor, log
from abc import abstractmethod
from numbers import Integral
import numpy as np
from ._search import BaseSearchCV
from . import ParameterGrid, ParameterSampler
from ..base import is_classifier
from ._split import check_cv, _yields_constant_splits
from ..utils import resample
from ..utils.multiclass import check_classification_targets
from ..utils.validation import _num_samples
__all__ = ["HalvingGridSearchCV", "HalvingRandomSearchCV"]
class _SubsampleMetaSplitter:
    """Splitter that subsamples a given fraction of the dataset"""
    def __init__(self, *, base_cv, fraction, subsample_test, random_state):
        # base_cv: the user-provided CV splitter whose folds get subsampled.
        # fraction: fraction of each fold's indices to keep.
        # subsample_test: if True, the test fold is subsampled as well.
        self.base_cv = base_cv
        self.fraction = fraction
        self.subsample_test = subsample_test
        self.random_state = random_state
    def split(self, X, y, groups=None):
        # Yield (train, test) index arrays: each fold of base_cv is
        # subsampled without replacement down to `fraction` of its size.
        for train_idx, test_idx in self.base_cv.split(X, y, groups):
            train_idx = resample(
                train_idx,
                replace=False,
                random_state=self.random_state,
                n_samples=int(self.fraction * train_idx.shape[0]),
            )
            if self.subsample_test:
                test_idx = resample(
                    test_idx,
                    replace=False,
                    random_state=self.random_state,
                    n_samples=int(self.fraction * test_idx.shape[0]),
                )
            yield train_idx, test_idx
def _top_k(results, k, itr):
    """Return the `k` best candidate params evaluated at iteration `itr`.

    Candidates are ranked by mean test score; the returned array is sorted
    in ascending score order (best candidate last).
    """
    iters = np.asarray(results["iter"])
    scores = np.asarray(results["mean_test_score"])
    params = np.asarray(results["params"])
    mask = np.flatnonzero(iters == itr)
    order = np.argsort(scores[mask])[-k:]
    return np.array(params[mask][order])
class BaseSuccessiveHalving(BaseSearchCV):
    """Implements successive halving.
    Ref:
    Almost optimal exploration in multi-armed bandits, ICML 13
    Zohar Karnin, Tomer Koren, Oren Somekh
    """
    def __init__(
        self,
        estimator,
        *,
        scoring=None,
        n_jobs=None,
        refit=True,
        cv=5,
        verbose=0,
        random_state=None,
        error_score=np.nan,
        return_train_score=True,
        max_resources="auto",
        min_resources="exhaust",
        resource="n_samples",
        factor=3,
        aggressive_elimination=False,
    ):
        # Halving-specific parameters are stored unvalidated; validation is
        # deferred to _check_input_parameters at fit time (sklearn convention).
        super().__init__(
            estimator,
            scoring=scoring,
            n_jobs=n_jobs,
            refit=refit,
            cv=cv,
            verbose=verbose,
            error_score=error_score,
            return_train_score=return_train_score,
        )
        self.random_state = random_state
        self.max_resources = max_resources
        self.resource = resource
        self.factor = factor
        self.min_resources = min_resources
        self.aggressive_elimination = aggressive_elimination
    def _check_input_parameters(self, X, y, groups):
        # Validate constructor parameters against the data and derive the
        # fitted attributes min_resources_ / max_resources_.  Raises
        # ValueError on any inconsistent combination.
        if self.scoring is not None and not (
            isinstance(self.scoring, str) or callable(self.scoring)
        ):
            raise ValueError(
                "scoring parameter must be a string, "
                "a callable or None. Multimetric scoring is not "
                "supported."
            )
        # We need to enforce that successive calls to cv.split() yield the same
        # splits: see https://github.com/scikit-learn/scikit-learn/issues/15149
        if not _yields_constant_splits(self._checked_cv_orig):
            raise ValueError(
                "The cv parameter must yield consistent folds across "
                "calls to split(). Set its random_state to an int, or set "
                "shuffle=False."
            )
        if (
            self.resource != "n_samples"
            and self.resource not in self.estimator.get_params()
        ):
            raise ValueError(
                f"Cannot use resource={self.resource} which is not supported "
                f"by estimator {self.estimator.__class__.__name__}"
            )
        if isinstance(self.max_resources, str) and self.max_resources != "auto":
            raise ValueError(
                "max_resources must be either 'auto' or a positive integer"
            )
        if self.max_resources != "auto" and (
            not isinstance(self.max_resources, Integral) or self.max_resources <= 0
        ):
            raise ValueError(
                "max_resources must be either 'auto' or a positive integer"
            )
        if self.min_resources not in ("smallest", "exhaust") and (
            not isinstance(self.min_resources, Integral) or self.min_resources <= 0
        ):
            raise ValueError(
                "min_resources must be either 'smallest', 'exhaust', "
                "or a positive integer "
                "no greater than max_resources."
            )
        # n_candidates only exists on the random-search subclass.
        if isinstance(self, HalvingRandomSearchCV):
            if self.min_resources == self.n_candidates == "exhaust":
                # for n_candidates=exhaust to work, we need to know what
                # min_resources is. Similarly min_resources=exhaust needs to
                # know the actual number of candidates.
                raise ValueError(
                    "n_candidates and min_resources cannot be both set to 'exhaust'."
                )
            if self.n_candidates != "exhaust" and (
                not isinstance(self.n_candidates, Integral) or self.n_candidates <= 0
            ):
                raise ValueError(
                    "n_candidates must be either 'exhaust' or a positive integer"
                )
        self.min_resources_ = self.min_resources
        if self.min_resources_ in ("smallest", "exhaust"):
            if self.resource == "n_samples":
                n_splits = self._checked_cv_orig.get_n_splits(X, y, groups)
                # please see https://gph.is/1KjihQe for a justification
                magic_factor = 2
                self.min_resources_ = n_splits * magic_factor
                if is_classifier(self.estimator):
                    y = self._validate_data(X="no_validation", y=y)
                    check_classification_targets(y)
                    n_classes = np.unique(y).shape[0]
                    self.min_resources_ *= n_classes
            else:
                self.min_resources_ = 1
            # if 'exhaust', min_resources_ might be set to a higher value later
            # in _run_search
        self.max_resources_ = self.max_resources
        if self.max_resources_ == "auto":
            if not self.resource == "n_samples":
                raise ValueError(
                    "max_resources can only be 'auto' if resource='n_samples'"
                )
            self.max_resources_ = _num_samples(X)
        if self.min_resources_ > self.max_resources_:
            raise ValueError(
                f"min_resources_={self.min_resources_} is greater "
                f"than max_resources_={self.max_resources_}."
            )
        if self.min_resources_ == 0:
            raise ValueError(
                f"min_resources_={self.min_resources_}: you might have passed "
                "an empty dataset X."
            )
        if not isinstance(self.refit, bool):
            raise ValueError(
                f"refit is expected to be a boolean. Got {type(self.refit)} instead."
            )
    @staticmethod
    def _select_best_index(refit, refit_metric, results):
        """Custom refit callable to return the index of the best candidate.
        We want the best candidate out of the last iteration. By default
        BaseSearchCV would return the best candidate out of all iterations.
        Currently, we only support for a single metric thus `refit` and
        `refit_metric` are not required.
        """
        last_iter = np.max(results["iter"])
        last_iter_indices = np.flatnonzero(results["iter"] == last_iter)
        best_idx = np.argmax(results["mean_test_score"][last_iter_indices])
        return last_iter_indices[best_idx]
    def fit(self, X, y=None, groups=None, **fit_params):
        """Run fit with all sets of parameters.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where `n_samples` is the number of samples and
            `n_features` is the number of features.
        y : array-like, shape (n_samples,) or (n_samples, n_output), optional
            Target relative to X for classification or regression;
            None for unsupervised learning.
        groups : array-like of shape (n_samples,), default=None
            Group labels for the samples used while splitting the dataset into
            train/test set. Only used in conjunction with a "Group" :term:`cv`
            instance (e.g., :class:`~sklearn.model_selection.GroupKFold`).
        **fit_params : dict of string -> object
            Parameters passed to the ``fit`` method of the estimator.
        Returns
        -------
        self : object
            Instance of fitted estimator.
        """
        self._checked_cv_orig = check_cv(
            self.cv, y, classifier=is_classifier(self.estimator)
        )
        # Parameter validation needs the data, hence it happens here and not
        # in __init__.
        self._check_input_parameters(
            X=X,
            y=y,
            groups=groups,
        )
        self._n_samples_orig = _num_samples(X)
        super().fit(X, y=y, groups=groups, **fit_params)
        # Set best_score_: BaseSearchCV does not set it, as refit is a callable
        self.best_score_ = self.cv_results_["mean_test_score"][self.best_index_]
        return self
    def _run_search(self, evaluate_candidates):
        # Successive-halving driver: evaluate all candidates with few
        # resources, then repeatedly keep the best ~1/factor of them while
        # multiplying the allocated resources by `factor`.
        candidate_params = self._generate_candidate_params()
        if self.resource != "n_samples" and any(
            self.resource in candidate for candidate in candidate_params
        ):
            # Can only check this now since we need the candidates list
            raise ValueError(
                f"Cannot use parameter {self.resource} as the resource since "
                "it is part of the searched parameters."
            )
        # n_required_iterations is the number of iterations needed so that the
        # last iterations evaluates less than `factor` candidates.
        n_required_iterations = 1 + floor(log(len(candidate_params), self.factor))
        if self.min_resources == "exhaust":
            # To exhaust the resources, we want to start with the biggest
            # min_resources possible so that the last (required) iteration
            # uses as many resources as possible
            last_iteration = n_required_iterations - 1
            self.min_resources_ = max(
                self.min_resources_,
                self.max_resources_ // self.factor**last_iteration,
            )
        # n_possible_iterations is the number of iterations that we can
        # actually do starting from min_resources and without exceeding
        # max_resources. Depending on max_resources and the number of
        # candidates, this may be higher or smaller than
        # n_required_iterations.
        n_possible_iterations = 1 + floor(
            log(self.max_resources_ // self.min_resources_, self.factor)
        )
        if self.aggressive_elimination:
            n_iterations = n_required_iterations
        else:
            n_iterations = min(n_possible_iterations, n_required_iterations)
        if self.verbose:
            print(f"n_iterations: {n_iterations}")
            print(f"n_required_iterations: {n_required_iterations}")
            print(f"n_possible_iterations: {n_possible_iterations}")
            print(f"min_resources_: {self.min_resources_}")
            print(f"max_resources_: {self.max_resources_}")
            print(f"aggressive_elimination: {self.aggressive_elimination}")
            print(f"factor: {self.factor}")
        self.n_resources_ = []
        self.n_candidates_ = []
        for itr in range(n_iterations):
            power = itr  # default
            if self.aggressive_elimination:
                # this will set n_resources to the initial value (i.e. the
                # value of n_resources at the first iteration) for as many
                # iterations as needed (while candidates are being
                # eliminated), and then go on as usual.
                power = max(0, itr - n_required_iterations + n_possible_iterations)
            n_resources = int(self.factor**power * self.min_resources_)
            # guard, probably not needed
            n_resources = min(n_resources, self.max_resources_)
            self.n_resources_.append(n_resources)
            n_candidates = len(candidate_params)
            self.n_candidates_.append(n_candidates)
            if self.verbose:
                print("-" * 10)
                print(f"iter: {itr}")
                print(f"n_candidates: {n_candidates}")
                print(f"n_resources: {n_resources}")
            if self.resource == "n_samples":
                # subsampling will be done in cv.split()
                cv = _SubsampleMetaSplitter(
                    base_cv=self._checked_cv_orig,
                    fraction=n_resources / self._n_samples_orig,
                    subsample_test=True,
                    random_state=self.random_state,
                )
            else:
                # Need copy so that the n_resources of next iteration does
                # not overwrite
                candidate_params = [c.copy() for c in candidate_params]
                for candidate in candidate_params:
                    candidate[self.resource] = n_resources
                cv = self._checked_cv_orig
            more_results = {
                "iter": [itr] * n_candidates,
                "n_resources": [n_resources] * n_candidates,
            }
            results = evaluate_candidates(
                candidate_params, cv, more_results=more_results
            )
            # Survivors of this round become the candidates of the next.
            n_candidates_to_keep = ceil(n_candidates / self.factor)
            candidate_params = _top_k(results, n_candidates_to_keep, itr)
        self.n_remaining_candidates_ = len(candidate_params)
        self.n_required_iterations_ = n_required_iterations
        self.n_possible_iterations_ = n_possible_iterations
        self.n_iterations_ = n_iterations
    @abstractmethod
    def _generate_candidate_params(self):
        # Subclasses return an iterable of candidate parameter dicts.
        pass
    def _more_tags(self):
        # Deep-copy so mutating _xfail_checks does not alter the parent's tags.
        tags = deepcopy(super()._more_tags())
        tags["_xfail_checks"].update(
            {
                "check_fit2d_1sample": (
                    "Fail during parameter check since min/max resources requires"
                    " more samples"
                ),
            }
        )
        return tags
class HalvingGridSearchCV(BaseSuccessiveHalving):
    """Search over specified parameter values with successive halving.

    Every candidate from ``param_grid`` is first evaluated with a small
    amount of resources; at each subsequent iteration only the best
    ``1 / factor`` fraction of candidates is kept and re-evaluated with
    ``factor`` times more resources.

    Read more in the :ref:`User guide <successive_halving_user_guide>`.

    .. note::
      This estimator is still **experimental** for now: the predictions
      and the API might change without any deprecation cycle. To use it,
      you need to explicitly import ``enable_halving_search_cv``::

        >>> # explicitly require this experimental feature
        >>> from sklearn.experimental import enable_halving_search_cv  # noqa
        >>> # now you can import normally from model_selection
        >>> from sklearn.model_selection import HalvingGridSearchCV

    Parameters
    ----------
    estimator : estimator object
        Must provide a ``score`` function, or ``scoring`` must be passed.

    param_grid : dict or list of dictionaries
        Parameter names (string) mapped to lists of settings to try, or a
        list of such dicts, in which case each grid is explored.

    factor : int or float, default=3
        The 'halving' parameter: roughly ``1 / factor`` of the candidates
        are selected for each subsequent iteration.

    resource : ``'n_samples'`` or str, default='n_samples'
        The resource that increases with each iteration: the number of
        samples, or any positive-integer parameter of the base estimator
        (in which case ``max_resources`` cannot be 'auto').

    max_resources : int, default='auto'
        Maximum amount of resource any candidate may use in one iteration.
        'auto' (only valid with ``resource='n_samples'``) means the number
        of samples.

    min_resources : {'exhaust', 'smallest'} or int, default='exhaust'
        Amount of resources ``r0`` allocated to each candidate at the first
        iteration. 'smallest' is a small heuristic value; 'exhaust' picks
        the largest value such that the last iteration uses as many
        resources as possible. Resources used at each iteration are always
        a multiple of ``min_resources``.

    aggressive_elimination : bool, default=False
        Only relevant when there aren't enough resources to reduce the
        candidates to at most ``factor`` by the last iteration. If True,
        the first iteration is 'replayed' until few enough candidates
        remain. See :ref:`aggressive_elimination`.

    cv : int, cross-validation generator or iterable, default=5
        Cross-validation splitting strategy. The folds produced must be
        identical across multiple calls to ``cv.split()`` (use
        ``shuffle=False`` or an integer ``random_state``).

    scoring : str, callable, or None, default=None
        Scorer for the test set; None uses the estimator's score method.

    refit : bool, default=True
        If True, refit an estimator with the best found parameters on the
        whole dataset and expose it as ``best_estimator_``.

    error_score : 'raise' or numeric
        Score assigned when estimator fitting fails; 'raise' re-raises.
        Default is ``np.nan``.

    return_train_score : bool, default=True
        Whether ``cv_results_`` includes training scores (can be costly).

    random_state : int, RandomState instance or None, default=None
        Controls dataset subsampling when ``resources != 'n_samples'``.

    n_jobs : int or None, default=None
        Number of jobs to run in parallel (``-1`` uses all processors).

    verbose : int
        Controls the verbosity: the higher, the more messages.

    Attributes
    ----------
    n_resources_ : list of int
        Resources used at each iteration.
    n_candidates_ : list of int
        Candidates evaluated at each iteration.
    n_remaining_candidates_ : int
        Candidates left after the last iteration.
    max_resources_ : int
        Effective maximum resources per candidate per iteration.
    min_resources_ : int
        Resources allocated per candidate at the first iteration.
    n_iterations_ : int
        Number of iterations actually run.
    n_possible_iterations_ : int
        Iterations possible within the resource budget.
    n_required_iterations_ : int
        Iterations required to get below ``factor`` candidates.
    cv_results_ : dict of numpy (masked) ndarrays
        Search results, importable into a pandas ``DataFrame``. See the
        :ref:`User guide<successive_halving_cv_results>`.
    best_estimator_ : estimator or dict
        Estimator with the highest score; absent if ``refit=False``.
    best_score_ : float
        Mean cross-validated score of the best_estimator.
    best_params_ : dict
        Parameter setting that gave the best results on the hold out data.
    best_index_ : int
        Index into ``cv_results_`` arrays for the best candidate.
    scorer_ : function or a dict
        Scorer used on the held out data.
    n_splits_ : int
        Number of cross-validation splits (folds/iterations).
    refit_time_ : float
        Seconds used for refitting the best model (only if ``refit``).
    multimetric_ : bool
        Whether or not the scorers compute several metrics.
    classes_ : ndarray of shape (n_classes,)
        Class labels (only for classifiers with ``refit``).
    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`.

        .. versionadded:: 1.0

    See Also
    --------
    :class:`HalvingRandomSearchCV`:
        Random search over a set of parameters using successive halving.

    Notes
    -----
    The parameters selected maximize the score of the held-out data,
    according to the scoring parameter.

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.ensemble import RandomForestClassifier
    >>> from sklearn.experimental import enable_halving_search_cv  # noqa
    >>> from sklearn.model_selection import HalvingGridSearchCV
    ...
    >>> X, y = load_iris(return_X_y=True)
    >>> clf = RandomForestClassifier(random_state=0)
    >>> param_grid = {"max_depth": [3, None],
    ...               "min_samples_split": [5, 10]}
    >>> search = HalvingGridSearchCV(clf, param_grid, resource='n_estimators',
    ...                              max_resources=10,
    ...                              random_state=0).fit(X, y)
    >>> search.best_params_  # doctest: +SKIP
    {'max_depth': None, 'min_samples_split': 10, 'n_estimators': 9}
    """

    _required_parameters = ["estimator", "param_grid"]

    def __init__(
        self,
        estimator,
        param_grid,
        *,
        factor=3,
        resource="n_samples",
        max_resources="auto",
        min_resources="exhaust",
        aggressive_elimination=False,
        cv=5,
        scoring=None,
        refit=True,
        error_score=np.nan,
        return_train_score=True,
        random_state=None,
        n_jobs=None,
        verbose=0,
    ):
        # All shared configuration is handled by the successive-halving
        # base class; only the grid itself is stored on this subclass.
        super().__init__(
            estimator,
            factor=factor,
            resource=resource,
            max_resources=max_resources,
            min_resources=min_resources,
            aggressive_elimination=aggressive_elimination,
            cv=cv,
            scoring=scoring,
            refit=refit,
            error_score=error_score,
            return_train_score=return_train_score,
            random_state=random_state,
            n_jobs=n_jobs,
            verbose=verbose,
        )
        self.param_grid = param_grid

    def _generate_candidate_params(self):
        # Candidates are the full cartesian product of the grid.
        return ParameterGrid(self.param_grid)
class HalvingRandomSearchCV(BaseSuccessiveHalving):
"""Randomized search on hyper parameters.
The search strategy starts evaluating all the candidates with a small
amount of resources and iteratively selects the best candidates, using more
and more resources.
The candidates are sampled at random from the parameter space and the
number of sampled candidates is determined by ``n_candidates``.
Read more in the :ref:`User guide<successive_halving_user_guide>`.
.. note::
This estimator is still **experimental** for now: the predictions
and the API might change without any deprecation cycle. To use it,
you need to explicitly import ``enable_halving_search_cv``::
>>> # explicitly require this experimental feature
>>> from sklearn.experimental import enable_halving_search_cv # noqa
>>> # now you can import normally from model_selection
>>> from sklearn.model_selection import HalvingRandomSearchCV
Parameters
----------
estimator : estimator object
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_candidates : int, default='exhaust'
The number of candidate parameters to sample, at the first
iteration. Using 'exhaust' will sample enough candidates so that the
last iteration uses as many resources as possible, based on
`min_resources`, `max_resources` and `factor`. In this case,
`min_resources` cannot be 'exhaust'.
factor : int or float, default=3
The 'halving' parameter, which determines the proportion of candidates
that are selected for each subsequent iteration. For example,
``factor=3`` means that only one third of the candidates are selected.
resource : ``'n_samples'`` or str, default='n_samples'
Defines the resource that increases with each iteration. By default,
the resource is the number of samples. It can also be set to any
parameter of the base estimator that accepts positive integer
values, e.g. 'n_iterations' or 'n_estimators' for a gradient
boosting estimator. In this case ``max_resources`` cannot be 'auto'
and must be set explicitly.
max_resources : int, default='auto'
The maximum number of resources that any candidate is allowed to use
for a given iteration. By default, this is set ``n_samples`` when
``resource='n_samples'`` (default), else an error is raised.
min_resources : {'exhaust', 'smallest'} or int, default='smallest'
The minimum amount of resource that any candidate is allowed to use
for a given iteration. Equivalently, this defines the amount of
resources `r0` that are allocated for each candidate at the first
iteration.
- 'smallest' is a heuristic that sets `r0` to a small value:
- ``n_splits * 2`` when ``resource='n_samples'`` for a regression
problem
- ``n_classes * n_splits * 2`` when ``resource='n_samples'`` for a
classification problem
- ``1`` when ``resource != 'n_samples'``
- 'exhaust' will set `r0` such that the **last** iteration uses as
much resources as possible. Namely, the last iteration will use the
highest value smaller than ``max_resources`` that is a multiple of
both ``min_resources`` and ``factor``. In general, using 'exhaust'
leads to a more accurate estimator, but is slightly more time
consuming. 'exhaust' isn't available when `n_candidates='exhaust'`.
Note that the amount of resources used at each iteration is always a
multiple of ``min_resources``.
aggressive_elimination : bool, default=False
This is only relevant in cases where there isn't enough resources to
reduce the remaining candidates to at most `factor` after the last
iteration. If ``True``, then the search process will 'replay' the
first iteration for as long as needed until the number of candidates
is small enough. This is ``False`` by default, which means that the
last iteration may evaluate more than ``factor`` candidates. See
:ref:`aggressive_elimination` for more details.
cv : int, cross-validation generator or an iterable, default=5
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- integer, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used. These splitters are instantiated
with `shuffle=False` so the splits will be the same across calls.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. note::
Due to implementation details, the folds produced by `cv` must be
the same across multiple calls to `cv.split()`. For
built-in `scikit-learn` iterators, this can be achieved by
deactivating shuffling (`shuffle=False`), or by setting the
`cv`'s `random_state` parameter to an integer.
scoring : str, callable, or None, default=None
A single string (see :ref:`scoring_parameter`) or a callable
(see :ref:`scoring`) to evaluate the predictions on the test set.
If None, the estimator's score method is used.
refit : bool, default=True
If True, refit an estimator using the best found parameters on the
whole dataset.
The refitted estimator is made available at the ``best_estimator_``
attribute and permits using ``predict`` directly on this
``HalvingRandomSearchCV`` instance.
error_score : 'raise' or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error. Default is ``np.nan``.
return_train_score : bool, default=False
If ``False``, the ``cv_results_`` attribute will not include training
scores.
Computing training scores is used to get insights on how different
parameter settings impact the overfitting/underfitting trade-off.
However computing the scores on the training set can be computationally
expensive and is not strictly required to select the parameters that
yield the best generalization performance.
random_state : int, RandomState instance or None, default=None
Pseudo random number generator state used for subsampling the dataset
when `resources != 'n_samples'`. Also used for random uniform
sampling from lists of possible values instead of scipy.stats
distributions.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
n_jobs : int or None, default=None
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : int
Controls the verbosity: the higher, the more messages.
Attributes
----------
n_resources_ : list of int
The amount of resources used at each iteration.
n_candidates_ : list of int
The number of candidate parameters that were evaluated at each
iteration.
n_remaining_candidates_ : int
The number of candidate parameters that are left after the last
iteration. It corresponds to `ceil(n_candidates[-1] / factor)`
max_resources_ : int
The maximum number of resources that any candidate is allowed to use
for a given iteration. Note that since the number of resources used at
each iteration must be a multiple of ``min_resources_``, the actual
number of resources used at the last iteration may be smaller than
``max_resources_``.
min_resources_ : int
The amount of resources that are allocated for each candidate at the
first iteration.
n_iterations_ : int
The actual number of iterations that were run. This is equal to
``n_required_iterations_`` if ``aggressive_elimination`` is ``True``.
Else, this is equal to ``min(n_possible_iterations_,
n_required_iterations_)``.
n_possible_iterations_ : int
The number of iterations that are possible starting with
``min_resources_`` resources and without exceeding
``max_resources_``.
n_required_iterations_ : int
The number of iterations that are required to end up with less than
``factor`` candidates at the last iteration, starting with
``min_resources_`` resources. This will be smaller than
``n_possible_iterations_`` when there isn't enough resources.
cv_results_ : dict of numpy (masked) ndarrays
A dict with keys as column headers and values as columns, that can be
imported into a pandas ``DataFrame``. It contains lots of information
for analysing the results of a search.
Please refer to the :ref:`User guide<successive_halving_cv_results>`
for details.
best_estimator_ : estimator or dict
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if ``refit=False``.
best_score_ : float
Mean cross-validated score of the best_estimator.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
best_index_ : int
The index (of the ``cv_results_`` arrays) which corresponds to the best
candidate parameter setting.
The dict at ``search.cv_results_['params'][search.best_index_]`` gives
the parameter setting for the best model, that gives the highest
mean score (``search.best_score_``).
scorer_ : function or a dict
Scorer function used on the held out data to choose the best
parameters for the model.
n_splits_ : int
The number of cross-validation splits (folds/iterations).
refit_time_ : float
Seconds used for refitting the best model on the whole dataset.
This is present only if ``refit`` is not False.
multimetric_ : bool
Whether or not the scorers compute several metrics.
classes_ : ndarray of shape (n_classes,)
The classes labels. This is present only if ``refit`` is specified and
the underlying estimator is a classifier.
n_features_in_ : int
Number of features seen during :term:`fit`. Only defined if
`best_estimator_` is defined (see the documentation for the `refit`
parameter for more details) and that `best_estimator_` exposes
`n_features_in_` when fit.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Only defined if
`best_estimator_` is defined (see the documentation for the `refit`
parameter for more details) and that `best_estimator_` exposes
`feature_names_in_` when fit.
.. versionadded:: 1.0
See Also
--------
:class:`HalvingGridSearchCV`:
Search over a grid of parameters using successive halving.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.ensemble import RandomForestClassifier
>>> from sklearn.experimental import enable_halving_search_cv # noqa
>>> from sklearn.model_selection import HalvingRandomSearchCV
>>> from scipy.stats import randint
>>> import numpy as np
...
>>> X, y = load_iris(return_X_y=True)
>>> clf = RandomForestClassifier(random_state=0)
>>> np.random.seed(0)
...
>>> param_distributions = {"max_depth": [3, None],
... "min_samples_split": randint(2, 11)}
>>> search = HalvingRandomSearchCV(clf, param_distributions,
... resource='n_estimators',
... max_resources=10,
... random_state=0).fit(X, y)
>>> search.best_params_ # doctest: +SKIP
{'max_depth': None, 'min_samples_split': 10, 'n_estimators': 9}
"""
_required_parameters = ["estimator", "param_distributions"]
def __init__(
    self,
    estimator,
    param_distributions,
    *,
    n_candidates="exhaust",
    factor=3,
    resource="n_samples",
    max_resources="auto",
    min_resources="smallest",
    aggressive_elimination=False,
    cv=5,
    scoring=None,
    refit=True,
    error_score=np.nan,
    return_train_score=True,
    random_state=None,
    n_jobs=None,
    verbose=0,
):
    """Record the sampling-specific settings and delegate the rest.

    All successive-halving behaviour lives in the base class; this
    subclass only contributes drawing candidates from distributions.
    """
    # Forward the shared options, grouped by theme: evaluation,
    # reproducibility/parallelism/reporting, then the halving schedule.
    super().__init__(
        estimator,
        scoring=scoring,
        cv=cv,
        refit=refit,
        error_score=error_score,
        return_train_score=return_train_score,
        random_state=random_state,
        n_jobs=n_jobs,
        verbose=verbose,
        resource=resource,
        max_resources=max_resources,
        min_resources=min_resources,
        factor=factor,
        aggressive_elimination=aggressive_elimination,
    )
    # Settings consumed by _generate_candidate_params on this subclass.
    self.n_candidates = n_candidates
    self.param_distributions = param_distributions
def _generate_candidate_params(self):
    """Sample the first-iteration candidates from ``param_distributions``."""
    if self.n_candidates == "exhaust":
        # Request enough candidates that the final halving iteration can
        # consume (close to) the full resource budget.
        n_first_iter = self.max_resources_ // self.min_resources_
    else:
        n_first_iter = self.n_candidates
    return ParameterSampler(
        self.param_distributions, n_first_iter, random_state=self.random_state
    )
| 40.647993 | 85 | 0.641613 |
c02d872dac18c18b8a20a91412210a3e331da57d | 8,458 | py | Python | source/setup.py | siddhartha-iitd/NVDA-Enhancements | fb00b52f7d3c81210dad81b584fd0170c0b47215 | [
"bzip2-1.0.6"
] | null | null | null | source/setup.py | siddhartha-iitd/NVDA-Enhancements | fb00b52f7d3c81210dad81b584fd0170c0b47215 | [
"bzip2-1.0.6"
] | null | null | null | source/setup.py | siddhartha-iitd/NVDA-Enhancements | fb00b52f7d3c81210dad81b584fd0170c0b47215 | [
"bzip2-1.0.6"
] | null | null | null | # -*- coding: UTF-8 -*-
#setup.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2006-2015 NV Access Limited, Peter Vágner, Joseph Lee
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
import os
import copy
import gettext
gettext.install("nvda", unicode=True)
from distutils.core import setup
import py2exe as py2exeModule
from glob import glob
import fnmatch
from versionInfo import *
from py2exe import build_exe
import wx
import imp
# Extra XML appended to the generated manifest of the main NVDA executables:
# COM registration entries for the Handy Tech braille driver server and the
# supported-OS compatibility section (Windows Vista through Windows 10).
# Spliced in just before </assembly> by py2exe.build_manifest below.
MAIN_MANIFEST_EXTRA = r"""
<file name="brailleDisplayDrivers\handyTech\HtBrailleDriverServer.dll">
<comClass
description="HtBrailleDriver Class"
clsid="{209445BA-92ED-4AB2-83EC-F24ACEE77EE0}"
threadingModel="Apartment"
progid="HtBrailleDriverServer.HtBrailleDriver"
tlbid="{33257EFB-336F-4680-B94E-F5013BA6B9B3}" />
</file>
<file name="brailleDisplayDrivers\handyTech\HtBrailleDriverServer.tlb">
<typelib tlbid="{33257EFB-336F-4680-B94E-F5013BA6B9B3}"
version="1.0"
helpdir="" />
</file>
<comInterfaceExternalProxyStub
name="IHtBrailleDriverSink"
iid="{EF551F82-1C7E-421F-963D-D9D03548785A}"
proxyStubClsid32="{00020420-0000-0000-C000-000000000046}"
baseInterface="{00000000-0000-0000-C000-000000000046}"
tlbid="{33257EFB-336F-4680-B94E-F5013BA6B9B3}" />
<comInterfaceExternalProxyStub
name="IHtBrailleDriver"
iid="{43A71F9B-58EE-42D4-B58E-0F9FBA28D995}"
proxyStubClsid32="{00020424-0000-0000-C000-000000000046}"
baseInterface="{00000000-0000-0000-C000-000000000046}"
tlbid="{33257EFB-336F-4680-B94E-F5013BA6B9B3}" />
<compatibility xmlns="urn:schemas-microsoft-com:compatibility.v1">
<application>
<!-- Windows Vista -->
<supportedOS Id="{e2011457-1546-43c5-a5fe-008deee3d3f0}"/>
<!-- Windows 7 -->
<supportedOS Id="{35138b9a-5d96-4fbd-8e2d-a2440225f93a}"/>
<!-- Windows 8 -->
<supportedOS Id="{4a2f28e3-53b9-4441-ba9c-d69d4a4a6e38}"/>
<!-- Windows 8.1 -->
<supportedOS Id="{1f676c76-80e1-4239-95bb-83d0f6d0da78}"/>
<!-- Windows 10 -->
<supportedOS Id="{8e0f7a12-bfb3-4fe8-b9a5-48fd50a15a9a}"/>
</application>
</compatibility>
"""
def getModuleExtention(thisModType):
    """Return the filename extension (e.g. ".py") that the ``imp`` module
    associates with the given module type constant (``imp.PY_SOURCE`` etc.)."""
    matches = (suffix for suffix, _mode, foundType in imp.get_suffixes()
               if foundType == thisModType)
    try:
        return next(matches)
    except StopIteration:
        raise ValueError("unknown mod type %s" % thisModType)
# py2exe's idea of whether a dll is a system dll appears to be wrong sometimes, so monkey patch it.
# Keep a reference to the original predicate so our replacement below can
# fall back to it for DLLs we don't explicitly classify.
origIsSystemDLL = build_exe.isSystemDLL
def isSystemDLL(pathname):
    """Replacement for py2exe's isSystemDLL with explicit overrides.

    Returns 0 (bundle it) for runtime DLLs many systems lack, 1 (exclude
    it) for DLLs guaranteed to ship with every supported Windows, and
    otherwise defers to py2exe's original heuristic.
    """
    name = os.path.basename(pathname).lower()
    mustBundle = ("msvcp71.dll", "msvcp90.dll", "gdiplus.dll", "mfc71.dll", "mfc90.dll")
    if name in mustBundle:
        # These dlls don't exist on many systems, so make sure they're included.
        return 0
    mustExclude = ("powrprof.dll", "mpr.dll")
    if name.startswith("api-ms-win-") or name in mustExclude:
        # Definitely system dlls present on all supported Windows versions;
        # bundling them can break the build when run on a different Windows.
        return 1
    return origIsSystemDLL(pathname)
# Install the override so all subsequent py2exe processing consults our
# explicit include/exclude lists first.
build_exe.isSystemDLL = isSystemDLL
class py2exe(build_exe.py2exe):
    """Overridden py2exe command to:
    * Add a command line option --enable-uiAccess to enable uiAccess for the main executable
    * Add extra info to the manifest
    * Don't copy w9xpopen, as NVDA will never run on Win9x
    """
    user_options = build_exe.py2exe.user_options + [
        ("enable-uiAccess", "u", "enable uiAccess for the main executable"),
    ]

    def initialize_options(self):
        # Extend the base options; our custom flag defaults to off.
        build_exe.py2exe.initialize_options(self)
        self.enable_uiAccess = False

    def copy_w9xpopen(self, modules, dlls):
        # Deliberate no-op: w9xpopen is only needed on Windows 9x.
        pass

    def run(self):
        dist = self.distribution
        if self.enable_uiAccess:
            # Add a target for nvda_uiAccess, using nvda_noUIAccess as a base.
            target = copy.deepcopy(dist.windows[0])
            target["dest_base"] = "nvda_uiAccess"
            # Same UAC execution level as the base target, but uiAccess=True.
            target["uac_info"] = (target["uac_info"][0], True)
            dist.windows.insert(1, target)
        build_exe.py2exe.run(self)

    def build_manifest(self, target, template):
        # Let py2exe build the standard manifest, then append our extras.
        mfest, rid = build_exe.py2exe.build_manifest(self, target, template)
        if getattr(target, "script", None) == "nvda.pyw":
            # This is one of the main application executables.
            # Splice MAIN_MANIFEST_EXTRA in just before the closing tag.
            mfest = mfest[:mfest.rindex("</assembly>")]
            mfest += MAIN_MANIFEST_EXTRA + "</assembly>"
        return mfest, rid
def getLocaleDataFiles():
    """Collect per-locale data files (gettext catalogs, dictionaries and
    gesture maps) as ``(destination, (source file,))`` pairs for distutils."""
    wxDir = wx.__path__[0]
    localeMoFiles = set()
    for msgDir in glob("locale/*/LC_MESSAGES"):
        localeMoFiles.add((msgDir, (os.path.join(msgDir, "nvda.mo"),)))
        # Ship wxWidgets' own translation alongside ours when it exists.
        wxMoFile = os.path.join(wxDir, msgDir, "wxstd.mo")
        if os.path.isfile(wxMoFile):
            localeMoFiles.add((msgDir, (wxMoFile,)))
        # For country-specific locales (e.g. pt_BR), also try the plain
        # language (pt) wx catalog as a fallback.
        lang = os.path.split(os.path.split(msgDir)[0])[1]
        if '_' in lang:
            lang = lang.split('_')[0]
            fallbackDir = os.path.join('locale', lang, 'lc_messages')
            wxMoFile = os.path.join(wxDir, fallbackDir, "wxstd.mo")
            if os.path.isfile(wxMoFile):
                localeMoFiles.add((fallbackDir, (wxMoFile,)))
    localeDicFiles = [(os.path.dirname(f), (f,)) for f in glob("locale/*/*.dic")]
    NVDALocaleGestureMaps = [(os.path.dirname(f), (f,)) for f in glob("locale/*/gestures.ini")]
    return list(localeMoFiles) + localeDicFiles + NVDALocaleGestureMaps
def getRecursiveDataFiles(dest, source, excludes=()):
    """Recursively build distutils ``data_files`` rules mirroring *source*.

    Returns a list of ``(dest_dir, [source files])`` tuples, one per
    directory, descending into subdirectories of *source* (hidden
    directories are skipped).  *excludes* is a tuple of fnmatch patterns;
    matching files are omitted.
    """
    files = [
        f for f in glob("%s/*" % source)
        if not any(fnmatch.fnmatch(f, exclude) for exclude in excludes)
        and os.path.isfile(f)
    ]
    rulesList = [(dest, files)]
    # Recurse with a plain loop; the original abused a list comprehension
    # purely for its side effects, building and discarding a throwaway list.
    for dirName in os.listdir(source):
        if dirName.startswith('.'):
            continue
        if os.path.isdir(os.path.join(source, dirName)):
            rulesList.extend(
                getRecursiveDataFiles(
                    os.path.join(dest, dirName),
                    os.path.join(source, dirName),
                    excludes=excludes,
                )
            )
    return rulesList
# Cache the platform's extension strings for compiled (.pyc) and source (.py)
# modules; used below to exclude Python sources/bytecode from bundled data.
compiledModExtention = getModuleExtention(imp.PY_COMPILED)
sourceModExtention = getModuleExtention(imp.PY_SOURCE)
# Build the NVDA binary distribution.  Most metadata (name, version,
# description, url, copyright, publisher) is star-imported from versionInfo.
setup(
    name = name,
    version=version,
    description=description,
    url=url,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Win32 (MS Windows)',
        # Bug fix: a missing trailing comma here made Python implicitly
        # concatenate this classifier with the next one into a single
        # bogus entry ("...Adaptive TechnologiesIntended Audience...").
        'Topic :: Adaptive Technologies',
        'Intended Audience :: Developers',
        'Intended Audience :: End Users/Desktop',
        'License :: OSI Approved :: GNU General Public License (GPL)',
        'Natural Language :: English',
        'Programming Language :: Python',
        'Operating System :: Microsoft :: Windows',
    ],
    # Use our subclassed py2exe command (uiAccess option, manifest extras).
    cmdclass={"py2exe": py2exe},
    # GUI executable targets.
    windows=[
        {
            "script":"nvda.pyw",
            "dest_base":"nvda_noUIAccess",
            "uac_info": ("asInvoker", False),
            "icon_resources":[(1,"images/nvda.ico")],
            "version":"0.0.0.0",
            "description":"NVDA application",
            "product_version":version,
            "copyright":copyright,
            "company_name":publisher,
        },
        # The nvda_uiAccess target will be added at runtime if required.
        {
            "script": "nvda_slave.pyw",
            "icon_resources": [(1,"images/nvda.ico")],
            "version": "0.0.0.0",
            "description": name,
            "product_version": version,
            "copyright": copyright,
            "company_name": publisher,
        },
    ],
    # Windows service target (runs elevated).
    service=[{
        "modules": ["nvda_service"],
        "icon_resources": [(1, "images/nvda.ico")],
        "version": "0.0.0.0",
        "description": "NVDA service",
        "product_version": version,
        "copyright": copyright,
        "company_name": publisher,
        "uac_info": ("requireAdministrator", False),
        "cmdline_style": "pywin32",
    }],
    options = {"py2exe": {
        "bundle_files": 3,
        "excludes": ["Tkinter",
            "serial.loopback_connection", "serial.rfc2217", "serial.serialcli", "serial.serialjava", "serial.serialposix", "serial.socket_connection"],
        "packages": ["NVDAObjects","virtualBuffers","appModules","comInterfaces","brailleDisplayDrivers","synthDrivers"],
        # #3368: bisect was implicitly included with Python 2.7.3, but isn't with 2.7.5.
        # Explicitly include it so we don't break some add-ons.
        "includes": ["nvdaBuiltin", "bisect"],
    }},
    # Static data files plus the locale/driver/documentation trees collected
    # by the helper functions above.
    data_files=[
        (".",glob("*.dll")+glob("*.manifest")+["builtin.dic"]),
        ("documentation", ['../copying.txt', '../contributors.txt']),
        ("lib", glob("lib/*.dll")),
        ("lib64", glob("lib64/*.dll") + glob("lib64/*.exe")),
        ("waves", glob("waves/*.wav")),
        ("images", glob("images/*.ico")),
        ("louis/tables",glob("louis/tables/*"))
    ] + (
        getLocaleDataFiles()
        + getRecursiveDataFiles("synthDrivers", "synthDrivers",
            excludes=("*%s" % sourceModExtention, "*%s" % compiledModExtention, "*.exp", "*.lib", "*.pdb"))
        + getRecursiveDataFiles("brailleDisplayDrivers", "brailleDisplayDrivers", excludes=("*%s"%sourceModExtention,"*%s"%compiledModExtention))
        + getRecursiveDataFiles('documentation', '../user_docs', excludes=('*.t2t', '*.t2tconf', '*/developerGuide.*'))
    ),
)
| 37.758929 | 231 | 0.693899 |
c7aff376da7002219a6bfddf66f6190d33fceddc | 20,605 | py | Python | tea/evaluate.py | hmckenzie/tea-lang | d88d63ea600c387d086d19bcb0c9ae54cc78cb68 | [
"Apache-2.0"
] | null | null | null | tea/evaluate.py | hmckenzie/tea-lang | d88d63ea600c387d086d19bcb0c9ae54cc78cb68 | [
"Apache-2.0"
] | null | null | null | tea/evaluate.py | hmckenzie/tea-lang | d88d63ea600c387d086d19bcb0c9ae54cc78cb68 | [
"Apache-2.0"
] | null | null | null | from tea.ast import ( Node, Variable, Literal,
Equal, NotEqual, LessThan,
LessThanEqual, GreaterThan, GreaterThanEqual,
Relate, PositiveRelationship
)
from tea.runtimeDataStructures.dataset import Dataset
from tea.runtimeDataStructures.varData import VarData
from tea.runtimeDataStructures.bivariateData import BivariateData
from tea.runtimeDataStructures.multivariateData import MultivariateData
from tea.runtimeDataStructures.resultData import ResultData
from tea.helpers.evaluateHelperMethods import determine_study_type, assign_roles, add_paired_property, execute_test
from tea.z3_solver.solver import synthesize_tests
import attr
from typing import Any
from types import SimpleNamespace # allows for dot notation access for dictionaries
from typing import Dict
from scipy import stats # Stats library used
import statsmodels.api as sm
import statsmodels.formula.api as smf
import numpy as np # Use some stats from numpy instead
import pandas as pd
# TODO: Pass participant_id as part of experimental design, not load_data
def evaluate(dataset: Dataset, expr: Node, assumptions: Dict[str, str], design: Dict[str, str]=None):
    """Recursively evaluate a Tea AST node against *dataset*.

    Dispatches on the concrete node type: ``Variable``/``Literal`` leaves
    produce ``VarData``; comparison nodes (``Equal``, ``NotEqual``,
    ``LessThan``, ...) compute a filtered view and a query string;
    ``Relate``/``PositiveRelationship`` run the full statistical-test
    pipeline and produce ``ResultData``.

    NOTE(review): several branches below reference ``DataType``, which does
    not appear in this module's visible imports — confirm it is in scope at
    runtime, otherwise those branches raise ``NameError``.
    NOTE(review): the recursive calls ``evaluate(dataset, expr.lhs)`` and
    ``evaluate(dataset, v, design)`` omit the required ``assumptions``
    argument (or pass ``design`` in its place) — verify against callers.
    """
    if isinstance(expr, Variable):
        # Leaf: look up the variable's metadata (dtype, categories) and tag
        # it with its name and an empty query.
        # dataframe = dataset[expr.name] # I don't know if we want this. We may want to just store query (in metadata?) and
        # then use query to get raw data later....(for user, not interpreter?)
        metadata = dataset.get_variable_data(expr.name) # (dtype, categories)
        # if expr.name == 'strategy':
        # import pdb; pdb.set_trace()
        metadata['var_name'] = expr.name
        metadata['query'] = ''
        return VarData(metadata)
    elif isinstance(expr, Literal):
        # Leaf: broadcast the literal over the dataset's index so it can be
        # compared elementwise against variable columns.
        data = pd.Series([expr.value] * len(dataset.data), index=dataset.data.index) # Series filled with literal value
        # metadata = None # metadata=None means literal
        metadata = dict() # metadata=None means literal
        metadata['var_name'] = '' # because not a var in the dataset
        metadata['query'] = ''
        metadata['value'] = expr.value
        return VarData(data, metadata)
    elif isinstance(expr, Equal):
        # Equality filter.
        # NOTE(review): ``dataframe`` is computed but never passed to the
        # returned VarData (only ``metadata`` is) — the same holds for every
        # comparison branch below; confirm whether that is intentional.
        lhs = evaluate(dataset, expr.lhs)
        rhs = evaluate(dataset, expr.rhs)
        assert isinstance(lhs, VarData)
        assert isinstance(rhs, VarData)
        dataframe = lhs.dataframe[lhs.dataframe == rhs.dataframe]
        metadata = lhs.metadata
        if (isinstance(expr.rhs, Literal)):
            metadata['query'] = f" == \'{rhs.metadata['value']}\'" # override lhs metadata for query
        elif (isinstance(expr.rhs, Variable)):
            metadata['query'] = f" == {rhs.metadata['var_name']}"
        else:
            raise ValueError(f"Not implemented for {rhs}")
        return VarData(metadata)
    elif isinstance(expr, NotEqual):
        # Inequality filter; mirrors the Equal branch.
        # NOTE(review): unlike Equal, the literal query here is the fixed
        # string " != ''" rather than embedding the literal's value.
        rhs = evaluate(dataset, expr.rhs)
        lhs = evaluate(dataset, expr.lhs)
        assert isinstance(rhs, VarData)
        assert isinstance(lhs, VarData)
        dataframe = lhs.dataframe[lhs.dataframe != rhs.dataframe]
        metadata = lhs.metadata
        if (isinstance(expr.rhs, Literal)):
            metadata['query'] = " != \'\'" # override lhs metadata for query
        elif (isinstance(expr.rhs, Variable)):
            metadata['query'] = f" != {rhs.metadata['var_name']}"
        else:
            raise ValueError(f"Not implemented for {rhs}")
        return VarData(metadata)
    elif isinstance(expr, LessThan):
        # Ordered comparison; only meaningful for ORDINAL (by category rank)
        # and INTERVAL/RATIO (by raw value) variables.
        lhs = evaluate(dataset, expr.lhs)
        rhs = evaluate(dataset, expr.rhs)
        assert isinstance(lhs, VarData)
        assert isinstance(rhs, VarData)
        dataframe = None
        metadata = rhs.metadata
        if (not lhs.metadata):
            raise ValueError('Malformed Relation. Filter on Variables must have variable as rhs')
        elif (lhs.metadata['dtype'] is DataType.NOMINAL):
            raise ValueError('Cannot compare nominal values with Less Than')
        elif (lhs.metadata['dtype'] is DataType.ORDINAL):
            # TODO May want to add a case should RHS and LHS both be variables
            # assert (rhs.metadata is None)
            comparison = rhs.dataframe.iloc[0]
            if (isinstance(comparison, str)):
                categories = lhs.metadata['categories'] # OrderedDict
                # Get raw Pandas Series indices for desired data
                ids = [i for i,x in enumerate(lhs.dataframe) if categories[x] < categories[comparison]]
                # Get Pandas Series set indices for desired data
                p_ids = [lhs.dataframe.index.values[i] for i in ids]
                # Create new Pandas Series with only the desired data, using set indices
                dataframe = pd.Series(lhs.dataframe, p_ids)
                dataframe.index.name = dataset.pid_col_name
            elif (np.issubdtype(comparison, np.integer)):
                categories = lhs.metadata['categories'] # OrderedDict
                # Get raw Pandas Series indices for desired data
                ids = [i for i,x in enumerate(lhs.dataframe) if categories[x] < comparison]
                # Get Pandas Series set indices for desired data
                p_ids = [lhs.dataframe.index.values[i] for i in ids]
                # Create new Pandas Series with only the desired data, using set indices
                dataframe = pd.Series(lhs.dataframe, p_ids)
                dataframe.index.name = dataset.pid_col_name
            else:
                raise ValueError(f"Cannot compare ORDINAL variables to {type(rhs.dataframe.iloc[0])}")
        elif (lhs.metadata['dtype'] is DataType.INTERVAL or lhs.metadata['dtype'] is DataType.RATIO):
            comparison = rhs.dataframe.iloc[0]
            # Get raw Pandas Series indices for desired data
            ids = [i for i,x in enumerate(lhs.dataframe) if x < comparison]
            # Get Pandas Series set indices for desired data
            p_ids = [lhs.dataframe.index.values[i] for i in ids]
            # Create new Pandas Series with only the desired data, using set indices
            dataframe = pd.Series(lhs.dataframe, p_ids)
            dataframe.index.name = dataset.pid_col_name
        else:
            raise Exception(f"Invalid Less Than Operation:{lhs} < {rhs}")
        if (isinstance(expr.rhs, Literal)):
            metadata['query'] = " < \'\'" # override lhs metadata for query
        elif (isinstance(expr.rhs, Variable)):
            metadata['query'] = f" < {rhs.metadata['var_name']}"
        else:
            raise ValueError(f"Not implemented for {rhs}")
        return VarData(metadata)
    elif isinstance(expr, LessThanEqual):
        # Same structure as LessThan but with an inclusive bound.
        lhs = evaluate(dataset, expr.lhs)
        rhs = evaluate(dataset, expr.rhs)
        assert isinstance(lhs, VarData)
        assert isinstance(rhs, VarData)
        dataframe = None
        metadata = rhs.metadata
        if (not lhs.metadata):
            raise ValueError('Malformed Relation. Filter on Variables must have variable as rhs')
        elif (lhs.metadata['dtype'] is DataType.NOMINAL):
            raise ValueError('Cannot compare nominal values with Less Than')
        elif (lhs.metadata['dtype'] is DataType.ORDINAL):
            # TODO May want to add a case should RHS and LHS both be variables
            # assert (rhs.metadata is None)
            comparison = rhs.dataframe.iloc[0]
            if (isinstance(comparison, str)):
                categories = lhs.metadata['categories'] # OrderedDict
                # Get raw Pandas Series indices for desired data
                ids = [i for i,x in enumerate(lhs.dataframe) if categories[x] <= categories[comparison]]
                # Get Pandas Series set indices for desired data
                p_ids = [lhs.dataframe.index.values[i] for i in ids]
                # Create new Pandas Series with only the desired data, using set indices
                dataframe = pd.Series(lhs.dataframe, p_ids)
                dataframe.index.name = dataset.pid_col_name
            elif (np.issubdtype(comparison, np.integer)):
                categories = lhs.metadata['categories'] # OrderedDict
                # Get raw Pandas Series indices for desired data
                ids = [i for i,x in enumerate(lhs.dataframe) if categories[x] <= comparison]
                # Get Pandas Series set indices for desired data
                p_ids = [lhs.dataframe.index.values[i] for i in ids]
                # Create new Pandas Series with only the desired data, using set indices
                dataframe = pd.Series(lhs.dataframe, p_ids)
                dataframe.index.name = dataset.pid_col_name
            else:
                raise ValueError(f"Cannot compare ORDINAL variables to {type(rhs.dataframe.iloc[0])}")
        elif (lhs.metadata['dtype'] is DataType.INTERVAL or lhs.metadata['dtype'] is DataType.RATIO):
            comparison = rhs.dataframe.iloc[0]
            # Get raw Pandas Series indices for desired data
            ids = [i for i,x in enumerate(lhs.dataframe) if x <= comparison]
            # Get Pandas Series set indices for desired data
            p_ids = [lhs.dataframe.index.values[i] for i in ids]
            # Create new Pandas Series with only the desired data, using set indices
            dataframe = pd.Series(lhs.dataframe, p_ids)
            dataframe.index.name = dataset.pid_col_name
        else:
            raise Exception(f"Invalid Less Than Equal Operation:{lhs} <= {rhs}")
        if (isinstance(expr.rhs, Literal)):
            metadata['query'] = " <= \'\'" # override lhs metadata for query
        elif (isinstance(expr.rhs, Variable)):
            metadata['query'] = f" <= {rhs.metadata['var_name']}"
        else:
            raise ValueError(f"Not implemented for {rhs}")
        return VarData(metadata)
    elif isinstance(expr, GreaterThan):
        # Mirror of LessThan with the comparison direction reversed.
        lhs = evaluate(dataset, expr.lhs)
        rhs = evaluate(dataset, expr.rhs)
        assert isinstance(lhs, VarData)
        assert isinstance(rhs, VarData)
        dataframe = None
        metadata = rhs.metadata
        if (not lhs.metadata):
            raise ValueError('Malformed Relation. Filter on Variables must have variable as rhs')
        elif (lhs.metadata['dtype'] is DataType.NOMINAL):
            raise ValueError('Cannot compare nominal values with Greater Than')
        elif (lhs.metadata['dtype'] is DataType.ORDINAL):
            # TODO May want to add a case should RHS and LHS both be variables
            # assert (rhs.metadata is None)
            comparison = rhs.dataframe.iloc[0]
            if (isinstance(comparison, str)):
                categories = lhs.metadata['categories'] # OrderedDict
                # Get raw Pandas Series indices for desired data
                ids = [i for i,x in enumerate(lhs.dataframe) if categories[x] > categories[comparison]]
                # Get Pandas Series set indices for desired data
                p_ids = [lhs.dataframe.index.values[i] for i in ids]
                # Create new Pandas Series with only the desired data, using set indices
                dataframe = pd.Series(lhs.dataframe, p_ids)
                dataframe.index.name = dataset.pid_col_name
            elif (np.issubdtype(comparison, np.integer)):
                categories = lhs.metadata['categories'] # OrderedDict
                # Get raw Pandas Series indices for desired data
                ids = [i for i,x in enumerate(lhs.dataframe) if categories[x] > comparison]
                # Get Pandas Series set indices for desired data
                p_ids = [lhs.dataframe.index.values[i] for i in ids]
                # Create new Pandas Series with only the desired data, using set indices
                dataframe = pd.Series(lhs.dataframe, p_ids)
                dataframe.index.name = dataset.pid_col_name
            else:
                raise ValueError(f"Cannot compare ORDINAL variables to {type(rhs.dataframe.iloc[0])}")
        elif (lhs.metadata['dtype'] is DataType.INTERVAL or lhs.metadata['dtype'] is DataType.RATIO):
            comparison = rhs.dataframe.iloc[0]
            # Get raw Pandas Series indices for desired data
            ids = [i for i,x in enumerate(lhs.dataframe) if x > comparison]
            # Get Pandas Series set indices for desired data
            p_ids = [lhs.dataframe.index.values[i] for i in ids]
            # Create new Pandas Series with only the desired data, using set indices
            dataframe = pd.Series(lhs.dataframe, p_ids)
            dataframe.index.name = dataset.pid_col_name
        else:
            raise Exception(f"Invalid Greater Than Operation:{lhs} > {rhs}")
        if (isinstance(expr.rhs, Literal)):
            metadata['query'] = " > \'\'" # override lhs metadata for query
        elif (isinstance(expr.rhs, Variable)):
            metadata['query'] = f" > {rhs.metadata['var_name']}"
        else:
            raise ValueError(f"Not implemented for {rhs}")
        return VarData(metadata)
    elif isinstance(expr, GreaterThanEqual):
        # Mirror of LessThanEqual with the comparison direction reversed.
        lhs = evaluate(dataset, expr.lhs)
        rhs = evaluate(dataset, expr.rhs)
        assert isinstance(lhs, VarData)
        assert isinstance(rhs, VarData)
        dataframe = None
        metadata = rhs.metadata
        if (not lhs.metadata):
            raise ValueError('Malformed Relation. Filter on Variables must have variable as rhs')
        elif (lhs.metadata['dtype'] is DataType.NOMINAL):
            raise ValueError('Cannot compare nominal values with Greater Than Equal')
        elif (lhs.metadata['dtype'] is DataType.ORDINAL):
            # TODO May want to add a case should RHS and LHS both be variables
            # assert (rhs.metadata is None)
            comparison = rhs.dataframe.iloc[0]
            if (isinstance(comparison, str)):
                categories = lhs.metadata['categories'] # OrderedDict
                # Get raw Pandas Series indices for desired data
                ids = [i for i,x in enumerate(lhs.dataframe) if categories[x] >= categories[comparison]]
                # Get Pandas Series set indices for desired data
                p_ids = [lhs.dataframe.index.values[i] for i in ids]
                # Create new Pandas Series with only the desired data, using set indices
                dataframe = pd.Series(lhs.dataframe, p_ids)
                dataframe.index.name = dataset.pid_col_name
            elif (np.issubdtype(comparison, np.integer)):
                categories = lhs.metadata['categories'] # OrderedDict
                # Get raw Pandas Series indices for desired data
                ids = [i for i,x in enumerate(lhs.dataframe) if categories[x] >= comparison]
                # Get Pandas Series set indices for desired data
                p_ids = [lhs.dataframe.index.values[i] for i in ids]
                # Create new Pandas Series with only the desired data, using set indices
                dataframe = pd.Series(lhs.dataframe, p_ids)
                dataframe.index.name = dataset.pid_col_name
            else:
                raise ValueError(f"Cannot compare ORDINAL variables to {type(rhs.dataframe.iloc[0])}")
        elif (lhs.metadata['dtype'] is DataType.INTERVAL or lhs.metadata['dtype'] is DataType.RATIO):
            comparison = rhs.dataframe.iloc[0]
            # Get raw Pandas Series indices for desired data
            ids = [i for i,x in enumerate(lhs.dataframe) if x >= comparison]
            # Get Pandas Series set indices for desired data
            p_ids = [lhs.dataframe.index.values[i] for i in ids]
            # Create new Pandas Series with only the desired data, using set indices
            dataframe = pd.Series(lhs.dataframe, p_ids)
            dataframe.index.name = dataset.pid_col_name
        else:
            raise Exception(f"Invalid Greater Than Equal Operation:{lhs} >= {rhs}")
        if (isinstance(expr.rhs, Literal)):
            metadata['query'] = " >= \'\'" # override lhs metadata for query
        elif (isinstance(expr.rhs, Variable)):
            metadata['query'] = f" >= {rhs.metadata['var_name']}"
        else:
            raise ValueError(f"Not implemented for {rhs}")
        return VarData(metadata)
    elif isinstance(expr, Relate):
        # Full analysis pipeline: evaluate the variables, classify the
        # study, combine the data, synthesize and execute statistical tests.
        vars = []
        for v in expr.vars:
            eval_v = evaluate(dataset, v, design)
            if not eval_v:
                raise ValueError("The variables you are referencing are not defined as variables in your list of variables.")
            assert isinstance(eval_v, VarData)
            vars.append(eval_v)
        # What kind of study are we analyzing?
        study_type = determine_study_type(vars, design)
        # Assign roles to variables we are analyzing
        vars = assign_roles(vars, study_type, design)
        combined_data = None
        # Do we have a Bivariate analysis?
        if len(vars) == 2:
            combined_data = BivariateData(vars, study_type, alpha=float(assumptions['alpha']))
        else: # Do we have a Multivariate analysis?
            combined_data = MultivariateData(vars, study_type, alpha=float(assumptions['alpha']))
        # Add paired property
        add_paired_property(dataset, combined_data, study_type, design) # check sample sizes are identical
        # Infer stats tests (mingled with)
        tests = synthesize_tests(dataset, assumptions, combined_data)
        # The following triple-quoted string is dead scratch code kept for
        # reference; it is an unused expression statement with no effect.
        """"
# verify_properties(properties_and_tests)
# get_tests
# execute_tests
# interpret_tests_results
# print(tests)
for test in tests:
print("\nValid test: %s" % test.name)
print("Properties:")
properties = test.properties()
for prop in properties:
property_identifier = ""
if prop.scope == "test":
property_identifier = test.name + ": " + prop.name
else:
for var_indices in test.properties_for_vars[prop]:
for var_index in var_indices:
property_identifier += f"variable {test.test_vars[var_index].name} "
property_identifier += ": %s" % prop.name
print(property_identifier)
"""
        # Execute and store results from each valid test
        results = {}
        if len(tests) == 0:
            tests.append('bootstrap') # Default to bootstrap
        for test in tests:
            test_result = execute_test(dataset, design, expr.predictions, combined_data, test)
            results[test] = test_result
        res_data = ResultData(results, combined_data)
        follow_up = []
        # There are multiple hypotheses to follow-up and correct for
        if expr.predictions and len(expr.predictions) > 1:
            for pred in expr.predictions:
                # create follow-up expr Node (to evaluate recursively)
                pred_res = evaluate(dataset, pred, assumptions, design)
                follow_up.append(pred_res) # add follow-up result to follow_up
            res_data.add_follow_up(follow_up) # add follow-up results to the res_data object
        # Another dead scratch block kept as an unused string for reference.
        """
# TODO: use a handle here to more generally/modularly support corrections, need a more generic data structure for this!
if expr.predictions:
preds = expr.predictions
# There are multiple comparisons
# if len(preds > 1):
# FOR DEBUGGING:
if len(preds) >= 1:
correct_multiple_comparison(res_data, len(preds))
"""
        # import pdb; pdb.set_trace()
        return res_data
    elif isinstance(expr, PositiveRelationship):
        # Desugar a positive relationship into a plain Relate over its
        # two variables and evaluate that recursively.
        # get variables
        vars = [expr.lhs.var, expr.rhs.var]
        # create a Relate object
        pos_relate_expr = Relate(vars)
        return evaluate(dataset, pos_relate_expr, assumptions, design)
    # elif isinstance(expr, Relationship):
    # import pdb; pdb.set_trace()
    # elif isinstance(expr, Mean):
    # var = evaluate(dataset, expr.var)
    # assert isinstance(var, VarData)
    # # bs.bootstrap(var.dataframe, stat_func=
    # # bs_stats.mean)
    # raise Exception('Not implemented Mean')
25d7abd8960710bafe0fec865f438692f960a936 | 3,256 | py | Python | tests/integration/test_disco_dynamodb.py | amplifylitco/asiaq | a1a292f6e9cbf32a30242405e4947b17910e5369 | [
"BSD-2-Clause"
] | 27 | 2016-03-08T16:50:22.000Z | 2018-11-26T06:33:25.000Z | tests/integration/test_disco_dynamodb.py | amplifylitco/asiaq | a1a292f6e9cbf32a30242405e4947b17910e5369 | [
"BSD-2-Clause"
] | 202 | 2016-03-08T17:13:08.000Z | 2019-02-01T00:49:06.000Z | tests/integration/test_disco_dynamodb.py | amplify-education/asiaq | fb6004bc4da0acef40e7bc18b148db4f72fa2f32 | [
"BSD-2-Clause"
] | 2 | 2016-03-17T18:52:37.000Z | 2016-10-06T20:36:37.000Z | """
Integration tests for disco_dynamodb.py
"""
import json
from random import randint
from tests.helpers.integration_helpers import IntegrationTest
# Base table name for the test; disco_dynamodb suffixes it with the
# environment name, producing e.g. "IntegrationTest_env_12345".
MOCK_TABLE_NAME = "IntegrationTest"
# CLI command templates, filled in via str.format and split into argv lists.
CREATE_CMD = "disco_dynamodb.py create --env {0} --config test/test_dynamodb_create.json --wait"
DELETE_CMD = "disco_dynamodb.py delete --table {0} --env {1} --wait"
# NOTE(review): LIST_CMD has no placeholders, yet callers still invoke
# .format() on it; the argument is ignored — harmless, but confirm intent.
LIST_CMD = "disco_dynamodb.py list"
class TestDiscoDynamoDB(IntegrationTest):
""" Test bin/disco_dynamodb.py """
def setUp(self):
    """Create a randomized environment name so concurrent test runs
    cannot collide on shared AWS resources."""
    random_suffix = randint(10000, 99999)
    self.env_name = "env_{0}".format(random_suffix)
def test_create_and_delete_table(self):
""" Ensures we can create and delete DynamoDB table properly """
table_list_output = u"{0:<20} {1}".format(MOCK_TABLE_NAME, self.env_name)
try:
create_output = self.run_cmd(CREATE_CMD.format(self.env_name).split())
table = json.loads(self._remove_lines_from_logs(create_output))
self.assertEqual(table["TableStatus"], "ACTIVE")
self.assertEqual(table["TableName"], "{0}_{1}".format(MOCK_TABLE_NAME, self.env_name))
self.assertEqual(table["ProvisionedThroughput"]["WriteCapacityUnits"], 10)
self.assertEqual(table["ProvisionedThroughput"]["ReadCapacityUnits"], 10)
for key in table["KeySchema"]:
if key["KeyType"] == "HASH":
self.assertEqual(key["AttributeName"], "_id")
else:
self.assertEqual(key["AttributeName"], "mock_range_key")
self.assertEqual(table["GlobalSecondaryIndexes"][0]["IndexName"], "mock_index")
self.assertEqual(table["GlobalSecondaryIndexes"][0]["KeySchema"][0]["AttributeName"],
"mock_index_attr")
self.assertEqual(table["GlobalSecondaryIndexes"][0]["Projection"]["ProjectionType"], "ALL")
# Also assert that table can be found when running the list command
list_output = self.run_cmd(LIST_CMD.format(self.env_name).split())
lines = list_output.split('\n')
self.assertIn(table_list_output, lines)
finally:
delete_output = self.run_cmd(DELETE_CMD.format(MOCK_TABLE_NAME, self.env_name).split())
delete_output = json.loads(self._remove_lines_from_logs(delete_output))
self.assertEqual(delete_output["TableName"], "{0}_{1}".format(MOCK_TABLE_NAME, self.env_name))
self.assertEqual(delete_output["TableStatus"], "DELETED")
list_output = self.run_cmd(LIST_CMD.format(self.env_name).split())
lines = list_output.split('\n')
self.assertNotIn(table_list_output, lines)
def _remove_lines_from_logs(self, input_string):
lines = []
for line in input_string.split("\n"):
words = line.split()
try:
# If it quacks like a logging line...
if words[3] in ["WARNING", "WARN", "INFO", "DEBUG", "CRITICAL", "NOTSET"]:
continue
except IndexError:
pass
lines.append(line)
output_string = "\n".join(lines)
return output_string
| 39.707317 | 106 | 0.630835 |
0123e48a516345d0a8f6c14c8ca2fbc406e47dab | 1,326 | py | Python | src/appfl/algorithm/server_fed_avgmom.py | APPFL/APPFL | 96a1da6d7aeb64a8a78bfcc7dc60fa37273dfdb5 | [
"MIT"
] | 9 | 2022-01-23T03:12:01.000Z | 2022-03-28T14:03:44.000Z | src/appfl/algorithm/server_fed_avgmom.py | APPFL/APPFL | 96a1da6d7aeb64a8a78bfcc7dc60fa37273dfdb5 | [
"MIT"
] | 40 | 2021-12-18T05:46:30.000Z | 2022-03-31T21:40:01.000Z | src/appfl/algorithm/server_fed_avgmom.py | APPFL/APPFL | 96a1da6d7aeb64a8a78bfcc7dc60fa37273dfdb5 | [
"MIT"
] | null | null | null | from .server_federated import FedServer
class ServerFedAvgMomentum(FedServer):
    """FedAvg server aggregation with server-side momentum (FedAvgM)."""
    def compute_step(self):
        # Aggregate client updates into a pseudo-gradient, fold it into the
        # momentum buffer (m_vector, maintained by the FedServer base), then
        # step each parameter against the momentum direction.
        super(ServerFedAvgMomentum, self).compute_pseudo_gradient()
        super(ServerFedAvgMomentum, self).update_m_vector()
        for name, _ in self.model.named_parameters():
            self.step[name] = -self.m_vector[name]
    def logging_summary(self, cfg, logger):
        # NOTE(review): super(FedServer, self) deliberately skips FedServer's own
        # log_summary and calls its base-class implementation -- confirm intended.
        super(FedServer, self).log_summary(cfg, logger)
        logger.info("client_learning_rate = %s " % (cfg.fed.args.optim_args.lr))
        logger.info(
            "server_momentum_param_1 = %s " % (cfg.fed.args.server_momentum_param_1)
        )
        # Optionally append a one-line run summary (hyperparameters + metrics).
        if cfg.summary_file != "":
            with open(cfg.summary_file, "a") as f:
                f.write(
                    cfg.logginginfo.DataSet_name
                    + " FedAvgM ClientLR "
                    + str(cfg.fed.args.optim_args.lr)
                    + " MParam1 "
                    + str(cfg.fed.args.server_momentum_param_1)
                    + " TestAccuracy "
                    + str(cfg.logginginfo.accuracy)
                    + " BestAccuracy "
                    + str(cfg.logginginfo.BestAccuracy)
                    + " Time "
                    + str(round(cfg.logginginfo.Elapsed_time, 2))
                    + "\n"
                )
| 36.833333 | 84 | 0.537707 |
99b92116db55b6ba1af753ae8228be829129c2c1 | 1,757 | py | Python | devops-console/apps/requirements/models.py | lilinghell/devops | 1b2890d3f2d9f6e15e5b32d0910bc4768f065adc | [
"Apache-2.0"
] | 4 | 2019-12-06T06:19:33.000Z | 2021-12-23T13:05:06.000Z | devops-console/apps/requirements/models.py | lilinghell/devops | 1b2890d3f2d9f6e15e5b32d0910bc4768f065adc | [
"Apache-2.0"
] | 8 | 2020-03-15T03:40:38.000Z | 2022-03-12T00:50:27.000Z | devops-console/apps/requirements/models.py | lilinghell/devops | 1b2890d3f2d9f6e15e5b32d0910bc4768f065adc | [
"Apache-2.0"
] | null | null | null | from django.db import models
from django.utils.translation import ugettext_lazy as _
from common.mixin import BaseModelMixin
from teams.models import Team
from users.models import User
class Requirement(BaseModelMixin):
    """
    Business requirement.
    """
    # Lifecycle status codes: init -> doing -> fixed -> test -> done -> closed.
    STATUS_INIT = "0"
    STATUS_DOING = "1"
    STATUS_FIXED = "2"
    STATUS_TEST = "3"
    STATUS_DONE = "4"
    STATUS_CLOSED = "5"
    # NOTE: the display labels are the raw codes themselves, not human-readable text.
    STATUS_CHOICES = (
        (STATUS_INIT, '0'), (STATUS_DOING, '1'), (STATUS_FIXED, '2'), (STATUS_TEST, '3'), (STATUS_DONE, '4'),
        (STATUS_CLOSED, '5'))
    title = models.CharField(max_length=128)
    description = models.TextField(null=True)
    start_date = models.DateTimeField()
    end_date = models.DateTimeField()
    # Importance / priority are free integers; range not enforced at the model level.
    important = models.IntegerField(verbose_name=_("重要性"))
    priority = models.IntegerField(verbose_name=_("优先级"))
    status = models.CharField(choices=STATUS_CHOICES, max_length=2)
    # Link to an external redmine issue (id + url).
    link_id = models.CharField(max_length=128, verbose_name=_("关联redmine"))
    link_url = models.CharField(max_length=128, verbose_name=_("关联redmine url"))
    # Responsible user; kept (as NULL) if the user is deleted.
    owner = models.ForeignKey(User, related_name="own_reqs", on_delete=models.SET_NULL, null=True,
                              verbose_name=_("责任人"))
    assignee_teams = models.ManyToManyField(Team, related_name="assignee_reqs", verbose_name=_("指派小组"))
    # Self-referential links: parent requirement and a related requirement.
    parent = models.ForeignKey("requirements.Requirement", related_name="child_req", null=True,
                               on_delete=models.SET_NULL, verbose_name=_("上级需求"))
    relate = models.ForeignKey("requirements.Requirement", related_name="relate_req", null=True,
                               on_delete=models.SET_NULL, verbose_name=_("关联需求"))
    class Meta:
        verbose_name = _("业务需求表")
        db_table = "requirements"
| 40.860465 | 109 | 0.674445 |
1a47accc9daf71210b4a8e926cfe0352242e631a | 1,186 | py | Python | pystein/tests/test_geodesic.py | JWKennington/pynstein | 5333a4f90991a0e69be13c68fbe67f172aeb9d85 | [
"MIT"
] | null | null | null | pystein/tests/test_geodesic.py | JWKennington/pynstein | 5333a4f90991a0e69be13c68fbe67f172aeb9d85 | [
"MIT"
] | 3 | 2020-10-26T22:36:00.000Z | 2020-12-04T03:55:11.000Z | pystein/tests/test_geodesic.py | JWKennington/pystein | 5333a4f90991a0e69be13c68fbe67f172aeb9d85 | [
"MIT"
] | null | null | null | import numpy
import sympy
from sympy.diffgeom import Manifold, Patch
from pystein import geodesic, metric, coords
from pystein.utilities import tensor_pow as tpow
class TestGeodesic:
    def test_numerical(self):
        """Numerically integrate a geodesic on a 2D curved metric (smoke test)."""
        M = Manifold('M', dim=2)
        P = Patch('origin', M)
        rho, phi, a = sympy.symbols('rho phi a', nonnegative=True)
        cs = coords.CoordSystem('schw', P, [rho, phi])
        drho, dphi = cs.base_oneforms()
        # ds^2 = a^2 [ drho^2 / (1 - rho^2) + rho^2 dphi^2 ]
        ds2 = a ** 2 * ((1 / (1 - rho ** 2)) * tpow(drho, 2) + rho ** 2 * tpow(dphi, 2))
        g = metric.Metric(twoform=ds2)
        # Initial conditions (positions + velocities) and integration grid.
        init = (0.01, 0.01, 0.000001, 0.1)
        ts = numpy.arange(0, 1000, 0.1)
        df = geodesic.numerical_geodesic(g, init, ts)
        # NOTE(review): no assertions -- this only verifies the call completes.
        print('yay')
    def test_parallel(self):
        """Build the parallel-transport equation along a circle on a 2-sphere."""
        M = Manifold('M', dim=2)
        P = Patch('origin', M)
        theta, phi, a = sympy.symbols('theta phi a', nonnegative=True)
        cs = coords.CoordSystem('spherical', P, [theta, phi])
        dtheta, dphi = cs.base_oneforms()
        # Round 2-sphere of radius a: ds^2 = a^2 (dtheta^2 + sin^2(theta) dphi^2)
        ds2 = a ** 2 * (tpow(dtheta, 2) + sympy.sin(theta) ** 2 * tpow(dphi, 2))
        g2 = metric.Metric(twoform=ds2)
        param = sympy.symbols('lambda')
        # Curve: one full revolution in phi at fixed latitude theta = pi/4.
        curve = [
            2 * sympy.pi * param,
            sympy.pi / 4,
        ]
        lhs_0 = geodesic.parallel_transport_equation(0, curve, param, g2)
        # NOTE(review): no assertions -- result is only printed.
        print(lhs_0)
| 26.355556 | 82 | 0.643339 |
2b689ccfa7380be75641e1f8d82e88e6a5914155 | 46,722 | py | Python | otp/avatar/LocalAvatar.py | Max-Rodriguez/toontown-online | cdb5d11fc9a7bae6171d4f3f52b5550fac7bc106 | [
"BSD-3-Clause"
] | 1 | 2022-03-25T18:20:49.000Z | 2022-03-25T18:20:49.000Z | otp/avatar/LocalAvatar.py | Max-Rodriguez/toontown-online | cdb5d11fc9a7bae6171d4f3f52b5550fac7bc106 | [
"BSD-3-Clause"
] | null | null | null | otp/avatar/LocalAvatar.py | Max-Rodriguez/toontown-online | cdb5d11fc9a7bae6171d4f3f52b5550fac7bc106 | [
"BSD-3-Clause"
] | null | null | null | from panda3d.core import *
from panda3d.otp import Nametag, WhisperPopup
from direct.gui.DirectGui import *
from direct.showbase.PythonUtil import *
from direct.interval.IntervalGlobal import *
from direct.showbase.InputStateGlobal import inputState
from direct.controls import ControlManager
from . import DistributedAvatar
from direct.task import Task
from otp.otpbase import OTPGlobals
import math
import random
from direct.directnotify import DirectNotifyGlobal
from direct.distributed import DistributedSmoothNode
from otp.otpbase import OTPLocalizer
from direct.controls.GhostWalker import GhostWalker
from direct.controls.GravityWalker import GravityWalker
from direct.controls.ObserverWalker import ObserverWalker
from direct.controls.SwimWalker import SwimWalker
from direct.controls.TwoDWalker import TwoDWalker
class LocalAvatar(DistributedAvatar.DistributedAvatar, DistributedSmoothNode.DistributedSmoothNode):
notify = DirectNotifyGlobal.directNotify.newCategory('LocalAvatar')
wantDevCameraPositions = ConfigVariableBool('want-dev-camera-positions', 0).value
wantMouse = ConfigVariableBool('want-mouse', 0).value
sleepTimeout = ConfigVariableInt('sleep-timeout', 120).value
swimTimeout = ConfigVariableInt('afk-timeout', 600).value
__enableMarkerPlacement = ConfigVariableBool('place-markers', 0).value
acceptingNewFriends = ConfigVariableBool('accepting-new-friends', 1).value
acceptingNonFriendWhispers = ConfigVariableBool('accepting-non-friend-whispers', 0).value
    def __init__(self, cr, chatMgr, talkAssistant = None, passMessagesThrough = False):
        """Set up the locally-controlled avatar: collisions, controls, camera, chat.

        cr: the client repository; chatMgr: chat manager instance;
        talkAssistant: stored on the global ShowBase; passMessagesThrough is
        forwarded to the ControlManager.
        """
        # Re-entry guard: attribute access raises until first initialization.
        try:
            self.LocalAvatar_initialized
            return
        except:
            pass
        self.LocalAvatar_initialized = 1
        DistributedAvatar.DistributedAvatar.__init__(self, cr)
        DistributedSmoothNode.DistributedSmoothNode.__init__(self, cr)
        # Main collision traverser for the local avatar; pushed on base's stack.
        self.cTrav = CollisionTraverser('base.cTrav')
        base.pushCTrav(self.cTrav)
        self.cTrav.setRespectPrevTransform(1)
        self.avatarControlsEnabled = 0
        self.controlManager = ControlManager.ControlManager(True, passMessagesThrough)
        self.initializeCollisions()
        self.initializeSmartCamera()
        self.cameraPositions = []
        self.animMultiplier = 1.0
        self.runTimeout = 2.5
        self.customMessages = []
        self.chatMgr = chatMgr
        base.talkAssistant = talkAssistant
        self.commonChatFlags = 0
        self.garbleChat = 1
        self.teleportAllowed = 1
        self.lockedDown = 0
        # Page-up/down camera tilt state (mutually exclusive flags).
        self.isPageUp = 0
        self.isPageDown = 0
        self.soundRun = None
        self.soundWalk = None
        self.sleepFlag = 0
        self.isDisguised = 0
        self.movingFlag = 0
        self.swimmingFlag = 0
        self.lastNeedH = None
        # Messenger hooks for friend/player presence and whisper clicks.
        self.accept('friendOnline', self.__friendOnline)
        self.accept('friendOffline', self.__friendOffline)
        self.accept('clickedWhisper', self.clickedWhisper)
        self.accept('playerOnline', self.__playerOnline)
        self.accept('playerOffline', self.__playerOffline)
        self.sleepCallback = None
        self.accept('wakeup', self.wakeUp)
        self.jumpLandAnimFixTask = None
        self.fov = OTPGlobals.DefaultCameraFov
        # Any movement cancels the tilted page-up/down camera view.
        self.accept('avatarMoving', self.clearPageUpDown)
        self.nametag2dNormalContents = Nametag.CSpeech
        self.showNametag2d()
        self.setPickable(0)
        self.posCameraSeq = None
        return
    def useSwimControls(self):
        # Switch to the swimming movement controls.
        self.controlManager.use('swim', self)
    def useGhostControls(self):
        # Switch to the no-clip "ghost" movement controls.
        self.controlManager.use('ghost', self)
    def useWalkControls(self):
        # Switch to the normal gravity-walking controls.
        self.controlManager.use('walk', self)
    def useTwoDControls(self):
        # Switch to the side-scroller (2D) movement controls.
        self.controlManager.use('twoD', self)
    def isLockedDown(self):
        return self.lockedDown
    def lock(self):
        # Lock avatar interaction; double-locking is logged, not an error.
        if self.lockedDown == 1:
            self.notify.debug('lock() - already locked!')
        self.lockedDown = 1
    def unlock(self):
        if self.lockedDown == 0:
            self.notify.debug('unlock() - already unlocked!')
        self.lockedDown = 0
    def isInWater(self):
        # Water level is z == 0 in render space.
        return self.getZ(render) <= 0.0
    def isTeleportAllowed(self):
        # Teleporting is blocked while disguised (e.g. as a suit).
        return self.teleportAllowed and not self.isDisguised
    def setTeleportAllowed(self, flag):
        self.teleportAllowed = flag
        self.refreshOnscreenButtons()
    def sendFriendsListEvent(self):
        self.wakeUp()
        messenger.send('openFriendsList')
    def delete(self):
        """Tear down everything __init__ and the camera/control setup created."""
        # Re-entry guard mirroring the one in __init__.
        try:
            self.LocalAvatar_deleted
            return
        except:
            self.LocalAvatar_deleted = 1
        self.ignoreAll()
        self.stopJumpLandTask()
        taskMgr.remove('shadowReach')
        # Pop the traverser this avatar pushed in __init__.
        base.popCTrav()
        if self.posCameraSeq:
            self.posCameraSeq.finish()
            self.posCameraSeq = None
        self.disableAvatarControls()
        self.stopTrackAnimToSpeed()
        self.stopUpdateSmartCamera()
        self.shutdownSmartCamera()
        self.deleteCollisions()
        self.controlManager.delete()
        self.physControls = None
        del self.controlManager
        self.positionExaminer.delete()
        del self.positionExaminer
        taskMgr.remove(self.uniqueName('walkReturnTask'))
        self.chatMgr.delete()
        del self.chatMgr
        del self.soundRun
        del self.soundWalk
        # soundWhisper is only created lazily elsewhere, so guard the delete.
        if hasattr(self, 'soundWhisper'):
            del self.soundWhisper
        DistributedAvatar.DistributedAvatar.delete(self)
        return
    def shadowReach(self, state):
        # Per-frame task: extend the drop-shadow lifter to reach the ground
        # even while the avatar is airborne.
        if base.localAvatar.shadowPlacer:
            base.localAvatar.shadowPlacer.lifter.setReach(base.localAvatar.getAirborneHeight() + 4.0)
        return Task.cont
    def wantLegacyLifter(self):
        # Subclasses may override to request the legacy GravityWalker lifter.
        return False
def setupControls(self, avatarRadius = 1.4, floorOffset = OTPGlobals.FloorOffset, reach = 4.0, wallBitmask = OTPGlobals.WallBitmask, floorBitmask = OTPGlobals.FloorBitmask, ghostBitmask = OTPGlobals.GhostBitmask):
walkControls = GravityWalker(legacyLifter=self.wantLegacyLifter())
walkControls.setWallBitMask(wallBitmask)
walkControls.setFloorBitMask(floorBitmask)
walkControls.initializeCollisions(self.cTrav, self, avatarRadius, floorOffset, reach)
walkControls.setAirborneHeightFunc(self.getAirborneHeight)
self.controlManager.add(walkControls, 'walk')
self.physControls = walkControls
twoDControls = TwoDWalker()
twoDControls.setWallBitMask(wallBitmask)
twoDControls.setFloorBitMask(floorBitmask)
twoDControls.initializeCollisions(self.cTrav, self, avatarRadius, floorOffset, reach)
twoDControls.setAirborneHeightFunc(self.getAirborneHeight)
self.controlManager.add(twoDControls, 'twoD')
swimControls = SwimWalker()
swimControls.setWallBitMask(wallBitmask)
swimControls.setFloorBitMask(floorBitmask)
swimControls.initializeCollisions(self.cTrav, self, avatarRadius, floorOffset, reach)
swimControls.setAirborneHeightFunc(self.getAirborneHeight)
self.controlManager.add(swimControls, 'swim')
ghostControls = GhostWalker()
ghostControls.setWallBitMask(ghostBitmask)
ghostControls.setFloorBitMask(floorBitmask)
ghostControls.initializeCollisions(self.cTrav, self, avatarRadius, floorOffset, reach)
ghostControls.setAirborneHeightFunc(self.getAirborneHeight)
self.controlManager.add(ghostControls, 'ghost')
observerControls = ObserverWalker()
observerControls.setWallBitMask(ghostBitmask)
observerControls.setFloorBitMask(floorBitmask)
observerControls.initializeCollisions(self.cTrav, self, avatarRadius, floorOffset, reach)
observerControls.setAirborneHeightFunc(self.getAirborneHeight)
self.controlManager.add(observerControls, 'observer')
self.controlManager.use('walk', self)
self.controlManager.disable()
    def initializeCollisions(self):
        # Build the movement controllers with default radii/bitmasks.
        self.setupControls()
    def deleteCollisions(self):
        self.controlManager.deleteCollisions()
        # NOTE(review): 'entero157' looks like a leftover hard-coded collision
        # event name -- confirm it is still meaningful.
        self.ignore('entero157')
        del self.cTrav
    def initializeSmartCameraCollisions(self):
        """Build the collision solids/handlers the smart camera relies on.

        Three groups are created:
        - a segment from the avatar to the ideal camera spot (obstruction test),
        - two pusher spheres around the camera (keep it out of geometry),
        - two downward rays (keep the camera above the floor / fire on/off-floor events).
        """
        # -- Obstruction segment: avatar visibility point -> ideal camera pos.
        self.ccTrav = CollisionTraverser('LocalAvatar.ccTrav')
        self.ccLine = CollisionSegment(0.0, 0.0, 0.0, 1.0, 0.0, 0.0)
        self.ccLineNode = CollisionNode('ccLineNode')
        self.ccLineNode.addSolid(self.ccLine)
        self.ccLineNodePath = self.attachNewNode(self.ccLineNode)
        self.ccLineBitMask = OTPGlobals.CameraBitmask
        self.ccLineNode.setFromCollideMask(self.ccLineBitMask)
        self.ccLineNode.setIntoCollideMask(BitMask32.allOff())
        self.camCollisionQueue = CollisionHandlerQueue()
        self.ccTrav.addCollider(self.ccLineNodePath, self.camCollisionQueue)
        # -- Pusher sphere #1: traversed by the main cTrav each frame.
        self.ccSphere = CollisionSphere(0, 0, 0, 1)
        self.ccSphereNode = CollisionNode('ccSphereNode')
        self.ccSphereNode.addSolid(self.ccSphere)
        self.ccSphereNodePath = base.camera.attachNewNode(self.ccSphereNode)
        self.ccSphereNode.setFromCollideMask(OTPGlobals.CameraBitmask)
        self.ccSphereNode.setIntoCollideMask(BitMask32.allOff())
        self.camPusher = CollisionHandlerPusher()
        self.camPusher.addCollider(self.ccSphereNodePath, base.camera)
        self.camPusher.setCenter(self)
        # -- Pusher sphere #2: same solid, driven by a dedicated traverser so the
        # camera can be nudged outside the main collision pass.
        self.ccPusherTrav = CollisionTraverser('LocalAvatar.ccPusherTrav')
        self.ccSphere2 = self.ccSphere
        self.ccSphereNode2 = CollisionNode('ccSphereNode2')
        self.ccSphereNode2.addSolid(self.ccSphere2)
        self.ccSphereNodePath2 = base.camera.attachNewNode(self.ccSphereNode2)
        self.ccSphereNode2.setFromCollideMask(OTPGlobals.CameraBitmask)
        self.ccSphereNode2.setIntoCollideMask(BitMask32.allOff())
        self.camPusher2 = CollisionHandlerPusher()
        self.ccPusherTrav.addCollider(self.ccSphereNodePath2, self.camPusher2)
        self.camPusher2.addCollider(self.ccSphereNodePath2, base.camera)
        self.camPusher2.setCenter(self)
        # -- Downward ray #1: camera floor detection (queued results).
        self.camFloorRayNode = self.attachNewNode('camFloorRayNode')
        self.ccRay = CollisionRay(0.0, 0.0, 0.0, 0.0, 0.0, -1.0)
        self.ccRayNode = CollisionNode('ccRayNode')
        self.ccRayNode.addSolid(self.ccRay)
        self.ccRayNodePath = self.camFloorRayNode.attachNewNode(self.ccRayNode)
        self.ccRayBitMask = OTPGlobals.FloorBitmask
        self.ccRayNode.setFromCollideMask(self.ccRayBitMask)
        self.ccRayNode.setIntoCollideMask(BitMask32.allOff())
        self.ccTravFloor = CollisionTraverser('LocalAvatar.ccTravFloor')
        self.camFloorCollisionQueue = CollisionHandlerQueue()
        self.ccTravFloor.addCollider(self.ccRayNodePath, self.camFloorCollisionQueue)
        # -- Downward ray #2: drives 'on-floor'/'off-floor' messenger events.
        self.ccTravOnFloor = CollisionTraverser('LocalAvatar.ccTravOnFloor')
        self.ccRay2 = CollisionRay(0.0, 0.0, 0.0, 0.0, 0.0, -1.0)
        self.ccRay2Node = CollisionNode('ccRay2Node')
        self.ccRay2Node.addSolid(self.ccRay2)
        self.ccRay2NodePath = self.camFloorRayNode.attachNewNode(self.ccRay2Node)
        self.ccRay2BitMask = OTPGlobals.FloorBitmask
        self.ccRay2Node.setFromCollideMask(self.ccRay2BitMask)
        self.ccRay2Node.setIntoCollideMask(BitMask32.allOff())
        self.ccRay2MoveNodePath = hidden.attachNewNode('ccRay2MoveNode')
        self.camFloorCollisionBroadcaster = CollisionHandlerFloor()
        self.camFloorCollisionBroadcaster.setInPattern('on-floor')
        self.camFloorCollisionBroadcaster.setOutPattern('off-floor')
        self.camFloorCollisionBroadcaster.addCollider(self.ccRay2NodePath, self.ccRay2MoveNodePath)
    def deleteSmartCameraCollisions(self):
        """Release every solid/handler created by initializeSmartCameraCollisions."""
        del self.ccTrav
        del self.ccLine
        del self.ccLineNode
        self.ccLineNodePath.removeNode()
        del self.ccLineNodePath
        del self.camCollisionQueue
        del self.ccRay
        del self.ccRayNode
        self.ccRayNodePath.removeNode()
        del self.ccRayNodePath
        del self.ccRay2
        del self.ccRay2Node
        self.ccRay2NodePath.removeNode()
        del self.ccRay2NodePath
        self.ccRay2MoveNodePath.removeNode()
        del self.ccRay2MoveNodePath
        del self.ccTravOnFloor
        del self.ccTravFloor
        del self.camFloorCollisionQueue
        del self.camFloorCollisionBroadcaster
        del self.ccSphere
        del self.ccSphereNode
        self.ccSphereNodePath.removeNode()
        del self.ccSphereNodePath
        del self.camPusher
        del self.ccPusherTrav
        # ccSphere2 aliases ccSphere (already deleted above), so only the
        # second node/path/pusher remain to clean up.
        del self.ccSphere2
        del self.ccSphereNode2
        self.ccSphereNodePath2.removeNode()
        del self.ccSphereNodePath2
        del self.camPusher2
    def collisionsOff(self):
        # Delegate avatar collision toggling to the control manager.
        self.controlManager.collisionsOff()
    def collisionsOn(self):
        self.controlManager.collisionsOn()
def recalcCameraSphere(self):
nearPlaneDist = base.camLens.getNear()
hFov = base.camLens.getHfov()
vFov = base.camLens.getVfov()
hOff = nearPlaneDist * math.tan(deg2Rad(hFov / 2.0))
vOff = nearPlaneDist * math.tan(deg2Rad(vFov / 2.0))
camPnts = [Point3(hOff, nearPlaneDist, vOff),
Point3(-hOff, nearPlaneDist, vOff),
Point3(hOff, nearPlaneDist, -vOff),
Point3(-hOff, nearPlaneDist, -vOff),
Point3(0.0, 0.0, 0.0)]
avgPnt = Point3(0.0, 0.0, 0.0)
for camPnt in camPnts:
avgPnt = avgPnt + camPnt
avgPnt = avgPnt / len(camPnts)
sphereRadius = 0.0
for camPnt in camPnts:
dist = Vec3(camPnt - avgPnt).length()
if dist > sphereRadius:
sphereRadius = dist
avgPnt = Point3(avgPnt)
self.ccSphereNodePath.setPos(avgPnt)
self.ccSphereNodePath2.setPos(avgPnt)
self.ccSphere.setRadius(sphereRadius)
    def putCameraFloorRayOnAvatar(self):
        # Park the floor-detection ray 5 units above the avatar itself.
        self.camFloorRayNode.setPos(self, 0, 0, 5)
    def putCameraFloorRayOnCamera(self):
        # Move the floor-detection ray to the camera's collision sphere.
        self.camFloorRayNode.setPos(self.ccSphereNodePath, 0, 0, 0)
    def attachCamera(self):
        # Parent the camera to the avatar and wire up mouse-driven control.
        camera.reparentTo(self)
        base.enableMouse()
        base.setMouseOnNode(self.node())
        self.ignoreMouse = not self.wantMouse
        self.setWalkSpeedNormal()
    def detachCamera(self):
        base.disableMouse()
    def stopJumpLandTask(self):
        # Cancel the pending return-to-walk task scheduled after a landing.
        if self.jumpLandAnimFixTask:
            self.jumpLandAnimFixTask.remove()
            self.jumpLandAnimFixTask = None
        return
    def jumpStart(self):
        # Enter the airborne animation unless asleep or sad (hp <= 0).
        if not self.sleepFlag and self.hp > 0:
            self.b_setAnimState('jumpAirborne', 1.0)
            self.stopJumpLandTask()
    def returnToWalk(self, task):
        # Task callback: restore the idle anim state appropriate to hp/sleep.
        if self.sleepFlag:
            state = 'Sleep'
        elif self.hp > 0:
            state = 'Happy'
        else:
            state = 'Sad'
        self.b_setAnimState(state, 1.0)
        return Task.done
    # The 'if 1:' wrapper is a decompiler artifact; jumpLandAnimFix is an
    # ordinary method of the class.
    if 1:
        def jumpLandAnimFix(self, jumpTime):
            # Schedule the return-to-walk task after a landing, unless the
            # avatar is already in a run/walk anim; returns the task (or None).
            if self.playingAnim != 'run' and self.playingAnim != 'walk':
                return taskMgr.doMethodLater(jumpTime, self.returnToWalk, self.uniqueName('walkReturnTask'))
    def jumpHardLand(self):
        # Heavy landing: play the land anim, then return to walk after 1s.
        if self.allowHardLand():
            self.b_setAnimState('jumpLand', 1.0)
            self.stopJumpLandTask()
            self.jumpLandAnimFixTask = self.jumpLandAnimFix(1.0)
        if self.d_broadcastPosHpr:
            self.d_broadcastPosHpr()
    def jumpLand(self):
        # Soft landing: near-immediate return to walk.
        self.jumpLandAnimFixTask = self.jumpLandAnimFix(0.01)
        if self.d_broadcastPosHpr:
            self.d_broadcastPosHpr()
    def setupAnimationEvents(self):
        # Listen for jump events fired by the active walker.
        self.accept('jumpStart', self.jumpStart, [])
        self.accept('jumpHardLand', self.jumpHardLand, [])
        self.accept('jumpLand', self.jumpLand, [])
    def ignoreAnimationEvents(self):
        self.ignore('jumpStart')
        self.ignore('jumpHardLand')
        self.ignore('jumpLand')
    def allowHardLand(self):
        # Hard-land animation is suppressed while asleep or sad.
        return not self.sleepFlag and self.hp > 0
    def enableSmartCameraViews(self):
        # Keyboard bindings for cycling camera presets and tilting the view.
        self.accept('tab', self.nextCameraPos, [1])
        self.accept('shift-tab', self.nextCameraPos, [0])
        self.accept('page_up', self.pageUp)
        self.accept('page_down', self.pageDown)
    def disableSmartCameraViews(self):
        self.ignore('tab')
        self.ignore('shift-tab')
        self.ignore('page_up')
        self.ignore('page_down')
        self.ignore('page_down-up')
    def enableAvatarControls(self):
        # Idempotent: do nothing if controls are already live.
        if self.avatarControlsEnabled:
            return
        self.avatarControlsEnabled = 1
        self.setupAnimationEvents()
        self.controlManager.enable()
    def disableAvatarControls(self):
        if not self.avatarControlsEnabled:
            return
        self.avatarControlsEnabled = 0
        self.ignoreAnimationEvents()
        self.controlManager.disable()
        # Also drop any active page-up/down camera tilt.
        self.clearPageUpDown()
    def setWalkSpeedNormal(self):
        self.controlManager.setSpeeds(OTPGlobals.ToonForwardSpeed, OTPGlobals.ToonJumpForce, OTPGlobals.ToonReverseSpeed, OTPGlobals.ToonRotateSpeed)
    def setWalkSpeedSlow(self):
        self.controlManager.setSpeeds(OTPGlobals.ToonForwardSlowSpeed, OTPGlobals.ToonJumpSlowForce, OTPGlobals.ToonReverseSlowSpeed, OTPGlobals.ToonRotateSlowSpeed)
def pageUp(self):
if not self.avatarControlsEnabled:
return
self.wakeUp()
if not self.isPageUp:
self.isPageDown = 0
self.isPageUp = 1
self.lerpCameraFov(70, 0.6)
self.setCameraPositionByIndex(self.cameraIndex)
else:
self.clearPageUpDown()
def pageDown(self):
if not self.avatarControlsEnabled:
return
self.wakeUp()
if not self.isPageDown:
self.isPageUp = 0
self.isPageDown = 1
self.lerpCameraFov(70, 0.6)
self.setCameraPositionByIndex(self.cameraIndex)
else:
self.clearPageUpDown()
def clearPageUpDown(self):
if self.isPageDown or self.isPageUp:
self.lerpCameraFov(self.fov, 0.6)
self.isPageDown = 0
self.isPageUp = 0
self.setCameraPositionByIndex(self.cameraIndex)
def nextCameraPos(self, forward):
if not self.avatarControlsEnabled:
return
self.wakeUp()
self.__cameraHasBeenMoved = 1
if forward:
self.cameraIndex += 1
if self.cameraIndex > len(self.cameraPositions) - 1:
self.cameraIndex = 0
else:
self.cameraIndex -= 1
if self.cameraIndex < 0:
self.cameraIndex = len(self.cameraPositions) - 1
self.setCameraPositionByIndex(self.cameraIndex)
    def initCameraPositions(self):
        """Build the list of camera presets, scaled to the avatar's height.

        Each preset tuple is (ideal camera offset, normal look-at point,
        page-up look-at, page-down look-at, disable-smart-cam flag) -- the
        indices consumed by setCameraSettings().
        """
        camHeight = self.getClampedAvatarHeight()
        heightScaleFactor = camHeight * 0.3333333333
        defLookAt = Point3(0.0, 1.5, camHeight)
        # NOTE(review): scXoffset/scPosition appear unused below -- likely leftovers.
        scXoffset = 3.0
        scPosition = (Point3(scXoffset - 1, -10.0, camHeight + 5.0), Point3(scXoffset, 2.0, camHeight))
        self.cameraPositions = [(Point3(0.0, -9.0 * heightScaleFactor, camHeight),
          defLookAt,
          Point3(0.0, camHeight, camHeight * 4.0),
          Point3(0.0, camHeight, camHeight * -1.0),
          0),
         (Point3(0.0, 0.5, camHeight),
          defLookAt,
          Point3(0.0, camHeight, camHeight * 1.33),
          Point3(0.0, camHeight, camHeight * 0.66),
          1),
         (Point3(5.7 * heightScaleFactor, 7.65 * heightScaleFactor, camHeight + 2.0),
          Point3(0.0, 1.0, camHeight),
          Point3(0.0, 1.0, camHeight * 4.0),
          Point3(0.0, 1.0, camHeight * -1.0),
          0),
         (Point3(0.0, -24.0 * heightScaleFactor, camHeight + 4.0),
          defLookAt,
          Point3(0.0, 1.5, camHeight * 4.0),
          Point3(0.0, 1.5, camHeight * -1.0),
          0),
         (Point3(0.0, -12.0 * heightScaleFactor, camHeight + 4.0),
          defLookAt,
          Point3(0.0, 1.5, camHeight * 4.0),
          Point3(0.0, 1.5, camHeight * -1.0),
          0)] + self.auxCameraPositions
        # Extra debug-only viewpoints (top-down, side, far pull-backs).
        if self.wantDevCameraPositions:
            self.cameraPositions += [(Point3(0.0, 0.0, camHeight * 3),
              Point3(0.0, 0.0, 0.0),
              Point3(0.0, camHeight * 2, 0.0),
              Point3(0.0, -camHeight * 2, 0.0),
              1),
             (Point3(camHeight * 3, 0.0, camHeight),
              Point3(0.0, 0.0, camHeight),
              Point3(0.0, camHeight, camHeight * 1.1),
              Point3(0.0, camHeight, camHeight * 0.9),
              1),
             (Point3(camHeight * 3, 0.0, 0.0),
              Point3(0.0, 0.0, camHeight),
              Point3(0.0, camHeight, camHeight * 1.1),
              Point3(0.0, camHeight, camHeight * 0.9),
              1),
             (Point3(-camHeight * 3, 0.0, camHeight),
              Point3(0.0, 0.0, camHeight),
              Point3(0.0, camHeight, camHeight * 1.1),
              Point3(0.0, camHeight, camHeight * 0.9),
              1),
             (Point3(0.0, -60, 60),
              defLookAt + Point3(0, 15, 0),
              defLookAt + Point3(0, 15, 0),
              defLookAt + Point3(0, 15, 0),
              1),
             (Point3(0.0, -20, 20),
              defLookAt + Point3(0, 5, 0),
              defLookAt + Point3(0, 5, 0),
              defLookAt + Point3(0, 5, 0),
              1)]
    def addCameraPosition(self, camPos = None):
        """Append a camera preset; None captures the current camera view."""
        if camPos == None:
            # Derive the look-at point 1 unit in front of the current camera.
            lookAtNP = self.attachNewNode('lookAt')
            lookAtNP.setPos(base.cam, 0, 1, 0)
            lookAtPos = lookAtNP.getPos()
            camHeight = self.getClampedAvatarHeight()
            camPos = (base.cam.getPos(self),
             lookAtPos,
             Point3(0.0, 1.5, camHeight * 4.0),
             Point3(0.0, 1.5, camHeight * -1.0),
             1)
            lookAtNP.removeNode()
        # Track it in both lists so initCameraPositions() preserves it.
        self.auxCameraPositions.append(camPos)
        self.cameraPositions.append(camPos)
        return
    def resetCameraPosition(self):
        # Jump back to the first (default) preset.
        self.cameraIndex = 0
        self.setCameraPositionByIndex(self.cameraIndex)
    def removeCameraPosition(self):
        # Remove the current preset (never the last remaining one) and advance.
        if len(self.cameraPositions) > 1:
            camPos = self.cameraPositions[self.cameraIndex]
            if camPos in self.auxCameraPositions:
                self.auxCameraPositions.remove(camPos)
            if camPos in self.cameraPositions:
                self.cameraPositions.remove(camPos)
            self.nextCameraPos(1)
def printCameraPositions(self):
print('[')
for i in range(len(self.cameraPositions)):
self.printCameraPosition(i)
print(',')
print(']')
def printCameraPosition(self, index):
cp = self.cameraPositions[index]
print('(Point3(%0.2f, %0.2f, %0.2f),' % (cp[0][0], cp[0][1], cp[0][2]))
print('Point3(%0.2f, %0.2f, %0.2f),' % (cp[1][0], cp[1][1], cp[1][2]))
print('Point3(%0.2f, %0.2f, %0.2f),' % (cp[2][0], cp[2][1], cp[2][2]))
print('Point3(%0.2f, %0.2f, %0.2f),' % (cp[3][0], cp[3][1], cp[3][2]))
print('%d,' % cp[4])
print(')', end=' ')
    def posCamera(self, lerp, time):
        """Place the camera at the compromise position.

        lerp false: snap instantly.  lerp true: compute the destination
        pos/hpr, restore the current transform, then animate over `time`
        seconds with a posHprInterval.
        """
        if not lerp:
            self.positionCameraWithPusher(self.getCompromiseCameraPos(), self.getLookAtPoint())
        else:
            camPos = self.getCompromiseCameraPos()
            savePos = camera.getPos()
            saveHpr = camera.getHpr()
            # Temporarily move the camera to capture the destination hpr.
            self.positionCameraWithPusher(camPos, self.getLookAtPoint())
            x = camPos[0]
            y = camPos[1]
            z = camPos[2]
            destHpr = camera.getHpr()
            h = destHpr[0]
            p = destHpr[1]
            r = destHpr[2]
            # Restore, then lerp from the saved transform to the destination.
            camera.setPos(savePos)
            camera.setHpr(saveHpr)
            if self.posCameraSeq:
                self.posCameraSeq.finish()
                self.posCameraSeq = None
            self.posCameraSeq = camera.posHprInterval(time, Point3(x, y, z), Point3(h, p, r), name='posCamera')
            self.posCameraSeq.start()
    def getClampedAvatarHeight(self):
        # Camera math assumes at least a 3-unit-tall avatar.
        return max(self.getHeight(), 3.0)
    def getVisibilityPoint(self):
        # Point on the avatar the camera must keep line-of-sight to.
        return Point3(0.0, 0.0, self.getHeight())
    def setLookAtPoint(self, la):
        # Copy to avoid aliasing the caller's point.
        self.__curLookAt = Point3(la)
    def getLookAtPoint(self):
        return Point3(self.__curLookAt)
    def setIdealCameraPos(self, pos):
        self.__idealCameraPos = Point3(pos)
        # Keep the obstruction-test segment in sync with the new ideal pos.
        self.updateSmartCameraCollisionLineSegment()
    def getIdealCameraPos(self):
        return Point3(self.__idealCameraPos)
    def setCameraPositionByIndex(self, index):
        self.notify.debug('switching to camera position %s' % index)
        self.setCameraSettings(self.cameraPositions[index])
    def setCameraPosForPetInteraction(self):
        # Temporary over-the-shoulder view while interacting with a pet.
        height = self.getClampedAvatarHeight()
        point = Point3(height * (7 / 3.0), height * (-7 / 3.0), height)
        self.prevIdealPos = self.getIdealCameraPos()
        self.setIdealCameraPos(point)
        self.posCamera(1, 0.7)
    def unsetCameraPosForPetInteraction(self):
        # Restore the camera position saved by setCameraPosForPetInteraction.
        self.setIdealCameraPos(self.prevIdealPos)
        del self.prevIdealPos
        self.posCamera(1, 0.7)
    def setCameraSettings(self, camSettings):
        """Apply one preset tuple: (ideal pos, look-at, pageUp look-at,
        pageDown look-at, disable-smart-cam flag)."""
        self.setIdealCameraPos(camSettings[0])
        # Pick the look-at matching the current page-tilt state; the
        # both-flags-set case is treated the same as neither set.
        if self.isPageUp and self.isPageDown or not self.isPageUp and not self.isPageDown:
            self.__cameraHasBeenMoved = 1
            self.setLookAtPoint(camSettings[1])
        elif self.isPageUp:
            self.__cameraHasBeenMoved = 1
            self.setLookAtPoint(camSettings[2])
        elif self.isPageDown:
            self.__cameraHasBeenMoved = 1
            self.setLookAtPoint(camSettings[3])
        else:
            self.notify.error('This case should be impossible.')
        self.__disableSmartCam = camSettings[4]
        if self.__disableSmartCam:
            # With smart cam off, anchor the floor ray to the avatar.
            self.putCameraFloorRayOnAvatar()
            self.cameraZOffset = 0.0
    def getCompromiseCameraPos(self):
        """Return the camera position: the ideal spot, or -- when obstructed --
        a point pulled in along the visibility line, lifted as it gets closer."""
        if self.__idealCameraObstructed == 0:
            compromisePos = self.getIdealCameraPos()
        else:
            visPnt = self.getVisibilityPoint()
            idealPos = self.getIdealCameraPos()
            distance = Vec3(idealPos - visPnt).length()
            # Fraction of the full distance we may keep before the obstruction.
            ratio = self.closestObstructionDistance / distance
            compromisePos = idealPos * ratio + visPnt * (1 - ratio)
            # Lift the camera more the closer it is pulled toward the avatar.
            liftMult = 1.0 - ratio * ratio
            compromisePos = Point3(compromisePos[0], compromisePos[1], compromisePos[2] + self.getHeight() * 0.4 * liftMult)
        compromisePos.setZ(compromisePos[2] + self.cameraZOffset)
        return compromisePos
    def updateSmartCameraCollisionLineSegment(self):
        # Re-aim the obstruction segment from the visibility point to the
        # ideal camera position; skip degenerate (near-zero length) segments.
        pointB = self.getIdealCameraPos()
        pointA = self.getVisibilityPoint()
        vectorAB = Vec3(pointB - pointA)
        lengthAB = vectorAB.length()
        if lengthAB > 0.001:
            self.ccLine.setPointA(pointA)
            self.ccLine.setPointB(pointB)
    def initializeSmartCamera(self):
        # Reset all smart-camera state and build its collision machinery.
        self.__idealCameraObstructed = 0
        self.closestObstructionDistance = 0.0
        self.cameraIndex = 0
        self.auxCameraPositions = []
        self.cameraZOffset = 0.0
        self.__onLevelGround = 0
        self.__camCollCanMove = 0
        # Scene graph the camera collides against (defaults to render).
        self.__geom = render
        self.__disableSmartCam = 0
        self.initializeSmartCameraCollisions()
        self._smartCamEnabled = False
    def shutdownSmartCamera(self):
        self.deleteSmartCameraCollisions()
    def setOnLevelGround(self, flag):
        # When on level ground, per-frame floor interaction is skipped.
        self.__onLevelGround = flag
    def setCameraCollisionsCanMove(self, flag):
        # Force updates even when the camera itself hasn't moved.
        self.__camCollCanMove = flag
    def setGeom(self, geom):
        # Override the scene the smart camera traverses against.
        self.__geom = geom
    def startUpdateSmartCamera(self, push = 1):
        """Begin the per-frame smart-camera task (priority 47).

        push=1 enables camera pushing/floor broadcasting; push=0 runs the
        camera with the smart adjustments disabled.
        """
        if self._smartCamEnabled:
            LocalAvatar.notify.warning('redundant call to startUpdateSmartCamera')
            return
        self._smartCamEnabled = True
        self.__floorDetected = 0
        self.__cameraHasBeenMoved = 0
        self.recalcCameraSphere()
        self.initCameraPositions()
        self.setCameraPositionByIndex(self.cameraIndex)
        self.posCamera(0, 0.0)
        self.__instantaneousCamPos = camera.getPos()
        if push:
            self.cTrav.addCollider(self.ccSphereNodePath, self.camPusher)
            self.ccTravOnFloor.addCollider(self.ccRay2NodePath, self.camFloorCollisionBroadcaster)
            self.__disableSmartCam = 0
        else:
            self.__disableSmartCam = 1
        # Remember the render-space transform to detect external camera moves.
        self.__lastPosWrtRender = camera.getPos(render)
        self.__lastHprWrtRender = camera.getHpr(render)
        taskName = self.taskName('updateSmartCamera')
        taskMgr.remove(taskName)
        taskMgr.add(self.updateSmartCamera, taskName, priority=47)
        self.enableSmartCameraViews()
    def stopUpdateSmartCamera(self):
        """Stop the smart-camera task and detach its colliders."""
        if not self._smartCamEnabled:
            LocalAvatar.notify.warning('redundant call to stopUpdateSmartCamera')
            return
        self.disableSmartCameraViews()
        self.cTrav.removeCollider(self.ccSphereNodePath)
        self.ccTravOnFloor.removeCollider(self.ccRay2NodePath)
        # Guard against being called during avatar teardown.
        if not base.localAvatar.isEmpty():
            self.putCameraFloorRayOnAvatar()
        taskName = self.taskName('updateSmartCamera')
        taskMgr.remove(taskName)
        self._smartCamEnabled = False
def updateSmartCamera(self, task):
    # Per-frame task: skip all work when the camera cannot be moved by
    # collisions and has not been moved externally since last frame.
    if not self.__camCollCanMove and not self.__cameraHasBeenMoved:
        if self.__lastPosWrtRender == camera.getPos(render):
            if self.__lastHprWrtRender == camera.getHpr(render):
                return Task.cont
    self.__cameraHasBeenMoved = 0
    self.__lastPosWrtRender = camera.getPos(render)
    self.__lastHprWrtRender = camera.getHpr(render)
    self.__idealCameraObstructed = 0
    if not self.__disableSmartCam:
        # Line-of-sight traversal; the nearest hit (if any) pulls the camera in.
        self.ccTrav.traverse(self.__geom)
        if self.camCollisionQueue.getNumEntries() > 0:
            self.camCollisionQueue.sortEntries()
            self.handleCameraObstruction(self.camCollisionQueue.getEntry(0))
        if not self.__onLevelGround:
            self.handleCameraFloorInteraction()
    if not self.__idealCameraObstructed:
        # View is clear: ease the camera back toward its ideal spot.
        self.nudgeCamera()
    if not self.__disableSmartCam:
        self.ccPusherTrav.traverse(self.__geom)
        self.putCameraFloorRayOnCamera()
        self.ccTravOnFloor.traverse(self.__geom)
    return Task.cont
def positionCameraWithPusher(self, pos, lookAt):
camera.setPos(pos)
self.ccPusherTrav.traverse(self.__geom)
camera.lookAt(lookAt)
def nudgeCamera(self):
    """Ease the camera toward its compromise position and orientation.

    Each call blends the bookkeeping position ``__instantaneousCamPos``
    and the camera HPR a frame-rate-independent fraction of the way
    toward the target; once both are within CLOSE_ENOUGH the camera is
    snapped exactly onto the target and the method returns early.
    """
    CLOSE_ENOUGH = 0.1
    curCamPos = self.__instantaneousCamPos
    curCamHpr = camera.getHpr()
    targetCamPos = self.getCompromiseCameraPos()
    targetCamLookAt = self.getLookAtPoint()
    posDone = Vec3(curCamPos - targetCamPos).length() <= CLOSE_ENOUGH
    # Place the camera at the target and look at the look-at point to
    # measure the target HPR.  (BUG FIX: the original also called
    # camera.setPos(targetCamPos) inside the posDone branch, which was
    # redundant with this unconditional call.)
    camera.setPos(targetCamPos)
    camera.lookAt(targetCamLookAt)
    targetCamHpr = camera.getHpr()
    hprDone = Vec3(curCamHpr - targetCamHpr).length() <= CLOSE_ENOUGH
    if posDone and hprDone:
        return
    lerpRatio = 0.15
    # Convert the per-frame ratio into a frame-rate-independent blend
    # (equivalent to 15% of the remaining distance per 1/30 s).
    lerpRatio = 1 - pow(1 - lerpRatio, globalClock.getDt() * 30.0)
    self.__instantaneousCamPos = targetCamPos * lerpRatio + curCamPos * (1 - lerpRatio)
    if self.__disableSmartCam or not self.__idealCameraObstructed:
        newHpr = targetCamHpr * lerpRatio + curCamHpr * (1 - lerpRatio)
    else:
        # When the view is obstructed, snap the orientation immediately.
        newHpr = targetCamHpr
    camera.setPos(self.__instantaneousCamPos)
    camera.setHpr(newHpr)
def popCameraToDest(self):
newCamPos = self.getCompromiseCameraPos()
newCamLookAt = self.getLookAtPoint()
self.positionCameraWithPusher(newCamPos, newCamLookAt)
self.__instantaneousCamPos = camera.getPos()
def handleCameraObstruction(self, camObstrCollisionEntry):
    # Geometry lies between the look-at point and the ideal camera spot:
    # record the hit distance and pop the camera in front of the obstruction.
    collisionPoint = camObstrCollisionEntry.getSurfacePoint(self.ccLineNodePath)
    collisionVec = Vec3(collisionPoint - self.ccLine.getPointA())
    distance = collisionVec.length()
    self.__idealCameraObstructed = 1
    self.closestObstructionDistance = distance
    self.popCameraToDest()
def handleCameraFloorInteraction(self):
    # Keep the camera from sinking below the floor detected beneath it.
    self.putCameraFloorRayOnCamera()
    self.ccTravFloor.traverse(self.__geom)
    if self.__onLevelGround:
        # On known-level ground no floor offset is needed.
        return
    if self.camFloorCollisionQueue.getNumEntries() == 0:
        return
    self.camFloorCollisionQueue.sortEntries()
    camObstrCollisionEntry = self.camFloorCollisionQueue.getEntry(0)
    camHeightFromFloor = camObstrCollisionEntry.getSurfacePoint(self.ccRayNodePath)[2]
    self.cameraZOffset = camera.getPos()[2] + camHeightFromFloor
    if self.cameraZOffset < 0:
        self.cameraZOffset = 0
    if self.__floorDetected == 0:
        # First floor hit since the task started: snap instead of lerping.
        self.__floorDetected = 1
        self.popCameraToDest()
def lerpCameraFov(self, fov, time):
    # Smoothly change the horizontal field of view over `time` seconds.
    # NOTE(review): the task removed here is named 'cam-fov-lerp-play'
    # while the interval below is named 'cam-fov-lerp' -- confirm the
    # mismatch is intentional.
    taskMgr.remove('cam-fov-lerp-play')
    oldFov = base.camLens.getHfov()
    # Skip imperceptible changes.
    if abs(fov - oldFov) > 0.1:
        def setCamFov(fov):
            base.camLens.setFov(fov)
        self.camLerpInterval = LerpFunctionInterval(setCamFov, fromData=oldFov, toData=fov, duration=time, name='cam-fov-lerp')
        self.camLerpInterval.start()
def setCameraFov(self, fov):
self.fov = fov
if not (self.isPageDown or self.isPageUp):
base.camLens.setFov(self.fov)
def gotoNode(self, node, eyeHeight = 3):
possiblePoints = (Point3(3, 6, 0),
Point3(-3, 6, 0),
Point3(6, 6, 0),
Point3(-6, 6, 0),
Point3(3, 9, 0),
Point3(-3, 9, 0),
Point3(6, 9, 0),
Point3(-6, 9, 0),
Point3(9, 9, 0),
Point3(-9, 9, 0),
Point3(6, 0, 0),
Point3(-6, 0, 0),
Point3(6, 3, 0),
Point3(-6, 3, 0),
Point3(9, 9, 0),
Point3(-9, 9, 0),
Point3(0, 12, 0),
Point3(3, 12, 0),
Point3(-3, 12, 0),
Point3(6, 12, 0),
Point3(-6, 12, 0),
Point3(9, 12, 0),
Point3(-9, 12, 0),
Point3(0, -6, 0),
Point3(-3, -6, 0),
Point3(0, -9, 0),
Point3(-6, -9, 0))
for point in possiblePoints:
pos = self.positionExaminer.consider(node, point, eyeHeight)
if pos:
self.setPos(node, pos)
self.lookAt(node)
self.setHpr(self.getH() + random.choice((-10, 10)), 0, 0)
return
self.setPos(node, 0, 0, 0)
def setCustomMessages(self, customMessages):
self.customMessages = customMessages
messenger.send('customMessagesChanged')
def displayWhisper(self, fromId, chatString, whisperType):
    # Show an incoming avatar whisper as a margin popup.
    # NOTE(review): `sender` is initialised to None and never reassigned
    # (an avatar lookup appears to have been stripped from this build),
    # so normal/quick-talker whispers always return early and the
    # setClickable branch is unreachable -- verify against upstream.
    sender = None
    sfx = self.soundWhisper
    if whisperType == WhisperPopup.WTNormal or whisperType == WhisperPopup.WTQuickTalker:
        if sender == None:
            return
        chatString = sender.getName() + ': ' + chatString
    whisper = WhisperPopup(chatString, OTPGlobals.getInterfaceFont(), whisperType)
    if sender != None:
        whisper.setClickable(sender.getName(), fromId)
    whisper.manage(base.marginManager)
    base.playSfx(sfx)
    return
def displayWhisperPlayer(self, fromId, chatString, whisperType):
    # Show an incoming whisper from an account-level (player) friend,
    # prefixed with the sender's player name.
    # NOTE(review): `sender` is never reassigned, so the setClickable
    # branch below can never run -- looks like stripped code; verify.
    sender = None
    playerInfo = None
    sfx = self.soundWhisper
    playerInfo = base.cr.playerFriendsManager.playerId2Info.get(fromId, None)
    if playerInfo == None:
        return
    senderName = playerInfo.playerName
    if whisperType == WhisperPopup.WTNormal or whisperType == WhisperPopup.WTQuickTalker:
        chatString = senderName + ': ' + chatString
    whisper = WhisperPopup(chatString, OTPGlobals.getInterfaceFont(), whisperType)
    if sender != None:
        whisper.setClickable(senderName, fromId)
    whisper.manage(base.marginManager)
    base.playSfx(sfx)
    return
def setAnimMultiplier(self, value):
self.animMultiplier = value
def getAnimMultiplier(self):
return self.animMultiplier
def enableRun(self):
    # Listen for the up arrow under every modifier combination so that
    # holding it (with or without control/alt/shift) arms the run watch.
    for prefix in ('', 'control-', 'alt-', 'shift-'):
        self.accept(prefix + 'arrow_up', self.startRunWatch)
        self.accept(prefix + 'arrow_up-up', self.stopRunWatch)
def disableRun(self):
    # Stop listening for every run-trigger key event registered by enableRun.
    for prefix in ('', 'control-', 'alt-', 'shift-'):
        self.ignore(prefix + 'arrow_up')
        self.ignore(prefix + 'arrow_up-up')
def startRunWatch(self):
def setRun(ignored):
messenger.send('running-on')
taskMgr.doMethodLater(self.runTimeout, setRun, self.uniqueName('runWatch'))
return Task.cont
def stopRunWatch(self):
taskMgr.remove(self.uniqueName('runWatch'))
messenger.send('running-off')
return Task.cont
def runSound(self):
self.soundWalk.stop()
base.playSfx(self.soundRun, looping=1)
def walkSound(self):
self.soundRun.stop()
base.playSfx(self.soundWalk, looping=1)
def stopSound(self):
self.soundRun.stop()
self.soundWalk.stop()
def wakeUp(self):
    # Reset the inactivity timer and leave the Sleep state if needed.
    if self.sleepCallback != None:
        # Restart the pending sleep-watch countdown from now.
        taskMgr.remove(self.uniqueName('sleepwatch'))
        self.startSleepWatch(self.sleepCallback)
    self.lastMoved = globalClock.getFrameTime()
    if self.sleepFlag:
        self.sleepFlag = 0
    return
def gotoSleep(self):
if not self.sleepFlag:
self.b_setAnimState('Sleep', self.animMultiplier)
self.sleepFlag = 1
def forceGotoSleep(self):
if self.hp > 0:
self.sleepFlag = 0
self.gotoSleep()
def startSleepWatch(self, callback):
self.sleepCallback = callback
taskMgr.doMethodLater(self.sleepTimeout, callback, self.uniqueName('sleepwatch'))
def stopSleepWatch(self):
taskMgr.remove(self.uniqueName('sleepwatch'))
self.sleepCallback = None
return
def startSleepSwimTest(self):
taskName = self.taskName('sleepSwimTest')
taskMgr.remove(taskName)
task = Task.Task(self.sleepSwimTest)
self.lastMoved = globalClock.getFrameTime()
self.lastState = None
self.lastAction = None
self.sleepSwimTest(task)
taskMgr.add(self.sleepSwimTest, taskName, 35)
return
def stopSleepSwimTest(self):
taskName = self.taskName('sleepSwimTest')
taskMgr.remove(taskName)
self.stopSound()
def sleepSwimTest(self, task):
    # Per-frame task used while swimming: track whether the avatar is
    # actively moving and fire swimTimeoutAction after swimTimeout
    # seconds of inactivity.
    now = globalClock.getFrameTime()
    speed, rotSpeed, slideSpeed = self.controlManager.getSpeeds()
    if speed != 0.0 or rotSpeed != 0.0 or inputState.isSet('jump'):
        if not self.swimmingFlag:
            self.swimmingFlag = 1
    elif self.swimmingFlag:
        self.swimmingFlag = 0
    if self.swimmingFlag or self.hp <= 0:
        self.wakeUp()
    elif not self.sleepFlag:
        now = globalClock.getFrameTime()
        if now - self.lastMoved > self.swimTimeout:
            self.swimTimeoutAction()
            # Timeout action fired; end this task.
            return Task.done
    return Task.cont
def swimTimeoutAction(self):
pass
def trackAnimToSpeed(self, task):
    # Per-frame task: keep the avatar's animation state, "cheesy effect"
    # lean, and footstep sounds in sync with its current motion.
    speed, rotSpeed, slideSpeed = self.controlManager.getSpeeds()
    if speed != 0.0 or rotSpeed != 0.0 or inputState.isSet('jump'):
        if not self.movingFlag:
            self.movingFlag = 1
            self.stopLookAround()
    elif self.movingFlag:
        self.movingFlag = 0
        self.startLookAround()
    if self.movingFlag or self.hp <= 0:
        self.wakeUp()
    elif not self.sleepFlag:
        now = globalClock.getFrameTime()
        if now - self.lastMoved > self.sleepTimeout:
            self.gotoSleep()
    # Derive the broad animation state from sleep/health.
    state = None
    if self.sleepFlag:
        state = 'Sleep'
    elif self.hp > 0:
        state = 'Happy'
    else:
        state = 'Sad'
    if state != self.lastState:
        self.lastState = state
        self.b_setAnimState(state, self.animMultiplier)
        if state == 'Sad':
            self.setWalkSpeedSlow()
        else:
            self.setWalkSpeedNormal()
    # Flattened ("cheesy") avatars lean into turns with a short HPR lerp.
    if self.cheesyEffect == OTPGlobals.CEFlatProfile or self.cheesyEffect == OTPGlobals.CEFlatPortrait:
        needH = None
        if rotSpeed > 0.0:
            needH = -10
        elif rotSpeed < 0.0:
            needH = 10
        elif speed != 0.0:
            needH = 0
        if needH != None and self.lastNeedH != needH:
            node = self.getGeomNode().getChild(0)
            lerp = Sequence(LerpHprInterval(node, 0.5, Vec3(needH, 0, 0), blendType='easeInOut'), name='cheesy-lerp-hpr', autoPause=1)
            lerp.start()
            self.lastNeedH = needH
    else:
        self.lastNeedH = None
    # Switch walk/run/stop sound whenever the movement action changes.
    action = self.setSpeed(speed, rotSpeed)
    if action != self.lastAction:
        self.lastAction = action
        if self.emoteTrack:
            self.emoteTrack.finish()
            self.emoteTrack = None
        if action == OTPGlobals.WALK_INDEX or action == OTPGlobals.REVERSE_INDEX:
            self.walkSound()
        elif action == OTPGlobals.RUN_INDEX:
            self.runSound()
        else:
            self.stopSound()
    return Task.cont
def hasTrackAnimToSpeed(self):
taskName = self.taskName('trackAnimToSpeed')
return taskMgr.hasTaskNamed(taskName)
def startTrackAnimToSpeed(self):
taskName = self.taskName('trackAnimToSpeed')
taskMgr.remove(taskName)
task = Task.Task(self.trackAnimToSpeed)
self.lastMoved = globalClock.getFrameTime()
self.lastState = None
self.lastAction = None
self.trackAnimToSpeed(task)
taskMgr.add(self.trackAnimToSpeed, taskName, 35)
return
def stopTrackAnimToSpeed(self):
taskName = self.taskName('trackAnimToSpeed')
taskMgr.remove(taskName)
self.stopSound()
def startChat(self):
self.chatMgr.start()
self.accept(OTPGlobals.WhisperIncomingEvent, self.handlePlayerFriendWhisper)
self.accept(OTPGlobals.ThinkPosHotkey, self.thinkPos)
self.accept(OTPGlobals.PrintCamPosHotkey, self.printCamPos)
if self.__enableMarkerPlacement:
self.accept(OTPGlobals.PlaceMarkerHotkey, self.__placeMarker)
def stopChat(self):
self.chatMgr.stop()
self.ignore(OTPGlobals.WhisperIncomingEvent)
self.ignore(OTPGlobals.ThinkPosHotkey)
self.ignore(OTPGlobals.PrintCamPosHotkey)
if self.__enableMarkerPlacement:
self.ignore(OTPGlobals.PlaceMarkerHotkey)
def printCamPos(self):
node = base.camera.getParent()
pos = base.cam.getPos(node)
hpr = base.cam.getHpr(node)
print('cam pos = ', repr(pos), ', cam hpr = ', repr(hpr))
def d_broadcastPositionNow(self):
self.d_clearSmoothing()
self.d_broadcastPosHpr()
def travCollisionsLOS(self, n = None):
if n == None:
n = self.__geom
self.ccTrav.traverse(n)
return
def travCollisionsFloor(self, n = None):
if n == None:
n = self.__geom
self.ccTravFloor.traverse(n)
return
def travCollisionsPusher(self, n = None):
if n == None:
n = self.__geom
self.ccPusherTrav.traverse(n)
return
def __friendOnline(self, doId, commonChatFlags = 0, whitelistChatFlags = 0):
friend = base.cr.identifyFriend(doId)
if friend != None and hasattr(friend, 'setCommonAndWhitelistChatFlags'):
friend.setCommonAndWhitelistChatFlags(commonChatFlags, whitelistChatFlags)
if self.oldFriendsList != None:
now = globalClock.getFrameTime()
elapsed = now - self.timeFriendsListChanged
if elapsed < 10.0 and self.oldFriendsList.count(doId) == 0:
self.oldFriendsList.append(doId)
return
if friend != None:
self.setSystemMessage(doId, OTPLocalizer.WhisperFriendComingOnline % friend.getName())
return
def __friendOffline(self, doId):
friend = base.cr.identifyFriend(doId)
if friend != None:
self.setSystemMessage(0, OTPLocalizer.WhisperFriendLoggedOut % friend.getName())
return
def __playerOnline(self, playerId):
playerInfo = base.cr.playerFriendsManager.playerId2Info[playerId]
if playerInfo:
self.setSystemMessage(playerId, OTPLocalizer.WhisperPlayerOnline % (playerInfo.playerName, playerInfo.location))
def __playerOffline(self, playerId):
playerInfo = base.cr.playerFriendsManager.playerId2Info[playerId]
if playerInfo:
self.setSystemMessage(playerId, OTPLocalizer.WhisperPlayerOffline % playerInfo.playerName)
def clickedWhisper(self, doId, isPlayer = None):
if not isPlayer:
friend = base.cr.identifyFriend(doId)
if friend != None:
messenger.send('clickedNametag', [friend])
self.chatMgr.whisperTo(friend.getName(), doId)
else:
friend = base.cr.playerFriendsManager.getFriendInfo(doId)
if friend:
messenger.send('clickedNametagPlayer', [None, doId])
self.chatMgr.whisperTo(friend.getName(), None, doId)
return
def d_setParent(self, parentToken):
DistributedSmoothNode.DistributedSmoothNode.d_setParent(self, parentToken)
def handlePlayerFriendWhisper(self, playerId, charMessage):
print('handlePlayerFriendWhisper')
self.displayWhisperPlayer(playerId, charMessage, WhisperPopup.WTNormal)
def canChat(self):
return 0
| 38.454321 | 217 | 0.633106 |
4a7de1e6d7e232147c1636b5cd11679df2e4ddb2 | 6,093 | py | Python | models/BertAugmentedTransformer.py | ahmad0790/spotify-sequential-track-sequence-prediction | fa117000e66d72367592ddeba594e0f3a819c109 | [
"Apache-2.0"
] | 2 | 2021-03-18T04:47:11.000Z | 2021-09-26T18:25:46.000Z | models/BertAugmentedTransformer.py | ahmad0790/spotify-sequential-track-sequence-prediction | fa117000e66d72367592ddeba594e0f3a819c109 | [
"Apache-2.0"
] | null | null | null | models/BertAugmentedTransformer.py | ahmad0790/spotify-sequential-track-sequence-prediction | fa117000e66d72367592ddeba594e0f3a819c109 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import random
import math
import torch
from einops import rearrange
from torch import nn
from models.CustomizedTransformer import Transformer, TransformerEncoder, TransformerEncoderLayer, TransformerDecoder, TransformerDecoderLayer
from torch.nn import functional as F
class BertAugmentedTransformer(nn.Module):
def __init__(self, vocab_size, d_model, nhead, num_encoder_layers, num_decoder_layers, dim_feedforward, max_seq_length, skip_pred = False, feat_embed=None, device=None):
super(BertAugmentedTransformer, self).__init__()
self.device = device
self.max_length = max_seq_length
self.d_model = d_model
self.vocab_size = vocab_size
self.nhead = nhead
self.num_decoder_layers = num_decoder_layers
self.num_encoder_layers = num_encoder_layers
self.num_track_feats = 26
self.d_model_combined = self.d_model+self.num_track_feats
self.layer_norm = nn.LayerNorm(self.d_model_combined)
self.encoder_layer = TransformerEncoderLayer(d_model=self.d_model_combined, nhead=self.nhead, dropout=0.2, activation = 'relu')
self.transformer_encoder = TransformerEncoder(self.encoder_layer, self.num_encoder_layers, norm= self.layer_norm)
self.decoder_layer = TransformerDecoderLayer(d_model=self.d_model_combined, nhead=self.nhead, dropout = 0.2, activation= 'relu')
self.transformer_decoder = TransformerDecoder(self.decoder_layer, self.num_decoder_layers, norm = self.layer_norm)
self.skip_embed = nn.Embedding(2, self.d_model_combined)
self.feat_embed = nn.Embedding(self.vocab_size, self.num_track_feats)
self.track_embed = nn.Embedding(self.vocab_size, self.d_model)
self.pos_embed = nn.Embedding(self.max_length, self.d_model_combined)
feat_weights = torch.FloatTensor(feat_embed).to(self.device)
feat_weights[1,:] = torch.rand(self.num_track_feats)
feat_weights = F.normalize(feat_weights, p=2, dim=1)
self.feat_embed.weights = nn.Parameter(feat_weights, requires_grad=True)
if skip_pred:
self.fc = nn.Linear(self.d_model_combined, 2)
else:
self.fc = nn.Linear(self.d_model_combined, self.vocab_size)
#src bert and tgt bert are the bert fina layere output. they are the same.
def forward(self, src, tgt, bert_output, src_skip_inputs=None, src_mask=None, tgt_mask=None, encoder_mask=None):
"Take in and process masked src and target sequences."
#only during training for track sequences we do teacher forcing
#teacher forcing right shift target embedding by 1 (last token is not predicted)
if src_skip_inputs is None:
tgt = torch.cat((src[:,-1].reshape(src.shape[0],1), tgt[:, :-1]), 1)
encoder_output = self.encode(src, bert_output)
decoder_output = self.decode(encoder_output, tgt, bert_output)
output = self.fc(decoder_output)
output = rearrange(output, 't n e -> n t e')
return output
def encode(self, src, src_bert, src_skip_inputs=None, src_mask=None):
source_embeddings = self.embed(src, src_skip_inputs)
source_embeddings = rearrange(source_embeddings, 'n s t -> s n t')
x = self.transformer_encoder(source_embeddings, src_bert)
return x
def decode(self, encoder_output, tgt, tgt_bert, src_mask=None, tgt_mask=None, encoder_mask = None):
tgt_embeddings = self.embed(tgt)
tgt_no_peek_mask = self.gen_nopeek_mask(tgt.shape[1]).to(self.device)
tgt_embeddings = rearrange(tgt_embeddings, 'n s t -> s n t')
x = self.transformer_decoder(tgt = tgt_embeddings, tgt_bert = tgt_bert, memory = encoder_output, tgt_mask=tgt_no_peek_mask)
return x
def embed(self, src, src_skip_inputs=None):
"""
:param inputs: intTensor of shape (N,T)
:returns embeddings: floatTensor of shape (N,T,H)
"""
track_id_embed = self.track_embed(src)
track_feat_embed = self.feat_embed(src)
positional_encoding = torch.zeros(src.shape[0], src.shape[1]).to(torch.int64)
positional_encoding = positional_encoding.to(self.device)
for i in range(src.shape[0]):
positional_encoding[i,:] = torch.LongTensor([list(range(0,src.shape[1]))])
positional_embeddings = self.pos_embed(positional_encoding)
if src_skip_inputs is not None:
track_skip_embed = self.skip_embed(src_skip_inputs)
embeddings = torch.cat((track_id_embed, track_feat_embed), dim=2) + positional_embeddings + track_skip_embed
else:
embeddings = torch.cat((track_id_embed, track_feat_embed), dim=2) + positional_embeddings
return embeddings
def greedy_decoder(self, model, src, bert_output, max_len, start_tgt_sequence):
encoder_output = model.encode(src, bert_output)
#what the start token is
#print("DECODER DEEBUG")
tgt_tokens = start_tgt_sequence.reshape(start_tgt_sequence.shape[0], 1)
#tgt_probs = torch.FloatTensor((start_tgt_sequence.shape[0], self.max_length, self.vocab_size))
#print(tgt_tokens.shape)
#tgt_no_peek_mask = self.gen_nopeek_mask(tgt_tokens.shape[1]).to(self.device)
#ys = torch.ones(1, 1).fill_(start_symbol).type_as(src.data)
tgt_prob = torch.zeros((start_tgt_sequence.shape[0], self.max_length, self.vocab_size)).to(self.device)
#print(tgt_prob.shape)
for i in range(max_len):
#print(i)
#tgt_no_peek_mask = self.gen_nopeek_mask(tgt_tokens.shape[1]).to(self.device)
output = model.decode(encoder_output, tgt_tokens, bert_output)
#print("OUTPUT")
#print(output.shape)
output = self.fc(output)[-1, :,:]
#print("FC")
#print(output.shape)
tgt_prob[:,i,:] = output
output = F.log_softmax(output, dim=1)
#print(output.shape)
#next_token_prob = output
next_token_pred = torch.argmax(output, dim = 1).reshape(output.shape[0],1)
#print(next_token_pred.shape)
#remember this is the input to the model
if i != (max_len - 1):
tgt_tokens = torch.cat([tgt_tokens, next_token_pred], dim=1)
#print(tgt_tokens.shape)
#tgt_probs[:,i,:] = next_token_prob
#print(tgt_probs.shape)
#print(tgt_tokens)
#print("FINISHED DECODING")
return tgt_tokens, tgt_prob
def gen_nopeek_mask(self, length):
mask = rearrange(torch.triu(torch.ones(length, length)) == 1, 'h w -> w h')
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask | 42.02069 | 170 | 0.754636 |
c0c1aa11a284ecb2f2d7c95c3bc2d7d133a1d28f | 12,423 | py | Python | scipy/linalg/_expm_frechet.py | jake-is-ESD-protected/scipy | d7283ff75c218c300f372b5fdd960b987c1709a1 | [
"BSD-3-Clause"
] | 9,095 | 2015-01-02T18:24:23.000Z | 2022-03-31T20:35:31.000Z | scipy/linalg/_expm_frechet.py | jake-is-ESD-protected/scipy | d7283ff75c218c300f372b5fdd960b987c1709a1 | [
"BSD-3-Clause"
] | 11,500 | 2015-01-01T01:15:30.000Z | 2022-03-31T23:07:35.000Z | scipy/linalg/_expm_frechet.py | jake-is-ESD-protected/scipy | d7283ff75c218c300f372b5fdd960b987c1709a1 | [
"BSD-3-Clause"
] | 5,838 | 2015-01-05T11:56:42.000Z | 2022-03-31T23:21:19.000Z | """Frechet derivative of the matrix exponential."""
import numpy as np
import scipy.linalg
__all__ = ['expm_frechet', 'expm_cond']
def expm_frechet(A, E, method=None, compute_expm=True, check_finite=True):
    """
    Frechet derivative of the matrix exponential of A in the direction E.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix whose exponential is differentiated.
    E : (N, N) array_like
        Direction in which the Frechet derivative is taken.
    method : str, optional
        Algorithm to use: 'SPS' (scaling-Pade-squaring, the default) or
        'blockEnlarge' (naive block-matrix construction; about 8/3 the
        cost of 'SPS' with the same asymptotics).
    compute_expm : bool, optional
        When True (default), also return ``expm(A)``.
    check_finite : bool, optional
        Whether to reject inputs containing infinities or NaNs.  Disabling
        may give a performance gain, but may result in problems if the
        inputs do contain infinities or NaNs.

    Returns
    -------
    expm_A : ndarray
        Matrix exponential of A (omitted when ``compute_expm`` is False).
    expm_frechet_AE : ndarray
        Frechet derivative of ``expm`` at A in the direction E.

    References
    ----------
    Awad H. Al-Mohy and Nicholas J. Higham (2009), "Computing the Frechet
    Derivative of the Matrix Exponential, with an application to Condition
    Number Estimation", SIAM J. Matrix Anal. Appl. 30(4), 1639-1657.
    """
    # Coerce (and optionally sanity-check) both inputs exactly once.
    coerce = np.asarray_chkfinite if check_finite else np.asarray
    A = coerce(A)
    E = coerce(E)
    if A.ndim != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected A to be a square matrix')
    if E.ndim != 2 or E.shape[0] != E.shape[1]:
        raise ValueError('expected E to be a square matrix')
    if A.shape != E.shape:
        raise ValueError('expected A and E to be the same shape')
    # Dispatch on the requested implementation.
    implementations = {
        'SPS': expm_frechet_algo_64,
        'blockEnlarge': expm_frechet_block_enlarge,
    }
    if method is None:
        method = 'SPS'
    if method not in implementations:
        raise ValueError('Unknown implementation %s' % method)
    expm_A, expm_frechet_AE = implementations[method](A, E)
    if compute_expm:
        return expm_A, expm_frechet_AE
    return expm_frechet_AE
def expm_frechet_block_enlarge(A, E):
    """
    Naive reference implementation (mostly for testing and profiling).

    Returns ``(expm(A), L(A, E))`` by exponentiating the enlarged block
    matrix ``[[A, E], [0, A]]``, whose top-right block is the Frechet
    derivative.
    """
    n = A.shape[0]
    M = np.block([[A, E],
                  [np.zeros_like(A), A]])
    expm_M = scipy.linalg.expm(M)
    return expm_M[:n, :n], expm_M[:n, n:]
"""
Maximal values ell_m of ||2**-s A|| such that the backward error bound
does not exceed 2**-53.
"""
ell_table_61 = (
None,
# 1
2.11e-8,
3.56e-4,
1.08e-2,
6.49e-2,
2.00e-1,
4.37e-1,
7.83e-1,
1.23e0,
1.78e0,
2.42e0,
# 11
3.13e0,
3.90e0,
4.74e0,
5.63e0,
6.56e0,
7.52e0,
8.53e0,
9.56e0,
1.06e1,
1.17e1,
)
# The b vectors and U and V are copypasted
# from scipy.sparse.linalg.matfuncs.py.
# M, Lu, Lv follow (6.11), (6.12), (6.13), (3.3)
def _diff_pade3(A, E, ident):
b = (120., 60., 12., 1.)
A2 = A.dot(A)
M2 = np.dot(A, E) + np.dot(E, A)
U = A.dot(b[3]*A2 + b[1]*ident)
V = b[2]*A2 + b[0]*ident
Lu = A.dot(b[3]*M2) + E.dot(b[3]*A2 + b[1]*ident)
Lv = b[2]*M2
return U, V, Lu, Lv
def _diff_pade5(A, E, ident):
b = (30240., 15120., 3360., 420., 30., 1.)
A2 = A.dot(A)
M2 = np.dot(A, E) + np.dot(E, A)
A4 = np.dot(A2, A2)
M4 = np.dot(A2, M2) + np.dot(M2, A2)
U = A.dot(b[5]*A4 + b[3]*A2 + b[1]*ident)
V = b[4]*A4 + b[2]*A2 + b[0]*ident
Lu = (A.dot(b[5]*M4 + b[3]*M2) +
E.dot(b[5]*A4 + b[3]*A2 + b[1]*ident))
Lv = b[4]*M4 + b[2]*M2
return U, V, Lu, Lv
def _diff_pade7(A, E, ident):
b = (17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.)
A2 = A.dot(A)
M2 = np.dot(A, E) + np.dot(E, A)
A4 = np.dot(A2, A2)
M4 = np.dot(A2, M2) + np.dot(M2, A2)
A6 = np.dot(A2, A4)
M6 = np.dot(A4, M2) + np.dot(M4, A2)
U = A.dot(b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident)
V = b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident
Lu = (A.dot(b[7]*M6 + b[5]*M4 + b[3]*M2) +
E.dot(b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident))
Lv = b[6]*M6 + b[4]*M4 + b[2]*M2
return U, V, Lu, Lv
def _diff_pade9(A, E, ident):
b = (17643225600., 8821612800., 2075673600., 302702400., 30270240.,
2162160., 110880., 3960., 90., 1.)
A2 = A.dot(A)
M2 = np.dot(A, E) + np.dot(E, A)
A4 = np.dot(A2, A2)
M4 = np.dot(A2, M2) + np.dot(M2, A2)
A6 = np.dot(A2, A4)
M6 = np.dot(A4, M2) + np.dot(M4, A2)
A8 = np.dot(A4, A4)
M8 = np.dot(A4, M4) + np.dot(M4, A4)
U = A.dot(b[9]*A8 + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident)
V = b[8]*A8 + b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident
Lu = (A.dot(b[9]*M8 + b[7]*M6 + b[5]*M4 + b[3]*M2) +
E.dot(b[9]*A8 + b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident))
Lv = b[8]*M8 + b[6]*M6 + b[4]*M4 + b[2]*M2
return U, V, Lu, Lv
def expm_frechet_algo_64(A, E):
    """
    Scaling-Pade-squaring evaluation of ``(expm(A), L(A, E))``.

    Double-precision variant of Al-Mohy & Higham (2009): pick the smallest
    Pade degree m in {3, 5, 7, 9} whose backward-error bound ell_m admits
    ||A||_1; otherwise scale A (and E) by 2**-s, apply the degree-13
    approximant, and square the results s times.
    """
    n = A.shape[0]
    ident = np.identity(n)
    A_norm_1 = scipy.linalg.norm(A, 1)
    s = None
    for m, pade in ((3, _diff_pade3), (5, _diff_pade5),
                    (7, _diff_pade7), (9, _diff_pade9)):
        if A_norm_1 <= ell_table_61[m]:
            U, V, Lu, Lv = pade(A, E, ident)
            s = 0
            break
    if s is None:
        # No low degree fits: scale so the degree-13 approximant is accurate.
        s = max(0, int(np.ceil(np.log2(A_norm_1 / ell_table_61[13]))))
        A = A * 2.0**-s
        E = E * 2.0**-s
        # Powers of A and the corresponding Frechet-derivative terms.
        A2 = np.dot(A, A)
        M2 = np.dot(A, E) + np.dot(E, A)
        A4 = np.dot(A2, A2)
        M4 = np.dot(A2, M2) + np.dot(M2, A2)
        A6 = np.dot(A2, A4)
        M6 = np.dot(A4, M2) + np.dot(M4, A2)
        b = (64764752532480000., 32382376266240000., 7771770303897600.,
             1187353796428800., 129060195264000., 10559470521600.,
             670442572800., 33522128640., 1323241920., 40840800., 960960.,
             16380., 182., 1.)
        W1 = b[13]*A6 + b[11]*A4 + b[9]*A2
        W2 = b[7]*A6 + b[5]*A4 + b[3]*A2 + b[1]*ident
        Z1 = b[12]*A6 + b[10]*A4 + b[8]*A2
        Z2 = b[6]*A6 + b[4]*A4 + b[2]*A2 + b[0]*ident
        W = np.dot(A6, W1) + W2
        U = np.dot(A, W)
        V = np.dot(A6, Z1) + Z2
        Lw1 = b[13]*M6 + b[11]*M4 + b[9]*M2
        Lw2 = b[7]*M6 + b[5]*M4 + b[3]*M2
        Lz1 = b[12]*M6 + b[10]*M4 + b[8]*M2
        Lz2 = b[6]*M6 + b[4]*M4 + b[2]*M2
        Lw = np.dot(A6, Lw1) + np.dot(M6, W1) + Lw2
        Lu = np.dot(A, Lw) + np.dot(E, W)
        Lv = np.dot(A6, Lz1) + np.dot(M6, Z1) + Lz2
    # Solve (-U + V) R = (U + V) and the matching Frechet system, factoring
    # the left-hand side only once.
    lu_piv = scipy.linalg.lu_factor(-U + V)
    R = scipy.linalg.lu_solve(lu_piv, U + V)
    L = scipy.linalg.lu_solve(lu_piv, Lu + Lv + np.dot(Lu - Lv, R))
    # Undo the scaling: square R s times, propagating through L.
    for _ in range(s):
        L = np.dot(R, L) + np.dot(L, R)
        R = np.dot(R, R)
    return R, L
def vec(M):
    """
    Stack the columns of M into a single vector (column-major ravel).

    Parameters
    ----------
    M : 2-D array_like
        Input matrix.

    Returns
    -------
    v : 1-D ndarray
        Columns of M concatenated top-to-bottom (standard vec operator).
    """
    return M.transpose().ravel()
def expm_frechet_kronform(A, method=None, check_finite=True):
    """
    Construct the Kronecker form of the Frechet derivative of expm.

    Parameters
    ----------
    A : array_like with shape (N, N)
        Matrix at which the derivative is taken.
    method : str, optional
        Passed through to `expm_frechet`.
    check_finite : bool, optional
        Whether to reject inputs containing infinities or NaNs.

    Returns
    -------
    K : 2-D ndarray with shape (N*N, N*N)
        Each column is ``vec`` of the Frechet derivative in one elementary
        matrix direction; used for expm condition-number estimation.
    """
    A = np.asarray_chkfinite(A) if check_finite else np.asarray(A)
    if A.ndim != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected a square matrix')
    n = A.shape[0]
    ident = np.identity(n)
    # One column per elementary direction e_i e_j^T, (i, j) in row-major order.
    cols = [vec(expm_frechet(A, np.outer(ident[i], ident[j]), method=method,
                             compute_expm=False, check_finite=False))
            for i in range(n) for j in range(n)]
    return np.vstack(cols).T
def expm_cond(A, check_finite=True):
    """
    Relative condition number of the matrix exponential in the Frobenius norm.

    Parameters
    ----------
    A : 2-D array_like
        Square input matrix with shape (N, N).
    check_finite : bool, optional
        Whether to reject inputs containing infinities or NaNs.

    Returns
    -------
    kappa : float
        ``||K(A)||_2 * ||A||_F / ||expm(A)||_F``, where K(A) is the
        Kronecker form of the Frechet derivative of expm at A.

    Examples
    --------
    >>> from scipy.linalg import expm_cond
    >>> A = np.array([[-0.3, 0.2, 0.6], [0.6, 0.3, -0.1], [-0.7, 1.2, 0.9]])
    >>> expm_cond(A)
    1.7787805864469866
    """
    A = np.asarray_chkfinite(A) if check_finite else np.asarray(A)
    if A.ndim != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected a square matrix')
    X = scipy.linalg.expm(A)
    K = expm_frechet_kronform(A, check_finite=False)
    # Deliberate norm choices: Frobenius for A and X, induced 2-norm for K.
    A_norm = scipy.linalg.norm(A, 'fro')
    X_norm = scipy.linalg.norm(X, 'fro')
    K_norm = scipy.linalg.norm(K, 2)
    return (K_norm * A_norm) / X_norm
| 30.226277 | 79 | 0.55848 |
6006fd7f941907c972aac7bc95997ac01d1937fa | 4,265 | py | Python | blis/monte_carlo_inputs.py | EnergyModels/BLIS | 514577a99a16804894019b4ee53d48eda0c6c313 | [
"Unlicense"
] | null | null | null | blis/monte_carlo_inputs.py | EnergyModels/BLIS | 514577a99a16804894019b4ee53d48eda0c6c313 | [
"Unlicense"
] | null | null | null | blis/monte_carlo_inputs.py | EnergyModels/BLIS | 514577a99a16804894019b4ee53d48eda0c6c313 | [
"Unlicense"
] | null | null | null | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
"""
BLIS - Balancing Load of Intermittent Solar:
A characteristic-based transient power plant model
Copyright (C) 2020. University of Virginia Licensing & Ventures Group (UVA LVG). All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
import pandas as pd
import numpy as np
# =============================================================================#
# Create MonteCarlo Inputs
# =============================================================================#
def monteCarloInputs(filename, sheetname, iterations):
    """Draw Monte Carlo samples for every parameter defined in an Excel sheet.

    The sheet (indexed by parameter name) must provide a "Distribution" column
    plus the statistics that distribution needs ("Average", "Stdev", "Low",
    "High"). Supported distributions: constant, uniform, normal, lognormal,
    and triangle. Unknown distribution labels leave the column at 0.0.

    :param filename: path of the Excel workbook holding the parameter table
    :param sheetname: sheet to read; also recorded in the output
    :param iterations: number of Monte Carlo samples (rows) to generate
    :return: DataFrame with one column per parameter plus a 'sheetname' column
    """
    # Read Excel with inputs (parameter names are the sheet's index column)
    df_xls = pd.read_excel(filename, sheet_name=sheetname, index_col=0)
    # Create Dataframe to hold inputs
    rows = range(iterations)
    parameters1 = df_xls.index.values
    parameters2 = np.append('sheetname', parameters1)
    df = pd.DataFrame(data=0.0, index=rows, columns=parameters2)
    # Create Inputs
    # NOTE(fix): assignments below use df.loc[:, param] instead of the original
    # chained form df.loc[:][param], which writes through a temporary object
    # and can silently fail to update df (pandas SettingWithCopy hazard).
    for param in parameters1:
        dist_type = df_xls.loc[param]["Distribution"]
        # Constants
        if dist_type == "constant" or dist_type == "Constant" or dist_type == "C":
            avg = df_xls.loc[param]["Average"]
            df.loc[:, param] = avg
        # Uniform Distributions
        elif dist_type == "uniform" or dist_type == "Uniform" or dist_type == "U":
            low = df_xls.loc[param]["Low"]
            high = df_xls.loc[param]["High"]
            df.loc[:, param] = np.random.uniform(low=low, high=high, size=iterations)
        # Normal Distributions
        elif dist_type == "normal" or dist_type == "Normal" or dist_type == "N":
            avg = df_xls.loc[param]["Average"]
            stdev = df_xls.loc[param]["Stdev"]
            df.loc[:, param] = np.random.normal(loc=avg, scale=stdev, size=iterations)
        # LogNormal Distributions
        elif dist_type == "lognormal" or dist_type == "Lognormal" or dist_type == "LN":
            avg = df_xls.loc[param]["Average"]
            stdev = df_xls.loc[param]["Stdev"]
            df.loc[:, param] = np.random.lognormal(mean=avg, sigma=stdev, size=iterations)
        # Triangular Distributions
        elif dist_type == "triangle" or dist_type == "Triangle" or dist_type == "T":
            left = df_xls.loc[param]["Low"]
            mode = df_xls.loc[param]["Average"]
            right = df_xls.loc[param]["High"]
            df.loc[:, param] = np.random.triangular(left, mode, right, size=iterations)
    df.loc[:, 'sheetname'] = sheetname
    return df
# =============================================================================#
# Use MonteCarlo Inputs to Create Baselines
# =============================================================================#
def baselineInputs(filename, sheetname):
    """Build a baseline input Series using each parameter's 'Average' value.

    :param filename: path of the Excel workbook holding the parameter table
    :param sheetname: sheet to read; also recorded in the output
    :return: Series indexed by 'sheetname' followed by the parameter names
    """
    # Read Excel with inputs (parameter names are the sheet's index column)
    df_xls = pd.read_excel(filename, sheet_name=sheetname, index_col=0)
    # Series index: 'sheetname' first, then every parameter
    param_names = df_xls.index.values
    baseline = pd.Series(data=0.0, index=np.append('sheetname', param_names))
    # Collect the baseline value of every parameter, then write them in bulk
    values = {name: df_xls.loc[name]["Average"] for name in param_names}
    values['sheetname'] = sheetname
    for key, value in values.items():
        baseline.loc[key] = value
    return baseline
| 44.894737 | 117 | 0.606331 |
8544c315a9ff275a7657df603980742f34bbe8cf | 6,225 | py | Python | isi_sdk/models/results_id_top_files_file.py | Atomicology/isilon_sdk_python | 91039da803ae37ed4abf8d2a3f59c333f3ef1866 | [
"MIT"
] | null | null | null | isi_sdk/models/results_id_top_files_file.py | Atomicology/isilon_sdk_python | 91039da803ae37ed4abf8d2a3f59c333f3ef1866 | [
"MIT"
] | null | null | null | isi_sdk/models/results_id_top_files_file.py | Atomicology/isilon_sdk_python | 91039da803ae37ed4abf8d2a3f59c333f3ef1866 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class ResultsIdTopFilesFile(object):
    """
    Swagger model for a "top files" result entry.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self):
        """
        ResultsIdTopFilesFile - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
            and the value is attribute type.
        :param dict attributeMap: The key is attribute name
            and the value is json key in definition.
        """
        # attribute name -> swagger type of the attribute
        self.swagger_types = {
            'atime': 'int',
            'btime': 'int',
            'ctime': 'int',
            'log_size': 'int',
            'path': 'str',
            'phys_size': 'int'
        }
        # attribute name -> json key in the API payload
        self.attribute_map = {
            'atime': 'atime',
            'btime': 'btime',
            'ctime': 'ctime',
            'log_size': 'log_size',
            'path': 'path',
            'phys_size': 'phys_size'
        }
        self._atime = None
        self._btime = None
        self._ctime = None
        self._log_size = None
        self._path = None
        self._phys_size = None

    @property
    def atime(self):
        """File access time. :rtype: int"""
        return self._atime

    @atime.setter
    def atime(self, atime):
        """Set the file access time. :type atime: int"""
        self._atime = atime

    @property
    def btime(self):
        """File creation begin time. :rtype: int"""
        return self._btime

    @btime.setter
    def btime(self, btime):
        """Set the file creation begin time. :type btime: int"""
        self._btime = btime

    @property
    def ctime(self):
        """Unix inode change time. :rtype: int"""
        return self._ctime

    @ctime.setter
    def ctime(self, ctime):
        """Set the Unix inode change time. :type ctime: int"""
        self._ctime = ctime

    @property
    def log_size(self):
        """Logical file size in bytes. :rtype: int"""
        return self._log_size

    @log_size.setter
    def log_size(self, log_size):
        """Set the logical file size in bytes. :type log_size: int"""
        self._log_size = log_size

    @property
    def path(self):
        """Relative file path under /ifs/. :rtype: str"""
        return self._path

    @path.setter
    def path(self, path):
        """Set the relative file path under /ifs/. :type path: str"""
        self._path = path

    @property
    def phys_size(self):
        """Physical file size in bytes. :rtype: int"""
        return self._phys_size

    @phys_size.setter
    def phys_size(self, phys_size):
        """Set the physical file size in bytes. :type phys_size: int"""
        self._phys_size = phys_size

    def to_dict(self):
        """
        Returns the model properties as a dict, recursing into nested
        models and lists of models.
        """
        result = {}
        # Iterating the dict directly replaces the former six.iteritems()
        # call; behavior is identical on both Python 2 and 3.
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            else:
                result[attr] = value
        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are models of the same type and equal.
        """
        # Fix: guard the type first — comparing against e.g. an int used to
        # raise AttributeError on the missing __dict__ instead of being False.
        if not isinstance(other, ResultsIdTopFilesFile):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| 25.9375 | 77 | 0.564819 |
2d119e2cf8601a889785f2f58ceff68ec4de5755 | 4,423 | py | Python | homeassistant/components/concord232/binary_sensor.py | itewk/home-assistant | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | [
"Apache-2.0"
] | 23 | 2017-11-15T21:03:53.000Z | 2021-03-29T21:33:48.000Z | homeassistant/components/concord232/binary_sensor.py | itewk/home-assistant | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | [
"Apache-2.0"
] | 9 | 2022-01-27T06:32:10.000Z | 2022-03-31T07:07:51.000Z | homeassistant/components/concord232/binary_sensor.py | itewk/home-assistant | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | [
"Apache-2.0"
] | 10 | 2018-01-01T00:12:51.000Z | 2021-12-21T23:08:05.000Z | """Support for exposing Concord232 elements as sensors."""
import datetime
import logging
from concord232 import client as concord232_client
import requests
import voluptuous as vol
from homeassistant.components.binary_sensor import (
DEVICE_CLASSES,
PLATFORM_SCHEMA,
BinarySensorDevice,
)
from homeassistant.const import CONF_HOST, CONF_PORT
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
CONF_EXCLUDE_ZONES = "exclude_zones"
CONF_ZONE_TYPES = "zone_types"
DEFAULT_HOST = "localhost"
DEFAULT_NAME = "Alarm"
DEFAULT_PORT = "5007"
DEFAULT_SSL = False
SCAN_INTERVAL = datetime.timedelta(seconds=10)
ZONE_TYPES_SCHEMA = vol.Schema({cv.positive_int: vol.In(DEVICE_CLASSES)})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_EXCLUDE_ZONES, default=[]): vol.All(
cv.ensure_list, [cv.positive_int]
),
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_ZONE_TYPES, default={}): ZONE_TYPES_SCHEMA,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Concord232 binary sensor platform."""
    host = config.get(CONF_HOST)
    port = config.get(CONF_PORT)
    excluded_zones = config.get(CONF_EXCLUDE_ZONES)
    zone_types = config.get(CONF_ZONE_TYPES)

    try:
        _LOGGER.debug("Initializing client")
        client = concord232_client.Client(f"http://{host}:{port}")
        client.zones = client.list_zones()
        client.last_zone_update = dt_util.utcnow()
    except requests.exceptions.ConnectionError as ex:
        _LOGGER.error("Unable to connect to Concord232: %s", str(ex))
        return False

    # The order of zones returned by client.list_zones() can vary.
    # When the zones are not named, this can result in the same entity
    # name mapping to different sensors in an unpredictable way. Sort
    # the zones by zone number to prevent this.
    client.zones.sort(key=lambda zone: zone["number"])

    sensors = []
    for zone in client.zones:
        _LOGGER.info("Loading Zone found: %s", zone["name"])
        if zone["number"] in excluded_zones:
            continue
        # explicit type from config wins; otherwise guess from the zone name
        device_class = zone_types.get(zone["number"], get_opening_type(zone))
        sensors.append(Concord232ZoneSensor(hass, client, zone, device_class))
    add_entities(sensors, True)
def get_opening_type(zone):
    """Return the result of the type guessing from name."""
    # Keyword checks in priority order; the first match wins.
    keyword_to_type = (
        ("MOTION", "motion"),
        ("KEY", "safety"),
        ("SMOKE", "smoke"),
        ("WATER", "water"),
    )
    for keyword, opening_type in keyword_to_type:
        if keyword in zone["name"]:
            return opening_type
    return "opening"
class Concord232ZoneSensor(BinarySensorDevice):
    """Representation of a Concord232 zone as a sensor."""
    def __init__(self, hass, client, zone, zone_type):
        """Initialize the Concord232 binary sensor.

        The client instance is shared by all zone sensors of the platform
        and carries a cached zone list plus its last refresh timestamp.
        """
        self._hass = hass
        self._client = client
        self._zone = zone
        # keep the zone number so the zone dict can be re-resolved after
        # the shared cache is refreshed (see update())
        self._number = zone["number"]
        self._zone_type = zone_type
    @property
    def device_class(self):
        """Return the class of this sensor, from DEVICE_CLASSES."""
        return self._zone_type
    @property
    def should_poll(self):
        """No polling needed."""
        # NOTE(review): the docstring says "no polling" but True opts the
        # entity INTO polling, which update() below relies on — presumably
        # the docstring is stale; verify against Home Assistant entity docs.
        return True
    @property
    def name(self):
        """Return the name of the binary sensor."""
        return self._zone["name"]
    @property
    def is_on(self):
        """Return true if the binary sensor is on."""
        # True means "faulted" or "open" or "abnormal state"
        return bool(self._zone["state"] != "Normal")
    def update(self):
        """Get updated stats from API."""
        # throttle: the zone cache on the shared client is refreshed at most
        # about once per second across all sensors of this platform
        last_update = dt_util.utcnow() - self._client.last_zone_update
        _LOGGER.debug("Zone: %s ", self._zone)
        if last_update > datetime.timedelta(seconds=1):
            self._client.zones = self._client.list_zones()
            self._client.last_zone_update = dt_util.utcnow()
            _LOGGER.debug("Updated from zone: %s", self._zone["name"])
        # re-resolve this sensor's zone dict from the (possibly refreshed)
        # cache by zone number; None if the zone disappeared from the panel
        if hasattr(self._client, "zones"):
            self._zone = next(
                (x for x in self._client.zones if x["number"] == self._number), None
            )
05bfcd7ebded41c4802361241a8bd25a2c76f5e3 | 1,086 | py | Python | tests/test_doe.py | eTuDpy/phuzzy | 77b3f30a199907d2f97511179cdce21a45bf3068 | [
"MIT"
] | 2 | 2018-04-12T22:46:16.000Z | 2018-09-05T08:01:20.000Z | tests/test_doe.py | eTuDpy/phuzzy | 77b3f30a199907d2f97511179cdce21a45bf3068 | [
"MIT"
] | 92 | 2018-04-13T09:39:40.000Z | 2020-12-07T17:22:06.000Z | tests/test_doe.py | eTuDpy/phuzzy | 77b3f30a199907d2f97511179cdce21a45bf3068 | [
"MIT"
] | 3 | 2018-04-14T22:02:09.000Z | 2020-11-28T19:45:15.000Z | # -*- coding: utf-8 -*-
import phuzzy
import phuzzy.approx.doe
def test_doe_meshgrid():
    """Meshgrid sampling yields a full 21x21 grid within variable bounds."""
    x = phuzzy.TruncNorm(alpha0=[1, 2], name="x")
    y = phuzzy.Triangle(alpha0=[3, 6], alpha1=[4], name="y")

    doe = phuzzy.approx.doe.DOE(designvars=[x, y], name="xy")
    doe.sample_doe(n=10, method="meshgrid")
    print(doe)
    print(doe.samples)

    assert len(doe.samples) == 441
    # every sampled column must stay inside its design variable's support
    for column, var in enumerate([x, y]):
        sampled = doe.samples.iloc[:, column]
        assert var.min() <= sampled.min()
        assert var.max() >= sampled.max()
def test_doe_lhs():
    """Latin-hypercube sampling yields n samples within variable bounds."""
    x = phuzzy.TruncNorm(alpha0=[1, 2], name="x")
    y = phuzzy.Triangle(alpha0=[3, 6], alpha1=[4], name="y")

    doe = phuzzy.approx.doe.DOE(designvars=[x, y], name="xy")
    doe.sample_doe(n=10, method="lhs")
    print(doe)
    print(doe.samples)

    assert len(doe.samples) == 10
    # every sampled column must stay inside its design variable's support
    for column, var in enumerate([x, y]):
        sampled = doe.samples.iloc[:, column]
        assert var.min() <= sampled.min()
        assert var.max() >= sampled.max()
3784656f88877134cb09e4a54149380b892bcdfa | 1,392 | py | Python | conftest.py | fanshuai1996/lianxi | 06252e08e4bc14f8556e659a6f326aa874e8cc17 | [
"CC0-1.0"
] | null | null | null | conftest.py | fanshuai1996/lianxi | 06252e08e4bc14f8556e659a6f326aa874e8cc17 | [
"CC0-1.0"
] | null | null | null | conftest.py | fanshuai1996/lianxi | 06252e08e4bc14f8556e659a6f326aa874e8cc17 | [
"CC0-1.0"
] | null | null | null | # coding=utf8
import os
import sys
import requests
import pytest
from common.read_yaml import readYaml
from common.user_mysql import UserMysql
from common.mylogger import logger
def pytest_addoption(parser):
    """Register the ``--environment`` CLI option (choices: test/pro; default: pro)."""
    parser.addoption(
        '--environment',
        action='store',  # fix: was "'store'''", an implicit string-concatenation typo
        default='pro',
        # fix: help text claimed the default was "test" while default='pro'
        help='运行环境选择:test/pro,默认pro环境',
    )
# Resolve the environment-config YAML relative to this file so lookups
# work regardless of the current working directory.
curPath = os.path.dirname(__file__)
yamlFilePath = os.path.join(curPath, 'configuration/env.yml')
@pytest.fixture(scope='session')
def getYaml():
    """Session fixture: the parsed contents of the main config YAML.

    NOTE(review): the path is relative to the working directory, unlike
    yamlFilePath above which is anchored to this file — confirm intended.
    """
    config_path = './../configuration/config.yml'
    return readYaml(config_path)
@pytest.fixture(scope='session')
def usesql(getYaml):
    """Session fixture: a MySQL helper built from the config's 'db' section."""
    return UserMysql(getYaml['db'])
@pytest.fixture(scope='session', autouse=True)
def environment(request):
    """Session fixture: publish the --environment option via os.environ."""
    selected = request.config.getoption('--environment')
    os.environ['environment'] = selected
    print('当前运行环境', os.environ['environment'])
    logger.info('当前运行环境:%s' % os.environ['environment'])
    return os.environ['environment']
@pytest.fixture(scope='session', autouse=True)
def file_path(environment):
    """Session fixture: absolute data-file path for the active environment."""
    env = environment
    print('env=', env)
    # the per-environment relative path comes from the env YAML,
    # anchored to this conftest's directory
    base_dir = os.path.dirname(__file__)
    relative = readYaml(yamlFilePath)[env]['file_path']
    resolved = os.path.abspath(os.path.join(base_dir, relative))
    print('file_path= @@@', resolved)
    return resolved
| 27.84 | 90 | 0.697557 |
f853bf7b0a0e319ba37ca4670a7e386bd05ac084 | 28,574 | py | Python | python/ccxt/liqui.py | bonesoul/ccxt | 8a4aacb6dd4d75de8771fc2ad66da97660f9ddb6 | [
"MIT"
] | null | null | null | python/ccxt/liqui.py | bonesoul/ccxt | 8a4aacb6dd4d75de8771fc2ad66da97660f9ddb6 | [
"MIT"
] | null | null | null | python/ccxt/liqui.py | bonesoul/ccxt | 8a4aacb6dd4d75de8771fc2ad66da97660f9ddb6 | [
"MIT"
] | 1 | 2018-07-20T15:16:46.000Z | 2018-07-20T15:16:46.000Z | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
basestring # Python 3
except NameError:
basestring = str # Python 2
import hashlib
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DDoSProtection
class liqui (Exchange):
    def describe(self):
        """Return the static exchange description merged over the base class:
        identifiers, capability flags, endpoint URLs, fee schedule, currency
        aliases, and the mapping of exchange error codes onto ccxt exceptions.
        """
        return self.deep_extend(super(liqui, self).describe(), {
            'id': 'liqui',
            'name': 'Liqui',
            'countries': 'UA',  # Ukraine
            'rateLimit': 3000,
            'version': '3',
            'userAgent': self.userAgents['chrome'],
            # unified-API capability flags; 'emulated' = built client-side
            # on top of other endpoints rather than served natively
            'has': {
                'CORS': False,
                'createMarketOrder': False,
                'fetchOrderBooks': True,
                'fetchOrder': True,
                'fetchOrders': 'emulated',
                'fetchOpenOrders': True,
                'fetchClosedOrders': 'emulated',
                'fetchTickers': True,
                'fetchMyTrades': True,
                'withdraw': True,
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/27982022-75aea828-63a0-11e7-9511-ca584a8edd74.jpg',
                'api': {
                    'public': 'https://api.liqui.io/api',
                    'private': 'https://api.liqui.io/tapi',
                },
                'www': 'https://liqui.io',
                'doc': 'https://liqui.io/api',
                'fees': 'https://liqui.io/fee',
            },
            'api': {
                'public': {
                    'get': [
                        'info',
                        'ticker/{pair}',
                        'depth/{pair}',
                        'trades/{pair}',
                    ],
                },
                'private': {
                    'post': [
                        'getInfo',
                        'Trade',
                        'ActiveOrders',
                        'OrderInfo',
                        'CancelOrder',
                        'TradeHistory',
                        'CoinDepositAddress',
                        'WithdrawCoin',
                        'CreateCoupon',
                        'RedeemCoupon',
                    ],
                },
            },
            'fees': {
                'trading': {
                    'maker': 0.001,
                    'taker': 0.0025,
                },
                'funding': {
                    'tierBased': False,
                    'percentage': False,
                    'withdraw': {},
                    'deposit': {},
                },
            },
            # exchange-specific currency code -> unified code
            'commonCurrencies': {
                'DSH': 'DASH',
            },
            # exchange error code -> ccxt exception class
            'exceptions': {
                '803': InvalidOrder, # "Count could not be less than 0.001."(selling below minAmount)
                '804': InvalidOrder, # "Count could not be more than 10000."(buying above maxAmount)
                '805': InvalidOrder, # "price could not be less than X."(minPrice violation on buy & sell)
                '806': InvalidOrder, # "price could not be more than X."(maxPrice violation on buy & sell)
                '807': InvalidOrder, # "cost could not be less than X."(minCost violation on buy & sell)
                '831': InsufficientFunds, # "Not enougth X to create buy order."(buying with balance.quote < order.cost)
                '832': InsufficientFunds, # "Not enougth X to create sell order."(selling with balance.base < order.amount)
                '833': OrderNotFound, # "Order with id X was not found."(cancelling non-existent, closed and cancelled order)
            },
        })
def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
market = self.markets[symbol]
key = 'quote'
rate = market[takerOrMaker]
cost = float(self.cost_to_precision(symbol, amount * rate))
if side == 'sell':
cost *= price
else:
key = 'base'
return {
'type': takerOrMaker,
'currency': market[key],
'rate': rate,
'cost': cost,
}
def get_base_quote_from_market_id(self, id):
uppercase = id.upper()
base, quote = uppercase.split('_')
base = self.common_currency_code(base)
quote = self.common_currency_code(quote)
return [base, quote]
    def fetch_markets(self):
        """Fetch all trading pairs from the public info endpoint and
        normalize them into the unified market structure (symbol, precision,
        amount/price/cost limits, fees, active flag).
        """
        response = self.publicGetInfo()
        markets = response['pairs']
        keys = list(markets.keys())
        result = []
        for p in range(0, len(keys)):
            id = keys[p]
            market = markets[id]
            base, quote = self.get_base_quote_from_market_id(id)
            symbol = base + '/' + quote
            # the exchange reports a single decimal_places value used for
            # both amount and price precision
            precision = {
                'amount': self.safe_integer(market, 'decimal_places'),
                'price': self.safe_integer(market, 'decimal_places'),
            }
            amountLimits = {
                'min': self.safe_float(market, 'min_amount'),
                'max': self.safe_float(market, 'max_amount'),
            }
            priceLimits = {
                'min': self.safe_float(market, 'min_price'),
                'max': self.safe_float(market, 'max_price'),
            }
            costLimits = {
                'min': self.safe_float(market, 'min_total'),
            }
            limits = {
                'amount': amountLimits,
                'price': priceLimits,
                'cost': costLimits,
            }
            # a pair flagged hidden is treated as not tradeable
            hidden = self.safe_integer(market, 'hidden')
            active = (hidden == 0)
            result.append({
                'id': id,
                'symbol': symbol,
                'base': base,
                'quote': quote,
                'active': active,
                'taker': market['fee'] / 100,  # fee is reported in percent
                'lot': amountLimits['min'],
                'precision': precision,
                'limits': limits,
                'info': market,
            })
        return result
def fetch_balance(self, params={}):
self.load_markets()
response = self.privatePostGetInfo()
balances = response['return']
result = {'info': balances}
funds = balances['funds']
currencies = list(funds.keys())
for c in range(0, len(currencies)):
currency = currencies[c]
uppercase = currency.upper()
uppercase = self.common_currency_code(uppercase)
total = None
used = None
if balances['open_orders'] == 0:
total = funds[currency]
used = 0.0
account = {
'free': funds[currency],
'used': used,
'total': total,
}
result[uppercase] = account
return self.parse_balance(result)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
}
if limit is not None:
request['limit'] = limit # default = 150, max = 2000
response = self.publicGetDepthPair(self.extend(request, params))
market_id_in_reponse = (market['id'] in list(response.keys()))
if not market_id_in_reponse:
raise ExchangeError(self.id + ' ' + market['symbol'] + ' order book is empty or not available')
orderbook = response[market['id']]
return self.parse_order_book(orderbook)
    def fetch_order_books(self, symbols=None, params={}):
        """Fetch order books for several (or all) markets in one batched call.

        The exchange takes a dash-joined list of market ids in the URL path;
        requesting every market can overflow the URL length limit, hence the
        2048-character guard below.
        """
        self.load_markets()
        ids = None
        if not symbols:
            ids = '-'.join(self.ids)
            # max URL length is 2083 symbols, including http schema, hostname, tld, etc...
            if len(ids) > 2048:
                numIds = len(self.ids)
                raise ExchangeError(self.id + ' has ' + str(numIds) + ' symbols exceeding max URL length, you are required to specify a list of symbols in the first argument to fetchOrderBooks')
        else:
            ids = self.market_ids(symbols)
            ids = '-'.join(ids)
        response = self.publicGetDepthPair(self.extend({
            'pair': ids,
        }, params))
        result = {}
        ids = list(response.keys())
        for i in range(0, len(ids)):
            id = ids[i]
            # fall back to the raw id when the market is unknown locally
            symbol = id
            if id in self.markets_by_id:
                market = self.markets_by_id[id]
                symbol = market['symbol']
            result[symbol] = self.parse_order_book(response[id])
        return result
def parse_ticker(self, ticker, market=None):
timestamp = ticker['updated'] * 1000
symbol = None
if market:
symbol = market['symbol']
last = self.safe_float(ticker, 'last')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker, 'buy'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'sell'),
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': self.safe_float(ticker, 'avg'),
'baseVolume': self.safe_float(ticker, 'vol_cur'),
'quoteVolume': self.safe_float(ticker, 'vol'),
'info': ticker,
}
    def fetch_tickers(self, symbols=None, params={}):
        """Fetch tickers for several (or all) markets in one batched call.

        Market ids are dash-joined into the URL path, with the same
        URL-length guard as fetch_order_books.
        """
        self.load_markets()
        ids = None
        if not symbols:
            ids = '-'.join(self.ids)
            # max URL length is 2083 symbols, including http schema, hostname, tld, etc...
            if len(ids) > 2048:
                numIds = len(self.ids)
                raise ExchangeError(self.id + ' has ' + str(numIds) + ' symbols exceeding max URL length, you are required to specify a list of symbols in the first argument to fetchTickers')
        else:
            ids = self.market_ids(symbols)
            ids = '-'.join(ids)
        tickers = self.publicGetTickerPair(self.extend({
            'pair': ids,
        }, params))
        result = {}
        keys = list(tickers.keys())
        for k in range(0, len(keys)):
            id = keys[k]
            ticker = tickers[id]
            # fall back to the raw id when the market is unknown locally
            symbol = id
            market = None
            if id in self.markets_by_id:
                market = self.markets_by_id[id]
                symbol = market['symbol']
            result[symbol] = self.parse_ticker(ticker, market)
        return result
def fetch_ticker(self, symbol, params={}):
tickers = self.fetch_tickers([symbol], params)
return tickers[symbol]
    def parse_trade(self, trade, market=None):
        """Convert a raw trade into the unified trade structure.

        Handles both payload variants seen from the exchange: 'price'/'tid'
        in some responses, 'rate'/'trade_id' in others — presumably public
        vs. private endpoints, TODO confirm against the API docs.
        """
        timestamp = int(trade['timestamp']) * 1000
        # normalize order-book side names to unified buy/sell
        side = trade['type']
        if side == 'ask':
            side = 'sell'
        if side == 'bid':
            side = 'buy'
        price = self.safe_float(trade, 'price')
        if 'rate' in trade:
            price = self.safe_float(trade, 'rate')
        id = self.safe_string(trade, 'tid')
        if 'trade_id' in trade:
            id = self.safe_string(trade, 'trade_id')
        order = self.safe_string(trade, self.get_order_id_key())
        if 'pair' in trade:
            marketId = trade['pair']
            market = self.markets_by_id[marketId]
        symbol = None
        if market:
            symbol = market['symbol']
        amount = trade['amount']
        type = 'limit'  # all trades are still limit trades
        # private trade history flags our own orders, which were makers
        isYourOrder = self.safe_value(trade, 'is_your_order')
        takerOrMaker = 'taker'
        if isYourOrder is not None:
            if isYourOrder:
                takerOrMaker = 'maker'
        fee = self.calculate_fee(symbol, type, side, amount, price, takerOrMaker)
        return {
            'id': id,
            'order': order,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': symbol,
            'type': type,
            'side': side,
            'price': price,
            'amount': amount,
            'fee': fee,
            'info': trade,
        }
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
}
if limit is not None:
request['limit'] = limit
response = self.publicGetTradesPair(self.extend(request, params))
return self.parse_trades(response[market['id']], market, since, limit)
    def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Place a limit order (the exchange does not support market orders)
        and cache the resulting unified order locally in self.orders.
        """
        if type == 'market':
            raise ExchangeError(self.id + ' allows limit orders only')
        self.load_markets()
        market = self.market(symbol)
        request = {
            'pair': market['id'],
            'type': side,
            'amount': self.amount_to_precision(symbol, amount),
            'rate': self.price_to_precision(symbol, price),
        }
        price = float(price)
        amount = float(amount)
        response = self.privatePostTrade(self.extend(request, params))
        id = None
        status = 'open'
        filled = 0.0
        remaining = amount
        if 'return' in response:
            id = self.safe_string(response['return'], self.get_order_id_key())
            # an order_id of '0' apparently means the order was matched right
            # away; the real id is then in init_order_id — TODO confirm
            if id == '0':
                id = self.safe_string(response['return'], 'init_order_id')
                status = 'closed'
            filled = self.safe_float(response['return'], 'received', 0.0)
            remaining = self.safe_float(response['return'], 'remains', amount)
        timestamp = self.milliseconds()
        order = {
            'id': id,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': None,
            'status': status,
            'symbol': symbol,
            'type': type,
            'side': side,
            'price': price,
            'cost': price * filled,
            'amount': amount,
            'remaining': remaining,
            'filled': filled,
            'fee': None,
            # 'trades': self.parse_trades(order['trades'], market),
        }
        self.orders[id] = order
        return self.extend({'info': response}, order)
def get_order_id_key(self):
return 'order_id'
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
response = None
request = {}
idKey = self.get_order_id_key()
request[idKey] = id
response = self.privatePostCancelOrder(self.extend(request, params))
if id in self.orders:
self.orders[id]['status'] = 'canceled'
return response
def parse_order_status(self, status):
statuses = {
'0': 'open',
'1': 'closed',
'2': 'canceled',
'3': 'canceled', # or partially-filled and still open? https://github.com/ccxt/ccxt/issues/1594
}
if status in statuses:
return statuses[status]
return status
    def parse_order(self, order, market=None):
        """Convert a raw order into the unified order structure, consulting
        the local order cache for the original amount when the payload only
        reports the remaining amount.
        """
        id = str(order['id'])
        status = self.safe_string(order, 'status')
        # NOTE(review): this compares against the literal string 'None', not
        # the None singleton — safe_string never returns 'None', so the
        # condition is effectively always true; presumably `is not None`
        # was intended. Harmless either way, since parse_order_status
        # passes unknown values through.
        if status != 'None':
            status = self.parse_order_status(status)
        timestamp = int(order['timestamp_created']) * 1000
        symbol = None
        if not market:
            market = self.markets_by_id[order['pair']]
        if market:
            symbol = market['symbol']
        remaining = None
        amount = None
        price = self.safe_float(order, 'rate')
        filled = None
        cost = None
        if 'start_amount' in order:
            amount = self.safe_float(order, 'start_amount')
            remaining = self.safe_float(order, 'amount')
        else:
            # payload carries only the remaining amount; recover the original
            # amount from the local cache when available
            remaining = self.safe_float(order, 'amount')
            if id in self.orders:
                amount = self.orders[id]['amount']
        if amount is not None:
            if remaining is not None:
                filled = amount - remaining
                cost = price * filled
        fee = None
        result = {
            'info': order,
            'id': id,
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': None,
            'type': 'limit',
            'side': order['type'],
            'price': price,
            'cost': cost,
            'amount': amount,
            'remaining': remaining,
            'filled': filled,
            'status': status,
            'fee': fee,
        }
        return result
def parse_orders(self, orders, market=None, since=None, limit=None):
ids = list(orders.keys())
result = []
for i in range(0, len(ids)):
id = ids[i]
order = orders[id]
extended = self.extend(order, {'id': id})
result.append(self.parse_order(extended, market))
return self.filter_by_since_limit(result, since, limit)
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
response = self.privatePostOrderInfo(self.extend({
'order_id': int(id),
}, params))
id = str(id)
newOrder = self.parse_order(self.extend({'id': id}, response['return'][id]))
oldOrder = self.orders[id] if (id in list(self.orders.keys())) else {}
self.orders[id] = self.extend(oldOrder, newOrder)
return self.orders[id]
    def update_cached_orders(self, openOrders, symbol):
        """Reconcile the local order cache against a freshly fetched list of
        open orders and return the full cache as an array. Cached orders that
        disappeared from the open list (for the queried symbol) are marked
        closed, since the exchange has no closed-orders endpoint.
        """
        # update local cache with open orders
        # self will add unseen orders and overwrite existing ones
        for j in range(0, len(openOrders)):
            id = openOrders[j]['id']
            self.orders[id] = openOrders[j]
        openOrdersIndexedById = self.index_by(openOrders, 'id')
        cachedOrderIds = list(self.orders.keys())
        for k in range(0, len(cachedOrderIds)):
            # match each cached order to an order in the open orders array
            # possible reasons why a cached order may be missing in the open orders array:
            # - order was closed or canceled -> update cache
            # - symbol mismatch(e.g. cached BTC/USDT, fetched ETH/USDT) -> skip
            cachedOrderId = cachedOrderIds[k]
            cachedOrder = self.orders[cachedOrderId]
            if not(cachedOrderId in list(openOrdersIndexedById.keys())):
                # cached order is not in open orders array
                # if we fetched orders by symbol and it doesn't match the cached order -> won't update the cached order
                if symbol is not None and symbol != cachedOrder['symbol']:
                    continue
                # cached order is absent from the list of open orders -> mark the cached order as closed
                if cachedOrder['status'] == 'open':
                    cachedOrder = self.extend(cachedOrder, {
                        'status': 'closed',  # likewise it might have been canceled externally(unnoticed by "us")
                        'cost': None,
                        'filled': cachedOrder['amount'],
                        'remaining': 0.0,
                    })
                    # derive the cost from filled * price when not yet known
                    if cachedOrder['cost'] is None:
                        if cachedOrder['filled'] is not None:
                            cachedOrder['cost'] = cachedOrder['filled'] * cachedOrder['price']
                    self.orders[cachedOrderId] = cachedOrder
        return self.to_array(self.orders)
    def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
        """Emulated fetchOrders: fetch the currently open orders from the
        exchange, reconcile them with the local cache (closing those that
        vanished), and return the cached set filtered by symbol/since/limit.
        """
        # some derived exchanges require a symbol for this endpoint
        if 'fetchOrdersRequiresSymbol' in self.options:
            if self.options['fetchOrdersRequiresSymbol']:
                if symbol is None:
                    raise ExchangeError(self.id + ' fetchOrders requires a symbol argument')
        self.load_markets()
        request = {}
        market = None
        if symbol is not None:
            market = self.market(symbol)
            request['pair'] = market['id']
        response = self.privatePostActiveOrders(self.extend(request, params))
        # liqui etc can only return 'open' orders(i.e. no way to fetch 'closed' orders)
        openOrders = []
        if 'return' in response:
            openOrders = self.parse_orders(response['return'], market)
        allOrders = self.update_cached_orders(openOrders, symbol)
        result = self.filter_by_symbol(allOrders, symbol)
        return self.filter_by_since_limit(result, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
orders = self.fetch_orders(symbol, since, limit, params)
return self.filter_by(orders, 'status', 'open')
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
orders = self.fetch_orders(symbol, since, limit, params)
return self.filter_by(orders, 'status', 'closed')
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
request = {
# 'from': 123456789, # trade ID, from which the display starts numerical 0(test result: liqui ignores self field)
# 'count': 1000, # the number of trades for display numerical, default = 1000
# 'from_id': trade ID, from which the display starts numerical 0
# 'end_id': trade ID on which the display ends numerical ∞
# 'order': 'ASC', # sorting, default = DESC(test result: liqui ignores self field, most recent trade always goes last)
# 'since': 1234567890, # UTC start time, default = 0(test result: liqui ignores self field)
# 'end': 1234567890, # UTC end time, default = ∞(test result: liqui ignores self field)
# 'pair': 'eth_btc', # default = all markets
}
if symbol is not None:
market = self.market(symbol)
request['pair'] = market['id']
if limit is not None:
request['count'] = int(limit)
if since is not None:
request['since'] = int(since / 1000)
response = self.privatePostTradeHistory(self.extend(request, params))
trades = []
if 'return' in response:
trades = response['return']
return self.parse_trades(trades, market, since, limit)
def withdraw(self, currency, amount, address, tag=None, params={}):
self.check_address(address)
self.load_markets()
response = self.privatePostWithdrawCoin(self.extend({
'coinName': currency,
'amount': float(amount),
'address': address,
}, params))
return {
'info': response,
'id': response['return']['tId'],
}
def sign_body_with_secret(self, body):
return self.hmac(self.encode(body), self.encode(self.secret), hashlib.sha512)
def get_version_string(self):
return '/' + self.version
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api]
query = self.omit(params, self.extract_params(path))
if api == 'private':
self.check_required_credentials()
nonce = self.nonce()
body = self.urlencode(self.extend({
'nonce': nonce,
'method': path,
}, query))
signature = self.sign_body_with_secret(body)
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Key': self.apiKey,
'Sign': signature,
}
elif api == 'public':
url += self.get_version_string() + '/' + self.implode_params(path, params)
if query:
url += '?' + self.urlencode(query)
else:
url += '/' + self.implode_params(path, params)
if method == 'GET':
if query:
url += '?' + self.urlencode(query)
else:
if query:
body = self.json(query)
headers = {
'Content-Type': 'application/json',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body):
if not isinstance(body, basestring):
return # fallback to default error handler
if len(body) < 2:
return # fallback to default error handler
if (body[0] == '{') or (body[0] == '['):
response = json.loads(body)
if 'success' in response:
#
# 1 - Liqui only returns the integer 'success' key from their private API
#
# {"success": 1, ...} httpCode == 200
# {"success": 0, ...} httpCode == 200
#
# 2 - However, exchanges derived from Liqui, can return non-integers
#
# It can be a numeric string
# {"sucesss": "1", ...}
# {"sucesss": "0", ...}, httpCode >= 200(can be 403, 502, etc)
#
# Or just a string
# {"success": "true", ...}
# {"success": "false", ...}, httpCode >= 200
#
# Or a boolean
# {"success": True, ...}
# {"success": False, ...}, httpCode >= 200
#
# 3 - Oversimplified, Python PEP8 forbids comparison operator(==) of different types
#
# 4 - We do not want to copy-paste and duplicate the code of self handler to other exchanges derived from Liqui
#
# To cover points 1, 2, 3 and 4 combined self handler should work like self:
#
success = self.safe_value(response, 'success', False)
if isinstance(success, basestring):
if (success == 'true') or (success == '1'):
success = True
else:
success = False
if not success:
code = self.safe_string(response, 'code')
message = self.safe_string(response, 'error')
feedback = self.id + ' ' + self.json(response)
exceptions = self.exceptions
if code in exceptions:
raise exceptions[code](feedback)
# need a second error map for these messages, apparently...
# in fact, we can use the same .exceptions with string-keys to save some loc here
if message == 'invalid api key':
raise AuthenticationError(feedback)
elif message == 'api key dont have trade permission':
raise AuthenticationError(feedback)
elif message.find('invalid parameter') >= 0: # errorCode 0, returned on buy(symbol, 0, 0)
raise InvalidOrder(feedback)
elif message == 'invalid order':
raise InvalidOrder(feedback)
elif message == 'Requests too often':
raise DDoSProtection(feedback)
elif message == 'not available':
raise DDoSProtection(feedback)
elif message == 'data unavailable':
raise DDoSProtection(feedback)
elif message == 'external service unavailable':
raise DDoSProtection(feedback)
else:
raise ExchangeError(self.id + ' unknown "error" value: ' + self.json(response))
| 40.761769 | 194 | 0.514944 |
42da2037f640c981081ddba1430b4c98b70a9af4 | 5,865 | py | Python | nfv/nfv-common/nfv_common/state_machine/_state_machine.py | SidneyAn/nfv | 5f0262a5b6ea4be59f977b9c587c483cbe0e373d | [
"Apache-2.0"
] | 2 | 2020-02-07T19:01:36.000Z | 2022-02-23T01:41:46.000Z | nfv/nfv-common/nfv_common/state_machine/_state_machine.py | SidneyAn/nfv | 5f0262a5b6ea4be59f977b9c587c483cbe0e373d | [
"Apache-2.0"
] | 1 | 2021-01-14T12:02:25.000Z | 2021-01-14T12:02:25.000Z | nfv/nfv-common/nfv_common/state_machine/_state_machine.py | SidneyAn/nfv | 5f0262a5b6ea4be59f977b9c587c483cbe0e373d | [
"Apache-2.0"
] | 2 | 2021-01-13T08:39:21.000Z | 2022-02-09T00:21:55.000Z | #
# Copyright (c) 2015-2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import weakref
from nfv_common import debug
from nfv_common.state_machine._state_exception import StateException
DLOG = debug.debug_get_logger('nfv_common.state_machine.state_machine')
class StateMachine(object):
    """
    State Machine Object

    Drives a context object through a set of named states, dispatching
    events to the current state and running exit/transition/enter hooks
    on each state change.
    """
    def __init__(self, context, context_args, context_kwargs, initial_state,
                 states):
        """
        Create State Machine

        :param context: owner object passed to every state hook (held weakly)
        :param context_args: positional args forwarded to State.enter()
        :param context_kwargs: keyword args forwarded to State.enter()
        :param initial_state: state object the machine starts in
        :param states: mapping of state name -> state object
        """
        if context_args is None:
            context_args = tuple()
        if context_kwargs is None:
            context_kwargs = dict()
        # Weak reference: the machine must not keep its owner alive.
        self._context_reference = weakref.ref(context)
        self._context_args = context_args
        self._context_kwargs = context_kwargs
        self._states = states
        self._current_state = initial_state
        self._state_change_callbacks = list()
        # While a transition is in flight, incoming events are parked in
        # _event_backlog (tagged with the state being entered) and replayed
        # once the transition completes.
        self._transitioning = False
        self._event_backlog_state = None
        self._event_backlog = list()
    @property
    def _context(self):
        """
        Returns the context (None if the owner was garbage collected)
        """
        context = self._context_reference()
        return context
    @property
    def current_state(self):
        """
        Returns the current state
        """
        return self._current_state
    def register_state_change_callback(self, callback):
        """
        Register state change callback, invoked as callback(prev, next, event)
        """
        if callback not in self._state_change_callbacks:
            self._state_change_callbacks.append(callback)
    def handle_event(self, event, event_data=None):
        """
        Handle event

        Dispatches the event to the current state; when a transition is
        requested, runs exit -> transition -> enter and then replays any
        events queued while entering.  On enter failure the machine falls
        back to the previous state.
        """
        if self._transitioning:
            # Re-entrant call while entering a state: queue for later replay.
            self._event_backlog.append((self._event_backlog_state, event,
                                        event_data))
            return
        try:
            prev_state = self._current_state
            next_state_name = self._current_state.handle_event(
                self._context, event, event_data)
            next_state = self._states[next_state_name]
            if prev_state != self._current_state:
                # Nested handle event calls, we have already moved away
                DLOG.verbose("Nested handle event calls detected, "
                             "prev_state=%s, current_state=%s."
                             % (prev_state.name, self.current_state.name))
                return
            if next_state_name != prev_state.name:
                # Attempt to exit the current state
                try:
                    prev_state.exit(self._context)
                except StateException as e:
                    DLOG.error("Caught exception while trying to exit state "
                               "(%s), event=%s, error=%s."
                               % (prev_state, event, e))
                    return
                # Attempt to transition from the current state
                try:
                    prev_state.transition(self._context, event, event_data,
                                          next_state)
                except StateException as e:
                    DLOG.error("Caught exception while trying to transition "
                               "from state (%s) to state (%s), event=%s, "
                               "error=%s." % (prev_state, next_state, event, e))
                    # Transition failed: re-enter the previous state.
                    prev_state.enter(self._context, *self._context_args,
                                     **self._context_kwargs)
                    return
                # Attempt to enter the next state
                try:
                    self._transitioning = True
                    self._event_backlog_state = next_state
                    next_state.enter(self._context, *self._context_args,
                                     **self._context_kwargs)
                    self._current_state = next_state
                    for callback in self._state_change_callbacks:
                        callback(prev_state, next_state, event)
                    # Snapshot and clear the backlog before replaying, so
                    # replayed handle_event calls queue into a fresh list.
                    event_backlog = list(self._event_backlog)
                    self._transitioning = False
                    self._event_backlog_state = None
                    del self._event_backlog[:]
                    for event_state, event, event_data in event_backlog:
                        if event_state != self._current_state:
                            DLOG.info("Ignoring event %s, no longer in state "
                                      "%s, now in state %s."
                                      % (event, event_state,
                                         self._current_state))
                        else:
                            DLOG.info("Handling event backlog, event=%s while "
                                      "transitioning to state %s."
                                      % (event, self._current_state))
                            self.handle_event(event, event_data)
                    del event_backlog[:]
                except StateException as e:
                    DLOG.error("Caught exception while trying to enter state "
                               "(%s) from state (%s), event=%s, error=%s."
                               % (next_state, prev_state, event, e))
                    # Enter failed: drop the backlog and restore prev_state.
                    self._transitioning = False
                    self._event_backlog_state = None
                    del self._event_backlog[:]
                    prev_state.transition(self._context, event, event_data,
                                          prev_state)
                    prev_state.enter(self._context, *self._context_args,
                                     **self._context_kwargs)
                    return
        except StateException as e:
            DLOG.error("Caught exception while trying to handle event (%s), "
                       "error=%s." % (event, e))
            return
2beaaef436e6906761391b9ba4e7d475b32a7b74 | 3,256 | py | Python | app.py | Bielgomes/SLPY | 20c4751a100ac3dbcefcee71714a84f7da61cb06 | [
"MIT"
] | null | null | null | app.py | Bielgomes/SLPY | 20c4751a100ac3dbcefcee71714a84f7da61cb06 | [
"MIT"
] | null | null | null | app.py | Bielgomes/SLPY | 20c4751a100ac3dbcefcee71714a84f7da61cb06 | [
"MIT"
] | null | null | null | from flask import Flask, app, render_template, request, jsonify
from dotenv import load_dotenv, find_dotenv
from pymongo import MongoClient
from emailer import send_email
import bcrypt
import secrets
import os
load_dotenv(find_dotenv())
MONGOCLIENT = os.getenv('MONGOCLIENT')
client = MongoClient(MONGOCLIENT)
users = client['users-data']['users']
codes = client['users-data']['codes']
app = Flask(__name__, template_folder='pages')
# ROTA PRINCIPAL
@app.route('/')
def home():
    """Serve the login page as the site root."""
    return render_template('login.html')
# ROTAS DE RENDERIZAÇÃO DE PAGES
@app.route('/login')
def login():
    """Render the login page."""
    return render_template('login.html')
@app.route('/logado')
def logado():
    """Render the post-login landing page."""
    return render_template('pages_final/logado.html')
@app.route('/registrado')
def registrado():
    """Render the registration-complete page."""
    return render_template('pages_final/registrado.html')
@app.route('/registrar')
def registrar():
    """Render the account registration form."""
    return render_template('registrar.html')
@app.route('/forgot')
def forgot():
    """Render the first password-recovery page (request a code)."""
    return render_template('forgot/forgot.html')
@app.route('/forgot-final')
def forgot_final():
    """Render the second password-recovery page (enter code + new password)."""
    return render_template('forgot/forgot-final.html')
# ROTAS De AUTENTICAÇÃO
@app.route('/oauth', methods=['POST'])
def oauth():
    """Check credentials: 200 on success, 204 unknown user, 401 bad password."""
    payload = request.json
    uname = payload['username'].lower()
    pwd = payload['password']
    record = users.find_one({'username': uname})
    if record is None:
        return jsonify({"code": 204})
    if bcrypt.checkpw(pwd.encode('utf8'), record['password']):
        return jsonify({"code": 200})
    return jsonify({"code": 401})
@app.route('/register', methods=['POST'])
def register():
    """Create a new account: 200 + redirect on success, 207 if the username is taken."""
    content = request.json
    # Normalize to lowercase: /oauth lowercases the submitted username before
    # lookup, so an account stored with capital letters could never log in.
    username = content['username'].lower()
    password = content['password']
    password_enc = bcrypt.hashpw(password.encode('utf8'), bcrypt.gensalt())
    if not users.find_one({'username': username}):
        users.insert_one({'username': username, 'password': password_enc})
        return jsonify({"code": 200, "redirect": "/login"})
    else:
        return jsonify({"code": 207})
@app.route('/forgot-email', methods=['POST'])
def forgot_email():
    """Start password recovery: e-mail a one-time code to an existing user.

    Returns {"code": "200"} when the mail was sent, {"code": "207"} for an
    unknown username.
    """
    content = request.json
    username = content['username']
    if users.find_one({'username': username}):
        # Reuse a pending code if one exists; when find_one returns None the
        # ['code'] subscript raises, which the bare except treats as
        # "no code yet" and a fresh, collision-free token is generated.
        try:
            code = codes.find_one({'username': username})['code']
        except:
            code = secrets.token_urlsafe(16)
            while codes.find_one({'code': code}):
                code = secrets.token_urlsafe(16)
            codes.insert_one({'username': username, 'code': code})
        send_email(username, code)
        return jsonify({"code": '200'})
    else:
        return jsonify({"code": '207'})
@app.route('/recover', methods=['POST'])
def recover():
    """Finish password recovery: set a new password for the code's owner.

    Returns {"code": "200"} on success, "201"/"202" when the supplied code
    does not match any pending recovery request.
    """
    content = request.json
    usercode = content['code']
    password = content['password']
    code = codes.find_one({'code': usercode})
    if code is not None:
        # NOTE(review): this comparison is always true since the document was
        # fetched by code; kept as a defensive check.
        if usercode == code['code']:
            password_enc = bcrypt.hashpw(password.encode('utf8'), bcrypt.gensalt())
            users.update_one({'username': code['username']}, {'$set': {'password': password_enc}})
            # One-time use: consume the code after a successful reset.
            codes.find_one_and_delete({'code': usercode})
            return jsonify({"code": '200'})
        else:
            return jsonify({"code": '201'})
    else:
        return jsonify({"code": '202'})
app.run() | 26.258065 | 93 | 0.652334 |
2a6be616ed3cc377e172469408dead58ee3df413 | 1,569 | py | Python | examples/grad/11-excited_state_casci_grad.py | QuESt-Calculator/pyscf | 0ed03633b699505c7278f1eb501342667d0aa910 | [
"Apache-2.0"
] | 501 | 2018-12-06T23:48:17.000Z | 2022-03-31T11:53:18.000Z | examples/grad/11-excited_state_casci_grad.py | QuESt-Calculator/pyscf | 0ed03633b699505c7278f1eb501342667d0aa910 | [
"Apache-2.0"
] | 710 | 2018-11-26T22:04:52.000Z | 2022-03-30T03:53:12.000Z | examples/grad/11-excited_state_casci_grad.py | QuESt-Calculator/pyscf | 0ed03633b699505c7278f1eb501342667d0aa910 | [
"Apache-2.0"
] | 273 | 2018-11-26T10:10:24.000Z | 2022-03-30T12:25:28.000Z | #!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Analytical nuclear gradients of CASCI excited state.
'''
from pyscf import gto
from pyscf import scf
from pyscf import mcscf
# Water molecule in a 6-31G basis.
mol = gto.M(
    atom = [
        ["O" , (0. , 0. , 0.)],
        [1 , (0. ,-0.757 , 0.587)],
        [1 , (0. , 0.757 , 0.587)]],
    basis = '631g'
)
mf = scf.RHF(mol).run()
# CASCI with a (4 orbital, 4 electron) active space; solve 4 CI roots so
# that excited states are available for the gradient calls below.
mc = mcscf.CASCI(mf, 4, 4)
mc.fcisolver.nroots = 4
mc.run()
# PySCF-1.6.1 and newer supports the .Gradients method to create a grad
# object after grad module was imported. It is equivalent to call the
# .nuc_grad_method method.
from pyscf import grad
g = mc.Gradients().kernel(state=3)
print('Gradients of the 3rd excited state')
print(g)
# An equivalent way to specify the exicited state is to directly input the
# excited state wavefunction
g = mc.nuc_grad_method().kernel(ci=mc.ci[3])
print('Gradients of the 3rd excited state')
print(g)
#
# Use gradients scanner.
#
# Note the returned gradients are based on atomic unit.
#
g_scanner = mc.nuc_grad_method().as_scanner(state=3)
e, g = g_scanner(mol)
print('Gradients of the 3rd excited state')
print(g)
#
# Specify state ID for the gradients of another state.
#
# Unless explicitly specified as an input argument of set_geom_ function,
# set_geom_ function will use the same unit as the one specified in mol.unit.
mol.set_geom_('''O 0. 0. 0.1
H 0. -0.757 0.587
H 0. 0.757 0.587''')
e, g = g_scanner(mol, state=2)
print('Gradients of the 2nd excited state')
print(g)
197ed0a8a4030bcb69fd1d9cf5414412cee01250 | 1,895 | py | Python | setup.py | gunjan5/libnet | d5e06fdd41f8865171a8c41309b235e2881ce686 | [
"Apache-2.0"
] | null | null | null | setup.py | gunjan5/libnet | d5e06fdd41f8865171a8c41309b235e2881ce686 | [
"Apache-2.0"
] | null | null | null | setup.py | gunjan5/libnet | d5e06fdd41f8865171a8c41309b235e2881ce686 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2016 Tigera, Inc. All rights reserved.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import setuptools
# Package version; '-dev' marks an unreleased snapshot.
version = '0.9.0-dev'
setuptools.setup(
    name='libnetwork',
    version=version,
    description='Docker libnetwork plugin',
    # The project's main homepage.
    url='https://github.com/projectcalico/libnetwork-plugin/',
    # Author details
    author='Project Calico',
    author_email='maintainers@projectcalico.org',
    # Choose your license
    license='Apache 2.0',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Operating System :: POSIX :: Linux',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Topic :: System :: Networking',
    ],
    # What does your project relate to?
    keywords='calico docker etcd mesos kubernetes rkt openstack',
    packages=["libnetwork"],
    install_requires=['netaddr', 'python-etcd>=0.4.3', 'subprocess32', 'flask', 'gunicorn', 'gevent'],
    # Forked dependencies installed straight from git.
    dependency_links=[
        "git+https://github.com/projectcalico/python-etcd.git",
        "git+https://github.com/projectcalico/libcalico.git"
    ]
)
111b5983a2afac3266eae8588ff2192767f09e81 | 1,815 | py | Python | model-optimizer/extensions/front/LRNReplacer.py | apexxs/dldt | 17e66dc5a6631d630da454506902bd7c25d4170b | [
"Apache-2.0"
] | 2 | 2021-04-19T06:08:35.000Z | 2021-08-25T02:43:43.000Z | model-optimizer/extensions/front/LRNReplacer.py | apexxs/dldt | 17e66dc5a6631d630da454506902bd7c25d4170b | [
"Apache-2.0"
] | 6 | 2022-01-11T18:56:22.000Z | 2022-02-21T13:20:20.000Z | model-optimizer/extensions/front/LRNReplacer.py | apexxs/dldt | 17e66dc5a6631d630da454506902bd7c25d4170b | [
"Apache-2.0"
] | 3 | 2021-02-05T17:11:17.000Z | 2021-04-19T08:33:31.000Z | """
Copyright (c) 2017-2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import networkx as nx
from mo.front.common.replacement import FrontReplacementOp
from mo.ops.lin_op import Mul
from mo.ops.const import Const
class LRNReplacer(FrontReplacementOp):
    """Rewrites LRN nodes with bias != 1 into bias-free LRN followed by a
    constant Mul, since the target IR's norm layer has no bias term."""
    op = 'LRN'
    enabled = True
    def replace_sub_graph(self, graph: nx.MultiDiGraph, match: dict):
        node = match['op']
        # Nothing to do when bias is absent or already the neutral value 1.
        if not node.has_valid('bias') or (node.has_valid('bias') and node.bias == 1):
            return
        # Calculate scale value & create Const op
        # scale = 1 / bias^beta compensates for folding bias into alpha below.
        scale_value = np.array(1. / (pow(node.bias, node.beta)))
        node.alpha /= node.bias
        const_node = Const(graph, dict(value=scale_value, shape=scale_value.shape))
        # Get all outputs for LRN layer
        # NOTE: the comprehension variable shadows the outer 'node'; safe in
        # Python 3 (comprehension scope) but worth keeping in mind.
        out_nodes = [node for node in node.out_nodes().values()]
        # Create Mul node with inputs
        mul_node = Mul(graph, dict(name=node.id + "/Mul_"))
        mnode = mul_node.create_node(inputs=[node, const_node.create_node()])
        # Move edges from LRN to Mul node
        for out_node in out_nodes:
            edge_attrs = graph.get_edge_data(node.id, out_node.id)[0]
            graph.remove_edge(node.id, out_node.id)
            graph.add_edges_from([(mnode.id, out_node.id, edge_attrs)])
| 34.903846 | 85 | 0.689256 |
b9c4ce9ee558ff4191c81b0cb820e69e100d12f8 | 1,450 | py | Python | Testing/Python/PrefixParserTest.py | josephsnyder/VistA-1 | 7bd7e4cdda72c3e76c7ef0f6947383dc5823ea0b | [
"Apache-2.0"
] | 1 | 2017-04-18T15:55:43.000Z | 2017-04-18T15:55:43.000Z | Testing/Python/PrefixParserTest.py | josephsnyder/VistA-1 | 7bd7e4cdda72c3e76c7ef0f6947383dc5823ea0b | [
"Apache-2.0"
] | null | null | null | Testing/Python/PrefixParserTest.py | josephsnyder/VistA-1 | 7bd7e4cdda72c3e76c7ef0f6947383dc5823ea0b | [
"Apache-2.0"
] | null | null | null | #---------------------------------------------------------------------------
# Copyright 2013 The Open Source Electronic Health Record Agent
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#---------------------------------------------------------------------------
### set up the IO that is need by unit test
# NOTE: the original mixed Python 3 style print() calls with Python 2 only
# print statements; all output now uses the print() form, which is valid
# under both interpreters.
import sys, os
sys.path = [os.path.dirname(__file__) + '/../../Python/vista'] + sys.path

print('sys.argv is %s' % sys.argv)
if len(sys.argv) <= 1:
    print('Need the two arguments:packagename,packages_csv_file ')
    sys.exit()

from ParseCSVforPackagePrefixes import FindPackagePrefixes

# Known-good prefixes for the sample CSV; exit code reflects the comparison.
ExpectedOutput = ["'%", "'ABC", "'BCD", "'DEF", 'ABCD', "'CDEH", 'DEFG']
TestOutput = FindPackagePrefixes(sys.argv[1], sys.argv[2])
if ExpectedOutput == TestOutput:
    print("Output of test matches the expected output")
    sys.exit(0)
else:
    print("Error: Expected output was: " + str(ExpectedOutput) + ". Test output was: " + str(TestOutput))
    sys.exit(1)
c1c32e22799ce898847efaedadde20e0e159b464 | 1,956 | py | Python | ard/ablation_study/no_death_strat/female/generate_data_by_seed.py | DPBayes/data-sharing-examples | f9fffc5b8f45d8dd7b93cb7e812439decfa51193 | [
"MIT"
] | null | null | null | ard/ablation_study/no_death_strat/female/generate_data_by_seed.py | DPBayes/data-sharing-examples | f9fffc5b8f45d8dd7b93cb7e812439decfa51193 | [
"MIT"
] | 2 | 2020-11-13T18:31:07.000Z | 2021-05-03T12:48:43.000Z | ard/ablation_study/no_death_strat/female/generate_data_by_seed.py | DPBayes/data-sharing-examples | f9fffc5b8f45d8dd7b93cb7e812439decfa51193 | [
"MIT"
] | 2 | 2020-11-21T06:35:20.000Z | 2020-11-25T16:58:09.000Z | import torch, sys, math, pickle, datetime, os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import numpy.random as npr
from itertools import count
from collections import OrderedDict
npr.seed(1234)
use_cuda = False
if use_cuda:
torch.set_default_tensor_type('torch.cuda.DoubleTensor')
torch.cuda.manual_seed(1234)
else:
torch.set_default_tensor_type('torch.DoubleTensor')
torch.manual_seed(1234)
##################################################
##################################################
from variable_types import independent_model
female_variable_types_ = independent_model
maps = pickle.load(open('maps.pickle', 'rb'))[0]
maps['age'] = lambda x : sum(maps['age_lim']*np.array([-1, 1]))*x + maps['age_lim'][0]
maps['per'] = lambda x : sum(maps['per_lim']*np.array([-1, 1]))*x + maps['per_lim'][0]
from sampler import fast_sample
from load_diabetes import decode_data
N_female = 208148
def main():
    """Generate synthetic female-cohort CSVs from saved models.

    Command line: main.py <epsilon> <seed>; picks the model file in
    ./female_models/ whose name contains both values, then writes one
    synthetic dataset per model repetition to ./syn_data/.
    """
    eps = float(sys.argv[1])
    seed = int(sys.argv[2])
    # File discovery goes through the shell: ls|grep appends matches to a
    # temp file which is then read back and deleted.  os.system returns the
    # exit status, not a filename; the real name comes from the file below.
    model_fname = os.system("ls ./female_models/ | grep {} | grep {} >> model_fnames.txt".format(eps, seed))
    model_fnames_file = open("model_fnames.txt", "r")
    model_fnames = model_fnames_file.readlines()
    model_fnames_file.close()
    # First matching line, with the trailing newline stripped.
    model_fname = [fname for fname in model_fnames][0][:-1]
    print(model_fname)
    os.system("rm model_fnames.txt")
    female_models = pd.read_pickle('./female_models/{}'.format(model_fname))
    for i_rep, female_model in enumerate(female_models):
        # Keep only the variable types the model actually parameterizes.
        female_variable_types = {key : female_variable_types_[key] for key in female_model.param_dims.keys()}
        print(i_rep)
        female_syn_data = fast_sample(female_model, female_variable_types, N_female)
        #female_syn_data[female_syn_data["ep"] == 0]["lex.dur"] = 1.0
        female_syn_decoded = decode_data(female_syn_data, maps, for_poisson=False)
        female_syn_decoded.to_csv('./syn_data/female_data_{}_{}_{}.csv'.format(seed, np.round(eps, 2), i_rep), index=False)
if __name__=="__main__":
main()
| 36.222222 | 117 | 0.711145 |
fd9e58db6f99dca667a05dfb52437c1b45ae4607 | 4,453 | py | Python | gsa_framework/sensitivity_analysis/gradient_boosting.py | aleksandra-kim/gsa_framework | d119755cd96a31827f499f09a3a203aed295ba46 | [
"BSD-3-Clause"
] | 2 | 2020-12-28T17:11:47.000Z | 2021-11-20T19:59:33.000Z | gsa_framework/sensitivity_analysis/gradient_boosting.py | aleksandra-kim/gsa_framework | d119755cd96a31827f499f09a3a203aed295ba46 | [
"BSD-3-Clause"
] | 7 | 2020-09-09T10:12:21.000Z | 2021-11-25T09:54:14.000Z | gsa_framework/sensitivity_analysis/gradient_boosting.py | aleksandra-kim/gsa_framework | d119755cd96a31827f499f09a3a203aed295ba46 | [
"BSD-3-Clause"
] | 3 | 2020-09-08T09:41:28.000Z | 2021-12-28T03:21:40.000Z | # Local files
from .method_base import SensitivityAnalysisMethod as SAM
from ..sensitivity_methods.gradient_boosting import xgboost_indices
from ..utils import write_pickle, read_pickle
class GradientBoosting(SAM):
    """Global sensitivity analysis with feature importance measures from gradient boosted trees.
    Computed sensitivity indices include:
        * ``weight``: the number of times a feature is used to split the data across all trees.
        * ``gain``: the average gain across all splits the feature is used in.
        * ``cover``: the average coverage across all splits the feature is used in.
        * ``total_gain``: the total gain across all splits the feature is used in.
        * ``total_cover``: the total coverage across all splits the feature is used in.
        * ``fscore``: how many times each feature is split on.
    References
    ----------
    Paper:
        :cite:ts:`chen2016xgboost`
    Useful links:
        https://xgboost.readthedocs.io/en/latest/python/python_api.html
    """
    gsa_label = "xgboostGsa"
    def __init__(self, tuning_parameters=None, test_size=0.2, xgb_model=None, **kwargs):
        """
        :param tuning_parameters: dict of xgboost tuning options; the
            experiment seed is always injected as ``random_state``.
        :param test_size: fraction of samples held out for evaluation.
        :param xgb_model: optional pre-trained booster to continue from.
        """
        super().__init__(**kwargs)
        if tuning_parameters is None:
            tuning_parameters = {}
        tuning_parameters.update({"random_state": self.seed})
        self.tuning_parameters = tuning_parameters
        self.test_size = test_size
        self.xgb_model = xgb_model
        self.gsa_label = self.create_gsa_label()
        # Scratch directories for convergence/stability intermediates.
        self.write_dir_convergence = (
            self.write_dir / "convergence_intermediate_{}".format(self.gsa_label)
        )  # TODO
        self.write_dir_convergence.mkdir(parents=True, exist_ok=True)
        self.write_dir_stability = self.write_dir / "stability_intermediate_{}".format(
            self.gsa_label
        )  # TODO
        self.write_dir_stability.mkdir(parents=True, exist_ok=True)
    # def create_S_convergence_filepath(self, iterations_step, iterations):
    #     filename = "S.{}.{}.{}Step{}.{}.pickle".format(
    #         self.gsa_label,
    #         self.sampling_label,
    #         iterations,
    #         iterations_step,
    #         self.seed,
    #     )
    #     filepath = self.write_dir_convergence / filename
    #     return filepath
    def create_gsa_label(self):
        """Encode the main xgboost tuning parameters into the GSA label string."""
        gsa_label = self.gsa_label + "_Lr{}G{}Mcw{}Md{}RegL{}RegA{}Ne{}Ss{}Cbt{}_".format(  # TODO change to include more info in the filename
            self.tuning_parameters.get("learning_rate", 0.3),
            self.tuning_parameters.get("gamma", 0),
            self.tuning_parameters.get("min_child_weight", 1),
            self.tuning_parameters.get("max_depth", 6),
            self.tuning_parameters.get("reg_lambda", 0),
            self.tuning_parameters.get("reg_alpha", 0),
            self.tuning_parameters.get("n_estimators", 10),
            self.tuning_parameters.get("subsample", 1),
            self.tuning_parameters.get("colsample_bytree", 1),
        )
        return gsa_label
    def generate_gsa_indices_based_on_method(self, **kwargs):
        """Uses XGBoost gradient boosted trees and random samples to compute feature importances."""
        # flag_convergence = kwargs.get("flag_convergence", False)
        # if not flag_convergence:
        flag_return_xgb_model = kwargs.get("flag_return_xgb_model", True)
        S_dict = xgboost_indices(
            filepath_Y=self.filepath_Y,
            filepath_X=self.filepath_X_rescaled,
            tuning_parameters=self.tuning_parameters,
            test_size=self.test_size,
            xgb_model=self.xgb_model,
            flag_return_xgb_model=flag_return_xgb_model,
        )
        # else:
        #     iterations = kwargs.get("iterations", self.iterations)
        #     iterations_step = kwargs.get("iterations_step", self.iterations)
        #     filepath_S = self.create_S_convergence_filepath(iterations_step, iterations)
        #     if not filepath_S.exists():
        #         S_dict, r2, explained_var = xgboost_scores(
        #             filepath_Y=self.filepath_Y,
        #             filepath_X=self.filepath_X_rescaled,
        #             tuning_parameters=self.tuning_parameters,
        #             num_boost_round=self.num_boost_round,
        #             xgb_model=self.xgb_model,
        #         )
        #         write_pickle(S_dict, filepath_S)
        #     else:
        #         S_dict = read_pickle(filepath_S)
        return S_dict
| 43.23301 | 142 | 0.640018 |
f923cee6138a85961c720d661398a3d79926e4e6 | 761 | py | Python | senz-client-samples/python/device_1/sender.py | ani4aniket/senz | 70168a6847d7af43d15e34b33d6c6310ca88a107 | [
"Apache-2.0"
] | 2 | 2019-02-26T05:03:20.000Z | 2019-02-27T07:16:05.000Z | senz-client-samples/python/device_1/sender.py | ani4aniket/senz | 70168a6847d7af43d15e34b33d6c6310ca88a107 | [
"Apache-2.0"
] | null | null | null | senz-client-samples/python/device_1/sender.py | ani4aniket/senz | 70168a6847d7af43d15e34b33d6c6310ca88a107 | [
"Apache-2.0"
] | 1 | 2019-03-24T09:32:53.000Z | 2019-03-24T09:32:53.000Z | #!/usr/bin/env python3
import sys, os
sys.path.insert(0, os.path.abspath('..'))
from senz.client import *
from utils.image_utils import *
from utils.aes_utils import *
deviceName = "dev1"
sharedKey = "LGWlhb329Y09oluI"
imagePathToSend = "sample.jpg"
# Register device
print("Registering device...")
msg = "SHARE #pubkey KEY @senz #time {} ^{} signature".format(getTimestamp(), deviceName)
sendMessage(msg)
# Convert image to byte string
byteString = imageToString(imagePathToSend)
# Encrypt using AES Crypto
aes = AESUtils(sharedKey)
byteString = aes.encrypt(byteString)
# Send the message
print("Press enter to send the image...")
raw_input()
msg = "DATA $image {} @dev2 #time {} ^dev1 signature\n".format(byteString, getTimestamp())
sendMessage(msg)
| 23.060606 | 90 | 0.735874 |
7edbb795b2bf7c1686415f71428ef5109dac94a5 | 711 | py | Python | models/t_import.py | THM-MA/XSDATA-waypoint | dd94442f9d6677c525bf3ebb03c15fec52fa1079 | [
"MIT"
] | null | null | null | models/t_import.py | THM-MA/XSDATA-waypoint | dd94442f9d6677c525bf3ebb03c15fec52fa1079 | [
"MIT"
] | null | null | null | models/t_import.py | THM-MA/XSDATA-waypoint | dd94442f9d6677c525bf3ebb03c15fec52fa1079 | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
from typing import Optional
__NAMESPACE__ = "http://www.omg.org/spec/BPMN/20100524/MODEL"
@dataclass
class TImport:
class Meta:
name = "tImport"
namespace: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
}
)
location: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
}
)
import_type: Optional[str] = field(
default=None,
metadata={
"name": "importType",
"type": "Attribute",
"required": True,
}
)
| 20.911765 | 61 | 0.518987 |
ded461022f8b82f4596792920cf75c8a52aeadc7 | 30,395 | py | Python | packaging/setup/plugins/ovirt-engine-setup/ovirt-engine/network/ovirtproviderovn.py | 18641315209/ovirt-engine | 65ffe7ca81c6ff4da56a2a977a0183bd79283685 | [
"Apache-2.0"
] | null | null | null | packaging/setup/plugins/ovirt-engine-setup/ovirt-engine/network/ovirtproviderovn.py | 18641315209/ovirt-engine | 65ffe7ca81c6ff4da56a2a977a0183bd79283685 | [
"Apache-2.0"
] | null | null | null | packaging/setup/plugins/ovirt-engine-setup/ovirt-engine/network/ovirtproviderovn.py | 18641315209/ovirt-engine | 65ffe7ca81c6ff4da56a2a977a0183bd79283685 | [
"Apache-2.0"
] | null | null | null | #
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013-2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""ovirt-provider-ovn plugin."""
import base64
import gettext
import os
import random
import string
import uuid
from collections import namedtuple
from M2Crypto import RSA
from otopi import constants as otopicons
from otopi import filetransaction
from otopi import plugin
from otopi import util
from ovirt_engine import configfile
from ovirt_engine_setup import constants as osetupcons
from ovirt_engine_setup.engine import constants as oenginecons
from ovirt_engine_setup.engine.constants import Const
from ovirt_engine_setup.engine.constants import Defaults
from ovirt_engine_setup.engine.constants import FileLocations
from ovirt_engine_setup.engine.constants import OvnEnv
from ovirt_engine_setup.engine.constants import OvnFileLocations
from ovirt_engine_setup.engine_common import constants as oengcommcons
from ovirt_setup_lib import dialog
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine-setup')
# Connection settings for one OVN database (north or south): display
# name, TCP port, connection protocol, the ovn-*ctl command used to
# manage it, and the PKI key/certificate files used for SSL.
OvnDbConfig = namedtuple(
    'OvnDbConfig',
    'name port protocol command key_file cert_file',
)
@util.export
class Plugin(plugin.PluginBase):
    """ovirt-provider-ovn plugin.

    Optionally installs and configures the oVirt OVN network provider:
    generates PKI for the OVN databases and the provider HTTPS
    endpoint, configures firewalld services, writes the provider
    configuration file, registers an SSO client and records the
    provider in the engine database.
    """

    CONNECTION_TCP = 'tcp'
    CONNECTION_SSL = 'ssl'

    # TODO: OVN north db will be temporarily configured to
    # connect over TCP, not SSL.
    # This is because of OVN bug: 1446538
    # Once the bug is fixed, the connection can be moved to SSL
    OVN_NORTH_DB_CONFIG = OvnDbConfig(
        'OVN NORTH DB',
        '6641',
        CONNECTION_SSL,
        'ovn-nbctl',
        oenginecons.OvnFileLocations.OVIRT_PROVIDER_OVN_NDB_KEY,
        oenginecons.OvnFileLocations.OVIRT_PROVIDER_OVN_NDB_CERT,
    )

    OVN_SOUTH_DB_CONFIG = OvnDbConfig(
        'OVN SOUTH DB',
        '6642',
        CONNECTION_SSL,
        'ovn-sbctl',
        oenginecons.OvnFileLocations.OVIRT_PROVIDER_OVN_SDB_KEY,
        oenginecons.OvnFileLocations.OVIRT_PROVIDER_OVN_SDB_CERT,
    )

    def __init__(self, context):
        super(Plugin, self).__init__(context=context)
        # Commands that could not be run (developer mode) or failed,
        # reported to the administrator during closeup.
        self._manual_commands = []
        self._failed_commands = []
        self._manual_tasks = []
        # Defensive defaults; the real values are assigned during the
        # customization stage, which may leave the plugin disabled.
        self._enabled = False
        self._user = None
        self._password = None

    @plugin.event(
        stage=plugin.Stages.STAGE_INIT,
    )
    def _init(self):
        """Seed environment defaults used by later stages."""
        self.environment.setdefault(
            OvnEnv.OVIRT_PROVIDER_OVN,
            None
        )
        self.environment.setdefault(
            OvnEnv.FIREWALLD_SERVICES_DIR,
            OvnFileLocations.DEFAULT_FIREWALLD_SERVICES_DIR
        )
        self.environment.setdefault(
            OvnEnv.OVN_FIREWALLD_SERVICES,
            Defaults.DEFAULT_OVN_FIREWALLD_SERVICES
        )

    def _add_provider_to_db(self):
        """Insert the OVN provider row into the engine database.

        Returns the freshly generated provider UUID (as a string).
        """
        auth_required = self._user is not None
        fqdn = self.environment[osetupcons.ConfigEnv.FQDN]
        provider_id = str(uuid.uuid4())

        self.logger.info(_('Adding default OVN provider to database'))
        # The password is stored RSA-encrypted with the engine's
        # public key; None stays None (no authentication).
        password = (
            self._encrypt_password(self._password)
            if self._password
            else None
        )
        self.environment[
            oenginecons.EngineDBEnv.STATEMENT
        ].execute(
            statement="""
                select InsertProvider(
                    v_id:=%(provider_id)s,
                    v_name:=%(provider_name)s,
                    v_description:=%(provider_description)s,
                    v_url:=%(provider_url)s,
                    v_provider_type:=%(provider_type)s,
                    v_auth_required:=%(auth_required)s,
                    v_auth_username:=%(auth_username)s,
                    v_auth_password:=%(auth_password)s,
                    v_custom_properties:=%(custom_properties)s,
                    v_plugin_type:=%(plugin_type)s,
                    v_auth_url:=%(auth_url)s
                )
            """,
            args=dict(
                provider_id=provider_id,
                provider_name='ovirt-provider-ovn',
                provider_description='oVirt network provider for OVN',
                provider_url='https://%s:9696' % fqdn,
                provider_type='EXTERNAL_NETWORK',
                auth_required=auth_required,
                auth_username=self._user,
                auth_password=password,
                custom_properties=None,
                plugin_type='OVIRT_PROVIDER_OVN',
                auth_url='https://%s:35357/v2.0/' % fqdn
            ),
        )
        return provider_id

    def _set_default_network_provider_in_db(self):
        """Make the OVN provider the Default cluster's network provider.

        Only applies when the Default cluster has no provider yet and
        the provider row actually exists.
        """
        self.environment[
            oenginecons.EngineDBEnv.STATEMENT
        ].execute(
            statement="""
                update cluster
                set default_network_provider_id=%(provider_id)s
                where default_network_provider_id is null and
                name = 'Default'
                and exists (
                    select 1 from providers where id = %(provider_id)s
                )
            """,
            args=dict(
                provider_id=self.environment[OvnEnv.OVIRT_PROVIDER_ID],
            ),
        )

    def _generate_client_secret(self):
        """Generate (once) the SSO client secret for the provider."""
        def generatePassword():
            # SystemRandom draws from os.urandom — suitable for secrets.
            rand = random.SystemRandom()
            return ''.join([
                rand.choice(string.ascii_letters + string.digits)
                for i in range(32)
            ])
        self.environment.setdefault(
            OvnEnv.OVIRT_PROVIDER_OVN_SECRET,
            generatePassword()
        )

    def _add_client_secret_to_db(self):
        """Register the provider as an SSO OAuth client in the DB.

        The secret is PBE-encoded with ovirt-engine-crypto-tool before
        being stored.
        """
        self.logger.info(_('Adding OVN provider secret to database'))
        rc, stdout, stderr = self.execute(
            (
                oenginecons.FileLocations.OVIRT_ENGINE_CRYPTO_TOOL,
                'pbe-encode',
                # Pass the secret via the environment, never argv.
                '--password=env:pass',
            ),
            envAppend={
                'OVIRT_ENGINE_JAVA_HOME_FORCE': '1',
                'OVIRT_ENGINE_JAVA_HOME': self.environment[
                    oengcommcons.ConfigEnv.JAVA_HOME
                ],
                'OVIRT_JBOSS_HOME': self.environment[
                    oengcommcons.ConfigEnv.JBOSS_HOME
                ],
                'pass': self.environment[
                    OvnEnv.OVIRT_PROVIDER_OVN_SECRET
                ]
            },
            logStreams=False,
        )
        self.environment[oenginecons.EngineDBEnv.STATEMENT].execute(
            statement="""
                select sso_oauth_register_client(
                    %(client_id)s,
                    %(client_secret)s,
                    %(scope)s,
                    %(certificate)s,
                    %(callback_prefix)s,
                    %(description)s,
                    %(email)s,
                    %(trusted)s,
                    %(notification_callback)s,
                    %(notification_callback_host_protocol)s,
                    %(notification_callback_host_verification)s,
                    %(notification_callback_chain_validation)s
                )
            """,
            args=dict(
                client_id=Const.OVIRT_PROVIDER_OVN_CLIENT_ID_VALUE,
                client_secret=stdout[0],
                scope=' '.join(
                    (
                        'ovirt-app-api',
                        'ovirt-ext=token-info:validate',
                        'ovirt-ext=token-info:public-authz-search',
                    )
                ),
                certificate=(
                    oenginecons.FileLocations.
                    OVIRT_ENGINE_PKI_ENGINE_CERT
                ),
                callback_prefix='',
                description='ovirt-provider-ovn',
                email='',
                trusted=True,
                notification_callback='',
                notification_callback_host_protocol='TLS',
                notification_callback_host_verification=False,
                notification_callback_chain_validation=True,
            ),
        )

    def _getSink(self, pm, pmsinkbase):
        """Build a package-manager sink that mirrors output to our log."""
        class MyPMSink(pmsinkbase):
            def __init__(self, log):
                super(MyPMSink, self).__init__()
                self._log = log

            def verbose(self, msg):
                super(MyPMSink, self).verbose(msg)
                self._log.debug('%s %s', pm, msg)

            def info(self, msg):
                super(MyPMSink, self).info(msg)
                self._log.info('%s %s', pm, msg)

            def error(self, msg):
                super(MyPMSink, self).error(msg)
                self._log.error('%s %s', pm, msg)
        return MyPMSink(self.logger)

    def _setup_firewalld_services(self):
        """Register the OVN firewalld service definitions for setup."""
        services = [
            s.strip()
            for s in self.environment[
                OvnEnv.OVN_FIREWALLD_SERVICES
            ].split(',')
        ]
        # TODO: handle services that were copied over to
        # /etc/firewalld/services
        services_dir = self.environment[
            OvnEnv.FIREWALLD_SERVICES_DIR
        ]
        for service in services:
            self.environment[osetupcons.NetEnv.FIREWALLD_SERVICES].append(
                {
                    'name': service,
                    'absolute_path': os.path.join(
                        services_dir, '%s.xml' % (service,)
                    )
                },
            )

    def _prompt_for_credentials(self):
        """Interactively ask for provider user and password."""
        user = self._query_ovn_user()
        password = self._query_ovn_password()
        return user, password

    def _encrypt_password(self, password):
        """RSA-encrypt *password* with the engine certificate's key.

        Returns the base64 encoding of the PKCS#1-padded ciphertext.
        """
        def _getRSA():
            # Extract the engine private key from the PKCS#12 store.
            rc, stdout, stderr = self.execute(
                args=(
                    self.command.get('openssl'),
                    'pkcs12',
                    '-in', (
                        FileLocations.OVIRT_ENGINE_PKI_ENGINE_STORE
                    ),
                    '-passin', 'pass:%s' % self.environment[
                        oenginecons.PKIEnv.STORE_PASS
                    ],
                    '-nocerts',
                    '-nodes',
                ),
                logStreams=False,
            )
            return RSA.load_key_string(
                str('\n'.join(stdout))
            )

        encrypted_password = _getRSA().public_encrypt(
            data=password,
            padding=RSA.pkcs1_padding,
        )
        return base64.b64encode(encrypted_password)

    def _query_install_ovn(self):
        """Ask whether ovirt-provider-ovn should be configured."""
        return dialog.queryBoolean(
            dialog=self.dialog,
            name='ovirt-provider-ovn',
            note=_(
                'Configure ovirt-provider-ovn '
                '(@VALUES@) [@DEFAULT@]: '
            ),
            prompt=True,
            default=True
        )

    def _query_default_credentials(self, user):
        """Ask whether to reuse the engine admin credentials."""
        return dialog.queryBoolean(
            dialog=self.dialog,
            name='ovirt-provider-ovn-default-credentials',
            note=_(
                'Use default credentials (%s) for '
                'ovirt-provider-ovn (@VALUES@) [@DEFAULT@]: ' % user
            ),
            prompt=True,
            default=True
        )

    def _query_ovn_user(self):
        """Prompt for the OVN provider user name."""
        return self.dialog.queryString(
            name='ovirt-provider-ovn-user',
            note=_(
                # Note the trailing space: without it the prompt read
                # "user[@DEFAULT@]".
                'oVirt OVN provider user '
                '[@DEFAULT@]: '
            ),
            prompt=True,
            default='admin@internal',
        )

    def _query_ovn_password(self):
        """Prompt (hidden) for the OVN provider password."""
        return self.dialog.queryString(
            name='ovirt-provider-ovn-password',
            note=_(
                'oVirt OVN provider password: '
            ),
            prompt=True,
            hidden=True,
        )

    def _get_provider_credentials(self):
        """Resolve provider credentials from environment or dialog.

        Precedence: pre-seeded environment values, then the engine
        admin credentials (if confirmed), then interactive prompt.
        The chosen values are stored back into the environment.
        """
        user = self.environment.get(
            OvnEnv.OVIRT_PROVIDER_OVN_USER
        )
        password = self.environment.get(
            OvnEnv.OVIRT_PROVIDER_OVN_PASSWORD
        )
        if user:
            return user, password

        use_default_credentials = False
        user = self.environment[
            oenginecons.ConfigEnv.ADMIN_USER
        ]
        password = self.environment[
            oenginecons.ConfigEnv.ADMIN_PASSWORD
        ]

        if user is not None and password is not None:
            use_default_credentials = self._query_default_credentials(user)

        if not use_default_credentials:
            user, password = self._prompt_for_credentials()

        self.environment[
            OvnEnv.OVIRT_PROVIDER_OVN_USER
        ] = user
        self.environment[
            OvnEnv.OVIRT_PROVIDER_OVN_PASSWORD
        ] = password

        return user, password

    def _generate_pki(self):
        """Queue PKI entities (north DB, south DB, HTTPS) for creation."""
        self.environment[oenginecons.PKIEnv.ENTITIES].extend(
            (
                {
                    'name':
                        oenginecons.OvnFileLocations.OVIRT_PROVIDER_OVN_NDB,
                    'extract': True,
                    'user': oengcommcons.SystemEnv.USER_ROOT,
                    'keepKey': False,
                },
                {
                    'name':
                        oenginecons.OvnFileLocations.OVIRT_PROVIDER_OVN_SDB,
                    'extract': True,
                    'user': oengcommcons.SystemEnv.USER_ROOT,
                    'keepKey': False,
                },
                {
                    'name':
                        oenginecons.OvnFileLocations.OVIRT_PROVIDER_OVN_HTTPS,
                    'extract': True,
                    'user': oengcommcons.SystemEnv.USER_ROOT,
                    'keepKey': False,
                }
            )
        )

    def _execute_command(self, command, error_message, manual_commands=None):
        """Run *command*, or defer it in developer mode.

        Failed commands and developer-mode commands are collected for
        the closeup report; *manual_commands* overrides what is shown
        to the user instead of the raw command.
        """
        if not self.environment[osetupcons.CoreEnv.DEVELOPER_MODE]:
            rc, stdout, stderr = self.execute(
                command,
                raiseOnError=False
            )
            if rc != 0:
                self.logger.error(error_message)
                self._append_manual_commands(
                    self._failed_commands, command, manual_commands
                )
        else:
            self._append_manual_commands(
                self._manual_commands, command, manual_commands
            )

    def _append_manual_commands(self, append_to, command, manual_commands):
        """Append *manual_commands* (if given) or *command* to a list."""
        if manual_commands:
            for manual_command in manual_commands:
                append_to.append(manual_command)
        else:
            append_to.append(command)

    def _configure_ovndb_connection(self, ovn_db_config):
        """Configure SSL (when requested) and open the OVN DB listener."""
        if (ovn_db_config.protocol == self.CONNECTION_SSL):
            self._execute_command(
                (
                    ovn_db_config.command,
                    'set-ssl',
                    ovn_db_config.key_file,
                    ovn_db_config.cert_file,
                    oenginecons.FileLocations.OVIRT_ENGINE_PKI_ENGINE_CA_CERT,
                ),
                _(
                    'Failed to configure {name} with SSL'
                ).format(
                    name=ovn_db_config.name
                )
            )

        self._execute_command(
            (
                ovn_db_config.command,
                'set-connection',
                'p%s:%s' % (ovn_db_config.protocol, ovn_db_config.port),
            ),
            _(
                'Failed to open {name} SSL connection'
            ).format(
                name=ovn_db_config.name
            )
        )

    def _configure_ovndb_north_connection(self):
        self._configure_ovndb_connection(self.OVN_NORTH_DB_CONFIG)

    def _configure_ovndb_south_connection(self):
        self._configure_ovndb_connection(self.OVN_SOUTH_DB_CONFIG)

    def _format_config_file_content(self, parameters):
        """Render {section: {key: value}} into config-file lines."""
        content = []
        content.append(
            '# This file is automatically generated by engine-setup. '
            'Please do not edit manually'
        )
        for section in parameters:
            content.append(section)
            content.extend([
                '{key}={value}'.format(key=k, value=v)
                for k, v in parameters[section].items()
            ])
        return content

    def _display_config_file_dev_task(self):
        """Queue the developer-mode manual-configuration message."""
        self._manual_tasks.append(
            _(
                'ovirt-provider-ovn could not be configured because setup'
                ' is executed in developer mode.\n'
                'To configure ovirt-provider-ovn, please copy the '
                'content of:\n'
                '   {example}'
                '\ninto:\n'
                '   {target}'
            ).format(
                example=oenginecons.OvnFileLocations.
                OVIRT_PROVIDER_ENGINE_SETUP_CONFIG_EXAMPLE,
                target=oenginecons.OvnFileLocations.
                OVIRT_PROVIDER_ENGINE_SETUP_CONFIG_FILE
            )
        )

    def _display_config_file_production_message(self):
        """Queue the production config-file summary message."""
        self._manual_tasks.append(
            _(
                'ovirt-provider-ovn configuration file was created in:\n'
                '{target}\n'
                'A sample configuration file for future reference was '
                'created in:\n'
                '{example}'
            ).format(
                example=oenginecons.OvnFileLocations.
                OVIRT_PROVIDER_ENGINE_SETUP_CONFIG_EXAMPLE,
                target=oenginecons.OvnFileLocations.
                OVIRT_PROVIDER_ENGINE_SETUP_CONFIG_FILE
            )
        )

    def _create_provider_config(self, content, config_file, uninstall_files):
        """Schedule writing *content* to *config_file* in the transaction."""
        self.environment[otopicons.CoreEnv.MAIN_TRANSACTION].append(
            filetransaction.FileTransaction(
                name=config_file,
                content=content,
                visibleButUnsafe=True,
                modifiedList=uninstall_files,
            )
        )

    def _create_config_content(self):
        """Assemble the ovirt-provider-ovn configuration file content."""
        # With AJP the engine is reached through the proxy HTTPS port,
        # otherwise through JBoss' direct HTTPS port.
        engine_port = self.environment[
            oengcommcons.ConfigEnv.HTTPS_PORT
        ] if self.environment[
            oengcommcons.ConfigEnv.JBOSS_AJP_PORT
        ] else self.environment[
            oengcommcons.ConfigEnv.JBOSS_DIRECT_HTTPS_PORT
        ]

        parameters = {
            '[PROVIDER]': {
                'provider-host': self.environment[osetupcons.ConfigEnv.FQDN]
            },
            '[SSL]': {
                'ssl-cert-file':
                    oenginecons.OvnFileLocations.OVIRT_PROVIDER_OVN_HTTPS_CERT,
                'ssl-key-file':
                    oenginecons.OvnFileLocations.OVIRT_PROVIDER_OVN_HTTPS_KEY,
                'ssl-cacert-file':
                    oenginecons.FileLocations.OVIRT_ENGINE_PKI_ENGINE_CA_CERT,
                'https-enabled':
                    'true',
            },
            '[OVN REMOTE]': {
                'ovn-remote':
                    '%s:127.0.0.1:%s' % (
                        self.OVN_NORTH_DB_CONFIG.protocol,
                        self.OVN_NORTH_DB_CONFIG.port,
                    ),
            },
            '[OVIRT]': {
                'ovirt-sso-client-id':
                    Const.OVIRT_PROVIDER_OVN_CLIENT_ID_VALUE,
                'ovirt-sso-client-secret':
                    self.environment[
                        OvnEnv.OVIRT_PROVIDER_OVN_SECRET
                    ],
                'ovirt-host':
                    'https://%s:%s' % (
                        self.environment[osetupcons.ConfigEnv.FQDN],
                        engine_port
                    ),
                'ovirt-ca-file':
                    oenginecons.FileLocations.
                    OVIRT_ENGINE_PKI_ENGINE_CA_CERT,
            },
        }
        return self._format_config_file_content(parameters)

    def _configure_ovirt_provider_ovn(self):
        """Write the provider config (example always; real file unless dev)."""
        content = self._create_config_content()
        uninstall_files = []
        self.environment[
            osetupcons.CoreEnv.REGISTER_UNINSTALL_GROUPS
        ].createGroup(
            group='ovirt-provider-ovn',
            description='ovirt-provider-ovn configuration files',
            optional=True,
        ).addFiles(
            group='ovirt-provider-ovn',
            fileList=uninstall_files,
        )
        self._create_provider_config(
            content,
            oenginecons.OvnFileLocations.
            OVIRT_PROVIDER_ENGINE_SETUP_CONFIG_EXAMPLE,
            uninstall_files
        )
        if self.environment[osetupcons.CoreEnv.DEVELOPER_MODE]:
            self._display_config_file_dev_task()
        else:
            self._create_provider_config(
                content,
                oenginecons.OvnFileLocations.
                OVIRT_PROVIDER_ENGINE_SETUP_CONFIG_FILE,
                uninstall_files
            )
            self._display_config_file_production_message()

    def _update_external_providers_keystore(self):
        """Import the engine CA cert into the external-providers keystore.

        In developer mode the equivalent keytool command (with the
        password placeholder, never the real one) is queued for manual
        execution.
        """
        config = configfile.ConfigFile([
            oenginecons.FileLocations.OVIRT_ENGINE_SERVICE_CONFIG_DEFAULTS,
            oenginecons.FileLocations.OVIRT_ENGINE_SERVICE_CONFIG_SSO
        ])
        truststore = config.get(
            'ENGINE_EXTERNAL_PROVIDERS_TRUST_STORE'
        )
        truststore_password = config.get(
            'ENGINE_EXTERNAL_PROVIDERS_TRUST_STORE_PASSWORD'
        )
        command_parts = (
            'keytool',
            '-import',
            '-alias',
            OvnEnv.PROVIDER_NAME,
            '-keystore',
            truststore,
            '-file',
            oenginecons.FileLocations.OVIRT_ENGINE_PKI_ENGINE_CA_CERT,
            '-noprompt',
            '-storepass',
        )
        command = command_parts + (truststore_password, )
        manual_keytool_command = (
            command_parts +
            ('"${ENGINE_EXTERNAL_PROVIDERS_TRUST_STORE_PASSWORD}"', )
        )
        self._execute_command(
            command,
            _(
                'Failed to import provider certificate into '
                'the external provider keystore'
            ),
            manual_commands=(
                (
                    '.',
                    FileLocations.OVIRT_ENGINE_DATADIR +
                    '/bin/engine-prolog.sh'
                ),
                manual_keytool_command,
            )
        )

    def _is_provider_installed(self):
        """Return True when a previous engine-setup installed the provider."""
        # TODO: we currently only check against installations done by
        # engine-setup
        # In the future we should also add a check against manual installation
        if self.environment.get(
            OvnEnv.OVIRT_PROVIDER_ID
        ):
            self.logger.info(_(
                'ovirt-provider-ovn already installed, skipping.'))
            return True
        return False

    @plugin.event(
        stage=plugin.Stages.STAGE_CUSTOMIZATION,
        before=(
            osetupcons.Stages.DIALOG_TITLES_E_PRODUCT_OPTIONS,
        ),
        after=(
            osetupcons.Stages.DIALOG_TITLES_S_PRODUCT_OPTIONS,
        ),
    )
    def _customization(self):
        """Decide whether the provider is to be installed this run."""
        provider_installed = self._is_provider_installed()
        if (
            self.environment[OvnEnv.OVIRT_PROVIDER_OVN] is None and
            not provider_installed
        ):
            self.environment[OvnEnv.OVIRT_PROVIDER_OVN] = \
                self._query_install_ovn()
        self._enabled = (
            self.environment[OvnEnv.OVIRT_PROVIDER_OVN] and
            not provider_installed
        )
        # Firewalld services are (re)registered also for an existing
        # installation so upgrades keep the firewall configured.
        if self._enabled or provider_installed:
            self._setup_firewalld_services()

    def _print_commands(self, message, commands):
        """Show *message* followed by an indented command list."""
        self.dialog.note(
            text='{message}\n   {commands}'.format(
                message=message,
                commands=(
                    '\n   '.join(
                        command
                        for command in commands
                    )
                )
            ),
        )

    @plugin.event(
        stage=plugin.Stages.STAGE_CUSTOMIZATION,
        before=(
            osetupcons.Stages.DIALOG_TITLES_E_MISC,
        ),
        after=(
            oengcommcons.Stages.ADMIN_PASSWORD_SET,
        ),
        condition=lambda self: self._enabled,
    )
    def _customization_credentials(self):
        """Collect the provider credentials."""
        self._user, self._password = self._get_provider_credentials()

    @plugin.event(
        stage=plugin.Stages.STAGE_MISC,
        before=(
            oenginecons.Stages.CA_AVAILABLE,
        ),
        condition=lambda self: self._enabled,
    )
    def _misc_pki(self):
        self._generate_pki()

    def _restart_service(self, service):
        """Enable *service* on boot and restart it (stop, then start)."""
        self.services.startup(
            name=service,
            state=True,
        )
        for state in (False, True):
            self.services.state(
                name=service,
                state=state,
            )

    @plugin.event(
        stage=plugin.Stages.STAGE_MISC,
        name=oenginecons.Stages.OVN_SERVICES_RESTART,
        condition=lambda self: (
            self._enabled and
            not self.environment[osetupcons.CoreEnv.DEVELOPER_MODE]
        )
    )
    def _restart_ovn_services(self):
        for service in OvnEnv.ENGINE_MACHINE_OVN_SERVICES:
            self._restart_service(service)

    @plugin.event(
        stage=plugin.Stages.STAGE_MISC,
        before=(
            # Trailing comma is required: without it `before` is a bare
            # string and otopi would treat each character as a stage.
            oenginecons.Stages.OVN_PROVIDER_SERVICE_RESTART,
        ),
        after=(
            oenginecons.Stages.CA_AVAILABLE,
            oenginecons.Stages.OVN_SERVICES_RESTART,
        ),
        condition=lambda self: self._enabled,
    )
    def _misc_configure_ovn_pki(self):
        self._configure_ovndb_north_connection()
        self._configure_ovndb_south_connection()

    @plugin.event(
        stage=plugin.Stages.STAGE_MISC,
        before=(
            # Trailing comma required — see _misc_configure_ovn_pki.
            oenginecons.Stages.OVN_PROVIDER_SERVICE_RESTART,
        ),
        after=(
            oenginecons.Stages.CA_AVAILABLE,
            oenginecons.Stages.OVN_SERVICES_RESTART,
        ),
        condition=lambda self: self._enabled,
    )
    def _misc_configure_provider(self):
        self._generate_client_secret()
        self._configure_ovirt_provider_ovn()
        self._update_external_providers_keystore()

    @plugin.event(
        stage=plugin.Stages.STAGE_MISC,
        name=oenginecons.Stages.OVN_PROVIDER_SERVICE_RESTART,
        condition=lambda self: (
            self._enabled and
            not self.environment[osetupcons.CoreEnv.DEVELOPER_MODE]
        )
    )
    def _restart_provider_service(self):
        self._restart_service(OvnEnv.OVIRT_PROVIDER_OVN_SERVICE)

    @plugin.event(
        stage=plugin.Stages.STAGE_MISC,
        name=oenginecons.Stages.OVN_PROVIDER_OVN_DB,
        after=(
            oengcommcons.Stages.DB_CONNECTION_AVAILABLE,
            oenginecons.Stages.CA_AVAILABLE,
        ),
        condition=lambda self: self._enabled,
    )
    def _misc_db_entries(self):
        """Register the provider and its SSO secret in the engine DB."""
        # Guard BEFORE inserting: previously the provider row was
        # inserted first and the check raised afterwards, leaving an
        # orphaned row behind.
        if self.environment.get(OvnEnv.OVIRT_PROVIDER_ID) is not None:
            raise Exception(_(
                'Attempting to set ovirt-provider-ovn id, but'
                ' the id has already been set. Overwriting'
                ' an already existing provider id is not allowed.'
            ))
        self.environment[
            OvnEnv.OVIRT_PROVIDER_ID
        ] = self._add_provider_to_db()
        self._add_client_secret_to_db()

    @plugin.event(
        stage=plugin.Stages.STAGE_MISC,
        after=(
            oenginecons.Stages.OVN_PROVIDER_OVN_DB,
        ),
        condition=lambda self: (
            self.environment.get(OvnEnv.OVIRT_PROVIDER_ID) is not None
        )
    )
    def _set_default_network_provider(self):
        self._set_default_network_provider_in_db()

    @plugin.event(
        stage=plugin.Stages.STAGE_CLOSEUP,
        before=(
            osetupcons.Stages.DIALOG_TITLES_E_SUMMARY,
        ),
        after=(
            osetupcons.Stages.DIALOG_TITLES_S_SUMMARY,
        ),
        condition=lambda self: (
            self._enabled and
            self.environment[osetupcons.CoreEnv.DEVELOPER_MODE]
        )
    )
    def _print_restart_services_commands(self):
        """Tell the developer which services to restart manually."""
        self._print_commands(
            _(
                'Some services were not restarted automatically \n'
                'in developer mode and must be restarted manually.\n'
                'Please execute the following commands to start them:'
            ),
            [
                'systemctl restart ' + name
                for name in OvnEnv.ENGINE_MACHINE_OVN_SERVICES
            ]
        )

    @plugin.event(
        stage=plugin.Stages.STAGE_CLOSEUP,
        before=(
            osetupcons.Stages.DIALOG_TITLES_E_SUMMARY,
        ),
        after=(
            osetupcons.Stages.DIALOG_TITLES_S_SUMMARY,
        ),
        condition=lambda self: (
            self._enabled and (
                self._manual_commands or
                self._failed_commands
            )
        ),
    )
    def _print_manual_commands(self):
        """Report queued tasks, developer-mode commands and failures."""
        if self._manual_tasks:
            for task in self._manual_tasks:
                self.dialog.note(
                    text=task
                )
        if self._manual_commands:
            self._print_commands(
                _(
                    'The following commands can not be executed in\n'
                    'developer mode. Please execute them as root:'
                ),
                [
                    ' '.join(command)
                    for command
                    in self._manual_commands
                ]
            )
        if self._failed_commands:
            self._print_commands(
                _(
                    'The following commands failed to execute.\n'
                    'Please execute them manually as root:'
                ),
                [
                    ' '.join(command)
                    for command
                    in self._failed_commands
                ]
            )
# vim: expandtab tabstop=4 shiftwidth=4
| 32.335106 | 79 | 0.544794 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.