max_stars_repo_path
stringlengths 4
286
| max_stars_repo_name
stringlengths 5
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.03M
| content_cleaned
stringlengths 6
1.03M
| language
stringclasses 111
values | language_score
float64 0.03
1
| comments
stringlengths 0
556k
| edu_score
float64 0.32
5.03
| edu_int_score
int64 0
5
|
|---|---|---|---|---|---|---|---|---|---|---|
practica2/ejercicio5.py
|
danipozo/practicas-mnii
| 1
|
6629351
|
#Librerias
import numpy as num
import scipy as sci
from numpy.polynomial import polynomial as pol
def rkj(f,a,b,k,j):
if(j == 0):
h = (b-a)/(2**k)
parcial = 0
for i in range (2**k - 1):
parcial = parcial + f(a+i*h)
res = (h/2)*(f(a) + 2*parcial +f(b))
else:
res = rkj(f,a,b,k,j-1) + (1/(4**j-1))*(rkj(f,a,b,k,j-1) - rkj(f,a,b,k-1,j-1))
return res;
#k = n
def romberg(f,a,b,n):
aprox = rkj(f,a,b,n,n)
return aprox
f = lambda x: num.log(x)
a=1
b=2
n=10
res = romberg(f,a,b,n)
print("\n El valor de la aproximacion por el metodo de Romberg es:", res)
|
#Librerias
import numpy as num
import scipy as sci
from numpy.polynomial import polynomial as pol
def rkj(f,a,b,k,j):
if(j == 0):
h = (b-a)/(2**k)
parcial = 0
for i in range (2**k - 1):
parcial = parcial + f(a+i*h)
res = (h/2)*(f(a) + 2*parcial +f(b))
else:
res = rkj(f,a,b,k,j-1) + (1/(4**j-1))*(rkj(f,a,b,k,j-1) - rkj(f,a,b,k-1,j-1))
return res;
#k = n
def romberg(f,a,b,n):
aprox = rkj(f,a,b,n,n)
return aprox
f = lambda x: num.log(x)
a=1
b=2
n=10
res = romberg(f,a,b,n)
print("\n El valor de la aproximacion por el metodo de Romberg es:", res)
|
it
| 0.239359
|
#Librerias #k = n
| 3.145649
| 3
|
rasa_core/policies/ensemble.py
|
ymihay/dialogue_flow
| 1
|
6629352
|
<reponame>ymihay/dialogue_flow<gh_stars>1-10
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import json
import logging
import os
import numpy as np
import typing
from builtins import str
from typing import Text, Optional
import rasa_core
from rasa_core import utils
from rasa_core.events import SlotSet
from rasa_core.trackers import DialogueStateTracker
from rasa_core.training.data import DialogueTrainingData
logger = logging.getLogger(__name__)
if typing.TYPE_CHECKING:
from rasa_core.domain import Domain
from rasa_core.featurizers import Featurizer
class PolicyEnsemble(object):
def __init__(self, policies, action_fingerprints=None):
self.policies = policies
self.training_metadata = {}
if action_fingerprints:
self.action_fingerprints = action_fingerprints
else:
self.action_fingerprints = {}
def train(self, training_data, domain, featurizer, **kwargs):
# type: (DialogueTrainingData, Domain, Featurizer, **Any) -> None
if not training_data.is_empty():
for policy in self.policies:
policy.prepare(featurizer,
max_history=training_data.max_history())
policy.train(training_data, domain, **kwargs)
self.training_metadata.update(training_data.metadata)
else:
logger.info("Skipped training, because there are no "
"training samples.")
def predict_next_action(self, tracker, domain):
# type: (DialogueStateTracker, Domain) -> (float, int)
"""Predicts the next action the bot should take after seeing x.
This should be overwritten by more advanced policies to use ML to
predict the action. Returns the index of the next action"""
probabilities = self.probabilities_using_best_policy(tracker, domain)
max_index = np.argmax(probabilities)
logger.debug("Predicted next action #{} with prob {:.2f}.".format(
max_index, probabilities[max_index]))
return max_index
def probabilities_using_best_policy(self, tracker, domain):
raise NotImplementedError
@staticmethod
def _create_action_fingerprints(training_events):
"""Fingerprint each action using the events it created during train.
This allows us to emit warnings when the model is used
if an action does things it hasn't done during training."""
action_fingerprints = {}
for k, vs in training_events.items():
slots = list({v.key for v in vs if isinstance(v, SlotSet)})
action_fingerprints[k] = {"slots": slots}
return action_fingerprints
def _persist_metadata(self, path, max_history):
# type: (Text, Optional[int]) -> None
"""Persists the domain specification to storage."""
# make sure the directory we persist to exists
domain_spec_path = os.path.join(path, 'policy_metadata.json')
utils.create_dir_for_file(domain_spec_path)
policy_names = [utils.module_path_from_instance(p)
for p in self.policies]
training_events = self.training_metadata.get("events", {})
action_fingerprints = self._create_action_fingerprints(training_events)
metadata = {
"action_fingerprints": action_fingerprints,
"rasa_core": rasa_core.__version__,
"max_history": max_history,
"ensemble_name": self.__module__ + "." + self.__class__.__name__,
"policy_names": policy_names
}
utils.dump_obj_as_json_to_file(domain_spec_path, metadata)
def persist(self, path):
# type: (Text) -> None
"""Persists the policy to storage."""
if self.policies:
self._persist_metadata(path, self.policies[0].max_history)
else:
self._persist_metadata(path, None)
for policy in self.policies:
policy.persist(path)
@classmethod
def load_metadata(cls, path):
matadata_path = os.path.join(path, 'policy_metadata.json')
with io.open(matadata_path) as f:
metadata = json.loads(f.read())
return metadata
@classmethod
def load(cls, path, featurizer):
# type: (Text, Optional[Featurizer]) -> PolicyEnsemble
"""Loads policy and domain specification from storage"""
metadata = cls.load_metadata(path)
policies = []
for policy_name in metadata["policy_names"]:
policy_cls = utils.class_from_module_path(policy_name)
policy = policy_cls.load(path, featurizer, metadata["max_history"])
policies.append(policy)
ensemble_cls = utils.class_from_module_path(metadata["ensemble_name"])
fingerprints = metadata.get("action_fingerprints", {})
ensemble = ensemble_cls(policies, fingerprints)
return ensemble
class SimplePolicyEnsemble(PolicyEnsemble):
def __init__(self, policies, known_slot_events=None):
super(SimplePolicyEnsemble, self).__init__(policies, known_slot_events)
def probabilities_using_best_policy(self, tracker, domain):
result = None
max_confidence = -1
for p in self.policies:
probabilities = p.predict_action_probabilities(tracker, domain)
confidence = np.max(probabilities)
if confidence > max_confidence:
max_confidence = confidence
result = probabilities
return result
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import json
import logging
import os
import numpy as np
import typing
from builtins import str
from typing import Text, Optional
import rasa_core
from rasa_core import utils
from rasa_core.events import SlotSet
from rasa_core.trackers import DialogueStateTracker
from rasa_core.training.data import DialogueTrainingData
logger = logging.getLogger(__name__)
if typing.TYPE_CHECKING:
from rasa_core.domain import Domain
from rasa_core.featurizers import Featurizer
class PolicyEnsemble(object):
def __init__(self, policies, action_fingerprints=None):
self.policies = policies
self.training_metadata = {}
if action_fingerprints:
self.action_fingerprints = action_fingerprints
else:
self.action_fingerprints = {}
def train(self, training_data, domain, featurizer, **kwargs):
# type: (DialogueTrainingData, Domain, Featurizer, **Any) -> None
if not training_data.is_empty():
for policy in self.policies:
policy.prepare(featurizer,
max_history=training_data.max_history())
policy.train(training_data, domain, **kwargs)
self.training_metadata.update(training_data.metadata)
else:
logger.info("Skipped training, because there are no "
"training samples.")
def predict_next_action(self, tracker, domain):
# type: (DialogueStateTracker, Domain) -> (float, int)
"""Predicts the next action the bot should take after seeing x.
This should be overwritten by more advanced policies to use ML to
predict the action. Returns the index of the next action"""
probabilities = self.probabilities_using_best_policy(tracker, domain)
max_index = np.argmax(probabilities)
logger.debug("Predicted next action #{} with prob {:.2f}.".format(
max_index, probabilities[max_index]))
return max_index
def probabilities_using_best_policy(self, tracker, domain):
raise NotImplementedError
@staticmethod
def _create_action_fingerprints(training_events):
"""Fingerprint each action using the events it created during train.
This allows us to emit warnings when the model is used
if an action does things it hasn't done during training."""
action_fingerprints = {}
for k, vs in training_events.items():
slots = list({v.key for v in vs if isinstance(v, SlotSet)})
action_fingerprints[k] = {"slots": slots}
return action_fingerprints
def _persist_metadata(self, path, max_history):
# type: (Text, Optional[int]) -> None
"""Persists the domain specification to storage."""
# make sure the directory we persist to exists
domain_spec_path = os.path.join(path, 'policy_metadata.json')
utils.create_dir_for_file(domain_spec_path)
policy_names = [utils.module_path_from_instance(p)
for p in self.policies]
training_events = self.training_metadata.get("events", {})
action_fingerprints = self._create_action_fingerprints(training_events)
metadata = {
"action_fingerprints": action_fingerprints,
"rasa_core": rasa_core.__version__,
"max_history": max_history,
"ensemble_name": self.__module__ + "." + self.__class__.__name__,
"policy_names": policy_names
}
utils.dump_obj_as_json_to_file(domain_spec_path, metadata)
def persist(self, path):
# type: (Text) -> None
"""Persists the policy to storage."""
if self.policies:
self._persist_metadata(path, self.policies[0].max_history)
else:
self._persist_metadata(path, None)
for policy in self.policies:
policy.persist(path)
@classmethod
def load_metadata(cls, path):
matadata_path = os.path.join(path, 'policy_metadata.json')
with io.open(matadata_path) as f:
metadata = json.loads(f.read())
return metadata
@classmethod
def load(cls, path, featurizer):
# type: (Text, Optional[Featurizer]) -> PolicyEnsemble
"""Loads policy and domain specification from storage"""
metadata = cls.load_metadata(path)
policies = []
for policy_name in metadata["policy_names"]:
policy_cls = utils.class_from_module_path(policy_name)
policy = policy_cls.load(path, featurizer, metadata["max_history"])
policies.append(policy)
ensemble_cls = utils.class_from_module_path(metadata["ensemble_name"])
fingerprints = metadata.get("action_fingerprints", {})
ensemble = ensemble_cls(policies, fingerprints)
return ensemble
class SimplePolicyEnsemble(PolicyEnsemble):
def __init__(self, policies, known_slot_events=None):
super(SimplePolicyEnsemble, self).__init__(policies, known_slot_events)
def probabilities_using_best_policy(self, tracker, domain):
result = None
max_confidence = -1
for p in self.policies:
probabilities = p.predict_action_probabilities(tracker, domain)
confidence = np.max(probabilities)
if confidence > max_confidence:
max_confidence = confidence
result = probabilities
return result
|
en
| 0.816467
|
# type: (DialogueTrainingData, Domain, Featurizer, **Any) -> None # type: (DialogueStateTracker, Domain) -> (float, int) Predicts the next action the bot should take after seeing x. This should be overwritten by more advanced policies to use ML to predict the action. Returns the index of the next action #{} with prob {:.2f}.".format( Fingerprint each action using the events it created during train. This allows us to emit warnings when the model is used if an action does things it hasn't done during training. # type: (Text, Optional[int]) -> None Persists the domain specification to storage. # make sure the directory we persist to exists # type: (Text) -> None Persists the policy to storage. # type: (Text, Optional[Featurizer]) -> PolicyEnsemble Loads policy and domain specification from storage
| 2.233946
| 2
|
src/website/migrations/0001_initial.py
|
IkramKhan-DevOps/cw-ai-expression-detector
| 0
|
6629353
|
<reponame>IkramKhan-DevOps/cw-ai-expression-detector
# Generated by Django 3.2 on 2022-03-07 16:37
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ScanImage',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image_url', models.ImageField(upload_to='images/')),
('created_on', models.DateTimeField(auto_now_add=True)),
],
options={
'verbose_name_plural': 'Scan Images',
'ordering': ['-id'],
},
),
]
|
# Generated by Django 3.2 on 2022-03-07 16:37
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ScanImage',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image_url', models.ImageField(upload_to='images/')),
('created_on', models.DateTimeField(auto_now_add=True)),
],
options={
'verbose_name_plural': 'Scan Images',
'ordering': ['-id'],
},
),
]
|
en
| 0.87567
|
# Generated by Django 3.2 on 2022-03-07 16:37
| 1.738361
| 2
|
test_numpy.py
|
m-takeuchi/ilislife_wxp
| 0
|
6629354
|
<filename>test_numpy.py
import numpy as np
def id(x):
# この関数は配列のメモリブロックアドレスを返します
return x.__array_interface__['data'][0]
def get_data_base(arr):
"""与えられたNumPyの配列から、本当のデータを
「持っている」ベース配列を探す"""
base = arr
while isinstance(base.base, np.ndarray):
base = base.base
return base
def arrays_share_data(x, y):
return get_data_base(x) is get_data_base(y)
print(arrays_share_data(a,a.copy()),
arrays_share_data(a,a[1:]))
a = np.arange(0,10)
id(a)
b = np.roll(a, 1, axis=0)
id(b)
## np.rollすると暗黙にコピーされてメモリが消費される!!
# if len(self.data_buffer[0]) > self.BUFFSIZE:
# del(self.data_buffer[0][0]) # バッファがサイズを越えたら古いvalから削除
# del(self.data_buffer[1][0]) # バッファがサイズを越えたら古いvalから削除
# del(self.data_buffer[2][0]) # バッファがサイズを越えたら古いvalから削除
# if(not val == (None, None, None)):
# self.data_buffer[0].append(val[0]) # バッファにデータを追加
# self.data_buffer[1].append(val[1]) # バッファにデータを追加
# self.data_buffer[2].append(val[2]) # バッファにデータを追加
### 時間t を設定
buff_len = len(self.data_buffer[0])
t = list(range(100))[::-1]
# for i, p in enumerate(self.plot):
# self.plot[i].points = self.T_list(t, self.data_buffer[i][-buff_len:]) #リストの転\
self.plot[0].points = self.T_list(t, self.data_buffer[0][-buff_len:])
self.plot[1].points = self.T_list(t, self.data_buffer[1][-buff_len:])
self.plot[2].points = self.T_list(t, self.data_buffer[2][-buff_len:])
print(self.graph_y_upl,self.graph_y_lwl)
def T_list(self, x, y):
#リストの転置
return list(map(list, zip(*[x, y] )))
def format_val(self, val):
return '{0:.3f}'.format(val)
|
<filename>test_numpy.py
import numpy as np
def id(x):
# この関数は配列のメモリブロックアドレスを返します
return x.__array_interface__['data'][0]
def get_data_base(arr):
"""与えられたNumPyの配列から、本当のデータを
「持っている」ベース配列を探す"""
base = arr
while isinstance(base.base, np.ndarray):
base = base.base
return base
def arrays_share_data(x, y):
return get_data_base(x) is get_data_base(y)
print(arrays_share_data(a,a.copy()),
arrays_share_data(a,a[1:]))
a = np.arange(0,10)
id(a)
b = np.roll(a, 1, axis=0)
id(b)
## np.rollすると暗黙にコピーされてメモリが消費される!!
# if len(self.data_buffer[0]) > self.BUFFSIZE:
# del(self.data_buffer[0][0]) # バッファがサイズを越えたら古いvalから削除
# del(self.data_buffer[1][0]) # バッファがサイズを越えたら古いvalから削除
# del(self.data_buffer[2][0]) # バッファがサイズを越えたら古いvalから削除
# if(not val == (None, None, None)):
# self.data_buffer[0].append(val[0]) # バッファにデータを追加
# self.data_buffer[1].append(val[1]) # バッファにデータを追加
# self.data_buffer[2].append(val[2]) # バッファにデータを追加
### 時間t を設定
buff_len = len(self.data_buffer[0])
t = list(range(100))[::-1]
# for i, p in enumerate(self.plot):
# self.plot[i].points = self.T_list(t, self.data_buffer[i][-buff_len:]) #リストの転\
self.plot[0].points = self.T_list(t, self.data_buffer[0][-buff_len:])
self.plot[1].points = self.T_list(t, self.data_buffer[1][-buff_len:])
self.plot[2].points = self.T_list(t, self.data_buffer[2][-buff_len:])
print(self.graph_y_upl,self.graph_y_lwl)
def T_list(self, x, y):
#リストの転置
return list(map(list, zip(*[x, y] )))
def format_val(self, val):
return '{0:.3f}'.format(val)
|
ja
| 0.948782
|
# この関数は配列のメモリブロックアドレスを返します 与えられたNumPyの配列から、本当のデータを 「持っている」ベース配列を探す ## np.rollすると暗黙にコピーされてメモリが消費される!! # if len(self.data_buffer[0]) > self.BUFFSIZE: # del(self.data_buffer[0][0]) # バッファがサイズを越えたら古いvalから削除 # del(self.data_buffer[1][0]) # バッファがサイズを越えたら古いvalから削除 # del(self.data_buffer[2][0]) # バッファがサイズを越えたら古いvalから削除 # if(not val == (None, None, None)): # self.data_buffer[0].append(val[0]) # バッファにデータを追加 # self.data_buffer[1].append(val[1]) # バッファにデータを追加 # self.data_buffer[2].append(val[2]) # バッファにデータを追加 ### 時間t を設定 # for i, p in enumerate(self.plot): # self.plot[i].points = self.T_list(t, self.data_buffer[i][-buff_len:]) #リストの転\ #リストの転置
| 2.925628
| 3
|
merlion/models/anomaly/forecast_based/prophet.py
|
ankitakashyap05/Merlion
| 2,215
|
6629355
|
<reponame>ankitakashyap05/Merlion
#
# Copyright (c) 2021 salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
"""
Adaptation of Facebook's Prophet forecasting model to anomaly detection.
"""
from merlion.models.anomaly.forecast_based.base import ForecastingDetectorBase
from merlion.models.anomaly.base import DetectorConfig
from merlion.models.forecast.prophet import ProphetConfig, Prophet
from merlion.post_process.threshold import AggregateAlarms
from merlion.transform.moving_average import DifferenceTransform
class ProphetDetectorConfig(ProphetConfig, DetectorConfig):
_default_transform = DifferenceTransform()
_default_threshold = AggregateAlarms(alm_threshold=3)
class ProphetDetector(ForecastingDetectorBase, Prophet):
config_class = ProphetDetectorConfig
|
#
# Copyright (c) 2021 salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
"""
Adaptation of Facebook's Prophet forecasting model to anomaly detection.
"""
from merlion.models.anomaly.forecast_based.base import ForecastingDetectorBase
from merlion.models.anomaly.base import DetectorConfig
from merlion.models.forecast.prophet import ProphetConfig, Prophet
from merlion.post_process.threshold import AggregateAlarms
from merlion.transform.moving_average import DifferenceTransform
class ProphetDetectorConfig(ProphetConfig, DetectorConfig):
_default_transform = DifferenceTransform()
_default_threshold = AggregateAlarms(alm_threshold=3)
class ProphetDetector(ForecastingDetectorBase, Prophet):
config_class = ProphetDetectorConfig
|
en
| 0.68969
|
# # Copyright (c) 2021 salesforce.com, inc. # All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause # Adaptation of Facebook's Prophet forecasting model to anomaly detection.
| 1.537837
| 2
|
auth-api/src/auth_api/schemas/membership.py
|
karthik-aot/sbc-auth
| 3
|
6629356
|
<gh_stars>1-10
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manager for membership schema and export."""
from marshmallow import fields
from auth_api.models import Membership as MembershipModel
from .base_schema import BaseSchema
class MembershipSchema(BaseSchema): # pylint: disable=too-many-ancestors, too-few-public-methods
"""This is the schema for the Membership model."""
class Meta: # pylint: disable=too-few-public-methods
"""Maps all of the Membership fields to a default schema."""
model = MembershipModel
fields = ('id', 'membership_type_code', 'user', 'org', 'membership_status')
user = fields.Nested('UserSchema', only=(
'firstname', 'lastname', 'username', 'modified', 'contacts', 'login_source', 'id'))
org = fields.Nested('OrgSchema', only=('id', 'name', 'affiliated_entities', 'org_type', 'members', 'invitations'))
membership_status = fields.Pluck('MembershipStatusCodeSchema', 'name', data_key='membershipStatus')
|
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manager for membership schema and export."""
from marshmallow import fields
from auth_api.models import Membership as MembershipModel
from .base_schema import BaseSchema
class MembershipSchema(BaseSchema): # pylint: disable=too-many-ancestors, too-few-public-methods
"""This is the schema for the Membership model."""
class Meta: # pylint: disable=too-few-public-methods
"""Maps all of the Membership fields to a default schema."""
model = MembershipModel
fields = ('id', 'membership_type_code', 'user', 'org', 'membership_status')
user = fields.Nested('UserSchema', only=(
'firstname', 'lastname', 'username', 'modified', 'contacts', 'login_source', 'id'))
org = fields.Nested('OrgSchema', only=('id', 'name', 'affiliated_entities', 'org_type', 'members', 'invitations'))
membership_status = fields.Pluck('MembershipStatusCodeSchema', 'name', data_key='membershipStatus')
|
en
| 0.83851
|
# Copyright © 2019 Province of British Columbia # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Manager for membership schema and export. # pylint: disable=too-many-ancestors, too-few-public-methods This is the schema for the Membership model. # pylint: disable=too-few-public-methods Maps all of the Membership fields to a default schema.
| 1.923987
| 2
|
src/05/wip.py
|
j-carson/advent_2021
| 1
|
6629357
|
import re
from collections import defaultdict, namedtuple
from pathlib import Path
import numpy as np
from ycecream import y
Point = namedtuple("Point", "x,y")
def generate_points(start, end):
xstep = np.sign(end.x - start.x)
ystep = np.sign(end.y - start.y)
nextpoint = start
while True:
yield nextpoint
if nextpoint == end:
return
nextpoint = Point(nextpoint.x + xstep, nextpoint.y + ystep)
def solve1(startpts, endpts, part2flag=False):
result = defaultdict(int)
for start, end in zip(startpts, endpts):
if any(
(
part2flag,
start.x == end.x,
start.y == end.y,
)
):
for point in generate_points(start, end):
result[point] += 1
score = sum([val >= 2 for val in result.values()])
return score
def solve2(startpts, endpts):
return solve1(startpts, endpts, True)
def parsetext(text):
numre = re.compile(r"(\d+)")
lines = text.splitlines()
startpts = []
endpts = []
for line in lines:
x1, y1, x2, y2 = [int(i) for i in numre.findall(line)]
startpts.append(Point(x1, y1))
endpts.append(Point(x2, y2))
return startpts, endpts
def testdata():
return """0,9 -> 5,9
8,0 -> 0,8
9,4 -> 3,4
2,2 -> 2,1
7,0 -> 7,4
6,4 -> 2,0
0,9 -> 2,9
3,4 -> 1,4
0,0 -> 8,8
5,5 -> 8,2
"""
def mydata():
return Path("input.txt").read_text()
def part1():
test_solution = 5
result = solve1(*parsetext(testdata()))
assert result == test_solution
return solve1(*parsetext(mydata()))
def part2():
test_solution = 12
result = solve2(*parsetext(testdata()))
assert result == test_solution
return solve2(*parsetext(mydata()))
result = part1()
print("Part 1: ", result)
result = part2()
print("Part 2: ", result)
|
import re
from collections import defaultdict, namedtuple
from pathlib import Path
import numpy as np
from ycecream import y
Point = namedtuple("Point", "x,y")
def generate_points(start, end):
xstep = np.sign(end.x - start.x)
ystep = np.sign(end.y - start.y)
nextpoint = start
while True:
yield nextpoint
if nextpoint == end:
return
nextpoint = Point(nextpoint.x + xstep, nextpoint.y + ystep)
def solve1(startpts, endpts, part2flag=False):
result = defaultdict(int)
for start, end in zip(startpts, endpts):
if any(
(
part2flag,
start.x == end.x,
start.y == end.y,
)
):
for point in generate_points(start, end):
result[point] += 1
score = sum([val >= 2 for val in result.values()])
return score
def solve2(startpts, endpts):
return solve1(startpts, endpts, True)
def parsetext(text):
numre = re.compile(r"(\d+)")
lines = text.splitlines()
startpts = []
endpts = []
for line in lines:
x1, y1, x2, y2 = [int(i) for i in numre.findall(line)]
startpts.append(Point(x1, y1))
endpts.append(Point(x2, y2))
return startpts, endpts
def testdata():
return """0,9 -> 5,9
8,0 -> 0,8
9,4 -> 3,4
2,2 -> 2,1
7,0 -> 7,4
6,4 -> 2,0
0,9 -> 2,9
3,4 -> 1,4
0,0 -> 8,8
5,5 -> 8,2
"""
def mydata():
return Path("input.txt").read_text()
def part1():
test_solution = 5
result = solve1(*parsetext(testdata()))
assert result == test_solution
return solve1(*parsetext(mydata()))
def part2():
test_solution = 12
result = solve2(*parsetext(testdata()))
assert result == test_solution
return solve2(*parsetext(mydata()))
result = part1()
print("Part 1: ", result)
result = part2()
print("Part 2: ", result)
|
fr
| 0.921246
|
0,9 -> 5,9 8,0 -> 0,8 9,4 -> 3,4 2,2 -> 2,1 7,0 -> 7,4 6,4 -> 2,0 0,9 -> 2,9 3,4 -> 1,4 0,0 -> 8,8 5,5 -> 8,2
| 3.123338
| 3
|
subclass.py
|
IanMcLaughlin19/dataClassUtil
| 0
|
6629358
|
import pprint
def convert_keys_to_snake_case(dict_to_fix: dict) -> dict:
result = {}
for key, value in dict_to_fix.items():
new_key = key.replace("-", "_")
result[new_key] = value
return result
class SubClass:
"""
Not meant to be instantiated directly, this class enables other classes to be recursively instantiated so that
you can make a deeply nested fully typed call such as
AlgorandTransaction.application_transaction.local_state_schema.num_byte_slice and have it work.
Without the init from json dict method, subclasses that are instantiated won't be able to actually access the attributes
in a nested fashion, so it is more performance to not use the init_from_json_dict() method, but the code will be
less descriptive so it depends what the users requirements are
I think this could be improved by digging into the python dataclasses API a bit, just an initial hack.
"""
SUBCLASSES = {}
@classmethod
def init_from_json_dict(cls, json_dict: dict):
"""
This is a bit of Python magic neccesary to actually instantiate deeply nested python data structures. From a
performance perspective there is a lot to be done here to improve, this is intended to mainly enable as readable
and useable code as possible for now
:param json_dict: arbitrary json dict data. The data schema should match the schema of the subclass or it
will throw a KeyError
:return:
"""
new_dict = convert_keys_to_snake_case(json_dict)
for key in new_dict:
if key in cls.SUBCLASSES:
if type(new_dict[key]) == dict:
new_dict[key] = convert_keys_to_snake_case(new_dict[key])
new_dict[key] = cls.SUBCLASSES[key].init_from_json_dict(new_dict[key])
elif type(new_dict[key]) == list:
new_dict[key] = list(
map(lambda listDicts: cls.SUBCLASSES[key].init_from_json_dict(listDicts), new_dict[key]))
try:
return cls(**new_dict)
except Exception as e:
print("class that caused issue", cls)
print("data struct that caused issue")
pprint.pprint(e)
pprint.pprint(new_dict)
|
import pprint
def convert_keys_to_snake_case(dict_to_fix: dict) -> dict:
result = {}
for key, value in dict_to_fix.items():
new_key = key.replace("-", "_")
result[new_key] = value
return result
class SubClass:
"""
Not meant to be instantiated directly, this class enables other classes to be recursively instantiated so that
you can make a deeply nested fully typed call such as
AlgorandTransaction.application_transaction.local_state_schema.num_byte_slice and have it work.
Without the init from json dict method, subclasses that are instantiated won't be able to actually access the attributes
in a nested fashion, so it is more performance to not use the init_from_json_dict() method, but the code will be
less descriptive so it depends what the users requirements are
I think this could be improved by digging into the python dataclasses API a bit, just an initial hack.
"""
SUBCLASSES = {}
@classmethod
def init_from_json_dict(cls, json_dict: dict):
"""
This is a bit of Python magic neccesary to actually instantiate deeply nested python data structures. From a
performance perspective there is a lot to be done here to improve, this is intended to mainly enable as readable
and useable code as possible for now
:param json_dict: arbitrary json dict data. The data schema should match the schema of the subclass or it
will throw a KeyError
:return:
"""
new_dict = convert_keys_to_snake_case(json_dict)
for key in new_dict:
if key in cls.SUBCLASSES:
if type(new_dict[key]) == dict:
new_dict[key] = convert_keys_to_snake_case(new_dict[key])
new_dict[key] = cls.SUBCLASSES[key].init_from_json_dict(new_dict[key])
elif type(new_dict[key]) == list:
new_dict[key] = list(
map(lambda listDicts: cls.SUBCLASSES[key].init_from_json_dict(listDicts), new_dict[key]))
try:
return cls(**new_dict)
except Exception as e:
print("class that caused issue", cls)
print("data struct that caused issue")
pprint.pprint(e)
pprint.pprint(new_dict)
|
en
| 0.900745
|
Not meant to be instantiated directly, this class enables other classes to be recursively instantiated so that you can make a deeply nested fully typed call such as AlgorandTransaction.application_transaction.local_state_schema.num_byte_slice and have it work. Without the init from json dict method, subclasses that are instantiated won't be able to actually access the attributes in a nested fashion, so it is more performance to not use the init_from_json_dict() method, but the code will be less descriptive so it depends what the users requirements are I think this could be improved by digging into the python dataclasses API a bit, just an initial hack. This is a bit of Python magic neccesary to actually instantiate deeply nested python data structures. From a performance perspective there is a lot to be done here to improve, this is intended to mainly enable as readable and useable code as possible for now :param json_dict: arbitrary json dict data. The data schema should match the schema of the subclass or it will throw a KeyError :return:
| 3.366902
| 3
|
src/atcoder/abc003/d/sol_2.py
|
kagemeka/competitive-programming
| 1
|
6629359
|
<filename>src/atcoder/abc003/d/sol_2.py<gh_stars>1-10
import typing
import sys
# Pascal-triangle table of binomial coefficients, reduced modulo a fixed
# modulus at construction time.
class ModChoosePascal():
    def __call__(self, n: int, k: int) -> int:
        # Return C(n, k) mod `mod`; any out-of-range (n, k) yields 0.
        c = self.__c
        return c[n][k] if 0 <= k <= n < len(c) else 0
    def __init__(self, n: int, mod: int) -> typing.NoReturn:
        # Build an n x n Pascal triangle with every entry taken mod `mod`.
        c = [[0] * n for _ in range(n)]
        for i in range(n): c[i][0] = 1
        for i in range(1, n):
            for j in range(1, i + 1):
                c[i][j] = (c[i - 1][j - 1] + c[i - 1][j]) % mod
        self.__c = c
def solve(
    r: int,
    c: int,
    x: int,
    y: int,
    d: int,
    l: int,
) -> typing.NoReturn:
    # AtCoder ABC003 D: count placements of d + l pieces whose bounding box
    # is exactly x-by-y inside an r-by-c board, modulo 1e9+7.
    mod = 1_000_000_007
    choose = ModChoosePascal(1 << 10, mod)
    n = 4  # the four border lines (two columns, two rows) of the x-by-y box
    # Start from every placement of d + l pieces inside the full x*y box.
    res = choose(x * y, d + l)
    # Inclusion-exclusion over subsets `s` of border lines forced to be empty.
    for s in range(1, 1 << n):
        # cnt[0]: border columns removed from x; cnt[1]: border rows removed
        # from y (even bits affect x, odd bits affect y) — TODO confirm
        # orientation against the problem statement.
        cnt = [0] * 2
        sign = -1
        for i in range(n):
            if ~s >> i & 1: continue  # bit i of s not set -> line i not excluded
            cnt[i & 1] += 1
            sign *= -1  # after the loop: sign == (-1) ** (popcount(s) + 1)
        if not (x - cnt[0] > 0 and y - cnt[1] > 0): continue
        res -= sign * choose((x - cnt[0]) * (y - cnt[1]), d + l)
        res %= mod
    # Multiply by the number of positions for the x-by-y box inside the
    # r-by-c board, and by the ways to split the pieces into d and l kinds.
    res *= (r - x + 1) * (c - y + 1) * choose(l + d, d) % mod
    print(res % mod)
def main() -> typing.NoReturn:
    # Input: three lines "R C", "X Y", "D L".
    r, c = map(int, input().split())
    x, y = map(int, input().split())
    d, l = map(int, input().split())
    solve(r, c, x, y, d, l)
main()
|
<filename>src/atcoder/abc003/d/sol_2.py<gh_stars>1-10
import typing
import sys
class ModChoosePascal():
def __call__(self, n: int, k: int) -> int:
c = self.__c
return c[n][k] if 0 <= k <= n < len(c) else 0
def __init__(self, n: int, mod: int) -> typing.NoReturn:
c = [[0] * n for _ in range(n)]
for i in range(n): c[i][0] = 1
for i in range(1, n):
for j in range(1, i + 1):
c[i][j] = (c[i - 1][j - 1] + c[i - 1][j]) % mod
self.__c = c
def solve(
r: int,
c: int,
x: int,
y: int,
d: int,
l: int,
) -> typing.NoReturn:
mod = 1_000_000_007
choose = ModChoosePascal(1 << 10, mod)
n = 4
res = choose(x * y, d + l)
for s in range(1, 1 << n):
cnt = [0] * 2
sign = -1
for i in range(n):
if ~s >> i & 1: continue
cnt[i & 1] += 1
sign *= -1
if not (x - cnt[0] > 0 and y - cnt[1] > 0): continue
res -= sign * choose((x - cnt[0]) * (y - cnt[1]), d + l)
res %= mod
res *= (r - x + 1) * (c - y + 1) * choose(l + d, d) % mod
print(res % mod)
def main() -> typing.NoReturn:
r, c = map(int, input().split())
x, y = map(int, input().split())
d, l = map(int, input().split())
solve(r, c, x, y, d, l)
main()
|
none
| 1
| 2.564793
| 3
|
|
CTD_controller/data_logger_emergencia.py
|
Raniita/Accuatic-Probe
| 1
|
6629360
|
from datetime import datetime
import socket
import time
import csv

# Emergency version of the CTD data logger: used when the depth (MS5) sensor
# fails, so the depth must be typed in manually for every cast.
# Use only when strictly necessary.
#
# Arduino reply formats (semicolon separated):
#   cdom / phy / chl -> <type>;<gain>;<measure>;<mv>            (Cyclops)
#   temp             -> <type>;<measure>                        (I2C sensors)
#   ms5              -> <type>;<pressure>;<temp>;<depth>;<altitude>
# The ms5 query is intentionally disabled in this emergency build (the
# sensor is the one that failed); temp1 is logged as a 0 placeholder.

BUFFERSIZE = 1024


def query(sock, target, command):
    """Send *command* to the Arduino at *target* and return the reply split
    on ';', or None on timeout / unreadable reply.

    The send itself is outside the try block on purpose: a failure to send
    is a local network error and should abort the run, matching the
    original behaviour.
    """
    sock.sendto(command.encode(), target)
    try:
        reply, _addr = sock.recvfrom(BUFFERSIZE)
        return reply.decode().split(";")
    except (socket.timeout, OSError, ValueError):
        # socket.timeout: sensor did not answer; ValueError covers a reply
        # that cannot be decoded as text.
        return None


if __name__ == "__main__":
    # Arduino IP + port (10.0.1.10 is the ship's DHCP assignment).
    arduino = ('10.0.1.10', 55055)
    station = input("Introduce la estacion actual: ")
    start_timestamp = datetime.today().strftime("%d-%m-%Y")
    filename = "".join((station, "-", start_timestamp, ".csv"))
    # Append a header row at the start of every session (the file is opened
    # in append mode, so re-running the script adds a new header).
    with open(filename, 'a', newline='') as f:
        row = ["station", "time", "depth", "temp1", "temp2", "cdom [gain]", "cdom [ppb]",
               "cdom [mv]", "pe [gain]", "pe [ppb]", "pe [mv]", "chl [gain]", "chl [ppb]", "chl [mv]"]
        writer = csv.writer(f, delimiter=',')
        writer.writerow(row)
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(("", 45045))
    sock.settimeout(5)
    print("Listo para recibir datos: ... \n")
    while True:
        depth = input("Introduce profundidad actual: ")
        temp1 = 0  # MS5 (depth/temp) sensor is broken: placeholder value
        readings = {}
        all_ok = True
        # Query the three Cyclops fluorometers; each reply carries
        # <gain>;<ppb>;<mv> after the type field.
        for cmd, label in (("cdom", "CDOM"), ("phy", "PE"), ("chl", "CHL")):
            data = query(sock, arduino, cmd)
            if data is None or len(data) < 4:
                all_ok = False
                print(f"Error. No hay respuesta de {label}.")
                continue
            gain, ppb, mv = data[1], data[2], data[3]
            print(f"[{label}] Gain:x{gain} PPB:{ppb} mV:{mv}")
            readings[cmd] = (gain, ppb, mv)
        # Secondary temperature probe (I2C).
        data = query(sock, arduino, "temp")
        if data is None or len(data) < 2:
            all_ok = False
            print("Error. No hay respuesta de Temp2.")
        else:
            temp2 = data[1]
            print(f"[Temp] temp2:{temp2}")
        # Persist the cast only when every sensor answered.
        if all_ok:
            time_str = datetime.today().strftime("%H:%M:%S")
            row = [station, time_str, depth, temp1, temp2,
                   *readings["cdom"], *readings["phy"], *readings["chl"]]
            with open(filename, 'a', newline='') as f:
                csv.writer(f, delimiter=',').writerow(row)
        else:
            print("Error. No se ha podido guardar en el CSV.\nAlgunos sensores no se han leido correctamente.")
        # Ready to move to the next depth.
        print("Datos guardados, listo para cambiar de profundidad.\n")
|
from datetime import datetime
import socket
import time
import csv
# Esta version esta pensada para cuando el sensor de profundidad falla
# Es necesario introducir la profundidad de forma manual
# Usar solo cuando sea necesario.
if __name__ == "__main__":
# Arduino IP + port [10.0.1.10 DHCP del barco]
arduino = ('10.0.1.10', 55055)
buffersize = 1024
station = input("Introduce la estacion actual: ")
start_timestamp = datetime.today().strftime("%d-%m-%Y")
filename = "".join((station, "-", start_timestamp, ".csv"))
# Creating the csv
with open(filename, 'a', newline='') as f:
row = ["station", "time", "depth", "temp1", "temp2", "cdom [gain]", "cdom [ppb]",
"cdom [mv]", "pe [gain]", "pe [ppb]", "pe [mv]", "chl [gain]", "chl [ppb]", "chl [mv]"]
writer = csv.writer(f, delimiter=',')
writer.writerow(row)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("", 45045))
sock.settimeout(5)
print("Listo para recibir datos: ... \n")
while True:
depth = input("Introduce profundidad actual: ")
temp1 = 0
pressure = 0
altitude = 0
# Preguntamos por todos los valores
# cdom -> Respuesta: <type>;<gain>;<measure>;<mv> (cyclops)
# phy -> Respuesta: <type>;<gain>;<measure>;<mv> (cyclops)
# chl -> Respuesta: <type>;<gain>;<measure>;<mv> (cyclops)
# ms5 -> Respuesta: <type>;<pressure>;<temp>;<depth>;<altitude>
# temp -> Respuesta: <type>;<measure> (i2c sensors)
#msg2send = "ms5"
#sock.sendto(msg2send.encode(), arduino)
#try:
# recv_d, addr = sock.recvfrom(buffersize)
# data = recv_d.decode().split(";")
# if(data[1] == "error"):
# pressure = data[1]
# temp1 = data[1]
# depth = data[1]
# altitude = data[1]
# else:
# pressure = data[1]
# temp1 = data[2]
# depth = data[3]
# altitude = data[4]
# print(
# f"[MS5] Depth:{depth} Pressure:{pressure} Altitude:{altitude} Temp:{temp1}")
# Save OK
# ms5_read = True
#except:
# ms5_read = False
# print("Error. No response for depth.")
# pass
msg2send = "cdom"
sock.sendto(msg2send.encode(), arduino)
try:
recv_d, addr = sock.recvfrom(buffersize)
data = recv_d.decode().split(";")
cdom_gain = data[1]
cdom_ppb = data[2]
cdom_mv = data[3]
print(f"[CDOM] Gain:x{cdom_gain} PPB:{cdom_ppb} mV:{cdom_mv}")
# Save OK
cdom_read = True
except:
cdom_read = False
print("Error. No hay respuesta de CDOM.")
#pass
msg2send = "phy"
sock.sendto(msg2send.encode(), arduino)
try:
recv_d, addr = sock.recvfrom(buffersize)
data = recv_d.decode().split(";")
phy_gain = data[1]
phy_ppb = data[2]
phy_mv = data[3]
print(f"[PE] Gain:x{phy_gain} PPB:{phy_ppb} mV:{phy_mv}")
# Save OK
phy_read = True
except:
phy_read = False
print("Error. No hay respuesta de PE.")
#pass
msg2send = "chl"
sock.sendto(msg2send.encode(), arduino)
try:
recv_d, addr = sock.recvfrom(buffersize)
data = recv_d.decode().split(";")
chl_gain = data[1]
chl_ppb = data[2]
chl_mv = data[3]
print(f"[CHL] Gain:x{chl_gain} PPB:{chl_ppb} mV:{chl_mv}")
# Save OK
chl_read = True
except:
chl_read = False
print("Error. No hay respuesta de CHL.")
#pass
msg2send = "temp"
sock.sendto(msg2send.encode(), arduino)
try:
recv_d, addr = sock.recvfrom(buffersize)
data = recv_d.decode().split(";")
temp2 = data[1]
print(f"[Temp] temp2:{temp2}")
# Save OK
temp_read = True
except:
temp_read = False
print("Error. No hay respuesta de Temp2.")
#pass
# Terminamos, guardamos en el CSV
if (cdom_read and phy_read and chl_read and temp_read):
time = datetime.today().strftime("%H:%M:%S")
# Preparing the row
row = [station, time, depth, temp1, temp2, cdom_gain, cdom_ppb,
cdom_mv, phy_gain, phy_ppb, phy_mv, chl_gain, chl_ppb, chl_mv]
# Write to CSV
with open(filename, 'a', newline='') as f:
writer = csv.writer(f, delimiter=',')
writer.writerow(row)
else:
print("Error. No se ha podido guardar en el CSV.\nAlgunos sensores no se han leido correctamente.")
# Listo para continuar, ya puedes bajar
print("Datos guardados, listo para cambiar de profundidad.\n")
|
es
| 0.382767
|
# Esta version esta pensada para cuando el sensor de profundidad falla # Es necesario introducir la profundidad de forma manual # Usar solo cuando sea necesario. # Arduino IP + port [10.0.1.10 DHCP del barco] # Creating the csv # Preguntamos por todos los valores # cdom -> Respuesta: <type>;<gain>;<measure>;<mv> (cyclops) # phy -> Respuesta: <type>;<gain>;<measure>;<mv> (cyclops) # chl -> Respuesta: <type>;<gain>;<measure>;<mv> (cyclops) # ms5 -> Respuesta: <type>;<pressure>;<temp>;<depth>;<altitude> # temp -> Respuesta: <type>;<measure> (i2c sensors) #msg2send = "ms5" #sock.sendto(msg2send.encode(), arduino) #try: # recv_d, addr = sock.recvfrom(buffersize) # data = recv_d.decode().split(";") # if(data[1] == "error"): # pressure = data[1] # temp1 = data[1] # depth = data[1] # altitude = data[1] # else: # pressure = data[1] # temp1 = data[2] # depth = data[3] # altitude = data[4] # print( # f"[MS5] Depth:{depth} Pressure:{pressure} Altitude:{altitude} Temp:{temp1}") # Save OK # ms5_read = True #except: # ms5_read = False # print("Error. No response for depth.") # pass # Save OK #pass # Save OK #pass # Save OK #pass # Save OK #pass # Terminamos, guardamos en el CSV # Preparing the row # Write to CSV # Listo para continuar, ya puedes bajar
| 2.982328
| 3
|
devito/core/operator.py
|
cc-a/devito
| 0
|
6629361
|
from devito.core.autotuning import autotune
from devito.dle import NThreads
from devito.ir.support import align_accesses
from devito.parameters import configuration
from devito.operator import Operator
__all__ = ['OperatorCore']
class OperatorCore(Operator):

    def _specialize_exprs(self, expressions):
        """Specialize the input expressions, after aligning every data
        access of a DiscreteFunction to the computational domain."""
        is_df = lambda i: i.is_DiscreteFunction
        aligned = [align_accesses(e, key=is_df) for e in expressions]
        return super(OperatorCore, self)._specialize_exprs(aligned)

    def _autotune(self, args, setup):
        """Run auto-tuning on ``args`` according to ``setup``.

        ``setup`` may be: False/'off' (disabled), True (use the configured
        level, defaulting to 'basic'), a level string, or a
        ``(level, mode)`` 2-tuple. Anything else raises ValueError.
        The tuning summary is recorded in ``self._state['autotuning']``.
        """
        if setup in [False, 'off']:
            return args
        if setup is True:
            level = configuration['autotuning'].level or 'basic'
            mode = configuration['autotuning'].mode
        elif isinstance(setup, str):
            level, mode = setup, configuration['autotuning'].mode
        elif isinstance(setup, tuple) and len(setup) == 2:
            level, mode = setup
            if level is False:
                return args
        else:
            raise ValueError("Expected bool, str, or 2-tuple, got `%s` instead"
                             % type(setup))
        args, summary = autotune(self, args, level, mode)
        # Record the tuned values for later inspection.
        self._state.setdefault('autotuning', []).append(summary)
        return args

    @property
    def nthreads(self):
        """The NThreads input parameter, or 1 when the Operator has none."""
        candidates = [i for i in self.input if isinstance(i, NThreads)]
        if not candidates:
            return 1
        assert len(candidates) == 1
        return candidates.pop()
|
from devito.core.autotuning import autotune
from devito.dle import NThreads
from devito.ir.support import align_accesses
from devito.parameters import configuration
from devito.operator import Operator
__all__ = ['OperatorCore']
class OperatorCore(Operator):
def _specialize_exprs(self, expressions):
# Align data accesses to the computational domain
key = lambda i: i.is_DiscreteFunction
expressions = [align_accesses(e, key=key) for e in expressions]
return super(OperatorCore, self)._specialize_exprs(expressions)
def _autotune(self, args, setup):
if setup in [False, 'off']:
return args
elif setup is True:
level = configuration['autotuning'].level or 'basic'
args, summary = autotune(self, args, level, configuration['autotuning'].mode)
elif isinstance(setup, str):
args, summary = autotune(self, args, setup, configuration['autotuning'].mode)
elif isinstance(setup, tuple) and len(setup) == 2:
level, mode = setup
if level is False:
return args
else:
args, summary = autotune(self, args, level, mode)
else:
raise ValueError("Expected bool, str, or 2-tuple, got `%s` instead"
% type(setup))
# Record the tuned values
self._state.setdefault('autotuning', []).append(summary)
return args
@property
def nthreads(self):
nthreads = [i for i in self.input if isinstance(i, NThreads)]
if len(nthreads) == 0:
return 1
else:
assert len(nthreads) == 1
return nthreads.pop()
|
en
| 0.505214
|
# Align data accesses to the computational domain # Record the tuned values
| 2.387579
| 2
|
step2/dataAugmentationForUsers.py
|
Lintianqianjin/reappearance-of-some-classical-CNNs
| 6
|
6629362
|
import cv2
import os


def dataAugmentation(BasePath='data/rightOutputs/train_224'):
    '''
    Horizontal-flip data augmentation.

    Every image under *BasePath* is flipped left-right and written to
    'data/flipUserOutputs'.  The flipped copy keeps the original file name
    with "_flipx" appended before the extension, e.g. "pic1.png" ->
    "pic1_flipx.png" (naming rule from the original exercise statement).

    :param BasePath: directory holding the images to process
    :return: None
    '''
    #********** Begin **********#
    out_dir = 'data/flipUserOutputs'
    os.makedirs(out_dir, exist_ok=True)
    for fname in os.listdir(BasePath):
        img = cv2.imread(os.path.join(BasePath, fname))
        if img is None:
            # Not a readable image (e.g. a stray non-image file): skip it.
            continue
        flipped = cv2.flip(img, 1)  # flipCode=1 -> flip around the y-axis
        stem, ext = os.path.splitext(fname)
        cv2.imwrite(os.path.join(out_dir, stem + '_flipx' + ext), flipped)
    #********** End **********#


if __name__ == '__main__':
    dataAugmentation()
|
import cv2
import os
def dataAugmentation(BasePath = 'data/rightOutputs/train_224'):
'''
只需写水平翻转,翻转后的文件的文件名命名规范为文件名末尾加上 _flipx
示例 原文件名 “图片1.png”,翻转后保存的文件命名为 “图片1__flipx.png”
文件保存在'data/flipUserOutputs'目录下
:param BasePath: 待处理的图片文件夹路径
:return:
'''
#********** Begin **********#
#********** End **********#
if __name__ == '__main__':
dataAugmentation()
|
zh
| 0.929243
|
只需写水平翻转,翻转后的文件的文件名命名规范为文件名末尾加上 _flipx 示例 原文件名 “图片1.png”,翻转后保存的文件命名为 “图片1__flipx.png” 文件保存在'data/flipUserOutputs'目录下 :param BasePath: 待处理的图片文件夹路径 :return: #********** Begin **********# #********** End **********#
| 2.056844
| 2
|
server.py
|
dan-ess/alexa-cycles
| 0
|
6629363
|
from functools import wraps
import logging
import sys
from flask import Flask
from flask_ask import Ask, statement
from pycycles import Client, ServiceArea
logging.getLogger('flask_ask').setLevel(logging.DEBUG)
app = Flask(__name__)
ask = Ask(app, '/')
# set these.
USERNAME = ''
PASSWORD = ''
def get_cycles_statement(inputs):
    """Build the "there are N cycles at <port>" Alexa statement.

    *inputs* must contain 'cycles' (a list) and 'portname'.  The spoken
    count is derived here from the list ('no' when empty), so callers do
    not have to pass a 'count' key themselves.
    """
    count = len(inputs['cycles'])
    inputs['be_form'] = 'is' if count == 1 else 'are'
    # BUG FIX: the original read len(input['cycles']) -- the *builtin*
    # `input` function -- which raised a TypeError at runtime.  It also
    # overwrote 'cycles' with the string 'no' even though the message
    # template interpolates {count}; derive 'count' instead, which also
    # fixes the KeyError for callers that omitted it.
    inputs['count'] = 'no' if count == 0 else count
    return statement('There {be_form} {count} cycles at {portname}'.format(**inputs))


def rent_cycle_statement(inputs):
    """Build the rental confirmation ('portname' and 'pin' keys required)."""
    return statement('Cycle rented from {portname}. Your PIN is {pin}'.format(**inputs))


def port_not_found_statement(inputs):
    """Build the "port not found" statement ('portname' key required)."""
    return statement('Could not find cycleport: {portname}'.format(**inputs))
def match_port(target_port, ports):
    """Return the first port whose English name contains *target_port*
    (case-insensitive substring match), or None when nothing matches."""
    needle = target_port.lower()
    return next((p for p in ports if needle in p['name_en'].lower()), None)
def get_client():
    """Log in to the cycle-share service and return an authenticated client."""
    client = Client(USERNAME, PASSWORD)
    client.login()
    return client


@ask.intent('GetCyclesForCycleport')
def get_cycles_for_port(client, portname):
    """Speak how many cycles are available at the named cycleport."""
    # NOTE(review): the `client` slot supplied by flask-ask is discarded and
    # a fresh authenticated client is built per request -- confirm intended.
    client = get_client()
    cycleports = client.cycleports(ServiceArea.CHUO)
    cycleport = match_port(portname, cycleports)
    if cycleport is None:  # `is None`, not `== None`
        return port_not_found_statement({'portname': portname})
    cycles = client.cycles(cycleport)
    return get_cycles_statement(
        {'cycles': cycles, 'portname': cycleport['name_en'], 'count': len(cycles)})


@ask.intent('RentCycle')
def rent_cycle_from_port(client, portname):
    """Rent the first available cycle at the named port and speak its PIN."""
    client = get_client()
    cycleports = client.cycleports(ServiceArea.CHUO)
    cycleport = match_port(portname, cycleports)
    if cycleport is None:
        return port_not_found_statement({'portname': portname})
    cycles = client.cycles(cycleport)
    if len(cycles) == 0:
        # BUG FIX: supply the 'count' key -- the statement template
        # interpolates {count} and the original call omitted it, which
        # raised a KeyError on the empty-port path.
        return get_cycles_statement(
            {'cycles': cycles, 'portname': portname, 'count': len(cycles)})
    rental = client.rent(cycles[0])
    return rent_cycle_statement(
        {'portname': rental['cycleport']['name_en'], 'pin': rental['pin']})
if __name__ == '__main__':
app.run()
|
from functools import wraps
import logging
import sys
from flask import Flask
from flask_ask import Ask, statement
from pycycles import Client, ServiceArea
logging.getLogger('flask_ask').setLevel(logging.DEBUG)
app = Flask(__name__)
ask = Ask(app, '/')
# set these.
USERNAME = ''
PASSWORD = ''
def get_cycles_statement(inputs):
inputs['be_form'] = 'is' if len(inputs['cycles']) == 1 else 'are'
inputs['cycles'] = 'no' if len(input['cycles']) == 0 else inputs['cycles']
return statement('There {be_form} {count} cycles at {portname}'.format(**inputs))
def rent_cycle_statement(inputs):
return statement('Cycle rented from {portname}. Your PIN is {pin}'.format(**inputs))
def port_not_found_statement(inputs):
return statement('Could not find cycleport: {portname}'.format(**inputs))
def match_port(target_port, ports):
match = None
for port in ports:
if target_port.lower() in port['name_en'].lower():
match = port
break
return match
def get_client():
client = Client(USERNAME, PASSWORD)
client.login()
return client
@ask.intent('GetCyclesForCycleport')
def get_cycles_for_port(client, portname):
client = get_client()
cycleports = client.cycleports(ServiceArea.CHUO)
cycleport = match_port(portname, cycleports)
if cycleport == None:
return port_not_found_statement({'portname': portname})
cycles = client.cycles(cycleport)
return get_cycles_statement(
{'cycles': cycles, 'portname': cycleport['name_en'], 'count': len(cycles)})
@ask.intent('RentCycle')
def rent_cycle_from_port(client, portname):
client = get_client()
cycleports = client.cycleports(ServiceArea.CHUO)
cycleport = match_port(portname, cycleports)
if cycleport == None:
return port_not_found_statement({'portname': portname})
cycles = client.cycles(cycleport)
if len(cycles) == 0:
return get_cycles_statement({'cycles': cycles, 'portname': portname})
rental = client.rent(cycles[0])
return rent_cycle_statement(
{'portname': rental['cycleport']['name_en'], 'pin': rental['pin']})
if __name__ == '__main__':
app.run()
|
it
| 0.421933
|
# set these.
| 2.513717
| 3
|
pendulum/tz/zoneinfo/reader.py
|
Sn3akyP3t3/pendulum
| 2
|
6629364
|
import os
import pytzdata
from collections import namedtuple
from struct import unpack
from typing import List, Dict
from pytzdata.exceptions import TimezoneNotFound
from pendulum.utils._compat import PY2
from .exceptions import InvalidZoneinfoFile, InvalidTimezone
from .timezone import Timezone
from .transition import Transition
from .posix_timezone import posix_spec, PosixTimezone
from .transition_type import TransitionType
# Layout of one ttinfo record in a TZif file (kept for reference; the
# parser below returns plain tuples instead).
_offset = namedtuple('offset', 'utc_total_offset is_dst abbr_idx')
# TZif header: the format version plus the six big-endian int32 counters
# decoded by `unpack('>6l', ...)` in _parse_header (see RFC 8536).
header = namedtuple(
    'header',
    'version '
    'utclocals '
    'stdwalls '
    'leaps '
    'transitions '
    'types '
    'abbr_size'
)
class Reader:
    """
    Reads compiled zoneinfo TZif (\0, 2 or 3) files.
    """
    def __init__(self, extend=True): # type: (bool) -> None
        # `extend` is forwarded to Timezone(extended=...) in _parse.
        self._extend = extend
    def read_for(self, timezone): # type: (str) -> Timezone
        """
        Read the zoneinfo structure for a given timezone name.
        :param timezone: The timezone.
        """
        try:
            file_path = pytzdata.tz_path(timezone)
        except TimezoneNotFound:
            raise InvalidTimezone(timezone)
        return self.read(file_path)
    def read(self, file_path): # type: (str) -> Timezone
        """
        Read a zoneinfo structure from the given path.
        :param file_path: The path of a zoneinfo file.
        """
        if not os.path.exists(file_path):
            raise InvalidZoneinfoFile('The tzinfo file does not exist')
        with open(file_path, 'rb') as fd:
            return self._parse(fd)
    def _check_read(self, fd, nbytes): # type: (...) -> bytes
        """
        Reads the given number of bytes from the given file
        and checks that the correct number of bytes could be read.
        """
        result = fd.read(nbytes)
        if not result or len(result) != nbytes:
            raise InvalidZoneinfoFile(
                'Expected {} bytes reading {}, '
                'but got {}'.format(
                    nbytes, fd.name, len(result) if result else 0
                )
            )
        if PY2:
            # On Python 2, bytes is str; bytearray gives int-valued indexing
            # consistent with Python 3 bytes.
            return bytearray(result)
        return result
    def _parse(self, fd): # type: (...) -> Timezone
        """
        Parse a zoneinfo file.
        """
        hdr = self._parse_header(fd)
        if hdr.version in (2, 3):
            # We're skipping the entire v1 file since
            # at least the same data will be found in TZFile 2.
            # v1 payload size: 4-byte transition times + 1-byte type index
            # per transition (5 bytes), 6-byte ttinfo records, the
            # abbreviation block, 4-byte leap entries (x2 per leap pair),
            # and the std/wall + ut/local indicator arrays.
            fd.seek(
                hdr.transitions * 5
                + hdr.types * 6
                + hdr.abbr_size
                + hdr.leaps * 4
                + hdr.stdwalls
                + hdr.utclocals,
                1
            )
            # Parse the second header
            hdr = self._parse_header(fd)
            if hdr.version != 2 and hdr.version != 3:
                raise InvalidZoneinfoFile(
                    'Header versions mismatch for file {}'.format(fd.name)
                )
            # Parse the v2 data
            trans = self._parse_trans_64(fd, hdr.transitions)
            type_idx = self._parse_type_idx(fd, hdr.transitions)
            types = self._parse_types(fd, hdr.types)
            abbrs = self._parse_abbrs(fd, hdr.abbr_size, types)
            # Skip leap seconds (8 bytes each in v2) and the indicator
            # arrays; only the trailing POSIX TZ rule is needed after this.
            fd.seek(
                hdr.leaps * 8
                + hdr.stdwalls
                + hdr.utclocals,
                1
            )
            trule = self._parse_posix_tz(fd)
        else:
            # TZFile v1
            trans = self._parse_trans_32(fd, hdr.transitions)
            type_idx = self._parse_type_idx(fd, hdr.transitions)
            types = self._parse_types(fd, hdr.types)
            abbrs = self._parse_abbrs(fd, hdr.abbr_size, types)
            trule = None
        # Resolve each (offset, is_dst, abbr_index) triple into a
        # TransitionType carrying the abbreviation string itself.
        types = [
            TransitionType(off, is_dst, abbrs[abbr])
            for off, is_dst, abbr in types
        ]
        # Chain the transitions so each one knows its predecessor.
        transitions = []
        previous = None
        for trans, idx in zip(trans, type_idx):
            transition = Transition(trans, types[idx], previous)
            transitions.append(transition)
            previous = transition
        if not transitions:
            # No transitions recorded: synthesize a single one at epoch 0
            # using the first (and only meaningful) type.
            transitions.append(
                Transition(0, types[0], None)
            )
        return Timezone(transitions, posix_rule=trule, extended=self._extend)
    def _parse_header(self, fd): # type: (...) -> header
        # 44 bytes: b'TZif', version byte, 15 reserved, then six int32s.
        buff = self._check_read(fd, 44)
        if buff[:4] != b'TZif':
            raise InvalidZoneinfoFile(
                'The file "{}" has an invalid header.'.format(fd.name)
            )
        # The version byte is NUL for v1, ASCII '2' (0x32) or '3' (0x33).
        version = {
            0x00: 1,
            0x32: 2,
            0x33: 3
        }.get(buff[4])
        if version is None:
            raise InvalidZoneinfoFile(
                'The file "{}" has an invalid version.'.format(fd.name)
            )
        hdr = header(
            version,
            *unpack('>6l', buff[20:44])
        )
        return hdr
    def _parse_trans_64(self, fd, n): # type: (..., int) -> List[int]
        # v2/v3: transition times are big-endian signed 64-bit integers.
        trans = []
        for _ in range(n):
            buff = self._check_read(fd, 8)
            trans.append(unpack('>q', buff)[0])
        return trans
    def _parse_trans_32(self, fd, n): # type: (..., int) -> List[int]
        # v1: transition times are big-endian signed 32-bit integers.
        trans = []
        for _ in range(n):
            buff = self._check_read(fd, 4)
            trans.append(unpack('>i', buff)[0])
        return trans
    def _parse_type_idx(self, fd, n): # type: (..., int) -> List[int]
        # One unsigned byte per transition: index into the types table.
        buff = self._check_read(fd, n)
        return list(unpack('{}B'.format(n), buff))
    def _parse_types(self, fd, n): # type: (..., int) -> List[tuple]
        # Each ttinfo record is 6 bytes: int32 UTC offset, a DST flag byte,
        # and an index into the abbreviation block.
        types = []
        for _ in range(n):
            buff = self._check_read(fd, 6)
            offset = unpack('>l', buff[:4])[0]
            is_dst = buff[4] == 1
            types.append((offset, is_dst, buff[5]))
        return types
    def _parse_abbrs(self,
                     fd,
                     n,  # type: int
                     types  # type: List[tuple]
                     ): # type: (...) -> Dict[int, str]
        # The abbreviation block is a run of NUL-terminated strings; each
        # type references one by its starting byte offset.
        abbrs = {}
        buff = self._check_read(fd, n)
        for offset, is_dst, idx in types:
            if idx not in abbrs:
                abbr = buff[idx:buff.find(b'\0', idx)].decode('utf-8')
                abbrs[idx] = abbr
        return abbrs
    def _parse_posix_tz(self, fd): # type: (...) -> PosixTimezone
        # The trailing POSIX TZ rule is the rest of the file, enclosed in
        # newlines (v2/v3 only).
        s = fd.read().decode('utf-8')
        if not s.startswith('\n') or not s.endswith('\n'):
            raise InvalidZoneinfoFile(
                'Invalid posix rule in file "{}"'.format(fd.name)
            )
        s = s.strip()
        return posix_spec(s)
|
import os
import pytzdata
from collections import namedtuple
from struct import unpack
from typing import List, Dict
from pytzdata.exceptions import TimezoneNotFound
from pendulum.utils._compat import PY2
from .exceptions import InvalidZoneinfoFile, InvalidTimezone
from .timezone import Timezone
from .transition import Transition
from .posix_timezone import posix_spec, PosixTimezone
from .transition_type import TransitionType
_offset = namedtuple('offset', 'utc_total_offset is_dst abbr_idx')
header = namedtuple(
'header',
'version '
'utclocals '
'stdwalls '
'leaps '
'transitions '
'types '
'abbr_size'
)
class Reader:
"""
Reads compiled zoneinfo TZif (\0, 2 or 3) files.
"""
def __init__(self, extend=True): # type: (bool) -> None
self._extend = extend
def read_for(self, timezone): # type: (str) -> Timezone
"""
Read the zoneinfo structure for a given timezone name.
:param timezone: The timezone.
"""
try:
file_path = pytzdata.tz_path(timezone)
except TimezoneNotFound:
raise InvalidTimezone(timezone)
return self.read(file_path)
def read(self, file_path): # type: (str) -> Timezone
"""
Read a zoneinfo structure from the given path.
:param file_path: The path of a zoneinfo file.
"""
if not os.path.exists(file_path):
raise InvalidZoneinfoFile('The tzinfo file does not exist')
with open(file_path, 'rb') as fd:
return self._parse(fd)
def _check_read(self, fd, nbytes): # type: (...) -> bytes
"""
Reads the given number of bytes from the given file
and checks that the correct number of bytes could be read.
"""
result = fd.read(nbytes)
if not result or len(result) != nbytes:
raise InvalidZoneinfoFile(
'Expected {} bytes reading {}, '
'but got {}'.format(
nbytes, fd.name, len(result) if result else 0
)
)
if PY2:
return bytearray(result)
return result
def _parse(self, fd): # type: (...) -> Timezone
"""
Parse a zoneinfo file.
"""
hdr = self._parse_header(fd)
if hdr.version in (2, 3):
# We're skipping the entire v1 file since
# at least the same data will be found in TZFile 2.
fd.seek(
hdr.transitions * 5
+ hdr.types * 6
+ hdr.abbr_size
+ hdr.leaps * 4
+ hdr.stdwalls
+ hdr.utclocals,
1
)
# Parse the second header
hdr = self._parse_header(fd)
if hdr.version != 2 and hdr.version != 3:
raise InvalidZoneinfoFile(
'Header versions mismatch for file {}'.format(fd.name)
)
# Parse the v2 data
trans = self._parse_trans_64(fd, hdr.transitions)
type_idx = self._parse_type_idx(fd, hdr.transitions)
types = self._parse_types(fd, hdr.types)
abbrs = self._parse_abbrs(fd, hdr.abbr_size, types)
fd.seek(
hdr.leaps * 8
+ hdr.stdwalls
+ hdr.utclocals,
1
)
trule = self._parse_posix_tz(fd)
else:
# TZFile v1
trans = self._parse_trans_32(fd, hdr.transitions)
type_idx = self._parse_type_idx(fd, hdr.transitions)
types = self._parse_types(fd, hdr.types)
abbrs = self._parse_abbrs(fd, hdr.abbr_size, types)
trule = None
types = [
TransitionType(off, is_dst, abbrs[abbr])
for off, is_dst, abbr in types
]
transitions = []
previous = None
for trans, idx in zip(trans, type_idx):
transition = Transition(trans, types[idx], previous)
transitions.append(transition)
previous = transition
if not transitions:
transitions.append(
Transition(0, types[0], None)
)
return Timezone(transitions, posix_rule=trule, extended=self._extend)
def _parse_header(self, fd): # type: (...) -> header
buff = self._check_read(fd, 44)
if buff[:4] != b'TZif':
raise InvalidZoneinfoFile(
'The file "{}" has an invalid header.'.format(fd.name)
)
version = {
0x00: 1,
0x32: 2,
0x33: 3
}.get(buff[4])
if version is None:
raise InvalidZoneinfoFile(
'The file "{}" has an invalid version.'.format(fd.name)
)
hdr = header(
version,
*unpack('>6l', buff[20:44])
)
return hdr
def _parse_trans_64(self, fd, n): # type: (..., int) -> List[int]
trans = []
for _ in range(n):
buff = self._check_read(fd, 8)
trans.append(unpack('>q', buff)[0])
return trans
def _parse_trans_32(self, fd, n): # type: (..., int) -> List[int]
trans = []
for _ in range(n):
buff = self._check_read(fd, 4)
trans.append(unpack('>i', buff)[0])
return trans
def _parse_type_idx(self, fd, n): # type: (..., int) -> List[int]
buff = self._check_read(fd, n)
return list(unpack('{}B'.format(n), buff))
def _parse_types(self, fd, n): # type: (..., int) -> List[tuple]
types = []
for _ in range(n):
buff = self._check_read(fd, 6)
offset = unpack('>l', buff[:4])[0]
is_dst = buff[4] == 1
types.append((offset, is_dst, buff[5]))
return types
def _parse_abbrs(self,
fd,
n, # type: int
types # type: List[tuple]
): # type: (...) -> Dict[int, str]
abbrs = {}
buff = self._check_read(fd, n)
for offset, is_dst, idx in types:
if idx not in abbrs:
abbr = buff[idx:buff.find(b'\0', idx)].decode('utf-8')
abbrs[idx] = abbr
return abbrs
def _parse_posix_tz(self, fd): # type: (...) -> PosixTimezone
s = fd.read().decode('utf-8')
if not s.startswith('\n') or not s.endswith('\n'):
raise InvalidZoneinfoFile(
'Invalid posix rule in file "{}"'.format(fd.name)
)
s = s.strip()
return posix_spec(s)
|
en
| 0.657733
|
Reads compiled zoneinfo TZif (\0, 2 or 3) files. # type: (bool) -> None # type: (str) -> Timezone Read the zoneinfo structure for a given timezone name. :param timezone: The timezone. # type: (str) -> Timezone Read a zoneinfo structure from the given path. :param file_path: The path of a zoneinfo file. # type: (...) -> bytes Reads the given number of bytes from the given file and checks that the correct number of bytes could be read. # type: (...) -> Timezone Parse a zoneinfo file. # We're skipping the entire v1 file since # at least the same data will be found in TZFile 2. # Parse the second header # Parse the v2 data # TZFile v1 # type: (...) -> header # type: (..., int) -> List[int] # type: (..., int) -> List[int] # type: (..., int) -> List[int] # type: (..., int) -> List[tuple] # type: int # type: List[tuple] # type: (...) -> Dict[int, str] # type: (...) -> PosixTimezone
| 2.52987
| 3
|
books/model/InvoicePayment.py
|
nudglabs/books-python-wrappers
| 9
|
6629365
|
<reponame>nudglabs/books-python-wrappers
#$Id$
class InvoicePayment:
    """This class is used to create object for Invoice Payments."""
    def __init__(self):
        """Initialize parameters for Invoice payments."""
        # Identifiers linking this payment record to an invoice.
        self.invoice_payment_id = ''
        self.payment_id = ''
        self.invoice_id = ''
        # Monetary amounts (floats).
        self.amount_used = 0.0
        self.amount_applied = 0.0
        # Payment metadata.
        self.payment_number = ''
        self.payment_mode = ''
        self.description = ''
        self.date = ''
        self.reference_number = ''
        self.exchange_rate = 0.00
        self.amount = 0.00
        self.tax_amount_withheld = 0.0
        # True when the payment covers a single invoice; None until set.
        self.is_single_invoice_payment = None
    def set_invoice_payment_id(self, invoice_payment_id):
        """Set invoice payment id.
        Args:
            invoice_payment_id(str): Invoice payment id.
        """
        self.invoice_payment_id = invoice_payment_id
    def get_invoice_payment_id(self):
        """Get invoice payment id.
        Returns:
            str: Invoice payment id.
        """
        return self.invoice_payment_id
    def set_invoice_id(self, invoice_id):
        """Set invoice id.
        Args:
            invoice_id(str): Invoice id.
        """
        self.invoice_id = invoice_id
    def get_invoice_id(self):
        """Get invoice id.
        Returns:
            str: Invoice id.
        """
        return self.invoice_id
    def set_payment_id(self, payment_id):
        """Set payment id.
        Args:
            payment_id(str): Payment id.
        """
        self.payment_id = payment_id
    def get_payment_id(self):
        """Get payment id.
        Returns:
            str: Payment id.
        """
        return self.payment_id
    def set_amount_applied(self, amount_applied):
        """Set amount applied.
        Args:
            amount_applied(float): Amount applied.
        """
        self.amount_applied = amount_applied
    def get_amount_applied(self):
        """Get amount applied.
        Returns:
            float: Amount applied.
        """
        return self.amount_applied
    def set_amount_used(self, amount_used):
        """Set amount used.
        Args:
            amount_used(float): Amount used.
        """
        self.amount_used = amount_used
    def get_amount_used(self):
        """Get amount used.
        Returns:
            float: Amount used.
        """
        return self.amount_used
    def set_payment_number(self, payment_number):
        """Set payment number.
        Args:
            payment_number(str): Payment number.
        """
        self.payment_number = payment_number
    def get_payment_number(self):
        """Get payment number.
        Returns:
            str: Payment number.
        """
        return self.payment_number
    def set_payment_mode(self, payment_mode):
        """Set payment mode.
        Args:
            payment_mode(str): Payment mode.
        """
        self.payment_mode = payment_mode
    def get_payment_mode(self):
        """Get payment mode.
        Returns:
            str: Payment mode.
        """
        return self.payment_mode
    def set_description(self, description):
        """Set description.
        Args:
            description(str): Description.
        """
        self.description = description
    def get_description(self):
        """Get description.
        Returns:
            str: Description.
        """
        return self.description
    def set_date(self, date):
        """Set date.
        Args:
            date(str): Date.
        """
        self.date = date
    def get_date(self):
        """Get date.
        Returns:
            str: Date.
        """
        return self.date
    def set_reference_number(self, reference_number):
        """Set reference number.
        Args:
            reference_number(str): Reference number.
        """
        self.reference_number = reference_number
    def get_reference_number(self):
        """Get reference number.
        Returns:
            str: Reference number.
        """
        return self.reference_number
    def set_exchange_rate(self, exchange_rate):
        """Set exchange rate.
        Args:
            exchange_rate(float): Exchange rate.
        """
        self.exchange_rate = exchange_rate
    def get_exchange_rate(self):
        """Get exchange rate.
        Returns:
            float: Exchange rate.
        """
        return self.exchange_rate
    def set_amount(self, amount):
        """Set amount.
        Args:
            amount(float): Amount.
        """
        self.amount = amount
    def get_amount(self):
        """Get amount.
        Returns:
            float: Amount.
        """
        return self.amount
    def set_tax_amount_withheld(self, tax_amount_withheld):
        """Set tax amount withheld.
        Args:
            tax_amount_withheld(float): Tax amount withheld.
        """
        self.tax_amount_withheld = tax_amount_withheld
    def get_tax_amount_withheld(self):
        """Get tax amount withheld.
        Returns:
            float: Tax amount withheld.
        """
        return self.tax_amount_withheld
    def set_is_single_invoice_payment(self, is_single_invoice_payment):
        """Set whether it is single invoice payment.
        Args:
            is_single_invoice_payment(bool): True if it is single invoice
                payment else False.
        """
        self.is_single_invoice_payment = is_single_invoice_payment
    def get_is_single_invoice_payment(self):
        """Get whether it is single invoice payment.
        Returns:
            bool: True if it is single invoice payment else False.
        """
        return self.is_single_invoice_payment
|
#$Id$
class InvoicePayment:
"""This class is used to create object for Invoice Payments."""
def __init__(self):
"""Initialize parameters for Invoice payments."""
self.invoice_payment_id = ''
self.payment_id = ''
self.invoice_id = ''
self.amount_used = 0.0
self.amount_applied = 0.0
self.payment_number = ''
self.payment_mode = ''
self.description = ''
self.date = ''
self.reference_number = ''
self.exchange_rate = 0.00
self.amount = 0.00
self.tax_amount_withheld = 0.0
self.is_single_invoice_payment = None
def set_invoice_payment_id(self, invoice_payment_id):
"""Set invoice payment id.
Args:
invoice_payment_id(str): Invoice payment id.
"""
self.invoice_payment_id = invoice_payment_id
def get_invoice_payment_id(self):
"""Get invoice payment id.
Returns:
str: Invoice payment id.
"""
return self.invoice_payment_id
def set_invoice_id(self, invoice_id):
"""Set invoice id.
Args:
invoice_id(str): Invoice id.
"""
self.invoice_id = invoice_id
def get_invoice_id(self):
"""Get invoice id.
Returns:
str: Invoice id.
"""
return self.invoice_id
def set_payment_id(self, payment_id):
"""Set payment id.
Args:
payment_id(str): Payment id.
"""
self.payment_id = payment_id
def get_payment_id(self):
"""Get payment id.
Returns:
str: Payment id.
"""
return self.payment_id
def set_amount_applied(self, amount_applied):
"""Set amount applied.
Args:
amount_applied(float): Amount applied.
"""
self.amount_applied = amount_applied
def get_amount_applied(self):
"""Get amount applied.
Returns:
float: Amount applied.
"""
return self.amount_applied
def set_amount_used(self, amount_used):
"""Set amount used.
Args:
amount_used(float): Amount used.
"""
self.amount_used = amount_used
def get_amount_used(self):
"""Get amount used.
Returns:
float: Amount used.
"""
return self.amount_used
def set_payment_number(self, payment_number):
"""Set payment number.
Args:
payment_number(str): Payment number.
"""
self.payment_number = payment_number
def get_payment_number(self):
"""Get payment number.
Returns:
str: Payment number.
"""
return self.payment_number
def set_payment_mode(self, payment_mode):
"""Set payment mode.
Args:
payment_mode(str): Payment mode.
"""
self.payment_mode = payment_mode
def get_payment_mode(self):
"""Get payment mode.
Returns:
str: Payment mode.
"""
return self.payment_mode
def set_description(self, description):
"""Set description.
Args:
description(str): Description.
"""
self.description = description
def get_description(self):
"""Get description.
Returns:
str: Description.
"""
return self.description
def set_date(self, date):
"""Set date.
Args:
date(str): Date.
"""
self.date = date
def get_date(self):
"""Get date.
Returns:
str: Date.
"""
return self.date
def set_reference_number(self, reference_number):
"""Set reference number.
Args:
reference_number(str): Reference number.
"""
self.reference_number = reference_number
def get_reference_number(self):
"""Get reference number.
Returns:
str: Reference number.
"""
return self.reference_number
def set_exchange_rate(self, exchange_rate):
"""Set exchange rate.
Args:
exchange_rate(float): Exchange rate.
"""
self.exchange_rate = exchange_rate
def get_exchange_rate(self):
"""Get exchange rate.
Returns:
float: Exchange rate.
"""
return self.exchange_rate
def set_amount(self, amount):
"""Set amount.
Args:
amount(float): Amount.
"""
self.amount = amount
def get_amount(self):
"""Get amount.
Returns:
float: Amount.
"""
return self.amount
def set_tax_amount_withheld(self, tax_amount_withheld):
"""Set tax amount withheld.
Args:
tax_amount_withheld(float): Tax amount withheld.
"""
self.tax_amount_withheld = tax_amount_withheld
def get_tax_amount_withheld(self):
"""Get tax amount withheld.
Returns:
float: Tax amount withheld.
"""
return self.tax_amount_withheld
def set_is_single_invoice_payment(self, is_single_invoice_payment):
"""Set whether it is single invoice payment.
Args:
is_single_invoice_payment(bool): True if it is single invoice
payment else False.
"""
self.is_single_invoice_payment = is_single_invoice_payment
def get_is_single_invoice_payment(self):
"""Get whether it is single invoice payment.
Returns:
bool: True if it is single invoice payment else False.
"""
return self.is_single_invoice_payment
|
en
| 0.660462
|
#$Id$ This class is used to create object for Invoice Payments. Initialize parameters for Invoice payments. Set invoice payment id. Args: invoice_payment_id(str): Invoice payment id. Get invoice payment id. Returns: str: Invoice payment id. Set invoice id. Args: invoice_id(str): Invoice id. Get invoice id. Returns: str: Invoice id. Set payment id. Args: payment_id(str): Payment id. Get payment id. Returns: str: Payment id. Set amount applied. Args: amount_applied(float): Amount applied. Get amount applied. Returns: float: Amount applied. Set amount used. Args: amount_used(float): Amount used. Get amount used. Returns: float: Amount used. Set payment number. Args: payment_number(str): Payment number. Get payment number. Returns: str: Payment number. Set payment mode. Args: payment_mode(str): Payment mode. Get payment mode. Returns: str: Payment mode. Set description. Args: description(str): Description. Get description. Returns: str: Description. Set date. Args: date(str): Date. Get date. Returns: str: Date. Set reference number. Args: reference_number(str): Reference number. Get reference number. Returns: str: Reference number. Set exchange rate. Args: exchange_rate(float): Exchange rate. Get exchange rate. Returns: float: Exchange rate. Set amount. Args: amount(float): Amount. Get amount. Returns: float: Amount. Set tax amount withheld. Args: tax_amount_withheld(float): Tax amount withheld. Get tax amount withheld. Returns: float: Tax amount withheld. Set whether it is single invoice payment. Args: is_single_invoice_payment(bool): True if it is single invoice payment else False. Get whether it is single invoice payment. Returns: bool: True if it is single invoice payment else False.
| 3.333728
| 3
|
stable_baselines/common/mpi_adam.py
|
yfletberliac/transformrl
| 0
|
6629366
|
<filename>stable_baselines/common/mpi_adam.py
import mpi4py
import numpy as np
import tensorflow as tf
import stable_baselines.common.tf_util as tf_utils
class MpiAdam(object):
def __init__(self, var_list, *, beta1=0.9, beta2=0.999, epsilon=1e-08, scale_grad_by_procs=True, comm=None,
sess=None):
"""
A parallel MPI implementation of the Adam optimizer for TensorFlow
https://arxiv.org/abs/1412.6980
:param var_list: ([TensorFlow Tensor]) the variables
:param beta1: (float) Adam beta1 parameter
:param beta2: (float) Adam beta1 parameter
:param epsilon: (float) to help with preventing arithmetic issues
:param scale_grad_by_procs: (bool) if the scaling should be done by processes
:param comm: (MPI Communicators) if None, mpi4py.MPI.COMM_WORLD
:param sess: (TensorFlow Session) if None, tf.get_default_session()
"""
self.var_list = var_list
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.scale_grad_by_procs = scale_grad_by_procs
size = sum(tf_utils.numel(v) for v in var_list)
# Exponential moving average of gradient values
# "first moment estimate" m in the paper
self.exp_avg = np.zeros(size, 'float32')
# Exponential moving average of squared gradient values
# "second raw moment estimate" v in the paper
self.exp_avg_sq = np.zeros(size, 'float32')
self.step = 0
self.setfromflat = tf_utils.SetFromFlat(var_list, sess=sess)
self.getflat = tf_utils.GetFlat(var_list, sess=sess)
self.comm = mpi4py.MPI.COMM_WORLD if comm is None else comm
def update(self, local_grad, learning_rate):
"""
update the values of the graph
:param local_grad: (numpy float) the gradient
:param learning_rate: (float) the learning_rate for the update
"""
if self.step % 100 == 0:
self.check_synced()
local_grad = local_grad.astype('float32')
global_grad = np.zeros_like(local_grad)
self.comm.Allreduce(local_grad, global_grad, op=mpi4py.MPI.SUM)
if self.scale_grad_by_procs:
global_grad /= self.comm.Get_size()
self.step += 1
# Learning rate with bias correction
step_size = learning_rate * np.sqrt(1 - self.beta2 ** self.step) / (1 - self.beta1 ** self.step)
# Decay the first and second moment running average coefficient
self.exp_avg = self.beta1 * self.exp_avg + (1 - self.beta1) * global_grad
self.exp_avg_sq = self.beta2 * self.exp_avg_sq + (1 - self.beta2) * (global_grad * global_grad)
step = (- step_size) * self.exp_avg / (np.sqrt(self.exp_avg_sq) + self.epsilon)
self.setfromflat(self.getflat() + step)
def sync(self):
"""
syncronize the MPI threads
"""
theta = self.getflat()
self.comm.Bcast(theta, root=0)
self.setfromflat(theta)
def check_synced(self):
"""
confirm the MPI threads are synced
"""
if self.comm.Get_rank() == 0: # this is root
theta = self.getflat()
self.comm.Bcast(theta, root=0)
else:
thetalocal = self.getflat()
thetaroot = np.empty_like(thetalocal)
self.comm.Bcast(thetaroot, root=0)
assert (thetaroot == thetalocal).all(), (thetaroot, thetalocal)
@tf_utils.in_session
def test_mpi_adam():
"""
tests the MpiAdam object's functionality
"""
np.random.seed(0)
tf.compat.v1.set_random_seed(0)
a_var = tf.Variable(np.random.randn(3).astype('float32'))
b_var = tf.Variable(np.random.randn(2, 5).astype('float32'))
loss = tf.reduce_sum(tf.square(a_var)) + tf.reduce_sum(tf.sin(b_var))
learning_rate = 1e-2
update_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
do_update = tf_utils.function([], loss, updates=[update_op])
tf.get_default_session().run(tf.global_variables_initializer())
for step in range(10):
print(step, do_update())
tf.compat.v1.set_random_seed(0)
tf.get_default_session().run(tf.global_variables_initializer())
var_list = [a_var, b_var]
lossandgrad = tf_utils.function([], [loss, tf_utils.flatgrad(loss, var_list)], updates=[update_op])
adam = MpiAdam(var_list)
for step in range(10):
loss, grad = lossandgrad()
adam.update(grad, learning_rate)
print(step, loss)
if __name__ == "__main__":
# Run with mpirun -np 2 python <filename>
test_mpi_adam()
|
<filename>stable_baselines/common/mpi_adam.py
import mpi4py
import numpy as np
import tensorflow as tf
import stable_baselines.common.tf_util as tf_utils
class MpiAdam(object):
def __init__(self, var_list, *, beta1=0.9, beta2=0.999, epsilon=1e-08, scale_grad_by_procs=True, comm=None,
sess=None):
"""
A parallel MPI implementation of the Adam optimizer for TensorFlow
https://arxiv.org/abs/1412.6980
:param var_list: ([TensorFlow Tensor]) the variables
:param beta1: (float) Adam beta1 parameter
:param beta2: (float) Adam beta1 parameter
:param epsilon: (float) to help with preventing arithmetic issues
:param scale_grad_by_procs: (bool) if the scaling should be done by processes
:param comm: (MPI Communicators) if None, mpi4py.MPI.COMM_WORLD
:param sess: (TensorFlow Session) if None, tf.get_default_session()
"""
self.var_list = var_list
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.scale_grad_by_procs = scale_grad_by_procs
size = sum(tf_utils.numel(v) for v in var_list)
# Exponential moving average of gradient values
# "first moment estimate" m in the paper
self.exp_avg = np.zeros(size, 'float32')
# Exponential moving average of squared gradient values
# "second raw moment estimate" v in the paper
self.exp_avg_sq = np.zeros(size, 'float32')
self.step = 0
self.setfromflat = tf_utils.SetFromFlat(var_list, sess=sess)
self.getflat = tf_utils.GetFlat(var_list, sess=sess)
self.comm = mpi4py.MPI.COMM_WORLD if comm is None else comm
def update(self, local_grad, learning_rate):
"""
update the values of the graph
:param local_grad: (numpy float) the gradient
:param learning_rate: (float) the learning_rate for the update
"""
if self.step % 100 == 0:
self.check_synced()
local_grad = local_grad.astype('float32')
global_grad = np.zeros_like(local_grad)
self.comm.Allreduce(local_grad, global_grad, op=mpi4py.MPI.SUM)
if self.scale_grad_by_procs:
global_grad /= self.comm.Get_size()
self.step += 1
# Learning rate with bias correction
step_size = learning_rate * np.sqrt(1 - self.beta2 ** self.step) / (1 - self.beta1 ** self.step)
# Decay the first and second moment running average coefficient
self.exp_avg = self.beta1 * self.exp_avg + (1 - self.beta1) * global_grad
self.exp_avg_sq = self.beta2 * self.exp_avg_sq + (1 - self.beta2) * (global_grad * global_grad)
step = (- step_size) * self.exp_avg / (np.sqrt(self.exp_avg_sq) + self.epsilon)
self.setfromflat(self.getflat() + step)
def sync(self):
"""
syncronize the MPI threads
"""
theta = self.getflat()
self.comm.Bcast(theta, root=0)
self.setfromflat(theta)
def check_synced(self):
"""
confirm the MPI threads are synced
"""
if self.comm.Get_rank() == 0: # this is root
theta = self.getflat()
self.comm.Bcast(theta, root=0)
else:
thetalocal = self.getflat()
thetaroot = np.empty_like(thetalocal)
self.comm.Bcast(thetaroot, root=0)
assert (thetaroot == thetalocal).all(), (thetaroot, thetalocal)
@tf_utils.in_session
def test_mpi_adam():
"""
tests the MpiAdam object's functionality
"""
np.random.seed(0)
tf.compat.v1.set_random_seed(0)
a_var = tf.Variable(np.random.randn(3).astype('float32'))
b_var = tf.Variable(np.random.randn(2, 5).astype('float32'))
loss = tf.reduce_sum(tf.square(a_var)) + tf.reduce_sum(tf.sin(b_var))
learning_rate = 1e-2
update_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
do_update = tf_utils.function([], loss, updates=[update_op])
tf.get_default_session().run(tf.global_variables_initializer())
for step in range(10):
print(step, do_update())
tf.compat.v1.set_random_seed(0)
tf.get_default_session().run(tf.global_variables_initializer())
var_list = [a_var, b_var]
lossandgrad = tf_utils.function([], [loss, tf_utils.flatgrad(loss, var_list)], updates=[update_op])
adam = MpiAdam(var_list)
for step in range(10):
loss, grad = lossandgrad()
adam.update(grad, learning_rate)
print(step, loss)
if __name__ == "__main__":
# Run with mpirun -np 2 python <filename>
test_mpi_adam()
|
en
| 0.547709
|
A parallel MPI implementation of the Adam optimizer for TensorFlow https://arxiv.org/abs/1412.6980 :param var_list: ([TensorFlow Tensor]) the variables :param beta1: (float) Adam beta1 parameter :param beta2: (float) Adam beta1 parameter :param epsilon: (float) to help with preventing arithmetic issues :param scale_grad_by_procs: (bool) if the scaling should be done by processes :param comm: (MPI Communicators) if None, mpi4py.MPI.COMM_WORLD :param sess: (TensorFlow Session) if None, tf.get_default_session() # Exponential moving average of gradient values # "first moment estimate" m in the paper # Exponential moving average of squared gradient values # "second raw moment estimate" v in the paper update the values of the graph :param local_grad: (numpy float) the gradient :param learning_rate: (float) the learning_rate for the update # Learning rate with bias correction # Decay the first and second moment running average coefficient syncronize the MPI threads confirm the MPI threads are synced # this is root tests the MpiAdam object's functionality # Run with mpirun -np 2 python <filename>
| 2.160996
| 2
|
waitress/app/tests/test_models.py
|
Maxcutex/waitressappv2
| 0
|
6629367
|
<reponame>Maxcutex/waitressappv2<filename>waitress/app/tests/test_models.py
import unittest
from app.models import Passphrase, SlackUser, MealSession, MealService
from django.utils import timezone
def create_user():
"""
Creates a user.
:Returns: SlackUser object
"""
user_dummy_data = {
'slack_id': 'UX03131',
'firstname': 'Test',
'lastname': 'User',
'email': '<EMAIL>',
'photo': 'inexistent_photo.jpg'
}
return SlackUser.create(user_dummy_data)
user = create_user()
class PassphraseModelTestCase(unittest.TestCase):
"""
A testcase for the Passphrase model.
"""
def test_can_crud_passphrase(self):
# Creating passphrase.
passphrase = Passphrase(word='<PASSWORD>')
passphrase.save()
self.assertIsNotNone(passphrase.id)
# Reading passphrase.
all_passphrase = Passphrase.objects.all()
self.assertIn(passphrase, all_passphrase)
# Updating passphrase.
passphrase.word = '<PASSWORD>'
passphrase.save()
# Deleting passphrase.
passphrase.delete()
with self.assertRaises(Passphrase.DoesNotExist):
Passphrase.objects.get(word='<PASSWORD>')
class SlackUserModelTestCase(unittest.TestCase):
"""
A testcase for the SlackUser model.
"""
def test_can_crud_slackuser(self):
# Reading slack user.
all_slack_user = SlackUser.objects.all()
self.assertIn(user, all_slack_user)
# Updating slack user.
user.lastname = 'Admin'
user.save()
self.assertIsInstance(
SlackUser.objects.get(lastname='Admin'), SlackUser)
# Deleting slack user.
user.delete()
with self.assertRaises(SlackUser.DoesNotExist):
SlackUser.objects.get(email='<EMAIL>')
class MealSessionModel(unittest.TestCase):
"""
A testcase for the MealSession model.
"""
def test_can_crud_mealsession(self):
# Creating meal session.
user.save()
date_today = timezone.now()
mealsession = MealSession.objects.create(
status=True, date=date_today
)
self.assertIsNotNone(mealsession.id)
# Reading meal session.
all_mealsessions = MealSession.objects.all()
self.assertIn(mealsession, all_mealsessions)
# Updating meal session.
mealsession.status = False
mealsession.save()
self.assertIsInstance(
MealSession.objects.get(date=date_today), MealSession)
# Deleting meal session.
mealsession.delete()
with self.assertRaises(MealSession.DoesNotExist):
MealSession.objects.get(date=date_today)
class MealServiceModel(unittest.TestCase):
"""
A testcase for the MealService model.
"""
def test_can_crud_mealservice(self):
# Creating meal service.
user.save()
date_today = timezone.now()
mealservice = MealService.objects.create(
breakfast=1, lunch=0, user=user, date=date_today
)
self.assertIsNotNone(mealservice.id)
# Reading meal service.
all_mealservice = MealService.objects.all()
self.assertIn(mealservice, all_mealservice)
# Updating meal service.
mealservice.lunch = 1
mealservice.save()
self.assertIsInstance(MealService.objects.get(
breakfast=1, lunch=1, user=user), MealService)
# Deleting meal service.
mealservice.delete()
self.assertRaises
with self.assertRaises(MealService.DoesNotExist):
MealService.objects.get(user=user)
|
import unittest
from app.models import Passphrase, SlackUser, MealSession, MealService
from django.utils import timezone
def create_user():
"""
Creates a user.
:Returns: SlackUser object
"""
user_dummy_data = {
'slack_id': 'UX03131',
'firstname': 'Test',
'lastname': 'User',
'email': '<EMAIL>',
'photo': 'inexistent_photo.jpg'
}
return SlackUser.create(user_dummy_data)
user = create_user()
class PassphraseModelTestCase(unittest.TestCase):
"""
A testcase for the Passphrase model.
"""
def test_can_crud_passphrase(self):
# Creating passphrase.
passphrase = Passphrase(word='<PASSWORD>')
passphrase.save()
self.assertIsNotNone(passphrase.id)
# Reading passphrase.
all_passphrase = Passphrase.objects.all()
self.assertIn(passphrase, all_passphrase)
# Updating passphrase.
passphrase.word = '<PASSWORD>'
passphrase.save()
# Deleting passphrase.
passphrase.delete()
with self.assertRaises(Passphrase.DoesNotExist):
Passphrase.objects.get(word='<PASSWORD>')
class SlackUserModelTestCase(unittest.TestCase):
"""
A testcase for the SlackUser model.
"""
def test_can_crud_slackuser(self):
# Reading slack user.
all_slack_user = SlackUser.objects.all()
self.assertIn(user, all_slack_user)
# Updating slack user.
user.lastname = 'Admin'
user.save()
self.assertIsInstance(
SlackUser.objects.get(lastname='Admin'), SlackUser)
# Deleting slack user.
user.delete()
with self.assertRaises(SlackUser.DoesNotExist):
SlackUser.objects.get(email='<EMAIL>')
class MealSessionModel(unittest.TestCase):
"""
A testcase for the MealSession model.
"""
def test_can_crud_mealsession(self):
# Creating meal session.
user.save()
date_today = timezone.now()
mealsession = MealSession.objects.create(
status=True, date=date_today
)
self.assertIsNotNone(mealsession.id)
# Reading meal session.
all_mealsessions = MealSession.objects.all()
self.assertIn(mealsession, all_mealsessions)
# Updating meal session.
mealsession.status = False
mealsession.save()
self.assertIsInstance(
MealSession.objects.get(date=date_today), MealSession)
# Deleting meal session.
mealsession.delete()
with self.assertRaises(MealSession.DoesNotExist):
MealSession.objects.get(date=date_today)
class MealServiceModel(unittest.TestCase):
"""
A testcase for the MealService model.
"""
def test_can_crud_mealservice(self):
# Creating meal service.
user.save()
date_today = timezone.now()
mealservice = MealService.objects.create(
breakfast=1, lunch=0, user=user, date=date_today
)
self.assertIsNotNone(mealservice.id)
# Reading meal service.
all_mealservice = MealService.objects.all()
self.assertIn(mealservice, all_mealservice)
# Updating meal service.
mealservice.lunch = 1
mealservice.save()
self.assertIsInstance(MealService.objects.get(
breakfast=1, lunch=1, user=user), MealService)
# Deleting meal service.
mealservice.delete()
self.assertRaises
with self.assertRaises(MealService.DoesNotExist):
MealService.objects.get(user=user)
|
en
| 0.787909
|
Creates a user. :Returns: SlackUser object A testcase for the Passphrase model. # Creating passphrase. # Reading passphrase. # Updating passphrase. # Deleting passphrase. A testcase for the SlackUser model. # Reading slack user. # Updating slack user. # Deleting slack user. A testcase for the MealSession model. # Creating meal session. # Reading meal session. # Updating meal session. # Deleting meal session. A testcase for the MealService model. # Creating meal service. # Reading meal service. # Updating meal service. # Deleting meal service.
| 2.879817
| 3
|
tests/graphql/objects/infinite_recursion/objects.py
|
karlosss/simple_api
| 2
|
6629368
|
from simple_api.adapters.graphql.graphql import GraphQLAdapter
from simple_api.adapters.utils import generate
from simple_api.object.actions import Action
from simple_api.object.datatypes import ObjectType
from simple_api.object.object import Object
from tests.graphql.graphql_test_utils import build_patterns
def get(request, params, **kwargs):
return None
class TestObject(Object):
fields = {
"self": ObjectType("self", nullable=True)
}
actions = {
"get": Action(return_value=ObjectType(TestObject, nullable=True), exec_fn=get)
}
schema = generate(GraphQLAdapter, actions)
patterns = build_patterns(schema)
|
from simple_api.adapters.graphql.graphql import GraphQLAdapter
from simple_api.adapters.utils import generate
from simple_api.object.actions import Action
from simple_api.object.datatypes import ObjectType
from simple_api.object.object import Object
from tests.graphql.graphql_test_utils import build_patterns
def get(request, params, **kwargs):
return None
class TestObject(Object):
fields = {
"self": ObjectType("self", nullable=True)
}
actions = {
"get": Action(return_value=ObjectType(TestObject, nullable=True), exec_fn=get)
}
schema = generate(GraphQLAdapter, actions)
patterns = build_patterns(schema)
|
none
| 1
| 1.997225
| 2
|
|
experiments/gyroMove.py
|
nshenoy/ev3-python
| 3
|
6629369
|
#!/usr/bin/env micropython
from ev3dev2.motor import LargeMotor, MediumMotor, OUTPUT_A, OUTPUT_C, OUTPUT_D, SpeedPercent, MoveSteering, follow_for_ms
from ev3dev2.sensor.lego import ColorSensor, GyroSensor, UltrasonicSensor
from ev3dev2.led import Leds
from sys import stderr
from time import sleep
import os
from libs.moveTankWithGyro import MoveTankWithGyro
from libs.logToDisplay import logToDisplay
def gyroMove():
"""Test code for gyro PID drive"""
gyro_drive = MoveTankWithGyro(OUTPUT_A, OUTPUT_D)
gyro_drive.gyro = GyroSensor()
sleep(2)
gyro_drive.calibrate_gyro()
target_angle = 90
# Pivot 90 degrees
gyro_drive.pivot_gyro(
speed=SpeedPercent(5),
target_angle=target_angle
)
# Drive straight at the angle
gyro_drive.follow_gyro(
kp=11.3, ki=0.05, kd=3.2,
speed=SpeedPercent(30),
target_angle=target_angle,
follow_for=follow_for_ms,
ms=5000
)
def main():
gyroMove()
if __name__ == "__main__":
main()
|
#!/usr/bin/env micropython
from ev3dev2.motor import LargeMotor, MediumMotor, OUTPUT_A, OUTPUT_C, OUTPUT_D, SpeedPercent, MoveSteering, follow_for_ms
from ev3dev2.sensor.lego import ColorSensor, GyroSensor, UltrasonicSensor
from ev3dev2.led import Leds
from sys import stderr
from time import sleep
import os
from libs.moveTankWithGyro import MoveTankWithGyro
from libs.logToDisplay import logToDisplay
def gyroMove():
"""Test code for gyro PID drive"""
gyro_drive = MoveTankWithGyro(OUTPUT_A, OUTPUT_D)
gyro_drive.gyro = GyroSensor()
sleep(2)
gyro_drive.calibrate_gyro()
target_angle = 90
# Pivot 90 degrees
gyro_drive.pivot_gyro(
speed=SpeedPercent(5),
target_angle=target_angle
)
# Drive straight at the angle
gyro_drive.follow_gyro(
kp=11.3, ki=0.05, kd=3.2,
speed=SpeedPercent(30),
target_angle=target_angle,
follow_for=follow_for_ms,
ms=5000
)
def main():
gyroMove()
if __name__ == "__main__":
main()
|
en
| 0.560847
|
#!/usr/bin/env micropython Test code for gyro PID drive # Pivot 90 degrees # Drive straight at the angle
| 2.552172
| 3
|
custom/ilsgateway/tanzania/reports/delivery.py
|
rochakchauhan/commcare-hq
| 0
|
6629370
|
<reponame>rochakchauhan/commcare-hq<filename>custom/ilsgateway/tanzania/reports/delivery.py
from dateutil import rrule
from django.db.models.aggregates import Avg
from corehq.apps.locations.models import SQLLocation
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from custom.ilsgateway.filters import ProgramFilter, ILSDateFilter, ILSAsyncLocationFilter
from custom.ilsgateway.tanzania import ILSData, DetailsReport
from custom.ilsgateway.tanzania.reports.facility_details import FacilityDetailsReport, InventoryHistoryData, \
RegistrationData, RandRHistory, Notes, RecentMessages
from custom.ilsgateway.models import OrganizationSummary, DeliveryGroups, SupplyPointStatusTypes, GroupSummary
from custom.ilsgateway.tanzania.reports.mixins import DeliverySubmissionData
from custom.ilsgateway.tanzania.reports.utils import make_url, link_format, latest_status_or_none,\
get_this_lead_time, get_avg_lead_time
from memoized import memoized
from django.utils.translation import ugettext as _
class LeadTimeHistory(ILSData):
show_table = True
title = "Lead Time History"
slug = "lead_time_history"
show_chart = False
searchable = True
use_datatables = True
@property
def headers(self):
return DataTablesHeader(
DataTablesColumn(_('Name')),
DataTablesColumn(_('Average Lead Time In Days'))
)
@property
def rows(self):
locations = SQLLocation.objects.filter(parent__location_id=self.config['location_id'])
rows = []
for loc in locations:
try:
org_summary = OrganizationSummary.objects.filter(
location_id=loc.location_id,
date__range=(self.config['startdate'], self.config['enddate'])
).aggregate(average_lead_time_in_days=Avg('average_lead_time_in_days'))
except OrganizationSummary.DoesNotExist:
continue
avg_lead_time = org_summary['average_lead_time_in_days']
if avg_lead_time:
avg_lead_time = "%.1f" % avg_lead_time
else:
avg_lead_time = "None"
url = make_url(DeliveryReport, self.config['domain'],
'?location_id=%s&filter_by_program=%s&'
'datespan_type=%s&datespan_first=%s&datespan_second=%s',
(loc.location_id,
self.config['program'], self.config['datespan_type'],
self.config['datespan_first'], self.config['datespan_second']))
rows.append([link_format(loc.name, url), avg_lead_time])
return rows
class DeliveryStatus(ILSData):
show_table = True
slug = "delivery_status"
show_chart = False
searchable = True
def __init__(self, config=None, css_class='row_chart'):
super(DeliveryStatus, self).__init__(config, css_class)
self.config = config or {}
self.css_class = css_class
datespan_type = self.config.get('datespan_type')
if datespan_type == 1:
self.title = "Delivery Status: Group %s" %\
DeliveryGroups(int(self.config['datespan_first'])).current_delivering_group()
else:
self.title = "Delivery Status"
@property
def headers(self):
return DataTablesHeader(
DataTablesColumn(_('Code')),
DataTablesColumn(_('Facility Name')),
DataTablesColumn(_('Delivery Status')),
DataTablesColumn(_('Delivery Date')),
DataTablesColumn(_('This Cycle Lead Time')),
DataTablesColumn(_('Average Lead Time In Days'))
)
@property
def rows(self):
rows = []
locations = SQLLocation.objects.filter(parent__location_id=self.config['location_id'])
dg = []
for date in list(rrule.rrule(rrule.MONTHLY, dtstart=self.config['startdate'],
until=self.config['enddate'])):
dg.extend(DeliveryGroups().delivering(locations, date.month))
for child in dg:
group_summary = GroupSummary.objects.filter(
org_summary__date__lte=self.config['startdate'],
org_summary__location_id=child.location_id,
title=SupplyPointStatusTypes.DELIVERY_FACILITY,
total=1
).exists()
if not group_summary:
continue
latest = latest_status_or_none(
child.location_id,
SupplyPointStatusTypes.DELIVERY_FACILITY,
self.config['startdate'],
self.config['enddate']
)
status_name = latest.name if latest else ""
status_date = latest.status_date.strftime("%d-%m-%Y") if latest else "None"
url = make_url(FacilityDetailsReport, self.config['domain'],
'?location_id=%s&filter_by_program=%s&'
'datespan_type=%s&datespan_first=%s&datespan_second=%s',
(child.location_id,
self.config['program'], self.config['datespan_type'],
self.config['datespan_first'], self.config['datespan_second']))
cycle_lead_time = get_this_lead_time(
child.location_id,
self.config['startdate'],
self.config['enddate']
)
avg_lead_time = get_avg_lead_time(child.location_id, self.config['startdate'],
self.config['enddate'])
rows.append(
[
child.site_code,
link_format(child.name, url),
status_name,
status_date,
cycle_lead_time,
avg_lead_time
]
)
return rows
class DeliveryData(ILSData):
show_table = True
show_chart = False
slug = 'delivery_data'
title = 'Delivery Data'
searchable = True
@property
def headers(self):
return DataTablesHeader(
DataTablesColumn(_('Category'), sort_direction="desc"),
DataTablesColumn(_('# Facilities')),
DataTablesColumn(_('% of total')),
)
@property
def rows(self):
data = DeliverySubmissionData(config=self.config, css_class='row_chart_all').rows
if data:
dg = data[0]
percent_format = lambda x, y: x * 100 / (y or 1)
return [
[_('Didn\'t Respond'), '%d' % dg.not_responding,
'%.1f%%' % percent_format(dg.not_responding, dg.total)],
[_('Not Received'), '%d' % dg.not_received, '%.1f%%' % percent_format(dg.not_received, dg.total)],
[_('Received'), '%d' % dg.received, '%.1f%%' % percent_format(dg.received, dg.total)],
[_('Total'), '%d' % dg.total, '100%'],
]
class DeliveryReport(DetailsReport):
    """Drill-down delivery report; the data providers shown depend on the
    type of the selected location (region-level vs. facility vs. other)."""
    slug = "delivery_report"
    name = 'Delivery'
    use_datatables = True
    @property
    def title(self):
        # Facility pages get a special title with site code and delivery group.
        title = _('Delivery Report {0}'.format(self.title_month))
        if self.location and self.location.location_type.name.upper() == 'FACILITY':
            return "{0} ({1}) Group {2}".format(self.location.name,
                                                self.location.site_code,
                                                self.location.metadata.get('group', '---'))
        return title
    @property
    def fields(self):
        # Facility pages render without the location/date/program filters.
        fields = [ILSAsyncLocationFilter, ILSDateFilter, ProgramFilter]
        if self.location and self.location.location_type.name.upper() == 'FACILITY':
            fields = []
        return fields
    @property
    @memoized
    def data_providers(self):
        # The overall submission summary is always shown; additional
        # providers are appended based on the selected location's type.
        data_providers = [
            DeliverySubmissionData(config=self.report_config, css_class='row_chart_all'),
        ]
        config = self.report_config
        if config['location_id']:
            location = SQLLocation.objects.get(location_id=config['location_id'])
            if location.location_type.name.upper() in ['REGION', 'MSDZONE', 'MOHSW']:
                data_providers.append(DeliveryData(config=config, css_class='row_chart_all'))
                data_providers.append(LeadTimeHistory(config=config, css_class='row_chart_all'))
            elif location.location_type.name.upper() == 'FACILITY':
                # Facility view replaces the default provider list entirely.
                return [
                    InventoryHistoryData(config=config),
                    RandRHistory(config=config),
                    Notes(config=config),
                    RecentMessages(config=config),
                    RegistrationData(config=dict(loc_type='FACILITY', **config), css_class='row_chart_all'),
                    RegistrationData(config=dict(loc_type='DISTRICT', **config), css_class='row_chart_all'),
                    RegistrationData(config=dict(loc_type='REGION', **config), css_class='row_chart_all')
                ]
            else:
                data_providers.append(DeliveryStatus(config=config, css_class='row_chart_all'))
        return data_providers
    @property
    def report_context(self):
        # Tag the context so templates can switch on the report mode.
        ret = super(DeliveryReport, self).report_context
        ret['view_mode'] = 'delivery'
        return ret
|
from dateutil import rrule
from django.db.models.aggregates import Avg
from corehq.apps.locations.models import SQLLocation
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from custom.ilsgateway.filters import ProgramFilter, ILSDateFilter, ILSAsyncLocationFilter
from custom.ilsgateway.tanzania import ILSData, DetailsReport
from custom.ilsgateway.tanzania.reports.facility_details import FacilityDetailsReport, InventoryHistoryData, \
RegistrationData, RandRHistory, Notes, RecentMessages
from custom.ilsgateway.models import OrganizationSummary, DeliveryGroups, SupplyPointStatusTypes, GroupSummary
from custom.ilsgateway.tanzania.reports.mixins import DeliverySubmissionData
from custom.ilsgateway.tanzania.reports.utils import make_url, link_format, latest_status_or_none,\
get_this_lead_time, get_avg_lead_time
from memoized import memoized
from django.utils.translation import ugettext as _
class LeadTimeHistory(ILSData):
    """Table of child locations with their average delivery lead time in days."""
    show_table = True
    title = "Lead Time History"
    slug = "lead_time_history"
    show_chart = False
    searchable = True

    @property
    def headers(self):
        """Two columns: location name (linked) and average lead time."""
        return DataTablesHeader(
            DataTablesColumn(_('Name')),
            DataTablesColumn(_('Average Lead Time In Days'))
        )

    @property
    def rows(self):
        """One row per direct child of the configured location.

        The average is taken over OrganizationSummary records in the
        configured date range and formatted to one decimal place; when no
        data exists the cell reads "None".
        """
        locations = SQLLocation.objects.filter(parent__location_id=self.config['location_id'])
        rows = []
        for loc in locations:
            # NOTE: QuerySet.aggregate() never raises DoesNotExist -- it
            # returns {'average_lead_time_in_days': None} when no rows
            # match -- so the try/except that used to wrap this call was
            # dead code and has been removed.
            org_summary = OrganizationSummary.objects.filter(
                location_id=loc.location_id,
                date__range=(self.config['startdate'], self.config['enddate'])
            ).aggregate(average_lead_time_in_days=Avg('average_lead_time_in_days'))
            avg_lead_time = org_summary['average_lead_time_in_days']
            if avg_lead_time:
                avg_lead_time = "%.1f" % avg_lead_time
            else:
                avg_lead_time = "None"
            url = make_url(DeliveryReport, self.config['domain'],
                           '?location_id=%s&filter_by_program=%s&'
                           'datespan_type=%s&datespan_first=%s&datespan_second=%s',
                           (loc.location_id,
                            self.config['program'], self.config['datespan_type'],
                            self.config['datespan_first'], self.config['datespan_second']))
            rows.append([link_format(loc.name, url), avg_lead_time])
        return rows
class DeliveryStatus(ILSData):
    """Per-facility delivery-status table for the facilities whose delivery
    group is scheduled in the configured date range."""
    show_table = True
    slug = "delivery_status"
    show_chart = False
    searchable = True
    def __init__(self, config=None, css_class='row_chart'):
        super(DeliveryStatus, self).__init__(config, css_class)
        self.config = config or {}
        self.css_class = css_class
        datespan_type = self.config.get('datespan_type')
        # datespan_type == 1 denotes a single-month span, so the title can
        # name the group that delivers in that month ('datespan_first' is
        # the month number).
        if datespan_type == 1:
            self.title = "Delivery Status: Group %s" %\
                DeliveryGroups(int(self.config['datespan_first'])).current_delivering_group()
        else:
            self.title = "Delivery Status"
    @property
    def headers(self):
        return DataTablesHeader(
            DataTablesColumn(_('Code')),
            DataTablesColumn(_('Facility Name')),
            DataTablesColumn(_('Delivery Status')),
            DataTablesColumn(_('Delivery Date')),
            DataTablesColumn(_('This Cycle Lead Time')),
            DataTablesColumn(_('Average Lead Time In Days'))
        )
    @property
    def rows(self):
        """Build one row per delivering facility: code, linked name, latest
        delivery status/date, this cycle's lead time, and the average."""
        rows = []
        locations = SQLLocation.objects.filter(parent__location_id=self.config['location_id'])
        dg = []
        # Collect the facilities whose delivery group is scheduled in each
        # month of the configured span; a facility can appear once per month.
        for date in list(rrule.rrule(rrule.MONTHLY, dtstart=self.config['startdate'],
                                     until=self.config['enddate'])):
            dg.extend(DeliveryGroups().delivering(locations, date.month))
        for child in dg:
            # Skip facilities with no delivery-facility summary on record
            # up to the start date.
            group_summary = GroupSummary.objects.filter(
                org_summary__date__lte=self.config['startdate'],
                org_summary__location_id=child.location_id,
                title=SupplyPointStatusTypes.DELIVERY_FACILITY,
                total=1
            ).exists()
            if not group_summary:
                continue
            latest = latest_status_or_none(
                child.location_id,
                SupplyPointStatusTypes.DELIVERY_FACILITY,
                self.config['startdate'],
                self.config['enddate']
            )
            # No latest status renders as an empty name and the string "None".
            status_name = latest.name if latest else ""
            status_date = latest.status_date.strftime("%d-%m-%Y") if latest else "None"
            url = make_url(FacilityDetailsReport, self.config['domain'],
                           '?location_id=%s&filter_by_program=%s&'
                           'datespan_type=%s&datespan_first=%s&datespan_second=%s',
                           (child.location_id,
                            self.config['program'], self.config['datespan_type'],
                            self.config['datespan_first'], self.config['datespan_second']))
            cycle_lead_time = get_this_lead_time(
                child.location_id,
                self.config['startdate'],
                self.config['enddate']
            )
            avg_lead_time = get_avg_lead_time(child.location_id, self.config['startdate'],
                                              self.config['enddate'])
            rows.append(
                [
                    child.site_code,
                    link_format(child.name, url),
                    status_name,
                    status_date,
                    cycle_lead_time,
                    avg_lead_time
                ]
            )
        return rows
class DeliveryData(ILSData):
    """Summary table of delivery-submission counts and percentages of total."""
    show_table = True
    show_chart = False
    slug = 'delivery_data'
    title = 'Delivery Data'
    searchable = True

    @property
    def headers(self):
        columns = (
            DataTablesColumn(_('Category'), sort_direction="desc"),
            DataTablesColumn(_('# Facilities')),
            DataTablesColumn(_('% of total')),
        )
        return DataTablesHeader(*columns)

    @property
    def rows(self):
        # The submission-data provider's first row carries the aggregate
        # counts we summarise here.
        data = DeliverySubmissionData(config=self.config, css_class='row_chart_all').rows
        if data:
            summary = data[0]
            percent_format = lambda part, whole: part * 100 / (whole or 1)
            categories = [
                (_('Didn\'t Respond'), summary.not_responding),
                (_('Not Received'), summary.not_received),
                (_('Received'), summary.received),
            ]
            table = [
                [label, '%d' % count, '%.1f%%' % percent_format(count, summary.total)]
                for label, count in categories
            ]
            table.append([_('Total'), '%d' % summary.total, '100%'])
            return table
class DeliveryReport(DetailsReport):
slug = "delivery_report"
name = 'Delivery'
use_datatables = True
@property
def title(self):
title = _('Delivery Report {0}'.format(self.title_month))
if self.location and self.location.location_type.name.upper() == 'FACILITY':
return "{0} ({1}) Group {2}".format(self.location.name,
self.location.site_code,
self.location.metadata.get('group', '---'))
return title
@property
def fields(self):
fields = [ILSAsyncLocationFilter, ILSDateFilter, ProgramFilter]
if self.location and self.location.location_type.name.upper() == 'FACILITY':
fields = []
return fields
@property
@memoized
def data_providers(self):
data_providers = [
DeliverySubmissionData(config=self.report_config, css_class='row_chart_all'),
]
config = self.report_config
if config['location_id']:
location = SQLLocation.objects.get(location_id=config['location_id'])
if location.location_type.name.upper() in ['REGION', 'MSDZONE', 'MOHSW']:
data_providers.append(DeliveryData(config=config, css_class='row_chart_all'))
data_providers.append(LeadTimeHistory(config=config, css_class='row_chart_all'))
elif location.location_type.name.upper() == 'FACILITY':
return [
InventoryHistoryData(config=config),
RandRHistory(config=config),
Notes(config=config),
RecentMessages(config=config),
RegistrationData(config=dict(loc_type='FACILITY', **config), css_class='row_chart_all'),
RegistrationData(config=dict(loc_type='DISTRICT', **config), css_class='row_chart_all'),
RegistrationData(config=dict(loc_type='REGION', **config), css_class='row_chart_all')
]
else:
data_providers.append(DeliveryStatus(config=config, css_class='row_chart_all'))
return data_providers
@property
def report_context(self):
ret = super(DeliveryReport, self).report_context
ret['view_mode'] = 'delivery'
return ret
|
none
| 1
| 1.823753
| 2
|
|
deep_learning/optimiser_L2_CNNC.py
|
eddymarts/Linear-Regression
| 0
|
6629371
|
# Train a CNN classifier on MNIST with SGD + L2 regularisation (weight_decay),
# then report a score on the test set and plot the loss curves.
from nn_models import CNNClassifier
from torchvision import datasets, transforms
import torch
from multiprocessing import cpu_count
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
# MNIST train/test splits, normalised with the dataset's canonical
# mean/std (0.1307 / 0.3081).
mnist_train = datasets.MNIST(root="datasets/mnist_train",
                    download=True, train=True,
                    transform=transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.1307,), (0.3081,))]))
mnist_test = datasets.MNIST(root="datasets/mnist_test",
                    download=True, train=False,
                    transform=transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.1307,), (0.3081,))]))
# Use half the available cores for data loading; only the training
# loader shuffles.
train_load = DataLoader(mnist_train, batch_size=16,
                        shuffle=True, num_workers=round(cpu_count()/2))
test_load = DataLoader(mnist_test, batch_size=16,
                        shuffle=False, num_workers=round(cpu_count()/2))
nn_classifier = CNNClassifier()
# weight_decay=1e-5 adds the L2 penalty this script is named after.
optimiser = torch.optim.SGD(nn_classifier.parameters(), lr=0.01, weight_decay=1e-5)
loss = nn_classifier.fit(train_load, test_load, return_loss=True, optimiser=optimiser,
                        epochs=10, acceptable_error=0.0001, lr=0.01)
y_val, y_hat_val = nn_classifier.predict(test_load, return_y=True)
print(torch.cat((y_val, y_hat_val), dim=1)[0:10])
# NOTE(review): r2_score is a regression metric; for a classifier over
# digit labels, accuracy would likely be the intended measure -- confirm.
print("R^2 score:", r2_score(y_hat_val.detach().numpy(), y_val.detach().numpy()))
# Loss curves per epoch; the x-label embeds the final validation loss.
plt.plot(loss['training'], label="Training set loss")
plt.plot(loss['validation'], label="Validation set loss")
plt.xlabel(f"Epochs\nl={loss['validation'][-1]}")
plt.ylabel("CE")
plt.legend()
plt.show()
|
from nn_models import CNNClassifier
from torchvision import datasets, transforms
import torch
from multiprocessing import cpu_count
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
mnist_train = datasets.MNIST(root="datasets/mnist_train",
download=True, train=True,
transform=transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))]))
mnist_test = datasets.MNIST(root="datasets/mnist_test",
download=True, train=False,
transform=transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))]))
train_load = DataLoader(mnist_train, batch_size=16,
shuffle=True, num_workers=round(cpu_count()/2))
test_load = DataLoader(mnist_test, batch_size=16,
shuffle=False, num_workers=round(cpu_count()/2))
nn_classifier = CNNClassifier()
optimiser = torch.optim.SGD(nn_classifier.parameters(), lr=0.01, weight_decay=1e-5)
loss = nn_classifier.fit(train_load, test_load, return_loss=True, optimiser=optimiser,
epochs=10, acceptable_error=0.0001, lr=0.01)
y_val, y_hat_val = nn_classifier.predict(test_load, return_y=True)
print(torch.cat((y_val, y_hat_val), dim=1)[0:10])
print("R^2 score:", r2_score(y_hat_val.detach().numpy(), y_val.detach().numpy()))
plt.plot(loss['training'], label="Training set loss")
plt.plot(loss['validation'], label="Validation set loss")
plt.xlabel(f"Epochs\nl={loss['validation'][-1]}")
plt.ylabel("CE")
plt.legend()
plt.show()
|
none
| 1
| 2.784944
| 3
|
|
week09/code03.py
|
byeongal/KMUCP
| 0
|
6629372
|
def print_my_info(name):
    """Print a three-line Korean self-introduction for *name*."""
    greeting_lines = [
        "안녕하세요.",
        name + "입니다.",
        "만나서반갑습니다.",
    ]
    for line in greeting_lines:
        print(line)


# Demo: introduce "김영재".
print_my_info("김영재")
|
def print_my_info(name):
    """Print a three-line Korean self-introduction for *name*:
    a greeting, "<name>입니다." (I am <name>), and "nice to meet you"."""
    print("안녕하세요.")
    print(name+"입니다.")
    print("만나서반갑습니다.")
# Demo call: introduce "김영재".
print_my_info("김영재")
|
none
| 1
| 2.073751
| 2
|
|
dataProcessor-tests.py
|
debbieneaeraconsulting/cvp-ingest-documentedtest
| 0
|
6629373
|
import unittest
import dataProcessor
import json
import logging
import sys
import boto3
import os
from moto import mock_s3
# logger = logging.getLogger()
# logger.level = logging.DEBUG
# stream_handler = logging.StreamHandler(sys.stdout)
# logger.addHandler(stream_handler)
class TestLambdaHandler(unittest.TestCase):
    """Unit tests for dataProcessor.lambda_handler using moto-mocked S3."""
    def setUp(self):
        # Minimal S3 put-event payload: one record pointing at
        # bucket "test", key "bsm/file.csv".
        json_event_data = """{
                "Records":
                [
                    {
                    "s3": {
                        "bucket": {
                        "name": "test"
                        },
                        "object": {
                        "key": "bsm/file.csv"
                        }
                    }
                    }
                ]
                }""".encode('utf-8')
        self.event_data = json.loads(json_event_data)
        # Same shape but with an empty record, to exercise the KeyError path.
        json_wrong_event_data = """{
                "Records":
                [
                    {
                    }
                ]
                }""".encode('utf-8')
        self.wrong_event_data = json.loads(json_wrong_event_data)
    @mock_s3
    def test_lambda_handler_wydot(self):
        source_bucket = 'test'
        target_bucket = 'random_target_bucket'
        # NOTE(review): '<KEY> looks like an anonymization artifact from the
        # data-dump -- the string literal is unterminated; restore the real
        # WYDOT target key prefix before running.
        target_key = '<KEY>
        key = 'bsm/file.csv'
        self.basic_template(source_bucket, target_bucket, target_key, key)
    @mock_s3
    def test_lambda_handler_thea(self):
        source_bucket = 'test'
        target_bucket = 'random_target_bucket'
        target_key = 'cv/thea/'
        key = 'bsm/file.csv'
        self.basic_template(source_bucket, target_bucket, target_key, key)
    def basic_template(self, source_bucket, target_bucket, target_key, key):
        """Shared arrange/act/assert: seed a source object, invoke the
        handler, and verify exactly one object lands under target_key."""
        # The handler reads its destination from these environment variables.
        os.environ['TARGET_DATA_BUCKET'] = target_bucket
        os.environ['TARGET_DATA_KEY'] = target_key
        conn = boto3.resource('s3')
        conn.create_bucket(Bucket=source_bucket)
        conn.create_bucket(Bucket=target_bucket)
        # Arrange
        # create object
        bucket = conn.Bucket(source_bucket)
        bucket.put_object(Body='ola', Key=key)
        bucket = conn.Bucket(target_bucket)
        count = 0
        for obj in bucket.objects.all():
            print(obj)
            count += 1
        self.assertTrue(count == 0, "Should be empty")
        # Act
        dataProcessor.lambda_handler(self.event_data, '')
        # Assert
        bucket = conn.Bucket(target_bucket)
        count = 0
        for obj in bucket.objects.all():
            print(obj)
            self.assertTrue(obj.key.startswith(target_key), "wrong destination folder")
            self.assertTrue(obj.key.endswith(os.path.basename(key)), "invalid filename")
            count += 1
        self.assertTrue(count == 1, "Should have new file")
    @mock_s3
    def test_lambda_handler_with_wrong_event_data(self):
        # A record without an "s3" section must raise KeyError.
        conn = boto3.resource('s3')
        conn.create_bucket(Bucket='asd')
        self.assertRaises(KeyError, dataProcessor.lambda_handler, self.wrong_event_data, '')
if __name__ == '__main__':
    unittest.main()
|
import unittest
import dataProcessor
import json
import logging
import sys
import boto3
import os
from moto import mock_s3
# logger = logging.getLogger()
# logger.level = logging.DEBUG
# stream_handler = logging.StreamHandler(sys.stdout)
# logger.addHandler(stream_handler)
class TestLambdaHandler(unittest.TestCase):
def setUp(self):
json_event_data = """{
"Records":
[
{
"s3": {
"bucket": {
"name": "test"
},
"object": {
"key": "bsm/file.csv"
}
}
}
]
}""".encode('utf-8')
self.event_data = json.loads(json_event_data)
json_wrong_event_data = """{
"Records":
[
{
}
]
}""".encode('utf-8')
self.wrong_event_data = json.loads(json_wrong_event_data)
@mock_s3
def test_lambda_handler_wydot(self):
source_bucket = 'test'
target_bucket = 'random_target_bucket'
target_key = '<KEY>
key = 'bsm/file.csv'
self.basic_template(source_bucket, target_bucket, target_key, key)
@mock_s3
def test_lambda_handler_thea(self):
source_bucket = 'test'
target_bucket = 'random_target_bucket'
target_key = 'cv/thea/'
key = 'bsm/file.csv'
self.basic_template(source_bucket, target_bucket, target_key, key)
def basic_template(self, source_bucket, target_bucket, target_key, key):
os.environ['TARGET_DATA_BUCKET'] = target_bucket
os.environ['TARGET_DATA_KEY'] = target_key
conn = boto3.resource('s3')
conn.create_bucket(Bucket=source_bucket)
conn.create_bucket(Bucket=target_bucket)
# Arrange
# create object
bucket = conn.Bucket(source_bucket)
bucket.put_object(Body='ola', Key=key)
bucket = conn.Bucket(target_bucket)
count = 0
for obj in bucket.objects.all():
print(obj)
count += 1
self.assertTrue(count == 0, "Should be empty")
# Act
dataProcessor.lambda_handler(self.event_data, '')
# Assert
bucket = conn.Bucket(target_bucket)
count = 0
for obj in bucket.objects.all():
print(obj)
self.assertTrue(obj.key.startswith(target_key), "wrong destination folder")
self.assertTrue(obj.key.endswith(os.path.basename(key)), "invalid filename")
count += 1
self.assertTrue(count == 1, "Should have new file")
@mock_s3
def test_lambda_handler_with_wrong_event_data(self):
conn = boto3.resource('s3')
conn.create_bucket(Bucket='asd')
self.assertRaises(KeyError, dataProcessor.lambda_handler, self.wrong_event_data, '')
if __name__ == '__main__':
unittest.main()
|
en
| 0.22842
|
# logger = logging.getLogger() # logger.level = logging.DEBUG # stream_handler = logging.StreamHandler(sys.stdout) # logger.addHandler(stream_handler) { "Records": [ { "s3": { "bucket": { "name": "test" }, "object": { "key": "bsm/file.csv" } } } ] } { "Records": [ { } ] } # Arrange # create object # Act # Assert
| 2.19592
| 2
|
tests/urls.py
|
almahmoud/djcloudbridge
| 0
|
6629374
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.conf.urls import include, url
from django.contrib import admin
# URL patterns are tried in order, so the catch-all r'^' include must come
# last -- in the original ordering it shadowed ^api-auth/, making those
# routes unreachable. The admin pattern is also anchored with '^' so it
# only matches at the start of the path.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^api-auth/', include('rest_framework.urls',
                               namespace='rest_framework')),
    url(r'^', include('djcloudbridge.urls',
                      namespace='djcloudbridge')),
]
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.conf.urls import include, url
from django.contrib import admin
# Routes for the test project: admin, the djcloudbridge app, and DRF's
# browsable-API auth views.
# NOTE(review): r'admin/' is unanchored, and the catch-all r'^' include
# precedes r'^api-auth/', which would shadow the api-auth routes since
# Django tries patterns in order -- confirm the intended ordering.
urlpatterns = [
    url(r'admin/', admin.site.urls),
    url(r'^', include('djcloudbridge.urls',
                      namespace='djcloudbridge')),
    url(r'^api-auth/', include('rest_framework.urls',
                               namespace='rest_framework'))
]
|
en
| 0.769321
|
# -*- coding: utf-8 -*-
| 1.54963
| 2
|
pyiron/base/generic/template.py
|
pmrv/pyiron
| 0
|
6629375
|
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
"""
Template class to list the required properties and functions for every pyiron object.
"""
__author__ = "<NAME>"
__copyright__ = (
"Copyright 2020, Max-Planck-Institut für Eisenforschung GmbH - "
"Computational Materials Design (CM) Department"
)
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "production"
__date__ = "Sep 1, 2017"
class PyironObject(object):
    """
    Template class to list the required properties and functions for every pyiron object.

    Every accessor raises NotImplementedError; subclasses are expected to
    override all of them. (Bug fix: several error messages previously named
    the wrong member -- parent_id said "master_id", inspect said "insect()",
    copy_to said "copy()" and move_to said "move()".)
    """
    @property
    def id(self):
        """
        Every pyiron object should have the ability to be stored in the database

        Returns:
            int: object id
        """
        raise NotImplementedError("id should be implemented in the derived class")
    @id.setter
    def id(self, new_id):
        """
        Every pyiron object should have the ability to be stored in the database

        Args:
            new_id (int): object id
        """
        raise NotImplementedError("id should be implemented in the derived class")
    @property
    def master_id(self):
        """
        If the pyiron object belongs to a series of objects, series object is linked by the master id

        Returns:
            int: master id
        """
        raise NotImplementedError(
            "master_id should be implemented in the derived class"
        )
    @master_id.setter
    def master_id(self, master_id):
        """
        If the pyiron object belongs to a series of objects, series object is linked by the master id

        Args:
            master_id (int): master id
        """
        raise NotImplementedError(
            "master_id should be implemented in the derived class"
        )
    @property
    def parent_id(self):
        """
        If the pyiron object belongs to a serial series of objects, the predecessor is linked by the parent id

        Returns:
            int: parent id
        """
        # Fixed: the message used to say "master_id".
        raise NotImplementedError(
            "parent_id should be implemented in the derived class"
        )
    @parent_id.setter
    def parent_id(self, parent_id):
        """
        If the pyiron object belongs to a serial series of objects, the predecessor is linked by the parent id

        Args:
            parent_id (int): parent id
        """
        # Fixed: the message used to say "master_id".
        raise NotImplementedError(
            "parent_id should be implemented in the derived class"
        )
    @property
    def child_ids(self):
        """
        If the pyiron object is a meta object which includes a series of objects these objects ids are listed as child ids.

        Returns:
            list: list of child ids
        """
        raise NotImplementedError(
            "child_ids should be implemented in the derived class"
        )
    def save(self):
        """
        Store the pyiron object in the HDF5 file and create a corresponding database entry.
        """
        raise NotImplementedError("save() should be implemented in the derived class")
    def remove(self):
        """
        Remove the pyiron object from the database and delete the HDF5 file
        """
        raise NotImplementedError("remove() should be implemented in the derived class")
    def load(self, job_specifier, convert_to_object=True):
        """
        Load a pyiron object from the database

        Args:
            job_specifier (str, int): identifier of the pyiron object - this needs to be unique in the project.
            convert_to_object (bool): [True/False] - it is faster to only load the HDF5 access but the pyiron object
                                      offers a whole more functionality.

        Returns:
            PyironObject: the pyiron object
        """
        raise NotImplementedError("load() should be implemented in the derived class")
    def inspect(self, job_specifier):
        """
        Inspect a pyiron object from the database - the same like load(self, job_specifier, convert_to_object=False).
        The inspect mode provides quick access to the HDF5 file without loading the full object, which is especially
        helpful if only a specific section of the output is required.

        Args:
            job_specifier (str, int): identifier of the pyiron object - this needs to be unique in the project.

        Returns:
            PyironObject: a reduced pyiron object like JobPath
        """
        # Fixed: the message used to say "insect()".
        raise NotImplementedError("inspect() should be implemented in the derived class")
    def copy(self):
        """
        Copy the pyiron object - this copies only the link to the HDF5 file - not the content of the HDF5 file.

        Returns:
            PyironObject: a copy of the pyiron object
        """
        raise NotImplementedError("copy() should be implemented in the derived class")
    def copy_to(self, new_project=None):
        """
        Copy the pyiron object including the HDF5 file to a new location

        Args:
            new_project (ProjectHDFio): new location

        Returns:
            PyironObject: a copy of the pyiron object
        """
        # Fixed: the message used to say "copy()".
        raise NotImplementedError("copy_to() should be implemented in the derived class")
    def move_to(self, new_project):
        """
        Move the pyiron object including the HDF5 file to a new location

        Args:
            new_project (ProjectHDFio): new location
        """
        # Fixed: the message used to say "move()".
        raise NotImplementedError("move_to() should be implemented in the derived class")
    def to_hdf(self, hdf, group_name="group"):
        """
        Store the PyironObject in an HDF5 file

        Args:
            hdf (ProjectHDFio): HDF5 group object
            group_name (str): HDF5 subgroup name - optional
        """
        raise NotImplementedError("to_hdf() should be implemented in the derived class")
    def from_hdf(self, hdf, group_name="group"):
        """
        Restore the PyironObject from an HDF5 file

        Args:
            hdf (ProjectHDFio): HDF5 group object
            group_name (str): HDF5 subgroup name - optional
        """
        raise NotImplementedError(
            "from_hdf() should be implemented in the derived class"
        )
|
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
"""
Template class to list the required properties and functions for every pyiron object.
"""
__author__ = "<NAME>"
__copyright__ = (
"Copyright 2020, Max-Planck-Institut für Eisenforschung GmbH - "
"Computational Materials Design (CM) Department"
)
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "production"
__date__ = "Sep 1, 2017"
class PyironObject(object):
"""
Template class to list the required properties and functions for every pyiron object.
"""
@property
def id(self):
"""
Every pyiron object should have the ability to be stored in the database
Returns:
int: object id
"""
raise NotImplementedError("id should be implemented in the derived class")
@id.setter
def id(self, new_id):
"""
Every pyiron object should have the ability to be stored in the database
Args:
new_id (int): object id
"""
raise NotImplementedError("id should be implemented in the derived class")
@property
def master_id(self):
"""
If the pyiron object belongs to a series of objects, series object is linked by the master id
Returns:
int: master id
"""
raise NotImplementedError(
"master_id should be implemented in the derived class"
)
@master_id.setter
def master_id(self, master_id):
"""
If the pyiron object belongs to a series of objects, series object is linked by the master id
Args:
master_id (int): master id
"""
raise NotImplementedError(
"master_id should be implemented in the derived class"
)
@property
def parent_id(self):
"""
If the pyiron object belongs to a serial series of objects, the predecessor is linked by the parent id
Returns:
int: parent id
"""
raise NotImplementedError(
"master_id should be implemented in the derived class"
)
@parent_id.setter
def parent_id(self, parent_id):
"""
If the pyiron object belongs to a serial series of objects, the predecessor is linked by the parent id
Args:
parent_id (int): parent id
"""
raise NotImplementedError(
"master_id should be implemented in the derived class"
)
@property
def child_ids(self):
"""
If the pyiron object is a meta object which includes a series of objects these objects ids are listed as child ids.
Returns:
list: list of child ids
"""
raise NotImplementedError(
"child_ids should be implemented in the derived class"
)
def save(self):
"""
Store the pyiron object in the HDF5 file an create a corresponding database entry.
"""
raise NotImplementedError("save() should be implemented in the derived class")
def remove(self):
"""
Remove the pyiron obect from the database and delete the HDF5 file
"""
raise NotImplementedError("remove() should be implemented in the derived class")
def load(self, job_specifier, convert_to_object=True):
"""
Load a pyiron object from the database
Args:
job_specifier (str, int): identifier of the pyiron object - this needs to be unique in the project.
convert_to_object (bool): [True/False] - it is faster to only load the HDF5 access but the pyiron object
offers a whole more functionality.
Returns:
PyironObject: the pyiron object
"""
raise NotImplementedError("load() should be implemented in the derived class")
def inspect(self, job_specifier):
"""
Inspect a pyiron object from the database - the same like load(self, job_specifier, convert_to_object=False).
The inspect mode provides quick access to the HDF5 file without loading the full object, which is especially
helpful if only a specific section of the output is required.
Args:
job_specifier (str, int): identifier of the pyiron object - this needs to be unique in the project.
Returns:
PyironObject: a reduces pyiron object like JobPath
"""
raise NotImplementedError("insect() should be implemented in the derived class")
def copy(self):
"""
Copy the pyiron object - this copies only the link to the HDF5 file - not the content of the HDF5 file.
Returns:
PyironObject: a copy of the pyiron object
"""
raise NotImplementedError("copy() should be implemented in the derived class")
def copy_to(self, new_project=None):
"""
Copy the pyiron object including the HDF5 file to an new location
Args:
new_project (ProjectHDFio): new location
Returns:
PyironObject: a copy of the pyiron object
"""
raise NotImplementedError("copy() should be implemented in the derived class")
def move_to(self, new_project):
"""
Move the pyiron object including the HDF5 file to an new location
Args:
new_project (ProjectHDFio): new location
"""
raise NotImplementedError("move() should be implemented in the derived class")
def to_hdf(self, hdf, group_name="group"):
"""
Store the PyironObject in an HDF5 file
Args:
hdf (ProjectHDFio): HDF5 group object
group_name (str): HDF5 subgroup name - optional
"""
raise NotImplementedError("to_hdf() should be implemented in the derived class")
def from_hdf(self, hdf, group_name="group"):
"""
Restore the PyironObject from an HDF5 file
Args:
hdf (ProjectHDFio): HDF5 group object
group_name (str): HDF5 subgroup name - optional
"""
raise NotImplementedError(
"from_hdf() should be implemented in the derived class"
)
|
en
| 0.742904
|
# coding: utf-8 # Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department # Distributed under the terms of "New BSD License", see the LICENSE file. Template class to list the required properties and functions for every pyiron object. Template class to list the required properties and functions for every pyiron object. Every pyiron object should have the ability to be stored in the database Returns: int: object id Every pyiron object should have the ability to be stored in the database Args: new_id (int): object id If the pyiron object belongs to a series of objects, series object is linked by the master id Returns: int: master id If the pyiron object belongs to a series of objects, series object is linked by the master id Args: master_id (int): master id If the pyiron object belongs to a serial series of objects, the predecessor is linked by the parent id Returns: int: parent id If the pyiron object belongs to a serial series of objects, the predecessor is linked by the parent id Args: parent_id (int): parent id If the pyiron object is a meta object which includes a series of objects these objects ids are listed as child ids. Returns: list: list of child ids Store the pyiron object in the HDF5 file an create a corresponding database entry. Remove the pyiron obect from the database and delete the HDF5 file Load a pyiron object from the database Args: job_specifier (str, int): identifier of the pyiron object - this needs to be unique in the project. convert_to_object (bool): [True/False] - it is faster to only load the HDF5 access but the pyiron object offers a whole more functionality. Returns: PyironObject: the pyiron object Inspect a pyiron object from the database - the same like load(self, job_specifier, convert_to_object=False). The inspect mode provides quick access to the HDF5 file without loading the full object, which is especially helpful if only a specific section of the output is required. 
Args: job_specifier (str, int): identifier of the pyiron object - this needs to be unique in the project. Returns: PyironObject: a reduces pyiron object like JobPath Copy the pyiron object - this copies only the link to the HDF5 file - not the content of the HDF5 file. Returns: PyironObject: a copy of the pyiron object Copy the pyiron object including the HDF5 file to an new location Args: new_project (ProjectHDFio): new location Returns: PyironObject: a copy of the pyiron object Move the pyiron object including the HDF5 file to an new location Args: new_project (ProjectHDFio): new location Store the PyironObject in an HDF5 file Args: hdf (ProjectHDFio): HDF5 group object group_name (str): HDF5 subgroup name - optional Restore the PyironObject from an HDF5 file Args: hdf (ProjectHDFio): HDF5 group object group_name (str): HDF5 subgroup name - optional
| 2.198365
| 2
|
hotel/admin.py
|
Hotel-online/hotel-serveAntes
| 0
|
6629376
|
<filename>hotel/admin.py<gh_stars>0
from django.contrib import admin
from hotel.models import Reservacion
from hotel.models import CategoriaHabitacion
from hotel.models import Habitacion
from hotel.models import DetalleReservacion
from hotel.models import Forma_de_pago
from hotel.models import Cliente
from hotel.models import Venta
from hotel.models import DetalleVenta
from hotel.models import Doc_Type
from hotel.models import Car
# Register your models here.
# Register every hotel model with the default admin site, in the same
# order as before.
for _model in (
    Reservacion,
    CategoriaHabitacion,
    Habitacion,
    DetalleReservacion,
    Forma_de_pago,
    Cliente,
    Venta,
    DetalleVenta,
    Doc_Type,
    Car,
):
    admin.site.register(_model)
|
<filename>hotel/admin.py<gh_stars>0
from django.contrib import admin
from hotel.models import Reservacion
from hotel.models import CategoriaHabitacion
from hotel.models import Habitacion
from hotel.models import DetalleReservacion
from hotel.models import Forma_de_pago
from hotel.models import Cliente
from hotel.models import Venta
from hotel.models import DetalleVenta
from hotel.models import Doc_Type
from hotel.models import Car
# Register your models here.
admin.site.register(Reservacion)
admin.site.register(CategoriaHabitacion)
admin.site.register(Habitacion)
admin.site.register(DetalleReservacion)
admin.site.register(Forma_de_pago)
admin.site.register(Cliente)
admin.site.register(Venta)
admin.site.register(DetalleVenta)
admin.site.register(Doc_Type)
admin.site.register(Car)
|
en
| 0.968259
|
# Register your models here.
| 1.575525
| 2
|
tests/actions/conftest.py
|
lfpll/great_expectations
| 1
|
6629377
|
import pytest
from great_expectations.data_context import BaseDataContext
from great_expectations.data_context.types.base import DataContextConfig
@pytest.fixture(scope="module")
def basic_data_context_config_for_validation_operator():
    """Minimal in-memory ``DataContextConfig`` for the action tests.

    Wires four stores and two validation operators; each operator stores
    validation results and then extracts evaluation parameters.
    """
    return DataContextConfig(
        config_version=1,
        plugins_directory=None,
        evaluation_parameter_store_name="evaluation_parameter_store",
        expectations_store_name="expectations_store",
        datasources={},
        # In-memory store implementations, keyed by the names referenced above.
        stores={
            "expectations_store": {"class_name": "ExpectationsStore"},
            "evaluation_parameter_store": {"class_name": "EvaluationParameterStore"},
            "validation_result_store": {"class_name": "ValidationsStore"},
            "metrics_store": {"class_name": "MetricStore"},
        },
        validations_store_name="validation_result_store",
        data_docs_sites={},
        validation_operators={
            "store_val_res_and_extract_eval_params": {
                "class_name": "ActionListValidationOperator",
                "action_list": [
                    {
                        "name": "store_validation_result",
                        "action": {
                            "class_name": "StoreValidationResultAction",
                            "target_store_name": "validation_result_store",
                        },
                    },
                    {
                        "name": "extract_and_store_eval_parameters",
                        "action": {
                            "class_name": "StoreEvaluationParametersAction",
                            "target_store_name": "evaluation_parameter_store",
                        },
                    },
                ],
            },
            # Same action list, but run by the warning/failure operator.
            "errors_and_warnings_validation_operator": {
                "class_name": "WarningAndFailureExpectationSuitesValidationOperator",
                "action_list": [
                    {
                        "name": "store_validation_result",
                        "action": {
                            "class_name": "StoreValidationResultAction",
                            "target_store_name": "validation_result_store",
                        },
                    },
                    {
                        "name": "extract_and_store_eval_parameters",
                        "action": {
                            "class_name": "StoreEvaluationParametersAction",
                            "target_store_name": "evaluation_parameter_store",
                        },
                    },
                ],
            },
        },
    )
@pytest.fixture(scope="module")
def basic_in_memory_data_context_for_validation_operator(
    basic_data_context_config_for_validation_operator,
):
    """Module-scoped in-memory data context built from the config fixture."""
    config = basic_data_context_config_for_validation_operator
    return BaseDataContext(config)
|
import pytest
from great_expectations.data_context import BaseDataContext
from great_expectations.data_context.types.base import DataContextConfig
@pytest.fixture(scope="module")
def basic_data_context_config_for_validation_operator():
return DataContextConfig(
config_version=1,
plugins_directory=None,
evaluation_parameter_store_name="evaluation_parameter_store",
expectations_store_name="expectations_store",
datasources={},
stores={
"expectations_store": {"class_name": "ExpectationsStore"},
"evaluation_parameter_store": {"class_name": "EvaluationParameterStore"},
"validation_result_store": {"class_name": "ValidationsStore"},
"metrics_store": {"class_name": "MetricStore"},
},
validations_store_name="validation_result_store",
data_docs_sites={},
validation_operators={
"store_val_res_and_extract_eval_params": {
"class_name": "ActionListValidationOperator",
"action_list": [
{
"name": "store_validation_result",
"action": {
"class_name": "StoreValidationResultAction",
"target_store_name": "validation_result_store",
},
},
{
"name": "extract_and_store_eval_parameters",
"action": {
"class_name": "StoreEvaluationParametersAction",
"target_store_name": "evaluation_parameter_store",
},
},
],
},
"errors_and_warnings_validation_operator": {
"class_name": "WarningAndFailureExpectationSuitesValidationOperator",
"action_list": [
{
"name": "store_validation_result",
"action": {
"class_name": "StoreValidationResultAction",
"target_store_name": "validation_result_store",
},
},
{
"name": "extract_and_store_eval_parameters",
"action": {
"class_name": "StoreEvaluationParametersAction",
"target_store_name": "evaluation_parameter_store",
},
},
],
},
},
)
@pytest.fixture(scope="module")
def basic_in_memory_data_context_for_validation_operator(
basic_data_context_config_for_validation_operator,
):
return BaseDataContext(basic_data_context_config_for_validation_operator)
|
none
| 1
| 2.002444
| 2
|
|
odap/propagators.py
|
ReeceHumphreys/ODAP
| 3
|
6629378
|
import numpy as np
from numba import njit as jit, prange
from numpy import pi, sin, cos, sqrt
from scipy import integrate
from scipy.special import iv
# User-defined libraries
import data.planetary_data as pd
import odap.aerodynamics as aero
from .utils import E_to_M, Nu_to_E
def null_perts():
    """Return a perturbation-flag dict with every perturbation disabled."""
    return dict.fromkeys(("J2", "aero", "moon_grav", "solar_grav"), False)
class OrbitPropagator:
    """Propagate orbital elements for a set of fragments.

    Two modes: ``propagate_orbit`` performs pure Keplerian propagation of
    the initial states, while ``propagate_perturbations`` integrates
    element rates (atmospheric drag and/or J2, per the ``perts`` flags).
    """

    def __init__(
        self,
        states0,
        A,
        M,
        tspan,
        dt,
        rv=False,
        cb=pd.earth,
        perts=null_perts(),
    ):
        # NOTE(review): ``perts=null_perts()`` is evaluated once at class
        # definition time, so all callers omitting it share one dict.
        # Need to add support for initializing with radius and velocity
        if rv:
            self.inital_state = 0
        else:
            self.inital_state = states0
        # Setting the areas and masses
        self.A = A
        self.M = M
        # Integration information
        self.tspan = tspan
        self.dt = dt
        # Central body properties
        self.cb = cb
        # Defining perturbations being considered
        self.perts = perts
        # Defining constants for aerodynamic drag
        if self.perts["aero"]:
            # Series coefficients for the semi-major-axis drag-rate
            # expansion in eccentricity -- presumably a King-Hele-style
            # expansion; TODO confirm against the source derivation.
            self.K_a = np.matrix(
                [
                    [1, 0, 0, 0, 0, 0, 0],
                    [0, 2, 0, 0, 0, 0, 0],
                    [3 / 4, 0, 3 / 4, 0, 0, 0, 0],
                    [0, 3 / 4, 0, 1 / 4, 0, 0, 0],
                    [21 / 64, 0, 28 / 64, 0, 7 / 64, 0, 0],
                    [0, 30 / 64, 0, 15 / 64, 0, 3 / 64, 0],
                ]
            )
            # Companion coefficients for the eccentricity drag rate.
            self.K_e = np.matrix(
                [
                    [0, 1, 0, 0, 0, 0, 0],
                    [1 / 2, 0, 1 / 2, 0, 0, 0, 0],
                    [0, -5 / 8, 0, 1 / 8, 0, 0, 0],
                    [-5 / 16, 0, -4 / 16, 0, 1 / 16, 0, 0],
                    [0, -18 / 128, 0, -1 / 128, 0, 3 / 128, 0],
                    [-18 / 256, 0, -19 / 256, 0, 2 / 256, 0, 3 / 256],
                ]
            )

    def diffy_q(self, t, state):
        """Right-hand side for ``solve_ivp``: per-fragment element rates.

        ``state`` is the flattened (e, a, i, Omega, omega) arrays, one
        entry per fragment (fragment count taken from ``len(self.A)``).
        """
        e, a, i, Omega, omega = state.reshape(5, len(self.A))
        N_f = len(self.A)
        # Central body information
        mu = self.cb["mu"]
        radius = self.cb["radius"]  # [m]
        J2 = self.cb["J2"]
        # Local variables (rates default to zero if a perturbation is off)
        delta_e = np.zeros_like(e)
        delta_a = np.zeros_like(a)
        delta_i = np.zeros_like(i)
        delta_Omega = np.zeros_like(Omega)
        delta_omega = np.zeros_like(omega)
        # Current orbital information
        peri = a * (1 - e)  # perigee radius [m]
        p = a * (1 - e**2)  # [m] (semi-latus rectum)
        n = np.sqrt(mu / a**3)  # (mean motion)
        ############### Drag effects ###############
        if self.perts["aero"]:
            h_p = peri - radius  # perigee altitude [m]
            rho = aero.atmosphere_density(h_p / 1e3)  # [kg * m^-3]
            H = aero.scale_height(h_p / 1e3) * 1e3  # [m]
            z = a * e / H
            Cd = 0.7
            tilt_factor = 1
            # NOTE(review): ballistic term uses only A[0]/M[0], i.e. the
            # first fragment's area/mass for every fragment -- confirm.
            delta = Cd * (self.A[0] * tilt_factor) / self.M[0]
            # Powers of e up to e^5, paired with Bessel functions I_0..I_6.
            e_T = np.array(
                [np.ones_like(e), e, e**2, e**3, e**4, e**5]
            )
            I_T = np.array([iv(i, z) for i in range(7)])
            k_a = delta * np.sqrt(mu * a) * rho
            k_e = k_a / a
            # CASE e < 0.001 (near-circular: only a decays)
            delta_e = np.zeros_like(e)
            delta_a = -k_a
            # CASE e>= 0.001 (eccentric: series expansion plus truncation term)
            I = e >= 0.001
            trunc_err_a = (
                a[I] ** 2 * rho[I] * np.exp(-z[I]) * iv(0, z[I]) * e[I] ** 6
            )
            trunc_err_e = (
                a[I] * rho[I] * np.exp(-z[I]) * iv(1, z[I]) * e[I] ** 6
            )
            transform_e = e_T.T.dot(self.K_e) * I_T
            coef_e = np.array([transform_e[i, i] for i in range(N_f)])[I]
            transform_a = e_T.T.dot(self.K_a) * I_T
            coef_a = np.array([transform_a[i, i] for i in range(N_f)])[I]
            delta_e[I] = -k_e[I] * np.exp(-z[I]) * (coef_e + trunc_err_e)
            delta_a[I] = -k_a[I] * np.exp(-z[I]) * (coef_a + trunc_err_a)
            delta_e[np.isnan(delta_e)] = 0
            delta_a[np.isnan(delta_a)] = 0
            # Deorbit check: freeze decay below 100 km perigee altitude
            J = h_p < 100 * 1e3
            delta_a[J] = 0
            delta_e[J] = 0
        ############### J2 effects ###############
        if self.perts["J2"]:
            # Secular J2 drift of argument of perigee and RAAN.
            base = (3 / 2) * self.cb["J2"] * (radius**2 / p**2) * n
            i = np.deg2rad(i)
            delta_omega = base * (2 - (5 / 2) * np.sin(i) ** 2)
            delta_Omega = -base * np.cos(i)
            # NOTE(review): taking rates modulo 360 deg is unusual for a
            # derivative; confirm this is intended.
            delta_omega = np.rad2deg(delta_omega) % 360
            delta_Omega = np.rad2deg(delta_Omega) % 360
        return np.concatenate(
            (delta_e, delta_a, delta_i, delta_Omega, delta_omega)
        )

    # Performing a regular propagation, i.e. w/ perturbations
    def propagate_perturbations(self):
        """Integrate element rates over ``tspan`` and return the histories.

        Returns ``(de, da, di, dOmega, domega, dnu, dp)`` arrays, one row
        per fragment; the true anomaly is randomized, not integrated.
        """
        # Initial states
        # NOTE(review): ``inital_state[5, :]`` yields one row, not five
        # arrays -- this unpacking assumption needs confirming upstream.
        a0, e0, i0, Omega0, omega0 = self.inital_state[5, :]
        y0 = np.concatenate((e0, a0, i0, Omega0, omega0))
        # Propagation time
        T_avg = np.mean(self.inital_state[-1, 8, :])
        times = np.arange(self.tspan[0], self.tspan[-1], self.dt)
        output = integrate.solve_ivp(
            self.diffy_q, self.tspan, y0, t_eval=times
        )
        # Unpacking output (Need to drop first timestep as sudden introduction of drag causes discontinuities)
        N_f = len(self.A)
        de = output.y[0:N_f, 1:]
        da = output.y[N_f : 2 * N_f, 1:]
        di = output.y[2 * N_f : 3 * N_f, 1:]
        dOmega = output.y[3 * N_f : 4 * N_f, 1:]
        domega = output.y[4 * N_f :, 1:]
        # True anomaly is sampled uniformly in [0, 360) per timestep.
        dnu = np.random.uniform(low=0.0, high=360.0, size=domega.shape)
        dp = da * (1 - de**2)
        # Results
        return de, da, di, dOmega, domega, dnu, dp

    # Performing a Keplerian propagation, i.e. w/o perturbations
    def propagate_orbit(self):
        """Two-body propagation: advance each fragment's true anomaly.

        Returns an array of shape ``(n_times, n_fragments, 6)`` where only
        column 5 (true anomaly) changes over time.
        """
        a0: np.ndarray = self.inital_state[:, 0]
        e0: np.ndarray = self.inital_state[:, 1]
        i0: np.ndarray = self.inital_state[:, 2]
        Omega0: np.ndarray = self.inital_state[:, 3]
        omega0: np.ndarray = self.inital_state[:, 4]
        nu0: np.ndarray = self.inital_state[:, 5]
        times = np.arange(self.tspan[0], self.tspan[-1], self.dt)
        # # Mean anomaly rate of change
        n = sqrt(self.cb["mu"] / a0**3)
        # Mean anomaly over time
        # NOTE(review): ``% 2 * np.pi`` parses as ``(x % 2) * pi``, not
        # ``x % (2*pi)`` -- likely a precedence bug; confirm before fixing.
        M0 = E_to_M(Nu_to_E(nu0, e0), e0) % 2 * np.pi
        M_dt = n[None, :] * times[:, None]
        M_t = M0 + M_dt
        M_t = np.deg2rad(np.rad2deg(np.mod(M_t, 2 * pi)))
        # Eccentric anomaly over time. Note need to use E_t in rad, thus convert to deg after using it in
        # x1 and x2
        E_t = M2E(e0, M_t.T)
        x1 = sqrt(1 + e0)[:, None] * sin(E_t / 2)
        x2 = sqrt(1 - e0)[:, None] * cos(E_t / 2)
        # # True anomaly over time
        nu_t = 2 * np.arctan2(x1, x2) % (2 * pi)
        n_times = nu_t.shape[1]
        states = np.empty(shape=(n_times, len(a0), 6))
        for i in prange(n_times):
            state = self.inital_state.copy()
            state[:, 5] = nu_t[:, i]
            states[i] = state
        # Update internal states
        return states
# Modified from OrbitalPy.utilities
@jit(parallel=True, fastmath=True)
def M2E(e_deb, M_t, tolerance=1e-5):
    # Convert mean anomaly to eccentric anomaly (Kepler's equation),
    # element-wise over (n_debris, n_times); parallelized with numba.
    # Implemented from [A Practical Method for Solving the Kepler Equation][1]
    # by <NAME> from the U.S. Naval Observatory
    # [1]: http://murison.alpheratz.net/dynamics/twobody/KeplerIterations_summary.pdf
    n_deb = M_t.shape[0]
    n_times = M_t.shape[1]
    E_t = np.empty_like(M_t)
    for i in prange(n_deb):
        e = e_deb[i]
        for j in prange(n_times):
            M = M_t[i, j]
            MAX_ITERATIONS = 100
            Mnorm = np.mod(M, 2 * pi)
            # Third-order series starter for E (Murison's initial guess).
            E0 = M + (
                -1 / 2 * e**3
                + e
                + (e**2 + 3 / 2 * cos(M) * e**3) * cos(M)
            ) * sin(M)
            dE = tolerance + 1
            count = 0
            while dE > tolerance:
                # Higher-order Newton-style correction step.
                t1 = cos(E0)
                t2 = -1 + e * t1
                t3 = sin(E0)
                t4 = e * t3
                t5 = -E0 + t4 + Mnorm
                t6 = t5 / (1 / 2 * t5 * t4 / t2 + t2)
                E = E0 - t5 / ((1 / 2 * t3 - 1 / 6 * t1 * t6) * e * t6 + t2)
                dE = np.abs(E - E0)
                E0 = E
                count += 1
                if count == MAX_ITERATIONS:
                    # NOTE(review): only warns -- the loop keeps running if
                    # still above tolerance; a non-converging case spins.
                    print("Did not converge, increase number of iterations")
            E_t[i, j] = E
    return E_t
|
import numpy as np
from numba import njit as jit, prange
from numpy import pi, sin, cos, sqrt
from scipy import integrate
from scipy.special import iv
# User defined libearayr
import data.planetary_data as pd
import odap.aerodynamics as aero
from .utils import E_to_M, Nu_to_E
def null_perts():
return {
"J2": False,
"aero": False,
"moon_grav": False,
"solar_grav": False,
}
class OrbitPropagator:
def __init__(
self,
states0,
A,
M,
tspan,
dt,
rv=False,
cb=pd.earth,
perts=null_perts(),
):
# Need to add support for initializing with radius and velocity
if rv:
self.inital_state = 0
else:
self.inital_state = states0
# Setting the areas and masses
self.A = A
self.M = M
# Integration information
self.tspan = tspan
self.dt = dt
# Central body properties
self.cb = cb
# Defining perturbations being considered
self.perts = perts
# Defining constants for aerodynamic drag
if self.perts["aero"]:
self.K_a = np.matrix(
[
[1, 0, 0, 0, 0, 0, 0],
[0, 2, 0, 0, 0, 0, 0],
[3 / 4, 0, 3 / 4, 0, 0, 0, 0],
[0, 3 / 4, 0, 1 / 4, 0, 0, 0],
[21 / 64, 0, 28 / 64, 0, 7 / 64, 0, 0],
[0, 30 / 64, 0, 15 / 64, 0, 3 / 64, 0],
]
)
self.K_e = np.matrix(
[
[0, 1, 0, 0, 0, 0, 0],
[1 / 2, 0, 1 / 2, 0, 0, 0, 0],
[0, -5 / 8, 0, 1 / 8, 0, 0, 0],
[-5 / 16, 0, -4 / 16, 0, 1 / 16, 0, 0],
[0, -18 / 128, 0, -1 / 128, 0, 3 / 128, 0],
[-18 / 256, 0, -19 / 256, 0, 2 / 256, 0, 3 / 256],
]
)
def diffy_q(self, t, state):
e, a, i, Omega, omega = state.reshape(5, len(self.A))
N_f = len(self.A)
# Central body information
mu = self.cb["mu"]
radius = self.cb["radius"] # [m]
J2 = self.cb["J2"]
# Local variables
delta_e = np.zeros_like(e)
delta_a = np.zeros_like(a)
delta_i = np.zeros_like(i)
delta_Omega = np.zeros_like(Omega)
delta_omega = np.zeros_like(omega)
# Current orbital information
peri = a * (1 - e) # [m]
p = a * (1 - e**2) # [m] (Semi parameter)
n = np.sqrt(mu / a**3) # (Mea motion)
############### Drag effects ###############
if self.perts["aero"]:
h_p = peri - radius # [m]
rho = aero.atmosphere_density(h_p / 1e3) # [kg * m^-3]
H = aero.scale_height(h_p / 1e3) * 1e3 # [m]
z = a * e / H
Cd = 0.7
tilt_factor = 1
delta = Cd * (self.A[0] * tilt_factor) / self.M[0]
e_T = np.array(
[np.ones_like(e), e, e**2, e**3, e**4, e**5]
)
I_T = np.array([iv(i, z) for i in range(7)])
k_a = delta * np.sqrt(mu * a) * rho
k_e = k_a / a
# CASE e < 0.001
delta_e = np.zeros_like(e)
delta_a = -k_a
# CASE e>= 0.001
I = e >= 0.001
trunc_err_a = (
a[I] ** 2 * rho[I] * np.exp(-z[I]) * iv(0, z[I]) * e[I] ** 6
)
trunc_err_e = (
a[I] * rho[I] * np.exp(-z[I]) * iv(1, z[I]) * e[I] ** 6
)
transform_e = e_T.T.dot(self.K_e) * I_T
coef_e = np.array([transform_e[i, i] for i in range(N_f)])[I]
transform_a = e_T.T.dot(self.K_a) * I_T
coef_a = np.array([transform_a[i, i] for i in range(N_f)])[I]
delta_e[I] = -k_e[I] * np.exp(-z[I]) * (coef_e + trunc_err_e)
delta_a[I] = -k_a[I] * np.exp(-z[I]) * (coef_a + trunc_err_a)
delta_e[np.isnan(delta_e)] = 0
delta_a[np.isnan(delta_a)] = 0
# Deorbit check
J = h_p < 100 * 1e3
delta_a[J] = 0
delta_e[J] = 0
############### J2 effects ###############
if self.perts["J2"]:
base = (3 / 2) * self.cb["J2"] * (radius**2 / p**2) * n
i = np.deg2rad(i)
delta_omega = base * (2 - (5 / 2) * np.sin(i) ** 2)
delta_Omega = -base * np.cos(i)
delta_omega = np.rad2deg(delta_omega) % 360
delta_Omega = np.rad2deg(delta_Omega) % 360
return np.concatenate(
(delta_e, delta_a, delta_i, delta_Omega, delta_omega)
)
# Performing a regular propagation, i.e. w/ perturbations
def propagate_perturbations(self):
# Initial states
a0, e0, i0, Omega0, omega0 = self.inital_state[5, :]
y0 = np.concatenate((e0, a0, i0, Omega0, omega0))
# Propagation time
T_avg = np.mean(self.inital_state[-1, 8, :])
times = np.arange(self.tspan[0], self.tspan[-1], self.dt)
output = integrate.solve_ivp(
self.diffy_q, self.tspan, y0, t_eval=times
)
# Unpacking output (Need to drop first timestep as sudden introduction of drag causes discontinuities)
N_f = len(self.A)
de = output.y[0:N_f, 1:]
da = output.y[N_f : 2 * N_f, 1:]
di = output.y[2 * N_f : 3 * N_f, 1:]
dOmega = output.y[3 * N_f : 4 * N_f, 1:]
domega = output.y[4 * N_f :, 1:]
dnu = np.random.uniform(low=0.0, high=360.0, size=domega.shape)
dp = da * (1 - de**2)
# Results
return de, da, di, dOmega, domega, dnu, dp
# Performing a Keplerian propagation, i.e. w/o perturbations
def propagate_orbit(self):
a0: np.ndarray = self.inital_state[:, 0]
e0: np.ndarray = self.inital_state[:, 1]
i0: np.ndarray = self.inital_state[:, 2]
Omega0: np.ndarray = self.inital_state[:, 3]
omega0: np.ndarray = self.inital_state[:, 4]
nu0: np.ndarray = self.inital_state[:, 5]
times = np.arange(self.tspan[0], self.tspan[-1], self.dt)
# # Mean anomaly rate of change
n = sqrt(self.cb["mu"] / a0**3)
# Mean anomaly over time
M0 = E_to_M(Nu_to_E(nu0, e0), e0) % 2 * np.pi
M_dt = n[None, :] * times[:, None]
M_t = M0 + M_dt
M_t = np.deg2rad(np.rad2deg(np.mod(M_t, 2 * pi)))
# Eccentric anomaly over time. Note need to use E_t in rad, thus convert to deg after using it in
# x1 and x2
E_t = M2E(e0, M_t.T)
x1 = sqrt(1 + e0)[:, None] * sin(E_t / 2)
x2 = sqrt(1 - e0)[:, None] * cos(E_t / 2)
# # True anomaly over time
nu_t = 2 * np.arctan2(x1, x2) % (2 * pi)
n_times = nu_t.shape[1]
states = np.empty(shape=(n_times, len(a0), 6))
for i in prange(n_times):
state = self.inital_state.copy()
state[:, 5] = nu_t[:, i]
states[i] = state
# Update internal states
return states
# Modified from OrbitalPy.utilities
@jit(parallel=True, fastmath=True)
def M2E(e_deb, M_t, tolerance=1e-5):
# Convert mean anomaly to eccentric anomaly.
# Implemented from [A Practical Method for Solving the Kepler Equation][1]
# by <NAME> from the U.S. Naval Observatory
# [1]: http://murison.alpheratz.net/dynamics/twobody/KeplerIterations_summary.pdf
n_deb = M_t.shape[0]
n_times = M_t.shape[1]
E_t = np.empty_like(M_t)
for i in prange(n_deb):
e = e_deb[i]
for j in prange(n_times):
M = M_t[i, j]
MAX_ITERATIONS = 100
Mnorm = np.mod(M, 2 * pi)
E0 = M + (
-1 / 2 * e**3
+ e
+ (e**2 + 3 / 2 * cos(M) * e**3) * cos(M)
) * sin(M)
dE = tolerance + 1
count = 0
while dE > tolerance:
t1 = cos(E0)
t2 = -1 + e * t1
t3 = sin(E0)
t4 = e * t3
t5 = -E0 + t4 + Mnorm
t6 = t5 / (1 / 2 * t5 * t4 / t2 + t2)
E = E0 - t5 / ((1 / 2 * t3 - 1 / 6 * t1 * t6) * e * t6 + t2)
dE = np.abs(E - E0)
E0 = E
count += 1
if count == MAX_ITERATIONS:
print("Did not converge, increase number of iterations")
E_t[i, j] = E
return E_t
|
en
| 0.722194
|
# User defined libearayr # Need to add support for initializing with radius and velocity # Setting the areas and masses # Integration information # Central body properties # Defining perturbations being considered # Defining constants for aerodynamic drag # Central body information # [m] # Local variables # Current orbital information # [m] # [m] (Semi parameter) # (Mea motion) ############### Drag effects ############### # [m] # [kg * m^-3] # [m] # CASE e < 0.001 # CASE e>= 0.001 # Deorbit check ############### J2 effects ############### # Performing a regular propagation, i.e. w/ perturbations # Initial states # Propagation time # Unpacking output (Need to drop first timestep as sudden introduction of drag causes discontinuities) # Results # Performing a Keplerian propagation, i.e. w/o perturbations # # Mean anomaly rate of change # Mean anomaly over time # Eccentric anomaly over time. Note need to use E_t in rad, thus convert to deg after using it in # x1 and x2 # # True anomaly over time # Update internal states # Modified from OrbitalPy.utilities # Convert mean anomaly to eccentric anomaly. # Implemented from [A Practical Method for Solving the Kepler Equation][1] # by <NAME> from the U.S. Naval Observatory # [1]: http://murison.alpheratz.net/dynamics/twobody/KeplerIterations_summary.pdf
| 2.426132
| 2
|
data2/generator.py
|
giordanoDaloisio/fairness
| 0
|
6629379
|
<reponame>giordanoDaloisio/fairness
import numpy as np
import pandas as pd
from sklearn.datasets import make_classification
import argparse

# BUILD A SYNTHETIC DATASET
parser = argparse.ArgumentParser(description="Generate a synthetic dataset")
parser.add_argument('-s', '--samples', type=int, help='Number of samples')
parser.add_argument('-c', '--classes', type=int, help='Number of classes')
parser.add_argument('-f', '--features', type=int, help='Number of features')
parser.add_argument('-i', '--informatives', type=int,
                    help='Number of informative features')
parser.add_argument('-sv', '--sensitive_vars', type=int,
                    help='Number of sensitive variables')
parser.add_argument('-n', '--name', type=str, help='Name of the file')
args = parser.parse_args()

data = make_classification(n_samples=args.samples, n_features=args.features,
                           n_classes=args.classes, n_informative=args.informatives)
df = pd.DataFrame(data[0])

# Append one binary sensitive column per requested sensitive variable.
# Split sizes are computed so they always sum to the sample count: the
# original round(samples/2) + round(samples/2) differs from `samples`
# when it is odd, which made the column assignment fail.
n_zeros = args.samples // 2
n_ones = args.samples - n_zeros
for idx in range(1, args.sensitive_vars + 1):
    sens_var0 = np.full(shape=n_zeros, fill_value=0, dtype=int)
    sens_var1 = np.full(shape=n_ones, fill_value=1, dtype=int)
    df['s' + str(idx)] = np.hstack((sens_var0, sens_var1))

# Attach labels while rows are still in generation order, THEN shuffle:
# assigning the plain label array after `sample(frac=1)` would pair the
# labels with the shuffled rows positionally, decoupling each row's
# features from its true label.
df['y'] = data[1]
df = df.sample(frac=1)

df.to_csv(args.name + '.csv', index=False)
|
import numpy as np
import pandas as pd
from sklearn.datasets import make_classification
import argparse
# BUILD A SYNTHETIC DATASET
parser = argparse.ArgumentParser(description="Generate a synthetic dataset")
parser.add_argument('-s', '--samples', type=int, help='Number of samples')
parser.add_argument('-c', '--classes', type=int, help='Number of classes')
parser.add_argument('-f', '--features', type=int, help='Number of features')
parser.add_argument('-i', '--informatives', type=int,
help='Number of informative features')
parser.add_argument('-sv', '--sensitive_vars', type=int,
help='Number of sensitive variables')
parser.add_argument('-n', '--name', type=str, help='Name of the file')
args = parser.parse_args()
data = make_classification(n_samples=args.samples, n_features=args.features,
n_classes=args.classes, n_informative=args.informatives)
df = pd.DataFrame(data[0])
i = 0
for vars in range(args.sensitive_vars):
i += 1
sens_var0 = np.full(shape=round(args.samples/2), fill_value=0, dtype=int)
sens_var1 = np.full(shape=round(args.samples/2), fill_value=1, dtype=int)
df['s'+str(i)] = np.hstack((sens_var0, sens_var1))
df = df.sample(frac=1)
df['y'] = data[1]
df.to_csv(args.name+'.csv', index=False)
|
en
| 0.382746
|
# BUILD A SYNTHETIC DATASET
| 3.204121
| 3
|
hdijupyterutils/hdijupyterutils/tests/test_configuration.py
|
viaduct-ai/sparkmagic
| 1
|
6629380
|
from mock import MagicMock
from nose.tools import assert_equals, assert_not_equals, raises, with_setup
import json
from hdijupyterutils.configuration import override, override_all, with_override
from hdijupyterutils.configuration import _merge_conf
# This is a sample implementation of how a module would use the config methods.
# We'll use these three functions to test it works.
d = {}
path = "~/.testing/config.json"
original_value = 0
def module_override(config, value):
    """Override one config entry in the module-level store ``d``, persisted at ``path``."""
    global d, path
    override(d, path, config, value)
def module_override_all(obj):
    """Replace the entire module-level override store ``d`` with ``obj``."""
    global d
    override_all(d, obj)
# Configs
@with_override(d, path)
def my_config():
    """Sample config accessor: returns ``original_value`` unless the
    decorator supplies an override for 'my_config' from ``d``."""
    global original_value
    return original_value
@with_override(d, path)
def my_config_2():
    """Second sample config accessor, used to check overrides are per-key."""
    global original_value
    return original_value
# Test helper functions
def _setup():
    """Clear all overrides before each test."""
    module_override_all({})
def _teardown():
    """Clear all overrides after each test."""
    module_override_all({})
# Unit tests begin
@with_setup(_setup, _teardown)
def test_original_value_without_overrides():
    """With no overrides set, the config returns its default value."""
    assert_equals(original_value, my_config())
@with_setup(_setup, _teardown)
def test_original_value_with_overrides():
    """An override registered under the config's name shadows the default."""
    new_value = 2
    module_override(my_config.__name__, new_value)
    assert_equals(new_value, my_config())
@with_setup(_setup, _teardown)
def test_original_values_when_others_override():
    """Overriding one config must not affect a sibling config."""
    new_value = 2
    module_override(my_config.__name__, new_value)
    assert_equals(new_value, my_config())
    assert_equals(original_value, my_config_2())
@with_setup(_setup, _teardown)
def test_resetting_values_when_others_override():
    """Replacing the store with {} restores every config to its default."""
    new_value = 2
    module_override(my_config.__name__, new_value)
    assert_equals(new_value, my_config())
    assert_equals(original_value, my_config_2())

    # Reset
    module_override_all({})

    assert_equals(original_value, my_config())
    assert_equals(original_value, my_config_2())
@with_setup(_setup, _teardown)
def test_configuration_merge_required():
    """``_merge_conf`` must overlay required settings onto the current
    session config in place: scalar keys are replaced, nested ``conf``
    entries are merged, and untouched keys survive."""
    current_session_confs = {
        "archives": ["s3://my-test-archive"],
        "numExecutors": 5,
        "conf": {
            "spark.dynamicAllocation.enabled":
            "false",
            "spark.sql.shuffle.partitions":
            20,
            "spark.yarn.tags":
            "my=tag,wee=wa",
            "spark.jars.packages":
            "net.snowflake:spark-snowflake_2.11:2.5.1-spark_2.4"
        }
    }
    required_session_confs = {
        "conf": {
            "spark.yarn.tags":
            "created-by=vaatu-raava"
        },
        "numExecutors": 10
    }
    _merge_conf(current_session_confs, required_session_confs)
    # numExecutors and spark.yarn.tags take the required values; all other
    # current settings are preserved.
    assert_equals(current_session_confs, {
        "archives": ["s3://my-test-archive"],
        "numExecutors": 10,
        "conf": {
            "spark.dynamicAllocation.enabled":
            "false",
            "spark.sql.shuffle.partitions":
            20,
            "spark.yarn.tags":
            "created-by=vaatu-raava",
            "spark.jars.packages":
            "net.snowflake:spark-snowflake_2.11:2.5.1-spark_2.4"
        }
    })
|
from mock import MagicMock
from nose.tools import assert_equals, assert_not_equals, raises, with_setup
import json
from hdijupyterutils.configuration import override, override_all, with_override
from hdijupyterutils.configuration import _merge_conf
# This is a sample implementation of how a module would use the config methods.
# We'll use these three functions to test it works.
d = {}
path = "~/.testing/config.json"
original_value = 0
def module_override(config, value):
global d, path
override(d, path, config, value)
def module_override_all(obj):
global d
override_all(d, obj)
# Configs
@with_override(d, path)
def my_config():
global original_value
return original_value
@with_override(d, path)
def my_config_2():
global original_value
return original_value
# Test helper functions
def _setup():
module_override_all({})
def _teardown():
module_override_all({})
# Unit tests begin
@with_setup(_setup, _teardown)
def test_original_value_without_overrides():
assert_equals(original_value, my_config())
@with_setup(_setup, _teardown)
def test_original_value_with_overrides():
new_value = 2
module_override(my_config.__name__, new_value)
assert_equals(new_value, my_config())
@with_setup(_setup, _teardown)
def test_original_values_when_others_override():
new_value = 2
module_override(my_config.__name__, new_value)
assert_equals(new_value, my_config())
assert_equals(original_value, my_config_2())
@with_setup(_setup, _teardown)
def test_resetting_values_when_others_override():
new_value = 2
module_override(my_config.__name__, new_value)
assert_equals(new_value, my_config())
assert_equals(original_value, my_config_2())
# Reset
module_override_all({})
assert_equals(original_value, my_config())
assert_equals(original_value, my_config_2())
@with_setup(_setup, _teardown)
def test_configuration_merge_required():
current_session_confs = {
"archives": ["s3://my-test-archive"],
"numExecutors": 5,
"conf": {
"spark.dynamicAllocation.enabled":
"false",
"spark.sql.shuffle.partitions":
20,
"spark.yarn.tags":
"my=tag,wee=wa",
"spark.jars.packages":
"net.snowflake:spark-snowflake_2.11:2.5.1-spark_2.4"
}
}
required_session_confs = {
"conf": {
"spark.yarn.tags":
"created-by=vaatu-raava"
},
"numExecutors": 10
}
_merge_conf(current_session_confs, required_session_confs)
assert_equals(current_session_confs, {
"archives": ["s3://my-test-archive"],
"numExecutors": 10,
"conf": {
"spark.dynamicAllocation.enabled":
"false",
"spark.sql.shuffle.partitions":
20,
"spark.yarn.tags":
"created-by=vaatu-raava",
"spark.jars.packages":
"net.snowflake:spark-snowflake_2.11:2.5.1-spark_2.4"
}
})
|
en
| 0.881668
|
# This is a sample implementation of how a module would use the config methods. # We'll use these three functions to test it works. # Configs # Test helper functions # Unit tests begin # Reset
| 2.637206
| 3
|
sandbox/lib/jumpscale/Jumpscale/data/schema/tests/6_numeric.py
|
threefoldtech/threebot_prebuilt
| 0
|
6629381
|
<filename>sandbox/lib/jumpscale/Jumpscale/data/schema/tests/6_numeric.py<gh_stars>0
# Copyright (C) July 2018: TF TECH NV in Belgium see https://www.threefold.tech/
# In case TF TECH NV ceases to exist (e.g. because of bankruptcy)
# then Incubaid NV also in Belgium will get the Copyright & Authorship for all changes made since July 2018
# and the license will automatically become Apache v2 for all code related to Jumpscale & DigitalMe
# This file is part of jumpscale at <https://github.com/threefoldtech>.
# jumpscale is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# jumpscale is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License v3 for more details.
#
# You should have received a copy of the GNU General Public License
# along with jumpscale or jumpscale derived works. If not, see <http://www.gnu.org/licenses/>.
# LICENSE END
from Jumpscale import j
def main(self):
    """
    Exercise the numeric (N) schema type: currency parsing, cross-currency
    conversion via j.clients.currencylayer rates, and binary encoding.

    to run:
    kosmos 'j.data.schema.test(name="numeric")' --debug
    """
    schema = """
    @url = despiegk.test
    token_price = "10 USD" (N)
    a = "10 USD"
    B = True
    t = (D)
    """
    schema_object = j.data.schema.get_from_text(schema_text=schema)

    assert schema_object.url == "despiegk.test"
    print(schema_object)

    schema_test = schema_object.new()
    schema_test.token_price = "10 USD"
    usd2usd = schema_test.token_price.usd  # convert USD-to-USD... same value
    assert usd2usd == 10
    inr = schema_test.token_price.value_currency("inr")
    # print ("convert 10 USD to INR", inr)
    assert inr > 100  # ok INR is pretty high... check properly in a bit...
    eur = schema_test.token_price.value_currency("eur")
    # print ("convert 10 USD to EUR", eur)
    # Cross-check conversions against the raw currencylayer USD rates.
    currency = j.clients.currencylayer
    cureur = currency.cur2usd["eur"]
    curinr = currency.cur2usd["inr"]
    # print (cureur, curinr, o.token_price)
    assert usd2usd * cureur == eur
    assert usd2usd * curinr == inr
    # try EUR to USD as well
    schema_test.token_price = "10 EUR"
    # NOTE(review): raw binary encoding of "10 EUR" -- presumably
    # currency-code byte + packed value; confirm against the serializer.
    assert schema_test.token_price == b"\x000\n\x00\x00\x00"
    eur2usd = schema_test.token_price.usd
    assert eur2usd * cureur == 10

    schema_test.token_price = "10 EUR"
    assert schema_test.token_price.currency_code == "eur"

    # CLEAN STATE
    # j.data.schema.remove_from_text(schema)
|
<filename>sandbox/lib/jumpscale/Jumpscale/data/schema/tests/6_numeric.py<gh_stars>0
# Copyright (C) July 2018: TF TECH NV in Belgium see https://www.threefold.tech/
# In case TF TECH NV ceases to exist (e.g. because of bankruptcy)
# then Incubaid NV also in Belgium will get the Copyright & Authorship for all changes made since July 2018
# and the license will automatically become Apache v2 for all code related to Jumpscale & DigitalMe
# This file is part of jumpscale at <https://github.com/threefoldtech>.
# jumpscale is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# jumpscale is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License v3 for more details.
#
# You should have received a copy of the GNU General Public License
# along with jumpscale or jumpscale derived works. If not, see <http://www.gnu.org/licenses/>.
# LICENSE END
from Jumpscale import j
def main(self):
"""
to run:
kosmos 'j.data.schema.test(name="numeric")' --debug
"""
schema = """
@url = despiegk.test
token_price = "10 USD" (N)
a = "10 USD"
B = True
t = (D)
"""
schema_object = j.data.schema.get_from_text(schema_text=schema)
assert schema_object.url == "despiegk.test"
print(schema_object)
schema_test = schema_object.new()
schema_test.token_price = "10 USD"
usd2usd = schema_test.token_price.usd # convert USD-to-USD... same value
assert usd2usd == 10
inr = schema_test.token_price.value_currency("inr")
# print ("convert 10 USD to INR", inr)
assert inr > 100 # ok INR is pretty high... check properly in a bit...
eur = schema_test.token_price.value_currency("eur")
# print ("convert 10 USD to EUR", eur)
currency = j.clients.currencylayer
cureur = currency.cur2usd["eur"]
curinr = currency.cur2usd["inr"]
# print (cureur, curinr, o.token_price)
assert usd2usd * cureur == eur
assert usd2usd * curinr == inr
# try EUR to USD as well
schema_test.token_price = "10 EUR"
assert schema_test.token_price == b"\x000\n\x00\x00\x00"
eur2usd = schema_test.token_price.usd
assert eur2usd * cureur == 10
schema_test.token_price = "10 EUR"
assert schema_test.token_price.currency_code == "eur"
# CLEAN STATE
# j.data.schema.remove_from_text(schema)
|
en
| 0.8476
|
# Copyright (C) July 2018: TF TECH NV in Belgium see https://www.threefold.tech/ # In case TF TECH NV ceases to exist (e.g. because of bankruptcy) # then Incubaid NV also in Belgium will get the Copyright & Authorship for all changes made since July 2018 # and the license will automatically become Apache v2 for all code related to Jumpscale & DigitalMe # This file is part of jumpscale at <https://github.com/threefoldtech>. # jumpscale is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # jumpscale is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License v3 for more details. # # You should have received a copy of the GNU General Public License # along with jumpscale or jumpscale derived works. If not, see <http://www.gnu.org/licenses/>. # LICENSE END to run: kosmos 'j.data.schema.test(name="numeric")' --debug @url = despiegk.test token_price = "10 USD" (N) a = "10 USD" B = True t = (D) # convert USD-to-USD... same value # print ("convert 10 USD to INR", inr) # ok INR is pretty high... check properly in a bit... # print ("convert 10 USD to EUR", eur) # print (cureur, curinr, o.token_price) # try EUR to USD as well # CLEAN STATE # j.data.schema.remove_from_text(schema)
| 1.840406
| 2
|
savona/exporter/docx_.py
|
lucianolorenti/savona
| 0
|
6629382
|
<gh_stars>0
import base64
import json
import os
import tempfile
from io import BytesIO
from pathlib import Path
from sys import int_info
import mistletoe
import nbformat
import pandas as pd
from bs4 import BeautifulSoup
from docx import Document
from docx.enum.style import WD_STYLE_TYPE
from docx.enum.table import WD_TABLE_ALIGNMENT
from docx.enum.text import WD_ALIGN_PARAGRAPH, WD_TAB_ALIGNMENT
from docx.oxml import parse_xml
from docx.oxml.ns import nsdecls
from docx.shared import Inches, Pt, RGBColor
from mistletoe.ast_renderer import ASTRenderer
from PIL import Image
from savona.exporter import Exporter
HEADING_TYPE = "Heading"
RAWTEXT_TYPE = "RawText"
PARAGRAPH_TYPE = "Paragraph"
LIST_TYPE = "List"
LISTITME_TYPE = "ListItem"
STREM_OUTPUT_TYPE = "stream"
IMAGE_OUTPUT_TYPE = "image/png"
TEXT_OUTPUT_TYPE = "text/plain"
HTML_OUTPUT_TYPE = "text/html"
MARKDOWN_OUTPUT_TYPE = "text/markdown"
TABLE_BACKGROUND = "EFEFFF"
def table_from_pandas(document, data: pd.DataFrame, include_header=True):
    """Render *data* as a Word table appended to *document*.

    When *include_header* is true an extra first row holds the column
    labels, shaded with TABLE_BACKGROUND; pandas' auto-generated
    "Unnamed:" labels are rendered blank.
    """
    n_data_rows, n_cols = data.shape
    header_offset = 1 if include_header else 0
    table = document.add_table(rows=n_data_rows + header_offset, cols=n_cols)
    if include_header:
        for col_idx, label in enumerate(data.columns):
            header_cell = table.cell(0, col_idx)
            # Shade the header cell by injecting a w:shd element directly
            # into the cell's XML properties.
            shading = parse_xml(
                ('<w:shd {} w:fill="' + TABLE_BACKGROUND + '"/>').format(nsdecls("w"))
            )
            header_cell._tc.get_or_add_tcPr().append(shading)
            header_cell.text = "" if label.startswith("Unnamed:") else label
    for col_idx, label in enumerate(data):
        for row_idx in range(n_data_rows):
            value = data[label][data.index[row_idx]]
            table.cell(row_idx + header_offset, col_idx).text = str(value)
def add_image_from_figure(doc, fig):
    """Save a matplotlib-style *fig* to a temporary PNG and append it,
    centered, to *doc* at the section's effective width."""
    with tempfile.TemporaryDirectory() as workdir:
        image_path = os.path.join(workdir, "tmp_figure.png")
        fig.savefig(image_path)
        doc.add_picture(image_path, width=effective_width(doc.sections[0]))
    # The picture lands in a fresh trailing paragraph; center it.
    doc.paragraphs[-1].alignment = WD_ALIGN_PARAGRAPH.CENTER
def effective_width(section):
    """Usable horizontal width of *section*.

    NOTE(review): this subtracts ``bottom_margin`` rather than
    ``right_margin`` -- looks suspicious but is preserved as-is;
    confirm intent before changing.
    """
    margins = section.bottom_margin + section.left_margin
    return section.page_width - margins
class HTMLDocxParser:
    """Placeholder for a future HTML-to-docx parser (currently unused;
    the HTML handling lives inline in DocxExporter.process_html*)."""
    pass
class DocxExporter(Exporter):
    """Export a Jupyter notebook (.ipynb) to a Word (.docx) document.

    Markdown cells are rendered through mistletoe's AST renderer; code
    cell outputs (plain text, PNG images, HTML, markdown) are mapped
    onto python-docx paragraphs, pictures and tables.
    """

    def __init__(self, config={}):
        # NOTE(review): mutable default argument for ``config`` -- safe
        # only while it is never mutated; confirm before changing.
        self.doc = Document()
        # Base body style: 10pt black Roboto.
        style = self.doc.styles["Normal"]
        font = style.font
        font.name = "Roboto"
        font.size = Pt(10)
        font.color.rgb = RGBColor(0x00, 0x00, 0x00)
        # Restyle Heading 1..5 with decreasing point sizes. Clearing the
        # w:rFonts attributes drops the theme font so the explicit font
        # name below actually takes effect.
        for i, size in zip(range(1, 6), [20, 18, 16, 12, 10]):
            heading_1_style = self.doc.styles[f"Heading {i}"]
            heading_1_style.element.xpath("w:rPr/w:rFonts")[0].attrib.clear()
            font = heading_1_style.font
            font.name = "Roboto"
            font.size = Pt(size)
            font.color.rgb = RGBColor(0x00, 0x00, 0x00)
        self.config = config

    @property
    def header(self):
        """Header of the first (and only) document section."""
        return self.doc.sections[0].header

    @property
    def footer(self):
        """Footer of the first (and only) document section."""
        return self.doc.sections[0].footer

    def export(self, notebook_path: Path, output_path: Path):
        """Read the notebook at *notebook_path* and write a .docx file.

        If *output_path* lacks a ``.docx`` suffix it is treated as a
        directory and ``<notebook stem>.docx`` is written inside it.
        """
        with open(notebook_path, "r") as file:
            content = file.read()
        notebook = nbformat.reads(content, as_version=4)
        for node in notebook.cells:
            if node["cell_type"] == "code":
                self.add_output(node["outputs"], node['metadata'])
            if node["cell_type"] == "markdown":
                self.add_content(node)
        if not output_path.suffix == ".docx":
            output_path = output_path / (notebook_path.stem + ".docx")
        self.doc.save(str(output_path))

    def _write_image(self, data):
        """Decode base64 *data* into a temporary PNG file object,
        rewound to the start so ``add_picture`` can read it."""
        fp = tempfile.TemporaryFile(suffix=".png")
        data = base64.b64decode(data)
        fp.write(data)
        fp.seek(0)
        return fp

    @property
    def effective_width(self):
        """Usable page width of the first section (module-level helper)."""
        return effective_width(self.doc.sections[0])

    def add_image(self, data, where=None, width=None):
        """Append a base64-encoded PNG to *where* (default: the whole
        document) scaled to *width* (default: effective page width)."""
        fp = self._write_image(data)
        if where is None:
            where = self.doc
        if width is None:
            width = self.effective_width
        where.add_picture(fp, width=width)

    def add_figure(self, data, caption:str, width=None):
        """Append a centered image with *caption* beneath it, laid out
        as a 2x1 table (image row + caption row)."""
        table = self.doc.add_table(rows=2, cols=1)
        table.alignment = WD_TABLE_ALIGNMENT.CENTER
        table.rows[1].cells[0].text = str(caption)
        table.rows[0].cells[0].paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER
        table.rows[1].cells[0].paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER
        image_paragraph = table.rows[0].cells[0].paragraphs[0].add_run()
        if width is None:
            width = table.rows[0].cells[0].width
        self.add_image(
            data,
            where=image_paragraph,
            width=width,
        )

    def process_html_figure(self, figure, table, col, width):
        """Render one HTML <figure> into column *col* of *table*.

        An embedded <img> with a ``data:image/png;base64,`` URI becomes
        a picture; otherwise the first nested <table> is parsed with
        pandas and rendered as a docx table. The <figcaption> text goes
        into the second row (raises IndexError when absent).
        """
        def process_figure(img):
            img_src = img.get("src")
            image_paragraph = table.rows[0].cells[col].paragraphs[0].add_run()
            self.add_image(
                img_src[len("data:image/png;base64,") :].encode(),
                where=image_paragraph,
                width=width,
            )

        imgs = figure.find_all("img")
        if len(imgs) > 0:
            process_figure(imgs[0])
        else:
            tables = figure.find_all("table")
            if len(tables) > 0:
                df = pd.read_html(str(tables[0]))[0]
                table_from_pandas(table.rows[0].cells[col], df)
        figcaption = figure.find_all("figcaption")[0]
        table.rows[1].cells[col].text = str(figcaption.string)
        table.rows[0].cells[col].paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER
        table.rows[1].cells[col].paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER

    def process_row_element(self, elem, table, col: int, nrows):
        """Render each <figure> child of *elem* into column *col*,
        splitting the effective width (minus 1.5in) across *nrows*."""
        for div in elem.contents:
            if div.name == "figure":
                figure_width = (self.effective_width - (Inches(1.5))) / nrows
                self.process_html_figure(div, table, col, width=figure_width)

    def process_row(self, data):
        """Lay out the children of a ``div.row.center`` element side by
        side as columns of a centered 2-row table."""
        table = self.doc.add_table(rows=2, cols=len(data.contents))
        table.alignment = WD_TABLE_ALIGNMENT.CENTER
        # NOTE(review): named ``nrows`` but it is the number of columns.
        nrows = len(data.contents)
        for i, element in enumerate(data.contents):
            self.process_row_element(element, table, i, nrows)

    def process_markdown(self, md):
        """Parse markdown text *md* into a mistletoe AST and render it."""
        elements = json.loads(mistletoe.markdown(md, ASTRenderer))
        self.process_elements(elements["children"])

    def process_html(self, html):
        """Handle HTML output: only ``div.row.center`` blocks are
        rendered; anything else is ignored."""
        soup = BeautifulSoup(html, "html.parser")
        row = soup.find_all("div", class_="row center")
        if len(row) > 0:
            self.process_row(row[0])

    def add_output(self, cell_code, metadata:dict):
        """Render a code cell's *cell_code* outputs.

        ``metadata['savona']`` may request figure treatment
        (``output_type: figure`` plus an optional ``caption``) for PNG
        outputs. Stream text, plain text, HTML and markdown outputs are
        dispatched to their respective handlers.
        """
        savona_metadata = metadata.get('savona', {})
        for o in cell_code:
            if o["output_type"] == STREM_OUTPUT_TYPE:
                self.doc.add_paragraph(o["text"])
                continue
            # Only the first MIME type of each output bundle is used.
            data = o["data"]
            data_type = list(data.keys())[0]
            if data_type == TEXT_OUTPUT_TYPE:
                self.doc.add_paragraph(data[data_type])
            elif data_type == IMAGE_OUTPUT_TYPE:
                if savona_metadata.get('output_type', '') == 'figure':
                    self.add_figure(data[data_type], savona_metadata.get('caption', ''))
                else:
                    self.add_image(data[data_type])
                    self.doc.paragraphs[-1].alignment = WD_ALIGN_PARAGRAPH.CENTER
            elif data_type == HTML_OUTPUT_TYPE:
                self.process_html(data[data_type])
            elif data_type == MARKDOWN_OUTPUT_TYPE:
                self.process_markdown(data[data_type])

    def flatten(self, elements):
        """Join the raw text of *elements* (recursing into paragraphs)
        into a single space-separated string."""
        content = []
        for el in elements:
            if el["type"] == RAWTEXT_TYPE:
                content.append(el["content"])
            elif el["type"] == PARAGRAPH_TYPE:
                content.append(self.flatten(el["children"]))
        return " ".join(content)

    def add_list(self, node):
        """Render the list items of a markdown list *node* as bulleted
        paragraphs (nesting is flattened)."""
        for el in node["children"]:
            if el["type"] == LISTITME_TYPE:
                self.doc.add_paragraph(
                    self.flatten(el["children"]), style="List Bullet"
                )

    def process_node(self, node):
        """Dispatch one mistletoe AST *node* (heading, paragraph or
        list) onto the document; other node types are dropped."""
        if node["type"] == HEADING_TYPE:
            level = node["level"]  # NOTE(review): unused local.
            self.doc.add_heading(self.flatten(node["children"]), level=node["level"])
        elif node["type"] == PARAGRAPH_TYPE:
            self.doc.add_paragraph(self.flatten(node["children"]))
        elif node["type"] == LIST_TYPE:
            self.add_list(node)

    def process_elements(self, elements):
        """Render every AST node in *elements* in document order."""
        for el in elements:
            self.process_node(el)

    def add_content(self, node):
        """Render a markdown cell *node* into the document."""
        elements = json.loads(mistletoe.markdown(node["source"], ASTRenderer))
        self.process_elements(elements["children"])
|
import base64
import json
import os
import tempfile
from io import BytesIO
from pathlib import Path
from sys import int_info
import mistletoe
import nbformat
import pandas as pd
from bs4 import BeautifulSoup
from docx import Document
from docx.enum.style import WD_STYLE_TYPE
from docx.enum.table import WD_TABLE_ALIGNMENT
from docx.enum.text import WD_ALIGN_PARAGRAPH, WD_TAB_ALIGNMENT
from docx.oxml import parse_xml
from docx.oxml.ns import nsdecls
from docx.shared import Inches, Pt, RGBColor
from mistletoe.ast_renderer import ASTRenderer
from PIL import Image
from savona.exporter import Exporter
HEADING_TYPE = "Heading"
RAWTEXT_TYPE = "RawText"
PARAGRAPH_TYPE = "Paragraph"
LIST_TYPE = "List"
LISTITME_TYPE = "ListItem"
STREM_OUTPUT_TYPE = "stream"
IMAGE_OUTPUT_TYPE = "image/png"
TEXT_OUTPUT_TYPE = "text/plain"
HTML_OUTPUT_TYPE = "text/html"
MARKDOWN_OUTPUT_TYPE = "text/markdown"
TABLE_BACKGROUND = "EFEFFF"
def table_from_pandas(document, data: pd.DataFrame, include_header=True):
nrows = data.shape[0]
if include_header:
nrows += 1
table = document.add_table(rows=nrows, cols=data.shape[1])
drow = 0
if include_header:
for i, column in enumerate(data):
content = data.columns[i]
if content.startswith("Unnamed:"):
content = ""
shading_elm = parse_xml(
('<w:shd {} w:fill="' + TABLE_BACKGROUND + '"/>').format(nsdecls("w"))
)
table.cell(0, i)._tc.get_or_add_tcPr().append(shading_elm)
table.cell(0, i).text = content
drow = 1
for i, column in enumerate(data):
for row in range(data.shape[0]):
table.cell(row + drow, i).text = str(data[column][data.index[row]])
def add_image_from_figure(doc, fig):
with tempfile.TemporaryDirectory() as tmp:
path = os.path.join(tmp, "tmp_figure.png")
fig.savefig(path)
doc.add_picture(path, width=effective_width(doc.sections[0]))
doc.paragraphs[-1].alignment = WD_ALIGN_PARAGRAPH.CENTER
def effective_width(section):
return section.page_width - section.bottom_margin - section.left_margin
class HTMLDocxParser:
pass
class DocxExporter(Exporter):
def __init__(self, config={}):
self.doc = Document()
style = self.doc.styles["Normal"]
font = style.font
font.name = "Roboto"
font.size = Pt(10)
font.color.rgb = RGBColor(0x00, 0x00, 0x00)
for i, size in zip(range(1, 6), [20, 18, 16, 12, 10]):
heading_1_style = self.doc.styles[f"Heading {i}"]
heading_1_style.element.xpath("w:rPr/w:rFonts")[0].attrib.clear()
font = heading_1_style.font
font.name = "Roboto"
font.size = Pt(size)
font.color.rgb = RGBColor(0x00, 0x00, 0x00)
self.config = config
@property
def header(self):
return self.doc.sections[0].header
@property
def footer(self):
return self.doc.sections[0].footer
def export(self, notebook_path: Path, output_path: Path):
with open(notebook_path, "r") as file:
content = file.read()
notebook = nbformat.reads(content, as_version=4)
for node in notebook.cells:
if node["cell_type"] == "code":
self.add_output(node["outputs"], node['metadata'])
if node["cell_type"] == "markdown":
self.add_content(node)
if not output_path.suffix == ".docx":
output_path = output_path / (notebook_path.stem + ".docx")
self.doc.save(str(output_path))
def _write_image(self, data):
fp = tempfile.TemporaryFile(suffix=".png")
data = base64.b64decode(data)
fp.write(data)
fp.seek(0)
return fp
@property
def effective_width(self):
return effective_width(self.doc.sections[0])
def add_image(self, data, where=None, width=None):
fp = self._write_image(data)
if where is None:
where = self.doc
if width is None:
width = self.effective_width
where.add_picture(fp, width=width)
def add_figure(self, data, caption:str, width=None):
table = self.doc.add_table(rows=2, cols=1)
table.alignment = WD_TABLE_ALIGNMENT.CENTER
table.rows[1].cells[0].text = str(caption)
table.rows[0].cells[0].paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER
table.rows[1].cells[0].paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER
image_paragraph = table.rows[0].cells[0].paragraphs[0].add_run()
if width is None:
width = table.rows[0].cells[0].width
self.add_image(
data,
where=image_paragraph,
width=width,
)
def process_html_figure(self, figure, table, col, width):
def process_figure(img):
img_src = img.get("src")
image_paragraph = table.rows[0].cells[col].paragraphs[0].add_run()
self.add_image(
img_src[len("data:image/png;base64,") :].encode(),
where=image_paragraph,
width=width,
)
imgs = figure.find_all("img")
if len(imgs) > 0:
process_figure(imgs[0])
else:
tables = figure.find_all("table")
if len(tables) > 0:
df = pd.read_html(str(tables[0]))[0]
table_from_pandas(table.rows[0].cells[col], df)
figcaption = figure.find_all("figcaption")[0]
table.rows[1].cells[col].text = str(figcaption.string)
table.rows[0].cells[col].paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER
table.rows[1].cells[col].paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER
def process_row_element(self, elem, table, col: int, nrows):
for div in elem.contents:
if div.name == "figure":
figure_width = (self.effective_width - (Inches(1.5))) / nrows
self.process_html_figure(div, table, col, width=figure_width)
def process_row(self, data):
table = self.doc.add_table(rows=2, cols=len(data.contents))
table.alignment = WD_TABLE_ALIGNMENT.CENTER
nrows = len(data.contents)
for i, element in enumerate(data.contents):
self.process_row_element(element, table, i, nrows)
def process_markdown(self, md):
elements = json.loads(mistletoe.markdown(md, ASTRenderer))
self.process_elements(elements["children"])
def process_html(self, html):
soup = BeautifulSoup(html, "html.parser")
row = soup.find_all("div", class_="row center")
if len(row) > 0:
self.process_row(row[0])
def add_output(self, cell_code, metadata:dict):
savona_metadata = metadata.get('savona', {})
for o in cell_code:
if o["output_type"] == STREM_OUTPUT_TYPE:
self.doc.add_paragraph(o["text"])
continue
data = o["data"]
data_type = list(data.keys())[0]
if data_type == TEXT_OUTPUT_TYPE:
self.doc.add_paragraph(data[data_type])
elif data_type == IMAGE_OUTPUT_TYPE:
if savona_metadata.get('output_type', '') == 'figure':
self.add_figure(data[data_type], savona_metadata.get('caption', ''))
else:
self.add_image(data[data_type])
self.doc.paragraphs[-1].alignment = WD_ALIGN_PARAGRAPH.CENTER
elif data_type == HTML_OUTPUT_TYPE:
self.process_html(data[data_type])
elif data_type == MARKDOWN_OUTPUT_TYPE:
self.process_markdown(data[data_type])
def flatten(self, elements):
content = []
for el in elements:
if el["type"] == RAWTEXT_TYPE:
content.append(el["content"])
elif el["type"] == PARAGRAPH_TYPE:
content.append(self.flatten(el["children"]))
return " ".join(content)
def add_list(self, node):
for el in node["children"]:
if el["type"] == LISTITME_TYPE:
self.doc.add_paragraph(
self.flatten(el["children"]), style="List Bullet"
)
def process_node(self, node):
if node["type"] == HEADING_TYPE:
level = node["level"]
self.doc.add_heading(self.flatten(node["children"]), level=node["level"])
elif node["type"] == PARAGRAPH_TYPE:
self.doc.add_paragraph(self.flatten(node["children"]))
elif node["type"] == LIST_TYPE:
self.add_list(node)
def process_elements(self, elements):
for el in elements:
self.process_node(el)
def add_content(self, node):
elements = json.loads(mistletoe.markdown(node["source"], ASTRenderer))
self.process_elements(elements["children"])
|
none
| 1
| 2.310264
| 2
|
|
learning-labs/PID Analysis/processAnalysis.py
|
natanascimento/cisco-devnet
| 0
|
6629383
|
<reponame>natanascimento/cisco-devnet<filename>learning-labs/PID Analysis/processAnalysis.py
#Analisando determinado processo do Windows
import psutil as ps
class checkProc:
    """Check whether a given process (here: chrome.exe) is running.

    ``checkProcess`` scans all live processes via psutil and prints a
    status message (in Portuguese, preserved from the original) saying
    whether the process is active.
    """

    def __init__(self):
        # Placeholders filled in by checkProcess().
        self.process = ' '
        self.pc = ' '
        self.exist = ' '
        super().__init__()

    def checkProcess (self):
        """Scan running processes and report whether chrome.exe is active."""
        self.process = "chrome"
        self.pc = self.process + '.exe'
        matches = 0
        for proc in ps.process_iter():
            self.info = proc.as_dict(attrs=['name', 'status'])
            # Normalize keys/values to lowercase. Some values may not be
            # strings (e.g. None), so only AttributeError is tolerated --
            # the original bare ``except`` swallowed every exception.
            try:
                self.info = {k.lower(): v.lower() for k, v in self.info.items()}
            except AttributeError:
                pass
            # Count entries whose value equals "<process>.exe".
            for akey in self.info.keys():
                self.exist = self.info[akey]
                if self.exist == self.pc:
                    matches += 1
        # Report the result (runtime strings kept byte-identical).
        if matches != 0:
            print ("O Processo", self.process, "está ativo")
        if matches == 0:
            print ("O Processo", self.process, "está desligado")
# Script entry point: run the process check immediately on execution.
run = checkProc()
run.checkProcess()
|
Analysis/processAnalysis.py
#Analisando determinado processo do Windows
import psutil as ps
class checkProc:
def __init__(self):
self.process = ' '
self.pc = ' '
self.exist = ' '
super().__init__()
def checkProcess (self):
self.process = ("chrome")
self.pc = (self.process + '.exe')
qtd_process = []
for proc in ps.process_iter():
self.info = proc.as_dict(attrs=['name', 'status'])
#Setar lowercase para os processos
try:
self.info = {k.lower(): v.lower() for k, v in self.info.items()}
except:
pass
#Analisando processos
for akey in self.info.keys():
self.exist = self.info[akey]
if self.exist == self.pc:
self.true = 1
qtd_process.append(self.true)
else:
self.false = 0
#Atestando se o processo está rodando ou não
if len(qtd_process) != 0:
print ("O Processo", self.process, "está ativo")
if len(qtd_process) == 0:
print ("O Processo", self.process, "está desligado")
run = checkProc()
run.checkProcess()
|
pt
| 0.938798
|
#Analisando determinado processo do Windows #Setar lowercase para os processos #Analisando processos #Atestando se o processo está rodando ou não
| 2.88525
| 3
|
flypy/cache/keys.py
|
filmackay/flypy
| 0
|
6629384
|
# -*- coding: utf-8 -*-
"""
Bytecode and serialization for the purpose of defining a key to find cached
IR.
"""
from __future__ import print_function, division, absolute_import
import zlib
import types
#===------------------------------------------------------------------===
# Errors
#===------------------------------------------------------------------===
class IncompatibleConstantError(Exception):
    """Raised when a constant cannot be safely serialized into a code blob."""
    pass
#===------------------------------------------------------------------===
# Blobbify
#===------------------------------------------------------------------===
def make_code_blob(py_func, argtypes):
    """
    Serialize *py_func* plus its flypy *argtypes* into a string key.

    Returns
    -------
    A string encoding the function's code object and qualified argument
    types structurally, or ``None`` when the function references a
    constant that cannot be represented.
    """
    try:
        encoded_code = code_tuple(py_func)
    except IncompatibleConstantError:
        return None
    qualified_types = tuple(qualify(ty) for ty in argtypes)
    # zlib compression of the result was considered upstream but left
    # disabled.
    return str((encoded_code, qualified_types))
def code_tuple(func):
    """Build a structural tuple describing *func*'s code object,
    its constants, and the global names it references."""
    code = func.__code__
    field_names = ('argcount', 'code', 'filename', 'firstlineno', 'flags',
                   'freevars', 'lnotab', 'name', 'nlocals', 'stacksize')
    parts = [getattr(code, 'co_' + name) for name in field_names]
    parts.append([encode_constant(c) for c in code.co_consts])
    parts.append([encode_constant(c) for c in find_globals(func)])
    return tuple(parts)
def find_globals(func):
    """Return the global (and attribute) names referenced by *func*.

    Note: this is just ``co_names``; the actual global values are not
    resolved (see the disabled lookup below).
    """
    # return [func.__globals__[name] for name in global_names]
    return func.__code__.co_names
#===------------------------------------------------------------------===
# Constants
#===------------------------------------------------------------------===
def compatible_const(const):
    """Return True when *const* can be blobified (recursively so, for
    tuples): None, bool, int, float, str or complex."""
    if isinstance(const, tuple):
        return all(compatible_const(item) for item in const)
    allowed = (type(None), bool, int, float, str, complex)
    return isinstance(const, allowed)
def encode_constant(const):
    """Encode *const* as a ``{'type', 'value'}`` dict, or raise
    IncompatibleConstantError when it cannot be serialized."""
    # TODO: use 'conversion' and mutability flags to determine whether
    # `const` can be serialized
    if compatible_const(const):
        return {'type': type(const).__name__, 'value': const}
    raise IncompatibleConstantError(const)
#===------------------------------------------------------------------===
# Types
#===------------------------------------------------------------------===
def qualify(ty):
    """Return a string pairing *ty*'s fully qualified implementation
    name (``module.name`` of ``ty.impl``) with *ty* itself."""
    impl = ty.impl
    qualified_name = "{}.{}".format(impl.__module__, impl.__name__)
    return str((qualified_name, ty))
|
# -*- coding: utf-8 -*-
"""
Bytecode and serialization for the purpose of defining a key to find cached
IR.
"""
from __future__ import print_function, division, absolute_import
import zlib
import types
#===------------------------------------------------------------------===
# Errors
#===------------------------------------------------------------------===
class IncompatibleConstantError(Exception):
pass
#===------------------------------------------------------------------===
# Blobbify
#===------------------------------------------------------------------===
def make_code_blob(py_func, argtypes):
"""
Create a code "blob" for the given Python function and flypy argument
types.
Return
------
A json string encoding the function and argument types structurally.
"""
try:
code = code_tuple(py_func)
except IncompatibleConstantError:
return None
result = str((code, tuple(map(qualify, argtypes))))
#result = zlib.compress(result)
return result
def code_tuple(func):
"""Build a tuple for the code object"""
attributes = ['argcount', 'code', 'filename', 'firstlineno', 'flags',
'freevars', 'lnotab', 'name', 'nlocals', 'stacksize']
attrs = [getattr(func.__code__, 'co_' + attrib) for attrib in attributes]
attrs.append([encode_constant(const) for const in func.__code__.co_consts])
attrs.append([encode_constant(const) for const in find_globals(func)])
return tuple(attrs)
def find_globals(func):
"""Load any globals references by the function"""
global_names = func.__code__.co_names
#return [func.__globals__[name] for name in global_names]
return global_names
#===------------------------------------------------------------------===
# Constants
#===------------------------------------------------------------------===
def compatible_const(const):
"""See whether we can blobify the constant"""
if isinstance(const, tuple):
return all(map(compatible_const, const))
return isinstance(const, (type(None), bool, int, float, str, complex))
def encode_constant(const):
"""Return a string-encodable representation for `const` that is compatible"""
# TODO: use 'conversion' and mutability flags to determine whether `const`
# can be serialized
if not compatible_const(const):
raise IncompatibleConstantError(const)
return {'type': type(const).__name__, 'value': const}
#===------------------------------------------------------------------===
# Types
#===------------------------------------------------------------------===
def qualify(ty):
"""Qualify the type"""
name = ".".join([ty.impl.__module__, ty.impl.__name__])
return str((name, ty))
|
en
| 0.353036
|
# -*- coding: utf-8 -*- Bytecode and serialization for the purpose of defining a key to find cached IR. #===------------------------------------------------------------------=== # Errors #===------------------------------------------------------------------=== #===------------------------------------------------------------------=== # Blobbify #===------------------------------------------------------------------=== Create a code "blob" for the given Python function and flypy argument types. Return ------ A json string encoding the function and argument types structurally. #result = zlib.compress(result) Build a tuple for the code object Load any globals references by the function #return [func.__globals__[name] for name in global_names] #===------------------------------------------------------------------=== # Constants #===------------------------------------------------------------------=== See whether we can blobify the constant Return a string-encodable representation for `const` that is compatible # TODO: use 'conversion' and mutability flags to determine whether `const` # can be serialized #===------------------------------------------------------------------=== # Types #===------------------------------------------------------------------=== Qualify the type
| 2.551617
| 3
|
libcst/codemod/visitors/_remove_imports.py
|
jschavesr/LibCST
| 880
|
6629385
|
<reponame>jschavesr/LibCST
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from typing import Any, Dict, Iterable, List, Optional, Sequence, Set, Tuple, Union
import libcst as cst
from libcst.codemod._context import CodemodContext
from libcst.codemod._visitor import ContextAwareTransformer, ContextAwareVisitor
from libcst.codemod.visitors._gather_unused_imports import GatherUnusedImportsVisitor
from libcst.helpers import get_absolute_module_for_import, get_full_name_for_node
from libcst.metadata import Assignment, ProviderT, ScopeProvider
class RemovedNodeVisitor(ContextAwareVisitor):
    """Visits a subtree that is about to be removed and schedules
    removal of every import that a Name/Attribute inside it resolves
    to, via RemoveImportsVisitor.remove_unused_import."""

    def _remove_imports_from_import_stmt(
        self, local_name: str, import_node: cst.Import
    ) -> None:
        # Schedule removal of any alias whose bound name is exactly
        # ``local_name`` or a dotted prefix of it.
        for import_alias in import_node.names:
            if import_alias.evaluated_alias is None:
                prefix = import_alias.evaluated_name
            else:
                prefix = import_alias.evaluated_alias
            if local_name == prefix or local_name.startswith(f"{prefix}."):
                RemoveImportsVisitor.remove_unused_import(
                    self.context,
                    import_alias.evaluated_name,
                    asname=import_alias.evaluated_alias,
                )

    def _remove_imports_from_importfrom_stmt(
        self, local_name: str, import_node: cst.ImportFrom
    ) -> None:
        # Same as above but for ``from X import ...`` statements; the
        # absolute module name must be resolvable (relative imports need
        # a known full module name).
        names = import_node.names
        if isinstance(names, cst.ImportStar):
            # We don't handle removing this, so ignore it.
            return
        module_name = get_absolute_module_for_import(
            self.context.full_module_name, import_node
        )
        if module_name is None:
            raise Exception("Cannot look up absolute module from relative import!")
        # We know any local names will refer to this as an alias if
        # there is one, and as the original name if there is not one
        for import_alias in names:
            if import_alias.evaluated_alias is None:
                prefix = import_alias.evaluated_name
            else:
                prefix = import_alias.evaluated_alias
            if local_name == prefix or local_name.startswith(f"{prefix}."):
                RemoveImportsVisitor.remove_unused_import(
                    self.context,
                    module_name,
                    obj=import_alias.evaluated_name,
                    asname=import_alias.evaluated_alias,
                )

    def _visit_name_attr_alike(self, node: Union[cst.Name, cst.Attribute]) -> None:
        # Shared handler for Name and Attribute nodes: resolve the node
        # through scope analysis and, for every import assignment that
        # binds it (walking up the scope chain), schedule that import's
        # removal.
        # Look up the local name of this node.
        local_name = get_full_name_for_node(node)
        if local_name is None:
            return
        # Look up the scope for this node, remove the import that caused it to exist.
        metadata_wrapper = self.context.wrapper
        if metadata_wrapper is None:
            raise Exception("Cannot look up import, metadata is not computed for node!")
        scope_provider = metadata_wrapper.resolve(ScopeProvider)
        try:
            scope = scope_provider[node]
            if scope is None:
                # This object has no scope, so we can't remove it.
                return
        except KeyError:
            # This object has no scope, so we can't remove it.
            return
        while True:
            for assignment in scope.assignments[node] or set():
                # We only care about non-builtins.
                if isinstance(assignment, Assignment):
                    import_node = assignment.node
                    if isinstance(import_node, cst.Import):
                        self._remove_imports_from_import_stmt(local_name, import_node)
                    elif isinstance(import_node, cst.ImportFrom):
                        self._remove_imports_from_importfrom_stmt(
                            local_name, import_node
                        )
            # Global scope is its own parent; stop there.
            if scope is scope.parent:
                break
            scope = scope.parent

    def visit_Name(self, node: cst.Name) -> None:
        self._visit_name_attr_alike(node)

    def visit_Attribute(self, node: cst.Attribute) -> None:
        self._visit_name_attr_alike(node)
class RemoveImportsVisitor(ContextAwareTransformer):
"""
Attempt to remove given imports from a module, dependent on whether there are
any uses of the imported objects. Given a :class:`~libcst.codemod.CodemodContext`
and a sequence of tuples specifying a module to remove as a string. Optionally
an object being imported from that module and optionally an alias assigned to
that imported object, ensures that that import no longer exists as long as there
are no remaining references.
Note that static analysis is able to determine safely whether an import is still
needed given a particular module, but it is currently unable to determine whether
an imported object is re-exported and used inside another module unless that
object appears in an ``__any__`` list.
This is one of the transforms that is available automatically to you when running
a codemod. To use it in this manner, import
:class:`~libcst.codemod.visitors.RemoveImportsVisitor` and then call the static
:meth:`~libcst.codemod.visitors.RemoveImportsVisitor.remove_unused_import` method,
giving it the current context (found as ``self.context`` for all subclasses of
:class:`~libcst.codemod.Codemod`), the module you wish to remove and
optionally an object you wish to stop importing as well as an alias that the
object is currently assigned to.
For example::
RemoveImportsVisitor.remove_unused_import(self.context, "typing", "Optional")
This will remove any ``from typing import Optional`` that exists in the module
as long as there are no uses of ``Optional`` in that module.
As another example::
RemoveImportsVisitor.remove_unused_import(self.context, "typing")
This will remove any ``import typing`` that exists in the module, as long as
there are no references to ``typing`` in that module, including references
such as ``typing.Optional``.
Additionally, :class:`~libcst.codemod.visitors.RemoveImportsVisitor` includes
a convenience function
:meth:`~libcst.codemod.visitors.RemoveImportsVisitor.remove_unused_import_by_node`
which will attempt to schedule removal of all imports referenced in that node
and its children. This is especially useful inside transforms when you are going
to remove a node using :func:`~libcst.RemoveFromParent` to get rid of a node.
For example::
def leave_AnnAssign(
self, original_node: cst.AnnAssign, updated_node: cst.AnnAssign,
) -> cst.RemovalSentinel:
# Remove all annotated assignment statements, clean up imports.
RemoveImportsVisitor.remove_unused_import_by_node(self.context, original_node)
return cst.RemovalFromParent()
This will remove all annotated assignment statements from a module as well
as clean up any imports that were only referenced in those assignments. Note
that we pass the ``original_node`` to the helper function as it uses scope analysis
under the hood which is only computed on the original tree.
Note that this is a subclass of :class:`~libcst.CSTTransformer` so it is
possible to instantiate it and pass it to a :class:`~libcst.Module`
:meth:`~libcst.CSTNode.visit` method. However, it is far easier to use
the automatic transform feature of :class:`~libcst.codemod.CodemodCommand`
and schedule an import to be added by calling
:meth:`~libcst.codemod.visitors.RemoveImportsVisitor.remove_unused_import`
"""
CONTEXT_KEY = "RemoveImportsVisitor"
METADATA_DEPENDENCIES: Tuple[ProviderT] = (
*GatherUnusedImportsVisitor.METADATA_DEPENDENCIES,
)
@staticmethod
def _get_imports_from_context(
context: CodemodContext,
) -> List[Tuple[str, Optional[str], Optional[str]]]:
unused_imports = context.scratch.get(RemoveImportsVisitor.CONTEXT_KEY, [])
if not isinstance(unused_imports, list):
raise Exception("Logic error!")
return unused_imports
@staticmethod
def remove_unused_import(
context: CodemodContext,
module: str,
obj: Optional[str] = None,
asname: Optional[str] = None,
) -> None:
"""
Schedule an import to be removed in a future invocation of this class by
updating the ``context`` to include the ``module`` and optionally ``obj``
which is currently imported as well as optionally ``alias`` that the
imported ``module`` or ``obj`` is aliased to. When subclassing from
:class:`~libcst.codemod.CodemodCommand`, this will be performed for you
after your transform finishes executing. If you are subclassing from a
:class:`~libcst.codemod.Codemod` instead, you will need to call the
:meth:`~libcst.codemod.Codemod.transform_module` method on the module
under modification with an instance of this class after performing your
transform. Note that if the particular ``module`` or ``obj`` you are
requesting to remove is still in use somewhere in the current module
at the time of executing :meth:`~libcst.codemod.Codemod.transform_module`
on an instance of :class:`~libcst.codemod.visitors.AddImportsVisitor`,
this will perform no action in order to avoid removing an in-use import.
"""
unused_imports = RemoveImportsVisitor._get_imports_from_context(context)
unused_imports.append((module, obj, asname))
context.scratch[RemoveImportsVisitor.CONTEXT_KEY] = unused_imports
    @staticmethod
    def remove_unused_import_by_node(
        context: CodemodContext, node: cst.CSTNode
    ) -> None:
        """
        Schedule any imports referenced by ``node`` or one of its children
        to be removed in a future invocation of this class by updating the
        ``context`` to include the ``module``, ``obj`` and ``alias`` for each
        import in question. When subclassing from
        :class:`~libcst.codemod.CodemodCommand`, this will be performed for you
        after your transform finishes executing. If you are subclassing from a
        :class:`~libcst.codemod.Codemod` instead, you will need to call the
        :meth:`~libcst.codemod.Codemod.transform_module` method on the module
        under modification with an instance of this class after performing your
        transform. Note that all imports that are referenced by this ``node``
        or its children will only be removed if they are not in use at the time
        of executing :meth:`~libcst.codemod.Codemod.transform_module`
        on an instance of :class:`~libcst.codemod.visitors.AddImportsVisitor`
        in order to avoid removing an in-use import.
        """
        # Special case both Import and ImportFrom so they can be
        # directly removed here.
        if isinstance(node, cst.Import):
            # Schedule every alias on the statement, e.g. `import a, b as c`.
            for import_alias in node.names:
                RemoveImportsVisitor.remove_unused_import(
                    context,
                    import_alias.evaluated_name,
                    asname=import_alias.evaluated_alias,
                )
        elif isinstance(node, cst.ImportFrom):
            names = node.names
            if isinstance(names, cst.ImportStar):
                # We don't handle removing this, so ignore it.
                return
            # Relative imports are resolved to an absolute module name first.
            module_name = get_absolute_module_for_import(context.full_module_name, node)
            if module_name is None:
                raise Exception("Cannot look up absolute module from relative import!")
            for import_alias in names:
                RemoveImportsVisitor.remove_unused_import(
                    context,
                    module_name,
                    obj=import_alias.evaluated_name,
                    asname=import_alias.evaluated_alias,
                )
        else:
            # Look up all children that could have been imported. Any that
            # we find will be scheduled for removal.
            node.visit(RemovedNodeVisitor(context))
    def __init__(
        self,
        context: CodemodContext,
        unused_imports: Sequence[Tuple[str, Optional[str], Optional[str]]] = (),
    ) -> None:
        """Build removal lookup tables from ``context`` plus ``unused_imports``."""
        # Allow for instantiation from either a context (used when multiple transforms
        # get chained) or from a direct instantiation.
        super().__init__(context)
        all_unused_imports: List[Tuple[str, Optional[str], Optional[str]]] = [
            *RemoveImportsVisitor._get_imports_from_context(context),
            *unused_imports,
        ]
        # Whole-module removals: module name -> expected alias (or None).
        self.unused_module_imports: Dict[str, Optional[str]] = {
            module: alias for module, obj, alias in all_unused_imports if obj is None
        }
        # Per-object removals: module name -> {(object name, alias or None), ...}.
        self.unused_obj_imports: Dict[str, Set[Tuple[str, Optional[str]]]] = {}
        for module, obj, alias in all_unused_imports:
            if obj is None:
                continue
            if module not in self.unused_obj_imports:
                self.unused_obj_imports[module] = set()
            self.unused_obj_imports[module].add((obj, alias))
        # Populated in visit_Module: aliases with no live references, mapped to
        # the import statement that declares them.
        self._unused_imports: Dict[
            cst.ImportAlias, Union[cst.Import, cst.ImportFrom]
        ] = {}
def visit_Module(self, node: cst.Module) -> None:
visitor = GatherUnusedImportsVisitor(self.context)
node.visit(visitor)
self._unused_imports = {k: v for (k, v) in visitor.unused_imports}
    def leave_Import(
        self, original_node: cst.Import, updated_node: cst.Import
    ) -> Union[cst.Import, cst.RemovalSentinel]:
        """Drop scheduled, unreferenced aliases from an ``import`` statement.

        Removes the whole statement when no aliases survive; otherwise rewrites
        the statement with only the surviving aliases.
        """
        names_to_keep = []
        for import_alias in original_node.names:
            if import_alias.evaluated_name not in self.unused_module_imports:
                # This is a keeper since we aren't removing it
                names_to_keep.append(import_alias)
                continue
            if (
                import_alias.evaluated_alias
                != self.unused_module_imports[import_alias.evaluated_name]
            ):
                # This is a keeper since the alias does not match
                # what we are looking for.
                names_to_keep.append(import_alias)
                continue
            # Now that we know we want to remove this module, figure out if
            # there are any live references to it.
            if import_alias not in self._unused_imports:
                names_to_keep.append(import_alias)
                continue
        # no changes
        if names_to_keep == original_node.names:
            return updated_node
        # Now, either remove this statement or remove the imports we are
        # deleting from this statement.
        if len(names_to_keep) == 0:
            return cst.RemoveFromParent()
        if names_to_keep[-1] != original_node.names[-1]:
            # Remove trailing comma in order to not mess up import statements.
            names_to_keep = [
                *names_to_keep[:-1],
                names_to_keep[-1].with_changes(comma=cst.MaybeSentinel.DEFAULT),
            ]
        return updated_node.with_changes(names=names_to_keep)
    def _process_importfrom_aliases(
        self,
        updated_node: cst.ImportFrom,
        names: Iterable[cst.ImportAlias],
        module_name: str,
    ) -> Dict[str, Any]:
        """Compute the ``with_changes`` updates for an ``ImportFrom`` statement.

        Returns a dict always containing ``"names"`` (the surviving aliases)
        and possibly ``"lpar"`` (a rewritten left paren carrying reparented
        comments). Trailing comments of removed aliases are reattached so they
        are not lost with the alias.
        """
        updates = {}
        names_to_keep = []
        objects_to_remove = self.unused_obj_imports[module_name]
        for import_alias in names:
            # Figure out if it is in our list of things to kill
            for name, alias in objects_to_remove:
                if (
                    name == import_alias.evaluated_name
                    and alias == import_alias.evaluated_alias
                ):
                    break
            else:
                # This is a keeper, we don't have it on our list.
                names_to_keep.append(import_alias)
                continue
            # Now that we know we want to remove this object, figure out if
            # there are any live references to it.
            if import_alias not in self._unused_imports:
                names_to_keep.append(import_alias)
                continue
            # We are about to remove `import_alias`. Check if there are any
            # trailing comments and reparent them to the previous import.
            # We only do this in case there's a trailing comma, otherwise the
            # entire import statement is going to be removed anyway.
            comma = import_alias.comma
            if isinstance(comma, cst.Comma):
                if len(names_to_keep) != 0:
                    # there is a previous import alias
                    prev = names_to_keep[-1]
                    if isinstance(prev.comma, cst.Comma):
                        prev = prev.with_deep_changes(
                            prev.comma,
                            whitespace_after=_merge_whitespace_after(
                                prev.comma.whitespace_after,
                                comma.whitespace_after,
                            ),
                        )
                    else:
                        # The previous alias didn't have a trailing comma. This can
                        # occur if the alias was generated, instead of being parsed
                        # from source.
                        prev = prev.with_changes(comma=comma)
                    names_to_keep[-1] = prev
                else:
                    # No previous import alias, need to attach comment to `ImportFrom`.
                    # We can only do this if there was a leftparen on the import
                    # statement. Otherwise there can't be any standalone comments
                    # anyway, so it's fine to skip this logic.
                    lpar = updated_node.lpar
                    if isinstance(lpar, cst.LeftParen):
                        updates["lpar"] = lpar.with_changes(
                            whitespace_after=_merge_whitespace_after(
                                lpar.whitespace_after,
                                comma.whitespace_after,
                            )
                        )
        updates["names"] = names_to_keep
        return updates
    def leave_ImportFrom(
        self, original_node: cst.ImportFrom, updated_node: cst.ImportFrom
    ) -> Union[cst.ImportFrom, cst.RemovalSentinel]:
        """Drop scheduled, unreferenced aliases from a ``from ... import`` statement.

        Removes the whole statement when no aliases survive. Star imports are
        never touched.
        """
        names = original_node.names
        if isinstance(names, cst.ImportStar):
            # This is a star import, so we won't remove it.
            return updated_node
        # Make sure we actually know the absolute module.
        module_name = get_absolute_module_for_import(
            self.context.full_module_name, updated_node
        )
        if module_name is None or module_name not in self.unused_obj_imports:
            # This node isn't on our list of todos, so let's bail.
            return updated_node
        updates = self._process_importfrom_aliases(updated_node, names, module_name)
        names_to_keep = updates["names"]
        # no changes
        if names_to_keep == names:
            return updated_node
        # Now, either remove this statement or remove the imports we are
        # deleting from this statement.
        if len(names_to_keep) == 0:
            return cst.RemoveFromParent()
        if names_to_keep[-1] != names[-1]:
            # Remove trailing comma in order to not mess up import statements.
            names_to_keep = [
                *names_to_keep[:-1],
                names_to_keep[-1].with_changes(comma=cst.MaybeSentinel.DEFAULT),
            ]
            updates["names"] = names_to_keep
        return updated_node.with_changes(**updates)
def _merge_whitespace_after(
    left: cst.BaseParenthesizableWhitespace, right: cst.BaseParenthesizableWhitespace
) -> cst.BaseParenthesizableWhitespace:
    """Fold the comment-bearing lines of ``right`` into ``left``.

    Used when an import alias is deleted so that standalone comments that
    trailed it are reparented onto the preceding node's whitespace.
    NOTE(review): ``left``'s own ``empty_lines`` are replaced (not extended)
    by ``right``'s comment lines — presumably intentional, since ``left`` is
    being kept in place and only ``right``'s comments would otherwise be
    lost; confirm against upstream libcst.
    """
    # If either side is not parenthesized whitespace there is nothing to
    # merge: fall back to whichever side can carry lines.
    if not isinstance(right, cst.ParenthesizedWhitespace):
        return left
    if not isinstance(left, cst.ParenthesizedWhitespace):
        return right
    return left.with_changes(
        empty_lines=tuple(
            line for line in right.empty_lines if line.comment is not None
        ),
    )
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from typing import Any, Dict, Iterable, List, Optional, Sequence, Set, Tuple, Union
import libcst as cst
from libcst.codemod._context import CodemodContext
from libcst.codemod._visitor import ContextAwareTransformer, ContextAwareVisitor
from libcst.codemod.visitors._gather_unused_imports import GatherUnusedImportsVisitor
from libcst.helpers import get_absolute_module_for_import, get_full_name_for_node
from libcst.metadata import Assignment, ProviderT, ScopeProvider
class RemovedNodeVisitor(ContextAwareVisitor):
    """Schedules removal of every import referenced by a visited subtree.

    Used by :meth:`RemoveImportsVisitor.remove_unused_import_by_node` to walk
    a node that is about to be deleted and mark each import it references as
    a removal candidate (the actual removal still checks for live uses).
    """
    def _remove_imports_from_import_stmt(
        self, local_name: str, import_node: cst.Import
    ) -> None:
        """Schedule aliases of ``import_node`` whose binding prefixes ``local_name``."""
        for import_alias in import_node.names:
            # The local binding is the alias if present, else the module name.
            if import_alias.evaluated_alias is None:
                prefix = import_alias.evaluated_name
            else:
                prefix = import_alias.evaluated_alias
            # Match the exact binding or an attribute access on it (`c.thing`).
            if local_name == prefix or local_name.startswith(f"{prefix}."):
                RemoveImportsVisitor.remove_unused_import(
                    self.context,
                    import_alias.evaluated_name,
                    asname=import_alias.evaluated_alias,
                )
    def _remove_imports_from_importfrom_stmt(
        self, local_name: str, import_node: cst.ImportFrom
    ) -> None:
        """Schedule aliases of ``import_node`` whose binding prefixes ``local_name``."""
        names = import_node.names
        if isinstance(names, cst.ImportStar):
            # We don't handle removing this, so ignore it.
            return
        module_name = get_absolute_module_for_import(
            self.context.full_module_name, import_node
        )
        if module_name is None:
            raise Exception("Cannot look up absolute module from relative import!")
        # We know any local names will refer to this as an alias if
        # there is one, and as the original name if there is not one
        for import_alias in names:
            if import_alias.evaluated_alias is None:
                prefix = import_alias.evaluated_name
            else:
                prefix = import_alias.evaluated_alias
            if local_name == prefix or local_name.startswith(f"{prefix}."):
                RemoveImportsVisitor.remove_unused_import(
                    self.context,
                    module_name,
                    obj=import_alias.evaluated_name,
                    asname=import_alias.evaluated_alias,
                )
    def _visit_name_attr_alike(self, node: Union[cst.Name, cst.Attribute]) -> None:
        """Resolve ``node`` through scope analysis and schedule its imports."""
        # Look up the local name of this node.
        local_name = get_full_name_for_node(node)
        if local_name is None:
            return
        # Look up the scope for this node, remove the import that caused it to exist.
        metadata_wrapper = self.context.wrapper
        if metadata_wrapper is None:
            raise Exception("Cannot look up import, metadata is not computed for node!")
        scope_provider = metadata_wrapper.resolve(ScopeProvider)
        try:
            scope = scope_provider[node]
            if scope is None:
                # This object has no scope, so we can't remove it.
                return
        except KeyError:
            # This object has no scope, so we can't remove it.
            return
        # Walk the scope chain up to (and including) the outermost scope,
        # which is its own parent.
        while True:
            for assignment in scope.assignments[node] or set():
                # We only care about non-builtins.
                if isinstance(assignment, Assignment):
                    import_node = assignment.node
                    if isinstance(import_node, cst.Import):
                        self._remove_imports_from_import_stmt(local_name, import_node)
                    elif isinstance(import_node, cst.ImportFrom):
                        self._remove_imports_from_importfrom_stmt(
                            local_name, import_node
                        )
            if scope is scope.parent:
                break
            scope = scope.parent
    def visit_Name(self, node: cst.Name) -> None:
        self._visit_name_attr_alike(node)
    def visit_Attribute(self, node: cst.Attribute) -> None:
        self._visit_name_attr_alike(node)
class RemoveImportsVisitor(ContextAwareTransformer):
    """
    Attempt to remove given imports from a module, dependent on whether there are
    any uses of the imported objects. Given a :class:`~libcst.codemod.CodemodContext`
    and a sequence of tuples specifying a module to remove as a string. Optionally
    an object being imported from that module and optionally an alias assigned to
    that imported object, ensures that that import no longer exists as long as there
    are no remaining references.
    Note that static analysis is able to determine safely whether an import is still
    needed given a particular module, but it is currently unable to determine whether
    an imported object is re-exported and used inside another module unless that
    object appears in an ``__all__`` list.
    This is one of the transforms that is available automatically to you when running
    a codemod. To use it in this manner, import
    :class:`~libcst.codemod.visitors.RemoveImportsVisitor` and then call the static
    :meth:`~libcst.codemod.visitors.RemoveImportsVisitor.remove_unused_import` method,
    giving it the current context (found as ``self.context`` for all subclasses of
    :class:`~libcst.codemod.Codemod`), the module you wish to remove and
    optionally an object you wish to stop importing as well as an alias that the
    object is currently assigned to.
    For example::
        RemoveImportsVisitor.remove_unused_import(self.context, "typing", "Optional")
    This will remove any ``from typing import Optional`` that exists in the module
    as long as there are no uses of ``Optional`` in that module.
    As another example::
        RemoveImportsVisitor.remove_unused_import(self.context, "typing")
    This will remove any ``import typing`` that exists in the module, as long as
    there are no references to ``typing`` in that module, including references
    such as ``typing.Optional``.
    Additionally, :class:`~libcst.codemod.visitors.RemoveImportsVisitor` includes
    a convenience function
    :meth:`~libcst.codemod.visitors.RemoveImportsVisitor.remove_unused_import_by_node`
    which will attempt to schedule removal of all imports referenced in that node
    and its children. This is especially useful inside transforms when you are going
    to remove a node using :func:`~libcst.RemoveFromParent` to get rid of a node.
    For example::
        def leave_AnnAssign(
            self, original_node: cst.AnnAssign, updated_node: cst.AnnAssign,
        ) -> cst.RemovalSentinel:
            # Remove all annotated assignment statements, clean up imports.
            RemoveImportsVisitor.remove_unused_import_by_node(self.context, original_node)
            return cst.RemoveFromParent()
    This will remove all annotated assignment statements from a module as well
    as clean up any imports that were only referenced in those assignments. Note
    that we pass the ``original_node`` to the helper function as it uses scope analysis
    under the hood which is only computed on the original tree.
    Note that this is a subclass of :class:`~libcst.CSTTransformer` so it is
    possible to instantiate it and pass it to a :class:`~libcst.Module`
    :meth:`~libcst.CSTNode.visit` method. However, it is far easier to use
    the automatic transform feature of :class:`~libcst.codemod.CodemodCommand`
    and schedule an import to be added by calling
    :meth:`~libcst.codemod.visitors.RemoveImportsVisitor.remove_unused_import`
    """
    # Key under which scheduled removals are stashed on context.scratch.
    CONTEXT_KEY = "RemoveImportsVisitor"
    # Inherit the metadata providers that the unused-import gatherer needs.
    METADATA_DEPENDENCIES: Tuple[ProviderT, ...] = (
        *GatherUnusedImportsVisitor.METADATA_DEPENDENCIES,
    )
    @staticmethod
    def _get_imports_from_context(
        context: CodemodContext,
    ) -> List[Tuple[str, Optional[str], Optional[str]]]:
        """Return the removal schedule stored on ``context``, validating its type."""
        unused_imports = context.scratch.get(RemoveImportsVisitor.CONTEXT_KEY, [])
        if not isinstance(unused_imports, list):
            raise Exception("Logic error!")
        return unused_imports
    @staticmethod
    def remove_unused_import(
        context: CodemodContext,
        module: str,
        obj: Optional[str] = None,
        asname: Optional[str] = None,
    ) -> None:
        """
        Schedule an import to be removed in a future invocation of this class by
        updating the ``context`` to include the ``module`` and optionally ``obj``
        which is currently imported as well as optionally ``alias`` that the
        imported ``module`` or ``obj`` is aliased to. When subclassing from
        :class:`~libcst.codemod.CodemodCommand`, this will be performed for you
        after your transform finishes executing. If you are subclassing from a
        :class:`~libcst.codemod.Codemod` instead, you will need to call the
        :meth:`~libcst.codemod.Codemod.transform_module` method on the module
        under modification with an instance of this class after performing your
        transform. Note that if the particular ``module`` or ``obj`` you are
        requesting to remove is still in use somewhere in the current module
        at the time of executing :meth:`~libcst.codemod.Codemod.transform_module`
        on an instance of :class:`~libcst.codemod.visitors.AddImportsVisitor`,
        this will perform no action in order to avoid removing an in-use import.
        """
        unused_imports = RemoveImportsVisitor._get_imports_from_context(context)
        unused_imports.append((module, obj, asname))
        context.scratch[RemoveImportsVisitor.CONTEXT_KEY] = unused_imports
    @staticmethod
    def remove_unused_import_by_node(
        context: CodemodContext, node: cst.CSTNode
    ) -> None:
        """
        Schedule any imports referenced by ``node`` or one of its children
        to be removed in a future invocation of this class by updating the
        ``context`` to include the ``module``, ``obj`` and ``alias`` for each
        import in question. When subclassing from
        :class:`~libcst.codemod.CodemodCommand`, this will be performed for you
        after your transform finishes executing. If you are subclassing from a
        :class:`~libcst.codemod.Codemod` instead, you will need to call the
        :meth:`~libcst.codemod.Codemod.transform_module` method on the module
        under modification with an instance of this class after performing your
        transform. Note that all imports that are referenced by this ``node``
        or its children will only be removed if they are not in use at the time
        of executing :meth:`~libcst.codemod.Codemod.transform_module`
        on an instance of :class:`~libcst.codemod.visitors.AddImportsVisitor`
        in order to avoid removing an in-use import.
        """
        # Special case both Import and ImportFrom so they can be
        # directly removed here.
        if isinstance(node, cst.Import):
            for import_alias in node.names:
                RemoveImportsVisitor.remove_unused_import(
                    context,
                    import_alias.evaluated_name,
                    asname=import_alias.evaluated_alias,
                )
        elif isinstance(node, cst.ImportFrom):
            names = node.names
            if isinstance(names, cst.ImportStar):
                # We don't handle removing this, so ignore it.
                return
            module_name = get_absolute_module_for_import(context.full_module_name, node)
            if module_name is None:
                raise Exception("Cannot look up absolute module from relative import!")
            for import_alias in names:
                RemoveImportsVisitor.remove_unused_import(
                    context,
                    module_name,
                    obj=import_alias.evaluated_name,
                    asname=import_alias.evaluated_alias,
                )
        else:
            # Look up all children that could have been imported. Any that
            # we find will be scheduled for removal.
            node.visit(RemovedNodeVisitor(context))
    def __init__(
        self,
        context: CodemodContext,
        unused_imports: Sequence[Tuple[str, Optional[str], Optional[str]]] = (),
    ) -> None:
        """Build removal lookup tables from ``context`` plus ``unused_imports``."""
        # Allow for instantiation from either a context (used when multiple transforms
        # get chained) or from a direct instantiation.
        super().__init__(context)
        all_unused_imports: List[Tuple[str, Optional[str], Optional[str]]] = [
            *RemoveImportsVisitor._get_imports_from_context(context),
            *unused_imports,
        ]
        # Whole-module removals: module name -> expected alias (or None).
        self.unused_module_imports: Dict[str, Optional[str]] = {
            module: alias for module, obj, alias in all_unused_imports if obj is None
        }
        # Per-object removals: module name -> {(object name, alias or None), ...}.
        self.unused_obj_imports: Dict[str, Set[Tuple[str, Optional[str]]]] = {}
        for module, obj, alias in all_unused_imports:
            if obj is None:
                continue
            if module not in self.unused_obj_imports:
                self.unused_obj_imports[module] = set()
            self.unused_obj_imports[module].add((obj, alias))
        # Populated in visit_Module: aliases with no live references, mapped to
        # the import statement that declares them.
        self._unused_imports: Dict[
            cst.ImportAlias, Union[cst.Import, cst.ImportFrom]
        ] = {}
    def visit_Module(self, node: cst.Module) -> None:
        """Precompute which import aliases in this module have no live references."""
        visitor = GatherUnusedImportsVisitor(self.context)
        node.visit(visitor)
        self._unused_imports = {k: v for (k, v) in visitor.unused_imports}
    def leave_Import(
        self, original_node: cst.Import, updated_node: cst.Import
    ) -> Union[cst.Import, cst.RemovalSentinel]:
        """Drop scheduled, unreferenced aliases from an ``import`` statement."""
        names_to_keep = []
        for import_alias in original_node.names:
            if import_alias.evaluated_name not in self.unused_module_imports:
                # This is a keeper since we aren't removing it
                names_to_keep.append(import_alias)
                continue
            if (
                import_alias.evaluated_alias
                != self.unused_module_imports[import_alias.evaluated_name]
            ):
                # This is a keeper since the alias does not match
                # what we are looking for.
                names_to_keep.append(import_alias)
                continue
            # Now that we know we want to remove this module, figure out if
            # there are any live references to it.
            if import_alias not in self._unused_imports:
                names_to_keep.append(import_alias)
                continue
        # no changes
        if names_to_keep == original_node.names:
            return updated_node
        # Now, either remove this statement or remove the imports we are
        # deleting from this statement.
        if len(names_to_keep) == 0:
            return cst.RemoveFromParent()
        if names_to_keep[-1] != original_node.names[-1]:
            # Remove trailing comma in order to not mess up import statements.
            names_to_keep = [
                *names_to_keep[:-1],
                names_to_keep[-1].with_changes(comma=cst.MaybeSentinel.DEFAULT),
            ]
        return updated_node.with_changes(names=names_to_keep)
    def _process_importfrom_aliases(
        self,
        updated_node: cst.ImportFrom,
        names: Iterable[cst.ImportAlias],
        module_name: str,
    ) -> Dict[str, Any]:
        """Compute ``with_changes`` updates (surviving ``names`` and possibly a
        rewritten ``lpar`` carrying reparented comments) for an ``ImportFrom``."""
        updates = {}
        names_to_keep = []
        objects_to_remove = self.unused_obj_imports[module_name]
        for import_alias in names:
            # Figure out if it is in our list of things to kill
            for name, alias in objects_to_remove:
                if (
                    name == import_alias.evaluated_name
                    and alias == import_alias.evaluated_alias
                ):
                    break
            else:
                # This is a keeper, we don't have it on our list.
                names_to_keep.append(import_alias)
                continue
            # Now that we know we want to remove this object, figure out if
            # there are any live references to it.
            if import_alias not in self._unused_imports:
                names_to_keep.append(import_alias)
                continue
            # We are about to remove `import_alias`. Check if there are any
            # trailing comments and reparent them to the previous import.
            # We only do this in case there's a trailing comma, otherwise the
            # entire import statement is going to be removed anyway.
            comma = import_alias.comma
            if isinstance(comma, cst.Comma):
                if len(names_to_keep) != 0:
                    # there is a previous import alias
                    prev = names_to_keep[-1]
                    if isinstance(prev.comma, cst.Comma):
                        prev = prev.with_deep_changes(
                            prev.comma,
                            whitespace_after=_merge_whitespace_after(
                                prev.comma.whitespace_after,
                                comma.whitespace_after,
                            ),
                        )
                    else:
                        # The previous alias didn't have a trailing comma. This can
                        # occur if the alias was generated, instead of being parsed
                        # from source.
                        prev = prev.with_changes(comma=comma)
                    names_to_keep[-1] = prev
                else:
                    # No previous import alias, need to attach comment to `ImportFrom`.
                    # We can only do this if there was a leftparen on the import
                    # statement. Otherwise there can't be any standalone comments
                    # anyway, so it's fine to skip this logic.
                    lpar = updated_node.lpar
                    if isinstance(lpar, cst.LeftParen):
                        updates["lpar"] = lpar.with_changes(
                            whitespace_after=_merge_whitespace_after(
                                lpar.whitespace_after,
                                comma.whitespace_after,
                            )
                        )
        updates["names"] = names_to_keep
        return updates
    def leave_ImportFrom(
        self, original_node: cst.ImportFrom, updated_node: cst.ImportFrom
    ) -> Union[cst.ImportFrom, cst.RemovalSentinel]:
        """Drop scheduled, unreferenced aliases from a ``from ... import`` statement."""
        names = original_node.names
        if isinstance(names, cst.ImportStar):
            # This is a star import, so we won't remove it.
            return updated_node
        # Make sure we actually know the absolute module.
        module_name = get_absolute_module_for_import(
            self.context.full_module_name, updated_node
        )
        if module_name is None or module_name not in self.unused_obj_imports:
            # This node isn't on our list of todos, so let's bail.
            return updated_node
        updates = self._process_importfrom_aliases(updated_node, names, module_name)
        names_to_keep = updates["names"]
        # no changes
        if names_to_keep == names:
            return updated_node
        # Now, either remove this statement or remove the imports we are
        # deleting from this statement.
        if len(names_to_keep) == 0:
            return cst.RemoveFromParent()
        if names_to_keep[-1] != names[-1]:
            # Remove trailing comma in order to not mess up import statements.
            names_to_keep = [
                *names_to_keep[:-1],
                names_to_keep[-1].with_changes(comma=cst.MaybeSentinel.DEFAULT),
            ]
            updates["names"] = names_to_keep
        return updated_node.with_changes(**updates)
def _merge_whitespace_after(
    left: cst.BaseParenthesizableWhitespace, right: cst.BaseParenthesizableWhitespace
) -> cst.BaseParenthesizableWhitespace:
    """Fold the comment-bearing lines of ``right`` into ``left``.

    Used when an import alias is deleted so that standalone comments that
    trailed it are reparented onto the preceding node's whitespace.
    NOTE(review): ``left``'s own ``empty_lines`` are replaced (not extended)
    by ``right``'s comment lines — presumably intentional; confirm against
    upstream libcst.
    """
    if not isinstance(right, cst.ParenthesizedWhitespace):
        return left
    if not isinstance(left, cst.ParenthesizedWhitespace):
        return right
    return left.with_changes(
        empty_lines=tuple(
            line for line in right.empty_lines if line.comment is not None
        ),
    )
|
en
| 0.863159
|
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # # We don't handle removing this, so ignore it. # We know any local names will refer to this as an alias if # there is one, and as the original name if there is not one # Look up the local name of this node. # Look up the scope for this node, remove the import that caused it to exist. # This object has no scope, so we can't remove it. # This object has no scope, so we can't remove it. # We only care about non-builtins. Attempt to remove given imports from a module, dependent on whether there are any uses of the imported objects. Given a :class:`~libcst.codemod.CodemodContext` and a sequence of tuples specifying a module to remove as a string. Optionally an object being imported from that module and optionally an alias assigned to that imported object, ensures that that import no longer exists as long as there are no remaining references. Note that static analysis is able to determine safely whether an import is still needed given a particular module, but it is currently unable to determine whether an imported object is re-exported and used inside another module unless that object appears in an ``__any__`` list. This is one of the transforms that is available automatically to you when running a codemod. To use it in this manner, import :class:`~libcst.codemod.visitors.RemoveImportsVisitor` and then call the static :meth:`~libcst.codemod.visitors.RemoveImportsVisitor.remove_unused_import` method, giving it the current context (found as ``self.context`` for all subclasses of :class:`~libcst.codemod.Codemod`), the module you wish to remove and optionally an object you wish to stop importing as well as an alias that the object is currently assigned to. 
For example:: RemoveImportsVisitor.remove_unused_import(self.context, "typing", "Optional") This will remove any ``from typing import Optional`` that exists in the module as long as there are no uses of ``Optional`` in that module. As another example:: RemoveImportsVisitor.remove_unused_import(self.context, "typing") This will remove any ``import typing`` that exists in the module, as long as there are no references to ``typing`` in that module, including references such as ``typing.Optional``. Additionally, :class:`~libcst.codemod.visitors.RemoveImportsVisitor` includes a convenience function :meth:`~libcst.codemod.visitors.RemoveImportsVisitor.remove_unused_import_by_node` which will attempt to schedule removal of all imports referenced in that node and its children. This is especially useful inside transforms when you are going to remove a node using :func:`~libcst.RemoveFromParent` to get rid of a node. For example:: def leave_AnnAssign( self, original_node: cst.AnnAssign, updated_node: cst.AnnAssign, ) -> cst.RemovalSentinel: # Remove all annotated assignment statements, clean up imports. RemoveImportsVisitor.remove_unused_import_by_node(self.context, original_node) return cst.RemovalFromParent() This will remove all annotated assignment statements from a module as well as clean up any imports that were only referenced in those assignments. Note that we pass the ``original_node`` to the helper function as it uses scope analysis under the hood which is only computed on the original tree. Note that this is a subclass of :class:`~libcst.CSTTransformer` so it is possible to instantiate it and pass it to a :class:`~libcst.Module` :meth:`~libcst.CSTNode.visit` method. 
However, it is far easier to use the automatic transform feature of :class:`~libcst.codemod.CodemodCommand` and schedule an import to be added by calling :meth:`~libcst.codemod.visitors.RemoveImportsVisitor.remove_unused_import` Schedule an import to be removed in a future invocation of this class by updating the ``context`` to include the ``module`` and optionally ``obj`` which is currently imported as well as optionally ``alias`` that the imported ``module`` or ``obj`` is aliased to. When subclassing from :class:`~libcst.codemod.CodemodCommand`, this will be performed for you after your transform finishes executing. If you are subclassing from a :class:`~libcst.codemod.Codemod` instead, you will need to call the :meth:`~libcst.codemod.Codemod.transform_module` method on the module under modification with an instance of this class after performing your transform. Note that if the particular ``module`` or ``obj`` you are requesting to remove is still in use somewhere in the current module at the time of executing :meth:`~libcst.codemod.Codemod.transform_module` on an instance of :class:`~libcst.codemod.visitors.AddImportsVisitor`, this will perform no action in order to avoid removing an in-use import. Schedule any imports referenced by ``node`` or one of its children to be removed in a future invocation of this class by updating the ``context`` to include the ``module``, ``obj`` and ``alias`` for each import in question. When subclassing from :class:`~libcst.codemod.CodemodCommand`, this will be performed for you after your transform finishes executing. If you are subclassing from a :class:`~libcst.codemod.Codemod` instead, you will need to call the :meth:`~libcst.codemod.Codemod.transform_module` method on the module under modification with an instance of this class after performing your transform. 
Note that all imports that are referenced by this ``node`` or its children will only be removed if they are not in use at the time of exeucting :meth:`~libcst.codemod.Codemod.transform_module` on an instance of :class:`~libcst.codemod.visitors.AddImportsVisitor` in order to avoid removing an in-use import. # Special case both Import and ImportFrom so they can be # directly removed here. # We don't handle removing this, so ignore it. # Look up all children that could have been imported. Any that # we find will be scheduled for removal. # Allow for instantiation from either a context (used when multiple transforms # get chained) or from a direct instantiation. # This is a keeper since we aren't removing it # This is a keeper since the alias does not match # what we are looking for. # Now that we know we want to remove this module, figure out if # there are any live references to it. # no changes # Now, either remove this statement or remove the imports we are # deleting from this statement. # Remove trailing comma in order to not mess up import statements. # Figure out if it is in our list of things to kill # This is a keeper, we don't have it on our list. # Now that we know we want to remove this object, figure out if # there are any live references to it. # We are about to remove `import_alias`. Check if there are any # trailing comments and reparent them to the previous import. # We only do this in case there's a trailing comma, otherwise the # entire import statement is going to be removed anyway. # there is a previous import alias # The previous alias didn't have a trailing comma. This can # occur if the alias was generated, instead of being parsed # from source. # No previous import alias, need to attach comment to `ImportFrom`. # We can only do this if there was a leftparen on the import # statement. Otherwise there can't be any standalone comments # anyway, so it's fine to skip this logic. # This is a star import, so we won't remove it. 
# Make sure we actually know the absolute module. # This node isn't on our list of todos, so let's bail. # no changes # Now, either remove this statement or remove the imports we are # deleting from this statement. # Remove trailing comma in order to not mess up import statements.
| 1.79385
| 2
|
src/sprig/intervals.py
|
apljungquist/spr
| 2
|
6629386
|
"""Utilities for working with intervals."""
import itertools
from typing import (
Any,
Collection,
Dict,
FrozenSet,
Hashable,
Iterable,
Iterator,
List,
Mapping,
Sequence,
Set,
Tuple,
TypeVar,
)
from typing_extensions import Literal, Protocol
# Marker for which end of an interval an endpoint record represents.
End = Literal["L", "R"]
class SupportsLessThan(Protocol):
    """Structural type for any value orderable with ``<``."""
    # pylint: disable=too-few-public-methods
    def __lt__(self, __other: Any) -> bool:
        ...
# Type variables: interval bounds must be orderable, keys must be hashable.
SupportsLessThanT = TypeVar("SupportsLessThanT", bound=SupportsLessThan)
HashableT = TypeVar("HashableT", bound=Hashable)
T = TypeVar("T")
# Concrete values of the ``End`` literal markers.
_LEFT = "L"
_RIGHT = "R"
def _subsets(items: Collection[T]) -> Iterator[Tuple[T, ...]]:
"""Iterate over all possible subsets of `items`.
The order in which subsets appear is not guaranteed.
>>> sorted(_subsets([0]))
[(0,)]
>>> sorted(_subsets([0, 1]))
[(0,), (0, 1), (1,)]
>>> sorted(_subsets([0, 1, 2]))
[(0,), (0, 1), (0, 1, 2), (0, 2), (1,), (1, 2), (2,)]
"""
return itertools.chain.from_iterable(
itertools.combinations(items, i + 1) for i in range(len(items))
)
def _intersection(
intervals: Iterable[Tuple[SupportsLessThanT, SupportsLessThanT]]
) -> Tuple[SupportsLessThanT, SupportsLessThanT]:
"""Return the biggest interval that is a subset of all given intervals
>>> _intersection([(10,60), (40, 90)])
(40, 60)
>>> _intersection([(10,60), (40, 90), (45, 55)])
(45, 55)
"""
lefts, rights = zip(*intervals)
left = max(lefts)
right = min(rights)
# TODO: Consider raising on degenerate
return left, right
def _endpoints(
intervals: Iterable[Tuple[HashableT, Tuple[SupportsLessThanT, SupportsLessThanT]]]
) -> Iterator[Tuple[SupportsLessThanT, int, HashableT, End]]:
for tie_breaker, (key, (left, right)) in enumerate(intervals):
yield left, tie_breaker, key, "L"
yield right, tie_breaker, key, "R"
def _intervals(
endpoints: Iterable[Tuple[SupportsLessThanT, int, HashableT, End]]
) -> Iterator[Tuple[HashableT, Tuple[SupportsLessThanT, SupportsLessThanT]]]:
active = {}
for when, _, key, side in endpoints:
if side is _LEFT:
active[key] = when
else:
yield key, (active.pop(key), when)
def _intersecting_subsets(
    endpoints: "Iterable[Tuple[SupportsLessThanT, int, HashableT, End]]"
) -> "Iterator[Tuple[SupportsLessThanT, int, FrozenSet[HashableT], End]]":
    """Expand single-key endpoints into endpoints for intersecting subsets.

    Sweeping left to right over sorted `endpoints`: each endpoint of `key`
    is also an endpoint of the intersection of every subset that contains
    `key` plus any of the currently-open keys.
    """
    active: "Dict[HashableT, SupportsLessThanT]" = {}
    for when, tie_breaker, key, side in endpoints:
        # Fixed: value comparisons instead of the original identity checks
        # (`is`), which relied on CPython's interning of 1-char strings.
        if side == "R":
            del active[key]
        # Emit an endpoint for {key} and for {key} unioned with each subset
        # of the other open keys (key is excluded from `active` here on both
        # sides: removed above for "R", not yet inserted for "L").
        yield when, tie_breaker, frozenset([key]), side
        for keys in _subsets(active):
            yield when, tie_breaker, frozenset((key,) + keys), side
        if side == "L":
            active[key] = when
def intersecting_subsets(
    intervals: "Mapping[HashableT, Tuple[SupportsLessThanT, SupportsLessThanT]]"
) -> "Dict[FrozenSet[HashableT], Tuple[SupportsLessThanT, SupportsLessThanT]]":
    """All intervals formed by reducing subsets with intersection

    In no particular order.
    """
    # Sweep-line pipeline: flatten to endpoints, expand to subset endpoints,
    # then stitch endpoints back into intervals.
    sweep = sorted(_endpoints(intervals.items()))
    return dict(_intervals(_intersecting_subsets(sweep)))
def _intersecting_combinations(
endpoints: Iterable[Tuple[SupportsLessThanT, int, HashableT, End]], k: int
) -> Iterator[Tuple[SupportsLessThanT, int, FrozenSet[HashableT], End]]:
active: Dict[HashableT, SupportsLessThanT] = {}
for when, tie_breaker, key, side in endpoints:
if side is _RIGHT:
del active[key]
for keys in itertools.combinations(active, k - 1):
yield when, tie_breaker, frozenset((key,) + keys), side
if side is _LEFT:
active[key] = when
def intersecting_combinations(
    intervals: "Mapping[HashableT, Tuple[SupportsLessThanT, SupportsLessThanT]]", k: int
) -> "Dict[FrozenSet[HashableT], Tuple[SupportsLessThanT, SupportsLessThanT]]":
    """All intervals formed by reducing k-combinations with intersection

    In no particular order.
    """
    # Sweep-line pipeline: flatten to endpoints, expand to k-combination
    # endpoints, then stitch endpoints back into intervals.
    sweep = sorted(_endpoints(intervals.items()))
    return dict(_intervals(_intersecting_combinations(sweep, k)))
def _intersecting_products(
factored_endpoints: Iterable[
Tuple[SupportsLessThanT, int, Tuple[int, HashableT], End]
],
num_factor: int,
) -> Iterator[Tuple[SupportsLessThanT, int, Tuple[HashableT, ...], End]]:
active: List[Set[HashableT]] = [set() for _ in range(num_factor)]
for when, tie_breaker, (factor_num, key), side in factored_endpoints:
if side is _LEFT:
active[factor_num].add(key)
tmp = active[:]
tmp[factor_num] = {key}
for keys in itertools.product(*tmp):
yield when, tie_breaker, keys, side
if side is _RIGHT:
active[factor_num].remove(key)
def intersecting_products(
    factors: "Sequence[Mapping[HashableT, Tuple[SupportsLessThanT, SupportsLessThanT]]]"
) -> "Mapping[Sequence[HashableT], Tuple[SupportsLessThanT, SupportsLessThanT]]":
    """All intervals formed by reducing products with intersection.

    In other words every interval that can be formed by intersecting exactly one
    interval from each of the `factors`.

    >>> intersecting_products([{0: (1, 7)}, {1:(3, 9)}, {2: (0, 2), 3: (0,4)}])
    {(0, 1, 3): (3, 4)}

    Note that the time complexity worse if intervals within a factor may intersect.
    It is potentially much worse if a large portion of the intervals do intersect.
    """
    # Tag every key with the index of its factor so the sweep knows which
    # slot of the product each endpoint belongs to.
    tagged = []
    for factor_num, factor in enumerate(factors):
        for key, bounds in factor.items():
            tagged.append(((factor_num, key), bounds))
    sweep = sorted(_endpoints(tagged))
    return dict(_intervals(_intersecting_products(sweep, len(factors))))
def auto_intersections(
    intervals: "Mapping[HashableT, Tuple[SupportsLessThanT, SupportsLessThanT]]"
) -> "Dict[FrozenSet[HashableT], Tuple[SupportsLessThanT, SupportsLessThanT]]":
    """All intervals formed by intersecting two of the given intervals

    >>> auto_intersections({0:(0,3), 1:(5,6), 2:(2,4)})
    {frozenset({0, 2}): (2, 3)}
    """
    # Pairwise self-intersection is just the k=2 combination case.
    return intersecting_combinations(intervals, k=2)
def without_degenerate(
    intervals: "Mapping[HashableT, Tuple[SupportsLessThanT, SupportsLessThanT]]",
) -> "Dict[HashableT, Tuple[SupportsLessThanT, SupportsLessThanT]]":
    """Return only proper intervals

    Zero-width and inverted intervals are dropped.

    >>> without_degenerate({0:(1,1), 1:(2,3)})
    {1: (2, 3)}
    """
    proper = {}
    for key, bounds in intervals.items():
        left, right = bounds
        if left < right:
            proper[key] = bounds
    return proper
|
"""Utilities for working with intervals."""
import itertools
from typing import (
Any,
Collection,
Dict,
FrozenSet,
Hashable,
Iterable,
Iterator,
List,
Mapping,
Sequence,
Set,
Tuple,
TypeVar,
)
from typing_extensions import Literal, Protocol
End = Literal["L", "R"]
class SupportsLessThan(Protocol):
# pylint: disable=too-few-public-methods
def __lt__(self, __other: Any) -> bool:
...
SupportsLessThanT = TypeVar("SupportsLessThanT", bound=SupportsLessThan)
HashableT = TypeVar("HashableT", bound=Hashable)
T = TypeVar("T")
_LEFT = "L"
_RIGHT = "R"
def _subsets(items: Collection[T]) -> Iterator[Tuple[T, ...]]:
"""Iterate over all possible subsets of `items`.
The order in which subsets appear is not guaranteed.
>>> sorted(_subsets([0]))
[(0,)]
>>> sorted(_subsets([0, 1]))
[(0,), (0, 1), (1,)]
>>> sorted(_subsets([0, 1, 2]))
[(0,), (0, 1), (0, 1, 2), (0, 2), (1,), (1, 2), (2,)]
"""
return itertools.chain.from_iterable(
itertools.combinations(items, i + 1) for i in range(len(items))
)
def _intersection(
intervals: Iterable[Tuple[SupportsLessThanT, SupportsLessThanT]]
) -> Tuple[SupportsLessThanT, SupportsLessThanT]:
"""Return the biggest interval that is a subset of all given intervals
>>> _intersection([(10,60), (40, 90)])
(40, 60)
>>> _intersection([(10,60), (40, 90), (45, 55)])
(45, 55)
"""
lefts, rights = zip(*intervals)
left = max(lefts)
right = min(rights)
# TODO: Consider raising on degenerate
return left, right
def _endpoints(
intervals: Iterable[Tuple[HashableT, Tuple[SupportsLessThanT, SupportsLessThanT]]]
) -> Iterator[Tuple[SupportsLessThanT, int, HashableT, End]]:
for tie_breaker, (key, (left, right)) in enumerate(intervals):
yield left, tie_breaker, key, "L"
yield right, tie_breaker, key, "R"
def _intervals(
endpoints: Iterable[Tuple[SupportsLessThanT, int, HashableT, End]]
) -> Iterator[Tuple[HashableT, Tuple[SupportsLessThanT, SupportsLessThanT]]]:
active = {}
for when, _, key, side in endpoints:
if side is _LEFT:
active[key] = when
else:
yield key, (active.pop(key), when)
def _intersecting_subsets(
endpoints: Iterable[Tuple[SupportsLessThanT, int, HashableT, End]]
) -> Iterator[Tuple[SupportsLessThanT, int, FrozenSet[HashableT], End]]:
active: Dict[HashableT, SupportsLessThanT] = {}
for when, tie_breaker, key, side in endpoints:
if side is _RIGHT:
del active[key]
yield when, tie_breaker, frozenset([key]), side
for keys in _subsets(active):
yield when, tie_breaker, frozenset((key,) + keys), side
if side is _LEFT:
active[key] = when
def intersecting_subsets(
intervals: Mapping[HashableT, Tuple[SupportsLessThanT, SupportsLessThanT]]
) -> Dict[FrozenSet[HashableT], Tuple[SupportsLessThanT, SupportsLessThanT]]:
"""All intervals formed by reducing subsets with intersection
In no particular order.
"""
items = intervals.items()
input_endpoints = sorted(_endpoints(items))
output_endpoints = _intersecting_subsets(input_endpoints)
return dict(_intervals(output_endpoints))
def _intersecting_combinations(
endpoints: Iterable[Tuple[SupportsLessThanT, int, HashableT, End]], k: int
) -> Iterator[Tuple[SupportsLessThanT, int, FrozenSet[HashableT], End]]:
active: Dict[HashableT, SupportsLessThanT] = {}
for when, tie_breaker, key, side in endpoints:
if side is _RIGHT:
del active[key]
for keys in itertools.combinations(active, k - 1):
yield when, tie_breaker, frozenset((key,) + keys), side
if side is _LEFT:
active[key] = when
def intersecting_combinations(
intervals: Mapping[HashableT, Tuple[SupportsLessThanT, SupportsLessThanT]], k: int
) -> Dict[FrozenSet[HashableT], Tuple[SupportsLessThanT, SupportsLessThanT]]:
"""All intervals formed by reducing k-combinations with intersection
In no particular order.
"""
items = intervals.items()
input_endpoints = sorted(_endpoints(items))
output_endpoints = _intersecting_combinations(input_endpoints, k)
return dict(_intervals(output_endpoints))
def _intersecting_products(
factored_endpoints: Iterable[
Tuple[SupportsLessThanT, int, Tuple[int, HashableT], End]
],
num_factor: int,
) -> Iterator[Tuple[SupportsLessThanT, int, Tuple[HashableT, ...], End]]:
active: List[Set[HashableT]] = [set() for _ in range(num_factor)]
for when, tie_breaker, (factor_num, key), side in factored_endpoints:
if side is _LEFT:
active[factor_num].add(key)
tmp = active[:]
tmp[factor_num] = {key}
for keys in itertools.product(*tmp):
yield when, tie_breaker, keys, side
if side is _RIGHT:
active[factor_num].remove(key)
def intersecting_products(
factors: Sequence[Mapping[HashableT, Tuple[SupportsLessThanT, SupportsLessThanT]]]
) -> Mapping[Sequence[HashableT], Tuple[SupportsLessThanT, SupportsLessThanT]]:
"""All intervals formed by reducing products with intersection.
In other words every interval that can be formed by intersecting exactly one
interval from each of the `factors`.
>>> intersecting_products([{0: (1, 7)}, {1:(3, 9)}, {2: (0, 2), 3: (0,4)}])
{(0, 1, 3): (3, 4)}
Note that the time complexity worse if intervals within a factor may intersect.
It is potentially much worse if a large portion of the intervals do intersect.
"""
items = [
((factor_num, key), (left, right))
for factor_num, factor in enumerate(factors)
for key, (left, right) in factor.items()
]
input_endpoints = sorted(_endpoints(items))
output_endpoints = _intersecting_products(input_endpoints, len(factors))
return dict(_intervals(output_endpoints))
def auto_intersections(
intervals: Mapping[HashableT, Tuple[SupportsLessThanT, SupportsLessThanT]]
) -> Dict[FrozenSet[HashableT], Tuple[SupportsLessThanT, SupportsLessThanT]]:
"""All intervals formed by intersecting two of the given intervals
>>> auto_intersections({0:(0,3), 1:(5,6), 2:(2,4)})
{frozenset({0, 2}): (2, 3)}
"""
return intersecting_combinations(intervals, 2)
def without_degenerate(
intervals: Mapping[HashableT, Tuple[SupportsLessThanT, SupportsLessThanT]],
) -> Dict[HashableT, Tuple[SupportsLessThanT, SupportsLessThanT]]:
"""Return only proper intervals
>>> without_degenerate({0:(1,1), 1:(2,3)})
{1: (2, 3)}
"""
return {
key: (left, right) for key, (left, right) in intervals.items() if left < right
}
|
en
| 0.790868
|
Utilities for working with intervals. # pylint: disable=too-few-public-methods Iterate over all possible subsets of `items`. The order in which subsets appear is not guaranteed. >>> sorted(_subsets([0])) [(0,)] >>> sorted(_subsets([0, 1])) [(0,), (0, 1), (1,)] >>> sorted(_subsets([0, 1, 2])) [(0,), (0, 1), (0, 1, 2), (0, 2), (1,), (1, 2), (2,)] Return the biggest interval that is a subset of all given intervals >>> _intersection([(10,60), (40, 90)]) (40, 60) >>> _intersection([(10,60), (40, 90), (45, 55)]) (45, 55) # TODO: Consider raising on degenerate All intervals formed by reducing subsets with intersection In no particular order. All intervals formed by reducing k-combinations with intersection In no particular order. All intervals formed by reducing products with intersection. In other words every interval that can be formed by intersecting exactly one interval from each of the `factors`. >>> intersecting_products([{0: (1, 7)}, {1:(3, 9)}, {2: (0, 2), 3: (0,4)}]) {(0, 1, 3): (3, 4)} Note that the time complexity worse if intervals within a factor may intersect. It is potentially much worse if a large portion of the intervals do intersect. All intervals formed by intersecting two of the given intervals >>> auto_intersections({0:(0,3), 1:(5,6), 2:(2,4)}) {frozenset({0, 2}): (2, 3)} Return only proper intervals >>> without_degenerate({0:(1,1), 1:(2,3)}) {1: (2, 3)}
| 3.442033
| 3
|
scripts/hail_batch/hgdp1kg_tobwgs_pca_pop_densified_new_variants/hgdp_1kg_tob_wgs_pop_pca_densified.py
|
populationgenomics/ancestry
| 0
|
6629387
|
"""
Perform pca on samples specific to a population
from the HGDP,1KG, and tob-wgs dataset after densifying.
Reliant on output from
```hgdp1kg_tobwgs_densified_pca_new_variants/
hgdp_1kg_tob_wgs_densified_pca_new_variants.py
```
"""
import hail as hl
import click
import pandas as pd
from analysis_runner import bucket_path, output_path
# Matrix table produced by the earlier densified-PCA step (see module docstring
# for the upstream script this depends on).
HGDP1KG_TOBWGS = bucket_path(
    '1kg_hgdp_densified_pca_new_variants/v0/hgdp1kg_tobwgs_joined_all_samples.mt'
)
@click.command()
@click.option('--pop', help='Population to subset from the 1KG (e.g. afr, nfe)')
def query(pop):
    """Query script entry point.

    Subsets the joined HGDP/1KG + TOB-WGS matrix table to the requested
    population (always keeping TOB samples), runs a 20-component
    HWE-normalized PCA, and writes eigenvalues, scores and loadings.
    """
    hl.init(default_reference='GRCh38')

    mt = hl.read_matrix_table(HGDP1KG_TOBWGS)
    if pop:
        # Get samples from the specified population only
        # (TOB samples are retained regardless of the inferred population).
        mt = mt.filter_cols(
            (mt.hgdp_1kg_metadata.population_inference.pop == pop.lower())
            | (mt.s.contains('TOB'))
        )
    else:
        # No population given: keep TOB samples only.
        mt = mt.filter_cols(mt.s.contains('TOB'))

    # Perform PCA
    eigenvalues_path = output_path('eigenvalues.ht')
    scores_path = output_path('scores.ht')
    loadings_path = output_path('loadings.ht')
    eigenvalues, scores, loadings = hl.hwe_normalized_pca(
        mt.GT, compute_loadings=True, k=20
    )
    # Eigenvalues come back as a plain list; round-trip through pandas to
    # export them as a Hail table alongside scores and loadings.
    hl.Table.from_pandas(pd.DataFrame(eigenvalues)).export(eigenvalues_path)
    scores.write(scores_path, overwrite=True)
    loadings.write(loadings_path, overwrite=True)
if __name__ == '__main__':
query() # pylint: disable=no-value-for-parameter
|
"""
Perform pca on samples specific to a population
from the HGDP,1KG, and tob-wgs dataset after densifying.
Reliant on output from
```hgdp1kg_tobwgs_densified_pca_new_variants/
hgdp_1kg_tob_wgs_densified_pca_new_variants.py
```
"""
import hail as hl
import click
import pandas as pd
from analysis_runner import bucket_path, output_path
HGDP1KG_TOBWGS = bucket_path(
'1kg_hgdp_densified_pca_new_variants/v0/hgdp1kg_tobwgs_joined_all_samples.mt'
)
@click.command()
@click.option('--pop', help='Population to subset from the 1KG (e.g. afr, nfe)')
def query(pop):
"""Query script entry point."""
hl.init(default_reference='GRCh38')
mt = hl.read_matrix_table(HGDP1KG_TOBWGS)
if pop:
# Get samples from the specified population only
mt = mt.filter_cols(
(mt.hgdp_1kg_metadata.population_inference.pop == pop.lower())
| (mt.s.contains('TOB'))
)
else:
mt = mt.filter_cols(mt.s.contains('TOB'))
# Perform PCA
eigenvalues_path = output_path('eigenvalues.ht')
scores_path = output_path('scores.ht')
loadings_path = output_path('loadings.ht')
eigenvalues, scores, loadings = hl.hwe_normalized_pca(
mt.GT, compute_loadings=True, k=20
)
hl.Table.from_pandas(pd.DataFrame(eigenvalues)).export(eigenvalues_path)
scores.write(scores_path, overwrite=True)
loadings.write(loadings_path, overwrite=True)
if __name__ == '__main__':
query() # pylint: disable=no-value-for-parameter
|
en
| 0.560288
|
Perform pca on samples specific to a population from the HGDP,1KG, and tob-wgs dataset after densifying. Reliant on output from ```hgdp1kg_tobwgs_densified_pca_new_variants/ hgdp_1kg_tob_wgs_densified_pca_new_variants.py ``` Query script entry point. # Get samples from the specified population only # Perform PCA # pylint: disable=no-value-for-parameter
| 2.471096
| 2
|
tests_dataset_svc.py
|
cmorisse/openerp-jsonrpc-client-python
| 4
|
6629388
|
<filename>tests_dataset_svc.py
# coding: utf8
import random
import unittest
import requests
from openerp_jsonrpc_client import *
OE_BASE_SERVER_URL = 'http://localhost:8069'
class TestDatasetService(unittest.TestCase):
    """Integration tests for the OpenERP `dataset` JSON-RPC service.

    NOTE(review): Python 2 source. These tests require a live OpenERP server
    at OE_BASE_SERVER_URL and are order-dependent: test_010 (re)creates the
    `test_dataset` database that every later test authenticates against.
    """

    def setUp(self):
        # Fresh client per test; each test authenticates itself.
        self.server = OpenERPJSONRPCClient(OE_BASE_SERVER_URL)

    def test_010_setup_db(self):
        """search then read a set of objects"""
        # TODO: Use db_list to chech db_exists before droppping it
        db_list = self.server.db_get_list()
        if 'test_dataset' in db_list:
            result = self.server.db_drop("admin", "test_dataset")
        self.server.db_create("admin", "test_dataset", False, "Fr_fr", "admin")
        session_info = self.server.session_authenticate("test_dataset", "admin", "admin")

        # Install the 'sale' module so later tests have sale.order available.
        module_obj = self.server.get_model('ir.module.module')
        module_ids = module_obj.search([('name', '=', 'sale')])
        module = module_obj.read(module_ids[0])
        self.assertFalse(module['state'] == 'installed', "Sales module is already installed")
        module_obj.button_immediate_install([module_ids[0]])
        module = module_obj.read(module_ids[0])
        self.assertTrue(module['state'] == 'installed', "Sales module installation failed")

    def test_020_search_read(self):
        """test search_read()"""
        result = self.server.session_authenticate("test_dataset", "admin", "admin")
        print result
        # Search then load all ir.ui.view
        result = self.server.dataset_search_read("ir.ui.view")
        print result

    def test_030_load(self):
        """test load()"""
        result = self.server.session_authenticate("test_dataset", "admin", "admin")
        print result
        # Load a single record by id.
        result = self.server.dataset_load("ir.ui.view", 1)
        print result

    def test_040_call_kw_read_direct(self):
        """test dataset/call_kw"""
        try:
            result = self.server.session_authenticate('test_dataset', 'admin', 'admin', OE_BASE_SERVER_URL)
            self.assertTrue(result, "Failed to authenticate against db_test_dataset database")
            result = self.server.dataset_call_kw('res.users', 'read', [1], fields=['login', 'password'], context={})
            self.assertTrue(result, "call_kw failed")
            print "call_kw() => %s" % result
        except OpenERPJSONRPCClientException as exc:
            # Dump the structured JSON-RPC fault before re-raising so the
            # test output shows what the server actually complained about.
            print "message: %s" % exc.message
            print "data: %s" % exc.data
            print "data.type: %s" % exc.data['type']
            print "data.fault_code: %s" % exc.data['fault_code']
            raise exc

    def test_050_call_kw_read_via_model_proxy(self):
        """test dataset/call_kw via Model proxy"""
        try:
            result = self.server.session_authenticate('test_dataset', 'admin', 'admin', OE_BASE_SERVER_URL)
            self.assertTrue(result, "Failed to authenticate against db_test_session database")
            res_partner_obj = self.server.get_model('res.users')
            # call with args only
            result = res_partner_obj.read([1], ['login', 'password'])
            self.assertTrue(result, "call_kw failed")
            print "call_kw() via model_proxy => %s" % result
        except OpenERPJSONRPCClientException as exc:
            print "message: %s" % exc.message
            print "data: %s" % exc.data
            print "data.type: %s" % exc.data['type']
            print "data.fault_code: %s" % exc.data['fault_code']
            raise exc

    def NOtest_060_exec_workflow(self):
        """test exec_workflow via Model proxy"""
        # Disabled (NOtest_ prefix keeps unittest from collecting it).
        # To use this test, you must create a valid sale.order and define sale_order_id below
        sale_order_id = 2
        # TODO: Programmatically setup a sale order
        try:
            result = self.server.session_authenticate('test_dataset', 'admin', 'admin', OE_BASE_SERVER_URL)
            self.assertTrue(result, "Failed to authenticate against test_dataset database")
            result = self.server.dataset_exec_workflow('sale.order', 2, 'order_confirm')
            # self.assertTrue(result, "exec_workflow failed because it requires a draft sale order")
            # TODO: check why exec_workflow always return false
        except OpenERPJSONRPCClientException as exc:
            print "message: %s" % exc.message
            print "data: %s" % exc.data
            print "data.type: %s" % exc.data['type']
            print "data.fault_code: %s" % exc.data['fault_code']
            raise exc
if __name__ == '__main__':
unittest.main()
|
<filename>tests_dataset_svc.py
# coding: utf8
import random
import unittest
import requests
from openerp_jsonrpc_client import *
OE_BASE_SERVER_URL = 'http://localhost:8069'
class TestDatasetService(unittest.TestCase):
def setUp(self):
self.server = OpenERPJSONRPCClient(OE_BASE_SERVER_URL)
def test_010_setup_db(self):
"""search then read a set of objects"""
# TODO: Use db_list to chech db_exists before droppping it
db_list = self.server.db_get_list()
if 'test_dataset' in db_list:
result = self.server.db_drop("admin", "test_dataset")
self.server.db_create("admin", "test_dataset", False, "Fr_fr", "admin")
session_info = self.server.session_authenticate("test_dataset", "admin", "admin")
module_obj = self.server.get_model('ir.module.module')
module_ids = module_obj.search([('name', '=', 'sale')])
module = module_obj.read(module_ids[0])
self.assertFalse(module['state'] == 'installed', "Sales module is already installed")
module_obj.button_immediate_install([module_ids[0]])
module = module_obj.read(module_ids[0])
self.assertTrue(module['state'] == 'installed', "Sales module installation failed")
def test_020_search_read(self):
"""test search_read()"""
result = self.server.session_authenticate("test_dataset", "admin", "admin")
print result
# Search then load all ir.ui.view
result = self.server.dataset_search_read("ir.ui.view")
print result
def test_030_load(self):
"""test load()"""
result = self.server.session_authenticate("test_dataset", "admin", "admin")
print result
result = self.server.dataset_load("ir.ui.view", 1)
print result
def test_040_call_kw_read_direct(self):
"""test dataset/call_kw"""
try:
result = self.server.session_authenticate('test_dataset', 'admin', 'admin', OE_BASE_SERVER_URL)
self.assertTrue(result, "Failed to authenticate against db_test_dataset database")
result = self.server.dataset_call_kw('res.users', 'read', [1], fields=['login', 'password'], context={})
self.assertTrue(result, "call_kw failed")
print "call_kw() => %s" % result
except OpenERPJSONRPCClientException as exc:
print "message: %s" % exc.message
print "data: %s" % exc.data
print "data.type: %s" % exc.data['type']
print "data.fault_code: %s" % exc.data['fault_code']
raise exc
def test_050_call_kw_read_via_model_proxy(self):
"""test dataset/call_kw via Model proxy"""
try:
result = self.server.session_authenticate('test_dataset', 'admin', 'admin', OE_BASE_SERVER_URL)
self.assertTrue(result, "Failed to authenticate against db_test_session database")
res_partner_obj = self.server.get_model('res.users')
# call with args only
result = res_partner_obj.read([1], ['login', 'password'])
self.assertTrue(result, "call_kw failed")
print "call_kw() via model_proxy => %s" % result
except OpenERPJSONRPCClientException as exc:
print "message: %s" % exc.message
print "data: %s" % exc.data
print "data.type: %s" % exc.data['type']
print "data.fault_code: %s" % exc.data['fault_code']
raise exc
def NOtest_060_exec_workflow(self):
"""test exec_workflow via Model proxy"""
# To use this test, you must create a valid sale.order and define sale_order_id below
sale_order_id = 2
# TODO: Programmatically setup a sale order
try:
result = self.server.session_authenticate('test_dataset', 'admin', 'admin', OE_BASE_SERVER_URL)
self.assertTrue(result, "Failed to authenticate against test_dataset database")
result = self.server.dataset_exec_workflow('sale.order', 2, 'order_confirm')
# self.assertTrue(result, "exec_workflow failed because it requires a draft sale order")
# TODO: check why exec_workflow always return false
except OpenERPJSONRPCClientException as exc:
print "message: %s" % exc.message
print "data: %s" % exc.data
print "data.type: %s" % exc.data['type']
print "data.fault_code: %s" % exc.data['fault_code']
raise exc
if __name__ == '__main__':
unittest.main()
|
en
| 0.52624
|
# coding: utf8 search then read a set of objects # TODO: Use db_list to chech db_exists before droppping it test search_read() # Search then load all ir.ui.view test load() test dataset/call_kw test dataset/call_kw via Model proxy # call with args only test exec_workflow via Model proxy # To use this test, you must create a valid sale.order and define sale_order_id below # TODO: Programmatically setup a sale order # self.assertTrue(result, "exec_workflow failed because it requires a draft sale order") # TODO: check why exec_workflow always return false
| 2.50256
| 3
|
keycloak/keycloak_openid.py
|
mxk1235/python-keycloak
| 0
|
6629389
|
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (C) 2017 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import json
from jose import jwt
from .authorization import Authorization
from .connection import ConnectionManager
from .exceptions import raise_error_from_response, KeycloakGetError, \
KeycloakRPTNotFound, KeycloakAuthorizationConfigError, KeycloakInvalidTokenError
from .urls_patterns import (
URL_REALM,
URL_AUTH,
URL_TOKEN,
URL_USERINFO,
URL_WELL_KNOWN,
URL_LOGOUT,
URL_CERTS,
URL_ENTITLEMENT,
URL_INTROSPECT
)
class KeycloakOpenID:
    def __init__(self, server_url, realm_name, client_id, client_secret_key=None, verify=True, custom_headers=None):
        """Create an OpenID client bound to one realm of a Keycloak server.

        :param server_url: Keycloak server url
        :param client_id: client id
        :param realm_name: realm name
        :param client_secret_key: client secret key (confidential clients only)
        :param verify: True if want check connection SSL
        :param custom_headers: dict of custom header to pass to each HTML request
        """
        self._client_id = client_id
        self._client_secret_key = client_secret_key
        self._realm_name = realm_name
        headers = dict()
        if custom_headers is not None:
            # merge custom headers to main headers
            headers.update(custom_headers)
        # All HTTP traffic goes through this manager; requests time out
        # after 60 seconds and `verify` is forwarded to the SSL layer.
        self._connection = ConnectionManager(base_url=server_url,
                                             headers=headers,
                                             timeout=60,
                                             verify=verify)
        self._authorization = Authorization()
    # Plain pass-through accessors for the private attributes set in __init__.
    @property
    def client_id(self):
        return self._client_id

    @client_id.setter
    def client_id(self, value):
        self._client_id = value

    @property
    def client_secret_key(self):
        return self._client_secret_key

    @client_secret_key.setter
    def client_secret_key(self, value):
        self._client_secret_key = value

    @property
    def realm_name(self):
        return self._realm_name

    @realm_name.setter
    def realm_name(self, value):
        self._realm_name = value

    @property
    def connection(self):
        return self._connection

    @connection.setter
    def connection(self, value):
        self._connection = value

    @property
    def authorization(self):
        return self._authorization

    @authorization.setter
    def authorization(self, value):
        self._authorization = value
def _add_secret_key(self, payload):
"""
Add secret key if exist.
:param payload:
:return:
"""
if self.client_secret_key:
payload.update({"client_secret": self.client_secret_key})
return payload
def _build_name_role(self, role):
"""
:param role:
:return:
"""
return self.client_id + "/" + role
def _token_info(self, token, method_token_info, **kwargs):
"""
:param token:
:param method_token_info:
:param kwargs:
:return:
"""
if method_token_info == 'introspect':
token_info = self.introspect(token)
else:
token_info = self.decode_token(token, **kwargs)
return token_info
def well_know(self):
""" The most important endpoint to understand is the well-known configuration
endpoint. It lists endpoints and other configuration options relevant to
the OpenID Connect implementation in Keycloak.
:return It lists endpoints and other configuration options relevant.
"""
params_path = {"realm-name": self.realm_name}
data_raw = self.connection.raw_get(URL_WELL_KNOWN.format(**params_path))
return raise_error_from_response(data_raw, KeycloakGetError)
def auth_url(self, redirect_uri):
"""
http://openid.net/specs/openid-connect-core-1_0.html#AuthorizationEndpoint
:return:
"""
params_path = {"authorization-endpoint": self.well_know()['authorization_endpoint'],
"client-id": self.client_id,
"redirect-uri": redirect_uri}
return URL_AUTH.format(**params_path)
def token(self, username=None, password=<PASSWORD>, grant_type=["password"], code="", redirect_uri="", totp=None, **extra):
"""
The token endpoint is used to obtain tokens. Tokens can either be obtained by
exchanging an authorization code or by supplying credentials directly depending on
what flow is used. The token endpoint is also used to obtain new access tokens
when they expire.
http://openid.net/specs/openid-connect-core-1_0.html#TokenEndpoint
:param username:
:param password:
:param grant_type:
:param code:
:param redirect_uri
:param totp
:return:
"""
params_path = {"realm-name": self.realm_name}
payload = {}
if username is not None:
payload = {"username": username, "password": password,
"client_id": self.client_id, "grant_type": grant_type,
"code": code, "redirect_uri": redirect_uri}
else:
payload = {"client_id": self.client_id, "grant_type": grant_type,
"code": code, "redirect_uri": redirect_uri}
if payload:
payload.update(extra)
if totp:
payload["totp"] = totp
payload = self._add_secret_key(payload)
data_raw = self.connection.raw_post(URL_TOKEN.format(**params_path),
data=payload)
return raise_error_from_response(data_raw, KeycloakGetError)
def refresh_token(self, refresh_token, grant_type=["refresh_token"]):
"""
The token endpoint is used to obtain tokens. Tokens can either be obtained by
exchanging an authorization code or by supplying credentials directly depending on
what flow is used. The token endpoint is also used to obtain new access tokens
when they expire.
http://openid.net/specs/openid-connect-core-1_0.html#TokenEndpoint
:param refresh_token:
:param grant_type:
:return:
"""
params_path = {"realm-name": self.realm_name}
payload = {"client_id": self.client_id, "grant_type": grant_type, "refresh_token": refresh_token}
payload = self._add_secret_key(payload)
data_raw = self.connection.raw_post(URL_TOKEN.format(**params_path),
data=payload)
return raise_error_from_response(data_raw, KeycloakGetError)
def userinfo(self, token):
"""
The userinfo endpoint returns standard claims about the authenticated user,
and is protected by a bearer token.
http://openid.net/specs/openid-connect-core-1_0.html#UserInfo
:param token:
:return:
"""
self.connection.add_param_headers("Authorization", "Bearer " + token)
params_path = {"realm-name": self.realm_name}
data_raw = self.connection.raw_get(URL_USERINFO.format(**params_path))
return raise_error_from_response(data_raw, KeycloakGetError)
def logout(self, refresh_token):
"""
The logout endpoint logs out the authenticated user.
:param refresh_token:
:return:
"""
params_path = {"realm-name": self.realm_name}
payload = {"client_id": self.client_id, "refresh_token": refresh_token}
payload = self._add_secret_key(payload)
data_raw = self.connection.raw_post(URL_LOGOUT.format(**params_path),
data=payload)
return raise_error_from_response(data_raw, KeycloakGetError, expected_code=204)
def certs(self):
"""
The certificate endpoint returns the public keys enabled by the realm, encoded as a
JSON Web Key (JWK). Depending on the realm settings there can be one or more keys enabled
for verifying tokens.
https://tools.ietf.org/html/rfc7517
:return:
"""
params_path = {"realm-name": self.realm_name}
data_raw = self.connection.raw_get(URL_CERTS.format(**params_path))
return raise_error_from_response(data_raw, KeycloakGetError)
def public_key(self):
"""
The public key is exposed by the realm page directly.
:return:
"""
params_path = {"realm-name": self.realm_name}
data_raw = self.connection.raw_get(URL_REALM.format(**params_path))
return raise_error_from_response(data_raw, KeycloakGetError)['public_key']
    def entitlement(self, token, resource_server_id):
        """Request a requesting party token (RPT) from the entitlement endpoint.

        The RPT bundles all entitlements (permissions) granted to the user by
        the permissions and authorization policies of the requested resources;
        client applications can then use it to access protected resources at
        the resource server.

        :param token: access token used to authorize the request
        :param resource_server_id: client id of the resource server
        :return: decoded entitlement response containing the RPT
        """
        # The endpoint is bearer-protected, so attach the caller's token.
        self.connection.add_param_headers("Authorization", "Bearer " + token)
        params_path = {"realm-name": self.realm_name, "resource-server-id": resource_server_id}
        data_raw = self.connection.raw_get(URL_ENTITLEMENT.format(**params_path))
        return raise_error_from_response(data_raw, KeycloakGetError)
    def introspect(self, token, rpt=None, token_type_hint=None):
        """Retrieve the active state of a token via the introspection endpoint.

        Introspection can only be invoked by confidential clients.
        https://tools.ietf.org/html/rfc7662

        :param token: token to introspect (also used as the bearer credential
            when an RPT is introspected)
        :param rpt: requesting party token; required when *token_type_hint* is
            ``'requesting_party_token'``
        :param token_type_hint: hint about the type of the token being inspected
        :raises KeycloakRPTNotFound: if RPT introspection is requested without
            supplying *rpt*
        :return: decoded introspection response
        """
        params_path = {"realm-name": self.realm_name}

        payload = {"client_id": self.client_id, "token": token}

        if token_type_hint == 'requesting_party_token':
            if rpt:
                # For RPT introspection the payload token becomes the RPT and
                # the original access token authenticates the HTTP call.
                payload.update({"token": rpt, "token_type_hint": token_type_hint})
                self.connection.add_param_headers("Authorization", "Bearer " + token)
            else:
                raise KeycloakRPTNotFound("Can't found RPT.")

        payload = self._add_secret_key(payload)

        data_raw = self.connection.raw_post(URL_INTROSPECT.format(**params_path),
                                            data=payload)
        return raise_error_from_response(data_raw, KeycloakGetError)
def decode_token(self, token, key, algorithms=['RS256'], **kwargs):
"""
A JSON Web Key (JWK) is a JavaScript Object Notation (JSON) data
structure that represents a cryptographic key. This specification
also defines a JWK Set JSON data structure that represents a set of
JWKs. Cryptographic algorithms and identifiers for use with this
specification are described in the separate JSON Web Algorithms (JWA)
specification and IANA registries established by that specification.
https://tools.ietf.org/html/rfc7517
:param token:
:param key:
:param algorithms:
:return:
"""
return jwt.decode(token, key, algorithms=algorithms,
audience=self.client_id, **kwargs)
def load_authorization_config(self, path):
"""
Load Keycloak settings (authorization)
:param path: settings file (json)
:return:
"""
authorization_file = open(path, 'r')
authorization_json = json.loads(authorization_file.read())
self.authorization.load_config(authorization_json)
authorization_file.close()
def get_policies(self, token, method_token_info='introspect', **kwargs):
"""
Get policies by user token
:param token: user token
:return: policies list
"""
if not self.authorization.policies:
raise KeycloakAuthorizationConfigError(
"Keycloak settings not found. Load Authorization Keycloak settings."
)
token_info = self._token_info(token, method_token_info, **kwargs)
if method_token_info == 'introspect' and not token_info['active']:
raise KeycloakInvalidTokenError(
"Token expired or invalid."
)
user_resources = token_info['resource_access'].get(self.client_id)
if not user_resources:
return None
policies = []
for policy_name, policy in self.authorization.policies.items():
for role in user_resources['roles']:
if self._build_name_role(role) in policy.roles:
policies.append(policy)
return list(set(policies))
    def get_permissions(self, token, method_token_info='introspect', **kwargs):
        """Collect the permissions granted to the user identified by *token*.

        :param token: user token
        :param method_token_info: 'introspect' to query the introspection
            endpoint, anything else to decode the token locally
        :param kwargs: extra arguments for local decoding (e.g. ``key``)
        :raises KeycloakAuthorizationConfigError: if the authorization settings
            were never loaded
        :raises KeycloakInvalidTokenError: if introspection reports the token
            as inactive
        :return: de-duplicated permissions list, or None when the client grants
            this user no resource access
        """
        if not self.authorization.policies:
            raise KeycloakAuthorizationConfigError(
                "Keycloak settings not found. Load Authorization Keycloak settings."
            )

        token_info = self._token_info(token, method_token_info, **kwargs)

        if method_token_info == 'introspect' and not token_info['active']:
            raise KeycloakInvalidTokenError(
                "Token expired or invalid."
            )

        user_resources = token_info['resource_access'].get(self.client_id)

        if not user_resources:
            return None

        permissions = []

        for policy_name, policy in self.authorization.policies.items():
            # A policy contributes its permissions once per matching role.
            for role in user_resources['roles']:
                if self._build_name_role(role) in policy.roles:
                    permissions += policy.permissions

        # set() removes duplicates contributed by multiple matching roles.
        return list(set(permissions))
|
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (C) 2017 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import json
from jose import jwt
from .authorization import Authorization
from .connection import ConnectionManager
from .exceptions import raise_error_from_response, KeycloakGetError, \
KeycloakRPTNotFound, KeycloakAuthorizationConfigError, KeycloakInvalidTokenError
from .urls_patterns import (
URL_REALM,
URL_AUTH,
URL_TOKEN,
URL_USERINFO,
URL_WELL_KNOWN,
URL_LOGOUT,
URL_CERTS,
URL_ENTITLEMENT,
URL_INTROSPECT
)
class KeycloakOpenID:
    def __init__(self, server_url, realm_name, client_id, client_secret_key=None, verify=True, custom_headers=None):
        """Create an OpenID Connect client bound to one realm and client.

        :param server_url: Keycloak server url
        :param client_id: client id
        :param realm_name: realm name
        :param client_secret_key: client secret key (confidential clients only)
        :param verify: True to verify the server's SSL certificate
        :param custom_headers: dict of custom headers to send with each request
        """
        self._client_id = client_id
        self._client_secret_key = client_secret_key
        self._realm_name = realm_name

        headers = dict()
        if custom_headers is not None:
            # merge custom headers to main headers
            headers.update(custom_headers)

        # A single connection manager (60 s timeout) serves every HTTP call.
        self._connection = ConnectionManager(base_url=server_url,
                                             headers=headers,
                                             timeout=60,
                                             verify=verify)

        self._authorization = Authorization()
    # Plain accessors: state lives in the underscored attributes initialised
    # in __init__; setters allow swapping them out after construction.

    @property
    def client_id(self):
        return self._client_id

    @client_id.setter
    def client_id(self, value):
        self._client_id = value

    @property
    def client_secret_key(self):
        return self._client_secret_key

    @client_secret_key.setter
    def client_secret_key(self, value):
        self._client_secret_key = value

    @property
    def realm_name(self):
        return self._realm_name

    @realm_name.setter
    def realm_name(self, value):
        self._realm_name = value

    @property
    def connection(self):
        return self._connection

    @connection.setter
    def connection(self, value):
        self._connection = value

    @property
    def authorization(self):
        return self._authorization

    @authorization.setter
    def authorization(self, value):
        self._authorization = value
def _add_secret_key(self, payload):
"""
Add secret key if exist.
:param payload:
:return:
"""
if self.client_secret_key:
payload.update({"client_secret": self.client_secret_key})
return payload
def _build_name_role(self, role):
"""
:param role:
:return:
"""
return self.client_id + "/" + role
    def _token_info(self, token, method_token_info, **kwargs):
        """Inspect *token* either remotely or locally.

        :param token: token to inspect
        :param method_token_info: 'introspect' queries the introspection
            endpoint; any other value decodes the JWT locally
        :param kwargs: forwarded to :meth:`decode_token` (e.g. ``key``)
        :return: dict of token claims / introspection data
        """
        if method_token_info == 'introspect':
            token_info = self.introspect(token)
        else:
            token_info = self.decode_token(token, **kwargs)

        return token_info
    def well_know(self):
        """Fetch the realm's well-known OpenID configuration document.

        The well-known configuration endpoint lists endpoints and other
        configuration options relevant to the OpenID Connect implementation
        in Keycloak.

        NOTE(review): the (misspelled) name is kept because it is the public
        API; renaming it to ``well_known`` would break callers.

        :return: decoded configuration document
        """
        params_path = {"realm-name": self.realm_name}
        data_raw = self.connection.raw_get(URL_WELL_KNOWN.format(**params_path))
        return raise_error_from_response(data_raw, KeycloakGetError)
    def auth_url(self, redirect_uri):
        """Build the authorization-endpoint URL for this client.

        http://openid.net/specs/openid-connect-core-1_0.html#AuthorizationEndpoint

        :param redirect_uri: URI the browser is sent back to after login
        :return: formatted authorization URL (note: resolves the endpoint via
            :meth:`well_know`, which performs one HTTP request)
        """
        params_path = {"authorization-endpoint": self.well_know()['authorization_endpoint'],
                       "client-id": self.client_id,
                       "redirect-uri": redirect_uri}
        return URL_AUTH.format(**params_path)
def token(self, username=None, password=<PASSWORD>, grant_type=["password"], code="", redirect_uri="", totp=None, **extra):
"""
The token endpoint is used to obtain tokens. Tokens can either be obtained by
exchanging an authorization code or by supplying credentials directly depending on
what flow is used. The token endpoint is also used to obtain new access tokens
when they expire.
http://openid.net/specs/openid-connect-core-1_0.html#TokenEndpoint
:param username:
:param password:
:param grant_type:
:param code:
:param redirect_uri
:param totp
:return:
"""
params_path = {"realm-name": self.realm_name}
payload = {}
if username is not None:
payload = {"username": username, "password": password,
"client_id": self.client_id, "grant_type": grant_type,
"code": code, "redirect_uri": redirect_uri}
else:
payload = {"client_id": self.client_id, "grant_type": grant_type,
"code": code, "redirect_uri": redirect_uri}
if payload:
payload.update(extra)
if totp:
payload["totp"] = totp
payload = self._add_secret_key(payload)
data_raw = self.connection.raw_post(URL_TOKEN.format(**params_path),
data=payload)
return raise_error_from_response(data_raw, KeycloakGetError)
    def refresh_token(self, refresh_token, grant_type=["refresh_token"]):
        """Exchange a refresh token for a fresh access token.

        The token endpoint is used to obtain tokens. Tokens can either be
        obtained by exchanging an authorization code or by supplying
        credentials directly depending on what flow is used. The token endpoint
        is also used to obtain new access tokens when they expire.
        http://openid.net/specs/openid-connect-core-1_0.html#TokenEndpoint

        :param refresh_token: refresh token issued with the original grant
        :param grant_type: OAuth2 grant type(s).
            NOTE(review): mutable default list — never mutated here, but a
            None sentinel would be safer.
        :return: decoded token response
        """
        params_path = {"realm-name": self.realm_name}
        payload = {"client_id": self.client_id, "grant_type": grant_type, "refresh_token": refresh_token}
        payload = self._add_secret_key(payload)
        data_raw = self.connection.raw_post(URL_TOKEN.format(**params_path),
                                            data=payload)
        return raise_error_from_response(data_raw, KeycloakGetError)
def userinfo(self, token):
"""
The userinfo endpoint returns standard claims about the authenticated user,
and is protected by a bearer token.
http://openid.net/specs/openid-connect-core-1_0.html#UserInfo
:param token:
:return:
"""
self.connection.add_param_headers("Authorization", "Bearer " + token)
params_path = {"realm-name": self.realm_name}
data_raw = self.connection.raw_get(URL_USERINFO.format(**params_path))
return raise_error_from_response(data_raw, KeycloakGetError)
def logout(self, refresh_token):
"""
The logout endpoint logs out the authenticated user.
:param refresh_token:
:return:
"""
params_path = {"realm-name": self.realm_name}
payload = {"client_id": self.client_id, "refresh_token": refresh_token}
payload = self._add_secret_key(payload)
data_raw = self.connection.raw_post(URL_LOGOUT.format(**params_path),
data=payload)
return raise_error_from_response(data_raw, KeycloakGetError, expected_code=204)
def certs(self):
"""
The certificate endpoint returns the public keys enabled by the realm, encoded as a
JSON Web Key (JWK). Depending on the realm settings there can be one or more keys enabled
for verifying tokens.
https://tools.ietf.org/html/rfc7517
:return:
"""
params_path = {"realm-name": self.realm_name}
data_raw = self.connection.raw_get(URL_CERTS.format(**params_path))
return raise_error_from_response(data_raw, KeycloakGetError)
def public_key(self):
"""
The public key is exposed by the realm page directly.
:return:
"""
params_path = {"realm-name": self.realm_name}
data_raw = self.connection.raw_get(URL_REALM.format(**params_path))
return raise_error_from_response(data_raw, KeycloakGetError)['public_key']
def entitlement(self, token, resource_server_id):
"""
Client applications can use a specific endpoint to obtain a special security token
called a requesting party token (RPT). This token consists of all the entitlements
(or permissions) for a user as a result of the evaluation of the permissions and authorization
policies associated with the resources being requested. With an RPT, client applications can
gain access to protected resources at the resource server.
:return:
"""
self.connection.add_param_headers("Authorization", "Bearer " + token)
params_path = {"realm-name": self.realm_name, "resource-server-id": resource_server_id}
data_raw = self.connection.raw_get(URL_ENTITLEMENT.format(**params_path))
return raise_error_from_response(data_raw, KeycloakGetError)
def introspect(self, token, rpt=None, token_type_hint=None):
"""
The introspection endpoint is used to retrieve the active state of a token. It is can only be
invoked by confidential clients.
https://tools.ietf.org/html/rfc7662
:param token:
:param rpt:
:param token_type_hint:
:return:
"""
params_path = {"realm-name": self.realm_name}
payload = {"client_id": self.client_id, "token": token}
if token_type_hint == 'requesting_party_token':
if rpt:
payload.update({"token": rpt, "token_type_hint": token_type_hint})
self.connection.add_param_headers("Authorization", "Bearer " + token)
else:
raise KeycloakRPTNotFound("Can't found RPT.")
payload = self._add_secret_key(payload)
data_raw = self.connection.raw_post(URL_INTROSPECT.format(**params_path),
data=payload)
return raise_error_from_response(data_raw, KeycloakGetError)
def decode_token(self, token, key, algorithms=['RS256'], **kwargs):
"""
A JSON Web Key (JWK) is a JavaScript Object Notation (JSON) data
structure that represents a cryptographic key. This specification
also defines a JWK Set JSON data structure that represents a set of
JWKs. Cryptographic algorithms and identifiers for use with this
specification are described in the separate JSON Web Algorithms (JWA)
specification and IANA registries established by that specification.
https://tools.ietf.org/html/rfc7517
:param token:
:param key:
:param algorithms:
:return:
"""
return jwt.decode(token, key, algorithms=algorithms,
audience=self.client_id, **kwargs)
def load_authorization_config(self, path):
"""
Load Keycloak settings (authorization)
:param path: settings file (json)
:return:
"""
authorization_file = open(path, 'r')
authorization_json = json.loads(authorization_file.read())
self.authorization.load_config(authorization_json)
authorization_file.close()
def get_policies(self, token, method_token_info='introspect', **kwargs):
"""
Get policies by user token
:param token: user token
:return: policies list
"""
if not self.authorization.policies:
raise KeycloakAuthorizationConfigError(
"Keycloak settings not found. Load Authorization Keycloak settings."
)
token_info = self._token_info(token, method_token_info, **kwargs)
if method_token_info == 'introspect' and not token_info['active']:
raise KeycloakInvalidTokenError(
"Token expired or invalid."
)
user_resources = token_info['resource_access'].get(self.client_id)
if not user_resources:
return None
policies = []
for policy_name, policy in self.authorization.policies.items():
for role in user_resources['roles']:
if self._build_name_role(role) in policy.roles:
policies.append(policy)
return list(set(policies))
def get_permissions(self, token, method_token_info='introspect', **kwargs):
"""
Get permission by user token
:param token: user token
:param method_token_info: Decode token method
:param kwargs: parameters for decode
:return: permissions list
"""
if not self.authorization.policies:
raise KeycloakAuthorizationConfigError(
"Keycloak settings not found. Load Authorization Keycloak settings."
)
token_info = self._token_info(token, method_token_info, **kwargs)
if method_token_info == 'introspect' and not token_info['active']:
raise KeycloakInvalidTokenError(
"Token expired or invalid."
)
user_resources = token_info['resource_access'].get(self.client_id)
if not user_resources:
return None
permissions = []
for policy_name, policy in self.authorization.policies.items():
for role in user_resources['roles']:
if self._build_name_role(role) in policy.roles:
permissions += policy.permissions
return list(set(permissions))
|
en
| 0.74768
|
# -*- coding: utf-8 -*- # # The MIT License (MIT) # # Copyright (C) 2017 <NAME> <<EMAIL>> # # Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the "Software"), to deal in # the Software without restriction, including without limitation the rights to # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of # the Software, and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. :param server_url: Keycloak server url :param client_id: client id :param realm_name: realm name :param client_secret_key: client secret key :param verify: True if want check connection SSL :param custom_headers: dict of custom header to pass to each HTML request # merge custom headers to main headers Add secret key if exist. :param payload: :return: :param role: :return: :param token: :param method_token_info: :param kwargs: :return: The most important endpoint to understand is the well-known configuration endpoint. It lists endpoints and other configuration options relevant to the OpenID Connect implementation in Keycloak. :return It lists endpoints and other configuration options relevant. http://openid.net/specs/openid-connect-core-1_0.html#AuthorizationEndpoint :return: The token endpoint is used to obtain tokens. 
Tokens can either be obtained by exchanging an authorization code or by supplying credentials directly depending on what flow is used. The token endpoint is also used to obtain new access tokens when they expire. http://openid.net/specs/openid-connect-core-1_0.html#TokenEndpoint :param username: :param password: :param grant_type: :param code: :param redirect_uri :param totp :return: The token endpoint is used to obtain tokens. Tokens can either be obtained by exchanging an authorization code or by supplying credentials directly depending on what flow is used. The token endpoint is also used to obtain new access tokens when they expire. http://openid.net/specs/openid-connect-core-1_0.html#TokenEndpoint :param refresh_token: :param grant_type: :return: The userinfo endpoint returns standard claims about the authenticated user, and is protected by a bearer token. http://openid.net/specs/openid-connect-core-1_0.html#UserInfo :param token: :return: The logout endpoint logs out the authenticated user. :param refresh_token: :return: The certificate endpoint returns the public keys enabled by the realm, encoded as a JSON Web Key (JWK). Depending on the realm settings there can be one or more keys enabled for verifying tokens. https://tools.ietf.org/html/rfc7517 :return: The public key is exposed by the realm page directly. :return: Client applications can use a specific endpoint to obtain a special security token called a requesting party token (RPT). This token consists of all the entitlements (or permissions) for a user as a result of the evaluation of the permissions and authorization policies associated with the resources being requested. With an RPT, client applications can gain access to protected resources at the resource server. :return: The introspection endpoint is used to retrieve the active state of a token. It is can only be invoked by confidential clients. 
https://tools.ietf.org/html/rfc7662 :param token: :param rpt: :param token_type_hint: :return: A JSON Web Key (JWK) is a JavaScript Object Notation (JSON) data structure that represents a cryptographic key. This specification also defines a JWK Set JSON data structure that represents a set of JWKs. Cryptographic algorithms and identifiers for use with this specification are described in the separate JSON Web Algorithms (JWA) specification and IANA registries established by that specification. https://tools.ietf.org/html/rfc7517 :param token: :param key: :param algorithms: :return: Load Keycloak settings (authorization) :param path: settings file (json) :return: Get policies by user token :param token: user token :return: policies list Get permission by user token :param token: user token :param method_token_info: Decode token method :param kwargs: parameters for decode :return: permissions list
| 1.696956
| 2
|
mpas_ght/src/adjMat.py
|
trainsn/GNN-Surrogate
| 3
|
6629390
|
<gh_stars>1-10
import os
import argparse
import numpy as np
import scipy.sparse as sp
import torch
from torch_sparse import coalesce, spspmm
import pdb
def normalize(mx):
    """Row-normalize a sparse matrix so every non-empty row sums to 1."""
    row_totals = np.asarray(mx.sum(1)).flatten()
    inv_totals = np.power(row_totals, -1).flatten()
    # All-zero rows invert to inf; map them back to zero instead.
    inv_totals[np.isinf(inv_totals)] = 0.
    return sp.diags(inv_totals).dot(mx)
# Build per-level adjacency matrices and edge-attribute tensors for the
# graph hierarchy stored under --root.
# BUG FIX: ArgumentParser / ArgumentDefaultsHelpFormatter were referenced as
# bare names, but only `import argparse` is in scope, so this line raised
# NameError; they are now qualified with the argparse module.
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--root", type=str, required=True,
                    help="root of the graph hierarchy")
args = parser.parse_args()
root = args.root

graphSizes = np.load(os.path.join(root, "ghtGraphSizes.npy"))
for idx in range(0, graphSizes.shape[0] - 1):
    idxFile = "ghtAdjacencyIdx" + str(idx) + ".npy"
    valueFile = "ghtAdjacencyValue" + str(idx) + ".npy"
    edgeFile = "ghtEdgeInfo" + str(idx) + ".npy"

    edgeIdx0 = np.load(os.path.join(root, idxFile))
    edgeInfo0 = np.load(os.path.join(root, valueFile)).astype(np.float32)
    N = edgeIdx0.max() + 1

    # horizontal weight
    for i in range(4):
        edgeInfo0[i] = edgeInfo0[i] * edgeInfo0[4] * edgeInfo0[-1]
    # vertical weight
    edgeInfo0[4] = edgeInfo0[5] * edgeInfo0[-1]
    edgeInfo0[5] = edgeInfo0[6] * edgeInfo0[-1]
    # now use as the dimension for self edges
    edgeInfo0[6] = np.zeros(edgeInfo0.shape[1])
    edgeInfo0 = edgeInfo0[:-1]

    edgeIdx0 = torch.from_numpy(edgeIdx0.astype(np.int64))
    edgeInfo0 = torch.from_numpy(edgeInfo0.T)
    # Merge duplicate edges, averaging their attributes.
    edgeIdx0, edgeInfo0 = coalesce(edgeIdx0, edgeInfo0, m=graphSizes[idx], n=graphSizes[idx], op="mean")
    edgeIdx0 = edgeIdx0.numpy()
    edgeInfo0 = edgeInfo0.numpy().T

    # Row-normalized adjacency with self-loops (for graph convolutions).
    adj = sp.csr_matrix((np.ones(edgeInfo0.shape[1]), (edgeIdx0[0], edgeIdx0[1])), shape=(graphSizes[idx], graphSizes[idx]))
    adj = normalize(adj + sp.eye(adj.shape[0]))
    adj = adj.tocoo().astype(np.float32)
    adjIdx = np.vstack((adj.row, adj.col)).astype(np.int64)
    adjValue = adj.data
    adjIdx = torch.from_numpy(adjIdx)
    adjValue = torch.from_numpy(adjValue)
    adjIdx, adjValue = coalesce(adjIdx, adjValue, m=graphSizes[idx], n=graphSizes[idx])

    # Append explicit self edges; their attribute marks the "self" dimension.
    edgeIdxSelf = np.tile(np.arange(0, N, 1), (2, 1))
    edgeInfoSelf = np.concatenate((np.zeros((edgeInfo0.shape[0] - 1, N)), np.ones((1, N))), axis=0)
    edgeIdx = np.concatenate((edgeIdx0, edgeIdxSelf), axis=1)
    edgeInfo = np.concatenate((edgeInfo0, edgeInfoSelf), axis=1)
    edgeIdx = torch.from_numpy(edgeIdx)
    edgeInfo = torch.from_numpy(edgeInfo.T)
    edgeIdx, edgeInfo = coalesce(edgeIdx, edgeInfo, m=graphSizes[idx], n=graphSizes[idx])
    # Sanity check: adjacency and edge-info tensors must index identically.
    assert (adjIdx != edgeIdx).sum() == 0

    np.save(os.path.join(root, idxFile), adjIdx.type(torch.int32))
    np.save(os.path.join(root, valueFile), adjValue.type(torch.float16))
    np.save(os.path.join(root, edgeFile), edgeInfo.type(torch.float16))
|
import os
import argparse
import numpy as np
import scipy.sparse as sp
import torch
from torch_sparse import coalesce, spspmm
import pdb
def normalize(mx):
    """Row-normalize sparse matrix.

    Each row of *mx* is scaled by the inverse of its sum; all-zero rows
    (whose inverse is inf) are mapped to zero instead.
    """
    rowsum = np.array(mx.sum(1))
    r_inv = np.power(rowsum, -1).flatten()
    # Zero rows produce inf reciprocals; neutralise them.
    r_inv[np.isinf(r_inv)] = 0.
    r_mat_inv = sp.diags(r_inv)
    mx = r_mat_inv.dot(mx)
    return mx
# Build per-level adjacency matrices and edge-attribute tensors for the
# graph hierarchy stored under --root.
# BUG FIX: ArgumentParser / ArgumentDefaultsHelpFormatter were referenced as
# bare names, but only `import argparse` is in scope, so this line raised
# NameError; they are now qualified with the argparse module.
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--root", type=str, required=True,
                    help="root of the graph hierarchy")
args = parser.parse_args()
root = args.root

graphSizes = np.load(os.path.join(root, "ghtGraphSizes.npy"))
for idx in range(0, graphSizes.shape[0] - 1):
    idxFile = "ghtAdjacencyIdx" + str(idx) + ".npy"
    valueFile = "ghtAdjacencyValue" + str(idx) + ".npy"
    edgeFile = "ghtEdgeInfo" + str(idx) + ".npy"

    edgeIdx0 = np.load(os.path.join(root, idxFile))
    edgeInfo0 = np.load(os.path.join(root, valueFile)).astype(np.float32)
    N = edgeIdx0.max() + 1

    # horizontal weight
    for i in range(4):
        edgeInfo0[i] = edgeInfo0[i] * edgeInfo0[4] * edgeInfo0[-1]
    # vertical weight
    edgeInfo0[4] = edgeInfo0[5] * edgeInfo0[-1]
    edgeInfo0[5] = edgeInfo0[6] * edgeInfo0[-1]
    # now use as the dimension for self edges
    edgeInfo0[6] = np.zeros(edgeInfo0.shape[1])
    edgeInfo0 = edgeInfo0[:-1]

    edgeIdx0 = torch.from_numpy(edgeIdx0.astype(np.int64))
    edgeInfo0 = torch.from_numpy(edgeInfo0.T)
    # Merge duplicate edges, averaging their attributes.
    edgeIdx0, edgeInfo0 = coalesce(edgeIdx0, edgeInfo0, m=graphSizes[idx], n=graphSizes[idx], op="mean")
    edgeIdx0 = edgeIdx0.numpy()
    edgeInfo0 = edgeInfo0.numpy().T

    # Row-normalized adjacency with self-loops (for graph convolutions).
    adj = sp.csr_matrix((np.ones(edgeInfo0.shape[1]), (edgeIdx0[0], edgeIdx0[1])), shape=(graphSizes[idx], graphSizes[idx]))
    adj = normalize(adj + sp.eye(adj.shape[0]))
    adj = adj.tocoo().astype(np.float32)
    adjIdx = np.vstack((adj.row, adj.col)).astype(np.int64)
    adjValue = adj.data
    adjIdx = torch.from_numpy(adjIdx)
    adjValue = torch.from_numpy(adjValue)
    adjIdx, adjValue = coalesce(adjIdx, adjValue, m=graphSizes[idx], n=graphSizes[idx])

    # Append explicit self edges; their attribute marks the "self" dimension.
    edgeIdxSelf = np.tile(np.arange(0, N, 1), (2, 1))
    edgeInfoSelf = np.concatenate((np.zeros((edgeInfo0.shape[0] - 1, N)), np.ones((1, N))), axis=0)
    edgeIdx = np.concatenate((edgeIdx0, edgeIdxSelf), axis=1)
    edgeInfo = np.concatenate((edgeInfo0, edgeInfoSelf), axis=1)
    edgeIdx = torch.from_numpy(edgeIdx)
    edgeInfo = torch.from_numpy(edgeInfo.T)
    edgeIdx, edgeInfo = coalesce(edgeIdx, edgeInfo, m=graphSizes[idx], n=graphSizes[idx])
    # Sanity check: adjacency and edge-info tensors must index identically.
    assert (adjIdx != edgeIdx).sum() == 0

    np.save(os.path.join(root, idxFile), adjIdx.type(torch.int32))
    np.save(os.path.join(root, valueFile), adjValue.type(torch.float16))
    np.save(os.path.join(root, edgeFile), edgeInfo.type(torch.float16))
|
en
| 0.757741
|
Row-normalize sparse matrix # horizontal weight # vertical weight # now use as the dimension for self edges
| 2.321362
| 2
|
src/3-12.py
|
Zepyhrus/tf2
| 0
|
6629391
|
<filename>src/3-12.py
import os
from os.path import join, split, basename
import tensorflow as tf
import numpy as np
from tensorflow.keras.layers import Dense, Input, Layer
from tensorflow.keras.models import Model
import random
class MyLayer(Layer):
    """Custom Keras layer: a bias-free dense transform (inputs @ weight)."""

    def __init__(self, output_dim, **kwargs):
        # Number of output features produced by the layer.
        self.output_dim = output_dim
        super(MyLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        """Create the weight matrix once the input feature size is known."""
        shape = tf.TensorShape((input_shape[1], self.output_dim))
        # Single trainable variable: the (in_features, output_dim) weight.
        self.weight = self.add_weight(
            name='weight',
            shape=shape,
            initializer='uniform',
            trainable=True
        )
        super(MyLayer, self).build(input_shape)

    def call(self, inputs):
        # Forward pass: plain matrix multiply, no bias or activation.
        return tf.matmul(inputs, self.weight)

    def compute_output_shape(self, input_shape):
        """Same shape as the input with the last axis resized to output_dim."""
        shape = tf.TensorShape(input_shape).as_list()
        shape[-1] = self.output_dim
        return tf.TensorShape(shape)

    def get_config(self):
        """Serialize the layer config so a model using it can be reloaded."""
        base_config = super(MyLayer, self).get_config()
        base_config['output_dim'] = self.output_dim
        return base_config

    @classmethod
    def from_config(cls, config):
        # Inverse of get_config: rebuild the layer from its saved config.
        return cls(**config)
if __name__ == "__main__":
    # Toy regression: learn y ~= 2x from noisy samples.

    # train data: 100 points on [0, 10] with uniform noise in [-1, 1)
    x_train = np.linspace(0, 10, 100)
    y_train_random = -1 + 2 * np.random.random(*x_train.shape)
    y_train = 2 * x_train + y_train_random

    # test data: 10 points on [10, 20] (extrapolation range)
    x_test = np.linspace(10, 20, 10)
    y_test_random = -1 + 2 * np.random.random(*x_test.shape)
    y_test = 2 * x_test + y_test_random

    # predict: 10 distinct integers drawn from [20, 30)
    x_predict = random.sample(range(20, 30), 10)

    # define the network: Dense -> custom MyLayer -> scalar output
    inputs = Input(shape=(1, ))
    x = Dense(64, activation='relu')(inputs)
    x = MyLayer(64)(x)
    predictions = Dense(1)(x)

    # compile the model
    model = Model(inputs=inputs, outputs=predictions)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        metrics=['mae']
    )

    # train
    history = model.fit(
        x=x_train,
        y=y_train,
        epochs=100,
        batch_size=16
    )

    # evaluate on the held-out extrapolation range
    score = model.evaluate(
        x=x_test,
        y= y_test,
        batch_size=16
    )
    print('score: ', score)

    # predict on unseen inputs
    y_predict = model.predict(x_predict)
    print('x_predict: ', x_predict)
    print('y_predict: ', y_predict)
|
<filename>src/3-12.py
import os
from os.path import join, split, basename
import tensorflow as tf
import numpy as np
from tensorflow.keras.layers import Dense, Input, Layer
from tensorflow.keras.models import Model
import random
class MyLayer(Layer):
def __init__(self, output_dim, **kwargs):
self.output_dim = output_dim
super(MyLayer, self).__init__(**kwargs)
# define build
def build(self, input_shape):
shape = tf.TensorShape((input_shape[1], self.output_dim))
# define trainable variables
self.weight = self.add_weight(
name='weight',
shape=shape,
initializer='uniform',
trainable=True
)
super(MyLayer, self).build(input_shape)
def call(self, inputs):
return tf.matmul(inputs, self.weight)
#
def compute_output_shape(self, input_shape):
shape = tf.TensorShape(input_shape).as_list()
shape[-1] = self.output_dim
return tf.TensorShape(shape)
def get_config(self):
base_config = super(MyLayer, self).get_config()
base_config['output_dim'] = self.output_dim
return base_config
@classmethod
def from_config(cls, config):
return cls(**config)
if __name__ == "__main__":
# train data
x_train = np.linspace(0, 10, 100)
y_train_random = -1 + 2 * np.random.random(*x_train.shape)
y_train = 2 * x_train + y_train_random
# test data
x_test = np.linspace(10, 20, 10)
y_test_random = -1 + 2 * np.random.random(*x_test.shape)
y_test = 2 * x_test + y_test_random
# predict
x_predict = random.sample(range(20, 30), 10)
# define the network
inputs = Input(shape=(1, ))
x = Dense(64, activation='relu')(inputs)
x = MyLayer(64)(x)
predictions = Dense(1)(x)
# compile the model
model = Model(inputs=inputs, outputs=predictions)
model.compile(
optimizer='rmsprop',
loss='mse',
metrics=['mae']
)
# train
history = model.fit(
x=x_train,
y=y_train,
epochs=100,
batch_size=16
)
# test
score = model.evaluate(
x=x_test,
y= y_test,
batch_size=16
)
print('score: ', score)
# predict
y_predict = model.predict(x_predict)
print('x_predict: ', x_predict)
print('y_predict: ', y_predict)
|
en
| 0.510315
|
# define build # define trainable variables # # train data # test data # predict # define the network # compile the model # train # test # predict
| 2.817622
| 3
|
yhackss17/apps.py
|
DannyKong12/yhack17
| 1
|
6629392
|
from django.apps import AppConfig
class Yhackss17Config(AppConfig):
    """Django application configuration for the ``yhackss17`` app."""

    # App label Django uses to register this application.
    name = 'yhackss17'
|
from django.apps import AppConfig
class Yhackss17Config(AppConfig):
name = 'yhackss17'
|
none
| 1
| 1.139006
| 1
|
|
cosmic-core/systemvm/patches/centos7/opt/cosmic/router/bin/cs/CsNetfilter.py
|
sanderv32/cosmic
| 64
|
6629393
|
<gh_stars>10-100
from __future__ import print_function
import logging
from subprocess import Popen, PIPE
import CsHelper
from databag.cs_iptables_save import Tables
class CsChain(object):
    """Track iptables chains per table, plus a per-chain rule counter."""

    def __init__(self):
        # table name -> ordered list of chain names
        self.chain = {}
        # chain name most recently passed to add()
        self.last_added = ''
        # chain name -> number of rules registered via add_rule()
        self.count = {}

    def add(self, table, chain):
        """Register *chain* under *table*, resetting its counter when it was
        not the chain added last."""
        self.chain.setdefault(table, []).append(chain)
        if chain != self.last_added:
            self.last_added = chain
            self.count[chain] = 0

    def add_rule(self, chain):
        """Count one more rule attached to *chain*."""
        self.count[chain] = self.count[chain] + 1

    def get(self, table):
        """Chains registered for *table*; an empty dict when the table is
        unknown (preserves the original return-type quirk)."""
        return self.chain[table] if table in self.chain else {}

    def get_count(self, chain):
        """Number of rules counted for *chain*."""
        return self.count[chain]

    def last(self):
        """Chain name most recently passed to add()."""
        return self.last_added

    def has_chain(self, table, chain):
        """True when *chain* is registered under *table*."""
        return table in self.chain and chain in self.chain[table]
class CsTable(object):
def __init__(self):
self.table = []
self.last_added = ''
def add(self, name):
if name not in self.table:
self.table.append(name)
self.last_added = name
def get(self):
return self.table
def last(self):
return self.last_added
class CsNetfilters(object):
def __init__(self, config, load=True):
self.config = config
self.rules = []
self.iptablerules = []
self.table = CsTable()
self.chain = CsChain()
if load:
self.get_all_rules()
def get_all_rules(self):
for i in CsHelper.execute("iptables-save"):
if i.startswith('*'): # Table
self.table.add(i[1:])
if i.startswith(':'): # Chain
string = i[1:].split(' ')[0]
cmd = "iptables -t %s -N %s" % (self.table.last(), string)
self.iptablerules.append(cmd)
self.chain.add(self.table.last(), string)
if i.startswith('-A'): # Rule
self.chain.add_rule(i.split()[1])
rule = CsNetfilter()
rule.parse(i)
rule.set_table(self.table.last())
rule.set_chain(i.split()[1])
rule.set_count(self.chain.get_count(i.split()[1]))
self.save(rule)
def save(self, rule):
self.rules.append(rule)
def get(self):
return self.rules
def has_table(self, table):
return table in self.table.get()
def has_chain(self, table, chain):
return self.chain.has_chain(table, chain)
def has_rule(self, new_rule):
for r in self.get():
if new_rule == r:
if new_rule.get_count() > 0:
continue
r.mark_seen()
return True
return False
def get_unseen(self):
del_list = [x for x in self.rules if x.unseen()]
for r in del_list:
self.delete(r)
logging.info("Delete rule %s from table %s", r.to_str(True), r.get_table())
def compare(self, list):
""" Compare reality with what is needed """
for c in self.chain.get("filter"):
# Ensure all inbound/outbound chains have a default drop rule
if c.startswith("ACL_INBOUND") or c.startswith("ACL_OUTBOUND"):
list.append(["filter", "", "-A %s -j DROP" % c])
# PASS 1: Ensure all chains are present and cleanup unused rules.
for fw in list:
new_rule = CsNetfilter()
new_rule.parse(fw[2])
new_rule.set_table(fw[0])
self.has_rule(new_rule)
self.del_standard()
self.get_unseen()
# PASS 2: Create rules
for fw in list:
new_rule = CsNetfilter()
new_rule.parse(fw[2])
new_rule.set_table(fw[0])
if isinstance(fw[1], int):
new_rule.set_count(fw[1])
# This makes the logs very verbose, you probably don't want this
# Uncomment when debugging
# logging.info("Add: rule=%s table=%s", fw[2], new_rule.get_table())
# front means insert instead of append
cpy = fw[2]
if fw[1] == "front":
cpy = cpy.replace('-A', '-I')
if isinstance(fw[1], int):
cpy = cpy.replace("-A %s" % new_rule.get_chain(), '-I %s %s' % (new_rule.get_chain(), fw[1]))
self.iptablerules.append("iptables -t %s %s" % (new_rule.get_table(), cpy))
self.apply_rules()
def apply_rules(self):
s = []
for r in self.iptablerules:
r.replace(' ', ' ') # Remove duplicate spaces
if r not in s:
s.append(r)
chains = Tables(s)
chains.table_printout()
# COMMIT all rules.
p = Popen("iptables-restore < /etc/sysconfig/iptables", shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
if not stderr:
logging.info("iptables-restore result: success!")
else:
print("iptables-restore failed, resulted in %s" % stderr)
logging.info("iptables-restore failed, result: %s", stderr)
exit(1)
def del_standard(self):
""" Del rules that are there but should not be deleted
These standard firewall rules vary according to the device type
"""
type = self.config.dbag_cmdline['config']['type']
try:
table = ''
for i in open("/etc/iptables/iptables-%s" % type):
if i.startswith('*'): # Table
table = i[1:].strip()
if i.startswith('-A'): # Rule
self.del_rule(table, i.strip())
except IOError:
logging.debug("Exception in del_standard, returning")
# Nothing can be done
return
def del_rule(self, table, rule):
nr = CsNetfilter()
nr.parse(rule)
nr.set_table(table)
self.delete(nr)
def delete(self, rule):
""" Delete a rule from the list of configured rules
The rule will not actually be removed on the host """
self.rules[:] = [x for x in self.rules if not x == rule]
class CsNetfilter(object):
def __init__(self):
self.rule = { }
self.table = ''
self.chain = ''
self.seen = False
self.count = 0
def parse(self, rule):
self.rule = self.__convert_to_dict(rule)
def unseen(self):
return self.seen is False
def mark_seen(self):
self.seen = True
def __convert_to_dict(self, rule):
rule = unicode(rule.lstrip())
rule = rule.replace('! -', '!_-')
rule = rule.replace('-p all', '')
rule = rule.replace(' ', ' ')
rule = rule.replace('bootpc', '68')
# Ugly hack no.23 split this or else I will have an odd number of parameters
rule = rule.replace('--checksum-fill', '--checksum fill')
# -m can appear twice in a string
rule = rule.replace('-m state', '-m2 state')
rule = rule.replace('ESTABLISHED,RELATED', 'RELATED,ESTABLISHED')
bits = rule.split(' ')
rule = dict(zip(bits[0::2], bits[1::2]))
if "-A" in rule.keys():
self.chain = rule["-A"]
return rule
def set_table(self, table):
if table == '':
table = "filter"
self.table = table
def get_table(self):
return self.table
def set_chain(self, chain):
self.chain = chain
def set_count(self, count=0):
self.count = count
def get_count(self):
return self.count
def get_chain(self):
return self.chain
def get_rule(self):
return self.rule
def to_str(self, delete=False):
""" Convert the rule back into aynactically correct iptables command """
# Order is important
order = ['-A', '-s', '-d', '!_-d', '-i', '!_-i', '-p', '-m', '-m2', '--icmp-type', '--state',
'--dport', '--destination-port', '-o', '!_-o', '-j', '--set-xmark', '--checksum',
'--to-source', '--to-destination', '--mark']
str = ''
for k in order:
if k in self.rule.keys():
printable = k.replace('-m2', '-m')
printable = printable.replace('!_-', '! -')
if delete:
printable = printable.replace('-A', '-D')
if str == '':
str = "%s %s" % (printable, self.rule[k])
else:
str = "%s %s %s" % (str, printable, self.rule[k])
str = str.replace("--checksum fill", "--checksum-fill")
return str
def __eq__(self, rule):
if rule.get_table() != self.get_table():
return False
if rule.get_chain() != self.get_chain():
return False
if len(rule.get_rule().items()) != len(self.get_rule().items()):
return False
common = set(rule.get_rule().items()) & set(self.get_rule().items())
if len(common) != len(rule.get_rule()):
return False
return True
|
from __future__ import print_function
import logging
from subprocess import Popen, PIPE
import CsHelper
from databag.cs_iptables_save import Tables
class CsChain(object):
def __init__(self):
self.chain = { }
self.last_added = ''
self.count = { }
def add(self, table, chain):
if table not in self.chain.keys():
self.chain.setdefault(table, []).append(chain)
else:
self.chain[table].append(chain)
if self.last_added != chain:
self.last_added = chain
self.count[chain] = 0
def add_rule(self, chain):
self.count[chain] += 1
def get(self, table):
if table not in self.chain.keys():
return { }
return self.chain[table]
def get_count(self, chain):
return self.count[chain]
def last(self):
return self.last_added
def has_chain(self, table, chain):
if table not in self.chain.keys():
return False
if chain not in self.chain[table]:
return False
return True
class CsTable(object):
def __init__(self):
self.table = []
self.last_added = ''
def add(self, name):
if name not in self.table:
self.table.append(name)
self.last_added = name
def get(self):
return self.table
def last(self):
return self.last_added
class CsNetfilters(object):
def __init__(self, config, load=True):
self.config = config
self.rules = []
self.iptablerules = []
self.table = CsTable()
self.chain = CsChain()
if load:
self.get_all_rules()
def get_all_rules(self):
for i in CsHelper.execute("iptables-save"):
if i.startswith('*'): # Table
self.table.add(i[1:])
if i.startswith(':'): # Chain
string = i[1:].split(' ')[0]
cmd = "iptables -t %s -N %s" % (self.table.last(), string)
self.iptablerules.append(cmd)
self.chain.add(self.table.last(), string)
if i.startswith('-A'): # Rule
self.chain.add_rule(i.split()[1])
rule = CsNetfilter()
rule.parse(i)
rule.set_table(self.table.last())
rule.set_chain(i.split()[1])
rule.set_count(self.chain.get_count(i.split()[1]))
self.save(rule)
def save(self, rule):
self.rules.append(rule)
def get(self):
return self.rules
def has_table(self, table):
return table in self.table.get()
def has_chain(self, table, chain):
return self.chain.has_chain(table, chain)
def has_rule(self, new_rule):
for r in self.get():
if new_rule == r:
if new_rule.get_count() > 0:
continue
r.mark_seen()
return True
return False
def get_unseen(self):
del_list = [x for x in self.rules if x.unseen()]
for r in del_list:
self.delete(r)
logging.info("Delete rule %s from table %s", r.to_str(True), r.get_table())
def compare(self, list):
""" Compare reality with what is needed """
for c in self.chain.get("filter"):
# Ensure all inbound/outbound chains have a default drop rule
if c.startswith("ACL_INBOUND") or c.startswith("ACL_OUTBOUND"):
list.append(["filter", "", "-A %s -j DROP" % c])
# PASS 1: Ensure all chains are present and cleanup unused rules.
for fw in list:
new_rule = CsNetfilter()
new_rule.parse(fw[2])
new_rule.set_table(fw[0])
self.has_rule(new_rule)
self.del_standard()
self.get_unseen()
# PASS 2: Create rules
for fw in list:
new_rule = CsNetfilter()
new_rule.parse(fw[2])
new_rule.set_table(fw[0])
if isinstance(fw[1], int):
new_rule.set_count(fw[1])
# This makes the logs very verbose, you probably don't want this
# Uncomment when debugging
# logging.info("Add: rule=%s table=%s", fw[2], new_rule.get_table())
# front means insert instead of append
cpy = fw[2]
if fw[1] == "front":
cpy = cpy.replace('-A', '-I')
if isinstance(fw[1], int):
cpy = cpy.replace("-A %s" % new_rule.get_chain(), '-I %s %s' % (new_rule.get_chain(), fw[1]))
self.iptablerules.append("iptables -t %s %s" % (new_rule.get_table(), cpy))
self.apply_rules()
def apply_rules(self):
s = []
for r in self.iptablerules:
r.replace(' ', ' ') # Remove duplicate spaces
if r not in s:
s.append(r)
chains = Tables(s)
chains.table_printout()
# COMMIT all rules.
p = Popen("iptables-restore < /etc/sysconfig/iptables", shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
if not stderr:
logging.info("iptables-restore result: success!")
else:
print("iptables-restore failed, resulted in %s" % stderr)
logging.info("iptables-restore failed, result: %s", stderr)
exit(1)
def del_standard(self):
""" Del rules that are there but should not be deleted
These standard firewall rules vary according to the device type
"""
type = self.config.dbag_cmdline['config']['type']
try:
table = ''
for i in open("/etc/iptables/iptables-%s" % type):
if i.startswith('*'): # Table
table = i[1:].strip()
if i.startswith('-A'): # Rule
self.del_rule(table, i.strip())
except IOError:
logging.debug("Exception in del_standard, returning")
# Nothing can be done
return
def del_rule(self, table, rule):
nr = CsNetfilter()
nr.parse(rule)
nr.set_table(table)
self.delete(nr)
def delete(self, rule):
""" Delete a rule from the list of configured rules
The rule will not actually be removed on the host """
self.rules[:] = [x for x in self.rules if not x == rule]
class CsNetfilter(object):
def __init__(self):
self.rule = { }
self.table = ''
self.chain = ''
self.seen = False
self.count = 0
def parse(self, rule):
self.rule = self.__convert_to_dict(rule)
def unseen(self):
return self.seen is False
def mark_seen(self):
self.seen = True
def __convert_to_dict(self, rule):
rule = unicode(rule.lstrip())
rule = rule.replace('! -', '!_-')
rule = rule.replace('-p all', '')
rule = rule.replace(' ', ' ')
rule = rule.replace('bootpc', '68')
# Ugly hack no.23 split this or else I will have an odd number of parameters
rule = rule.replace('--checksum-fill', '--checksum fill')
# -m can appear twice in a string
rule = rule.replace('-m state', '-m2 state')
rule = rule.replace('ESTABLISHED,RELATED', 'RELATED,ESTABLISHED')
bits = rule.split(' ')
rule = dict(zip(bits[0::2], bits[1::2]))
if "-A" in rule.keys():
self.chain = rule["-A"]
return rule
def set_table(self, table):
if table == '':
table = "filter"
self.table = table
def get_table(self):
return self.table
def set_chain(self, chain):
self.chain = chain
def set_count(self, count=0):
self.count = count
def get_count(self):
return self.count
def get_chain(self):
return self.chain
def get_rule(self):
return self.rule
def to_str(self, delete=False):
""" Convert the rule back into aynactically correct iptables command """
# Order is important
order = ['-A', '-s', '-d', '!_-d', '-i', '!_-i', '-p', '-m', '-m2', '--icmp-type', '--state',
'--dport', '--destination-port', '-o', '!_-o', '-j', '--set-xmark', '--checksum',
'--to-source', '--to-destination', '--mark']
str = ''
for k in order:
if k in self.rule.keys():
printable = k.replace('-m2', '-m')
printable = printable.replace('!_-', '! -')
if delete:
printable = printable.replace('-A', '-D')
if str == '':
str = "%s %s" % (printable, self.rule[k])
else:
str = "%s %s %s" % (str, printable, self.rule[k])
str = str.replace("--checksum fill", "--checksum-fill")
return str
def __eq__(self, rule):
if rule.get_table() != self.get_table():
return False
if rule.get_chain() != self.get_chain():
return False
if len(rule.get_rule().items()) != len(self.get_rule().items()):
return False
common = set(rule.get_rule().items()) & set(self.get_rule().items())
if len(common) != len(rule.get_rule()):
return False
return True
|
en
| 0.890833
|
# Table # Chain # Rule Compare reality with what is needed # Ensure all inbound/outbound chains have a default drop rule # PASS 1: Ensure all chains are present and cleanup unused rules. # PASS 2: Create rules # This makes the logs very verbose, you probably don't want this # Uncomment when debugging # logging.info("Add: rule=%s table=%s", fw[2], new_rule.get_table()) # front means insert instead of append # Remove duplicate spaces # COMMIT all rules. Del rules that are there but should not be deleted These standard firewall rules vary according to the device type # Table # Rule # Nothing can be done Delete a rule from the list of configured rules The rule will not actually be removed on the host # Ugly hack no.23 split this or else I will have an odd number of parameters # -m can appear twice in a string Convert the rule back into aynactically correct iptables command # Order is important
| 2.299341
| 2
|
pyscf/shciscf/examples/03_c2_diffsymm.py
|
crisely09/pyscf
| 2
|
6629394
|
<reponame>crisely09/pyscf<gh_stars>1-10
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
"""
All output is deleted after the run to keep the directory neat. Comment out the
cleanup section to view output.
"""
import time
import numpy
import math
import os
from pyscf import gto, scf, ao2mo, mcscf, tools, fci
from pyscf.shciscf import shci, settings
t0 = time.time()
alpha = 0.007297351
mol = gto.M(
atom="C 0 0 0; C 0 0 1.3119", basis="cc-pvqz", verbose=5, symmetry=1, spin=2
)
myhf = scf.RHF(mol)
myhf.kernel()
##USE SHCISCF
solver1 = shci.SHCI(mol)
solver1.irrep_nelec = {"A1g": (2, 1), "A1u": (1, 1), "E1ux": (1, 1), "E1uy": (1, 0)}
solver1.prefix = "solver1"
solver1.epsilon2 = 1.0e-7
solver1.stochastic = False
solver2 = shci.SHCI(mol)
solver2.irrep_nelec = {"A1g": (2, 1), "A1u": (1, 1), "E1ux": (1, 0), "E1uy": (1, 1)}
solver2.prefix = "solver2"
solver2.epsilon2 = 1.0e-7
solver2.stochastic = False
mycas = shci.SHCISCF(myhf, 8, 8)
mcscf.state_average_mix_(mycas, [solver1, solver2], numpy.ones(2) / 2)
mycas.kernel()
print("Total Time: ", time.time() - t0)
# File cleanup
solver1.cleanup_dice_files()
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
"""
All output is deleted after the run to keep the directory neat. Comment out the
cleanup section to view output.
"""
import time
import numpy
import math
import os
from pyscf import gto, scf, ao2mo, mcscf, tools, fci
from pyscf.shciscf import shci, settings
t0 = time.time()
alpha = 0.007297351
mol = gto.M(
atom="C 0 0 0; C 0 0 1.3119", basis="cc-pvqz", verbose=5, symmetry=1, spin=2
)
myhf = scf.RHF(mol)
myhf.kernel()
##USE SHCISCF
solver1 = shci.SHCI(mol)
solver1.irrep_nelec = {"A1g": (2, 1), "A1u": (1, 1), "E1ux": (1, 1), "E1uy": (1, 0)}
solver1.prefix = "solver1"
solver1.epsilon2 = 1.0e-7
solver1.stochastic = False
solver2 = shci.SHCI(mol)
solver2.irrep_nelec = {"A1g": (2, 1), "A1u": (1, 1), "E1ux": (1, 0), "E1uy": (1, 1)}
solver2.prefix = "solver2"
solver2.epsilon2 = 1.0e-7
solver2.stochastic = False
mycas = shci.SHCISCF(myhf, 8, 8)
mcscf.state_average_mix_(mycas, [solver1, solver2], numpy.ones(2) / 2)
mycas.kernel()
print("Total Time: ", time.time() - t0)
# File cleanup
solver1.cleanup_dice_files()
|
en
| 0.823942
|
#!/usr/bin/env python # Copyright 2014-2019 The PySCF Developers. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: <NAME> <<EMAIL>> # <NAME> <<EMAIL>> # All output is deleted after the run to keep the directory neat. Comment out the cleanup section to view output. ##USE SHCISCF # File cleanup
| 1.819915
| 2
|
app/populate_database.py
|
Rojber/open_password_management_API
| 2
|
6629395
|
<gh_stars>1-10
from random import randint
from bson.objectid import ObjectId
from faker import Faker
from faker.providers import internet, person, company, address, phone_number, date_time
fake = Faker()
fake.add_provider(internet)
fake.add_provider(person)
fake.add_provider(company)
fake.add_provider(address)
fake.add_provider(phone_number)
fake.add_provider(date_time)
def populate(db, client_encryption, data_key_id):
for x in range(0, 20):
account = {
'email': client_encryption.encrypt(fake.ascii_company_email(), "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", data_key_id),
'login': client_encryption.encrypt(fake.user_name(), "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", data_key_id),
'password': client_encryption.encrypt(fake.user_name(), "<PASSWORD>_256_CBC_<PASSWORD>_SHA_512-Random", data_key_id),
'logindata': [
{
"_id": ObjectId(),
'site': fake.domain_name(),
'login': client_encryption.encrypt(fake.user_name(), "AEAD_AES_256_CBC_HMAC_SHA_512-Random", data_key_id),
'password': client_encryption.encrypt(fake.user_name(), "<PASSWORD>", data_key_id),
'passwordStrength': randint(1, 5),
'note': fake.domain_word()
},
{
"_id": ObjectId(),
'site': fake.domain_name(),
'login': client_encryption.encrypt(fake.user_name(), "AEAD_AES_256_CBC_HMAC_SHA_512-Random", data_key_id),
'password': client_encryption.encrypt(fake.user_name(), "<PASSWORD>5<PASSWORD>", data_key_id),
'passwordStrength': randint(1, 5),
'note': fake.domain_word()
}
]
}
# Insert users directly into MongoDB
result = db.passwordManager.accounts.insert_one(account)
# Print to the console the ObjectID of the new document
print('Created {0} of 20 as {1}'.format(x, result.inserted_id))
print('Finished creating 20 accounts')
|
from random import randint
from bson.objectid import ObjectId
from faker import Faker
from faker.providers import internet, person, company, address, phone_number, date_time
fake = Faker()
fake.add_provider(internet)
fake.add_provider(person)
fake.add_provider(company)
fake.add_provider(address)
fake.add_provider(phone_number)
fake.add_provider(date_time)
def populate(db, client_encryption, data_key_id):
for x in range(0, 20):
account = {
'email': client_encryption.encrypt(fake.ascii_company_email(), "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", data_key_id),
'login': client_encryption.encrypt(fake.user_name(), "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", data_key_id),
'password': client_encryption.encrypt(fake.user_name(), "<PASSWORD>_256_CBC_<PASSWORD>_SHA_512-Random", data_key_id),
'logindata': [
{
"_id": ObjectId(),
'site': fake.domain_name(),
'login': client_encryption.encrypt(fake.user_name(), "AEAD_AES_256_CBC_HMAC_SHA_512-Random", data_key_id),
'password': client_encryption.encrypt(fake.user_name(), "<PASSWORD>", data_key_id),
'passwordStrength': randint(1, 5),
'note': fake.domain_word()
},
{
"_id": ObjectId(),
'site': fake.domain_name(),
'login': client_encryption.encrypt(fake.user_name(), "AEAD_AES_256_CBC_HMAC_SHA_512-Random", data_key_id),
'password': client_encryption.encrypt(fake.user_name(), "<PASSWORD>5<PASSWORD>", data_key_id),
'passwordStrength': randint(1, 5),
'note': fake.domain_word()
}
]
}
# Insert users directly into MongoDB
result = db.passwordManager.accounts.insert_one(account)
# Print to the console the ObjectID of the new document
print('Created {0} of 20 as {1}'.format(x, result.inserted_id))
print('Finished creating 20 accounts')
|
en
| 0.622171
|
# Insert users directly into MongoDB # Print to the console the ObjectID of the new document
| 2.530566
| 3
|
RC_RPi_v2/Stash/p5listen.py
|
njbuch/PiS2_Controller
| 0
|
6629396
|
import smbus
import time
# for RPI version 1, use "bus = smbus.SMBus(0)"
bus = smbus.SMBus(1)
# This is the address we setup in the Arduino Program
address = 0x07
def writeNumber(value):
bus.write_byte(address, value)
# bus.write_byte_data(address, 0, value)
return -1
def readNumber():
number = bus.read_byte(address)
# number = bus.read_byte_data(address, 1)
return number
while True:
var = input("Enter 1 - 9: ")
if not var:
continue
writeNumber(var)
print "RPI: Hi Arduino, I sent you ", var
# sleep one second
time.sleep(1)
number = readNumber()
print "Arduino: Hey RPI, I received a digit ", number
print
|
import smbus
import time
# for RPI version 1, use "bus = smbus.SMBus(0)"
bus = smbus.SMBus(1)
# This is the address we setup in the Arduino Program
address = 0x07
def writeNumber(value):
bus.write_byte(address, value)
# bus.write_byte_data(address, 0, value)
return -1
def readNumber():
number = bus.read_byte(address)
# number = bus.read_byte_data(address, 1)
return number
while True:
var = input("Enter 1 - 9: ")
if not var:
continue
writeNumber(var)
print "RPI: Hi Arduino, I sent you ", var
# sleep one second
time.sleep(1)
number = readNumber()
print "Arduino: Hey RPI, I received a digit ", number
print
|
en
| 0.729409
|
# for RPI version 1, use "bus = smbus.SMBus(0)" # This is the address we setup in the Arduino Program # bus.write_byte_data(address, 0, value) # number = bus.read_byte_data(address, 1) # sleep one second
| 3.395766
| 3
|
pymath/divisible_by_7_not_5/__init__.py
|
JASTYN/pythonmaster
| 3
|
6629397
|
class Divisible7(object):
"""
class that gets all the numbers divisible by 7, but not by 5 in a range
"""
def __init__(self, rnge):
self.rnge = rnge
def div7(self):
return ",".join([str(x) for x in self.rnge if x % 7 == 0 and x % 5 != 0])
# solution two
def div7_two(self):
l = []
for i in self.rnge:
if (i % 7 == 0) and (i % 5 != 0):
l.append(str(i))
return ','.join(l)
lst = Divisible7(range(2000, 3201))
print(lst.div7())
|
class Divisible7(object):
"""
class that gets all the numbers divisible by 7, but not by 5 in a range
"""
def __init__(self, rnge):
self.rnge = rnge
def div7(self):
return ",".join([str(x) for x in self.rnge if x % 7 == 0 and x % 5 != 0])
# solution two
def div7_two(self):
l = []
for i in self.rnge:
if (i % 7 == 0) and (i % 5 != 0):
l.append(str(i))
return ','.join(l)
lst = Divisible7(range(2000, 3201))
print(lst.div7())
|
en
| 0.969047
|
class that gets all the numbers divisible by 7, but not by 5 in a range # solution two
| 3.95945
| 4
|
foodSearchApp/admin.py
|
cpankajr/Food-Search-App
| 2
|
6629398
|
from django.contrib import admin
from django.contrib.auth.models import Group
from django.utils.safestring import mark_safe
from django.urls import reverse
from foodSearchApp.models import *
admin.site.register(FoodSearchData)
|
from django.contrib import admin
from django.contrib.auth.models import Group
from django.utils.safestring import mark_safe
from django.urls import reverse
from foodSearchApp.models import *
admin.site.register(FoodSearchData)
|
none
| 1
| 1.270189
| 1
|
|
python/ymt_components/ymt_face_eyepupil_01/__init__.py
|
yamahigashi/mgear_shifter_components
| 10
|
6629399
|
"""mGear shifter components"""
# pylint: disable=import-error,W0201,C0111,C0112
import maya.cmds as cmds
import maya.OpenMaya as om1
import maya.api.OpenMaya as om
import pymel.core as pm
from pymel.core import datatypes
import exprespy.cmd
from mgear.shifter import component
from mgear.rigbits.facial_rigger import helpers
from mgear.rigbits.facial_rigger import constraints
from mgear.rigbits import ghost
from mgear.core import (
transform,
curve,
applyop,
attribute,
icon,
fcurve,
vector,
meshNavigation,
node,
primitive,
utils,
)
from mgear.core.transform import (
getTransform,
resetTransform,
# getTransformLookingAt,
# getChainTransform2,
setMatrixPosition,
)
from mgear.core.primitive import (
addTransform,
)
import ymt_shifter_utility as ymt_util
import ymt_shifter_utility.curve as curve
if False: # pylint: disable=using-constant-test, wrong-import-order
# For type annotation
from typing import ( # NOQA: F401 pylint: disable=unused-import
Optional,
Dict,
List,
Tuple,
Pattern,
Callable,
Any,
Text,
Generator,
Union
)
from pathlib import Path # NOQA: F401, F811 pylint: disable=unused-import,reimported
from types import ModuleType # NOQA: F401 pylint: disable=unused-import
from six.moves import reload_module as reload # NOQA: F401 pylint: disable=unused-import
from logging import ( # noqa:F401 pylint: disable=unused-import, wrong-import-order
StreamHandler,
getLogger,
WARN,
DEBUG,
INFO
)
handler = StreamHandler()
handler.setLevel(DEBUG)
logger = getLogger(__name__)
logger.setLevel(INFO)
logger.addHandler(handler)
logger.propagate = False
#############################################
# COMPONENT
#############################################
class Component(component.Main):
"""Shifter component Class"""
# =====================================================
# OBJECTS
# =====================================================
def addToSubGroup(self, obj, group_name):
if self.settings["ctlGrp"]:
ctlGrp = self.settings["ctlGrp"]
else:
ctlGrp = "controllers"
self.addToGroup(obj, group_name, parentGrp=ctlGrp)
def addObjects(self):
"""Add all the objects needed to create the component."""
if self.settings["neutralRotation"]:
t = transform.getTransformFromPos(self.guide.pos["root"])
else:
t = self.guide.tra["root"]
if self.settings["mirrorBehaviour"] and self.negate:
scl = [1, 1, -1]
else:
scl = [1, 1, 1]
t = transform.setMatrixScale(t, scl)
self.detailControllersGroupName = "controllers_detail" # TODO: extract to settings
self.primaryControllersGroupName = "controllers_primary" # TODO: extract to settings
self.ik_cns = primitive.addTransform(
self.root, self.getName("ik_cns"), t)
self.ctl = self.addCtl(self.ik_cns,
"ctl",
t,
self.color_ik,
self.settings["icon"],
w=self.settings["ctlSize"] * self.size,
h=self.settings["ctlSize"] * self.size,
d=self.settings["ctlSize"] * self.size,
tp=self.parentCtlTag)
self.addToSubGroup(self.ctl, self.primaryControllersGroupName)
t = self.guide.tra["lookat"]
self.lookat = self.addCtl(self.ik_cns,
"lookat_ctl",
t,
self.color_ik,
self.settings["icon"],
w=self.settings["ctlSize"] * self.size,
h=self.settings["ctlSize"] * self.size,
d=self.settings["ctlSize"] * self.size,
tp=self.parentCtlTag)
self.addToSubGroup(self.lookat, self.primaryControllersGroupName)
# we need to set the rotation order before lock any rotation axis
if self.settings["k_ro"]:
rotOderList = ["XYZ", "YZX", "ZXY", "XZY", "YXZ", "ZYX"]
attribute.setRotOrder(
self.ctl, rotOderList[self.settings["default_rotorder"]])
params = [s for s in
["tx", "ty", "tz", "ro", "rx", "ry", "rz", "sx", "sy", "sz"]
if self.settings["k_" + s]]
ymt_util.setKeyableAttributesDontLockVisibility(self.ctl, params)
if self.settings["joint"]:
self.jnt_pos.append([self.ctl, 0, None, self.settings["uniScale"]])
self.sliding_surface = pm.duplicate(self.guide.getObjects(self.guide.root)["sliding_surface"])[0]
pm.parent(self.sliding_surface, self.root)
self.sliding_surface.visibility.set(False)
pm.makeIdentity(self.sliding_surface, apply=True, t=1, r=1, s=1, n=0, pn=1)
def addAttributes(self):
# Ref
if self.settings["ikrefarray"]:
ref_names = self.get_valid_alias_list(
self.settings["ikrefarray"].split(","))
if len(ref_names) > 1:
self.ikref_att = self.addAnimEnumParam(
"ikref",
"Ik Ref",
0,
ref_names)
def addOperators(self):
return
# =====================================================
# CONNECTOR
# =====================================================
def setRelation(self):
"""Set the relation beetween object from guide to rig"""
self.relatives["root"] = self.ctl
self.relatives["lookat"] = self.lookat
self.controlRelatives["root"] = self.ctl
if self.settings["joint"]:
self.jointRelatives["root"] = 0
self.aliasRelatives["root"] = "ctl"
def addConnection(self):
"""Add more connection definition to the set"""
self.connections["standard"] = self.connect_standard
self.connections["orientation"] = self.connect_orientation
def connect_standard(self):
"""standard connection definition for the component"""
self.connect_standardWithSimpleIkRef()
self.connect_slide_ghost()
def connect_orientation(self):
"""Orient connection definition for the component"""
self.connect_orientCns()
def _visi_off_lock(self, node):
"""Short cuts."""
return
node.visibility.set(False)
ymt_util.setKeyableAttributesDontLockVisibility(node, [])
cmds.setAttr("{}.visibility".format(node.name()), l=False)
def connect_slide_ghost(self):
# slide system
try:
ghostSliderForPupil(
self.lookat,
self.ctl,
self.sliding_surface,
self.root)
except:
import traceback as tb
tb.print_exc()
raise
def ghostSliderForPupil(ctl, ghostCtl, surface, sliderParent):
"""Modify the ghost control behaviour to slide on top of a surface
Args:
ghostControls (dagNode): The ghost control
surface (Surface): The NURBS surface
sliderParent (dagNode): The parent for the slider.
"""
def conn(ctl, driver, ghost):
for attr in ["translate", "scale", "rotate"]:
pm.connectAttr("{}.{}".format(ctl, attr), "{}.{}".format(driver, attr))
# pm.disconnectAttr("{}.{}".format(ctl, attr), "{}.{}".format(ghost, attr))
surfaceShape = surface.getShape()
t = ctl.getMatrix(worldSpace=True)
gDriver = primitive.addTransform(ghostCtl.getParent(), "{}_slideDriver".format(ctl.name()), t)
# conn(ctl, gDriver, ghostCtl)
print("ctl: {}, gDriver: {}, ghostCtl: {}".format(ctl, gDriver, ghostCtl))
oParent = ghostCtl.getParent()
npoName = "_".join(ghostCtl.name().split("_")[:-1]) + "_npo"
oTra = pm.PyNode(pm.createNode("transform", n=npoName, p=oParent, ss=True))
oTra.setTransformation(ghostCtl.getMatrix())
pm.parent(ghostCtl, oTra)
slider = primitive.addTransform(sliderParent, ctl.name() + "_slideDriven", t)
down, _, up = findPathAtoB(ctl, sliderParent)
mul_node = pm.createNode("multMatrix")
j = k = 0
for j, d in enumerate(down):
d.attr("matrix") >> mul_node.attr("matrixIn[{}]".format(j))
for k, u in enumerate(up):
u.attr("inverseMatrix") >> mul_node.attr("matrixIn[{}]".format(k + j))
dm_node = node.createDecomposeMatrixNode(mul_node.attr("matrixSum"))
cps_node = pm.createNode("closestPointOnSurface")
dm_node.attr("outputTranslate") >> cps_node.attr("inPosition")
surfaceShape.attr("local") >> cps_node.attr("inputSurface")
cps_node.attr("position") >> slider.attr("translate")
pm.normalConstraint(surfaceShape,
slider,
aimVector=[0, 0, 1],
upVector=[0, 1, 0],
worldUpType="objectrotation",
worldUpVector=[0, 1, 0],
worldUpObject=gDriver)
pm.parent(ghostCtl.getParent(), slider)
def getFullPath(start, routes=None):
# type: (pm.nt.transform, List[pm.nt.transform]) -> List[pm.nt.transform]
if not routes:
routes = []
if not start.getParent():
return routes
else:
return getFullPath(start.getParent(), routes + [start, ])
def findPathAtoB(a, b):
# type: (pm.nt.transform, pm.nt.transform) -> Tuple[List[pm.nt.transform], pm.nt.transform, List[pm.nt.transform]]
"""Returns route of A to B in formed Tuple[down(to root), turning point, up(to leaf)]"""
# aPath = ["x", "a", "b", "c"]
# bPath = ["b", "c"]
# down [x, a]
# turn b
# up []
aPath = getFullPath(a)
bPath = getFullPath(b)
return _findPathAtoB(aPath, bPath)
def _findPathAtoB(aPath, bPath):
# type: (List, List) -> Tuple[List, Any, List]
"""Returns route of A to B in formed Tuple[down(to root), turning point, up(to leaf)]
>>> aPath = ["x", "a", "b", "c"]
>>> bPath = ["b", "c"]
>>> d, c, u = _findPathAtoB(aPath, bPath)
>>> d == ["x", "a"]
True
>>> c == "b"
True
>>> u == []
True
"""
down = []
up = []
sharedNode = None
for u in aPath:
if u in bPath:
sharedNode = u
break
down.append(u)
idx = bPath.index(sharedNode)
up = list(reversed(bPath[:(idx)]))
return down, sharedNode, up
def applyPathCnsLocal(target, curve, u):
cns = applyop.pathCns(target, curve, cnsType=False, u=u, tangent=False)
pm.connectAttr(curve.attr("local"), cns.attr("geometryPath"), f=True) # tobe local space
comp_node = pm.createNode("composeMatrix")
cns.attr("allCoordinates") >> comp_node.attr("inputTranslate")
cns.attr("rotate") >> comp_node.attr("inputRotate")
cns.attr("rotateOrder") >> comp_node.attr("inputRotateOrder")
mul_node = pm.createNode("multMatrix")
comp_node.attr("outputMatrix") >> mul_node.attr("matrixIn[0]")
curve.attr("matrix") >> mul_node.attr("matrixIn[1]")
decomp_node = pm.createNode("decomposeMatrix")
mul_node.attr("matrixSum") >> decomp_node.attr("inputMatrix")
decomp_node.attr("outputTranslate") >> target.attr("translate")
decomp_node.attr("outputRotate") >> target.attr("rotate")
return cns
|
"""mGear shifter components"""
# pylint: disable=import-error,W0201,C0111,C0112
import maya.cmds as cmds
import maya.OpenMaya as om1
import maya.api.OpenMaya as om
import pymel.core as pm
from pymel.core import datatypes
import exprespy.cmd
from mgear.shifter import component
from mgear.rigbits.facial_rigger import helpers
from mgear.rigbits.facial_rigger import constraints
from mgear.rigbits import ghost
from mgear.core import (
transform,
curve,
applyop,
attribute,
icon,
fcurve,
vector,
meshNavigation,
node,
primitive,
utils,
)
from mgear.core.transform import (
getTransform,
resetTransform,
# getTransformLookingAt,
# getChainTransform2,
setMatrixPosition,
)
from mgear.core.primitive import (
addTransform,
)
import ymt_shifter_utility as ymt_util
import ymt_shifter_utility.curve as curve
if False: # pylint: disable=using-constant-test, wrong-import-order
# For type annotation
from typing import ( # NOQA: F401 pylint: disable=unused-import
Optional,
Dict,
List,
Tuple,
Pattern,
Callable,
Any,
Text,
Generator,
Union
)
from pathlib import Path # NOQA: F401, F811 pylint: disable=unused-import,reimported
from types import ModuleType # NOQA: F401 pylint: disable=unused-import
from six.moves import reload_module as reload # NOQA: F401 pylint: disable=unused-import
from logging import ( # noqa:F401 pylint: disable=unused-import, wrong-import-order
StreamHandler,
getLogger,
WARN,
DEBUG,
INFO
)
handler = StreamHandler()
handler.setLevel(DEBUG)
logger = getLogger(__name__)
logger.setLevel(INFO)
logger.addHandler(handler)
logger.propagate = False
#############################################
# COMPONENT
#############################################
class Component(component.Main):
"""Shifter component Class"""
# =====================================================
# OBJECTS
# =====================================================
def addToSubGroup(self, obj, group_name):
if self.settings["ctlGrp"]:
ctlGrp = self.settings["ctlGrp"]
else:
ctlGrp = "controllers"
self.addToGroup(obj, group_name, parentGrp=ctlGrp)
def addObjects(self):
"""Add all the objects needed to create the component."""
if self.settings["neutralRotation"]:
t = transform.getTransformFromPos(self.guide.pos["root"])
else:
t = self.guide.tra["root"]
if self.settings["mirrorBehaviour"] and self.negate:
scl = [1, 1, -1]
else:
scl = [1, 1, 1]
t = transform.setMatrixScale(t, scl)
self.detailControllersGroupName = "controllers_detail" # TODO: extract to settings
self.primaryControllersGroupName = "controllers_primary" # TODO: extract to settings
self.ik_cns = primitive.addTransform(
self.root, self.getName("ik_cns"), t)
self.ctl = self.addCtl(self.ik_cns,
"ctl",
t,
self.color_ik,
self.settings["icon"],
w=self.settings["ctlSize"] * self.size,
h=self.settings["ctlSize"] * self.size,
d=self.settings["ctlSize"] * self.size,
tp=self.parentCtlTag)
self.addToSubGroup(self.ctl, self.primaryControllersGroupName)
t = self.guide.tra["lookat"]
self.lookat = self.addCtl(self.ik_cns,
"lookat_ctl",
t,
self.color_ik,
self.settings["icon"],
w=self.settings["ctlSize"] * self.size,
h=self.settings["ctlSize"] * self.size,
d=self.settings["ctlSize"] * self.size,
tp=self.parentCtlTag)
self.addToSubGroup(self.lookat, self.primaryControllersGroupName)
# we need to set the rotation order before lock any rotation axis
if self.settings["k_ro"]:
rotOderList = ["XYZ", "YZX", "ZXY", "XZY", "YXZ", "ZYX"]
attribute.setRotOrder(
self.ctl, rotOderList[self.settings["default_rotorder"]])
params = [s for s in
["tx", "ty", "tz", "ro", "rx", "ry", "rz", "sx", "sy", "sz"]
if self.settings["k_" + s]]
ymt_util.setKeyableAttributesDontLockVisibility(self.ctl, params)
if self.settings["joint"]:
self.jnt_pos.append([self.ctl, 0, None, self.settings["uniScale"]])
self.sliding_surface = pm.duplicate(self.guide.getObjects(self.guide.root)["sliding_surface"])[0]
pm.parent(self.sliding_surface, self.root)
self.sliding_surface.visibility.set(False)
pm.makeIdentity(self.sliding_surface, apply=True, t=1, r=1, s=1, n=0, pn=1)
def addAttributes(self):
# Ref
if self.settings["ikrefarray"]:
ref_names = self.get_valid_alias_list(
self.settings["ikrefarray"].split(","))
if len(ref_names) > 1:
self.ikref_att = self.addAnimEnumParam(
"ikref",
"Ik Ref",
0,
ref_names)
def addOperators(self):
return
# =====================================================
# CONNECTOR
# =====================================================
def setRelation(self):
"""Set the relation beetween object from guide to rig"""
self.relatives["root"] = self.ctl
self.relatives["lookat"] = self.lookat
self.controlRelatives["root"] = self.ctl
if self.settings["joint"]:
self.jointRelatives["root"] = 0
self.aliasRelatives["root"] = "ctl"
def addConnection(self):
"""Add more connection definition to the set"""
self.connections["standard"] = self.connect_standard
self.connections["orientation"] = self.connect_orientation
def connect_standard(self):
"""standard connection definition for the component"""
self.connect_standardWithSimpleIkRef()
self.connect_slide_ghost()
def connect_orientation(self):
"""Orient connection definition for the component"""
self.connect_orientCns()
def _visi_off_lock(self, node):
"""Short cuts."""
return
node.visibility.set(False)
ymt_util.setKeyableAttributesDontLockVisibility(node, [])
cmds.setAttr("{}.visibility".format(node.name()), l=False)
def connect_slide_ghost(self):
# slide system
try:
ghostSliderForPupil(
self.lookat,
self.ctl,
self.sliding_surface,
self.root)
except:
import traceback as tb
tb.print_exc()
raise
def ghostSliderForPupil(ctl, ghostCtl, surface, sliderParent):
"""Modify the ghost control behaviour to slide on top of a surface
Args:
ghostControls (dagNode): The ghost control
surface (Surface): The NURBS surface
sliderParent (dagNode): The parent for the slider.
"""
def conn(ctl, driver, ghost):
for attr in ["translate", "scale", "rotate"]:
pm.connectAttr("{}.{}".format(ctl, attr), "{}.{}".format(driver, attr))
# pm.disconnectAttr("{}.{}".format(ctl, attr), "{}.{}".format(ghost, attr))
surfaceShape = surface.getShape()
t = ctl.getMatrix(worldSpace=True)
gDriver = primitive.addTransform(ghostCtl.getParent(), "{}_slideDriver".format(ctl.name()), t)
# conn(ctl, gDriver, ghostCtl)
print("ctl: {}, gDriver: {}, ghostCtl: {}".format(ctl, gDriver, ghostCtl))
oParent = ghostCtl.getParent()
npoName = "_".join(ghostCtl.name().split("_")[:-1]) + "_npo"
oTra = pm.PyNode(pm.createNode("transform", n=npoName, p=oParent, ss=True))
oTra.setTransformation(ghostCtl.getMatrix())
pm.parent(ghostCtl, oTra)
slider = primitive.addTransform(sliderParent, ctl.name() + "_slideDriven", t)
down, _, up = findPathAtoB(ctl, sliderParent)
mul_node = pm.createNode("multMatrix")
j = k = 0
for j, d in enumerate(down):
d.attr("matrix") >> mul_node.attr("matrixIn[{}]".format(j))
for k, u in enumerate(up):
u.attr("inverseMatrix") >> mul_node.attr("matrixIn[{}]".format(k + j))
dm_node = node.createDecomposeMatrixNode(mul_node.attr("matrixSum"))
cps_node = pm.createNode("closestPointOnSurface")
dm_node.attr("outputTranslate") >> cps_node.attr("inPosition")
surfaceShape.attr("local") >> cps_node.attr("inputSurface")
cps_node.attr("position") >> slider.attr("translate")
pm.normalConstraint(surfaceShape,
slider,
aimVector=[0, 0, 1],
upVector=[0, 1, 0],
worldUpType="objectrotation",
worldUpVector=[0, 1, 0],
worldUpObject=gDriver)
pm.parent(ghostCtl.getParent(), slider)
def getFullPath(start, routes=None):
# type: (pm.nt.transform, List[pm.nt.transform]) -> List[pm.nt.transform]
if not routes:
routes = []
if not start.getParent():
return routes
else:
return getFullPath(start.getParent(), routes + [start, ])
def findPathAtoB(a, b):
# type: (pm.nt.transform, pm.nt.transform) -> Tuple[List[pm.nt.transform], pm.nt.transform, List[pm.nt.transform]]
"""Returns route of A to B in formed Tuple[down(to root), turning point, up(to leaf)]"""
# aPath = ["x", "a", "b", "c"]
# bPath = ["b", "c"]
# down [x, a]
# turn b
# up []
aPath = getFullPath(a)
bPath = getFullPath(b)
return _findPathAtoB(aPath, bPath)
def _findPathAtoB(aPath, bPath):
# type: (List, List) -> Tuple[List, Any, List]
"""Returns route of A to B in formed Tuple[down(to root), turning point, up(to leaf)]
>>> aPath = ["x", "a", "b", "c"]
>>> bPath = ["b", "c"]
>>> d, c, u = _findPathAtoB(aPath, bPath)
>>> d == ["x", "a"]
True
>>> c == "b"
True
>>> u == []
True
"""
down = []
up = []
sharedNode = None
for u in aPath:
if u in bPath:
sharedNode = u
break
down.append(u)
idx = bPath.index(sharedNode)
up = list(reversed(bPath[:(idx)]))
return down, sharedNode, up
def applyPathCnsLocal(target, curve, u):
cns = applyop.pathCns(target, curve, cnsType=False, u=u, tangent=False)
pm.connectAttr(curve.attr("local"), cns.attr("geometryPath"), f=True) # tobe local space
comp_node = pm.createNode("composeMatrix")
cns.attr("allCoordinates") >> comp_node.attr("inputTranslate")
cns.attr("rotate") >> comp_node.attr("inputRotate")
cns.attr("rotateOrder") >> comp_node.attr("inputRotateOrder")
mul_node = pm.createNode("multMatrix")
comp_node.attr("outputMatrix") >> mul_node.attr("matrixIn[0]")
curve.attr("matrix") >> mul_node.attr("matrixIn[1]")
decomp_node = pm.createNode("decomposeMatrix")
mul_node.attr("matrixSum") >> decomp_node.attr("inputMatrix")
decomp_node.attr("outputTranslate") >> target.attr("translate")
decomp_node.attr("outputRotate") >> target.attr("rotate")
return cns
|
en
| 0.553461
|
mGear shifter components # pylint: disable=import-error,W0201,C0111,C0112 # getTransformLookingAt, # getChainTransform2, # pylint: disable=using-constant-test, wrong-import-order # For type annotation # NOQA: F401 pylint: disable=unused-import # NOQA: F401, F811 pylint: disable=unused-import,reimported # NOQA: F401 pylint: disable=unused-import # NOQA: F401 pylint: disable=unused-import # noqa:F401 pylint: disable=unused-import, wrong-import-order ############################################# # COMPONENT ############################################# Shifter component Class # ===================================================== # OBJECTS # ===================================================== Add all the objects needed to create the component. # TODO: extract to settings # TODO: extract to settings # we need to set the rotation order before lock any rotation axis # Ref # ===================================================== # CONNECTOR # ===================================================== Set the relation beetween object from guide to rig Add more connection definition to the set standard connection definition for the component Orient connection definition for the component Short cuts. # slide system Modify the ghost control behaviour to slide on top of a surface Args: ghostControls (dagNode): The ghost control surface (Surface): The NURBS surface sliderParent (dagNode): The parent for the slider. 
# pm.disconnectAttr("{}.{}".format(ctl, attr), "{}.{}".format(ghost, attr)) # conn(ctl, gDriver, ghostCtl) # type: (pm.nt.transform, List[pm.nt.transform]) -> List[pm.nt.transform] # type: (pm.nt.transform, pm.nt.transform) -> Tuple[List[pm.nt.transform], pm.nt.transform, List[pm.nt.transform]] Returns route of A to B in formed Tuple[down(to root), turning point, up(to leaf)] # aPath = ["x", "a", "b", "c"] # bPath = ["b", "c"] # down [x, a] # turn b # up [] # type: (List, List) -> Tuple[List, Any, List] Returns route of A to B in formed Tuple[down(to root), turning point, up(to leaf)] >>> aPath = ["x", "a", "b", "c"] >>> bPath = ["b", "c"] >>> d, c, u = _findPathAtoB(aPath, bPath) >>> d == ["x", "a"] True >>> c == "b" True >>> u == [] True # tobe local space
| 1.671628
| 2
|
test/countries/__init__.py
|
LaudateCorpus1/python-holidays
| 0
|
6629400
|
# -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <<EMAIL>> (c) 2017-2022
# ryanss <<EMAIL>> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
import pkgutil
__all__ = []
for loader, module_name, is_pkg in pkgutil.walk_packages(__path__):
__all__.append(module_name)
_module = loader.find_module(module_name).load_module(module_name)
globals()[module_name] = _module
|
# -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <<EMAIL>> (c) 2017-2022
# ryanss <<EMAIL>> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
import pkgutil
__all__ = []
for loader, module_name, is_pkg in pkgutil.walk_packages(__path__):
__all__.append(module_name)
_module = loader.find_module(module_name).load_module(module_name)
globals()[module_name] = _module
|
en
| 0.735528
|
# -*- coding: utf-8 -*- # python-holidays # --------------- # A fast, efficient Python library for generating country, province and state # specific sets of holidays on the fly. It aims to make determining whether a # specific date is a holiday as fast and flexible as possible. # # Authors: dr-prodigy <<EMAIL>> (c) 2017-2022 # ryanss <<EMAIL>> (c) 2014-2017 # Website: https://github.com/dr-prodigy/python-holidays # License: MIT (see LICENSE file)
| 2.109108
| 2
|
Mining/demo.py
|
ichiro17/FinMind
| 3
|
6629401
|
<gh_stars>1-10
from FinMind.Mining import Mind
_2330 = Mind.Stock('2330','2019-01-01')
_2330.StockPrice.head()
_2330.FinancialStatements.head()
_2330.ShareHolding.head()
_2330.InstitutionalInvestors.head()
_2330.MarginPurchaseShortSale.head()
_2330.MonthRevenue.head()
_2330.HoldingSharesPer.head()
_2330.BalanceSheet.head()
_2330.StockPrice['move_average'] = Mind.MoveAverage(
_2330.StockPrice,days = 5,variable = 'close')
_2330.StockPrice['RSV'] = Mind.RSV(
_2330.StockPrice,days = 5)
_2330.StockPrice['BIAS'] = Mind.BIAS(
_2330.StockPrice,days = 5)
_2330.StockPrice.head()
|
from FinMind.Mining import Mind
_2330 = Mind.Stock('2330','2019-01-01')
_2330.StockPrice.head()
_2330.FinancialStatements.head()
_2330.ShareHolding.head()
_2330.InstitutionalInvestors.head()
_2330.MarginPurchaseShortSale.head()
_2330.MonthRevenue.head()
_2330.HoldingSharesPer.head()
_2330.BalanceSheet.head()
_2330.StockPrice['move_average'] = Mind.MoveAverage(
_2330.StockPrice,days = 5,variable = 'close')
_2330.StockPrice['RSV'] = Mind.RSV(
_2330.StockPrice,days = 5)
_2330.StockPrice['BIAS'] = Mind.BIAS(
_2330.StockPrice,days = 5)
_2330.StockPrice.head()
|
none
| 1
| 1.758441
| 2
|
|
alipay/aop/api/response/AlipayCommerceYuntaskPointinstructionQueryResponse.py
|
antopen/alipay-sdk-python-all
| 0
|
6629402
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.PointInstruction import PointInstruction
class AlipayCommerceYuntaskPointinstructionQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayCommerceYuntaskPointinstructionQueryResponse, self).__init__()
self._data = None
self._page = None
self._page_size = None
self._total_size = None
@property
def data(self):
return self._data
@data.setter
def data(self, value):
if isinstance(value, list):
self._data = list()
for i in value:
if isinstance(i, PointInstruction):
self._data.append(i)
else:
self._data.append(PointInstruction.from_alipay_dict(i))
@property
def page(self):
return self._page
@page.setter
def page(self, value):
self._page = value
@property
def page_size(self):
return self._page_size
@page_size.setter
def page_size(self, value):
self._page_size = value
@property
def total_size(self):
return self._total_size
@total_size.setter
def total_size(self, value):
self._total_size = value
def parse_response_content(self, response_content):
response = super(AlipayCommerceYuntaskPointinstructionQueryResponse, self).parse_response_content(response_content)
if 'data' in response:
self.data = response['data']
if 'page' in response:
self.page = response['page']
if 'page_size' in response:
self.page_size = response['page_size']
if 'total_size' in response:
self.total_size = response['total_size']
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.PointInstruction import PointInstruction
class AlipayCommerceYuntaskPointinstructionQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayCommerceYuntaskPointinstructionQueryResponse, self).__init__()
self._data = None
self._page = None
self._page_size = None
self._total_size = None
@property
def data(self):
return self._data
@data.setter
def data(self, value):
if isinstance(value, list):
self._data = list()
for i in value:
if isinstance(i, PointInstruction):
self._data.append(i)
else:
self._data.append(PointInstruction.from_alipay_dict(i))
@property
def page(self):
return self._page
@page.setter
def page(self, value):
self._page = value
@property
def page_size(self):
return self._page_size
@page_size.setter
def page_size(self, value):
self._page_size = value
@property
def total_size(self):
return self._total_size
@total_size.setter
def total_size(self, value):
self._total_size = value
def parse_response_content(self, response_content):
response = super(AlipayCommerceYuntaskPointinstructionQueryResponse, self).parse_response_content(response_content)
if 'data' in response:
self.data = response['data']
if 'page' in response:
self.page = response['page']
if 'page_size' in response:
self.page_size = response['page_size']
if 'total_size' in response:
self.total_size = response['total_size']
|
en
| 0.352855
|
#!/usr/bin/env python # -*- coding: utf-8 -*-
| 1.989204
| 2
|
backend/todoist/urls.py
|
Zhiwei1996/Todoist
| 0
|
6629403
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.conf.urls import url
from rest_framework.urlpatterns import format_suffix_patterns
from todoist import views
urlpatterns = [
# url(r'^$', views.index, name='index'),
url(r'^todos/$', views.TodoList.as_view()),
url(r'^todos/(?P<pk>[0-9]+)/$', views.TodoDetail.as_view()),
url(r'^users/$', views.UserList.as_view()),
url(r'^users/(?P<pk>[0-9]+)/$', views.UserDetail.as_view()),
]
urlpatterns = format_suffix_patterns(urlpatterns)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.conf.urls import url
from rest_framework.urlpatterns import format_suffix_patterns
from todoist import views
urlpatterns = [
# url(r'^$', views.index, name='index'),
url(r'^todos/$', views.TodoList.as_view()),
url(r'^todos/(?P<pk>[0-9]+)/$', views.TodoDetail.as_view()),
url(r'^users/$', views.UserList.as_view()),
url(r'^users/(?P<pk>[0-9]+)/$', views.UserDetail.as_view()),
]
urlpatterns = format_suffix_patterns(urlpatterns)
|
en
| 0.412533
|
#!/usr/bin/env python # -*- coding: utf-8 -*- # url(r'^$', views.index, name='index'),
| 2.068374
| 2
|
src/faces.py
|
BachFive/481_FR
| 0
|
6629404
|
import numpy as np
import cv2
import pickle
face_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_frontalface_alt2.xml')
eye_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_eye.xml')
eyeglasses_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_eye_tree_eyeglasses.xml')
smile_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_smile.xml')
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read("./recognizers/face-trainner.yml")
labels = {"person_name": 1}
with open("pickles/face-labels.pickle", 'rb') as f:
og_labels = pickle.load(f)
labels = {v: k for k, v in og_labels.items()}
cap = cv2.VideoCapture(0)
while True:
# Capture frame-by-frame
ret, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=5)
for (x, y, w, h) in faces:
# print(x,y,w,h)
roi_gray = gray[y:y + h, x:x + w] # (ycord_start, ycord_end)
roi_color = frame[y:y + h, x:x + w]
# recognize? deep learned model predict keras tensorflow pytorch scikit learn
id_, conf = recognizer.predict(roi_gray)
if 50 <= conf <= 100:
# print(id_)
print(labels[id_], end=' ')
print("confidence", conf)
font = cv2.FONT_HERSHEY_SIMPLEX
name = labels[id_]
color = (255, 255, 255)
stroke = 2
cv2.putText(frame, name, (x, y), font, 1, color, stroke, cv2.LINE_AA)
img_item = "7.png"
cv2.imwrite(img_item, roi_color)
color = (255, 0, 0) # BGR 0-255
stroke = 2
end_cord_x = x + w
end_cord_y = y + h
cv2.rectangle(frame, (x, y), (end_cord_x, end_cord_y), color, stroke)
eyes = eye_cascade.detectMultiScale(roi_gray)
for (ex, ey, ew, eh) in eyes:
cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
eyeglasses = eyeglasses_cascade.detectMultiScale(roi_gray)
for (egx, egy, egw, egh) in eyeglasses:
cv2.rectangle(roi_color, (egx, egy), (egx+egw, egy+egh), (0, 255, 0), 2)
smile = smile_cascade.detectMultiScale(roi_gray)
for (sx, sy, sw, sh) in smile:
cv2.rectangle(roi_color, (sx, sy), (sx+sw, sy+sh), (0, 255, 0), 2)
# Display the resulting frame
cv2.imshow('frame', frame)
if cv2.waitKey(20) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
|
import numpy as np
import cv2
import pickle
face_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_frontalface_alt2.xml')
eye_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_eye.xml')
eyeglasses_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_eye_tree_eyeglasses.xml')
smile_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_smile.xml')
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read("./recognizers/face-trainner.yml")
labels = {"person_name": 1}
with open("pickles/face-labels.pickle", 'rb') as f:
og_labels = pickle.load(f)
labels = {v: k for k, v in og_labels.items()}
cap = cv2.VideoCapture(0)
while True:
# Capture frame-by-frame
ret, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=5)
for (x, y, w, h) in faces:
# print(x,y,w,h)
roi_gray = gray[y:y + h, x:x + w] # (ycord_start, ycord_end)
roi_color = frame[y:y + h, x:x + w]
# recognize? deep learned model predict keras tensorflow pytorch scikit learn
id_, conf = recognizer.predict(roi_gray)
if 50 <= conf <= 100:
# print(id_)
print(labels[id_], end=' ')
print("confidence", conf)
font = cv2.FONT_HERSHEY_SIMPLEX
name = labels[id_]
color = (255, 255, 255)
stroke = 2
cv2.putText(frame, name, (x, y), font, 1, color, stroke, cv2.LINE_AA)
img_item = "7.png"
cv2.imwrite(img_item, roi_color)
color = (255, 0, 0) # BGR 0-255
stroke = 2
end_cord_x = x + w
end_cord_y = y + h
cv2.rectangle(frame, (x, y), (end_cord_x, end_cord_y), color, stroke)
eyes = eye_cascade.detectMultiScale(roi_gray)
for (ex, ey, ew, eh) in eyes:
cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
eyeglasses = eyeglasses_cascade.detectMultiScale(roi_gray)
for (egx, egy, egw, egh) in eyeglasses:
cv2.rectangle(roi_color, (egx, egy), (egx+egw, egy+egh), (0, 255, 0), 2)
smile = smile_cascade.detectMultiScale(roi_gray)
for (sx, sy, sw, sh) in smile:
cv2.rectangle(roi_color, (sx, sy), (sx+sw, sy+sh), (0, 255, 0), 2)
# Display the resulting frame
cv2.imshow('frame', frame)
if cv2.waitKey(20) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
|
en
| 0.565406
|
# Capture frame-by-frame # print(x,y,w,h) # (ycord_start, ycord_end) # recognize? deep learned model predict keras tensorflow pytorch scikit learn # print(id_) # BGR 0-255 # Display the resulting frame # When everything done, release the capture
| 2.8293
| 3
|
David and Pooja/++Validating Linked Mods/Python-3.0/Tools/pybench/pybench.py
|
LinkedModernismProject/web_code
| 1
|
6629405
|
<filename>David and Pooja/++Validating Linked Mods/Python-3.0/Tools/pybench/pybench.py
#!/usr/local/bin/python -O
""" A Python Benchmark Suite
"""
#
# Note: Please keep this module compatible to Python 1.5.2.
#
# Tests may include features in later Python versions, but these
# should then be embedded in try-except clauses in the configuration
# module Setup.py.
#
# pybench Copyright
__copyright__ = """\
Copyright (c), 1997-2006, <NAME> (<EMAIL>)
Copyright (c), 2000-2006, eGenix.com Software GmbH (<EMAIL>)
All Rights Reserved.
Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee or royalty is hereby
granted, provided that the above copyright notice appear in all copies
and that both that copyright notice and this permission notice appear
in supporting documentation or portions thereof, including
modifications, that you make.
THE AUTHOR MARC-ANDRE LEMBURG DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
"""
import sys, time, operator, platform
from CommandLine import *
try:
import cPickle
pickle = cPickle
except ImportError:
import pickle
# Version number; version history: see README file !
__version__ = '2.0'
### Constants
# Second fractions
MILLI_SECONDS = 1e3
MICRO_SECONDS = 1e6
# Percent unit
PERCENT = 100
# Horizontal line length
LINE = 79
# Minimum test run-time
MIN_TEST_RUNTIME = 1e-3
# Number of calibration runs to use for calibrating the tests
CALIBRATION_RUNS = 20
# Number of calibration loops to run for each calibration run
CALIBRATION_LOOPS = 20
# Allow skipping calibration ?
ALLOW_SKIPPING_CALIBRATION = 1
# Timer types
TIMER_TIME_TIME = 'time.time'
TIMER_TIME_CLOCK = 'time.clock'
TIMER_SYSTIMES_PROCESSTIME = 'systimes.processtime'
# Choose platform default timer
if sys.platform[:3] == 'win':
# On WinXP this has 2.5ms resolution
TIMER_PLATFORM_DEFAULT = TIMER_TIME_CLOCK
else:
# On Linux this has 1ms resolution
TIMER_PLATFORM_DEFAULT = TIMER_TIME_TIME
# Print debug information ?
_debug = 0
### Helpers
def get_timer(timertype):
if timertype == TIMER_TIME_TIME:
return time.time
elif timertype == TIMER_TIME_CLOCK:
return time.clock
elif timertype == TIMER_SYSTIMES_PROCESSTIME:
import systimes
return systimes.processtime
else:
raise TypeError('unknown timer type: %s' % timertype)
def get_machine_details():
if _debug:
print('Getting machine details...')
buildno, builddate = platform.python_build()
python = platform.python_version()
if sys.maxunicode == 65535:
# UCS2 build (standard)
unitype = 'UCS2'
else:
# UCS4 build (most recent Linux distros)
unitype = 'UCS4'
bits, linkage = platform.architecture()
return {
'platform': platform.platform(),
'processor': platform.processor(),
'executable': sys.executable,
'implementation': getattr(platform, 'python_implementation',
lambda:'n/a')(),
'python': platform.python_version(),
'compiler': platform.python_compiler(),
'buildno': buildno,
'builddate': builddate,
'unicode': unitype,
'bits': bits,
}
def print_machine_details(d, indent=''):
l = ['Machine Details:',
' Platform ID: %s' % d.get('platform', 'n/a'),
' Processor: %s' % d.get('processor', 'n/a'),
'',
'Python:',
' Implementation: %s' % d.get('implementation', 'n/a'),
' Executable: %s' % d.get('executable', 'n/a'),
' Version: %s' % d.get('python', 'n/a'),
' Compiler: %s' % d.get('compiler', 'n/a'),
' Bits: %s' % d.get('bits', 'n/a'),
' Build: %s (#%s)' % (d.get('builddate', 'n/a'),
d.get('buildno', 'n/a')),
' Unicode: %s' % d.get('unicode', 'n/a'),
]
joiner = '\n' + indent
print(indent + joiner.join(l) + '\n')
### Test baseclass

class Test:

    """ All tests must have this class as baseclass. It provides
        the necessary interface to the benchmark machinery.

        The tests must set .rounds to a value high enough to let the
        test run between 20-50 seconds. This is needed because
        clock()-timing only gives rather inaccurate values (on Linux,
        for example, it is accurate to a few hundreths of a
        second). If you don't want to wait that long, use a warp
        factor larger than 1.

        It is also important to set the .operations variable to a
        value representing the number of "virtual operations" done per
        call of .run().

        If you change a test in some way, don't forget to increase
        its version number.

    """

    ### Instance variables that each test should override

    # Version number of the test as float (x.yy); this is important
    # for comparisons of benchmark runs - tests with unequal version
    # number will not get compared.
    version = 2.0

    # The number of abstract operations done in each round of the
    # test. An operation is the basic unit of what you want to
    # measure. The benchmark will output the amount of run-time per
    # operation. Note that in order to raise the measured timings
    # significantly above noise level, it is often required to repeat
    # sets of operations more than once per test round. The measured
    # overhead per test round should be less than 1 second.
    operations = 1

    # Number of rounds to execute per test run. This should be
    # adjusted to a figure that results in a test run-time of between
    # 1-2 seconds.
    rounds = 100000

    ### Internal variables

    # Mark this class as implementing a test
    is_a_test = 1

    # Last timing: (real, run, overhead)
    last_timing = (0.0, 0.0, 0.0)

    # Warp factor to use for this test
    warp = 1

    # Number of calibration runs to use
    calibration_runs = CALIBRATION_RUNS

    # List of calibration timings
    overhead_times = None

    # List of test run timings
    times = []

    # Timer used for the benchmark
    timer = TIMER_PLATFORM_DEFAULT

    def __init__(self, warp=None, calibration_runs=None, timer=None):

        # Set parameters
        if warp is not None:
            self.rounds = int(self.rounds / warp)
            if self.rounds == 0:
                raise ValueError('warp factor set too high')
            self.warp = warp
        if calibration_runs is not None:
            if (not ALLOW_SKIPPING_CALIBRATION and
                calibration_runs < 1):
                raise ValueError('at least one calibration run is required')
            self.calibration_runs = calibration_runs
        if timer is not None:
            # Bug fix: this previously read "timer = timer", a no-op
            # that silently ignored the timer requested by the caller.
            self.timer = timer

        # Init variables
        self.times = []
        self.overhead_times = []

        # We want these to be in the instance dict, so that pickle
        # saves them
        self.version = self.version
        self.operations = self.operations
        self.rounds = self.rounds

    def get_timer(self):

        """ Return the timer function to use for the test.
        """
        return get_timer(self.timer)

    def compatible(self, other):

        """ Return 1/0 depending on whether the test is compatible
            with the other Test instance or not.
        """
        if self.version != other.version:
            return 0
        if self.rounds != other.rounds:
            return 0
        return 1

    def calibrate_test(self):

        """ Measure the per-round overhead of the test setup code
            (self.calibrate) and record it in self.overhead_times.
        """
        # Calibration may be skipped entirely by requesting 0 runs;
        # overhead is then assumed to be zero.
        if self.calibration_runs == 0:
            self.overhead_times = [0.0]
            return

        calibrate = self.calibrate
        timer = self.get_timer()
        calibration_loops = range(CALIBRATION_LOOPS)

        # Time the calibration loop overhead
        prep_times = []
        for i in range(self.calibration_runs):
            t = timer()
            for i in calibration_loops:
                pass
            t = timer() - t
            prep_times.append(t)
        min_prep_time = min(prep_times)
        if _debug:
            print()
            print('Calib. prep time = %.6fms' % (
                min_prep_time * MILLI_SECONDS))

        # Time the calibration runs (doing CALIBRATION_LOOPS loops of
        # .calibrate() method calls each)
        for i in range(self.calibration_runs):
            t = timer()
            for i in calibration_loops:
                calibrate()
            t = timer() - t
            # Subtract the loop overhead measured above
            self.overhead_times.append(t / CALIBRATION_LOOPS
                                       - min_prep_time)

        # Check the measured times
        min_overhead = min(self.overhead_times)
        max_overhead = max(self.overhead_times)
        if _debug:
            print('Calib. overhead time = %.6fms' % (
                min_overhead * MILLI_SECONDS))
        if min_overhead < 0.0:
            raise ValueError('calibration setup did not work')
        if max_overhead - min_overhead > 0.1:
            raise ValueError(
                'overhead calibration timing range too inaccurate: '
                '%r - %r' % (min_overhead, max_overhead))

    def run(self):

        """ Run the test in two phases: first calibrate, then
            do the actual test. Be careful to keep the calibration
            timing low w/r to the test timing.
        """
        test = self.test
        timer = self.get_timer()

        # Get calibration
        min_overhead = min(self.overhead_times)

        # Test run
        t = timer()
        test()
        t = timer() - t
        if t < MIN_TEST_RUNTIME:
            # Bug fix: the message used to claim "< 10ms" although
            # MIN_TEST_RUNTIME is 1e-3 seconds (1ms).
            raise ValueError('warp factor too high: '
                             'test times are < 1ms')
        eff_time = t - min_overhead
        if eff_time < 0:
            raise ValueError('wrong calibration')
        self.last_timing = (eff_time, t, min_overhead)
        self.times.append(eff_time)

    def calibrate(self):

        """ Calibrate the test.

            This method should execute everything that is needed to
            setup and run the test - except for the actual operations
            that you intend to measure. pybench uses this method to
            measure the test implementation overhead.

        """
        return

    def test(self):

        """ Run the test.

            The test needs to run self.rounds executing
            self.operations number of operations each.

        """
        return

    def stat(self):

        """ Return test run statistics as tuple:

            (minimum run time,
             average run time,
             total run time,
             average time per operation,
             minimum overhead time)

        """
        runs = len(self.times)
        if runs == 0:
            # Bug fix: callers unpack five values (see docstring);
            # previously only four zeros were returned here.
            return 0.0, 0.0, 0.0, 0.0, 0.0
        min_time = min(self.times)
        total_time = sum(self.times)
        avg_time = total_time / float(runs)
        operation_avg = total_time / float(runs
                                           * self.rounds
                                           * self.operations)
        if self.overhead_times:
            min_overhead = min(self.overhead_times)
        else:
            min_overhead = self.last_timing[2]
        return min_time, avg_time, total_time, operation_avg, min_overhead
### Load Setup
# This has to be done after the definition of the Test class, since
# the Setup module will import subclasses using this class.
import Setup
### Benchmark base class

class Benchmark:

    """ Benchmark class: loads a suite of Test subclasses from a setup
        module, calibrates and runs them, and prints or compares the
        collected timings.
    """

    # Name of the benchmark
    name = ''

    # Number of benchmark rounds to run
    rounds = 1

    # Warp factor use to run the tests
    warp = 1                    # Warp factor

    # Average benchmark round time
    roundtime = 0

    # Benchmark version number as float x.yy
    version = 2.0

    # Produce verbose output ?
    verbose = 0

    # Dictionary with the machine details
    machine_details = None

    # Timer used for the benchmark
    timer = TIMER_PLATFORM_DEFAULT

    # Number of calibration runs to use.  Bug fix: this class-level
    # default was previously missing, so constructing a Benchmark with
    # calibration_runs=None left the attribute unset and
    # .load_tests()/.compatible() raised AttributeError.
    calibration_runs = CALIBRATION_RUNS

    def __init__(self, name, verbose=None, timer=None, warp=None,
                 calibration_runs=None):

        """ Create a benchmark; a falsy name is replaced by the
            current timestamp.  None arguments keep the class-level
            defaults.
        """
        if name:
            self.name = name
        else:
            self.name = '%04i-%02i-%02i %02i:%02i:%02i' % \
                        (time.localtime(time.time())[:6])
        if verbose is not None:
            self.verbose = verbose
        if timer is not None:
            self.timer = timer
        if warp is not None:
            self.warp = warp
        if calibration_runs is not None:
            self.calibration_runs = calibration_runs

        # Init vars
        self.tests = {}
        if _debug:
            print('Getting machine details...')
        self.machine_details = get_machine_details()

        # Make .version an instance attribute to have it saved in the
        # Benchmark pickle
        self.version = self.version

    def get_timer(self):

        """ Return the timer function to use for the test.
        """
        return get_timer(self.timer)

    def compatible(self, other):

        """ Return 1/0 depending on whether the benchmark is
            compatible with the other Benchmark instance or not.
        """
        if self.version != other.version:
            return 0
        if (self.machine_details == other.machine_details and
            self.timer != other.timer):
            return 0
        if (self.calibration_runs == 0 and
            other.calibration_runs != 0):
            return 0
        if (self.calibration_runs != 0 and
            other.calibration_runs == 0):
            return 0
        return 1

    def load_tests(self, setupmod, limitnames=None):

        """ Instantiate all Test subclasses found in *setupmod*,
            optionally restricted to names matching the compiled
            regular expression *limitnames*.
        """
        # Add tests
        if self.verbose:
            print('Searching for tests ...')
            print('--------------------------------------')
        for testclass in setupmod.__dict__.values():
            # Only classes carrying the is_a_test marker qualify;
            # the Test baseclass itself is excluded by name.
            if not hasattr(testclass, 'is_a_test'):
                continue
            name = testclass.__name__
            if name == 'Test':
                continue
            if (limitnames is not None and
                limitnames.search(name) is None):
                continue
            self.tests[name] = testclass(
                warp=self.warp,
                calibration_runs=self.calibration_runs,
                timer=self.timer)
        l = sorted(self.tests)
        if self.verbose:
            for name in l:
                print(' %s' % name)
            print('--------------------------------------')
            print(' %i tests found' % len(l))
            print()

    def calibrate(self):

        """ Run the overhead calibration for every loaded test.
        """
        print('Calibrating tests. Please wait...', end=' ')
        sys.stdout.flush()
        if self.verbose:
            print()
            print()
            print('Test min max')
            print('-' * LINE)
        tests = sorted(self.tests.items())
        for i in range(len(tests)):
            name, test = tests[i]
            test.calibrate_test()
            if self.verbose:
                print('%30s: %6.3fms %6.3fms' % \
                      (name,
                       min(test.overhead_times) * MILLI_SECONDS,
                       max(test.overhead_times) * MILLI_SECONDS))
        if self.verbose:
            print()
            print('Done with the calibration.')
        else:
            print('done.')
        print()

    def run(self):

        """ Run all loaded tests for self.rounds rounds, recording
            the per-round effective times in self.roundtimes.
        """
        tests = sorted(self.tests.items())
        timer = self.get_timer()
        print('Running %i round(s) of the suite at warp factor %i:' % \
              (self.rounds, self.warp))
        print()
        self.roundtimes = []
        for i in range(self.rounds):
            if self.verbose:
                print(' Round %-25i effective absolute overhead' % (i+1))
            total_eff_time = 0.0
            for j in range(len(tests)):
                name, test = tests[j]
                if self.verbose:
                    print('%30s:' % name, end=' ')
                test.run()
                (eff_time, abs_time, min_overhead) = test.last_timing
                total_eff_time = total_eff_time + eff_time
                if self.verbose:
                    print(' %5.0fms %5.0fms %7.3fms' % \
                          (eff_time * MILLI_SECONDS,
                           abs_time * MILLI_SECONDS,
                           min_overhead * MILLI_SECONDS))
            self.roundtimes.append(total_eff_time)
            if self.verbose:
                print(' '
                      ' ------------------------------')
                print(' '
                      ' Totals: %6.0fms' %
                      (total_eff_time * MILLI_SECONDS))
                print()
            else:
                print('* Round %i done in %.3f seconds.' % (i+1,
                                                            total_eff_time))
        print()

    def stat(self):

        """ Return benchmark run statistics as tuple:

            (minimum round time,
             average round time,
             maximum round time)

            XXX Currently not used, since the benchmark does test
                statistics across all rounds.

        """
        runs = len(self.roundtimes)
        if runs == 0:
            # Bug fix: the docstring promises a 3-tuple; previously
            # only two zeros were returned here.
            return 0.0, 0.0, 0.0
        min_time = min(self.roundtimes)
        total_time = sum(self.roundtimes)
        avg_time = total_time / float(runs)
        max_time = max(self.roundtimes)
        return (min_time, avg_time, max_time)

    def print_header(self, title='Benchmark'):

        """ Print the benchmark header including machine details.
        """
        print('-' * LINE)
        print('%s: %s' % (title, self.name))
        print('-' * LINE)
        print()
        print(' Rounds: %s' % self.rounds)
        print(' Warp: %s' % self.warp)
        print(' Timer: %s' % self.timer)
        print()
        if self.machine_details:
            print_machine_details(self.machine_details, indent=' ')
            print()

    def print_benchmark(self, hidenoise=0, limitnames=None):

        """ Print a per-test timing table for this benchmark run.
        """
        print('Test '
              ' minimum average operation overhead')
        print('-' * LINE)
        tests = sorted(self.tests.items())
        total_min_time = 0.0
        total_avg_time = 0.0
        for name, test in tests:
            if (limitnames is not None and
                limitnames.search(name) is None):
                continue
            (min_time,
             avg_time,
             total_time,
             op_avg,
             min_overhead) = test.stat()
            total_min_time = total_min_time + min_time
            total_avg_time = total_avg_time + avg_time
            print('%30s: %5.0fms %5.0fms %6.2fus %7.3fms' % \
                  (name,
                   min_time * MILLI_SECONDS,
                   avg_time * MILLI_SECONDS,
                   op_avg * MICRO_SECONDS,
                   min_overhead *MILLI_SECONDS))
        print('-' * LINE)
        print('Totals: '
              ' %6.0fms %6.0fms' %
              (total_min_time * MILLI_SECONDS,
               total_avg_time * MILLI_SECONDS,
               ))
        print()

    def print_comparison(self, compare_to, hidenoise=0, limitnames=None):

        """ Print a side-by-side comparison of this benchmark against
            the Benchmark instance *compare_to*.  Incomparable
            benchmarks/tests are marked 'n/a'.
        """
        # Check benchmark versions
        if compare_to.version != self.version:
            print('* Benchmark versions differ: '
                  'cannot compare this benchmark to "%s" !' %
                  compare_to.name)
            print()
            self.print_benchmark(hidenoise=hidenoise,
                                 limitnames=limitnames)
            return

        # Print header
        compare_to.print_header('Comparing with')
        print('Test '
              ' minimum run-time average run-time')
        print(' '
              ' this other diff this other diff')
        print('-' * LINE)

        # Print test comparisons
        tests = sorted(self.tests.items())
        total_min_time = other_total_min_time = 0.0
        total_avg_time = other_total_avg_time = 0.0
        benchmarks_compatible = self.compatible(compare_to)
        tests_compatible = 1
        for name, test in tests:
            if (limitnames is not None and
                limitnames.search(name) is None):
                continue
            (min_time,
             avg_time,
             total_time,
             op_avg,
             min_overhead) = test.stat()
            total_min_time = total_min_time + min_time
            total_avg_time = total_avg_time + avg_time
            try:
                other = compare_to.tests[name]
            except KeyError:
                other = None
            if other is None:
                # Other benchmark doesn't include the given test
                min_diff, avg_diff = 'n/a', 'n/a'
                other_min_time = 0.0
                other_avg_time = 0.0
                tests_compatible = 0
            else:
                (other_min_time,
                 other_avg_time,
                 other_total_time,
                 other_op_avg,
                 other_min_overhead) = other.stat()
                other_total_min_time = other_total_min_time + other_min_time
                other_total_avg_time = other_total_avg_time + other_avg_time
                if (benchmarks_compatible and
                    test.compatible(other)):
                    # Both benchmark and tests are comparible
                    min_diff = ((min_time * self.warp) /
                                (other_min_time * other.warp) - 1.0)
                    avg_diff = ((avg_time * self.warp) /
                                (other_avg_time * other.warp) - 1.0)
                    if hidenoise and abs(min_diff) < 10.0:
                        min_diff = ''
                    else:
                        min_diff = '%+5.1f%%' % (min_diff * PERCENT)
                    if hidenoise and abs(avg_diff) < 10.0:
                        avg_diff = ''
                    else:
                        avg_diff = '%+5.1f%%' % (avg_diff * PERCENT)
                else:
                    # Benchmark or tests are not comparible
                    min_diff, avg_diff = 'n/a', 'n/a'
                    tests_compatible = 0
            print('%30s: %5.0fms %5.0fms %7s %5.0fms %5.0fms %7s' % \
                  (name,
                   min_time * MILLI_SECONDS,
                   other_min_time * MILLI_SECONDS * compare_to.warp / self.warp,
                   min_diff,
                   avg_time * MILLI_SECONDS,
                   other_avg_time * MILLI_SECONDS * compare_to.warp / self.warp,
                   avg_diff))
        print('-' * LINE)

        # Summarise test results
        if not benchmarks_compatible or not tests_compatible:
            min_diff, avg_diff = 'n/a', 'n/a'
        else:
            if other_total_min_time != 0.0:
                min_diff = '%+5.1f%%' % (
                    ((total_min_time * self.warp) /
                     (other_total_min_time * compare_to.warp) - 1.0) * PERCENT)
            else:
                min_diff = 'n/a'
            if other_total_avg_time != 0.0:
                avg_diff = '%+5.1f%%' % (
                    ((total_avg_time * self.warp) /
                     (other_total_avg_time * compare_to.warp) - 1.0) * PERCENT)
            else:
                avg_diff = 'n/a'
        print('Totals: '
              ' %5.0fms %5.0fms %7s %5.0fms %5.0fms %7s' %
              (total_min_time * MILLI_SECONDS,
               (other_total_min_time * compare_to.warp/self.warp
                * MILLI_SECONDS),
               min_diff,
               total_avg_time * MILLI_SECONDS,
               (other_total_avg_time * compare_to.warp/self.warp
                * MILLI_SECONDS),
               avg_diff
               ))
        print()
        print('(this=%s, other=%s)' % (self.name,
                                       compare_to.name))
        print()
# Command line front-end: parses options, runs/loads/compares benchmarks.
class PyBenchCmdline(Application):

    header = ("PYBENCH - a benchmark test suite for Python "
              "interpreters/compilers.")

    version = __version__

    debug = _debug

    options = [ArgumentOption('-n',
                              'number of rounds',
                              Setup.Number_of_rounds),
               ArgumentOption('-f',
                              'save benchmark to file arg',
                              ''),
               ArgumentOption('-c',
                              'compare benchmark with the one in file arg',
                              ''),
               ArgumentOption('-s',
                              'show benchmark in file arg, then exit',
                              ''),
               ArgumentOption('-w',
                              'set warp factor to arg',
                              Setup.Warp_factor),
               ArgumentOption('-t',
                              'run only tests with names matching arg',
                              ''),
               ArgumentOption('-C',
                              'set the number of calibration runs to arg',
                              CALIBRATION_RUNS),
               SwitchOption('-d',
                            'hide noise in comparisons',
                            0),
               SwitchOption('-v',
                            'verbose output (not recommended)',
                            0),
               SwitchOption('--with-gc',
                            'enable garbage collection',
                            0),
               SwitchOption('--with-syscheck',
                            'use default sys check interval',
                            0),
               ArgumentOption('--timer',
                              'use given timer',
                              TIMER_PLATFORM_DEFAULT),
               ]

    about = """\
The normal operation is to run the suite and display the
results. Use -f to save them for later reuse or comparisons.

Available timers:

   time.time
   time.clock
   systimes.processtime

Examples:

python2.1 pybench.py -f p21.pybench
python2.5 pybench.py -f p25.pybench
python pybench.py -s p25.pybench -c p21.pybench
"""

    copyright = __copyright__

    def main(self):

        """ Entry point: read options, configure the interpreter
            (gc, check interval, timer), then show, compare and/or
            run+save a benchmark as requested.
        """
        rounds = self.values['-n']
        reportfile = self.values['-f']
        show_bench = self.values['-s']
        compare_to = self.values['-c']
        hidenoise = self.values['-d']
        warp = int(self.values['-w'])
        withgc = self.values['--with-gc']
        limitnames = self.values['-t']
        if limitnames:
            if _debug:
                print('* limiting test names to one with substring "%s"' % \
                      limitnames)
            limitnames = re.compile(limitnames, re.I)
        else:
            limitnames = None
        verbose = self.verbose
        withsyscheck = self.values['--with-syscheck']
        calibration_runs = self.values['-C']
        timer = self.values['--timer']

        print('-' * LINE)
        print('PYBENCH %s' % __version__)
        print('-' * LINE)
        print('* using %s %s' % (
            getattr(platform, 'python_implementation', lambda:'Python')(),
            ' '.join(sys.version.split())))

        # Switch off garbage collection
        if not withgc:
            try:
                import gc
            except ImportError:
                print('* Python version doesn\'t support garbage collection')
            else:
                try:
                    gc.disable()
                except NotImplementedError:
                    print('* Python version doesn\'t support gc.disable')
                else:
                    print('* disabled garbage collection')

        # "Disable" sys check interval
        if not withsyscheck:
            # Too bad the check interval uses an int instead of a long...
            value = 2147483647
            try:
                sys.setcheckinterval(value)
            except (AttributeError, NotImplementedError):
                print('* Python version doesn\'t support sys.setcheckinterval')
            else:
                print('* system check interval set to maximum: %s' % value)

        if timer == TIMER_SYSTIMES_PROCESSTIME:
            import systimes
            print('* using timer: systimes.processtime (%s)' % \
                  systimes.SYSTIMES_IMPLEMENTATION)
        else:
            print('* using timer: %s' % timer)

        print()

        if compare_to:
            try:
                f = open(compare_to,'rb')
                bench = pickle.load(f)
                bench.name = compare_to
                f.close()
                compare_to = bench
            except IOError as reason:
                print('* Error opening/reading file %s: %s' % (
                    repr(compare_to),
                    reason))
                compare_to = None

        if show_bench:
            try:
                f = open(show_bench,'rb')
                bench = pickle.load(f)
                bench.name = show_bench
                f.close()
                bench.print_header()
                if compare_to:
                    bench.print_comparison(compare_to,
                                           hidenoise=hidenoise,
                                           limitnames=limitnames)
                else:
                    bench.print_benchmark(hidenoise=hidenoise,
                                          limitnames=limitnames)
            except IOError as reason:
                print('* Error opening/reading file %s: %s' % (
                    repr(show_bench),
                    reason))
                print()
            return

        if reportfile:
            print('Creating benchmark: %s (rounds=%i, warp=%i)' % \
                  (reportfile, rounds, warp))
            print()

        # Create benchmark object
        bench = Benchmark(reportfile,
                          verbose=verbose,
                          timer=timer,
                          warp=warp,
                          calibration_runs=calibration_runs)
        bench.rounds = rounds
        bench.load_tests(Setup, limitnames=limitnames)
        try:
            bench.calibrate()
            bench.run()
        except KeyboardInterrupt:
            print()
            print('*** KeyboardInterrupt -- Aborting')
            print()
            return
        bench.print_header()
        if compare_to:
            bench.print_comparison(compare_to,
                                   hidenoise=hidenoise,
                                   limitnames=limitnames)
        else:
            bench.print_benchmark(hidenoise=hidenoise,
                                  limitnames=limitnames)

        # Ring bell
        sys.stderr.write('\007')

        if reportfile:
            try:
                f = open(reportfile,'wb')
                bench.name = reportfile
                pickle.dump(bench,f)
                f.close()
            except IOError as reason:
                # Bug fix: there used to be two identical "except IOError"
                # clauses here; the second one (carrying the detailed
                # message) was unreachable dead code.
                print('* Error opening/writing reportfile %s: %s' % (
                    reportfile,
                    reason))
                print()
# Script entry point: constructing the Application subclass parses
# sys.argv and invokes .main().
if __name__ == '__main__':
    PyBenchCmdline()
# ---------------------------------------------------------------------------
# NOTE: the lines below are a second, duplicate copy of
# Tools/pybench/pybench.py that was concatenated onto this file.
# ---------------------------------------------------------------------------
#!/usr/local/bin/python -O
""" A Python Benchmark Suite
"""
#
# Note: Please keep this module compatible to Python 1.5.2.
#
# Tests may include features in later Python versions, but these
# should then be embedded in try-except clauses in the configuration
# module Setup.py.
#
# pybench Copyright
__copyright__ = """\
Copyright (c), 1997-2006, <NAME> (<EMAIL>)
Copyright (c), 2000-2006, eGenix.com Software GmbH (<EMAIL>)
All Rights Reserved.
Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee or royalty is hereby
granted, provided that the above copyright notice appear in all copies
and that both that copyright notice and this permission notice appear
in supporting documentation or portions thereof, including
modifications, that you make.
THE AUTHOR MARC-ANDRE LEMBURG DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
"""
import sys, time, operator, platform
from CommandLine import *
try:
import cPickle
pickle = cPickle
except ImportError:
import pickle
# Version number; version history: see README file !
__version__ = '2.0'

### Constants

# Second fractions (multipliers converting seconds to ms / us)
MILLI_SECONDS = 1e3
MICRO_SECONDS = 1e6

# Percent unit
PERCENT = 100

# Horizontal line length (in characters, used for report formatting)
LINE = 79

# Minimum test run-time (seconds); shorter runs abort with an error
MIN_TEST_RUNTIME = 1e-3

# Number of calibration runs to use for calibrating the tests
CALIBRATION_RUNS = 20

# Number of calibration loops to run for each calibration run
CALIBRATION_LOOPS = 20

# Allow skipping calibration ?
# (when true, Test/Benchmark objects may be created with calibration_runs=0)
ALLOW_SKIPPING_CALIBRATION = 1

# Timer types: string identifiers accepted by get_timer() and --timer
TIMER_TIME_TIME = 'time.time'
TIMER_TIME_CLOCK = 'time.clock'
TIMER_SYSTIMES_PROCESSTIME = 'systimes.processtime'

# Choose platform default timer
if sys.platform[:3] == 'win':
    # On WinXP this has 2.5ms resolution
    TIMER_PLATFORM_DEFAULT = TIMER_TIME_CLOCK
else:
    # On Linux this has 1ms resolution
    TIMER_PLATFORM_DEFAULT = TIMER_TIME_TIME

# Print debug information ?
_debug = 0
### Helpers
def get_timer(timertype):
if timertype == TIMER_TIME_TIME:
return time.time
elif timertype == TIMER_TIME_CLOCK:
return time.clock
elif timertype == TIMER_SYSTIMES_PROCESSTIME:
import systimes
return systimes.processtime
else:
raise TypeError('unknown timer type: %s' % timertype)
def get_machine_details():

    """ Collect details about the host machine and the running Python
        build, returned as a plain dictionary of strings.
    """
    if _debug:
        print('Getting machine details...')
    buildno, builddate = platform.python_build()
    python = platform.python_version()
    # sys.maxunicode distinguishes UCS2 (standard) from UCS4 builds
    # (most recent Linux distros)
    if sys.maxunicode == 65535:
        unitype = 'UCS2'
    else:
        unitype = 'UCS4'
    bits, linkage = platform.architecture()
    # platform.python_implementation() is not available on very old
    # Python versions, hence the getattr() fallback to 'n/a'
    impl = getattr(platform, 'python_implementation', lambda:'n/a')()
    details = {}
    details['platform'] = platform.platform()
    details['processor'] = platform.processor()
    details['executable'] = sys.executable
    details['implementation'] = impl
    details['python'] = platform.python_version()
    details['compiler'] = platform.python_compiler()
    details['buildno'] = buildno
    details['builddate'] = builddate
    details['unicode'] = unitype
    details['bits'] = bits
    return details
def print_machine_details(d, indent=''):

    """ Pretty-print the machine details dict *d* to stdout, prefixing
        every output line with *indent*.  Missing keys print as 'n/a'.
    """
    get = d.get
    lines = ['Machine Details:',
             ' Platform ID: %s' % get('platform', 'n/a'),
             ' Processor: %s' % get('processor', 'n/a'),
             '',
             'Python:',
             ' Implementation: %s' % get('implementation', 'n/a'),
             ' Executable: %s' % get('executable', 'n/a'),
             ' Version: %s' % get('python', 'n/a'),
             ' Compiler: %s' % get('compiler', 'n/a'),
             ' Bits: %s' % get('bits', 'n/a'),
             ' Build: %s (#%s)' % (get('builddate', 'n/a'),
                                   get('buildno', 'n/a')),
             ' Unicode: %s' % get('unicode', 'n/a'),
             ]
    sep = '\n' + indent
    print(indent + sep.join(lines) + '\n')
### Test baseclass

class Test:

    """ All tests must have this class as baseclass. It provides
        the necessary interface to the benchmark machinery.

        The tests must set .rounds to a value high enough to let the
        test run between 20-50 seconds. This is needed because
        clock()-timing only gives rather inaccurate values (on Linux,
        for example, it is accurate to a few hundreths of a
        second). If you don't want to wait that long, use a warp
        factor larger than 1.

        It is also important to set the .operations variable to a
        value representing the number of "virtual operations" done per
        call of .run().

        If you change a test in some way, don't forget to increase
        its version number.

    """

    ### Instance variables that each test should override

    # Version number of the test as float (x.yy); this is important
    # for comparisons of benchmark runs - tests with unequal version
    # number will not get compared.
    version = 2.0

    # The number of abstract operations done in each round of the
    # test. An operation is the basic unit of what you want to
    # measure. The benchmark will output the amount of run-time per
    # operation. Note that in order to raise the measured timings
    # significantly above noise level, it is often required to repeat
    # sets of operations more than once per test round. The measured
    # overhead per test round should be less than 1 second.
    operations = 1

    # Number of rounds to execute per test run. This should be
    # adjusted to a figure that results in a test run-time of between
    # 1-2 seconds.
    rounds = 100000

    ### Internal variables

    # Mark this class as implementing a test
    is_a_test = 1

    # Last timing: (real, run, overhead)
    last_timing = (0.0, 0.0, 0.0)

    # Warp factor to use for this test
    warp = 1

    # Number of calibration runs to use
    calibration_runs = CALIBRATION_RUNS

    # List of calibration timings
    overhead_times = None

    # List of test run timings
    times = []

    # Timer used for the benchmark
    timer = TIMER_PLATFORM_DEFAULT

    def __init__(self, warp=None, calibration_runs=None, timer=None):

        # Set parameters
        if warp is not None:
            self.rounds = int(self.rounds / warp)
            if self.rounds == 0:
                raise ValueError('warp factor set too high')
            self.warp = warp
        if calibration_runs is not None:
            if (not ALLOW_SKIPPING_CALIBRATION and
                calibration_runs < 1):
                raise ValueError('at least one calibration run is required')
            self.calibration_runs = calibration_runs
        if timer is not None:
            # Bug fix: this previously read "timer = timer", a no-op
            # that silently ignored the timer requested by the caller.
            self.timer = timer

        # Init variables
        self.times = []
        self.overhead_times = []

        # We want these to be in the instance dict, so that pickle
        # saves them
        self.version = self.version
        self.operations = self.operations
        self.rounds = self.rounds

    def get_timer(self):

        """ Return the timer function to use for the test.
        """
        return get_timer(self.timer)

    def compatible(self, other):

        """ Return 1/0 depending on whether the test is compatible
            with the other Test instance or not.
        """
        if self.version != other.version:
            return 0
        if self.rounds != other.rounds:
            return 0
        return 1

    def calibrate_test(self):

        """ Measure the per-round overhead of the test setup code
            (self.calibrate) and record it in self.overhead_times.
        """
        # Calibration may be skipped entirely by requesting 0 runs;
        # overhead is then assumed to be zero.
        if self.calibration_runs == 0:
            self.overhead_times = [0.0]
            return

        calibrate = self.calibrate
        timer = self.get_timer()
        calibration_loops = range(CALIBRATION_LOOPS)

        # Time the calibration loop overhead
        prep_times = []
        for i in range(self.calibration_runs):
            t = timer()
            for i in calibration_loops:
                pass
            t = timer() - t
            prep_times.append(t)
        min_prep_time = min(prep_times)
        if _debug:
            print()
            print('Calib. prep time = %.6fms' % (
                min_prep_time * MILLI_SECONDS))

        # Time the calibration runs (doing CALIBRATION_LOOPS loops of
        # .calibrate() method calls each)
        for i in range(self.calibration_runs):
            t = timer()
            for i in calibration_loops:
                calibrate()
            t = timer() - t
            # Subtract the loop overhead measured above
            self.overhead_times.append(t / CALIBRATION_LOOPS
                                       - min_prep_time)

        # Check the measured times
        min_overhead = min(self.overhead_times)
        max_overhead = max(self.overhead_times)
        if _debug:
            print('Calib. overhead time = %.6fms' % (
                min_overhead * MILLI_SECONDS))
        if min_overhead < 0.0:
            raise ValueError('calibration setup did not work')
        if max_overhead - min_overhead > 0.1:
            raise ValueError(
                'overhead calibration timing range too inaccurate: '
                '%r - %r' % (min_overhead, max_overhead))

    def run(self):

        """ Run the test in two phases: first calibrate, then
            do the actual test. Be careful to keep the calibration
            timing low w/r to the test timing.
        """
        test = self.test
        timer = self.get_timer()

        # Get calibration
        min_overhead = min(self.overhead_times)

        # Test run
        t = timer()
        test()
        t = timer() - t
        if t < MIN_TEST_RUNTIME:
            # Bug fix: the message used to claim "< 10ms" although
            # MIN_TEST_RUNTIME is 1e-3 seconds (1ms).
            raise ValueError('warp factor too high: '
                             'test times are < 1ms')
        eff_time = t - min_overhead
        if eff_time < 0:
            raise ValueError('wrong calibration')
        self.last_timing = (eff_time, t, min_overhead)
        self.times.append(eff_time)

    def calibrate(self):

        """ Calibrate the test.

            This method should execute everything that is needed to
            setup and run the test - except for the actual operations
            that you intend to measure. pybench uses this method to
            measure the test implementation overhead.

        """
        return

    def test(self):

        """ Run the test.

            The test needs to run self.rounds executing
            self.operations number of operations each.

        """
        return

    def stat(self):

        """ Return test run statistics as tuple:

            (minimum run time,
             average run time,
             total run time,
             average time per operation,
             minimum overhead time)

        """
        runs = len(self.times)
        if runs == 0:
            # Bug fix: callers unpack five values (see docstring);
            # previously only four zeros were returned here.
            return 0.0, 0.0, 0.0, 0.0, 0.0
        min_time = min(self.times)
        total_time = sum(self.times)
        avg_time = total_time / float(runs)
        operation_avg = total_time / float(runs
                                           * self.rounds
                                           * self.operations)
        if self.overhead_times:
            min_overhead = min(self.overhead_times)
        else:
            min_overhead = self.last_timing[2]
        return min_time, avg_time, total_time, operation_avg, min_overhead
### Load Setup
# This has to be done after the definition of the Test class, since
# the Setup module will import subclasses using this class.
import Setup
### Benchmark base class
class Benchmark:
# Name of the benchmark
name = ''
# Number of benchmark rounds to run
rounds = 1
# Warp factor use to run the tests
warp = 1 # Warp factor
# Average benchmark round time
roundtime = 0
# Benchmark version number as float x.yy
version = 2.0
# Produce verbose output ?
verbose = 0
# Dictionary with the machine details
machine_details = None
# Timer used for the benchmark
timer = TIMER_PLATFORM_DEFAULT
def __init__(self, name, verbose=None, timer=None, warp=None,
calibration_runs=None):
if name:
self.name = name
else:
self.name = '%04i-%02i-%02i %02i:%02i:%02i' % \
(time.localtime(time.time())[:6])
if verbose is not None:
self.verbose = verbose
if timer is not None:
self.timer = timer
if warp is not None:
self.warp = warp
if calibration_runs is not None:
self.calibration_runs = calibration_runs
# Init vars
self.tests = {}
if _debug:
print('Getting machine details...')
self.machine_details = get_machine_details()
# Make .version an instance attribute to have it saved in the
# Benchmark pickle
self.version = self.version
def get_timer(self):
""" Return the timer function to use for the test.
"""
return get_timer(self.timer)
def compatible(self, other):
""" Return 1/0 depending on whether the benchmark is
compatible with the other Benchmark instance or not.
"""
if self.version != other.version:
return 0
if (self.machine_details == other.machine_details and
self.timer != other.timer):
return 0
if (self.calibration_runs == 0 and
other.calibration_runs != 0):
return 0
if (self.calibration_runs != 0 and
other.calibration_runs == 0):
return 0
return 1
def load_tests(self, setupmod, limitnames=None):
# Add tests
if self.verbose:
print('Searching for tests ...')
print('--------------------------------------')
for testclass in setupmod.__dict__.values():
if not hasattr(testclass, 'is_a_test'):
continue
name = testclass.__name__
if name == 'Test':
continue
if (limitnames is not None and
limitnames.search(name) is None):
continue
self.tests[name] = testclass(
warp=self.warp,
calibration_runs=self.calibration_runs,
timer=self.timer)
l = sorted(self.tests)
if self.verbose:
for name in l:
print(' %s' % name)
print('--------------------------------------')
print(' %i tests found' % len(l))
print()
def calibrate(self):
print('Calibrating tests. Please wait...', end=' ')
sys.stdout.flush()
if self.verbose:
print()
print()
print('Test min max')
print('-' * LINE)
tests = sorted(self.tests.items())
for i in range(len(tests)):
name, test = tests[i]
test.calibrate_test()
if self.verbose:
print('%30s: %6.3fms %6.3fms' % \
(name,
min(test.overhead_times) * MILLI_SECONDS,
max(test.overhead_times) * MILLI_SECONDS))
if self.verbose:
print()
print('Done with the calibration.')
else:
print('done.')
print()
def run(self):
tests = sorted(self.tests.items())
timer = self.get_timer()
print('Running %i round(s) of the suite at warp factor %i:' % \
(self.rounds, self.warp))
print()
self.roundtimes = []
for i in range(self.rounds):
if self.verbose:
print(' Round %-25i effective absolute overhead' % (i+1))
total_eff_time = 0.0
for j in range(len(tests)):
name, test = tests[j]
if self.verbose:
print('%30s:' % name, end=' ')
test.run()
(eff_time, abs_time, min_overhead) = test.last_timing
total_eff_time = total_eff_time + eff_time
if self.verbose:
print(' %5.0fms %5.0fms %7.3fms' % \
(eff_time * MILLI_SECONDS,
abs_time * MILLI_SECONDS,
min_overhead * MILLI_SECONDS))
self.roundtimes.append(total_eff_time)
if self.verbose:
print(' '
' ------------------------------')
print(' '
' Totals: %6.0fms' %
(total_eff_time * MILLI_SECONDS))
print()
else:
print('* Round %i done in %.3f seconds.' % (i+1,
total_eff_time))
print()
def stat(self):
    """Return benchmark run statistics as tuple:
    (minimum round time,
     average round time,
     maximum round time)

    XXX Currently not used, since the benchmark does test
        statistics across all rounds.
    """
    runs = len(self.roundtimes)
    if runs == 0:
        # Bug fix: the docstring promises a 3-tuple, but the original
        # returned only (0.0, 0.0) here, so callers unpacking three
        # values would fail on an empty run.
        return 0.0, 0.0, 0.0
    min_time = min(self.roundtimes)
    total_time = sum(self.roundtimes)
    avg_time = total_time / float(runs)
    max_time = max(self.roundtimes)
    return (min_time, avg_time, max_time)
def print_header(self, title='Benchmark'):
    """Print the report header.

    Shows the title, the benchmark name and its run parameters and,
    when recorded, details about the machine the run was made on.
    """
    separator = '-' * LINE
    print(separator)
    print('%s: %s' % (title, self.name))
    print(separator)
    print()
    print(' Rounds: %s' % self.rounds)
    print(' Warp: %s' % self.warp)
    print(' Timer: %s' % self.timer)
    print()
    if self.machine_details:
        print_machine_details(self.machine_details, indent=' ')
        print()
def print_benchmark(self, hidenoise=0, limitnames=None):
    """Print a per-test results table for this benchmark run.

    hidenoise is accepted for interface compatibility with
    print_comparison() but is not used here.  limitnames, if given,
    is a compiled regular expression; only tests whose name matches
    it are included in the table and the totals.
    """
    print('Test '
          ' minimum average operation overhead')
    print('-' * LINE)
    tests = sorted(self.tests.items())
    total_min_time = 0.0
    total_avg_time = 0.0
    for name, test in tests:
        # Skip tests filtered out by the limitnames pattern.
        if (limitnames is not None and
            limitnames.search(name) is None):
            continue
        # Per-test statistics; see Test.stat() for the tuple layout.
        (min_time,
         avg_time,
         total_time,
         op_avg,
         min_overhead) = test.stat()
        total_min_time = total_min_time + min_time
        total_avg_time = total_avg_time + avg_time
        print('%30s: %5.0fms %5.0fms %6.2fus %7.3fms' % \
              (name,
               min_time * MILLI_SECONDS,
               avg_time * MILLI_SECONDS,
               op_avg * MICRO_SECONDS,
               min_overhead *MILLI_SECONDS))
    print('-' * LINE)
    print('Totals: '
          ' %6.0fms %6.0fms' %
          (total_min_time * MILLI_SECONDS,
           total_avg_time * MILLI_SECONDS,
           ))
    print()
def print_comparison(self, compare_to, hidenoise=0, limitnames=None):
    """Print this benchmark's results side by side with another run.

    compare_to is the other (typically earlier) Benchmark instance.
    hidenoise suppresses small percentage differences; limitnames, if
    given, is a compiled regular expression restricting which tests
    are shown.  Warp factors of the two runs are normalized so that
    timings remain comparable.
    """
    # Differing benchmark versions cannot be compared; fall back to
    # printing this benchmark on its own.
    if compare_to.version != self.version:
        print('* Benchmark versions differ: '
              'cannot compare this benchmark to "%s" !' %
              compare_to.name)
        print()
        self.print_benchmark(hidenoise=hidenoise,
                             limitnames=limitnames)
        return

    # Print header
    compare_to.print_header('Comparing with')
    print('Test '
          ' minimum run-time average run-time')
    print(' '
          ' this other diff this other diff')
    print('-' * LINE)

    # Print test comparisons
    tests = sorted(self.tests.items())
    total_min_time = other_total_min_time = 0.0
    total_avg_time = other_total_avg_time = 0.0
    benchmarks_compatible = self.compatible(compare_to)
    tests_compatible = 1
    for name, test in tests:
        if (limitnames is not None and
            limitnames.search(name) is None):
            continue
        # Per-test statistics; see Test.stat() for the tuple layout.
        (min_time,
         avg_time,
         total_time,
         op_avg,
         min_overhead) = test.stat()
        total_min_time = total_min_time + min_time
        total_avg_time = total_avg_time + avg_time
        try:
            other = compare_to.tests[name]
        except KeyError:
            other = None
        if other is None:
            # Other benchmark doesn't include the given test
            min_diff, avg_diff = 'n/a', 'n/a'
            other_min_time = 0.0
            other_avg_time = 0.0
            tests_compatible = 0
        else:
            (other_min_time,
             other_avg_time,
             other_total_time,
             other_op_avg,
             other_min_overhead) = other.stat()
            other_total_min_time = other_total_min_time + other_min_time
            other_total_avg_time = other_total_avg_time + other_avg_time
            if (benchmarks_compatible and
                test.compatible(other)):
                # Both benchmark and tests are comparable; the warp
                # factors normalize the raw timings of the two runs.
                min_diff = ((min_time * self.warp) /
                            (other_min_time * other.warp) - 1.0)
                avg_diff = ((avg_time * self.warp) /
                            (other_avg_time * other.warp) - 1.0)
                # NOTE(review): min_diff/avg_diff are plain ratios at
                # this point, so "< 10.0" hides anything below 1000%,
                # not below 10% -- confirm whether this threshold was
                # intended.
                if hidenoise and abs(min_diff) < 10.0:
                    min_diff = ''
                else:
                    min_diff = '%+5.1f%%' % (min_diff * PERCENT)
                if hidenoise and abs(avg_diff) < 10.0:
                    avg_diff = ''
                else:
                    avg_diff = '%+5.1f%%' % (avg_diff * PERCENT)
            else:
                # Benchmark or tests are not comparable
                min_diff, avg_diff = 'n/a', 'n/a'
                tests_compatible = 0
        print('%30s: %5.0fms %5.0fms %7s %5.0fms %5.0fms %7s' % \
              (name,
               min_time * MILLI_SECONDS,
               other_min_time * MILLI_SECONDS * compare_to.warp / self.warp,
               min_diff,
               avg_time * MILLI_SECONDS,
               other_avg_time * MILLI_SECONDS * compare_to.warp / self.warp,
               avg_diff))
    print('-' * LINE)

    # Summarise test results; totals only get a percentage when every
    # test could be compared, otherwise they are flagged 'n/a'.
    if not benchmarks_compatible or not tests_compatible:
        min_diff, avg_diff = 'n/a', 'n/a'
    else:
        if other_total_min_time != 0.0:
            min_diff = '%+5.1f%%' % (
                ((total_min_time * self.warp) /
                 (other_total_min_time * compare_to.warp) - 1.0) * PERCENT)
        else:
            min_diff = 'n/a'
        if other_total_avg_time != 0.0:
            avg_diff = '%+5.1f%%' % (
                ((total_avg_time * self.warp) /
                 (other_total_avg_time * compare_to.warp) - 1.0) * PERCENT)
        else:
            avg_diff = 'n/a'
    print('Totals: '
          ' %5.0fms %5.0fms %7s %5.0fms %5.0fms %7s' %
          (total_min_time * MILLI_SECONDS,
           (other_total_min_time * compare_to.warp/self.warp
            * MILLI_SECONDS),
           min_diff,
           total_avg_time * MILLI_SECONDS,
           (other_total_avg_time * compare_to.warp/self.warp
            * MILLI_SECONDS),
           avg_diff
           ))
    print()
    print('(this=%s, other=%s)' % (self.name,
                                   compare_to.name))
    print()
class PyBenchCmdline(Application):

    """Command line front-end for the pybench benchmark suite.

    Parses the options below, optionally loads/saves pickled
    Benchmark objects and runs or compares benchmark results.
    """

    header = ("PYBENCH - a benchmark test suite for Python "
              "interpreters/compilers.")

    version = __version__

    debug = _debug

    options = [ArgumentOption('-n',
                              'number of rounds',
                              Setup.Number_of_rounds),
               ArgumentOption('-f',
                              'save benchmark to file arg',
                              ''),
               ArgumentOption('-c',
                              'compare benchmark with the one in file arg',
                              ''),
               ArgumentOption('-s',
                              'show benchmark in file arg, then exit',
                              ''),
               ArgumentOption('-w',
                              'set warp factor to arg',
                              Setup.Warp_factor),
               ArgumentOption('-t',
                              'run only tests with names matching arg',
                              ''),
               ArgumentOption('-C',
                              'set the number of calibration runs to arg',
                              CALIBRATION_RUNS),
               SwitchOption('-d',
                            'hide noise in comparisons',
                            0),
               SwitchOption('-v',
                            'verbose output (not recommended)',
                            0),
               SwitchOption('--with-gc',
                            'enable garbage collection',
                            0),
               SwitchOption('--with-syscheck',
                            'use default sys check interval',
                            0),
               ArgumentOption('--timer',
                              'use given timer',
                              TIMER_PLATFORM_DEFAULT),
               ]

    about = """\
The normal operation is to run the suite and display the
results. Use -f to save them for later reuse or comparisons.

Available timers:

   time.time
   time.clock
   systimes.processtime

Examples:

python2.1 pybench.py -f p21.pybench
python2.5 pybench.py -f p25.pybench
python pybench.py -s p25.pybench -c p21.pybench
"""

    copyright = __copyright__

    def main(self):
        """Run the benchmark according to the parsed command line.

        Handles the show (-s) and compare (-c) modes, tweaks the
        interpreter (GC, check interval) for stable timings, runs the
        suite and optionally pickles the results to -f.
        """
        rounds = self.values['-n']
        reportfile = self.values['-f']
        show_bench = self.values['-s']
        compare_to = self.values['-c']
        hidenoise = self.values['-d']
        warp = int(self.values['-w'])
        withgc = self.values['--with-gc']
        limitnames = self.values['-t']
        if limitnames:
            if _debug:
                print('* limiting test names to one with substring "%s"' % \
                      limitnames)
            limitnames = re.compile(limitnames, re.I)
        else:
            limitnames = None
        verbose = self.verbose
        withsyscheck = self.values['--with-syscheck']
        calibration_runs = self.values['-C']
        timer = self.values['--timer']

        print('-' * LINE)
        print('PYBENCH %s' % __version__)
        print('-' * LINE)
        print('* using %s %s' % (
            getattr(platform, 'python_implementation', lambda:'Python')(),
            ' '.join(sys.version.split())))

        # Switch off garbage collection to keep timings stable.
        if not withgc:
            try:
                import gc
            except ImportError:
                print('* Python version doesn\'t support garbage collection')
            else:
                try:
                    gc.disable()
                except NotImplementedError:
                    print('* Python version doesn\'t support gc.disable')
                else:
                    print('* disabled garbage collection')

        # "Disable" sys check interval (avoid thread switch overhead).
        if not withsyscheck:
            # Too bad the check interval uses an int instead of a long...
            value = 2147483647
            try:
                sys.setcheckinterval(value)
            except (AttributeError, NotImplementedError):
                print('* Python version doesn\'t support sys.setcheckinterval')
            else:
                print('* system check interval set to maximum: %s' % value)

        if timer == TIMER_SYSTIMES_PROCESSTIME:
            import systimes
            print('* using timer: systimes.processtime (%s)' % \
                  systimes.SYSTIMES_IMPLEMENTATION)
        else:
            print('* using timer: %s' % timer)
        print()

        if compare_to:
            try:
                # Use a context manager so the file is closed even when
                # unpickling fails (the original leaked the handle).
                with open(compare_to, 'rb') as f:
                    bench = pickle.load(f)
                bench.name = compare_to
                compare_to = bench
            except IOError as reason:
                print('* Error opening/reading file %s: %s' % (
                    repr(compare_to),
                    reason))
                compare_to = None

        if show_bench:
            try:
                with open(show_bench, 'rb') as f:
                    bench = pickle.load(f)
                bench.name = show_bench
                bench.print_header()
                if compare_to:
                    bench.print_comparison(compare_to,
                                           hidenoise=hidenoise,
                                           limitnames=limitnames)
                else:
                    bench.print_benchmark(hidenoise=hidenoise,
                                          limitnames=limitnames)
            except IOError as reason:
                print('* Error opening/reading file %s: %s' % (
                    repr(show_bench),
                    reason))
                print()
            return

        if reportfile:
            print('Creating benchmark: %s (rounds=%i, warp=%i)' % \
                  (reportfile, rounds, warp))
            print()

        # Create benchmark object
        bench = Benchmark(reportfile,
                          verbose=verbose,
                          timer=timer,
                          warp=warp,
                          calibration_runs=calibration_runs)
        bench.rounds = rounds
        bench.load_tests(Setup, limitnames=limitnames)
        try:
            bench.calibrate()
            bench.run()
        except KeyboardInterrupt:
            print()
            print('*** KeyboardInterrupt -- Aborting')
            print()
            return
        bench.print_header()
        if compare_to:
            bench.print_comparison(compare_to,
                                   hidenoise=hidenoise,
                                   limitnames=limitnames)
        else:
            bench.print_benchmark(hidenoise=hidenoise,
                                  limitnames=limitnames)

        # Ring bell
        sys.stderr.write('\007')

        if reportfile:
            try:
                with open(reportfile, 'wb') as f:
                    bench.name = reportfile
                    pickle.dump(bench, f)
            # Bug fix: the original had two identical 'except IOError'
            # clauses here; only the first (with no details) could ever
            # run, making the informative message unreachable.  Merged
            # into the single, detailed handler.
            except IOError as reason:
                print('* Error opening/writing reportfile %s: %s' % (
                    reportfile,
                    reason))
                print()
# Script entry point: constructing the Application subclass triggers
# command-line parsing and runs PyBenchCmdline.main().
if __name__ == '__main__':
    PyBenchCmdline()
|
en
| 0.813797
|
#!/usr/local/bin/python -O A Python Benchmark Suite # # Note: Please keep this module compatible to Python 1.5.2. # # Tests may include features in later Python versions, but these # should then be embedded in try-except clauses in the configuration # module Setup.py. # # pybench Copyright \ Copyright (c), 1997-2006, <NAME> (<EMAIL>) Copyright (c), 2000-2006, eGenix.com Software GmbH (<EMAIL>) All Rights Reserved. Permission to use, copy, modify, and distribute this software and its documentation for any purpose and without fee or royalty is hereby granted, provided that the above copyright notice appear in all copies and that both that copyright notice and this permission notice appear in supporting documentation or portions thereof, including modifications, that you make. THE AUTHOR MARC-ANDRE LEMBURG DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE ! # Version number; version history: see README file ! ### Constants # Second fractions # Percent unit # Horizontal line length # Minimum test run-time # Number of calibration runs to use for calibrating the tests # Number of calibration loops to run for each calibration run # Allow skipping calibration ? # Timer types # Choose platform default timer # On WinXP this has 2.5ms resolution # On Linux this has 1ms resolution # Print debug information ? ### Helpers # UCS2 build (standard) # UCS4 build (most recent Linux distros) #%s)' % (d.get('builddate', 'n/a'), ### Test baseclass All test must have this class as baseclass. It provides the necessary interface to the benchmark machinery. 
The tests must set .rounds to a value high enough to let the test run between 20-50 seconds. This is needed because clock()-timing only gives rather inaccurate values (on Linux, for example, it is accurate to a few hundreths of a second). If you don't want to wait that long, use a warp factor larger than 1. It is also important to set the .operations variable to a value representing the number of "virtual operations" done per call of .run(). If you change a test in some way, don't forget to increase its version number. ### Instance variables that each test should override # Version number of the test as float (x.yy); this is important # for comparisons of benchmark runs - tests with unequal version # number will not get compared. # The number of abstract operations done in each round of the # test. An operation is the basic unit of what you want to # measure. The benchmark will output the amount of run-time per # operation. Note that in order to raise the measured timings # significantly above noise level, it is often required to repeat # sets of operations more than once per test round. The measured # overhead per test round should be less than 1 second. # Number of rounds to execute per test run. This should be # adjusted to a figure that results in a test run-time of between # 1-2 seconds. ### Internal variables # Mark this class as implementing a test # Last timing: (real, run, overhead) # Warp factor to use for this test # Number of calibration runs to use # List of calibration timings # List of test run timings # Timer used for the benchmark # Set parameters # Init variables # We want these to be in the instance dict, so that pickle # saves them Return the timer function to use for the test. Return 1/0 depending on whether the test is compatible with the other Test instance or not. 
# Time the calibration loop overhead # Time the calibration runs (doing CALIBRATION_LOOPS loops of # .calibrate() method calls each) # Check the measured times Run the test in two phases: first calibrate, then do the actual test. Be careful to keep the calibration timing low w/r to the test timing. # Get calibration # Test run Calibrate the test. This method should execute everything that is needed to setup and run the test - except for the actual operations that you intend to measure. pybench uses this method to measure the test implementation overhead. Run the test. The test needs to run self.rounds executing self.operations number of operations each. Return test run statistics as tuple: (minimum run time, average run time, total run time, average time per operation, minimum overhead time) ### Load Setup # This has to be done after the definition of the Test class, since # the Setup module will import subclasses using this class. ### Benchmark base class # Name of the benchmark # Number of benchmark rounds to run # Warp factor use to run the tests # Warp factor # Average benchmark round time # Benchmark version number as float x.yy # Produce verbose output ? # Dictionary with the machine details # Timer used for the benchmark # Init vars # Make .version an instance attribute to have it saved in the # Benchmark pickle Return the timer function to use for the test. Return 1/0 depending on whether the benchmark is compatible with the other Benchmark instance or not. # Add tests Return benchmark run statistics as tuple: (minimum round time, average round time, maximum round time) XXX Currently not used, since the benchmark does test statistics across all rounds. # Check benchmark versions # Print header # Print test comparisons # Other benchmark doesn't include the given test # Both benchmark and tests are comparible # Benchmark or tests are not comparible # Summarise test results \ The normal operation is to run the suite and display the results. 
Use -f to save them for later reuse or comparisons. Available timers: time.time time.clock systimes.processtime Examples: python2.1 pybench.py -f p21.pybench python2.5 pybench.py -f p25.pybench python pybench.py -s p25.pybench -c p21.pybench # Switch off garbage collection # "Disable" sys check interval # Too bad the check interval uses an int instead of a long... # Create benchmark object # Ring bell
| 1.674299
| 2
|
examples/reader_demo.py
|
alenrajsp/tcxreader
| 1
|
6629406
|
<filename>examples/reader_demo.py
"""
Simple example of using the TCX reader!
"""
from tcxreader.tcxreader import TCXReader, TCXTrackPoint, TCXExercise
tcx_reader = TCXReader()
file_location = '../example_data/15.tcx'
data: TCXExercise = tcx_reader.read(file_location)
print("Output")
print(str(data.trackpoints[0]))
"""
Example output:
= {TCXTrackPoint}
TPX_speed = {float} 5.011000156402588
cadence = {float} 80
distance = {float} 514.0499877929688
elevation = {float} 46.79999923706055
hr_value = {int} 134
latitude = {float} 45.5244944896549
longitude = {float} 13.596355207264423
time = {datetime} 2015-02-19 09:34:17+00:00
watts = {float} 123
"""
|
<filename>examples/reader_demo.py
"""
Simple example of using the TCX reader!
"""
from tcxreader.tcxreader import TCXReader, TCXTrackPoint, TCXExercise
tcx_reader = TCXReader()
file_location = '../example_data/15.tcx'
data: TCXExercise = tcx_reader.read(file_location)
print("Output")
print(str(data.trackpoints[0]))
"""
Example output:
= {TCXTrackPoint}
TPX_speed = {float} 5.011000156402588
cadence = {float} 80
distance = {float} 514.0499877929688
elevation = {float} 46.79999923706055
hr_value = {int} 134
latitude = {float} 45.5244944896549
longitude = {float} 13.596355207264423
time = {datetime} 2015-02-19 09:34:17+00:00
watts = {float} 123
"""
|
en
| 0.290551
|
Simple example of using the TCX reader! Example output: = {TCXTrackPoint} TPX_speed = {float} 5.011000156402588 cadence = {float} 80 distance = {float} 514.0499877929688 elevation = {float} 46.79999923706055 hr_value = {int} 134 latitude = {float} 45.5244944896549 longitude = {float} 13.596355207264423 time = {datetime} 2015-02-19 09:34:17+00:00 watts = {float} 123
| 2.675137
| 3
|
smartfridge/sql_connector/__init__.py
|
ndoering/smartfridge
| 0
|
6629407
|
<reponame>ndoering/smartfridge
from .sqlconnector import SQLConnector, MySQLConnector
|
from .sqlconnector import SQLConnector, MySQLConnector
|
none
| 1
| 1.10701
| 1
|
|
steam/enums/emsg.py
|
tjensen/steam
| 727
|
6629408
|
<reponame>tjensen/steam<filename>steam/enums/emsg.py
"""The EMsg enum contains many members and takes a bit to load.
For this reason it is separate, and imported only when needed.
"""
from steam.enums.base import SteamIntEnum
class EMsg(SteamIntEnum):
Invalid = 0
Multi = 1
ProtobufWrapped = 2
GenericReply = 100
BaseGeneral = 100
DestJobFailed = 113
Alert = 115
SCIDRequest = 120
SCIDResponse = 121
JobHeartbeat = 123
HubConnect = 124
Subscribe = 126
RouteMessage = 127
RemoteSysID = 128 #: removed
AMCreateAccountResponse = 129 #: removed
WGRequest = 130
WGResponse = 131
KeepAlive = 132
WebAPIJobRequest = 133
WebAPIJobResponse = 134
ClientSessionStart = 135
ClientSessionEnd = 136
# ClientSessionUpdateAuthTicket = 137 #: removed
ClientSessionUpdate = 137
StatsDeprecated = 138
Ping = 139
PingResponse = 140
Stats = 141
RequestFullStatsBlock = 142
LoadDBOCacheItem = 143
LoadDBOCacheItemResponse = 144
InvalidateDBOCacheItems = 145
ServiceMethod = 146
ServiceMethodResponse = 147
ClientPackageVersions = 148
TimestampRequest = 149
TimestampResponse = 150
ServiceMethodCallFromClient = 151
ServiceMethodSendToClient = 152
AssignSysID = 200
BaseShell = 200
Exit = 201
DirRequest = 202
DirResponse = 203
ZipRequest = 204
ZipResponse = 205
UpdateRecordResponse = 215
UpdateCreditCardRequest = 221
UpdateUserBanResponse = 225
PrepareToExit = 226
ContentDescriptionUpdate = 227
TestResetServer = 228
UniverseChanged = 229
ShellConfigInfoUpdate = 230
RequestWindowsEventLogEntries = 233
ProvideWindowsEventLogEntries = 234
ShellSearchLogs = 235
ShellSearchLogsResponse = 236
ShellCheckWindowsUpdates = 237
ShellCheckWindowsUpdatesResponse = 238
ShellFlushUserLicenseCache = 239 #: removed
TestFlushDelayedSQL = 240
TestFlushDelayedSQLResponse = 241
EnsureExecuteScheduledTask_TEST = 242
EnsureExecuteScheduledTaskResponse_TEST = 243
UpdateScheduledTaskEnableState_TEST = 244
UpdateScheduledTaskEnableStateResponse_TEST = 245
ContentDescriptionDeltaUpdate = 246
Heartbeat = 300
BaseGM = 300
ShellFailed = 301
ExitShells = 307
ExitShell = 308
GracefulExitShell = 309
LicenseProcessingComplete = 316
SetTestFlag = 317
QueuedEmailsComplete = 318
GMReportPHPError = 319
GMDRMSync = 320
PhysicalBoxInventory = 321
UpdateConfigFile = 322
TestInitDB = 323
GMWriteConfigToSQL = 324
GMLoadActivationCodes = 325
GMQueueForFBS = 326
GMSchemaConversionResults = 327
GMSchemaConversionResultsResponse = 328 #: removed
GMWriteShellFailureToSQL = 329
GMWriteStatsToSOS = 330
GMGetServiceMethodRouting = 331
GMGetServiceMethodRoutingResponse = 332
GMConvertUserWallets = 333 #: removed
GMTestNextBuildSchemaConversion = 334
GMTestNextBuildSchemaConversionResponse = 335
ExpectShellRestart = 336
HotFixProgress = 337
BaseAIS = 400
AISRefreshContentDescription = 401 #: removed
AISRequestContentDescription = 402
AISUpdateAppInfo = 403
# AISUpdatePackageInfo = 404 #: removed
AISUpdatePackageCosts = 404 #: removed
AISGetPackageChangeNumber = 405
AISGetPackageChangeNumberResponse = 406
AISAppInfoTableChanged = 407 #: removed
AISUpdatePackageCostsResponse = 408 #: removed
AISCreateMarketingMessage = 409 #: removed
AISCreateMarketingMessageResponse = 410 #: removed
AISGetMarketingMessage = 411 #: removed
AISGetMarketingMessageResponse = 412 #: removed
AISUpdateMarketingMessage = 413 #: removed
AISUpdateMarketingMessageResponse = 414 #: removed
AISRequestMarketingMessageUpdate = 415 #: removed
AISDeleteMarketingMessage = 416 #: removed
AISGetMarketingTreatments = 419 #: removed
AISGetMarketingTreatmentsResponse = 420 #: removed
AISRequestMarketingTreatmentUpdate = 421 #: removed
AISTestAddPackage = 422 #: removed
AIGetAppGCFlags = 423
AIGetAppGCFlagsResponse = 424
AIGetAppList = 425
AIGetAppListResponse = 426
AIGetAppInfo = 427 #: removed
AIGetAppInfoResponse = 428 #: removed
AISGetCouponDefinition = 429
AISGetCouponDefinitionResponse = 430
AISUpdateSlaveContentDescription = 431
AISUpdateSlaveContentDescriptionResponse = 432
AISTestEnableGC = 433
BaseAM = 500
AMUpdateUserBanRequest = 504
AMAddLicense = 505
AMBeginProcessingLicenses = 507 #: removed
AMSendSystemIMToUser = 508
AMExtendLicense = 509
AMAddMinutesToLicense = 510
AMCancelLicense = 511
AMInitPurchase = 512
AMPurchaseResponse = 513
AMGetFinalPrice = 514
AMGetFinalPriceResponse = 515
AMGetLegacyGameKey = 516
AMGetLegacyGameKeyResponse = 517
AMFindHungTransactions = 518
AMSetAccountTrustedRequest = 519
AMCompletePurchase = 521 #: removed
AMCancelPurchase = 522
AMNewChallenge = 523
AMLoadOEMTickets = 524
AMFixPendingPurchase = 525
AMFixPendingPurchaseResponse = 526
AMIsUserBanned = 527
AMRegisterKey = 528
AMLoadActivationCodes = 529
AMLoadActivationCodesResponse = 530
AMLookupKeyResponse = 531
AMLookupKey = 532
AMChatCleanup = 533
AMClanCleanup = 534
AMFixPendingRefund = 535
AMReverseChargeback = 536
AMReverseChargebackResponse = 537
AMClanCleanupList = 538
AMGetLicenses = 539
AMGetLicensesResponse = 540
AMSendCartRepurchase = 541
AMSendCartRepurchaseResponse = 542
AllowUserToPlayQuery = 550
AllowUserToPlayResponse = 551
AMVerfiyUser = 552
AMClientNotPlaying = 553
AMClientRequestFriendship = 554
AMRelayPublishStatus = 555
AMResetCommunityContent = 556 #: removed
AMPrimePersonaStateCache = 557 #: removed
AMAllowUserContentQuery = 558 #: removed
AMAllowUserContentResponse = 559 #: removed
AMInitPurchaseResponse = 560
AMRevokePurchaseResponse = 561
AMLockProfile = 562 #: removed
AMRefreshGuestPasses = 563
AMInviteUserToClan = 564 #: removed
AMAcknowledgeClanInvite = 565 #: removed
AMGrantGuestPasses = 566
AMClanDataUpdated = 567
AMReloadAccount = 568
AMClientChatMsgRelay = 569
AMChatMulti = 570
AMClientChatInviteRelay = 571
AMChatInvite = 572
AMClientJoinChatRelay = 573
AMClientChatMemberInfoRelay = 574
AMPublishChatMemberInfo = 575
AMClientAcceptFriendInvite = 576
AMChatEnter = 577
AMClientPublishRemovalFromSource = 578
AMChatActionResult = 579
AMFindAccounts = 580
AMFindAccountsResponse = 581
AMRequestAccountData = 582
AMRequestAccountDataResponse = 583
AMSetAccountFlags = 584
AMCreateClan = 586
AMCreateClanResponse = 587
AMGetClanDetails = 588
AMGetClanDetailsResponse = 589
AMSetPersonaName = 590
AMSetAvatar = 591
AMAuthenticateUser = 592
AMAuthenticateUserResponse = 593
AMGetAccountFriendsCount = 594 #: removed
AMGetAccountFriendsCountResponse = 595 #: removed
AMP2PIntroducerMessage = 596
ClientChatAction = 597
AMClientChatActionRelay = 598
ReqChallenge = 600
BaseVS = 600
VACResponse = 601
ReqChallengeTest = 602
VSMarkCheat = 604
VSAddCheat = 605
VSPurgeCodeModDB = 606
VSGetChallengeResults = 607
VSChallengeResultText = 608
VSReportLingerer = 609
VSRequestManagedChallenge = 610
VSLoadDBFinished = 611
BaseDRMS = 625
DRMBuildBlobRequest = 628
DRMBuildBlobResponse = 629
DRMResolveGuidRequest = 630
DRMResolveGuidResponse = 631
DRMVariabilityReport = 633
DRMVariabilityReportResponse = 634
DRMStabilityReport = 635
DRMStabilityReportResponse = 636
DRMDetailsReportRequest = 637
DRMDetailsReportResponse = 638
DRMProcessFile = 639
DRMAdminUpdate = 640
DRMAdminUpdateResponse = 641
DRMSync = 642
DRMSyncResponse = 643
DRMProcessFileResponse = 644
DRMEmptyGuidCache = 645
DRMEmptyGuidCacheResponse = 646
BaseCS = 650
CSUserContentRequest = 652 #: removed
BaseClient = 700
ClientLogOn_Deprecated = 701 #: removed
ClientAnonLogOn_Deprecated = 702 #: removed
ClientHeartBeat = 703
ClientVACResponse = 704
ClientGamesPlayed_obsolete = 705 #: removed
ClientLogOff = 706
ClientNoUDPConnectivity = 707
ClientInformOfCreateAccount = 708 #: removed
ClientAckVACBan = 709 #: removed
ClientConnectionStats = 710
ClientInitPurchase = 711 #: removed
ClientPingResponse = 712
ClientRemoveFriend = 714
ClientGamesPlayedNoDataBlob = 715
ClientChangeStatus = 716
ClientVacStatusResponse = 717
ClientFriendMsg = 718
ClientGameConnect_obsolete = 719 #: removed
ClientGamesPlayed2_obsolete = 720 #: removed
ClientGameEnded_obsolete = 721 #: removed
ClientGetFinalPrice = 722 #: removed
ClientSystemIM = 726
ClientSystemIMAck = 727
ClientGetLicenses = 728
ClientCancelLicense = 729 #: removed
ClientGetLegacyGameKey = 730
ClientContentServerLogOn_Deprecated = 731 #: removed
ClientAckVACBan2 = 732
ClientAckMessageByGID = 735 #: removed
ClientGetPurchaseReceipts = 736
ClientAckPurchaseReceipt = 737 #: removed
ClientGamesPlayed3_obsolete = 738 #: removed
ClientSendGuestPass = 739 #: removed
ClientAckGuestPass = 740
ClientRedeemGuestPass = 741
ClientGamesPlayed = 742
ClientRegisterKey = 743
ClientInviteUserToClan = 744
ClientAcknowledgeClanInvite = 745
ClientPurchaseWithMachineID = 746
ClientAppUsageEvent = 747
ClientGetGiftTargetList = 748 #: removed
ClientGetGiftTargetListResponse = 749 #: removed
ClientLogOnResponse = 751
ClientVACChallenge = 753 #: removed
ClientSetHeartbeatRate = 755
ClientNotLoggedOnDeprecated = 756 #: removed
ClientLoggedOff = 757
GSApprove = 758
GSDeny = 759
GSKick = 760
ClientCreateAcctResponse = 761
ClientPurchaseResponse = 763
ClientPing = 764
ClientNOP = 765
ClientPersonaState = 766
ClientFriendsList = 767
ClientAccountInfo = 768
ClientVacStatusQuery = 770 #: removed
ClientNewsUpdate = 771
ClientGameConnectDeny = 773
GSStatusReply = 774
ClientGetFinalPriceResponse = 775 #: removed
ClientGameConnectTokens = 779
ClientLicenseList = 780
ClientCancelLicenseResponse = 781 #: removed
ClientVACBanStatus = 782
ClientCMList = 783
ClientEncryptPct = 784
ClientGetLegacyGameKeyResponse = 785
ClientFavoritesList = 786 #: removed
CSUserContentApprove = 787 #: removed
CSUserContentDeny = 788 #: removed
ClientInitPurchaseResponse = 789 #: removed
ClientAddFriend = 791
ClientAddFriendResponse = 792
ClientInviteFriend = 793 #: removed
ClientInviteFriendResponse = 794 #: removed
ClientSendGuestPassResponse = 795 #: removed
ClientAckGuestPassResponse = 796
ClientRedeemGuestPassResponse = 797
ClientUpdateGuestPassesList = 798
ClientChatMsg = 799
ClientChatInvite = 800
ClientJoinChat = 801
ClientChatMemberInfo = 802
ClientLogOnWithCredentials_Deprecated = 803 #: removed
ClientPasswordChangeResponse = 805
ClientChatEnter = 807
ClientFriendRemovedFromSource = 808
ClientCreateChat = 809
ClientCreateChatResponse = 810
ClientUpdateChatMetadata = 811 #: removed
ClientP2PIntroducerMessage = 813
ClientChatActionResult = 814
ClientRequestFriendData = 815
ClientGetUserStats = 818
ClientGetUserStatsResponse = 819
ClientStoreUserStats = 820
ClientStoreUserStatsResponse = 821
ClientClanState = 822
ClientServiceModule = 830
ClientServiceCall = 831
ClientServiceCallResponse = 832
ClientPackageInfoRequest = 833 #: removed
ClientPackageInfoResponse = 834 #: removed
ClientNatTraversalStatEvent = 839
ClientAppInfoRequest = 840 #: removed
ClientAppInfoResponse = 841 #: removed
ClientSteamUsageEvent = 842
ClientCheckPassword = <PASSWORD>
ClientResetPassword = <PASSWORD>
ClientCheckPasswordResponse = 848
ClientResetPasswordResponse = 849
ClientSessionToken = 850
ClientDRMProblemReport = 851
ClientSetIgnoreFriend = 855
ClientSetIgnoreFriendResponse = 856
ClientGetAppOwnershipTicket = 857
ClientGetAppOwnershipTicketResponse = 858
ClientGetLobbyListResponse = 860
ClientGetLobbyMetadata = 861 #: removed
ClientGetLobbyMetadataResponse = 862 #: removed
ClientVTTCert = 863 #: removed
ClientAppInfoUpdate = 866 #: removed
ClientAppInfoChanges = 867 #: removed
ClientServerList = 880
ClientEmailChangeResponse = 891 #: removed
ClientSecretQAChangeResponse = 892 #: removed
ClientDRMBlobRequest = 896
ClientDRMBlobResponse = 897
ClientLookupKey = 898 #: removed
ClientLookupKeyResponse = 899 #: removed
BaseGameServer = 900
GSDisconnectNotice = 901
GSStatus = 903
GSUserPlaying = 905
GSStatus2 = 906
GSStatusUpdate_Unused = 907
GSServerType = 908
GSPlayerList = 909
GSGetUserAchievementStatus = 910
GSGetUserAchievementStatusResponse = 911
GSGetPlayStats = 918
GSGetPlayStatsResponse = 919
GSGetUserGroupStatus = 920
AMGetUserGroupStatus = 921
AMGetUserGroupStatusResponse = 922
GSGetUserGroupStatusResponse = 923
GSGetReputation = 936
GSGetReputationResponse = 937
GSAssociateWithClan = 938
GSAssociateWithClanResponse = 939
GSComputeNewPlayerCompatibility = 940
GSComputeNewPlayerCompatibilityResponse = 941
AdminCmd = 1000
BaseAdmin = 1000
AdminCmdResponse = 1004
AdminLogListenRequest = 1005
AdminLogEvent = 1006
LogSearchRequest = 1007 #: removed
LogSearchResponse = 1008 #: removed
LogSearchCancel = 1009 #: removed
UniverseData = 1010
RequestStatHistory = 1014 #: removed
StatHistory = 1015 #: removed
AdminPwLogon = 1017 #: removed
AdminPwLogonResponse = 1018 #: removed
AdminSpew = 1019
AdminConsoleTitle = 1020
AdminGCSpew = 1023
AdminGCCommand = 1024
AdminGCGetCommandList = 1025
AdminGCGetCommandListResponse = 1026
FBSConnectionData = 1027
AdminMsgSpew = 1028
FBSReqVersion = 1100
BaseFBS = 1100
FBSVersionInfo = 1101
FBSForceRefresh = 1102
FBSForceBounce = 1103
FBSDeployPackage = 1104
FBSDeployResponse = 1105
FBSUpdateBootstrapper = 1106
FBSSetState = 1107
FBSApplyOSUpdates = 1108
FBSRunCMDScript = 1109
FBSRebootBox = 1110
FBSSetBigBrotherMode = 1111
FBSMinidumpServer = 1112
FBSSetShellCount_obsolete = 1113 #: removed
FBSDeployHotFixPackage = 1114
FBSDeployHotFixResponse = 1115
FBSDownloadHotFix = 1116
FBSDownloadHotFixResponse = 1117
FBSUpdateTargetConfigFile = 1118
FBSApplyAccountCred = 1119
FBSApplyAccountCredResponse = 1120
FBSSetShellCount = 1121
FBSTerminateShell = 1122
FBSQueryGMForRequest = 1123
FBSQueryGMResponse = 1124
FBSTerminateZombies = 1125
FBSInfoFromBootstrapper = 1126
FBSRebootBoxResponse = 1127
FBSBootstrapperPackageRequest = 1128
FBSBootstrapperPackageResponse = 1129
FBSBootstrapperGetPackageChunk = 1130
FBSBootstrapperGetPackageChunkResponse = 1131
FBSBootstrapperPackageTransferProgress = 1132
FBSRestartBootstrapper = 1133
FBSPauseFrozenDumps = 1134
FileXferRequest = 1200
BaseFileXfer = 1200
FileXferResponse = 1201
FileXferData = 1202
FileXferEnd = 1203
FileXferDataAck = 1204
ChannelAuthChallenge = 1300
BaseChannelAuth = 1300
ChannelAuthResponse = 1301
ChannelAuthResult = 1302
ChannelEncryptRequest = 1303
ChannelEncryptResponse = 1304
ChannelEncryptResult = 1305
BaseBS = 1400
BSPurchaseStart = 1401
BSPurchaseResponse = 1402
BSAuthenticateCCTrans = 1403
BSAuthenticateCCTransResponse = 1404
BSSettleComplete = 1406
BSBannedRequest = 1407 #: removed
BSInitPayPalTxn = 1408
BSInitPayPalTxnResponse = 1409
BSGetPayPalUserInfo = 1410
BSGetPayPalUserInfoResponse = 1411
BSRefundTxn = 1413 #: removed
BSRefundTxnResponse = 1414 #: removed
BSGetEvents = 1415 #: removed
BSChaseRFRRequest = 1416 #: removed
BSPaymentInstrBan = 1417
BSPaymentInstrBanResponse = 1418
BSProcessGCReports = 1419 #: removed
BSProcessPPReports = 1420 #: removed
BSInitGCBankXferTxn = 1421
BSInitGCBankXferTxnResponse = 1422
BSQueryGCBankXferTxn = 1423 #: removed
BSQueryGCBankXferTxnResponse = 1424 #: removed
BSCommitGCTxn = 1425
BSQueryTransactionStatus = 1426
BSQueryTransactionStatusResponse = 1427
BSQueryCBOrderStatus = 1428 #: removed
BSQueryCBOrderStatusResponse = 1429 #: removed
BSRunRedFlagReport = 1430 #: removed
BSQueryPaymentInstUsage = 1431
BSQueryPaymentInstResponse = 1432
BSQueryTxnExtendedInfo = 1433
BSQueryTxnExtendedInfoResponse = 1434
BSUpdateConversionRates = 1435
BSProcessUSBankReports = 1436 #: removed
BSPurchaseRunFraudChecks = 1437
BSPurchaseRunFraudChecksResponse = 1438
BSStartShippingJobs = 1439 #: removed
BSQueryBankInformation = 1440
BSQueryBankInformationResponse = 1441
BSValidateXsollaSignature = 1445
BSValidateXsollaSignatureResponse = 1446
BSQiwiWalletInvoice = 1448
BSQiwiWalletInvoiceResponse = 1449
BSUpdateInventoryFromProPack = 1450
BSUpdateInventoryFromProPackResponse = 1451
BSSendShippingRequest = 1452
BSSendShippingRequestResponse = 1453
BSGetProPackOrderStatus = 1454
BSGetProPackOrderStatusResponse = 1455
BSCheckJobRunning = 1456
BSCheckJobRunningResponse = 1457
BSResetPackagePurchaseRateLimit = 1458
BSResetPackagePurchaseRateLimitResponse = 1459
BSUpdatePaymentData = 1460
BSUpdatePaymentDataResponse = 1461
BSGetBillingAddress = 1462
BSGetBillingAddressResponse = 1463
BSGetCreditCardInfo = 1464
BSGetCreditCardInfoResponse = 1465
BSRemoveExpiredPaymentData = 1468
BSRemoveExpiredPaymentDataResponse = 1469
BSConvertToCurrentKeys = 1470
BSConvertToCurrentKeysResponse = 1471
BSInitPurchase = 1472
BSInitPurchaseResponse = 1473
BSCompletePurchase = 1474
BSCompletePurchaseResponse = 1475
BSPruneCardUsageStats = 1476
BSPruneCardUsageStatsResponse = 1477
BSStoreBankInformation = 1478
BSStoreBankInformationResponse = 1479
BSVerifyPOSAKey = 1480
BSVerifyPOSAKeyResponse = 1481
BSReverseRedeemPOSAKey = 1482
BSReverseRedeemPOSAKeyResponse = 1483
BSQueryFindCreditCard = 1484
BSQueryFindCreditCardResponse = 1485
BSStatusInquiryPOSAKey = 1486
BSStatusInquiryPOSAKeyResponse = 1487
BSValidateMoPaySignature = 1488 #: removed
BSValidateMoPaySignatureResponse = 1489 #: removed
BSMoPayConfirmProductDelivery = 1490 #: removed
BSMoPayConfirmProductDeliveryResponse = 1491 #: removed
BSGenerateMoPayMD5 = 1492 #: removed
BSGenerateMoPayMD5Response = 1493 #: removed
BSBoaCompraConfirmProductDelivery = 1494
BSBoaCompraConfirmProductDeliveryResponse = 1495
BSGenerateBoaCompraMD5 = 1496
BSGenerateBoaCompraMD5Response = 1497
BSCommitWPTxn = 1498
BSCommitAdyenTxn = 1499
BaseATS = 1500
ATSStartStressTest = 1501
ATSStopStressTest = 1502
ATSRunFailServerTest = 1503
ATSUFSPerfTestTask = 1504
ATSUFSPerfTestResponse = 1505
ATSCycleTCM = 1506
ATSInitDRMSStressTest = 1507
ATSCallTest = 1508
ATSCallTestReply = 1509
ATSStartExternalStress = 1510
ATSExternalStressJobStart = 1511
ATSExternalStressJobQueued = 1512
ATSExternalStressJobRunning = 1513
ATSExternalStressJobStopped = 1514
ATSExternalStressJobStopAll = 1515
ATSExternalStressActionResult = 1516
ATSStarted = 1517
ATSCSPerfTestTask = 1518
ATSCSPerfTestResponse = 1519
BaseDP = 1600
DPSetPublishingState = 1601
DPGamePlayedStats = 1602 #: removed
DPUniquePlayersStat = 1603
DPStreamingUniquePlayersStat = 1604
DPVacInfractionStats = 1605 #: removed
DPVacBanStats = 1606 #: removed
DPBlockingStats = 1607
DPNatTraversalStats = 1608
DPSteamUsageEvent = 1609 #: removed
DPVacCertBanStats = 1610 #: removed
DPVacCafeBanStats = 1611 #: removed
DPCloudStats = 1612
DPAchievementStats = 1613
DPAccountCreationStats = 1614 #: removed
DPGetPlayerCount = 1615
DPGetPlayerCountResponse = 1616
DPGameServersPlayersStats = 1617
DPDownloadRateStatistics = 1618 #: removed
DPFacebookStatistics = 1619 #: removed
ClientDPCheckSpecialSurvey = 1620
ClientDPCheckSpecialSurveyResponse = 1621
ClientDPSendSpecialSurveyResponse = 1622
ClientDPSendSpecialSurveyResponseReply = 1623
DPStoreSaleStatistics = 1624
ClientDPUpdateAppJobReport = 1625
ClientDPSteam2AppStarted = 1627 #: removed
DPUpdateContentEvent = 1626
ClientDPUnsignedInstallScript = 1627
DPPartnerMicroTxns = 1628
DPPartnerMicroTxnsResponse = 1629
ClientDPContentStatsReport = 1630
DPVRUniquePlayersStat = 1631
BaseCM = 1700
CMSetAllowState = 1701
CMSpewAllowState = 1702
CMSessionRejected = 1703
CMSetSecrets = 1704
CMGetSecrets = 1705
BaseDSS = 1800 #: removed
DSSNewFile = 1801 #: removed
DSSCurrentFileList = 1802 #: removed
DSSSynchList = 1803 #: removed
DSSSynchListResponse = 1804 #: removed
DSSSynchSubscribe = 1805 #: removed
DSSSynchUnsubscribe = 1806 #: removed
BaseEPM = 1900 #: removed
EPMStartProcess = 1901 #: removed
EPMStopProcess = 1902 #: removed
EPMRestartProcess = 1903 #: removed
GCSendClient = 2200 #: removed
BaseGC = 2200
AMRelayToGC = 2201 #: removed
GCUpdatePlayedState = 2202 #: removed
GCCmdRevive = 2203
GCCmdBounce = 2204 #: removed
GCCmdForceBounce = 2205 #: removed
GCCmdDown = 2206
GCCmdDeploy = 2207
GCCmdDeployResponse = 2208
GCCmdSwitch = 2209
AMRefreshSessions = 2210
GCUpdateGSState = 2211 #: removed
GCAchievementAwarded = 2212
GCSystemMessage = 2213
GCValidateSession = 2214 #: removed
GCValidateSessionResponse = 2215 #: removed
GCCmdStatus = 2216
GCRegisterWebInterfaces_Deprecated = 2217 #: removed
GCGetAccountDetails_DEPRECATED = 2218 #: removed
GCInterAppMessage = 2219
GCGetEmailTemplate = 2220
GCGetEmailTemplateResponse = 2221
GCHRelay = 2222
GCHRelayToClient = 2223
GCHUpdateSession = 2224
GCHRequestUpdateSession = 2225
GCHRequestStatus = 2226
GCHRequestStatusResponse = 2227
GCHAccountVacStatusChange = 2228
GCHSpawnGC = 2229
GCHSpawnGCResponse = 2230
GCHKillGC = 2231
GCHKillGCResponse = 2232
GCHAccountTradeBanStatusChange = 2233
GCHAccountLockStatusChange = 2234
GCHVacVerificationChange = 2235
GCHAccountPhoneNumberChange = 2236
GCHAccountTwoFactorChange = 2237
GCHInviteUserToLobby = 2238
BaseP2P = 2500
P2PIntroducerMessage = 2502
BaseSM = 2900
SMExpensiveReport = 2902
SMHourlyReport = 2903
SMFishingReport = 2904 #: removed
SMPartitionRenames = 2905
SMMonitorSpace = 2906
SMTestNextBuildSchemaConversion = 2907
SMTestNextBuildSchemaConversionResponse = 2908
BaseTest = 3000
FailServer = 3000
JobHeartbeatTest = 3001
JobHeartbeatTestResponse = 3002
BaseFTSRange = 3100
FTSGetBrowseCounts = 3101 #: removed
FTSGetBrowseCountsResponse = 3102 #: removed
FTSBrowseClans = 3103 #: removed
FTSBrowseClansResponse = 3104 #: removed
FTSSearchClansByLocation = 3105 #: removed
FTSSearchClansByLocationResponse = 3106 #: removed
FTSSearchPlayersByLocation = 3107 #: removed
FTSSearchPlayersByLocationResponse = 3108 #: removed
FTSClanDeleted = 3109 #: removed
FTSSearch = 3110 #: removed
FTSSearchResponse = 3111 #: removed
FTSSearchStatus = 3112 #: removed
FTSSearchStatusResponse = 3113 #: removed
FTSGetGSPlayStats = 3114 #: removed
FTSGetGSPlayStatsResponse = 3115 #: removed
FTSGetGSPlayStatsForServer = 3116 #: removed
FTSGetGSPlayStatsForServerResponse = 3117 #: removed
FTSReportIPUpdates = 3118 #: removed
BaseCCSRange = 3150
CCSGetComments = 3151 #: removed
CCSGetCommentsResponse = 3152 #: removed
CCSAddComment = 3153 #: removed
CCSAddCommentResponse = 3154 #: removed
CCSDeleteComment = 3155 #: removed
CCSDeleteCommentResponse = 3156 #: removed
CCSPreloadComments = 3157 #: removed
CCSNotifyCommentCount = 3158 #: removed
CCSGetCommentsForNews = 3159 #: removed
CCSGetCommentsForNewsResponse = 3160 #: removed
CCSDeleteAllCommentsByAuthor = 3161
CCSDeleteAllCommentsByAuthorResponse = 3162
BaseLBSRange = 3200
LBSSetScore = 3201
LBSSetScoreResponse = 3202
LBSFindOrCreateLB = 3203
LBSFindOrCreateLBResponse = 3204
LBSGetLBEntries = 3205
LBSGetLBEntriesResponse = 3206
LBSGetLBList = 3207
LBSGetLBListResponse = 3208
LBSSetLBDetails = 3209
LBSDeleteLB = 3210
LBSDeleteLBEntry = 3211
LBSResetLB = 3212
LBSResetLBResponse = 3213
LBSDeleteLBResponse = 3214
BaseOGS = 3400
OGSBeginSession = 3401
OGSBeginSessionResponse = 3402
OGSEndSession = 3403
OGSEndSessionResponse = 3404
OGSWriteAppSessionRow = 3406
BaseBRP = 3600
BRPStartShippingJobs = 3601
BRPProcessUSBankReports = 3602
BRPProcessGCReports = 3603
BRPProcessPPReports = 3604
BRPSettleNOVA = 3605 #: removed
BRPSettleCB = 3606 #: removed
BRPCommitGC = 3607
BRPCommitGCResponse = 3608
BRPFindHungTransactions = 3609
BRPCheckFinanceCloseOutDate = 3610
BRPProcessLicenses = 3611
BRPProcessLicensesResponse = 3612
BRPRemoveExpiredPaymentData = 3613
BRPRemoveExpiredPaymentDataResponse = 3614
BRPConvertToCurrentKeys = 3615
BRPConvertToCurrentKeysResponse = 3616
BRPPruneCardUsageStats = 3617
BRPPruneCardUsageStatsResponse = 3618
BRPCheckActivationCodes = 3619
BRPCheckActivationCodesResponse = 3620
BRPCommitWP = 3621
BRPCommitWPResponse = 3622
BRPProcessWPReports = 3623
BRPProcessPaymentRules = 3624
BRPProcessPartnerPayments = 3625
BRPCheckSettlementReports = 3626
BRPPostTaxToAvalara = 3628
BRPPostTransactionTax = 3629
BRPPostTransactionTaxResponse = 3630
BRPProcessIMReports = 3631
BaseAMRange2 = 4000
AMCreateChat = 4001
AMCreateChatResponse = 4002
AMUpdateChatMetadata = 4003 #: removed
AMPublishChatMetadata = 4004 #: removed
AMSetProfileURL = 4005
AMGetAccountEmailAddress = 4006
AMGetAccountEmailAddressResponse = 4007
# AMRequestFriendData = 4008 #: removed
AMRequestClanData = 4008
AMRouteToClients = 4009
AMLeaveClan = 4010
AMClanPermissions = 4011
AMClanPermissionsResponse = 4012
AMCreateClanEventDummyForRateLimiting = 4013
# NOTE(review): 4015 is assigned twice (here and AMUpdateClanEvent below) and
# this run is out of numeric order (4013, 4015, 4014, 4015, 4016). In a Python
# Enum the later duplicate becomes an alias of the earlier name. This may be an
# upstream-intended alias or a transcription error — confirm against the
# canonical Steam EMsg list before relying on either name.
AMUpdateClanEventDummyForRateLimiting = 4015
AMCreateClanEventResponse = 4014
AMUpdateClanEvent = 4015
AMUpdateClanEventResponse = 4016
AMGetClanEvents = 4017
AMGetClanEventsResponse = 4018
AMDeleteClanEvent = 4019
AMDeleteClanEventResponse = 4020
AMSetClanPermissionSettings = 4021
AMSetClanPermissionSettingsResponse = 4022
AMGetClanPermissionSettings = 4023
AMGetClanPermissionSettingsResponse = 4024
AMPublishChatRoomInfo = 4025
ClientChatRoomInfo = 4026
AMCreateClanAnnouncement = 4027 #: removed
AMCreateClanAnnouncementResponse = 4028 #: removed
AMUpdateClanAnnouncement = 4029 #: removed
AMUpdateClanAnnouncementResponse = 4030 #: removed
AMGetClanAnnouncementsCount = 4031 #: removed
AMGetClanAnnouncementsCountResponse = 4032 #: removed
AMGetClanAnnouncements = 4033 #: removed
AMGetClanAnnouncementsResponse = 4034 #: removed
AMDeleteClanAnnouncement = 4035 #: removed
AMDeleteClanAnnouncementResponse = 4036 #: removed
AMGetSingleClanAnnouncement = 4037 #: removed
AMGetSingleClanAnnouncementResponse = 4038 #: removed
AMGetClanHistory = 4039
AMGetClanHistoryResponse = 4040
AMGetClanPermissionBits = 4041
AMGetClanPermissionBitsResponse = 4042
AMSetClanPermissionBits = 4043
AMSetClanPermissionBitsResponse = 4044
AMSessionInfoRequest = 4045
AMSessionInfoResponse = 4046
AMValidateWGToken = 4047
AMGetSingleClanEvent = 4048
AMGetSingleClanEventResponse = 4049
AMGetClanRank = 4050
AMGetClanRankResponse = 4051
AMSetClanRank = 4052
AMSetClanRankResponse = 4053
AMGetClanPOTW = 4054
AMGetClanPOTWResponse = 4055
AMSetClanPOTW = 4056
AMSetClanPOTWResponse = 4057
AMRequestChatMetadata = 4058 #: removed
AMDumpUser = 4059
AMKickUserFromClan = 4060
AMAddFounderToClan = 4061
AMValidateWGTokenResponse = 4062
AMSetCommunityState = 4063
AMSetAccountDetails = 4064
AMGetChatBanList = 4065
AMGetChatBanListResponse = 4066
AMUnBanFromChat = 4067
AMSetClanDetails = 4068
AMGetAccountLinks = 4069
AMGetAccountLinksResponse = 4070
AMSetAccountLinks = 4071
AMSetAccountLinksResponse = 4072
UGSGetUserGameStats = 4073
UGSGetUserGameStatsResponse = 4074
AMCheckClanMembership = 4075
AMGetClanMembers = 4076
AMGetClanMembersResponse = 4077
AMJoinPublicClan = 4078
AMNotifyChatOfClanChange = 4079
AMResubmitPurchase = 4080
AMAddFriend = 4081
AMAddFriendResponse = 4082
AMRemoveFriend = 4083
AMDumpClan = 4084
AMChangeClanOwner = 4085
AMCancelEasyCollect = 4086
AMCancelEasyCollectResponse = 4087
AMGetClanMembershipList = 4088 #: removed
AMGetClanMembershipListResponse = 4089 #: removed
AMClansInCommon = 4090
AMClansInCommonResponse = 4091
AMIsValidAccountID = 4092
AMConvertClan = 4093
AMGetGiftTargetListRelay = 4094 #: removed
AMWipeFriendsList = 4095
AMSetIgnored = 4096
AMClansInCommonCountResponse = 4097
AMFriendsList = 4098
AMFriendsListResponse = 4099
AMFriendsInCommon = 4100
AMFriendsInCommonResponse = 4101
AMFriendsInCommonCountResponse = 4102
AMClansInCommonCount = 4103
AMChallengeVerdict = 4104
AMChallengeNotification = 4105
AMFindGSByIP = 4106
AMFoundGSByIP = 4107
AMGiftRevoked = 4108
AMCreateAccountRecord = 4109
AMUserClanList = 4110
AMUserClanListResponse = 4111
AMGetAccountDetails2 = 4112
AMGetAccountDetailsResponse2 = 4113
AMSetCommunityProfileSettings = 4114
AMSetCommunityProfileSettingsResponse = 4115
AMGetCommunityPrivacyState = 4116
AMGetCommunityPrivacyStateResponse = 4117
AMCheckClanInviteRateLimiting = 4118
UGSGetUserAchievementStatus = 4119
AMGetIgnored = 4120
AMGetIgnoredResponse = 4121
AMSetIgnoredResponse = 4122
AMSetFriendRelationshipNone = 4123
AMGetFriendRelationship = 4124
AMGetFriendRelationshipResponse = 4125
AMServiceModulesCache = 4126
AMServiceModulesCall = 4127
AMServiceModulesCallResponse = 4128
AMGetCaptchaDataForIP = 4129
AMGetCaptchaDataForIPResponse = 4130
AMValidateCaptchaDataForIP = 4131
AMValidateCaptchaDataForIPResponse = 4132
AMTrackFailedAuthByIP = 4133
AMGetCaptchaDataByGID = 4134
AMGetCaptchaDataByGIDResponse = 4135
AMGetLobbyList = 4136 #: removed
AMGetLobbyListResponse = 4137 #: removed
AMGetLobbyMetadata = 4138 #: removed
AMGetLobbyMetadataResponse = 4139 #: removed
CommunityAddFriendNews = 4140
AMAddClanNews = 4141 #: removed
AMWriteNews = 4142 #: removed
AMFindClanUser = 4143
AMFindClanUserResponse = 4144
AMBanFromChat = 4145
AMGetUserHistoryResponse = 4146 #: removed
AMGetUserNewsSubscriptions = 4147
AMGetUserNewsSubscriptionsResponse = 4148
AMSetUserNewsSubscriptions = 4149
AMGetUserNews = 4150 #: removed
AMGetUserNewsResponse = 4151 #: removed
AMSendQueuedEmails = 4152
AMSetLicenseFlags = 4153
AMGetUserHistory = 4154 #: removed
CommunityDeleteUserNews = 4155
AMAllowUserFilesRequest = 4156
AMAllowUserFilesResponse = 4157
AMGetAccountStatus = 4158
AMGetAccountStatusResponse = 4159
AMEditBanReason = 4160
AMCheckClanMembershipResponse = 4161
AMProbeClanMembershipList = 4162
AMProbeClanMembershipListResponse = 4163
UGSGetUserAchievementStatusResponse = 4164
AMGetFriendsLobbies = 4165
AMGetFriendsLobbiesResponse = 4166
AMGetUserFriendNewsResponse = 4172
CommunityGetUserFriendNews = 4173
AMGetUserClansNewsResponse = 4174
AMGetUserClansNews = 4175
AMStoreInitPurchase = 4176 #: removed
AMStoreInitPurchaseResponse = 4177 #: removed
AMStoreGetFinalPrice = 4178 #: removed
AMStoreGetFinalPriceResponse = 4179 #: removed
AMStoreCompletePurchase = 4180 #: removed
AMStoreCancelPurchase = 4181 #: removed
AMStorePurchaseResponse = 4182 #: removed
AMCreateAccountRecordInSteam3 = 4183 #: removed
AMGetPreviousCBAccount = 4184
AMGetPreviousCBAccountResponse = 4185
AMUpdateBillingAddress = 4186 #: removed
AMUpdateBillingAddressResponse = 4187 #: removed
AMGetBillingAddress = 4188 #: removed
AMGetBillingAddressResponse = 4189 #: removed
AMGetUserLicenseHistory = 4190
AMGetUserLicenseHistoryResponse = 4191
# Restored numeric message ID: the value had been replaced by a "<PASSWORD>"
# redaction placeholder, which is not valid Python. 4194 matches the upstream
# Steam EMsg enumeration (IDs 4192-4193 are unassigned in this range).
AMSupportChangePassword = 4194
AMSupportChangeEmail = 4195
AMSupportChangeSecretQA = 4196 #: removed
AMResetUserVerificationGSByIP = 4197
AMUpdateGSPlayStats = 4198
AMSupportEnableOrDisable = 4199
AMGetComments = 4200 #: removed
AMGetCommentsResponse = 4201 #: removed
AMAddComment = 4202 #: removed
AMAddCommentResponse = 4203 #: removed
AMDeleteComment = 4204 #: removed
AMDeleteCommentResponse = 4205 #: removed
AMGetPurchaseStatus = 4206
AMSupportIsAccountEnabled = 4209
AMSupportIsAccountEnabledResponse = 4210
UGSGetUserStats = 4211
AMSupportKickSession = 4212
AMGSSearch = 4213
MarketingMessageUpdate = 4216
ChatServerRouteFriendMsg = 4219
AMTicketAuthRequestOrResponse = 4220
AMVerifyDepotManagementRights = 4222
AMVerifyDepotManagementRightsResponse = 4223
AMAddFreeLicense = 4224
AMGetUserFriendsMinutesPlayed = 4225 #: removed
AMGetUserFriendsMinutesPlayedResponse = 4226 #: removed
AMGetUserMinutesPlayed = 4227 #: removed
AMGetUserMinutesPlayedResponse = 4228 #: removed
AMValidateEmailLink = 4231
AMValidateEmailLinkResponse = 4232
AMAddUsersToMarketingTreatment = 4234 #: removed
UGSStoreUserStats = 4236
AMGetUserGameplayInfo = 4237 #: removed
AMGetUserGameplayInfoResponse = 4238 #: removed
AMGetCardList = 4239 #: removed
AMGetCardListResponse = 4240 #: removed
AMDeleteStoredCard = 4241
AMRevokeLegacyGameKeys = 4242
AMGetWalletDetails = 4244
AMGetWalletDetailsResponse = 4245
AMDeleteStoredPaymentInfo = 4246
AMGetStoredPaymentSummary = 4247
AMGetStoredPaymentSummaryResponse = 4248
AMGetWalletConversionRate = 4249
AMGetWalletConversionRateResponse = 4250
AMConvertWallet = 4251
AMConvertWalletResponse = 4252
AMRelayGetFriendsWhoPlayGame = 4253 #: removed
AMRelayGetFriendsWhoPlayGameResponse = 4254 #: removed
AMSetPreApproval = 4255
AMSetPreApprovalResponse = 4256
AMMarketingTreatmentUpdate = 4257 #: removed
AMCreateRefund = 4258
AMCreateRefundResponse = 4259
AMCreateChargeback = 4260
AMCreateChargebackResponse = 4261
AMCreateDispute = 4262
AMCreateDisputeResponse = 4263
AMClearDispute = 4264
AMCreateFinancialAdjustment = 4265
AMPlayerNicknameList = 4266
AMPlayerNicknameListResponse = 4267
AMSetDRMTestConfig = 4268
AMGetUserCurrentGameInfo = 4269
AMGetUserCurrentGameInfoResponse = 4270
AMGetGSPlayerList = 4271
AMGetGSPlayerListResponse = 4272
AMUpdatePersonaStateCache = 4275 #: removed
AMGetGameMembers = 4276
AMGetGameMembersResponse = 4277
AMGetSteamIDForMicroTxn = 4278
AMGetSteamIDForMicroTxnResponse = 4279
AMSetPartnerMember = 4280
AMRemovePublisherUser = 4281
AMGetUserLicenseList = 4282
AMGetUserLicenseListResponse = 4283
AMReloadGameGroupPolicy = 4284
AMAddFreeLicenseResponse = 4285
AMVACStatusUpdate = 4286
AMGetAccountDetails = 4287
AMGetAccountDetailsResponse = 4288
AMGetPlayerLinkDetails = 4289
AMGetPlayerLinkDetailsResponse = 4290
AMSubscribeToPersonaFeed = 4291 #: removed
AMGetUserVacBanList = 4292 #: removed
AMGetUserVacBanListResponse = 4293 #: removed
AMGetAccountFlagsForWGSpoofing = 4294
AMGetAccountFlagsForWGSpoofingResponse = 4295
AMGetFriendsWishlistInfo = 4296 #: removed
AMGetFriendsWishlistInfoResponse = 4297 #: removed
AMGetClanOfficers = 4298
AMGetClanOfficersResponse = 4299
AMNameChange = 4300
AMGetNameHistory = 4301
AMGetNameHistoryResponse = 4302
AMUpdateProviderStatus = 4305
AMClearPersonaMetadataBlob = 4306 #: removed
AMSupportRemoveAccountSecurity = 4307
AMIsAccountInCaptchaGracePeriod = 4308
AMIsAccountInCaptchaGracePeriodResponse = 4309
AMAccountPS3Unlink = 4310
AMAccountPS3UnlinkResponse = 4311
UGSStoreUserStatsResponse = 4312
AMGetAccountPSNInfo = 4313
AMGetAccountPSNInfoResponse = 4314
AMAuthenticatedPlayerList = 4315
AMGetUserGifts = 4316
AMGetUserGiftsResponse = 4317
AMTransferLockedGifts = 4320
AMTransferLockedGiftsResponse = 4321
AMPlayerHostedOnGameServer = 4322
AMGetAccountBanInfo = 4323
AMGetAccountBanInfoResponse = 4324
AMRecordBanEnforcement = 4325
AMRollbackGiftTransfer = 4326
AMRollbackGiftTransferResponse = 4327
AMHandlePendingTransaction = 4328
AMRequestClanDetails = 4329
AMDeleteStoredPaypalAgreement = 4330
AMGameServerUpdate = 4331
AMGameServerRemove = 4332
AMGetPaypalAgreements = 4333
AMGetPaypalAgreementsResponse = 4334
AMGameServerPlayerCompatibilityCheck = 4335
AMGameServerPlayerCompatibilityCheckResponse = 4336
AMRenewLicense = 4337
AMGetAccountCommunityBanInfo = 4338
AMGetAccountCommunityBanInfoResponse = 4339
# Restored numeric message ID: the value had been replaced by a "<PASSWORD>"
# redaction placeholder, which is not valid Python. 4340 is the only value
# between the adjacent entries 4339 and 4341.
AMGameServerAccountChangePassword = 4340
AMGameServerAccountDeleteAccount = 4341
AMRenewAgreement = 4342
AMSendEmail = 4343 #: removed
AMXsollaPayment = 4344
AMXsollaPaymentResponse = 4345
AMAcctAllowedToPurchase = 4346
AMAcctAllowedToPurchaseResponse = 4347
AMSwapKioskDeposit = 4348
AMSwapKioskDepositResponse = 4349
AMSetUserGiftUnowned = 4350
AMSetUserGiftUnownedResponse = 4351
AMClaimUnownedUserGift = 4352
AMClaimUnownedUserGiftResponse = 4353
AMSetClanName = 4354
AMSetClanNameResponse = 4355
AMGrantCoupon = 4356
AMGrantCouponResponse = 4357
AMIsPackageRestrictedInUserCountry = 4358
AMIsPackageRestrictedInUserCountryResponse = 4359
AMHandlePendingTransactionResponse = 4360
AMGrantGuestPasses2 = 4361
AMGrantGuestPasses2Response = 4362
AMSessionQuery = 4363
AMSessionQueryResponse = 4364
AMGetPlayerBanDetails = 4365
AMGetPlayerBanDetailsResponse = 4366
AMFinalizePurchase = 4367
AMFinalizePurchaseResponse = 4368
AMPersonaChangeResponse = 4372
AMGetClanDetailsForForumCreation = 4373
AMGetClanDetailsForForumCreationResponse = 4374
AMGetPendingNotificationCount = 4375
AMGetPendingNotificationCountResponse = 4376
AMPasswordHashUpgrade = 4377
AMMoPayPayment = 4378
AMMoPayPaymentResponse = 4379
AMBoaCompraPayment = 4380
AMBoaCompraPaymentResponse = 4381
AMExpireCaptchaByGID = 4382
AMCompleteExternalPurchase = 4383
AMCompleteExternalPurchaseResponse = 4384
AMResolveNegativeWalletCredits = 4385
AMResolveNegativeWalletCreditsResponse = 4386
AMPayelpPayment = 4387
AMPayelpPaymentResponse = 4388
AMPlayerGetClanBasicDetails = 4389
AMPlayerGetClanBasicDetailsResponse = 4390
AMMOLPayment = 4391
AMMOLPaymentResponse = 4392
GetUserIPCountry = 4393
GetUserIPCountryResponse = 4394
NotificationOfSuspiciousActivity = 4395
AMDegicaPayment = 4396
AMDegicaPaymentResponse = 4397
AMEClubPayment = 4398
AMEClubPaymentResponse = 4399
AMPayPalPaymentsHubPayment = 4400
AMPayPalPaymentsHubPaymentResponse = 4401
AMTwoFactorRecoverAuthenticatorRequest = 4402
AMTwoFactorRecoverAuthenticatorResponse = 4403
AMSmart2PayPayment = 4404
AMSmart2PayPaymentResponse = 4405
AMValidatePasswordResetCodeAndSendSmsRequest = 4406
AMValidatePasswordResetCodeAndSendSmsResponse = 4407
AMGetAccountResetDetailsRequest = 4408
AMGetAccountResetDetailsResponse = 4409
AMBitPayPayment = 4410
AMBitPayPaymentResponse = 4411
AMSendAccountInfoUpdate = 4412
AMSendScheduledGift = 4413
AMNodwinPayment = 4414
AMNodwinPaymentResponse = 4415
AMResolveWalletRevoke = 4416
AMResolveWalletReverseRevoke = 4417
AMFundedPayment = 4418
AMFundedPaymentResponse = 4419
AMRequestPersonaUpdateForChatServer = 4420
AMPerfectWorldPayment = 4421
AMPerfectWorldPaymentResponse = 4422
BasePSRange = 5000
PSCreateShoppingCart = 5001
PSCreateShoppingCartResponse = 5002
PSIsValidShoppingCart = 5003
PSIsValidShoppingCartResponse = 5004
PSAddPackageToShoppingCart = 5005
PSAddPackageToShoppingCartResponse = 5006
PSRemoveLineItemFromShoppingCart = 5007
PSRemoveLineItemFromShoppingCartResponse = 5008
PSGetShoppingCartContents = 5009
PSGetShoppingCartContentsResponse = 5010
PSAddWalletCreditToShoppingCart = 5011
PSAddWalletCreditToShoppingCartResponse = 5012
BaseUFSRange = 5200
ClientUFSUploadFileRequest = 5202
ClientUFSUploadFileResponse = 5203
ClientUFSUploadFileChunk = 5204
ClientUFSUploadFileFinished = 5205
ClientUFSGetFileListForApp = 5206
ClientUFSGetFileListForAppResponse = 5207
ClientUFSDownloadRequest = 5210
ClientUFSDownloadResponse = 5211
ClientUFSDownloadChunk = 5212
ClientUFSLoginRequest = 5213
ClientUFSLoginResponse = 5214
UFSReloadPartitionInfo = 5215
ClientUFSTransferHeartbeat = 5216
UFSSynchronizeFile = 5217
UFSSynchronizeFileResponse = 5218
ClientUFSDeleteFileRequest = 5219
ClientUFSDeleteFileResponse = 5220
UFSDownloadRequest = 5221
UFSDownloadResponse = 5222
UFSDownloadChunk = 5223
ClientUFSGetUGCDetails = 5226
ClientUFSGetUGCDetailsResponse = 5227
UFSUpdateFileFlags = 5228
UFSUpdateFileFlagsResponse = 5229
ClientUFSGetSingleFileInfo = 5230
ClientUFSGetSingleFileInfoResponse = 5231
ClientUFSShareFile = 5232
ClientUFSShareFileResponse = 5233
UFSReloadAccount = 5234
UFSReloadAccountResponse = 5235
UFSUpdateRecordBatched = 5236
UFSUpdateRecordBatchedResponse = 5237
UFSMigrateFile = 5238
UFSMigrateFileResponse = 5239
UFSGetUGCURLs = 5240
UFSGetUGCURLsResponse = 5241
UFSHttpUploadFileFinishRequest = 5242
UFSHttpUploadFileFinishResponse = 5243
UFSDownloadStartRequest = 5244
UFSDownloadStartResponse = 5245
UFSDownloadChunkRequest = 5246
UFSDownloadChunkResponse = 5247
UFSDownloadFinishRequest = 5248
UFSDownloadFinishResponse = 5249
UFSFlushURLCache = 5250
ClientUFSUploadCommit = 5251
ClientUFSUploadCommitResponse = 5252
UFSMigrateFileAppID = 5253
UFSMigrateFileAppIDResponse = 5254
BaseClient2 = 5400
ClientRequestForgottenPasswordEmail = 5401
ClientRequestForgottenPasswordEmailResponse = 5402
ClientCreateAccountResponse = 5403
ClientResetForgottenPassword = 5404
ClientResetForgottenPasswordResponse = 5405
ClientCreateAccount2 = 5406
ClientInformOfResetForgottenPassword = 5407
ClientInformOfResetForgottenPasswordResponse = 5408
ClientAnonUserLogOn_Deprecated = 5409 #: removed
ClientGamesPlayedWithDataBlob = 5410
ClientUpdateUserGameInfo = 5411
ClientFileToDownload = 5412
ClientFileToDownloadResponse = 5413
ClientLBSSetScore = 5414
ClientLBSSetScoreResponse = 5415
ClientLBSFindOrCreateLB = 5416
ClientLBSFindOrCreateLBResponse = 5417
ClientLBSGetLBEntries = 5418
ClientLBSGetLBEntriesResponse = 5419
ClientMarketingMessageUpdate = 5420 #: removed
ClientChatDeclined = 5426
ClientFriendMsgIncoming = 5427
ClientAuthList_Deprecated = 5428 #: removed
ClientTicketAuthComplete = 5429
ClientIsLimitedAccount = 5430
ClientRequestAuthList = 5431
ClientAuthList = 5432
ClientStat = 5433
ClientP2PConnectionInfo = 5434
ClientP2PConnectionFailInfo = 5435
ClientGetNumberOfCurrentPlayers = 5436 #: removed
ClientGetNumberOfCurrentPlayersResponse = 5437 #: removed
ClientGetDepotDecryptionKey = 5438
ClientGetDepotDecryptionKeyResponse = 5439
GSPerformHardwareSurvey = 5440
ClientGetAppBetaPasswords = 5441 #: removed
ClientGetAppBetaPasswordsResponse = 5442 #: removed
ClientEnableTestLicense = 5443
ClientEnableTestLicenseResponse = 5444
ClientDisableTestLicense = 5445
ClientDisableTestLicenseResponse = 5446
ClientRequestValidationMail = 5448
ClientRequestValidationMailResponse = 5449
# Restored numeric message ID: the value had been replaced by a "<PASSWORD>"
# redaction placeholder, which is not valid Python. 5450 is the only value
# between the adjacent entries 5449 and 5451.
ClientCheckAppBetaPassword = 5450
ClientCheckAppBetaPasswordResponse = 5451
ClientToGC = 5452
ClientFromGC = 5453
ClientRequestChangeMail = 5454
ClientRequestChangeMailResponse = 5455
ClientEmailAddrInfo = 5456
ClientPasswordChange3 = 5457
ClientEmailChange3 = 5458
ClientPersonalQAChange3 = 5459
ClientResetForgottenPassword3 = 5460
ClientRequestForgottenPasswordEmail3 = 5461
ClientCreateAccount3 = 5462 #: removed
ClientNewLoginKey = 5463
ClientNewLoginKeyAccepted = 5464
ClientLogOnWithHash_Deprecated = 5465 #: removed
ClientStoreUserStats2 = 5466
ClientStatsUpdated = 5467
ClientActivateOEMLicense = 5468
ClientRegisterOEMMachine = 5469
ClientRegisterOEMMachineResponse = 5470
ClientRequestedClientStats = 5480
ClientStat2Int32 = 5481
ClientStat2 = 5482
# Restored numeric message ID: the value had been replaced by a "<PASSWORD>"
# redaction placeholder, which is not valid Python. 5483 is the only value
# between the adjacent entries 5482 and 5484.
ClientVerifyPassword = 5483
ClientVerifyPasswordResponse = 5484
ClientDRMDownloadRequest = 5485
ClientDRMDownloadResponse = 5486
ClientDRMFinalResult = 5487
ClientGetFriendsWhoPlayGame = 5488
ClientGetFriendsWhoPlayGameResponse = 5489
ClientOGSBeginSession = 5490
ClientOGSBeginSessionResponse = 5491
ClientOGSEndSession = 5492
ClientOGSEndSessionResponse = 5493
ClientOGSWriteRow = 5494
ClientDRMTest = 5495
ClientDRMTestResult = 5496
ClientServerUnavailable = 5500
ClientServersAvailable = 5501
ClientRegisterAuthTicketWithCM = 5502
ClientGCMsgFailed = 5503
ClientMicroTxnAuthRequest = 5504
ClientMicroTxnAuthorize = 5505
ClientMicroTxnAuthorizeResponse = 5506
ClientAppMinutesPlayedData = 5507
ClientGetMicroTxnInfo = 5508
ClientGetMicroTxnInfoResponse = 5509
ClientMarketingMessageUpdate2 = 5510
ClientDeregisterWithServer = 5511
ClientSubscribeToPersonaFeed = 5512
ClientLogon = 5514
ClientGetClientDetails = 5515
ClientGetClientDetailsResponse = 5516
ClientReportOverlayDetourFailure = 5517
ClientGetClientAppList = 5518
ClientGetClientAppListResponse = 5519
ClientInstallClientApp = 5520
ClientInstallClientAppResponse = 5521
ClientUninstallClientApp = 5522
ClientUninstallClientAppResponse = 5523
ClientSetClientAppUpdateState = 5524
ClientSetClientAppUpdateStateResponse = 5525
ClientRequestEncryptedAppTicket = 5526
ClientRequestEncryptedAppTicketResponse = 5527
ClientWalletInfoUpdate = 5528
ClientLBSSetUGC = 5529
ClientLBSSetUGCResponse = 5530
ClientAMGetClanOfficers = 5531
ClientAMGetClanOfficersResponse = 5532
ClientCheckFileSignature = 5533 #: removed
ClientCheckFileSignatureResponse = 5534 #: removed
ClientFriendProfileInfo = 5535
ClientFriendProfileInfoResponse = 5536
ClientUpdateMachineAuth = 5537
ClientUpdateMachineAuthResponse = 5538
ClientReadMachineAuth = 5539
ClientReadMachineAuthResponse = 5540
ClientRequestMachineAuth = 5541
ClientRequestMachineAuthResponse = 5542
ClientScreenshotsChanged = 5543
ClientEmailChange4 = 5544
ClientEmailChangeResponse4 = 5545
ClientGetCDNAuthToken = 5546
ClientGetCDNAuthTokenResponse = 5547
ClientDownloadRateStatistics = 5548
ClientRequestAccountData = 5549
ClientRequestAccountDataResponse = 5550
ClientResetForgottenPassword4 = 5551
ClientHideFriend = 5552
ClientFriendsGroupsList = 5553
ClientGetClanActivityCounts = 5554
ClientGetClanActivityCountsResponse = 5555
ClientOGSReportString = 5556
ClientOGSReportBug = 5557
ClientSentLogs = 5558
ClientLogonGameServer = 5559
AMClientCreateFriendsGroup = 5560
AMClientCreateFriendsGroupResponse = 5561
AMClientDeleteFriendsGroup = 5562
AMClientDeleteFriendsGroupResponse = 5563
AMClientManageFriendsGroup = 5564
AMClientManageFriendsGroupResponse = 5565
AMClientAddFriendToGroup = 5566
AMClientAddFriendToGroupResponse = 5567
AMClientRemoveFriendFromGroup = 5568
AMClientRemoveFriendFromGroupResponse = 5569
ClientAMGetPersonaNameHistory = 5570
ClientAMGetPersonaNameHistoryResponse = 5571
ClientRequestFreeLicense = 5572
ClientRequestFreeLicenseResponse = 5573
ClientDRMDownloadRequestWithCrashData = 5574
ClientAuthListAck = 5575
ClientItemAnnouncements = 5576
ClientRequestItemAnnouncements = 5577
ClientFriendMsgEchoToSender = 5578
ClientChangeSteamGuardOptions = 5579 #: removed
ClientChangeSteamGuardOptionsResponse = 5580 #: removed
ClientOGSGameServerPingSample = 5581
ClientCommentNotifications = 5582
ClientRequestCommentNotifications = 5583
ClientPersonaChangeResponse = 5584
ClientRequestWebAPIAuthenticateUserNonce = 5585
ClientRequestWebAPIAuthenticateUserNonceResponse = 5586
ClientPlayerNicknameList = 5587
AMClientSetPlayerNickname = 5588
AMClientSetPlayerNicknameResponse = 5589
# ClientRequestOAuthTokenForApp = 5590 #: removed
# ClientRequestOAuthTokenForAppResponse = 5591 #: removed
ClientCreateAccountProto = 5590
ClientCreateAccountProtoResponse = 5591
ClientGetNumberOfCurrentPlayersDP = 5592
ClientGetNumberOfCurrentPlayersDPResponse = 5593
ClientServiceMethodLegacy = 5594
ClientServiceMethodLegacyResponse = 5595
ClientFriendUserStatusPublished = 5596
ClientCurrentUIMode = 5597
ClientVanityURLChangedNotification = 5598
ClientUserNotifications = 5599
BaseDFS = 5600
DFSGetFile = 5601
DFSInstallLocalFile = 5602
DFSConnection = 5603
DFSConnectionReply = 5604
ClientDFSAuthenticateRequest = 5605
ClientDFSAuthenticateResponse = 5606
ClientDFSEndSession = 5607
DFSPurgeFile = 5608
DFSRouteFile = 5609
DFSGetFileFromServer = 5610
DFSAcceptedResponse = 5611
DFSRequestPingback = 5612
DFSRecvTransmitFile = 5613
DFSSendTransmitFile = 5614
DFSRequestPingback2 = 5615
DFSResponsePingback2 = 5616
ClientDFSDownloadStatus = 5617
DFSStartTransfer = 5618
DFSTransferComplete = 5619
DFSRouteFileResponse = 5620
ClientNetworkingCertRequest = 5621
ClientNetworkingCertRequestResponse = 5622
ClientChallengeRequest = 5623
ClientChallengeResponse = 5624
BadgeCraftedNotification = 5625
ClientNetworkingMobileCertRequest = 5626
ClientNetworkingMobileCertRequestResponse = 5627
BaseMDS = 5800
ClientMDSLoginRequest = 5801 #: removed
ClientMDSLoginResponse = 5802 #: removed
ClientMDSUploadManifestRequest = 5803 #: removed
ClientMDSUploadManifestResponse = 5804 #: removed
ClientMDSTransmitManifestDataChunk = 5805 #: removed
ClientMDSHeartbeat = 5806 #: removed
ClientMDSUploadDepotChunks = 5807 #: removed
ClientMDSUploadDepotChunksResponse = 5808 #: removed
ClientMDSInitDepotBuildRequest = 5809 #: removed
ClientMDSInitDepotBuildResponse = 5810 #: removed
AMToMDSGetDepotDecryptionKey = 5812
MDSToAMGetDepotDecryptionKeyResponse = 5813
MDSGetVersionsForDepot = 5814 #: removed
MDSGetVersionsForDepotResponse = 5815 #: removed
# MDSSetPublicVersionForDepot = 5816 #: removed
# MDSSetPublicVersionForDepotResponse = 5817 #: removed
ClientMDSInitWorkshopBuildRequest = 5816 #: removed
ClientMDSInitWorkshopBuildResponse = 5817 #: removed
ClientMDSGetDepotManifest = 5818 #: removed
ClientMDSGetDepotManifestResponse = 5819 #: removed
ClientMDSGetDepotManifestChunk = 5820 #: removed
ClientMDSUploadRateTest = 5823 #: removed
ClientMDSUploadRateTestResponse = 5824 #: removed
MDSDownloadDepotChunksAck = 5825 #: removed
MDSContentServerStatsBroadcast = 5826 #: removed
MDSContentServerConfigRequest = 5827
MDSContentServerConfig = 5828
MDSGetDepotManifest = 5829
MDSGetDepotManifestResponse = 5830
MDSGetDepotManifestChunk = 5831
MDSGetDepotChunk = 5832
MDSGetDepotChunkResponse = 5833
MDSGetDepotChunkChunk = 5834
MDSUpdateContentServerConfig = 5835 #: removed
MDSGetServerListForUser = 5836
MDSGetServerListForUserResponse = 5837
ClientMDSRegisterAppBuild = 5838 #: removed
ClientMDSRegisterAppBuildResponse = 5839 #: removed
ClientMDSSetAppBuildLive = 5840 #: removed
ClientMDSSetAppBuildLiveResponse = 5841 #: removed
ClientMDSGetPrevDepotBuild = 5842 #: removed
ClientMDSGetPrevDepotBuildResponse = 5843 #: removed
MDSToCSFlushChunk = 5844
ClientMDSSignInstallScript = 5845 #: removed
ClientMDSSignInstallScriptResponse = 5846 #: removed
MDSMigrateChunk = 5847
MDSMigrateChunkResponse = 5848
MDSToCSFlushManifest = 5849
CSBase = 6200
CSPing = 6201
CSPingResponse = 6202
GMSBase = 6400
GMSGameServerReplicate = 6401
ClientGMSServerQuery = 6403
GMSClientServerQueryResponse = 6404
AMGMSGameServerUpdate = 6405
AMGMSGameServerRemove = 6406
GameServerOutOfDate = 6407
DeviceAuthorizationBase = 6500
ClientAuthorizeLocalDeviceRequest = 6501
# ClientAuthorizeLocalDevice = 6502 #: removed
ClientAuthorizeLocalDeviceResponse = 6502
ClientDeauthorizeDeviceRequest = 6503
ClientDeauthorizeDevice = 6504
ClientUseLocalDeviceAuthorizations = 6505
ClientGetAuthorizedDevices = 6506
ClientGetAuthorizedDevicesResponse = 6507
AMNotifySessionDeviceAuthorized = 6508
ClientAuthorizeLocalDeviceNotification = 6509
MMSBase = 6600
ClientMMSCreateLobby = 6601
ClientMMSCreateLobbyResponse = 6602
ClientMMSJoinLobby = 6603
ClientMMSJoinLobbyResponse = 6604
ClientMMSLeaveLobby = 6605
ClientMMSLeaveLobbyResponse = 6606
ClientMMSGetLobbyList = 6607
ClientMMSGetLobbyListResponse = 6608
ClientMMSSetLobbyData = 6609
ClientMMSSetLobbyDataResponse = 6610
ClientMMSGetLobbyData = 6611
ClientMMSLobbyData = 6612
ClientMMSSendLobbyChatMsg = 6613
ClientMMSLobbyChatMsg = 6614
ClientMMSSetLobbyOwner = 6615
ClientMMSSetLobbyOwnerResponse = 6616
ClientMMSSetLobbyGameServer = 6617
ClientMMSLobbyGameServerSet = 6618
ClientMMSUserJoinedLobby = 6619
ClientMMSUserLeftLobby = 6620
ClientMMSInviteToLobby = 6621
ClientMMSFlushFrenemyListCache = 6622
ClientMMSFlushFrenemyListCacheResponse = 6623
ClientMMSSetLobbyLinked = 6624
ClientMMSSetRatelimitPolicyOnClient = 6625
ClientMMSGetLobbyStatus = 6626
ClientMMSGetLobbyStatusResponse = 6627
MMSGetLobbyList = 6628
MMSGetLobbyListResponse = 6629
NonStdMsgBase = 6800
NonStdMsgMemcached = 6801
NonStdMsgHTTPServer = 6802
NonStdMsgHTTPClient = 6803
NonStdMsgWGResponse = 6804
NonStdMsgPHPSimulator = 6805
NonStdMsgChase = 6806
NonStdMsgDFSTransfer = 6807
NonStdMsgTests = 6808
NonStdMsgUMQpipeAAPL = 6809
NonStdMsgSyslog = 6810
NonStdMsgLogsink = 6811
NonStdMsgSteam2Emulator = 6812
NonStdMsgRTMPServer = 6813
NonStdMsgWebSocket = 6814
NonStdMsgRedis = 6815
UDSBase = 7000
ClientUDSP2PSessionStarted = 7001
ClientUDSP2PSessionEnded = 7002
UDSRenderUserAuth = 7003
UDSRenderUserAuthResponse = 7004
ClientInviteToGame = 7005
# UDSFindSession = 7006 #: removed
UDSHasSession = 7006
# UDSFindSessionResponse = 7007 #: removed
UDSHasSessionResponse = 7007
MPASBase = 7100
MPASVacBanReset = 7101
KGSBase = 7200
KGSAllocateKeyRange = 7201 #: removed
KGSAllocateKeyRangeResponse = 7202 #: removed
KGSGenerateKeys = 7203 #: removed
KGSGenerateKeysResponse = 7204 #: removed
KGSRemapKeys = 7205 #: removed
KGSRemapKeysResponse = 7206 #: removed
KGSGenerateGameStopWCKeys = 7207 #: removed
KGSGenerateGameStopWCKeysResponse = 7208 #: removed
UCMBase = 7300
ClientUCMAddScreenshot = 7301
ClientUCMAddScreenshotResponse = 7302
UCMValidateObjectExists = 7303 #: removed
UCMValidateObjectExistsResponse = 7304 #: removed
UCMResetCommunityContent = 7307
UCMResetCommunityContentResponse = 7308
ClientUCMDeleteScreenshot = 7309
ClientUCMDeleteScreenshotResponse = 7310
ClientUCMPublishFile = 7311
ClientUCMPublishFileResponse = 7312
ClientUCMGetPublishedFileDetails = 7313 #: removed
ClientUCMGetPublishedFileDetailsResponse = 7314 #: removed
ClientUCMDeletePublishedFile = 7315
ClientUCMDeletePublishedFileResponse = 7316
ClientUCMEnumerateUserPublishedFiles = 7317
ClientUCMEnumerateUserPublishedFilesResponse = 7318
ClientUCMSubscribePublishedFile = 7319 #: removed
ClientUCMSubscribePublishedFileResponse = 7320 #: removed
ClientUCMEnumerateUserSubscribedFiles = 7321
ClientUCMEnumerateUserSubscribedFilesResponse = 7322
ClientUCMUnsubscribePublishedFile = 7323 #: removed
ClientUCMUnsubscribePublishedFileResponse = 7324 #: removed
ClientUCMUpdatePublishedFile = 7325
ClientUCMUpdatePublishedFileResponse = 7326
UCMUpdatePublishedFile = 7327
UCMUpdatePublishedFileResponse = 7328
UCMDeletePublishedFile = 7329
UCMDeletePublishedFileResponse = 7330
UCMUpdatePublishedFileStat = 7331
UCMUpdatePublishedFileBan = 7332
UCMUpdatePublishedFileBanResponse = 7333
UCMUpdateTaggedScreenshot = 7334 #: removed
UCMAddTaggedScreenshot = 7335 #: removed
UCMRemoveTaggedScreenshot = 7336 #: removed
UCMReloadPublishedFile = 7337
UCMReloadUserFileListCaches = 7338
UCMPublishedFileReported = 7339
UCMUpdatePublishedFileIncompatibleStatus = 7340 #: removed
UCMPublishedFilePreviewAdd = 7341
UCMPublishedFilePreviewAddResponse = 7342
UCMPublishedFilePreviewRemove = 7343
UCMPublishedFilePreviewRemoveResponse = 7344
UCMPublishedFilePreviewChangeSortOrder = 7345 #: removed
UCMPublishedFilePreviewChangeSortOrderResponse = 7346 #: removed
ClientUCMPublishedFileSubscribed = 7347
ClientUCMPublishedFileUnsubscribed = 7348
UCMPublishedFileSubscribed = 7349
UCMPublishedFileUnsubscribed = 7350
UCMPublishFile = 7351
UCMPublishFileResponse = 7352
UCMPublishedFileChildAdd = 7353
UCMPublishedFileChildAddResponse = 7354
UCMPublishedFileChildRemove = 7355
UCMPublishedFileChildRemoveResponse = 7356
UCMPublishedFileChildChangeSortOrder = 7357 #: removed
UCMPublishedFileChildChangeSortOrderResponse = 7358 #: removed
UCMPublishedFileParentChanged = 7359
ClientUCMGetPublishedFilesForUser = 7360
ClientUCMGetPublishedFilesForUserResponse = 7361
UCMGetPublishedFilesForUser = 7362 #: removed
UCMGetPublishedFilesForUserResponse = 7363 #: removed
ClientUCMSetUserPublishedFileAction = 7364
ClientUCMSetUserPublishedFileActionResponse = 7365
ClientUCMEnumeratePublishedFilesByUserAction = 7366
ClientUCMEnumeratePublishedFilesByUserActionResponse = 7367
ClientUCMPublishedFileDeleted = 7368
UCMGetUserSubscribedFiles = 7369
UCMGetUserSubscribedFilesResponse = 7370
UCMFixStatsPublishedFile = 7371
UCMDeleteOldScreenshot = 7372 #: removed
UCMDeleteOldScreenshotResponse = 7373 #: removed
UCMDeleteOldVideo = 7374 #: removed
UCMDeleteOldVideoResponse = 7375 #: removed
UCMUpdateOldScreenshotPrivacy = 7376 #: removed
UCMUpdateOldScreenshotPrivacyResponse = 7377 #: removed
ClientUCMEnumerateUserSubscribedFilesWithUpdates = 7378
ClientUCMEnumerateUserSubscribedFilesWithUpdatesResponse = 7379
UCMPublishedFileContentUpdated = 7380
ClientUCMPublishedFileUpdated = 7381
ClientWorkshopItemChangesRequest = 7382
ClientWorkshopItemChangesResponse = 7383
ClientWorkshopItemInfoRequest = 7384
ClientWorkshopItemInfoResponse = 7385
FSBase = 7500
ClientRichPresenceUpload = 7501
ClientRichPresenceRequest = 7502
ClientRichPresenceInfo = 7503
FSRichPresenceRequest = 7504
FSRichPresenceResponse = 7505
FSComputeFrenematrix = 7506
FSComputeFrenematrixResponse = 7507
FSPlayStatusNotification = 7508
FSPublishPersonaStatus = 7509 #: removed
FSAddOrRemoveFollower = 7510
FSAddOrRemoveFollowerResponse = 7511
FSUpdateFollowingList = 7512
FSCommentNotification = 7513
FSCommentNotificationViewed = 7514
ClientFSGetFollowerCount = 7515
ClientFSGetFollowerCountResponse = 7516
ClientFSGetIsFollowing = 7517
ClientFSGetIsFollowingResponse = 7518
ClientFSEnumerateFollowingList = 7519
ClientFSEnumerateFollowingListResponse = 7520
FSGetPendingNotificationCount = 7521
FSGetPendingNotificationCountResponse = 7522
# ClientFSOfflineMessageNotification = 7523 #: renamed
# ClientFSRequestOfflineMessageCount = 7524 #: renamed
# ClientFSGetFriendMessageHistory = 7525 #: renamed
# ClientFSGetFriendMessageHistoryResponse = 7526 #: renamed
# ClientFSGetFriendMessageHistoryForOfflineMessages = 7527 #: renamed
ClientChatOfflineMessageNotification = 7523
ClientChatRequestOfflineMessageCount = 7524
ClientChatGetFriendMessageHistory = 7525
ClientChatGetFriendMessageHistoryResponse = 7526
ClientChatGetFriendMessageHistoryForOfflineMessages = 7527
ClientFSGetFriendsSteamLevels = 7528
ClientFSGetFriendsSteamLevelsResponse = 7529
AMRequestFriendData = 7530
CEGVersionSetEnableDisableRequest = 7600
DRMRange2 = 7600
CEGVersionSetEnableDisableResponse = 7601
CEGPropStatusDRMSRequest = 7602
CEGPropStatusDRMSResponse = 7603
CEGWhackFailureReportRequest = 7604
CEGWhackFailureReportResponse = 7605
DRMSFetchVersionSet = 7606
DRMSFetchVersionSetResponse = 7607
EconBase = 7700
EconTrading_InitiateTradeRequest = 7701
EconTrading_InitiateTradeProposed = 7702
EconTrading_InitiateTradeResponse = 7703
EconTrading_InitiateTradeResult = 7704
EconTrading_StartSession = 7705
EconTrading_CancelTradeRequest = 7706
EconFlushInventoryCache = 7707
EconFlushInventoryCacheResponse = 7708
EconCDKeyProcessTransaction = 7711
EconCDKeyProcessTransactionResponse = 7712
EconGetErrorLogs = 7713
EconGetErrorLogsResponse = 7714
RMRange = 7800
RMTestVerisignOTP = 7800
RMTestVerisignOTPResponse = 7801
RMDeleteMemcachedKeys = 7803
RMRemoteInvoke = 7804
BadLoginIPList = 7805
RMMsgTraceAddTrigger = 7806
RMMsgTraceRemoveTrigger = 7807
RMMsgTraceEvent = 7808
UGSUpdateGlobalStats = 7900
UGSBase = 7900
ClientUGSGetGlobalStats = 7901
ClientUGSGetGlobalStatsResponse = 7902
StoreUpdateRecommendationCount = 8000 #: removed
StoreBase = 8000
UMQLogonRequest = 8100
UMQBase = 8100
UMQLogonResponse = 8101
UMQLogoffRequest = 8102
UMQLogoffResponse = 8103
UMQSendChatMessage = 8104
UMQIncomingChatMessage = 8105
UMQPoll = 8106
UMQPollResults = 8107
UMQ2AM_ClientMsgBatch = 8108
UMQEnqueueMobileSalePromotions = 8109 #: removed
UMQEnqueueMobileAnnouncements = 8110 #: removed
WorkshopAcceptTOSRequest = 8200 #: removed
WorkshopBase = 8200
WorkshopAcceptTOSResponse = 8201 #: removed
    WebAPIValidateOAuth2Token = 8300
WebAPIBase = 8300
WebAPIValidateOAuth2TokenResponse = 8301
WebAPIInvalidateTokensForAccount = 8302 #: removed
WebAPIRegisterGCInterfaces = 8303
WebAPIInvalidateOAuthClientCache = 8304
WebAPIInvalidateOAuthTokenCache = 8305
WebAPISetSecrets = 8306
BackpackBase = 8400
BackpackAddToCurrency = 8401
BackpackAddToCurrencyResponse = 8402
CREBase = 8500
CRERankByTrend = 8501 #: removed
CRERankByTrendResponse = 8502 #: removed
CREItemVoteSummary = 8503
CREItemVoteSummaryResponse = 8504
CRERankByVote = 8505 #: removed
CRERankByVoteResponse = 8506 #: removed
CREUpdateUserPublishedItemVote = 8507
CREUpdateUserPublishedItemVoteResponse = 8508
CREGetUserPublishedItemVoteDetails = 8509
CREGetUserPublishedItemVoteDetailsResponse = 8510
CREEnumeratePublishedFiles = 8511
CREEnumeratePublishedFilesResponse = 8512
CREPublishedFileVoteAdded = 8513
SecretsRequestCredentialPair = 8600
SecretsBase = 8600
SecretsCredentialPairResponse = 8601
SecretsRequestServerIdentity = 8602 #: removed
SecretsServerIdentityResponse = 8603 #: removed
SecretsUpdateServerIdentities = 8604 #: removed
BoxMonitorReportRequest = 8700
BoxMonitorBase = 8700
BoxMonitorReportResponse = 8701
LogsinkWriteReport = 8800
LogsinkBase = 8800
PICSBase = 8900
ClientPICSChangesSinceRequest = 8901
ClientPICSChangesSinceResponse = 8902
ClientPICSProductInfoRequest = 8903
ClientPICSProductInfoResponse = 8904
ClientPICSAccessTokenRequest = 8905
ClientPICSAccessTokenResponse = 8906
WorkerProcess = 9000
WorkerProcessPingRequest = 9000
WorkerProcessPingResponse = 9001
WorkerProcessShutdown = 9002
DRMWorkerProcess = 9100
DRMWorkerProcessDRMAndSign = 9100
DRMWorkerProcessDRMAndSignResponse = 9101
DRMWorkerProcessSteamworksInfoRequest = 9102
DRMWorkerProcessSteamworksInfoResponse = 9103
DRMWorkerProcessInstallDRMDLLRequest = 9104
DRMWorkerProcessInstallDRMDLLResponse = 9105
DRMWorkerProcessSecretIdStringRequest = 9106
DRMWorkerProcessSecretIdStringResponse = 9107
DRMWorkerProcessGetDRMGuidsFromFileRequest = 9108 #: removed
DRMWorkerProcessGetDRMGuidsFromFileResponse = 9109 #: removed
DRMWorkerProcessInstallProcessedFilesRequest = 9110
DRMWorkerProcessInstallProcessedFilesResponse = 9111
DRMWorkerProcessExamineBlobRequest = 9112
DRMWorkerProcessExamineBlobResponse = 9113
DRMWorkerProcessDescribeSecretRequest = 9114
DRMWorkerProcessDescribeSecretResponse = 9115
DRMWorkerProcessBackfillOriginalRequest = 9116
DRMWorkerProcessBackfillOriginalResponse = 9117
DRMWorkerProcessValidateDRMDLLRequest = 9118
DRMWorkerProcessValidateDRMDLLResponse = 9119
DRMWorkerProcessValidateFileRequest = 9120
DRMWorkerProcessValidateFileResponse = 9121
DRMWorkerProcessSplitAndInstallRequest = 9122
DRMWorkerProcessSplitAndInstallResponse = 9123
DRMWorkerProcessGetBlobRequest = 9124
DRMWorkerProcessGetBlobResponse = 9125
DRMWorkerProcessEvaluateCrashRequest = 9126
DRMWorkerProcessEvaluateCrashResponse = 9127
DRMWorkerProcessAnalyzeFileRequest = 9128
DRMWorkerProcessAnalyzeFileResponse = 9129
DRMWorkerProcessUnpackBlobRequest = 9130
DRMWorkerProcessUnpackBlobResponse = 9131
DRMWorkerProcessInstallAllRequest = 9132
DRMWorkerProcessInstallAllResponse = 9133
TestWorkerProcess = 9200
TestWorkerProcessLoadUnloadModuleRequest = 9200
TestWorkerProcessLoadUnloadModuleResponse = 9201
TestWorkerProcessServiceModuleCallRequest = 9202
TestWorkerProcessServiceModuleCallResponse = 9203
QuestServerBase = 9300
ClientGetEmoticonList = 9330
ClientEmoticonList = 9331
# ClientSharedLibraryBase = 9400 #: removed
SLCUserSessionStatus = 9400
SLCBase = 9400
SLCRequestUserSessionStatus = 9401
SLCSharedLicensesLockStatus = 9402
ClientSharedLicensesLockStatus = 9403 #: removed
ClientSharedLicensesStopPlaying = 9404 #: removed
ClientSharedLibraryLockStatus = 9405
ClientSharedLibraryStopPlaying = 9406
SLCOwnerLibraryChanged = 9407
SLCSharedLibraryChanged = 9408
RemoteClientAuth_OBSOLETE = 9500
RemoteClientBase = 9500
RemoteClientAuthResponse_OBSOLETE = 9501
RemoteClientAppStatus = 9502
RemoteClientStartStream = 9503
RemoteClientStartStreamResponse = 9504
RemoteClientPing = 9505
RemoteClientPingResponse = 9506
ClientUnlockStreaming = 9507
ClientUnlockStreamingResponse = 9508
RemoteClientAcceptEULA = 9509
RemoteClientGetControllerConfig = 9510
RemoteClientGetControllerConfigResponse = 9511
RemoteClientStreamingEnabled = 9512
ClientUnlockHEVC = 9513
ClientUnlockHEVCResponse = 9514
RemoteClientStatusRequest = 9515
RemoteClientStatusResponse = 9516
ClientPlayingSessionState = 9600
ClientConcurrentSessionsBase = 9600
ClientKickPlayingSession = 9601
ClientBroadcastInit = 9700 #: removed
ClientBroadcastBase = 9700
ClientBroadcastFrames = 9701
ClientBroadcastDisconnect = 9702
ClientBroadcastScreenshot = 9703
ClientBroadcastUploadConfig = 9704
ClientVoiceCallPreAuthorize = 9800 #: removed
BaseClient3 = 9800
ClientVoiceCallPreAuthorizeResponse = 9801
ClientServerTimestampRequest = 9802
ClientServerTimestampResponse = 9803
ClientLANP2PRequestChunk = 9900
ClientLANP2PBase = 9900
ClientLANP2PRequestChunkResponse = 9901
ClientLANP2PMax = 9999
# BaseWatchdogServer = 10000
NotifyWatchdog = 10000
ClientSiteLicenseSiteInfoNotification = 10100
ClientSiteLicenseBase = 10100
ClientSiteLicenseCheckout = 10101
ClientSiteLicenseCheckoutResponse = 10102
ClientSiteLicenseGetAvailableSeats = 10103
ClientSiteLicenseGetAvailableSeatsResponse = 10104
ClientSiteLicenseGetContentCacheInfo = 10105
ClientSiteLicenseGetContentCacheInfoResponse = 10106
ChatServerGetPendingNotificationCount = 12000
BaseChatServer = 12000
ChatServerGetPendingNotificationCountResponse = 12001
ServerSecretChanged = 12100
BaseSecretServer = 12100
|
"""The EMsg enum contains many members and takes a bit to load.
For this reason it is separate, and imported only when needed.
"""
from steam.enums.base import SteamIntEnum
class EMsg(SteamIntEnum):
Invalid = 0
Multi = 1
ProtobufWrapped = 2
GenericReply = 100
BaseGeneral = 100
DestJobFailed = 113
Alert = 115
SCIDRequest = 120
SCIDResponse = 121
JobHeartbeat = 123
HubConnect = 124
Subscribe = 126
RouteMessage = 127
RemoteSysID = 128 #: removed
AMCreateAccountResponse = 129 #: removed
WGRequest = 130
WGResponse = 131
KeepAlive = 132
WebAPIJobRequest = 133
WebAPIJobResponse = 134
ClientSessionStart = 135
ClientSessionEnd = 136
# ClientSessionUpdateAuthTicket = 137 #: removed
ClientSessionUpdate = 137
StatsDeprecated = 138
Ping = 139
PingResponse = 140
Stats = 141
RequestFullStatsBlock = 142
LoadDBOCacheItem = 143
LoadDBOCacheItemResponse = 144
InvalidateDBOCacheItems = 145
ServiceMethod = 146
ServiceMethodResponse = 147
ClientPackageVersions = 148
TimestampRequest = 149
TimestampResponse = 150
ServiceMethodCallFromClient = 151
ServiceMethodSendToClient = 152
AssignSysID = 200
BaseShell = 200
Exit = 201
DirRequest = 202
DirResponse = 203
ZipRequest = 204
ZipResponse = 205
UpdateRecordResponse = 215
UpdateCreditCardRequest = 221
UpdateUserBanResponse = 225
PrepareToExit = 226
ContentDescriptionUpdate = 227
TestResetServer = 228
UniverseChanged = 229
ShellConfigInfoUpdate = 230
RequestWindowsEventLogEntries = 233
ProvideWindowsEventLogEntries = 234
ShellSearchLogs = 235
ShellSearchLogsResponse = 236
ShellCheckWindowsUpdates = 237
ShellCheckWindowsUpdatesResponse = 238
ShellFlushUserLicenseCache = 239 #: removed
TestFlushDelayedSQL = 240
TestFlushDelayedSQLResponse = 241
EnsureExecuteScheduledTask_TEST = 242
EnsureExecuteScheduledTaskResponse_TEST = 243
UpdateScheduledTaskEnableState_TEST = 244
UpdateScheduledTaskEnableStateResponse_TEST = 245
ContentDescriptionDeltaUpdate = 246
Heartbeat = 300
BaseGM = 300
ShellFailed = 301
ExitShells = 307
ExitShell = 308
GracefulExitShell = 309
LicenseProcessingComplete = 316
SetTestFlag = 317
QueuedEmailsComplete = 318
GMReportPHPError = 319
GMDRMSync = 320
PhysicalBoxInventory = 321
UpdateConfigFile = 322
TestInitDB = 323
GMWriteConfigToSQL = 324
GMLoadActivationCodes = 325
GMQueueForFBS = 326
GMSchemaConversionResults = 327
GMSchemaConversionResultsResponse = 328 #: removed
GMWriteShellFailureToSQL = 329
GMWriteStatsToSOS = 330
GMGetServiceMethodRouting = 331
GMGetServiceMethodRoutingResponse = 332
GMConvertUserWallets = 333 #: removed
GMTestNextBuildSchemaConversion = 334
GMTestNextBuildSchemaConversionResponse = 335
ExpectShellRestart = 336
HotFixProgress = 337
BaseAIS = 400
AISRefreshContentDescription = 401 #: removed
AISRequestContentDescription = 402
AISUpdateAppInfo = 403
# AISUpdatePackageInfo = 404 #: removed
AISUpdatePackageCosts = 404 #: removed
AISGetPackageChangeNumber = 405
AISGetPackageChangeNumberResponse = 406
AISAppInfoTableChanged = 407 #: removed
AISUpdatePackageCostsResponse = 408 #: removed
AISCreateMarketingMessage = 409 #: removed
AISCreateMarketingMessageResponse = 410 #: removed
AISGetMarketingMessage = 411 #: removed
AISGetMarketingMessageResponse = 412 #: removed
AISUpdateMarketingMessage = 413 #: removed
AISUpdateMarketingMessageResponse = 414 #: removed
AISRequestMarketingMessageUpdate = 415 #: removed
AISDeleteMarketingMessage = 416 #: removed
AISGetMarketingTreatments = 419 #: removed
AISGetMarketingTreatmentsResponse = 420 #: removed
AISRequestMarketingTreatmentUpdate = 421 #: removed
AISTestAddPackage = 422 #: removed
AIGetAppGCFlags = 423
AIGetAppGCFlagsResponse = 424
AIGetAppList = 425
AIGetAppListResponse = 426
AIGetAppInfo = 427 #: removed
AIGetAppInfoResponse = 428 #: removed
AISGetCouponDefinition = 429
AISGetCouponDefinitionResponse = 430
AISUpdateSlaveContentDescription = 431
AISUpdateSlaveContentDescriptionResponse = 432
AISTestEnableGC = 433
BaseAM = 500
AMUpdateUserBanRequest = 504
AMAddLicense = 505
AMBeginProcessingLicenses = 507 #: removed
AMSendSystemIMToUser = 508
AMExtendLicense = 509
AMAddMinutesToLicense = 510
AMCancelLicense = 511
AMInitPurchase = 512
AMPurchaseResponse = 513
AMGetFinalPrice = 514
AMGetFinalPriceResponse = 515
AMGetLegacyGameKey = 516
AMGetLegacyGameKeyResponse = 517
AMFindHungTransactions = 518
AMSetAccountTrustedRequest = 519
AMCompletePurchase = 521 #: removed
AMCancelPurchase = 522
AMNewChallenge = 523
AMLoadOEMTickets = 524
AMFixPendingPurchase = 525
AMFixPendingPurchaseResponse = 526
AMIsUserBanned = 527
AMRegisterKey = 528
AMLoadActivationCodes = 529
AMLoadActivationCodesResponse = 530
AMLookupKeyResponse = 531
AMLookupKey = 532
AMChatCleanup = 533
AMClanCleanup = 534
AMFixPendingRefund = 535
AMReverseChargeback = 536
AMReverseChargebackResponse = 537
AMClanCleanupList = 538
AMGetLicenses = 539
AMGetLicensesResponse = 540
AMSendCartRepurchase = 541
AMSendCartRepurchaseResponse = 542
AllowUserToPlayQuery = 550
AllowUserToPlayResponse = 551
AMVerfiyUser = 552
AMClientNotPlaying = 553
AMClientRequestFriendship = 554
AMRelayPublishStatus = 555
AMResetCommunityContent = 556 #: removed
AMPrimePersonaStateCache = 557 #: removed
AMAllowUserContentQuery = 558 #: removed
AMAllowUserContentResponse = 559 #: removed
AMInitPurchaseResponse = 560
AMRevokePurchaseResponse = 561
AMLockProfile = 562 #: removed
AMRefreshGuestPasses = 563
AMInviteUserToClan = 564 #: removed
AMAcknowledgeClanInvite = 565 #: removed
AMGrantGuestPasses = 566
AMClanDataUpdated = 567
AMReloadAccount = 568
AMClientChatMsgRelay = 569
AMChatMulti = 570
AMClientChatInviteRelay = 571
AMChatInvite = 572
AMClientJoinChatRelay = 573
AMClientChatMemberInfoRelay = 574
AMPublishChatMemberInfo = 575
AMClientAcceptFriendInvite = 576
AMChatEnter = 577
AMClientPublishRemovalFromSource = 578
AMChatActionResult = 579
AMFindAccounts = 580
AMFindAccountsResponse = 581
AMRequestAccountData = 582
AMRequestAccountDataResponse = 583
AMSetAccountFlags = 584
AMCreateClan = 586
AMCreateClanResponse = 587
AMGetClanDetails = 588
AMGetClanDetailsResponse = 589
AMSetPersonaName = 590
AMSetAvatar = 591
AMAuthenticateUser = 592
AMAuthenticateUserResponse = 593
AMGetAccountFriendsCount = 594 #: removed
AMGetAccountFriendsCountResponse = 595 #: removed
AMP2PIntroducerMessage = 596
ClientChatAction = 597
AMClientChatActionRelay = 598
ReqChallenge = 600
BaseVS = 600
VACResponse = 601
ReqChallengeTest = 602
VSMarkCheat = 604
VSAddCheat = 605
VSPurgeCodeModDB = 606
VSGetChallengeResults = 607
VSChallengeResultText = 608
VSReportLingerer = 609
VSRequestManagedChallenge = 610
VSLoadDBFinished = 611
BaseDRMS = 625
DRMBuildBlobRequest = 628
DRMBuildBlobResponse = 629
DRMResolveGuidRequest = 630
DRMResolveGuidResponse = 631
DRMVariabilityReport = 633
DRMVariabilityReportResponse = 634
DRMStabilityReport = 635
DRMStabilityReportResponse = 636
DRMDetailsReportRequest = 637
DRMDetailsReportResponse = 638
DRMProcessFile = 639
DRMAdminUpdate = 640
DRMAdminUpdateResponse = 641
DRMSync = 642
DRMSyncResponse = 643
DRMProcessFileResponse = 644
DRMEmptyGuidCache = 645
DRMEmptyGuidCacheResponse = 646
BaseCS = 650
CSUserContentRequest = 652 #: removed
BaseClient = 700
ClientLogOn_Deprecated = 701 #: removed
ClientAnonLogOn_Deprecated = 702 #: removed
ClientHeartBeat = 703
ClientVACResponse = 704
ClientGamesPlayed_obsolete = 705 #: removed
ClientLogOff = 706
ClientNoUDPConnectivity = 707
ClientInformOfCreateAccount = 708 #: removed
ClientAckVACBan = 709 #: removed
ClientConnectionStats = 710
ClientInitPurchase = 711 #: removed
ClientPingResponse = 712
ClientRemoveFriend = 714
ClientGamesPlayedNoDataBlob = 715
ClientChangeStatus = 716
ClientVacStatusResponse = 717
ClientFriendMsg = 718
ClientGameConnect_obsolete = 719 #: removed
ClientGamesPlayed2_obsolete = 720 #: removed
ClientGameEnded_obsolete = 721 #: removed
ClientGetFinalPrice = 722 #: removed
ClientSystemIM = 726
ClientSystemIMAck = 727
ClientGetLicenses = 728
ClientCancelLicense = 729 #: removed
ClientGetLegacyGameKey = 730
ClientContentServerLogOn_Deprecated = 731 #: removed
ClientAckVACBan2 = 732
ClientAckMessageByGID = 735 #: removed
ClientGetPurchaseReceipts = 736
ClientAckPurchaseReceipt = 737 #: removed
ClientGamesPlayed3_obsolete = 738 #: removed
ClientSendGuestPass = 739 #: removed
ClientAckGuestPass = 740
ClientRedeemGuestPass = 741
ClientGamesPlayed = 742
ClientRegisterKey = 743
ClientInviteUserToClan = 744
ClientAcknowledgeClanInvite = 745
ClientPurchaseWithMachineID = 746
ClientAppUsageEvent = 747
ClientGetGiftTargetList = 748 #: removed
ClientGetGiftTargetListResponse = 749 #: removed
ClientLogOnResponse = 751
ClientVACChallenge = 753 #: removed
ClientSetHeartbeatRate = 755
ClientNotLoggedOnDeprecated = 756 #: removed
ClientLoggedOff = 757
GSApprove = 758
GSDeny = 759
GSKick = 760
ClientCreateAcctResponse = 761
ClientPurchaseResponse = 763
ClientPing = 764
ClientNOP = 765
ClientPersonaState = 766
ClientFriendsList = 767
ClientAccountInfo = 768
ClientVacStatusQuery = 770 #: removed
ClientNewsUpdate = 771
ClientGameConnectDeny = 773
GSStatusReply = 774
ClientGetFinalPriceResponse = 775 #: removed
ClientGameConnectTokens = 779
ClientLicenseList = 780
ClientCancelLicenseResponse = 781 #: removed
ClientVACBanStatus = 782
ClientCMList = 783
ClientEncryptPct = 784
ClientGetLegacyGameKeyResponse = 785
ClientFavoritesList = 786 #: removed
CSUserContentApprove = 787 #: removed
CSUserContentDeny = 788 #: removed
ClientInitPurchaseResponse = 789 #: removed
ClientAddFriend = 791
ClientAddFriendResponse = 792
ClientInviteFriend = 793 #: removed
ClientInviteFriendResponse = 794 #: removed
ClientSendGuestPassResponse = 795 #: removed
ClientAckGuestPassResponse = 796
ClientRedeemGuestPassResponse = 797
ClientUpdateGuestPassesList = 798
ClientChatMsg = 799
ClientChatInvite = 800
ClientJoinChat = 801
ClientChatMemberInfo = 802
ClientLogOnWithCredentials_Deprecated = 803 #: removed
ClientPasswordChangeResponse = 805
ClientChatEnter = 807
ClientFriendRemovedFromSource = 808
ClientCreateChat = 809
ClientCreateChatResponse = 810
ClientUpdateChatMetadata = 811 #: removed
ClientP2PIntroducerMessage = 813
ClientChatActionResult = 814
ClientRequestFriendData = 815
ClientGetUserStats = 818
ClientGetUserStatsResponse = 819
ClientStoreUserStats = 820
ClientStoreUserStatsResponse = 821
ClientClanState = 822
ClientServiceModule = 830
ClientServiceCall = 831
ClientServiceCallResponse = 832
ClientPackageInfoRequest = 833 #: removed
ClientPackageInfoResponse = 834 #: removed
ClientNatTraversalStatEvent = 839
ClientAppInfoRequest = 840 #: removed
ClientAppInfoResponse = 841 #: removed
ClientSteamUsageEvent = 842
    ClientCheckPassword = 843
    ClientResetPassword = 845
ClientCheckPasswordResponse = 848
ClientResetPasswordResponse = 849
ClientSessionToken = 850
ClientDRMProblemReport = 851
ClientSetIgnoreFriend = 855
ClientSetIgnoreFriendResponse = 856
ClientGetAppOwnershipTicket = 857
ClientGetAppOwnershipTicketResponse = 858
ClientGetLobbyListResponse = 860
ClientGetLobbyMetadata = 861 #: removed
ClientGetLobbyMetadataResponse = 862 #: removed
ClientVTTCert = 863 #: removed
ClientAppInfoUpdate = 866 #: removed
ClientAppInfoChanges = 867 #: removed
ClientServerList = 880
ClientEmailChangeResponse = 891 #: removed
ClientSecretQAChangeResponse = 892 #: removed
ClientDRMBlobRequest = 896
ClientDRMBlobResponse = 897
ClientLookupKey = 898 #: removed
ClientLookupKeyResponse = 899 #: removed
BaseGameServer = 900
GSDisconnectNotice = 901
GSStatus = 903
GSUserPlaying = 905
GSStatus2 = 906
GSStatusUpdate_Unused = 907
GSServerType = 908
GSPlayerList = 909
GSGetUserAchievementStatus = 910
GSGetUserAchievementStatusResponse = 911
GSGetPlayStats = 918
GSGetPlayStatsResponse = 919
GSGetUserGroupStatus = 920
AMGetUserGroupStatus = 921
AMGetUserGroupStatusResponse = 922
GSGetUserGroupStatusResponse = 923
GSGetReputation = 936
GSGetReputationResponse = 937
GSAssociateWithClan = 938
GSAssociateWithClanResponse = 939
GSComputeNewPlayerCompatibility = 940
GSComputeNewPlayerCompatibilityResponse = 941
AdminCmd = 1000
BaseAdmin = 1000
AdminCmdResponse = 1004
AdminLogListenRequest = 1005
AdminLogEvent = 1006
LogSearchRequest = 1007 #: removed
LogSearchResponse = 1008 #: removed
LogSearchCancel = 1009 #: removed
UniverseData = 1010
RequestStatHistory = 1014 #: removed
StatHistory = 1015 #: removed
AdminPwLogon = 1017 #: removed
AdminPwLogonResponse = 1018 #: removed
AdminSpew = 1019
AdminConsoleTitle = 1020
AdminGCSpew = 1023
AdminGCCommand = 1024
AdminGCGetCommandList = 1025
AdminGCGetCommandListResponse = 1026
FBSConnectionData = 1027
AdminMsgSpew = 1028
FBSReqVersion = 1100
BaseFBS = 1100
FBSVersionInfo = 1101
FBSForceRefresh = 1102
FBSForceBounce = 1103
FBSDeployPackage = 1104
FBSDeployResponse = 1105
FBSUpdateBootstrapper = 1106
FBSSetState = 1107
FBSApplyOSUpdates = 1108
FBSRunCMDScript = 1109
FBSRebootBox = 1110
FBSSetBigBrotherMode = 1111
FBSMinidumpServer = 1112
FBSSetShellCount_obsolete = 1113 #: removed
FBSDeployHotFixPackage = 1114
FBSDeployHotFixResponse = 1115
FBSDownloadHotFix = 1116
FBSDownloadHotFixResponse = 1117
FBSUpdateTargetConfigFile = 1118
FBSApplyAccountCred = 1119
FBSApplyAccountCredResponse = 1120
FBSSetShellCount = 1121
FBSTerminateShell = 1122
FBSQueryGMForRequest = 1123
FBSQueryGMResponse = 1124
FBSTerminateZombies = 1125
FBSInfoFromBootstrapper = 1126
FBSRebootBoxResponse = 1127
FBSBootstrapperPackageRequest = 1128
FBSBootstrapperPackageResponse = 1129
FBSBootstrapperGetPackageChunk = 1130
FBSBootstrapperGetPackageChunkResponse = 1131
FBSBootstrapperPackageTransferProgress = 1132
FBSRestartBootstrapper = 1133
FBSPauseFrozenDumps = 1134
FileXferRequest = 1200
BaseFileXfer = 1200
FileXferResponse = 1201
FileXferData = 1202
FileXferEnd = 1203
FileXferDataAck = 1204
ChannelAuthChallenge = 1300
BaseChannelAuth = 1300
ChannelAuthResponse = 1301
ChannelAuthResult = 1302
ChannelEncryptRequest = 1303
ChannelEncryptResponse = 1304
ChannelEncryptResult = 1305
BaseBS = 1400
BSPurchaseStart = 1401
BSPurchaseResponse = 1402
BSAuthenticateCCTrans = 1403
BSAuthenticateCCTransResponse = 1404
BSSettleComplete = 1406
BSBannedRequest = 1407 #: removed
BSInitPayPalTxn = 1408
BSInitPayPalTxnResponse = 1409
BSGetPayPalUserInfo = 1410
BSGetPayPalUserInfoResponse = 1411
BSRefundTxn = 1413 #: removed
BSRefundTxnResponse = 1414 #: removed
BSGetEvents = 1415 #: removed
BSChaseRFRRequest = 1416 #: removed
BSPaymentInstrBan = 1417
BSPaymentInstrBanResponse = 1418
BSProcessGCReports = 1419 #: removed
BSProcessPPReports = 1420 #: removed
BSInitGCBankXferTxn = 1421
BSInitGCBankXferTxnResponse = 1422
BSQueryGCBankXferTxn = 1423 #: removed
BSQueryGCBankXferTxnResponse = 1424 #: removed
BSCommitGCTxn = 1425
BSQueryTransactionStatus = 1426
BSQueryTransactionStatusResponse = 1427
BSQueryCBOrderStatus = 1428 #: removed
BSQueryCBOrderStatusResponse = 1429 #: removed
BSRunRedFlagReport = 1430 #: removed
BSQueryPaymentInstUsage = 1431
BSQueryPaymentInstResponse = 1432
BSQueryTxnExtendedInfo = 1433
BSQueryTxnExtendedInfoResponse = 1434
BSUpdateConversionRates = 1435
BSProcessUSBankReports = 1436 #: removed
BSPurchaseRunFraudChecks = 1437
BSPurchaseRunFraudChecksResponse = 1438
BSStartShippingJobs = 1439 #: removed
BSQueryBankInformation = 1440
BSQueryBankInformationResponse = 1441
BSValidateXsollaSignature = 1445
BSValidateXsollaSignatureResponse = 1446
BSQiwiWalletInvoice = 1448
BSQiwiWalletInvoiceResponse = 1449
BSUpdateInventoryFromProPack = 1450
BSUpdateInventoryFromProPackResponse = 1451
BSSendShippingRequest = 1452
BSSendShippingRequestResponse = 1453
BSGetProPackOrderStatus = 1454
BSGetProPackOrderStatusResponse = 1455
BSCheckJobRunning = 1456
BSCheckJobRunningResponse = 1457
BSResetPackagePurchaseRateLimit = 1458
BSResetPackagePurchaseRateLimitResponse = 1459
BSUpdatePaymentData = 1460
BSUpdatePaymentDataResponse = 1461
BSGetBillingAddress = 1462
BSGetBillingAddressResponse = 1463
BSGetCreditCardInfo = 1464
BSGetCreditCardInfoResponse = 1465
BSRemoveExpiredPaymentData = 1468
BSRemoveExpiredPaymentDataResponse = 1469
BSConvertToCurrentKeys = 1470
BSConvertToCurrentKeysResponse = 1471
BSInitPurchase = 1472
BSInitPurchaseResponse = 1473
BSCompletePurchase = 1474
BSCompletePurchaseResponse = 1475
BSPruneCardUsageStats = 1476
BSPruneCardUsageStatsResponse = 1477
BSStoreBankInformation = 1478
BSStoreBankInformationResponse = 1479
BSVerifyPOSAKey = 1480
BSVerifyPOSAKeyResponse = 1481
BSReverseRedeemPOSAKey = 1482
BSReverseRedeemPOSAKeyResponse = 1483
BSQueryFindCreditCard = 1484
BSQueryFindCreditCardResponse = 1485
BSStatusInquiryPOSAKey = 1486
BSStatusInquiryPOSAKeyResponse = 1487
BSValidateMoPaySignature = 1488 #: removed
BSValidateMoPaySignatureResponse = 1489 #: removed
BSMoPayConfirmProductDelivery = 1490 #: removed
BSMoPayConfirmProductDeliveryResponse = 1491 #: removed
BSGenerateMoPayMD5 = 1492 #: removed
BSGenerateMoPayMD5Response = 1493 #: removed
BSBoaCompraConfirmProductDelivery = 1494
BSBoaCompraConfirmProductDeliveryResponse = 1495
BSGenerateBoaCompraMD5 = 1496
BSGenerateBoaCompraMD5Response = 1497
BSCommitWPTxn = 1498
BSCommitAdyenTxn = 1499
BaseATS = 1500
ATSStartStressTest = 1501
ATSStopStressTest = 1502
ATSRunFailServerTest = 1503
ATSUFSPerfTestTask = 1504
ATSUFSPerfTestResponse = 1505
ATSCycleTCM = 1506
ATSInitDRMSStressTest = 1507
ATSCallTest = 1508
ATSCallTestReply = 1509
ATSStartExternalStress = 1510
ATSExternalStressJobStart = 1511
ATSExternalStressJobQueued = 1512
ATSExternalStressJobRunning = 1513
ATSExternalStressJobStopped = 1514
ATSExternalStressJobStopAll = 1515
ATSExternalStressActionResult = 1516
ATSStarted = 1517
ATSCSPerfTestTask = 1518
ATSCSPerfTestResponse = 1519
BaseDP = 1600
DPSetPublishingState = 1601
DPGamePlayedStats = 1602 #: removed
DPUniquePlayersStat = 1603
DPStreamingUniquePlayersStat = 1604
DPVacInfractionStats = 1605 #: removed
DPVacBanStats = 1606 #: removed
DPBlockingStats = 1607
DPNatTraversalStats = 1608
DPSteamUsageEvent = 1609 #: removed
DPVacCertBanStats = 1610 #: removed
DPVacCafeBanStats = 1611 #: removed
DPCloudStats = 1612
DPAchievementStats = 1613
DPAccountCreationStats = 1614 #: removed
DPGetPlayerCount = 1615
DPGetPlayerCountResponse = 1616
DPGameServersPlayersStats = 1617
DPDownloadRateStatistics = 1618 #: removed
DPFacebookStatistics = 1619 #: removed
ClientDPCheckSpecialSurvey = 1620
ClientDPCheckSpecialSurveyResponse = 1621
ClientDPSendSpecialSurveyResponse = 1622
ClientDPSendSpecialSurveyResponseReply = 1623
DPStoreSaleStatistics = 1624
ClientDPUpdateAppJobReport = 1625
ClientDPSteam2AppStarted = 1627 #: removed
DPUpdateContentEvent = 1626
ClientDPUnsignedInstallScript = 1627
DPPartnerMicroTxns = 1628
DPPartnerMicroTxnsResponse = 1629
ClientDPContentStatsReport = 1630
DPVRUniquePlayersStat = 1631
BaseCM = 1700
CMSetAllowState = 1701
CMSpewAllowState = 1702
CMSessionRejected = 1703
CMSetSecrets = 1704
CMGetSecrets = 1705
BaseDSS = 1800 #: removed
DSSNewFile = 1801 #: removed
DSSCurrentFileList = 1802 #: removed
DSSSynchList = 1803 #: removed
DSSSynchListResponse = 1804 #: removed
DSSSynchSubscribe = 1805 #: removed
DSSSynchUnsubscribe = 1806 #: removed
BaseEPM = 1900 #: removed
EPMStartProcess = 1901 #: removed
EPMStopProcess = 1902 #: removed
EPMRestartProcess = 1903 #: removed
GCSendClient = 2200 #: removed
BaseGC = 2200
AMRelayToGC = 2201 #: removed
GCUpdatePlayedState = 2202 #: removed
GCCmdRevive = 2203
GCCmdBounce = 2204 #: removed
GCCmdForceBounce = 2205 #: removed
GCCmdDown = 2206
GCCmdDeploy = 2207
GCCmdDeployResponse = 2208
GCCmdSwitch = 2209
AMRefreshSessions = 2210
GCUpdateGSState = 2211 #: removed
GCAchievementAwarded = 2212
GCSystemMessage = 2213
GCValidateSession = 2214 #: removed
GCValidateSessionResponse = 2215 #: removed
GCCmdStatus = 2216
GCRegisterWebInterfaces_Deprecated = 2217 #: removed
GCGetAccountDetails_DEPRECATED = 2218 #: removed
GCInterAppMessage = 2219
GCGetEmailTemplate = 2220
GCGetEmailTemplateResponse = 2221
GCHRelay = 2222
GCHRelayToClient = 2223
GCHUpdateSession = 2224
GCHRequestUpdateSession = 2225
GCHRequestStatus = 2226
GCHRequestStatusResponse = 2227
GCHAccountVacStatusChange = 2228
GCHSpawnGC = 2229
GCHSpawnGCResponse = 2230
GCHKillGC = 2231
GCHKillGCResponse = 2232
GCHAccountTradeBanStatusChange = 2233
GCHAccountLockStatusChange = 2234
GCHVacVerificationChange = 2235
GCHAccountPhoneNumberChange = 2236
GCHAccountTwoFactorChange = 2237
GCHInviteUserToLobby = 2238
BaseP2P = 2500
P2PIntroducerMessage = 2502
BaseSM = 2900
SMExpensiveReport = 2902
SMHourlyReport = 2903
SMFishingReport = 2904 #: removed
SMPartitionRenames = 2905
SMMonitorSpace = 2906
SMTestNextBuildSchemaConversion = 2907
SMTestNextBuildSchemaConversionResponse = 2908
BaseTest = 3000
FailServer = 3000
JobHeartbeatTest = 3001
JobHeartbeatTestResponse = 3002
BaseFTSRange = 3100
FTSGetBrowseCounts = 3101 #: removed
FTSGetBrowseCountsResponse = 3102 #: removed
FTSBrowseClans = 3103 #: removed
FTSBrowseClansResponse = 3104 #: removed
FTSSearchClansByLocation = 3105 #: removed
FTSSearchClansByLocationResponse = 3106 #: removed
FTSSearchPlayersByLocation = 3107 #: removed
FTSSearchPlayersByLocationResponse = 3108 #: removed
FTSClanDeleted = 3109 #: removed
FTSSearch = 3110 #: removed
FTSSearchResponse = 3111 #: removed
FTSSearchStatus = 3112 #: removed
FTSSearchStatusResponse = 3113 #: removed
FTSGetGSPlayStats = 3114 #: removed
FTSGetGSPlayStatsResponse = 3115 #: removed
FTSGetGSPlayStatsForServer = 3116 #: removed
FTSGetGSPlayStatsForServerResponse = 3117 #: removed
FTSReportIPUpdates = 3118 #: removed
BaseCCSRange = 3150
CCSGetComments = 3151 #: removed
CCSGetCommentsResponse = 3152 #: removed
CCSAddComment = 3153 #: removed
CCSAddCommentResponse = 3154 #: removed
CCSDeleteComment = 3155 #: removed
CCSDeleteCommentResponse = 3156 #: removed
CCSPreloadComments = 3157 #: removed
CCSNotifyCommentCount = 3158 #: removed
CCSGetCommentsForNews = 3159 #: removed
CCSGetCommentsForNewsResponse = 3160 #: removed
CCSDeleteAllCommentsByAuthor = 3161
CCSDeleteAllCommentsByAuthorResponse = 3162
BaseLBSRange = 3200
LBSSetScore = 3201
LBSSetScoreResponse = 3202
LBSFindOrCreateLB = 3203
LBSFindOrCreateLBResponse = 3204
LBSGetLBEntries = 3205
LBSGetLBEntriesResponse = 3206
LBSGetLBList = 3207
LBSGetLBListResponse = 3208
LBSSetLBDetails = 3209
LBSDeleteLB = 3210
LBSDeleteLBEntry = 3211
LBSResetLB = 3212
LBSResetLBResponse = 3213
LBSDeleteLBResponse = 3214
BaseOGS = 3400
OGSBeginSession = 3401
OGSBeginSessionResponse = 3402
OGSEndSession = 3403
OGSEndSessionResponse = 3404
OGSWriteAppSessionRow = 3406
BaseBRP = 3600
BRPStartShippingJobs = 3601
BRPProcessUSBankReports = 3602
BRPProcessGCReports = 3603
BRPProcessPPReports = 3604
BRPSettleNOVA = 3605 #: removed
BRPSettleCB = 3606 #: removed
BRPCommitGC = 3607
BRPCommitGCResponse = 3608
BRPFindHungTransactions = 3609
BRPCheckFinanceCloseOutDate = 3610
BRPProcessLicenses = 3611
BRPProcessLicensesResponse = 3612
BRPRemoveExpiredPaymentData = 3613
BRPRemoveExpiredPaymentDataResponse = 3614
BRPConvertToCurrentKeys = 3615
BRPConvertToCurrentKeysResponse = 3616
BRPPruneCardUsageStats = 3617
BRPPruneCardUsageStatsResponse = 3618
BRPCheckActivationCodes = 3619
BRPCheckActivationCodesResponse = 3620
BRPCommitWP = 3621
BRPCommitWPResponse = 3622
BRPProcessWPReports = 3623
BRPProcessPaymentRules = 3624
BRPProcessPartnerPayments = 3625
BRPCheckSettlementReports = 3626
BRPPostTaxToAvalara = 3628
BRPPostTransactionTax = 3629
BRPPostTransactionTaxResponse = 3630
BRPProcessIMReports = 3631
BaseAMRange2 = 4000
AMCreateChat = 4001
AMCreateChatResponse = 4002
AMUpdateChatMetadata = 4003 #: removed
AMPublishChatMetadata = 4004 #: removed
AMSetProfileURL = 4005
AMGetAccountEmailAddress = 4006
AMGetAccountEmailAddressResponse = 4007
# AMRequestFriendData = 4008 #: removed
AMRequestClanData = 4008
AMRouteToClients = 4009
AMLeaveClan = 4010
AMClanPermissions = 4011
AMClanPermissionsResponse = 4012
AMCreateClanEventDummyForRateLimiting = 4013
AMUpdateClanEventDummyForRateLimiting = 4015
AMCreateClanEventResponse = 4014
AMUpdateClanEvent = 4015
AMUpdateClanEventResponse = 4016
AMGetClanEvents = 4017
AMGetClanEventsResponse = 4018
AMDeleteClanEvent = 4019
AMDeleteClanEventResponse = 4020
AMSetClanPermissionSettings = 4021
AMSetClanPermissionSettingsResponse = 4022
AMGetClanPermissionSettings = 4023
AMGetClanPermissionSettingsResponse = 4024
AMPublishChatRoomInfo = 4025
ClientChatRoomInfo = 4026
AMCreateClanAnnouncement = 4027 #: removed
AMCreateClanAnnouncementResponse = 4028 #: removed
AMUpdateClanAnnouncement = 4029 #: removed
AMUpdateClanAnnouncementResponse = 4030 #: removed
AMGetClanAnnouncementsCount = 4031 #: removed
AMGetClanAnnouncementsCountResponse = 4032 #: removed
AMGetClanAnnouncements = 4033 #: removed
AMGetClanAnnouncementsResponse = 4034 #: removed
AMDeleteClanAnnouncement = 4035 #: removed
AMDeleteClanAnnouncementResponse = 4036 #: removed
AMGetSingleClanAnnouncement = 4037 #: removed
AMGetSingleClanAnnouncementResponse = 4038 #: removed
AMGetClanHistory = 4039
AMGetClanHistoryResponse = 4040
AMGetClanPermissionBits = 4041
AMGetClanPermissionBitsResponse = 4042
AMSetClanPermissionBits = 4043
AMSetClanPermissionBitsResponse = 4044
AMSessionInfoRequest = 4045
AMSessionInfoResponse = 4046
AMValidateWGToken = 4047
AMGetSingleClanEvent = 4048
AMGetSingleClanEventResponse = 4049
AMGetClanRank = 4050
AMGetClanRankResponse = 4051
AMSetClanRank = 4052
AMSetClanRankResponse = 4053
AMGetClanPOTW = 4054
AMGetClanPOTWResponse = 4055
AMSetClanPOTW = 4056
AMSetClanPOTWResponse = 4057
AMRequestChatMetadata = 4058 #: removed
AMDumpUser = 4059
AMKickUserFromClan = 4060
AMAddFounderToClan = 4061
AMValidateWGTokenResponse = 4062
AMSetCommunityState = 4063
AMSetAccountDetails = 4064
AMGetChatBanList = 4065
AMGetChatBanListResponse = 4066
AMUnBanFromChat = 4067
AMSetClanDetails = 4068
AMGetAccountLinks = 4069
AMGetAccountLinksResponse = 4070
AMSetAccountLinks = 4071
AMSetAccountLinksResponse = 4072
UGSGetUserGameStats = 4073
UGSGetUserGameStatsResponse = 4074
AMCheckClanMembership = 4075
AMGetClanMembers = 4076
AMGetClanMembersResponse = 4077
AMJoinPublicClan = 4078
AMNotifyChatOfClanChange = 4079
AMResubmitPurchase = 4080
AMAddFriend = 4081
AMAddFriendResponse = 4082
AMRemoveFriend = 4083
AMDumpClan = 4084
AMChangeClanOwner = 4085
AMCancelEasyCollect = 4086
AMCancelEasyCollectResponse = 4087
AMGetClanMembershipList = 4088 #: removed
AMGetClanMembershipListResponse = 4089 #: removed
AMClansInCommon = 4090
AMClansInCommonResponse = 4091
AMIsValidAccountID = 4092
AMConvertClan = 4093
AMGetGiftTargetListRelay = 4094 #: removed
AMWipeFriendsList = 4095
AMSetIgnored = 4096
AMClansInCommonCountResponse = 4097
AMFriendsList = 4098
AMFriendsListResponse = 4099
AMFriendsInCommon = 4100
AMFriendsInCommonResponse = 4101
AMFriendsInCommonCountResponse = 4102
AMClansInCommonCount = 4103
AMChallengeVerdict = 4104
AMChallengeNotification = 4105
AMFindGSByIP = 4106
AMFoundGSByIP = 4107
AMGiftRevoked = 4108
AMCreateAccountRecord = 4109
AMUserClanList = 4110
AMUserClanListResponse = 4111
AMGetAccountDetails2 = 4112
AMGetAccountDetailsResponse2 = 4113
AMSetCommunityProfileSettings = 4114
AMSetCommunityProfileSettingsResponse = 4115
AMGetCommunityPrivacyState = 4116
AMGetCommunityPrivacyStateResponse = 4117
AMCheckClanInviteRateLimiting = 4118
UGSGetUserAchievementStatus = 4119
AMGetIgnored = 4120
AMGetIgnoredResponse = 4121
AMSetIgnoredResponse = 4122
AMSetFriendRelationshipNone = 4123
AMGetFriendRelationship = 4124
AMGetFriendRelationshipResponse = 4125
AMServiceModulesCache = 4126
AMServiceModulesCall = 4127
AMServiceModulesCallResponse = 4128
AMGetCaptchaDataForIP = 4129
AMGetCaptchaDataForIPResponse = 4130
AMValidateCaptchaDataForIP = 4131
AMValidateCaptchaDataForIPResponse = 4132
AMTrackFailedAuthByIP = 4133
AMGetCaptchaDataByGID = 4134
AMGetCaptchaDataByGIDResponse = 4135
AMGetLobbyList = 4136 #: removed
AMGetLobbyListResponse = 4137 #: removed
AMGetLobbyMetadata = 4138 #: removed
AMGetLobbyMetadataResponse = 4139 #: removed
CommunityAddFriendNews = 4140
AMAddClanNews = 4141 #: removed
AMWriteNews = 4142 #: removed
AMFindClanUser = 4143
AMFindClanUserResponse = 4144
AMBanFromChat = 4145
AMGetUserHistoryResponse = 4146 #: removed
AMGetUserNewsSubscriptions = 4147
AMGetUserNewsSubscriptionsResponse = 4148
AMSetUserNewsSubscriptions = 4149
AMGetUserNews = 4150 #: removed
AMGetUserNewsResponse = 4151 #: removed
AMSendQueuedEmails = 4152
AMSetLicenseFlags = 4153
AMGetUserHistory = 4154 #: removed
CommunityDeleteUserNews = 4155
AMAllowUserFilesRequest = 4156
AMAllowUserFilesResponse = 4157
AMGetAccountStatus = 4158
AMGetAccountStatusResponse = 4159
AMEditBanReason = 4160
AMCheckClanMembershipResponse = 4161
AMProbeClanMembershipList = 4162
AMProbeClanMembershipListResponse = 4163
UGSGetUserAchievementStatusResponse = 4164
AMGetFriendsLobbies = 4165
AMGetFriendsLobbiesResponse = 4166
AMGetUserFriendNewsResponse = 4172
CommunityGetUserFriendNews = 4173
AMGetUserClansNewsResponse = 4174
AMGetUserClansNews = 4175
AMStoreInitPurchase = 4176 #: removed
AMStoreInitPurchaseResponse = 4177 #: removed
AMStoreGetFinalPrice = 4178 #: removed
AMStoreGetFinalPriceResponse = 4179 #: removed
AMStoreCompletePurchase = 4180 #: removed
AMStoreCancelPurchase = 4181 #: removed
AMStorePurchaseResponse = 4182 #: removed
AMCreateAccountRecordInSteam3 = 4183 #: removed
AMGetPreviousCBAccount = 4184
AMGetPreviousCBAccountResponse = 4185
AMUpdateBillingAddress = 4186 #: removed
AMUpdateBillingAddressResponse = 4187 #: removed
AMGetBillingAddress = 4188 #: removed
AMGetBillingAddressResponse = 4189 #: removed
AMGetUserLicenseHistory = 4190
AMGetUserLicenseHistoryResponse = 4191
# Restored: value was destroyed by a credentials-scrubbing pass (`<PASSWORD>`),
# which is a syntax error. 4194 is the canonical Steam EMsg value, consistent
# with the surrounding sequence (4190/4191 above, 4195 below).
AMSupportChangePassword = 4194
AMSupportChangeEmail = 4195
AMSupportChangeSecretQA = 4196 #: removed
AMResetUserVerificationGSByIP = 4197
AMUpdateGSPlayStats = 4198
AMSupportEnableOrDisable = 4199
AMGetComments = 4200 #: removed
AMGetCommentsResponse = 4201 #: removed
AMAddComment = 4202 #: removed
AMAddCommentResponse = 4203 #: removed
AMDeleteComment = 4204 #: removed
AMDeleteCommentResponse = 4205 #: removed
AMGetPurchaseStatus = 4206
AMSupportIsAccountEnabled = 4209
AMSupportIsAccountEnabledResponse = 4210
UGSGetUserStats = 4211
AMSupportKickSession = 4212
AMGSSearch = 4213
MarketingMessageUpdate = 4216
ChatServerRouteFriendMsg = 4219
AMTicketAuthRequestOrResponse = 4220
AMVerifyDepotManagementRights = 4222
AMVerifyDepotManagementRightsResponse = 4223
AMAddFreeLicense = 4224
AMGetUserFriendsMinutesPlayed = 4225 #: removed
AMGetUserFriendsMinutesPlayedResponse = 4226 #: removed
AMGetUserMinutesPlayed = 4227 #: removed
AMGetUserMinutesPlayedResponse = 4228 #: removed
AMValidateEmailLink = 4231
AMValidateEmailLinkResponse = 4232
AMAddUsersToMarketingTreatment = 4234 #: removed
UGSStoreUserStats = 4236
AMGetUserGameplayInfo = 4237 #: removed
AMGetUserGameplayInfoResponse = 4238 #: removed
AMGetCardList = 4239 #: removed
AMGetCardListResponse = 4240 #: removed
AMDeleteStoredCard = 4241
AMRevokeLegacyGameKeys = 4242
AMGetWalletDetails = 4244
AMGetWalletDetailsResponse = 4245
AMDeleteStoredPaymentInfo = 4246
AMGetStoredPaymentSummary = 4247
AMGetStoredPaymentSummaryResponse = 4248
AMGetWalletConversionRate = 4249
AMGetWalletConversionRateResponse = 4250
AMConvertWallet = 4251
AMConvertWalletResponse = 4252
AMRelayGetFriendsWhoPlayGame = 4253 #: removed
AMRelayGetFriendsWhoPlayGameResponse = 4254 #: removed
AMSetPreApproval = 4255
AMSetPreApprovalResponse = 4256
AMMarketingTreatmentUpdate = 4257 #: removed
AMCreateRefund = 4258
AMCreateRefundResponse = 4259
AMCreateChargeback = 4260
AMCreateChargebackResponse = 4261
AMCreateDispute = 4262
AMCreateDisputeResponse = 4263
AMClearDispute = 4264
AMCreateFinancialAdjustment = 4265
AMPlayerNicknameList = 4266
AMPlayerNicknameListResponse = 4267
AMSetDRMTestConfig = 4268
AMGetUserCurrentGameInfo = 4269
AMGetUserCurrentGameInfoResponse = 4270
AMGetGSPlayerList = 4271
AMGetGSPlayerListResponse = 4272
AMUpdatePersonaStateCache = 4275 #: removed
AMGetGameMembers = 4276
AMGetGameMembersResponse = 4277
AMGetSteamIDForMicroTxn = 4278
AMGetSteamIDForMicroTxnResponse = 4279
AMSetPartnerMember = 4280
AMRemovePublisherUser = 4281
AMGetUserLicenseList = 4282
AMGetUserLicenseListResponse = 4283
AMReloadGameGroupPolicy = 4284
AMAddFreeLicenseResponse = 4285
AMVACStatusUpdate = 4286
AMGetAccountDetails = 4287
AMGetAccountDetailsResponse = 4288
AMGetPlayerLinkDetails = 4289
AMGetPlayerLinkDetailsResponse = 4290
AMSubscribeToPersonaFeed = 4291 #: removed
AMGetUserVacBanList = 4292 #: removed
AMGetUserVacBanListResponse = 4293 #: removed
AMGetAccountFlagsForWGSpoofing = 4294
AMGetAccountFlagsForWGSpoofingResponse = 4295
AMGetFriendsWishlistInfo = 4296 #: removed
AMGetFriendsWishlistInfoResponse = 4297 #: removed
AMGetClanOfficers = 4298
AMGetClanOfficersResponse = 4299
AMNameChange = 4300
AMGetNameHistory = 4301
AMGetNameHistoryResponse = 4302
AMUpdateProviderStatus = 4305
AMClearPersonaMetadataBlob = 4306 #: removed
AMSupportRemoveAccountSecurity = 4307
AMIsAccountInCaptchaGracePeriod = 4308
AMIsAccountInCaptchaGracePeriodResponse = 4309
AMAccountPS3Unlink = 4310
AMAccountPS3UnlinkResponse = 4311
UGSStoreUserStatsResponse = 4312
AMGetAccountPSNInfo = 4313
AMGetAccountPSNInfoResponse = 4314
AMAuthenticatedPlayerList = 4315
AMGetUserGifts = 4316
AMGetUserGiftsResponse = 4317
AMTransferLockedGifts = 4320
AMTransferLockedGiftsResponse = 4321
AMPlayerHostedOnGameServer = 4322
AMGetAccountBanInfo = 4323
AMGetAccountBanInfoResponse = 4324
AMRecordBanEnforcement = 4325
AMRollbackGiftTransfer = 4326
AMRollbackGiftTransferResponse = 4327
AMHandlePendingTransaction = 4328
AMRequestClanDetails = 4329
AMDeleteStoredPaypalAgreement = 4330
AMGameServerUpdate = 4331
AMGameServerRemove = 4332
AMGetPaypalAgreements = 4333
AMGetPaypalAgreementsResponse = 4334
AMGameServerPlayerCompatibilityCheck = 4335
AMGameServerPlayerCompatibilityCheckResponse = 4336
AMRenewLicense = 4337
AMGetAccountCommunityBanInfo = 4338
AMGetAccountCommunityBanInfoResponse = 4339
# Restored: value was destroyed by a credentials-scrubbing pass (`<PASSWORD>`),
# which is a syntax error. 4340 is the canonical Steam EMsg value, consistent
# with the surrounding sequence (4338/4339 above, 4341 below).
AMGameServerAccountChangePassword = 4340
AMGameServerAccountDeleteAccount = 4341
AMRenewAgreement = 4342
AMSendEmail = 4343 #: removed
AMXsollaPayment = 4344
AMXsollaPaymentResponse = 4345
AMAcctAllowedToPurchase = 4346
AMAcctAllowedToPurchaseResponse = 4347
AMSwapKioskDeposit = 4348
AMSwapKioskDepositResponse = 4349
AMSetUserGiftUnowned = 4350
AMSetUserGiftUnownedResponse = 4351
AMClaimUnownedUserGift = 4352
AMClaimUnownedUserGiftResponse = 4353
AMSetClanName = 4354
AMSetClanNameResponse = 4355
AMGrantCoupon = 4356
AMGrantCouponResponse = 4357
AMIsPackageRestrictedInUserCountry = 4358
AMIsPackageRestrictedInUserCountryResponse = 4359
AMHandlePendingTransactionResponse = 4360
AMGrantGuestPasses2 = 4361
AMGrantGuestPasses2Response = 4362
AMSessionQuery = 4363
AMSessionQueryResponse = 4364
AMGetPlayerBanDetails = 4365
AMGetPlayerBanDetailsResponse = 4366
AMFinalizePurchase = 4367
AMFinalizePurchaseResponse = 4368
AMPersonaChangeResponse = 4372
AMGetClanDetailsForForumCreation = 4373
AMGetClanDetailsForForumCreationResponse = 4374
AMGetPendingNotificationCount = 4375
AMGetPendingNotificationCountResponse = 4376
AMPasswordHashUpgrade = 4377
AMMoPayPayment = 4378
AMMoPayPaymentResponse = 4379
AMBoaCompraPayment = 4380
AMBoaCompraPaymentResponse = 4381
AMExpireCaptchaByGID = 4382
AMCompleteExternalPurchase = 4383
AMCompleteExternalPurchaseResponse = 4384
AMResolveNegativeWalletCredits = 4385
AMResolveNegativeWalletCreditsResponse = 4386
AMPayelpPayment = 4387
AMPayelpPaymentResponse = 4388
AMPlayerGetClanBasicDetails = 4389
AMPlayerGetClanBasicDetailsResponse = 4390
AMMOLPayment = 4391
AMMOLPaymentResponse = 4392
GetUserIPCountry = 4393
GetUserIPCountryResponse = 4394
NotificationOfSuspiciousActivity = 4395
AMDegicaPayment = 4396
AMDegicaPaymentResponse = 4397
AMEClubPayment = 4398
AMEClubPaymentResponse = 4399
AMPayPalPaymentsHubPayment = 4400
AMPayPalPaymentsHubPaymentResponse = 4401
AMTwoFactorRecoverAuthenticatorRequest = 4402
AMTwoFactorRecoverAuthenticatorResponse = 4403
AMSmart2PayPayment = 4404
AMSmart2PayPaymentResponse = 4405
AMValidatePasswordResetCodeAndSendSmsRequest = 4406
AMValidatePasswordResetCodeAndSendSmsResponse = 4407
AMGetAccountResetDetailsRequest = 4408
AMGetAccountResetDetailsResponse = 4409
AMBitPayPayment = 4410
AMBitPayPaymentResponse = 4411
AMSendAccountInfoUpdate = 4412
AMSendScheduledGift = 4413
AMNodwinPayment = 4414
AMNodwinPaymentResponse = 4415
AMResolveWalletRevoke = 4416
AMResolveWalletReverseRevoke = 4417
AMFundedPayment = 4418
AMFundedPaymentResponse = 4419
AMRequestPersonaUpdateForChatServer = 4420
AMPerfectWorldPayment = 4421
AMPerfectWorldPaymentResponse = 4422
BasePSRange = 5000
PSCreateShoppingCart = 5001
PSCreateShoppingCartResponse = 5002
PSIsValidShoppingCart = 5003
PSIsValidShoppingCartResponse = 5004
PSAddPackageToShoppingCart = 5005
PSAddPackageToShoppingCartResponse = 5006
PSRemoveLineItemFromShoppingCart = 5007
PSRemoveLineItemFromShoppingCartResponse = 5008
PSGetShoppingCartContents = 5009
PSGetShoppingCartContentsResponse = 5010
PSAddWalletCreditToShoppingCart = 5011
PSAddWalletCreditToShoppingCartResponse = 5012
BaseUFSRange = 5200
ClientUFSUploadFileRequest = 5202
ClientUFSUploadFileResponse = 5203
ClientUFSUploadFileChunk = 5204
ClientUFSUploadFileFinished = 5205
ClientUFSGetFileListForApp = 5206
ClientUFSGetFileListForAppResponse = 5207
ClientUFSDownloadRequest = 5210
ClientUFSDownloadResponse = 5211
ClientUFSDownloadChunk = 5212
ClientUFSLoginRequest = 5213
ClientUFSLoginResponse = 5214
UFSReloadPartitionInfo = 5215
ClientUFSTransferHeartbeat = 5216
UFSSynchronizeFile = 5217
UFSSynchronizeFileResponse = 5218
ClientUFSDeleteFileRequest = 5219
ClientUFSDeleteFileResponse = 5220
UFSDownloadRequest = 5221
UFSDownloadResponse = 5222
UFSDownloadChunk = 5223
ClientUFSGetUGCDetails = 5226
ClientUFSGetUGCDetailsResponse = 5227
UFSUpdateFileFlags = 5228
UFSUpdateFileFlagsResponse = 5229
ClientUFSGetSingleFileInfo = 5230
ClientUFSGetSingleFileInfoResponse = 5231
ClientUFSShareFile = 5232
ClientUFSShareFileResponse = 5233
UFSReloadAccount = 5234
UFSReloadAccountResponse = 5235
UFSUpdateRecordBatched = 5236
UFSUpdateRecordBatchedResponse = 5237
UFSMigrateFile = 5238
UFSMigrateFileResponse = 5239
UFSGetUGCURLs = 5240
UFSGetUGCURLsResponse = 5241
UFSHttpUploadFileFinishRequest = 5242
UFSHttpUploadFileFinishResponse = 5243
UFSDownloadStartRequest = 5244
UFSDownloadStartResponse = 5245
UFSDownloadChunkRequest = 5246
UFSDownloadChunkResponse = 5247
UFSDownloadFinishRequest = 5248
UFSDownloadFinishResponse = 5249
UFSFlushURLCache = 5250
ClientUFSUploadCommit = 5251
ClientUFSUploadCommitResponse = 5252
UFSMigrateFileAppID = 5253
UFSMigrateFileAppIDResponse = 5254
BaseClient2 = 5400
ClientRequestForgottenPasswordEmail = 5401
ClientRequestForgottenPasswordEmailResponse = 5402
ClientCreateAccountResponse = 5403
ClientResetForgottenPassword = 5404
ClientResetForgottenPasswordResponse = 5405
ClientCreateAccount2 = 5406
ClientInformOfResetForgottenPassword = 5407
ClientInformOfResetForgottenPasswordResponse = 5408
ClientAnonUserLogOn_Deprecated = 5409 #: removed
ClientGamesPlayedWithDataBlob = 5410
ClientUpdateUserGameInfo = 5411
ClientFileToDownload = 5412
ClientFileToDownloadResponse = 5413
ClientLBSSetScore = 5414
ClientLBSSetScoreResponse = 5415
ClientLBSFindOrCreateLB = 5416
ClientLBSFindOrCreateLBResponse = 5417
ClientLBSGetLBEntries = 5418
ClientLBSGetLBEntriesResponse = 5419
ClientMarketingMessageUpdate = 5420 #: removed
ClientChatDeclined = 5426
ClientFriendMsgIncoming = 5427
ClientAuthList_Deprecated = 5428 #: removed
ClientTicketAuthComplete = 5429
ClientIsLimitedAccount = 5430
ClientRequestAuthList = 5431
ClientAuthList = 5432
ClientStat = 5433
ClientP2PConnectionInfo = 5434
ClientP2PConnectionFailInfo = 5435
ClientGetNumberOfCurrentPlayers = 5436 #: removed
ClientGetNumberOfCurrentPlayersResponse = 5437 #: removed
ClientGetDepotDecryptionKey = 5438
ClientGetDepotDecryptionKeyResponse = 5439
GSPerformHardwareSurvey = 5440
ClientGetAppBetaPasswords = 5441 #: removed
ClientGetAppBetaPasswordsResponse = 5442 #: removed
ClientEnableTestLicense = 5443
ClientEnableTestLicenseResponse = 5444
ClientDisableTestLicense = 5445
ClientDisableTestLicenseResponse = 5446
ClientRequestValidationMail = 5448
ClientRequestValidationMailResponse = 5449
# Restored: value was destroyed by a credentials-scrubbing pass (`<PASSWORD>`),
# which is a syntax error. 5450 is the canonical Steam EMsg value — its
# response message is 5451 on the next line.
ClientCheckAppBetaPassword = 5450
ClientCheckAppBetaPasswordResponse = 5451
ClientToGC = 5452
ClientFromGC = 5453
ClientRequestChangeMail = 5454
ClientRequestChangeMailResponse = 5455
ClientEmailAddrInfo = 5456
ClientPasswordChange3 = 5457
ClientEmailChange3 = 5458
ClientPersonalQAChange3 = 5459
ClientResetForgottenPassword3 = 5460
ClientRequestForgottenPasswordEmail3 = 5461
ClientCreateAccount3 = 5462 #: removed
ClientNewLoginKey = 5463
ClientNewLoginKeyAccepted = 5464
ClientLogOnWithHash_Deprecated = 5465 #: removed
ClientStoreUserStats2 = 5466
ClientStatsUpdated = 5467
ClientActivateOEMLicense = 5468
ClientRegisterOEMMachine = 5469
ClientRegisterOEMMachineResponse = 5470
ClientRequestedClientStats = 5480
ClientStat2Int32 = 5481
ClientStat2 = 5482
# Restored: value was destroyed by a credentials-scrubbing pass (`<PASSWORD>`),
# which is a syntax error. 5483 is the canonical Steam EMsg value — its
# response message is 5484 on the next line.
ClientVerifyPassword = 5483
ClientVerifyPasswordResponse = 5484
ClientDRMDownloadRequest = 5485
ClientDRMDownloadResponse = 5486
ClientDRMFinalResult = 5487
ClientGetFriendsWhoPlayGame = 5488
ClientGetFriendsWhoPlayGameResponse = 5489
ClientOGSBeginSession = 5490
ClientOGSBeginSessionResponse = 5491
ClientOGSEndSession = 5492
ClientOGSEndSessionResponse = 5493
ClientOGSWriteRow = 5494
ClientDRMTest = 5495
ClientDRMTestResult = 5496
ClientServerUnavailable = 5500
ClientServersAvailable = 5501
ClientRegisterAuthTicketWithCM = 5502
ClientGCMsgFailed = 5503
ClientMicroTxnAuthRequest = 5504
ClientMicroTxnAuthorize = 5505
ClientMicroTxnAuthorizeResponse = 5506
ClientAppMinutesPlayedData = 5507
ClientGetMicroTxnInfo = 5508
ClientGetMicroTxnInfoResponse = 5509
ClientMarketingMessageUpdate2 = 5510
ClientDeregisterWithServer = 5511
ClientSubscribeToPersonaFeed = 5512
ClientLogon = 5514
ClientGetClientDetails = 5515
ClientGetClientDetailsResponse = 5516
ClientReportOverlayDetourFailure = 5517
ClientGetClientAppList = 5518
ClientGetClientAppListResponse = 5519
ClientInstallClientApp = 5520
ClientInstallClientAppResponse = 5521
ClientUninstallClientApp = 5522
ClientUninstallClientAppResponse = 5523
ClientSetClientAppUpdateState = 5524
ClientSetClientAppUpdateStateResponse = 5525
ClientRequestEncryptedAppTicket = 5526
ClientRequestEncryptedAppTicketResponse = 5527
ClientWalletInfoUpdate = 5528
ClientLBSSetUGC = 5529
ClientLBSSetUGCResponse = 5530
ClientAMGetClanOfficers = 5531
ClientAMGetClanOfficersResponse = 5532
ClientCheckFileSignature = 5533 #: removed
ClientCheckFileSignatureResponse = 5534 #: removed
ClientFriendProfileInfo = 5535
ClientFriendProfileInfoResponse = 5536
ClientUpdateMachineAuth = 5537
ClientUpdateMachineAuthResponse = 5538
ClientReadMachineAuth = 5539
ClientReadMachineAuthResponse = 5540
ClientRequestMachineAuth = 5541
ClientRequestMachineAuthResponse = 5542
ClientScreenshotsChanged = 5543
ClientEmailChange4 = 5544
ClientEmailChangeResponse4 = 5545
ClientGetCDNAuthToken = 5546
ClientGetCDNAuthTokenResponse = 5547
ClientDownloadRateStatistics = 5548
ClientRequestAccountData = 5549
ClientRequestAccountDataResponse = 5550
ClientResetForgottenPassword4 = 5551
ClientHideFriend = 5552
ClientFriendsGroupsList = 5553
ClientGetClanActivityCounts = 5554
ClientGetClanActivityCountsResponse = 5555
ClientOGSReportString = 5556
ClientOGSReportBug = 5557
ClientSentLogs = 5558
ClientLogonGameServer = 5559
AMClientCreateFriendsGroup = 5560
AMClientCreateFriendsGroupResponse = 5561
AMClientDeleteFriendsGroup = 5562
AMClientDeleteFriendsGroupResponse = 5563
AMClientManageFriendsGroup = 5564
AMClientManageFriendsGroupResponse = 5565
AMClientAddFriendToGroup = 5566
AMClientAddFriendToGroupResponse = 5567
AMClientRemoveFriendFromGroup = 5568
AMClientRemoveFriendFromGroupResponse = 5569
ClientAMGetPersonaNameHistory = 5570
ClientAMGetPersonaNameHistoryResponse = 5571
ClientRequestFreeLicense = 5572
ClientRequestFreeLicenseResponse = 5573
ClientDRMDownloadRequestWithCrashData = 5574
ClientAuthListAck = 5575
ClientItemAnnouncements = 5576
ClientRequestItemAnnouncements = 5577
ClientFriendMsgEchoToSender = 5578
ClientChangeSteamGuardOptions = 5579 #: removed
ClientChangeSteamGuardOptionsResponse = 5580 #: removed
ClientOGSGameServerPingSample = 5581
ClientCommentNotifications = 5582
ClientRequestCommentNotifications = 5583
ClientPersonaChangeResponse = 5584
ClientRequestWebAPIAuthenticateUserNonce = 5585
ClientRequestWebAPIAuthenticateUserNonceResponse = 5586
ClientPlayerNicknameList = 5587
AMClientSetPlayerNickname = 5588
AMClientSetPlayerNicknameResponse = 5589
# ClientRequestOAuthTokenForApp = 5590 #: removed
# ClientRequestOAuthTokenForAppResponse = 5591 #: removed
ClientCreateAccountProto = 5590
ClientCreateAccountProtoResponse = 5591
ClientGetNumberOfCurrentPlayersDP = 5592
ClientGetNumberOfCurrentPlayersDPResponse = 5593
ClientServiceMethodLegacy = 5594
ClientServiceMethodLegacyResponse = 5595
ClientFriendUserStatusPublished = 5596
ClientCurrentUIMode = 5597
ClientVanityURLChangedNotification = 5598
ClientUserNotifications = 5599
BaseDFS = 5600
DFSGetFile = 5601
DFSInstallLocalFile = 5602
DFSConnection = 5603
DFSConnectionReply = 5604
ClientDFSAuthenticateRequest = 5605
ClientDFSAuthenticateResponse = 5606
ClientDFSEndSession = 5607
DFSPurgeFile = 5608
DFSRouteFile = 5609
DFSGetFileFromServer = 5610
DFSAcceptedResponse = 5611
DFSRequestPingback = 5612
DFSRecvTransmitFile = 5613
DFSSendTransmitFile = 5614
DFSRequestPingback2 = 5615
DFSResponsePingback2 = 5616
ClientDFSDownloadStatus = 5617
DFSStartTransfer = 5618
DFSTransferComplete = 5619
DFSRouteFileResponse = 5620
ClientNetworkingCertRequest = 5621
ClientNetworkingCertRequestResponse = 5622
ClientChallengeRequest = 5623
ClientChallengeResponse = 5624
BadgeCraftedNotification = 5625
ClientNetworkingMobileCertRequest = 5626
ClientNetworkingMobileCertRequestResponse = 5627
BaseMDS = 5800
ClientMDSLoginRequest = 5801 #: removed
ClientMDSLoginResponse = 5802 #: removed
ClientMDSUploadManifestRequest = 5803 #: removed
ClientMDSUploadManifestResponse = 5804 #: removed
ClientMDSTransmitManifestDataChunk = 5805 #: removed
ClientMDSHeartbeat = 5806 #: removed
ClientMDSUploadDepotChunks = 5807 #: removed
ClientMDSUploadDepotChunksResponse = 5808 #: removed
ClientMDSInitDepotBuildRequest = 5809 #: removed
ClientMDSInitDepotBuildResponse = 5810 #: removed
AMToMDSGetDepotDecryptionKey = 5812
MDSToAMGetDepotDecryptionKeyResponse = 5813
MDSGetVersionsForDepot = 5814 #: removed
MDSGetVersionsForDepotResponse = 5815 #: removed
# MDSSetPublicVersionForDepot = 5816 #: removed
# MDSSetPublicVersionForDepotResponse = 5817 #: removed
ClientMDSInitWorkshopBuildRequest = 5816 #: removed
ClientMDSInitWorkshopBuildResponse = 5817 #: removed
ClientMDSGetDepotManifest = 5818 #: removed
ClientMDSGetDepotManifestResponse = 5819 #: removed
ClientMDSGetDepotManifestChunk = 5820 #: removed
ClientMDSUploadRateTest = 5823 #: removed
ClientMDSUploadRateTestResponse = 5824 #: removed
MDSDownloadDepotChunksAck = 5825 #: removed
MDSContentServerStatsBroadcast = 5826 #: removed
MDSContentServerConfigRequest = 5827
MDSContentServerConfig = 5828
MDSGetDepotManifest = 5829
MDSGetDepotManifestResponse = 5830
MDSGetDepotManifestChunk = 5831
MDSGetDepotChunk = 5832
MDSGetDepotChunkResponse = 5833
MDSGetDepotChunkChunk = 5834
MDSUpdateContentServerConfig = 5835 #: removed
MDSGetServerListForUser = 5836
MDSGetServerListForUserResponse = 5837
ClientMDSRegisterAppBuild = 5838 #: removed
ClientMDSRegisterAppBuildResponse = 5839 #: removed
ClientMDSSetAppBuildLive = 5840 #: removed
ClientMDSSetAppBuildLiveResponse = 5841 #: removed
ClientMDSGetPrevDepotBuild = 5842 #: removed
ClientMDSGetPrevDepotBuildResponse = 5843 #: removed
MDSToCSFlushChunk = 5844
ClientMDSSignInstallScript = 5845 #: removed
ClientMDSSignInstallScriptResponse = 5846 #: removed
MDSMigrateChunk = 5847
MDSMigrateChunkResponse = 5848
MDSToCSFlushManifest = 5849
CSBase = 6200
CSPing = 6201
CSPingResponse = 6202
GMSBase = 6400
GMSGameServerReplicate = 6401
ClientGMSServerQuery = 6403
GMSClientServerQueryResponse = 6404
AMGMSGameServerUpdate = 6405
AMGMSGameServerRemove = 6406
GameServerOutOfDate = 6407
DeviceAuthorizationBase = 6500
ClientAuthorizeLocalDeviceRequest = 6501
# ClientAuthorizeLocalDevice = 6502 #: removed
ClientAuthorizeLocalDeviceResponse = 6502
ClientDeauthorizeDeviceRequest = 6503
ClientDeauthorizeDevice = 6504
ClientUseLocalDeviceAuthorizations = 6505
ClientGetAuthorizedDevices = 6506
ClientGetAuthorizedDevicesResponse = 6507
AMNotifySessionDeviceAuthorized = 6508
ClientAuthorizeLocalDeviceNotification = 6509
MMSBase = 6600
ClientMMSCreateLobby = 6601
ClientMMSCreateLobbyResponse = 6602
ClientMMSJoinLobby = 6603
ClientMMSJoinLobbyResponse = 6604
ClientMMSLeaveLobby = 6605
ClientMMSLeaveLobbyResponse = 6606
ClientMMSGetLobbyList = 6607
ClientMMSGetLobbyListResponse = 6608
ClientMMSSetLobbyData = 6609
ClientMMSSetLobbyDataResponse = 6610
ClientMMSGetLobbyData = 6611
ClientMMSLobbyData = 6612
ClientMMSSendLobbyChatMsg = 6613
ClientMMSLobbyChatMsg = 6614
ClientMMSSetLobbyOwner = 6615
ClientMMSSetLobbyOwnerResponse = 6616
ClientMMSSetLobbyGameServer = 6617
ClientMMSLobbyGameServerSet = 6618
ClientMMSUserJoinedLobby = 6619
ClientMMSUserLeftLobby = 6620
ClientMMSInviteToLobby = 6621
ClientMMSFlushFrenemyListCache = 6622
ClientMMSFlushFrenemyListCacheResponse = 6623
ClientMMSSetLobbyLinked = 6624
ClientMMSSetRatelimitPolicyOnClient = 6625
ClientMMSGetLobbyStatus = 6626
ClientMMSGetLobbyStatusResponse = 6627
MMSGetLobbyList = 6628
MMSGetLobbyListResponse = 6629
NonStdMsgBase = 6800
NonStdMsgMemcached = 6801
NonStdMsgHTTPServer = 6802
NonStdMsgHTTPClient = 6803
NonStdMsgWGResponse = 6804
NonStdMsgPHPSimulator = 6805
NonStdMsgChase = 6806
NonStdMsgDFSTransfer = 6807
NonStdMsgTests = 6808
NonStdMsgUMQpipeAAPL = 6809
NonStdMsgSyslog = 6810
NonStdMsgLogsink = 6811
NonStdMsgSteam2Emulator = 6812
NonStdMsgRTMPServer = 6813
NonStdMsgWebSocket = 6814
NonStdMsgRedis = 6815
UDSBase = 7000
ClientUDSP2PSessionStarted = 7001
ClientUDSP2PSessionEnded = 7002
UDSRenderUserAuth = 7003
UDSRenderUserAuthResponse = 7004
ClientInviteToGame = 7005
# UDSFindSession = 7006 #: removed
UDSHasSession = 7006
# UDSFindSessionResponse = 7007 #: removed
UDSHasSessionResponse = 7007
MPASBase = 7100
MPASVacBanReset = 7101
KGSBase = 7200
KGSAllocateKeyRange = 7201 #: removed
KGSAllocateKeyRangeResponse = 7202 #: removed
KGSGenerateKeys = 7203 #: removed
KGSGenerateKeysResponse = 7204 #: removed
KGSRemapKeys = 7205 #: removed
KGSRemapKeysResponse = 7206 #: removed
KGSGenerateGameStopWCKeys = 7207 #: removed
KGSGenerateGameStopWCKeysResponse = 7208 #: removed
UCMBase = 7300
ClientUCMAddScreenshot = 7301
ClientUCMAddScreenshotResponse = 7302
UCMValidateObjectExists = 7303 #: removed
UCMValidateObjectExistsResponse = 7304 #: removed
UCMResetCommunityContent = 7307
UCMResetCommunityContentResponse = 7308
ClientUCMDeleteScreenshot = 7309
ClientUCMDeleteScreenshotResponse = 7310
ClientUCMPublishFile = 7311
ClientUCMPublishFileResponse = 7312
ClientUCMGetPublishedFileDetails = 7313 #: removed
ClientUCMGetPublishedFileDetailsResponse = 7314 #: removed
ClientUCMDeletePublishedFile = 7315
ClientUCMDeletePublishedFileResponse = 7316
ClientUCMEnumerateUserPublishedFiles = 7317
ClientUCMEnumerateUserPublishedFilesResponse = 7318
ClientUCMSubscribePublishedFile = 7319 #: removed
ClientUCMSubscribePublishedFileResponse = 7320 #: removed
ClientUCMEnumerateUserSubscribedFiles = 7321
ClientUCMEnumerateUserSubscribedFilesResponse = 7322
ClientUCMUnsubscribePublishedFile = 7323 #: removed
ClientUCMUnsubscribePublishedFileResponse = 7324 #: removed
ClientUCMUpdatePublishedFile = 7325
ClientUCMUpdatePublishedFileResponse = 7326
UCMUpdatePublishedFile = 7327
UCMUpdatePublishedFileResponse = 7328
UCMDeletePublishedFile = 7329
UCMDeletePublishedFileResponse = 7330
UCMUpdatePublishedFileStat = 7331
UCMUpdatePublishedFileBan = 7332
UCMUpdatePublishedFileBanResponse = 7333
UCMUpdateTaggedScreenshot = 7334 #: removed
UCMAddTaggedScreenshot = 7335 #: removed
UCMRemoveTaggedScreenshot = 7336 #: removed
UCMReloadPublishedFile = 7337
UCMReloadUserFileListCaches = 7338
UCMPublishedFileReported = 7339
UCMUpdatePublishedFileIncompatibleStatus = 7340 #: removed
UCMPublishedFilePreviewAdd = 7341
UCMPublishedFilePreviewAddResponse = 7342
UCMPublishedFilePreviewRemove = 7343
UCMPublishedFilePreviewRemoveResponse = 7344
UCMPublishedFilePreviewChangeSortOrder = 7345 #: removed
UCMPublishedFilePreviewChangeSortOrderResponse = 7346 #: removed
ClientUCMPublishedFileSubscribed = 7347
ClientUCMPublishedFileUnsubscribed = 7348
UCMPublishedFileSubscribed = 7349
UCMPublishedFileUnsubscribed = 7350
UCMPublishFile = 7351
UCMPublishFileResponse = 7352
UCMPublishedFileChildAdd = 7353
UCMPublishedFileChildAddResponse = 7354
UCMPublishedFileChildRemove = 7355
UCMPublishedFileChildRemoveResponse = 7356
UCMPublishedFileChildChangeSortOrder = 7357 #: removed
UCMPublishedFileChildChangeSortOrderResponse = 7358 #: removed
UCMPublishedFileParentChanged = 7359
ClientUCMGetPublishedFilesForUser = 7360
ClientUCMGetPublishedFilesForUserResponse = 7361
UCMGetPublishedFilesForUser = 7362 #: removed
UCMGetPublishedFilesForUserResponse = 7363 #: removed
ClientUCMSetUserPublishedFileAction = 7364
ClientUCMSetUserPublishedFileActionResponse = 7365
ClientUCMEnumeratePublishedFilesByUserAction = 7366
ClientUCMEnumeratePublishedFilesByUserActionResponse = 7367
ClientUCMPublishedFileDeleted = 7368
UCMGetUserSubscribedFiles = 7369
UCMGetUserSubscribedFilesResponse = 7370
UCMFixStatsPublishedFile = 7371
UCMDeleteOldScreenshot = 7372 #: removed
UCMDeleteOldScreenshotResponse = 7373 #: removed
UCMDeleteOldVideo = 7374 #: removed
UCMDeleteOldVideoResponse = 7375 #: removed
UCMUpdateOldScreenshotPrivacy = 7376 #: removed
UCMUpdateOldScreenshotPrivacyResponse = 7377 #: removed
ClientUCMEnumerateUserSubscribedFilesWithUpdates = 7378
ClientUCMEnumerateUserSubscribedFilesWithUpdatesResponse = 7379
UCMPublishedFileContentUpdated = 7380
ClientUCMPublishedFileUpdated = 7381
ClientWorkshopItemChangesRequest = 7382
ClientWorkshopItemChangesResponse = 7383
ClientWorkshopItemInfoRequest = 7384
ClientWorkshopItemInfoResponse = 7385
FSBase = 7500
ClientRichPresenceUpload = 7501
ClientRichPresenceRequest = 7502
ClientRichPresenceInfo = 7503
FSRichPresenceRequest = 7504
FSRichPresenceResponse = 7505
FSComputeFrenematrix = 7506
FSComputeFrenematrixResponse = 7507
FSPlayStatusNotification = 7508
FSPublishPersonaStatus = 7509 #: removed
FSAddOrRemoveFollower = 7510
FSAddOrRemoveFollowerResponse = 7511
FSUpdateFollowingList = 7512
FSCommentNotification = 7513
FSCommentNotificationViewed = 7514
ClientFSGetFollowerCount = 7515
ClientFSGetFollowerCountResponse = 7516
ClientFSGetIsFollowing = 7517
ClientFSGetIsFollowingResponse = 7518
ClientFSEnumerateFollowingList = 7519
ClientFSEnumerateFollowingListResponse = 7520
FSGetPendingNotificationCount = 7521
FSGetPendingNotificationCountResponse = 7522
# ClientFSOfflineMessageNotification = 7523 #: renamed
# ClientFSRequestOfflineMessageCount = 7524 #: renamed
# ClientFSGetFriendMessageHistory = 7525 #: renamed
# ClientFSGetFriendMessageHistoryResponse = 7526 #: renamed
# ClientFSGetFriendMessageHistoryForOfflineMessages = 7527 #: renamed
ClientChatOfflineMessageNotification = 7523
ClientChatRequestOfflineMessageCount = 7524
ClientChatGetFriendMessageHistory = 7525
ClientChatGetFriendMessageHistoryResponse = 7526
ClientChatGetFriendMessageHistoryForOfflineMessages = 7527
ClientFSGetFriendsSteamLevels = 7528
ClientFSGetFriendsSteamLevelsResponse = 7529
AMRequestFriendData = 7530
CEGVersionSetEnableDisableRequest = 7600
DRMRange2 = 7600
CEGVersionSetEnableDisableResponse = 7601
CEGPropStatusDRMSRequest = 7602
CEGPropStatusDRMSResponse = 7603
CEGWhackFailureReportRequest = 7604
CEGWhackFailureReportResponse = 7605
DRMSFetchVersionSet = 7606
DRMSFetchVersionSetResponse = 7607
EconBase = 7700
EconTrading_InitiateTradeRequest = 7701
EconTrading_InitiateTradeProposed = 7702
EconTrading_InitiateTradeResponse = 7703
EconTrading_InitiateTradeResult = 7704
EconTrading_StartSession = 7705
EconTrading_CancelTradeRequest = 7706
EconFlushInventoryCache = 7707
EconFlushInventoryCacheResponse = 7708
EconCDKeyProcessTransaction = 7711
EconCDKeyProcessTransactionResponse = 7712
EconGetErrorLogs = 7713
EconGetErrorLogsResponse = 7714
RMRange = 7800
RMTestVerisignOTP = 7800
RMTestVerisignOTPResponse = 7801
RMDeleteMemcachedKeys = 7803
RMRemoteInvoke = 7804
BadLoginIPList = 7805
RMMsgTraceAddTrigger = 7806
RMMsgTraceRemoveTrigger = 7807
RMMsgTraceEvent = 7808
UGSUpdateGlobalStats = 7900
UGSBase = 7900
ClientUGSGetGlobalStats = 7901
ClientUGSGetGlobalStatsResponse = 7902
StoreUpdateRecommendationCount = 8000 #: removed
StoreBase = 8000
UMQLogonRequest = 8100
UMQBase = 8100
UMQLogonResponse = 8101
UMQLogoffRequest = 8102
UMQLogoffResponse = 8103
UMQSendChatMessage = 8104
UMQIncomingChatMessage = 8105
UMQPoll = 8106
UMQPollResults = 8107
UMQ2AM_ClientMsgBatch = 8108
UMQEnqueueMobileSalePromotions = 8109 #: removed
UMQEnqueueMobileAnnouncements = 8110 #: removed
WorkshopAcceptTOSRequest = 8200 #: removed
WorkshopBase = 8200
WorkshopAcceptTOSResponse = 8201 #: removed
WebAPIValidateOAuth2Token = <PASSWORD>
WebAPIBase = 8300
WebAPIValidateOAuth2TokenResponse = 8301
WebAPIInvalidateTokensForAccount = 8302 #: removed
WebAPIRegisterGCInterfaces = 8303
WebAPIInvalidateOAuthClientCache = 8304
WebAPIInvalidateOAuthTokenCache = 8305
WebAPISetSecrets = 8306
BackpackBase = 8400
BackpackAddToCurrency = 8401
BackpackAddToCurrencyResponse = 8402
CREBase = 8500
CRERankByTrend = 8501 #: removed
CRERankByTrendResponse = 8502 #: removed
CREItemVoteSummary = 8503
CREItemVoteSummaryResponse = 8504
CRERankByVote = 8505 #: removed
CRERankByVoteResponse = 8506 #: removed
CREUpdateUserPublishedItemVote = 8507
CREUpdateUserPublishedItemVoteResponse = 8508
CREGetUserPublishedItemVoteDetails = 8509
CREGetUserPublishedItemVoteDetailsResponse = 8510
CREEnumeratePublishedFiles = 8511
CREEnumeratePublishedFilesResponse = 8512
CREPublishedFileVoteAdded = 8513
SecretsRequestCredentialPair = 8600
SecretsBase = 8600
SecretsCredentialPairResponse = 8601
SecretsRequestServerIdentity = 8602 #: removed
SecretsServerIdentityResponse = 8603 #: removed
SecretsUpdateServerIdentities = 8604 #: removed
BoxMonitorReportRequest = 8700
BoxMonitorBase = 8700
BoxMonitorReportResponse = 8701
LogsinkWriteReport = 8800
LogsinkBase = 8800
PICSBase = 8900
ClientPICSChangesSinceRequest = 8901
ClientPICSChangesSinceResponse = 8902
ClientPICSProductInfoRequest = 8903
ClientPICSProductInfoResponse = 8904
ClientPICSAccessTokenRequest = 8905
ClientPICSAccessTokenResponse = 8906
WorkerProcess = 9000
WorkerProcessPingRequest = 9000
WorkerProcessPingResponse = 9001
WorkerProcessShutdown = 9002
DRMWorkerProcess = 9100
DRMWorkerProcessDRMAndSign = 9100
DRMWorkerProcessDRMAndSignResponse = 9101
DRMWorkerProcessSteamworksInfoRequest = 9102
DRMWorkerProcessSteamworksInfoResponse = 9103
DRMWorkerProcessInstallDRMDLLRequest = 9104
DRMWorkerProcessInstallDRMDLLResponse = 9105
DRMWorkerProcessSecretIdStringRequest = 9106
DRMWorkerProcessSecretIdStringResponse = 9107
DRMWorkerProcessGetDRMGuidsFromFileRequest = 9108 #: removed
DRMWorkerProcessGetDRMGuidsFromFileResponse = 9109 #: removed
DRMWorkerProcessInstallProcessedFilesRequest = 9110
DRMWorkerProcessInstallProcessedFilesResponse = 9111
DRMWorkerProcessExamineBlobRequest = 9112
DRMWorkerProcessExamineBlobResponse = 9113
DRMWorkerProcessDescribeSecretRequest = 9114
DRMWorkerProcessDescribeSecretResponse = 9115
DRMWorkerProcessBackfillOriginalRequest = 9116
DRMWorkerProcessBackfillOriginalResponse = 9117
DRMWorkerProcessValidateDRMDLLRequest = 9118
DRMWorkerProcessValidateDRMDLLResponse = 9119
DRMWorkerProcessValidateFileRequest = 9120
DRMWorkerProcessValidateFileResponse = 9121
DRMWorkerProcessSplitAndInstallRequest = 9122
DRMWorkerProcessSplitAndInstallResponse = 9123
DRMWorkerProcessGetBlobRequest = 9124
DRMWorkerProcessGetBlobResponse = 9125
DRMWorkerProcessEvaluateCrashRequest = 9126
DRMWorkerProcessEvaluateCrashResponse = 9127
DRMWorkerProcessAnalyzeFileRequest = 9128
DRMWorkerProcessAnalyzeFileResponse = 9129
DRMWorkerProcessUnpackBlobRequest = 9130
DRMWorkerProcessUnpackBlobResponse = 9131
DRMWorkerProcessInstallAllRequest = 9132
DRMWorkerProcessInstallAllResponse = 9133
TestWorkerProcess = 9200
TestWorkerProcessLoadUnloadModuleRequest = 9200
TestWorkerProcessLoadUnloadModuleResponse = 9201
TestWorkerProcessServiceModuleCallRequest = 9202
TestWorkerProcessServiceModuleCallResponse = 9203
QuestServerBase = 9300
ClientGetEmoticonList = 9330
ClientEmoticonList = 9331
# ClientSharedLibraryBase = 9400 #: removed
SLCUserSessionStatus = 9400
SLCBase = 9400
SLCRequestUserSessionStatus = 9401
SLCSharedLicensesLockStatus = 9402
ClientSharedLicensesLockStatus = 9403 #: removed
ClientSharedLicensesStopPlaying = 9404 #: removed
ClientSharedLibraryLockStatus = 9405
ClientSharedLibraryStopPlaying = 9406
SLCOwnerLibraryChanged = 9407
SLCSharedLibraryChanged = 9408
RemoteClientAuth_OBSOLETE = 9500
RemoteClientBase = 9500
RemoteClientAuthResponse_OBSOLETE = 9501
RemoteClientAppStatus = 9502
RemoteClientStartStream = 9503
RemoteClientStartStreamResponse = 9504
RemoteClientPing = 9505
RemoteClientPingResponse = 9506
ClientUnlockStreaming = 9507
ClientUnlockStreamingResponse = 9508
RemoteClientAcceptEULA = 9509
RemoteClientGetControllerConfig = 9510
RemoteClientGetControllerConfigResponse = 9511
RemoteClientStreamingEnabled = 9512
ClientUnlockHEVC = 9513
ClientUnlockHEVCResponse = 9514
RemoteClientStatusRequest = 9515
RemoteClientStatusResponse = 9516
ClientPlayingSessionState = 9600
ClientConcurrentSessionsBase = 9600
ClientKickPlayingSession = 9601
ClientBroadcastInit = 9700 #: removed
ClientBroadcastBase = 9700
ClientBroadcastFrames = 9701
ClientBroadcastDisconnect = 9702
ClientBroadcastScreenshot = 9703
ClientBroadcastUploadConfig = 9704
ClientVoiceCallPreAuthorize = 9800 #: removed
BaseClient3 = 9800
ClientVoiceCallPreAuthorizeResponse = 9801
ClientServerTimestampRequest = 9802
ClientServerTimestampResponse = 9803
ClientLANP2PRequestChunk = 9900
ClientLANP2PBase = 9900
ClientLANP2PRequestChunkResponse = 9901
ClientLANP2PMax = 9999
# BaseWatchdogServer = 10000
NotifyWatchdog = 10000
ClientSiteLicenseSiteInfoNotification = 10100
ClientSiteLicenseBase = 10100
ClientSiteLicenseCheckout = 10101
ClientSiteLicenseCheckoutResponse = 10102
ClientSiteLicenseGetAvailableSeats = 10103
ClientSiteLicenseGetAvailableSeatsResponse = 10104
ClientSiteLicenseGetContentCacheInfo = 10105
ClientSiteLicenseGetContentCacheInfoResponse = 10106
ChatServerGetPendingNotificationCount = 12000
BaseChatServer = 12000
ChatServerGetPendingNotificationCountResponse = 12001
ServerSecretChanged = 12100
BaseSecretServer = 12100
|
en
| 0.94147
|
The EMsg enum contains many members and takes a bit to load. For this reason it is seperate, and imported only when needed. #: removed #: removed # ClientSessionUpdateAuthTicket = 137 #: removed #: removed #: removed #: removed #: removed # AISUpdatePackageInfo = 404 #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: 
removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed # AMRequestFriendData = 4008 #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed # ClientRequestOAuthTokenForApp = 5590 #: removed # ClientRequestOAuthTokenForAppResponse = 5591 #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed # MDSSetPublicVersionForDepot = 5816 #: removed # MDSSetPublicVersionForDepotResponse = 5817 #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed # ClientAuthorizeLocalDevice = 6502 #: removed # UDSFindSession = 7006 #: removed # UDSFindSessionResponse = 7007 #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: 
removed #: removed #: removed #: removed # ClientFSOfflineMessageNotification = 7523 #: renamed # ClientFSRequestOfflineMessageCount = 7524 #: renamed # ClientFSGetFriendMessageHistory = 7525 #: renamed # ClientFSGetFriendMessageHistoryResponse = 7526 #: renamed # ClientFSGetFriendMessageHistoryForOfflineMessages = 7527 #: renamed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed #: removed # ClientSharedLibraryBase = 9400 #: removed #: removed #: removed #: removed #: removed # BaseWatchdogServer = 10000
| 1.577182
| 2
|
gpytorch/kernels/rbf_kernel_grad.py
|
shalijiang/gpytorch
| 2
|
6629409
|
<gh_stars>1-10
#!/usr/bin/env python3
from .rbf_kernel import RBFKernel
import torch
from ..lazy.kronecker_product_lazy_tensor import KroneckerProductLazyTensor
class RBFKernelGrad(RBFKernel):
    r"""
    Computes a covariance matrix of the RBF kernel that models the covariance
    between the values and partial derivatives for inputs :math:`\mathbf{x_1}`
    and :math:`\mathbf{x_2}`.

    See :class:`gpytorch.kernels.Kernel` for descriptions of the lengthscale options.

    .. note::

        This kernel does not have an `outputscale` parameter. To add a scaling parameter,
        decorate this kernel with a :class:`gpytorch.kernels.ScaleKernel`.

    Args:
        :attr:`batch_shape` (torch.Size, optional):
            Set this if you want a separate lengthscale for each
            batch of input data. It should be `b` if :attr:`x1` is a `b x n x d` tensor. Default: `torch.Size([])`.
        :attr:`active_dims` (tuple of ints, optional):
            Set this if you want to compute the covariance of only a few input dimensions. The ints
            corresponds to the indices of the dimensions. Default: `None`.
        :attr:`lengthscale_prior` (Prior, optional):
            Set this if you want to apply a prior to the lengthscale parameter. Default: `None`.
        :attr:`lengthscale_constraint` (Constraint, optional):
            Set this if you want to apply a constraint to the lengthscale parameter. Default: `Positive`.
        :attr:`eps` (float):
            The minimum value that the lengthscale can take (prevents divide by zero errors). Default: `1e-6`.

    Attributes:
        :attr:`lengthscale` (Tensor):
            The lengthscale parameter. Size/shape of parameter depends on the
            :attr:`ard_num_dims` and :attr:`batch_shape` arguments.

    Example:
        >>> x = torch.randn(10, 5)
        >>> # Non-batch: Simple option
        >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernelGrad())
        >>> covar = covar_module(x)  # Output: LazyTensor of size (60 x 60), where 60 = n * (d + 1)
        >>>
        >>> batch_x = torch.randn(2, 10, 5)
        >>> # Batch: Simple option
        >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernelGrad())
        >>> # Batch: different lengthscale for each batch
        >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernelGrad(batch_shape=torch.Size([2])))
        >>> covar = covar_module(x)  # Output: LazyTensor of size (2 x 60 x 60)
    """

    def forward(self, x1, x2, diag=False, **params):
        """Evaluate the value/derivative covariance between ``x1`` and ``x2``.

        Returns (per batch) an ``n1*(d+1) x n2*(d+1)`` matrix in MultiTask
        ordering: each data point's value entry is immediately followed by its
        ``d`` partial-derivative entries. With ``diag=True`` (only valid when
        ``x1 == x2``) the diagonal of that matrix is returned instead.
        """
        batch_shape = x1.shape[:-2]
        n1, d = x1.shape[-2:]
        n2 = x2.shape[-2]

        K = torch.zeros(*batch_shape, n1 * (d + 1), n2 * (d + 1), device=x1.device, dtype=x1.dtype)

        if not diag:
            # Scale the inputs by the lengthscale (for stability)
            x1_ = x1.div(self.lengthscale)
            x2_ = x2.div(self.lengthscale)

            # Form all possible rank-1 products for the gradient and Hessian blocks
            outer = x1_.view(*batch_shape, n1, 1, d) - x2_.view(*batch_shape, 1, n2, d)
            outer = outer / self.lengthscale
            outer = torch.transpose(outer, -1, -2).contiguous()

            # 1) Kernel block
            diff = self._covar_dist(x1_, x2_, square_dist=True, **params)
            K_11 = diff.div_(-2).exp_()
            K[..., :n1, :n2] = K_11

            # 2) First gradient block
            outer1 = outer.view(*batch_shape, n1, n2 * d)
            K[..., :n1, n2:] = outer1 * K_11.repeat([1, 1, d])

            # 3) Second gradient block
            outer2 = outer.transpose(-1, -3).contiguous().view(*batch_shape, n2, n1 * d)
            outer2 = outer2.transpose(-1, -2)
            K[..., n1:, :n2] = -outer2 * K_11.repeat([1, d, 1])

            # 4) Hessian block
            outer3 = outer1.repeat([1, d, 1]) * outer2.repeat([1, 1, d])
            # BUGFIX: use the out-of-place ``pow(2)``. The original in-place
            # ``pow_(2)`` squared the tensor returned by the ``lengthscale``
            # property as a side effect of evaluating the kernel.
            kp = KroneckerProductLazyTensor(
                torch.eye(d, d, device=x1.device, dtype=x1.dtype).repeat(*batch_shape, 1, 1) / self.lengthscale.pow(2),
                torch.ones(n1, n2, device=x1.device, dtype=x1.dtype).repeat(*batch_shape, 1, 1)
            )
            chain_rule = kp.evaluate() - outer3
            K[..., n1:, n2:] = chain_rule * K_11.repeat([1, d, d])

            # Symmetrize for stability
            if n1 == n2 and torch.eq(x1, x2).all():
                K = 0.5 * (K.transpose(-1, -2) + K)

            # Apply a perfect shuffle permutation to match the MultiTask ordering
            pi1 = torch.arange(n1 * (d + 1)).view(d + 1, n1).t().contiguous().view((n1 * (d + 1)))
            pi2 = torch.arange(n2 * (d + 1)).view(d + 1, n2).t().contiguous().view((n2 * (d + 1)))
            K = K[..., pi1, :][..., :, pi2]

            return K
        else:
            if not (n1 == n2 and torch.eq(x1, x2).all()):
                raise RuntimeError("diag=True only works when x1 == x2")

            kernel_diag = super(RBFKernelGrad, self).forward(x1, x2, diag=True)
            # Out-of-place ``pow(2)`` here as well, to avoid mutating the
            # lengthscale tensor in-place.
            grad_diag = torch.ones(*batch_shape, n2, d, device=x1.device, dtype=x1.dtype) / self.lengthscale.pow(2)
            grad_diag = grad_diag.transpose(-1, -2).contiguous().view(*batch_shape, n2 * d)
            k_diag = torch.cat((kernel_diag, grad_diag), dim=-1)
            pi = torch.arange(n2 * (d + 1)).view(d + 1, n2).t().contiguous().view((n2 * (d + 1)))
            return k_diag[..., pi]

    def size(self, x1, x2):
        """
        Given `x_1` with `n_1` data points and `x_2` with `n_2` data points, both in
        `d` dimensions, RBFKernelGrad returns an `n_1(d+1) x n_2(d+1)` kernel matrix.
        """
        non_batch_size = ((x1.size(-1) + 1) * x1.size(-2), (x2.size(-1) + 1) * x2.size(-2))
        if x1.ndimension() == 3:
            return torch.Size((x1.size(0),) + non_batch_size)
        else:
            return torch.Size(non_batch_size)
|
#!/usr/bin/env python3
from .rbf_kernel import RBFKernel
import torch
from ..lazy.kronecker_product_lazy_tensor import KroneckerProductLazyTensor
class RBFKernelGrad(RBFKernel):
    r"""
    Computes a covariance matrix of the RBF kernel that models the covariance
    between the values and partial derivatives for inputs :math:`\mathbf{x_1}`
    and :math:`\mathbf{x_2}`.

    See :class:`gpytorch.kernels.Kernel` for descriptions of the lengthscale options.

    .. note::

        This kernel does not have an `outputscale` parameter. To add a scaling parameter,
        decorate this kernel with a :class:`gpytorch.kernels.ScaleKernel`.

    Args:
        :attr:`batch_shape` (torch.Size, optional):
            Set this if you want a separate lengthscale for each
            batch of input data. It should be `b` if :attr:`x1` is a `b x n x d` tensor. Default: `torch.Size([])`.
        :attr:`active_dims` (tuple of ints, optional):
            Set this if you want to compute the covariance of only a few input dimensions. The ints
            corresponds to the indices of the dimensions. Default: `None`.
        :attr:`lengthscale_prior` (Prior, optional):
            Set this if you want to apply a prior to the lengthscale parameter. Default: `None`.
        :attr:`lengthscale_constraint` (Constraint, optional):
            Set this if you want to apply a constraint to the lengthscale parameter. Default: `Positive`.
        :attr:`eps` (float):
            The minimum value that the lengthscale can take (prevents divide by zero errors). Default: `1e-6`.

    Attributes:
        :attr:`lengthscale` (Tensor):
            The lengthscale parameter. Size/shape of parameter depends on the
            :attr:`ard_num_dims` and :attr:`batch_shape` arguments.

    Example:
        >>> x = torch.randn(10, 5)
        >>> # Non-batch: Simple option
        >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernelGrad())
        >>> covar = covar_module(x)  # Output: LazyTensor of size (60 x 60), where 60 = n * (d + 1)
        >>>
        >>> batch_x = torch.randn(2, 10, 5)
        >>> # Batch: Simple option
        >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernelGrad())
        >>> # Batch: different lengthscale for each batch
        >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernelGrad(batch_shape=torch.Size([2])))
        >>> covar = covar_module(x)  # Output: LazyTensor of size (2 x 60 x 60)
    """

    def forward(self, x1, x2, diag=False, **params):
        """Evaluate the value/derivative covariance between ``x1`` and ``x2``.

        Returns (per batch) an ``n1*(d+1) x n2*(d+1)`` matrix in MultiTask
        ordering: each data point's value entry is immediately followed by its
        ``d`` partial-derivative entries. With ``diag=True`` (only valid when
        ``x1 == x2``) the diagonal of that matrix is returned instead.
        """
        batch_shape = x1.shape[:-2]
        n1, d = x1.shape[-2:]
        n2 = x2.shape[-2]

        K = torch.zeros(*batch_shape, n1 * (d + 1), n2 * (d + 1), device=x1.device, dtype=x1.dtype)

        if not diag:
            # Scale the inputs by the lengthscale (for stability)
            x1_ = x1.div(self.lengthscale)
            x2_ = x2.div(self.lengthscale)

            # Form all possible rank-1 products for the gradient and Hessian blocks
            outer = x1_.view(*batch_shape, n1, 1, d) - x2_.view(*batch_shape, 1, n2, d)
            outer = outer / self.lengthscale
            outer = torch.transpose(outer, -1, -2).contiguous()

            # 1) Kernel block
            diff = self._covar_dist(x1_, x2_, square_dist=True, **params)
            K_11 = diff.div_(-2).exp_()
            K[..., :n1, :n2] = K_11

            # 2) First gradient block
            outer1 = outer.view(*batch_shape, n1, n2 * d)
            K[..., :n1, n2:] = outer1 * K_11.repeat([1, 1, d])

            # 3) Second gradient block
            outer2 = outer.transpose(-1, -3).contiguous().view(*batch_shape, n2, n1 * d)
            outer2 = outer2.transpose(-1, -2)
            K[..., n1:, :n2] = -outer2 * K_11.repeat([1, d, 1])

            # 4) Hessian block
            outer3 = outer1.repeat([1, d, 1]) * outer2.repeat([1, 1, d])
            # BUGFIX: use the out-of-place ``pow(2)``. The original in-place
            # ``pow_(2)`` squared the tensor returned by the ``lengthscale``
            # property as a side effect of evaluating the kernel.
            kp = KroneckerProductLazyTensor(
                torch.eye(d, d, device=x1.device, dtype=x1.dtype).repeat(*batch_shape, 1, 1) / self.lengthscale.pow(2),
                torch.ones(n1, n2, device=x1.device, dtype=x1.dtype).repeat(*batch_shape, 1, 1)
            )
            chain_rule = kp.evaluate() - outer3
            K[..., n1:, n2:] = chain_rule * K_11.repeat([1, d, d])

            # Symmetrize for stability
            if n1 == n2 and torch.eq(x1, x2).all():
                K = 0.5 * (K.transpose(-1, -2) + K)

            # Apply a perfect shuffle permutation to match the MultiTask ordering
            pi1 = torch.arange(n1 * (d + 1)).view(d + 1, n1).t().contiguous().view((n1 * (d + 1)))
            pi2 = torch.arange(n2 * (d + 1)).view(d + 1, n2).t().contiguous().view((n2 * (d + 1)))
            K = K[..., pi1, :][..., :, pi2]

            return K
        else:
            if not (n1 == n2 and torch.eq(x1, x2).all()):
                raise RuntimeError("diag=True only works when x1 == x2")

            kernel_diag = super(RBFKernelGrad, self).forward(x1, x2, diag=True)
            # Out-of-place ``pow(2)`` here as well, to avoid mutating the
            # lengthscale tensor in-place.
            grad_diag = torch.ones(*batch_shape, n2, d, device=x1.device, dtype=x1.dtype) / self.lengthscale.pow(2)
            grad_diag = grad_diag.transpose(-1, -2).contiguous().view(*batch_shape, n2 * d)
            k_diag = torch.cat((kernel_diag, grad_diag), dim=-1)
            pi = torch.arange(n2 * (d + 1)).view(d + 1, n2).t().contiguous().view((n2 * (d + 1)))
            return k_diag[..., pi]

    def size(self, x1, x2):
        """
        Given `x_1` with `n_1` data points and `x_2` with `n_2` data points, both in
        `d` dimensions, RBFKernelGrad returns an `n_1(d+1) x n_2(d+1)` kernel matrix.
        """
        non_batch_size = ((x1.size(-1) + 1) * x1.size(-2), (x2.size(-1) + 1) * x2.size(-2))
        if x1.ndimension() == 3:
            return torch.Size((x1.size(0),) + non_batch_size)
        else:
            return torch.Size(non_batch_size)
|
en
| 0.568849
|
#!/usr/bin/env python3 Computes a covariance matrix of the RBF kernel that models the covariance between the values and partial derivatives for inputs :math:`\mathbf{x_1}` and :math:`\mathbf{x_2}`. See :class:`gpytorch.kernels.Kernel` for descriptions of the lengthscale options. .. note:: This kernel does not have an `outputscale` parameter. To add a scaling parameter, decorate this kernel with a :class:`gpytorch.kernels.ScaleKernel`. Args: :attr:`batch_shape` (torch.Size, optional): Set this if you want a separate lengthscale for each batch of input data. It should be `b` if :attr:`x1` is a `b x n x d` tensor. Default: `torch.Size([])`. :attr:`active_dims` (tuple of ints, optional): Set this if you want to compute the covariance of only a few input dimensions. The ints corresponds to the indices of the dimensions. Default: `None`. :attr:`lengthscale_prior` (Prior, optional): Set this if you want to apply a prior to the lengthscale parameter. Default: `None`. :attr:`lengthscale_constraint` (Constraint, optional): Set this if you want to apply a constraint to the lengthscale parameter. Default: `Positive`. :attr:`eps` (float): The minimum value that the lengthscale can take (prevents divide by zero errors). Default: `1e-6`. Attributes: :attr:`lengthscale` (Tensor): The lengthscale parameter. Size/shape of parameter depends on the :attr:`ard_num_dims` and :attr:`batch_shape` arguments. 
Example: >>> x = torch.randn(10, 5) >>> # Non-batch: Simple option >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernelGrad()) >>> covar = covar_module(x) # Output: LazyTensor of size (60 x 60), where 60 = n * (d + 1) >>> >>> batch_x = torch.randn(2, 10, 5) >>> # Batch: Simple option >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernelGrad()) >>> # Batch: different lengthscale for each batch >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernelGrad(batch_shape=torch.Size([2]))) >>> covar = covar_module(x) # Output: LazyTensor of size (2 x 60 x 60) # Scale the inputs by the lengthscale (for stability) # Form all possible rank-1 products for the gradient and Hessian blocks # 1) Kernel block # 2) First gradient block # 3) Second gradient block # 4) Hessian block # Symmetrize for stability # Apply a perfect shuffle permutation to match the MutiTask ordering Given `x_1` with `n_1` data points and `x_2` with `n_2` data points, both in `d` dimensions, RBFKernelGrad returns an `n_1(d+1) x n_2(d+1)` kernel matrix.
| 2.6327
| 3
|
simple_exercises/lanesexercises/exam1_repitition/1.py
|
ilante/programming_immanuela_englander
| 0
|
6629410
|
<filename>simple_exercises/lanesexercises/exam1_repitition/1.py<gh_stars>0
#Write a protram that takes a string from the user and prints on the elememts in odd postion on the screen (the first postion is 0).
x=input("Dear user please provide a string ")
print(x[1:len(x):2])
|
<filename>simple_exercises/lanesexercises/exam1_repitition/1.py<gh_stars>0
#Write a protram that takes a string from the user and prints on the elememts in odd postion on the screen (the first postion is 0).
x=input("Dear user please provide a string ")
print(x[1:len(x):2])
|
en
| 0.928512
|
#Write a protram that takes a string from the user and prints on the elememts in odd postion on the screen (the first postion is 0).
| 3.972702
| 4
|
setup.py
|
xypron/pyrelayctl
| 6
|
6629411
|
#!/usr/bin/env python3
#
# Copyright (c) 2016, <NAME> <<EMAIL>>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from distutils.core import setup
import setuptools
import relayctl
setup(
name='pyrelayctl',
version='0.1',
description='Library for FT232R controlled relay boards',
author='<NAME>',
author_email='<EMAIL>',
license = 'BSD',
url='https://github.com/xypron/pyrelayctl',
packages=['relayctl'],
install_requires=["pyusb >= 1.0.0a"],
long_description =
"""
PyRelayCtl is a library to control FTDI FT245R based relay boards.
This includes the SainSmart 4-channel 5V USB relay board.
The outlets can be switched on and off via USB.
The library depends on PyUSB (https://github.com/walac/pyusb).
On Debian PyUSB can be installed using::
apt-get install python3-usb
Per default, only root is allowed to use devices directly, therefore the
library also only works as root.
To allow group relayctl access create file /lib/udev/rules.d/60-relayctl.rules
with the following content::
SUBSYSTEM=="usb", ATTR{idVendor}=="0403", ATTR{idProduct}=="6001", GROUP="relayctl", MODE="660"
Then reload the udev rules with::
udevadm control --reload-rules
""",
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX',
'Programming Language :: Python :: 3',
'Topic :: System :: Hardware :: Hardware Drivers'
]
)
|
#!/usr/bin/env python3
#
# Copyright (c) 2016, <NAME> <<EMAIL>>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from distutils.core import setup
import setuptools
import relayctl
setup(
name='pyrelayctl',
version='0.1',
description='Library for FT232R controlled relay boards',
author='<NAME>',
author_email='<EMAIL>',
license = 'BSD',
url='https://github.com/xypron/pyrelayctl',
packages=['relayctl'],
install_requires=["pyusb >= 1.0.0a"],
long_description =
"""
PyRelayCtl is a library to control FTDI FT245R based relay boards.
This includes the SainSmart 4-channel 5V USB relay board.
The outlets can be switched on and off via USB.
The library depends on PyUSB (https://github.com/walac/pyusb).
On Debian PyUSB can be installed using::
apt-get install python3-usb
Per default, only root is allowed to use devices directly, therefore the
library also only works as root.
To allow group relayctl access create file /lib/udev/rules.d/60-relayctl.rules
with the following content::
SUBSYSTEM=="usb", ATTR{idVendor}=="0403", ATTR{idProduct}=="6001", GROUP="relayctl", MODE="660"
Then reload the udev rules with::
udevadm control --reload-rules
""",
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX',
'Programming Language :: Python :: 3',
'Topic :: System :: Hardware :: Hardware Drivers'
]
)
|
en
| 0.695486
|
#!/usr/bin/env python3 # # Copyright (c) 2016, <NAME> <<EMAIL>> # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. PyRelayCtl is a library to control FTDI FT245R based relay boards. This includes the SainSmart 4-channel 5V USB relay board. The outlets can be switched on and off via USB. The library depends on PyUSB (https://github.com/walac/pyusb). On Debian PyUSB can be installed using:: apt-get install python3-usb Per default, only root is allowed to use devices directly, therefore the library also only works as root. 
To allow group relayctl access create file /lib/udev/rules.d/60-relayctl.rules with the following content:: SUBSYSTEM=="usb", ATTR{idVendor}=="0403", ATTR{idProduct}=="6001", GROUP="relayctl", MODE="660" Then reload the udev rules with:: udevadm control --reload-rules
| 1.01009
| 1
|
Examples/AppKit/ClassBrowser/setup.py
|
linuxfood/pyobjc-framework-Cocoa-test
| 0
|
6629412
|
"""
Script for building the example.
Usage:
python3 setup.py py2app
"""
from setuptools import setup
plist = {"NSMainNibFile": "ClassBrowser"}
setup(
name="ClassBrowser",
app=["ClassBrowser.py"],
data_files=["ClassBrowser.nib"],
options={"py2app": {"plist": plist}},
setup_requires=["py2app", "pyobjc-framework-Cocoa"],
)
|
"""
Script for building the example.
Usage:
python3 setup.py py2app
"""
from setuptools import setup
plist = {"NSMainNibFile": "ClassBrowser"}
setup(
name="ClassBrowser",
app=["ClassBrowser.py"],
data_files=["ClassBrowser.nib"],
options={"py2app": {"plist": plist}},
setup_requires=["py2app", "pyobjc-framework-Cocoa"],
)
|
en
| 0.720983
|
Script for building the example. Usage: python3 setup.py py2app
| 1.668225
| 2
|
i3d_tf_to_pt.py
|
eric-xw/kinetics-i3d-pytorch
| 33
|
6629413
|
<filename>i3d_tf_to_pt.py
import argparse
from matplotlib import pyplot as plt
import tensorflow as tf
import torch
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from src.i3dtf import InceptionI3d
from src.i3dpt import I3D
from src.monitorutils import compare_outputs
def transfer_weights(tf_checkpoint, pt_checkpoint, batch_size, modality='rgb'):
intermediate_feature = False
im_size = 224
dataset = datasets.ImageFolder(
'data/dummy-dataset',
transforms.Compose([
transforms.CenterCrop(im_size),
transforms.ToTensor(),
# normalize,
]))
# Initialize input params
if modality == 'rgb':
in_channels = 3
elif modality == 'flow':
in_channels = 2
else:
raise ValueError(
'{} not among known modalities [rgb|flow]'.format(modality))
frame_nb = 16 # Number of items in depth (temporal) dimension
class_nb = 600
# Initialize dataset
loader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, shuffle=False)
# Initialize pytorch I3D
i3nception_pt = I3D(num_classes=600, modality=modality)
# Initialzie tensorflow I3D
if modality == 'rgb':
scope = 'RGB'
elif modality == 'flow':
scope = 'Flow'
with tf.variable_scope(scope):
rgb_model = InceptionI3d(class_nb, final_endpoint='Predictions')
# Tensorflow forward pass
rgb_input = tf.placeholder(
tf.float32,
shape=(batch_size, frame_nb, im_size, im_size, in_channels))
rgb_logits, _ = rgb_model(
rgb_input, is_training=False, dropout_keep_prob=1.0)
# Get params for tensorflow weight retreival
rgb_variable_map = {}
for variable in tf.global_variables():
if variable.name.split('/')[0] == scope:
rgb_variable_map[variable.name.replace(':0', '')] = variable
criterion = torch.nn.L1Loss()
rgb_saver = tf.train.Saver(var_list=rgb_variable_map, reshape=True)
with tf.Session() as sess:
# Load saved tensorflow weights
rgb_saver.restore(sess, tf_checkpoint)
# Transfer weights from tensorflow to pytorch
i3nception_pt.eval()
i3nception_pt.load_tf_weights(sess)
# Save pytorch weights for future loading
i3nception_state_dict = i3nception_pt.cpu().state_dict()
torch.save(i3nception_state_dict, pt_checkpoint)
# Load data
for i, (input_2d, target) in enumerate(loader):
input_2d = torch.from_numpy(input_2d.numpy())
if modality == 'flow':
input_2d = input_2d[:, 0:2] # Remove one dimension
# Prepare data for pytorch forward pass
target_var = torch.autograd.Variable(target)
input_3d = input_2d.clone().unsqueeze(2).repeat(
1, 1, frame_nb, 1, 1)
input_3d_var = torch.autograd.Variable(input_3d)
# Prepare data for tensorflow pass
feed_dict = {}
input_3d_tf = input_3d.numpy().transpose(0, 2, 3, 4, 1)
feed_dict[rgb_input] = input_3d_tf
# Tensorflow forward pass
tf_out3dsample = sess.run(rgb_logits, feed_dict=feed_dict)
out_tf_np = tf_out3dsample
if intermediate_feature:
# Reshape intermediary input to insure they are comparable
out_tf_np = tf_out3dsample.transpose((0, 4, 1, 2, 3))
# Pytorch forward pass
out_pt, _ = i3nception_pt(input_3d_var)
out_pt_np = out_pt.data.numpy()
# Make sure the tensorflow and pytorch outputs have the same shape
assert out_tf_np.shape == out_pt_np.shape, 'tf output: {} != pt output : {}'.format(
out_tf_np.shape, out_pt_np.shape)
compare_outputs(out_tf_np, out_pt_np)
# Display slices of filter map for intermediate features
# for visual comparison
if intermediate_feature:
filter_idx = 219
img_tf = out_tf_np[0][filter_idx][0]
img_pt = out_pt_np[0][filter_idx][0]
max_v = max(img_tf.max(), img_pt.max())
min_v = min(img_tf.min(), img_pt.min())
plt.subplot(2, 2, 1)
plt.imshow(img_pt, vmax=max_v, vmin=min_v)
plt.subplot(2, 2, 2)
plt.imshow(img_tf, vmax=max_v, vmin=min_v)
plt.subplot(2, 2, 3)
plt.imshow(img_tf - img_pt)
plt.show()
print('min val : {}, max_val : {}, mean val : {}'.format(
min_v, max_v, out_pt_np.mean()))
loss = criterion(out_pt, torch.ones_like(out_pt))
loss.backward()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
'Transfers the kinetics rgb pretrained i3d\
inception v1 weights from tensorflow to pytorch and saves the weights as\
as state_dict')
parser.add_argument(
'--rgb', action='store_true', help='Convert RGB pretrained network')
parser.add_argument(
'--rgb_tf_checkpoint',
type=str,
default='model/tf_rgb_scratch_kin600/model.ckpt',
help='Path to tensorflow weight checkpoint trained on rgb')
parser.add_argument(
'--rgb_pt_checkpoint',
type=str,
default='model/model_rgb_kin600.pth',
help='Path for pytorch state_dict saving')
parser.add_argument(
'--flow', action='store_true', help='Convert Flow pretrained network')
parser.add_argument(
'--flow_tf_checkpoint',
type=str,
default='model/tf_flow_imagenet/model.ckpt',
help='Path to tensorflow weight checkpoint trained on flow')
parser.add_argument(
'--flow_pt_checkpoint',
type=str,
default='model/model_flow.pth',
help='Path for pytorch state_dict saving')
parser.add_argument(
'--batch_size',
type=int,
default='2',
help='Batch size for comparison between tensorflow and pytorch outputs'
)
args = parser.parse_args()
if args.rgb:
transfer_weights(
args.rgb_tf_checkpoint,
args.rgb_pt_checkpoint,
batch_size=args.batch_size,
modality='rgb')
if args.flow:
transfer_weights(
args.flow_tf_checkpoint,
args.flow_pt_checkpoint,
batch_size=args.batch_size,
modality='flow')
|
<filename>i3d_tf_to_pt.py
import argparse
from matplotlib import pyplot as plt
import tensorflow as tf
import torch
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from src.i3dtf import InceptionI3d
from src.i3dpt import I3D
from src.monitorutils import compare_outputs
def transfer_weights(tf_checkpoint, pt_checkpoint, batch_size, modality='rgb'):
intermediate_feature = False
im_size = 224
dataset = datasets.ImageFolder(
'data/dummy-dataset',
transforms.Compose([
transforms.CenterCrop(im_size),
transforms.ToTensor(),
# normalize,
]))
# Initialize input params
if modality == 'rgb':
in_channels = 3
elif modality == 'flow':
in_channels = 2
else:
raise ValueError(
'{} not among known modalities [rgb|flow]'.format(modality))
frame_nb = 16 # Number of items in depth (temporal) dimension
class_nb = 600
# Initialize dataset
loader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, shuffle=False)
# Initialize pytorch I3D
i3nception_pt = I3D(num_classes=600, modality=modality)
# Initialzie tensorflow I3D
if modality == 'rgb':
scope = 'RGB'
elif modality == 'flow':
scope = 'Flow'
with tf.variable_scope(scope):
rgb_model = InceptionI3d(class_nb, final_endpoint='Predictions')
# Tensorflow forward pass
rgb_input = tf.placeholder(
tf.float32,
shape=(batch_size, frame_nb, im_size, im_size, in_channels))
rgb_logits, _ = rgb_model(
rgb_input, is_training=False, dropout_keep_prob=1.0)
# Get params for tensorflow weight retreival
rgb_variable_map = {}
for variable in tf.global_variables():
if variable.name.split('/')[0] == scope:
rgb_variable_map[variable.name.replace(':0', '')] = variable
criterion = torch.nn.L1Loss()
rgb_saver = tf.train.Saver(var_list=rgb_variable_map, reshape=True)
with tf.Session() as sess:
# Load saved tensorflow weights
rgb_saver.restore(sess, tf_checkpoint)
# Transfer weights from tensorflow to pytorch
i3nception_pt.eval()
i3nception_pt.load_tf_weights(sess)
# Save pytorch weights for future loading
i3nception_state_dict = i3nception_pt.cpu().state_dict()
torch.save(i3nception_state_dict, pt_checkpoint)
# Load data
for i, (input_2d, target) in enumerate(loader):
input_2d = torch.from_numpy(input_2d.numpy())
if modality == 'flow':
input_2d = input_2d[:, 0:2] # Remove one dimension
# Prepare data for pytorch forward pass
target_var = torch.autograd.Variable(target)
input_3d = input_2d.clone().unsqueeze(2).repeat(
1, 1, frame_nb, 1, 1)
input_3d_var = torch.autograd.Variable(input_3d)
# Prepare data for tensorflow pass
feed_dict = {}
input_3d_tf = input_3d.numpy().transpose(0, 2, 3, 4, 1)
feed_dict[rgb_input] = input_3d_tf
# Tensorflow forward pass
tf_out3dsample = sess.run(rgb_logits, feed_dict=feed_dict)
out_tf_np = tf_out3dsample
if intermediate_feature:
# Reshape intermediary input to insure they are comparable
out_tf_np = tf_out3dsample.transpose((0, 4, 1, 2, 3))
# Pytorch forward pass
out_pt, _ = i3nception_pt(input_3d_var)
out_pt_np = out_pt.data.numpy()
# Make sure the tensorflow and pytorch outputs have the same shape
assert out_tf_np.shape == out_pt_np.shape, 'tf output: {} != pt output : {}'.format(
out_tf_np.shape, out_pt_np.shape)
compare_outputs(out_tf_np, out_pt_np)
# Display slices of filter map for intermediate features
# for visual comparison
if intermediate_feature:
filter_idx = 219
img_tf = out_tf_np[0][filter_idx][0]
img_pt = out_pt_np[0][filter_idx][0]
max_v = max(img_tf.max(), img_pt.max())
min_v = min(img_tf.min(), img_pt.min())
plt.subplot(2, 2, 1)
plt.imshow(img_pt, vmax=max_v, vmin=min_v)
plt.subplot(2, 2, 2)
plt.imshow(img_tf, vmax=max_v, vmin=min_v)
plt.subplot(2, 2, 3)
plt.imshow(img_tf - img_pt)
plt.show()
print('min val : {}, max_val : {}, mean val : {}'.format(
min_v, max_v, out_pt_np.mean()))
loss = criterion(out_pt, torch.ones_like(out_pt))
loss.backward()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
'Transfers the kinetics rgb pretrained i3d\
inception v1 weights from tensorflow to pytorch and saves the weights as\
as state_dict')
parser.add_argument(
'--rgb', action='store_true', help='Convert RGB pretrained network')
parser.add_argument(
'--rgb_tf_checkpoint',
type=str,
default='model/tf_rgb_scratch_kin600/model.ckpt',
help='Path to tensorflow weight checkpoint trained on rgb')
parser.add_argument(
'--rgb_pt_checkpoint',
type=str,
default='model/model_rgb_kin600.pth',
help='Path for pytorch state_dict saving')
parser.add_argument(
'--flow', action='store_true', help='Convert Flow pretrained network')
parser.add_argument(
'--flow_tf_checkpoint',
type=str,
default='model/tf_flow_imagenet/model.ckpt',
help='Path to tensorflow weight checkpoint trained on flow')
parser.add_argument(
'--flow_pt_checkpoint',
type=str,
default='model/model_flow.pth',
help='Path for pytorch state_dict saving')
parser.add_argument(
'--batch_size',
type=int,
default='2',
help='Batch size for comparison between tensorflow and pytorch outputs'
)
args = parser.parse_args()
if args.rgb:
transfer_weights(
args.rgb_tf_checkpoint,
args.rgb_pt_checkpoint,
batch_size=args.batch_size,
modality='rgb')
if args.flow:
transfer_weights(
args.flow_tf_checkpoint,
args.flow_pt_checkpoint,
batch_size=args.batch_size,
modality='flow')
|
en
| 0.726357
|
# normalize, # Initialize input params # Number of items in depth (temporal) dimension # Initialize dataset # Initialize pytorch I3D # Initialzie tensorflow I3D # Tensorflow forward pass # Get params for tensorflow weight retreival # Load saved tensorflow weights # Transfer weights from tensorflow to pytorch # Save pytorch weights for future loading # Load data # Remove one dimension # Prepare data for pytorch forward pass # Prepare data for tensorflow pass # Tensorflow forward pass # Reshape intermediary input to insure they are comparable # Pytorch forward pass # Make sure the tensorflow and pytorch outputs have the same shape # Display slices of filter map for intermediate features # for visual comparison
| 2.294131
| 2
|
backend/settings.py
|
lietu/pydashery
| 1
|
6629414
|
# Which widgets should be enabled?
WIDGETS = [
{
"type": "Clock",
# Optional, defaults to ISO-8601 -like format
"format": "%m/%d/%Y\n%I:%M:%S %p"
},
{
"type": "FunctionResult",
"update_minutes": 0.0167,
# Definition can be either "module.path:class.method" or
# "module.path:function_name"
"func": "time:time"
},
{
"type": "ImageURL",
"url": "http://www.foreca.fi/meteogram.php?loc_id=100658225&lang=fi",
"update_minutes": 15
},
{
"type": "Iframe",
"url": "http://isitchristmas.com",
"update_minutes": 15
},
{
"type": "TextFile",
"filename": "test.txt",
},
{
"type": "Iframe",
"url": "http://www.metoffice.gov.uk/mobile/forecast/ud9wx0fhw",
"update_minutes": 15
}
]
# Check for updates this many times per second
UPDATES_PER_SEC = 3
# Which interface and port to listen to
LISTEN_ADDRESS = "0.0.0.0"
LISTEN_PORT = 8080
# Reloads the app automatically when code changes are detected
DEBUG = True
|
# Which widgets should be enabled?
WIDGETS = [
{
"type": "Clock",
# Optional, defaults to ISO-8601 -like format
"format": "%m/%d/%Y\n%I:%M:%S %p"
},
{
"type": "FunctionResult",
"update_minutes": 0.0167,
# Definition can be either "module.path:class.method" or
# "module.path:function_name"
"func": "time:time"
},
{
"type": "ImageURL",
"url": "http://www.foreca.fi/meteogram.php?loc_id=100658225&lang=fi",
"update_minutes": 15
},
{
"type": "Iframe",
"url": "http://isitchristmas.com",
"update_minutes": 15
},
{
"type": "TextFile",
"filename": "test.txt",
},
{
"type": "Iframe",
"url": "http://www.metoffice.gov.uk/mobile/forecast/ud9wx0fhw",
"update_minutes": 15
}
]
# Check for updates this many times per second
UPDATES_PER_SEC = 3
# Which interface and port to listen to
LISTEN_ADDRESS = "0.0.0.0"
LISTEN_PORT = 8080
# Reloads the app automatically when code changes are detected
DEBUG = True
|
en
| 0.598354
|
# Which widgets should be enabled? # Optional, defaults to ISO-8601 -like format # Definition can be either "module.path:class.method" or # "module.path:function_name" # Check for updates this many times per second # Which interface and port to listen to # Reloads the app automatically when code changes are detected
| 2.507238
| 3
|
deprecated/tests/test_PulsedProgramming.py
|
3it-nano/QDMS
| 1
|
6629415
|
import qdms
import numpy as np
def test_read_resistance_without_variability():
memristor = qdms.Data_Driven()
circuit = qdms.Circuit(memristor, 1)
pulsed_programming = qdms.PulsedProgramming(circuit, 2)
value = pulsed_programming.read_resistance(pulsed_programming.circuit.memristor_model)
assert round(value) == round(pulsed_programming.circuit.memristor_model.r_on)
def test_read_resistance_with_variability():
memristor = qdms.Data_Driven()
circuit = qdms.Circuit(memristor, 1)
pulsed_programming = qdms.PulsedProgramming(circuit, 2, variance_read=1/300)
result = []
for _ in range(1000):
value = pulsed_programming.read_resistance(pulsed_programming.circuit.memristor_model)
max = pulsed_programming.circuit.memristor_model.r_on + 0.015 * pulsed_programming.circuit.memristor_model.r_on
min = pulsed_programming.circuit.memristor_model.r_on - 0.015 * pulsed_programming.circuit.memristor_model.r_on
if min < value < max:
result.append(True)
else:
result.append(False)
assert np.all(result)
def test_write_resistance_without_variability():
memristor = qdms.Data_Driven()
circuit = qdms.Circuit(memristor, 1)
pulsed_programming = qdms.PulsedProgramming(circuit, 2)
pulsed_programming.write_resistance(pulsed_programming.circuit.memristor_model, -2, 200e-9)
value = pulsed_programming.read_resistance(pulsed_programming.circuit.memristor_model)
assert value > pulsed_programming.circuit.memristor_model.r_on
def test_write_resistance_with_variability():
memristor = qdms.Data_Driven()
circuit = qdms.Circuit(memristor, 1)
pulsed_programming = qdms.PulsedProgramming(circuit, 2, variance_write=1/300)
result_max = []
result_min = []
pulsed_programming.circuit.memristor_model.g = 1/2000
for _ in range(1000):
previous = pulsed_programming.read_resistance(pulsed_programming.circuit.memristor_model)
pulsed_programming.write_resistance(pulsed_programming.circuit.memristor_model, 0, 200e-9)
next = pulsed_programming.read_resistance(pulsed_programming.circuit.memristor_model)
result_max.append((next - previous) / 2000 * 100 <= 1.2)
result_min.append((next - previous) / 2000 * 100 >= 0.9)
assert np.all(result_max) and np.any(result_min)
def test_distribution():
memristor = qdms.Data_Driven()
circuit = qdms.Circuit(memristor, 9)
pulsed_programming_linear = qdms.PulsedProgramming(circuit, 2, distribution_type='linear')
pulsed_programming_half_spread = qdms.PulsedProgramming(circuit, 2, distribution_type='half_spread')
pulsed_programming_full_spread = qdms.PulsedProgramming(circuit, 2, distribution_type='full_spread')
result = False
if len(pulsed_programming_linear.res_states) == 1:
if len(pulsed_programming_half_spread.res_states) == 3:
if len(pulsed_programming_full_spread.res_states) == 9:
result = True
assert result
def test_log_convergence():
memristor = qdms.Data_Driven()
circuit = qdms.Circuit(memristor, 1)
pulsed_programming = qdms.PulsedProgramming(circuit, 2, hrs=3000, tolerance=1, is_relative_tolerance=True,
pulse_algorithm='log')
pulsed_programming.simulate()
assert not len(pulsed_programming.graph_resistance) -1 == pulsed_programming.max_pulse
def test_fabien_convergence():
memristor = qdms.Data_Driven()
circuit = qdms.Circuit(memristor, 1)
pulsed_programming = qdms.PulsedProgramming(circuit, 2, hrs=3000, tolerance=1, is_relative_tolerance=True,
pulse_algorithm='fabien')
pulsed_programming.simulate()
assert not len(pulsed_programming.graph_resistance) - 1 == pulsed_programming.max_pulse
|
import qdms
import numpy as np
def test_read_resistance_without_variability():
memristor = qdms.Data_Driven()
circuit = qdms.Circuit(memristor, 1)
pulsed_programming = qdms.PulsedProgramming(circuit, 2)
value = pulsed_programming.read_resistance(pulsed_programming.circuit.memristor_model)
assert round(value) == round(pulsed_programming.circuit.memristor_model.r_on)
def test_read_resistance_with_variability():
memristor = qdms.Data_Driven()
circuit = qdms.Circuit(memristor, 1)
pulsed_programming = qdms.PulsedProgramming(circuit, 2, variance_read=1/300)
result = []
for _ in range(1000):
value = pulsed_programming.read_resistance(pulsed_programming.circuit.memristor_model)
max = pulsed_programming.circuit.memristor_model.r_on + 0.015 * pulsed_programming.circuit.memristor_model.r_on
min = pulsed_programming.circuit.memristor_model.r_on - 0.015 * pulsed_programming.circuit.memristor_model.r_on
if min < value < max:
result.append(True)
else:
result.append(False)
assert np.all(result)
def test_write_resistance_without_variability():
memristor = qdms.Data_Driven()
circuit = qdms.Circuit(memristor, 1)
pulsed_programming = qdms.PulsedProgramming(circuit, 2)
pulsed_programming.write_resistance(pulsed_programming.circuit.memristor_model, -2, 200e-9)
value = pulsed_programming.read_resistance(pulsed_programming.circuit.memristor_model)
assert value > pulsed_programming.circuit.memristor_model.r_on
def test_write_resistance_with_variability():
memristor = qdms.Data_Driven()
circuit = qdms.Circuit(memristor, 1)
pulsed_programming = qdms.PulsedProgramming(circuit, 2, variance_write=1/300)
result_max = []
result_min = []
pulsed_programming.circuit.memristor_model.g = 1/2000
for _ in range(1000):
previous = pulsed_programming.read_resistance(pulsed_programming.circuit.memristor_model)
pulsed_programming.write_resistance(pulsed_programming.circuit.memristor_model, 0, 200e-9)
next = pulsed_programming.read_resistance(pulsed_programming.circuit.memristor_model)
result_max.append((next - previous) / 2000 * 100 <= 1.2)
result_min.append((next - previous) / 2000 * 100 >= 0.9)
assert np.all(result_max) and np.any(result_min)
def test_distribution():
memristor = qdms.Data_Driven()
circuit = qdms.Circuit(memristor, 9)
pulsed_programming_linear = qdms.PulsedProgramming(circuit, 2, distribution_type='linear')
pulsed_programming_half_spread = qdms.PulsedProgramming(circuit, 2, distribution_type='half_spread')
pulsed_programming_full_spread = qdms.PulsedProgramming(circuit, 2, distribution_type='full_spread')
result = False
if len(pulsed_programming_linear.res_states) == 1:
if len(pulsed_programming_half_spread.res_states) == 3:
if len(pulsed_programming_full_spread.res_states) == 9:
result = True
assert result
def test_log_convergence():
memristor = qdms.Data_Driven()
circuit = qdms.Circuit(memristor, 1)
pulsed_programming = qdms.PulsedProgramming(circuit, 2, hrs=3000, tolerance=1, is_relative_tolerance=True,
pulse_algorithm='log')
pulsed_programming.simulate()
assert not len(pulsed_programming.graph_resistance) -1 == pulsed_programming.max_pulse
def test_fabien_convergence():
memristor = qdms.Data_Driven()
circuit = qdms.Circuit(memristor, 1)
pulsed_programming = qdms.PulsedProgramming(circuit, 2, hrs=3000, tolerance=1, is_relative_tolerance=True,
pulse_algorithm='fabien')
pulsed_programming.simulate()
assert not len(pulsed_programming.graph_resistance) - 1 == pulsed_programming.max_pulse
|
none
| 1
| 2.668911
| 3
|
|
scripts/update_version.py
|
confiare/SN-Core
| 53
|
6629416
|
import tomlkit
import os
self_dir = os.path.dirname(__file__)
config_path = os.path.join(self_dir, "update_version.toml")
with open(config_path, 'r') as config_file:
config = tomlkit.loads(config_file.read())
VERSION = ".".join([str(config['version'][sem]) for sem in ['major', 'minor', 'patch']])
if "core" in config['paths']:
def update_validator(manifest):
manifest['package']['version'] = VERSION
return manifest
def update_runtime(manifest):
manifest['package']['version'] = VERSION
manifest['dependencies']['smartnoise_validator']['version'] = VERSION
return manifest
def update_ffi(manifest):
manifest['package']['version'] = VERSION
manifest['dependencies']['smartnoise_validator']['version'] = VERSION
manifest['dependencies']['smartnoise_runtime']['version'] = VERSION
return manifest
crates = {
"validator-rust": update_validator,
"runtime-rust": update_runtime,
"ffi-rust": update_ffi
}
# update version references in all three crates
for project in crates:
manifest_path = os.path.join(config['paths']['core'], project, "Cargo.toml")
with open(manifest_path, 'r') as manifest_file:
manifest = tomlkit.loads(manifest_file.read())
manifest = crates[project](manifest)
with open(manifest_path, 'w') as runtime_toml_file:
runtime_toml_file.write(tomlkit.dumps(manifest))
if "python" in config['paths']:
# update version number in setup.cfg
import configparser
setup_path = os.path.join(config['paths']['python'], "setup.cfg")
setup = configparser.ConfigParser(comment_prefixes='/', allow_no_value=True)
setup.read(setup_path)
setup['metadata']['version'] = VERSION
with open(setup_path, 'w') as setup_file:
setup.write(setup_file)
# update version number in documentation
doc_builder_path = os.path.join(config['paths']['python'], "scripts", "build_docs.sh")
with open(doc_builder_path, "r") as doc_builder_file:
lines = doc_builder_file.readlines()
lines[next(i for i, l in enumerate(lines) if l.startswith("WN_VERSION="))] = f"WN_VERSION={VERSION}\n"
with open(doc_builder_path, "w") as doc_builder_file:
doc_builder_file.writelines(lines)
if "R" in config['paths']:
# update DESCRIPTION file
description_path = os.path.join(config['paths']['R'], "DESCRIPTION")
with open(description_path, 'r') as description_file:
lines = description_file.readlines()
lines[next(i for i, l in enumerate(lines) if l.startswith("Version: "))] = f"Version: {VERSION}\n"
with open(description_path, "w") as doc_builder_file:
doc_builder_file.writelines(lines)
|
import tomlkit
import os
self_dir = os.path.dirname(__file__)
config_path = os.path.join(self_dir, "update_version.toml")
with open(config_path, 'r') as config_file:
config = tomlkit.loads(config_file.read())
VERSION = ".".join([str(config['version'][sem]) for sem in ['major', 'minor', 'patch']])
if "core" in config['paths']:
def update_validator(manifest):
manifest['package']['version'] = VERSION
return manifest
def update_runtime(manifest):
manifest['package']['version'] = VERSION
manifest['dependencies']['smartnoise_validator']['version'] = VERSION
return manifest
def update_ffi(manifest):
manifest['package']['version'] = VERSION
manifest['dependencies']['smartnoise_validator']['version'] = VERSION
manifest['dependencies']['smartnoise_runtime']['version'] = VERSION
return manifest
crates = {
"validator-rust": update_validator,
"runtime-rust": update_runtime,
"ffi-rust": update_ffi
}
# update version references in all three crates
for project in crates:
manifest_path = os.path.join(config['paths']['core'], project, "Cargo.toml")
with open(manifest_path, 'r') as manifest_file:
manifest = tomlkit.loads(manifest_file.read())
manifest = crates[project](manifest)
with open(manifest_path, 'w') as runtime_toml_file:
runtime_toml_file.write(tomlkit.dumps(manifest))
if "python" in config['paths']:
# update version number in setup.cfg
import configparser
setup_path = os.path.join(config['paths']['python'], "setup.cfg")
setup = configparser.ConfigParser(comment_prefixes='/', allow_no_value=True)
setup.read(setup_path)
setup['metadata']['version'] = VERSION
with open(setup_path, 'w') as setup_file:
setup.write(setup_file)
# update version number in documentation
doc_builder_path = os.path.join(config['paths']['python'], "scripts", "build_docs.sh")
with open(doc_builder_path, "r") as doc_builder_file:
lines = doc_builder_file.readlines()
lines[next(i for i, l in enumerate(lines) if l.startswith("WN_VERSION="))] = f"WN_VERSION={VERSION}\n"
with open(doc_builder_path, "w") as doc_builder_file:
doc_builder_file.writelines(lines)
if "R" in config['paths']:
# update DESCRIPTION file
description_path = os.path.join(config['paths']['R'], "DESCRIPTION")
with open(description_path, 'r') as description_file:
lines = description_file.readlines()
lines[next(i for i, l in enumerate(lines) if l.startswith("Version: "))] = f"Version: {VERSION}\n"
with open(description_path, "w") as doc_builder_file:
doc_builder_file.writelines(lines)
|
en
| 0.607774
|
# update version references in all three crates # update version number in setup.cfg # update version number in documentation # update DESCRIPTION file
| 2.019559
| 2
|
oceny.py
|
Ellectronx/wsb-oceny
| 5
|
6629417
|
#!/usr/bin/env python
# main script from: https://www.lisenet.com/2017/basic-python-script-to-log-in-to-website-using-selenium-webdriver/
import os
import time
import signal
import hashlib
import subprocess
from selenium import webdriver
from bs4 import BeautifulSoup
import db
import fb
from helper import *
from credent import secret
def main():
killXvfb()
os.system('/usr/bin/Xvfb :11 -ac -screen 0 1024x768x24 &')
os.environ['DISPLAY'] = ':11.0'
#URL
url = "https://portal.wsb.pl/group/gdansk/oceny-wstepne"
profile = webdriver.FirefoxProfile()
# Set a user agent string to help parse webserver logs easily
profile.set_preference("general.useragent.override", "Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 selenium_xvfb.py")
browser = webdriver.Firefox(profile)
browser.get(url)
time.sleep(1)
# DEFINE USERNAME & PASSWORD FIELDS
user = browser.find_element_by_name("username")
password = browser.find_element_by_name("password")
# Clear the input fields
user.clear()
password.clear()
user.send_keys(secret["wsb_login"])
password.send_keys(secret["ws<PASSWORD>"])
#PRESS LOGIN BUTTON
browser.find_element_by_id("login_button").click()
time.sleep(5)
browser.get(url)
html = browser.page_source
soup = BeautifulSoup(html,features="lxml")
# create a database connection
conn = db.prepareTablesAndConnection()
table = soup.find("table", { "class" : "dataTable" })
for row in table.findAll("tr"):
t=()
cells = row.findAll("td")
for c in cells:
t=t+(c.find(text=True).strip(),)
try:
h = t[0] + t[1] + t[2]
t=( hashlib.md5(h.encode()).hexdigest(),) + t
#print(t)
#db.insert_oceny(conn,"oceny",t)
db.insert_oceny(conn,"oceny_nowe",t)
except IndexError:
continue
except:
raise
"""
rows = db.select_oceny(conn,"oceny")
print(rows)
print("\n\n\n\n\n\n")
rows = db.select_oceny(conn,"oceny_nowe")
print(rows)
"""
#rows_diff = db.select_oceny(conn,"oceny_nowe")
print("\n\n\nROW DIFF:")
rows_diff = db.select_diff(conn)#print(rows_diff)
ID=[]
for row in rows_diff:
ID.append(row[0])
#print(strx)
#print(ID)
#comparison_result=[]
private_content=""
public_content=""
for idx in ID:
T1 = db.select_przedmiot(conn,"oceny",idx)
T2 = db.select_przedmiot(conn,"oceny_nowe",idx)
cr = compareT(T1,T2)
private_content = private_content + cr["private"] + "\r\n\r\n"
public_content = public_content + cr["public"] + "\r\n\r\n"
print("private_content:"+private_content)
if private_content !="":
sendEmail("Powiadomienie o ocenach WSB",secret["email_from"],secret["email_to"],private_content)
if public_content !="":
fb.sendMessage(public_content)
#datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
db.oceny_copy(conn)
db.query(conn,"DELETE FROM oceny_nowe;")
conn.commit()
conn.close()
file = open("plik.html","w")
file.write(html)
file.close()
# Keep the page loaded for 8 seconds
time.sleep(8)
# Log out
browser.find_element_by_link_text('Wyloguj')
time.sleep(2)
#browser.find_element_by_id("dialog_button_ok").click()
#time.sleep(1)
browser.delete_all_cookies()
browser.close()
killXvfb()
def killXvfb():
p = subprocess.Popen(['ps', '-A'],
shell=True,
stdout=subprocess.PIPE,
universal_newlines=True).communicate()[0]
#out, err = p.communicate()
#print(p.splitlines())
for line in p.splitlines():
if 'Xvfb' in line:
pid = int(line.split(None, 1)[0])
os.kill(pid, signal.SIGKILL)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# main script from: https://www.lisenet.com/2017/basic-python-script-to-log-in-to-website-using-selenium-webdriver/
import os
import time
import signal
import hashlib
import subprocess
from selenium import webdriver
from bs4 import BeautifulSoup
import db
import fb
from helper import *
from credent import secret
def main():
killXvfb()
os.system('/usr/bin/Xvfb :11 -ac -screen 0 1024x768x24 &')
os.environ['DISPLAY'] = ':11.0'
#URL
url = "https://portal.wsb.pl/group/gdansk/oceny-wstepne"
profile = webdriver.FirefoxProfile()
# Set a user agent string to help parse webserver logs easily
profile.set_preference("general.useragent.override", "Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 selenium_xvfb.py")
browser = webdriver.Firefox(profile)
browser.get(url)
time.sleep(1)
# DEFINE USERNAME & PASSWORD FIELDS
user = browser.find_element_by_name("username")
password = browser.find_element_by_name("password")
# Clear the input fields
user.clear()
password.clear()
user.send_keys(secret["wsb_login"])
password.send_keys(secret["ws<PASSWORD>"])
#PRESS LOGIN BUTTON
browser.find_element_by_id("login_button").click()
time.sleep(5)
browser.get(url)
html = browser.page_source
soup = BeautifulSoup(html,features="lxml")
# create a database connection
conn = db.prepareTablesAndConnection()
table = soup.find("table", { "class" : "dataTable" })
for row in table.findAll("tr"):
t=()
cells = row.findAll("td")
for c in cells:
t=t+(c.find(text=True).strip(),)
try:
h = t[0] + t[1] + t[2]
t=( hashlib.md5(h.encode()).hexdigest(),) + t
#print(t)
#db.insert_oceny(conn,"oceny",t)
db.insert_oceny(conn,"oceny_nowe",t)
except IndexError:
continue
except:
raise
"""
rows = db.select_oceny(conn,"oceny")
print(rows)
print("\n\n\n\n\n\n")
rows = db.select_oceny(conn,"oceny_nowe")
print(rows)
"""
#rows_diff = db.select_oceny(conn,"oceny_nowe")
print("\n\n\nROW DIFF:")
rows_diff = db.select_diff(conn)#print(rows_diff)
ID=[]
for row in rows_diff:
ID.append(row[0])
#print(strx)
#print(ID)
#comparison_result=[]
private_content=""
public_content=""
for idx in ID:
T1 = db.select_przedmiot(conn,"oceny",idx)
T2 = db.select_przedmiot(conn,"oceny_nowe",idx)
cr = compareT(T1,T2)
private_content = private_content + cr["private"] + "\r\n\r\n"
public_content = public_content + cr["public"] + "\r\n\r\n"
print("private_content:"+private_content)
if private_content !="":
sendEmail("Powiadomienie o ocenach WSB",secret["email_from"],secret["email_to"],private_content)
if public_content !="":
fb.sendMessage(public_content)
#datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
db.oceny_copy(conn)
db.query(conn,"DELETE FROM oceny_nowe;")
conn.commit()
conn.close()
file = open("plik.html","w")
file.write(html)
file.close()
# Keep the page loaded for 8 seconds
time.sleep(8)
# Log out
browser.find_element_by_link_text('Wyloguj')
time.sleep(2)
#browser.find_element_by_id("dialog_button_ok").click()
#time.sleep(1)
browser.delete_all_cookies()
browser.close()
killXvfb()
def killXvfb():
p = subprocess.Popen(['ps', '-A'],
shell=True,
stdout=subprocess.PIPE,
universal_newlines=True).communicate()[0]
#out, err = p.communicate()
#print(p.splitlines())
for line in p.splitlines():
if 'Xvfb' in line:
pid = int(line.split(None, 1)[0])
os.kill(pid, signal.SIGKILL)
if __name__ == '__main__':
main()
|
en
| 0.280893
|
#!/usr/bin/env python # main script from: https://www.lisenet.com/2017/basic-python-script-to-log-in-to-website-using-selenium-webdriver/ #URL # Set a user agent string to help parse webserver logs easily # DEFINE USERNAME & PASSWORD FIELDS # Clear the input fields #PRESS LOGIN BUTTON # create a database connection #print(t) #db.insert_oceny(conn,"oceny",t) rows = db.select_oceny(conn,"oceny") print(rows) print("\n\n\n\n\n\n") rows = db.select_oceny(conn,"oceny_nowe") print(rows) #rows_diff = db.select_oceny(conn,"oceny_nowe") #print(rows_diff) #print(strx) #print(ID) #comparison_result=[] #datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") # Keep the page loaded for 8 seconds # Log out #browser.find_element_by_id("dialog_button_ok").click() #time.sleep(1) #out, err = p.communicate() #print(p.splitlines())
| 2.995893
| 3
|
final_table.py
|
Mfallh/cocktail-nutrition-facts
| 0
|
6629418
|
<gh_stars>0
import pandas as pd
cocktails = pd.read_csv('/Users/mariusfall/Desktop/cocktails.csv')
nutrition = pd.read_csv('/Users/mariusfall/Desktop/nutrition_facts_cleaned.csv')
states = pd.read_csv('/Users/mariusfall/Desktop/state_ranking_cleaned.csv')
table_final = (pd.merge(cocktails, nutrition, left_on='name', right_on='cocktail_name')).drop(['cocktail_name'], axis=1)
table_final = table_final.replace('-','0')
table_final.head()
|
import pandas as pd
cocktails = pd.read_csv('/Users/mariusfall/Desktop/cocktails.csv')
nutrition = pd.read_csv('/Users/mariusfall/Desktop/nutrition_facts_cleaned.csv')
states = pd.read_csv('/Users/mariusfall/Desktop/state_ranking_cleaned.csv')
table_final = (pd.merge(cocktails, nutrition, left_on='name', right_on='cocktail_name')).drop(['cocktail_name'], axis=1)
table_final = table_final.replace('-','0')
table_final.head()
|
none
| 1
| 2.485255
| 2
|
|
onadata/apps/api/urls.py
|
jnm/kobocat-branches-archive-20200507
| 0
|
6629419
|
from django.conf.urls import url
from rest_framework import routers
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.urlpatterns import format_suffix_patterns
from rest_framework.views import APIView
from onadata.apps.api.viewsets.charts_viewset import ChartsViewSet
from onadata.apps.api.viewsets.connect_viewset import ConnectViewSet
from onadata.apps.api.viewsets.data_viewset import DataViewSet
from onadata.apps.api.viewsets.metadata_viewset import MetaDataViewSet
from onadata.apps.api.viewsets.note_viewset import NoteViewSet
from onadata.apps.api.viewsets.organization_profile_viewset import\
OrganizationProfileViewSet
from onadata.apps.api.viewsets.project_viewset import ProjectViewSet
from onadata.apps.api.viewsets.stats_viewset import StatsViewSet
from onadata.apps.api.viewsets.team_viewset import TeamViewSet
from onadata.apps.api.viewsets.xform_viewset import XFormViewSet
from onadata.apps.api.viewsets.user_profile_viewset import UserProfileViewSet
from onadata.apps.api.viewsets.user_viewset import UserViewSet
from onadata.apps.api.viewsets.submissionstats_viewset import\
SubmissionStatsViewSet
from onadata.apps.api.viewsets.attachment_viewset import AttachmentViewSet
from onadata.apps.api.viewsets.xform_list_api import XFormListApi
from onadata.apps.api.viewsets.xform_submission_api import XFormSubmissionApi
from onadata.apps.api.viewsets.briefcase_api import BriefcaseApi
class MultiLookupRouter(routers.DefaultRouter):
def __init__(self, *args, **kwargs):
super(MultiLookupRouter, self).__init__(*args, **kwargs)
self.lookups_routes = []
self.lookups_routes.append(routers.Route(
url=r'^{prefix}/{lookups}{trailing_slash}$',
mapping={
'get': 'retrieve',
'put': 'update',
'patch': 'partial_update',
'delete': 'destroy'
},
name='{basename}-detail',
initkwargs={'suffix': 'Instance'}
))
self.lookups_routes.append(self.make_routes('lookup'))
self.lookups_routes.append(self.make_routes('lookups'))
# Dynamically generated routes.
# Generated using @action or @link decorators on methods of the viewset
self.lookups_routes.append(routers.Route(
url=[
r'^{prefix}/{lookups}/{methodname}{trailing_slash}$',
r'^{prefix}/{lookups}/{methodname}/{extra}{trailing_slash}$'],
mapping={
'{httpmethod}': '{methodname}',
},
name='{basename}-{methodnamehyphen}',
initkwargs={}
))
@staticmethod
def make_routes(template_text):
return routers.Route(
url=r'^{prefix}/{%s}{trailing_slash}$' % template_text,
mapping={
'get': 'list',
'post': 'create'
},
name='{basename}-list',
initkwargs={'suffix': 'List'})
def get_extra_lookup_regexes(self, route):
ret = []
base_regex = '(?P<{lookup_field}>[^/]+)'
if 'extra_lookup_fields' in route.initkwargs:
for lookup_field in route.initkwargs['extra_lookup_fields']:
ret.append(base_regex.format(lookup_field=lookup_field))
return '/'.join(ret)
def get_lookup_regexes(self, viewset):
ret = []
lookup_fields = getattr(viewset, 'lookup_fields', None)
if lookup_fields:
for i in range(1, len(lookup_fields)):
tmp = []
for lookup_field in lookup_fields[:i + 1]:
if lookup_field == lookup_fields[i]:
base_regex = '(?P<{lookup_field}>[^/.]+)'
else:
base_regex = '(?P<{lookup_field}>[^/]+)'
tmp.append(base_regex.format(lookup_field=lookup_field))
ret.append(tmp)
return ret
def get_lookup_routes(self, viewset):
ret = [self.routes[0]]
# Determine any `@action` or `@link` decorated methods on the viewset
dynamic_routes = []
for methodname in dir(viewset):
attr = getattr(viewset, methodname)
httpmethods = getattr(attr, 'bind_to_methods', None)
if httpmethods:
httpmethods = [method.lower() for method in httpmethods]
dynamic_routes.append((httpmethods, methodname))
for route in self.lookups_routes:
if route.mapping == {'{httpmethod}': '{methodname}'}:
# Dynamic routes (@link or @action decorator)
for httpmethods, methodname in dynamic_routes:
initkwargs = route.initkwargs.copy()
initkwargs.update(getattr(viewset, methodname).kwargs)
mapping = dict(
(httpmethod, methodname) for httpmethod in httpmethods)
name = routers.replace_methodname(route.name, methodname)
if 'extra_lookup_fields' in initkwargs:
uri = route.url[1]
uri = routers.replace_methodname(uri, methodname)
ret.append(routers.Route(
url=uri, mapping=mapping, name='%s-extra' % name,
initkwargs=initkwargs,
))
uri = routers.replace_methodname(route.url[0], methodname)
ret.append(routers.Route(
url=uri, mapping=mapping, name=name,
initkwargs=initkwargs,
))
else:
# Standard route
ret.append(route)
return ret
def get_routes(self, viewset):
ret = []
lookup_fields = getattr(viewset, 'lookup_fields', None)
if lookup_fields:
ret = self.get_lookup_routes(viewset)
else:
ret = super(MultiLookupRouter, self).get_routes(viewset)
return ret
def get_api_root_view(self):
"""
Return a view to use as the API root.
"""
api_root_dict = {}
list_name = self.routes[0].name
for prefix, viewset, basename in self.registry:
api_root_dict[prefix] = list_name.format(basename=basename)
class OnaApi(APIView):
"""
## KoBo JSON Rest API endpoints:
### Data
* [/api/v1/charts](/api/v1/charts) - List, Retrieve Charts of collected data
* [/api/v1/data](/api/v1/data) - List, Retrieve submission data
* [/api/v1/stats](/api/v1/stats) - Summary statistics
### Forms
* [/api/v1/forms](/api/v1/forms) - List, Retrieve form information
* [/api/v1/media](/api/v1/media) - List, Retrieve media attachments
* [/api/v1/metadata](/api/v1/metadata) - List, Retrieve form metadata
* [/api/v1/projects](/api/v1/projects) - List, Retrieve, Create,
Update organization projects, forms
* [/api/v1/submissions](/api/v1/submissions) - Submit XForms to a form
### Users and Organizations
* [/api/v1/orgs](/api/v1/orgs) - List, Retrieve, Create,
Update organization and organization info
* [/api/v1/profiles](/api/v1/profiles) - List, Create, Update user information
* [/api/v1/teams](/api/v1/teams) - List, Retrieve, Create, Update teams
* [/api/v1/user](/api/v1/user) - Return authenticated user profile info
* [/api/v1/users](/api/v1/users) - List, Retrieve user data
## Status Codes
* **200** - Successful [`GET`, `PATCH`, `PUT`]
* **201** - Resource successfully created [`POST`]
* **204** - Resouce successfully deleted [`DELETE`]
* **403** - Permission denied to resource
* **404** - Resource was not found
## Authentication
KoBo JSON API enpoints support both Basic authentication
and API Token Authentication through the `Authorization` header.
### Basic Authentication
Example using curl:
curl -X GET https://example.com/api/v1/ -u username:password
### Token Authentication
Example using curl:
curl -X GET https://example.com/api/v1/ -H "Authorization: Token TOKEN_KEY"
### KoBo Tagging API
* [Filter form list by tags.](
/api/v1/forms#get-list-of-forms-with-specific-tags)
* [List Tags for a specific form.](
/api/v1/forms#get-list-of-tags-for-a-specific-form)
* [Tag Forms.](/api/v1/forms#tag-forms)
* [Delete a specific tag.](/api/v1/forms#delete-a-specific-tag)
* [List form data by tag.](
/api/v1/data#query-submitted-data-of-a-specific-form-using-tags)
* [Tag a specific submission](/api/v1/data#tag-a-submission-data-point)
## Using Oauth2 with the KoBo API
You can learn more about oauth2 [here](
http://tools.ietf.org/html/rfc6749).
### 1. Register your client application with KoBo - [register](\
/o/applications/register/)
- `name` - name of your application
- `client_type` - Client Type: select confidential
- `authorization_grant_type` - Authorization grant type: Authorization code
- `redirect_uri` - Redirect urls: redirection endpoint
Keep note of the `client_id` and the `client_secret`, it is required when
requesting for an `access_token`.
### 2. Authorize client application.
The authorization url is of the form:
<pre class="prettyprint">
<b>GET</b> /o/authorize?client_id=XXXXXX&response_type=code&state=abc</pre>
example:
http://localhost:8000/o/authorize?client_id=e8&response_type=code&state=xyz
Note: Providing the url to any user will prompt for a password and
request for read and write permission for the application whose `client_id` is
specified.
Where:
- `client_id` - is the client application id - ensure its urlencoded
- `response_type` - should be code
- `state` - a random state string that you client application will get when
redirection happens
What happens:
1. a login page is presented, the username used to login determines the account
that provides access.
2. redirection to the client application occurs, the url is of the form:
> REDIRECT_URI/?state=abc&code=YYYYYYYYY
example redirect uri
http://localhost:30000/?state=xyz&code=SWWk2PN6NdCwfpqiDiPRcLmvkw2uWd
- `code` - is the code to use to request for `access_token`
- `state` - same state string used during authorization request
Your client application should use the `code` to request for an access_token.
### 3. Request for access token.
You need to make a `POST` request with `grant_type`, `code`, `client_id` and
`redirect_uri` as `POST` payload params. You should authenticate the request
with `Basic Authentication` using your `client_id` and `client_secret` as
`username:password` pair.
Request:
<pre class="prettyprint">
<b>POST</b>/o/token</pre>
Payload:
grant_type=authorization_code&code=YYYYYYYYY&client_id=XXXXXX&
redirect_uri=http://redirect/uri/path
curl example:
curl -X POST -d "grant_type=authorization_code&
code=PSwrMilnJESZVFfFsyEmEukNv0sGZ8&
client_id=e8x4zzJJIyOikDqjPcsCJrmnU22QbpfHQo4HhRnv&
redirect_uri=http://localhost:30000" "http://localhost:8000/o/token/"
--user "e8:xo7i4LNpMj"
Response:
{
"access_token": "<KEY>",
"token_type": "Bearer", "expires_in": 36000,
"refresh_token": "<PASSWORD>",
"scope": "read write groups"
}
Where:
- `access_token` - access token - expires
- `refresh_token` - token to use to request a new `access_token` in case it has
expored.
Now that you have an `access_token` you can make API calls.
### 4. Accessing the KoBo API using the `access_token`.
Example using curl:
curl -X GET https://example.com/api/v1
-H "Authorization: Bearer ACCESS_TOKEN"
"""
_ignore_model_permissions = True
def get(self, request, format=None):
ret = {}
for key, url_name in api_root_dict.items():
ret[key] = reverse(
url_name, request=request, format=format)
return Response(ret)
return OnaApi.as_view()
def get_urls(self):
ret = []
if self.include_root_view:
root_url = url(r'^$', self.get_api_root_view(),
name=self.root_view_name)
ret.append(root_url)
for prefix, viewset, basename in self.registry:
lookup = self.get_lookup_regex(viewset)
lookup_list = self.get_lookup_regexes(viewset)
if lookup_list:
# lookup = lookups[0]
lookup_list = [u'/'.join(k) for k in lookup_list]
else:
lookup_list = [u'']
routes = self.get_routes(viewset)
for route in routes:
mapping = self.get_method_map(viewset, route.mapping)
if not mapping:
continue
for lookups in lookup_list:
regex = route.url.format(
prefix=prefix,
lookup=lookup,
lookups=lookups,
trailing_slash=self.trailing_slash,
extra=self.get_extra_lookup_regexes(route)
)
view = viewset.as_view(mapping, **route.initkwargs)
name = route.name.format(basename=basename)
ret.append(url(regex, view, name=name))
if self.include_format_suffixes:
ret = format_suffix_patterns(ret, allowed=['[a-z0-9]+'])
return ret
class MultiLookupRouterWithPatchList(MultiLookupRouter):
"""
This class only extends MultiLookupRouter to allow PATCH method on list endpoint
"""
@staticmethod
def make_routes(template_text):
return routers.Route(
url=r'^{prefix}/{%s}{trailing_slash}$' % template_text,
mapping={
'get': 'list',
'post': 'create',
'patch': 'bulk_validation_status',
'delete': 'bulk_delete'
},
name='{basename}-list',
initkwargs={'suffix': 'List'})
router = MultiLookupRouter(trailing_slash=False)
router.register(r'users', UserViewSet)
router.register(r'user', ConnectViewSet)
router.register(r'profiles', UserProfileViewSet)
router.register(r'orgs', OrganizationProfileViewSet)
router.register(r'forms', XFormViewSet)
router.register(r'projects', ProjectViewSet)
router.register(r'teams', TeamViewSet)
router.register(r'notes', NoteViewSet)
router.register(r'stats', StatsViewSet, base_name='stats')
router.register(r'stats/submissions', SubmissionStatsViewSet,
base_name='submissionstats')
router.register(r'charts', ChartsViewSet, base_name='chart')
router.register(r'metadata', MetaDataViewSet, base_name='metadata')
router.register(r'media', AttachmentViewSet, base_name='attachment')
router.register(r'formlist', XFormListApi, base_name='formlist')
router.register(r'submissions', XFormSubmissionApi, base_name='submissions')
router.register(r'briefcase', BriefcaseApi, base_name='briefcase')
router_with_patch_list = MultiLookupRouterWithPatchList(trailing_slash=False)
router_with_patch_list.register(r'data', DataViewSet, base_name='data')
|
from django.conf.urls import url
from rest_framework import routers
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.urlpatterns import format_suffix_patterns
from rest_framework.views import APIView
from onadata.apps.api.viewsets.charts_viewset import ChartsViewSet
from onadata.apps.api.viewsets.connect_viewset import ConnectViewSet
from onadata.apps.api.viewsets.data_viewset import DataViewSet
from onadata.apps.api.viewsets.metadata_viewset import MetaDataViewSet
from onadata.apps.api.viewsets.note_viewset import NoteViewSet
from onadata.apps.api.viewsets.organization_profile_viewset import\
OrganizationProfileViewSet
from onadata.apps.api.viewsets.project_viewset import ProjectViewSet
from onadata.apps.api.viewsets.stats_viewset import StatsViewSet
from onadata.apps.api.viewsets.team_viewset import TeamViewSet
from onadata.apps.api.viewsets.xform_viewset import XFormViewSet
from onadata.apps.api.viewsets.user_profile_viewset import UserProfileViewSet
from onadata.apps.api.viewsets.user_viewset import UserViewSet
from onadata.apps.api.viewsets.submissionstats_viewset import\
SubmissionStatsViewSet
from onadata.apps.api.viewsets.attachment_viewset import AttachmentViewSet
from onadata.apps.api.viewsets.xform_list_api import XFormListApi
from onadata.apps.api.viewsets.xform_submission_api import XFormSubmissionApi
from onadata.apps.api.viewsets.briefcase_api import BriefcaseApi
class MultiLookupRouter(routers.DefaultRouter):
def __init__(self, *args, **kwargs):
super(MultiLookupRouter, self).__init__(*args, **kwargs)
self.lookups_routes = []
self.lookups_routes.append(routers.Route(
url=r'^{prefix}/{lookups}{trailing_slash}$',
mapping={
'get': 'retrieve',
'put': 'update',
'patch': 'partial_update',
'delete': 'destroy'
},
name='{basename}-detail',
initkwargs={'suffix': 'Instance'}
))
self.lookups_routes.append(self.make_routes('lookup'))
self.lookups_routes.append(self.make_routes('lookups'))
# Dynamically generated routes.
# Generated using @action or @link decorators on methods of the viewset
self.lookups_routes.append(routers.Route(
url=[
r'^{prefix}/{lookups}/{methodname}{trailing_slash}$',
r'^{prefix}/{lookups}/{methodname}/{extra}{trailing_slash}$'],
mapping={
'{httpmethod}': '{methodname}',
},
name='{basename}-{methodnamehyphen}',
initkwargs={}
))
@staticmethod
def make_routes(template_text):
return routers.Route(
url=r'^{prefix}/{%s}{trailing_slash}$' % template_text,
mapping={
'get': 'list',
'post': 'create'
},
name='{basename}-list',
initkwargs={'suffix': 'List'})
def get_extra_lookup_regexes(self, route):
ret = []
base_regex = '(?P<{lookup_field}>[^/]+)'
if 'extra_lookup_fields' in route.initkwargs:
for lookup_field in route.initkwargs['extra_lookup_fields']:
ret.append(base_regex.format(lookup_field=lookup_field))
return '/'.join(ret)
def get_lookup_regexes(self, viewset):
ret = []
lookup_fields = getattr(viewset, 'lookup_fields', None)
if lookup_fields:
for i in range(1, len(lookup_fields)):
tmp = []
for lookup_field in lookup_fields[:i + 1]:
if lookup_field == lookup_fields[i]:
base_regex = '(?P<{lookup_field}>[^/.]+)'
else:
base_regex = '(?P<{lookup_field}>[^/]+)'
tmp.append(base_regex.format(lookup_field=lookup_field))
ret.append(tmp)
return ret
def get_lookup_routes(self, viewset):
ret = [self.routes[0]]
# Determine any `@action` or `@link` decorated methods on the viewset
dynamic_routes = []
for methodname in dir(viewset):
attr = getattr(viewset, methodname)
httpmethods = getattr(attr, 'bind_to_methods', None)
if httpmethods:
httpmethods = [method.lower() for method in httpmethods]
dynamic_routes.append((httpmethods, methodname))
for route in self.lookups_routes:
if route.mapping == {'{httpmethod}': '{methodname}'}:
# Dynamic routes (@link or @action decorator)
for httpmethods, methodname in dynamic_routes:
initkwargs = route.initkwargs.copy()
initkwargs.update(getattr(viewset, methodname).kwargs)
mapping = dict(
(httpmethod, methodname) for httpmethod in httpmethods)
name = routers.replace_methodname(route.name, methodname)
if 'extra_lookup_fields' in initkwargs:
uri = route.url[1]
uri = routers.replace_methodname(uri, methodname)
ret.append(routers.Route(
url=uri, mapping=mapping, name='%s-extra' % name,
initkwargs=initkwargs,
))
uri = routers.replace_methodname(route.url[0], methodname)
ret.append(routers.Route(
url=uri, mapping=mapping, name=name,
initkwargs=initkwargs,
))
else:
# Standard route
ret.append(route)
return ret
def get_routes(self, viewset):
ret = []
lookup_fields = getattr(viewset, 'lookup_fields', None)
if lookup_fields:
ret = self.get_lookup_routes(viewset)
else:
ret = super(MultiLookupRouter, self).get_routes(viewset)
return ret
def get_api_root_view(self):
"""
Return a view to use as the API root.
"""
api_root_dict = {}
list_name = self.routes[0].name
for prefix, viewset, basename in self.registry:
api_root_dict[prefix] = list_name.format(basename=basename)
class OnaApi(APIView):
"""
## KoBo JSON Rest API endpoints:
### Data
* [/api/v1/charts](/api/v1/charts) - List, Retrieve Charts of collected data
* [/api/v1/data](/api/v1/data) - List, Retrieve submission data
* [/api/v1/stats](/api/v1/stats) - Summary statistics
### Forms
* [/api/v1/forms](/api/v1/forms) - List, Retrieve form information
* [/api/v1/media](/api/v1/media) - List, Retrieve media attachments
* [/api/v1/metadata](/api/v1/metadata) - List, Retrieve form metadata
* [/api/v1/projects](/api/v1/projects) - List, Retrieve, Create,
Update organization projects, forms
* [/api/v1/submissions](/api/v1/submissions) - Submit XForms to a form
### Users and Organizations
* [/api/v1/orgs](/api/v1/orgs) - List, Retrieve, Create,
Update organization and organization info
* [/api/v1/profiles](/api/v1/profiles) - List, Create, Update user information
* [/api/v1/teams](/api/v1/teams) - List, Retrieve, Create, Update teams
* [/api/v1/user](/api/v1/user) - Return authenticated user profile info
* [/api/v1/users](/api/v1/users) - List, Retrieve user data
## Status Codes
* **200** - Successful [`GET`, `PATCH`, `PUT`]
* **201** - Resource successfully created [`POST`]
* **204** - Resouce successfully deleted [`DELETE`]
* **403** - Permission denied to resource
* **404** - Resource was not found
## Authentication
KoBo JSON API enpoints support both Basic authentication
and API Token Authentication through the `Authorization` header.
### Basic Authentication
Example using curl:
curl -X GET https://example.com/api/v1/ -u username:password
### Token Authentication
Example using curl:
curl -X GET https://example.com/api/v1/ -H "Authorization: Token TOKEN_KEY"
### KoBo Tagging API
* [Filter form list by tags.](
/api/v1/forms#get-list-of-forms-with-specific-tags)
* [List Tags for a specific form.](
/api/v1/forms#get-list-of-tags-for-a-specific-form)
* [Tag Forms.](/api/v1/forms#tag-forms)
* [Delete a specific tag.](/api/v1/forms#delete-a-specific-tag)
* [List form data by tag.](
/api/v1/data#query-submitted-data-of-a-specific-form-using-tags)
* [Tag a specific submission](/api/v1/data#tag-a-submission-data-point)
## Using Oauth2 with the KoBo API
You can learn more about oauth2 [here](
http://tools.ietf.org/html/rfc6749).
### 1. Register your client application with KoBo - [register](\
/o/applications/register/)
- `name` - name of your application
- `client_type` - Client Type: select confidential
- `authorization_grant_type` - Authorization grant type: Authorization code
- `redirect_uri` - Redirect urls: redirection endpoint
Keep note of the `client_id` and the `client_secret`, it is required when
requesting for an `access_token`.
### 2. Authorize client application.
The authorization url is of the form:
<pre class="prettyprint">
<b>GET</b> /o/authorize?client_id=XXXXXX&response_type=code&state=abc</pre>
example:
http://localhost:8000/o/authorize?client_id=e8&response_type=code&state=xyz
Note: Providing the url to any user will prompt for a password and
request for read and write permission for the application whose `client_id` is
specified.
Where:
- `client_id` - is the client application id - ensure its urlencoded
- `response_type` - should be code
- `state` - a random state string that you client application will get when
redirection happens
What happens:
1. a login page is presented, the username used to login determines the account
that provides access.
2. redirection to the client application occurs, the url is of the form:
> REDIRECT_URI/?state=abc&code=YYYYYYYYY
example redirect uri
http://localhost:30000/?state=xyz&code=SWWk2PN6NdCwfpqiDiPRcLmvkw2uWd
- `code` - is the code to use to request for `access_token`
- `state` - same state string used during authorization request
Your client application should use the `code` to request for an access_token.
### 3. Request for access token.
You need to make a `POST` request with `grant_type`, `code`, `client_id` and
`redirect_uri` as `POST` payload params. You should authenticate the request
with `Basic Authentication` using your `client_id` and `client_secret` as
`username:password` pair.
Request:
<pre class="prettyprint">
<b>POST</b>/o/token</pre>
Payload:
grant_type=authorization_code&code=YYYYYYYYY&client_id=XXXXXX&
redirect_uri=http://redirect/uri/path
curl example:
curl -X POST -d "grant_type=authorization_code&
code=PSwrMilnJESZVFfFsyEmEukNv0sGZ8&
client_id=e8x4zzJJIyOikDqjPcsCJrmnU22QbpfHQo4HhRnv&
redirect_uri=http://localhost:30000" "http://localhost:8000/o/token/"
--user "e8:xo7i4LNpMj"
Response:
{
"access_token": "<KEY>",
"token_type": "Bearer", "expires_in": 36000,
"refresh_token": "<PASSWORD>",
"scope": "read write groups"
}
Where:
- `access_token` - access token - expires
- `refresh_token` - token to use to request a new `access_token` in case it has
expored.
Now that you have an `access_token` you can make API calls.
### 4. Accessing the KoBo API using the `access_token`.
Example using curl:
curl -X GET https://example.com/api/v1
-H "Authorization: Bearer ACCESS_TOKEN"
"""
_ignore_model_permissions = True
def get(self, request, format=None):
ret = {}
for key, url_name in api_root_dict.items():
ret[key] = reverse(
url_name, request=request, format=format)
return Response(ret)
return OnaApi.as_view()
    def get_urls(self):
        """Build the router's URL list, expanding multi-lookup routes.

        In addition to the standard DRF behavior, each route URL template is
        rendered once per lookup-regex combination returned by
        ``get_lookup_regexes`` so nested lookups map to the same viewset.
        """
        ret = []
        if self.include_root_view:
            # Root API view that lists every registered endpoint.
            root_url = url(r'^$', self.get_api_root_view(),
                           name=self.root_view_name)
            ret.append(root_url)
        for prefix, viewset, basename in self.registry:
            lookup = self.get_lookup_regex(viewset)
            lookup_list = self.get_lookup_regexes(viewset)
            if lookup_list:
                # Join each nested-lookup tuple into a single URL fragment.
                # lookup = lookups[0]
                lookup_list = [u'/'.join(k) for k in lookup_list]
            else:
                # No nested lookups: render each route exactly once.
                lookup_list = [u'']
            routes = self.get_routes(viewset)
            for route in routes:
                mapping = self.get_method_map(viewset, route.mapping)
                if not mapping:
                    # The viewset implements none of this route's actions.
                    continue
                for lookups in lookup_list:
                    regex = route.url.format(
                        prefix=prefix,
                        lookup=lookup,
                        lookups=lookups,
                        trailing_slash=self.trailing_slash,
                        extra=self.get_extra_lookup_regexes(route)
                    )
                    view = viewset.as_view(mapping, **route.initkwargs)
                    name = route.name.format(basename=basename)
                    ret.append(url(regex, view, name=name))
        if self.include_format_suffixes:
            # Allow format suffixes such as `.json` on every generated URL.
            ret = format_suffix_patterns(ret, allowed=['[a-z0-9]+'])
        return ret
class MultiLookupRouterWithPatchList(MultiLookupRouter):
    """MultiLookupRouter variant whose list endpoint also accepts bulk verbs.

    PATCH on the list maps to ``bulk_validation_status`` and DELETE maps to
    ``bulk_delete``, in addition to the usual GET (list) and POST (create).
    """

    @staticmethod
    def make_routes(template_text):
        # HTTP verb -> viewset action mapping for the list endpoint.
        list_mapping = {
            'get': 'list',
            'post': 'create',
            'patch': 'bulk_validation_status',
            'delete': 'bulk_delete',
        }
        return routers.Route(
            url=r'^{prefix}/{%s}{trailing_slash}$' % template_text,
            mapping=list_mapping,
            name='{basename}-list',
            initkwargs={'suffix': 'List'},
        )
# Router for the standard API endpoints.
router = MultiLookupRouter(trailing_slash=False)
# Users, profiles and organizations.
router.register(r'users', UserViewSet)
router.register(r'user', ConnectViewSet)
router.register(r'profiles', UserProfileViewSet)
router.register(r'orgs', OrganizationProfileViewSet)
# Forms, projects, teams and notes.
router.register(r'forms', XFormViewSet)
router.register(r'projects', ProjectViewSet)
router.register(r'teams', TeamViewSet)
router.register(r'notes', NoteViewSet)
# Statistics and charts over submitted data.
router.register(r'stats', StatsViewSet, base_name='stats')
router.register(r'stats/submissions', SubmissionStatsViewSet,
                base_name='submissionstats')
router.register(r'charts', ChartsViewSet, base_name='chart')
# Metadata, attachments and submission (ODK) endpoints.
router.register(r'metadata', MetaDataViewSet, base_name='metadata')
router.register(r'media', AttachmentViewSet, base_name='attachment')
router.register(r'formlist', XFormListApi, base_name='formlist')
router.register(r'submissions', XFormSubmissionApi, base_name='submissions')
router.register(r'briefcase', BriefcaseApi, base_name='briefcase')
# Separate router whose list endpoint also accepts PATCH (bulk validation
# status) and DELETE (bulk delete) -- see MultiLookupRouterWithPatchList.
router_with_patch_list = MultiLookupRouterWithPatchList(trailing_slash=False)
router_with_patch_list.register(r'data', DataViewSet, base_name='data')
|
en
| 0.576656
|
# Dynamically generated routes. # Generated using @action or @link decorators on methods of the viewset # Determine any `@action` or `@link` decorated methods on the viewset # Dynamic routes (@link or @action decorator) # Standard route Return a view to use as the API root. ## KoBo JSON Rest API endpoints: ### Data * [/api/v1/charts](/api/v1/charts) - List, Retrieve Charts of collected data * [/api/v1/data](/api/v1/data) - List, Retrieve submission data * [/api/v1/stats](/api/v1/stats) - Summary statistics ### Forms * [/api/v1/forms](/api/v1/forms) - List, Retrieve form information * [/api/v1/media](/api/v1/media) - List, Retrieve media attachments * [/api/v1/metadata](/api/v1/metadata) - List, Retrieve form metadata * [/api/v1/projects](/api/v1/projects) - List, Retrieve, Create, Update organization projects, forms * [/api/v1/submissions](/api/v1/submissions) - Submit XForms to a form ### Users and Organizations * [/api/v1/orgs](/api/v1/orgs) - List, Retrieve, Create, Update organization and organization info * [/api/v1/profiles](/api/v1/profiles) - List, Create, Update user information * [/api/v1/teams](/api/v1/teams) - List, Retrieve, Create, Update teams * [/api/v1/user](/api/v1/user) - Return authenticated user profile info * [/api/v1/users](/api/v1/users) - List, Retrieve user data ## Status Codes * **200** - Successful [`GET`, `PATCH`, `PUT`] * **201** - Resource successfully created [`POST`] * **204** - Resouce successfully deleted [`DELETE`] * **403** - Permission denied to resource * **404** - Resource was not found ## Authentication KoBo JSON API enpoints support both Basic authentication and API Token Authentication through the `Authorization` header. 
### Basic Authentication Example using curl: curl -X GET https://example.com/api/v1/ -u username:password ### Token Authentication Example using curl: curl -X GET https://example.com/api/v1/ -H "Authorization: Token TOKEN_KEY" ### KoBo Tagging API * [Filter form list by tags.]( /api/v1/forms#get-list-of-forms-with-specific-tags) * [List Tags for a specific form.]( /api/v1/forms#get-list-of-tags-for-a-specific-form) * [Tag Forms.](/api/v1/forms#tag-forms) * [Delete a specific tag.](/api/v1/forms#delete-a-specific-tag) * [List form data by tag.]( /api/v1/data#query-submitted-data-of-a-specific-form-using-tags) * [Tag a specific submission](/api/v1/data#tag-a-submission-data-point) ## Using Oauth2 with the KoBo API You can learn more about oauth2 [here]( http://tools.ietf.org/html/rfc6749). ### 1. Register your client application with KoBo - [register](\ /o/applications/register/) - `name` - name of your application - `client_type` - Client Type: select confidential - `authorization_grant_type` - Authorization grant type: Authorization code - `redirect_uri` - Redirect urls: redirection endpoint Keep note of the `client_id` and the `client_secret`, it is required when requesting for an `access_token`. ### 2. Authorize client application. The authorization url is of the form: <pre class="prettyprint"> <b>GET</b> /o/authorize?client_id=XXXXXX&response_type=code&state=abc</pre> example: http://localhost:8000/o/authorize?client_id=e8&response_type=code&state=xyz Note: Providing the url to any user will prompt for a password and request for read and write permission for the application whose `client_id` is specified. Where: - `client_id` - is the client application id - ensure its urlencoded - `response_type` - should be code - `state` - a random state string that you client application will get when redirection happens What happens: 1. a login page is presented, the username used to login determines the account that provides access. 2. 
redirection to the client application occurs, the url is of the form: > REDIRECT_URI/?state=abc&code=YYYYYYYYY example redirect uri http://localhost:30000/?state=xyz&code=SWWk2PN6NdCwfpqiDiPRcLmvkw2uWd - `code` - is the code to use to request for `access_token` - `state` - same state string used during authorization request Your client application should use the `code` to request for an access_token. ### 3. Request for access token. You need to make a `POST` request with `grant_type`, `code`, `client_id` and `redirect_uri` as `POST` payload params. You should authenticate the request with `Basic Authentication` using your `client_id` and `client_secret` as `username:password` pair. Request: <pre class="prettyprint"> <b>POST</b>/o/token</pre> Payload: grant_type=authorization_code&code=YYYYYYYYY&client_id=XXXXXX& redirect_uri=http://redirect/uri/path curl example: curl -X POST -d "grant_type=authorization_code& code=PSwrMilnJESZVFfFsyEmEukNv0sGZ8& client_id=e8x4zzJJIyOikDqjPcsCJrmnU22QbpfHQo4HhRnv& redirect_uri=http://localhost:30000" "http://localhost:8000/o/token/" --user "e8:xo7i4LNpMj" Response: { "access_token": "<KEY>", "token_type": "Bearer", "expires_in": 36000, "refresh_token": "<PASSWORD>", "scope": "read write groups" } Where: - `access_token` - access token - expires - `refresh_token` - token to use to request a new `access_token` in case it has expored. Now that you have an `access_token` you can make API calls. ### 4. Accessing the KoBo API using the `access_token`. Example using curl: curl -X GET https://example.com/api/v1 -H "Authorization: Bearer ACCESS_TOKEN" # lookup = lookups[0] This class only extends MultiLookupRouter to allow PATCH method on list endpoint
| 1.656102
| 2
|
ean13-generator.py
|
maxmumford/random-ean13-generator
| 13
|
6629420
|
<filename>ean13-generator.py
#! /usr/bin/python
"""
This script generates a random EAN13 number and prints it to the standard out.
"""
from random import randrange
def generate_12_random_numbers():
    """Return a list of 12 random digits (0-9): the data part of an EAN13."""
    return [randrange(10) for _ in range(12)]
def calculate_checksum(ean):
    """
    Calculates the checksum for an EAN13

    @param list ean: List of 12 numbers for first part of EAN13
    :returns: The checksum for `ean`.
    :rtype: Integer
    """
    assert len(ean) == 12, "EAN must be a list of 12 numbers"
    # `reduce` was a Python 2 builtin (NameError on Python 3); `sum` is the
    # idiomatic, version-independent equivalent here.
    # Per the GS1 algorithm: digits at even indexes (odd 1-based positions)
    # weigh 1, digits at odd indexes weigh 3.
    evensum = sum(int(d) for d in ean[::2])
    oddsum = sum(int(d) for d in ean[1::2])
    return (10 - ((evensum + oddsum * 3) % 10)) % 10
# Script entry point: build 12 random digits, append the check digit and
# print the resulting 13-digit EAN to stdout.
# Python 2 `print` statement is a SyntaxError on Python 3; use print().
numbers = generate_12_random_numbers()
numbers.append(calculate_checksum(numbers))
print(''.join(map(str, numbers)))
|
<filename>ean13-generator.py
#! /usr/bin/python
"""
This script generates a random EAN13 number and prints it to the standard out.
"""
from random import randrange
def generate_12_random_numbers():
numbers = []
for x in range(12):
numbers.append(randrange(10))
return numbers
def calculate_checksum(ean):
"""
Calculates the checksum for an EAN13
@param list ean: List of 12 numbers for first part of EAN13
:returns: The checksum for `ean`.
:rtype: Integer
"""
assert len(ean) == 12, "EAN must be a list of 12 numbers"
sum_ = lambda x, y: int(x) + int(y)
evensum = reduce(sum_, ean[::2])
oddsum = reduce(sum_, ean[1::2])
return (10 - ((evensum + oddsum * 3) % 10)) % 10
numbers = generate_12_random_numbers()
numbers.append(calculate_checksum(numbers))
print ''.join(map(str, numbers))
|
en
| 0.628551
|
#! /usr/bin/python This script generates a random EAN13 number and prints it to the standard out. Calculates the checksum for an EAN13 @param list ean: List of 12 numbers for first part of EAN13 :returns: The checksum for `ean`. :rtype: Integer
| 3.893247
| 4
|
astropop/framedata/tests/test_memmap.py
|
FCeoni/astropop
| 1
|
6629421
|
<reponame>FCeoni/astropop<filename>astropop/framedata/tests/test_memmap.py<gh_stars>1-10
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import mmap
import pytest
import pytest_check as check
from astropop.framedata import MemMapArray, create_array_memmap, \
delete_array_memmap, EmptyDataError
from astropy import units as u
import numpy as np
import numpy.testing as npt
def test_create_and_delete_memmap(tmpdir):
    """Round-trip create_array_memmap/delete_array_memmap: data is preserved,
    dtype conversion works, and backing files are created/removed on request."""
    # Creation
    f = os.path.join(tmpdir, 'testarray.npy')
    g = os.path.join(tmpdir, 'test2array.npy')
    a = np.ones((30, 30), dtype='f8')
    b = create_array_memmap(f, a)
    c = create_array_memmap(g, a, dtype=bool)
    check.is_instance(b, np.memmap)
    check.is_instance(c, np.memmap)
    npt.assert_array_equal(a, b)
    npt.assert_allclose(a, c)
    check.is_true(os.path.exists(f))
    check.is_true(os.path.exists(g))

    # Deletion
    # Since for the uses the object is overwritten, we do it here too
    # read=True converts the memmap back into a plain in-memory ndarray;
    # remove=False keeps the file on disk.
    d = delete_array_memmap(b, read=True, remove=False)
    e = delete_array_memmap(c, read=True, remove=False)
    check.is_not_instance(d, np.memmap)
    check.is_not_instance(e, np.memmap)
    check.is_instance(d, np.ndarray)
    check.is_instance(e, np.ndarray)
    npt.assert_array_equal(a, d)
    npt.assert_allclose(a, e)
    check.is_true(os.path.exists(f))
    check.is_true(os.path.exists(g))

    # read=False discards the data; remove=True deletes the backing file.
    d = delete_array_memmap(b, read=False, remove=True)
    e = delete_array_memmap(c, read=False, remove=True)
    check.is_true(d is None)
    check.is_true(e is None)
    check.is_false(os.path.exists(f))
    check.is_false(os.path.exists(g))

    # None should not raise errors
    create_array_memmap('dummy', None)
    delete_array_memmap(None)
@pytest.mark.parametrize('memmap', [True, False])
def test_create_empty_memmap(tmpdir, memmap):
    """An empty MemMapArray exposes its metadata but raises EmptyDataError on
    any data access, and never creates a file on disk."""
    f = os.path.join(tmpdir, 'empty.npy')
    a = MemMapArray(None, filename=f, dtype=None, unit=None, memmap=memmap)
    check.equal(a.filename, f)
    check.is_true(a._contained is None)
    check.equal(a.memmap, memmap)
    check.is_true(a.empty)
    check.is_false(os.path.exists(f))
    check.is_true(a.unit is u.dimensionless_unscaled)
    with pytest.raises(EmptyDataError):
        # accessing dtype should raise
        a.dtype
    with pytest.raises(EmptyDataError):
        # accessing shape should raise
        a.shape
    with pytest.raises(EmptyDataError):
        # getting an item should raise
        a[0]
    with pytest.raises(EmptyDataError):
        # setting an item should raise
        a[0] = 1
@pytest.mark.parametrize('memmap', [True, False])
def test_create_memmap(tmpdir, memmap):
    """A MemMapArray created from data mirrors the source array and only
    touches the disk when memmap mode is enabled."""
    f = os.path.join(tmpdir, 'npn_empty.npy')
    arr = [[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]]
    a = MemMapArray(arr, filename=f, dtype=None, unit=None, memmap=memmap)
    check.equal(a.filename, f)
    npt.assert_array_equal(a, arr)
    check.is_false(a.empty)
    check.equal(a.memmap, memmap)
    # The backing file must exist only in memmap mode.
    check.equal(os.path.exists(f), memmap)
    check.equal(a.unit, u.dimensionless_unscaled)
    # NOTE(review): np.int64 is platform-dependent (int32 on Windows) —
    # confirm the supported platforms.
    check.equal(a.dtype, np.int64)
    # In-place writes: single element, then a whole-row slice.
    a[0][0] = 10
    check.equal(a[0][0], 10)
    a[0][:] = 20
    npt.assert_array_equal(a[0], [20, 20, 20, 20, 20, 20])
def test_enable_disable_memmap(tmpdir):
    """enable_memmap/disable_memmap toggle the on-disk backing, optionally
    keeping or removing the file, without losing the array contents."""
    f = os.path.join(tmpdir, 'npn_empty.npy')
    arr = [[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]]
    a = MemMapArray(arr, filename=f, dtype=None, unit=None, memmap=False)
    check.is_false(a.memmap)
    check.is_false(os.path.exists(f))
    a.enable_memmap()
    check.is_true(a.memmap)
    check.is_true(os.path.exists(f))
    check.is_instance(a._contained, np.memmap)
    # First keep the file
    a.disable_memmap(remove=False)
    check.is_false(a.memmap)
    check.is_true(os.path.exists(f))
    check.is_not_instance(a._contained, np.memmap)
    a.enable_memmap()
    check.is_true(a.memmap)
    check.is_true(os.path.exists(f))
    check.is_instance(a._contained, np.memmap)
    # Remove the file
    a.disable_memmap(remove=True)
    check.is_false(a.memmap)
    check.is_false(os.path.exists(f))
    check.is_not_instance(a._contained, np.memmap)
    with pytest.raises(ValueError):
        # raises error if name is locked
        a.enable_memmap('not_the_same_name.npy')
def test_units_on_creation(tmpdir):
    """A unit given as a string at construction is parsed to an astropy unit."""
    m = MemMapArray(None, os.path.join(tmpdir, 'unit.npy'), unit='adu')
    check.is_true(m.unit is u.adu)


def test_unit_assign(tmpdir):
    """Units are set via set_unit(); direct attribute assignment is rejected."""
    m = MemMapArray(None, os.path.join(tmpdir, 'unit.npy'), unit=None)
    m.set_unit('adu')
    check.is_true(m.unit is u.adu)
    with pytest.raises(AttributeError):
        # no direct assignment
        m.unit = 'adu'


def test_invalid_unit(tmpdir):
    """Unparseable unit strings raise ValueError both at construction and in
    set_unit(); direct assignment still raises AttributeError."""
    with pytest.raises(ValueError):
        m = MemMapArray(None, unit='Invalid')
    m = MemMapArray(None)
    with pytest.raises(ValueError):
        m.set_unit('invalid')
    with pytest.raises(AttributeError):
        m.unit = 'invalid'


def test_unit_change(tmpdir):
    """set_unit() replaces a previously assigned unit."""
    m = MemMapArray(None, unit='m')
    check.is_true(m.unit is u.m)
    m.set_unit('adu')
    check.is_true(m.unit is u.adu)
def test_reset_data(tmpdir):
    """reset_data replaces the contents, tracking the dtype of the new data
    and preserving the memmap state; passing None empties the container."""
    d1 = np.array([[1, 2], [3, 4]]).astype('float32')
    m = MemMapArray(d1, os.path.join(tmpdir, 'reset.npy'),
                    dtype='float64', unit='adu', memmap=True)
    check.is_true(np.issubdtype(m.dtype, np.float64))
    npt.assert_array_equal(m, d1)
    check.is_false(m.empty)
    check.is_true(m.memmap)
    # Resetting with the raw array drops the explicit float64 dtype and
    # adopts the data's own dtype (float32).
    m.reset_data(d1)
    check.is_true(np.issubdtype(m.dtype, np.float32))
    npt.assert_array_equal(m, d1)
    check.is_false(m.empty)
    check.is_true(m.memmap)
    m.reset_data(d1.astype('int16'))
    check.is_true(np.issubdtype(m.dtype, np.int16))
    npt.assert_array_equal(m, d1)
    check.is_false(m.empty)
    check.is_true(m.memmap)
    # None empties the container while keeping the memmap flag.
    m.reset_data(None)
    check.is_true(m.empty)
    check.is_true(m._contained is None)
    check.is_true(m.memmap)
    m.disable_memmap()
    # Both unit and dtype can be overridden on reset.
    m.reset_data(np.ones((10, 10)), unit='m', dtype='float32')
    check.is_true(np.issubdtype(m.dtype, np.float32))
    npt.assert_array_equal(m, np.ones((10, 10)))
    check.is_false(m.empty)
    check.is_false(m.memmap)
# TODO: flush
# TODO: repr

###############################################################################
# For math tests we suppose that numpy's math is correct
###############################################################################

# Shared parametrization for all math tests: (memmap on/off, scale factor
# applied to the base array, right-hand operand). Includes 0, negatives,
# floats, nan and inf to exercise both success and error paths.
parametrize_matrice = pytest.mark.parametrize('memmap, value, other',
                                              [(True, 3, 2), (False, 3, 2),
                                               (True, 1.5, 3.5),
                                               (False, 1.5, 3.5),
                                               (True, 4, 0), (False, 4, 0),
                                               (True, 0, 4), (False, 0, 4),
                                               (True, 1.5, -2),
                                               (False, 1.5, -2),
                                               (True, -10, 3.5),
                                               (False, -10, 3.5),
                                               (True, 10, 3.5),
                                               (False, 10, 3.5),
                                               (True, 1, np.nan),
                                               (False, 1, np.nan),
                                               (True, 1, np.inf),
                                               (False, 1, np.inf)])
# The six rich-comparison tests below share one pattern: compute the numpy
# result of ``arr <op> other``; if numpy raises, assert that MemMapArray
# raises the same exception class, otherwise assert element-wise equality.

@parametrize_matrice
def test_memmap_lt(tmpdir, memmap, value, other):
    f = os.path.join(tmpdir, 'lt.npy')
    arr = np.arange(0, 10, 1)
    a = MemMapArray(arr, filename=f, memmap=memmap)
    try:
        np_v = arr < other
    except Exception as e:
        with pytest.raises(e.__class__):
            ap_v = a < other
    else:
        ap_v = a < other
        npt.assert_array_equal(ap_v, np_v)


@parametrize_matrice
def test_memmap_le(tmpdir, memmap, value, other):
    f = os.path.join(tmpdir, 'le.npy')
    arr = np.arange(0, 10, 1)
    a = MemMapArray(arr, filename=f, memmap=memmap)
    try:
        np_v = arr <= other
    except Exception as e:
        with pytest.raises(e.__class__):
            ap_v = a <= other
    else:
        ap_v = a <= other
        npt.assert_array_equal(ap_v, np_v)


@parametrize_matrice
def test_memmap_gt(tmpdir, memmap, value, other):
    f = os.path.join(tmpdir, 'gt.npy')
    arr = np.arange(0, 10, 1)
    a = MemMapArray(arr, filename=f, memmap=memmap)
    try:
        np_v = arr > other
    except Exception as e:
        with pytest.raises(e.__class__):
            ap_v = a > other
    else:
        ap_v = a > other
        npt.assert_array_equal(ap_v, np_v)


@parametrize_matrice
def test_memmap_ge(tmpdir, memmap, value, other):
    f = os.path.join(tmpdir, 'ge.npy')
    arr = np.arange(0, 10, 1)
    a = MemMapArray(arr, filename=f, memmap=memmap)
    try:
        np_v = arr >= other
    except Exception as e:
        with pytest.raises(e.__class__):
            ap_v = a >= other
    else:
        ap_v = a >= other
        npt.assert_array_equal(ap_v, np_v)


@parametrize_matrice
def test_memmap_eq(tmpdir, memmap, value, other):
    f = os.path.join(tmpdir, 'eq.npy')
    arr = np.arange(0, 10, 1)
    a = MemMapArray(arr, filename=f, memmap=memmap)
    try:
        np_v = arr == other
    except Exception as e:
        with pytest.raises(e.__class__):
            ap_v = a == other
    else:
        ap_v = a == other
        npt.assert_array_equal(ap_v, np_v)


@parametrize_matrice
def test_memmap_ne(tmpdir, memmap, value, other):
    f = os.path.join(tmpdir, 'ne.npy')
    arr = np.arange(0, 10, 1)
    a = MemMapArray(arr, filename=f, memmap=memmap)
    try:
        np_v = arr != other
    except Exception as e:
        with pytest.raises(e.__class__):
            ap_v = a != other
    else:
        ap_v = a != other
        npt.assert_array_equal(ap_v, np_v)
# The arithmetic tests below all follow the comparison-test pattern twice:
# first for the binary operator (``arr <op> other``), then for the augmented
# in-place form (``arr <op>= other``), each time asserting that MemMapArray
# raises the same exception as numpy or produces equal results.

@parametrize_matrice
def test_math_add(tmpdir, memmap, value, other):
    f = os.path.join(tmpdir, 'add.npy')
    arr = np.arange(0, 10, 1) * value
    a = MemMapArray(arr, filename=f, memmap=memmap)
    try:
        np_v = arr+other
    except Exception as e:
        with pytest.raises(e.__class__):
            ap_v = a+other
    else:
        ap_v = a+other
        npt.assert_array_equal(ap_v, np_v)
    try:
        arr += other
    except Exception as e:
        with pytest.raises(e.__class__):
            a += other
    else:
        a += other
        npt.assert_array_equal(a, arr)


@parametrize_matrice
def test_math_sub(tmpdir, memmap, value, other):
    # note: the 'sun.npy' filename typo is harmless — it is only a temp path.
    f = os.path.join(tmpdir, 'sun.npy')
    arr = np.arange(0, 10, 1) * value
    a = MemMapArray(arr, filename=f, memmap=memmap)
    try:
        np_v = arr-other
    except Exception as e:
        with pytest.raises(e.__class__):
            ap_v = a-other
    else:
        ap_v = a-other
        npt.assert_array_equal(ap_v, np_v)
    try:
        arr -= other
    except Exception as e:
        with pytest.raises(e.__class__):
            a -= other
    else:
        a -= other
        npt.assert_array_equal(a, arr)


@parametrize_matrice
def test_math_pow(tmpdir, memmap, value, other):
    f = os.path.join(tmpdir, 'pow.npy')
    arr = np.arange(0, 10, 1) * value
    a = MemMapArray(arr, filename=f, memmap=memmap)
    try:
        np_v = arr**other
    except Exception as e:
        with pytest.raises(e.__class__):
            ap_v = a**other
    else:
        ap_v = a**other
        npt.assert_array_equal(ap_v, np_v)
    try:
        arr **= other
    except Exception as e:
        with pytest.raises(e.__class__):
            a **= other
    else:
        a **= other
        npt.assert_array_equal(a, arr)


@parametrize_matrice
def test_math_truediv(tmpdir, memmap, value, other):
    f = os.path.join(tmpdir, 'div.npy')
    arr = np.arange(0, 10, 1) * value
    a = MemMapArray(arr, filename=f, memmap=memmap)
    try:
        np_v = arr/other
    except Exception as e:
        with pytest.raises(e.__class__):
            ap_v = a/other
    else:
        ap_v = a/other
        npt.assert_array_equal(ap_v, np_v)
    try:
        arr /= other
    except Exception as e:
        with pytest.raises(e.__class__):
            a /= other
    else:
        a /= other
        npt.assert_array_equal(a, arr)


@parametrize_matrice
def test_math_floordiv(tmpdir, memmap, value, other):
    f = os.path.join(tmpdir, 'floordiv.npy')
    arr = np.arange(0, 10, 1) * value
    a = MemMapArray(arr, filename=f, memmap=memmap)
    try:
        np_v = arr//other
    except Exception as e:
        with pytest.raises(e.__class__):
            ap_v = a//other
    else:
        ap_v = a//other
        npt.assert_array_equal(ap_v, np_v)
    try:
        arr //= other
    except Exception as e:
        with pytest.raises(e.__class__):
            a //= other
    else:
        a //= other
        npt.assert_array_equal(a, arr)


@parametrize_matrice
def test_math_mul(tmpdir, memmap, value, other):
    f = os.path.join(tmpdir, 'mul.npy')
    arr = np.arange(0, 10, 1) * value
    a = MemMapArray(arr, filename=f, memmap=memmap)
    try:
        np_v = arr*other
    except Exception as e:
        with pytest.raises(e.__class__):
            ap_v = a*other
    else:
        ap_v = a*other
        npt.assert_array_equal(ap_v, np_v)
    try:
        arr *= other
    except Exception as e:
        with pytest.raises(e.__class__):
            a *= other
    else:
        a *= other
        npt.assert_array_equal(a, arr)


@parametrize_matrice
def test_math_mod(tmpdir, memmap, value, other):
    f = os.path.join(tmpdir, 'mod.npy')
    arr = np.arange(0, 10, 1) * value
    a = MemMapArray(arr, filename=f, memmap=memmap)
    try:
        np_v = arr % other
    except Exception as e:
        with pytest.raises(e.__class__):
            ap_v = a % other
    else:
        ap_v = a % other
        npt.assert_array_equal(ap_v, np_v)
    try:
        arr %= other
    except Exception as e:
        with pytest.raises(e.__class__):
            a %= other
    else:
        a %= other
        npt.assert_array_equal(a, arr)
# Shift and bitwise tests: same dual (binary + in-place) pattern as the
# arithmetic tests; float-scaled inputs exercise the TypeError paths.

@parametrize_matrice
def test_math_lshift(tmpdir, memmap, value, other):
    f = os.path.join(tmpdir, 'lshift.npy')
    arr = np.arange(0, 10, 1) * value
    a = MemMapArray(arr, filename=f, memmap=memmap)
    try:
        np_v = arr << other
    except Exception as e:
        with pytest.raises(e.__class__):
            ap_v = a << other
    else:
        ap_v = a << other
        npt.assert_array_equal(ap_v, np_v)
    try:
        arr <<= other
    except Exception as e:
        with pytest.raises(e.__class__):
            a <<= other
    else:
        a <<= other
        npt.assert_array_equal(a, arr)
    # FIXME: why doesn't the reflected form work like a numpy array?
    # npt.assert_array_equal(other<<arr, other<<a)


@parametrize_matrice
def test_math_rshift(tmpdir, memmap, value, other):
    f = os.path.join(tmpdir, 'rshift.npy')
    arr = np.arange(0, 10, 1) * value
    a = MemMapArray(arr, filename=f, memmap=memmap)
    try:
        np_v = arr >> other
    except Exception as e:
        with pytest.raises(e.__class__):
            ap_v = a >> other
    else:
        ap_v = a >> other
        npt.assert_array_equal(ap_v, np_v)
    try:
        arr >>= other
    except Exception as e:
        with pytest.raises(e.__class__):
            a >>= other
    else:
        a >>= other
        npt.assert_array_equal(a, arr)
    # FIXME: why doesn't the reflected form work like a numpy array?
    # npt.assert_array_equal(other>>arr, other>>a)


@parametrize_matrice
def test_math_and(tmpdir, memmap, value, other):
    f = os.path.join(tmpdir, 'and.npy')
    arr = np.array([[0, 1, 0, 1], [1, 0, 1, 0]]) * value
    a = MemMapArray(arr, filename=f, memmap=memmap)
    try:
        np_v = arr & other
    except Exception as e:
        with pytest.raises(e.__class__):
            ap_v = a & other
    else:
        ap_v = a & other
        npt.assert_array_equal(ap_v, np_v)
    try:
        arr &= other
    except Exception as e:
        with pytest.raises(e.__class__):
            a &= other
    else:
        a &= other
        npt.assert_array_equal(a, arr)
    # FIXME: why doesn't the reflected form work like a numpy array?
    # npt.assert_array_equal(other & arr, other & a)


@parametrize_matrice
def test_math_or(tmpdir, memmap, value, other):
    # note: reuses the 'xor.npy' name — harmless, it is only a temp path.
    f = os.path.join(tmpdir, 'xor.npy')
    arr = np.array([[0, 1, 0, 1], [1, 0, 1, 0]]) * value
    a = MemMapArray(arr, filename=f, memmap=memmap)
    try:
        np_v = arr | other
    except Exception as e:
        with pytest.raises(e.__class__):
            ap_v = a | other
    else:
        ap_v = a | other
        npt.assert_array_equal(ap_v, np_v)
    try:
        arr |= other
    except Exception as e:
        with pytest.raises(e.__class__):
            a |= other
    else:
        a |= other
        npt.assert_array_equal(a, arr)
    # FIXME: why doesn't the reflected form work like a numpy array?
    # npt.assert_array_equal(other | arr, other | a)


@parametrize_matrice
def test_math_xor(tmpdir, memmap, value, other):
    f = os.path.join(tmpdir, 'xor.npy')
    arr = np.array([[0, 1, 0, 1], [1, 0, 1, 0]]) * value
    a = MemMapArray(arr, filename=f, memmap=memmap)
    try:
        np_v = arr ^ other
    except Exception as e:
        with pytest.raises(e.__class__):
            ap_v = a ^ other
    else:
        ap_v = a ^ other
        npt.assert_array_equal(ap_v, np_v)
    try:
        arr ^= other
    except Exception as e:
        with pytest.raises(e.__class__):
            a ^= other
    else:
        a ^= other
        npt.assert_array_equal(a, arr)
    # FIXME: why doesn't the reflected form work like a numpy array?
    # npt.assert_array_equal(other | arr, other | a)
# Unary operator tests (neg, pos, abs, invert) and matmul: single
# numpy-vs-MemMapArray comparison, no in-place variant.

@parametrize_matrice
def test_math_neg(tmpdir, memmap, value, other):
    f = os.path.join(tmpdir, 'neg.npy')
    arr = np.array([[0, 1, 0, -1], [1, 0, -1, 0]]) * value
    a = MemMapArray(arr, filename=f, memmap=memmap)
    try:
        np_v = -arr
    except Exception as e:
        with pytest.raises(e.__class__):
            ap_v = -a
    else:
        ap_v = -a
        npt.assert_array_equal(ap_v, np_v)


@parametrize_matrice
def test_math_pos(tmpdir, memmap, value, other):
    f = os.path.join(tmpdir, 'pos.npy')
    arr = np.array([[0, 1, 0, -1], [1, 0, -1, 0]]) * value
    a = MemMapArray(arr, filename=f, memmap=memmap)
    try:
        np_v = +arr
    except Exception as e:
        with pytest.raises(e.__class__):
            ap_v = +a
    else:
        ap_v = +a
        npt.assert_array_equal(ap_v, np_v)


@parametrize_matrice
def test_math_abs(tmpdir, memmap, value, other):
    f = os.path.join(tmpdir, 'abs.npy')
    arr = np.array([[0, 1, 0, -1], [1, 0, -1, 0]]) * value
    a = MemMapArray(arr, filename=f, memmap=memmap)
    try:
        np_v = arr.__abs__()
    except Exception as e:
        with pytest.raises(e.__class__):
            ap_v = a.__abs__()
    else:
        ap_v = a.__abs__()
        npt.assert_array_equal(ap_v, np_v)


@parametrize_matrice
def test_math_invert(tmpdir, memmap, value, other):
    f = os.path.join(tmpdir, 'invert.npy')
    arr = np.array([[0, 1, 0, -1], [1, 0, -1, 0]]) * value
    a = MemMapArray(arr, filename=f, memmap=memmap)
    try:
        np_v = ~arr
    except Exception as e:
        with pytest.raises(e.__class__):
            ap_v = ~a
    else:
        ap_v = ~a
        npt.assert_array_equal(ap_v, np_v)


@parametrize_matrice
def test_math_matmul(tmpdir, memmap, value, other):
    f = os.path.join(tmpdir, 'invert.npy')
    arr = np.array([[0, 1, 0, -1], [1, 0, -1, 0]]) * value
    # `other` is replaced with a compatible (4, 2) matrix for the product.
    other = np.random.randn(4, 2)
    a = MemMapArray(arr, filename=f, memmap=memmap)
    try:
        np_v = arr@other
    except Exception as e:
        with pytest.raises(e.__class__):
            ap_v = a@other
    else:
        ap_v = a@other
        npt.assert_array_equal(ap_v, np_v)
@parametrize_matrice
def test_math_bool_all_any(tmpdir, memmap, value, other):
    """Truthiness of a multi-element array is ambiguous (ValueError), while
    all()/any() reflect the contents, like numpy."""
    f = os.path.join(tmpdir, 'bool.npy')
    arr = np.array([[0, 1, 0, 1], [1, 0, 1, 0]])
    a = MemMapArray(arr, filename=f, memmap=memmap)
    with pytest.raises(ValueError):
        bool(a)
    check.is_false(a.all())
    check.is_true(a.any())
    arr = np.array([0, 0, 0])
    a = MemMapArray(arr, filename=f+'1', memmap=memmap)
    check.is_false(a.all())
    check.is_false(a.any())
    arr = np.array([1, 1, 1])
    a = MemMapArray(arr, filename=f+'2', memmap=memmap)
    check.is_true(a.all())
    check.is_true(a.any())


@parametrize_matrice
def test_math_float(tmpdir, memmap, value, other):
    """float() fails on a multi-element array and works on a 1-element one."""
    f = os.path.join(tmpdir, 'float.npy')
    arr = np.arange(10, dtype='int8')*value
    a = MemMapArray(arr, filename=f, memmap=memmap)
    with pytest.raises(TypeError):
        float(a)
    a = MemMapArray([value], filename=f, memmap=memmap)
    check.equal(float(value), float(a))


@parametrize_matrice
def test_math_int(tmpdir, memmap, value, other):
    """int() fails on a multi-element array and works on a 1-element one."""
    f = os.path.join(tmpdir, 'int.npy')
    arr = np.arange(10, dtype='int8')*value
    a = MemMapArray(arr, filename=f, memmap=memmap)
    with pytest.raises(TypeError):
        int(a)
    a = MemMapArray([value], filename=f, memmap=memmap)
    check.equal(int(value), int(a))


@parametrize_matrice
def test_math_complex(tmpdir, memmap, value, other):
    """complex() conversion mirrors numpy, including its failure modes."""
    f = os.path.join(tmpdir, 'complex.npy')
    arr = np.arange(10, dtype='int8')*value
    a = MemMapArray(arr, filename=f, memmap=memmap)
    with pytest.raises(TypeError):
        complex(a)
    a = MemMapArray([value], filename=f, memmap=memmap)
    try:
        complex(np.array([value]), other)
    except Exception as e:
        with pytest.raises(e.__class__):
            complex(a, other)
    else:
        try:
            check.equal(complex(value), complex(arr))
            check.equal(complex(value, other), complex(arr, other))
        except Exception as e:
            with pytest.raises(e.__class__):
                check.equal(complex(value), complex(arr))
                check.equal(complex(value, other), complex(arr, other))
        else:
            check.equal(complex(value), complex(arr))
            check.equal(complex(value, other), complex(arr, other))
@pytest.mark.parametrize('memmap', [True, False])
def test_math_len(tmpdir, memmap):
    """len() matches numpy for 1-D/2-D/3-D arrays and fails identically for
    a 0-d array (np.array(None))."""
    f = os.path.join(tmpdir, 'len.npy')
    for i in [np.arange(10), np.array([1]), np.zeros((10, 10)),
              np.zeros((10, 10, 10)), np.array(None)]:
        arr = i
        a = MemMapArray(arr, filename=f, memmap=memmap)
        try:
            np_v = len(arr)
        except Exception as e:
            with pytest.raises(e.__class__):
                ap_v = len(a)
        else:
            ap_v = len(a)
            check.equal(np_v, ap_v)
@pytest.mark.parametrize('memmap', [True, False])
def test_math_redirects(tmpdir, memmap):
    """MemMapArray must transparently redirect ndarray attributes/methods.

    ``check_arr`` compares a plain ndarray with its MemMapArray wrapper over
    flags, metadata, conversion methods, reductions and iteration.

    Fix: ``np.float`` (a deprecated alias of the builtin ``float``, removed
    in NumPy 1.24) is replaced with ``float`` — behavior is identical
    (``astype(float)`` gives float64).
    """
    f = os.path.join(tmpdir, 'redirects.npy')

    def check_arr(arr, a):
        arr_flags = arr.flags
        a_flags = a.flags
        # NOTE(review): 'UPDATEIFCOPY' was removed from ndarray.flags in
        # NumPy 1.23, and `tostring()` was removed in NumPy 2.0 — confirm
        # the supported NumPy range before upgrading.
        for i in ['C_CONTIGUOUS', 'F_CONTIGUOUS', 'WRITEABLE', 'ALIGNED',
                  'WRITEBACKIFCOPY', 'UPDATEIFCOPY', 'FNC', 'FORC',
                  'BEHAVED', 'CARRAY', 'FARRAY']:
            check.equal(arr_flags[i], a_flags[i])
        for i in ['OWNDATA']:
            # A memmapped array does not own its buffer, so OWNDATA differs.
            if memmap:
                check.not_equal(arr_flags[i], a_flags[i])
            else:
                check.equal(arr_flags[i], a_flags[i])
        if memmap:
            check.is_instance(a.base, mmap.mmap)
        else:
            check.is_true(a.base is None)
        # Metadata redirects.
        check.equal(arr.shape, a.shape)
        check.equal(arr.strides, a.strides)
        check.equal(arr.ndim, a.ndim)
        check.equal(arr.data, a.data)
        check.equal(arr.size, a.size)
        check.equal(arr.itemsize, a.itemsize)
        check.equal(arr.nbytes, a.nbytes)
        check.equal(arr.dtype, a.dtype)
        # Conversion redirects.
        check.is_instance(a.tolist(), list)
        check.equal(arr.tolist(), a.tolist())
        check.is_instance(a.tostring(), bytes)
        check.equal(arr.tostring(), a.tostring())
        check.is_instance(a.tobytes(), bytes)
        check.equal(arr.tobytes(), a.tobytes())
        check.is_instance(a.dumps(), bytes)
        # FIXME: check.equal(arr.dumps(), a.dumps())
        # Shape/ordering redirects.
        npt.assert_array_equal(arr.T, a.T)
        npt.assert_array_equal(arr.transpose(), a.transpose())
        npt.assert_array_equal(arr.flatten(), a.flatten())
        npt.assert_array_equal(arr.ravel(), a.ravel())
        npt.assert_array_equal(arr.squeeze(), a.squeeze())
        npt.assert_array_equal(arr.argsort(), a.argsort())
        npt.assert_array_equal(arr.argpartition(1), a.argpartition(1))
        npt.assert_array_equal(arr.nonzero(), a.nonzero())
        # Reductions, global and along axis 0.
        check.equal(arr.max(), a.max())
        check.equal(arr.argmax(), a.argmax())
        check.equal(arr.min(), a.min())
        check.equal(arr.argmin(), a.argmin())
        npt.assert_array_equal(arr.max(axis=0), a.max(axis=0))
        npt.assert_array_equal(arr.min(axis=0), a.min(axis=0))
        npt.assert_array_equal(arr.argmax(axis=0), a.argmax(axis=0))
        npt.assert_array_equal(arr.argmin(axis=0), a.argmin(axis=0))
        npt.assert_array_equal(arr.real, a.real)
        npt.assert_array_equal(arr.imag, a.imag)
        npt.assert_array_equal(arr.round(), a.round())
        check.equal(arr.sum(), a.sum())
        npt.assert_array_equal(arr.sum(axis=0), a.sum(axis=0))
        npt.assert_array_equal(arr.cumsum(), a.cumsum())
        npt.assert_array_equal(arr.cumsum(axis=0), a.cumsum(axis=0))
        check.equal(arr.mean(), a.mean())
        npt.assert_array_equal(arr.mean(axis=0), a.mean(axis=0))
        check.equal(arr.var(), a.var())
        npt.assert_array_equal(arr.var(axis=0), a.var(axis=0))
        check.equal(arr.std(), a.std())
        npt.assert_array_equal(arr.std(axis=0), a.std(axis=0))
        check.equal(arr.prod(), a.prod())
        npt.assert_array_equal(arr.prod(axis=0), a.prod(axis=0))
        npt.assert_array_equal(arr.cumprod(), a.cumprod())
        npt.assert_array_equal(arr.cumprod(axis=0), a.cumprod(axis=0))
        # Element iteration and access (all test inputs have >= 9 elements).
        for i, j in zip(arr.flat, a.flat):
            check.equal(i, j)
        for i in range(9):
            check.equal(arr.item(i), a.item(i))
        npt.assert_array_equal(arr.astype(bool), a.astype(bool))
        npt.assert_array_equal(arr.astype(int), a.astype(int))
        check.equal(arr.all(), a.all())
        check.equal(arr.any(), a.any())
        # FIXME: check.equal(arr.ctypes, a.ctypes)
        # TODO: itemset
        # TODO: tofile
        # TODO: dump
        # TODO: byteswap
        # TODO: copy
        # TODO: view
        # TODO: setflags
        # TODO: getfield
        # TODO: reshape
        # TODO: resize
        # TODO: take
        # TODO: put
        # TODO: repeat
        # TODO: sort
        # TODO: choose
        # TODO: partition
        # TODO: searchsorted
        # TODO: compress
        # TODO: ptp
        # TODO: conj
        # TODO: swapaxes
        # TODO: diagonal
        # TODO: trace
    # `float` replaces the removed `np.float` alias (same result: float64).
    x = np.random.randint(9, size=(3, 3)).astype(float)
    y = MemMapArray(x, filename=f, memmap=memmap)
    check_arr(x, y)
    x = np.zeros((5, 5)).astype(float)
    y = MemMapArray(x, filename=f, memmap=memmap)
    check_arr(x, y)
    x = np.ones((5, 5)).astype(float)
    y = MemMapArray(x, filename=f, memmap=memmap)
    check_arr(x, y)
    x = np.arange(20).astype(float)
    y = MemMapArray(x, filename=f, memmap=memmap)
    check_arr(x, y)
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import mmap
import pytest
import pytest_check as check
from astropop.framedata import MemMapArray, create_array_memmap, \
delete_array_memmap, EmptyDataError
from astropy import units as u
import numpy as np
import numpy.testing as npt
def test_create_and_delete_memmap(tmpdir):
    """Round-trip: ndarray -> on-disk memmap -> in-memory ndarray -> removal."""
    # Creation: two memmaps from the same source array, one with a dtype cast
    f = os.path.join(tmpdir, 'testarray.npy')
    g = os.path.join(tmpdir, 'test2array.npy')
    a = np.ones((30, 30), dtype='f8')
    b = create_array_memmap(f, a)
    c = create_array_memmap(g, a, dtype=bool)
    check.is_instance(b, np.memmap)
    check.is_instance(c, np.memmap)
    npt.assert_array_equal(a, b)
    npt.assert_allclose(a, c)
    check.is_true(os.path.exists(f))
    check.is_true(os.path.exists(g))
    # Deletion with read=True returns the data as a plain in-memory ndarray;
    # with remove=False the backing file must stay on disk.
    # (In real use the caller overwrites the memmap variable, mirrored here.)
    d = delete_array_memmap(b, read=True, remove=False)
    e = delete_array_memmap(c, read=True, remove=False)
    check.is_not_instance(d, np.memmap)
    check.is_not_instance(e, np.memmap)
    check.is_instance(d, np.ndarray)
    check.is_instance(e, np.ndarray)
    npt.assert_array_equal(a, d)
    npt.assert_allclose(a, e)
    check.is_true(os.path.exists(f))
    check.is_true(os.path.exists(g))
    # Deletion with remove=True returns None and deletes the backing file
    d = delete_array_memmap(b, read=False, remove=True)
    e = delete_array_memmap(c, read=False, remove=True)
    check.is_true(d is None)
    check.is_true(e is None)
    check.is_false(os.path.exists(f))
    check.is_false(os.path.exists(g))
    # None input should be a no-op, not an error
    create_array_memmap('dummy', None)
    delete_array_memmap(None)
@pytest.mark.parametrize('memmap', [True, False])
def test_create_empty_memmap(tmpdir, memmap):
    """An empty MemMapArray exposes its metadata but refuses data access."""
    f = os.path.join(tmpdir, 'empty.npy')
    a = MemMapArray(None, filename=f, dtype=None, unit=None, memmap=memmap)
    check.equal(a.filename, f)
    check.is_true(a._contained is None)
    check.equal(a.memmap, memmap)
    check.is_true(a.empty)
    # no backing file is created until real data is assigned
    check.is_false(os.path.exists(f))
    check.is_true(a.unit is u.dimensionless_unscaled)
    with pytest.raises(EmptyDataError):
        # dtype should raise
        a.dtype
    with pytest.raises(EmptyDataError):
        # shape should raise
        a.shape
    with pytest.raises(EmptyDataError):
        # item access should raise
        a[0]
    with pytest.raises(EmptyDataError):
        # item assignment should raise
        a[0] = 1
@pytest.mark.parametrize('memmap', [True, False])
def test_create_memmap(tmpdir, memmap):
    """A MemMapArray created from a nested list behaves like the source data.

    Checks metadata (filename, unit, emptiness), that the backing file exists
    exactly when memmapping is on, and that element/slice writes stick.
    """
    f = os.path.join(tmpdir, 'npn_empty.npy')
    arr = [[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]]
    a = MemMapArray(arr, filename=f, dtype=None, unit=None, memmap=memmap)
    check.equal(a.filename, f)
    npt.assert_array_equal(a, arr)
    check.is_false(a.empty)
    check.equal(a.memmap, memmap)
    check.equal(os.path.exists(f), memmap)
    check.equal(a.unit, u.dimensionless_unscaled)
    # The default integer dtype is platform dependent (int32 on Windows),
    # so assert the dtype kind rather than np.int64 specifically.
    check.is_true(np.issubdtype(a.dtype, np.integer))
    a[0][0] = 10
    check.equal(a[0][0], 10)
    a[0][:] = 20
    npt.assert_array_equal(a[0], [20, 20, 20, 20, 20, 20])
def test_enable_disable_memmap(tmpdir):
    """Toggling memmapping moves the data between memory and the backing file."""
    f = os.path.join(tmpdir, 'npn_empty.npy')
    arr = [[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]]
    a = MemMapArray(arr, filename=f, dtype=None, unit=None, memmap=False)
    check.is_false(a.memmap)
    check.is_false(os.path.exists(f))
    a.enable_memmap()
    check.is_true(a.memmap)
    check.is_true(os.path.exists(f))
    check.is_instance(a._contained, np.memmap)
    # First disable while keeping the file on disk
    a.disable_memmap(remove=False)
    check.is_false(a.memmap)
    check.is_true(os.path.exists(f))
    check.is_not_instance(a._contained, np.memmap)
    a.enable_memmap()
    check.is_true(a.memmap)
    check.is_true(os.path.exists(f))
    check.is_instance(a._contained, np.memmap)
    # Then disable and remove the backing file
    a.disable_memmap(remove=True)
    check.is_false(a.memmap)
    check.is_false(os.path.exists(f))
    check.is_not_instance(a._contained, np.memmap)
    with pytest.raises(ValueError):
        # raises error if the filename is locked to a different name
        a.enable_memmap('not_the_same_name.npy')
def test_units_on_creation(tmpdir):
    """A unit given as a string at creation resolves to the astropy unit."""
    m = MemMapArray(None, os.path.join(tmpdir, 'unit.npy'), unit='adu')
    check.is_true(m.unit is u.adu)
def test_unit_assign(tmpdir):
    """Units are set via set_unit(); direct attribute assignment is blocked."""
    m = MemMapArray(None, os.path.join(tmpdir, 'unit.npy'), unit=None)
    m.set_unit('adu')
    check.is_true(m.unit is u.adu)
    with pytest.raises(AttributeError):
        # no direct assignment
        m.unit = 'adu'
def test_invalid_unit(tmpdir):
    """Unknown unit strings are rejected on creation and on set_unit()."""
    with pytest.raises(ValueError):
        m = MemMapArray(None, unit='Invalid')
    m = MemMapArray(None)
    with pytest.raises(ValueError):
        m.set_unit('invalid')
    with pytest.raises(AttributeError):
        # direct assignment always raises AttributeError, even when invalid
        m.unit = 'invalid'
def test_unit_change(tmpdir):
    """set_unit() replaces a previously assigned unit."""
    m = MemMapArray(None, unit='m')
    check.is_true(m.unit is u.m)
    m.set_unit('adu')
    check.is_true(m.unit is u.adu)
def test_reset_data(tmpdir):
    """reset_data() replaces data/dtype in place without losing memmap state."""
    d1 = np.array([[1, 2], [3, 4]]).astype('float32')
    # initial creation casts to the explicit dtype argument (float64)
    m = MemMapArray(d1, os.path.join(tmpdir, 'reset.npy'),
                    dtype='float64', unit='adu', memmap=True)
    check.is_true(np.issubdtype(m.dtype, np.float64))
    npt.assert_array_equal(m, d1)
    check.is_false(m.empty)
    check.is_true(m.memmap)
    # resetting with the raw array adopts the array's own dtype (float32)
    m.reset_data(d1)
    check.is_true(np.issubdtype(m.dtype, np.float32))
    npt.assert_array_equal(m, d1)
    check.is_false(m.empty)
    check.is_true(m.memmap)
    m.reset_data(d1.astype('int16'))
    check.is_true(np.issubdtype(m.dtype, np.int16))
    npt.assert_array_equal(m, d1)
    check.is_false(m.empty)
    check.is_true(m.memmap)
    # resetting with None empties the container but keeps memmap enabled
    m.reset_data(None)
    check.is_true(m.empty)
    check.is_true(m._contained is None)
    check.is_true(m.memmap)
    m.disable_memmap()
    # unit/dtype can be overridden on reset while memmapping is off
    m.reset_data(np.ones((10, 10)), unit='m', dtype='float32')
    check.is_true(np.issubdtype(m.dtype, np.float32))
    npt.assert_array_equal(m, np.ones((10, 10)))
    check.is_false(m.empty)
    check.is_false(m.memmap)
# TODO: flush
# TODO: repr
###############################################################################
# For math tests we supose that numpy's math is correct
###############################################################################
# Shared parametrization for the arithmetic/comparison tests:
#   memmap - exercise both the on-disk and in-memory code paths
#   value  - scale factor applied to the test array
#   other  - scalar operand; includes 0, negatives, floats, nan and inf so
#            numpy's error behaviour is exercised as well as the happy path
parametrize_matrice = pytest.mark.parametrize('memmap, value, other',
                                              [(True, 3, 2), (False, 3, 2),
                                               (True, 1.5, 3.5),
                                               (False, 1.5, 3.5),
                                               (True, 4, 0), (False, 4, 0),
                                               (True, 0, 4), (False, 0, 4),
                                               (True, 1.5, -2),
                                               (False, 1.5, -2),
                                               (True, -10, 3.5),
                                               (False, -10, 3.5),
                                               (True, 10, 3.5),
                                               (False, 10, 3.5),
                                               (True, 1, np.nan),
                                               (False, 1, np.nan),
                                               (True, 1, np.inf),
                                               (False, 1, np.inf)])
@parametrize_matrice
def test_memmap_lt(tmpdir, memmap, value, other):
    """MemMapArray `<` must mirror ndarray `<`, including any exception."""
    f = os.path.join(tmpdir, 'lt.npy')
    arr = np.arange(0, 10, 1)
    a = MemMapArray(arr, filename=f, memmap=memmap)
    try:
        np_v = arr < other
    except Exception as e:
        # numpy raised: the wrapper must raise the same exception class
        with pytest.raises(e.__class__):
            ap_v = a < other
    else:
        ap_v = a < other
        npt.assert_array_equal(ap_v, np_v)
@parametrize_matrice
def test_memmap_le(tmpdir, memmap, value, other):
    """MemMapArray `<=` must mirror ndarray `<=`, including any exception."""
    f = os.path.join(tmpdir, 'le.npy')
    arr = np.arange(0, 10, 1)
    a = MemMapArray(arr, filename=f, memmap=memmap)
    try:
        np_v = arr <= other
    except Exception as e:
        # numpy raised: the wrapper must raise the same exception class
        with pytest.raises(e.__class__):
            ap_v = a <= other
    else:
        ap_v = a <= other
        npt.assert_array_equal(ap_v, np_v)
@parametrize_matrice
def test_memmap_gt(tmpdir, memmap, value, other):
    """MemMapArray `>` must mirror ndarray `>`, including any exception."""
    f = os.path.join(tmpdir, 'gt.npy')
    arr = np.arange(0, 10, 1)
    a = MemMapArray(arr, filename=f, memmap=memmap)
    try:
        np_v = arr > other
    except Exception as e:
        # numpy raised: the wrapper must raise the same exception class
        with pytest.raises(e.__class__):
            ap_v = a > other
    else:
        ap_v = a > other
        npt.assert_array_equal(ap_v, np_v)
@parametrize_matrice
def test_memmap_ge(tmpdir, memmap, value, other):
    """MemMapArray `>=` must mirror ndarray `>=`, including any exception."""
    f = os.path.join(tmpdir, 'ge.npy')
    arr = np.arange(0, 10, 1)
    a = MemMapArray(arr, filename=f, memmap=memmap)
    try:
        np_v = arr >= other
    except Exception as e:
        # numpy raised: the wrapper must raise the same exception class
        with pytest.raises(e.__class__):
            ap_v = a >= other
    else:
        ap_v = a >= other
        npt.assert_array_equal(ap_v, np_v)
@parametrize_matrice
def test_memmap_eq(tmpdir, memmap, value, other):
    """MemMapArray `==` must mirror ndarray `==`, including any exception."""
    f = os.path.join(tmpdir, 'eq.npy')
    arr = np.arange(0, 10, 1)
    a = MemMapArray(arr, filename=f, memmap=memmap)
    try:
        np_v = arr == other
    except Exception as e:
        # numpy raised: the wrapper must raise the same exception class
        with pytest.raises(e.__class__):
            ap_v = a == other
    else:
        ap_v = a == other
        npt.assert_array_equal(ap_v, np_v)
@parametrize_matrice
def test_memmap_ne(tmpdir, memmap, value, other):
    """MemMapArray `!=` must mirror ndarray `!=`, including any exception."""
    f = os.path.join(tmpdir, 'ne.npy')
    arr = np.arange(0, 10, 1)
    a = MemMapArray(arr, filename=f, memmap=memmap)
    try:
        np_v = arr != other
    except Exception as e:
        # numpy raised: the wrapper must raise the same exception class
        with pytest.raises(e.__class__):
            ap_v = a != other
    else:
        ap_v = a != other
        npt.assert_array_equal(ap_v, np_v)
@parametrize_matrice
def test_math_add(tmpdir, memmap, value, other):
    """`+` and `+=` on MemMapArray must mirror the ndarray behaviour."""
    f = os.path.join(tmpdir, 'add.npy')
    arr = np.arange(0, 10, 1) * value
    a = MemMapArray(arr, filename=f, memmap=memmap)
    try:
        np_v = arr+other
    except Exception as e:
        with pytest.raises(e.__class__):
            ap_v = a+other
    else:
        ap_v = a+other
        npt.assert_array_equal(ap_v, np_v)
    # in-place variant: arr is updated first, then a must match it
    try:
        arr += other
    except Exception as e:
        with pytest.raises(e.__class__):
            a += other
    else:
        a += other
        npt.assert_array_equal(a, arr)
@parametrize_matrice
def test_math_sub(tmpdir, memmap, value, other):
    """`-` and `-=` on MemMapArray must mirror the ndarray behaviour."""
    f = os.path.join(tmpdir, 'sub.npy')  # was 'sun.npy' (filename typo)
    arr = np.arange(0, 10, 1) * value
    a = MemMapArray(arr, filename=f, memmap=memmap)
    try:
        np_v = arr-other
    except Exception as e:
        with pytest.raises(e.__class__):
            ap_v = a-other
    else:
        ap_v = a-other
        npt.assert_array_equal(ap_v, np_v)
    # in-place variant: arr is updated first, then a must match it
    try:
        arr -= other
    except Exception as e:
        with pytest.raises(e.__class__):
            a -= other
    else:
        a -= other
        npt.assert_array_equal(a, arr)
@parametrize_matrice
def test_math_pow(tmpdir, memmap, value, other):
f = os.path.join(tmpdir, 'pow.npy')
arr = np.arange(0, 10, 1) * value
a = MemMapArray(arr, filename=f, memmap=memmap)
try:
np_v = arr**other
except Exception as e:
with pytest.raises(e.__class__):
ap_v = a**other
else:
ap_v = a**other
npt.assert_array_equal(ap_v, np_v)
try:
arr **= other
except Exception as e:
with pytest.raises(e.__class__):
a **= other
else:
a **= other
npt.assert_array_equal(a, arr)
@parametrize_matrice
def test_math_truediv(tmpdir, memmap, value, other):
f = os.path.join(tmpdir, 'div.npy')
arr = np.arange(0, 10, 1) * value
a = MemMapArray(arr, filename=f, memmap=memmap)
try:
np_v = arr/other
except Exception as e:
with pytest.raises(e.__class__):
ap_v = a/other
else:
ap_v = a/other
npt.assert_array_equal(ap_v, np_v)
try:
arr /= other
except Exception as e:
with pytest.raises(e.__class__):
a /= other
else:
a /= other
npt.assert_array_equal(a, arr)
@parametrize_matrice
def test_math_floordiv(tmpdir, memmap, value, other):
f = os.path.join(tmpdir, 'floordiv.npy')
arr = np.arange(0, 10, 1) * value
a = MemMapArray(arr, filename=f, memmap=memmap)
try:
np_v = arr//other
except Exception as e:
with pytest.raises(e.__class__):
ap_v = a//other
else:
ap_v = a//other
npt.assert_array_equal(ap_v, np_v)
try:
arr //= other
except Exception as e:
with pytest.raises(e.__class__):
a //= other
else:
a //= other
npt.assert_array_equal(a, arr)
@parametrize_matrice
def test_math_mul(tmpdir, memmap, value, other):
f = os.path.join(tmpdir, 'mul.npy')
arr = np.arange(0, 10, 1) * value
a = MemMapArray(arr, filename=f, memmap=memmap)
try:
np_v = arr*other
except Exception as e:
with pytest.raises(e.__class__):
ap_v = a*other
else:
ap_v = a*other
npt.assert_array_equal(ap_v, np_v)
try:
arr *= other
except Exception as e:
with pytest.raises(e.__class__):
a *= other
else:
a *= other
npt.assert_array_equal(a, arr)
@parametrize_matrice
def test_math_mod(tmpdir, memmap, value, other):
f = os.path.join(tmpdir, 'mod.npy')
arr = np.arange(0, 10, 1) * value
a = MemMapArray(arr, filename=f, memmap=memmap)
try:
np_v = arr % other
except Exception as e:
with pytest.raises(e.__class__):
ap_v = a % other
else:
ap_v = a % other
npt.assert_array_equal(ap_v, np_v)
try:
arr %= other
except Exception as e:
with pytest.raises(e.__class__):
a %= other
else:
a %= other
npt.assert_array_equal(a, arr)
@parametrize_matrice
def test_math_lshift(tmpdir, memmap, value, other):
f = os.path.join(tmpdir, 'lshift.npy')
arr = np.arange(0, 10, 1) * value
a = MemMapArray(arr, filename=f, memmap=memmap)
try:
np_v = arr << other
except Exception as e:
with pytest.raises(e.__class__):
ap_v = a << other
else:
ap_v = a << other
npt.assert_array_equal(ap_v, np_v)
try:
arr <<= other
except Exception as e:
with pytest.raises(e.__class__):
a <<= other
else:
a <<= other
npt.assert_array_equal(a, arr)
# FIXME: why do this simply don't work like in numpy array???
# npt.assert_array_equal(other<<arr, other<<a)
@parametrize_matrice
def test_math_rshift(tmpdir, memmap, value, other):
f = os.path.join(tmpdir, 'rshift.npy')
arr = np.arange(0, 10, 1) * value
a = MemMapArray(arr, filename=f, memmap=memmap)
try:
np_v = arr >> other
except Exception as e:
with pytest.raises(e.__class__):
ap_v = a >> other
else:
ap_v = a >> other
npt.assert_array_equal(ap_v, np_v)
try:
arr >>= other
except Exception as e:
with pytest.raises(e.__class__):
a >>= other
else:
a >>= other
npt.assert_array_equal(a, arr)
# FIXME: why do this simply don't work like in numpy array???
# npt.assert_array_equal(other>>arr, other>>a)
@parametrize_matrice
def test_math_and(tmpdir, memmap, value, other):
f = os.path.join(tmpdir, 'and.npy')
arr = np.array([[0, 1, 0, 1], [1, 0, 1, 0]]) * value
a = MemMapArray(arr, filename=f, memmap=memmap)
try:
np_v = arr & other
except Exception as e:
with pytest.raises(e.__class__):
ap_v = a & other
else:
ap_v = a & other
npt.assert_array_equal(ap_v, np_v)
try:
arr &= other
except Exception as e:
with pytest.raises(e.__class__):
a &= other
else:
a &= other
npt.assert_array_equal(a, arr)
# FIXME: why do this simply don't work like in numpy array???
# npt.assert_array_equal(other & arr, other & a)
@parametrize_matrice
def test_math_or(tmpdir, memmap, value, other):
    """`|` and `|=` on MemMapArray must mirror the ndarray behaviour."""
    f = os.path.join(tmpdir, 'or.npy')  # was 'xor.npy' (copy-paste slip)
    arr = np.array([[0, 1, 0, 1], [1, 0, 1, 0]]) * value
    a = MemMapArray(arr, filename=f, memmap=memmap)
    try:
        np_v = arr | other
    except Exception as e:
        with pytest.raises(e.__class__):
            ap_v = a | other
    else:
        ap_v = a | other
        npt.assert_array_equal(ap_v, np_v)
    # in-place variant: arr is updated first, then a must match it
    try:
        arr |= other
    except Exception as e:
        with pytest.raises(e.__class__):
            a |= other
    else:
        a |= other
        npt.assert_array_equal(a, arr)
    # FIXME: why do this simply don't work like in numpy array???
    # npt.assert_array_equal(other | arr, other | a)
@parametrize_matrice
def test_math_xor(tmpdir, memmap, value, other):
f = os.path.join(tmpdir, 'xor.npy')
arr = np.array([[0, 1, 0, 1], [1, 0, 1, 0]]) * value
a = MemMapArray(arr, filename=f, memmap=memmap)
try:
np_v = arr ^ other
except Exception as e:
with pytest.raises(e.__class__):
ap_v = a ^ other
else:
ap_v = a ^ other
npt.assert_array_equal(ap_v, np_v)
try:
arr ^= other
except Exception as e:
with pytest.raises(e.__class__):
a ^= other
else:
a ^= other
npt.assert_array_equal(a, arr)
# FIXME: why do this simply don't work like in numpy array???
# npt.assert_array_equal(other | arr, other | a)
@parametrize_matrice
def test_math_neg(tmpdir, memmap, value, other):
f = os.path.join(tmpdir, 'neg.npy')
arr = np.array([[0, 1, 0, -1], [1, 0, -1, 0]]) * value
a = MemMapArray(arr, filename=f, memmap=memmap)
try:
np_v = -arr
except Exception as e:
with pytest.raises(e.__class__):
ap_v = -a
else:
ap_v = -a
npt.assert_array_equal(ap_v, np_v)
@parametrize_matrice
def test_math_pos(tmpdir, memmap, value, other):
f = os.path.join(tmpdir, 'pos.npy')
arr = np.array([[0, 1, 0, -1], [1, 0, -1, 0]]) * value
a = MemMapArray(arr, filename=f, memmap=memmap)
try:
np_v = +arr
except Exception as e:
with pytest.raises(e.__class__):
ap_v = +a
else:
ap_v = +a
npt.assert_array_equal(ap_v, np_v)
@parametrize_matrice
def test_math_abs(tmpdir, memmap, value, other):
f = os.path.join(tmpdir, 'abs.npy')
arr = np.array([[0, 1, 0, -1], [1, 0, -1, 0]]) * value
a = MemMapArray(arr, filename=f, memmap=memmap)
try:
np_v = arr.__abs__()
except Exception as e:
with pytest.raises(e.__class__):
ap_v = a.__abs__()
else:
ap_v = a.__abs__()
npt.assert_array_equal(ap_v, np_v)
@parametrize_matrice
def test_math_invert(tmpdir, memmap, value, other):
f = os.path.join(tmpdir, 'invert.npy')
arr = np.array([[0, 1, 0, -1], [1, 0, -1, 0]]) * value
a = MemMapArray(arr, filename=f, memmap=memmap)
try:
np_v = ~arr
except Exception as e:
with pytest.raises(e.__class__):
ap_v = ~a
else:
ap_v = ~a
npt.assert_array_equal(ap_v, np_v)
@parametrize_matrice
def test_math_matmul(tmpdir, memmap, value, other):
    """`@` (matrix multiply) on MemMapArray must mirror ndarray behaviour."""
    f = os.path.join(tmpdir, 'matmul.npy')  # was 'invert.npy' (copy-paste slip)
    arr = np.array([[0, 1, 0, -1], [1, 0, -1, 0]]) * value
    # matmul needs a conformable matrix, so the parametrized scalar `other`
    # is deliberately replaced by a random (4, 2) operand here
    other = np.random.randn(4, 2)
    a = MemMapArray(arr, filename=f, memmap=memmap)
    try:
        np_v = arr@other
    except Exception as e:
        with pytest.raises(e.__class__):
            ap_v = a@other
    else:
        ap_v = a@other
        npt.assert_array_equal(ap_v, np_v)
@parametrize_matrice
def test_math_bool_all_any(tmpdir, memmap, value, other):
f = os.path.join(tmpdir, 'bool.npy')
arr = np.array([[0, 1, 0, 1], [1, 0, 1, 0]])
a = MemMapArray(arr, filename=f, memmap=memmap)
with pytest.raises(ValueError):
bool(a)
check.is_false(a.all())
check.is_true(a.any())
arr = np.array([0, 0, 0])
a = MemMapArray(arr, filename=f+'1', memmap=memmap)
check.is_false(a.all())
check.is_false(a.any())
arr = np.array([1, 1, 1])
a = MemMapArray(arr, filename=f+'2', memmap=memmap)
check.is_true(a.all())
check.is_true(a.any())
@parametrize_matrice
def test_math_float(tmpdir, memmap, value, other):
f = os.path.join(tmpdir, 'float.npy')
arr = np.arange(10, dtype='int8')*value
a = MemMapArray(arr, filename=f, memmap=memmap)
with pytest.raises(TypeError):
float(a)
a = MemMapArray([value], filename=f, memmap=memmap)
check.equal(float(value), float(a))
@parametrize_matrice
def test_math_int(tmpdir, memmap, value, other):
f = os.path.join(tmpdir, 'int.npy')
arr = np.arange(10, dtype='int8')*value
a = MemMapArray(arr, filename=f, memmap=memmap)
with pytest.raises(TypeError):
int(a)
a = MemMapArray([value], filename=f, memmap=memmap)
check.equal(int(value), int(a))
@parametrize_matrice
def test_math_complex(tmpdir, memmap, value, other):
    """complex() on MemMapArray: multi-element fails, single-element converts."""
    f = os.path.join(tmpdir, 'complex.npy')
    arr = np.arange(10, dtype='int8')*value
    a = MemMapArray(arr, filename=f, memmap=memmap)
    # a multi-element array cannot be converted to a scalar complex
    with pytest.raises(TypeError):
        complex(a)
    # single-element wrapper: conversion should follow numpy's behaviour
    a = MemMapArray([value], filename=f, memmap=memmap)
    try:
        complex(np.array([value]), other)
    except Exception as e:
        with pytest.raises(e.__class__):
            complex(a, other)
    else:
        # NOTE(review): the branches below compare against `arr` (the
        # 10-element array, for which complex() raises TypeError) instead of
        # the single-element `a` — this looks like a typo that makes the
        # assertions vacuous; confirm intent before changing.
        try:
            check.equal(complex(value), complex(arr))
            check.equal(complex(value, other), complex(arr, other))
        except Exception as e:
            with pytest.raises(e.__class__):
                check.equal(complex(value), complex(arr))
                check.equal(complex(value, other), complex(arr, other))
        else:
            check.equal(complex(value), complex(arr))
            check.equal(complex(value, other), complex(arr, other))
@pytest.mark.parametrize('memmap', [True, False])
def test_math_len(tmpdir, memmap):
f = os.path.join(tmpdir, 'len.npy')
for i in [np.arange(10), np.array([1]), np.zeros((10, 10)),
np.zeros((10, 10, 10)), np.array(None)]:
arr = i
a = MemMapArray(arr, filename=f, memmap=memmap)
try:
np_v = len(arr)
except Exception as e:
with pytest.raises(e.__class__):
ap_v = len(a)
else:
ap_v = len(a)
check.equal(np_v, ap_v)
@pytest.mark.parametrize('memmap', [True, False])
def test_math_redirects(tmpdir, memmap):
f = os.path.join(tmpdir, 'redirects.npy')
def check_arr(arr, a):
arr_flags = arr.flags
a_flags = a.flags
for i in ['C_CONTIGUOUS', 'F_CONTIGUOUS', 'WRITEABLE', 'ALIGNED',
'WRITEBACKIFCOPY', 'UPDATEIFCOPY', 'FNC', 'FORC',
'BEHAVED', 'CARRAY', 'FARRAY']:
check.equal(arr_flags[i], a_flags[i])
for i in ['OWNDATA']:
if memmap:
check.not_equal(arr_flags[i], a_flags[i])
else:
check.equal(arr_flags[i], a_flags[i])
if memmap:
check.is_instance(a.base, mmap.mmap)
else:
check.is_true(a.base is None)
check.equal(arr.shape, a.shape)
check.equal(arr.strides, a.strides)
check.equal(arr.ndim, a.ndim)
check.equal(arr.data, a.data)
check.equal(arr.size, a.size)
check.equal(arr.itemsize, a.itemsize)
check.equal(arr.nbytes, a.nbytes)
check.equal(arr.dtype, a.dtype)
check.is_instance(a.tolist(), list)
check.equal(arr.tolist(), a.tolist())
check.is_instance(a.tostring(), bytes)
check.equal(arr.tostring(), a.tostring())
check.is_instance(a.tobytes(), bytes)
check.equal(arr.tobytes(), a.tobytes())
check.is_instance(a.dumps(), bytes)
# FIXME: check.equal(arr.dumps(), a.dumps())
npt.assert_array_equal(arr.T, a.T)
npt.assert_array_equal(arr.transpose(), a.transpose())
npt.assert_array_equal(arr.flatten(), a.flatten())
npt.assert_array_equal(arr.ravel(), a.ravel())
npt.assert_array_equal(arr.squeeze(), a.squeeze())
npt.assert_array_equal(arr.argsort(), a.argsort())
npt.assert_array_equal(arr.argpartition(1), a.argpartition(1))
npt.assert_array_equal(arr.nonzero(), a.nonzero())
check.equal(arr.max(), a.max())
check.equal(arr.argmax(), a.argmax())
check.equal(arr.min(), a.min())
check.equal(arr.argmin(), a.argmin())
npt.assert_array_equal(arr.max(axis=0), a.max(axis=0))
npt.assert_array_equal(arr.min(axis=0), a.min(axis=0))
npt.assert_array_equal(arr.argmax(axis=0), a.argmax(axis=0))
npt.assert_array_equal(arr.argmin(axis=0), a.argmin(axis=0))
npt.assert_array_equal(arr.real, a.real)
npt.assert_array_equal(arr.imag, a.imag)
npt.assert_array_equal(arr.round(), a.round())
check.equal(arr.sum(), a.sum())
npt.assert_array_equal(arr.sum(axis=0), a.sum(axis=0))
npt.assert_array_equal(arr.cumsum(), a.cumsum())
npt.assert_array_equal(arr.cumsum(axis=0), a.cumsum(axis=0))
check.equal(arr.mean(), a.mean())
npt.assert_array_equal(arr.mean(axis=0), a.mean(axis=0))
check.equal(arr.var(), a.var())
npt.assert_array_equal(arr.var(axis=0), a.var(axis=0))
check.equal(arr.std(), a.std())
npt.assert_array_equal(arr.std(axis=0), a.std(axis=0))
check.equal(arr.prod(), a.prod())
npt.assert_array_equal(arr.prod(axis=0), a.prod(axis=0))
npt.assert_array_equal(arr.cumprod(), a.cumprod())
npt.assert_array_equal(arr.cumprod(axis=0), a.cumprod(axis=0))
for i, j in zip(arr.flat, a.flat):
check.equal(i, j)
for i in range(9):
check.equal(arr.item(i), a.item(i))
npt.assert_array_equal(arr.astype(bool), a.astype(bool))
npt.assert_array_equal(arr.astype(int), a.astype(int))
check.equal(arr.all(), a.all())
check.equal(arr.any(), a.any())
# FIXME: check.equal(arr.ctypes, a.ctypes)
# TODO: itemset
# TODO: tofile
# TODO: dump
# TODO: byteswap
# TODO: copy
# TODO: view
# TODO: setflags
# TODO: getfield
# TODO: reshape
# TODO: resize
# TODO: take
# TODO: put
# TODO: repeat
# TODO: sort
# TODO: choose
# TODO: partition
# TODO: searchsorted
# TODO: compress
# TODO: ptp
# TODO: conj
# TODO: swapaxes
# TODO: diagonal
# TODO: trace
x = np.random.randint(9, size=(3, 3)).astype(np.float)
y = MemMapArray(x, filename=f, memmap=memmap)
check_arr(x, y)
x = np.zeros((5, 5)).astype(np.float)
y = MemMapArray(x, filename=f, memmap=memmap)
check_arr(x, y)
x = np.ones((5, 5)).astype(np.float)
y = MemMapArray(x, filename=f, memmap=memmap)
check_arr(x, y)
x = np.arange(20).astype(np.float)
y = MemMapArray(x, filename=f, memmap=memmap)
check_arr(x, y)
|
en
| 0.541637
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst # Creation # Deletion # Since for the uses the object is overwritten, we do it here too # None should not raise errors # dtype whould rise # shape whould rise # item whould rise # set item whould rise # First keep the file # Remove the file # raises error if name is locked # no direct assignment # TODO: flush # TODO: repr ############################################################################### # For math tests we supose that numpy's math is correct ############################################################################### # FIXME: why do this simply don't work like in numpy array??? # npt.assert_array_equal(other<<arr, other<<a) # FIXME: why do this simply don't work like in numpy array??? # npt.assert_array_equal(other>>arr, other>>a) # FIXME: why do this simply don't work like in numpy array??? # npt.assert_array_equal(other & arr, other & a) # FIXME: why do this simply don't work like in numpy array??? # npt.assert_array_equal(other | arr, other | a) # FIXME: why do this simply don't work like in numpy array??? # npt.assert_array_equal(other | arr, other | a) # FIXME: check.equal(arr.dumps(), a.dumps()) # FIXME: check.equal(arr.ctypes, a.ctypes) # TODO: itemset # TODO: tofile # TODO: dump # TODO: byteswap # TODO: copy # TODO: view # TODO: setflags # TODO: getfield # TODO: reshape # TODO: resize # TODO: take # TODO: put # TODO: repeat # TODO: sort # TODO: choose # TODO: partition # TODO: searchsorted # TODO: compress # TODO: ptp # TODO: conj # TODO: swapaxes # TODO: diagonal # TODO: trace
| 1.999437
| 2
|
subs2apkg.py
|
Zutatensuppe/subs2apkg
| 0
|
6629422
|
import argparse
import random
import subprocess
import pysubs2
import genanki
import re
from pathlib import Path
OFFSET_AUDIO_START = -250
OFFSET_AUDIO_END = 250
OFFSET_IMAGE = 0
model = genanki.Model(
1740692504,
"japanese + subs2srs",
fields=[
{"name": "SequenceMarker"},
{"name": "Expression"},
{"name": "Reading"},
{"name": "Meaning"},
{"name": "Audio"},
{"name": "Image"},
],
templates=[
{
"name": "Card 1",
"qfmt": "<div class=jp> {{Expression}} </div>{{Audio}}{{Image}}",
"afmt": """{{FrontSide}}
<hr id=answer>
<div class=jp> {{furigana:Reading}} </div><br>
{{Meaning}}""",
},
],
css=""".card {
font-family: arial;
font-size: 20px;
text-align: center;
color: black;
background-color: white;
}
.jp { font-size: 30px }
.win .jp { font-family: "MS Mincho", "MS 明朝"; }
.mac .jp { font-family: "Hiragino Mincho Pro", "ヒラギノ明朝 Pro"; }
.linux .jp { font-family: "Kochi Mincho", "東風明朝"; }
.mobile .jp { font-family: "Hiragino Mincho ProN"; }""",
)
def clean_text(t):
    """Strip ASS override tags (``{\\...}``) and hard line breaks (``\\N``)."""
    markup = r"\{\\[^}]+\}|\\N"
    return re.sub(markup, "", t)
def audio_ref(f: Path):
    """Return the Anki sound-tag markup referencing media file *f* by name."""
    return "[sound:" + f.name + "]"
def image_ref(f: Path):
    """Return the HTML img tag referencing media file *f* by name."""
    return '<img src="{}">'.format(f.name)
def ffmpegtime(t):
    """Format a millisecond timestamp *t* as an ffmpeg-compatible time string."""
    return pysubs2.time.ms_to_str(t, fractions=True)
def middle(start, end):
    """Return the point halfway between *start* and *end* (same units)."""
    half_span = (end - start) / 2
    return start + half_span
def create_audio(video_path, audio_path, line, offset):
    """Extract the audio for one subtitle *line* into *audio_path* (mp3).

    The clip spans the line's start/end times shifted by *offset* (ms) and
    widened by OFFSET_AUDIO_START/OFFSET_AUDIO_END so speech is not clipped.
    Existing files are kept (and ffmpeg runs with ``-n`` so it never
    overwrites).  Returns *audio_path* regardless of success.
    """
    if not audio_path.exists():
        cmd = [
            "ffmpeg",
            "-n",
            "-ss",
            ffmpegtime(line.start + offset + OFFSET_AUDIO_START),
            "-to",
            ffmpegtime(line.end + offset + OFFSET_AUDIO_END),
            "-i",
            str(video_path),
            # no video
            "-vn",
            # audio settings
            "-ar",
            "44100",
            "-ac",
            "2",
            "-ab",
            "96k",
            "-acodec",
            "mp3",
            str(audio_path),
        ]
        # print(" ".join(cmd))
        # NOTE(review): output is captured and not checked, so ffmpeg
        # failures are silent; callers detect them by checking the file.
        subprocess.run(cmd, capture_output=True)
    return audio_path
def create_image(
    video_path, image_path, line, offset, crop
):
    """Grab one video frame at the midpoint of the subtitle *line*.

    *crop*, when given, is (top, right, bottom, left) pixels trimmed from
    the frame via ffmpeg's crop filter.  Existing files are kept (ffmpeg
    runs with ``-n``).  Returns *image_path* regardless of success.
    """
    if not image_path.exists():
        cmd = [
            "ffmpeg",
            "-n",
            "-ss",
            ffmpegtime(middle(line.start + offset, line.end + offset) + OFFSET_IMAGE),
            "-i",
            str(video_path),
            "-vframes",
            "1",
        ]
        if crop:
            cmd.extend(
                [
                    "-filter:v",
                    f"crop=in_w-{crop[1] + crop[3]}:in_h-{crop[0] + crop[2]}:{crop[3]}:{crop[0]}",
                ]
            )
        cmd.append(str(image_path))
        # print(" ".join(cmd))
        # NOTE(review): output is captured and not checked; callers detect
        # failures by checking whether the file exists.
        subprocess.run(cmd, capture_output=True)
    return image_path
def create_notes(
    subs, video_path: Path, tmp_path: Path, styles, offset, crop
):
    """Build Anki notes (and their media files) from subtitle lines.

    Lines whose style is not in *styles* (when given) are skipped, and
    consecutive lines sharing the same start/end times are merged into one
    card.  For each surviving line an audio clip and a snapshot image are
    extracted into *tmp_path*; lines whose media extraction failed are
    skipped.  Returns ``(notes, media_files)``.
    """
    media_files = []
    notes = []
    # combine consecutive lines that share the same start/end timestamps
    merged = []
    last = None
    for line in subs:
        # skip lines whose style is not one of the requested ones
        if styles and line.style not in styles:
            continue
        if not last or last.start != line.start or last.end != line.end:
            merged.append(line)
        else:
            merged[-1].text += f" {line.text}"
        last = line
    subs = merged
    for idx, line in enumerate(subs, start=1):
        print(f"{line!r}")
        audio_path = Path(f"{tmp_path}/{idx}.mp3")
        image_path = Path(f"{tmp_path}/{idx}.jpg")
        audio_file = create_audio(video_path, audio_path, line, offset)
        image_file = create_image(
            video_path, image_path, line, offset, crop
        )
        # ffmpeg failures are silent (output captured); skip broken cards
        if not audio_file.exists() or not image_file.exists():
            print("skipped")
            continue
        media_files.extend([audio_file, image_file])
        notes.append(
            genanki.Note(
                model=model,
                fields=[
                    f"{idx}",  # SequenceMarker
                    clean_text(line.text),  # Expression
                    "",  # Reading
                    "",  # Meaning
                    audio_ref(audio_path),
                    image_ref(image_path),
                ],
            )
        )
    return notes, media_files
def main(args):
    """Drive the conversion: load subtitles, cut media, write the .apkg deck.

    Only ``args.video`` is required; the subtitle path, output path and deck
    name all default to values derived from the video filename.
    """
    video_path = Path(args.video)
    subs_path = Path(args.sub or video_path.with_suffix(".ass"))
    apkg_path = Path(args.apkg or video_path.with_suffix(".apkg"))
    name = args.name or str(video_path.with_suffix("").name)
    # per-video working directory for the extracted mp3/jpg snippets
    tmp_path = video_path.with_suffix("")
    tmp_path.mkdir(exist_ok=True)
    subs = pysubs2.load(subs_path)
    notes, media_files = create_notes(
        subs, video_path, tmp_path, args.styles, args.offset, args.crop
    )
    # random deck id keeps decks from different videos from colliding
    deck = genanki.Deck(deck_id=random.randrange(1 << 30, 1 << 31), name=name)
    for note in notes:
        deck.add_note(note)
    # use a distinct name: the original shadowed the `apkg` path argument
    package = genanki.Package(deck)
    package.media_files = media_files
    package.write_to_file(apkg_path)
if __name__ == "__main__":
    # Command-line entry point: only --in is required; subtitle file, output
    # path and deck name default to values derived from the video filename.
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--in", dest="video", help="Video file", required=True)
    parser.add_argument("-s", "--subs", dest="sub", help="Subtitle file")
    parser.add_argument("-o", "--out", dest="apkg", help="Output anki deck file")
    parser.add_argument("-n", "--name", dest="name", help="Name of anki deck")
    parser.add_argument("--styles", dest="styles", nargs="*", help="Styles of relevant subtitles in ass files", )
    parser.add_argument(
        "--offset",
        dest="offset",
        type=int,
        default=0,
        help="Subtitle time offset in ms",
    )
    parser.add_argument(
        "--crop",
        dest="crop",
        nargs=4,
        type=int,
        help="Crop pixels from the images (top right bottom left)",
    )
    args = parser.parse_args()
    main(args)
|
import argparse
import random
import subprocess
import pysubs2
import genanki
import re
from pathlib import Path
OFFSET_AUDIO_START = -250
OFFSET_AUDIO_END = 250
OFFSET_IMAGE = 0
model = genanki.Model(
1740692504,
"japanese + subs2srs",
fields=[
{"name": "SequenceMarker"},
{"name": "Expression"},
{"name": "Reading"},
{"name": "Meaning"},
{"name": "Audio"},
{"name": "Image"},
],
templates=[
{
"name": "Card 1",
"qfmt": "<div class=jp> {{Expression}} </div>{{Audio}}{{Image}}",
"afmt": """{{FrontSide}}
<hr id=answer>
<div class=jp> {{furigana:Reading}} </div><br>
{{Meaning}}""",
},
],
css=""".card {
font-family: arial;
font-size: 20px;
text-align: center;
color: black;
background-color: white;
}
.jp { font-size: 30px }
.win .jp { font-family: "MS Mincho", "MS 明朝"; }
.mac .jp { font-family: "Hiragino Mincho Pro", "ヒラギノ明朝 Pro"; }
.linux .jp { font-family: "Kochi Mincho", "東風明朝"; }
.mobile .jp { font-family: "Hiragino Mincho ProN"; }""",
)
def clean_text(t):
return re.sub(r"\{\\[^}]+\}|\\N", "", t)
def audio_ref(f: Path):
return f"[sound:{f.name}]"
def image_ref(f: Path):
return f'<img src="{f.name}">'
def ffmpegtime(t):
return pysubs2.time.ms_to_str(t, fractions=True)
def middle(start, end):
return start + ((end - start) / 2)
def create_audio(video_path, audio_path, line, offset):
if not audio_path.exists():
cmd = [
"ffmpeg",
"-n",
"-ss",
ffmpegtime(line.start + offset + OFFSET_AUDIO_START),
"-to",
ffmpegtime(line.end + offset + OFFSET_AUDIO_END),
"-i",
str(video_path),
# no video
"-vn",
# audio settings
"-ar",
"44100",
"-ac",
"2",
"-ab",
"96k",
"-acodec",
"mp3",
str(audio_path),
]
# print(" ".join(cmd))
subprocess.run(cmd, capture_output=True)
return audio_path
def create_image(
video_path, image_path, line, offset, crop
):
if not image_path.exists():
cmd = [
"ffmpeg",
"-n",
"-ss",
ffmpegtime(middle(line.start + offset, line.end + offset) + OFFSET_IMAGE),
"-i",
str(video_path),
"-vframes",
"1",
]
if crop:
cmd.extend(
[
"-filter:v",
f"crop=in_w-{crop[1] + crop[3]}:in_h-{crop[0] + crop[2]}:{crop[3]}:{crop[0]}",
]
)
cmd.append(str(image_path))
# print(" ".join(cmd))
subprocess.run(cmd, capture_output=True)
return image_path
def create_notes(
subs, video_path: Path, tmp_path: Path, styles, offset, crop
):
media_files = []
notes = []
# combine lines that have same start/end
subs2 = []
last = None
for line in subs:
# skip lines where style is not the correct one
if styles and line.style not in styles:
continue
if not last or last.start != line.start or last.end != line.end:
subs2.append(line)
else:
subs2[len(subs2)-1].text += f" {line.text}"
last = line
subs = subs2
for idx, line in enumerate(subs, start=1):
print(f"{line!r}")
audio_path = Path(f"{tmp_path}/{idx}.mp3")
image_path = Path(f"{tmp_path}/{idx}.jpg")
audio_file = create_audio(video_path, audio_path, line, offset)
image_file = create_image(
video_path, image_path, line, offset, crop
)
if not audio_file.exists() or not image_file.exists():
print("skipped")
continue
media_files.extend([audio_file, image_file])
notes.append(
genanki.Note(
model=model,
fields=[
f"{idx}", # SequenceMarker
clean_text(line.text), # Expression
"", # Reading
"", # Meaning
audio_ref(audio_path),
image_ref(image_path),
],
)
)
return notes, media_files
def main(args):
    """Build an Anki deck (.apkg) from a video and its subtitle file."""
    video = args.video
    sub = args.sub
    styles = args.styles
    apkg = args.apkg
    name = args.name
    offset = args.offset
    crop = args.crop
    # Defaults derive from the video path: same stem for subs/output/deck name.
    video_path = Path(video)
    subs_path = Path(sub or video_path.with_suffix(".ass"))
    apkg_path = Path(apkg or video_path.with_suffix(".apkg"))
    name = name or str(video_path.with_suffix("").name)
    # Rendered media clips go into a directory named after the video.
    tmp_path = video_path.with_suffix("")
    if not tmp_path.is_dir():
        tmp_path.mkdir()
    subs = pysubs2.load(subs_path)
    notes, media_files = create_notes(
        subs, video_path, tmp_path, styles, offset, crop
    )
    # Random 31-bit deck id so repeated runs produce distinct decks in Anki.
    deck = genanki.Deck(deck_id=random.randrange(1 << 30, 1 << 31), name=name)
    for note in notes:
        deck.add_note(note)
    apkg = genanki.Package(deck)
    apkg.media_files = media_files
    apkg.write_to_file(apkg_path)
if __name__ == "__main__":
    # CLI entry point: required input video; subtitle path, output deck path,
    # deck name, subtitle style filter, timing offset and crop box are optional.
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--in", dest="video", help="Video file", required=True)
    parser.add_argument("-s", "--subs", dest="sub", help="Subtitle file")
    parser.add_argument("-o", "--out", dest="apkg", help="Output anki deck file")
    parser.add_argument("-n", "--name", dest="name", help="Name of anki deck")
    parser.add_argument("--styles", dest="styles", nargs="*", help="Styles of relevant subtitles in ass files", )
    parser.add_argument(
        "--offset",
        dest="offset",
        type=int,
        default=0,
        help="Subtitle time offset in ms",
    )
    parser.add_argument(
        "--crop",
        dest="crop",
        nargs=4,
        type=int,
        help="Crop pixels from the images (top right bottom left)",
    )
    args = parser.parse_args()
    main(args)
|
en
| 0.253666
|
{{FrontSide}} <hr id=answer> <div class=jp> {{furigana:Reading}} </div><br> {{Meaning}} .card { font-family: arial; font-size: 20px; text-align: center; color: black; background-color: white; } .jp { font-size: 30px } .win .jp { font-family: "MS Mincho", "MS 明朝"; } .mac .jp { font-family: "Hiragino Mincho Pro", "ヒラギノ明朝 Pro"; } .linux .jp { font-family: "Kochi Mincho", "東風明朝"; } .mobile .jp { font-family: "Hiragino Mincho ProN"; } # no video # audio settings # print(" ".join(cmd)) # print(" ".join(cmd)) # combine lines that have same start/end # skip lines where style is not the correct one # SequenceMarker # Expression # Reading # Meaning
| 2.277734
| 2
|
ifpi/Lavagem de carros.py
|
AlexCaprian/Python
| 0
|
6629423
|
<reponame>AlexCaprian/Python<filename>ifpi/Lavagem de carros.py
# 'x' holds the constant 12 (number of cars to be washed)
x=12
# 'y' holds the constant 12.50 (price charged per wash, in R$)
y=12.50
# 'z' holds the product of x and y (total revenue)
z=x*y
# Prints the revenue summary (message text intentionally in Portuguese)
print(f'Eu cobro R${y} para cada lavagem, então se eu lavar {x} carros irei arrecadar R$ {z}')
|
de carros.py
# 'x' holds the constant 12 (number of cars to be washed)
x=12
# 'y' holds the constant 12.50 (price charged per wash, in R$)
y=12.50
# 'z' holds the product of x and y (total revenue)
z=x*y
# Prints the revenue summary (message text intentionally in Portuguese)
print(f'Eu cobro R${y} para cada lavagem, então se eu lavar {x} carros irei arrecadar R$ {z}')
|
pt
| 0.969327
|
#A variável 'x' recebe um valor constante 12 #A variável 'y' recebe um valor constante 12.50 #A variável 'z' recebe um valor do resultado da formula de calculo entre x e y #imprime 'Eu cobro R$ 12.5 para cada lavagem, então se eu lavar {x} carros irei arrecadar R$ {z}'
| 3.745811
| 4
|
ppjson/ppjson.py
|
jiamo/ppjson
| 1
|
6629424
|
from sly import Lexer, Parser
import sys
from copy import deepcopy
class JsonLexer(Lexer):
    """sly lexer for JSON text.

    NOTE(review): SINGLE_STRING and CONSTANT are declared in ``tokens`` but
    have no matching rule below — presumably leftovers; confirm before use.
    NOTE(review): the FLOAT rule also matches plain integers (its fractional
    and exponent parts are optional) and is defined before INT, so INT may
    never fire — verify against sly's rule-ordering semantics.
    """
    tokens = {
        LSBRACKET,
        RSBRACKET,
        LBRACE,
        RBRACE,
        COLON,
        STRING,
        SINGLE_STRING,
        CONSTANT,
        COMMA,
        INT,
        FLOAT,
        LITERRAL_VALUE,
        TRUE,
        FALSE,
        NULL,
    }
    # WS = r'[ \t\n\r]+'
    # todo how to do it
    # literals = { '=', '+', '-', '*', '/', '(', ')' }
    # Whitespace between tokens is discarded.
    ignore = ' \t\n\r'
    # Tokens
    # Bare identifiers; sly remaps the three JSON keywords to their own token types.
    LITERRAL_VALUE = r'[a-zA-Z_][a-zA-Z0-9_]*'
    LITERRAL_VALUE['true'] = TRUE
    LITERRAL_VALUE['false'] = FALSE
    LITERRAL_VALUE['null'] = NULL
    LSBRACKET = r'\['
    RSBRACKET = r'\]'
    LBRACE = r'\{'
    RBRACE = r'\}'
    COLON = r':'
    COMMA = r','
    @_(r'"([ !#-\[\]-\U0010ffff]+|\\(["\/\\bfnrt]|u[0-9A-Fa-f]{4}))*"')
    def STRING(self, t):
        # Strip the surrounding double quotes; escape sequences are kept as-is.
        t.value = str(t.value[1:-1])
        return t
    @_(r'-?(0|[1-9][0-9]*)(\.[0-9]+)?([Ee][+-]?[0-9]+)?')
    def FLOAT(self, t):
        t.value = float(t.value)
        return t
    @_(r'-?(0|[1-9][0-9]*)')
    def INT(self, t):
        t.value = int(t.value)
        return t
    # @_(r"'([^'\n]|(\\'))*'")
    # def STRING(self, t):
    #     t.value = str(t.value[1:-1])
    #     return t
    @_(r'\n+')
    def newline(self, t):
        # Keep line numbers accurate for error reporting.
        self.lineno += t.value.count('\n')
    def error(self, t):
        # Any unrecognized character aborts lexing.
        raise Exception(str(t))
class JsonParser(Parser):
    """sly parser mapping the JSON grammar onto Python objects.

    The parsed value is stored on ``self.json_value`` (set by the top-level
    ``json_text`` production) rather than returned from ``parse``. The many
    ``print`` calls are debug tracing.
    """
    debugfile = 'parser.out'
    tokens = JsonLexer.tokens
    # Tag constants — NOTE(review): not referenced in the visible productions.
    ARRAY = 1
    DICT = 2
    def __init__(self):
        self.names = {}
        self.value = None
        self.json_type = None
        # Holds the final parsed result after a successful parse().
        self.json_value = None
    # --- top level: a JSON document is a single value ---
    @_('value')
    def json_text(self, p):
        print("json_text")
        self.json_value = p.value
        print("self.json_value", p.value)
    @_('')
    def empty(self, p):
        print("empty")
    # --- value: object | array | string | literal | number ---
    @_('object')
    def value(self, p):
        print("value-object:", p.object)
        return p.object
    @_('array')
    def value(self, p):
        print("value-array:", p.array)
        return p.array
    @_('STRING')
    def value(self, p):
        print("value-string")
        return p.STRING
    @_('TRUE')
    def value(self, p):
        print("LITERRAL_VALUE", p)
        return True
    @_('FALSE')
    def value(self, p):
        print("LITERRAL_VALUE", p)
        return False
    @_('NULL')
    def value(self, p):
        print("LITERRAL_VALUE", p)
        return None
    @_('INT')
    def value(self, p):
        return p.INT
    @_('FLOAT')
    def value(self, p):
        return p.FLOAT
    # --- bracket/brace markers (debug hooks only) ---
    @_('LSBRACKET')
    def begin_array(self, p):
        print("begin_array")
    @_('RSBRACKET')
    def end_array(self, p):
        print("end_array")
    @_('LBRACE')
    def begin_object(self, p):
        print("begin_object")
    @_('RBRACE')
    def end_object(self, p):
        print("end_object")
    @_('begin_object [ member_list ] end_object')
    def object(self, p):
        # TODO simplify: may be able to just return p.member_list directly.
        # member_list is a list of one-entry dicts, a single dict, or None.
        print("object --- is", p.member_list)
        result = {}
        if isinstance(p.member_list, list):
            for value in p.member_list:
                result.update(value)
        elif p.member_list is not None:
            result = p.member_list
        return result
    @_('begin_array [ value_list ] end_array')
    def array(self, p):
        # value_list may be a list, a single (non-list) value, or None.
        result = []
        if isinstance(p.value_list, list):
            result = p.value_list
        elif p.value_list is not None:
            result.append(p.value_list)
        return result
    @_('member')
    def member_list(self, p):
        print("member_list-member ---", p.member)
        return p.member
    @_('member_list COMMA member')
    def member_list(self, p):
        # Accumulate members left-recursively into a flat list.
        print("member_list - member")
        result = []
        if isinstance(p.member_list, list):
            p.member_list.append(p.member)
            result = p.member_list
        else:
            result = [p.member_list, p.member]
        return result
    # value_list mirrors member_list for array elements
    @_('value')
    def value_list(self, p):
        print("array-array")
        return p.value
    @_('value_list COMMA value')
    def value_list(self, p):
        result = []
        if isinstance(p.value_list, list):
            p.value_list.append(p.value)
            result = p.value_list
        else:
            result = [p.value_list, p.value]
        print("array-list", p.value_list, p.value, 'r is ', result)
        return result
    @_('COLON')
    def name_separator(self, p):
        print("name_separator")
    @_('STRING name_separator value')
    def member(self, p):
        # One object member as a single-entry dict {key: value}.
        print("member, ", type(p.STRING), " ", p.STRING)
        return {
            p.STRING: p.value
        }
    def error(self, p):
        # Any syntax error aborts parsing.
        raise Exception(str(p))
def loads(s):
    """Parse JSON text ``s`` and return the equivalent Python value."""
    parser = JsonParser()
    token_stream = JsonLexer().tokenize(s)
    parser.parse(token_stream)
    # The result is stashed on the parser by the json_text production.
    return parser.json_value
if __name__ == '__main__':
    # Interactive REPL: parse one JSON text per input line until EOF (Ctrl-D).
    lexer = JsonLexer()
    parser = JsonParser()
    while True:
        try:
            text = input('ppjson > ')
        except EOFError:
            break
        if text:
            tokens = lexer.tokenize(text)
            # debug_tokens = list(tokens)
            # for tok in debug_tokens:
            #     print(tok)
            #     sys.stdout.flush()
            parser.parse(tokens)
            print("value is {} and the python type is {}".format(
                parser.json_value, type(parser.json_value) ))
|
from sly import Lexer, Parser
import sys
from copy import deepcopy
class JsonLexer(Lexer):
tokens = {
LSBRACKET,
RSBRACKET,
LBRACE,
RBRACE,
COLON,
STRING,
SINGLE_STRING,
CONSTANT,
COMMA,
INT,
FLOAT,
LITERRAL_VALUE,
TRUE,
FALSE,
NULL,
}
# WS = r'[ \t\n\r]+'
# todo how to do it
# literals = { '=', '+', '-', '*', '/', '(', ')' }
ignore = ' \t\n\r'
# Tokens
LITERRAL_VALUE = r'[a-zA-Z_][a-zA-Z0-9_]*'
LITERRAL_VALUE['true'] = TRUE
LITERRAL_VALUE['false'] = FALSE
LITERRAL_VALUE['null'] = NULL
LSBRACKET = r'\['
RSBRACKET = r'\]'
LBRACE = r'\{'
RBRACE = r'\}'
COLON = r':'
COMMA = r','
@_(r'"([ !#-\[\]-\U0010ffff]+|\\(["\/\\bfnrt]|u[0-9A-Fa-f]{4}))*"')
def STRING(self, t):
t.value = str(t.value[1:-1])
return t
@_(r'-?(0|[1-9][0-9]*)(\.[0-9]+)?([Ee][+-]?[0-9]+)?')
def FLOAT(self, t):
t.value = float(t.value)
return t
@_(r'-?(0|[1-9][0-9]*)')
def INT(self, t):
t.value = int(t.value)
return t
# @_(r"'([^'\n]|(\\'))*'")
# def STRING(self, t):
# t.value = str(t.value[1:-1])
# return t
@_(r'\n+')
def newline(self, t):
self.lineno += t.value.count('\n')
def error(self, t):
raise Exception(str(t))
class JsonParser(Parser):
debugfile = 'parser.out'
tokens = JsonLexer.tokens
ARRAY = 1
DICT = 2
def __init__(self):
self.names = {}
self.value = None
self.json_type = None
self.json_value = None
@_('value')
def json_text(self, p):
print("json_text")
self.json_value = p.value
print("self.json_value", p.value)
@_('')
def empty(self, p):
print("empty")
@_('object')
def value(self, p):
print("value-object:", p.object)
return p.object
@_('array')
def value(self, p):
print("value-array:", p.array)
return p.array
@_('STRING')
def value(self, p):
print("value-string")
return p.STRING
@_('TRUE')
def value(self, p):
print("LITERRAL_VALUE", p)
return True
@_('FALSE')
def value(self, p):
print("LITERRAL_VALUE", p)
return False
@_('NULL')
def value(self, p):
print("LITERRAL_VALUE", p)
return None
@_('INT')
def value(self, p):
return p.INT
@_('FLOAT')
def value(self, p):
return p.FLOAT
@_('LSBRACKET')
def begin_array(self, p):
print("begin_array")
@_('RSBRACKET')
def end_array(self, p):
print("end_array")
@_('LBRACE')
def begin_object(self, p):
print("begin_object")
@_('RBRACE')
def end_object(self, p):
print("end_object")
@_('begin_object [ member_list ] end_object')
def object(self, p):
# TODO simple the process may be can just return the p.memlist
print("object --- is", p.member_list)
result = {}
if isinstance(p.member_list, list):
for value in p.member_list:
result.update(value)
elif p.member_list is not None:
result = p.member_list
return result
@_('begin_array [ value_list ] end_array')
def array(self, p):
# This is not very good. because the value_list may not be list!
result = []
if isinstance(p.value_list, list):
result = p.value_list
elif p.value_list is not None:
result.append(p.value_list)
return result
@_('member')
def member_list(self, p):
print("member_list-member ---", p.member)
return p.member
@_('member_list COMMA member')
def member_list(self, p):
print("member_list - member")
result = []
if isinstance(p.member_list, list):
p.member_list.append(p.member)
result = p.member_list
else:
result = [p.member_list, p.member]
return result
# very same as member
@_('value')
def value_list(self, p):
print("array-array")
return p.value
@_('value_list COMMA value')
def value_list(self, p):
result = []
if isinstance(p.value_list, list):
p.value_list.append(p.value)
result = p.value_list
else:
result = [p.value_list, p.value]
print("array-list", p.value_list, p.value, 'r is ', result)
return result
@_('COLON')
def name_separator(self, p):
print("name_separator")
@_('STRING name_separator value')
def member(self, p):
print("member, ", type(p.STRING), " ", p.STRING)
return {
p.STRING: p.value
}
def error(self, p):
raise Exception(str(p))
def loads(s):
lexer = JsonLexer()
parser = JsonParser()
tokens = lexer.tokenize(s)
# print(list(tokens))
parser.parse(tokens)
return parser.json_value
if __name__ == '__main__':
lexer = JsonLexer()
parser = JsonParser()
while True:
try:
text = input('ppjson > ')
except EOFError:
break
if text:
tokens = lexer.tokenize(text)
# debug_tokens = list(tokens)
# for tok in debug_tokens:
# print(tok)
# sys.stdout.flush()
parser.parse(tokens)
print("value is {} and the python type is {}".format(
parser.json_value, type(parser.json_value) ))
|
en
| 0.503461
|
# WS = r'[ \t\n\r]+' # todo how to do it # literals = { '=', '+', '-', '*', '/', '(', ')' } # Tokens #-\[\]-\U0010ffff]+|\\(["\/\\bfnrt]|u[0-9A-Fa-f]{4}))*"') # @_(r"'([^'\n]|(\\'))*'") # def STRING(self, t): # t.value = str(t.value[1:-1]) # return t # TODO simple the process may be can just return the p.memlist # This is not very good. because the value_list may not be list! # very same as member # print(list(tokens)) # debug_tokens = list(tokens) # for tok in debug_tokens: # print(tok) # sys.stdout.flush()
| 2.499412
| 2
|
datadog_checks_base/datadog_checks/base/checks/win/winpdh_base.py
|
remicalixte/integrations-core
| 1
|
6629425
|
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from collections import defaultdict
from typing import Dict, List
import win32wnet
from six import iteritems
from ... import AgentCheck, is_affirmative
from ...utils.containers import hash_mutable
try:
from .winpdh import WinPDHCounter, DATA_TYPE_INT, DATA_TYPE_DOUBLE
except ImportError:
from .winpdh_stub import WinPDHCounter, DATA_TYPE_INT, DATA_TYPE_DOUBLE
RESOURCETYPE_ANY = 0
DEFAULT_SHARE = 'c$'
int_types = ["int", "long", "uint"]
double_types = ["double", "float"]
class PDHBaseCheck(AgentCheck):
    """
    Base class for agent checks that read Windows Performance Data Helper (PDH)
    counters.
    Windows only.
    """
    def __init__(self, *args, **kwargs):  # To support optional agentConfig
        # TODO: Change signature to (self, name, init_config, instances, counter_list) once subclasses have been edited
        counter_list = kwargs.get('counter_list', args[-1])  # type: List[List[str]]
        if 'counter_list' not in kwargs:
            args = args[:-1]  # Base class does not know how to interpret it
        super(PDHBaseCheck, self).__init__(*args, **kwargs)
        # Counters that failed to initialize; retried on each check() run
        # when refresh_counters is enabled.
        self._missing_counters = {}  # type: Dict[str, tuple]
        self._metrics = defaultdict(list)  # type: Dict[int, List[List]]  # This dictionary only has one key
        self._tags = defaultdict(list)  # type: Dict[int, List[str]]  # This dictionary only has one key
        self.refresh_counters = is_affirmative(self.instance.get('refresh_counters', True))  # type: bool
        try:
            self.instance_hash = hash_mutable(self.instance)  # type: int
            cfg_tags = self.instance.get('tags')  # type: List[str]
            if cfg_tags is not None:
                if not isinstance(cfg_tags, list):
                    self.log.error("Tags must be configured as a list")
                    raise ValueError("Tags must be type list, not %s" % str(type(cfg_tags)))
                self._tags[self.instance_hash] = list(cfg_tags)
            remote_machine = None
            host = self.instance.get('host')
            # "." (or no host) means local machine; anything else opens a
            # remote connection via WNetAddConnection2.
            if host is not None and host != ".":
                try:
                    remote_machine = host
                    username = self.instance.get('username')
                    password = self.instance.get('password')
                    nr = self._get_netresource(remote_machine)
                    win32wnet.WNetAddConnection2(nr, password, username, 0)
                except Exception as e:
                    self.log.error("Failed to make remote connection %s", str(e))
                    return
            # counter_data_types allows the precision with which counters are queried
            # to be configured on a per-metric basis. In the metric instance, precision
            # should be specified as
            # counter_data_types:
            # - iis.httpd_request_method.get,int
            # - iis.net.bytes_rcvd,float
            #
            # the above would query the counter associated with iis.httpd_request_method.get
            # as an integer (LONG) and iis.net.bytes_rcvd as a double
            datatypes = {}
            precisions = self.instance.get('counter_data_types')
            if precisions is not None:
                if not isinstance(precisions, list):
                    self.log.warning("incorrect type for counter_data_type %s", str(precisions))
                else:
                    for p in precisions:
                        k, v = p.split(",")
                        v = v.lower().strip()
                        if v in int_types:
                            self.log.info("Setting datatype for %s to integer", k)
                            datatypes[k] = DATA_TYPE_INT
                        elif v in double_types:
                            self.log.info("Setting datatype for %s to double", k)
                            datatypes[k] = DATA_TYPE_DOUBLE
                        else:
                            self.log.warning("Unknown data type %s", str(v))
            self._make_counters(counter_data=(counter_list, (datatypes, remote_machine, False, 'entry')))
            # get any additional metrics in the instance
            addl_metrics = self.instance.get('additional_metrics')
            if addl_metrics is not None:
                self._make_counters(
                    counter_data=(addl_metrics, (datatypes, remote_machine, True, 'additional metric entry'))
                )
        except Exception as e:
            self.log.debug("Exception in PDH init: %s", str(e))
            raise
        if not self.instance_hash or not self._metrics.get(self.instance_hash):
            raise AttributeError('No valid counters to collect')
    def _get_netresource(self, remote_machine):
        """Build the win32wnet.NETRESOURCE used to connect to a remote host."""
        # To connect you have to use the name of the server followed by an optional administrative share.
        # Administrative shares are hidden network shares created that allow system administrators to have remote access
        # to every disk volume on a network-connected system.
        # These shares may not be permanently deleted but may be disabled.
        # Administrative shares cannot be accessed by users without administrative privileges.
        #
        # This page explains how to enable them: https://www.wintips.org/how-to-enable-admin-shares-windows-7/
        #
        # The administrative share can be:
        # * A disk volume like c$
        # * admin$: The folder in which Windows is installed
        # * fax$: The folder in which faxed pages and cover pages are cached
        # * ipc$: Area used for interprocess communication and is not part of the file system.
        # * print$: Virtual folder that contains a representation of the installed printers
        # * Domain controller shares: Windows creates two domain controller specific shares called sysvol and netlogon
        #   which do not have $ appended to their names.
        # * Empty string: No admin share specified
        administrative_share = self.instance.get('admin_share', DEFAULT_SHARE)
        nr = win32wnet.NETRESOURCE()
        # Specifies the network resource to connect to.
        nr.lpRemoteName = r"\\{}\{}".format(remote_machine, administrative_share).rstrip('\\')
        # The type of network resource to connect to.
        #
        # Although this member is required, its information may be ignored by the network service provider.
        nr.dwType = RESOURCETYPE_ANY
        # Specifies the name of a local device to redirect, such as "F:" or "LPT1".
        # If the string is empty, NULL, it connects to the network resource without redirecting a local device.
        nr.lpLocalName = None
        return nr
    def check(self, instance):
        """Collect all configured counters and submit them as metrics.

        NOTE(review): the ``instance`` parameter is unused here; the instance
        data captured in __init__ is what drives collection.
        """
        self.log.debug("PDHBaseCheck: check()")
        if self.refresh_counters:
            # Retry counters that failed to initialize earlier.
            for counter, values in list(iteritems(self._missing_counters)):
                self._make_counters(counter_data=([counter], values))
        for inst_name, dd_name, metric_func, counter in self._metrics[self.instance_hash]:
            try:
                if self.refresh_counters:
                    counter.collect_counters()
                vals = counter.get_all_values()
                for instance_name, val in iteritems(vals):
                    tags = list(self._tags.get(self.instance_hash, []))  # type: List[str]
                    if not counter.is_single_instance():
                        tag = "instance:%s" % instance_name
                        tags.append(tag)
                    metric_func(dd_name, val, tags)
            except Exception as e:
                # don't give up on all of the metrics because one failed
                self.log.error("Failed to get data for %s %s: %s", inst_name, dd_name, str(e))
    def _make_counters(self, key=None, counter_data=([], ())):  # Key left in for retrocompatibility
        # type: (int, tuple) -> None
        """Create WinPDHCounter objects for each entry in counter_data.

        Failed counters are remembered in ``self._missing_counters`` so that
        check() can retry them; successful ones are appended to
        ``self._metrics``.
        """
        counter_list, (datatypes, remote_machine, check_instance, message) = counter_data
        # list of the metrics. Each entry is itself an entry,
        # which is the pdh name, datadog metric name, type, and the
        # pdh counter object
        for counterset, inst_name, counter_name, dd_name, mtype in counter_list:
            if check_instance and self._no_instance(inst_name):
                inst_name = None
            # mtype names the AgentCheck submit method (e.g. "gauge").
            m = getattr(self, mtype.lower())
            precision = datatypes.get(dd_name)
            try:
                obj = WinPDHCounter(
                    counterset, counter_name, self.log, inst_name, machine_name=remote_machine, precision=precision
                )
            except Exception as e:
                self.log.debug(
                    'Could not create counter %s\\%s due to %s, will not report %s.',
                    counterset,
                    counter_name,
                    e,
                    dd_name,
                )
                self._missing_counters[(counterset, inst_name, counter_name, dd_name, mtype)] = (
                    datatypes,
                    remote_machine,
                    check_instance,
                    message,
                )
                continue
            else:
                self._missing_counters.pop((counterset, inst_name, counter_name, dd_name, mtype), None)
            entry = [inst_name, dd_name, m, obj]
            self.log.debug('%s: %s', message, entry)
            self._metrics[self.instance_hash].append(entry)
    @classmethod
    def _no_instance(cls, inst_name):
        """True when inst_name is a wildcard/empty marker ('none', '', '*', 'all')."""
        return inst_name.lower() == 'none' or len(inst_name) == 0 or inst_name == '*' or inst_name.lower() == 'all'
|
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from collections import defaultdict
from typing import Dict, List
import win32wnet
from six import iteritems
from ... import AgentCheck, is_affirmative
from ...utils.containers import hash_mutable
try:
from .winpdh import WinPDHCounter, DATA_TYPE_INT, DATA_TYPE_DOUBLE
except ImportError:
from .winpdh_stub import WinPDHCounter, DATA_TYPE_INT, DATA_TYPE_DOUBLE
RESOURCETYPE_ANY = 0
DEFAULT_SHARE = 'c$'
int_types = ["int", "long", "uint"]
double_types = ["double", "float"]
class PDHBaseCheck(AgentCheck):
"""
PDH based check. check.
Windows only.
"""
def __init__(self, *args, **kwargs): # To support optional agentConfig
# TODO: Change signature to (self, name, init_config, instances, counter_list) once subclasses have been edited
counter_list = kwargs.get('counter_list', args[-1]) # type: List[List[str]]
if 'counter_list' not in kwargs:
args = args[:-1] # Base class does not know how to interpret it
super(PDHBaseCheck, self).__init__(*args, **kwargs)
self._missing_counters = {} # type: Dict[str, tuple]
self._metrics = defaultdict(list) # type: Dict[int, List[List]] # This dictionary only has one key
self._tags = defaultdict(list) # type: Dict[int, List[str]] # This dictionary only has one key
self.refresh_counters = is_affirmative(self.instance.get('refresh_counters', True)) # type: bool
try:
self.instance_hash = hash_mutable(self.instance) # type: int
cfg_tags = self.instance.get('tags') # type: List[str]
if cfg_tags is not None:
if not isinstance(cfg_tags, list):
self.log.error("Tags must be configured as a list")
raise ValueError("Tags must be type list, not %s" % str(type(cfg_tags)))
self._tags[self.instance_hash] = list(cfg_tags)
remote_machine = None
host = self.instance.get('host')
if host is not None and host != ".":
try:
remote_machine = host
username = self.instance.get('username')
password = self.instance.get('password')
nr = self._get_netresource(remote_machine)
win32wnet.WNetAddConnection2(nr, password, username, 0)
except Exception as e:
self.log.error("Failed to make remote connection %s", str(e))
return
# counter_data_types allows the precision with which counters are queried
# to be configured on a per-metric basis. In the metric instance, precision
# should be specified as
# counter_data_types:
# - iis.httpd_request_method.get,int
# - iis.net.bytes_rcvd,float
#
# the above would query the counter associated with iis.httpd_request_method.get
# as an integer (LONG) and iis.net.bytes_rcvd as a double
datatypes = {}
precisions = self.instance.get('counter_data_types')
if precisions is not None:
if not isinstance(precisions, list):
self.log.warning("incorrect type for counter_data_type %s", str(precisions))
else:
for p in precisions:
k, v = p.split(",")
v = v.lower().strip()
if v in int_types:
self.log.info("Setting datatype for %s to integer", k)
datatypes[k] = DATA_TYPE_INT
elif v in double_types:
self.log.info("Setting datatype for %s to double", k)
datatypes[k] = DATA_TYPE_DOUBLE
else:
self.log.warning("Unknown data type %s", str(v))
self._make_counters(counter_data=(counter_list, (datatypes, remote_machine, False, 'entry')))
# get any additional metrics in the instance
addl_metrics = self.instance.get('additional_metrics')
if addl_metrics is not None:
self._make_counters(
counter_data=(addl_metrics, (datatypes, remote_machine, True, 'additional metric entry'))
)
except Exception as e:
self.log.debug("Exception in PDH init: %s", str(e))
raise
if not self.instance_hash or not self._metrics.get(self.instance_hash):
raise AttributeError('No valid counters to collect')
def _get_netresource(self, remote_machine):
# To connect you have to use the name of the server followed by an optional administrative share.
# Administrative shares are hidden network shares created that allow system administrators to have remote access
# to every disk volume on a network-connected system.
# These shares may not be permanently deleted but may be disabled.
# Administrative shares cannot be accessed by users without administrative privileges.
#
# This page explains how to enable them: https://www.wintips.org/how-to-enable-admin-shares-windows-7/
#
# The administrative share can be:
# * A disk volume like c$
# * admin$: The folder in which Windows is installed
# * fax$: The folder in which faxed pages and cover pages are cached
# * ipc$: Area used for interprocess communication and is not part of the file system.
# * print$: Virtual folder that contains a representation of the installed printers
# * Domain controller shares: Windows creates two domain controller specific shares called sysvol and netlogon
# which do not have $ appended to their names.
# * Empty string: No admin share specified
administrative_share = self.instance.get('admin_share', DEFAULT_SHARE)
nr = win32wnet.NETRESOURCE()
# Specifies the network resource to connect to.
nr.lpRemoteName = r"\\{}\{}".format(remote_machine, administrative_share).rstrip('\\')
# The type of network resource to connect to.
#
# Although this member is required, its information may be ignored by the network service provider.
nr.dwType = RESOURCETYPE_ANY
# Specifies the name of a local device to redirect, such as "F:" or "LPT1".
# If the string is empty, NULL, it connects to the network resource without redirecting a local device.
nr.lpLocalName = None
return nr
def check(self, instance):
self.log.debug("PDHBaseCheck: check()")
if self.refresh_counters:
for counter, values in list(iteritems(self._missing_counters)):
self._make_counters(counter_data=([counter], values))
for inst_name, dd_name, metric_func, counter in self._metrics[self.instance_hash]:
try:
if self.refresh_counters:
counter.collect_counters()
vals = counter.get_all_values()
for instance_name, val in iteritems(vals):
tags = list(self._tags.get(self.instance_hash, [])) # type: List[str]
if not counter.is_single_instance():
tag = "instance:%s" % instance_name
tags.append(tag)
metric_func(dd_name, val, tags)
except Exception as e:
# don't give up on all of the metrics because one failed
self.log.error("Failed to get data for %s %s: %s", inst_name, dd_name, str(e))
def _make_counters(self, key=None, counter_data=([], ())): # Key left in for retrocompatibility
# type: (int, tuple) -> None
counter_list, (datatypes, remote_machine, check_instance, message) = counter_data
# list of the metrics. Each entry is itself an entry,
# which is the pdh name, datadog metric name, type, and the
# pdh counter object
for counterset, inst_name, counter_name, dd_name, mtype in counter_list:
if check_instance and self._no_instance(inst_name):
inst_name = None
m = getattr(self, mtype.lower())
precision = datatypes.get(dd_name)
try:
obj = WinPDHCounter(
counterset, counter_name, self.log, inst_name, machine_name=remote_machine, precision=precision
)
except Exception as e:
self.log.debug(
'Could not create counter %s\\%s due to %s, will not report %s.',
counterset,
counter_name,
e,
dd_name,
)
self._missing_counters[(counterset, inst_name, counter_name, dd_name, mtype)] = (
datatypes,
remote_machine,
check_instance,
message,
)
continue
else:
self._missing_counters.pop((counterset, inst_name, counter_name, dd_name, mtype), None)
entry = [inst_name, dd_name, m, obj]
self.log.debug('%s: %s', message, entry)
self._metrics[self.instance_hash].append(entry)
@classmethod
def _no_instance(cls, inst_name):
return inst_name.lower() == 'none' or len(inst_name) == 0 or inst_name == '*' or inst_name.lower() == 'all'
|
en
| 0.842167
|
# (C) Datadog, Inc. 2018-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) PDH based check. check. Windows only. # To support optional agentConfig # TODO: Change signature to (self, name, init_config, instances, counter_list) once subclasses have been edited # type: List[List[str]] # Base class does not know how to interpret it # type: Dict[str, tuple] # type: Dict[int, List[List]] # This dictionary only has one key # type: Dict[int, List[str]] # This dictionary only has one key # type: bool # type: int # type: List[str] # counter_data_types allows the precision with which counters are queried # to be configured on a per-metric basis. In the metric instance, precision # should be specified as # counter_data_types: # - iis.httpd_request_method.get,int # - iis.net.bytes_rcvd,float # # the above would query the counter associated with iis.httpd_request_method.get # as an integer (LONG) and iis.net.bytes_rcvd as a double # get any additional metrics in the instance # To connect you have to use the name of the server followed by an optional administrative share. # Administrative shares are hidden network shares created that allow system administrators to have remote access # to every disk volume on a network-connected system. # These shares may not be permanently deleted but may be disabled. # Administrative shares cannot be accessed by users without administrative privileges. # # This page explains how to enable them: https://www.wintips.org/how-to-enable-admin-shares-windows-7/ # # The administrative share can be: # * A disk volume like c$ # * admin$: The folder in which Windows is installed # * fax$: The folder in which faxed pages and cover pages are cached # * ipc$: Area used for interprocess communication and is not part of the file system. 
# * print$: Virtual folder that contains a representation of the installed printers # * Domain controller shares: Windows creates two domain controller specific shares called sysvol and netlogon # which do not have $ appended to their names. # * Empty string: No admin share specified # Specifies the network resource to connect to. # The type of network resource to connect to. # # Although this member is required, its information may be ignored by the network service provider. # Specifies the name of a local device to redirect, such as "F:" or "LPT1". # If the string is empty, NULL, it connects to the network resource without redirecting a local device. # type: List[str] # don't give up on all of the metrics because one failed # Key left in for retrocompatibility # type: (int, tuple) -> None # list of the metrics. Each entry is itself an entry, # which is the pdh name, datadog metric name, type, and the # pdh counter object
| 1.918512
| 2
|
src/envs/__init__.py
|
OkYongChoi/smac-windows
| 64
|
6629426
|
from functools import partial
from envs.starcraft2.starcraft2 import MultiAgentEnv, StarCraft2Env
import sys
import os
def env_fn(env, **kwargs) -> MultiAgentEnv:
    # Thin factory: instantiate the given environment class with kwargs.
    return env(**kwargs)
# Registry of environment constructors keyed by name.
REGISTRY = {}
REGISTRY["sc2"] = partial(env_fn, env=StarCraft2Env)
# On Linux, point SC2PATH at the bundled StarCraft II install unless already set.
if sys.platform == "linux":
    os.environ.setdefault("SC2PATH",
                          os.path.join(os.getcwd(), "3rdparty", "StarCraftII"))
|
from functools import partial
from envs.starcraft2.starcraft2 import MultiAgentEnv, StarCraft2Env
import sys
import os
def env_fn(env, **kwargs) -> MultiAgentEnv:
return env(**kwargs)
REGISTRY = {}
REGISTRY["sc2"] = partial(env_fn, env=StarCraft2Env)
if sys.platform == "linux":
os.environ.setdefault("SC2PATH",
os.path.join(os.getcwd(), "3rdparty", "StarCraftII"))
|
none
| 1
| 2.305497
| 2
|
|
sdcit/hsic.py
|
sanghack81/SDCIT
| 11
|
6629427
|
<gh_stars>10-100
import numpy as np
import scipy.stats
from typing import List, Tuple
from sdcit.cython_impl.cy_sdcit import cy_hsic
from sdcit.utils import p_value_of, cythonize, random_seeds, centering
def HSIC(K: np.ndarray, L: np.ndarray, p_val_method='bootstrap', num_boot=1000) -> float:
    """Compute an HSIC p-value using the chosen null-distribution method.

    ``p_val_method`` is 'bootstrap' (permutation-based, ``num_boot`` samples)
    or 'gamma' (Gamma approximation); anything else raises ValueError.
    """
    if p_val_method == 'gamma':
        return HSIC_gamma_approx(K, L)
    if p_val_method == 'bootstrap':
        return HSIC_boot(K, L, num_boot)
    raise ValueError('unknown p value computation method: {}'.format(p_val_method))
def sum_except_diag(M: np.ndarray):
    """Sum of all entries of ``M`` excluding its main diagonal."""
    off_diag_total = np.sum(M) - np.trace(M)
    return off_diag_total
def HSIC_gamma_approx(K: np.ndarray, L: np.ndarray) -> float:
"""Hilbert-Schmidt Independence Criterion where null distribution is based on approximated Gamma distribution
References
----------
<NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2005). Kernel Methods for Measuring Independence. Journal of Machine Learning Research, 6, 2075–2129.
"""
Kc, Lc = centering(K), centering(L)
m = len(K)
test_stat = 1 / m * np.sum(Kc * Lc)
muX = 1 / m / (m - 1) * sum_except_diag(K)
muY = 1 / m / (m - 1) * sum_except_diag(L)
mHSIC = 1 / m * (1 + muX * muY - muX - muY)
varHSIC = 72 * (m - 4) * (m - 5) / m / (m - 1) / (m - 2) / (m - 3) * (1 / m / (m - 1) * (sum_except_diag((1 / 6 * Kc * Lc) ** 2)))
al = mHSIC ** 2 / varHSIC
bet = varHSIC * m / mHSIC
return scipy.stats.gamma.sf(test_stat, al, scale=bet)
def HSIC_stat(K: np.ndarray, L: np.ndarray) -> float:
"""HSIC statistic assuming given two centered kernel matrices.
References
----------
<NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2005). Kernel Methods for Measuring Independence. Journal of Machine Learning Research, 6, 2075–2129.
"""
m = len(K)
return float(1 / m * np.sum(K * L))
def HSIC_boot(K: np.ndarray, L: np.ndarray, num_boot=1000, seed=None) -> Tuple[float, List[float]]:
"""A Hilbert-Schmidt Independence Criterion where null distribution is based on bootstrapping
References
----------
<NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2005). Kernel Methods for Measuring Independence. Journal of Machine Learning Research, 6, 2075–2129.
"""
if seed is not None:
np.random.seed(seed)
Kc, Lc = centering(K), centering(L)
test_statistics = HSIC_stat(Kc, Lc)
def shuffled():
perm = np.random.permutation(len(K))
return Lc[np.ix_(perm, perm)]
null_distribution = [HSIC_stat(Kc, shuffled()) for _ in range(num_boot)]
return p_value_of(test_statistics, null_distribution)
def c_HSIC(K: np.ndarray, L: np.ndarray, size_of_null_sample=1000, with_null=False, seed=None, n_jobs=1):
if seed is not None:
np.random.seed(seed)
K, L = centering(K), centering(L)
K, L = cythonize(K, L)
raw_null = np.zeros((size_of_null_sample,), dtype='float64')
test_statistic = np.zeros((1,), dtype='float64')
# run SDCIT
cy_hsic(K, L, size_of_null_sample, random_seeds(), n_jobs, test_statistic, raw_null)
# post-process outputs
test_statistic = test_statistic[0]
if with_null:
return test_statistic, p_value_of(test_statistic, raw_null), raw_null
else:
return test_statistic, p_value_of(test_statistic, raw_null)
|
import numpy as np
import scipy.stats
from typing import List, Tuple
from sdcit.cython_impl.cy_sdcit import cy_hsic
from sdcit.utils import p_value_of, cythonize, random_seeds, centering
def HSIC(K: np.ndarray, L: np.ndarray, p_val_method='bootstrap', num_boot=1000) -> float:
if p_val_method == 'bootstrap':
return HSIC_boot(K, L, num_boot)
elif p_val_method == 'gamma':
return HSIC_gamma_approx(K, L)
else:
raise ValueError('unknown p value computation method: {}'.format(p_val_method))
def sum_except_diag(M: np.ndarray):
return M.sum() - M.trace()
def HSIC_gamma_approx(K: np.ndarray, L: np.ndarray) -> float:
"""Hilbert-Schmidt Independence Criterion where null distribution is based on approximated Gamma distribution
References
----------
<NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2005). Kernel Methods for Measuring Independence. Journal of Machine Learning Research, 6, 2075–2129.
"""
Kc, Lc = centering(K), centering(L)
m = len(K)
test_stat = 1 / m * np.sum(Kc * Lc)
muX = 1 / m / (m - 1) * sum_except_diag(K)
muY = 1 / m / (m - 1) * sum_except_diag(L)
mHSIC = 1 / m * (1 + muX * muY - muX - muY)
varHSIC = 72 * (m - 4) * (m - 5) / m / (m - 1) / (m - 2) / (m - 3) * (1 / m / (m - 1) * (sum_except_diag((1 / 6 * Kc * Lc) ** 2)))
al = mHSIC ** 2 / varHSIC
bet = varHSIC * m / mHSIC
return scipy.stats.gamma.sf(test_stat, al, scale=bet)
def HSIC_stat(K: np.ndarray, L: np.ndarray) -> float:
"""HSIC statistic assuming given two centered kernel matrices.
References
----------
<NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2005). Kernel Methods for Measuring Independence. Journal of Machine Learning Research, 6, 2075–2129.
"""
m = len(K)
return float(1 / m * np.sum(K * L))
def HSIC_boot(K: np.ndarray, L: np.ndarray, num_boot=1000, seed=None) -> Tuple[float, List[float]]:
"""A Hilbert-Schmidt Independence Criterion where null distribution is based on bootstrapping
References
----------
<NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2005). Kernel Methods for Measuring Independence. Journal of Machine Learning Research, 6, 2075–2129.
"""
if seed is not None:
np.random.seed(seed)
Kc, Lc = centering(K), centering(L)
test_statistics = HSIC_stat(Kc, Lc)
def shuffled():
perm = np.random.permutation(len(K))
return Lc[np.ix_(perm, perm)]
null_distribution = [HSIC_stat(Kc, shuffled()) for _ in range(num_boot)]
return p_value_of(test_statistics, null_distribution)
def c_HSIC(K: np.ndarray, L: np.ndarray, size_of_null_sample=1000, with_null=False, seed=None, n_jobs=1):
if seed is not None:
np.random.seed(seed)
K, L = centering(K), centering(L)
K, L = cythonize(K, L)
raw_null = np.zeros((size_of_null_sample,), dtype='float64')
test_statistic = np.zeros((1,), dtype='float64')
# run SDCIT
cy_hsic(K, L, size_of_null_sample, random_seeds(), n_jobs, test_statistic, raw_null)
# post-process outputs
test_statistic = test_statistic[0]
if with_null:
return test_statistic, p_value_of(test_statistic, raw_null), raw_null
else:
return test_statistic, p_value_of(test_statistic, raw_null)
|
en
| 0.641151
|
Hilbert-Schmidt Independence Criterion where null distribution is based on approximated Gamma distribution References ---------- <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2005). Kernel Methods for Measuring Independence. Journal of Machine Learning Research, 6, 2075–2129. HSIC statistic assuming given two centered kernel matrices. References ---------- <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2005). Kernel Methods for Measuring Independence. Journal of Machine Learning Research, 6, 2075–2129. A Hilbert-Schmidt Independence Criterion where null distribution is based on bootstrapping References ---------- <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2005). Kernel Methods for Measuring Independence. Journal of Machine Learning Research, 6, 2075–2129. # run SDCIT # post-process outputs
| 2.233367
| 2
|
build/scripts/gen_py_protos.py
|
kevinyen-oath/catboost
| 1
|
6629428
|
<gh_stars>1-10
import os
from os import path
import shutil
import subprocess
import sys
import tempfile
OUT_DIR_ARG = '--python_out='
GRPC_OUT_DIR_ARG = '--grpc_py_out='
PB_PY_RENAMES = [
('_pb2_grpc.py', '__int___pb2_grpc.py'),
('_ev_pb2.py', '__int___ev_pb2.py'),
('_pb2.py', '__int___pb2.py')
]
def main(args):
out_dir_orig = None
out_dir_temp = None
grpc_out_dir_orig = None
for i in range(len(args)):
if args[i].startswith(OUT_DIR_ARG):
assert not out_dir_orig, 'Duplicate "{0}" param'.format(OUT_DIR_ARG)
out_dir_orig = args[i][len(OUT_DIR_ARG):]
out_dir_temp = tempfile.mkdtemp(dir=out_dir_orig)
args[i] = OUT_DIR_ARG + out_dir_temp
elif args[i].startswith(GRPC_OUT_DIR_ARG):
assert not grpc_out_dir_orig, 'Duplicate "{0}" param'.format(GRPC_OUT_DIR_ARG)
grpc_out_dir_orig = args[i][len(GRPC_OUT_DIR_ARG):]
assert grpc_out_dir_orig == out_dir_orig, 'Params "{0}" and "{1}" expected to have the same value'.format(OUT_DIR_ARG, GRPC_OUT_DIR_ARG)
args[i] = GRPC_OUT_DIR_ARG + out_dir_temp
assert out_dir_temp, 'Param "{0}" not found'.format(OUT_DIR_ARG)
retcode = subprocess.call(args)
assert not retcode, 'Protoc failed'
for root_temp, dirs, files in os.walk(out_dir_temp):
sub_dir = path.relpath(root_temp, out_dir_temp)
root_orig = path.join(out_dir_orig, sub_dir)
for d in dirs:
d_orig = path.join(root_orig, d)
if not path.exists(d_orig):
os.mkdir(d_orig)
for f in files:
f_orig = f
for old_ext, new_ext in PB_PY_RENAMES:
if f.endswith(old_ext):
f_orig = f[:-len(old_ext)] + new_ext
break
os.rename(path.join(root_temp, f), path.join(root_orig, f_orig))
shutil.rmtree(out_dir_temp)
if __name__ == '__main__':
main(sys.argv[1:])
|
import os
from os import path
import shutil
import subprocess
import sys
import tempfile
OUT_DIR_ARG = '--python_out='
GRPC_OUT_DIR_ARG = '--grpc_py_out='
PB_PY_RENAMES = [
('_pb2_grpc.py', '__int___pb2_grpc.py'),
('_ev_pb2.py', '__int___ev_pb2.py'),
('_pb2.py', '__int___pb2.py')
]
def main(args):
out_dir_orig = None
out_dir_temp = None
grpc_out_dir_orig = None
for i in range(len(args)):
if args[i].startswith(OUT_DIR_ARG):
assert not out_dir_orig, 'Duplicate "{0}" param'.format(OUT_DIR_ARG)
out_dir_orig = args[i][len(OUT_DIR_ARG):]
out_dir_temp = tempfile.mkdtemp(dir=out_dir_orig)
args[i] = OUT_DIR_ARG + out_dir_temp
elif args[i].startswith(GRPC_OUT_DIR_ARG):
assert not grpc_out_dir_orig, 'Duplicate "{0}" param'.format(GRPC_OUT_DIR_ARG)
grpc_out_dir_orig = args[i][len(GRPC_OUT_DIR_ARG):]
assert grpc_out_dir_orig == out_dir_orig, 'Params "{0}" and "{1}" expected to have the same value'.format(OUT_DIR_ARG, GRPC_OUT_DIR_ARG)
args[i] = GRPC_OUT_DIR_ARG + out_dir_temp
assert out_dir_temp, 'Param "{0}" not found'.format(OUT_DIR_ARG)
retcode = subprocess.call(args)
assert not retcode, 'Protoc failed'
for root_temp, dirs, files in os.walk(out_dir_temp):
sub_dir = path.relpath(root_temp, out_dir_temp)
root_orig = path.join(out_dir_orig, sub_dir)
for d in dirs:
d_orig = path.join(root_orig, d)
if not path.exists(d_orig):
os.mkdir(d_orig)
for f in files:
f_orig = f
for old_ext, new_ext in PB_PY_RENAMES:
if f.endswith(old_ext):
f_orig = f[:-len(old_ext)] + new_ext
break
os.rename(path.join(root_temp, f), path.join(root_orig, f_orig))
shutil.rmtree(out_dir_temp)
if __name__ == '__main__':
main(sys.argv[1:])
|
none
| 1
| 2.577244
| 3
|
|
trump/converting/objects.py
|
Equitable/trump
| 8
|
6629429
|
# -*- coding: utf-8 -*-
import pandas as pd
import Quandl as qdl
from datetime import datetime as dt
def recip(t):
return t[1], t[0]
class CurPair(object):
def __init__(self, sym):
if len(sym) == 6:
self.num, self.den = sym[3:], sym[:3]
elif "//" in sym:
self.num, self.den = sym.split("//")
elif len(sym) == 2: #should be a tuple
self.num, self.den = sym
@property
def pair(self):
return (self.num, self.den)
@property
def inverse(self):
return CurPair(self.pair[::-1])
def __eq__(self, obj):
if not isinstance(obj, CurPair):
obj = CurPair(obj)
return self.num == obj.num and self.den == obj.den
def __gt__(self, obj):
if self == obj:
return 'equal'
elif self == obj.inverse:
return 'recip'
elif self.den in obj.pair[0]:
raise NotImplementedError("CurPair not done")
class FXConverter(object):
def __init__(self):
self.pairs = [('EUR','USD'),
('USD','JPY'),
('GBP','USD'),
('AUD','USD'),
('USD','CHF'),
('NZD','USD'),
('USD','CAD')]
self.start = dt(2015,1,1)
self.end = dt.now()
def use_quandl_data(self, authtoken):
"""
Use quandl data to build conversion table
"""
dfs = {}
st = self.start.strftime("%Y-%m-%d")
at = authtoken
for pair in self.pairs:
symbol = "".join(pair)
qsym = "CURRFX/{}".format(symbol)
dfs[symbol] = qdl.get(qsym,authtoken=at, trim_start=st)['Rate']
self.build_conversion_table(dfs)
def use_trump_data(self, symbols):
"""
Use trump data to build conversion table
symbols :
list of symbols:
will attempt to use units to build the conversion table,
strings represent symbol names.
"""
dfs = {sym.units : sym.df[sym.name] for sym in symbols}
self.build_conversion_table(dfs)
def build_conversion_table(self, dataframes):
"""
Build conversion table from a dictionary of dataframes
"""
self.data = pd.DataFrame(dataframes)
tmp_pairs = [s.split("/") for s in self.data.columns]
self.data.columns = pd.MultiIndex.from_tuples(tmp_pairs)
def convert(self, data, denom, to):
# print "Trying to convert", denom, to
# We need to do this, cause humans are dumb,
if "/" in denom:
denom = denom.split(r"/")[0]
if "/" in to:
a,b = to.split(r"/")
if b == 'unit':
to = a
else:
to = b
pair = (denom, to)
denusd = (denom, 'USD')
usdto = ('USD', to)
#print "Trying to convert..." + str(pair)
#print list(self.data.columns)
#print pair in self.data.columns
#print recip(pair) in self.data.columns
pairs = self.data.columns
# if 'LOC' (local currency), simply use default Trump units
if denom == to or to == 'LOC':
tmp = data
elif pair in pairs:
tmp = data.div(self.data[pair], axis=0)
elif recip(pair) in pairs:
tmp = data.mul(self.data[recip(pair)], axis=0)
elif ((denusd in pairs) or (recip(denusd) in pairs)) and \
((usdto in pairs) or (recip(usdto) in pairs)):
tmp = self.convert(data, denom, 'USD')
tmp = self.convert(tmp, 'USD', to)
else:
raise Exception ("Converter has insufficient data to process {} to {}".format(denom,to))
return tmp
if __name__ == '__main__':
FXc = FXConverter()
FXc.use_quandl_data('TODO')
gold = qdl.get('LBMA/GOLD', authtoken='TODO')
g_eur = gold['EURO (PM)']
g_gbp = gold['GBP (PM)']
g_usd = gold['USD (PM)']
|
# -*- coding: utf-8 -*-
import pandas as pd
import Quandl as qdl
from datetime import datetime as dt
def recip(t):
return t[1], t[0]
class CurPair(object):
def __init__(self, sym):
if len(sym) == 6:
self.num, self.den = sym[3:], sym[:3]
elif "//" in sym:
self.num, self.den = sym.split("//")
elif len(sym) == 2: #should be a tuple
self.num, self.den = sym
@property
def pair(self):
return (self.num, self.den)
@property
def inverse(self):
return CurPair(self.pair[::-1])
def __eq__(self, obj):
if not isinstance(obj, CurPair):
obj = CurPair(obj)
return self.num == obj.num and self.den == obj.den
def __gt__(self, obj):
if self == obj:
return 'equal'
elif self == obj.inverse:
return 'recip'
elif self.den in obj.pair[0]:
raise NotImplementedError("CurPair not done")
class FXConverter(object):
def __init__(self):
self.pairs = [('EUR','USD'),
('USD','JPY'),
('GBP','USD'),
('AUD','USD'),
('USD','CHF'),
('NZD','USD'),
('USD','CAD')]
self.start = dt(2015,1,1)
self.end = dt.now()
def use_quandl_data(self, authtoken):
"""
Use quandl data to build conversion table
"""
dfs = {}
st = self.start.strftime("%Y-%m-%d")
at = authtoken
for pair in self.pairs:
symbol = "".join(pair)
qsym = "CURRFX/{}".format(symbol)
dfs[symbol] = qdl.get(qsym,authtoken=at, trim_start=st)['Rate']
self.build_conversion_table(dfs)
def use_trump_data(self, symbols):
"""
Use trump data to build conversion table
symbols :
list of symbols:
will attempt to use units to build the conversion table,
strings represent symbol names.
"""
dfs = {sym.units : sym.df[sym.name] for sym in symbols}
self.build_conversion_table(dfs)
def build_conversion_table(self, dataframes):
"""
Build conversion table from a dictionary of dataframes
"""
self.data = pd.DataFrame(dataframes)
tmp_pairs = [s.split("/") for s in self.data.columns]
self.data.columns = pd.MultiIndex.from_tuples(tmp_pairs)
def convert(self, data, denom, to):
# print "Trying to convert", denom, to
# We need to do this, cause humans are dumb,
if "/" in denom:
denom = denom.split(r"/")[0]
if "/" in to:
a,b = to.split(r"/")
if b == 'unit':
to = a
else:
to = b
pair = (denom, to)
denusd = (denom, 'USD')
usdto = ('USD', to)
#print "Trying to convert..." + str(pair)
#print list(self.data.columns)
#print pair in self.data.columns
#print recip(pair) in self.data.columns
pairs = self.data.columns
# if 'LOC' (local currency), simply use default Trump units
if denom == to or to == 'LOC':
tmp = data
elif pair in pairs:
tmp = data.div(self.data[pair], axis=0)
elif recip(pair) in pairs:
tmp = data.mul(self.data[recip(pair)], axis=0)
elif ((denusd in pairs) or (recip(denusd) in pairs)) and \
((usdto in pairs) or (recip(usdto) in pairs)):
tmp = self.convert(data, denom, 'USD')
tmp = self.convert(tmp, 'USD', to)
else:
raise Exception ("Converter has insufficient data to process {} to {}".format(denom,to))
return tmp
if __name__ == '__main__':
FXc = FXConverter()
FXc.use_quandl_data('TODO')
gold = qdl.get('LBMA/GOLD', authtoken='TODO')
g_eur = gold['EURO (PM)']
g_gbp = gold['GBP (PM)']
g_usd = gold['USD (PM)']
|
en
| 0.543735
|
# -*- coding: utf-8 -*- #should be a tuple Use quandl data to build conversion table Use trump data to build conversion table symbols : list of symbols: will attempt to use units to build the conversion table, strings represent symbol names. Build conversion table from a dictionary of dataframes # print "Trying to convert", denom, to # We need to do this, cause humans are dumb, #print "Trying to convert..." + str(pair) #print list(self.data.columns) #print pair in self.data.columns #print recip(pair) in self.data.columns # if 'LOC' (local currency), simply use default Trump units
| 2.74157
| 3
|
test/test_clustering.py
|
p123hx/scHiC-py
| 15
|
6629430
|
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import sys
import os
sys.path.insert(0, os.path.abspath(
os.path.join(
os.path.dirname(__file__), '..')))
from scHiCTools import kmeans, spectral_clustering, HAC
center=np.array([[0,0],[100,100],[-100,100]])
rand_data = np.random.normal(size=(90,2)) + np.repeat(center, 30, axis=0)
def test_kmeans():
label=kmeans(rand_data, k=3)
l1 = np.equal(label[:30], label[0]).all()
l2 = np.equal(label[30:-30], label[30]).all()
l3 = np.equal(label[-30:], label[-30]).all()
assert l1
assert l2
assert l3
def test_SC():
label=spectral_clustering(rand_data, n_clusters=3)
l1 = np.equal(label[:30], label[0]).all()
l2 = np.equal(label[30:-30], label[30]).all()
l3 = np.equal(label[-30:], label[-30]).all()
assert l1
assert l2
assert l3
def test_HAC():
label=HAC(rand_data, n_clusters=3)
l1 = np.equal(label[:30], label[0]).all()
l2 = np.equal(label[30:-30], label[30]).all()
l3 = np.equal(label[-30:], label[-30]).all()
assert l1
assert l2
assert l3
|
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import sys
import os
sys.path.insert(0, os.path.abspath(
os.path.join(
os.path.dirname(__file__), '..')))
from scHiCTools import kmeans, spectral_clustering, HAC
center=np.array([[0,0],[100,100],[-100,100]])
rand_data = np.random.normal(size=(90,2)) + np.repeat(center, 30, axis=0)
def test_kmeans():
label=kmeans(rand_data, k=3)
l1 = np.equal(label[:30], label[0]).all()
l2 = np.equal(label[30:-30], label[30]).all()
l3 = np.equal(label[-30:], label[-30]).all()
assert l1
assert l2
assert l3
def test_SC():
label=spectral_clustering(rand_data, n_clusters=3)
l1 = np.equal(label[:30], label[0]).all()
l2 = np.equal(label[30:-30], label[30]).all()
l3 = np.equal(label[-30:], label[-30]).all()
assert l1
assert l2
assert l3
def test_HAC():
label=HAC(rand_data, n_clusters=3)
l1 = np.equal(label[:30], label[0]).all()
l2 = np.equal(label[30:-30], label[30]).all()
l3 = np.equal(label[-30:], label[-30]).all()
assert l1
assert l2
assert l3
|
en
| 0.769321
|
# -*- coding: utf-8 -*-
| 2.167541
| 2
|
buildpack/java.py
|
ernororive/cf-mendix-buildpack
| 0
|
6629431
|
import json
import logging
import os
import re
import subprocess
from buildpack import util
def compile(buildpack_path, cache_path, local_path, java_version):
logging.debug("begin download and install java")
util.mkdir_p(os.path.join(local_path, "bin"))
jvm_location = ensure_and_get_jvm(
java_version, cache_path, local_path, package="jre"
)
# create a symlink in .local/bin/java
os.symlink(
# use .. when jdk is in .local because absolute path
# is different at staging time
os.path.join(jvm_location.replace(local_path, ".."), "bin", "java"),
os.path.join(local_path, "bin", "java"),
)
# update cacert file
update_java_cacert(buildpack_path, jvm_location)
logging.debug("end download and install java")
def determine_jdk(java_version, package="jdk"):
if java_version["vendor"] == "AdoptOpenJDK":
java_version.update({"type": "AdoptOpenJDK-{}".format(package)})
else:
java_version.update({"type": package})
return java_version
def compose_jvm_target_dir(jdk):
return "usr/lib/jvm/{type}-{version}-{vendor}-x64".format(
type=jdk["type"], version=jdk["version"], vendor=jdk["vendor"]
)
def _compose_jre_url_path(jdk):
return "/mx-buildpack/{type}-{version}-linux-x64.tar.gz".format(
type=jdk["type"], version=jdk["version"]
)
def ensure_and_get_jvm(
java_version, cache_dir, dot_local_location, package="jdk"
):
logging.debug("Begin download and install java %s" % package)
jdk = determine_jdk(java_version, package)
rootfs_java_path = "/{}".format(compose_jvm_target_dir(jdk))
if not os.path.isdir(rootfs_java_path):
logging.debug("rootfs without java sdk detected")
util.download_and_unpack(
util.get_blobstore_url(_compose_jre_url_path(jdk)),
os.path.join(dot_local_location, compose_jvm_target_dir(jdk)),
cache_dir,
)
else:
logging.debug("rootfs with java sdk detected")
logging.debug("end download and install java %s" % package)
return util.get_existing_directory_or_raise(
[
"/" + compose_jvm_target_dir(jdk),
os.path.join(dot_local_location, compose_jvm_target_dir(jdk)),
],
"Java not found",
)
def update_java_cacert(buildpack_dir, jvm_location):
logging.debug("Applying Mozilla CA certificates update to JVM cacerts...")
cacerts_file = os.path.join(jvm_location, "lib", "security", "cacerts")
if not os.path.exists(cacerts_file):
logging.warning(
"Cannot locate cacerts file %s. Skipping update of CA certificates.",
cacerts_file,
)
return
update_cacert_path = os.path.join(buildpack_dir, "vendor", "cacert")
if not os.path.exists(update_cacert_path):
logging.warning(
"Cannot locate cacert lib folder %s. Skipping update of CA certificates.",
update_cacert_path,
)
return
cacert_merged = "cacerts.merged"
env = dict(os.environ)
try:
subprocess.check_output(
(
os.path.join(jvm_location, "bin", "java"),
"-jar",
os.path.join(update_cacert_path, "keyutil-0.4.0.jar"),
"-i",
"--new-keystore",
cacert_merged,
"--password",
"<PASSWORD>",
"--import-pem-file",
os.path.join(update_cacert_path, "cacert.pem"),
"--import-jks-file",
"{}:changeit".format(cacerts_file),
),
env=env,
stderr=subprocess.STDOUT,
)
except Exception as ex:
logging.error("Error applying cacert update: {}".format(ex), ex)
raise ex
os.rename(cacert_merged, cacerts_file)
logging.debug("Update of cacerts file finished.")
def _set_jvm_locale(m2ee_section, java_version):
javaopts = m2ee_section["javaopts"]
# override locale providers for java8
if java_version.startswith("8"):
javaopts.append("-Djava.locale.providers=JRE,SPI,CLDR")
def _set_user_provided_java_options(m2ee_section):
javaopts = m2ee_section["javaopts"]
options = os.environ.get("JAVA_OPTS", None)
if options:
try:
options = json.loads(options)
except Exception as e:
logging.error(
"Failed to parse JAVA_OPTS, due to invalid JSON.",
exc_info=True,
)
raise
javaopts.extend(options)
def _set_jvm_memory(m2ee_section, vcap, java_version):
max_memory = os.environ.get("MEMORY_LIMIT")
if max_memory:
match = re.search("([0-9]+)M", max_memory.upper())
limit = int(match.group(1))
else:
limit = int(vcap["limits"]["mem"])
if limit >= 8192:
heap_size = limit - 2048
elif limit >= 4096:
heap_size = limit - 1536
elif limit >= 2048:
heap_size = limit - 1024
else:
heap_size = int(limit / 2)
heap_size = str(heap_size) + "M"
env_heap_size = os.environ.get("HEAP_SIZE")
if env_heap_size:
if int(env_heap_size[:-1]) < limit:
heap_size = env_heap_size
else:
logging.warning(
"specified heap size %s is larger than max memory of the "
"container (%s), falling back to a heap size of %s",
env_heap_size,
str(limit) + "M",
heap_size,
)
javaopts = m2ee_section["javaopts"]
javaopts.append("-Xmx%s" % heap_size)
javaopts.append("-Xms%s" % heap_size)
if java_version.startswith("7"):
javaopts.append("-XX:MaxPermSize=256M")
else:
javaopts.append("-XX:MaxMetaspaceSize=256M")
logging.debug("Java heap size set to %s", heap_size)
if os.getenv("MALLOC_ARENA_MAX"):
logging.info("Using provided environment setting for MALLOC_ARENA_MAX")
else:
m2ee_section["custom_environment"]["MALLOC_ARENA_MAX"] = str(
max(1, limit / 1024) * 2
)
def update_config(m2ee_section, vcap_data, java_version):
_set_jvm_memory(m2ee_section, vcap_data, java_version)
_set_jvm_locale(m2ee_section, java_version)
_set_user_provided_java_options(m2ee_section)
|
import json
import logging
import os
import re
import subprocess
from buildpack import util
def compile(buildpack_path, cache_path, local_path, java_version):
logging.debug("begin download and install java")
util.mkdir_p(os.path.join(local_path, "bin"))
jvm_location = ensure_and_get_jvm(
java_version, cache_path, local_path, package="jre"
)
# create a symlink in .local/bin/java
os.symlink(
# use .. when jdk is in .local because absolute path
# is different at staging time
os.path.join(jvm_location.replace(local_path, ".."), "bin", "java"),
os.path.join(local_path, "bin", "java"),
)
# update cacert file
update_java_cacert(buildpack_path, jvm_location)
logging.debug("end download and install java")
def determine_jdk(java_version, package="jdk"):
if java_version["vendor"] == "AdoptOpenJDK":
java_version.update({"type": "AdoptOpenJDK-{}".format(package)})
else:
java_version.update({"type": package})
return java_version
def compose_jvm_target_dir(jdk):
return "usr/lib/jvm/{type}-{version}-{vendor}-x64".format(
type=jdk["type"], version=jdk["version"], vendor=jdk["vendor"]
)
def _compose_jre_url_path(jdk):
return "/mx-buildpack/{type}-{version}-linux-x64.tar.gz".format(
type=jdk["type"], version=jdk["version"]
)
def ensure_and_get_jvm(
java_version, cache_dir, dot_local_location, package="jdk"
):
logging.debug("Begin download and install java %s" % package)
jdk = determine_jdk(java_version, package)
rootfs_java_path = "/{}".format(compose_jvm_target_dir(jdk))
if not os.path.isdir(rootfs_java_path):
logging.debug("rootfs without java sdk detected")
util.download_and_unpack(
util.get_blobstore_url(_compose_jre_url_path(jdk)),
os.path.join(dot_local_location, compose_jvm_target_dir(jdk)),
cache_dir,
)
else:
logging.debug("rootfs with java sdk detected")
logging.debug("end download and install java %s" % package)
return util.get_existing_directory_or_raise(
[
"/" + compose_jvm_target_dir(jdk),
os.path.join(dot_local_location, compose_jvm_target_dir(jdk)),
],
"Java not found",
)
def update_java_cacert(buildpack_dir, jvm_location):
logging.debug("Applying Mozilla CA certificates update to JVM cacerts...")
cacerts_file = os.path.join(jvm_location, "lib", "security", "cacerts")
if not os.path.exists(cacerts_file):
logging.warning(
"Cannot locate cacerts file %s. Skipping update of CA certificates.",
cacerts_file,
)
return
update_cacert_path = os.path.join(buildpack_dir, "vendor", "cacert")
if not os.path.exists(update_cacert_path):
logging.warning(
"Cannot locate cacert lib folder %s. Skipping update of CA certificates.",
update_cacert_path,
)
return
cacert_merged = "cacerts.merged"
env = dict(os.environ)
try:
subprocess.check_output(
(
os.path.join(jvm_location, "bin", "java"),
"-jar",
os.path.join(update_cacert_path, "keyutil-0.4.0.jar"),
"-i",
"--new-keystore",
cacert_merged,
"--password",
"<PASSWORD>",
"--import-pem-file",
os.path.join(update_cacert_path, "cacert.pem"),
"--import-jks-file",
"{}:changeit".format(cacerts_file),
),
env=env,
stderr=subprocess.STDOUT,
)
except Exception as ex:
logging.error("Error applying cacert update: {}".format(ex), ex)
raise ex
os.rename(cacert_merged, cacerts_file)
logging.debug("Update of cacerts file finished.")
def _set_jvm_locale(m2ee_section, java_version):
javaopts = m2ee_section["javaopts"]
# override locale providers for java8
if java_version.startswith("8"):
javaopts.append("-Djava.locale.providers=JRE,SPI,CLDR")
def _set_user_provided_java_options(m2ee_section):
javaopts = m2ee_section["javaopts"]
options = os.environ.get("JAVA_OPTS", None)
if options:
try:
options = json.loads(options)
except Exception as e:
logging.error(
"Failed to parse JAVA_OPTS, due to invalid JSON.",
exc_info=True,
)
raise
javaopts.extend(options)
def _set_jvm_memory(m2ee_section, vcap, java_version):
max_memory = os.environ.get("MEMORY_LIMIT")
if max_memory:
match = re.search("([0-9]+)M", max_memory.upper())
limit = int(match.group(1))
else:
limit = int(vcap["limits"]["mem"])
if limit >= 8192:
heap_size = limit - 2048
elif limit >= 4096:
heap_size = limit - 1536
elif limit >= 2048:
heap_size = limit - 1024
else:
heap_size = int(limit / 2)
heap_size = str(heap_size) + "M"
env_heap_size = os.environ.get("HEAP_SIZE")
if env_heap_size:
if int(env_heap_size[:-1]) < limit:
heap_size = env_heap_size
else:
logging.warning(
"specified heap size %s is larger than max memory of the "
"container (%s), falling back to a heap size of %s",
env_heap_size,
str(limit) + "M",
heap_size,
)
javaopts = m2ee_section["javaopts"]
javaopts.append("-Xmx%s" % heap_size)
javaopts.append("-Xms%s" % heap_size)
if java_version.startswith("7"):
javaopts.append("-XX:MaxPermSize=256M")
else:
javaopts.append("-XX:MaxMetaspaceSize=256M")
logging.debug("Java heap size set to %s", heap_size)
if os.getenv("MALLOC_ARENA_MAX"):
logging.info("Using provided environment setting for MALLOC_ARENA_MAX")
else:
m2ee_section["custom_environment"]["MALLOC_ARENA_MAX"] = str(
max(1, limit / 1024) * 2
)
def update_config(m2ee_section, vcap_data, java_version):
_set_jvm_memory(m2ee_section, vcap_data, java_version)
_set_jvm_locale(m2ee_section, java_version)
_set_user_provided_java_options(m2ee_section)
|
en
| 0.788516
|
# create a symlink in .local/bin/java # use .. when jdk is in .local because absolute path # is different at staging time # update cacert file # override locale providers for java8
| 2.272029
| 2
|
ms_mint/peak_optimization/ManualRetentionTimeOptimizer.py
|
soerendip/ms-mint
| 1
|
6629432
|
import numpy as np
import pandas as pd
import ipywidgets as W
import plotly.express as px
from tqdm import tqdm
from IPython.display import display
from .io import ms_file_to_df
class ManualRetentionTimeOptimizer:
def __init__(self, mint):
self.df = pd.concat(
[ms_file_to_df(fn).assign(ms_file=fn) for fn in tqdm(mint.ms_files)]
)
self.out = W.Output()
self.mint = mint
self.w_rt_min = W.FloatText(value=0, description="RT min:", disabled=False)
self.w_rt_max = W.FloatText(
value=13,
description="RT max:",
disabled=False,
)
self.set_rt_button = W.Button(description="Set new RT")
self.delete_button = W.Button(description="Remove from peaklist")
self.menu = W.Dropdown(options=mint.peaklist.peak_label, value=None)
def update(*args):
peak_label = self.menu.value
self.plot(peak_label)
def update_rt(button):
rt_min, rt_max = (
self.w_rt_min.value,
self.w_rt_max.value,
)
peak_label = self.menu.value
self.mint.peaklist.loc[
self.mint.peaklist.peak_label == peak_label, "rt_min"
] = rt_min
self.mint.peaklist.loc[
self.mint.peaklist.peak_label == peak_label, "rt_max"
] = rt_max
self.plot(peak_label)
def remove_peak(button):
peak_label = self.menu.value
mint.peaklist = mint.peaklist[mint.peaklist.peak_label != peak_label]
new_options = mint.peaklist.peak_label
self.menu.options = new_options
self.menu.observe(update, names="value")
self.set_rt_button.on_click(update_rt)
self.delete_button.on_click(remove_peak)
self.layout = W.VBox(
[
self.menu,
self.w_rt_min,
self.w_rt_max,
self.set_rt_button,
self.out,
self.delete_button,
],
)
def plot(self, peak_label):
peak_data = self.mint.peaklist[
self.mint.peaklist.peak_label == peak_label
].T.iloc[:, 0]
mz_mean, mz_width, rt_min, rt_max = peak_data[
["mz_mean", "mz_width", "rt_min", "rt_max"]
]
dmz = mz_mean * 1e-6 * mz_width
selection = self.df[np.abs(self.df["m/z array"] - mz_mean) <= dmz]
fig = px.line(
data_frame=selection,
x="retentionTime",
y="intensity array",
color="ms_file",
title=peak_label,
)
fig.update_layout(showlegend=False)
fig.update_layout(hovermode="closest", xaxis=dict(range=[rt_min, rt_max]))
self.out.clear_output()
with self.out:
display(fig)
self.w_rt_min.value, self.w_rt_max.value = rt_min, rt_max
def show(self):
return self.layout
|
import numpy as np
import pandas as pd
import ipywidgets as W
import plotly.express as px
from tqdm import tqdm
from IPython.display import display
from .io import ms_file_to_df
class ManualRetentionTimeOptimizer:
def __init__(self, mint):
self.df = pd.concat(
[ms_file_to_df(fn).assign(ms_file=fn) for fn in tqdm(mint.ms_files)]
)
self.out = W.Output()
self.mint = mint
self.w_rt_min = W.FloatText(value=0, description="RT min:", disabled=False)
self.w_rt_max = W.FloatText(
value=13,
description="RT max:",
disabled=False,
)
self.set_rt_button = W.Button(description="Set new RT")
self.delete_button = W.Button(description="Remove from peaklist")
self.menu = W.Dropdown(options=mint.peaklist.peak_label, value=None)
def update(*args):
peak_label = self.menu.value
self.plot(peak_label)
def update_rt(button):
rt_min, rt_max = (
self.w_rt_min.value,
self.w_rt_max.value,
)
peak_label = self.menu.value
self.mint.peaklist.loc[
self.mint.peaklist.peak_label == peak_label, "rt_min"
] = rt_min
self.mint.peaklist.loc[
self.mint.peaklist.peak_label == peak_label, "rt_max"
] = rt_max
self.plot(peak_label)
def remove_peak(button):
peak_label = self.menu.value
mint.peaklist = mint.peaklist[mint.peaklist.peak_label != peak_label]
new_options = mint.peaklist.peak_label
self.menu.options = new_options
self.menu.observe(update, names="value")
self.set_rt_button.on_click(update_rt)
self.delete_button.on_click(remove_peak)
self.layout = W.VBox(
[
self.menu,
self.w_rt_min,
self.w_rt_max,
self.set_rt_button,
self.out,
self.delete_button,
],
)
def plot(self, peak_label):
peak_data = self.mint.peaklist[
self.mint.peaklist.peak_label == peak_label
].T.iloc[:, 0]
mz_mean, mz_width, rt_min, rt_max = peak_data[
["mz_mean", "mz_width", "rt_min", "rt_max"]
]
dmz = mz_mean * 1e-6 * mz_width
selection = self.df[np.abs(self.df["m/z array"] - mz_mean) <= dmz]
fig = px.line(
data_frame=selection,
x="retentionTime",
y="intensity array",
color="ms_file",
title=peak_label,
)
fig.update_layout(showlegend=False)
fig.update_layout(hovermode="closest", xaxis=dict(range=[rt_min, rt_max]))
self.out.clear_output()
with self.out:
display(fig)
self.w_rt_min.value, self.w_rt_max.value = rt_min, rt_max
def show(self):
return self.layout
|
none
| 1
| 2.364289
| 2
|
|
krmining/classification/__init__.py
|
SynitCool/keyar-mining
| 2
|
6629433
|
<gh_stars>1-10
from ._knn import KNearestNeighborsClassifier
from ._logistic_regression import LogisticRegression
__all__ = ["KNearestNeighborsClassifier", "LogisticRegression"]
|
from ._knn import KNearestNeighborsClassifier
from ._logistic_regression import LogisticRegression
__all__ = ["KNearestNeighborsClassifier", "LogisticRegression"]
|
none
| 1
| 1.174592
| 1
|
|
pyosmo/end_conditions/base.py
|
OPpuolitaival/pyosmo
| 7
|
6629434
|
from abc import abstractmethod
from pyosmo.history.history import OsmoHistory
from pyosmo.model import OsmoModelCollector
class OsmoEndCondition:
"""
Abstract end condition class
"""
@abstractmethod
def end_test(self, history: OsmoHistory, model: OsmoModelCollector) -> bool:
raise Exception("This is not implemented!")
@abstractmethod
def end_suite(self, history: OsmoHistory, model: OsmoModelCollector) -> bool:
raise Exception("This is not implemented!")
|
from abc import abstractmethod
from pyosmo.history.history import OsmoHistory
from pyosmo.model import OsmoModelCollector
class OsmoEndCondition:
"""
Abstract end condition class
"""
@abstractmethod
def end_test(self, history: OsmoHistory, model: OsmoModelCollector) -> bool:
raise Exception("This is not implemented!")
@abstractmethod
def end_suite(self, history: OsmoHistory, model: OsmoModelCollector) -> bool:
raise Exception("This is not implemented!")
|
en
| 0.600438
|
Abstract end condition class
| 2.859647
| 3
|
yt/data_objects/index_subobjects/grid_patch.py
|
lconaboy/yt
| 0
|
6629435
|
import warnings
import weakref
from typing import List, Tuple
import numpy as np
import yt.geometry.particle_deposit as particle_deposit
from yt.config import ytcfg
from yt.data_objects.selection_objects.data_selection_objects import (
YTSelectionContainer,
)
from yt.funcs import is_sequence
from yt.geometry.selection_routines import convert_mask_to_indices
from yt.units.yt_array import YTArray
from yt.utilities.exceptions import (
YTFieldTypeNotFound,
YTParticleDepositionNotImplemented,
)
from yt.utilities.lib.interpolators import ghost_zone_interpolate
from yt.utilities.lib.mesh_utilities import clamp_edges
from yt.utilities.nodal_data_utils import get_nodal_slices
RECONSTRUCT_INDEX = bool(ytcfg.get("yt", "reconstruct_index"))
class AMRGridPatch(YTSelectionContainer):
_spatial = True
_num_ghost_zones = 0
_grids = None
_id_offset = 1
_cache_mask = True
_type_name = "grid"
_skip_add = True
_con_args = ("id", "filename")
_container_fields = (
("index", "dx"),
("index", "dy"),
("index", "dz"),
("index", "x"),
("index", "y"),
("index", "z"),
)
OverlappingSiblings = None
def __init__(self, id, filename=None, index=None):
super().__init__(index.dataset, None)
self.id = id
self._child_mask = self._child_indices = self._child_index_mask = None
self.ds = index.dataset
self._index = weakref.proxy(index)
self.start_index = None
self.filename = filename
self._last_mask = None
self._last_count = -1
self._last_selector_id = None
def get_global_startindex(self):
"""
Return the integer starting index for each dimension at the current
level.
"""
if self.start_index is not None:
return self.start_index
if self.Parent is None:
left = self.LeftEdge.d - self.ds.domain_left_edge.d
start_index = left / self.dds.d
return np.rint(start_index).astype("int64").ravel()
pdx = self.Parent.dds.d
di = np.rint((self.LeftEdge.d - self.Parent.LeftEdge.d) / pdx)
start_index = self.Parent.get_global_startindex() + di
self.start_index = (start_index * self.ds.refine_by).astype("int64").ravel()
return self.start_index
def __getitem__(self, key):
tr = super().__getitem__(key)
try:
fields = self._determine_fields(key)
except YTFieldTypeNotFound:
return tr
finfo = self.ds._get_field_info(*fields[0])
if not finfo.sampling_type == "particle":
num_nodes = 2 ** sum(finfo.nodal_flag)
new_shape = list(self.ActiveDimensions)
if num_nodes > 1:
new_shape += [num_nodes]
return tr.reshape(new_shape)
return tr
def convert(self, datatype):
"""
This will attempt to convert a given unit to cgs from code units. It
either returns the multiplicative factor or throws a KeyError.
"""
return self.ds[datatype]
@property
def shape(self):
return self.ActiveDimensions
def _reshape_vals(self, arr):
if len(arr.shape) == 3:
return arr
return arr.reshape(self.ActiveDimensions, order="C")
def _generate_container_field(self, field):
if self._current_chunk is None:
self.index._identify_base_chunk(self)
if field == ("index", "dx"):
tr = self._current_chunk.fwidth[:, 0]
elif field == ("index", "dy"):
tr = self._current_chunk.fwidth[:, 1]
elif field == ("index", "dz"):
tr = self._current_chunk.fwidth[:, 2]
elif field == ("index", "x"):
tr = self._current_chunk.fcoords[:, 0]
elif field == ("index", "y"):
tr = self._current_chunk.fcoords[:, 1]
elif field == ("index", "z"):
tr = self._current_chunk.fcoords[:, 2]
return self._reshape_vals(tr)
def _setup_dx(self):
# So first we figure out what the index is. We don't assume
# that dx=dy=dz, at least here. We probably do elsewhere.
id = self.id - self._id_offset
ds = self.ds
index = self.index
if self.Parent is not None:
if not hasattr(self.Parent, "dds"):
self.Parent._setup_dx()
self.dds = self.Parent.dds.d / self.ds.refine_by
else:
LE, RE = (index.grid_left_edge[id, :].d, index.grid_right_edge[id, :].d)
self.dds = (RE - LE) / self.ActiveDimensions
if self.ds.dimensionality < 3:
self.dds[2] = ds.domain_right_edge[2] - ds.domain_left_edge[2]
elif self.ds.dimensionality < 2:
self.dds[1] = ds.domain_right_edge[1] - ds.domain_left_edge[1]
self.dds = self.dds.view(YTArray)
self.dds.units = self.index.grid_left_edge.units
def __repr__(self):
return "AMRGridPatch_%04i" % (self.id)
def __int__(self):
return self.id
def clear_data(self):
"""
Clear out the following things: child_mask, child_indices, all fields,
all field parameters.
"""
super().clear_data()
self._setup_dx()
def _prepare_grid(self):
"""Copies all the appropriate attributes from the index."""
# This is definitely the slowest part of generating the index
# Now we give it pointers to all of its attributes
# Note that to keep in line with Enzo, we have broken PEP-8
h = self.index # cache it
my_ind = self.id - self._id_offset
self.ActiveDimensions = h.grid_dimensions[my_ind]
self.LeftEdge = h.grid_left_edge[my_ind]
self.RightEdge = h.grid_right_edge[my_ind]
# This can be expensive so we allow people to disable this behavior
# via a config option
if RECONSTRUCT_INDEX:
if is_sequence(self.Parent) and len(self.Parent) > 0:
p = self.Parent[0]
else:
p = self.Parent
if p is not None and p != []:
# clamp grid edges to an integer multiple of the parent cell
# width
clamp_edges(self.LeftEdge, p.LeftEdge, p.dds)
clamp_edges(self.RightEdge, p.RightEdge, p.dds)
h.grid_levels[my_ind, 0] = self.Level
# This might be needed for streaming formats
# self.Time = h.gridTimes[my_ind,0]
self.NumberOfParticles = h.grid_particle_count[my_ind, 0]
def get_position(self, index):
"""Returns center position of an *index*."""
pos = (index + 0.5) * self.dds + self.LeftEdge
return pos
def _fill_child_mask(self, child, mask, tofill, dlevel=1):
rf = self.ds.refine_by
if dlevel != 1:
rf = rf**dlevel
gi, cgi = self.get_global_startindex(), child.get_global_startindex()
startIndex = np.maximum(0, cgi // rf - gi)
endIndex = np.minimum(
(cgi + child.ActiveDimensions) // rf - gi, self.ActiveDimensions
)
endIndex += startIndex == endIndex
mask[
startIndex[0] : endIndex[0],
startIndex[1] : endIndex[1],
startIndex[2] : endIndex[2],
] = tofill
@property
def child_mask(self):
"""
Generates self.child_mask, which is zero where child grids exist (and
thus, where higher resolution data is available).
"""
child_mask = np.ones(self.ActiveDimensions, "bool")
for child in self.Children:
self._fill_child_mask(child, child_mask, 0)
for sibling in self.OverlappingSiblings or []:
self._fill_child_mask(sibling, child_mask, 0, dlevel=0)
return child_mask
@property
def child_indices(self):
return self.child_mask == 0
@property
def child_index_mask(self):
"""
Generates self.child_index_mask, which is -1 where there is no child,
and otherwise has the ID of the grid that resides there.
"""
child_index_mask = np.zeros(self.ActiveDimensions, "int32") - 1
for child in self.Children:
self._fill_child_mask(child, child_index_mask, child.id)
for sibling in self.OverlappingSiblings or []:
self._fill_child_mask(sibling, child_index_mask, sibling.id, dlevel=0)
return child_index_mask
def retrieve_ghost_zones(self, n_zones, fields, all_levels=False, smoothed=False):
# We will attempt this by creating a datacube that is exactly bigger
# than the grid by nZones*dx in each direction
nl = self.get_global_startindex() - n_zones
new_left_edge = nl * self.dds + self.ds.domain_left_edge
# Something different needs to be done for the root grid, though
level = self.Level
if all_levels:
level = self.index.max_level + 1
kwargs = {
"dims": self.ActiveDimensions + 2 * n_zones,
"num_ghost_zones": n_zones,
"use_pbar": False,
"fields": fields,
}
# This should update the arguments to set the field parameters to be
# those of this grid.
field_parameters = {}
field_parameters.update(self.field_parameters)
if smoothed:
cube = self.ds.smoothed_covering_grid(
level, new_left_edge, field_parameters=field_parameters, **kwargs
)
else:
cube = self.ds.covering_grid(
level, new_left_edge, field_parameters=field_parameters, **kwargs
)
cube._base_grid = self
return cube
def get_vertex_centered_data(
self,
fields: List[Tuple[str, str]],
smoothed: bool = True,
no_ghost: bool = False,
):
_old_api = isinstance(fields, (str, tuple))
if _old_api:
message = (
"get_vertex_centered_data() requires list of fields, rather than "
"a single field as an argument."
)
warnings.warn(message, DeprecationWarning, stacklevel=2)
fields = [fields] # type: ignore
# Make sure the field list has only unique entries
fields = list(set(fields))
new_fields = {}
for field in fields:
finfo = self.ds._get_field_info(field)
new_fields[field] = self.ds.arr(
np.zeros(self.ActiveDimensions + 1), finfo.output_units
)
if no_ghost:
for field in fields:
# Ensure we have the native endianness in this array. Avoid making
# a copy if possible.
old_field = np.asarray(self[field], dtype="=f8")
# We'll use the ghost zone routine, which will naturally
# extrapolate here.
input_left = np.array([0.5, 0.5, 0.5], dtype="float64")
output_left = np.array([0.0, 0.0, 0.0], dtype="float64")
# rf = 1 here
ghost_zone_interpolate(
1, old_field, input_left, new_fields[field], output_left
)
else:
cg = self.retrieve_ghost_zones(1, fields, smoothed=smoothed)
for field in fields:
src = cg[field].in_units(new_fields[field].units).d
dest = new_fields[field].d
np.add(dest, src[1:, 1:, 1:], dest)
np.add(dest, src[:-1, 1:, 1:], dest)
np.add(dest, src[1:, :-1, 1:], dest)
np.add(dest, src[1:, 1:, :-1], dest)
np.add(dest, src[:-1, 1:, :-1], dest)
np.add(dest, src[1:, :-1, :-1], dest)
np.add(dest, src[:-1, :-1, 1:], dest)
np.add(dest, src[:-1, :-1, :-1], dest)
np.multiply(dest, 0.125, dest)
if _old_api:
return new_fields[fields[0]]
return new_fields
def select_icoords(self, dobj):
mask = self._get_selector_mask(dobj.selector)
if mask is None:
return np.empty((0, 3), dtype="int64")
coords = convert_mask_to_indices(mask, self._last_count)
coords += self.get_global_startindex()[None, :]
return coords
def select_fcoords(self, dobj):
mask = self._get_selector_mask(dobj.selector)
if mask is None:
return np.empty((0, 3), dtype="float64")
coords = convert_mask_to_indices(mask, self._last_count).astype("float64")
coords += 0.5
coords *= self.dds[None, :]
coords += self.LeftEdge[None, :]
return coords
def select_fwidth(self, dobj):
count = self.count(dobj.selector)
if count == 0:
return np.empty((0, 3), dtype="float64")
coords = np.empty((count, 3), dtype="float64")
for axis in range(3):
coords[:, axis] = self.dds[axis]
return coords
def select_ires(self, dobj):
mask = self._get_selector_mask(dobj.selector)
if mask is None:
return np.empty(0, dtype="int64")
coords = np.empty(self._last_count, dtype="int64")
coords[:] = self.Level
return coords
def select_tcoords(self, dobj):
dt, t = dobj.selector.get_dt(self)
return dt, t
def smooth(self, *args, **kwargs):
raise NotImplementedError
def particle_operation(self, *args, **kwargs):
raise NotImplementedError
def deposit(self, positions, fields=None, method=None, kernel_name="cubic"):
# Here we perform our particle deposition.
cls = getattr(particle_deposit, f"deposit_{method}", None)
if cls is None:
raise YTParticleDepositionNotImplemented(method)
# We allocate number of zones, not number of octs. Everything
# inside this is Fortran ordered because of the ordering in the
# octree deposit routines, so we reverse it here to match the
# convention there
nvals = tuple(self.ActiveDimensions[::-1])
# append a dummy dimension because we are only depositing onto
# one grid
op = cls(nvals + (1,), kernel_name)
op.initialize()
op.process_grid(self, positions, fields)
vals = op.finalize()
if vals is None:
return
# Fortran-ordered, so transpose.
vals = vals.transpose()
# squeeze dummy dimension we appended above
return np.squeeze(vals, axis=0)
def select_blocks(self, selector):
mask = self._get_selector_mask(selector)
yield self, mask
def _get_selector_mask(self, selector):
if self._cache_mask and hash(selector) == self._last_selector_id:
mask = self._last_mask
else:
mask = selector.fill_mask(self)
if self._cache_mask:
self._last_mask = mask
self._last_selector_id = hash(selector)
if mask is None:
self._last_count = 0
else:
self._last_count = mask.sum()
return mask
def select(self, selector, source, dest, offset):
mask = self._get_selector_mask(selector)
count = self.count(selector)
if count == 0:
return 0
dim = np.squeeze(self.ds.dimensionality)
nodal_flag = source.shape[:dim] - self.ActiveDimensions[:dim]
if sum(nodal_flag) == 0:
dest[offset : offset + count] = source[mask]
else:
slices = get_nodal_slices(source.shape, nodal_flag, dim)
for i, sl in enumerate(slices):
dest[offset : offset + count, i] = source[tuple(sl)][np.squeeze(mask)]
return count
def count(self, selector):
mask = self._get_selector_mask(selector)
if mask is None:
return 0
return self._last_count
def count_particles(self, selector, x, y, z):
# We don't cache the selector results
count = selector.count_points(x, y, z, 0.0)
return count
def select_particles(self, selector, x, y, z):
mask = selector.select_points(x, y, z, 0.0)
return mask
|
import warnings
import weakref
from typing import List, Tuple
import numpy as np
import yt.geometry.particle_deposit as particle_deposit
from yt.config import ytcfg
from yt.data_objects.selection_objects.data_selection_objects import (
YTSelectionContainer,
)
from yt.funcs import is_sequence
from yt.geometry.selection_routines import convert_mask_to_indices
from yt.units.yt_array import YTArray
from yt.utilities.exceptions import (
YTFieldTypeNotFound,
YTParticleDepositionNotImplemented,
)
from yt.utilities.lib.interpolators import ghost_zone_interpolate
from yt.utilities.lib.mesh_utilities import clamp_edges
from yt.utilities.nodal_data_utils import get_nodal_slices
RECONSTRUCT_INDEX = bool(ytcfg.get("yt", "reconstruct_index"))
class AMRGridPatch(YTSelectionContainer):
_spatial = True
_num_ghost_zones = 0
_grids = None
_id_offset = 1
_cache_mask = True
_type_name = "grid"
_skip_add = True
_con_args = ("id", "filename")
_container_fields = (
("index", "dx"),
("index", "dy"),
("index", "dz"),
("index", "x"),
("index", "y"),
("index", "z"),
)
OverlappingSiblings = None
def __init__(self, id, filename=None, index=None):
super().__init__(index.dataset, None)
self.id = id
self._child_mask = self._child_indices = self._child_index_mask = None
self.ds = index.dataset
self._index = weakref.proxy(index)
self.start_index = None
self.filename = filename
self._last_mask = None
self._last_count = -1
self._last_selector_id = None
def get_global_startindex(self):
"""
Return the integer starting index for each dimension at the current
level.
"""
if self.start_index is not None:
return self.start_index
if self.Parent is None:
left = self.LeftEdge.d - self.ds.domain_left_edge.d
start_index = left / self.dds.d
return np.rint(start_index).astype("int64").ravel()
pdx = self.Parent.dds.d
di = np.rint((self.LeftEdge.d - self.Parent.LeftEdge.d) / pdx)
start_index = self.Parent.get_global_startindex() + di
self.start_index = (start_index * self.ds.refine_by).astype("int64").ravel()
return self.start_index
def __getitem__(self, key):
tr = super().__getitem__(key)
try:
fields = self._determine_fields(key)
except YTFieldTypeNotFound:
return tr
finfo = self.ds._get_field_info(*fields[0])
if not finfo.sampling_type == "particle":
num_nodes = 2 ** sum(finfo.nodal_flag)
new_shape = list(self.ActiveDimensions)
if num_nodes > 1:
new_shape += [num_nodes]
return tr.reshape(new_shape)
return tr
def convert(self, datatype):
"""
This will attempt to convert a given unit to cgs from code units. It
either returns the multiplicative factor or throws a KeyError.
"""
return self.ds[datatype]
@property
def shape(self):
return self.ActiveDimensions
def _reshape_vals(self, arr):
if len(arr.shape) == 3:
return arr
return arr.reshape(self.ActiveDimensions, order="C")
def _generate_container_field(self, field):
if self._current_chunk is None:
self.index._identify_base_chunk(self)
if field == ("index", "dx"):
tr = self._current_chunk.fwidth[:, 0]
elif field == ("index", "dy"):
tr = self._current_chunk.fwidth[:, 1]
elif field == ("index", "dz"):
tr = self._current_chunk.fwidth[:, 2]
elif field == ("index", "x"):
tr = self._current_chunk.fcoords[:, 0]
elif field == ("index", "y"):
tr = self._current_chunk.fcoords[:, 1]
elif field == ("index", "z"):
tr = self._current_chunk.fcoords[:, 2]
return self._reshape_vals(tr)
def _setup_dx(self):
# So first we figure out what the index is. We don't assume
# that dx=dy=dz, at least here. We probably do elsewhere.
id = self.id - self._id_offset
ds = self.ds
index = self.index
if self.Parent is not None:
if not hasattr(self.Parent, "dds"):
self.Parent._setup_dx()
self.dds = self.Parent.dds.d / self.ds.refine_by
else:
LE, RE = (index.grid_left_edge[id, :].d, index.grid_right_edge[id, :].d)
self.dds = (RE - LE) / self.ActiveDimensions
if self.ds.dimensionality < 3:
self.dds[2] = ds.domain_right_edge[2] - ds.domain_left_edge[2]
elif self.ds.dimensionality < 2:
self.dds[1] = ds.domain_right_edge[1] - ds.domain_left_edge[1]
self.dds = self.dds.view(YTArray)
self.dds.units = self.index.grid_left_edge.units
def __repr__(self):
return "AMRGridPatch_%04i" % (self.id)
def __int__(self):
return self.id
def clear_data(self):
"""
Clear out the following things: child_mask, child_indices, all fields,
all field parameters.
"""
super().clear_data()
self._setup_dx()
def _prepare_grid(self):
"""Copies all the appropriate attributes from the index."""
# This is definitely the slowest part of generating the index
# Now we give it pointers to all of its attributes
# Note that to keep in line with Enzo, we have broken PEP-8
h = self.index # cache it
my_ind = self.id - self._id_offset
self.ActiveDimensions = h.grid_dimensions[my_ind]
self.LeftEdge = h.grid_left_edge[my_ind]
self.RightEdge = h.grid_right_edge[my_ind]
# This can be expensive so we allow people to disable this behavior
# via a config option
if RECONSTRUCT_INDEX:
if is_sequence(self.Parent) and len(self.Parent) > 0:
p = self.Parent[0]
else:
p = self.Parent
if p is not None and p != []:
# clamp grid edges to an integer multiple of the parent cell
# width
clamp_edges(self.LeftEdge, p.LeftEdge, p.dds)
clamp_edges(self.RightEdge, p.RightEdge, p.dds)
h.grid_levels[my_ind, 0] = self.Level
# This might be needed for streaming formats
# self.Time = h.gridTimes[my_ind,0]
self.NumberOfParticles = h.grid_particle_count[my_ind, 0]
def get_position(self, index):
"""Returns center position of an *index*."""
pos = (index + 0.5) * self.dds + self.LeftEdge
return pos
def _fill_child_mask(self, child, mask, tofill, dlevel=1):
rf = self.ds.refine_by
if dlevel != 1:
rf = rf**dlevel
gi, cgi = self.get_global_startindex(), child.get_global_startindex()
startIndex = np.maximum(0, cgi // rf - gi)
endIndex = np.minimum(
(cgi + child.ActiveDimensions) // rf - gi, self.ActiveDimensions
)
endIndex += startIndex == endIndex
mask[
startIndex[0] : endIndex[0],
startIndex[1] : endIndex[1],
startIndex[2] : endIndex[2],
] = tofill
@property
def child_mask(self):
"""
Generates self.child_mask, which is zero where child grids exist (and
thus, where higher resolution data is available).
"""
child_mask = np.ones(self.ActiveDimensions, "bool")
for child in self.Children:
self._fill_child_mask(child, child_mask, 0)
for sibling in self.OverlappingSiblings or []:
self._fill_child_mask(sibling, child_mask, 0, dlevel=0)
return child_mask
@property
def child_indices(self):
return self.child_mask == 0
@property
def child_index_mask(self):
"""
Generates self.child_index_mask, which is -1 where there is no child,
and otherwise has the ID of the grid that resides there.
"""
child_index_mask = np.zeros(self.ActiveDimensions, "int32") - 1
for child in self.Children:
self._fill_child_mask(child, child_index_mask, child.id)
for sibling in self.OverlappingSiblings or []:
self._fill_child_mask(sibling, child_index_mask, sibling.id, dlevel=0)
return child_index_mask
def retrieve_ghost_zones(self, n_zones, fields, all_levels=False, smoothed=False):
# We will attempt this by creating a datacube that is exactly bigger
# than the grid by nZones*dx in each direction
nl = self.get_global_startindex() - n_zones
new_left_edge = nl * self.dds + self.ds.domain_left_edge
# Something different needs to be done for the root grid, though
level = self.Level
if all_levels:
level = self.index.max_level + 1
kwargs = {
"dims": self.ActiveDimensions + 2 * n_zones,
"num_ghost_zones": n_zones,
"use_pbar": False,
"fields": fields,
}
# This should update the arguments to set the field parameters to be
# those of this grid.
field_parameters = {}
field_parameters.update(self.field_parameters)
if smoothed:
cube = self.ds.smoothed_covering_grid(
level, new_left_edge, field_parameters=field_parameters, **kwargs
)
else:
cube = self.ds.covering_grid(
level, new_left_edge, field_parameters=field_parameters, **kwargs
)
cube._base_grid = self
return cube
def get_vertex_centered_data(
self,
fields: List[Tuple[str, str]],
smoothed: bool = True,
no_ghost: bool = False,
):
_old_api = isinstance(fields, (str, tuple))
if _old_api:
message = (
"get_vertex_centered_data() requires list of fields, rather than "
"a single field as an argument."
)
warnings.warn(message, DeprecationWarning, stacklevel=2)
fields = [fields] # type: ignore
# Make sure the field list has only unique entries
fields = list(set(fields))
new_fields = {}
for field in fields:
finfo = self.ds._get_field_info(field)
new_fields[field] = self.ds.arr(
np.zeros(self.ActiveDimensions + 1), finfo.output_units
)
if no_ghost:
for field in fields:
# Ensure we have the native endianness in this array. Avoid making
# a copy if possible.
old_field = np.asarray(self[field], dtype="=f8")
# We'll use the ghost zone routine, which will naturally
# extrapolate here.
input_left = np.array([0.5, 0.5, 0.5], dtype="float64")
output_left = np.array([0.0, 0.0, 0.0], dtype="float64")
# rf = 1 here
ghost_zone_interpolate(
1, old_field, input_left, new_fields[field], output_left
)
else:
cg = self.retrieve_ghost_zones(1, fields, smoothed=smoothed)
for field in fields:
src = cg[field].in_units(new_fields[field].units).d
dest = new_fields[field].d
np.add(dest, src[1:, 1:, 1:], dest)
np.add(dest, src[:-1, 1:, 1:], dest)
np.add(dest, src[1:, :-1, 1:], dest)
np.add(dest, src[1:, 1:, :-1], dest)
np.add(dest, src[:-1, 1:, :-1], dest)
np.add(dest, src[1:, :-1, :-1], dest)
np.add(dest, src[:-1, :-1, 1:], dest)
np.add(dest, src[:-1, :-1, :-1], dest)
np.multiply(dest, 0.125, dest)
if _old_api:
return new_fields[fields[0]]
return new_fields
def select_icoords(self, dobj):
mask = self._get_selector_mask(dobj.selector)
if mask is None:
return np.empty((0, 3), dtype="int64")
coords = convert_mask_to_indices(mask, self._last_count)
coords += self.get_global_startindex()[None, :]
return coords
def select_fcoords(self, dobj):
mask = self._get_selector_mask(dobj.selector)
if mask is None:
return np.empty((0, 3), dtype="float64")
coords = convert_mask_to_indices(mask, self._last_count).astype("float64")
coords += 0.5
coords *= self.dds[None, :]
coords += self.LeftEdge[None, :]
return coords
def select_fwidth(self, dobj):
count = self.count(dobj.selector)
if count == 0:
return np.empty((0, 3), dtype="float64")
coords = np.empty((count, 3), dtype="float64")
for axis in range(3):
coords[:, axis] = self.dds[axis]
return coords
def select_ires(self, dobj):
mask = self._get_selector_mask(dobj.selector)
if mask is None:
return np.empty(0, dtype="int64")
coords = np.empty(self._last_count, dtype="int64")
coords[:] = self.Level
return coords
def select_tcoords(self, dobj):
dt, t = dobj.selector.get_dt(self)
return dt, t
def smooth(self, *args, **kwargs):
raise NotImplementedError
def particle_operation(self, *args, **kwargs):
raise NotImplementedError
def deposit(self, positions, fields=None, method=None, kernel_name="cubic"):
# Here we perform our particle deposition.
cls = getattr(particle_deposit, f"deposit_{method}", None)
if cls is None:
raise YTParticleDepositionNotImplemented(method)
# We allocate number of zones, not number of octs. Everything
# inside this is Fortran ordered because of the ordering in the
# octree deposit routines, so we reverse it here to match the
# convention there
nvals = tuple(self.ActiveDimensions[::-1])
# append a dummy dimension because we are only depositing onto
# one grid
op = cls(nvals + (1,), kernel_name)
op.initialize()
op.process_grid(self, positions, fields)
vals = op.finalize()
if vals is None:
return
# Fortran-ordered, so transpose.
vals = vals.transpose()
# squeeze dummy dimension we appended above
return np.squeeze(vals, axis=0)
def select_blocks(self, selector):
mask = self._get_selector_mask(selector)
yield self, mask
def _get_selector_mask(self, selector):
if self._cache_mask and hash(selector) == self._last_selector_id:
mask = self._last_mask
else:
mask = selector.fill_mask(self)
if self._cache_mask:
self._last_mask = mask
self._last_selector_id = hash(selector)
if mask is None:
self._last_count = 0
else:
self._last_count = mask.sum()
return mask
def select(self, selector, source, dest, offset):
mask = self._get_selector_mask(selector)
count = self.count(selector)
if count == 0:
return 0
dim = np.squeeze(self.ds.dimensionality)
nodal_flag = source.shape[:dim] - self.ActiveDimensions[:dim]
if sum(nodal_flag) == 0:
dest[offset : offset + count] = source[mask]
else:
slices = get_nodal_slices(source.shape, nodal_flag, dim)
for i, sl in enumerate(slices):
dest[offset : offset + count, i] = source[tuple(sl)][np.squeeze(mask)]
return count
def count(self, selector):
mask = self._get_selector_mask(selector)
if mask is None:
return 0
return self._last_count
def count_particles(self, selector, x, y, z):
# We don't cache the selector results
count = selector.count_points(x, y, z, 0.0)
return count
def select_particles(self, selector, x, y, z):
mask = selector.select_points(x, y, z, 0.0)
return mask
|
en
| 0.865238
|
Return the integer starting index for each dimension at the current level. This will attempt to convert a given unit to cgs from code units. It either returns the multiplicative factor or throws a KeyError. # So first we figure out what the index is. We don't assume # that dx=dy=dz, at least here. We probably do elsewhere. Clear out the following things: child_mask, child_indices, all fields, all field parameters. Copies all the appropriate attributes from the index. # This is definitely the slowest part of generating the index # Now we give it pointers to all of its attributes # Note that to keep in line with Enzo, we have broken PEP-8 # cache it # This can be expensive so we allow people to disable this behavior # via a config option # clamp grid edges to an integer multiple of the parent cell # width # This might be needed for streaming formats # self.Time = h.gridTimes[my_ind,0] Returns center position of an *index*. Generates self.child_mask, which is zero where child grids exist (and thus, where higher resolution data is available). Generates self.child_index_mask, which is -1 where there is no child, and otherwise has the ID of the grid that resides there. # We will attempt this by creating a datacube that is exactly bigger # than the grid by nZones*dx in each direction # Something different needs to be done for the root grid, though # This should update the arguments to set the field parameters to be # those of this grid. # type: ignore # Make sure the field list has only unique entries # Ensure we have the native endianness in this array. Avoid making # a copy if possible. # We'll use the ghost zone routine, which will naturally # extrapolate here. # rf = 1 here # Here we perform our particle deposition. # We allocate number of zones, not number of octs. 
Everything # inside this is Fortran ordered because of the ordering in the # octree deposit routines, so we reverse it here to match the # convention there # append a dummy dimension because we are only depositing onto # one grid # Fortran-ordered, so transpose. # squeeze dummy dimension we appended above # We don't cache the selector results
| 1.813421
| 2
|
evap/evaluation/tests/test_models.py
|
JenniferStamm/EvaP
| 0
|
6629436
|
<filename>evap/evaluation/tests/test_models.py
from datetime import datetime, timedelta, date
from unittest.mock import patch, Mock
from django.test import TestCase, override_settings
from django.core.cache import cache
from django.core import mail
from model_mommy import mommy
from evap.evaluation.models import (Contribution, Course, CourseType, EmailTemplate, NotArchiveable, Questionnaire,
RatingAnswerCounter, Semester, UserProfile)
from evap.results.tools import calculate_average_grades_and_deviation
@override_settings(EVALUATION_END_OFFSET=0)
class TestCourses(TestCase):
def test_approved_to_in_evaluation(self):
course = mommy.make(Course, state='approved', vote_start_datetime=datetime.now())
with patch('evap.evaluation.models.EmailTemplate.send_to_users_in_courses') as mock:
Course.update_courses()
template = EmailTemplate.objects.get(name=EmailTemplate.EVALUATION_STARTED)
mock.assert_called_once_with(template, [course], [EmailTemplate.ALL_PARTICIPANTS],
use_cc=False, request=None)
course = Course.objects.get(pk=course.pk)
self.assertEqual(course.state, 'in_evaluation')
def test_in_evaluation_to_evaluated(self):
course = mommy.make(Course, state='in_evaluation', vote_end_date=date.today() - timedelta(days=1))
with patch('evap.evaluation.models.Course.is_fully_reviewed') as mock:
mock.__get__ = Mock(return_value=False)
Course.update_courses()
course = Course.objects.get(pk=course.pk)
self.assertEqual(course.state, 'evaluated')
def test_in_evaluation_to_reviewed(self):
# Course is "fully reviewed" as no open text_answers are present by default,
course = mommy.make(Course, state='in_evaluation', vote_end_date=date.today() - timedelta(days=1))
Course.update_courses()
course = Course.objects.get(pk=course.pk)
self.assertEqual(course.state, 'reviewed')
def test_in_evaluation_to_published(self):
# Course is "fully reviewed" and not graded, thus gets published immediately.
course = mommy.make(Course, state='in_evaluation', vote_end_date=date.today() - timedelta(days=1),
is_graded=False)
with patch('evap.evaluation.tools.send_publish_notifications') as mock:
Course.update_courses()
mock.assert_called_once_with([course])
course = Course.objects.get(pk=course.pk)
self.assertEqual(course.state, 'published')
def test_evaluation_ended(self):
# Course is out of evaluation period.
mommy.make(Course, state='in_evaluation', vote_end_date=date.today() - timedelta(days=1), is_graded=False)
# This course is not.
mommy.make(Course, state='in_evaluation', vote_end_date=date.today(), is_graded=False)
with patch('evap.evaluation.models.Course.evaluation_end') as mock:
Course.update_courses()
self.assertEqual(mock.call_count, 1)
def test_approved_to_in_evaluation_sends_emails(self):
""" Regression test for #945 """
participant = mommy.make(UserProfile, email='<EMAIL>')
course = mommy.make(Course, state='approved', vote_start_datetime=datetime.now(), participants=[participant])
Course.update_courses()
course = Course.objects.get(pk=course.pk)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(course.state, 'in_evaluation')
def test_has_enough_questionnaires(self):
# manually circumvent Course's save() method to have a Course without a general contribution
# the semester must be specified because of https://github.com/vandersonmota/model_mommy/issues/258
Course.objects.bulk_create([mommy.prepare(Course, semester=mommy.make(Semester), type=mommy.make(CourseType))])
course = Course.objects.get()
self.assertEqual(course.contributions.count(), 0)
self.assertFalse(course.general_contribution_has_questionnaires)
self.assertFalse(course.all_contributions_have_questionnaires)
responsible_contribution = mommy.make(
Contribution, course=course, contributor=mommy.make(UserProfile),
responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)
course = Course.objects.get()
self.assertFalse(course.general_contribution_has_questionnaires)
self.assertFalse(course.all_contributions_have_questionnaires)
general_contribution = mommy.make(Contribution, course=course, contributor=None)
course = Course.objects.get()
self.assertFalse(course.general_contribution_has_questionnaires)
self.assertFalse(course.all_contributions_have_questionnaires)
questionnaire = mommy.make(Questionnaire)
general_contribution.questionnaires.add(questionnaire)
self.assertTrue(course.general_contribution_has_questionnaires)
self.assertFalse(course.all_contributions_have_questionnaires)
responsible_contribution.questionnaires.add(questionnaire)
self.assertTrue(course.general_contribution_has_questionnaires)
self.assertTrue(course.all_contributions_have_questionnaires)
def test_deleting_last_modified_user_does_not_delete_course(self):
user = mommy.make(UserProfile)
course = mommy.make(Course, last_modified_user=user)
user.delete()
self.assertTrue(Course.objects.filter(pk=course.pk).exists())
def test_responsible_contributors_ordering(self):
course = mommy.make(Course)
responsible1 = mommy.make(UserProfile)
responsible2 = mommy.make(UserProfile)
contribution1 = mommy.make(Contribution, course=course, contributor=responsible1, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS, order=0)
mommy.make(Contribution, course=course, contributor=responsible2, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS, order=1)
self.assertEqual(list(course.responsible_contributors), [responsible1, responsible2])
contribution1.order = 2
contribution1.save()
course = Course.objects.get(pk=course.pk)
self.assertEqual(list(course.responsible_contributors), [responsible2, responsible1])
def test_single_result_can_be_deleted_only_in_reviewed(self):
responsible = mommy.make(UserProfile)
course = mommy.make(Course, semester=mommy.make(Semester))
contribution = mommy.make(Contribution,
course=course, contributor=responsible, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS,
questionnaires=[Questionnaire.single_result_questionnaire()]
)
course.single_result_created()
course.publish()
course.save()
self.assertTrue(Course.objects.filter(pk=course.pk).exists())
self.assertFalse(course.can_staff_delete)
course.unpublish()
self.assertTrue(course.can_staff_delete)
RatingAnswerCounter.objects.filter(contribution__course=course).delete()
course.delete()
self.assertFalse(Course.objects.filter(pk=course.pk).exists())
class TestUserProfile(TestCase):
def test_is_student(self):
some_user = mommy.make(UserProfile)
self.assertFalse(some_user.is_student)
student = mommy.make(UserProfile, courses_participating_in=[mommy.make(Course)])
self.assertTrue(student.is_student)
contributor = mommy.make(UserProfile, contributions=[mommy.make(Contribution)])
self.assertFalse(contributor.is_student)
semester_contributed_to = mommy.make(Semester, created_at=date.today())
semester_participated_in = mommy.make(Semester, created_at=date.today())
course_contributed_to = mommy.make(Course, semester=semester_contributed_to)
course_participated_in = mommy.make(Course, semester=semester_participated_in)
contribution = mommy.make(Contribution, course=course_contributed_to)
user = mommy.make(UserProfile, contributions=[contribution], courses_participating_in=[course_participated_in])
self.assertTrue(user.is_student)
semester_contributed_to.created_at = date.today() - timedelta(days=1)
semester_contributed_to.save()
self.assertTrue(user.is_student)
semester_participated_in.created_at = date.today() - timedelta(days=2)
semester_participated_in.save()
self.assertFalse(user.is_student)
def test_can_staff_delete(self):
user = mommy.make(UserProfile)
mommy.make(Course, participants=[user], state="new")
self.assertTrue(user.can_staff_delete)
user2 = mommy.make(UserProfile)
mommy.make(Course, participants=[user2], state="in_evaluation")
self.assertFalse(user2.can_staff_delete)
contributor = mommy.make(UserProfile)
mommy.make(Contribution, contributor=contributor)
self.assertFalse(contributor.can_staff_delete)
def test_inactive_users_hidden(self):
active_user = mommy.make(UserProfile)
mommy.make(UserProfile, is_active=False)
self.assertEqual(list(UserProfile.objects.exclude_inactive_users().all()), [active_user])
def test_inactive_users_shown(self):
active_user = mommy.make(UserProfile)
inactive_user = mommy.make(UserProfile, is_active=False)
user_list = list(UserProfile.objects.all())
self.assertIn(active_user, user_list)
self.assertIn(inactive_user, user_list)
class ArchivingTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.semester = mommy.make(Semester)
cls.course = mommy.make(Course, pk=7, state="published", semester=cls.semester)
users = mommy.make(UserProfile, _quantity=3)
cls.course.participants.set(users)
cls.course.voters.set(users[:2])
def refresh_course(self):
""" refresh_from_db does not work with courses"""
self.course = self.semester.course_set.first()
def setUp(self):
self.semester.refresh_from_db()
self.refresh_course()
def test_counts_dont_change(self):
"""
Asserts that course.num_voters course.num_participants don't change after archiving.
"""
voter_count = self.course.num_voters
participant_count = self.course.num_participants
self.semester.archive()
self.refresh_course()
self.assertEqual(voter_count, self.course.num_voters)
self.assertEqual(participant_count, self.course.num_participants)
def test_participants_do_not_loose_courses(self):
"""
Asserts that participants still participate in their courses after they get archived.
"""
some_participant = self.course.participants.first()
self.semester.archive()
self.assertEqual(list(some_participant.courses_participating_in.all()), [self.course])
def test_is_archived(self):
"""
Tests whether is_archived returns True on archived semesters and courses.
"""
self.assertFalse(self.course.is_archived)
self.semester.archive()
self.refresh_course()
self.assertTrue(self.course.is_archived)
def test_archiving_does_not_change_results(self):
results = calculate_average_grades_and_deviation(self.course)
self.semester.archive()
self.refresh_course()
cache.clear()
self.assertEqual(calculate_average_grades_and_deviation(self.course), results)
def test_archiving_twice_raises_exception(self):
self.semester.archive()
with self.assertRaises(NotArchiveable):
self.semester.archive()
with self.assertRaises(NotArchiveable):
self.semester.course_set.first()._archive()
def test_course_is_not_archived_if_participant_count_is_set(self):
course = mommy.make(Course, state="published", _participant_count=1, _voter_count=1)
self.assertFalse(course.is_archived)
self.assertTrue(course.is_archiveable)
def test_archiving_doesnt_change_single_results_participant_count(self):
responsible = mommy.make(UserProfile)
course = mommy.make(Course, state="published")
contribution = mommy.make(Contribution, course=course, contributor=responsible, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)
contribution.questionnaires.add(Questionnaire.single_result_questionnaire())
self.assertTrue(course.is_single_result)
course._participant_count = 5
course._voter_count = 5
course.save()
course._archive()
self.assertEqual(course._participant_count, 5)
self.assertEqual(course._voter_count, 5)
class TestLoginUrlEmail(TestCase):
@classmethod
def setUpTestData(cls):
cls.other_user = mommy.make(UserProfile, email="<EMAIL>")
cls.user = mommy.make(UserProfile, email="<EMAIL>")
cls.user.generate_login_key()
cls.course = mommy.make(Course)
mommy.make(Contribution, course=cls.course, contributor=cls.user, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)
cls.template = mommy.make(EmailTemplate, body="{{ login_url }}")
EmailTemplate.objects.filter(name="Login Key Created").update(body="{{ user.login_url }}")
def test_no_login_url_when_delegates_in_cc(self):
self.user.delegates.add(self.other_user)
EmailTemplate.send_to_users_in_courses(self.template, [self.course], EmailTemplate.CONTRIBUTORS, use_cc=True, request=None)
self.assertEqual(len(mail.outbox), 2)
self.assertFalse("loginkey" in mail.outbox[0].body) # message does not contain the login url
self.assertTrue("loginkey" in mail.outbox[1].body) # separate email with login url was sent
self.assertEqual(len(mail.outbox[1].cc), 0)
self.assertEqual(mail.outbox[1].to, [self.user.email])
def test_no_login_url_when_cc_users_in_cc(self):
self.user.cc_users.add(self.other_user)
EmailTemplate.send_to_users_in_courses(self.template, [self.course], [EmailTemplate.CONTRIBUTORS], use_cc=True, request=None)
self.assertEqual(len(mail.outbox), 2)
self.assertFalse("loginkey" in mail.outbox[0].body) # message does not contain the login url
self.assertTrue("loginkey" in mail.outbox[1].body) # separate email with login url was sent
self.assertEqual(len(mail.outbox[1].cc), 0)
self.assertEqual(mail.outbox[1].to, [self.user.email])
def test_login_url_when_nobody_in_cc(self):
# message is not sent to others in cc
EmailTemplate.send_to_users_in_courses(self.template, [self.course], [EmailTemplate.CONTRIBUTORS], use_cc=True, request=None)
self.assertEqual(len(mail.outbox), 1)
self.assertTrue("loginkey" in mail.outbox[0].body) # message does contain the login url
def test_login_url_when_use_cc_is_false(self):
# message is not sent to others in cc
self.user.delegates.add(self.other_user)
EmailTemplate.send_to_users_in_courses(self.template, [self.course], [EmailTemplate.CONTRIBUTORS], use_cc=False, request=None)
self.assertEqual(len(mail.outbox), 1)
self.assertTrue("loginkey" in mail.outbox[0].body) # message does contain the login url
class TestEmailTemplate(TestCase):
def test_missing_email_address(self):
"""
Tests that __send_to_user behaves when the user has no email address.
Regression test to https://github.com/fsr-itse/EvaP/issues/825
"""
user = mommy.make(UserProfile, email=None)
template = EmailTemplate.objects.get(name=EmailTemplate.STUDENT_REMINDER)
EmailTemplate.send_to_user(user, template, {}, {}, False, None)
class TestEmailRecipientList(TestCase):
def test_recipient_list(self):
course = mommy.make(Course)
responsible = mommy.make(UserProfile)
editor = mommy.make(UserProfile)
contributor = mommy.make(UserProfile)
mommy.make(Contribution, course=course, contributor=responsible, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)
mommy.make(Contribution, course=course, contributor=editor, can_edit=True)
mommy.make(Contribution, course=course, contributor=contributor)
participant1 = mommy.make(UserProfile, courses_participating_in=[course])
participant2 = mommy.make(UserProfile, courses_participating_in=[course])
course.voters.set([participant1])
recipient_list = EmailTemplate.recipient_list_for_course(course, [], filter_users_in_cc=False)
self.assertCountEqual(recipient_list, [])
recipient_list = EmailTemplate.recipient_list_for_course(course, [EmailTemplate.RESPONSIBLE], filter_users_in_cc=False)
self.assertCountEqual(recipient_list, [responsible])
recipient_list = EmailTemplate.recipient_list_for_course(course, [EmailTemplate.EDITORS], filter_users_in_cc=False)
self.assertCountEqual(recipient_list, [responsible, editor])
recipient_list = EmailTemplate.recipient_list_for_course(course, [EmailTemplate.CONTRIBUTORS], filter_users_in_cc=False)
self.assertCountEqual(recipient_list, [responsible, editor, contributor])
recipient_list = EmailTemplate.recipient_list_for_course(course, [EmailTemplate.ALL_PARTICIPANTS], filter_users_in_cc=False)
self.assertCountEqual(recipient_list, [participant1, participant2])
recipient_list = EmailTemplate.recipient_list_for_course(course, [EmailTemplate.DUE_PARTICIPANTS], filter_users_in_cc=False)
self.assertCountEqual(recipient_list, [participant2])
def test_recipient_list_filtering(self):
course = mommy.make(Course)
contributor1 = mommy.make(UserProfile)
contributor2 = mommy.make(UserProfile, delegates=[contributor1])
mommy.make(Contribution, course=course, contributor=contributor1)
mommy.make(Contribution, course=course, contributor=contributor2)
# no-one should get filtered.
recipient_list = EmailTemplate.recipient_list_for_course(course, [EmailTemplate.CONTRIBUTORS], filter_users_in_cc=False)
self.assertCountEqual(recipient_list, [contributor1, contributor2])
# contributor1 is in cc of contributor2 and gets filtered.
recipient_list = EmailTemplate.recipient_list_for_course(course, [EmailTemplate.CONTRIBUTORS], filter_users_in_cc=True)
self.assertCountEqual(recipient_list, [contributor2])
contributor3 = mommy.make(UserProfile, delegates=[contributor2])
mommy.make(Contribution, course=course, contributor=contributor3)
# again, no-one should get filtered.
recipient_list = EmailTemplate.recipient_list_for_course(course, [EmailTemplate.CONTRIBUTORS], filter_users_in_cc=False)
self.assertCountEqual(recipient_list, [contributor1, contributor2, contributor3])
# contributor1 is in cc of contributor2 and gets filtered.
# contributor2 is in cc of contributor3 but is not filtered since contributor1 wouldn't get an email at all then.
recipient_list = EmailTemplate.recipient_list_for_course(course, [EmailTemplate.CONTRIBUTORS], filter_users_in_cc=True)
self.assertCountEqual(recipient_list, [contributor2, contributor3])
|
<filename>evap/evaluation/tests/test_models.py
from datetime import datetime, timedelta, date
from unittest.mock import patch, Mock
from django.test import TestCase, override_settings
from django.core.cache import cache
from django.core import mail
from model_mommy import mommy
from evap.evaluation.models import (Contribution, Course, CourseType, EmailTemplate, NotArchiveable, Questionnaire,
RatingAnswerCounter, Semester, UserProfile)
from evap.results.tools import calculate_average_grades_and_deviation
@override_settings(EVALUATION_END_OFFSET=0)
class TestCourses(TestCase):
def test_approved_to_in_evaluation(self):
course = mommy.make(Course, state='approved', vote_start_datetime=datetime.now())
with patch('evap.evaluation.models.EmailTemplate.send_to_users_in_courses') as mock:
Course.update_courses()
template = EmailTemplate.objects.get(name=EmailTemplate.EVALUATION_STARTED)
mock.assert_called_once_with(template, [course], [EmailTemplate.ALL_PARTICIPANTS],
use_cc=False, request=None)
course = Course.objects.get(pk=course.pk)
self.assertEqual(course.state, 'in_evaluation')
def test_in_evaluation_to_evaluated(self):
course = mommy.make(Course, state='in_evaluation', vote_end_date=date.today() - timedelta(days=1))
with patch('evap.evaluation.models.Course.is_fully_reviewed') as mock:
mock.__get__ = Mock(return_value=False)
Course.update_courses()
course = Course.objects.get(pk=course.pk)
self.assertEqual(course.state, 'evaluated')
def test_in_evaluation_to_reviewed(self):
# Course is "fully reviewed" as no open text_answers are present by default,
course = mommy.make(Course, state='in_evaluation', vote_end_date=date.today() - timedelta(days=1))
Course.update_courses()
course = Course.objects.get(pk=course.pk)
self.assertEqual(course.state, 'reviewed')
def test_in_evaluation_to_published(self):
# Course is "fully reviewed" and not graded, thus gets published immediately.
course = mommy.make(Course, state='in_evaluation', vote_end_date=date.today() - timedelta(days=1),
is_graded=False)
with patch('evap.evaluation.tools.send_publish_notifications') as mock:
Course.update_courses()
mock.assert_called_once_with([course])
course = Course.objects.get(pk=course.pk)
self.assertEqual(course.state, 'published')
def test_evaluation_ended(self):
# Course is out of evaluation period.
mommy.make(Course, state='in_evaluation', vote_end_date=date.today() - timedelta(days=1), is_graded=False)
# This course is not.
mommy.make(Course, state='in_evaluation', vote_end_date=date.today(), is_graded=False)
with patch('evap.evaluation.models.Course.evaluation_end') as mock:
Course.update_courses()
self.assertEqual(mock.call_count, 1)
def test_approved_to_in_evaluation_sends_emails(self):
""" Regression test for #945 """
participant = mommy.make(UserProfile, email='<EMAIL>')
course = mommy.make(Course, state='approved', vote_start_datetime=datetime.now(), participants=[participant])
Course.update_courses()
course = Course.objects.get(pk=course.pk)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(course.state, 'in_evaluation')
def test_has_enough_questionnaires(self):
# manually circumvent Course's save() method to have a Course without a general contribution
# the semester must be specified because of https://github.com/vandersonmota/model_mommy/issues/258
Course.objects.bulk_create([mommy.prepare(Course, semester=mommy.make(Semester), type=mommy.make(CourseType))])
course = Course.objects.get()
self.assertEqual(course.contributions.count(), 0)
self.assertFalse(course.general_contribution_has_questionnaires)
self.assertFalse(course.all_contributions_have_questionnaires)
responsible_contribution = mommy.make(
Contribution, course=course, contributor=mommy.make(UserProfile),
responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)
course = Course.objects.get()
self.assertFalse(course.general_contribution_has_questionnaires)
self.assertFalse(course.all_contributions_have_questionnaires)
general_contribution = mommy.make(Contribution, course=course, contributor=None)
course = Course.objects.get()
self.assertFalse(course.general_contribution_has_questionnaires)
self.assertFalse(course.all_contributions_have_questionnaires)
questionnaire = mommy.make(Questionnaire)
general_contribution.questionnaires.add(questionnaire)
self.assertTrue(course.general_contribution_has_questionnaires)
self.assertFalse(course.all_contributions_have_questionnaires)
responsible_contribution.questionnaires.add(questionnaire)
self.assertTrue(course.general_contribution_has_questionnaires)
self.assertTrue(course.all_contributions_have_questionnaires)
def test_deleting_last_modified_user_does_not_delete_course(self):
user = mommy.make(UserProfile)
course = mommy.make(Course, last_modified_user=user)
user.delete()
self.assertTrue(Course.objects.filter(pk=course.pk).exists())
def test_responsible_contributors_ordering(self):
course = mommy.make(Course)
responsible1 = mommy.make(UserProfile)
responsible2 = mommy.make(UserProfile)
contribution1 = mommy.make(Contribution, course=course, contributor=responsible1, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS, order=0)
mommy.make(Contribution, course=course, contributor=responsible2, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS, order=1)
self.assertEqual(list(course.responsible_contributors), [responsible1, responsible2])
contribution1.order = 2
contribution1.save()
course = Course.objects.get(pk=course.pk)
self.assertEqual(list(course.responsible_contributors), [responsible2, responsible1])
def test_single_result_can_be_deleted_only_in_reviewed(self):
responsible = mommy.make(UserProfile)
course = mommy.make(Course, semester=mommy.make(Semester))
contribution = mommy.make(Contribution,
course=course, contributor=responsible, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS,
questionnaires=[Questionnaire.single_result_questionnaire()]
)
course.single_result_created()
course.publish()
course.save()
self.assertTrue(Course.objects.filter(pk=course.pk).exists())
self.assertFalse(course.can_staff_delete)
course.unpublish()
self.assertTrue(course.can_staff_delete)
RatingAnswerCounter.objects.filter(contribution__course=course).delete()
course.delete()
self.assertFalse(Course.objects.filter(pk=course.pk).exists())
class TestUserProfile(TestCase):
def test_is_student(self):
some_user = mommy.make(UserProfile)
self.assertFalse(some_user.is_student)
student = mommy.make(UserProfile, courses_participating_in=[mommy.make(Course)])
self.assertTrue(student.is_student)
contributor = mommy.make(UserProfile, contributions=[mommy.make(Contribution)])
self.assertFalse(contributor.is_student)
semester_contributed_to = mommy.make(Semester, created_at=date.today())
semester_participated_in = mommy.make(Semester, created_at=date.today())
course_contributed_to = mommy.make(Course, semester=semester_contributed_to)
course_participated_in = mommy.make(Course, semester=semester_participated_in)
contribution = mommy.make(Contribution, course=course_contributed_to)
user = mommy.make(UserProfile, contributions=[contribution], courses_participating_in=[course_participated_in])
self.assertTrue(user.is_student)
semester_contributed_to.created_at = date.today() - timedelta(days=1)
semester_contributed_to.save()
self.assertTrue(user.is_student)
semester_participated_in.created_at = date.today() - timedelta(days=2)
semester_participated_in.save()
self.assertFalse(user.is_student)
def test_can_staff_delete(self):
user = mommy.make(UserProfile)
mommy.make(Course, participants=[user], state="new")
self.assertTrue(user.can_staff_delete)
user2 = mommy.make(UserProfile)
mommy.make(Course, participants=[user2], state="in_evaluation")
self.assertFalse(user2.can_staff_delete)
contributor = mommy.make(UserProfile)
mommy.make(Contribution, contributor=contributor)
self.assertFalse(contributor.can_staff_delete)
def test_inactive_users_hidden(self):
active_user = mommy.make(UserProfile)
mommy.make(UserProfile, is_active=False)
self.assertEqual(list(UserProfile.objects.exclude_inactive_users().all()), [active_user])
def test_inactive_users_shown(self):
active_user = mommy.make(UserProfile)
inactive_user = mommy.make(UserProfile, is_active=False)
user_list = list(UserProfile.objects.all())
self.assertIn(active_user, user_list)
self.assertIn(inactive_user, user_list)
class ArchivingTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.semester = mommy.make(Semester)
cls.course = mommy.make(Course, pk=7, state="published", semester=cls.semester)
users = mommy.make(UserProfile, _quantity=3)
cls.course.participants.set(users)
cls.course.voters.set(users[:2])
def refresh_course(self):
""" refresh_from_db does not work with courses"""
self.course = self.semester.course_set.first()
def setUp(self):
self.semester.refresh_from_db()
self.refresh_course()
def test_counts_dont_change(self):
"""
Asserts that course.num_voters course.num_participants don't change after archiving.
"""
voter_count = self.course.num_voters
participant_count = self.course.num_participants
self.semester.archive()
self.refresh_course()
self.assertEqual(voter_count, self.course.num_voters)
self.assertEqual(participant_count, self.course.num_participants)
def test_participants_do_not_loose_courses(self):
"""
Asserts that participants still participate in their courses after they get archived.
"""
some_participant = self.course.participants.first()
self.semester.archive()
self.assertEqual(list(some_participant.courses_participating_in.all()), [self.course])
def test_is_archived(self):
"""
Tests whether is_archived returns True on archived semesters and courses.
"""
self.assertFalse(self.course.is_archived)
self.semester.archive()
self.refresh_course()
self.assertTrue(self.course.is_archived)
def test_archiving_does_not_change_results(self):
results = calculate_average_grades_and_deviation(self.course)
self.semester.archive()
self.refresh_course()
cache.clear()
self.assertEqual(calculate_average_grades_and_deviation(self.course), results)
def test_archiving_twice_raises_exception(self):
self.semester.archive()
with self.assertRaises(NotArchiveable):
self.semester.archive()
with self.assertRaises(NotArchiveable):
self.semester.course_set.first()._archive()
def test_course_is_not_archived_if_participant_count_is_set(self):
course = mommy.make(Course, state="published", _participant_count=1, _voter_count=1)
self.assertFalse(course.is_archived)
self.assertTrue(course.is_archiveable)
def test_archiving_doesnt_change_single_results_participant_count(self):
responsible = mommy.make(UserProfile)
course = mommy.make(Course, state="published")
contribution = mommy.make(Contribution, course=course, contributor=responsible, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)
contribution.questionnaires.add(Questionnaire.single_result_questionnaire())
self.assertTrue(course.is_single_result)
course._participant_count = 5
course._voter_count = 5
course.save()
course._archive()
self.assertEqual(course._participant_count, 5)
self.assertEqual(course._voter_count, 5)
class TestLoginUrlEmail(TestCase):
@classmethod
def setUpTestData(cls):
cls.other_user = mommy.make(UserProfile, email="<EMAIL>")
cls.user = mommy.make(UserProfile, email="<EMAIL>")
cls.user.generate_login_key()
cls.course = mommy.make(Course)
mommy.make(Contribution, course=cls.course, contributor=cls.user, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)
cls.template = mommy.make(EmailTemplate, body="{{ login_url }}")
EmailTemplate.objects.filter(name="Login Key Created").update(body="{{ user.login_url }}")
def test_no_login_url_when_delegates_in_cc(self):
self.user.delegates.add(self.other_user)
EmailTemplate.send_to_users_in_courses(self.template, [self.course], EmailTemplate.CONTRIBUTORS, use_cc=True, request=None)
self.assertEqual(len(mail.outbox), 2)
self.assertFalse("loginkey" in mail.outbox[0].body) # message does not contain the login url
self.assertTrue("loginkey" in mail.outbox[1].body) # separate email with login url was sent
self.assertEqual(len(mail.outbox[1].cc), 0)
self.assertEqual(mail.outbox[1].to, [self.user.email])
def test_no_login_url_when_cc_users_in_cc(self):
self.user.cc_users.add(self.other_user)
EmailTemplate.send_to_users_in_courses(self.template, [self.course], [EmailTemplate.CONTRIBUTORS], use_cc=True, request=None)
self.assertEqual(len(mail.outbox), 2)
self.assertFalse("loginkey" in mail.outbox[0].body) # message does not contain the login url
self.assertTrue("loginkey" in mail.outbox[1].body) # separate email with login url was sent
self.assertEqual(len(mail.outbox[1].cc), 0)
self.assertEqual(mail.outbox[1].to, [self.user.email])
def test_login_url_when_nobody_in_cc(self):
# message is not sent to others in cc
EmailTemplate.send_to_users_in_courses(self.template, [self.course], [EmailTemplate.CONTRIBUTORS], use_cc=True, request=None)
self.assertEqual(len(mail.outbox), 1)
self.assertTrue("loginkey" in mail.outbox[0].body) # message does contain the login url
def test_login_url_when_use_cc_is_false(self):
# message is not sent to others in cc
self.user.delegates.add(self.other_user)
EmailTemplate.send_to_users_in_courses(self.template, [self.course], [EmailTemplate.CONTRIBUTORS], use_cc=False, request=None)
self.assertEqual(len(mail.outbox), 1)
self.assertTrue("loginkey" in mail.outbox[0].body) # message does contain the login url
class TestEmailTemplate(TestCase):
def test_missing_email_address(self):
"""
Tests that __send_to_user behaves when the user has no email address.
Regression test to https://github.com/fsr-itse/EvaP/issues/825
"""
user = mommy.make(UserProfile, email=None)
template = EmailTemplate.objects.get(name=EmailTemplate.STUDENT_REMINDER)
EmailTemplate.send_to_user(user, template, {}, {}, False, None)
class TestEmailRecipientList(TestCase):
    """Tests for EmailTemplate's recipient-group resolution for a course."""

    def test_recipient_list(self):
        # Course with one responsible, one editor, one plain contributor,
        # and two participants of which only participant1 has voted.
        course = mommy.make(Course)
        responsible = mommy.make(UserProfile)
        editor = mommy.make(UserProfile)
        contributor = mommy.make(UserProfile)
        mommy.make(Contribution, course=course, contributor=responsible, responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)
        mommy.make(Contribution, course=course, contributor=editor, can_edit=True)
        mommy.make(Contribution, course=course, contributor=contributor)
        participant1 = mommy.make(UserProfile, courses_participating_in=[course])
        participant2 = mommy.make(UserProfile, courses_participating_in=[course])
        course.voters.set([participant1])
        # No groups requested -> nobody receives mail.
        recipient_list = EmailTemplate.recipient_list_for_course(course, [], filter_users_in_cc=False)
        self.assertCountEqual(recipient_list, [])
        recipient_list = EmailTemplate.recipient_list_for_course(course, [EmailTemplate.RESPONSIBLE], filter_users_in_cc=False)
        self.assertCountEqual(recipient_list, [responsible])
        # EDITORS includes the responsible (who can_edit as well).
        recipient_list = EmailTemplate.recipient_list_for_course(course, [EmailTemplate.EDITORS], filter_users_in_cc=False)
        self.assertCountEqual(recipient_list, [responsible, editor])
        recipient_list = EmailTemplate.recipient_list_for_course(course, [EmailTemplate.CONTRIBUTORS], filter_users_in_cc=False)
        self.assertCountEqual(recipient_list, [responsible, editor, contributor])
        recipient_list = EmailTemplate.recipient_list_for_course(course, [EmailTemplate.ALL_PARTICIPANTS], filter_users_in_cc=False)
        self.assertCountEqual(recipient_list, [participant1, participant2])
        # DUE_PARTICIPANTS excludes participant1, who already voted.
        recipient_list = EmailTemplate.recipient_list_for_course(course, [EmailTemplate.DUE_PARTICIPANTS], filter_users_in_cc=False)
        self.assertCountEqual(recipient_list, [participant2])

    def test_recipient_list_filtering(self):
        # Delegates (users with the recipient in CC) may be filtered out,
        # but only if they would still receive the mail some other way.
        course = mommy.make(Course)
        contributor1 = mommy.make(UserProfile)
        contributor2 = mommy.make(UserProfile, delegates=[contributor1])
        mommy.make(Contribution, course=course, contributor=contributor1)
        mommy.make(Contribution, course=course, contributor=contributor2)
        # no-one should get filtered.
        recipient_list = EmailTemplate.recipient_list_for_course(course, [EmailTemplate.CONTRIBUTORS], filter_users_in_cc=False)
        self.assertCountEqual(recipient_list, [contributor1, contributor2])
        # contributor1 is in cc of contributor2 and gets filtered.
        recipient_list = EmailTemplate.recipient_list_for_course(course, [EmailTemplate.CONTRIBUTORS], filter_users_in_cc=True)
        self.assertCountEqual(recipient_list, [contributor2])
        contributor3 = mommy.make(UserProfile, delegates=[contributor2])
        mommy.make(Contribution, course=course, contributor=contributor3)
        # again, no-one should get filtered.
        recipient_list = EmailTemplate.recipient_list_for_course(course, [EmailTemplate.CONTRIBUTORS], filter_users_in_cc=False)
        self.assertCountEqual(recipient_list, [contributor1, contributor2, contributor3])
        # contributor1 is in cc of contributor2 and gets filtered.
        # contributor2 is in cc of contributor3 but is not filtered since contributor1 wouldn't get an email at all then.
        recipient_list = EmailTemplate.recipient_list_for_course(course, [EmailTemplate.CONTRIBUTORS], filter_users_in_cc=True)
        self.assertCountEqual(recipient_list, [contributor2, contributor3])
|
en
| 0.915091
|
# Course is "fully reviewed" as no open text_answers are present by default, # Course is "fully reviewed" and not graded, thus gets published immediately. # Course is out of evaluation period. # This course is not. Regression test for #945 # manually circumvent Course's save() method to have a Course without a general contribution # the semester must be specified because of https://github.com/vandersonmota/model_mommy/issues/258 refresh_from_db does not work with courses Asserts that course.num_voters course.num_participants don't change after archiving. Asserts that participants still participate in their courses after they get archived. Tests whether is_archived returns True on archived semesters and courses. # message does not contain the login url # separate email with login url was sent # message does not contain the login url # separate email with login url was sent # message is not sent to others in cc # message does contain the login url # message is not sent to others in cc # message does contain the login url Tests that __send_to_user behaves when the user has no email address. Regression test to https://github.com/fsr-itse/EvaP/issues/825 # no-one should get filtered. # contributor1 is in cc of contributor2 and gets filtered. # again, no-one should get filtered. # contributor1 is in cc of contributor2 and gets filtered. # contributor2 is in cc of contributor3 but is not filtered since contributor1 wouldn't get an email at all then.
| 2.235442
| 2
|
tests/batch_fetch/test_many_to_one_relationships.py
|
jd/sqlalchemy-utils
| 1
|
6629437
|
import sqlalchemy as sa
from sqlalchemy_utils import batch_fetch
from tests import TestCase
class TestBatchFetchManyToOneRelationships(TestCase):
    """Verify batch_fetch() pre-populates a many-to-one relationship
    without triggering per-object lazy loads."""

    def create_models(self):
        class User(self.Base):
            __tablename__ = 'user'
            id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
            name = sa.Column(sa.Unicode(255))

        class Article(self.Base):
            __tablename__ = 'article'
            id = sa.Column(sa.Integer, primary_key=True)
            name = sa.Column(sa.Unicode(255))
            # Nullable FK: Article 3 below has no author on purpose.
            author_id = sa.Column(sa.Integer, sa.ForeignKey(User.id))
            author = sa.orm.relationship(
                User,
                backref=sa.orm.backref(
                    'articles'
                )
            )

        self.User = User
        self.Article = Article

    def setup_method(self, method):
        # Two authored articles plus one orphan article.
        TestCase.setup_method(self, method)
        self.users = [
            self.User(id=333, name=u'John'),
            self.User(id=334, name=u'Matt')
        ]
        articles = [
            self.Article(
                id=1,
                name=u'Article 1',
                author=self.users[0]
            ),
            self.Article(
                id=2,
                name=u'Article 2',
                author=self.users[1]
            ),
            self.Article(
                id=3,
                name=u'Article 3'
            )
        ]
        self.session.add_all(articles)
        self.session.commit()

    def test_supports_relationship_attributes(self):
        articles = self.session.query(self.Article).all()
        batch_fetch(
            articles,
            'author'
        )
        # Accessing .author after batch_fetch must not issue new queries.
        query_count = self.connection.query_count
        assert articles[0].author == self.users[0]  # no lazy load should occur
        assert articles[1].author == self.users[1]  # no lazy load should occur
        assert articles[2].author is None  # no lazy load should occur
        assert self.connection.query_count == query_count
|
import sqlalchemy as sa
from sqlalchemy_utils import batch_fetch
from tests import TestCase
class TestBatchFetchManyToOneRelationships(TestCase):
def create_models(self):
class User(self.Base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
name = sa.Column(sa.Unicode(255))
class Article(self.Base):
__tablename__ = 'article'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
author_id = sa.Column(sa.Integer, sa.ForeignKey(User.id))
author = sa.orm.relationship(
User,
backref=sa.orm.backref(
'articles'
)
)
self.User = User
self.Article = Article
def setup_method(self, method):
TestCase.setup_method(self, method)
self.users = [
self.User(id=333, name=u'John'),
self.User(id=334, name=u'Matt')
]
articles = [
self.Article(
id=1,
name=u'Article 1',
author=self.users[0]
),
self.Article(
id=2,
name=u'Article 2',
author=self.users[1]
),
self.Article(
id=3,
name=u'Article 3'
)
]
self.session.add_all(articles)
self.session.commit()
def test_supports_relationship_attributes(self):
articles = self.session.query(self.Article).all()
batch_fetch(
articles,
'author'
)
query_count = self.connection.query_count
assert articles[0].author == self.users[0] # no lazy load should occur
assert articles[1].author == self.users[1] # no lazy load should occur
assert articles[2].author is None # no lazy load should occur
assert self.connection.query_count == query_count
|
en
| 0.864555
|
# no lazy load should occur # no lazy load should occur # no lazy load should occur
| 2.59742
| 3
|
appdaemon/appdaemon.py
|
chipsi007/appdaemon
| 0
|
6629438
|
<filename>appdaemon/appdaemon.py
import sys
import importlib
import traceback
import os
import os.path
from queue import Queue
import datetime
import uuid
import astral
import pytz
import math
import asyncio
import yaml
import concurrent.futures
import threading
import random
import re
from copy import deepcopy, copy
import subprocess
import functools
import time
import cProfile
import io
import pstats
import appdaemon.utils as utils
class AppDaemon:
required_meta = ["latitude", "longitude", "elevation", "time_zone"]
def __init__(self, logger, error, diag, loop, **kwargs):
    """Wire up the AppDaemon core: state caches, locks, worker threads,
    plugins and the utility/appstate loops on the supplied asyncio *loop*.

    logger/error/diag are logging callables; **kwargs carries the parsed
    configuration (everything is also kept on self.config).
    NOTE(review): original indentation was lost; nesting of the app-setup
    section has been reconstructed and should be confirmed upstream.
    """
    self.logger = logger
    self.error = error
    self.diagnostic = diag
    self.config = kwargs
    self.config["ad_version"] = utils.__version__
    self.q = Queue(maxsize=0)
    self.check_app_updates_profile = ""
    self.was_dst = False
    self.last_state = None
    self.last_plugin_state = {}
    self.monitored_files = {}
    self.filter_files = {}
    self.modules = {}
    self.appq = None
    self.executor = None
    self.loop = None
    self.srv = None
    self.appd = None
    self.stopping = False
    self.dashboard = None
    self.now = datetime.datetime.now().timestamp()
    # Shared mutable structures each get a re-entrant lock.
    self.objects = {}
    self.objects_lock = threading.RLock()
    self.schedule = {}
    self.schedule_lock = threading.RLock()
    self.callbacks = {}
    self.callbacks_lock = threading.RLock()
    self.thread_info = {}
    self.thread_info_lock = threading.RLock()
    self.thread_info["threads"] = {}
    self.thread_info["current_busy"] = 0
    self.thread_info["max_busy"] = 0
    self.thread_info["max_busy_time"] = 0
    self.thread_info["last_action_time"] = 0
    self.state = {}
    self.state["default"] = {}
    self.state_lock = threading.RLock()
    self.endpoints = {}
    self.endpoints_lock = threading.RLock()
    self.plugin_meta = {}
    self.plugin_objs = {}
    # No locking yet
    self.global_vars = {}
    self.sun = {}
    self.config_file_modified = 0
    self.tz = None
    self.realtime = True
    self.version = 0
    self.app_config_file_modified = 0
    self.app_config = {}
    self.app_config_file = None
    self._process_arg("app_config_file", kwargs)
    self.plugin_params = kwargs["plugins"]
    # User Supplied/Defaults
    self.threads = 10
    self._process_arg("threads", kwargs, int=True)
    self.app_dir = None
    self._process_arg("app_dir", kwargs)
    self.starttime = None
    self._process_arg("starttime", kwargs)
    self._process_arg("now", kwargs)
    self.logfile = None
    self._process_arg("logfile", kwargs)
    if self.logfile is None:
        self.logfile = "STDOUT"
    self.latitude = None
    self._process_arg("latitude", kwargs)
    self.longitude = None
    self._process_arg("longitude", kwargs)
    self.elevation = None
    self._process_arg("elevation", kwargs)
    self.time_zone = None
    self._process_arg("time_zone", kwargs)
    self.errfile = None
    self._process_arg("error_file", kwargs)
    if self.errfile is None:
        self.errfile = "STDERR"
    self.config_file = None
    self._process_arg("config_file", kwargs)
    self.config_dir = None
    self._process_arg("config_dir", kwargs)
    self.plugins = {}
    self._process_arg("plugins", kwargs)
    self.tick = 1
    self._process_arg("tick", kwargs, int=True)
    self.max_clock_skew = 1
    self._process_arg("max_clock_skew", kwargs, int=True)
    self.threadpool_workers = 10
    self._process_arg("threadpool_workers", kwargs, int=True)
    self.endtime = None
    if "endtime" in kwargs:
        self.endtime = datetime.datetime.strptime(kwargs["endtime"], "%Y-%m-%d %H:%M:%S")
    self.interval = 1
    self._process_arg("interval", kwargs, int=True)
    self.loglevel = "INFO"
    self._process_arg("loglevel", kwargs)
    self.api_port = None
    self._process_arg("api_port", kwargs)
    self.utility_delay = 1
    self._process_arg("utility_delay", kwargs, int=True)
    self.max_utility_skew = self.utility_delay * 0.9
    self._process_arg("max_utility_skew", kwargs, float=True)
    self.check_app_updates_profile = False
    self._process_arg("check_app_updates_profile", kwargs)
    self.production_mode = False
    self._process_arg("production_mode", kwargs)
    self.invalid_yaml_warnings = True
    self._process_arg("invalid_yaml_warnings", kwargs)
    self.missing_app_warnings = True
    self._process_arg("missing_app_warnings", kwargs)
    self.log_thread_actions = False
    self._process_arg("log_thread_actions", kwargs)
    self.exclude_dirs = ["__pycache__"]
    if "exclude_dirs" in kwargs:
        self.exclude_dirs += kwargs["exclude_dirs"]
    # NOTE(review): duplicated assignment kept from the original.
    self.stop_function = None
    self.stop_function = None
    self._process_arg("stop_function", kwargs)
    # Any time manipulation option switches off realtime mode.
    if self.tick != 1 or self.interval != 1 or self.starttime is not None:
        self.realtime = False
    # NOTE(review): self.certpath only exists when cert_verify is False.
    if not kwargs.get("cert_verify", True):
        self.certpath = False
    if kwargs.get("disable_apps") is True:
        self.apps = False
        self.log("INFO", "Apps are disabled")
    else:
        self.apps = True
        self.log("INFO", "Starting Apps")
    # Initialize config file tracking
    self.app_config_file_modified = 0
    self.app_config_files = {}
    self.module_dirs = []
    if self.apps is True:
        # Resolve app_dir/config_dir from one another when not given.
        if self.app_dir is None:
            if self.config_dir is None:
                self.app_dir = utils.find_path("apps")
                self.config_dir = os.path.dirname(self.app_dir)
            else:
                self.app_dir = os.path.join(self.config_dir, "apps")
        utils.check_path("config_dir", logger, self.config_dir, permissions="rwx")
        utils.check_path("appdir", logger, self.app_dir)
        #if os.path.isdir(self.app_dir) is False:
        #    self.log("ERROR", "Invalid value for app_dir: {}".format(self.app_dir))
        #    return
    #
    # Initial Setup
    #
    self.appq = asyncio.Queue(maxsize=0)
    self.log("DEBUG", "Creating worker threads ...")
    # Create Worker Threads
    for i in range(self.threads):
        t = threading.Thread(target=self.worker)
        t.daemon = True
        t.setName("thread-{}".format(i+1))
        with self.thread_info_lock:
            self.thread_info["threads"][t.getName()] = {"callback": "idle", "time_called": 0, "thread": t}
        t.start()
    if self.apps is True:
        self.process_filters()
    self.log("DEBUG", "Done")
    self.loop = loop
    self.stopping = False
    self.log("DEBUG", "Entering run()")
    self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=self.threadpool_workers)
    # Load Plugins
    plugins = []
    if os.path.isdir(os.path.join(self.config_dir, "custom_plugins")):
        plugins = [f.path for f in os.scandir(os.path.join(self.config_dir, "custom_plugins")) if f.is_dir(follow_symlinks=True)]
    for plugin in plugins:
        sys.path.insert(0, plugin)
    if self.plugins is not None:
        for name in self.plugins:
            basename = self.plugins[name]["type"]
            type = self.plugins[name]["type"]
            # Convention: type "hass" -> module "hassplugin", class "HassPlugin".
            module_name = "{}plugin".format(basename)
            class_name = "{}Plugin".format(basename.capitalize())
            full_module_name = None
            for plugin in plugins:
                if os.path.basename(plugin) == type:
                    full_module_name = "{}".format(module_name)
                    self.log("INFO",
                             "Loading Custom Plugin {} using class {} from module {}".format(name, class_name,
                                                                                             module_name))
                    break
            if full_module_name == None:
                #
                # Not a custom plugin, assume it's a built in
                #
                basepath = "appdaemon.plugins"
                full_module_name = "{}.{}.{}".format(basepath, basename, module_name)
                self.log("INFO",
                         "Loading Plugin {} using class {} from module {}".format(name, class_name,
                                                                                  module_name))
            try:
                mod = __import__(full_module_name, globals(), locals(), [module_name], 0)
                app_class = getattr(mod, class_name)
                plugin = app_class(self, name, self.logger, self.err, self.loglevel, self.plugins[name])
                namespace = plugin.get_namespace()
                if namespace in self.plugin_objs:
                    raise ValueError("Duplicate namespace: {}".format(namespace))
                self.plugin_objs[namespace] = plugin
                loop.create_task(plugin.get_updates())
            except:
                # Deliberately broad: one bad plugin must not stop startup.
                self.log("WARNING", "error loading plugin: {} - ignoring".format(name))
                self.log("WARNING", '-' * 60)
                self.log("WARNING", traceback.format_exc())
                self.log("WARNING", '-' * 60)
    # Create utility loop
    self.log("DEBUG", "Starting utility loop")
    loop.create_task(self.utility())
    # Create AppState Loop
    if self.apps:
        loop.create_task(self.appstate_loop())
def _process_arg(self, arg, args, **kwargs):
if args:
if arg in args:
value = args[arg]
if "int" in kwargs and kwargs["int"] is True:
try:
value = int(value)
setattr(self, arg, value)
except ValueError:
self.log("WARNING", "Invalid value for {}: {}, using default({})".format(arg, value, getattr(self, arg)))
if "float" in kwargs and kwargs["float"] is True:
try:
value = float(value)
setattr(self, arg, value)
except ValueError:
self.log("WARNING", "Invalid value for {}: {}, using default({})".format(arg, value, getattr(self, arg)))
else:
setattr(self, arg, value)
def _timeit(func):
@functools.wraps(func)
def newfunc(self, *args, **kwargs):
start_time = time.time()
result = func(self, *args, **kwargs)
elapsed_time = time.time() - start_time
self.log("INFO", 'function [{}] finished in {} ms'.format(
func.__name__, int(elapsed_time * 1000)))
return result
return newfunc
def _profile_this(fn):
def profiled_fn(self, *args, **kwargs):
self.pr = cProfile.Profile()
self.pr.enable()
result = fn(self, *args, **kwargs)
self.pr.disable()
s = io.StringIO()
sortby = 'cumulative'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
self.profile = fn + s.getvalue()
return result
return profiled_fn
def stop(self):
    # Begin shutdown: the utility/appstate loops poll this flag.
    self.stopping = True
    # if ws is not None:
    #    ws.close()
    if self.apps:
        # Broadcast a final ha_stop event so running apps can clean up.
        self.appq.put_nowait({"namespace": "global", "event_type": "ha_stop", "data": None})
    for plugin in self.plugin_objs:
        self.plugin_objs[plugin].stop()
#
# Diagnostics
#
def dump_callbacks(self):
    """Write every registered callback to the diagnostic log."""
    if not self.callbacks:
        self.diag("INFO", "No callbacks")
        return
    separator = "--------------------------------------------------"
    self.diag("INFO", separator)
    self.diag("INFO", "Callbacks")
    self.diag("INFO", separator)
    for name, entries in self.callbacks.items():
        self.diag("INFO", "{}:".format(name))
        for uuid_, entry in entries.items():
            self.diag("INFO", " {} = {}".format(uuid_, entry))
    self.diag("INFO", separator)
def dump_objects(self):
    """Write the app-object registry to the diagnostic log."""
    separator = "--------------------------------------------------"
    self.diag("INFO", separator)
    self.diag("INFO", "Objects")
    self.diag("INFO", separator)
    with self.objects_lock:
        for object_, details in self.objects.items():
            self.diag("INFO", "{}: {}".format(object_, details))
    self.diag("INFO", separator)
def dump_queue(self):
    """Write the current worker-queue depth to the diagnostic log."""
    separator = "--------------------------------------------------"
    self.diag("INFO", separator)
    self.diag("INFO", "Current Queue Size is {}".format(self.q.qsize()))
    self.diag("INFO", separator)
@staticmethod
def atoi(text):
    """Return *text* as an int when it is all digits, else unchanged."""
    if text.isdigit():
        return int(text)
    return text
def natural_keys(self, text):
    """Sort key that orders embedded numbers numerically,
    e.g. "thread-2" sorts before "thread-10".

    FIX: uses a raw string for the regex; the original ``'(\\d+)'`` literal
    contains an invalid escape sequence that modern Python warns about.
    """
    return [self.atoi(chunk) for chunk in re.split(r'(\d+)', text)]
def get_thread_info(self):
    """Return a snapshot of thread statistics without live Thread objects."""
    snapshot = {}
    # Make a copy without the thread objects
    with self.thread_info_lock:
        for field in ("max_busy_time", "last_action_time", "current_busy", "max_busy"):
            snapshot[field] = copy(self.thread_info[field])
        threads = {}
        for thread_id, details in self.thread_info["threads"].items():
            threads[thread_id] = {
                "time_called": details["time_called"],
                "callback": details["callback"],
                "is_alive": details["thread"].is_alive(),
            }
        snapshot["threads"] = threads
    return snapshot
def dump_threads(self):
    # Human-readable worker-thread report on the diagnostic log.
    self.diag("INFO", "--------------------------------------------------")
    self.diag("INFO", "Threads")
    self.diag("INFO", "--------------------------------------------------")
    with self.thread_info_lock:
        # Convert the stored epoch counters to datetimes for display.
        max_ts = datetime.datetime.fromtimestamp(self.thread_info["max_busy_time"])
        last_ts = datetime.datetime.fromtimestamp(self.thread_info["last_action_time"])
        self.diag("INFO", "Currently busy threads: {}".format(self.thread_info["current_busy"]))
        self.diag("INFO", "Most used threads: {} at {}".format(self.thread_info["max_busy"], max_ts))
        self.diag("INFO", "Last activity: {}".format(last_ts))
        self.diag("INFO", "--------------------------------------------------")
        # natural_keys() gives numeric ordering: thread-2 before thread-10.
        for thread in sorted(self.thread_info["threads"], key=self.natural_keys):
            ts = datetime.datetime.fromtimestamp(self.thread_info["threads"][thread]["time_called"])
            self.diag("INFO",
                      "{} - current callback: {} since {}, alive: {}".format(
                          thread,
                          self.thread_info["threads"][thread]["callback"],
                          ts,
                          self.thread_info["threads"][thread]["thread"].is_alive()
                      ))
        self.diag("INFO", "--------------------------------------------------")
def get_callback_entries(self):
    """Return a copy of the callback registry keyed by app name and handle.

    Each entry carries entity/type/kwargs/function/name; ``entity`` is
    normalized to None for callbacks that have no entity.

    BUG FIX: the original tested ``"entity" in callbacks[name][uuid_]`` —
    the freshly created *empty* output dict — so the entity was always
    reported as None. It now inspects the source entry. The registry is
    also read under ``callbacks_lock``, consistent with every other access.
    """
    callbacks = {}
    with self.callbacks_lock:
        for name in self.callbacks.keys():
            callbacks[name] = {}
            for uuid_ in self.callbacks[name]:
                callbacks[name][uuid_] = {}
                if "entity" in self.callbacks[name][uuid_]:
                    callbacks[name][uuid_]["entity"] = self.callbacks[name][uuid_]["entity"]
                else:
                    callbacks[name][uuid_]["entity"] = None
                callbacks[name][uuid_]["type"] = self.callbacks[name][uuid_]["type"]
                callbacks[name][uuid_]["kwargs"] = self.callbacks[name][uuid_]["kwargs"]
                callbacks[name][uuid_]["function"] = self.callbacks[name][uuid_]["function"]
                callbacks[name][uuid_]["name"] = self.callbacks[name][uuid_]["name"]
    return callbacks
#
# Constraints
#
def check_constraint(self, key, value, app):
    """Evaluate one app-level constraint; a True result means unconstrained."""
    if key not in app.list_constraints():
        return True
    constraint_method = getattr(app, key)
    return constraint_method(value)
def check_time_constraint(self, args, name):
    """Return True unless a constrain_start/end_time window excludes now.

    Missing bounds default to the whole day (00:00:00 .. 23:59:59).
    """
    if "constrain_start_time" not in args and "constrain_end_time" not in args:
        return True
    start_time = args.get("constrain_start_time", "00:00:00")
    end_time = args.get("constrain_end_time", "23:59:59")
    return bool(self.now_is_between(start_time, end_time, name))
#
# Thread Management
#
def dispatch_worker(self, name, args):
    # Queue a callback for app *name* unless an app-level or
    # callback-level constraint currently blocks it.
    with self.objects_lock:
        unconstrained = True
        #
        # Argument Constraints
        #
        # App-level: every key of the app's config may name a constraint
        # method on the app object (check_constraint filters non-matches).
        for arg in self.app_config[name].keys():
            constrained = self.check_constraint(arg, self.app_config[name][arg], self.objects[name]["object"])
            if not constrained:
                unconstrained = False
        if not self.check_time_constraint(self.app_config[name], name):
            unconstrained = False
        #
        # Callback level constraints
        #
        if "kwargs" in args:
            for arg in args["kwargs"].keys():
                constrained = self.check_constraint(arg, args["kwargs"][arg], self.objects[name]["object"])
                if not constrained:
                    unconstrained = False
            if not self.check_time_constraint(args["kwargs"], name):
                unconstrained = False
        if unconstrained:
            # Hand off to a worker thread via the dispatch queue.
            self.q.put_nowait(args)
def update_thread_info(self, thread_id, callback, type = None):
    # Record what a worker thread is currently doing and keep the
    # busy-thread statistics up to date.
    if self.log_thread_actions:
        if callback == "idle":
            # NOTE(review): the extra format args are ignored by str.format.
            self.diag("INFO",
                      "{} done".format(thread_id, type, callback))
        else:
            self.diag("INFO",
                      "{} calling {} callback {}".format(thread_id, type, callback))
    with self.thread_info_lock:
        ts = self.now
        self.thread_info["threads"][thread_id]["callback"] = callback
        self.thread_info["threads"][thread_id]["time_called"] = ts
        # "idle" marks the end of a callback; anything else marks the start.
        if callback == "idle":
            self.thread_info["current_busy"] -= 1
        else:
            self.thread_info["current_busy"] += 1
        # Track the high-water mark of concurrently busy threads.
        if self.thread_info["current_busy"] > self.thread_info["max_busy"]:
            self.thread_info["max_busy"] = self.thread_info["current_busy"]
            self.thread_info["max_busy_time"] = ts
        self.thread_info["last_action_time"] = ts
# noinspection PyBroadException
def worker(self):
    # Worker-thread main loop: pull dispatched callbacks off the queue
    # and invoke them, trapping anything an app callback raises.
    while True:
        thread_id = threading.current_thread().name
        args = self.q.get()
        _type = args["type"]
        funcref = args["function"]
        _id = args["id"]
        name = args["name"]
        callback = "{}() in {}".format(funcref.__name__, name)
        app = None
        with self.objects_lock:
            # Only run if the app still exists and has not been reloaded;
            # a reload changes the object id, making queued work stale.
            if name in self.objects and self.objects[name]["id"] == _id:
                app = self.objects[name]["object"]
        if app is not None:
            try:
                if _type == "timer":
                    self.update_thread_info(thread_id, callback, _type)
                    funcref(self.sanitize_timer_kwargs(app, args["kwargs"]))
                    self.update_thread_info(thread_id, "idle")
                elif _type == "attr":
                    entity = args["entity"]
                    attr = args["attribute"]
                    old_state = args["old_state"]
                    new_state = args["new_state"]
                    self.update_thread_info(thread_id, callback, _type)
                    funcref(entity, attr, old_state, new_state,
                            self.sanitize_state_kwargs(app, args["kwargs"]))
                    self.update_thread_info(thread_id, "idle")
                elif _type == "event":
                    data = args["data"]
                    self.update_thread_info(thread_id, callback, _type)
                    funcref(args["event"], data, args["kwargs"])
                    self.update_thread_info(thread_id, "idle")
            except:
                # Deliberately broad: a misbehaving app must not kill the worker.
                self.err("WARNING", '-' * 60)
                self.err("WARNING", "Unexpected error in worker for App {}:".format(name))
                # NOTE(review): "Ags" typo preserved from the original message.
                self.err("WARNING", "Worker Ags: {}".format(args))
                self.err("WARNING", '-' * 60)
                self.err("WARNING", traceback.format_exc())
                self.err("WARNING", '-' * 60)
                if self.errfile != "STDERR" and self.logfile != "STDOUT":
                    self.log("WARNING", "Logged an error to {}".format(self.errfile))
        else:
            self.log("WARNING", "Found stale callback for {} - discarding".format(name))
        self.q.task_done()
#
# State
#
def entity_exists(self, namespace, entity):
    """True when *entity* is present in *namespace*'s state cache."""
    with self.state_lock:
        return namespace in self.state and entity in self.state[namespace]
def add_state_callback(self, name, namespace, entity, cb, kwargs):
    # Register *cb* to fire on state changes of *entity* for app *name*;
    # returns an opaque uuid handle for later cancellation/inspection.
    with self.callbacks_lock:
        if name not in self.callbacks:
            self.callbacks[name] = {}
        handle = uuid.uuid4()
        with self.objects_lock:
            self.callbacks[name][handle] = {
                "name": name,
                "id": self.objects[name]["id"],
                "type": "state",
                "function": cb,
                "entity": entity,
                "namespace": namespace,
                "kwargs": kwargs
            }
        #
        # In the case of a quick_start parameter,
        # start the clock immediately if the device is already in the new state
        #
        if "immediate" in kwargs and kwargs["immediate"] is True:
            if entity is not None and "new" in kwargs and "duration" in kwargs:
                with self.state_lock:
                    if self.state[namespace][entity]["state"] == kwargs["new"]:
                        # Schedule a one-shot fire after "duration" seconds;
                        # the schedule handle is stashed in the callback kwargs.
                        exec_time = self.get_now_ts() + int(kwargs["duration"])
                        kwargs["_duration"] = self.insert_schedule(
                            name, exec_time, cb, False, None,
                            entity=entity,
                            attribute=None,
                            old_state=None,
                            new_state=kwargs["new"], **kwargs
                        )
    return handle
def cancel_state_callback(self, handle, name):
    """Deregister a state callback; warn (without raising) on bad handles."""
    with self.callbacks_lock:
        entries = self.callbacks.get(name)
        if entries is None or handle not in entries:
            self.log("WARNING", "Invalid callback in cancel_state_callback() from app {}".format(name))
        if entries is not None and handle in entries:
            del entries[handle]
        # Drop the per-app dict once its last handle is gone.
        if name in self.callbacks and not self.callbacks[name]:
            del self.callbacks[name]
def info_state_callback(self, handle, name):
    # Return (namespace, entity, attribute, sanitized kwargs) for a
    # registered state callback; raise ValueError for unknown handles.
    with self.callbacks_lock:
        if name in self.callbacks and handle in self.callbacks[name]:
            callback = self.callbacks[name][handle]
            with self.objects_lock:
                return (
                    callback["namespace"],
                    callback["entity"],
                    callback["kwargs"].get("attribute", None),
                    self.sanitize_state_kwargs(self.objects[name]["object"], callback["kwargs"])
                )
        else:
            raise ValueError("Invalid handle: {}".format(handle))
def get_entity(self, namespace, entity_id):
    """Return the cached state dict for *entity_id*, or None if unknown.

    Unknown namespaces additionally emit a warning.
    """
    with self.state_lock:
        if namespace not in self.state:
            self.log("WARNING", "Unknown namespace: {}".format(namespace))
            return None
        return self.state[namespace].get(entity_id)
def get_state(self, namespace, device, entity, attribute):
    # Flexible state lookup that narrows progressively: whole namespace ->
    # all entities of a device type -> one entity's state value -> a single
    # attribute. Deep copies are returned so callers cannot mutate the cache.
    with self.state_lock:
        if device is None:
            # Whole namespace.
            return deepcopy(self.state[namespace])
        elif entity is None:
            # Every entity whose id starts with "<device>.".
            devices = {}
            for entity_id in self.state[namespace].keys():
                thisdevice, thisentity = entity_id.split(".")
                if device == thisdevice:
                    devices[entity_id] = self.state[namespace][entity_id]
            return deepcopy(devices)
        elif attribute is None:
            # Just the "state" value of one entity (None if unknown).
            entity_id = "{}.{}".format(device, entity)
            if entity_id in self.state[namespace]:
                return deepcopy(self.state[namespace][entity_id]["state"])
            else:
                return None
        else:
            entity_id = "{}.{}".format(device, entity)
            if attribute == "all":
                # Full entity record including attributes.
                if entity_id in self.state[namespace]:
                    return deepcopy(self.state[namespace][entity_id])
                else:
                    return None
            else:
                # Single attribute: "attributes" sub-dict first, then top level.
                # NOTE(review): an unknown entity_id raises KeyError on this
                # path instead of returning None — confirm that asymmetry.
                if attribute in self.state[namespace][entity_id]["attributes"]:
                    return deepcopy(self.state[namespace][entity_id]["attributes"][
                        attribute])
                elif attribute in self.state[namespace][entity_id]:
                    return deepcopy(self.state[namespace][entity_id][attribute])
                else:
                    return None
def set_state(self, namespace, entity, state):
    """Overwrite the cached state record of *entity* within *namespace*."""
    with self.state_lock:
        self.state[namespace][entity] = state
#
# App State
#
async def appstate_loop(self):
    # Drain app-originated state updates from the app queue until
    # shutdown is requested via self.stopping.
    while not self.stopping:
        args = await self.appq.get()
        namespace = args["namespace"]
        await self.state_update(namespace, args)
        self.appq.task_done()
def set_app_state(self, namespace, entity_id, state):
    # Update the cache for an app-owned entity and emit a matching
    # state_changed event through the app queue.
    self.log("DEBUG", "set_app_state: {}".format(entity_id))
    #print(state)
    # Only well-formed "<device>.<entity>" ids are accepted.
    if entity_id is not None and "." in entity_id:
        with self.state_lock:
            if entity_id in self.state[namespace]:
                old_state = self.state[namespace][entity_id]
            else:
                old_state = None
            data = {"entity_id": entity_id, "new_state": state, "old_state": old_state}
            args = {"namespace": namespace, "event_type": "state_changed", "data": data}
            self.state[namespace][entity_id] = state
            self.appq.put_nowait(args)
#
# Events
#
def add_event_callback(self, _name, namespace, cb, event, **kwargs):
    """Register *cb* for *event* in *namespace* on behalf of app *_name*.

    Returns an opaque uuid handle for later cancellation/inspection.
    """
    handle = uuid.uuid4()
    with self.callbacks_lock:
        app_callbacks = self.callbacks.setdefault(_name, {})
        with self.objects_lock:
            app_callbacks[handle] = {
                "name": _name,
                "id": self.objects[_name]["id"],
                "type": "event",
                "function": cb,
                "namespace": namespace,
                "event": event,
                "kwargs": kwargs,
            }
    return handle
def cancel_event_callback(self, name, handle):
    """Deregister an event callback; unknown handles are silently ignored."""
    with self.callbacks_lock:
        entries = self.callbacks.get(name)
        if entries is not None and handle in entries:
            del entries[handle]
        # Drop the per-app dict once its last handle is gone.
        if name in self.callbacks and not self.callbacks[name]:
            del self.callbacks[name]
def info_event_callback(self, name, handle):
    """Return (event, kwargs-copy) for a registered event callback.

    Raises:
        ValueError: if *handle* is not registered for app *name*.
    """
    with self.callbacks_lock:
        entries = self.callbacks.get(name, {})
        if handle not in entries:
            raise ValueError("Invalid handle: {}".format(handle))
        callback = entries[handle]
        return callback["event"], callback["kwargs"].copy()
#
# Scheduler
#
def cancel_timer(self, name, handle):
    """Remove one scheduled timer entry belonging to app *name*."""
    self.log("DEBUG", "Canceling timer for {}".format(name))
    with self.schedule_lock:
        entries = self.schedule.get(name)
        if entries is not None and handle in entries:
            del entries[handle]
        # Drop the per-app dict once its last timer is gone.
        if name in self.schedule and not self.schedule[name]:
            del self.schedule[name]
# noinspection PyBroadException
def exec_schedule(self, name, entry, args):
    # Fire one scheduler entry: dispatch its callback, then either re-arm
    # it (repeating entries) or delete it.
    try:
        # Locking performed in calling function
        if "inactive" in args:
            # Parked sunrise/sunset entry awaiting a fresh sun time.
            return
        # Call function
        with self.objects_lock:
            if "entity" in args["kwargs"]:
                # Duration-style state callback: dispatched as an "attr" change.
                self.dispatch_worker(name, {
                    "name": name,
                    "id": self.objects[name]["id"],
                    "type": "attr",
                    "function": args["callback"],
                    "attribute": args["kwargs"]["attribute"],
                    "entity": args["kwargs"]["entity"],
                    "new_state": args["kwargs"]["new_state"],
                    "old_state": args["kwargs"]["old_state"],
                    "kwargs": args["kwargs"],
                })
            else:
                self.dispatch_worker(name, {
                    "name": name,
                    "id": self.objects[name]["id"],
                    "type": "timer",
                    "function": args["callback"],
                    "kwargs": args["kwargs"],
                })
        # If it is a repeating entry, rewrite with new timestamp
        if args["repeat"]:
            if args["type"] == "next_rising" or args["type"] == "next_setting":
                # It's sunrise or sunset - if the offset is negative we
                # won't know the next rise or set time yet so mark as inactive
                # So we can adjust with a scan at sun rise/set
                if args["offset"] < 0:
                    args["inactive"] = 1
                else:
                    # We have a valid time for the next sunrise/set so use it
                    c_offset = self.get_offset(args)
                    args["timestamp"] = self.calc_sun(args["type"]) + c_offset
                    args["offset"] = c_offset
            else:
                # Not sunrise or sunset so just increment
                # the timestamp with the repeat interval
                args["basetime"] += args["interval"]
                args["timestamp"] = args["basetime"] + self.get_offset(args)
        else:  # Otherwise just delete
            del self.schedule[name][entry]
    except:
        # Broad on purpose: a bad entry must not break the scheduler loop.
        self.err("WARNING", '-' * 60)
        self.err(
            "WARNING",
            "Unexpected error during exec_schedule() for App: {}".format(name)
        )
        self.err("WARNING", "Args: {}".format(args))
        self.err("WARNING", '-' * 60)
        self.err("WARNING", traceback.format_exc())
        self.err("WARNING", '-' * 60)
        if self.errfile != "STDERR" and self.logfile != "STDOUT":
            # When explicitly logging to stdout and stderr, suppress
            # verbose_log messages about writing an error (since they show up anyway)
            self.log("WARNING", "Logged an error to {}".format(self.errfile))
        self.err("WARNING", "Scheduler entry has been deleted")
        self.err("WARNING", '-' * 60)
        # Drop the faulty entry so it cannot fail repeatedly.
        del self.schedule[name][entry]
def process_sun(self, action):
    # A new sunrise/sunset time is available: reactivate any schedule
    # entries of that type that were parked as "inactive" and re-time them.
    self.log(
        "DEBUG",
        "Process sun: {}, next sunrise: {}, next sunset: {}".format(
            action, self.sun["next_rising"], self.sun["next_setting"]
        )
    )
    with self.schedule_lock:
        for name in self.schedule.keys():
            # Walk each app's entries in timestamp order.
            for entry in sorted(
                    self.schedule[name].keys(),
                    key=lambda uuid_: self.schedule[name][uuid_]["timestamp"]
            ):
                schedule = self.schedule[name][entry]
                if schedule["type"] == action and "inactive" in schedule:
                    del schedule["inactive"]
                    c_offset = self.get_offset(schedule)
                    schedule["timestamp"] = self.calc_sun(action) + c_offset
                    schedule["offset"] = c_offset
def calc_sun(self, type_):
    # convert to a localized timestamp
    # *type_* is "next_rising" or "next_setting"; the stored value exposes
    # .timestamp(), yielding the POSIX epoch time the scheduler works in.
    return self.sun[type_].timestamp()
def info_timer(self, handle, name):
    # Return (fire datetime, repeat interval, sanitized kwargs) for a
    # scheduled timer; raise ValueError for unknown handles.
    with self.schedule_lock:
        if name in self.schedule and handle in self.schedule[name]:
            callback = self.schedule[name][handle]
            return (
                datetime.datetime.fromtimestamp(callback["timestamp"]),
                callback["interval"],
                # NOTE(review): reads self.objects without objects_lock — confirm.
                self.sanitize_timer_kwargs(self.objects[name]["object"], callback["kwargs"])
            )
        else:
            raise ValueError("Invalid handle: {}".format(handle))
def init_sun(self):
    """Validate the configured coordinates and build the astral Location
    used for sunrise/sunset calculations (also sets self.tz).

    Raises:
        ValueError: when latitude/longitude fall outside their valid ranges.

    BUG FIX: the original tests ``-90 > latitude < 90`` and
    ``-180 > longitude < 180`` are chained comparisons meaning
    "latitude < -90" / "longitude < -180" — they never rejected values
    above the upper bound. Replaced with proper range checks.
    """
    latitude = self.latitude
    longitude = self.longitude
    if not -90 <= latitude <= 90:
        raise ValueError("Latitude needs to be -90 .. 90")
    if not -180 <= longitude <= 180:
        raise ValueError("Longitude needs to be -180 .. 180")
    elevation = self.elevation
    self.tz = pytz.timezone(self.time_zone)
    self.location = astral.Location((
        '', '', latitude, longitude, self.tz.zone, elevation
    ))
def update_sun(self):
    # Recompute the next sunrise/sunset after "now" and, when either value
    # changed, re-arm the corresponding parked scheduler entries.
    #now = datetime.datetime.now(self.tz)
    #now = pytz.utc.localize(self.get_now())
    now = self.tz.localize(self.get_now())
    mod = -1
    # Scan forward day by day (starting from yesterday) until a sunrise
    # strictly after "now" is found; astral can raise for dates with no
    # sunrise (polar day/night), hence the retry loop.
    while True:
        try:
            next_rising_dt = self.location.sunrise(
                (now + datetime.timedelta(days=mod)).date(), local=False
            )
            if next_rising_dt > now:
                break
        except astral.AstralError:
            pass
        mod += 1
    mod = -1
    # Same scan for the next sunset.
    while True:
        try:
            next_setting_dt = self.location.sunset(
                (now + datetime.timedelta(days=mod)).date(), local=False
            )
            if next_setting_dt > now:
                break
        except astral.AstralError:
            pass
        mod += 1
    old_next_rising_dt = self.sun.get("next_rising")
    old_next_setting_dt = self.sun.get("next_setting")
    self.sun["next_rising"] = next_rising_dt
    self.sun["next_setting"] = next_setting_dt
    if old_next_rising_dt is not None and old_next_rising_dt != self.sun["next_rising"]:
        # dump_schedule()
        self.process_sun("next_rising")
        # dump_schedule()
    if old_next_setting_dt is not None and old_next_setting_dt != self.sun["next_setting"]:
        # dump_schedule()
        self.process_sun("next_setting")
        # dump_schedule()
@staticmethod
def get_offset(kwargs):
if "offset" in kwargs["kwargs"]:
if "random_start" in kwargs["kwargs"] \
or "random_end" in kwargs["kwargs"]:
raise ValueError(
"Can't specify offset as well as 'random_start' or "
"'random_end' in 'run_at_sunrise()' or 'run_at_sunset()'"
)
else:
offset = kwargs["kwargs"]["offset"]
else:
rbefore = kwargs["kwargs"].get("random_start", 0)
rafter = kwargs["kwargs"].get("random_end", 0)
offset = random.randint(rbefore, rafter)
# verbose_log(conf.logger, "INFO", "sun: offset = {}".format(offset))
return offset
def insert_schedule(self, name, utc, callback, repeat, type_, **kwargs):
with self.schedule_lock:
if name not in self.schedule:
self.schedule[name] = {}
handle = uuid.uuid4()
utc = int(utc)
c_offset = self.get_offset({"kwargs": kwargs})
ts = utc + c_offset
interval = kwargs.get("interval", 0)
with self.objects_lock:
self.schedule[name][handle] = {
"name": name,
"id": self.objects[name]["id"],
"callback": callback,
"timestamp": ts,
"interval": interval,
"basetime": utc,
"repeat": repeat,
"offset": c_offset,
"type": type_,
"kwargs": kwargs
}
# verbose_log(conf.logger, "INFO", conf.schedule[name][handle])
return handle
def get_scheduler_entries(self):
schedule = {}
for name in self.schedule.keys():
schedule[name] = {}
for entry in sorted(
self.schedule[name].keys(),
key=lambda uuid_: self.schedule[name][uuid_]["timestamp"]
):
schedule[name][entry] = {}
schedule[name][entry]["timestamp"] = self.schedule[name][entry]["timestamp"]
schedule[name][entry]["type"] = self.schedule[name][entry]["type"]
schedule[name][entry]["name"] = self.schedule[name][entry]["name"]
schedule[name][entry]["basetime"] = self.schedule[name][entry]["basetime"]
schedule[name][entry]["repeat"] = self.schedule[name][entry]["basetime"]
schedule[name][entry]["offset"] = self.schedule[name][entry]["basetime"]
schedule[name][entry]["interval"] = self.schedule[name][entry]["basetime"]
schedule[name][entry]["kwargs"] = self.schedule[name][entry]["basetime"]
schedule[name][entry]["callback"] = self.schedule[name][entry]["callback"]
return schedule
def is_dst(self):
return bool(time.localtime(self.get_now_ts()).tm_isdst)
def get_now(self):
return datetime.datetime.fromtimestamp(self.now)
def get_now_ts(self):
return self.now
def now_is_between(self, start_time_str, end_time_str, name=None):
start_time = self.parse_time(start_time_str, name)
end_time = self.parse_time(end_time_str, name)
now = self.get_now()
start_date = now.replace(
hour=start_time.hour, minute=start_time.minute,
second=start_time.second
)
end_date = now.replace(
hour=end_time.hour, minute=end_time.minute, second=end_time.second
)
if end_date < start_date:
# Spans midnight
if now < start_date and now < end_date:
now = now + datetime.timedelta(days=1)
end_date = end_date + datetime.timedelta(days=1)
return start_date <= now <= end_date
def sunset(self):
return datetime.datetime.fromtimestamp(self.calc_sun("next_setting"))
def sunrise(self):
return datetime.datetime.fromtimestamp(self.calc_sun("next_rising"))
def parse_time(self, time_str, name=None):
parsed_time = None
parts = re.search('^(\d+):(\d+):(\d+)', time_str)
if parts:
parsed_time = datetime.time(
int(parts.group(1)), int(parts.group(2)), int(parts.group(3))
)
else:
if time_str == "sunrise":
parsed_time = self.sunrise().time()
elif time_str == "sunset":
parsed_time = self.sunset().time()
else:
parts = re.search(
'^sunrise\s*([+-])\s*(\d+):(\d+):(\d+)', time_str
)
if parts:
if parts.group(1) == "+":
parsed_time = (self.sunrise() + datetime.timedelta(
hours=int(parts.group(2)), minutes=int(parts.group(3)),
seconds=int(parts.group(4))
)).time()
else:
parsed_time = (self.sunrise() - datetime.timedelta(
hours=int(parts.group(2)), minutes=int(parts.group(3)),
seconds=int(parts.group(4))
)).time()
else:
parts = re.search(
'^sunset\s*([+-])\s*(\d+):(\d+):(\d+)', time_str
)
if parts:
if parts.group(1) == "+":
parsed_time = (self.sunset() + datetime.timedelta(
hours=int(parts.group(2)),
minutes=int(parts.group(3)),
seconds=int(parts.group(4))
)).time()
else:
parsed_time = (self.sunset() - datetime.timedelta(
hours=int(parts.group(2)),
minutes=int(parts.group(3)),
seconds=int(parts.group(4))
)).time()
if parsed_time is None:
if name is not None:
raise ValueError(
"{}: invalid time string: {}".format(name, time_str))
else:
raise ValueError("invalid time string: {}".format(time_str))
return parsed_time
def dump_sun(self):
self.diag("INFO", "--------------------------------------------------")
self.diag("INFO", "Sun")
self.diag("INFO", "--------------------------------------------------")
self.diag("INFO", self.sun)
self.diag("INFO", "--------------------------------------------------")
def dump_schedule(self):
if self.schedule == {}:
self.diag("INFO", "Schedule is empty")
else:
self.diag("INFO", "--------------------------------------------------")
self.diag("INFO", "Scheduler Table")
self.diag("INFO", "--------------------------------------------------")
for name in self.schedule.keys():
self.diag( "INFO", "{}:".format(name))
for entry in sorted(
self.schedule[name].keys(),
key=lambda uuid_: self.schedule[name][uuid_]["timestamp"]
):
self.diag(
"INFO",
" Timestamp: {} - data: {}".format(
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(
self.schedule[name][entry]["timestamp"]
)),
self.schedule[name][entry]
)
)
self.diag("INFO", "--------------------------------------------------")
    async def do_every(self, period, f):
        """Drive callback `f` at a fixed cadence, compensating for drift.

        `f` receives the current simulated time and may return a replacement
        timestamp (clock-skew reset / time travel), which re-bases the loop.
        Runs until self.stopping is set.
        """
        #
        # We already set self.now for DST calculation and initial sunset,
        # but lets reset it at the start of the timer loop to avoid an initial clock skew
        #
        if self.starttime:
            self.now = datetime.datetime.strptime(self.starttime, "%Y-%m-%d %H:%M:%S").timestamp()
        else:
            self.now = datetime.datetime.now().timestamp()
        t = math.floor(self.now)
        count = 0
        t_ = math.floor(time.time())
        while not self.stopping:
            count += 1
            # Sleep until the next tick boundary relative to the wall clock;
            # clamping at 0 keeps us from sleeping negative when we're behind.
            delay = max(t_ + count * period - time.time(), 0)
            await asyncio.sleep(delay)
            t += self.interval
            r = await f(t)
            if r is not None and r != t:
                # The callback re-based the clock: restart the cadence from r
                # print("r: {}, t: {}".format(r,t))
                t = r
                t_ = r
                count = 0
#
# Scheduler Loop
#
# noinspection PyBroadException,PyBroadException
    async def do_every_tick(self, utc):
        """Run one scheduler tick at simulated time `utc` (POSIX seconds).

        Handles end-of-run detection, wall-clock skew, sun and DST updates,
        and execution of all schedule entries that have come due.  Returns
        the timestamp do_every() should continue from (a value different
        from `utc` re-bases the timer loop).
        """
        try:
            start_time = datetime.datetime.now().timestamp()
            self.now = utc
            # If we have reached endtime bail out
            if self.endtime is not None and self.get_now() >= self.endtime:
                self.log("INFO", "End time reached, exiting")
                if self.stop_function is not None:
                    self.stop_function()
                else:
                    #
                    # We aren't in a standalone environment so the best we can do is terminate the AppDaemon parts
                    #
                    self.stop()
            if self.realtime:
                real_now = datetime.datetime.now().timestamp()
                delta = abs(utc - real_now)
                # Too far from the wall clock: tell do_every() to re-base
                if delta > self.max_clock_skew:
                    self.log("WARNING",
                              "Scheduler clock skew detected - delta = {} - resetting".format(delta))
                    return real_now
            # Update sunrise/sunset etc.
            self.update_sun()
            # Check if we have entered or exited DST - if so, reload apps
            # to ensure all time callbacks are recalculated
            now_dst = self.is_dst()
            if now_dst != self.was_dst:
                self.log(
                    "INFO",
                    "Detected change in DST from {} to {} -"
                    " reloading all modules".format(self.was_dst, now_dst)
                )
                # dump_schedule()
                self.log("INFO", "-" * 40)
                await utils.run_in_executor(self.loop, self.executor, self.check_app_updates, "__ALL__")
                # dump_schedule()
            self.was_dst = now_dst
            # dump_schedule()
            # test code for clock skew
            # if random.randint(1, 10) == 5:
            #     time.sleep(random.randint(1,20))
            # Process callbacks
            # self.log("DEBUG", "Scheduler invoked at {}".format(now))
            with self.schedule_lock:
                for name in self.schedule.keys():
                    for entry in sorted(
                        self.schedule[name].keys(),
                        key=lambda uuid_: self.schedule[name][uuid_]["timestamp"]
                    ):
                        # Entries are visited in fire-time order, so the
                        # first not-yet-due entry ends this app's scan.
                        if self.schedule[name][entry]["timestamp"] <= utc:
                            self.exec_schedule(name, entry, self.schedule[name][entry])
                        else:
                            break
                # Drop apps whose schedule table has emptied out
                for k, v in list(self.schedule.items()):
                    if v == {}:
                        del self.schedule[k]
            end_time = datetime.datetime.now().timestamp()
            loop_duration = (int((end_time - start_time) * 1000) / 1000) * 1000
            self.log("DEBUG", "Scheduler loop compute time: {}ms".format(loop_duration))
            if loop_duration > 900:
                self.log("WARNING", "Excessive time spent in scheduler loop: {}ms".format(loop_duration))
            return utc
        except:
            self.err("WARNING", '-' * 60)
            self.err("WARNING", "Unexpected error during do_every_tick()")
            self.err("WARNING", '-' * 60)
            self.err( "WARNING", traceback.format_exc())
            self.err("WARNING", '-' * 60)
            if self.errfile != "STDERR" and self.logfile != "STDOUT":
                # When explicitly logging to stdout and stderr, suppress
                # verbose_log messages about writing an error (since they show up anyway)
                self.log(
                    "WARNING",
                    "Logged an error to {}".format(self.errfile)
                )
def process_meta(self, meta, namespace):
if meta is not None:
for key in self.required_meta:
if getattr(self, key) == None:
if key in meta:
# We have a value so override
setattr(self, key, meta[key])
def get_plugin_from_namespace(self, namespace):
if self.plugins is not None:
for name in self.plugins:
if "namespace" in self.plugins[name] and self.plugins[name]["namespace"] == namespace:
return name
if "namespace" not in self.plugins[name] and namespace == "default":
return name
else:
return None
    async def notify_plugin_started(self, namespace, first_time=False):
        """Refresh metadata/state for a namespace whose plugin just started.

        On a restart (first_time False) the apps bound to the plugin are
        reloaded; in all cases a "plugin_started" event is fired.  All
        errors are logged rather than raised.
        """
        try:
            self.last_plugin_state[namespace] = datetime.datetime.now()
            meta = await self.plugin_objs[namespace].get_metadata()
            self.process_meta(meta, namespace)
            if not self.stopping:
                self.plugin_meta[namespace] = meta
                state = await self.plugin_objs[namespace].get_complete_state()
                with self.state_lock:
                    self.state[namespace] = state
                if not first_time:
                    await utils.run_in_executor(self.loop, self.executor, self.check_app_updates, self.get_plugin_from_namespace(namespace))
                else:
                    self.log("INFO", "Got initial state from namespace {}".format(namespace))
                # NOTE(review): .format(namespace) on a placeholder-free string
                # is a no-op; the event type is simply "plugin_started".
                self.process_event("global", {"event_type": "plugin_started".format(namespace), "data": {"name": namespace}})
        except:
            self.err("WARNING", '-' * 60)
            self.err("WARNING", "Unexpected error during notify_plugin_started()")
            self.err("WARNING", '-' * 60)
            self.err("WARNING", traceback.format_exc())
            self.err("WARNING", '-' * 60)
            if self.errfile != "STDERR" and self.logfile != "STDOUT":
                # When explicitly logging to stdout and stderr, suppress
                # verbose_log messages about writing an error (since they show up anyway)
                self.log(
                    "WARNING",
                    "Logged an error to {}".format(self.errfile)
                )
def notify_plugin_stopped(self, namespace):
self.process_event("global", {"event_type": "plugin_stopped".format(namespace), "data": {"name": namespace}})
#
# Utility Loop
#
    async def utility(self):
        """Main housekeeping loop: wait for plugins, load apps, then poll.

        Blocks until every plugin reports active (or stopping), verifies the
        required metadata was obtained, starts the timer loop, loads the
        apps, and then loops until self.stopping is set: checking for app
        updates, periodically refreshing plugin state, watching for thread
        starvation, and calling each plugin's utility() hook.
        """
        #
        # Wait for all plugins to initialize
        #
        initialized = False
        while not initialized and self.stopping is False:
            initialized = True
            for plugin in self.plugin_objs:
                if not self.plugin_objs[plugin].active():
                    initialized = False
                    break
            await asyncio.sleep(1)
        # Check if we need to bail due to missing metadata
        for key in self.required_meta:
            if getattr(self, key) == None:
                # No value so bail
                self.err("ERROR", "Required attribute not set or obtainable from any plugin: {}".format(key))
                self.err("ERROR", "AppDaemon is terminating")
                self.stop()
        if not self.stopping:
            #
            # All plugins are loaded and we have initial state
            #
            if self.starttime:
                new_now = datetime.datetime.strptime(self.starttime, "%Y-%m-%d %H:%M:%S")
                self.log("INFO", "Starting time travel ...")
                self.log("INFO", "Setting clocks to {}".format(new_now))
                self.now = new_now.timestamp()
            else:
                self.now = datetime.datetime.now().timestamp()
            self.thread_info["max_used"] = 0
            self.thread_info["max_used_time"] = self.now
            # Take a note of DST
            self.was_dst = self.is_dst()
            # Setup sun
            self.init_sun()
            self.update_sun()
            # Create timer loop
            self.log("DEBUG", "Starting timer loop")
            self.loop.create_task(self.do_every(self.tick, self.do_every_tick))
            if self.apps:
                self.log("DEBUG", "Reading Apps")
                await utils.run_in_executor(self.loop, self.executor, self.check_app_updates)
                self.log("INFO", "App initialization complete")
            #
            # Fire APPD Started Event
            #
            self.process_event("global", {"event_type": "appd_started", "data": {}})
            while not self.stopping:
                start_time = datetime.datetime.now().timestamp()
                try:
                    if self.apps:
                        if self.production_mode is False:
                            # Check to see if config has changed
                            await utils.run_in_executor(self.loop, self.executor, self.check_app_updates)
                    # Call me suspicious, but lets update state from the plugins periodically
                    # in case we miss events for whatever reason
                    # Every 10 minutes seems like a good place to start
                    for plugin in self.plugin_objs:
                        if self.plugin_objs[plugin].active():
                            if datetime.datetime.now() - self.last_plugin_state[plugin] > datetime.timedelta(
                                    minutes=10):
                                try:
                                    self.log("DEBUG",
                                             "Refreshing {} state".format(plugin))
                                    state = await self.plugin_objs[plugin].get_complete_state()
                                    with self.state_lock:
                                        self.state[plugin] = state
                                    self.last_plugin_state[plugin] = datetime.datetime.now()
                                except:
                                    self.log("WARNING",
                                             "Unexpected error refreshing {} state - retrying in 10 minutes".format(plugin))
                    # Check for thread starvation
                    qsize = self.q.qsize()
                    if qsize > 0 and qsize % 10 == 0:
                        self.log("WARNING", "Queue size is {}, suspect thread starvation".format(self.q.qsize()))
                        self.dump_threads()
                    # Run utility for each plugin
                    for plugin in self.plugin_objs:
                        self.plugin_objs[plugin].utility()
                except:
                    self.err("WARNING", '-' * 60)
                    self.err("WARNING", "Unexpected error during utility()")
                    self.err("WARNING", '-' * 60)
                    self.err("WARNING", traceback.format_exc())
                    self.err("WARNING", '-' * 60)
                    if self.errfile != "STDERR" and self.logfile != "STDOUT":
                        # When explicitly logging to stdout and stderr, suppress
                        # verbose_log messages about writing an error (since they show up anyway)
                        self.log(
                            "WARNING",
                            "Logged an error to {}".format(self.errfile)
                        )
                end_time = datetime.datetime.now().timestamp()
                loop_duration = (int((end_time - start_time) * 1000) / 1000) * 1000
                self.log("DEBUG", "Util loop compute time: {}ms".format(loop_duration))
                if loop_duration > (self.max_utility_skew * 1000):
                    self.log("WARNING", "Excessive time spent in utility loop: {}ms".format(loop_duration))
                if self.check_app_updates_profile is True:
                    self.diag("INFO", "Profile information for Utility Loop")
                    self.diag("INFO", self.check_app_updates_profile_stats)
                await asyncio.sleep(self.utility_delay)
            #
            # Stopping, so terminate apps.
            #
            self.check_app_updates(exit=True)
#
# AppDaemon API
#
def register_endpoint(self, cb, name):
handle = uuid.uuid4()
with self.endpoints_lock:
if name not in self.endpoints:
self.endpoints[name] = {}
self.endpoints[name][handle] = {"callback": cb, "name": name}
return handle
def unregister_endpoint(self, handle, name):
with self.endpoints_lock:
if name in self.endpoints and handle in self.endpoints[name]:
del self.endpoints[name][handle]
#
# App Management
#
def get_app(self, name):
with self.objects_lock:
if name in self.objects:
return self.objects[name]["object"]
else:
return None
    def term_object(self, name):
        """Terminate app `name` and remove all of its registrations.

        terminate() is called synchronously (not via a worker thread) so it
        is guaranteed to have completed before the object and its callbacks,
        schedules and endpoints are discarded.  Errors from terminate() are
        logged, not raised.
        """
        with self.objects_lock:
            term = None
            if name in self.objects and hasattr(self.objects[name]["object"], "terminate"):
                self.log("INFO", "Calling terminate() for {}".format(name))
                # Call terminate directly rather than via worker thread
                # so we know terminate has completed before we move on
                term = self.objects[name]["object"].terminate
        # terminate() runs outside objects_lock so it may safely call back in
        if term is not None:
            try:
                term()
            except:
                self.err("WARNING", '-' * 60)
                self.err("WARNING", "Unexpected error running terminate() for {}".format(name))
                self.err("WARNING", '-' * 60)
                self.err("WARNING", traceback.format_exc())
                self.err("WARNING", '-' * 60)
                if self.errfile != "STDERR" and self.logfile != "STDOUT":
                    self.log("WARNING", "Logged an error to {}".format(self.errfile))
        with self.objects_lock:
            if name in self.objects:
                del self.objects[name]
        self.log("DEBUG", "Clearing callbacks for {}".format(name))
        with self.callbacks_lock:
            if name in self.callbacks:
                del self.callbacks[name]
        with self.schedule_lock:
            if name in self.schedule:
                del self.schedule[name]
        with self.endpoints_lock:
            if name in self.endpoints:
                del self.endpoints[name]
    def init_object(self, name):
        """Instantiate app `name` from its configured module/class and run initialize().

        The module must already have been imported (see read_app()); failures
        inside the app's initialize() are logged but do not propagate.
        """
        app_args = self.app_config[name]
        self.log("INFO",
                 "Initializing app {} using class {} from module {}".format(name, app_args["class"], app_args["module"]))
        if self.get_file_from_module(app_args["module"]) is not None:
            with self.objects_lock:
                # NOTE(review): __import__("a.b") returns the top-level
                # package, not the submodule - confirm dotted module names
                # never reach here (read_app() uses importlib.import_module).
                modname = __import__(app_args["module"])
                app_class = getattr(modname, app_args["class"])
                self.objects[name] = {
                    "object": app_class(
                        self, name, self.logger, self.error, app_args, self.config, self.app_config, self.global_vars
                    ),
                    "id": uuid.uuid4()
                }
            init = self.objects[name]["object"].initialize
            # Call its initialize function
            try:
                init()
            except:
                self.err("WARNING", '-' * 60)
                self.err("WARNING", "Unexpected error running initialize() for {}".format(name))
                self.err("WARNING", '-' * 60)
                self.err("WARNING", traceback.format_exc())
                self.err("WARNING", '-' * 60)
                if self.errfile != "STDERR" and self.logfile != "STDOUT":
                    self.log("WARNING", "Logged an error to {}".format(self.errfile))
        else:
            self.log("WARNING", "Unable to find module module {} - {} is not initialized".format(app_args["module"], name))
    def read_config(self):
        """Read all app configuration, either legacy single-file or per-directory.

        Legacy mode: a single apps.yaml in the config directory (deprecated).
        Directory mode: every .yaml under app_dir (excluding exclude_dirs and
        __pycache__) is read; only entries that are "global_modules" or have
        both "class" and "module" are kept, and duplicate app names across
        files are ignored with a warning.  Returns the merged dict, or None
        when nothing was read.
        """
        new_config = None
        if os.path.isfile(self.app_config_file):
            self.log("WARNING", "apps.yaml in the Config directory is deprecated. Please move apps.yaml to the apps directory.")
            new_config = self.read_config_file(self.app_config_file)
        else:
            for root, subdirs, files in os.walk(self.app_dir):
                subdirs[:] = [d for d in subdirs if d not in self.exclude_dirs]
                if root[-11:] != "__pycache__":
                    for file in files:
                        if file[-5:] == ".yaml":
                            self.log("DEBUG", "Reading {}".format(os.path.join(root, file)))
                            config = self.read_config_file(os.path.join(root, file))
                            valid_apps = {}
                            if type(config).__name__ == "dict":
                                for app in config:
                                    if config[app] is not None:
                                        if app == "global_modules":
                                            valid_apps[app] = config[app]
                                        elif "class" in config[app] and "module" in config[app]:
                                            valid_apps[app] = config[app]
                                        else:
                                            if self.invalid_yaml_warnings:
                                                self.log("WARNING",
                                                         "App '{}' missing 'class' or 'module' entry - ignoring".format(app))
                            else:
                                if self.invalid_yaml_warnings:
                                    self.log("WARNING",
                                             "File '{}' invalid structure - ignoring".format(os.path.join(root, file)))
                            # Merge this file's valid apps; first definition wins
                            if new_config is None:
                                new_config = {}
                            for app in valid_apps:
                                if app in new_config:
                                    self.log("WARNING",
                                             "File '{}' duplicate app: {} - ignoring".format(os.path.join(root, file), app))
                                else:
                                    new_config[app] = valid_apps[app]
        return new_config
def check_later_app_configs(self, last_latest):
if os.path.isfile(self.app_config_file):
ts = os.path.getmtime(self.app_config_file)
return {"latest": ts, "files": [{"name": self.app_config_file, "ts": os.path.getmtime(self.app_config_file)}]}
else:
later_files = {}
app_config_files = []
later_files["files"] = []
later_files["latest"] = last_latest
later_files["deleted"] = []
for root, subdirs, files in os.walk(self.app_dir):
subdirs[:] = [d for d in subdirs if d not in self.exclude_dirs]
if root[-11:] != "__pycache__":
for file in files:
if file[-5:] == ".yaml":
path = os.path.join(root, file)
app_config_files.append(path)
ts = os.path.getmtime(path)
if ts > last_latest:
later_files["files"].append(path)
if ts > later_files["latest"]:
later_files["latest"] = ts
for file in self.app_config_files:
if file not in app_config_files:
later_files["deleted"].append(file)
for file in app_config_files:
if file not in self.app_config_files:
later_files["files"].append(file)
self.app_config_files = app_config_files
return later_files
    def read_config_file(self, file):
        """Parse a single YAML config file; return its contents or None.

        YAML syntax errors and unexpected I/O errors are logged rather than
        raised (None is returned in those cases).
        """
        new_config = None
        try:
            with open(file, 'r') as yamlfd:
                config_file_contents = yamlfd.read()
            try:
                # NOTE(review): yaml.load() without an explicit Loader can
                # construct arbitrary Python objects - acceptable only
                # because these files are operator-supplied; confirm before
                # ever feeding it untrusted input (SafeLoader otherwise).
                new_config = yaml.load(config_file_contents)
            except yaml.YAMLError as exc:
                self.log("WARNING", "Error loading configuration")
                if hasattr(exc, 'problem_mark'):
                    if exc.context is not None:
                        self.log("WARNING", "parser says")
                        self.log("WARNING", str(exc.problem_mark))
                        self.log("WARNING", str(exc.problem) + " " + str(exc.context))
                    else:
                        self.log("WARNING", "parser says")
                        self.log("WARNING", str(exc.problem_mark))
                        self.log("WARNING", str(exc.problem))
            return new_config
        except:
            self.err("WARNING", '-' * 60)
            self.err("WARNING", "Unexpected error loading config file: {}".format(file))
            self.err("WARNING", '-' * 60)
            self.err("WARNING", traceback.format_exc())
            self.err("WARNING", '-' * 60)
            if self.errfile != "STDERR" and self.logfile != "STDOUT":
                self.log("WARNING", "Logged an error to {}".format(self.errfile))
    # noinspection PyBroadException
    def check_config(self):
        """Detect app configuration changes since the last scan.

        Returns {"init": {...}, "term": {...}} naming apps to (re)initialize
        and terminate, or None when the new config could not be read (or an
        unexpected error occurred).  Apps whose config section was deleted
        are terminated immediately as a side effect.
        """
        terminate_apps = {}
        initialize_apps = {}
        try:
            latest = self.check_later_app_configs(self.app_config_file_modified)
            self.app_config_file_modified = latest["latest"]
            if latest["files"] or latest["deleted"]:
                self.log("INFO", "Reading config")
                new_config = self.read_config()
                if new_config is None:
                    self.log("WARNING", "New config not applied")
                    return
                for file in latest["deleted"]:
                    self.log("INFO", "{} deleted".format(file))
                for file in latest["files"]:
                    self.log("INFO", "{} added or modified".format(file))
                # Check for changes
                for name in self.app_config:
                    if name in new_config:
                        if self.app_config[name] != new_config[name]:
                            # Something changed, clear and reload
                            self.log("INFO", "App '{}' changed".format(name))
                            terminate_apps[name] = 1
                            initialize_apps[name] = 1
                    else:
                        # Section has been deleted, clear it out
                        self.log("INFO", "App '{}' deleted".format(name))
                        #
                        # Since the entry has been deleted we can't sensibly determine dependencies
                        # So just immediately terminate it
                        #
                        self.term_object(name)
                for name in new_config:
                    if name not in self.app_config:
                        #
                        # New section added!
                        #
                        if "class" in new_config[name] and "module" in new_config[name]:
                            self.log("INFO", "App '{}' added".format(name))
                            initialize_apps[name] = 1
                        elif name == "global_modules":
                            pass
                        else:
                            if self.invalid_yaml_warnings:
                                self.log("WARNING", "App '{}' missing 'class' or 'module' entry - ignoring".format(name))
                self.app_config = new_config
            return {"init": initialize_apps, "term": terminate_apps}
        except:
            self.err("WARNING", '-' * 60)
            self.err("WARNING", "Unexpected error:")
            self.err("WARNING", '-' * 60)
            self.err("WARNING", traceback.format_exc())
            self.err("WARNING", '-' * 60)
            if self.errfile != "STDERR" and self.logfile != "STDOUT":
                self.log("WARNING", "Logged an error to {}".format(self.errfile))
def get_app_from_file(self, file):
module = self.get_module_from_path(file)
for app in self.app_config:
if "module" in self.app_config[app] and self.app_config[app]["module"] == module:
return app
return None
    # noinspection PyBroadException
    def read_app(self, file, reload=False):
        """Import (or re-import) the Python module behind an app file.

        Modules are cached in self.modules.  On reload, a module missing
        from the cache (e.g. because its initial import failed to compile)
        is re-imported instead.  Files matching neither an app nor a global
        module are skipped with an optional warning.
        """
        name = os.path.basename(file)
        module_name = os.path.splitext(name)[0]
        # Import the App
        if reload:
            self.log("INFO", "Reloading Module: {}".format(file))
            file, ext = os.path.splitext(name)
            #
            # Reload
            #
            try:
                importlib.reload(self.modules[module_name])
            except KeyError:
                # NOTE(review): this checks `name` (which still carries its
                # ".py" suffix) against sys.modules - `module_name` looks
                # like the intended value; confirm before relying on this.
                if name not in sys.modules:
                    # Probably failed to compile on initial load
                    # so we need to re-import not reload
                    self.read_app(file)
                else:
                    # A real KeyError!
                    raise
        else:
            app = self.get_app_from_file(file)
            if app is not None:
                self.log("INFO", "Loading App Module: {}".format(file))
                self.modules[module_name] = importlib.import_module(module_name)
            elif "global_modules" in self.app_config and module_name in self.app_config["global_modules"]:
                self.log("INFO", "Loading Global Module: {}".format(file))
                self.modules[module_name] = importlib.import_module(module_name)
            else:
                if self.missing_app_warnings:
                    self.log("WARNING", "No app description found for: {} - ignoring".format(file))
@staticmethod
def get_module_from_path(path):
name = os.path.basename(path)
module_name = os.path.splitext(name)[0]
return module_name
def get_file_from_module(self, mod):
for file in self.monitored_files:
module_name = self.get_module_from_path(file)
if module_name == mod:
return file
return None
    def process_filters(self):
        """Run configured pre-processing filters over matching app files.

        For each configured filter, every file under app_dir with the
        filter's input extension that is new or modified since the last
        pass is fed to the filter's command line ($1 = input path,
        $2 = derived output path).  Failures to launch are logged.
        """
        if "filters" in self.config:
            for filter in self.config["filters"]:
                for root, subdirs, files in os.walk(self.app_dir, topdown=True):
                    # print(root, subdirs, files)
                    #
                    # Prune dir list
                    #
                    subdirs[:] = [d for d in subdirs if d not in self.exclude_dirs]
                    ext = filter["input_ext"]
                    extlen = len(ext) * -1
                    for file in files:
                        run = False
                        if file[extlen:] == ext:
                            infile = os.path.join(root, file)
                            modified = os.path.getmtime(infile)
                            if infile in self.filter_files:
                                if self.filter_files[infile] < modified:
                                    run = True
                            else:
                                self.log("INFO", "Found new filter file {}".format(infile))
                                run = True
                            if run is True:
                                filtered = True
                                self.log("INFO", "Running filter on {}".format(infile))
                                self.filter_files[infile] = modified
                                # Run the filter
                                outfile = utils.rreplace(infile, ext, filter["output_ext"], 1)
                                command_line = filter["command_line"].replace("$1", infile)
                                command_line = command_line.replace("$2", outfile)
                                # NOTE(review): shell=True with file paths
                                # interpolated into the command string - safe
                                # only for trusted config and filenames.
                                try:
                                    p = subprocess.Popen(command_line, shell=True)
                                except:
                                    self.log("WARNING", '-' * 60)
                                    self.log("WARNING", "Unexpected running filter on: {}:".format(infile))
                                    self.log("WARNING", '-' * 60)
                                    self.log("WARNING", traceback.format_exc())
                                    self.log("WARNING", '-' * 60)
@staticmethod
def file_in_modules(file, modules):
for mod in modules:
if mod["name"] == file:
return True
return False
    #@_timeit
    def check_app_updates(self, plugin=None, exit=False):
        """Scan for app/config changes, then terminate/reload/initialize apps.

        plugin: restrict restarts to apps bound to that plugin ("__ALL__"
        restarts everything); exit=True terminates all apps on shutdown.
        Apps are terminated in reverse dependency order and initialized in
        dependency order (see get_app_deps_and_prios()).
        """
        if not self.apps:
            return
        # Lets add some profiling
        pr = None
        if self.check_app_updates_profile is True:
            pr = cProfile.Profile()
            pr.enable()
        # Process filters
        self.process_filters()
        # Get list of apps we need to terminate and/or initialize
        apps = self.check_config()
        found_files = []
        modules = []
        for root, subdirs, files in os.walk(self.app_dir, topdown=True):
            # print(root, subdirs, files)
            #
            # Prune dir list
            #
            subdirs[:] = [d for d in subdirs if d not in self.exclude_dirs]
            if root[-11:] != "__pycache__":
                if root not in self.module_dirs:
                    self.log("INFO", "Adding {} to module import path".format(root))
                    sys.path.insert(0, root)
                    self.module_dirs.append(root)
                for file in files:
                    if file[-3:] == ".py":
                        found_files.append(os.path.join(root, file))
        for file in found_files:
            if file == os.path.join(self.app_dir, "__init__.py"):
                continue
            try:
                # check we can actually open the file
                fh = open(file)
                fh.close()
                modified = os.path.getmtime(file)
                if file in self.monitored_files:
                    if self.monitored_files[file] < modified:
                        modules.append({"name": file, "reload": True})
                        self.monitored_files[file] = modified
                else:
                    self.log("DEBUG", "Found module {}".format(file))
                    modules.append({"name": file, "reload": False})
                    self. monitored_files[file] = modified
            except IOError as err:
                self.log("WARNING",
                         "Unable to read app {}: {} - skipping".format(file, err))
        # Check for deleted modules and add them to the terminate list
        deleted_modules = []
        for file in self.monitored_files:
            if file not in found_files or exit is True:
                deleted_modules.append(file)
                self.log("INFO", "Removing module {}".format(file))
        for file in deleted_modules:
            del self.monitored_files[file]
            for app in self.apps_per_module(self.get_module_from_path(file)):
                apps["term"][app] = 1
        # Add any apps we need to reload because of file changes
        for module in modules:
            for app in self.apps_per_module(self.get_module_from_path(module["name"])):
                if module["reload"]:
                    apps["term"][app] = 1
                apps["init"][app] = 1
            # A changed global module restarts every app that depends on it
            if "global_modules" in self.app_config:
                for gm in utils.single_or_list(self.app_config["global_modules"]):
                    if gm == self.get_module_from_path(module["name"]):
                        for app in self.apps_per_global_module(gm):
                            if module["reload"]:
                                apps["term"][app] = 1
                            apps["init"][app] = 1
        if plugin is not None:
            self.log("INFO", "Processing restart for {}".format(plugin))
            # This is a restart of one of the plugins so check which apps need to be restarted
            for app in self.app_config:
                reload = False
                if app == "global_modules":
                    continue
                if "plugin" in self.app_config[app]:
                    for this_plugin in utils.single_or_list(self.app_config[app]["plugin"]):
                        if this_plugin == plugin:
                            # We got a match so do the reload
                            reload = True
                            break
                        elif plugin == "__ALL__":
                            reload = True
                            break
                else:
                    # No plugin dependency specified, reload to err on the side of caution
                    reload = True
                if reload is True:
                    apps["term"][app] = 1
                    apps["init"][app] = 1
        # Terminate apps
        if apps is not None and apps["term"]:
            prio_apps = self.get_app_deps_and_prios(apps["term"])
            for app in sorted(prio_apps, key=prio_apps.get, reverse=True):
                try:
                    self.log("INFO", "Terminating {}".format(app))
                    self.term_object(app)
                except:
                    self.err("WARNING", '-' * 60)
                    self.err("WARNING", "Unexpected error terminating app: {}:".format(app))
                    self.err("WARNING", '-' * 60)
                    self.err("WARNING", traceback.format_exc())
                    self.err("WARNING", '-' * 60)
                    if self.errfile != "STDERR" and self.logfile != "STDOUT":
                        self.log("WARNING", "Logged an error to {}".format(self.errfile))
        # Load/reload modules
        for mod in modules:
            try:
                self.read_app(mod["name"], mod["reload"])
            except:
                self.err("WARNING", '-' * 60)
                self.err("WARNING", "Unexpected error loading module: {}:".format(mod["name"]))
                self.err("WARNING", '-' * 60)
                self.err("WARNING", traceback.format_exc())
                self.err("WARNING", '-' * 60)
                if self.errfile != "STDERR" and self.logfile != "STDOUT":
                    self.log("WARNING", "Unexpected error loading module: {}:".format(mod["name"]))
                # A module that failed to load drops its apps from the init list
                self.log("WARNING", "Removing associated apps:")
                module = self.get_module_from_path(mod["name"])
                for app in self.app_config:
                    if self.app_config[app]["module"] == module:
                        if apps["init"] and app in apps["init"]:
                            del apps["init"][app]
                            self.log("WARNING", "{}".format(app))
        if apps is not None and apps["init"]:
            prio_apps = self.get_app_deps_and_prios(apps["init"])
            # Initialize Apps
            for app in sorted(prio_apps, key=prio_apps.get):
                try:
                    self.init_object(app)
                except:
                    self.err("WARNING", '-' * 60)
                    self.err("WARNING", "Unexpected error initializing app: {}:".format(app))
                    self.err("WARNING", '-' * 60)
                    self.err("WARNING", traceback.format_exc())
                    self.err("WARNING", '-' * 60)
                    if self.errfile != "STDERR" and self.logfile != "STDOUT":
                        self.log("WARNING", "Logged an error to {}".format(self.errfile))
        if self.check_app_updates_profile is True:
            pr.disable()
            s = io.StringIO()
            sortby = 'cumulative'
            ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
            ps.print_stats()
            self.check_app_updates_profile_stats = s.getvalue()
    def get_app_deps_and_prios(self, applist):
        """Compute a load-ordering priority for `applist` and its dependents.

        Apps involved in dependency chains get ascending priorities starting
        just above 50 (topological order); independent apps keep their
        configured "priority" (default 50).  Returns {app: priority} for the
        requested apps plus everything that transitively depends on them.
        """
        # Build a list of modules and their dependencies
        deplist = []
        for app in applist:
            if app not in deplist:
                deplist.append(app)
            self.get_dependent_apps(app, deplist)
        # Need to give the topological sort a full list of apps or it will fail
        full_list = list(self.app_config.keys())
        deps = []
        for app in full_list:
            dependees = []
            if "dependencies" in self.app_config[app]:
                for dep in utils.single_or_list(self.app_config[app]["dependencies"]):
                    if dep in self.app_config:
                        dependees.append(dep)
                    else:
                        self.log("WARNING", "Unable to find app {} in dependencies for {}".format(dep, app))
                        self.log("WARNING", "Ignoring app {}".format(app))
            deps.append((app, dependees))
        prio_apps = {}
        prio = float(50.1)
        try:
            for app in self.topological_sort(deps):
                if "dependencies" in self.app_config[app] or self.app_has_dependents(app):
                    prio_apps[app] = prio
                    prio += float(0.0001)
                else:
                    if "priority" in self.app_config[app]:
                        prio_apps[app] = float(self.app_config[app]["priority"])
                    else:
                        prio_apps[app] = float(50)
        except ValueError:
            # topological_sort found a cycle; continue with what was emitted
            pass
        # now we remove the ones we aren't interested in
        final_apps = {}
        for app in prio_apps:
            if app in deplist:
                final_apps[app] = prio_apps[app]
        return final_apps
def app_has_dependents(self, name):
for app in self.app_config:
if "dependencies" in self.app_config[app]:
for dep in utils.single_or_list(self.app_config[app]["dependencies"]):
if dep == name:
return True
return False
    def get_dependent_apps(self, dependee, deps):
        """Recursively append to *deps* (in place) every app that depends on *dependee*.

        Returns None; callers rely only on the in-place mutation of *deps*.
        """
        for app in self.app_config:
            if "dependencies" in self.app_config[app]:
                for dep in utils.single_or_list(self.app_config[app]["dependencies"]):
                    #print("app= {} dep = {}, dependee = {} deps = {}".format(app, dep, dependee, deps))
                    if dep == dependee and app not in deps:
                        deps.append(app)
                        new_deps = self.get_dependent_apps(app, deps)
                        # NOTE(review): get_dependent_apps() never returns a value,
                        # so new_deps is always None and this branch is dead code.
                        if new_deps is not None:
                            deps.append(new_deps)
    def topological_sort(self, source):
        """Yield app names from *source* ([(name, deps), ...]) in dependency order.

        Repeatedly emits every entry whose remaining dependencies have all been
        emitted in an earlier pass. Raises ValueError when no progress can be
        made (cyclic or unresolvable dependencies), after logging the offenders.
        """
        pending = [(name, set(deps)) for name, deps in source] # copy deps so we can modify set in-place
        emitted = []
        while pending:
            next_pending = []
            next_emitted = []
            for entry in pending:
                name, deps = entry
                deps.difference_update(emitted) # remove deps we emitted last pass
                if deps: # still has deps? recheck during next pass
                    next_pending.append(entry)
                else: # no more deps? time to emit
                    yield name
                    emitted.append(name) # <-- not required, but helps preserve original ordering
                    next_emitted.append(name) # remember what we emitted for difference_update() in next pass
            if not next_emitted:
                # all entries have unmet deps, we have cyclic redundancies
                # since we already know all deps are correct
                self.log("WARNING", "Cyclic or missing app dependencies detected")
                for pend in next_pending:
                    deps = ""
                    for dep in pend[1]:
                        deps += "{} ".format(dep)
                    self.log("WARNING", "{} depends on {}".format(pend[0], deps))
                raise ValueError("cyclic dependancy detected")
            pending = next_pending
            emitted = next_emitted
def apps_per_module(self, module):
apps = []
for app in self.app_config:
if app != "global_modules" and self.app_config[app]["module"] == module:
apps.append(app)
return apps
def apps_per_global_module(self, module):
apps = []
for app in self.app_config:
if "global_dependencies" in self.app_config[app]:
for gm in utils.single_or_list(self.app_config[app]["global_dependencies"]):
if gm == module:
apps.append(app)
return apps
#
# State Updates
#
    def check_and_disapatch(self, name, funcref, entity, attribute, new_state,
                            old_state, cold, cnew, kwargs, uuid_):
        """Decide whether a state callback fires for this change, and dispatch it.

        attribute == "all" dispatches unconditionally with the full state dicts.
        Otherwise the watched attribute is extracted from old/new state, the
        optional old/new constraints (cold/cnew) are checked, and the callback
        is either dispatched immediately or - when "duration" is set - armed as
        a timer via insert_schedule(). A pending duration timer is cancelled
        when the constraints stop matching.
        (Method name keeps its historical misspelling; renaming would break callers.)
        """
        kwargs["handle"] = uuid_
        if attribute == "all":
            with self.objects_lock:
                self.dispatch_worker(name, {
                    "name": name,
                    "id": self.objects[name]["id"],
                    "type": "attr",
                    "function": funcref,
                    "attribute": attribute,
                    "entity": entity,
                    "new_state": new_state,
                    "old_state": old_state,
                    "kwargs": kwargs,
                })
        else:
            # Pull the watched attribute out of the old state: top-level key
            # first, then the nested "attributes" dict.
            if old_state is None:
                old = None
            else:
                if attribute in old_state:
                    old = old_state[attribute]
                elif 'attributes' in old_state and attribute in old_state['attributes']:
                    old = old_state['attributes'][attribute]
                else:
                    old = None
            if new_state is None:
                new = None
            else:
                if attribute in new_state:
                    new = new_state[attribute]
                elif 'attributes' in new_state and attribute in new_state['attributes']:
                    new = new_state['attributes'][attribute]
                else:
                    new = None
            if (cold is None or cold == old) and (cnew is None or cnew == new):
                if "duration" in kwargs:
                    # Set a timer
                    exec_time = self.get_now_ts() + int(kwargs["duration"])
                    kwargs["_duration"] = self.insert_schedule(
                        name, exec_time, funcref, False, None,
                        entity=entity,
                        attribute=attribute,
                        old_state=old,
                        new_state=new, **kwargs
                    )
                else:
                    # Do it now
                    with self.objects_lock:
                        self.dispatch_worker(name, {
                            "name": name,
                            "id": self.objects[name]["id"],
                            "type": "attr",
                            "function": funcref,
                            "attribute": attribute,
                            "entity": entity,
                            "new_state": new,
                            "old_state": old,
                            "kwargs": kwargs
                        })
            else:
                if "_duration" in kwargs:
                    # cancel timer
                    self.cancel_timer(name, kwargs["_duration"])
    def process_state_change(self, namespace, state):
        """Fan a state_changed event out to every matching registered state callback.

        A callback matches when its namespace matches (or either side is
        "global") and its watched entity pattern (None = any, "device" = any
        entity of that device, "device.entity" = exact) covers the changed
        entity. One-shot callbacks are collected and cancelled afterwards,
        outside the callbacks lock.
        """
        data = state["data"]
        entity_id = data['entity_id']
        self.log("DEBUG", data)
        device, entity = entity_id.split(".")
        # Process state callbacks
        removes = []
        with self.callbacks_lock:
            for name in self.callbacks.keys():
                for uuid_ in self.callbacks[name]:
                    callback = self.callbacks[name][uuid_]
                    if callback["type"] == "state" and (callback["namespace"] == namespace or callback["namespace"] == "global" or namespace == "global"):
                        cdevice = None
                        centity = None
                        if callback["entity"] is not None:
                            if "." not in callback["entity"]:
                                cdevice = callback["entity"]
                                centity = None
                            else:
                                cdevice, centity = callback["entity"].split(".")
                        if callback["kwargs"].get("attribute") is None:
                            cattribute = "state"
                        else:
                            cattribute = callback["kwargs"].get("attribute")
                        cold = callback["kwargs"].get("old")
                        cnew = callback["kwargs"].get("new")
                        if cdevice is None:
                            # Callback watches everything in the namespace.
                            self.check_and_disapatch(
                                name, callback["function"], entity_id,
                                cattribute,
                                data['new_state'],
                                data['old_state'],
                                cold, cnew,
                                callback["kwargs"],
                                uuid_
                            )
                        elif centity is None:
                            # Callback watches a whole device class.
                            if device == cdevice:
                                self.check_and_disapatch(
                                    name, callback["function"], entity_id,
                                    cattribute,
                                    data['new_state'],
                                    data['old_state'],
                                    cold, cnew,
                                    callback["kwargs"],
                                    uuid_
                                )
                        elif device == cdevice and entity == centity:
                            # Callback watches this exact entity.
                            self.check_and_disapatch(
                                name, callback["function"], entity_id,
                                cattribute,
                                data['new_state'],
                                data['old_state'], cold,
                                cnew,
                                callback["kwargs"],
                                uuid_
                            )
                        # Remove the callback if appropriate
                        remove = callback["kwargs"].get("oneshot", False)
                        if remove:
                            removes.append({"name": callback["name"], "uuid": callback["kwargs"]["handle"]})
        for remove in removes:
            #print(remove)
            self.cancel_state_callback(remove["uuid"], remove["name"])
    async def state_update(self, namespace, data):
        """Handle one incoming plugin/app event: cache state, run callbacks, update dashboards.

        state_changed events first refresh the global state cache; when apps
        are enabled the event is then routed to state or non-state callback
        processing. Any exception is logged and swallowed so the event loop
        keeps running.
        """
        try:
            self.log(
                "DEBUG",
                "Event type:{}:".format(data['event_type'])
            )
            self.log( "DEBUG", data["data"])
            if data['event_type'] == "state_changed":
                entity_id = data['data']['entity_id']
                # First update our global state
                with self.state_lock:
                    self.state[namespace][entity_id] = data['data']['new_state']
            if self.apps is True:
                # Process state changed message
                if data['event_type'] == "state_changed":
                    self.process_state_change(namespace, data)
                else:
                    # Process non-state callbacks
                    self.process_event(namespace, data)
            # Update dashboards
            if self.dashboard is not None:
                await self.dashboard.ws_update(namespace, data)
        except:
            self.log("WARNING", '-' * 60)
            self.log("WARNING", "Unexpected error during state_update()")
            self.log("WARNING", '-' * 60)
            self.log("WARNING", traceback.format_exc())
            self.log("WARNING", '-' * 60)
#
# Event Update
#
    def process_event(self, namespace, data):
        """Dispatch a non-state event to every matching registered event callback.

        A callback matches when its namespace matches (or either side is
        "global"), its registered event is None (wildcard) or equals the event
        type, and every key the callback registered as a filter equals the
        corresponding value in the event data.
        """
        with self.callbacks_lock:
            for name in self.callbacks.keys():
                for uuid_ in self.callbacks[name]:
                    callback = self.callbacks[name][uuid_]
                    if callback["namespace"] == namespace or callback["namespace"] == "global" or namespace == "global":
                        if "event" in callback and (
                                callback["event"] is None
                                or data['event_type'] == callback["event"]):
                            # Check any filters
                            _run = True
                            for key in callback["kwargs"]:
                                if key in data["data"] and callback["kwargs"][key] != \
                                        data["data"][key]:
                                    _run = False
                            if _run:
                                with self.objects_lock:
                                    self.dispatch_worker(name, {
                                        "name": name,
                                        "id": self.objects[name]["id"],
                                        "type": "event",
                                        "event": data['event_type'],
                                        "function": callback["function"],
                                        "data": data["data"],
                                        "kwargs": callback["kwargs"]
                                    })
#
# Plugin Management
#
def get_plugin(self, name):
if name in self.plugin_objs:
return self.plugin_objs[name]
else:
return None
def get_plugin_meta(self, namespace):
for name in self.plugins:
if "namespace" not in self.plugins[name] and namespace == "default":
return self.plugin_meta[namespace]
elif "namespace" in self.plugins[name] and self.plugins[name]["namespace"] == namespace:
return self.plugin_meta[namespace]
else:
return None
#
# Utilities
#
def sanitize_state_kwargs(self, app, kwargs):
kwargs_copy = kwargs.copy()
return self._sanitize_kwargs(kwargs_copy, [
"old", "new", "attribute", "duration", "state",
"entity", "_duration", "old_state", "new_state",
"oneshot"
] + app.list_constraints())
def sanitize_timer_kwargs(self, app, kwargs):
kwargs_copy = kwargs.copy()
return self._sanitize_kwargs(kwargs_copy, [
"interval", "constrain_days", "constrain_input_boolean",
] + app.list_constraints())
def _sanitize_kwargs(self, kwargs, keys):
for key in keys:
if key in kwargs:
del kwargs[key]
return kwargs
def log(self, level, message, name="AppDaemon"):
if not self.realtime:
ts = self.get_now()
else:
ts = None
utils.log(self.logger, level, message, name, ts)
def err(self, level, message, name="AppDaemon"):
if not self.realtime:
ts = self.get_now()
else:
ts = None
utils.log(self.error, level, message, name, ts)
def diag(self, level, message, name="AppDaemon"):
if not self.realtime:
ts = self.get_now()
else:
ts = None
utils.log(self.diagnostic, level, message, name, ts)
    def register_dashboard(self, dash):
        """Attach the dashboard object so state updates can be pushed to it (see state_update)."""
        self.dashboard = dash
    async def dispatch_app_by_name(self, name, args):
        """Invoke the API endpoint registered under *name* with *args*.

        The registered callback runs in the executor (it may block). Returns
        the callback's result, or ('', 404) when no endpoint matches.
        """
        with self.endpoints_lock:
            callback = None
            for app in self.endpoints:
                for handle in self.endpoints[app]:
                    if self.endpoints[app][handle]["name"] == name:
                        callback = self.endpoints[app][handle]["callback"]
        if callback is not None:
            return await utils.run_in_executor(self.loop, self.executor, callback, args)
        else:
            return '', 404
|
<filename>appdaemon/appdaemon.py
import sys
import importlib
import traceback
import os
import os.path
from queue import Queue
import datetime
import uuid
import astral
import pytz
import math
import asyncio
import yaml
import concurrent.futures
import threading
import random
import re
from copy import deepcopy, copy
import subprocess
import functools
import time
import cProfile
import io
import pstats
import appdaemon.utils as utils
class AppDaemon:
    """Core engine: app lifecycle, scheduling, state/event callbacks and worker threads."""

    # Metadata fields a plugin must supply — presumably validated during plugin
    # startup elsewhere in this file (TODO confirm against that code).
    required_meta = ["latitude", "longitude", "elevation", "time_zone"]
    def __init__(self, logger, error, diag, loop, **kwargs):
        """Build the engine: defaults, config overrides, worker threads, plugins, loops.

        logger/error/diag are pre-configured logging objects; loop is the
        asyncio event loop tasks are scheduled on; kwargs carries the parsed
        configuration (see the _process_arg calls for the recognized keys).
        """
        self.logger = logger
        self.error = error
        self.diagnostic = diag
        self.config = kwargs
        self.config["ad_version"] = utils.__version__
        self.q = Queue(maxsize=0)
        self.check_app_updates_profile = ""
        self.was_dst = False
        self.last_state = None
        self.last_plugin_state = {}
        self.monitored_files = {}
        self.filter_files = {}
        self.modules = {}
        self.appq = None
        self.executor = None
        self.loop = None
        self.srv = None
        self.appd = None
        self.stopping = False
        self.dashboard = None
        self.now = datetime.datetime.now().timestamp()
        self.objects = {}
        self.objects_lock = threading.RLock()
        self.schedule = {}
        self.schedule_lock = threading.RLock()
        self.callbacks = {}
        self.callbacks_lock = threading.RLock()
        self.thread_info = {}
        self.thread_info_lock = threading.RLock()
        self.thread_info["threads"] = {}
        self.thread_info["current_busy"] = 0
        self.thread_info["max_busy"] = 0
        self.thread_info["max_busy_time"] = 0
        self.thread_info["last_action_time"] = 0
        self.state = {}
        self.state["default"] = {}
        self.state_lock = threading.RLock()
        self.endpoints = {}
        self.endpoints_lock = threading.RLock()
        self.plugin_meta = {}
        self.plugin_objs = {}
        # No locking yet
        self.global_vars = {}
        self.sun = {}
        self.config_file_modified = 0
        self.tz = None
        self.realtime = True
        self.version = 0
        self.app_config_file_modified = 0
        self.app_config = {}
        self.app_config_file = None
        self._process_arg("app_config_file", kwargs)
        self.plugin_params = kwargs["plugins"]
        # User Supplied/Defaults
        self.threads = 10
        self._process_arg("threads", kwargs, int=True)
        self.app_dir = None
        self._process_arg("app_dir", kwargs)
        self.starttime = None
        self._process_arg("starttime", kwargs)
        self._process_arg("now", kwargs)
        self.logfile = None
        self._process_arg("logfile", kwargs)
        if self.logfile is None:
            self.logfile = "STDOUT"
        self.latitude = None
        self._process_arg("latitude", kwargs)
        self.longitude = None
        self._process_arg("longitude", kwargs)
        self.elevation = None
        self._process_arg("elevation", kwargs)
        self.time_zone = None
        self._process_arg("time_zone", kwargs)
        self.errfile = None
        self._process_arg("error_file", kwargs)
        if self.errfile is None:
            self.errfile = "STDERR"
        self.config_file = None
        self._process_arg("config_file", kwargs)
        self.config_dir = None
        self._process_arg("config_dir", kwargs)
        self.plugins = {}
        self._process_arg("plugins", kwargs)
        self.tick = 1
        self._process_arg("tick", kwargs, int=True)
        self.max_clock_skew = 1
        self._process_arg("max_clock_skew", kwargs, int=True)
        self.threadpool_workers = 10
        self._process_arg("threadpool_workers", kwargs, int=True)
        self.endtime = None
        if "endtime" in kwargs:
            self.endtime = datetime.datetime.strptime(kwargs["endtime"], "%Y-%m-%d %H:%M:%S")
        self.interval = 1
        self._process_arg("interval", kwargs, int=True)
        self.loglevel = "INFO"
        self._process_arg("loglevel", kwargs)
        self.api_port = None
        self._process_arg("api_port", kwargs)
        self.utility_delay = 1
        self._process_arg("utility_delay", kwargs, int=True)
        self.max_utility_skew = self.utility_delay * 0.9
        self._process_arg("max_utility_skew", kwargs, float=True)
        self.check_app_updates_profile = False
        self._process_arg("check_app_updates_profile", kwargs)
        self.production_mode = False
        self._process_arg("production_mode", kwargs)
        self.invalid_yaml_warnings = True
        self._process_arg("invalid_yaml_warnings", kwargs)
        self.missing_app_warnings = True
        self._process_arg("missing_app_warnings", kwargs)
        self.log_thread_actions = False
        self._process_arg("log_thread_actions", kwargs)
        self.exclude_dirs = ["__pycache__"]
        if "exclude_dirs" in kwargs:
            self.exclude_dirs += kwargs["exclude_dirs"]
        # NOTE(review): duplicated assignment — the second line is redundant.
        self.stop_function = None
        self.stop_function = None
        self._process_arg("stop_function", kwargs)
        # Any non-default clock configuration means we are in time-travel mode.
        if self.tick != 1 or self.interval != 1 or self.starttime is not None:
            self.realtime = False
        # NOTE(review): certpath is only ever assigned in this branch — code that
        # reads it with cert_verify enabled may hit AttributeError; confirm usage.
        if not kwargs.get("cert_verify", True):
            self.certpath = False
        if kwargs.get("disable_apps") is True:
            self.apps = False
            self.log("INFO", "Apps are disabled")
        else:
            self.apps = True
            self.log("INFO", "Starting Apps")
        # Initialize config file tracking
        self.app_config_file_modified = 0
        self.app_config_files = {}
        self.module_dirs = []
        if self.apps is True:
            if self.app_dir is None:
                if self.config_dir is None:
                    self.app_dir = utils.find_path("apps")
                    self.config_dir = os.path.dirname(self.app_dir)
                else:
                    self.app_dir = os.path.join(self.config_dir, "apps")
            utils.check_path("config_dir", logger, self.config_dir, permissions="rwx")
            utils.check_path("appdir", logger, self.app_dir)
            #if os.path.isdir(self.app_dir) is False:
            #    self.log("ERROR", "Invalid value for app_dir: {}".format(self.app_dir))
            #    return
        #
        # Initial Setup
        #
        self.appq = asyncio.Queue(maxsize=0)
        self.log("DEBUG", "Creating worker threads ...")
        # Create Worker Threads
        for i in range(self.threads):
            t = threading.Thread(target=self.worker)
            t.daemon = True
            t.setName("thread-{}".format(i+1))
            with self.thread_info_lock:
                self.thread_info["threads"][t.getName()] = {"callback": "idle", "time_called": 0, "thread": t}
            t.start()
        if self.apps is True:
            self.process_filters()
        self.log("DEBUG", "Done")
        self.loop = loop
        self.stopping = False
        self.log("DEBUG", "Entering run()")
        self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=self.threadpool_workers)
        # Load Plugins
        plugins = []
        if os.path.isdir(os.path.join(self.config_dir, "custom_plugins")):
            plugins = [f.path for f in os.scandir(os.path.join(self.config_dir, "custom_plugins")) if f.is_dir(follow_symlinks=True)]
        for plugin in plugins:
            sys.path.insert(0, plugin)
        if self.plugins is not None:
            for name in self.plugins:
                basename = self.plugins[name]["type"]
                type = self.plugins[name]["type"]
                module_name = "{}plugin".format(basename)
                class_name = "{}Plugin".format(basename.capitalize())
                full_module_name = None
                for plugin in plugins:
                    if os.path.basename(plugin) == type:
                        full_module_name = "{}".format(module_name)
                        self.log("INFO",
                                 "Loading Custom Plugin {} using class {} from module {}".format(name, class_name,
                                                                                                 module_name))
                        break
                if full_module_name == None:
                    #
                    # Not a custom plugin, assume it's a built in
                    #
                    basepath = "appdaemon.plugins"
                    full_module_name = "{}.{}.{}".format(basepath, basename, module_name)
                    self.log("INFO",
                             "Loading Plugin {} using class {} from module {}".format(name, class_name,
                                                                                      module_name))
                try:
                    mod = __import__(full_module_name, globals(), locals(), [module_name], 0)
                    app_class = getattr(mod, class_name)
                    plugin = app_class(self, name, self.logger, self.err, self.loglevel, self.plugins[name])
                    namespace = plugin.get_namespace()
                    if namespace in self.plugin_objs:
                        raise ValueError("Duplicate namespace: {}".format(namespace))
                    self.plugin_objs[namespace] = plugin
                    loop.create_task(plugin.get_updates())
                except:
                    self.log("WARNING", "error loading plugin: {} - ignoring".format(name))
                    self.log("WARNING", '-' * 60)
                    self.log("WARNING", traceback.format_exc())
                    self.log("WARNING", '-' * 60)
        # Create utility loop
        self.log("DEBUG", "Starting utility loop")
        loop.create_task(self.utility())
        # Create AppState Loop
        if self.apps:
            loop.create_task(self.appstate_loop())
def _process_arg(self, arg, args, **kwargs):
if args:
if arg in args:
value = args[arg]
if "int" in kwargs and kwargs["int"] is True:
try:
value = int(value)
setattr(self, arg, value)
except ValueError:
self.log("WARNING", "Invalid value for {}: {}, using default({})".format(arg, value, getattr(self, arg)))
if "float" in kwargs and kwargs["float"] is True:
try:
value = float(value)
setattr(self, arg, value)
except ValueError:
self.log("WARNING", "Invalid value for {}: {}, using default({})".format(arg, value, getattr(self, arg)))
else:
setattr(self, arg, value)
def _timeit(func):
@functools.wraps(func)
def newfunc(self, *args, **kwargs):
start_time = time.time()
result = func(self, *args, **kwargs)
elapsed_time = time.time() - start_time
self.log("INFO", 'function [{}] finished in {} ms'.format(
func.__name__, int(elapsed_time * 1000)))
return result
return newfunc
def _profile_this(fn):
def profiled_fn(self, *args, **kwargs):
self.pr = cProfile.Profile()
self.pr.enable()
result = fn(self, *args, **kwargs)
self.pr.disable()
s = io.StringIO()
sortby = 'cumulative'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
self.profile = fn + s.getvalue()
return result
return profiled_fn
    def stop(self):
        """Begin shutdown: flag loops to stop, notify apps with ha_stop, stop plugins."""
        self.stopping = True
        # if ws is not None:
        #    ws.close()
        if self.apps:
            self.appq.put_nowait({"namespace": "global", "event_type": "ha_stop", "data": None})
        for plugin in self.plugin_objs:
            self.plugin_objs[plugin].stop()
#
# Diagnostics
#
    def dump_callbacks(self):
        """Write every registered callback to the diagnostic log.

        NOTE(review): reads self.callbacks without taking callbacks_lock.
        """
        if self.callbacks == {}:
            self.diag("INFO", "No callbacks")
        else:
            self.diag("INFO", "--------------------------------------------------")
            self.diag("INFO", "Callbacks")
            self.diag("INFO", "--------------------------------------------------")
            for name in self.callbacks.keys():
                self.diag("INFO", "{}:".format(name))
                for uuid_ in self.callbacks[name]:
                    self.diag( "INFO", "  {} = {}".format(uuid_, self.callbacks[name][uuid_]))
            self.diag("INFO", "--------------------------------------------------")
    def dump_objects(self):
        """Write every instantiated app object entry to the diagnostic log."""
        self.diag("INFO", "--------------------------------------------------")
        self.diag("INFO", "Objects")
        self.diag("INFO", "--------------------------------------------------")
        with self.objects_lock:
            for object_ in self.objects.keys():
                self.diag("INFO", "{}: {}".format(object_, self.objects[object_]))
        self.diag("INFO", "--------------------------------------------------")
    def dump_queue(self):
        """Write the current worker-queue depth to the diagnostic log."""
        self.diag("INFO", "--------------------------------------------------")
        self.diag("INFO", "Current Queue Size is {}".format(self.q.qsize()))
        self.diag("INFO", "--------------------------------------------------")
@staticmethod
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(self, text):
return [self.atoi(c) for c in re.split('(\d+)', text)]
    def get_thread_info(self):
        """Return a snapshot of thread statistics safe to hand to other consumers.

        The copy deliberately omits the Thread objects themselves, replacing
        each with its callback, last-call time and liveness flag.
        """
        info = {}
        # Make a copy without the thread objects
        with self.thread_info_lock:
            info["max_busy_time"] = copy(self.thread_info["max_busy_time"])
            info["last_action_time"] = copy(self.thread_info["last_action_time"])
            info["current_busy"] = copy(self.thread_info["current_busy"])
            info["max_busy"] = copy(self.thread_info["max_busy"])
            info["threads"] = {}
            for thread in self.thread_info["threads"]:
                if thread not in info["threads"]:
                    info["threads"][thread] = {}
                info["threads"][thread]["time_called"] = self.thread_info["threads"][thread]["time_called"]
                info["threads"][thread]["callback"] = self.thread_info["threads"][thread]["callback"]
                info["threads"][thread]["is_alive"] = self.thread_info["threads"][thread]["thread"].is_alive()
        return info
    def dump_threads(self):
        """Write per-thread busy/idle statistics to the diagnostic log."""
        self.diag("INFO", "--------------------------------------------------")
        self.diag("INFO", "Threads")
        self.diag("INFO", "--------------------------------------------------")
        with self.thread_info_lock:
            max_ts = datetime.datetime.fromtimestamp(self.thread_info["max_busy_time"])
            last_ts = datetime.datetime.fromtimestamp(self.thread_info["last_action_time"])
            self.diag("INFO", "Currently busy threads: {}".format(self.thread_info["current_busy"]))
            self.diag("INFO", "Most used threads: {} at {}".format(self.thread_info["max_busy"], max_ts))
            self.diag("INFO", "Last activity: {}".format(last_ts))
            self.diag("INFO", "--------------------------------------------------")
            # natural_keys sorts thread-2 before thread-10
            for thread in sorted(self.thread_info["threads"], key=self.natural_keys):
                ts = datetime.datetime.fromtimestamp(self.thread_info["threads"][thread]["time_called"])
                self.diag("INFO",
                          "{} - current callback: {} since {}, alive: {}".format(
                              thread,
                              self.thread_info["threads"][thread]["callback"],
                              ts,
                              self.thread_info["threads"][thread]["thread"].is_alive()
                          ))
        self.diag("INFO", "--------------------------------------------------")
def get_callback_entries(self):
callbacks = {}
for name in self.callbacks.keys():
callbacks[name] = {}
for uuid_ in self.callbacks[name]:
callbacks[name][uuid_] = {}
if "entity" in callbacks[name][uuid_]:
callbacks[name][uuid_]["entity"] = self.callbacks[name][uuid_]["entity"]
else:
callbacks[name][uuid_]["entity"] = None
callbacks[name][uuid_]["type"] = self.callbacks[name][uuid_]["type"]
callbacks[name][uuid_]["kwargs"] = self.callbacks[name][uuid_]["kwargs"]
callbacks[name][uuid_]["function"] = self.callbacks[name][uuid_]["function"]
callbacks[name][uuid_]["name"] = self.callbacks[name][uuid_]["name"]
return callbacks
#
# Constraints
#
def check_constraint(self, key, value, app):
unconstrained = True
if key in app.list_constraints():
method = getattr(app, key)
unconstrained = method(value)
return unconstrained
def check_time_constraint(self, args, name):
unconstrained = True
if "constrain_start_time" in args or "constrain_end_time" in args:
if "constrain_start_time" not in args:
start_time = "00:00:00"
else:
start_time = args["constrain_start_time"]
if "constrain_end_time" not in args:
end_time = "23:59:59"
else:
end_time = args["constrain_end_time"]
if not self.now_is_between(start_time, end_time, name):
unconstrained = False
return unconstrained
#
# Thread Management
#
    def dispatch_worker(self, name, args):
        """Queue a callback for execution unless an app- or callback-level constraint blocks it."""
        with self.objects_lock:
            unconstrained = True
            #
            # Argument Constraints
            #
            for arg in self.app_config[name].keys():
                constrained = self.check_constraint(arg, self.app_config[name][arg], self.objects[name]["object"])
                if not constrained:
                    unconstrained = False
            if not self.check_time_constraint(self.app_config[name], name):
                unconstrained = False
            #
            # Callback level constraints
            #
            if "kwargs" in args:
                for arg in args["kwargs"].keys():
                    constrained = self.check_constraint(arg, args["kwargs"][arg], self.objects[name]["object"])
                    if not constrained:
                        unconstrained = False
                if not self.check_time_constraint(args["kwargs"], name):
                    unconstrained = False
        if unconstrained:
            # Hand off to a worker thread via the shared queue.
            self.q.put_nowait(args)
    def update_thread_info(self, thread_id, callback, type = None):
        """Record what *thread_id* is doing and maintain the busy-thread statistics.

        callback == "idle" marks the thread as finished; any other value marks
        it busy running that callback.
        """
        if self.log_thread_actions:
            if callback == "idle":
                # NOTE(review): only the first format placeholder is used here;
                # the extra type/callback arguments are silently ignored.
                self.diag("INFO",
                         "{} done".format(thread_id, type, callback))
            else:
                self.diag("INFO",
                         "{} calling {} callback {}".format(thread_id, type, callback))
        with self.thread_info_lock:
            ts = self.now
            self.thread_info["threads"][thread_id]["callback"] = callback
            self.thread_info["threads"][thread_id]["time_called"] = ts
            if callback == "idle":
                self.thread_info["current_busy"] -= 1
            else:
                self.thread_info["current_busy"] += 1
            if self.thread_info["current_busy"] > self.thread_info["max_busy"]:
                self.thread_info["max_busy"] = self.thread_info["current_busy"]
                self.thread_info["max_busy_time"] = ts
            self.thread_info["last_action_time"] = ts
# noinspection PyBroadException
    def worker(self):
        """Worker-thread main loop: pull dispatched callback jobs off self.q and run them.

        Stale jobs (the app was reloaded, so its id no longer matches) are
        discarded. Callback exceptions are logged to the error log and never
        kill the thread.
        """
        while True:
            thread_id = threading.current_thread().name
            args = self.q.get()
            _type = args["type"]
            funcref = args["function"]
            _id = args["id"]
            name = args["name"]
            callback = "{}() in {}".format(funcref.__name__, name)
            app = None
            with self.objects_lock:
                if name in self.objects and self.objects[name]["id"] == _id:
                    app = self.objects[name]["object"]
            if app is not None:
                try:
                    if _type == "timer":
                        self.update_thread_info(thread_id, callback, _type)
                        funcref(self.sanitize_timer_kwargs(app, args["kwargs"]))
                        self.update_thread_info(thread_id, "idle")
                    elif _type == "attr":
                        entity = args["entity"]
                        attr = args["attribute"]
                        old_state = args["old_state"]
                        new_state = args["new_state"]
                        self.update_thread_info(thread_id, callback, _type)
                        funcref(entity, attr, old_state, new_state,
                                self.sanitize_state_kwargs(app, args["kwargs"]))
                        self.update_thread_info(thread_id, "idle")
                    elif _type == "event":
                        data = args["data"]
                        self.update_thread_info(thread_id, callback, _type)
                        funcref(args["event"], data, args["kwargs"])
                        self.update_thread_info(thread_id, "idle")
                except:
                    self.err("WARNING", '-' * 60)
                    self.err("WARNING", "Unexpected error in worker for App {}:".format(name))
                    # NOTE(review): "Worker Ags" looks like a typo for "Worker Args".
                    self.err("WARNING", "Worker Ags: {}".format(args))
                    self.err("WARNING", '-' * 60)
                    self.err("WARNING", traceback.format_exc())
                    self.err("WARNING", '-' * 60)
                    if self.errfile != "STDERR" and self.logfile != "STDOUT":
                        self.log("WARNING", "Logged an error to {}".format(self.errfile))
            else:
                self.log("WARNING", "Found stale callback for {} - discarding".format(name))
            self.q.task_done()
#
# State
#
def entity_exists(self, namespace, entity):
with self.state_lock:
if namespace in self.state and entity in self.state[namespace]:
return True
else:
return False
    def add_state_callback(self, name, namespace, entity, cb, kwargs):
        """Register a state callback for app *name* and return its uuid handle.

        With immediate=True plus "new" and "duration" in kwargs, the duration
        timer is armed right away if the entity is already in the target state.
        """
        with self.callbacks_lock:
            if name not in self.callbacks:
                self.callbacks[name] = {}
            handle = uuid.uuid4()
            with self.objects_lock:
                self.callbacks[name][handle] = {
                    "name": name,
                    "id": self.objects[name]["id"],
                    "type": "state",
                    "function": cb,
                    "entity": entity,
                    "namespace": namespace,
                    "kwargs": kwargs
                }
        #
        # In the case of an "immediate" parameter,
        # start the clock immediately if the device is already in the new state
        #
        if "immediate" in kwargs and kwargs["immediate"] is True:
            if entity is not None and "new" in kwargs and "duration" in kwargs:
                with self.state_lock:
                    if self.state[namespace][entity]["state"] == kwargs["new"]:
                        exec_time = self.get_now_ts() + int(kwargs["duration"])
                        kwargs["_duration"] = self.insert_schedule(
                            name, exec_time, cb, False, None,
                            entity=entity,
                            attribute=None,
                            old_state=None,
                            new_state=kwargs["new"], **kwargs
                        )
        return handle
def cancel_state_callback(self, handle, name):
with self.callbacks_lock:
if name not in self.callbacks or handle not in self.callbacks[name]:
self.log("WARNING", "Invalid callback in cancel_state_callback() from app {}".format(name))
if name in self.callbacks and handle in self.callbacks[name]:
del self.callbacks[name][handle]
if name in self.callbacks and self.callbacks[name] == {}:
del self.callbacks[name]
    def info_state_callback(self, handle, name):
        """Return (namespace, entity, attribute, sanitized kwargs) for a state callback.

        Raises:
            ValueError: when the handle is not registered for *name*.
        """
        with self.callbacks_lock:
            if name in self.callbacks and handle in self.callbacks[name]:
                callback = self.callbacks[name][handle]
                with self.objects_lock:
                    return (
                        callback["namespace"],
                        callback["entity"],
                        callback["kwargs"].get("attribute", None),
                        self.sanitize_state_kwargs(self.objects[name]["object"], callback["kwargs"])
                    )
            else:
                raise ValueError("Invalid handle: {}".format(handle))
def get_entity(self, namespace, entity_id):
with self.state_lock:
if namespace in self.state:
if entity_id in self.state[namespace]:
return self.state[namespace][entity_id]
else:
return None
else:
self.log("WARNING", "Unknown namespace: {}".format(namespace))
return None
    def get_state(self, namespace, device, entity, attribute):
        """Return a deep copy of cached state, narrowed by the non-None arguments.

        device None -> whole namespace; entity None -> all entities of the
        device; attribute None -> the entity's "state" value; attribute "all"
        -> the full entity dict; otherwise the named attribute (top-level or
        under "attributes"), or None if absent.
        """
        with self.state_lock:
            if device is None:
                return deepcopy(self.state[namespace])
            elif entity is None:
                devices = {}
                for entity_id in self.state[namespace].keys():
                    thisdevice, thisentity = entity_id.split(".")
                    if device == thisdevice:
                        devices[entity_id] = self.state[namespace][entity_id]
                return deepcopy(devices)
            elif attribute is None:
                entity_id = "{}.{}".format(device, entity)
                if entity_id in self.state[namespace]:
                    return deepcopy(self.state[namespace][entity_id]["state"])
                else:
                    return None
            else:
                entity_id = "{}.{}".format(device, entity)
                if attribute == "all":
                    if entity_id in self.state[namespace]:
                        return deepcopy(self.state[namespace][entity_id])
                    else:
                        return None
                else:
                    # NOTE(review): unlike the branches above, this path does not
                    # guard against an unknown entity_id and would raise KeyError.
                    if attribute in self.state[namespace][entity_id]["attributes"]:
                        return deepcopy(self.state[namespace][entity_id]["attributes"][
                            attribute])
                    elif attribute in self.state[namespace][entity_id]:
                        return deepcopy(self.state[namespace][entity_id][attribute])
                    else:
                        return None
    def set_state(self, namespace, entity, state):
        """Overwrite the cached state for *entity* in *namespace* (no event is fired)."""
        with self.state_lock:
            self.state[namespace][entity] = state
#
# App State
#
    async def appstate_loop(self):
        """Drain the app-state queue, forwarding each item to state_update() until shutdown."""
        while not self.stopping:
            args = await self.appq.get()
            namespace = args["namespace"]
            await self.state_update(namespace, args)
            self.appq.task_done()
    def set_app_state(self, namespace, entity_id, state):
        """Update cached state for an app-owned entity and queue a state_changed event.

        Silently ignores entity_ids that are None or not in "device.entity" form.
        """
        self.log("DEBUG", "set_app_state: {}".format(entity_id))
        #print(state)
        if entity_id is not None and "." in entity_id:
            with self.state_lock:
                if entity_id in self.state[namespace]:
                    old_state = self.state[namespace][entity_id]
                else:
                    old_state = None
                data = {"entity_id": entity_id, "new_state": state, "old_state": old_state}
                args = {"namespace": namespace, "event_type": "state_changed", "data": data}
                self.state[namespace][entity_id] = state
                # appstate_loop() picks this up and runs the callbacks.
                self.appq.put_nowait(args)
#
# Events
#
    def add_event_callback(self, _name, namespace, cb, event, **kwargs):
        """Register an event callback for app *_name* and return its uuid handle.

        event may be None to match every event type; extra kwargs act as
        equality filters on the event data (see process_event).
        """
        with self.callbacks_lock:
            if _name not in self.callbacks:
                self.callbacks[_name] = {}
            handle = uuid.uuid4()
            with self.objects_lock:
                self.callbacks[_name][handle] = {
                    "name": _name,
                    "id": self.objects[_name]["id"],
                    "type": "event",
                    "function": cb,
                    "namespace": namespace,
                    "event": event,
                    "kwargs": kwargs
                }
        return handle
def cancel_event_callback(self, name, handle):
with self.callbacks_lock:
if name in self.callbacks and handle in self.callbacks[name]:
del self.callbacks[name][handle]
if name in self.callbacks and self.callbacks[name] == {}:
del self.callbacks[name]
def info_event_callback(self, name, handle):
with self.callbacks_lock:
if name in self.callbacks and handle in self.callbacks[name]:
callback = self.callbacks[name][handle]
return callback["event"], callback["kwargs"].copy()
else:
raise ValueError("Invalid handle: {}".format(handle))
#
# Scheduler
#
def cancel_timer(self, name, handle):
self.log("DEBUG", "Canceling timer for {}".format(name))
with self.schedule_lock:
if name in self.schedule and handle in self.schedule[name]:
del self.schedule[name][handle]
if name in self.schedule and self.schedule[name] == {}:
del self.schedule[name]
# noinspection PyBroadException
    def exec_schedule(self, name, entry, args):
        """Fire one due scheduler entry, then reschedule (repeating) or delete it.

        Entries marked "inactive" are skipped. Entries carrying an "entity"
        are dispatched as state-duration callbacks, everything else as plain
        timers. On error, the entry is logged and removed so a broken app
        cannot wedge the scheduler.
        """
        try:
            # Locking performed in calling function
            if "inactive" in args:
                return
            # Call function
            with self.objects_lock:
                if "entity" in args["kwargs"]:
                    self.dispatch_worker(name, {
                        "name": name,
                        "id": self.objects[name]["id"],
                        "type": "attr",
                        "function": args["callback"],
                        "attribute": args["kwargs"]["attribute"],
                        "entity": args["kwargs"]["entity"],
                        "new_state": args["kwargs"]["new_state"],
                        "old_state": args["kwargs"]["old_state"],
                        "kwargs": args["kwargs"],
                    })
                else:
                    self.dispatch_worker(name, {
                        "name": name,
                        "id": self.objects[name]["id"],
                        "type": "timer",
                        "function": args["callback"],
                        "kwargs": args["kwargs"],
                    })
            # If it is a repeating entry, rewrite with new timestamp
            if args["repeat"]:
                if args["type"] == "next_rising" or args["type"] == "next_setting":
                    # It's sunrise or sunset - if the offset is negative we
                    # won't know the next rise or set time yet so mark as inactive
                    # So we can adjust with a scan at sun rise/set
                    if args["offset"] < 0:
                        args["inactive"] = 1
                    else:
                        # We have a valid time for the next sunrise/set so use it
                        c_offset = self.get_offset(args)
                        args["timestamp"] = self.calc_sun(args["type"]) + c_offset
                        args["offset"] = c_offset
                else:
                    # Not sunrise or sunset so just increment
                    # the timestamp with the repeat interval
                    args["basetime"] += args["interval"]
                    args["timestamp"] = args["basetime"] + self.get_offset(args)
            else: # Otherwise just delete
                del self.schedule[name][entry]
        except:
            self.err("WARNING", '-' * 60)
            self.err(
                "WARNING",
                "Unexpected error during exec_schedule() for App: {}".format(name)
            )
            self.err("WARNING", "Args: {}".format(args))
            self.err("WARNING", '-' * 60)
            self.err("WARNING", traceback.format_exc())
            self.err("WARNING", '-' * 60)
            if self.errfile != "STDERR" and self.logfile != "STDOUT":
                # When explicitly logging to stdout and stderr, suppress
                # verbose_log messages about writing an error (since they show up anyway)
                self.log("WARNING", "Logged an error to {}".format(self.errfile))
            self.err("WARNING", "Scheduler entry has been deleted")
            self.err("WARNING", '-' * 60)
            del self.schedule[name][entry]
    def process_sun(self, action):
        """Re-arm sun-based schedule entries after a new sun event time is known.

        Args:
            action: either "next_rising" or "next_setting".

        Entries marked "inactive" (their negative offset meant the next event
        time wasn't known when they last fired) get a fresh timestamp and
        offset computed from the newly calculated sun event.
        """
        self.log(
            "DEBUG",
            "Process sun: {}, next sunrise: {}, next sunset: {}".format(
                action, self.sun["next_rising"], self.sun["next_setting"]
            )
        )
        with self.schedule_lock:
            for name in self.schedule.keys():
                for entry in sorted(
                    self.schedule[name].keys(),
                    key=lambda uuid_: self.schedule[name][uuid_]["timestamp"]
                ):
                    schedule = self.schedule[name][entry]
                    if schedule["type"] == action and "inactive" in schedule:
                        del schedule["inactive"]
                        # Event time is known now: real fire time is the sun
                        # event plus the configured/random offset.
                        c_offset = self.get_offset(schedule)
                        schedule["timestamp"] = self.calc_sun(action) + c_offset
                        schedule["offset"] = c_offset
def calc_sun(self, type_):
# convert to a localized timestamp
return self.sun[type_].timestamp()
def info_timer(self, handle, name):
with self.schedule_lock:
if name in self.schedule and handle in self.schedule[name]:
callback = self.schedule[name][handle]
return (
datetime.datetime.fromtimestamp(callback["timestamp"]),
callback["interval"],
self.sanitize_timer_kwargs(self.objects[name]["object"], callback["kwargs"])
)
else:
raise ValueError("Invalid handle: {}".format(handle))
def init_sun(self):
latitude = self.latitude
longitude = self.longitude
if -90 > latitude < 90:
raise ValueError("Latitude needs to be -90 .. 90")
if -180 > longitude < 180:
raise ValueError("Longitude needs to be -180 .. 180")
elevation = self.elevation
self.tz = pytz.timezone(self.time_zone)
self.location = astral.Location((
'', '', latitude, longitude, self.tz.zone, elevation
))
    def update_sun(self):
        """Recompute the next sunrise and sunset and re-arm any sun-based callbacks.

        Scans forward day by day (starting yesterday) until the first sunrise
        and sunset strictly after "now"; when either event changes (i.e. we
        rolled past it), process_sun() re-activates the matching entries.
        """
        #now = datetime.datetime.now(self.tz)
        #now = pytz.utc.localize(self.get_now())
        now = self.tz.localize(self.get_now())
        mod = -1
        while True:
            try:
                next_rising_dt = self.location.sunrise(
                    (now + datetime.timedelta(days=mod)).date(), local=False
                )
                if next_rising_dt > now:
                    break
            except astral.AstralError:
                # No sunrise on this date (e.g. polar day/night) - keep scanning.
                pass
            mod += 1
        mod = -1
        while True:
            try:
                next_setting_dt = self.location.sunset(
                    (now + datetime.timedelta(days=mod)).date(), local=False
                )
                if next_setting_dt > now:
                    break
            except astral.AstralError:
                pass
            mod += 1
        old_next_rising_dt = self.sun.get("next_rising")
        old_next_setting_dt = self.sun.get("next_setting")
        self.sun["next_rising"] = next_rising_dt
        self.sun["next_setting"] = next_setting_dt
        # An event moved: reschedule the "inactive" entries waiting on it.
        if old_next_rising_dt is not None and old_next_rising_dt != self.sun["next_rising"]:
            # dump_schedule()
            self.process_sun("next_rising")
            # dump_schedule()
        if old_next_setting_dt is not None and old_next_setting_dt != self.sun["next_setting"]:
            # dump_schedule()
            self.process_sun("next_setting")
            # dump_schedule()
@staticmethod
def get_offset(kwargs):
if "offset" in kwargs["kwargs"]:
if "random_start" in kwargs["kwargs"] \
or "random_end" in kwargs["kwargs"]:
raise ValueError(
"Can't specify offset as well as 'random_start' or "
"'random_end' in 'run_at_sunrise()' or 'run_at_sunset()'"
)
else:
offset = kwargs["kwargs"]["offset"]
else:
rbefore = kwargs["kwargs"].get("random_start", 0)
rafter = kwargs["kwargs"].get("random_end", 0)
offset = random.randint(rbefore, rafter)
# verbose_log(conf.logger, "INFO", "sun: offset = {}".format(offset))
return offset
    def insert_schedule(self, name, utc, callback, repeat, type_, **kwargs):
        """Create a scheduler entry for app *name* and return its opaque handle.

        Args:
            name: app name owning the entry.
            utc: base fire time (POSIX seconds); truncated to int.
            callback: function to invoke when due.
            repeat: whether the entry re-arms itself after firing.
            type_: entry type, e.g. "timer", "next_rising", "next_setting".
            **kwargs: may carry "interval" (repeat period) plus "offset" or
                "random_start"/"random_end" (see get_offset()).
        """
        with self.schedule_lock:
            if name not in self.schedule:
                self.schedule[name] = {}
            handle = uuid.uuid4()
            utc = int(utc)
            c_offset = self.get_offset({"kwargs": kwargs})
            ts = utc + c_offset
            interval = kwargs.get("interval", 0)
            # objects_lock guards the read of the app's id.
            with self.objects_lock:
                self.schedule[name][handle] = {
                    "name": name,
                    "id": self.objects[name]["id"],
                    "callback": callback,
                    "timestamp": ts,
                    "interval": interval,
                    "basetime": utc,
                    "repeat": repeat,
                    "offset": c_offset,
                    "type": type_,
                    "kwargs": kwargs
                }
                # verbose_log(conf.logger, "INFO", conf.schedule[name][handle])
        return handle
def get_scheduler_entries(self):
schedule = {}
for name in self.schedule.keys():
schedule[name] = {}
for entry in sorted(
self.schedule[name].keys(),
key=lambda uuid_: self.schedule[name][uuid_]["timestamp"]
):
schedule[name][entry] = {}
schedule[name][entry]["timestamp"] = self.schedule[name][entry]["timestamp"]
schedule[name][entry]["type"] = self.schedule[name][entry]["type"]
schedule[name][entry]["name"] = self.schedule[name][entry]["name"]
schedule[name][entry]["basetime"] = self.schedule[name][entry]["basetime"]
schedule[name][entry]["repeat"] = self.schedule[name][entry]["basetime"]
schedule[name][entry]["offset"] = self.schedule[name][entry]["basetime"]
schedule[name][entry]["interval"] = self.schedule[name][entry]["basetime"]
schedule[name][entry]["kwargs"] = self.schedule[name][entry]["basetime"]
schedule[name][entry]["callback"] = self.schedule[name][entry]["callback"]
return schedule
def is_dst(self):
return bool(time.localtime(self.get_now_ts()).tm_isdst)
def get_now(self):
return datetime.datetime.fromtimestamp(self.now)
def get_now_ts(self):
return self.now
def now_is_between(self, start_time_str, end_time_str, name=None):
start_time = self.parse_time(start_time_str, name)
end_time = self.parse_time(end_time_str, name)
now = self.get_now()
start_date = now.replace(
hour=start_time.hour, minute=start_time.minute,
second=start_time.second
)
end_date = now.replace(
hour=end_time.hour, minute=end_time.minute, second=end_time.second
)
if end_date < start_date:
# Spans midnight
if now < start_date and now < end_date:
now = now + datetime.timedelta(days=1)
end_date = end_date + datetime.timedelta(days=1)
return start_date <= now <= end_date
def sunset(self):
return datetime.datetime.fromtimestamp(self.calc_sun("next_setting"))
def sunrise(self):
return datetime.datetime.fromtimestamp(self.calc_sun("next_rising"))
def parse_time(self, time_str, name=None):
parsed_time = None
parts = re.search('^(\d+):(\d+):(\d+)', time_str)
if parts:
parsed_time = datetime.time(
int(parts.group(1)), int(parts.group(2)), int(parts.group(3))
)
else:
if time_str == "sunrise":
parsed_time = self.sunrise().time()
elif time_str == "sunset":
parsed_time = self.sunset().time()
else:
parts = re.search(
'^sunrise\s*([+-])\s*(\d+):(\d+):(\d+)', time_str
)
if parts:
if parts.group(1) == "+":
parsed_time = (self.sunrise() + datetime.timedelta(
hours=int(parts.group(2)), minutes=int(parts.group(3)),
seconds=int(parts.group(4))
)).time()
else:
parsed_time = (self.sunrise() - datetime.timedelta(
hours=int(parts.group(2)), minutes=int(parts.group(3)),
seconds=int(parts.group(4))
)).time()
else:
parts = re.search(
'^sunset\s*([+-])\s*(\d+):(\d+):(\d+)', time_str
)
if parts:
if parts.group(1) == "+":
parsed_time = (self.sunset() + datetime.timedelta(
hours=int(parts.group(2)),
minutes=int(parts.group(3)),
seconds=int(parts.group(4))
)).time()
else:
parsed_time = (self.sunset() - datetime.timedelta(
hours=int(parts.group(2)),
minutes=int(parts.group(3)),
seconds=int(parts.group(4))
)).time()
if parsed_time is None:
if name is not None:
raise ValueError(
"{}: invalid time string: {}".format(name, time_str))
else:
raise ValueError("invalid time string: {}".format(time_str))
return parsed_time
def dump_sun(self):
self.diag("INFO", "--------------------------------------------------")
self.diag("INFO", "Sun")
self.diag("INFO", "--------------------------------------------------")
self.diag("INFO", self.sun)
self.diag("INFO", "--------------------------------------------------")
    def dump_schedule(self):
        """Write the full scheduler table to the diagnostic log, entries ordered
        by timestamp within each app."""
        if self.schedule == {}:
            self.diag("INFO", "Schedule is empty")
        else:
            self.diag("INFO", "--------------------------------------------------")
            self.diag("INFO", "Scheduler Table")
            self.diag("INFO", "--------------------------------------------------")
            for name in self.schedule.keys():
                self.diag( "INFO", "{}:".format(name))
                for entry in sorted(
                    self.schedule[name].keys(),
                    key=lambda uuid_: self.schedule[name][uuid_]["timestamp"]
                ):
                    self.diag(
                        "INFO",
                        "  Timestamp: {} - data: {}".format(
                            time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(
                                self.schedule[name][entry]["timestamp"]
                            )),
                            self.schedule[name][entry]
                        )
                    )
            self.diag("INFO", "--------------------------------------------------")
    async def do_every(self, period, f):
        """Drive callback *f* roughly every *period* wall-clock seconds, passing an
        internal timestamp that advances by self.interval per tick.

        *f* may return a different timestamp to resynchronise the internal
        clock (used by do_every_tick() on clock-skew detection).
        """
        #
        # We already set self.now for DST calculation and initial sunset,
        # but lets reset it at the start of the timer loop to avoid an initial clock skew
        #
        if self.starttime:
            self.now = datetime.datetime.strptime(self.starttime, "%Y-%m-%d %H:%M:%S").timestamp()
        else:
            self.now = datetime.datetime.now().timestamp()
        t = math.floor(self.now)
        count = 0
        t_ = math.floor(time.time())
        while not self.stopping:
            count += 1
            # Sleep until the next scheduled wall-clock tick; clamp at 0 if late.
            delay = max(t_ + count * period - time.time(), 0)
            await asyncio.sleep(delay)
            t += self.interval
            r = await f(t)
            if r is not None and r != t:
                # Callback requested a resync: restart tick counting from r.
                # print("r: {}, t: {}".format(r,t))
                t = r
                t_ = r
                count = 0
#
# Scheduler Loop
#
# noinspection PyBroadException,PyBroadException
    async def do_every_tick(self, utc):
        """One scheduler tick: advance the clock to *utc*, handle end-of-run and
        clock skew, refresh sun/DST state, then fire every due schedule entry.

        Returns:
            *utc*, or a corrected real-time timestamp when clock skew was
            detected (do_every() uses the return value to resynchronise).
        """
        try:
            start_time = datetime.datetime.now().timestamp()
            self.now = utc
            # If we have reached endtime bail out
            if self.endtime is not None and self.get_now() >= self.endtime:
                self.log("INFO", "End time reached, exiting")
                if self.stop_function is not None:
                    self.stop_function()
                else:
                    #
                    # We aren't in a standalone environment so the best we can do is terminate the AppDaemon parts
                    #
                    self.stop()
            if self.realtime:
                real_now = datetime.datetime.now().timestamp()
                delta = abs(utc - real_now)
                if delta > self.max_clock_skew:
                    self.log("WARNING",
                              "Scheduler clock skew detected - delta = {} - resetting".format(delta))
                    return real_now
            # Update sunrise/sunset etc.
            self.update_sun()
            # Check if we have entered or exited DST - if so, reload apps
            # to ensure all time callbacks are recalculated
            now_dst = self.is_dst()
            if now_dst != self.was_dst:
                self.log(
                    "INFO",
                    "Detected change in DST from {} to {} -"
                    " reloading all modules".format(self.was_dst, now_dst)
                )
                # dump_schedule()
                self.log("INFO", "-" * 40)
                await utils.run_in_executor(self.loop, self.executor, self.check_app_updates, "__ALL__")
                # dump_schedule()
            self.was_dst = now_dst
            # dump_schedule()
            # test code for clock skew
            # if random.randint(1, 10) == 5:
            # time.sleep(random.randint(1,20))
            # Process callbacks
            # self.log("DEBUG", "Scheduler invoked at {}".format(now))
            with self.schedule_lock:
                for name in self.schedule.keys():
                    # Entries are ordered by timestamp, so we can stop at the
                    # first one that is not yet due.
                    for entry in sorted(
                        self.schedule[name].keys(),
                        key=lambda uuid_: self.schedule[name][uuid_]["timestamp"]
                    ):
                        if self.schedule[name][entry]["timestamp"] <= utc:
                            self.exec_schedule(name, entry, self.schedule[name][entry])
                        else:
                            break
                # Drop apps whose schedule table became empty.
                for k, v in list(self.schedule.items()):
                    if v == {}:
                        del self.schedule[k]
            end_time = datetime.datetime.now().timestamp()
            loop_duration = (int((end_time - start_time) * 1000) / 1000) * 1000
            self.log("DEBUG", "Scheduler loop compute time: {}ms".format(loop_duration))
            if loop_duration > 900:
                self.log("WARNING", "Excessive time spent in scheduler loop: {}ms".format(loop_duration))
            return utc
        except:
            self.err("WARNING", '-' * 60)
            self.err("WARNING", "Unexpected error during do_every_tick()")
            self.err("WARNING", '-' * 60)
            self.err( "WARNING", traceback.format_exc())
            self.err("WARNING", '-' * 60)
            if self.errfile != "STDERR" and self.logfile != "STDOUT":
                # When explicitly logging to stdout and stderr, suppress
                # verbose_log messages about writing an error (since they show up anyway)
                self.log(
                    "WARNING",
                    "Logged an error to {}".format(self.errfile)
                )
def process_meta(self, meta, namespace):
if meta is not None:
for key in self.required_meta:
if getattr(self, key) == None:
if key in meta:
# We have a value so override
setattr(self, key, meta[key])
def get_plugin_from_namespace(self, namespace):
if self.plugins is not None:
for name in self.plugins:
if "namespace" in self.plugins[name] and self.plugins[name]["namespace"] == namespace:
return name
if "namespace" not in self.plugins[name] and namespace == "default":
return name
else:
return None
    async def notify_plugin_started(self, namespace, first_time=False):
        """Handle a plugin (re)connecting: refresh its metadata and complete state,
        reload the apps bound to it (unless this is the first start), and fire a
        global "plugin_started" event."""
        try:
            self.last_plugin_state[namespace] = datetime.datetime.now()
            meta = await self.plugin_objs[namespace].get_metadata()
            self.process_meta(meta, namespace)
            if not self.stopping:
                self.plugin_meta[namespace] = meta
                state = await self.plugin_objs[namespace].get_complete_state()
                with self.state_lock:
                    self.state[namespace] = state
                if not first_time:
                    # Plugin restarted: reload only the apps that depend on it.
                    await utils.run_in_executor(self.loop, self.executor, self.check_app_updates, self.get_plugin_from_namespace(namespace))
                else:
                    self.log("INFO", "Got initial state from namespace {}".format(namespace))
                # NOTE(review): ".format(namespace)" on "plugin_started" is a
                # no-op (the string has no placeholder).
                self.process_event("global", {"event_type": "plugin_started".format(namespace), "data": {"name": namespace}})
        except:
            self.err("WARNING", '-' * 60)
            self.err("WARNING", "Unexpected error during notify_plugin_started()")
            self.err("WARNING", '-' * 60)
            self.err("WARNING", traceback.format_exc())
            self.err("WARNING", '-' * 60)
            if self.errfile != "STDERR" and self.logfile != "STDOUT":
                # When explicitly logging to stdout and stderr, suppress
                # verbose_log messages about writing an error (since they show up anyway)
                self.log(
                    "WARNING",
                    "Logged an error to {}".format(self.errfile)
                )
def notify_plugin_stopped(self, namespace):
self.process_event("global", {"event_type": "plugin_stopped".format(namespace), "data": {"name": namespace}})
#
# Utility Loop
#
    async def utility(self):
        """Main utility loop.

        Waits for all plugins to initialize, verifies required metadata,
        starts the scheduler timer loop, loads the apps, then periodically:
        checks for app/config changes, refreshes plugin state every ~10
        minutes, watches for worker-thread starvation, and runs each plugin's
        utility hook. On shutdown, terminates all apps.
        """
        #
        # Wait for all plugins to initialize
        #
        initialized = False
        while not initialized and self.stopping is False:
            initialized = True
            for plugin in self.plugin_objs:
                if not self.plugin_objs[plugin].active():
                    initialized = False
                    break
            await asyncio.sleep(1)
        # Check if we need to bail due to missing metadata
        for key in self.required_meta:
            if getattr(self, key) == None:
                # No value so bail
                self.err("ERROR", "Required attribute not set or obtainable from any plugin: {}".format(key))
                self.err("ERROR", "AppDaemon is terminating")
                self.stop()
        if not self.stopping:
            #
            # All plugins are loaded and we have initial state
            #
            if self.starttime:
                # Time-travel mode: start the internal clock at the configured time.
                new_now = datetime.datetime.strptime(self.starttime, "%Y-%m-%d %H:%M:%S")
                self.log("INFO", "Starting time travel ...")
                self.log("INFO", "Setting clocks to {}".format(new_now))
                self.now = new_now.timestamp()
            else:
                self.now = datetime.datetime.now().timestamp()
            self.thread_info["max_used"] = 0
            self.thread_info["max_used_time"] = self.now
            # Take a note of DST
            self.was_dst = self.is_dst()
            # Setup sun
            self.init_sun()
            self.update_sun()
            # Create timer loop
            self.log("DEBUG", "Starting timer loop")
            self.loop.create_task(self.do_every(self.tick, self.do_every_tick))
            if self.apps:
                self.log("DEBUG", "Reading Apps")
                await utils.run_in_executor(self.loop, self.executor, self.check_app_updates)
                self.log("INFO", "App initialization complete")
            #
            # Fire APPD Started Event
            #
            self.process_event("global", {"event_type": "appd_started", "data": {}})
            while not self.stopping:
                start_time = datetime.datetime.now().timestamp()
                try:
                    if self.apps:
                        if self.production_mode is False:
                            # Check to see if config has changed
                            await utils.run_in_executor(self.loop, self.executor, self.check_app_updates)
                    # Call me suspicious, but lets update state from the plugins periodically
                    # in case we miss events for whatever reason
                    # Every 10 minutes seems like a good place to start
                    for plugin in self.plugin_objs:
                        if self.plugin_objs[plugin].active():
                            if datetime.datetime.now() - self.last_plugin_state[plugin] > datetime.timedelta(
                                    minutes=10):
                                try:
                                    self.log("DEBUG",
                                             "Refreshing {} state".format(plugin))
                                    state = await self.plugin_objs[plugin].get_complete_state()
                                    with self.state_lock:
                                        self.state[plugin] = state
                                    self.last_plugin_state[plugin] = datetime.datetime.now()
                                except:
                                    self.log("WARNING",
                                             "Unexpected error refreshing {} state - retrying in 10 minutes".format(plugin))
                    # Check for thread starvation
                    qsize = self.q.qsize()
                    if qsize > 0 and qsize % 10 == 0:
                        self.log("WARNING", "Queue size is {}, suspect thread starvation".format(self.q.qsize()))
                        self.dump_threads()
                    # Run utility for each plugin
                    for plugin in self.plugin_objs:
                        self.plugin_objs[plugin].utility()
                except:
                    self.err("WARNING", '-' * 60)
                    self.err("WARNING", "Unexpected error during utility()")
                    self.err("WARNING", '-' * 60)
                    self.err("WARNING", traceback.format_exc())
                    self.err("WARNING", '-' * 60)
                    if self.errfile != "STDERR" and self.logfile != "STDOUT":
                        # When explicitly logging to stdout and stderr, suppress
                        # verbose_log messages about writing an error (since they show up anyway)
                        self.log(
                            "WARNING",
                            "Logged an error to {}".format(self.errfile)
                        )
                end_time = datetime.datetime.now().timestamp()
                loop_duration = (int((end_time - start_time) * 1000) / 1000) * 1000
                self.log("DEBUG", "Util loop compute time: {}ms".format(loop_duration))
                if loop_duration > (self.max_utility_skew * 1000):
                    self.log("WARNING", "Excessive time spent in utility loop: {}ms".format(loop_duration))
                if self.check_app_updates_profile is True:
                    self.diag("INFO", "Profile information for Utility Loop")
                    self.diag("INFO", self.check_app_updates_profile_stats)
                await asyncio.sleep(self.utility_delay)
            #
            # Stopping, so terminate apps.
            #
            self.check_app_updates(exit=True)
#
# AppDaemon API
#
def register_endpoint(self, cb, name):
handle = uuid.uuid4()
with self.endpoints_lock:
if name not in self.endpoints:
self.endpoints[name] = {}
self.endpoints[name][handle] = {"callback": cb, "name": name}
return handle
def unregister_endpoint(self, handle, name):
with self.endpoints_lock:
if name in self.endpoints and handle in self.endpoints[name]:
del self.endpoints[name][handle]
#
# App Management
#
def get_app(self, name):
with self.objects_lock:
if name in self.objects:
return self.objects[name]["object"]
else:
return None
    def term_object(self, name):
        """Tear down app *name*: call its terminate() (if any), then remove its
        object, callbacks, schedule entries, and registered endpoints.

        terminate() is invoked outside objects_lock so a slow/blocking
        terminate cannot hold the lock.
        """
        with self.objects_lock:
            term = None
            if name in self.objects and hasattr(self.objects[name]["object"], "terminate"):
                self.log("INFO", "Calling terminate() for {}".format(name))
                # Call terminate directly rather than via worker thread
                # so we know terminate has completed before we move on
                term = self.objects[name]["object"].terminate
        if term is not None:
            try:
                term()
            except:
                self.err("WARNING", '-' * 60)
                self.err("WARNING", "Unexpected error running terminate() for {}".format(name))
                self.err("WARNING", '-' * 60)
                self.err("WARNING", traceback.format_exc())
                self.err("WARNING", '-' * 60)
                if self.errfile != "STDERR" and self.logfile != "STDOUT":
                    self.log("WARNING", "Logged an error to {}".format(self.errfile))
        # Remove every trace of the app, each structure under its own lock.
        with self.objects_lock:
            if name in self.objects:
                del self.objects[name]
        self.log("DEBUG", "Clearing callbacks for {}".format(name))
        with self.callbacks_lock:
            if name in self.callbacks:
                del self.callbacks[name]
        with self.schedule_lock:
            if name in self.schedule:
                del self.schedule[name]
        with self.endpoints_lock:
            if name in self.endpoints:
                del self.endpoints[name]
    def init_object(self, name):
        """Instantiate app *name* from its configured module/class and call its
        initialize() method.

        The module must already be loaded (see read_app()); if no source file
        is known for it, only a warning is logged.
        """
        app_args = self.app_config[name]
        self.log("INFO",
                 "Initializing app {} using class {} from module {}".format(name, app_args["class"], app_args["module"]))
        if self.get_file_from_module(app_args["module"]) is not None:
            with self.objects_lock:
                # NOTE(review): __import__ returns the top-level package for
                # dotted module names - presumably app modules are flat here.
                modname = __import__(app_args["module"])
                app_class = getattr(modname, app_args["class"])
                self.objects[name] = {
                    "object": app_class(
                        self, name, self.logger, self.error, app_args, self.config, self.app_config, self.global_vars
                    ),
                    "id": uuid.uuid4()
                }
            init = self.objects[name]["object"].initialize
            # Call its initialize function
            try:
                init()
            except:
                self.err("WARNING", '-' * 60)
                self.err("WARNING", "Unexpected error running initialize() for {}".format(name))
                self.err("WARNING", '-' * 60)
                self.err("WARNING", traceback.format_exc())
                self.err("WARNING", '-' * 60)
                if self.errfile != "STDERR" and self.logfile != "STDOUT":
                    self.log("WARNING", "Logged an error to {}".format(self.errfile))
        else:
            self.log("WARNING", "Unable to find module module {} - {} is not initialized".format(app_args["module"], name))
    def read_config(self):
        """Read and merge all app configuration.

        Either the single legacy apps.yaml in the config directory
        (deprecated), or every .yaml file found under the apps directory.
        Apps missing a "class"/"module" entry (except the special
        "global_modules" key) and duplicate app names are skipped with a
        warning. Returns the merged dict, or None if nothing valid was found.
        """
        new_config = None
        if os.path.isfile(self.app_config_file):
            self.log("WARNING", "apps.yaml in the Config directory is deprecated. Please move apps.yaml to the apps directory.")
            new_config = self.read_config_file(self.app_config_file)
        else:
            for root, subdirs, files in os.walk(self.app_dir):
                # Prune excluded directories in place so os.walk skips them.
                subdirs[:] = [d for d in subdirs if d not in self.exclude_dirs]
                if root[-11:] != "__pycache__":
                    for file in files:
                        if file[-5:] == ".yaml":
                            self.log("DEBUG", "Reading {}".format(os.path.join(root, file)))
                            config = self.read_config_file(os.path.join(root, file))
                            valid_apps = {}
                            if type(config).__name__ == "dict":
                                for app in config:
                                    if config[app] is not None:
                                        if app == "global_modules":
                                            valid_apps[app] = config[app]
                                        elif "class" in config[app] and "module" in config[app]:
                                            valid_apps[app] = config[app]
                                        else:
                                            if self.invalid_yaml_warnings:
                                                self.log("WARNING",
                                                         "App '{}' missing 'class' or 'module' entry - ignoring".format(app))
                            else:
                                if self.invalid_yaml_warnings:
                                    self.log("WARNING",
                                             "File '{}' invalid structure - ignoring".format(os.path.join(root, file)))
                            if new_config is None:
                                new_config = {}
                            for app in valid_apps:
                                if app in new_config:
                                    self.log("WARNING",
                                             "File '{}' duplicate app: {} - ignoring".format(os.path.join(root, file), app))
                                else:
                                    new_config[app] = valid_apps[app]
        return new_config
    def check_later_app_configs(self, last_latest):
        """Find app config (.yaml) files modified since *last_latest*.

        Returns a dict with "latest" (newest mtime seen), "files" (paths
        modified after last_latest, plus newly appeared files) and, in the
        multi-file case, "deleted" (paths that have disappeared since the
        previous scan). Also refreshes self.app_config_files.
        """
        if os.path.isfile(self.app_config_file):
            # Legacy single-file mode: report only that one file.
            ts = os.path.getmtime(self.app_config_file)
            return {"latest": ts, "files": [{"name": self.app_config_file, "ts": os.path.getmtime(self.app_config_file)}]}
        else:
            later_files = {}
            app_config_files = []
            later_files["files"] = []
            later_files["latest"] = last_latest
            later_files["deleted"] = []
            for root, subdirs, files in os.walk(self.app_dir):
                # Prune excluded directories in place so os.walk skips them.
                subdirs[:] = [d for d in subdirs if d not in self.exclude_dirs]
                if root[-11:] != "__pycache__":
                    for file in files:
                        if file[-5:] == ".yaml":
                            path = os.path.join(root, file)
                            app_config_files.append(path)
                            ts = os.path.getmtime(path)
                            if ts > last_latest:
                                later_files["files"].append(path)
                            if ts > later_files["latest"]:
                                later_files["latest"] = ts
            # Files known last scan but gone now.
            for file in self.app_config_files:
                if file not in app_config_files:
                    later_files["deleted"].append(file)
            # Files present now that were unknown last scan.
            for file in app_config_files:
                if file not in self.app_config_files:
                    later_files["files"].append(file)
            self.app_config_files = app_config_files
            return later_files
    def read_config_file(self, file):
        """Parse one YAML config file; returns the parsed object, or None on any error
        (YAML errors and unexpected exceptions are logged, not raised)."""
        new_config = None
        try:
            with open(file, 'r') as yamlfd:
                config_file_contents = yamlfd.read()
            try:
                # SECURITY NOTE(review): yaml.load without an explicit Loader
                # uses the full loader, which can construct arbitrary Python
                # objects; consider yaml.safe_load if config files may come
                # from untrusted sources.
                new_config = yaml.load(config_file_contents)
            except yaml.YAMLError as exc:
                self.log("WARNING", "Error loading configuration")
                if hasattr(exc, 'problem_mark'):
                    if exc.context is not None:
                        self.log("WARNING", "parser says")
                        self.log("WARNING", str(exc.problem_mark))
                        self.log("WARNING", str(exc.problem) + " " + str(exc.context))
                    else:
                        self.log("WARNING", "parser says")
                        self.log("WARNING", str(exc.problem_mark))
                        self.log("WARNING", str(exc.problem))
            return new_config
        except:
            self.err("WARNING", '-' * 60)
            self.err("WARNING", "Unexpected error loading config file: {}".format(file))
            self.err("WARNING", '-' * 60)
            self.err("WARNING", traceback.format_exc())
            self.err("WARNING", '-' * 60)
            if self.errfile != "STDERR" and self.logfile != "STDOUT":
                self.log("WARNING", "Logged an error to {}".format(self.errfile))
# noinspection PyBroadException
    def check_config(self):
        """Detect app configuration changes since the last check.

        Re-reads config when any yaml file changed/appeared/disappeared,
        diffs it against self.app_config, and returns
        {"init": {app: 1, ...}, "term": {app: 1, ...}} describing which apps
        must be (re)initialized or terminated. Deleted apps are terminated
        immediately. Returns None when nothing changed or the new config
        could not be read.
        """
        terminate_apps = {}
        initialize_apps = {}
        try:
            latest = self.check_later_app_configs(self.app_config_file_modified)
            self.app_config_file_modified = latest["latest"]
            if latest["files"] or latest["deleted"]:
                self.log("INFO", "Reading config")
                new_config = self.read_config()
                if new_config is None:
                    self.log("WARNING", "New config not applied")
                    return
                for file in latest["deleted"]:
                    self.log("INFO", "{} deleted".format(file))
                for file in latest["files"]:
                    self.log("INFO", "{} added or modified".format(file))
                # Check for changes
                for name in self.app_config:
                    if name in new_config:
                        if self.app_config[name] != new_config[name]:
                            # Something changed, clear and reload
                            self.log("INFO", "App '{}' changed".format(name))
                            terminate_apps[name] = 1
                            initialize_apps[name] = 1
                    else:
                        # Section has been deleted, clear it out
                        self.log("INFO", "App '{}' deleted".format(name))
                        #
                        # Since the entry has been deleted we can't sensibly determine dependencies
                        # So just immediately terminate it
                        #
                        self.term_object(name)
                for name in new_config:
                    if name not in self.app_config:
                        #
                        # New section added!
                        #
                        if "class" in new_config[name] and "module" in new_config[name]:
                            self.log("INFO", "App '{}' added".format(name))
                            initialize_apps[name] = 1
                        elif name == "global_modules":
                            pass
                        else:
                            if self.invalid_yaml_warnings:
                                self.log("WARNING", "App '{}' missing 'class' or 'module' entry - ignoring".format(name))
                self.app_config = new_config
                return {"init": initialize_apps, "term": terminate_apps}
        except:
            self.err("WARNING", '-' * 60)
            self.err("WARNING", "Unexpected error:")
            self.err("WARNING", '-' * 60)
            self.err("WARNING", traceback.format_exc())
            self.err("WARNING", '-' * 60)
            if self.errfile != "STDERR" and self.logfile != "STDOUT":
                self.log("WARNING", "Logged an error to {}".format(self.errfile))
def get_app_from_file(self, file):
module = self.get_module_from_path(file)
for app in self.app_config:
if "module" in self.app_config[app] and self.app_config[app]["module"] == module:
return app
return None
# noinspection PyBroadException
    def read_app(self, file, reload=False):
        """Import (or re-import) the Python module for an app source file.

        On reload, importlib.reload() is used; if the module was never
        successfully imported (KeyError), a fresh import is attempted instead.
        Modules with no matching app config and not listed under
        "global_modules" are skipped with a warning.
        """
        name = os.path.basename(file)
        module_name = os.path.splitext(name)[0]
        # Import the App
        if reload:
            self.log("INFO", "Reloading Module: {}".format(file))
            file, ext = os.path.splitext(name)
            #
            # Reload
            #
            try:
                importlib.reload(self.modules[module_name])
            except KeyError:
                # NOTE(review): this checks "name" (basename with extension)
                # against sys.modules and recurses with the extension-stripped
                # basename rather than the original path - looks suspicious;
                # confirm intended behavior before changing.
                if name not in sys.modules:
                    # Probably failed to compile on initial load
                    # so we need to re-import not reload
                    self.read_app(file)
                else:
                    # A real KeyError!
                    raise
        else:
            app = self.get_app_from_file(file)
            if app is not None:
                self.log("INFO", "Loading App Module: {}".format(file))
                self.modules[module_name] = importlib.import_module(module_name)
            elif "global_modules" in self.app_config and module_name in self.app_config["global_modules"]:
                self.log("INFO", "Loading Global Module: {}".format(file))
                self.modules[module_name] = importlib.import_module(module_name)
            else:
                if self.missing_app_warnings:
                    self.log("WARNING", "No app description found for: {} - ignoring".format(file))
@staticmethod
def get_module_from_path(path):
name = os.path.basename(path)
module_name = os.path.splitext(name)[0]
return module_name
def get_file_from_module(self, mod):
for file in self.monitored_files:
module_name = self.get_module_from_path(file)
if module_name == mod:
return file
return None
    def process_filters(self):
        """Run configured pre-processing filters over files in the apps directory.

        Each filter maps files with "input_ext" to "output_ext" by running
        "command_line" ($1 = input path, $2 = output path). A filter runs only
        for new files or files whose mtime advanced since the last run
        (tracked in self.filter_files).
        """
        if "filters" in self.config:
            for filter in self.config["filters"]:
                for root, subdirs, files in os.walk(self.app_dir, topdown=True):
                    # print(root, subdirs, files)
                    #
                    # Prune dir list
                    #
                    subdirs[:] = [d for d in subdirs if d not in self.exclude_dirs]
                    ext = filter["input_ext"]
                    extlen = len(ext) * -1
                    for file in files:
                        run = False
                        if file[extlen:] == ext:
                            infile = os.path.join(root, file)
                            modified = os.path.getmtime(infile)
                            if infile in self.filter_files:
                                if self.filter_files[infile] < modified:
                                    run = True
                            else:
                                self.log("INFO", "Found new filter file {}".format(infile))
                                run = True
                            if run is True:
                                filtered = True
                                self.log("INFO", "Running filter on {}".format(infile))
                                self.filter_files[infile] = modified
                                # Run the filter
                                outfile = utils.rreplace(infile, ext, filter["output_ext"], 1)
                                command_line = filter["command_line"].replace("$1", infile)
                                command_line = command_line.replace("$2", outfile)
                                try:
                                    # SECURITY NOTE(review): shell=True with a
                                    # config-supplied command line - fine for
                                    # trusted config, dangerous otherwise.
                                    # NOTE(review): the Popen handle is not
                                    # waited on; filter runs asynchronously.
                                    p = subprocess.Popen(command_line, shell=True)
                                except:
                                    self.log("WARNING", '-' * 60)
                                    self.log("WARNING", "Unexpected running filter on: {}:".format(infile))
                                    self.log("WARNING", '-' * 60)
                                    self.log("WARNING", traceback.format_exc())
                                    self.log("WARNING", '-' * 60)
@staticmethod
def file_in_modules(file, modules):
for mod in modules:
if mod["name"] == file:
return True
return False
#@_timeit
    def check_app_updates(self, plugin=None, exit=False):
        """Scan for app source/config changes and apply them.

        Steps: run filters, diff the app config (check_config()), discover
        .py files under app_dir, determine which apps to terminate and/or
        (re)initialize - including those bound to a restarted *plugin*
        ("__ALL__" means every plugin) - then terminate, (re)load modules,
        and initialize in dependency/priority order. With exit=True every app
        is terminated (shutdown path). Optionally profiled when
        self.check_app_updates_profile is set.
        """
        if not self.apps:
            return
        # Lets add some profiling
        pr = None
        if self.check_app_updates_profile is True:
            pr = cProfile.Profile()
            pr.enable()
        # Process filters
        self.process_filters()
        # Get list of apps we need to terminate and/or initialize
        apps = self.check_config()
        found_files = []
        modules = []
        for root, subdirs, files in os.walk(self.app_dir, topdown=True):
            # print(root, subdirs, files)
            #
            # Prune dir list
            #
            subdirs[:] = [d for d in subdirs if d not in self.exclude_dirs]
            if root[-11:] != "__pycache__":
                if root not in self.module_dirs:
                    self.log("INFO", "Adding {} to module import path".format(root))
                    sys.path.insert(0, root)
                    self.module_dirs.append(root)
            for file in files:
                if file[-3:] == ".py":
                    found_files.append(os.path.join(root, file))
        for file in found_files:
            if file == os.path.join(self.app_dir, "__init__.py"):
                continue
            try:
                # check we can actually open the file
                fh = open(file)
                fh.close()
                modified = os.path.getmtime(file)
                if file in self.monitored_files:
                    if self.monitored_files[file] < modified:
                        modules.append({"name": file, "reload": True})
                        self.monitored_files[file] = modified
                else:
                    self.log("DEBUG", "Found module {}".format(file))
                    modules.append({"name": file, "reload": False})
                    self. monitored_files[file] = modified
            except IOError as err:
                self.log("WARNING",
                         "Unable to read app {}: {} - skipping".format(file, err))
        # Check for deleted modules and add them to the terminate list
        deleted_modules = []
        for file in self.monitored_files:
            if file not in found_files or exit is True:
                deleted_modules.append(file)
                self.log("INFO", "Removing module {}".format(file))
        for file in deleted_modules:
            del self.monitored_files[file]
            for app in self.apps_per_module(self.get_module_from_path(file)):
                apps["term"][app] = 1
        # Add any apps we need to reload because of file changes
        for module in modules:
            for app in self.apps_per_module(self.get_module_from_path(module["name"])):
                if module["reload"]:
                    apps["term"][app] = 1
                apps["init"][app] = 1
            if "global_modules" in self.app_config:
                for gm in utils.single_or_list(self.app_config["global_modules"]):
                    if gm == self.get_module_from_path(module["name"]):
                        for app in self.apps_per_global_module(gm):
                            if module["reload"]:
                                apps["term"][app] = 1
                            apps["init"][app] = 1
        if plugin is not None:
            self.log("INFO", "Processing restart for {}".format(plugin))
            # This is a restart of one of the plugins so check which apps need to be restarted
            for app in self.app_config:
                reload = False
                if app == "global_modules":
                    continue
                if "plugin" in self.app_config[app]:
                    for this_plugin in utils.single_or_list(self.app_config[app]["plugin"]):
                        if this_plugin == plugin:
                            # We got a match so do the reload
                            reload = True
                            break
                        elif plugin == "__ALL__":
                            reload = True
                            break
                else:
                    # No plugin dependency specified, reload to err on the side of caution
                    reload = True
                if reload is True:
                    apps["term"][app] = 1
                    apps["init"][app] = 1
        # Terminate apps
        if apps is not None and apps["term"]:
            # Highest priority first for termination (reverse of init order).
            prio_apps = self.get_app_deps_and_prios(apps["term"])
            for app in sorted(prio_apps, key=prio_apps.get, reverse=True):
                try:
                    self.log("INFO", "Terminating {}".format(app))
                    self.term_object(app)
                except:
                    self.err("WARNING", '-' * 60)
                    self.err("WARNING", "Unexpected error terminating app: {}:".format(app))
                    self.err("WARNING", '-' * 60)
                    self.err("WARNING", traceback.format_exc())
                    self.err("WARNING", '-' * 60)
                    if self.errfile != "STDERR" and self.logfile != "STDOUT":
                        self.log("WARNING", "Logged an error to {}".format(self.errfile))
        # Load/reload modules
        for mod in modules:
            try:
                self.read_app(mod["name"], mod["reload"])
            except:
                self.err("WARNING", '-' * 60)
                self.err("WARNING", "Unexpected error loading module: {}:".format(mod["name"]))
                self.err("WARNING", '-' * 60)
                self.err("WARNING", traceback.format_exc())
                self.err("WARNING", '-' * 60)
                if self.errfile != "STDERR" and self.logfile != "STDOUT":
                    # NOTE(review): message below looks copy-pasted; the
                    # sibling handlers log "Logged an error to {errfile}" here.
                    self.log("WARNING", "Unexpected error loading module: {}:".format(mod["name"]))
                self.log("WARNING", "Removing associated apps:")
                module = self.get_module_from_path(mod["name"])
                for app in self.app_config:
                    if self.app_config[app]["module"] == module:
                        if apps["init"] and app in apps["init"]:
                            del apps["init"][app]
                            self.log("WARNING", "{}".format(app))
        if apps is not None and apps["init"]:
            prio_apps = self.get_app_deps_and_prios(apps["init"])
            # Initialize Apps
            for app in sorted(prio_apps, key=prio_apps.get):
                try:
                    self.init_object(app)
                except:
                    self.err("WARNING", '-' * 60)
                    self.err("WARNING", "Unexpected error initializing app: {}:".format(app))
                    self.err("WARNING", '-' * 60)
                    self.err("WARNING", traceback.format_exc())
                    self.err("WARNING", '-' * 60)
                    if self.errfile != "STDERR" and self.logfile != "STDOUT":
                        self.log("WARNING", "Logged an error to {}".format(self.errfile))
        if self.check_app_updates_profile is True:
            pr.disable()
        s = io.StringIO()
        sortby = 'cumulative'
        ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
        ps.print_stats()
        self.check_app_updates_profile_stats = s.getvalue()
    def get_app_deps_and_prios(self, applist):
        """Compute an effective initialization priority for the relevant apps.

        Starting from ``applist``, transitively pull in every app that depends
        on one of its members, topologically sort the full app set, and assign
        priorities: apps involved in a dependency chain get monotonically
        increasing values starting just above 50 so they initialize in
        dependency order, while standalone apps keep their configured
        ``priority`` (default 50).  Returns ``{app_name: float_priority}``
        restricted to the apps of interest.
        """
        # Build a list of modules and their dependencies
        deplist = []
        for app in applist:
            if app not in deplist:
                deplist.append(app)
            # Mutates deplist in place, adding transitive dependents.
            self.get_dependent_apps(app, deplist)
        # Need to give the topological sort a full list of apps or it will fail
        full_list = list(self.app_config.keys())
        deps = []
        for app in full_list:
            dependees = []
            if "dependencies" in self.app_config[app]:
                for dep in utils.single_or_list(self.app_config[app]["dependencies"]):
                    if dep in self.app_config:
                        dependees.append(dep)
                    else:
                        # Unknown dependency: warn and drop it (the app itself
                        # still participates in the sort).
                        self.log("WARNING", "Unable to find app {} in dependencies for {}".format(dep, app))
                        self.log("WARNING", "Ignoring app {}".format(app))
            deps.append((app, dependees))
        prio_apps = {}
        # 50.1 starts dependency-chained apps just above the default 50; the
        # tiny increment preserves topological order between them.
        prio = float(50.1)
        try:
            for app in self.topological_sort(deps):
                if "dependencies" in self.app_config[app] or self.app_has_dependents(app):
                    prio_apps[app] = prio
                    prio += float(0.0001)
                else:
                    if "priority" in self.app_config[app]:
                        prio_apps[app] = float(self.app_config[app]["priority"])
                    else:
                        prio_apps[app] = float(50)
        except ValueError:
            # topological_sort raises on a cycle after logging details; fall
            # through with whatever priorities were assigned so far.
            pass
        # now we remove the ones we aren't interested in
        final_apps = {}
        for app in prio_apps:
            if app in deplist:
                final_apps[app] = prio_apps[app]
        return final_apps
def app_has_dependents(self, name):
for app in self.app_config:
if "dependencies" in self.app_config[app]:
for dep in utils.single_or_list(self.app_config[app]["dependencies"]):
if dep == name:
return True
return False
def get_dependent_apps(self, dependee, deps):
for app in self.app_config:
if "dependencies" in self.app_config[app]:
for dep in utils.single_or_list(self.app_config[app]["dependencies"]):
#print("app= {} dep = {}, dependee = {} deps = {}".format(app, dep, dependee, deps))
if dep == dependee and app not in deps:
deps.append(app)
new_deps = self.get_dependent_apps(app, deps)
if new_deps is not None:
deps.append(new_deps)
    def topological_sort(self, source):
        """Yield app names in dependency order (dependencies before dependents).

        *source* is a list of ``(name, [dependency, ...])`` pairs covering the
        full app set.  Works in passes: each pass emits every entry whose
        remaining dependencies have all been emitted.  Raises ValueError when
        no progress can be made (a cycle, or a dependency that never appears),
        after logging the offending apps.
        """
        pending = [(name, set(deps)) for name, deps in source]  # copy deps so we can modify set in-place
        emitted = []
        while pending:
            next_pending = []
            next_emitted = []
            for entry in pending:
                name, deps = entry
                deps.difference_update(emitted)  # remove deps we emitted last pass
                if deps:  # still has deps? recheck during next pass
                    next_pending.append(entry)
                else:  # no more deps? time to emit
                    yield name
                    emitted.append(name)  # <-- not required, but helps preserve original ordering
                    next_emitted.append(name)  # remember what we emitted for difference_update() in next pass
            if not next_emitted:
                # all entries have unmet deps, we have cyclic redundancies
                # since we already know all deps are correct
                self.log("WARNING", "Cyclic or missing app dependencies detected")
                for pend in next_pending:
                    deps = ""
                    for dep in pend[1]:
                        deps += "{} ".format(dep)
                    self.log("WARNING", "{} depends on {}".format(pend[0], deps))
                raise ValueError("cyclic dependancy detected")
            pending = next_pending
            emitted = next_emitted
def apps_per_module(self, module):
apps = []
for app in self.app_config:
if app != "global_modules" and self.app_config[app]["module"] == module:
apps.append(app)
return apps
def apps_per_global_module(self, module):
apps = []
for app in self.app_config:
if "global_dependencies" in self.app_config[app]:
for gm in utils.single_or_list(self.app_config[app]["global_dependencies"]):
if gm == module:
apps.append(app)
return apps
#
# State Updates
#
    def check_and_disapatch(self, name, funcref, entity, attribute, new_state,
                            old_state, cold, cnew, kwargs, uuid_):
        """Evaluate one state callback against a state change and dispatch it.

        (The historical "disapatch" spelling is kept — callers use this name.)
        For ``attribute == "all"`` the callback always fires with the raw
        old/new state dicts.  Otherwise the watched attribute is looked up in
        old/new state (top level first, then under 'attributes'), compared
        against the optional *cold*/*cnew* constraints, and the callback is
        dispatched immediately, armed on a timer when a "duration" constraint
        is present, or its pending duration timer is cancelled when the
        constraints no longer hold.
        """
        # Stash the callback handle so the app can cancel itself from inside.
        kwargs["handle"] = uuid_
        if attribute == "all":
            with self.objects_lock:
                self.dispatch_worker(name, {
                    "name": name,
                    "id": self.objects[name]["id"],
                    "type": "attr",
                    "function": funcref,
                    "attribute": attribute,
                    "entity": entity,
                    "new_state": new_state,
                    "old_state": old_state,
                    "kwargs": kwargs,
                })
        else:
            # Extract the watched attribute from the old state, checking the
            # top level first, then the nested 'attributes' dict.
            if old_state is None:
                old = None
            else:
                if attribute in old_state:
                    old = old_state[attribute]
                elif 'attributes' in old_state and attribute in old_state['attributes']:
                    old = old_state['attributes'][attribute]
                else:
                    old = None
            # Same extraction for the new state.
            if new_state is None:
                new = None
            else:
                if attribute in new_state:
                    new = new_state[attribute]
                elif 'attributes' in new_state and attribute in new_state['attributes']:
                    new = new_state['attributes'][attribute]
                else:
                    new = None
            # A None constraint matches anything.
            if (cold is None or cold == old) and (cnew is None or cnew == new):
                if "duration" in kwargs:
                    # Set a timer: fire only if the state still matches after
                    # "duration" seconds; remember the handle for cancellation.
                    exec_time = self.get_now_ts() + int(kwargs["duration"])
                    kwargs["_duration"] = self.insert_schedule(
                        name, exec_time, funcref, False, None,
                        entity=entity,
                        attribute=attribute,
                        old_state=old,
                        new_state=new, **kwargs
                    )
                else:
                    # Do it now
                    with self.objects_lock:
                        self.dispatch_worker(name, {
                            "name": name,
                            "id": self.objects[name]["id"],
                            "type": "attr",
                            "function": funcref,
                            "attribute": attribute,
                            "entity": entity,
                            "new_state": new,
                            "old_state": old,
                            "kwargs": kwargs
                        })
            else:
                if "_duration" in kwargs:
                    # Constraints no longer met: cancel any armed duration timer.
                    self.cancel_timer(name, kwargs["_duration"])
    def process_state_change(self, namespace, state):
        """Fan a state_changed event out to every matching state callback.

        A callback matches when its namespace agrees (or either side is
        "global") and its entity filter — None (any), a bare device, or a full
        "device.entity" id — matches the changed entity.  Matching callbacks
        are handed to check_and_disapatch(); one-shot callbacks are collected
        and cancelled after the lock is released.
        """
        data = state["data"]
        entity_id = data['entity_id']
        self.log("DEBUG", data)
        device, entity = entity_id.split(".")
        # Process state callbacks
        removes = []
        with self.callbacks_lock:
            for name in self.callbacks.keys():
                for uuid_ in self.callbacks[name]:
                    callback = self.callbacks[name][uuid_]
                    if callback["type"] == "state" and (callback["namespace"] == namespace or callback["namespace"] == "global" or namespace == "global"):
                        cdevice = None
                        centity = None
                        # Split the callback's entity filter into device/entity
                        # parts; a bare device name watches the whole device.
                        if callback["entity"] is not None:
                            if "." not in callback["entity"]:
                                cdevice = callback["entity"]
                                centity = None
                            else:
                                cdevice, centity = callback["entity"].split(".")
                        # Default to watching the "state" attribute.
                        if callback["kwargs"].get("attribute") is None:
                            cattribute = "state"
                        else:
                            cattribute = callback["kwargs"].get("attribute")
                        cold = callback["kwargs"].get("old")
                        cnew = callback["kwargs"].get("new")
                        if cdevice is None:
                            # No entity filter: every state change qualifies.
                            self.check_and_disapatch(
                                name, callback["function"], entity_id,
                                cattribute,
                                data['new_state'],
                                data['old_state'],
                                cold, cnew,
                                callback["kwargs"],
                                uuid_
                            )
                        elif centity is None:
                            # Device-only filter.
                            if device == cdevice:
                                self.check_and_disapatch(
                                    name, callback["function"], entity_id,
                                    cattribute,
                                    data['new_state'],
                                    data['old_state'],
                                    cold, cnew,
                                    callback["kwargs"],
                                    uuid_
                                )
                        elif device == cdevice and entity == centity:
                            # Exact entity match.
                            self.check_and_disapatch(
                                name, callback["function"], entity_id,
                                cattribute,
                                data['new_state'],
                                data['old_state'], cold,
                                cnew,
                                callback["kwargs"],
                                uuid_
                            )
                        # Remove the callback if appropriate
                        remove = callback["kwargs"].get("oneshot", False)
                        if remove:
                            removes.append({"name": callback["name"], "uuid": callback["kwargs"]["handle"]})
        # Cancel one-shots outside the lock scan above.
        for remove in removes:
            self.cancel_state_callback(remove["uuid"], remove["name"])
    async def state_update(self, namespace, data):
        """Entry point for plugin events: update state, run callbacks, refresh UI.

        For state_changed events the global state cache is updated first; then,
        when apps are enabled, the event is routed to state or generic event
        callbacks; finally any registered dashboard is notified.  The broad
        except is a deliberate top-level boundary: an error in one event must
        not kill the update loop, so it is logged in full and swallowed.
        """
        try:
            self.log(
                "DEBUG",
                "Event type:{}:".format(data['event_type'])
            )
            self.log( "DEBUG", data["data"])
            if data['event_type'] == "state_changed":
                entity_id = data['data']['entity_id']
                # First update our global state
                with self.state_lock:
                    self.state[namespace][entity_id] = data['data']['new_state']
            if self.apps is True:
                # Process state changed message
                if data['event_type'] == "state_changed":
                    self.process_state_change(namespace, data)
                else:
                    # Process non-state callbacks
                    self.process_event(namespace, data)
            # Update dashboards
            if self.dashboard is not None:
                await self.dashboard.ws_update(namespace, data)
        except:
            self.log("WARNING", '-' * 60)
            self.log("WARNING", "Unexpected error during state_update()")
            self.log("WARNING", '-' * 60)
            self.log("WARNING", traceback.format_exc())
            self.log("WARNING", '-' * 60)
#
# Event Update
#
    def process_event(self, namespace, data):
        """Dispatch a non-state event to every matching event callback.

        A callback matches when its namespace agrees (or either side is
        "global"), its registered event name is None (any event) or equals the
        incoming event type, and every extra kwarg it was registered with
        matches the corresponding field in the event data (simple equality
        filters).
        """
        with self.callbacks_lock:
            for name in self.callbacks.keys():
                for uuid_ in self.callbacks[name]:
                    callback = self.callbacks[name][uuid_]
                    if callback["namespace"] == namespace or callback["namespace"] == "global" or namespace == "global":
                        if "event" in callback and (
                                callback["event"] is None
                                or data['event_type'] == callback["event"]):
                            # Check any filters
                            _run = True
                            for key in callback["kwargs"]:
                                if key in data["data"] and callback["kwargs"][key] != \
                                        data["data"][key]:
                                    _run = False
                            if _run:
                                with self.objects_lock:
                                    self.dispatch_worker(name, {
                                        "name": name,
                                        "id": self.objects[name]["id"],
                                        "type": "event",
                                        "event": data['event_type'],
                                        "function": callback["function"],
                                        "data": data["data"],
                                        "kwargs": callback["kwargs"]
                                    })
#
# Plugin Management
#
def get_plugin(self, name):
if name in self.plugin_objs:
return self.plugin_objs[name]
else:
return None
def get_plugin_meta(self, namespace):
for name in self.plugins:
if "namespace" not in self.plugins[name] and namespace == "default":
return self.plugin_meta[namespace]
elif "namespace" in self.plugins[name] and self.plugins[name]["namespace"] == namespace:
return self.plugin_meta[namespace]
else:
return None
#
# Utilities
#
def sanitize_state_kwargs(self, app, kwargs):
kwargs_copy = kwargs.copy()
return self._sanitize_kwargs(kwargs_copy, [
"old", "new", "attribute", "duration", "state",
"entity", "_duration", "old_state", "new_state",
"oneshot"
] + app.list_constraints())
def sanitize_timer_kwargs(self, app, kwargs):
kwargs_copy = kwargs.copy()
return self._sanitize_kwargs(kwargs_copy, [
"interval", "constrain_days", "constrain_input_boolean",
] + app.list_constraints())
def _sanitize_kwargs(self, kwargs, keys):
for key in keys:
if key in kwargs:
del kwargs[key]
return kwargs
def log(self, level, message, name="AppDaemon"):
if not self.realtime:
ts = self.get_now()
else:
ts = None
utils.log(self.logger, level, message, name, ts)
def err(self, level, message, name="AppDaemon"):
if not self.realtime:
ts = self.get_now()
else:
ts = None
utils.log(self.error, level, message, name, ts)
def diag(self, level, message, name="AppDaemon"):
if not self.realtime:
ts = self.get_now()
else:
ts = None
utils.log(self.diagnostic, level, message, name, ts)
    def register_dashboard(self, dash):
        # Remember the dashboard object; state_update() pushes updates to it
        # via ws_update() whenever it is not None.
        self.dashboard = dash
    async def dispatch_app_by_name(self, name, args):
        """Invoke the registered API endpoint called *name* with *args*.

        Scans all apps' registered endpoints under the endpoints lock; if
        several share the name, the last one scanned wins.  The callback runs
        in the executor thread pool.  Returns the callback's result, or
        ('', 404) when no endpoint matches.
        """
        with self.endpoints_lock:
            callback = None
            for app in self.endpoints:
                for handle in self.endpoints[app]:
                    if self.endpoints[app][handle]["name"] == name:
                        callback = self.endpoints[app][handle]["callback"]
        if callback is not None:
            return await utils.run_in_executor(self.loop, self.executor, callback, args)
        else:
            return '', 404
|
en
| 0.761304
|
# No locking yet # User Supplied/Defaults # Initialize config file tracking #if os.path.isdir(self.app_dir) is False: # self.log("ERROR", "Invalid value for app_dir: {}".format(self.app_dir)) # return # # Initial Setup # # Create Worker Threads # Load Plugins # # Not a custom plugin, assume it's a built in # # Create utility loop # Create AppState Loop # if ws is not None: # ws.close() # # Diagnostics # # Make a copy without the thread objects # # Constraints # # # Thread Management # # # Argument Constraints # # # Callback level constraints # # noinspection PyBroadException # # State # # # In the case of a quick_start parameter, # start the clock immediately if the device is already in the new state # # # App State # #print(state) # # Events # # # Scheduler # # noinspection PyBroadException # Locking performed in calling function # Call function # If it is a repeating entry, rewrite with new timestamp # It's sunrise or sunset - if the offset is negative we # won't know the next rise or set time yet so mark as inactive # So we can adjust with a scan at sun rise/set # We have a valid time for the next sunrise/set so use it # Not sunrise or sunset so just increment # the timestamp with the repeat interval # Otherwise just delete # When explicitly logging to stdout and stderr, suppress # verbose_log messages about writing an error (since they show up anyway) # convert to a localized timestamp #now = datetime.datetime.now(self.tz) #now = pytz.utc.localize(self.get_now()) # dump_schedule() # dump_schedule() # dump_schedule() # dump_schedule() # verbose_log(conf.logger, "INFO", "sun: offset = {}".format(offset)) # verbose_log(conf.logger, "INFO", conf.schedule[name][handle]) # Spans midnight # # We already set self.now for DST calculation and initial sunset, # but lets reset it at the start of the timer loop to avoid an initial clock skew # # print("r: {}, t: {}".format(r,t)) # # Scheduler Loop # # noinspection PyBroadException,PyBroadException # If we have reached 
endtime bail out # # We aren't in a standalone environment so the best we can do is terminate the AppDaemon parts # # Update sunrise/sunset etc. # Check if we have entered or exited DST - if so, reload apps # to ensure all time callbacks are recalculated # dump_schedule() # dump_schedule() # dump_schedule() # test code for clock skew # if random.randint(1, 10) == 5: # time.sleep(random.randint(1,20)) # Process callbacks # self.log("DEBUG", "Scheduler invoked at {}".format(now)) # When explicitly logging to stdout and stderr, suppress # verbose_log messages about writing an error (since they show up anyway) # We have a value so override # When explicitly logging to stdout and stderr, suppress # verbose_log messages about writing an error (since they show up anyway) # # Utility Loop # # # Wait for all plugins to initialize # # Check if we need to bail due to missing metadata # No value so bail # # All plugins are loaded and we have initial state # # Take a note of DST # Setup sun # Create timer loop # # Fire APPD Started Event # # Check to see if config has changed # Call me suspicious, but lets update state from the plugins periodically # in case we miss events for whatever reason # Every 10 minutes seems like a good place to start # Check for thread starvation # Run utility for each plugin # When explicitly logging to stdout and stderr, suppress # verbose_log messages about writing an error (since they show up anyway) # # Stopping, so terminate apps. # # # AppDaemon API # # # App Management # # Call terminate directly rather than via worker thread # so we know terminate has completed before we move on # Call its initialize function # noinspection PyBroadException # Check for changes # Something changed, clear and reload # Section has been deleted, clear it out # # Since the entry has been deleted we can't sensibly determine dependencies # So just immediately terminate it # # # New section added! 
# # noinspection PyBroadException # Import the App # # Reload # # Probably failed to compile on initial load # so we need to re-import not reload # A real KeyError! # print(root, subdirs, files) # # Prune dir list # # Run the filter #@_timeit # Lets add some profiling # Process filters # Get list of apps we need to terminate and/or initialize # print(root, subdirs, files) # # Prune dir list # # check we can actually open the file # Check for deleted modules and add them to the terminate list # Add any apps we need to reload because of file changes # This is a restart of one of the plugins so check which apps need to be restarted # We got a match so do the reload # No plugin dependency specified, reload to err on the side of caution # Terminate apps # Load/reload modules # Initialize Apps # Build a list of modules and their dependencies # Need to gove the topological sort a full list of apps or it will fail # now we remove the ones we aren't interested in #print("app= {} dep = {}, dependee = {} deps = {}".format(app, dep, dependee, deps)) # copy deps so we can modify set in-place # remove deps we emitted last pass # still has deps? recheck during next pass # no more deps? time to emit # <-- not required, but helps preserve original ordering # remember what we emitted for difference_update() in next pass # all entries have unmet deps, we have cyclic redundancies # since we already know all deps are correct # # State Updates # # Set a timer # Do it now # cancel timer # Process state callbacks # Remove the callback if appropriate #print(remove) # First update our global state # Process state changed message # Process non-state callbacks # Update dashboards # # Event Update # # Check any filters # # Plugin Management # # # Utilities #
| 1.782441
| 2
|
main.py
|
KlaipedaBreeze/testAzureApi
| 0
|
6629439
|
from fastapi import FastAPI
import uvicorn

app = FastAPI()


@app.get("/")
def home():
    """Root endpoint: returns a plain greeting string."""
    return "Hello World"


if __name__ == "__main__":
    # Fix: uvicorn was imported but never used — serve the app directly when
    # this module is executed as a script (module import behavior unchanged).
    uvicorn.run(app, host="127.0.0.1", port=8000)
|
from fastapi import FastAPI
import uvicorn
app = FastAPI()
@app.get("/")
def home():
return "Hello World"
|
none
| 1
| 2.164208
| 2
|
|
scripts/triangle_cuts.py
|
rekka/isosurface-rs
| 0
|
6629440
|
<filename>scripts/triangle_cuts.py
def recur(idx):
    """Recursively emit Rust code covering every sign pattern of a triangle.

    *idx* records the sign choices made so far for the vertex values u0..u2.
    While fewer than three choices exist, emit an if/else on the next vertex
    and recurse down both branches; once all three signs are fixed, emit the
    edge-crossing interpolation line (unless all three signs agree, in which
    case the contour does not cross this triangle).

    Fix: the original mixed Python-2 ``print`` statements with ``print()``
    calls; all prints are now py2/py3-compatible function calls.
    """
    n = len(idx)
    if len(idx) < 3:
        print('if u' + str(n) + ' >= 0. {')
        if len(idx) < 2:
            # Nudge the value so later comparisons cannot hit exactly zero.
            print('    let u' + str(n) + ' = u' + str(n) + ' + tiny;')
        idx.append(0)
        recur(idx)
        idx.pop()
        print('} else {')
        idx.append(1)
        recur(idx)
        idx.pop()
        print('}')
        return
    n = sum(idx)
    if n == 0 or n == 3:
        # All three vertices on the same side: nothing to emit.
        return
    zero = []
    one = []
    for i in range(0, 3):
        if idx[i]:
            one.append(i)
        else:
            zero.append(i)
    pairs = []
    for i in one:
        for j in zero:
            p = sorted([i, j])
            # Edge index: vertices 0-2 share edge 2, otherwise the lower vertex.
            if p[1] - p[0] == 2:
                e = 2
            else:
                e = p[0]
            pairs.append([e, 'interpolate(u{i}, u{j}, v{i}, v{j})'.format(i = p[0], j = p[1])])
    pairs.sort()
    print('emit_line(({}, {}), ({}, {}));'.format(pairs[0][0], pairs[1][0], pairs[0][1], pairs[1][1]))
# Emit the generated Rust snippet: start marker, the tiny epsilon, vertex
# bindings for u0..u2 and v0..v2, the recursive sign-case tree, end marker.
print('// START GENERATED: generated by `scripts/triangle_cuts.py`')
print('let tiny = 1e-15;')
for k in range(3):
    print('let u{0} = u[{0}];'.format(k))
for k in range(3):
    print('let v{0} = v[{0}];'.format(k))
recur([])
print('// END GENERATED')
|
<filename>scripts/triangle_cuts.py
def recur(idx):
n = len(idx)
if len(idx) < 3:
print 'if u' + str(n) + ' >= 0. {'
if len(idx) < 2:
print ' let u' + str(n) + ' = u' + str(n) + ' + tiny;'
idx.append(0)
recur(idx)
idx.pop()
print('} else {')
idx.append(1)
recur(idx)
idx.pop()
print('}')
return
n = sum(idx)
if n == 0 or n == 3:
return
zero = []
one = []
for i in range(0, 3):
if idx[i]:
one.append(i)
else:
zero.append(i)
pairs = []
for i in one:
for j in zero:
p = sorted([i, j])
if p[1] - p[0] == 2:
e = 2
else:
e = p[0]
pairs.append([e, 'interpolate(u{i}, u{j}, v{i}, v{j})'.format(i = p[0], j = p[1])])
pairs.sort()
print('emit_line(({}, {}), ({}, {}));'.format(pairs[0][0], pairs[1][0], pairs[0][1], pairs[1][1]))
print('// START GENERATED: generated by `scripts/triangle_cuts.py`')
print('let tiny = 1e-15;')
for i in range(0, 3):
print('let u{0} = u[{0}];'.format(i))
for i in range(0, 3):
print('let v{0} = v[{0}];'.format(i))
recur([])
print('// END GENERATED')
|
none
| 1
| 2.899645
| 3
|
|
PyMOTW/source/multiprocessing/multiprocessing_subclass.py
|
axetang/AxePython
| 1
|
6629441
|
#!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2008 <NAME> All rights reserved.
#
"""Creating and waiting for a process
"""
#end_pymotw_header
import multiprocessing
class Worker(multiprocessing.Process):
    """Minimal Process subclass whose run() just reports the process name."""
    def run(self):
        # self.name is the auto-assigned process name, e.g. "Worker-1".
        print('In {}'.format(self.name))
        return
if __name__ == '__main__':
    # Spawn five workers (starting each as soon as it is created), then
    # wait for all of them to finish.
    workers = []
    for _ in range(5):
        proc = Worker()
        workers.append(proc)
        proc.start()

    for proc in workers:
        proc.join()
|
#!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2008 <NAME> All rights reserved.
#
"""Creating and waiting for a process
"""
#end_pymotw_header
import multiprocessing
class Worker(multiprocessing.Process):
def run(self):
print('In {}'.format(self.name))
return
if __name__ == '__main__':
jobs = []
for i in range(5):
p = Worker()
jobs.append(p)
p.start()
for j in jobs:
j.join()
|
en
| 0.690029
|
#!/usr/bin/env python3 # encoding: utf-8 # # Copyright (c) 2008 <NAME> All rights reserved. # Creating and waiting for a process #end_pymotw_header
| 3.30199
| 3
|
locs/training/train_utils.py
|
mkofinas/locs
| 16
|
6629442
|
import os
import torch
from torch.utils.tensorboard import SummaryWriter
def build_scheduler(opt, params):
    """Build a StepLR scheduler for *opt* from *params*, or None.

    Returns None unless params contains a truthy 'lr_decay_factor';
    'lr_decay_steps' supplies the step interval.
    """
    decay_factor = params.get('lr_decay_factor')
    if not decay_factor:
        return None
    decay_steps = params.get('lr_decay_steps')
    return torch.optim.lr_scheduler.StepLR(opt, decay_steps, decay_factor)
class build_writers:
    """Context manager yielding TensorBoard writers under <working_dir>/logs/.

    Yields (train, val) SummaryWriters — plus a test writer when is_test is
    True — and closes all of them on exit.
    """
    def __init__(self, working_dir, is_test=False):
        self.writer_dir = os.path.join(working_dir, 'logs/')
        self.is_test = is_test

    def __enter__(self):
        self.train_writer = SummaryWriter(os.path.join(self.writer_dir, 'train'))
        self.val_writer = SummaryWriter(os.path.join(self.writer_dir, 'val'))
        if not self.is_test:
            return self.train_writer, self.val_writer
        self.test_writer = SummaryWriter(os.path.join(self.writer_dir, 'test'))
        return self.train_writer, self.val_writer, self.test_writer

    def __exit__(self, type, value, traceback):
        for writer in (self.train_writer, self.val_writer):
            writer.close()
        if self.is_test:
            self.test_writer.close()
|
import os
import torch
from torch.utils.tensorboard import SummaryWriter
def build_scheduler(opt, params):
lr_decay_factor = params.get('lr_decay_factor')
lr_decay_steps = params.get('lr_decay_steps')
if lr_decay_factor:
return torch.optim.lr_scheduler.StepLR(opt, lr_decay_steps, lr_decay_factor)
else:
return None
class build_writers:
def __init__(self, working_dir, is_test=False):
self.writer_dir = os.path.join(working_dir, 'logs/')
self.is_test = is_test
def __enter__(self):
train_writer_dir = os.path.join(self.writer_dir, 'train')
val_writer_dir = os.path.join(self.writer_dir, 'val')
self.train_writer = SummaryWriter(train_writer_dir)
self.val_writer = SummaryWriter(val_writer_dir)
if self.is_test:
test_writer_dir = os.path.join(self.writer_dir, 'test')
self.test_writer = SummaryWriter(test_writer_dir)
return self.train_writer, self.val_writer, self.test_writer
else:
return self.train_writer, self.val_writer
def __exit__(self, type, value, traceback):
self.train_writer.close()
self.val_writer.close()
if self.is_test:
self.test_writer.close()
|
none
| 1
| 2.285101
| 2
|
|
evaluations/__init__.py
|
xialeiliu/GFR-IL
| 34
|
6629443
|
from __future__ import absolute_import
import utils
from .cnn import extract_cnn_feature, extract_cnn_feature_classification
from .extract_featrure import extract_features, pairwise_distance, pairwise_similarity, extract_features_classification
from .recall_at_k import Recall_at_ks, Recall_at_ks_products
from .NMI import NMI
# from utils import to_torch
|
from __future__ import absolute_import
import utils
from .cnn import extract_cnn_feature, extract_cnn_feature_classification
from .extract_featrure import extract_features, pairwise_distance, pairwise_similarity, extract_features_classification
from .recall_at_k import Recall_at_ks, Recall_at_ks_products
from .NMI import NMI
# from utils import to_torch
|
en
| 0.522202
|
# from utils import to_torch
| 1.234147
| 1
|
src/run.py
|
Rocuku/python-stencil
| 0
|
6629444
|
<reponame>Rocuku/python-stencil
# coding: utf-8
from Table import Table
from Cell import Cell
import time, os
import sys

if __name__ == '__main__':
    # Defaults, overridable via --file_path= --height= --weight= --time_slot=
    file_path = None
    weight = 9
    height = 9
    time_slot = 1
    for argv in sys.argv:
        if argv[: 12] == "--file_path=":
            file_path = argv[12:]
        if argv[: 9] == "--height=":
            height = int(argv[9:])
        if argv[: 9] == "--weight=":
            # Bug fix: this branch previously overwrote `height`, so the
            # --weight= option silently changed the board height instead.
            weight = int(argv[9:])
        if argv[: 12] == "--time_slot=":
            time_slot = int(argv[12:])
    table = Table(height = height, weight = weight, file_path = file_path)
    # Redraw the board every generation until interrupted.
    while 1:
        os.system('clear')
        print(table.show_table())
        table.next_generation()
        time.sleep(time_slot)
|
# coding: utf-8
from Table import Table
from Cell import Cell
import time, os
import sys

if __name__ == '__main__':
    # Defaults, overridable via --file_path= --height= --weight= --time_slot=
    file_path = None
    weight = 9
    height = 9
    time_slot = 1
    for argv in sys.argv:
        if argv[: 12] == "--file_path=":
            file_path = argv[12:]
        if argv[: 9] == "--height=":
            height = int(argv[9:])
        if argv[: 9] == "--weight=":
            # Bug fix: this branch previously overwrote `height`, so the
            # --weight= option silently changed the board height instead.
            weight = int(argv[9:])
        if argv[: 12] == "--time_slot=":
            time_slot = int(argv[12:])
    table = Table(height = height, weight = weight, file_path = file_path)
    # Redraw the board every generation until interrupted.
    while 1:
        os.system('clear')
        print(table.show_table())
        table.next_generation()
        time.sleep(time_slot)
|
en
| 0.833554
|
# coding: utf-8
| 2.898852
| 3
|
kwctoolkit/base/generate_callback_impl.py
|
Kai-Wolf-SW-Consulting/KWCToolkit
| 0
|
6629445
|
<reponame>Kai-Wolf-SW-Consulting/KWCToolkit<filename>kwctoolkit/base/generate_callback_impl.py<gh_stars>0
#!/usr/bin/env python3
# Copyright (c) 2021, <NAME> - SW Consulting. All rights reserved.
# For the licensing terms see LICENSE file in the root directory. For the
# list of contributors see the AUTHORS file in the same directory.
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from abc import ABCMeta, abstractmethod
from datetime import date
class ACallbackBase(metaclass=ABCMeta):
    """Base class for generating different callback types.

    Fix: the original used ``__metaclass__ = ABCMeta``, a Python-2 idiom that
    has no effect under this file's declared Python 3, so @abstractmethod was
    never enforced; the metaclass is now declared in the class statement.
    """
    def __init__(self, num_params):
        self.num_params = num_params

    def name(self):
        """Generate callback typename with suffixed number or arguments"""
        name = self.__class__.__name__
        return name + str(self.num_params) if self.num_params else name

    def generate(self):
        """Generate callback base type (Callback|ResultCallback)"""
        params = make_enum('P%d', self.num_params) if self.num_params else ""
        cb_def = """class %s {
public:
    virtual ~%s() {}
    virtual bool isRepeatable() const { return false; }
    virtual %s run(%s) = 0;

protected:
    %s() {}
};
""" % (self.name(), self.name(), self.ret_type(), params, self.name())
        return cb_def

    def serialize(self, out_handle):
        # Template declaration line, then the class body, blank-line separated.
        out_handle.write(self.template_decl() + "\n")
        out_handle.write(self.generate() + "\n")

    @abstractmethod
    def ret_type(self):
        pass

    @abstractmethod
    def template_decl(self):
        pass
class Callback(ACallbackBase):
    """Generator for the void-returning callback base type."""
    def template_decl(self):
        if self.num_params:
            return "template <%s>" % make_enum('typename P%d', self.num_params)
        return ""

    def ret_type(self):
        return "void"
class ResultCallback(ACallbackBase):
    """Generator for the value-returning (RetType) callback base type."""
    def template_decl(self):
        params = ["typename RetType"]
        if self.num_params:
            params.append(make_enum('typename P%d', self.num_params))
        return "template <%s>" % ", ".join(params)

    def ret_type(self):
        return "RetType"
class CallbackKind:
    """Currently there are three types of callbacks supported"""
    # Plain int constants (0, 1, 2).  NOTE(review): the generator code
    # compares these with ``is``, which relies on CPython's small-int
    # caching — an Enum would make the identity comparison guaranteed.
    ConstMember, Member, Function = range(3)
class ACallback:
    """Base class for generating different callback categories.

    Subclasses pick the callback kind (const member / member / free function)
    and supply the C++ method signature and constructor text; this base
    assembles the full class template and the MakeCallback /
    MakePermanentCallback factory functions as C++ source strings.
    NOTE(review): ``__metaclass__`` below is a Python-2 idiom with no effect
    under Python 3, so the @abstractmethod decorators are not enforced here.
    """
    __metaclass__ = ABCMeta
    def __init__(self, num_pb_args, num_ct_args):
        # num_pb_args: arguments pre-bound at construction time (P...).
        # num_ct_args: arguments supplied at call time (A...).
        self.num_pb_args = num_pb_args
        self.num_ct_args = num_ct_args

    def name(self):
        """Class name suffixed with the pre-bound and call-time arg counts."""
        name = self.__class__.__name__
        if self.num_pb_args >= 0:
            name += "%d" % self.num_pb_args
        if self.num_ct_args >= 0:
            name += "%d" % self.num_ct_args
        return name

    def template_decl(self, from_result_cb):
        """Build the template parameter list for the generated class."""
        p = ["bool del"]
        if from_result_cb:
            p.append("typename RetType")
        if self.kind() is not CallbackKind.Function:
            p.append("typename Class")
        if self.num_pb_args:
            p.append(make_enum("typename P%d", self.num_pb_args))
        if self.num_ct_args:
            p.append(make_enum("typename A%d", self.num_ct_args))
        if from_result_cb and not self.kind() is CallbackKind.Function:
            # SFINAE guard: only enabled for compound (class) types.
            p.append("typename OnlyIf = " + self.compound_type())
        return ", ".join(p)

    def parent_class(self, from_result_cb):
        """Build the base-class specifier ((Result)Callback<...>)."""
        cb = "ResultCallback" if from_result_cb else "Callback"
        if self.num_ct_args:
            cb += str(self.num_ct_args)
        templ_args = []
        if from_result_cb:
            templ_args.append("RetType")
        if self.num_ct_args:
            templ_args.append(make_enum("A%d", self.num_ct_args))
        if templ_args:
            return "public %s<%s>" % (cb, ", ".join(templ_args))
        return "public %s" % cb

    def class_templ_spec(self, from_result_cb):
        """Build the partial-specialization argument list (void return only)."""
        if from_result_cb:
            return ""
        p = ["del", "void"]
        if not self.kind() is CallbackKind.Function:
            p.append("Class")
        if self.num_pb_args:
            p.append(make_enum("P%d", self.num_pb_args))
        if self.num_ct_args:
            p.append(make_enum("A%d", self.num_ct_args))
        if not self.kind() is CallbackKind.Function:
            p.append(self.compound_type())
        return "<%s>" % ", ".join(p)

    def class_members(self):
        """Build the member declarations (target + stored pre-bound args)."""
        instance = "Class* instance_;"
        if self.kind() is CallbackKind.ConstMember:
            instance = "const " + instance
        instance += "\nMethod method_;"
        if self.kind() is CallbackKind.Function:
            instance = "Function function_;"
        members = []
        if self.num_pb_args:
            # Pre-bound arguments are stored by value (references stripped).
            instance += make_enum_pair(
                "\ntypename ::std::remove_reference<P%d>::type p%d_;",
                self.num_pb_args, "")
        return instance

    def run_method(self, from_result_cb):
        """Build the run() override, including the self-delete path."""
        ret_type = "RetType" if from_result_cb else "void"
        ret_call = "RetType result =" if from_result_cb else ""
        ret = "return" if from_result_cb else ""
        ret_result = "return result;" if from_result_cb else ""
        run_args = ""
        if self.num_ct_args:
            run_args = make_enum_pair("A%d a%d", self.num_ct_args)
        # Call arguments: stored pre-bound values first, then call-time args.
        run_params = ""
        if self.num_pb_args:
            run_params += make_enum("p%d_", self.num_pb_args)
        if self.num_ct_args:
            if self.num_pb_args:
                run_params += ", "
            run_params += make_enum("a%d", self.num_ct_args)
        instance = "method_"
        if self.kind() is CallbackKind.Function:
            instance = "function_"
        run_call = ""
        if self.kind() is CallbackKind.Function:
            run_call = "(*%s)(%s)" % (instance, run_params)
        else:
            run_call = "(instance_->*%s)(%s)" % (instance, run_params)
        run_def = """%s run(%s) override {
    if (!del) {
        %s %s;
    } else {
        %s %s;
        %s = nullptr;
        delete this;
        %s
    }
}
""" % (ret_type, run_args, ret, run_call, ret_call, run_call, instance,
       ret_result)
        return run_def

    def generate(self, from_result_cb):
        """Generate callback type (ConstMember*, Member*, Function*)"""
        cb_def = """template <%s> class %s%s : %s {
public:
    typedef %s base;
    typedef %s;
    inline %s {}
    bool isRepeatable() const override { return !del; }
    %s

private:
    %s
};
"""
        return cb_def % (self.template_decl(from_result_cb), self.name(),
                         self.class_templ_spec(from_result_cb),
                         self.parent_class(from_result_cb),
                         self.parent_class(from_result_cb).replace(
                             "public", ""),
                         self.method_signature(from_result_cb), self.ctor(),
                         self.run_method(from_result_cb), self.class_members())

    def api(self, deletable):
        """Build the Make(Permanent)Callback factory function source."""
        delete = "false"
        cb_fn = "MakePermanentCallback"
        caller = "RetType (*function)"
        cb_args = ["instance", "method"]
        if self.kind() is CallbackKind.Function:
            cb_args = ["function"]
        pb_args = ""
        if self.num_pb_args:
            pb_args = ", " + make_enum_pair(
                "typename internal::ConstRef<P%d>::type p%d", self.num_pb_args)
            cb_args.append(make_enum("p%d", self.num_pb_args))
        cb_args = ", ".join(cb_args)
        cb_params = []
        if self.num_pb_args:
            cb_params.append(make_enum("P%d", self.num_pb_args))
        if self.num_ct_args:
            cb_params.append(make_enum("A%d", self.num_ct_args))
        cb_params = ", ".join(cb_params)
        cb_sig = ""
        if self.kind() is CallbackKind.ConstMember:
            cb_sig = "const Caller* instance, RetType (Callee::*method)(%s) const" % cb_params
        elif self.kind() is CallbackKind.Member:
            cb_sig = "Caller* instance, RetType (Callee::*method)(%s)" % cb_params
        elif self.kind() is CallbackKind.Function:
            cb_sig = "RetType (*function)(%s)" % cb_params
        if deletable:
            # One-shot variant: the generated callback deletes itself.
            delete = "true"
            cb_fn = "MakeCallback"
        temp_decl, templ_spec = [], [delete, "RetType"]
        if not self.kind() is CallbackKind.Function:
            temp_decl.append("typename Caller, typename Callee")
            templ_spec.append("Caller")
            caller = "Caller* instance, RetType (Callee::*method)"
            if self.kind() is CallbackKind.ConstMember:
                caller = "const " + caller
        temp_decl.append("typename RetType")
        if self.num_pb_args:
            temp_decl.append(make_enum("typename P%d", self.num_pb_args))
            templ_spec.append(make_enum("P%d", self.num_pb_args))
        if self.num_ct_args:
            temp_decl.append(make_enum("typename A%d", self.num_ct_args))
            templ_spec.append(make_enum("A%d", self.num_ct_args))
        cb_call = """
template <%s>
inline typename %s<%s>::base* %s(%s %s) {
    return new %s<%s>(%s);
}
""" % (", ".join(temp_decl), self.name(), ", ".join(templ_spec), cb_fn,
       cb_sig, pb_args, self.name(), ", ".join(templ_spec), cb_args)
        return cb_call

    def serialize(self, out_handle):
        # Emit both the deletable and permanent class variants, then both
        # factory function variants.
        for t in [True, False]:
            out_handle.write(self.generate(t))
        for t in [True, False]:
            out_handle.write(self.api(t))

    def compound_type(self):
        return "typename ::std::enable_if<::std::is_compound<Class>::value>::type"

    @abstractmethod
    def kind(self):
        pass

    @abstractmethod
    def method_signature(self, from_result_cb):
        pass

    @abstractmethod
    def ctor(self):
        pass
class ConstMemberResultCallback(ACallback):
    """Generates callbacks bound to a const member function."""
    def kind(self):
        return CallbackKind.ConstMember

    def method_signature(self, from_result_cb):
        # Pointer-to-const-member type: "RetType (Class::*Method)(P.., A..) const"
        ret_type = "RetType" if from_result_cb else "void"
        p = []
        if self.num_pb_args:
            p.append(make_enum("P%d", self.num_pb_args))
        if self.num_ct_args:
            p.append(make_enum("A%d", self.num_ct_args))
        return "%s (Class::*Method)(%s) const" % (ret_type, ", ".join(p))

    def ctor(self):
        # With pre-bound args: extra parameters plus initializers for p0_..pN_;
        # without: just instance and method.
        p, m = [], []
        if self.num_pb_args:
            p.append(
                make_enum_pair("typename internal::ConstRef<P%d>::type p%d",
                               self.num_pb_args))
            m.append(make_enum_pair("p%d_(p%d)", self.num_pb_args))
            return """%s(const Class* instance, Method method, %s)
        : instance_(instance), method_(method), %s
""" % (self.name(), ", ".join(p), ", ".join(m))
        return """%s(const Class* instance, Method method)
        : instance_(instance), method_(method)""" % self.name()
class MemberResultCallback(ACallback):
    """Generates callback classes that wrap a non-const member function."""

    def kind(self):
        return CallbackKind.Member

    def method_signature(self, from_result_cb):
        # Pointer-to-member typedef; pre-bound (P*) parameters precede
        # call-time (A*) parameters.
        ret_type = "RetType" if from_result_cb else "void"
        p = []
        if self.num_pb_args:
            p.append(make_enum("P%d", self.num_pb_args))
        if self.num_ct_args:
            p.append(make_enum("A%d", self.num_ct_args))
        return "%s (Class::*Method)(%s)" % (ret_type, ", ".join(p))

    def ctor(self):
        # Pre-bound arguments are taken by const-ref and stored in p<i>_.
        p, m = [], []
        if self.num_pb_args:
            p.append(
                make_enum_pair("typename internal::ConstRef<P%d>::type p%d",
                               self.num_pb_args))
            m.append(make_enum_pair("p%d_(p%d)", self.num_pb_args))
            return """%s(Class* instance, Method method, %s)
        : instance_(instance), method_(method), %s
""" % (self.name(), ", ".join(p), ", ".join(m))
        return """%s(Class* instance, Method method)
        : instance_(instance), method_(method)""" % self.name()
class FunctionResultCallback(ACallback):
    """Generates callback classes that wrap a free function."""

    def kind(self):
        return CallbackKind.Function

    def method_signature(self, from_result_cb):
        # Function-pointer typedef; pre-bound (P*) parameters precede
        # call-time (A*) parameters.
        ret_type = "RetType" if from_result_cb else "void"
        p = []
        if self.num_pb_args:
            p.append(make_enum("P%d", self.num_pb_args))
        if self.num_ct_args:
            p.append(make_enum("A%d", self.num_ct_args))
        return "%s (*Function)(%s)" % (ret_type, ", ".join(p))

    def ctor(self):
        # Pre-bound arguments are taken by const-ref and stored in p<i>_.
        p, m = [], []
        if self.num_pb_args:
            p.append(
                make_enum_pair("typename internal::ConstRef<P%d>::type p%d",
                               self.num_pb_args))
            m.append(make_enum_pair("p%d_(p%d)", self.num_pb_args))
            return """%s(Function function, %s)
        : function_(function), %s
""" % (self.name(), ", ".join(p), ", ".join(m))
        return """%s(Function function) : function_(function)""" % self.name()
def make_enum(ele, till, delimiter=", "):
"""
Create list of strings with consecutive numbering as follows:
enumeration('param%d', 3) -> 'param1, param2, param3'
"""
return delimiter.join([ele % (i + 1) for i in range(till)])
def make_enum_pair(ele, till, delimiter=", "):
"""
Create list of pair of strings with consecutive numbering as follows:
make_enumerated_pair('P%d p%d', 3) -> 'P1 p1, P2 p2, P3 p3'
"""
return delimiter.join(ele % (i + 1, i + 1) for i in range(till))
def write_copyright_header(out_handle):
    # Write the C++ `//` copyright banner, stamped with the current year.
    out_handle.write(
        "// Copyright (c) %d, <NAME> - SW Consulting . All rights reserved.\n"
        "// For the licensing terms see LICENSE file in the root directory. "
        "For the\n// list of contributors see the AUTHORS "
        " file in the same directory.\n\n" % date.today().year)
if __name__ == "__main__":
    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument("--num-callbacks",
                        type=int,
                        default=3,
                        help="Number of callback types to generate")
    parser.add_argument("--cb-types-filename",
                        default="CallbackTypes.h",
                        help="Name of the cb_types header file")
    parser.add_argument("--cb-impl-filename",
                        default="CallbackImpl.h",
                        help="Name of the cb implementation header file")
    args = parser.parse_args()
    # write callback base classes
    with open(args.cb_types_filename, 'w') as f:
        write_copyright_header(f)
        f.write("#ifndef KWCTOOLKIT_BASE_CALLBACK_TYPES_H_\n"
                "#define KWCTOOLKIT_BASE_CALLBACK_TYPES_H_\n\n"
                "namespace kwc {\n\n")
        # One Callback/ResultCallback base per arity, 0..num_callbacks.
        for idx in range(args.num_callbacks + 1):
            Callback(idx).serialize(f)
            ResultCallback(idx).serialize(f)
        f.write("}\n\n#endif")
    # write callback specializations for every pre-bound/call-time combination
    with open(args.cb_impl_filename, 'w') as f:
        write_copyright_header(f)
        f.write("#ifndef KWCTOOLKIT_BASE_CALLBACK_IMPL_H_\n"
                "#define KWCTOOLKIT_BASE_CALLBACK_IMPL_H_\n\n"
                "#include <type_traits>\n#include \"kwctoolkit/base/%s\"\n\n" %
                args.cb_types_filename)
        f.write("namespace kwc {\nnamespace internal {\n\n"
                "template <typename T> struct ConstRef {\n"
                "typedef typename ::std::remove_reference<T>::type base_type;\n"
                "typedef const base_type& type;\n};}")
        for pb_args in range(args.num_callbacks + 1):
            for ct_args in range(args.num_callbacks + 1):
                ConstMemberResultCallback(pb_args, ct_args).serialize(f)
                MemberResultCallback(pb_args, ct_args).serialize(f)
                FunctionResultCallback(pb_args, ct_args).serialize(f)
        f.write("}\n\n#endif")
|
#!/usr/bin/env python3
# Copyright (c) 2021, <NAME> - SW Consulting. All rights reserved.
# For the licensing terms see LICENSE file in the root directory. For the
# list of contributors see the AUTHORS file in the same directory.
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from abc import ABCMeta, abstractmethod
from datetime import date
class ACallbackBase(metaclass=ABCMeta):
    """Base class for generating different callback types.

    Subclasses supply ret_type() and template_decl(); this class renders
    the abstract C++ base-class text (Callback / ResultCallback).
    """
    # NOTE: the original used the Python 2 `__metaclass__ = ABCMeta` hook,
    # which is a silent no-op under Python 3; declaring `metaclass=`
    # restores the intended abstract-method enforcement.

    def __init__(self, num_params):
        # Number of call-time parameters of the generated callback type.
        self.num_params = num_params

    def name(self):
        """Generate callback typename with suffixed number of arguments."""
        name = self.__class__.__name__
        return name + str(self.num_params) if self.num_params else name

    def generate(self):
        """Generate callback base type (Callback|ResultCallback)."""
        params = make_enum('P%d', self.num_params) if self.num_params else ""
        cb_def = """class %s {
public:
    virtual ~%s() {}
    virtual bool isRepeatable() const { return false; }
    virtual %s run(%s) = 0;
protected:
    %s() {}
};
""" % (self.name(), self.name(), self.ret_type(), params, self.name())
        return cb_def

    def serialize(self, out_handle):
        """Write the template declaration plus class body to *out_handle*."""
        out_handle.write(self.template_decl() + "\n")
        out_handle.write(self.generate() + "\n")

    @abstractmethod
    def ret_type(self):
        """Return the C++ return type used in the generated `run` method."""
        pass

    @abstractmethod
    def template_decl(self):
        """Return the C++ template declaration line (may be empty)."""
        pass
class Callback(ACallbackBase):
    """Renderer for the void-returning `Callback` base types."""

    def template_decl(self):
        """Return the template declaration, or "" when there are no params."""
        if self.num_params:
            return "template <%s>" % make_enum('typename P%d', self.num_params)
        return ""

    def ret_type(self):
        """The generated `run` method returns nothing."""
        return "void"
class ResultCallback(ACallbackBase):
    """Renderer for the value-returning `ResultCallback` base types."""

    def template_decl(self):
        """Template declaration always carries RetType, plus any P params."""
        params = ["typename RetType"]
        if self.num_params:
            params.append(make_enum('typename P%d', self.num_params))
        return "template <%s>" % ", ".join(params)

    def ret_type(self):
        """The generated `run` method returns the templated RetType."""
        return "RetType"
class CallbackKind:
    """Currently there are three types of callbacks supported"""
    # Plain integer constants (0, 1, 2).  NOTE(review): call sites compare
    # these with `is`, which only works because CPython caches small ints;
    # an enum.Enum would make the identity comparison guaranteed.
    ConstMember, Member, Function = range(3)
class ACallback(metaclass=ABCMeta):
    """Base class for generating different callback categories.

    A category combines the callee kind (const member, member or free
    function), the number of pre-bound arguments (P*, stored at
    construction) and the number of call-time arguments (A*, passed to
    run()).  Subclasses provide kind(), method_signature() and ctor();
    this class renders the C++ class definition and the Make*Callback
    factory overloads.
    """
    # NOTE: the original used the Python 2 `__metaclass__ = ABCMeta` hook,
    # a silent no-op under Python 3; `metaclass=` restores abstract-method
    # enforcement.  Dead locals (`members = []`, an unused `caller`, a
    # redundant `run_call = ""` init) were removed; generated output is
    # unchanged.

    def __init__(self, num_pb_args, num_ct_args):
        self.num_pb_args = num_pb_args  # arguments bound at construction
        self.num_ct_args = num_ct_args  # arguments supplied at run() time

    def name(self):
        """Class name suffixed with the pre-bound and call-time arities."""
        name = self.__class__.__name__
        if self.num_pb_args >= 0:
            name += "%d" % self.num_pb_args
        if self.num_ct_args >= 0:
            name += "%d" % self.num_ct_args
        return name

    def template_decl(self, from_result_cb):
        """C++ template parameter list for the generated class."""
        p = ["bool del"]
        if from_result_cb:
            p.append("typename RetType")
        if self.kind() is not CallbackKind.Function:
            p.append("typename Class")
        if self.num_pb_args:
            p.append(make_enum("typename P%d", self.num_pb_args))
        if self.num_ct_args:
            p.append(make_enum("typename A%d", self.num_ct_args))
        if from_result_cb and not self.kind() is CallbackKind.Function:
            # SFINAE parameter so only compound Class types are accepted.
            p.append("typename OnlyIf = " + self.compound_type())
        return ", ".join(p)

    def parent_class(self, from_result_cb):
        """Base-class specifier (Callback/ResultCallback + template args)."""
        cb = "ResultCallback" if from_result_cb else "Callback"
        if self.num_ct_args:
            cb += str(self.num_ct_args)
        templ_args = []
        if from_result_cb:
            templ_args.append("RetType")
        if self.num_ct_args:
            templ_args.append(make_enum("A%d", self.num_ct_args))
        if templ_args:
            return "public %s<%s>" % (cb, ", ".join(templ_args))
        return "public %s" % cb

    def class_templ_spec(self, from_result_cb):
        """Partial-specialization argument list (void-returning case only)."""
        if from_result_cb:
            return ""
        p = ["del", "void"]
        if not self.kind() is CallbackKind.Function:
            p.append("Class")
        if self.num_pb_args:
            p.append(make_enum("P%d", self.num_pb_args))
        if self.num_ct_args:
            p.append(make_enum("A%d", self.num_ct_args))
        if not self.kind() is CallbackKind.Function:
            p.append(self.compound_type())
        return "<%s>" % ", ".join(p)

    def class_members(self):
        """Member declarations: target callable plus stored pre-bound args."""
        instance = "Class* instance_;"
        if self.kind() is CallbackKind.ConstMember:
            instance = "const " + instance
        instance += "\nMethod method_;"
        if self.kind() is CallbackKind.Function:
            instance = "Function function_;"
        if self.num_pb_args:
            instance += make_enum_pair(
                "\ntypename ::std::remove_reference<P%d>::type p%d_;",
                self.num_pb_args, "")
        return instance

    def run_method(self, from_result_cb):
        """Render run(): invokes the target, self-deletes when `del` is set."""
        ret_type = "RetType" if from_result_cb else "void"
        ret_call = "RetType result =" if from_result_cb else ""
        ret = "return" if from_result_cb else ""
        ret_result = "return result;" if from_result_cb else ""
        run_args = ""
        if self.num_ct_args:
            run_args = make_enum_pair("A%d a%d", self.num_ct_args)
        run_params = ""
        if self.num_pb_args:
            run_params += make_enum("p%d_", self.num_pb_args)
        if self.num_ct_args:
            if self.num_pb_args:
                run_params += ", "
            run_params += make_enum("a%d", self.num_ct_args)
        instance = "method_"
        if self.kind() is CallbackKind.Function:
            instance = "function_"
        if self.kind() is CallbackKind.Function:
            run_call = "(*%s)(%s)" % (instance, run_params)
        else:
            run_call = "(instance_->*%s)(%s)" % (instance, run_params)
        run_def = """%s run(%s) override {
    if (!del) {
        %s %s;
    } else {
        %s %s;
        %s = nullptr;
        delete this;
        %s
    }
}
""" % (ret_type, run_args, ret, run_call, ret_call, run_call, instance,
       ret_result)
        return run_def

    def generate(self, from_result_cb):
        """Generate callback type (ConstMember*, Member*, Function*)"""
        cb_def = """template <%s> class %s%s : %s {
public:
    typedef %s base;
    typedef %s;
    inline %s {}
    bool isRepeatable() const override { return !del; }
    %s
private:
    %s
};
"""
        return cb_def % (self.template_decl(from_result_cb), self.name(),
                         self.class_templ_spec(from_result_cb),
                         self.parent_class(from_result_cb),
                         self.parent_class(from_result_cb).replace(
                             "public", ""),
                         self.method_signature(from_result_cb), self.ctor(),
                         self.run_method(from_result_cb), self.class_members())

    def api(self, deletable):
        """Render the MakeCallback/MakePermanentCallback factory overload."""
        delete = "false"
        cb_fn = "MakePermanentCallback"
        cb_args = ["instance", "method"]
        if self.kind() is CallbackKind.Function:
            cb_args = ["function"]
        pb_args = ""
        if self.num_pb_args:
            pb_args = ", " + make_enum_pair(
                "typename internal::ConstRef<P%d>::type p%d", self.num_pb_args)
            cb_args.append(make_enum("p%d", self.num_pb_args))
        cb_args = ", ".join(cb_args)
        cb_params = []
        if self.num_pb_args:
            cb_params.append(make_enum("P%d", self.num_pb_args))
        if self.num_ct_args:
            cb_params.append(make_enum("A%d", self.num_ct_args))
        cb_params = ", ".join(cb_params)
        cb_sig = ""
        if self.kind() is CallbackKind.ConstMember:
            cb_sig = "const Caller* instance, RetType (Callee::*method)(%s) const" % cb_params
        elif self.kind() is CallbackKind.Member:
            cb_sig = "Caller* instance, RetType (Callee::*method)(%s)" % cb_params
        elif self.kind() is CallbackKind.Function:
            cb_sig = "RetType (*function)(%s)" % cb_params
        if deletable:
            delete = "true"
            cb_fn = "MakeCallback"
        temp_decl, templ_spec = [], [delete, "RetType"]
        if not self.kind() is CallbackKind.Function:
            temp_decl.append("typename Caller, typename Callee")
            templ_spec.append("Caller")
        temp_decl.append("typename RetType")
        if self.num_pb_args:
            temp_decl.append(make_enum("typename P%d", self.num_pb_args))
            templ_spec.append(make_enum("P%d", self.num_pb_args))
        if self.num_ct_args:
            temp_decl.append(make_enum("typename A%d", self.num_ct_args))
            templ_spec.append(make_enum("A%d", self.num_ct_args))
        cb_call = """
template <%s>
inline typename %s<%s>::base* %s(%s %s) {
    return new %s<%s>(%s);
}
""" % (", ".join(temp_decl), self.name(), ", ".join(templ_spec), cb_fn,
       cb_sig, pb_args, self.name(), ", ".join(templ_spec), cb_args)
        return cb_call

    def serialize(self, out_handle):
        """Write result and void variants, then both factory overloads."""
        for t in [True, False]:
            out_handle.write(self.generate(t))
        for t in [True, False]:
            out_handle.write(self.api(t))

    def compound_type(self):
        # SFINAE expression: only well-formed when Class is a compound type.
        return "typename ::std::enable_if<::std::is_compound<Class>::value>::type"

    @abstractmethod
    def kind(self):
        """Return the CallbackKind discriminator for this subclass."""
        pass

    @abstractmethod
    def method_signature(self, from_result_cb):
        """Return the C++ typedef text for the wrapped callable."""
        pass

    @abstractmethod
    def ctor(self):
        """Return the C++ constructor text for the generated class."""
        pass
class ConstMemberResultCallback(ACallback):
    """Generates callback classes that wrap a *const* member function."""

    def kind(self):
        return CallbackKind.ConstMember

    def method_signature(self, from_result_cb):
        # Pointer-to-const-member typedef; pre-bound (P*) parameters come
        # before call-time (A*) parameters.
        ret_type = "RetType" if from_result_cb else "void"
        p = []
        if self.num_pb_args:
            p.append(make_enum("P%d", self.num_pb_args))
        if self.num_ct_args:
            p.append(make_enum("A%d", self.num_ct_args))
        return "%s (Class::*Method)(%s) const" % (ret_type, ", ".join(p))

    def ctor(self):
        # With pre-bound arguments: accept them by const-ref and initialize
        # the stored p<i>_ members; otherwise keep only instance and method.
        p, m = [], []
        if self.num_pb_args:
            p.append(
                make_enum_pair("typename internal::ConstRef<P%d>::type p%d",
                               self.num_pb_args))
            m.append(make_enum_pair("p%d_(p%d)", self.num_pb_args))
            return """%s(const Class* instance, Method method, %s)
        : instance_(instance), method_(method), %s
""" % (self.name(), ", ".join(p), ", ".join(m))
        return """%s(const Class* instance, Method method)
        : instance_(instance), method_(method)""" % self.name()
class MemberResultCallback(ACallback):
    """Generates callback classes that wrap a non-const member function."""

    def kind(self):
        return CallbackKind.Member

    def method_signature(self, from_result_cb):
        # Pointer-to-member typedef; pre-bound (P*) parameters precede
        # call-time (A*) parameters.
        ret_type = "RetType" if from_result_cb else "void"
        p = []
        if self.num_pb_args:
            p.append(make_enum("P%d", self.num_pb_args))
        if self.num_ct_args:
            p.append(make_enum("A%d", self.num_ct_args))
        return "%s (Class::*Method)(%s)" % (ret_type, ", ".join(p))

    def ctor(self):
        # Pre-bound arguments are taken by const-ref and stored in p<i>_.
        p, m = [], []
        if self.num_pb_args:
            p.append(
                make_enum_pair("typename internal::ConstRef<P%d>::type p%d",
                               self.num_pb_args))
            m.append(make_enum_pair("p%d_(p%d)", self.num_pb_args))
            return """%s(Class* instance, Method method, %s)
        : instance_(instance), method_(method), %s
""" % (self.name(), ", ".join(p), ", ".join(m))
        return """%s(Class* instance, Method method)
        : instance_(instance), method_(method)""" % self.name()
class FunctionResultCallback(ACallback):
    """Generates callback classes that wrap a free function."""

    def kind(self):
        return CallbackKind.Function

    def method_signature(self, from_result_cb):
        # Function-pointer typedef; pre-bound (P*) parameters precede
        # call-time (A*) parameters.
        ret_type = "RetType" if from_result_cb else "void"
        p = []
        if self.num_pb_args:
            p.append(make_enum("P%d", self.num_pb_args))
        if self.num_ct_args:
            p.append(make_enum("A%d", self.num_ct_args))
        return "%s (*Function)(%s)" % (ret_type, ", ".join(p))

    def ctor(self):
        # Pre-bound arguments are taken by const-ref and stored in p<i>_.
        p, m = [], []
        if self.num_pb_args:
            p.append(
                make_enum_pair("typename internal::ConstRef<P%d>::type p%d",
                               self.num_pb_args))
            m.append(make_enum_pair("p%d_(p%d)", self.num_pb_args))
            return """%s(Function function, %s)
        : function_(function), %s
""" % (self.name(), ", ".join(p), ", ".join(m))
        return """%s(Function function) : function_(function)""" % self.name()
def make_enum(ele, till, delimiter=", "):
"""
Create list of strings with consecutive numbering as follows:
enumeration('param%d', 3) -> 'param1, param2, param3'
"""
return delimiter.join([ele % (i + 1) for i in range(till)])
def make_enum_pair(ele, till, delimiter=", "):
"""
Create list of pair of strings with consecutive numbering as follows:
make_enumerated_pair('P%d p%d', 3) -> 'P1 p1, P2 p2, P3 p3'
"""
return delimiter.join(ele % (i + 1, i + 1) for i in range(till))
def write_copyright_header(out_handle):
    # Write the C++ `//` copyright banner, stamped with the current year.
    out_handle.write(
        "// Copyright (c) %d, <NAME> - SW Consulting . All rights reserved.\n"
        "// For the licensing terms see LICENSE file in the root directory. "
        "For the\n// list of contributors see the AUTHORS "
        " file in the same directory.\n\n" % date.today().year)
if __name__ == "__main__":
    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument("--num-callbacks",
                        type=int,
                        default=3,
                        help="Number of callback types to generate")
    parser.add_argument("--cb-types-filename",
                        default="CallbackTypes.h",
                        help="Name of the cb_types header file")
    parser.add_argument("--cb-impl-filename",
                        default="CallbackImpl.h",
                        help="Name of the cb implementation header file")
    args = parser.parse_args()
    # write callback base classes
    with open(args.cb_types_filename, 'w') as f:
        write_copyright_header(f)
        f.write("#ifndef KWCTOOLKIT_BASE_CALLBACK_TYPES_H_\n"
                "#define KWCTOOLKIT_BASE_CALLBACK_TYPES_H_\n\n"
                "namespace kwc {\n\n")
        # One Callback/ResultCallback base per arity, 0..num_callbacks.
        for idx in range(args.num_callbacks + 1):
            Callback(idx).serialize(f)
            ResultCallback(idx).serialize(f)
        f.write("}\n\n#endif")
    # write callback specializations for every pre-bound/call-time combination
    with open(args.cb_impl_filename, 'w') as f:
        write_copyright_header(f)
        f.write("#ifndef KWCTOOLKIT_BASE_CALLBACK_IMPL_H_\n"
                "#define KWCTOOLKIT_BASE_CALLBACK_IMPL_H_\n\n"
                "#include <type_traits>\n#include \"kwctoolkit/base/%s\"\n\n" %
                args.cb_types_filename)
        f.write("namespace kwc {\nnamespace internal {\n\n"
                "template <typename T> struct ConstRef {\n"
                "typedef typename ::std::remove_reference<T>::type base_type;\n"
                "typedef const base_type& type;\n};}")
        for pb_args in range(args.num_callbacks + 1):
            for ct_args in range(args.num_callbacks + 1):
                ConstMemberResultCallback(pb_args, ct_args).serialize(f)
                MemberResultCallback(pb_args, ct_args).serialize(f)
                FunctionResultCallback(pb_args, ct_args).serialize(f)
        f.write("}\n\n#endif")
|
en
| 0.490997
|
#!/usr/bin/env python3 # Copyright (c) 2021, <NAME> - SW Consulting. All rights reserved. # For the licensing terms see LICENSE file in the root directory. For the # list of contributors see the AUTHORS file in the same directory. Base class for generating different callback types Generate callback typename with suffixed number or arguments Generate callback base type (Callback|ResultCallback) class %s { public: virtual ~%s() {} virtual bool isRepeatable() const { return false; } virtual %s run(%s) = 0; protected: %s() {} }; Currently there are three types of callbacks supported Base class for generating different callback categories %s run(%s) override { if (!del) { %s %s; } else { %s %s; %s = nullptr; delete this; %s } } Generate callback type (ConstMember*, Member*, Function*) template <%s> class %s%s : %s { public: typedef %s base; typedef %s; inline %s {} bool isRepeatable() const override { return !del; } %s private: %s }; template <%s> inline typename %s<%s>::base* %s(%s %s) { return new %s<%s>(%s); } %s(const Class* instance, Method method, %s) : instance_(instance), method_(method), %s %s(const Class* instance, Method method) : instance_(instance), method_(method) %s(Class* instance, Method method, %s) : instance_(instance), method_(method), %s %s(Class* instance, Method method) : instance_(instance), method_(method) %s(Function function, %s) : function_(function), %s %s(Function function) : function_(function) Create list of strings with consecutive numbering as follows: enumeration('param%d', 3) -> 'param1, param2, param3' Create list of pair of strings with consecutive numbering as follows: make_enumerated_pair('P%d p%d', 3) -> 'P1 p1, P2 p2, P3 p3' # write callback base classes #endif") # write callback specializations for every pre-bound/call-time combination #include \"kwctoolkit/base/%s\"\n\n" % #endif")
| 2.666373
| 3
|
tests/test_suite.py
|
alexcwyu/python-trading
| 17
|
6629446
|
# add comment here
import unittest
from tests.test_bar import BarTest
from tests.test_bar_aggregator import BarAggregatorTest
from tests.test_broker import SimulatorTest
from tests.test_broker_mgr import BrokerManagerTest
from tests.test_clock import ClockTest
#from tests.test_cmp_functional_backtest import TestCompareWithFunctionalBacktest
from tests.test_data_series import DataSeriesTest
from tests.test_in_memory_db import InMemoryDBTest
from tests.test_indicator import IndicatorTest
from tests.test_instrument_data import InstrumentDataTest
from tests.test_ma import MovingAverageTest
from tests.test_market_data_processor import MarketDataProcessorTest
from tests.test_model_factory import ModelFactoryTest
from tests.test_order import OrderTest
from tests.test_order_handler import OrderHandlerTest
#from tests.test_pipeline import PipelineTest
#from tests.test_pipeline_pairwise import PairwiseTest
from tests.test_portfolio import PortfolioTest
from tests.test_position import PositionTest
from tests.test_ref_data import RefDataTest
from tests.test_rolling import RollingApplyTest
from tests.test_ser_deser import SerializationTest
from tests.test_persistence_strategy import StrategyPersistenceTest
from tests.test_persistence_indicator import IndicatorPersistenceTest
from tests.test_talib_wrapper import TALibSMATest
from tests.test_feed import FeedTest
from tests.test_plot import PlotTest
def suite():
    """Build and return the aggregate test suite for the project.

    NOTE(review): an unresolved git merge conflict was left inside this
    function.  The HEAD side referenced names that are never imported
    (SerializerTest, PipelineTest, PairwiseTest) and re-added suites that
    already appear below, so the other side (RefDataTest) is kept.
    """
    test_suite = unittest.TestSuite()
    test_suite.addTest(unittest.makeSuite(BarTest))
    test_suite.addTest(unittest.makeSuite(BarAggregatorTest))
    test_suite.addTest(unittest.makeSuite(SimulatorTest))
    test_suite.addTest(unittest.makeSuite(BrokerManagerTest))
    test_suite.addTest(unittest.makeSuite(ClockTest))
    test_suite.addTest(unittest.makeSuite(DataSeriesTest))
    test_suite.addTest(unittest.makeSuite(FeedTest))
    test_suite.addTest(unittest.makeSuite(IndicatorTest))
    test_suite.addTest(unittest.makeSuite(InstrumentDataTest))
    test_suite.addTest(unittest.makeSuite(MovingAverageTest))
    test_suite.addTest(unittest.makeSuite(MarketDataProcessorTest))
    test_suite.addTest(unittest.makeSuite(ModelFactoryTest))
    test_suite.addTest(unittest.makeSuite(OrderTest))
    test_suite.addTest(unittest.makeSuite(OrderHandlerTest))
    #test_suite.addTest(unittest.makeSuite(TestCompareWithFunctionalBacktest))
    test_suite.addTest(unittest.makeSuite(InMemoryDBTest))
    #test_suite.addTest(unittest.makeSuite(PersistenceTest))
    #test_suite.addTest(unittest.makeSuite(PipelineTest))
    #test_suite.addTest(unittest.makeSuite(PairwiseTest))
    test_suite.addTest(unittest.makeSuite(PlotTest))
    test_suite.addTest(unittest.makeSuite(PortfolioTest))
    test_suite.addTest(unittest.makeSuite(PositionTest))
    test_suite.addTest(unittest.makeSuite(RefDataTest))
    test_suite.addTest(unittest.makeSuite(RollingApplyTest))
    test_suite.addTest(unittest.makeSuite(SerializationTest))
    test_suite.addTest(unittest.makeSuite(IndicatorPersistenceTest))
    test_suite.addTest(unittest.makeSuite(StrategyPersistenceTest))
    test_suite.addTest(unittest.makeSuite(TALibSMATest))
    return test_suite
# Build and immediately run the aggregate suite at import time.
# NOTE(review): consider guarding this with `if __name__ == "__main__":`
# so importing this module does not trigger a test run.
mySuit = suite()
runner = unittest.TextTestRunner()
runner.run(mySuit)
# creating a new test suite
newSuite = unittest.TestSuite()
|
# add comment here
import unittest
from tests.test_bar import BarTest
from tests.test_bar_aggregator import BarAggregatorTest
from tests.test_broker import SimulatorTest
from tests.test_broker_mgr import BrokerManagerTest
from tests.test_clock import ClockTest
#from tests.test_cmp_functional_backtest import TestCompareWithFunctionalBacktest
from tests.test_data_series import DataSeriesTest
from tests.test_in_memory_db import InMemoryDBTest
from tests.test_indicator import IndicatorTest
from tests.test_instrument_data import InstrumentDataTest
from tests.test_ma import MovingAverageTest
from tests.test_market_data_processor import MarketDataProcessorTest
from tests.test_model_factory import ModelFactoryTest
from tests.test_order import OrderTest
from tests.test_order_handler import OrderHandlerTest
#from tests.test_pipeline import PipelineTest
#from tests.test_pipeline_pairwise import PairwiseTest
from tests.test_portfolio import PortfolioTest
from tests.test_position import PositionTest
from tests.test_ref_data import RefDataTest
from tests.test_rolling import RollingApplyTest
from tests.test_ser_deser import SerializationTest
from tests.test_persistence_strategy import StrategyPersistenceTest
from tests.test_persistence_indicator import IndicatorPersistenceTest
from tests.test_talib_wrapper import TALibSMATest
from tests.test_feed import FeedTest
from tests.test_plot import PlotTest
def suite():
    """Build and return the aggregate test suite for the project.

    NOTE(review): an unresolved git merge conflict was left inside this
    function.  The HEAD side referenced names that are never imported
    (SerializerTest, PipelineTest, PairwiseTest) and re-added suites that
    already appear below, so the other side (RefDataTest) is kept.
    """
    test_suite = unittest.TestSuite()
    test_suite.addTest(unittest.makeSuite(BarTest))
    test_suite.addTest(unittest.makeSuite(BarAggregatorTest))
    test_suite.addTest(unittest.makeSuite(SimulatorTest))
    test_suite.addTest(unittest.makeSuite(BrokerManagerTest))
    test_suite.addTest(unittest.makeSuite(ClockTest))
    test_suite.addTest(unittest.makeSuite(DataSeriesTest))
    test_suite.addTest(unittest.makeSuite(FeedTest))
    test_suite.addTest(unittest.makeSuite(IndicatorTest))
    test_suite.addTest(unittest.makeSuite(InstrumentDataTest))
    test_suite.addTest(unittest.makeSuite(MovingAverageTest))
    test_suite.addTest(unittest.makeSuite(MarketDataProcessorTest))
    test_suite.addTest(unittest.makeSuite(ModelFactoryTest))
    test_suite.addTest(unittest.makeSuite(OrderTest))
    test_suite.addTest(unittest.makeSuite(OrderHandlerTest))
    #test_suite.addTest(unittest.makeSuite(TestCompareWithFunctionalBacktest))
    test_suite.addTest(unittest.makeSuite(InMemoryDBTest))
    #test_suite.addTest(unittest.makeSuite(PersistenceTest))
    #test_suite.addTest(unittest.makeSuite(PipelineTest))
    #test_suite.addTest(unittest.makeSuite(PairwiseTest))
    test_suite.addTest(unittest.makeSuite(PlotTest))
    test_suite.addTest(unittest.makeSuite(PortfolioTest))
    test_suite.addTest(unittest.makeSuite(PositionTest))
    test_suite.addTest(unittest.makeSuite(RefDataTest))
    test_suite.addTest(unittest.makeSuite(RollingApplyTest))
    test_suite.addTest(unittest.makeSuite(SerializationTest))
    test_suite.addTest(unittest.makeSuite(IndicatorPersistenceTest))
    test_suite.addTest(unittest.makeSuite(StrategyPersistenceTest))
    test_suite.addTest(unittest.makeSuite(TALibSMATest))
    return test_suite
# Build and immediately run the aggregate suite at import time.
# NOTE(review): consider guarding this with `if __name__ == "__main__":`
# so importing this module does not trigger a test run.
mySuit = suite()
runner = unittest.TextTestRunner()
runner.run(mySuit)
# creating a new test suite
newSuite = unittest.TestSuite()
|
en
| 0.125818
|
# add comment here #from tests.test_cmp_functional_backtest import TestCompareWithFunctionalBacktest #from tests.test_pipeline import PipelineTest #from tests.test_pipeline_pairwise import PairwiseTest #test_suite.addTest(unittest.makeSuite(TestCompareWithFunctionalBacktest)) #test_suite.addTest(unittest.makeSuite(PersistenceTest)) #test_suite.addTest(unittest.makeSuite(PipelineTest)) #test_suite.addTest(unittest.makeSuite(PairwiseTest)) #test_suite.addTest(unittest.makeSuite(TestCompareWithFunctionalBacktest)) #test_suite.addTest(unittest.makeSuite(PersistenceTest)) #test_suite.addTest(unittest.makeSuite(StrategyPersistenceTest)) # creating a new test suite
| 1.60944
| 2
|
example/try.py
|
zhenhua32/luke
| 0
|
6629447
|
import aiohttp
import asyncio
import async_timeout
async def get():
    """GET http://httpbin.org/get and print the response status and body."""
    async with aiohttp.ClientSession() as session:
        async with session.get('http://httpbin.org/get') as resp:
            print(resp.status)
            print(await resp.text())


# asyncio.run() owns the event loop lifecycle; it replaces the deprecated
# get_event_loop()/run_until_complete() pattern (deprecated since 3.10).
asyncio.run(get())
|
import aiohttp
import asyncio
import async_timeout
async def get():
    """GET http://httpbin.org/get and print the response status and body."""
    async with aiohttp.ClientSession() as session:
        async with session.get('http://httpbin.org/get') as resp:
            print(resp.status)
            print(await resp.text())


# asyncio.run() owns the event loop lifecycle; it replaces the deprecated
# get_event_loop()/run_until_complete() pattern (deprecated since 3.10).
asyncio.run(get())
|
en
| 0.15861
|
# print(await resp.content.read(10))
| 2.975495
| 3
|
tests/test_organized_coronavirus_info.py
|
QMSS-G5072-2020/final_project_li_ruiqi
| 0
|
6629448
|
from organized_coronavirus_info import __version__
from organized_coronavirus_info import organized_coronavirus_info
import pandas as pd
import matplotlib.pyplot as plt
import requests
import json
def test_version():
    assert __version__ == '0.1.0'


# Module-level fixture: downloaded once and shared by the
# organized_historical_* tests below.
historical_data = organized_coronavirus_info.obtain_historical_data()


# Adding unit test corresponding to functions in package
# For functions in part 1
def test_obtain_historical_data():
    historical_data = organized_coronavirus_info.obtain_historical_data()
    # isinstance() is the idiomatic type check; `type(x) == T` is an
    # anti-pattern and rejects subclasses.
    assert isinstance(historical_data, list)


def test_organized_historical_cases():
    cases_organized = organized_coronavirus_info.organized_historical_cases(historical_data)
    # pd.DataFrame is the public alias for pd.core.frame.DataFrame.
    assert isinstance(cases_organized, pd.DataFrame)


def test_organized_historical_deaths():
    deaths_organized = organized_coronavirus_info.organized_historical_deaths(historical_data)
    assert isinstance(deaths_organized, pd.DataFrame)


def test_organized_historical_recovered():
    recovered_organized = organized_coronavirus_info.organized_historical_recovered(historical_data)
    assert isinstance(recovered_organized, pd.DataFrame)


# For functions in part 2
def test_obtain_global_data():
    global_data = organized_coronavirus_info.obtain_global_data()
    assert isinstance(global_data, pd.DataFrame)


def test_obtain_continent_data():
    continent_data = organized_coronavirus_info.obtain_continent_data()
    assert isinstance(continent_data, pd.DataFrame)
|
from organized_coronavirus_info import __version__
from organized_coronavirus_info import organized_coronavirus_info
import pandas as pd
import matplotlib.pyplot as plt
import requests
import json
def test_version():
    assert __version__ == '0.1.0'


# Module-level fixture: downloaded once and shared by the
# organized_historical_* tests below.
historical_data = organized_coronavirus_info.obtain_historical_data()


# Adding unit test corresponding to functions in package
# For functions in part 1
def test_obtain_historical_data():
    historical_data = organized_coronavirus_info.obtain_historical_data()
    # isinstance() is the idiomatic type check; `type(x) == T` is an
    # anti-pattern and rejects subclasses.
    assert isinstance(historical_data, list)


def test_organized_historical_cases():
    cases_organized = organized_coronavirus_info.organized_historical_cases(historical_data)
    # pd.DataFrame is the public alias for pd.core.frame.DataFrame.
    assert isinstance(cases_organized, pd.DataFrame)


def test_organized_historical_deaths():
    deaths_organized = organized_coronavirus_info.organized_historical_deaths(historical_data)
    assert isinstance(deaths_organized, pd.DataFrame)


def test_organized_historical_recovered():
    recovered_organized = organized_coronavirus_info.organized_historical_recovered(historical_data)
    assert isinstance(recovered_organized, pd.DataFrame)


# For functions in part 2
def test_obtain_global_data():
    global_data = organized_coronavirus_info.obtain_global_data()
    assert isinstance(global_data, pd.DataFrame)


def test_obtain_continent_data():
    continent_data = organized_coronavirus_info.obtain_continent_data()
    assert isinstance(continent_data, pd.DataFrame)
|
en
| 0.747042
|
# Adding unit test corresponding to functions in package # For functions in part 1 # For functions in part 2
| 2.647848
| 3
|
swampymud/item.py
|
ufosc/MuddySwamp
| 10
|
6629449
|
'''
This module provides base classes for Items.
item.Item acts as a base class for all items. Besides providing a few
skeletal methods to help with serialization and user interaction, this
class is relatively straightforward.
item.Usable is an abstract class used for checking ItemClasses. A
developer may create a 'Usable' class by simply providing an 'on_use'
method.
item.Equippable is a subclass of Item that provides additional features,
including support for character.Command methods.
'''
import inspect
import abc
from typing import List
from swampymud.util import camel_to_space
from swampymud.character import Command, Character
import swampymud.inventory as inv
#TODO: add interact and perceive filters
class ItemClass(type):
    '''Metaclass establishing behavior for all items'''

    def __init__(self, cls, bases, namespace):
        # Derive a human-readable `classname` from the CamelCase class name
        # unless the class body provides one explicitly.
        if "classname" not in namespace:
            self.classname = camel_to_space(cls)
        # Default `description` to the cleaned class docstring, with a
        # placeholder when no docstring was written.
        if "description" not in namespace:
            if self.__doc__ is not None:
                self.description = inspect.cleandoc(self.__doc__)
            else:
                self.description = "[No description provided.]"
        super().__init__(cls, bases, namespace)

    def __str__(self):
        '''returns a string representation of this class'''
        return self.classname
class Item(metaclass=ItemClass):
    '''Base class for all Item classes.

    Implement 'on_use' to make a Usable item. To trigger certain code
    when this item is picked up or dropped, override 'on_pickup'
    or 'on_drop', respectively.
    '''

    # Default label for items; subclasses may override.
    label = "Item"

    def __str__(self):
        """Return a simple representation of this item.

        By default, str(item) returns the name of the item's class.
        """
        return self.classname

    # --- hooks that subclasses may override ---

    def on_pickup(self, char: Character, args: List[str]):
        """Hook fired when this item is picked up; no-op by default."""

    def on_drop(self, char: Character, args: List[str]):
        """Hook fired when this item is dropped; no-op by default."""

    # --- serialization-related methods ---

    @classmethod
    def load(cls, data):
        '''Default load implementation: calls the constructor with no args.'''
        return cls()

    def post_load(self, data):
        '''No post-load actions are required by the default implementation.'''

    def save(self):
        '''Return a pythonic representation of this object.

        This base class has no fields, so no data is returned.'''
        return {}
class EquippableClass(ItemClass):
    '''Metaclass for all items that can be equipped.

    Validates that concrete subclasses declare an `inv.EquipTarget`
    named `target`, and collects all `Command`-decorated methods into
    `_commands` (honoring the MRO so recent definitions win).
    '''

    def __init__(self, cls, bases, namespace):
        super().__init__(cls, bases, namespace)
        # Ensure that developers have added an equip target; the abstract
        # base class 'Equippable' itself is exempt.
        if cls != "Equippable":
            if not hasattr(self, "target"):
                raise AttributeError(f"Attempted to define Equippable '{cls}'"
                                     " without defining a target.")
            if not isinstance(self.target, inv.EquipTarget):
                # fixed typo in the message: 'Euippable' -> 'Equippable'
                raise TypeError(f"When defining Equippable '{cls}' a target was"
                                " provided, but it wasn't an EquipTarget.")
        # Note that this is essentially the same as CharacterClass.
        # First gather the Commands defined directly in this class.
        self._local_commands = {}
        for value in namespace.values():
            if isinstance(value, Command):
                value.label = "Equipped"
                self._local_commands[str(value)] = value
        # Now gather all commands, walking the MRO from the most distant
        # base downward so the most recently defined commands are exposed.
        self._commands = {}
        for base in reversed(self.__mro__):
            if not isinstance(base, EquippableClass):
                continue
            self._commands.update(base._local_commands)
        self._commands.update(self._local_commands)
class Equippable(Item, metaclass=EquippableClass):
    '''Base class for all Equippable items.

    You must define your own "target" like so:
        target = inv.EquipTarget("right arm")
    To trigger effects when the item is equipped or unequipped, override
    the 'on_equip' or 'on_unequip' methods.
    By default, any methods decorated with @character.Command will be
    added to the player's cmd_dict when equipped.
    '''
    # (docstring fixed: commands go into cmd_dict, not 'equip_dict')

    def add_cmds(self, char: Character):
        '''Add all the commands from this item to the char.
        Any conflicting commands are simply shadowed.'''
        for cmd in self._commands.values():
            # Only expose commands whose filter permits this character.
            if cmd.filter.permits(char):
                cmd = cmd.specify(self, char)
                char.cmd_dict[str(cmd)] = cmd

    def remove_cmds(self, char: Character):
        '''Remove all of this item's commands from char.'''
        for cmd in self._commands.values():
            cmd = cmd.specify(self, char)
            try:
                char.cmd_dict.remove_value(str(cmd), cmd)
            except (KeyError, ValueError):
                # Command was not in cmd_dict (e.g. it was shadowed or its
                # filter never permitted this character); nothing to remove.
                pass

    # --- hooks that subclasses may override ---

    def on_equip(self, char: Character):
        """Hook fired when this item is equipped; no-op by default."""

    def on_unequip(self, char: Character):
        """Hook fired when this item is unequipped; no-op by default."""
class Usable(abc.ABC):
    '''Check whether an item implements 'on_use', the Pythonic way.

    isinstance(item_obj, Usable)
    is roughly equivalent to
    isinstance(item_obj, Item) and item_obj has an 'on_use' method

    Do not attempt to derive a new item type from this class!
    If you want to make a 'Usable' item, simply derive from Item and add
    an 'on_use' method yourself.
    '''

    @classmethod
    def __subclasshook__(cls, subclass):
        # Only answer the question for Usable itself; anything else falls
        # back to the default subclass machinery.
        if cls is not Usable:
            return NotImplemented
        # An item class counts as Usable when it defines a callable 'on_use'.
        on_use = getattr(subclass, "on_use", None)
        return isinstance(subclass, ItemClass) and callable(on_use)
|
'''
This module provides base classes for Items.
item.Item acts as a base class for all items. Besides providing a few
skeletal methods to help with serialization and user interaction, this
class is relatively straightforward.
item.Usable is an abstract class used for checking ItemClasses. A
developer may create a 'Usable' class by simply providing an 'on_use'
method.
item.Equippable is a subclass of Item that provides additional features,
including support for character.Command methods.
'''
import inspect
import abc
from typing import List
from swampymud.util import camel_to_space
from swampymud.character import Command, Character
import swampymud.inventory as inv
#TODO: add interact and perceive filters
class ItemClass(type):
    '''Metaclass establishing behavior for all items.

    Gives every item class a human-readable `classname` and a
    `description` (derived from the docstring) unless the class body
    supplies its own.
    '''

    def __init__(self, cls, bases, namespace):
        # Derive a human-readable name unless the class declared one itself.
        if "classname" not in namespace:
            self.classname = camel_to_space(cls)
        # Fall back to the docstring (or a placeholder) for the description.
        if "description" not in namespace:
            doc = self.__doc__
            self.description = (inspect.cleandoc(doc) if doc is not None
                                else "[No description provided.]")
        super().__init__(cls, bases, namespace)

    def __str__(self):
        '''Return the human-readable name of this class.'''
        return self.classname
class Item(metaclass=ItemClass):
    '''Base class for all Item classes.

    Implement 'on_use' to make a Usable item. To trigger certain code
    when this item is picked up or dropped, override 'on_pickup'
    or 'on_drop', respectively.
    '''

    # Default label for items; subclasses may override.
    label = "Item"

    def __str__(self):
        """Return a simple representation of this item.

        By default, str(item) returns the name of the item's class.
        """
        return self.classname

    # --- hooks that subclasses may override ---

    def on_pickup(self, char: Character, args: List[str]):
        """Hook fired when this item is picked up; no-op by default."""

    def on_drop(self, char: Character, args: List[str]):
        """Hook fired when this item is dropped; no-op by default."""

    # --- serialization-related methods ---

    @classmethod
    def load(cls, data):
        '''Default load implementation: calls the constructor with no args.'''
        return cls()

    def post_load(self, data):
        '''No post-load actions are required by the default implementation.'''

    def save(self):
        '''Return a pythonic representation of this object.

        This base class has no fields, so no data is returned.'''
        return {}
class EquippableClass(ItemClass):
    '''Metaclass for all items that can be equipped.

    Validates that concrete subclasses declare an `inv.EquipTarget`
    named `target`, and collects all `Command`-decorated methods into
    `_commands` (honoring the MRO so recent definitions win).
    '''

    def __init__(self, cls, bases, namespace):
        super().__init__(cls, bases, namespace)
        # Ensure that developers have added an equip target; the abstract
        # base class 'Equippable' itself is exempt.
        if cls != "Equippable":
            if not hasattr(self, "target"):
                raise AttributeError(f"Attempted to define Equippable '{cls}'"
                                     " without defining a target.")
            if not isinstance(self.target, inv.EquipTarget):
                # fixed typo in the message: 'Euippable' -> 'Equippable'
                raise TypeError(f"When defining Equippable '{cls}' a target was"
                                " provided, but it wasn't an EquipTarget.")
        # Note that this is essentially the same as CharacterClass.
        # First gather the Commands defined directly in this class.
        self._local_commands = {}
        for value in namespace.values():
            if isinstance(value, Command):
                value.label = "Equipped"
                self._local_commands[str(value)] = value
        # Now gather all commands, walking the MRO from the most distant
        # base downward so the most recently defined commands are exposed.
        self._commands = {}
        for base in reversed(self.__mro__):
            if not isinstance(base, EquippableClass):
                continue
            self._commands.update(base._local_commands)
        self._commands.update(self._local_commands)
class Equippable(Item, metaclass=EquippableClass):
    '''Base class for all Equippable items.

    You must define your own "target" like so:
        target = inv.EquipTarget("right arm")
    To trigger effects when the item is equipped or unequipped, override
    the 'on_equip' or 'on_unequip' methods.
    By default, any methods decorated with @character.Command will be
    added to the player's cmd_dict when equipped.
    '''
    # (docstring fixed: commands go into cmd_dict, not 'equip_dict')

    def add_cmds(self, char: Character):
        '''Add all the commands from this item to the char.
        Any conflicting commands are simply shadowed.'''
        for cmd in self._commands.values():
            # Only expose commands whose filter permits this character.
            if cmd.filter.permits(char):
                cmd = cmd.specify(self, char)
                char.cmd_dict[str(cmd)] = cmd

    def remove_cmds(self, char: Character):
        '''Remove all of this item's commands from char.'''
        for cmd in self._commands.values():
            cmd = cmd.specify(self, char)
            try:
                char.cmd_dict.remove_value(str(cmd), cmd)
            except (KeyError, ValueError):
                # Command was not in cmd_dict (e.g. it was shadowed or its
                # filter never permitted this character); nothing to remove.
                pass

    # --- hooks that subclasses may override ---

    def on_equip(self, char: Character):
        """Hook fired when this item is equipped; no-op by default."""

    def on_unequip(self, char: Character):
        """Hook fired when this item is unequipped; no-op by default."""
class Usable(abc.ABC):
    '''Check whether an item implements 'on_use', the Pythonic way.

    isinstance(item_obj, Usable)
    is roughly equivalent to
    isinstance(item_obj, Item) and item_obj has an 'on_use' method

    Do not attempt to derive a new item type from this class!
    If you want to make a 'Usable' item, simply derive from Item and add
    an 'on_use' method yourself.
    '''

    @classmethod
    def __subclasshook__(cls, subclass):
        # Only answer the question for Usable itself; anything else falls
        # back to the default subclass machinery.
        if cls is not Usable:
            return NotImplemented
        # An item class counts as Usable when it defines a callable 'on_use'.
        on_use = getattr(subclass, "on_use", None)
        return isinstance(subclass, ItemClass) and callable(on_use)
|
en
| 0.828205
|
This module provides base classes for Items. item.Item acts as a base class for all items. Besides providing a few skeletal methods to help with serialization and user interaction, this class is relatively straightforward. item.Usable is an abstract class used for checking ItemClasses. A developer may create a 'Usable' class by simply providing an 'on_use' method. item.Equippable is a subclass of Item that provides additional features, including support for character.Command methods. #TODO: add interact and perceive filters Metaclass establishing behavior for all items returns a string representation of this class Base class for all Item classes. Implement 'on_use' to make a Usable item. To trigger certain code when this item is picked up or dropped, override 'on_pickup' or 'on_drop', respectively. # default label, can be overriden Return a simple representation of this item. By default, str(item) returns the name of the item's class. # these methods can be overriden override to trigger effects when this item is picked up override to trigger effects when this item is dropped # serialization-related methods default implementation of load, calls init with no args no post-load actions required by default implementation return a pythonic representation of this object this base class has no fields, so no data is returned Metaclass for all items that can be equipped # ensure that developers have added an equip target # note that this is essentially the same as CharacterClass # first gather the Commands defined in this class # now gather all commands, with the most recent commands exposed Base class for all Equippable items. You must define your own "target" like so: target = inv.EquipTarget("right arm") To trigger effects when the item is equipped or unequipped, override the 'on_equip' or 'on_unequip' methods. By default, any methods decorated with @character.Command will be added to the player's equip_dict when equipped. Add all the commands from this item to the char. 
Any conflicting commands are simply shadowed remove all the commands from this item from char # command was not in cmd_dict # these methods can be overriden override to trigger effects when this item is equipped override to trigger effects when this item is unequipped Use to Check if an item implements 'on_use' in a Pythonic way. isinstance(item_obj, Usable) is roughly equivalent to isinstance(item_obj, Item) and item_obj has an 'on_use' method Do not attempt to derive a new item type from this class! If you want to make a 'Usable' item, simply derive from Item and add an 'on_use' method yourself.
| 3.20565
| 3
|
modules/users/domain/services/create_user_service.py
|
eduardolujan/hexagonal_architecture_django
| 6
|
6629450
|
# -*- coding: utf-8 -*-
from modules.users.domain.entities import User as UserEntity
from modules.users.domain.domain_events import CreateUserDomainEvent
from modules.users.domain.value_objects import (UserId,
Username,
Password,
Email,)
class CreateUserService:
    """
    Create user entities.
    """

    @staticmethod
    def create_user_entity(id: UserId = None,
                           username: Username = None,
                           password: Password = None,
                           email: Email = None):
        """Validate the value objects, build a User entity, and record a
        CreateUserDomainEvent on it.

        Raises:
            ValueError: if any argument is not the expected value object.
        """
        # Validate every argument against its expected value-object type in
        # one place (replaces four copy-pasted isinstance blocks with
        # inconsistently punctuated messages).
        expected = (("id", id, UserId),
                    ("username", username, Username),
                    ("password", password, Password),
                    ("email", email, Email))
        for name, value, value_type in expected:
            if not isinstance(value, value_type):
                raise ValueError(f"Parameter {name}: {value} "
                                 f"is not instance of {value_type.__name__}")
        user_entity = UserEntity(
            id=id,
            username=username,
            password=password,
            email=email
        )
        # Record the domain event so it can be published later.
        create_user_domain_event = CreateUserDomainEvent(
            id=id.value,
            username=username.value,
            password=password.value,
            email=email.value)
        user_entity.record(create_user_domain_event)
        return user_entity
|
# -*- coding: utf-8 -*-
from modules.users.domain.entities import User as UserEntity
from modules.users.domain.domain_events import CreateUserDomainEvent
from modules.users.domain.value_objects import (UserId,
Username,
Password,
Email,)
class CreateUserService:
    """
    Create user entities.
    """

    @staticmethod
    def create_user_entity(id: UserId = None,
                           username: Username = None,
                           password: Password = None,
                           email: Email = None):
        """Validate the value objects, build a User entity, and record a
        CreateUserDomainEvent on it.

        Raises:
            ValueError: if any argument is not the expected value object.
        """
        # Validate every argument against its expected value-object type in
        # one place (replaces four copy-pasted isinstance blocks with
        # inconsistently punctuated messages).
        expected = (("id", id, UserId),
                    ("username", username, Username),
                    ("password", password, Password),
                    ("email", email, Email))
        for name, value, value_type in expected:
            if not isinstance(value, value_type):
                raise ValueError(f"Parameter {name}: {value} "
                                 f"is not instance of {value_type.__name__}")
        user_entity = UserEntity(
            id=id,
            username=username,
            password=password,
            email=email
        )
        # Record the domain event so it can be published later.
        create_user_domain_event = CreateUserDomainEvent(
            id=id.value,
            username=username.value,
            password=password.value,
            email=email.value)
        user_entity.record(create_user_domain_event)
        return user_entity
|
en
| 0.822542
|
# -*- coding: utf-8 -*- Create user entities
| 2.720487
| 3
|