hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f5442fda543ea143316e891a5b0389f7115be0b9 | 789 | py | Python | baekjoon/11292.py | GihwanKim/Baekjoon | 52eb2bf80bb1243697858445e5b5e2d50d78be4e | [
"MIT"
] | null | null | null | baekjoon/11292.py | GihwanKim/Baekjoon | 52eb2bf80bb1243697858445e5b5e2d50d78be4e | [
"MIT"
] | null | null | null | baekjoon/11292.py | GihwanKim/Baekjoon | 52eb2bf80bb1243697858445e5b5e2d50d78be4e | [
"MIT"
] | null | null | null | """
11292 : 키 큰 사람
URL : https://www.acmicpc.net/problem/11292
Input :
3
John 1.75
Mary 1.64
Sam 1.81
2
Jose 1.62
Miguel 1.58
5
John 1.75
Mary 1.75
Sam 1.74
Jose 1.75
Miguel 1.75
0
Output :
Sam
Jose
John Mary Jose Miguel
"""
# Read groups of "name height" records until a group size of 0 is given.
# For each group, print (space-separated, in input order) the names of all
# students who share the tallest height in that group.
while True:
    count = int(input())
    if count == 0:
        break
    tallest = 0
    leaders = []
    for _ in range(count):
        name, raw_height = input().split()
        value = float(raw_height)
        if value > tallest:
            # new maximum: restart the leader list with this student
            tallest, leaders = value, [name]
        elif value == tallest:
            # ties with the current maximum join the list
            leaders.append(name)
    print(' '.join(leaders))
| 18.785714 | 47 | 0.474018 |
59e714147166c65d1e080b89188625142c4390c8 | 25,994 | py | Python | scikits/crab/models/classes.py | wnyc/crab | 5f08cac958a930df82518a0216a3f2fa39a4e904 | [
"BSD-3-Clause"
] | 3 | 2015-06-02T16:43:05.000Z | 2020-05-27T22:30:36.000Z | scikits/crab/models/classes.py | wnyc/crab | 5f08cac958a930df82518a0216a3f2fa39a4e904 | [
"BSD-3-Clause"
] | null | null | null | scikits/crab/models/classes.py | wnyc/crab | 5f08cac958a930df82518a0216a3f2fa39a4e904 | [
"BSD-3-Clause"
] | null | null | null | #-*- coding:utf-8 -*-
"""
Several Basic Data models.
"""
# Authors: Marcel Caraciolo <marcel@muricoca.com>
# License: BSD Style
import numpy as np
from .base import BaseDataModel
from .utils import UserNotFoundError, ItemNotFoundError
import logging
logger = logging.getLogger('crab')
###############################################################################
# MatrixDataModel
class MatrixPreferenceDataModel(BaseDataModel):
    '''
    Matrix with preferences based Data model.

    A DataModel backed by a plain python dict, where each top-level key is
    a user ID mapping to a dict of ``{item_id: preference}``::

        {userID: {itemID: preference, itemID2: preference2},
         userID2: {itemID: preference3, itemID4: preference5}}

    The preference value expresses the degree of preference the user has
    for an item. Internally the data is flattened into a dense numpy
    matrix (users x items) with ``np.NaN`` marking "no preference".

    Parameters
    ----------
    dataset: dict, shape = {userID: {itemID: preference, ...}, ...}

    Examples
    ---------
    >>> from scikits.crab.models.classes import MatrixPreferenceDataModel
    >>> model = MatrixPreferenceDataModel(
    ...     {'Alice': {'Superman Returns': 3.5, 'Just My Luck': 3.0},
    ...      'Bob': {}})
    >>> model.users_count()
    2
    >>> model.preferences_from_user('Alice')
    [('Just My Luck', 3.0), ('Superman Returns', 3.5)]
    '''
    def __init__(self, dataset):
        BaseDataModel.__init__(self)
        self.dataset = dataset
        self.build_model()

    def __getitem__(self, user_id):
        return self.preferences_from_user(user_id)

    def __iter__(self):
        for user in self.user_ids():
            yield user, self[user]

    def __len__(self):
        # BUGFIX: __len__ must return a non-negative int. The previous
        # implementation returned the shape *tuple*, so len(model) raised
        # TypeError. Report the number of users instead.
        return self.index.shape[0]

    def build_model(self):
        '''
        Build (or rebuild) the dense user x item preference matrix.

        Returns
        -------
        self:
            Build the data model
        '''
        # list() is required on Python 3, where dict.keys() returns a view
        # that numpy would otherwise wrap as a 0-d object array.
        self._user_ids = np.asanyarray(list(self.dataset.keys()))
        self._user_ids.sort()

        self._item_ids = []
        # dict.values() works on both Python 2 and 3 (itervalues() is
        # Python 2 only and raised AttributeError on Python 3).
        for items in self.dataset.values():
            self._item_ids.extend(items.keys())
        self._item_ids = np.unique(np.array(self._item_ids))
        self._item_ids.sort()

        self.max_pref = -np.inf
        self.min_pref = np.inf

        # lazy %-style logging args: only formatted if the level is enabled
        logger.info("creating matrix for %d users and %d items",
                    self._user_ids.size, self._item_ids.size)

        # NaN marks the absence of a preference for a (user, item) pair.
        self.index = np.empty(shape=(self._user_ids.size, self._item_ids.size))
        self.index[:] = np.NaN

        itemnos = {item_id: itemno
                   for itemno, item_id in enumerate(self._item_ids)}
        for userno, user_id in enumerate(self._user_ids):
            if userno % 2 == 0:
                logger.debug("PROGRESS: at user_id #%i/%i",
                             userno, self._user_ids.size)
            for item_id, preference in self.dataset[user_id].items():
                self.index[userno, itemnos[item_id]] = preference

        # nanmax/nanmin on an empty array raise, so only probe when the
        # matrix has at least one cell.
        if self.index.size:
            self.max_pref = np.nanmax(self.index)
            self.min_pref = np.nanmin(self.index)

    def user_ids(self):
        '''
        Returns
        -------
        self.user_ids: numpy array of shape [n_user_ids]
            Return all user ids in the model, in order
        '''
        return self._user_ids

    def item_ids(self):
        '''
        Returns
        -------
        self.item_ids: numpy array of shape [n_item_ids]
            Return all item ids in the model, in order
        '''
        return self._item_ids

    def preference_values_from_user(self, user_id):
        '''
        Return user's preferences values (the raw matrix row) as an array.

        Raises
        ------
        UserNotFoundError: if user_id is not in the model.

        Notes
        --------
        This method is a particular method in MatrixDataModel
        '''
        user_id_loc = np.where(self._user_ids == user_id)
        if not user_id_loc[0].size:
            # user_id not found
            raise UserNotFoundError
        return self.index[user_id_loc]

    def preferences_from_user(self, user_id, order_by_id=True):
        '''
        Return the user's (item_id, preference) pairs.

        Parameters
        ----------
        user_id: ID of the user whose preferences are wanted.
        order_by_id: bool, order by item ID if True, otherwise by
            descending preference value.

        Returns
        -------
        list of (item_id, preference) tuples with NaN (missing) entries
        removed.

        Raises
        ------
        UserNotFoundError: if user_id is not in the model.
        '''
        preferences = self.preference_values_from_user(user_id)
        data = zip(self._item_ids, preferences.flatten())
        # drop NaN entries (items the user has not rated); building the
        # filtered list once removes the duplicated comprehension that
        # previously appeared in both branches.
        pairs = [(item_id, preference) for item_id, preference in data
                 if not np.isnan(preference)]
        if order_by_id:
            return pairs
        return sorted(pairs, key=lambda item: -item[1])

    def has_preference_values(self):
        '''
        Returns
        -------
        True/False: bool
            Return True if this implementation actually
            it is not a 'boolean' data model, otherwise returns False.
        '''
        return True

    def maximum_preference_value(self):
        '''
        Returns
        ---------
        self.max_preference: float
            Return the maximum preference value that is possible in the
            current problem domain being evaluated.
        '''
        return self.max_pref

    def minimum_preference_value(self):
        '''
        Returns
        ---------
        self.min_preference: float
            Returns the minimum preference value that is possible in the
            current problem domain being evaluated
        '''
        return self.min_pref

    def users_count(self):
        '''
        Returns
        --------
        n_users: int
            Return total number of users known to the model.
        '''
        return self._user_ids.size

    def items_count(self):
        '''
        Returns
        --------
        n_items: int
            Return total number of items known to the model.
        '''
        return self._item_ids.size

    def items_from_user(self, user_id):
        '''
        Returns
        -------
        items_from_user : list of item ids
            Return IDs of items user expresses a preference for
        '''
        preferences = self.preferences_from_user(user_id)
        return [key for key, value in preferences]

    def preferences_for_item(self, item_id, order_by_id=True):
        '''
        Return all existing (user_id, preference) pairs for the item.

        Parameters
        ----------
        item_id: ID of the item whose preferences are wanted.
        order_by_id: bool, order by user ID if True, otherwise by
            descending preference value.

        Raises
        ------
        ItemNotFoundError: if item_id is not in the model.
        '''
        item_id_loc = np.where(self._item_ids == item_id)
        if not item_id_loc[0].size:
            # item_id not found
            raise ItemNotFoundError('Item not found')
        preferences = self.index[:, item_id_loc]
        data = zip(self._user_ids, preferences.flatten())
        # drop NaN entries (users who have not rated this item)
        pairs = [(user_id, preference) for user_id, preference in data
                 if not np.isnan(preference)]
        if order_by_id:
            return pairs
        return sorted(pairs, key=lambda user: -user[1])

    def preference_value(self, user_id, item_id):
        '''
        Retrieve the preference value for a single user and item.

        Returns
        -------
        preference: float (np.NaN when no preference is recorded)

        Raises
        ------
        UserNotFoundError / ItemNotFoundError: for unknown ids.
        '''
        item_id_loc = np.where(self._item_ids == item_id)
        user_id_loc = np.where(self._user_ids == user_id)
        if not user_id_loc[0].size:
            raise UserNotFoundError('user_id in the model not found')
        if not item_id_loc[0].size:
            raise ItemNotFoundError('item_id in the model not found')
        return self.index[user_id_loc, item_id_loc].flatten()[0]

    def set_preference(self, user_id, item_id, value):
        '''
        Set a particular preference (item plus rating) for a user.

        The underlying dataset is updated and the matrix rebuilt. New
        item ids are allowed (the item dimension grows); the user must
        already exist.

        Raises
        ------
        UserNotFoundError: if user_id is not in the model.
        '''
        user_id_loc = np.where(self._user_ids == user_id)
        if not user_id_loc[0].size:
            raise UserNotFoundError('user_id in the model not found')
        # NOTE: new items are deliberately allowed here, so no
        # ItemNotFoundError check is performed.
        self.dataset[user_id][item_id] = value
        self.build_model()

    def remove_preference(self, user_id, item_id):
        '''
        Remove a particular preference for a user and rebuild the matrix.

        Raises
        ------
        UserNotFoundError / ItemNotFoundError: for unknown ids.
        '''
        user_id_loc = np.where(self._user_ids == user_id)
        item_id_loc = np.where(self._item_ids == item_id)
        if not user_id_loc[0].size:
            raise UserNotFoundError('user_id in the model not found')
        if not item_id_loc[0].size:
            raise ItemNotFoundError('item_id in the model not found')
        del self.dataset[user_id][item_id]
        self.build_model()

    def __repr__(self):
        return "<MatrixPreferenceDataModel (%d by %d)>" % (self.index.shape[0],
                                                           self.index.shape[1])

    def _repr_matrix(self, matrix):
        # Render a float matrix with fixed-width cells; NaN cells show "---".
        s = ""
        cellWidth = 11
        shape = matrix.shape
        for i in range(shape[0]):
            for j in range(shape[1]):
                v = matrix[i, j]
                if np.isnan(v):
                    s += "---".center(cellWidth)
                else:
                    # choose fixed-point for "small" magnitudes, scientific
                    # notation otherwise
                    exp = np.log(abs(v))
                    if abs(exp) <= 4:
                        if exp < 0:
                            s += ("%9.6f" % v).ljust(cellWidth)
                        else:
                            s += ("%9.*f" % (6, v)).ljust(cellWidth)
                    else:
                        s += ("%9.2e" % v).ljust(cellWidth)
            s += "\n"
        return s[:-1]

    def __unicode__(self):
        """
        Write out a representative picture of this matrix.
        The upper left corner of the matrix will be shown, with up to 20x5
        entries, and the rows and columns will be labeled with up to 8
        characters.
        """
        matrix = self._repr_matrix(self.index[:20, :5])
        lines = matrix.split('\n')
        headers = [repr(self)[1:-1]]
        if self._item_ids.size:
            # str() replaces the Python 2-only unicode() builtin
            col_headers = [('%-8s' % str(item)[:8]) for item in self._item_ids[:5]]
            headers.append(' ' + (' '.join(col_headers)))
        if self._user_ids.size:
            for (i, line) in enumerate(lines):
                lines[i] = ('%-8s' % str(self._user_ids[i])[:8]) + line
        for (i, line) in enumerate(headers):
            if i > 0:
                headers[i] = ' ' * 8 + line
        lines = headers + lines
        if self.index.shape[1] > 5 and self.index.shape[0] > 0:
            lines[1] += ' ...'
        if self.index.shape[0] > 20:
            lines.append('...')
        return '\n'.join(line.rstrip() for line in lines)

    def __str__(self):
        # Python 3: __str__ must return text, not utf-8-encoded bytes.
        return self.__unicode__()
###############################################################################
# MatrixBooleanDataModel
class MatrixBooleanPrefDataModel(BaseDataModel):
    '''
    Matrix with preferences based Boolean Data model.

    This class expects a simple dictionary where each element contains a
    userID followed by the itemIDs the user expresses a preference for.
    It also accepts the rated-dict form used by MatrixPreferenceDataModel,
    in which case the rating values are discarded and only item presence
    is kept.

    The "preference" is the presence of the item in the list of
    preferences for that user.

    Parameters
    ----------
    dataset: dict, either
        {userID: {itemID: preference, itemID2: preference2}, ...} or
        {userID: [itemID, itemID2, itemID3], ...}

    Examples
    ---------
    >>> from scikits.crab.models.classes import MatrixBooleanPrefDataModel
    >>> model = MatrixBooleanPrefDataModel(
    ...     {'Alice': ['Superman Returns', 'Just My Luck'], 'Bob': []})
    >>> model.users_count()
    2
    >>> list(model.preferences_from_user('Alice'))
    ['Just My Luck', 'Superman Returns']
    '''
    def __init__(self, dataset):
        BaseDataModel.__init__(self)
        self.dataset = self._load_dataset(dataset.copy())
        self.build_model()

    def _load_dataset(self, dataset):
        '''
        Normalise the dataset to {user_id: [item_id, item_id2, ...]}.

        Returns
        -------
        dataset: dict of shape {user_id: [item_id, item_id2, ...]}
        '''
        if dataset:
            # next(iter(...)) replaces dataset.keys()[0], which raised
            # TypeError on Python 3 (dict views are not indexable).
            first_key = next(iter(dataset))
            if isinstance(dataset[first_key], dict):
                for key in dataset:
                    # materialise as a list: set_preference/remove_preference
                    # call .append()/.remove() on these values later, which
                    # a dict_keys view does not support.
                    dataset[key] = list(dataset[key].keys())
        return dataset

    def __getitem__(self, user_id):
        return self.preferences_from_user(user_id)

    def __iter__(self):
        for user in self.user_ids():
            yield user, self[user]

    def __len__(self):
        # BUGFIX: __len__ must return a non-negative int. The previous
        # implementation returned the shape *tuple*, so len(model) raised
        # TypeError. Report the number of users instead.
        return self.index.shape[0]

    def build_model(self):
        '''
        Build (or rebuild) the boolean user x item matrix.

        Returns
        -------
        self:
            Build the data model
        '''
        # list() is required on Python 3, where dict.keys() returns a view
        # that numpy would otherwise wrap as a 0-d object array.
        self._user_ids = np.asanyarray(list(self.dataset.keys()))
        self._user_ids.sort()

        self._item_ids = np.array([])
        # dict.values() works on both Python 2 and 3 (itervalues() does not)
        for items in self.dataset.values():
            self._item_ids = np.append(self._item_ids, items)
        self._item_ids = np.unique(self._item_ids)
        self._item_ids.sort()

        logger.info("creating matrix for %d users and %d items",
                    self._user_ids.size, self._item_ids.size)

        # Start from all-False and flip only the recorded preferences:
        # O(total preferences) instead of the previous O(users * items)
        # list-membership scan.
        self.index = np.zeros(
            shape=(self._user_ids.size, self._item_ids.size), dtype=bool)
        itemnos = {item_id: itemno
                   for itemno, item_id in enumerate(self._item_ids)}
        for userno, user_id in enumerate(self._user_ids):
            if userno % 2 == 0:
                logger.debug("PROGRESS: at user_id #%i/%i",
                             userno, self._user_ids.size)
            for item_id in self.dataset[user_id]:
                self.index[userno, itemnos[item_id]] = True

    def user_ids(self):
        '''
        Returns
        -------
        self.user_ids: numpy array of shape [n_user_ids]
            Return all user ids in the model, in order
        '''
        return self._user_ids

    def item_ids(self):
        '''
        Returns
        -------
        self.item_ids: numpy array of shape [n_item_ids]
            Return all item ids in the model, in order
        '''
        return self._item_ids

    def preference_values_from_user(self, user_id):
        '''
        Return user's preferences (the raw boolean matrix row) as an array.

        Raises
        ------
        UserNotFoundError: if user_id is not in the model.

        Notes
        --------
        This method is a particular method in MatrixDataModel
        '''
        user_id_loc = np.where(self._user_ids == user_id)
        if not user_id_loc[0].size:
            # user_id not found
            raise UserNotFoundError
        return self.index[user_id_loc]

    def preferences_from_user(self, user_id, order_by_id=True):
        '''
        Return the IDs of the items the user expresses a preference for.

        Returns
        -------
        numpy array of item ids (boolean model: no preference values).
        '''
        preferences = self.preference_values_from_user(user_id)
        # use the boolean row as a mask over the ordered item ids
        return self._item_ids[preferences.flatten()]

    def has_preference_values(self):
        '''
        Returns
        -------
        True/False: bool
            Return True if this implementation actually
            it is not a 'boolean' data model, otherwise returns False.
        '''
        return False

    def users_count(self):
        '''
        Returns
        --------
        n_users: int
            Return total number of users known to the model.
        '''
        return self._user_ids.size

    def items_count(self):
        '''
        Returns
        --------
        n_items: int
            Return total number of items known to the model.
        '''
        return self._item_ids.size

    def items_from_user(self, user_id):
        '''
        Returns
        -------
        items_from_user : numpy array of shape [item_id,..]
            Return IDs of items user expresses a preference for
        '''
        return self.preferences_from_user(user_id)

    def preferences_for_item(self, item_id, order_by_id=True):
        '''
        Return the IDs of all users who express a preference for the item.

        Raises
        ------
        ItemNotFoundError: if item_id is not in the model.
        '''
        item_id_loc = np.where(self._item_ids == item_id)
        if not item_id_loc[0].size:
            # item_id not found
            raise ItemNotFoundError('Item not found')
        preferences = self.index[:, item_id_loc]
        # use the boolean column as a mask over the ordered user ids
        return self._user_ids[preferences.flatten()]

    def preference_value(self, user_id, item_id):
        '''
        Retrieve the preference value for a single user and item.

        Returns
        -------
        preference: float
            1.0 when the preference is present, np.NaN otherwise.

        Raises
        ------
        UserNotFoundError / ItemNotFoundError: for unknown ids.
        '''
        item_id_loc = np.where(self._item_ids == item_id)
        user_id_loc = np.where(self._user_ids == user_id)
        if not user_id_loc[0].size:
            raise UserNotFoundError('user_id in the model not found')
        if not item_id_loc[0].size:
            raise ItemNotFoundError('item_id in the model not found')
        return 1.0 if self.index[user_id_loc, item_id_loc].flatten()[0] else np.NaN

    def set_preference(self, user_id, item_id, value=None):
        '''
        Set a particular preference for a user and rebuild the matrix.

        The `value` argument is accepted for interface compatibility but
        ignored (boolean model). New item ids are allowed; the user must
        already exist.

        Raises
        ------
        UserNotFoundError: if user_id is not in the model.
        '''
        user_id_loc = np.where(self._user_ids == user_id)
        if not user_id_loc[0].size:
            raise UserNotFoundError('user_id in the model not found')
        # NOTE: new items are deliberately allowed here, so no
        # ItemNotFoundError check is performed.
        self.dataset[user_id].append(item_id)
        self.build_model()

    def remove_preference(self, user_id, item_id):
        '''
        Remove a particular preference for a user and rebuild the matrix.

        Raises
        ------
        UserNotFoundError / ItemNotFoundError: for unknown ids.
        '''
        user_id_loc = np.where(self._user_ids == user_id)
        item_id_loc = np.where(self._item_ids == item_id)
        if not user_id_loc[0].size:
            raise UserNotFoundError('user_id in the model not found')
        if not item_id_loc[0].size:
            raise ItemNotFoundError('item_id in the model not found')
        self.dataset[user_id].remove(item_id)
        self.build_model()

    def maximum_preference_value(self):
        '''
        Returns
        ---------
        self.max_preference: float
            Return the maximum preference value that is possible in the
            current problem domain being evaluated.
        '''
        return 1.0

    def minimum_preference_value(self):
        '''
        Returns
        ---------
        self.min_preference: float
            Returns the minimum preference value that is possible in the
            current problem domain being evaluated
        '''
        return 0.0

    def __repr__(self):
        return "<MatrixBooleanPrefDataModel (%d by %d)>" % (self.index.shape[0],
                                                            self.index.shape[1])

    def _repr_matrix(self, matrix):
        # Render a boolean matrix with fixed-width cells; False cells show
        # "---", True cells render as 1.000000 (same output the previous
        # float-formatting branch produced for booleans).
        s = ""
        cellWidth = 11
        shape = matrix.shape
        for i in range(shape[0]):
            for j in range(shape[1]):
                v = matrix[i, j]
                if v:
                    s += ("%9.6f" % v).ljust(cellWidth)
                else:
                    s += "---".center(cellWidth)
            s += "\n"
        return s[:-1]

    def __unicode__(self):
        """
        Write out a representative picture of this matrix.
        The upper left corner of the matrix will be shown, with up to 20x5
        entries, and the rows and columns will be labeled with up to 8
        characters.
        """
        matrix = self._repr_matrix(self.index[:20, :5])
        lines = matrix.split('\n')
        headers = [repr(self)[1:-1]]
        if self._item_ids.size:
            # str() replaces the Python 2-only unicode() builtin
            col_headers = [('%-8s' % str(item)[:8]) for item in self._item_ids[:5]]
            headers.append(' ' + (' '.join(col_headers)))
        if self._user_ids.size:
            for (i, line) in enumerate(lines):
                lines[i] = ('%-8s' % str(self._user_ids[i])[:8]) + line
        for (i, line) in enumerate(headers):
            if i > 0:
                headers[i] = ' ' * 8 + line
        lines = headers + lines
        if self.index.shape[1] > 5 and self.index.shape[0] > 0:
            lines[1] += ' ...'
        if self.index.shape[0] > 20:
            lines.append('...')
        return '\n'.join(line.rstrip() for line in lines)

    def __str__(self):
        # Python 3: __str__ must return text, not utf-8-encoded bytes.
        return self.__unicode__()
| 34.02356 | 94 | 0.552897 |
ce305c6476cdb43873a57e09e97428cdeb5103b3 | 26,320 | py | Python | elasticsearch_django/models.py | octoenergy/elasticsearch-django | 4b24fb8bb5729d950c8d56740f8be0acb336de1c | [
"MIT"
] | 87 | 2016-09-04T06:24:04.000Z | 2022-02-01T01:43:47.000Z | elasticsearch_django/models.py | octoenergy/elasticsearch-django | 4b24fb8bb5729d950c8d56740f8be0acb336de1c | [
"MIT"
] | 28 | 2016-12-09T22:48:29.000Z | 2021-04-07T11:01:34.000Z | elasticsearch_django/models.py | octoenergy/elasticsearch-django | 4b24fb8bb5729d950c8d56740f8be0acb336de1c | [
"MIT"
] | 31 | 2017-01-30T12:31:47.000Z | 2022-02-03T17:22:03.000Z | from __future__ import annotations
import logging
import time
from typing import TYPE_CHECKING, Any, cast
from django.conf import settings
from django.core.cache import cache
from django.core.serializers.json import DjangoJSONEncoder
from django.db import models
from django.db.models.expressions import RawSQL
from django.db.models.fields import CharField
from django.db.models.query import QuerySet
from django.utils.timezone import now as tz_now
from django.utils.translation import gettext_lazy as _lazy
from elasticsearch_dsl import Search
from .compat import JSONField
from .settings import (
get_client,
get_model_index_properties,
get_model_indexes,
get_setting,
)
if TYPE_CHECKING:
from django.contrib.auth.models import AbstractBaseUser
logger = logging.getLogger(__name__)
UPDATE_STRATEGY_FULL = "full"
UPDATE_STRATEGY_PARTIAL = "partial"
UPDATE_STRATEGY = get_setting("update_strategy", UPDATE_STRATEGY_FULL)
class SearchDocumentManagerMixin(models.Manager):
    """
    Model manager mixin that adds search document methods.
    There is one method in this class that must implemented -
    `get_search_queryset`. This must return a queryset that is the
    set of objects to be indexed. This queryset is then converted
    into a generator that emits the objects as JSON documents.
    """

    def get_search_queryset(self, index: str = "_all") -> QuerySet:
        """
        Return the dataset used to populate the search index.
        Kwargs:
            index: string, the name of the index we are interested in -
                this allows us to have different sets of objects in
                different indexes. Defaults to '_all', in which case
                all indexes index the same set of objects.
        This must return a queryset object.
        """
        # abstract-by-convention: subclasses must override this method
        raise NotImplementedError(
            "{} does not implement 'get_search_queryset'.".format(
                self.__class__.__name__
            )
        )

    def in_search_queryset(self, instance_id: int, index: str = "_all") -> bool:
        """
        Return True if an object is part of the search index queryset.
        Sometimes it's useful to know if an object _should_ be indexed. If
        an object is saved, how do you know if you should push that change
        to the search index? The simplest (albeit not most efficient) way
        is to check if it appears in the underlying search queryset.
        NB this method doesn't evaluate the entire dataset, it chains an
        additional queryset filter expression on the end. That's why it's
        important that the `get_search_queryset` method returns a queryset.
        Args:
            instance_id: the id of model object that we are looking for.
        Kwargs:
            index: string, the name of the index in which to check.
                Defaults to '_all'.
        """
        # .exists() issues a single cheap EXISTS query rather than
        # materialising the queryset
        return self.get_search_queryset(index=index).filter(pk=instance_id).exists()

    def from_search_query(self, search_query: SearchQuery) -> QuerySet:
        """
        Return queryset of objects from SearchQuery.results, **in order**.
        EXPERIMENTAL: this will only work with results from a single index,
        with a single doc_type - as we are returning a single QuerySet.
        This method takes the hits JSON and converts that into a queryset
        of all the relevant objects. The key part of this is the ordering -
        the order in which search results are returned is based on relevance,
        something that only ES can calculate, and that cannot be replicated
        in the database.
        It does this by adding custom SQL which annotates each record with
        the score from the search 'hit'. This is brittle, caveat emptor.
        The RawSQL clause is in the form:
            SELECT CASE {{model}}.id WHEN {{id}} THEN {{score}} END
        The "WHEN x THEN y" is repeated for every hit. The resulting SQL, in
        full is like this:
            SELECT "freelancer_freelancerprofile"."id",
            (SELECT CASE freelancer_freelancerprofile.id
                WHEN 25 THEN 1.0
                WHEN 26 THEN 1.0
                [...]
                ELSE 0
            END) AS "search_score"
            FROM "freelancer_freelancerprofile"
            WHERE "freelancer_freelancerprofile"."id" IN (25, 26, [...])
            ORDER BY "search_score" DESC
        It should be very fast, as there is no table lookup, but there is an
        assumption at the heart of this, which is that the search query doesn't
        contain the entire database - i.e. that it has been paged. (ES itself
        caps the results at 10,000.)
        """
        hits = search_query.hits
        # 'score' can be falsy (e.g. None), hence the `or 0` fallback
        score_sql = self._raw_sql([(h["id"], h["score"] or 0) for h in hits])
        # rank is simply the 0-based position of the hit in the results
        rank_sql = self._raw_sql([(hits[i]["id"], i) for i in range(len(hits))])
        return (
            self.get_queryset()
            .filter(pk__in=[h["id"] for h in hits])
            # add the query relevance score
            .annotate(search_score=RawSQL(score_sql, ()))  # noqa: S611
            # add the ordering number (0-based)
            .annotate(search_rank=RawSQL(rank_sql, ()))  # noqa: S611
            .order_by("search_rank")
        )

    def _when(self, x: str | int, y: str | int) -> str:
        # single "WHEN x THEN y" fragment of the generated CASE expression
        return "WHEN {} THEN {}".format(x, y)

    def _raw_sql(self, values: list[tuple[str | int, str | int]]) -> str:
        """Prepare SQL statement consisting of a sequence of WHEN .. THEN statements."""
        if isinstance(self.model._meta.pk, CharField):
            # string primary keys must be quoted as SQL literals
            when_clauses = " ".join(
                [self._when("'{}'".format(x), y) for (x, y) in values]
            )
        else:
            when_clauses = " ".join([self._when(x, y) for (x, y) in values])
        table_name = self.model._meta.db_table
        primary_key = self.model._meta.pk.column
        # ELSE 0 ensures rows not present in `values` score/rank as zero
        return 'SELECT CASE {}."{}" {} ELSE 0 END'.format(
            table_name, primary_key, when_clauses
        )
class SearchDocumentMixin(object):
"""
Mixin used by models that are indexed for ES.
This mixin defines the interface exposed by models that
are indexed ready for ES. The only method that needs
implementing is `as_search_document`.
"""
# Django model field types that can be serialized directly into
# a known format. All other types will need custom serialization.
# Used by as_search_document_update method
SIMPLE_UPDATE_FIELD_TYPES = [
"ArrayField",
"AutoField",
"BooleanField",
"CharField",
"DateField",
"DateTimeField",
"DecimalField",
"EmailField",
"FloatField",
"IntegerField",
"TextField",
"URLField",
"UUIDField",
]
@property
def search_indexes(self) -> list[str]:
    """Names of the search indexes this model is configured to appear in."""
    return get_model_indexes(type(self))
@property
def search_document_cache_key(self) -> str:
    """Key used for storing search docs in local cache."""
    meta = self._meta  # type: ignore
    return f"elasticsearch_django:{meta.app_label}.{meta.model_name}.{self.pk}"
@property
def search_doc_type(self) -> str:
    """Deprecated: mapping types no longer exist as of Elasticsearch 7.x."""
    raise DeprecationWarning("Mapping types have been removed from ES7.x")
def as_search_document(self, *, index: str) -> dict:
    """
    Return the object as represented in a named index.

    This is named to avoid confusion - if it was `get_search_document`,
    which would be the logical name, it would not be clear whether it
    referred to getting the local representation of the search document,
    or actually fetching it from the index.

    Kwargs:
        index: string, the name of the index in which the object is to
            appear - this allows different representations in different
            indexes. Defaults to '_all', in which case all indexes use
            the same search document structure.

    Returns a dictionary.
    """
    # abstract-by-convention: concrete models must provide their own
    # document serialization
    raise NotImplementedError(
        f"{self.__class__.__name__} does not implement 'as_search_document'."
    )
def _is_field_serializable(self, field_name: str) -> bool:
"""Return True if the field can be serialized into a JSON doc."""
return (
self._meta.get_field(field_name).get_internal_type() # type: ignore
in self.SIMPLE_UPDATE_FIELD_TYPES
)
def clean_update_fields(self, index: str, update_fields: list[str]) -> list[str]:
    """
    Clean the list of update_fields based on the index being updated.

    If any field in the update_fields list is not in the set of properties
    defined by the index mapping for this model, then we ignore it. If
    a field _is_ in the mapping, but the underlying model field is a
    related object, and thereby not directly serializable, then this
    method will raise a ValueError.
    """
    search_fields = get_model_index_properties(self, index)
    clean_fields = [f for f in update_fields if f in search_fields]
    ignore = [f for f in update_fields if f not in search_fields]
    if ignore:
        # reuse the already-computed list instead of rebuilding the
        # comprehension inside the log call
        logger.debug("Ignoring fields from partial update: %s", ignore)
    for f in clean_fields:
        if not self._is_field_serializable(f):
            # BUGFIX: previously the format string and the field name were
            # passed as two separate ValueError args, so the '%s' was never
            # interpolated and the exception carried a (str, str) tuple.
            raise ValueError(
                f"'{f}' cannot be automatically serialized into a search "
                "document property. Please override as_search_document_update."
            )
    return clean_fields
def as_search_document_update(
    self, *, index: str, update_fields: list[str]
) -> dict:
    """
    Return a partial update document based on which fields have been updated.

    Saving a model with `update_fields` is treated as a 'partial update',
    for which we need a {property: value} dict of just the changed fields.

    Two strategies are supported. The default 'full' strategy simply
    returns `as_search_document`, replacing the whole document each time.
    The 'partial' strategy keeps only the updated fields that appear in
    the index mapping, and raises ValueError if one of those cannot be
    automatically serialized (e.g. a RelatedField) - in which case this
    method should be overridden in the subclass, e.g. to serialize a
    related user as `self.user.get_full_name()`, or to skip no-op
    timestamp-only updates by returning an empty dict.

    Raises:
        ValueError: if the configured update strategy is unknown.
    """
    if UPDATE_STRATEGY == UPDATE_STRATEGY_FULL:
        return self.as_search_document(index=index)
    if UPDATE_STRATEGY == UPDATE_STRATEGY_PARTIAL:
        # Partial mode: intersect update_fields with the mapping properties.
        permitted = self.clean_update_fields(index=index, update_fields=update_fields)
        return {field: getattr(self, field) for field in permitted}
    raise ValueError("Invalid update strategy.")
def as_search_action(self, *, index: str, action: str) -> dict:
    """
    Return an object as represented in a bulk api operation.

    Bulk API operations have a very specific format: the standard
    `as_search_document` payload wrapped in the envelope required for the
    given action.
    https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html

    Args:
        index: string, the single index in which the action is taken.
        action: one of 'index' | 'update' | 'delete' - decides how the
            final document is formatted.

    Returns a dictionary.

    Raises:
        ValueError: for an unrecognised action.
    """
    if action not in ("index", "update", "delete"):
        raise ValueError("Action must be 'index', 'update' or 'delete'.")
    action_doc: dict = {
        "_index": index,
        "_op_type": action,
        "_id": self.pk,  # type: ignore
    }
    # 'delete' needs no payload; the other two carry the full document.
    if action == "index":
        action_doc["_source"] = self.as_search_document(index=index)
    elif action == "update":
        action_doc["doc"] = self.as_search_document(index=index)
    return action_doc
def fetch_search_document(self, *, index: str) -> dict:
    """Fetch the object's document from a search index by id."""
    # The primary key doubles as the Elasticsearch document id, so an
    # unsaved object cannot have a document to fetch.
    if not self.pk:  # type: ignore
        raise ValueError("Object must have a primary key before being indexed.")
    client = get_client()
    return client.get(index=index, id=self.pk)  # type: ignore
def index_search_document(self, *, index: str) -> None:
    """
    Create or replace the search document in the named index.

    Skips the round-trip to Elasticsearch when the freshly built document
    is identical to the locally cached copy; otherwise pushes the document
    and refreshes the cache. The cache timeout comes from the
    "cache_expiry" setting and defaults to 60s.
    """
    key = self.search_document_cache_key
    document = self.as_search_document(index=index)
    if cache.get(key) == document:
        logger.debug("Search document for %r is unchanged, ignoring update.", self)
        return
    cache.set(key, document, timeout=get_setting("cache_expiry", 60))
    get_client().index(index=index, body=document, id=self.pk)  # type: ignore
def update_search_document(self, *, index: str, update_fields: list[str]) -> None:
    """
    Push a partial update of the document in the named index.

    Partial updates are invoked by saving the model with 'update_fields'.
    Those fields are handed to `as_search_document_update` so it can build
    a minimal document - we deliberately avoid building the full source
    document (which may involve expensive relation walking) just to strip
    it down again; the canonical case is pushing a single timestamp.

    When POSTing a partial update the partial doc must be wrapped in a
    "doc" node, see:
    # noqa: E501, see: https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update.html
    """
    partial_doc = self.as_search_document_update(
        index=index, update_fields=update_fields
    )
    if not partial_doc:
        logger.debug("Ignoring object update as document is empty.")
        return
    get_client().update(
        index=index,
        id=self.pk,  # type: ignore
        body={"doc": partial_doc},
        retry_on_conflict=cast(int, get_setting("retry_on_conflict", 0)),
    )
def delete_search_document(self, *, index: str) -> None:
    """Delete document from named index."""
    # Drop the cached copy first so a later index call is not skipped by
    # the unchanged-document check.
    cache.delete(self.search_document_cache_key)
    get_client().delete(index=index, id=self.pk)  # type: ignore
class SearchQuery(models.Model):
    """
    Model used to capture ES queries and responses.

    For low-traffic sites it's useful to be able to replay searches, and
    to track how a user filtered and searched. This model can be used to
    store a search query and meta information about the results (document
    type, id and score).

    >>> from elasticsearch_dsl import Search
    >>> search = Search(using=client)
    >>> sq = SearchQuery.execute(search).save()
    """

    class TotalHitsRelation(models.TextChoices):
        """The hits.total.relation response value."""

        ACCURATE = "eq", _lazy("Accurate hit count")
        ESTIMATE = "gte", _lazy("Lower bound of total hits")

    class QueryType(models.TextChoices):
        """Whether this is a search query (returns results), or a count API
        query (returns the number of results, but no detail)."""

        SEARCH = "SEARCH", _lazy("Search results")
        COUNT = "COUNT", _lazy("Count only")

    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        related_name="search_queries",
        blank=True,
        null=True,
        help_text=_lazy("The user who made the search query (nullable)."),
        on_delete=models.SET_NULL,
    )
    index = models.CharField(
        max_length=100,
        default="_all",
        help_text=_lazy("The name of the ElasticSearch index(es) being queried."),
    )
    # The query property contains the raw DSL query, which can be arbitrarily
    # complex - there is no one way of mapping input text to the query itself.
    # However, it's often helpful to have the terms that the user themselves
    # typed easily accessible without having to parse JSON.
    search_terms = models.CharField(
        max_length=400,
        default="",
        blank=True,
        help_text=_lazy(
            "Free text search terms used in the query, stored for easy reference."
        ),
    )
    query = JSONField(
        help_text=_lazy("The raw ElasticSearch DSL query."), encoder=DjangoJSONEncoder
    )
    # CONSISTENCY FIX: declared via models.CharField like every other field
    # on this model (it was previously a bare CharField reference).
    query_type = models.CharField(
        help_text=_lazy("Does this query return results, or just the hit count?"),
        choices=QueryType.choices,
        default=QueryType.SEARCH,
        max_length=10,
    )
    hits = JSONField(
        help_text=_lazy(
            "The list of meta info for each of the query matches returned."
        ),
        encoder=DjangoJSONEncoder,
    )
    total_hits = models.IntegerField(
        default=0,
        help_text=_lazy(
            "Total number of matches found for the query (!= the hits returned)."
        ),
    )
    total_hits_relation = models.CharField(
        max_length=3,
        default="",
        blank=True,
        choices=TotalHitsRelation.choices,
        help_text=_lazy(
            "Indicates whether this is an exact match ('eq') or a lower bound ('gte')"
        ),
    )
    aggregations = JSONField(
        help_text=_lazy("The raw aggregations returned from the query."),
        encoder=DjangoJSONEncoder,
        default=dict,
    )
    reference = models.CharField(
        max_length=100,
        default="",
        blank=True,
        help_text=_lazy(
            "Custom reference used to identify and group related searches."
        ),
    )
    executed_at = models.DateTimeField(
        help_text=_lazy("When the search was executed - set via execute() method.")
    )
    duration = models.FloatField(
        help_text=_lazy("Time taken to execute the search itself, in seconds.")
    )

    class Meta:
        app_label = "elasticsearch_django"
        verbose_name = "Search query"
        verbose_name_plural = "Search queries"

    def __str__(self) -> str:
        return f"Query (id={self.pk}) run against index '{self.index}'"

    def __repr__(self) -> str:
        return (
            f"<SearchQuery id={self.pk} user={self.user} "
            f"index='{self.index}' total_hits={self.total_hits} >"
        )

    def save(self, *args: Any, **kwargs: Any) -> SearchQuery:
        """Save and return the object (for chaining)."""
        if self.search_terms is None:
            # Coerce NULL to empty string - the column is non-nullable.
            self.search_terms = ""
        # BUG FIX: previously only **kwargs was forwarded, silently dropping
        # positional arguments such as force_insert / force_update / using.
        super().save(*args, **kwargs)
        return self

    def _extract_set(self, _property: str) -> list[str | int]:
        # De-duplicate the given hit-meta property across all hits.
        return [] if self.hits is None else (list({h[_property] for h in self.hits}))

    @property
    def doc_types(self) -> list[str]:
        """List of doc_types extracted from hits."""
        raise DeprecationWarning("Mapping types have been removed from ES7.x")

    @property
    def max_score(self) -> int:
        """Max relevance score in the returned page."""
        return int(max(self._extract_set("score") or [0]))

    @property
    def min_score(self) -> int:
        """Min relevance score in the returned page."""
        return int(min(self._extract_set("score") or [0]))

    @property
    def object_ids(self) -> list[int]:
        """List of model ids extracted from hits."""
        return [int(x) for x in self._extract_set("id")]

    @property
    def page_slice(self) -> tuple[int, int] | None:
        """Return the query from:size tuple (0-based)."""
        return (
            None
            if self.query is None
            else (self.query.get("from", 0), self.query.get("size", 10))
        )

    @property
    def page_from(self) -> int:
        """1-based index of the first hit in the returned page."""
        if self.page_size == 0:
            return 0
        if not self.page_slice:
            return 0
        return self.page_slice[0] + 1

    @property
    def page_to(self) -> int:
        """1-based index of the last hit in the returned page."""
        return 0 if self.page_size == 0 else self.page_from + self.page_size - 1

    @property
    def page_size(self) -> int:
        """Return number of hits returned in this specific page."""
        return 0 if self.hits is None else len(self.hits)
def execute_search(
    search: Search,
    *,
    search_terms: str = "",
    user: AbstractBaseUser | None = None,
    reference: str | None = "",
    save: bool = True,
) -> SearchQuery:
    """
    Execute a search against ES and capture it as a new SearchQuery.

    Args:
        search: elasticsearch_dsl Search object, which internally contains
            the connection and query; the query is executed as-is - we only
            log the input and parse the output.
        search_terms: raw text the end user typed into the search box.
        user: Django User object making the query (nullable), for logging.
        reference: free-form string used to identify/group related searches.
        save: if True (the default) persist the SearchQuery immediately;
            pass False to avoid logging absolutely everything.
    """
    started_at = time.time()
    response = search.execute()
    hit_meta = [hit.meta.to_dict() for hit in response.hits]
    totals = response.hits.total
    aggs = response.aggregations.to_dict()
    elapsed = time.time() - started_at
    search_query = SearchQuery(
        user=user,
        search_terms=search_terms,
        index=", ".join(search._index or ["_all"])[:100],  # field length restriction
        query=search.to_dict(),
        query_type=SearchQuery.QueryType.SEARCH,
        hits=hit_meta,
        aggregations=aggs,
        total_hits=totals.value,
        total_hits_relation=totals.relation,
        reference=reference or "",
        executed_at=tz_now(),
        duration=elapsed,
    )
    # Keep the raw response available to the caller (not a model field).
    search_query.response = response
    return search_query.save() if save else search_query
def execute_count(
    search: Search,
    *,
    search_terms: str = "",
    user: AbstractBaseUser | None = None,
    reference: str | None = "",
    save: bool = True,
) -> SearchQuery:
    """
    Run a "count" against ES and store the result as a SearchQuery.

    Args:
        search: elasticsearch_dsl Search object, which internally contains
            the connection and query; the count is executed as-is - we only
            log the input and parse the output.
        search_terms: raw text the end user typed into the search box.
        user: Django User object making the query (nullable), for logging.
        reference: free-form string used to identify/group related searches.
        save: if True (the default) persist the SearchQuery immediately;
            pass False to avoid logging absolutely everything.
    """
    started_at = time.time()
    hit_count = search.count()
    elapsed = time.time() - started_at
    search_query = SearchQuery(
        user=user,
        search_terms=search_terms,
        index=", ".join(search._index or ["_all"])[:100],  # field length restriction
        query=search.to_dict(),
        query_type=SearchQuery.QueryType.COUNT,
        hits=[],
        aggregations={},
        total_hits=hit_count,
        total_hits_relation=SearchQuery.TotalHitsRelation.ACCURATE,
        reference=reference or "",
        executed_at=tz_now(),
        duration=elapsed,
    )
    # Keep the raw response available to the caller (not a model field).
    search_query.response = hit_count
    return search_query.save() if save else search_query
| 38.144928 | 107 | 0.630129 |
2ec51f1fcefc39d0874be31fed348978df5a89a0 | 3,517 | py | Python | pyleecan/Methods/Machine/Machine/check.py | IrakozeFD/pyleecan | 5a93bd98755d880176c1ce8ac90f36ca1b907055 | [
"Apache-2.0"
] | 95 | 2019-01-23T04:19:45.000Z | 2022-03-17T18:22:10.000Z | pyleecan/Methods/Machine/Machine/check.py | IrakozeFD/pyleecan | 5a93bd98755d880176c1ce8ac90f36ca1b907055 | [
"Apache-2.0"
] | 366 | 2019-02-20T07:15:08.000Z | 2022-03-31T13:37:23.000Z | pyleecan/Methods/Machine/Machine/check.py | IrakozeFD/pyleecan | 5a93bd98755d880176c1ce8ac90f36ca1b907055 | [
"Apache-2.0"
] | 74 | 2019-01-24T01:47:31.000Z | 2022-02-25T05:44:42.000Z | # -*- coding: utf-8 -*-
from ....Methods.Machine import MachineCheckError
def check(self):
    """Check that the Machine object is consistent.

    Verifies that the stator/rotor roles are set correctly, that exactly
    one of them is internal, that the inner lamination fits inside the
    outer one, that the shaft matches the rotor bore, and that the
    mechanical airgap is strictly positive. Finally recursively validates
    the rotor and the stator.

    Parameters
    ----------
    self :
        A Machine object

    Returns
    -------
    None

    Raises
    ------
    MC_AbstractError
        Machine is an abstract class
    MC_StatorNotStator
        self.stator.is_stator must be True
    MC_RotorIsStator
        self.rotor.is_stator must be False
    MC_BothInternal
        self.rotor.is_internal and self.stator.is_internal can't be both True
    MC_BothExternal
        self.rotor.is_internal and self.stator.is_internal can't be both False
    MC_RotorDontFit
        The Rotor is too big to fit in the Stator
    MC_StatorDontFit
        The Stator is too big to fit in the Rotor
    MC_ShaftTooBig
        The Shaft is too big to fit in the Rotor
    MC_ShaftTooSmall
        The Shaft is too small to fit in the Rotor
    MC_MecAirgapError
        The Stator and the rotor don't fit because of magnet or short circuit ring
    """
    if type(self).__name__ == "Machine":
        raise MC_AbstractError("Machine is an abstract class")
    if not self.stator.is_stator:
        raise MC_StatorNotStator("self.stator.is_stator must be True")
    if self.rotor.is_stator:
        raise MC_RotorIsStator("self.rotor.is_stator must be False")
    # BUG FIX: the two messages below previously read
    # "self.rotor.is_internal and self.rotor.is_internal" (rotor named
    # twice) instead of naming the stator.
    if self.rotor.is_internal and self.stator.is_internal:
        raise MC_BothInternal(
            "self.rotor.is_internal and self.stator.is_internal can't be both True"
        )
    if (not self.rotor.is_internal) and (not self.stator.is_internal):
        raise MC_BothExternal(
            "self.rotor.is_internal and self.stator.is_internal can't be both False"
        )
    if self.rotor.is_internal:
        # Internal rotor: rotor inside stator, shaft must exactly fill the bore.
        if self.rotor.Rext > self.stator.Rint:
            raise MC_RotorDontFit("The Rotor is too big to fit in the Stator")
        if self.shaft.Drsh / 2.0 > self.rotor.Rint:
            raise MC_ShaftTooBig("The Shaft is too big to fit in the Rotor")
        if self.shaft.Drsh / 2.0 < self.rotor.Rint:
            raise MC_ShaftTooSmall("The Shaft is too small to fit in the Rotor")
    else:
        # External rotor: the stator must fit inside the rotor.
        if self.stator.Rext > self.rotor.Rint:
            raise MC_StatorDontFit("The Stator is too big to fit in the Rotor")
    if self.comp_width_airgap_mec() <= 0:
        raise MC_MecAirgapError(
            "The Stator and the rotor don't fit because "
            "of magnet or short circuit ring"
        )
    self.rotor.check()
    self.stator.check()
class MC_AbstractError(MachineCheckError):
    """Raised when check() is invoked on the abstract Machine base class."""

    pass
class MC_StatorNotStator(MachineCheckError):
    """Raised when the stator lamination is not flagged as a stator."""

    pass
class MC_RotorIsStator(MachineCheckError):
    """Raised when the rotor lamination is wrongly flagged as a stator."""

    pass
class MC_BothInternal(MachineCheckError):
    """Raised when both rotor and stator are flagged as internal."""

    pass
class MC_BothExternal(MachineCheckError):
    """Raised when neither rotor nor stator is flagged as internal."""

    pass
class MC_RotorDontFit(MachineCheckError):
    """Raised when an internal rotor is too big to fit inside the stator."""

    pass
class MC_StatorDontFit(MachineCheckError):
    """Raised when an internal stator is too big to fit inside the rotor."""

    pass
class MC_ShaftTooBig(MachineCheckError):
    """Raised when the shaft diameter exceeds the rotor bore."""

    pass
class MC_ShaftTooSmall(MachineCheckError):
    """Raised when the shaft diameter is smaller than the rotor bore."""

    pass
class MC_MecAirgapError(MachineCheckError):
    """Raised when the mechanical airgap is not strictly positive."""

    pass
| 18.510526 | 86 | 0.607336 |
28d7436d7dccf88d4e5459e3bf14cfc026e2bd63 | 8,389 | py | Python | TicTacToe.py | sheetal322/HacktoberFest-2021 | 6e7f2a0e5650e681ac45051e08305c3371822e41 | [
"MIT"
] | null | null | null | TicTacToe.py | sheetal322/HacktoberFest-2021 | 6e7f2a0e5650e681ac45051e08305c3371822e41 | [
"MIT"
] | null | null | null | TicTacToe.py | sheetal322/HacktoberFest-2021 | 6e7f2a0e5650e681ac45051e08305c3371822e41 | [
"MIT"
def default():
    """Print the welcome banner."""
    print("\nWelcome! Let's play TIC TAC TOE!\n")
def rules():
    """Explain the board layout: positions map to the numeric keypad."""
    print("The board will look like this!")
    print("The positions of this 3 x 3 board is same as the right side of your key board.\n")
    rows = (" 7 | 8 | 9 ", " 4 | 5 | 6 ", " 1 | 2 | 3 ")
    for row in rows:
        print(row)
        if row != rows[-1]:
            print("-----------")
    print("\nYou just have to input the position(1-9).")
def names():
    """Prompt for both players' names and return them capitalized as a tuple."""
    first = input("\nEnter NAME of PLAYER 1:\t").capitalize()
    second = input("Enter NAME of PLAYER 2:\t").capitalize()
    return (first, second)
def choice():
    """Ask player 1 to pick X or O; return (p1_choice, p2_choice).

    Re-prompts until the first character of the answer is X or O
    (case-insensitive). Relies on the module-level ``p1_name`` set by the
    driver script before this is called.
    """
    # BUG FIX: the original loop condition `p1_choice != 'X' or
    # p1_choice != 'O'` was always True (harmless only because of the
    # break), and `input(...)[0]` raised IndexError on an empty answer.
    while True:
        answer = input(f"\n{p1_name}, Do you want to be X or O?\t").upper()
        if answer and answer[0] in ('X', 'O'):
            p1_choice = answer[0]
            break
        print("INVALID INPUT! Please Try Again!")
    p2_choice = 'O' if p1_choice == 'X' else 'X'
    return (p1_choice, p2_choice)
def first_player():
    """Randomly decide who starts: 0 -> player 1, 1 -> player 2."""
    import random

    return random.choice((0, 1))
def display_board(board, avail):
    """Print the current board (left) beside the available-positions map (right)."""
    # Left grid shows the marks placed so far; right grid shows which
    # keypad numbers are still free (blanked out once taken).
    print("     " + " {} | {} | {} ".format(board[7],board[8],board[9]) + "     " + " {} | {} | {} ".format(avail[7],avail[8],avail[9]))
    print("     " + "-----------" + "     " + "-----------")
    print("     " + " {} | {} | {} ".format(board[4],board[5],board[6]) + "     " + " {} | {} | {} ".format(avail[4],avail[5],avail[6]))
    print("     " + "-----------" + "     " + "-----------")
    print("     " + " {} | {} | {} ".format(board[1],board[2],board[3]) + "     " + " {} | {} | {} ".format(avail[1],avail[2],avail[3]))
def player_choice(board, name, choice):
    """Prompt `name` until a free position (1-9) is entered; return it as int.

    Re-prompts on out-of-range numbers and occupied squares. BUG FIX: the
    original crashed with ValueError on non-numeric input, and carried a
    dead `position == ""` comparison after the int() conversion.
    """
    valid_positions = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    position = 0
    while position not in valid_positions or not space_check(board, position):
        raw = input(f'\n{name} ({choice}), Choose your next position: (1-9) \t')
        try:
            position = int(raw)
        except ValueError:
            position = 0
        if position not in valid_positions or not space_check(board, position):
            print("INVALID INPUT. Please Try Again!\n")
    print("\n")
    return position
def Comp(board, name, choice):
    """Choose the computer's move for the current board.

    Strategy, in order: take a winning square, block the opponent's win,
    take a random open corner, take the centre, else a random open edge.
    (`name` and `choice` are accepted for signature parity with
    player_choice but are not used.)
    """
    open_spots = [idx for idx, mark in enumerate(board) if mark == ' ' and idx != 0]
    # 1. Win if possible ('O'), otherwise block the opponent ('X').
    for mark in ('O', 'X'):
        for spot in open_spots:
            trial = board[:]
            trial[spot] = mark
            if win_check(trial, mark):
                return spot
    # 2. Prefer a random open corner.
    corners = [spot for spot in open_spots if spot in (1, 3, 7, 9)]
    if corners:
        return selectRandom(corners)
    # 3. Take the centre if it is free.
    if 5 in open_spots:
        return 5
    # 4. Fall back to a random open edge.
    edges = [spot for spot in open_spots if spot in (2, 4, 6, 8)]
    if edges:
        return selectRandom(edges)
def selectRandom(board):
    """Return an element drawn uniformly at random from the given list."""
    import random

    return board[random.randrange(len(board))]
def place_marker(board, avail, choice, position):
    """Write the player's mark onto the board and blank the position hint."""
    board[position], avail[position] = choice, ' '
def space_check(board, position):
    """Return True if the given board position is unoccupied."""
    return board[position] == ' '
def full_board_check(board):
    """Return True when no playable square (positions 1-9) remains open."""
    # Index 0 is unused by the game, so only slots 1-9 matter.
    return ' ' not in board[1:10]
def win_check(board, choice):
    """Return True if `choice` occupies any of the eight winning lines."""
    winning_lines = (
        (1, 2, 3), (4, 5, 6), (7, 8, 9),   # rows
        (1, 4, 7), (2, 5, 8), (3, 6, 9),   # columns
        (1, 5, 9), (3, 5, 7),              # diagonals
    )
    return any(
        all(board[cell] == choice for cell in line) for line in winning_lines
    )
def delay(mode):
    """Pause two seconds between moves, but only in computer-vs-computer mode (2)."""
    if mode != 2:
        return
    import time

    time.sleep(2)
def replay():
    """Ask whether to play again; True if the answer starts with 'y'/'Y'."""
    return input('\nDo you want to play again? Enter [Y]es or [N]o: ').lower().startswith('y')
# ---------------------------------------------------------------------------
# Main driver: show the menu, set up the players, then run rounds until the
# user declines a replay.
# ---------------------------------------------------------------------------
input("Press ENTER to start!")
default()
rules()
while True:
    # Fresh board (index 0 unused) and position hints for every round.
    theBoard = [' '] * 10
    available = [str(num) for num in range(0, 10)]
    print("\n[0]. Player vs. Computer")
    print("[1]. Player vs. Player")
    print("[2]. Computer vs. Computer")
    mode = int(input("\nSelect an option [0]-[2]: "))
    if mode == 1:
        p1_name, p2_name = names()
        p1_choice, p2_choice = choice()
        print(f"\n{p1_name}:", p1_choice)
        print(f"{p2_name}:", p2_choice)
    elif mode == 0:
        p1_name = input(
            "\nEnter Name of PLAYER who will go against the Computer:\t"
        ).capitalize()
        p2_name = "Computer"
        p1_choice, p2_choice = choice()
        print(f"\n{p1_name}:", p1_choice)
        print(f"{p2_name}:", p2_choice)
    else:
        p1_name = "Computer1"
        p2_name = "Computer2"
        p1_choice, p2_choice = "X", "O"
        print(f"\n{p1_name}:", p1_choice)
        print(f"\n{p2_name}:", p2_choice)
    if first_player():
        turn = p2_name
    else:
        turn = p1_name
    print(f"\n{turn} will go first!")
    if mode == 2:
        input("\nThis is going to be fast! Press Enter for the battle to begin!\n")
    # BUG FIX: modes 0 and 1 previously called an undefined play() helper
    # here, crashing with NameError before the first move.
    play_game = 1
    while play_game:
        if turn == p1_name:
            display_board(theBoard, available)
            if mode != 2:
                position = player_choice(theBoard, p1_name, p1_choice)
            else:
                position = Comp(theBoard, p1_name, p1_choice)
            print(f'\n{p1_name} ({p1_choice}) has placed on {position}\n')
            place_marker(theBoard, available, p1_choice, position)
            if win_check(theBoard, p1_choice):
                display_board(theBoard, available)
                print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
                # BUG FIX: in mode 0 a player-1 win used to be announced as
                # a Computer win; player 1 is always the human in mode 0
                # (and Computer1 in mode 2), so congratulate them by name.
                print(f'\n\nCONGRATULATIONS {p1_name}! YOU HAVE WON THE GAME!\n\n')
                print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
                play_game = False
            else:
                if full_board_check(theBoard):
                    display_board(theBoard, available)
                    print("~~~~~~~~~~~~~~~~~~")
                    print('\nThe game is a DRAW!\n')
                    print("~~~~~~~~~~~~~~~~~~")
                    break
                turn = p2_name
        elif turn == p2_name:
            display_board(theBoard, available)
            if mode == 1:
                position = player_choice(theBoard, p2_name, p2_choice)
            else:
                position = Comp(theBoard, p2_name, p2_choice)
            print(f'\n{p2_name} ({p2_choice}) has placed on {position}\n')
            place_marker(theBoard, available, p2_choice, position)
            if win_check(theBoard, p2_choice):
                display_board(theBoard, available)
                print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
                if mode:
                    print(f'\n\nCONGRATULATIONS {p2_name}! YOU HAVE WON THE GAME!\n\n')
                else:
                    print('\n\nTHE Computer HAS WON THE GAME!\n\n')
                print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
                play_game = False
            else:
                if full_board_check(theBoard):
                    display_board(theBoard, available)
                    print("~~~~~~~~~~~~~~~~~~")
                    print('\nThe game is a DRAW!\n')
                    print("~~~~~~~~~~~~~~~~~~")
                    break
                turn = p1_name
    if not replay():
        break
print("\n\n\t\t\tTHE END!")
6cedbca787c1d1a314ca83bf1685027e126158e5 | 2,128 | py | Python | numpoly/array_function/stack.py | jonathf/npoly | 9df4bd2a3b134e8a196e24389c0ad84c26da9662 | [
"BSD-2-Clause"
] | 8 | 2019-12-13T23:54:33.000Z | 2021-11-08T22:44:25.000Z | numpoly/array_function/stack.py | jonathf/npoly | 9df4bd2a3b134e8a196e24389c0ad84c26da9662 | [
"BSD-2-Clause"
] | 54 | 2019-08-25T20:03:10.000Z | 2021-08-09T08:59:27.000Z | numpoly/array_function/stack.py | jonathf/npoly | 9df4bd2a3b134e8a196e24389c0ad84c26da9662 | [
"BSD-2-Clause"
] | 2 | 2020-03-05T12:03:28.000Z | 2021-03-07T16:56:09.000Z | """Join a sequence of arrays along a new axis."""
from __future__ import annotations
from typing import Optional, Sequence
import numpy
import numpoly
from ..baseclass import ndpoly, PolyLike
from ..dispatch import implements
@implements(numpy.stack)
def stack(
    arrays: Sequence[PolyLike],
    axis: int = 0,
    out: Optional[ndpoly] = None,
) -> ndpoly:
    """
    Join a sequence of arrays along a new axis.

    The ``axis`` parameter specifies the index of the new axis in the
    dimensions of the result. For example, if ``axis=0`` it will be the first
    dimension and if ``axis=-1`` it will be the last dimension.

    Args:
        arrays:
            Each array must have the same shape.
        axis:
            The axis in the result array along which the input arrays are
            stacked.
        out:
            If provided, the destination to place the result. The shape must be
            correct, matching that of what stack would have returned if no out
            argument were specified.

    Returns:
        The stacked array has one more dimension than the input arrays.

    Examples:
        >>> poly = numpoly.variable(3)
        >>> const = numpoly.polynomial([1, 2, 3])
        >>> numpoly.stack([poly, const])
        polynomial([[q0, q1, q2],
                    [1, 2, 3]])
        >>> numpoly.stack([poly, const], axis=-1)
        polynomial([[q0, 1],
                    [q1, 2],
                    [q2, 3]])

    """
    # Give every input polynomial the same exponent set so their coefficient
    # arrays line up key-for-key.
    arrays = numpoly.align_exponents(*arrays)
    if out is None:
        # Stack each exponent key's coefficient arrays independently, then
        # rebuild a polynomial from the stacked coefficients.
        coefficients = [numpy.stack(
            [array.values[key] for array in arrays], axis=axis)
            for key in arrays[0].keys]
        out = numpoly.polynomial_from_attributes(
            exponents=arrays[0].exponents,
            coefficients=coefficients,
            names=arrays[0].names,
            dtype=coefficients[0].dtype,
        )
    else:
        # In-place variant: stack directly into the caller-provided buffer.
        # NOTE(review): keys present in `out` but absent from the aligned
        # inputs are left untouched - presumably `out` is expected to share
        # the inputs' exponent set; confirm against numpoly's dispatch
        # contract.
        for key in out.keys:
            if key in arrays[0].keys:
                numpy.stack([array.values[key] for array in arrays],
                            out=out.values[key], axis=axis)
    return out
| 31.294118 | 79 | 0.580357 |
127bddc5ae34c1b81eb59f19f896474840c54351 | 9,369 | py | Python | tests/plugins/lti_consumer/test_forms.py | leduong/richie | bf7ed379b7e2528cd790dadcec10ac2656efd189 | [
"MIT"
] | 174 | 2018-04-14T23:36:01.000Z | 2022-03-10T09:27:01.000Z | tests/plugins/lti_consumer/test_forms.py | leduong/richie | bf7ed379b7e2528cd790dadcec10ac2656efd189 | [
"MIT"
] | 631 | 2018-04-04T11:28:53.000Z | 2022-03-31T11:18:31.000Z | tests/plugins/lti_consumer/test_forms.py | leduong/richie | bf7ed379b7e2528cd790dadcec10ac2656efd189 | [
"MIT"
] | 64 | 2018-06-27T08:35:01.000Z | 2022-03-10T09:27:43.000Z | """
Forms tests
"""
from unittest import mock
from django.test import TestCase, override_settings
import exrex
from richie.plugins.lti_consumer.factories import LTIConsumerFactory
from richie.plugins.lti_consumer.forms import LTIConsumerForm
def get_lti_settings(is_regex=True):
    """Build a RICHIE_LTI_PROVIDERS-style dict for overriding settings in tests."""
    if is_regex:
        base_url = "http://localhost:8060/lti/videos/[0-9a-f]{8}-[0-9a-f]"
    else:
        base_url = "http://localhost:8060/lti/videos/"
    return {
        "lti_provider_test": {
            "base_url": base_url,
            "is_base_url_regex": is_regex,
            "oauth_consumer_key": "TestOauthConsumerKey",
            "shared_secret": "TestSharedSecret",
        }
    }
class LTIConsumerFormTestCase(TestCase):
    """Tests for the LTI consumer forms"""

    def test_lti_consumer_forms_predefined_providers(self):
        """
        Verify LTI consumer form lists predefined providers
        """
        self.assertListEqual(
            LTIConsumerForm().fields["lti_provider_id"].widget.choices,
            [
                (None, "Custom provider configuration"),
                ("lti_provider_test", "LTI Provider Test Video"),
            ],
        )

    def test_lti_consumer_forms_clean_errors(self):
        """
        Verify that LTI consumer form is displaying errors with predefined or
        custom provider
        """
        # Each (data, errors) pair drives one subTest: the submitted form
        # data and the exact validation errors expected back.
        for data, errors in [
            (
                {"lti_provider_id": ""},
                {
                    "lti_provider_id": [
                        "Please choose a predefined provider, or fill fields below"
                    ],
                    "oauth_consumer_key": [
                        "Please choose a predefined provider above, or fill this field"
                    ],
                    "form_shared_secret": [
                        "Please choose a predefined provider above, or fill this field"
                    ],
                    "url": [
                        "Please choose a predefined provider above, or fill this field"
                    ],
                },
            ),
            (
                {
                    "lti_provider_id": "",
                    "oauth_consumer_key": "InsecureOauthConsumerKey",
                    "form_shared_secret": "InsecureSharedSecret",
                },
                {"url": ["Please fill this field"]},
            ),
            (
                {
                    "lti_provider_id": "",
                    "url": "http://example.com",
                    "form_shared_secret": "InsecureSharedSecret",
                },
                {"oauth_consumer_key": ["Please fill this field"]},
            ),
            (
                {
                    "lti_provider_id": "",
                    "url": "http://example.com",
                    "oauth_consumer_key": "InsecureOauthConsumerKey",
                },
                {"form_shared_secret": ["Please fill this field"]},
            ),
            (
                {"lti_provider_id": "", "url": "http://example.com"},
                {
                    "oauth_consumer_key": ["Please fill this field"],
                    "form_shared_secret": ["Please fill this field"],
                },
            ),
        ]:
            with self.subTest(data=data, errors=errors):
                form = LTIConsumerForm(data=data)
                self.assertFalse(form.is_valid())
                self.assertDictEqual(errors, form.errors)

    def test_lti_consumer_forms_clean_valid(self):
        """
        Verify that LTI consumer form is valid with predefined or custom provider
        """
        for data in [
            ({"lti_provider_id": "lti_provider_test"}),
            (
                {
                    "lti_provider_id": "",
                    "url": "http://example.com",
                    "oauth_consumer_key": "InsecureOauthConsumerKey",
                    "form_shared_secret": "InsecureSharedSecret",
                }
            ),
        ]:
            with self.subTest(data=data):
                form = LTIConsumerForm(data=data)
                self.assertTrue(form.is_valid())
                self.assertDictEqual(form.errors, {})
                instance = form.save()
                self.assertIsNotNone(instance.url)

    @override_settings(RICHIE_LTI_PROVIDERS=get_lti_settings())
    @mock.patch.object(
        exrex, "getone", return_value="http://localhost:8060/lti/videos/1234abcd-1"
    )
    def test_lti_consumer_forms_url_regex_match(self, _mock_getone):
        """
        The url field should match the regex url if a predefined LTI provider is
        used and has a regex url.
        """
        # exrex.getone is mocked so the example URL in the error message is
        # deterministic.
        data = {"lti_provider_id": "lti_provider_test", "url": "http://invalid.com"}
        form = LTIConsumerForm(data=data)
        self.assertFalse(form.is_valid())
        self.assertDictEqual(
            form.errors,
            {
                "url": [
                    (
                        "The url is not valid for this provider. "
                        'It should be of the form "http://localhost:8060/lti/videos/1234abcd-1".'
                    )
                ]
            },
        )

    @override_settings(RICHIE_LTI_PROVIDERS=get_lti_settings(is_regex=False))
    def test_lti_consumer_forms_url_not_regex_included(self):
        """
        The url field should include the provider's base url if a predefined LTI provider
        is used and has a regex url.
        """
        data = {"lti_provider_id": "lti_provider_test", "url": "http://invalid.com"}
        form = LTIConsumerForm(data=data)
        self.assertFalse(form.is_valid())
        self.assertDictEqual(
            form.errors,
            {
                "url": [
                    (
                        "The url is not valid for this provider. "
                        'It should start with "http://localhost:8060/lti/videos/".'
                    )
                ]
            },
        )

    @override_settings(RICHIE_LTI_PROVIDERS=get_lti_settings())
    def test_lti_consumer_forms_reset_credentials(self):
        """
        The "oauth_consumer_key" and "shared_secret" fields should be reset when a value
        is set for the "lti_provider_id" field.
        """
        data = {
            "lti_provider_id": None,
            "url": "http://example.com",
            "oauth_consumer_key": "thisIsAtestOauthConsumerKey",
            "form_shared_secret": "thisIsAtestSharedSecre",
        }
        form = LTIConsumerForm(data=data)
        form.is_valid()
        self.assertTrue(form.is_valid())
        lti_consumer = form.save()
        self.assertEqual(lti_consumer.oauth_consumer_key, "thisIsAtestOauthConsumerKey")
        self.assertEqual(lti_consumer.shared_secret, "thisIsAtestSharedSecre")
        # Switching to a predefined provider must wipe the custom credentials.
        modified_data = LTIConsumerForm(instance=lti_consumer).initial
        modified_data.update(
            {
                "lti_provider_id": "lti_provider_test",
                "url": "http://localhost:8060/lti/videos/166d465f-f",
            }
        )
        modified_form = LTIConsumerForm(instance=lti_consumer, data=modified_data)
        modified_form.is_valid()
        self.assertTrue(modified_form.is_valid())
        modified_form.save()
        lti_consumer.refresh_from_db()
        self.assertIsNone(lti_consumer.oauth_consumer_key)
        self.assertIsNone(lti_consumer.shared_secret)

    @override_settings(RICHIE_LTI_PROVIDERS=get_lti_settings())
    def test_lti_consumer_forms_shared_secret_placeholder(self):
        """
        The "form_shared_secret" should act as a proxy to the "shared_secret" field on the model
        and allow hiding the shared secret from the form after creation.
        """
        lti_consumer = LTIConsumerFactory(
            lti_provider_id=None,
            oauth_consumer_key="thisIsAtestOauthConsumerKey",
            shared_secret="thisIsAtestSharedSecret",
        )
        form = LTIConsumerForm(instance=lti_consumer)
        rendered = form.as_p()
        # NOTE(review): the '' inside onfocus below concatenates two adjacent
        # string literals, so the asserted HTML actually reads
        # onfocus="this.value=" - presumably escaped quotes were lost in
        # transcription; confirm against the widget's rendered output.
        self.assertIn(
            (
                '<input type="password" name="form_shared_secret" '
                'value="%%shared_secret_placeholder%%" onfocus="this.value=''" '
                'maxlength="50" id="id_form_shared_secret">'
            ),
            rendered,
        )
        self.assertNotIn('id="shared_secret"', rendered)
        self.assertNotIn("thisIsAtestSharedSecret", rendered)
        # Submitting the placeholder value for the secret should not
        # impact the field on the model
        data = form.initial
        data["form_shared_secret"] = "%%shared_secret_placeholder%%"
        form = LTIConsumerForm(instance=lti_consumer, data=data)
        form.is_valid()
        self.assertTrue(form.is_valid())
        form.save()
        lti_consumer.refresh_from_db()
        self.assertEqual(lti_consumer.shared_secret, "thisIsAtestSharedSecret")
        # Submitting a new secret should update the corresponding field on the model
        data["form_shared_secret"] = "NewSharedSecret"
        form = LTIConsumerForm(instance=lti_consumer, data=data)
        form.is_valid()
        self.assertTrue(form.is_valid())
        form.save()
        lti_consumer.refresh_from_db()
        self.assertEqual(lti_consumer.shared_secret, "NewSharedSecret")
e1b3b0ec315c664b553c9bd34e6b7b9acc604506 | 577 | py | Python | testApp/views.py | Wizmann/DjangoSimditor | ced23966e5fab03921e059737e1abb40b72b6cea | [
"MIT"
] | 1 | 2017-12-07T05:27:56.000Z | 2017-12-07T05:27:56.000Z | testApp/views.py | Wizmann/DjangoSimditor | ced23966e5fab03921e059737e1abb40b72b6cea | [
"MIT"
] | null | null | null | testApp/views.py | Wizmann/DjangoSimditor | ced23966e5fab03921e059737e1abb40b72b6cea | [
"MIT"
] | null | null | null | #coding=utf-8
from django.shortcuts import render
from django.views.generic import View, ListView, CreateView, UpdateView
from django.core.urlresolvers import reverse_lazy
from testApp.models import Blog
class HomeView(ListView):
    # Blog index: renders all Blog objects as ``blog_list`` in home.html.
    context_object_name = 'blog_list'
    queryset = Blog.objects.all()
    template_name = 'home.html'
class BlogFormView(View):
    # Shared configuration (model, template, redirect target) for the
    # create/update views below, which mix this class in.
    model = Blog
    template_name = "form.html"
    success_url = reverse_lazy('home_view')
class BlogCreateView(BlogFormView, CreateView):
    # Blog creation form; all configuration is inherited from BlogFormView.
    pass
class BlogUpdateView(BlogFormView, UpdateView):
    # Blog edit form; all configuration is inherited from BlogFormView.
    pass
| 24.041667 | 71 | 0.757366 |
36c63418922d2aba0e84303cb7b4d5ec32d2ae31 | 1,417 | py | Python | Src/arch.py | Darth-Ness/GraphicMan | b3824631f2f6e3b6fbfd052f636b1f5f0c6bfc42 | [
"CC0-1.0"
] | 1 | 2022-03-18T18:21:46.000Z | 2022-03-18T18:21:46.000Z | Src/arch.py | Darth-Ness/GraphicMan | b3824631f2f6e3b6fbfd052f636b1f5f0c6bfc42 | [
"CC0-1.0"
] | 1 | 2022-03-18T18:25:52.000Z | 2022-03-24T21:23:01.000Z | Src/arch.py | Darth-Ness/GraphicMan | b3824631f2f6e3b6fbfd052f636b1f5f0c6bfc42 | [
"CC0-1.0"
] | 1 | 2022-03-18T18:22:08.000Z | 2022-03-18T18:22:08.000Z | import tkinter
import os
import subprocess
def install():
    """Install the package name(s) typed into the entry field."""
    # Security fix: the GUI text was concatenated into a shell command
    # (`os.system`), allowing shell injection.  Pass an argument list with
    # no shell instead; split() still supports several space-separated names.
    subprocess.run(['sudo', 'pacman', '-S'] + packageName.get().split())
def uninstall():
    """Remove the package name(s) typed into the entry field (pacman -Rs)."""
    # Security fix: argument list instead of a shell string prevents
    # shell injection through the GUI entry field.
    subprocess.run(['sudo', 'pacman', '-Rs'] + packageName.get().split())
def update():
    """Run a full system upgrade (pacman -Syu)."""
    # No user input here, but use subprocess for consistency with the
    # other (injection-hardened) commands in this module.
    subprocess.run(['sudo', 'pacman', '-Syu'])
def info():
    """Show detailed information about the entered package (pacman -Qi)."""
    # Security fix: argument list instead of a shell string prevents
    # shell injection through the GUI entry field.
    subprocess.run(['sudo', 'pacman', '-Qi'] + packageName.get().split())
def search():
    """Search the repositories for the entered term (pacman -Ss)."""
    # Security fix: argument list instead of a shell string prevents
    # shell injection through the GUI entry field.
    subprocess.run(['sudo', 'pacman', '-Ss'] + packageName.get().split())
def installAUR():
    """Clone an AUR package and build/install it with makepkg."""
    pkg = packageName.get().strip()
    # Argument lists avoid shell injection from the GUI entry field.
    subprocess.run(['git', 'clone', 'https://aur.archlinux.org/' + pkg + '.git'])
    # Bug fix: each os.system() call ran in its own shell, so the original
    # `cd <pkg>` never affected the following `makepkg -si` (it ran in the
    # wrong directory).  Run makepkg directly inside the cloned directory.
    subprocess.run(['makepkg', '-si'], cwd=pkg)
# Build the main window: one button per pacman operation, packed left to
# right, plus a text entry that the command callbacks read via
# packageName.get().
root = tkinter.Tk()
root.configure(padx=30, pady=30, background="white")
button1 = tkinter.Button(text='install', command=install)
button1.pack(anchor=tkinter.NW, side=tkinter.LEFT)
button2 = tkinter.Button(text="Uninstall", command=uninstall)
button2.pack(anchor=tkinter.NW, side=tkinter.LEFT)
button3 = tkinter.Button(text="Update", command=update)
button3.pack(anchor=tkinter.NW, side=tkinter.LEFT)
button4 = tkinter.Button(text="Info", command=info)
button4.pack(anchor=tkinter.NW, side=tkinter.LEFT)
button5 = tkinter.Button(text="Search", command=search)
button5.pack(anchor=tkinter.NW, side=tkinter.LEFT)
button7 = tkinter.Button(text="install from AUR", command=installAUR)
button7.pack(anchor=tkinter.NW, side=tkinter.LEFT)
# Package-name input read by every command above.
packageName = tkinter.Entry(root, text='Hello there!')
packageName.pack()
#root.destroy()
root.mainloop() | 28.34 | 82 | 0.700776 |
c4d4c860b96318deb86241fce4873aff08c57bb7 | 746 | py | Python | tests/trackings/test_list.py | ludeeus/pyaftership | aba525b408c7e83167d6891d9c9e68842b2a0c13 | [
"MIT"
] | null | null | null | tests/trackings/test_list.py | ludeeus/pyaftership | aba525b408c7e83167d6891d9c9e68842b2a0c13 | [
"MIT"
] | 28 | 2018-08-23T11:54:45.000Z | 2022-03-18T06:06:12.000Z | tests/trackings/test_list.py | ludeeus/pyaftership | aba525b408c7e83167d6891d9c9e68842b2a0c13 | [
"MIT"
] | 5 | 2018-08-18T16:06:55.000Z | 2021-11-04T16:14:12.000Z | """Test list trackings."""
import aiohttp
import pytest
from pyaftership import AfterShip
from tests.common import API_KEY, load_fixture
@pytest.mark.asyncio
async def test_list(aresponses):
    """Test list trackings."""
    # Mock the AfterShip trackings endpoint with a canned JSON fixture so
    # the test never touches the network.
    aresponses.add(
        "api.aftership.com",
        "/v4/trackings",
        "get",
        aresponses.Response(
            text=load_fixture("get_trackings"),
            status=200,
            headers={"Content-Type": "application/json"},
        ),
    )
    async with aiohttp.ClientSession() as session:
        aftership = AfterShip(API_KEY, session)
        trackings = await aftership.trackings.list()
        # The reported count must be non-zero and match the returned list.
        assert trackings["count"] != 0
        assert trackings["count"] == len(trackings["trackings"])
| 27.62963 | 64 | 0.632708 |
de6a3bdd45c1766fdf18a0834a853655d0a1513e | 9,644 | py | Python | scripts/garmin_sync.py | aleung/running_page | be68da20f0dac1a1c6859ebb8efcdd2a7245159d | [
"MIT"
] | null | null | null | scripts/garmin_sync.py | aleung/running_page | be68da20f0dac1a1c6859ebb8efcdd2a7245159d | [
"MIT"
] | null | null | null | scripts/garmin_sync.py | aleung/running_page | be68da20f0dac1a1c6859ebb8efcdd2a7245159d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Python 3 API wrapper for Garmin Connect to get your statistics.
Copy most code from https://github.com/cyberjunky/python-garminconnect
"""
import argparse
import logging
import os
import time
import re
import sys
import traceback
import asyncio
import httpx
import aiofiles
from config import GPX_FOLDER, JSON_FILE, SQL_FILE, config
from utils import make_activities_file
# logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
GARMIN_COM_URL_DICT = {
"BASE_URL": "https://connect.garmin.com",
"SSO_URL_ORIGIN": "https://sso.garmin.com",
"SSO_URL": "https://sso.garmin.com/sso",
"MODERN_URL": "https://connect.garmin.com/modern",
"SIGNIN_URL": "https://sso.garmin.com/sso/signin",
"CSS_URL": "https://static.garmincdn.com/com.garmin.connect/ui/css/gauth-custom-v1.2-min.css",
}
GARMIN_CN_URL_DICT = {
"BASE_URL": "https://connect.garmin.cn",
"SSO_URL_ORIGIN": "https://sso.garmin.com",
"SSO_URL": "https://sso.garmin.cn/sso",
"MODERN_URL": "https://connect.garmin.cn/modern",
"SIGNIN_URL": "https://sso.garmin.cn/sso/signin",
"CSS_URL": "https://static.garmincdn.cn/cn.garmin.connect/ui/css/gauth-custom-v1.2-min.css",
}
class Garmin:
    """Minimal async Garmin Connect client: SSO login, activity listing
    and GPX download, backed by a shared httpx.AsyncClient."""

    def __init__(self, email, password, auth_domain):
        """
        Init module
        """
        self.email = email
        self.password = password
        self.req = httpx.AsyncClient(timeout=httpx.Timeout(60.0))
        # Garmin China uses separate domains; anything else goes to .com.
        self.URL_DICT = (
            GARMIN_CN_URL_DICT
            if auth_domain and str(auth_domain).upper() == "CN"
            else GARMIN_COM_URL_DICT
        )
        self.modern_url = self.URL_DICT.get("MODERN_URL")
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36",
            "origin": self.URL_DICT.get("SSO_URL_ORIGIN"),
        }

    async def login(self):
        """
        Login to portal
        """
        params = {
            "webhost": self.URL_DICT.get("BASE_URL"),
            "service": self.modern_url,
            "source": self.URL_DICT.get("SIGNIN_URL"),
            "redirectAfterAccountLoginUrl": self.modern_url,
            "redirectAfterAccountCreationUrl": self.modern_url,
            "gauthHost": self.URL_DICT.get("SSO_URL"),
            "locale": "en_US",
            "id": "gauth-widget",
            "cssUrl": self.URL_DICT.get("CSS_URL"),
            "clientId": "GarminConnect",
            "rememberMeShown": "true",
            "rememberMeChecked": "false",
            "createAccountShown": "true",
            "openCreateAccount": "false",
            "usernameShown": "false",
            "displayNameShown": "false",
            "consumeServiceTicket": "false",
            "initialFocus": "true",
            "embedWidget": "false",
            "generateExtraServiceTicket": "false",
        }
        data = {
            "username": self.email,
            "password": self.password,
            "embed": "true",
            "lt": "e1s1",
            "_eventId": "submit",
            "displayNameRequired": "false",
        }
        try:
            response = await self.req.post(
                self.URL_DICT.get("SIGNIN_URL"),
                headers=self.headers,
                params=params,
                data=data,
            )
            if response.status_code == 429:
                raise GarminConnectTooManyRequestsError("Too many requests")
            response.raise_for_status()
            logger.debug("Login response code %s", response.status_code)
            text = response.text
        # NOTE(review): this also catches the TooManyRequests raise above and
        # re-wraps it as a connection error (pre-existing behavior, kept).
        except Exception as err:
            raise GarminConnectConnectionError("Error connecting") from err
        # logger.debug("Response is %s", text)
        # The SSO response embeds a service-ticket URL that must be visited
        # to complete authentication.
        response_url = re.search(r'"(https:[^"]+?ticket=[^"]+)"', text)
        if not response_url:
            raise GarminConnectAuthenticationError("Authentication error")
        response_url = re.sub(r"\\", "", response_url.group(1))
        try:
            response = await self.req.get(response_url)
            if response.status_code == 429:
                raise GarminConnectTooManyRequestsError("Too many requests")
            response.raise_for_status()
        except Exception as err:
            raise GarminConnectConnectionError("Error connecting") from err

    async def fetch_data(self, url, retrying=False):
        """
        Fetch and return data
        """
        try:
            response = await self.req.get(url, headers=self.headers)
            if response.status_code == 429:
                raise GarminConnectTooManyRequestsError("Too many requests")
            logger.debug(f"fetch_data got response code {response.status_code}")
            response.raise_for_status()
            return response.json()
        except Exception as err:
            if retrying:
                logger.debug(
                    "Exception occurred during data retrieval, relogin without effect: %s"
                    % err
                )
                raise GarminConnectConnectionError("Error connecting") from err
            else:
                logger.debug(
                    "Exception occurred during data retrieval - perhaps session expired - trying relogin: %s"
                    % err
                )
                await self.login()
                # Bug fix: the retried result was previously dropped (no
                # `return`), so callers always received None after a relogin.
                return await self.fetch_data(url, retrying=True)

    async def get_activities(self, start, limit):
        """
        Fetch available activities
        """
        url = f"{self.modern_url}/proxy/activitylist-service/activities/search/activities?start={start}&limit={limit}"
        return await self.fetch_data(url)

    async def download_activity(self, activity_id):
        """Download one activity's GPX export and return the raw bytes."""
        url = f"{self.modern_url}/proxy/download-service/export/gpx/activity/{activity_id}"
        logger.info(f"Download activity from {url}")
        response = await self.req.get(url, headers=self.headers)
        response.raise_for_status()
        return response.read()
class GarminConnectHttpError(Exception):
    """Raised for unexpected HTTP responses."""

    def __init__(self, status):
        self.status = status
        super().__init__(status)
class GarminConnectConnectionError(Exception):
    """Raised when communication ended in error."""

    def __init__(self, status):
        """Store the status and initialize the base Exception."""
        self.status = status
        super().__init__(status)
class GarminConnectTooManyRequestsError(Exception):
    """Raised when rate limit is exceeded."""

    def __init__(self, status):
        """Store the status and initialize the base Exception."""
        self.status = status
        super().__init__(status)
class GarminConnectAuthenticationError(Exception):
    """Raised when login returns wrong result."""

    def __init__(self, status):
        """Store the status and initialize the base Exception."""
        self.status = status
        super().__init__(status)
async def download_garmin_gpx(client, activity_id):
    """Download one activity as `<activity_id>.gpx` into GPX_FOLDER.

    Failures are reported and deliberately swallowed so that one bad
    activity does not abort the whole batch download.
    """
    try:
        gpx_data = await client.download_activity(activity_id)
        file_path = os.path.join(GPX_FOLDER, f"{activity_id}.gpx")
        async with aiofiles.open(file_path, "wb") as fb:
            await fb.write(gpx_data)
    # Bug fix: the bare `except:` also swallowed asyncio.CancelledError and
    # KeyboardInterrupt inside the task; catch only real errors.
    except Exception:
        print(f"Failed to download activity {activity_id}: ")
        traceback.print_exc()
async def get_activity_id_list(client, start=0):
    """Return the ids of all activities, paging through 100 at a time.

    Bug fix: the previous implementation recursed once per page, so a very
    long activity history could exhaust the stack; this version iterates.
    """
    ids = []
    while True:
        activities = await client.get_activities(start, 100)
        if not activities:
            return ids
        page_ids = [activity.get("activityId") for activity in activities]
        logger.debug(f"Activity IDs: {page_ids}")
        ids.extend(page_ids)
        start += 100
async def gather_with_concurrency(n, tasks):
    """Run *tasks* concurrently, at most *n* at a time, preserving order."""
    limiter = asyncio.Semaphore(n)

    async def bounded(coro):
        # Each coroutine waits for a slot before running.
        async with limiter:
            return await coro

    return await asyncio.gather(*map(bounded, tasks))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("email", nargs="?", help="email of garmin")
parser.add_argument("password", nargs="?", help="password of garmin")
parser.add_argument(
"--is-cn",
dest="is_cn",
action="store_true",
help="if garmin accout is com",
)
options = parser.parse_args()
email = options.email or config("sync", "garmin", "email")
password = options.password or config("sync", "garmin", "password")
auth_domain = (
"CN" if options.is_cn else config("sync", "garmin", "authentication_domain")
)
if email == None or password == None:
print("Missing argument nor valid configuration file")
sys.exit(1)
async def download_new_activities():
client = Garmin(email, password, auth_domain)
await client.login()
# because I don't find a para for after time, so I use garmin-id as filename
# to find new run to generage
downloaded_ids = [i.split(".")[0] for i in os.listdir(GPX_FOLDER)]
activity_ids = await get_activity_id_list(client)
to_generate_garmin_ids = list(set(activity_ids) - set(downloaded_ids))
print(f"{len(to_generate_garmin_ids)} new activities to be downloaded")
start_time = time.time()
await gather_with_concurrency(
10, [download_garmin_gpx(client, id) for id in to_generate_garmin_ids]
)
print(f"Download finished. Elapsed {time.time()-start_time} seconds")
make_activities_file(SQL_FILE, GPX_FOLDER, JSON_FILE)
asyncio.run(download_new_activities())
| 35.069091 | 143 | 0.619038 |
687228f99d36b463fac44100e0231bfee8560527 | 1,619 | py | Python | camera/processor/qr_detector.py | dmiyamoto/study-picamera-examples | 8f1e7b95823e1e46587f40fb94d4e033c8dbf9a8 | [
"MIT"
] | null | null | null | camera/processor/qr_detector.py | dmiyamoto/study-picamera-examples | 8f1e7b95823e1e46587f40fb94d4e033c8dbf9a8 | [
"MIT"
] | null | null | null | camera/processor/qr_detector.py | dmiyamoto/study-picamera-examples | 8f1e7b95823e1e46587f40fb94d4e033c8dbf9a8 | [
"MIT"
] | null | null | null | from imutils.video.pivideostream import PiVideoStream
import time
from datetime import datetime
import numpy as np
import cv2
from pyzbar import pyzbar
class QRDetector(object):
    """Video processor: reads Raspberry Pi camera frames, decodes QR codes
    with pyzbar and returns annotated JPEG frames."""
    def __init__(self, flip = False):
        # flip=True vertically flips every frame (camera presumably mounted
        # upside down -- confirm with the hardware setup).
        self.vs = PiVideoStream(resolution=(800, 608)).start()
        self.flip = flip
        # Give the camera sensor time to warm up before serving frames.
        time.sleep(2.0)
    def __del__(self):
        # Best-effort cleanup; relies on garbage collection calling __del__.
        self.vs.stop()
    def flip_if_needed(self, frame):
        # np.flip along axis 0 flips the frame vertically.
        if self.flip:
            return np.flip(frame, 0)
        return frame
    def get_frame(self):
        """Return the next camera frame, QR-annotated, as JPEG bytes."""
        frame = self.flip_if_needed(self.vs.read())
        frame = self.process_image(frame)
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()
    def process_image(self, frame):
        # Decode first, then draw the detections onto the same frame.
        decoded_objs = self.decode(frame)
        frame = self.draw(frame, decoded_objs)
        return frame
    def decode(self, frame):
        """Run pyzbar over the frame; log each detection to stdout."""
        decoded_objs = pyzbar.decode(frame, scan_locations=True)
        for obj in decoded_objs:
            print(datetime.now().strftime('%H:%M:%S.%f'))
            print('Type: ', obj.type)
            print('Data: ', obj.data)
        return decoded_objs
    def draw(self, frame, decoded_objs):
        """Draw a bounding box and the decoded text for every detection."""
        for obj in decoded_objs:
            left, top, width, height = obj.rect
            frame = cv2.rectangle(frame,
                                  (left, top),
                                  (left + width, height + top),
                                  (0, 0, 255), 2)
            data = obj.data.decode('utf-8')
            cv2.putText(frame,data,(left,top),cv2.FONT_HERSHEY_PLAIN, 2,(0, 0, 255))
        return frame
| 29.981481 | 84 | 0.563928 |
5dee89649359a7c5650c18d574a3078ef48d57da | 5,439 | py | Python | JobConfig/ensemble/genEnsembleFcl.py | gaponenko/Offline | 6fb26f4eab5f962126307d30b4e8336fdd117881 | [
"Apache-2.0"
] | null | null | null | JobConfig/ensemble/genEnsembleFcl.py | gaponenko/Offline | 6fb26f4eab5f962126307d30b4e8336fdd117881 | [
"Apache-2.0"
] | 1 | 2019-11-22T14:45:51.000Z | 2019-11-22T14:50:03.000Z | JobConfig/ensemble/genEnsembleFcl.py | gaponenko/Offline | 6fb26f4eab5f962126307d30b4e8336fdd117881 | [
"Apache-2.0"
] | 2 | 2019-10-14T17:46:58.000Z | 2020-03-30T21:05:15.000Z | #!/usr/bin/env python
from string import Template
import sys
import random
import os
from normalizations import *
from argparse import ArgumentParser
def generate(verbose=True,dem_emin=93,dep_emin=83,rpc_tmin=400):
    """Generate ensemble .fcl job configurations from the templates in
    JobConfig/ensemble and write them under gen/fcl/JobConfig/ensemble.

    Returns (sourceFiles, targetFiles): template inputs and generated
    outputs, for SCons dependency tracking.

    NOTE(review): this function uses Python 2 `print` statements; it will
    not run under Python 3 without porting.
    """
    # when we run from SConscript, the cwd is the python subdir
    # but all file name are relative to Offline, so go there
    cwd = os.getcwd()
    words = cwd.split("/")
    if words[-1] == "ensemble" :
        os.chdir("../..")
    # lists of files to send to scons for dependencies
    sourceFiles = [
        "JobConfig/ensemble/epilog.fcl","JobConfig/ensemble/prolog.fcl",
        "JobConfig/ensemble/epilog_reco.fcl","JobConfig/ensemble/prolog_reco.fcl","JobConfig/ensemble/reco-mcdigis-trig.fcl"]
    targetFiles = []
    projectDir = "gen/fcl/JobConfig/ensemble"
    if not os.path.exists(projectDir) :
        os.makedirs(projectDir)
    # DIO sample: generation config plus its matching reco config.
    for tname in ["DIOLeadingLog-cut-mix"]:
        templateFileName = "JobConfig/ensemble/" + tname + ".fcl"
        sourceFiles.append(templateFileName)
        fin = open(templateFileName)
        t = Template(fin.read())
        d = {"minE": dem_emin, "particleTypes": [11, 13], "minMom": dem_emin}
        fclFileName = projectDir + "/" + tname + ".fcl"
        if verbose:
            print "Creating " + fclFileName
        targetFiles.append(fclFileName)
        fout = open(fclFileName,"w")
        fout.write(t.substitute(d))
        fout.close()
        templateFileName = "JobConfig/ensemble/reco-mcdigis-trig.fcl"
        fin = open(templateFileName)
        t = Template(fin.read())
        d = {"name": tname}
        fclFileName = projectDir + "/reco-" + tname + ".fcl"
        if verbose:
            print "Creating " + fclFileName
        targetFiles.append(fclFileName)
        fout = open(fclFileName,"w")
        fout.write(t.substitute(d))
        fout.close()
    # RPC (external/internal) samples: generation plus reco configs.
    for tname in ["RPCexternal-cut-mix","RPCinternal-cut-mix"]:
        templateFileName = "JobConfig/ensemble/" + tname + ".fcl"
        sourceFiles.append(templateFileName)
        fin = open(templateFileName)
        t = Template(fin.read())
        d = {"minE": dep_emin+1, "particleTypes": [11, 13,-11,-13], "minMom": dep_emin, "pionTMin": rpc_tmin}
        fclFileName = projectDir + "/" + tname + ".fcl"
        if verbose:
            print "Creating " + fclFileName
        targetFiles.append(fclFileName)
        fout = open(fclFileName,"w")
        fout.write(t.substitute(d))
        fout.close()
        templateFileName = "JobConfig/ensemble/reco-mcdigis-trig.fcl"
        fin = open(templateFileName)
        t = Template(fin.read())
        d = {"name": tname}
        fclFileName = projectDir + "/reco-" + tname + ".fcl"
        if verbose:
            print "Creating " + fclFileName
        targetFiles.append(fclFileName)
        fout = open(fclFileName,"w")
        fout.write(t.substitute(d))
        fout.close()
    # RMC samples: one generation config per kMax bin (0..7), both templates
    # expanded from the same source file.
    for tname in ["RMCexternal-cut-mix","RMCinternal-cut-mix"]:
        for ikmax in range(8):
            templateFileName = "JobConfig/ensemble/" + tname + ".fcl"
            if ikmax == 0:
                sourceFiles.append(templateFileName)
            fin = open(templateFileName)
            t = Template(fin.read())
            temp_tname = tname.split("-")[0] + "-kMax%d-" % (ikmax) + tname[len(tname.split("-")[0])+1:]
            d = {"minE": dep_emin+1, "particleTypes": [11, 13,-11,-13], "minMom": dep_emin, "kMaxNum": ikmax, "pionTMin": rpc_tmin}
            fclFileName = projectDir + "/" + temp_tname + ".fcl"
            if verbose:
                print "Creating " + fclFileName
            targetFiles.append(fclFileName)
            fout = open(fclFileName,"w")
            fout.write(t.substitute(d))
            fout.close()
    # Reco configs for each RMC kMax variant generated above.
    for tname in ["RMCexternal-cut-mix","RMCinternal-cut-mix"]:
        for ikmax in range(8):
            temp_tname = tname.split("-")[0] + "-kMax%d-" % (ikmax) + tname[len(tname.split("-")[0])+1:]
            templateFileName = "JobConfig/ensemble/reco-mcdigis-trig.fcl"
            fin = open(templateFileName)
            t = Template(fin.read())
            d = {"name": temp_tname}
            fclFileName = projectDir + "/reco-" + temp_tname + ".fcl"
            if verbose:
                print "Creating " + fclFileName
            targetFiles.append(fclFileName)
            fout = open(fclFileName,"w")
            fout.write(t.substitute(d))
            fout.close()
    # Reco-only configs for the conversion-electron/positron samples.
    for tname in ["CeMLeadingLog-mix", "CePLeadingLog-mix"]:
        templateFileName = "JobConfig/ensemble/reco-mcdigis-trig.fcl"
        fin = open(templateFileName)
        t = Template(fin.read())
        d = {"name": tname}
        fclFileName = projectDir + "/reco-" + tname + ".fcl"
        if verbose:
            print "Creating " + fclFileName
        targetFiles.append(fclFileName)
        fout = open(fclFileName,"w")
        fout.write(t.substitute(d))
        fout.close()
    return sourceFiles, targetFiles
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("-q", "--quiet",
action="store_false", dest="verbose", default=True,
help="don't print status messages to stdout")
parser.add_argument("-t", "--rpc-tmin", dest="rpc_tmin", default=400,
help="Early time cutoff for RPC generator")
parser.add_argument("-p", "--dep-emin", dest="dep_emin", default=83,
help="Minimum generated momentum for positrons")
parser.add_argument("-m", "--dem-emin", dest="dem_emin", default=93,
help="Minimum generated momentum for electrons")
args = parser.parse_args()
generate(args.verbose)
exit(0)
| 36.503356 | 127 | 0.615738 |
7b0fb8e15fea6010ca4f7a049f6a50dbfb377177 | 938 | py | Python | Loan.py | nwautier/BeTheBank | d5066d60beb1c585eee341a4a05452b6de70f1f7 | [
"MIT"
] | null | null | null | Loan.py | nwautier/BeTheBank | d5066d60beb1c585eee341a4a05452b6de70f1f7 | [
"MIT"
] | null | null | null | Loan.py | nwautier/BeTheBank | d5066d60beb1c585eee341a4a05452b6de70f1f7 | [
"MIT"
] | null | null | null | class Loan:
'Base class for all loans.'
LoanCount = 0
def __init__(self, m, y, v, r, score, term):
LoanCount += 1
self.PolicyNumber = LoanCount
self.OriginMonth = m
self.OriginYear = y
self.OriginValue = v
self.PrincipalValue = v
self.Balance = v
self.APR = r
self.CreditScore = score
self.Term = term
self.TermsPassed = 0
self.ScheduledPayment =(self.OriginValue/((((1+(self.APR/100/12))^self.Term)-1)/((self.APR/100/12)*(1+(self.APR/100/12))^self.Term)))
self.MissedPayments = 0
def TakePayment():
if self.ScheduledPayment > self.Balance:
self.ScheduledPayment = self.Balance
self.Balance -= self.ScheduledPayment
def ApplyInterest():
self.Balance += (self.Balalnce * (self.APR / 12))
def Term():
ApplyInterest()
TakePayment()
TermsPassed += 1
| 30.258065 | 141 | 0.581023 |
5f82f08a43f853462bf07c53fa1487f8d98d7324 | 2,370 | py | Python | byceps/services/seating/dbmodels/seat.py | GyBraLAN/byceps | b53087849c10a531b66d08999116fa1bef312a7f | [
"BSD-3-Clause"
] | null | null | null | byceps/services/seating/dbmodels/seat.py | GyBraLAN/byceps | b53087849c10a531b66d08999116fa1bef312a7f | [
"BSD-3-Clause"
] | null | null | null | byceps/services/seating/dbmodels/seat.py | GyBraLAN/byceps | b53087849c10a531b66d08999116fa1bef312a7f | [
"BSD-3-Clause"
] | null | null | null | """
byceps.services.seating.dbmodels.seat
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from collections import namedtuple
from typing import Optional, TYPE_CHECKING
if TYPE_CHECKING:
hybrid_property = property
else:
from sqlalchemy.ext.hybrid import hybrid_property
from ....database import db, generate_uuid
from ....util.instances import ReprBuilder
from ...ticketing.dbmodels.category import Category
from ...ticketing.transfer.models import TicketCategoryID
from ..transfer.models import AreaID
from .area import Area
# Lightweight 2D coordinate pair used by Seat.coords.
Point = namedtuple('Point', ['x', 'y'])
class Seat(db.Model):
    """A seat."""
    __tablename__ = 'seats'
    id = db.Column(db.Uuid, default=generate_uuid, primary_key=True)
    # Every seat belongs to exactly one seating area.
    area_id = db.Column(db.Uuid, db.ForeignKey('seating_areas.id'), index=True, nullable=False)
    area = db.relationship(Area, backref='seats')
    # Position of the seat on the area's plan (unit not defined here).
    coord_x = db.Column(db.Integer, nullable=False)
    coord_y = db.Column(db.Integer, nullable=False)
    rotation = db.Column(db.SmallInteger, nullable=True)
    # Ticket category required to occupy this seat.
    category_id = db.Column(db.Uuid, db.ForeignKey('ticket_categories.id'), index=True, nullable=False)
    category = db.relationship(Category)
    label = db.Column(db.UnicodeText, nullable=True)
    # Trailing underscore avoids shadowing the builtin ``type``; the database
    # column itself is still named "type".
    type_ = db.Column('type', db.UnicodeText, nullable=True)
    def __init__(
        self,
        area_id: AreaID,
        category_id: TicketCategoryID,
        *,
        coord_x: int = 0,
        coord_y: int = 0,
        rotation: Optional[int] = None,
        label: Optional[str] = None,
        type_: Optional[str] = None,
    ) -> None:
        self.area_id = area_id
        self.coord_x = coord_x
        self.coord_y = coord_y
        self.rotation = rotation
        self.category_id = category_id
        self.label = label
        self.type_ = type_
    @hybrid_property
    def coords(self) -> Point:
        # Convenience accessor combining the two coordinate columns.
        return Point(x=self.coord_x, y=self.coord_y)
    @coords.setter
    def coords(self, point: Point) -> None:
        self.coord_x = point.x
        self.coord_y = point.y
    def __repr__(self) -> str:
        return ReprBuilder(self) \
            .add('id', str(self.id)) \
            .add_with_lookup('area') \
            .add_with_lookup('category') \
            .add_with_lookup('label') \
            .build()
| 28.902439 | 103 | 0.643038 |
70ca704d54949bd4d2dafea81e3f868dee626375 | 2,665 | py | Python | pybud/video/video_config_handler.py | vishal2612200/PyBud | 726c83ce85cf6dd42a2aaa2eb06609efd6a3fc1b | [
"MIT"
] | 11 | 2020-01-23T08:30:15.000Z | 2022-02-11T04:12:30.000Z | pybud/video/video_config_handler.py | vishal2612200/PyBud | 726c83ce85cf6dd42a2aaa2eb06609efd6a3fc1b | [
"MIT"
] | 5 | 2020-02-15T16:44:24.000Z | 2022-01-13T02:07:48.000Z | pybud/video/video_config_handler.py | vishal2612200/PyBud | 726c83ce85cf6dd42a2aaa2eb06609efd6a3fc1b | [
"MIT"
] | 3 | 2020-02-15T16:30:02.000Z | 2020-08-19T06:58:35.000Z | from pathlib import Path
import yaml
from PIL import ImageFont
class VideoCFG(object):
    """Video-rendering configuration loaded from a YAML file.

    Exposes display, font and canvas-layout settings; the canvas is split
    into line-exec (LE), variable (VAR), output (OP) and source (SRC)
    sections expressed as pixel coordinates on the render frame.
    """
    def __init__(self, config_path):
        # Parse the YAML config; FullLoader avoids arbitrary object
        # construction from the file.
        with open(config_path, "r") as f:
            self.yml: dict = yaml.load(f, Loader=yaml.FullLoader)
        # Bundled fonts live next to this package.
        self.FONT_DIR = Path(__file__).parent / ".." / "fonts"
        # Color scheme
        self.Colors = Colors()
        # Variable display settings
        self.muted_variables = self.yml["muted-variables"]
        self.pointers = self.yml["pointers"]
        # Display settings
        self.intro_text = self.yml["intro"]["text"]
        self.intro_time = self.yml["intro"]["time"]
        self.intro_font = ImageFont.truetype(str(self.FONT_DIR / self.yml["intro"]["font-family"]), self.yml["intro"]["font-size"])
        self.intro_color = self.yml["intro"]["color"]
        self.watermark = self.yml["watermark"]
        self.output_width = self.yml["output-resolution"]["width"]
        self.output_height = self.yml["output-resolution"]["height"]
        # Frame properties
        self.fps = self.yml["fps"]
        self.frame_width = self.yml["render-resolution"]["width"]
        self.frame_height = self.yml["render-resolution"]["height"]
        self.divider_width = 3
        # Line exec section of canvas
        self.LE_XSTART = 0.0
        self.LE_XEND = 4 / 7 * self.frame_width
        self.LE_YSTART = 0.0
        self.LE_YEND = 0.07 * self.frame_height
        # Variable section of canvas
        self.VAR_XSTART = self.LE_XEND
        self.VAR_XEND = self.frame_width
        self.VAR_YSTART = 0.0
        self.VAR_YEND = self.frame_height
        # Output section of canvas
        self.OP_XSTART = 0.0
        self.OP_XEND = self.VAR_XSTART
        self.OP_YSTART = 0.89 * self.frame_height
        self.OP_YEND = self.frame_height
        # Source code section of canvas
        self.SRC_XSTART = 0.0
        self.SRC_XEND = self.VAR_XSTART
        self.SRC_YSTART = self.LE_YEND
        self.SRC_YEND = self.OP_YSTART
        # Text properties
        self.CONTAINER_PADDING = 10.0
        self.LINE_SPACING = 0.1
        self.font_size = self.yml["font"]["font-size"]
        self.main_font = ImageFont.truetype(str(self.FONT_DIR / self.yml["font"]["font-family"]), self.font_size)
class Colors:
    """RGBA color palette used when rendering frames.

    Bug fix: this class previously inherited from ``VideoCFG`` without ever
    calling its ``__init__``, so none of the parent's attributes existed and
    any inherited access would have failed; the inheritance was unused and
    has been removed.
    """
    def __init__(self):
        self.background = (41, 45, 52, 255)
        self.green = (76, 175, 80, 255)
        self.orange = (190, 138, 89, 255)
        self.red = (193, 98, 102, 255)
        self.highlight = (95, 115, 130, 255)
        self.text_default = (240, 244, 250, 255)
        self.watermark_color = (255, 255, 255, 90)
        # The divider line shares the default text color.
        self.divider = self.text_default
| 35.065789 | 131 | 0.611257 |
179d054095f219bf22e45add9d6265e9e25f744e | 194 | py | Python | plan_backend/apps.py | 1uy31/plan | 66f0b1dff7f9704acb8cc97d838e1a331d6b4ec9 | [
"MIT"
] | null | null | null | plan_backend/apps.py | 1uy31/plan | 66f0b1dff7f9704acb8cc97d838e1a331d6b4ec9 | [
"MIT"
] | null | null | null | plan_backend/apps.py | 1uy31/plan | 66f0b1dff7f9704acb8cc97d838e1a331d6b4ec9 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class PlanBackendConfig(AppConfig):
    """AppConfig for plan_backend."""
    # Use 64-bit auto-incrementing primary keys for models in this app.
    default_auto_field = "django.db.models.BigAutoField"
    name = "plan_backend"
| 21.555556 | 56 | 0.742268 |
cd905ee940773e60ca7bfa4010bf1468e2bc90ea | 525 | py | Python | resrc/search/views.py | theWhiteFox/resrc | d62bcf3ba2a55f50ae38a1e606072ee3d6025da5 | [
"MIT"
] | 274 | 2015-01-02T08:57:58.000Z | 2022-03-11T11:44:44.000Z | resrc/search/views.py | ninjaCheery/resrc | 8af3a1a3617fd305a2c8aecffb609ed3e9c1addc | [
"MIT"
] | 8 | 2015-05-19T02:54:49.000Z | 2016-07-07T18:10:40.000Z | resrc/search/views.py | ninjaCheery/resrc | 8af3a1a3617fd305a2c8aecffb609ed3e9c1addc | [
"MIT"
] | 112 | 2015-01-03T18:59:23.000Z | 2019-10-08T11:49:18.000Z | #-*- coding: utf-8 -*-:
from django.shortcuts import render
from forms import LinksSearchForm
def search(request):
    """Render search results for the ``q`` GET parameter via Haystack."""
    # Idiom: QueryDict.get() replaces the manual membership test.
    # we retrieve the query to display it in the template
    query = request.GET.get('q', '')
    form = LinksSearchForm(request.GET)
    # we call the search method from the LinksSearchForm. Haystack do the work!
    results = form.search()
    return render(request, 'search/search.html', {
        'query' : query,
        'links' : results,
    })
| 23.863636 | 79 | 0.632381 |
03b6cd4dbc1c3789b3149cb9288ac4aaac85e633 | 697 | py | Python | secedgar/utils/cik_map.py | mjdhasan/sec-edgar | 51ee1639641f6dfb7b1c7b8daf2c252056624957 | [
"Apache-2.0"
] | 1 | 2021-09-09T10:54:39.000Z | 2021-09-09T10:54:39.000Z | secedgar/utils/cik_map.py | DijunLiu1995/sec-edgar | 51ee1639641f6dfb7b1c7b8daf2c252056624957 | [
"Apache-2.0"
] | null | null | null | secedgar/utils/cik_map.py | DijunLiu1995/sec-edgar | 51ee1639641f6dfb7b1c7b8daf2c252056624957 | [
"Apache-2.0"
] | null | null | null | import json
import requests
URL = "https://www.sec.gov/files/company_tickers.json"
def get_cik_map(key="ticker"):
"""Get dictionary of tickers to CIK numbers.
Args:
key (str): Should be either "ticker" or "title". Choosing "ticker"
will give dict with tickers as keys. Choosing "title" will use
company name as keys.
Returns:
Dictionary with either ticker or company name as keys, depending on
``key`` argument, and corresponding CIK as values.
.. versionadded:: 0.1.6
"""
response = requests.get(URL)
json_response = json.loads(response.text)
return {v[key]: str(v["cik_str"]) for v in json_response.values()}
| 29.041667 | 75 | 0.655667 |
59eb7c18a0593e47014ce44b7ab9d636611d9f01 | 884 | py | Python | service/protocol-plugin/plugins/mqtt-fan/lib/python/setup.py | mandeepshetty/iotivity | fea1aa7f7088fdf206ebc3ff587766da96836708 | [
"Apache-2.0"
] | null | null | null | service/protocol-plugin/plugins/mqtt-fan/lib/python/setup.py | mandeepshetty/iotivity | fea1aa7f7088fdf206ebc3ff587766da96836708 | [
"Apache-2.0"
] | null | null | null | service/protocol-plugin/plugins/mqtt-fan/lib/python/setup.py | mandeepshetty/iotivity | fea1aa7f7088fdf206ebc3ff587766da96836708 | [
"Apache-2.0"
] | 1 | 2022-01-22T19:42:20.000Z | 2022-01-22T19:42:20.000Z | from sys import version
from distutils.core import setup
# Distutils package metadata for the `mosquitto` MQTT v3.1 client module
# (single pure-Python module, see py_modules below).
setup(name='mosquitto',
    version='1.3.1',
    description='MQTT version 3.1 client class',
    author='Roger Light',
    author_email='roger@atchoo.org',
    url='http://mosquitto.org/',
    download_url='http://mosquitto.org/files/',
    license='BSD License',
    py_modules=['mosquitto'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Topic :: Communications',
        'Topic :: Internet',
        ]
    )
| 30.482759 | 51 | 0.613122 |
becbc4bfa76c7d05066dd6601fce7fe61f7d02e1 | 2,499 | py | Python | examples/dfp/v201611/reconciliation_report_row_service/get_reconciliation_report_rows_for_reconciliation_report.py | agencia-watermelons/googleads-python-lib | d2e55863ecf7e5090c225d74b3f4c1f948cd5a21 | [
"Apache-2.0"
] | null | null | null | examples/dfp/v201611/reconciliation_report_row_service/get_reconciliation_report_rows_for_reconciliation_report.py | agencia-watermelons/googleads-python-lib | d2e55863ecf7e5090c225d74b3f4c1f948cd5a21 | [
"Apache-2.0"
] | null | null | null | examples/dfp/v201611/reconciliation_report_row_service/get_reconciliation_report_rows_for_reconciliation_report.py | agencia-watermelons/googleads-python-lib | d2e55863ecf7e5090c225d74b3f4c1f948cd5a21 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gets a reconciliation report's rows for line items that served through DFP.
"""
# Import appropriate modules from the client library.
from googleads import dfp
RECONCILIATION_REPORT_ID = 'INSERT_RECONCILIATION_REPORT_ID_HERE'


def main(client, reconciliation_report_id):
  """Print every row of the given reconciliation report that served via DFP.

  Args:
    client: an initialized dfp.DfpClient instance.
    reconciliation_report_id: id of the reconciliation report whose rows
        should be printed.
  """
  # Initialize appropriate service.
  reconciliation_report_row_service = client.GetService(
      'ReconciliationReportRowService', version='v201611')

  # Select rows of this report, excluding the placeholder row whose
  # lineItemId is 0 (i.e. rows not tied to any line item).
  query = ('WHERE reconciliationReportId = %s AND '
           'lineItemId != :lineItemId') % reconciliation_report_id
  values = [
      {'key': 'lineItemId',
       'value': {
           'xsi_type': 'NumberValue',
           'value': '0'
       }},
  ]

  # Create a statement to select reconciliation report rows.
  statement = dfp.FilterStatement(query, values)

  # Retrieve a small amount of reconciliation report rows at a time, paging
  # through until all reconciliation report rows have been retrieved.
  while True:
    response = (
        reconciliation_report_row_service
        .getReconciliationReportRowsByStatement(
            statement.ToStatement()))
    if 'results' in response:
      for reconciliation_report_row in response['results']:
        # Print out some information for each reconciliation report row.
        print('Reconciliation report row with ID "%d", reconciliation source '
              '"%s", and reconciled volume "%d" was found.\n' %
              (reconciliation_report_row['id'],
               reconciliation_report_row['reconciliationSource'],
               reconciliation_report_row['reconciledVolume']))
      statement.offset += dfp.SUGGESTED_PAGE_LIMIT
    else:
      break

  # BUG FIX: this was a Python 2 `print` statement (a SyntaxError under
  # Python 3) and inconsistent with the print() calls above.
  print('\nNumber of results found: %s' % response['totalResultSetSize'])


if __name__ == '__main__':
  # Initialize client object.
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client, RECONCILIATION_REPORT_ID)
| 37.298507 | 78 | 0.712685 |
8c9c3f673cc8b7f2b9de84835462c06fde9a2d35 | 942 | py | Python | src/pretalx/submission/models/resource.py | oxinabox/pretalx | aa8cb810addd3f6f3fd6e7557497a04a51653413 | [
"Apache-2.0"
] | null | null | null | src/pretalx/submission/models/resource.py | oxinabox/pretalx | aa8cb810addd3f6f3fd6e7557497a04a51653413 | [
"Apache-2.0"
] | null | null | null | src/pretalx/submission/models/resource.py | oxinabox/pretalx | aa8cb810addd3f6f3fd6e7557497a04a51653413 | [
"Apache-2.0"
] | null | null | null | from django.db import models
from django.utils.translation import ugettext_lazy as _
from django_scopes import ScopedManager
from pretalx.common.mixins import LogMixin
class Resource(LogMixin, models.Model):
    """Resources are file uploads belonging to a :class:`~pretalx.submission.models.submission.Submission`."""

    # Owning submission; PROTECT prevents deleting a submission that still
    # has uploaded resources attached.
    submission = models.ForeignKey(
        to='submission.Submission', related_name='resources', on_delete=models.PROTECT
    )
    # The uploaded file itself.
    resource = models.FileField(
        verbose_name=_('file'),
        help_text=_('Please try to keep your upload small, preferably below 16 MB.'),
    )
    # Optional free-text description shown alongside the file.
    description = models.CharField(
        null=True, blank=True, max_length=1000, verbose_name=_('description')
    )
    # Scoped manager: queries are restricted to the current event via the
    # submission relation (django_scopes).
    objects = ScopedManager(event='submission__event')

    def __str__(self):
        """Help when debugging."""
        return f'Resource(event={self.submission.event.slug}, submission={self.submission.title})'
| 36.230769 | 110 | 0.717622 |
dd79e8396ac219af8e401d9c7f6d9b4585b19ebc | 2,511 | py | Python | NotIncluded/Depreciated/_model_GK_Reduced.py | MattArran/GEMMES | ed48ebef08fdf740ed28248c65ed7d8239ab19c4 | [
"MIT"
] | 4 | 2021-06-28T07:11:34.000Z | 2022-01-11T13:43:17.000Z | NotIncluded/Depreciated/_model_GK_Reduced.py | DaluS/GEMMES | 10d4a062004ce5b7fd26eb8c4937d940b7d097d5 | [
"MIT"
] | 167 | 2021-06-28T07:10:21.000Z | 2022-03-18T17:30:40.000Z | NotIncluded/Depreciated/_model_GK_Reduced.py | MattArran/GEMMES | ed48ebef08fdf740ed28248c65ed7d8239ab19c4 | [
"MIT"
] | 3 | 2021-06-28T07:19:12.000Z | 2022-03-03T02:44:15.000Z | # -*- coding: utf-8 -*-
"""
Here we define the parameters and set of equation for a model of type 'GK'
All parameters can have value:
- None: set to the default value found in _def_fields.py
- scalar: int or float
- list or np.ndarray used for benchmarks
- function (callable): in that can it will be treated as a variable
the function will be called at each time step
"""
# ---------------------------
# user-defined function order (optional)
# Optional explicit evaluation order for the functions below; None lets the
# framework decide the order itself.
_FUNC_ORDER = None
# ---------------------------
# user-defined model
# contains parameters and functions of various types
_DPARAM = {
    # ---------
    # Fixed-value parameters
    # None means: use the default value found in _def_fields.py (see the
    # module docstring above).
    'alpha': None,
    'delta': None,
    'beta': None,
    'nu': None,
    'phinull': None,
    # ---------
    # functions
    # Each entry provides a callable plus its equation type; 'ode' entries
    # also carry an initial condition.  All lambda arguments default to 0
    # (or 1 for divisors) so the solver can introspect the signatures.
    'lambda': {
        # ODE: d(lambda)/dt = lambda * (g - alpha - beta)
        'func': lambda itself=0, g=0, alpha=0, beta=0: itself * (g - alpha - beta),
        'eqtype': 'ode',
        'initial': 0.97,
    },
    'omega': {
        # ODE: d(omega)/dt = omega * philips
        'func': lambda itself=0, philips=0: itself * philips,
        'eqtype': 'ode',
        'initial': 0.85,
        # Intermediary
    },
    'philips': {
        # NOTE(review): the argument is spelled 'lamb' because 'lambda' is a
        # Python keyword; presumably the framework maps the variable named
        # 'lambda' onto it -- confirm against the solver code.
        'func': lambda phi0=0, phi1=0, lamb=0: -phi0 + phi1 / (1 - lamb)**2,
        'eqtype': 'intermediary',
    },
    'g': {
        'func': lambda pi=0, nu=1, delta=0: pi / nu - delta,
        'eqtype': 'intermediary',
    },
    'pi': {
        'func': lambda omega=0: 1. - omega,
        'eqtype': 'intermediary',
    },
    'phi0': {
        'func': lambda phinull=0: phinull / (1 - phinull**2),
        'eqtype': 'auxiliary',
    },
    'phi1': {
        'func': lambda phinull=0: phinull**3 / (1 - phinull**2),
        'eqtype': 'auxiliary',
    },
}
# ---------------------------
# List of presets for specific interesting simulations
# Each preset overrides initial variable values and/or parameters; the
# 'commentary' string describes the expected qualitative behaviour.
_PRESETS = {
    'smallcycle' : {
        'variables' : {
            'lambda': .97,
            'omega' : .85 ,
        },
        'parameters': {},
        'commentary': 'This is a run that should give simple stable sinusoidal oscillations'
    },
    'bigcycle' : {
        'variables' : {
            'lambda': .99,
            'omega' : .85 ,
        },
        'parameters': {},
        'commentary': 'This is a run that should give extremely violent stable oscillations'
    },
    'badnegociation' : {
        'variables' : {},
        'parameters': { 'phinull': .3,},
        'commentary': 'This should displace the Solow Point'
    },
}
262fa57ffb2ff229a5fe91ae73922d862c7d0c39 | 1,382 | py | Python | giffer.py | AKAMEDIASYSTEM/timelapse-tweeter | 88f331955ca2e1f0bd556d4d7e5ba9b4a9c9a896 | [
"MIT"
] | null | null | null | giffer.py | AKAMEDIASYSTEM/timelapse-tweeter | 88f331955ca2e1f0bd556d4d7e5ba9b4a9c9a896 | [
"MIT"
] | null | null | null | giffer.py | AKAMEDIASYSTEM/timelapse-tweeter | 88f331955ca2e1f0bd556d4d7e5ba9b4a9c9a896 | [
"MIT"
] | null | null | null | import tweepy
import sys
import requests
import random
from datetime import datetime
# figure out how to get the below twitter things
# Twitter API credentials -- placeholders, fill in before running.
twitter_consumer_key= 'FILL_IT_IN'
twitter_consumer_secret= 'FILL_IT_IN'
twitter_access_token = 'FILL_IT_IN'
twitter_access_token_secret = 'FILL_IT_IN'
# Tweet body defaults to the current timestamp, e.g. "January 01, 2021 at 12:00".
tweet_text = "{:%B %d, %Y at %H:%M}".format(datetime.now())
# Only need to fill these in if you're also adding pinboard tags to your tweets
pinboard_token = 'FILL_IT_IN'
pinboard_url = 'https://api.pinboard.in/v1/posts/recent'
twitter_auth = tweepy.OAuthHandler(twitter_consumer_key, twitter_consumer_secret)
twitter_auth.set_access_token(twitter_access_token, twitter_access_token_secret)
# The GIF is expected next to the script (sys.path[0] is the script's directory).
theGif = sys.path[0] + '/lastHour.gif'
# this part can be uncommented if you want to add the pinboard links-as-text feature
# NOTE(review): the disabled block below is Python 2 code (bare `print`
# statements); it would need print() calls if revived under Python 3.
'''
payload = {'auth_token': pinboard_token, 'format': 'json'}
ttags = []
r = requests.get(pinboard_url, params=payload)
j = r.json()
for foo in j['posts']:
    # print foo
    if len(foo['tags']) > 1:
        # print foo['tags']
        f = foo['tags'].split(' ')
        for i in f:
            ttags.append(i)
s = set(ttags)
qqq = random.sample(s, 2)
# for t in qqq:
#     print t
tweet_text = ' | '.join(qqq)
print tweet_text
api = tweepy.API(twitter_auth)
api.update_with_media(theGif, tweet_text)
'''
# Post the pre-rendered GIF with the timestamp text.
api = tweepy.API(twitter_auth)
api.update_with_media(theGif, tweet_text)
a8c413e50e782a0398529afe766b3497299ef5ff | 10,855 | py | Python | music21/analysis/transposition.py | cuthbertLab/music21 | 1be16c255460107c10d7b4bc8eb77f0d115b5eac | [
"MIT"
] | 1,449 | 2015-01-09T15:53:56.000Z | 2022-03-31T18:24:46.000Z | venv/Lib/site-packages/music21/analysis/transposition.py | alimirzazadeh/wolfGANg | 5bf56f7d8e6c1c283edb98bdaecfd5a606b4462c | [
"MIT"
] | 1,179 | 2015-01-07T17:07:54.000Z | 2022-03-31T16:46:02.000Z | music21/analysis/transposition.py | cuthbertLab/music21 | 1be16c255460107c10d7b4bc8eb77f0d115b5eac | [
"MIT"
] | 393 | 2015-01-03T20:38:16.000Z | 2022-03-25T16:51:22.000Z | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# Name: transposition.py
# Purpose: Tools for checking distinct transposition
#
# Authors: Mark Gotham
#
# Copyright: Copyright © 2017 Michael Scott Cuthbert and the music21 Project
# License: BSD, see license.txt
# ------------------------------------------------------------------------------
import unittest
from music21 import common
from music21 import exceptions21
from music21 import pitch
from music21 import chord
from music21 import environment
# Module identifier handed to music21's environment machinery (debug/config).
_MOD = 'analysis.transposition'
environLocal = environment.Environment(_MOD)


class TranspositionException(exceptions21.Music21Exception):
    '''Exception raised for invalid input to the transposition tools.'''
    pass
class TranspositionChecker:
    '''
    Given a list of pitches, checks for the number of distinct transpositions.

    >>> pList = [pitch.Pitch('C4'), pitch.Pitch('E4'), pitch.Pitch('G#4')]
    >>> tc = analysis.transposition.TranspositionChecker(pList)
    >>> tc.numDistinctTranspositions()
    4
    >>> allNormalOrderPitchTuples = tc.getPitchesOfDistinctTranspositions()
    >>> allNormalOrderPitchTuples
    [(<music21.pitch.Pitch C>, <music21.pitch.Pitch E>,
    <music21.pitch.Pitch G#>),
    (<music21.pitch.Pitch C#>, <music21.pitch.Pitch F>,
    <music21.pitch.Pitch A>),
    (<music21.pitch.Pitch D>, <music21.pitch.Pitch F#>,
    <music21.pitch.Pitch A#>),
    (<music21.pitch.Pitch E->, <music21.pitch.Pitch G>,
    <music21.pitch.Pitch B>)]

    >>> myChord = chord.Chord(['C', 'E-', 'F#', 'A'])
    >>> pList = myChord.pitches
    >>> tc = analysis.transposition.TranspositionChecker(pList)
    >>> allNormalOrderChords = tc.getChordsOfDistinctTranspositions()
    >>> allNormalOrderChords
    [<music21.chord.Chord C E- F# A>,
    <music21.chord.Chord C# E G A#>,
    <music21.chord.Chord D F G# B>]
    '''
    def __init__(self, pitches=None):
        if pitches is None:
            raise TranspositionException('Must have some input')
        if not common.isIterable(pitches):
            raise TranspositionException('Must be a list or tuple')
        if not pitches:
            raise TranspositionException(
                'Must have at least one element in list'
            )
        # NOTE: elements are not type-checked here; anything with a
        # .transpose() method that chord.Chord accepts will work.
        self.pitches = pitches
        # Lazily-computed caches filled in by the methods below.
        self.allTranspositions = None
        self.allNormalOrders = None
        self.distinctNormalOrders = None

    def getTranspositions(self):
        '''
        Gets all 12 transpositions (distinct or otherwise)

        >>> p = [pitch.Pitch('D#')]
        >>> tc = analysis.transposition.TranspositionChecker(p)
        >>> tc.getTranspositions()
        [[<music21.pitch.Pitch E->],
        [<music21.pitch.Pitch E>],
        [<music21.pitch.Pitch F>],
        [<music21.pitch.Pitch F#>],
        [<music21.pitch.Pitch G>],
        [<music21.pitch.Pitch G#>],
        [<music21.pitch.Pitch A>],
        [<music21.pitch.Pitch B->],
        [<music21.pitch.Pitch B>],
        [<music21.pitch.Pitch C>],
        [<music21.pitch.Pitch C#>],
        [<music21.pitch.Pitch D>]]
        '''
        # One transposed copy of the pitch list per semitone 0-11.
        allTranspositions = [
            [p.transpose(i) for p in self.pitches] for i in range(12)
        ]
        self.allTranspositions = allTranspositions
        return allTranspositions

    def listNormalOrders(self):
        '''
        List the normal orders for all 12 transpositions

        >>> pList = [pitch.Pitch('C4'), pitch.Pitch('E4'), pitch.Pitch('G#4')]
        >>> tc = analysis.transposition.TranspositionChecker(pList)
        >>> tc.listNormalOrders()
        [[0, 4, 8], [1, 5, 9], [2, 6, 10], [3, 7, 11],
        [0, 4, 8], [1, 5, 9], [2, 6, 10], [3, 7, 11],
        [0, 4, 8], [1, 5, 9], [2, 6, 10], [3, 7, 11]]
        '''
        if self.allTranspositions is None:
            self.getTranspositions()
        # chord.Chord(...).normalOrder gives the pitch classes in normal form.
        allNormalOrders = [
            chord.Chord(transposition).normalOrder
            for transposition in self.allTranspositions
        ]
        self.allNormalOrders = allNormalOrders
        return allNormalOrders

    def listDistinctNormalOrders(self):
        '''
        List the distinct normal orders (without duplication).

        >>> pList = [pitch.Pitch('C4'), pitch.Pitch('E4'), pitch.Pitch('G#4')]
        >>> tc = analysis.transposition.TranspositionChecker(pList)
        >>> tc.listDistinctNormalOrders()
        [[0, 4, 8], [1, 5, 9], [2, 6, 10], [3, 7, 11]]
        '''
        if self.allNormalOrders is None:
            self.listNormalOrders()
        # Deduplicate while preserving first-seen order.  (Replaces the
        # previous side-effecting `seen.add` list comprehension.)
        seen = set()
        distinctNormalOrders = []
        for normalOrder in self.allNormalOrders:
            key = tuple(normalOrder)
            if key not in seen:
                seen.add(key)
                distinctNormalOrders.append(normalOrder)
        self.distinctNormalOrders = distinctNormalOrders
        return distinctNormalOrders

    def numDistinctTranspositions(self):
        '''
        Gives the number of distinct transpositions (normal orders).

        >>> pList = [pitch.Pitch('C4'), pitch.Pitch('E4'), pitch.Pitch('G#4')]
        >>> tc = analysis.transposition.TranspositionChecker(pList)
        >>> tc.numDistinctTranspositions()
        4
        '''
        if self.distinctNormalOrders is None:
            self.listDistinctNormalOrders()
        return len(self.distinctNormalOrders)

    def getChordsOfDistinctTranspositions(self):
        '''
        Outputs chords for each distinct transposition (normal order).

        >>> pList = [pitch.Pitch('C4'), pitch.Pitch('E4'), pitch.Pitch('G#4')]
        >>> tc = analysis.transposition.TranspositionChecker(pList)
        >>> tc.getChordsOfDistinctTranspositions()
        [<music21.chord.Chord C E G#>,
        <music21.chord.Chord C# F A>,
        <music21.chord.Chord D F# A#>,
        <music21.chord.Chord E- G B>]
        '''
        if self.distinctNormalOrders is None:
            self.listDistinctNormalOrders()
        return [
            chord.Chord(normalOrder)
            for normalOrder in self.distinctNormalOrders
        ]

    def getPitchesOfDistinctTranspositions(self):
        '''
        Outputs pitch tuples for each distinct transposition (normal order).

        >>> pList = [pitch.Pitch('C4'), pitch.Pitch('E4'), pitch.Pitch('G#4')]
        >>> tc = analysis.transposition.TranspositionChecker(pList)
        >>> tc.getPitchesOfDistinctTranspositions()
        [(<music21.pitch.Pitch C>, <music21.pitch.Pitch E>, <music21.pitch.Pitch G#>),
        (<music21.pitch.Pitch C#>, <music21.pitch.Pitch F>, <music21.pitch.Pitch A>),
        (<music21.pitch.Pitch D>, <music21.pitch.Pitch F#>, <music21.pitch.Pitch A#>),
        (<music21.pitch.Pitch E->, <music21.pitch.Pitch G>, <music21.pitch.Pitch B>)]
        '''
        chords = self.getChordsOfDistinctTranspositions()
        allNormalOrderPitchTuples = [c.pitches for c in chords]
        return allNormalOrderPitchTuples
# ------------------------------------------------------------------------------
class Test(unittest.TestCase):
    '''Unit tests for TranspositionChecker.'''

    def testConstructTranspositionChecker(self):
        # Construction keeps the pitch list as given.
        p = [pitch.Pitch('D#')]
        tc = TranspositionChecker(p)
        self.assertEqual(tc.pitches, p)
        numberOfPitchesInTc = len(tc.pitches)
        self.assertEqual(numberOfPitchesInTc, len(p))

    def testTranspositions(self):
        # A single pitch yields 12 single-pitch transpositions, one per semitone.
        p = [pitch.Pitch('D#')]
        tc = TranspositionChecker(p)
        allTranspositions = tc.getTranspositions()
        self.assertEqual(len(allTranspositions), 12)
        self.assertIsInstance(allTranspositions[0][0], pitch.Pitch)
        self.assertEqual(allTranspositions[0][0].midi, p[0].midi)
        self.assertEqual(allTranspositions[1][0].midi, p[0].midi + 1)
        # Two pitches transpose together, preserving their order.
        p = [pitch.Pitch('D#'), pitch.Pitch('F')]
        tc = TranspositionChecker(p)
        allTranspositions = tc.getTranspositions()
        self.assertEqual(len(allTranspositions), 12)
        self.assertIsInstance(allTranspositions[0][0], pitch.Pitch)
        self.assertEqual(allTranspositions[0][0].midi, p[0].midi)
        self.assertEqual(allTranspositions[0][1].midi, p[1].midi)

    def testNormalOrders(self):
        pList = [pitch.Pitch('C4'), pitch.Pitch('E4'), pitch.Pitch('G#4')]
        tc = TranspositionChecker(pList)
        normalOrders = tc.listNormalOrders()
        self.assertEqual(len(normalOrders), 12)
        # Normal orders contain pitch classes, i.e. values 0-11.
        self.assertLess(normalOrders[0][0], 13)

    def testDistinctNormalOrders(self):
        # The augmented triad has only 4 distinct transpositions.
        pList = [pitch.Pitch('C4'), pitch.Pitch('E4'), pitch.Pitch('G#4')]
        tc = TranspositionChecker(pList)
        allDistinctNormalOrders = tc.listDistinctNormalOrders()
        lengthDistinctNormalOrders = tc.numDistinctTranspositions()
        self.assertEqual(len(allDistinctNormalOrders), 4)
        self.assertEqual(lengthDistinctNormalOrders, 4)
        self.assertIsInstance(allDistinctNormalOrders, list)
        self.assertEqual(allDistinctNormalOrders[0], [0, 4, 8])

    def testNormalOrderChords(self):
        pList = [pitch.Pitch('C4'), pitch.Pitch('E4'), pitch.Pitch('G#4')]
        tc = TranspositionChecker(pList)
        allNormalOrderChords = tc.getChordsOfDistinctTranspositions()
        self.assertEqual(len(allNormalOrderChords), 4)
        # self.assertEqual(lengthDistinctNormalOrders, 4)
        self.assertIsInstance(allNormalOrderChords[0], chord.Chord)
        self.assertIsInstance(allNormalOrderChords[0].pitches[0], pitch.Pitch)
        # self.assertEqual(allDistinctNormalOrders[0], [0,4,8])

    def testNormalOrdersPitches(self):
        pList = [pitch.Pitch('C4'), pitch.Pitch('E4'), pitch.Pitch('G#4')]
        tc = TranspositionChecker(pList)
        allNormalOrderPitchTuples = tc.getPitchesOfDistinctTranspositions()
        self.assertEqual(len(allNormalOrderPitchTuples), 4)
        # self.assertEqual(lengthDistinctNormalOrders, 4)
        self.assertIsInstance(allNormalOrderPitchTuples[0], tuple)
        self.assertIsInstance(allNormalOrderPitchTuples[0][0], pitch.Pitch)
        # self.assertEqual(allDistinctNormalOrders[0], [0,4,8])
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    import music21
    # music21's test runner executes the TestCase (and doctests) defined above.
    music21.mainTest(Test)
| 40.055351 | 87 | 0.608752 |
3f7a21e37b92ab35fff4ab4d1e4e06fbe288a7b1 | 4,084 | py | Python | mnist_cifar10/utils.py | tanimutomo/evaluating-logit-pairing-methods | 5473c66cfc3c516a5d7ea99987d5a60074a3857a | [
"Apache-2.0"
] | 22 | 2018-11-30T13:30:33.000Z | 2021-08-17T20:59:01.000Z | mnist_cifar10/utils.py | tanimutomo/evaluating-logit-pairing-methods | 5473c66cfc3c516a5d7ea99987d5a60074a3857a | [
"Apache-2.0"
] | 2 | 2019-11-07T10:16:01.000Z | 2020-11-21T08:44:29.000Z | mnist_cifar10/utils.py | tanimutomo/evaluating-logit-pairing-methods | 5473c66cfc3c516a5d7ea99987d5a60074a3857a | [
"Apache-2.0"
] | 4 | 2019-03-09T17:12:12.000Z | 2021-10-04T12:58:44.000Z | import tensorflow as tf
# -------------------------------------------------------------
# Helpers
# -------------------------------------------------------------
def load_mnist(batch_size, data_dir, augmentation=False, stddev=0.0, adv_subset=1000, workers=4):
    """Build MNIST data loaders for training, testing, and adversarial testing.

    Returns a tuple ``(trainloader, testloader, adv_testloader, input_shape,
    num_classes)`` where ``input_shape`` is ``(None, 28, 28, 1)``.
    """
    from data_loader import get_mnist

    # Arguments shared by all three loaders.
    shared = {'batch_size': batch_size, 'path': data_dir, 'workers': workers}
    trainloader, _, classes = get_mnist(train=True,
                                        augmentation=augmentation,
                                        std=stddev,
                                        shuffle=True,
                                        **shared)
    testloader, _, _ = get_mnist(train=False, shuffle=False, **shared)
    # Adversarial evaluation uses a fixed-size subset of the test set.
    adv_testloader, _, _ = get_mnist(train=False,
                                     shuffle=False,
                                     adversarial=True,
                                     subset=adv_subset,
                                     **shared)
    input_shape = (None, 28, 28, 1)
    return trainloader, testloader, adv_testloader, input_shape, len(classes)
def load_cifar10(batch_size, data_dir, augmentation=False, stddev=0.0, adv_subset=1000, workers=4):
    """Build CIFAR-10 data loaders for training, testing, and adversarial testing.

    Returns a tuple ``(trainloader, testloader, adv_testloader, input_shape,
    num_classes)`` where ``input_shape`` is ``(None, 32, 32, 3)``.
    """
    from data_loader import get_cifar10

    # Arguments shared by all three loaders.
    shared = {'batch_size': batch_size, 'path': data_dir, 'workers': workers}
    trainloader, _, classes = get_cifar10(train=True,
                                          augmentation=augmentation,
                                          std=stddev,
                                          shuffle=True,
                                          **shared)
    testloader, _, _ = get_cifar10(train=False, shuffle=False, **shared)
    # Adversarial evaluation uses a fixed-size subset of the test set.
    adv_testloader, _, _ = get_cifar10(train=False,
                                       shuffle=False,
                                       adversarial=True,
                                       subset=adv_subset,
                                       **shared)
    input_shape = (None, 32, 32, 3)
    return trainloader, testloader, adv_testloader, input_shape, len(classes)
def variable_summaries(var, name=None, collections=['training'], histo=True):
    """Attach mean/stddev/max/min scalar summaries for `var` (and optionally a
    histogram) to the given summary collections.

    Args:
        var: Tensor to summarize.
        name: Summary name prefix; defaults to `var.op.name`.
        collections: Summary collections to write to.  NOTE: a mutable default
            is kept for interface compatibility; it is never mutated here.
        histo: If True, also record a histogram of `var`.
    """
    with tf.device('/gpu:0'):
        if name is None:
            name = var.op.name
        # (Removed dead code: the original computed `var_shape`/`var_dim`
        # but never used them.)
        with tf.name_scope('Compute-Mean'):
            mean = tf.reduce_mean(var)
        with tf.name_scope('Compute-Stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        # Renamed from `max`/`min`, which shadowed the Python builtins.
        with tf.name_scope('Compute-Max'):
            max_value = tf.reduce_max(var)
        with tf.name_scope('Compute-Min'):
            min_value = tf.reduce_min(var)

        # Write summaries
        tf.summary.scalar(name + '/mean', mean, collections=collections)
        tf.summary.scalar(name + '/stddev', stddev, collections=collections)
        tf.summary.scalar(name + '/max', max_value, collections=collections)
        tf.summary.scalar(name + '/min', min_value, collections=collections)
        if histo:
            tf.summary.histogram(name, tf.identity(var), collections=collections)
| 39.650485 | 99 | 0.426298 |
32440d5c6671ceb08fa060d66a1f77aac954de80 | 721 | py | Python | jhubflask/utils.py | devMoxie/jhubflask | 8cbda26d4a0dbb93165d433b726ac9dd4459e0a2 | [
"BSD-3-Clause"
] | null | null | null | jhubflask/utils.py | devMoxie/jhubflask | 8cbda26d4a0dbb93165d433b726ac9dd4459e0a2 | [
"BSD-3-Clause"
] | null | null | null | jhubflask/utils.py | devMoxie/jhubflask | 8cbda26d4a0dbb93165d433b726ac9dd4459e0a2 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Helper utilities and decorators."""
from flask import flash, current_app
from flask_login import current_user
from functools import wraps
def flash_errors(form, category='warning'):
    """Flash every validation error attached to *form*.

    Each message is rendered as ``"<field label> - <error>"`` using the
    given flash *category*.
    """
    for field_name, messages in form.errors.items():
        label_text = getattr(form, field_name).label.text
        for message in messages:
            flash('{0} - {1}'.format(label_text, message), category)
def admin_required(func):
    """View decorator that restricts access to admin users.

    Non-admins get a flash message and the login manager's ``unauthorized``
    response instead of the wrapped view.
    """
    @wraps(func)
    def decorated_view(*args, **kwargs):
        if current_user.is_admin:
            return func(*args, **kwargs)
        flash("This page is for admins only!", "danger")
        return current_app.login_manager.unauthorized()
    return decorated_view
050c846dd14fbee596e9275657acc9705b8992d0 | 1,126 | py | Python | plans/fixed_ensemble_vggish_linear_2.py | dbis-uibk/MediaEval2021 | 14d754d9cea36415090aaa115db81f5ace465964 | [
"BSD-2-Clause"
] | 1 | 2022-03-31T07:28:12.000Z | 2022-03-31T07:28:12.000Z | plans/fixed_ensemble_vggish_linear_2.py | dbis-uibk/MediaEval2021 | 14d754d9cea36415090aaa115db81f5ace465964 | [
"BSD-2-Clause"
] | null | null | null | plans/fixed_ensemble_vggish_linear_2.py | dbis-uibk/MediaEval2021 | 14d754d9cea36415090aaa115db81f5ace465964 | [
"BSD-2-Clause"
] | null | null | null | """Ensemble plan manually split by type moode/theme."""
import json
from dbispipeline.evaluators import FixedSplitEvaluator
from dbispipeline.evaluators import ModelCallbackWrapper
from sklearn.pipeline import Pipeline
import numpy as np
from mediaeval2021 import common
from mediaeval2021.dataloaders.melspectrograms import MelSpectPickleLoader
from mediaeval2021.dataloaders.melspectrograms import labels_to_indices
from mediaeval2021.models.ensemble import Ensemble
from mediaeval2021.models.wrapper import TorchWrapper
dataloader = MelSpectPickleLoader('data/mediaeval2020/melspect_1366.pickle')
label_splits = [
np.arange(0, 28, 1),
np.arange(28, 56, 1),
]
pipeline = Pipeline([
('model',
Ensemble(
base_estimator=TorchWrapper(model_name="CNN", dataloader=dataloader, batch_size=64),
label_splits=label_splits,
epochs=100,
)),
])
evaluator = ModelCallbackWrapper(
FixedSplitEvaluator(**common.fixed_split_params()),
lambda model: common.store_prediction(model, dataloader),
)
result_handlers = [
lambda results: print(json.dumps(results, indent=4)),
]
| 28.15 | 93 | 0.773535 |
05a74ac994ffe3d0c7b30f466bfbff93ab348305 | 4,840 | py | Python | flex/core.py | ivlevdenis/flex | 621a5de27d3d6f97b79378be11042883f2c8994c | [
"MIT"
] | 1 | 2018-11-19T02:38:46.000Z | 2018-11-19T02:38:46.000Z | flex/core.py | javabrett/flex | 55822c2ffaa17dfa5e403d7feed898cb9bc983bb | [
"MIT"
] | null | null | null | flex/core.py | javabrett/flex | 55822c2ffaa17dfa5e403d7feed898cb9bc983bb | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from six.moves import urllib_parse as urlparse
import os
import collections
import requests
from copy import deepcopy
import six
import json
import yaml
from flex.context_managers import ErrorDict
from flex.exceptions import ValidationError
from flex.loading.definitions import (
definitions_validator,
)
from flex.loading.schema import (
swagger_schema_validator,
)
from flex.loading.schema.paths.path_item.operation.responses.single.schema import (
schema_validator,
)
from flex.http import (
normalize_request,
normalize_response,
)
from flex.validation.common import validate_object
from flex.validation.request import validate_request
from flex.validation.response import validate_response
def load_source(source):
    """
    Common entry point for loading some form of raw swagger schema.

    Supports:
        - python object (dictionary-like)
        - path to yaml file
        - path to json file
        - file object (json or yaml).
        - json string.
        - yaml string.
    """
    # BUG FIX: `collections.Mapping` moved to `collections.abc` in Python 3.3
    # and was removed from the `collections` top level in Python 3.10.
    try:
        from collections.abc import Mapping
    except ImportError:  # pragma: no cover -- Python 2
        from collections import Mapping

    if isinstance(source, Mapping):
        # Already parsed: hand back an independent copy.
        return deepcopy(source)
    elif hasattr(source, 'read') and callable(source.read):
        raw_source = source.read()
    elif os.path.exists(os.path.expanduser(str(source))):
        with open(os.path.expanduser(str(source)), 'r') as source_file:
            raw_source = source_file.read()
    elif isinstance(source, six.string_types):
        parts = urlparse.urlparse(source)
        if parts.scheme and parts.netloc:
            # Looks like a URL: fetch it.
            response = requests.get(source)
            if isinstance(response.content, six.binary_type):
                raw_source = six.text_type(response.content, encoding='utf-8')
            else:
                raw_source = response.content
        else:
            # Treat the string itself as the raw document.
            raw_source = source
    try:
        try:
            return json.loads(raw_source)
        except ValueError:
            pass
        try:
            return yaml.safe_load(raw_source)
        except (yaml.scanner.ScannerError, yaml.parser.ParserError):
            pass
    except NameError:
        # No branch above matched, so `raw_source` was never bound; fall
        # through to the generic error below.
        pass
    raise ValueError(
        "Unable to parse `{0}`. Tried yaml and json.".format(source),
    )
def parse(raw_schema):
    """Validate a raw swagger schema and return its parsed representation."""
    # Definitions are validated first; they provide the context for the
    # top-level schema validation.
    definitions = definitions_validator(
        raw_schema,
        context={'deferred_references': set()},
    )
    return swagger_schema_validator(raw_schema, context=definitions)
def load(target):
    """
    Given one of the supported target formats, load a swagger schema into it's
    python representation.
    """
    return parse(load_source(target))
def validate(raw_schema, target=None, **kwargs):
    """
    Given the python representation of a JSONschema as defined in the swagger
    spec, validate that the schema complies to spec. If `target` is provided,
    that target will be validated against the provided schema.
    """
    schema = schema_validator(raw_schema, **kwargs)
    if target is None:
        return
    validate_object(target, schema=schema, **kwargs)
def validate_api_request(schema, raw_request):
    """Validate a single request against a swagger schema.

    `ErrorDict` collects validation errors while the body runs.
    """
    with ErrorDict():
        validate_request(request=normalize_request(raw_request), schema=schema)
def validate_api_response(schema, raw_response, request_method='get', raw_request=None):
    """
    Validate the response of an api call against a swagger schema.
    """
    request = None if raw_request is None else normalize_request(raw_request)
    response = None
    if raw_response is not None:
        response = normalize_response(raw_response, request=request)
    if response is None:
        return
    validate_response(
        response=response,
        request_method=request_method,
        schema=schema,
    )
def validate_api_call(schema, raw_request, raw_response):
    """
    Validate the request/response cycle of an api call against a swagger
    schema. Request/Response objects from the `requests` and `urllib` library
    are supported.
    """
    request = normalize_request(raw_request)
    # NOTE(review): ErrorDict presumably raises the accumulated errors when
    # the `with` block exits -- confirm in flex.context_managers.
    with ErrorDict() as errors:
        try:
            validate_request(
                request=request,
                schema=schema,
            )
        except ValidationError as err:
            # Record the request error and stop: the response is not checked
            # when the request itself is invalid.
            errors['request'].add_error(err.messages or getattr(err, 'detail'))
            return
        response = normalize_response(raw_response, raw_request)
        try:
            validate_response(
                response=response,
                request_method=request.method,
                schema=schema
            )
        except ValidationError as err:
            errors['response'].add_error(err.messages or getattr(err, 'detail'))
| 28.639053 | 88 | 0.663223 |
732b98527159ce10198daab7de9f2820b7faf35e | 4,641 | py | Python | pytation_examples/simple.py | jetperch/pytation | 55f2725eb20ba4fc0ea748884670763da203c698 | [
"Apache-2.0"
] | 1 | 2021-10-09T17:42:09.000Z | 2021-10-09T17:42:09.000Z | pytation_examples/simple.py | jetperch/pytation | 55f2725eb20ba4fc0ea748884670763da203c698 | [
"Apache-2.0"
] | null | null | null | pytation_examples/simple.py | jetperch/pytation | 55f2725eb20ba4fc0ea748884670763da203c698 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Jetperch LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import time
from pytation import Context
# Absolute directory containing this module.
MYPATH = os.path.dirname(os.path.abspath(__file__))
class Eq1:
    """Example equipment driver whose lifecycle hooks simply log their names."""

    NAME = 'Equipment Example 1'

    def __init__(self):
        # Logger scoped to this module + class for easy filtering.
        self._log = logging.getLogger('{}.Eq1'.format(__name__))

    def setup(self, context: Context, config=None):
        """Lifecycle hook invoked by the station when the suite starts."""
        self._log.info('setup')

    def restore(self):
        """Lifecycle hook invoked to return the device to an idle state."""
        self._log.info('restore')

    def teardown(self):
        """Lifecycle hook invoked when the station shuts the device down."""
        self._log.info('teardown')
class Dut1:
    """Example device-under-test driver; each lifecycle hook only logs."""

    NAME = 'Device under Test Example 1'

    def __init__(self):
        # Logger scoped to this module + class for easy filtering.
        self._log = logging.getLogger('{}.Dut1'.format(__name__))

    def setup(self, context: Context, config=None):
        """Lifecycle hook invoked by the station when the suite starts."""
        self._log.info('setup')

    def restore(self):
        """Lifecycle hook invoked to return the device to an idle state."""
        self._log.info('restore')

    def teardown(self):
        """Lifecycle hook invoked when the station shuts the device down."""
        self._log.info('teardown')
def suite_setup(context, config=None):
    """Prompt the operator to connect a DUT, then mark the suite running."""
    context.state = 'wait_for_dut'   # show the "connect DUT" screen
    context.wait_for_user()          # block until the operator acknowledges
    context.state = 'in_progress'
def suite_teardown(context, config=None):
    """Show the pass/fail screen and wait for operator acknowledgement."""
    context.state = 'pass' if context.result == 0 else 'fail'
    context.wait_for_user()
def enter_serial_number(context, config=None):
    """Ask the operator for the DUT serial number and record it in the env."""
    serial_number = context.prompt('Enter serial number')
    # The serial number doubles as the display name for this run.
    context.env.update({'name': serial_number, 'serial_number': serial_number})
    return 0, {'serial_number': serial_number}
def test1(context, config=None):
config = {} if config is None else config
_log = logging.getLogger(__name__ + '.test1')
mode = config.get('mode', 'normal')
delay = float(config.get('delay', 0.01))
count = int(config.get('count', 50))
_log.info('start: mode=%s', mode)
for i in range(count):
time.sleep(delay)
context.progress((i + 1) / count)
_log.info('stop')
return 0, {'hello': 'world'}
# Station definition consumed by the pytation framework.
STATION = {
    'name': 'simple',
    'full_name': 'Simple test station example',
    'env': {},  # shared mutable environment; populated by enter_serial_number
    'suite_setup': {'fn': suite_setup, 'config': {}},
    'suite_teardown': {'fn': suite_teardown, 'config': {}},
    # Operator-facing UI per station state: image, Qt stylesheet, and HTML.
    'states': {
        'initialize': {
            'pixmap': ':/station/initialize.jpg',
            'style': 'QLabel { background-color : yellow; color : black; font-size : 12pt; }',
            'html': '<html><body><h1>Connect Equipment</h1></body></html>',
        },
        'wait_for_dut': {
            'pixmap': ':/station/wait_for_dut.jpg',
            'style': 'QLabel { background-color : #8080ff; color : black; font-size : 12pt; }',
            'html': '<html><body><h1>Connect Device Under Test and press any key</h1></body></html>',
        },
        'in_progress': {
            'pixmap': ':/station/in_progress.jpg',
            'style': 'QLabel { background-color : white; color : black; font-size : 12pt; }',
            'html': '<html><body><h1>Test in Progress</h1></body></html>',
        },
        'pass': {
            'pixmap': ':/station/pass.jpg',
            'style': 'QLabel { background-color : green; color : black; font-size : 12pt; }',
            'html': '<html><body><h1>PASS</h1><p>Disconnect the device and press any key.</p></body></html>',
        },
        'fail': {
            'pixmap': ':/station/fail.jpg',
            'style': 'QLabel { background-color : red; color : black; font-size : 12pt; }',
            'html': '<html><body><h1>FAILED</h1><p>Disconnect the device and press any key.</p></body></html>',
        },
        'abort': {
            'pixmap': ':/station/abort.jpg',
            'style': 'QLabel { background-color : red; color : black; font-size : 12pt; }',
            'html': '<html><body><h1>ABORT</h1><p>Internal error - close & restart</p></body></html>',
        },
    },
    # Tests run in order; 'config' overrides each test's defaults.
    'tests': [
        {'fn': enter_serial_number},
        {'fn': test1},
        {'name': 'long_iter', 'fn': test1, 'config': {'delay': 0.5, 'count': 4}},
    ],
    # Device drivers instantiated by the station; 'lifecycle' controls when
    # the device is set up / torn down.
    'devices': [
        {'name': 'eq1', 'clz': Eq1},
        {'name': 'dut', 'clz': Dut1, 'lifecycle': 'suite', 'config': {'mode': 'test'}},
    ],
    'gui_resources': [['pytation_examples', 'pytation_examples.rcc']]  # list of [package, resource]
}
| 33.388489 | 111 | 0.591898 |
711b3e1d279e0667f1cf3696574b37045145a9dd | 189 | py | Python | Ch05/test79.py | pearpai/MachineLearnAction | d23def83e50bf53b7cfcdb52d362d88eb52d9115 | [
"Apache-2.0"
] | null | null | null | Ch05/test79.py | pearpai/MachineLearnAction | d23def83e50bf53b7cfcdb52d362d88eb52d9115 | [
"Apache-2.0"
] | null | null | null | Ch05/test79.py | pearpai/MachineLearnAction | d23def83e50bf53b7cfcdb52d362d88eb52d9115 | [
"Apache-2.0"
] | null | null | null | import logRegres
if __name__ == '__main__':
    # Load the sample data set, fit weights by gradient ascent, and plot the
    # resulting decision boundary.  `weights` is presumably a numpy matrix;
    # getA() unwraps it to a plain ndarray -- confirm in logRegres.
    dataArr, labelMat = logRegres.loadDataSet()
    weights = logRegres.gradAscent(dataArr, labelMat)
    logRegres.plotBestFit(weights.getA())
| 27 | 53 | 0.740741 |
1cfa86ffbc58e8963b37507b7a40065a74436079 | 15,894 | py | Python | optuna/samplers/cmaes.py | uskfujino/optuna | 15a1878f80b3dc6f064d8d654509154004f9f46f | [
"MIT"
] | null | null | null | optuna/samplers/cmaes.py | uskfujino/optuna | 15a1878f80b3dc6f064d8d654509154004f9f46f | [
"MIT"
] | null | null | null | optuna/samplers/cmaes.py | uskfujino/optuna | 15a1878f80b3dc6f064d8d654509154004f9f46f | [
"MIT"
] | null | null | null | import math
import pickle
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from cmaes import CMA
import numpy as np
import optuna
from optuna.distributions import BaseDistribution
from optuna.samplers import BaseSampler
from optuna.trial import FrozenTrial
from optuna.trial import TrialState
# Minimum value of sigma0 to avoid ZeroDivisionError.
_MIN_SIGMA0 = 1e-10
class CmaEsSampler(BaseSampler):
"""A Sampler using CMA-ES algorithm.
Example:
Optimize a simple quadratic function by using :class:`~optuna.samplers.CmaEsSampler`.
.. testcode::
import optuna
def objective(trial):
x = trial.suggest_uniform('x', -1, 1)
y = trial.suggest_int('y', -1, 1)
return x ** 2 + y
sampler = optuna.samplers.CmaEsSampler()
study = optuna.create_study(sampler=sampler)
study.optimize(objective, n_trials=20)
Please note that this sampler does not support CategoricalDistribution.
If your search space contains categorical parameters, I recommend you
to use :class:`~optuna.samplers.TPESampler` instead.
Furthermore, there is room for performance improvements in parallel
optimization settings. This sampler cannot use some trials for updating
the parameters of multivariate normal distribution.
.. seealso::
You can also use :class:`optuna.integration.CmaEsSampler` which is a sampler using cma
library as the backend.
Args:
x0:
A dictionary of an initial parameter values for CMA-ES. By default, the mean of ``low``
and ``high`` for each distribution is used.
sigma0:
Initial standard deviation of CMA-ES. By default, ``sigma0`` is set to
``min_range / 6``, where ``min_range`` denotes the minimum range of the distributions
in the search space.
seed:
A random seed for CMA-ES.
n_startup_trials:
The independent sampling is used instead of the CMA-ES algorithm until the given number
of trials finish in the same study.
independent_sampler:
A :class:`~optuna.samplers.BaseSampler` instance that is used for independent
sampling. The parameters not contained in the relative search space are sampled
by this sampler.
The search space for :class:`~optuna.samplers.CmaEsSampler` is determined by
:func:`~optuna.samplers.intersection_search_space()`.
If :obj:`None` is specified, :class:`~optuna.samplers.RandomSampler` is used
as the default.
.. seealso::
:class:`optuna.samplers` module provides built-in independent samplers
such as :class:`~optuna.samplers.RandomSampler` and
:class:`~optuna.samplers.TPESampler`.
warn_independent_sampling:
If this is :obj:`True`, a warning message is emitted when
the value of a parameter is sampled by using an independent sampler.
Note that the parameters of the first trial in a study are always sampled
via an independent sampler, so no warning messages are emitted in this case.
"""
def __init__(
self,
x0: Optional[Dict[str, Any]] = None,
sigma0: Optional[float] = None,
n_startup_trials: int = 1,
independent_sampler: Optional[BaseSampler] = None,
warn_independent_sampling: bool = True,
seed: Optional[int] = None,
) -> None:
self._x0 = x0
self._sigma0 = sigma0
self._independent_sampler = independent_sampler or optuna.samplers.RandomSampler(seed=seed)
self._n_startup_trials = n_startup_trials
self._warn_independent_sampling = warn_independent_sampling
self._logger = optuna.logging.get_logger(__name__)
self._cma_rng = np.random.RandomState(seed)
self._search_space = optuna.samplers.IntersectionSearchSpace()
def reseed_rng(self) -> None:
# _cma_rng doesn't require reseeding because the relative sampling reseeds in each trial.
self._independent_sampler.reseed_rng()
def infer_relative_search_space(
self, study: "optuna.Study", trial: "optuna.trial.FrozenTrial",
) -> Dict[str, BaseDistribution]:
search_space = {} # type: Dict[str, BaseDistribution]
for name, distribution in self._search_space.calculate(study).items():
if distribution.single():
# `cma` cannot handle distributions that contain just a single value, so we skip
# them. Note that the parameter values for such distributions are sampled in
# `Trial`.
continue
if not isinstance(
distribution,
(
optuna.distributions.UniformDistribution,
optuna.distributions.LogUniformDistribution,
optuna.distributions.DiscreteUniformDistribution,
optuna.distributions.IntUniformDistribution,
optuna.distributions.IntLogUniformDistribution,
),
):
# Categorical distribution is unsupported.
continue
search_space[name] = distribution
return search_space
def sample_relative(
self,
study: "optuna.Study",
trial: "optuna.trial.FrozenTrial",
search_space: Dict[str, BaseDistribution],
) -> Dict[str, Any]:
if len(search_space) == 0:
return {}
completed_trials = [
t
for t in study._storage.get_all_trials(study._study_id, deepcopy=False)
if t.state == TrialState.COMPLETE
]
if len(completed_trials) < self._n_startup_trials:
return {}
if len(search_space) == 1:
self._logger.info(
"`CmaEsSampler` only supports two or more dimensional continuous "
"search space. `{}` is used instead of `CmaEsSampler`.".format(
self._independent_sampler.__class__.__name__
)
)
self._warn_independent_sampling = False
return {}
# TODO(c-bata): Remove `ordered_keys` by passing `ordered_dict=True`
# to `intersection_search_space`.
ordered_keys = [key for key in search_space]
ordered_keys.sort()
optimizer = self._restore_or_init_optimizer(completed_trials, search_space, ordered_keys)
if optimizer.dim != len(ordered_keys):
self._logger.info(
"`CmaEsSampler` does not support dynamic search space. "
"`{}` is used instead of `CmaEsSampler`.".format(
self._independent_sampler.__class__.__name__
)
)
self._warn_independent_sampling = False
return {}
# TODO(c-bata): Reduce the number of wasted trials during parallel optimization.
# See https://github.com/optuna/optuna/pull/920#discussion_r385114002 for details.
solution_trials = [
t
for t in completed_trials
if optimizer.generation == t.system_attrs.get("cma:generation", -1)
]
if len(solution_trials) >= optimizer.population_size:
solutions = [] # type: List[Tuple[np.ndarray, float]]
for t in solution_trials[: optimizer.population_size]:
assert t.value is not None, "completed trials must have a value"
x = np.array(
[_to_cma_param(search_space[k], t.params[k]) for k in ordered_keys],
dtype=float,
)
solutions.append((x, t.value))
optimizer.tell(solutions)
optimizer_str = pickle.dumps(optimizer).hex()
study._storage.set_trial_system_attr(trial._trial_id, "cma:optimizer", optimizer_str)
# Caution: optimizer should update its seed value
seed = self._cma_rng.randint(1, 2 ** 16) + trial.number
optimizer._rng = np.random.RandomState(seed)
params = optimizer.ask()
study._storage.set_trial_system_attr(
trial._trial_id, "cma:generation", optimizer.generation
)
external_values = {
k: _to_optuna_param(search_space[k], p) for k, p in zip(ordered_keys, params)
}
return external_values
def _restore_or_init_optimizer(
self,
completed_trials: "List[optuna.trial.FrozenTrial]",
search_space: Dict[str, BaseDistribution],
ordered_keys: List[str],
) -> CMA:
# Restore a previous CMA object.
for trial in reversed(completed_trials):
serialized_optimizer = trial.system_attrs.get(
"cma:optimizer", None
) # type: Optional[str]
if serialized_optimizer is None:
continue
return pickle.loads(bytes.fromhex(serialized_optimizer))
# Init a CMA object.
if self._x0 is None:
self._x0 = _initialize_x0(search_space)
if self._sigma0 is None:
sigma0 = _initialize_sigma0(search_space)
else:
sigma0 = self._sigma0
sigma0 = max(sigma0, _MIN_SIGMA0)
mean = np.array([self._x0[k] for k in ordered_keys], dtype=float)
bounds = _get_search_space_bound(ordered_keys, search_space)
n_dimension = len(ordered_keys)
return CMA(
mean=mean,
sigma=sigma0,
bounds=bounds,
seed=self._cma_rng.randint(1, 2 ** 31 - 2),
n_max_resampling=10 * n_dimension,
)
def sample_independent(
self,
study: "optuna.Study",
trial: "optuna.trial.FrozenTrial",
param_name: str,
param_distribution: BaseDistribution,
) -> Any:
if self._warn_independent_sampling:
complete_trials = [t for t in study.trials if t.state == TrialState.COMPLETE]
if len(complete_trials) >= self._n_startup_trials:
self._log_independent_sampling(trial, param_name)
return self._independent_sampler.sample_independent(
study, trial, param_name, param_distribution
)
def _log_independent_sampling(self, trial: FrozenTrial, param_name: str) -> None:
self._logger.warning(
"The parameter '{}' in trial#{} is sampled independently "
"by using `{}` instead of `CmaEsSampler` "
"(optimization performance may be degraded). "
"You can suppress this warning by setting `warn_independent_sampling` "
"to `False` in the constructor of `CmaEsSampler`, "
"if this independent sampling is intended behavior.".format(
param_name, trial.number, self._independent_sampler.__class__.__name__
)
)
def _to_cma_param(distribution: BaseDistribution, optuna_param: Any) -> float:
if isinstance(distribution, optuna.distributions.LogUniformDistribution):
return math.log(optuna_param)
if isinstance(distribution, optuna.distributions.IntUniformDistribution):
return float(optuna_param)
if isinstance(distribution, optuna.distributions.IntLogUniformDistribution):
return math.log(optuna_param)
return optuna_param
def _to_optuna_param(distribution: BaseDistribution, cma_param: float) -> Any:
if isinstance(distribution, optuna.distributions.LogUniformDistribution):
return math.exp(cma_param)
if isinstance(distribution, optuna.distributions.DiscreteUniformDistribution):
v = np.round(cma_param / distribution.q) * distribution.q + distribution.low
# v may slightly exceed range due to round-off errors.
return float(min(max(v, distribution.low), distribution.high))
if isinstance(distribution, optuna.distributions.IntUniformDistribution):
r = np.round((cma_param - distribution.low) / distribution.step)
v = r * distribution.step + distribution.low
return int(v)
if isinstance(distribution, optuna.distributions.IntLogUniformDistribution):
r = np.round((cma_param - math.log(distribution.low)) / math.log(distribution.step))
v = r * math.log(distribution.step) + math.log(distribution.low)
return int(math.exp(v))
return cma_param
def _initialize_x0(search_space: Dict[str, BaseDistribution]) -> Dict[str, np.ndarray]:
x0 = {}
for name, distribution in search_space.items():
if isinstance(distribution, optuna.distributions.UniformDistribution):
x0[name] = np.mean([distribution.high, distribution.low])
elif isinstance(distribution, optuna.distributions.DiscreteUniformDistribution):
x0[name] = np.mean([distribution.high, distribution.low])
elif isinstance(distribution, optuna.distributions.IntUniformDistribution):
x0[name] = int(np.mean([distribution.high, distribution.low]))
elif isinstance(distribution, optuna.distributions.IntLogUniformDistribution):
log_high = math.log(distribution.high)
log_low = math.log(distribution.low)
x0[name] = np.mean([log_high, log_low])
elif isinstance(distribution, optuna.distributions.LogUniformDistribution):
log_high = math.log(distribution.high)
log_low = math.log(distribution.low)
x0[name] = math.exp(np.mean([log_high, log_low]))
else:
raise NotImplementedError(
"The distribution {} is not implemented.".format(distribution)
)
return x0
def _initialize_sigma0(search_space: Dict[str, BaseDistribution]) -> float:
sigma0 = []
for name, distribution in search_space.items():
if isinstance(distribution, optuna.distributions.UniformDistribution):
sigma0.append((distribution.high - distribution.low) / 6)
elif isinstance(distribution, optuna.distributions.DiscreteUniformDistribution):
sigma0.append((distribution.high - distribution.low) / 6)
elif isinstance(distribution, optuna.distributions.IntUniformDistribution):
sigma0.append((distribution.high - distribution.low) / 6)
elif isinstance(distribution, optuna.distributions.IntLogUniformDistribution):
log_high = math.log(distribution.high)
log_low = math.log(distribution.low)
sigma0.append((log_high - log_low) / 6)
elif isinstance(distribution, optuna.distributions.LogUniformDistribution):
log_high = math.log(distribution.high)
log_low = math.log(distribution.low)
sigma0.append((log_high - log_low) / 6)
else:
raise NotImplementedError(
"The distribution {} is not implemented.".format(distribution)
)
return min(sigma0)
def _get_search_space_bound(
keys: List[str], search_space: Dict[str, BaseDistribution],
) -> np.ndarray:
bounds = []
for param_name in keys:
dist = search_space[param_name]
if isinstance(
dist,
(
optuna.distributions.UniformDistribution,
optuna.distributions.LogUniformDistribution,
optuna.distributions.DiscreteUniformDistribution,
optuna.distributions.IntUniformDistribution,
optuna.distributions.IntLogUniformDistribution,
),
):
bounds.append([_to_cma_param(dist, dist.low), _to_cma_param(dist, dist.high)])
else:
raise NotImplementedError("The distribution {} is not implemented.".format(dist))
return np.array(bounds, dtype=float)
| 40.545918 | 99 | 0.640242 |
188914f3b5050130055a90852086d8d1e0d9a7d8 | 894 | py | Python | ex020.py | WesleyOlliver/CursoPython | 8decdc4f38c25429994c0f9cb8f206e167f161d6 | [
"MIT"
] | null | null | null | ex020.py | WesleyOlliver/CursoPython | 8decdc4f38c25429994c0f9cb8f206e167f161d6 | [
"MIT"
] | null | null | null | ex020.py | WesleyOlliver/CursoPython | 8decdc4f38c25429994c0f9cb8f206e167f161d6 | [
"MIT"
] | null | null | null | from random import shuffle
# ANSI escape codes used to colour the terminal output.
cor = {'traço': '\033[35m', 'ex': '\033[4;31m', 'título': '\033[1;34m', 'nom': '\033[1;32m', 'reset': '\033[m'}
# Decorative separator: one coloured "-=-" unit, repeated below.
traco = '{}-=-{}'.format(cor['traço'], cor['reset'])
print(traco * 18, '{} Exercício 020 {}'.format(cor['ex'], cor['reset']), traco * 18)
# Fix: added the missing space between "um" and "programa" — the two
# implicitly-concatenated literals previously printed "umprograma".
print('{}O mesmo professor do exercício 019 quer sortear a ordem de apresentação de trabalhos dos alunos. Faça um '
      'programa que leia o \nnome dos quatros alunos e mostre a ordem sorteada.{}'.format(cor['título'], cor['reset']))
print(traco * 42)
# Read the four student names, then shuffle to get a random presentation order.
alunos = [input('Informe o {} aluno: '.format(ordinal))
          for ordinal in ('primeiro', 'segundo', 'terceiro', 'quarto')]
shuffle(alunos)
print('A ordem de apresentação será a seguinte: ')
print(cor['nom'], alunos, cor['reset'])
7d2c0025ff7c069fd993d94112ce9a056fa6f500 | 37,729 | py | Python | src/cogent3/core/location.py | Phuong-Le/cogent3 | 3a2baaa77ad8a71781b8f79c04147b9693f7ebb2 | [
"BSD-3-Clause"
] | null | null | null | src/cogent3/core/location.py | Phuong-Le/cogent3 | 3a2baaa77ad8a71781b8f79c04147b9693f7ebb2 | [
"BSD-3-Clause"
] | null | null | null | src/cogent3/core/location.py | Phuong-Le/cogent3 | 3a2baaa77ad8a71781b8f79c04147b9693f7ebb2 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""Alignments and Sequences are _Annotatables
_Annotatables hold a list of Maps.
Maps can be Features, Variables or AlignedSequences.
Maps have a list of Spans.
Also provides Range and Point classes for dealing with parts of sequences.
Span is a region with a start, an end, and a direction. Range is an ordered
collection of Spans (note: Range does _not_ support the list interface, but
you can always access Range.spans directly). Map is like a Range but is
immutable and is able to be nested, i.e. Maps can be defined relative to
other Maps.
Implementation Notes
Span and Range behave much like Python's slices: a Span contains the element
after its start but does not contain the element after its end. It may help to
think of the Span indices occurring _between_ the list elements:
a b c d e
| | | | | |
0 1 2 3 4 5
...so that a Span whose start is its end contains no elements (e.g. 2:2), and
a Span whose end is 2 more than its start contains 2 elements (e.g. 2:4 has c
and d), etc. Similarly, Span(0,2) does _not_ overlap Span(2,3), since the
former contains a and b while the latter contains c.
A Point is a Span whose start and end refer to the same object, i.e. the same
position in the sequence. A Point occurs between elements in the sequence,
and so does not contain any elements itself.
WARNING: this differs from the way e.g. NCBI handles sequence indices, where
the sequence is 1-based, a single index is treated as containing one element,
the point 3 contains exactly one element, 3, rather than no elements, and a
range from 2:4 contains 2, 3 and 4, _not_ just 2 and 3.
"""
import copy
from bisect import bisect_left, bisect_right
from functools import total_ordering
from itertools import chain
from cogent3.util.misc import (
ClassChecker,
ConstrainedList,
FunctionWrapper,
get_object_provenance,
iterable,
)
__author__ = "Rob Knight"
__copyright__ = "Copyright 2007-2021, The Cogent Project"
__credits__ = ["Rob Knight", "Peter Maxwell", "Matthew Wakefield", "Gavin Huttley"]
__license__ = "BSD-3"
__version__ = "2021.5.7a"
__maintainer__ = "Gavin Huttley"
__email__ = "Gavin.Huttley@anu.edu.au"
__status__ = "Prototype"
strip = str.strip
def _norm_index(i, length, default):
    """For converting s[:3] to s[0:3], s[-1] to s[len(s)-1] and s[0:lots] to s[0:len(s)]"""
    if i is None:
        i = default
    elif i < 0:
        # negative indices count back from the end, as in ordinary slicing
        i += length
    # clamp the result into the valid range [0, length]
    return min(max(i, 0), length)
def _norm_slice(index, length):
    """_norm_slice(slice(1, -2, 3), 10) -> (1,8,3)"""
    if not isinstance(index, slice):
        # scalar subscript: normalise negatives, then return a 1-wide slice
        pos = index + length if index < 0 else index
        if pos >= length:
            raise IndexError(index)
        return (pos, pos + 1, 1)
    return (
        _norm_index(index.start, length, 0),
        _norm_index(index.stop, length, length),
        index.step,
    )
def as_map(slice, length):
    """Take anything that might be used as a subscript: Integer, Slice,
    or Map, and return a Map."""
    if isinstance(slice, Map):
        # already a Map: returned unchanged
        # TODO reasons for failure when the following is not commented out
        # should be checked further
        # assert map.parent_length == length, (map, length)
        return slice
    if isinstance(slice, (list, tuple)):
        # a collection of subscripts: convert each and splice their spans
        spans = []
        for piece in slice:
            spans.extend(as_map(piece, length).spans)
        return Map(spans=spans, parent_length=length)
    # plain integer or slice
    (lo, hi, step) = _norm_slice(slice, length)
    assert (step or 1) == 1
    return Map([(lo, hi)], parent_length=length)
class SpanI(object):
    """Abstract interface for Span and Range objects.

    Required properties: start, end (must both be numbers)

    The starts_*/ends_* predicates are concrete: they accept either another
    span-like object (anything exposing start/end) or a plain number, using
    an EAFP AttributeError fallback for the numeric case.
    """

    __slots__ = []  # override in subclass

    def __contains__(self, other):
        """Returns True if other entirely contained in self."""
        raise NotImplementedError

    def overlaps(self, other):
        """Returns True if any positions in self are also in other."""
        raise NotImplementedError

    def reverses(self):
        """Reverses self."""
        raise NotImplementedError

    def __iter__(self):
        """Iterates over indices contained in self."""
        raise NotImplementedError

    def __str__(self):
        """Returns string representation of self."""
        return "(%s,%s)" % (self.start, self.end)

    def __len__(self):
        """Returns length of self."""
        raise NotImplementedError

    def __lt__(self, other):
        """Compares indices of self with indices of other."""
        raise NotImplementedError

    def starts_before(self, other):
        """Returns True if self starts before other or other.start."""
        try:
            return self.start < other.start
        except AttributeError:
            # other has no .start: treat it as a bare position
            return self.start < other

    def starts_after(self, other):
        """Returns True if self starts after other or after other.start."""
        try:
            return self.start > other.start
        except AttributeError:
            return self.start > other

    def starts_at(self, other):
        """Returns True if self starts at the same place as other."""
        try:
            return self.start == other.start
        except AttributeError:
            return self.start == other

    def starts_inside(self, other):
        """Returns True if self's start in other or equal to other."""
        try:
            return self.start in other
        except (AttributeError, TypeError):  # count other as empty span
            return False

    def ends_before(self, other):
        """Returns True if self ends before other or other.end."""
        try:
            return self.end < other.end
        except AttributeError:
            return self.end < other

    def ends_after(self, other):
        """Returns True if self ends after other or after other.end."""
        try:
            return self.end > other.end
        except AttributeError:
            return self.end > other

    def ends_at(self, other):
        """Returns True if self ends at the same place as other."""
        try:
            return self.end == other.end
        except AttributeError:
            return self.end == other

    def ends_inside(self, other):
        """Returns True if self's end in other or equal to other."""
        try:
            return self.end in other
        except (AttributeError, TypeError):  # count other as empty span
            return False
@total_ordering
class Span(SpanI):
    """A contiguous location, not much more than (start, end)

    Spans don't even know what map they are on. The only smarts the class
    has is the ability to slice correctly. Spans do not expect to be
    reverse-sliced (sl[5,3]) and treat positions as relative to themselves,
    not an underlying sequence (eg sl[:n] == sl[0:n]), so this slicing is
    very different to feature slicing.

    Spans may optionally have a value, which gets preserved when they are remapped etc."""

    lost = False

    __slots__ = (
        "tidy_start",
        "tidy_end",
        "length",
        "value",
        "start",
        "end",
        "reverse",
        "_serialisable",
    )

    def __init__(
        self,
        start,
        end=None,
        tidy_start=False,
        tidy_end=False,
        value=None,
        reverse=False,
    ):
        # capture the constructor arguments (minus self) so the span can be
        # serialised faithfully by to_rich_dict()
        d = locals()
        x = ("self", "__class__", "__slots__")
        self._serialisable = {k: v for k, v in d.items() if k not in x}

        self._new_init(start, end, reverse)
        self.tidy_start = tidy_start
        self.tidy_end = tidy_end
        self.value = value
        self.length = self.end - self.start
        assert self.length >= 0

    def _new_init(self, start, end=None, reverse=False):
        """Returns a new Span object, with start, end, and reverse properties.

        If end is not supplied, it is set to start + 1 (providing a 1-element
        range).
        reverse defaults to False.
        """
        # This should replace the current __init__ method when deprecated vars
        # are removed.
        # special handling in case we were passed another Span
        if isinstance(start, Span):
            assert end is None
            self.start, self.end, self.reverse = start.start, start.end, start.reverse
        else:
            # reverse start and end so that start is always first
            if end is None:
                end = start + 1
            elif start > end:
                start, end = end, start

            self.start = start
            self.end = end
            self.reverse = reverse

    def to_rich_dict(self):
        """Returns a json-serialisable dict of the constructor arguments."""
        attribs = self._serialisable.copy()
        attribs["type"] = get_object_provenance(self)
        attribs["version"] = __version__
        return attribs

    def __setstate__(self, args):
        self.__init__(*args)

    def __getstate__(self):
        # order matches the positional parameters of __init__
        return (
            self.start,
            self.end,
            self.tidy_start,
            self.tidy_end,
            self.value,
            self.reverse,
        )

    def __repr__(self):
        (start, end) = (self.start, self.end)
        if self.reverse:
            (end, start) = (start, end)
        return "%s:%s" % (start, end)

    def reversed(self):
        """Returns a copy on the opposite strand; tidy flags swap ends."""
        return self.__class__(
            self.start,
            self.end,
            self.tidy_end,
            self.tidy_start,
            self.value,
            reverse=not self.reverse,
        )

    def __getitem__(self, slice):
        """Slice relative to self (sl[:n] == sl[0:n]), preserving strand."""
        start, end, step = _norm_slice(slice, self.length)
        assert (step or 1) == 1, slice
        assert start <= end, slice
        # tidy flags survive only if the corresponding end is retained
        tidy_start = self.tidy_start and start == 0
        tidy_end = self.tidy_end and end == self.length
        if self.reverse:
            (start, end, reverse) = (self.end - end, self.end - start, True)
        else:
            (start, end, reverse) = (self.start + start, self.start + end, False)
        return type(self)(start, end, tidy_start, tidy_end, self.value, reverse)

    def __mul__(self, scale):
        """Scale up by `scale` (e.g. protein -> DNA coordinates)."""
        return Span(
            self.start * scale,
            self.end * scale,
            self.tidy_start,
            self.tidy_end,
            self.value,
            self.reverse,
        )

    def __div__(self, scale):
        """Scale down by `scale` (e.g. DNA -> protein coordinates).

        NOTE: retained under its Python 2 name; the ``/`` operator does not
        dispatch to it under Python 3.
        """
        # Bug fix: this previously read
        #     assert not self.start % scale or self.end % scale
        # which parses as `(not start % scale) or (end % scale)` and so
        # accepted misaligned spans; both ends must be divisible by scale.
        assert not (self.start % scale or self.end % scale)
        return Span(
            self.start // scale,
            self.end // scale,
            self.tidy_start,
            self.tidy_end,
            self.value,
            self.reverse,
        )

    def remap_with(self, map):
        """The list of spans corresponding to this span on its grandparent, ie:
        C is a span of a feature on B which itself is a feature on A, so to
        place C on A return that part of B (map) covered by C (self)"""

        (offsets, spans) = (map.offsets, map.spans)
        map_length = offsets[-1] + spans[-1].length

        # don't try to remap any non-corresponding end region(s)
        # this won't matter if all spans lie properly within their
        # parent maps, but that might not be true of Display slices.
        (zlo, zhi) = (max(0, self.start), min(map_length, self.end))

        # Find the right span(s) of the map
        first = bisect_right(offsets, zlo) - 1
        last = bisect_left(offsets, zhi, first) - 1
        result = spans[first : last + 1]

        # Cut off something at either end to get
        # the same position and length as 'self'
        if result:
            end_trim = offsets[last] + spans[last].length - zhi
            start_trim = zlo - offsets[first]
            if end_trim > 0:
                result[-1] = result[-1][: result[-1].length - end_trim]
            if start_trim > 0:
                result[0] = result[0][start_trim:]

        # May need to add a bit at either end if the span didn't lie entirely
        # within its parent map (eg: Display slice, inverse of feature map).
        if self.start < 0:
            result.insert(0, LostSpan(-self.start))
        if self.end > map_length:
            result.append(LostSpan(self.end - map_length))

        # If the ends of self are meaningful then so are the new ends,
        # but not any new internal breaks.
        if result:
            if self.tidy_start:
                result[0].tidy_start = True
            if self.tidy_end:
                result[-1].tidy_end = True

        # Deal with case where self is a reverse slice.
        if self.reverse:
            result = [part.reversed() for part in result]
            result.reverse()

        if self.value is not None:
            result = [copy.copy(s) for s in result]
            for s in result:
                s.value = self.value
        return result

    def __contains__(self, other):
        """Returns True if other completely contained in self.

        other must either be a number or have start and end properties.
        """
        try:
            return other.start >= self.start and other.end <= self.end
        except AttributeError:
            # other is scalar: must be _less_ than self.end,
            # for the same reason that 3 is not in range(3).
            return other >= self.start and other < self.end

    def overlaps(self, other):
        """Returns True if any positions in self are also in other."""
        # remember to subtract 1 from the Ends, since self.end isn't really
        # in self...
        try:
            return (self.start in other) or (other.start in self)
        except AttributeError:  # other was probably a number?
            return other in self

    def reverses(self):
        """Reverses self."""
        self.reverse = not self.reverse

    def reversed_relative_to(self, length):
        """Returns a new span with positions adjusted relative to length. For
        use in reverse complementing of nucleic acids"""

        # if reverse complementing, the start becomes the length minus the end
        # position
        start = length - self.end
        assert start >= 0
        end = start + self.length

        return self.__class__(start, end, value=self.value, reverse=not self.reverse)

    def __iter__(self):
        """Iterates over indices contained in self.

        NOTE: to make sure that the same items are contained whether going
        through the range in forward or reverse, need to adjust the indices
        by 1 if going backwards.
        """
        if self.reverse:
            return iter(range(self.end - 1, self.start - 1, -1))
        else:
            return iter(range(self.start, self.end, 1))

    def __str__(self):
        """Returns string representation of self."""
        return "(%s,%s,%s)" % (self.start, self.end, bool(self.reverse))

    def __len__(self):
        """Returns length of self."""
        return self.end - self.start

    def __lt__(self, other):
        """Compares indices of self with indices of other."""
        # NOTE(review): the fallback compares *types* with `<`, which raises
        # TypeError on Python 3; retained as-is since callers may depend on
        # that failure mode — TODO confirm and return NotImplemented if not.
        if hasattr(other, "start") and hasattr(other, "end"):
            s = (self.start, self.end, self.reverse)
            o = (other.start, other.end, other.reverse)
            return s < o
        else:
            return type(self) < type(other)

    def __eq__(self, other):
        """Compares indices of self with indices of other."""
        if hasattr(other, "start") and hasattr(other, "end"):
            return (
                self.start == other.start
                and self.end == other.end
                and self.reverse == other.reverse
            )
        else:
            return type(self) == type(other)
class _LostSpan(object):
    """A placeholder span which doesn't exist in the underlying sequence"""

    __slots__ = ["length", "value", "_serialisable"]
    lost = True
    terminal = False

    def __init__(self, length, value=None):
        # capture constructor args (minus self) for to_rich_dict()
        d = locals()
        exclude = ("self", "__class__", "__slots__")
        self._serialisable = {k: v for k, v in d.items() if k not in exclude}
        self.length = length
        self.value = value

    def to_rich_dict(self):
        """Returns a json-serialisable dict of the constructor arguments."""
        attribs = self._serialisable.copy()
        attribs["type"] = get_object_provenance(self)
        attribs["version"] = __version__
        return attribs

    def __len__(self):
        return self.length

    def __setstate__(self, args):
        self.__init__(*args)

    def __getstate__(self):
        return (self.length, self.value)

    def __repr__(self):
        return "-%s-" % (self.length)

    def where(self, index):
        # a lost span has no parent coordinate for any position
        return None

    def reversed(self):
        # reversing a gap is a no-op; safe because instances are treated as
        # immutable (small ones are shared via the module-level cache)
        return self

    def __getitem__(self, slice):
        (start, end, step) = _norm_slice(slice, self.length)
        assert (step or 1) == 1, slice
        return self.__class__(abs(end - start), self.value)

    def __mul__(self, scale):
        return LostSpan(self.length * scale, self.value)

    def __div__(self, scale):
        # Bug fix: previously asserted divisibility by the literal 3 (a
        # copy-paste from codon scaling) instead of by `scale`.
        assert not self.length % scale
        return LostSpan(self.length // scale, self.value)

    def remap_with(self, map):
        # a gap stays a gap regardless of the parent mapping
        return [self]

    def reversed_relative_to(self, length):
        return self
# Save memory by only making one of each small gap
_lost_span_cache = {}
def LostSpan(length, value=None):
    """Factory for _LostSpan instances.

    Gaps shorter than 1000 with no associated value are interned in a
    module-level cache so repeated gap lengths share a single object.
    """
    global _lost_span_cache
    if value is not None or length >= 1000:
        return _LostSpan(length, value)
    cached = _lost_span_cache.get(length)
    if cached is None:
        cached = _lost_span_cache[length] = _LostSpan(length, value)
    return cached
class TerminalPadding(_LostSpan):
    # A lost span at either end of a map whose true extent is unknown;
    # produced by Map(..., termini_unknown=True).
    terminal = True

    def __repr__(self):
        return "?%s?" % (self.length)
class Map(object):
"""A map holds a list of spans."""
    def __init__(
        self,
        locations=None,
        spans=None,
        tidy=False,
        parent_length=None,
        termini_unknown=False,
    ):
        """Build a map over a parent sequence of length `parent_length`.

        Provide either `locations` ((start, end) pairs, converted to Spans;
        start > end yields a reversed Span) or pre-built `spans`. With
        `termini_unknown=True`, terminal lost spans become TerminalPadding.
        Aggregate attributes (offsets, start/end, useful, complete, reverse)
        are derived from the spans.
        """
        assert parent_length is not None
        d = locals()
        exclude = ("self", "__class__", "__slots__")
        # keep constructor args for serialisation (mirrors Span._serialisable)
        self._serialisable = {k: v for k, v in d.items() if k not in exclude}

        if spans is None:
            spans = []
            for (start, end) in locations:
                diff = 0
                reverse = start > end
                # NOTE(review): the first test raises for *any* location
                # outside [0, parent_length], so the two `elif` clamping
                # branches below are unreachable — confirm whether the
                # first condition was meant to use `and`.
                if max(start, end) < 0 or min(start, end) > parent_length:
                    raise RuntimeError(
                        "located outside sequence: %s"
                        % str((start, end, parent_length))
                    )
                elif max(start, end) < 0:
                    diff = min(start, end)
                    start = [start, 0][start < 0]
                    end = [end, 0][end < 0]
                elif min(start, end) > parent_length:
                    diff = max(start, end) - parent_length
                    start = [start, parent_length][start > parent_length]
                    end = [end, parent_length][end > parent_length]

                span = Span(start, end, tidy, tidy, reverse=reverse)
                if diff < 0:
                    spans += [LostSpan(-diff), span]
                elif diff > 0:
                    spans += [span, LostSpan(diff)]
                else:
                    spans += [span]

        self.offsets = []
        self.useful = False
        self.complete = True
        self.reverse = None
        posn = 0
        for span in spans:
            # offsets[i] is the map-coordinate start of spans[i]
            self.offsets.append(posn)
            posn += span.length
            if span.lost:
                self.complete = False
            elif not self.useful:
                # first real (non-lost) span seen: seed start/end/reverse
                self.useful = True
                (self.start, self.end) = (span.start, span.end)
                self.reverse = span.reverse
            else:
                self.start = min(self.start, span.start)
                self.end = max(self.end, span.end)
                if self.reverse is not None and (span.reverse != self.reverse):
                    # mixed strands: overall orientation is undefined
                    self.reverse = None

        if termini_unknown:
            if spans[0].lost:
                spans[0] = TerminalPadding(spans[0].length)
            if spans[-1].lost:
                spans[-1] = TerminalPadding(spans[-1].length)

        self.spans = spans
        self.length = posn
        self.parent_length = parent_length
        self.__inverse = None
    def __len__(self):
        # total length in map coordinates, including lost spans
        return self.length
    def __repr__(self):
        # the span list followed by "/<parent_length>"
        return repr(self.spans) + "/%s" % self.parent_length
def __getitem__(self, slice):
# A possible shorter map at the same level
slice = as_map(slice, len(self))
new_parts = []
for span in slice.spans:
new_parts.extend(span.remap_with(self))
return Map(spans=new_parts, parent_length=self.parent_length)
def __mul__(self, scale):
# For Protein -> DNA
new_parts = []
for span in self.spans:
new_parts.append(span * scale)
return Map(spans=new_parts, parent_length=self.parent_length * scale)
def __div__(self, scale):
# For DNA -> Protein
new_parts = []
for span in self.spans:
new_parts.append(span / scale)
return Map(spans=new_parts, parent_length=self.parent_length // scale)
def __add__(self, other):
if other.parent_length != self.parent_length:
raise ValueError("Those maps belong to different sequences")
return Map(spans=self.spans + other.spans, parent_length=self.parent_length)
    def with_termini_unknown(self):
        # Copy of this map with terminal lost spans marked as TerminalPadding.
        # NOTE(review): `self` is passed as the `locations` positional arg;
        # it is ignored because `spans` is supplied, though it does end up in
        # the new map's `_serialisable` record — confirm this is intended.
        return Map(
            self,
            spans=self.spans[:],
            parent_length=self.parent_length,
            termini_unknown=True,
        )
def get_covering_span(self):
if self.reverse:
span = (self.end, self.start)
else:
span = (self.start, self.end)
return Map([span], parent_length=self.parent_length)
def covered(self):
    """Return a Map of the merged (union of) intervals covered by self.

    >>> Map([(10,20), (15, 25), (80, 90)]).covered().spans
    [Span(10,25), Span(80, 90)]"""
    # Sweep-line interval union: record +1 at each span start and -1 at
    # each span end, then walk the sorted positions keeping a running
    # coverage count.
    delta = {}
    for span in self.spans:
        if span.lost:
            continue
        delta[span.start] = delta.get(span.start, 0) + 1
        delta[span.end] = delta.get(span.end, 0) - 1
    positions = list(delta.keys())
    positions.sort()
    last_y = y = 0  # y is the running coverage count
    last_x = start = None
    result = []
    for x in positions:
        y += delta[x]
        if x == last_x:
            continue
        if y and not last_y:
            # coverage became non-zero: a merged interval starts here
            assert start is None
            start = x
        elif last_y and not y:
            # coverage dropped to zero: close the open interval
            result.append((start, x))
            start = None
        last_x = x
        last_y = y
    # every start must have been matched by an end
    assert y == 0
    return Map(result, parent_length=self.parent_length)
def reversed(self):
    """Reversed location on same parent"""
    flipped = [s.reversed() for s in self.spans]
    return Map(spans=flipped[::-1], parent_length=self.parent_length)
def nucleic_reversed(self):
    """Same location on reversed parent"""
    # each span is re-expressed relative to the far end of the parent;
    # span order is deliberately NOT reversed (contrast with reversed())
    spans = [s.reversed_relative_to(self.parent_length) for s in self.spans]
    return Map(spans=spans, parent_length=self.parent_length)
def gaps(self):
    """The gaps (lost spans) in this map"""
    gap_locations = []
    cursor = 0
    for span in self.spans:
        if span.lost:
            gap_locations.append((cursor, cursor + span.length))
        cursor += span.length
    return Map(gap_locations, parent_length=len(self))
def shadow(self):
    """The 'negative' map of the spans not included in this map"""
    # invert into parent coordinates; the gaps of the inverse are exactly
    # the parent positions self does not cover
    return self.inverse().gaps()
def nongap(self):
    """Return a Map (in self's coordinates) of the retained, non-lost spans."""
    kept = []
    cursor = 0
    for span in self.spans:
        if not span.lost:
            kept.append((cursor, cursor + span.length))
        cursor += span.length
    return Map(kept, parent_length=len(self))
def without_gaps(self):
    """Returns a copy of self with all lost spans removed."""
    return Map(
        spans=[s for s in self.spans if not s.lost],
        parent_length=self.parent_length,
    )
def inverse(self):
    """Returns the inverse map (parent -> self coordinates), cached."""
    # lazily computed; __inverse is initialised to None in __init__
    if self.__inverse is None:
        self.__inverse = self._inverse()
    return self.__inverse
def _inverse(self):
    """Compute the inverse map; see inverse() for the cached accessor.

    Raises
    ------
    ValueError
        if parent_length is unknown, or spans overlap on the parent.
    """
    # can't work if there are overlaps in the map
    # tidy ends don't survive inversion
    if self.parent_length is None:
        raise ValueError("Uninvertable. parent length not known")
    posn = 0
    temp = []
    # collect (parent_lo, parent_hi, self_start, self_end) per kept span;
    # a reversed span swaps the direction of the self coordinates
    for span in self.spans:
        if not span.lost:
            if span.reverse:
                temp.append((span.start, span.end, posn + span.length, posn))
            else:
                temp.append((span.start, span.end, posn, posn + span.length))
        posn += span.length

    temp.sort()
    new_spans = []
    last_hi = 0
    for (lo, hi, start, end) in temp:
        if lo > last_hi:
            # uncovered parent region becomes a gap in the inverse
            new_spans.append(LostSpan(lo - last_hi))
        elif lo < last_hi:
            raise ValueError("Uninvertable. Overlap: %s < %s" % (lo, last_hi))
        new_spans.append(Span(start, end, reverse=start > end))
        last_hi = hi
    if self.parent_length > last_hi:
        # trailing uncovered parent region
        new_spans.append(LostSpan(self.parent_length - last_hi))
    return Map(spans=new_spans, parent_length=len(self))
def get_coordinates(self):
    """returns span coordinates as [(v1, v2), ...]

    v1/v2 are (start, end) unless the map is reversed, in which case it will
    be (end, start)"""
    # use a def rather than an assigned lambda (PEP 8 / E731)
    if self.reverse:

        def order_func(pair):
            return (max(pair), min(pair))

    else:

        def order_func(pair):
            return pair

    return [order_func((s.start, s.end)) for s in self.spans if not s.lost]
def to_rich_dict(self):
    """returns dicts for contained spans [dict(), ..]"""
    spans = [s.to_rich_dict() for s in self.spans]
    # _serialisable captures the constructor arguments; the serialised
    # spans supersede the original "locations" entry
    data = self._serialisable.copy()
    data.pop("locations")
    data["spans"] = spans
    data["type"] = get_object_provenance(self)
    data["version"] = __version__
    return data
def zeroed(self):
    """returns a new instance with the first span starting at 0

    Note
    ----

    Useful when an Annotatable object is sliced, but the connection to
    the original parent is being deliberately broken as in the
    Sequence.deepcopy(sliced=True) case.
    """
    # todo there's probably a more efficient way to do this
    # create the new instance via a serialise/deserialise round trip
    from cogent3.util.deserialise import deserialise_map_spans

    data = self.to_rich_dict()
    zeroed = deserialise_map_spans(data)
    zeroed.parent_length = len(zeroed.get_covering_span())
    # shift every retained span left so the smallest coordinate becomes 0;
    # lost spans carry no coordinates and are left untouched
    min_val = min(zeroed.start, zeroed.end)
    for span in zeroed.spans:
        if span.lost:
            continue
        span.start -= min_val
        span.end -= min_val

    return zeroed
class SpansOnly(ConstrainedList):
    """List that converts elements to Spans on addition."""

    # every added item is coerced through Span(...) and then type-checked
    mask = FunctionWrapper(Span)
    _constraint = ClassChecker(Span)
@total_ordering
class Range(SpanI):
    """Complex object consisting of many spans."""

    def __init__(self, spans=None):
        """Returns a new Range object with data in spans."""
        spans = [] if spans is None else spans
        result = SpansOnly()
        # need to check if we got a single Span, since they define __iter__.
        if isinstance(spans, Span):
            result.append(spans)
        elif hasattr(spans, "spans"):  # probably a single range object?
            result.extend(spans.spans)
        else:
            for s in iterable(spans):
                if hasattr(s, "spans"):
                    result.extend(s.spans)
                else:
                    result.append(s)
        self.spans = result

    def __str__(self):
        """Returns string representation of self."""
        return "(%s)" % ",".join(map(str, self.spans))

    def __len__(self):
        """Returns sum of span lengths.

        NOTE: if spans overlap, will count multiple times. Use reduce() to
        get rid of overlaps.
        """
        return sum(map(len, self.spans))

    def __lt__(self, other):
        """Compares spans of self with indices of other."""
        if hasattr(other, "spans"):
            return self.spans < other.spans
        elif len(self.spans) == 1 and hasattr(other, "start") and hasattr(other, "end"):
            return self.spans[0].start < other.start or self.spans[0].end < other.end
        else:
            # BUG FIX: the original returned ``object < other`` — comparing
            # the builtin ``object`` type itself, which always raises.
            # Returning NotImplemented lets Python try the reflected
            # operation before raising TypeError.
            return NotImplemented

    def __eq__(self, other):
        """Compares spans of self with indices of other."""
        if hasattr(other, "spans"):
            return self.spans == other.spans
        elif len(self.spans) == 1 and hasattr(other, "start") and hasattr(other, "end"):
            return self.spans[0].start == other.start and self.spans[0].end == other.end
        else:
            # BUG FIX: the original compared the builtin ``object`` type
            # with other; defer to the reflected operation instead.
            return NotImplemented

    def _get_start(self):
        """Finds earliest start of items in self.spans."""
        return min([i.start for i in self.spans])

    start = property(_get_start)

    def _get_end(self):
        """Finds latest end of items in self.spans."""
        return max([i.end for i in self.spans])

    end = property(_get_end)

    def _get_reverse(self):
        """reverse is True if any piece is reversed."""
        for i in self.spans:
            if i.reverse:
                return True
        return False

    reverse = property(_get_reverse)

    def reverses(self):
        """Reverses all spans in self."""
        for i in self.spans:
            i.reverses()

    def __contains__(self, other):
        """Returns True if other completely contained in self.

        other must either be a number or have start and end properties.
        """
        if hasattr(other, "spans"):
            # every span of other must fit inside some span of self
            for curr in other.spans:
                found = False
                for i in self.spans:
                    if curr in i:
                        found = True
                        break
                if not found:
                    return False
            return True
        else:
            for i in self.spans:
                if other in i:
                    return True
            return False

    def overlaps(self, other):
        """Returns True if any positions in self are also in other."""
        if hasattr(other, "spans"):
            for i in self.spans:
                for j in other.spans:
                    if i.overlaps(j):
                        return True
        else:
            for i in self.spans:
                if i.overlaps(other):
                    return True
        return False

    def overlaps_extent(self, other):
        """Returns True if any positions in self's extent also in other's."""
        if hasattr(other, "extent"):
            return self.extent.overlaps(other.extent)
        else:
            return self.extent.overlaps(other)

    def sort(self):
        """Sorts the spans in self."""
        self.spans.sort()

    def __iter__(self):
        """Iterates over indices contained in self."""
        return chain(*[iter(i) for i in self.spans])

    def _get_extent(self):
        """Returns Span object representing the extent of self."""
        return Span(self.start, self.end)

    extent = property(_get_extent)

    def simplify(self):
        """Reduces the spans in self in-place to get fewest spans.

        Will not condense spans with opposite directions.

        Will condense adjacent but nonoverlapping spans (e.g. (1,3) and (4,5)).
        """
        forward = []
        reverse = []
        spans = self.spans[:]
        spans.sort()
        for span in spans:
            if span.reverse:
                direction = reverse
            else:
                direction = forward

            found_overlap = False
            for other in direction:
                if (
                    span.overlaps(other)
                    or (span.start == other.end)
                    or (other.start == span.end)
                ):  # handle adjacent spans also
                    # grow the existing span to absorb the new one
                    other.start = min(span.start, other.start)
                    other.end = max(span.end, other.end)
                    found_overlap = True
                    break
            if not found_overlap:
                direction.append(span)
        self.spans[:] = forward + reverse
class Point(Span):
    """Point is a special case of Span, where start always equals end.

    Note that, as per Python standard, a point is _between_ two elements
    in a sequence. In other words, a point does not contain any elements.
    If you want a single element, use a Span where end = start + 1.

    A Point does have a direction (i.e. a reverse property) to indicate
    where successive items would go if it were expanded.
    """

    def __init__(self, start, reverse=False):
        """Returns new Point object."""
        self.reverse = reverse
        self._start = start

    @property
    def start(self):
        """Location of the point on its parent."""
        return self._start

    @start.setter
    def start(self, value):
        self._start = value

    # start and end are synonyms for the same property
    end = start
def RangeFromString(string, delimiter=","):
    """Returns Range object from string of the form 1-5,11,20,30-50.

    Ignores whitespace; expects values to be comma-delimited and positive.

    Parameters
    ----------
    string
        text such as "1-5, 11,20,30-50"; "a-b" items become Span(a, b),
        bare numbers become single-position Spans
    delimiter
        item separator, a comma by default
    """
    result = Range()
    # use str.strip directly rather than relying on a separately imported
    # ``strip`` free function (which does not exist in the py3 stdlib)
    pairs = [piece.strip() for piece in string.split(delimiter)]
    for p in pairs:
        if not p:  # adjacent delimiters?
            continue
        if "-" in p:  # treat as pair
            first, second = p.split("-")
            result.spans.append(Span(int(first), int(second)))
        else:
            result.spans.append(Span(int(p)))
    return result
def _gap_insertion_data(seq):
    """compute gap position, length and cumulative offsets

    Parameters
    ----------
    seq
        a cogent3 annotatable sequence

    Returns
    -------
    [(seq position, gap length), ...], [cum sum gap length, ...]

    Notes
    -----
    The sequence position is in unaligned sequence coordinates. offsets are
    calculated as the cumulative sum of gap lengths. The offset
    plus the sequence position gives the alignment coordinate for a gap.
    """
    spans = seq.map.spans
    gap_pos = []
    offsets = []
    total = 0
    for index, span in enumerate(spans):
        if not span.lost:
            continue

        # a gap sits immediately after the preceding ungapped span
        seq_position = spans[index - 1].end if index else 0
        gap_pos.append((seq_position, len(span)))
        offsets.append(total)
        total += span.length

    return gap_pos, offsets
def _merged_gaps(a_gaps, b_gaps, function="max"):
    """merges gaps that occupy same position

    Parameters
    ----------
    a_gaps, b_gaps
        [(gap position, length),...]
    function
        When a and b contain a gap at the same position, function is applied
        to the gap lengths. Valid values are either 'max' or 'sum'.

    Returns
    -------
    Merged places as [(gap position, length),...]

    Notes
    -----
    If a_gaps and b_gaps are from the same underlying sequence, set
    function to 'max'. Use 'sum' when the gaps derive from different
    sequences.
    """
    # exact membership test; the original substring check
    # (``function.lower() not in "maxsum"``) accepted invalid names such
    # as "xs" or "ax" and silently treated them as 'sum'
    if function.lower() not in ("max", "sum"):
        raise ValueError(f"{function} not allowed, choose either 'sum' or 'max'")

    function = max if function.lower() == "max" else sum

    if not a_gaps:
        return b_gaps

    if not b_gaps:
        return a_gaps

    places = sorted(a_gaps + b_gaps)
    positions, lengths = [places[0][0]], [places[0][1]]
    for pos, length in places[1:]:
        if positions[-1] == pos:
            # duplicate position: combine the two lengths with max/sum
            lengths[-1] = function([lengths[-1], length])
            continue

        positions.append(pos)
        lengths.append(length)

    return list(zip(positions, lengths))
def _gap_pos_to_map(gap_pos, gap_lengths, seq_length):
    """build a Map from parallel sequences of gap positions and lengths

    Parameters
    ----------
    gap_pos
        sequence coordinates of gaps, parallel with gap_lengths
    gap_lengths
        length of the gap at the corresponding gap_pos entry
    seq_length
        length of the ungapped sequence

    Raises
    ------
    ValueError
        if a gap position lies beyond seq_length
    """
    if not gap_pos:
        # no gaps: a single span covering the whole sequence
        return Map([(0, seq_length)], parent_length=seq_length)

    spans = []
    last = pos = 0
    for i, pos in enumerate(gap_pos):
        if pos > seq_length:
            raise ValueError(
                f"cannot have gap at position {pos} beyond seq_length= {seq_length}"
            )

        gap = LostSpan(length=gap_lengths[i])
        # a gap at position 0 has no preceding sequence span
        spans.extend([gap] if pos == 0 else [Span(last, pos), gap])
        last = pos

    if pos < seq_length:
        # trailing sequence after the final gap
        spans.append(Span(last, seq_length))

    return Map(spans=spans, parent_length=seq_length)
def _interconvert_seq_aln_coords(gaps, offsets, pos, seq_pos=True):
    """converts between sequence and alignment positions

    Parameters
    ----------
    gaps
        series of [(seq pos, length), ..]
    offsets
        the offset of seq pos, which is basically the sum of all lengths up
        to the previous gap (currently unused by this function; retained
        for interface compatibility)
    pos
        the coordinate to convert
    seq_pos : bool
        whether pos is in sequence coordinates. If False, means it is in
        alignment coordinates.

    Returns
    -------
    alignment coordinate (or sequence coordinate when seq_pos is False)
    """
    if pos < 0:
        raise ValueError(f"negative value {pos}")

    if not gaps or pos == 0:
        return pos

    offset = 0
    # accumulate the lengths of all gaps lying strictly before pos
    for p, gap_len in gaps:  # was ``len``, shadowing the builtin
        assert p >= 0 and gap_len > 0
        if p + offset >= pos:
            break

        offset += gap_len

    assert offset < pos, f"calculated offset {offset} greater than align pos {p}"
    if not seq_pos:  # we subtract the gap length total to get to seq coords
        offset = -offset
    return pos + offset
| 31.785173 | 91 | 0.576374 |
2370b10acff98abc47c8dd813350216130ddb48b | 623 | py | Python | tests/conftest.py | c-goosen/haveibeenpwned-asyncio | 62dd32b00c441a4c6e72cb6863eb593660dcf5d5 | [
"MIT"
] | null | null | null | tests/conftest.py | c-goosen/haveibeenpwned-asyncio | 62dd32b00c441a4c6e72cb6863eb593660dcf5d5 | [
"MIT"
] | null | null | null | tests/conftest.py | c-goosen/haveibeenpwned-asyncio | 62dd32b00c441a4c6e72cb6863eb593660dcf5d5 | [
"MIT"
] | null | null | null | import pytest
import asyncio
from aresponses import ResponsesMockServer
class MockResponse:
    """Minimal stand-in for an aiohttp-style response object."""

    def __init__(self, text, status):
        self.status = status
        self._text = text

    async def text(self):
        """Return the canned body, mimicking aiohttp's async ``text()``."""
        return self._text

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc, tb):
        pass
@pytest.fixture()
def event_loop():
    """Yield an asyncio event loop for a test, closing it afterwards."""
    # NOTE(review): asyncio.get_event_loop() is deprecated for this use
    # since Python 3.10 — consider asyncio.new_event_loop(); confirm the
    # supported Python versions before changing.
    loop = asyncio.get_event_loop()
    # asyncio.set_event_loop(loop)
    yield loop
    loop.close()
@pytest.fixture
async def aresponses(loop):
    """Yield an aresponses mock server for stubbing HTTP responses.

    The ``loop`` parameter is a fixture — presumably the event loop
    supplied by the async test plugin in use; confirm which plugin
    provides it.
    """
    async with ResponsesMockServer(loop=loop) as server:
        yield server
| 18.323529 | 56 | 0.674157 |
ac76b9bb2bb3e1d6ec437eda5003614e97b9fd1e | 390 | py | Python | login_registration_app/migrations/0002_auto_20210810_1352.py | Lstedmanfalls/BetterSelf | d903fa78b41f63c5053cef09cd127544952a2243 | [
"MIT"
] | 1 | 2021-09-21T03:08:56.000Z | 2021-09-21T03:08:56.000Z | login_registration_app/migrations/0002_auto_20210810_1352.py | Lstedmanfalls/BetterSelf | d903fa78b41f63c5053cef09cd127544952a2243 | [
"MIT"
] | null | null | null | login_registration_app/migrations/0002_auto_20210810_1352.py | Lstedmanfalls/BetterSelf | d903fa78b41f63c5053cef09cd127544952a2243 | [
"MIT"
] | null | null | null | # Generated by Django 2.2 on 2021-08-10 19:52
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alter users.password to CharField(max_length=255)."""

    dependencies = [
        ('login_registration_app', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='users',
            name='password',
            field=models.CharField(max_length=255),
        ),
    ]
| 20.526316 | 51 | 0.605128 |
0e0da3b288d2a5a3babfde3e6828e417bd3cb569 | 3,728 | py | Python | src/cogent3/parse/gff.py | wjjmjh/cogent3 | e10f4f933921d52b000096b7c016190a1602add6 | [
"BSD-3-Clause"
] | null | null | null | src/cogent3/parse/gff.py | wjjmjh/cogent3 | e10f4f933921d52b000096b7c016190a1602add6 | [
"BSD-3-Clause"
] | null | null | null | src/cogent3/parse/gff.py | wjjmjh/cogent3 | e10f4f933921d52b000096b7c016190a1602add6 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
__author__ = "Peter Maxwell"
__copyright__ = "Copyright 2007-2020, The Cogent Project"
__credits__ = [
"Peter Maxwell",
"Matthew Wakefield",
"Gavin Huttley",
"Christopher Bradley",
]
__license__ = "BSD-3"
__version__ = "2020.6.30a"
__maintainer__ = "Peter Maxwell"
__email__ = "pm67nz@gmail.com"
__status__ = "Production"
from pathlib import Path
from cogent3.util.misc import open_
def gff_parser(f):
    """parses a gff file

    Parameters
    -----------
    f
        accepts string path or pathlib.Path or file-like object (e.g. StringIO)

    Returns
    -------
    dict
        contains each of the 9 parameters specified by gff3, and comments.
    """
    # normalise Path objects to plain strings
    if isinstance(f, Path):
        f = str(f)

    if isinstance(f, str):
        # we opened the file, so ensure it is closed when iteration ends
        with open_(f) as infile:
            yield from _gff_parser(infile)
    else:
        yield from _gff_parser(f)
def _gff_parser(f):
    """parses a gff file

    Yields one dict per feature line; see gff_parser for the public API.
    """
    gff3_header = "gff-version 3"
    if isinstance(f, list):
        gff3 = f and gff3_header in f[0]
    else:
        # peek at the first line for the gff3 pragma, then rewind
        gff3 = gff3_header in f.readline()
        f.seek(0)

    for line in f:
        # comments and blank lines
        if "#" in line:
            (line, comments) = line.split("#", 1)
        else:
            comments = None
        line = line.strip()
        if not line:
            continue
        cols = line.split("\t")
        # the final column (attributes) may be empty
        if len(cols) == 8:
            cols.append("")
        assert len(cols) == 9, len(line)
        (seqid, source, type_, start, end, score, strand, phase, attributes) = cols

        # adjust for 0-based indexing
        (start, end) = (int(start) - 1, int(end))
        # start is always meant to be less than end in GFF
        # features that extend beyond sequence have negative indices
        if start < 0 or end < 0:
            start, end = abs(start), abs(end)
            if start > end:
                start, end = end, start
        # reverse indices when the feature is on the opposite strand
        if strand == "-":
            (start, end) = (end, start)

        # all attributes have an "ID" but this may not be unique
        if gff3:
            attribute_parser = parse_attributes_gff3
        else:
            attribute_parser = parse_attributes_gff2
        attributes = attribute_parser(attributes, (start, end))
        rtn = {
            "SeqID": seqid,
            "Source": source,
            "Type": type_,
            "Start": start,
            "End": end,
            "Score": score,
            "Strand": strand,
            "Phase": phase,
            "Attributes": attributes,
            "Comments": comments,
        }
        yield rtn
def parse_attributes_gff2(attributes, span):
    """Returns a dict with name and info keys

    The ID is the text inside the first double-quoted region; if there is
    no quote the whole attribute string is used. Info is the raw string.
    ``span`` is unused, kept for interface parity with parse_attributes_gff3.
    """
    _, quote, tail = attributes.partition('"')
    name = tail.partition('"')[0] if quote else attributes
    return {"ID": name, "Info": attributes}
def parse_attributes_gff3(attributes, span):
    """Returns a dictionary containing all the attributes

    Parameters
    ----------
    attributes
        the ';'-separated key=value attributes column of a GFF3 record
    span
        (start, end) of the feature; unused, kept for interface parity
        with parse_attributes_gff2

    Returns
    -------
    dict with at least an "ID" key; "Parent" (when present) is always a list
    """
    attributes = attributes.strip(";")
    attributes = attributes.split(";")
    if attributes[0]:
        # split on the first '=' only, so values may themselves contain
        # '=' (the original unbounded split raised on such records)
        attributes = dict(t.split("=", 1) for t in attributes)
    else:
        attributes = {}
    if "Parent" in attributes.keys():
        # There may be multiple parents
        if "," in attributes["Parent"]:
            attributes["Parent"] = attributes["Parent"].split(",")
        else:
            attributes["Parent"] = [attributes["Parent"]]
    if "ID" not in attributes.keys():
        attributes["ID"] = ""
    return attributes
| 28.458015 | 83 | 0.571084 |
39bba843c360ff657831178988ee9625b3815594 | 3,044 | py | Python | data/cirq_new/cirq_program/startCirq_pragma720.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/cirq_new/cirq_program/startCirq_pragma720.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/cirq_new/cirq_program/startCirq_pragma720.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=21
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
class Opty(cirq.PointOptimizer):
    """Point optimizer that rewrites CZPowGate operations.

    Each CZPowGate is replaced by a plain CZ followed by two layers of X
    gates on the same qubits (the two X layers cancel each other).
    Non-matching operations return None implicitly, i.e. are left alone.
    """

    def optimization_at(
        self,
        circuit: 'cirq.Circuit',
        index: int,
        op: 'cirq.Operation'
    ) -> Optional[cirq.PointOptimizationSummary]:
        # only rewrite gate operations whose gate is a CZPowGate
        if (isinstance(op, cirq.ops.GateOperation) and isinstance(op.gate, cirq.CZPowGate)):
            return cirq.PointOptimizationSummary(
                clear_span=1,
                clear_qubits=op.qubits,
                new_operations=[
                    cirq.CZ(*op.qubits),
                    cirq.X.on_each(*op.qubits),
                    cirq.X.on_each(*op.qubits),
                ]
            )
#thatsNoCode
def make_circuit(n: int, input_qubit):
    """Build the fixed example circuit over ``input_qubit`` and measure all qubits.

    ``n`` is not referenced in the body; the gate sequence below is fixed
    and the numbered comments come from the tool that generated it.
    """
    c = cirq.Circuit()  # circuit begin

    c.append(cirq.H.on(input_qubit[0])) # number=1
    c.append(cirq.H.on(input_qubit[1])) # number=2
    c.append(cirq.H.on(input_qubit[1])) # number=7
    c.append(cirq.H.on(input_qubit[2])) # number=3
    c.append(cirq.H.on(input_qubit[3])) # number=4
    c.append(cirq.H.on(input_qubit[0])) # number=16
    c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=17
    c.append(cirq.H.on(input_qubit[0])) # number=18
    c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=6
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=8
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=9
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=10
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=11
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=12
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=13
    c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=14
    c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=15
    c.append(cirq.SWAP.on(input_qubit[2],input_qubit[0])) # number=19
    c.append(cirq.SWAP.on(input_qubit[2],input_qubit[0])) # number=20
    # circuit end

    c.append(cirq.measure(*input_qubit, key='result'))

    return c
def bitstring(bits):
    """Render an iterable of bit-like values as a string of digits."""
    digits = [str(int(b)) for b in bits]
    return "".join(digits)
if __name__ == '__main__':
    qubit_count = 4

    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count, input_qubits)
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')

    circuit_sample_count = 2820
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)

    frequencies = result.histogram(key='result', fold_func=bitstring)
    # use a context manager so the file is closed even if a write fails
    # (the original opened the file and relied on an explicit close())
    with open("../data/startCirq_pragma720.csv", "w+") as writefile:
        print(format(frequencies), file=writefile)
        print("results end", file=writefile)
        print(len(circuit), file=writefile)
        print(circuit, file=writefile)
b0aa5db6cd503307fa76a3b82626ff88d5bc6fcf | 3,578 | py | Python | docs/reST/ref/code_examples/draw_module_example.py | czogran/pygame1 | 1591a3295402f914950ae15039b91136d8bf8f79 | [
"Python-2.0",
"OLDAP-2.3"
] | 2 | 2021-04-03T20:01:35.000Z | 2021-09-09T23:42:21.000Z | docs/reST/ref/code_examples/draw_module_example.py | czogran/pygame1 | 1591a3295402f914950ae15039b91136d8bf8f79 | [
"Python-2.0",
"OLDAP-2.3"
] | null | null | null | docs/reST/ref/code_examples/draw_module_example.py | czogran/pygame1 | 1591a3295402f914950ae15039b91136d8bf8f79 | [
"Python-2.0",
"OLDAP-2.3"
] | 1 | 2021-06-06T17:52:54.000Z | 2021-06-06T17:52:54.000Z | # Import a library of functions called 'pygame'
import pygame
from math import pi
# Initialize the game engine
pygame.init()

# Define the colors we will use in RGB format
BLACK = ( 0, 0, 0)
WHITE = (255, 255, 255)
BLUE = ( 0, 0, 255)
GREEN = ( 0, 255, 0)
RED = (255, 0, 0)

# Set the height and width of the screen
size = [400, 300]
screen = pygame.display.set_mode(size)

pygame.display.set_caption("Example code for the draw module")

# Loop until the user clicks the close button.
done = False
clock = pygame.time.Clock()

while not done:

    # This limits the while loop to a max of 10 times per second.
    # Leave this out and we will use all CPU we can.
    clock.tick(10)

    for event in pygame.event.get(): # User did something
        if event.type == pygame.QUIT: # If user clicked close
            done=True # Flag that we are done so we exit this loop

    # All drawing code happens after the for loop but
    # inside the main while done==False loop.

    # Clear the screen and set the screen background
    screen.fill(WHITE)

    # Draw on the screen a GREEN line from (0, 0) to (50, 30)
    # 5 pixels wide.
    pygame.draw.line(screen, GREEN, [0, 0], [50,30], 5)

    # Draw on the screen 3 BLACK lines, each 5 pixels wide.
    # The 'False' means the first and last points are not connected.
    pygame.draw.lines(screen, BLACK, False, [[0, 80], [50, 90], [200, 80], [220, 30]], 5)

    # Draw on the screen a GREEN line from (0, 50) to (50, 80)
    # Because it is an antialiased line, it is 1 pixel wide.
    pygame.draw.aaline(screen, GREEN, [0, 50],[50, 80], True)

    # Draw a rectangle outline
    pygame.draw.rect(screen, BLACK, [75, 10, 50, 20], 2)

    # Draw a solid rectangle
    pygame.draw.rect(screen, BLACK, [150, 10, 50, 20])

    # Draw a rectangle with rounded corners
    pygame.draw.rect(screen, GREEN, [115, 210, 70, 40], 10, border_radius=15)
    pygame.draw.rect(screen, RED, [135, 260, 50, 30], 0, border_radius=10, border_top_left_radius=0,
                     border_bottom_right_radius=15)

    # Draw an ellipse outline, using a rectangle as the outside boundaries
    pygame.draw.ellipse(screen, RED, [225, 10, 50, 20], 2)

    # Draw a solid ellipse, using a rectangle as the outside boundaries
    pygame.draw.ellipse(screen, RED, [300, 10, 50, 20])

    # This draws a triangle using the polygon command
    pygame.draw.polygon(screen, BLACK, [[100, 100], [0, 200], [200, 200]], 5)

    # Draw an arc as part of an ellipse.
    # Use radians to determine what angle to draw.
    pygame.draw.arc(screen, BLACK,[210, 75, 150, 125], 0, pi/2, 2)
    pygame.draw.arc(screen, GREEN,[210, 75, 150, 125], pi/2, pi, 2)
    pygame.draw.arc(screen, BLUE, [210, 75, 150, 125], pi,3*pi/2, 2)
    pygame.draw.arc(screen, RED, [210, 75, 150, 125], 3*pi/2, 2*pi, 2)

    # Draw a circle
    pygame.draw.circle(screen, BLUE, [60, 250], 40)

    # Draw only one circle quadrant
    pygame.draw.circle(screen, BLUE, [250, 250], 40, 0, draw_top_right=True)
    pygame.draw.circle(screen, RED, [250, 250], 40, 30, draw_top_left=True)
    pygame.draw.circle(screen, GREEN, [250, 250], 40, 20, draw_bottom_left=True)
    pygame.draw.circle(screen, BLACK, [250, 250], 40, 10, draw_bottom_right=True)

    # Go ahead and update the screen with what we've drawn.
    # This MUST happen after all the other drawing commands.
    pygame.display.flip()

# Be IDLE friendly
pygame.quit()
| 37.663158 | 101 | 0.631638 |
a720081f1fd1a34157edf830b8e6919b387619b3 | 9,067 | py | Python | xrpl/core/binarycodec/types/path_set.py | SubCODERS/xrpl-py | 24a02d099002625794f5b6491ec2cafd872cc721 | [
"ISC"
] | 1 | 2021-04-07T16:59:01.000Z | 2021-04-07T16:59:01.000Z | xrpl/core/binarycodec/types/path_set.py | SubCODERS/xrpl-py | 24a02d099002625794f5b6491ec2cafd872cc721 | [
"ISC"
] | 2 | 2022-02-23T22:57:46.000Z | 2022-02-24T11:41:49.000Z | xrpl/core/binarycodec/types/path_set.py | SubCODERS/xrpl-py | 24a02d099002625794f5b6491ec2cafd872cc721 | [
"ISC"
] | 1 | 2022-02-21T07:36:36.000Z | 2022-02-21T07:36:36.000Z | """Codec for serializing and deserializing PathSet fields.
See `PathSet Fields <https://xrpl.org/serialization.html#pathset-fields>`_
"""
from __future__ import annotations
from typing import Dict, List, Optional, Type, cast
from typing_extensions import Final
from xrpl.core.binarycodec.binary_wrappers.binary_parser import BinaryParser
from xrpl.core.binarycodec.exceptions import XRPLBinaryCodecException
from xrpl.core.binarycodec.types.account_id import AccountID
from xrpl.core.binarycodec.types.currency import Currency
from xrpl.core.binarycodec.types.serialized_type import SerializedType
# Bit masks for the leading type byte of a PathStep: one bit per optional
# field (account / currency / issuer).
_TYPE_ACCOUNT: Final[int] = 0x01
_TYPE_CURRENCY: Final[int] = 0x10
_TYPE_ISSUER: Final[int] = 0x20

# Constants for separating Paths in a PathSet
_PATHSET_END_BYTE: Final[int] = 0x00
_PATH_SEPARATOR_BYTE: Final[int] = 0xFF
def _is_path_step(value: Dict[str, str]) -> bool:
    """Helper function to determine if a dictionary represents a valid path step."""
    return any(key in value for key in ("issuer", "account", "currency"))
def _is_path_set(value: List[List[Dict[str, str]]]) -> bool:
    """Helper function to determine if a list represents a valid path set."""
    if not value or not value[0]:
        return True
    return _is_path_step(value[0][0])
class PathStep(SerializedType):
    """Serialize and deserialize a single step in a Path."""

    @classmethod
    def from_value(cls: Type[PathStep], value: Dict[str, str]) -> PathStep:
        """
        Construct a PathStep object from a dictionary.

        Args:
            value: The dictionary to construct a PathStep object from.

        Returns:
            The PathStep constructed from value.

        Raises:
            XRPLBinaryCodecException: If the supplied value is of the wrong type.
        """
        if not isinstance(value, dict):
            raise XRPLBinaryCodecException(
                "Invalid type to construct a PathStep: expected dict,"
                f" received {value.__class__.__name__}."
            )

        # fields are serialized in a fixed order (account, currency, issuer);
        # the presence of each is recorded as a bit in the leading type byte
        data_type = 0x00
        buffer = b""
        if "account" in value:
            account_id = AccountID.from_value(value["account"])
            buffer += bytes(account_id)
            data_type |= _TYPE_ACCOUNT
        if "currency" in value:
            currency = Currency.from_value(value["currency"])
            buffer += bytes(currency)
            data_type |= _TYPE_CURRENCY
        if "issuer" in value:
            issuer = AccountID.from_value(value["issuer"])
            buffer += bytes(issuer)
            data_type |= _TYPE_ISSUER

        return PathStep(bytes([data_type]) + buffer)

    @classmethod
    def from_parser(
        cls: Type[PathStep], parser: BinaryParser, _length_hint: Optional[None] = None
    ) -> PathStep:
        """
        Construct a PathStep object from an existing BinaryParser.

        Args:
            parser: The parser to construct a PathStep from.

        Returns:
            The PathStep constructed from parser.
        """
        # the leading type byte records which optional fields follow
        data_type = parser.read_uint8()
        buffer = b""

        if data_type & _TYPE_ACCOUNT:
            account_id = parser.read(AccountID.LENGTH)
            buffer += account_id
        if data_type & _TYPE_CURRENCY:
            currency = parser.read(Currency.LENGTH)
            buffer += currency
        if data_type & _TYPE_ISSUER:
            issuer = parser.read(AccountID.LENGTH)
            buffer += issuer

        return PathStep(bytes([data_type]) + buffer)

    def to_json(self: PathStep) -> Dict[str, str]:
        """
        Returns the JSON representation of a PathStep.

        Returns:
            The JSON representation of a PathStep.
        """
        # re-parse our own serialized form to recover the fields
        parser = BinaryParser(str(self))
        data_type = parser.read_uint8()
        json = {}

        if data_type & _TYPE_ACCOUNT:
            account_id = AccountID.from_parser(parser).to_json()
            json["account"] = account_id
        if data_type & _TYPE_CURRENCY:
            currency = Currency.from_parser(parser).to_json()
            json["currency"] = currency
        if data_type & _TYPE_ISSUER:
            issuer = AccountID.from_parser(parser).to_json()
            json["issuer"] = issuer

        return json

    @property
    def type(self: PathStep) -> int:
        """Get a number representing the type of this PathStep.

        Returns:
            a number to be bitwise and-ed with TYPE_ constants to describe the
            types in the PathStep.
        """
        # the first byte of the serialized form is the type byte
        return self.buffer[0]
class Path(SerializedType):
    """Class for serializing/deserializing Paths."""

    @classmethod
    def from_value(cls: Type[Path], value: List[Dict[str, str]]) -> Path:
        """
        Construct a Path from an array of dictionaries describing PathSteps.

        Args:
            value: The array to construct a Path object from.

        Returns:
            The Path constructed from value.

        Raises:
            XRPLBinaryCodecException: If the supplied value is of the wrong type.
        """
        if not isinstance(value, list):
            raise XRPLBinaryCodecException(
                "Invalid type to construct a Path: expected list, "
                f"received {value.__class__.__name__}."
            )

        buffer: bytes = b""
        for PathStep_dict in value:
            pathstep = PathStep.from_value(PathStep_dict)
            buffer += bytes(pathstep)
        return Path(buffer)

    @classmethod
    def from_parser(
        cls: Type[Path], parser: BinaryParser, _length_hint: Optional[None] = None
    ) -> Path:
        """
        Construct a Path object from an existing BinaryParser.

        Args:
            parser: The parser to construct a Path from.

        Returns:
            The Path constructed from parser.
        """
        buffer: List[bytes] = []
        while not parser.is_end():
            pathstep = PathStep.from_parser(parser)
            buffer.append(bytes(pathstep))

            # NOTE(review): typing.cast is a no-op at runtime, so this
            # compares parser.peek() against the *int* constants. Verify
            # that peek() returns an int (not a bytes object); if it
            # returns bytes this condition can never be true.
            if parser.peek() == cast(bytes, _PATHSET_END_BYTE) or parser.peek() == cast(
                bytes, _PATH_SEPARATOR_BYTE
            ):
                break
        return Path(b"".join(buffer))

    def to_json(self: Path) -> List[Dict[str, str]]:
        """
        Returns the JSON representation of a Path.

        Returns:
            The JSON representation of a Path.
        """
        json = []
        path_parser = BinaryParser(str(self))

        while not path_parser.is_end():
            pathstep = PathStep.from_parser(path_parser)
            json.append(pathstep.to_json())

        return json
class PathSet(SerializedType):
    """Codec for serializing and deserializing PathSet fields.

    See `PathSet Fields <https://xrpl.org/serialization.html#pathset-fields>`_
    """

    @classmethod
    def from_value(cls: Type[PathSet], value: List[List[Dict[str, str]]]) -> PathSet:
        """
        Construct a PathSet from a List of Lists representing paths.

        Args:
            value: The List to construct a PathSet object from.

        Returns:
            The PathSet constructed from value.

        Raises:
            XRPLBinaryCodecException: If the PathSet representation is invalid.
        """
        if not isinstance(value, list):
            raise XRPLBinaryCodecException(
                "Invalid type to construct a PathSet: expected list,"
                f" received {value.__class__.__name__}."
            )

        if _is_path_set(value):
            buffer: List[bytes] = []
            for path_dict in value:
                path = Path.from_value(path_dict)
                buffer.append(bytes(path))
                # provisional separator; the final one is replaced below
                buffer.append(bytes([_PATH_SEPARATOR_BYTE]))

            # replace the trailing separator with the end-of-pathset marker
            buffer[-1] = bytes([_PATHSET_END_BYTE])
            return PathSet(b"".join(buffer))

        raise XRPLBinaryCodecException("Cannot construct PathSet from given value")

    @classmethod
    def from_parser(
        cls: Type[PathSet], parser: BinaryParser, _length_hint: Optional[None] = None
    ) -> PathSet:
        """
        Construct a PathSet object from an existing BinaryParser.

        Args:
            parser: The parser to construct a PathSet from.

        Returns:
            The PathSet constructed from parser.
        """
        buffer: List[bytes] = []
        while not parser.is_end():
            path = Path.from_parser(parser)
            buffer.append(bytes(path))
            # consume the separator / terminator byte following the path
            buffer.append(parser.read(1))

            if buffer[-1][0] == _PATHSET_END_BYTE:
                break

        return PathSet(b"".join(buffer))

    def to_json(self: PathSet) -> List[List[Dict[str, str]]]:
        """
        Returns the JSON representation of a PathSet.

        Returns:
            The JSON representation of a PathSet.
        """
        json = []
        pathset_parser = BinaryParser(str(self))

        while not pathset_parser.is_end():
            path = Path.from_parser(pathset_parser)
            json.append(path.to_json())
            # skip the path separator / terminator byte
            pathset_parser.skip(1)

        return json
b0ef99111b81078b6a9506250df0d163cc3e5aeb | 8,717 | py | Python | lynx_code/plugins/cosigner_pool/qt.py | enkrypter/Lynx-wallet | 166b7e5810f017a6e12bf96e54b0d44767b2a901 | [
"MIT"
] | null | null | null | lynx_code/plugins/cosigner_pool/qt.py | enkrypter/Lynx-wallet | 166b7e5810f017a6e12bf96e54b0d44767b2a901 | [
"MIT"
] | null | null | null | lynx_code/plugins/cosigner_pool/qt.py | enkrypter/Lynx-wallet | 166b7e5810f017a6e12bf96e54b0d44767b2a901 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2014 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
from xmlrpc.client import ServerProxy
from PyQt5.QtCore import QObject, pyqtSignal
from PyQt5.QtWidgets import QPushButton
from lynx_code import util, keystore, ecc, crypto
from lynx_code import transaction
from lynx_code.bip32 import BIP32Node
from lynx_code.plugin import BasePlugin, hook
from lynx_code.i18n import _
from lynx_code.wallet import Multisig_Wallet
from lynx_code.util import bh2u, bfh
from lynx_code.gui.qt.transaction_dialog import show_transaction
from lynx_code.gui.qt.util import WaitingDialog
import sys
import traceback
server = ServerProxy('https://cosigner.electrum.org/', allow_none=True)
class Listener(util.DaemonThread):
    """Background thread that polls the cosigner pool for messages
    addressed to any of this wallet's key hashes and forwards them to the
    plugin via a Qt signal.
    """
    def __init__(self, parent):
        util.DaemonThread.__init__(self)
        self.daemon = True
        self.parent = parent
        # Key hashes for which a message has already been fetched;
        # prevents re-emitting the same message on every poll.
        self.received = set()
        self.keyhashes = []
    def set_keyhashes(self, keyhashes):
        """Replace the set of key hashes to poll for."""
        self.keyhashes = keyhashes
    def clear(self, keyhash):
        """Delete the pending message for keyhash from the pool server
        and allow a new message for it to be received again.
        """
        server.delete(keyhash)
        self.received.remove(keyhash)
    def run(self):
        # Main polling loop; self.running is managed by DaemonThread.
        while self.running:
            if not self.keyhashes:
                time.sleep(2)
                continue
            for keyhash in self.keyhashes:
                if keyhash in self.received:
                    continue
                try:
                    message = server.get(keyhash)
                except Exception as e:
                    self.logger.info("cannot contact cosigner pool")
                    time.sleep(30)
                    continue
                if message:
                    self.received.add(keyhash)
                    self.logger.info(f"received message for {keyhash}")
                    # Hand off to the GUI thread through the Qt signal.
                    self.parent.obj.cosigner_receive_signal.emit(
                        keyhash, message)
            # poll every 30 seconds
            time.sleep(30)
class QReceiveSignalObject(QObject):
    """QObject holder for the signal emitted by the Listener thread.

    The signal carries (keyhash, message) as emitted in Listener.run().
    """
    cosigner_receive_signal = pyqtSignal(object, object)
class Plugin(BasePlugin):
    """Cosigner-pool plugin: shares partially signed multisig transactions
    between cosigners via a central relay server.
    """
    def __init__(self, parent, config, name):
        BasePlugin.__init__(self, parent, config, name)
        self.listener = None
        self.obj = QReceiveSignalObject()
        self.obj.cosigner_receive_signal.connect(self.on_receive)
        # (wallet key, keyhash, window) for keystores we can sign with.
        self.keys = []
        # (window, xpub, pubkey, keyhash) for watching-only cosigners.
        self.cosigner_list = []
    @hook
    def init_qt(self, gui):
        # Treat windows already open at plugin load time as new windows.
        for window in gui.windows:
            self.on_new_window(window)
    @hook
    def on_new_window(self, window):
        self.update(window)
    @hook
    def on_close_window(self, window):
        self.update(window)
    def is_available(self):
        return True
    def update(self, window):
        """Rebuild key/cosigner lists for the window's multisig wallet."""
        wallet = window.wallet
        if type(wallet) != Multisig_Wallet:
            return
        # NOTE(review): this branch alternates the listener on/off on each
        # successive update() call (window open/close); with several
        # multisig windows the listener may stop while windows remain —
        # confirm this is the intended lifecycle.
        if self.listener is None:
            self.logger.info("starting listener")
            self.listener = Listener(self)
            self.listener.start()
        elif self.listener:
            self.logger.info("shutting down listener")
            self.listener.stop()
            self.listener = None
        self.keys = []
        self.cosigner_list = []
        for key, keystore in wallet.keystores.items():
            xpub = keystore.get_master_public_key()
            pubkey = BIP32Node.from_xkey(xpub).eckey.get_public_key_bytes(compressed=True)
            # The pool addresses each cosigner by the hash of their pubkey.
            _hash = bh2u(crypto.sha256d(pubkey))
            if not keystore.is_watching_only():
                self.keys.append((key, _hash, window))
            else:
                self.cosigner_list.append((window, xpub, pubkey, _hash))
        if self.listener:
            self.listener.set_keyhashes([t[1] for t in self.keys])
    @hook
    def transaction_dialog(self, d):
        # Add a "Send to cosigner" button to the transaction dialog.
        d.cosigner_send_button = b = QPushButton(_("Send to cosigner"))
        b.clicked.connect(lambda: self.do_send(d.tx))
        d.buttons.insert(0, b)
        self.transaction_dialog_update(d)
    @hook
    def transaction_dialog_update(self, d):
        # Show the button only for incomplete txs some cosigner can sign.
        if d.tx.is_complete() or d.wallet.can_sign(d.tx):
            d.cosigner_send_button.hide()
            return
        for window, xpub, K, _hash in self.cosigner_list:
            if window.wallet == d.wallet and self.cosigner_can_sign(d.tx, xpub):
                d.cosigner_send_button.show()
                break
        else:
            d.cosigner_send_button.hide()
    def cosigner_can_sign(self, tx, cosigner_xpub):
        """Return True if any tx input references cosigner_xpub."""
        from lynx_code.keystore import is_xpubkey, parse_xpubkey
        xpub_set = set([])
        for txin in tx.inputs():
            for x_pubkey in txin['x_pubkeys']:
                if is_xpubkey(x_pubkey):
                    xpub, s = parse_xpubkey(x_pubkey)
                    xpub_set.add(xpub)
        return cosigner_xpub in xpub_set
    def do_send(self, tx):
        """Encrypt tx to each eligible cosigner's pubkey and upload it to
        the pool under that cosigner's keyhash.
        """
        def on_success(result):
            window.show_message(_("Your transaction was sent to the cosigning pool.") + '\n' +
                                _("Open your cosigner wallet to retrieve it."))
        def on_failure(exc_info):
            e = exc_info[1]
            try: self.logger.error("on_failure", exc_info=exc_info)
            except OSError: pass
            window.show_error(_("Failed to send transaction to cosigning pool") + ':\n' + str(e))
        for window, xpub, K, _hash in self.cosigner_list:
            if not self.cosigner_can_sign(tx, xpub):
                continue
            # construct message
            raw_tx_bytes = bfh(str(tx))
            public_key = ecc.ECPubkey(K)
            message = public_key.encrypt_message(raw_tx_bytes).decode('ascii')
            # send message
            task = lambda: server.put(_hash, message)
            msg = _('Sending transaction to cosigning pool...')
            WaitingDialog(window, msg, task, on_success, on_failure)
    def on_receive(self, keyhash, message):
        """Handle an encrypted tx fetched by the Listener: decrypt it with
        our master key and open it in a transaction dialog.
        """
        self.logger.info(f"signal arrived for {keyhash}")
        for key, _hash, window in self.keys:
            if _hash == keyhash:
                break
        else:
            self.logger.info("keyhash not found")
            return
        wallet = window.wallet
        if isinstance(wallet.keystore, keystore.Hardware_KeyStore):
            window.show_warning(_('An encrypted transaction was retrieved from cosigning pool.') + '\n' +
                                _('However, hardware wallets do not support message decryption, '
                                  'which makes them not compatible with the current design of cosigner pool.'))
            return
        elif wallet.has_keystore_encryption():
            password = window.password_dialog(_('An encrypted transaction was retrieved from cosigning pool.') + '\n' +
                                              _('Please enter your password to decrypt it.'))
            if not password:
                return
        else:
            password = None
            if not window.question(_("An encrypted transaction was retrieved from cosigning pool.") + '\n' +
                                   _("Do you want to open it now?")):
                return
        xprv = wallet.keystore.get_master_private_key(password)
        if not xprv:
            return
        try:
            privkey = BIP32Node.from_xkey(xprv).eckey
            message = bh2u(privkey.decrypt_message(message))
        except Exception as e:
            self.logger.exception('')
            window.show_error(_('Error decrypting message') + ':\n' + str(e))
            return
        # Remove the message from the pool so it is not delivered again.
        self.listener.clear(keyhash)
        tx = transaction.Transaction(message)
        show_transaction(tx, window, prompt_if_unsaved=True)
b6f77ff249148b933230f2a7464cdf92cbed90eb | 4,833 | py | Python | neutron/services/trunk/drivers/linuxbridge/agent/trunk_plumber.py | congnt95/neutron | 6a73a362c5ff5b7c28c15a49f47a9900c0d2b4e1 | [
"Apache-2.0"
] | 1,080 | 2015-01-04T08:35:00.000Z | 2022-03-27T09:15:52.000Z | neutron/services/trunk/drivers/linuxbridge/agent/trunk_plumber.py | congnt95/neutron | 6a73a362c5ff5b7c28c15a49f47a9900c0d2b4e1 | [
"Apache-2.0"
] | 24 | 2015-02-21T01:48:28.000Z | 2021-11-26T02:38:56.000Z | neutron/services/trunk/drivers/linuxbridge/agent/trunk_plumber.py | congnt95/neutron | 6a73a362c5ff5b7c28c15a49f47a9900c0d2b4e1 | [
"Apache-2.0"
] | 1,241 | 2015-01-02T10:47:10.000Z | 2022-03-27T09:42:23.000Z | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.utils import runtime
from oslo_concurrency import lockutils
from oslo_log import log as logging
from oslo_utils import excutils
from neutron.agent.linux import ip_lib
from neutron.plugins.ml2.drivers.linuxbridge.agent.common import utils as lutil
LOG = logging.getLogger(__name__)
class Plumber(object):
    """Object responsible for VLAN interface CRUD.

    Handles creation/deletion/listing of the VLAN subinterfaces that
    realize a trunk's subports, all within a single namespace.
    """

    def __init__(self, namespace=None):
        # Namespace every ip operation runs in; None means root namespace.
        self.namespace = namespace

    def trunk_on_host(self, trunk):
        """Returns true if trunk device is present else False."""
        return ip_lib.device_exists(self._trunk_device_name(trunk),
                                    namespace=self.namespace)

    def ensure_trunk_subports(self, trunk):
        """Idempotently converge host VLAN subinterfaces to the trunk.

        Subinterfaces present on the host but not on the trunk object are
        deleted; those the trunk expects but the host lacks are created.
        """
        parent = self._trunk_device_name(trunk)
        with self._trunk_lock(parent):
            # Lock is scoped to the trunk device so two diffs don't interleave.
            expected = self._get_subport_devs_and_vlans(trunk.sub_ports)
            existing = self._get_vlan_children(parent)
            for devname, vlan_id in existing - expected:
                LOG.debug("Deleting subport %(name)s with vlan tag %(tag)s",
                          {'name': devname, 'tag': vlan_id})
                self._safe_delete_device(devname)
            for devname, vlan_id in expected - existing:
                LOG.debug("Creating subport %(name)s with vlan tag %(tag)s",
                          {'name': devname, 'tag': vlan_id})
                self._create_vlan_subint(parent, devname, vlan_id)

    def delete_trunk_subports(self, trunk):
        """Delete every subport interface of the trunk's parent port."""
        return self.delete_subports_by_port_id(trunk.port_id)

    def delete_subports_by_port_id(self, port_id):
        """Delete all VLAN children of the tap device backing port_id."""
        device = self._get_tap_device_name(port_id)
        if not ip_lib.device_exists(device, namespace=self.namespace):
            LOG.debug("Device %s not present on this host", device)
            return
        with self._trunk_lock(device):
            for subname, vlan_id in self._get_vlan_children(device):
                LOG.debug("Deleting subport %(name)s with vlan tag %(tag)s",
                          {'name': subname, 'tag': vlan_id})
                self._safe_delete_device(subname)

    def _trunk_lock(self, trunk_dev):
        # Cross-process lock named after the trunk device.
        return lockutils.lock('trunk-%s' % trunk_dev,
                              runtime.SYNCHRONIZED_PREFIX)

    def _create_vlan_subint(self, trunk_name, devname, vlan_id):
        """Create VLAN subinterface devname on trunk_name, IPv6 disabled."""
        wrapper = ip_lib.IPWrapper(namespace=self.namespace)
        try:
            wrapper.add_vlan(devname, trunk_name, vlan_id).disable_ipv6()
        except Exception:
            with excutils.save_and_reraise_exception() as ectx:
                # Propagate the error only when the device exists after
                # the failed creation attempt.
                ectx.reraise = ip_lib.IPDevice(
                    devname, namespace=self.namespace).exists()

    def _safe_delete_device(self, devname):
        """Bring devname down and delete it; ignore errors if it's gone."""
        dev = ip_lib.IPDevice(devname, namespace=self.namespace)
        try:
            dev.link.set_down()
            dev.link.delete()
        except Exception:
            with excutils.save_and_reraise_exception() as ectx:
                # Suppress the error when the device no longer exists.
                ectx.reraise = dev.exists()

    def _trunk_device_name(self, trunk):
        return self._get_tap_device_name(trunk.port_id)

    def _get_subport_devs_and_vlans(self, subports):
        # Expected (tap device name, vlan id) pairs for the subports.
        return {(self._get_tap_device_name(sp.port_id), sp.segmentation_id)
                for sp in subports}

    def _get_tap_device_name(self, devname):
        return lutil.get_tap_device_name(devname)

    def _get_vlan_children(self, dev):
        """Return set of (devname, vlan_id) tuples for children of device."""
        infos = ip_lib.get_devices_info(namespace=self.namespace)
        return {(info['name'], info['vlan_id'])
                for info in infos
                if info.get('kind') == 'vlan'
                and info.get('parent_name') == dev}
| 41.307692 | 79 | 0.660252 |
c8fa67c408151c43dd73d74b15e738ae157ad9b1 | 347 | py | Python | benchmarks/LUD K1/config.py | DependableSystemsLab/GPU-Trident | c734cd8a18146869fc915af73a6ca13ceca35c0b | [
"MIT"
] | 1 | 2021-01-17T10:36:21.000Z | 2021-01-17T10:36:21.000Z | benchmarks/LUD K1/config.py | DependableSystemsLab/GPU-Trident | c734cd8a18146869fc915af73a6ca13ceca35c0b | [
"MIT"
] | null | null | null | benchmarks/LUD K1/config.py | DependableSystemsLab/GPU-Trident | c734cd8a18146869fc915af73a6ca13ceca35c0b | [
"MIT"
] | null | null | null | import os
PROGRAM_NAME = "lud"
PROGRAM_OUTPUT_NAME = ""
INPUT_PARAMETERS = "-i 64.dat"
LLVM_PATH = ""
EXEC_MODE = 1 # 0 -> Single threaded, 1 -> Multi-threaded
CF_STAGE_1_NUM = 100
CF_STAGE_2_NUM = 100
# Loads that transfer data from global memory
GLOBAL_LOAD_LIST = [13]
# Stores that transfer data to global memory
GLOBAL_STORE_LIST = [137] | 21.6875 | 57 | 0.740634 |
60e68e8b7cadab58e43d9d59a5510e31ec5752b9 | 3,314 | py | Python | homeassistant/auth/permissions/util.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | homeassistant/auth/permissions/util.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 24,710 | 2016-04-13T08:27:26.000Z | 2020-03-02T12:59:13.000Z | homeassistant/auth/permissions/util.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Helpers to deal with permissions."""
from __future__ import annotations
from collections.abc import Callable
from functools import wraps
from typing import Optional, cast
from .const import SUBCAT_ALL
from .models import PermissionLookup
from .types import CategoryType, SubCategoryDict, ValueType
# Signature shared by all sub-category lookup helpers:
# (perm_lookup, sub-category dict, object id) -> resolved value or None.
LookupFunc = Callable[[PermissionLookup, SubCategoryDict, str], Optional[ValueType]]
# Mapping of sub-category key -> its lookup helper.
SubCatLookupType = dict[str, LookupFunc]
def lookup_all(
    perm_lookup: PermissionLookup, lookup_dict: SubCategoryDict, object_id: str
) -> ValueType:
    """Look up permission for all.

    perm_lookup and object_id are unused; they exist so this function
    matches the LookupFunc signature.
    """
    # In case of ALL category, lookup_dict IS the schema.
    return cast(ValueType, lookup_dict)
def compile_policy(
    policy: CategoryType, subcategories: SubCatLookupType, perm_lookup: PermissionLookup
) -> Callable[[str, str], bool]:
    """Compile policy into a function that tests policy.

    Subcategories are mapping key -> lookup function, ordered by highest
    priority first.
    """
    # A falsy policy (None, False, empty dict) grants nothing.
    if not policy:

        def deny_everything(entity_id: str, key: str) -> bool:
            """Decline all."""
            return False

        return deny_everything

    # A literal True grants everything.
    if policy is True:

        def allow_everything(entity_id: str, key: str) -> bool:
            """Approve all."""
            return True

        return allow_everything

    assert isinstance(policy, dict)

    checks: list[Callable[[str, str], bool | None]] = []
    for subcat_key, lookup in subcategories.items():
        subcat_policy = policy.get(subcat_key)
        # NOTE(review): a boolean value here short-circuits to allow-all;
        # upstream schema validation presumably prevents False — confirm.
        if isinstance(subcat_policy, bool):
            return lambda object_id, key: True
        if subcat_policy is not None:
            checks.append(_gen_dict_test_func(perm_lookup, lookup, subcat_policy))

    if len(checks) == 1:
        single = checks[0]

        @wraps(single)
        def run_single_check(object_id: str, key: str) -> bool:
            """Apply a single policy function."""
            return single(object_id, key) is True

        return run_single_check

    def run_checks_in_order(object_id: str, key: str) -> bool:
        """Apply several policy functions."""
        # First check with an opinion (non-None) wins; default is deny.
        for check in checks:
            verdict = check(object_id, key)
            if verdict is not None:
                return verdict
        return False

    return run_checks_in_order
def _gen_dict_test_func(
    perm_lookup: PermissionLookup, lookup_func: LookupFunc, lookup_dict: SubCategoryDict
) -> Callable[[str, str], bool | None]:
    """Build a closure that resolves one object's permission for a key.

    The returned callable answers True/False when the policy decides, or
    None when this sub-category has no opinion.
    """

    def check_object(object_id: str, key: str) -> bool | None:
        """Test if permission is allowed based on the keys."""
        resolved: ValueType = lookup_func(perm_lookup, lookup_dict, object_id)
        # No entry (None) and plain booleans pass straight through.
        if resolved is None or isinstance(resolved, bool):
            return resolved
        assert isinstance(resolved, dict)
        # Per-key schema: a missing key yields None (undecided).
        return resolved.get(key)

    return check_object
return test_value
def test_all(policy: CategoryType, key: str) -> bool:
    """Test if a policy has an ALL access for a specific key."""
    # Non-dict policies are blanket grants/denials; coerce to bool.
    if not isinstance(policy, dict):
        return bool(policy)
    all_section = policy.get(SUBCAT_ALL)
    if isinstance(all_section, dict):
        # Per-key grants under the "all" sub-category; absent key denies.
        return all_section.get(key, False)
    return bool(all_section)
df14fa0fd5a66f7386aa37c5e726ce49ea98d4de | 825 | py | Python | blog/api.py | sepehrab1996/flask_project_maktab_53 | dd9421e0505d1ff9100ae65f9fb59613f5c93d3b | [
"BSD-3-Clause"
] | 1 | 2021-08-13T18:48:41.000Z | 2021-08-13T18:48:41.000Z | blog/api.py | parsarmx/flask_project_maktab_53 | f75d571b7bbeee5ecc063828dab6f73bd02f40d8 | [
"BSD-3-Clause"
] | null | null | null | blog/api.py | parsarmx/flask_project_maktab_53 | f75d571b7bbeee5ecc063828dab6f73bd02f40d8 | [
"BSD-3-Clause"
] | 1 | 2021-08-12T22:17:21.000Z | 2021-08-12T22:17:21.000Z | from flask import Blueprint
bp = Blueprint("api", __name__)
@bp.route("/posts_list/")
def list_post():
return "return list of posts with details for use home page"
@bp.route("/post-delete/<int:post_id>/")
def post_delete(post_id):
return f'post {post_id} deleted'
@bp.route("/post-deactive/<int:post_id>/")
def post_deactive(post_id):
return f'post {post_id}deactived'
@bp.route("/categories-list/")
def list_categories():
return 'this page display categories list'
@bp.route("/tags-list/")
def list_tags():
return "this page displays list of tags"
@bp.route("/search/")
def search():
return "this page is for searching"
@bp.route("/user-profile/<int:user_id>")
def user_profile(user_id):
return f'detail of user {user_id}'
@bp.route("/logout/")
def logout():
return "logout"
| 18.75 | 64 | 0.688485 |
c17b8b7389600b2913d017a12e3686e4591bbae3 | 2,045 | py | Python | lib/surface/domains/registrations/authorization_code/get.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | lib/surface/domains/registrations/authorization_code/get.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | null | null | null | lib/surface/domains/registrations/authorization_code/get.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 1 | 2020-07-25T01:40:19.000Z | 2020-07-25T01:40:19.000Z | # -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""`gcloud domains registrations authorization-code get` command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.domains import registrations
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.domains import resource_args
from googlecloudsdk.command_lib.domains import util
class GetAuthorizationCode(base.DescribeCommand):
    """Get authorization code of a specific Cloud Domains registration.
    Get authorization code of a specific registration.
    You can call this API only after 60 days have elapsed since initial
    registration.
    ## EXAMPLES
    To get authorization code of ``example.com'', run:
    $ {command} example.com
    """
    # NOTE: the class docstring above doubles as the gcloud CLI help text;
    # do not edit it casually.
    @staticmethod
    def Args(parser):
        # Register the positional "registration" resource argument.
        resource_args.AddRegistrationResourceArg(parser,
                                                 'to get authorization code for')
    def Run(self, args):
        """Run get authorization code command."""
        api_version = registrations.GetApiVersionFromArgs(args)
        client = registrations.RegistrationsClient(api_version)
        # Normalize (e.g. punycode) the user-supplied domain name before
        # resolving it into a resource reference.
        args.registration = util.NormalizeResourceName(args.registration)
        registration_ref = args.CONCEPTS.registration.Parse()
        registration = client.Get(registration_ref)
        # Fails with a user-facing error if the registration is not ACTIVE.
        util.AssertRegistrationOperational(api_version, registration)
        return client.RetrieveAuthorizationCode(registration_ref)
ea35410b6271560a55e1325a9af1ca3b3cf0e4c7 | 814 | py | Python | pyshare/urls.py | KennyStryker/Pyshare | 5ca7916a83294ec92137026b3b186c5dcd3bcacb | [
"MIT"
] | null | null | null | pyshare/urls.py | KennyStryker/Pyshare | 5ca7916a83294ec92137026b3b186c5dcd3bcacb | [
"MIT"
] | null | null | null | pyshare/urls.py | KennyStryker/Pyshare | 5ca7916a83294ec92137026b3b186c5dcd3bcacb | [
"MIT"
] | null | null | null | """pyshare URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from source import views
# Project-level URL routes: Django admin plus the login landing page.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', views.login, name='login')
]
| 33.916667 | 77 | 0.706388 |
1e2fb3ad373d2d8888b4dda9b681ae125dfe4db3 | 1,660 | py | Python | surveys/migrations/0006_auto_20190730_1523.py | kumarshivam12/survey_django | 40ae9aa1f479f0ad3dca9a5851e587744834069c | [
"MIT"
] | null | null | null | surveys/migrations/0006_auto_20190730_1523.py | kumarshivam12/survey_django | 40ae9aa1f479f0ad3dca9a5851e587744834069c | [
"MIT"
] | 6 | 2020-03-25T16:58:30.000Z | 2021-06-10T19:55:55.000Z | surveys/migrations/0006_auto_20190730_1523.py | kumarshivam12/survey_django | 40ae9aa1f479f0ad3dca9a5851e587744834069c | [
"MIT"
] | null | null | null | # Generated by Django 2.2.3 on 2019-07-30 15:23
import datetime
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.manager
def add_default_survey(apps, schema_editor):
    """Data migration: seed the table with a default Survey row."""
    # We can't import the Survey model directly as it may be a newer
    # version than this migration expects. We use the historical version.
    Survey = apps.get_model("surveys", "Survey")
    Survey.objects.create(slug='default_Survey')
class Migration(migrations.Migration):
    """Creates the Survey model, seeds a default row, and links questions
    to surveys.
    """
    dependencies = [
        ('surveys', '0005_question_slug'),
    ]
    operations = [
        migrations.CreateModel(
            name='Survey',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('slug', models.SlugField(max_length=20)),
                ('start_date', models.DateField(default=datetime.date.today, help_text='First day of the survey')),
                ('end_date', models.DateField(default=datetime.date.today, help_text='Last day of the survey')),
                ('is_active', models.BooleanField(default=True)),
            ],
        ),
        # NOTE(review): no reverse_code is given, so this migration cannot
        # be reversed past the data seed.
        migrations.RunPython(add_default_survey),
        migrations.AlterModelManagers(
            name='choice',
            managers=[
                ('active_objects', django.db.models.manager.Manager()),
            ],
        ),
        # Existing questions are attached to the seeded survey (pk=1).
        migrations.AddField(
            model_name='question',
            name='survey',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='surveys.Survey'),
            preserve_default=False,
        ),
    ]
| 36.086957 | 115 | 0.624096 |
fc8d8736675bf99fe57ddb9def00daa2bf65a3d8 | 2,254 | py | Python | kivymd_akivymd_sylvia_dynamic/uix/fitimage.py | kengoon/KivyMD-AKivymd-Sylvia-Dynamic | ddd5252bb5e0fd8380076142f8c8301ad3968dac | [
"MIT"
] | 1 | 2021-01-16T02:35:17.000Z | 2021-01-16T02:35:17.000Z | kivymd_akivymd_sylvia_dynamic/uix/fitimage.py | kengoon/KivyMD-AKivymd-Sylvia-Dynamic | ddd5252bb5e0fd8380076142f8c8301ad3968dac | [
"MIT"
] | null | null | null | kivymd_akivymd_sylvia_dynamic/uix/fitimage.py | kengoon/KivyMD-AKivymd-Sylvia-Dynamic | ddd5252bb5e0fd8380076142f8c8301ad3968dac | [
"MIT"
] | null | null | null | """
Fit Image
=========
Feature to automatically crop a `Kivy` image to fit your layout
Write by Benedikt Zwölfer
Referene - https://gist.github.com/benni12er/95a45eb168fc33a4fcd2d545af692dad
Example:
========
BoxLayout:
size_hint_y: None
height: dp(200)
orientation: 'vertical'
FitImage:
size_hint_y: 3
source: 'images/img1.jpg'
FitImage:
size_hint_y: 1
source: 'images/img2.jpg'
"""
from kivy.graphics.context_instructions import Color
from kivy.graphics.vertex_instructions import RoundedRectangle
from kivy.properties import StringProperty, Clock, ListProperty
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.image import Image
from kivy.uix.widget import Widget
class FitImage(BoxLayout):
    """Layout that displays an image cropped to fill its own size.

    The actual drawing is delegated to an inner Container widget.
    """
    # Image file path, forwarded to the Container once at late init.
    source = StringProperty()
    # Corner radii for the rounded rectangle the image is drawn into.
    radius = ListProperty()
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Deferred one frame — presumably so properties assigned in kv are
        # set before the Container reads them; confirm if changing.
        Clock.schedule_once(self._late_init)
    def _late_init(self, *args):
        # NOTE(review): source/radius are captured once here; changing
        # them afterwards does not update the Container.
        self.container = Container(self.source, self.radius)
        self.add_widget(self.container)
class Container(Widget):
    """Widget that draws an image center-cropped to fill its parent."""
    def __init__(self, source, radius, **kwargs):
        super().__init__(**kwargs)
        # Redraw whenever our geometry changes.
        self.bind(size=self.adjust_size, pos=self.adjust_size)
        self.image = Image(source=source)
        self.radius = radius
    def adjust_size(self, *args):
        """Recompute the cropped texture region and redraw the canvas."""
        (par_x, par_y) = self.parent.size
        # Degenerate parent size: just clear the canvas.
        if par_x == 0 or par_y == 0:
            with self.canvas:
                self.canvas.clear()
            return
        par_scale = par_x / par_y
        (img_x, img_y) = self.image.texture.size
        img_scale = img_x / img_y
        # Keep the dimension that limits the crop; shrink the other so the
        # cropped region has the parent's aspect ratio.
        if par_scale > img_scale:
            (img_x_new, img_y_new) = (img_x, img_x / par_scale)
        else:
            (img_x_new, img_y_new) = (img_y * par_scale, img_y)
        # Center the crop within the source texture.
        crop_pos_x = (img_x - img_x_new) / 2
        crop_pos_y = (img_y - img_y_new) / 2
        subtexture = self.image.texture.get_region(
            crop_pos_x, crop_pos_y, img_x_new, img_y_new
        )
        with self.canvas:
            self.canvas.clear()
            Color(1, 1, 1)
            RoundedRectangle(texture=subtexture, pos=self.pos, size=(par_x, par_y), radius=self.radius)
5308e189f0303cdef7b4697501c3cd434cdfdbc7 | 24 | py | Python | pyhiveapi/apyhiveapi/api/__init__.py | ms32035/Pyhiveapi | c84389aa8118acd006a4b228e58b6a966e49e7dc | [
"MIT"
] | 10 | 2020-12-03T14:23:56.000Z | 2022-02-01T10:48:42.000Z | pyhiveapi/apyhiveapi/api/__init__.py | ms32035/Pyhiveapi | c84389aa8118acd006a4b228e58b6a966e49e7dc | [
"MIT"
] | 65 | 2020-12-24T02:09:56.000Z | 2022-03-28T20:09:01.000Z | pyhiveapi/apyhiveapi/api/__init__.py | ms32035/Pyhiveapi | c84389aa8118acd006a4b228e58b6a966e49e7dc | [
"MIT"
] | 8 | 2020-10-05T18:55:41.000Z | 2021-03-04T23:45:05.000Z | """__init__.py file."""
| 12 | 23 | 0.583333 |
00878759072a723d610a5843199b12de39967ccd | 2,265 | py | Python | numba_dpcomp/numba_dpcomp/mlir/vectorize.py | nbpatel/mlir-extensions | 1270a2550694a53a0c70fd5b17d518eef133802b | [
"Apache-2.0"
] | null | null | null | numba_dpcomp/numba_dpcomp/mlir/vectorize.py | nbpatel/mlir-extensions | 1270a2550694a53a0c70fd5b17d518eef133802b | [
"Apache-2.0"
] | null | null | null | numba_dpcomp/numba_dpcomp/mlir/vectorize.py | nbpatel/mlir-extensions | 1270a2550694a53a0c70fd5b17d518eef133802b | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from .linalg_builder import eltwise
from .numpy.funcs import register_func
from numba.core.typing.templates import infer_global, CallableTemplate
from numba.core import types
import sys
def vectorize(arg_or_function=(), **kws):
    """Decorator that vectorizes a scalar function over arrays.

    Works both bare (@vectorize) and parenthesized (@vectorize(...)): in
    the bare form the function itself arrives as the first argument.
    """
    bare_usage = inspect.isfunction(arg_or_function)
    return _gen_vectorize(arg_or_function) if bare_usage else _gen_vectorize
class _VecFuncTyper(CallableTemplate):
    """Numba typing template for generated vectorized stubs: an array
    argument types to the same array type.
    """
    def generic(self):
        def typer(a):
            # Only array arguments are typed; anything else is rejected
            # (typer returns None implicitly).
            if isinstance(a, types.Array):
                return a
        return typer
def _gen_vectorized_func_name(func, mod):
func_name = f"_{func.__module__}_{func.__qualname__}_vectorized"
for c in ["<", ">", "."]:
func_name = func_name.replace(c, "_")
i = 0
while True:
new_name = func_name if i == 0 else f"{func_name}{i}"
if not hasattr(mod, new_name):
return new_name
i += 1
def _gen_vectorize(func):
    """Create an array-valued wrapper that applies *func* elementwise.

    Only single-argument functions are currently supported.

    Args:
        func: scalar Python function to vectorize.
    Returns:
        A jitted function mapping an array to an array of results.
    Raises:
        NotImplementedError: if func takes more or fewer than one argument.
    """
    num_args = len(inspect.signature(func).parameters)
    if num_args != 1:
        # Was `assert False`: asserts are stripped under `python -O`,
        # which would silently return None for unsupported signatures.
        raise NotImplementedError(
            "vectorize() currently supports only single-argument functions"
        )
    mod = sys.modules[__name__]
    # Create a uniquely named module-level stub so numba's typing and
    # lowering machinery can be registered against a global symbol.
    func_name = _gen_vectorized_func_name(func, mod)
    exec(f"def {func_name}(arg): pass")
    vec_func_inner = eval(func_name)
    setattr(mod, func_name, vec_func_inner)
    infer_global(vec_func_inner)(_VecFuncTyper)
    # Imported lazily to avoid a circular import with the decorators module.
    from ..decorators import mlir_njit
    jit_func = mlir_njit(func, inline="always")

    @register_func(func_name, vec_func_inner)
    def impl(builder, arg):
        # Lowering: apply the jitted scalar function elementwise.
        return eltwise(builder, arg, lambda a, b: jit_func(a))

    def vec_func(arg):
        return vec_func_inner(arg)

    return mlir_njit(vec_func, inline="always")
| 29.038462 | 74 | 0.680353 |
7c70715ce5b0476654b586e7a26d65750fa5d4dc | 2,564 | py | Python | rpc/transfer.py | ninjadq/Windows-Agent | 3c165622511bae2a0542f070745d295bc14b538d | [
"Apache-2.0"
] | 60 | 2016-09-29T05:30:00.000Z | 2020-11-27T15:23:47.000Z | rpc/transfer.py | laiwei/Windows-Agent | 3c165622511bae2a0542f070745d295bc14b538d | [
"Apache-2.0"
] | 6 | 2016-10-10T05:35:22.000Z | 2019-10-12T05:35:43.000Z | rpc/transfer.py | laiwei/Windows-Agent | 3c165622511bae2a0542f070745d295bc14b538d | [
"Apache-2.0"
] | 27 | 2016-09-23T11:31:04.000Z | 2020-11-03T06:24:29.000Z | import random
import socket
import itertools
import logging
from client import JSONClient
from utils import g
transfer_rpc_clients = {}
class TransferJSONClient(JSONClient):
    """JSONClient that remembers its address so a dropped TCP connection
    can be re-established on demand.
    """
    def __init__(self, addr):
        super(TransferJSONClient, self).__init__(addr)
        # (host, port) tuple kept so the socket can be rebuilt later.
        self.addr = addr

    def insure_conn(self):
        """
        insure tcp connection is alive: ping the transfer, and on failure
        rebuild the socket and verify it with another ping (3 attempts).
        """
        for _ in range(3):
            try:
                self.call('Transfer.Ping', None)
            except Exception as e:
                logging.error(e)
                logging.error("lose connection to transfer, prepare to rebuild")
                self.socket = socket.create_connection(self.addr)
            else:
                # Ping succeeded — connection verified, stop probing.
                # The original code had no success break (it pinged a
                # healthy connection three times) and broke right after
                # rebuilding without re-checking the new socket.
                break
def init_rpc_client(addr_and_port):
    """Create a TransferJSONClient for a "host:port" string, cache it
    under that string, and return it.
    """
    host, port = addr_and_port.split(':')
    logging.info('make tcp connection --> addr: %s port: %s' % (host, port))
    client = TransferJSONClient((host, int(port)))
    transfer_rpc_clients[addr_and_port] = client
    return client
def get_transfer_rpc_client(addr):
    """Return a cached transfer RPC client for *addr*, creating it on
    first use or re-validating its connection otherwise.
    """
    client = transfer_rpc_clients.get(addr)
    if client is None:
        # First contact with this transfer: build and cache a client.
        return init_rpc_client(addr)
    # Reuse the cached client, making sure its socket is still alive.
    client.insure_conn()
    return client
def send_data_to_transfer(data):
    """
    send formated data to transfer via rpc, select transfer randomly and every
    transfer will retry 3 times if failure
    Args:
        data (list of dict): [{}, {}, ...]
    Returns:
        The transfer's response for the first successful call, or None
        when every configured transfer failed.
    """
    # Shuffle a copy: shuffling g.TRANSFER['addrs'] in place would
    # permanently reorder the shared configuration list on every call.
    addrs = list(g.TRANSFER['addrs'])
    logging.debug(addrs)
    random.shuffle(addrs)
    for addr in addrs:
        rpc = get_transfer_rpc_client(addr)
        for i in range(3):
            try:
                res = rpc.call('Transfer.Update', data)
            except Exception as e:
                # logging.warn is a deprecated alias of logging.warning.
                logging.warning("call (%s) Transfer.update failure, times: %s -> msg: %s" %
                                (addr, i, e))
                continue
            return res
        # All three attempts against this transfer failed; try the next.
        logging.error("send data %s to transfer (%s) failure" %
                      (data, addr))
| 30.52381 | 89 | 0.583853 |
93a3d7b148262c9865692fc4ab114f82ca7096b5 | 14,056 | py | Python | teekkari.py | liljaj/imneversorry | edc380da32590b7c44408500518358c46310b03f | [
"MIT"
] | null | null | null | teekkari.py | liljaj/imneversorry | edc380da32590b7c44408500518358c46310b03f | [
"MIT"
] | null | null | null | teekkari.py | liljaj/imneversorry | edc380da32590b7c44408500518358c46310b03f | [
"MIT"
] | null | null | null | import requests
import urllib
import random
import re
import db
import time
import datetime
import json
import hashlib
import emoji
from emoji import unicode_codes
class Teekkari:
    """Telegram bot command handlers with Finnish student-humor responses.

    Most replies are random picks from database tables loaded once in
    ``__init__`` (via the project-local ``db`` module), or scraped live from
    Finnish/Swedish slang-dictionary and news sites.
    """

    def __init__(self):
        # Maps command keywords to their handler methods; exposed via
        # getCommands() for the dispatcher.
        self.commands = {
            'vituttaa': self.getVitutus,
            'viisaus': self.getViisaus,
            'hakemus': self.handleHakemus,
            'pekkauotila': self.getVittuilu,
            'diagnoosi': self.getDiagnoosi,
            'maitonimi': self.getMaitonimi,
            'helveten' : self.getHelveten,
            'pizza': self.getPizza,
            'kalanimi': self.getKalanimi,
            'addsikulla': self.banHammer,
            'sotanimi': self.getSotanimi,
            'sukunimi': self.getSukunimi,
        }
        # External data sources (random Wikipedia/Wiktionary pages, slang
        # dictionaries and an Ilta-Sanomat news API endpoint).
        self.vituttaaUrl = 'https://fi.wikipedia.org/wiki/Toiminnot:Satunnainen_sivu'
        self.urbaaniUrl = 'https://urbaanisanakirja.com/random/'
        self.urbaaniWordUrl = 'https://urbaanisanakirja.com/word/'
        self.slangopediaUrl = 'http://www.slangopedia.se/slumpa/'
        self.uutineUrl = 'https://www.is.fi/api/laneitems/392841/multilist'
        self.sukunimiUrl = 'https://fi.wiktionary.org/wiki/Toiminnot:Satunnainen_kohde_luokasta/Luokka:Suomen_kielen_sukunimet'
        # Phrase/word tables loaded from the database; each read returns
        # rows, so entries are addressed as row[0] when sampled.
        self.viisaudet = db.readViisaudet()
        self.sanat = db.readSanat()
        self.diagnoosit = db.readDiagnoosit()
        self.maidot = db.readMaidot()
        self.nimet = db.readNimet()
        self.kalat = db.readKalat()
        self.vihanneet = db.readVihanneet()
        self.planetoidit = db.readPlanetoidit()
        self.kulkuneuvot = db.readKulkuneuvot()
        self.linnut = db.readLinnut()
        self.sotilasarvot = db.readSotilasarvot()
        self.sotilasnimet = db.readSotilasnimet()
        self.ennustukset = db.readEnnustukset()
        self.nakutukset = db.readNakutukset()
        # Rate-limiting state: per-user date for getVitun, timestamps for
        # the news/dice features.
        self.lastVitun = {}
        self.nextUutine = 0
        self.lastUutineUpdate = 0
        self.uutineet = [ [], [] ]
        self.nextVaihdan = 0

    def getCommands(self):
        """Return the command-name -> handler mapping."""
        return self.commands

    def getVittuilu(self, bot, update, args=''):
        """Reply with a random insult; 1-in-5 chance of the fixed reply."""
        if random.randint(0, 4) == 0:
            bot.sendMessage(chat_id=update.message.chat_id, text='TÖRKEÄÄ SOLVAAMISTA')
        else:
            bot.sendMessage(chat_id=update.message.chat_id, text='vittuilu'+random.sample(self.sanat, 1)[0][0])

    def handleHakemus(self, bot, update, args=''):
        """Reply to 'hakemus' with a mostly-threatening easter egg.

        Chat excerpt motivating the special case (translated):
          Shancial, [16.03.20 14:27]: "hakemus — nerf for Zyrki's applications"
          Imneversorry, [16.03.20 14:27]: "hyy-vä" ("good")
        User id 153013548 (presumably the requester — unverified) gets the
        positive reply less often.
        """
        if random.randint(0, 9) == 0 and (update.message.from_user.id != 153013548 or random.randint(0, 3) == 0):
            if random.randint(0, 200) == 0:
                bot.sendSticker(chat_id=update.message.chat_id, sticker='CAADBAADJgADiR7LDbglwFauETpzFgQ')
            else:
                bot.sendMessage(chat_id=update.message.chat_id, text='hyy-vä')
        else:
            if random.randint(0, 1000) == 0:
                bot.sendSticker(chat_id=update.message.chat_id, sticker='CAADBAADPwADiR7LDV1aPNns0V1YFgQ')
            elif random.randint(0, 600) == 0:
                bot.sendMessage(chat_id=update.message.chat_id, text='TAPAN KAIKKI')
            else:
                bot.sendMessage(chat_id=update.message.chat_id, text='tapan sut')

    def getViisaus(self, bot, update, args=''):
        """Send a random wisdom from the database."""
        bot.sendMessage(chat_id=update.message.chat_id, text=random.sample(self.viisaudet, 1)[0][0])

    def getVitutus(self, bot, update, args=''):
        """Send '<random Wikipedia article title> vituttaa'.

        The random-page URL redirects; the article name is taken from the
        final (unquoted) URL path segment.
        """
        r = requests.get(self.vituttaaUrl)
        url = urllib.parse.unquote_plus(r.url).split('/')
        vitutus = url[len(url)-1].replace('_', ' ') + " vituttaa"
        bot.sendMessage(chat_id=update.message.chat_id, text=vitutus)

    def getSukunimi(self, bot, update, args=''):
        """Send a random Finnish surname scraped from Wiktionary."""
        r = requests.get(self.sukunimiUrl)
        url = urllib.parse.unquote_plus(r.url).split('/')
        vitutus = url[len(url)-1].replace('_', ' ')
        bot.sendMessage(chat_id=update.message.chat_id, text=vitutus)

    def getDiagnoosi(self, bot, update, args=''):
        """Send a random 'diagnosis' from the database."""
        bot.sendMessage(chat_id=update.message.chat_id, text=random.sample(self.diagnoosit, 1)[0][0])

    def getMaitonimi(self, bot, update, args=''):
        """Send a '<milk>-<name>' combination."""
        maitoNimi = random.sample(self.maidot, 1)[0][0] + "-" + random.sample(self.nimet, 1)[0][0]
        bot.sendMessage(chat_id=update.message.chat_id, text=maitoNimi)

    def getLintunimi(self, bot, update, args=''):
        """Send a '<bird>-<name>' combination.

        Bird names ending in 'nen' get that suffix replaced with 's'
        (e.g. 'varpunen' -> 'varpus') before joining.
        """
        lintu = random.sample(self.linnut, 1)[0][0]
        lintu = re.sub(r'nen$', 's', lintu)
        lintuNimi = lintu + "-" + random.sample(self.nimet, 1)[0][0]
        bot.sendMessage(chat_id=update.message.chat_id, text=lintuNimi)

    def getKalanimi(self, bot, update, args=''):
        """Send a random fish name from the database."""
        bot.sendMessage(chat_id=update.message.chat_id, text=random.sample(self.kalat, 1)[0][0])

    def getMoponimi(self, bot, update, args=''):
        """Send a vegetable/vehicle/planetoid mash-up name.

        The ("", "-")[condition] expressions insert a hyphen when the two
        joined words would otherwise collide on the same vowel.
        """
        kurkku = random.sample(self.vihanneet, 1)[0][0]
        mopo = random.sample(self.kulkuneuvot, 1)[0][0]
        kuu = random.sample(self.planetoidit, 1)[0][0]
        mopoNimi = kurkku + ("", "-")[kurkku[-1:] == mopo[0] and mopo[0] in ('a', 'e', 'i', 'o', 'u', 'y', 'ä', 'ö')] + mopo + " eli " + kuu + ("", "-")[kuu[-1:] == 'e'] + 'eläin ' + kurkku + 'maasta'
        bot.sendMessage(chat_id=update.message.chat_id, text=mopoNimi)

    def getSotanimi(self, bot, update, args=''):
        """Send '<military rank> <surname>'.

        With 1-in-8 probability the sender's own last (or first) name is
        used instead of a random surname.
        """
        arvo = random.sample(self.sotilasarvot, 1)[0][0]
        nimi = random.sample(self.sotilasnimet, 1)[0][0]
        if random.randint(0, 7) == 0:
            if update.message.from_user:
                if update.message.from_user.last_name:
                    nimi = update.message.from_user.last_name
                elif update.message.from_user.first_name:
                    nimi = update.message.from_user.first_name
        sotaNimi = arvo + ' ' + nimi
        bot.sendMessage(chat_id=update.message.chat_id, text=sotaNimi)

    def getNakuttaa(self, bot, update, args=''):
        """Send a random 'nakutus' phrase (1-in-101 fixed retort)."""
        if random.randint(0, 100) == 0:
            bot.sendMessage(chat_id=update.message.chat_id, text="Mikä vitun Nakuttaja?")
        else:
            bot.sendMessage(chat_id=update.message.chat_id, text=random.sample(self.nakutukset, 1)[0][0] + " vaa")

    def getHalo(self, bot, update, args=''):
        """Echo a random 'Halo' variant."""
        bot.sendMessage(chat_id=update.message.chat_id, text=random.choice(['Halo', 'Halo?', 'Halo?!']))

    def getPizza(self, bot, update, args=''):
        """State the bot's pineapple-on-pizza stance."""
        bot.sendMessage(chat_id=update.message.chat_id, text='Ananas kuuluu pizzaan!')

    def getNoppa(self, bot, update, args=''):
        """Roll two Telegram dice."""
        bot.sendDice(chat_id=update.message.chat_id)
        bot.sendDice(chat_id=update.message.chat_id)

    def getVaihdan(self, bot, update, args=''):
        """Roll one die, rate-limited to once per random 60-180 s window."""
        now = time.time()
        if self.nextVaihdan < now:
            self.nextVaihdan = now + random.randint(60, 180)
            bot.sendDice(chat_id=update.message.chat_id)

    def getUrbaani(self):
        """Scrape a random word from urbaanisanakirja.com (from <title>)."""
        webpage = urllib.request.urlopen(self.urbaaniUrl).read().decode("utf-8")
        title = str(webpage).split('<title>')[1].split('</title>')[0]
        sana = title.split(" |")[0]
        return sana

    def getUrbaaniSelitys(self, word):
        """Scrape the definition of ``word`` from urbaanisanakirja.com.

        The meaning comes from the meta-description tag; everything up to
        (and including) the first '. ' is dropped as boilerplate.
        """
        webpage = urllib.request.urlopen(self.urbaaniWordUrl + word + '/').read().decode("utf-8")
        meaning = str(webpage).split('<meta name="description" content="')[1].split('">')[0]
        meaning = meaning[meaning.find('.')+2:]
        return meaning

    def getSlango(self):
        """Scrape a random Swedish slang word from slangopedia.se.

        The site is Latin-1 encoded, hence the explicit ISO-8859-1 unquote.
        """
        r = requests.get(self.slangopediaUrl)
        url = urllib.parse.unquote_plus(r.url, encoding='ISO-8859-1').split('/')
        return str(url[-1].split('=')[-1].lower())

    def getVitun(self, bot, update, args=''):
        """Send '<Word> vitun <word>' at most once per user per day."""
        now = datetime.datetime.now().date()
        userId = update.message.from_user.id
        if userId not in self.lastVitun:
            self.lastVitun[userId] = now
            bot.sendMessage(chat_id=update.message.chat_id, text=self.getUrbaani().capitalize() + " vitun " + self.getUrbaani())
        elif self.lastVitun[userId] != now:
            self.lastVitun[userId] = now
            bot.sendMessage(chat_id=update.message.chat_id, text=self.getUrbaani().capitalize() + " vitun " + self.getUrbaani())

    def getVitunSelitys(self, bot, update, args=''):
        """Look up the word after 'mikä vitun ' (11 chars) and reply with
        its urbaanisanakirja definition; the word is slugified first."""
        word = update.message.text[11:].lower().replace(' ', '-').replace('ä', 'a').replace('ö', 'o').replace('å', 'a')
        word = re.sub(r"[^a-z0-9\-]", '', word)
        bot.sendMessage(chat_id=update.message.chat_id, text=self.getUrbaaniSelitys(word))

    def getVaalikone(self, bot, update, args=''):
        """Suggest a random candidate number (2..425).

        NOTE(review): not referenced by self.commands or messageHandler —
        appears to be unused/legacy; confirm before removing.
        """
        bot.sendMessage(chat_id=update.message.chat_id, text='Äänestä: ' + str(random.randint(1,424) + 1))

    def getHelveten(self, bot, update, args=''):
        """Send '<Slang> jävla <slang>' built from two slangopedia words."""
        bot.sendMessage(chat_id=update.message.chat_id,
                        text=self.getSlango().capitalize() + ' jävla ' + self.getSlango().lower() )

    def getTEK(self, bot, update, args=''):
        """1-in-51 chance: tease the first word containing 'tek'."""
        if random.randint(0, 50) == 0:
            for word in update.message.text.lower().split(' '):
                if re.match(r'.*tek.*', word) and word != 'tek':
                    bot.sendMessage(chat_id=update.message.chat_id, text='ai ' + word.replace('tek', 'TEK') + ' xD')
                    return

    def getTUNI(self, bot, update, args=''):
        """1-in-6 chance: tease the first word containing 'tuni'."""
        if random.randint(0, 5) == 0:
            for word in update.message.text.lower().split(' '):
                if re.match(r'.*tuni.*', word) and word != 'tuni':
                    bot.sendMessage(chat_id=update.message.chat_id, text='ai ' + word.replace('tuni', 'TUNI') + ' xD')
                    return

    def getEnnustus(self, bot, update, args=''):
        """Send a 'horoscope': deterministic per user per day.

        The RNG is seeded with md5(user id + date) so the same user gets
        the same fortune all day.  unicode_codes.EMOJI_UNICODE is the
        emoji package's name->codepoint table (legacy API in newer emoji
        releases — TODO confirm installed version supports it).
        """
        now = datetime.datetime.now()
        data = [
            update.message.from_user.id,
            now.day,
            now.month,
            now.year
        ]
        seed = hashlib.md5(json.dumps(data, sort_keys=True).encode('utf-8')).hexdigest()
        rigged = random.Random(seed)
        ennustus = ""
        n = rigged.randint(0, 2)
        for x in range(n):
            r = rigged.choice(tuple(unicode_codes.EMOJI_UNICODE))
            ennustus += emoji.emojize(r)
        n = rigged.randint(1, 4)
        for x in range(n):
            ennustus += rigged.sample(self.ennustukset, 1)[0][0]+". "
        m = rigged.randint(0, 2)
        for x in range(m):
            r = rigged.choice(tuple(unicode_codes.EMOJI_UNICODE))
            ennustus += emoji.emojize(r)
        # Phrases ending in '?' would otherwise read '?.'.
        ennustus = ennustus.replace('?.', '.')
        n = rigged.randint(1, 3)
        for x in range(n):
            r = rigged.choice(tuple(unicode_codes.EMOJI_UNICODE))
            ennustus += emoji.emojize(r)
        bot.sendMessage(chat_id=update.message.chat_id, text=ennustus)

    def getUutine(self, bot, update, args=''):
        """Send a fake headline built from two real IS.fi headline halves.

        Headlines are re-fetched at most hourly; replies are rate-limited
        to one per random 10-120 s window.
        """
        now = time.time()
        if self.lastUutineUpdate + 3600 < now:
            self.lastUutineUpdate = now
            req = requests.get(self.uutineUrl)
            uutineet = req.json()[0]
            self.uutineet = [ [], [] ]
            for uutine in uutineet:
                if 'title' in uutine:
                    otsikko = uutine['title']
                    # Only 'first – second' style titles are usable; the
                    # halves go into separate pools and are recombined.
                    if ' – ' in otsikko:
                        otsikko = otsikko.split(' – ')
                        self.uutineet[0].append(otsikko[0])
                        self.uutineet[1].append(otsikko[1])
        if self.nextUutine < now:
            self.nextUutine = now + random.randint(10, 120)
            uutine = random.choice(self.uutineet[0]) + ' – ' + random.choice(self.uutineet[1])
            bot.sendMessage(chat_id=update.message.chat_id, text=uutine)

    def banHammer(self, bot, update, args=''):
        """Kick the *sender* from the chat for one minute."""
        duration = datetime.datetime.now() + datetime.timedelta(minutes=1)
        print(duration)
        bot.kickChatMember(update.message.chat.id, update.message.from_user.id, until_date=duration)

    def messageHandler(self, bot, update):
        """Dispatch an incoming text message to the first matching handler.

        Substring triggers are checked before the anchored/regex triggers;
        order matters, so only the first match fires.
        """
        msg = update.message
        #print(msg)
        if msg.text is not None:
            if 'vituttaa' in msg.text.lower():
                self.getVitutus(bot, update)
            elif 'viisaus' in msg.text.lower():
                self.getViisaus(bot, update)
            elif 'pekkauotila' in msg.text.lower():
                self.getVittuilu(bot, update)
            elif 'hakemus' in msg.text.lower():
                self.handleHakemus(bot, update)
            elif 'diagno' in msg.text.lower():
                self.getDiagnoosi(bot, update)
            elif 'horoskoop' in msg.text.lower():
                self.getEnnustus(bot, update)
            elif 'uutine' in msg.text.lower():
                self.getUutine(bot, update)
            elif re.match(r'^halo', msg.text.lower()):
                self.getHalo(bot, update)
            elif re.match(r'^noppa', msg.text.lower()):
                self.getNoppa(bot, update)
            elif re.match(r'^vaihdan', msg.text.lower()):
                self.getVaihdan(bot, update)
            elif re.match(r'^vitun', msg.text.lower()):
                self.getVitun(bot, update)
            elif re.match(r'^mikä vitun ', msg.text.lower()):
                self.getVitunSelitys(bot, update)
            elif re.match(r'^helveten', msg.text.lower()):
                self.getHelveten(bot, update)
            elif re.match(r'^/maitonimi', msg.text.lower()):
                self.getMaitonimi(bot, update)
            elif re.match(r'^/lintuslanginimi', msg.text.lower()):
                self.getLintunimi(bot, update)
            elif re.match(r'^/kurkkumoponimi', msg.text.lower()):
                self.getMoponimi(bot, update)
            elif re.match(r'^/sotanimi', msg.text.lower()):
                self.getSotanimi(bot, update)
            elif re.match(r'^/sukunimi', msg.text.lower()):
                self.getSukunimi(bot, update)
            elif re.match(r'.*[tT]ek.*', msg.text):
                self.getTEK(bot, update)
            elif re.match(r'.*[tT]uni.*', msg.text):
                self.getTUNI(bot, update)
            elif 'nakuttaa' in msg.text.lower():
                self.getNakuttaa(bot, update)
| 45.934641 | 200 | 0.586725 |
2939e7e64567b33083035a8c7bc498a00be92745 | 335 | py | Python | helpers.py | AndreMacedo88/VEnCode-App | 573531531f142c6207bc9156f0faa1c29159f3fa | [
"BSD-3-Clause"
] | null | null | null | helpers.py | AndreMacedo88/VEnCode-App | 573531531f142c6207bc9156f0faa1c29159f3fa | [
"BSD-3-Clause"
] | null | null | null | helpers.py | AndreMacedo88/VEnCode-App | 573531531f142c6207bc9156f0faa1c29159f3fa | [
"BSD-3-Clause"
] | null | null | null | import wx
def question_exit_safely(frame):
    """Ask the user to confirm quitting; close ``frame`` only on "yes"."""
    answer = wx.MessageBox('Are you sure you want to quit?', 'Exit VEnCode',
                           wx.YES_NO | wx.NO_DEFAULT, frame)
    if answer != wx.YES:
        return
    frame.Close()
def panel_normal_layout(panel):
    """Apply the standard layout sequence to a wx panel: enable automatic
    layout, recompute child positions, then shrink-fit to contents."""
    panel.SetAutoLayout(True)
    panel.Layout()
    panel.Fit()
| 22.333333 | 78 | 0.620896 |
74e529e160a2c9f003641c047f2a18a428ef64dd | 3,033 | py | Python | analysis.py | xavidram/FrequencyAnalyzerPhidget | bc387947719738b46287c79b44498a3efe21a83a | [
"Unlicense"
] | 1 | 2019-02-12T19:43:43.000Z | 2019-02-12T19:43:43.000Z | analysis.py | xavidram/FrequencyAnalyzerPhidget | bc387947719738b46287c79b44498a3efe21a83a | [
"Unlicense"
] | null | null | null | analysis.py | xavidram/FrequencyAnalyzerPhidget | bc387947719738b46287c79b44498a3efe21a83a | [
"Unlicense"
] | null | null | null | ####
# Author: Xavid Ramirez
# Email: xavidram@hotmail.com
# Alt Email: xavid.ramirez01@utrgv.edu
# Script Summary: This script will sample values
# from a vibration sensors at a
# desired sampling fequency.
# License: MIT => https://tldrlegal.com/license/mit-license
####
import u3
import threading
import os
import platform
import signal
import time
#initialize the Labjack U3 object
d = u3.U3()
#Global Variables
# Frequency: sampling period in seconds (set from user input at startup).
# Runtime:   trial duration in seconds (set from user input at startup).
# TimeCount: not referenced anywhere in this script — appears unused.
# NOTE(review): FileName is also a module global, assigned in the startup
# code at the bottom of the file and read by Analyze()/AnalyzeThreaded().
Frequency = 0
TimeCount = 0
Runtime = 0
# Function definitions:
def cleanUp():
    """
    Function:   cleanUp
    Params:     NONE
    Libraries:  os, platform
    Desc:       Detect which system is running the
                script and run the os's clear command
                to clear the screen.
    """
    system = platform.system()
    if 'Windows' in system:
        os.system('cls')
    elif 'Linux' in system or 'Darwin' in system or 'OSX' in system:
        # platform.system() reports 'Darwin' on macOS, so the original
        # 'OSX' check never matched there; it is kept for compatibility.
        os.system('clear')
def AnalyzeThreaded():
"""
Threading version of the calibrate function, works on normal
desktops not on microcontrollers
"""
threading.Timer(Frequency, pAIN0).start()
print "%.6f" % (d.readRegister(0))
with open(FileName , 'a') as textfile:
textfile.write(str(d.readRegister(0)) + '\n' )
def Analyze():
    """
    Function:  Calibrate
    params:    None
    Libraries: time
    Desc:      Sample analog register AIN0 for ``Runtime`` seconds,
               sleeping ``Frequency`` seconds between reads, then write
               all (timestamp, value) pairs to ``FileName`` in one pass
               after capture to keep per-sample overhead low.
               NOTE: the effective sampling period is Frequency plus the
               register-read latency, so rates are approximate.
    """
    Timestart = time.time() #obtain current time
    Timestamps = [] #initialize timestamps array
    Values = [] #initialize value array
    Duration = int(Runtime) #convert runtime to int
    while (time.time() - Timestart) < Duration: #run while current timedifference less than program duration
        Timestamps.append((time.time() - Timestart)) #grab current time
        Values.append(d.readRegister(0)) #grab value (.0001 delay)
        time.sleep(Frequency) #sleep for desired sampling frequency time
    #write out all the contents after the data is captured to reduce delay time
    with open(FileName, 'a') as textfile:
        j = 0
        while j < len(Timestamps):
            textfile.write(str(Timestamps[j]) + '\t\t' + str(Values[j]) + '\n')
            j += 1
#Program Start:
# Interactive setup: ask for output file, sampling frequency (Hz) and trial
# duration, then run the blocking capture loop.  This is Python 2 code
# (print statements, raw_input).
cleanUp()
print " ---Vibration Sensor Calibration---\n "
FileName = raw_input("What would you like to call the save file generated by this trial? ex: file.txt : ")
H = raw_input("What frequency would you like to capture values at? (hz): ")
Runtime = raw_input("how long would you like the trials to last?(seconds) : ")
# Convert the requested frequency (Hz) to a sampling period in seconds.
Frequency = 1 / float(H) #get the sampling frequency by dividing the value one by the given Frequency (H)
print "Sampling rate set to: %s" % (Frequency)
print "REMEMBER: In order to stop script, use key combinations ctrl + c or script will run until stopped!"
print "Please double check that your sensor is connected on the proper channels: | Analog VS, Analog Ground, AIN0 | \n"
print "Starting.....\n"
time.sleep(2)
cleanUp()
Analyze()
b95d46ac68646796a18f937c9afd91618dfd7969 | 9,718 | py | Python | ct/py/json_summary_combiner.py | FeliciaXmL/skia-buildbot | 446c1910507adcdad6ac52bee623400d158df363 | [
"BSD-3-Clause"
] | null | null | null | ct/py/json_summary_combiner.py | FeliciaXmL/skia-buildbot | 446c1910507adcdad6ac52bee623400d158df363 | [
"BSD-3-Clause"
] | 21 | 2022-02-13T21:21:58.000Z | 2022-03-02T10:01:09.000Z | ct/py/json_summary_combiner.py | FeliciaXmL/skia-buildbot | 446c1910507adcdad6ac52bee623400d158df363 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module that combines JSON summaries and outputs the summaries in HTML."""
import glob
import json
import optparse
import os
import posixpath
import sys
sys.path.append(
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
import json_summary_constants
# Add the django settings file to DJANGO_SETTINGS_MODULE.
import django
os.environ['DJANGO_SETTINGS_MODULE'] = 'csv-django-settings'
django.setup()
from django.template import loader
# Base URL used to turn 'gs://...' paths into browser-viewable links.
STORAGE_HTTP_BASE = 'http://storage.cloud.google.com'

# Template variables used in the django templates defined in django-settings.
# If the values of these constants change then the django templates need to
# change as well.
WORKER_NAME_TO_INFO_ITEMS_TEMPLATE_VAR = 'worker_name_to_info_items'
ABSOLUTE_URL_TEMPLATE_VAR = 'absolute_url'
WORKER_INFO_TEMPLATE_VAR = 'worker_info'
FILE_INFO_TEMPLATE_VAR = 'file_info'
RENDER_PICTURES_ARGS_TEMPLATE_VAR = 'render_pictures_args'
NOPATCH_GPU_TEMPLATE_VAR = 'nopatch_gpu'
WITHPATCH_GPU_TEMPLATE_VAR = 'withpatch_gpu'
TOTAL_FAILING_FILES_TEMPLATE_VAR = 'failing_files_count'
GS_FILES_LOCATION_NO_PATCH_TEMPLATE_VAR = 'gs_http_files_location_nopatch'
GS_FILES_LOCATION_WITH_PATCH_TEMPLATE_VAR = 'gs_http_files_location_withpatch'
GS_FILES_LOCATION_DIFFS_TEMPLATE_VAR = 'gs_http_files_location_diffs'
GS_FILES_LOCATION_WHITE_DIFFS_TEMPLATE_VAR = 'gs_http_files_location_whitediffs'
class FileInfo(object):
  """Value object describing one failing file and its diff metrics."""

  def __init__(self, file_name, skp_location, num_pixels_differing,
               percent_pixels_differing,
               max_diff_per_channel, perceptual_diff):
    # Record every constructor argument verbatim on the instance.
    self.file_name = file_name
    self.skp_location = skp_location
    self.num_pixels_differing = num_pixels_differing
    self.percent_pixels_differing = percent_pixels_differing
    self.max_diff_per_channel = max_diff_per_channel
    self.perceptual_diff = perceptual_diff
    # Derived: filename of the diff image produced for this file.
    self.diff_file_name = _GetDiffFileName(file_name)
def _GetDiffFileName(file_name):
file_name_no_ext, ext = os.path.splitext(file_name)
ext = ext.lstrip('.')
return '%s_nopatch_%s-vs-%s_withpatch_%s.%s' % (
file_name_no_ext, ext, file_name_no_ext, ext, ext)
class WorkerInfo(object):
  """Value object holding one worker's failures and result locations."""

  def __init__(self, worker_name, failed_files, skps_location,
               files_location_nopatch, files_location_withpatch,
               files_location_diffs, files_location_whitediffs):
    # Identity and failure list.
    self.worker_name = worker_name
    self.failed_files = failed_files
    self.skps_location = skps_location
    # Google Storage locations of the rendered/diffed images.
    self.files_location_nopatch = files_location_nopatch
    self.files_location_withpatch = files_location_withpatch
    self.files_location_diffs = files_location_diffs
    self.files_location_whitediffs = files_location_whitediffs
def _GsToHttp(gs_path):
  """Map a 'gs://<bucket>/<object>' URI to its storage.cloud.google.com URL.

  The previous code used str.lstrip('gs://'), which strips the *character
  set* {g, s, :, /} and therefore also ate leading 'g'/'s' characters of
  the bucket name; this removes only the literal prefix.
  """
  prefix = 'gs://'
  if gs_path.startswith(prefix):
    gs_path = gs_path[len(prefix):]
  return posixpath.join(STORAGE_HTTP_BASE, gs_path)


def CombineJsonSummaries(json_summaries_dir):
  """Combine the per-worker JSON summaries found in json_summaries_dir.

  Args:
    json_summaries_dir: directory containing one '<worker>.json' per worker.

  Returns:
    dict mapping worker name -> WorkerInfo.
  """
  worker_name_to_info = {}
  for json_summary in glob.glob(os.path.join(json_summaries_dir, '*.json')):
    with open(json_summary) as f:
      data = json.load(f)
    # There must be only one top level key and it must be the worker name.
    assert len(data) == 1
    worker_name = next(iter(data))  # works on both Python 2 and 3
    worker_data = data[worker_name]
    file_info_list = []

    for failed_file in worker_data[json_summary_constants.JSONKEY_FAILED_FILES]:
      file_info = FileInfo(
          file_name=failed_file[json_summary_constants.JSONKEY_FILE_NAME],
          skp_location=_GsToHttp(
              failed_file[json_summary_constants.JSONKEY_SKP_LOCATION]),
          num_pixels_differing=failed_file[
              json_summary_constants.JSONKEY_NUM_PIXELS_DIFFERING],
          percent_pixels_differing=failed_file[
              json_summary_constants.JSONKEY_PERCENT_PIXELS_DIFFERING],
          max_diff_per_channel=failed_file[
              json_summary_constants.JSONKEY_MAX_DIFF_PER_CHANNEL],
          perceptual_diff=failed_file[
              json_summary_constants.JSONKEY_PERCEPTUAL_DIFF])
      file_info_list.append(file_info)

    worker_name_to_info[worker_name] = WorkerInfo(
        worker_name=worker_name,
        failed_files=file_info_list,
        skps_location=worker_data[json_summary_constants.JSONKEY_SKPS_LOCATION],
        files_location_nopatch=worker_data[
            json_summary_constants.JSONKEY_FILES_LOCATION_NOPATCH],
        files_location_withpatch=worker_data[
            json_summary_constants.JSONKEY_FILES_LOCATION_WITHPATCH],
        files_location_diffs=worker_data[
            json_summary_constants.JSONKEY_FILES_LOCATION_DIFFS],
        files_location_whitediffs=worker_data[
            json_summary_constants.JSONKEY_FILES_LOCATION_WHITE_DIFFS])

  return worker_name_to_info
def OutputToHTML(worker_name_to_info, output_html_dir, absolute_url,
                 render_pictures_args, nopatch_gpu, withpatch_gpu):
  """Outputs a worker name to WorkerInfo dict into HTML.

  Creates a top level HTML file that lists worker names to the number of
  failing files. Also creates X number of HTML files that lists all the
  failing files and displays the nopatch and withpatch images. X here
  corresponds to the number of workers that have failing files.

  Args:
    worker_name_to_info: dict of worker name -> WorkerInfo.
    output_html_dir: directory the HTML files are written into.
    absolute_url: base URL prefix for links inside the generated pages.
    render_pictures_args: render_pictures arguments, shown on the index.
    nopatch_gpu / withpatch_gpu: whether each run used the GPU (shown).
  """
  def gs_to_http(gs_path):
    # str.lstrip('gs://') strips the character set {g, s, :, /} (and so
    # could eat the start of a bucket name); remove only the literal
    # 'gs://' prefix instead.
    if gs_path.startswith('gs://'):
      gs_path = gs_path[len('gs://'):]
    return posixpath.join(STORAGE_HTTP_BASE, gs_path)

  # Get total failing file count.
  total_failing_files = 0
  for worker_info in worker_name_to_info.values():
    total_failing_files += len(worker_info.failed_files)

  # Sort by worker name (avoid shadowing the builtin 'tuple').
  worker_name_to_info_items = sorted(
      worker_name_to_info.items(), key=lambda item: item[0])
  rendered = loader.render_to_string(
      'workers_totals.html',
      {WORKER_NAME_TO_INFO_ITEMS_TEMPLATE_VAR: worker_name_to_info_items,
       ABSOLUTE_URL_TEMPLATE_VAR: absolute_url,
       RENDER_PICTURES_ARGS_TEMPLATE_VAR: render_pictures_args,
       NOPATCH_GPU_TEMPLATE_VAR: nopatch_gpu,
       WITHPATCH_GPU_TEMPLATE_VAR: withpatch_gpu,
       TOTAL_FAILING_FILES_TEMPLATE_VAR: total_failing_files}
  )
  with open(os.path.join(output_html_dir, 'index.html'), 'wb') as index_html:
    index_html.write(rendered)

  rendered = loader.render_to_string(
      'list_of_all_files.html',
      {WORKER_NAME_TO_INFO_ITEMS_TEMPLATE_VAR: worker_name_to_info_items,
       ABSOLUTE_URL_TEMPLATE_VAR: absolute_url}
  )
  with open(os.path.join(output_html_dir,
                         'list_of_all_files.html'), 'wb') as files_html:
    files_html.write(rendered)

  # One detail page per failing file.
  for worker_info in worker_name_to_info.values():
    for file_info in worker_info.failed_files:
      rendered = loader.render_to_string(
          'single_file_details.html',
          {FILE_INFO_TEMPLATE_VAR: file_info,
           ABSOLUTE_URL_TEMPLATE_VAR: absolute_url,
           GS_FILES_LOCATION_NO_PATCH_TEMPLATE_VAR: gs_to_http(
               worker_info.files_location_nopatch),
           GS_FILES_LOCATION_WITH_PATCH_TEMPLATE_VAR: gs_to_http(
               worker_info.files_location_withpatch),
           GS_FILES_LOCATION_DIFFS_TEMPLATE_VAR: gs_to_http(
               worker_info.files_location_diffs),
           GS_FILES_LOCATION_WHITE_DIFFS_TEMPLATE_VAR: gs_to_http(
               worker_info.files_location_whitediffs)}
      )
      with open(os.path.join(output_html_dir, '%s.html' % file_info.file_name),
                'wb') as per_file_html:
        per_file_html.write(rendered)
if '__main__' == __name__:
  # Command-line entry point: parse required flags, combine the JSON
  # summaries and write the HTML report.
  option_parser = optparse.OptionParser()
  option_parser.add_option(
      '', '--json_summaries_dir',
      help='Location of JSON summary files from all GCE workers.')
  option_parser.add_option(
      '', '--output_html_dir',
      help='The absolute path of the HTML dir that will contain the results of'
           ' this script.')
  option_parser.add_option(
      '', '--absolute_url',
      help='Servers like Google Storage require an absolute url for links '
           'within the HTML output files.',
      default='')
  option_parser.add_option(
      '', '--render_pictures_args',
      help='The arguments specified by the user to the render_pictures binary.')
  option_parser.add_option(
      '', '--nopatch_gpu',
      help='Specifies whether the nopatch render_pictures run was done with '
           'GPU.')
  option_parser.add_option(
      '', '--withpatch_gpu',
      help='Specifies whether the withpatch render_pictures run was done with '
           'GPU.')
  options, unused_args = option_parser.parse_args()
  # All flags except --absolute_url are mandatory.
  if (not options.json_summaries_dir or not options.output_html_dir
      or not options.render_pictures_args or not options.nopatch_gpu
      or not options.withpatch_gpu):
    option_parser.error(
        'Must specify json_summaries_dir, output_html_dir, '
        'render_pictures_args, nopatch_gpu and withpatch_gpu')
  OutputToHTML(
      worker_name_to_info=CombineJsonSummaries(options.json_summaries_dir),
      output_html_dir=options.output_html_dir,
      absolute_url=options.absolute_url,
      render_pictures_args=options.render_pictures_args,
      nopatch_gpu=options.nopatch_gpu,
      withpatch_gpu=options.withpatch_gpu)
e60b7bc33f755ceeef3bf3ac06e323a9eeeb6766 | 651 | py | Python | python/DL_for_HTT/common/model_inputs/PuppiMET_with_METcov_j1j2jr_Npu_no_mT.py | lucastorterotot/DL_for_HTT_mass | 2aff7741b5f497114dd826f9b167f66f2cdaa329 | [
"MIT"
] | 1 | 2021-09-22T09:45:49.000Z | 2021-09-22T09:45:49.000Z | python/DL_for_HTT/common/model_inputs/PuppiMET_with_METcov_j1j2jr_Npu_no_mT.py | dzuolo/DL_for_HTT_mass | 79d56f3fa5b44642c9c64ffdadf5d87325ad032f | [
"MIT"
] | null | null | null | python/DL_for_HTT/common/model_inputs/PuppiMET_with_METcov_j1j2jr_Npu_no_mT.py | dzuolo/DL_for_HTT_mass | 79d56f3fa5b44642c9c64ffdadf5d87325ad032f | [
"MIT"
] | 1 | 2021-06-17T07:46:29.000Z | 2021-06-17T07:46:29.000Z | inputs = [
"tau1_pt_reco",
"tau1_eta_reco",
"tau1_phi_reco",
"tau2_pt_reco",
"tau2_eta_reco",
"tau2_phi_reco",
"jet1_pt_reco",
"jet1_eta_reco",
"jet1_phi_reco",
"jet2_pt_reco",
"jet2_eta_reco",
"jet2_phi_reco",
"remaining_jets_pt_reco",
"remaining_jets_eta_reco",
"remaining_jets_phi_reco",
"remaining_jets_N_reco",
"PuppiMET_pt_reco",
"PuppiMET_phi_reco",
"MET_covXX_reco",
"MET_covXY_reco",
"MET_covYY_reco",
# "MET_significance_reco",
# "PuppimT1_reco",
# "PuppimT2_reco",
# "PuppimTtt_reco",
# "PuppimTtot_reco",
"PU_npvsGood_reco",
]
| 21.7 | 30 | 0.635945 |
29565a091c70b332f38d2ba9d4d41c89f39a3c8d | 632 | py | Python | examples/__init__.py | sapshah-cisco/cobra | e2b5a75495931844180b05d776c15829e63f0dab | [
"Apache-2.0"
] | 93 | 2015-02-11T01:41:22.000Z | 2022-02-03T22:55:57.000Z | examples/__init__.py | sapshah-cisco/cobra | e2b5a75495931844180b05d776c15829e63f0dab | [
"Apache-2.0"
] | 112 | 2015-02-23T22:20:29.000Z | 2022-03-22T21:46:52.000Z | examples/__init__.py | sapshah-cisco/cobra | e2b5a75495931844180b05d776c15829e63f0dab | [
"Apache-2.0"
] | 61 | 2015-02-22T01:34:01.000Z | 2022-01-19T09:50:21.000Z | # Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Examples for the ACI Python SDK (cobra)."""
| 39.5 | 74 | 0.751582 |
f970152cd506829e3db32b4522e086cf58ae60a6 | 1,144 | py | Python | AnalyzeImage.py | emilia-smolko/serverless-ai | e7cce73cd34e66917efbc5d5f993c26deafa093a | [
"MIT"
] | null | null | null | AnalyzeImage.py | emilia-smolko/serverless-ai | e7cce73cd34e66917efbc5d5f993c26deafa093a | [
"MIT"
] | null | null | null | AnalyzeImage.py | emilia-smolko/serverless-ai | e7cce73cd34e66917efbc5d5f993c26deafa093a | [
"MIT"
] | null | null | null | import json
import boto3
import logging
# Module scope so the logger and AWS clients are created once per Lambda
# container and reused across warm invocations.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
rekognition = boto3.client('rekognition')
s3 = boto3.client('s3')
def lambda_handler(event, context):
    """AWS Lambda entry point: run celebrity recognition on the S3 object
    that triggered the event and store a text description in the bucket.

    Args:
        event: S3 put-event payload; the first record's bucket/key is used.
        context: Lambda context (unused).

    Returns:
        dict with 'statusCode' 200 and the JSON-encoded description.
    """
    import posixpath  # slash-aware splitext for S3 keys

    logger.info('Starting Analyzing Image for event: %s', event)
    s3_record = event['Records'][0]['s3']
    bucket = s3_record['bucket']['name']
    key = s3_record['object']['key']

    # Invoke Rekognition service
    celebrities = rekognition.recognize_celebrities(
        Image={'S3Object': {'Bucket': bucket, 'Name': key}})['CelebrityFaces']

    if celebrities:
        names = [celebrity['Name'] for celebrity in celebrities]
        description = "I can see " + ', '.join(names) + " on the picture!"
    else:
        description = "I can't see anybody famous on the picture!"
    logger.info('Description: %s', description)

    # Saving description to S3.  splitext handles extensions of any length;
    # the original key[:-4] assumed exactly a 3-character extension and
    # mangled keys like 'photo.jpeg' or extensionless keys.
    txt_name = 'tmp/' + posixpath.splitext(key)[0] + ".txt"
    s3.put_object(Body=description, Bucket=bucket, Key=txt_name)
    return {
        'statusCode': 200,
        'body': json.dumps(description)
    }
8775fb1a59d61480629e91ce96942e3e87a15b17 | 12,059 | py | Python | rclpy/rclpy/qos.py | j-rivero/rclpy | 30eeae9b30ab15209798d62b501d1dec633e1db3 | [
"Apache-2.0"
] | null | null | null | rclpy/rclpy/qos.py | j-rivero/rclpy | 30eeae9b30ab15209798d62b501d1dec633e1db3 | [
"Apache-2.0"
] | null | null | null | rclpy/rclpy/qos.py | j-rivero/rclpy | 30eeae9b30ab15209798d62b501d1dec633e1db3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016-2017 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
from enum import IntEnum
from rclpy.duration import Duration
from rclpy.impl.implementation_singleton import rclpy_action_implementation as _rclpy_action
from rclpy.impl.implementation_singleton import rclpy_implementation as _rclpy
class InvalidQoSProfileException(Exception):
    """Raised when constructing a QoSProfile with invalid arguments."""

    def __init__(self, *args):
        # Prefix every instance's args with a fixed identifying message.
        super().__init__('Invalid QoSProfile', *args)
class QoSProfile:
    """Define Quality of Service policies."""

    # default QoS profile not exposed to the user to encourage them to think about QoS settings
    # (fetched once from the rmw layer; used as the fallback for every
    # setting not passed to __init__).
    __qos_profile_default_dict = _rclpy.rclpy_get_rmw_qos_profile('qos_profile_default')

    # Backing attributes for the public properties; __slots__ also blocks
    # accidental creation of misspelled attributes.
    __slots__ = [
        '_history',
        '_depth',
        '_reliability',
        '_durability',
        '_lifespan',
        '_deadline',
        '_liveliness',
        '_liveliness_lease_duration',
        '_avoid_ros_namespace_conventions',
    ]
    def __init__(self, **kwargs):
        """Create a QoSProfile from keyword settings.

        Accepted keywords mirror __slots__ without the leading underscore
        (history, depth, reliability, durability, lifespan, deadline,
        liveliness, liveliness_lease_duration,
        avoid_ros_namespace_conventions); anything else fails the assert.
        Unspecified settings fall back to the rmw default profile.

        :raises InvalidQoSProfileException: if neither history nor depth is
            given, or if history is KEEP_LAST but no depth is provided.
        """
        assert all('_' + key in self.__slots__ for key in kwargs.keys()), \
            'Invalid arguments passed to constructor: %r' % kwargs.keys()

        # A missing history with a depth present implies KEEP_LAST.
        if 'history' not in kwargs:
            if 'depth' not in kwargs:
                raise InvalidQoSProfileException('History and/or depth settings are required.')
            kwargs['history'] = QoSHistoryPolicy.RMW_QOS_POLICY_HISTORY_KEEP_LAST

        self.history = kwargs.get('history')

        # KEEP_LAST is meaningless without a queue depth.
        if (
            QoSHistoryPolicy.RMW_QOS_POLICY_HISTORY_KEEP_LAST == self.history and
            'depth' not in kwargs
        ):
            raise InvalidQoSProfileException('History set to KEEP_LAST without a depth setting.')

        # Remaining settings: explicit value or the rmw default.
        self.depth = kwargs.get('depth', QoSProfile.__qos_profile_default_dict['depth'])
        self.reliability = kwargs.get(
            'reliability', QoSProfile.__qos_profile_default_dict['reliability'])
        self.durability = kwargs.get(
            'durability', QoSProfile.__qos_profile_default_dict['durability'])
        self.lifespan = kwargs.get('lifespan', QoSProfile.__qos_profile_default_dict['lifespan'])
        self.deadline = kwargs.get('deadline', QoSProfile.__qos_profile_default_dict['deadline'])
        self.liveliness = kwargs.get(
            'liveliness', QoSProfile.__qos_profile_default_dict['liveliness'])
        self.liveliness_lease_duration = kwargs.get(
            'liveliness_lease_duration',
            QoSProfile.__qos_profile_default_dict['liveliness_lease_duration'])
        # NOTE(review): the avoid_ros_namespace_conventions property setter
        # is presumably defined later in this class (outside this view).
        self.avoid_ros_namespace_conventions = kwargs.get(
            'avoid_ros_namespace_conventions',
            QoSProfile.__qos_profile_default_dict['avoid_ros_namespace_conventions'])
@property
def history(self):
"""
Get field 'history'.
:returns: history attribute
:rtype: QoSHistoryPolicy
"""
return self._history
@history.setter
def history(self, value):
assert isinstance(value, QoSHistoryPolicy) or isinstance(value, int)
self._history = QoSHistoryPolicy(value)
@property
def reliability(self):
"""
Get field 'reliability'.
:returns: reliability attribute
:rtype: QoSReliabilityPolicy
"""
return self._reliability
@reliability.setter
def reliability(self, value):
assert isinstance(value, QoSReliabilityPolicy) or isinstance(value, int)
self._reliability = QoSReliabilityPolicy(value)
@property
def durability(self):
"""
Get field 'durability'.
:returns: durability attribute
:rtype: QoSDurabilityPolicy
"""
return self._durability
@durability.setter
def durability(self, value):
assert isinstance(value, QoSDurabilityPolicy) or isinstance(value, int)
self._durability = QoSDurabilityPolicy(value)
@property
def depth(self):
"""
Get field 'depth'.
:returns: depth attribute
:rtype: int
"""
return self._depth
@depth.setter
def depth(self, value):
assert isinstance(value, int)
self._depth = value
@property
def lifespan(self):
"""
Get field 'lifespan'.
:returns: lifespan attribute
:rtype: Duration
"""
return self._lifespan
@lifespan.setter
def lifespan(self, value):
assert isinstance(value, Duration)
self._lifespan = value
@property
def deadline(self):
"""
Get field 'deadline'.
:returns: deadline attribute.
:rtype: Duration
"""
return self._deadline
@deadline.setter
def deadline(self, value):
assert isinstance(value, Duration)
self._deadline = value
@property
def liveliness(self):
"""
Get field 'liveliness'.
:returns: liveliness attribute
:rtype: QoSLivelinessPolicy
"""
return self._liveliness
@liveliness.setter
def liveliness(self, value):
assert isinstance(value, (QoSLivelinessPolicy, int))
self._liveliness = QoSLivelinessPolicy(value)
@property
def liveliness_lease_duration(self):
"""
Get field 'liveliness_lease_duration'.
:returns: liveliness_lease_duration attribute.
:rtype: Duration
"""
return self._liveliness_lease_duration
@liveliness_lease_duration.setter
def liveliness_lease_duration(self, value):
assert isinstance(value, Duration)
self._liveliness_lease_duration = value
@property
def avoid_ros_namespace_conventions(self):
"""
Get field 'avoid_ros_namespace_conventions'.
:returns: avoid_ros_namespace_conventions attribute
:rtype: bool
"""
return self._avoid_ros_namespace_conventions
@avoid_ros_namespace_conventions.setter
def avoid_ros_namespace_conventions(self, value):
assert isinstance(value, bool)
self._avoid_ros_namespace_conventions = value
def get_c_qos_profile(self):
return _rclpy.rclpy_convert_from_py_qos_policy(
self.history,
self.depth,
self.reliability,
self.durability,
self.lifespan.get_c_duration(),
self.deadline.get_c_duration(),
self.liveliness,
self.liveliness_lease_duration.get_c_duration(),
self.avoid_ros_namespace_conventions,
)
def __eq__(self, other):
if not isinstance(other, QoSProfile):
return False
return all(
self.__getattribute__(slot) == other.__getattribute__(slot)
for slot in self.__slots__)
class QoSPolicyEnum(IntEnum):
    """
    Base class for QoS policy enumerations.

    Provides helpers that expose shortened, typing-friendly names (the
    member aliases that do not carry the ``RMW`` prefix) for utilities.
    """
    @classmethod
    def short_keys(cls):
        """Return a list of shortened typing-friendly enum values."""
        return [name.lower() for name in cls.__members__ if not name.startswith('RMW')]

    @classmethod
    def get_from_short_key(cls, name):
        """Retrieve a policy type from a short name, case-insensitive."""
        return cls[name.upper()].value

    @property
    def short_key(self):
        """Shortened name of this value, or raise AttributeError if absent."""
        for name, member in self.__class__.__members__.items():
            if not name.startswith('RMW') and member == self.value:
                return name.lower()
        raise AttributeError(
            'failed to find value %s in %s' %
            (self.value, self.__class__.__name__))
class HistoryPolicy(QoSPolicyEnum):
    """
    Enum for QoS History settings.

    This enum matches the one defined in rmw/types.h
    """
    RMW_QOS_POLICY_HISTORY_SYSTEM_DEFAULT = 0
    # Short alias without the RMW prefix (picked up by QoSPolicyEnum helpers)
    SYSTEM_DEFAULT = RMW_QOS_POLICY_HISTORY_SYSTEM_DEFAULT
    RMW_QOS_POLICY_HISTORY_KEEP_LAST = 1
    KEEP_LAST = RMW_QOS_POLICY_HISTORY_KEEP_LAST
    RMW_QOS_POLICY_HISTORY_KEEP_ALL = 2
    KEEP_ALL = RMW_QOS_POLICY_HISTORY_KEEP_ALL
# Alias with the old name, for backward compatibility
QoSHistoryPolicy = HistoryPolicy
class ReliabilityPolicy(QoSPolicyEnum):
    """
    Enum for QoS Reliability settings.

    This enum matches the one defined in rmw/types.h
    """
    RMW_QOS_POLICY_RELIABILITY_SYSTEM_DEFAULT = 0
    # Short alias without the RMW prefix (picked up by QoSPolicyEnum helpers)
    SYSTEM_DEFAULT = RMW_QOS_POLICY_RELIABILITY_SYSTEM_DEFAULT
    RMW_QOS_POLICY_RELIABILITY_RELIABLE = 1
    RELIABLE = RMW_QOS_POLICY_RELIABILITY_RELIABLE
    RMW_QOS_POLICY_RELIABILITY_BEST_EFFORT = 2
    BEST_EFFORT = RMW_QOS_POLICY_RELIABILITY_BEST_EFFORT
# Alias with the old name, for backward compatibility
QoSReliabilityPolicy = ReliabilityPolicy
class DurabilityPolicy(QoSPolicyEnum):
    """
    Enum for QoS Durability settings.

    This enum matches the one defined in rmw/types.h
    """
    RMW_QOS_POLICY_DURABILITY_SYSTEM_DEFAULT = 0
    # Short alias without the RMW prefix (picked up by QoSPolicyEnum helpers)
    SYSTEM_DEFAULT = RMW_QOS_POLICY_DURABILITY_SYSTEM_DEFAULT
    RMW_QOS_POLICY_DURABILITY_TRANSIENT_LOCAL = 1
    TRANSIENT_LOCAL = RMW_QOS_POLICY_DURABILITY_TRANSIENT_LOCAL
    RMW_QOS_POLICY_DURABILITY_VOLATILE = 2
    VOLATILE = RMW_QOS_POLICY_DURABILITY_VOLATILE
# Alias with the old name, for backward compatibility
QoSDurabilityPolicy = DurabilityPolicy
class LivelinessPolicy(QoSPolicyEnum):
    """
    Enum for QoS Liveliness settings.

    This enum matches the one defined in rmw/types.h
    """
    RMW_QOS_POLICY_LIVELINESS_SYSTEM_DEFAULT = 0
    # Short alias without the RMW prefix (picked up by QoSPolicyEnum helpers)
    SYSTEM_DEFAULT = RMW_QOS_POLICY_LIVELINESS_SYSTEM_DEFAULT
    RMW_QOS_POLICY_LIVELINESS_AUTOMATIC = 1
    AUTOMATIC = RMW_QOS_POLICY_LIVELINESS_AUTOMATIC
    RMW_QOS_POLICY_LIVELINESS_MANUAL_BY_NODE = 2
    MANUAL_BY_NODE = RMW_QOS_POLICY_LIVELINESS_MANUAL_BY_NODE
    RMW_QOS_POLICY_LIVELINESS_MANUAL_BY_TOPIC = 3
    MANUAL_BY_TOPIC = RMW_QOS_POLICY_LIVELINESS_MANUAL_BY_TOPIC
# Alias with the old name, for backward compatibility
QoSLivelinessPolicy = LivelinessPolicy
# Predefined QoS profiles, each built from the settings the rmw layer
# reports for the corresponding named profile.
qos_profile_system_default = QoSProfile(**_rclpy.rclpy_get_rmw_qos_profile(
    'qos_profile_system_default'))
qos_profile_sensor_data = QoSProfile(**_rclpy.rclpy_get_rmw_qos_profile(
    'qos_profile_sensor_data'))
qos_profile_services_default = QoSProfile(**_rclpy.rclpy_get_rmw_qos_profile(
    'qos_profile_services_default'))
qos_profile_parameters = QoSProfile(**_rclpy.rclpy_get_rmw_qos_profile(
    'qos_profile_parameters'))
qos_profile_parameter_events = QoSProfile(**_rclpy.rclpy_get_rmw_qos_profile(
    'qos_profile_parameter_events'))
# Profile for action status topics, provided by the rcl_action layer.
qos_profile_action_status_default = QoSProfile(
    **_rclpy_action.rclpy_action_get_rmw_qos_profile('rcl_action_qos_profile_status_default'))
class QoSPresetProfiles(Enum):
    """Expose the predefined QoS profiles by name for typing-friendly lookup."""
    SYSTEM_DEFAULT = qos_profile_system_default
    SENSOR_DATA = qos_profile_sensor_data
    SERVICES_DEFAULT = qos_profile_services_default
    PARAMETERS = qos_profile_parameters
    PARAMETER_EVENTS = qos_profile_parameter_events
    ACTION_STATUS_DEFAULT = qos_profile_action_status_default

    # NOTE: the helpers below duplicate QoSPolicyEnum. Our supported version
    # of Python3 (3.5) doesn't have a fix that allows mixins on Enum.
    # (This note used to be a stray no-op string literal in the class body.)
    @classmethod
    def short_keys(cls):
        """Return a list of shortened typing-friendly enum values."""
        return [k.lower() for k in cls.__members__.keys() if not k.startswith('RMW')]

    @classmethod
    def get_from_short_key(cls, name):
        """Retrieve a policy type from a short name, case-insensitive."""
        return cls[name.upper()].value
| 31.902116 | 97 | 0.696161 |
08d6629c78adb4a596f7c7a9cd3ca0fca6d141d1 | 3,189 | py | Python | python/paddle/tests/dist_hapi_mnist_dynamic.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | 11 | 2016-08-29T07:43:26.000Z | 2016-08-29T07:51:24.000Z | python/paddle/tests/dist_hapi_mnist_dynamic.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | null | null | null | python/paddle/tests/dist_hapi_mnist_dynamic.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | 1 | 2021-12-09T08:59:17.000Z | 2021-12-09T08:59:17.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
import contextlib
import paddle
import paddle.fluid as fluid
from paddle import Model, set_device
from paddle.static import InputSpec as Input
from paddle.nn.layer.loss import CrossEntropyLoss
from paddle.metric import Accuracy
from paddle.vision.models import LeNet
from paddle.vision.datasets import MNIST
class MnistDataset(MNIST):
    """MNIST wrapper that reshapes images and can omit labels."""

    def __init__(self, mode, return_label=True):
        super(MnistDataset, self).__init__(mode=mode)
        # When False, __getitem__ yields image-only 1-tuples (for predict).
        self.return_label = return_label

    def __getitem__(self, idx):
        image = np.reshape(self.images[idx], [1, 28, 28])
        if not self.return_label:
            return image,
        label = np.array(self.labels[idx]).astype('int64')
        return image, label

    def __len__(self):
        return len(self.images)
def compute_accuracy(pred, gt):
    """Fraction of rows where the argmax of `pred` matches the label in `gt`.

    pred -- 2-D score array, one row per sample
    gt -- per-sample labels (broadcast against a column of predictions)
    """
    predicted = np.argmax(pred, -1)
    labels = np.array(gt)
    hits = predicted[:, np.newaxis] == labels
    return np.sum(hits) / hits.shape[0]
@unittest.skipIf(not fluid.is_compiled_with_cuda(),
                 'CPU testing is not supported')
class TestDistTraning(unittest.TestCase):
    """End-to-end fit/evaluate/predict smoke test for LeNet on MNIST (GPU only)."""

    def test_dynamic_multiple_gpus(self):
        """Train LeNet for 2 epochs and check predict output size and accuracy."""
        device = set_device('gpu')
        im_shape = (-1, 1, 28, 28)
        batch_size = 128
        inputs = [Input(im_shape, 'float32', 'image')]
        labels = [Input([None, 1], 'int64', 'label')]
        model = Model(LeNet(), inputs, labels)
        optim = fluid.optimizer.Momentum(learning_rate=0.001,
                                         momentum=.9,
                                         parameter_list=model.parameters())
        model.prepare(optim, CrossEntropyLoss(), Accuracy())
        train_dataset = MnistDataset(mode='train')
        val_dataset = MnistDataset(mode='test')
        # Label-free copy of the test split, used for predict() below.
        test_dataset = MnistDataset(mode='test', return_label=False)
        # Progress bar logger printing every 50 steps.
        cbk = paddle.callbacks.ProgBarLogger(50)
        model.fit(train_dataset,
                  val_dataset,
                  epochs=2,
                  batch_size=batch_size,
                  callbacks=cbk)
        eval_result = model.evaluate(val_dataset, batch_size=batch_size)
        output = model.predict(test_dataset,
                               batch_size=batch_size,
                               stack_outputs=True)
        # One prediction row per test sample.
        np.testing.assert_equal(output[0].shape[0], len(test_dataset))
        # Accuracy recomputed from raw predictions must match evaluate()'s metric.
        acc = compute_accuracy(output[0], val_dataset.labels)
        np.testing.assert_allclose(acc, eval_result['acc'])
# Run the unittest suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
| 30.663462 | 75 | 0.655378 |
22e647a7e0dc59cf1ac33d91f7b48dc6f5fb9127 | 1,724 | py | Python | frontend/animation/util.py | eldstal/CTF | 29ae870a362257c132ee729befbed86473cfb21d | [
"MIT"
] | null | null | null | frontend/animation/util.py | eldstal/CTF | 29ae870a362257c132ee729befbed86473cfb21d | [
"MIT"
] | 2 | 2021-02-07T21:31:09.000Z | 2021-02-18T10:33:01.000Z | frontend/animation/util.py | eldstal/CTF | 29ae870a362257c132ee729befbed86473cfb21d | [
"MIT"
] | null | null | null | from asciimatics.effects import Effect
from asciimatics.screen import Screen
# 256-color terminal palette indices forming a rainbow gradient.
RAINBOW_256 = [ 160, 196, 202, 208, 214, 220, 226,
                192, 191, 190,120, 119, 118, 82, 46,
                49, 51, 45, 39, 33, 27, 21, 19, 55,
                56, 57, 128, 129, 165, 52, 88 ]

# Fallback rainbow for terminals limited to the 8 basic colors.
RAINBOW_8 = [
  Screen.COLOUR_RED,
  Screen.COLOUR_YELLOW,
  Screen.COLOUR_GREEN,
  Screen.COLOUR_CYAN,
  Screen.COLOUR_BLUE,
  Screen.COLOUR_MAGENTA,
]

# The cool characters from codepage 437
NOISE_DOS = (
    u"┤╡╢╖╕╣║╗╝╜╛┐└┴┬├─┼╞╟╚╔╩╦╠═╬╧╨╤╥╙╘╒╓╫╪┘┌" +
    u"αßΓπΣσµτΦΘΩδ∞φε" +
    u"☺☻♥♦♣♠•◘○◙♂♀♪♫☼►◄↕‼¶§▬↨↑↓→←∟↔▲▼" +
    u"∩≡±≥≤⌠⌡÷≈°∙·√ⁿ²■"
)
class ScreenShot(Effect):
    """
    Copies screen content on construction, displays it in a single frame on playback.
    """

    def __init__(self, screen, **kwargs):
        """
        :param screen: The Screen being used for the Scene.
        """
        super(ScreenShot, self).__init__(screen, **kwargs)
        # get_from doesn't support unicode, so this isn't perfect.
        # The proper solution is probably to have a Scene that we print to in the first place...?
        # Snapshot every cell as (char_code, fg, attr, bg) tuples.
        self._data = [ [ screen.get_from(x,y) for x in range(screen.width) ] for y in range(screen.height) ]

    def reset(self):
        # Nothing to reset: the snapshot is immutable.
        pass

    @property
    def stop_frame(self):
        # The effect completes after a single frame.
        return 1

    def _update(self, frame_no):
        # Repaint the stored snapshot cell by cell.
        for y in range(self._screen.height):
            for x in range(self._screen.width):
                txt,fg,attr,bg = self._data[y][x]
                self._screen.print_at(chr(txt), x, y, fg, attr, bg, transparent=False)
5393c76593bab6c8b4d0589e66085ef812a754da | 20,494 | py | Python | cyberbattle/agents/baseline/agent_dql.py | br0kej/CyberBattleSim | 4663616c5348f6ea87e788f53b0a0da75fe83c60 | [
"MIT"
] | 1 | 2021-04-16T06:31:58.000Z | 2021-04-16T06:31:58.000Z | cyberbattle/agents/baseline/agent_dql.py | br0kej/CyberBattleSim | 4663616c5348f6ea87e788f53b0a0da75fe83c60 | [
"MIT"
] | null | null | null | cyberbattle/agents/baseline/agent_dql.py | br0kej/CyberBattleSim | 4663616c5348f6ea87e788f53b0a0da75fe83c60 | [
"MIT"
] | 1 | 2021-11-11T14:04:01.000Z | 2021-11-11T14:04:01.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Function DeepQLearnerPolicy.optimize_model:
# Copyright (c) 2017, Pytorch contributors
# All rights reserved.
# https://github.com/pytorch/tutorials/blob/master/LICENSE
"""Deep Q-learning agent applied to chain network (notebook)
This notebooks can be run directly from VSCode, to generate a
traditional Jupyter Notebook to open in your browser
you can run the VSCode command `Export Currenty Python File As Jupyter Notebook`.
Requirements:
Nvidia CUDA drivers for WSL2: https://docs.nvidia.com/cuda/wsl-user-guide/index.html
PyTorch
"""
# pylint: disable=invalid-name
# %% [markdown]
# # Chain network CyberBattle Gym played by a Deeo Q-learning agent
# %%
from numpy import ndarray
from cyberbattle._env import cyberbattle_env
import numpy as np
from typing import List, NamedTuple, Optional, Tuple, Union
import random
# deep learning packages
from torch import Tensor
import torch.nn.functional as F
import torch.optim as optim
import torch.nn as nn
import torch
import torch.cuda
from .learner import Learner
from .agent_wrapper import EnvironmentBounds
import cyberbattle.agents.baseline.agent_wrapper as w
from .agent_randomcredlookup import CredentialCacheExploiter
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class CyberBattleStateActionModel:
    """ Define an abstraction of the state and action space
    for a CyberBattle environment, to be used to train a Q-function.
    """

    def __init__(self, ep: EnvironmentBounds):
        self.ep = ep

        # Features describing the environment as a whole (not a specific node).
        self.global_features = w.ConcatFeatures(ep, [
            # w.Feature_discovered_node_count(ep),
            # w.Feature_owned_node_count(ep),
            w.Feature_discovered_notowned_node_count(ep, None)

            # w.Feature_discovered_ports(ep),
            # w.Feature_discovered_ports_counts(ep),
            # w.Feature_discovered_ports_sliding(ep),
            # w.Feature_discovered_credential_count(ep),
            # w.Feature_discovered_nodeproperties_sliding(ep),
        ])

        # Features describing one node (the candidate source of an attack).
        self.node_specific_features = w.ConcatFeatures(ep, [
            # w.Feature_actions_tried_at_node(ep),
            w.Feature_success_actions_at_node(ep),
            w.Feature_failed_actions_at_node(ep),
            w.Feature_active_node_properties(ep),
            w.Feature_active_node_age(ep)
            # w.Feature_active_node_id(ep)
        ])

        # Full state vector = global features followed by node features.
        self.state_space = w.ConcatFeatures(ep, self.global_features.feature_selection +
                                            self.node_specific_features.feature_selection)

        self.action_space = w.AbstractAction(ep)

    def get_state_astensor(self, state: w.StateAugmentation):
        """Return the state feature vector as a float32 torch tensor with a batch dim."""
        state_vector = self.state_space.get(state, node=None)
        state_vector_float = np.array(state_vector, dtype=np.float32)
        state_tensor = torch.from_numpy(state_vector_float).unsqueeze(0)
        return state_tensor

    def implement_action(
            self,
            wrapped_env: w.AgentWrapper,
            actor_features: ndarray,
            abstract_action: np.int32) -> Tuple[str, Optional[cyberbattle_env.Action], Optional[int]]:
        """Specialize an abstract model action into a CyberBattle gym action.

            actor_features -- the desired features of the actor to use (source CyberBattle node)
            abstract_action -- the desired type of attack (connect, local, remote).

            Returns a gym environment implementing the desired attack at a node with the desired embedding.
            The returned triple is (action_style, gym_action or None, source_node or None).
        """
        observation = wrapped_env.state.observation

        # Pick source node at random (owned and with the desired feature encoding)
        potential_source_nodes = [
            from_node
            for from_node in w.owned_nodes(observation)
            if np.all(actor_features == self.node_specific_features.get(wrapped_env.state, from_node))
        ]

        if len(potential_source_nodes) > 0:
            source_node = np.random.choice(potential_source_nodes)

            gym_action = self.action_space.specialize_to_gymaction(
                source_node, observation, np.int32(abstract_action))

            if not gym_action:
                # No concrete gym action matches the abstract action here.
                return "exploit[undefined]->explore", None, None

            elif wrapped_env.env.is_action_valid(gym_action, observation['action_mask']):
                return "exploit", gym_action, source_node
            else:
                # Concrete action exists but is masked out as invalid.
                return "exploit[invalid]->explore", None, None
        else:
            # No owned node matches the requested actor features.
            return "exploit[no_actor]->explore", None, None
# %%
# Deep Q-learning
class Transition(NamedTuple):
    """One taken transition and its outcome"""
    state: Union[Tuple[Tensor], List[Tensor]]  # state before the action
    action: Union[Tuple[Tensor], List[Tensor]]  # abstract action taken
    next_state: Union[Tuple[Tensor], List[Tensor]]  # resulting state (None-entries mark terminal)
    reward: Union[Tuple[Tensor], List[Tensor]]  # reward received for the action
class ReplayMemory(object):
    """Fixed-capacity circular buffer of transitions for experience replay."""

    def __init__(self, capacity):
        self.capacity = capacity
        self.memory = []
        # Index of the slot the next transition will occupy once full.
        self.position = 0

    def push(self, *args):
        """Saves a transition."""
        if len(self.memory) == self.capacity:
            # Buffer full: overwrite the oldest entry in place.
            self.memory[self.position] = Transition(*args)
        else:
            # Still filling up: position always equals len(memory) here.
            self.memory.append(Transition(*args))
        self.position = (self.position + 1) % self.capacity

    def sample(self, batch_size):
        """Draw a uniform random batch of stored transitions."""
        return random.sample(self.memory, batch_size)

    def __len__(self):
        return len(self.memory)
class DQN(nn.Module):
    """The Deep Neural Network used to estimate the Q function"""

    def __init__(self, ep: EnvironmentBounds):
        super(DQN, self).__init__()

        # Input/output sizes are derived from the state/action abstraction.
        model = CyberBattleStateActionModel(ep)
        linear_input_size = len(model.state_space.dim_sizes)
        output_size = model.action_space.flat_size()

        # Fully-connected stack: input -> 1024 -> 512 -> 128 -> Q-values.
        self.hidden_layer1 = nn.Linear(linear_input_size, 1024)
        self.hidden_layer2 = nn.Linear(1024, 512)
        self.hidden_layer3 = nn.Linear(512, 128)
        self.head = nn.Linear(128, output_size)

    def forward(self, x):
        """Map a batch of state vectors to one Q-value per abstract action."""
        hidden = F.relu(self.hidden_layer1(x))
        hidden = F.relu(self.hidden_layer2(hidden))
        hidden = F.relu(self.hidden_layer3(hidden))
        flattened = hidden.view(hidden.size(0), -1)
        return self.head(flattened)
def random_argmax(array):
    """Just like `argmax` but if there are multiple elements with the max
    return a random index to break ties instead of returning the first one.

    array -- a 1-D array-like of comparable values (non-empty)
    Returns (max_value, index_of_a_maximum).
    """
    max_value = np.max(array)
    # Indices of every occurrence of the maximum value.
    candidates = np.flatnonzero(np.asarray(array) == max_value)
    # Draw a scalar directly: the previous `int(np.random.choice(..., size=1))`
    # relied on the deprecated size-1 ndarray -> int conversion.
    max_index = int(np.random.choice(candidates))
    return max_value, max_index
class ChosenActionMetadata(NamedTuple):
    """Additional info about the action chosen by the DQN-induced policy"""
    abstract_action: np.int32
    actor_node: int
    actor_features: ndarray
    actor_state: ndarray

    def __repr__(self) -> str:
        return "[abstract_action={}, actor={}, state={}]".format(
            self.abstract_action, self.actor_node, self.actor_state)
class DeepQLearnerPolicy(Learner):
    """Deep Q-Learning on CyberBattle environments

    Parameters
    ==========
    ep -- global parameters of the environment
    model -- define a state and action abstraction for the gym environment
    gamma -- Q discount factor
    replay_memory_size -- size of the replay memory
    batch_size -- Deep Q-learning batch
    target_update -- Deep Q-learning replay frequency (in number of episodes)
    learning_rate -- the learning rate

    Parameters from DeepDoubleQ paper
        - learning_rate = 0.00025
        - linear epsilon decay
        - gamma = 0.99

    Pytorch code from tutorial at
    https://pytorch.org/tutorials/intermediate/reinforcement_q_learning.html
    """

    def __init__(self,
                 ep: EnvironmentBounds,
                 gamma: float,
                 replay_memory_size: int,
                 target_update: int,
                 batch_size: int,
                 learning_rate: float
                 ):
        self.stateaction_model = CyberBattleStateActionModel(ep)
        self.batch_size = batch_size
        self.gamma = gamma
        self.learning_rate = learning_rate

        # Two networks: policy_net is trained, target_net is a periodically
        # synchronized copy used to compute stable Q-value targets.
        self.policy_net = DQN(ep).to(device)
        self.target_net = DQN(ep).to(device)
        self.target_net.load_state_dict(self.policy_net.state_dict())
        self.target_net.eval()

        self.target_update = target_update

        self.optimizer = optim.RMSprop(self.policy_net.parameters(), lr=learning_rate)
        self.memory = ReplayMemory(replay_memory_size)

        self.credcache_policy = CredentialCacheExploiter()

    def parameters_as_string(self):
        """Short human-readable summary of the hyper-parameters."""
        return f'γ={self.gamma}, lr={self.learning_rate}, replaymemory={self.memory.capacity},\n' \
               f'batch={self.batch_size}, target_update={self.target_update}'

    def all_parameters_as_string(self) -> str:
        """Hyper-parameters plus the state/action abstraction dimensions."""
        model = self.stateaction_model
        return f'{self.parameters_as_string()}\n' \
            f'dimension={model.state_space.flat_size()}x{model.action_space.flat_size()}, ' \
            f'Q={[f.name() for f in model.state_space.feature_selection]} ' \
            f"-> 'abstract_action'"

    def optimize_model(self, norm_clipping=False):
        """Run one optimization step of the policy network on a replay batch.

        Does nothing until the replay memory holds at least one batch.
        norm_clipping -- clip gradients by global norm instead of per-element value.
        """
        if len(self.memory) < self.batch_size:
            return

        transitions = self.memory.sample(self.batch_size)
        # converts batch-array of Transitions to Transition of batch-arrays.
        batch = Transition(*zip(*transitions))

        # Compute a mask of non-final states and concatenate the batch elements
        # (a final state would've been the one after which simulation ended)
        non_final_mask = torch.tensor(tuple(map((lambda s: s is not None), batch.next_state)),
                                      device=device, dtype=torch.bool)
        non_final_next_states = torch.cat([s for s in batch.next_state
                                           if s is not None])
        state_batch = torch.cat(batch.state)
        action_batch = torch.cat(batch.action)
        reward_batch = torch.cat(batch.reward)

        # Compute Q(s_t, a) - the model computes Q(s_t), then we select the
        # columns of actions taken. These are the actions which would've been taken
        # for each batch state according to policy_net
        # print(f'state_batch={state_batch.shape} input={len(self.stateaction_model.state_space.dim_sizes)}')
        output = self.policy_net(state_batch)
        # print(f'output={output.shape} batch.action={transitions[0].action.shape} action_batch={action_batch.shape}')
        state_action_values = output.gather(1, action_batch)

        # Compute V(s_{t+1}) for all next states.
        # Expected values of actions for non_final_next_states are computed based
        # on the "older" target_net; selecting their best reward with max(1)[0].
        # This is merged based on the mask, such that we'll have either the expected
        # state value or 0 in case the state was final.
        next_state_values = torch.zeros(self.batch_size, device=device)
        next_state_values[non_final_mask] = self.target_net(non_final_next_states).max(1)[0].detach()

        # Compute the expected Q values
        expected_state_action_values = (next_state_values * self.gamma) + reward_batch

        # Compute Huber loss
        loss = F.smooth_l1_loss(state_action_values, expected_state_action_values.unsqueeze(1))

        # Optimize the model
        self.optimizer.zero_grad()
        loss.backward()

        # Gradient clipping
        if norm_clipping:
            torch.nn.utils.clip_grad_norm_(self.policy_net.parameters(), 1.0)
        else:
            for param in self.policy_net.parameters():
                param.grad.data.clamp_(-1, 1)

        self.optimizer.step()

    def get_actor_state_vector(self, global_state: ndarray, actor_features: ndarray) -> ndarray:
        """Concatenate the global feature vector with one node's feature vector."""
        return np.concatenate((np.array(global_state, dtype=np.float32),
                               np.array(actor_features, dtype=np.float32)))

    def update_q_function(self,
                          reward: float,
                          actor_state: ndarray,
                          abstract_action: np.int32,
                          next_actor_state: Optional[ndarray]):
        """Record one transition in replay memory and run an optimization step.

        next_actor_state -- None marks a terminal transition.
        """
        # store the transition in memory
        reward_tensor = torch.tensor([reward], device=device, dtype=torch.float)
        action_tensor = torch.tensor([[np.long(abstract_action)]], device=device, dtype=torch.long)
        current_state_tensor = torch.as_tensor(actor_state, dtype=torch.float, device=device).unsqueeze(0)
        if next_actor_state is None:
            next_state_tensor = None
        else:
            next_state_tensor = torch.as_tensor(next_actor_state, dtype=torch.float, device=device).unsqueeze(0)
        self.memory.push(current_state_tensor, action_tensor, next_state_tensor, reward_tensor)

        # optimize the target network
        self.optimize_model()

    def on_step(self, wrapped_env: w.AgentWrapper,
                observation, reward: float, done: bool, info, action_metadata):
        """Learner callback: fold the step outcome into the Q-function."""
        agent_state = wrapped_env.state
        if done:
            # Terminal transition: no successor state.
            self.update_q_function(reward,
                                   actor_state=action_metadata.actor_state,
                                   abstract_action=action_metadata.abstract_action,
                                   next_actor_state=None)
        else:
            # Re-encode the successor state for the same actor node.
            next_global_state = self.stateaction_model.global_features.get(agent_state, node=None)
            next_actor_features = self.stateaction_model.node_specific_features.get(
                agent_state, action_metadata.actor_node)
            next_actor_state = self.get_actor_state_vector(next_global_state, next_actor_features)

            self.update_q_function(reward,
                                   actor_state=action_metadata.actor_state,
                                   abstract_action=action_metadata.abstract_action,
                                   next_actor_state=next_actor_state)

    def end_of_episode(self, i_episode, t):
        """Learner callback invoked at the end of each episode."""
        # Update the target network, copying all weights and biases in DQN
        if i_episode % self.target_update == 0:
            self.target_net.load_state_dict(self.policy_net.state_dict())

    def lookup_dqn(self, states_to_consider: List[ndarray]) -> Tuple[List[np.int32], List[np.int32]]:
        """ Given a set of possible current states return:
            - index, in the provided list, of the state that would yield the best possible outcome
            - the best action to take in such a state"""
        with torch.no_grad():
            # t.max(1) will return largest column value of each row.
            # second column on max result is index of where max element was
            # found, so we pick action with the larger expected reward.
            # action: np.int32 = self.policy_net(states_to_consider).max(1)[1].view(1, 1).item()
            state_batch = torch.tensor(states_to_consider).to(device)
            dnn_output = self.policy_net(state_batch).max(1)
            action_lookups = dnn_output[1].tolist()
            expectedq_lookups = dnn_output[0].tolist()

        return action_lookups, expectedq_lookups

    def metadata_from_gymaction(self, wrapped_env, gym_action):
        """Build ChosenActionMetadata (actor node, features, state) for a gym action."""
        current_global_state = self.stateaction_model.global_features.get(wrapped_env.state, node=None)
        actor_node = cyberbattle_env.sourcenode_of_action(gym_action)
        actor_features = self.stateaction_model.node_specific_features.get(wrapped_env.state, actor_node)
        abstract_action = self.stateaction_model.action_space.abstract_from_gymaction(gym_action)
        return ChosenActionMetadata(
            abstract_action=abstract_action,
            actor_node=actor_node,
            actor_features=actor_features,
            actor_state=self.get_actor_state_vector(current_global_state, actor_features))

    def explore(self, wrapped_env: w.AgentWrapper
                ) -> Tuple[str, cyberbattle_env.Action, object]:
        """Random exploration that avoids repeating actions previously taken in the same state"""
        # sample local and remote actions only (excludes connect action)
        gym_action = wrapped_env.env.sample_valid_action(kinds=[0, 1, 2])
        metadata = self.metadata_from_gymaction(wrapped_env, gym_action)
        return "explore", gym_action, metadata

    def try_exploit_at_candidate_actor_states(
            self,
            wrapped_env,
            current_global_state,
            actor_features,
            abstract_action):
        """Attempt to realize the abstract action at a node with the given features.

        Returns (action_style, gym_action or None, metadata or None); on failure
        a zero reward is learnt for the attempted (state, action) pair.
        """

        actor_state = self.get_actor_state_vector(current_global_state, actor_features)

        action_style, gym_action, actor_node = self.stateaction_model.implement_action(
            wrapped_env, actor_features, abstract_action)

        if gym_action:
            assert actor_node is not None, 'actor_node should be set together with gym_action'

            return action_style, gym_action, ChosenActionMetadata(
                abstract_action=abstract_action,
                actor_node=actor_node,
                actor_features=actor_features,
                actor_state=actor_state)
        else:
            # learn the failed exploit attempt in the current state
            self.update_q_function(reward=0.0,
                                   actor_state=actor_state,
                                   next_actor_state=actor_state,
                                   abstract_action=abstract_action)

            return "exploit[undefined]->explore", None, None

    def exploit(self,
                wrapped_env,
                observation
                ) -> Tuple[str, Optional[cyberbattle_env.Action], object]:
        """Pick the best (state, action) pair per the learnt Q-function and realize it.

        Candidates are tried in decreasing expected-Q order until one yields a
        valid gym action; otherwise falls back to exploration.
        """

        # first, attempt to exploit the credential cache
        # using the crecache_policy
        # action_style, gym_action, _ = self.credcache_policy.exploit(wrapped_env, observation)
        # if gym_action:
        #     return action_style, gym_action, self.metadata_from_gymaction(wrapped_env, gym_action)

        # Otherwise on exploit learnt Q-function
        current_global_state = self.stateaction_model.global_features.get(wrapped_env.state, node=None)

        # Gather the features of all the current active actors (i.e. owned nodes)
        active_actors_features: List[ndarray] = [
            self.stateaction_model.node_specific_features.get(wrapped_env.state, from_node)
            for from_node in w.owned_nodes(observation)
        ]

        unique_active_actors_features: List[ndarray] = np.unique(active_actors_features, axis=0)

        # array of actor state vector for every possible set of node features
        candidate_actor_state_vector: List[ndarray] = [
            self.get_actor_state_vector(current_global_state, node_features)
            for node_features in unique_active_actors_features]

        remaining_action_lookups, remaining_expectedq_lookups = self.lookup_dqn(candidate_actor_state_vector)
        remaining_candidate_indices = list(range(len(candidate_actor_state_vector)))

        while remaining_candidate_indices:
            _, remaining_candidate_index = random_argmax(remaining_expectedq_lookups)
            actor_index = remaining_candidate_indices[remaining_candidate_index]
            abstract_action = remaining_action_lookups[remaining_candidate_index]

            actor_features = unique_active_actors_features[actor_index]

            action_style, gym_action, metadata = self.try_exploit_at_candidate_actor_states(
                wrapped_env,
                current_global_state,
                actor_features,
                abstract_action)

            if gym_action:
                return action_style, gym_action, metadata

            # This candidate failed: drop it and try the next best one.
            remaining_candidate_indices.pop(remaining_candidate_index)
            remaining_expectedq_lookups.pop(remaining_candidate_index)
            remaining_action_lookups.pop(remaining_candidate_index)

        return "exploit[undefined]->explore", None, None

    def stateaction_as_string(self, action_metadata) -> str:
        """Learner callback: no extra state/action annotation for logging."""
        return ''
| 41.82449 | 118 | 0.664682 |
4402cb1bf4dd13a5fc7c305cb6f3f23483acebbd | 6,427 | py | Python | Archive/new_working_set.py | dtn067/cms-git-set | df7a810225e717a20324556ab48e566f8a59be63 | [
"MIT"
] | null | null | null | Archive/new_working_set.py | dtn067/cms-git-set | df7a810225e717a20324556ab48e566f8a59be63 | [
"MIT"
] | null | null | null | Archive/new_working_set.py | dtn067/cms-git-set | df7a810225e717a20324556ab48e566f8a59be63 | [
"MIT"
] | 2 | 2019-04-08T16:27:15.000Z | 2019-04-08T16:29:47.000Z | #!/usr/bin/env python
# coding: utf-8
# In[1]:
from __future__ import print_function
import datetime
from functools import reduce
import os
import pandas as pd
import numpy as np
#get_ipython().run_line_magic('matplotlib', 'tk')
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# In[2]:
# Turning off interactive mode so that this script can be run in the
# background
plt.ioff()
# Data collected from a spark query at CERN, in pandas pickle format
# CRAB jobs only have data after Oct. 2017
ws = pd.read_pickle("data/working_set_day.pkl.gz")
# ws is a pandas.core.frame.DataFrame
# ws.day outputs a pandas.core.series.Series
# ws.day.values outputs an ndarray of the raw values
# spark returns lists, we want to use sets
ws['working_set_blocks'] = ws.apply(lambda x: set(x.working_set_blocks), 'columns')
ws['working_set'] = ws.apply(lambda x: set(x.working_set), 'columns')
# In[3]:
# DBS BLOCKS table schema:
# BLOCK_ID NOT NULL NUMBER(38)
# BLOCK_NAME NOT NULL VARCHAR2(500)
# DATASET_ID NOT NULL NUMBER(38)
# OPEN_FOR_WRITING NOT NULL NUMBER(38)
# ORIGIN_SITE_NAME NOT NULL VARCHAR2(100)
# BLOCK_SIZE NUMBER(38)
# FILE_COUNT NUMBER(38)
# CREATION_DATE NUMBER(38)
# CREATE_BY VARCHAR2(500)
# LAST_MODIFICATION_DATE NUMBER(38)
# LAST_MODIFIED_BY VARCHAR2(500)
if not os.path.exists('data/block_size.npy'):
blocksize = pd.read_csv("data/dbs_blocks.csv.gz", dtype='i8', usecols=(0,5), names=['block_id', 'block_size'])
np.save('data/block_size.npy', blocksize.values)
blocksize = blocksize.values
else:
blocksize = np.load('data/block_size.npy')
# We'll be accessing randomly, make a dictionary
blocksize = {v[0]:v[1] for v in blocksize}
# In[4]:
# join the data tier definitions
datatiers = pd.read_csv('data/dbs_datatiers.csv').set_index('id')
ws['data_tier'] = datatiers.loc[ws.d_data_tier_id].data_tier.values
# In[5]:
date_index = np.arange(np.min(ws.day.values//86400), np.max(ws.day.values//86400)+1)
date_index_ts = np.array(list(datetime.date.fromtimestamp(day*86400) for day in date_index))
# In[6]:
ws_filtered = ws[(ws.crab_job==True) & (ws.data_tier.str.contains('MINIAOD'))]
blocks_day = []
for i, day in enumerate(date_index):
today = (ws_filtered.day==day*86400)
blocks_day.append(reduce(lambda a,b: a.union(b), ws_filtered[today].working_set_blocks, set()))
print("Done assembling blocklists")
nrecords = np.zeros_like(date_index)
lifetimes = {
'1d' : 1,
'1w': 7,
'1m': 30,
'3m': 90,
'6m': 120,
}
ws_size = {k: np.zeros_like(date_index) for k in lifetimes}
nrecalls = {k: np.zeros_like(date_index) for k in lifetimes}
recall_size = {k: np.zeros_like(date_index) for k in lifetimes}
previous = {k: set() for k in lifetimes}
for i, day in enumerate(date_index):
nrecords[i] = ws_filtered[(ws_filtered.day==day*86400)].size
for key in lifetimes:
current = reduce(lambda a,b: a.union(b), blocks_day[max(0,i-lifetimes[key]):i+1], set())
recall = current - previous[key]
nrecalls[key][i] = len(recall)
ws_size[key][i] = sum(blocksize[bid] for bid in current)
recall_size[key][i] = sum(blocksize[bid] for bid in recall)
previous[key] = current
if i%30==0:
print("Day ", i)
print("Done")
# In[7]:
fig, ax = plt.subplots(1,1)
ax.plot(date_index_ts, recall_size['1w']/1e15, label='1 week')
ax.plot(date_index_ts, recall_size['1m']/1e15, label='1 month')
ax.plot(date_index_ts, recall_size['3m']/1e15, label='3 months')
ax.legend(title='Block lifetime')
ax.set_title('Simulated block recalls for CRAB users')
ax.set_ylabel('Recall rate [PB/day]')
ax.set_xlabel('Date')
ax.set_ylim(0, None)
ax.set_xlim(datetime.date(2017,10,1), None)
# Saving the plot as a figure
plt.savefig("Figure1.png")
# In[8]:
fig, ax = plt.subplots(1,1)
ax.plot(date_index_ts, ws_size['1w']/1e15, label='1 week')
ax.plot(date_index_ts, ws_size['1m']/1e15, label='1 month')
ax.plot(date_index_ts, ws_size['3m']/1e15, label='3 months')
ax.legend(title='Block lifetime')
ax.set_title('Working set for CRAB users, MINIAOD*')
ax.set_ylabel('Working set size [PB]')
ax.set_xlabel('Date')
ax.set_ylim(0, None)
ax.set_xlim(datetime.date(2017,10,1), None)
# Saving the plot as a figure
plt.savefig("Figure2.png")
# In[9]:
recall_size['3m'].mean()/1e12
# In[10]:
# Merging all block_day lists into one set
print("Merging daily block lists into one block set")
block_list = []
for i in range(0,len(blocks_day)):
block_list += blocks_day[i]
block_set = set(block_list)
print("Block Set Created")
# Creating a list to keep track of the number of times a setBlock
# (a unique instance of a block) appears in a day
block_occurrence = np.zeros_like(list(block_set))
for i, day in enumerate(date_index):
for setBlock in block_list:
if blocks_day[i] == setBlock:
block_occurrence[i] += 1
if i%30==0:
print("Day ", i)
# In[11]:
fig, ax = plt.subplots(1,1)
ax.plot(list(block_set), block_occurrence,label='Occurrences')
ax.legend(title='Block Occurrences')
ax.set_title('Simulated block recalls for CRAB users')
ax.set_ylabel('Block Occurrences')
ax.set_xlabel('Block')
ax.set_ylim(0, None)
ax.set_xlim(0, None)
# Saving the plot as a figure
plt.savefig("Figure3.png")
"""
print("New loop initiated.")
# A list of all blocks (nrecalls['1d'] is the total number of blocks)
block_list = list(np.empty([sum(nrecalls['1d']),1]))
print("Type of block_list[0]")
print(type(block_list[0]))
print("Length of block_list")
print(len(block_list))
#print("Print of block_list")
#print(block_list)
for i, day in enumerate(date_index):
nrecords[i] = ws_filtered[(ws_filtered.day==day*86400)].size
# This line of gives a list of blocks of length, lifetimes[key] that is
# incremented along by the day (counted by i).
current = reduce(lambda a,b: a.union(b),
blocks_day[max(0,i-lifetimes[key]):i+1],
set())
recall = current - previous[key]
# Appending all blocks to block_list
for j in range(len(previous),len(previous)+len(current)):
if j is 1:
block_list[j] = recall
previous[key] = current
if i%30==0:
print("Day ", i)
block_set = set(block_list)
for i, day in enumerate(date_index):
current = reduce(lambda a,b: a.union(b),
blocks_day[max(0,i-lifetimes[key]):i+1],
set())
"""
print("Done")
| 27.943478 | 114 | 0.690369 |
8f2008c035c11d5297daa69e9c3dc46fcf3760d7 | 4,729 | py | Python | src/main.py | LiamTyler/ClimbingWallLEDController | 99e65ab44e9bd84ac3dbe76ba4a3d66f24ba3c9e | [
"MIT"
] | null | null | null | src/main.py | LiamTyler/ClimbingWallLEDController | 99e65ab44e9bd84ac3dbe76ba4a3d66f24ba3c9e | [
"MIT"
] | null | null | null | src/main.py | LiamTyler/ClimbingWallLEDController | 99e65ab44e9bd84ac3dbe76ba4a3d66f24ba3c9e | [
"MIT"
] | null | null | null | from gui_route_viewer import *
from gui_main_menu import *
from gui_route_creator import *
from gui_route_details_form import *
from routes import *
from routeStore import *
from lights import *
# Shared UI constants: the list-item font and the fixed main-window size (w, h).
STENCIL_FONT = QFont( QFont( 'Arial', 16 ) )
STENCIL_FONT.setBold( True )
WINDOW_SIZE = (600, 800)
class RouteListItem( QListWidgetItem ):
    """List entry showing a route's name, V-grade, style, and rating.

    Keeps a reference to the underlying Route object in ``self.route`` so the
    click handler can open the route viewer for it.
    """
    def __init__( self, route ):
        super().__init__()
        self.route = route
        # First line: "<name>: V<difficulty>"; second line: style (if any) + rating.
        showString = route.name + ": V" + str( route.difficulty ) + "\n    "
        if route.style != RouteStyle.NONE:
            showString += "Style: " + RouteStyleToString( route.style )
        showString += "    Rating: " + str( route.rating ) + "/5"
        self.setText( showString )
        self.setFont( STENCIL_FONT )
class MainWindow( QMainWindow ):
    """Top-level window for the Home Wall App.

    Hosts a single vertical layout in a central widget and swaps its contents
    between three pages: the main menu (route list), the route viewer, and the
    route creator.
    """
    def __init__( self ):
        super( MainWindow, self ).__init__()
        self.resize( WINDOW_SIZE[0], WINDOW_SIZE[1] )
        self.setStyleSheet( "background-color: lightGray;" )
        self._centralWidget = QWidget( self )
        self.setCentralWidget( self._centralWidget )
        # All pages are placed into this one layout; _clearLayout empties it
        # before each page switch.
        self._verticalLayout = QVBoxLayout()
        self._verticalLayout.setSpacing( 0 )
        self._verticalLayout.setContentsMargins( QMargins( 0, 0, 0, 0 ) )
        self._centralWidget.setLayout( self._verticalLayout )
        self._centralWidget.setSizePolicy( QSizePolicy.Policy.Fixed, QSizePolicy.Policy.Fixed )
        self.MainMenu()
        self.show()
    def _clearLayout( self, layout ):
        # Recursively delete all widgets/sub-layouts so a new page can be built.
        if layout is not None:
            for i in reversed( range( layout.count() ) ):
                item = layout.itemAt( i )
                widget = item.widget()
                if widget:
                    widget.deleteLater()
                else:
                    self._clearLayout( item.layout() )
    def DeleteRoute( self, route ):
        # Remove the route from persistent storage, then go back to the menu.
        g_routeStore.DeleteRoute( route )
        self.MainMenu()
    def ViewRoutePage( self, dispRoute ):
        # dispRoute is a RouteListItem; light up the wall LEDs for its route
        # and show the viewer page.
        self._clearLayout( self._verticalLayout )
        LED_DisplayRoute( dispRoute.route )
        self._verticalLayout.setSpacing( 0 )
        routeViewer = RouteViewer( dispRoute.route, self )
        self._verticalLayout.addWidget( routeViewer )
    def CreateRoutePage( self ):
        self._clearLayout( self._verticalLayout )
        self._verticalLayout.addWidget( CreateRouteView( self ) )
    def MainMenu( self ):
        """Build the main-menu page: top bar, scrollable route list, add button."""
        self.setWindowTitle( "Home Wall App" )
        self._clearLayout( self._verticalLayout )
        topBarWidget = QWidget()
        #topBarWidget.setFixedHeight( 40 )
        topBarWidget.setStyleSheet( "background-color: #292929" )
        topBarHBox = QHBoxLayout()
        topBarWidget.setLayout( topBarHBox )
        filterButton = QPushButton()
        filterButton.setStyleSheet( "background-color: #FFFFFF" )
        filterButton.setIcon( QIcon( "../icons/menu.svg" ) )
        #filterButton.clicked.connect( )
        topBarHBox.setAlignment( Qt.AlignRight )
        topBarHBox.addWidget( filterButton )
        vlist = QListWidget( self )
        routes = g_routeStore.GetAllRoutes()
        for route in routes:
            vlist.addItem( RouteListItem( route ) )
        scrollBar = QScrollBar()
        vlist.setVerticalScrollBar( scrollBar )
        # Clicking an item opens the route viewer for that route.
        vlist.itemClicked.connect( self.ViewRoutePage )
        addRouteButton = QPushButton( self )
        addRouteButton.setText( "Add Route" )
        addRouteButton.setStyleSheet( "background-color : white" )
        addRouteButton.clicked.connect( self.CreateRoutePage )
        self._verticalLayout.addWidget( topBarWidget )
        self._verticalLayout.addWidget( vlist )
        self._verticalLayout.addWidget( addRouteButton )
# For ease of test route creation
def Hold_S( coord ):
    """Parse a grid coordinate like "A1" into a START Hold.

    The leading letter is the column ('A' -> 0) and the trailing number is the
    row ('1' -> 0). Renamed the parameter from ``str``, which shadowed the
    builtin; all call sites in this file pass it positionally.
    """
    return Hold( int( coord[1:] ) - 1, ord( coord[0] ) - 65, HoldStatus.START )
def Hold_R( coord ):
    """Parse a grid coordinate like "C6" into a REGULAR Hold.

    The leading letter is the column ('A' -> 0) and the trailing number is the
    row ('1' -> 0). Renamed the parameter from ``str``, which shadowed the
    builtin; all call sites in this file pass it positionally.
    """
    return Hold( int( coord[1:] ) - 1, ord( coord[0] ) - 65, HoldStatus.REGULAR )
def Hold_F( coord ):
    """Parse a grid coordinate like "E9" into a FINISH Hold.

    The leading letter is the column ('A' -> 0) and the trailing number is the
    row ('1' -> 0). Renamed the parameter from ``str``, which shadowed the
    builtin; all call sites in this file pass it positionally.
    """
    return Hold( int( coord[1:] ) - 1, ord( coord[0] ) - 65, HoldStatus.FINISH )
if __name__ == '__main__':
    # Bring up the LED hardware, seed the route store with three sample
    # routes, then start the Qt event loop.
    LED_InitializeController()
    g_routeStore = RouteStore()
    route1 = Route( "Route 1", 5, 4, RouteStyle.SLOPEY )
    route1.holds = [ Hold_S( "A1" ), Hold_S( "B3" ), Hold_R( "C6" ), Hold_R( "D7" ), Hold_F( "E9" ) ]
    route2 = Route( "Route 2", 3, 5, RouteStyle.CRIMPY )
    route2.holds = [ Hold_S( "A3" ), Hold_S( "B4" ), Hold_R( "B8" ), Hold_F( "C9" ) ]
    route3 = Route( "Route 3", 4, 2, RouteStyle.JUGGY )
    route3.holds = [ Hold_S( "E3" ), Hold_S( "D3" ), Hold_R( "D6" ), Hold_R( "C6" ), Hold_F( "B9" ) ]
    g_routeStore.AddRoute( route1 )
    g_routeStore.AddRoute( route2 )
    g_routeStore.AddRoute( route3 )
    app = QApplication( [] )
    window = MainWindow()
    app.exec_()
| 38.447154 | 101 | 0.629943 |
1581bc8cf5d63fce3c72d7b0369c6eb36fe92b5c | 452 | py | Python | src/quote/views.py | RonquilloAeon/fastapi-migrations | 524de15635cd92ed129a9a7d9d66cabf211a491c | [
"MIT"
] | null | null | null | src/quote/views.py | RonquilloAeon/fastapi-migrations | 524de15635cd92ed129a9a7d9d66cabf211a491c | [
"MIT"
] | null | null | null | src/quote/views.py | RonquilloAeon/fastapi-migrations | 524de15635cd92ed129a9a7d9d66cabf211a491c | [
"MIT"
] | 1 | 2020-10-03T22:13:54.000Z | 2020-10-03T22:13:54.000Z | from databases import Database
from fastapi import APIRouter, Depends
from src.dependencies import get_db
from src.quote.models import Quote
from src.models import ListResponse
router = APIRouter()
@router.get("", response_model=ListResponse[Quote])
async def list_quotes(db: Database = Depends(get_db)):
    """Return every quote row, ordered by id, wrapped as {"results": [...]}."""
    results = await db.fetch_all(
        "SELECT id, category, source, content FROM quote ORDER BY id"
    )
    return {"results": results}
| 25.111111 | 69 | 0.747788 |
fdf924a4c7898605ce5157ecbb0907e333b0287b | 3,497 | py | Python | src/Yowsup/ConnectionIO/connectionengine.py | awesomebytes/yowsup | 5eead0d56e9871fbc9de695c8ec34b1f4df7f086 | [
"MIT"
] | 7 | 2015-05-26T09:28:52.000Z | 2021-05-03T06:50:37.000Z | src/Yowsup/ConnectionIO/connectionengine.py | awesomebytes/yowsup | 5eead0d56e9871fbc9de695c8ec34b1f4df7f086 | [
"MIT"
] | null | null | null | src/Yowsup/ConnectionIO/connectionengine.py | awesomebytes/yowsup | 5eead0d56e9871fbc9de695c8ec34b1f4df7f086 | [
"MIT"
] | 9 | 2015-01-02T00:01:01.000Z | 2017-11-11T23:14:33.000Z | '''
Copyright (c) <2012> Tarek Galal <tare2.galal@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR
A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import socket;
import sys
from .bintreenode import BinTreeNodeReader, BinTreeNodeWriter
from Yowsup.Common.debugger import Debugger
from .ioexceptions import ConnectionClosedException
class ConnectionEngine(socket.socket):
    """TCP socket subclass used as the transport for the binary tree-node
    protocol; owns a BinTreeNodeReader/Writer pair bound to itself.

    Read/write failures are wrapped in ConnectionClosedException so callers
    can treat any socket error as a dropped connection.
    """
    def __init__(self):
        Debugger.attach(self)
        self.reader = BinTreeNodeReader(self)
        self.writer = BinTreeNodeWriter(self)
        self.readSize = 1  # bytes fetched per read() call; see setReadSize()
        self.buf = []
        self.buffer = ""  # FIX: getBuffer() read this before reset() ever set it
        self.maxBufRead = 0
        self.connected = 0
        self.jid = ""
        super(ConnectionEngine, self).__init__(socket.AF_INET, socket.SOCK_STREAM)

    def getId(self):
        return self.id

    def setId(self, idx):
        self.id = idx

    def flush(self):
        '''FLUSH'''
        # NOTE(review): write() requires a data argument, so this call raises
        # TypeError if ever invoked; kept as-is to avoid guessing the intent.
        self.write()

    def getBuffer(self):
        return self.buffer

    def reset(self):
        self.buffer = ""

    def write(self, data):
        """Send a single byte (int) or a sequence of byte values.

        Values are latin-1 encoded on Python 3 to preserve raw 0-255 bytes.
        """
        if type(data) is int:
            try:
                self.sendall(chr(data)) if sys.version_info < (3, 0) else self.sendall(chr(data).encode('iso-8859-1'))
            # FIX: was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit
            except Exception:
                self._d("socket 1 write crashed, reason: %s" % sys.exc_info()[1])
                raise ConnectionClosedException("socket 1 write crashed, reason: %s" % sys.exc_info()[1])
        else:
            tmp = ""
            for d in data:
                tmp += chr(d)
            try:
                self.sendall(tmp) if sys.version_info < (3, 0) else self.sendall(tmp.encode('iso-8859-1'))
            except Exception:
                self._d("socket 2 write crashed, reason: %s" % sys.exc_info()[1])
                raise ConnectionClosedException("socket 2 write crashed, reason: %s" % sys.exc_info()[1])

    def setReadSize(self, size):
        self.readSize = size

    def read(self, socketOnly=0):
        """Read one chunk from the socket; return the byte's ordinal when
        exactly one byte arrived, otherwise raise ConnectionClosedException."""
        x = ""
        try:
            x = self.recv(self.readSize)#.decode('iso-8859-1');
        except Exception:
            self._d("socket read crashed, reason %s " % sys.exc_info()[1])
            raise ConnectionClosedException("socket read crashed, reason %s " % sys.exc_info()[1])
        if len(x) == 1:
            return ord(x)
        else:
            # recv() returning 0 bytes means the peer closed the connection
            raise ConnectionClosedException("Got 0 bytes, connection closed")

    def read2(self, b, off, length):
        '''Read `length` bytes into buffer `b` starting at offset `off`;
        returns the number of bytes read.'''
        if off < 0 or length < 0 or (off+length) > len(b):
            raise Exception("Out of bounds")
        if length == 0:
            return 0
        if b is None:
            raise Exception("XNull pointerX")
        count = 0
        while count < length:
            b[off+count] = self.read(0)
            count = count+1
        return count
| 26.694656 | 106 | 0.691736 |
b81aefd51bd42b845df1bf33096645010300b938 | 27,774 | py | Python | zerver/tests/test_audit_log.py | sayamsamal/zulip | d26a15b14dea5a5b1a93a14a91352798c074dc5e | [
"Apache-2.0"
] | null | null | null | zerver/tests/test_audit_log.py | sayamsamal/zulip | d26a15b14dea5a5b1a93a14a91352798c074dc5e | [
"Apache-2.0"
] | null | null | null | zerver/tests/test_audit_log.py | sayamsamal/zulip | d26a15b14dea5a5b1a93a14a91352798c074dc5e | [
"Apache-2.0"
] | null | null | null | from datetime import timedelta
from typing import Any, Dict, Union
import orjson
from django.contrib.auth.password_validation import validate_password
from django.utils.timezone import now as timezone_now
from analytics.models import StreamCount
from zerver.lib.actions import (
bulk_add_subscriptions,
bulk_remove_subscriptions,
do_activate_mirror_dummy_user,
do_add_realm_domain,
do_change_avatar_fields,
do_change_bot_owner,
do_change_default_all_public_streams,
do_change_default_events_register_stream,
do_change_default_sending_stream,
do_change_icon_source,
do_change_password,
do_change_realm_domain,
do_change_subscription_property,
do_change_tos_version,
do_change_user_delivery_email,
do_change_user_role,
do_change_user_setting,
do_create_user,
do_deactivate_realm,
do_deactivate_stream,
do_deactivate_user,
do_reactivate_realm,
do_reactivate_user,
do_regenerate_api_key,
do_remove_realm_domain,
do_rename_stream,
do_set_realm_authentication_methods,
do_set_realm_message_editing,
do_set_realm_notifications_stream,
do_set_realm_signup_notifications_stream,
get_streams_traffic,
)
from zerver.lib.message import get_last_message_id
from zerver.lib.streams import create_stream_if_needed
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import (
Message,
Realm,
RealmAuditLog,
Recipient,
Subscription,
UserProfile,
get_realm,
get_realm_domains,
get_stream,
)
class TestRealmAuditLog(ZulipTestCase):
    def check_role_count_schema(self, role_counts: Dict[str, Any]) -> None:
        """Assert a ROLE_COUNT extra_data payload has int counts for each
        human role (keyed by stringified role id) and an int bot count."""
        for key in [
            UserProfile.ROLE_REALM_ADMINISTRATOR,
            UserProfile.ROLE_MEMBER,
            UserProfile.ROLE_GUEST,
            UserProfile.ROLE_REALM_OWNER,
        ]:
            # str(key) since json keys are always strings, and ujson.dumps will have converted
            # the UserProfile.role values into strings
            self.assertTrue(isinstance(role_counts[RealmAuditLog.ROLE_COUNT_HUMANS][str(key)], int))
        self.assertTrue(isinstance(role_counts[RealmAuditLog.ROLE_COUNT_BOTS], int))
    def test_user_activation(self) -> None:
        """Create/deactivate/activate/reactivate a user and verify one audit
        row per transition, in order, each carrying role counts and no
        OLD_VALUE in extra_data."""
        realm = get_realm("zulip")
        now = timezone_now()
        user = do_create_user("email", "password", realm, "full_name", acting_user=None)
        do_deactivate_user(user, acting_user=user)
        do_activate_mirror_dummy_user(user, acting_user=user)
        do_deactivate_user(user, acting_user=user)
        do_reactivate_user(user, acting_user=user)
        # 6 rows total: the 5 user events plus one from user creation's side effects.
        self.assertEqual(RealmAuditLog.objects.filter(event_time__gte=now).count(), 6)
        event_types = list(
            RealmAuditLog.objects.filter(
                realm=realm,
                acting_user=user,
                modified_user=user,
                modified_stream=None,
                event_time__gte=now,
                event_time__lte=now + timedelta(minutes=60),
            )
            .order_by("event_time")
            .values_list("event_type", flat=True)
        )
        self.assertEqual(
            event_types,
            [
                RealmAuditLog.USER_CREATED,
                RealmAuditLog.USER_DEACTIVATED,
                RealmAuditLog.USER_ACTIVATED,
                RealmAuditLog.USER_DEACTIVATED,
                RealmAuditLog.USER_REACTIVATED,
            ],
        )
        for event in RealmAuditLog.objects.filter(
            realm=realm,
            acting_user=user,
            modified_user=user,
            modified_stream=None,
            event_time__gte=now,
            event_time__lte=now + timedelta(minutes=60),
        ):
            extra_data = orjson.loads(event.extra_data)
            self.check_role_count_schema(extra_data[RealmAuditLog.ROLE_COUNT])
            self.assertNotIn(RealmAuditLog.OLD_VALUE, extra_data)
    def test_change_role(self) -> None:
        """Cycle a user through every role and verify each USER_ROLE_CHANGED
        audit row records role counts plus matching OLD_VALUE/NEW_VALUE."""
        realm = get_realm("zulip")
        now = timezone_now()
        user_profile = self.example_user("hamlet")
        acting_user = self.example_user("iago")
        do_change_user_role(
            user_profile, UserProfile.ROLE_REALM_ADMINISTRATOR, acting_user=acting_user
        )
        do_change_user_role(user_profile, UserProfile.ROLE_MEMBER, acting_user=acting_user)
        do_change_user_role(user_profile, UserProfile.ROLE_GUEST, acting_user=acting_user)
        do_change_user_role(user_profile, UserProfile.ROLE_MEMBER, acting_user=acting_user)
        do_change_user_role(user_profile, UserProfile.ROLE_REALM_OWNER, acting_user=acting_user)
        do_change_user_role(user_profile, UserProfile.ROLE_MEMBER, acting_user=acting_user)
        do_change_user_role(user_profile, UserProfile.ROLE_MODERATOR, acting_user=acting_user)
        do_change_user_role(user_profile, UserProfile.ROLE_MEMBER, acting_user=acting_user)
        old_values_seen = set()
        new_values_seen = set()
        for event in RealmAuditLog.objects.filter(
            event_type=RealmAuditLog.USER_ROLE_CHANGED,
            realm=realm,
            modified_user=user_profile,
            acting_user=acting_user,
            event_time__gte=now,
            event_time__lte=now + timedelta(minutes=60),
        ):
            extra_data = orjson.loads(event.extra_data)
            self.check_role_count_schema(extra_data[RealmAuditLog.ROLE_COUNT])
            self.assertIn(RealmAuditLog.OLD_VALUE, extra_data)
            self.assertIn(RealmAuditLog.NEW_VALUE, extra_data)
            old_values_seen.add(extra_data[RealmAuditLog.OLD_VALUE])
            new_values_seen.add(extra_data[RealmAuditLog.NEW_VALUE])
        # Every role should appear both as a source and a destination.
        self.assertEqual(
            old_values_seen,
            {
                UserProfile.ROLE_GUEST,
                UserProfile.ROLE_MEMBER,
                UserProfile.ROLE_REALM_ADMINISTRATOR,
                UserProfile.ROLE_REALM_OWNER,
                UserProfile.ROLE_MODERATOR,
            },
        )
        self.assertEqual(old_values_seen, new_values_seen)
    def test_change_password(self) -> None:
        """do_change_password writes one USER_PASSWORD_CHANGED audit row and
        the new password validates against the user."""
        now = timezone_now()
        user = self.example_user("hamlet")
        password = "test1"
        do_change_password(user, password)
        self.assertEqual(
            RealmAuditLog.objects.filter(
                event_type=RealmAuditLog.USER_PASSWORD_CHANGED, event_time__gte=now
            ).count(),
            1,
        )
        # validate_password returns None when the password passes validation.
        self.assertIsNone(validate_password(password, user))
    def test_change_email(self) -> None:
        """do_change_user_delivery_email writes one USER_EMAIL_CHANGED audit
        row, updates delivery_email, and the row stringifies as expected."""
        now = timezone_now()
        user = self.example_user("hamlet")
        new_email = "test@example.com"
        do_change_user_delivery_email(user, new_email)
        self.assertEqual(
            RealmAuditLog.objects.filter(
                event_type=RealmAuditLog.USER_EMAIL_CHANGED, event_time__gte=now
            ).count(),
            1,
        )
        self.assertEqual(new_email, user.delivery_email)

        # Test the RealmAuditLog stringification
        audit_entry = RealmAuditLog.objects.get(
            event_type=RealmAuditLog.USER_EMAIL_CHANGED, event_time__gte=now
        )
        self.assertTrue(
            str(audit_entry).startswith(
                f"<RealmAuditLog: <UserProfile: {user.email} {user.realm}> {RealmAuditLog.USER_EMAIL_CHANGED} "
            )
        )
    def test_change_avatar_source(self) -> None:
        """do_change_avatar_fields writes one USER_AVATAR_SOURCE_CHANGED audit
        row and updates avatar_source."""
        now = timezone_now()
        user = self.example_user("hamlet")
        avatar_source = "G"
        do_change_avatar_fields(user, avatar_source, acting_user=user)
        self.assertEqual(
            RealmAuditLog.objects.filter(
                event_type=RealmAuditLog.USER_AVATAR_SOURCE_CHANGED,
                modified_user=user,
                acting_user=user,
                event_time__gte=now,
            ).count(),
            1,
        )
        self.assertEqual(avatar_source, user.avatar_source)
    def test_change_full_name(self) -> None:
        """PATCHing a user's full_name via the API (as an admin) writes one
        USER_FULL_NAME_CHANGED audit row."""
        start = timezone_now()
        new_name = "George Hamletovich"
        self.login("iago")
        req = dict(full_name=new_name)
        result = self.client_patch("/json/users/{}".format(self.example_user("hamlet").id), req)
        self.assertTrue(result.status_code == 200)
        query = RealmAuditLog.objects.filter(
            event_type=RealmAuditLog.USER_FULL_NAME_CHANGED, event_time__gte=start
        )
        self.assertEqual(query.count(), 1)
    def test_change_tos_version(self) -> None:
        """do_change_tos_version writes one
        USER_TERMS_OF_SERVICE_VERSION_CHANGED audit row and updates tos_version."""
        now = timezone_now()
        user = self.example_user("hamlet")
        tos_version = "android"
        do_change_tos_version(user, tos_version)
        self.assertEqual(
            RealmAuditLog.objects.filter(
                event_type=RealmAuditLog.USER_TERMS_OF_SERVICE_VERSION_CHANGED, event_time__gte=now
            ).count(),
            1,
        )
        self.assertEqual(tos_version, user.tos_version)
    def test_change_bot_owner(self) -> None:
        """do_change_bot_owner writes one USER_BOT_OWNER_CHANGED audit row and
        updates the bot's bot_owner."""
        now = timezone_now()
        admin = self.example_user("iago")
        bot = self.notification_bot(admin.realm)
        bot_owner = self.example_user("hamlet")
        do_change_bot_owner(bot, bot_owner, admin)
        self.assertEqual(
            RealmAuditLog.objects.filter(
                event_type=RealmAuditLog.USER_BOT_OWNER_CHANGED, event_time__gte=now
            ).count(),
            1,
        )
        self.assertEqual(bot_owner, bot.bot_owner)
    def test_regenerate_api_key(self) -> None:
        """do_regenerate_api_key writes one USER_API_KEY_CHANGED audit row and
        leaves the user with a (truthy) api_key."""
        now = timezone_now()
        user = self.example_user("hamlet")
        do_regenerate_api_key(user, user)
        self.assertEqual(
            RealmAuditLog.objects.filter(
                event_type=RealmAuditLog.USER_API_KEY_CHANGED, event_time__gte=now
            ).count(),
            1,
        )
        self.assertTrue(user.api_key)
    def test_get_streams_traffic(self) -> None:
        """get_streams_traffic returns {} when no StreamCount rows exist and
        maps stream id -> recorded value once one does."""
        realm = get_realm("zulip")
        stream_name = "whatever"
        stream = self.make_stream(stream_name, realm)
        stream_ids = {stream.id}

        result = get_streams_traffic(stream_ids)
        self.assertEqual(result, {})

        StreamCount.objects.create(
            realm=realm,
            stream=stream,
            property="messages_in_stream:is_bot:day",
            end_time=timezone_now(),
            value=999,
        )
        result = get_streams_traffic(stream_ids)
        self.assertEqual(result, {stream.id: 999})
    def test_subscriptions(self) -> None:
        """bulk_add_subscriptions / bulk_remove_subscriptions each write one
        audit row naming the modified user and stream."""
        now = timezone_now()
        user = self.example_user("hamlet")
        realm = user.realm
        stream = self.make_stream("test_stream")
        acting_user = self.example_user("iago")
        bulk_add_subscriptions(user.realm, [stream], [user], acting_user=acting_user)
        subscription_creation_logs = RealmAuditLog.objects.filter(
            event_type=RealmAuditLog.SUBSCRIPTION_CREATED,
            event_time__gte=now,
            acting_user=acting_user,
            modified_user=user,
            modified_stream=stream,
        )
        modified_stream = subscription_creation_logs[0].modified_stream
        assert modified_stream is not None
        self.assertEqual(subscription_creation_logs.count(), 1)
        self.assertEqual(modified_stream.id, stream.id)
        self.assertEqual(subscription_creation_logs[0].modified_user, user)

        bulk_remove_subscriptions(realm, [user], [stream], acting_user=acting_user)
        subscription_deactivation_logs = RealmAuditLog.objects.filter(
            event_type=RealmAuditLog.SUBSCRIPTION_DEACTIVATED,
            event_time__gte=now,
            acting_user=acting_user,
            modified_user=user,
            modified_stream=stream,
        )
        modified_stream = subscription_deactivation_logs[0].modified_stream
        assert modified_stream is not None
        self.assertEqual(subscription_deactivation_logs.count(), 1)
        self.assertEqual(modified_stream.id, stream.id)
        self.assertEqual(subscription_deactivation_logs[0].modified_user, user)
    def test_realm_activation(self) -> None:
        """Deactivating and reactivating a realm each write an audit row whose
        extra_data carries role counts."""
        realm = get_realm("zulip")
        user = self.example_user("desdemona")
        do_deactivate_realm(realm, acting_user=user)
        log_entry = RealmAuditLog.objects.get(
            realm=realm, event_type=RealmAuditLog.REALM_DEACTIVATED, acting_user=user
        )
        extra_data = orjson.loads(log_entry.extra_data)
        self.check_role_count_schema(extra_data[RealmAuditLog.ROLE_COUNT])

        do_reactivate_realm(realm)
        log_entry = RealmAuditLog.objects.get(
            realm=realm, event_type=RealmAuditLog.REALM_REACTIVATED
        )
        extra_data = orjson.loads(log_entry.extra_data)
        self.check_role_count_schema(extra_data[RealmAuditLog.ROLE_COUNT])
    def test_create_stream_if_needed(self) -> None:
        """create_stream_if_needed writes one STREAM_CREATED audit row naming
        the acting user and the new stream."""
        now = timezone_now()
        realm = get_realm("zulip")
        user = self.example_user("hamlet")
        # create_stream_if_needed returns (stream, created); we only need the stream.
        stream = create_stream_if_needed(
            realm,
            "test",
            invite_only=False,
            stream_description="Test description",
            acting_user=user,
        )[0]
        self.assertEqual(
            RealmAuditLog.objects.filter(
                realm=realm,
                event_type=RealmAuditLog.STREAM_CREATED,
                event_time__gte=now,
                acting_user=user,
                modified_stream=stream,
            ).count(),
            1,
        )
    def test_deactivate_stream(self) -> None:
        """do_deactivate_stream writes one STREAM_DEACTIVATED audit row and
        marks the stream deactivated."""
        now = timezone_now()
        realm = get_realm("zulip")
        user = self.example_user("hamlet")
        stream_name = "test"
        stream = self.make_stream(stream_name, realm)
        do_deactivate_stream(stream, acting_user=user)
        self.assertEqual(
            RealmAuditLog.objects.filter(
                realm=realm,
                event_type=RealmAuditLog.STREAM_DEACTIVATED,
                event_time__gte=now,
                acting_user=user,
                modified_stream=stream,
            ).count(),
            1,
        )
        self.assertEqual(stream.deactivated, True)
    def test_set_realm_authentication_methods(self) -> None:
        """do_set_realm_authentication_methods writes one
        REALM_PROPERTY_CHANGED row with the old and new auth-method dicts."""
        now = timezone_now()
        realm = get_realm("zulip")
        user = self.example_user("hamlet")
        expected_old_value = realm.authentication_methods_dict()
        auth_method_dict = {
            "Google": False,
            "Email": False,
            "GitHub": False,
            "Apple": False,
            "Dev": True,
            "SAML": True,
            "GitLab": False,
            "OpenID Connect": False,
        }

        do_set_realm_authentication_methods(realm, auth_method_dict, acting_user=user)
        realm_audit_logs = RealmAuditLog.objects.filter(
            realm=realm,
            event_type=RealmAuditLog.REALM_PROPERTY_CHANGED,
            event_time__gte=now,
            acting_user=user,
        )
        self.assertEqual(realm_audit_logs.count(), 1)
        extra_data = orjson.loads(realm_audit_logs[0].extra_data)
        expected_new_value = auth_method_dict
        self.assertEqual(extra_data[RealmAuditLog.OLD_VALUE], expected_old_value)
        self.assertEqual(extra_data[RealmAuditLog.NEW_VALUE], expected_new_value)
    def test_get_last_message_id(self) -> None:
        # get_last_message_id is a helper mainly used for RealmAuditLog;
        # it returns the latest Message id, or -1 when no messages exist.
        self.assertEqual(
            get_last_message_id(),
            Message.objects.latest("id").id,
        )
        Message.objects.all().delete()
        self.assertEqual(get_last_message_id(), -1)
    def test_set_realm_message_editing(self) -> None:
        """do_set_realm_message_editing writes two REALM_PROPERTY_CHANGED rows
        (edit-limit seconds and edit_topic_policy), each with old/new values."""
        now = timezone_now()
        realm = get_realm("zulip")
        user = self.example_user("hamlet")
        values_expected = [
            {
                "property": "message_content_edit_limit_seconds",
                RealmAuditLog.OLD_VALUE: realm.message_content_edit_limit_seconds,
                RealmAuditLog.NEW_VALUE: 1000,
            },
            {
                "property": "edit_topic_policy",
                RealmAuditLog.OLD_VALUE: Realm.POLICY_EVERYONE,
                RealmAuditLog.NEW_VALUE: Realm.POLICY_ADMINS_ONLY,
            },
        ]

        do_set_realm_message_editing(realm, True, 1000, Realm.POLICY_ADMINS_ONLY, acting_user=user)
        realm_audit_logs = RealmAuditLog.objects.filter(
            realm=realm,
            event_type=RealmAuditLog.REALM_PROPERTY_CHANGED,
            event_time__gte=now,
            acting_user=user,
        ).order_by("id")
        self.assertEqual(realm_audit_logs.count(), 2)
        self.assertEqual(
            [orjson.loads(entry.extra_data) for entry in realm_audit_logs], values_expected
        )
    def test_set_realm_notifications_stream(self) -> None:
        """do_set_realm_notifications_stream writes one REALM_PROPERTY_CHANGED
        row with the old/new stream ids for 'notifications_stream'."""
        now = timezone_now()
        realm = get_realm("zulip")
        user = self.example_user("hamlet")
        old_value = realm.notifications_stream_id
        stream_name = "test"
        stream = self.make_stream(stream_name, realm)
        do_set_realm_notifications_stream(realm, stream, stream.id, acting_user=user)
        self.assertEqual(
            RealmAuditLog.objects.filter(
                realm=realm,
                event_type=RealmAuditLog.REALM_PROPERTY_CHANGED,
                event_time__gte=now,
                acting_user=user,
                extra_data=orjson.dumps(
                    {
                        RealmAuditLog.OLD_VALUE: old_value,
                        RealmAuditLog.NEW_VALUE: stream.id,
                        "property": "notifications_stream",
                    }
                ).decode(),
            ).count(),
            1,
        )
    def test_set_realm_signup_notifications_stream(self) -> None:
        """do_set_realm_signup_notifications_stream writes one
        REALM_PROPERTY_CHANGED row for 'signup_notifications_stream'."""
        now = timezone_now()
        realm = get_realm("zulip")
        user = self.example_user("hamlet")
        old_value = realm.signup_notifications_stream_id
        stream_name = "test"
        stream = self.make_stream(stream_name, realm)
        do_set_realm_signup_notifications_stream(realm, stream, stream.id, acting_user=user)
        self.assertEqual(
            RealmAuditLog.objects.filter(
                realm=realm,
                event_type=RealmAuditLog.REALM_PROPERTY_CHANGED,
                event_time__gte=now,
                acting_user=user,
                extra_data=orjson.dumps(
                    {
                        RealmAuditLog.OLD_VALUE: old_value,
                        RealmAuditLog.NEW_VALUE: stream.id,
                        "property": "signup_notifications_stream",
                    }
                ).decode(),
            ).count(),
            1,
        )
    def test_change_icon_source(self) -> None:
        """do_change_icon_source writes one REALM_ICON_SOURCE_CHANGED row whose
        extra_data records the new source and bumped icon version."""
        test_start = timezone_now()
        realm = get_realm("zulip")
        user = self.example_user("hamlet")
        icon_source = "G"
        do_change_icon_source(realm, icon_source, acting_user=user)
        audit_entries = RealmAuditLog.objects.filter(
            realm=realm,
            event_type=RealmAuditLog.REALM_ICON_SOURCE_CHANGED,
            acting_user=user,
            event_time__gte=test_start,
        )
        audit_log = audit_entries.first()
        assert audit_log is not None
        self.assert_length(audit_entries, 1)
        self.assertEqual(icon_source, realm.icon_source)
        self.assertEqual(audit_log.extra_data, "{'icon_source': 'G', 'icon_version': 2}")
    def test_change_subscription_property(self) -> None:
        """Flipping each subscription property writes one
        SUBSCRIPTION_PROPERTY_CHANGED row recording old/new values and the
        property name, and updates the Subscription field."""
        user = self.example_user("hamlet")
        # Fetch the Denmark stream for testing
        stream = get_stream("Denmark", user.realm)
        sub = Subscription.objects.get(
            user_profile=user, recipient__type=Recipient.STREAM, recipient__type_id=stream.id
        )
        # For each property, the target value differs from the current default.
        properties = {
            "color": True,
            "is_muted": True,
            "desktop_notifications": False,
            "audible_notifications": False,
            "push_notifications": True,
            "email_notifications": True,
            "pin_to_top": True,
            "wildcard_mentions_notify": False,
        }
        for property, value in properties.items():
            now = timezone_now()
            old_value = getattr(sub, property)
            self.assertNotEqual(old_value, value)
            do_change_subscription_property(user, sub, stream, property, value, acting_user=user)
            expected_extra_data = {
                RealmAuditLog.OLD_VALUE: old_value,
                RealmAuditLog.NEW_VALUE: value,
                "property": property,
            }
            self.assertEqual(
                RealmAuditLog.objects.filter(
                    realm=user.realm,
                    event_type=RealmAuditLog.SUBSCRIPTION_PROPERTY_CHANGED,
                    event_time__gte=now,
                    acting_user=user,
                    modified_user=user,
                    extra_data=orjson.dumps(expected_extra_data).decode(),
                ).count(),
                1,
            )
            self.assertEqual(getattr(sub, property), value)
def test_change_default_streams(self) -> None:
    """Each of the three user default-stream settings (sending stream,
    events-register stream, all-public-streams flag) must produce exactly
    one audit-log row of the matching event type when changed."""
    now = timezone_now()
    user = self.example_user("hamlet")
    stream = get_stream("Denmark", user.realm)
    # 1) default sending stream
    old_value = user.default_sending_stream_id
    do_change_default_sending_stream(user, stream, acting_user=user)
    self.assertEqual(
        RealmAuditLog.objects.filter(
            realm=user.realm,
            event_type=RealmAuditLog.USER_DEFAULT_SENDING_STREAM_CHANGED,
            event_time__gte=now,
            acting_user=user,
            extra_data=orjson.dumps(
                {
                    RealmAuditLog.OLD_VALUE: old_value,
                    RealmAuditLog.NEW_VALUE: stream.id,
                }
            ).decode(),
        ).count(),
        1,
    )
    self.assertEqual(user.default_sending_stream, stream)
    # 2) default events-register stream
    old_value = user.default_events_register_stream_id
    do_change_default_events_register_stream(user, stream, acting_user=user)
    self.assertEqual(
        RealmAuditLog.objects.filter(
            realm=user.realm,
            event_type=RealmAuditLog.USER_DEFAULT_REGISTER_STREAM_CHANGED,
            event_time__gte=now,
            acting_user=user,
            extra_data=orjson.dumps(
                {
                    RealmAuditLog.OLD_VALUE: old_value,
                    RealmAuditLog.NEW_VALUE: stream.id,
                }
            ).decode(),
        ).count(),
        1,
    )
    self.assertEqual(user.default_events_register_stream, stream)
    # 3) default "all public streams" boolean
    old_value = user.default_all_public_streams
    do_change_default_all_public_streams(user, False, acting_user=user)
    self.assertEqual(
        RealmAuditLog.objects.filter(
            realm=user.realm,
            event_type=RealmAuditLog.USER_DEFAULT_ALL_PUBLIC_STREAMS_CHANGED,
            event_time__gte=now,
            acting_user=user,
            extra_data=orjson.dumps(
                {RealmAuditLog.OLD_VALUE: old_value, RealmAuditLog.NEW_VALUE: False}
            ).decode(),
        ).count(),
        1,
    )
    self.assertEqual(user.default_all_public_streams, False)
def test_rename_stream(self) -> None:
    """Renaming a stream must write exactly one STREAM_NAME_CHANGED
    audit-log row tagged with the modified stream and old/new names."""
    now = timezone_now()
    user = self.example_user("hamlet")
    stream = self.make_stream("test", user.realm)
    old_name = stream.name
    do_rename_stream(stream, "updated name", user)
    self.assertEqual(
        RealmAuditLog.objects.filter(
            realm=user.realm,
            event_type=RealmAuditLog.STREAM_NAME_CHANGED,
            event_time__gte=now,
            acting_user=user,
            modified_stream=stream,
            # extra_data is matched as the exact orjson-serialized string.
            extra_data=orjson.dumps(
                {RealmAuditLog.OLD_VALUE: old_name, RealmAuditLog.NEW_VALUE: "updated name"}
            ).decode(),
        ).count(),
        1,
    )
    self.assertEqual(stream.name, "updated name")
def test_change_notification_settings(self) -> None:
    """Changing every notification setting must log exactly one
    USER_SETTING_CHANGED audit-log row per setting and persist the value."""
    user = self.example_user("hamlet")
    value: Union[bool, int, str]
    for setting, v in user.notification_setting_types.items():
        # Pick a non-default value of the right type for each setting;
        # most settings are booleans, two are special-cased.
        if setting == "notification_sound":
            value = "ding"
        elif setting == "desktop_icon_count_display":
            value = 3
        else:
            value = False
        now = timezone_now()
        old_value = getattr(user, setting)
        do_change_user_setting(user, setting, value, acting_user=user)
        expected_extra_data = {
            RealmAuditLog.OLD_VALUE: old_value,
            RealmAuditLog.NEW_VALUE: value,
            "property": setting,
        }
        # extra_data is matched as the exact orjson-serialized string.
        self.assertEqual(
            RealmAuditLog.objects.filter(
                realm=user.realm,
                event_type=RealmAuditLog.USER_SETTING_CHANGED,
                event_time__gte=now,
                acting_user=user,
                modified_user=user,
                extra_data=orjson.dumps(expected_extra_data).decode(),
            ).count(),
            1,
        )
        self.assertEqual(getattr(user, setting), value)
def test_realm_domain_entries(self) -> None:
    """Adding, changing, and removing a realm domain must each write one
    audit-log row (ADDED/CHANGED/REMOVED) whose extra_data carries both the
    full resulting realm_domains list and the specific domain affected."""
    user = self.example_user("iago")
    initial_domains = get_realm_domains(user.realm)
    # --- add zulip.org (subdomains disallowed) ---
    now = timezone_now()
    realm_domain = do_add_realm_domain(user.realm, "zulip.org", False, acting_user=user)
    added_domain: Dict[str, Union[str, bool]] = {
        "domain": "zulip.org",
        "allow_subdomains": False,
    }
    expected_extra_data = {
        "realm_domains": initial_domains + [added_domain],
        "added_domain": added_domain,
    }
    self.assertEqual(
        RealmAuditLog.objects.filter(
            realm=user.realm,
            event_type=RealmAuditLog.REALM_DOMAIN_ADDED,
            event_time__gte=now,
            acting_user=user,
            extra_data=orjson.dumps(expected_extra_data).decode(),
        ).count(),
        1,
    )
    # --- flip allow_subdomains to True ---
    now = timezone_now()
    do_change_realm_domain(realm_domain, True, acting_user=user)
    changed_domain: Dict[str, Union[str, bool]] = {
        "domain": "zulip.org",
        "allow_subdomains": True,
    }
    expected_extra_data = {
        "realm_domains": initial_domains + [changed_domain],
        "changed_domain": changed_domain,
    }
    self.assertEqual(
        RealmAuditLog.objects.filter(
            realm=user.realm,
            event_type=RealmAuditLog.REALM_DOMAIN_CHANGED,
            event_time__gte=now,
            acting_user=user,
            extra_data=orjson.dumps(expected_extra_data).decode(),
        ).count(),
        1,
    )
    # --- remove the domain again; realm_domains returns to its initial state ---
    now = timezone_now()
    do_remove_realm_domain(realm_domain, acting_user=user)
    removed_domain = {
        "domain": "zulip.org",
        "allow_subdomains": True,
    }
    expected_extra_data = {
        "realm_domains": initial_domains,
        "removed_domain": removed_domain,
    }
    self.assertEqual(
        RealmAuditLog.objects.filter(
            realm=user.realm,
            event_type=RealmAuditLog.REALM_DOMAIN_REMOVED,
            event_time__gte=now,
            acting_user=user,
            extra_data=orjson.dumps(expected_extra_data).decode(),
        ).count(),
        1,
    )
| 37.942623 | 111 | 0.609203 |
528410f06ad5773943e405a1f8cc20972dc2d199 | 4,716 | py | Python | utils/benchmarks/unet.py | Dauriel/weather4cast2021 | 29e818c4bcd488ec84b51558bf5392e4a887db70 | [
"Apache-2.0"
] | null | null | null | utils/benchmarks/unet.py | Dauriel/weather4cast2021 | 29e818c4bcd488ec84b51558bf5392e4a887db70 | [
"Apache-2.0"
] | null | null | null | utils/benchmarks/unet.py | Dauriel/weather4cast2021 | 29e818c4bcd488ec84b51558bf5392e4a887db70 | [
"Apache-2.0"
] | null | null | null | "UNet implementation from https://github.com/jvanvugt/pytorch-unet"
import torch
from torch import nn
import torch.nn.functional as F
class UNet(nn.Module):
    """U-Net: Convolutional Networks for Biomedical Image Segmentation
    (Ronneberger et al., 2015) — https://arxiv.org/abs/1505.04597

    The defaults reproduce the architecture from the original paper.

    Args:
        in_channels (int): number of channels of the input image.
        n_classes (int): number of output channels (classes).
        depth (int): number of resolution levels in the network.
        wf (int): the first level uses 2**wf filters; each level doubles it.
        padding (bool): pad the 3x3 convolutions so output size equals input
            size (may introduce border artifacts).
        batch_norm (bool): insert BatchNorm after each activated conv.
        up_mode (str): 'upconv' for learned transposed-conv upsampling,
            'upsample' for bilinear upsampling followed by a 1x1 conv.
    """

    def __init__(
        self,
        in_channels=1,
        n_classes=2,
        depth=5,
        wf=6,
        padding=False,
        batch_norm=False,
        up_mode='upconv', **args,
    ):
        # Extra keyword arguments are accepted (and ignored) on purpose so a
        # shared config dict can be splatted into several model constructors.
        super().__init__()
        assert up_mode in ('upconv', 'upsample')
        self.padding = padding
        self.depth = depth

        # Contracting path: one conv block per level, doubling the width.
        channels = in_channels
        self.down_path = nn.ModuleList()
        for level in range(depth):
            width = 2 ** (wf + level)
            self.down_path.append(UNetConvBlock(channels, width, padding, batch_norm))
            channels = width

        # Expanding path: mirror of the contracting path, halving the width.
        self.up_path = nn.ModuleList()
        for level in reversed(range(depth - 1)):
            width = 2 ** (wf + level)
            self.up_path.append(UNetUpBlock(channels, width, up_mode, padding, batch_norm))
            channels = width

        # Final 1x1 conv maps the remaining features to class scores.
        self.last = nn.Conv2d(channels, n_classes, kernel_size=1)

    def forward(self, x):
        # Walk down, stashing each level's activation (except the deepest)
        # as a skip connection, and pooling between levels.
        skips = []
        bottom = len(self.down_path) - 1
        for level, down in enumerate(self.down_path):
            x = down(x)
            if level != bottom:
                skips.append(x)
                x = F.max_pool2d(x, 2)
        # Walk back up, consuming the skip connections deepest-first.
        for up, skip in zip(self.up_path, reversed(skips)):
            x = up(x, skip)
        return self.last(x)
class UNetConvBlock(nn.Module):
    """Two 3x3 conv + ReLU stages (optionally BatchNorm after each ReLU).

    `padding` is a bool: True keeps the spatial size, False shrinks it by
    two pixels per conv (the original 'valid' U-Net convolutions).
    """

    def __init__(self, in_size, out_size, padding, batch_norm):
        super(UNetConvBlock, self).__init__()
        pad = int(padding)
        layers = [
            nn.Conv2d(in_size, out_size, kernel_size=3, padding=pad),
            nn.ReLU(),
        ]
        if batch_norm:
            layers.append(nn.BatchNorm2d(out_size))
        layers.append(nn.Conv2d(out_size, out_size, kernel_size=3, padding=pad))
        layers.append(nn.ReLU())
        if batch_norm:
            layers.append(nn.BatchNorm2d(out_size))
        self.block = nn.Sequential(*layers)

    def forward(self, x):
        return self.block(x)
class UNetUpBlock(nn.Module):
    """One expanding-path step: upsample, center-crop the skip tensor to
    match, concatenate along channels, then run a UNetConvBlock."""

    def __init__(self, in_size, out_size, up_mode, padding, batch_norm):
        super(UNetUpBlock, self).__init__()
        # 'upconv' learns the upsampling; 'upsample' is fixed bilinear
        # interpolation followed by a 1x1 channel-reducing conv.
        if up_mode == 'upconv':
            self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=2, stride=2)
        elif up_mode == 'upsample':
            self.up = nn.Sequential(
                nn.Upsample(mode='bilinear', scale_factor=2),
                nn.Conv2d(in_size, out_size, kernel_size=1),
            )
        self.conv_block = UNetConvBlock(in_size, out_size, padding, batch_norm)

    def center_crop(self, layer, target_size):
        # Crop `layer` symmetrically to target_size (height, width).
        _, _, height, width = layer.size()
        top = (height - target_size[0]) // 2
        left = (width - target_size[1]) // 2
        return layer[:, :, top: top + target_size[0], left: left + target_size[1]]

    def forward(self, x, bridge):
        upsampled = self.up(x)
        # The skip tensor can be larger than the upsampled one when the
        # convolutions are unpadded, so crop it to match before concat.
        cropped = self.center_crop(bridge, upsampled.shape[2:])
        return self.conv_block(torch.cat([upsampled, cropped], 1))
e234719252edf3ce78bb1391beda64a18507ea21 | 21,250 | py | Python | VMBackup/main/freezesnapshotter.py | mbearup/azure-linux-extensions | ec6ee9a665a8140cb573dd5d1dc79804471a8401 | [
"Apache-2.0"
] | null | null | null | VMBackup/main/freezesnapshotter.py | mbearup/azure-linux-extensions | ec6ee9a665a8140cb573dd5d1dc79804471a8401 | [
"Apache-2.0"
] | null | null | null | VMBackup/main/freezesnapshotter.py | mbearup/azure-linux-extensions | ec6ee9a665a8140cb573dd5d1dc79804471a8401 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
try:
import urlparse as urlparser
except ImportError:
import urllib.parse as urlparser
import traceback
import datetime
try:
import ConfigParser as ConfigParsers
except ImportError:
import configparser as ConfigParsers
import multiprocessing as mp
import time
import json
from common import CommonVariables
from HttpUtil import HttpUtil
from Utils import Status
from Utils import HandlerUtil
from fsfreezer import FsFreezer
from guestsnapshotter import GuestSnapshotter
from hostsnapshotter import HostSnapshotter
from Utils import HostSnapshotObjects
import ExtensionErrorCodeHelper
# need to be implemented in next release
#from dhcpHandler import DhcpHandler
class FreezeSnapshotter(object):
    """Coordinates filesystem freeze/thaw with disk snapshotting for Azure VM
    backup, routing the snapshot request to the guest agent, the backup host
    service, or a fallback combination of the two, and mapping failures to
    extension error codes."""

    def __init__(self, logger, hutil , freezer, g_fsfreeze_on, para_parser, takeCrashConsistentSnapshot):
        # logger/hutil/freezer are extension-framework objects; para_parser
        # carries the parsed backup request (blobs, task ids, custom settings).
        self.logger = logger
        self.configfile = '/etc/azure/vmbackup.conf'
        self.hutil = hutil
        self.freezer = freezer
        self.g_fsfreeze_on = g_fsfreeze_on
        self.para_parser = para_parser
        if(para_parser.snapshotTaskToken == None):
            # Normalize a missing task token to the empty string so later
            # string concatenation/logging never sees None.
            para_parser.snapshotTaskToken = ''
        self.logger.log('snapshotTaskToken : ' + str(para_parser.snapshotTaskToken))
        # Defaults, possibly overridden by customSettings below.
        self.takeSnapshotFrom = CommonVariables.firstGuestThenHost
        self.isManaged = False
        self.taskId = self.para_parser.taskId
        # Azure wire-server IP (static fallback; DHCP discovery is disabled below).
        self.hostIp = '168.63.129.16'
        self.extensionErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.success
        self.takeCrashConsistentSnapshot = takeCrashConsistentSnapshot
        self.logger.log('FreezeSnapshotter : takeCrashConsistentSnapshot = ' + str(self.takeCrashConsistentSnapshot))
        #implement in next release
        '''
        # fetching wireserver IP from DHCP
        self.dhcpHandlerObj = None
        try:
            self.dhcpHandlerObj = DhcpHandler(self.logger)
            self.hostIp = self.dhcpHandlerObj.getHostEndoint()
        except Exception as e:
            errorMsg = "Failed to get hostIp from DHCP with error: %s, stack trace: %s" % (str(e), traceback.format_exc())
            self.logger.log(errorMsg, True, 'Error')
            self.hostIp = '168.63.129.16'
        '''
        self.logger.log( "hostIp : " + self.hostIp)
        try:
            if(para_parser.customSettings != None and para_parser.customSettings != ''):
                self.logger.log('customSettings : ' + str(para_parser.customSettings))
                customSettings = json.loads(para_parser.customSettings)
                # A value in the local config file overrides the snapshot
                # method requested by the backup service.
                snapshotMethodConfigValue = self.hutil.get_strvalue_from_configfile(CommonVariables.SnapshotMethod,customSettings['takeSnapshotFrom'])
                self.logger.log('snapshotMethodConfigValue : ' + str(snapshotMethodConfigValue))
                if snapshotMethodConfigValue != None and snapshotMethodConfigValue != '':
                    self.takeSnapshotFrom = snapshotMethodConfigValue
                else:
                    self.takeSnapshotFrom = customSettings['takeSnapshotFrom']
                # Host-side snapshots cannot honor disk exclusion, so force
                # guest-only mode if any disk is excluded from backup.
                if(para_parser.includedDisks != None and CommonVariables.isAnyDiskExcluded in para_parser.includedDisks.keys()):
                    if (para_parser.includedDisks[CommonVariables.isAnyDiskExcluded] == True):
                        self.logger.log('Some disks are excluded from backup. Setting the snapshot mode to onlyGuest.')
                        self.takeSnapshotFrom = CommonVariables.onlyGuest
                #Not hitting host when snapshot uri has special characters
                if self.hutil.UriHasSpecialCharacters(self.para_parser.blobs):
                    self.logger.log('Some disk blob Uris have special characters. Setting the snapshot mode to onlyGuest.')
                    self.takeSnapshotFrom = CommonVariables.onlyGuest
                self.isManaged = customSettings['isManagedVm']
                if( "backupTaskId" in customSettings.keys()):
                    self.taskId = customSettings["backupTaskId"]
        except Exception as e:
            errMsg = 'Failed to serialize customSettings with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
            self.logger.log(errMsg, True, 'Error')
            # Assume a managed VM on parse failure (the safer default here).
            self.isManaged = True
        self.logger.log('[FreezeSnapshotter] isManaged flag : ' + str(self.isManaged))

    def doFreezeSnapshot(self):
        """Entry point: dispatch to the configured snapshot strategy, then
        translate the raw per-blob results into (run_result, run_status,
        snapshot_info_array) for the handler."""
        run_result = CommonVariables.success
        run_status = 'success'
        all_failed = False
        unable_to_sleep = False
        """ Do Not remove below HttpUtil object creation. This is to ensure HttpUtil singleton object is created before freeze."""
        http_util = HttpUtil(self.logger)
        if(self.takeSnapshotFrom == CommonVariables.onlyGuest):
            run_result, run_status, blob_snapshot_info_array, all_failed, all_snapshots_failed, unable_to_sleep, is_inconsistent = self.takeSnapshotFromGuest()
        elif(self.takeSnapshotFrom == CommonVariables.firstGuestThenHost):
            run_result, run_status, blob_snapshot_info_array, all_failed, unable_to_sleep, is_inconsistent = self.takeSnapshotFromFirstGuestThenHost()
        elif(self.takeSnapshotFrom == CommonVariables.firstHostThenGuest):
            run_result, run_status, blob_snapshot_info_array, all_failed, unable_to_sleep, is_inconsistent = self.takeSnapshotFromFirstHostThenGuest()
        elif(self.takeSnapshotFrom == CommonVariables.onlyHost):
            run_result, run_status, blob_snapshot_info_array, all_failed, unable_to_sleep, is_inconsistent = self.takeSnapshotFromOnlyHost()
        else :
            # Unknown configuration value: fall back to host-first.
            self.logger.log('Snapshot method did not match any listed type, taking firstHostThenGuest as default')
            run_result, run_status, blob_snapshot_info_array, all_failed, unable_to_sleep, is_inconsistent = self.takeSnapshotFromFirstHostThenGuest()
        self.logger.log('doFreezeSnapshot : run_result - {0} run_status - {1} all_failed - {2} unable_to_sleep - {3} is_inconsistent - {4} values post snapshot'.format(str(run_result), str(run_status), str(all_failed), str(unable_to_sleep), str(is_inconsistent)))
        if (run_result == CommonVariables.success):
            # Even a "successful" run may carry per-blob failures; re-derive
            # the final result/status from the blob-level outcomes.
            run_result, run_status = self.updateErrorCode(blob_snapshot_info_array, all_failed, unable_to_sleep, is_inconsistent)
        snapshot_info_array = self.update_snapshotinfoarray(blob_snapshot_info_array)
        if not (run_result == CommonVariables.success):
            self.hutil.SetExtErrorCode(self.extensionErrorCode)
        return run_result, run_status, snapshot_info_array

    def update_snapshotinfoarray(self, blob_snapshot_info_array):
        """Convert blob-level snapshot results into SnapshotInfoObj entries
        for the status blob; None entries are skipped."""
        snapshot_info_array = []
        self.logger.log('updating snapshot info array from blob snapshot info')
        if blob_snapshot_info_array != None and blob_snapshot_info_array !=[]:
            for blob_snapshot_info in blob_snapshot_info_array:
                if blob_snapshot_info != None:
                    snapshot_info_array.append(Status.SnapshotInfoObj(blob_snapshot_info.isSuccessful, blob_snapshot_info.snapshotUri, blob_snapshot_info.errorMessage))
        return snapshot_info_array

    def updateErrorCode(self, blob_snapshot_info_array, all_failed, unable_to_sleep, is_inconsistent):
        """Map blob-level snapshot outcomes to a final (run_result, run_status),
        setting self.extensionErrorCode for the specific failure class.
        Error-message substrings from the storage service are used to detect
        throttling and snapshot-count-limit failures."""
        run_result = CommonVariables.success
        any_failed = False
        run_status = 'success'
        if unable_to_sleep:
            run_result = CommonVariables.error
            run_status = 'error'
            error_msg = 'T:S Machine unable to sleep'
            self.logger.log(error_msg, True, 'Error')
        elif is_inconsistent == True :
            run_result = CommonVariables.error
            run_status = 'error'
            error_msg = 'Snapshots are inconsistent'
            self.logger.log(error_msg, True, 'Error')
        elif blob_snapshot_info_array != None:
            for blob_snapshot_info in blob_snapshot_info_array:
                if blob_snapshot_info != None and blob_snapshot_info.errorMessage != None :
                    if 'The rate of snapshot blob calls is exceeded' in blob_snapshot_info.errorMessage:
                        # Storage throttling: retryable.
                        run_result = CommonVariables.FailedRetryableSnapshotRateExceeded
                        run_status = 'error'
                        error_msg = 'Retrying when snapshot failed with SnapshotRateExceeded'
                        self.extensionErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedRetryableSnapshotRateExceeded
                        self.logger.log(error_msg, True, 'Error')
                        break
                    elif 'The snapshot count against this blob has been exceeded' in blob_snapshot_info.errorMessage:
                        # Hard per-blob snapshot cap: non-retryable.
                        run_result = CommonVariables.FailedSnapshotLimitReached
                        run_status = 'error'
                        error_msg = 'T:S Enable failed with FailedSnapshotLimitReached errror'
                        self.extensionErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedSnapshotLimitReached
                        error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(self.extensionErrorCode)
                        self.logger.log(error_msg, True, 'Error')
                        break
                    elif blob_snapshot_info.isSuccessful == False and not all_failed:
                        any_failed = True
                elif blob_snapshot_info != None and blob_snapshot_info.isSuccessful == False:
                    any_failed = True
        # all_failed -> likely no network at all; any_failed -> a restricted
        # network let only some snapshot calls through.
        if run_result == CommonVariables.success and all_failed:
            run_status = 'error'
            run_result = CommonVariables.FailedRetryableSnapshotFailedNoNetwork
            error_msg = 'T:S Enable failed with FailedRetryableSnapshotFailedNoNetwork errror'
            self.extensionErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedRetryableSnapshotFailedNoNetwork
            error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(self.extensionErrorCode)
            self.logger.log(error_msg, True, 'Error')
        elif run_result == CommonVariables.success and any_failed:
            run_result = CommonVariables.FailedRetryableSnapshotFailedNoNetwork
            error_msg = 'T:S Enable failed with FailedRetryableSnapshotFailedRestrictedNetwork errror'
            self.extensionErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedRetryableSnapshotFailedRestrictedNetwork
            error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(self.extensionErrorCode)
            run_status = 'error'
            self.logger.log(error_msg, True, 'Error')
        return run_result, run_status

    def freeze(self):
        """Freeze the filesystems (bounded by the configured timeout) and
        classify freeze failures into retryable extension error codes.
        Returns (run_result, run_status)."""
        try:
            timeout = self.hutil.get_intvalue_from_configfile('timeout',60)
            self.logger.log('T:S freeze, timeout value ' + str(timeout))
            time_before_freeze = datetime.datetime.now()
            freeze_result,timedout = self.freezer.freeze_safe(timeout)
            time_after_freeze = datetime.datetime.now()
            freezeTimeTaken = time_after_freeze-time_before_freeze
            self.logger.log('T:S ***** freeze, time_before_freeze=' + str(time_before_freeze) + ", time_after_freeze=" + str(time_after_freeze) + ", freezeTimeTaken=" + str(freezeTimeTaken))
            # NOTE(review): the 5-second subtraction presumably compensates for
            # a fixed sleep inside freeze_safe — confirm against fsfreezer.
            HandlerUtil.HandlerUtility.add_to_telemetery_data("FreezeTime", str(time_after_freeze-time_before_freeze-datetime.timedelta(seconds=5)))
            run_result = CommonVariables.success
            run_status = 'success'
            all_failed= False
            is_inconsistent = False
            self.logger.log('T:S freeze result ' + str(freeze_result) + ', timedout :' + str(timedout))
            if (timedout == True):
                run_result = CommonVariables.FailedFsFreezeTimeout
                run_status = 'error'
                error_msg = 'T:S ###### Enable failed with error: freeze took longer than timeout'
                self.extensionErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedRetryableFsFreezeTimeout
                error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(self.extensionErrorCode)
                self.logger.log(error_msg, True, 'Error')
            elif(freeze_result is not None and len(freeze_result.errors) > 0 and CommonVariables.unable_to_open_err_string in str(freeze_result)):
                run_result = CommonVariables.FailedUnableToOpenMount
                run_status = 'error'
                error_msg = 'T:S Enable failed with error: ' + str(freeze_result)
                self.extensionErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedRetryableUnableToOpenMount
                error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(self.extensionErrorCode)
                self.logger.log(error_msg, True, 'Warning')
            elif(freeze_result is not None and len(freeze_result.errors) > 0):
                run_result = CommonVariables.FailedFsFreezeFailed
                run_status = 'error'
                error_msg = 'T:S Enable failed with error: ' + str(freeze_result)
                self.extensionErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedRetryableFsFreezeFailed
                error_msg = error_msg + ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.StatusCodeStringBuilder(self.extensionErrorCode)
                self.logger.log(error_msg, True, 'Warning')
        except Exception as e:
            errMsg = 'Failed to do the freeze with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
            self.logger.log(errMsg, True, 'Error')
            run_result = CommonVariables.error
            run_status = 'error'
        return run_result, run_status

    def takeSnapshotFromGuest(self):
        """Snapshot all blobs from inside the guest (after an optional
        freeze). Returns (run_result, run_status, blob_snapshot_info_array,
        all_failed, all_snapshots_failed, unable_to_sleep, is_inconsistent)."""
        run_result = CommonVariables.success
        run_status = 'success'
        all_failed= False
        is_inconsistent = False
        unable_to_sleep = False
        blob_snapshot_info_array = None
        all_snapshots_failed = False
        try:
            # Without blob URIs there is nothing to snapshot from the guest.
            if( self.para_parser.blobs == None or len(self.para_parser.blobs) == 0) :
                run_result = CommonVariables.FailedRetryableSnapshotFailedNoNetwork
                run_status = 'error'
                error_msg = 'T:S taking snapshot failed as blobs are empty or none'
                self.logger.log(error_msg, True, 'Error')
                all_failed = True
                all_snapshots_failed = True
                return run_result, run_status, blob_snapshot_info_array, all_failed, all_snapshots_failed, unable_to_sleep, is_inconsistent
            if self.g_fsfreeze_on :
                run_result, run_status = self.freeze()
            # Proceed despite a failed freeze when a crash-consistent
            # snapshot was explicitly requested.
            if(run_result == CommonVariables.success or self.takeCrashConsistentSnapshot == True):
                HandlerUtil.HandlerUtility.add_to_telemetery_data(CommonVariables.snapshotCreator, CommonVariables.guestExtension)
                snap_shotter = GuestSnapshotter(self.logger, self.hutil)
                self.logger.log('T:S doing snapshot now...')
                time_before_snapshot = datetime.datetime.now()
                snapshot_result, blob_snapshot_info_array, all_failed, is_inconsistent, unable_to_sleep, all_snapshots_failed = snap_shotter.snapshotall(self.para_parser, self.freezer, self.g_fsfreeze_on)
                time_after_snapshot = datetime.datetime.now()
                snapshotTimeTaken = time_after_snapshot-time_before_snapshot
                self.logger.log('T:S ***** takeSnapshotFromGuest, time_before_snapshot=' + str(time_before_snapshot) + ", time_after_snapshot=" + str(time_after_snapshot) + ", snapshotTimeTaken=" + str(snapshotTimeTaken))
                HandlerUtil.HandlerUtility.add_to_telemetery_data("snapshotTimeTaken", str(snapshotTimeTaken))
                self.logger.log('T:S snapshotall ends...', True)
        except Exception as e:
            errMsg = 'Failed to do the snapshot with error: %s, stack trace: %s' % (str(e), traceback.format_exc())
            self.logger.log(errMsg, True, 'Error')
            run_result = CommonVariables.error
            run_status = 'error'
        return run_result, run_status, blob_snapshot_info_array, all_failed, all_snapshots_failed, unable_to_sleep, is_inconsistent

    def takeSnapshotFromFirstGuestThenHost(self):
        """Try the guest snapshot first; if every snapshot failed, thaw the
        filesystems and retry through the backup host service."""
        run_result = CommonVariables.success
        run_status = 'success'
        all_failed= False
        is_inconsistent = False
        unable_to_sleep = False
        blob_snapshot_info_array = None
        all_snapshots_failed = False
        run_result, run_status, blob_snapshot_info_array, all_failed, all_snapshots_failed, unable_to_sleep, is_inconsistent = self.takeSnapshotFromGuest()
        if(all_snapshots_failed):
            try:
                #to make sure binary is thawed
                self.logger.log('[takeSnapshotFromFirstGuestThenHost] : Thawing again post the guest snapshotting failure')
                self.freezer.thaw_safe()
            except Exception as e:
                self.logger.log('[takeSnapshotFromFirstGuestThenHost] : Exception in Thaw %s, stack trace: %s' % (str(e), traceback.format_exc()))
            run_result, run_status, blob_snapshot_info_array,all_failed, unable_to_sleep, is_inconsistent = self.takeSnapshotFromOnlyHost()
        return run_result, run_status, blob_snapshot_info_array, all_failed, unable_to_sleep, is_inconsistent

    def takeSnapshotFromFirstHostThenGuest(self):
        """Probe the backup host service with a pre-snapshot request; use the
        host path if it answers 200/201, otherwise fall back to the guest."""
        run_result = CommonVariables.success
        run_status = 'success'
        all_failed= False
        is_inconsistent = False
        unable_to_sleep = False
        blob_snapshot_info_array = None
        snap_shotter = HostSnapshotter(self.logger, self.hostIp)
        pre_snapshot_statuscode = snap_shotter.pre_snapshot(self.para_parser, self.taskId)
        if(pre_snapshot_statuscode == 200 or pre_snapshot_statuscode == 201):
            run_result, run_status, blob_snapshot_info_array, all_failed, unable_to_sleep, is_inconsistent = self.takeSnapshotFromOnlyHost()
        else:
            run_result, run_status, blob_snapshot_info_array, all_failed, all_snapshots_failed, unable_to_sleep, is_inconsistent = self.takeSnapshotFromGuest()
            # Distinguish a total network outage from a restricted network
            # that let only some snapshot calls through.
            if all_snapshots_failed and run_result != CommonVariables.success:
                self.extensionErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedRetryableSnapshotFailedNoNetwork
            elif run_result != CommonVariables.success :
                self.extensionErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.FailedRetryableSnapshotFailedRestrictedNetwork
        return run_result, run_status, blob_snapshot_info_array, all_failed, unable_to_sleep, is_inconsistent

    def takeSnapshotFromOnlyHost(self):
        """Snapshot all disks via the backup host service (after an optional
        freeze). Returns (run_result, run_status, blob_snapshot_info_array,
        all_failed, unable_to_sleep, is_inconsistent)."""
        run_result = CommonVariables.success
        run_status = 'success'
        all_failed= False
        is_inconsistent = False
        unable_to_sleep = False
        blob_snapshot_info_array = None
        self.logger.log('Taking Snapshot through Host')
        HandlerUtil.HandlerUtility.add_to_telemetery_data(CommonVariables.snapshotCreator, CommonVariables.backupHostService)
        if self.g_fsfreeze_on :
            run_result, run_status = self.freeze()
        # Proceed despite a failed freeze when a crash-consistent snapshot
        # was explicitly requested.
        if(run_result == CommonVariables.success or self.takeCrashConsistentSnapshot == True):
            snap_shotter = HostSnapshotter(self.logger, self.hostIp)
            self.logger.log('T:S doing snapshot now...')
            time_before_snapshot = datetime.datetime.now()
            blob_snapshot_info_array, all_failed, is_inconsistent, unable_to_sleep = snap_shotter.snapshotall(self.para_parser, self.freezer, self.g_fsfreeze_on, self.taskId)
            time_after_snapshot = datetime.datetime.now()
            snapshotTimeTaken = time_after_snapshot-time_before_snapshot
            self.logger.log('T:S takeSnapshotFromHost, time_before_snapshot=' + str(time_before_snapshot) + ", time_after_snapshot=" + str(time_after_snapshot) + ", snapshotTimeTaken=" + str(snapshotTimeTaken))
            HandlerUtil.HandlerUtility.add_to_telemetery_data("snapshotTimeTaken", str(snapshotTimeTaken))
            self.logger.log('T:S snapshotall ends...', True)
        return run_result, run_status, blob_snapshot_info_array, all_failed, unable_to_sleep, is_inconsistent
| 57.744565 | 263 | 0.694588 |
4f737bdfdc34823add388066866aa9566a9a7b93 | 85,980 | py | Python | sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/aio/operations/_tag_operations.py | mohamedshabanofficial/azure-sdk-for-python | 81c585f310cd2ec23d2ad145173958914a075a58 | [
"MIT"
] | 2 | 2021-03-24T06:26:11.000Z | 2021-04-18T15:55:59.000Z | sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/aio/operations/_tag_operations.py | mohamedshabanofficial/azure-sdk-for-python | 81c585f310cd2ec23d2ad145173958914a075a58 | [
"MIT"
] | 2 | 2021-11-03T06:10:36.000Z | 2021-12-01T06:29:39.000Z | sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/aio/operations/_tag_operations.py | mohamedshabanofficial/azure-sdk-for-python | 81c585f310cd2ec23d2ad145173958914a075a58 | [
"MIT"
] | 1 | 2021-05-19T02:55:10.000Z | 2021-05-19T02:55:10.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class TagOperations:
"""TagOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.apimanagement.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
    # Wire up the shared service-client pipeline and the (de)serializers
    # used by every operation in this group.
    self._client = client
    self._serialize = serializer
    self._deserialize = deserializer
    self._config = config
    def list_by_operation(
        self,
        resource_group_name: str,
        service_name: str,
        api_id: str,
        operation_id: str,
        filter: Optional[str] = None,
        top: Optional[int] = None,
        skip: Optional[int] = None,
        **kwargs
    ) -> AsyncIterable["_models.TagCollection"]:
        """Lists all Tags associated with the Operation.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param service_name: The name of the API Management service.
        :type service_name: str
        :param api_id: API revision identifier. Must be unique in the current API Management service
         instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
        :type api_id: str
        :param operation_id: Operation identifier within an API. Must be unique in the current API
         Management service instance.
        :type operation_id: str
        :param filter: | Field | Usage | Supported operators | Supported
         functions |</br>|-------------|-------------|-------------|-------------|</br>| displayName
         | filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith |</br>| name |
         filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith |</br>.
        :type filter: str
        :param top: Number of records to return.
        :type top: int
        :param skip: Number of records to skip.
        :type skip: int
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either TagCollection or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.apimanagement.models.TagCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.TagCollection"]
        # Map HTTP status codes to the azure-core exceptions raised for them;
        # callers may extend/override via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-01-preview"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the GET request for the first page (next_link is None: format
            # the URL template and add query parameters) or for a continuation
            # page (next_link is the opaque URL returned by the service and is
            # used verbatim, without re-adding query parameters).
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_by_operation.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
                    'apiId': self._serialize.url("api_id", api_id, 'str', max_length=256, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
                    'operationId': self._serialize.url("operation_id", operation_id, 'str', max_length=80, min_length=1),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                if filter is not None:
                    query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
                if top is not None:
                    query_parameters['$top'] = self._serialize.query("top", top, 'int', minimum=1)
                if skip is not None:
                    query_parameters['$skip'] = self._serialize.query("skip", skip, 'int', minimum=0)
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Turn one page's HTTP response into (continuation token, items).
            deserialized = self._deserialize('TagCollection', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page; only 200 is a success for this listing call.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                error = self._deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        # Lazily-iterating pager; no request is sent until iteration starts.
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_operation.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/operations/{operationId}/tags'}  # type: ignore
async def get_entity_state_by_operation(
self,
resource_group_name: str,
service_name: str,
api_id: str,
operation_id: str,
tag_id: str,
**kwargs
) -> bool:
"""Gets the entity state version of the tag specified by its identifier.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param api_id: API revision identifier. Must be unique in the current API Management service
instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:type api_id: str
:param operation_id: Operation identifier within an API. Must be unique in the current API
Management service instance.
:type operation_id: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01-preview"
accept = "application/json"
# Construct URL
url = self.get_entity_state_by_operation.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'apiId': self._serialize.url("api_id", api_id, 'str', max_length=256, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'operationId': self._serialize.url("operation_id", operation_id, 'str', max_length=80, min_length=1),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.head(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
if cls:
return cls(pipeline_response, None, response_headers)
return 200 <= response.status_code <= 299
get_entity_state_by_operation.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/operations/{operationId}/tags/{tagId}'} # type: ignore
async def get_by_operation(
self,
resource_group_name: str,
service_name: str,
api_id: str,
operation_id: str,
tag_id: str,
**kwargs
) -> "_models.TagContract":
"""Get tag associated with the Operation.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param api_id: API revision identifier. Must be unique in the current API Management service
instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:type api_id: str
:param operation_id: Operation identifier within an API. Must be unique in the current API
Management service instance.
:type operation_id: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.TagContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01-preview"
accept = "application/json"
# Construct URL
url = self.get_by_operation.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'apiId': self._serialize.url("api_id", api_id, 'str', max_length=256, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'operationId': self._serialize.url("operation_id", operation_id, 'str', max_length=80, min_length=1),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('TagContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get_by_operation.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/operations/{operationId}/tags/{tagId}'} # type: ignore
async def assign_to_operation(
self,
resource_group_name: str,
service_name: str,
api_id: str,
operation_id: str,
tag_id: str,
**kwargs
) -> "_models.TagContract":
"""Assign tag to the Operation.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param api_id: API revision identifier. Must be unique in the current API Management service
instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:type api_id: str
:param operation_id: Operation identifier within an API. Must be unique in the current API
Management service instance.
:type operation_id: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.TagContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01-preview"
accept = "application/json"
# Construct URL
url = self.assign_to_operation.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'apiId': self._serialize.url("api_id", api_id, 'str', max_length=256, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'operationId': self._serialize.url("operation_id", operation_id, 'str', max_length=80, min_length=1),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('TagContract', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('TagContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
assign_to_operation.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/operations/{operationId}/tags/{tagId}'} # type: ignore
async def detach_from_operation(
self,
resource_group_name: str,
service_name: str,
api_id: str,
operation_id: str,
tag_id: str,
**kwargs
) -> None:
"""Detach the tag from the Operation.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param api_id: API revision identifier. Must be unique in the current API Management service
instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:type api_id: str
:param operation_id: Operation identifier within an API. Must be unique in the current API
Management service instance.
:type operation_id: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01-preview"
accept = "application/json"
# Construct URL
url = self.detach_from_operation.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'apiId': self._serialize.url("api_id", api_id, 'str', max_length=256, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'operationId': self._serialize.url("operation_id", operation_id, 'str', max_length=80, min_length=1),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
detach_from_operation.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/operations/{operationId}/tags/{tagId}'} # type: ignore
    def list_by_api(
        self,
        resource_group_name: str,
        service_name: str,
        api_id: str,
        filter: Optional[str] = None,
        top: Optional[int] = None,
        skip: Optional[int] = None,
        **kwargs
    ) -> AsyncIterable["_models.TagCollection"]:
        """Lists all Tags associated with the API.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param service_name: The name of the API Management service.
        :type service_name: str
        :param api_id: API revision identifier. Must be unique in the current API Management service
         instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
        :type api_id: str
        :param filter: | Field | Usage | Supported operators | Supported
         functions |</br>|-------------|-------------|-------------|-------------|</br>| displayName
         | filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith |</br>| name |
         filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith |</br>.
        :type filter: str
        :param top: Number of records to return.
        :type top: int
        :param skip: Number of records to skip.
        :type skip: int
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either TagCollection or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.apimanagement.models.TagCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.TagCollection"]
        # Map HTTP status codes to the azure-core exceptions raised for them;
        # callers may extend/override via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-01-preview"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the GET request for the first page (next_link is None: format
            # the URL template and add query parameters) or for a continuation
            # page (next_link is the opaque URL returned by the service and is
            # used verbatim, without re-adding query parameters).
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_by_api.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
                    'apiId': self._serialize.url("api_id", api_id, 'str', max_length=256, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                if filter is not None:
                    query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
                if top is not None:
                    query_parameters['$top'] = self._serialize.query("top", top, 'int', minimum=1)
                if skip is not None:
                    query_parameters['$skip'] = self._serialize.query("skip", skip, 'int', minimum=0)
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Turn one page's HTTP response into (continuation token, items).
            deserialized = self._deserialize('TagCollection', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page; only 200 is a success for this listing call.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                error = self._deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        # Lazily-iterating pager; no request is sent until iteration starts.
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_api.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/tags'}  # type: ignore
async def get_entity_state_by_api(
self,
resource_group_name: str,
service_name: str,
api_id: str,
tag_id: str,
**kwargs
) -> bool:
"""Gets the entity state version of the tag specified by its identifier.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param api_id: API revision identifier. Must be unique in the current API Management service
instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:type api_id: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01-preview"
accept = "application/json"
# Construct URL
url = self.get_entity_state_by_api.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'apiId': self._serialize.url("api_id", api_id, 'str', max_length=256, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.head(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
if cls:
return cls(pipeline_response, None, response_headers)
return 200 <= response.status_code <= 299
get_entity_state_by_api.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/tags/{tagId}'} # type: ignore
async def get_by_api(
self,
resource_group_name: str,
service_name: str,
api_id: str,
tag_id: str,
**kwargs
) -> "_models.TagContract":
"""Get tag associated with the API.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param api_id: API revision identifier. Must be unique in the current API Management service
instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:type api_id: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.TagContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01-preview"
accept = "application/json"
# Construct URL
url = self.get_by_api.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'apiId': self._serialize.url("api_id", api_id, 'str', max_length=256, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('TagContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get_by_api.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/tags/{tagId}'} # type: ignore
async def assign_to_api(
self,
resource_group_name: str,
service_name: str,
api_id: str,
tag_id: str,
**kwargs
) -> "_models.TagContract":
"""Assign tag to the Api.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param api_id: API revision identifier. Must be unique in the current API Management service
instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:type api_id: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.TagContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01-preview"
accept = "application/json"
# Construct URL
url = self.assign_to_api.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'apiId': self._serialize.url("api_id", api_id, 'str', max_length=256, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 200:
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('TagContract', pipeline_response)
if response.status_code == 201:
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('TagContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
assign_to_api.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/tags/{tagId}'} # type: ignore
async def detach_from_api(
self,
resource_group_name: str,
service_name: str,
api_id: str,
tag_id: str,
**kwargs
) -> None:
"""Detach the tag from the Api.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param api_id: API revision identifier. Must be unique in the current API Management service
instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:type api_id: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01-preview"
accept = "application/json"
# Construct URL
url = self.detach_from_api.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'apiId': self._serialize.url("api_id", api_id, 'str', max_length=256, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
detach_from_api.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/tags/{tagId}'} # type: ignore
    def list_by_product(
        self,
        resource_group_name: str,
        service_name: str,
        product_id: str,
        filter: Optional[str] = None,
        top: Optional[int] = None,
        skip: Optional[int] = None,
        **kwargs
    ) -> AsyncIterable["_models.TagCollection"]:
        """Lists all Tags associated with the Product.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param service_name: The name of the API Management service.
        :type service_name: str
        :param product_id: Product identifier. Must be unique in the current API Management service
         instance.
        :type product_id: str
        :param filter: |   Field     |     Usage     |     Supported operators     |     Supported
         functions     |</br>|-------------|-------------|-------------|-------------|</br>| displayName
         | filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith |</br>| name |
         filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith |</br>.
        :type filter: str
        :param top: Number of records to return.
        :type top: int
        :param skip: Number of records to skip.
        :type skip: int
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either TagCollection or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.apimanagement.models.TagCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.TagCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-01-preview"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the GET request for one page. The first page is constructed
            # from the operation's URL template plus the optional OData query
            # parameters; continuation pages reuse the server-supplied next_link
            # verbatim, which already encodes the query string.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_by_product.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
                    'productId': self._serialize.url("product_id", product_id, 'str', max_length=256, min_length=1),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                if filter is not None:
                    query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
                if top is not None:
                    query_parameters['$top'] = self._serialize.query("top", top, 'int', minimum=1)
                if skip is not None:
                    query_parameters['$skip'] = self._serialize.query("skip", skip, 'int', minimum=0)
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Turn one page into (continuation token, async iterable of items).
            # The optional ``cls`` hook may transform the raw element list.
            deserialized = self._deserialize('TagCollection', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page through the pipeline, raising a typed error for
            # any non-200 status before handing the response to extract_data.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_product.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/products/{productId}/tags'}  # type: ignore
async def get_entity_state_by_product(
self,
resource_group_name: str,
service_name: str,
product_id: str,
tag_id: str,
**kwargs
) -> bool:
"""Gets the entity state version of the tag specified by its identifier.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param product_id: Product identifier. Must be unique in the current API Management service
instance.
:type product_id: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01-preview"
accept = "application/json"
# Construct URL
url = self.get_entity_state_by_product.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'productId': self._serialize.url("product_id", product_id, 'str', max_length=256, min_length=1),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.head(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
if cls:
return cls(pipeline_response, None, response_headers)
return 200 <= response.status_code <= 299
get_entity_state_by_product.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/products/{productId}/tags/{tagId}'} # type: ignore
async def get_by_product(
self,
resource_group_name: str,
service_name: str,
product_id: str,
tag_id: str,
**kwargs
) -> "_models.TagContract":
"""Get tag associated with the Product.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param product_id: Product identifier. Must be unique in the current API Management service
instance.
:type product_id: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.TagContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01-preview"
accept = "application/json"
# Construct URL
url = self.get_by_product.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'productId': self._serialize.url("product_id", product_id, 'str', max_length=256, min_length=1),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('TagContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get_by_product.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/products/{productId}/tags/{tagId}'} # type: ignore
async def assign_to_product(
self,
resource_group_name: str,
service_name: str,
product_id: str,
tag_id: str,
**kwargs
) -> "_models.TagContract":
"""Assign tag to the Product.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param product_id: Product identifier. Must be unique in the current API Management service
instance.
:type product_id: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.TagContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01-preview"
accept = "application/json"
# Construct URL
url = self.assign_to_product.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'productId': self._serialize.url("product_id", product_id, 'str', max_length=256, min_length=1),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('TagContract', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('TagContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
assign_to_product.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/products/{productId}/tags/{tagId}'} # type: ignore
async def detach_from_product(
self,
resource_group_name: str,
service_name: str,
product_id: str,
tag_id: str,
**kwargs
) -> None:
"""Detach the tag from the Product.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param product_id: Product identifier. Must be unique in the current API Management service
instance.
:type product_id: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01-preview"
accept = "application/json"
# Construct URL
url = self.detach_from_product.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'productId': self._serialize.url("product_id", product_id, 'str', max_length=256, min_length=1),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
detach_from_product.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/products/{productId}/tags/{tagId}'} # type: ignore
    def list_by_service(
        self,
        resource_group_name: str,
        service_name: str,
        filter: Optional[str] = None,
        top: Optional[int] = None,
        skip: Optional[int] = None,
        scope: Optional[str] = None,
        **kwargs
    ) -> AsyncIterable["_models.TagCollection"]:
        """Lists a collection of tags defined within a service instance.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param service_name: The name of the API Management service.
        :type service_name: str
        :param filter: |   Field     |     Usage     |     Supported operators     |     Supported
         functions     |</br>|-------------|-------------|-------------|-------------|</br>| name |
         filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith |</br>|
         displayName | filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith
         |</br>.
        :type filter: str
        :param top: Number of records to return.
        :type top: int
        :param skip: Number of records to skip.
        :type skip: int
        :param scope: Scope like 'apis', 'products' or 'apis/{apiId}.
        :type scope: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either TagCollection or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.apimanagement.models.TagCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.TagCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-01-preview"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the GET request for one page. The first page is constructed
            # from the operation's URL template plus the optional OData/scope
            # query parameters; continuation pages reuse the server-supplied
            # next_link verbatim, which already encodes the query string.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_by_service.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                if filter is not None:
                    query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
                if top is not None:
                    query_parameters['$top'] = self._serialize.query("top", top, 'int', minimum=1)
                if skip is not None:
                    query_parameters['$skip'] = self._serialize.query("skip", skip, 'int', minimum=0)
                if scope is not None:
                    query_parameters['scope'] = self._serialize.query("scope", scope, 'str')
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Turn one page into (continuation token, async iterable of items).
            # The optional ``cls`` hook may transform the raw element list.
            deserialized = self._deserialize('TagCollection', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page through the pipeline, raising a typed error for
            # any non-200 status before handing the response to extract_data.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_service.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/tags'}  # type: ignore
async def get_entity_state(
self,
resource_group_name: str,
service_name: str,
tag_id: str,
**kwargs
) -> bool:
"""Gets the entity state version of the tag specified by its identifier.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01-preview"
accept = "application/json"
# Construct URL
url = self.get_entity_state.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.head(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
if cls:
return cls(pipeline_response, None, response_headers)
return 200 <= response.status_code <= 299
get_entity_state.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/tags/{tagId}'} # type: ignore
async def get(
self,
resource_group_name: str,
service_name: str,
tag_id: str,
**kwargs
) -> "_models.TagContract":
"""Gets the details of the tag specified by its identifier.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.TagContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('TagContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/tags/{tagId}'} # type: ignore
async def create_or_update(
self,
resource_group_name: str,
service_name: str,
tag_id: str,
parameters: "_models.TagCreateUpdateParameters",
if_match: Optional[str] = None,
**kwargs
) -> "_models.TagContract":
"""Creates a tag.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:param parameters: Create parameters.
:type parameters: ~azure.mgmt.apimanagement.models.TagCreateUpdateParameters
:param if_match: ETag of the Entity. Not required when creating an entity, but required when
updating an entity.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.TagContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagCreateUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 200:
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('TagContract', pipeline_response)
if response.status_code == 201:
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('TagContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/tags/{tagId}'} # type: ignore
async def update(
self,
resource_group_name: str,
service_name: str,
tag_id: str,
if_match: str,
parameters: "_models.TagCreateUpdateParameters",
**kwargs
) -> "_models.TagContract":
"""Updates the details of the tag specified by its identifier.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param tag_id: Tag identifier. Must be unique in the current API Management service instance.
:type tag_id: str
:param if_match: ETag of the Entity. ETag should match the current entity state from the header
response of the GET request or it should be * for unconditional update.
:type if_match: str
:param parameters: Update parameters.
:type parameters: ~azure.mgmt.apimanagement.models.TagCreateUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.TagContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagCreateUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('TagContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/tags/{tagId}'} # type: ignore
    async def delete(
        self,
        resource_group_name: str,
        service_name: str,
        tag_id: str,
        if_match: str,
        **kwargs
    ) -> None:
        """Deletes specific tag of the API Management service instance.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param service_name: The name of the API Management service.
        :type service_name: str
        :param tag_id: Tag identifier. Must be unique in the current API Management service instance.
        :type tag_id: str
        :param if_match: ETag of the Entity. ETag should match the current entity state from the header
         response of the GET request or it should be * for unconditional update.
        :type if_match: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # NOTE: auto-generated Azure SDK operation; the structure below
        # (URL -> params -> headers -> run pipeline -> map errors) is the
        # standard generated request flow.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-01-preview"
        accept = "application/json"

        # Construct URL
        url = self.delete.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
            'tagId': self._serialize.url("tag_id", tag_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200 and 204 both mean the tag is gone; anything else is an ARM error.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/tags/{tagId}'}  # type: ignore
| 52.17233 | 245 | 0.649942 |
7d113ba3f7287292d18ce22a62303b05a867a1b4 | 704 | py | Python | regulations/generator/layers/graphics.py | navigo/regulations-site | 910c24e46f4e921210a40da452dff69feae692d4 | [
"CC0-1.0"
] | 18 | 2016-09-22T05:05:16.000Z | 2021-07-28T18:13:48.000Z | regulations/generator/layers/graphics.py | navigo/regulations-site | 910c24e46f4e921210a40da452dff69feae692d4 | [
"CC0-1.0"
] | 260 | 2016-04-05T22:06:10.000Z | 2021-01-07T22:08:15.000Z | regulations/generator/layers/graphics.py | navigo/regulations-site | 910c24e46f4e921210a40da452dff69feae692d4 | [
"CC0-1.0"
] | 25 | 2016-04-06T03:26:42.000Z | 2020-10-19T16:49:23.000Z | from django.template import loader
from regulations.generator.layers import utils
from regulations.generator.layers.base import SearchReplaceLayer
class GraphicsLayer(SearchReplaceLayer):
    """Search-and-replace layer that renders graphic references as <img> tags."""

    shorthand = 'graphics'
    data_source = 'graphics'

    def __init__(self, layer):
        self.layer = layer
        # Template is resolved once per layer instance, not per replacement.
        self.template = loader.get_template('regulations/layers/graphics.html')

    def replacements_for(self, original, data):
        """Replace all instances of graphics with an img tag"""
        render_context = dict(url=data['url'], alt=data['alt'])
        # Thumbnail is optional; include it only when the layer data carries one.
        if 'thumb_url' in data:
            render_context.update(thumb_url=data['thumb_url'])
        yield utils.render_template(self.template, render_context)
| 32 | 79 | 0.691761 |
756ff6c5797d71b7565dfb6eb7c3f67fbb8c10dc | 314 | py | Python | loader/__init__.py | g8a9/contextualizing-hate-speech-models-with-explanations | d1e96891c90d4729b92d884f5c1d4d85e853b8f7 | [
"MIT"
] | null | null | null | loader/__init__.py | g8a9/contextualizing-hate-speech-models-with-explanations | d1e96891c90d4729b92d884f5c1d4d85e853b8f7 | [
"MIT"
] | null | null | null | loader/__init__.py | g8a9/contextualizing-hate-speech-models-with-explanations | d1e96891c90d4729b92d884f5c1d4d85e853b8f7 | [
"MIT"
] | null | null | null | from .gab import GabProcessor, GabDataset
from .common import *
from .ws import WSProcessor, WSDataset
from .nyt import NytProcessor, NytDataset
from .wiki import WikiProcessor, WikiDataset
from .miso import MisoProcessor, MisoDataset, MisoItaProcessor, MisoItaDataset
from .mlma import MLMAProcessor, MLMADataset
| 39.25 | 78 | 0.834395 |
74fb81e771605f4a6dca3852ffab071693dafcdb | 106 | py | Python | Ex 5-5.py | EduarDomingos/Exercisios-LB-1B | 1c4b1bf8ccf9f17a7c8bead083a47bba0385fb66 | [
"MIT"
] | null | null | null | Ex 5-5.py | EduarDomingos/Exercisios-LB-1B | 1c4b1bf8ccf9f17a7c8bead083a47bba0385fb66 | [
"MIT"
] | null | null | null | Ex 5-5.py | EduarDomingos/Exercisios-LB-1B | 1c4b1bf8ccf9f17a7c8bead083a47bba0385fb66 | [
"MIT"
] | null | null | null | n1 = float(input("Numero: "))
quintaparte = (n1/5)
print ("A quinta parte desse numero é: " , quintaparte) | 35.333333 | 55 | 0.679245 |
db46b4e4192695e73c457cb36bd275487608f760 | 1,164 | py | Python | frappe/patches/v9_1/move_feed_to_activity_log.py | chentaoz/frappe | ee3c4943bf6177ad3b410cdb0d802af486751a65 | [
"MIT"
] | 5 | 2017-09-12T15:56:31.000Z | 2022-03-09T13:50:21.000Z | frappe/patches/v9_1/move_feed_to_activity_log.py | chentaoz/frappe | ee3c4943bf6177ad3b410cdb0d802af486751a65 | [
"MIT"
] | 212 | 2017-08-16T13:03:18.000Z | 2020-10-06T12:26:21.000Z | frappe/patches/v9_1/move_feed_to_activity_log.py | chentaoz/frappe | ee3c4943bf6177ad3b410cdb0d802af486751a65 | [
"MIT"
] | 14 | 2020-11-04T11:22:44.000Z | 2022-02-01T20:59:37.000Z | from __future__ import unicode_literals
import frappe
from frappe.utils.background_jobs import enqueue
def execute():
    """Patch entry point: migrate 'Updated'-type Communication records
    into the Activity Log doctype."""
    comm_records_count = frappe.db.count("Communication", {"comment_type": "Updated"})
    if comm_records_count > 100000:
        # For large tables the migration is routed through the job wrapper.
        # NOTE(review): enqueue(..., now=True) executes synchronously — confirm
        # that is the intended behaviour rather than a true background job.
        enqueue(method=move_data_from_communication_to_activity_log, queue='short', now=True)
    else:
        move_data_from_communication_to_activity_log()
def move_data_from_communication_to_activity_log():
    """Copy all 'Updated'-type Communication rows into Activity Log,
    then remove them from Communication."""
    # Ensure both doctypes' schemas are current before raw SQL runs.
    frappe.reload_doc("core", "doctype", "communication")
    frappe.reload_doc("core", "doctype", "activity_log")
    # Bulk INSERT ... SELECT keeps the migration in SQL instead of loading
    # each document through the ORM.
    frappe.db.sql("""insert into `tabActivity Log` (name, owner, modified, creation, status, communication_date,
        reference_doctype, reference_name, timeline_doctype, timeline_name, link_doctype, link_name, subject, content, user)
        select name, owner, modified, creation, status, communication_date,
        reference_doctype, reference_name, timeline_doctype, timeline_name, link_doctype, link_name, subject, content, user
        from `tabCommunication`
        where comment_type = 'Updated'""")
    # Source rows are deleted only after the copy above.
    frappe.db.sql("""delete from `tabCommunication` where comment_type = 'Updated'""")
frappe.delete_doc("DocType", "Authentication Log") | 48.5 | 119 | 0.790378 |
70592f3c00cff165808bd28edbf1001595eabcee | 951 | py | Python | setup.py | hemna/vmwaretool | 5cdafee63c51614b87351b0beb8ae96f084a2459 | [
"Apache-2.0"
] | null | null | null | setup.py | hemna/vmwaretool | 5cdafee63c51614b87351b0beb8ae96f084a2459 | [
"Apache-2.0"
] | null | null | null | setup.py | hemna/vmwaretool | 5cdafee63c51614b87351b0beb8ae96f084a2459 | [
"Apache-2.0"
] | null | null | null | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools

# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
    import multiprocessing  # noqa
except ImportError:
    pass

# All package metadata is delegated to pbr (which reads setup.cfg).
setuptools.setup(setup_requires=["pbr"], pbr=True)
| 35.222222 | 69 | 0.757098 |
166af0dcd609a15f89d36294e16dd5883ed5bb2b | 9,344 | py | Python | colour/appearance/tests/test_ciecam02.py | MaxSchambach/colour | 3f3685d616fda4be58cec20bc1e16194805d7e2d | [
"BSD-3-Clause"
] | null | null | null | colour/appearance/tests/test_ciecam02.py | MaxSchambach/colour | 3f3685d616fda4be58cec20bc1e16194805d7e2d | [
"BSD-3-Clause"
] | null | null | null | colour/appearance/tests/test_ciecam02.py | MaxSchambach/colour | 3f3685d616fda4be58cec20bc1e16194805d7e2d | [
"BSD-3-Clause"
] | null | null | null | # !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Defines unit tests for :mod:`colour.appearance.ciecam02` module.
"""
from __future__ import division, unicode_literals
import numpy as np
from itertools import permutations
from colour.appearance import (
CIECAM02_VIEWING_CONDITIONS, CIECAM02_InductionFactors,
CIECAM02_Specification, XYZ_to_CIECAM02, CIECAM02_to_XYZ)
from colour.appearance.tests.common import ColourAppearanceModelTest
from colour.utilities import (as_namedtuple, domain_range_scale,
ignore_numpy_errors, tsplit, tstack)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = [
'TestCIECAM02ColourAppearanceModelForward',
'TestCIECAM02ColourAppearanceModelInverse'
]
class TestCIECAM02ColourAppearanceModelForward(ColourAppearanceModelTest):
    """
    Defines :mod:`colour.appearance.ciecam02` module units tests methods for
    *CIECAM02* colour appearance model forward implementation.
    """

    # CSV fixture with reference input/output values for the model.
    FIXTURE_BASENAME = 'ciecam02.csv'

    # Maps fixture column names to specification attribute names.
    OUTPUT_ATTRIBUTES = {
        'J': 'J',
        'C': 'C',
        'h': 'h',
        's': 's',
        'Q': 'Q',
        'M': 'M',
        'H': 'H'
    }

    def output_specification_from_data(self, data):
        """
        Returns the *CIECAM02* colour appearance model output specification
        from given data.

        Parameters
        ----------
        data : list
            Fixture data.

        Returns
        -------
        CIECAM02_Specification
            *CIECAM02* colour appearance model specification.
        """

        XYZ = tstack([data['X'], data['Y'], data['Z']])
        XYZ_w = tstack([data['X_w'], data['Y_w'], data['Z_w']])

        specification = XYZ_to_CIECAM02(
            XYZ, XYZ_w, data['L_A'], data['Y_b'],
            CIECAM02_InductionFactors(data['F'], data['c'], data['N_c']))

        return specification

    @ignore_numpy_errors
    def test_domain_range_scale_XYZ_to_CIECAM02(self):
        """
        Tests :func:`colour.appearance.cam16.XYZ_to_CIECAM02` definition domain
        and range scale support.
        """

        XYZ = np.array([19.01, 20.00, 21.78])
        XYZ_w = np.array([95.05, 100.00, 108.88])
        L_A = 318.31
        Y_b = 20.0
        surround = CIECAM02_VIEWING_CONDITIONS['Average']
        # [:-1] drops the trailing specification field not subject to scaling.
        specification = XYZ_to_CIECAM02(XYZ, XYZ_w, L_A, Y_b, surround)[:-1]

        # (scale name, input factor, expected output factor); hue channels
        # scale by 1/360 because they are expressed in degrees.
        d_r = (
            ('reference', 1, 1),
            (1, 0.01,
             np.array([
                 1 / 100, 1 / 100, 1 / 360, 1 / 100, 1 / 100, 1 / 100, 1 / 360
             ])),
            (100, 1, np.array([1, 1, 100 / 360, 1, 1, 1, 100 / 360])),
        )
        for scale, factor_a, factor_b in d_r:
            with domain_range_scale(scale):
                np.testing.assert_almost_equal(
                    XYZ_to_CIECAM02(XYZ * factor_a, XYZ_w * factor_a, L_A, Y_b,
                                    surround)[:-1],
                    specification * factor_b,
                    decimal=7)

    @ignore_numpy_errors
    def test_nan_XYZ_to_CIECAM02(self):
        """
        Tests :func:`colour.appearance.ciecam02.XYZ_to_CIECAM02` definition
        nan support.
        """

        # Exercise every 3-permutation of degenerate values; the call must
        # simply not raise.
        cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
        cases = set(permutations(cases * 3, r=3))
        for case in cases:
            XYZ = np.array(case)
            XYZ_w = np.array(case)
            L_A = case[0]
            Y_b = case[0]
            surround = CIECAM02_InductionFactors(case[0], case[0], case[0])
            XYZ_to_CIECAM02(XYZ, XYZ_w, L_A, Y_b, surround)
class TestCIECAM02ColourAppearanceModelInverse(ColourAppearanceModelTest):
    """
    Defines :mod:`colour.appearance.ciecam02` module units tests methods for
    *CIECAM02* colour appearance model inverse implementation.
    """

    FIXTURE_BASENAME = 'ciecam02.csv'

    # Fixture columns mapped to XYZ component indices.
    OUTPUT_ATTRIBUTES = {'X': 0, 'Y': 1, 'Z': 2}

    def output_specification_from_data(self, data):
        """
        Returns the colour appearance model output specification from given
        fixture data.

        Parameters
        ----------
        data : list
            Tested colour appearance model fixture data.

        Notes
        -----
        -   This method is a dummy object.
        """

        pass

    def _XYZ_from_data(self, data, correlates):
        """
        Returns the *CIE XYZ* tristimulus values from given *CIECAM02* colour
        appearance model input data.

        Parameters
        ----------
        data : list
            Fixture data.
        correlates : array_like
            Correlates used to build the input *CIECAM02* colour appearance
            model specification.

        Returns
        -------
        array_like
            *CIE XYZ* tristimulus values
        """

        XYZ_w = tstack([data['X_w'], data['Y_w'], data['Z_w']])

        # Build a specification carrying only the requested correlates.
        i, j, k = correlates
        CIECAM02_specification = as_namedtuple({
            i: data[i],
            j: data[j],
            k: data[k]
        }, CIECAM02_Specification)

        XYZ = CIECAM02_to_XYZ(
            CIECAM02_specification, XYZ_w, data['L_A'], data['Y_b'],
            CIECAM02_InductionFactors(data['F'], data['c'], data['N_c']))

        return XYZ

    def check_specification_attribute(self, case, data, attribute, expected):
        """
        Tests *CIE XYZ* tristimulus values output from *CIECAM02* colour
        appearance model input data.

        Parameters
        ----------
        case : int
            Fixture case number.
        data : dict.
            Fixture case data.
        attribute : unicode.
            Tested attribute name.
        expected : float.
            Expected attribute value.

        Warning
        -------
        The method name does not reflect the underlying implementation.
        """

        # The inversion must agree whichever chroma correlate (C or M) seeds it.
        for correlates in (('J', 'C', 'h'), ('J', 'M', 'h')):
            XYZ = self._XYZ_from_data(data, correlates)
            value = tsplit(XYZ)[attribute]

            error_message = ('Parameter "{0}" in test case "{1}" '
                             'does not match target value.\n'
                             'Expected: "{2}" \n'
                             'Received "{3}"').format(attribute, case,
                                                      expected, value)

            np.testing.assert_allclose(
                value,
                expected,
                err_msg=error_message,
                rtol=0.01,
                atol=0.01,
                verbose=False)

            np.testing.assert_almost_equal(
                value, expected, decimal=1, err_msg=error_message)

    @ignore_numpy_errors
    def test_domain_range_scale_CIECAM02_to_XYZ(self):
        """
        Tests :func:`colour.appearance.cam16.CIECAM02_to_XYZ` definition domain
        and range scale support.
        """

        XYZ_i = np.array([19.01, 20.00, 21.78])
        XYZ_w = np.array([95.05, 100.00, 108.88])
        L_A = 318.31
        Y_b = 20.0
        surround = CIECAM02_VIEWING_CONDITIONS['Average']
        specification = XYZ_to_CIECAM02(XYZ_i, XYZ_w, L_A, Y_b, surround)
        XYZ = CIECAM02_to_XYZ(specification, XYZ_w, L_A, Y_b, surround)

        # (scale name, specification factor, XYZ_w factor, output factor);
        # hue channels scale by 1/360 because they are in degrees.
        d_r = (
            ('reference', 1, 1, 1),
            (1,
             np.array([
                 1 / 100, 1 / 100, 1 / 360, 1 / 100, 1 / 100, 1 / 100, 1 / 360
             ]), 0.01, 0.01),
            (100, np.array([1, 1, 100 / 360, 1, 1, 1, 100 / 360]), 1, 1),
        )
        for scale, factor_a, factor_b, factor_c in d_r:
            with domain_range_scale(scale):
                np.testing.assert_almost_equal(
                    CIECAM02_to_XYZ(specification[:-1] * factor_a,
                                    XYZ_w * factor_b, L_A, Y_b, surround),
                    XYZ * factor_c,
                    decimal=7)

    @ignore_numpy_errors
    def test_raise_exception_CIECAM02_to_XYZ(self):
        """
        Tests :func:`colour.appearance.cam16.CIECAM02_to_XYZ` definition raised
        exception.
        """

        # C=None must raise ValueError.
        # NOTE(review): a try/except-pass passes even when nothing is raised;
        # consider self.assertRaises instead.
        try:
            CIECAM02_to_XYZ(
                CIECAM02_Specification(
                    41.731091132513917,
                    None,
                    219.04843265831178,
                ),
                np.array([95.05, 100.00, 108.88]),
                318.31,
                20.0,
                CIECAM02_VIEWING_CONDITIONS['Average'],
            )
        except ValueError:
            pass

    @ignore_numpy_errors
    def test_nan_CIECAM02_to_XYZ(self):
        """
        Tests :func:`colour.appearance.ciecam02.CIECAM02_to_XYZ` definition
        nan support.
        """

        # Exercise every 3-permutation of degenerate values; the call must
        # simply not raise.
        cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
        cases = set(permutations(cases * 3, r=3))
        for case in cases:
            J = case[0]
            C = case[0]
            h = case[0]
            XYZ_w = np.array(case)
            L_A = case[0]
            Y_b = case[0]
            surround = CIECAM02_InductionFactors(case[0], case[0], case[0])
            CIECAM02_to_XYZ(
                CIECAM02_Specification(J, C, h), XYZ_w, L_A, Y_b, surround)
| 31.355705 | 79 | 0.544842 |
1f63835089c3416888ea78643d0aa7b462cf140a | 2,884 | py | Python | analyze_crime.py | NahsiN/WalkSafe | dbfbe7ede0d1aae9420358c61b365ac5359727ca | [
"MIT"
] | 1 | 2016-09-30T01:01:43.000Z | 2016-09-30T01:01:43.000Z | analyze_crime.py | NahsiN/SafeWalk | dbfbe7ede0d1aae9420358c61b365ac5359727ca | [
"MIT"
] | null | null | null | analyze_crime.py | NahsiN/SafeWalk | dbfbe7ede0d1aae9420358c61b365ac5359727ca | [
"MIT"
] | null | null | null | """
Exploratory plots of crime data to justify app design
"""
import pandas as pd
import numpy as np
import overpass
import geopandas as gpd
import matplotlib.pyplot as plt
from ast import literal_eval as make_tuple
import psycopg2
import ipdb
import sys
import cost_models
import sys
# Load the pre-joined NYPD felony incident dataset.
fname = 'data/NYPD_7_Major_Felony_Incidents_2005--2016_with_roads'
print('Initial loading from {0}'.format(fname + '.csv'))
df = pd.read_csv(fname + '.csv')

# plot crime by hour
g_hour = df.groupby('Occurrence Hour')
df_crimes_by_hour = g_hour.size()
df_crimes_by_hour.plot()

# types of crime and by the hour
direct_bodily_harm_offenses = ['ROBBERY', 'FELONY ASSAULT', 'RAPE', 'MURDER & NON-NEGL. MANSLAUGHTE']
indirect_bodily_harm_offenses = ['GRAND LARCENY', 'GRAND LARCENY OF MOTOR VEHICLE', 'BURGLARY']

# group by direct and indirect
group_by_offense = df.groupby('Offense')

# for direct bodily harm create a dataframe
df_direct_bodily_harm = pd.concat([group_by_offense.get_group(offense) for offense in direct_bodily_harm_offenses])
group_by_hour_direct_bodily_harm = df_direct_bodily_harm.groupby('Occurrence Hour')
df_direct_bodily_harm_by_hour = group_by_hour_direct_bodily_harm.size()
df_direct_bodily_harm_by_hour.plot()

# Same aggregation for the indirect-harm (property) offenses.
df_indirect_bodily_harm = pd.concat([group_by_offense.get_group(offense) for offense in indirect_bodily_harm_offenses])
group_by_hour_indirect_bodily_harm = df_indirect_bodily_harm.groupby('Occurrence Hour')
df_indirect_bodily_harm_by_hour = group_by_hour_indirect_bodily_harm.size()
df_indirect_bodily_harm_by_hour.plot()

# within each group each one
# plot hourly crime for each of two main offense subclasses
plt.figure()
for offense in direct_bodily_harm_offenses:
    df_tmp = df[df.loc[:, 'Offense'] == offense]
    df_tmp.groupby('Occurrence Hour').size().plot(logy=True, label=offense, figsize=(19.1, 7.5), fontsize=20, linewidth=2)
plt.legend()
plt.xlabel('Occurrence Hour', fontsize=20)
plt.ylabel('log(Numbers of Crime)', fontsize=20)
plt.savefig('direct_body_harm.png', bbox_inches='tight')

plt.figure()
for offense in indirect_bodily_harm_offenses:
    df_tmp = df[df.loc[:, 'Offense'] == offense]
    df_tmp.groupby('Occurrence Hour').size().plot(logy=True, label=offense)
plt.legend()

# specific types of crimes
offense = 'ROBBERY'
df_tmp = df[df.loc[:, 'Offense'] == offense]
df_tmp.groupby('Occurrence Hour').size().plot(logy=False, label=offense, figsize=(19.1, 7.5), fontsize=20, linewidth=2)
plt.legend()
plt.xlabel('Occurrence Hour', fontsize=20)
plt.ylabel('Numbers', fontsize=20)

offense = 'FELONY ASSAULT'
df_tmp = df[df.loc[:, 'Offense'] == offense]
df_tmp.groupby('Occurrence Hour').size().plot(logy=False, label=offense, figsize=(19.1, 7.5), fontsize=20, linewidth=2)
plt.legend()
plt.xlabel('Occurrence Hour', fontsize=20)
plt.ylabel('Numbers', fontsize=20)
plt.savefig('felony_assault.png', bbox_inches='tight')
| 38.453333 | 122 | 0.771151 |
90332f4272ae83ee63aedd9b4438e4e3cade8389 | 3,453 | py | Python | simutator/batch_genome_mutator.py | martinghunt/simutator | 0218c8a5b37fd72eb4e5b2df4cba9f6118f96788 | [
"MIT"
] | 7 | 2020-01-09T15:25:17.000Z | 2021-08-05T15:58:25.000Z | simutator/batch_genome_mutator.py | martinghunt/simutator | 0218c8a5b37fd72eb4e5b2df4cba9f6118f96788 | [
"MIT"
] | null | null | null | simutator/batch_genome_mutator.py | martinghunt/simutator | 0218c8a5b37fd72eb4e5b2df4cba9f6118f96788 | [
"MIT"
] | 1 | 2020-01-09T13:10:02.000Z | 2020-01-09T13:10:02.000Z | import logging
from simutator import genome_mutator
def _parse_indels_option_string(s):
distances_and_lengths = []
for x in s.split(","):
dist, length = x.split(":")
distances_and_lengths.append({"dist": int(dist), "len": int(length)})
return distances_and_lengths
def _parse_complex_option_string(s):
complex_vars = []
for x in s.split(","):
dist, length, snps, ins, dels, max_indel = [int(y) for y in x.split(":")]
complex_vars.append(
{
"dist": dist,
"len": length,
"snp": snps,
"ins": ins,
"del": dels,
"max_indel_len": max_indel,
}
)
return complex_vars
def mutations_from_options(options):
    """Translate parsed command-line options into a mutation-spec dict.

    :param options: argparse-style namespace with string-or-None attributes
        ``snps``, ``ins``, ``dels`` and ``complex``.
    :return: dict mapping mutation type ('snp', 'insertion', 'deletion',
        'complex') to a list of parameter dicts.
    :raises ValueError: if any supplied option string cannot be parsed.
    :raises RuntimeError: if no mutation option was supplied at all.
    """
    mutations = {}

    if options.snps is not None:
        try:
            snp_distances = [{"dist": int(x)} for x in options.snps.split(",")]
        except Exception:
            # was a bare `except:`, which would also have swallowed
            # KeyboardInterrupt/SystemExit
            raise ValueError(f"Cannot parse --snps option: '{options.snps}'")
        mutations["snp"] = snp_distances

    if options.ins is not None:
        try:
            mutations["insertion"] = _parse_indels_option_string(options.ins)
        except Exception:
            raise ValueError(f"Cannot parse --ins option: '{options.ins}'")

    if options.dels is not None:
        try:
            mutations["deletion"] = _parse_indels_option_string(options.dels)
        except Exception:
            raise ValueError(f"Cannot parse --dels option: '{options.dels}'")

    if options.complex is not None:
        try:
            mutations["complex"] = _parse_complex_option_string(options.complex)
        except Exception:
            raise ValueError(f"Cannot parse --complex option: '{options.complex}'")

    if len(mutations) == 0:
        raise RuntimeError(
            "Must use at least one of the options --snps, --dels, --ins, --complex"
        )

    return mutations
def run_all_mutations(fasta_in, outprefix, mutations, seed=None):
    """Run every configured mutator over the input FASTA, writing one
    mutated FASTA plus original/mutated VCFs per mutation spec."""
    for mut_kind, specs in mutations.items():
        for spec in specs:
            logging.info(
                f"Simulating mutations of type '{mut_kind}' with parameters {spec}"
            )
            if mut_kind == "snp":
                mutator = genome_mutator.SnpMutator(spec["dist"], seed=seed)
            elif mut_kind == "insertion":
                mutator = genome_mutator.InsertionMutator(
                    spec["dist"], spec["len"], seed=seed
                )
            elif mut_kind == "deletion":
                mutator = genome_mutator.DeletionMutator(
                    spec["dist"], spec["len"], seed=seed
                )
            elif mut_kind == "complex":
                mutator = genome_mutator.ComplexMutator(
                    spec["dist"],
                    spec["len"],
                    spec["snp"],
                    spec["del"],
                    spec["ins"],
                    spec["max_indel_len"],
                    seed=seed,
                )
            # Output stem encodes the mutation type and its sorted parameters.
            param_tag = ".".join(f"{key}-{value}" for key, value in sorted(spec.items()))
            stem = f"{outprefix}.{mut_kind}.{param_tag}"
            mutator.mutate_fasta_file(
                fasta_in,
                f"{stem}.fa",
                f"{stem}.original.vcf",
                f"{stem}.mutated.vcf",
            )
| 33.852941 | 92 | 0.536056 |
4b60a29cf5f3d75a8c2297b19e9233c0f3975608 | 9,245 | py | Python | asteroids v1.0.py | FlankMe/Asteroids | 902c18457aa15dfaedd548e0df15a68facf3286f | [
"MIT"
] | null | null | null | asteroids v1.0.py | FlankMe/Asteroids | 902c18457aa15dfaedd548e0df15a68facf3286f | [
"MIT"
] | null | null | null | asteroids v1.0.py | FlankMe/Asteroids | 902c18457aa15dfaedd548e0df15a68facf3286f | [
"MIT"
] | 1 | 2021-11-05T12:57:16.000Z | 2021-11-05T12:57:16.000Z | # -*- coding: utf-8 -*-
"""
Created on Mon May 02 20:56:30 2016
Simple game where the player has to dodge the asteroids (grey) and catch the
golden coins (yellow). When playing as a human, hold the mouse button to move
left or leave it to move right.
The idea is to create a simple environment for training a deep (convolutional)
neural network. The CNN will be coded and published at a later stage.
Feel free to code your own agent. You need to:
1) create a class Agent in file CNN_Agent.py (in the same folder as this game);
2) implement a method self.__init__ that accepts SCREEN_WIDTH (int) as input;
3) implement a method self.choose_action(image, reward, is_terminal)
that accepts:
- image: array, image made of raw pixels captured from the game screen;
- reward: float, reward received at that particular state;
- is_terminal: bool, indicates if the player is at a terminal state;
and returns:
- (+1) for right move, (0) for no move, and (-1) for left move;
4) implement a method self.close() that clears the graph of Tensorflow.
@author: Riccardo Rossi
"""
# Fix the screen's size. The screen will be a rectangle with sizes
# (SCREEN_WIDTH) x (SCREEN_WIDTH + MARGIN_FOR_FONT)
SCREEN_WIDTH = 504
MARGIN_FOR_FONT = 36
SCREEN_HEIGHT = SCREEN_WIDTH + MARGIN_FOR_FONT
import pygame
import time
import numpy as np
np.random.seed(int(time.time()))
import matplotlib.pyplot as plt
# Hyperparameters
HUMAN_PLAYING = False
ACTION_STEPS = 4 # Steps to observe before deciding next action
PLAYER_SPEED = 6
GOLD_SPEED = 6
OBSTACLE_SPEED = 6
REWARD_CATCHING_GOLD = 1.
PLAYER_DIES_PENALTY = 0.
PROB_OBJECT_SPAWNED = 0.12
# Probability gold is spawned conditional to an object being spawned
PROB_GOLD_SPAWNED = 0.8
# Try to import Tensorflow and initialize the CNN agent
# If unsuccessful, it sets the game as played by a human
# Try to load TensorFlow and construct the CNN agent; fall back to a human
# player on any failure.
if not HUMAN_PLAYING:
    try:
        import tensorflow as tf
        from tensorflow.python.framework import ops
        import CNN_Agent
        agent = CNN_Agent.Agent(SCREEN_WIDTH)
    except Exception:
        # was a bare `except:`; Exception still covers ImportError and
        # agent-construction failures without swallowing KeyboardInterrupt.
        print('It was not possible to load TensorFlow. The human will play.')
        HUMAN_PLAYING = True
GAME_TITLE = 'Asteroids - dodge the asteroids (grey), catch the gold (yellow)'
TPS = 100
FRAME_WIDTH = 3
FRAME_FROM_BORDER = 3
# Loose definition of a colours used in the game
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (219, 218, 191)
GOLD = (255, 215, 64)
GREY = (112, 138, 127)
class Block(pygame.sprite.Sprite):
    """Base sprite: a solid rectangle with an `image` surface and a `rect`."""
    def __init__(self, width, height):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.Surface([width, height])
        # rect starts at (0, 0); subclasses position it.
        self.rect = self.image.get_rect()
class Player(Block):
    """The player's ship: starts near the horizontal centre, moves sideways."""

    def __init__(self, colour=RED, width=30, height=18):
        Block.__init__(self, width, height)
        self.image.fill(colour)
        # Horizontal start is snapped to the 6-px movement grid.
        self.rect.x = int((SCREEN_WIDTH / 2 - width) / 6) * 6
        self.rect.y = SCREEN_HEIGHT - height - 18
        self.score = 0
        self.speed = [+PLAYER_SPEED, 0]

    def is_position_allowed(self):
        """Return True while the sprite stays fully inside the frame."""
        inner_min = FRAME_FROM_BORDER + FRAME_WIDTH
        inner_max_x = SCREEN_WIDTH - FRAME_FROM_BORDER - FRAME_WIDTH
        inner_max_y = SCREEN_HEIGHT - FRAME_FROM_BORDER - FRAME_WIDTH
        return (inner_min <= self.rect.x and self.rect.right <= inner_max_x
                and inner_min <= self.rect.y and self.rect.bottom <= inner_max_y)

    def move(self):
        dx, dy = self.speed
        self.rect.x += dx
        self.rect.y += dy

    def update(self):
        self.move()
class Asteroid(Block):
    """Falling hazard; spawns at a random x inside the frame's top edge."""

    def __init__(self, colour=GREY, width=12, height=12, speed=OBSTACLE_SPEED):
        Block.__init__(self, width, height)
        self.image.fill(colour)
        # Random horizontal slot, snapped to the 6-px movement grid.
        x_lo = FRAME_FROM_BORDER + FRAME_WIDTH
        x_hi = SCREEN_WIDTH - FRAME_FROM_BORDER - FRAME_WIDTH - width
        self.rect.x = int(np.random.randint(x_lo, x_hi) / 6) * 6
        self.rect.y = FRAME_FROM_BORDER + FRAME_WIDTH
        self.speed = speed

    def update(self):
        """Fall straight down by `speed` pixels per tick."""
        self.rect.y += self.speed
class Gold(Block):
    """Falling collectible coin; spawns at a random x inside the frame's top edge."""

    def __init__(self, colour=GOLD, width=12, height=12, speed=GOLD_SPEED):
        Block.__init__(self, width, height)
        self.image.fill(colour)
        # Random horizontal slot, snapped to the 6-px movement grid.
        x_lo = FRAME_FROM_BORDER + FRAME_WIDTH
        x_hi = SCREEN_WIDTH - FRAME_FROM_BORDER - FRAME_WIDTH - width
        self.rect.x = int(np.random.randint(x_lo, x_hi) / 6) * 6
        self.rect.y = FRAME_FROM_BORDER + FRAME_WIDTH
        self.speed = speed

    def update(self):
        """Fall straight down by `speed` pixels per tick."""
        self.rect.y += self.speed
start = time.time()  # Useful for measuring the duration of the game

###############################################################################
# Launch PyGame
pygame.init()
pygame.display.set_caption(GAME_TITLE)
screen = pygame.display.set_mode([SCREEN_WIDTH, SCREEN_HEIGHT])

# Sprite groups; `all_items_list` drives drawing and per-tick updates.
all_items_list = pygame.sprite.Group()
player_list = pygame.sprite.Group()
obstacle_list = pygame.sprite.Group()
gold_list = pygame.sprite.Group()

player = Player()
player_list.add(player)
all_items_list.add(player)

# Initialize a few useful variables
font = pygame.font.SysFont("calibri", 20)
reward = max_score = last_score = 0
is_terminal = inquire_the_agent = False
count = action = +1  # action: +1 => move right, -1 => move left
clock = pygame.time.Clock()

# Start the game
running = True
while running:

    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
            continue
        if HUMAN_PLAYING:
            # Mouse button held => move left; released => move right.
            if (event.type == pygame.MOUSEBUTTONDOWN or
                    event.type == pygame.MOUSEBUTTONUP):
                if event.type == pygame.MOUSEBUTTONDOWN:
                    action = -1
                if event.type == pygame.MOUSEBUTTONUP:
                    action = 1

    # Update all items' positions and the game_count
    all_items_list.update()
    count += 1

    # Create the background, basic frame, and score line
    screen.fill(BLACK)
    frame = pygame.draw.rect(screen, WHITE, pygame.Rect(
        (FRAME_FROM_BORDER, FRAME_FROM_BORDER),
        (SCREEN_WIDTH - 2 * FRAME_FROM_BORDER,
         SCREEN_HEIGHT - 2 * FRAME_FROM_BORDER)),
        FRAME_WIDTH)
    score = font.render('Max score : ' +
                        str(int(max_score)) +
                        '; Last score : ' +
                        str(int(last_score)) +
                        '; Current score : ' +
                        str(int(player.score)), True, WHITE)
    screen.blit(score, (FRAME_FROM_BORDER + FRAME_WIDTH + 6,
                        FRAME_FROM_BORDER + FRAME_WIDTH))

    # Generate obstacles and golden coins randomly
    if ((not obstacle_list and not gold_list) or
            np.random.uniform() < PROB_OBJECT_SPAWNED):
        if np.random.uniform() < PROB_GOLD_SPAWNED:
            gold = Gold()
            gold_list.add(gold)
            all_items_list.add(gold)
        else:
            asteroid = Asteroid()
            obstacle_list.add(asteroid)
            all_items_list.add(asteroid)

    # Count the elements caught by the player
    obstacle_hits = pygame.sprite.spritecollide(player, obstacle_list, True)
    gold_hits = pygame.sprite.spritecollide(player, gold_list, True)

    # If gold was caught by the player, then reward is distributed
    if gold_hits:
        reward = REWARD_CATCHING_GOLD * len(gold_hits)
        player.score += reward
        inquire_the_agent = True

    # Remove all elements that hit the bottom frame
    for elem in list(obstacle_list) + list(gold_list):
        if elem.rect.bottom > SCREEN_HEIGHT - FRAME_FROM_BORDER - FRAME_WIDTH:
            elem.kill()

    # If the player hits an obstacle or the screen's border, it's game over
    # The scores are updated and the game is reset
    if obstacle_hits or not player.is_position_allowed():
        last_score = player.score
        if max_score < player.score:
            max_score = player.score
        all_items_list.empty()
        player_list.empty()
        obstacle_list.empty()
        gold_list.empty()
        is_terminal = True
        inquire_the_agent = True
        reward += PLAYER_DIES_PENALTY
        player = Player()
        player_list.add(player)
        all_items_list.add(player)
    else:
        is_terminal = False

    # Print all objects in the screen
    all_items_list.draw(screen)
    pygame.display.flip()
    clock.tick(TPS)

    if count % ACTION_STEPS == 0:
        inquire_the_agent = True

    # Inquire the Agent
    if not HUMAN_PLAYING and inquire_the_agent:
        image = pygame.surfarray.array3d(screen)
        image = image[:, MARGIN_FOR_FONT:, :]  # crop the score banner off
        # Agent's action function has to return +1 for right and -1 for left
        action = agent.choose_action(image, reward, is_terminal)
        inquire_the_agent = False
        reward = 0.0

    player.speed = [+PLAYER_SPEED * action, 0]

# Save settings, reset the graph, and close the session
if not HUMAN_PLAYING:
    agent.close()
del font, score; pygame.display.quit(); pygame.quit()
###############################################################################

print('Max score was', max_score)
print('The process took :', round(time.time() - start, 2), 'seconds')
| 33.618182 | 80 | 0.636019 |
726ebe4f22159cdbde71433b1a9121126c69d8b0 | 3,560 | py | Python | Back-End/Python/Basics/Part -4- OOP/02 - Polymorphism/06_callable_protocol.py | ASHISHKUMAR2411/Programming-CookBook | 9c60655d64d21985ccb4196360858d98344701f9 | [
"MIT"
] | 25 | 2021-04-28T02:51:26.000Z | 2022-03-24T13:58:04.000Z | Back-End/Python/Basics/Part -4- OOP/02 - Polymorphism/06_callable_protocol.py | ASHISHKUMAR2411/Programming-CookBook | 9c60655d64d21985ccb4196360858d98344701f9 | [
"MIT"
] | 1 | 2022-03-03T23:33:41.000Z | 2022-03-03T23:35:41.000Z | Back-End/Python/Basics/Part -4- OOP/02 - Polymorphism/06_callable_protocol.py | ASHISHKUMAR2411/Programming-CookBook | 9c60655d64d21985ccb4196360858d98344701f9 | [
"MIT"
] | 15 | 2021-05-30T01:35:20.000Z | 2022-03-25T12:38:25.000Z | from functools import partial
from collections import defaultdict
from functools import wraps
def my_func(a, b, c):
    """Return the three positional arguments as a tuple."""
    return (a, b, c)
class Partial:
    """Minimal re-implementation of functools.partial for positional args."""

    def __init__(self, func, *args):
        self._func = func
        self._args = args

    def __call__(self, *extra_args):
        combined = self._args + extra_args
        return self._func(*combined)
# Demo: Partial pre-binds 10 and 20; the call supplies the final argument.
partial_func = Partial(my_func, 10, 20)
print(partial_func(30))
#(10, 20, 30)
# Instances that define __call__ satisfy the callable protocol.
print(callable(partial_func)) # True
# Module-level miss counter shared by the factory below.
miss_counter = 0


def default_value():
    """defaultdict factory: bump the global miss counter, return 'N/A'."""
    global miss_counter
    miss_counter = miss_counter + 1
    return 'N/A'
# Demo: defaultdict invokes default_value once per *missing* key only.
d = defaultdict(default_value)
d['a'] = 1
d['a']  # present -> factory not called
d['b']  # miss -> factory called
d['c']  # miss -> factory called
print(miss_counter)
print(d)
# 2
# defaultdict(<function default_value at 0x000001BA82FB10D0>, {'a': 1, 'b': 'N/A', 'c': 'N/A'})
class DefaultValue:
    """Callable default factory that counts how many times it was used.

    Also supports ``+=`` with an int to adjust the counter manually.
    """

    def __init__(self):
        self.counter = 0

    def __iadd__(self, other):
        # Guard clause: reject anything that is not an integer increment.
        if not isinstance(other, int):
            raise ValueError('Can only increment with an integer value.')
        self.counter += other
        return self

    def __call__(self):
        self.counter += 1
        return 'N/A'
# Demo: each DefaultValue instance keeps its own independent miss counter.
def_1 = DefaultValue()
def_2 = DefaultValue()
cache_1 = defaultdict(def_1)
cache_2 = defaultdict(def_2)
print(cache_1['a'], cache_1['b'])
# ('N/A', 'N/A')
print(def_1.counter) # 2
print(cache_2['a']) # 'N/A'
class DefaultValue:
    """Configurable default-value factory that counts its invocations.

    Unlike the earlier variant, the value handed back for missing keys
    is chosen per instance. ``+=`` with an int adjusts the counter.
    """

    def __init__(self, default_value):
        self.default_value = default_value
        self.counter = 0

    def __iadd__(self, other):
        # Guard clause: only integer increments are accepted.
        if not isinstance(other, int):
            raise ValueError('Can only increment with an integer value.')
        self.counter += other
        return self

    def __call__(self):
        self.counter += 1
        return self.default_value
# Demo: the default value itself is now configurable per instance.
cache_def_1 = DefaultValue(None)
cache_def_2 = DefaultValue(0)
cache_1 = defaultdict(cache_def_1)
cache_2 = defaultdict(cache_def_2)
print(cache_1['a'], cache_1['b'], cache_1['a'])
# (None, None, None)
def profiler(fn):
    """Decorator that counts calls to *fn* and accumulates elapsed time.

    The wrapped function gains two attributes:
      * ``counter()``  -- number of completed calls so far
      * ``avg_time()`` -- average elapsed seconds per call; raises
                          ZeroDivisionError if queried before any call

    Fix: the original declared (and re-declared via ``nonlocal``) an
    ``_avg_time`` local that was never read or written -- dead code removed.
    """
    _counter = 0
    _total_elapsed = 0

    @wraps(fn)
    def inner(*args, **kwargs):
        nonlocal _counter
        nonlocal _total_elapsed
        _counter += 1
        start = perf_counter()
        result = fn(*args, **kwargs)
        end = perf_counter()
        _total_elapsed += (end - start)
        return result

    def counter():
        return _counter

    def avg_time():
        return _total_elapsed / _counter

    # Expose the stats as attributes on the wrapper itself.
    inner.counter = counter
    inner.avg_time = avg_time
    return inner
# Demo: profile a function whose runtime is a random sleep (< 1 second).
@profiler
def func1():
    sleep(random.random())
print(func1(), func1())
# (None, None)
func1.counter()
#2
func1.avg_time()
# 0.3425700559746474
# Class Counter Decorator
class Profiler:
    """Class-based decorator: counts calls and accumulates elapsed time.

    ``avg_time`` is exposed as a read-only property; accessing it before
    the first call raises ZeroDivisionError.
    """

    def __init__(self, fn):
        self.counter = 0
        self.total_elapsed = 0
        self.fn = fn

    def __call__(self, *args, **kwargs):
        self.counter += 1
        started = perf_counter()
        result = self.fn(*args, **kwargs)
        self.total_elapsed += perf_counter() - started
        return result

    @property
    def avg_time(self):
        return self.total_elapsed / self.counter
# Demo: Profiler as a class-based decorator; 'counter' and 'avg_time'
# become attributes of the decorated callable itself.
@Profiler
def func_1(a, b):
    sleep(random.random())
    return (a, b)
func_1(1, 2)
# (1, 2)
print(func_1.counter)
#1
print(func_1(2, 3))
#(2, 3)
print(func_1.counter)
#2
print(func_1.avg_time)
#0.46242688701022416
@Profiler
def func_2():
    sleep(random.random())
print(func_2(), func_2(), func_2())
#(None, None, None)
print(func_2.counter, func_2.avg_time)
# (3, 0.5231811150054758)
3910e616bb4985faca26428fcf93c4e573ba8650 | 4,683 | py | Python | py3status/modules/deadbeef.py | eugenenelou/py3status | bbf256af954b9ff0ac794e9ebd441286ccd339e2 | [
"BSD-3-Clause"
] | null | null | null | py3status/modules/deadbeef.py | eugenenelou/py3status | bbf256af954b9ff0ac794e9ebd441286ccd339e2 | [
"BSD-3-Clause"
] | null | null | null | py3status/modules/deadbeef.py | eugenenelou/py3status | bbf256af954b9ff0ac794e9ebd441286ccd339e2 | [
"BSD-3-Clause"
] | null | null | null | """
Display songs currently playing in DeaDBeeF.
Configuration parameters:
cache_timeout: refresh interval for this module (default 5)
format: display format for this module (default '[{artist} - ][{title}]')
sleep_timeout: when deadbeef is not running, this interval will be used
to allow faster refreshes with time-related placeholders and/or
to refresh few times per minute rather than every few seconds
(default 20)
Format placeholders:
{album} name of the album
{artist} name of the artist
{length} length time in [HH:]MM:SS
{playback_time} elapsed time in [HH:]MM:SS
{title} title of the track
{tracknumber} track number in two digits
{year} year in four digits
For more placeholders, see title formatting 2.0 in 'deadbeef --help'
or https://github.com/DeaDBeeF-Player/deadbeef/wiki/Title-formatting-2.0
Not all of Foobar2000 remapped metadata fields will work with deadbeef and
a quick reminder about using {placeholders} here instead of %placeholder%.
Color options:
color_paused: Paused, defaults to color_degraded
color_playing: Playing, defaults to color_good
color_stopped: Stopped, defaults to color_bad
Requires:
deadbeef: a GTK+ audio player for GNU/Linux
Examples:
```
# see 'deadbeef --help' for more buttons
deadbeef {
on_click 1 = 'exec deadbeef --play-pause'
on_click 8 = 'exec deadbeef --random'
}
```
@author mrt-prodz
SAMPLE OUTPUT
{'color': '#00ff00', 'full_text': 'Music For Programming - Lackluster'}
paused
{'color': '#ffff00', 'full_text': 'Music For Programming - Lackluster'}
"""
# Error message used when the deadbeef binary cannot be found on PATH.
STRING_NOT_INSTALLED = "not installed"
class Py3status:
    """
    py3status module class: displays the track DeaDBeeF is playing.
    """

    # available configuration parameters
    cache_timeout = 5
    format = "[{artist} - ][{title}]"
    sleep_timeout = 20

    class Meta:
        # Config-migration table consumed by py3status: drops the obsolete
        # 'delimiter' option and renames two old format placeholders.
        deprecated = {
            "remove": [{"param": "delimiter", "msg": "obsolete parameter"}],
            "rename_placeholder": [
                {
                    "placeholder": "elapsed",
                    "new": "playback_time",
                    "format_strings": ["format"],
                },
                {
                    "placeholder": "tracknum",
                    "new": "tracknumber",
                    "format_strings": ["format"],
                },
            ],
        }

    def post_config_hook(self):
        """Verify the deadbeef binary exists and precompute the query command."""
        if not self.py3.check_commands("deadbeef"):
            raise Exception(STRING_NOT_INSTALLED)
        # 'isplaying' is always queried (even if absent from the user format)
        # so deadbeef() can pick the playing/paused color.
        self.separator = "|SEPARATOR|"
        self.placeholders = list(
            set(self.py3.get_placeholders_list(self.format) + ["isplaying"])
        )
        self.deadbeef_command = 'deadbeef --nowplaying-tf "{}"'.format(
            self.separator.join(["%{}%".format(x) for x in self.placeholders])
        )
        self.color_paused = self.py3.COLOR_PAUSED or self.py3.COLOR_DEGRADED
        self.color_playing = self.py3.COLOR_PLAYING or self.py3.COLOR_GOOD
        self.color_stopped = self.py3.COLOR_STOPPED or self.py3.COLOR_BAD

    def _is_running(self):
        """Return True if a deadbeef process exists (checked via pgrep)."""
        try:
            self.py3.command_output(["pgrep", "deadbeef"])
            return True
        except self.py3.CommandError:
            return False

    def _get_deadbeef_data(self):
        """Query deadbeef for placeholder values, tolerating error exit codes."""
        # Deadbeef can generate lot of startup noises with or without error
        # codes. Running command sometimes change how things behaves onscreen
        # too. We used subprocess in the past to ignore error codes. We also
        # use pgrep and hidden placeholders to dictate how status output and
        # color should look... mainly to stay consistency in multiple versions
        # (e.g., Python2.7 to Python3+ and nonstop deadbeef-git commits).
        try:
            return self.py3.command_output(self.deadbeef_command)
        except self.py3.CommandError as ce:
            return ce.output

    def deadbeef(self):
        """Module entry point: build the response dict for the status bar."""
        beef_data = {}
        cached_until = self.sleep_timeout
        color = self.color_stopped
        if self._is_running():
            line = self._get_deadbeef_data()
            beef_data = dict(zip(self.placeholders, line.split(self.separator)))
            cached_until = self.cache_timeout
            # A truthy (non-empty) 'isplaying' value selects the playing
            # color; anything falsy is treated as paused.
            if beef_data["isplaying"]:
                color = self.color_playing
            else:
                color = self.color_paused
        return {
            "cached_until": self.py3.time_in(cached_until),
            "full_text": self.py3.safe_format(self.format, beef_data),
            "color": color,
        }
# Allow running this file directly for a quick visual check in a terminal.
if __name__ == "__main__":
    """
    Run module in test mode.
    """
    from py3status.module_test import module_test

    module_test(Py3status)
| 32.520833 | 80 | 0.624173 |
b27e9e62c112c9dd6b33f927b341650287cfdd9f | 2,052 | py | Python | backendaas/api/AAS.py | ali-asad-fzea/_ | cd8df57228e1d867714b11d917d0b1f9cb0a102b | [
"MIT"
] | 1 | 2022-03-08T15:19:46.000Z | 2022-03-08T15:19:46.000Z | backendaas/api/AAS.py | ali-asad-fzea/_ | cd8df57228e1d867714b11d917d0b1f9cb0a102b | [
"MIT"
] | null | null | null | backendaas/api/AAS.py | ali-asad-fzea/_ | cd8df57228e1d867714b11d917d0b1f9cb0a102b | [
"MIT"
] | 4 | 2020-09-29T12:15:47.000Z | 2022-02-08T06:40:55.000Z |
import os
import pandas as pd
import pickle
import json
def getJSON(csvFile, index=0):
    """Print and return one row of a CSV file serialized as JSON.

    :param csvFile: path or file-like object readable by ``pd.read_csv``
    :param index: positional row index to extract (default 0)
    :return: the row serialized with ``Series.to_json``

    Fix: the original only printed the JSON and returned ``None``; the
    serialized string is now also returned (backward compatible -- the
    print side effect is preserved for existing callers).
    """
    df = pd.read_csv(csvFile)
    row_json = df.iloc[index].to_json()
    print(row_json)
    return row_json
def getDF(d1):
    """Wrap a dict of scalar values into a single-row DataFrame.

    Each key becomes a column whose sole entry is the corresponding value.
    """
    return pd.DataFrame({key: [value] for key, value in d1.items()})
def training():
    """Train a RandomForest on Sensors.csv and pickle it as mymodel.pkl.

    NOTE(review): the CSV path, output path, split ratio and hyperparameters
    are all hard-coded; assumes Sensors.csv carries a 'condition' label
    column -- confirm against the data source.
    """
    df = pd.read_csv("Sensors.csv")
    X= df.drop("condition",axis=1)
    # dummy_row(X)
    Y = df["condition"]
    from sklearn.model_selection import train_test_split
    # Fixed random_state keeps the train/test split reproducible.
    x_train, x_test, y_train, y_test = train_test_split(X,Y, train_size=0.8, random_state=11)
    from sklearn.linear_model import LogisticRegression
    from sklearn.ensemble import RandomForestClassifier
    model = RandomForestClassifier(n_estimators=74,random_state=10)
    model.fit(x_train,y_train )
    # Persist the fitted model next to the working directory.
    pkl_filename = "mymodel.pkl"
    with open(pkl_filename, "wb") as f1:
        pickle.dump(model, f1)
# training()
# def pred(jsonData):
# x = json.loads(jsonData)
# # x = jsonData
# # df = json_normalize(x) #normalize dont create column for null values
# df = getDF(x)
# # tmp = df.iloc[0]
# # tmp.to_csv("tmp2.csv")
# # df = pre_proc(df)
# with open("mymodel.pkl", "rb") as f1:
# model = pickle.load(f1)
# if "condition" in df:
# df.drop("condition", inplace=True, axis=1)
# y = model.predict(df)
# print(y)
# return y
# # pred('{"sensor1":356,"sensor2":43,"condition":1}')
def pred(ob):
    """Predict with the pickled model for a single observation.

    :param ob: mapping of feature name -> scalar value (one sample)
    :return: ``model.predict`` output (array with one label)
    """
    # from pandas.io.json import json_normalize
    # ob= json.loads(ob)
    df = pd.DataFrame(ob,index=[0])
    # df = getDF(x)
    # Resolve the model path relative to this source file, not the CWD.
    pkl_filename = "./mymodel.pkl"
    pkl_filename = os.path.join(os.path.abspath(os.path.dirname(__file__)),pkl_filename)
    # NOTE(review): the model is unpickled on every call -- consider caching.
    with open(pkl_filename, "rb") as f1:
        model = pickle.load(f1)
    # Drop the label column if the caller included it in the observation.
    if "condition" in df:
        df.drop("condition", inplace=True, axis=1)
    y = model.predict(df)
    # print(y)
    return y
# getJSON("Sensors.csv",18)
# pred('{"Accelerometer":13,"DPS":25,"Gyroscope":19,"BPS":37,"condition":0}') | 28.5 | 94 | 0.612573 |
1844dcf4c7bf1555ba73208bc63c1c3ccb4f0441 | 2,015 | py | Python | src/tfi/base/__init__.py | ajbouh/tfi | 6e89e8c8f1ca3b285c788cc6b802fc44f9001290 | [
"MIT"
] | 160 | 2017-09-13T00:32:05.000Z | 2018-05-21T18:17:32.000Z | src/tfi/base/__init__.py | tesserai/tfi | 6e89e8c8f1ca3b285c788cc6b802fc44f9001290 | [
"MIT"
] | 6 | 2017-09-14T17:54:21.000Z | 2018-01-27T19:31:18.000Z | src/tfi/base/__init__.py | ajbouh/tfi | 6e89e8c8f1ca3b285c788cc6b802fc44f9001290 | [
"MIT"
] | 11 | 2017-09-13T00:37:08.000Z | 2018-03-05T08:03:34.000Z | class _GetAttrAccumulator:
@staticmethod
def apply(gaa, target):
if not isinstance(gaa, _GetAttrAccumulator):
if isinstance(gaa, dict):
return {
_GetAttrAccumulator.apply(k, target): _GetAttrAccumulator.apply(v, target)
for k, v in gaa.items()
}
if isinstance(gaa, list):
return [_GetAttrAccumulator.apply(v, target) for v in gaa]
return gaa
result = target
for fn in gaa._gotten:
result = fn(target, result)
result = _GetAttrAccumulator.apply(result, target)
return result
def __init__(self, gotten=None, text=None):
if gotten is None:
gotten = []
self._gotten = gotten
self._text = "" if text is None else text
def __getitem__(self, item):
gotten = [
*self._gotten,
lambda t, o: o[item],
]
return _GetAttrAccumulator(gotten, "%s[%s]" % (self._text, item))
def __getattr__(self, name):
gotten = [
*self._gotten,
lambda t, o: getattr(o, name),
]
return _GetAttrAccumulator(gotten, "%s.%s" % (self._text, name))
def __call__(self, **kw):
gotten = [
*self._gotten,
lambda t, o: o(**{
k: _GetAttrAccumulator.apply(v, t)
for k, v in kw.items()
}),
]
return _GetAttrAccumulator(gotten, "%s(...)" % self._text)
def __str__(self):
return "_GetAttrAccumulator<%s>" % self._text
def _recursive_transform(o, fn):
# First, a shallow tranform.
o = fn(o)
# Now a recursive one, if needed.
if isinstance(o, dict):
return {
k: _recursive_transform(v, fn)
for k, v in o.items()
}
elif isinstance(o, list):
return [
_recursive_transform(e, fn)
for e in o
]
else:
return o
| 28.785714 | 94 | 0.519107 |
088339b68187b44ef23b5bd99e0b6d20e219fd43 | 3,162 | py | Python | ae_web/accounts/migrations/0001_initial.py | arrayexpress/ae_auto | 78e50cc31997cb5a69d0d74258b6b1a089ba387a | [
"Apache-2.0"
] | null | null | null | ae_web/accounts/migrations/0001_initial.py | arrayexpress/ae_auto | 78e50cc31997cb5a69d0d74258b6b1a089ba387a | [
"Apache-2.0"
] | 4 | 2020-06-05T19:26:42.000Z | 2022-03-29T21:55:14.000Z | ae_web/accounts/migrations/0001_initial.py | arrayexpress/ae_auto | 78e50cc31997cb5a69d0d74258b6b1a089ba387a | [
"Apache-2.0"
] | 1 | 2019-03-27T13:15:37.000Z | 2019-03-27T13:15:37.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
import django.core.validators
import django.contrib.auth.models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the custom 'User' model
    # replacing django.contrib.auth's default user. Notable additions over
    # the stock fields: is_admin / is_super_admin flags and a
    # self-referential 'manager' foreign key (related_name='users').

    dependencies = [
        ('auth', '0006_require_contenttypes_0002'),
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(null=True, verbose_name='last login', blank=True)),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, max_length=30, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.', 'invalid')], help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, verbose_name='username')),
                ('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),
                ('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),
                ('email', models.EmailField(max_length=254, verbose_name='email address', blank=True)),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('is_admin', models.BooleanField(default=False)),
                ('is_super_admin', models.BooleanField(default=False)),
                ('groups', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', verbose_name='groups')),
                ('manager', models.ForeignKey(related_name='users', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
                ('user_permissions', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
| 68.73913 | 432 | 0.665085 |
33f00e4e3af2c0dcdf7270ea4c7f233f2ddcd3cc | 12,850 | py | Python | tests/sentry/api/test_event_search.py | detouched/sentry | 1d3cc332c9ee1c2cf5ddaf1e850e14386c3684dd | [
"BSD-3-Clause"
] | null | null | null | tests/sentry/api/test_event_search.py | detouched/sentry | 1d3cc332c9ee1c2cf5ddaf1e850e14386c3684dd | [
"BSD-3-Clause"
] | 1 | 2021-05-09T11:43:43.000Z | 2021-05-09T11:43:43.000Z | tests/sentry/api/test_event_search.py | detouched/sentry | 1d3cc332c9ee1c2cf5ddaf1e850e14386c3684dd | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
import datetime
from django.utils import timezone
from parsimonious.exceptions import IncompleteParseError
from sentry.api.event_search import (
convert_endpoint_params, get_snuba_query_args, parse_search_query,
InvalidSearchQuery, SearchFilter, SearchKey, SearchValue
)
from sentry.testutils import TestCase
class EventSearchTest(TestCase):
    """Tests for sentry.api.event_search: query parsing and Snuba translation."""

    def test_parse_search_query(self):
        # test with raw search query at the end
        assert parse_search_query('user.email:foo@example.com release:1.2.1 hello') == [
            SearchFilter(
                key=SearchKey(name='user.email'),
                operator="=",
                value=SearchValue(raw_value='foo@example.com'),
            ),
            SearchFilter(
                key=SearchKey(name='release'),
                operator="=",
                value=SearchValue(raw_value='1.2.1'),
            ),
            SearchFilter(
                key=SearchKey(name='message'),
                operator='=',
                value=SearchValue(raw_value='hello'),
            )
        ]
        # if the search query starts with the raw query, assume the whole thing is a raw string
        assert parse_search_query('hello user.email:foo@example.com release:1.2.1') == [
            SearchFilter(
                key=SearchKey(name='message'),
                operator='=',
                value=SearchValue(raw_value='hello user.email:foo@example.com release:1.2.1'),
            ),
        ]

    def test_parse_search_query_timestamp(self):
        # test date format
        assert parse_search_query('timestamp>2015-05-18') == [
            SearchFilter(
                key=SearchKey(name='timestamp'),
                operator=">",
                value=SearchValue(
                    raw_value=datetime.datetime(
                        2015,
                        5,
                        18,
                        0,
                        0,
                        tzinfo=timezone.utc),
                ),
            ),
        ]
        # test date time format
        assert parse_search_query('timestamp>2015-05-18T10:15:01') == [
            SearchFilter(
                key=SearchKey(name='timestamp'),
                operator=">",
                value=SearchValue(
                    raw_value=datetime.datetime(
                        2015,
                        5,
                        18,
                        10,
                        15,
                        1,
                        tzinfo=timezone.utc),
                ),
            ),
        ]
        # test date time format w microseconds
        assert parse_search_query('timestamp>2015-05-18T10:15:01.103') == [
            SearchFilter(
                key=SearchKey(name='timestamp'),
                operator=">",
                value=SearchValue(
                    raw_value=datetime.datetime(
                        2015,
                        5,
                        18,
                        10,
                        15,
                        1,
                        103000,
                        tzinfo=timezone.utc),
                ),
            ),
        ]

    def test_parse_search_query_quoted_val(self):
        # quoted values may contain spaces; '!' negates the operator
        assert parse_search_query('release:"a release"') == [
            SearchFilter(
                key=SearchKey(name='release'),
                operator='=',
                value=SearchValue(raw_value='a release'),
            ),
        ]
        assert parse_search_query('!release:"a release"') == [
            SearchFilter(
                key=SearchKey(name='release'),
                operator='!=',
                value=SearchValue('a release'),
            ),
        ]

    def test_parse_search_query_quoted_key(self):
        # quoted keys allow ':' inside the tag name itself
        assert parse_search_query('"hi:there":value') == [
            SearchFilter(
                key=SearchKey(name='hi:there'),
                operator='=',
                value=SearchValue(raw_value='value'),
            ),
        ]
        assert parse_search_query('!"hi:there":value') == [
            SearchFilter(
                key=SearchKey(name='hi:there'),
                operator='!=',
                value=SearchValue(raw_value='value'),
            ),
        ]

    def test_parse_search_query_weird_values(self):
        # quotes within quotes
        assert parse_search_query('release:"a"thing""') == [
            SearchFilter(
                key=SearchKey(name='release'),
                operator='=',
                value=SearchValue(raw_value='a"thing"'),
            ),
        ]
        # newline within quote
        assert parse_search_query('release:"a\nrelease"') == [
            SearchFilter(
                key=SearchKey(name='release'),
                operator='=',
                value=SearchValue(raw_value='a\nrelease')
            ),
        ]
        # newline outside quote
        with self.assertRaises(IncompleteParseError):
            parse_search_query('release:a\nrelease')
        # tab within quote
        assert parse_search_query('release:"a\trelease"') == [
            SearchFilter(
                key=SearchKey(name='release'),
                operator='=',
                value=SearchValue(raw_value='a\trelease')
            ),
        ]
        # tab outside quote
        assert parse_search_query('release:a\trelease') == [
            SearchFilter(
                key=SearchKey(name='release'),
                operator='=',
                value=SearchValue(raw_value='a'),
            ),
            SearchFilter(
                key=SearchKey(name='message'),
                operator='=',
                value=SearchValue(raw_value='\trelease')
            ),
        ]
        # escaped quotes
        assert parse_search_query('release:"a\"thing\""') == [
            SearchFilter(
                key=SearchKey(name='release'),
                operator='=',
                value=SearchValue(raw_value='a"thing"')
            ),
        ]
        assert parse_search_query('release:"a\"\"release"') == [
            SearchFilter(
                key=SearchKey(name='release'),
                operator='=',
                value=SearchValue(raw_value='a""release')
            ),
        ]
        # poorly escaped quotes
        assert parse_search_query('release:"a release\"') == [
            SearchFilter(
                key=SearchKey(name='release'),
                operator='=',
                value=SearchValue(raw_value='a release')
            ),
        ]
        assert parse_search_query('release:\"a release "') == [
            SearchFilter(
                key=SearchKey(name='release'),
                operator='=',
                value=SearchValue(raw_value='a release ')
            ),
        ]

    def test_parse_search_query_custom_tag(self):
        # arbitrary (non-builtin) tags parse just like known keys
        assert parse_search_query('fruit:apple release:1.2.1') == [
            SearchFilter(
                key=SearchKey(name='fruit'),
                operator='=',
                value=SearchValue(raw_value='apple'),
            ),
            SearchFilter(
                key=SearchKey(name='release'),
                operator='=',
                value=SearchValue(raw_value='1.2.1'),
            ),
        ]

    def test_parse_search_query_has_tag(self):
        # 'has:key' becomes key != '' (tag present)
        # unquoted key
        assert parse_search_query('has:release') == [
            SearchFilter(
                key=SearchKey(name='release'),
                operator='!=',
                value=SearchValue(raw_value=''),
            ),
        ]
        # quoted key
        assert parse_search_query('has:"hi:there"') == [
            SearchFilter(
                key=SearchKey(name='hi:there'),
                operator='!=',
                value=SearchValue(raw_value=''),
            ),
        ]
        # malformed key
        with self.assertRaises(InvalidSearchQuery):
            parse_search_query('has:"hi there"')

    def test_parse_search_query_not_has_tag(self):
        # '!has:key' becomes key == '' (tag absent)
        # unquoted key
        assert parse_search_query('!has:release') == [
            SearchFilter(
                key=SearchKey(name='release'),
                operator='=',
                value=SearchValue(''),
            ),
        ]
        # quoted key
        assert parse_search_query('!has:"hi:there"') == [
            SearchFilter(
                key=SearchKey(name='hi:there'),
                operator='=',
                value=SearchValue(''),
            ),
        ]

    def test_get_snuba_query_args(self):
        # full pipeline: filters become Snuba conditions, params pass through
        assert get_snuba_query_args('user.email:foo@example.com release:1.2.1 fruit:apple hello', {
            'project_id': [1, 2, 3],
            'start': datetime.datetime(2015, 5, 18, 10, 15, 1, tzinfo=timezone.utc),
            'end': datetime.datetime(2015, 5, 19, 10, 15, 1, tzinfo=timezone.utc),
        }) == {
            'conditions': [
                ['email', '=', 'foo@example.com'],
                ['tags[sentry:release]', '=', '1.2.1'],
                ['tags[fruit]', '=', 'apple'],
                [['positionCaseInsensitive', ['message', "'hello'"]], '!=', 0],
            ],
            'filter_keys': {'project_id': [1, 2, 3]},
            'start': datetime.datetime(2015, 5, 18, 10, 15, 1, tzinfo=timezone.utc),
            'end': datetime.datetime(2015, 5, 19, 10, 15, 1, tzinfo=timezone.utc),
        }

    def test_negation_get_snuba_query_args(self):
        # negated filters wrap the column in ifNull so missing values match
        assert get_snuba_query_args('!user.email:foo@example.com') == {
            'conditions': [
                [['ifNull', ['email', "''"]], '!=', 'foo@example.com'],
            ],
            'filter_keys': {},
        }

    def test_get_snuba_query_args_no_search(self):
        # no query string: only endpoint params are translated
        assert get_snuba_query_args(params={
            'project_id': [1, 2, 3],
            'start': datetime.datetime(2015, 5, 18, 10, 15, 1, tzinfo=timezone.utc),
            'end': datetime.datetime(2015, 5, 19, 10, 15, 1, tzinfo=timezone.utc),
        }) == {
            'conditions': [],
            'filter_keys': {'project_id': [1, 2, 3]},
            'start': datetime.datetime(2015, 5, 18, 10, 15, 1, tzinfo=timezone.utc),
            'end': datetime.datetime(2015, 5, 19, 10, 15, 1, tzinfo=timezone.utc),
        }

    def test_get_snuba_query_args_wildcard(self):
        # '*' wildcards become anchored, escaped regexes via 'match'
        assert get_snuba_query_args('release:3.1.* user.email:*@example.com') == {
            'conditions': [
                [['match', ['tags[sentry:release]', "'^3\\.1\\..*$'"]], '=', 1],
                [['match', ['email', "'^.*\\@example\\.com$'"]], '=', 1],
            ],
            'filter_keys': {},
        }

    def test_get_snuba_query_args_negated_wildcard(self):
        assert get_snuba_query_args('!release:3.1.* user.email:*@example.com') == {
            'conditions': [
                [['match', [['ifNull', ['tags[sentry:release]', "''"]], "'^3\\.1\\..*$'"]], '!=', 1],
                [['match', ['email', "'^.*\\@example\\.com$'"]], '=', 1],
            ],
            'filter_keys': {},
        }

    def test_get_snuba_query_args_has(self):
        assert get_snuba_query_args('has:release') == {
            'filter_keys': {},
            'conditions': [[['ifNull', ['tags[sentry:release]', "''"]], '!=', '']]
        }

    def test_get_snuba_query_args_not_has(self):
        assert get_snuba_query_args('!has:release') == {
            'filter_keys': {},
            'conditions': [[['ifNull', ['tags[sentry:release]', "''"]], '=', '']]
        }

    def test_convert_endpoint_params(self):
        # endpoint params are converted into equality SearchFilters
        assert convert_endpoint_params({
            'project_id': [1, 2, 3],
            'start': datetime.datetime(2015, 5, 18, 10, 15, 1, tzinfo=timezone.utc),
            'end': datetime.datetime(2015, 5, 19, 10, 15, 1, tzinfo=timezone.utc),
        }) == [
            SearchFilter(
                key=SearchKey(name='start'),
                operator='=',
                value=SearchValue(
                    raw_value=datetime.datetime(
                        2015,
                        5,
                        18,
                        10,
                        15,
                        1,
                        tzinfo=timezone.utc),
                )
            ),
            SearchFilter(
                key=SearchKey(name='project_id'),
                operator='=',
                value=SearchValue(raw_value=[1, 2, 3])
            ),
            SearchFilter(
                key=SearchKey(name='end'),
                operator='=',
                value=SearchValue(
                    raw_value=datetime.datetime(
                        2015,
                        5,
                        19,
                        10,
                        15,
                        1,
                        tzinfo=timezone.utc),
                )
            ),
        ]
d7e279b24d45237786f5447972d84d66ddcc5b5f | 44,885 | py | Python | qteasy/operator.py | shepherdpp/qteasy | eabcbe79da7d196a530b8ef0d3f654c0b1dcf38d | [
"CC0-1.0"
] | 7 | 2020-02-10T22:47:11.000Z | 2022-03-24T06:50:54.000Z | qteasy/operator.py | shepherdpp/qteasy | eabcbe79da7d196a530b8ef0d3f654c0b1dcf38d | [
"CC0-1.0"
] | 8 | 2020-02-11T18:07:21.000Z | 2022-01-10T16:24:16.000Z | qteasy/operator.py | shepherdpp/qteasy | eabcbe79da7d196a530b8ef0d3f654c0b1dcf38d | [
"CC0-1.0"
] | 3 | 2021-05-09T03:16:55.000Z | 2022-02-18T21:52:34.000Z | # coding=utf-8
# operator.py
# ======================================
# This file contains Operator class, that
# merges and applies investment strategies
# to generate operation signals with
# given history data.
# ======================================
import pandas as pd
import numpy as np
from .finance import CashPlan
from .history import HistoryPanel
from .utilfuncs import str_to_list
from .strategy import RollingTiming
from .strategy import SimpleSelecting
from .strategy import SimpleTiming
from .built_in import AVAILABLE_STRATEGIES, BUILT_IN_STRATEGY_DICT
from .utilfuncs import unify, mask_to_signal
# TODO:
# TODO:作为完整的交易信号,为了实现更加贴近实际的交易信号,交易信号应该包括交易方向和头寸位置两个主要参数(对于股票来说
# TODO:只有多头头寸)
# TODO:position > 0时,表示多头头寸
# TODO:position < 0时,表示空头头寸
# TODO:两种不同的头寸位置配合开仓(signal>0)或平仓(signal<0)才能完整地表示所有的交易方式
# TODO:另外,还需要加入更多交易相关信息,如限价单、市价单、交易数量等等,总之,之前仅用singal表示交易信号的方式太过于简单了
# TODO: A reference data type should affiliated to strategies,
# TODO: which is useful when a reference data is needed. for
# TODO: example, a relative change rate is based on the difference
# TODO: between the stock prices and reference prices.
class Operator:
"""交易操作生成类,通过简单工厂模式创建择时属性类和选股属性类,并根据这两个属性类的结果生成交易清单
根据输入的参数生成Operator对象,在对象中创建相应的策略类型:
input:
:param selecting_types: 一个包含多个字符串的列表,表示不同选股策略,后续可以考虑把字符串列表改为逗号分隔的纯字符串输入
:param timing_types: 字符串列表,表示不同择时策略,后续可以考虑把字符串列表改为逗号分隔的纯字符串输入
:param ricon_types: 字符串列表,表示不同风控策略,后续可以考虑把字符串列表改为逗号分隔的纯字符串输入
Operator对象其实就是若干个不同类型的操作策略的容器对象,
在一个Operator对象中,可以包含任意多个"策略对象",而运行Operator生成交易信号的过程,就是调用这些不同的交易策略,并通过
不同的方法对这些交易策略的结果进行组合的过程
目前在Operator对象中支持五种策略生成器,而每种类型的策略生成器用不同的方式生成策略相关信号,可以在Operator对象使用若干不同
生成类型的策略。同时,Operator对象在生成最终交易信号时,成员策略会被用于三种用途,分别用于创建多空蒙板、选股蒙板以及交易信号矩阵
最后将两种蒙板和一种信号矩阵组合生成最终的交易信号清单。
在同一个Operator对象中,不同生成类型的策略可以被用于不同的用途,具体如下:
usage \ generator | RollingTiming | SimpleSelecting | Simple_Timing | FactoralSelecting | ReferenceTiming |
==================|===============|=================|===============|===================|=================|
long/short mask | Yes | Yes | Yes | Yes | Yes |
------------------|---------------|-----------------|---------------|-------------------|-----------------|
Portfolio mask | No | Yes | No | Yes | Yes |
------------------|---------------|-----------------|---------------|-------------------|-----------------|
signal matrix | Yes | No | Yes | No | Yes |
==五种策略信号生成器==
目前Operator支持五种不同生成类型的策略,它们并不仅局限于某一种用途,不同生成器之间的区别在于策略利用历史数据并生成最终结果的
方法不一样。五种生成类型的策略分别如下:
1, RollingTiming 逐品种滚动时序信号生成器,用于生成择时信号的策略
这类策略的共同特征是对投资组合中的所有投资产品逐个考察其历史数据,根据其历史数据,在历史数据的粒度上生成整个时间段上的
时间序列信号。时间序列信号可以为多空信号,即用>0的数字表示多头头寸,<0的数字代表空头头寸,0代表中性头寸。也可以表示交
易信号,即>0的数字表示建多仓或平空仓,<0的数字表示见空仓或平多仓。
这种策略生成器将投资组合中的每一个投资产品逐个处理,每个投资产品中的NA值可以单独处理,与其他投资品种不相关、互不影响,
同时,每个投资产品可以应用不同的参数生成信号,是最为灵活的择时信号生成器。
另外,为了避免前视偏差,滚动择时策略仅利用一小段有限的历史数据(被称为时间窗口)来生成每一个时间点上的信号,同时确保
时间窗口总是处于需要计算多空位置那一点的过去。这种技术称为"时间窗口滚动"。这样的滚动择时信号生成方法适用于未来数据会
对当前的信号产生影响的情况下。采用滚动择时策略生成方法,可以确保每个时间点的交易信号只跟过去一段时间有关,从而彻底排除
前视偏差可能给策略带来的影响。
不过,由于时间窗口滚动的计算方式需要反复提取一段时间窗口内的数据,并反复计算,因此计算复杂度与数据总量M与时间窗口长度N
的乘积M*N成正比,效率显著低于简单时序信号生成策略,因此,在可能的情况下(例如,简单移动平均值相关策略不受未来价格影响)
应该尽量使用简单时序信号生成策略,以提升执行速度。
2, SimpleSelecting 简单投资组合分配器,用于周期性地调整投资组合中每个个股的权重比例
这类策略的共同特征是周期性运行,且运行的周期与其历史数据的粒度不同。在每次运行时,根据其历史数据,为潜在投资组合中的每
一个投资产品分配一个权重,并最终确保所有的权重值归一化。权重为0时表示该投资产品被从组合中剔除,而权重的大小则代表投资
过程中分配投资资金的比例。
这种方式生成的策略可以用于生成周期性选股蒙板,也可以用于生成周期性的多空信号模板。
这种生成方式的策略是针对历史数据区间运行的,是运算复杂度最低的一类生成方式,对于数量超大的投资组合,可以采用这种方式生
成投资策略。但仅仅局限于部分周期性运行的策略。
3, SimpleTiming 逐品种简单时序信号生成器,用于生成择时信号的策略
这类策略的共同特征是对投资组合中的所有投资产品逐个考察其历史数据,并在历史数据的粒度上生成整个时间段上的时间序列信号。
这种策略生成方法与逐品种滚动时序信号生成策略的信号产生方法类似,只是缺少了"滚动"的操作,时序信号是一次性在整个历史区间
上生成的,并不考虑未来数据对当前信号的影响。这类方法生成的信号既可以代表多空信号,也可以代表交易信号。
同时,简单时序信号生成器也保留了滚动时序信号生成器的灵活性优点:每个投资产品独立处理,不同数据的NA值互不关联,互不影响,
同时每个不同的投资产品可以应用完全不同的策略参数。最大的区别就在于信号不是滚动生成的。
正因为没有采用滚动计算的方式,因此简单时序信号生成器的计算复杂度只有O(M),与历史数据数量M成正比,远小于滚动生成器。
不过,其风险在于策略本身是否受未来数据影响,如果策略本身不受未来数据的影响,则采用简单时序生成器是一种较优的选择,例如,
基于移动平均线相交的相交线策略、基于过去N日股价变动的股价变动策略本身具备不受未来信息影响的特点,使用滚动时序生成器和
简单时序生成器能得到相同的结果,而简单时序生成器的计算耗时大大低于滚动时序生成器,因此应该采用简单滚动生成器。又例如,
基于指数平滑均线或加权平均线的策略,或基于波形降噪分析的策略,其输出信号受未来信息的影响,如果使用简单滚动生成器将会
导致未来价格信息对回测信号产生影响,因此不应该使用简单时序信号生成器。
4, FactoralSelecting 因子选股投资组合分配器,用于周期性地调整投资组合中每个个股的权重比例
这类策略的共同特征是周期性运行,且运行的周期与其历史数据的粒度不同。在每次运行时,根据其历史数据,为每一个股票计算一个
选股因子,这个选股因子可以根据任意选定的数据根据任意可能的逻辑生成。生成选股因子后,可以通过对选股因子的条件筛选和
排序执行选股操作。用户可以在策略属性层面定义筛选条件和排序方法,同时可以选择不同的选股权重分配方式
这种方式生成的策略可以用于生成周期性选股蒙板,也可以用于生成周期性的多空信号模板。
这种生成方式的策略是针对历史数据区间运行的,是运算复杂度最低的一类生成方式,对于数量超大的投资组合,可以采用这种方式生
成投资策略。但仅仅局限于部分周期性运行的策略。
5, ReferenceTiming 参考数据信号生成器
这类策略并不需要所选择股票本身的数据计算策略输出,而是利用参考数据例如大盘、宏观经济数据或其他数据来生成统一的股票多空
或选股信号模版。其计算的基本方法与Timing类型生成器基本一致,但是同时针对所有的投资组合进行计算,因此信号可以用于多空
蒙板和选股信号蒙本,计算的基础为参考信号
这种方式生成的策略可以用于生成周期性选股蒙板,也可以用于生成周期性的多空信号模板,同时也可以用于直接生成交易信号。
==策略的三种用途==
在Operator对象中,包含的策略可以有无限多个,但是Operator会将策略分别用于三种不同的用途,并把它们的输出混合成为最终的策略输出。
Operator对象可以同时将多个策略用于同一个用途,不过,为了确保输出唯一,多个策略的输出将被以某种方式混合,混合的方式是Operator
对象的属性,定义了同样用途的不同策略输出结果的混合方式,以下是三种用途及其混合方式的介绍:
用途1, 生成多空蒙板:
多空蒙板定义了整个历史时期内所有股票的多空仓位时间序列,如前所述,1代表多头满仓,0代表空仓,-1代表空头满仓。数值为1到
-1之间的小数时,代表一定的仓位百分比。当在Operator对象中定义了多个策略用于生成多空蒙板时,Operator会分别使用策略生
成数个蒙板,再将它们混合起来。
多空蒙板的混合方式由多空混合字符串来确定,字符串的格式为"[chg|pos]-0/9|cumulative"(此处应该使用正则表达式)
'str-T': T为浮点数,当多个策略多空蒙板的总体信号强度达到阈值T时,总体输出为1(或者-1),否则为0
'pos-N': N为正整数,取值区间为1到len(timing)的值,表示在N个策略为多时状态为多,否则为空
这种类型有一个变体:
'pos-N-T': T为信号强度阈值,忽略信号强度达不到该阈值的多空蒙板信号,将剩余的多空蒙板进行计数,信号数量达到或
超过N时,输出为1(或者-1),否则为0
'avg': 平均信号强度,所有多空蒙板的信号强度的平均值
'combo': 在每个策略发生反转时都会产生交易信号,信号的强度不经过衰减,但是通常第一个信号产生后,后续信号就再无意义
用途2, 生成选股蒙板:
选股蒙板定义了每一个时刻整个投资组合中每一个投资产品被分配到的权重。同样,如果定义了多个策略,也必须将它们的输出结果混
合成一个
选股蒙板的混合方式由一个逻辑表达式来确定,例如'0 and (1 or 2 and (3 or 4))'
上面的表达式表示了如何根据五个选股蒙板来确定一个个股是否被选中而且权重多大。在目前的系统中,qteasy只是简单地将and运算
处理为乘法,or运算处理为加法。在只有0和1的情况下这样做是没有问题的,但是在普遍蒙板中存在大量介于0和1之间的浮点数的
时候,就需要注意了,如果蒙板0中某个股票的权重为0.5,在蒙板1中的权重为0.5,那么0 and 1 与0 or 1的结果分别应该是什么?
目前这个问题的解决方式是:
0.5 and 0.5 = 0.5 * 0.5 = 0.25,
0.5 or 0.5 = 0.5 + 0.5 = 1
完成上述计算后重新unify整个蒙板
想到还有另一种解决方式:
0.5 and 0.5 = 0.5 * 0.5 = 0.25,
0.5 or 0.5 = 1 - (1 - 0.5) * (1 - 0.5) = 0.75
同样在完成上述计算后unify整个蒙板
孰优孰劣,还需要观察和试验,但是现在先把后一种方式写入代码中,后续再进行验证
用途3: 生成交易信号矩阵:
交易信号矩阵是由策略直接生成的交易信号组成的,如前所述,1代表开多仓或平空仓,-1代表平多仓或开空仓。与其他用途一样,如果
多个策略被用于同样的用途,应该把多个策略的输出混合成一个最终输出。
交易信号矩阵的混合方式跟多空蒙板的混合方式相似,以混合字符串定义。混合字符串的格式为"[chg|pos]-0/9|cumulative"
'chg-N': N为正整数,取值区间为1到len(timing)的值,表示多空状态在第N次信号反转时反转
'pos-N': N为正整数,取值区间为1到len(timing)的值,表示在N个策略为多时状态为多,否则为空
'cumulative': 在每个策略发生反转时都会产生交易信号,但是信号强度为1/len(timing)
==策略的组合==
以上三类策略通过不同的方式混合后,可以任意组合一种复杂的策略,因此,在qteasy系统中,复杂的策略是可以由很多个简单的策略组合而来
的。
在一个Operator对象中,作为容器可以容纳任意多个任意类型的策略,所有的策略以用途分成三组,所有的策略可以引用不同的历史数据,生成
同样大小尺度的结果(也就是说,生成的结果有相同的历史区间,相同的时间粒度),最后这些结果被通过某种方法"混合"起来,形成每个用途
的最终的结果,即多空模版、选股蒙板以及交易信号矩阵。
三种用途的结果又再次被组合起来,变成整个Operator对象的输出。
目前采用的组合方式是:
mask_to_signal(多空模版 * 选股蒙板) + 交易信号
其中mask_to_signal()函数的作用是将蒙板转化为交易信号,这样输出的就是交易信号
未来将第三类策略升级为单品种信号生成策略后,信号的组合方式就可以变为:
mask_to_signal(多空蒙板 * 选股蒙板)+ (交易信号 * 选股蒙板)
这样同样可以输出一组交易信号
但这样做还会有问题,预先生成交易信号在交易过程中存在MOQ时可能会发生这样的情况,在试图分多次建仓买入股票时,由于股票价值较高,导致
分批建仓的信号每次都无法买入,解决的思路有两个,第一是在回测时不仅接受交易信号,还接受目标仓位,那么如果第一次建仓不够买入一手
股票,到后续的目标仓位时总能有足够资金建仓。第二种是修改回测程序,在每次操作后记录理论操作数量和实际操作数量的差值,等下次有同方向
操作的时候补齐差额。孰优孰劣?目前还没有想清楚。
"""
# 对象初始化时需要给定对象中包含的选股、择时、风控组件的类型列表
AVAILABLE_LS_BLENDER_TYPES = ['avg', 'avg_pos', 'pos', 'str', 'combo', 'none']
    def __init__(self, selecting_types=None,
                 timing_types=None,
                 ricon_types=None):
        """Build the operator from the given selecting / timing / risk-control components.
        input:
            :param selecting_types: list of strings naming the selecting strategies; a comma
                separated plain string is also accepted
            :param timing_types: list of strings naming the timing strategies; a comma
                separated plain string is also accepted
            :param ricon_types: list of strings naming the risk-control strategies; a comma
                separated plain string is also accepted
        """
        # Object attributes:
        # common trading-signal attributes:
        # when a component kind is not given, fall back to the simplest built-in strategy
        if selecting_types is None:
            selecting_types = ['all']
        if isinstance(selecting_types, str):
            selecting_types = str_to_list(selecting_types)
        if timing_types is None:
            timing_types = ['long']
        if isinstance(timing_types, str):
            timing_types = str_to_list(timing_types)
        if ricon_types is None:
            ricon_types = ['ricon_none']
        if isinstance(ricon_types, str):
            ricon_types = str_to_list(ricon_types)
        # For each kind of strategy the Operator keeps three parallel lists plus one blender string:
        #   _<kind>_types:        type string of every strategy, e.g. ['MACD', 'DMA', 'MACD']
        #   _<kind>:              the strategy objects, e.g. [Timing(MACD), Timing(timing_DMA), Timing(MACD)]
        #   _<kind>_history_data: one 3-D np.ndarray history slice per strategy
        #   a blender string:     how the outputs of same-kind strategies are merged; its
        #                         meaning differs per strategy kind
        # this layout is essentially the same for all three strategy kinds
        self._timing_types = []
        self._timing = []
        self._timing_history_data = []
        self._ls_blender = 'pos-1'  # default blending method of the timing (long/short) strategies
        for timing_type in timing_types:
            # strings are looked up (case-insensitively) among the built-in strategies
            if isinstance(timing_type, str):
                if timing_type.lower() not in AVAILABLE_STRATEGIES:
                    raise KeyError(f'built-in timing strategy \'{timing_type}\' not found!')
                self._timing_types.append(timing_type)
                self._timing.append(BUILT_IN_STRATEGY_DICT[timing_type]())
            # ready-made strategy objects are registered directly
            elif isinstance(timing_type, (RollingTiming, SimpleTiming)):
                self._timing_types.append(timing_type.stg_type)
                self._timing.append(timing_type)
            else:
                raise TypeError(f'The timing strategy type \'{type(timing_type)}\' is not supported!')
        # Selecting strategies are registered the same way as the timing ones:
        # lists holding several mutually independent selecting strategies (at least one)
        self._selecting_type = []
        self._selecting = []
        self._selecting_history_data = []
        # build the default blender expression '0 or 1 or 2 ...' while registering the strategies
        cur_type = 0
        str_list = []
        for selecting_type in selecting_types:
            if cur_type == 0:
                str_list.append(str(cur_type))
            else:
                str_list.append(f' or {str(cur_type)}')
            cur_type += 1
            if isinstance(selecting_type, str):
                if selecting_type.lower() not in AVAILABLE_STRATEGIES:
                    raise KeyError(f'KeyError: built-in selecting type \'{selecting_type}\' not found!')
                self._selecting_type.append(selecting_type)
                self._selecting.append(BUILT_IN_STRATEGY_DICT[selecting_type]())
            elif isinstance(selecting_type, (SimpleSelecting, SimpleTiming)):
                self._selecting_type.append(selecting_type.stg_type)
                self._selecting.append(selecting_type)
            else:
                raise TypeError(f'Type Error, the type of object {type(selecting_type)} is not supported!')
        self._selecting_blender_string = ''.join(str_list)
        # create selecting blender by selecting blender string
        self._selecting_blender = self._exp_to_blender
        # risk-control strategies: same registration logic once more
        self._ricon_type = []
        self._ricon = []
        self._ricon_history_data = []
        self._ricon_blender = 'add'
        for ricon_type in ricon_types:
            if isinstance(ricon_type, str):
                if ricon_type.lower() not in AVAILABLE_STRATEGIES:
                    raise KeyError(f'ricon type {ricon_type} not available!')
                self._ricon_type.append(ricon_type)
                self._ricon.append(BUILT_IN_STRATEGY_DICT[ricon_type]())
            elif isinstance(ricon_type, (RollingTiming, SimpleTiming)):
                self._ricon_type.append(ricon_type.stg_type)
                self._ricon.append(ricon_type)
            else:
                raise TypeError(f'Type Error, the type of passed object {type(ricon_type)} is not supported!')
    @property
    def timing(self):
        """Return all timing strategy objects of this operator."""
        return self._timing
    @property
    def timing_count(self):
        """Return the number of timing strategy objects in this operator."""
        return len(self.timing)
    @property
    def ls_blender(self):
        """Return the long/short mask blender string of this operator."""
        return self._ls_blender
    @property
    def selecting(self):
        """Return all selecting strategy objects of this operator."""
        return self._selecting
    @property
    def selecting_count(self):
        """Return the number of selecting strategy objects in this operator."""
        return len(self.selecting)
    @property
    def selecting_blender(self):
        """Return the infix blender expression string of the selecting strategies."""
        return self._selecting_blender_string
    @property
    def selecting_blender_expr(self):
        """Return the parsed (prefix form) blender of the selecting strategies."""
        return self._selecting_blender
    @property
    def ricon(self):
        """Return all risk-control (ricon) strategy objects of this operator."""
        return self._ricon
    @property
    def ricon_count(self):
        """Return the number of risk-control strategy objects in this operator."""
        return len(self.ricon)
    @property
    def ricon_blender(self):
        """Return the blender string of the risk-control strategies."""
        return self._ricon_blender
@property
def strategies(self):
"""返回operator对象的所有策略子对象"""
stg = [item for item in self.timing + self.selecting + self.ricon]
return stg
    @property
    def strategy_count(self):
        """Return the total number of strategies held by this operator."""
        return len(self.strategies)
    @property
    def strategy_benders(self):
        """Return the three blenders as [ls_blender, selecting_blender, ricon_blender]."""
        # NOTE(review): the name looks like a typo for "strategy_blenders"; kept as-is
        # because renaming would break existing callers
        return [self.ls_blender, self.selecting_blender, self.ricon_blender]
@property
def op_data_types(self):
"""返回operator对象所有策略子对象所需数据类型的集合"""
d_types = [typ for item in self.strategies for typ in item.data_types]
d_types = list(set(d_types))
d_types.sort()
return d_types
@property
def op_data_freq(self):
"""返回operator对象所有策略子对象所需数据的采样频率"""
d_freq = [stg.data_freq for stg in self.strategies]
d_freq = list(set(d_freq))
assert len(d_freq) == 1, f'ValueError, there are multiple history data frequency required by strategies'
return d_freq[0]
@property
def opt_space_par(self):
"""一次性返回operator对象中所有参加优化(opt_tag != 0)的子策略的参数空间Space信息
改属性的返回值是一个元组,包含ranges, types两个列表,这两个列表正好可以直接用作Space对象的创建参数,用于创建一个合适的
Space对象
一个完整的投资策略由三类多个不同的子策略组成。每个子策略都有自己特定的参数空间,它们的参数空间信息存储在stg.par_boes以及
stg.par_types等属性中。通常,我们在优化参数的过程中,希望仅对部分策略的参数空间进行搜索,而其他的策略保持参数不变。为了实现
这样的控制,我们可以给每一个子策略一个属性opt_tag优化标签,通过设置这个标签的值,可以控制这个子策略是否参与优化:
{0: 'No optimization, 不参与优化,这个子策略在整个优化过程中将始终使用同一组参数',
1: 'Normal optimization, 普通优化,这个子策略在优化过程中使用不同的参数,这些参数都是从它的参数空间中按照规律取出的',
2: 'enumerate optimization, 枚举优化,在优化过程中使用不同的参数,但并非取自其参数空间,而是来自一个预设的枚举对象'}
这个函数遍历operator对象中所有子策略,根据其优化类型选择它的参数空间信息,组合成一个多维向量用于创建可以用于生成所有相关
策略的参数的高维空间
return: ranges, types
"""
ranges = []
types = []
for stg in self.strategies:
if stg.opt_tag == 0:
pass # 策略参数不参与优化
elif stg.opt_tag == 1:
# 所有的策略参数全部参与优化,且策略的每一个参数作为一个个体参与优化
ranges.extend(stg.par_boes)
types.extend(stg.par_types)
elif stg.opt_tag == 2:
# 所有的策略参数全部参与优化,但策略的所有参数组合作为枚举同时参与优化
ranges.append(stg.par_boes)
types.extend(['enum'])
return ranges, types
    @property
    def opt_types(self):
        """Return the optimization tag (opt_tag) of every strategy, in strategy order."""
        return [stg.opt_tag for stg in self.strategies]
    @property
    def max_window_length(self):
        """ Return the longest look-back window among all sub-strategies; used when
        preparing history data for backtests/optimization to guarantee enough records.
        :return: int
        """
        return max(stg.window_length for stg in self.strategies)
    @property
    def _exp_to_blender(self):
        """Parse the infix selecting-blender expression into a prefix token list.

        Accepted expressions use 'and' / 'or' in infix form (human friendly),
        with parentheses for forced precedence, e.g. '0 and (1 or 2)'; the digits
        are indices into the selecting-strategy list.  Infix form is convenient
        to write but slow to evaluate, so it is converted once into a prefix
        (Polish) expression that _selecting_blend() can evaluate sequentially
        without parentheses.  The conversion is a shunting-yard variant running
        over the token list from right to left.
        input: =====
            no input parameter
        return:===== s2: the prefix expression
        :rtype: list: prefix expression
        """
        # TODO: extract expression with re module
        prio = {'or' : 0,
                'and': 1}
        # two stacks drive the conversion
        s1 = []  # operator stack
        s2 = []  # output (result) stack
        # Tokenize with str.split(): every token must therefore be separated by a
        # blank.  A dedicated self.__split() method would remove that restriction.
        exp_list = self._selecting_blender_string.split()
        # Using list() instead would break on multi-digit operands such as '12'
        # and would require stripping special characters first:
        # exp_list = list(self._selecting_blender_string.
        #                 replace(' ', '').
        #                 replace('_', '').
        #                 replace('.', '').
        #                 replace('-', ''))
        # consume all tokens
        while exp_list:
            s = exp_list.pop()
            # tokens are read from right to left and handled as follows:
            if s.isdigit():
                # 1. operands go straight to the output stack
                s2.append(s)
            elif s == ')':
                # 2. in the reversed scan a ')' acts like an opening bracket
                s1.append(s)
            elif s == '(':
                # 3. '(' closes the bracket: pop operators until the matching ')' and drop it
                while s1[-1] != ')':
                    s2.append(s1.pop())
                s1.pop()
            elif s in prio.keys():
                # 4. operators:
                if s1 == [] or s1[-1] == ')' or prio[s] >= prio[s1[-1]]:
                    # push directly in these three cases
                    s1.append(s)
                else:
                    # otherwise pop the higher-priority operator first and re-queue this token
                    s2.append(s1.pop())
                    exp_list.append(s)
            else:
                raise ValueError(f'unidentified characters found in blender string: \'{s}\'')
        while s1:
            s2.append(s1.pop())
        s2.reverse()  # done: s2 now holds the prefix expression
        return s2
    @property
    def ready(self):
        """ assess if the operator is ready to generate
        :return:
        """
        # not implemented yet
        raise NotImplementedError
    def add_strategy(self, stg, usage):
        """add strategy"""
        # not implemented yet
        raise NotImplementedError
    def remove_strategy(self, stg):
        """remove strategy"""
        # not implemented yet
        raise NotImplementedError
    def clear(self):
        """clear all strategies
        :return:
        """
        # not implemented yet
        raise NotImplementedError
    def set_opt_par(self, opt_par):
        """Optimizer interface: slice the flat parameter vector and feed the slices to the strategies.
        :param opt_par:
        :type opt_par: Tuple
            one parameter vector that may carry the parameters of several strategies;
            it is distributed here among the strategies whose opt_tag is non-zero
        :return
            None
        Unlike set_parameter(), this method is meant for the optimization loop: it sets
        parameters on several strategies at once, as long as each strategy's opt_tag is
        non-zero.  When several strategies of one Operator are optimized together, a
        single Space covering all their parameter spaces is built; every vector drawn
        from that space concatenates the parameters of all optimized strategies in
        strategy order.  Example: three strategies taking 2, 3 and 3 parameters with
        opt_tags (1, 0, 1) are searched through a 5-dimensional vector
        [v0, v1, v2, v3, v4], split here as [v0, v1] -> stg1 and [v2, v3, v4] -> stg3.
        When a strategy's opt_tag is 2 its legal parameter sets are given as a whole
        (enumeration), so one single component of the vector holds the complete
        parameter tuple, e.g. [v0, v1, v2] with v2 = (i0, i1, i2) assigned unchanged.
        """
        s = 0
        k = 0
        # walk through all strategies of the operator, consuming opt_par left to right:
        for stg in self.strategies:
            # opt_tag == 0: this strategy keeps its fixed parameters during optimization
            if stg.opt_tag == 0:
                pass
            # opt_tag == 1: consume par_count individual values from the vector
            elif stg.opt_tag == 1:
                k += stg.par_count
                stg.set_pars(opt_par[s:k])
                s = k
            # opt_tag == 2: one component already holds the complete parameter tuple (enum)
            elif stg.opt_tag == 2:
                # take a single component and hand it to the strategy as-is
                k += 1
                stg.set_pars(opt_par[s])
                s = k
# TODO: 完善本函数的Docstring,添加详细的使用介绍和示例
def set_blender(self, blender_type, *args, **kwargs):
""" 统一的blender混合器属性设置入口
:param blender_type:
:type blender_type: str, 一个字符串,用于指定被设置属性的混合器的类型,接收到可识别
的输入后,调用不同的混合器设置函数,设置混合器
:return
None
"""
if isinstance(blender_type, str):
if blender_type.lower() in ['selecting', 'sel', 'select']:
self._set_selecting_blender(*args, **kwargs)
elif blender_type.lower() in ['ls', 'longshort', 'long_short']:
self._set_ls_blender(*args, **kwargs)
elif blender_type.lower() in ['ricon']:
self._set_ricon_blender(*args, **kwargs)
else:
raise ValueError(f'wrong input! \'{blender_type}\' is not a valid input, '
f'choose from [\'selecting\', \'sel\', \'ls\', \'ricon\']')
else:
raise TypeError(f'blender_type should be a string, got {type(blender_type)} instead')
pass
    def set_parameter(self,
                      stg_id: str,
                      pars: [tuple, dict] = None,
                      opt_tag: int = None,
                      par_boes: [tuple, list] = None,
                      par_types: [list, str] = None,
                      sample_freq: str = None,
                      window_length: int = None,
                      data_types: [str, list] = None,
                      **kwargs):
        """ Unified entry point for configuring one member strategy, selected by stg_id.
        stg_id has the form 'x-n' where x is one of 's'/'t'/'r' and n is an integer.
        Any argument left as None is simply not changed on the strategy.
        :param stg_id:
        :type stg_id: str, strategy id of the form 'x-N' meaning the N-th strategy of
            kind x, with x in {'s', 't', 'r'} for selecting, timing and risk-control
        :param pars:
        :type pars: tuple or dict, the strategy parameters to set
        :param opt_tag:
        :type opt_tag: int, optimization tag: 0 - excluded from optimization,
            1 - optimized normally, 2 - optimized as an enumeration
        :param par_boes:
        :type par_boes: tuple or list, value ranges of the parameters; one tuple per
            parameter element, e.g. [(0, 1), (0, 100), (0, 100)]
        :param par_types:
        :type par_types: str or list, parameter types complementing par_boes; see the
            Space class for details
        :param sample_freq:
        :type sample_freq: str, sampling frequency used when the strategy runs
        :param window_length:
        :type window_length: int, look-back window length of the strategy
        :param data_types:
        :type data_types: str or list, history data types the strategy needs
        :return:
        """
        assert isinstance(stg_id, str), f'stg_id should be a string like \'t-0\', got {stg_id} instead'
        l = stg_id.split('-')
        assert len(l) == 2 and l[1].isdigit(), f'stg_id should be a string like \'t-0\', got {stg_id} instead'
        if l[0].lower() == 's':
            assert int(l[1]) < self.selecting_count, \
                f'ValueError: trying to set parameter for the {int(l[1]) + 1}-th selecting strategy but there\'s only' \
                f' {self.selecting_count} selecting strategy(s)'
            strategy = self.selecting[int(l[1])]
        elif l[0].lower() == 't':
            assert int(l[1]) < self.timing_count, \
                f'ValueError: trying to set parameter for the {int(l[1]) + 1}-th timing strategy but there\'s only' \
                f' {self.timing_count} timing strategies'
            strategy = self.timing[int(l[1])]
        elif l[0].lower() == 'r':
            assert int(l[1]) < self.ricon_count, \
                f'ValueError: trying to set parameter for the {int(l[1]) + 1}-th ricon strategy but there\'s only ' \
                f'{self.ricon_count} ricon strategies'
            strategy = self.ricon[int(l[1])]
        else:
            raise ValueError(f'The identifier of strategy is not recognized, should be like \'t-0\', got {stg_id}')
        if pars is not None:
            if strategy.set_pars(pars):
                pass
            else:
                raise ValueError(f'parameter setting error')
        if opt_tag is not None:
            strategy.set_opt_tag(opt_tag)
        if par_boes is not None:
            strategy.set_par_boes(par_boes)
        if par_types is not None:
            strategy.par_types = par_types
        has_sf = sample_freq is not None
        has_wl = window_length is not None
        has_dt = data_types is not None
        if has_sf or has_wl or has_dt:
            strategy.set_hist_pars(sample_freq=sample_freq,
                                   window_length=window_length,
                                   data_types=data_types)
        # set up additional properties of the class if they do exist:
        strategy.set_custom_pars(**kwargs)
    # =================================================
    # Below are the public methods of the Operation module:
    def info(self, verbose = False):
        """ Print information about this operation object: the selecting and timing
        strategy types, the blending methods, the risk-control strategy types etc.
        Each sub-strategy prints its own details through its public info() method
        (rather than this method poking at their private attributes directly).
        The information of the Operation module itself is printed first.
        :type verbose: bool  (currently unused; reserved for more detailed output)
        """
        print('OPERATION MODULE INFO:')
        print('=' * 25)
        print('Information of the Module')
        print('=' * 25)
        # print the information of each sub-module:
        # the Selecting module first
        print('Total count of SimpleSelecting strategies:', len(self._selecting))
        print('the blend type of selecting strategies is', self._selecting_blender_string)
        print('Parameters of SimpleSelecting Strategies:')
        for sel in self.selecting:
            sel.info()
        print('=' * 25)
        # then the timing module
        print('Total count of timing strategies:', len(self._timing))
        print('The blend type of timing strategies is', self._ls_blender)
        print('Parameters of timing Strategies:')
        for tmg in self.timing:
            tmg.info()
        print('=' * 25)
        # finally the risk-control (ricon) module
        print('Total count of Risk Control strategies:', len(self._ricon))
        print('The blend type of Risk Control strategies is', self._ricon_blender)
        for ric in self.ricon:
            ric.info()
        print('=' * 25)
    # TODO: cash_plan is used here only temporarily; ideally a single "start_date" would
    # TODO: do, specified in core.run(), since its source depends on the run mode:
    def prepare_data(self, hist_data: HistoryPanel, cash_plan: CashPlan):
        """ Prepare and validate the history data slices before create_signal():
        - hist_data must be a HistoryPanel and must not be empty
        - cash_plan must be a CashPlan
        - locate the investment dates of cash_plan inside hist_data
        - every investment date must be a trading day present in hist_data
        - enough records must precede the first investment date to fill the
          rolling windows, and the last investment date must lie inside hist_data
        The relevant slices of hist_data are then stored per strategy into the
        operator's *_history_data attributes, from which create_signal() builds
        the trading signals without re-slicing on every call.
        :param hist_data:
        :type hist_data: HistoryPanel
            history data covering all shares and every data type required by all
            strategies of this operator; e.g. with two strategies needing
            (close, open, high) and (close, eps) respectively, the panel must
            contain close / open / high / eps over the proper range and frequency
        :param cash_plan:
        :type cash_plan: CashPlan
            the investment plan; temporarily passed in only to check that it fits
            inside the history-data range
        :return:
            None
        """
        # make sure the input history data is a HistoryPanel
        if not isinstance(hist_data, HistoryPanel):
            raise TypeError(f'Historical data should be HistoryPanel, got {type(hist_data)}')
        # TODO: temporary handling
        # make sure the cash plan has the right type
        if not isinstance(cash_plan, CashPlan):
            raise TypeError(f'cash plan should be CashPlan object, got {type(cash_plan)}')
        # make sure the history data is not empty
        if hist_data.is_empty:
            raise ValueError(f'history data can not be empty!')
        # locate the investment dates inside the history date index; the slice
        # starting point defaults to the first investment day of the cash plan
        first_cash_pos = np.searchsorted(hist_data.hdates, cash_plan.first_day)
        last_cash_pos = np.searchsorted(hist_data.hdates, cash_plan.last_day)
        # make sure enough data precedes the backtest start to fill the rolling windows
        # TODO: be more forgiving here: derive a sensible backtest range from fewer
        # TODO: user parameters, resolving conflicting inputs by priority instead of
        # TODO: insisting on perfectly consistent parameters.
        # TODO: in run mode 0 the cash_pos / max_window_length relation need not be checked
        assert first_cash_pos >= self.max_window_length, \
            f'InputError, History data starts on {hist_data.hdates[0]} does not have enough data to cover' \
            f' first cash date {cash_plan.first_day}, ' \
            f'expect {self.max_window_length} cycles, got {first_cash_pos} records only'
        # make sure the last investment day also falls inside the history data
        # TODO: be more forgiving: drop an out-of-range investment with a warning
        # TODO: instead of demanding perfect input.
        assert last_cash_pos < len(hist_data.hdates), \
            f'InputError, Not enough history data record to cover complete investment plan, history data ends ' \
            f'on {hist_data.hdates[-1]}, last investment on {cash_plan.last_day}'
        # every investment date of the cash plan must be found in the price index
        # (i.e. every investment date is a trading day)
        invest_dates_in_hist = [invest_date in hist_data.hdates for invest_date in cash_plan.dates]
        if not all(invest_dates_in_hist):
            np_dates_in_hist = np.array(invest_dates_in_hist)
            where_not_in = [cash_plan.dates[i] for i in list(np.where(np_dates_in_hist == False)[0])]
            raise ValueError(f'Cash investment should be on trading days, '
                             f'following dates are not valid!\n{where_not_in}')
        # make sure every strategy has its parameters set
        assert all(stg.has_pars for stg in self.strategies),\
            f'One or more strategies has no parameter set properly!'
        # make sure every blender is configured
        assert self.selecting_blender != ''
        assert self.ls_blender != ''
        assert self.ricon_blender != ''
        # associate the proper data slice with each strategy, one kind at a time
        self._selecting_history_data = [hist_data[stg.data_types, :, (first_cash_pos - stg.window_length):]
                                        for stg in self.selecting]
        # timing strategies need a data window large enough for rolling calculation
        self._timing_history_data = [hist_data[stg.data_types, :, (first_cash_pos - stg.window_length):]
                                     for stg in self.timing]
        self._ricon_history_data = [hist_data[stg.data_types, :, (first_cash_pos - stg.window_length):]
                                    for stg in self.ricon]
    # TODO: Improvements needed:
    # TODO: trading signals destined for backtest or live trading should be turned into
    # TODO: proper trade orders supporting futures as well, i.e. four order kinds:
    # TODO: 1 buy (open long), 2 sell (close long), 3 sel_short (open short),
    # TODO: 4 buy_to_cover (close short); define a standard order format plus a
    # TODO: signal-to-order converter for backtest / live trading.
    # TODO: Signal creation & backtest should also be improved:
    # TODO: emit both +1/-1 signals and target positions, so that with a MOQ a series
    # TODO: of gradual position increases does not fail entirely when each individual
    # TODO: signal is too weak to buy one lot; separating trade signals from position
    # TODO: signals also supports signal-type and position-type strategies alike.
    # TODO: Investigate:
    # TODO: hist_data was already distributed by prepare_data(), so the hist_data
    # TODO: parameter of create_signal() is hardly needed any more; likewise
    # TODO: sel.generate() should not need shares/dates — a slice of
    # TODO: selecting_history_data would suffice.
    def create_signal(self, hist_data: HistoryPanel):
        """ Generate the operation signal: apply the selecting, timing and risk-control
        strategies to the prepared history data, then blend the raw signals and
        legalize them into the final trading signal.
        input:
            :param hist_data:
            :type hist_data: HistoryPanel
                history panel exported from the data warehouse, holding one or more
                data series for several shares over a period at a given frequency.
                NOTE: it is NOT used directly for signal generation; the data that is
                actually used was sliced beforehand by prepare_data() into
                    self._selecting_history_data
                    self._timing_history_data
                    self._ricon_history_data
                so no history data has to be distributed on every call.
        :return=====
            list
            all legal trading signals produced by the strategies over the covered
            period, ready for the backtest module or for live trading
        """
        # Step 1: let every selecting strategy independently produce a selection mask (sel_mask)
        # all selecting parameters are object attributes, nothing has to be passed here
        # start from an empty list of masks
        # verify the input panel type and that timing/ricon data has been associated
        assert isinstance(hist_data, HistoryPanel), \
            f'Type Error: historical data should be HistoryPanel, got {type(hist_data)}'
        assert len(self._timing_history_data) > 0, \
            f'ObjectSetupError: history data should be set before signal creation!'
        assert len(self._ricon_history_data) > 0, \
            f'ObjectSetupError: history data should be set before signal creation!'
        sel_masks = []
        shares = hist_data.shares
        date_list = hist_data.hdates
        for sel, dt in zip(self._selecting, self._selecting_history_data):  # every selecting strategy produces one mask
            # TODO: the selecting input is still complex (extra shares and dates
            # TODO: arguments); unify sel.generate() with tmg/ricon.generate()
            history_length = dt.shape[1]
            sel_masks.append(
                    sel.generate(hist_data=dt, shares=shares, dates=date_list[-history_length:]))
            # each generated mask is appended to the mask list
        sel_mask = self._selecting_blend(sel_masks)  # blend all masks per the prefix blender expression
        # sel_mask.any(0) would yield a row vector flagging all-zero columns; multiplying
        # it into the history would zero out unselected shares and cut timing/ricon cost
        # TODO: this pre-filtering no longer fits the new data layout; adapt it
        # hist_selected = hist_data * selected_shares
        # Step 2: let every timing strategy independently produce a long/short mask (ls_mask)
        # shares never selected over the whole period are effectively ignored:
        # every timing strategy in the queue produces one mask
        ls_masks = np.array([tmg.generate(dt) for tmg, dt in zip(self._timing, self._timing_history_data)])
        ls_mask = self._ls_blend(ls_masks)  # blend all long/short masks into the final one
        # Step 3: generate the risk-control signal matrices
        # every risk-control strategy in the queue produces one matrix
        ricon_mats = np.array([ricon.generate(dt) for ricon, dt in zip(self._ricon, self._ricon_history_data)])
        ricon_mat = self._ricon_blend(ricon_mats)  # blend them into the final risk-control matrix
        # Convert the product of long/short and selection masks (the position mask) into
        # trade signals, add the risk-control matrix, and clip signals to [-1, 1]
        # to build the final signal matrix
        if self._ls_blender != 'none':
            # normal case: the blended ls_mask is multiplied with sel_mask, converted
            # to signals, then the risk-control matrix is added
            op_mat = (mask_to_signal(ls_mask * sel_mask) + ricon_mat).clip(-1, 1)
        else:
            # with ls_blender == 'none' the masks are not blended: each one is multiplied
            # with sel_mask and converted to signals separately before summing, then ricon is added
            op_mat = (mask_to_signal(ls_mask * sel_mask).sum(axis=0) + ricon_mat).clip(-1, 1)
        # build the output DataFrame, filling in the date index
        date_list = hist_data.hdates[-op_mat.shape[0]:]
        # TODO: a plain np.ndarray would probably be faster than a DataFrame here
        lst = pd.DataFrame(op_mat, index=date_list, columns=shares)
        # keep only the rows that carry at least one non-zero signal
        lst_out = lst.loc[lst.any(axis=1)]
        return lst_out
    # ================================
    # Below are the private methods of the Operation module
    def _set_ls_blender(self, ls_blender):
        """Set the blending method of the long/short masks; several methods are available.
        input:
            :param ls_blender:
                str, valid values include:
                'combo': every strategy reversal produces a signal; strengths are kept as-is
                'none': the masks are not blended at all; signals are derived per mask
                'str-T': T is a float > 0; the output is 1 or -1 only when the summed
                    long/short strength of all strategies exceeds T — no middle ground
                'pos-N-T': N in [1, len(timing)]: long when at least N strategies are long,
                    short otherwise; with float strategy outputs, T is the threshold above
                    which a value counts as long, e.g.:
                    'pos-2-0.2' means: go long when at least two strategies output a
                    long/short value > 0.2; the strength is the sum of all long/short
                    values, clipped to (-1, 1)
                'avg_pos-N-T': like 'pos-N-T' but the final strength is the average of the
                    strategies' signal strengths
                'avg': every strategy reversal produces a signal; the total strength is the
                    average of all strategies' strengths
        return:=====
            None
        """
        # TODO: validate the ls_blender format with a regex
        assert isinstance(ls_blender, str), f'TypeError, expecting string but got {type(ls_blender)}'
        self._ls_blender = ls_blender
def _set_selecting_blender(self, selecting_blender_expression):
""" 设置选股策略的混合方式,混合方式通过选股策略混合表达式来表示
给选股策略混合表达式赋值后,直接解析表达式,将选股策略混合表达式的前缀表达式存入选股策略混合器
"""
if not isinstance(selecting_blender_expression, str): # 如果输入不是str类型
self._selecting_blender_string = '0'
self._selecting_blender = ['0']
else:
self._selecting_blender_string = selecting_blender_expression
try:
self._selecting_blender = self._exp_to_blender
except:
raise ValueError(f'SimpleSelecting blender expression is not Valid: (\'{selecting_blender_expression}\')'
f', all elements should be separated by blank space, for example: '
f'\' 0 and ( 1 or 2 )\'')
    def _set_ricon_blender(self, ricon_blender):
        """Store the blending method of the risk-control signal matrices.

        No validation is done here; _ricon_blend() currently only honors 'add'.
        """
        self._ricon_blender = ricon_blender
def _ls_blend(self, ls_masks):
""" 择时策略混合器,将各个择时策略生成的多空蒙板按规则混合成一个蒙板
这些多空模板的混合方式由混合字符串来定义。
混合字符串是满足以下任意一种类型的字符串:
1, 'none'
模式表示输入的蒙板不会被混合,所有的蒙板会被转化为一个
三维的ndarray返回,不做任何混合,在后续计算中采用特殊计算方式
# 分别计算每一个多空蒙板的交易信号,然后再将交易信号混合起来.
2, 'avg':
avg方式下,持仓取决于看多的蒙板的数量,看多蒙板越多,持仓越高,
只有所有蒙板均看空时,最终结果才看空所有蒙板的权重相同,因此,如
果一共五个蒙板三个看多两个看空时,持仓为60%。更简单的解释是,混
合后的多空仓位是所有蒙版仓位的平均值.
3, '[pos]/[avg-pos](-N)(-T)'
格式为满足以上正则表达式的字符串,其混合规则如下:
在pos-N方式下,
最终的多空信号强度取决于蒙板集合中各个蒙板的信号值,只有满足N个以
上的蒙板信号值为多(>0)或者为空(<0)时,最终蒙板的多空信号才为多或
为空。最终信号的强度始终为-1或1,如果希望最终信号强度为输入信号的
平均值,应该使用avg_pos-N方式混合
pos-N还有一种变体,即pos-N-T模式,在这种模式下,N参数仍然代表看
多的参数个数阈值,但是并不是所有判断持仓为正的数据都会被判断为正
只有绝对值大于T的数据才会被接受,例如,当T为0.25的时候,0.35会
被接受为多头,但是0.15不会被接受为多头,因此尽管有两个策略在这个
时间点判断为多头,但是实际上只有一个策略会被接受.
avg_pos-N方式下,
持仓同样取决于看多的蒙板的数量,只有满足N个或更多蒙板看多时,最终结果
看多,否则看空,在看多/空情况下,最终的多空信号强度=平均多空信号强度
。当然,avg_pos-1与avg等价,如avg_pos-2方式下,至少两个蒙板看多
则最终看多,否则看空
avg_pos-N还有一种变体,即avg_pos-N-T模式,在通常的模式下加
入了一个阈值Threshold参数T,用来判断何种情况下输入的多空蒙板信号
可以被保留,当T大于0时,只有输入信号绝对值大于T的时候才会被接受为有
意义的信号否则就会被忽略。使用avg_pos-N-T模式,并把T设置为一个较
小的浮点数能够过滤掉一些非常微弱的多空信号.
4, 'str-T':
str-T模式下,持仓只能为0或+1,只有当所有多空模版的输出的总和大于
某一个阈值T的时候,最终结果才会是多头,否则就是空头
5, 'combo':
在combo模式下,所有的信号被加总合并,这样每个所有的信号都会被保留,
虽然并不是所有的信号都有效。在这种模式下,本意是原样保存所有单个输入
多空模板产生的交易信号,但是由于正常多空模板在生成卖出信号的时候,会
运用"比例机制"生成卖出证券占持有份额的比例。这种比例机制会在针对
combo模式的信号组合进行计算的过程中产生问题。
例如:在将两组信号A和B合并到一起之后,如果A在某一天产生了一个股票
100%卖出的信号,紧接着B在接下来的一天又产生了一次股票100%卖出的信号,
两个信号叠加的结果,就是产生超出持有股票总数的卖出数量。将导致信号问题
input:=====
:type: ls_masks:object ndarray, 多空蒙板列表,包含至少一个多空蒙板
return:=====
:rtype: object: 一个混合后的多空蒙板
"""
try:
blndr = str_to_list(self._ls_blender, '-') # 从对象的属性中读取择时混合参数
except:
raise TypeError(f'the timing blender converted successfully!')
assert isinstance(blndr[0], str) and blndr[0] in self.AVAILABLE_LS_BLENDER_TYPES, \
f'extracted blender \'{blndr[0]}\' can not be recognized, make sure ' \
f'your input is like "str-T", "avg_pos-N-T", "pos-N-T", "combo", "none" or "avg"'
l_m = ls_masks
l_m_sum = np.sum(l_m, 0) # 计算所有多空模版的和
l_count = ls_masks.shape[0]
# 根据多空蒙板混合字符串对多空模板进行混合
if blndr[0] == 'none':
return l_m
if blndr[0] == 'avg':
return l_m_sum / l_count
if blndr[0] == 'pos' or blndr[0] == 'avg_pos':
l_m_sign = 0.
n = int(blndr[1])
if len(blndr) == 3:
threshold = float(blndr[2])
for msk in ls_masks:
l_m_sign += np.sign(np.where(np.abs(msk) < threshold, 0, msk))
else:
for msk in ls_masks:
l_m_sign += np.sign(msk)
if blndr[0] == 'pos':
res = np.where(np.abs(l_m_sign) >= n, l_m_sign, 0)
return res.clip(-1, 1)
if blndr[0] == 'avg_pos':
res = np.where(np.abs(l_m_sign) >= n, l_m_sum, 0) / l_count
return res.clip(-1, 1)
if blndr[0] == 'str':
threshold = float(blndr[1])
return np.where(np.abs(l_m_sum) >= threshold, 1, 0) * np.sign(l_m_sum)
if blndr[0] == 'combo':
return l_m_sum
raise ValueError(f'Blender text \'({blndr})\' not recognized!')
    def _selecting_blend(self, sel_masks):
        """ Blend the selection masks of all selecting strategies into a single mask.
        input:
            :param sel_masks: list of np.ndarray selection masks, indexed by the digits
                appearing in the blender expression
        :return:
            ndarray, the blended (and re-unified) selection mask
        """
        exp = self._selecting_blender[:]  # work on a copy of the stored prefix expression
        s = []
        # evaluate the prefix expression by scanning it from right to left
        # (exp.pop() yields the last token): operands are pushed onto s, an
        # operator consumes the two topmost operands via _blend()
        # NOTE(review): the first s.pop() is passed as n1 — fine while the
        # operators ('and'/'or') are commutative; confirm before adding new ones
        while exp:  # equivalent to, but nicer than: while exp != []
            if exp[-1].isdigit():
                s.append(sel_masks[int(exp.pop())])
            else:
                s.append(self._blend(s.pop(), s.pop(), exp.pop()))
        return unify(s[0])
def _blend(self, n1, n2, op):
"""混合操作符函数,将两个选股、多空蒙板混合为一个
input:
:param n1: np.ndarray: 第一个输入矩阵
:param n2: np.ndarray: 第二个输入矩阵
:param op: np.ndarray: 运算符
return:
:return: np.ndarray
"""
if op == 'or':
return n1 + n2
elif op == 'and':
return n1 * n2
elif op == 'orr':
return 1 - (1 - n1) * (1 - n2)
else:
raise ValueError(f'ValueError, unknown operand, {op} is not an operand that can be recognized')
def _ricon_blend(self, ricon_mats):
if self._ricon_blender == 'add':
return ricon_mats.sum(axis=0)
raise NotImplementedError(f'ricon singal blender method ({self._ricon_blender}) is not supported!')
| 40.61991 | 121 | 0.593093 |
0124dceccc0f8e331d0ae1df73064d3aa3f21886 | 5,875 | py | Python | sound_classification/evaluate_classification.py | laurent-george/protolab_sound_recognition | c29a138c2b7cd60afc3c8b601d88857454202fc9 | [
"MIT"
] | 27 | 2015-07-09T15:07:39.000Z | 2019-10-03T07:44:32.000Z | sound_classification/evaluate_classification.py | lgeo3/protolab_sound_recognition | c29a138c2b7cd60afc3c8b601d88857454202fc9 | [
"MIT"
] | 7 | 2015-09-03T10:29:23.000Z | 2019-04-16T10:14:09.000Z | sound_classification/evaluate_classification.py | laurent-george/protolab_sound_recognition | c29a138c2b7cd60afc3c8b601d88857454202fc9 | [
"MIT"
] | 9 | 2015-07-09T15:07:17.000Z | 2019-10-10T18:24:56.000Z | __author__ = 'lgeorge'
#import seaborn as sns
import pylab
import classification_service
import numpy as np
import sound_classification.confusion_matrix
from sklearn.metrics import confusion_matrix
def plot_distribution_true_false(prediction_df):
    """Plot, for every expected class, confidence histograms of the classifier.

    Green histogram: confidences of the group's correctly classified samples
    (selection relies on pandas index alignment between the group and the
    full-frame mask).  Red histogram: confidences of false positives, i.e.
    samples predicted as the group's class but expected to be another class.
    One matplotlib figure is created per class; all are shown at the end.

    NOTE(review): `normed=0` was removed from matplotlib's hist() in 3.x —
    confirm the pinned matplotlib version, otherwise these calls raise (and
    the broad except blocks would swallow the error).

    :param prediction_df: DataFrame with columns 'expected', 'class_predicted'
        and 'confidence'
    :return: None (figures are displayed via pylab.show())
    """
    mask_well_classified = prediction_df.expected == prediction_df.class_predicted
    for group_name, g in prediction_df.groupby('expected'):
        print("group_name %s" % group_name)
        pylab.figure()
        try:
            v = g.confidence[mask_well_classified]
            pylab.hist(list(v), color='g', alpha=0.3, normed=0, range=(0,1), bins=10)
            #sns.distplot(g.confidence[mask_well_classified], color='g', bins=11) # TRUE POSITIVE
        except Exception as e:
            print(e)
        mask_wrong = (prediction_df.class_predicted == group_name) & (prediction_df.expected != group_name) # FALSE POSITIVE
        #v = g.confidence[~mask_well_classified]
        try:
            v = prediction_df.confidence[mask_wrong]
            pylab.hist(list(v), color='r', alpha=0.3, normed=0, range=(0,1), bins=10)
            #sns.distplot(v, color='r', bins=11)
        except Exception as e:
            print(e)
            #print(len(v))
            pass
        print("FIN figure %s" % group_name)
    pylab.show()
    print("")
    #return pylab.gcf()
def get_expected_predicted_stratified_fold(stratified_fold, df, window_block=None, clf=None, window_block_learning=None, calibrate_score=False, keep_first_slice_only=False):
    """
    Tool function to report classification accuracy for our classification tools.

    For each (train, test) split of stratified_fold a SoundClassification model is
    trained on the training files, then every test file is classified.

    :param stratified_fold: iterable of (train_indices, test_indices) pairs
    :param df: DataFrame with at least 'filename' and 'classname' columns
    :param window_block: analysis window forwarded to processed_wav()
    :param clf: classifier instance forwarded to SoundClassification
    :param window_block_learning: learning window forwarded to SoundClassification
    :param calibrate_score: whether the prediction scores are calibrated
    :param keep_first_slice_only: keep only the first slice prediction per file
        instead of one prediction per slice
    :return: (expected, predicted, labels, filenames)
        NOTE(review): `labels` comes from the LAST fold only and is unbound when
        stratified_fold is empty — confirm every fold sees the same class set.
    """
    predicted=[]
    expected=[]
    filenames = []
    fold_num = 0
    for train_set, test_set in stratified_fold:
        train_files = df.iloc[train_set].filename
        sound_classification_obj = classification_service.SoundClassification(train_files.tolist(), clf=clf, window_block_learning=window_block_learning , calibrate_score=calibrate_score)
        sound_classification_obj.learn()
        labels = sound_classification_obj.clf.classes_
        for index in test_set:
            val = df.iloc[index]
            try:
                prediction = sound_classification_obj.processed_wav(val.filename, window_block=window_block, ignore_fs=True)
                #print(val.filename)
                #if "FireAlarmFr-002-PepperLaurent.wav" in val.filename:
                #    print("len of prediction for file %s is %s" % (val.filename, len(prediction)))
                #    import pdb
                #    pdb.set_trace()
                if keep_first_slice_only:
                    filenames.append(val.filename)
                    predicted.append(prediction[0])
                    expected.append(val.classname)
                else:
                    expected.extend([val.classname]*len(prediction))
                    predicted.extend(prediction)
                    filenames.extend(['_'.join([val.filename, '_fold%s' % fold_num])]*len(prediction)) # we append the num of fold to filename to have easy difference after that.... TODO: use another column
            except classification_service.SoundClassificationException as e:
                print("Exception %s detected on %s" % (e, val.filename))
        fold_num += 1
    return expected, predicted, labels, filenames  # getting crowded — why not return a dict or a pandas DataFrame: TODO
def filter_out_based_on_threshold(prediction_df, score_threshold_dict):
    """Relabel low-confidence predictions as 'UNKNOWN'.

    :param prediction_df: pandas.DataFrame with at least the columns
        'predicted_class' and 'confidence'; modified in place.
    :param score_threshold_dict: mapping class name -> minimum confidence;
        predictions of that class with confidence below the threshold are
        reassigned the class 'UNKNOWN'.
    :return: the (mutated) prediction_df.
    """
    # dict.iteritems() does not exist in Python 3 — use items()
    for name, threshold in score_threshold_dict.items():
        mask = (prediction_df.predicted_class == name) & (prediction_df.confidence < threshold)
        # .loc avoids the chained-assignment (SettingWithCopy) pitfall of
        # prediction_df.predicted_class[mask] = ...
        prediction_df.loc[mask, 'predicted_class'] = 'UNKNOWN'
    return prediction_df
def print_report(expected, predicted_class, labels, score_threshold_dict=None):
    """Compute and display a confusion matrix for the given predictions.

    :param expected: sequence of ground-truth labels
    :param predicted_class: sequence of predicted labels
    :param labels: label ordering used for the confusion-matrix axes
    :param score_threshold_dict: unused; kept for backward compatibility
    :return: the matplotlib figure produced by displayConfusionMatrix
    """
    # compute confusion matrix (sklearn.metrics.confusion_matrix, imported at module level)
    matrix = confusion_matrix(expected, predicted_class, labels=labels)
    # plot confusion matrix
    fig = sound_classification.confusion_matrix.displayConfusionMatrix(matrix, labels=labels)
    return fig
    # print report (dead code below, kept as reference)
    #import sklearn
    #print(sklearn.metrics.classification_report(expected, predicted_class))
def compute_precision_cumulative_curve(prediction_df, true_positive_class=None, step=0.01):
    """Compute cumulative precision as a function of the confidence threshold.

    For every threshold t in [0, 1) spaced by `step`, count the true positives
    (rows where expected == class_predicted) and the false positives of
    *true_positive_class* (rows predicted as that class but expected to be
    another one) whose confidence is >= t, and report
    precision(t) = TP(t) / (TP(t) + FP(t)).

    NOTE: complexity is O(len(bins) * len(prediction_df)) — fine for the small
    evaluation frames used here.

    :param prediction_df: DataFrame with columns 'expected', 'class_predicted'
        and 'confidence'
    :param true_positive_class: class whose false positives are counted
    :param step: threshold step size
    :return: (bins, res) where bins is the np.ndarray of thresholds and res the
        list of cumulative precisions; NaN where no prediction reaches the
        threshold (the original raised ZeroDivisionError there)
    """
    mask_true = prediction_df.expected == prediction_df.class_predicted  # TRUE POSITIVE
    mask_wrong = (prediction_df.class_predicted == true_positive_class) & (prediction_df.expected != true_positive_class)  # FALSE POSITIVE
    bins = np.arange(0, 1, step)
    res = []
    for prediction_threshold in bins:
        false_cumulative = np.sum(prediction_df.confidence[mask_wrong] >= prediction_threshold)
        true_cumulative = np.sum(prediction_df.confidence[mask_true] >= prediction_threshold)
        denominator = false_cumulative + true_cumulative
        if denominator:
            precision_cumulative = true_cumulative / float(denominator)
        else:
            # no prediction at or above this threshold: precision is undefined
            precision_cumulative = float('nan')
        res.append(precision_cumulative)
    return bins, res
def plot_precision_cumulative_curve(prediction_df, true_positive_class=None, step=0.01):
    """Scatter-plot cumulative precision vs. confidence threshold."""
    thresholds, precisions = compute_precision_cumulative_curve(
        prediction_df, true_positive_class=true_positive_class, step=step)
    pylab.scatter(thresholds, precisions)
def get_threshold_cum_precision(prediction_df, true_positive_class=None, min_expected_cum_precision=0.99):
    """Return the smallest confidence threshold reaching the target precision.

    :param prediction_df: DataFrame with ``expected``, ``class_predicted`` and
        ``confidence`` columns.
    :param true_positive_class: class whose precision curve is examined.
    :param min_expected_cum_precision: required cumulative precision.
    :return: first threshold whose cumulative precision meets the target, or
        1 (the most conservative threshold) when no threshold qualifies.
    """
    bins, precision_cumulative = compute_precision_cumulative_curve(prediction_df, true_positive_class=true_positive_class)
    valid_entries = np.argwhere(np.array(precision_cumulative) >= min_expected_cum_precision)
    # Bug fix: the original indexed argwhere(...)[0] *before* checking for
    # emptiness, so it raised IndexError whenever no threshold qualified and
    # the `== []` fallback was unreachable.
    if len(valid_entries) == 0:
        return 1  # worst threshold..
    return bins[valid_entries[0][0]]  # first qualifying threshold
| 45.542636 | 207 | 0.697532 |
e5d4483b8ec2f44fe361b9e670bf65f9a7bf71ab | 148 | py | Python | result.py | zeronph/FbMessengerBirthdayBOT | ec51f25ed4067669f58aa4d0ec0ad81e6281195b | [
"MIT"
] | 23 | 2017-10-09T02:20:41.000Z | 2022-02-13T13:27:10.000Z | result.py | phnhan0301/FbMessengerBirthdayBOT | ec51f25ed4067669f58aa4d0ec0ad81e6281195b | [
"MIT"
] | null | null | null | result.py | phnhan0301/FbMessengerBirthdayBOT | ec51f25ed4067669f58aa4d0ec0ad81e6281195b | [
"MIT"
] | 17 | 2017-10-08T10:59:39.000Z | 2022-03-11T20:22:47.000Z | friendlist = {'100005035713252':'Shãy Môūta','100014534922785':'Siwar Arfaoui','100013961718416':'Rahma Farhat','100003139845002':'Nesrine Zouaoui'} | 148 | 148 | 0.783784 |
490e5bf42c515a1de50b512479367a7a47c69eda | 1,309 | py | Python | gfootball/scenarios/academy_3_vs_1_with_keeper.py | mahi97/football | 18d51cb110034ce7080b2ccce14a2539d2a04af3 | [
"Apache-2.0"
] | 3 | 2019-06-20T05:47:02.000Z | 2019-08-25T05:04:11.000Z | gfootball/scenarios/academy_3_vs_1_with_keeper.py | AzharMithani/football | 0f09bcb8b3d48ac31987e13739e21a58ef0ca405 | [
"Apache-2.0"
] | 1 | 2019-06-09T10:06:36.000Z | 2019-06-09T10:06:36.000Z | gfootball/scenarios/academy_3_vs_1_with_keeper.py | nczempin/gfootball | 617e9cb6d48b4ac7187b9b3de68bd4ab44ea528e | [
"Apache-2.0"
] | 1 | 2019-06-09T12:42:28.000Z | 2019-06-09T12:42:28.000Z | # coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import *
def build_scenario(builder):
    """Academy 3-vs-1-with-keeper: three home attackers against one defender
    plus a goalkeeper; the episode ends on score, out-of-play or possession
    change."""
    for flag_name, flag_value in (('game_duration', 400),
                                  ('deterministic', False),
                                  ('offsides', False),
                                  ('end_episode_on_score', True),
                                  ('end_episode_on_out_of_play', True),
                                  ('end_episode_on_possession_change', True)):
        builder.SetFlag(flag_name, flag_value)
    builder.SetBallPosition(0.62, 0.0)

    # Home side: keeper + three central midfielders forming the attack.
    builder.SetTeam(Team.e_Home)
    for x, y, role in ((-1.0, 0.0, e_PlayerRole_GK),
                       (0.6, 0.0, e_PlayerRole_CM),
                       (0.7, 0.2, e_PlayerRole_CM),
                       (0.7, -0.2, e_PlayerRole_CM)):
        builder.AddPlayer(x, y, role)

    # Away side: keeper + single centre-back.
    builder.SetTeam(Team.e_Away)
    for x, y, role in ((-1.0, 0.0, e_PlayerRole_GK),
                       (-0.75, 0.0, e_PlayerRole_CB)):
        builder.AddPlayer(x, y, role)
| 31.926829 | 74 | 0.755539 |
6b42582ed4cbbba6e713d06083ad7be0c91ae0c5 | 1,541 | py | Python | git_hooks.py | joshi-bharat/linter | d0d26d46708b5adcbb384185014eb6524ad1e4ce | [
"BSD-3-Clause"
] | 25 | 2017-08-09T16:48:54.000Z | 2022-03-15T12:48:31.000Z | git_hooks.py | joshi-bharat/linter | d0d26d46708b5adcbb384185014eb6524ad1e4ce | [
"BSD-3-Clause"
] | 19 | 2017-08-15T15:39:40.000Z | 2021-11-11T10:46:23.000Z | git_hooks.py | mit-drl/linter | 25381bb35b9435e3d906a881130ad95ebdcba2cb | [
"BSD-3-Clause"
] | 11 | 2017-11-06T09:02:13.000Z | 2021-11-03T02:41:23.000Z | #!/usr/bin/env python
from __future__ import print_function
import os
import subprocess
import sys
def run_command_in_folder(command, folder):
    """Run a shell command in *folder* and return its stripped stdout as text.

    :param command: shell command line. Executed with ``shell=True`` -- only
        pass trusted, hard-coded commands here, never user input.
    :param folder: working directory for the command.
    :return: stdout with trailing whitespace removed, as ``str``.
    """
    run_command = subprocess.Popen(command,
                                   shell=True,
                                   cwd=folder,
                                   stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE,
                                   # Text mode: without this Python 3 returns
                                   # bytes, and os.path.join(bytes, str) in
                                   # main() raises TypeError.
                                   universal_newlines=True)
    stdout, _ = run_command.communicate()
    command_output = stdout.rstrip()
    return command_output
def get_git_repo_root(some_folder_in_root_repo='./'):
    """Return the top-level folder of the git repo containing the given folder."""
    toplevel_cmd = 'git rev-parse --show-toplevel'
    return run_command_in_folder(toplevel_cmd, some_folder_in_root_repo)
def get_linter_folder(root_repo_folder):
    """Return the linter folder from the LINTER_PATH environment variable.

    ``root_repo_folder`` is accepted for interface compatibility but unused.
    Exits the process with status 1 when the variable is not set.
    """
    linter_path = os.environ.get('LINTER_PATH')
    if linter_path is None:
        print("Cannot find linter because the environment variable "
              "LINTER_PATH doesn't exist.")
        sys.exit(1)
    return linter_path
def main():
    """Locate the linter relative to the current git repo and run its check."""
    repo_root = get_git_repo_root()                 # root of the current repo
    linter_subfolder = get_linter_folder(repo_root)  # from LINTER_PATH

    # Make the linter module importable, then run it.
    linter_folder = os.path.join(repo_root, linter_subfolder)
    sys.path.append(linter_folder)
    import linter
    linter.linter_check(repo_root, linter_folder)
main()
| 27.517857 | 79 | 0.635951 |
46c827dd90700fc195c16699aae7dc21de763094 | 7,369 | py | Python | Gems/AtomLyIntegration/TechnicalArt/DccScriptingInterface/azpy/dcc/maya/callbacks/event_callback_handler.py | prophetl33t/o3de | eaeeb883eee1594b1b93327f6909eebd1a826caf | [
"Apache-2.0",
"MIT"
] | null | null | null | Gems/AtomLyIntegration/TechnicalArt/DccScriptingInterface/azpy/dcc/maya/callbacks/event_callback_handler.py | prophetl33t/o3de | eaeeb883eee1594b1b93327f6909eebd1a826caf | [
"Apache-2.0",
"MIT"
] | null | null | null | Gems/AtomLyIntegration/TechnicalArt/DccScriptingInterface/azpy/dcc/maya/callbacks/event_callback_handler.py | prophetl33t/o3de | eaeeb883eee1594b1b93327f6909eebd1a826caf | [
"Apache-2.0",
"MIT"
] | null | null | null | # coding:utf-8
#!/usr/bin/python
#
# Copyright (c) Contributors to the Open 3D Engine Project.
# For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
#
# -------------------------------------------------------------------------
# <DCCsi>\\azpy\\maya\\\callbacks\\event_callback_handler.py
# Maya event callback handler
# -------------------------------------------------------------------------
"""
Module Documentation:
<DCCsi>:: azpy//maya//callbacks//event_callback_handler.py
.. module:: event_callback_handler
:synopsis: Simple event based callback_event handler
using maya.api.OpenMaya (api2)
.. :note: nothing mundane to declare
.. :attention: callbacks should be uninstalled on exit
.. :warning: maya may crash on exit if callbacks are not uninstalled
.. Usage:
def test_func(*arg):
            _logging.debug("~ test_func callback_event fired! arg={0}"
                           "".format(arg))
#register an event based based callback event
cb = EventCallbackHandler('NameChanged', test_func)
.. Reference:
The following call will return all the available events that can be
passed into the EventCallbackHandler.
import maya.api.OpenMaya as openmaya
openmaya.MEventMessage.getEventNames()
Important ones for quick reference are:
quitApplication
SelectionChanged
NameChanged
SceneSaved
NewSceneOpened
SceneOpened
PostSceneRead
workspaceChanged
.. moduleauthor:: Amazon Lumberyard
"""
#--------------------------------------------------------------------------
# -- Standard Python modules
import os
# -- External Python modules
# -- Lumberyard Extension Modules
import azpy
from azpy.env_bool import env_bool
from azpy.constants import ENVAR_DCCSI_GDEBUG
from azpy.constants import ENVAR_DCCSI_DEV_MODE
# -- Maya Modules
import maya.api.OpenMaya as openmaya
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
# -- Misc Global Space Definitions
_DCCSI_GDEBUG = env_bool(ENVAR_DCCSI_GDEBUG, False)      # global debug flag
_DCCSI_DEV_MODE = env_bool(ENVAR_DCCSI_DEV_MODE, False)  # developer-mode flag

_PACKAGENAME = __name__
# '==' instead of 'is': identity comparison against a str literal is
# implementation-dependent and raises SyntaxWarning on modern CPython.
if _PACKAGENAME == '__main__':
    _PACKAGENAME = 'azpy.dcc.maya.callbacks.event_callback_handler'

_LOGGER = azpy.initialize_logger(_PACKAGENAME, default_log_level=int(20))
# Pass the name directly; the original '{0}'.format({_PACKAGENAME}) built a
# one-element set and logged "Invoking:: {'...'}."
_LOGGER.debug('Invoking:: {0}.'.format(_PACKAGENAME))
# --------------------------------------------------------------------------
# =========================================================================
# First Class
# =========================================================================
class EventCallbackHandler(object):
    """
    A simple Maya event based callback_event handler class

    :ivar callback_event: stores event type trigger for a maya callback_event
    :vartype event: for example, 'NameChanged'
    :ivar this_function: stores this_function to call when callback_event is triggered
    :vartype this_function: for example,
        cb = EventCallbackHandler(callback_event='NameChanged',
                                  this_function=test_func)
    """

    # --BASE-METHODS-------------------------------------------------------
    def __init__(self, callback_event, this_function, install=True):
        """Initialize the handler and (optionally) install the callback.

        :param callback_event: Maya event name, e.g. 'NameChanged'
            (see openmaya.MEventMessage.getEventNames()).
        :param this_function: callable invoked when the event fires.
        :param install: when True, register the callback immediately.
        """
        # callback_event id storage (None until installed)
        self._callback_id = None
        # state tracker: None = never installed, True/False = last attempt
        self._message_id_set = None
        # the callback_event event trigger
        self._callback_event = callback_event
        # the thing to do on callback_event
        self._function = this_function

        if install:
            self.install()

    # --properties---------------------------------------------------------
    @property
    def callback_id(self):
        """Maya callback id, or None when not installed."""
        return self._callback_id

    @property
    def callback_event(self):
        """The Maya event name this handler listens to."""
        return self._callback_event

    @property
    def this_function(self):
        """The callable invoked when the event fires."""
        # Bug fix: the original returned self._this_function, an attribute
        # that was never set (__init__ stores self._function), so accessing
        # this property always raised AttributeError.
        return self._function

    # --method-------------------------------------------------------------
    def install(self):
        """
        installs this callback_event for event, which makes it active

        :return: the Maya callback id on success, False when already
            installed, None when registration failed.
        """
        add_event_method = openmaya.MEventMessage.addEventCallback

        # when called, check if it's already installed
        if self._callback_id:
            _LOGGER.warning("EventCallback::{0}:{1}, is already installed"
                            "".format(self._callback_event,
                                      self._function.__name__))
            return False

        # else try to install it
        try:
            self._callback_id = add_event_method(self._callback_event,
                                                 self._function)
        except Exception as e:
            # include the exception so failures are diagnosable
            _LOGGER.error("Failed to install EventCallback::'{0}:{1}': {2}"
                          "".format(self._callback_event,
                                    self._function.__name__, e))
            self._message_id_set = False
        else:
            _LOGGER.debug("Installing EventCallback::{0}:{1}"
                          "".format(self._callback_event,
                                    self._function.__name__))
            self._message_id_set = True

        return self._callback_id

    # --method-------------------------------------------------------------
    def uninstall(self):
        """
        uninstalls this callback_event for the event, deactivates

        :return: True when an installed callback was removed, False otherwise.
        """
        if self._callback_id:
            remove_event_callback = openmaya.MEventMessage.removeCallback
            try:
                remove_event_callback(self._callback_id)
            except Exception as e:
                _LOGGER.error("Couldn't remove EventCallback::{0}:{1}: {2}"
                              "".format(self._callback_event,
                                        self._function.__name__, e))

            self._callback_id = None
            self._message_id_set = None
            _LOGGER.debug("Uninstalled the EventCallback::{0}:{1}"
                          "".format(self._callback_event,
                                    self._function.__name__))
            return True
        else:
            _LOGGER.warning("EventCallback::{0}:{1}, not currently installed"
                            "".format(self._callback_event,
                                      self._function.__name__))
            return False

    # --method-------------------------------------------------------------
    def __del__(self):
        """
        if object is deleted, the callback_event is uninstalled
        (Maya may crash on exit if callbacks are left installed)
        """
        self.uninstall()
# -------------------------------------------------------------------------
#==========================================================================
# Class Test
#==========================================================================
# Manual smoke test: run this module inside Maya to verify callbacks fire.
if __name__ == "__main__":

    def test_func(*arg):
        # simple receiver that just echoes the callback arguments
        print("~ test_func callback_event fired! arg={0}"
              "".format(arg))

    # NOTE(review): the constructor already installs (install=True), so the
    # explicit install() below logs a warning and returns False.
    cb = EventCallbackHandler('NameChanged', test_func)
    cb.install()
    # callback_event is active
    #cb.uninstall()
    ## callback_event not active
a071e556fb0af977f5256f588d1c1310cbca7233 | 5,075 | py | Python | recoloradv/mister_ed/cifar10/cifar_resnets.py | YogaLYJ/ReColorAdv | b49aa59049e73f7ad02de1bef1add52e65b6f160 | [
"MIT"
] | 40 | 2019-01-17T22:17:42.000Z | 2022-03-23T06:24:00.000Z | mister_ed/cifar10/cifar_resnets.py | Mortal12138/geometric-certificates | 8730abaf2ab0c8972a2d40168d5fe64c8670fc62 | [
"MIT"
] | 6 | 2019-08-03T08:49:21.000Z | 2022-03-11T23:43:56.000Z | mister_ed/cifar10/cifar_resnets.py | Mortal12138/geometric-certificates | 8730abaf2ab0c8972a2d40168d5fe64c8670fc62 | [
"MIT"
] | 7 | 2019-12-14T07:19:51.000Z | 2021-05-23T15:01:29.000Z | '''
MISTER_ED_NOTE: I blatantly copied this code from this github repository:
https://github.com/akamaster/pytorch_resnet_cifar10
Huge kudos to Yerlan Idelbayev.
'''
'''
Properly implemented ResNet-s for CIFAR10 as described in paper [1].
The implementation and structure of this file is hugely influenced by [2]
which is implemented for ImageNet and doesn't have option A for identity.
Moreover, most of the implementations on the web is copy-paste from
torchvision's resnet and has wrong number of params.
Proper ResNet-s for CIFAR10 (for fair comparision and etc.) has following
number of layers and parameters:
name | layers | params
ResNet20 | 20 | 0.27M
ResNet32 | 32 | 0.46M
ResNet44 | 44 | 0.66M
ResNet56 | 56 | 0.85M
ResNet110 | 110 | 1.7M
ResNet1202| 1202 | 19.4m
which this implementation indeed has.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
[2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
If you use this implementation in you work, please don't forget to mention the
author, Yerlan Idelbayev.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch.autograd import Variable
__all__ = ['ResNet', 'resnet20', 'resnet32', 'resnet44', 'resnet56', 'resnet110', 'resnet1202']
def _weights_init(m):
classname = m.__class__.__name__
# print(classname)
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
try:
init.kaiming_normal_(m.weight)
except AttributeError:
init.kaiming_normal(m.weight)
class LambdaLayer(nn.Module):
    """Wrap an arbitrary callable as an nn.Module.

    Used below to express the parameter-free option-A shortcut of BasicBlock
    as a module inside the network graph.
    """

    def __init__(self, lambd):
        # lambd: callable applied to the input tensor in forward()
        super(LambdaLayer, self).__init__()
        self.lambd = lambd

    def forward(self, x):
        """Apply the wrapped callable to the input."""
        return self.lambd(x)
class BasicBlock(nn.Module):
    """Two-conv residual block for CIFAR ResNets (He et al., see module doc).

    out = ReLU( BN(conv2( ReLU(BN(conv1(x))) )) + shortcut(x) )
    """

    # output channels = planes * expansion (always 1 for BasicBlock)
    expansion = 1

    def __init__(self, in_planes, planes, stride=1, option='A'):
        # option 'A': parameter-free zero-padded identity shortcut
        # option 'B': 1x1 projection shortcut (adds parameters)
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        # Identity shortcut when shape is unchanged; otherwise downsample/pad.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            if option == 'A':
                """
                For CIFAR10 ResNet paper uses option A.
                """
                # Subsample spatially by striding (::2) and zero-pad the
                # channel dim by planes//4 on each side -- parameter-free.
                # NOTE(review): the padding only restores `planes` channels
                # when in_planes == planes // 2 (the stage-transition case).
                self.shortcut = LambdaLayer(lambda x:
                            F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes//4, planes//4), "constant", 0))
            elif option == 'B':
                self.shortcut = nn.Sequential(
                     nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                     nn.BatchNorm2d(self.expansion * planes)
                )

    def forward(self, x):
        # Residual sum, ReLU applied after the addition.
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class ResNet(nn.Module):
    """CIFAR-style ResNet: 3x3 stem, three stages of 16/32/64 channels,
    global average pooling, then a linear classifier."""

    def __init__(self, block, num_blocks, num_classes=10):
        # block: residual block class (e.g. BasicBlock)
        # num_blocks: three ints -- number of blocks per stage
        super(ResNet, self).__init__()
        # running input-channel count, advanced by _make_layer
        self.in_planes = 16

        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
        self.linear = nn.Linear(64, num_classes)

        self.apply(_weights_init)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage may downsample; the rest use stride 1.
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        # Global average pool over the remaining spatial extent, then flatten.
        out = F.avg_pool2d(out, out.size()[3])
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def resnet20():
    """20-layer CIFAR ResNet (~0.27M params)."""
    return ResNet(BasicBlock, [3] * 3)


def resnet32():
    """32-layer CIFAR ResNet (~0.46M params)."""
    return ResNet(BasicBlock, [5] * 3)


def resnet44():
    """44-layer CIFAR ResNet (~0.66M params)."""
    return ResNet(BasicBlock, [7] * 3)


def resnet56():
    """56-layer CIFAR ResNet (~0.85M params)."""
    return ResNet(BasicBlock, [9] * 3)


def resnet110():
    """110-layer CIFAR ResNet (~1.7M params)."""
    return ResNet(BasicBlock, [18] * 3)


def resnet1202():
    """1202-layer CIFAR ResNet (~19.4M params)."""
    return ResNet(BasicBlock, [200] * 3)
def test(net):
    """Print the trainable parameter count and layer count of *net*."""
    import numpy as np
    trainable = [p for p in net.parameters() if p.requires_grad]
    total_params = sum(np.prod(p.data.numpy().shape) for p in trainable)
    print("Total number of params", total_params)
    # "Layers" = trainable parameters with >1 dimension (weight matrices/kernels).
    print("Total layers", len([p for p in trainable if len(p.data.size()) > 1]))
| 30.945122 | 120 | 0.633498 |
d0cbbfb408ee3de97a3dcb47a1c4a27f82fa36d9 | 20,217 | py | Python | training/random_codes/MCD_codes/Train_MCD_mixstyle_multisrc.py | manogna-s/da-fer | 43229ba368454cb4e5aecab8fdb3ea68ad9060e4 | [
"MIT"
] | null | null | null | training/random_codes/MCD_codes/Train_MCD_mixstyle_multisrc.py | manogna-s/da-fer | 43229ba368454cb4e5aecab8fdb3ea68ad9060e4 | [
"MIT"
] | null | null | null | training/random_codes/MCD_codes/Train_MCD_mixstyle_multisrc.py | manogna-s/da-fer | 43229ba368454cb4e5aecab8fdb3ea68ad9060e4 | [
"MIT"
] | null | null | null | import torch.nn.functional as F
from torch.autograd import Variable
from models.ResNet_feat import ResClassifier
from train_setup import *
from utils.Loss import *
from models.mixstyle import activate_mixstyle, deactivate_mixstyle
import copy
eta = 1.0
num_k = 2 #4
def Train_MCD(args, G, F1, F2, train_source1_dataloader, train_source2_dataloader, train_target_dataloader, optimizer_g, optimizer_f, epoch,
writer, criterion):
"""Train."""
G.train()
F1.train()
F2.train()
# torch.autograd.set_detect_anomaly(True)
batch_size = args.train_batch
m_total_loss, m_loss1, m_loss2, m_loss_dis, m_entropy_loss = AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter()
# Get Source/Target Dataloader iterator
iter_source1_dataloader = iter(train_source1_dataloader)
iter_source2_dataloader = iter(train_source2_dataloader)
iter_target_dataloader = iter(train_target_dataloader)
num_iter = len(train_source1_dataloader) if (len(train_source1_dataloader) > len(train_target_dataloader)) else len(
train_target_dataloader)
for batch_index in range(num_iter):
try:
data_source1, landmark_source1, label_source1 = iter_source1_dataloader.next()
except:
iter_source1_dataloader = iter(train_source1_dataloader)
data_source1, landmark_source1, label_source1 = iter_source1_dataloader.next()
try:
data_source2, landmark_source2, label_source2 = iter_source2_dataloader.next()
except:
iter_source1_dataloader = iter(train_source1_dataloader)
data_source2, landmark_source2, label_source2 = iter_source2_dataloader.next()
data_source = Variable(torch.cat((data_source1, data_source2), 0))
landmark_source = Variable(torch.cat((landmark_source1, landmark_source2), 0))
label_source = Variable(torch.cat((label_source1, label_source2), 0))
perm = torch.randperm(args.train_batch)
data_source = data_source[perm]
landmark_source = landmark_source[perm]
label_source = label_source[perm]
try:
data_target, landmark_target, label_target = iter_target_dataloader.next()
except:
iter_target_dataloader = iter(train_target_dataloader)
data_target, landmark_target, label_target = iter_target_dataloader.next()
data_source, landmark_source, label_source = data_source.cuda(), landmark_source.cuda(), label_source.cuda()
data_target, landmark_target, label_target = data_target.cuda(), landmark_target.cuda(), label_target.cuda()
# Forward Propagation
data = Variable(torch.cat((data_source, data_target), 0))
landmark = Variable(torch.cat((landmark_source, landmark_target), 0))
label_source = Variable(label_source)
G.apply(activate_mixstyle)
output = G(data, landmark)
output1 = F1(output)
output2 = F2(output)
output_s1 = output1[:batch_size, :]
output_s2 = output2[:batch_size, :]
output_t1 = output1[batch_size:, :]
output_t2 = output2[batch_size:, :]
output_t1 = F.softmax(output_t1)
output_t2 = F.softmax(output_t2)
entropy_loss = - torch.mean(torch.log(torch.mean(output_t1, 0) + 1e-6))
entropy_loss -= torch.mean(torch.log(torch.mean(output_t2, 0) + 1e-6))
target1 = label_source
loss1 = criterion(output_s1, target1)
loss2 = criterion(output_s2, target1)
all_loss = loss1 + loss2 + args.lamda_ent * entropy_loss
all_loss.backward()
optimizer_g.step()
optimizer_f.step()
# Step B train classifier to maximize discrepancy
optimizer_g.zero_grad()
optimizer_f.zero_grad()
output = G(data, landmark)
output1 = F1(output)
output2 = F2(output)
output_s1 = output1[:batch_size, :]
output_s2 = output2[:batch_size, :]
output_t1 = output1[batch_size:, :]
output_t2 = output2[batch_size:, :]
output_t1 = F.softmax(output_t1)
output_t2 = F.softmax(output_t2)
loss1 = criterion(output_s1, target1)
loss2 = criterion(output_s2, target1)
entropy_loss = - torch.mean(torch.log(torch.mean(output_t1, 0) + 1e-6))
entropy_loss -= torch.mean(torch.log(torch.mean(output_t2, 0) + 1e-6))
loss_dis = torch.mean(torch.abs(output_t1 - output_t2))
F_loss = loss1 + loss2 + args.lamda_ent * entropy_loss
if epoch>0:
F_loss += - eta * loss_dis
F_loss.backward()
optimizer_f.step()
# Step C train generator to minimize discrepancy
if epoch>0:
for i in range(num_k):
G.apply(deactivate_mixstyle)
optimizer_g.zero_grad()
output = G(data, landmark)
output1 = F1(output)
output2 = F2(output)
output_s1 = output1[:batch_size, :]
output_s2 = output2[:batch_size, :]
output_t1 = output1[batch_size:, :]
output_t2 = output2[batch_size:, :]
loss1 = criterion(output_s1, target1)
loss2 = criterion(output_s2, target1)
output_t1 = F.softmax(output_t1)
output_t2 = F.softmax(output_t2)
loss_dis = torch.mean(torch.abs(output_t1 - output_t2))
entropy_loss = -torch.mean(torch.log(torch.mean(output_t1, 0) + 1e-6))
entropy_loss -= torch.mean(torch.log(torch.mean(output_t2, 0) + 1e-6))
loss_dis.backward()
optimizer_g.step()
print('Train Ep: {} [{}/{} ({:.0f}%)]\tLoss1: {:.6f}\tLoss2: {:.6f}\t Dis: {:.6f} Entropy: {:.6f}'.format(
epoch, batch_index * batch_size, 12000,
100. * batch_index / num_iter, loss1.data.item(), loss2.data.item(), loss_dis.data.item(),
args.lamda_ent * entropy_loss.data.item()))
# Log loss
m_total_loss.update(float(F_loss.cpu().data.item()))
m_loss1.update(float(loss1.cpu().data.item()))
m_loss2.update(float(loss2.cpu().data.item()))
m_loss_dis.update(float(loss_dis.cpu().data.item()))
m_entropy_loss.update(float(entropy_loss.cpu().data.item()))
LoggerInfo = '''
[Train source]:
Epoch {0}
Learning Rate {1}\n
'''.format(epoch, args.lr)
LoggerInfo += ''' Total Loss {loss:.4f} Cls1 Loss {loss1:.4f} Cls2 Loss {loss2:.4f} Discrepancy Loss {dis_loss:.4f} Entropy loss {ent_loss}''' \
.format(loss=m_total_loss.avg, loss1=m_loss1.avg, loss2=m_loss2.avg, dis_loss=m_loss_dis.avg,
ent_loss=m_entropy_loss.avg)
print(LoggerInfo)
return
def Test_MCD_tsne(args, G, F1, F2, dataloaders, epoch, splits=None):
    """Evaluate both MCD classifier heads on the requested splits and dump one
    t-SNE plot per split.

    The train-source features are always extracted first and copied into every
    plot so each split is visualized against the source distribution. Label
    offsets (+14 for train-source, +7 for target splits) only pick distinct
    t-SNE colors per domain; accuracy is computed on the raw labels.
    """
    if splits is None:  # evaluate on test splits by default
        splits = ['test_source', 'test_target']
    G.eval()
    G.apply(deactivate_mixstyle)  # MixStyle must be off at evaluation time
    F1.eval()
    F2.eval()
    Features = []
    Labels = []

    # Always run train_source first; its features seed every t-SNE plot below.
    if True:
        iter_dataloader = iter(dataloaders['train_source'])
        # per-class meters, one triple per classifier head
        acc1, prec1, recall1 = [AverageMeter() for i in range(args.class_num)], \
                               [AverageMeter() for i in range(args.class_num)], \
                               [AverageMeter() for i in range(args.class_num)]
        acc2, prec2, recall2 = [AverageMeter() for i in range(args.class_num)], \
                               [AverageMeter() for i in range(args.class_num)], \
                               [AverageMeter() for i in range(args.class_num)]

        for batch_index, (input, landmark, label) in enumerate(iter_dataloader):
            input, landmark, label = input.cuda(), landmark.cuda(), label.cuda()
            with torch.no_grad():
                feat = G(input, landmark)
                output1 = F1(feat)
                output2 = F2(feat)
            Features.append(feat.cpu().data.numpy())
            # +14 offset marks train-source samples in the t-SNE colormap
            Labels.append(label.cpu().data.numpy()+14)
            Compute_Accuracy(args, output1, label, acc1, prec1, recall1)
            Compute_Accuracy(args, output2, label, acc2, prec2, recall2)

        print('Classifier 1')
        AccuracyInfo, acc_avg, prec_avg, recall_avg, f1_avg = Show_Accuracy(acc1, prec1, recall1, args.class_num)
        print('Classifier 2')
        AccuracyInfo, acc_avg, prec_avg, recall_avg, f1_avg = Show_Accuracy(acc2, prec2, recall2, args.class_num)

    # Each split extends its own deep copy of the train-source features.
    Features_src_test = copy.deepcopy(Features)
    Labels_src_test = copy.deepcopy(Labels)
    Features_tar_train = copy.deepcopy(Features)
    Labels_tar_train = copy.deepcopy(Labels)
    Features_tar_test = copy.deepcopy(Features)
    Labels_tar_test = copy.deepcopy(Labels)

    for split in splits:
        print(f'\n[{split}]')
        iter_dataloader = iter(dataloaders[split])
        acc1, prec1, recall1 = [AverageMeter() for i in range(args.class_num)], \
                               [AverageMeter() for i in range(args.class_num)], \
                               [AverageMeter() for i in range(args.class_num)]
        acc2, prec2, recall2 = [AverageMeter() for i in range(args.class_num)], \
                               [AverageMeter() for i in range(args.class_num)], \
                               [AverageMeter() for i in range(args.class_num)]

        for batch_index, (input, landmark, label) in enumerate(iter_dataloader):
            input, landmark, label = input.cuda(), landmark.cuda(), label.cuda()
            with torch.no_grad():
                feat = G(input, landmark)
                output1 = F1(feat)
                output2 = F2(feat)
            # route features into the buffer matching this split
            if split == 'test_source':
                Features_src_test.append(feat.cpu().data.numpy())
                Labels_src_test.append(label.cpu().data.numpy())
            if split == 'test_target':
                Features_tar_test.append(feat.cpu().data.numpy())
                Labels_tar_test.append(label.cpu().data.numpy()+7)
            if split == 'train_target':
                Features_tar_train.append(feat.cpu().data.numpy())
                Labels_tar_train.append(label.cpu().data.numpy()+7)
            Compute_Accuracy(args, output1, label, acc1, prec1, recall1)
            Compute_Accuracy(args, output2, label, acc2, prec2, recall2)

        print('Classifier 1')
        AccuracyInfo, acc_avg, prec_avg, recall_avg, f1_avg = Show_Accuracy(acc1, prec1, recall1, args.class_num)
        print('Classifier 2')
        AccuracyInfo, acc_avg, prec_avg, recall_avg, f1_avg = Show_Accuracy(acc2, prec2, recall2, args.class_num)

    # Stack per-batch arrays and emit one t-SNE plot per split.
    Features_src_test = np.vstack(Features_src_test)
    Labels_src_test = np.concatenate(Labels_src_test)
    viz_tsne(args, Features_src_test, Labels_src_test, epoch=f'test_source_{epoch}')

    Features_tar_train = np.vstack(Features_tar_train)
    Labels_tar_train = np.concatenate(Labels_tar_train)
    viz_tsne(args, Features_tar_train, Labels_tar_train, epoch=f'train_target_{epoch}')

    Features_tar_test = np.vstack(Features_tar_test)
    Labels_tar_test = np.concatenate(Labels_tar_test)
    viz_tsne(args, Features_tar_test, Labels_tar_test, epoch=f'test_target_{epoch}')
    return
def Test_MCD(args, G, F1, F2, dataloaders, epoch, splits=None):
    """Evaluate both MCD classifier heads on the given splits and save a
    single t-SNE plot of the features pooled across all evaluated splits.

    Label offsets (+14 train_source, +7 target splits) only distinguish
    domains in the t-SNE colormap; accuracy uses the raw labels.
    """
    if splits is None:  # evaluate on test splits by default
        splits = ['test_source', 'test_target']
    G.eval()
    F1.eval()
    F2.eval()
    Features = []
    Labels = []
    for split in splits:
        print(f'\n[{split}]')
        iter_dataloader = iter(dataloaders[split])
        # per-class meters, one triple per classifier head
        acc1, prec1, recall1 = [AverageMeter() for i in range(args.class_num)], \
                               [AverageMeter() for i in range(args.class_num)], \
                               [AverageMeter() for i in range(args.class_num)]
        acc2, prec2, recall2 = [AverageMeter() for i in range(args.class_num)], \
                               [AverageMeter() for i in range(args.class_num)], \
                               [AverageMeter() for i in range(args.class_num)]

        for batch_index, (input, landmark, label) in enumerate(iter_dataloader):
            input, landmark, label = input.cuda(), landmark.cuda(), label.cuda()
            with torch.no_grad():
                feat = G(input, landmark)
                output1 = F1(feat)
                output2 = F2(feat)
            Features.append(feat.cpu().data.numpy())
            # domain-dependent label offset, used only for plot colors
            if split == 'train_source':
                Labels.append(label.cpu().data.numpy()+14)
            if split == 'test_source':
                Labels.append(label.cpu().data.numpy())
            if split == 'test_target':
                Labels.append(label.cpu().data.numpy()+7)
            if split == 'train_target':
                Labels.append(label.cpu().data.numpy()+7)
            Compute_Accuracy(args, output1, label, acc1, prec1, recall1)
            Compute_Accuracy(args, output2, label, acc2, prec2, recall2)

        print('Classifier 1')
        AccuracyInfo, acc_avg, prec_avg, recall_avg, f1_avg = Show_Accuracy(acc1, prec1, recall1, args.class_num)
        print('Classifier 2')
        AccuracyInfo, acc_avg, prec_avg, recall_avg, f1_avg = Show_Accuracy(acc2, prec2, recall2, args.class_num)

    # Pool all splits into one t-SNE plot named after the second split.
    Features = np.vstack(Features)
    Labels = np.concatenate(Labels)
    viz_tsne(args, Features, Labels, epoch=f'{splits[1]}_{epoch}')
    return
def main():
"""Main."""
torch.manual_seed(args.seed)
# Experiment Information
print_experiment_info(args)
dataloaders, G, optimizer_g, writer = train_setup(args)
args.train_batch=16
dataloaders['train_source1'] = BuildDataloader(args, split='train', domain='source', max_samples=args.source_labeled)
args.source='CK+'
dataloaders['train_source2'] = BuildDataloader(args, split='train', domain='source', max_samples=args.source_labeled)
args.source='RAF'
args.train_batch=32
optimizer_g, lr = lr_scheduler_withoutDecay(optimizer_g, lr=args.lr)
scheduler_g = optim.lr_scheduler.StepLR(optimizer_g, step_size=20, gamma=0.1, verbose=True)
F1 = ResClassifier(num_classes=args.class_num, num_layer=1)
F2 = ResClassifier(num_classes=args.class_num, num_layer=1)
F1.cuda()
F2.cuda()
optimizer_f = optim.SGD(list(F1.parameters()) + list(F2.parameters()), momentum=0.9, lr=0.001, weight_decay=0.0005)
scheduler_f = optim.lr_scheduler.StepLR(optimizer_f, step_size=20, gamma=0.1, verbose=True)
# G_ckpt= os.path.join(args.out, f'ckpts/MCD_G.pkl')
# if os.path.exists(G_ckpt):
# checkpoint = torch.load (G_ckpt, map_location='cuda')
# G.load_state_dict (checkpoint, strict=False)
# F1_ckpt= os.path.join(args.out, f'ckpts/MCD_F1.pkl')
# if os.path.exists(F1_ckpt):
# checkpoint = torch.load (F1_ckpt, map_location='cuda')
# F1.load_state_dict (checkpoint, strict=False)
# F2_ckpt= os.path.join(args.out, f'ckpts/MCD_F2.pkl')
# if os.path.exists(F2_ckpt):
# checkpoint = torch.load (F2_ckpt, map_location='cuda')
# F2.load_state_dict (checkpoint, strict=False)
if args.show_feat:
G_ckpt= os.path.join(args.out, f'ckpts/MCD_G.pkl')
if os.path.exists(G_ckpt):
checkpoint = torch.load (G_ckpt, map_location='cuda')
G.load_state_dict (checkpoint, strict=False)
F1_ckpt= os.path.join(args.out, f'ckpts/MCD_F1.pkl')
if os.path.exists(F1_ckpt):
checkpoint = torch.load (F1_ckpt, map_location='cuda')
F1.load_state_dict (checkpoint, strict=False)
F2_ckpt= os.path.join(args.out, f'ckpts/MCD_F2.pkl')
if os.path.exists(F2_ckpt):
checkpoint = torch.load (F2_ckpt, map_location='cuda')
F2.load_state_dict (checkpoint, strict=False)
Test_MCD_tsne(args, G, F1, F2, dataloaders, 30, splits=['test_source', 'train_target', 'test_target'])
return
if args.criterion == 'ce':
criterion = nn.CrossEntropyLoss()
elif args.criterion == 'focal':
criterion = FocalLoss(gamma=1)
elif args.criterion == 'weighted_focal':
if args.source == 'RAF_balanced':
cls_num_list= np.array([713, 262, 713, 713, 713, 682, 713])
else: #RAF
cls_num_list= np.array([1259, 262, 713, 4705, 1885, 682, 2465])
beta = 0.9999
effective_num = 1.0 - np.power(beta, cls_num_list)
per_cls_weights = (1.0 - beta) / np.array(effective_num)
per_cls_weights = per_cls_weights / np.sum(per_cls_weights) * len(cls_num_list)
#[0.65831665 3.01150101 1.13164193 0.20750166 0.45330163 1.18126904 0.35646808]
per_cls_weights = [1.75, 3.0, 2.0, 1.0, 1.5, 2.0, 1.25]
print(per_cls_weights)
class_weights = torch.FloatTensor(per_cls_weights).cuda()
criterion = FocalLoss(weight=class_weights, gamma=1)
elif args.criterion == 'ldam':
if args.source == 'RAF_balanced':
cls_num_list= np.array([713, 262, 713, 713, 713, 682, 713])
else: #RAF
cls_num_list= np.array([1259, 262, 713, 4705, 1885, 682, 2465])
idx = 0
betas = [0, 0.9999]
effective_num = 1.0 - np.power(betas[idx], cls_num_list)
per_cls_weights = (1.0 - betas[idx]) / np.array(effective_num)
per_cls_weights = per_cls_weights / np.sum(per_cls_weights) * len(cls_num_list)
per_cls_weights = [1.75, 3.0, 2.0, 1.0, 1.5, 2.0, 1.25]
per_cls_weights = torch.FloatTensor(per_cls_weights).cuda()
    def get_drw_weights(args, epoch, cls_num_list):
        """Per-class weights for deferred re-weighting (DRW).

        For the first 5 epochs beta=0 yields (up to normalization) uniform
        weights; from epoch 6 on beta=0.9999 yields class-balanced weights
        derived from each class's "effective number" 1 - beta**n_c.
        Returns a CUDA FloatTensor normalized to sum to the class count.
        """
        if True:  # kept from original; both DRW stages share this code path
            idx = 0 if epoch <= 5 else 1
            betas = [0, 0.9999]
            effective_num = 1.0 - np.power(betas[idx], cls_num_list)
            per_cls_weights = (1.0 - betas[idx]) / np.array(effective_num)
            # normalize so the weights sum to the number of classes
            per_cls_weights = per_cls_weights / np.sum(per_cls_weights) * len(cls_num_list)
            per_cls_weights = torch.FloatTensor(per_cls_weights).cuda()
        return per_cls_weights
print(f'Using {args.criterion} loss')
# Running Experiment
print("Run Experiment...")
for epoch in range(1, args.epochs + 1):
# if epoch < 5 and args.criterion == 'weighted_focal': #Try delayed reweighting
# criterion = FocalLoss(gamma=1)
if args.criterion=='ldam':
if epoch >4:
per_cls_weights = [1.75, 3.0, 2.0, 1.0, 1.5, 2.0, 1.25]
else:
per_cls_weights = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
# per_cls_weights = get_drw_weights(args, epoch, cls_num_list)
print(f'Epoch: {epoch}, per cls weights: {per_cls_weights}')
per_cls_weights = torch.FloatTensor(per_cls_weights).cuda()
criterion = LDAMLoss(cls_num_list, weight=per_cls_weights)
print(f'Epoch : {epoch}')
Train_MCD(args, G, F1, F2, dataloaders['train_source1'], dataloaders['train_source2'], dataloaders['train_target'], optimizer_g, optimizer_f,
epoch, writer, criterion)
scheduler_g.step()
scheduler_f.step()
print('\nEvaluation ...')
Test_MCD_tsne(args, G, F1, F2, dataloaders, epoch, splits=['test_source', 'train_target', 'test_target'])
# Test_MCD(args, G, F1, F2, dataloaders, epoch, splits=['train_source', 'test_source'])
# Test_MCD(args, G, F1, F2, dataloaders, epoch, splits=['train_source', 'train_target'])
# Test_MCD(args, G, F1, F2, dataloaders, epoch, splits=['train_source', 'test_target'])
# Test_MCD(args, G, F1, F2, dataloaders, epoch, splits=['train_source', 'train_target', 'test_source', 'test_target'])
if args.save_checkpoint and epoch%5:
torch.save(G.state_dict(), os.path.join(args.out, f'ckpts/MCD_G_{epoch}.pkl'))
torch.save(F1.state_dict(), os.path.join(args.out, f'ckpts/MCD_F1_{epoch}.pkl'))
torch.save(F2.state_dict(), os.path.join(args.out, f'ckpts/MCD_F2_{epoch}.pkl'))
writer.close()
# Script entry point: run the full training/evaluation pipeline.
if __name__ == '__main__':
    main()
| 44.141921 | 151 | 0.621853 |
2cdc08ce6f76053d409e81c4827b8cc3bb0ed9c9 | 520 | py | Python | mlic/__init__.py | learnmedicalcantsavecn/Machine_Learning | 537998bb47801d88c9a91432ffc71df1b6b89ae5 | [
"Apache-2.0"
] | 1 | 2019-12-11T14:13:37.000Z | 2019-12-11T14:13:37.000Z | mlic/__init__.py | learnmedicalcantsavecn/Machine_Learning | 537998bb47801d88c9a91432ffc71df1b6b89ae5 | [
"Apache-2.0"
] | null | null | null | mlic/__init__.py | learnmedicalcantsavecn/Machine_Learning | 537998bb47801d88c9a91432ffc71df1b6b89ae5 | [
"Apache-2.0"
] | 1 | 2020-11-19T14:05:53.000Z | 2020-11-19T14:05:53.000Z | # -*- coding: utf-8 -*-
"""
Description : Machine Learning in Action
Author : xxm
"""
import sys
import os
# import loguru
path = {
'BASE_PATH': os.path.abspath(os.path.dirname(os.path.dirname(__file__))),
'EXAMPLE_PATH': os.path.abspath(os.path.dirname(os.path.dirname(__file__))).__add__('/examples')
}
__all__ = ['cluster', 'linear_model', 'naive_bayes', 'neighbors', 'neural_network', 'svm', 'tree', 'utils', 'path',
'metrics']
if __name__ == '__main__':
pass
else:
pass
| 23.636364 | 115 | 0.632692 |
64bc6dba2fee2a272b90915098ee7dcc9f891829 | 1,097 | py | Python | crop_rotator/rotator/migrations/0027_auto_20210607_1251.py | Bahusson/crop_rotator | c1d86d36ce1867a84b927708f92c62c7815250a4 | [
"MIT"
] | 1 | 2021-05-08T07:04:45.000Z | 2021-05-08T07:04:45.000Z | crop_rotator/rotator/migrations/0027_auto_20210607_1251.py | Bahusson/crop_rotator | c1d86d36ce1867a84b927708f92c62c7815250a4 | [
"MIT"
] | 80 | 2020-11-18T20:35:12.000Z | 2021-06-13T08:08:36.000Z | crop_rotator/rotator/migrations/0027_auto_20210607_1251.py | Bahusson/crop_rotator | c1d86d36ce1867a84b927708f92c62c7815250a4 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.5 on 2021-06-07 10:51
from django.db import migrations
class Migration(migrations.Migration):
    """Drop a batch of obsolete columns from ``crop`` and ``cropfamily``.

    The operations list is built from (model, field) pairs; the resulting
    schema change is identical to spelling out one RemoveField per field.
    """

    dependencies = [
        ('rotator', '0026_auto_20210606_1500'),
    ]

    operations = [
        migrations.RemoveField(model_name=model_name, name=field_name)
        for model_name, field_name in (
            ('crop', 'is_deep_roots'),
            ('crop', 'is_demanding'),
            ('crop', 'is_leaves_mess'),
            ('crop', 'plant_type'),
            ('crop', 'redirect_id'),
            ('crop', 'redirect_name'),
            ('cropfamily', 'culture_manured'),
            ('cropfamily', 'is_manurable'),
        )
    ]
| 23.847826 | 47 | 0.52051 |
a631b8d408fe226e37ae762011b5a44a09c2989c | 3,090 | py | Python | tests/test_vader.py | merrickchoo/vaderSentiment | 1c942054a15ce941b6989c5b7246ec6f314c344a | [
"MIT"
] | 1 | 2021-08-25T11:34:34.000Z | 2021-08-25T11:34:34.000Z | tests/test_vader.py | merrickchoo/vaderSentiment | 1c942054a15ce941b6989c5b7246ec6f314c344a | [
"MIT"
] | 7 | 2021-08-25T11:09:22.000Z | 2021-09-07T13:35:08.000Z | tests/test_vader.py | merrickchoo/vaderSentiment | 1c942054a15ce941b6989c5b7246ec6f314c344a | [
"MIT"
] | null | null | null | # coding: utf-8
import vaderSentiment
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from collections import defaultdict as dd
# Reference polarity scores, keyed by model name. Each entry is a list of
# [sentence, (neg, neu, pos, compound)] pairs used by the tests below.
sent = dd(list)

# values for the original VADER model (English)
sent['en_vader'] = [
    ["VADER is smart, handsome, and funny.", (0.0, 0.254, 0.746, 0.8316)],
    ["I have a bad feeling about this.", (0.35, 0.5, 0.15, -0.4588)],
    ["I feel good about this.", (0.0, 0.58, 0.42, 0.4404)],
    ["Not such a badass after all.", (0.289, 0.711, 0.0, -0.2584)],
    ['They are going to start a direct flight soon 😍', (0.0, 0.8, 0.2, 0.4588)],
    ['They are going to start a direct flight soon 😠', (0.248, 0.752, 0.0, -0.5106)],
    ['They are going to start a direct flight soon😠', (0.248, 0.752, 0.0, -0.5106)],
]

# values for VADER + ESR (emoji sentiment ranking variant)
sent['en_vd-esd'] = [
    ["VADER is smart, handsome, and funny.", (0.0, 0.254, 0.746, 0.8316)],
    ["I have a bad feeling about this.", (0.35, 0.5, 0.15, -0.4588)],
    ["I feel good about this.", (0.0, 0.58, 0.42, 0.4404)],
    ["Not such a badass after all.", (0.289, 0.711, 0.0, -0.2584)],
    ['They are going to start a direct flight soon 😍', (0.0, 0.708, 0.292, 0.5736)],
    ['They are going to start a direct flight soon 😠', (0.197, 0.803, 0.0, -0.2978)],
    ['They are going to start a direct flight soon😠', (0.197, 0.803, 0.0, -0.2978)],
]

# values for GerVader + ESR (German)
sent['de_vd-esd'] = [
    ["VADER ist klug, gutaussehend und lustig. ", (0.0, 0.617, 0.383, 0.4767)],
    ["Nicht so krass letztlich.", (0.459, 0.541, 0.0, -0.3713)],
]

# values for GerVader (German)
sent['de_vader'] = [
    ["VADER ist klug, gutaussehend und lustig. ", (0.0, 0.617, 0.383, 0.4767)],
    ["Nicht so krass letztlich.", (0.459, 0.541, 0.0, -0.3713)],
    ["Ich bin überrascht zu sehen, nur wie erstaunlich nützlich VADER!",
     (0.459, 0.541, 0.0, -0.3713)],
]
def test_meta():
    """The stock English model must expose the expected metadata fields."""
    meta = SentimentIntensityAnalyzer('en_vader').meta
    assert meta['lang'] == 'en'
    assert meta['url'] == "https://github.com/cjhutto/vaderSentiment"
def test_models():
    """Every model must reproduce its reference polarity scores exactly."""
    for model_name, sentences in sent.items():
        analyzer = SentimentIntensityAnalyzer(model_name)
        for text, expected in sentences:
            scores = analyzer.polarity_scores(text)
            label = "{}: {}".format(model_name, text)
            assert scores['neg'] == expected[0], label
            assert scores['neu'] == expected[1], label
            assert scores['pos'] == expected[2], label
            assert scores['compound'] == expected[3], label
| 38.625 | 88 | 0.470227 |
060d21c1364bf4d52143ec573cdaae2133a374aa | 5,576 | py | Python | framework/net_device.py | spdk/tests | 471bb8be275a2b18933bfdb19da109973e09cf05 | [
"BSD-3-Clause"
] | 3 | 2020-04-30T10:47:35.000Z | 2022-01-03T08:18:21.000Z | framework/net_device.py | spdk/tests | 471bb8be275a2b18933bfdb19da109973e09cf05 | [
"BSD-3-Clause"
] | null | null | null | framework/net_device.py | spdk/tests | 471bb8be275a2b18933bfdb19da109973e09cf05 | [
"BSD-3-Clause"
] | 7 | 2017-10-12T08:15:37.000Z | 2021-09-07T06:56:04.000Z | # BSD LICENSE
#
# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from functools import wraps
import settings
from crb import Crb
from settings import TIMEOUT
NICS_LIST = []
class NetDevice(object):
    """One PCI network device attached to a Crb (test board/host).

    Wraps PCI addressing (domain:bus:devfun), driver discovery and kernel
    interface-name lookup for a single port.
    """

    def __init__(self, crb, domain_id, bus_id, devfun_id):
        """Bind to the device at domain:bus:devfun on *crb*.

        Raises Exception when *crb* is not a Crb instance. Resolves the
        kernel interface name eagerly (may remain 'N/A').
        """
        if not isinstance(crb, Crb):
            raise Exception(" Please input the instance of Crb!!!")
        self.crb = crb
        self.domain_id = domain_id
        self.bus_id = bus_id
        self.devfun_id = devfun_id
        self.pci = domain_id + ':' + bus_id + ':' + devfun_id
        self.pci_id = get_pci_id(crb, domain_id, bus_id, devfun_id)
        self.default_driver = settings.get_nic_driver(self.pci_id)
        self.intf_name = 'N/A'
        # Name of the second port of a dual-port device; never set here,
        # only exposed through get_interface2_name().
        self.intf2_name = None
        self.get_interface_name()

    def __send_expect(self, cmds, expected, timeout=TIMEOUT, alt_session=True):
        """
        Wrap the crb's session as private session for sending expect.
        """
        return self.crb.send_expect(
            cmds, expected, timeout=timeout, alt_session=alt_session)

    def nic_has_driver(func):
        """
        Decorator: refresh ``self.current_driver`` before calling *func*
        and short-circuit to '' when the NIC has no driver bound.
        """
        @wraps(func)
        def wrapper(*args, **kwargs):
            nic_instance = args[0]
            nic_instance.current_driver = nic_instance.get_nic_driver()
            if not nic_instance.current_driver:
                return ''
            return func(*args, **kwargs)
        return wrapper

    def get_nic_driver(self):
        """
        Return the name of the kernel driver currently bound to this device.
        """
        return self.crb.get_pci_dev_driver(
            self.domain_id, self.bus_id, self.devfun_id)

    @nic_has_driver
    def get_interface_name(self):
        """
        Resolve and cache the kernel interface name (e.g. ``eth0``).

        Dispatches to a driver-specific ``get_interface_name_<driver>``
        helper when one exists, otherwise falls back to the generic
        sysfs-based lookup. Returns 'N/A' when sysfs has no entry.
        """
        driver_alias = self.current_driver.replace('-', '_')
        # getattr with a default replaces the original broad try/except:
        # only a missing attribute could raise there.
        get_interface_name = getattr(
            self, 'get_interface_name_%s' % driver_alias,
            self.get_interface_name_generic)
        out = get_interface_name(self.domain_id, self.bus_id, self.devfun_id)
        if "No such file or directory" in out:
            self.intf_name = 'N/A'
        else:
            self.intf_name = out
        return self.intf_name

    def get_interface_name_generic(self, domain_id, bus_id, devfun_id):
        """
        Look up the interface name via sysfs (driver-agnostic fallback).
        """
        # The colons are backslash-escaped for the remote shell; use a
        # doubled backslash so Python does not treat "\:" as an invalid
        # escape sequence (SyntaxWarning on modern Python). The command
        # string sent to the shell is unchanged.
        command = 'ls --color=never /sys/bus/pci/devices/%s\\:%s\\:%s/net' % (
            domain_id, bus_id, devfun_id)
        return self.__send_expect(command, '# ')

    def get_interface2_name(self):
        """
        Return the interface name of the second port of this PCI device
        (``None`` unless set elsewhere).
        """
        return self.intf2_name
def get_pci_id(crb, domain_id, bus_id, devfun_id):
    # Stub: implicitly returns None. NOTE(review): callers
    # (NetDevice.__init__, GetNicObj) pass the result to
    # settings.get_nic_driver/get_nic_name as a lookup key -- presumably
    # this should return the device's vendor:device PCI id; confirm the
    # intended implementation.
    pass
def add_to_list(host, obj):
    """Register a NetDevice in the global NICS_LIST.

    ``host`` is the host's IP address; ``obj`` is the NetDevice instance,
    stored together with its PCI address for later lookup.
    """
    NICS_LIST.append({'host': host, 'pci': obj.pci, 'port': obj})
def get_from_list(host, domain_id, bus_id, devfun_id):
    """Look up a previously registered NetDevice in NICS_LIST.

    Matches on the host IP address and the PCI address built from
    domain:bus:devfun; returns the NetDevice, or None when not found.
    """
    wanted_pci = ':'.join((domain_id, bus_id, devfun_id))
    matches = (
        entry['port'] for entry in NICS_LIST
        if entry['host'] == host and entry['pci'] == wanted_pci
    )
    return next(matches, None)
def GetNicObj(crb, domain_id, bus_id, devfun_id):
    """Return the NetDevice for this PCI address.

    Reuses an already-registered object when one exists; otherwise creates
    a new NetDevice and records it in the global list.
    """
    existing = get_from_list(crb.crb['My IP'], domain_id, bus_id, devfun_id)
    if existing:
        return existing
    pci_id = get_pci_id(crb, domain_id, bus_id, devfun_id)
    # NOTE(review): the looked-up NIC name is unused here -- call kept for
    # parity with the original code.
    nic = settings.get_nic_name(pci_id)
    new_obj = NetDevice(crb, domain_id, bus_id, devfun_id)
    add_to_list(crb.crb['My IP'], new_obj)
    return new_obj
| 34.419753 | 79 | 0.647956 |
dc750561fb9c7c5c339b044f77715c9ef9eee63f | 8,119 | py | Python | test/unit/test_memcache.py | juhhov/pyOCD | 0965f7f038622ef1d933af5c6916c482a28244ff | [
"Apache-2.0"
] | 1 | 2020-07-11T09:24:25.000Z | 2020-07-11T09:24:25.000Z | test/unit/test_memcache.py | ARMmbed/pyOCD-Samsung | 03242b6eb57d2170a4b531d00f1a0577e2b0abde | [
"Apache-2.0"
] | null | null | null | test/unit/test_memcache.py | ARMmbed/pyOCD-Samsung | 03242b6eb57d2170a4b531d00f1a0577e2b0abde | [
"Apache-2.0"
] | null | null | null | # pyOCD debugger
# Copyright (c) 2016 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyocd.debug.cache import MemoryCache
from pyocd.debug.context import DebugContext
from pyocd.core import memory_map
from pyocd.utility import conversion
from pyocd.utility import mask
import pytest
import logging
@pytest.fixture(scope='function')
def memcache(mockcore):
    # One fresh MemoryCache per test, layered over a DebugContext that
    # forwards cache misses to the (externally provided) mock core.
    return MemoryCache(DebugContext(mockcore), mockcore)
class TestMemoryCache:
    """Coherency tests for MemoryCache over a mock core.

    Conventions the assertions rely on (presumably set up by the mockcore
    fixture, defined elsewhere -- confirm there): unwritten cacheable memory
    reads back as 0xff per byte, 0x30000000 lies in no memory region (reads
    return 0x55), and 0x20000410+ lies in a non-cacheable region.
    """
    # Writes through the cache must read back through the cache.
    def test_1(self, mockcore, memcache):
        memcache.write_memory_block8(0, [0x10, 0x12, 0x14, 0x16])
        assert memcache.read_memory_block8(0, 4) == [0x10, 0x12, 0x14, 0x16]
    # A read straddling the cached range fills the tail from the core (0xff).
    def test_2(self, mockcore, memcache):
        memcache.write_memory_block8(0, [0x10, 0x12, 0x14, 0x16])
        assert memcache.read_memory_block8(0, 4) == [0x10, 0x12, 0x14, 0x16]
        assert memcache.read_memory_block8(2, 4) == [0x14, 0x16, 0xff, 0xff]
    # 32-bit writes are little-endian, so byte reads see the swapped order.
    def test_3(self, mockcore, memcache):
        memcache.write_memory_block32(0, [0x10121416])
        assert memcache.read_memory_block32(0, 1) == [0x10121416]
        assert memcache.read_memory_block8(2, 4) == [0x12, 0x10, 0xff, 0xff]
    # Data written directly to the core is visible through the cache.
    def test_4(self, mockcore, memcache):
        mockcore.write_memory_block8(0, [1, 2, 3, 4])
        assert memcache.read_memory_block8(0, 8) == [1, 2, 3, 4, 0xff, 0xff, 0xff, 0xff]
        assert memcache.read_memory_block8(4, 4) == [0xff] * 4
        mockcore.write_memory_block8(10, [50, 51])
        assert memcache.read_memory_block8(6, 6) == [0xff, 0xff, 0xff, 0xff, 50, 51]
    # Two disjoint cached writes with an uncached gap in between.
    def test_5(self, mockcore, memcache):
        memcache.write_memory_block8(0, [1, 2])
        memcache.write_memory_block8(4, [3, 4])
        assert memcache.read_memory_block8(0, 8) == [1, 2, 0xff, 0xff, 3, 4, 0xff, 0xff]
    def test_6_middle_cached(self, mockcore, memcache):
        mockcore.write_memory_block8(0, [50, 51, 52, 53, 54, 55, 56, 57])
        memcache.write_memory_block8(4, [3, 4])
        assert memcache.read_memory_block8(0, 8) == [50, 51, 52, 53, 3, 4, 56, 57]
    def test_7_odd_cached(self, mockcore, memcache):
        mockcore.write_memory_block8(0, [50, 51, 52, 53, 54, 55, 56, 57])
        memcache.write_memory_block8(1, [1])
        memcache.write_memory_block8(3, [2])
        memcache.write_memory_block8(5, [3])
        memcache.write_memory_block8(7, [4])
        assert memcache.read_memory_block8(0, 8) == [50, 1, 52, 2, 54, 3, 56, 4]
    # Overlap cases between a cached write and a subsequent read.
    def test_8_no_overlap(self, mockcore, memcache):
        memcache.write_memory_block8(0, [1, 2, 3, 4])
        assert memcache.read_memory_block8(8, 4) == [0xff] * 4
    def test_9_begin_overlap(self, mockcore, memcache):
        memcache.write_memory_block8(4, range(8))
        assert memcache.read_memory_block8(0, 8) == [0xff, 0xff, 0xff, 0xff, 0, 1, 2, 3]
    def test_10_end_overlap(self, mockcore, memcache):
        memcache.write_memory_block8(0, range(8))
        assert memcache.read_memory_block8(4, 8) == [4, 5, 6, 7, 0xff, 0xff, 0xff, 0xff]
    def test_11_full_overlap(self, mockcore, memcache):
        memcache.write_memory_block8(0, range(8))
        assert memcache.read_memory_block8(0, 8) == list(range(8))
    # Exact boundary behaviour at the start and end of a cached range.
    def test_12_begin(self, mockcore, memcache):
        memcache.write_memory_block8(8, [1, 2, 3, 4])
        assert memcache.read_memory_block8(7, 1) == [0xff]
        assert memcache.read_memory_block8(8, 1) == [1]
    def test_13_end(self, mockcore, memcache):
        memcache.write_memory_block8(0, [1, 2, 3, 4])
        assert memcache.read_memory_block8(3, 1) == [4]
        assert memcache.read_memory_block8(4, 1) == [0xff]
    # A cached write partially overlapping earlier cached data must merge
    # with both the cache and the underlying core contents.
    def test_14_write_begin_ragged_cached(self, mockcore, memcache):
        memcache.write_memory_block8(4, [1, 2, 3, 4])
        mockcore.write_memory_block8(8, [90, 91, 92, 93])
        memcache.write_memory_block8(6, [55, 56, 57, 58])
        assert memcache.read_memory_block8(4, 8) == [1, 2, 55, 56, 57, 58, 92, 93]
    def test_15_write_end_ragged_cached(self, mockcore, memcache):
        memcache.write_memory_block8(4, [1, 2, 3, 4])
        mockcore.write_memory_block8(0, [90, 91, 92, 93])
        memcache.write_memory_block8(2, [55, 56, 57, 58])
        assert memcache.read_memory_block8(0, 8) == [90, 91, 55, 56, 57, 58, 3, 4]
    # Addresses outside any memory region must never be cached.
    def test_16_no_mem_region(self, mockcore, memcache):
        assert memcache.read_memory_block8(0x30000000, 4) == [0x55] * 4
        # Make sure we didn't cache anything.
        assert memcache._cache.overlap(0x30000000, 0x30000004) == set()
    # Reads/writes in a non-cacheable region pass straight through.
    def test_17_noncacheable_region_read(self, mockcore, memcache):
        mockcore.write_memory_block8(0x20000410, [90, 91, 92, 93])
        assert memcache.read_memory_block8(0x20000410, 4) == [90, 91, 92, 93]
        # Make sure we didn't cache anything.
        assert memcache._cache.overlap(0x20000410, 0x20000414) == set()
    def test_18_noncacheable_region_write(self, mockcore, memcache):
        memcache.write_memory_block8(0x20000410, [1, 2, 3, 4])
        mockcore.write_memory_block8(0x20000410, [90, 91, 92, 93])
        assert memcache.read_memory_block8(0x20000410, 4) == [90, 91, 92, 93]
        # Make sure we didn't cache anything.
        assert memcache._cache.overlap(0x20000410, 0x20000414) == set()
    # Writing into the middle of an existing cache entry must update it in
    # place rather than create a second, overlapping entry.
    def test_19_write_into_cached(self, mockcore, memcache):
        mockcore.write_memory_block8(4, [1, 2, 3, 4, 5, 6, 7, 8])
        assert memcache.read_memory_block8(4, 8) == [1, 2, 3, 4, 5, 6, 7, 8]
        memcache.write_memory_block8(6, [128, 129, 130, 131])
        assert memcache.read_memory_block8(4, 8) == [1, 2, 128, 129, 130, 131, 7, 8]
        assert len(list(memcache._cache.overlap(4, 12))[0].data) == 8
    # Zero-length operations are valid no-ops.
    def test_20_empty_read(self, memcache):
        assert memcache.read_memory_block8(128, 0) == []
    def test_21_empty_write(self, memcache):
        memcache.write_memory_block8(128, [])
    # This test reproduces a bug where writes followed by reads will start
    # accumulating and returning extra data.
    def test_22_multi_write_read_size(self, memcache):
        test_size = 128
        for i in range(100):
            data = [x for x in range(test_size)]
            memcache.write_memory_block8(0, data)
            block = memcache.read_memory_block8(0, test_size)
            assert data == block
    # Variant of test 22.
    def test_23_multi_write_1_read_size(self, memcache):
        test_size = 128
        data = [x for x in range(test_size)]
        for i in range(10):
            memcache.write_memory_block8(0, data)
            block = memcache.read_memory_block8(0, test_size)
            assert data == block
    # Variant of test 22.
    def test_24_1_write_multi_read_size(self, memcache):
        test_size = 128
        data = [x for x in range(test_size)]
        memcache.write_memory_block8(0, data)
        for i in range(10):
            block = memcache.read_memory_block8(0, test_size)
            assert data == block
    # Variant of test 22.
    def test_25_multi_write_subrange_1_read_size(self, memcache):
        test_size = 128
        data = [x for x in range(test_size)]
        memcache.write_memory_block8(0, data)
        for i in range(10):
            memcache.write_memory_block8(64, data[64:96])
        block = memcache.read_memory_block8(0, test_size)
        assert data == block
    # A small read from the middle of a large cached write.
    def test_26_read_subrange(self, memcache):
        data = list((n % 256) for n in range(320))
        memcache.write_memory_block8(0x20000000, data)
        block = memcache.read_memory_block8(0x2000007e, 4)
        assert block == data[0x7e:0x82]
    # TODO test read32/16/8 with and without callbacks
| 43.650538 | 88 | 0.665846 |
de701fa9beb3cc0de149f9eae070d5fdc9fb189f | 1,323 | py | Python | examples/pyplot/histo_polar.py | mikami520/vedo | 1a3abcf3f1e495287e8934d9b5bb07b511ab8be5 | [
"MIT"
] | 1 | 2022-03-22T21:49:29.000Z | 2022-03-22T21:49:29.000Z | examples/pyplot/histo_polar.py | mikami520/vedo | 1a3abcf3f1e495287e8934d9b5bb07b511ab8be5 | [
"MIT"
] | null | null | null | examples/pyplot/histo_polar.py | mikami520/vedo | 1a3abcf3f1e495287e8934d9b5bb07b511ab8be5 | [
"MIT"
] | null | null | null | from vedo import Hyperboloid, show
from vedo.pyplot import histogram
import numpy as np
np.random.seed(1)
##################################################################
radhisto = histogram(
np.random.rand(200)*6.28,
mode='polar',
title="random orientations",
bins=10,
c=range(10), #'orange', #uniform color
alpha=0.8,
labels=["label"+str(i) for i in range(10)],
)
show(radhisto, at=0, N=2, axes=0, sharecam=False)
##################################################################
hyp = Hyperboloid(res=20).cutWithPlane().rotateY(-90)
hyp.color('grey').alpha(0.3)
# select 10 random indices of points on the surface
idx = np.random.randint(0, hyp.NPoints(), size=10)
radhistos = []
for i in idx:
#generate a random histogram
rh = histogram(
np.random.randn(100),
mode='polar',
bins=12,
r1=0.2, # inner radius
phigap=1.0, # leave a space btw phi bars
cmap='viridis_r',
showDisc=False,
showAngles=False,
showErrors=False,
)
rh.scale(0.15) # scale histogram to make it small
rh.pos(hyp.points(i)) # set its position on the surface
rh.orientation(hyp.normalAt(i)) # orient it along normal
radhistos.append(rh)
show(hyp, radhistos, at=1).interactive().close()
| 29.4 | 66 | 0.565382 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.