content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
print("Hello Open Source")
|
nilq/baby-python
|
python
|
def extractLightNovelsWorld(item):
    """
    Light Novels World

    Parse a feed item's title into volume/chapter information and map its
    tags to a known series, returning the built release message.  Returns
    None for previews/announcements and False when no known tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Checked before the numbered-chapter guard because this series
    # occasionally posts non-numbered chapters.
    # NOTE(review): 'Douchuur' below reproduces the original string;
    # possible typo for 'Douchuu' — confirm before changing.
    if 'Tsuki ga Michibiku Isekai Douchuu (POV)' in item['tags']:
        if not postfix and '-' in item['title']:
            postfix = item['title'].split("-")[-1].strip()
        return buildReleaseMessageWithType(item, 'Tsuki ga Michibiku Isekai Douchuur', vol, chp, frag=frag, postfix=postfix)
    if not (chp or vol or frag) or 'preview' in item['title'].lower():
        return None
    if 'Announcements' in item['tags']:
        return None
    # Translated series: (feed tag, canonical series title), checked in order.
    translated = [
        ('Amaku Yasashii Sekai de Ikiru ni wa', 'Amaku Yasashii Sekai de Ikiru ni wa'),
        ('Omae Mitai na Hiroin ga Ite Tamaruka!', 'Omae Mitai na Hiroin ga Ite Tamaruka!'),
        ('the nine godheads', 'The Nine Godheads'),
        ('World Seed', 'World Seed'),
        ('Asura', 'Asura'),
        ('Infinity Armament', 'Infinity Armament'),
        ('Peerless Demonic Lord', 'Peerless Demonic Lord'),
        ('The Throne Under the Starry Sky', 'The Throne Under the Starry Sky'),
        ('Twin Sword', 'Twin Sword'),
        ('Sayonara Ryuusei Konnichiwa Jinsei', 'Sayonara Ryuusei Konnichiwa Jinsei'),
        ('Online Game: Evil Dragon Against The Heaven', 'Online Game: Evil Dragon Against The Heaven'),
        ('Hakushaku Reijo ha Chito Tensei Mono', 'Hakushaku Reijo ha Chito Tensei Mono'),
        # Two tag spellings map to the same series.
        ('Ore to Kawazu-san no Isekai Houriki', 'Ore to Kawazu-san no Isekai Houriki'),
        ('Ore to Kawazu-san no Isekai Hourouki', 'Ore to Kawazu-san no Isekai Houriki'),
        ('Dragon Blood Warrior', 'Dragon Blood Warrior'),
        ('Evil-like Duke Household', 'Evil-like Duke Household'),
        ('Great Dao Commander', 'Great Dao Commander'),
        ('It’s Impossible that My Evil Overlord is So Cute', 'It’s Impossible that My Evil Overlord is So Cute'),
        ('I’m OP, but I Began an Inn', 'I’m OP, but I Began an Inn'),
        ('The Lame Daoist Priest', 'The Lame Daoist Priest'),
        ('The Last Apostle', 'The Last Apostle'),
        ('Isekai Teni Jobumasuta e no Michi', 'Isekai Teni Jobumasuta e no Michi'),
        ('Against the Fate', 'Against the Fate'),
        ('Hone no aru Yatsu', 'Hone no aru Yatsu'),
        ('LV999 Villager', 'LV999 Villager'),
        ("Immortal's Farmland", "Immortal's Farmland"),
        ('Returning from the Immortal World', 'Returning from the Immortal World'),
        ('Starchild Escapes Arranged Marriage', 'Starchild Escapes Arranged Marriage'),
        ('9 Coffins of the Immortals', '9 Coffins of the Immortals'),
        ('Fantastic Creatures’ Travelogue', 'Fantastic Creatures’ Travelogue'),
        ("Hell's Cinema", "Hell's Cinema"),
        ('The Great Conqueror', 'The Great Conqueror'),
        ('Almighty Student', 'Almighty Student'),
        ('Godly Student', 'Godly Student'),
        ('Legend of the Cultivation God', 'Legend of the Cultivation God'),
        ('Supreme Arrow God', 'Supreme Arrow God'),
        ('Blade Online', 'Blade Online'),
        ('The Crimson Dragon', 'The Crimson Dragon'),
    ]
    for tag, title in translated:
        if tag in item['tags']:
            return buildReleaseMessageWithType(item, title, vol, chp, frag=frag, postfix=postfix)
    # Original English-language (OEL) series are flagged with tl_type='oel'.
    oel = [
        ('Sky Prince', 'Sky Prince'),
        ('Aenthar', 'Aenthar'),
        ('How to Survive a Summoning 101', 'How to Survive a Summoning 101'),
    ]
    for tag, title in oel:
        if tag in item['tags']:
            return buildReleaseMessageWithType(item, title, vol, chp, frag=frag, postfix=postfix, tl_type='oel')
    return False
|
nilq/baby-python
|
python
|
from torchvision import datasets, transforms
from core.data.data_loaders.base import BaseDataLoader
class CIFAR100Loader(BaseDataLoader):
    """CIFAR100 dataset loading plus train/test transformation selection."""

    def __init__(self, data_dir, batch_size,
                 shuffle=True, validation_split=0.0,
                 training=True,
                 transformations="DefaultTransformations",
                 **kwargs):
        print("[INFO][DATA] \t Preparing the CIFAR100 dataset ...")
        # Resolve the named transformation set via the base class, then pick
        # the train- or test-time pipeline depending on the `training` flag.
        transf = BaseDataLoader.get_transformations(
            self, name=transformations)
        if training is True:
            self.trans = transf.get_train_trans()
        else:
            self.trans = transf.get_test_trans()
        self.data_dir = data_dir
        # Downloads the dataset on first use.
        self.dataset = datasets.CIFAR100(
            self.data_dir, train=training, download=True, transform=self.trans)
        super().__init__(self.dataset, batch_size, shuffle,
                         validation_split,
                         **kwargs)
|
nilq/baby-python
|
python
|
from pint import UnitRegistry

# Shared unit registry; all quantities in this module must come from the
# same registry instance to be interoperable.
ureg = UnitRegistry()
# Engineering shorthand units: kN/cm^2 (stress), kN*cm and kN*m (moments).
ureg.define('kn_cm2 = kilonewton / centimeter ** 2 = kn_cm2')
ureg.define('kNcm = kilonewton * centimeter = kncm')
ureg.define('kNm = kilonewton * meter = knm')
# Convenience constructor for quantities, e.g. _Q(5, 'kNm').
_Q = ureg.Quantity
# Small numeric constant — presumably a float-comparison tolerance; confirm
# at call sites (NOTE: the name 'e' shadows nothing here but is easy to
# confuse with Euler's number).
e = 0.00001
|
nilq/baby-python
|
python
|
'''
Created on Jan 23, 2018
@author: kyao
'''
import numpy as np
import typing
from d3m.metadata import hyperparams, params
from d3m import container
from d3m.exceptions import InvalidArgumentValueError
import d3m.metadata.base as mbase
from sklearn.random_projection import johnson_lindenstrauss_min_dim, GaussianRandomProjection
# from d3m.primitive_interfaces.featurization import FeaturizationLearnerPrimitiveBase
# changed primitive class to fit in devel branch of d3m (2019-1-17)
from d3m.primitive_interfaces.unsupervised_learning import UnsupervisedLearnerPrimitiveBase
from d3m.primitive_interfaces.base import CallResult
import pandas as pd
from . import config
# Type aliases for the primitive interface: the input is a two-element List
# (inputs[0] = d3mIndex values, inputs[1] = per-series DataFrames — see
# set_training_data below), and the output is a single DataFrame.
Inputs = container.List#[container.DataFrame] # this format is for old version of d3m
Outputs = container.DataFrame
class Params(params.Params):
    # Serializable state exchanged via get_params/set_params.
    x_dim: int  # number of timeseries in the training set
    y_dim: int  # number of samples used per flattened series
    value_dimension: int  # column index holding the value data
    projection_param: typing.Dict  # sklearn GaussianRandomProjection.get_params() output
    components_: typing.Optional[np.ndarray]  # fitted projection matrix, if any
    value_found: bool  # whether a 'value' column was detected
class Hyperparams(hyperparams.Hyperparams):
    '''
    Hyperparameters for RandomProjectionTimeSeriesFeaturization.

    eps : Maximum distortion rate as defined by the Johnson-Lindenstrauss lemma.
    generate_metadata : whether to attach per-column Attribute metadata to
        the produced DataFrame (can be slow for wide outputs).
    '''
    eps = hyperparams.Uniform(
        lower=0.1,
        upper=0.5,
        default=0.2,
        semantic_types=["http://schema.org/Float", "https://metadata.datadrivendiscovery.org/types/TuningParameter"]
    )
    generate_metadata = hyperparams.UniformBool(
        default = True,
        description="A control parameter to set whether to generate metada after the feature extraction. It will be very slow if the columns length is very large. For the default condition, it will turn off to accelerate the program running.",
        semantic_types=["http://schema.org/Boolean", "https://metadata.datadrivendiscovery.org/types/ControlParameter"]
    )
class RandomProjectionTimeSeriesFeaturization(UnsupervisedLearnerPrimitiveBase[Inputs, Outputs, Params, Hyperparams]):
    '''
    Featurize a collection of timeseries by flattening each series to a
    fixed-length vector and reducing it with a Gaussian random projection.

    The input is a two-element list: inputs[0] holds the d3mIndex values and
    inputs[1] the per-series DataFrames (see set_training_data).
    '''
    metadata = hyperparams.base.PrimitiveMetadata({
        "id": "dsbox.timeseries_featurization.random_projection",
        "version": config.VERSION,
        "name": "DSBox random projection timeseries featurization ",
        "description": "A simple timeseries featurization using random projection",
        "python_path": "d3m.primitives.feature_extraction.RandomProjectionTimeSeriesFeaturization.DSBOX",
        "primitive_family": "FEATURE_EXTRACTION",
        "algorithm_types": [ "RANDOM_PROJECTION" ],
        "source": {
            "name": config.D3M_PERFORMER_TEAM,
            "contact": config.D3M_CONTACT,
            "uris": [ config.REPOSITORY ]
        },
        ### Automatically generated
        # "primitive_code"
        # "original_python_path"
        # "schema"
        # "structural_type"
        ### Optional
        "keywords": [ "feature_extraction", "timeseries"],
        "installation": [ config.INSTALLATION ],
        #"location_uris": [],
        "precondition": ["NO_MISSING_VALUES", "NO_CATEGORICAL_VALUES"],
        "effects": ["NO_JAGGED_VALUES"],
        #"hyperparms_to_tune": []
    })
    def __init__(self, *, hyperparams: Hyperparams) -> None:
        super().__init__(hyperparams=hyperparams)
        self.hyperparams = hyperparams
        self._model = None  # fitted GaussianRandomProjection, set in fit()/set_params()
        self._training_data = None  # (x_dim, y_dim) matrix built by set_training_data()
        self._value_found = False  # True when a column named like 'value' was detected
        self._x_dim = 0 # x_dim : the amount of timeseries dataset
        self._y_dim = 0 # y_dim : the length of each timeseries dataset
        self._value_dimension = 0 # value_dimension : used to determine which dimension data is the values we want
        self._fitted = False
    def produce(self, *, inputs: Inputs, timeout: float = None, iterations: int = None) -> CallResult[Outputs]:
        """Project each input series to the learned feature space.

        Returns a DataFrame whose first column is d3mIndex; the remaining
        columns are the projected features.  Returns CallResult(None) when
        the primitive has not been fitted.
        """
        # if self._training_data is None or self._y_dim==0:
        inputs_timeseries = inputs[1]
        inputs_d3mIndex = inputs[0]
        if not self._fitted:
            return CallResult(None, True, 0)
        if isinstance(inputs_timeseries, np.ndarray):
            X = np.zeros((inputs_timeseries.shape[0], self._y_dim))
        else:
            X = np.zeros((len(inputs_timeseries), self._y_dim))
        for i, series in enumerate(inputs_timeseries):
            # Without a detected 'value' column, multi-column series are
            # stacked column-by-column into one long series.
            if series.shape[1] > 1 and not self._value_found:
                series_output = pd.DataFrame()
                for j in range(series.shape[1]):
                    series_output = pd.concat([series_output,series.iloc[:, j]])
            else:
                series_output = series
            if (series_output.shape[0] < self._y_dim):
                # pad with zeros
                X[i,:series_output.shape[0]] = series_output.iloc[:series_output.shape[0], self._value_dimension]
            else:
                # Truncate or just fit in
                X[i,:] = series_output.iloc[:self._y_dim, self._value_dimension]
        # save the result to DataFrame format
        output_ndarray = self._model.transform(X)
        output_dataFrame = container.DataFrame(output_ndarray)
        # update the original index to be d3mIndex
        output_dataFrame = container.DataFrame(pd.concat([pd.DataFrame(inputs_d3mIndex, columns=['d3mIndex']), pd.DataFrame(output_dataFrame)], axis=1))
        # add d3mIndex metadata
        index_metadata_selector = (mbase.ALL_ELEMENTS, 0)
        index_metadata = {'semantic_types': ('https://metadata.datadrivendiscovery.org/types/TabularColumn', 'https://metadata.datadrivendiscovery.org/types/PrimaryKey')}
        output_dataFrame.metadata = output_dataFrame.metadata.update(metadata=index_metadata, selector=index_metadata_selector)
        # add other metadata
        if self.hyperparams["generate_metadata"]:
            for each_column in range(1, output_dataFrame.shape[1]):
                metadata_selector = (mbase.ALL_ELEMENTS, each_column)
                metadata_each_column = {'semantic_types': ('https://metadata.datadrivendiscovery.org/types/TabularColumn', 'https://metadata.datadrivendiscovery.org/types/Attribute')}
                output_dataFrame.metadata = output_dataFrame.metadata.update(metadata=metadata_each_column, selector=metadata_selector)
        return CallResult(output_dataFrame, True, None)
    def set_training_data(self, *, inputs: Inputs) -> None:
        """Record training timeseries and derive the flattened dimensions.

        Raises InvalidArgumentValueError unless inputs has exactly two
        elements (d3mIndex, timeseries list).
        """
        if len(inputs) != 2:
            raise InvalidArgumentValueError('Expecting two inputs')
        inputs_timeseries = inputs[1]
        inputs_d3mIndex = inputs[0]
        if len(inputs_timeseries) == 0:
            print("Warning: Inputs timeseries data to timeseries_featurization primitive's length is 0.")
            return
        # update: now we need to get the whole shape of inputs to process
        lengths = [x.shape[0] for x in inputs_timeseries]
        widths = [x.shape[1] for x in inputs_timeseries]
        # here just take first timeseries dataset to search
        column_name = list(inputs_timeseries[0].columns.values)
        '''
        New things, the previous version only trying to load the fixed columns
        It will cause problems that may load the wrong data
        e.g.: at dataset 66, it will read the "time" data instead of "value"
        So here I added a function to check the name of each column to ensure that we read the correct data
        '''
        for i in range(len(column_name)):
            if 'value' in column_name[i]:
                self._value_found = True
                self._value_dimension = i
        is_same_length = len(set(lengths)) == 1
        is_same_width = len(set(widths)) == 1
        if not is_same_width:
            print("Warning: some csv file have different dimensions!")
        if self._value_found :
            if is_same_length:
                self._y_dim = lengths[0]
            else:
                # Truncate all time series to the shortest time series
                self._y_dim = min(lengths)
        else:
            # No 'value' column: every column is used, so the flattened
            # length is rows * columns.
            if is_same_length:
                self._y_dim = lengths[0] * widths[0]
            else:
                # Truncate all time series to the shortest time series
                self._y_dim = min(lengths) * min(widths)
        self._x_dim = len(inputs_timeseries)
        self._training_data = np.zeros((self._x_dim, self._y_dim))
        for i, series in enumerate(inputs_timeseries):
            if series.shape[1] > 1 and not self._value_found :
                series_output = pd.DataFrame()
                for each_dimension in range(series.shape[1]):
                    series_output = pd.concat([series_output,series.iloc[:, each_dimension]])
            else:
                series_output = series
            self._training_data[i, :] = series_output.iloc[:self._y_dim, self._value_dimension]
    def fit(self, *, timeout: float = None, iterations: int = None) -> CallResult[None]:
        """Fit a GaussianRandomProjection sized by the JL lemma for `eps`."""
        eps = self.hyperparams['eps']
        n_components = johnson_lindenstrauss_min_dim(n_samples=self._x_dim, eps=eps)
        print("[INFO] n_components is", n_components)
        if n_components > self._y_dim:
            # Default n_components == 'auto' fails. Need to explicitly assign n_components
            self._model = GaussianRandomProjection(n_components=self._y_dim, random_state=self.random_seed)
        else:
            try:
                self._model = GaussianRandomProjection(eps=eps, random_state=self.random_seed)
                self._model.fit(self._training_data)
            # NOTE(review): bare except also swallows KeyboardInterrupt etc.;
            # consider narrowing to Exception.
            except:
                print("[Warning] Using given eps value failed, will use default conditions.")
                self._model = GaussianRandomProjection()
                self._model.fit(self._training_data)
        self._fitted = True
        return CallResult(None, has_finished=True)
    def get_params(self) -> Params:
        """Export the fitted state; returns a minimal Params when unfitted."""
        if self._model:
            return Params(y_dim = self._y_dim,
                          x_dim = self._x_dim,
                          value_found = self._value_found,
                          value_dimension = self._value_dimension,
                          projection_param = self._model.get_params(),
                          components_ = getattr(self._model, 'components_', None)
                          )
        else:
            return Params({'y_dim': 0, 'projection_param': {}})
    def set_params(self, *, params: Params) -> None:
        """Restore state from Params, rebuilding the projection if present."""
        self._y_dim = params['y_dim']
        self._x_dim = params['x_dim']
        self._value_found = params['value_found']
        self._value_dimension = params['value_dimension']
        self._model = None
        if params['projection_param']:
            self._model = GaussianRandomProjection()
            self._model.set_params(**params['projection_param'])
            self._model.components_ = params['components_']
            self._fitted = True
        else:
            self._fitted = False
|
nilq/baby-python
|
python
|
"""IPs domain API."""
from ..base import ApiDomainResource
class IPs(ApiDomainResource):
    """IP address management for a domain resource."""

    api_endpoint = "ips"
    DOMAIN_NAMESPACE = True

    def list(self):
        """Return the IPs currently present on the domain."""
        return self.request("GET")

    def create(self, ip):  # pylint: disable=invalid-name
        """Assign the dedicated address *ip* to the domain.

        :param ip: the new IP address to assign
        """
        payload = {"ip": ip}
        return self.request("POST", data=payload)

    def delete(self, ip):  # pylint: disable=invalid-name
        """Remove the existing address *ip* from the domain."""
        return self.request("DELETE", ip)
|
nilq/baby-python
|
python
|
"""
pcolor: for plotting pcolor using matplotlib
"""
import matplotlib.pyplot as plt
import numpy as np
import os
import time
def is_linux():
    """Return True when running on Linux.

    The original used a dict lookup over {'Linux', 'Darwin', 'Windows'},
    which raised KeyError on any other platform (e.g. 'Java', 'FreeBSD');
    a direct comparison returns False there instead.
    """
    import platform
    return platform.system() == 'Linux'
def is_mac():
    """Return True when running on macOS (Darwin).

    The original used a dict lookup over {'Linux', 'Darwin', 'Windows'},
    which raised KeyError on any other platform; a direct comparison
    returns False there instead.
    """
    import platform
    return platform.system() == 'Darwin'
def linux_plot_issue():
    # Work around matplotlib backend problems on Linux by forcing TkAgg.
    # NOTE(review): pyplot is already imported at module load time, so
    # switching the backend here may warn or be a no-op on some matplotlib
    # versions — confirm against the matplotlib version in use.
    if is_linux():
        import matplotlib
        matplotlib.use('TkAgg')
        # matplotlib.use('agg')
        print('backend:', matplotlib.get_backend())
        # matplotlib.hold(true) # deprecated
# Directory where saved plots are written; created eagerly at import time.
output_directory = './generated'
os.makedirs(output_directory, exist_ok=True)
class PColor:
    """ Show and save pcolor (w,h,3) in range float [0,1] """
    @staticmethod
    def plot_show_image(G_paintings2d, file_id, sleep_sec, more_info):
        """Display an image array with GAN diagnostics and optionally save it.

        G_paintings2d: 3-D array (h, w, channels); channels are padded or
            truncated to RGB.  Values are rescaled by *127 + 128 and clipped
            to 255 before display.
        file_id: basename (without extension) to save under output_directory,
            or None to skip saving.
        sleep_sec: pause passed to next_plot.
        more_info: (accuracy, score) pair rendered as text on the plot.
        """
        plt.clf()
        import matplotlib
        matplotlib.rc('axes', edgecolor='white')
        matplotlib.rc('axes', facecolor='black')
        ax = plt.gca()
        ax.set_facecolor((0.0, 0.0, 0.0))
        #print(dir(ax))
        #exit()
        #ax.set_edgecolor((1.0, 1.0, 1.0))
        #print(np.max(np.max(G_paintings2d,axis=2), axis=0))
        #print(np.min(np.min(G_paintings2d,axis=2), axis=1))
        #print(G_paintings2d.shape)
        #plt.imshow(G_paintings2d)
        #plt.imshow((G_paintings2d * 0.2 + 0.5)*0.2)
        #img_pix_rescale = (G_paintings2d * 0.05 + 0.5)
        #img_pix_rescale = (G_paintings2d)
        #plt.imshow(img_pix_rescale, vmin=-100, vmax=100)
        #img_pix_rescale = ((G_paintings2d) / 80.0 *40 ) +0.5
        #img_pix_rescale = ((G_paintings2d) / 2.0 ) +0.5
        img_pix_rescale = G_paintings2d
        # print('img_pix_rescale.shape', img_pix_rescale.shape)
        RGB3D = 3
        assert len(img_pix_rescale.shape) == RGB3D
        # Fewer than 3 channels: collapse to one channel, then replicate to RGB.
        if img_pix_rescale.shape[2] < RGB3D:
            img_pix_rescale = np.max(img_pix_rescale, axis=2)
            img_pix_rescale = img_pix_rescale[:,:,None]
            img_pix_rescale = np.repeat(img_pix_rescale, RGB3D, axis=2)
        # More than 3 channels: keep the first three.
        if img_pix_rescale.shape[2] > RGB3D:
            img_pix_rescale = img_pix_rescale[:,:,:RGB3D]
        #scaled_back_to_255 = img_pix_rescale * 128
        #scaled_back_to_255 = ((img_pix_rescale / 2.0)+0.5) * 128
        # Maps roughly [-1, 1] float values into [1, 255]; clip the top end.
        scaled_back_to_255 = img_pix_rescale * 127.0 + 128
        scaled_back_to_255[scaled_back_to_255 > 255] = 255
        plt.imshow(scaled_back_to_255.astype(np.uint8))
        print('min max:', np.min(img_pix_rescale.ravel()), np.max(img_pix_rescale.ravel()))
        #plt.pcolor(np.mean(G_paintings2d, axis=2))
        acc, score = more_info
        plt.text(-.5, 0, 'D accuracy=%.2f (0.5 for D to converge)' % acc, fontdict={'size': 15})
        plt.text(-.5, G_paintings2d.shape[1]*0.5, 'D score= %.2f (-1.38 for G to converge)' % score, fontdict={'size': 15})
        # plt.colorbar()
        PColor.next_plot(sleep_sec)
        if(file_id is not None):
            PColor.save( os.path.join(output_directory, file_id + '.png') )
    @staticmethod
    def save(filename):
        """Write the current figure to *filename*; brief sleep on macOS."""
        plt.draw()
        plt.savefig( filename )
        print("saved")
        if is_mac():
            wait_time_sec = 0.1
            time.sleep(wait_time_sec)
    """ Next plot. Platform-independent """
    @staticmethod
    def next_plot(sleep_sec):
        """Advance/refresh the interactive plot in a platform-specific way.

        NOTE(review): on Linux every refresh strategy below is commented
        out, so this is currently a no-op there; on unsupported platforms
        the bare `raise` triggers a RuntimeError (no active exception) —
        confirm this is intended.
        """
        if is_mac():
            print('draw')
            import sys
            sys.stdout.flush()
            plt.draw()
            time.sleep(sleep_sec)
        elif is_linux():
            # """ "Modal" """
            # plt.show()
            #plt.draw()
            #plt.show(block=False)
            #time.sleep(0.5)
            #plt.draw()
            """
            # futile:
            plt.ion()
            plt.draw()
            plt.show()
            plt.ioff()
            time.sleep(sleep_sec)
            time.sleep(2.0)
            plt.close()
            plt.ioff()
            """
        else:
            raise
    @staticmethod
    def init():
        """Set up interactive plotting for the current platform.

        Every branch either returns or raises; the trailing `raise` is
        unreachable.  Bare `raise` outside an except block produces a
        RuntimeError on unsupported platforms.
        """
        linux_plot_issue()
        plt.cla()
        #plt.imshow(main_artworks[0])
        if is_linux():
            # plt.ioff() # not necessary
            # plt.show()
            #plt.ion()
            plt.draw()
            plt.show(block=False)
            plt.draw()
            time.sleep(0.5)
            return
        elif is_mac():
            plt.draw()
            plt.ion()
            plt.show()
            time.sleep(0.1)
            plt.ion() # something about continuous plotting
            return
        else:
            raise
        raise
    @staticmethod
    def last(self):
        """Finalize plotting (blocking show on macOS).

        NOTE(review): declared @staticmethod but takes `self`, so callers
        must pass a throwaway argument (PColor.last(None)) — likely meant
        to take no parameters; confirm call sites before changing.
        """
        if is_mac():
            plt.ioff()
            plt.show()
        elif is_linux():
            pass
        else:
            raise
|
nilq/baby-python
|
python
|
from empire.python.typings import *
from empire.enums.base_enum import BaseEnum
class TimeUnits(BaseEnum):
    # Ordered magnitude scale: each successive value is a coarser unit.
    # Integer values allow arithmetic stepping (unit + 1) in TimeUtil.
    NANOS: Final[int] = 0
    MICROS: Final[int] = 1
    MILLIS: Final[int] = 2
    SECONDS: Final[int] = 3
    MINUTES: Final[int] = 4
    HOURS: Final[int] = 5
    DAYS: Final[int] = 6
class TimeUtil:
    """Helpers for rendering a time measurement in a human-friendly unit."""

    @staticmethod
    def get_readable_time_value(time_value: float, source_unit: int, precision: int = 2) -> str:
        """Scale *time_value* up through coarser units until it is small enough,
        then format it as '<rounded value> <unit abbreviation>'."""
        unit = source_unit
        # Days is already the coarsest unit; format immediately.
        if unit == TimeUnits.DAYS:
            return '{} {}'.format(round(time_value, precision), TimeUtil.unit_to_string(unit))
        while unit <= 6:
            sub_second = unit in [TimeUnits.NANOS, TimeUnits.MICROS, TimeUnits.MILLIS]
            if sub_second and time_value > 1000:
                unit += 1
                time_value /= 1000
            elif unit in [TimeUnits.SECONDS, TimeUnits.MINUTES] and time_value > 60:
                unit += 1
                time_value /= 60
            elif unit == TimeUnits.HOURS and time_value > 24:
                unit += 1
                time_value /= 24
            else:
                # Value fits the current unit; stop promoting.
                break
        return '{} {}'.format(round(time_value, precision), TimeUtil.unit_to_string(unit))

    @staticmethod
    def unit_to_string(unit: int) -> str:
        """Return the abbreviation for *unit*, or '~' when unrecognised."""
        abbreviations = {
            TimeUnits.NANOS: 'ns',
            TimeUnits.MICROS: 'µs',
            TimeUnits.MILLIS: 'ms',
            TimeUnits.SECONDS: 's',
            TimeUnits.MINUTES: 'min',
            TimeUnits.HOURS: 'hours',
            TimeUnits.DAYS: 'days',
        }
        return abbreviations.get(unit, '~')
|
nilq/baby-python
|
python
|
from __future__ import division
import numpy as np
from mpl_toolkits.axes_grid1 import make_axes_locatable
from numpy.random import rand
#import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import cPickle as pickle
import pylab as plb
import os, sys
def run():
    '''
    Read results of the clash score survey (Python 2 pickles on disk).
    pdb_clash_scores = list([score_with_hydrogen,score_without_hydrogen]...)
    pdb_clash_score_and_name = list([score_with_hydrogen,score_without_hydrogen,experiment_type,file_name]...)
    pdb_clash_score_dict[file_name] = [score_with_hydrogen,score_without_hydrogen,experiment_type]

    Side effects: changes the process working directory to the data folder.
    Returns (pdb_clash_score_and_name, pdb_clash_score_dict), both sorted.
    '''
    # locate the directory containing the log files
    osType = sys.platform
    if osType.startswith('win'):
        # NOTE(review): non-raw Windows path; works only because none of the
        # backslash pairs form a recognized escape sequence - confirm.
        directory_path = 'c:\Phenix\Dev\Work\work\Clashes'
    else:
        directory_path = '/net/cci-filer2/raid1/home/youval/Work/work/Clashes'
    # convert the path to python format
    directory_path = os.path.realpath(directory_path)
    os.chdir(directory_path)
    # pickle files produced by the survey run; opened in text mode ('r'),
    # which matches how they were written under Python 2
    pdb_clash_scores = pickle.load(open('pdb_clash_scores','r'))
    pdb_clash_score_and_name = pickle.load(open('pdb_clash_score_and_name','r'))
    pdb_clash_score_dict = pickle.load(open('pdb_clash_score_dict','r'))
    # in the original run ELECTRON MICROSCOPE was not an option - fix that
    for i,x in enumerate(pdb_clash_score_and_name):
        if x[2]=='':
            pdb_clash_score_and_name[i][2] = 'ELECTRON MICROSCOPE'
    pdb_clash_scores.sort()
    pdb_clash_score_and_name.sort()
    print 'Total number of clash score records is: {}'.format(len(pdb_clash_score_and_name))
    print '*'*60
    #print_list(pdb_clash_score_and_name[-6:], 2)
    #print_list(pdb_clash_score_and_name[:50], 5)
    return pdb_clash_score_and_name,pdb_clash_score_dict
def print_list(l,n):
'''print list l with n items in a raw'''
x = len(l) % n
l.extend(['',]*x)
for i in range(len(l)//n):
s = i*n
e = s + n
print l[s:e]
def plot_data(pdb_clash_score_and_name,by_type_dict):
    '''For each experiment type, scatter-plot its clash scores and show a
    with/without-hydrogen histogram comparison.

    by_type_dict maps experiment type -> list of
    [score_with_hydrogen, score_without_hydrogen, file_name] records.
    '''
    for k in by_type_dict:
        # create a list with the same color for all points with the same experiment type
        data = by_type_dict[k]
        #c = np.ones(len(data))*0.647933889333
        # build data with size and color
        #datalist = [[i,d[0],(d[0]-d[1])] for i,d in enumerate(data)]
        x = range(1,len(data)+1)
        #x = [d[0] for d in datalist]
        y = [d[1] for d in data] # use clash score without pdb hydrogens as y (keep_hydrogens=False)
        y2 = [d[0] for d in data] # use clash score with pdb hydrogens as y
        # make the size of the points on the plot relative to the difference in the clash scores
        s = [50 + 5*abs(d[1]-d[0]) for d in data]
        # The color of points where both clash scores are the same
        c = ['y',]*len(data)
        # Color the data points in a different colors
        # blue: with-H score higher; red: without-H score higher; yellow: equal
        for i in range(len(data)):
            if data[i][0]>data[i][1]: c[i] = 'b'
            elif data[i][0]<data[i][1]: c[i] = 'r'
        #c = rand(len(data))
        plot_experiment(x,y,s,c,k,data)
        hist_both_clash_scores(y,y2,k)
def plot_experiment(x,y,s,c,k,data):
    '''
    plot a sub plot for an experiment type
    x: enumerating data points
    y: clash score with hydrogen
    s: size the data point, related to the difference between with/without hydrogen clash scores
    c: data point color
    k: pdb file experiment type

    Side effects: saves the figure to 'pscoll.eps' (overwritten per call)
    and blocks on plt.show().
    '''
    def onpick3(event):
        # Click handler: print details of the picked data point.
        ind = event.ind
        i = ind[0]
        print '*'*50
        print 'PDB file {0} Experiment type: {1}'.format(data[i][2],k)
        print 'Clash score with hydrogen kept: {0:.4f} without hydrogen: {1:.4f}'.format(data[i][0],data[i][1])
        print c[i]
    # set figure look
    gr = 1.61803398875  # golden ratio for the width/height aspect
    h = 10 # figure hight
    w = gr*h # figure width
    d = 0.05 # distance between plot regon and figure edge
    fig = plt.figure(figsize=(w,h))
    plt.subplots_adjust(left=d, right=1-d, top=1-d, bottom=d)
    ax1 = fig.add_subplot(111)
    # set scattering plot and allow interactinve selection of points on plot
    col = ax1.scatter(x,y,s,c=c, picker=True)
    fig.canvas.mpl_connect('pick_event',onpick3)
    fig.set_size_inches(w,h)
    #
    # Pad axis limits slightly beyond the data range.
    maxy = max(y)
    maxs = max(s)/100
    ax1.set_ylim([-maxy*.01,maxy+maxs])
    ax1.set_xlim([-x[-1]*0.01,x[-1]*1.01+maxs])
    #
    plt.title(k)
    delta_score = [abs(i[0]-i[1]) for i in data]
    minscore = min(delta_score)
    maxscore = max(delta_score)
    text1 = 'Number of data points: {0}\nMin score difference: {1}\nMax score difference: {2}\n\n'.format(x[-1],minscore,maxscore)
    text2 = 'Blue: Score excluding H is lower\nRed: Score including PDB H is lower\nYellow: The same '
    plt.text(x[-1]*0.1,maxy*.65, text1+text2,fontsize=16)
    plt.ylabel('Clash score - Excluding hydrogens in input file')
    fig.savefig('pscoll.eps')
    plt.show()
def hist_both_clash_scores(x,y,k):
    '''
    x: clash score without pdb hydrogens as y (keep_hydrogens=False)
    y: clash score with pdb hydrogens as y
    k: Experiment type

    Draws a scatter plot of x vs y with marginal histograms on the top and
    right axes; blocks on plt.show().
    '''
    # set figure look
    h = 11 # figure hight
    w = 11 # figure width
    d = 0.05 # distance between plot regon and figure edge
    plt.figtext(0,0,k, fontsize=16)
    fig, axScatter = plt.subplots(figsize=(w,h))
    plt.subplots_adjust(left=d, right=1-d, top=1-d, bottom=d)
    # the scatter plot:
    axScatter.scatter(x, y)
    axScatter.set_aspect(1.)
    # create new axes on the right and on the top of the current axes
    # The first argument of the new_vertical(new_horizontal) method is
    # the height (width) of the axes to be created in inches.
    divider = make_axes_locatable(axScatter)
    binlim = 200
    axHistx = divider.append_axes("top", 3, pad=1, sharex=axScatter, xlabel='Clash score without PDB Hydrogen', xlim=[0,binlim])
    axHisty = divider.append_axes("right", 3, pad=1, sharey=axScatter, ylabel='Clash score with PDB Hydrogen', ylim=[0,binlim])
    # NOTE(review): bare attribute access below is a no-op - plt.annotate is
    # never actually called; confirm whether an annotation was intended.
    plt.annotate
    #bins = np.arange(-lim, lim + binwidth, binwidth)
    bins = 40
    axHistx.hist(x, bins=bins)
    axHisty.hist(y, bins=bins, orientation='horizontal')
    plt.figtext(0.4,0.97,k, fontsize=16)
    plt.draw()
    plt.show()
def zero_scores(clash_data):
    '''
    In ELECTRON MICROSCOPE files there is no EXPERIMENT TYPE and clash score are zero
    Remove those records

    NOTE(review): despite the docstring, this function only REPORTS the
    zero-score records (prints counts per experiment type); it does not
    remove anything from clash_data - confirm intended behavior.
    '''
    # records whose with-H and without-H scores are both exactly 0
    zero_score = [x for x in clash_data if x[0:2] == [0,0]]
    zero_scores_dict = initial_dict()
    for x in zero_score:
        # a record may carry several comma-separated experiment types
        keys = x[2].split(',')
        for k in keys:
            zero_scores_dict[k].append(x[3])
    print 'Number of records with 0.0 clash scores: {}'.format(len(zero_score))
    print '='*60
    for x in zero_scores_dict:
        print '{0:30} : {1:4}'.format(x,len(zero_scores_dict[x]))
    print '*'*60
def create_by_type_dict(pdb_clash_score_and_name):
    '''(list) -> dictionary
    Sort clash score records by experiment type.

    Each record is [score_with_H, score_without_H, experiment_type, file_name];
    a record with several comma-separated experiment types is counted under
    each of them.  Prints a per-type breakdown as a side effect.
    '''
    by_type_dict = initial_dict()
    for x in pdb_clash_score_and_name:
        keys = x[2].split(',')
        for k in keys:
            by_type_dict[k].append([x[0],x[1],x[3]])
    print 'Experimental type breakdown'
    print '='*60
    for x in by_type_dict:
        print '  {0:30} : {1:4}'.format(x,len(by_type_dict[x]))
    print '*'*60
    return by_type_dict
def initial_dict():
init_dict = dict([('X-RAY DIFFRACTION',[]),
('NMR',[]),
('NEUTRON DIFFRACTION',[]),
('ELECTRON MICROSCOPE',[]),
('Other',[]),
('SMALL ANGLE X-RAY SCATTERING',[])])
return init_dict
# Script entry point: load the survey results, report the zero-score
# records, break the scores down by experiment type, and plot each group.
if __name__=='__main__':
    pdb_clash_score_and_name,pdb_clash_score_dict = run()
    # Look at records with 0.0 scores
    zero_scores(pdb_clash_score_and_name)
    by_type_dict = create_by_type_dict(pdb_clash_score_and_name)
    plot_data(pdb_clash_score_and_name,by_type_dict)
    print 'done'
|
nilq/baby-python
|
python
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from openstack_dashboard.api import neutron
from openstack_dashboard.api import nova
from sahara_dashboard.api import sahara as saharaclient
from sahara_dashboard.content.data_processing \
import tabs as sahara_tabs
from sahara_dashboard.content. \
data_processing.utils import workflow_helpers as helpers
from sahara_dashboard.content.data_processing.clusters.nodegroup_templates \
import tables as node_group_template_tables
LOG = logging.getLogger(__name__)
class NodeGroupTemplatesTab(sahara_tabs.SaharaTableTab):
    """Tab listing the node group templates visible to the current request."""
    table_classes = (node_group_template_tables.NodegroupTemplatesTable, )
    name = _("Node Group Templates")
    slug = "node_group_templates_tab"
    template_name = "horizon/common/_detail_table.html"

    def get_nodegroup_templates_data(self):
        """Fetch templates, honouring any server-side table filter.

        Returns an empty list (and surfaces an error message to the user)
        when the API call fails.
        """
        try:
            table = self._tables['nodegroup_templates']
            search_opts = {}
            # Renamed from 'filter', which shadowed the builtin.
            filter_info = self.get_server_filter_info(table.request, table)
            if filter_info['value'] and filter_info['field']:
                search_opts = {filter_info['field']: filter_info['value']}
            node_group_templates = saharaclient.nodegroup_template_list(
                self.request, search_opts)
        except Exception:
            node_group_templates = []
            exceptions.handle(self.request,
                              _("Unable to fetch node group template list"))
        return node_group_templates
class GeneralTab(tabs.Tab):
    """Detail tab showing general information about one node group template."""
    name = _("General Info")
    slug = "nodegroup_template_details_tab"
    template_name = "nodegroup_templates/_details.html"
    def get_context_data(self, request):
        """Assemble the template-detail context.

        Returns early with only {"template": {}} if the template itself
        cannot be fetched; otherwise flavor, floating IP pool name, base
        image name and security groups are resolved best-effort, each with
        its own user-visible error on failure.
        """
        template_id = self.tab_group.kwargs['template_id']
        try:
            template = saharaclient.nodegroup_template_get(
                request, template_id)
        except Exception as e:
            template = {}
            LOG.error(
                "Unable to fetch node group template details: %s" % str(e))
            # Nothing else can be resolved without the template.
            return {"template": template}
        try:
            flavor = nova.flavor_get(request, template.flavor_id)
        except Exception:
            flavor = {}
            exceptions.handle(request,
                              _("Unable to fetch flavor for template."))
        floating_ip_pool_name = None
        if template.floating_ip_pool:
            try:
                floating_ip_pool_name = self._get_floating_ip_pool_name(
                    request, template.floating_ip_pool)
            except Exception:
                exceptions.handle(request,
                                  _("Unable to fetch floating ip pools."))
        base_image_name = None
        if template.image_id:
            try:
                base_image_name = saharaclient.image_get(
                    request, template.image_id).name
            except Exception:
                exceptions.handle(request,
                                  _("Unable to fetch Base Image with id: %s.")
                                  % template.image_id)
        security_groups = helpers.get_security_groups(
            request, template.security_groups)
        # Only show the boot-from-volume section when the attribute is
        # present and non-None on this template.
        if getattr(template, 'boot_from_volume', None) is None:
            show_bfv = False
        else:
            show_bfv = True
        return {"template": template, "flavor": flavor,
                "floating_ip_pool_name": floating_ip_pool_name,
                "base_image_name": base_image_name,
                "security_groups": security_groups,
                "show_bfv": show_bfv}
    def _get_floating_ip_pool_name(self, request, pool_id):
        """Resolve a pool id to its name; fall back to the id when unknown."""
        pools = [pool for pool in neutron.floating_ip_pools_list(
            request) if pool.id == pool_id]
        return pools[0].name if pools else pool_id
class ConfigsTab(tabs.Tab):
    """Detail tab showing per-service configurations of a template."""
    name = _("Service Configurations")
    slug = "nodegroup_template_service_configs_tab"
    template_name = "nodegroup_templates/_service_confs.html"

    def get_context_data(self, request):
        """Fetch the node group template; on failure log the error and
        render the tab with an empty template dict.
        """
        template_id = self.tab_group.kwargs['template_id']
        try:
            template = saharaclient.nodegroup_template_get(
                request, template_id)
        except Exception as e:
            template = {}
            LOG.error(
                "Unable to fetch node group template details: %s" % str(e))
        return {"template": template}
class NodegroupTemplateDetailsTabs(tabs.TabGroup):
    """Tab group combining the General and Service Configuration tabs."""
    slug = "nodegroup_template_details"
    tabs = (GeneralTab, ConfigsTab, )
    # Keep the selected tab across page reloads.
    sticky = True
|
nilq/baby-python
|
python
|
from utils.data_reader import prepare_data_for_feature, generate_vocab, read_data
from utils.features import get_feature
from utils.utils import getMetrics
from utils import constant
from baseline.baseline_classifier import get_classifier
from baseline.baseline_features import get_features_for_prediction
import numpy as np
import csv
import pandas as pd
import os
'''
Before running this file, pls assign save path.
python predict_classifier.py --save_path 'save/LR_final/' --classifier 'LR' --C 0.01 --pred_score --include_test
'''
# Ensure the output directory for records/prediction files exists.
if not os.path.exists(constant.save_path):
    os.makedirs(constant.save_path)

# Index -> emotion name used when writing prediction files.
label2emotion = ["others","happy", "sad","angry"]

## define parameters for getting feature
features = constant.features

## define parameters for building model
classifier_list = ["LR","SVM","XGB"]
## LR: c
## SVM: c
## XGB: n_estimators, max_depth
parameter_list = [constant.C,constant.n_estimators,constant.max_depth]
classifier = constant.classifier

print('features: ', features)
print('Classifier: ', classifier)
print('Parameters: ', parameter_list)

txt_file = classifier+"_baseline.txt"
microF1s = 0

## define parameters for checkpoint
# Parameter string embedded in the per-run CSV record file name.
if classifier=="XGB":
    params = str(parameter_list[1])+"-"+str(parameter_list[2])
    pass
else:
    params = str(parameter_list[0])
    pass
record_file = classifier+"_"+params+".csv"
checkpoint = False
currentSplit = 0

## check checkpoint: resume from a previous partial run if the record
## CSV already exists.
if os.path.exists(constant.save_path+record_file):
    checkpoint = True
    ## read checkpoint
    with open(constant.save_path+record_file, newline='') as csvfile:
        mLines = csvfile.readlines()
    ## get current split
    targetLine = mLines[-1]
    # NOTE(review): split(',')[0] yields a *string*; the slice below
    # (mLines[-currentSplit-1:]) and the `currentSplit += 1` after the
    # loop both need an int, so this resume path raises TypeError as
    # written. Should presumably be int(targetLine.split(',')[0]) --
    # TODO confirm and fix.
    currentSplit=targetLine.split(',')[0]
    ##read F1 score records
    rLines = mLines[-currentSplit-1:]
    for line in rLines:
        microF1s += float(line.split(',')[1])
    currentSplit += 1

model = get_classifier(ty=classifier, c=parameter_list[0], n_estimators=parameter_list[1], max_depth=parameter_list[2])

for i in range(constant.num_split):
    ## confirm checkpoint: skip folds already recorded in the CSV.
    if checkpoint==True and i<currentSplit:
        print("Split {} is skipped because it has been run!".format(i))
        continue
    ## prepare feature for model
    X_train, y_train, X_val, y_val, X_test, ind, X_text = get_features_for_prediction(features, i, use_pca=False)
    print('shape of X_train',X_train.shape)
    print('shape of X_test',X_test.shape)
    print("###### Running folder %d ######" % (i+1))
    if i==0:
        y_pred = []
        pass
    ## train and predict (features flattened to 2-D for sklearn-style API)
    model.fit(X_train.reshape(X_train.shape[0], -1), y_train) ## [29010,3,emb_size] --> [29010, 3 * emb_size]
    ## predict on the test set
    y_pred = model.predict(X_test.reshape(X_test.shape[0], -1))
    print("###### Writing result of folder %d to file ######" % (i+1))
    ## generate files with 3 turns and labels
    file = constant.save_path+"test_{}.txt".format(i)
    if not os.path.exists(file):
        with open(file, 'w') as the_file:
            the_file.write("id\tturn1\tturn2\tturn3\tlabel\n")
            preds_dict = {}
            indices = []
            for idx, text, pred in zip(ind,X_text,y_pred):
                preds_dict[idx] = "{}\t{}\t{}\t{}\t{}\n".format(idx,text[0],text[1],text[2],label2emotion[pred])
                indices.append(idx)
            # NOTE(review): the loop below indexes preds_dict by
            # 0..n-1, which only matches the keys if `ind` is exactly
            # range(n); otherwise this raises KeyError. The argsort of
            # -indices reversed is also just an ascending argsort and
            # sorted_indices itself is never used to reorder -- verify
            # against get_features_for_prediction.
            sorted_indices = np.argsort(-np.array(indices))[::-1]
            for idx in range(len(sorted_indices)):
                the_file.write(preds_dict[idx])
    ## run validation set to get the F1 score
    if constant.pred_score:
        if i==0:
            txtfile = open(txt_file,'a')
            txtfile.write("\n--------------------\n")
            txtfile.write("Classifier %s, Parameters: %f, %f, %f" %(classifier, parameter_list[0], parameter_list[1], parameter_list[2]))
            txtfile.close()
        y_pred_val = model.predict(X_val.reshape(X_val.shape[0], -1))
        ## convert output to one hot (4 emotion classes)
        one_hot = np.zeros((y_pred_val.shape[0], 4))
        one_hot[np.arange(y_pred_val.shape[0]), y_pred_val] = 1
        ## call the scorer
        acc, microPrecision, microRecall, microF1 = getMetrics(one_hot,y_val,verbose=True)
        txtfile = open(txt_file,'a')
        txtfile.write("(EXPERIMENT %d) microF1 score %f" % ((i+1), microF1))
        txtfile.write("\n--------------------\n")
        txtfile.close()
        result = [i,microF1]
        with open(constant.save_path+record_file, 'a') as f:
            writer = csv.writer(f)
            writer.writerow(result)
        microF1s = microF1s + microF1

## average validation F1 over all splits
microF1s = microF1s/constant.num_split
txtfile = open(txt_file,'a')
txtfile.write("\nAVERAGE F1 VAL: %3.5f\n\n" % microF1s)
txtfile.close()
|
nilq/baby-python
|
python
|
"""Users models."""
from django.contrib.auth.models import AbstractUser
from django.db import models
class User(AbstractUser):
    """Custom user.

    This inherits all the fields from Django's basic user,
    but also has an avatar.
    """

    def __str__(self) -> str:
        """Represent the user by their full name, or email, or ID."""
        full_name = self.get_full_name()
        if full_name:
            return full_name
        if self.email:
            return self.email
        return str(self.pk)
|
nilq/baby-python
|
python
|
# Arithmetic progression: read the first term and the common
# difference, print the next 10 terms, then keep asking the user for
# extra terms to print until they answer 0 (or a negative number).
n1 = int(input('Digite o numero inicial: '))  # first term
razao = int(input('Digite sua razão: '))  # common difference
contador = 10  # terms still to print
while contador != 0:
    n1 += razao
    print(n1)
    contador -= 1
# NOTE(review): the first printed value is n1 + razao, so the starting
# term itself is never shown -- confirm that is intended.
termos = int(input('Se Você deseja adicionar mais termos, informe o numero, caso contrario, digite 0: '))
contador += termos
while termos > 0:
    # Print the newly requested terms, then ask again.
    while contador > 0:
        n1 += razao
        print(n1)
        contador -= 1
    termos = int(input('Se Você deseja adicionar mais termos, informe o numero, caso contrario, digite 0: '))
    contador += termos
if termos == 0 or termos < 0:
    print('Fim do programa')
|
nilq/baby-python
|
python
|
import magicbot
import wpilib
import ctre
import wpilib.drive
from robotpy_ext.common_drivers import navx
class MyRobot(magicbot.MagicRobot):
    """Competition robot: four-motor tank drivetrain driven in arcade
    mode from the primary joystick."""

    def createObjects(self):
        """Called once at robot init; build all hardware objects."""
        self.init_drive_train()

    def init_drive_train(self):
        """Create the drive motors, gyro, joysticks and drive object.

        Motor CAN ids differ between the practice and competition
        chassis; the competition ids are the active set.
        """
        # fl, bl, fr, br = (30, 40, 50, 10)  # practice bot (kept for reference)
        br, fr, bl, fl = (1, 7, 2, 5)  # on competition robot
        self.br_motor = ctre.wpi_talonsrx.WPI_TalonSRX(br)
        self.bl_motor = ctre.wpi_talonsrx.WPI_TalonSRX(bl)
        self.fl_motor = ctre.wpi_talonsrx.WPI_TalonSRX(fl)
        self.fr_motor = ctre.wpi_talonsrx.WPI_TalonSRX(fr)
        # Right side is mirrored, so invert it.
        self.fr_motor.setInverted(True)
        self.br_motor.setInverted(True)
        self.gyro = navx.AHRS.create_spi()
        self.joystick = wpilib.Joystick(0)
        self.joystick2 = wpilib.Joystick(1)
        # Fix: the drive object was previously constructed twice in a
        # row, registering duplicate motor resources; build it once.
        self.robot_drive = wpilib.RobotDrive(self.fl_motor, self.bl_motor,
                                             self.fr_motor, self.br_motor)

    def teleopInit(self):
        """No teleop-specific setup required."""
        pass

    def teleopPeriodic(self):
        """Drive in arcade mode from the primary joystick each loop."""
        self.robot_drive.arcadeDrive(self.joystick.getX(), self.joystick.getY())
# Standard WPILib entry point: hand control of the robot to MyRobot.
if __name__ == '__main__':
    wpilib.run(MyRobot)
|
nilq/baby-python
|
python
|
from notipy_me import Notipy
from repairing_genomic_gaps import cae_200, build_synthetic_dataset_cae, train_model
# Train the 200-bp convolutional autoencoder on a synthetic
# single-gap dataset; Notipy emails a notification when the run ends.
if __name__ == "__main__":
    with Notipy():
        model = cae_200()
        train, test = build_synthetic_dataset_cae(200)
        model = train_model(model, train, test, path="single_gap")
|
nilq/baby-python
|
python
|
"""Tornado handlers for security logging."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from tornado import web
from . import csp_report_uri
from ...base.handlers import APIHandler
class CSPReportHandler(APIHandler):
    """Accepts a content security policy violation report"""

    # CSP reports must not count as user activity (e.g. for idle tracking).
    _track_activity = False

    def skip_check_origin(self):
        """Don't check origin when reporting origin-check violations!"""
        return True

    def check_xsrf_cookie(self):
        # don't check XSRF for CSP reports: browsers send them
        # automatically, without the XSRF token.
        return

    @web.authenticated
    def post(self):
        """Log a content security policy violation report"""
        self.log.warning(
            "Content security violation: %s", self.request.body.decode("utf8", "replace")
        )
# Route the configured CSP report URI to the handler above.
default_handlers = [(csp_report_uri, CSPReportHandler)]
|
nilq/baby-python
|
python
|
"""exercism bob module."""
def response(hey_bob):
    """
    Model responses for input text.

    :param hey_bob string - The input provided.
    :return string - The response.
    """
    text = hey_bob.strip()
    # Silence (nothing but whitespace) gets its own reply.
    if not text:
        return 'Fine. Be that way!'
    yelling = text.isupper()
    asking_question = text.endswith('?')
    if yelling and asking_question:
        return "Calm down, I know what I'm doing!"
    if asking_question:
        return 'Sure.'
    if yelling:
        return 'Whoa, chill out!'
    return 'Whatever.'
|
nilq/baby-python
|
python
|
import os
"""
Guild how to read your graph
Description:
I provided several methods to read graph-network data but not limit other formats, please implement your own format as your need
### Graph Kinds & Data Structure
UNDIRECTED-GRAPH <SYMMETRIC-MATRIX, UPPER-MATRIX>
DIRECTED-GRAPH <ASYMETRTIC-MATRIX>
TREE <TREE-HIERARCHY>
MULTI-GRAPH
### Data IO Structure
SYMMETRIC-MATRIX
Square matrix N * N
Numpy array or list of lists
Upper matrix must be symetric with lower matrix
ASYMMETRIC-MATRIX
Square matrix N * N
Numpy array or list of lists
UPPER-MATRIX
Can be a list of upper matrix (total length of each row is decreased by 1) or numpy array with all elements in lower matrix are zeros
TREE-HIERARCHY
A list of NODE_TREE objects or a multi-level nested dictionary
"""
class GraphReader():
    """Skeleton base class for graph readers.

    Appears intended as a base for format-specific readers (see the
    module docstring for the supported matrix/tree layouts); all
    methods are currently no-op placeholders.
    """

    def __init__(self):
        pass

    def _prior_action_(self):
        # Hook to run before reading; subclasses may override.
        pass

    def _post_action_(self):
        # Hook to run after reading; subclasses may override.
        pass
nilq/baby-python
|
python
|
## Fake Binary
## 8 kyu
## https://www.codewars.com/kata/57eae65a4321032ce000002d
def fake_bin(x):
    """Map each decimal digit of *x* to '0' (digit < 5) or '1' (digit >= 5).

    >>> fake_bin("45385")
    '01011'
    """
    return ''.join('0' if int(digit) < 5 else '1' for digit in x)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import datetime
import argparse
from pathlib import Path
import importlib
# Target profile placeholder (populated by the framework at runtime).
target = ''

# Static metadata describing this technique module to the framework
# (MITRE ATT&CK id T1530).
technique_info = {
    'blackbot_id': 'T1530',
    'external_id': '',
    'controller': 'lightsail_download_ssh_keys',
    'services': ['Lightsail'],
    'prerequisite_modules': [],
    'arguments_to_autocomplete': [],
    'version': '1',
    'aws_namespaces': [],
    'last_updated_by': 'Blackbot, Inc. Sun Sep 20 04:13:33 UTC 2020' ,
    'ttp_exec': '',
    'ttp_mitigation': '',
    'ttp_detection': '',
    'intent': 'Downloads Lightsails default SSH key pairs.',
    'name': 'ADD_NAME_HERE',
}

# Module-level parser; the src module attaches its own arguments.
parser = argparse.ArgumentParser(add_help=False, description=technique_info['name'])
def main(args, awsattack_main):
    """Parse args and delegate to the implementation (src) module.

    The src module is imported dynamically and reloaded on every call
    so edits to it take effect without restarting the framework.
    """
    args = parser.parse_args(args)
    import_path = 'ttp.src.lightsail_download_ssh_keys_src'
    src_code = __import__(import_path, globals(), locals(), ['technique_info'], 0)
    importlib.reload(src_code)
    # Mark the session as chained so follow-up modules can run.
    awsattack_main.chain = True
    return src_code.main(args, awsattack_main, data=technique_info)
def summary(data, awsattack_main):
    """Render a human-readable summary of the downloaded key pairs.

    *data* must carry 'dl_path' (download directory) and
    'region_key_pairs' (iterable of region names).
    """
    lines = [' Keys downloaded to:\n',
             ' ' + data['dl_path'] + '\n',
             ' Downloaded Key Pairs for the following regions: \n']
    for region in sorted(data['region_key_pairs']):
        lines.append(' {}\n'.format(region))
    return ''.join(lines)
|
nilq/baby-python
|
python
|
import abc
import argparse
import functools
import os
import pathlib
import shutil
import numpy as np
import pandas as pd
def parse_args():
    """Parse command-line options for the client-allocation script.

    Returns an argparse Namespace with client counts, RNG seed, data
    root and the per-split client subdirectory names.
    """
    parser = argparse.ArgumentParser(description="Client allocation")
    parser.add_argument('-c', '--train-clients', default=100, type=int)
    parser.add_argument('-t', '--test-clients', default=12, type=int)
    parser.add_argument('-s', '--seed', default=42, type=int)
    parser.add_argument('-d', '--data-root', default='./data')
    parser.add_argument('--train-clients-subdir', default='train_clients')
    parser.add_argument('--test-clients-subdir', default='test_clients')
    return parser.parse_args()
def split_dataframe(df, split):
    """Return the rows of *df* belonging to *split* ('train' or 'test'),
    with the Dataset_type selector column dropped.

    The Dataset_type column is expected to hold 'TRAIN'/'TEST' values.
    """
    assert split in ['train', 'test']
    # Fix: use keyword axis=1 -- the positional form df.drop(col, 1)
    # was deprecated and removed in pandas 2.0 (raises TypeError).
    imgs = df[df['Dataset_type'] == split.upper()].drop('Dataset_type', axis=1)
    return imgs
def make_client_ids(num_clients):
    """Return ids 'client-00', 'client-01', ... zero-padded to 2 digits."""
    return [f"client-{index:02d}" for index in range(num_clients)]
def split_dataframe_for_clients(df, client_ids):
    """Shuffle the rows of *df* and split them into len(client_ids)
    near-equal parts, keyed by client id."""
    shuffled = df.loc[np.random.permutation(df.index)]
    parts = np.array_split(shuffled, len(client_ids))
    return {client_id: part for client_id, part in zip(client_ids, parts)}
def allocate_samples_on_disk(
        client_samples,
        data_root: pathlib.Path,
        split_subdir: str,
        clients_subdir: str,
):
    """Copy each client's sampled images into per-client, per-label dirs.

    *client_samples* maps client id -> DataFrame with columns
    X_ray_image_name and Numeric_Label. Images are copied from
    data_root/split_subdir into
    data_root/clients_subdir/<client_id>/<label>/.
    """
    split_root = data_root / split_subdir  # e.g. 4P/data/train
    clients_root = data_root / clients_subdir  # e.g. 4P/data/train_clients
    for client_id, sample in client_samples.items():
        # Fix: client ids are strings like 'client-03', so the old
        # "{:2d}".format(client_id) raised ValueError. Use the id
        # itself as the directory name, e.g. 4P/data/train_clients/client-03/
        client_path = clients_root / str(client_id)
        # Pre-create one directory per label class.
        for label in ["0", "1", "2"]:
            (client_path / label).mkdir(parents=True, exist_ok=True)
        for imname, label in zip(sample.X_ray_image_name, sample.Numeric_Label):
            shutil.copy(split_root / imname, client_path / str(label) / imname)
def main(args):
    """Read Labels.csv, split rows into train/test, and lay samples
    out on disk as one directory tree per client.

    NOTE(review): 'train'/'test' are passed as the split_subdir image
    directories under data_root -- confirm those directories actually
    exist with that naming.
    """
    np.random.seed(args.seed)
    data_root = pathlib.Path(args.data_root)
    df = pd.read_csv(data_root.joinpath("Labels.csv"))
    train_df = split_dataframe(df, 'train')
    test_df = split_dataframe(df, 'test')
    train_ids = make_client_ids(args.train_clients)
    test_ids = make_client_ids(args.test_clients)
    train_splits = split_dataframe_for_clients(train_df, train_ids)
    test_splits = split_dataframe_for_clients(test_df, test_ids)
    allocate_samples_on_disk(
        train_splits, data_root, 'train', args.train_clients_subdir)
    allocate_samples_on_disk(
        test_splits, data_root, 'test', args.test_clients_subdir)
# Script entry point.
if __name__ == '__main__':
    args = parse_args()
    main(args)
|
nilq/baby-python
|
python
|
# coding=utf-8
import argparse
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch import nn
import pandas as pd
from Source import utils
import time
import logging
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
def get_params():
    """Define and parse command-line arguments for fusion training.

    Unknown arguments are tolerated (parse_known_args) so the script
    survives framework-injected flags. Returns the parsed Namespace.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_dir", default='Data/', type=str)
    parser.add_argument("--dataset", default='knowit', type=str, help='knowit or tvqa')
    parser.add_argument("--bert_model", default='bert-base-uncased', type=str)
    parser.add_argument("--do_lower_case", default=True)
    parser.add_argument('--seed', type=int, default=181)
    parser.add_argument("--lr", default=5e-5, type=float)
    parser.add_argument("--workers", default=8)
    parser.add_argument("--device", default='cuda', type=str, help="cuda, cpu")
    parser.add_argument("--batch_size", default=32, type=int)
    parser.add_argument('--momentum', default=0.9)
    parser.add_argument('--nepochs', default=100, help='Number of epochs', type=int)
    parser.add_argument('--patience', default=15, type=int)
    parser.add_argument('--no_cuda', action='store_true')
    # Weights for the three per-branch losses and the fused final loss.
    parser.add_argument('--weight_loss_read', default=0.06, type=float)
    parser.add_argument('--weight_loss_observe', default=0.06, type=float)
    parser.add_argument('--weight_loss_recall', default=0.08, type=float)
    parser.add_argument('--weight_loss_final', default=0.80, type=float)
    parser.add_argument('--use_read', action='store_true')
    parser.add_argument('--use_observe', action='store_true')
    parser.add_argument('--use_recall', action='store_true')
    parser.add_argument("--train_name", default='FusionMW', type=str)
    args, unknown = parser.parse_known_args()
    return args
class FusionMW(nn.Module):
    """Late-fusion head: scores the Read / Observe / Recall branch
    features per answer choice, then fuses the three scalar scores
    with a linear layer into a final per-choice score."""

    def __init__(self):
        super(FusionMW, self).__init__()
        # One scalar scorer per branch (768-d features in).
        self.fc_read = nn.Sequential(nn.Linear(768, 1))
        self.fc_obs = nn.Sequential(nn.Linear(768, 1))
        self.fc_recall = nn.Sequential(nn.Linear(768, 1))
        self.dropout = nn.Dropout(0.5)
        # Fuses the three branch scores into the final score.
        self.classifier = nn.Sequential(nn.Linear(3, 1))

    def forward(self, in_read_feat, in_obs_feat, in_recall_feat):
        n_choices = in_read_feat.shape[1]

        # Flatten (batch, choices, feat) -> (batch*choices, feat).
        read_flat = in_read_feat.view(-1, in_read_feat.size(-1))
        obs_flat = in_obs_feat.view(-1, in_obs_feat.size(-1))
        recall_flat = in_recall_feat.view(-1, in_recall_feat.size(-1))
        read_flat = self.dropout(read_flat)
        obs_flat = self.dropout(obs_flat)
        recall_flat = self.dropout(recall_flat)

        # Per-branch scalar scores.
        read_scores = self.fc_read(read_flat)
        obs_scores = self.fc_obs(obs_flat)
        recall_scores = self.fc_recall(recall_flat)

        # Final score from the concatenated branch scores.
        fused = torch.squeeze(torch.cat([read_scores, obs_scores, recall_scores], 1), 1)
        final_scores = self.classifier(fused)

        # Reshape everything back to (batch, choices).
        return [read_scores.view(-1, n_choices),
                obs_scores.view(-1, n_choices),
                recall_scores.view(-1, n_choices),
                final_scores.view(-1, n_choices)]
class LanguageData(object):
    """Container for one QA sample: question, subtitles, knowledge
    snippet, the four candidate answers, the gold label, and optional
    vision features."""

    def __init__(self, id_q, question, subtitles, answer1, answer2,
                 answer3, answer4, kg, label, vision=None):
        self.id_q = id_q
        self.question = question
        self.subtitles = subtitles
        self.kg = kg
        self.label = label
        self.vision = vision
        # Candidate answers kept in their original order.
        self.answers = [answer1, answer2, answer3, answer4]
def trainEpoch(args, train_loader, model, criterion, optimizer, epoch):
    """Run one training epoch.

    The optimized loss is a weighted sum of the three per-branch
    losses (read/observe/recall) and the fused final loss, with
    weights taken from the command-line args. Plots the epoch's
    average loss via the global `plotter`.
    """
    read_losses, obs_losses, recall_losses = utils.AverageMeter(), utils.AverageMeter(), utils.AverageMeter()
    final_losses = utils.AverageMeter()
    losses = utils.AverageMeter()
    model.train()
    for batch_idx, (input, target) in enumerate(train_loader):
        # Inputs to Variable type
        input_var = list()
        for j in range(len(input)):
            input_var.append(torch.autograd.Variable(input[j]).cuda())
        # Targets to Variable type
        target_var = list()
        for j in range(len(target)):
            # Fix: `async` became a reserved keyword in Python 3.7, so
            # .cuda(async=True) was a SyntaxError; torch renamed the
            # kwarg to non_blocking.
            target[j] = target[j].cuda(non_blocking=True)
            target_var.append(torch.autograd.Variable(target[j]))
        # Output of the model
        output = model(*input_var)
        # Compute per-branch and fused losses against the gold label.
        read_loss = criterion(output[0], target_var[0])
        obs_loss = criterion(output[1], target_var[0])
        recall_loss = criterion(output[2], target_var[0])
        final_loss = criterion(output[3], target_var[0])
        train_loss = args.weight_loss_read * read_loss + \
                     args.weight_loss_observe * obs_loss + \
                     args.weight_loss_recall * recall_loss + \
                     args.weight_loss_final * final_loss
        # Track losses (weighted by batch size).
        read_losses.update(read_loss.data.cpu().numpy(), input[0].size(0))
        obs_losses.update(obs_loss.data.cpu().numpy(), input[0].size(0))
        recall_losses.update(recall_loss.data.cpu().numpy(), input[0].size(0))
        final_losses.update(final_loss.data.cpu().numpy(), input[0].size(0))
        losses.update(train_loss.data.cpu().numpy(), input[0].size(0))
        # Backpropagate loss and update weights
        optimizer.zero_grad()
        train_loss.backward()
        optimizer.step()
        # Print info
        logger.info('Train Epoch: {} [{}/{} ({:.0f}%)]\t'
                    'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                        epoch, batch_idx, len(train_loader), 100. * batch_idx / len(train_loader), loss=losses))
    # Plot loss after all mini-batches have finished
    plotter.plot('loss', 'train', 'Class Loss', epoch, losses.avg)
def valEpoch(args, val_loader, model, criterion, epoch):
    """Evaluate on the validation set.

    Returns the accuracy of the fused prediction; also plots the
    validation loss and the per-branch accuracies via the global
    `plotter` for diagnostics.
    """
    losses = utils.AverageMeter()
    model.eval()
    for batch_idx, (input, target) in enumerate(val_loader):
        # Inputs to Variable type
        input_var = list()
        for j in range(len(input)):
            input_var.append(torch.autograd.Variable(input[j]).cuda())
        # Targets to Variable type
        target_var = list()
        for j in range(len(target)):
            # Fix: `async` is reserved since Python 3.7; the torch
            # kwarg is non_blocking.
            target[j] = target[j].cuda(non_blocking=True)
            target_var.append(torch.autograd.Variable(target[j]))
        # Output of the model (no gradients needed for validation).
        with torch.no_grad():
            output = model(*input_var)
        # Argmax predictions of the fused head and each branch.
        _, predicted = torch.max(output[3], 1)
        _, p_read = torch.max(output[0], 1)
        _, p_obs = torch.max(output[1], 1)
        _, p_recall = torch.max(output[2], 1)
        # Same weighted loss as in training, for comparable curves.
        read_loss = criterion(output[0], target_var[0])
        obs_loss = criterion(output[1], target_var[0])
        recall_loss = criterion(output[2], target_var[0])
        final_loss = criterion(output[3], target_var[0])
        train_loss = args.weight_loss_read * read_loss + \
                     args.weight_loss_observe * obs_loss + \
                     args.weight_loss_recall * recall_loss + \
                     args.weight_loss_final * final_loss
        losses.update(train_loss.data.cpu().numpy(), input[0].size(0))
        # Accumulate predictions across batches to compute accuracy.
        if batch_idx == 0:
            out = predicted.data.cpu().numpy()
            out_r = p_read.data.cpu().numpy()
            out_o = p_obs.data.cpu().numpy()
            out_ll = p_recall.data.cpu().numpy()
            label = target[0].cpu().numpy()
        else:
            out = np.concatenate((out, predicted.data.cpu().numpy()), axis=0)
            out_r = np.concatenate((out_r, p_read.data.cpu().numpy()), axis=0)
            out_o = np.concatenate((out_o, p_obs.data.cpu().numpy()), axis=0)
            out_ll = np.concatenate((out_ll, p_recall.data.cpu().numpy()), axis=0)
            label = np.concatenate((label, target[0].cpu().numpy()), axis=0)
    # Accuracy of the fused prediction.
    acc = np.sum(out == label) / len(out)
    logger.info('Validation set: Average loss: {:.4f}\t'
                'Accuracy {acc}'.format(losses.avg, acc=acc))
    plotter.plot('loss', 'val', 'Class Loss', epoch, losses.avg)
    plotter.plot('acc', 'val', 'Class Accuracy', epoch, acc)
    # Per-branch accuracies, to diagnose which branch contributes.
    acc_read = np.sum(out_r == label) / len(out)
    acc_osb = np.sum(out_o == label) / len(out)
    acc_recall = np.sum(out_ll == label) / len(out)
    plotter.plot('readacc', 'val', 'Read Accuracy', epoch, acc_read)
    plotter.plot('obsacc', 'val', 'Obs Accuracy', epoch, acc_osb)
    plotter.plot('recallacc', 'val', 'Recall Accuracy', epoch, acc_recall)
    return acc
def train(args, modeldir):
    """Full training loop with early stopping on validation accuracy.

    Saves model_latest.pth.tar every epoch and model_best.pth.tar when
    validation accuracy improves; stops after args.patience epochs
    without improvement.
    """
    # Set GPU and seed all RNGs for reproducibility.
    n_gpu = torch.cuda.device_count()
    logger.info("device: {} n_gpu: {}".format(args.device, n_gpu))
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
    # Create training directory
    if not os.path.exists(modeldir):
        os.makedirs(modeldir)
    # Model, optimizer and loss
    model = FusionMW()
    if args.device == "cuda":
        model.cuda()
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
    class_loss = nn.CrossEntropyLoss().cuda()
    # Data
    trainDataObject = FusionDataloader(args, split='train')
    valDataObject = FusionDataloader(args, split='val')
    train_loader = torch.utils.data.DataLoader(trainDataObject, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=args.workers)
    val_loader = torch.utils.data.DataLoader(valDataObject, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=args.workers)
    num_batches = train_loader.__len__()
    # Now, let's start the training process!
    logger.info('Training loader with %d samples' % train_loader.__len__())
    logger.info('Validation loader with %d samples' % val_loader.__len__())
    logger.info('Training...')
    pattrack = 0   # epochs since last validation improvement
    best_val = 0
    for epoch in range(0, args.nepochs):
        # One train + one validation pass per epoch.
        trainEpoch(args, train_loader, model, class_loss, optimizer, epoch)
        current_val = valEpoch(args, val_loader, model, class_loss, epoch)
        # Check patience (early stopping)
        is_best = current_val > best_val
        best_val = max(current_val, best_val)
        if not is_best:
            pattrack += 1
        else:
            pattrack = 0
        if pattrack >= args.patience:
            break
        logger.info('** Validation information: %f (this accuracy) - %f (best accuracy) - %d (patience valtrack)' % (current_val, best_val, pattrack))
        # Save latest checkpoint every epoch; the best one separately.
        state = {'state_dict': model.state_dict(),
                 'best_val': best_val,
                 'optimizer': optimizer.state_dict(),
                 'pattrack': pattrack,
                 'curr_val': current_val}
        filename = os.path.join(modeldir, 'model_latest.pth.tar')
        torch.save(state, filename)
        if is_best:
            filename = os.path.join(modeldir, 'model_best.pth.tar')
            torch.save(state, filename)
def evaluate(args, modeldir):
    """Load the best checkpoint and report test-set accuracy.

    Accumulates fused predictions (and per-branch score arrays) over
    the whole test loader, then delegates accuracy reporting to
    utils.accuracy against the knowit test CSV.
    """
    # Model
    model = FusionMW()
    if args.device == "cuda":
        model.cuda()
    class_loss = nn.CrossEntropyLoss().cuda()
    logger.info("=> loading checkpoint from '{}'".format(modeldir))
    checkpoint = torch.load(os.path.join(modeldir, 'model_best.pth.tar'))
    model.load_state_dict(checkpoint['state_dict'])
    # Data
    evalDataObject = FusionDataloader(args, split='test')
    test_loader = torch.utils.data.DataLoader(evalDataObject, batch_size=args.batch_size, shuffle=False, pin_memory=(not args.no_cuda), num_workers=args.workers)
    logger.info('Evaluation loader with %d samples' % test_loader.__len__())
    # Switch to evaluation mode & compute test samples embeddings
    batch_time = utils.AverageMeter()
    end = time.time()
    model.eval()
    for i, (input, target) in enumerate(test_loader):
        # Inputs to Variable type
        input_var = list()
        for j in range(len(input)):
            input_var.append(torch.autograd.Variable(input[j]).cuda())
        # Targets to Variable type
        target_var = list()
        for j in range(len(target)):
            # Fix: `async` is reserved since Python 3.7; the torch
            # kwarg is non_blocking.
            target[j] = target[j].cuda(non_blocking=True)
            target_var.append(torch.autograd.Variable(target[j]))
        # Output of the model
        with torch.no_grad():
            output = model(*input_var)
            _, predicted = torch.max(output[3], 1)
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        # Store outputs across batches for the final report.
        if i == 0:
            out = predicted.data.cpu().numpy()
            label = target[0].cpu().numpy()
            index = target[1].cpu().numpy()
            scores_read = output[0].data.cpu().numpy()
            scores_observe = output[1].data.cpu().numpy()
            scores_recall = output[2].data.cpu().numpy()
            scores_final = output[3].data.cpu().numpy()
        else:
            out = np.concatenate((out, predicted.data.cpu().numpy()), axis=0)
            label = np.concatenate((label, target[0].cpu().numpy()), axis=0)
            index = np.concatenate((index, target[1].cpu().numpy()), axis=0)
            scores_read = np.concatenate((scores_read, output[0].cpu().numpy()), axis=0)
            scores_observe = np.concatenate((scores_observe, output[1].cpu().numpy()), axis=0)
            scores_recall = np.concatenate((scores_recall, output[2].cpu().numpy()), axis=0)
            scores_final = np.concatenate((scores_final, output[3].cpu().numpy()), axis=0)
    # Print accuracy
    df = pd.read_csv(os.path.join(args.data_dir, 'knowit_data/knowit_data_test.csv'), delimiter='\t')
    utils.accuracy(df, out, label, index)
if __name__ == "__main__":
    args = get_params()
    assert args.dataset in ['knowit', 'tvqa']
    # Select the dataset-specific dataloader.
    if args.dataset == 'knowit':
        from Source.dataloader_knowit import FusionDataloader
        args.descriptions_file = 'Data/knowit_observe/scenes_descriptions.csv'
    elif args.dataset == 'tvqa':
        # from Source.dataloader_tvqa import FusionDataloader
        logger.error('Sorry, TVQA+ dataset not implemented yet.')
        import sys
        sys.exit(0)
    # Create training and data directories
    modeldir = os.path.join('Training', args.train_name)
    if not os.path.exists(modeldir):
        os.makedirs(modeldir)
    outdatadir = os.path.join(args.data_dir, args.dataset)
    if not os.path.exists(outdatadir):
        os.makedirs(outdatadir)
    # Train only if no best-model checkpoint exists yet.
    if not os.path.isfile(os.path.join(modeldir, 'model_best.pth.tar')):
        global plotter
        plotter = utils.VisdomLinePlotter(env_name=args.train_name)
        train(args, modeldir)
    # Evaluation always runs on the best checkpoint.
    evaluate(args, modeldir)
|
nilq/baby-python
|
python
|
"""
Module: mercadopago/__init__.py
"""
from .sdk import SDK
|
nilq/baby-python
|
python
|
import cv2 as cv
from utilities import show_in_matplotlib
def get_channel(img, channel):
    """Return a single color plane of *img* as a 2-D slice (a view,
    not a copy)."""
    plane = img[:, :, channel]
    return plane
def remove_channel(img, channel):
    """Return a copy of *img* with the given channel zeroed out;
    the input image is left untouched."""
    result = img.copy()
    result[:, :, channel] = 0
    return result
def remove_channel_v0(img, channel):
    """Zero one BGR channel by splitting planes and re-merging (legacy).

    NOTE(review): the plane slices are views into *img*, so this
    zeroes the channel in the input image itself -- unlike
    remove_channel, which works on a copy. Confirm that is intended.
    """
    blue = img[:, :, 0]
    green = img[:, :, 1]
    red = img[:, :, 2]
    if channel == 0:
        blue[:] = 0
    elif channel == 1:
        green[:] = 0
    else:
        # Any other value falls through to the red plane.
        red[:] = 0
    return cv.merge((blue, green, red))
# Demo: show the original image, one isolated channel, and the image
# with that channel removed.
if __name__ == "__main__":
    import cv2 as cv
    img = cv.imread('color_img.png')
    show_in_matplotlib(img, title='original')
    ch = 1
    b = get_channel(img, ch)
    show_in_matplotlib(b, title=f"Channel {ch} only")
    img_merged = remove_channel(img, ch)
    show_in_matplotlib(img_merged, title=f"Channel {ch} removed")
nilq/baby-python
|
python
|
# Toy disk-block allocator: 8 cylinders x 2 tracks x 4 sectors = 64
# blocks tracked in a flat bitmap (0 = free, 1 = used).
# NOTE(review): `map` shadows the builtin of the same name.
map = [0 for i in range(8*2*4)]
while(True):
    x = int(input("Please input the operation number:\n1:get\n2:free\n3:show\n0:quit\n"))
    if (x==0):
        break
    if (x==1):
        # Allocate: collect the indices of all free blocks, then take
        # the first file_size of them (first-fit).
        map_index = []
        file_size = int(input("Please input the file size\n"))
        for i in range(8*2*4):
            if (map[i]==0):
                map_index.append(i)
        if (len(map_index)>=file_size):
            for i in map_index[0:file_size]:
                map[i] = 1
                # Decompose the flat index: 8 sectors per cylinder,
                # 2 tracks of 4 sectors each.
                print("------")
                print("logic address "+str(i)+" allocated")
                print("cylinder number: "+str(i // 8))
                print("track number: "+str((i % 8) // 4))
                print("sector number: "+str(i % 4))
                print("-------")
        else:
            print("Fail")
    if (x==2):
        # Free one block addressed by (cylinder, track, sector).
        cn = int(input("cylinder number: \n"))
        tn = int(input("track number: \n"))
        sn = int(input("sector number: \n"))
        print("The logic address is : "+str(8*cn+tn*4+sn))
        if (map[8*cn+tn*4+sn]==1):
            print("Success!")
            map[8*cn+tn*4+sn]=0
        else:
            print("Error!")
    if (x==3):
        # Print the bitmap, 8 blocks per row.
        ans=""
        for i in range(8):
            for j in range(4*2):
                ans+=str(map[i*8+j])
            ans+="\n"
        print(ans)
|
nilq/baby-python
|
python
|
"""Unit tests for powercycle_sentinel.py."""
# pylint: disable=missing-docstring
import unittest
from datetime import datetime, timezone, timedelta
from unittest.mock import Mock
from evergreen import EvergreenApi, Task
from buildscripts.powercycle_sentinel import watch_tasks, POWERCYCLE_TASK_EXEC_TIMEOUT_SECS
def make_task_mock(evg_api, task_id, start_time, finish_time):
    """Build an evergreen Task carrying just the fields watch_tasks reads."""
    return Task({
        "task_id": task_id,
        "start_time": start_time,
        "finish_time": finish_time,
    }, evg_api)
class TestWatchTasks(unittest.TestCase):
    """Test watch_tasks."""

    def test_no_long_running_tasks(self):
        # Both tasks start and finish "now", so neither exceeds the
        # exec timeout and nothing is reported.
        evg_api = EvergreenApi()
        task_ids = ["1", "2"]
        now = datetime.now(timezone.utc).isoformat()
        task_1 = make_task_mock(evg_api, task_ids[0], now, now)
        task_2 = make_task_mock(evg_api, task_ids[1], now, now)
        evg_api.task_by_id = Mock(
            side_effect=(lambda task_id: {
                "1": task_1,
                "2": task_2,
            }[task_id]))
        long_running_task_ids = watch_tasks(task_ids, evg_api, 0)
        self.assertEqual([], long_running_task_ids)

    def test_found_long_running_tasks(self):
        # task_2 started long ago and has no finish time, so it is the
        # only task reported as long-running.
        evg_api = EvergreenApi()
        task_ids = ["1", "2"]
        # NOTE(review): timedelta(hours=POWERCYCLE_TASK_EXEC_TIMEOUT_SECS)
        # interprets a seconds constant as hours, yielding an interval
        # far beyond the timeout. Presumably deliberate overshoot to
        # guarantee the threshold is crossed -- confirm.
        exec_timeout_seconds_ago = (datetime.now(timezone.utc) -
                                    timedelta(hours=POWERCYCLE_TASK_EXEC_TIMEOUT_SECS)).isoformat()
        now = datetime.now(timezone.utc).isoformat()
        task_1 = make_task_mock(evg_api, task_ids[0], exec_timeout_seconds_ago, now)
        task_2 = make_task_mock(evg_api, task_ids[1], exec_timeout_seconds_ago, None)
        evg_api.task_by_id = Mock(
            side_effect=(lambda task_id: {
                "1": task_1,
                "2": task_2,
            }[task_id]))
        long_running_task_ids = watch_tasks(task_ids, evg_api, 0)
        self.assertEqual([task_2.task_id], long_running_task_ids)
|
nilq/baby-python
|
python
|
import functools
class Codec:
    """Tiny URL shortener: each long URL gets a sequential id rendered
    as a fixed-width, 6-character base-62 string (supports up to
    62**6 URLs per codec).

    Fix: the URL store used to be a *class* attribute, so every Codec
    instance shared (and grew) the same list; it is now per-instance.
    The redundant inner `from functools import reduce` was also
    removed in favor of the module-level functools import.
    """

    def __init__(self):
        # Per-instance URL store; the list index is the short id.
        self.db = []

    def encode(self, longUrl):
        """Encodes a URL to a shortened URL.

        :type longUrl: str
        :rtype: str
        """
        index = len(self.db)
        self.db.append(longUrl)
        return self.conversionA(index)

    def decode(self, shortUrl):
        """Decodes a shortened URL to its original URL.

        :type shortUrl: str
        :rtype: str
        """
        return self.conversionC(shortUrl)

    def conversionA(self, s):
        """Render integer *s* as a 6-digit base-62 string, most
        significant digit first."""
        digits = []
        for _ in range(6):
            digits.append(self.conversionB(s % 62))
            s //= 62
        return ''.join(reversed(digits))

    def conversionB(self, b):
        """Map a value 0..61 to its base-62 character."""
        if b < 10:
            return chr(48 + b)           # '0'-'9'
        if b < 36:
            return chr(65 + b - 10)      # 'A'-'Z'
        return chr(97 + b - 36)          # 'a'-'z'

    def conversionC(self, c):
        """Decode a base-62 short string back to the stored URL."""
        values = [self.conversionD(ch) for ch in c]
        index = functools.reduce(lambda acc, d: acc * 62 + d, values, 0)
        return self.db[index]

    def conversionD(self, d):
        """Map a base-62 character back to its value 0..61."""
        c = ord(d)
        if c < 58:
            return c - 48                # '0'-'9'
        if c <= ord('Z'):
            return c + 10 - 65           # 'A'-'Z'
        return c + 36 - 97               # 'a'-'z'
|
nilq/baby-python
|
python
|
# https://deeplearningcourses.com/c/data-science-natural-language-processing-in-python
# https://www.udemy.com/data-science-natural-language-processing-in-python
# Author: http://lazyprogrammer.me
import numpy as np
import matplotlib.pyplot as plt
import string
import random
import re
import requests
import os
import textwrap
### create substitution cipher

# Two copies of the alphabet: one acts as the key set, the other as the values.
letters1 = list(string.ascii_lowercase)
letters2 = list(string.ascii_lowercase)

# true_mapping: plaintext letter -> ciphertext letter (the secret key).
true_mapping = {}

# shuffle second set of letters to get a random substitution
random.shuffle(letters2)

# populate map
for k, v in zip(letters1, letters2):
    true_mapping[k] = v

### the language model

# Markov transition matrix: M[i, j] ~ count of letter j following letter i.
# Initialized to ones (add-one smoothing) so unseen bigrams keep nonzero
# probability when we take logs later.
M = np.ones((26, 26))

# initial state distribution: pi[i] ~ count of words starting with letter i
pi = np.zeros(26)
# a function to update the Markov matrix
def update_transition(ch1, ch2):
    """Count the bigram ch1 -> ch2 in the global transition matrix M."""
    # ord('a') == 97, so 'a' maps to row/column 0.
    row, col = ord(ch1) - 97, ord(ch2) - 97
    M[row, col] += 1
# a function to update the initial state distribution
def update_pi(ch):
    """Count ch as a word-initial letter in the global distribution pi."""
    pi[ord(ch) - 97] += 1
# get the log-probability of a word / token
def get_word_prob(word):
    """Return the log-probability of one lowercase word under the model.

    The first letter is scored with the initial distribution ``pi``; each
    subsequent letter with the bigram matrix ``M``.
    """
    prev = ord(word[0]) - 97
    logp = np.log(pi[prev])
    for ch in word[1:]:
        cur = ord(ch) - 97
        logp += np.log(M[prev, cur])  # add the bigram log-probability
        prev = cur
    return logp
# get the probability of a sequence of words
def get_sequence_prob(words):
    """Return the total log-probability of a sequence of words.

    Accepts either a whitespace-separated string or an iterable of tokens;
    the per-word log-probabilities (see get_word_prob) are summed.
    """
    # Idiomatic type check (the original used `type(words) == str`).
    if isinstance(words, str):
        words = words.split()
    return sum(get_word_prob(word) for word in words)
### create a markov model based on an English dataset
# is an edit of https://www.gutenberg.org/ebooks/2701
# (I removed the front and back matter)

# download the corpus once and cache it locally
if not os.path.exists('moby_dick.txt'):
    print("Downloading moby dick...")
    r = requests.get('https://lazyprogrammer.me/course_files/moby_dick.txt')
    with open('moby_dick.txt', 'w') as f:
        f.write(r.content.decode())

# for replacing non-alpha characters
regex = re.compile('[^a-zA-Z]')

# stream the corpus line by line and accumulate counts into pi and M
for line in open('moby_dick.txt'):
    line = line.rstrip()

    # there are blank lines in the file
    if line:
        line = regex.sub(' ', line)  # replace all non-alpha characters with space

        # split the tokens in the line and lowercase
        tokens = line.lower().split()

        for token in tokens:
            # update the model

            # first letter counts toward the initial distribution
            ch0 = token[0]
            update_pi(ch0)

            # every following letter counts toward the bigram matrix
            for ch1 in token[1:]:
                update_transition(ch0, ch1)
                ch0 = ch1

# normalize the counts into probabilities (each row of M sums to 1)
pi /= pi.sum()
M /= M.sum(axis=1, keepdims=True)

### encode a message

# this is a random excerpt from Project Gutenberg's
# The Adventures of Sherlock Holmes, by Arthur Conan Doyle
# https://www.gutenberg.org/ebooks/1661
original_message = '''I then lounged down the street and found,
as I expected, that there was a mews in a lane which runs down
by one wall of the garden. I lent the ostlers a hand in rubbing
down their horses, and received in exchange twopence, a glass of
half-and-half, two fills of shag tobacco, and as much information
as I could desire about Miss Adler, to say nothing of half a dozen
other people in the neighbourhood in whom I was not in the least
interested, but whose biographies I was compelled to listen to.
'''

# (continuation of the excerpt, unused — kept for reference)
# Away they went, and I was just wondering whether I should not do well
# to follow them when up the lane came a neat little landau, the coachman
# with his coat only half-buttoned, and his tie under his ear, while all
# the tags of his harness were sticking out of the buckles. It hadn't
# pulled up before she shot out of the hall door and into it. I only
# caught a glimpse of her at the moment, but she was a lovely woman, with
# a face that a man might die for.

# My cabby drove fast. I don't think I ever drove faster, but the others
# were there before us. The cab and the landau with their steaming horses
# were in front of the door when I arrived. I paid the man and hurried
# into the church. There was not a soul there save the two whom I had
# followed and a surpliced clergyman, who seemed to be expostulating with
# them. They were all three standing in a knot in front of the altar. I
# lounged up the side aisle like any other idler who has dropped into a
# church. Suddenly, to my surprise, the three at the altar faced round to
# me, and Godfrey Norton came running as hard as he could towards me.
# a function to encode a message
def encode_message(msg):
    """Lowercase *msg*, blank out non-letters, and apply the true cipher map."""
    cleaned = regex.sub(' ', msg.lower())
    # Characters not in the map (i.e. spaces) pass through unchanged.
    return ''.join(true_mapping.get(ch, ch) for ch in cleaned)
# Ciphertext that the evolutionary algorithm below will try to crack.
encoded_message = encode_message(original_message)
# a function to decode a message
def decode_message(msg, word_map):
    """Apply *word_map* (cipher letter -> plaintext letter) to *msg*.

    Characters absent from the map (e.g. spaces) are copied through unchanged.
    """
    return ''.join(word_map.get(ch, ch) for ch in msg)
### run an evolutionary algorithm to decode the message

# Initialization: 20 random candidate keys, each a permutation of the
# alphabet (position = plaintext letter, value = cipher letter).
dna_pool = []
for _ in range(20):
    dna = list(string.ascii_lowercase)
    random.shuffle(dna)
    dna_pool.append(dna)
def evolve_offspring(dna_pool, n_children):
    """Return the pool plus n_children mutated copies of each member.

    Each child is its parent with one randomly chosen pair of positions
    swapped (the two positions may coincide, yielding an unchanged copy).
    The parents themselves are kept at the end of the returned list.
    """
    offspring = []
    for parent in dna_pool:
        for _ in range(n_children):
            child = parent.copy()
            a = np.random.randint(len(child))
            b = np.random.randint(len(child))
            child[a], child[b] = child[b], child[a]
            offspring.append(child)
    return offspring + dna_pool
num_iters = 1000
scores = np.zeros(num_iters)   # mean log-likelihood per generation
best_dna = None
best_map = None
best_score = float('-inf')
for i in range(num_iters):
    if i > 0:
        # get offspring from the current dna pool (3 mutants per survivor)
        dna_pool = evolve_offspring(dna_pool, 3)

    # calculate score (log-likelihood of the decoded text) for each dna
    dna2score = {}
    for dna in dna_pool:
        # populate map: plaintext letter -> candidate cipher letter
        current_map = {}
        for k, v in zip(letters1, dna):
            current_map[k] = v

        decoded_message = decode_message(encoded_message, current_map)
        score = get_sequence_prob(decoded_message)

        # store it
        # needs to be a string to be a dict key
        dna2score[''.join(dna)] = score

        # record the best candidate seen over the whole run
        if score > best_score:
            best_dna = dna
            best_map = current_map
            best_score = score

    # average score for this generation
    scores[i] = np.mean(list(dna2score.values()))

    # survivor selection: keep only the best 5 dna
    # also turn them back into list of single chars
    sorted_dna = sorted(dna2score.items(), key=lambda x: x[1], reverse=True)
    dna_pool = [list(k) for k, v in sorted_dna[:5]]

    if i % 200 == 0:
        print("iter:", i, "score:", scores[i], "best so far:", best_score)

# decode with the best map found and compare against the truth
decoded_message = decode_message(encoded_message, best_map)

print("LL of decoded message:", get_sequence_prob(decoded_message))
print("LL of true message:", get_sequence_prob(regex.sub(' ', original_message.lower())))


# report which letters of the recovered key are wrong
for true, v in true_mapping.items():
    pred = best_map[v]
    if true != pred:
        print("true: %s, pred: %s" % (true, pred))

# print the final decoded message
print("Decoded message:\n", textwrap.fill(decoded_message))

print("\nTrue message:\n", original_message)
|
nilq/baby-python
|
python
|
from collections import defaultdict
"""
students = 10
leads = 9
clues = [[1, 2], [3, 4], [5, 2], [4, 6], [2, 6], [8, 7], [9, 7], [1, 6], [2, 4]]
"""
class Unionfind():
    """Disjoint-set (union-find) over students numbered 1..students.

    ``leads`` and ``clues`` are accepted by the constructor for interface
    compatibility but are unused there; pass the clue list to
    :meth:`find_group` instead.
    """

    def __init__(self, students, leads, clues):
        self.students = students
        # Each node starts as its own root.
        self.parent = {item: item for item in range(1, self.students + 1)}
        # root -> list of members; filled by find_group().
        self.group = defaultdict(list)

    def find(self, u):
        """Return the root of u, compressing the path along the way."""
        if self.parent[u] != u:
            self.parent[u] = self.find(self.parent[u])
        return self.parent[u]

    def union(self, u, v):
        """Merge the sets containing u and v by linking root to root.

        The original scanned every node on each union (O(n) per call); a
        single root link plus path compression in find() gives near-O(1)
        amortized behavior with identical results.
        """
        root_u = self.find(u)
        root_v = self.find(v)
        if root_u != root_v:
            self.parent[root_v] = root_u

    def find_group(self, clues):
        """Apply all (u, v) clues and return the number of connected groups."""
        for u, v in clues:
            self.union(u, v)
        for i in range(1, self.students + 1):
            # Resolve via find() — with root-linking, parent[i] may be an
            # intermediate node rather than the root itself.
            self.group[self.find(i)].append(i)
        return len(self.group)
def main():
    """Demo: 10 students linked by 9 clues — prints the group count (3)."""
    students, leads = 10, 9
    clues = [[1, 2], [3, 4], [5, 2], [4, 6], [2, 6], [8, 7], [9, 7], [1, 6], [2, 4]]
    finder = Unionfind(students, leads, clues)
    print(finder.find_group(clues))


if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
# Set up configuration variables

__all__ = ['custom_viewer', 'qglue', 'test']

import os
import sys

from pkg_resources import get_distribution, DistributionNotFound

try:
    __version__ = get_distribution('glue-core').version
except DistributionNotFound:
    # Package metadata unavailable (e.g. running from a source checkout).
    __version__ = 'undefined'

from ._mpl_backend import MatplotlibBackendSetter

# Install an import hook so a suitable matplotlib backend is chosen lazily.
sys.meta_path.append(MatplotlibBackendSetter())

from glue.viewers.custom.helper import custom_viewer

# Load user's configuration file
from .config import load_configuration
env = load_configuration()

from .qglue import qglue

from .main import load_plugins  # noqa
def test(no_optional_skip=False):
    """Run glue's test suite with pytest and return pytest's exit status.

    Set *no_optional_skip* to also run tests normally skipped when optional
    dependencies are missing.
    """
    from pytest import main
    package_root = os.path.abspath(os.path.dirname(__file__))
    pytest_args = [package_root, '-x']
    if no_optional_skip:
        pytest_args.append('--no-optional-skip')
    return main(args=pytest_args)
# Restore persisted user settings as soon as the package is imported.
from glue._settings_helpers import load_settings
load_settings()
# In PyQt 5.5+, PyQt overrides the default exception catching and fatally
# crashes the Qt application without printing out any details about the error.
# Below we revert the exception hook to the original Python one. Note that we
# can't just do sys.excepthook = sys.__excepthook__ otherwise PyQt will detect
# the default excepthook is in place and override it.


def handle_exception(exc_type, exc_value, exc_traceback):
    """Forward uncaught exceptions to Python's original excepthook.

    Wrapping the call in a distinct function (instead of assigning
    ``sys.__excepthook__`` directly) prevents PyQt from recognizing and
    replacing the default hook.
    """
    sys.__excepthook__(exc_type, exc_value, exc_traceback)


sys.excepthook = handle_exception
|
nilq/baby-python
|
python
|
def create_user(base_cls):
    """Return a ``User_info`` class derived from *base_cls*.

    ``base_cls`` is presumably a SQLAlchemy declarative base, and
    ``{'autoload': True}`` reflects the columns from an existing
    ``user_info`` table — TODO confirm against the caller.
    """
    class User_info(base_cls):
        __tablename__ = 'user_info'
        __table_args__ = {'autoload': True}
    return User_info
|
nilq/baby-python
|
python
|
import os
from enum import Enum
from pathlib import Path
import pandas as pd
import matplotlib.pyplot as plt
class Verbosity(Enum):
    # Numeric order doubles as the severity threshold: Logger prints a
    # message iff its configured verbosity value is >= the message level.
    error = 1
    warning = 2
    info = 3
    debug = 4
BASE_FOLDER = "output/"
class Logger:
    """Severity-filtered logger that writes to stdout or a file under BASE_FOLDER.

    The class attribute ``current_run`` (a subfolder name, expected to end in
    "/") scopes all file output of every Logger instance to one run.
    """

    # Shared run subfolder for all Logger instances (None = no subfolder).
    current_run = None

    def __init__(self, verbosity=Verbosity.info, markdown=False, output_file=None, img_format='png') -> None:
        """
        :param verbosity: a Verbosity member or its integer value.
        :param markdown: emit markdown-style line endings when True.
        :param output_file: log file name; None means log to stdout.
        :param img_format: default image format for log_img().
        """
        # Accept either a Verbosity member or its integer value.
        self.verbosity = Verbosity(verbosity) if isinstance(verbosity, int) else verbosity
        self.markdown_mode = markdown
        self.output_file = output_file
        self.img_format = img_format

    def df(self, frame):
        """Log *frame* (coerced to a DataFrame if needed) as a markdown table."""
        if not isinstance(frame, pd.DataFrame):
            frame = pd.DataFrame(frame)
        self.info("\n")
        self.info(frame.to_markdown(), "\n\n")

    def csv(self, frame: pd.DataFrame, name: str, extra="csv/"):
        """Write *frame* as CSV into the run's ``csv/`` subfolder (by default)."""
        file_path = self.get_file_path(name, extra)
        frame.to_csv(file_path)

    def list(self, data):
        """Log every item of *data* on its own line, wrapped in brackets."""
        self.__print("[")
        for item in data:
            self.__print(item, ",")
        self.__print("]")

    def log_img(self, name: str, file_format=None):
        """Save the current matplotlib figure as ``<name>.<format>`` and reset pyplot."""
        if file_format is None:
            file_format = self.img_format
        print(f"saving: {name} as {file_format}")
        file_path = self.get_file_path(f"{name}.{file_format}")
        plt.savefig(file_path, bbox_inches='tight', format=file_format)
        # Clear and close so the next plot starts from a clean figure.
        plt.clf()
        plt.close()
        self.info(f"")  # blank separator line in the log

    def get_file_path(self, name, extra=None):
        """Return the resolved Path for *name*, creating its folder if needed."""
        subfolder = Logger.current_run  # may be None
        folder = BASE_FOLDER + subfolder if subfolder is not None else BASE_FOLDER
        if extra is not None:
            folder = folder + extra
        Path(folder).mkdir(parents=True, exist_ok=True)
        return Path(folder + name).resolve()

    def set_verbosity(self, v: Verbosity):
        """Replace the current verbosity threshold."""
        self.verbosity = v

    def debug(self, *args):
        """Log at debug level (printed only at Verbosity.debug)."""
        if self.verbosity.value >= Verbosity.debug.value:
            self.__print(*args)

    def info(self, *args):
        """Log at info level."""
        if self.verbosity.value >= Verbosity.info.value:
            self.__print(*args)

    def warning(self, *args):
        """Log at warning level."""
        if self.verbosity.value >= Verbosity.warning.value:
            self.__print(*args)

    def error(self, *args):
        """Log at error level (printed at any configured verbosity)."""
        if self.verbosity.value >= Verbosity.error.value:
            self.__print(*args)

    def is_info(self):
        """Return True if info-level messages would currently be printed."""
        return self.verbosity.value >= Verbosity.info.value

    def __print(self, *args):
        """Write *args* to the configured file (append) or to stdout."""
        if self.output_file is not None:
            file_path = self.get_file_path(self.output_file)
            # Mode "a" creates the file when missing, so the original
            # exists()-check was redundant; the explicit file.close() inside
            # the `with` block was redundant too and has been removed.
            with open(file_path, "a") as file:
                if self.markdown_mode:
                    # Two trailing spaces force a markdown line break.
                    print(*args, file=file, end=' \n')
                else:
                    print(*args, file=file)
        else:
            if not self.markdown_mode:
                print(*args)
            else:
                print(*args, " ")
|
nilq/baby-python
|
python
|
"""
Attributes are arbitrary data stored on objects. Attributes supports
both pure-string values and pickled arbitrary data.
Attributes are also used to implement Nicks. This module also contains
the Attribute- and NickHandlers as well as the `NAttributeHandler`,
which is a non-db version of Attributes.
"""
import re
import weakref
from django.db import models
from django.conf import settings
from django.utils.encoding import smart_str
from evennia.locks.lockhandler import LockHandler
from evennia.utils.idmapper.models import SharedMemoryModel
from evennia.utils.dbserialize import to_pickle, from_pickle
from evennia.utils.picklefield import PickledObjectField
from evennia.utils.utils import lazy_property, to_str, make_iter
_TYPECLASS_AGGRESSIVE_CACHE = settings.TYPECLASS_AGGRESSIVE_CACHE
#------------------------------------------------------------
#
# Attributes
#
#------------------------------------------------------------
class Attribute(SharedMemoryModel):
    """
    Attributes are things that are specific to different types of objects. For
    example, a drink container needs to store its fill level, whereas an exit
    needs to store its open/closed/locked/unlocked state. These are done via
    attributes, rather than making different classes for each object type and
    storing them directly. The added benefit is that we can add/remove
    attributes on the fly as we like.

    The Attribute class defines the following properties:
        key - primary identifier.
        lock_storage - perm strings.
        obj - which object the attribute is defined on.
        date_created - when the attribute was created.
        value - the data stored in the attribute, in pickled form
                using wrappers to be able to store/retrieve models.
        strvalue - string-only data. This data is not pickled and is
                   thus faster to search for in the database.
        category - optional character string for grouping the Attribute.

    """

    #
    # Attribute Database Model setup
    #
    # These database fields are all set using their corresponding properties,
    # named same as the field, but withtout the db_* prefix.
    db_key = models.CharField('key', max_length=255, db_index=True)
    db_value = PickledObjectField(
        'value', null=True,
        help_text="The data returned when the attribute is accessed. Must be "
                  "written as a Python literal if editing through the admin "
                  "interface. Attribute values which are not Python literals "
                  "cannot be edited through the admin interface.")
    db_strvalue = models.TextField(
        'strvalue', null=True, blank=True,
        help_text="String-specific storage for quick look-up")
    db_category = models.CharField(
        'category', max_length=128, db_index=True, blank=True, null=True,
        help_text="Optional categorization of attribute.")
    # Lock storage
    db_lock_storage = models.TextField(
        'locks', blank=True,
        help_text="Lockstrings for this object are stored here.")
    db_model = models.CharField(
        'model', max_length=32, db_index=True, blank=True, null=True,
        help_text="Which model of object this attribute is attached to (A "
                  "natural key like 'objects.dbobject'). You should not change "
                  "this value unless you know what you are doing.")
    # subclass of Attribute (None or nick)
    db_attrtype = models.CharField(
        'attrtype', max_length=16, db_index=True, blank=True, null=True,
        help_text="Subclass of Attribute (None or nick)")
    # time stamp
    db_date_created = models.DateTimeField(
        'date_created', editable=False, auto_now_add=True)

    # Database manager
    #objects = managers.AttributeManager()

    @lazy_property
    def locks(self):
        # Lock handler is built lazily on first access and then cached.
        return LockHandler(self)

    class Meta:
        "Define Django meta options"
        verbose_name = "Evennia Attribute"

    # read-only wrappers around the db_* fields
    key = property(lambda self: self.db_key)
    strvalue = property(lambda self: self.db_strvalue)
    category = property(lambda self: self.db_category)
    model = property(lambda self: self.db_model)
    attrtype = property(lambda self: self.db_attrtype)
    date_created = property(lambda self: self.db_date_created)

    def __lock_storage_get(self):
        "Getter for the lock_storage property."
        return self.db_lock_storage

    def __lock_storage_set(self, value):
        "Setter; persists only the changed field."
        self.db_lock_storage = value
        self.save(update_fields=["db_lock_storage"])

    def __lock_storage_del(self):
        "Deleter; clears (rather than removes) the lockstring."
        self.db_lock_storage = ""
        self.save(update_fields=["db_lock_storage"])
    lock_storage = property(__lock_storage_get, __lock_storage_set, __lock_storage_del)

    # Wrapper properties to easily set database fields. These are
    # @property decorators that allows to access these fields using
    # normal python operations (without having to remember to save()
    # etc). So e.g. a property 'attr' has a get/set/del decorator
    # defined that allows the user to do self.attr = value,
    # value = self.attr and del self.attr respectively (where self
    # is the object in question).

    # value property (wraps db_value)
    #@property
    def __value_get(self):
        """
        Getter. Allows for `value = self.value`.
        We cannot cache here since it makes certain cases (such
        as storing a dbobj which is then deleted elsewhere) out-of-sync.
        The overhead of unpickling seems hard to avoid.
        """
        return from_pickle(self.db_value, db_obj=self)

    #@value.setter
    def __value_set(self, new_value):
        """
        Setter. Allows for self.value = value. We cannot cache here,
        see self.__value_get.
        """
        self.db_value = to_pickle(new_value)
        self.save(update_fields=["db_value"])

    #@value.deleter
    def __value_del(self):
        "Deleter. Allows for del attr.value. This removes the entire attribute."
        self.delete()
    value = property(__value_get, __value_set, __value_del)

    #
    #
    # Attribute methods
    #
    #

    def __str__(self):
        return smart_str("%s(%s)" % (self.db_key, self.id))

    def __unicode__(self):
        # Python 2 legacy text representation.
        return u"%s(%s)" % (self.db_key, self.id)

    def access(self, accessing_obj, access_type='read', default=False, **kwargs):
        """
        Determines if another object has permission to access this Attribute.

        Args:
            accessing_obj (object): object trying to access this one.
            access_type (optional): type of access sought.
            default (optional): what to return if no lock of access_type was found.

        Kwargs:
            **kwargs: passed to `at_access` hook along with `result`.

        Returns:
            result (bool): outcome of the lock check.
        """
        result = self.locks.check(accessing_obj, access_type=access_type, default=default)
        #self.at_access(result, **kwargs)
        return result
#
# Handlers making use of the Attribute model
#
class AttributeHandler(object):
    """
    Handler for adding Attributes to the object.
    """
    _m2m_fieldname = "db_attributes"
    _attrcreate = "attrcreate"
    _attredit = "attredit"
    _attrread = "attrread"
    _attrtype = None

    def __init__(self, obj):
        "Initialize handler."
        self.obj = obj
        self._objid = obj.id
        self._model = to_str(obj.__dbclass__.__name__.lower())
        # key -> Attribute cache; None means "not built yet".
        self._cache = None

    def _recache(self):
        "Cache all attributes of this object."
        query = {"%s__id" % self._model: self._objid,
                 "attribute__db_attrtype": self._attrtype}
        attrs = [conn.attribute for conn in getattr(
            self.obj, self._m2m_fieldname).through.objects.filter(**query)]
        # Cache keys have the form "<key>-<category>". Note that a missing
        # category is rendered by %s as the literal string "None".
        self._cache = dict(("%s-%s" % (to_str(attr.db_key).lower(),
                                       attr.db_category.lower() if attr.db_category else None),
                            attr) for attr in attrs)

    def has(self, key, category=None):
        """
        Checks if the given Attribute (or list of Attributes) exists on
        the object.

        If an iterable is given, returns list of booleans.
        """
        if self._cache is None or not _TYPECLASS_AGGRESSIVE_CACHE:
            self._recache()
        key = [k.strip().lower() for k in make_iter(key) if k]
        category = category.strip().lower() if category is not None else None
        searchkeys = ["%s-%s" % (k, category) for k in make_iter(key)]
        ret = [self._cache.get(skey) for skey in searchkeys if skey in self._cache]
        return ret[0] if len(ret) == 1 else ret

    def get(self, key=None, category=None, default=None, return_obj=False,
            strattr=False, raise_exception=False, accessing_obj=None,
            default_access=True, not_found_none=False):
        """
        Returns the value of the given Attribute or list of Attributes.

        `strattr` will cause the string-only value field instead of the normal
        pickled field data. Use to get back values from Attributes added with
        the `strattr` keyword.

        If `return_obj=True`, return the matching Attribute object
        instead. Returns `default` if no matches (or [ ] if `key` was a list
        with no matches). If `raise_exception=True`, failure to find a
        match will raise `AttributeError` instead.

        If `accessing_obj` is given, its `attrread` permission lock will be
        checked before displaying each looked-after Attribute. If no
        `accessing_obj` is given, no check will be done.
        """
        class RetDefault(object):
            "Holds default values"
            def __init__(self):
                self.value = default
                self.strvalue = str(default) if default is not None else None

        if self._cache is None or not _TYPECLASS_AGGRESSIVE_CACHE:
            self._recache()
        ret = []
        key = [k.strip().lower() for k in make_iter(key) if k]
        category = category.strip().lower() if category is not None else None
        if not key:
            # return all with matching category (or no category)
            # (fix) the original built `catkey = None` when category was None,
            # making str.endswith raise TypeError; category-less attributes
            # are cached with the literal suffix "-None" (see _recache).
            catkey = "-%s" % category
            ret = [attr for attrkey, attr in self._cache.items()
                   if attrkey and attrkey.endswith(catkey)]
        else:
            for searchkey in ("%s-%s" % (k, category) for k in key):
                attr_obj = self._cache.get(searchkey)
                if attr_obj:
                    ret.append(attr_obj)
                else:
                    if raise_exception:
                        raise AttributeError
                    else:
                        ret.append(RetDefault())
        if accessing_obj:
            # check 'attrread' locks
            ret = [attr for attr in ret
                   if attr.access(accessing_obj, self._attrread, default=default_access)]
        if strattr:
            ret = ret if return_obj else [attr.strvalue for attr in ret if attr]
        else:
            ret = ret if return_obj else [attr.value for attr in ret if attr]
        if not ret:
            return ret if len(key) > 1 else default
        return ret[0] if len(ret) == 1 else ret

    def add(self, key, value, category=None, lockstring="",
            strattr=False, accessing_obj=None, default_access=True):
        """
        Add attribute to object, with optional `lockstring`.

        If `strattr` is set, the `db_strvalue` field will be used (no pickling).
        Use the `get()` method with the `strattr` keyword to get it back.

        If `accessing_obj` is given, `self.obj`'s `attrcreate` lock access
        will be checked against it. If no `accessing_obj` is given, no check
        will be done.
        """
        if accessing_obj and not self.obj.access(accessing_obj,
                                                 self._attrcreate, default=default_access):
            # check create access
            return
        if self._cache is None:
            self._recache()
        if not key:
            return

        category = category.strip().lower() if category is not None else None
        keystr = key.strip().lower()
        cachekey = "%s-%s" % (keystr, category)
        attr_obj = self._cache.get(cachekey)

        if attr_obj:
            # update an existing attribute object
            if strattr:
                # store as a simple string (will not notify OOB handlers)
                attr_obj.db_strvalue = value
                attr_obj.save(update_fields=["db_strvalue"])
            else:
                # store normally (this will also notify OOB handlers)
                attr_obj.value = value
        else:
            # create a new Attribute (no OOB handlers can be notified)
            kwargs = {"db_key": keystr, "db_category": category,
                      "db_model": self._model, "db_attrtype": self._attrtype,
                      "db_value": None if strattr else to_pickle(value),
                      "db_strvalue": value if strattr else None}
            new_attr = Attribute(**kwargs)
            new_attr.save()
            getattr(self.obj, self._m2m_fieldname).add(new_attr)
            self._cache[cachekey] = new_attr

    def batch_add(self, key, value, category=None, lockstring="",
                  strattr=False, accessing_obj=None, default_access=True):
        """
        Batch-version of `add()`. This is more efficient than
        repeat-calling add.

        `key` and `value` must be sequences of the same length, each
        representing a key-value pair.
        """
        if accessing_obj and not self.obj.access(accessing_obj,
                                                 self._attrcreate, default=default_access):
            # check create access
            return
        if self._cache is None:
            self._recache()
        if not key:
            return
        keys, values = make_iter(key), make_iter(value)

        if len(keys) != len(values):
            # (fix) the original formatted with `% key, value`, which built a
            # tuple instead of interpolating and raised TypeError.
            raise RuntimeError(
                "AttributeHandler.add(): key and value of different length: %s vs %s" % (key, value))
        category = category.strip().lower() if category is not None else None
        new_attrobjs = []
        for ikey, keystr in enumerate(keys):
            keystr = keystr.strip().lower()
            new_value = values[ikey]
            cachekey = "%s-%s" % (keystr, category)
            attr_obj = self._cache.get(cachekey)

            if attr_obj:
                # update an existing attribute object
                if strattr:
                    # store as a simple string (will not notify OOB handlers)
                    attr_obj.db_strvalue = new_value
                    attr_obj.save(update_fields=["db_strvalue"])
                else:
                    # store normally (this will also notify OOB handlers)
                    attr_obj.value = new_value
            else:
                # create a new Attribute (no OOB handlers can be notified)
                # (fix) db_strvalue stored the whole `value` sequence instead
                # of this pair's `new_value`; also set db_model so batch-added
                # attributes match those created by add().
                kwargs = {"db_key": keystr, "db_category": category,
                          "db_model": self._model, "db_attrtype": self._attrtype,
                          "db_value": None if strattr else to_pickle(new_value),
                          "db_strvalue": new_value if strattr else None}
                new_attr = Attribute(**kwargs)
                new_attr.save()
                new_attrobjs.append(new_attr)
        if new_attrobjs:
            # Add new objects to m2m field all at once
            getattr(self.obj, self._m2m_fieldname).add(*new_attrobjs)
            self._recache()

    def remove(self, key, raise_exception=False, category=None,
               accessing_obj=None, default_access=True):
        """
        Remove attribute or a list of attributes from object.

        If `accessing_obj` is given, will check against the `attredit` lock.
        If not given, this check is skipped.
        """
        if self._cache is None or not _TYPECLASS_AGGRESSIVE_CACHE:
            self._recache()
        key = [k.strip().lower() for k in make_iter(key) if k]
        category = category.strip().lower() if category is not None else None
        for searchstr in ("%s-%s" % (k, category) for k in key):
            attr_obj = self._cache.get(searchstr)
            if attr_obj:
                if not (accessing_obj and not attr_obj.access(accessing_obj,
                                                              self._attredit, default=default_access)):
                    attr_obj.delete()
            elif not attr_obj and raise_exception:
                raise AttributeError
        self._recache()

    def clear(self, category=None, accessing_obj=None, default_access=True):
        """
        Remove all Attributes on this object. If `accessing_obj` is
        given, check the `attredit` lock on each Attribute before
        continuing. If not given, skip check.
        """
        if self._cache is None or not _TYPECLASS_AGGRESSIVE_CACHE:
            self._recache()
        if accessing_obj:
            [attr.delete() for attr in self._cache.values()
             if attr.access(accessing_obj, self._attredit, default=default_access)]
        else:
            [attr.delete() for attr in self._cache.values()]
        self._recache()

    def all(self, accessing_obj=None, default_access=True):
        """
        Return all Attribute objects on this object, sorted by creation id.

        If `accessing_obj` is given, check the `attrread` lock on
        each attribute before returning them. If not given, this
        check is skipped.
        """
        if self._cache is None or not _TYPECLASS_AGGRESSIVE_CACHE:
            self._recache()
        attrs = sorted(self._cache.values(), key=lambda o: o.id)
        if accessing_obj:
            return [attr for attr in attrs
                    if attr.access(accessing_obj, self._attredit, default=default_access)]
        else:
            return attrs
class NickHandler(AttributeHandler):
    """
    Handles the addition and removal of Nicks
    (uses Attributes' `strvalue` and `category` fields)

    Nicks are stored as Attributes
    with categories `nick_<nicktype>`
    """
    _attrtype = "nick"

    def has(self, key, category="inputline"):
        "Check if the given nick exists in the given category."
        return super(NickHandler, self).has(key, category=category)

    def get(self, key=None, category="inputline", **kwargs):
        "Get the replacement value matching the given key and category"
        return super(NickHandler, self).get(key=key, category=category, strattr=True, **kwargs)

    def add(self, key, replacement, category="inputline", **kwargs):
        "Add a new nick"
        super(NickHandler, self).add(key, replacement, category=category, strattr=True, **kwargs)

    def remove(self, key, category="inputline", **kwargs):
        "Remove Nick with matching category"
        super(NickHandler, self).remove(key, category=category, **kwargs)

    def nickreplace(self, raw_string, categories=("inputline", "channel"), include_player=True):
        """
        Replace the first matching nick in raw_string with its replacement.

        Gathers nicks from this object (and, optionally, its player) for the
        given categories, then substitutes the first case-insensitive match
        at the start of the string.
        """
        # (fix) removed a stray no-op expression statement `raw_string` that
        # did nothing here.
        obj_nicks, player_nicks = [], []
        for category in make_iter(categories):
            obj_nicks.extend([n for n in make_iter(self.get(category=category, return_obj=True)) if n])
        if include_player and self.obj.has_player:
            for category in make_iter(categories):
                player_nicks.extend([n for n in make_iter(self.obj.player.nicks.get(category=category, return_obj=True)) if n])
        for nick in obj_nicks + player_nicks:
            # make a case-insensitive match here
            match = re.match(re.escape(nick.db_key), raw_string, re.IGNORECASE)
            if match:
                raw_string = raw_string.replace(match.group(), nick.db_strvalue, 1)
                break
        return raw_string
class NAttributeHandler(object):
    """
    In-memory (non-database) attribute storage.

    Mirrors the `AttributeHandler` API and backs the `.ndb` handler the same
    way `.db` is backed by `AttributeHandler`, but nothing here is persisted.
    """

    def __init__(self, obj):
        "Initialize the handler on its owning object."
        # Only a weak proxy is kept, so this handler never keeps its owner alive.
        self._store = {}
        self.obj = weakref.proxy(obj)

    def has(self, key):
        "Return True if *key* is stored on this handler."
        return key in self._store

    def get(self, key):
        "Return the value stored under *key*, or None if absent."
        return self._store.get(key, None)

    def add(self, key, value):
        "Store *value* under *key* and flag the owner for recache protection."
        self._store[key] = value
        self.obj.set_recache_protection()

    def remove(self, key):
        "Delete *key* from storage if present."
        if key in self._store:
            del self._store[key]
        self.obj.set_recache_protection(self._store)

    def clear(self):
        "Drop every stored nattribute."
        self._store = {}

    def all(self, return_tuples=False):
        "List stored keys (or (key, value) pairs), hiding _-prefixed entries."
        visible = [(key, value) for (key, value) in self._store.items()
                   if not key.startswith("_")]
        if return_tuples:
            return visible
        return [key for key, _ in visible]
|
nilq/baby-python
|
python
|
import asyncio
import logging
from .util import testing_exception_handler
# Install a test-friendly exception handler on the (implicit) event loop and
# silence asynqp's logger for the whole test session.
loop = asyncio.get_event_loop()
loop.set_exception_handler(testing_exception_handler)
logging.getLogger('asynqp').setLevel(100)  # mute the logger (level above CRITICAL=50)
|
nilq/baby-python
|
python
|
# Generated by Django 2.2.13 on 2020-09-04 06:26
import enumfields.fields
from django.db import migrations
import leasing.enums
class Migration(migrations.Migration):
    # Auto-generated by Django 2.2.13 (see header): adds the
    # `plan_unit_status` enum field to PlanUnit, defaulting to "present".
    # Generated migrations should not normally be hand-edited.

    dependencies = [
        ("leasing", "0014_add_lease_identifier_field"),
    ]

    operations = [
        migrations.AddField(
            model_name="planunit",
            name="plan_unit_status",
            field=enumfields.fields.EnumField(
                default="present",
                enum=leasing.enums.PlanUnitStatus,
                max_length=30,
                verbose_name="Plan unit status",
            ),
        ),
    ]
|
nilq/baby-python
|
python
|
import os
import time
import libtorrent as lt
from Downloader.Utils.tasks import shutdown
from Downloader.configuration import TORRENT_PATH
from Downloader.Utils.file_operations import create_folder
def download_magnetic_link(_link, _path=TORRENT_PATH):
    """Download a single magnet link with libtorrent, blocking until seeding.

    :param _link: magnet URI to download.
    :param _path: destination folder (created if missing).
    """
    ses = lt.session()
    ses.listen_on(6881, 6891)  # standard BitTorrent listen-port range
    if not os.path.exists(_path):
        os.makedirs(_path)
    params = {
        'save_path': _path,
        'storage_mode': lt.storage_mode_t(2)}
    # NOTE(review): add_magnet_uri is deprecated in newer libtorrent releases
    # (replaced by add_torrent with parse_magnet_uri) — confirm the pinned
    # libtorrent version still provides it.
    handle = lt.add_magnet_uri(ses, _link, params)
    ses.start_dht()

    # Block until the torrent's metadata has been fetched from peers.
    print('downloading metadata...')
    while not handle.has_metadata():
        time.sleep(1)
    print('got metadata, starting torrent download...')

    # Poll progress every 5 s until the torrent reaches the seeding state.
    while handle.status().state != lt.torrent_status.seeding:
        s = handle.status()

        state_str = ['queued', 'checking', 'downloading metadata', \
                     'downloading', 'finished', 'seeding', 'allocating']
        print('%.2f%% complete (down: %.1f kb/s up: %.1f kB/s peers: %d) %s' % \
              (s.progress * 100, s.download_rate / 1000, s.upload_rate / 1000, \
               s.num_peers, state_str[s.state]))
        time.sleep(5)
def download_torrents(links, _shutdown='no', _path=TORRENT_PATH):
    """Download every magnet link into *_path*, then optionally shut down.

    :param links: iterable of magnet URIs.
    :param _shutdown: passed through to shutdown() after all downloads finish.
    :param _path: destination folder (created if missing).
    """
    create_folder(_path)
    for magnet in links:
        download_magnetic_link(magnet, _path)
    shutdown(_shutdown)
|
nilq/baby-python
|
python
|
# Logging level for the retrier service.
log_level = "INFO"
# Maximum number of tasks handled per run.
max_task_count = 1000
# Interval between database polls (units not stated here — presumably
# seconds or ms; confirm against the consumer of this setting).
poll_db_interval = 100
# Upper bound on simultaneous downloads.
config_max_downloading = 10000

# RabbitMQ wiring for the download retrier.
mq_queue = "download_retrier_queue"
mq_routing_key = "download_retrier_routing_key"
mq_exchange = "download_retrier_exchange"

# 52428800 bytes = 50 MiB cap on a single downloaded file.
max_file_size = 52428800

# Domains eligible for retry.
config_domains = ['youku.com', 'ykimg.com', 'tudou.com',
                  'tudouui.com', 'tdimg.com', 'le.com',
                  'letv.com', 'letvcdn.com', 'iqiyi.com',
                  'qiyi.com', 'sohu.com', 'qq.com',
                  'qzoneapp.com', 'gtimg.com']

# Fetch window: look back this many days (negative offset).
config_fetch_day = -10
# Optional SQL date-range filter appended to queries; empty = no filter.
# config_dates = "AND url_date>='20160827' AND url_date<='20160829'"
config_dates = ""
|
nilq/baby-python
|
python
|
import oemof.solph as solph
from .component import Component
class Supply(Component):
    """Generic supply component.

    Models an external commodity source such as the electricity grid, a
    thermal grid or a gas (CH4) grid.
    """

    def __init__(self, params):
        # Initialize the parent component first.
        Component.__init__(self)

        # ------------------- PARAMETERS -------------------
        self.name = 'Grid_default_name'
        # Maximum output per timestep of the commodity, e.g. electricity
        # grid [Wh], thermal grid [Wh], CH4 grid [kg/h].
        self.output_max = 8000000
        self.bus_out = None

        # ------------- PARAMETERS: ARTIFICIAL COSTS / FOREIGN STATE -------------
        # Artificial supply costs can depend on a foreign state (e.g. a
        # storage SoC). Below the threshold the low cost applies, above it
        # the high cost applies.
        self.fs_threshold = None
        # Low/high artificial cost values, e.g. [EUR/Wh], [EUR/kg].
        self.fs_low_art_cost = None
        self.fs_high_art_cost = None

        # ------------------- UPDATE PARAMETER DEFAULT VALUES -------------------
        self.set_parameters(params)

        # ------------------- INTERNAL VALUES -------------------
        # Current total cost value for this step, e.g. [EUR/Wh], [EUR/kg].
        self.current_ac = 0

    def prepare_simulation(self, components):
        """Refresh artificial costs for this timestep from the foreign state."""
        if self.fs_component_name is not None:
            below_threshold = self.get_foreign_state_value(components) < self.fs_threshold
            self.artificial_costs = (
                self.fs_low_art_cost if below_threshold else self.fs_high_art_cost)
        # Total commodity cost for this timestep (costs + artificial costs).
        self.current_ac = self.get_costs_and_art_costs()

    def create_oemof_model(self, busses, _):
        """Build and return the oemof Source representing this supply."""
        return solph.Source(
            label=self.name,
            outputs={busses[self.bus_out]: solph.Flow(
                nominal_value=self.output_max,
                variable_costs=self.current_ac)})
|
nilq/baby-python
|
python
|
from dicom_parser.utils.sequence_detector.sequences.mr.dwi.derived import \
DWI_DERIVED_RULES
from dicom_parser.utils.sequence_detector.sequences.mr.dwi.diffusion import \
DWI_RULES
from dicom_parser.utils.sequence_detector.sequences.mr.dwi.fieldmap import \
DWI_FIELDMAP
from dicom_parser.utils.sequence_detector.sequences.mr.dwi.sbref import \
DWI_SBREF_RULES
# Registry mapping diffusion-weighted MR sequence labels to their detection
# rule sets (imported above from the per-variant rule modules).
MR_DIFFUSION_SEQUENCES = {
    "dwi": DWI_RULES,
    "dwi_derived": DWI_DERIVED_RULES,
    "dwi_fieldmap": DWI_FIELDMAP,
    "dwi_sbref": DWI_SBREF_RULES,
}
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from typing import Union
import urllib3
from indico.config import IndicoConfig
from indico.http.client import HTTPClient
from indico.client.request import HTTPRequest, RequestChain, PagedRequest
class IndicoClient:
    """
    The Indico GraphQL Client.

    IndicoClient is the primary way to interact with the Indico Platform.

    Args:
        config= (IndicoConfig, optional): IndicoConfig object with environment configuration

    Returns:
        IndicoConfig object

    Raises:
        RuntimeError: If api_token_path does not exist.
    """

    def __init__(self, config: IndicoConfig = None):
        config = config or IndicoConfig()
        if not config.verify_ssl:
            # SSL verification disabled: silence urllib3's per-request warning.
            urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        self.config = config
        self._http = HTTPClient(config)

    def _handle_request_chain(self, chain: RequestChain):
        # Execute each request in the chain, feeding every response back into
        # ``chain.previous`` so later requests can build on earlier results.
        response = None
        for req in chain.requests():
            if isinstance(req, HTTPRequest):
                response = self._http.execute_request(req)
                chain.previous = response
            elif isinstance(req, RequestChain):
                response = self._handle_request_chain(req)
                chain.previous = response
        # A chain may compute an aggregate result; fall back to the last response.
        return chain.result if chain.result else response

    def call(self, request: Union[HTTPRequest, RequestChain]):
        """
        Make a call to the Indico IPA Platform

        Args:
            request (GraphQLRequest or RequestChain): GraphQL request to send to the Indico Platform

        Returns:
            Response appropriate to the class of the provided request parameter. Often JSON but not always.

        Raises:
            IndicoRequestError: With errors in processing the request
        """
        if isinstance(request, RequestChain):
            return self._handle_request_chain(request)
        if request and isinstance(request, HTTPRequest):
            return self._http.execute_request(request)

    def paginate(self, request: PagedRequest):
        """
        Provides a generator that continues paging through responses

        Available with List<> Requests that offer pagination

        Example:
            for s in client.paginate(ListSubmissions()):
                print("Submission", s)
        """
        while request.has_next_page:
            yield self._http.execute_request(request)
|
nilq/baby-python
|
python
|
import os
#import requests
import sys, urllib2, urllib
comp_err_file = open("compile.e", 'r')
comp_err_str = comp_err_file.read()
comp_out_file = open("compile.o", 'r')
comp_out_str = comp_out_file.read()
fileName = str(sys.argv[1])
print 'something'
data = urllib.urlencode({'fileName':fileName,'compileO':comp_out_str, 'compileE':comp_err_str})
req = urllib.urlopen("http://10.201.136.134:8000/COL380/API/Compile/", data)
#response = urllib.urlopen(req)
if (len(comp_err_file.read()) == 0):
# compilation success
os.system("/opt/pbs/default/bin/qsub -P cse -N Test1 -lselect=1:ncpus=21:mem=24gb -l walltime=00:15:00 run1.sh")
os.system("/opt/pbs/default/bin/qsub -P cse -N Test2 -lselect=1:ncpus=21:mem=24gb -l walltime=00:15:00 run2.sh")
os.system("/opt/pbs/default/bin/qsub -P cse -N Test3 -lselect=1:ncpus=21:mem=24gb -l walltime=00:15:00 run3.sh")
os.system("/opt/pbs/default/bin/qsub -P cse -N Test4 -lselect=1:ncpus=21:mem=24gb -l walltime=00:15:00 run4.sh")
os.system("/opt/pbs/default/bin/qsub -P cse -N Test5 -lselect=1:ncpus=21:mem=24gb -l walltime=00:15:00 run5.sh")
os.system("/opt/pbs/default/bin/qsub -P cse -N sendStatus -lselect=1:ncpus=21:mem=24gb -l walltime=00:15:00 SendStatus.sh")
|
nilq/baby-python
|
python
|
""" Keras Retinanet from https://github.com/fizyr/keras-retinanet
Some slight refactoring are done to improve reusability of codebase
"""
import keras
from .. import initializers
from .. import layers
from .. import losses
from ._retinanet_config import make_config
from ._retinanet import (
default_classification_model,
default_regression_model,
create_pyramid_features,
apply_model_to_features,
compute_anchors
)
from ._load_backbone import (
load_backbone,
load_backbone_preprocessing,
load_backbone_custom_objects
)
def compile_retinanet(
    training_model,
    huber_sigma=3.0,
    focal_alpha=0.25,
    focal_gamma=2.0,
    optimizer=None
):
    """Compile a RetinaNet training model with the standard detection losses.

    Uses a huber loss on box regression and a focal loss on classification;
    falls back to a low-lr Adam optimizer when none is given.
    """
    if optimizer is None:
        optimizer = keras.optimizers.adam(lr=1e-5, clipnorm=0.001)
    regression_loss = losses.make_detection_huber_loss(sigma=huber_sigma)
    classification_loss = losses.make_detection_focal_loss(
        alpha=focal_alpha, gamma=focal_gamma)
    training_model.compile(
        loss={
            'regression': regression_loss,
            'classification': classification_loss,
        },
        optimizer=optimizer
    )
def RetinaNetLoad(filepath, backbone='resnet50'):
    """ Loads a retinanet model from a file
    Args
        filepath: one of the following:
            - string, path to the saved model, or
            - h5py.File object from which to load the model
        backbone : Backbone with which the model was trained.
    """
    # Custom layers/initializers/losses that Keras needs to deserialize.
    custom_objects = dict(
        PriorProbability=initializers.PriorProbability,
        ResizeTo=layers.ResizeTo,
        Anchors=layers.Anchors,
        ClipBoxes=layers.ClipBoxes,
        RegressBoxes=layers.RegressBoxes,
        FilterDetections=layers.FilterDetections,
        detection_focal_loss=losses.make_detection_focal_loss(),
        detection_huber_loss=losses.make_detection_huber_loss(),
    )
    # The backbone may contribute additional custom objects.
    custom_objects.update(load_backbone_custom_objects(backbone))
    return keras.models.load_model(filepath, custom_objects=custom_objects)
def RetinaNetTrain(num_classes, **kwargs):
    """ Construct a RetinaNet model for training
    Args
        num_classes : Number of object classes to detect.
        Refer to keras_collections.models._retinanet_config.py for other kwargs.
    Returns
        training_model : RetinaNet training model (a keras.models.Model object)
            - Outputs of this model are [anchor_regressions, anchor_classifications]
            - Shapes would be [(batch_size, num_anchors, 4), (batch_size, num_anchors, num_classes)]
        config : The network configs (used to convert into a prediction model)
    """
    kwargs['num_classes'] = num_classes
    config = make_config(**kwargs)
    # Make all submodels: backbone plus the shared regression/classification heads.
    backbone_model = load_backbone(
        config.backbone,
        freeze_backbone=config.freeze_backbone
    )
    regression_model = default_regression_model(
        config.num_anchors,
        pyramid_feature_size=config.pyramid_feature_size,
        regression_feature_size=config.regression_feature_size
    )
    classification_model = default_classification_model(
        config.num_classes, config.num_anchors,
        pyramid_feature_size=config.pyramid_feature_size,
        classification_feature_size=config.classification_feature_size
    )
    # Create inputs and apply the backbone-specific preprocessing.
    model_inputs = keras.Input(shape=(None, None, 3))
    preprocessing_layer = load_backbone_preprocessing(config.backbone)
    preprocessed_inputs = preprocessing_layer(model_inputs)
    # Create the feature pyramid from the backbone's last three stages.
    C3, C4, C5 = backbone_model(preprocessed_inputs)[-3:]
    features = create_pyramid_features(C3, C4, C5, feature_size=config.pyramid_feature_size)
    # Compute outputs by applying each head to every pyramid level.
    regression_outputs = apply_model_to_features('regression' , regression_model , features)
    classification_outputs = apply_model_to_features('classification', classification_model, features)
    return keras.models.Model(
        inputs=model_inputs,
        outputs=(regression_outputs, classification_outputs),
        name=config.name + '_train'
    ), config
def RetinaNetFromTrain(
    training_model,
    config,
    nms=True,
    class_specific_filter=True,
):
    """ Converts a RetinaNet training model into a prediction model
    Args
        training_model        : The RetinaNetTrain model
        config                : The configs returned by the training model
        nms                   : Flag to trigger if nms is to be applied
        class_specific_filter : Flag to trigger if nms is to be applied to each class
    Returns
        RetinaNet prediction model (a keras.models.Model object)
            - Outputs of this model are [boxes, scores, labels]
            - Shapes would be [(batch_size, max_detection, 4), (batch_size, max_detection), (batch_size, max_detection)]
    """
    # Compute anchors from the pyramid levels of the training model.
    features = [training_model.get_layer(p_name).output for p_name in ['P3', 'P4', 'P5', 'P6', 'P7']]
    anchors = compute_anchors(
        features,
        sizes=config.anchor_sizes,
        strides=config.anchor_strides,
        ratios=config.anchor_ratios,
        scales=config.anchor_scales,
    )
    # Get training_model outputs (per-anchor regressions and class scores).
    regression = training_model.outputs[0]
    classification = training_model.outputs[1]
    # Apply predicted regression to anchors, then clip boxes to the image.
    boxes = layers.RegressBoxes(name='boxes')([anchors, regression])
    boxes = layers.ClipBoxes(name='clipped_boxes')([training_model.inputs[0], boxes])
    # Filter detections (score thresholding and optional NMS).
    detections = layers.FilterDetections(
        nms=nms,
        class_specific_filter=class_specific_filter,
        name='filtered_detections'
    )([boxes, classification])
    return keras.models.Model(inputs=training_model.inputs, outputs=detections, name=config.name)
|
nilq/baby-python
|
python
|
from wtforms import fields, validators as va, Form
from receipt_split.models import MAX_MESSAGE_LENGTH
from . import UserSummaryForm
class PaymentForm(Form):
    """Form validating a payment: a message, a positive amount and a recipient."""
    # Free-text note attached to the payment; bounded by the model's limit.
    message = fields.StringField("Message", [va.length(min=1,
                                                       max=MAX_MESSAGE_LENGTH
                                                       )])
    # Payment amount; must be at least 0.01.
    amount = fields.DecimalField("Decimal", [va.NumberRange(min=0.01)])
    # Recipient user, embedded as a nested summary form.
    to_user = fields.FormField(UserSummaryForm)
|
nilq/baby-python
|
python
|
from .startapp import StartApplication
|
nilq/baby-python
|
python
|
import numpy as np
from seedbank._keys import make_key, make_seed
class SeedState:
    """
    Manage a root seed and facilities to derive seeds.
    """

    _seed: np.random.SeedSequence

    def __init__(self, seed=None):
        # Default to fresh OS entropy when no seed is supplied.
        if seed is None:
            seed = np.random.SeedSequence()
        self._seed = seed

    @property
    def seed(self) -> np.random.SeedSequence:
        "Get the seed sequence for this seed state."
        return self._seed

    @property
    def int_seed(self):
        "Get this seed as an integer."
        return self.entropy(1)[0]

    def entropy(self, words):
        """
        Get *n* words of entropy as a NumPy array.

        Args:
            words(int): the number of words to return.

        Returns:
            numpy.ndarray: the entropy.
        """
        return self._seed.generate_state(words)

    def initialize(self, seed, keys):
        """
        Replace the root seed (optionally derived with *keys*) and return it.
        """
        seed = make_seed(seed)
        if keys:
            seed = self.derive(seed, keys).seed
        self._seed = seed
        return seed

    def derive(self, base, keys=None):
        """
        Derive a new seed state.

        Args:
            base(seed-like):
                The base seed. If ``None``, use this seed state.
            keys(list of seed-like):
                Additional keys for deriving the seed. If no keys are
                provided, calls :meth:`numpy.random.SeedSequence.spawn` to
                obtain a new RNG.

        Returns:
            SeedState: the derived seed state.
        """
        if base is None:
            base = self.seed
        else:
            base = make_seed(base)
        if keys:
            # Extend the spawn key so derivations are reproducible per key set.
            k2 = tuple(make_key(k) for k in keys)
            seed = np.random.SeedSequence(base.entropy, spawn_key=base.spawn_key + k2)
        else:
            seed = base.spawn(1)[0]
        return SeedState(seed)

    def rng(self, seed=None):
        """
        Create a :class:`numpy.random.Generator`, seeded from this state
        (when *seed* is None) or from the given seed-like value.
        """
        if seed is None:
            seed, = self.seed.spawn(1)
        elif not isinstance(seed, np.random.SeedSequence):
            # BUG FIX: the original passed the ``make_key`` function object
            # itself to SeedSequence (a TypeError at runtime); convert the
            # provided value into a proper seed instead.
            seed = make_seed(seed)
        return np.random.default_rng(seed)
|
nilq/baby-python
|
python
|
# By Nick Cortale
# 2017-06-28
#
# Extends the functionality of faker to a more data scientist-esque approach.
# Implements some of the functions from numpy to create some fake data. This is
# also useful for creating data sets with a certain demensionality and integer
# fields.
import faker
import pandas as pd
import time as time
# Every faker provider name this module knows about.
FIELD_LIST = ['am_pm', 'boolean', 'bothify', 'bs', 'building_number', 'catch_phrase',
    'century', 'city', 'city_prefix', 'city_suffix', 'color_name', 'company',
    'company_email', 'company_suffix', 'country', 'country_code', 'credit_card_expire',
    'credit_card_number', 'credit_card_provider', 'credit_card_security_code',
    'currency_code', 'date', 'domain_name', 'domain_word', 'ean', 'ean13', 'ean8',
    'email', 'file_extension', 'file_name', 'file_path', 'first_name',
    'first_name_female', 'first_name_male', 'free_email', 'free_email_domain',
    'geo_coordinate', 'hex_color', 'image_url', 'internet_explorer', 'ipv4', 'ipv6',
    'isbn10', 'isbn13', 'iso8601', 'job', 'language_code', 'last_name',
    'last_name_female', 'last_name_male', 'latitude', 'lexify',
    'linux_platform_token', 'linux_processor', 'locale', 'longitude', 'mac_address',
    'mac_platform_token', 'mac_processor', 'md5', 'military_apo', 'military_dpo',
    'military_ship', 'military_state', 'mime_type', 'month', 'month_name', 'name',
    'name_female', 'name_male', 'null_boolean', 'numerify',
    'password', 'phone_number', 'postalcode', 'postalcode_plus4', 'postcode',
    'prefix', 'prefix_female', 'prefix_male', 'pybool', 'pydecimal', 'pyfloat',
    'pyint', 'pystr', 'random_digit', 'random_digit_not_null',
    'random_digit_not_null_or_empty', 'random_digit_or_empty', 'random_element',
    'random_int', 'random_letter', 'random_number', 'randomize_nb_elements',
    'safe_color_name', 'safe_email', 'safe_hex_color', 'secondary_address', 'seed',
    'sentence', 'sha1', 'sha256', 'slug', 'ssn', 'state',
    'state_abbr', 'street_address', 'street_name', 'street_suffix', 'suffix',
    'suffix_female', 'suffix_male', 'text', 'time',
    'timezone', 'tld', 'unix_time', 'uri', 'uri_extension', 'uri_page', 'uri_path',
    'url', 'user_agent', 'user_name', 'uuid4', 'windows_platform_token', 'word',
    'year', 'zipcode', 'zipcode_plus4']
# Subset of cheap-to-generate providers (presumably fast ones — TODO confirm).
QUICK_LIST = ['random_element', 'random_digit', 'random_digit_not_null',
    'uri_page', 'safe_color_name', 'free_email_domain', 'military_state',
    'random_int', 'uri_extension', 'state_abbr', 'state', 'pybool', 'military_ship',
    'pyint', 'tld', 'zipcode', 'random_letter', 'null_boolean', 'mac_processor',
    'randomize_nb_elements', 'city_prefix', 'linux_processor', 'company_suffix',
    'postalcode', 'city_suffix', 'unix_time', 'windows_platform_token', 'boolean',
    'century', 'linux_platform_token', 'word', 'street_suffix',
    'random_digit_not_null_or_empty', 'currency_code', 'hex_color', 'sha1',
    'credit_card_provider', 'sha256', 'md5', 'country_code',
    'random_digit_or_empty', 'country', 'safe_hex_color', 'timezone', 'uuid4',
    'geo_coordinate', 'random_number', 'language_code', 'longitude',
    'zipcode_plus4', 'latitude', 'postalcode_plus4', 'mime_type', 'file_extension',
    'prefix_male', 'job', 'mac_platform_token', 'prefix_female', 'uri_path',
    'ipv4', 'suffix_female', 'iso8601', 'locale', 'color_name', 'image_url',
    'internet_explorer', 'file_name', 'ssn', 'bs', 'time', 'numerify',
    'catch_phrase', 'prefix', 'suffix_male', 'lexify', 'suffix',
    'secondary_address', 'date', 'month_name', 'month', 'year', 'file_path',
    'pyfloat', 'credit_card_security_code', 'pydecimal', 'mac_address', 'am_pm',
    'ipv6', 'building_number', 'bothify', 'slug', 'ean8', 'military_apo',
    'military_dpo']
# Default field set used by PandasFaker when no fields are supplied.
BASIC_LIST = ['name', 'free_email_domain', 'city', 'state_abbr', 'job',
    'random_digit', 'random_digit_or_empty']
class PandasFaker(object):
    """Create fake data for data analysis or database testing purposes.

    fields : list or None
        Faker provider names to generate per record. If None (or empty),
        BASIC_LIST is used.
    """

    def __init__(self, fields=None):
        if not fields:
            fields = BASIC_LIST
        self.fields = fields
        self.faker_obj = faker.Faker()

    def _gen_fake(self):
        """Create one fake record as a dict keyed by provider name.

        Providers that are missing or not callable on the faker object are
        reported and skipped rather than aborting the whole record.
        """
        data = {}
        for field in self.fields:
            try:
                data[field] = getattr(self.faker_obj, field)()
            # Narrowed from a bare ``except``: only swallow missing or
            # non-callable providers; let genuine generation errors surface.
            except (AttributeError, TypeError):
                print("{} is not currently implemented".format(field))
        return data

    def make_fakes(self, num_fakes):
        """Create ``num_fakes`` fake records as a pandas DataFrame.

        num_fakes : int
            Number of fake records to create.
        """
        return pd.DataFrame([self._gen_fake() for _ in range(num_fakes)])
|
nilq/baby-python
|
python
|
import numpy as np
import pandas as pd
#from stat_perform import *
from utilities import *
'''
This program will calculate the IoU per emage per class
Inputs:
- .tflite: a segmentation model
- .jpg: a picture from pascal
- pascal_segmented_classes_per_image.csv file
Output:
- CSV file contains iou_score (float),
iou_per_class_array (float array size 1x20 an entry per class),
time_milisecs (time in miliseconds)
'''
#mdl_name = 'modelDeepLabV3_Mila'
#mdl_name = 'lite-model_deeplabv3-mobilenetv2_dm05_1_default_2'
#mdl_name = 'lite-model_deeplabv3-xception65_1_default_2'
#mdl_name = 'lite-model_mobilenetv2-coco_dr_1'
#mdl_name = 'lite-model_mobilenetv2-coco_dr_1'
# Paths to the PASCAL validation data and the frozen-graph models under test.
tst_path = './datasets/pascal/'
#mdl_path = './models/deep_lab_v3_plus/'
#mdl_path = './models/TensorFlowHub/'
mdl_path = './models/frozen_graph/'
csv_in = 'pascal_segmented_classes_per_image.csv'
mdls = os.listdir(mdl_path)
# Per-image class labels; drop the CSV's spurious index column.
labels = pd.read_csv(tst_path + csv_in, index_col=1).drop('Unnamed: 0', axis = 1)
label_array = labels.to_numpy()
#col_head = labels.columns
val_path = tst_path + 'Segmentation_input/validation/'
seg_path = tst_path + 'Segmentation_output/validation/'
#val_path = 'C:/Users/adi/Downloads/VOC2012/SegmentationClass/'
image_list = os.listdir(val_path)
# Calculate # uou per class
nimg = len(image_list)
# Evaluate every model over the whole validation set.
for mdl in mdls:
    iou_out = []
    i=1
    # Loop to compute the IOU
    for img in image_list:
        print('Processing image:' + img + ', image no ' + str(i) + ' of ' + str(nimg))
        #iou_score, ipclass, time_milisecs = iou_per_class(mdl_path + mdl_name + '.tflite', val_path + img, labels)
        #iou_out.append(np.hstack((iou_score, np.squeeze(ipclass), time_milisecs)))
        label = image2segmap(seg_path + os.path.splitext(img)[0] + '.png')
        #time_milisecs, iou_score = meanIou(mdl_path + os.path.splitext(mdl)[0] + '.tflite', val_path + img, seg_path + os.path.splitext(img)[0] + '.png')
        #_, iou_score, ioupclass ,time_milisecs = iou_per_pixelclass1(mdl_path + os.path.splitext(mdl)[0] + '.tflite', val_path + img, seg_path + os.path.splitext(img)[0] + '.png')
        iou_score, ioupclass ,time_milisecs = meanIougraph_2(mdl_path + os.path.splitext(mdl)[0] + '.pb', val_path + img, seg_path + os.path.splitext(img)[0] + '.png')
        # Row layout: [mean IoU, inference time (ms), per-class IoUs...].
        iou_out.append(np.hstack((iou_score, time_milisecs, ioupclass)))
        i=i+1
    iou_out = np.array(iou_out)
    # Create header for CSV
    _, label_names = get_pascal_labels()
    label_names = label_names[:-1]
    header = np.hstack(('mIOU', 'Speed (ms)', label_names))
    #header = np.hstack(('mIOU', 'Speed (ms)'))
    rst =pd.DataFrame(iou_out, columns = header, index = image_list[:iou_out.shape[0]])
    print(rst.head())
    # Do mean for evaluating the model performace
    iouave = iou_out.mean(axis=0)
    maiou = iouave[0]
    #mspd = iouave[-1]
    mspd = iouave[1]
    prtout = 'MAIOU: ' + str(maiou) + ', mean speed: ' + str(mspd)
    print(prtout)
    print(prtout, file=open(mdl + '_maiou.txt', "a"))
    # Create the csv file
    rst.to_csv(mdl + '_miou.csv')
|
nilq/baby-python
|
python
|
"""Microsoft Teams destination."""
import logging
import pymsteams
def build_notification_text(text_parameters) -> str:
    """Compose the markdown body of a metric status-change notification."""
    metrics = text_parameters["metrics"]
    plural_s = "s" if len(metrics) > 1 else ""
    header = (
        f'[{text_parameters["report_title"]}]({text_parameters["url"]})'
        f' has {len(metrics)} metric{plural_s} that changed status:\n\n')
    bullets = []
    for metric in metrics:
        unit = metric["metric_unit"]
        # Percent units attach directly to the value; others get a space.
        if not unit.startswith("%"):
            unit = f" {unit}"
        bullets.append(
            f'* {metric["metric_name"]} status is {metric["new_metric_status"]}, '
            f'was {metric["old_metric_status"]}. '
            f'Value is {metric["new_metric_value"]}{unit}, '
            f'was {metric["old_metric_value"]}{unit}.\n')
    return header + "".join(bullets)
def send_notification_to_teams(destination: str, text: str) -> None:
    """Post ``text`` to a Microsoft Teams channel via its webhook URL."""
    logging.info("Sending notification to configured webhook")
    card = pymsteams.connectorcard(destination)
    card.text(text)
    # Delivery is best-effort: log failures instead of crashing the notifier.
    try:
        card.send()
    except Exception as reason:  # pylint: disable=broad-except
        logging.error("Could not deliver notification: %s", reason)
|
nilq/baby-python
|
python
|
from BorutaShap import BorutaShap
def test_class_constructs():
    # Smoke test: the BorutaShap default constructor should not raise.
    BorutaShap()
|
nilq/baby-python
|
python
|
#! /usr/bin/env python
import numpy as np
import math
import time
import rospy
import roslib
from geometry_msgs.msg import Twist
from std_msgs.msg import String, Float32, Int32, Bool, Int32MultiArray, Float32MultiArray
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
# Camera frame dimensions, read once from the usb_cam driver parameters.
frame_w = rospy.get_param("/usb_cam/image_width")
frame_h = rospy.get_param("/usb_cam/image_height")
# Last known ball position/size in pixels; -1 means "not detected".
ball_x = -1
ball_y = -1
ball_size = -1
def ball_pos_callback(pos_msg):
    # Cache the latest ball pixel coordinates from the vision node.
    global ball_x, ball_y
    ball_x = pos_msg.data[0]
    ball_y = pos_msg.data[1]
# Latest state of the two hardware buttons.
button = [0, 0]
def button_callback(pos_msg):
    # Cache the latest button states.
    global button
    button[0] = pos_msg.data[0]
    button[1] = pos_msg.data[1]
# Latest compass heading (degrees — TODO confirm units against publisher).
magneto = 0
def compass_callback(cmps_msg):
    # Cache the latest compass reading.
    global magneto
    magneto = cmps_msg.data
# Consecutive calls without a ball detection.
count_ball_loss = 0
def ball_lost(threshold):
    # Return True once the ball has been missing for `threshold` consecutive
    # calls; any detection resets the counter.
    global count_ball_loss
    if ball_x == -1 and ball_y == -1:
        count_ball_loss += 1
        if count_ball_loss >= threshold :
            return True
    else :
        count_ball_loss = 0
    return False
def head_move(head_pan, head_tilt):
    """Publish an absolute head pan/tilt target and remember it globally."""
    global pos_pan, pos_tilt
    pos_pan, pos_tilt = head_pan, head_tilt
    msg = Float32MultiArray()
    msg.data = [pos_pan, pos_tilt]
    head_pub.publish(msg)
def walk(x, y, a):
    """Publish a walk command: forward x, lateral y, angular a (as linear.z)."""
    cmd = Twist()
    cmd.linear.x = x
    cmd.linear.y = y
    # NOTE(review): angular velocity is carried in linear.z by convention of
    # the motion node — confirm against the subscriber.
    cmd.linear.z = a
    motion_vel_pub.publish(cmd)
# Current head position (radians) and its mechanical limits.
pos_pan = 0.0
pos_tilt = 0.0
pan_min = -1.5
pan_max = 1.5
tilt_min = -1.3
tilt_max = 0.0
def head_limit(pos_pan, pos_tilt):
    """Clamp pan/tilt into their mechanical limits; return a Float32MultiArray."""
    pos_pan = max(pan_min, min(pan_max, pos_pan))
    pos_tilt = max(tilt_min, min(tilt_max, pos_tilt))
    head_pos = Float32MultiArray()
    head_pos.data = [pos_pan, pos_tilt]
    return head_pos
# Head sweep step sizes (sign encodes direction) and pan/tilt alternation flag.
pan_step = 0.0
tilt_step = 0.0
move_pan = True
def scan_ball(mode):
    # Sweep the head to search for the ball.
    # mode 0: sweep pan and tilt together; 1: tilt only (pan centered);
    # 2: rectangle sweep alternating between a pan pass and a tilt pass.
    global pos_pan, pos_tilt, pan_step, tilt_step, move_pan
    # Refresh step magnitudes from the parameter server, keeping the current
    # sweep direction (sign).
    if pan_step > 0:
        pan_step = rospy.get_param("/united_soccer_params/Pan_Step")
    else:
        pan_step = rospy.get_param("/united_soccer_params/Pan_Step") * -1
    if tilt_step > 0:
        tilt_step = rospy.get_param("/united_soccer_params/Tilt_Step")
    else:
        tilt_step = rospy.get_param("/united_soccer_params/Tilt_Step") * -1
    if mode == 0: # normal
        pos_pan += pan_step
        if pos_pan >= pan_max or pos_pan <= pan_min:
            pan_step *= -1
        pos_tilt += tilt_step
        if pos_tilt >= tilt_max or pos_tilt <= tilt_min:
            tilt_step *= -1
    elif mode == 1: # only tilt
        pos_pan = 0.0
        pos_tilt += tilt_step
        if pos_tilt >= tilt_max or pos_tilt <= tilt_min:
            tilt_step *= -1
    elif mode == 2: # rectangle
        if move_pan:
            pos_pan += pan_step
            if pos_pan >= pan_max or pos_pan <= pan_min:
                pan_step *= -1
                move_pan = False
        else:
            pos_tilt += tilt_step
            if pos_tilt >= tilt_max or pos_tilt <= tilt_min:
                tilt_step *= -1
                move_pan = True
    # Clamp to mechanical limits and publish the new head position.
    head_pos = head_limit(pos_pan, round(pos_tilt, 3))
    pos_pan, pos_tilt = head_pos.data
    head_pub.publish(head_pos)
# PID accumulators for head ball-tracking.
sum_err_pan = 0
sum_err_tilt = 0
last_error_x = 0
last_error_y = 0
def head_track_ball():
    """PID-track the detected ball with the head (pan and tilt axes).

    Errors are converted from pixels to radians using the camera field of
    view (77.32 deg horizontal, 61.93 deg vertical — assumed from the
    constants; confirm against the camera spec).
    """
    global pos_pan, pos_tilt, sum_err_pan, sum_err_tilt, last_error_x, last_error_y
    global freq
    dt = 1.0 / float(freq)
    # PID gains are live-tunable via the parameter server.
    KP_pan = rospy.get_param("/united_soccer_params/Pan_KP")
    KI_pan = rospy.get_param("/united_soccer_params/Pan_KI")
    KD_pan = rospy.get_param("/united_soccer_params/Pan_KD")
    KP_tilt = rospy.get_param("/united_soccer_params/Tilt_KP")
    KI_tilt = rospy.get_param("/united_soccer_params/Tilt_KI")
    KD_tilt = rospy.get_param("/united_soccer_params/Tilt_KD")
    if ball_x != -1 and ball_y != -1:
        # Horizontal error in radians.
        error_x = (frame_w/2) - ball_x
        error_x *= 77.32 / frame_w
        error_x = (error_x * math.pi)/ 180
        error_x_diff = error_x - last_error_x
        # NOTE(review): P term uses the previous error (one-cycle lag);
        # mirrored on the tilt axis, so presumably intentional — confirm.
        P_pan = last_error_x * KP_pan
        sum_err_pan += error_x * dt
        I_pan = sum_err_pan * KI_pan
        deriv_err_pan = error_x_diff / dt
        D_pan = deriv_err_pan * KD_pan
        last_error_x = error_x
        pos_pan += (P_pan + I_pan + D_pan)
        # Vertical error in radians (sign flipped: image y grows downward).
        error_y = (frame_h/2) - ball_y
        error_y *= -1
        error_y *= 61.93 / frame_h
        error_y = (error_y * math.pi) /180
        error_y_diff = error_y - last_error_y
        P_tilt = last_error_y * KP_tilt
        sum_err_tilt += error_y * dt
        I_tilt = sum_err_tilt * KI_tilt
        # BUG FIX: the derivative must come from the error delta, not the
        # integral sum (the pan axis already uses error_x_diff / dt).
        deriv_err_tilt = error_y_diff / dt
        D_tilt = deriv_err_tilt * KD_tilt
        last_error_y = error_y
        pos_tilt += (P_tilt + I_tilt + D_tilt)
    # Clamp and publish the (possibly unchanged) head position.
    head_pos = head_limit(pos_pan, round(pos_tilt, 2))
    pos_pan, pos_tilt = head_pos.data
    head_pub.publish(head_pos)
def body_track_ball():
    """Return a clamped angular speed that turns the body toward the ball."""
    global pos_pan, pos_tilt
    KP_body = rospy.get_param("/united_soccer_params/Body_KP")
    # The head pan angle is the body's heading error; zero when no ball.
    if ball_x != -1 and ball_y != -1:
        error_body_a = pos_pan - 0
    else:
        error_body_a = 0
    max_walk_a = 0.4
    body_a = max(-max_walk_a, min(max_walk_a, error_body_a * KP_body))
    return round(body_a, 2)
# Latched "ball in kicking position" flag and last commanded x/y speeds.
ball_pos = False
px_ball_pos = 0.00
py_ball_pos = 0.00
def ball_positioning(setPoint_X, setPoint_Y, speed=0.10):
    # Walk so that the ball ends up at the given head pan/tilt set-point.
    global pos_pan, pos_tilt, ball_pos, px_ball_pos, py_ball_pos
    errorPos_X = pos_pan - setPoint_X
    errorPos_Y = pos_tilt - setPoint_Y
    KP_ball_positioning_y = rospy.get_param("/united_soccer_params/KP_Ball_Pos_Y")
    # print("KP_BALL_POS", KP_ball_positioning_y)
    if (errorPos_X > -0.10 and errorPos_X < 0.10) and (errorPos_Y > -0.10):
        # Close enough on both axes: stop and latch "positioned".
        px_ball_pos = 0.00
        py_ball_pos = 0.00
        ball_pos = True
    else:
        ball_pos = False
        if (pos_pan >= 1.0 and pos_tilt >= -1.2) or (pos_pan <= -1.0 and pos_tilt >= -1.2): # ball at the side (pan/tilt polar case)
            px_ball_pos = -0.03
            py_ball_pos = errorPos_X * KP_ball_positioning_y
        else:
            # X Move
            if pos_tilt > setPoint_Y:
                px_ball_pos = -0.03
            elif pos_tilt >= (setPoint_Y - 0.1) and pos_tilt <= setPoint_Y:
                px_ball_pos = 0.00
            elif pos_tilt >= (setPoint_Y - 0.3) and pos_tilt < (setPoint_Y - 0.1):
                px_ball_pos = errorPos_Y * -speed
                if px_ball_pos >= 0.02:
                    px_ball_pos = 0.02
                elif px_ball_pos <= 0.00:
                    px_ball_pos = 0.00
            else: # ball still far away
                px_ball_pos = pos_tilt * (0.08 / -1.6)
            # Y Move
            if pos_pan >= (setPoint_X - 0.1) and pos_pan <= (setPoint_X + 0.1):
                py_ball_pos = 0.00
            else: # not yet within range
                py_ball_pos = errorPos_X * KP_ball_positioning_y
    walk(round(px_ball_pos, 3), round(py_ball_pos,3), 0.0)
    # walk(0.00,0.00,0.00)
def ball_positioning2(setPoint_X, setPoint_Y, speed=0.10):
    """Proportional variant of ball positioning toward a pan/tilt set-point.

    Sets the global ``ball_pos`` latch when the ball is in position.
    """
    global pos_pan, pos_tilt, ball_pos, px_ball_pos, py_ball_pos
    errorPos_X = pos_pan - setPoint_X
    errorPos_Y = pos_tilt - setPoint_Y
    # print("error", errorPos_X, errorPos_Y)
    KP_ball_positioning_x = rospy.get_param("/united_soccer_params/KP_Ball_Pos_X")
    KP_ball_positioning_y = rospy.get_param("/united_soccer_params/KP_Ball_Pos_Y")
    if (errorPos_X > -0.08 and errorPos_X < 0.08 and errorPos_Y > -0.10):
        ball_pos = True
    # BUG FIX: the original condition `pos_pan > 1.35 and pos_pan < 1.35` can
    # never be true (dead branch); this case is meant to fire when the ball
    # is far to either side, cf. the side-handling in ball_positioning.
    elif (pos_pan > 1.35 or pos_pan < -1.35):
        py_ball_pos = errorPos_X * KP_ball_positioning_x
        px_ball_pos = -errorPos_Y * KP_ball_positioning_y
        if py_ball_pos >= 0.03:
            py_ball_pos = 0.03
        if py_ball_pos <= -0.03:
            py_ball_pos = -0.03
        # Back up slightly while sidestepping toward the ball.
        walk(-0.015, round(py_ball_pos,3), 0.0)
        ball_pos = False
    else:
        py_ball_pos = errorPos_X * KP_ball_positioning_x
        px_ball_pos = -errorPos_Y * KP_ball_positioning_y
        if px_ball_pos >= 0.04:
            px_ball_pos = 0.04
        if py_ball_pos >= 0.03:
            py_ball_pos = 0.03
        if py_ball_pos <= -0.03:
            py_ball_pos = -0.03
        walk(round(px_ball_pos, 3), round(py_ball_pos,3), 0.0)
        ball_pos = False
def kick():
    # Position behind the ball and trigger the kick action for the chosen foot.
    global pos_pan, pos_tilt, ball_pos
    pPan_kick = rospy.get_param("/united_soccer_params/Pan_Kick")
    pTilt_kick = rospy.get_param("/united_soccer_params/Tilt_Kick")
    # Choose the kicking foot from the ball's side (head pan sign).
    if pos_pan >= 0 :#and right_kick == False and left_kick == False: # left_kick
        left_kick = True
        right_kick = False
    elif pos_pan <= 0 :#and right_kick == False and left_kick == False: # right_kick
        right_kick = True
        left_kick = False
    if left_kick:
        if ball_pos:
            # Ball is in position: stop walking, then play the left-kick action.
            motion_state_pub.publish("stop")
            time.sleep(1)
            motion_state_pub.publish("action 1")
            # return True
        else:
            ball_positioning(-pPan_kick, pTilt_kick, 0.10)
    if right_kick:
        if ball_pos:
            # Ball is in position: stop walking, then play the right-kick action.
            motion_state_pub.publish("stop")
            time.sleep(1)
            motion_state_pub.publish("action 2")
            # return True
        else:
            ball_positioning(pPan_kick, pTilt_kick, 0.10)
# Consecutive cycles in which the ball has been close enough to kick.
count_ready_kick = 0
def followBall(mode): # 0: normal, 1: while turning
    head_track_ball()
    global pos_pan, pos_tilt, count_ready_kick
    set_point_pan = 0.0
    set_point_tilt = 0.0
    # Clamp the tilt used for the distance estimate.
    if pos_tilt >= set_point_tilt:
        pos_tilt = set_point_tilt
    elif pos_tilt < -2.0:
        pos_tilt = -2.0
    error_fPan = pos_pan - set_point_pan
    error_fTilt = pos_tilt - set_point_tilt
    if pos_tilt >= set_point_tilt and pos_pan < 0.4 and pos_pan > -0.4 and ball_x != -1 and ball_y != -1: # stop (ball is close)
        count_ready_kick += 1
    else: # chase the ball (still far)
        count_ready_kick = 0
    if count_ready_kick >= 5:
        px_move = 0.0 # walk in place
        py_move = error_fPan * 0.040 # 0.045
        pa_move = error_fPan * 0.20 # 0.30 0.045
    else:
        # Forward speed scales with how far down the head is tilted
        # (more tilt = ball closer = slower approach).
        if pos_tilt < -1.5:
            px_move = 0.05
        elif pos_tilt >= -1.5 and pos_tilt < -1.3:
            px_move = 0.04
        elif pos_tilt > -1.0:
            px_move = 0.03
        else:
            px_move = 0.02
        py_move = error_fPan * 0.045 # 0.045
        pa_move = error_fPan * 0.25 # 0.35 #0.045
    if mode == 0: # Mode differential walking
        if error_fPan > -0.4 and error_fPan < 0.4:
            # print("AA\n")
            walk(round(px_move, 3), 0.0, round(pa_move,3))
        else:
            # print("BB\n")
            walk(0.0, 0.0, round(pa_move, 3))
    elif mode == 1: # Mode omnidirectional walking
        if error_fPan > -0.4 and error_fPan < 0.4:
            # print("CC\n")
            walk(round(px_move, 3), round(py_move,3), round(pa_move,3))
        else:
            #printf("DD\n");
            walk(0.0, 0.0, round(pa_move,3))
def compass_goal_found(compass_goal, compass_minmax=40):
    """Return True when the compass heading is within +/- compass_minmax
    degrees of compass_goal.

    BUG FIX: the original wrap-around handling mapped e.g. -10 deg to 10 deg
    (via ``360 - x`` followed by ``x - 360``) instead of 350 deg, and the
    final range test never handled intervals that cross the 0/360 boundary.
    Modular arithmetic fixes both.
    """
    compass_min = (compass_goal - compass_minmax) % 360
    compass_max = (compass_goal + compass_minmax) % 360
    if compass_min <= compass_max:
        found = magneto > compass_min and magneto < compass_max
    else:
        # Interval crosses 0/360: accept either side of the wrap.
        found = magneto > compass_min or magneto < compass_max
    if found:
        print("True")
        return True
    else:
        print("False")
        return False
def kill_node():
    # Request a clean shutdown of this ROS node.
    rospy.signal_shutdown("shutdown time.")
def main():
    """Entry point: set up ROS pub/sub and run the soccer behavior loop.

    The loop is a simple string-keyed state machine; button 0 toggles
    play/pause, and the states drive scanning, chasing, positioning and
    kicking behaviors.
    """
    print("United Soccer Player - Running")
    rospy.init_node("united_soccer_player")
    rospy.wait_for_service("/srv_controller")
    global head_pub, motion_vel_pub, motion_state_pub
    motion_vel_pub = rospy.Publisher("/motion/cmd_vel", Twist, queue_size=1)
    motion_state_pub = rospy.Publisher("/motion/state", String, queue_size=1)
    head_pub = rospy.Publisher("/head/pos", Float32MultiArray, queue_size=1)
    ball_pos_sub = rospy.Subscriber("/united_soccer/ball/position", Int32MultiArray, ball_pos_callback)
    button_sub = rospy.Subscriber("/button/state", Int32MultiArray, button_callback)
    compass_sub = rospy.Subscriber("/compass/value", Int32, compass_callback)
    print("United Soccer Player - Running")
    time.sleep(0.3)
    motion_state_pub.publish("stand")
    global freq
    freq = 50
    rate = rospy.Rate(freq)
    # State-machine bookkeeping.
    state = "initial"
    play = False
    button_pressed = [0, 0]
    conf_stop = 0
    foot = "left"
    found_ball = 0
    compass_goal = 175  # target heading toward the opponent goal (degrees)
    count_goal_found = 0
    while not rospy.is_shutdown():
        # Button 0 toggles play/pause on release (edge-triggered).
        if button[0] == 1:
            button_pressed[0] = 1
        else:
            if button_pressed[0] == 1:
                if play:
                    motion_state_pub.publish("sit")
                    print("Sit")
                    play = False
                else:
                    motion_state_pub.publish("stand")
                    print("Stand")
                    play = True
                    state = "initial"
                button_pressed[0] = 0
        #///////////////////////////////////////////////////////////////////////
        #//////////////.............Role of execution............///////////////
        #///////////////////////////////////////////////////////////////////////
        if play :
            # print("pospan %f postilt %f", pos_pan, pos_tilt)
            print(state)
            if state == "initial":
                # Scan until the ball is seen for ~50 consecutive loop ticks.
                if ball_lost(20):
                    scan_ball(0)
                    motion_state_pub.publish("stop")
                else:
                    if ball_x != -1 and ball_y != -1 :
                        found_ball += 1
                    if found_ball >= 50:
                        state = "follow_ball"
                        found_ball = 0
            elif state == "follow_ball":
                # Walk toward the ball; hand over to positioning once close
                # (tilt >= -0.6 means the head looks down at a nearby ball).
                if ball_lost(20):
                    scan_ball(0)
                    walk(0.0,0.0,0.0)
                    # motion_state_pub.publish("stop")
                    # head_move(0.0, -1.3)
                else:
                    head_track_ball()
                    followBall(1)
                    if pos_tilt >= -0.6 and ball_x != -1 and ball_y != -1:
                        state = "positioning"
                # if button[1] == 1:
                # state = "kick"
                # motion_state_pub.publish("start")
                # state = "forward"
            elif state == "positioning":
                # Nudge sideways/forward until the ball sits at the kick
                # set-point, then require 5 stable ticks before kicking.
                if ball_lost(20):
                    state = "follow_ball"
                    # scan_ball(0)
                    # walk(0.0,0.0,0.0)
                else:
                    head_track_ball()
                    set_point_x = rospy.get_param("/united_soccer_params/Pan_Kick")
                    set_point_y = rospy.get_param("/united_soccer_params/Tilt_Kick")
                    # if pos_pan > 0:
                    ball_positioning2(-set_point_x, set_point_y)
                    # if ball_pos == True and compass_goal_found(compass_goal) == True:
                    # count_goal_found += 1
                    # else:
                    # count_goal_found = 0
                    if ball_pos == True :
                        count_goal_found += 1
                    else:
                        count_goal_found = 0
                    if count_goal_found > 5:
                        state = "kick_left"
                        count_goal_found = 0
                    # else:
                    # state = "goto_goal_heading"
                    # else:
                    # ball_positioning2(-set_point_x, set_point_y)
                    # if ball_pos == True:
                    # state = "kick_left"
            elif state == "goto_goal_heading":
                # Orbit the ball until the compass heading matches the goal.
                if ball_lost(20):
                    # scan_ball(0)
                    # walk(0.0,0.0,0.0)
                    state = "follow_ball"
                else:
                    head_track_ball()
                    if compass_goal_found(compass_goal) == True:
                        count_goal_found += 1
                    else:
                        count_goal_found = 0
                    if count_goal_found > 5:
                        state = "positioning"
                        count_goal_found = 0
                    else:
                        rotate_alpha = pos_pan * rospy.get_param("/united_soccer_params/KP_Compass_A")
                        rotate_y = 0.3
                        if pos_tilt > -0.3:
                            walk(0.00, round(rotate_y, 3), round(rotate_alpha,3))
                        else:
                            # Too far from the ball: close the gap while orbiting.
                            error_tilt = pos_tilt - rospy.get_param("/united_soccer_params/Tilt_Kick")
                            rotate_x = -error_tilt * rospy.get_param("/united_soccer_params/KP_Compass_X")
                            walk(round(rotate_x, 3), round(rotate_y, 3), round(rotate_alpha,3))
            elif state == "kick_right":
                # "action 2" is the right-leg kick motion page.
                motion_state_pub.publish("stop")
                time.sleep(1)
                motion_state_pub.publish("action 2")
                state = "initial"
            elif state == "kick_left":
                # "action 1" is the left-leg kick motion page.
                motion_state_pub.publish("stop")
                time.sleep(1)
                motion_state_pub.publish("action 1")
                state = "initial"
            elif state == "tune_head":
                # Debug state: exercise head tracking only.
                if ball_lost(20):
                    scan_ball(0)
                else:
                    head_track_ball()
                # print("%d, %d", ball_x, ball_y)
            elif state == "tune_body":
                # Debug state: rotate the body to keep the ball centered.
                if ball_lost(20):
                    motion_state_pub.publish("stop")
                    scan_ball(0)
                else:
                    head_track_ball()
                    shift = body_track_ball()
                    walk(0.0, 0.0, shift)
                # print("%d, %d", ball_x, ball_y)
            elif state == "test_kick":
                # Debug state: button 1 alternates left/right kicks.
                if button[1] == 1:
                    if foot == "left":
                        motion_state_pub.publish("action 1") # left_kick
                        foot = "right"
                    elif foot == "right":
                        motion_state_pub.publish("action 2") # right_kick
                        foot = "left"
        rate.sleep()
    print("United Soccer Player - Shut Down")
    # NOTE(review): this registers the shutdown hook only after the loop has
    # already exited; it was likely intended before the loop — confirm.
    rospy.on_shutdown(kill_node)
# Script entry point.
if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from builtins import str
from builtins import range
from collections import defaultdict
from itertools import chain
from lxml import etree
from lxml.html import fromstring
import numpy as np
from fonduer.models import Phrase
from fonduer.snorkel.candidates import Ngrams
from fonduer.snorkel.models.context import TemporarySpan
from fonduer.snorkel.utils import tokens_to_ngrams
from fonduer.utils_table import (min_row_diff, min_col_diff, is_row_aligned,
is_col_aligned, is_axis_aligned)
from fonduer.utils_visual import (
bbox_from_span, bbox_from_phrase, bbox_horz_aligned, bbox_vert_aligned,
bbox_vert_aligned_left, bbox_vert_aligned_right, bbox_vert_aligned_center)
from bs4 import BeautifulSoup
from fuzzywuzzy import fuzz
import os
# Default page dimensions in PDF points for US Letter (8.5" x 11" at 72 pt/in).
DEFAULT_WIDTH = 612   # 8.5 * 72
DEFAULT_HEIGHT = 792  # 11 * 72
def get_between_ngrams(c, attrib='words', n_min=1, n_max=1, lower=True):
    """Yield the ngrams lying _between_ the two Spans of a binary Candidate.

    Both Spans must share the same sentence Context.

    :param c: The binary-Span Candidate to evaluate.
    :param attrib: The token attribute type (e.g. words, lemmas, poses)
    :param n_min: The minimum n of the ngrams that should be returned
    :param n_max: The maximum n of the ngrams that should be returned
    :param lower: If 'True', all ngrams will be returned in lower case
    :rtype: a _generator_ of ngrams
    """
    if len(c) != 2:
        raise ValueError("Only applicable to binary Candidates")
    first, second = c[0], c[1]
    if first.sentence != second.sentence:
        raise ValueError("Only applicable to Candidates where both spans are \
            from the same immediate Context.")
    gap = abs(first.get_word_start() - second.get_word_start())
    # Walk right from the earlier span, or left from the later one.
    if first.get_word_start() < second.get_word_start():
        source, anchor = get_right_ngrams, first
    else:
        source, anchor = get_left_ngrams, second
    for ngram in source(anchor,
                        window=gap - 1,
                        attrib=attrib,
                        n_min=n_min,
                        n_max=n_max,
                        lower=lower):
        yield ngram
def get_left_ngrams(span,
                    window=3,
                    attrib='words',
                    n_min=1,
                    n_max=1,
                    lower=True):
    """Get the ngrams within a window to the _left_ of the Candidate from its sentence Context.

    For higher-arity Candidates, defaults to the _first_ argument.

    :param span: The Span to evaluate. If a candidate is given, default to its first Span.
    :param window: The number of tokens to the left of the first argument to return
    :param attrib: The token attribute type (e.g. words, lemmas, poses)
    :param n_min: The minimum n of the ngrams that should be returned
    :param n_max: The maximum n of the ngrams that should be returned
    :param lower: If True, all ngrams will be returned in lower case
    :rtype: a _generator_ of ngrams
    """
    if not isinstance(span, TemporarySpan):
        span = span[0]  # use the candidate's first Span
    start = span.get_word_start()
    tokens = getattr(span.sentence, attrib)[max(0, start - window):start]
    for ngram in tokens_to_ngrams(tokens, n_min=n_min, n_max=n_max,
                                  lower=lower):
        yield ngram
def get_right_ngrams(span,
                     window=3,
                     attrib='words',
                     n_min=1,
                     n_max=1,
                     lower=True):
    """Get the ngrams within a window to the _right_ of the Candidate from its sentence Context.

    For higher-arity Candidates, defaults to the _last_ argument.

    :param span: The Span to evaluate. If a candidate is given, default to its last Span.
    :param window: The number of tokens to the right of the last argument to return
    :param attrib: The token attribute type (e.g. words, lemmas, poses)
    :param n_min: The minimum n of the ngrams that should be returned
    :param n_max: The maximum n of the ngrams that should be returned
    :param lower: If True, all ngrams will be returned in lower case
    :rtype: a _generator_ of ngrams
    """
    if not isinstance(span, TemporarySpan):
        span = span[-1]  # use the candidate's last Span
    end = span.get_word_end()
    tokens = getattr(span.sentence, attrib)[end + 1:end + 1 + window]
    for ngram in tokens_to_ngrams(tokens, n_min=n_min, n_max=n_max,
                                  lower=lower):
        yield ngram
def get_matches(lf, candidate_set, match_values=(1, -1)):
    """Return a list of candidates that are matched by a particular LF.

    A simple helper function to see how many matches (non-zero by default)
    an LF gets. Returns the matched candidates, which can then be directly
    put into the Viewer.

    :param lf: The labeling function to apply to the candidate_set
    :param candidate_set: The set of candidates to evaluate
    :param match_values: Values to consider as matched; (1, -1) by default.
    :rtype: a list of candidates
    """
    # Tuple default replaces the mutable-list default of the original
    # (backward-compatible: only membership tests are performed on it).
    matches = [c for c in candidate_set if lf(c) in match_values]
    print(("%s matches") % len(matches))
    return matches
# TABLE LF HELPERS ##########################################################
def same_document(c):
    """Return True if all Spans in the given candidate are from the same Document.

    :param c: The candidate whose Spans are being compared
    :rtype: boolean
    """
    for i in range(len(c)):
        doc = c[i].sentence.document
        if doc is None or doc != c[0].sentence.document:
            return False
    return True
def same_table(c):
    """Return True if all Spans in the given candidate are from the same Table.

    :param c: The candidate whose Spans are being compared
    :rtype: boolean
    """
    for i in range(len(c)):
        sent = c[i].sentence
        if not sent.is_tabular() or sent.table != c[0].sentence.table:
            return False
    return True
# Added by Wei Li
def same_file(organic, figure):
    """Return True if the organic span and the figure come from the same file.

    :rtype: boolean
    """
    org_doc = organic.sentence.document
    fig_doc = figure.figure.document
    return org_doc == fig_doc
def mentionsFig(organic, figure):
    """Return True if the organic's sentence text mentions the figure's name.

    Both strings are compared with spaces removed and lower-cased.
    """
    normalized_text = organic.sentence.text.replace(' ', '').lower()
    normalized_name = figure.figure.name.replace(' ', '').lower()
    return normalized_name in normalized_text
def mentionsOrg(figure, organic):
    """Return True if the figure's description/text mentions the organic's name.

    The figure text is normalized (spaces removed, lower-cased) before the
    substring search.

    :rtype: boolean
    """
    fig_text = figure.figure.description
    if figure.figure.text and len(figure.figure.text) != 0:
        fig_text += figure.figure.text
    # Bug fix: str.replace/str.lower return new strings; the original
    # discarded the result, so the search ran against the raw text.
    fig_text = fig_text.replace(' ', '').lower()
    # NOTE(review): organic.text is not normalized here, unlike mentionsFig;
    # assumed intentional (organic names stored lower-case) — confirm.
    organic_name = organic.text
    return fig_text.find(organic_name) != -1
def same_row(c):
    """Return True if all Spans in the given candidate are from the same Row.

    :param c: The candidate whose Spans are being compared
    :rtype: boolean
    """
    if not same_table(c):
        return False
    anchor = c[0].sentence
    return all(is_row_aligned(c[i].sentence, anchor) for i in range(len(c)))
def same_col(c):
    """Return True if all Spans in the given candidate are from the same Col.

    :param c: The candidate whose Spans are being compared
    :rtype: boolean
    """
    if not same_table(c):
        return False
    anchor = c[0].sentence
    return all(is_col_aligned(c[i].sentence, anchor) for i in range(len(c)))
def is_tabular_aligned(c):
    """Return True if all Spans in the given candidate are from the same Row or Col.

    :param c: The candidate whose Spans are being compared
    :rtype: boolean
    """
    # Bug fix: the original returned a bare generator expression (always
    # truthy) instead of wrapping it in all(), so the row/col alignment was
    # never actually evaluated and the result was not a boolean.
    return (same_table(c) and all(
        is_col_aligned(c[i].sentence, c[0].sentence)
        or is_row_aligned(c[i].sentence, c[0].sentence)
        for i in range(len(c))))
def same_cell(c):
    """Return True if all Spans in the given candidate are from the same Cell.

    :param c: The candidate whose Spans are being compared
    :rtype: boolean
    """
    for i in range(len(c)):
        cell = c[i].sentence.cell
        if cell is None or cell != c[0].sentence.cell:
            return False
    return True
def same_phrase(c):
    """Return True if all Spans in the given candidate are from the same Phrase.

    :param c: The candidate whose Spans are being compared
    :rtype: boolean
    """
    for i in range(len(c)):
        sent = c[i].sentence
        if sent is None or sent != c[0].sentence:
            return False
    return True
def get_max_col_num(span):
    """Return the largest column number that a Span occupies.

    :param span: The Span to evaluate. If a candidate is given, default to its last Span.
    :rtype: integer or None
    """
    if not isinstance(span, TemporarySpan):
        span = span[-1]  # use the candidate's last Span
    if not span.sentence.is_tabular():
        return None
    return span.sentence.cell.col_end
def get_min_col_num(span):
    """Return the lowest column number that a Span occupies.

    :param span: The Span to evaluate. If a candidate is given, default to its first Span.
    :rtype: integer or None
    """
    if not isinstance(span, TemporarySpan):
        span = span[0]  # use the candidate's first Span
    if not span.sentence.is_tabular():
        return None
    return span.sentence.cell.col_start
def get_phrase_ngrams(span, attrib='words', n_min=1, n_max=1, lower=True):
    """Get the ngrams that are in the Phrase of the given span, not including itself.

    Note that if a candidate is passed in, all of its Spans will be searched.

    :param span: The Span whose Phrase is being searched
    :param attrib: The token attribute type (e.g. words, lemmas, poses)
    :param n_min: The minimum n of the ngrams that should be returned
    :param n_max: The maximum n of the ngrams that should be returned
    :param lower: If True, all ngrams will be returned in lower case
    :rtype: a _generator_ of ngrams
    """
    spans = [span] if isinstance(span, TemporarySpan) else span.get_contexts()
    for s in spans:
        # A window of 100 tokens on each side covers the whole phrase.
        for side in (get_left_ngrams, get_right_ngrams):
            for ngram in side(s,
                              window=100,
                              attrib=attrib,
                              n_min=n_min,
                              n_max=n_max,
                              lower=lower):
                yield ngram
def get_neighbor_phrase_ngrams(span,
                               d=1,
                               attrib='words',
                               n_min=1,
                               n_max=1,
                               lower=True):
    """Get the ngrams that are in the neighboring Phrases of the given Span.

    Note that if a candidate is passed in, all of its Spans will be searched.

    :param span: The span whose neighbor Phrases are being searched
    :param d: The maximum phrase-number distance to a neighbor Phrase
    :param attrib: The token attribute type (e.g. words, lemmas, poses)
    :param n_min: The minimum n of the ngrams that should be returned
    :param n_max: The maximum n of the ngrams that should be returned
    :param lower: If True, all ngrams will be returned in lower case
    :rtype: a _generator_ of ngrams
    """
    spans = [span] if isinstance(span, TemporarySpan) else span.get_contexts()
    for s in spans:
        current = s.sentence
        # Phrases within d positions of the span's phrase, excluding itself.
        neighbors = [phrase for phrase in current.document.phrases
                     if abs(phrase.phrase_num - current.phrase_num) <= d
                     and phrase != current]
        for phrase in neighbors:
            for ngram in tokens_to_ngrams(getattr(phrase, attrib),
                                          n_min=n_min,
                                          n_max=n_max,
                                          lower=lower):
                yield ngram
def get_cell_ngrams(span, attrib='words', n_min=1, n_max=1, lower=True):
    """Get the ngrams that are in the Cell of the given span, not including itself.

    Note that if a candidate is passed in, all of its Spans will be searched.

    :param span: The span whose Cell is being searched
    :param attrib: The token attribute type (e.g. words, lemmas, poses)
    :param n_min: The minimum n of the ngrams that should be returned
    :param n_max: The maximum n of the ngrams that should be returned
    :param lower: If True, all ngrams will be returned in lower case
    :rtype: a _generator_ of ngrams
    """
    spans = [span] if isinstance(span, TemporarySpan) else span.get_contexts()
    for s in spans:
        # Always include the span's own phrase ngrams first.
        for ngram in get_phrase_ngrams(s,
                                       attrib=attrib,
                                       n_min=n_min,
                                       n_max=n_max,
                                       lower=lower):
            yield ngram
        if not (isinstance(s.sentence, Phrase)
                and s.sentence.cell is not None):
            continue
        # Then ngrams from every other phrase in the same cell.
        for phrase in s.sentence.cell.phrases:
            if phrase == s.sentence:
                continue
            for ngram in tokens_to_ngrams(getattr(phrase, attrib),
                                          n_min=n_min,
                                          n_max=n_max,
                                          lower=lower):
                yield ngram
def get_neighbor_cell_ngrams(span,
                             dist=1,
                             directions=False,
                             attrib='words',
                             n_min=1,
                             n_max=1,
                             lower=True):
    """Get the ngrams from all Cells that are within a given Cell distance in one direction from the given Span.

    Note that if a candidate is passed in, all of its Spans will be searched.
    If ``directions=True``, each ngram will be returned with a direction in
    {'UP', 'DOWN', 'LEFT', 'RIGHT'}. The span's own phrase ngrams are always
    yielded first (without a direction tag).

    :param span: The span whose neighbor Cells are being searched
    :param dist: The Cell distance within which a neighbor Cell must be to be considered
    :param directions: A Boolean expressing whether or not to return the direction of each ngram
    :param attrib: The token attribute type (e.g. words, lemmas, poses)
    :param n_min: The minimum n of the ngrams that should be returned
    :param n_max: The maximum n of the ngrams that should be returned
    :param lower: If True, all ngrams will be returned in lower case
    :rtype: a _generator_ of ngrams (or (ngram, direction) tuples if directions=True)
    """
    # TODO: Fix this to be more efficient (optimize with SQL query)
    spans = [span] if isinstance(span, TemporarySpan) else span.get_contexts()
    for span in spans:
        for ngram in get_phrase_ngrams(
                span, attrib=attrib, n_min=n_min, n_max=n_max, lower=lower):
            yield ngram
        if isinstance(span.sentence,
                      Phrase) and span.sentence.cell is not None:
            root_cell = span.sentence.cell
            for phrase in chain.from_iterable([
                    _get_aligned_phrases(root_cell, 'row'),
                    _get_aligned_phrases(root_cell, 'col')
            ]):
                row_diff = min_row_diff(phrase, root_cell, absolute=False)
                col_diff = min_col_diff(phrase, root_cell, absolute=False)
                # Keep phrases offset on exactly one axis (xor) and within
                # dist cells of the root cell on that axis.
                if (row_diff or col_diff) and not (
                        row_diff and
                        col_diff) and abs(row_diff) + abs(col_diff) <= dist:
                    if directions:
                        direction = ''
                        # NOTE(review): positive row_diff is labeled "UP";
                        # this depends on min_row_diff's sign convention —
                        # confirm against utils_table.
                        if col_diff == 0:
                            if 0 < row_diff and row_diff <= dist:
                                direction = "UP"
                            elif 0 > row_diff and row_diff >= -dist:
                                direction = "DOWN"
                        elif row_diff == 0:
                            if 0 < col_diff and col_diff <= dist:
                                direction = "RIGHT"
                            elif 0 > col_diff and col_diff >= -dist:
                                direction = "LEFT"
                        for ngram in tokens_to_ngrams(
                                getattr(phrase, attrib),
                                n_min=n_min,
                                n_max=n_max,
                                lower=lower):
                            yield (ngram, direction)
                    else:
                        for ngram in tokens_to_ngrams(
                                getattr(phrase, attrib),
                                n_min=n_min,
                                n_max=n_max,
                                lower=lower):
                            yield ngram
def get_row_ngrams(span,
                   attrib='words',
                   n_min=1,
                   n_max=1,
                   spread=[0, 0],
                   lower=True):
    """Get the ngrams from all Cells that are in the same row as the given Span.

    Note that if a candidate is passed in, all of its Spans will be searched.

    :param span: The span whose row Cells are being searched
    :param attrib: The token attribute type (e.g. words, lemmas, poses)
    :param n_min: The minimum n of the ngrams that should be returned
    :param n_max: The maximum n of the ngrams that should be returned
    :param lower: If True, all ngrams will be returned in lower case
    :rtype: a _generator_ of ngrams
    """
    if isinstance(span, TemporarySpan):
        spans = [span]
    else:
        spans = span.get_contexts()
    for s in spans:
        for ngram in _get_axis_ngrams(s,
                                      axis='row',
                                      attrib=attrib,
                                      n_min=n_min,
                                      n_max=n_max,
                                      spread=spread,
                                      lower=lower):
            yield ngram
def get_col_ngrams(span,
                   attrib='words',
                   n_min=1,
                   n_max=1,
                   spread=[0, 0],
                   lower=True):
    """Get the ngrams from all Cells that are in the same column as the given Span.

    Note that if a candidate is passed in, all of its Spans will be searched.

    :param span: The span whose column Cells are being searched
    :param attrib: The token attribute type (e.g. words, lemmas, poses)
    :param n_min: The minimum n of the ngrams that should be returned
    :param n_max: The maximum n of the ngrams that should be returned
    :param lower: If True, all ngrams will be returned in lower case
    :rtype: a _generator_ of ngrams
    """
    if isinstance(span, TemporarySpan):
        spans = [span]
    else:
        spans = span.get_contexts()
    for s in spans:
        for ngram in _get_axis_ngrams(s,
                                      axis='col',
                                      attrib=attrib,
                                      n_min=n_min,
                                      n_max=n_max,
                                      spread=spread,
                                      lower=lower):
            yield ngram
def get_aligned_ngrams(span,
                       attrib='words',
                       n_min=1,
                       n_max=1,
                       spread=[0, 0],
                       lower=True):
    """Get the ngrams from all Cells that are in the same row or column as the given Span.

    Note that if a candidate is passed in, all of its Spans will be searched.

    :param span: The span whose row and column Cells are being searched
    :param attrib: The token attribute type (e.g. words, lemmas, poses)
    :param n_min: The minimum n of the ngrams that should be returned
    :param n_max: The maximum n of the ngrams that should be returned
    :param lower: If True, all ngrams will be returned in lower case
    :rtype: a _generator_ of ngrams
    """
    spans = [span] if isinstance(span, TemporarySpan) else span.get_contexts()
    for s in spans:
        # Row ngrams first, then column ngrams, mirroring the axis order.
        for axis_fn in (get_row_ngrams, get_col_ngrams):
            for ngram in axis_fn(s,
                                 attrib=attrib,
                                 n_min=n_min,
                                 n_max=n_max,
                                 spread=spread,
                                 lower=lower):
                yield ngram
def get_head_ngrams(span,
                    axis=None,
                    attrib='words',
                    n_min=1,
                    n_max=1,
                    lower=True):
    """Get the ngrams from the cell in the head of the row or column.

    More specifically, this returns the ngrams in the leftmost cell in a row and/or the
    ngrams in the topmost cell in the column, depending on the axis parameter.
    Note that if a candidate is passed in, all of its Spans will be searched.

    :param span: The span whose head Cells are being returned
    :param axis: Which axis {'row', 'col'} to search. If None, then both row and col are searched.
    :param attrib: The token attribute type (e.g. words, lemmas, poses)
    :param n_min: The minimum n of the ngrams that should be returned
    :param n_max: The maximum n of the ngrams that should be returned
    :param lower: If True, all ngrams will be returned in lower case
    :rtype: a _generator_ of ngrams
    """
    spans = [span] if isinstance(span, TemporarySpan) else span.get_contexts()
    axes = [axis] if axis else ['row', 'col']
    for span in spans:
        if not span.sentence.cell:
            # Not inside a table cell: end the generator (nothing to yield).
            return
        else:
            for axis in axes:
                if getattr(span.sentence, _other_axis(axis) + '_start') == 0:
                    # Span already lies in the head row/col itself.
                    # NOTE(review): this `return` aborts the remaining axes
                    # and spans, not just the current one — confirm intended.
                    return
                for phrase in getattr(
                        _get_head_cell(span.sentence.cell, axis), 'phrases',
                        []):
                    for ngram in tokens_to_ngrams(
                            getattr(phrase, attrib),
                            n_min=n_min,
                            n_max=n_max,
                            lower=lower):
                        yield ngram
def _get_head_cell(root_cell, axis):
    """Return the head cell of root_cell's row/col, or [] if none is aligned.

    The head is the aligned cell that starts first on the other axis
    (leftmost cell of a row, topmost cell of a column).
    """
    cross = 'row' if axis == 'col' else 'col'
    candidates = _get_aligned_cells(root_cell, axis)
    if not candidates:
        return []
    return min(candidates, key=lambda cell: getattr(cell, cross + '_start'))
def _get_axis_ngrams(span,
                     axis,
                     attrib='words',
                     n_min=1,
                     n_max=1,
                     spread=[0, 0],
                     lower=True):
    """Yield the span's own phrase ngrams, then ngrams from phrases aligned
    with it along the given axis ('row' or 'col')."""
    for ngram in get_phrase_ngrams(span,
                                   attrib=attrib,
                                   n_min=n_min,
                                   n_max=n_max,
                                   lower=lower):
        yield ngram
    if span.sentence.cell is None:
        return
    for phrase in _get_aligned_phrases(span.sentence, axis, spread=spread):
        for ngram in tokens_to_ngrams(getattr(phrase, attrib),
                                      n_min=n_min,
                                      n_max=n_max,
                                      lower=lower):
            yield ngram
def _get_aligned_cells(root_cell, axis):
    """Return every other cell in the table that shares the given axis with root_cell."""
    return [
        cell for cell in root_cell.table.cells
        if cell != root_cell and is_axis_aligned(root_cell, cell, axis=axis)
    ]
def _get_aligned_phrases(root_phrase, axis, spread=[0, 0]):
    """Return phrases from all cells aligned with root_phrase on the given
    axis (within spread), excluding root_phrase itself."""
    aligned = []
    for cell in root_phrase.table.cells:
        if not is_axis_aligned(root_phrase, cell, axis=axis, spread=spread):
            continue
        for phrase in cell.phrases:
            if phrase != root_phrase:
                aligned.append(phrase)
    return aligned
def _other_axis(axis):
return 'row' if axis == 'col' else 'col'
def is_superset(a, b):
    """Check if a is a superset of b.

    This is typically used to check if ALL of a list of phrases is in the ngrams returned by an lf_helper.

    :param a: A collection of items
    :param b: A collection of items
    :rtype: boolean
    """
    return set(b).issubset(set(a))
def overlap(a, b):
    """Check if a overlaps b.

    This is typically used to check if ANY of a list of phrases is in the ngrams returned by an lf_helper.

    :param a: A collection of items
    :param b: A collection of items
    :rtype: boolean
    """
    return len(set(a) & set(b)) > 0
############################
# Visual feature helpers
############################
def is_same_org_fig_page(org, fig):
    """Return True if the figure's page appears among the organic's pages."""
    return any(page == fig.page for page in org.page)
def is_same_sent_fig_page(org, fig):
    """Return True if the figure's page appears among the org sentence's pages."""
    return any(page == fig.page for page in org.sentence.page)
def is_nearby_org_fig_page(org, fig, num_pages):
    """Return True if any organic page lies within num_pages of the figure's
    page (the figure's own page is not considered)."""
    return any((fig.page - offset) in org.page or (fig.page + offset) in org.page
               for offset in range(1, num_pages + 1))
def fig_on_prev_page(org, fig):
    """Return True if the figure's page precedes every organic page."""
    earliest_org_page = min(org.page)
    return fig.page < earliest_org_page
def org_on_prev_page(org, fig):
    """Return True if every organic page precedes the figure's page."""
    latest_org_page = max(org.page)
    return fig.page > latest_org_page
def within_distance(org, fig, ratio, page_width=DEFAULT_WIDTH, page_height=DEFAULT_HEIGHT):
    """Yield a feature per axis on which the org and fig centers lie within
    ratio * page dimension of each other (org projected onto fig's page)."""
    fig_center_y = (fig.top + fig.bottom) / 2.0
    fig_center_x = (fig.left + fig.right) / 2.0
    org_center_y = (org.top[0] + org.bottom[0]) / 2.0
    org_center_x = (org.left[0] + org.right[0]) / 2.0
    # Shift the org position by whole pages so both share fig's page frame.
    org_center_y += page_height * (org.page[0] - fig.page)
    if abs(fig_center_y - org_center_y) <= ratio * page_height:
        yield "WITHIN_{}_VERT_PAGE".format(ratio)
    if abs(fig_center_x - org_center_x) <= ratio * page_width:
        yield "WITHIN_{}_HORZ_PAGE".format(ratio)
def org_pos_near_fig(org, fig, page_width=DEFAULT_WIDTH, page_height=DEFAULT_HEIGHT):
    """Return True if the org center lies within half a page of the fig center
    on both axes (org projected onto fig's page)."""
    fig_center_y = (fig.top + fig.bottom) / 2.0
    fig_center_x = (fig.left + fig.right) / 2.0
    org_center_y = (org.top[0] + org.bottom[0]) / 2.0
    org_center_x = (org.left[0] + org.right[0]) / 2.0
    org_center_y += page_height * (org.page[0] - fig.page)
    near_vert = abs(fig_center_y - org_center_y) <= 0.5 * page_height
    near_horz = abs(fig_center_x - org_center_x) <= 0.5 * page_width
    return near_vert and near_horz
def ahead_feature(org, fig, page_height=DEFAULT_HEIGHT):
    """Yield features marking whether the org center precedes the fig center
    vertically and/or horizontally (org projected onto fig's page)."""
    fig_center_y = (fig.top + fig.bottom) / 2.0
    fig_center_x = (fig.left + fig.right) / 2.0
    org_center_y = (org.top[0] + org.bottom[0]) / 2.0
    org_center_x = (org.left[0] + org.right[0]) / 2.0
    org_center_y += page_height * (org.page[0] - fig.page)
    if org_center_y < fig_center_y:
        yield "ORG_AHEAD_VERT_PDF"
    if org_center_x < fig_center_x:
        yield "ORG_AHEAD_HORZ_PDF"
def fig_contains_org(organic, figure, scores=[75]):
    """Yield a feature for each score threshold reached when fuzzy-matching
    the organic text against the figure's description plus caption text."""
    fig_text = figure.description
    if figure.text:
        # Collapse the caption text onto a single whitespace-normalized line.
        fig_text += ' ' + ' '.join(figure.text.strip().replace('\n', ' ').split())
    # partial_ratio is pure, so compute it once for all thresholds.
    match = fuzz.partial_ratio(organic.text, fig_text)
    for score in scores:
        if match >= score:
            yield "FIG_HAS_ORG_{}_SCORE".format(score)
def org_contains_fig_name(organic, figure, scores=[75]):
    """Yield fuzzy-match features for the figure name inside the organic's
    sentence, plus an exact-match feature when the organic text contains it."""
    fig_name = figure.name
    sentence_text = organic.sentence.text
    ratio = fuzz.partial_ratio(sentence_text, fig_name)
    for score in scores:
        if ratio >= score:
            yield "ORG_HAS_FIG_{}_SCORE".format(score)
    if organic.text.find(fig_name) != -1:
        yield "ORG_FIG_EXACT_MATCH"
def fig_text_matches_org_text(organic, figure, scores=[75]):
    """Yield a feature per threshold reached by token-set fuzzy matching of
    the organic's sentence against the figure's description."""
    similarity = fuzz.token_set_ratio(organic.sentence.text, figure.description)
    for score in scores:
        if similarity >= score:
            yield "ORG_FIG_TEXT_{}_SCORE".format(score)
def both_contain_keywords(organic, figure, keywords):
    """Return True if both the figure description and the organic's sentence
    fuzzily contain at least one of the given keywords (ratio > 90)."""
    fig_text = figure.description
    organic_text = organic.sentence.text
    if not any(fuzz.token_set_ratio(word, fig_text) > 90 for word in keywords):
        return False
    return any(fuzz.token_set_ratio(word, organic_text) > 90
               for word in keywords)
def search_fig_first_apprearance(organic, figure):
    """Yield features locating mentions of the figure's name relative to the
    organic mention within the same document.

    Scans consecutive phrase pairs for the (whitespace-stripped) figure name;
    for each hit it emits (a) whether the organic text fuzzily occurs in the
    same passage plus its page distance, and (b) a bucket for the signed
    phrase distance between the mention and the organic.

    :param organic: the organic span candidate
    :param figure: the Figure whose name is searched for
    :rtype: a _generator_ of feature strings
    """
    doc = organic.sentence.document
    for i in range(len(doc.phrases)):
        text = doc.phrases[i].text
        if i < len(doc.phrases) - 1:
            text += doc.phrases[i+1].text
        text = text.strip().replace(' ', '')
        if len(text) < 4:
            continue
        fig_name = figure.name.strip().replace(' ','')
        if text.find(fig_name) != -1:
            # Signed phrase distance from the organic to this mention
            # (negative = mention appears before the organic).
            dist = i - organic.sentence.phrase_num
            if fuzz.partial_ratio(organic.text.strip().replace(' ',''), text) > 85:
                yield "ORG_IN_FIG_FIRST_MENTION"
                pg_dist = sum(doc.phrases[i].page)/len(doc.phrases[i].page) - \
                    sum(organic.sentence.page)/len(organic.sentence.page)
                if pg_dist < -2:
                    yield "ORG_MENTION_FAR_AHEAD"
                elif pg_dist < 0:
                    yield "ORG_MENTION_NEAR_AHEAD"
                elif pg_dist == 0:
                    yield "ORG_MENTION_SAME_PAGE"
                elif pg_dist < 3:
                    yield "ORG_MENTION_NEAR_BEHIND"
                else:
                    yield "ORG_MENTION_FAR_BEHIND"
            # Bug fix: these buckets previously tested the absolute phrase
            # index `i` (never negative), which made the *_AHEAD branches
            # unreachable; the signed distance `dist` was computed but unused.
            if dist < -300:
                yield "FIG_FAR_AHEAD"
            elif dist < -100:
                yield "FIG_NEAR_AHEAD"
            elif dist < 0:
                yield "FIG_CLOSE_AHEAD"
            elif dist == 0:
                yield "FIG_EXACT_MATCH"
            elif dist < 100:
                yield "FIG_CLOSE_AFTER"
            elif dist <= 300:
                yield "FIG_NEAR_BEHIND"
            else:
                yield "FIG_FAR_BEHIND"
    # NOTE(review): in a generator this value is not yielded to callers; it
    # only terminates iteration (attached to StopIteration). Kept as-is.
    return "NO_MATCH"
def get_page(span):
    """Return the page number of the given span.

    If a candidate is passed in, this returns the page of its first Span.

    :param span: The Span to get the page number of.
    :rtype: integer
    """
    if not isinstance(span, TemporarySpan):
        span = span[0]
    pages = span.get_attrib_tokens('page')
    return pages[0]
def is_horz_aligned(c):
    """Return True if all the components of c are horizontally aligned.

    Horizontal alignment means that the bounding boxes of each Span of c
    share a similar y-axis value in the visual rendering of the document.

    :param c: The candidate to evaluate
    :rtype: boolean
    """
    anchor = bbox_from_span(c[0])
    return all([
        c[i].sentence.is_visual()
        and bbox_horz_aligned(bbox_from_span(c[i]), anchor)
        for i in range(len(c))
    ])
def is_vert_aligned(c):
    """Return true if all the components of c are vertically aligned.

    Vertical alignment means that the bounding boxes of each Span of c share
    a similar x-axis value in the visual rendering of the document.

    :param c: The candidate to evaluate
    :rtype: boolean
    """
    anchor = bbox_from_span(c[0])
    return all([
        c[i].sentence.is_visual()
        and bbox_vert_aligned(bbox_from_span(c[i]), anchor)
        for i in range(len(c))
    ])
def is_vert_aligned_left(c):
    """Return true if all the components of c are vertically aligned based on their left border.

    Vertical alignment means that the bounding boxes of each Span of c share
    a similar x-axis value in the visual rendering of the document; here the
    similarity is based on the left border of their bounding boxes.

    :param c: The candidate to evaluate
    :rtype: boolean
    """
    anchor = bbox_from_span(c[0])
    return all([
        c[i].sentence.is_visual()
        and bbox_vert_aligned_left(bbox_from_span(c[i]), anchor)
        for i in range(len(c))
    ])
def is_vert_aligned_right(c):
    """Return true if all the components of c are vertically aligned based on their right border.

    Vertical alignment means that the bounding boxes of each Span of c share
    a similar x-axis value in the visual rendering of the document; here the
    similarity is based on the right border of their bounding boxes.

    :param c: The candidate to evaluate
    :rtype: boolean
    """
    anchor = bbox_from_span(c[0])
    return all([
        c[i].sentence.is_visual()
        and bbox_vert_aligned_right(bbox_from_span(c[i]), anchor)
        for i in range(len(c))
    ])
def is_vert_aligned_center(c):
    """Return true if all the components of c are vertically aligned based on their center.

    Vertical alignment means that the bounding boxes of each Span of c share
    a similar x-axis value in the visual rendering of the document; here the
    similarity is based on the center of their bounding boxes. (The original
    docstring said "left border", which did not match the implementation.)

    :param c: The candidate to evaluate
    :rtype: boolean
    """
    anchor = bbox_from_span(c[0])
    return all([
        c[i].sentence.is_visual()
        and bbox_vert_aligned_center(bbox_from_span(c[i]), anchor)
        for i in range(len(c))
    ])
def same_page(c):
    """Return true if all the components of c are on the same page of the document.

    Page numbers are based on the PDF rendering of the document. If a PDF file is
    provided, it is used. Otherwise, if only a HTML/XML document is provided, a
    PDF is created and then used to determine the page number of a Span.

    :param c: The candidate to evaluate
    :rtype: boolean
    """
    first_page = bbox_from_span(c[0]).page
    return all([
        c[i].sentence.is_visual()
        and bbox_from_span(c[i]).page == first_page
        for i in range(len(c))
    ])
def get_horz_ngrams(span,
                    attrib='words',
                    n_min=1,
                    n_max=1,
                    lower=True,
                    from_phrase=True):
    """Return all ngrams which are visually horizontally aligned with the Span.

    Note that if a candidate is passed in, all of its Spans will be searched.

    :param span: The Span to evaluate
    :param attrib: The token attribute type (e.g. words, lemmas, poses)
    :param n_min: The minimum n of the ngrams that should be returned
    :param n_max: The maximum n of the ngrams that should be returned
    :param lower: If True, all ngrams will be returned in lower case
    :param from_phrase: If True, returns ngrams from any horizontally aligned
        Phrases, rather than just horizontally aligned ngrams themselves.
    :rtype: a _generator_ of ngrams
    """
    if isinstance(span, TemporarySpan):
        spans = [span]
    else:
        spans = span.get_contexts()
    for s in spans:
        for ngram in _get_direction_ngrams('horz', s, attrib, n_min, n_max,
                                           lower, from_phrase):
            yield ngram
def get_vert_ngrams(span,
                    attrib='words',
                    n_min=1,
                    n_max=1,
                    lower=True,
                    from_phrase=True):
    """Return all ngrams which are visually vertically aligned with the Span.

    Note that if a candidate is passed in, all of its Spans will be searched.

    :param span: The Span to evaluate
    :param attrib: The token attribute type (e.g. words, lemmas, poses)
    :param n_min: The minimum n of the ngrams that should be returned
    :param n_max: The maximum n of the ngrams that should be returned
    :param lower: If True, all ngrams will be returned in lower case
    :param from_phrase: If True, returns ngrams from any vertically aligned
        Phrases, rather than just vertically aligned ngrams themselves.
    :rtype: a _generator_ of ngrams
    """
    spans = [span] if isinstance(span, TemporarySpan) else span.get_contexts()
    for span in spans:
        for ngram in _get_direction_ngrams('vert', span, attrib, n_min, n_max,
                                           lower, from_phrase):
            yield ngram
def _get_direction_ngrams(direction, c, attrib, n_min, n_max, lower,
                          from_phrase):
    """Yield ngrams aligned with *c* along *direction* ('vert' or 'horz').

    Shared implementation behind get_horz_ngrams/get_vert_ngrams.  Only
    phrases in the same table as each Span of *c* are considered.

    :param direction: 'vert' selects vertical alignment, anything else
        selects horizontal alignment
    :param c: a TemporarySpan or a candidate whose contexts are searched
    :param from_phrase: if True, yield ngrams from every aligned Phrase;
        otherwise yield only the individual aligned ngrams themselves
    """
    # TODO: this currently looks only in current table;
    # precompute over the whole document/page instead
    bbox_direction_aligned = bbox_vert_aligned if direction == 'vert' else bbox_horz_aligned
    ngrams_space = Ngrams(n_max=n_max, split_tokens=[])
    # Lower-casing is applied here only on the non-from_phrase path;
    # tokens_to_ngrams handles it via its lower= argument on the other path.
    f = (lambda w: w.lower()) if lower else (lambda w: w)
    spans = [c] if isinstance(c, TemporarySpan) else c.get_contexts()
    for span in spans:
        # Alignment is only meaningful for spans that are both tabular and
        # visually rendered.
        if not span.sentence.is_tabular() or not span.sentence.is_visual():
            continue
        for phrase in span.sentence.table.phrases:
            if (from_phrase):
                # Yield all ngrams of any aligned phrase other than the
                # span's own sentence.
                if (bbox_direction_aligned(
                        bbox_from_phrase(phrase), bbox_from_span(span))
                        and phrase is not span.sentence):
                    for ngram in tokens_to_ngrams(
                            getattr(phrase, attrib),
                            n_min=n_min,
                            n_max=n_max,
                            lower=lower):
                        yield ngram
            else:
                # Yield individual aligned ngrams, excluding ngrams that are
                # part of the span itself.
                for ts in ngrams_space.apply(phrase):
                    if (bbox_direction_aligned(
                            bbox_from_span(ts), bbox_from_span(span))
                            and not (phrase == span.sentence
                                     and ts.get_span() in span.get_span())):
                        yield f(ts.get_span())
def get_vert_ngrams_left(c):
    """Not implemented."""
    # TODO: ngrams vertically aligned on the left border are not yet supported
    return None
def get_vert_ngrams_right(c):
    """Not implemented."""
    # TODO: ngrams vertically aligned on the right border are not yet supported
    return None
def get_vert_ngrams_center(c):
    """Not implemented."""
    # TODO: ngrams vertically aligned on centers are not yet supported
    return None
def get_visual_header_ngrams(c, axis=None):
    """Not implemented."""
    # TODO: visual header ngrams are not yet supported
    return None
def get_visual_distance(c, axis=None):
    """Not implemented."""
    # TODO: visual distance between candidate components is not yet supported
    return None
def get_page_vert_percentile(span,
                             page_width=DEFAULT_WIDTH,
                             page_height=DEFAULT_HEIGHT):
    """Return which percentile from the TOP in the page Span candidate is located in.

    Percentile is calculated where the top of the page is 0.0, and the bottom
    of the page is 1.0. For example, a Span at the top 1/4 of the page will
    have a percentile of 0.25.

    Page width and height are based on pt values:
        Letter      612x792
        Tabloid     792x1224
        Ledger      1224x792
        Legal       612x1008
        Statement   396x612
        Executive   540x720
        A0          2384x3371
        A1          1685x2384
        A2          1190x1684
        A3          842x1190
        A4          595x842
        A4Small     595x842
        A5          420x595
        B4          729x1032
        B5          516x729
        Folio       612x936
        Quarto      610x780
        10x14       720x1008
    and should match the source documents. Letter size is used by default.

    Note that if a candidate is passed in, only the vertical percentile of its
    first Span is returned.

    :param span: The Span to evaluate
    :param page_width: The width of the page. Default to Letter paper width.
        (Unused here; kept for signature symmetry with the horizontal
        variant.)
    :param page_height: The height of the page. Default to Letter paper height.
    :rtype: float in [0.0, 1.0]
    """
    span = span if isinstance(span, TemporarySpan) else span[0]
    return bbox_from_span(span).top / page_height
def get_page_horz_percentile(span,
                             page_width=DEFAULT_WIDTH,
                             page_height=DEFAULT_HEIGHT):
    """Return which percentile from the LEFT in the page the Span is located in.

    Percentile is calculated where the left of the page is 0.0, and the right
    of the page is 1.0.

    Page width and height are based on pt values:
        Letter      612x792
        Tabloid     792x1224
        Ledger      1224x792
        Legal       612x1008
        Statement   396x612
        Executive   540x720
        A0          2384x3371
        A1          1685x2384
        A2          1190x1684
        A3          842x1190
        A4          595x842
        A4Small     595x842
        A5          420x595
        B4          729x1032
        B5          516x729
        Folio       612x936
        Quarto      610x780
        10x14       720x1008
    and should match the source documents. Letter size is used by default.

    Note that if a candidate is passed in, only the horizontal percentile of
    its first Span is returned.

    :param span: The Span to evaluate
    :param page_width: The width of the page. Default to Letter paper width.
    :param page_height: The height of the page. Default to Letter paper
        height. (Unused; kept for signature symmetry with the vertical
        variant.)
    :rtype: float in [0.0, 1.0]
    """
    span = span if isinstance(span, TemporarySpan) else span[0]
    # Bug fix: the original `return bbox_from_span(span).left, page_width`
    # returned a (left, page_width) tuple instead of dividing, so the
    # documented float percentile was never produced.  Mirror
    # get_page_vert_percentile and divide.
    return bbox_from_span(span).left / page_width
def _assign_alignment_features(phrases_by_key, align_type):
for key, phrases in phrases_by_key.items():
if len(phrases) == 1:
continue
context_lemmas = set()
for p in phrases:
p._aligned_lemmas.update(context_lemmas)
# update lemma context for upcoming phrases in the group
if len(p.lemmas) < 7:
new_lemmas = [
lemma.lower() for lemma in p.lemmas if lemma.isalpha()
]
# if new_lemmas: print '++Lemmas for\t', p, context_lemmas
context_lemmas.update(new_lemmas)
context_lemmas.update(
align_type + lemma for lemma in new_lemmas)
def _preprocess_visual_features(doc):
    """Compute and cache visual alignment features for every phrase of *doc*.

    Groups the document's phrases per page under four alignment keys --
    vertical center, left edge, right edge, and horizontal center of their
    bounding boxes -- and lets phrases within each group share lemma context
    via _assign_alignment_features.  Results accumulate on each phrase's
    ``_aligned_lemmas`` set.  The whole computation runs at most once per
    document.
    """
    if hasattr(doc, '_visual_features'):
        return
    # cache flag
    doc._visual_features = True
    phrase_by_page = defaultdict(list)
    for phrase in doc.phrases:
        phrase_by_page[phrase.page[0]].append(phrase)
        phrase._aligned_lemmas = set()
    for page, phrases in phrase_by_page.items():
        # process per page alignments
        yc_aligned = defaultdict(list)
        x0_aligned = defaultdict(list)
        xc_aligned = defaultdict(list)
        x1_aligned = defaultdict(list)
        for phrase in phrases:
            phrase.bbox = bbox_from_phrase(phrase)
            phrase.yc = (phrase.bbox.top + phrase.bbox.bottom) / 2
            phrase.x0 = phrase.bbox.left
            phrase.x1 = phrase.bbox.right
            phrase.xc = (phrase.x0 + phrase.x1) / 2
            # index current phrase by different alignment keys
            yc_aligned[phrase.yc].append(phrase)
            x0_aligned[phrase.x0].append(phrase)
            x1_aligned[phrase.x1].append(phrase)
            xc_aligned[phrase.xc].append(phrase)
        # Sort each group along the orthogonal axis so lemma context flows
        # in reading order within the group.
        for l in yc_aligned.values():
            l.sort(key=lambda p: p.xc)
        for l in x0_aligned.values():
            l.sort(key=lambda p: p.yc)
        for l in x1_aligned.values():
            l.sort(key=lambda p: p.yc)
        for l in xc_aligned.values():
            l.sort(key=lambda p: p.yc)
        _assign_alignment_features(yc_aligned, 'Y_')
        _assign_alignment_features(x0_aligned, 'LEFT_')
        _assign_alignment_features(x1_aligned, 'RIGHT_')
        _assign_alignment_features(xc_aligned, 'CENTER_')
def get_visual_aligned_lemmas(span):
    """Return a generator of the lemmas aligned visually with the Span.

    Note that if a candidate is passed in, all of its Spans will be searched.

    :param span: The Span to evaluate.
    :rtype: a _generator_ of lemmas
    """
    if isinstance(span, TemporarySpan):
        contexts = [span]
    else:
        contexts = span.get_contexts()
    for context in contexts:
        phrase = context.sentence
        # Compute (and cache) alignment features for the whole document once.
        _preprocess_visual_features(phrase.document)
        yield from phrase._aligned_lemmas
def get_aligned_lemmas(span):
    """Return a set of the lemmas aligned visually with the Span.

    Note that if a candidate is passed in, all of its Spans will be searched.

    :param span: The Span to evaluate.
    :rtype: a set of lemmas
    """
    lemmas = set()
    for lemma in get_visual_aligned_lemmas(span):
        lemmas.add(lemma)
    return lemmas
############################
# Structural feature helpers
############################
def get_tag(span):
    """Return the HTML tag of the Span.

    If a candidate is passed in, only the tag of its first Span is returned.
    These may be tags such as 'p', 'h2', 'table', 'div', etc.

    :param span: The Span to evaluate
    :rtype: string
    """
    if not isinstance(span, TemporarySpan):
        span = span[0]
    return str(span.sentence.html_tag)
def get_attributes(span):
    """Return the HTML attributes of the Span.

    If a candidate is passed in, only the attributes of its first Span are
    returned.

    A sample output of this function on a Span in a paragraph tag is
    [u'style=padding-top: 8pt;padding-left: 20pt;text-indent: 0pt;text-align: left;']

    :param span: The Span to evaluate
    :rtype: list of strings representing HTML attributes
    """
    span = span if isinstance(span, TemporarySpan) else span[0]
    return span.sentence.html_attrs
# TODO: Too slow
def _get_node(phrase):
    # Re-parse the full document text and resolve the phrase's xpath to its
    # lxml element.  NOTE(review): parsing the whole document on every call
    # is the reason for the "Too slow" TODO above.
    return (etree.ElementTree(fromstring(phrase.document.text)).xpath(
        phrase.xpath))[0]
def get_parent_tag(span):
    """Return the HTML tag of the Span's parent.

    These may be tags such as 'p', 'h2', 'table', 'div', etc.
    If a candidate is passed in, only the tag of its first Span is returned.

    :param span: The Span to evaluate
    :rtype: string
    """
    span = span if isinstance(span, TemporarySpan) else span[0]
    node = _get_node(span.sentence)
    parent = node.getparent()
    return str(parent.tag) if parent is not None else None
def get_prev_sibling_tags(span):
    """Return the HTML tags of the Span's previous siblings.

    Previous siblings are nodes at the same level in the HTML tree as the
    given span, but declared before it.  Tags are returned in document
    order.  If a candidate is passed in, only the previous siblings of its
    first Span are considered.

    :param span: The Span to evaluate
    :rtype: list of strings
    """
    span = span if isinstance(span, TemporarySpan) else span[0]
    tags = []
    node = _get_node(span.sentence).getprevious()
    while node is not None:
        tags.insert(0, str(node.tag))
        node = node.getprevious()
    return tags
def get_next_sibling_tags(span):
    """Return the HTML tags of the Span's next siblings.

    Next siblings are nodes at the same level in the HTML tree as the given
    span, but declared after it.  If a candidate is passed in, only the next
    siblings of its last Span are considered.

    :param span: The Span to evaluate
    :rtype: list of strings
    """
    span = span if isinstance(span, TemporarySpan) else span[-1]
    tags = []
    node = _get_node(span.sentence).getnext()
    while node is not None:
        tags.append(str(node.tag))
        node = node.getnext()
    return tags
def get_ancestor_class_names(span):
    """Return the HTML classes of the Span's ancestors, root first.

    If a candidate is passed in, only the ancestors of its first Span are
    returned.

    :param span: The Span to evaluate
    :rtype: list of strings
    """
    span = span if isinstance(span, TemporarySpan) else span[0]
    names = []
    node = _get_node(span.sentence)
    while node is not None:
        names.append(str(node.get('class')))
        node = node.getparent()
    names.reverse()
    return names
def get_ancestor_tag_names(span):
    """Return the HTML tags of the Span's ancestors, root first.

    For example, ['html', 'body', 'p'].
    If a candidate is passed in, only the ancestors of its first Span are
    returned.

    :param span: The Span to evaluate
    :rtype: list of strings
    """
    span = span if isinstance(span, TemporarySpan) else span[0]
    names = []
    node = _get_node(span.sentence)
    while node is not None:
        names.append(str(node.tag))
        node = node.getparent()
    names.reverse()
    return names
def get_ancestor_id_names(span):
    """Return the HTML id attributes of the Span's ancestors, root first.

    If a candidate is passed in, only the ancestors of its first Span are
    returned.

    :param span: The Span to evaluate
    :rtype: list of strings
    """
    span = span if isinstance(span, TemporarySpan) else span[0]
    ids = []
    node = _get_node(span.sentence)
    while node is not None:
        ids.append(str(node.get('id')))
        node = node.getparent()
    ids.reverse()
    return ids
def common_ancestor(c):
    """Return the common path to the root that is shared between a binary-Span Candidate.

    In particular, this is the common path of HTML tags.

    :param c: The binary-Span Candidate to evaluate
    :rtype: list of strings
    """
    ancestor1 = np.array(c[0].sentence.xpath.split('/'))
    ancestor2 = np.array(c[1].sentence.xpath.split('/'))
    min_len = min(ancestor1.size, ancestor2.size)
    # np.argmin over the elementwise equality is the index of the first
    # mismatch, i.e. the length of the shared prefix.
    # NOTE(review): when the compared prefixes are entirely equal (one xpath
    # is a prefix of the other), argmin returns 0 and an empty list is
    # returned -- confirm this is the intended result.
    return list(
        ancestor1[:np.argmin(ancestor1[:min_len] == ancestor2[:min_len])])
def lowest_common_ancestor_depth(c):
    """Return the minimum distance between a binary-Span Candidate to their lowest common ancestor.

    For example, if the tree looked like this:

        html
        |----<div> span 1 </div>
        |----table
        |    |----tr
        |    |    |-----<th> span 2 </th>

    we return 1, the distance from span 1 to the html root. Smaller values
    indicate that two Spans are close structurally, while larger values
    indicate that two Spans are spread far apart structurally in the
    document.

    :param c: The binary-Span Candidate to evaluate
    :rtype: integer
    """
    ancestor1 = np.array(c[0].sentence.xpath.split('/'))
    ancestor2 = np.array(c[1].sentence.xpath.split('/'))
    min_len = min(ancestor1.size, ancestor2.size)
    # min_len minus the shared-prefix length (first mismatch index).
    # NOTE(review): if the compared prefixes are entirely equal, argmin
    # returns 0 and the full min_len is returned -- confirm intended.
    return min_len - np.argmin(ancestor1[:min_len] == ancestor2[:min_len])
def find_image(span):
    """Return the 'image_table' <div> whose <img src> equals span.url.

    Parses the document HTML with BeautifulSoup and scans every
    <div class="image_table">.  Returns None if no div matches.
    """
    soup = BeautifulSoup(span.document.text, "html.parser")
    for div in soup.find_all('div', class_='image_table'):
        if div.img.get('src') == span.url:
            return div
    return None
def cut_string(prev_content, name):
    """Split *prev_content* into pieces, each ending right after an
    occurrence of *name* (whitespace-stripped); any trailing remainder is
    appended as a final piece.

    :param prev_content: the text to cut
    :param name: the marker substring; surrounding whitespace is ignored
    :returns: (pieces, count) where count is the number of occurrences found
    """
    subs_list = []
    name = name.strip()
    count = 0
    # Bug fix: an empty marker matches at position 0 on every iteration, so
    # the original scan below looped forever appending '' pieces.  Treat an
    # empty marker as "not found".
    if not name:
        if prev_content:
            subs_list.append(prev_content)
        return subs_list, count
    while True:
        start = prev_content.find(name)
        if start != -1:
            # Cut just past this occurrence and continue on the remainder.
            end = start + len(name)
            subs_list.append(prev_content[:end])
            prev_content = prev_content[end:]
            count += 1
        elif prev_content:
            # No more occurrences; keep the trailing remainder.
            subs_list.append(prev_content)
            break
        else:
            break
    return subs_list, count
def get_near_string(prev_content, name):
    """Return the text surrounding the first occurrence of *name*.

    :param prev_content: the text to search
    :param name: the substring to locate; surrounding whitespace is ignored
    :returns: (pre, post) where pre is up to 100 characters immediately
        before the match and post is the match plus up to 100 characters
        after it; (None, None) if either input is empty or *name* is not
        found.
    """
    if prev_content == '' or name == '':
        # print('Error: Got empty input!')
        return None, None
    name = name.strip()
    # A whitespace-only marker strips to '' and would "match" at position 0;
    # treat it as not found.
    if not name:
        return None, None
    start = prev_content.find(name)
    if start == -1:
        return None, None
    # Bug fix: the original sliced [start-101:start-1] for the long case,
    # which shifted the 100-char window by one and dropped the character
    # immediately before the match.  Slicing is clamped, so one expression
    # covers both the short and long cases.
    pre = prev_content[max(start - 100, 0):start]
    post = prev_content[start:start + len(name) + 100]
    return pre, post
# get image_table xpath
def lxml_find_image_xpath(figure, root, tree):
    """Return the xpath of the 'image_table' <div> whose <img src> equals
    figure.url, or None if no such div exists.

    :param figure: object with a ``url`` attribute matched against img src
    :param root: parsed lxml root element of the document
    :param tree: lxml ElementTree over *root*, used to compute the xpath
    """
    imgs = root.findall(".//div[@class='image_table']")
    # imgs = root.findall(".//div[@class='image_table']//img[@src]")
    for i in imgs:
        isrc = i.find(".//img[@src]").attrib.get('src')
        if figure.url == isrc:
            return tree.getpath(i)
def get_text_img_horizontal_distance(organic, figure):
    """Return (distance, direction) between the phrase of *organic* and the
    image of *figure*, counted in sibling steps under their lowest common
    ancestor in the HTML tree.

    direction is 1 when the image subtree precedes the text subtree in
    document order, -1 when the text precedes the image.  Returns
    (0, 1)/(0, -1) when one xpath is a prefix of the other, and
    (None, None) when the common parent cannot be resolved or either
    subtree is not found among its children.
    """
    root = fromstring(figure.document.text)  # lxml.html.fromstring()
    tree = etree.ElementTree(root)
    # ancestor1 = np.array(organic.sentence.xpath.split('/'))
    img_path = lxml_find_image_xpath(figure, root, tree)
    org_path = organic.sentence.xpath
    # common_path = os.path.commonprefix([organic_path, img_path])
    ancestor1 = org_path.split('/')
    ancestor2 = img_path.split('/')
    # min_len = min(ancestor1.size, ancestor2.size)
    # l = list(
    #     ancestor1[:min_len - np.argmin(ancestor1[:min_len] == ancestor2[:min_len])])
    min_len = min(len(ancestor1), len(ancestor2))
    # Build the longest shared xpath prefix (kept with a trailing '/').
    common_path = ''
    for i, e in enumerate(ancestor1[:min_len]):
        if ancestor1[i] == ancestor2[i]:
            common_path += e + '/'
        else:
            break;
    # NOTE(review): common_path keeps a trailing '/' while the xpaths it is
    # compared against do not -- confirm these equality checks can match.
    if common_path == img_path:
        return 0, 1
    if common_path == org_path:
        return 0, -1
    try:
        common_parent = root.xpath(common_path[:-1])[0]
    except:
        print(common_path)
        return None, None
    # The direct children of the common parent that contain each target.
    img_path_sub = common_path + img_path[len(common_path):].split('/')[0]
    org_path_sub = common_path + org_path[len(common_path):].split('/')[0]
    distance = 0
    found = 0
    direction = 0
    for e in common_parent:
        e_path = tree.getpath(e)
        # e_img_prefix = os.path.commonprefix([e_path, img_path])
        # e_org_prefix = os.path.commonprefix([e_path, organic_path])
        # Once the first target child is seen, count every following child
        # (including the second target itself) as one sibling step.
        if found != 0:
            distance += 1
        if e_path == img_path_sub or e_path == org_path_sub:
            if found == 0:
                # image above organic
                if e_path == img_path_sub:
                    direction = 1
                else:
                    direction = -1
            found += 1
            if found == 2:
                return distance, direction
    return None, None
#
def get_text_img_dfs_distance(organic, figure):
    """Return the number of document-order (DFS) steps between *organic*'s
    phrase node and *figure*'s image node within their lowest common
    ancestor's subtree, or None if the nodes cannot be located.

    Bug fix: the two early "one path is a prefix of the other" cases
    previously returned tuples (0, 1)/(0, -1) -- copy-pasted from the
    horizontal variant, which returns (distance, direction) -- while every
    other path here returns a plain scalar.  They now return 0 so the
    function consistently yields a scalar distance (or None).
    """
    root = fromstring(figure.document.text)  # lxml.html.fromstring()
    tree = etree.ElementTree(root)
    img_path = lxml_find_image_xpath(figure, root, tree)
    org_path = organic.sentence.xpath
    ancestor1 = org_path.split('/')
    ancestor2 = img_path.split('/')
    min_len = min(len(ancestor1), len(ancestor2))
    # Longest shared xpath prefix (kept with a trailing '/').
    common_path = ''
    for i in range(min_len):
        if ancestor1[i] != ancestor2[i]:
            break
        common_path += ancestor1[i] + '/'
    # NOTE(review): common_path keeps a trailing '/' while the xpaths it is
    # compared against do not -- confirm these equality checks can match.
    if common_path == img_path or common_path == org_path:
        # One node contains the other: zero DFS distance.
        return 0
    try:
        common_parent = root.xpath(common_path[:-1])[0]
    except Exception:
        print(common_path)
        return None
    distance = 0
    found = 0
    # Walk the common ancestor's subtree in document order, counting every
    # node after the first target up to and including the second target.
    for e in common_parent.iter():
        e_path = tree.getpath(e)
        if found != 0:
            distance += 1
        if e_path == img_path or e_path == org_path:
            found += 1
            if found == 2:
                return distance
    return None
def text_fig_common_ancestor(c):
    """Return the shared xpath prefix (list of path components) between the
    text Span c[0] and the figure c[1].

    Mirrors common_ancestor, but resolves the figure's xpath by locating its
    'image_table' div in the freshly parsed document.
    """
    organic = c[0]
    figure = c[1]
    root = fromstring(figure.document.text)  # lxml.html.fromstring()
    tree = etree.ElementTree(root)
    ancestor1 = np.array(organic.sentence.xpath.split('/'))
    img_path = lxml_find_image_xpath(figure, root, tree)
    ancestor2 = np.array(img_path.split('/'))
    min_len = min(ancestor1.size, ancestor2.size)
    # NOTE(review): np.argmin returns 0 when the compared prefixes are fully
    # equal, yielding an empty list -- confirm that is intended.
    return list(
        ancestor1[:np.argmin(ancestor1[:min_len] == ancestor2[:min_len])])
def text_fig_lowest_common_ancestor_depth(c):
    """Return the distance from the lowest common ancestor for the pair
    (text Span c[0], figure c[1]).

    Mirrors lowest_common_ancestor_depth, resolving the figure's xpath by
    locating its 'image_table' div in the freshly parsed document.
    """
    organic = c[0]
    figure = c[1]
    root = fromstring(figure.document.text)  # lxml.html.fromstring()
    tree = etree.ElementTree(root)
    ancestor1 = np.array(organic.sentence.xpath.split('/'))
    img_path = lxml_find_image_xpath(figure, root, tree)
    ancestor2 = np.array(img_path.split('/'))
    min_len = min(ancestor1.size, ancestor2.size)
    return min_len - np.argmin(ancestor1[:min_len] == ancestor2[:min_len])
|
nilq/baby-python
|
python
|
# coding=utf-8
#
# pylint: disable = wildcard-import, unused-wildcard-import, unused-import
# pylint: disable = missing-docstring, invalid-name, wrong-import-order
# pylint: disable = no-member, attribute-defined-outside-init
"""
Copyright (c) 2019, Alexander Magola. All rights reserved.
license: BSD 3-Clause License, see LICENSE for more details.
"""
import sys
import os
from copy import deepcopy
import pytest
import tests.common as cmn
from zm.constants import APPNAME, CAP_APPNAME, CWD
from zm import cli
joinpath = os.path.join
class TestSuite(object):
    @pytest.fixture(autouse = True)
    def setup(self):
        # Fresh parser for every test, with a known default buildtype.
        self.defaults = { 'buildtype': 'somedebug' }
        self.parser = cli.CmdLineParser('test', self.defaults)
    def _parseHelpArgs(self, args, capsys):
        """Parse *args*, expecting the CLI to print help and exit.

        Returns (exit_code, stdout, stderr).
        """
        # CLI prints help and does exit
        with pytest.raises(SystemExit) as cm:
            self.parser.parse(args)
        captured = capsys.readouterr()
        return cm.value.code, captured.out, captured.err
    def _testMainHelpMsg(self, args, capsys):
        """Assert that *args* produce the main (overview) help message and a
        clean exit."""
        ecode, out, err = self._parseHelpArgs(args, capsys)
        assert not err
        assert ecode == 0
        assert CAP_APPNAME in out
        assert 'based on the Waf build system' in out
        assert self.parser.command is not None
        assert self.parser.command.name == 'help'
        assert self.parser.command.args == {'topic': 'overview'}
        assert self.parser.wafCmdLine == []
def _assertAllsForCmd(self, cmdname, checks, baseExpectedArgs):
expectedArgs = None
for check in checks:
expectedArgs = deepcopy(baseExpectedArgs)
expectedArgs.update(check['expectedArgsUpdate'])
def assertAll(cmd, parsercmd, wafcmdline):
assert cmd is not None
assert parsercmd is not None
assert parsercmd == cmd
assert cmd.name == cmdname
assert cmd.args == expectedArgs
# pylint: disable = cell-var-from-loop
if 'wafArgs' in check:
assert sorted(check['wafArgs']) == sorted(wafcmdline)
# parser with explicit args
cmd = self.parser.parse(check['args'])
assertAll(cmd, self.parser.command, self.parser.wafCmdLine)
# parser with args from sys.argv
oldargv = sys.argv
sys.argv = [APPNAME] + check['args']
cmd = self.parser.parse()
sys.argv = oldargv
assertAll(cmd, self.parser.command, self.parser.wafCmdLine)
    def testEmpty(self, capsys):
        """No arguments: the main help message is shown."""
        self._testMainHelpMsg([], capsys)
    def testHelp(self, capsys):
        """'help' with no topic shows the main help message."""
        self._testMainHelpMsg(['help'], capsys)
    def testHelpWrongTopic(self, capsys):
        """An unknown help topic produces an error and a non-zero exit."""
        args = ['help', 'qwerty']
        ecode, out, err = self._parseHelpArgs(args, capsys)
        assert not out
        assert 'Unknown command/topic' in err
        assert ecode != 0
    def testHelpForCmds(self, capsys):
        """'help <cmd>' prints each command's description and exits cleanly."""
        for cmd in cli.config.commands:
            args = ['help', cmd.name]
            ecode, out, err = self._parseHelpArgs(args, capsys)
            assert ecode == 0
            assert not err
            if cmd.name == 'help':
                assert 'show help' in out
            else:
                assert cmd.description.capitalize() in out
def testCmdBuild(self):
baseExpectedArgs = {
'buildtype' : self.defaults['buildtype'],
'jobs' : None,
'configure': False,
'color': 'auto',
'clean': False,
'progress': False,
'cleanAll': False,
'distclean': False,
'tasks': [],
'verbose': 0,
'verboseConfigure' : None,
'verboseBuild' : None,
'withTests': 'no',
'runTests': 'none',
'bindir' : None,
'libdir' : None,
'prefix' : cli.DEFAULT_PREFIX,
'buildroot' : None,
'forceExternalDeps' : False,
'cacheCfgActionResults' : False,
}
CMDNAME = 'build'
CMNOPTS = ['--color=auto', '--prefix=' + cli.DEFAULT_PREFIX]
checks = [
dict(
args = [CMDNAME],
expectedArgsUpdate = {},
wafArgs = [CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '-b', 'release'],
expectedArgsUpdate = {'buildtype': 'release'},
wafArgs = [CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--jobs', '22'],
expectedArgsUpdate = {'jobs': 22},
wafArgs = [CMDNAME, '--jobs=22'] + CMNOPTS,
),
dict(
args = [CMDNAME, '--verbose'],
expectedArgsUpdate = {'verbose': 1},
wafArgs = [CMDNAME, '-v'] + CMNOPTS,
),
dict(
args = [CMDNAME, '-vvv'],
expectedArgsUpdate = {'verbose': 3},
wafArgs = [CMDNAME, '-vvv'] + CMNOPTS,
),
dict(
args = [CMDNAME, '--configure'],
expectedArgsUpdate = {'configure': True},
wafArgs = ['configure', CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--clean'],
expectedArgsUpdate = {'clean': True},
wafArgs = ['clean', CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--clean-all'],
expectedArgsUpdate = {'cleanAll': True},
wafArgs = ['cleanall', CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--distclean'],
expectedArgsUpdate = {'distclean': True},
wafArgs = ['distclean', CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--with-tests', 'yes'],
expectedArgsUpdate = {'withTests': 'yes'},
wafArgs = [CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--run-tests', 'all'],
expectedArgsUpdate = {'runTests': 'all'},
wafArgs = [CMDNAME, 'test'] + CMNOPTS,
),
dict(
args = [CMDNAME, '--run-tests', 'on-changes'],
expectedArgsUpdate = {'runTests': 'on-changes'},
wafArgs = [CMDNAME, 'test'] + CMNOPTS,
),
dict(
args = [CMDNAME, '--progress'],
expectedArgsUpdate = {'progress': True},
wafArgs = [CMDNAME, '--progress'] + CMNOPTS,
),
dict(
args = [CMDNAME, '--color', 'no'],
expectedArgsUpdate = {'color': 'no'},
wafArgs = [CMDNAME, '--color=no'] + CMNOPTS[1:],
),
dict(
args = [CMDNAME, 'sometask'],
expectedArgsUpdate = {'tasks': ['sometask']},
wafArgs = [CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, 'sometask', 'anothertask'],
expectedArgsUpdate = {'tasks': ['sometask', 'anothertask']},
wafArgs = [CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--buildroot', 'somedir'],
expectedArgsUpdate = {'buildroot' : 'somedir'},
wafArgs = [CMDNAME] + CMNOPTS,
),
]
self._assertAllsForCmd(CMDNAME, checks, baseExpectedArgs)
def testCmdTest(self):
baseExpectedArgs = {
'buildtype' : self.defaults['buildtype'],
'jobs' : None,
'configure': False,
'color': 'auto',
'clean': False,
'progress': False,
'cleanAll': False,
'distclean': False,
'tasks': [],
'verbose': 0,
'verboseConfigure' : None,
'verboseBuild' : None,
'withTests': 'yes',
'runTests': 'all',
'buildroot' : None,
'forceExternalDeps' : False,
'cacheCfgActionResults' : False,
}
CMDNAME = 'test'
CMNOPTS = ['--color=auto',]
checks = [
dict(
args = [CMDNAME],
expectedArgsUpdate = {},
wafArgs = ['build', CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '-b', 'release'],
expectedArgsUpdate = {'buildtype': 'release'},
wafArgs = ['build', CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--jobs', '22'],
expectedArgsUpdate = {'jobs': 22},
wafArgs = ['build', CMDNAME, '--jobs=22'] + CMNOPTS,
),
dict(
args = [CMDNAME, '--verbose'],
expectedArgsUpdate = {'verbose': 1},
wafArgs = ['build', CMDNAME, '-v'] + CMNOPTS,
),
dict(
args = [CMDNAME, '-vvv'],
expectedArgsUpdate = {'verbose': 3},
wafArgs = ['build', CMDNAME, '-vvv'] + CMNOPTS,
),
dict(
args = [CMDNAME, '--configure'],
expectedArgsUpdate = {'configure': True},
wafArgs = ['configure', 'build', CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--clean'],
expectedArgsUpdate = {'clean': True},
wafArgs = ['clean', 'build', CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--clean-all'],
expectedArgsUpdate = {'cleanAll': True},
wafArgs = ['cleanall', 'build', CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--distclean'],
expectedArgsUpdate = {'distclean': True},
wafArgs = ['distclean', 'build', CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--with-tests', 'no'],
expectedArgsUpdate = {'withTests': 'no'},
wafArgs = ['build', CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--run-tests', 'none'],
expectedArgsUpdate = {'runTests': 'none'},
wafArgs = ['build', CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--run-tests', 'on-changes'],
expectedArgsUpdate = {'runTests': 'on-changes'},
wafArgs = ['build', CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--progress'],
expectedArgsUpdate = {'progress': True},
wafArgs = ['build', CMDNAME, '--progress'] + CMNOPTS,
),
dict(
args = [CMDNAME, '--color', 'no'],
expectedArgsUpdate = {'color': 'no'},
wafArgs = ['build', CMDNAME, '--color=no'],
),
dict(
args = [CMDNAME, 'sometask'],
expectedArgsUpdate = {'tasks': ['sometask']},
wafArgs = ['build', CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, 'sometask', 'anothertask'],
expectedArgsUpdate = {'tasks': ['sometask', 'anothertask']},
wafArgs = ['build', CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--buildroot', os.getcwd()],
expectedArgsUpdate = {'buildroot' : os.getcwd()},
wafArgs = ['build', CMDNAME] + CMNOPTS,
),
]
self._assertAllsForCmd(CMDNAME, checks, baseExpectedArgs)
def testCmdConfigure(self):
baseExpectedArgs = {
'buildtype' : self.defaults['buildtype'],
'color': 'auto',
'cleanAll': False,
'distclean': False,
'verbose': 0,
'verboseConfigure' : None,
'withTests': 'no',
'bindir' : None,
'libdir' : None,
'prefix' : cli.DEFAULT_PREFIX,
'buildroot' : None,
'forceExternalDeps' : False,
'cacheCfgActionResults' : False,
'force' : False,
}
CMDNAME = 'configure'
CMNOPTS = ['--color=auto', '--prefix=' + cli.DEFAULT_PREFIX]
checks = [
dict(
args = [CMDNAME],
expectedArgsUpdate = {},
wafArgs = [CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '-b', 'release'],
expectedArgsUpdate = {'buildtype': 'release'},
wafArgs = [CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--clean-all'],
expectedArgsUpdate = {'cleanAll': True},
wafArgs = ['cleanall', CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--distclean'],
expectedArgsUpdate = {'distclean': True},
wafArgs = ['distclean', CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--verbose'],
expectedArgsUpdate = {'verbose': 1},
wafArgs = [CMDNAME, '-v'] + CMNOPTS,
),
dict(
args = [CMDNAME, '-vvv'],
expectedArgsUpdate = {'verbose': 3},
wafArgs = [CMDNAME, '-vvv'] + CMNOPTS,
),
dict(
args = [CMDNAME, '--color', 'no'],
expectedArgsUpdate = {'color': 'no'},
wafArgs = [CMDNAME, '--color=no'] + CMNOPTS[1:],
),
dict(
args = [CMDNAME, '--buildroot', os.getcwd()],
expectedArgsUpdate = {'buildroot' : os.getcwd()},
wafArgs = [CMDNAME] + CMNOPTS,
),
]
self._assertAllsForCmd(CMDNAME, checks, baseExpectedArgs)
def testCmdClean(self):
baseExpectedArgs = {
'buildtype' : self.defaults['buildtype'],
'color': 'auto',
'verbose': 0,
'buildroot' : None,
'forceExternalDeps' : False,
}
CMDNAME = 'clean'
CMNOPTS = ['--color=auto',]
checks = [
dict(
args = [CMDNAME],
expectedArgsUpdate = {},
wafArgs = [CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '-b', 'release'],
expectedArgsUpdate = {'buildtype': 'release'},
wafArgs = [CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--verbose'],
expectedArgsUpdate = {'verbose': 1},
wafArgs = [CMDNAME, '-v'] + CMNOPTS,
),
dict(
args = [CMDNAME, '-vvv'],
expectedArgsUpdate = {'verbose': 3},
wafArgs = [CMDNAME, '-vvv'] + CMNOPTS,
),
dict(
args = [CMDNAME, '--color', 'no'],
expectedArgsUpdate = {'color': 'no'},
wafArgs = [CMDNAME, '--color=no'],
),
dict(
args = [CMDNAME, '--buildroot', os.getcwd()],
expectedArgsUpdate = {'buildroot' : os.getcwd()},
wafArgs = [CMDNAME] + CMNOPTS,
),
]
self._assertAllsForCmd(CMDNAME, checks, baseExpectedArgs)
def testCmdDistclean(self):
baseExpectedArgs = {
'color': 'auto',
'verbose': 0,
'buildroot' : None,
}
CMDNAME = 'distclean'
CMNOPTS = ['--color=auto',]
checks = [
dict(
args = [CMDNAME],
expectedArgsUpdate = {},
wafArgs = [CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--verbose'],
expectedArgsUpdate = {'verbose': 1},
wafArgs = [CMDNAME, '-v'] + CMNOPTS,
),
dict(
args = [CMDNAME, '-vvv'],
expectedArgsUpdate = {'verbose': 3},
wafArgs = [CMDNAME, '-vvv'] + CMNOPTS,
),
dict(
args = [CMDNAME, '--color', 'no'],
expectedArgsUpdate = {'color': 'no'},
wafArgs = [CMDNAME, '--color=no'],
),
dict(
args = [CMDNAME, '--buildroot', os.getcwd()],
expectedArgsUpdate = {'buildroot' : os.getcwd()},
wafArgs = [CMDNAME] + CMNOPTS,
),
]
self._assertAllsForCmd(CMDNAME, checks, baseExpectedArgs)
def testCmdZipApp(self):
baseExpectedArgs = {
'destdir' : '.',
'color': 'auto',
'verbose': 0,
}
CMDNAME = 'zipapp'
checks = [
dict(
args = [CMDNAME],
expectedArgsUpdate = { 'destdir' : CWD },
),
dict(
args = [CMDNAME, '--destdir', 'somedir'],
expectedArgsUpdate = {'destdir' : joinpath(CWD, 'somedir') },
),
dict(
args = [CMDNAME, '--verbose'],
expectedArgsUpdate = {'verbose': 1, 'destdir' : CWD},
),
dict(
args = [CMDNAME, '--color', 'no'],
expectedArgsUpdate = {'color': 'no', 'destdir' : CWD},
),
]
self._assertAllsForCmd(CMDNAME, checks, baseExpectedArgs)
def checkCmdInstall(self, cmd):
baseExpectedArgs = {
'buildtype' : self.defaults['buildtype'],
'jobs' : None,
'color': 'auto',
'configure': False,
'clean': False,
'progress': False,
'cleanAll': False,
'distclean': False,
'verbose': 0,
'verboseConfigure' : None,
'verboseBuild' : None,
'destdir' : '',
'bindir' : None,
'libdir' : None,
'prefix' : cli.DEFAULT_PREFIX,
'buildroot' : None,
'forceExternalDeps' : False,
'cacheCfgActionResults' : False,
}
if cmd == 'uninstall':
for name in ('configure', 'jobs', 'clean', 'cleanAll', 'distclean'):
baseExpectedArgs.pop(name)
CMDNAME = cmd
CMNOPTS = ['--color=auto', '--prefix=' + cli.DEFAULT_PREFIX]
checks = [
dict(
args = [CMDNAME],
expectedArgsUpdate = {},
wafArgs = [CMDNAME] + CMNOPTS,
),
dict(
args = [CMDNAME, '--destdir', 'somedir'],
expectedArgsUpdate = {'destdir' : joinpath(CWD, 'somedir') },
wafArgs = [CMDNAME, '--destdir=' + joinpath(CWD, 'somedir')] + CMNOPTS,
),
dict(
args = [CMDNAME, '--bindir', 'somedir'],
expectedArgsUpdate = {'bindir' : 'somedir' },
wafArgs = [CMDNAME, '--bindir=' + 'somedir'] + CMNOPTS,
),
dict(
args = [CMDNAME, '--libdir', 'somedir'],
expectedArgsUpdate = {'libdir' : 'somedir' },
wafArgs = [CMDNAME, '--libdir=' + 'somedir'] + CMNOPTS,
),
dict(
args = [CMDNAME, '--verbose'],
expectedArgsUpdate = {'verbose': 1},
wafArgs = [CMDNAME, '-v'] + CMNOPTS,
),
dict(
args = [CMDNAME, '--color', 'no'],
expectedArgsUpdate = {'color': 'no'},
wafArgs = [CMDNAME, '--color=no'] + CMNOPTS[1:],
),
dict(
args = [CMDNAME, '--buildroot', os.getcwd()],
expectedArgsUpdate = {'buildroot' : os.getcwd()},
wafArgs = [CMDNAME] + CMNOPTS,
),
]
self._assertAllsForCmd(CMDNAME, checks, baseExpectedArgs)
    def testCmdInstall(self):
        """'install' exercises the shared install/uninstall checks."""
        self.checkCmdInstall('install')
    def testCmdUninstall(self):
        """'uninstall' exercises the shared install/uninstall checks."""
        self.checkCmdInstall('uninstall')
    def testCmdVersion(self):
        """'version' maps straight to the waf 'version' command."""
        baseExpectedArgs = {
            'verbose': 0,
        }
        CMDNAME = 'version'
        checks = [
            dict(
                args = [CMDNAME],
                expectedArgsUpdate = {},
                wafArgs = [CMDNAME],
            ),
            dict(
                args = [CMDNAME, '--verbose'],
                expectedArgsUpdate = {'verbose': 1},
                wafArgs = [CMDNAME, '-v'],
            ),
        ]
        self._assertAllsForCmd(CMDNAME, checks, baseExpectedArgs)
    def testCmdSysInfo(self):
        """'sysinfo' maps straight to the waf 'sysinfo' command."""
        baseExpectedArgs = {
            'verbose': 0,
        }
        CMDNAME = 'sysinfo'
        checks = [
            dict(
                args = [CMDNAME],
                expectedArgsUpdate = {},
                wafArgs = [CMDNAME],
            ),
            dict(
                args = [CMDNAME, '--verbose'],
                expectedArgsUpdate = {'verbose': 1},
                wafArgs = [CMDNAME, '-v'],
            ),
        ]
        self._assertAllsForCmd(CMDNAME, checks, baseExpectedArgs)
|
nilq/baby-python
|
python
|
from Tkinter import *
root = Tk()
# Text shown on the OptionMenu button; starts as a placeholder.
var = StringVar()
var.set("Site View")
# Exchange sites offered in the drop-down menu.
names = ('C-cex','Bittrex')
def ffet(param):
    # Display the chosen site name on the OptionMenu button and echo it to
    # the console.
    var.set(param)
    print(param)
# Appends names to names list and updates OptionMenu
#def createName(n):
# names.append(n)
# personName.delete(0, "end")
# menu = nameMenu['menu']
# menu.delete(0, "end")
# for name in names:
# menu.add_command(label=name, command=lambda: ffet(name))
# what to run when a name is selected
#def selection(name):
# var.set(name)
# print "Running" # For testing purposes to see when/if selection runs
# print name
# Option Menu for names
nameMenu = OptionMenu(root, var, ())
nameMenu.grid(row=0, column=0, columnspan=2)
nameMenu.config(width=20)
menu = nameMenu.children['menu']
menu.delete(0, "end")
#mlabel = nameMenu.children['label']
#for name in names:
# menu.add_command(label=name, command=lambda v=name: nameMenu.choice.set(v))
for name in names:
menu.add_command(label='Waht is this for', command=lambda v=name: ffet(v))
# Entry for user to submit name
#Label(root, text="Name").grid(row=1, column=0)
#personName = Entry(root, width=17)
#personName.grid(row=1, column=1)
# Add person Button
#Button(root, text="Add Person", width= 20, command=lambda: createName(personName.get())).grid(row=5, column=0, columnspan=2)
mainloop()
|
nilq/baby-python
|
python
|
import random
import config
def shuffle_characters():
    """Return config.BASE_STRING with its characters in a random order."""
    pool = list(config.BASE_STRING)
    random.shuffle(pool)
    return ''.join(pool)
|
nilq/baby-python
|
python
|
from . import common, core3
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
#
# euclid graphics maths module
#
# Copyright (c) 2006 Alex Holkner
# Alex.Holkner@mail.google.com
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 2.1 of the License, or (at your
# option) any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
'''euclid graphics maths module
Documentation and tests are included in the file "euclid.txt", or online
at http://code.google.com/p/pyeuclid
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
__revision__ = '$Revision$'
import math
import operator
import types
# Some magic here. If _use_slots is True, the classes will derive from
# object and will define a __slots__ class variable. If _use_slots is
# False, classes will be old-style and will not define __slots__.
#
# _use_slots = True: Memory efficient, probably faster in future versions
# of Python, "better".
# _use_slots = False: Ordinary classes, much faster than slots in current
# versions of Python (2.4 and 2.5).
_use_slots = True
# If True, allows components of Vector2 and Vector3 to be set via swizzling;
# e.g. v.xyz = (1, 2, 3). This is much, much slower than the more verbose
# v.x = 1; v.y = 2; v.z = 3, and slows down ordinary element setting as
# well. Recommended setting is False.
_enable_swizzle_set = False
# Requires class to derive from object.
if _enable_swizzle_set:
_use_slots = True
# Implement _use_slots magic.
class _EuclidMetaclass(type):
    """Metaclass implementing the module's _use_slots switch.

    When _use_slots is true, each class is rebuilt as a new-style class
    (object is appended to its bases) and keeps its __slots__; because
    slotted instances have no __dict__, pickling support is synthesized
    from the slot names.  When _use_slots is false, the class is created
    as an old-style Python 2 class and __slots__ is dropped.
    """
    def __new__(cls, name, bases, dct):
        if '__slots__' in dct:
            # Slotted instances cannot be pickled by default; generate
            # explicit __getstate__/__setstate__ from the slot names.
            dct['__getstate__'] = cls._create_getstate(dct['__slots__'])
            dct['__setstate__'] = cls._create_setstate(dct['__slots__'])
        if _use_slots:
            return type.__new__(cls, name, bases + (object,), dct)
        else:
            if '__slots__' in dct:
                del dct['__slots__']
            # Old-style (Python 2) class creation path.
            return types.ClassType.__new__(types.ClassType, name, bases, dct)

    @classmethod
    def _create_getstate(cls, slots):
        # Build a __getstate__ that returns {slot name: value}.
        def __getstate__(self):
            d = {}
            for slot in slots:
                d[slot] = getattr(self, slot)
            return d
        return __getstate__

    @classmethod
    def _create_setstate(cls, slots):
        # Build a __setstate__ that restores each saved slot.
        def __setstate__(self, state):
            for name, value in state.items():
                setattr(self, name, value)
        return __setstate__
__metaclass__ = _EuclidMetaclass
class Vector2:
__slots__ = ['x', 'y']
__hash__ = None
def __init__(self, x=0, y=0):
self.x = x
self.y = y
def __copy__(self):
return self.__class__(self.x, self.y)
copy = __copy__
def __repr__(self):
return 'Vector2(%.2f, %.2f)' % (self.x, self.y)
def __eq__(self, other):
if isinstance(other, Vector2):
return self.x == other.x and \
self.y == other.y
else:
assert hasattr(other, '__len__') and len(other) == 2
return self.x == other[0] and \
self.y == other[1]
def __ne__(self, other):
return not self.__eq__(other)
def __nonzero__(self):
return self.x != 0 or self.y != 0
def __len__(self):
return 2
def __getitem__(self, key):
return (self.x, self.y)[key]
def __setitem__(self, key, value):
l = [self.x, self.y]
l[key] = value
self.x, self.y = l
def __iter__(self):
return iter((self.x, self.y))
def __getattr__(self, name):
try:
return tuple([(self.x, self.y)['xy'.index(c)] \
for c in name])
except ValueError:
raise AttributeError, name
if _enable_swizzle_set:
# This has detrimental performance on ordinary setattr as well
# if enabled
def __setattr__(self, name, value):
if len(name) == 1:
object.__setattr__(self, name, value)
else:
try:
l = [self.x, self.y]
for c, v in map(None, name, value):
l['xy'.index(c)] = v
self.x, self.y = l
except ValueError:
raise AttributeError, name
def __add__(self, other):
if isinstance(other, Vector2):
# Vector + Vector -> Vector
# Vector + Point -> Point
# Point + Point -> Vector
if self.__class__ is other.__class__:
_class = Vector2
else:
_class = Point2
return _class(self.x + other.x,
self.y + other.y)
else:
assert hasattr(other, '__len__') and len(other) == 2
return Vector2(self.x + other[0],
self.y + other[1])
__radd__ = __add__
def __iadd__(self, other):
if isinstance(other, Vector2):
self.x += other.x
self.y += other.y
else:
self.x += other[0]
self.y += other[1]
return self
def __sub__(self, other):
if isinstance(other, Vector2):
# Vector - Vector -> Vector
# Vector - Point -> Point
# Point - Point -> Vector
if self.__class__ is other.__class__:
_class = Vector2
else:
_class = Point2
return _class(self.x - other.x,
self.y - other.y)
else:
assert hasattr(other, '__len__') and len(other) == 2
return Vector2(self.x - other[0],
self.y - other[1])
def __rsub__(self, other):
if isinstance(other, Vector2):
return Vector2(other.x - self.x,
other.y - self.y)
else:
assert hasattr(other, '__len__') and len(other) == 2
return Vector2(other.x - self[0],
other.y - self[1])
def __mul__(self, other):
assert type(other) in (int, long, float)
return Vector2(self.x * other,
self.y * other)
__rmul__ = __mul__
def __imul__(self, other):
assert type(other) in (int, long, float)
self.x *= other
self.y *= other
return self
def __div__(self, other):
assert type(other) in (int, long, float)
return Vector2(operator.div(self.x, other),
operator.div(self.y, other))
def __rdiv__(self, other):
assert type(other) in (int, long, float)
return Vector2(operator.div(other, self.x),
operator.div(other, self.y))
def __floordiv__(self, other):
assert type(other) in (int, long, float)
return Vector2(operator.floordiv(self.x, other),
operator.floordiv(self.y, other))
def __rfloordiv__(self, other):
assert type(other) in (int, long, float)
return Vector2(operator.floordiv(other, self.x),
operator.floordiv(other, self.y))
def __truediv__(self, other):
assert type(other) in (int, long, float)
return Vector2(operator.truediv(self.x, other),
operator.truediv(self.y, other))
def __rtruediv__(self, other):
assert type(other) in (int, long, float)
return Vector2(operator.truediv(other, self.x),
operator.truediv(other, self.y))
def __neg__(self):
return Vector2(-self.x,
-self.y)
__pos__ = __copy__
def __abs__(self):
return math.sqrt(self.x ** 2 + \
self.y ** 2)
magnitude = __abs__
def magnitude_squared(self):
return self.x ** 2 + \
self.y ** 2
def normalize(self):
d = self.magnitude()
if d:
self.x /= d
self.y /= d
return self
def normalized(self):
d = self.magnitude()
if d:
return Vector2(self.x / d,
self.y / d)
return self.copy()
def dot(self, other):
assert isinstance(other, Vector2)
return self.x * other.x + \
self.y * other.y
def cross(self):
return Vector2(self.y, -self.x)
def reflect(self, normal):
# assume normal is normalized
assert isinstance(normal, Vector2)
d = 2 * (self.x * normal.x + self.y * normal.y)
return Vector2(self.x - d * normal.x,
self.y - d * normal.y)
def angle(self, other):
"""Return the angle to the vector other"""
return math.acos(self.dot(other) / (self.magnitude()*other.magnitude()))
def project(self, other):
"""Return one vector projected on the vector other"""
n = other.normalized()
return self.dot(n)*n
class Vector3:
__slots__ = ['x', 'y', 'z']
__hash__ = None
def __init__(self, x=0, y=0, z=0):
self.x = x
self.y = y
self.z = z
def __copy__(self):
return self.__class__(self.x, self.y, self.z)
copy = __copy__
def __repr__(self):
return 'Vector3(%.2f, %.2f, %.2f)' % (self.x,
self.y,
self.z)
def __eq__(self, other):
if isinstance(other, Vector3):
return self.x == other.x and \
self.y == other.y and \
self.z == other.z
else:
assert hasattr(other, '__len__') and len(other) == 3
return self.x == other[0] and \
self.y == other[1] and \
self.z == other[2]
def __ne__(self, other):
return not self.__eq__(other)
def __nonzero__(self):
return self.x != 0 or self.y != 0 or self.z != 0
def __len__(self):
return 3
def __getitem__(self, key):
return (self.x, self.y, self.z)[key]
def __setitem__(self, key, value):
l = [self.x, self.y, self.z]
l[key] = value
self.x, self.y, self.z = l
def __iter__(self):
return iter((self.x, self.y, self.z))
def __getattr__(self, name):
try:
return tuple([(self.x, self.y, self.z)['xyz'.index(c)] \
for c in name])
except ValueError:
raise AttributeError, name
if _enable_swizzle_set:
# This has detrimental performance on ordinary setattr as well
# if enabled
def __setattr__(self, name, value):
if len(name) == 1:
object.__setattr__(self, name, value)
else:
try:
l = [self.x, self.y, self.z]
for c, v in map(None, name, value):
l['xyz'.index(c)] = v
self.x, self.y, self.z = l
except ValueError:
raise AttributeError, name
def __add__(self, other):
if isinstance(other, Vector3):
# Vector + Vector -> Vector
# Vector + Point -> Point
# Point + Point -> Vector
if self.__class__ is other.__class__:
_class = Vector3
else:
_class = Point3
return _class(self.x + other.x,
self.y + other.y,
self.z + other.z)
else:
assert hasattr(other, '__len__') and len(other) == 3
return Vector3(self.x + other[0],
self.y + other[1],
self.z + other[2])
__radd__ = __add__
def __iadd__(self, other):
if isinstance(other, Vector3):
self.x += other.x
self.y += other.y
self.z += other.z
else:
self.x += other[0]
self.y += other[1]
self.z += other[2]
return self
def __sub__(self, other):
if isinstance(other, Vector3):
# Vector - Vector -> Vector
# Vector - Point -> Point
# Point - Point -> Vector
if self.__class__ is other.__class__:
_class = Vector3
else:
_class = Point3
return Vector3(self.x - other.x,
self.y - other.y,
self.z - other.z)
else:
assert hasattr(other, '__len__') and len(other) == 3
return Vector3(self.x - other[0],
self.y - other[1],
self.z - other[2])
def __rsub__(self, other):
if isinstance(other, Vector3):
return Vector3(other.x - self.x,
other.y - self.y,
other.z - self.z)
else:
assert hasattr(other, '__len__') and len(other) == 3
return Vector3(other.x - self[0],
other.y - self[1],
other.z - self[2])
def __mul__(self, other):
if isinstance(other, Vector3):
# TODO component-wise mul/div in-place and on Vector2; docs.
if self.__class__ is Point3 or other.__class__ is Point3:
_class = Point3
else:
_class = Vector3
return _class(self.x * other.x,
self.y * other.y,
self.z * other.z)
else:
assert type(other) in (int, long, float)
return Vector3(self.x * other,
self.y * other,
self.z * other)
__rmul__ = __mul__
def __imul__(self, other):
assert type(other) in (int, long, float)
self.x *= other
self.y *= other
self.z *= other
return self
def __div__(self, other):
assert type(other) in (int, long, float)
return Vector3(operator.div(self.x, other),
operator.div(self.y, other),
operator.div(self.z, other))
def __rdiv__(self, other):
assert type(other) in (int, long, float)
return Vector3(operator.div(other, self.x),
operator.div(other, self.y),
operator.div(other, self.z))
def __floordiv__(self, other):
assert type(other) in (int, long, float)
return Vector3(operator.floordiv(self.x, other),
operator.floordiv(self.y, other),
operator.floordiv(self.z, other))
def __rfloordiv__(self, other):
assert type(other) in (int, long, float)
return Vector3(operator.floordiv(other, self.x),
operator.floordiv(other, self.y),
operator.floordiv(other, self.z))
def __truediv__(self, other):
assert type(other) in (int, long, float)
return Vector3(operator.truediv(self.x, other),
operator.truediv(self.y, other),
operator.truediv(self.z, other))
def __rtruediv__(self, other):
assert type(other) in (int, long, float)
return Vector3(operator.truediv(other, self.x),
operator.truediv(other, self.y),
operator.truediv(other, self.z))
def __neg__(self):
return Vector3(-self.x,
-self.y,
-self.z)
__pos__ = __copy__
def __abs__(self):
return math.sqrt(self.x ** 2 + \
self.y ** 2 + \
self.z ** 2)
magnitude = __abs__
def magnitude_squared(self):
return self.x ** 2 + \
self.y ** 2 + \
self.z ** 2
def normalize(self):
d = self.magnitude()
if d:
self.x /= d
self.y /= d
self.z /= d
return self
def normalized(self):
d = self.magnitude()
if d:
return Vector3(self.x / d,
self.y / d,
self.z / d)
return self.copy()
def dot(self, other):
assert isinstance(other, Vector3)
return self.x * other.x + \
self.y * other.y + \
self.z * other.z
def cross(self, other):
assert isinstance(other, Vector3)
return Vector3(self.y * other.z - self.z * other.y,
-self.x * other.z + self.z * other.x,
self.x * other.y - self.y * other.x)
def reflect(self, normal):
# assume normal is normalized
assert isinstance(normal, Vector3)
d = 2 * (self.x * normal.x + self.y * normal.y + self.z * normal.z)
return Vector3(self.x - d * normal.x,
self.y - d * normal.y,
self.z - d * normal.z)
def rotate_around(self, axis, theta):
"""Return the vector rotated around axis through angle theta. Right hand rule applies"""
# Adapted from equations published by Glenn Murray.
# http://inside.mines.edu/~gmurray/ArbitraryAxisRotation/ArbitraryAxisRotation.html
x, y, z = self.x, self.y,self.z
u, v, w = axis.x, axis.y, axis.z
# Extracted common factors for simplicity and efficiency
r2 = u**2 + v**2 + w**2
r = math.sqrt(r2)
ct = math.cos(theta)
st = math.sin(theta) / r
dt = (u*x + v*y + w*z) * (1 - ct) / r2
return Vector3((u * dt + x * ct + (-w * y + v * z) * st),
(v * dt + y * ct + ( w * x - u * z) * st),
(w * dt + z * ct + (-v * x + u * y) * st))
def angle(self, other):
"""Return the angle to the vector other"""
return math.acos(self.dot(other) / (self.magnitude()*other.magnitude()))
def project(self, other):
"""Return one vector projected on the vector other"""
n = other.normalized()
return self.dot(n)*n
# a b c
# e f g
# i j k
class Matrix3:
    """A 3x3 matrix used for 2D affine transforms.

    Element attributes are laid out row-major:

        a b c
        e f g
        i j k

    but __getitem__/__setitem__ expose the elements in column-major
    (OpenGL-style) order.  Multiplication supports Matrix3 * Matrix3,
    Matrix3 * Point2 (applies translation) and Matrix3 * Vector2
    (rotation/scale only); anything else is delegated to the operand's
    _apply_transform hook.
    """
    __slots__ = list('abcefgijk')

    def __init__(self):
        # A fresh matrix starts as the identity.
        self.identity()

    def __copy__(self):
        M = Matrix3()
        M.a = self.a
        M.b = self.b
        M.c = self.c
        M.e = self.e
        M.f = self.f
        M.g = self.g
        M.i = self.i
        M.j = self.j
        M.k = self.k
        return M

    copy = __copy__

    def __repr__(self):
        return ('Matrix3([% 8.2f % 8.2f % 8.2f\n' \
                ' % 8.2f % 8.2f % 8.2f\n' \
                ' % 8.2f % 8.2f % 8.2f])') \
            % (self.a, self.b, self.c,
               self.e, self.f, self.g,
               self.i, self.j, self.k)

    def __getitem__(self, key):
        # Column-major flattening of the elements.
        return [self.a, self.e, self.i,
                self.b, self.f, self.j,
                self.c, self.g, self.k][key]

    def __setitem__(self, key, value):
        L = self[:]
        L[key] = value
        (self.a, self.e, self.i,
         self.b, self.f, self.j,
         self.c, self.g, self.k) = L

    def __mul__(self, other):
        if isinstance(other, Matrix3):
            # Caching repeatedly accessed attributes in local variables
            # apparently increases performance by 20%.  Attrib: Will McGugan.
            Aa = self.a
            Ab = self.b
            Ac = self.c
            Ae = self.e
            Af = self.f
            Ag = self.g
            Ai = self.i
            Aj = self.j
            Ak = self.k
            Ba = other.a
            Bb = other.b
            Bc = other.c
            Be = other.e
            Bf = other.f
            Bg = other.g
            Bi = other.i
            Bj = other.j
            Bk = other.k
            C = Matrix3()
            C.a = Aa * Ba + Ab * Be + Ac * Bi
            C.b = Aa * Bb + Ab * Bf + Ac * Bj
            C.c = Aa * Bc + Ab * Bg + Ac * Bk
            C.e = Ae * Ba + Af * Be + Ag * Bi
            C.f = Ae * Bb + Af * Bf + Ag * Bj
            C.g = Ae * Bc + Af * Bg + Ag * Bk
            C.i = Ai * Ba + Aj * Be + Ak * Bi
            C.j = Ai * Bb + Aj * Bf + Ak * Bj
            C.k = Ai * Bc + Aj * Bg + Ak * Bk
            return C
        elif isinstance(other, Point2):
            # Points receive the translation column (c, g).
            A = self
            B = other
            P = Point2(0, 0)
            P.x = A.a * B.x + A.b * B.y + A.c
            P.y = A.e * B.x + A.f * B.y + A.g
            return P
        elif isinstance(other, Vector2):
            # Vectors are direction-only: no translation applied.
            A = self
            B = other
            V = Vector2(0, 0)
            V.x = A.a * B.x + A.b * B.y
            V.y = A.e * B.x + A.f * B.y
            return V
        else:
            # Geometry objects implement _apply_transform themselves.
            other = other.copy()
            other._apply_transform(self)
            return other

    def __imul__(self, other):
        assert isinstance(other, Matrix3)
        # Cache attributes in local vars (see Matrix3.__mul__).
        Aa = self.a
        Ab = self.b
        Ac = self.c
        Ae = self.e
        Af = self.f
        Ag = self.g
        Ai = self.i
        Aj = self.j
        Ak = self.k
        Ba = other.a
        Bb = other.b
        Bc = other.c
        Be = other.e
        Bf = other.f
        Bg = other.g
        Bi = other.i
        Bj = other.j
        Bk = other.k
        self.a = Aa * Ba + Ab * Be + Ac * Bi
        self.b = Aa * Bb + Ab * Bf + Ac * Bj
        self.c = Aa * Bc + Ab * Bg + Ac * Bk
        self.e = Ae * Ba + Af * Be + Ag * Bi
        self.f = Ae * Bb + Af * Bf + Ag * Bj
        self.g = Ae * Bc + Af * Bg + Ag * Bk
        self.i = Ai * Ba + Aj * Be + Ak * Bi
        self.j = Ai * Bb + Aj * Bf + Ak * Bj
        self.k = Ai * Bc + Aj * Bg + Ak * Bk
        return self

    def identity(self):
        """Reset to the identity matrix in place; returns self."""
        self.a = self.f = self.k = 1.
        self.b = self.c = self.e = self.g = self.i = self.j = 0
        return self

    def scale(self, x, y):
        # In-place post-multiplication by a scale matrix.
        self *= Matrix3.new_scale(x, y)
        return self

    def translate(self, x, y):
        # In-place post-multiplication by a translation matrix.
        self *= Matrix3.new_translate(x, y)
        return self

    def rotate(self, angle):
        # In-place post-multiplication by a rotation matrix (radians).
        self *= Matrix3.new_rotate(angle)
        return self

    # Static constructors
    def new_identity(cls):
        self = cls()
        return self
    new_identity = classmethod(new_identity)

    def new_scale(cls, x, y):
        self = cls()
        self.a = x
        self.f = y
        return self
    new_scale = classmethod(new_scale)

    def new_translate(cls, x, y):
        self = cls()
        self.c = x
        self.g = y
        return self
    new_translate = classmethod(new_translate)

    def new_rotate(cls, angle):
        # Counter-clockwise rotation by `angle` radians.
        self = cls()
        s = math.sin(angle)
        c = math.cos(angle)
        self.a = self.f = c
        self.b = -s
        self.e = s
        return self
    new_rotate = classmethod(new_rotate)

    def determinant(self):
        # Rule of Sarrus expansion.
        return (self.a*self.f*self.k
                + self.b*self.g*self.i
                + self.c*self.e*self.j
                - self.a*self.g*self.j
                - self.b*self.e*self.k
                - self.c*self.f*self.i)

    def inverse(self):
        """Return the inverse matrix; near-singular input yields identity."""
        tmp = Matrix3()
        d = self.determinant()

        if abs(d) < 0.001:
            # No inverse, return identity
            return tmp
        else:
            d = 1.0 / d
            # Adjugate divided by the determinant.
            tmp.a = d * (self.f*self.k - self.g*self.j)
            tmp.b = d * (self.c*self.j - self.b*self.k)
            tmp.c = d * (self.b*self.g - self.c*self.f)
            tmp.e = d * (self.g*self.i - self.e*self.k)
            tmp.f = d * (self.a*self.k - self.c*self.i)
            tmp.g = d * (self.c*self.e - self.a*self.g)
            tmp.i = d * (self.e*self.j - self.f*self.i)
            tmp.j = d * (self.b*self.i - self.a*self.j)
            tmp.k = d * (self.a*self.f - self.b*self.e)
            return tmp
# a b c d
# e f g h
# i j k l
# m n o p
class Matrix4:
    """A 4x4 matrix used for 3D affine and projective transforms.

    Element attributes are laid out row-major:

        a b c d
        e f g h
        i j k l
        m n o p

    but __getitem__/__setitem__ expose the elements in column-major
    (OpenGL-style) order.  Multiplication supports Matrix4 * Matrix4,
    Matrix4 * Point3 (applies translation) and Matrix4 * Vector3
    (rotation/scale only); anything else is delegated to the operand's
    _apply_transform hook.  transform() additionally performs the
    perspective (w) divide.
    """
    __slots__ = list('abcdefghijklmnop')

    def __init__(self):
        # A fresh matrix starts as the identity.
        self.identity()

    def __copy__(self):
        M = Matrix4()
        M.a = self.a
        M.b = self.b
        M.c = self.c
        M.d = self.d
        M.e = self.e
        M.f = self.f
        M.g = self.g
        M.h = self.h
        M.i = self.i
        M.j = self.j
        M.k = self.k
        M.l = self.l
        M.m = self.m
        M.n = self.n
        M.o = self.o
        M.p = self.p
        return M

    copy = __copy__

    def __repr__(self):
        return ('Matrix4([% 8.2f % 8.2f % 8.2f % 8.2f\n' \
                ' % 8.2f % 8.2f % 8.2f % 8.2f\n' \
                ' % 8.2f % 8.2f % 8.2f % 8.2f\n' \
                ' % 8.2f % 8.2f % 8.2f % 8.2f])') \
            % (self.a, self.b, self.c, self.d,
               self.e, self.f, self.g, self.h,
               self.i, self.j, self.k, self.l,
               self.m, self.n, self.o, self.p)

    def __getitem__(self, key):
        # Column-major flattening of the elements.
        return [self.a, self.e, self.i, self.m,
                self.b, self.f, self.j, self.n,
                self.c, self.g, self.k, self.o,
                self.d, self.h, self.l, self.p][key]

    def __setitem__(self, key, value):
        L = self[:]
        L[key] = value
        (self.a, self.e, self.i, self.m,
         self.b, self.f, self.j, self.n,
         self.c, self.g, self.k, self.o,
         self.d, self.h, self.l, self.p) = L

    def __mul__(self, other):
        if isinstance(other, Matrix4):
            # Cache attributes in local vars (see Matrix3.__mul__).
            Aa = self.a
            Ab = self.b
            Ac = self.c
            Ad = self.d
            Ae = self.e
            Af = self.f
            Ag = self.g
            Ah = self.h
            Ai = self.i
            Aj = self.j
            Ak = self.k
            Al = self.l
            Am = self.m
            An = self.n
            Ao = self.o
            Ap = self.p
            Ba = other.a
            Bb = other.b
            Bc = other.c
            Bd = other.d
            Be = other.e
            Bf = other.f
            Bg = other.g
            Bh = other.h
            Bi = other.i
            Bj = other.j
            Bk = other.k
            Bl = other.l
            Bm = other.m
            Bn = other.n
            Bo = other.o
            Bp = other.p
            C = Matrix4()
            C.a = Aa * Ba + Ab * Be + Ac * Bi + Ad * Bm
            C.b = Aa * Bb + Ab * Bf + Ac * Bj + Ad * Bn
            C.c = Aa * Bc + Ab * Bg + Ac * Bk + Ad * Bo
            C.d = Aa * Bd + Ab * Bh + Ac * Bl + Ad * Bp
            C.e = Ae * Ba + Af * Be + Ag * Bi + Ah * Bm
            C.f = Ae * Bb + Af * Bf + Ag * Bj + Ah * Bn
            C.g = Ae * Bc + Af * Bg + Ag * Bk + Ah * Bo
            C.h = Ae * Bd + Af * Bh + Ag * Bl + Ah * Bp
            C.i = Ai * Ba + Aj * Be + Ak * Bi + Al * Bm
            C.j = Ai * Bb + Aj * Bf + Ak * Bj + Al * Bn
            C.k = Ai * Bc + Aj * Bg + Ak * Bk + Al * Bo
            C.l = Ai * Bd + Aj * Bh + Ak * Bl + Al * Bp
            C.m = Am * Ba + An * Be + Ao * Bi + Ap * Bm
            C.n = Am * Bb + An * Bf + Ao * Bj + Ap * Bn
            C.o = Am * Bc + An * Bg + Ao * Bk + Ap * Bo
            C.p = Am * Bd + An * Bh + Ao * Bl + Ap * Bp
            return C
        elif isinstance(other, Point3):
            # Points receive the translation column (d, h, l); no w divide
            # here -- use transform() for projective matrices.
            A = self
            B = other
            P = Point3(0, 0, 0)
            P.x = A.a * B.x + A.b * B.y + A.c * B.z + A.d
            P.y = A.e * B.x + A.f * B.y + A.g * B.z + A.h
            P.z = A.i * B.x + A.j * B.y + A.k * B.z + A.l
            return P
        elif isinstance(other, Vector3):
            # Vectors are direction-only: no translation applied.
            A = self
            B = other
            V = Vector3(0, 0, 0)
            V.x = A.a * B.x + A.b * B.y + A.c * B.z
            V.y = A.e * B.x + A.f * B.y + A.g * B.z
            V.z = A.i * B.x + A.j * B.y + A.k * B.z
            return V
        else:
            # Geometry objects implement _apply_transform themselves.
            other = other.copy()
            other._apply_transform(self)
            return other

    def __imul__(self, other):
        assert isinstance(other, Matrix4)
        # Cache attributes in local vars (see Matrix3.__mul__).
        Aa = self.a
        Ab = self.b
        Ac = self.c
        Ad = self.d
        Ae = self.e
        Af = self.f
        Ag = self.g
        Ah = self.h
        Ai = self.i
        Aj = self.j
        Ak = self.k
        Al = self.l
        Am = self.m
        An = self.n
        Ao = self.o
        Ap = self.p
        Ba = other.a
        Bb = other.b
        Bc = other.c
        Bd = other.d
        Be = other.e
        Bf = other.f
        Bg = other.g
        Bh = other.h
        Bi = other.i
        Bj = other.j
        Bk = other.k
        Bl = other.l
        Bm = other.m
        Bn = other.n
        Bo = other.o
        Bp = other.p
        self.a = Aa * Ba + Ab * Be + Ac * Bi + Ad * Bm
        self.b = Aa * Bb + Ab * Bf + Ac * Bj + Ad * Bn
        self.c = Aa * Bc + Ab * Bg + Ac * Bk + Ad * Bo
        self.d = Aa * Bd + Ab * Bh + Ac * Bl + Ad * Bp
        self.e = Ae * Ba + Af * Be + Ag * Bi + Ah * Bm
        self.f = Ae * Bb + Af * Bf + Ag * Bj + Ah * Bn
        self.g = Ae * Bc + Af * Bg + Ag * Bk + Ah * Bo
        self.h = Ae * Bd + Af * Bh + Ag * Bl + Ah * Bp
        self.i = Ai * Ba + Aj * Be + Ak * Bi + Al * Bm
        self.j = Ai * Bb + Aj * Bf + Ak * Bj + Al * Bn
        self.k = Ai * Bc + Aj * Bg + Ak * Bk + Al * Bo
        self.l = Ai * Bd + Aj * Bh + Ak * Bl + Al * Bp
        self.m = Am * Ba + An * Be + Ao * Bi + Ap * Bm
        self.n = Am * Bb + An * Bf + Ao * Bj + Ap * Bn
        self.o = Am * Bc + An * Bg + Ao * Bk + Ap * Bo
        self.p = Am * Bd + An * Bh + Ao * Bl + Ap * Bp
        return self

    def transform(self, other):
        """Transform a point with full perspective (w) divide."""
        A = self
        B = other
        P = Point3(0, 0, 0)
        P.x = A.a * B.x + A.b * B.y + A.c * B.z + A.d
        P.y = A.e * B.x + A.f * B.y + A.g * B.z + A.h
        P.z = A.i * B.x + A.j * B.y + A.k * B.z + A.l
        w = A.m * B.x + A.n * B.y + A.o * B.z + A.p
        if w != 0:
            P.x /= w
            P.y /= w
            P.z /= w
        return P

    def identity(self):
        """Reset to the identity matrix in place; returns self."""
        self.a = self.f = self.k = self.p = 1.
        self.b = self.c = self.d = self.e = self.g = self.h = \
            self.i = self.j = self.l = self.m = self.n = self.o = 0
        return self

    def scale(self, x, y, z):
        # In-place post-multiplication by a scale matrix.
        self *= Matrix4.new_scale(x, y, z)
        return self

    def translate(self, x, y, z):
        # In-place post-multiplication by a translation matrix.
        self *= Matrix4.new_translate(x, y, z)
        return self

    def rotatex(self, angle):
        self *= Matrix4.new_rotatex(angle)
        return self

    def rotatey(self, angle):
        self *= Matrix4.new_rotatey(angle)
        return self

    def rotatez(self, angle):
        self *= Matrix4.new_rotatez(angle)
        return self

    def rotate_axis(self, angle, axis):
        self *= Matrix4.new_rotate_axis(angle, axis)
        return self

    def rotate_euler(self, heading, attitude, bank):
        self *= Matrix4.new_rotate_euler(heading, attitude, bank)
        return self

    def rotate_triple_axis(self, x, y, z):
        self *= Matrix4.new_rotate_triple_axis(x, y, z)
        return self

    def transpose(self):
        """Transpose in place (swap rows and columns)."""
        (self.a, self.e, self.i, self.m,
         self.b, self.f, self.j, self.n,
         self.c, self.g, self.k, self.o,
         self.d, self.h, self.l, self.p) = \
            (self.a, self.b, self.c, self.d,
             self.e, self.f, self.g, self.h,
             self.i, self.j, self.k, self.l,
             self.m, self.n, self.o, self.p)

    def transposed(self):
        """Return a transposed copy."""
        M = self.copy()
        M.transpose()
        return M

    # Static constructors
    def new(cls, *values):
        # Build from 16 values given in column-major order (see __setitem__).
        M = cls()
        M[:] = values
        return M
    new = classmethod(new)

    def new_identity(cls):
        self = cls()
        return self
    new_identity = classmethod(new_identity)

    def new_scale(cls, x, y, z):
        self = cls()
        self.a = x
        self.f = y
        self.k = z
        return self
    new_scale = classmethod(new_scale)

    def new_translate(cls, x, y, z):
        self = cls()
        self.d = x
        self.h = y
        self.l = z
        return self
    new_translate = classmethod(new_translate)

    def new_rotatex(cls, angle):
        # Rotation about the +x axis by `angle` radians.
        self = cls()
        s = math.sin(angle)
        c = math.cos(angle)
        self.f = self.k = c
        self.g = -s
        self.j = s
        return self
    new_rotatex = classmethod(new_rotatex)

    def new_rotatey(cls, angle):
        # Rotation about the +y axis by `angle` radians.
        self = cls()
        s = math.sin(angle)
        c = math.cos(angle)
        self.a = self.k = c
        self.c = s
        self.i = -s
        return self
    new_rotatey = classmethod(new_rotatey)

    def new_rotatez(cls, angle):
        # Rotation about the +z axis by `angle` radians.
        self = cls()
        s = math.sin(angle)
        c = math.cos(angle)
        self.a = self.f = c
        self.b = -s
        self.e = s
        return self
    new_rotatez = classmethod(new_rotatez)

    def new_rotate_axis(cls, angle, axis):
        # Rotation about an arbitrary axis; the axis is normalized first.
        assert(isinstance(axis, Vector3))
        vector = axis.normalized()
        x = vector.x
        y = vector.y
        z = vector.z

        self = cls()
        s = math.sin(angle)
        c = math.cos(angle)
        c1 = 1. - c

        # from the glRotate man page
        self.a = x * x * c1 + c
        self.b = x * y * c1 - z * s
        self.c = x * z * c1 + y * s
        self.e = y * x * c1 + z * s
        self.f = y * y * c1 + c
        self.g = y * z * c1 - x * s
        self.i = x * z * c1 - y * s
        self.j = y * z * c1 + x * s
        self.k = z * z * c1 + c
        return self
    new_rotate_axis = classmethod(new_rotate_axis)

    def new_rotate_euler(cls, heading, attitude, bank):
        # from http://www.euclideanspace.com/
        ch = math.cos(heading)
        sh = math.sin(heading)
        ca = math.cos(attitude)
        sa = math.sin(attitude)
        cb = math.cos(bank)
        sb = math.sin(bank)

        self = cls()
        self.a = ch * ca
        self.b = sh * sb - ch * sa * cb
        self.c = ch * sa * sb + sh * cb
        self.e = sa
        self.f = ca * cb
        self.g = -ca * sb
        self.i = -sh * ca
        self.j = sh * sa * cb + ch * sb
        self.k = -sh * sa * sb + ch * cb
        return self
    new_rotate_euler = classmethod(new_rotate_euler)

    def new_rotate_triple_axis(cls, x, y, z):
        # Columns are the three (presumably orthonormal) basis vectors.
        m = cls()
        m.a, m.b, m.c = x.x, y.x, z.x
        m.e, m.f, m.g = x.y, y.y, z.y
        m.i, m.j, m.k = x.z, y.z, z.z
        return m
    new_rotate_triple_axis = classmethod(new_rotate_triple_axis)

    def new_look_at(cls, eye, at, up):
        # Camera-style orientation: z points from `at` back toward `eye`.
        z = (eye - at).normalized()
        x = up.cross(z).normalized()
        y = z.cross(x)

        m = cls.new_rotate_triple_axis(x, y, z)
        m.d, m.h, m.l = eye.x, eye.y, eye.z
        return m
    new_look_at = classmethod(new_look_at)

    def new_perspective(cls, fov_y, aspect, near, far):
        # from the gluPerspective man page
        f = 1 / math.tan(fov_y / 2)
        self = cls()
        assert near != 0.0 and near != far
        self.a = f / aspect
        self.f = f
        self.k = (far + near) / (near - far)
        self.l = 2 * far * near / (near - far)
        self.o = -1
        self.p = 0
        return self
    new_perspective = classmethod(new_perspective)

    def determinant(self):
        # Expansion via 2x2 sub-determinants (Laplace on pairs of rows).
        return ((self.a * self.f - self.e * self.b)
                * (self.k * self.p - self.o * self.l)
                - (self.a * self.j - self.i * self.b)
                * (self.g * self.p - self.o * self.h)
                + (self.a * self.n - self.m * self.b)
                * (self.g * self.l - self.k * self.h)
                + (self.e * self.j - self.i * self.f)
                * (self.c * self.p - self.o * self.d)
                - (self.e * self.n - self.m * self.f)
                * (self.c * self.l - self.k * self.d)
                + (self.i * self.n - self.m * self.j)
                * (self.c * self.h - self.g * self.d))

    def inverse(self):
        """Return the inverse matrix; near-singular input yields identity."""
        tmp = Matrix4()
        d = self.determinant();

        if abs(d) < 0.001:
            # No inverse, return identity
            return tmp
        else:
            d = 1.0 / d;

            # Cofactor (adjugate) expansion divided by the determinant.
            tmp.a = d * (self.f * (self.k * self.p - self.o * self.l) + self.j * (self.o * self.h - self.g * self.p) + self.n * (self.g * self.l - self.k * self.h));
            tmp.e = d * (self.g * (self.i * self.p - self.m * self.l) + self.k * (self.m * self.h - self.e * self.p) + self.o * (self.e * self.l - self.i * self.h));
            tmp.i = d * (self.h * (self.i * self.n - self.m * self.j) + self.l * (self.m * self.f - self.e * self.n) + self.p * (self.e * self.j - self.i * self.f));
            tmp.m = d * (self.e * (self.n * self.k - self.j * self.o) + self.i * (self.f * self.o - self.n * self.g) + self.m * (self.j * self.g - self.f * self.k));

            tmp.b = d * (self.j * (self.c * self.p - self.o * self.d) + self.n * (self.k * self.d - self.c * self.l) + self.b * (self.o * self.l - self.k * self.p));
            tmp.f = d * (self.k * (self.a * self.p - self.m * self.d) + self.o * (self.i * self.d - self.a * self.l) + self.c * (self.m * self.l - self.i * self.p));
            tmp.j = d * (self.l * (self.a * self.n - self.m * self.b) + self.p * (self.i * self.b - self.a * self.j) + self.d * (self.m * self.j - self.i * self.n));
            tmp.n = d * (self.i * (self.n * self.c - self.b * self.o) + self.m * (self.b * self.k - self.j * self.c) + self.a * (self.j * self.o - self.n * self.k));

            tmp.c = d * (self.n * (self.c * self.h - self.g * self.d) + self.b * (self.g * self.p - self.o * self.h) + self.f * (self.o * self.d - self.c * self.p));
            tmp.g = d * (self.o * (self.a * self.h - self.e * self.d) + self.c * (self.e * self.p - self.m * self.h) + self.g * (self.m * self.d - self.a * self.p));
            tmp.k = d * (self.p * (self.a * self.f - self.e * self.b) + self.d * (self.e * self.n - self.m * self.f) + self.h * (self.m * self.b - self.a * self.n));
            tmp.o = d * (self.m * (self.f * self.c - self.b * self.g) + self.a * (self.n * self.g - self.f * self.o) + self.e * (self.b * self.o - self.n * self.c));

            tmp.d = d * (self.b * (self.k * self.h - self.g * self.l) + self.f * (self.c * self.l - self.k * self.d) + self.j * (self.g * self.d - self.c * self.h));
            tmp.h = d * (self.c * (self.i * self.h - self.e * self.l) + self.g * (self.a * self.l - self.i * self.d) + self.k * (self.e * self.d - self.a * self.h));
            tmp.l = d * (self.d * (self.i * self.f - self.e * self.j) + self.h * (self.a * self.j - self.i * self.b) + self.l * (self.e * self.b - self.a * self.f));
            tmp.p = d * (self.a * (self.f * self.k - self.j * self.g) + self.e * (self.j * self.c - self.b * self.k) + self.i * (self.b * self.g - self.f * self.c));

        return tmp;
class Quaternion:
    # All methods and naming conventions based off
    # http://www.euclideanspace.com/maths/algebra/realNormedAlgebra/quaternions
    # w is the real part, (x, y, z) are the imaginary parts
    __slots__ = ['w', 'x', 'y', 'z']
    def __init__(self, w=1, x=0, y=0, z=0):
        # Defaults construct the identity rotation (w=1, zero imaginary part).
        self.w = w
        self.x = x
        self.y = y
        self.z = z
    def __copy__(self):
        # Shallow copy is sufficient: components are plain numbers.
        Q = Quaternion()
        Q.w = self.w
        Q.x = self.x
        Q.y = self.y
        Q.z = self.z
        return Q
    copy = __copy__
    def __repr__(self):
        return 'Quaternion(real=%.2f, imag=<%.2f, %.2f, %.2f>)' % \
            (self.w, self.x, self.y, self.z)
    def __mul__(self, other):
        """Multiply by a Quaternion (Hamilton product), rotate a Vector3,
        or transform any object exposing _apply_transform."""
        if isinstance(other, Quaternion):
            # Hamilton product: composes the two rotations. Not commutative.
            Ax = self.x
            Ay = self.y
            Az = self.z
            Aw = self.w
            Bx = other.x
            By = other.y
            Bz = other.z
            Bw = other.w
            Q = Quaternion()
            Q.x = Ax * Bw + Ay * Bz - Az * By + Aw * Bx
            Q.y = -Ax * Bz + Ay * Bw + Az * Bx + Aw * By
            Q.z = Ax * By - Ay * Bx + Az * Bw + Aw * Bz
            Q.w = -Ax * Bx - Ay * By - Az * Bz + Aw * Bw
            return Q
        elif isinstance(other, Vector3):
            # Rotate the vector: expanded form of q * v * q.conjugate(),
            # with repeated sub-products precomputed.
            w = self.w
            x = self.x
            y = self.y
            z = self.z
            Vx = other.x
            Vy = other.y
            Vz = other.z
            ww = w * w
            w2 = w * 2
            wx2 = w2 * x
            wy2 = w2 * y
            wz2 = w2 * z
            xx = x * x
            x2 = x * 2
            xy2 = x2 * y
            xz2 = x2 * z
            yy = y * y
            yz2 = 2 * y * z
            zz = z * z
            # Returns other.__class__ so Point3 stays a Point3, etc.
            return other.__class__(\
                ww * Vx + wy2 * Vz - wz2 * Vy + \
                xx * Vx + xy2 * Vy + xz2 * Vz - \
                zz * Vx - yy * Vx,
                xy2 * Vx + yy * Vy + yz2 * Vz + \
                wz2 * Vx - zz * Vy + ww * Vy - \
                wx2 * Vz - xx * Vy,
                xz2 * Vx + yz2 * Vy + \
                zz * Vz - wy2 * Vx - yy * Vz + \
                wx2 * Vy - xx * Vz + ww * Vz)
        else:
            # Fall back to transformable objects (lines, spheres, ...).
            other = other.copy()
            other._apply_transform(self)
            return other
    def __imul__(self, other):
        # In-place Hamilton product; only Quaternion * Quaternion supported.
        assert isinstance(other, Quaternion)
        Ax = self.x
        Ay = self.y
        Az = self.z
        Aw = self.w
        Bx = other.x
        By = other.y
        Bz = other.z
        Bw = other.w
        self.x = Ax * Bw + Ay * Bz - Az * By + Aw * Bx
        self.y = -Ax * Bz + Ay * Bw + Az * Bx + Aw * By
        self.z = Ax * By - Ay * Bx + Az * Bw + Aw * Bz
        self.w = -Ax * Bx - Ay * By - Az * Bz + Aw * Bw
        return self
    def __abs__(self):
        # Euclidean norm over all four components.
        return math.sqrt(self.w ** 2 + \
                         self.x ** 2 + \
                         self.y ** 2 + \
                         self.z ** 2)
    magnitude = __abs__
    def magnitude_squared(self):
        # Avoids the sqrt when only comparisons are needed.
        return self.w ** 2 + \
               self.x ** 2 + \
               self.y ** 2 + \
               self.z ** 2
    def identity(self):
        """Reset to the identity rotation in place; returns self."""
        self.w = 1
        self.x = 0
        self.y = 0
        self.z = 0
        return self
    def rotate_axis(self, angle, axis):
        """Compose an axis/angle rotation onto this quaternion in place."""
        self *= Quaternion.new_rotate_axis(angle, axis)
        return self
    def rotate_euler(self, heading, attitude, bank):
        """Compose a Euler-angle rotation onto this quaternion in place."""
        self *= Quaternion.new_rotate_euler(heading, attitude, bank)
        return self
    def rotate_matrix(self, m):
        """Compose a rotation extracted from matrix m in place."""
        self *= Quaternion.new_rotate_matrix(m)
        return self
    def conjugated(self):
        """Return a new quaternion with the imaginary part negated
        (the inverse rotation for unit quaternions)."""
        Q = Quaternion()
        Q.w = self.w
        Q.x = -self.x
        Q.y = -self.y
        Q.z = -self.z
        return Q
    def normalize(self):
        """Scale to unit magnitude in place (no-op on a zero quaternion)."""
        d = self.magnitude()
        if d != 0:
            self.w /= d
            self.x /= d
            self.y /= d
            self.z /= d
        return self
    def normalized(self):
        """Return a unit-magnitude copy (plain copy if magnitude is zero)."""
        d = self.magnitude()
        if d != 0:
            Q = Quaternion()
            Q.w = self.w / d
            Q.x = self.x / d
            Q.y = self.y / d
            Q.z = self.z / d
            return Q
        else:
            return self.copy()
    def get_angle_axis(self):
        """Return (angle, Vector3 axis) for this rotation."""
        # Guard acos domain: renormalize if w drifted above 1.
        if self.w > 1:
            self = self.normalized()
        angle = 2 * math.acos(self.w)
        s = math.sqrt(1 - self.w ** 2)
        if s < 0.001:
            # Near-zero rotation: axis is arbitrary, pick +X.
            return angle, Vector3(1, 0, 0)
        else:
            return angle, Vector3(self.x / s, self.y / s, self.z / s)
    def get_euler(self):
        """Return (heading, attitude, bank) Euler angles in radians."""
        t = self.x * self.y + self.z * self.w
        # Near the poles (gimbal lock) the general formula degenerates.
        if t > 0.4999:
            heading = 2 * math.atan2(self.x, self.w)
            attitude = math.pi / 2
            bank = 0
        elif t < -0.4999:
            heading = -2 * math.atan2(self.x, self.w)
            attitude = -math.pi / 2
            bank = 0
        else:
            sqx = self.x ** 2
            sqy = self.y ** 2
            sqz = self.z ** 2
            heading = math.atan2(2 * self.y * self.w - 2 * self.x * self.z,
                                 1 - 2 * sqy - 2 * sqz)
            attitude = math.asin(2 * t)
            bank = math.atan2(2 * self.x * self.w - 2 * self.y * self.z,
                              1 - 2 * sqx - 2 * sqz)
        return heading, attitude, bank
    def get_matrix(self):
        """Return the equivalent Matrix4 rotation (translation left at 0)."""
        xx = self.x ** 2
        xy = self.x * self.y
        xz = self.x * self.z
        xw = self.x * self.w
        yy = self.y ** 2
        yz = self.y * self.z
        yw = self.y * self.w
        zz = self.z ** 2
        zw = self.z * self.w
        M = Matrix4()
        M.a = 1 - 2 * (yy + zz)
        M.b = 2 * (xy - zw)
        M.c = 2 * (xz + yw)
        M.e = 2 * (xy + zw)
        M.f = 1 - 2 * (xx + zz)
        M.g = 2 * (yz - xw)
        M.i = 2 * (xz - yw)
        M.j = 2 * (yz + xw)
        M.k = 1 - 2 * (xx + yy)
        return M
    # Static constructors
    def new_identity(cls):
        """Alternate constructor: the identity rotation."""
        return cls()
    new_identity = classmethod(new_identity)
    def new_rotate_axis(cls, angle, axis):
        """Alternate constructor from an angle (radians) and a Vector3 axis."""
        assert(isinstance(axis, Vector3))
        # Normalize a copy so the caller's axis is untouched.
        axis = axis.normalized()
        s = math.sin(angle / 2)
        Q = cls()
        Q.w = math.cos(angle / 2)
        Q.x = axis.x * s
        Q.y = axis.y * s
        Q.z = axis.z * s
        return Q
    new_rotate_axis = classmethod(new_rotate_axis)
    def new_rotate_euler(cls, heading, attitude, bank):
        """Alternate constructor from Euler angles (radians)."""
        Q = cls()
        c1 = math.cos(heading / 2)
        s1 = math.sin(heading / 2)
        c2 = math.cos(attitude / 2)
        s2 = math.sin(attitude / 2)
        c3 = math.cos(bank / 2)
        s3 = math.sin(bank / 2)
        Q.w = c1 * c2 * c3 - s1 * s2 * s3
        Q.x = s1 * s2 * c3 + c1 * c2 * s3
        Q.y = s1 * c2 * c3 + c1 * s2 * s3
        Q.z = c1 * s2 * c3 - s1 * c2 * s3
        return Q
    new_rotate_euler = classmethod(new_rotate_euler)
    def new_rotate_matrix(cls, m):
        """Alternate constructor from a 4x4 rotation matrix (indexable m).

        Uses the numerically stable branch based on the largest diagonal
        element (Shepperd's method)."""
        if m[0*4 + 0] + m[1*4 + 1] + m[2*4 + 2] > 0.00000001:
            t = m[0*4 + 0] + m[1*4 + 1] + m[2*4 + 2] + 1.0
            s = 0.5/math.sqrt(t)
            return cls(
                s*t,
                (m[1*4 + 2] - m[2*4 + 1])*s,
                (m[2*4 + 0] - m[0*4 + 2])*s,
                (m[0*4 + 1] - m[1*4 + 0])*s
                )
        elif m[0*4 + 0] > m[1*4 + 1] and m[0*4 + 0] > m[2*4 + 2]:
            t = m[0*4 + 0] - m[1*4 + 1] - m[2*4 + 2] + 1.0
            s = 0.5/math.sqrt(t)
            return cls(
                (m[1*4 + 2] - m[2*4 + 1])*s,
                s*t,
                (m[0*4 + 1] + m[1*4 + 0])*s,
                (m[2*4 + 0] + m[0*4 + 2])*s
                )
        elif m[1*4 + 1] > m[2*4 + 2]:
            t = -m[0*4 + 0] + m[1*4 + 1] - m[2*4 + 2] + 1.0
            s = 0.5/math.sqrt(t)
            return cls(
                (m[2*4 + 0] - m[0*4 + 2])*s,
                (m[0*4 + 1] + m[1*4 + 0])*s,
                s*t,
                (m[1*4 + 2] + m[2*4 + 1])*s
                )
        else:
            t = -m[0*4 + 0] - m[1*4 + 1] + m[2*4 + 2] + 1.0
            s = 0.5/math.sqrt(t)
            return cls(
                (m[0*4 + 1] - m[1*4 + 0])*s,
                (m[2*4 + 0] + m[0*4 + 2])*s,
                (m[1*4 + 2] + m[2*4 + 1])*s,
                s*t
                )
    new_rotate_matrix = classmethod(new_rotate_matrix)
    def new_interpolate(cls, q1, q2, t):
        """Spherical linear interpolation (slerp) between q1 and q2, t in [0, 1]."""
        assert isinstance(q1, Quaternion) and isinstance(q2, Quaternion)
        Q = cls()
        costheta = q1.w * q2.w + q1.x * q2.x + q1.y * q2.y + q1.z * q2.z
        if costheta < 0.:
            # Take the short arc.
            # NOTE(review): standard slerp negates *all four* components of
            # q1 here; conjugated() only negates the imaginary part, which
            # is the inverse rotation instead -- confirm intended.
            costheta = -costheta
            q1 = q1.conjugated()
        elif costheta > 1:
            # Clamp drift above 1 so acos stays in domain.
            costheta = 1
        theta = math.acos(costheta)
        if abs(theta) < 0.01:
            # Inputs nearly identical: return q2 directly.
            Q.w = q2.w
            Q.x = q2.x
            Q.y = q2.y
            Q.z = q2.z
            return Q
        sintheta = math.sqrt(1.0 - costheta * costheta)
        if abs(sintheta) < 0.01:
            # Nearly opposite: fall back to the (degenerate) midpoint.
            Q.w = (q1.w + q2.w) * 0.5
            Q.x = (q1.x + q2.x) * 0.5
            Q.y = (q1.y + q2.y) * 0.5
            Q.z = (q1.z + q2.z) * 0.5
            return Q
        ratio1 = math.sin((1 - t) * theta) / sintheta
        ratio2 = math.sin(t * theta) / sintheta
        Q.w = q1.w * ratio1 + q2.w * ratio2
        Q.x = q1.x * ratio1 + q2.x * ratio2
        Q.y = q1.y * ratio1 + q2.y * ratio2
        Q.z = q1.z * ratio1 + q2.z * ratio2
        return Q
    new_interpolate = classmethod(new_interpolate)
# Geometry
# Much maths thanks to Paul Bourke, http://astronomy.swin.edu.au/~pbourke
# ---------------------------------------------------------------------------
class Geometry:
    """Abstract base for 2D/3D geometric primitives.

    Provides the double-dispatch entry points (``intersect``/``connect``)
    plus per-type stubs that raise until a subclass supplies them.
    """
    def _connect_unimplemented(self, other):
        # BUG FIX: the old "raise AttributeError, msg" form is Python-2-only
        # syntax (SyntaxError on Python 3); the parenthesised form below is
        # valid on both. Exception type and message are unchanged.
        raise AttributeError('Cannot connect %s to %s' %
                             (self.__class__, other.__class__))
    def _intersect_unimplemented(self, other):
        raise AttributeError('Cannot intersect %s and %s' %
                             (self.__class__, other.__class__))
    _intersect_point2 = _intersect_unimplemented
    _intersect_line2 = _intersect_unimplemented
    _intersect_circle = _intersect_unimplemented
    _connect_point2 = _connect_unimplemented
    _connect_line2 = _connect_unimplemented
    _connect_circle = _connect_unimplemented
    _intersect_point3 = _intersect_unimplemented
    _intersect_line3 = _intersect_unimplemented
    _intersect_sphere = _intersect_unimplemented
    _intersect_plane = _intersect_unimplemented
    _connect_point3 = _connect_unimplemented
    _connect_line3 = _connect_unimplemented
    _connect_sphere = _connect_unimplemented
    _connect_plane = _connect_unimplemented
    def intersect(self, other):
        # Subclasses dispatch to other._intersect_<type>(self).
        raise NotImplementedError
    def connect(self, other):
        # Subclasses dispatch to other._connect_<type>(self).
        raise NotImplementedError
    def distance(self, other):
        """Length of the shortest connection to ``other`` (0.0 if none)."""
        c = self.connect(other)
        if c:
            return c.length
        return 0.0
def _intersect_point2_circle(P, C):
return abs(P - C.c) <= C.r
def _intersect_line2_line2(A, B):
d = B.v.y * A.v.x - B.v.x * A.v.y
if d == 0:
return None
dy = A.p.y - B.p.y
dx = A.p.x - B.p.x
ua = (B.v.x * dy - B.v.y * dx) / d
if not A._u_in(ua):
return None
ub = (A.v.x * dy - A.v.y * dx) / d
if not B._u_in(ub):
return None
return Point2(A.p.x + ua * A.v.x,
A.p.y + ua * A.v.y)
def _intersect_line2_circle(L, C):
a = L.v.magnitude_squared()
b = 2 * (L.v.x * (L.p.x - C.c.x) + \
L.v.y * (L.p.y - C.c.y))
c = C.c.magnitude_squared() + \
L.p.magnitude_squared() - \
2 * C.c.dot(L.p) - \
C.r ** 2
det = b ** 2 - 4 * a * c
if det < 0:
return None
sq = math.sqrt(det)
u1 = (-b + sq) / (2 * a)
u2 = (-b - sq) / (2 * a)
if not L._u_in(u1):
u1 = max(min(u1, 1.0), 0.0)
if not L._u_in(u2):
u2 = max(min(u2, 1.0), 0.0)
# Tangent
if u1 == u2:
return Point2(L.p.x + u1 * L.v.x,
L.p.y + u1 * L.v.y)
return LineSegment2(Point2(L.p.x + u1 * L.v.x,
L.p.y + u1 * L.v.y),
Point2(L.p.x + u2 * L.v.x,
L.p.y + u2 * L.v.y))
def _connect_point2_line2(P, L):
    # Project P onto L (clamped to the valid parameter range) and return
    # the segment from P to that projection.
    denom = L.v.magnitude_squared()
    assert denom != 0
    u = ((P.x - L.p.x) * L.v.x + \
         (P.y - L.p.y) * L.v.y) / denom
    if not L._u_in(u):
        u = max(min(u, 1.0), 0.0)
    foot = Point2(L.p.x + u * L.v.x,
                  L.p.y + u * L.v.y)
    return LineSegment2(P, foot)
def _connect_point2_circle(P, C):
    # Step one radius from the centre toward P to reach the boundary
    # point nearest to P.
    radial = P - C.c
    radial.normalize()
    radial *= C.r
    return LineSegment2(P, Point2(C.c.x + radial.x, C.c.y + radial.y))
def _connect_line2_line2(A, B):
    """Return the shortest LineSegment2 joining A and B (lines/rays/segments).

    For parallel inputs the connection is made from an endpoint; otherwise
    the closest-approach parameters are computed and clamped to each
    primitive's valid range.
    """
    d = B.v.y * A.v.x - B.v.x * A.v.y
    if d == 0:
        # Parallel, connect an endpoint with a line
        if isinstance(B, Ray2) or isinstance(B, LineSegment2):
            # BUG FIX: the old code did "p1, p2 = _connect_point2_line2(...)"
            # and returned a tuple -- but _connect_point2_line2 returns a
            # (non-iterable) LineSegment2, so this raised TypeError, and a
            # tuple would be inconsistent with every other _connect_*.
            # Swap the segment instead, exactly as _connect_line3_line3 does.
            return _connect_point2_line2(B.p, A)._swap()
        # No endpoint (or endpoint is on A), possibly choose arbitrary point
        # on line.
        return _connect_point2_line2(A.p, B)
    dy = A.p.y - B.p.y
    dx = A.p.x - B.p.x
    ua = (B.v.x * dy - B.v.y * dx) / d
    if not A._u_in(ua):
        ua = max(min(ua, 1.0), 0.0)
    ub = (A.v.x * dy - A.v.y * dx) / d
    if not B._u_in(ub):
        ub = max(min(ub, 1.0), 0.0)
    return LineSegment2(Point2(A.p.x + ua * A.v.x, A.p.y + ua * A.v.y),
                        Point2(B.p.x + ub * B.v.x, B.p.y + ub * B.v.y))
def _connect_circle_line2(C, L):
    # Find the point on L nearest the circle's centre, then step one
    # radius along that direction to reach the circle's boundary.
    denom = L.v.magnitude_squared()
    assert denom != 0
    u = ((C.c.x - L.p.x) * L.v.x + (C.c.y - L.p.y) * L.v.y) / denom
    if not L._u_in(u):
        u = max(min(u, 1.0), 0.0)
    nearest = Point2(L.p.x + u * L.v.x, L.p.y + u * L.v.y)
    radial = nearest - C.c
    radial.normalize()
    radial *= C.r
    return LineSegment2(Point2(C.c.x + radial.x, C.c.y + radial.y), nearest)
def _connect_circle_circle(A, B):
    # Connect two circles along the line through their centres; the sign
    # of each step decides which side of the circle the endpoint sits on.
    centre_line = B.c - A.c
    gap = centre_line.magnitude()
    if A.r >= B.r and gap < A.r:
        # centre of B lies inside A
        s1, s2 = +1, +1
    elif B.r > A.r and gap < B.r:
        # centre of A lies inside B
        s1, s2 = -1, -1
    elif gap >= A.r and gap >= B.r:
        s1, s2 = +1, -1
    centre_line.normalize()
    return LineSegment2(Point2(A.c.x + s1 * centre_line.x * A.r,
                               A.c.y + s1 * centre_line.y * A.r),
                        Point2(B.c.x + s2 * centre_line.x * B.r,
                               B.c.y + s2 * centre_line.y * B.r))
class Point2(Vector2, Geometry):
    """A 2D point: a Vector2 that participates in geometric queries."""
    def __repr__(self):
        return 'Point2(%.2f, %.2f)' % (self.x, self.y)
    def intersect(self, other):
        # Double dispatch on the other operand's type.
        return other._intersect_point2(self)
    def _intersect_circle(self, other):
        return _intersect_point2_circle(self, other)
    def connect(self, other):
        return other._connect_point2(self)
    def _connect_point2(self, other):
        return LineSegment2(other, self)
    def _connect_line2(self, other):
        seg = _connect_point2_line2(self, other)
        return seg._swap() if seg else None
    def _connect_circle(self, other):
        seg = _connect_point2_circle(self, other)
        return seg._swap() if seg else None
class Line2(Geometry):
    """An infinite 2D line through point ``p`` with direction ``v``.

    Constructors:
      Line2(Point2, Vector2, float) -- point, direction, explicit length
      Line2(Point2, Point2)         -- through two points
      Line2(Point2, Vector2)        -- point plus direction
      Line2(Line2)                  -- copy
    """
    __slots__ = ['p', 'v']
    def __init__(self, *args):
        if len(args) == 3:
            assert isinstance(args[0], Point2) and \
                   isinstance(args[1], Vector2) and \
                   type(args[2]) == float
            self.p = args[0].copy()
            self.v = args[1] * args[2] / abs(args[1])
        elif len(args) == 2:
            if isinstance(args[0], Point2) and isinstance(args[1], Point2):
                self.p = args[0].copy()
                self.v = args[1] - args[0]
            elif isinstance(args[0], Point2) and isinstance(args[1], Vector2):
                self.p = args[0].copy()
                self.v = args[1].copy()
            else:
                # BUG FIX: "raise X, msg" is Python-2-only syntax; the
                # parenthesised form is valid on both Python 2 and 3.
                raise AttributeError('%r' % (args,))
        elif len(args) == 1:
            if isinstance(args[0], Line2):
                self.p = args[0].p.copy()
                self.v = args[0].v.copy()
            else:
                raise AttributeError('%r' % (args,))
        else:
            raise AttributeError('%r' % (args,))
        if not self.v:
            raise AttributeError('Line has zero-length vector')
    def __copy__(self):
        return self.__class__(self.p, self.v)
    copy = __copy__
    def __repr__(self):
        return 'Line2(<%.2f, %.2f> + u<%.2f, %.2f>)' % \
            (self.p.x, self.p.y, self.v.x, self.v.y)
    p1 = property(lambda self: self.p)
    p2 = property(lambda self: Point2(self.p.x + self.v.x,
                                      self.p.y + self.v.y))
    def _apply_transform(self, t):
        self.p = t * self.p
        self.v = t * self.v
    def _u_in(self, u):
        # The infinite line accepts any parameter value; Ray2/LineSegment2
        # restrict this.
        return True
    def intersect(self, other):
        return other._intersect_line2(self)
    def _intersect_line2(self, other):
        return _intersect_line2_line2(self, other)
    def _intersect_circle(self, other):
        return _intersect_line2_circle(self, other)
    def connect(self, other):
        return other._connect_line2(self)
    def _connect_point2(self, other):
        return _connect_point2_line2(other, self)
    def _connect_line2(self, other):
        return _connect_line2_line2(other, self)
    def _connect_circle(self, other):
        return _connect_circle_line2(other, self)
class Ray2(Line2):
    """A 2D half-line: parameters start at the origin point and extend
    only in the direction of ``v``."""
    def __repr__(self):
        return 'Ray2(<%.2f, %.2f> + u<%.2f, %.2f>)' % \
            (self.p.x, self.p.y, self.v.x, self.v.y)
    def _u_in(self, u):
        # Only the non-negative half of the parameter axis is on the ray.
        return u >= 0.0
class LineSegment2(Line2):
    """A bounded 2D segment from ``p`` to ``p + v``."""
    def __repr__(self):
        return 'LineSegment2(<%.2f, %.2f> to <%.2f, %.2f>)' % \
            (self.p.x, self.p.y, self.p.x + self.v.x, self.p.y + self.v.y)
    def _u_in(self, u):
        # Valid parameters are confined to the closed unit interval.
        return 0.0 <= u <= 1.0
    def __abs__(self):
        return abs(self.v)
    def magnitude_squared(self):
        return self.v.magnitude_squared()
    def _swap(self):
        # Used by connect methods to flip the segment's direction in place.
        self.p = self.p2
        self.v *= -1
        return self
    length = property(lambda self: abs(self.v))
class Circle(Geometry):
    """A circle described by a centre point ``c`` and radius ``r``."""
    __slots__ = ['c', 'r']
    def __init__(self, center, radius):
        assert isinstance(center, Vector2) and type(radius) == float
        self.c = center.copy()
        self.r = radius
    def __copy__(self):
        return self.__class__(self.c, self.r)
    copy = __copy__
    def __repr__(self):
        return 'Circle(<%.2f, %.2f>, radius=%.2f)' % \
            (self.c.x, self.c.y, self.r)
    def _apply_transform(self, t):
        # Only the centre moves; the radius is preserved.
        self.c = t * self.c
    def intersect(self, other):
        return other._intersect_circle(self)
    def _intersect_point2(self, other):
        return _intersect_point2_circle(other, self)
    def _intersect_line2(self, other):
        return _intersect_line2_circle(other, self)
    def connect(self, other):
        return other._connect_circle(self)
    def _connect_point2(self, other):
        return _connect_point2_circle(other, self)
    def _connect_line2(self, other):
        seg = _connect_circle_line2(self, other)
        return seg._swap() if seg else None
    def _connect_circle(self, other):
        return _connect_circle_circle(other, self)
# 3D Geometry
# -------------------------------------------------------------------------
def _connect_point3_line3(P, L):
    # Project P onto L (clamped to the valid parameter range) and return
    # the segment from P to that projection.
    denom = L.v.magnitude_squared()
    assert denom != 0
    u = ((P.x - L.p.x) * L.v.x + \
         (P.y - L.p.y) * L.v.y + \
         (P.z - L.p.z) * L.v.z) / denom
    if not L._u_in(u):
        u = max(min(u, 1.0), 0.0)
    foot = Point3(L.p.x + u * L.v.x,
                  L.p.y + u * L.v.y,
                  L.p.z + u * L.v.z)
    return LineSegment3(P, foot)
def _connect_point3_sphere(P, S):
    # Step one radius from the sphere's centre toward P to land on the
    # surface point nearest to P.
    radial = P - S.c
    radial.normalize()
    radial *= S.r
    return LineSegment3(P, Point3(S.c.x + radial.x,
                                  S.c.y + radial.y,
                                  S.c.z + radial.z))
def _connect_point3_plane(p, plane):
    # The signed distance of p from the plane along its normal gives the
    # foot of the perpendicular. (plane.n is unit-length by construction
    # in Plane.__init__, so d is a true distance.)
    n = plane.n.normalized()
    d = p.dot(plane.n) - plane.k
    foot = Point3(p.x - n.x * d, p.y - n.y * d, p.z - n.z * d)
    return LineSegment3(p, foot)
def _connect_line3_line3(A, B):
    # Shortest segment between two 3D lines/rays/segments, following Paul
    # Bourke's closest-approach derivation (dXYZW names match his notation).
    assert A.v and B.v
    p13 = A.p - B.p
    d1343 = p13.dot(B.v)
    d4321 = B.v.dot(A.v)
    d1321 = p13.dot(A.v)
    d4343 = B.v.magnitude_squared()
    denom = A.v.magnitude_squared() * d4343 - d4321 ** 2
    if denom == 0:
        # Parallel, connect an endpoint with a line
        if isinstance(B, Ray3) or isinstance(B, LineSegment3):
            return _connect_point3_line3(B.p, A)._swap()
        # No endpoint (or endpoint is on A), possibly choose arbitrary
        # point on line.
        return _connect_point3_line3(A.p, B)
    # ua/ub are the closest-approach parameters on A and B, clamped to
    # each primitive's valid range (rays/segments restrict _u_in).
    ua = (d1343 * d4321 - d1321 * d4343) / denom
    if not A._u_in(ua):
        ua = max(min(ua, 1.0), 0.0)
    ub = (d1343 + d4321 * ua) / d4343
    if not B._u_in(ub):
        ub = max(min(ub, 1.0), 0.0)
    return LineSegment3(Point3(A.p.x + ua * A.v.x,
                               A.p.y + ua * A.v.y,
                               A.p.z + ua * A.v.z),
                        Point3(B.p.x + ub * B.v.x,
                               B.p.y + ub * B.v.y,
                               B.p.z + ub * B.v.z))
def _connect_line3_plane(L, P):
d = P.n.dot(L.v)
if not d:
# Parallel, choose an endpoint
return _connect_point3_plane(L.p, P)
u = (P.k - P.n.dot(L.p)) / d
if not L._u_in(u):
# intersects out of range, choose nearest endpoint
u = max(min(u, 1.0), 0.0)
return _connect_point3_plane(Point3(L.p.x + u * L.v.x,
L.p.y + u * L.v.y,
L.p.z + u * L.v.z), P)
# Intersection
return None
def _connect_sphere_line3(S, L):
    # Find the point on L nearest the sphere's centre, then step one
    # radius along that direction to reach the sphere's surface.
    denom = L.v.magnitude_squared()
    assert denom != 0
    u = ((S.c.x - L.p.x) * L.v.x + \
         (S.c.y - L.p.y) * L.v.y + \
         (S.c.z - L.p.z) * L.v.z) / denom
    if not L._u_in(u):
        u = max(min(u, 1.0), 0.0)
    nearest = Point3(L.p.x + u * L.v.x, L.p.y + u * L.v.y, L.p.z + u * L.v.z)
    radial = nearest - S.c
    radial.normalize()
    radial *= S.r
    return LineSegment3(Point3(S.c.x + radial.x,
                               S.c.y + radial.y,
                               S.c.z + radial.z),
                        nearest)
def _connect_sphere_sphere(A, B):
    # Connect two spheres along the line through their centres; the sign
    # of each step decides which side of the sphere the endpoint sits on.
    centre_line = B.c - A.c
    gap = centre_line.magnitude()
    if A.r >= B.r and gap < A.r:
        # centre of B lies inside A
        s1, s2 = +1, +1
    elif B.r > A.r and gap < B.r:
        # centre of A lies inside B
        s1, s2 = -1, -1
    elif gap >= A.r and gap >= B.r:
        s1, s2 = +1, -1
    centre_line.normalize()
    return LineSegment3(Point3(A.c.x + s1 * centre_line.x * A.r,
                               A.c.y + s1 * centre_line.y * A.r,
                               A.c.z + s1 * centre_line.z * A.r),
                        Point3(B.c.x + s2 * centre_line.x * B.r,
                               B.c.y + s2 * centre_line.y * B.r,
                               B.c.z + s2 * centre_line.z * B.r))
def _connect_sphere_plane(S, P):
    # Drop a perpendicular from the centre onto the plane, then pull the
    # near endpoint back onto the sphere's surface.
    drop = _connect_point3_plane(S.c, P)
    if not drop:
        return None
    foot = drop.p2
    radial = foot - S.c
    radial.normalize()
    radial *= S.r
    return LineSegment3(Point3(S.c.x + radial.x,
                               S.c.y + radial.y,
                               S.c.z + radial.z),
                        foot)
def _connect_plane_plane(A, B):
if A.n.cross(B.n):
# Planes intersect
return None
else:
# Planes are parallel, connect to arbitrary point
return _connect_point3_plane(A._get_point(), B)
def _intersect_point3_sphere(P, S):
return abs(P - S.c) <= S.r
def _intersect_line3_sphere(L, S):
a = L.v.magnitude_squared()
b = 2 * (L.v.x * (L.p.x - S.c.x) + \
L.v.y * (L.p.y - S.c.y) + \
L.v.z * (L.p.z - S.c.z))
c = S.c.magnitude_squared() + \
L.p.magnitude_squared() - \
2 * S.c.dot(L.p) - \
S.r ** 2
det = b ** 2 - 4 * a * c
if det < 0:
return None
sq = math.sqrt(det)
u1 = (-b + sq) / (2 * a)
u2 = (-b - sq) / (2 * a)
if not L._u_in(u1):
u1 = max(min(u1, 1.0), 0.0)
if not L._u_in(u2):
u2 = max(min(u2, 1.0), 0.0)
return LineSegment3(Point3(L.p.x + u1 * L.v.x,
L.p.y + u1 * L.v.y,
L.p.z + u1 * L.v.z),
Point3(L.p.x + u2 * L.v.x,
L.p.y + u2 * L.v.y,
L.p.z + u2 * L.v.z))
def _intersect_line3_plane(L, P):
d = P.n.dot(L.v)
if not d:
# Parallel
return None
u = (P.k - P.n.dot(L.p)) / d
if not L._u_in(u):
return None
return Point3(L.p.x + u * L.v.x,
L.p.y + u * L.v.y,
L.p.z + u * L.v.z)
def _intersect_plane_plane(A, B):
n1_m = A.n.magnitude_squared()
n2_m = B.n.magnitude_squared()
n1d2 = A.n.dot(B.n)
det = n1_m * n2_m - n1d2 ** 2
if det == 0:
# Parallel
return None
c1 = (A.k * n2_m - B.k * n1d2) / det
c2 = (B.k * n1_m - A.k * n1d2) / det
return Line3(Point3(c1 * A.n.x + c2 * B.n.x,
c1 * A.n.y + c2 * B.n.y,
c1 * A.n.z + c2 * B.n.z),
A.n.cross(B.n))
class Point3(Vector3, Geometry):
    """A 3D point: a Vector3 that participates in geometric queries."""
    def __repr__(self):
        return 'Point3(%.2f, %.2f, %.2f)' % (self.x, self.y, self.z)
    def intersect(self, other):
        # Double dispatch on the other operand's type.
        return other._intersect_point3(self)
    def _intersect_sphere(self, other):
        return _intersect_point3_sphere(self, other)
    def connect(self, other):
        return other._connect_point3(self)
    def _connect_point3(self, other):
        # Identical points have no connecting segment.
        if self == other:
            return None
        return LineSegment3(other, self)
    def _connect_line3(self, other):
        seg = _connect_point3_line3(self, other)
        return seg._swap() if seg else None
    def _connect_sphere(self, other):
        seg = _connect_point3_sphere(self, other)
        return seg._swap() if seg else None
    def _connect_plane(self, other):
        seg = _connect_point3_plane(self, other)
        return seg._swap() if seg else None
class Line3:
    """An infinite 3D line through point ``p`` with direction ``v``.

    Constructors mirror Line2:
      Line3(Point3, Vector3, float) -- point, direction, explicit length
      Line3(Point3, Point3)         -- through two points
      Line3(Point3, Vector3)        -- point plus direction
      Line3(Line3)                  -- copy
    """
    __slots__ = ['p', 'v']
    def __init__(self, *args):
        if len(args) == 3:
            assert isinstance(args[0], Point3) and \
                   isinstance(args[1], Vector3) and \
                   type(args[2]) == float
            self.p = args[0].copy()
            self.v = args[1] * args[2] / abs(args[1])
        elif len(args) == 2:
            if isinstance(args[0], Point3) and isinstance(args[1], Point3):
                self.p = args[0].copy()
                self.v = args[1] - args[0]
            elif isinstance(args[0], Point3) and isinstance(args[1], Vector3):
                self.p = args[0].copy()
                self.v = args[1].copy()
            else:
                # BUG FIX: "raise X, msg" is Python-2-only syntax; the
                # parenthesised form is valid on both Python 2 and 3.
                raise AttributeError('%r' % (args,))
        elif len(args) == 1:
            if isinstance(args[0], Line3):
                self.p = args[0].p.copy()
                self.v = args[0].v.copy()
            else:
                raise AttributeError('%r' % (args,))
        else:
            raise AttributeError('%r' % (args,))
        # XXX This is annoying.
        #if not self.v:
        #    raise AttributeError('Line has zero-length vector')
    def __copy__(self):
        return self.__class__(self.p, self.v)
    copy = __copy__
    def __repr__(self):
        return 'Line3(<%.2f, %.2f, %.2f> + u<%.2f, %.2f, %.2f>)' % \
            (self.p.x, self.p.y, self.p.z, self.v.x, self.v.y, self.v.z)
    p1 = property(lambda self: self.p)
    p2 = property(lambda self: Point3(self.p.x + self.v.x,
                                      self.p.y + self.v.y,
                                      self.p.z + self.v.z))
    def _apply_transform(self, t):
        self.p = t * self.p
        self.v = t * self.v
    def _u_in(self, u):
        # The infinite line accepts any parameter value; Ray3/LineSegment3
        # restrict this.
        return True
    def intersect(self, other):
        return other._intersect_line3(self)
    def _intersect_sphere(self, other):
        return _intersect_line3_sphere(self, other)
    def _intersect_plane(self, other):
        return _intersect_line3_plane(self, other)
    def connect(self, other):
        return other._connect_line3(self)
    def _connect_point3(self, other):
        return _connect_point3_line3(other, self)
    def _connect_line3(self, other):
        return _connect_line3_line3(other, self)
    def _connect_sphere(self, other):
        return _connect_sphere_line3(other, self)
    def _connect_plane(self, other):
        c = _connect_line3_plane(self, other)
        if c:
            return c
class Ray3(Line3):
    """A 3D half-line: parameters start at the origin point and extend
    only in the direction of ``v``."""
    def __repr__(self):
        return 'Ray3(<%.2f, %.2f, %.2f> + u<%.2f, %.2f, %.2f>)' % \
            (self.p.x, self.p.y, self.p.z, self.v.x, self.v.y, self.v.z)
    def _u_in(self, u):
        # Only the non-negative half of the parameter axis is on the ray.
        return u >= 0.0
class LineSegment3(Line3):
    """A bounded 3D segment from ``p`` to ``p + v``."""
    def __repr__(self):
        return 'LineSegment3(<%.2f, %.2f, %.2f> to <%.2f, %.2f, %.2f>)' % \
            (self.p.x, self.p.y, self.p.z,
             self.p.x + self.v.x, self.p.y + self.v.y, self.p.z + self.v.z)
    def _u_in(self, u):
        # Valid parameters are confined to the closed unit interval.
        return 0.0 <= u <= 1.0
    def __abs__(self):
        return abs(self.v)
    def magnitude_squared(self):
        return self.v.magnitude_squared()
    def _swap(self):
        # Used by connect methods to flip the segment's direction in place.
        self.p = self.p2
        self.v *= -1
        return self
    length = property(lambda self: abs(self.v))
class Sphere:
    """A sphere described by a centre point ``c`` and radius ``r``."""
    __slots__ = ['c', 'r']
    def __init__(self, center, radius):
        assert isinstance(center, Vector3) and type(radius) == float
        self.c = center.copy()
        self.r = radius
    def __copy__(self):
        return self.__class__(self.c, self.r)
    copy = __copy__
    def __repr__(self):
        return 'Sphere(<%.2f, %.2f, %.2f>, radius=%.2f)' % \
            (self.c.x, self.c.y, self.c.z, self.r)
    def _apply_transform(self, t):
        # Only the centre is transformed; the radius is left untouched.
        self.c = t * self.c
    def intersect(self, other):
        return other._intersect_sphere(self)
    def _intersect_point3(self, other):
        return _intersect_point3_sphere(other, self)
    def _intersect_line3(self, other):
        return _intersect_line3_sphere(other, self)
    def connect(self, other):
        return other._connect_sphere(self)
    def _connect_point3(self, other):
        return _connect_point3_sphere(other, self)
    def _connect_line3(self, other):
        seg = _connect_sphere_line3(self, other)
        return seg._swap() if seg else None
    def _connect_sphere(self, other):
        return _connect_sphere_sphere(other, self)
    def _connect_plane(self, other):
        seg = _connect_sphere_plane(self, other)
        return seg if seg else None
class Plane:
    """A plane satisfying n.dot(p) == k for every point p on it, where
    ``n`` is the (unit) normal and ``k`` a constant scalar.

    Constructors:
      Plane(Point3, Point3, Point3) -- through three points
      Plane(Point3, Vector3)        -- through a point with given normal
      Plane(Vector3, float)         -- normal plus constant k
    """
    __slots__ = ['n', 'k']
    def __init__(self, *args):
        if len(args) == 3:
            assert isinstance(args[0], Point3) and \
                   isinstance(args[1], Point3) and \
                   isinstance(args[2], Point3)
            self.n = (args[1] - args[0]).cross(args[2] - args[0])
            self.n.normalize()
            self.k = self.n.dot(args[0])
        elif len(args) == 2:
            if isinstance(args[0], Point3) and isinstance(args[1], Vector3):
                self.n = args[1].normalized()
                self.k = self.n.dot(args[0])
            elif isinstance(args[0], Vector3) and type(args[1]) == float:
                self.n = args[0].normalized()
                self.k = args[1]
            else:
                # BUG FIX: "raise X, msg" is Python-2-only syntax; the
                # parenthesised form is valid on both Python 2 and 3.
                raise AttributeError('%r' % (args,))
        else:
            raise AttributeError('%r' % (args,))
        if not self.n:
            raise AttributeError('Points on plane are colinear')
    def __copy__(self):
        return self.__class__(self.n, self.k)
    copy = __copy__
    def __repr__(self):
        return 'Plane(<%.2f, %.2f, %.2f>.p = %.2f)' % \
            (self.n.x, self.n.y, self.n.z, self.k)
    def _get_point(self):
        # Return an arbitrary point on the plane, using whichever normal
        # component is non-zero to solve n.dot(p) == k.
        if self.n.z:
            return Point3(0., 0., self.k / self.n.z)
        elif self.n.y:
            return Point3(0., self.k / self.n.y, 0.)
        else:
            return Point3(self.k / self.n.x, 0., 0.)
    def _apply_transform(self, t):
        # Transform a point on the plane and the normal, then rebuild k.
        p = t * self._get_point()
        self.n = t * self.n
        self.k = self.n.dot(p)
    def intersect(self, other):
        return other._intersect_plane(self)
    def _intersect_line3(self, other):
        return _intersect_line3_plane(other, self)
    def _intersect_plane(self, other):
        return _intersect_plane_plane(self, other)
    def connect(self, other):
        return other._connect_plane(self)
    def _connect_point3(self, other):
        return _connect_point3_plane(other, self)
    def _connect_line3(self, other):
        return _connect_line3_plane(other, self)
    def _connect_sphere(self, other):
        return _connect_sphere_plane(other, self)
    def _connect_plane(self, other):
        return _connect_plane_plane(other, self)
|
nilq/baby-python
|
python
|
from django.conf.urls import (
    include,
    url,
)
from .views import (
    calls,
    notifications,
    reviews,
    submissions,
)
# Namespace used when reversing these URLs (e.g. 'submitify:view_call').
app_name = 'submitify'
# Notification endpoints, mounted under a call's notifications/ prefix.
notification_urls = [
    url(r'^(?P<notification_id>\d+)/$', notifications.view_notification,
        name='view_notification'),
    url(r'^(?P<notification_type>(basic|accept|reject))/$',
        notifications.send_notification,
        name='send_notification'),
]
# Review endpoints, mounted under a submission's reviews/ prefix.
# NOTE(review): the two view/edit patterns lack the leading '^' anchor the
# sibling patterns use -- presumably intentional, but confirm.
review_urls = [
    url(r'^create/$', reviews.create_review,
        name='create_review'),
    url(r'(?P<review_id>\d+)/$', reviews.view_review,
        name='view_review'),
    url(r'(?P<review_id>\d+)/edit/$', reviews.edit_review,
        name='edit_review'),
]
# Per-submission endpoints, mounted under a call's <submission_id>/ prefix.
submission_urls = [
    url(r'^$', submissions.view_submission,
        name='view_submission'),
    url(r'^text/$', submissions.view_submission_text,
        name='view_submission_text'),
    url(r'^pdf/$', submissions.view_submission_file,
        name='view_submission_file'),
    url(r'^original/$', submissions.view_original_file,
        name='view_original_file'),
    url(r'^reviews/', include(review_urls)),
    url(r'^resolve/(?P<resolution_type>(accept|reject))/$',
        submissions.resolve_submission,
        name='resolve_submission'),
]
# Per-call endpoints; included below both with and without a slug.
call_urls = [
    url(r'^$', calls.view_call,
        name='view_call'),
    url(r'^submit/$', submissions.create_submission,
        name='create_submission'),
    url(r'^edit/$', calls.edit_call,
        name='edit_call'),
    url(r'^next/$', calls.next_step,
        name='next_step'),
    url(r'^notifications/', include(notification_urls)),
    url(r'^(?P<submission_id>\d+)/', include(submission_urls))
]
# Top-level routes: call listing/creation plus reader/writer invitations.
urlpatterns = [
    url(r'^$', calls.list_calls,
        name='list_calls'),
    url(r'^create/$', calls.create_call,
        name='create_call'),
    url(r'^invite/reader/$', calls.invite_reader,
        name='invite_reader'),
    url(r'^invite/writer/$', calls.invite_writer,
        name='invite_writer'),
    url(r'^(?P<call_id>\d+)/', include(call_urls)),
    url(r'^(?P<call_id>\d+)-(?P<call_slug>[-\w]+)/', include(call_urls)),
]
nilq/baby-python
|
python
|
from flask import Flask, request, jsonify
from service import Service
# Serve bundled assets from /static and share one Service instance
# across all request handlers.
app = Flask(__name__, static_url_path='/static')
service = Service()
@app.route("/")
def hello():
    # Health-check endpoint: empty body with HTTP 200.
    return ('', 200)
# BUG FIX: Flask URL rules must begin with '/'; the original rule
# "fullinsert" (no leading slash) makes route registration raise
# ValueError, so the module failed at import time.
@app.route("/fullinsert")
def fullinsert():
    """Re-seed the backing store with the full initial data set."""
    service.init()
    return '', 200
#To access parameters submitted in the URL (?key=value)
@app.route('/insert', methods=['POST'])
def insert():
    # Pull the insert payload out of the request's JSON body and hand it
    # to the service layer.
    payload = request.get_json()
    user = payload['user']
    interest = payload['interest']
    lon = payload['longitude']
    lat = payload['latitude']
    img = payload['imgurl']
    service.insert(user, interest, lon, lat, img)
    return ('', 200)
@app.route('/query')
def query():
    # Query parameters arrive in the URL string (?key=value); missing
    # keys come through as None.
    params = request.args
    return jsonify(service.query(params.get('user'),
                                 params.get('interest'),
                                 params.get('longitude'),
                                 params.get('latitude')))
@app.route('/fullquery')
def fullquery():
    # Dump the entire data set as JSON.
    return jsonify(service.fullquery())
#prompt the user to enter their interest
@app.route('/interest/')
def interest():
    # Static sample payload describing one user's interests.
    sample = [{
        'user': 'Declan',
        'interest': ['reading, yikes', 'gaming', 'writing'],
    }]
    return jsonify(sample)
if __name__ == "__main__":
    # Bind to all interfaces so the host/container can reach the app.
    app.run(host='0.0.0.0', debug=False, port=5000)
|
nilq/baby-python
|
python
|
# Python exercise 73: build a tuple with the 20 teams of the Brazilian
# football championship table, in finishing order, then show:
#  a) the first 5 teams;
#  b) the last 4 teams;
#  c) the teams in alphabetical order;
#  d) the position of the Chapecoense team.
tabela_brasileirao = (
    "Flamengo", "Internacional", "Atlético - MG", "São Paulo", "Fluminense", "Grêmio", "Palmeiras", "Santos", "Athlético Paranaense", "Bragantino - Red Bull", "Ceará", "Corinthians", "Atlético - GO", "Bahia", "Sport", "Fortaleza", "Vasco da Gama", "Goiás", "Coritiba", "Botafogo")
print(f"\n(A) Os cinco primeiros colocados do campeonato foram: {tabela_brasileirao[:5]}\n")
print(f"(B) Os últimos colocados do campeonato foram: {tabela_brasileirao[-4:]}\n")
print(f"(C) Os times em ordem alfabética são: {sorted(tabela_brasileirao)}\n")
# Method 1 - look up any element's index directly.
# BUG FIX: index() is 0-based, so +1 is needed to report the placing
# (Bahia is 14th, not "13th").
print(f"O time da Bahia está na {tabela_brasileirao.index('Bahia') + 1}ª posição")
# Method 2 - search for an element with a for loop.
# BUG FIX: the loop compared against "Bahia" while the exercise (and the
# not-found message below) concern "Chapecoense"; search consistently.
for cont in range(0, len(tabela_brasileirao)):
    if tabela_brasileirao[cont] == "Chapecoense":
        print(f"(D) O time da {tabela_brasileirao[cont]} está na {cont+1}º Posição.\n")
    elif cont == len(tabela_brasileirao)-1 and tabela_brasileirao[cont] != "Chapecoense":
        print("(D) O time da Chapecoense não se encontra no campeonado este ano!\n")
|
nilq/baby-python
|
python
|
from argparse import ArgumentParser
import flom
from trainer import train
def make_parser():
    """Build the command-line parser for the motion-fitting trainer."""
    parser = ArgumentParser(description='Train the motion to fit to effectors')
    parser.add_argument('-i', '--input', type=str, required=True,
                        help='Input motion file')
    parser.add_argument('-r', '--robot', type=str, required=True,
                        help='Input robot model file')
    parser.add_argument('-t', '--timestep', type=float, default=0.0165/8,
                        help='Timestep')
    parser.add_argument('-s', '--frame-skip', type=int, default=8,
                        help='Frame skip')
    return parser
def main(args):
    """Load the motion file, run training, and print the fitted weights."""
    loaded_motion = flom.load(args.input)
    result = train(loaded_motion, args.robot, args.timestep, args.frame_skip)
    print(result)
if __name__ == '__main__':
    # Parse CLI arguments and kick off training when run as a script.
    parser = make_parser()
    args = parser.parse_args()
    main(args)
|
nilq/baby-python
|
python
|
"""
"""
from django.core.urlresolvers import reverse
from django.test import TestCase
from wagtail.tests.utils import WagtailTestUtils
class BaseTestIndexView(TestCase, WagtailTestUtils):
    """
    Base test case for CRUD index view.
    """
    url_namespace = None
    template_dir = None
    def _create_sequential_instance(self, index):
        """
        Stub method for extending class to create sequential
        model instances.
        :param index: the sequential index to use.
        """
        # BUG FIX: ``NotImplemented`` is a non-callable comparison sentinel,
        # so "raise NotImplemented(...)" produced a confusing TypeError.
        # The intended exception is NotImplementedError.
        raise NotImplementedError(
            'This method must be implemented by {0}'.format(
                self.__class__.__name__
            )
        )
    def setUp(self):
        self.login()
    def get(self, params=None):
        # GET the namespaced index view with optional query parameters.
        if not params:
            params = {}
        return self.client.get(
            reverse('{0}:index'.format(self.url_namespace)), params)
    def populate(self):
        """
        Populates several model class instances (enough for pagination).
        """
        for i in range(50):
            self._create_sequential_instance(i)
    def test_get(self):
        # Generate the response.
        response = self.get()
        # Check assertions.
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(
            response,
            '{0}/index.html'.format(self.template_dir)
        )
    def test_search(self):
        # Generate the response.
        response = self.get({'q': 'keyword'})
        # Check assertions.
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['query_string'], 'keyword')
    def test_pagination(self):
        # Create model class instances.
        self.populate()
        # Generate the response.
        response = self.get({'p': 2})
        # Check assertions.
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(
            response,
            '{0}/index.html'.format(self.template_dir)
        )
        self.assertEqual(response.context['page_obj'].number, 2)
    def test_pagination_invalid(self):
        # A non-numeric page number should fall back to page 1.
        self.populate()
        response = self.get({'p': 'fake'})
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(
            response,
            '{0}/index.html'.format(self.template_dir)
        )
        self.assertEqual(response.context['page_obj'].number, 1)
    def test_pagination_out_of_range(self):
        # An out-of-range page number should clamp to the last page.
        self.populate()
        response = self.get({'p': 99999})
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(
            response,
            '{0}/index.html'.format(self.template_dir)
        )
        self.assertEqual(
            response.context['page_obj'].number,
            response.context['paginator'].num_pages
        )
    def test_ordering(self):
        # Every supported ordering should render successfully.
        orderings = ['title', '-created_at']
        for ordering in orderings:
            response = self.get({'ordering': ordering})
            self.assertEqual(response.status_code, 200)
class BaseTestCreateView(TestCase, WagtailTestUtils):
    """
    Base test case for CRUD add view.
    """
    url_namespace = None
    template_dir = None
    model_class = None
    def _get_post_data(self):
        """
        Stub method for extending class to return data dictionary
        to create a new model instance on POST.
        :rtype: dict.
        """
        # BUG FIX: ``NotImplemented`` is not callable; raising it produced
        # a TypeError. The intended exception is NotImplementedError.
        raise NotImplementedError(
            'This method must be implemented by {0}'.format(
                self.__class__.__name__
            )
        )
    def setUp(self):
        self.login()
    def test_get(self):
        # Generate the response.
        response = self.client.get(
            reverse('{0}:add'.format(self.url_namespace))
        )
        # Check assertions.
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(
            response,
            '{0}/add.html'.format(self.template_dir)
        )
    def test_post(self):
        # Get POST data.
        data = self._get_post_data()
        # Generate the response.
        response = self.client.post(
            reverse('{0}:add'.format(self.url_namespace)),
            data
        )
        # Check assertions: redirect back to index and instance created.
        self.assertRedirects(
            response,
            reverse('{0}:index'.format(self.url_namespace))
        )
        self.assertTrue(
            self.model_class.objects.filter(**data).exists()
        )
class BaseTestUpdateView(TestCase, WagtailTestUtils):
    """
    Base test case for CRUD edit view.

    Subclasses must set ``url_namespace``, ``template_dir`` and
    ``model_class`` and implement ``_get_instance()`` and
    ``_get_post_data()``.
    """
    url_namespace = None
    template_dir = None
    model_class = None

    def _get_instance(self):
        """
        Stub method for extending class to return saved model class
        instance.

        :rtype: django.db.models.Model.
        """
        # Bug fix: raise NotImplementedError (an exception class), not the
        # non-raisable NotImplemented constant.
        raise NotImplementedError(
            'This method must be implemented by {0}'.format(
                self.__class__.__name__
            )
        )

    def _get_post_data(self):
        """
        Stub method for extending class to return data dictionary
        to create a new model instance on POST.

        :rtype: dict.
        """
        raise NotImplementedError(
            'This method must be implemented by {0}'.format(
                self.__class__.__name__
            )
        )

    def setUp(self):
        # Create the instance and login.
        self.instance = self._get_instance()
        self.login()

    def test_get(self):
        """The edit view renders with the expected template."""
        response = self.client.get(
            reverse(
                '{0}:edit'.format(self.url_namespace),
                args=(self.instance.pk,)
            )
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(
            response,
            '{0}/edit.html'.format(self.template_dir)
        )

    def test_post(self):
        """POSTing valid data updates the instance and redirects to index."""
        data = self._get_post_data()
        response = self.client.post(
            reverse(
                '{0}:edit'.format(self.url_namespace),
                args=(self.instance.pk,)
            ),
            data
        )
        self.assertRedirects(
            response,
            reverse('{0}:index'.format(self.url_namespace)))
        self.assertTrue(
            self.model_class.objects.filter(**data).exists()
        )
class BaseTestDeleteView(TestCase, WagtailTestUtils):
    """
    Base test case for CRUD delete view.

    Subclasses must set ``url_namespace``, ``template_dir`` and
    ``model_class`` and implement ``_get_instance()``.
    """
    url_namespace = None
    template_dir = None
    model_class = None

    def _get_instance(self):
        """
        Stub method for extending class to return saved model class
        instance.

        :rtype: django.db.models.Model.
        """
        # Bug fix: raise NotImplementedError (an exception class), not the
        # non-raisable NotImplemented constant.
        raise NotImplementedError(
            'This method must be implemented by {0}'.format(
                self.__class__.__name__
            )
        )

    def setUp(self):
        # Create the instance and login.
        self.instance = self._get_instance()
        self.login()

    def test_get(self):
        """The delete view renders its confirmation template."""
        response = self.client.get(
            reverse(
                '{0}:delete'.format(self.url_namespace),
                args=(self.instance.pk,)
            )
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(
            response,
            '{0}/confirm_delete.html'.format(self.template_dir)
        )

    def test_delete(self):
        """POSTing to the delete view removes the instance and redirects."""
        # Any POST body confirms the deletion; the payload itself is unused.
        response = self.client.post(
            reverse(
                '{0}:delete'.format(self.url_namespace),
                args=(self.instance.pk,)
            ),
            {'foo': 'bar'}
        )
        self.assertRedirects(
            response,
            reverse('{0}:index'.format(self.url_namespace))
        )
        self.assertFalse(
            self.model_class.objects.filter(pk=self.instance.pk).exists()
        )
class BaseTestChooserView(TestCase, WagtailTestUtils):
    """
    Base test for chooser view.

    Subclasses must set ``url_namespace``, ``template_dir`` and
    ``model_class`` and implement ``_create_sequential_instance()``.
    """
    url_namespace = None
    template_dir = None
    model_class = None

    def _create_sequential_instance(self, index):
        """
        Stub method for extending class to create sequential
        model instances.

        :param index: the sequential index to use.
        """
        # Bug fix: raise NotImplementedError (an exception class), not the
        # non-raisable NotImplemented constant.
        raise NotImplementedError(
            'This method must be implemented by {0}'.format(
                self.__class__.__name__
            )
        )

    def setUp(self):
        self.login()

    def get(self, params=None):
        """GET the chooser view with optional query parameters."""
        if not params:
            params = {}
        return self.client.get(
            reverse('{0}:choose'.format(self.url_namespace)),
            params
        )

    def populate(self):
        """
        Populates several model class instance.
        """
        # Enough instances to guarantee multiple pages of results.
        for i in range(50):
            self._create_sequential_instance(i)

    def test_get(self):
        """The chooser renders its page, results and JS templates."""
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(
            response,
            '{0}/chooser.html'.format(self.template_dir)
        )
        self.assertTemplateUsed(
            response,
            '{0}/results.html'.format(self.template_dir)
        )
        self.assertTemplateUsed(
            response,
            '{0}/chooser.js'.format(self.template_dir)
        )

    def test_search(self):
        """A search keyword is echoed back in the template context."""
        response = self.get({'q': 'keyword'})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['query_string'], 'keyword')

    def test_pagination(self):
        """Requesting page 2 returns that page of results."""
        self.populate()
        response = self.get({'p': 2})
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(
            response,
            '{0}/results.html'.format(self.template_dir)
        )
        self.assertEqual(response.context['page_obj'].number, 2)

    def test_pagination_invalid(self):
        """A non-numeric page parameter falls back to page 1."""
        self.populate()
        response = self.get({'p': 'fake'})
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(
            response,
            '{0}/results.html'.format(self.template_dir)
        )
        self.assertEqual(response.context['page_obj'].number, 1)

    def test_pagination_out_of_range(self):
        """A page number past the end clamps to the last page."""
        self.populate()
        response = self.get({'p': 99999})
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(
            response,
            '{0}/results.html'.format(self.template_dir)
        )
        self.assertEqual(
            response.context['page_obj'].number,
            response.context['paginator'].num_pages
        )
class BaseTestChosenView(TestCase, WagtailTestUtils):
    """
    Base test for the chosen view.

    Subclasses must set ``url_namespace``, ``template_dir`` and
    ``model_class`` and implement ``_get_instance()``.
    """
    url_namespace = None
    template_dir = None
    model_class = None

    def _get_instance(self):
        """
        Stub method for extending class to return saved model class
        instance.

        :rtype: django.db.models.Model.
        """
        # Bug fix: raise NotImplementedError (an exception class), not the
        # non-raisable NotImplemented constant.
        raise NotImplementedError(
            'This method must be implemented by {0}'.format(
                self.__class__.__name__
            )
        )

    def setUp(self):
        # Create the instance and login.
        self.instance = self._get_instance()
        self.login()

    def test_get(self):
        """The chosen view renders its JS response template."""
        response = self.client.get(
            reverse(
                '{0}:chosen'.format(self.url_namespace),
                args=(self.instance.id,)
            )
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(
            response,
            '{0}/chosen.js'.format(self.template_dir)
        )
class BaseTestChooserCreateView(TestCase, WagtailTestUtils):
    """
    Base test case for CRUD add view.

    Subclasses must set ``url_namespace``, ``template_dir`` and
    ``model_class`` and implement ``_get_post_data()``.
    """
    url_namespace = None
    template_dir = None
    model_class = None

    def _get_post_data(self):
        """
        Stub method for extending class to return data dictionary
        to create a new model instance on POST.

        :rtype: dict.
        """
        # Bug fix: raise NotImplementedError (an exception class), not the
        # non-raisable NotImplemented constant.
        raise NotImplementedError(
            'This method must be implemented by {0}'.format(
                self.__class__.__name__
            )
        )

    def setUp(self):
        self.login()

    def test_get(self):
        """The chooser renders its page, results and JS templates."""
        response = self.client.get(
            reverse('{0}:choose'.format(self.url_namespace))
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(
            response,
            '{0}/chooser.html'.format(self.template_dir)
        )
        self.assertTemplateUsed(
            response,
            '{0}/results.html'.format(self.template_dir)
        )
        self.assertTemplateUsed(
            response,
            '{0}/chooser.js'.format(self.template_dir)
        )

    def test_post(self):
        """POSTing valid data creates the instance and responds via JS."""
        data = self._get_post_data()
        response = self.client.post(
            reverse('{0}:choose'.format(self.url_namespace)),
            data
        )
        self.assertTemplateUsed(
            response,
            '{0}/chosen.js'.format(self.template_dir)
        )
        self.assertContains(
            response,
            'modal.respond'
        )
        self.assertTrue(
            self.model_class.objects.filter(**data).exists()
        )
|
nilq/baby-python
|
python
|
from ect_def import add_dict
"""
Rules of Follow
1) if A is a nonterminal and start sign then FOLLOW(A) include $
2) if B -> aAb, b != epsilon then FOLLOW(A) include FIRST(b) without epsilon
3) if B -> aA or B -> aAb b=>epsilon then add FOLLOW(B) to FOLLOW(A)
"""
def getFollow(terminals:list, non_terminals:list, cfg:dict, first:dict, start_nonterminal:str) :
    """Compute the FOLLOW set of every nonterminal in the grammar.

    ``cfg`` maps each nonterminal to a list of space-separated productions
    and ``first`` maps symbols to their FIRST sets.  Returns a dict mapping
    nonterminals to their FOLLOW sets.

    NOTE(review): assumes ``add_dict(d, key, value)`` from ect_def inserts
    *value* under *key* and returns a truthy result only when the dict
    actually changed (relied on by the fixpoint loop below) -- TODO confirm.
    """
    follow = {}
    # rule 1: the start symbol's FOLLOW set contains the end marker "$"
    add_dict(follow, start_nonterminal, "$")
    for non_terminal in non_terminals:
        # rule 2: for B -> a A b, FOLLOW(A) gains FIRST(b) minus epsilon
        for cfg_result in cfg[non_terminal]:
            splited = cfg_result.split(' ')
            for index, word in enumerate(splited):
                # if word is a nonterminal and a next word exists
                if word in non_terminals and index < len(splited)-1:
                    next_word = splited[index+1]
                    if next_word in terminals:
                        add_dict(follow, word, splited[index+1])
                    else :
                        # FIRST(next) == {epsilon}: nothing to add here
                        if len(first[next_word]) == 1 and 'epsilon' in first[next_word]:
                            continue
                        else :
                            for first_elm in first[next_word]:
                                if first_elm != 'epsilon' and not(word in follow.keys() and first_elm in follow[word]):
                                    add_dict(follow, word, first_elm)
    # rule 3: collect "FOLLOW(word) includes FOLLOW(non_terminal)" relations
    include_relation = {}
    for non_terminal in non_terminals:
        for cfg_result in cfg[non_terminal]:
            splited = cfg_result.split(' ')
            for index, word in enumerate(splited):
                # word is a nonterminal and the last symbol of the production
                if word in non_terminals and index == len(splited) - 1:
                    if word == non_terminal:
                        continue
                    if word in include_relation.keys():
                        if not(non_terminal in include_relation[word]) :
                            add_dict(include_relation, word, non_terminal)
                    else :
                        add_dict(include_relation, word, non_terminal)
                # word is a nonterminal, not last, and every following symbol
                # can derive epsilon
                elif word in non_terminals and index != len(splited) - 1:
                    possible_epsilon = True
                    for i in range(index+1,len(splited)):
                        if splited[i] in terminals:
                            possible_epsilon = False
                            continue
                        if not('epsilon' in first[splited[i]]):
                            possible_epsilon = False
                    if possible_epsilon == True:
                        add_dict(include_relation, word, non_terminal)
    # propagate FOLLOW sets along the include relations until a fixpoint
    while(True):
        change_count = 0
        for key in include_relation.keys():
            for value in include_relation[key]:
                if not(value in follow.keys()):
                    continue
                else:
                    for add_value in follow[value]:
                        if add_dict(follow, key, add_value):
                            change_count += 1
        if change_count == 0 :
            break
    return follow
|
nilq/baby-python
|
python
|
# Copyright 2018-2020 Jakub Kuczys (https://github.com/jack1142)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Dict, List, Literal, Tuple
from ..schema import COG_KEYS_ORDER, REPO_KEYS_ORDER, SHARED_FIELDS_KEYS_ORDER
from ..typedefs import CogInfoDict
if TYPE_CHECKING:
from ..context import InfoGenMainCommand
__all__ = ("check_key_order",)
def check_key_order(ctx: InfoGenMainCommand) -> bool:
    """Temporary order checking, until strictyaml adds proper support for sorting."""
    checks = (
        _check_repo_info_and_shared_fields_key_order,
        _check_cog_names_alphaorder,
        _check_cog_info_key_order,
    )
    # Run every check (no short-circuit) so all warnings get printed.
    success = True
    for check in checks:
        success &= check(ctx)
    return success
def _check_repo_info_and_shared_fields_key_order(ctx: InfoGenMainCommand) -> bool:
    """Verify key order in the `repo` and `shared_fields` sections."""
    sections: Dict[Literal["repo", "shared_fields"], List[str]] = {
        "repo": REPO_KEYS_ORDER,
        "shared_fields": SHARED_FIELDS_KEYS_ORDER,
    }
    success = True
    for section_name, expected_order in sections.items():
        keys = list(ctx.data[section_name].keys())
        expected = sorted(keys, key=expected_order.index)
        if keys != expected:
            print(
                "\033[93m\033[1mWARNING:\033[0m "
                f"Keys in `{section_name}` section have wrong order - use this order: "
                f"{', '.join(expected)}"
            )
            success = False
    return success
def _check_cog_names_alphaorder(ctx: InfoGenMainCommand) -> bool:
    """Verify cog package names in the `cogs` section are alphabetical."""
    cog_names = list(ctx.cogs.keys())
    if cog_names == sorted(cog_names):
        return True
    print(
        "\033[93m\033[1mWARNING:\033[0m "
        "Cog names in `cogs` section aren't sorted. Use alphabetical order."
    )
    return False
def _check_cog_info_key_order(ctx: InfoGenMainCommand) -> bool:
    """Verify key order inside each cog's section, then its collections."""
    success = True
    for pkg_name, cog_info in ctx.cogs.items():
        # strictyaml breaks ordering of keys for optionals with default values
        present_keys = [k for k, v in cog_info.items() if v]
        expected_keys = sorted(present_keys, key=COG_KEYS_ORDER.index)
        if present_keys != expected_keys:
            print(
                "\033[93m\033[1mWARNING:\033[0m "
                f"Keys in `cogs->{pkg_name}` section have wrong order"
                f" - use this order: {', '.join(expected_keys)}"
            )
            print(present_keys)
            print(expected_keys)
            success = False
        success &= _check_cog_info_collections_alphaorder(pkg_name, cog_info)
    return success
def _check_cog_info_collections_alphaorder(
    pkg_name: str, cog_info: CogInfoDict
) -> bool:
    """Verify the list-like cog fields are alphabetically sorted."""
    keys: Tuple[Literal["required_cogs", "requirements", "tags"], ...] = (
        "required_cogs",
        "requirements",
        "tags",
    )
    success = True
    for key in keys:
        value = cog_info[key]
        # required_cogs may be a mapping; compare its key order then.
        items = list(value.keys()) if isinstance(value, dict) else value
        expected = sorted(items)
        if items == expected:
            continue
        friendly_name = key.capitalize().replace("_", " ")
        print(
            "\033[93m\033[1mWARNING:\033[0m "
            f"{friendly_name} for `{pkg_name}` cog aren't sorted."
            " Use alphabetical order."
        )
        print(items)
        print(expected)
        success = False
    return success
|
nilq/baby-python
|
python
|
# List exercises from Hashtag's "Python Impressionador" course.

## 1. Revenue of the best and the worst month of the year
meses = ['jan', 'fev', 'mar', 'abr', 'mai', 'jun', 'jul', 'ago', 'set', 'out', 'nov', 'dez']
vendas_1sem = [25000, 29000, 22200, 17750, 15870, 19900]
vendas_2sem = [19850, 20120, 17540, 15555, 49051, 9650]

# Whole year of sales (first + second semester).
vendas = vendas_1sem + vendas_2sem

# Best/worst sale values and the month index each occurred in.
maiorValor = max(vendas)
menorValor = min(vendas)
melhorMes = vendas.index(maiorValor)
piorMes = vendas.index(menorValor)

## 2. Report best/worst month, total revenue, and the best month's share
print(f'O melhor mês do ano foi {meses[melhorMes]} com {maiorValor} vendas, \nE o pior mês do ano foi {meses[piorMes]} com {menorValor} vendas.')

total = sum(vendas)
print(f'O faturamento total foi de R$ {total:.2f}.')

percentual = (maiorValor / total)
print(f'O maior mês representa {percentual:.2%} do total de vendas.')

## 3. Build the top-3 sales values of the year (without eyeballing)
# Work on a copy so month positions in `vendas` stay intact.
top3 = []
lista_temp = vendas.copy()
for _ in range(3):
    maior = max(lista_temp)
    top3.append(maior)
    lista_temp.remove(maior)

# Map each top value back to its month via its position in `vendas`.
print(f'Os meses com maiores vendas foram:')
for valor in top3:
    print(f'{meses[vendas.index(valor)]} com {valor}')
|
nilq/baby-python
|
python
|
"""
Module to look for, and parse, Function and settings.
"""
import os
import json
from . import logger
def is_function(folder: str) -> bool:
    """Return True when *folder* contains a ``function.json`` file."""
    return os.path.isfile(f"{folder}/function.json")
def get_functions(path: str) -> list:
    """Return (folder_path, folder_name) tuples for each Function under *path*."""
    found = []
    for entry in os.listdir(path):
        candidate = f"{path}/{entry}"
        if os.path.isdir(candidate) and is_function(candidate):
            found.append((candidate, entry))
    return found
def load_json_file(path: str) -> dict:
    """Parse the file at *path* as JSON and return the result."""
    with open(path, "r") as json_file:
        return json.load(json_file)
def load_function_settings(path: str) -> dict:
    """Load a single Function's ``function.json`` settings."""
    return load_json_file(f"{path}/function.json")
def load_project_settings(path: str) -> dict:
    """Load the project-level ``host.json`` settings."""
    return load_json_file(f"{path}/host.json")
def load_project(path: str) -> dict:
    """Load host.json plus every Function's settings under *path*.

    Errors are tolerated: a missing or broken host.json yields an empty
    project dict, and Functions with invalid JSON are logged and skipped.
    """
    try:
        project_settings = load_project_settings(path)
    except json.decoder.JSONDecodeError:
        logger.error("Unable to parse host.json: invalid JSON.")
        project_settings = {}
    except FileNotFoundError:
        logger.error("Unable to parse host.json: file not found.")
        project_settings = {}
    functions_settings = []
    for function_path, function_name in get_functions(path):
        try:
            functions_settings.append(
                (function_name, load_function_settings(function_path))
            )
        except json.decoder.JSONDecodeError:
            logger.error("[{}] Unable to parse Function settings: invalid JSON.".format(function_name))
    return {"project": project_settings, "functions": functions_settings}
|
nilq/baby-python
|
python
|
from django.contrib import admin
from django.apps import apps

# Register every installed model with the default admin site.
for model in apps.get_models():
    try:
        # Bug fix: the previous `admin.register(model)` only *returned* a
        # decorator without applying it, so nothing was ever registered.
        # `admin.site.register` performs the actual registration.
        admin.site.register(model)
    except admin.sites.AlreadyRegistered:
        # Apps may have registered their own ModelAdmin already.
        pass
|
nilq/baby-python
|
python
|
"""
To be filled in with official datajoint information soon
"""
from .connection import conn, Connection
|
nilq/baby-python
|
python
|
"""
pipeline effects
"""
import sys
import abc
import enum
import logging
from itertools import zip_longest
from typing import Dict, Optional
from .actions import Action, SendOutputAction, CheckOutputAction
from .exceptions import CheckDelivery, Retry
from .utils import NamedSerializable, class_from_string
# Registry mapping effect name -> effect class.
_registered_effects: Dict = {}

logger = logging.getLogger()


def register_effect(effect_cls):
    """Class decorator: add *effect_cls* to the effect registry.

    Raises if an effect with the same ``name`` was already registered.
    """
    name = effect_cls.name
    if name in _registered_effects:  # pragma: no cover
        raise Exception("Effect with name %s already registered" % name)
    _registered_effects[name] = effect_cls
    return effect_cls
def load_effect(data):
    """Re-create an effect from its serialized form.

    ``data[0]`` is the registered effect name; the remaining two entries
    are passed to the effect class's ``load``.
    """
    effect_cls = _registered_effects[data[0]]
    serialized_args, serialized_state = data[1], data[2]
    return effect_cls.load(serialized_args, serialized_state)
def get_class_instance(cls_name, args, kwargs):
    """Get instance from class name and arguments."""
    return class_from_string(cls_name)(*args, **kwargs)
class EffectStatus(enum.Enum):
    """Route status.

    Lifecycle states of an effect within a delivery route.
    """
    PENDING = 1   # not yet fully applied
    FINISHED = 2  # applied successfully
    FAILED = 3    # applied but ultimately failed
class OutputStatus(enum.Enum):
    """Output status for message.

    Per-output delivery state tracked by SendEffect.
    TODO: move to better place
    """
    PENDING = 1  # not attempted yet
    CHECK = 2    # sent, delivery must still be verified
    SUCCESS = 3  # delivered
    FAIL = 4     # delivery failed permanently
    RETRY = 5    # delivery failed, will be retried
    SKIP = 6     # output intentionally skipped
class Effect(NamedSerializable, abc.ABC):
    """Abstract pipeline effect.

    Effect used in delivery pipeline generator. Pipeline return effects instead
    of performing heavy operations. Any effect can be serialized and
    transferred to the place of execution.

    The method `next_effect` must be implemented on all derived classes and
    must return an `Action` instance or `None` if no next action available for
    this effect and it can be marked as applied by `MessageConsumer`.
    """

    # Unique registry name; subclasses must set it.
    name: str

    def __init__(self, *args, **kwargs):
        # Raw constructor arguments are retained so the effect can be
        # serialized and re-created at the place of execution.
        self.args = args
        self.kwargs = kwargs
        assert self.name, "Effect must define `name` property"

    @abc.abstractmethod
    def next_action(self, state) -> Optional[Action]:
        """Get next effect action.

        Receive state from last `apply` call.

        Return `Action` instance or `None` if no more actions available for
        this effect.
        """
        pass  # pragma: no cover

    @abc.abstractmethod
    def apply(self, message):
        """Apply next action and return next state.
        """
        pass  # pragma: no cover

    # pylint: disable=no-self-use
    def serialize_state(self, state):
        """Serialize effect state.
        """
        return state  # pragma: no cover

    def load_state(self, data):
        """Load serialized effect state.
        """
        return data  # pragma: no cover

    def pretty(self, state):  # pragma: no cover
        """Pretty print effect.
        """
        return ''

    def __eq__(self, other):
        # Effects compare by serialized form; comparing with an unrelated
        # type is treated as a programming error rather than returning False.
        if not isinstance(other, self.__class__):  # pragma: no cover
            raise TypeError("Effect and %s can't be compared" % type(other))
        return self.serialize() == other.serialize()
@register_effect
class SendEffect(Effect):
    """Effect: send message through outputs.

    Accepts outputs as the args.  Per-output progress is tracked as a list
    of OutputStatus values parallel to ``self.args``.
    """
    name = 'send'

    def next_action(self, state=None):
        """Next effect action.

        Returns a CheckOutputAction for outputs awaiting verification,
        a SendOutputAction for pending outputs, or None when done.
        """
        state = self.reset_state(state)
        position = self.next_action_pos(state)
        if position is None:
            return None
        selected_output = self.args[position]
        if state[position] == OutputStatus.CHECK:
            return CheckOutputAction(selected_output)
        return SendOutputAction(selected_output)

    def next_action_pos(self, state):
        """Next effect action position.

        PENDING outputs take priority; otherwise fall back to CHECK ones.
        """
        state = self.reset_state(state, reset_pending=True)
        selected_output = None
        # search next pending backend
        for i, (_, status) in enumerate(zip(self.args, state)):
            if status == OutputStatus.PENDING:
                selected_output = i
                break
        else:
            # no PENDING output: look for one awaiting a delivery check
            for i, (_, status) in enumerate(zip(self.args, state)):
                if status == OutputStatus.CHECK:
                    selected_output = i
                    break
        return selected_output

    def reset_state(self, state, reset_pending=False):
        """Reset state.

        `reset_pending=True` force reset all RETRY to PENDING.
        TODO: also reset CHECK to CHECK_PENDING

        :params bool reset_pending: reset to reset_pending
        """
        if state is None or state == []:
            # create default state with all backends pending
            state = [OutputStatus.PENDING for b in self.args]
        assert len(state) == len(self.args), "State and args length must match"
        # only promote RETRY -> PENDING once no PENDING outputs remain
        if reset_pending and OutputStatus.PENDING not in state:
            for i, status in enumerate(state):
                if status == OutputStatus.RETRY:
                    state[i] = OutputStatus.PENDING
        return state

    def apply(self, message):
        """Send message through next pending output.

        Modifies message route. Return state.
        """
        state = message.get_route_state(self)
        state = self.reset_state(state)
        position = self.next_action_pos(state)
        action = self.next_action(state)
        retry = message.get_route_retry(self)
        try:
            result = action.execute(message, retry)
            if result is False:  # ignore None
                state[position] = OutputStatus.FAIL
            else:
                state[position] = OutputStatus.SUCCESS
        except CheckDelivery:
            # delivery outcome unknown yet; verify on a later pass
            state[position] = OutputStatus.CHECK
        except Retry:
            # bump the retry counter and mark the output for another attempt
            prev = message.get_route_retry(self)
            message.set_route_retry(self, prev + 1)
            state[position] = OutputStatus.RETRY
            message.log.info("Delivery retried (%i)", prev + 1)
        return state

    def load_state(self, data):
        # Stored as plain ints; rebuild the enum values.
        if not data:
            data = []
        return [OutputStatus(status) for status in data]

    def serialize_state(self, state):
        if not state:
            state = []
        return [status.value for status in state]

    def serialize_args(self):
        return [b.serialize() for b in self.args]

    @classmethod
    def load_args(cls, args):
        return [get_class_instance(*b) for b in args]

    def pretty(self, state):
        """Pretty format effect.
        """
        action_format = "{a.__class__.__name__} <{s}>"
        if not state:
            state = self.reset_state(state)
        return '\n\t\t\t'.join([
            action_format.format(a=a, s=s) for a, s in zip_longest(
                self.args, state
            )
        ])
# @register_effect
# class CallEffect(Effect):
# name = 'call'
# def next_effect(self, state):
# """Execute callable in message consumer.
# """
# return CallAction(self.args[0], *self.args[1:], **self.kwargs)
# Convenience alias so the effect can be referenced as ``effects.send``.
send = SendEffect
# call = CallEffect

# Expose every registered effect as a module-level attribute by name.
for name, effect in _registered_effects.items():
    setattr(sys.modules[__name__], name, effect)
|
nilq/baby-python
|
python
|
import numpy as np
from . import backends
from importlib import import_module
class Predictor():
    """Run model predictions through a backend and apply the
    postprocessing functions declared in the configuration.

    ``config['postprocessors']`` is expected to be a list of one-entry
    dicts mapping a dotted function path to its parameters (list for
    positional, dict for keyword) -- TODO confirm against callers.
    """

    def __init__(self, model, config=None, backend=backends.backend()):
        """Store the model/backend and resolve configured postprocessors.

        :param model: required model object (falsy values are rejected).
        :param config: optional configuration dict.
        :param backend: prediction backend (defaults to the project backend).
        """
        # `assert` is stripped under -O; validate explicitly instead.
        if not model:
            raise ValueError("Predictor requires a model")
        self.model = model
        # Fixed mutable-default pitfall: each instance gets a fresh dict
        # when no config is supplied.
        self.config = {} if config is None else config
        self.backend = backend
        self.postprocessors = []
        postprocessors = self.config.get('postprocessors')
        if postprocessors:
            print('___ loading postprocessors ___')
            for f in postprocessors:
                # Each entry: {"package.module.function": parameters}
                full_function = list(f.keys())[0]
                module_name, function_name = full_function.rsplit('.', 1)
                parameters = f[full_function]
                print(module_name, function_name, parameters)
                mod = import_module(module_name)
                met = getattr(mod, function_name)
                self.postprocessors.append(
                    {'function': met, 'parameters': parameters})

    def postprocessing(self, img):
        """Apply every configured postprocessor to *img* in order."""
        for f in self.postprocessors:
            # A list means positional arguments, anything else keyword.
            if isinstance(f['parameters'], list):
                img = f['function'](img, *f['parameters'])
            else:
                img = f['function'](img, **f['parameters'])
        return img

    def predict(self, img):
        """Predict on a single image and postprocess the result."""
        prediction = self.backend.predict(self, img)
        return self.postprocessing(prediction)

    def batch_predict(self, img_batch):
        """Predict on a batch of images and postprocess the result."""
        prediction = self.backend.batch_predict(self, img_batch)
        return self.postprocessing(prediction)
|
nilq/baby-python
|
python
|
# A complete sudoku row/column/subgrid holds 9 cells; the board has 9 rows.
MAX_ARRAY_COUNT = MAX_ROWS = 9
# Sum of 1..9 -- upper bound used during row validation.
MAX_ARRAY_SUM = 45
# The sorted contents of any solved row, column, or subgrid.
COMPLETE_ARRAY = [1, 2, 3, 4, 5, 6, 7, 8, 9]
# Lookup table: SUBGRIDS_BY_ROWS[row][col] -> subgrid index (0-8,
# numbered left-to-right, top-to-bottom).
SUBGRIDS_BY_ROWS = [
    [0, 0, 0, 1, 1, 1, 2, 2, 2],
    [0, 0, 0, 1, 1, 1, 2, 2, 2],
    [0, 0, 0, 1, 1, 1, 2, 2, 2],
    [3, 3, 3, 4, 4, 4, 5, 5, 5],
    [3, 3, 3, 4, 4, 4, 5, 5, 5],
    [3, 3, 3, 4, 4, 4, 5, 5, 5],
    [6, 6, 6, 7, 7, 7, 8, 8, 8],
    [6, 6, 6, 7, 7, 7, 8, 8, 8],
    [6, 6, 6, 7, 7, 7, 8, 8, 8],
]
def _filter_zeros(xs):
    """Return a list of numbers with 0s removed from the list."""
    return [x for x in xs if x != 0]
def _subgrid(r, c):
    """Given a row/col coordinate, identify which subgrid it's in."""
    # Direct table lookup; subgrid ids run 0-8 left-to-right, top-to-bottom.
    return SUBGRIDS_BY_ROWS[r][c]
def validate_row(numbers):
    """
    Given a row of numbers, verify it could be a sudoku row.

    Arguments:
    - list of integers

    Returns: n/a

    Notes:
    - A valid list
        - must be 9 numbers.
        - each number must be between 0-9.
        - 0's can be repeated in the list.
        - numbers 1-9 must be unique
    - If a row is invalid, an exception is thrown.
    """
    # Idiomatic type check (was: comparing type(...).__name__ to "list").
    if not isinstance(numbers, list):
        raise TypeError("Invalid argument, must be a list")
    # Message fix: the old "too many numbers" text was wrong for short rows.
    if len(numbers) != MAX_ARRAY_COUNT:
        raise ValueError("Invalid array: must contain exactly 9 numbers")
    # Zeros mark unknown cells and may repeat; only 1-9 must be unique.
    no_zeros = _filter_zeros(numbers)
    if len(set(no_zeros)) != len(no_zeros):
        raise ValueError("Invalid row: row has duplicate numbers")
    # A row summing past 45 must contain values outside 1-9.
    if sum(numbers) > MAX_ARRAY_SUM:
        raise ValueError("Invalid row: row total is too high")
# TODO: What if each "cell" was an object, that contained it's row, column,
# subgrid?
class Grid:
    """
    Provide instances of sudoku board/grids.

    The values in the board are stored in 3 attributes of the object. The
    trade off, in theory, is by keeping the rows, columns, and subgrids
    duplicated, it should be more efficient to check if a board is solved.

    An alternative would be to store the board as a 2D array, and the concept
    of rows, columns, and subgrids can be calculated when checking for a cells'
    solution. However given that each cell may have to be looked up multiple
    times I opted to triplicate the data...which feels gross.
    """
    def __init__(self):
        self.grid_type = "grid"
        # The same cell values are kept in three parallel structures;
        # _update_cell keeps them in sync.
        self.rows = []
        self.columns = [[], [], [], [], [], [], [], [], []]
        self.subgrids = {}
        for i in range(9):
            self.subgrids[i] = []

    def add_row(self, numbers):
        """
        Add a row to the grid.

        Arguments:
        - list of positive integers, 1-9

        Returns: n/a

        Usage:
            g.add_row([1, 2, 3, 4, 5, 6, 7, 8, 9])
            g.add_row([1, 2, 3, 0, 0, 0, 7, 8, 9])

        Notes:
        - The list of numbers is validated and will be rejected if invalid.
            - Must have 9 digits
            - Can include multiple 0's for unknown cells
            - Any numbers in the range 1-9 must be unique
        - The grid is not checked to see if a row will make a board that's
          impossible to solve.
        """
        validate_row(numbers)
        if len(self.rows) >= MAX_ROWS:
            raise RuntimeError("Grid is full: no more rows can be added")
        row_number = len(self.rows)
        # Fan the row's values out into the column and subgrid views too.
        for col, val in enumerate(numbers):
            self.columns[col].append(val)
            self.subgrids[_subgrid(row_number, col)].append(val)
        self.rows.append(numbers)

    # TODO: should this print() or return an array of arrays?
    def show(self):
        """Print the grid in its current state."""
        for row in self.rows:
            print(row)

    def solve(self):
        """
        Solve the sudoku puzzle.

        Arguments: n/a

        Returns: n/a

        Usage:
            g.solve()

        Notes:
        - This method will mutate the instance's attributes. As it finds
          solutions to cells it will update accordingly.
        - If a puzzle can't be solved (the puzzle is iterated through with
          no changes), an exception is thrown to avoid an infinite loop.
        """
        while self.solved() is False:
            changes = 0
            for r, row in enumerate(self.rows):
                for c, _ in enumerate(row):
                    # 0 marks an unsolved cell.
                    if self.rows[r][c] == 0:
                        result = self._solve_cell(r, c)
                        if result > 0:
                            self._update_cell(r, c, result)
                            changes += 1
            # A full pass with no progress means this naive strategy
            # cannot finish the puzzle.
            if changes == 0:
                raise Exception("Puzzle is unsolvable")

    def solved(self):
        """
        Check if puzzle is solved.

        Arguments: n/a

        Returns:
        - bool, True or False
        """
        for row in self.rows:
            if sorted(row) != COMPLETE_ARRAY:
                return False
        for col in self.columns:
            if sorted(col) != COMPLETE_ARRAY:
                return False
        for subgrid in self.subgrids:
            if sorted(self.subgrids[subgrid]) != COMPLETE_ARRAY:
                return False
        return True

    def _solve_cell(self, r, c):
        """
        Given a cell (row, column), try to identify it's correct value.

        Arguments:
        - row, integer
        - column, integer

        Returns:
        - value, integer (0 when the cell cannot be determined yet)

        Details:
        - Row and column correspond to a row and column in the puzzle.
        - The check is to try and find a unique integer not yet used in
            - the row
            - the column
            - and the subgrid
        """
        subgrid = self.subgrids[_subgrid(r, c)]
        row = self.rows[r]
        col = self.columns[c]
        # Candidates = digits not present in the row, column, or subgrid.
        available = set.difference(
            set(COMPLETE_ARRAY), set(subgrid), set(row), set(col)
        )
        if len(available) == 1:
            return available.pop()
        return 0

    # TODO: we store the same data 3 ways...I wonder if there's a way to do this
    # just once to make updating easier?
    def _update_cell(self, r, c, val):
        """
        Given a row/col coordinate and value, update the grid with the value.

        Arguments:
        - row, integer
        - column, integer

        Returns: n/a

        Details:
        - Since the instance tracks rows, columns, and subgrids separately
          this method is used to keep them updated so they're in sync.
        """
        self.rows[r][c] = val
        self.columns[c][r] = val
        subgrid = self.subgrids[_subgrid(r, c)]
        # Position within the 3x3 subgrid, stored row-major.
        subgrid_index = (r % 3) * 3 + c % 3
        subgrid[subgrid_index] = val
|
nilq/baby-python
|
python
|
import logging
from django import http
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.core.mail import EmailMultiAlternatives
from django.http import Http404, HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.template.loader import render_to_string
from django.urls import reverse
from flags.state import flag_enabled
from ..base.models import FeedbackURLConfig, MessagingLog, User
from ..base.utils import get_admin_email
from ..base.views import (
ReportAbuseView,
SendMessageView,
get_private_profiles,
get_profiles_data,
)
from ..localgroups.models import LocalGroup
from .forms import (
DeleteProfileForm,
EditProfileCareerForm,
EditProfileCauseAreasForm,
EditProfileCommunityForm,
EditProfileForm,
)
from .models import (
CauseArea,
ExpertiseArea,
GivingPledge,
Membership,
OrganisationalAffiliation,
Profile,
ProfileAnalyticsLog,
ProfileSlug,
)
def profile_detail_or_redirect(request, slug, first_visit=False):
    """Render the profile for *slug*, or permanently redirect to the
    canonical slug when *slug* is an outdated alias."""
    slug_entry = get_object_or_404(ProfileSlug, slug=slug)
    profile = slug_entry.content_object
    can_view = profile and request.user.has_perm("profiles.view_profile", profile)
    if not can_view:
        raise Http404("No profile exists with that slug.")
    if slug_entry.redirect:
        return redirect("profile", slug=profile.slug, permanent=True)
    context = {"profile": profile, "first_visit": first_visit}
    return render(request, "eahub/profile.html", context)
def profile_redirect_from_legacy_record(request, legacy_record):
    """Permanently redirect a legacy-record URL to the profile's slug URL.

    Raises PermissionDenied when the requester may not view the profile.
    """
    user = request.user
    profile = get_object_or_404(
        Profile.objects.visible_to_user(user), legacy_record=legacy_record
    )
    # Previously an ``assert``: asserts are stripped when Python runs with
    # -O, which would silently skip this permission check. Raise explicitly.
    if not user.has_perm("profiles.view_profile", profile):
        raise PermissionDenied
    return redirect("profile", slug=profile.slug, permanent=True)
@login_required
def my_profile(request, first_visit=False):
    """Show the logged-in user's own profile (404 if they have none)."""
    user = request.user
    if not hasattr(user, "profile"):
        raise http.Http404("user has no profile")
    return profile_detail_or_redirect(
        request, slug=user.profile.slug, first_visit=first_visit
    )
@login_required
def my_profile_first_visit(request):
    """Variant of ``my_profile`` rendered on the user's first visit."""
    return my_profile(request, first_visit=True)
class ReportProfileAbuseView(ReportAbuseView):
    # Looks up the reported profile from the URL slug.
    def profile(self):
        return Profile.objects.get(slug=self.kwargs["slug"])

    # Label used by the base view to categorize the abuse report.
    def get_type(self):
        return "profile"
class SendProfileMessageView(SendMessageView):
def get_recipient(self):
    """Return the Profile addressed by the URL slug."""
    recipient = Profile.objects.get(slug=self.kwargs["slug"])
    if recipient is None:
        raise Exception("Could not find profile")
    return recipient
def form_valid(self, form):
message = form.cleaned_data["your_message"]
recipient = self.get_recipient()
sender_name = form.cleaned_data["your_name"]
subject = f"{sender_name} wants to connect with {recipient.name}!"
sender_email_address = form.cleaned_data["your_email_address"]
feedback_url = FeedbackURLConfig.get_solo().site_url
admins_email = get_admin_email()
profile_edit_url = self.request.build_absolute_uri(reverse("edit_profile"))
txt_message = render_to_string(
"emails/message_profile.txt",
{
"sender_name": sender_name,
"recipient": recipient.name,
"message": message,
"admin_email": admins_email,
"feedback_url": feedback_url,
"profile_edit_url": profile_edit_url,
},
)
html_message = render_to_string(
"emails/message_profile.html",
{
"sender_name": sender_name,
"recipient": recipient.name,
"message": message,
"admin_email": admins_email,
"feedback_url": feedback_url,
"profile_edit_url": profile_edit_url,
},
)
email = EmailMultiAlternatives(
subject=subject,
body=txt_message,
from_email=admins_email,
to=[recipient.user.email],
reply_to=[sender_email_address],
)
email.attach_alternative(html_message, "text/html")
email.send()
log = MessagingLog(
sender_email=sender_email_address,
recipient_email=recipient.user.email,
recipient_type=MessagingLog.USER,
)
log.save()
messages.success(
self.request, "Your message to " + recipient.name + " has been sent"
)
return redirect(reverse("profile", args=([recipient.slug])))
def get(self, request, *args, **kwargs):
if not request.user.has_perm("profiles.message_users"):
raise PermissionDenied
recipient = self.get_recipient()
if not flag_enabled("MESSAGING_FLAG", request=request):
raise Http404("Messaging toggled off")
if recipient.get_can_receive_message():
return super().get(request, *args, **kwargs)
else:
raise Http404("Messaging not enabled for this user")
def post(self, request, *args, **kwargs):
if not request.user.has_perm("profiles.message_users"):
raise PermissionDenied
recipient = self.get_recipient()
if not flag_enabled("MESSAGING_FLAG", request=request):
raise Http404("Messaging toggled off")
if recipient.get_can_receive_message():
return super().post(request, *args, **kwargs)
else:
raise Http404("Messaging not enabled for this user")
@login_required
def edit_profile(request):
    """Edit the basic fields of the logged-in user's profile."""
    if not hasattr(request.user, "profile"):
        raise http.Http404("user has no profile")
    profile = Profile.objects.get(pk=request.user.profile.id)
    if request.method == "POST":
        form = EditProfileForm(
            request.POST, request.FILES, instance=request.user.profile
        )
        if form.is_valid():
            updated = form.save(commit=False)
            # Re-geocode in case the location fields changed.
            updated = updated.geocode()
            updated.save()
            return redirect("my_profile")
    else:
        form = EditProfileForm(instance=request.user.profile)
    # Human-readable list of things the member is open to, for the template.
    opportunities = [
        label
        for enabled, label in (
            (profile.open_to_job_offers, "job offers"),
            (profile.available_to_volunteer, "volunteering opportunities"),
            (profile.available_as_speaker, "speaking opportunities"),
        )
        if enabled
    ]
    return render(
        request,
        "eahub/edit_profile.html",
        {"form": form, "profile": profile, "opportunities": opportunities},
    )
def reorder_cause_areas(causes):
    """Return the (value, enum) choice pairs sorted alphabetically by label."""
    def label_of(pair):
        return pair[1].label
    return sorted(causes, key=label_of)
@login_required
def edit_profile_cause_areas(request):
    """Edit the cause-area and giving-pledge selections of the user's profile."""
    if not hasattr(request.user, "profile"):
        raise http.Http404("user has no profile")
    if request.method == "POST":
        form = EditProfileCauseAreasForm(request.POST, instance=request.user.profile)
        if form.is_valid():
            profile = form.save(commit=False)
            # Multi-select widgets submit repeated keys; store the full lists.
            profile.cause_areas = request.POST.getlist("cause_areas")
            profile.giving_pledges = request.POST.getlist("giving_pledges")
            profile.save()
            return redirect("my_profile")
    else:
        form = EditProfileCauseAreasForm(instance=request.user.profile)
    context = {
        "form": form,
        "profile": Profile.objects.get(pk=request.user.profile.id),
        "cause_area_choices": reorder_cause_areas(CauseArea.choices()),
        "giving_pledge_choices": GivingPledge.choices,
    }
    return render(request, "eahub/edit_profile_cause_areas.html", context)
@login_required
def edit_profile_career(request):
    """Edit expertise and career-interest areas of the user's profile."""
    if not hasattr(request.user, "profile"):
        raise http.Http404("user has no profile")
    if request.method == "POST":
        form = EditProfileCareerForm(request.POST, instance=request.user.profile)
        if form.is_valid():
            profile = form.save(commit=False)
            # Multi-select widgets submit repeated keys; store the full lists.
            profile.expertise_areas = request.POST.getlist("expertise_areas")
            profile.career_interest_areas = request.POST.getlist(
                "career_interest_areas"
            )
            profile.save()
            return redirect("my_profile")
    else:
        form = EditProfileCareerForm(instance=request.user.profile)
    context = {
        "form": form,
        "profile": Profile.objects.get(pk=request.user.profile.id),
        "expertise_area_choices": ExpertiseArea.choices,
    }
    return render(request, "eahub/edit_profile_career.html", context)
def reorder_orgs(orgs):
    """Return the (value, enum) organisation choices sorted by label."""
    def label_of(pair):
        return pair[1].label
    return sorted(orgs, key=label_of)
@login_required
def edit_profile_community(request):
    """Edit local-group memberships and organisational affiliations.

    On a valid POST this replaces the profile's group memberships with the
    submitted selection and writes a ProfileAnalyticsLog entry when the
    group list actually changed.
    """
    if not hasattr(request.user, "profile"):
        raise http.Http404("user has no profile")
    if request.method == "POST":
        form = EditProfileCommunityForm(request.POST, instance=request.user.profile)
        # Snapshot the current group names before clearing, for the change log.
        old_local_groups = [
            group.name
            for group in LocalGroup.objects.filter(
                membership__profile=request.user.profile
            )
        ]
        if form.is_valid():
            profile = form.save(commit=False)
            # Drop all existing memberships; they are rebuilt from the POST below.
            profile.local_groups.clear()
            organisational_affiliations = request.POST.getlist(
                "organisational_affiliations"
            )
            # POST values arrive as strings; the field stores integers.
            profile.organisational_affiliations = [
                int(x) for x in organisational_affiliations
            ]
            profile.save()
            group_affiliations = request.POST.getlist("local_groups")
            local_groups = LocalGroup.objects.filter(id__in=group_affiliations)
            for group in local_groups:
                membership = Membership(profile=profile, local_group=group)
                membership.save()
            # NOTE(review): this comparison is order-sensitive; it presumably
            # relies on both lists sharing the queryset's default ordering —
            # confirm before relying on the analytics log for auditing.
            if old_local_groups != [x.name for x in local_groups.all()]:
                log = ProfileAnalyticsLog()
                log.profile = request.user.profile
                log.action = "Update"
                log.old_value = old_local_groups
                log.new_value = [x.name for x in local_groups.all()]
                log.field = "local_groups"
                log.save()
            return redirect("my_profile")
    else:
        form = EditProfileCommunityForm(instance=request.user.profile)
    return render(
        request,
        "eahub/edit_profile_community.html",
        {
            "form": form,
            "profile": Profile.objects.get(pk=request.user.profile.id),
            "organisation_choices": reorder_orgs(OrganisationalAffiliation.choices()),
        },
    )
@login_required
def delete_profile(request):
    """Delete the logged-in user's account (POST) or show the confirm form (GET)."""
    if request.method == "POST":
        # Lazy %-style args: the message is only formatted if the record is emitted.
        logging.info(
            "user_id=%s email=%s has deleted their account",
            request.user.id,
            request.user.email,
        )
        user = User.objects.get(id=request.user.id)
        # Deleting the User cascades to the profile via the FK relationship.
        user.delete()
        return redirect("account_logout")
    form = DeleteProfileForm()
    return render(request, "eahub/delete_profile.html", {"form": form})
def profiles(request):
    """Public profiles listing plus map data, filtered for the current viewer."""
    data = get_profiles_data(request.user)
    context = {
        "page_name": "Profiles",
        "profiles": data["rows"],
        "map_locations": {
            "profiles": data["map_data"],
            "private_profiles": get_private_profiles(request.user),
        },
    }
    return render(request, "eahub/profiles.html", context)
|
nilq/baby-python
|
python
|
from django.urls import path
from . import views
app_name = 'kandidaturen'  # URL namespace, e.g. reverse("kandidaturen:homepage")
urlpatterns = [
    path("", views.main_screen, name="homepage"),
    path("erstellen", views.kandidaturErstellenView, name="erstellenView"),
    path("erstellen/speichern", views.erstellen, name="erstellen"),
    path("<int:kandidatur_id>/bearbeiten", views.kandidaturBearbeitenView, name="bearbeitenView"),
    path("<int:kandidatur_id>/bearbeiten/speichern", views.speichern, name="speichern"),
    # AJAX endpoints used by the candidature admin UI.
    path('ajax/laden', views.kandidatur_laden, name='kandidatur_laden'),
    path("ajax/kandidaturen-loeschen", views.kandidaturen_loeschen, name="kandidaturen_loeschen"),
    path('ajax/bereiche-laden', views.bereiche_laden, name='bereiche_laden'),
    # NOTE(review): the names below ("aemter_laden", "amt_loeschen") do not match
    # their routes/views ("funktionen...") — presumably kept for backwards
    # compatibility with existing reverse() calls; confirm before renaming.
    path('ajax/funktionen-laden', views.funktionen_laden, name='aemter_laden'),
    path('ajax/funktionen-html-laden', views.funktionen_html_laden, name='aemter_html_laden'),
    path('ajax/funktion-loeschen', views.funktion_loeschen, name='amt_loeschen'),
    path('ajax/email-html-laden', views.email_html_laden, name='email_html_laden'),
    path('ajax/email-loeschen', views.email_loeschen, name='email_loeschen'),
    path('ajax/suchen', views.suchen, name="suchen"),
    path('ajax/kandidatur-aufnehmen', views.kandidatur_aufnehmen, name="kandidatur_aufnehmen")
]
|
nilq/baby-python
|
python
|
import pyfiglet
# Render the tool's name as an ASCII-art banner at startup.
ascii_banner = pyfiglet.figlet_format("G o d s - e y e")
print(ascii_banner)
|
nilq/baby-python
|
python
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""image"""
import numbers
import numpy as np
import mindspore.common.dtype as mstype
from mindspore.common.tensor import Tensor
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.ops.primitive import constexpr
from mindspore._checkparam import Rel, Validator as validator
from .conv import Conv2d
from .container import CellList
from .pooling import AvgPool2d
from .activation import ReLU
from ..cell import Cell
__all__ = ['ImageGradients', 'SSIM', 'MSSSIM', 'PSNR', 'CentralCrop']
class ImageGradients(Cell):
    r"""
    Returns two tensors, the first is along the height dimension and the second is along the width dimension.

    Assume an image shape is :math:`h*w`. The gradients along the height and the width are :math:`dy` and :math:`dx`,
    respectively.

    .. math::
        dy[i] = \begin{cases} image[i+1, :]-image[i, :], &if\ 0<=i<h-1 \cr
        0, &if\ i==h-1\end{cases}

        dx[i] = \begin{cases} image[:, i+1]-image[:, i], &if\ 0<=i<w-1 \cr
        0, &if\ i==w-1\end{cases}

    Inputs:
        - **images** (Tensor) - The input image data, with format 'NCHW'.

    Outputs:
        - **dy** (Tensor) - vertical image gradients, the same type and shape as input.
        - **dx** (Tensor) - horizontal image gradients, the same type and shape as input.

    Examples:
        >>> net = nn.ImageGradients()
        >>> image = Tensor(np.array([[[[1,2],[3,4]]]]), dtype=mstype.int32)
        >>> net(image)
        [[[[2,2]
           [0,0]]]]
        [[[[1,0]
           [1,0]]]]
    """
    def __init__(self):
        super(ImageGradients, self).__init__()

    def construct(self, images):
        # Validate 4-D input; F.depend keeps the check node in the compute graph.
        check = _check_input_4d(F.shape(images), "images", self.cls_name)
        images = F.depend(images, check)
        batch_size, depth, height, width = P.Shape()(images)
        if height == 1:
            # Degenerate height: the vertical gradient is all zeros.
            dy = P.Fill()(P.DType()(images), (batch_size, depth, 1, width), 0)
        else:
            # Forward difference along H, zero-padded on the last row.
            dy = images[:, :, 1:, :] - images[:, :, :height - 1, :]
            dy_last = P.Fill()(P.DType()(images), (batch_size, depth, 1, width), 0)
            dy = P.Concat(2)((dy, dy_last))
        if width == 1:
            # Degenerate width: the horizontal gradient is all zeros.
            dx = P.Fill()(P.DType()(images), (batch_size, depth, height, 1), 0)
        else:
            # Forward difference along W, zero-padded on the last column.
            dx = images[:, :, :, 1:] - images[:, :, :, :width - 1]
            dx_last = P.Fill()(P.DType()(images), (batch_size, depth, height, 1), 0)
            dx = P.Concat(3)((dx, dx_last))
        return dy, dx
def _convert_img_dtype_to_float32(img, max_val):
    """convert img dtype to float32, rescaling pixel values into [0, 1]"""
    # Usually max_val is 1.0 or 255; we only rescale when max_val > 1,
    # otherwise a plain cast suffices.
    ret = F.cast(img, mstype.float32)
    max_val = F.scalar_cast(max_val, mstype.float32)
    if max_val > 1.:
        scale = 1. / max_val
        ret = ret * scale
    return ret
@constexpr
def _get_dtype_max(dtype):
    """get max of the dtype"""
    # Integer images span the full dtype range (e.g. 255 for uint8);
    # floating-point images are assumed to already lie in [0, 1].
    np_type = mstype.dtype_to_nptype(dtype)
    if issubclass(np_type, numbers.Integral):
        dtype_max = np.float64(np.iinfo(np_type).max)
    else:
        dtype_max = 1.0
    return dtype_max
@constexpr
def _check_input_4d(input_shape, param_name, func_name):
    """Raise ValueError unless the shape is 4-D; returns True on success."""
    if len(input_shape) != 4:
        raise ValueError(f"{func_name} {param_name} should be 4d, but got shape {input_shape}")
    return True
@constexpr
def _check_input_filter_size(input_shape, param_name, filter_size, func_name):
    """Check the input is 4-D and its H and W are at least filter_size."""
    _check_input_4d(input_shape, param_name, func_name)
    validator.check(param_name + " shape[2]", input_shape[2], "filter_size", filter_size, Rel.GE, func_name)
    validator.check(param_name + " shape[3]", input_shape[3], "filter_size", filter_size, Rel.GE, func_name)
@constexpr
def _check_input_dtype(input_dtype, param_name, allow_dtypes, cls_name):
    """Check the input dtype is one of the allowed dtypes."""
    validator.check_type_name(param_name, input_dtype, allow_dtypes, cls_name)
def _conv2d(in_channels, out_channels, kernel_size, weight, stride=1, padding=0):
    """Build a fixed-weight valid-padding Conv2d used as a Gaussian filter."""
    return Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride,
                  weight_init=weight, padding=padding, pad_mode="valid")
def _create_window(size, sigma):
x_data, y_data = np.mgrid[-size // 2 + 1:size // 2 + 1, -size // 2 + 1:size // 2 + 1]
x_data = np.expand_dims(x_data, axis=-1).astype(np.float32)
x_data = np.expand_dims(x_data, axis=-1) ** 2
y_data = np.expand_dims(y_data, axis=-1).astype(np.float32)
y_data = np.expand_dims(y_data, axis=-1) ** 2
sigma = 2 * sigma ** 2
g = np.exp(-(x_data + y_data) / sigma)
return np.transpose(g / np.sum(g), (2, 3, 0, 1))
def _split_img(x):
    """Split an NCHW batch into per-channel tensors; returns (tuple, channel count)."""
    _, c, _, _ = F.shape(x)
    img_split = P.Split(1, c)
    output = img_split(x)
    return output, c
def _compute_per_channel_loss(c1, c2, img1, img2, conv):
"""computes ssim index between img1 and img2 per single channel"""
dot_img = img1 * img2
mu1 = conv(img1)
mu2 = conv(img2)
mu1_sq = mu1 * mu1
mu2_sq = mu2 * mu2
mu1_mu2 = mu1 * mu2
sigma1_tmp = conv(img1 * img1)
sigma1_sq = sigma1_tmp - mu1_sq
sigma2_tmp = conv(img2 * img2)
sigma2_sq = sigma2_tmp - mu2_sq
sigma12_tmp = conv(dot_img)
sigma12 = sigma12_tmp - mu1_mu2
a = (2 * mu1_mu2 + c1)
b = (mu1_sq + mu2_sq + c1)
v1 = 2 * sigma12 + c2
v2 = sigma1_sq + sigma2_sq + c2
ssim = (a * v1) / (b * v2)
cs = v1 / v2
return ssim, cs
def _compute_multi_channel_loss(c1, c2, img1, img2, conv, concat, mean):
    """computes ssim index between img1 and img2 per color channel"""
    split_img1, c = _split_img(img1)
    split_img2, _ = _split_img(img2)
    # Tuple accumulation (not list.append) keeps this graph-mode compatible.
    multi_ssim = ()
    multi_cs = ()
    for i in range(c):
        ssim_per_channel, cs_per_channel = _compute_per_channel_loss(c1, c2, split_img1[i], split_img2[i], conv)
        multi_ssim += (ssim_per_channel,)
        multi_cs += (cs_per_channel,)
    multi_ssim = concat(multi_ssim)
    multi_cs = concat(multi_cs)
    # Average over the spatial dims, leaving per-sample, per-channel values.
    ssim = mean(multi_ssim, (2, 3))
    cs = mean(multi_cs, (2, 3))
    return ssim, cs
class SSIM(Cell):
    r"""
    Returns SSIM index between img1 and img2.

    Its implementation is based on Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P. (2004). `Image quality
    assessment: from error visibility to structural similarity <https://ieeexplore.ieee.org/document/1284395>`_.
    IEEE transactions on image processing.

    .. math::
        l(x,y)&=\frac{2\mu_x\mu_y+C_1}{\mu_x^2+\mu_y^2+C_1}, C_1=(K_1L)^2.\\
        c(x,y)&=\frac{2\sigma_x\sigma_y+C_2}{\sigma_x^2+\sigma_y^2+C_2}, C_2=(K_2L)^2.\\
        s(x,y)&=\frac{\sigma_{xy}+C_3}{\sigma_x\sigma_y+C_3}, C_3=C_2/2.\\
        SSIM(x,y)&=l*c*s\\&=\frac{(2\mu_x\mu_y+C_1)(2\sigma_{xy}+C_2}{(\mu_x^2+\mu_y^2+C_1)(\sigma_x^2+\sigma_y^2+C_2)}.

    Args:
        max_val (Union[int, float]): The dynamic range of the pixel values (255 for 8-bit grayscale images).
          Default: 1.0.
        filter_size (int): The size of the Gaussian filter. Default: 11. The value must be greater than or equal to 1.
        filter_sigma (float): The standard deviation of Gaussian kernel. Default: 1.5. The value must be greater than 0.
        k1 (float): The constant used to generate c1 in the luminance comparison function. Default: 0.01.
        k2 (float): The constant used to generate c2 in the contrast comparison function. Default: 0.03.

    Inputs:
        - **img1** (Tensor) - The first image batch with format 'NCHW'. It must be the same shape and dtype as img2.
        - **img2** (Tensor) - The second image batch with format 'NCHW'. It must be the same shape and dtype as img1.

    Outputs:
        Tensor, has the same dtype as img1. It is a 1-D tensor with shape N, where N is the batch num of img1.

    Examples:
        >>> net = nn.SSIM()
        >>> img1 = Tensor(np.random.random((1,3,16,16)), mindspore.float32)
        >>> img2 = Tensor(np.random.random((1,3,16,16)), mindspore.float32)
        >>> ssim = net(img1, img2)
        [0.12174469]
    """
    def __init__(self, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03):
        super(SSIM, self).__init__()
        validator.check_value_type('max_val', max_val, [int, float], self.cls_name)
        validator.check_number('max_val', max_val, 0.0, Rel.GT, self.cls_name)
        self.max_val = max_val
        self.filter_size = validator.check_int(filter_size, 1, Rel.GE, 'filter_size', self.cls_name)
        self.filter_sigma = validator.check_positive_float(filter_sigma, 'filter_sigma', self.cls_name)
        self.k1 = validator.check_value_type('k1', k1, [float], self.cls_name)
        self.k2 = validator.check_value_type('k2', k2, [float], self.cls_name)
        # Fixed (non-trainable) Gaussian filter used for local statistics.
        window = _create_window(filter_size, filter_sigma)
        self.conv = _conv2d(1, 1, filter_size, Tensor(window))
        self.conv.weight.requires_grad = False
        self.reduce_mean = P.ReduceMean()
        self.concat = P.Concat(axis=1)

    def construct(self, img1, img2):
        # Inputs must be float16/float32, 4-D, at least filter_size in H/W,
        # and identical in shape and dtype.
        _check_input_dtype(F.dtype(img1), "img1", [mstype.float32, mstype.float16], self.cls_name)
        _check_input_filter_size(F.shape(img1), "img1", self.filter_size, self.cls_name)
        P.SameTypeShape()(img1, img2)
        # Rescale images (and max_val) into [0, 1] float32 before comparing.
        dtype_max_val = _get_dtype_max(F.dtype(img1))
        max_val = F.scalar_cast(self.max_val, F.dtype(img1))
        max_val = _convert_img_dtype_to_float32(max_val, dtype_max_val)
        img1 = _convert_img_dtype_to_float32(img1, dtype_max_val)
        img2 = _convert_img_dtype_to_float32(img2, dtype_max_val)
        # Stabilising constants from the SSIM paper: C1=(k1*L)^2, C2=(k2*L)^2.
        c1 = (self.k1 * max_val) ** 2
        c2 = (self.k2 * max_val) ** 2
        ssim_ave_channel, _ = _compute_multi_channel_loss(c1, c2, img1, img2, self.conv, self.concat, self.reduce_mean)
        # Average over channels to produce one score per batch element.
        loss = self.reduce_mean(ssim_ave_channel, -1)
        return loss
def _downsample(img1, img2, op):
a = op(img1)
b = op(img2)
return a, b
class MSSSIM(Cell):
    r"""
    Returns MS-SSIM index between img1 and img2.

    Its implementation is based on Wang, Zhou, Eero P. Simoncelli, and Alan C. Bovik. `Multiscale structural similarity
    for image quality assessment <https://ieeexplore.ieee.org/document/1292216>`_.
    Signals, Systems and Computers, 2004.

    .. math::
        l(x,y)&=\frac{2\mu_x\mu_y+C_1}{\mu_x^2+\mu_y^2+C_1}, C_1=(K_1L)^2.\\
        c(x,y)&=\frac{2\sigma_x\sigma_y+C_2}{\sigma_x^2+\sigma_y^2+C_2}, C_2=(K_2L)^2.\\
        s(x,y)&=\frac{\sigma_{xy}+C_3}{\sigma_x\sigma_y+C_3}, C_3=C_2/2.\\
        MSSSIM(x,y)&=l^alpha_M*{\prod_{1\leq j\leq M} (c^beta_j*s^gamma_j)}.

    Args:
        max_val (Union[int, float]): The dynamic range of the pixel values (255 for 8-bit grayscale images).
          Default: 1.0.
        power_factors (Union[tuple, list]): Iterable of weights for each scal e.
          Default: (0.0448, 0.2856, 0.3001, 0.2363, 0.1333). Default values obtained by Wang et al.
        filter_size (int): The size of the Gaussian filter. Default: 11.
        filter_sigma (float): The standard deviation of Gaussian kernel. Default: 1.5.
        k1 (float): The constant used to generate c1 in the luminance comparison function. Default: 0.01.
        k2 (float): The constant used to generate c2 in the contrast comparison function. Default: 0.03.

    Inputs:
        - **img1** (Tensor) - The first image batch with format 'NCHW'. It must be the same shape and dtype as img2.
        - **img2** (Tensor) - The second image batch with format 'NCHW'. It must be the same shape and dtype as img1.

    Outputs:
        Tensor, the value is in range [0, 1]. It is a 1-D tensor with shape N, where N is the batch num of img1.

    Examples:
        >>> net = nn.MSSSIM(power_factors=(0.033, 0.033, 0.033))
        >>> img1 = Tensor(np.random.random((1,3,128,128)))
        >>> img2 = Tensor(np.random.random((1,3,128,128)))
        >>> msssim = net(img1, img2)
    """
    def __init__(self, max_val=1.0, power_factors=(0.0448, 0.2856, 0.3001, 0.2363, 0.1333), filter_size=11,
                 filter_sigma=1.5, k1=0.01, k2=0.03):
        super(MSSSIM, self).__init__()
        validator.check_value_type('max_val', max_val, [int, float], self.cls_name)
        validator.check_number('max_val', max_val, 0.0, Rel.GT, self.cls_name)
        self.max_val = max_val
        validator.check_value_type('power_factors', power_factors, [tuple, list], self.cls_name)
        self.filter_size = validator.check_int(filter_size, 1, Rel.GE, 'filter_size', self.cls_name)
        self.filter_sigma = validator.check_positive_float(filter_sigma, 'filter_sigma', self.cls_name)
        self.k1 = validator.check_value_type('k1', k1, [float], self.cls_name)
        self.k2 = validator.check_value_type('k2', k2, [float], self.cls_name)
        window = _create_window(filter_size, filter_sigma)
        # One scale per power factor; each scale gets its own fixed Gaussian conv.
        self.level = len(power_factors)
        self.conv = []
        for i in range(self.level):
            self.conv.append(_conv2d(1, 1, filter_size, Tensor(window)))
            self.conv[i].weight.requires_grad = False
        self.multi_convs_list = CellList(self.conv)
        self.weight_tensor = Tensor(power_factors, mstype.float32)
        # 2x2 average pooling halves the resolution between scales.
        self.avg_pool = AvgPool2d(kernel_size=2, stride=2, pad_mode='valid')
        self.relu = ReLU()
        self.reduce_mean = P.ReduceMean()
        self.prod = P.ReduceProd()
        self.pow = P.Pow()
        self.pack = P.Pack(axis=-1)
        self.concat = P.Concat(axis=1)

    def construct(self, img1, img2):
        _check_input_4d(F.shape(img1), "img1", self.cls_name)
        _check_input_4d(F.shape(img2), "img2", self.cls_name)
        _check_input_dtype(F.dtype(img1), 'img1', mstype.number_type, self.cls_name)
        P.SameTypeShape()(img1, img2)
        # Rescale images (and max_val) into [0, 1] float32 before comparing.
        dtype_max_val = _get_dtype_max(F.dtype(img1))
        max_val = F.scalar_cast(self.max_val, F.dtype(img1))
        max_val = _convert_img_dtype_to_float32(max_val, dtype_max_val)
        img1 = _convert_img_dtype_to_float32(img1, dtype_max_val)
        img2 = _convert_img_dtype_to_float32(img2, dtype_max_val)
        c1 = (self.k1 * max_val) ** 2
        c2 = (self.k2 * max_val) ** 2
        sim = ()
        mcs = ()
        # Collect contrast-structure (cs) at each scale; only the final
        # scale's ssim is used, per the MS-SSIM formulation.
        for i in range(self.level):
            sim, cs = _compute_multi_channel_loss(c1, c2, img1, img2,
                                                  self.multi_convs_list[i], self.concat, self.reduce_mean)
            mcs += (self.relu(cs),)
            img1, img2 = _downsample(img1, img2, self.avg_pool)
        # Drop the coarsest-scale cs; it is replaced by the final ssim term.
        mcs = mcs[0:-1:1]
        mcs_and_ssim = self.pack(mcs + (self.relu(sim),))
        # Weight each scale by its power factor, then combine multiplicatively.
        mcs_and_ssim = self.pow(mcs_and_ssim, self.weight_tensor)
        ms_ssim = self.prod(mcs_and_ssim, -1)
        loss = self.reduce_mean(ms_ssim, -1)
        return loss
class PSNR(Cell):
    r"""
    Returns Peak Signal-to-Noise Ratio of two image batches.

    It produces a PSNR value for each image in batch.
    Assume inputs are :math:`I` and :math:`K`, both with shape :math:`h*w`.
    :math:`MAX` represents the dynamic range of pixel values.

    .. math::
        MSE&=\frac{1}{hw}\sum\limits_{i=0}^{h-1}\sum\limits_{j=0}^{w-1}[I(i,j)-K(i,j)]^2\\
        PSNR&=10*log_{10}(\frac{MAX^2}{MSE})

    Args:
        max_val (Union[int, float]): The dynamic range of the pixel values (255 for 8-bit grayscale images).
          The value must be greater than 0. Default: 1.0.

    Inputs:
        - **img1** (Tensor) - The first image batch with format 'NCHW'. It must be the same shape and dtype as img2.
        - **img2** (Tensor) - The second image batch with format 'NCHW'. It must be the same shape and dtype as img1.

    Outputs:
        Tensor, with dtype mindspore.float32. It is a 1-D tensor with shape N, where N is the batch num of img1.

    Examples:
        >>> net = nn.PSNR()
        >>> img1 = Tensor(np.random.random((1,3,16,16)))
        >>> img2 = Tensor(np.random.random((1,3,16,16)))
        >>> psnr = net(img1, img2)
        [7.8297315]
    """
    def __init__(self, max_val=1.0):
        super(PSNR, self).__init__()
        validator.check_value_type('max_val', max_val, [int, float], self.cls_name)
        validator.check_number('max_val', max_val, 0.0, Rel.GT, self.cls_name)
        self.max_val = max_val

    def construct(self, img1, img2):
        _check_input_4d(F.shape(img1), "img1", self.cls_name)
        _check_input_4d(F.shape(img2), "img2", self.cls_name)
        P.SameTypeShape()(img1, img2)
        # Rescale images (and max_val) into [0, 1] float32 before comparing.
        dtype_max_val = _get_dtype_max(F.dtype(img1))
        max_val = F.scalar_cast(self.max_val, F.dtype(img1))
        max_val = _convert_img_dtype_to_float32(max_val, dtype_max_val)
        img1 = _convert_img_dtype_to_float32(img1, dtype_max_val)
        img2 = _convert_img_dtype_to_float32(img2, dtype_max_val)
        # Per-image mean squared error over C, H, W.
        mse = P.ReduceMean()(F.square(img1 - img2), (-3, -2, -1))
        # log10(x) computed as ln(x) / ln(10).
        psnr = 10 * P.Log()(F.square(max_val) / mse) / F.scalar_log(10.0)
        return psnr
@constexpr
def _raise_dims_rank_error(input_shape, param_name, func_name):
    """raise error if input is not 3d or 4d"""
    raise ValueError(f"{func_name} {param_name} should be 3d or 4d, but got shape {input_shape}")
@constexpr
def _get_bbox(rank, shape, size_h, size_w):
    """get bbox start and size for slice"""
    # size_h/size_w may be floats (fraction * dim); the start offset is
    # truncated to int, and the kept extent is derived from it so the crop
    # stays centered.
    if rank == 3:
        c, h, w = shape
    else:
        n, c, h, w = shape
    bbox_h_start = int((float(h) - size_h) / 2)
    bbox_w_start = int((float(w) - size_w) / 2)
    bbox_h_size = h - bbox_h_start * 2
    bbox_w_size = w - bbox_w_start * 2
    if rank == 3:
        bbox_begin = (0, bbox_h_start, bbox_w_start)
        bbox_size = (c, bbox_h_size, bbox_w_size)
    else:
        bbox_begin = (0, 0, bbox_h_start, bbox_w_start)
        bbox_size = (n, c, bbox_h_size, bbox_w_size)
    return bbox_begin, bbox_size
class CentralCrop(Cell):
    """
    Crop the central region of the images with the central_fraction.

    Args:
        central_fraction (float): Fraction of size to crop. It must be float and in range (0.0, 1.0].

    Inputs:
        - **image** (Tensor) - A 3-D tensor of shape [C, H, W], or a 4-D tensor of shape [N, C, H, W].

    Outputs:
        Tensor, 3-D or 4-D float tensor, according to the input.

    Examples:
        >>> net = nn.CentralCrop(central_fraction=0.5)
        >>> image = Tensor(np.random.random((4, 3, 4, 4)), mindspore.float32)
        >>> output = net(image)
    """
    def __init__(self, central_fraction):
        super(CentralCrop, self).__init__()
        validator.check_value_type("central_fraction", central_fraction, [float], self.cls_name)
        self.central_fraction = validator.check_float_range(central_fraction, 0.0, 1.0, Rel.INC_RIGHT,
                                                            'central_fraction', self.cls_name)
        self.slice = P.Slice()

    def construct(self, image):
        image_shape = F.shape(image)
        rank = len(image_shape)
        h, w = image_shape[-2], image_shape[-1]
        # Idiomatic membership test ("not in" instead of "not ... in").
        if rank not in (3, 4):
            return _raise_dims_rank_error(image_shape, "image", self.cls_name)
        if self.central_fraction == 1.0:
            # Nothing to crop; return the input unchanged.
            return image
        size_h = self.central_fraction * h
        size_w = self.central_fraction * w
        bbox_begin, bbox_size = _get_bbox(rank, image_shape, size_h, size_w)
        image = self.slice(image, bbox_begin, bbox_size)
        return image
|
nilq/baby-python
|
python
|
'''
siehe Bilder in diesem Ordner
F: Forget Gate -> welche vorherigen Informationen werden verworfen
I: Input Gate -> welche neuen Informationen sind wichtig
O: Output Gate -> welche Informationen werden intern im Cell State gespeichert
C: Candidate State -> welche Informationen werden intern dem Cell State (c) hinzugefügt
h: Hidden State -> Ausgabe der LSTM(Long Short Term Memory) in dem aktuellen Zeitschritt
einmal Keras Implementierung und eigene Implementierung
'''
import random
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import *
from tensorflow.keras.layers import *
from tensorflow.keras.activations import *
def sigmoid(x):
    """Logistic function 1 / (1 + e^-x), elementwise for arrays."""
    return np.reciprocal(1.0 + np.exp(-x))
def tanh(x):
    """Hyperbolic tangent, elementwise for arrays (thin NumPy wrapper)."""
    return np.tanh(x)
class LSTMInference:
    """NumPy re-implementation of a trained Keras LSTM's forward pass.

    Extracts the weights from a Keras LSTM layer and replays the recurrence
    step by step so each gate's activations can be inspected.
    """
    def __init__(self, lstm_layer, return_sequences=False):
        self.return_sequences = return_sequences
        self.lstm_layer = lstm_layer
        # Keras packs the input kernel W, recurrent kernel U and bias b with
        # the four gates concatenated along the last axis in the order
        # [input (i), forget (f), candidate (c), output (o)].
        self.W, self.U, self.b = self.lstm_layer.get_weights()
        self.units = self.b.shape[0] // 4
        self.W_i = self.W[:, :self.units]
        self.W_f = self.W[:, self.units: self.units * 2]
        self.W_c = self.W[:, self.units * 2: self.units * 3]
        self.W_o = self.W[:, self.units * 3:]
        self.U_i = self.U[:, :self.units]
        self.U_f = self.U[:, self.units: self.units * 2]
        self.U_c = self.U[:, self.units * 2: self.units * 3]
        self.U_o = self.U[:, self.units * 3:]
        self.b_i = self.b[: self.units]
        self.b_f = self.b[self.units: self.units * 2]
        self.b_c = self.b[self.units * 2: self.units * 3]
        self.b_o = self.b[self.units * 3:]

    # Override __call__ so the instance can be used like a function.
    def __call__(self, x):
        # output shape (num_timesteps, units)
        if self.return_sequences:
            self.time_steps = x.shape[0]
            self.h = np.zeros((self.time_steps, self.units))
        # output shape (units)
        else:
            self.h = np.zeros((self.units))
        # Hidden and cell state both start at zero, matching Keras defaults.
        h_t = np.zeros((1, self.units))
        c_t = np.zeros((1, self.units))
        for t, x_t in enumerate(x):
            x_t = x_t.reshape(1, -1)  # (2) => (1, 2)
            c_t, h_t = self.forward_step(x_t, c_t, h_t)
            if self.return_sequences:
                self.h[t] = h_t
            else:
                self.h = h_t
        return self.h

    # One LSTM time step: gate activations, then state update.
    def forward_step(self, x_t, c_t, h_t):
        i_t = sigmoid(np.matmul(x_t, self.W_i) + np.matmul(h_t, self.U_i) + self.b_i)
        f_t = sigmoid(np.matmul(x_t, self.W_f) + np.matmul(h_t, self.U_f) + self.b_f)
        c_tilde = tanh(np.matmul(x_t, self.W_c) + np.matmul(h_t, self.U_c) + self.b_c)
        o_t = sigmoid(np.matmul(x_t, self.W_o) + np.matmul(h_t, self.U_o) + self.b_o)
        # New cell state: keep (forget-gated) old state plus gated candidate.
        c_t = f_t * c_t + i_t * c_tilde
        h_t = o_t * tanh(c_t)
        return c_t, h_t
# data set shape = (num_samples, num_timesteps, num_features)
# input shape = (num_timesteps, num_features)
# If return_sequences == True:
#   output shape = (num_timesteps, units)
# Else:
#   output shape = (1, units)
x = np.random.normal(size=(1, 3, 2))
units = 4
return_sequences = True
# num_features = 2
# units = 4
# h_t shape = (4), (units)
# W shape = (2, 4), (num_features, units)
# U shape = (4, 4), (units, units)
# b shape = (4), (units)
#
# matmul(x, W) (1, 2)*(2,4) => (4)
# matmul(h, U) (1, 4)*(4,4) => (4)
# intern + b (4)+(4) => (4)
# Keras reference implementation (weights are random; no training needed).
model = Sequential()
model.add(LSTM(units=units, return_sequences=return_sequences, input_shape=x.shape[1:]))
model.compile(loss="mse", optimizer="Adam")
#model.summary()
# Manual implementation using the same weights, without Keras.
rnn = LSTMInference(lstm_layer=model.layers[0], return_sequences=return_sequences)
output_rnn_own = rnn(x[0])  # invokes LSTMInference.__call__
print(output_rnn_own)
print("\n\n")
output_rnn_tf = model.predict(x[[0]])
print(output_rnn_tf)
# Both implementations must agree to within float tolerance.
assert np.all(np.isclose(output_rnn_own - output_rnn_tf, 0.0, atol=1e-06))
|
nilq/baby-python
|
python
|
# stdlib
import os
import zipfile
from typing import Type, Union
# 3rd party
import handy_archives
import pytest
import remotezip
from apeye import URL
from coincidence.params import param
from coincidence.regressions import AdvancedDataRegressionFixture, AdvancedFileRegressionFixture
from domdf_python_tools.paths import PathPlus
from packaging.version import Version
from shippinglabel.checksum import get_sha256_hash
# this package
from remote_wheel import RemoteWheelDistribution, RemoteZipFile
# Wheel fixtures: each row of wheel_urls.json is a (name, version, url) triple.
wheel_urls = PathPlus(__file__).parent.joinpath("wheel_urls.json").load_json()
wheels = pytest.mark.parametrize("url", [param(w[2], id=f"{w[0]}-{w[1]}") for w in wheel_urls])
# Run each decorated test with the URL supplied both as str and as apeye URL.
url_type = pytest.mark.parametrize(
    "url_type",
    [param(str, id="str"), param(URL, id="URL")],
)
class TestRemoteWheelDistribution:
    @url_type
    @wheels
    def test_distribution(
        self,
        url: str,
        url_type: Type[Union[str, URL]],
        advanced_data_regression: AdvancedDataRegressionFixture,
    ):
        """Regression-check the metadata extracted from each fixture wheel."""
        wd = RemoteWheelDistribution.from_url(url_type(url))
        advanced_data_regression.check({
            "name": wd.name,
            "url": wd.url,
            "repr": repr(wd),
            "version": str(wd.version),
            "wheel": list(wd.get_wheel().items()),
            "metadata": list(wd.get_metadata().items()),
            "entry_points": wd.get_entry_points(),
            "has_license": wd.has_file("LICENSE"),
        })
        # The backing archive should expose every relevant ZipFile interface.
        assert isinstance(wd.wheel_zip, zipfile.ZipFile)
        assert isinstance(wd.wheel_zip, handy_archives.ZipFile)
        assert isinstance(wd.wheel_zip, RemoteZipFile)
        assert isinstance(wd.wheel_zip, remotezip.RemoteZip)
    @wheels
    def test_get_record(self, url: str):
        """Verify each RECORD entry's hash and size against the real file."""
        distro = RemoteWheelDistribution.from_url(url)
        record = distro.get_record()
        assert record is not None
        assert len(record)  # pylint: disable=len-as-condition
        for file in record:
            if file.hash is None:
                # RECORD is the only entry allowed to omit its own hash.
                assert file.name == "RECORD"
            else:
                with distro.wheel_zip.open(os.fspath(file)) as fp:
                    assert get_sha256_hash(fp).hexdigest() == file.hash.hexdigest()
            if file.size is not None:
                assert distro.wheel_zip.getinfo(os.fspath(file)).file_size == file.size
            # Entries are standalone: reading content requires a distro backref.
            assert file.distro is None
            with pytest.raises(ValueError, match="Cannot read files with 'self.distro = None'"):
                file.read_bytes()
def test_remotezip(self, advanced_file_regression: AdvancedFileRegressionFixture):
wd = RemoteWheelDistribution.from_url(
"https://files.pythonhosted.org/packages/94/e2/"
"0a5630e43ca0b21ca891ec3a697bdb98a25663e27ebd1079ab55e8c68e72/"
"domdf_python_tools-2.9.1-py3-none-any.whl"
"#sha256=ad1058fa0769a68808c2ed44909222508edf6f26ec3a36f91f86b6d654c58474",
)
assert isinstance(wd.wheel_zip, zipfile.ZipFile)
assert isinstance(wd.wheel_zip, handy_archives.ZipFile)
assert isinstance(wd.wheel_zip, RemoteZipFile)
advanced_file_regression.check(
wd.wheel_zip.read("domdf_python_tools/__init__.py").decode("UTF-8"), extension="._py"
)
with wd:
advanced_file_regression.check(
wd.wheel_zip.read("domdf_python_tools/__init__.py").decode("UTF-8"), extension="._py"
)
assert wd.wheel_zip.fp is None
def test_remotezip_github_pages(self, advanced_file_regression: AdvancedFileRegressionFixture):
wd = RemoteWheelDistribution.from_url(
"https://repo-helper.uk/simple503/pydash/pydash-5.0.0-py3-none-any.whl"
"#sha256=0d87f879a3df4ad9389ab6d63c69eea078517d41541ddd5744cfcff3396e8543",
)
assert isinstance(wd.wheel_zip, zipfile.ZipFile)
assert isinstance(wd.wheel_zip, handy_archives.ZipFile)
assert isinstance(wd.wheel_zip, RemoteZipFile)
advanced_file_regression.check(wd.wheel_zip.read("pydash/__init__.py").decode("UTF-8"), extension="._py")
assert isinstance(wd, RemoteWheelDistribution)
with wd:
advanced_file_regression.check(
wd.wheel_zip.read("pydash/__init__.py").decode("UTF-8"), extension="._py"
)
assert wd.wheel_zip.fp is None
def test_remotezip_ionos(self):
with pytest.raises(
remotezip.RangeNotSupported,
match="The server at remote-wheel-test.repo-helper.uk doesn't support range requests",
):
wd = RemoteWheelDistribution.from_url(
"http://remote-wheel-test.repo-helper.uk/pydash-5.0.0-py3-none-any.whl",
)
def test_remotezip_auth(self, advanced_file_regression: AdvancedFileRegressionFixture):
url = "http://remote-wheel-test.repo-helper.uk/toml-0.10.2-py2.py3-none-any.whl"
wheel_zip = RemoteZipFile(url, initial_buffer_size=100, auth=("user", "password"))
wd = RemoteWheelDistribution("toml", Version("0.10.2"), url, wheel_zip)
assert isinstance(wd.wheel_zip, zipfile.ZipFile)
assert isinstance(wd.wheel_zip, handy_archives.ZipFile)
assert isinstance(wd.wheel_zip, RemoteZipFile)
advanced_file_regression.check(wd.wheel_zip.read("toml/__init__.py").decode("UTF-8"), extension="._py")
assert isinstance(wd, RemoteWheelDistribution)
with wd:
advanced_file_regression.check(wd.wheel_zip.read("toml/__init__.py").decode("UTF-8"), extension="._py")
assert wd.wheel_zip.fp is None
# Again to check the auth requirement works
with pytest.raises(remotezip.RemoteIOError, match=f"^401 Client Error: Unauthorized for url: {url}$"):
RemoteZipFile(url, initial_buffer_size=100)
|
nilq/baby-python
|
python
|
class Postprocessor:
    """Placeholder postprocessor; no behaviour implemented yet."""
    pass
|
nilq/baby-python
|
python
|
from .test_task import TestEnv
# Robot Import
from .agents.stretch import Stretch
from .agents.pr2 import PR2
# Human Import
from .agents.human import Human
from .agents import human
# Robot Configuration
robot_arm = 'left'
# Human Configuration
# Fix: this assignment was commented out, but TestPR2Env.__init__ references
# human_controllable_joint_indices, which raised NameError at instantiation.
# The right arm joints were the commented-out default.
human_controllable_joint_indices = human.right_arm_joints


class TestPR2Env(TestEnv):
    """Test environment pairing a PR2 robot (configured arm) with a static human."""

    def __init__(self):
        # Human is present but not controllable (controllable=False).
        super(TestPR2Env, self).__init__(robot=PR2(robot_arm), human=Human(human_controllable_joint_indices, controllable=False))
class TestStretchEnv(TestEnv):
    """Test environment with a wheeled Stretch robot and no human participant."""

    def __init__(self):
        # Mobile-base ('wheel_') variant of the configured arm side.
        super().__init__(robot=Stretch('wheel_' + robot_arm), human=None)
|
nilq/baby-python
|
python
|
from typing_extensions import Protocol
class HasStr(Protocol):
def __str__(self) -> str:
...
|
nilq/baby-python
|
python
|
# This entry point is intended to be used to start the backend at a terminal for debugging purposes.
from backend import app

if __name__ == '__main__':
    # Guard so that importing this module (e.g. from tests or tooling)
    # does not start the server as a side effect.
    app.main()
|
nilq/baby-python
|
python
|
"""
ASDF tags for geometry related models.
"""
from asdf_astropy.converters.transform.core import TransformConverterBase
__all__ = ['DirectionCosinesConverter', 'SphericalCartesianConverter']
class DirectionCosinesConverter(TransformConverterBase):
    """ASDF converter for the To/FromDirectionCosines geometry transforms.

    Serializes the direction of the transform in a ``transform_type`` node field.
    """

    tags = ["tag:stsci.edu:gwcs/direction_cosines-*"]
    types = ["gwcs.geometry.ToDirectionCosines",
             "gwcs.geometry.FromDirectionCosines"]

    def from_yaml_tree_transform(self, node, tag, ctx):
        """Rebuild the transform instance from the node's ``transform_type`` field."""
        from ..geometry import ToDirectionCosines, FromDirectionCosines
        transform_type = node['transform_type']
        if transform_type == 'to_direction_cosines':
            return ToDirectionCosines()
        elif transform_type == 'from_direction_cosines':
            return FromDirectionCosines()
        else:
            # Fixed: message previously said "model_type" although the node
            # key and variable are both named transform_type.
            raise TypeError(f"Unknown transform_type {transform_type}")

    def to_yaml_tree_transform(self, model, tag, ctx):
        """Serialize the model to a node carrying its ``transform_type``."""
        from ..geometry import ToDirectionCosines, FromDirectionCosines
        if isinstance(model, FromDirectionCosines):
            transform_type = 'from_direction_cosines'
        elif isinstance(model, ToDirectionCosines):
            transform_type = 'to_direction_cosines'
        else:
            raise TypeError(f"Model of type {model.__class__} is not supported.")
        node = {'transform_type': transform_type}
        return node
class SphericalCartesianConverter(TransformConverterBase):
    """ASDF converter for the Spherical<->Cartesian geometry transforms.

    Serializes the direction (``transform_type``) and the longitude wrap
    angle (``wrap_lon_at``).
    """

    tags = ["tag:stsci.edu:gwcs/spherical_cartesian-*"]
    types = ["gwcs.geometry.SphericalToCartesian",
             "gwcs.geometry.CartesianToSpherical"]

    def from_yaml_tree_transform(self, node, tag, ctx):
        """Rebuild the transform from ``transform_type`` and ``wrap_lon_at``."""
        from ..geometry import SphericalToCartesian, CartesianToSpherical
        transform_type = node['transform_type']
        wrap_lon_at = node['wrap_lon_at']
        if transform_type == 'spherical_to_cartesian':
            return SphericalToCartesian(wrap_lon_at=wrap_lon_at)
        elif transform_type == 'cartesian_to_spherical':
            return CartesianToSpherical(wrap_lon_at=wrap_lon_at)
        else:
            # Fixed: message previously said "model_type" although the node
            # key and variable are both named transform_type.
            raise TypeError(f"Unknown transform_type {transform_type}")

    def to_yaml_tree_transform(self, model, tag, ctx):
        """Serialize the model's direction and longitude wrap angle."""
        from ..geometry import SphericalToCartesian, CartesianToSpherical
        if isinstance(model, SphericalToCartesian):
            transform_type = 'spherical_to_cartesian'
        elif isinstance(model, CartesianToSpherical):
            transform_type = 'cartesian_to_spherical'
        else:
            raise TypeError(f"Model of type {model.__class__} is not supported.")
        node = {
            'transform_type': transform_type,
            'wrap_lon_at': model.wrap_lon_at
        }
        return node
|
nilq/baby-python
|
python
|
import asyncio
import base64
import json
import os
from dataclasses import dataclass
from datetime import datetime, timezone
from email.message import EmailMessage
from enum import Enum
from io import BytesIO
from typing import List, Optional
from uuid import uuid4
import pytest
from aiohttp import ClientSession, ClientTimeout
from aiohttp.test_utils import TestClient, teardown_test_loop
from aioredis import create_redis
from arq import ArqRedis, Worker
from arq.connections import RedisSettings
from atoolbox.db.helpers import DummyPgPool
from atoolbox.test_utils import DummyServer, create_dummy_server
from buildpg import Values
from cryptography.fernet import Fernet
from PIL import Image, ImageDraw
from yarl import URL
from em2.auth.utils import mk_password
from em2.background import push_multiple
from em2.core import Action, Connections, apply_actions, generate_conv_key
from em2.main import create_app
from em2.protocol.core import get_signing_key
from em2.protocol.smtp import LogSmtpHandler, SesSmtpHandler
from em2.settings import Settings
from em2.utils.web import MakeUrl
from em2.worker import worker_settings
from . import dummy_server
from .resolver import TestDNSResolver
# When KEEP_DB is set, test transactions are committed instead of rolled back
# so the database can be inspected after a run.
commit_transactions = 'KEEP_DB' in os.environ


@pytest.fixture(scope='session', name='settings_session')
def _fix_settings_session():
    """Session-scoped base Settings.

    Under pytest-xdist each worker gets its own postgres database and redis
    db number so parallel workers don't clobber each other.
    """
    pg_db = 'em2_test'
    redis_db = 2
    test_worker = os.getenv('PYTEST_XDIST_WORKER')
    if test_worker:
        worker_id = int(test_worker.replace('gw', ''))
        redis_db = worker_id + 2
        if worker_id:
            pg_db = f'em2_test_{worker_id}'
    return Settings(
        testing=True,
        pg_dsn=f'postgres://postgres@localhost:5432/{pg_db}',
        redis_settings=f'redis://localhost:6379/{redis_db}',
        bcrypt_work_factor=6,
        max_request_size=1024 ** 2,
        aws_access_key='testing_access_key',
        aws_secret_key='testing_secret_key',
        ses_url_token='testing',
        aws_sns_signing_host='localhost',
        aws_sns_signing_schema='http',
        internal_auth_key='testing' * 6,
        auth_key=Fernet.generate_key(),
        s3_temp_bucket='s3_temp_bucket.example.com',
        s3_file_bucket='s3_files_bucket.example.com',
        s3_cache_bucket='s3_cache_bucket.example.com',
        max_ref_image_size=666,
        max_ref_image_count=10,
        vapid_private_key=(
            'MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgvGPhHfTSfxCod+wT'
            'zLuyK8KWjPGGvKJKJjzBGSF47YuhRANCAAQJNQfHBSOe5nI5fmUcwTFw3ckqXXvR'
            'F632vcMyB9RxPMaxicdqPiLg45GIk9oeEtm1kQjHQe7ikWxPFAm7uxkB'
        ),
        vapid_sub_email='vapid-reports@example.com',
        signing_secret_key=b'4' * 64,
        max_em2_file_size=500,
    )
@pytest.fixture(name='dummy_server')
async def _fix_dummy_server(loop, aiohttp_server):
    """Local stand-in for external services (SMTP/S3/web-push/em2 peers)."""
    ctx = {'smtp': [], 's3_files': {}, 'webpush': [], 'em2push': [], 'em2_follower_push': []}
    return await create_dummy_server(aiohttp_server, extra_routes=dummy_server.routes, extra_context=ctx)


# Settings fields that are rewritten to point at the dummy server during tests.
replaced_url_fields = 'grecaptcha_url', 'ses_endpoint_url', 's3_endpoint_url'


@pytest.fixture(name='settings')
def _fix_settings(dummy_server: DummyServer, tmpdir, settings_session):
    """Per-test Settings: session settings with external URLs redirected to dummy_server."""
    update = {f: f'{dummy_server.server_name}/{f}/' for f in replaced_url_fields}
    return settings_session.copy(update=update)
@pytest.fixture(scope='session', name='main_db_create')
def _fix_main_db_create(settings_session):
    """Create the test database once per session."""
    # loop fixture has function scope so can't be used here.
    from atoolbox.db import prepare_database
    loop = asyncio.new_event_loop()
    loop.run_until_complete(prepare_database(settings_session, True))
    teardown_test_loop(loop)


@pytest.fixture(name='db_conn')
async def _fix_db_conn(loop, settings, main_db_create):
    """Connection wrapped in a transaction; rolled back unless KEEP_DB is set."""
    from buildpg import asyncpg
    conn = await asyncpg.connect_b(dsn=settings.pg_dsn, loop=loop)
    tr = conn.transaction()
    await tr.start()
    yield DummyPgPool(conn)
    if commit_transactions:
        await tr.commit()
    else:
        await tr.rollback()
    await conn.close()


@pytest.fixture(name='conns')
def _fix_conns(db_conn, redis, settings):
    """Bundle of db/redis/settings passed throughout em2 core code."""
    return Connections(db_conn.as_dummy_conn(), redis, settings)


# NOTE(review): pytest.yield_fixture is deprecated; plain pytest.fixture supports yield.
@pytest.yield_fixture(name='redis')
async def _fix_redis(loop, settings):
    """Flushed redis connection for each test, closed on teardown."""
    addr = settings.redis_settings.host, settings.redis_settings.port
    redis = await create_redis(addr, db=settings.redis_settings.database, encoding='utf8', commands_factory=ArqRedis)
    await redis.flushdb()
    yield redis
    redis.close()
    await redis.wait_closed()
class UserTestClient(TestClient):
    """aiohttp test client with JSON helpers that mimic a browser (Referer/Origin headers)."""

    async def post_json(self, path, data=None, *, origin=None, status=200):
        """POST JSON (serialising *data* if needed) and assert the response status."""
        if not isinstance(data, (str, bytes)):
            data = json.dumps(data)
        r = await self.post(
            path,
            data=data,
            headers={
                'Content-Type': 'application/json',
                'Referer': 'http://localhost:3000/dummy-referer/',
                'Origin': origin or 'http://localhost:3000',
            },
        )
        if status:
            assert r.status == status, await r.text()
        return r

    async def get_json(self, path, *, status=200, **kwargs):
        """GET a JSON endpoint; assert status and return the decoded body."""
        r = await self.get(path, **kwargs)
        assert r.status == status, await r.text()
        return await r.json()

    async def get_ndjson(self, path, *, status=200, **kwargs):
        """GET a newline-delimited-JSON endpoint; return the list of decoded lines."""
        r = await self.get(path, **kwargs)
        assert r.status == status, await r.text()
        assert r.content_type == 'text/plain'
        text = await r.text()
        return [json.loads(line) for line in text.split('\n') if line]
@pytest.fixture(name='resolver')
def _fix_resolver(dummy_server: DummyServer, loop):
    """DNS resolver that answers from the dummy server instead of real DNS."""
    return TestDNSResolver(dummy_server, loop=loop)


@pytest.fixture(name='cli')
async def _fix_cli(settings, db_conn, aiohttp_server, redis, resolver):
    """HTTP client for the main em2 app, wired to the test db and test resolver."""
    app = await create_app(settings=settings)
    app['pg'] = db_conn
    app['protocol_app']['resolver'] = resolver
    server = await aiohttp_server(app)
    # Record the ephemeral port so the app/resolver can build self-referencing URLs.
    settings.local_port = server.port
    resolver.main_server = server
    cli = UserTestClient(server)
    yield cli
    await cli.close()
def em2_json_default(v):
    """``json.dumps`` fallback: enums serialize by value, datetimes as ISO-8601."""
    converters = (
        (Enum, lambda obj: obj.value),
        (datetime, lambda obj: obj.isoformat()),
    )
    for kind, convert in converters:
        if isinstance(v, kind):
            return convert(v)
    raise TypeError(f'unable to serialize {type(v)}: {v!r}')
class Em2TestClient(TestClient):
    """Protocol-level client that signs requests like a remote em2 node would."""

    def __init__(self, *args, settings, dummy_server, factory, **kwargs):
        super().__init__(*args, **kwargs)
        self._settings: Settings = settings
        self._dummy_server: DummyServer = dummy_server
        self._factory: Factory = factory
        self._url_func = MakeUrl(self.app).get_path
        self.signing_key = get_signing_key(self._settings.signing_secret_key)

    async def post_json(self, path, data, *, expected_status=200):
        """POST JSON with an em2 request signature header; assert the status."""
        if not isinstance(data, (str, bytes)):
            data = json.dumps(data, default=em2_json_default)
        # Signature covers method, absolute URL, timestamp and body.
        sign_ts = datetime.utcnow().isoformat()
        to_sign = f'POST http://127.0.0.1:{self.server.port}{path} {sign_ts}\n{data}'.encode()
        r = await self.post(
            path,
            data=data,
            headers={
                'Content-Type': 'application/json',
                'Signature': sign_ts + ',' + self.signing_key.sign(to_sign).signature.hex(),
            },
        )
        if expected_status:
            assert r.status == expected_status, await r.text()
        return r

    async def push_actions(self, conv_key, actions, *, em2_node=None, expected_status=200):
        """Push a batch of actions for *conv_key*, as if from *em2_node* (defaults to dummy server)."""
        em2_node = em2_node or f'localhost:{self._dummy_server.server.port}/em2'
        path = self.url('protocol:em2-push', conv=conv_key, query={'node': em2_node})
        return await self.post_json(path, data={'actions': actions}, expected_status=expected_status)

    async def create_conv(
        self,
        *,
        em2_node=None,
        actor='actor@em2-ext.example.com',
        subject='Test Subject',
        recipient='recipient@example.com',
        msg='test message',
        expected_status=200,
    ):
        """Create and publish a conversation from a remote actor via the push endpoint."""
        # use recipient@example.com here so recipient can be changed in test errors
        if not await self._factory.conn.fetchval('select 1 from users where email=$1', 'recipient@example.com'):
            await self._factory.create_user(email='recipient@example.com')
        ts = datetime(2032, 6, 6, 12, 0, tzinfo=timezone.utc)
        conv_key = generate_conv_key(actor, ts, subject)
        actions = [
            {'id': 1, 'act': 'participant:add', 'ts': ts, 'actor': actor, 'participant': actor},
            {'id': 2, 'act': 'participant:add', 'ts': ts, 'actor': actor, 'participant': recipient},
            {'id': 3, 'act': 'message:add', 'ts': ts, 'actor': actor, 'body': msg},
            {'id': 4, 'act': 'conv:publish', 'ts': ts, 'actor': actor, 'body': subject},
        ]
        return await self.push_actions(conv_key, actions, em2_node=em2_node, expected_status=expected_status)

    def url(self, name: str, *, query=None, **kwargs) -> URL:
        """Reverse a named route to a URL path."""
        return self._url_func(name, query=query, **kwargs)
@pytest.fixture(name='em2_cli')
async def _fix_em2_cli(settings, aiohttp_client, cli: UserTestClient, dummy_server, factory):
    """Protocol client bound to the same server as the main UI client."""
    cli = Em2TestClient(cli.server, settings=settings, dummy_server=dummy_server, factory=factory)
    yield cli
    await cli.close()


@pytest.fixture(name='url')
def _fix_url(cli: UserTestClient):
    """Reverse-url helper bound to the main test app."""
    return MakeUrl(cli.server.app).get_path


@dataclass
class User:
    # Test-side record of a created user; id/session_id stay None until login.
    email: str
    first_name: str
    last_name: str
    password: str
    auth_user_id: int
    id: Optional[int] = None
    session_id: Optional[int] = None


@dataclass
class Conv:
    # Key/id pair identifying a created conversation.
    key: str
    id: int
class Factory:
    """Creates users, conversations, labels and contacts directly for tests.

    Keeps track of the first created user/conversation as defaults for later calls.
    """

    def __init__(self, redis, cli, url):
        self.redis: ArqRedis = redis
        self.cli = cli
        self.conn = self.cli.server.app['pg'].as_dummy_conn()
        self.conns = Connections(self.conn, self.redis, cli.server.app['settings'])
        self.email_index = 1
        self.user: User = None
        self.conv: Conv = None
        self._url = url

    async def create_user(self, *, login=True, email=None, first_name='Tes', last_name='Ting', pw='testing') -> User:
        """Insert an auth user (optionally logging them in) and return a User record."""
        if email is None:
            email = f'testing-{self.email_index}@example.com'
            self.email_index += 1
        password_hash = mk_password(pw, self.conns.settings)
        auth_user_id = await self.conn.fetchval(
            """
            insert into auth_users (email, first_name, last_name, password_hash, account_status)
            values ($1, $2, $3, $4, 'active')
            on conflict (email) do nothing returning id
            """,
            email,
            first_name,
            last_name,
            password_hash,
        )
        if not auth_user_id:
            raise RuntimeError(f'user with email {email} already exists')
        user_id = None
        session_id = None
        if login:
            r1, r2 = await self.login(email, pw)
            obj = await r1.json()
            session_id = obj['session']['session_id']
            user_id = await self.conn.fetchval('select id from users where email=$1', email)
        user = User(email, first_name, last_name, pw, auth_user_id, user_id, session_id)
        # First created user becomes the factory default.
        self.user = self.user or user
        return user

    async def create_simple_user(
        self,
        email: str = None,
        visibility: str = None,
        profile_type: str = None,
        main_name: str = 'John',
        last_name: str = None,
        strap_line: str = None,
        image_storage: str = None,
        profile_status: str = None,
        profile_status_message: str = None,
        profile_details: str = None,
    ):
        """Insert a row into ``users`` directly (no auth user) and build its search vector."""
        if email is None:
            email = f'testing-{self.email_index}@example.com'
            self.email_index += 1
        user_id = await self.conn.fetchval_b(
            'insert into users (:values__names) values :values on conflict (email) do nothing returning id',
            values=Values(
                email=email,
                visibility=visibility,
                profile_type=profile_type,
                main_name=main_name,
                last_name=last_name,
                strap_line=strap_line,
                image_storage=image_storage,
                profile_status=profile_status,
                profile_status_message=profile_status_message,
                profile_details=profile_details,
            ),
        )
        if not user_id:
            raise RuntimeError(f'user with email {email} already exists')
        # Weighted full-text search vector: name (A) > strap line (B) > details.
        await self.conn.execute(
            """
            update users set
            vector=setweight(to_tsvector(main_name || ' ' || coalesce(last_name, '')), 'A') ||
                   setweight(to_tsvector(coalesce(strap_line, '')), 'B') ||
                   to_tsvector(coalesce(profile_details, ''))
            where id=$1
            """,
            user_id,
        )
        return user_id

    def url(self, name, *, query=None, **kwargs):
        """Reverse a route; UI routes default to the current user's session id."""
        if self.user and name.startswith('ui:'):
            kwargs.setdefault('session_id', self.user.session_id)
        return self._url(name, query=query, **kwargs)

    async def login(self, email, password, *, captcha=False):
        """Log in via the auth app then exchange the auth token with the UI app."""
        data = dict(email=email, password=password)
        if captcha:
            data['grecaptcha_token'] = '__ok__'
        r1 = await self.cli.post(
            self._url('auth:login'),
            data=json.dumps(data),
            headers={'Content-Type': 'application/json', 'Origin': 'null'},
        )
        assert r1.status == 200, await r1.text()
        obj = await r1.json()
        r2 = await self.cli.post_json(self._url('ui:auth-token'), data={'auth_token': obj['auth_token']})
        assert r2.status == 200, await r2.text()
        assert len(self.cli.session.cookie_jar) == 1
        return r1, r2

    async def create_conv(
        self, subject='Test Subject', message='Test Message', session_id=None, participants=(), publish=False
    ) -> Conv:
        """Create a conversation through the UI endpoint and return its key/id."""
        data = {'subject': subject, 'message': message, 'publish': publish, 'participants': participants}
        r = await self.cli.post_json(
            self.url('ui:create', session_id=session_id or self.user.session_id), data, status=201
        )
        conv_key = (await r.json())['key']
        conv_id = await self.conn.fetchval('select id from conversations where key=$1', conv_key)
        conv = Conv(conv_key, conv_id)
        # First created conversation becomes the factory default.
        self.conv = self.conv or conv
        return conv

    async def create_label(self, name='Test Label', *, user_id=None, ordering=None, color=None, description=None):
        """Insert a label row for a user; None fields are omitted from the insert."""
        val = dict(name=name, user_id=user_id or self.user.id, ordering=ordering, color=color, description=description)
        return await self.conn.fetchval_b(
            'insert into labels (:values__names) values :values returning id',
            values=Values(**{k: v for k, v in val.items() if v is not None}),
        )

    async def act(self, conv_id: int, action: Action) -> List[int]:
        """Apply an action locally, or enqueue it for the leader node if we follow one."""
        key, leader = await self.conns.main.fetchrow('select key, leader_node from conversations where id=$1', conv_id)
        interaction_id = uuid4().hex
        if leader:
            await self.conns.redis.enqueue_job('follower_push_actions', key, leader, interaction_id, [action])
        else:
            action_ids = await apply_actions(self.conns, conv_id, [action])
            if action_ids:
                await push_multiple(self.conns, conv_id, action_ids)
            return action_ids

    async def create_contact(
        self,
        owner: int,
        user_id: int,
        *,
        profile_type: str = None,
        main_name: str = None,
        last_name: str = None,
        strap_line: str = None,
        image_storage: str = None,
        **kwargs,
    ):
        """Insert a contact row linking *owner* to *user_id*; None fields are omitted."""
        val = dict(
            owner=owner,
            profile_user=user_id,
            profile_type=profile_type,
            main_name=main_name,
            last_name=last_name,
            strap_line=strap_line,
            image_storage=image_storage,
            **kwargs,
        )
        contact_id = await self.conn.fetchval_b(
            'insert into contacts (:values__names) values :values returning id',
            values=Values(**{k: v for k, v in val.items() if v is not None}),
        )
        # TODO update contact search vector
        return contact_id
@pytest.fixture(name='factory')
def _fix_factory(redis, cli, url):
    """Factory bound to the main test client/redis."""
    return Factory(redis, cli, url)


@pytest.yield_fixture(name='worker_ctx')
async def _fix_worker_ctx(redis, settings, db_conn, dummy_server, resolver):
    """Context dict for the arq worker; SMTP is stubbed with LogSmtpHandler."""
    session = ClientSession(timeout=ClientTimeout(total=10))
    ctx = dict(
        settings=settings,
        pg=db_conn,
        client_session=session,
        resolver=resolver,
        redis=redis,
        signing_key=get_signing_key(settings.signing_secret_key),
    )
    ctx['smtp_handler'] = LogSmtpHandler(ctx)
    yield ctx
    await session.close()


@pytest.yield_fixture(name='worker')
async def _fix_worker(redis, worker_ctx):
    """Burst-mode arq worker over the app's job functions."""
    worker = Worker(
        functions=worker_settings['functions'], redis_pool=redis, burst=True, poll_delay=0.01, ctx=worker_ctx
    )
    yield worker
    # Detach the shared pool so worker.close() doesn't close the test redis.
    worker.pool = None
    await worker.close()
@pytest.yield_fixture(name='ses_worker')
async def _fix_ses_worker(redis, settings, db_conn, resolver):
    """Worker using the real SES SMTP handler (endpoints redirected to the dummy server)."""
    session = ClientSession(timeout=ClientTimeout(total=10))
    ctx = dict(
        settings=settings,
        pg=db_conn,
        client_session=session,
        resolver=resolver,
        signing_key=get_signing_key(settings.signing_secret_key),
    )
    ctx.update(smtp_handler=SesSmtpHandler(ctx), conns=Connections(ctx['pg'], redis, settings))
    worker = Worker(functions=worker_settings['functions'], redis_pool=redis, burst=True, poll_delay=0.01, ctx=ctx)
    yield worker
    await ctx['smtp_handler'].shutdown()
    # Detach the shared pool so worker.close() doesn't close the test redis.
    worker.pool = None
    await worker.close()
    await session.close()


@pytest.fixture(name='send_to_remote')
async def _fix_send_to_remote(factory: Factory, worker: Worker, db_conn):
    """Publish a conv with one SMTP participant, run the worker, return the send row."""
    await factory.create_user()
    await factory.create_conv(participants=[{'email': 'sender@example.net'}], publish=True)
    assert 4 == await db_conn.fetchval('select count(*) from actions')
    await worker.async_run()
    assert (worker.jobs_complete, worker.jobs_failed, worker.jobs_retried) == (3, 0, 0)
    assert 1 == await db_conn.fetchval('select count(*) from sends')
    return await db_conn.fetchrow('select id, ref from sends')
@pytest.fixture(name='sns_data')
def _fix_sns_data(dummy_server: DummyServer, mocker):
    """Factory building an SNS notification envelope around an SES message."""
    def run(message_id, *, mock_verify=True, **message):
        if mock_verify:
            # Bypass signature certificate verification unless a test opts out.
            mocker.patch('em2.protocol.views.smtp_ses.x509.load_pem_x509_certificate')
        return {
            'Type': 'Notification',
            'MessageId': message_id,
            'Subject': 'Amazon SES Email Receipt Notification',
            'Timestamp': '2032-03-11T18:00:00.000Z',
            'TopicArn': 'arn:aws:sns:us-east-1:123:em2-webhook',
            'Message': json.dumps(message),
            'SigningCertURL': dummy_server.server_name + '/sns_signing_url.pem',
            'Signature': base64.b64encode(b'the signature').decode(),
        }

    return run


@pytest.fixture(name='attachment')
def _fix_attachment():
    """Factory building an EmailMessage attachment with optional extra headers."""
    def run(filename, mime_type, content, headers=None):
        attachment = EmailMessage()
        for k, v in (headers or {}).items():
            attachment[k] = v
        maintype, subtype = mime_type.split('/', 1)
        kwargs = dict(subtype=subtype, filename=filename)
        if maintype != 'text':
            # not sure why this is
            kwargs['maintype'] = maintype
        attachment.set_content(content, **kwargs)
        # Re-apply the headers since set_content may have replaced them.
        for k, v in (headers or {}).items():
            if k in attachment:
                attachment.replace_header(k, v)
            else:
                attachment.add_header(k, v)
        return attachment

    return run
@pytest.fixture(name='create_email')
def _fix_create_email():
    """Factory building a multipart EmailMessage with text/html bodies and attachments."""
    def run(
        subject='Test Subject',
        e_from='sender@example.net',
        to=('testing-1@example.com',),
        text_body='this is a message.',
        html_body='this is an html <b>message</b>.',
        message_id='message-id@example.net',
        attachments=(),
        headers=None,
    ):
        email_msg = EmailMessage()
        if message_id is not None:
            email_msg['Message-ID'] = message_id
        email_msg['Subject'] = subject
        email_msg['From'] = e_from
        email_msg['To'] = ','.join(to)
        # email.utils.format_datetime(datetime(2032, 1, 1, 12, 0))
        email_msg['Date'] = 'Thu, 01 Jan 2032 12:00:00 -0000'
        for k, v in (headers or {}).items():
            email_msg[k] = v
        # Either body may be falsy to omit that part entirely.
        text_body and email_msg.set_content(text_body)
        html_body and email_msg.add_alternative(html_body, subtype='html')
        for attachment in attachments:
            if email_msg.get_content_type() != 'multipart/mixed':
                email_msg.make_mixed()
            email_msg.attach(attachment)
        return email_msg

    return run
@pytest.fixture(name='create_ses_email')
def _fix_create_ses_email(dummy_server, sns_data, create_email):
    """Factory building an SNS payload for an SES-received email stored at dummy S3 *key*."""
    def run(
        *args,
        to=('testing-1@example.com',),
        key='foobar',
        headers=None,
        message_id='message-id@example.net',
        receipt_extra=None,
        **kwargs,
    ):
        msg = create_email(*args, to=to, message_id=message_id, headers=headers, **kwargs)
        # Store the raw message where the S3 action in the receipt points.
        dummy_server.app['s3_files'][key] = msg.as_string()
        headers = headers or {}
        h = [{'name': k, 'value': v} for k, v in headers.items()]
        if message_id is not None:
            h.append({'name': 'Message-ID', 'value': message_id})
        mail = dict(headers=h, commonHeaders={'to': list(to)})
        # All spam/virus/auth verdicts default to PASS; override via receipt_extra.
        receipt = dict(
            action={'type': 'S3', 'bucketName': 'em2-testing', 'objectKeyPrefix': '', 'objectKey': key},
            spamVerdict={'status': 'PASS'},
            virusVerdict={'status': 'PASS'},
            spfVerdict={'status': 'PASS'},
            dkimVerdict={'status': 'PASS'},
            dmarcVerdict={'status': 'PASS'},
        )
        receipt.update(receipt_extra or {})
        return sns_data(message_id, notificationType='Received', mail=mail, receipt=receipt)

    return run
@pytest.fixture(name='create_image')
def _fix_create_image():
    """Factory producing a small in-memory 400x300 test image as encoded bytes."""
    def create_image(image_format='JPEG'):
        stream = BytesIO()
        image = Image.new('RGB', (400, 300), (50, 100, 150))
        ImageDraw.Draw(image).polygon([(0, 0), (image.width, 0), (image.width, 100), (0, 100)], fill=(128, 128, 128))
        image.save(stream, format=image_format, optimize=True)
        return stream.getvalue()

    return create_image


@pytest.fixture(name='web_push_sub')
def _fix_web_push_sub(dummy_server):
    """Web-push subscription dict pointing at the dummy server's vapid endpoint."""
    return {
        'endpoint': dummy_server.server_name.replace('localhost', '127.0.0.1') + '/vapid/',
        'expirationTime': None,
        'keys': {
            # generated by code in dummy_server.py
            'p256dh': 'BGsX0fLhLEJH-Lzm5WOkQPJ3A32BLeszoPShOUXYmMKWT-NC4v4af5uO5-tKfA-eFivOM1drMV7Oy7ZAaDe_UfU',
            'auth': 'x' * 32,
        },
    }
@pytest.fixture(scope='session', name='alt_settings_session')
def _fix_alt_settings_session(settings_session):
    """Second ("alt") node settings: separate pg database and redis db per xdist worker."""
    pg_db = 'em2_test_alt'
    redis_db = 3
    test_worker = os.getenv('PYTEST_XDIST_WORKER')
    if test_worker:
        worker_id = int(test_worker.replace('gw', ''))
        redis_db = worker_id + 8
        if worker_id:
            pg_db = f'em2_test_alt_{worker_id}'
    return settings_session.copy(
        update={
            'pg_dsn': f'postgres://postgres@localhost:5432/{pg_db}',
            'redis_settings': RedisSettings(database=redis_db),
        }
    )


@pytest.fixture(name='alt_settings')
def _fix_alt_settings(dummy_server: DummyServer, tmpdir, alt_settings_session):
    """Per-test alt-node Settings with external URLs redirected to dummy_server."""
    update = {f: f'{dummy_server.server_name}/{f}/' for f in replaced_url_fields}
    return alt_settings_session.copy(update=update)
@pytest.fixture(scope='session', name='alt_db_create')
def _fix_alt_db_create(alt_settings_session):
    """Create the alt-node test database once per session."""
    # loop fixture has function scope so can't be used here.
    from atoolbox.db import prepare_database
    loop = asyncio.new_event_loop()
    loop.run_until_complete(prepare_database(alt_settings_session, True))
    teardown_test_loop(loop)


@pytest.fixture(name='alt_db_conn')
async def _fix_alt_db_conn(loop, alt_settings, alt_db_create):
    """Alt-node connection in a transaction; rolled back unless KEEP_DB is set."""
    from buildpg import asyncpg
    conn = await asyncpg.connect_b(dsn=alt_settings.pg_dsn, loop=loop)
    tr = conn.transaction()
    await tr.start()
    yield DummyPgPool(conn)
    if commit_transactions:
        await tr.commit()
    else:
        await tr.rollback()
    await conn.close()


@pytest.yield_fixture(name='alt_redis')
async def _fix_alt_redis(loop, alt_settings):
    """Flushed redis connection for the alt node."""
    addr = alt_settings.redis_settings.host, alt_settings.redis_settings.port
    redis = await create_redis(
        addr, db=alt_settings.redis_settings.database, encoding='utf8', commands_factory=ArqRedis
    )
    await redis.flushdb()
    yield redis
    redis.close()
    await redis.wait_closed()


@pytest.fixture(name='alt_conns')
def _fix_alt_conns(alt_db_conn, alt_redis, alt_settings):
    """Connections bundle for the alt node."""
    return Connections(alt_db_conn, alt_redis, alt_settings)


@pytest.fixture(name='alt_cli')
async def _fix_alt_cli(alt_settings, alt_db_conn, aiohttp_server, alt_redis, resolver: TestDNSResolver):
    """HTTP client for the alt em2 app, registered with the shared test resolver."""
    app = await create_app(settings=alt_settings)
    app['pg'] = alt_db_conn
    app['protocol_app']['resolver'] = resolver
    server = await aiohttp_server(app)
    resolver.alt_server = server
    alt_settings.local_port = server.port
    cli = UserTestClient(server)
    yield cli
    await cli.close()


@pytest.fixture(name='alt_url')
def _fix_alt_url(alt_cli: UserTestClient):
    """Reverse-url helper bound to the alt test app."""
    return MakeUrl(alt_cli.server.app).get_path


@pytest.fixture(name='alt_factory')
async def _fix_alt_factory(alt_redis, alt_cli, alt_url):
    """Factory bound to the alt node's client/redis."""
    return Factory(alt_redis, alt_cli, alt_url)
@pytest.yield_fixture(name='alt_worker_ctx')
async def _fix_alt_worker_ctx(alt_redis, alt_settings, alt_db_conn, resolver):
    """Worker context for the alt node; SMTP is stubbed with LogSmtpHandler."""
    session = ClientSession(timeout=ClientTimeout(total=10))
    ctx = dict(
        settings=alt_settings,
        pg=alt_db_conn,
        client_session=session,
        resolver=resolver,
        redis=alt_redis,
        signing_key=get_signing_key(alt_settings.signing_secret_key),
    )
    ctx['smtp_handler'] = LogSmtpHandler(ctx)
    yield ctx
    await session.close()


@pytest.yield_fixture(name='alt_worker')
async def _fix_alt_worker(alt_redis, alt_worker_ctx):
    """Burst-mode arq worker for the alt node."""
    worker = Worker(
        functions=worker_settings['functions'], redis_pool=alt_redis, burst=True, poll_delay=0.01, ctx=alt_worker_ctx
    )
    yield worker
    # Detach the shared pool so worker.close() doesn't close the alt test redis.
    worker.pool = None
    await worker.close()
def create_raw_image(width: int = 600, height: int = 600, mode: str = 'RGB') -> Image.Image:
    """Return a PIL image of the given size/mode with a diagonal line drawn across it."""
    image = Image.new(mode, (width, height), (50, 100, 150))
    ImageDraw.Draw(image).line((0, 0) + image.size, fill=128)
    return image


def create_image(width: int = 600, height: int = 600, mode: str = 'RGB', format: str = 'JPEG') -> bytes:
    """Return the bytes of a test image encoded in *format* (name shadows the builtin)."""
    image = create_raw_image(width, height, mode)
    stream = BytesIO()
    image.save(stream, format=format, optimize=True)
    return stream.getvalue()
|
nilq/baby-python
|
python
|
from .alias import *
from .bookmark import *
# Public API of the package: names re-exported from .alias and .bookmark.
__all__ = [ 'ALIAS_KIND_FILE', 'ALIAS_KIND_FOLDER',
            'ALIAS_HFS_VOLUME_SIGNATURE',
            'ALIAS_FIXED_DISK', 'ALIAS_NETWORK_DISK', 'ALIAS_400KB_FLOPPY_DISK',
            'ALIAS_800KB_FLOPPY_DISK', 'ALIAS_1_44MB_FLOPPY_DISK',
            'ALIAS_EJECTABLE_DISK',
            'ALIAS_NO_CNID',
            'kBookmarkPath', 'kBookmarkCNIDPath', 'kBookmarkFileProperties',
            'kBookmarkFileName', 'kBookmarkFileID', 'kBookmarkFileCreationDate',
            'kBookmarkTOCPath', 'kBookmarkVolumePath',
            'kBookmarkVolumeURL', 'kBookmarkVolumeName', 'kBookmarkVolumeUUID',
            'kBookmarkVolumeSize', 'kBookmarkVolumeCreationDate',
            'kBookmarkVolumeProperties', 'kBookmarkContainingFolder',
            'kBookmarkUserName', 'kBookmarkUID', 'kBookmarkWasFileReference',
            'kBookmarkCreationOptions', 'kBookmarkURLLengths',
            'kBookmarkSecurityExtension',
            'AppleShareInfo',
            'VolumeInfo',
            'TargetInfo',
            'Alias',
            'Bookmark',
            'Data',
            'URL' ]
|
nilq/baby-python
|
python
|
from django.apps import apps
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django.core.management.base import BaseCommand
from django.utils import translation
User = get_user_model()
# App config supplying the notification class used before deletion.
user_deletion_config = apps.get_app_config('user_deletion')


class Command(BaseCommand):
    """Notify users flagged for deletion, then delete them."""

    def handle(self, *args, **options):
        # Activate the default language so notification templates render consistently.
        translation.activate(settings.LANGUAGE_CODE)
        users = User.objects.users_to_delete()
        site = Site.objects.get_current()
        user_deletion_config.deletion_notification_class(
            user=None,
            site=site,
            users=users,
        ).notify()
        # Deletion only happens after the notification has been sent.
        users.delete()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class OnTaskConfig(AppConfig):
    """Django app configuration for OnTask; registers signal handlers on startup."""
    name = 'ontask'
    verbose_name = _('OnTask')

    def ready(self):
        # Needed so that the signal registration is done
        from ontask import signals  # noqa
|
nilq/baby-python
|
python
|
from app import db
class Entity(db.Model):
    """A named entity with a free-text description."""
    __tablename__ = 'entities'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), index=True)
    description = db.Column(db.Text, index=True)

    def __repr__(self):
        return "<Entity '{}'>".format(self.name)
class WikipediaSuggest(db.Model):
    """A suggested Wikipedia page match for an entity, optionally confirmed."""
    __tablename__ = 'wikipedia_suggest'
    id = db.Column(db.Integer, primary_key=True)
    # NOTE(review): entity_id has no ForeignKey constraint; the relationship
    # below joins manually on Entity.id -- confirm this is intentional.
    entity_id = db.Column(db.Integer, index=True)
    wikipedia_page_id = db.Column(db.BigInteger)
    wikipedia_page_title = db.Column(db.String(128))
    confirmed = db.Column(db.Boolean, default=False)
    entity = db.relationship('Entity', foreign_keys=[entity_id], primaryjoin='Entity.id == WikipediaSuggest.entity_id', backref='wikipedia_suggest', uselist=False, lazy=True)

    @property
    def wikipedia_url(self):
        """Full English-Wikipedia URL built from the stored page title."""
        return "https://en.wikipedia.org/wiki/{}".format(self.wikipedia_page_title)
class EntityMeta(db.Model):
    """Typed auxiliary description attached to an Entity."""
    __tablename__ = 'entities_meta'
    id = db.Column(db.Integer, primary_key=True)
    entity_id = db.Column(db.Integer)
    # Trailing underscore avoids shadowing the builtin ``type``.
    type_ = db.Column(db.String(64), index=True)
    description = db.Column(db.Text)
    entity = db.relationship('Entity', foreign_keys=[entity_id], primaryjoin='Entity.id == EntityMeta.entity_id', backref='entity_meta', uselist=False, lazy=True)
|
nilq/baby-python
|
python
|
from wx import wx
from wx.lib.pubsub import Publisher
import select
import socket
import sys
import Queue
import os
from thread import *
from collections import defaultdict
# ---------------------------------------------------------------------------
# Module-wide shared state.  Everything below is mutated from many functions
# via ``global`` statements, so treat this as one implicit singleton.
# ---------------------------------------------------------------------------
uploadInfos = defaultdict(dict)  # filename -> {socket: bitvector} for peers we seed to
downloadInfos = defaultdict(dict)  # filename -> {socket: bitvector} for peers we leech from
pieceRequestQueue = defaultdict(dict)  # filename -> {piece index: Queue of sockets owning it}
torrentInfo = defaultdict(dict)  # filename -> parsed .torrent metadata
sizeDownloaded = defaultdict(dict)  # filename -> total bytes received so far
Downloading = defaultdict(dict)  # filename -> download-in-progress flag
pieceStatus = defaultdict(dict)  # filename -> {piece index: blocks received for that piece}
# inputs = defaultdict(dict)
# outputs = defaultdict(dict)
myGroupID = 0  # multicast group this peer belongs to
myGroupList = []  # sockets of peers in the same group
lastPieceStatus = {}
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # listening socket, bound in OnConnect
PIECE_SIZE = 1024 * 512  # piece size in bytes (512 KiB)
BLOCK_SIZE = 4 * 1024  # transfer unit (4 KiB); 128 blocks make one piece
DELIMITER = '|/!@#$%^&*\|'  # terminates one protocol message on the wire
SEPAERATOR = '|/<>?\~|'  # separates fields inside a message (sic: misspelled)
lastBuffered = ''  # trailing partial message carried over between recv() calls
fd = None  # file object for the file currently being downloaded
running = True
badaFlag = False
# torrentInfo = {}
pieceBitVector = {}  # filename -> bitvector of pieces this peer already has
seeder = True
count = 0
myHost = ""
myPort = 0
inputs = []  # sockets monitored for reading by select()
outputs = []  # sockets monitored for writing by select()
numPiecesDownloaded = 0
# Outgoing message queues (socket:Queue)
message_queues = {}
def multicast(index, currentFile, blockNumber, byteData):
    """Queue a MULTICAST message carrying one block to every peer in my group."""
    # NOTE(review): blockNumber is unconditionally overwritten with 1, so the
    # real block number of byteData is never transmitted -- confirm intended.
    blockNumber = 1
    for s in myGroupList:
        print "Inside multicast = " + str(len(myGroupList))
        msg = "MULTICAST" + SEPAERATOR + currentFile + SEPAERATOR + str(index) + SEPAERATOR + str(
            blockNumber) + SEPAERATOR + byteData + DELIMITER
        message_queues[s].put(msg)
def broadcast(index, currentFile):
    """Record completed piece *index* in the on-disk bitvector and send a
    BROADCAST message to every peer we download this file from."""
    print "In broadcast"
    try:
        print currentFile
        bitvector = returnBitVector(currentFile, int(torrentInfo[currentFile]["pieces"]))
        # Pieces are 1-based; the completed one is stored as the char '1'.
        bitvector[index - 1] = '1'
    except:
        print "error in bitvector"
    file = "./bitvector/" + currentFile.split('.')[0] + ".vec"
    f = open(file, "w")
    try:
        str1 = stringify(bitvector)
    except:
        print "error in stringify"
    f.write(str1)
    for s in downloadInfos[currentFile]:
        msg = "BROADCAST" + SEPAERATOR + currentFile + SEPAERATOR + str(index) + DELIMITER
        message_queues[s].put(msg)
def getSize(filename):
    """Return the size in bytes of *filename* (relative to cwd), 0 if absent."""
    print "In getSize"
    filename = "./" + filename
    print "retrieving size of file: " + filename
    if (os.path.exists(filename)):
        size = os.path.getsize(filename)
    else:
        size = 0
    print "Size is: " + str(size)
    # if size < 0:
    # import subprocess as s
    # size = long( s.Popen("ls -l %s | cut -d ' ' -f5" % filename,
    # shell=True, stdout=s.PIPE).communicate()[0] )
    return size
def getKey(item):
    """Sort-key helper: the leading element of *item*."""
    (head,) = item[:1]
    return head
def returnPeerList(torrentInfo, host, port, currentFile):
    """Ask the torrent's trackers for the peer list of *currentFile*.

    Tries each tracker in turn until one accepts the connection; returns the
    parsed list of "host:port" strings, or [] when no tracker is reachable.
    """
    connected = False
    for tracker in torrentInfo[currentFile]['trackers']:
        print tracker
        tracker = tracker.strip('\n')
        hostTracker, portTracker = tracker.split(':')
        server_address = (hostTracker, int(portTracker))
        msg = "REQUEST_PEERS-" + host + ":" + str(port) + ",FILE:" + torrentInfo[currentFile]['name']
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        print 'connecting to %s port %s' % server_address
        try:
            s.connect(server_address)
            print '%s: sending "%s"' % (s.getsockname(), msg)
            s.send(msg)
            connected = True
            break
        except:
            print "Unable to Connect"
            pass
    if connected:
        # Single recv: assumes the whole peer list fits in one BLOCK_SIZE read.
        data = s.recv(BLOCK_SIZE)
        # print "Received data:" + data
        data = data.split('-')
        peerList = data[1].split(',')
        print peerList
        return peerList
    else:
        return []
def parseTorrentFile(inp):
global trackers, torrentName
currentFile = ""
with open(inp) as f:
for line in f:
info = line.strip(' ')
info = line.strip('\n')
info = line.split('-')
if info[0] == 'name':
currentFile = info[1].split('\n')[0]
with open(inp) as f:
for line in f:
info = line.strip(' ')
info = line.strip('\n')
info = line.split('-')
if info[0] == 'trackers':
torrentInfo[currentFile]['trackers'] = info[1].split(',')
elif info[0] == 'name':
torrentInfo[currentFile]['name'] = info[1]
elif info[0] == 'length':
torrentInfo[currentFile]['length'] = int(info[1])
elif info[0] == 'pieces':
torrentInfo[currentFile]['pieces'] = int(info[1])
else:
print "Torrent File Corrupted\n"
sys.exit(0)
for i in xrange(1, torrentInfo[currentFile]['pieces'] + 1):
pieceStatus[currentFile][i] = 0
return currentFile
def availablePieces():
    """Stub: report a fixed count of available pieces."""
    fixed_count = 10
    return fixed_count
def processMsg(data):
    """Seeder side: handle a raw request and build the response block queue.

    For a REQUEST_FILE message, reads the requested file in BLOCK_SIZE chunks
    starting at a hard-coded offset and returns ("Queue", Queue-of-blocks).
    """
    global seeder, torrentInfo, uploadInfos, downloadInfos, pieceRequestQueue
    response = ''
    data = data.split(SEPAERATOR)
    Q = Queue.Queue()
    header = data[0]
    if header == "REQUEST_FILE":
        currentPiece = 1
        count = 0
        pieceFlag = True
        f = open(data[1].strip('\n'), "rb")
        fileInfo = os.stat(data[1].strip('\n'))
        fileSize = fileInfo.st_size
        # NOTE(review): pieces is half the file size and offset is fixed at 1;
        # the commented line below suggests these were hand-tuned per seeder.
        pieces = fileSize / 2
        offset = 1 # for 1st seeder and this for 2nd seeder
        # offset = (pieces/PIECE_SIZE)*PIECE_SIZE + 1
        f.seek(offset)
        msg = "OFFSET" + SEPAERATOR + str(offset)
        # Q.put(msg)
        l = f.read(BLOCK_SIZE)
        while (l and pieceFlag):
            Q.put(l)
            l = f.read(BLOCK_SIZE)
            count = count + 1
            if (count / 10 == currentPiece):
                print "Piece " + str(currentPiece) + " put in queue for senting to leecher"
                currentPiece = currentPiece + 1
            if (currentPiece == pieces / PIECE_SIZE and offset == 0):
                pieceFlag = False
        f.close()
        response = "Queue"
        ret = (response, Q)
    elif header == "HAVE":
        # NOTE(review): ``ret`` is only bound in the REQUEST_FILE branch, so
        # any other header (including HAVE) raises NameError at the return.
        pass
    return ret
def handleRecvIO(s, file, length):
    """Receive *length* blocks from socket *s* and write them to *file*,
    printing a countdown of the percentage remaining."""
    global seeder, torrentInfo, uploadInfos, downloadInfos, pieceRequestQueue
    count = 1
    percent = 100
    offset = 0
    f = open(file, "wb")
    f.seek(offset)
    print "Ready to Recieve : " + file
    while (count <= length):
        part = s.recv(BLOCK_SIZE)
        f.write(part)
        count = count + 1
        if count == length / percent:
            print "" + str(percent) + " Percent Remaining"
            if percent != 1:
                percent = percent - 1
    f.close()
    print file + " Downloaded Successfully"
# generates the queue
def pieceRequestOrdering(filename, currentFile):
    """Build, for every piece, the queue of peer sockets that already own it.

    Walks the bitvector each download peer sent during handshaking and
    enqueues that peer's socket under every piece index it advertises.
    """
    filename = filename.strip("\n")
    global seeder, torrentInfo, uploadInfos, downloadInfos, pieceRequestQueue, inputs, outputs
    for x in xrange(1, int(torrentInfo[currentFile]["pieces"]) + 1):
        pieceRequestQueue[filename][x] = Queue.Queue()
    print inputs
    # inputs[0] is our own listening socket -- only look at peer connections.
    tempList = inputs[1:]
    for s in tempList:
        # print "Hello"
        if s in downloadInfos[filename]:
            bitvector = downloadInfos[filename][s]
            print bitvector
            index = 1
            pieces = int(torrentInfo[currentFile]["pieces"])
            print "length of bitvector is: " + str(len(bitvector))
            for i in xrange(0, pieces):
                if (bitvector[i] == '1'):
                    pieceRequestQueue[filename][i + 1].put(s)
                else:
                    print i
            # for i in bitvector:
            # if i =='1':
            # if(index==1):
            # print "why????????????????????????"
            # pieceRequestQueue[filename][index].put(s)
            # index = index + 1
# read from file and send blocks to the requesting peer
def retrieveBytesFromFile(s, filename, index):
    """Read piece *index* of *filename* block by block and queue each block
    as a HAVE_PIECE message on socket *s*'s outgoing queue."""
    global PIECE_SIZE, BLOCK_SIZE
    filename = filename.strip('\n')
    print filename
    offset = 0
    try:
        fo = open(filename, "r+b")
        print "Reading File at index : " + str(index)
        # Pieces are 1-based; seek to the start of the requested piece.
        fo.seek((index - 1) * PIECE_SIZE, 0)
        print "current file position is : " + str(fo.tell())
        # print "Name of the file: ", fo.name
        # fo.seek((index-1)*PIECE_SIZE, 0)
        for blockNumber in xrange(0, PIECE_SIZE / BLOCK_SIZE):
            print "In Loop"
            byteData = fo.read(BLOCK_SIZE)
            if (byteData == ''):
                print "byteData is NULL"
                break
            if (byteData):
                data = "HAVE_PIECE" + SEPAERATOR + filename + SEPAERATOR + str(index) + SEPAERATOR + str(
                    blockNumber) + SEPAERATOR + byteData + DELIMITER
                message_queues[s].put(data)
        fo.close()
    except:
        print "Error Handling File "
        pass
# handles different messages
def processRecvdMsg(data, s):
    """Dispatch one protocol message received on socket *s*.

    ``data`` is a single DELIMITER-stripped message; the first SEPAERATOR
    field is the header, the second the target filename.  Handles both the
    seeder-side (REQUEST_FILE, REQUEST_PIECE, HANDSHAKE) and leecher-side
    (RECEIVE_FILE, REPLY_HANDSHAKE, HAVE_PIECE, BROADCAST, MULTICAST)
    halves of the protocol.
    """
    global seeder, torrentInfo, uploadInfos, downloadInfos, pieceRequestQueue, badaFlag, fd, pieceStatus, sizeDownloaded, inputs, outputs
    # print 'received "%s" from %s' % (data, s.getpeername())
    # print "data is: " + data
    data = data.strip('\n')
    temp = data.split(SEPAERATOR)
    # print "temp is: " + ('')temp
    try:
        currentFile = temp[1]
    except:
        print "temp[1] out of index"
        return
    print "In processRecvdMsg"
    print currentFile
    header = temp[0]
    data = ""
    if header == "RECEIVE_FILE":
        # Whole-file transfer: sender announces how many blocks will follow.
        file = temp[1]
        length = int(temp[2])
        handleRecvIO(s, file, length)
        # running = False
        pass
    elif header == "REQUEST_FILE":
        # Seeder side of the whole-file transfer above.
        response, Q = processMsg(data)
        if response == "Queue":
            length = Q.qsize()
            msg = "RECEIVE_FILE" + SEPAERATOR + temp[1] + SEPAERATOR + str(length) + SEPAERATOR
            message_queues[s].put(msg)
            while (not Q.empty()):
                message_queues[s].put(Q.get_nowait())
    elif header == "HANDSHAKE":
        # Incoming peer introduces itself with its piece bitvector; we store
        # it and answer with our own bitvector.
        filename = temp[1]
        grpID = int(temp[2])
        bitvector = temp[3]
        if (grpID == myGroupID):
            myGroupList.append(s)
        pieces = len(bitvector)
        # uploadInfos[filename][s.getpeername()] = bitvector
        uploadInfos[filename][s] = bitvector
        print "In Handshake"
        print filename
        print "after Handshake"
        bitvector = returnBitVector(filename, pieces)
        data = "REPLY_HANDSHAKE" + SEPAERATOR + filename + SEPAERATOR + str(myGroupID) + SEPAERATOR + stringify(
            bitvector) + DELIMITER
        message_queues[s].put(data)
    elif header == "REPLY_HANDSHAKE":
        # Our HANDSHAKE was answered: remember the peer's bitvector and make
        # sure the destination file exists and is open for random writes.
        filename = temp[1]
        grpID = int(temp[2])
        bitvector = temp[3]
        if (grpID == myGroupID):
            myGroupList.append(s)
        pieces = len(bitvector)
        downloadInfos[filename][s] = bitvector
        if (not os.path.exists(filename)):
            fd = open(filename, "w+b", 0)
            fd.close()
        fd = open(filename, "rw+b", 0)
        # uploadInfos[filename][s] = bitvector
    elif header == "REQUEST_PIECE":
        # print temp
        filename = temp[1]
        index = int(temp[2])
        actualPieceData = retrieveBytesFromFile(s, filename, index)
    elif header == "MULTICAST":
        # A group member relays a block it downloaded; write it straight in.
        filename = temp[1]
        index = int(temp[2])
        blockNumber = int(temp[3])
        byteData = temp[4]
        print "Multicasting Message recieved for block = " + str(blockNumber) + "and piece = " + str(
            index) + "and file = " + filename
        try:
            position = PIECE_SIZE * (index - 1) + blockNumber * BLOCK_SIZE
            if (not os.path.exists(filename)):
                fs = open(filename, "wb+")
            fs = open(filename, "rwb+")
            fs.seek(position, 0)
            fs.write(byteData)
            fs.close()
        except:
            print "Error while Multicasting"
    elif header == "BROADCAST":
        # Peer announces a completed piece; it becomes a source for it.
        filename = temp[1]
        index = int(temp[2])
        print "Broadcast message received for piece " + str(index) + "of file " + filename
        if s in downloadInfos[filename]:
            pieceRequestQueue[filename][index].put(s)
    elif header == "HAVE_PIECE":
        # One block of a requested piece arrives: write it at its offset and
        # count blocks until the piece (128 blocks of 4 KiB) is complete.
        filename = temp[1]
        index = int(temp[2])
        blockNumber = int(temp[3])
        byteData = temp[4]
        # print "piece status list: " + (' ').join(pieceStatus)
        # print pieceStatus
        # print len(temp)
        # print len(byteData)
        sizeDownloaded[filename] += len(byteData)
        try:
            # if(not os.path.exists(filename)):
            # fd = open(filename,"wb+")
            # fd.close()
            # fd = open(filename,"rwb+")
            position = PIECE_SIZE * (index - 1) + blockNumber * BLOCK_SIZE
            fd.seek(position, 0)
            writtenAmount = fd.write(byteData)
            # time.sleep(0.001)
            fd.flush()
            try:
                pieceStatus[filename][index] = pieceStatus[filename][index] + 1
            except:
                print "error in pieceStatus"
            try:
                if (pieceStatus[filename][index] == 128):
                    print "Piece no. =" + str(index) + " of file " + filename + " Downloaded"
                    broadcast(index, filename)
                    multicast(index, filename, blockNumber, byteData)
            except:
                print "Error in casting"
            print "Downloaded index = " + str(index) + " blockNumber = " + str(
                blockNumber) + " for filename = " + filename + " at position: " + str(position) + " till: " + str(
                fd.tell()) + " Written = " + str(writtenAmount)
        except:
            print "Error handling while Flushing data"
    else:
        # print "data count: " + data
        pass
    if s not in outputs:
        outputs.append(s)
# send handshake message to the peers and recv handshake_reply
# transfers bitvector to each other
def handShaking(peerList, currentFile):
    """Connect to every peer returned by the tracker and exchange HANDSHAKE /
    REPLY_HANDSHAKE messages carrying each side's piece bitvector."""
    global seeder, torrentInfo, uploadInfos, downloadInfos, pieceRequestQueue, pieceStatus, inputs, outputs
    print "Seeder in handshaking"
    for peers in peerList:
        print peers
        host, port = peers.split(':')
        port = int(port)
        peerServer = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            peerServer.connect((host, port))
            # peerServer.setblocking(0)
            outputs.append(peerServer)
            print "Connection to " + peers + " succeeded"
            print "Creating output queue for " + peers
            message_queues[peerServer] = Queue.Queue()
            data = "HANDSHAKE" + SEPAERATOR + torrentInfo[currentFile]['name'] + SEPAERATOR + str(
                myGroupID) + SEPAERATOR + stringify(pieceBitVector[torrentInfo[currentFile]["name"]]) + DELIMITER
            print data
            peerServer.send(data)
            print peerServer.getpeername()
            # print "Hello"
            try:
                # Blocking read: the handshake reply is handled synchronously
                # before the socket goes non-blocking below.
                data = peerServer.recv(BLOCK_SIZE)
                print 'received "%s" from %s' % (data, peerServer.getpeername())
                # print data
                processRecvdMsg(data, peerServer)
            except:
                print "Error while recieveing "
            inputs.append(peerServer)
        except:
            print "Some error"
            pass
        peerServer.setblocking(0)
# request messages are put in the message queue corresponding to the peers
def rarestPieceFirstAlgo(filename, pieces):
    """Queue REQUEST_PIECE messages, rarest piece first.

    Sorts pieces by how many peers own them (owner-queue size) and requests
    each available piece from one of its owners.
    """
    print "Executing Rarest Piece first algorithm"
    filename = filename.strip('\n')
    global seeder, torrentInfo, uploadInfos, downloadInfos, pieceRequestQueue
    # Creating list of tuples for rarest first order (pieceCount,pieceIndex)
    countPiece = []
    for i in xrange(1, pieces + 1):
        countPiece.append((pieceRequestQueue[filename][i].qsize(), i))
    countPiece = sorted(countPiece, key=getKey)
    print countPiece
    for tuples in countPiece:
        pieceQsize = tuples[0]
        pieceIndex = tuples[1]
        # FORMAT of Sending message
        if pieceQsize != 0:
            data = "REQUEST_PIECE" + SEPAERATOR + filename + SEPAERATOR + str(pieceIndex) + DELIMITER
            s = pieceRequestQueue[filename][pieceIndex].get_nowait()
            message_queues[s].put(data)
            # print data
# called by recvMessage
def reactor(server, currentFile):
    """select()-based event loop.

    Accepts new connections on *server*, reassembles DELIMITER-framed
    messages from readable peers (keeping any trailing partial message in
    lastBuffered), flushes each socket's outgoing queue, and tears down
    sockets that error out.
    """
    global seeder, torrentInfo, uploadInfos, downloadInfos, pieceRequestQueue, inputs, outputs, message_queues, DELIMITER, lastBuffered, badaFlag, running, numPiecesDownloaded, Downloading, sizeDownloaded
    Downloading[currentFile] = True
    sizeDownloaded[currentFile] = 0
    running = True
    while running:
        # In case of a leecher, an initial check to whether the file has been downloaded
        # if not seeder:
        # # print "Size Downloaded : " + str(sizeDownloaded[currentFile])
        # if(getSize(currentFile)==int(torrentInfo[currentFile]['length'])):
        # Downloading[currentFile] = False
        # print currentFile + " downloaded Successfully"
        # #TO DO here:
        # #call some dialog box saying "Download completed for the currentFile"
        # #Close sockets gracefully
        # fd.close()
        # break
        # # try:
        # # for s in inputs:
        # # if s is not server:
        # # s.close()
        # # for s in outputs:
        # # if s is not server:
        # # s.close()
        # # except:
        # # print "Error while closing sockets"
        # Wait for at least one of the sockets to be ready for processing
        print '\nwaiting for the next event using select'
        readable, writable, exceptional = select.select(inputs, outputs, inputs)
        # Handle inputs
        for s in readable:
            print "In Readable"
            if s is server:
                # A "readable" server socket is ready to accept a connection
                connection, client_address = s.accept()
                print 'new connection from', client_address
                connection.setblocking(0)
                inputs.append(connection)
                outputs.append(connection)
                # Give the connection a queue for data we want to send
                message_queues[connection] = Queue.Queue()
            else:
                bufferMsg = s.recv(BLOCK_SIZE)
                # print bufferMsg
                if bufferMsg:
                    # Prepend the partial message left over from the last read.
                    if lastBuffered != "":
                        bufferMsg = lastBuffered + bufferMsg
                        lastBuffered = ""
                    # if badaFlag :
                    # print "Stray data is bufferMsg = "+bufferMsg
                    bufferMsg = bufferMsg.split(DELIMITER)
                    # if badaFlag :
                    # print " bufferMsgafter splitting DELIMITER= " + ('').join(bufferMsg)
                    if (bufferMsg[-1]):
                        lastBuffered = bufferMsg[-1]
                    for data in bufferMsg[:-1]:
                        processRecvdMsg(data, s)
                    # A readable client socket has data
                else:
                    # Interpret empty result as closed connection
                    print 'closing', client_address, 'after reading no data'
                    # Stop listening for input on the connection
                    if s in outputs:
                        outputs.remove(s)
                    inputs.remove(s)
                    s.close()
                    # Remove message queue
                    del message_queues[s]
        # Handle outputs
        for s in writable:
            print "In writable"
            try:
                next_msg = message_queues[s].get_nowait()
                # print "nextmessage: " + next_msg
            except:
                # No messages waiting so stop checking for writability.
                print 'output queue for', s.getpeername(), 'is empty'
                outputs.remove(s)
            else:
                temp = next_msg.split(SEPAERATOR)
                if (temp[0] == "HAVE_PIECE"):
                    print "Sending data for file = " + temp[1] + " PieceIndex = " + temp[2] + " blockNumber = " + temp[
                        3]
                # print 'sending "%s" to %s' % (next_msg, s.getpeername())
                s.send(next_msg)
                # time.sleep(0.075)
        # Handle "exceptional conditions"
        for s in exceptional:
            print "In Exceptional"
            print 'handling exceptional condition for', s.getpeername()
            # Stop listening for input on the connection
            inputs.remove(s)
            if s in outputs:
                outputs.remove(s)
            s.close()
            # Remove message queue
            del message_queues[s]  # reactor is called by recvMessage
# thread runs here
def recvMessage(host, port, peerList, currentFile):
    """Peer thread entry point: handshake with peers, schedule piece requests
    when leeching, then run the select() reactor until shutdown."""
    # global seeder,torrentInfo,uploadInfos,downloadInfos,pieceRequestQueue, inputs, outputs
    # print "Entering recvMessage"
    # global count
    # server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # server.setblocking(0)
    # # Bind the socket to the port
    # server_address = (host, port)
    # print 'starting up on %s port %s' % server_address
    # server.bind(server_address)
    # count = count+1
    # # Listen for incoming connections
    # server.listen(5)
    # # if(not seeder)
    # inputs = []
    # outputs = []
    # print "currentFile is : " + currentFile
    # print "inputs is : " + str(type(inputs))
    # print "outputs is : " + str(type(outputs))
    # inputs.append(server)
    # # Sockets from which we expect to read
    handShaking(peerList, currentFile)
    if not seeder:
        pieceRequestOrdering(torrentInfo[currentFile]["name"], currentFile)
        rarestPieceFirstAlgo(torrentInfo[currentFile]["name"], int(torrentInfo[currentFile]['pieces']))
    reactor(server, currentFile)  # blocks until the reactor loop ends
    print "Closing Main Socket"
    server.close()
# bitvector showing which pieces I have. '0' means piece missing and '1' means I have the piece.
def returnBitVector(filename, pieces):
    """Load this peer's piece bitvector for *filename* from ./bitvector/.

    Returns a list of ints (0 = piece missing, 1 = piece present); a missing
    or unreadable .vec file yields an all-zero vector.
    """
    file = "./bitvector/" + filename.split(".")[0] + ".vec"
    print file
    try:
        f = open(file, "r")
        stringBitVector = f.read()
        stringBitVector = stringBitVector.strip('\n')
        bitvector = []
        for i in xrange(0, pieces):
            bitvector.append(int(stringBitVector[i]))
        return bitvector
    except:
        print "printing 0 vector"
        return [0] * pieces
def stringify(bitvector):
    """Serialize *bitvector* into a string of '0'/'1' characters.

    An entry equal to 0 maps to '0'; anything else (the int 1, or the
    character '1' that broadcast() writes into the vector) maps to '1'.
    """
    # join() builds the result in one pass; the original accumulated with
    # repeated concatenation and shadowed the builtin name ``str``.
    return "".join('0' if bit == 0 else '1' for bit in bitvector)
def initialize(torrentFile):
    """Parse *torrentFile*, load our bitvector, and fetch the peer list.

    Returns (currentFile, peerList); peerList is [] when seeding.
    """
    global myHost, myPort, seeder, torrentInfo, uploadInfos, downloadInfos, pieceRequestQueue
    # if(len(sys.argv) < 3):
    # print "To run please follow following Format: python %s hostname port (optional:Torrent File)",sys.argv[0]
    # sys.exit("Bye")
    seeder = True
    # myHost = sys.argv[1]
    # myPort = int(sys.argv[2])
    currentFile = ""
    peerList = []
    if len(sys.argv) == 1:
        # filename = sys.argv[3]
        currentFile = parseTorrentFile(torrentFile)
        torrentInfo[currentFile]["name"] = torrentInfo[currentFile]["name"].strip('\n')
        print "calling returnBitVector"
        bitvector = returnBitVector(torrentInfo[currentFile]["name"], int(torrentInfo[currentFile]["pieces"]))
        print "call to returnBitVector ended"
        pieceBitVector[torrentInfo[currentFile]["name"]] = bitvector
        seeder = False
    if (not seeder):
        peerList = returnPeerList(torrentInfo, myHost, myPort, currentFile)
        print "Peer List Received"
    return (currentFile, peerList)
# if __name__ == '__main__':
# #Tracker connection
# initialize()
# try:
# start_new_thread(recvMessage,(myHost,myPort,peerList))
# # start_new_thread(sendMessage,(host,port,peerList))
# except:
# print "Error: unable to start thread"
# while 1:
# pass
#######################################################################################
class MyProgressDialog(wx.Dialog):
    """Dialog showing a progress gauge driven by pubsub 'update' messages."""

    # ----------------------------------------------------------------------
    def __init__(self):
        """Build the gauge and subscribe to the 'update' pubsub topic."""
        wx.Dialog.__init__(self, None, title="Progress")
        self.count = 0
        self.progress = wx.Gauge(self, range=20)
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.progress, 0, wx.EXPAND)
        self.SetSizer(sizer)
        # Kick off the pubsub pump.  BUGFIX: in the original the 'wx.' prefix
        # was swallowed by the preceding comment, leaving a bare CallAfter
        # that raised NameError.
        wx.CallAfter(Publisher().sendMessage, "update", "")
        Publisher().subscribe(self.updateProgress, "update")

    # ----------------------------------------------------------------------
    def updateProgress(self, msg):
        """
        Update the progress bar
        """
        self.count += 1
        # BUGFIX: update the gauge before tearing the dialog down; the
        # original called SetValue after Destroy once the gauge was gone.
        self.progress.SetValue(self.count)
        if self.count >= 20:
            self.Destroy()
class Example(wx.Frame):
    """Main window: collects IP/port/torrent/group inputs and drives the peer."""
    def __init__(self, parent, title):
        super(Example, self).__init__(parent, title=title,
                                      size=(460, 300))
        # self.InitUI()
        self.panel = wx.Panel(self)
        sizer = wx.GridBagSizer(5, 5)
        self.text1 = wx.StaticText(self.panel, label="BITTORRENT v1.0")
        sizer.Add(self.text1, pos=(0, 0), flag=wx.TOP | wx.LEFT | wx.BOTTOM,
                  border=15)
        self.icon = wx.StaticBitmap(self.panel, bitmap=wx.Bitmap('exec.png'))
        sizer.Add(self.icon, pos=(0, 4), flag=wx.TOP | wx.RIGHT | wx.ALIGN_RIGHT,
                  border=5)
        self.line = wx.StaticLine(self.panel)
        sizer.Add(self.line, pos=(1, 0), span=(1, 5),
                  flag=wx.EXPAND | wx.BOTTOM, border=10)
        self.text2 = wx.StaticText(self.panel, label="Port")
        sizer.Add(self.text2, pos=(3, 0), flag=wx.LEFT, border=10)
        self.portText = wx.TextCtrl(self.panel)
        sizer.Add(self.portText, pos=(3, 1), span=(1, 3), flag=wx.TOP | wx.EXPAND)
        self.text3 = wx.StaticText(self.panel, label="Torrent File")
        sizer.Add(self.text3, pos=(4, 0), flag=wx.LEFT | wx.TOP, border=10)
        self.torrentFileText = wx.TextCtrl(self.panel)
        sizer.Add(self.torrentFileText, pos=(4, 1), span=(1, 3), flag=wx.TOP | wx.EXPAND,
                  border=5)
        self.text4 = wx.StaticText(self.panel, label="IP")
        sizer.Add(self.text4, pos=(2, 0), flag=wx.LEFT | wx.TOP, border=10)
        self.IPText = wx.TextCtrl(self.panel)
        sizer.Add(self.IPText, pos=(2, 1), span=(1, 3), flag=wx.TOP | wx.EXPAND,
                  border=5)
        self.text4 = wx.StaticText(self.panel, label="Group ID")
        sizer.Add(self.text4, pos=(5, 0), flag=wx.LEFT | wx.TOP, border=10)
        self.GroupText = wx.TextCtrl(self.panel)
        sizer.Add(self.GroupText, pos=(5, 1), span=(1, 1), flag=wx.TOP | wx.EXPAND,
                  border=5)
        self.IPText.SetValue("127.0.0.1")
        self.portText.SetValue("10001")
        self.button1 = wx.Button(self.panel, label="Browse...")
        sizer.Add(self.button1, pos=(4, 4), flag=wx.TOP | wx.RIGHT, border=5)
        self.Bind(wx.EVT_BUTTON, self.OnButton_FrameHandler, self.button1)
        self.button3 = wx.Button(self.panel, label='Help')
        sizer.Add(self.button3, pos=(7, 0), flag=wx.LEFT, border=10)
        self.button4 = wx.Button(self.panel, label="Start")
        sizer.Add(self.button4, pos=(7, 3))
        self.Bind(wx.EVT_BUTTON, self.OnClickStart, self.button4)
        self.button4 = wx.Button(self.panel, label="Connect")
        sizer.Add(self.button4, pos=(3, 4))
        self.Bind(wx.EVT_BUTTON, self.OnConnect, self.button4)
        self.button5 = wx.Button(self.panel, label="Exit")
        sizer.Add(self.button5, pos=(7, 4), span=(1, 1),
                  flag=wx.BOTTOM | wx.RIGHT, border=5)
        self.Bind(wx.EVT_BUTTON, self.OnClickExit, self.button5)
        sizer.AddGrowableCol(2)
        self.panel.SetSizer(sizer)
        self.Centre()
        self.Show()
    def OnButton_FrameHandler(self, event):
        """Browse button: pick a .torrent file and show its path."""
        # print "Hello"
        openFileDialog = wx.FileDialog(self, "Open Torrent file", "", "",
                                       "Torrent files (*.torrent)|*.torrent", wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
        if openFileDialog.ShowModal() == wx.ID_CANCEL:
            return
        else:
            paths = openFileDialog.GetPaths()
            self.torrentFileText.SetValue(paths[0])
    def OnClickExit(self, event):
        """Exit button: close the window."""
        print "Thank you for sharing"
        # NOTE(review): this binds a *local* name, not the module-level
        # ``running`` flag, so the reactor loop is not actually stopped here.
        running = False
        self.Destroy()
    def OnConnect(self, event):
        """Connect button: bind the shared listening socket to the entered
        IP/port and start listening for peers."""
        myHost = self.IPText.GetValue()
        myPort = int(self.portText.GetValue())
        global seeder, torrentInfo, uploadInfos, downloadInfos, pieceRequestQueue, inputs, outputs, server
        print "Entering recvMessage"
        global count
        print "Socket connected"
        # server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server.setblocking(0)
        # Bind the socket to the port
        server_address = (myHost, myPort)
        print 'starting up on %s port %s' % server_address
        server.bind(server_address)
        count = count + 1
        # Listen for incoming connections
        server.listen(5)
        # if(not seeder)
        inputs = []
        outputs = []
        # print "currentFile is : " + currentFile
        print "inputs is : " + str(type(inputs))
        print "outputs is : " + str(type(outputs))
        inputs.append(server)
        # Sockets from which we expect to read
    def OnClickStart(self, event):
        """Start button: parse the torrent and launch the peer thread."""
        print "Ready to share torrents"
        torrentFilename = self.torrentFileText.GetValue()
        # NOTE(review): binds a local, so the module-level myGroupID used in
        # handshaking is never updated from this text field.
        myGroupID = self.GroupText.GetValue()
        (currentFile, peerList) = initialize(torrentFilename)
        # recvMessage(myHost,myPort,peerList, currentFile)
        try:
            start_new_thread(recvMessage, (myHost, myPort, peerList, currentFile))
        except:
            print "Error: unable to start thread"
    def createProgressBar(self):
        """Build a gauge wired to pubsub 'update' messages."""
        self.count = 0
        self.progress = wx.Gauge(self, range=20)
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.progress, 2,
                  flag=wx.EXPAND, border=5)
        self.SetSizer(sizer)
        Publisher().subscribe(self.updateProgress, "update")
    def updateProgress(self, msg):
        """
        Update the progress bar
        """
        self.count += 1
        if self.count >= 20:
            self.Destroy()
        self.progress.SetValue(self.count)
    def InitUI(self):
        pass
def GUI():
    """Create the wx application, show the main window, and run its loop."""
    application = wx.App()
    Example(None, title="Distributed Bittorent")
    application.MainLoop()
# Script entry point: start the GUI.
if __name__ == '__main__':
    GUI()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2021 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Community access system field."""
from invenio_rdm_records.records.systemfields.access import Owner, Owners, \
RecordAccessField
class _Owner(Owner):
    """Owner wrapper exposing the raw id for user-type owners."""

    @property
    def user(self):
        """The owner id when this owner is a user; ``None`` otherwise."""
        return self.owner_id if self.owner_type == 'user' else None
class _Owners(Owners):
    """Owner collection that defaults to the community ``_Owner`` wrapper."""

    owner_cls = _Owner

    def __init__(self, owners=None, owner_cls=None):
        """Create a new list of owners."""
        self.owner_cls = owner_cls or type(self).owner_cls
        for entry in owners or []:
            self.add(entry)
class CommunityAccess:
    """Access management per community."""

    VISIBILITY_LEVELS = ('public', 'restricted')
    MEMBER_POLICY_LEVELS = ('open', 'closed')
    RECORD_POLICY_LEVELS = ('open', 'closed', 'restricted')

    owners_cls = _Owners

    def __init__(
        self,
        visibility=None,
        member_policy=None,
        record_policy=None,
        owned_by=None,
        owners_cls=None,
    ):
        """Create a new CommunityAccess object.

        If ``owned_by`` is not specified, a new instance of ``owners_cls``
        will be used.

        :param visibility: The visibility level.
        :param owned_by: The set of community owners
        """
        # Each of these assignments runs through a validating property setter.
        self.visibility = visibility or 'public'
        self.member_policy = member_policy or 'open'
        self.record_policy = record_policy or 'open'
        chosen_owners_cls = owners_cls or self.owners_cls
        self.owned_by = owned_by or chosen_owners_cls()
        self.errors = []

    def _validate_visibility_level(self, level):
        """Return True when *level* is an allowed visibility value."""
        return level in self.VISIBILITY_LEVELS

    def _validate_member_policy_level(self, level):
        """Return True when *level* is an allowed member-policy value."""
        return level in self.MEMBER_POLICY_LEVELS

    def _validate_record_policy_level(self, level):
        """Return True when *level* is an allowed record-policy value."""
        return level in self.RECORD_POLICY_LEVELS

    @property
    def visibility(self):
        """Get the visibility level."""
        return self._visibility

    @visibility.setter
    def visibility(self, value):
        """Set the visibility level."""
        if self._validate_visibility_level(value):
            self._visibility = value
        else:
            raise ValueError(f"Unknown visibility level: {value}")

    @property
    def member_policy(self):
        """Get the member policy level."""
        return self._member_policy

    @member_policy.setter
    def member_policy(self, value):
        """Set the member policy level."""
        if self._validate_member_policy_level(value):
            self._member_policy = value
        else:
            raise ValueError(f"Unknown member policy level: {value}")

    @property
    def record_policy(self):
        """Get the record policy level."""
        return self._record_policy

    @record_policy.setter
    def record_policy(self, value):
        """Set the record policy level."""
        if self._validate_record_policy_level(value):
            self._record_policy = value
        else:
            raise ValueError(f"Unknown record policy level: {value}")

    def dump(self):
        """Dump the field values as dictionary."""
        return dict(
            visibility=self.visibility,
            member_policy=self.member_policy,
            record_policy=self.record_policy,
            owned_by=self.owned_by.dump(),
        )

    def refresh_from_dict(self, access_dict):
        """Re-initialize the Access object with the data in the access_dict."""
        fresh = self.from_dict(access_dict)
        self.visibility = fresh.visibility
        self.member_policy = fresh.member_policy
        self.record_policy = fresh.record_policy
        self.owned_by = fresh.owned_by

    @classmethod
    def from_dict(
        cls,
        access_dict,
        owners_cls=None,
    ):
        """Create a new Access object from the specified 'access' property.

        The new ``CommunityAccess`` object will be populated with new
        instances from the configured classes.
        If ``access_dict`` is empty, the ``Access`` object will be populated
        with new instance ``owners_cls``.
        """
        chosen_owners_cls = owners_cls or cls.owners_cls
        errors = []
        # Defaults in case there is no 'access' property at all.
        owned_by = chosen_owners_cls()
        if access_dict:
            for owner_dict in access_dict.get("owned_by", []):
                try:
                    owned_by.add(owned_by.owner_cls(owner_dict))
                except Exception as exc:
                    errors.append(exc)
        access = cls(
            visibility=access_dict.get("visibility"),
            member_policy=access_dict.get("member_policy"),
            record_policy=access_dict.get("record_policy"),
            owned_by=owned_by,
        )
        access.errors = errors
        return access

    def __repr__(self):
        """Return repr(self)."""
        return (
            f"<{type(self).__name__} (visibility: {self.visibility}, "
            f"member_policy: {self.member_policy}, "
            f"record_policy: {self.record_policy}, "
            f"owners: {self.owned_by})>"
        )
class CommunityAccessField(RecordAccessField):
    """System field for managing community access."""

    def __init__(self, *args, access_obj_class=CommunityAccess, **kwargs):
        """Create a new CommunityAccessField instance."""
        super().__init__(*args, access_obj_class=access_obj_class, **kwargs)

    def obj(self, instance):
        """Return the access object, building and caching it on first use."""
        cached = self._get_cache(instance)
        if cached is None:
            raw = self.get_dictkey(instance)
            if raw:
                cached = self._access_obj_class.from_dict(raw)
            else:
                cached = self._access_obj_class()
            self._set_cache(instance, cached)
        return cached

    # NOTE: The original RecordAccessField dumps some non-existing fields
    def post_dump(self, *args, **kwargs):
        """No-op: skip the parent's post-dump handling."""
        pass

    def pre_load(self, *args, **kwargs):
        """No-op: skip the parent's pre-load handling."""
        pass
|
nilq/baby-python
|
python
|
from datetime import datetime
from typing import List
from fastapi import APIRouter
from utils.database import database
from .models import replies
from .schema import ReplyIn, Reply, LikeIn, Like
# Router collecting all reply-related endpoints; mounted by the main app.
replies_router = APIRouter()
@replies_router.get("/list-for-post/{post_id}/", response_model=List[Reply])
async def list_replies(post_id: int):
    """
    List Reply(ies) for a given Post
    :param post_id: int Post.id
    :return: List[Reply]
    """
    matching = replies.select().where(replies.c.post_id == post_id)
    return await database.fetch_all(query=matching)
@replies_router.post("/add-to-post/", response_model=Reply)
async def create_reply(reply: ReplyIn):
    """Persist a new reply and return it with its id and creation time."""
    now = datetime.utcnow()
    insert_stmt = replies.insert().values(
        post_id=reply.post_id,
        reply_type=reply.reply_type,
        content=reply.content,
        created_at=now,
    )
    new_id = await database.execute(query=insert_stmt)
    return dict(reply.dict(), id=new_id, created_at=now)
@replies_router.post("/add-like/", response_model=Like)
async def add_like(like: LikeIn):
    """Increment the like counter of a reply and return the new count."""
    select_stmt = replies.select().where(replies.c.id == like.reply_id)
    existing = await database.fetch_one(query=select_stmt)
    # A reply that was never liked stores NULL, not 0.
    new_count = (existing.likes_count or 0) + 1
    update_stmt = (
        replies.update()
        .values(likes_count=new_count)
        .where(replies.c.id == like.reply_id)
    )
    await database.execute(query=update_stmt)
    return dict(like.dict(), likes_count=new_count)
|
nilq/baby-python
|
python
|
# Generated by Django 3.0.6 on 2020-05-20 10:05
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: drop the ``images`` field from the ``Land`` model."""
    dependencies = [
        ("resources", "0003_auto_20200520_0825"),
    ]
    operations = [
        migrations.RemoveField(model_name="land", name="images",),
    ]
|
nilq/baby-python
|
python
|
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <isis@torproject.org>
# please also see AUTHORS file
# :copyright: (c) 2007-2013, The Tor Project, Inc.
# (c) 2007-2013, all entities within the AUTHORS file
# :license: 3-clause BSD, see included LICENSE for information
from ._version import get_versions
from ._langs import get_langs
# Package metadata resolved once at import time, then the helper names are
# dropped so they do not leak into the package namespace.
#__version__ = get_versions()['version']
#__version_full__ = get_versions()['full']
__version__ = get_versions()['version']  # versioneer-style version string from ._version
__langs__ = get_langs()  # translation languages available in this build (from ._langs)
del get_versions
del get_langs
|
nilq/baby-python
|
python
|
#-*- coding: utf-8 -*-
'''
Copyright (c) 2016 NSR (National Security Research Institute)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
from .lsh256 import LSH256
from .lsh512 import LSH512
## Wrapper class for the LSH hash functions
class LSHDigest:

	## Create an LSH object matching the given parameters.
	# @param [in] wordlenbits word length in bits; only 256 and 512 are supported
	# @param [in] outlenbits output length in bits; 1 ~ 256 (LSH-256) or 1 ~ 512 (LSH-512)
	# @return LSH object
	@staticmethod
	def getInstance(wordlenbits, outlenbits = None):
		if outlenbits is None:
			outlenbits = wordlenbits
		if wordlenbits == 256:
			return LSH256(outlenbits)
		elif wordlenbits == 512:
			return LSH512(outlenbits)
		else:
			# fixed: removed stray trailing semicolon
			raise ValueError("Unsupported algorithm parameter")

	## One-shot digest helper - compute and return the final hash value.
	# @param [in] wordlenbits word length in bits; must be 256 or 512
	# @param [in] outlenbits output hash length in bits; must be 1 ~ wordlenbits
	# @param [in] data input data
	# @param [in] offset start offset of the data (bytes)
	# @param [in] length data length in bits; presumably -1 means "to the end"
	#             (semantics defined by lsh256/lsh512 — confirm there)
	# @return computed hash value
	@staticmethod
	def digest(wordlenbits, outlenbits = None, data = None, offset = 0, length = -1):
		if outlenbits is None:
			outlenbits = wordlenbits
		lsh = LSHDigest.getInstance(wordlenbits, outlenbits)
		return lsh.final(data, offset, length)
|
nilq/baby-python
|
python
|
# Copyright 2021 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Ranking losses."""
import tensorflow as tf
from delf.python.training.losses import ranking_losses
class RankingLossesTest(tf.test.TestCase):
  """Numeric regression tests for DELF's contrastive and triplet losses."""

  def _embeddings(self, anchor, pos, neg):
    # L2-normalize the raw toy vectors used by every test below.
    return (tf.math.l2_normalize(tf.constant(anchor)),
            tf.math.l2_normalize(tf.constant(pos)),
            tf.math.l2_normalize(tf.constant(neg)))

  def testContrastiveLoss(self):
    # Known loss value for fixed toy embeddings.
    anchor, pos, neg = self._embeddings(
        [[1.0, 2.0, -2.0]], [[-1.0, 2.0, 0.0]], [[[-5.0, 0.0, 3.0]]])
    loss = ranking_losses.contrastive_loss(
        anchor, pos, neg, margin=0.7, eps=1e-6)
    self.assertAllClose(0.55278635, loss)

  def testTripletLossZeroLoss(self):
    # Query-positive distance below query-negative distance -> loss is zero.
    anchor, pos, neg = self._embeddings(
        [[1.0, 2.0, -2.0]], [[-1.0, 2.0, 0.0]], [[[-5.0, 0.0, 3.0]]])
    loss = ranking_losses.triplet_loss(anchor, pos, neg, margin=0.1)
    self.assertAllClose(0.0, loss)

  def testTripletLossNonZeroLoss(self):
    # Query-positive distance above query-negative distance -> positive loss.
    anchor, pos, neg = self._embeddings(
        [[1.0, 2.0, -2.0]], [[-5.0, 0.0, 3.0]], [[[-1.0, 2.0, 0.0]]])
    loss = ranking_losses.triplet_loss(anchor, pos, neg, margin=0.1)
    self.assertAllClose(2.2520838, loss)
# Allow running this test file directly with the TensorFlow test runner.
if __name__ == '__main__':
  tf.test.main()
|
nilq/baby-python
|
python
|
import curve25519
import time
# from urandom import randint

# Self-test script for the curve25519 module using the fixed vectors from
# RFC 8031 Appendix A. NOTE: time.ticks_ms/ticks_diff are MicroPython APIs,
# so this script is presumably meant to run on a microcontroller — confirm.

# Scalar d (clamped X25519 private key), little-endian, padded to 36 bytes.
d = b'\x70\x1f\xb4\x30\x86\x55\xb4\x76\xb6\x78\x9b\x73\x25\xf9\xea\x8c\xdd\xd1\x6a\x58\x53\x3f\xf6\xd9\xe6\x00\x09\x46\x4a\x5f\x9d\x54\x00\x00\x00\x00'
# u-coordinate of the X25519 base point (9), little-endian.
u = b'\x09' + bytes(31)
# v-coordinate paired with u — presumably for y-recovery in test 2; confirm
# against the curve25519 module's x25519_ed signature.
v = b'\xd9\xd3\xce~\xa2\xc5\xe9)\xb2a|m~M=\x92L\xd1Hw,\xdd\x1e\xe0\xb4\x86\xa0\xb8\xa1\x19\xae \x00\x00\x00\x00'
print('Test vectors from https://tools.ietf.org/html/rfc8031#appendix-A')
print('Test 1: X25519: q = d*u')
start = time.ticks_ms() # get millisecond counter
b = curve25519.x25519(d, u)
delta = time.ticks_diff(time.ticks_ms(), start) # compute time difference
print('Computation time: %d ms' % delta)
# Result bytes are little-endian; compare as an integer against the RFC value.
q = int.from_bytes(b, 'little')
print('q [hex/dec] = %x %d' % (q, q))
if q != 0x66c7fb0d9f7090f777fa8493081ce8a4f174dbbbf9a36f16ba571206d4ddd548:
    print('Test 1 failed.')
else:
    print('Test 1 passed.')
print()
print('Test 2: X25519 + y-coordinate recovery + transform to Edwards-curve')
print('(x, y) = Edward(q, r), (q, r) = d*(u, v)')
start = time.ticks_ms() # get millisecond counter
b = curve25519.x25519_ed(d, u, v)
delta = time.ticks_diff(time.ticks_ms(), start) # compute time difference
print('Computation time: %d ms' % delta)
# x25519_ed returns a pair of little-endian coordinate encodings.
x = int.from_bytes(b[0], 'little')
y = int.from_bytes(b[1], 'little')
print('x [hex/dec] = %x %d' % (x, x))
print('y [hex/dec] = %x %d' % (y, y))
if x != 0x1ce7e6e3a747a25352df2d3155f06427ba389769e37755731dead2b54c5cef03 or y != 0x4dd1c7c2001c147333ceedf77ebd48b1100e2a95f88cf1f40d1b74ec7279e657:
    print('Test 2 failed.')
else:
    print('Test 2 passed.')
print()
nilq/baby-python
|
python
|
import torch
import pytest
def test_nll(device):
    """nll_loss is exactly zero when every target's log-probability is zero."""
    from speechbrain.nnet.losses import nll_loss

    log_probs = torch.zeros(4, 10, 8, device=device)
    tgts = torch.zeros(4, 10, device=device)
    lens = torch.ones(4, device=device)
    assert torch.all(torch.eq(nll_loss(log_probs, tgts, lens), 0))
def test_mse(device):
    """mse_loss is 0 for identical tensors and 1 for a unit difference."""
    from speechbrain.nnet.losses import mse_loss

    tgts = torch.ones(4, 10, 8, device=device)
    lens = torch.ones(4, device=device)

    matching = torch.ones(4, 10, 8, device=device)
    assert torch.all(torch.eq(mse_loss(matching, tgts, lens), 0))

    off_by_one = torch.zeros(4, 10, 8, device=device)
    assert torch.all(torch.eq(mse_loss(off_by_one, tgts, lens), 1))
def test_l1(device):
    """l1_loss vanishes when predictions equal targets."""
    from speechbrain.nnet.losses import l1_loss

    matching = torch.ones(4, 10, 8, device=device)
    tgts = torch.ones(4, 10, 8, device=device)
    lens = torch.ones(4, device=device)
    assert torch.all(torch.eq(l1_loss(matching, tgts, lens), 0))
def test_bce_loss(device):
    """bce_loss on zero logits equals log(2) regardless of prediction shape."""
    from speechbrain.nnet.losses import bce_loss

    two = torch.tensor(2.0, device=device)
    tgts = torch.ones(4, 10, device=device)
    lens = torch.ones(4, device=device)

    # Works both with and without a trailing singleton dimension.
    with_singleton = bce_loss(torch.zeros(4, 10, 1, device=device), tgts, lens)
    without_singleton = bce_loss(torch.zeros(4, 10, device=device), tgts, lens)
    assert torch.allclose(torch.exp(with_singleton), two)
    assert torch.allclose(torch.exp(without_singleton), two)

    # One-dimensional targets are accepted as well.
    flat = bce_loss(torch.zeros(5, 1, device=device), torch.ones(5, device=device))
    assert torch.allclose(torch.exp(flat), two)

    # ...but passing lengths is rejected in the 1-D case.
    with pytest.raises(ValueError):
        bce_loss(
            torch.zeros(5, 1, device=device),
            torch.ones(5, device=device),
            length=torch.ones(5, device=device),
        )
def test_classification_error(device):
    """Error is zero when the winning class always matches the target."""
    from speechbrain.nnet.losses import classification_error

    logits = torch.zeros(4, 10, 8, device=device)
    logits[:, :, 0] += 1.0  # class 0 wins at every timestep
    tgts = torch.zeros(4, 10, device=device)  # ...and is always the target
    lens = torch.ones(4, device=device)
    assert torch.all(torch.eq(classification_error(logits, tgts, lens), 0))
def test_pitwrapper(device):
    """PitWrapper must recover the applied source permutation and zero the loss.

    Targets are built by permuting the prediction's source axis, so the
    optimal permutation is known exactly and the optimal loss is zero.
    """
    from speechbrain.nnet.losses import PitWrapper
    from torch import nn
    # (removed redundant in-function `import torch`; torch is module-level)

    base_loss = nn.MSELoss(reduction="none")
    pit = PitWrapper(base_loss)

    predictions = torch.rand(
        (2, 32, 4), device=device
    )  # batch, frames, sources
    p = (3, 0, 2, 1)
    # same but we invert the ordering to check if permutation invariant
    targets = predictions[..., p]
    loss, opt_p = pit(predictions, targets)
    assert all(x == p for x in opt_p)
    predictions = pit.reorder_tensor(predictions, opt_p)
    assert torch.all(torch.eq(base_loss(predictions, targets), 0))

    # Repeat with extra leading dimensions: batch, ..., sources.
    predictions = torch.rand(
        (3, 32, 32, 32, 5), device=device
    )
    p = (3, 0, 2, 1, 4)
    targets = predictions[..., p]
    loss, opt_p = pit(predictions, targets)
    assert all(x == p for x in opt_p)
    predictions = pit.reorder_tensor(predictions, opt_p)
    assert torch.all(torch.eq(base_loss(predictions, targets), 0))
def test_transducer_loss(device):
    """Pin the transducer loss value and gradient path on a tiny example.

    The log-prob tensor literal has shape (batch=1, time=2, target+1=3,
    vocab=5); blank is index 0. The expected scalar was precomputed for
    exactly these inputs.
    """
    # Make this its own test since it can only be run
    # if numba is installed and a GPU is available
    pytest.importorskip("numba")
    if torch.cuda.device_count() == 0:
        pytest.skip("This test can only be run if a GPU is available")
    from speechbrain.nnet.losses import transducer_loss

    # The numba kernel requires CUDA, so ignore the `device` fixture here.
    device = torch.device("cuda")
    log_probs = (
        torch.Tensor(
            [
                [
                    [
                        [0.1, 0.6, 0.1, 0.1, 0.1],
                        [0.1, 0.1, 0.6, 0.1, 0.1],
                        [0.1, 0.1, 0.2, 0.8, 0.1],
                    ],
                    [
                        [0.1, 0.6, 0.1, 0.1, 0.1],
                        [0.1, 0.1, 0.2, 0.1, 0.1],
                        [0.7, 0.1, 0.2, 0.1, 0.1],
                    ],
                ]
            ]
        )
        .to(device)
        .requires_grad_()  # grad enabled so backward() below exercises the kernel
        .log_softmax(dim=-1)
    )
    targets = torch.Tensor([[1, 2]]).to(device).int()
    probs_length = torch.Tensor([1.0]).to(device)
    target_length = torch.Tensor([1.0]).to(device)
    out_cost = transducer_loss(
        log_probs,
        targets,
        probs_length,
        target_length,
        blank_index=0,
        use_torchaudio=False,
    )
    # backward() must not raise; the loss value is pinned exactly.
    out_cost.backward()
    assert out_cost.item() == 2.247833251953125
def test_guided_attention_loss_mask(device):
    """Compare guided_attentions() against a precomputed reference mask.

    Each batch element gets a (max_input_len=6, max_target_len=5) mask built
    from its own (input, output) length pair; rows/columns beyond a sequence's
    lengths are zero padding, as visible in the literal below.
    """
    from speechbrain.nnet.loss.guidedattn_loss import GuidedAttentionLoss
    loss = GuidedAttentionLoss().to(device)
    input_lengths = torch.tensor([3, 2, 6], device=device)
    output_lengths = torch.tensor([4, 3, 5], device=device)
    soft_mask = loss.guided_attentions(input_lengths, output_lengths)
    # Reference values precomputed for exactly these lengths; keep byte-exact.
    ref_soft_mask = torch.tensor(
        [
            [
                [0.0, 0.54216665, 0.9560631, 0.9991162, 0.0],
                [0.7506478, 0.08314464, 0.2933517, 0.8858382, 0.0],
                [0.9961341, 0.8858382, 0.2933517, 0.08314464, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0],
            ],
            [
                [0.0, 0.7506478, 0.9961341, 0.0, 0.0],
                [0.9560631, 0.2933517, 0.2933517, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0],
            ],
            [
                [0.0, 0.39346933, 0.86466473, 0.988891, 0.99966455],
                [0.2933517, 0.01379288, 0.49366438, 0.90436554, 0.993355],
                [0.7506478, 0.1992626, 0.05404053, 0.5888877, 0.93427145],
                [0.9560631, 0.6753475, 0.1175031, 0.1175031, 0.6753475],
                [0.9961341, 0.93427145, 0.5888877, 0.05404053, 0.1992626],
                [0.9998301, 0.993355, 0.90436554, 0.49366438, 0.01379288],
            ],
        ],
        device=device,
    )
    assert torch.allclose(soft_mask, ref_soft_mask)
def test_guided_attention_loss_value(device):
    """Pin the scalar loss for a small hand-written alignment batch.

    Alignment rows beyond each sequence's target length are zero padding;
    the expected value 0.1142 was precomputed for these alignments/lengths.
    """
    from speechbrain.nnet.loss.guidedattn_loss import GuidedAttentionLoss
    loss = GuidedAttentionLoss().to(device)
    input_lengths = torch.tensor([2, 3], device=device)
    target_lengths = torch.tensor([3, 4], device=device)
    alignments = torch.tensor(
        [
            [
                [0.8, 0.2, 0.0],
                [0.4, 0.6, 0.0],
                [0.2, 0.8, 0.0],
                [0.0, 0.0, 0.0],
            ],
            [
                [0.6, 0.2, 0.2],
                [0.1, 0.7, 0.2],
                [0.3, 0.4, 0.3],
                [0.2, 0.3, 0.5],
            ],
        ],
        device=device,
    )
    loss_value = loss(alignments, input_lengths, target_lengths)
    ref_loss_value = torch.tensor(0.1142)
    # Loose tolerance: the reference is only quoted to 4 decimal places.
    assert torch.isclose(loss_value, ref_loss_value, 0.0001, 0.0001).item()
def test_guided_attention_loss_shapes(device):
    """guided_attentions() honors explicit max input/target length overrides."""
    from speechbrain.nnet.loss.guidedattn_loss import GuidedAttentionLoss

    loss = GuidedAttentionLoss().to(device)
    in_lens = torch.tensor([3, 2, 6], device=device)
    out_lens = torch.tensor([4, 3, 5], device=device)

    # (kwargs, expected mask shape): defaults come from the batch maxima.
    cases = [
        (dict(), (3, 6, 5)),
        (dict(max_input_len=10), (3, 10, 5)),
        (dict(max_target_len=12), (3, 6, 12)),
        (dict(max_input_len=10, max_target_len=12), (3, 10, 12)),
    ]
    for kwargs, expected in cases:
        mask = loss.guided_attentions(in_lens, out_lens, **kwargs)
        assert mask.shape == expected
|
nilq/baby-python
|
python
|
def compute_tax(amount, married):
    """Return the tax owed on ``amount`` (taxable income, in $).

    Three-band schedule: 10% on the lowest band, 15% on the middle band and
    25% above it; every band threshold is doubled for married filers.

    :param amount: taxable income
    :param married: True for the married-filer thresholds
    :return: tax owed (float)
    """
    if married:
        if amount > 64000:
            return 8800 + (amount - 64000) * .25
        if amount > 16000:
            return 1600 + (amount - 16000) * .15
        return amount * .10
    if amount > 32000:
        return 4400 + (amount - 32000) * .25
    if amount > 8000:
        return 800 + (amount - 8000) * .15
    return amount * .10


def main():
    """Prompt for income and marital status, then print the tax due."""
    amount = int(input("Inserire il reddito imponibile: "))
    married = input("Sei coniugato? [y/N]: ") == "y"
    print(f"Le tasse sono {compute_tax(amount, married):.2f}$")


if __name__ == "__main__":
    # Guarding the I/O lets the module be imported (e.g. for testing)
    # without prompting the user.
    main()
|
nilq/baby-python
|
python
|
# Source Generated with Decompyle++
# File: device_parameter_component.pyc (Python 2.5)
from __future__ import absolute_import
from ableton.v2.control_surface.control import ControlList
from pushbase.device_parameter_component import DeviceParameterComponentBase
from mapped_control import MappedControl
class DeviceParameterComponent(DeviceParameterComponentBase):
    """Maps up to eight encoder controls onto the current device parameters."""

    controls = ControlList(MappedControl, 8)

    def set_parameter_controls(self, encoders):
        """Attach the hardware encoders and (re)connect them to parameters."""
        self.controls.set_control_element(encoders)
        self._connect_parameters()

    def _connect_parameters(self):
        """Bind each control to a parameter and push encoder sensitivities.

        NOTE(review): the original body was corrupted by the decompiler
        (dangling ``pass``/``1`` statements and an unconditional
        ``parameter = None``); this reconstruction restores the obvious
        conditional -- confirm against a pristine copy of the script.
        """
        parameters = self._parameter_provider.parameters[:self.controls.control_count]
        # Python 2 idiom: map(None, a, b) zips with None padding, so surplus
        # controls are paired with parameter_info=None and get unmapped below.
        for (control, parameter_info) in map(None, self.controls, parameters):
            parameter = parameter_info.parameter if parameter_info else None
            control.mapped_parameter = parameter
            if parameter:
                control.update_sensitivities(parameter_info.default_encoder_sensitivity, parameter_info.fine_grain_encoder_sensitivity)
|
nilq/baby-python
|
python
|
import codecs
from luadata.serializer.unserialize import unserialize
def read(path, encoding="utf-8", multival=False):
    """Read luadata from file

    Args:
        path (str): file path
        encoding (str, optional): file encoding. Defaults to "utf-8".
        multival (bool, optional): forwarded to ``unserialize``; allow
            multiple return values. Defaults to False.

    Returns:
        tuple([*]): unserialized data from luadata file
    """
    with codecs.open(path, "r", encoding) as file:
        text = file.read().strip()
    # Strip a leading "return" keyword, but only when it is a whole word
    # (i.e. not the prefix of an identifier such as "returnvalue").
    if text[0:6] == "return":
        ch = text[6:7]
        if not (
            (ch >= "a" and ch <= "z")
            or (ch >= "A" and ch <= "Z")
            or (ch >= "0" and ch <= "9")
            or ch == "_"
        ):
            text = text[6:]
    # BUG FIX: the caller's ``multival`` flag was previously ignored and
    # hard-coded to False; forward it to the parser.
    return unserialize(text, encoding=encoding, multival=multival)
|
nilq/baby-python
|
python
|
# This is to test the conditions in python
# Demo for if and elif
number = int(input("Please enter a number to check\n"))
if number < 100:
    # fixed user-facing typo: "less that" -> "less than"
    print("the number is less than 100")
elif number == 100:
    print("the number is equal to 100")
else:
    print("number is more than 100\n")

# Iterate over a list of city names.
city = ['Tokyo', 'New York', 'Toronto', 'Hong Kong']
for name in city:
    print('City: ' + name)

print('\n') # newline

# Print the square of each number from 1 to 9.
num = [1, 2, 3, 4, 5, 6, 7, 8, 9]
print('x^2 loop:')
for x in num:
    y = x * x
    print(str(x) + '*' + str(x) + '=' + str(y))
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# Han Xiao <artex.xh@gmail.com> <https://hanxiao.github.io>
import multiprocessing
import os
import random
import sys
import threading
import time
from collections import defaultdict
from datetime import datetime
from itertools import chain
from multiprocessing import Process
from multiprocessing.pool import Pool
import numpy as np
import zmq
import zmq.decorators as zmqd
from termcolor import colored
from zmq.utils import jsonapi
from .helper import *
from .protocol import *
from .http import BertHTTPProxy
from .zmq_decor import multi_socket
from .postsink import WKRSink
from .hard_worker import WKRHardWorker
from .statistic import ServerStatistic
# Public API of this module.
__all__ = ['__version__', 'WKRServer', 'WKRHardWorker']
# Server version string, echoed back in status/config responses.
__version__ = '1.0.0-a'
class WKRServer(threading.Thread):
    """Front-end "navigator" thread of a ZeroMQ worker pipeline.

    Accepts client requests on a PULL socket, fans jobs out to worker
    processes over a pool of PUSH sockets, and relies on a WKRSink process
    to collect results. Use as a context manager to block until every child
    process reports ready.
    """

    def __init__(self, args, hardprocesser=WKRHardWorker):
        """Store configuration and validate the worker implementation.

        :param args: parsed CLI namespace (port, num_worker, device_map, ...)
        :param hardprocesser: WKRHardWorker subclass that does the real work
        :raises AssertionError: if hardprocesser is not a WKRHardWorker subclass
        """
        super().__init__()
        self.hardprocessor_skeleton = hardprocesser
        if not issubclass(self.hardprocessor_skeleton, WKRHardWorker):
            raise AssertionError('hardprocesser must inherit from class WKRHardWorker')
        self.model_dir = args.model_dir
        self.num_worker = args.num_worker
        self.device_map = args.device_map
        self.gpu_memory_fraction = args.gpu_memory_fraction
        self.all_cpu = args.cpu
        # At least 8 backend PUSH sockets, scaled with the worker count.
        self.num_concurrent_postsocket = max(8, args.num_worker * 2)
        self.batch_size = args.batch_size
        self.total_concurrent_socket = self.num_concurrent_postsocket
        self.port = args.port
        self.args = args
        self.transfer_protocol = args.protocol
        # Static payloads merged into ServerCmd.show_config responses.
        self.status_args = {k: v for k, v in sorted(vars(args).items())}
        self.status_static = {
            'python_version': sys.version,
            'server_version': __version__,
            'pyzmq_version': zmq.pyzmq_version(),
            'zmq_version': zmq.zmq_version(),
            'server_start_time': str(datetime.now()),
        }
        # Child processes (sink, workers, optional HTTP proxy); see _run().
        self.processes = []
        self.logdir = args.log_dir
        self.logger = set_logger(colored('NAVIGATOR', 'red'), logger_dir=self.logdir, verbose=args.verbose)
        self.logger.info('freeze, optimize and export graph, could take a while...')
        # Set in _run() once every child process signals readiness.
        self.is_ready = threading.Event()

    def __enter__(self):
        # Start the thread and block until the whole pipeline is up.
        self.start()
        self.is_ready.wait()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    def close(self):
        """Send the terminate command to our own frontend and join the thread."""
        self.logger.info('shutting down...')
        self._send_close_signal()
        self.is_ready.clear()
        self.join()

    @zmqd.context()
    @zmqd.socket(zmq.PUSH)
    def _send_close_signal(self, _, frontend):
        # Deliver a 4-frame terminate message to the local frontend socket;
        # the ctx/socket arguments are injected by the zmq decorators.
        frontend.connect('tcp://localhost:%d' % self.port)
        frontend.send_multipart([b'', ServerCmd.terminate, b'', b''])

    @staticmethod
    def shutdown(args):
        """Send a terminate command to a (possibly remote) server.

        :param args: namespace with ip, port and timeout (ms)
        :raises TimeoutError: if the server does not accept the message in time
        """
        with zmq.Context() as ctx:
            ctx.setsockopt(zmq.LINGER, args.timeout)
            with ctx.socket(zmq.PUSH) as frontend:
                try:
                    frontend.connect('tcp://%s:%d' % (args.ip, args.port))
                    frontend.send_multipart([b'', ServerCmd.terminate, b'', b''])
                    print('shutdown signal sent to %d' % args.port)
                except zmq.error.Again:
                    raise TimeoutError(
                        'no response from the server (with "timeout"=%d ms), please check the following:'
                        'is the server still online? is the network broken? are "port" correct? ' % args.timeout)

    def run(self):
        # Thread entry point; the real body lives in the decorated _run().
        self._run()

    @zmqd.context()
    @zmqd.socket(zmq.PULL)
    @zmqd.socket(zmq.PAIR)
    @multi_socket(zmq.PUSH, num_socket='total_concurrent_socket')
    def _run(self, _, frontend, sink, *backend_socks):
        """Main event loop: spawn children, then route client requests.

        Sockets are injected by the decorators: `frontend` receives client
        requests, `sink` pairs with the WKRSink process, and `backend_socks`
        push jobs to workers.
        """
        def push_new_job(client, req_id, msg_raw, msg_info_raw):
            # Forward the raw frames to the backend socket chosen below.
            _sock = rand_backend_socket
            send_to_next_raw(client, req_id, msg_raw, msg_info_raw, _sock)

        # bind all sockets
        self.logger.info('bind all sockets')
        frontend.bind('tcp://*:%d' % self.port)
        addr_front2sink = auto_bind(sink)
        addr_backend_post_list = [auto_bind(b) for b in backend_socks]
        self.logger.info('open %d worker sockets' % len(addr_backend_post_list))
        # start the sink process
        self.logger.info('start the sink')
        proc_postsink = WKRSink(self.args, addr_front2sink, addr_backend_post_list)
        self.processes.append(proc_postsink)
        proc_postsink.start()
        # The sink reports its own bound address back over the PAIR socket.
        addr_sink = sink.recv().decode('ascii')
        # start the post-backend processes
        # WaveWorker: self, id, args, worker_address_list, sink_address, device_id
        self.logger.info('start main-workers')
        device_map_main_worker = self._get_device_map(self.num_worker, self.device_map, self.gpu_memory_fraction, run_all_cpu=self.all_cpu)
        for idx, device_id in enumerate(device_map_main_worker):
            process = self.hardprocessor_skeleton(idx, self.args, addr_backend_post_list, addr_sink, device_id)
            self.processes.append(process)
            process.start()
            # process.is_ready.wait()  # start model sequencely
        # start the http-service process
        if self.args.http_port:
            self.logger.info('start http proxy')
            proc_proxy = BertHTTPProxy(self.args)
            self.processes.append(proc_proxy)
            proc_proxy.start()
        rand_backend_socket = None
        server_status = ServerStatistic()
        # Block until every child (sink, workers, proxy) signals readiness.
        for p in self.processes:
            p.is_ready.wait()
        self.is_ready.set()
        self.logger.info('all set, ready to serve request!')
        while True:
            try:
                # Every request is expected to be exactly 4 frames.
                request = frontend.recv_multipart()
                client, req_id, msg, msg_info = request
                # client, req_id, msg, msg_info = recv_from_prev(self.transfer_protocol, frontend)
                # request = [client, msg, req_id, msg_info]
            except (ValueError, AssertionError):
                # NOTE(review): assumes the unpack above is the only raiser —
                # if recv_multipart itself raised, `request` would be unbound.
                self.logger.error('received a wrongly-formatted request (expected 4 frames, got %d)' % len(request))
                self.logger.error('\n'.join('field %d: %s' % (idx, k) for idx, k in enumerate(request)), exc_info=True)
            else:
                server_status.update(request)
                if msg == ServerCmd.terminate:
                    break
                elif msg == ServerCmd.show_config:
                    self.logger.info('new config request\treq id: %d\tclient: %s' % (int(req_id), client))
                    status_runtime = {'client': client.decode('ascii'),
                                      'num_process': len(self.processes),
                                      'navigator -> worker': addr_backend_post_list,
                                      'worker -> sink': addr_sink,
                                      'server_current_time': str(datetime.now()),
                                      'statistic': server_status.value,
                                      'main_device_map': device_map_main_worker,
                                      'main_batch_size': self.batch_size,
                                      'protocol': self.transfer_protocol,
                                      'num_concurrent_socket': self.total_concurrent_socket}
                    sink.send_multipart([client, msg, jsonapi.dumps({**status_runtime,
                                                                     **self.status_args,
                                                                     **self.status_static}), req_id])
                else:
                    self.logger.info('new encode request\treq id: %s\tclient: %s' %
                                     (str(req_id), client))
                    # regist job
                    sink.send_multipart([client, ServerCmd.new_job, jsonapi.dumps({'job_parts': '1', 'split_info': {}}), to_bytes(req_id)])
                    # pick random socket (never the same one twice in a row)
                    rand_backend_socket = random.choice([b for b in backend_socks if b != rand_backend_socket])
                    # info = jsonapi.loads(msg_info)
                    # if self.transfer_protocol == 'obj':
                    #     msg = decode_object(msg, info)
                    # else:
                    #     msg = decode_ndarray(msg, info)
                    # push job
                    push_new_job(client, req_id, msg, msg_info)
        # Terminate received: shut down every child process.
        for p in self.processes:
            p.close()
        self.logger.info('terminated!')

    def _get_device_map(self, num_worker, device_map_raw, per_process_gpu_fragment, run_all_cpu=False):
        """Assign a device id to each worker (-1 means CPU).

        Uses GPUtil (if importable) to find free GPUs; falls back to CPU when
        none are available, nvidia-smi is missing, or run_all_cpu is set.

        :return: list of length num_worker with GPU ids or -1 for CPU
        """
        self.logger.info('get devices map')
        run_on_gpu = False
        device_map = [-1] * num_worker
        if not run_all_cpu:
            try:
                import GPUtil
                num_all_gpu = len(GPUtil.getGPUs())
                avail_gpu = GPUtil.getAvailable(order='memory', limit=min(num_all_gpu, num_worker),
                                                maxMemory=0.9, maxLoad=0.9)
                num_avail_gpu = len(avail_gpu)

                if num_avail_gpu >= num_worker:
                    run_on_gpu = True
                elif 0 < num_avail_gpu < num_worker:
                    self.logger.warning('only %d out of %d GPU(s) is available/free, but "num_worker=%d"' %
                                        (num_avail_gpu, num_all_gpu, num_worker))
                    if not device_map_raw:
                        self.logger.warning('multiple workers will be allocated to one GPU, '
                                            'may not scale well and may raise out-of-memory')
                    else:
                        self.logger.warning('workers will be allocated based on "-device_map=%s", '
                                            'may not scale well and may raise out-of-memory' % device_map_raw)
                    run_on_gpu = True
                else:
                    self.logger.warning('no GPU available, fall back to CPU')

                if run_on_gpu:
                    # Cycle the (explicit or detected) GPU list to cover all workers.
                    device_map = ((device_map_raw or avail_gpu) * num_worker)[: num_worker]
            except FileNotFoundError:
                self.logger.warning('nvidia-smi is missing, often means no gpu on this machine. '
                                    'fall back to cpu!')
        self.logger.info('device map: \n\t\t%s' % '\n\t\t'.join(
            'worker %2d -> %s' % (w_id, ('gpu %2d' % g_id) if g_id >= 0 else 'cpu') for w_id, g_id in
            enumerate(device_map)))
        return device_map
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.