code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
from crispy_forms.layout import (
BaseInput, ButtonHolder, Div, Field,
Fieldset, HTML, Hidden, Layout, MultiField,
MultiWidgetField
)
from crispy_forms.utils import TEMPLATE_PACK, render_field
__all__ = [
# Defined in this file
"Button", "Column", "IconField", "Reset", "Row", "Submit", "UploadField",
# Imported from CrispyForms itself
"ButtonHolder", "Div", "Field", "Fieldset", "Hidden", "HTML", "Layout",
"MultiField", "MultiWidgetField",
]
class UploadField(Field):
    """Crispy ``Field`` that always renders with Bulma's ``file-input`` class.

    Any ``css_class`` supplied by the caller is preserved; ``file-input`` is
    appended to it rather than replacing it.
    """

    def __init__(self, *args, **kwargs):
        # Append to an existing css_class entry, or introduce one.
        kwargs['css_class'] = (
            kwargs['css_class'] + " file-input"
            if 'css_class' in kwargs
            else "file-input"
        )
        super().__init__(*args, **kwargs)
class Submit(BaseInput):
    """
    Used to create a Submit button descriptor for the {% crispy %} template tag.
    >>> submit = Submit("Search the Site", "search this site")
    The first argument is also slugified and turned into the id for the submit button.
    """
    # Bulma primary-button styling applied to the rendered <input type="submit">.
    field_classes = "button is-primary"
    input_type = "submit"
class Button(BaseInput):
    """
    Used to create a Button input descriptor for the {% crispy %} template tag.
    >>> button = Button("Button 1", "Press Me!")
    The first argument is also slugified and turned into the id for the button.
    """
    # Plain Bulma button styling for the rendered <input type="button">.
    field_classes = "button"
    input_type = "button"
class Reset(BaseInput):
    """
    Used to create a Reset button input descriptor for the {% crispy %} template tag.
    >>> reset = Reset("Reset This Form", "Revert Me!")
    The first argument is also slugified and turned into the id for the button.
    """
    # Bulma text-style button for the rendered <input type="reset">.
    field_classes = "button is-text"
    input_type = "reset"
class Row(Div):
    """
    Layout object. It wraps fields in a div whose default class is "columns".
    >>> Row("form_field_1", "form_field_2", "form_field_3")
    """
    # Bulma's horizontal container class.
    css_class = "columns"
class Column(Div):
    """
    Layout object. It wraps fields in a div whose default class is "column".
    >>> Column("form_field_1", "form_field_2")
    """
    # Bulma's column cell class (child of a "columns" container).
    css_class = "column"
class IconField(Field):
template = "%s/layout/input_with_icon.html"
    def __init__(self, field, icon_prepend=None, icon_append=None, *args, **kwargs):
        # ``field`` is kept aside (not forwarded to Field.__init__) because
        # render() passes it to render_field() directly.
        self.field = field
        # Icon identifiers surfaced to the template as extra context;
        # presumably icon CSS class names — TODO confirm against the template.
        self.icon_prepend = icon_prepend
        self.icon_append = icon_append
        super().__init__(*args, **kwargs)
def render(self, form, form_style, context, template_pack=TEMPLATE_PACK,
extra_context=None, **kwargs):
extra_context = extra_context.copy() if extra_context is not None else {}
extra_context.update({
"icon_prepend": self.icon_prepend,
"icon_append": self.icon_append,
})
template = self.get_template_name(template_pack)
return render_field(
self.field, form, form_style, context,
template=template,
template_pack=template_pack, extra_context=extra_context, **kwargs
) | django_crispy_bulma/layout.py | from crispy_forms.layout import (
BaseInput, ButtonHolder, Div, Field,
Fieldset, HTML, Hidden, Layout, MultiField,
MultiWidgetField
)
from crispy_forms.utils import TEMPLATE_PACK, render_field
__all__ = [
# Defined in this file
"Button", "Column", "IconField", "Reset", "Row", "Submit", "UploadField",
# Imported from CrispyForms itself
"ButtonHolder", "Div", "Field", "Fieldset", "Hidden", "HTML", "Layout",
"MultiField", "MultiWidgetField",
]
class UploadField(Field):
def __init__(self, *args, **kwargs):
if 'css_class' in kwargs:
kwargs['css_class'] += " file-input"
else:
kwargs['css_class'] = "file-input"
super().__init__(*args, **kwargs)
class Submit(BaseInput):
"""
Used to create a Submit button descriptor for the {% crispy %} template tag.
>>> submit = Submit("Search the Site", "search this site")
The first argument is also slugified and turned into the id for the submit button.
"""
field_classes = "button is-primary"
input_type = "submit"
class Button(BaseInput):
"""
Used to create a Submit input descriptor for the {% crispy %} template tag.
>>> button = Button("Button 1", "Press Me!")
The first argument is also slugified and turned into the id for the button.
"""
field_classes = "button"
input_type = "button"
class Reset(BaseInput):
"""
Used to create a Reset button input descriptor for the {% crispy %} template tag.
>>> reset = Reset("Reset This Form", "Revert Me!")
The first argument is also slugified and turned into the id for the button.
"""
field_classes = "button is-text"
input_type = "reset"
class Row(Div):
"""
Layout object. It wraps fields in a div whose default class is "columns".
>>> Row("form_field_1", "form_field_2", "form_field_3")
"""
css_class = "columns"
class Column(Div):
"""
Layout object. It wraps fields in a div whose default class is "column".
>>> Column("form_field_1", "form_field_2")
"""
css_class = "column"
class IconField(Field):
template = "%s/layout/input_with_icon.html"
def __init__(self, field, icon_prepend=None, icon_append=None, *args, **kwargs):
self.field = field
self.icon_prepend = icon_prepend
self.icon_append = icon_append
super().__init__(*args, **kwargs)
def render(self, form, form_style, context, template_pack=TEMPLATE_PACK,
extra_context=None, **kwargs):
extra_context = extra_context.copy() if extra_context is not None else {}
extra_context.update({
"icon_prepend": self.icon_prepend,
"icon_append": self.icon_append,
})
template = self.get_template_name(template_pack)
return render_field(
self.field, form, form_style, context,
template=template,
template_pack=template_pack, extra_context=extra_context, **kwargs
) | 0.653569 | 0.151749 |
def get_H_matrix(a, b, c, alpha, beta, gamma):
    """Build the 3x3 cell matrix H from edge lengths a, b, c and cell
    angles alpha, beta, gamma (in radians).

    Columns map fractional (crystal) coordinates to Cartesian coordinates.
    """
    sin_a, cos_a = np.sin(alpha), np.cos(alpha)
    sin_b, cos_b = np.sin(beta), np.cos(beta)
    sin_g, cos_g = np.sin(gamma), np.cos(gamma)
    return np.matrix([
        [a * sin_b, b * sin_a * cos_g, 0.],
        [0.,        b * sin_a * sin_g, 0.],
        [a * cos_b, b * cos_a,         c],
    ])
def replicate(coordinates, abc, lmn, alpha=90., gamma=90., beta=90.):
    '''
    Replicate a set of atomic coordinates over an lmn supercell.

    :param coordinates: data type read in by xyz or pdb (dict with 'atoms' and 'coords')
    :param abc: box dimensions (a, b, c)
    :param lmn: integer number of cells in each dimension
    :param alpha: cell angle in degrees (default 90, orthorhombic)
    :param gamma: cell angle in degrees (default 90)
    :param beta: cell angle in degrees (default 90)
    :return: dict with replicated 'atoms', 'coords' and scaled 'box info'
    '''
    a, b, c = abc
    replicated = {'atoms': [], 'coords': []}
    replicated['box info'] = {'a': a*lmn[0], 'b': b*lmn[1], 'c': c*lmn[2],
                              'alpha': alpha, 'gamma': gamma, 'beta': beta}
    # Angles arrive in degrees; the trig in get_H_matrix needs radians.
    alpha = alpha/180.*np.pi
    beta = beta/180.*np.pi
    gamma = gamma/180.*np.pi
    H = get_H_matrix(a, b, c, alpha, beta, gamma)
    # Perf fix: the inverse is loop-invariant but was recomputed for every
    # atom in every cell; compute it once up front.
    H_inv = np.linalg.inv(H)
    for xcell in range(lmn[0]):
        for ycell in range(lmn[1]):
            for zcell in range(lmn[2]):
                # Integer cell offset in fractional coordinates.
                shift = np.matrix([[xcell], [ycell], [zcell]])
                for atom, xyz in zip(coordinates['atoms'], coordinates['coords']):
                    # Cartesian -> fractional, translate by the cell offset,
                    # then map back to Cartesian.
                    xyz_cryst = H_inv * np.vstack(xyz)
                    new_xyz = (H * (xyz_cryst + shift)).tolist()
                    replicated['atoms'].append(atom)
                    replicated['coords'].append(
                        [new_xyz[0][0], new_xyz[1][0], new_xyz[2][0]])
    return replicated
def replicate_mol(molecule_coords, abc, lmn):
    """Return one translated copy of the molecule per cell of an
    lmn orthorhombic supercell with edge lengths abc.

    Each bead dict keeps its charge 'q'; its 'xyz' string is re-emitted
    in '%e'-formatted, space-separated form.
    """
    a, b, c = abc
    copies = []
    for i in range(lmn[0]):  # start from 1 so don't duplicate
        for j in range(lmn[1]):
            for k in range(lmn[2]):
                # Build ONE translated molecule for this cell.
                shifted = []
                for bead in molecule_coords:
                    x, y, z = (float(v) for v in bead['xyz'].split())
                    moved = (x + i*a, y + j*b, z + k*c)
                    shifted.append({
                        'xyz': ' '.join('%e' % v for v in moved),
                        'q': bead['q'],
                    })
                copies.append(shifted)
    return copies
def replicate_file(parent_parser):
    """CLI entry point: replicate an .xyz/.pdb coordinate file over the
    supercell given by the parent parser's --replicate option, then write
    the result as a PDB file."""
    parser = argparse.ArgumentParser(description='replicate struxture file',
                                     parents=[parent_parser])
    parser.add_argument('-abc', '--boxlengths', type=float, nargs='+',
                        help='dimensions of orthorhombic box')
    parser.add_argument('-f', '--file', type=str,
                        help='file to replicate')
    parser.add_argument('-w', '--writeFile', type=str, default='replicated.xyz',
                        help='new file name')
    args = vars(parser.parse_args())
    assert len(args['boxlengths']) == 3, '3 dimensions needed for box lengths'
    source = args['file']
    # Dispatch on extension; only xyz and pdb readers exist.
    if '.xyz' in source:
        coordinates = reader.xyz(source)
    elif '.pdb' in source:
        coordinates = reader.PDB(source)
    else:
        raise TypeError('File type not known')
    writer.PDB(args['writeFile'],
               replicate(coordinates, args['boxlengths'], args['replicate']))
def replicate_box(parent_parser):
    """CLI entry point: replicate every molecule found in one simulation box
    of a Monte Carlo restart file and update the matching input file so
    molecule and chain counts stay consistent.

    Writes ``fort.77.<extension>`` and ``fort.4.<extension>`` next to the
    originals; the original files are left untouched.
    """
    import copy
    parser = argparse.ArgumentParser(description='replicate box',parents=[parent_parser])
    parser.add_argument('-b','--box',help='box to replicate',type=str)
    parser.add_argument('-r','--restart',help='restart file',type=str)
    parser.add_argument('-p','--path',help='path to main directories',
                        type=str,default=os.getcwd())
    parser.add_argument('-f','--input',help='input file (fort.4)',type=str)
    parser.add_argument('-w','--extension',help='new file extension', type=str,
                        default='replicated')
    args = vars(parser.parse_args())
    assert args['input'], parser.print_help()
    input_data = reader.read_fort4(args['input'])
    nmolty = int(input_data['&mc_shared']['nmolty'])
    restart_data = reader.read_restart(args['restart'],
                                       nmolty,
                                       int(input_data['&mc_shared']['nbox']))
    # Per-molecule-type count of newly added molecules, keyed by type as str.
    mol_added = {'%i'%i:0 for i in range(1,nmolty+1)}
    box_dimensions = list(map(float,restart_data['box dimensions']['box%s'%args['box']].split()))
    new_restart_data = copy.deepcopy(restart_data)
    new_input_data = copy.deepcopy(input_data)
    # Rebuild these lists from scratch; everything else is kept from the copy.
    for key in 'mol types', 'box types', 'coords':
        new_restart_data[key] = []
    for c, ibox in enumerate(restart_data['box types']):
        imolty = restart_data['mol types'][c]
        if ibox == args['box']:
            # The original molecule reappears in replicate_mol's output,
            # so pre-decrement to avoid counting it as "added".
            mol_added[imolty] -= 1 # dont double count
            # we need to replicate these coordinates
            new_mol = replicate_mol(restart_data['coords'][c], box_dimensions, args['replicate'])
            # keep track of added mols
            for m in range(len(new_mol)):
                new_restart_data['mol types'].append(imolty)
                new_restart_data['box types'].append(ibox)
                new_restart_data['coords'].append(new_mol[m])
                mol_added[imolty] += 1
        else:
            # keep molecules in other boxes unchanged
            for key in 'mol types', 'box types', 'coords':
                new_restart_data[key].append( restart_data[key][c] )
    # tell how many mols added
    total_added = sum(mol_added.values())
    old_nchain = int(input_data['&mc_shared']['nchain'])
    new_nchain = int(new_input_data['&mc_shared']['nchain']) + total_added
    # nchain values are stored as strings; patch via substring replacement
    # to preserve any surrounding formatting in the raw field.
    new_input_data['&mc_shared']['nchain'] = input_data['&mc_shared']['nchain'].replace(
        '%i'%old_nchain,'%i'%new_nchain
    )
    new_restart_data['nchain'] = restart_data['nchain'].replace(
        '%i'%old_nchain,'%i'%new_nchain
    )
    # Bump each molecule-type count in the replicated box and report.
    for mol, nAdded in mol_added.items():
        nOld = int(new_input_data['SIMULATION_BOX']['box%s'%args['box']]['mol%s'%mol])
        new_input_data['SIMULATION_BOX']['box%s'%args['box']]['mol%s'%mol] = '%i'%(nOld+nAdded)
        molID = input_data['MOLECULE_TYPE']['mol%s'%mol].split('\n')[1].split()[0]
        print('For input dir %s , %i molecules of type %s added '%(
            args['input'][:args['input'].rfind('/')], nAdded,molID)
        )
    # Write the new restart and input files next to their source files,
    # tagged with the chosen extension.
    for func, inputName, newData, fortNum in zip( [writer.write_restart,writer.write_fort4],
                                                  [args['restart'],args['input']],
                                                  [new_restart_data, new_input_data],
                                                  ['fort.77.','fort.4.']):
        new_file_name = '%s'%args['path']
        if '/' in inputName:
            new_file_name += '/%s'%inputName[:inputName.rfind('/')]
        new_file_name += '/%s%s'%(fortNum, args['extension'])
        func(newData, new_file_name)
from MCFlow.file_formatting import reader
from MCFlow.file_formatting import writer
import os, argparse
import numpy as np
if __name__ == '__main__':
parent_parser = argparse.ArgumentParser(add_help=False)
parent_parser.add_argument('-t','--type',choices=['replicate_box','replicate_file'])
parent_parser.add_argument('-lmn','--replicate',help='integer number of cells in each dimension '
'(1 does no replication)',
type=int, nargs='+')
replicate_box(parent_parser) | mcflow/file_formatting/replicate.py | def get_H_matrix(a,b,c,alpha,beta,gamma):
return np.matrix([[a*np.sin(beta), b*np.sin(alpha)*np.cos(gamma),0.],
[0., b*np.sin(alpha)*np.sin(gamma), 0.],
[a*np.cos(beta), b*np.cos(alpha), c]])
def replicate(coordinates, abc, lmn, alpha=90.,gamma=90.,beta=90.):
'''
:param coordinates: data type read in by xyz or pdb
:param abc: box dimensions
:param lmn: integer number of cells in each dimension
:return:
'''
a, b, c = abc
replicated = {'atoms':[], 'coords':[]}
replicated['box info'] = {'a':a*lmn[0],'b':b*lmn[1],'c':c*lmn[2],
'alpha':alpha,'gamma':gamma,'beta':beta}
alpha = alpha/180.*np.pi
beta = beta/180.*np.pi
gamma = gamma/180.*np.pi
H = get_H_matrix(a,b,c,alpha,beta,gamma)
for xcell in range(lmn[0]):
for ycell in range(lmn[1]):
for zcell in range(lmn[2]):
for atom, xyz in zip(coordinates['atoms'], coordinates['coords']):
xyz_cryst = np.linalg.inv(H)*np.vstack(xyz)
xyz_cryst_new = xyz_cryst + np.matrix([[xcell],[ycell],[zcell]])
new_xyz = H*xyz_cryst_new
new_xyz = new_xyz.tolist()
new_xyz = [new_xyz[0][0], new_xyz[1][0], new_xyz[2][0]]
replicated['atoms'].append( atom )
replicated['coords'].append( new_xyz )
return replicated
def replicate_mol(molecule_coords, abc, lmn):
a,b,c = abc
replicated = []
for xcell in range(lmn[0]): # start from 1 so don't duplicate
for ycell in range(lmn[1]):
for zcell in range(lmn[2]):
# make ONE new molecule each time here
new_mol_coords = []
for bead in molecule_coords:
x, y, z = map(float,bead['xyz'].split())
new_xyz = [x + xcell*a, y + ycell*b, z + zcell*c]
new_mol_coords.append({'xyz':' '.join(['%e'%k for k in new_xyz]),'q':bead['q']})
replicated.append(new_mol_coords)
return replicated
def replicate_file(parent_parser):
parser = argparse.ArgumentParser(description='replicate struxture file',parents=[parent_parser])
parser.add_argument('-abc','--boxlengths',help='dimensions of orthorhombic box',
type=float,nargs='+')
parser.add_argument('-f','--file',help='file to replicate',
type=str)
parser.add_argument('-w','--writeFile',help='new file name', type=str,
default='replicated.xyz')
args = vars(parser.parse_args())
assert len(args['boxlengths']) == 3, '3 dimensions needed for box lengths'
if '.xyz' in args['file']:
coordinates = reader.xyz(args['file'])
elif '.pdb' in args['file']:
coordinates = reader.PDB(args['file'])
else:
raise TypeError('File type not known')
my_coords = replicate(coordinates, args['boxlengths'] ,args['replicate'])
writer.PDB(args['writeFile'], my_coords)
def replicate_box(parent_parser):
import copy
parser = argparse.ArgumentParser(description='replicate box',parents=[parent_parser])
parser.add_argument('-b','--box',help='box to replicate',type=str)
parser.add_argument('-r','--restart',help='restart file',type=str)
parser.add_argument('-p','--path',help='path to main directories',
type=str,default=os.getcwd())
parser.add_argument('-f','--input',help='input file (fort.4)',type=str)
parser.add_argument('-w','--extension',help='new file extension', type=str,
default='replicated')
args = vars(parser.parse_args())
assert args['input'], parser.print_help()
input_data = reader.read_fort4(args['input'])
nmolty = int(input_data['&mc_shared']['nmolty'])
restart_data = reader.read_restart(args['restart'],
nmolty,
int(input_data['&mc_shared']['nbox']))
mol_added = {'%i'%i:0 for i in range(1,nmolty+1)}
box_dimensions = list(map(float,restart_data['box dimensions']['box%s'%args['box']].split()))
new_restart_data = copy.deepcopy(restart_data)
new_input_data = copy.deepcopy(input_data)
for key in 'mol types', 'box types', 'coords':
new_restart_data[key] = []
for c, ibox in enumerate(restart_data['box types']):
imolty = restart_data['mol types'][c]
if ibox == args['box']:
mol_added[imolty] -= 1 # dont double count
# we need to replicate these coordinates
new_mol = replicate_mol(restart_data['coords'][c], box_dimensions, args['replicate'])
# keep track of added mols
for m in range(len(new_mol)):
new_restart_data['mol types'].append(imolty)
new_restart_data['box types'].append(ibox)
new_restart_data['coords'].append(new_mol[m])
mol_added[imolty] += 1
else:
# keep
for key in 'mol types', 'box types', 'coords':
new_restart_data[key].append( restart_data[key][c] )
# tell how many mols added
total_added = sum(mol_added.values())
old_nchain = int(input_data['&mc_shared']['nchain'])
new_nchain = int(new_input_data['&mc_shared']['nchain']) + total_added
new_input_data['&mc_shared']['nchain'] = input_data['&mc_shared']['nchain'].replace(
'%i'%old_nchain,'%i'%new_nchain
)
new_restart_data['nchain'] = restart_data['nchain'].replace(
'%i'%old_nchain,'%i'%new_nchain
)
for mol, nAdded in mol_added.items():
nOld = int(new_input_data['SIMULATION_BOX']['box%s'%args['box']]['mol%s'%mol])
new_input_data['SIMULATION_BOX']['box%s'%args['box']]['mol%s'%mol] = '%i'%(nOld+nAdded)
molID = input_data['MOLECULE_TYPE']['mol%s'%mol].split('\n')[1].split()[0]
print('For input dir %s , %i molecules of type %s added '%(
args['input'][:args['input'].rfind('/')], nAdded,molID)
)
for func, inputName, newData, fortNum in zip( [writer.write_restart,writer.write_fort4],
[args['restart'],args['input']],
[new_restart_data, new_input_data],
['fort.77.','fort.4.']):
new_file_name = '%s'%args['path']
if '/' in inputName:
new_file_name += '/%s'%inputName[:inputName.rfind('/')]
new_file_name += '/%s%s'%(fortNum, args['extension'])
func(newData, new_file_name)
from MCFlow.file_formatting import reader
from MCFlow.file_formatting import writer
import os, argparse
import numpy as np
if __name__ == '__main__':
parent_parser = argparse.ArgumentParser(add_help=False)
parent_parser.add_argument('-t','--type',choices=['replicate_box','replicate_file'])
parent_parser.add_argument('-lmn','--replicate',help='integer number of cells in each dimension '
'(1 does no replication)',
type=int, nargs='+')
replicate_box(parent_parser) | 0.330363 | 0.582669 |
from django.db import migrations, models
import django.db.models.deletion
import upcoming_events.models
from django.core.files import File
def create_icons(apps, schema_editor):
    """Data migration: seed EventIcon rows from bundled static image files."""
    from upcoming_events.models import EventIcon
    icons = [["ESS", "ess-logo.png"], ["DMSC", "dmsc.png"],
             ["ECDC", "ecdc.png"], ["Screaming Udder", "screaming_udder.png"]]
    for name, filename in icons:
        # Fix: the original opened the image file and never closed it;
        # the context manager guarantees the handle is released.
        with open("upcoming_events/static/images/" + filename, "rb") as image_file:
            icon = EventIcon()
            icon.Name = name
            # FileField.save(save=True) stores the file and persists the row.
            icon.Image.save(filename, File(image_file), save=True)
            icon.save()
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Title', models.CharField(max_length=200)),
('Description', models.TextField(blank=True, null=True)),
('StartDate', models.DateTimeField(default=upcoming_events.models.todayMorning, verbose_name='Start of event')),
('EndDate', models.DateTimeField(default=upcoming_events.models.todayAfternoon, null=True, verbose_name='End of event')),
],
),
migrations.CreateModel(
name='EventIcon',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Name', models.CharField(max_length=50)),
('Image', models.FileField(upload_to='icon_images/')),
],
),
migrations.AddField(
model_name='event',
name='Icon',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='upcoming_events.EventIcon'),
),
migrations.RunPython(create_icons)
] | ecdc_status/upcoming_events/migrations/0001_initial.py |
from django.db import migrations, models
import django.db.models.deletion
import upcoming_events.models
from django.core.files import File
def create_icons(apps, schema_editor):
from upcoming_events.models import EventIcon
ListOfIcons = [["ESS", "ess-logo.png"], ["DMSC", "dmsc.png"], ["ECDC", "ecdc.png"], ["Screaming Udder", "screaming_udder.png"]]
for Icon in ListOfIcons:
ImageFile = open("upcoming_events/static/images/" + Icon[1], "rb")
ImageData = File(ImageFile)
CurrentIcon = EventIcon()
CurrentIcon.Name = Icon[0]
CurrentIcon.Image.save(Icon[1], ImageData, save=True)
CurrentIcon.save()
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Title', models.CharField(max_length=200)),
('Description', models.TextField(blank=True, null=True)),
('StartDate', models.DateTimeField(default=upcoming_events.models.todayMorning, verbose_name='Start of event')),
('EndDate', models.DateTimeField(default=upcoming_events.models.todayAfternoon, null=True, verbose_name='End of event')),
],
),
migrations.CreateModel(
name='EventIcon',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Name', models.CharField(max_length=50)),
('Image', models.FileField(upload_to='icon_images/')),
],
),
migrations.AddField(
model_name='event',
name='Icon',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='upcoming_events.EventIcon'),
),
migrations.RunPython(create_icons)
] | 0.450843 | 0.137938 |
from pathlib import Path
from typing import Tuple, Union
import h5py
from tensorflow import keras
from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Input
from tensorflow.python.keras.saving import hdf5_format
from ms2deepscore import SpectrumBinner
class SiameseModel:
"""
Class for training and evaluating a siamese neural network, implemented in Tensorflow Keras.
It consists of a dense 'base' network that produces an embedding for each of the 2 inputs. The
'head' model computes the cosine similarity between the embeddings.
Mimics keras.Model API.
For example:
.. code-block:: python
# Import data and reference scores --> spectrums & tanimoto_scores_df
# Create binned spectrums
spectrum_binner = SpectrumBinner(1000, mz_min=10.0, mz_max=1000.0, peak_scaling=0.5)
binned_spectrums = spectrum_binner.fit_transform(spectrums)
# Create generator
dimension = len(spectrum_binner.known_bins)
test_generator = DataGeneratorAllSpectrums(binned_spectrums, tanimoto_scores_df,
dim=dimension)
# Create (and train) a Siamese model
model = SiameseModel(spectrum_binner, base_dims=(600, 500, 400), embedding_dim=400,
dropout_rate=0.2)
model.compile(loss='mse', optimizer=keras.optimizers.Adam(lr=0.001))
model.summary()
model.fit(test_generator,
validation_data=test_generator,
epochs=50)
"""
    def __init__(self,
                 spectrum_binner: SpectrumBinner,
                 base_dims: Tuple[int, ...] = (600, 500, 500),
                 embedding_dim: int = 400,
                 dropout_rate: float = 0.5,
                 dropout_in_first_layer: bool = False,
                 l1_reg: float = 1e-6,
                 l2_reg: float = 1e-6,
                 keras_model: keras.Model = None):
        """
        Construct SiameseModel

        Parameters
        ----------
        spectrum_binner
            SpectrumBinner which is used to bin the spectra data for the model training.
        base_dims
            Tuple of integers depicting the dimensions of the desired hidden
            layers of the base model
        embedding_dim
            Dimension of the embedding (i.e. the output of the base model)
        dropout_rate
            Dropout rate to be used in the base model.
        dropout_in_first_layer
            Set to True if dropout should be part of first dense layer as well. Default is False.
        l1_reg
            L1 regularization rate. Default is 1e-6.
        l2_reg
            L2 regularization rate. Default is 1e-6.
        keras_model
            When provided, this keras model will be used to construct the SiameseModel instance.
            Default is None.
        """
        # pylint: disable=too-many-arguments
        assert spectrum_binner.known_bins is not None, \
            "spectrum_binner does not contain known bins (run .fit_transform() on training data first!)"
        self.spectrum_binner = spectrum_binner
        # One input unit per known m/z bin.
        self.input_dim = len(spectrum_binner.known_bins)

        if keras_model is None:
            # Build base + head from scratch using the given hyperparameters.
            self.base = self.get_base_model(input_dim=self.input_dim,
                                            base_dims=base_dims,
                                            embedding_dim=embedding_dim,
                                            dropout_rate=dropout_rate,
                                            dropout_in_first_layer=dropout_in_first_layer,
                                            l1_reg=l1_reg,
                                            l2_reg=l2_reg)
            # Create head model
            self.model = self._get_head_model(input_dim=self.input_dim,
                                              base_model=self.base)
        else:
            # Adopt a pre-built keras model (e.g. loaded from disk); the
            # architecture hyperparameters above are ignored in this branch.
            self._construct_from_keras_model(keras_model)
def save(self, filename: Union[str, Path]):
"""
Save model to file.
Parameters
----------
filename
Filename to specify where to store the model.
"""
with h5py.File(filename, mode='w') as f:
hdf5_format.save_model_to_hdf5(self.model, f)
f.attrs['spectrum_binner'] = self.spectrum_binner.to_json()
    @staticmethod
    def get_base_model(input_dim: int,
                       base_dims: Tuple[int, ...] = (600, 500, 500),
                       embedding_dim: int = 400,
                       dropout_rate: float = 0.25,
                       dropout_in_first_layer: bool = False,
                       l1_reg: float = 1e-6,
                       l2_reg: float = 1e-6,
                       dropout_always_on: bool = False) -> keras.Model:
        """Create base model for Siamaese network.

        Parameters
        ----------
        input_dim : int
            Dimension of the input vectors.
        base_dims
            Tuple of integers depicting the dimensions of the desired hidden
            layers of the base model
        embedding_dim
            Dimension of the embedding (i.e. the output of the base model)
        dropout_rate
            Dropout rate to be used in the base model
        dropout_in_first_layer
            Set to True if dropout should be part of first dense layer as well. Default is False.
        l1_reg
            L1 regularization rate. Default is 1e-6.
        l2_reg
            L2 regularization rate. Default is 1e-6.
        dropout_always_on
            Default is False in which case dropout layers will only be active during
            model training, but switched off during inference. When set to True,
            dropout layers will always be on, which is used for ensembling via
            Monte Carlo dropout.
        """
        # pylint: disable=too-many-arguments
        # Index of the first hidden layer that receives dropout.
        dropout_starting_layer = 0 if dropout_in_first_layer else 1
        model_input = Input(shape=input_dim, name='base_input')
        # Stack of Dense -> BatchNorm -> (optional) Dropout per entry in base_dims.
        # NOTE: layer names (dense1, normalization1, dropout1, ...) are part of
        # the checkpoint format — renaming them would break weight loading.
        for i, dim in enumerate(base_dims):
            if i == 0:  # L1 and L2 regularization only in 1st layer
                model_layer = Dense(dim, activation='relu', name='dense'+str(i+1),
                                    kernel_regularizer=keras.regularizers.l1_l2(l1=l1_reg, l2=l2_reg))(
                    model_input)
            else:
                model_layer = Dense(dim, activation='relu', name='dense'+str(i+1))(model_layer)
            model_layer = BatchNormalization(name='normalization'+str(i+1))(model_layer)
            if dropout_always_on and i >= dropout_starting_layer:
                # training=True keeps dropout active at inference time
                # (Monte Carlo dropout).
                model_layer = Dropout(dropout_rate, name='dropout'+str(i+1))(model_layer,
                                                                             training=True)
            elif i >= dropout_starting_layer:
                model_layer = Dropout(dropout_rate, name='dropout'+str(i+1))(model_layer)

        embedding = Dense(embedding_dim, activation='relu', name='embedding')(model_layer)
        return keras.Model(model_input, embedding, name='base')
@staticmethod
def _get_head_model(input_dim: int,
base_model: keras.Model):
input_a = Input(shape=input_dim, name="input_a")
input_b = Input(shape=input_dim, name="input_b")
embedding_a = base_model(input_a)
embedding_b = base_model(input_b)
cosine_similarity = keras.layers.Dot(axes=(1, 1),
normalize=True,
name="cosine_similarity")([embedding_a, embedding_b])
return keras.Model(inputs=[input_a, input_b], outputs=[cosine_similarity],
name='head')
def _construct_from_keras_model(self, keras_model):
def valid_keras_model(given_model):
assert given_model.layers, "Expected valid keras model as input."
assert len(given_model.layers) > 2, "Expected more layers"
assert len(keras_model.layers[2].layers) > 1, "Expected more layers for base model"
valid_keras_model(keras_model)
self.base = keras_model.layers[2]
self.model = keras_model
    def compile(self, *args, **kwargs):
        """Configure the head model for training (delegates to keras.Model.compile)."""
        self.model.compile(*args, **kwargs)
def fit(self, *args, **kwargs):
self.model.fit(*args, **kwargs)
    def load_weights(self, checkpoint_path):
        """Load weights into the head model from a keras checkpoint
        (delegates to keras.Model.load_weights)."""
        self.model.load_weights(checkpoint_path)
    def summary(self):
        """Print keras layer summaries of both the base and the head model."""
        self.base.summary()
        self.model.summary()
def evaluate(self, *args, **kwargs):
return self.model.evaluate(*args, **kwargs) | ms2deepscore/models/SiameseModel.py | from pathlib import Path
from typing import Tuple, Union
import h5py
from tensorflow import keras
from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Input
from tensorflow.python.keras.saving import hdf5_format
from ms2deepscore import SpectrumBinner
class SiameseModel:
"""
Class for training and evaluating a siamese neural network, implemented in Tensorflow Keras.
It consists of a dense 'base' network that produces an embedding for each of the 2 inputs. The
'head' model computes the cosine similarity between the embeddings.
Mimics keras.Model API.
For example:
.. code-block:: python
# Import data and reference scores --> spectrums & tanimoto_scores_df
# Create binned spectrums
spectrum_binner = SpectrumBinner(1000, mz_min=10.0, mz_max=1000.0, peak_scaling=0.5)
binned_spectrums = spectrum_binner.fit_transform(spectrums)
# Create generator
dimension = len(spectrum_binner.known_bins)
test_generator = DataGeneratorAllSpectrums(binned_spectrums, tanimoto_scores_df,
dim=dimension)
# Create (and train) a Siamese model
model = SiameseModel(spectrum_binner, base_dims=(600, 500, 400), embedding_dim=400,
dropout_rate=0.2)
model.compile(loss='mse', optimizer=keras.optimizers.Adam(lr=0.001))
model.summary()
model.fit(test_generator,
validation_data=test_generator,
epochs=50)
"""
def __init__(self,
spectrum_binner: SpectrumBinner,
base_dims: Tuple[int, ...] = (600, 500, 500),
embedding_dim: int = 400,
dropout_rate: float = 0.5,
dropout_in_first_layer: bool = False,
l1_reg: float = 1e-6,
l2_reg: float = 1e-6,
keras_model: keras.Model = None):
"""
Construct SiameseModel
Parameters
----------
spectrum_binner
SpectrumBinner which is used to bin the spectra data for the model training.
base_dims
Tuple of integers depicting the dimensions of the desired hidden
layers of the base model
embedding_dim
Dimension of the embedding (i.e. the output of the base model)
dropout_rate
Dropout rate to be used in the base model.
dropout_in_first_layer
Set to True if dropout should be part of first dense layer as well. Default is False.
l1_reg
L1 regularization rate. Default is 1e-6.
l2_reg
L2 regularization rate. Default is 1e-6.
keras_model
When provided, this keras model will be used to construct the SiameseModel instance.
Default is None.
"""
# pylint: disable=too-many-arguments
assert spectrum_binner.known_bins is not None, \
"spectrum_binner does not contain known bins (run .fit_transform() on training data first!)"
self.spectrum_binner = spectrum_binner
self.input_dim = len(spectrum_binner.known_bins)
if keras_model is None:
# Create base model
self.base = self.get_base_model(input_dim=self.input_dim,
base_dims=base_dims,
embedding_dim=embedding_dim,
dropout_rate=dropout_rate,
dropout_in_first_layer=dropout_in_first_layer,
l1_reg=l1_reg,
l2_reg=l2_reg)
# Create head model
self.model = self._get_head_model(input_dim=self.input_dim,
base_model=self.base)
else:
self._construct_from_keras_model(keras_model)
def save(self, filename: Union[str, Path]):
"""
Save model to file.
Parameters
----------
filename
Filename to specify where to store the model.
"""
with h5py.File(filename, mode='w') as f:
hdf5_format.save_model_to_hdf5(self.model, f)
f.attrs['spectrum_binner'] = self.spectrum_binner.to_json()
@staticmethod
def get_base_model(input_dim: int,
base_dims: Tuple[int, ...] = (600, 500, 500),
embedding_dim: int = 400,
dropout_rate: float = 0.25,
dropout_in_first_layer: bool = False,
l1_reg: float = 1e-6,
l2_reg: float = 1e-6,
dropout_always_on: bool = False) -> keras.Model:
"""Create base model for Siamaese network.
Parameters
----------
input_dim : int
Dimension of the input vectors.
base_dims
Tuple of integers depicting the dimensions of the desired hidden
layers of the base model
embedding_dim
Dimension of the embedding (i.e. the output of the base model)
dropout_rate
Dropout rate to be used in the base model
dropout_in_first_layer
Set to True if dropout should be part of first dense layer as well. Default is False.
l1_reg
L1 regularization rate. Default is 1e-6.
l2_reg
L2 regularization rate. Default is 1e-6.
dropout_always_on
Default is False in which case dropout layers will only be active during
model training, but switched off during inference. When set to True,
dropout layers will always be on, which is used for ensembling via
Monte Carlo dropout.
"""
# pylint: disable=too-many-arguments
dropout_starting_layer = 0 if dropout_in_first_layer else 1
model_input = Input(shape=input_dim, name='base_input')
for i, dim in enumerate(base_dims):
if i == 0: # L1 and L2 regularization only in 1st layer
model_layer = Dense(dim, activation='relu', name='dense'+str(i+1),
kernel_regularizer=keras.regularizers.l1_l2(l1=l1_reg, l2=l2_reg))(
model_input)
else:
model_layer = Dense(dim, activation='relu', name='dense'+str(i+1))(model_layer)
model_layer = BatchNormalization(name='normalization'+str(i+1))(model_layer)
if dropout_always_on and i >= dropout_starting_layer:
model_layer = Dropout(dropout_rate, name='dropout'+str(i+1))(model_layer,
training=True)
elif i >= dropout_starting_layer:
model_layer = Dropout(dropout_rate, name='dropout'+str(i+1))(model_layer)
embedding = Dense(embedding_dim, activation='relu', name='embedding')(model_layer)
return keras.Model(model_input, embedding, name='base')
@staticmethod
def _get_head_model(input_dim: int,
base_model: keras.Model):
input_a = Input(shape=input_dim, name="input_a")
input_b = Input(shape=input_dim, name="input_b")
embedding_a = base_model(input_a)
embedding_b = base_model(input_b)
cosine_similarity = keras.layers.Dot(axes=(1, 1),
normalize=True,
name="cosine_similarity")([embedding_a, embedding_b])
return keras.Model(inputs=[input_a, input_b], outputs=[cosine_similarity],
name='head')
def _construct_from_keras_model(self, keras_model):
def valid_keras_model(given_model):
assert given_model.layers, "Expected valid keras model as input."
assert len(given_model.layers) > 2, "Expected more layers"
assert len(keras_model.layers[2].layers) > 1, "Expected more layers for base model"
valid_keras_model(keras_model)
self.base = keras_model.layers[2]
self.model = keras_model
def compile(self, *args, **kwargs):
self.model.compile(*args, **kwargs)
def fit(self, *args, **kwargs):
self.model.fit(*args, **kwargs)
def load_weights(self, checkpoint_path):
self.model.load_weights(checkpoint_path)
def summary(self):
self.base.summary()
self.model.summary()
def evaluate(self, *args, **kwargs):
return self.model.evaluate(*args, **kwargs) | 0.95452 | 0.542984 |
import mock
import pytest
from backend.container_service.clusters.constants import ClusterManagerNodeStatus
from backend.container_service.clusters.tools import node as node_tools
from backend.resources.constants import NodeConditionStatus
from backend.tests.container_service.clusters.test_cc_host import fake_fetch_all_hosts, fake_get_agent_status
FAKE_INNER_IP = "127.0.0.1"
FAKE_NODE_NAME = "bcs-test-node"
def test_query_cluster_nodes(client, create_and_delete_node, ctx_cluster):
cluster_nodes = node_tools.query_cluster_nodes(ctx_cluster)
assert FAKE_INNER_IP in cluster_nodes
assert cluster_nodes[FAKE_INNER_IP]["name"] == FAKE_NODE_NAME
assert cluster_nodes[FAKE_INNER_IP]["status"] == NodeConditionStatus.Ready
assert not cluster_nodes[FAKE_INNER_IP]["unschedulable"]
@pytest.mark.parametrize(
"cluster_node_status,unschedulable,cm_node_status,expected_status",
[
(NodeConditionStatus.Ready, False, ClusterManagerNodeStatus.RUNNING, ClusterManagerNodeStatus.RUNNING),
(NodeConditionStatus.Ready, True, ClusterManagerNodeStatus.RUNNING, ClusterManagerNodeStatus.REMOVABLE),
(NodeConditionStatus.Ready, True, ClusterManagerNodeStatus.REMOVABLE, ClusterManagerNodeStatus.REMOVABLE),
(NodeConditionStatus.NotReady, True, ClusterManagerNodeStatus.NOTREADY, ClusterManagerNodeStatus.NOTREADY),
(NodeConditionStatus.NotReady, True, ClusterManagerNodeStatus.REMOVABLE, ClusterManagerNodeStatus.NOTREADY),
(NodeConditionStatus.Unknown, True, ClusterManagerNodeStatus.REMOVABLE, ClusterManagerNodeStatus.UNKNOWN),
("", False, ClusterManagerNodeStatus.INITIALIZATION, ClusterManagerNodeStatus.INITIALIZATION),
("", False, ClusterManagerNodeStatus.DELETING, ClusterManagerNodeStatus.DELETING),
("", False, ClusterManagerNodeStatus.ADDFAILURE, ClusterManagerNodeStatus.ADDFAILURE),
("", False, ClusterManagerNodeStatus.REMOVEFAILURE, ClusterManagerNodeStatus.REMOVEFAILURE),
],
)
def test_transform_status(cluster_node_status, unschedulable, cm_node_status, expected_status):
assert expected_status == node_tools.transform_status(cluster_node_status, unschedulable, cm_node_status)
@pytest.fixture
def cluster_name():
return "cluster_name"
class TestNodesData:
def test_compose_data_by_cm_nodes(self, cm_nodes, cluster_nodes, cluster_id, cluster_name):
client = node_tools.NodesData(
cm_nodes=cm_nodes, cluster_nodes=cluster_nodes, cluster_id=cluster_id, cluster_name=cluster_name
)
node_data = client._compose_data_by_cm_nodes()
assert len(node_data) == len(
[node for inner_ip, node in cm_nodes.items() if node["status"] != ClusterManagerNodeStatus.RUNNING]
)
assert node_data[0]["cluster_name"] == cluster_name
def test_compose_data_by_cluster_nodes(self, cm_nodes, cluster_nodes, cluster_id):
client = node_tools.NodesData(
cm_nodes=cm_nodes, cluster_nodes=cluster_nodes, cluster_id=cluster_id, cluster_name="cluster_name"
)
node_data = client._compose_data_by_cluster_nodes()
assert len(node_data) == len(cluster_nodes)
assert node_data[0]["status"] == ClusterManagerNodeStatus.RUNNING
@pytest.fixture
def master_client(ctx_cluster):
return node_tools.BcsClusterMaster(ctx_cluster=ctx_cluster, biz_id=1)
class TestBcsClusterMaster:
@mock.patch("backend.components.cc.HostQueryService.fetch_all", new=fake_fetch_all_hosts)
@mock.patch("backend.components.gse.get_agent_status", new=fake_get_agent_status)
def test_list_masters(self, master_client, create_and_delete_master):
masters = master_client.list_masters()
# 判断 ip 存在返回的数据中
detail, is_exist = {}, False
for master in masters:
if master["inner_ip"] == FAKE_INNER_IP:
detail, is_exist = master, True
break
assert is_exist
# 判断包含对应的字段
for field_name in ["inner_ip", "idc", "rack", "device_class", "bk_cloud_id", "agent"]:
assert field_name in detail | bcs-ui/backend/tests/container_service/clusters/tools/test_node.py | import mock
import pytest
from backend.container_service.clusters.constants import ClusterManagerNodeStatus
from backend.container_service.clusters.tools import node as node_tools
from backend.resources.constants import NodeConditionStatus
from backend.tests.container_service.clusters.test_cc_host import fake_fetch_all_hosts, fake_get_agent_status
FAKE_INNER_IP = "127.0.0.1"
FAKE_NODE_NAME = "bcs-test-node"
def test_query_cluster_nodes(client, create_and_delete_node, ctx_cluster):
cluster_nodes = node_tools.query_cluster_nodes(ctx_cluster)
assert FAKE_INNER_IP in cluster_nodes
assert cluster_nodes[FAKE_INNER_IP]["name"] == FAKE_NODE_NAME
assert cluster_nodes[FAKE_INNER_IP]["status"] == NodeConditionStatus.Ready
assert not cluster_nodes[FAKE_INNER_IP]["unschedulable"]
@pytest.mark.parametrize(
"cluster_node_status,unschedulable,cm_node_status,expected_status",
[
(NodeConditionStatus.Ready, False, ClusterManagerNodeStatus.RUNNING, ClusterManagerNodeStatus.RUNNING),
(NodeConditionStatus.Ready, True, ClusterManagerNodeStatus.RUNNING, ClusterManagerNodeStatus.REMOVABLE),
(NodeConditionStatus.Ready, True, ClusterManagerNodeStatus.REMOVABLE, ClusterManagerNodeStatus.REMOVABLE),
(NodeConditionStatus.NotReady, True, ClusterManagerNodeStatus.NOTREADY, ClusterManagerNodeStatus.NOTREADY),
(NodeConditionStatus.NotReady, True, ClusterManagerNodeStatus.REMOVABLE, ClusterManagerNodeStatus.NOTREADY),
(NodeConditionStatus.Unknown, True, ClusterManagerNodeStatus.REMOVABLE, ClusterManagerNodeStatus.UNKNOWN),
("", False, ClusterManagerNodeStatus.INITIALIZATION, ClusterManagerNodeStatus.INITIALIZATION),
("", False, ClusterManagerNodeStatus.DELETING, ClusterManagerNodeStatus.DELETING),
("", False, ClusterManagerNodeStatus.ADDFAILURE, ClusterManagerNodeStatus.ADDFAILURE),
("", False, ClusterManagerNodeStatus.REMOVEFAILURE, ClusterManagerNodeStatus.REMOVEFAILURE),
],
)
def test_transform_status(cluster_node_status, unschedulable, cm_node_status, expected_status):
assert expected_status == node_tools.transform_status(cluster_node_status, unschedulable, cm_node_status)
@pytest.fixture
def cluster_name():
return "cluster_name"
class TestNodesData:
def test_compose_data_by_cm_nodes(self, cm_nodes, cluster_nodes, cluster_id, cluster_name):
client = node_tools.NodesData(
cm_nodes=cm_nodes, cluster_nodes=cluster_nodes, cluster_id=cluster_id, cluster_name=cluster_name
)
node_data = client._compose_data_by_cm_nodes()
assert len(node_data) == len(
[node for inner_ip, node in cm_nodes.items() if node["status"] != ClusterManagerNodeStatus.RUNNING]
)
assert node_data[0]["cluster_name"] == cluster_name
def test_compose_data_by_cluster_nodes(self, cm_nodes, cluster_nodes, cluster_id):
client = node_tools.NodesData(
cm_nodes=cm_nodes, cluster_nodes=cluster_nodes, cluster_id=cluster_id, cluster_name="cluster_name"
)
node_data = client._compose_data_by_cluster_nodes()
assert len(node_data) == len(cluster_nodes)
assert node_data[0]["status"] == ClusterManagerNodeStatus.RUNNING
@pytest.fixture
def master_client(ctx_cluster):
return node_tools.BcsClusterMaster(ctx_cluster=ctx_cluster, biz_id=1)
class TestBcsClusterMaster:
@mock.patch("backend.components.cc.HostQueryService.fetch_all", new=fake_fetch_all_hosts)
@mock.patch("backend.components.gse.get_agent_status", new=fake_get_agent_status)
def test_list_masters(self, master_client, create_and_delete_master):
masters = master_client.list_masters()
# 判断 ip 存在返回的数据中
detail, is_exist = {}, False
for master in masters:
if master["inner_ip"] == FAKE_INNER_IP:
detail, is_exist = master, True
break
assert is_exist
# 判断包含对应的字段
for field_name in ["inner_ip", "idc", "rack", "device_class", "bk_cloud_id", "agent"]:
assert field_name in detail | 0.402392 | 0.41561 |
import sys, os, argparse
import MaterialX as mx
from MaterialX import PyMaterialXGenShader as mx_gen_shader
from MaterialX import PyMaterialXGenGlsl as ms_gen_glsl
from MaterialX import PyMaterialXRender as mx_render
from MaterialX import PyMaterialXRenderGlsl as mx_render_glsl
def main():
parser = argparse.ArgumentParser(description="Generate a translated baked version of each material in the input document.")
parser.add_argument("--width", dest="width", type=int, default=0, help="Specify an optional width for baked textures (defaults to the maximum image height in the source document).")
parser.add_argument("--height", dest="height", type=int, default=0, help="Specify an optional height for baked textures (defaults to the maximum image width in the source document).")
parser.add_argument("--hdr", dest="hdr", action="store_true", help="Bake images with high dynamic range (e.g. in HDR or EXR format).")
parser.add_argument("--path", dest="paths", action='append', nargs='+', help="An additional absolute search path location (e.g. '/projects/MaterialX')")
parser.add_argument("--library", dest="libraries", action='append', nargs='+', help="An additional relative path to a custom data library folder (e.g. 'libraries/custom')")
parser.add_argument(dest="inputFilename", help="Filename of the input document.")
parser.add_argument(dest="outputFilename", help="Filename of the output document.")
parser.add_argument(dest="destShader", help="Destination shader for translation")
opts = parser.parse_args()
doc = mx.createDocument()
try:
mx.readFromXmlFile(doc, opts.inputFilename)
except mx.ExceptionFileMissing as err:
print(err)
sys.exit(0)
stdlib = mx.createDocument()
filePath = os.path.dirname(os.path.abspath(__file__))
searchPath = mx.FileSearchPath(os.path.join(filePath, '..', '..'))
searchPath.append(os.path.dirname(opts.inputFilename))
libraryFolders = [ "libraries" ]
if opts.paths:
for pathList in opts.paths:
for path in pathList:
searchPath.append(path)
if opts.libraries:
for libraryList in opts.libraries:
for library in libraryList:
libraryFolders.append(library)
mx.loadLibraries(libraryFolders, searchPath, stdlib)
doc.importLibrary(stdlib)
valid, msg = doc.validate()
if not valid:
print("Validation warnings for input document:")
print(msg)
# Check the document for a UDIM set.
udimSetValue = doc.getGeomPropValue('udimset')
udimSet = udimSetValue.getData() if udimSetValue else []
# Compute baking resolution from the source document.
imageHandler = mx_render.ImageHandler.create(mx_render.StbImageLoader.create())
imageHandler.setSearchPath(searchPath)
if udimSet:
resolver = doc.createStringResolver()
resolver.setUdimString(udimSet[0])
imageHandler.setFilenameResolver(resolver)
imageVec = imageHandler.getReferencedImages(doc)
bakeWidth, bakeHeight = mx_render.getMaxDimensions(imageVec)
# Apply baking resolution settings.
if opts.width > 0:
bakeWidth = opts.width
if opts.height > 0:
bakeHeight = opts.height
bakeWidth = max(bakeWidth, 4)
bakeHeight = max(bakeHeight, 4)
# Translate materials between shading models
translator = mx_gen_shader.ShaderTranslator.create()
try:
translator.translateAllMaterials(doc, opts.destShader)
except mx.Exception as err:
print(err)
sys.exit(0)
# Bake translated materials to flat textures.
baseType = mx_render.BaseType.FLOAT if opts.hdr else mx_render.BaseType.UINT8
baker = mx_render_glsl.TextureBaker.create(bakeWidth, bakeHeight, baseType)
baker.bakeAllMaterials(doc, searchPath, opts.outputFilename)
if __name__ == '__main__':
main() | python/Scripts/translateshader.py | import sys, os, argparse
import MaterialX as mx
from MaterialX import PyMaterialXGenShader as mx_gen_shader
from MaterialX import PyMaterialXGenGlsl as ms_gen_glsl
from MaterialX import PyMaterialXRender as mx_render
from MaterialX import PyMaterialXRenderGlsl as mx_render_glsl
def main():
parser = argparse.ArgumentParser(description="Generate a translated baked version of each material in the input document.")
parser.add_argument("--width", dest="width", type=int, default=0, help="Specify an optional width for baked textures (defaults to the maximum image height in the source document).")
parser.add_argument("--height", dest="height", type=int, default=0, help="Specify an optional height for baked textures (defaults to the maximum image width in the source document).")
parser.add_argument("--hdr", dest="hdr", action="store_true", help="Bake images with high dynamic range (e.g. in HDR or EXR format).")
parser.add_argument("--path", dest="paths", action='append', nargs='+', help="An additional absolute search path location (e.g. '/projects/MaterialX')")
parser.add_argument("--library", dest="libraries", action='append', nargs='+', help="An additional relative path to a custom data library folder (e.g. 'libraries/custom')")
parser.add_argument(dest="inputFilename", help="Filename of the input document.")
parser.add_argument(dest="outputFilename", help="Filename of the output document.")
parser.add_argument(dest="destShader", help="Destination shader for translation")
opts = parser.parse_args()
doc = mx.createDocument()
try:
mx.readFromXmlFile(doc, opts.inputFilename)
except mx.ExceptionFileMissing as err:
print(err)
sys.exit(0)
stdlib = mx.createDocument()
filePath = os.path.dirname(os.path.abspath(__file__))
searchPath = mx.FileSearchPath(os.path.join(filePath, '..', '..'))
searchPath.append(os.path.dirname(opts.inputFilename))
libraryFolders = [ "libraries" ]
if opts.paths:
for pathList in opts.paths:
for path in pathList:
searchPath.append(path)
if opts.libraries:
for libraryList in opts.libraries:
for library in libraryList:
libraryFolders.append(library)
mx.loadLibraries(libraryFolders, searchPath, stdlib)
doc.importLibrary(stdlib)
valid, msg = doc.validate()
if not valid:
print("Validation warnings for input document:")
print(msg)
# Check the document for a UDIM set.
udimSetValue = doc.getGeomPropValue('udimset')
udimSet = udimSetValue.getData() if udimSetValue else []
# Compute baking resolution from the source document.
imageHandler = mx_render.ImageHandler.create(mx_render.StbImageLoader.create())
imageHandler.setSearchPath(searchPath)
if udimSet:
resolver = doc.createStringResolver()
resolver.setUdimString(udimSet[0])
imageHandler.setFilenameResolver(resolver)
imageVec = imageHandler.getReferencedImages(doc)
bakeWidth, bakeHeight = mx_render.getMaxDimensions(imageVec)
# Apply baking resolution settings.
if opts.width > 0:
bakeWidth = opts.width
if opts.height > 0:
bakeHeight = opts.height
bakeWidth = max(bakeWidth, 4)
bakeHeight = max(bakeHeight, 4)
# Translate materials between shading models
translator = mx_gen_shader.ShaderTranslator.create()
try:
translator.translateAllMaterials(doc, opts.destShader)
except mx.Exception as err:
print(err)
sys.exit(0)
# Bake translated materials to flat textures.
baseType = mx_render.BaseType.FLOAT if opts.hdr else mx_render.BaseType.UINT8
baker = mx_render_glsl.TextureBaker.create(bakeWidth, bakeHeight, baseType)
baker.bakeAllMaterials(doc, searchPath, opts.outputFilename)
if __name__ == '__main__':
main() | 0.443118 | 0.177169 |
from datetime import timedelta
import logging
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST, CONF_PORT, CONF_SSL
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import DOMAIN, KEY_COORDINATOR, KEY_ROUTER, PLATFORMS
from .errors import CannotLoginException
from .router import NetgearRouter
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=30)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Netgear component."""
router = NetgearRouter(hass, entry)
try:
if not await router.async_setup():
raise ConfigEntryNotReady
except CannotLoginException as ex:
raise ConfigEntryNotReady from ex
port = entry.data.get(CONF_PORT)
ssl = entry.data.get(CONF_SSL)
if port != router.port or ssl != router.ssl:
data = {**entry.data, CONF_PORT: router.port, CONF_SSL: router.ssl}
hass.config_entries.async_update_entry(entry, data=data)
_LOGGER.info(
"Netgear port-SSL combination updated from (%i, %r) to (%i, %r), "
"this should only occur after a firmware update",
port,
ssl,
router.port,
router.ssl,
)
hass.data.setdefault(DOMAIN, {})
entry.async_on_unload(entry.add_update_listener(update_listener))
device_registry = dr.async_get(hass)
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
identifiers={(DOMAIN, entry.unique_id)},
manufacturer="Netgear",
name=router.device_name,
model=router.model,
sw_version=router.firmware_version,
configuration_url=f"http://{entry.data[CONF_HOST]}/",
)
async def async_update_data() -> bool:
"""Fetch data from the router."""
data = await router.async_update_device_trackers()
return data
# Create update coordinator
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name=router.device_name,
update_method=async_update_data,
update_interval=SCAN_INTERVAL,
)
await coordinator.async_config_entry_first_refresh()
hass.data[DOMAIN][entry.entry_id] = {
KEY_ROUTER: router,
KEY_COORDINATOR: coordinator,
}
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
if not hass.data[DOMAIN]:
hass.data.pop(DOMAIN)
return unload_ok
async def update_listener(hass: HomeAssistant, config_entry: ConfigEntry) -> None:
"""Handle options update."""
await hass.config_entries.async_reload(config_entry.entry_id) | homeassistant/components/netgear/__init__.py | from datetime import timedelta
import logging
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST, CONF_PORT, CONF_SSL
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import DOMAIN, KEY_COORDINATOR, KEY_ROUTER, PLATFORMS
from .errors import CannotLoginException
from .router import NetgearRouter
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=30)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Netgear component."""
router = NetgearRouter(hass, entry)
try:
if not await router.async_setup():
raise ConfigEntryNotReady
except CannotLoginException as ex:
raise ConfigEntryNotReady from ex
port = entry.data.get(CONF_PORT)
ssl = entry.data.get(CONF_SSL)
if port != router.port or ssl != router.ssl:
data = {**entry.data, CONF_PORT: router.port, CONF_SSL: router.ssl}
hass.config_entries.async_update_entry(entry, data=data)
_LOGGER.info(
"Netgear port-SSL combination updated from (%i, %r) to (%i, %r), "
"this should only occur after a firmware update",
port,
ssl,
router.port,
router.ssl,
)
hass.data.setdefault(DOMAIN, {})
entry.async_on_unload(entry.add_update_listener(update_listener))
device_registry = dr.async_get(hass)
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
identifiers={(DOMAIN, entry.unique_id)},
manufacturer="Netgear",
name=router.device_name,
model=router.model,
sw_version=router.firmware_version,
configuration_url=f"http://{entry.data[CONF_HOST]}/",
)
async def async_update_data() -> bool:
"""Fetch data from the router."""
data = await router.async_update_device_trackers()
return data
# Create update coordinator
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name=router.device_name,
update_method=async_update_data,
update_interval=SCAN_INTERVAL,
)
await coordinator.async_config_entry_first_refresh()
hass.data[DOMAIN][entry.entry_id] = {
KEY_ROUTER: router,
KEY_COORDINATOR: coordinator,
}
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
if not hass.data[DOMAIN]:
hass.data.pop(DOMAIN)
return unload_ok
async def update_listener(hass: HomeAssistant, config_entry: ConfigEntry) -> None:
"""Handle options update."""
await hass.config_entries.async_reload(config_entry.entry_id) | 0.655557 | 0.090937 |
from ayumi import Ayumi
from cerberus import Validator
from typing import Dict
cv = Validator({
"show": {'type': 'string', 'required': True},
"episode": {'type': 'string', 'required': True},
"filesize": {'type': 'integer', 'required': True},
"sub": {'type': 'string', 'required': True}
})
fiv = Validator({
"title": {'type': 'string', 'required': True},
"link": {'type': 'string', 'required': True},
"guid": {'type': 'string', 'required': True},
"show_title": {'type': 'string', 'required': False}
})
class Job:
def __init__(self, show: str = None, episode: str = None, filesize: int = -1, sub: str = None):
self._show = Job._clean(show)
self._episode = Job._clean(episode)
self._filesize = filesize
self._sub = sub.upper()
@property
def show(self) -> str:
return self._show
@property
def episode(self) -> str:
return self._episode
@property
def filesize(self) -> int:
return self._filesize
@property
def sub(self) -> str:
return self._sub
@staticmethod
def _clean(name: str) -> str:
if name.endswith("/"):
return name[:-1]
else:
return name
class FeedItem:
def __init__(self, title: str = None, link: str = None, guid: str = None, show_title: str = None):
self._title = title
self._link = link
self._guid = guid
self._show_title = show_title
@property
def title(self) -> str:
return self._title
@property
def link(self) -> str:
return self._link
@property
def guid(self) -> str:
return self._guid
@property
def show_title(self) -> str:
return self._show_title
def validate(job: Dict) -> bool:
"""Alias for validate_job, meant to support older versions."""
return validate_job(job)
def validate_job(job: Dict) -> bool:
if cv.validate(job):
Ayumi.debug("Incoming job validated, returning True.",
color=Ayumi.GREEN)
return True
else:
Ayumi.debug("Incoming job is invalid, returning False.",
color=Ayumi.YELLOW)
return False
def validate_feeditem(item: Dict) -> bool:
if fiv.validate(item):
Ayumi.debug("Incoming feed item validated, returning True.",
color=Ayumi.GREEN)
return True
else:
Ayumi.debug("Incoming feed item is invalid, returning False.",
color=Ayumi.YELLOW)
return False
def generate(job: Dict) -> Job:
return generate_job(job)
def generate_job(job: Dict) -> Job:
return Job(
show=job['show'],
episode=job['episode'],
filesize=job['filesize'],
sub=job['sub']
)
def generate_feeditem(item: Dict) -> FeedItem:
return FeedItem(
title=item['title'],
link=item['link'],
guid=item['guid'],
show_title=item['show_title']
) | metsuke/metsuke.py | from ayumi import Ayumi
from cerberus import Validator
from typing import Dict
cv = Validator({
"show": {'type': 'string', 'required': True},
"episode": {'type': 'string', 'required': True},
"filesize": {'type': 'integer', 'required': True},
"sub": {'type': 'string', 'required': True}
})
fiv = Validator({
"title": {'type': 'string', 'required': True},
"link": {'type': 'string', 'required': True},
"guid": {'type': 'string', 'required': True},
"show_title": {'type': 'string', 'required': False}
})
class Job:
def __init__(self, show: str = None, episode: str = None, filesize: int = -1, sub: str = None):
self._show = Job._clean(show)
self._episode = Job._clean(episode)
self._filesize = filesize
self._sub = sub.upper()
@property
def show(self) -> str:
return self._show
@property
def episode(self) -> str:
return self._episode
@property
def filesize(self) -> int:
return self._filesize
@property
def sub(self) -> str:
return self._sub
@staticmethod
def _clean(name: str) -> str:
if name.endswith("/"):
return name[:-1]
else:
return name
class FeedItem:
def __init__(self, title: str = None, link: str = None, guid: str = None, show_title: str = None):
self._title = title
self._link = link
self._guid = guid
self._show_title = show_title
@property
def title(self) -> str:
return self._title
@property
def link(self) -> str:
return self._link
@property
def guid(self) -> str:
return self._guid
@property
def show_title(self) -> str:
return self._show_title
def validate(job: Dict) -> bool:
"""Alias for validate_job, meant to support older versions."""
return validate_job(job)
def validate_job(job: Dict) -> bool:
if cv.validate(job):
Ayumi.debug("Incoming job validated, returning True.",
color=Ayumi.GREEN)
return True
else:
Ayumi.debug("Incoming job is invalid, returning False.",
color=Ayumi.YELLOW)
return False
def validate_feeditem(item: Dict) -> bool:
if fiv.validate(item):
Ayumi.debug("Incoming feed item validated, returning True.",
color=Ayumi.GREEN)
return True
else:
Ayumi.debug("Incoming feed item is invalid, returning False.",
color=Ayumi.YELLOW)
return False
def generate(job: Dict) -> Job:
return generate_job(job)
def generate_job(job: Dict) -> Job:
return Job(
show=job['show'],
episode=job['episode'],
filesize=job['filesize'],
sub=job['sub']
)
def generate_feeditem(item: Dict) -> FeedItem:
return FeedItem(
title=item['title'],
link=item['link'],
guid=item['guid'],
show_title=item['show_title']
) | 0.84869 | 0.284899 |
import json
from django.test import TestCase
from tastypie.test import ResourceTestCaseMixin
import logging
logging.disable(logging.CRITICAL)
class ClassAttributes:
def __init__(self, dictionary):
for k, v in dictionary.items():
setattr(self, k, v)
class TestNegativeNetLoad(ResourceTestCaseMixin, TestCase):
def setUp(self):
super(TestNegativeNetLoad, self).setUp()
self.submit_url = '/v1/job/'
self.results_url = '/v1/job/<run_uuid>/results/'
with open('reo/tests/posts/nestedPOST.json') as f:
self.post = json.loads(f.read())
def get_response(self, data):
initial_post = self.api_client.post(self.submit_url, format='json', data=data)
uuid = json.loads(initial_post.content)['run_uuid']
response = json.loads(self.api_client.get(self.results_url.replace('<run_uuid>', str(uuid))).content)
return response
def test_negative_net_load(self):
"""
Test that self.post scenario, with monthly rates, returns the expected LCC and PV station attributes
"""
self.post['Scenario']['Site']['LoadProfile']['loads_kw'] = [1 for _ in range(8760)]
self.post['Scenario']['Site']['LoadProfile']['loads_kw'][10] = -1
del self.post['Scenario']['Site']['LoadProfile']['doe_reference_name']
del self.post['Scenario']['Site']['LoadProfile']['annual_kwh']
del self.post['Scenario']['Site']['LoadProfile']['year']
del self.post['Scenario']['Site']['LoadProfile']['monthly_totals_kwh']
del self.post['Scenario']['Site']['LoadProfile']['outage_start_time_step']
del self.post['Scenario']['Site']['LoadProfile']['outage_end_time_step']
del self.post['Scenario']['Site']['LoadProfile']['critical_load_pct']
del self.post['Scenario']['Site']['LoadProfile']['critical_loads_kw']
response = self.get_response(self.post)
self.assertTrue("After adding existing generation to the load profile there were still negative electricity loads. Loads (non-net) must be equal to or greater than 0." in response['messages']['error']) | reo/tests/test_negative_loads.py | import json
from django.test import TestCase
from tastypie.test import ResourceTestCaseMixin
import logging
logging.disable(logging.CRITICAL)
class ClassAttributes:
def __init__(self, dictionary):
for k, v in dictionary.items():
setattr(self, k, v)
class TestNegativeNetLoad(ResourceTestCaseMixin, TestCase):
def setUp(self):
super(TestNegativeNetLoad, self).setUp()
self.submit_url = '/v1/job/'
self.results_url = '/v1/job/<run_uuid>/results/'
with open('reo/tests/posts/nestedPOST.json') as f:
self.post = json.loads(f.read())
def get_response(self, data):
initial_post = self.api_client.post(self.submit_url, format='json', data=data)
uuid = json.loads(initial_post.content)['run_uuid']
response = json.loads(self.api_client.get(self.results_url.replace('<run_uuid>', str(uuid))).content)
return response
def test_negative_net_load(self):
"""
Test that self.post scenario, with monthly rates, returns the expected LCC and PV station attributes
"""
self.post['Scenario']['Site']['LoadProfile']['loads_kw'] = [1 for _ in range(8760)]
self.post['Scenario']['Site']['LoadProfile']['loads_kw'][10] = -1
del self.post['Scenario']['Site']['LoadProfile']['doe_reference_name']
del self.post['Scenario']['Site']['LoadProfile']['annual_kwh']
del self.post['Scenario']['Site']['LoadProfile']['year']
del self.post['Scenario']['Site']['LoadProfile']['monthly_totals_kwh']
del self.post['Scenario']['Site']['LoadProfile']['outage_start_time_step']
del self.post['Scenario']['Site']['LoadProfile']['outage_end_time_step']
del self.post['Scenario']['Site']['LoadProfile']['critical_load_pct']
del self.post['Scenario']['Site']['LoadProfile']['critical_loads_kw']
response = self.get_response(self.post)
self.assertTrue("After adding existing generation to the load profile there were still negative electricity loads. Loads (non-net) must be equal to or greater than 0." in response['messages']['error']) | 0.445047 | 0.127952 |
try:
import simplejson as json
except ImportError:
import json
from bigml.api_handlers.resourcehandler import ResourceHandlerMixin
from bigml.api_handlers.resourcehandler import check_resource_type, \
resource_is_ready, get_optiml_id
from bigml.constants import OPTIML_PATH
class OptimlHandlerMixin(ResourceHandlerMixin):
"""This class is used by the BigML class as
a mixin that provides the REST calls models. It should not
be instantiated independently.
"""
def __init__(self):
"""Initializes the OptimlHandler. This class is intended
to be used as a mixin on ResourceHandler, that inherits its
attributes and basic method from BigMLConnection, and must not be
instantiated independently.
"""
self.optiml_url = self.url + OPTIML_PATH
def create_optiml(self, datasets,
args=None, wait_time=3, retries=10):
"""Creates an optiml from a `dataset`
of a list o `datasets`.
"""
create_args = self._set_create_from_datasets_args(
datasets, args=args, wait_time=wait_time, retries=retries)
body = json.dumps(create_args)
return self._create(self.optiml_url, body)
def get_optiml(self, optiml, query_string='',
shared_username=None, shared_api_key=None):
"""Retrieves an optiml.
The model parameter should be a string containing the
optiml id or the dict returned by
create_optiml.
As an optiml is an evolving object that is processed
until it reaches the FINISHED or FAULTY state, the function will
return a dict that encloses the optiml
values and state info available at the time it is called.
If this is a shared optiml, the username and
sharing api key must also be provided.
"""
check_resource_type(optiml, OPTIML_PATH,
message="An optiml id is needed.")
optiml_id = get_optiml_id(optiml)
if optiml_id:
return self._get("%s%s" % (self.url, optiml_id),
query_string=query_string,
shared_username=shared_username,
shared_api_key=shared_api_key)
def optiml_is_ready(self, optiml, **kwargs):
"""Checks whether an optiml's status is FINISHED.
"""
check_resource_type(optiml, OPTIML_PATH,
message="An optiml id is needed.")
resource = self.get_optiml(optiml, **kwargs)
return resource_is_ready(resource)
def list_optimls(self, query_string=''):
"""Lists all your optimls.
"""
return self._list(self.optiml_url, query_string)
def update_optiml(self, optiml, changes):
"""Updates an optiml.
"""
check_resource_type(optiml, OPTIML_PATH,
message="An optiml id is needed.")
optiml_id = get_optiml_id(optiml)
if optiml_id:
body = json.dumps(changes)
return self._update(
"%s%s" % (self.url, optiml_id), body)
def delete_optiml(self, optiml):
"""Deletes an optiml.
"""
check_resource_type(optiml, OPTIML_PATH,
message="An optiml id is needed.")
optiml_id = get_optiml_id(optiml)
if optiml_id:
return self._delete("%s%s" % (self.url, optiml_id)) | bigml/api_handlers/optimlhandler.py | try:
import simplejson as json
except ImportError:
import json
from bigml.api_handlers.resourcehandler import ResourceHandlerMixin
from bigml.api_handlers.resourcehandler import check_resource_type, \
resource_is_ready, get_optiml_id
from bigml.constants import OPTIML_PATH
class OptimlHandlerMixin(ResourceHandlerMixin):
"""This class is used by the BigML class as
a mixin that provides the REST calls models. It should not
be instantiated independently.
"""
def __init__(self):
"""Initializes the OptimlHandler. This class is intended
to be used as a mixin on ResourceHandler, that inherits its
attributes and basic method from BigMLConnection, and must not be
instantiated independently.
"""
self.optiml_url = self.url + OPTIML_PATH
def create_optiml(self, datasets,
args=None, wait_time=3, retries=10):
"""Creates an optiml from a `dataset`
of a list o `datasets`.
"""
create_args = self._set_create_from_datasets_args(
datasets, args=args, wait_time=wait_time, retries=retries)
body = json.dumps(create_args)
return self._create(self.optiml_url, body)
def get_optiml(self, optiml, query_string='',
shared_username=None, shared_api_key=None):
"""Retrieves an optiml.
The model parameter should be a string containing the
optiml id or the dict returned by
create_optiml.
As an optiml is an evolving object that is processed
until it reaches the FINISHED or FAULTY state, the function will
return a dict that encloses the optiml
values and state info available at the time it is called.
If this is a shared optiml, the username and
sharing api key must also be provided.
"""
check_resource_type(optiml, OPTIML_PATH,
message="An optiml id is needed.")
optiml_id = get_optiml_id(optiml)
if optiml_id:
return self._get("%s%s" % (self.url, optiml_id),
query_string=query_string,
shared_username=shared_username,
shared_api_key=shared_api_key)
def optiml_is_ready(self, optiml, **kwargs):
"""Checks whether an optiml's status is FINISHED.
"""
check_resource_type(optiml, OPTIML_PATH,
message="An optiml id is needed.")
resource = self.get_optiml(optiml, **kwargs)
return resource_is_ready(resource)
def list_optimls(self, query_string=''):
"""Lists all your optimls.
"""
return self._list(self.optiml_url, query_string)
def update_optiml(self, optiml, changes):
"""Updates an optiml.
"""
check_resource_type(optiml, OPTIML_PATH,
message="An optiml id is needed.")
optiml_id = get_optiml_id(optiml)
if optiml_id:
body = json.dumps(changes)
return self._update(
"%s%s" % (self.url, optiml_id), body)
def delete_optiml(self, optiml):
"""Deletes an optiml.
"""
check_resource_type(optiml, OPTIML_PATH,
message="An optiml id is needed.")
optiml_id = get_optiml_id(optiml)
if optiml_id:
return self._delete("%s%s" % (self.url, optiml_id)) | 0.668664 | 0.275742 |
import numpy as np
import MPC
import FootstepPlanner
from multiprocessing import Process, Value, Array
class MPC_Wrapper:
"""Wrapper to run FootstepPlanner + MPC on another process
Args:
dt (float): Time step of the MPC
n_steps (int): Number of time steps in one gait cycle
k_mpc (int): Number of inv dyn time step for one iteration of the MPC
T_gait (float): Duration of one period of gait
multiprocessing (bool): Enable/Disable running the MPC with another process
"""
def __init__(self, dt, n_steps, k_mpc, T_gait, multiprocessing=False):
self.f_applied = np.zeros((12,))
self.not_first_iter = False
# Number of TSID steps for 1 step of the MPC
self.k_mpc = k_mpc
self.multiprocessing = multiprocessing
if multiprocessing:
self.newData = Value('b', False)
self.newResult = Value('b', False)
self.dataIn = Array('d', [0.0] * 328)
self.dataOut = Array('d', [0] * 12)
self.fsteps_future = np.zeros((20, 13))
else:
# Create the new version of the MPC solver object
self.mpc = MPC.MPC(dt, n_steps, T_gait)
def solve(self, k, fstep_planner):
"""Call either the asynchronous MPC or the synchronous MPC depending on the value of multiprocessing during
the creation of the wrapper
Args:
k (int): Number of inv dynamics iterations since the start of the simulation
fstep_planner (object): FootstepPlanner object of the control loop
"""
if self.multiprocessing:
# TODO: Adapt asynchronous for lower number of parameters
raise("Error: Asynchronous MPC is not up to date")
# self.run_MPC_asynchronous(dt, n_steps, k, T_gait, joystick, fstep_planner, interface)
else:
self.run_MPC_synchronous(k, fstep_planner)
return 0
def get_latest_result(self):
"""Return the desired contact forces that have been computed by the last iteration of the MPC
Args:
"""
if (self.not_first_iter):
if self.multiprocessing:
if self.newResult.value:
self.newResult.value = False
# Retrieve desired contact forces with through the memory shared with the asynchronous
return self.convert_dataOut()
else:
raise ValueError("Error: something went wrong with the MPC, result not available.")
else:
# Directly retrieve desired contact force of the synchronous MPC object
return self.mpc.f_applied
else:
# Default forces for the first iteration
self.not_first_iter = True
return np.array([0.0, 0.0, 8.0] * 4)
def run_MPC_synchronous(self, k, fstep_planner):
"""Run the MPC (synchronous version) to get the desired contact forces for the feet currently in stance phase
Args:
k (int): Number of inv dynamics iterations since the start of the simulation
fstep_planner (object): FootstepPlanner object of the control loop
"""
# Run the MPC to get the reference forces and the next predicted state
# Result is stored in mpc.f_applied, mpc.q_next, mpc.v_next
"""print(dt, n_steps, k, T_gait)
print(np.round(interface.lC.ravel(), decimals=2))
print(np.round(interface.abg.ravel(), decimals=2))
print(np.round(interface.lV.ravel(), decimals=2))
print(np.round(interface.lW.ravel(), decimals=2))
print(interface.l_feet.ravel())
print(joystick.v_ref.ravel())
print(fstep_planner.fsteps)"""
if k > 1900:
deb=1
self.mpc.run((k/self.k_mpc), fstep_planner.xref, fstep_planner.fsteps)
"""tmp_lC = interface.lC.copy()
tmp_lC[2, 0] += dt * interface.lV[2, 0]
tmp_abg = interface.abg + dt * interface.lW
tmp_abg[2, 0] = 0.0
tmp_lfeet = interface.l_feet - dt * interface.lV
tmp_xref = fstep_planner.xref.copy()
tmp_xref """
# Output of the MPC
self.f_applied = self.mpc.f_applied
def run_MPC_asynchronous(self, dt, n_steps, k, T_gait, joystick, fstep_planner, interface):
"""Run the MPC (asynchronous version) to get the desired contact forces for the feet currently in stance phase
Args:
dt (float): Time step of the MPC
n_steps (int): Number of time steps in one gait cycle
k (int): Number of inv dynamics iterations since the start of the simulation
T_gait (float): duration of one period of gait
joystick (object): interface with the gamepad
fstep_planner (object): FootstepPlanner object of the control loop
interface (object): Interface object of the control loop
"""
# If this is the first iteration, creation of the parallel process
if (k == 0):
p = Process(target=self.create_MPC_asynchronous, args=(self.newData, self.newResult, self.dataIn, self.dataOut))
p.start()
# print("Setting Data")
self.compress_dataIn(dt, n_steps, k, T_gait, joystick, fstep_planner, interface)
"""print("Sending")
print(dt, n_steps, k, T_gait)
print(interface.lC.ravel())
print(interface.abg.ravel())
print(interface.lV.ravel())
print(interface.lW.ravel())
print(interface.l_feet.ravel())
print(joystick.v_ref.ravel())
print(fstep_planner.fsteps)"""
self.newData.value = True
return 0
def create_MPC_asynchronous(self, newData, newResult, dataIn, dataOut):
"""Parallel process with an infinite loop that run the asynchronous MPC
Args:
newData (Value): shared variable that is true if new data is available, false otherwise
newResult (Value): shared variable that is true if a new result is available, false otherwise
dataIn (Array): shared array that contains the data the asynchronous MPC will use as inputs
dataOut (Array): shared array that contains the result of the asynchronous MPC
"""
# print("Entering infinite loop")
while True:
# Checking if new data is available to trigger the asynchronous MPC
if newData.value:
# Set the shared variable to false to avoid re-trigering the asynchronous MPC
newData.value = False
# print("New data detected")
# Retrieve data thanks to the decompression function and reshape it
dt, nsteps, k, T_gait, lC, abg, lV, lW, l_feet, xref, x0, v_ref, fsteps = self.decompress_dataIn(dataIn)
#print("Receiving")
dt = dt[0]
nsteps = np.int(nsteps[0])
k = k[0]
T_gait = T_gait[0]
lC = np.reshape(lC, (3, 1))
abg = np.reshape(abg, (3, 1))
lV = np.reshape(lV, (3, 1))
lW = np.reshape(lW, (3, 1))
l_feet = np.reshape(l_feet, (3, 4))
xref = np.reshape(xref, (12, nsteps+1))
x0 = np.reshape(x0, (12, 1))
v_ref = np.reshape(v_ref, (6, 1))
fsteps = np.reshape(fsteps, (20, 13))
"""print(dt, nsteps, k, T_gait)
print(lC.ravel())
print(abg.ravel())
print(lV.ravel())
print(lW.ravel())
print(l_feet.ravel())
print(v_ref.ravel())
print(fsteps)"""
# Create the MPC object of the parallel process during the first iteration
if k == 0:
loop_mpc = MPC.MPC(dt, nsteps)
# Run the asynchronous MPC with the data that as been retrieved
loop_mpc.run((k/self.k_mpc), T_gait, lC, abg, lV, lW,
l_feet, xref, x0, v_ref, fsteps)
# Store the result (desired forces) in the shared memory
self.dataOut[:] = loop_mpc.f_applied.tolist()
# Set shared variable to true to signal that a new result is available
newResult.value = True
return 0
def compress_dataIn(self, dt, n_steps, k, T_gait, joystick, fstep_planner, interface):
"""Compress data in a single C-type array that belongs to the shared memory to send data from the main control
loop to the asynchronous MPC
Args:
dt (float): Time step of the MPC
n_steps (int): Number of time steps in one gait cycle
k (int): Number of inv dynamics iterations since the start of the simulation
T_gait (float): duration of one period of gait
joystick (object): interface with the gamepad
fstep_planner (object): FootstepPlanner object of the control loop
interface (object): Interface object of the control loop
"""
# print("Compressing dataIn")
# Replace NaN values by 0.0 to be stored in C-type arrays
fstep_planner.fsteps[np.isnan(fstep_planner.fsteps)] = 0.0
# Compress data in the shared input array
self.dataIn[:] = np.concatenate([[dt, n_steps, k, T_gait], np.array(interface.lC).ravel(), np.array(interface.abg).ravel(),
np.array(interface.lV).ravel(), np.array(interface.lW).ravel(), np.array(interface.l_feet).ravel(), fstep_planner.xref.ravel(), fstep_planner.x0.ravel(), joystick.v_ref.ravel(),
fstep_planner.fsteps.ravel()], axis=0)
return 0.0
def decompress_dataIn(self, dataIn):
"""Decompress data from a single C-type array that belongs to the shared memory to retrieve data from the main control
loop in the asynchronous MPC
Args:
dataIn (Array): shared array that contains the data the asynchronous MPC will use as inputs
"""
# print("Decompressing dataIn")
# Sizes of the different variables that are stored in the C-type array
sizes = [0, 1, 1, 1, 1, 3, 3, 3, 3, 12, (np.int(dataIn[1])+1) * 12, 12, 6, 13*20]
csizes = np.cumsum(sizes)
# Return decompressed variables in a list
return [dataIn[csizes[i]:csizes[i+1]] for i in range(len(sizes)-1)]
def convert_dataOut(self):
"""Return the result of the asynchronous MPC (desired contact forces) that is stored in the shared memory
"""
return np.array(self.dataOut[:])
def roll_asynchronous(self, fsteps):
"""Move one step further in the gait cycle. Since the output of the asynchronous MPC is retrieved by
TSID during the next call to the MPC, it should not work with the current state of the gait but with the
gait on step into the future. That way, when TSID retrieves the result, it is consistent with the current
state of the gait.
Decrease by 1 the number of remaining step for the current phase of the gait and increase
by 1 the number of remaining step for the last phase of the gait (periodic motion).
Simplification: instead of creating a new phase if required (see roll function of FootstepPlanner) we always
increase the last one by 1 step. That way we don't need to call other functions to predict the position of
footstep when a new phase is created.
"""
self.fsteps_future = fsteps.copy()
# Index of the first empty line
index = next((idx for idx, val in np.ndenumerate(self.fsteps_future[:, 0]) if val==0.0), 0.0)[0]
# Create a new phase if needed or increase the last one by 1 step
self.fsteps_future[index-1, 0] += 1.0
# Decrease the current phase by 1 step and delete it if it has ended
if self.fsteps_future[0, 0] > 1.0:
self.fsteps_future[0, 0] -= 1.0
else:
self.fsteps_future = np.roll(self.fsteps_future, -1, axis=0)
self.fsteps_future[-1, :] = np.zeros((13, ))
return 0 | MPC_Wrapper.py |
import numpy as np
import MPC
import FootstepPlanner
from multiprocessing import Process, Value, Array
class MPC_Wrapper:
"""Wrapper to run FootstepPlanner + MPC on another process
Args:
dt (float): Time step of the MPC
n_steps (int): Number of time steps in one gait cycle
k_mpc (int): Number of inv dyn time step for one iteration of the MPC
T_gait (float): Duration of one period of gait
multiprocessing (bool): Enable/Disable running the MPC with another process
"""
def __init__(self, dt, n_steps, k_mpc, T_gait, multiprocessing=False):
self.f_applied = np.zeros((12,))
self.not_first_iter = False
# Number of TSID steps for 1 step of the MPC
self.k_mpc = k_mpc
self.multiprocessing = multiprocessing
if multiprocessing:
self.newData = Value('b', False)
self.newResult = Value('b', False)
self.dataIn = Array('d', [0.0] * 328)
self.dataOut = Array('d', [0] * 12)
self.fsteps_future = np.zeros((20, 13))
else:
# Create the new version of the MPC solver object
self.mpc = MPC.MPC(dt, n_steps, T_gait)
def solve(self, k, fstep_planner):
"""Call either the asynchronous MPC or the synchronous MPC depending on the value of multiprocessing during
the creation of the wrapper
Args:
k (int): Number of inv dynamics iterations since the start of the simulation
fstep_planner (object): FootstepPlanner object of the control loop
"""
if self.multiprocessing:
# TODO: Adapt asynchronous for lower number of parameters
raise("Error: Asynchronous MPC is not up to date")
# self.run_MPC_asynchronous(dt, n_steps, k, T_gait, joystick, fstep_planner, interface)
else:
self.run_MPC_synchronous(k, fstep_planner)
return 0
def get_latest_result(self):
"""Return the desired contact forces that have been computed by the last iteration of the MPC
Args:
"""
if (self.not_first_iter):
if self.multiprocessing:
if self.newResult.value:
self.newResult.value = False
# Retrieve desired contact forces with through the memory shared with the asynchronous
return self.convert_dataOut()
else:
raise ValueError("Error: something went wrong with the MPC, result not available.")
else:
# Directly retrieve desired contact force of the synchronous MPC object
return self.mpc.f_applied
else:
# Default forces for the first iteration
self.not_first_iter = True
return np.array([0.0, 0.0, 8.0] * 4)
def run_MPC_synchronous(self, k, fstep_planner):
"""Run the MPC (synchronous version) to get the desired contact forces for the feet currently in stance phase
Args:
k (int): Number of inv dynamics iterations since the start of the simulation
fstep_planner (object): FootstepPlanner object of the control loop
"""
# Run the MPC to get the reference forces and the next predicted state
# Result is stored in mpc.f_applied, mpc.q_next, mpc.v_next
"""print(dt, n_steps, k, T_gait)
print(np.round(interface.lC.ravel(), decimals=2))
print(np.round(interface.abg.ravel(), decimals=2))
print(np.round(interface.lV.ravel(), decimals=2))
print(np.round(interface.lW.ravel(), decimals=2))
print(interface.l_feet.ravel())
print(joystick.v_ref.ravel())
print(fstep_planner.fsteps)"""
if k > 1900:
deb=1
self.mpc.run((k/self.k_mpc), fstep_planner.xref, fstep_planner.fsteps)
"""tmp_lC = interface.lC.copy()
tmp_lC[2, 0] += dt * interface.lV[2, 0]
tmp_abg = interface.abg + dt * interface.lW
tmp_abg[2, 0] = 0.0
tmp_lfeet = interface.l_feet - dt * interface.lV
tmp_xref = fstep_planner.xref.copy()
tmp_xref """
# Output of the MPC
self.f_applied = self.mpc.f_applied
def run_MPC_asynchronous(self, dt, n_steps, k, T_gait, joystick, fstep_planner, interface):
"""Run the MPC (asynchronous version) to get the desired contact forces for the feet currently in stance phase
Args:
dt (float): Time step of the MPC
n_steps (int): Number of time steps in one gait cycle
k (int): Number of inv dynamics iterations since the start of the simulation
T_gait (float): duration of one period of gait
joystick (object): interface with the gamepad
fstep_planner (object): FootstepPlanner object of the control loop
interface (object): Interface object of the control loop
"""
# If this is the first iteration, creation of the parallel process
if (k == 0):
p = Process(target=self.create_MPC_asynchronous, args=(self.newData, self.newResult, self.dataIn, self.dataOut))
p.start()
# print("Setting Data")
self.compress_dataIn(dt, n_steps, k, T_gait, joystick, fstep_planner, interface)
"""print("Sending")
print(dt, n_steps, k, T_gait)
print(interface.lC.ravel())
print(interface.abg.ravel())
print(interface.lV.ravel())
print(interface.lW.ravel())
print(interface.l_feet.ravel())
print(joystick.v_ref.ravel())
print(fstep_planner.fsteps)"""
self.newData.value = True
return 0
def create_MPC_asynchronous(self, newData, newResult, dataIn, dataOut):
"""Parallel process with an infinite loop that run the asynchronous MPC
Args:
newData (Value): shared variable that is true if new data is available, false otherwise
newResult (Value): shared variable that is true if a new result is available, false otherwise
dataIn (Array): shared array that contains the data the asynchronous MPC will use as inputs
dataOut (Array): shared array that contains the result of the asynchronous MPC
"""
# print("Entering infinite loop")
while True:
# Checking if new data is available to trigger the asynchronous MPC
if newData.value:
# Set the shared variable to false to avoid re-trigering the asynchronous MPC
newData.value = False
# print("New data detected")
# Retrieve data thanks to the decompression function and reshape it
dt, nsteps, k, T_gait, lC, abg, lV, lW, l_feet, xref, x0, v_ref, fsteps = self.decompress_dataIn(dataIn)
#print("Receiving")
dt = dt[0]
nsteps = np.int(nsteps[0])
k = k[0]
T_gait = T_gait[0]
lC = np.reshape(lC, (3, 1))
abg = np.reshape(abg, (3, 1))
lV = np.reshape(lV, (3, 1))
lW = np.reshape(lW, (3, 1))
l_feet = np.reshape(l_feet, (3, 4))
xref = np.reshape(xref, (12, nsteps+1))
x0 = np.reshape(x0, (12, 1))
v_ref = np.reshape(v_ref, (6, 1))
fsteps = np.reshape(fsteps, (20, 13))
"""print(dt, nsteps, k, T_gait)
print(lC.ravel())
print(abg.ravel())
print(lV.ravel())
print(lW.ravel())
print(l_feet.ravel())
print(v_ref.ravel())
print(fsteps)"""
# Create the MPC object of the parallel process during the first iteration
if k == 0:
loop_mpc = MPC.MPC(dt, nsteps)
# Run the asynchronous MPC with the data that as been retrieved
loop_mpc.run((k/self.k_mpc), T_gait, lC, abg, lV, lW,
l_feet, xref, x0, v_ref, fsteps)
# Store the result (desired forces) in the shared memory
self.dataOut[:] = loop_mpc.f_applied.tolist()
# Set shared variable to true to signal that a new result is available
newResult.value = True
return 0
def compress_dataIn(self, dt, n_steps, k, T_gait, joystick, fstep_planner, interface):
"""Compress data in a single C-type array that belongs to the shared memory to send data from the main control
loop to the asynchronous MPC
Args:
dt (float): Time step of the MPC
n_steps (int): Number of time steps in one gait cycle
k (int): Number of inv dynamics iterations since the start of the simulation
T_gait (float): duration of one period of gait
joystick (object): interface with the gamepad
fstep_planner (object): FootstepPlanner object of the control loop
interface (object): Interface object of the control loop
"""
# print("Compressing dataIn")
# Replace NaN values by 0.0 to be stored in C-type arrays
fstep_planner.fsteps[np.isnan(fstep_planner.fsteps)] = 0.0
# Compress data in the shared input array
self.dataIn[:] = np.concatenate([[dt, n_steps, k, T_gait], np.array(interface.lC).ravel(), np.array(interface.abg).ravel(),
np.array(interface.lV).ravel(), np.array(interface.lW).ravel(), np.array(interface.l_feet).ravel(), fstep_planner.xref.ravel(), fstep_planner.x0.ravel(), joystick.v_ref.ravel(),
fstep_planner.fsteps.ravel()], axis=0)
return 0.0
def decompress_dataIn(self, dataIn):
"""Decompress data from a single C-type array that belongs to the shared memory to retrieve data from the main control
loop in the asynchronous MPC
Args:
dataIn (Array): shared array that contains the data the asynchronous MPC will use as inputs
"""
# print("Decompressing dataIn")
# Sizes of the different variables that are stored in the C-type array
sizes = [0, 1, 1, 1, 1, 3, 3, 3, 3, 12, (np.int(dataIn[1])+1) * 12, 12, 6, 13*20]
csizes = np.cumsum(sizes)
# Return decompressed variables in a list
return [dataIn[csizes[i]:csizes[i+1]] for i in range(len(sizes)-1)]
def convert_dataOut(self):
"""Return the result of the asynchronous MPC (desired contact forces) that is stored in the shared memory
"""
return np.array(self.dataOut[:])
def roll_asynchronous(self, fsteps):
"""Move one step further in the gait cycle. Since the output of the asynchronous MPC is retrieved by
TSID during the next call to the MPC, it should not work with the current state of the gait but with the
gait on step into the future. That way, when TSID retrieves the result, it is consistent with the current
state of the gait.
Decrease by 1 the number of remaining step for the current phase of the gait and increase
by 1 the number of remaining step for the last phase of the gait (periodic motion).
Simplification: instead of creating a new phase if required (see roll function of FootstepPlanner) we always
increase the last one by 1 step. That way we don't need to call other functions to predict the position of
footstep when a new phase is created.
"""
self.fsteps_future = fsteps.copy()
# Index of the first empty line
index = next((idx for idx, val in np.ndenumerate(self.fsteps_future[:, 0]) if val==0.0), 0.0)[0]
# Create a new phase if needed or increase the last one by 1 step
self.fsteps_future[index-1, 0] += 1.0
# Decrease the current phase by 1 step and delete it if it has ended
if self.fsteps_future[0, 0] > 1.0:
self.fsteps_future[0, 0] -= 1.0
else:
self.fsteps_future = np.roll(self.fsteps_future, -1, axis=0)
self.fsteps_future[-1, :] = np.zeros((13, ))
return 0 | 0.652795 | 0.348673 |
from __future__ import unicode_literals, division, absolute_import, print_function
import os
import subprocess
import sys
import shutil
import re
import json
import tarfile
import zipfile
from . import package_root, build_root, other_packages
from ._pep425 import _pep425tags, _pep425_implementation
if sys.version_info < (3,):
str_cls = unicode # noqa
else:
str_cls = str
def run():
"""
Installs required development dependencies. Uses git to checkout other
modularcrypto repos for more accurate coverage data.
"""
deps_dir = os.path.join(build_root, 'modularcrypto-deps')
if os.path.exists(deps_dir):
shutil.rmtree(deps_dir, ignore_errors=True)
os.mkdir(deps_dir)
try:
print("Staging ci dependencies")
_stage_requirements(deps_dir, os.path.join(package_root, 'requires', 'ci'))
print("Checking out modularcrypto packages for coverage")
for other_package in other_packages:
pkg_url = 'https://github.com/wbond/%s.git' % other_package
pkg_dir = os.path.join(build_root, other_package)
if os.path.exists(pkg_dir):
print("%s is already present" % other_package)
continue
print("Cloning %s" % pkg_url)
_execute(['git', 'clone', pkg_url], build_root)
print()
except (Exception):
if os.path.exists(deps_dir):
shutil.rmtree(deps_dir, ignore_errors=True)
raise
return True
def _download(url, dest):
"""
Downloads a URL to a directory
:param url:
The URL to download
:param dest:
The path to the directory to save the file in
:return:
The filesystem path to the saved file
"""
print('Downloading %s' % url)
filename = os.path.basename(url)
dest_path = os.path.join(dest, filename)
if sys.platform == 'win32':
powershell_exe = os.path.join('system32\\WindowsPowerShell\\v1.0\\powershell.exe')
code = "[System.Net.ServicePointManager]::SecurityProtocol = [System.Net.SecurityProtocolType]::Tls12;"
code += "(New-Object Net.WebClient).DownloadFile('%s', '%s');" % (url, dest_path)
_execute([powershell_exe, '-Command', code], dest)
else:
_execute(['curl', '-L', '--silent', '--show-error', '-O', url], dest)
return dest_path
def _tuple_from_ver(version_string):
"""
:param version_string:
A unicode dotted version string
:return:
A tuple of integers
"""
return tuple(map(int, version_string.split('.')))
def _open_archive(path):
"""
:param path:
A unicode string of the filesystem path to the archive
:return:
An archive object
"""
if path.endswith('.zip'):
return zipfile.ZipFile(path, 'r')
return tarfile.open(path, 'r')
def _list_archive_members(archive):
"""
:param archive:
An archive from _open_archive()
:return:
A list of info objects to be used with _info_name() and _extract_info()
"""
if isinstance(archive, zipfile.ZipFile):
return archive.infolist()
return archive.getmembers()
def _archive_single_dir(archive):
"""
Check if all members of the archive are in a single top-level directory
:param archive:
An archive from _open_archive()
:return:
None if not a single top level directory in archive, otherwise a
unicode string of the top level directory name
"""
common_root = None
for info in _list_archive_members(archive):
fn = _info_name(info)
if fn in set(['.', '/']):
continue
sep = None
if '/' in fn:
sep = '/'
elif '\\' in fn:
sep = '\\'
if sep is None:
root_dir = fn
else:
root_dir, _ = fn.split(sep, 1)
if common_root is None:
common_root = root_dir
else:
if common_root != root_dir:
return None
return common_root
def _info_name(info):
"""
Returns a normalized file path for an archive info object
:param info:
An info object from _list_archive_members()
:return:
A unicode string with all directory separators normalized to "/"
"""
if isinstance(info, zipfile.ZipInfo):
return info.filename.replace('\\', '/')
return info.name.replace('\\', '/')
def _extract_info(archive, info):
"""
Extracts the contents of an archive info object
;param archive:
An archive from _open_archive()
:param info:
An info object from _list_archive_members()
:return:
None, or a byte string of the file contents
"""
if isinstance(archive, zipfile.ZipFile):
fn = info.filename
is_dir = fn.endswith('/') or fn.endswith('\\')
out = archive.read(info)
if is_dir and out == b'':
return None
return out
info_file = archive.extractfile(info)
if info_file:
return info_file.read()
return None
def _extract_package(deps_dir, pkg_path):
"""
Extract a .whl, .zip, .tar.gz or .tar.bz2 into a package path to
use when running CI tasks
:param deps_dir:
A unicode string of the directory the package should be extracted to
:param pkg_path:
A unicode string of the path to the archive
"""
if pkg_path.endswith('.exe'):
try:
zf = None
zf = zipfile.ZipFile(pkg_path, 'r')
# Exes have a PLATLIB folder containing everything we want
for zi in zf.infolist():
if not zi.filename.startswith('PLATLIB'):
continue
data = _extract_info(zf, zi)
if data is not None:
dst_path = os.path.join(deps_dir, zi.filename[8:])
dst_dir = os.path.dirname(dst_path)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
with open(dst_path, 'wb') as f:
f.write(data)
finally:
if zf:
zf.close()
return
if pkg_path.endswith('.whl'):
try:
zf = None
zf = zipfile.ZipFile(pkg_path, 'r')
# Wheels contain exactly what we need and nothing else
zf.extractall(deps_dir)
finally:
if zf:
zf.close()
return
# Source archives may contain a bunch of other things.
# The following code works for the packages coverage and
# configparser, which are the two we currently require that
# do not provide wheels
try:
ar = None
ar = _open_archive(pkg_path)
pkg_name = None
base_path = _archive_single_dir(ar) or ''
if len(base_path):
if '-' in base_path:
pkg_name, _ = base_path.split('-', 1)
base_path += '/'
base_pkg_path = None
if pkg_name is not None:
base_pkg_path = base_path + pkg_name + '/'
src_path = base_path + 'src/'
members = []
for info in _list_archive_members(ar):
fn = _info_name(info)
if base_pkg_path is not None and fn.startswith(base_pkg_path):
dst_path = fn[len(base_pkg_path) - len(pkg_name) - 1:]
members.append((info, dst_path))
continue
if fn.startswith(src_path):
members.append((info, fn[len(src_path):]))
continue
for info, path in members:
info_data = _extract_info(ar, info)
# Dirs won't return a file
if info_data is not None:
dst_path = os.path.join(deps_dir, path)
dst_dir = os.path.dirname(dst_path)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
with open(dst_path, 'wb') as f:
f.write(info_data)
finally:
if ar:
ar.close()
def _stage_requirements(deps_dir, path):
"""
Installs requirements without using Python to download, since
different services are limiting to TLS 1.2, and older version of
Python do not support that
:param deps_dir:
A unicode path to a temporary diretory to use for downloads
:param path:
A unicoe filesystem path to a requirements file
"""
valid_tags = _pep425tags()
exe_suffix = None
if sys.platform == 'win32' and _pep425_implementation() == 'cp':
win_arch = 'win32' if sys.maxsize == 2147483647 else 'win-amd64'
version_info = sys.version_info
exe_suffix = '.%s-py%d.%d.exe' % (win_arch, version_info[0], version_info[1])
packages = _parse_requires(path)
for p in packages:
pkg = p['pkg']
if p['type'] == 'url':
if pkg.endswith('.zip') or pkg.endswith('.tar.gz') or pkg.endswith('.tar.bz2') or pkg.endswith('.whl'):
url = pkg
else:
raise Exception('Unable to install package from URL that is not an archive')
else:
pypi_json_url = 'https://pypi.org/pypi/%s/json' % pkg
json_dest = _download(pypi_json_url, deps_dir)
with open(json_dest, 'rb') as f:
pkg_info = json.loads(f.read().decode('utf-8'))
if os.path.exists(json_dest):
os.remove(json_dest)
latest = pkg_info['info']['version']
if p['type'] == '>=':
if _tuple_from_ver(p['ver']) > _tuple_from_ver(latest):
raise Exception('Unable to find version %s of %s, newest is %s' % (p['ver'], pkg, latest))
version = latest
elif p['type'] == '==':
if p['ver'] not in pkg_info['releases']:
raise Exception('Unable to find version %s of %s' % (p['ver'], pkg))
version = p['ver']
else:
version = latest
wheels = {}
whl = None
tar_bz2 = None
tar_gz = None
exe = None
for download in pkg_info['releases'][version]:
if exe_suffix and download['url'].endswith(exe_suffix):
exe = download['url']
if download['url'].endswith('.whl'):
parts = os.path.basename(download['url']).split('-')
tag_impl = parts[-3]
tag_abi = parts[-2]
tag_arch = parts[-1].split('.')[0]
wheels[(tag_impl, tag_abi, tag_arch)] = download['url']
if download['url'].endswith('.tar.bz2'):
tar_bz2 = download['url']
if download['url'].endswith('.tar.gz'):
tar_gz = download['url']
# Find the most-specific wheel possible
for tag in valid_tags:
if tag in wheels:
whl = wheels[tag]
break
if exe_suffix and exe:
url = exe
elif whl:
url = whl
elif tar_bz2:
url = tar_bz2
elif tar_gz:
url = tar_gz
else:
raise Exception('Unable to find suitable download for %s' % pkg)
local_path = _download(url, deps_dir)
_extract_package(deps_dir, local_path)
os.remove(local_path)
def _parse_requires(path):
    """
    Does basic parsing of pip requirements files, to allow for
    using something other than Python to do actual TLS requests

    :param path:
        A path to a requirements file

    :return:
        A list of dict objects containing the keys:
         - 'type' ('any', 'url', '==', '>=')
         - 'pkg'
         - 'ver' (if 'type' == '==' or 'type' == '>=')
    """

    # Values substituted into environment-marker expressions below
    python_version = '.'.join(map(str_cls, sys.version_info[0:2]))
    sys_platform = sys.platform

    packages = []

    with open(path, 'rb') as f:
        contents = f.read().decode('utf-8')

    for line in re.split(r'\r?\n', contents):
        line = line.strip()
        if not len(line):
            continue
        # Skip comment lines
        if re.match(r'^\s*#', line):
            continue
        if ';' in line:
            # Environment marker, e.g. "pkg ; python_version < '3.0'"
            package, cond = line.split(';', 1)
            package = package.strip()
            cond = cond.strip()
            cond = cond.replace('sys_platform', repr(sys_platform))
            cond = cond.replace('python_version', repr(python_version))
            # NOTE(review): eval() of the marker expression -- acceptable for
            # developer-controlled requirements files, not for untrusted input
            if not eval(cond):
                continue
        else:
            package = line.strip()

        # Recurse into included requirements files ("-r other.txt")
        if re.match(r'^\s*-r\s*', package):
            sub_req_file = re.sub(r'^\s*-r\s*', '', package)
            sub_req_file = os.path.abspath(os.path.join(os.path.dirname(path), sub_req_file))
            packages.extend(_parse_requires(sub_req_file))
            continue

        if re.match(r'https?://', package):
            packages.append({'type': 'url', 'pkg': package})
            continue

        if '>=' in package:
            parts = package.split('>=')
            package = parts[0].strip()
            ver = parts[1].strip()
            packages.append({'type': '>=', 'pkg': package, 'ver': ver})
            continue

        if '==' in package:
            parts = package.split('==')
            package = parts[0].strip()
            ver = parts[1].strip()
            packages.append({'type': '==', 'pkg': package, 'ver': ver})
            continue

        # Bare package names may only contain letters, digits, dashes, spaces
        if re.search(r'[^ a-zA-Z0-9\-]', package):
            raise Exception('Unsupported requirements format version constraint: %s' % package)

        packages.append({'type': 'any', 'pkg': package})

    return packages
def _execute(params, cwd):
    """
    Executes a subprocess

    :param params:
        A list of the executable and arguments to pass to it

    :param cwd:
        The working directory to execute the command in

    :return:
        A 2-element tuple of (stdout, stderr)
    """

    proc = subprocess.Popen(
        params,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        cwd=cwd
    )
    stdout, stderr = proc.communicate()
    code = proc.wait()
    if code != 0:
        # Attach the captured output to the error so callers can inspect it
        e = OSError('subprocess exit code was non-zero')
        e.stdout = stdout
        e.stderr = stderr
        raise e
    return (stdout, stderr)


from __future__ import unicode_literals, division, absolute_import, print_function
import os
import subprocess
import sys
import shutil
import re
import json
import tarfile
import zipfile
from . import package_root, build_root, other_packages
from ._pep425 import _pep425tags, _pep425_implementation
# Python 2/3 compatibility shim: str_cls is always the text (unicode) string type
if sys.version_info < (3,):
    str_cls = unicode  # noqa
else:
    str_cls = str
def run():
    """
    Installs required development dependencies. Uses git to checkout other
    modularcrypto repos for more accurate coverage data.

    :return:
        True on success; any failure raises
    """

    deps_dir = os.path.join(build_root, 'modularcrypto-deps')
    # Always start from a clean staging directory
    if os.path.exists(deps_dir):
        shutil.rmtree(deps_dir, ignore_errors=True)
    os.mkdir(deps_dir)

    try:
        print("Staging ci dependencies")
        _stage_requirements(deps_dir, os.path.join(package_root, 'requires', 'ci'))

        print("Checking out modularcrypto packages for coverage")
        for other_package in other_packages:
            pkg_url = 'https://github.com/wbond/%s.git' % other_package
            pkg_dir = os.path.join(build_root, other_package)
            if os.path.exists(pkg_dir):
                print("%s is already present" % other_package)
                continue
            print("Cloning %s" % pkg_url)
            _execute(['git', 'clone', pkg_url], build_root)
        print()

    except (Exception):
        # Remove the partially-staged deps dir so the next run starts fresh,
        # then re-raise the original error
        if os.path.exists(deps_dir):
            shutil.rmtree(deps_dir, ignore_errors=True)
        raise

    return True
def _download(url, dest):
    """
    Downloads a URL to a directory

    :param url:
        The URL to download

    :param dest:
        The path to the directory to save the file in

    :return:
        The filesystem path to the saved file
    """

    print('Downloading %s' % url)
    filename = os.path.basename(url)
    dest_path = os.path.join(dest, filename)

    if sys.platform == 'win32':
        # Use PowerShell so TLS 1.2 can be forced -- older Python builds on
        # Windows cannot negotiate it themselves
        # NOTE(review): url/dest_path are interpolated into the PowerShell
        # command string; assumes URLs come from trusted requirements files
        powershell_exe = os.path.join('system32\\WindowsPowerShell\\v1.0\\powershell.exe')
        code = "[System.Net.ServicePointManager]::SecurityProtocol = [System.Net.SecurityProtocolType]::Tls12;"
        code += "(New-Object Net.WebClient).DownloadFile('%s', '%s');" % (url, dest_path)
        _execute([powershell_exe, '-Command', code], dest)
    else:
        # curl writes the file into dest via -O (remote filename)
        _execute(['curl', '-L', '--silent', '--show-error', '-O', url], dest)

    return dest_path
def _tuple_from_ver(version_string):
"""
:param version_string:
A unicode dotted version string
:return:
A tuple of integers
"""
return tuple(map(int, version_string.split('.')))
def _open_archive(path):
"""
:param path:
A unicode string of the filesystem path to the archive
:return:
An archive object
"""
if path.endswith('.zip'):
return zipfile.ZipFile(path, 'r')
return tarfile.open(path, 'r')
def _list_archive_members(archive):
"""
:param archive:
An archive from _open_archive()
:return:
A list of info objects to be used with _info_name() and _extract_info()
"""
if isinstance(archive, zipfile.ZipFile):
return archive.infolist()
return archive.getmembers()
def _archive_single_dir(archive):
    """
    Check if all members of the archive are in a single top-level directory

    :param archive:
        An archive from _open_archive()

    :return:
        None if not a single top level directory in archive, otherwise a
        unicode string of the top level directory name
    """

    top = None
    for member in _list_archive_members(archive):
        name = _info_name(member)
        # "." and "/" entries carry no path information
        if name in ('.', '/'):
            continue
        # Split on the first separator present -- "/" takes priority over "\"
        for candidate_sep in ('/', '\\'):
            if candidate_sep in name:
                root = name.split(candidate_sep, 1)[0]
                break
        else:
            root = name
        if top is None:
            top = root
        elif top != root:
            # Two different roots means no single top-level directory
            return None
    return top
def _info_name(info):
"""
Returns a normalized file path for an archive info object
:param info:
An info object from _list_archive_members()
:return:
A unicode string with all directory separators normalized to "/"
"""
if isinstance(info, zipfile.ZipInfo):
return info.filename.replace('\\', '/')
return info.name.replace('\\', '/')
def _extract_info(archive, info):
    """
    Extracts the contents of an archive info object

    :param archive:
        An archive from _open_archive()

    :param info:
        An info object from _list_archive_members()

    :return:
        None, or a byte string of the file contents
    """

    if isinstance(archive, zipfile.ZipFile):
        fn = info.filename
        is_dir = fn.endswith('/') or fn.endswith('\\')
        out = archive.read(info)
        # Directory entries have no contents
        if is_dir and out == b'':
            return None
        return out

    # tarfile: extractfile() returns None for members without file data
    # (e.g. directories)
    info_file = archive.extractfile(info)
    if info_file:
        return info_file.read()
    return None
def _extract_package(deps_dir, pkg_path):
    """
    Extract a .whl, .zip, .tar.gz or .tar.bz2 into a package path to
    use when running CI tasks

    :param deps_dir:
        A unicode string of the directory the package should be extracted to

    :param pkg_path:
        A unicode string of the path to the archive
    """

    if pkg_path.endswith('.exe'):
        try:
            zf = None
            zf = zipfile.ZipFile(pkg_path, 'r')
            # Exes have a PLATLIB folder containing everything we want
            for zi in zf.infolist():
                if not zi.filename.startswith('PLATLIB'):
                    continue
                data = _extract_info(zf, zi)
                if data is not None:
                    # Strip the "PLATLIB/" prefix (8 chars) from the member path
                    dst_path = os.path.join(deps_dir, zi.filename[8:])
                    dst_dir = os.path.dirname(dst_path)
                    if not os.path.exists(dst_dir):
                        os.makedirs(dst_dir)
                    with open(dst_path, 'wb') as f:
                        f.write(data)
        finally:
            if zf:
                zf.close()
        return

    if pkg_path.endswith('.whl'):
        try:
            zf = None
            zf = zipfile.ZipFile(pkg_path, 'r')
            # Wheels contain exactly what we need and nothing else
            zf.extractall(deps_dir)
        finally:
            if zf:
                zf.close()
        return

    # Source archives may contain a bunch of other things.
    # The following code works for the packages coverage and
    # configparser, which are the two we currently require that
    # do not provide wheels
    try:
        ar = None
        ar = _open_archive(pkg_path)

        pkg_name = None
        base_path = _archive_single_dir(ar) or ''
        if len(base_path):
            # Archive root dirs are conventionally "<pkg_name>-<version>"
            if '-' in base_path:
                pkg_name, _ = base_path.split('-', 1)
            base_path += '/'

        base_pkg_path = None
        if pkg_name is not None:
            base_pkg_path = base_path + pkg_name + '/'
        src_path = base_path + 'src/'

        # Collect (info, destination-relative path) pairs for files under
        # either "<root>/<pkg_name>/" or "<root>/src/"
        members = []
        for info in _list_archive_members(ar):
            fn = _info_name(info)
            if base_pkg_path is not None and fn.startswith(base_pkg_path):
                # Keep the trailing "<pkg_name>/..." portion as the dest path
                dst_path = fn[len(base_pkg_path) - len(pkg_name) - 1:]
                members.append((info, dst_path))
                continue
            if fn.startswith(src_path):
                members.append((info, fn[len(src_path):]))
                continue

        for info, path in members:
            info_data = _extract_info(ar, info)
            # Dirs won't return a file
            if info_data is not None:
                dst_path = os.path.join(deps_dir, path)
                dst_dir = os.path.dirname(dst_path)
                if not os.path.exists(dst_dir):
                    os.makedirs(dst_dir)
                with open(dst_path, 'wb') as f:
                    f.write(info_data)
    finally:
        if ar:
            ar.close()
def _stage_requirements(deps_dir, path):
    """
    Installs requirements without using Python to download, since
    different services are limiting to TLS 1.2, and older version of
    Python do not support that

    :param deps_dir:
        A unicode path to a temporary directory to use for downloads

    :param path:
        A unicode filesystem path to a requirements file
    """

    valid_tags = _pep425tags()

    exe_suffix = None
    if sys.platform == 'win32' and _pep425_implementation() == 'cp':
        # Only consider .exe installers for CPython on Windows
        win_arch = 'win32' if sys.maxsize == 2147483647 else 'win-amd64'
        version_info = sys.version_info
        exe_suffix = '.%s-py%d.%d.exe' % (win_arch, version_info[0], version_info[1])

    packages = _parse_requires(path)
    for p in packages:
        pkg = p['pkg']
        if p['type'] == 'url':
            # Direct URLs must point at an archive we know how to extract
            if pkg.endswith('.zip') or pkg.endswith('.tar.gz') or pkg.endswith('.tar.bz2') or pkg.endswith('.whl'):
                url = pkg
            else:
                raise Exception('Unable to install package from URL that is not an archive')
        else:
            # Resolve the version and download URL via the PyPI JSON API
            pypi_json_url = 'https://pypi.org/pypi/%s/json' % pkg
            json_dest = _download(pypi_json_url, deps_dir)
            with open(json_dest, 'rb') as f:
                pkg_info = json.loads(f.read().decode('utf-8'))
            if os.path.exists(json_dest):
                os.remove(json_dest)

            latest = pkg_info['info']['version']
            if p['type'] == '>=':
                if _tuple_from_ver(p['ver']) > _tuple_from_ver(latest):
                    raise Exception('Unable to find version %s of %s, newest is %s' % (p['ver'], pkg, latest))
                version = latest
            elif p['type'] == '==':
                if p['ver'] not in pkg_info['releases']:
                    raise Exception('Unable to find version %s of %s' % (p['ver'], pkg))
                version = p['ver']
            else:
                version = latest

            wheels = {}
            whl = None
            tar_bz2 = None
            tar_gz = None
            exe = None
            for download in pkg_info['releases'][version]:
                if exe_suffix and download['url'].endswith(exe_suffix):
                    exe = download['url']
                if download['url'].endswith('.whl'):
                    # Wheel filenames end in "-<impl>-<abi>-<arch>.whl"
                    parts = os.path.basename(download['url']).split('-')
                    tag_impl = parts[-3]
                    tag_abi = parts[-2]
                    tag_arch = parts[-1].split('.')[0]
                    wheels[(tag_impl, tag_abi, tag_arch)] = download['url']
                if download['url'].endswith('.tar.bz2'):
                    tar_bz2 = download['url']
                if download['url'].endswith('.tar.gz'):
                    tar_gz = download['url']

            # Find the most-specific wheel possible
            for tag in valid_tags:
                if tag in wheels:
                    whl = wheels[tag]
                    break

            # Preference order: exe installer, wheel, then source archives
            if exe_suffix and exe:
                url = exe
            elif whl:
                url = whl
            elif tar_bz2:
                url = tar_bz2
            elif tar_gz:
                url = tar_gz
            else:
                raise Exception('Unable to find suitable download for %s' % pkg)

        local_path = _download(url, deps_dir)

        _extract_package(deps_dir, local_path)

        os.remove(local_path)
def _parse_requires(path):
"""
Does basic parsing of pip requirements files, to allow for
using something other than Python to do actual TLS requests
:param path:
A path to a requirements file
:return:
A list of dict objects containing the keys:
- 'type' ('any', 'url', '==', '>=')
- 'pkg'
- 'ver' (if 'type' == '==' or 'type' == '>=')
"""
python_version = '.'.join(map(str_cls, sys.version_info[0:2]))
sys_platform = sys.platform
packages = []
with open(path, 'rb') as f:
contents = f.read().decode('utf-8')
for line in re.split(r'\r?\n', contents):
line = line.strip()
if not len(line):
continue
if re.match(r'^\s*#', line):
continue
if ';' in line:
package, cond = line.split(';', 1)
package = package.strip()
cond = cond.strip()
cond = cond.replace('sys_platform', repr(sys_platform))
cond = cond.replace('python_version', repr(python_version))
if not eval(cond):
continue
else:
package = line.strip()
if re.match(r'^\s*-r\s*', package):
sub_req_file = re.sub(r'^\s*-r\s*', '', package)
sub_req_file = os.path.abspath(os.path.join(os.path.dirname(path), sub_req_file))
packages.extend(_parse_requires(sub_req_file))
continue
if re.match(r'https?://', package):
packages.append({'type': 'url', 'pkg': package})
continue
if '>=' in package:
parts = package.split('>=')
package = parts[0].strip()
ver = parts[1].strip()
packages.append({'type': '>=', 'pkg': package, 'ver': ver})
continue
if '==' in package:
parts = package.split('==')
package = parts[0].strip()
ver = parts[1].strip()
packages.append({'type': '==', 'pkg': package, 'ver': ver})
continue
if re.search(r'[^ a-zA-Z0-9\-]', package):
raise Exception('Unsupported requirements format version constraint: %s' % package)
packages.append({'type': 'any', 'pkg': package})
return packages
def _execute(params, cwd):
"""
Executes a subprocess
:param params:
A list of the executable and arguments to pass to it
:param cwd:
The working directory to execute the command in
:return:
A 2-element tuple of (stdout, stderr)
"""
proc = subprocess.Popen(
params,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=cwd
)
stdout, stderr = proc.communicate()
code = proc.wait()
if code != 0:
e = OSError('subprocess exit code was non-zero')
e.stdout = stdout
e.stderr = stderr
raise e
return (stdout, stderr) | 0.441914 | 0.097005 |
import os
import random
import tempfile
import unittest
from asn1crypto import keys
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import ec
from loopchain.crypto.signature import Signer, SignVerifier, long_to_bytes
from tests.unit import test_util
class TestSignature(unittest.TestCase):
    """Checks that Signer/SignVerifier instances built from every supported
    key source (raw key bytes, encrypted DER file, encrypted PEM file, public
    key bytes/files) sign and verify interchangeably."""

    @classmethod
    def setUpClass(cls) -> None:
        # Generate one throwaway SECP256K1 key pair and serialize it to every
        # format the loopchain factory methods accept
        cls.temp_dir = tempfile.TemporaryDirectory()

        # Private Key
        cls.private_key = ec.generate_private_key(ec.SECP256K1(), default_backend())
        cls.private_der_path = os.path.join(cls.temp_dir.name, "private.der")
        with open(cls.private_der_path, "wb") as private_der_file:
            private_der_file.write(
                cls.private_key.private_bytes(
                    encoding=serialization.Encoding['DER'],
                    format=serialization.PrivateFormat['PKCS8'],
                    encryption_algorithm=serialization.BestAvailableEncryption(b"TEST")
                )
            )
        cls.private_pem_path = os.path.join(cls.temp_dir.name, "private.pem")
        with open(cls.private_pem_path, "wb") as private_pem_file:
            private_pem_file.write(
                cls.private_key.private_bytes(
                    encoding=serialization.Encoding['PEM'],
                    format=serialization.PrivateFormat['PKCS8'],
                    encryption_algorithm=serialization.BestAvailableEncryption(b"TEST")
                )
            )
        # Raw private scalar, extracted from an unencrypted PKCS8 DER dump
        key_info = keys.PrivateKeyInfo.load(cls.private_key.private_bytes(
            encoding=serialization.Encoding['DER'],
            format=serialization.PrivateFormat['PKCS8'],
            encryption_algorithm=serialization.NoEncryption()
        ))
        cls.private_key_bytes = long_to_bytes(key_info['private_key'].native['private_key'])

        # Public Key
        cls.public_key = cls.private_key.public_key()
        cls.public_der_path = os.path.join(cls.temp_dir.name, "public.der")
        with open(cls.public_der_path, "wb") as public_der_file:
            public_der_file.write(
                cls.public_key.public_bytes(
                    encoding=serialization.Encoding['DER'],
                    format=serialization.PublicFormat['SubjectPublicKeyInfo']
                )
            )
        cls.public_pem_path = os.path.join(cls.temp_dir.name, "public.pem")
        with open(cls.public_pem_path, "wb") as public_pem_file:
            public_pem_file.write(
                cls.public_key.public_bytes(
                    encoding=serialization.Encoding['PEM'],
                    format=serialization.PublicFormat['SubjectPublicKeyInfo']
                )
            )
        # Raw public key bytes from the SubjectPublicKeyInfo structure
        key_info = keys.PublicKeyInfo.load(
            cls.public_key.public_bytes(
                encoding=serialization.Encoding['DER'],
                format=serialization.PublicFormat['SubjectPublicKeyInfo']
            )
        )
        cls.public_key_bytes = key_info['public_key'].native

        # Signers/verifiers constructed from each key source; every test below
        # expects them to behave identically
        cls.signer_private_key_bytes = Signer.from_prikey(cls.private_key_bytes)
        cls.signer_private_key_der = Signer.from_prikey_file(cls.private_der_path, b"TEST")
        cls.signer_private_key_pem = Signer.from_prikey_file(cls.private_pem_path, b"TEST")

        cls.sign_verifier_private_key_bytes = SignVerifier.from_prikey(cls.private_key_bytes)
        cls.sign_verifier_private_key_der = SignVerifier.from_prikey_file(cls.private_der_path, b"TEST")
        cls.sign_verifier_private_key_pem = SignVerifier.from_prikey_file(cls.private_pem_path, b"TEST")

        cls.sign_verifier_public_key_bytes = SignVerifier.from_pubkey(cls.public_key_bytes)
        cls.sign_verifier_public_key_der = SignVerifier.from_pubkey_file(cls.public_der_path)
        cls.sign_verifier_public_key_pem = SignVerifier.from_pubkey_file(cls.public_pem_path)

    @classmethod
    def tearDownClass(cls) -> None:
        # Remove the temporary key files
        cls.temp_dir.cleanup()

    def setUp(self):
        test_util.print_testname(self._testMethodName)

    def test_signer_and_sign_verifier_hash_verification_success_result_equal(self):
        # A signature over a 32-byte hash must verify with every signer/verifier
        hash_data = os.urandom(32)
        signature = self.signer_private_key_bytes.sign_hash(hash_data)
        self.signer_private_key_bytes.verify_hash(hash_data, signature)
        self.signer_private_key_der.verify_hash(hash_data, signature)
        self.signer_private_key_pem.verify_hash(hash_data, signature)
        self.sign_verifier_private_key_bytes.verify_hash(hash_data, signature)
        self.sign_verifier_private_key_der.verify_hash(hash_data, signature)
        self.sign_verifier_private_key_pem.verify_hash(hash_data, signature)
        self.sign_verifier_public_key_bytes.verify_hash(hash_data, signature)
        self.sign_verifier_public_key_der.verify_hash(hash_data, signature)
        self.sign_verifier_public_key_pem.verify_hash(hash_data, signature)

    def test_signer_and_sign_verifier_data_verification_success_result_equal(self):
        # A signature over arbitrary-length data must verify with every
        # signer/verifier
        data = os.urandom(random.randrange(1, 1000))
        signature = self.signer_private_key_bytes.sign_data(data)
        self.signer_private_key_bytes.verify_data(data, signature)
        self.signer_private_key_der.verify_data(data, signature)
        self.signer_private_key_pem.verify_data(data, signature)
        self.sign_verifier_private_key_bytes.verify_data(data, signature)
        self.sign_verifier_private_key_der.verify_data(data, signature)
        self.sign_verifier_private_key_pem.verify_data(data, signature)
        self.sign_verifier_public_key_bytes.verify_data(data, signature)
        self.sign_verifier_public_key_der.verify_data(data, signature)
        self.sign_verifier_public_key_pem.verify_data(data, signature)

    def test_signer_and_sign_verifier_hash_verification_failure_result_equal(self):
        # Both a same-length and a random-length bogus signature must be
        # rejected by every signer/verifier
        hash_data = os.urandom(32)
        signature = self.signer_private_key_bytes.sign_hash(hash_data)
        invalid_signature0 = os.urandom(len(signature))
        invalid_signature1 = os.urandom(random.randrange(1, 1000))
        self.assertRaises(RuntimeError,
                          lambda: self.signer_private_key_bytes.verify_hash(hash_data, invalid_signature0))
        self.assertRaises(RuntimeError,
                          lambda: self.signer_private_key_bytes.verify_hash(hash_data, invalid_signature1))
        self.assertRaises(RuntimeError,
                          lambda: self.signer_private_key_der.verify_hash(hash_data, invalid_signature0))
        self.assertRaises(RuntimeError,
                          lambda: self.signer_private_key_der.verify_hash(hash_data, invalid_signature1))
        self.assertRaises(RuntimeError,
                          lambda: self.signer_private_key_pem.verify_hash(hash_data, invalid_signature0))
        self.assertRaises(RuntimeError,
                          lambda: self.signer_private_key_pem.verify_hash(hash_data, invalid_signature1))
        self.assertRaises(RuntimeError,
                          lambda: self.sign_verifier_private_key_bytes.verify_hash(hash_data, invalid_signature0))
        self.assertRaises(RuntimeError,
                          lambda: self.sign_verifier_private_key_bytes.verify_hash(hash_data, invalid_signature1))
        self.assertRaises(RuntimeError,
                          lambda: self.sign_verifier_private_key_der.verify_hash(hash_data, invalid_signature0))
        self.assertRaises(RuntimeError,
                          lambda: self.sign_verifier_private_key_der.verify_hash(hash_data, invalid_signature1))
        self.assertRaises(RuntimeError,
                          lambda: self.sign_verifier_private_key_pem.verify_hash(hash_data, invalid_signature0))
        self.assertRaises(RuntimeError,
                          lambda: self.sign_verifier_private_key_pem.verify_hash(hash_data, invalid_signature1))
        self.assertRaises(RuntimeError,
                          lambda: self.sign_verifier_public_key_bytes.verify_hash(hash_data, invalid_signature0))
        self.assertRaises(RuntimeError,
                          lambda: self.sign_verifier_public_key_bytes.verify_hash(hash_data, invalid_signature1))
        self.assertRaises(RuntimeError,
                          lambda: self.sign_verifier_public_key_der.verify_hash(hash_data, invalid_signature0))
        self.assertRaises(RuntimeError,
                          lambda: self.sign_verifier_public_key_der.verify_hash(hash_data, invalid_signature1))
        self.assertRaises(RuntimeError,
                          lambda: self.sign_verifier_public_key_pem.verify_hash(hash_data, invalid_signature0))
        self.assertRaises(RuntimeError,
                          lambda: self.sign_verifier_public_key_pem.verify_hash(hash_data, invalid_signature1))

    def test_signer_and_sign_verifier_data_verification_failure_result_equal(self):
        # Same rejection expectations as above, but for data signatures
        data = os.urandom(random.randrange(1, 1000))
        signature = self.signer_private_key_bytes.sign_data(data)
        invalid_signature0 = os.urandom(len(signature))
        invalid_signature1 = os.urandom(random.randrange(1, 1000))
        self.assertRaises(RuntimeError,
                          lambda: self.signer_private_key_bytes.verify_data(data, invalid_signature0))
        self.assertRaises(RuntimeError,
                          lambda: self.signer_private_key_bytes.verify_data(data, invalid_signature1))
        self.assertRaises(RuntimeError,
                          lambda: self.signer_private_key_der.verify_data(data, invalid_signature0))
        self.assertRaises(RuntimeError,
                          lambda: self.signer_private_key_der.verify_data(data, invalid_signature1))
        self.assertRaises(RuntimeError,
                          lambda: self.signer_private_key_pem.verify_data(data, invalid_signature0))
        self.assertRaises(RuntimeError,
                          lambda: self.signer_private_key_pem.verify_data(data, invalid_signature1))
        self.assertRaises(RuntimeError,
                          lambda: self.sign_verifier_private_key_bytes.verify_data(data, invalid_signature0))
        self.assertRaises(RuntimeError,
                          lambda: self.sign_verifier_private_key_bytes.verify_data(data, invalid_signature1))
        self.assertRaises(RuntimeError,
                          lambda: self.sign_verifier_private_key_der.verify_data(data, invalid_signature0))
        self.assertRaises(RuntimeError,
                          lambda: self.sign_verifier_private_key_der.verify_data(data, invalid_signature1))
        self.assertRaises(RuntimeError,
                          lambda: self.sign_verifier_private_key_pem.verify_data(data, invalid_signature0))
        self.assertRaises(RuntimeError,
                          lambda: self.sign_verifier_private_key_pem.verify_data(data, invalid_signature1))
        self.assertRaises(RuntimeError,
                          lambda: self.sign_verifier_public_key_bytes.verify_data(data, invalid_signature0))
        self.assertRaises(RuntimeError,
                          lambda: self.sign_verifier_public_key_bytes.verify_data(data, invalid_signature1))
        self.assertRaises(RuntimeError,
                          lambda: self.sign_verifier_public_key_der.verify_data(data, invalid_signature0))
        self.assertRaises(RuntimeError,
                          lambda: self.sign_verifier_public_key_der.verify_data(data, invalid_signature1))
        self.assertRaises(RuntimeError,
                          lambda: self.sign_verifier_public_key_pem.verify_data(data, invalid_signature0))
        self.assertRaises(RuntimeError,
                          lambda: self.sign_verifier_public_key_pem.verify_data(data, invalid_signature1))

    def test_hash_signatures_equal(self):
        # Signers built from every key source must produce identical
        # hash signatures
        hash_data = os.urandom(32)
        self.assertEqual(self.signer_private_key_bytes.sign_hash(hash_data),
                         self.signer_private_key_der.sign_hash(hash_data))
        self.assertEqual(self.signer_private_key_bytes.sign_hash(hash_data),
                         self.signer_private_key_pem.sign_hash(hash_data))

    def test_data_signatures_equal(self):
        # Signers built from every key source must produce identical
        # data signatures
        data = os.urandom(random.randint(1, 1000))
        self.assertEqual(self.signer_private_key_bytes.sign_data(data),
                         self.signer_private_key_der.sign_data(data))
        self.assertEqual(self.signer_private_key_bytes.sign_data(data),
                         self.signer_private_key_pem.sign_data(data))

    def test_signer_private_keys_equal(self):
        # Every key source must yield the same private secret
        self.assertEqual(self.signer_private_key_bytes.get_private_secret(),
                         self.signer_private_key_der.get_private_secret())
        self.assertEqual(self.signer_private_key_bytes.get_private_secret(),
                         self.signer_private_key_pem.get_private_secret())

    def test_signer_sign_verifier_addresses_equal(self):
        # All signers and verifiers derived from the same key pair must
        # report the same address
        self.assertEqual(self.signer_private_key_bytes.address, self.signer_private_key_der.address)
        self.assertEqual(self.signer_private_key_bytes.address, self.signer_private_key_pem.address)
        self.assertEqual(self.sign_verifier_private_key_bytes.address, self.signer_private_key_bytes.address)
        self.assertEqual(self.sign_verifier_private_key_bytes.address, self.sign_verifier_private_key_der.address)
        self.assertEqual(self.sign_verifier_private_key_bytes.address, self.sign_verifier_private_key_pem.address)
        self.assertEqual(self.sign_verifier_public_key_bytes.address, self.signer_private_key_bytes.address)
        self.assertEqual(self.sign_verifier_public_key_bytes.address, self.sign_verifier_public_key_der.address)
        self.assertEqual(self.sign_verifier_public_key_bytes.address, self.sign_verifier_public_key_pem.address)

    def test_signer_from_pubkey(self):
        # Signer requires a private key; building one from public key
        # material must raise TypeError
        self.assertRaises(TypeError, lambda: Signer.from_pubkey(self.public_key_bytes))
        self.assertRaises(TypeError, lambda: Signer.from_pubkey_file(self.public_der_path))
        self.assertRaises(TypeError, lambda: Signer.from_pubkey_file(self.public_pem_path))


import os
import random
import tempfile
import unittest
from asn1crypto import keys
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import ec
from loopchain.crypto.signature import Signer, SignVerifier, long_to_bytes
from tests.unit import test_util
class TestSignature(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.temp_dir = tempfile.TemporaryDirectory()
# Private Key
cls.private_key = ec.generate_private_key(ec.SECP256K1(), default_backend())
cls.private_der_path = os.path.join(cls.temp_dir.name, "private.der")
with open(cls.private_der_path, "wb") as private_der_file:
private_der_file.write(
cls.private_key.private_bytes(
encoding=serialization.Encoding['DER'],
format=serialization.PrivateFormat['PKCS8'],
encryption_algorithm=serialization.BestAvailableEncryption(b"TEST")
)
)
cls.private_pem_path = os.path.join(cls.temp_dir.name, "private.pem")
with open(cls.private_pem_path, "wb") as private_pem_file:
private_pem_file.write(
cls.private_key.private_bytes(
encoding=serialization.Encoding['PEM'],
format=serialization.PrivateFormat['PKCS8'],
encryption_algorithm=serialization.BestAvailableEncryption(b"TEST")
)
)
key_info = keys.PrivateKeyInfo.load(cls.private_key.private_bytes(
encoding=serialization.Encoding['DER'],
format=serialization.PrivateFormat['PKCS8'],
encryption_algorithm=serialization.NoEncryption()
))
cls.private_key_bytes = long_to_bytes(key_info['private_key'].native['private_key'])
# Public Key
cls.public_key = cls.private_key.public_key()
cls.public_der_path = os.path.join(cls.temp_dir.name, "public.der")
with open(cls.public_der_path, "wb") as public_der_file:
public_der_file.write(
cls.public_key.public_bytes(
encoding=serialization.Encoding['DER'],
format=serialization.PublicFormat['SubjectPublicKeyInfo']
)
)
cls.public_pem_path = os.path.join(cls.temp_dir.name, "public.pem")
with open(cls.public_pem_path, "wb") as public_pem_file:
public_pem_file.write(
cls.public_key.public_bytes(
encoding=serialization.Encoding['PEM'],
format=serialization.PublicFormat['SubjectPublicKeyInfo']
)
)
key_info = keys.PublicKeyInfo.load(
cls.public_key.public_bytes(
encoding=serialization.Encoding['DER'],
format=serialization.PublicFormat['SubjectPublicKeyInfo']
)
)
cls.public_key_bytes = key_info['public_key'].native
cls.signer_private_key_bytes = Signer.from_prikey(cls.private_key_bytes)
cls.signer_private_key_der = Signer.from_prikey_file(cls.private_der_path, b"TEST")
cls.signer_private_key_pem = Signer.from_prikey_file(cls.private_pem_path, b"TEST")
cls.sign_verifier_private_key_bytes = SignVerifier.from_prikey(cls.private_key_bytes)
cls.sign_verifier_private_key_der = SignVerifier.from_prikey_file(cls.private_der_path, b"TEST")
cls.sign_verifier_private_key_pem = SignVerifier.from_prikey_file(cls.private_pem_path, b"TEST")
cls.sign_verifier_public_key_bytes = SignVerifier.from_pubkey(cls.public_key_bytes)
cls.sign_verifier_public_key_der = SignVerifier.from_pubkey_file(cls.public_der_path)
cls.sign_verifier_public_key_pem = SignVerifier.from_pubkey_file(cls.public_pem_path)
@classmethod
def tearDownClass(cls) -> None:
cls.temp_dir.cleanup()
def setUp(self):
test_util.print_testname(self._testMethodName)
def test_signer_and_sign_verifier_hash_verification_success_result_equal(self):
hash_data = os.urandom(32)
signature = self.signer_private_key_bytes.sign_hash(hash_data)
self.signer_private_key_bytes.verify_hash(hash_data, signature)
self.signer_private_key_der.verify_hash(hash_data, signature)
self.signer_private_key_pem.verify_hash(hash_data, signature)
self.sign_verifier_private_key_bytes.verify_hash(hash_data, signature)
self.sign_verifier_private_key_der.verify_hash(hash_data, signature)
self.sign_verifier_private_key_pem.verify_hash(hash_data, signature)
self.sign_verifier_public_key_bytes.verify_hash(hash_data, signature)
self.sign_verifier_public_key_der.verify_hash(hash_data, signature)
self.sign_verifier_public_key_pem.verify_hash(hash_data, signature)
def test_signer_and_sign_verifier_data_verification_success_result_equal(self):
data = os.urandom(random.randrange(1, 1000))
signature = self.signer_private_key_bytes.sign_data(data)
self.signer_private_key_bytes.verify_data(data, signature)
self.signer_private_key_der.verify_data(data, signature)
self.signer_private_key_pem.verify_data(data, signature)
self.sign_verifier_private_key_bytes.verify_data(data, signature)
self.sign_verifier_private_key_der.verify_data(data, signature)
self.sign_verifier_private_key_pem.verify_data(data, signature)
self.sign_verifier_public_key_bytes.verify_data(data, signature)
self.sign_verifier_public_key_der.verify_data(data, signature)
self.sign_verifier_public_key_pem.verify_data(data, signature)
def test_signer_and_sign_verifier_hash_verification_failure_result_equal(self):
hash_data = os.urandom(32)
signature = self.signer_private_key_bytes.sign_hash(hash_data)
invalid_signature0 = os.urandom(len(signature))
invalid_signature1 = os.urandom(random.randrange(1, 1000))
self.assertRaises(RuntimeError,
lambda: self.signer_private_key_bytes.verify_hash(hash_data, invalid_signature0))
self.assertRaises(RuntimeError,
lambda: self.signer_private_key_bytes.verify_hash(hash_data, invalid_signature1))
self.assertRaises(RuntimeError,
lambda: self.signer_private_key_der.verify_hash(hash_data, invalid_signature0))
self.assertRaises(RuntimeError,
lambda: self.signer_private_key_der.verify_hash(hash_data, invalid_signature1))
self.assertRaises(RuntimeError,
lambda: self.signer_private_key_pem.verify_hash(hash_data, invalid_signature0))
self.assertRaises(RuntimeError,
lambda: self.signer_private_key_pem.verify_hash(hash_data, invalid_signature1))
self.assertRaises(RuntimeError,
lambda: self.sign_verifier_private_key_bytes.verify_hash(hash_data, invalid_signature0))
self.assertRaises(RuntimeError,
lambda: self.sign_verifier_private_key_bytes.verify_hash(hash_data, invalid_signature1))
self.assertRaises(RuntimeError,
lambda: self.sign_verifier_private_key_der.verify_hash(hash_data, invalid_signature0))
self.assertRaises(RuntimeError,
lambda: self.sign_verifier_private_key_der.verify_hash(hash_data, invalid_signature1))
self.assertRaises(RuntimeError,
lambda: self.sign_verifier_private_key_pem.verify_hash(hash_data, invalid_signature0))
self.assertRaises(RuntimeError,
lambda: self.sign_verifier_private_key_pem.verify_hash(hash_data, invalid_signature1))
self.assertRaises(RuntimeError,
lambda: self.sign_verifier_public_key_bytes.verify_hash(hash_data, invalid_signature0))
self.assertRaises(RuntimeError,
lambda: self.sign_verifier_public_key_bytes.verify_hash(hash_data, invalid_signature1))
self.assertRaises(RuntimeError,
lambda: self.sign_verifier_public_key_der.verify_hash(hash_data, invalid_signature0))
self.assertRaises(RuntimeError,
lambda: self.sign_verifier_public_key_der.verify_hash(hash_data, invalid_signature1))
self.assertRaises(RuntimeError,
lambda: self.sign_verifier_public_key_pem.verify_hash(hash_data, invalid_signature0))
self.assertRaises(RuntimeError,
lambda: self.sign_verifier_public_key_pem.verify_hash(hash_data, invalid_signature1))
def test_signer_and_sign_verifier_data_verification_failure_result_equal(self):
data = os.urandom(random.randrange(1, 1000))
signature = self.signer_private_key_bytes.sign_data(data)
invalid_signature0 = os.urandom(len(signature))
invalid_signature1 = os.urandom(random.randrange(1, 1000))
self.assertRaises(RuntimeError,
lambda: self.signer_private_key_bytes.verify_data(data, invalid_signature0))
self.assertRaises(RuntimeError,
lambda: self.signer_private_key_bytes.verify_data(data, invalid_signature1))
self.assertRaises(RuntimeError,
lambda: self.signer_private_key_der.verify_data(data, invalid_signature0))
self.assertRaises(RuntimeError,
lambda: self.signer_private_key_der.verify_data(data, invalid_signature1))
self.assertRaises(RuntimeError,
lambda: self.signer_private_key_pem.verify_data(data, invalid_signature0))
self.assertRaises(RuntimeError,
lambda: self.signer_private_key_pem.verify_data(data, invalid_signature1))
self.assertRaises(RuntimeError,
lambda: self.sign_verifier_private_key_bytes.verify_data(data, invalid_signature0))
self.assertRaises(RuntimeError,
lambda: self.sign_verifier_private_key_bytes.verify_data(data, invalid_signature1))
self.assertRaises(RuntimeError,
lambda: self.sign_verifier_private_key_der.verify_data(data, invalid_signature0))
self.assertRaises(RuntimeError,
lambda: self.sign_verifier_private_key_der.verify_data(data, invalid_signature1))
self.assertRaises(RuntimeError,
lambda: self.sign_verifier_private_key_pem.verify_data(data, invalid_signature0))
self.assertRaises(RuntimeError,
lambda: self.sign_verifier_private_key_pem.verify_data(data, invalid_signature1))
self.assertRaises(RuntimeError,
lambda: self.sign_verifier_public_key_bytes.verify_data(data, invalid_signature0))
self.assertRaises(RuntimeError,
lambda: self.sign_verifier_public_key_bytes.verify_data(data, invalid_signature1))
self.assertRaises(RuntimeError,
lambda: self.sign_verifier_public_key_der.verify_data(data, invalid_signature0))
self.assertRaises(RuntimeError,
lambda: self.sign_verifier_public_key_der.verify_data(data, invalid_signature1))
self.assertRaises(RuntimeError,
lambda: self.sign_verifier_public_key_pem.verify_data(data, invalid_signature0))
self.assertRaises(RuntimeError,
lambda: self.sign_verifier_public_key_pem.verify_data(data, invalid_signature1))
def test_hash_signatures_equal(self):
hash_data = os.urandom(32)
self.assertEqual(self.signer_private_key_bytes.sign_hash(hash_data),
self.signer_private_key_der.sign_hash(hash_data))
self.assertEqual(self.signer_private_key_bytes.sign_hash(hash_data),
self.signer_private_key_pem.sign_hash(hash_data))
def test_data_signatures_equal(self):
data = os.urandom(random.randint(1, 1000))
self.assertEqual(self.signer_private_key_bytes.sign_data(data),
self.signer_private_key_der.sign_data(data))
self.assertEqual(self.signer_private_key_bytes.sign_data(data),
self.signer_private_key_pem.sign_data(data))
def test_signer_private_keys_equal(self):
self.assertEqual(self.signer_private_key_bytes.get_private_secret(),
self.signer_private_key_der.get_private_secret())
self.assertEqual(self.signer_private_key_bytes.get_private_secret(),
self.signer_private_key_pem.get_private_secret())
def test_signer_sign_verifier_addresses_equal(self):
self.assertEqual(self.signer_private_key_bytes.address, self.signer_private_key_der.address)
self.assertEqual(self.signer_private_key_bytes.address, self.signer_private_key_pem.address)
self.assertEqual(self.sign_verifier_private_key_bytes.address, self.signer_private_key_bytes.address)
self.assertEqual(self.sign_verifier_private_key_bytes.address, self.sign_verifier_private_key_der.address)
self.assertEqual(self.sign_verifier_private_key_bytes.address, self.sign_verifier_private_key_pem.address)
self.assertEqual(self.sign_verifier_public_key_bytes.address, self.signer_private_key_bytes.address)
self.assertEqual(self.sign_verifier_public_key_bytes.address, self.sign_verifier_public_key_der.address)
self.assertEqual(self.sign_verifier_public_key_bytes.address, self.sign_verifier_public_key_pem.address)
def test_signer_from_pubkey(self):
self.assertRaises(TypeError, lambda: Signer.from_pubkey(self.public_key_bytes))
self.assertRaises(TypeError, lambda: Signer.from_pubkey_file(self.public_der_path))
self.assertRaises(TypeError, lambda: Signer.from_pubkey_file(self.public_pem_path)) | 0.538255 | 0.149842 |
from random import choice
from django.contrib.auth.models import AnonymousUser, Permission
from django.test.utils import override_settings
from cms.api import create_page
from cms.test_utils.testcases import CMSTestCase
from cms.toolbar.items import AjaxItem, Menu, ModalItem
from richie.apps.core.factories import UserFactory
from richie.apps.courses.factories import (
CourseFactory,
OrganizationFactory,
PersonFactory,
)
from ..core.utils import CheckToolbarMixin
# pylint: disable=too-many-ancestors
class CoursesCMSToolbarTestCase(CheckToolbarMixin, CMSTestCase):
"""Testing the integration of page extensions in the toolbar for the courses application"""
def get_cases_for_page_change(self):
"""
Not a test, a helper to create different users for each possible level of access
and specify their expected visibility on the menu item..
pylint: disable=too-many-locals
"""
superuser = UserFactory(is_staff=True, is_superuser=True)
staff_with_permission = UserFactory(is_staff=True)
user_with_permission = UserFactory()
staff = UserFactory(is_staff=True)
user = UserFactory()
anonymous = AnonymousUser()
# Add global permission to change page for users concerned
can_change_page = Permission.objects.get(codename="change_page")
staff_with_permission.user_permissions.add(can_change_page)
user_with_permission.user_permissions.add(can_change_page)
return [
([superuser, False, False], self.check_disabled),
([superuser, True, False], self.check_active),
([superuser, False, True], self.check_disabled),
([staff_with_permission, False, False], self.check_disabled),
([staff_with_permission, True, False], self.check_active),
([staff_with_permission, False, True], self.check_disabled),
([staff, False, False], self.check_missing),
([staff, True, False], self.check_missing),
([staff, False, True], self.check_missing),
([user_with_permission, False, False], self.check_absent),
([user_with_permission, True, False], self.check_absent),
([user_with_permission, False, True], self.check_absent),
([user, False, False], self.check_absent),
([user, True, False], self.check_absent),
([user, False, True], self.check_absent),
([anonymous, False, False], self.check_absent),
([anonymous, True, False], self.check_absent),
([anonymous, False, True], self.check_absent),
]
@override_settings(CMS_PERMISSION=False)
def test_cms_toolbars_course_has_page_extension_settings_item(self):
"""
Validate that a new item to edit the course is available only when visiting the page
in edit mode and for users with permission to edit the page.
"""
course = CourseFactory()
url = f"/en/admin/courses/course/{course.id:d}/change/"
for args, method in self.get_cases_for_page_change():
toolbar = self.get_toolbar_for_page(course.extended_object, *args)
item = method(toolbar, "Course settings...")
if item:
self.assertEqual(item.url, url)
# pylint: disable=too-many-locals
def test_cms_toolbars_course_has_snapshot_item(self):
"""
Validate that a new item to snapshot the course is available only when visiting the page
in edit mode and for users with permission to snapshot the page.
"""
course = CourseFactory()
superuser = UserFactory(is_staff=True, is_superuser=True)
staff_with_permission = UserFactory(is_staff=True)
user_with_permission = UserFactory()
unauthorized_staff = UserFactory(is_staff=True)
unauthorized_user = UserFactory()
anonymous = AnonymousUser()
# Add all permissions to snapshot page for users with permissions
for user in [staff_with_permission, user_with_permission]:
self.add_permission(user, "add_page")
self.add_permission(user, "change_page")
self.add_page_permission(
user, course.extended_object, can_change=True, can_add=True
)
# Randomly add only half of the necessary permissions for unauthorized users
for user in [unauthorized_staff, unauthorized_user]:
self.add_permission(user, "add_page")
self.add_permission(user, "change_page")
can_change = choice([True, False])
self.add_page_permission(
user,
course.extended_object,
can_change=can_change,
can_add=not can_change,
)
cases = [
([superuser, False, False], self.check_disabled),
([superuser, True, False], self.check_active),
([superuser, False, True], self.check_disabled),
([staff_with_permission, False, False], self.check_disabled),
([staff_with_permission, True, False], self.check_active),
([staff_with_permission, False, True], self.check_disabled),
([unauthorized_staff, False, False], self.check_missing),
([unauthorized_staff, True, False], self.check_missing),
([unauthorized_staff, False, True], self.check_missing),
([user_with_permission, False, False], self.check_absent),
([user_with_permission, True, False], self.check_absent),
([user_with_permission, False, True], self.check_absent),
([unauthorized_user, False, False], self.check_absent),
([unauthorized_user, True, False], self.check_absent),
([unauthorized_user, False, True], self.check_absent),
([anonymous, False, False], self.check_absent),
([anonymous, True, False], self.check_absent),
([anonymous, False, True], self.check_absent),
]
url = f"/en/admin/courses/course/{course.id:d}/snapshot/"
for args, method in cases:
toolbar = self.get_toolbar_for_page(course.extended_object, *args)
item = method(toolbar, "Snapshot this page...", item_type=AjaxItem)
if item:
self.assertEqual(item.action, url)
def test_cms_toolbars_snapshot_no_snapshot_item(self):
"""
Make sure that the item to snapshot a course is not available on the page of a snapshot.
"""
course = CourseFactory()
snapshot = CourseFactory(page_parent=course.extended_object)
superuser = UserFactory(is_staff=True, is_superuser=True)
cases = [
[superuser, False, False],
[superuser, True, False],
[superuser, False, True],
]
for args in cases:
toolbar = self.get_toolbar_for_page(snapshot.extended_object, *args)
self.check_missing(toolbar, "Snapshot this page...", item_type=AjaxItem)
@override_settings(CMS_PERMISSION=False)
def test_cms_toolbars_organization_has_page_extension_settings_item(self):
"""
Validate that a new item to edit the organization is available only when visiting the page
in edit mode and for users with permission to edit the page.
"""
organization = OrganizationFactory()
url = f"/en/admin/courses/organization/{organization.id:d}/change/"
for args, method in self.get_cases_for_page_change():
toolbar = self.get_toolbar_for_page(organization.extended_object, *args)
item = method(toolbar, "Organization settings...")
if item:
self.assertEqual(item.url, url)
@override_settings(CMS_PERMISSION=False)
def test_cms_toolbars_no_page_extension(self):
"""
The toolbar should not include any item to edit a page extension on a page not related
to any page extension.
"""
# Testing with a superuser proves our point
superuser = UserFactory(is_staff=True, is_superuser=True)
# Create a page not related to any page extension
page = create_page(
"A page", template="richie/single_column.html", language="en"
)
cases = [[False, False], [False, True], [True, False]]
for args in cases:
toolbar = self.get_toolbar_for_page(page, superuser, *args)
page_menu = toolbar.find_items(Menu, name="Page")[0].item
# Check that the course item is absent
results = page_menu.find_items(ModalItem, name="Course settings...")
self.assertEqual(results, [])
# Check that the snapshot item is absent
results = page_menu.find_items(ModalItem, name="Snapshot this page...")
self.assertEqual(results, [])
# Check that the organization item is absent
results = page_menu.find_items(ModalItem, name="Organization settings...")
self.assertEqual(results, [])
# Check that the person item is absent
results = page_menu.find_items(ModalItem, name="Person settings...")
self.assertEqual(results, [])
@override_settings(CMS_PERMISSION=False)
# pylint: disable=too-many-locals
def test_cms_toolbars_person_has_page_extension_settings_item(self):
"""
Validate that a new item to edit the person is available only when visiting the page
in edit mode and for users with permission to edit the page.
"""
person = PersonFactory()
# Create different users for each possible level of access
# pylint: disable=too-many-locals
superuser = UserFactory(is_staff=True, is_superuser=True)
staff_with_permission = UserFactory(is_staff=True)
user_with_permission = UserFactory()
staff = UserFactory(is_staff=True)
user = UserFactory()
anonymous = AnonymousUser()
# Add global permission to change page for users concerned
can_change_page = Permission.objects.get(codename="change_page")
staff_with_permission.user_permissions.add(can_change_page)
user_with_permission.user_permissions.add(can_change_page)
cases = [
([superuser, False, False], self.check_disabled),
([superuser, True, False], self.check_active),
([superuser, False, True], self.check_disabled),
([staff_with_permission, False, False], self.check_disabled),
([staff_with_permission, True, False], self.check_active),
([staff_with_permission, False, True], self.check_disabled),
([staff, False, False], self.check_missing),
([staff, True, False], self.check_missing),
([staff, False, True], self.check_missing),
([user_with_permission, False, False], self.check_absent),
([user_with_permission, True, False], self.check_absent),
([user_with_permission, False, True], self.check_absent),
([user, False, False], self.check_absent),
([user, True, False], self.check_absent),
([user, False, True], self.check_absent),
([anonymous, False, False], self.check_absent),
([anonymous, True, False], self.check_absent),
([anonymous, False, True], self.check_absent),
]
url = f"/en/admin/courses/person/{person.id:d}/change/"
for args, method in cases:
toolbar = self.get_toolbar_for_page(person.extended_object, *args)
item = method(toolbar, "Person settings...")
if item:
self.assertEqual(item.url, url) | tests/apps/courses/test_cms_toolbars.py | from random import choice
from django.contrib.auth.models import AnonymousUser, Permission
from django.test.utils import override_settings
from cms.api import create_page
from cms.test_utils.testcases import CMSTestCase
from cms.toolbar.items import AjaxItem, Menu, ModalItem
from richie.apps.core.factories import UserFactory
from richie.apps.courses.factories import (
CourseFactory,
OrganizationFactory,
PersonFactory,
)
from ..core.utils import CheckToolbarMixin
# pylint: disable=too-many-ancestors
class CoursesCMSToolbarTestCase(CheckToolbarMixin, CMSTestCase):
"""Testing the integration of page extensions in the toolbar for the courses application"""
def get_cases_for_page_change(self):
"""
Not a test, a helper to create different users for each possible level of access
and specify their expected visibility on the menu item..
pylint: disable=too-many-locals
"""
superuser = UserFactory(is_staff=True, is_superuser=True)
staff_with_permission = UserFactory(is_staff=True)
user_with_permission = UserFactory()
staff = UserFactory(is_staff=True)
user = UserFactory()
anonymous = AnonymousUser()
# Add global permission to change page for users concerned
can_change_page = Permission.objects.get(codename="change_page")
staff_with_permission.user_permissions.add(can_change_page)
user_with_permission.user_permissions.add(can_change_page)
return [
([superuser, False, False], self.check_disabled),
([superuser, True, False], self.check_active),
([superuser, False, True], self.check_disabled),
([staff_with_permission, False, False], self.check_disabled),
([staff_with_permission, True, False], self.check_active),
([staff_with_permission, False, True], self.check_disabled),
([staff, False, False], self.check_missing),
([staff, True, False], self.check_missing),
([staff, False, True], self.check_missing),
([user_with_permission, False, False], self.check_absent),
([user_with_permission, True, False], self.check_absent),
([user_with_permission, False, True], self.check_absent),
([user, False, False], self.check_absent),
([user, True, False], self.check_absent),
([user, False, True], self.check_absent),
([anonymous, False, False], self.check_absent),
([anonymous, True, False], self.check_absent),
([anonymous, False, True], self.check_absent),
]
@override_settings(CMS_PERMISSION=False)
def test_cms_toolbars_course_has_page_extension_settings_item(self):
"""
Validate that a new item to edit the course is available only when visiting the page
in edit mode and for users with permission to edit the page.
"""
course = CourseFactory()
url = f"/en/admin/courses/course/{course.id:d}/change/"
for args, method in self.get_cases_for_page_change():
toolbar = self.get_toolbar_for_page(course.extended_object, *args)
item = method(toolbar, "Course settings...")
if item:
self.assertEqual(item.url, url)
# pylint: disable=too-many-locals
def test_cms_toolbars_course_has_snapshot_item(self):
"""
Validate that a new item to snapshot the course is available only when visiting the page
in edit mode and for users with permission to snapshot the page.
"""
course = CourseFactory()
superuser = UserFactory(is_staff=True, is_superuser=True)
staff_with_permission = UserFactory(is_staff=True)
user_with_permission = UserFactory()
unauthorized_staff = UserFactory(is_staff=True)
unauthorized_user = UserFactory()
anonymous = AnonymousUser()
# Add all permissions to snapshot page for users with permissions
for user in [staff_with_permission, user_with_permission]:
self.add_permission(user, "add_page")
self.add_permission(user, "change_page")
self.add_page_permission(
user, course.extended_object, can_change=True, can_add=True
)
# Randomly add only half of the necessary permissions for unauthorized users
for user in [unauthorized_staff, unauthorized_user]:
self.add_permission(user, "add_page")
self.add_permission(user, "change_page")
can_change = choice([True, False])
self.add_page_permission(
user,
course.extended_object,
can_change=can_change,
can_add=not can_change,
)
cases = [
([superuser, False, False], self.check_disabled),
([superuser, True, False], self.check_active),
([superuser, False, True], self.check_disabled),
([staff_with_permission, False, False], self.check_disabled),
([staff_with_permission, True, False], self.check_active),
([staff_with_permission, False, True], self.check_disabled),
([unauthorized_staff, False, False], self.check_missing),
([unauthorized_staff, True, False], self.check_missing),
([unauthorized_staff, False, True], self.check_missing),
([user_with_permission, False, False], self.check_absent),
([user_with_permission, True, False], self.check_absent),
([user_with_permission, False, True], self.check_absent),
([unauthorized_user, False, False], self.check_absent),
([unauthorized_user, True, False], self.check_absent),
([unauthorized_user, False, True], self.check_absent),
([anonymous, False, False], self.check_absent),
([anonymous, True, False], self.check_absent),
([anonymous, False, True], self.check_absent),
]
url = f"/en/admin/courses/course/{course.id:d}/snapshot/"
for args, method in cases:
toolbar = self.get_toolbar_for_page(course.extended_object, *args)
item = method(toolbar, "Snapshot this page...", item_type=AjaxItem)
if item:
self.assertEqual(item.action, url)
def test_cms_toolbars_snapshot_no_snapshot_item(self):
"""
Make sure that the item to snapshot a course is not available on the page of a snapshot.
"""
course = CourseFactory()
snapshot = CourseFactory(page_parent=course.extended_object)
superuser = UserFactory(is_staff=True, is_superuser=True)
cases = [
[superuser, False, False],
[superuser, True, False],
[superuser, False, True],
]
for args in cases:
toolbar = self.get_toolbar_for_page(snapshot.extended_object, *args)
self.check_missing(toolbar, "Snapshot this page...", item_type=AjaxItem)
@override_settings(CMS_PERMISSION=False)
def test_cms_toolbars_organization_has_page_extension_settings_item(self):
"""
Validate that a new item to edit the organization is available only when visiting the page
in edit mode and for users with permission to edit the page.
"""
organization = OrganizationFactory()
url = f"/en/admin/courses/organization/{organization.id:d}/change/"
for args, method in self.get_cases_for_page_change():
toolbar = self.get_toolbar_for_page(organization.extended_object, *args)
item = method(toolbar, "Organization settings...")
if item:
self.assertEqual(item.url, url)
@override_settings(CMS_PERMISSION=False)
def test_cms_toolbars_no_page_extension(self):
"""
The toolbar should not include any item to edit a page extension on a page not related
to any page extension.
"""
# Testing with a superuser proves our point
superuser = UserFactory(is_staff=True, is_superuser=True)
# Create a page not related to any page extension
page = create_page(
"A page", template="richie/single_column.html", language="en"
)
cases = [[False, False], [False, True], [True, False]]
for args in cases:
toolbar = self.get_toolbar_for_page(page, superuser, *args)
page_menu = toolbar.find_items(Menu, name="Page")[0].item
# Check that the course item is absent
results = page_menu.find_items(ModalItem, name="Course settings...")
self.assertEqual(results, [])
# Check that the snapshot item is absent
results = page_menu.find_items(ModalItem, name="Snapshot this page...")
self.assertEqual(results, [])
# Check that the organization item is absent
results = page_menu.find_items(ModalItem, name="Organization settings...")
self.assertEqual(results, [])
# Check that the person item is absent
results = page_menu.find_items(ModalItem, name="Person settings...")
self.assertEqual(results, [])
@override_settings(CMS_PERMISSION=False)
# pylint: disable=too-many-locals
def test_cms_toolbars_person_has_page_extension_settings_item(self):
"""
Validate that a new item to edit the person is available only when visiting the page
in edit mode and for users with permission to edit the page.
"""
person = PersonFactory()
# Create different users for each possible level of access
# pylint: disable=too-many-locals
superuser = UserFactory(is_staff=True, is_superuser=True)
staff_with_permission = UserFactory(is_staff=True)
user_with_permission = UserFactory()
staff = UserFactory(is_staff=True)
user = UserFactory()
anonymous = AnonymousUser()
# Add global permission to change page for users concerned
can_change_page = Permission.objects.get(codename="change_page")
staff_with_permission.user_permissions.add(can_change_page)
user_with_permission.user_permissions.add(can_change_page)
cases = [
([superuser, False, False], self.check_disabled),
([superuser, True, False], self.check_active),
([superuser, False, True], self.check_disabled),
([staff_with_permission, False, False], self.check_disabled),
([staff_with_permission, True, False], self.check_active),
([staff_with_permission, False, True], self.check_disabled),
([staff, False, False], self.check_missing),
([staff, True, False], self.check_missing),
([staff, False, True], self.check_missing),
([user_with_permission, False, False], self.check_absent),
([user_with_permission, True, False], self.check_absent),
([user_with_permission, False, True], self.check_absent),
([user, False, False], self.check_absent),
([user, True, False], self.check_absent),
([user, False, True], self.check_absent),
([anonymous, False, False], self.check_absent),
([anonymous, True, False], self.check_absent),
([anonymous, False, True], self.check_absent),
]
url = f"/en/admin/courses/person/{person.id:d}/change/"
for args, method in cases:
toolbar = self.get_toolbar_for_page(person.extended_object, *args)
item = method(toolbar, "Person settings...")
if item:
self.assertEqual(item.url, url) | 0.520009 | 0.208763 |
import os
import datetime
from django import template
from django.conf import settings
from django.contrib.gis.geos import GEOSGeometry
from django.template.base import TemplateDoesNotExist
from django.db.models.fields.related import FieldDoesNotExist
from django.utils.timezone import utc
from django.utils.translation import ugettext, ungettext
from ..helpers import alphabet_enumeration
register = template.Library()
class SmartIncludeNode(template.Node):
def __init__(self, viewname):
super(SmartIncludeNode, self).__init__()
self.viewname = viewname
def render(self, context):
apps = [app.split('.')[-1] for app in settings.INSTALLED_APPS]
# Bring current app to the top of the list
appname = context.get('appname', apps[0])
apps.pop(apps.index(appname))
apps = [appname] + apps
viewname = self.viewname
result = ""
for module in apps:
try:
template_name = "%(module)s/%(module)s_%(viewname)s_fragment.html" % {'viewname': viewname, 'module': module}
t = template.loader.get_template(template_name)
result += t.render(context)
except TemplateDoesNotExist:
pass
return result
@register.tag(name="smart_include")
def do_smart_include(parser, token):
try:
tag_name, viewname = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError("%r tag requires one argument" % token.contents.split()[0])
if not (viewname[0] == viewname[-1] and viewname[0] in ('"', "'")):
raise template.TemplateSyntaxError("%r tag's viewname argument should be in quotes" % tag_name)
return SmartIncludeNode(viewname[1:-1])
@register.filter
def latlngbounds(obj):
if obj is None or isinstance(obj, basestring):
return 'null'
if isinstance(obj, GEOSGeometry):
extent = obj.extent
else:
extent = obj.get_map_image_extent()
return [[extent[1], extent[0]], [extent[3], extent[2]]]
@register.filter(name='verbose')
def field_verbose_name(obj, field):
"""Usage: {{ object|get_object_field }}"""
try:
return obj._meta.get_field(field).verbose_name
except FieldDoesNotExist:
a = getattr(obj, '%s_verbose_name' % field)
if a is None:
raise
return unicode(a)
@register.simple_tag()
def media_static_fallback(media_file, static_file, *args, **kwarg):
if os.path.exists(os.path.join(settings.MEDIA_ROOT, media_file)):
return os.path.join(settings.MEDIA_URL, media_file)
return os.path.join(settings.STATIC_URL, static_file)
@register.filter(name='timesince')
def humanize_timesince(date):
"""
http://djangosnippets.org/snippets/2275/
Humanized and localized version of built-in timesince template filter.
Based on <NAME>'s idea.
"""
delta = datetime.datetime.utcnow().replace(tzinfo=utc) - date
num_years = delta.days / 365
if (num_years > 0):
return ungettext(u"%d year ago", u"%d years ago", num_years) % num_years
num_weeks = delta.days / 7
if (num_weeks > 0):
return ungettext(u"%d week ago", u"%d weeks ago", num_weeks) % num_weeks
if (delta.days > 0):
return ungettext(u"%d day ago", u"%d days ago", delta.days) % delta.days
num_hours = delta.seconds / 3600
if (num_hours > 0):
return ungettext(u"%d hour ago", u"%d hours ago", num_hours) % num_hours
num_minutes = delta.seconds / 60
if (num_minutes > 0):
return ungettext(u"%d minute ago", u"%d minutes ago", num_minutes) % num_minutes
return ugettext(u"just a few seconds ago")
@register.inclusion_tag('mapentity/_detail_valuelist_fragment.html')
def valuelist(items, field=None, enumeration=False):
"""
Common template tag to show a list of values in detail pages.
:param field: Use this attribute on each item instead of their unicode representation
:param enumeration: Show enumerations, useful to match those shown by ``mapentity/leaflet.enumeration.js``
See https://github.com/makinacorpus/django-mapentity/issues/35
https://github.com/makinacorpus/Geotrek/issues/960
https://github.com/makinacorpus/Geotrek/issues/214
https://github.com/makinacorpus/Geotrek/issues/871
"""
if field:
def display(v):
return getattr(v, '%s_display' % field, getattr(v, field))
itemslist = [display(v) for v in items]
else:
itemslist = items
letters = alphabet_enumeration(len(items))
valuelist = []
for i, item in enumerate(itemslist):
valuelist.append({
'enumeration': letters[i] if enumeration else False,
'pk': getattr(items[i], 'pk', None),
'text': item
})
modelname = None
if len(items) > 0:
oneitem = items[0]
if hasattr(oneitem, '_meta'):
modelname = oneitem._meta.object_name.lower()
return {
'valuelist': valuelist,
'modelname': modelname
}
@register.inclusion_tag('mapentity/_detail_valuetable_fragment.html')
def valuetable(items, columns='', enumeration=False):
"""
Common template tag to show a table with columns in detail pages.
:param enumeration: Show enumerations, see ``valuelist`` template tag.
"""
columns = columns.split(',')
letters = alphabet_enumeration(len(items))
records = []
for i, item in enumerate(items):
def display(column):
return getattr(item, '%s_display' % column, getattr(item, column))
attrs = [display(column) for column in columns]
records.append({
'enumeration': letters[i] if enumeration else False,
'attrs': attrs,
'pk': getattr(item, 'pk', None)
})
if len(items) > 0:
oneitem = items[0]
columns_titles = []
for column in columns:
columns_titles.append({'name': column,
'text': field_verbose_name(oneitem, column)})
modelname = oneitem._meta.object_name.lower()
else:
modelname = None
columns_titles = None
return {
'nbcolumns': len(columns),
'columns': columns_titles,
'records': records,
'modelname': modelname
} | mapentity/templatetags/mapentity_tags.py | import os
import datetime
from django import template
from django.conf import settings
from django.contrib.gis.geos import GEOSGeometry
from django.template.base import TemplateDoesNotExist
from django.db.models.fields.related import FieldDoesNotExist
from django.utils.timezone import utc
from django.utils.translation import ugettext, ungettext
from ..helpers import alphabet_enumeration
register = template.Library()
class SmartIncludeNode(template.Node):
def __init__(self, viewname):
super(SmartIncludeNode, self).__init__()
self.viewname = viewname
def render(self, context):
apps = [app.split('.')[-1] for app in settings.INSTALLED_APPS]
# Bring current app to the top of the list
appname = context.get('appname', apps[0])
apps.pop(apps.index(appname))
apps = [appname] + apps
viewname = self.viewname
result = ""
for module in apps:
try:
template_name = "%(module)s/%(module)s_%(viewname)s_fragment.html" % {'viewname': viewname, 'module': module}
t = template.loader.get_template(template_name)
result += t.render(context)
except TemplateDoesNotExist:
pass
return result
@register.tag(name="smart_include")
def do_smart_include(parser, token):
try:
tag_name, viewname = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError("%r tag requires one argument" % token.contents.split()[0])
if not (viewname[0] == viewname[-1] and viewname[0] in ('"', "'")):
raise template.TemplateSyntaxError("%r tag's viewname argument should be in quotes" % tag_name)
return SmartIncludeNode(viewname[1:-1])
@register.filter
def latlngbounds(obj):
if obj is None or isinstance(obj, basestring):
return 'null'
if isinstance(obj, GEOSGeometry):
extent = obj.extent
else:
extent = obj.get_map_image_extent()
return [[extent[1], extent[0]], [extent[3], extent[2]]]
@register.filter(name='verbose')
def field_verbose_name(obj, field):
"""Usage: {{ object|get_object_field }}"""
try:
return obj._meta.get_field(field).verbose_name
except FieldDoesNotExist:
a = getattr(obj, '%s_verbose_name' % field)
if a is None:
raise
return unicode(a)
@register.simple_tag()
def media_static_fallback(media_file, static_file, *args, **kwarg):
if os.path.exists(os.path.join(settings.MEDIA_ROOT, media_file)):
return os.path.join(settings.MEDIA_URL, media_file)
return os.path.join(settings.STATIC_URL, static_file)
@register.filter(name='timesince')
def humanize_timesince(date):
"""
http://djangosnippets.org/snippets/2275/
Humanized and localized version of built-in timesince template filter.
Based on <NAME>'s idea.
"""
delta = datetime.datetime.utcnow().replace(tzinfo=utc) - date
num_years = delta.days / 365
if (num_years > 0):
return ungettext(u"%d year ago", u"%d years ago", num_years) % num_years
num_weeks = delta.days / 7
if (num_weeks > 0):
return ungettext(u"%d week ago", u"%d weeks ago", num_weeks) % num_weeks
if (delta.days > 0):
return ungettext(u"%d day ago", u"%d days ago", delta.days) % delta.days
num_hours = delta.seconds / 3600
if (num_hours > 0):
return ungettext(u"%d hour ago", u"%d hours ago", num_hours) % num_hours
num_minutes = delta.seconds / 60
if (num_minutes > 0):
return ungettext(u"%d minute ago", u"%d minutes ago", num_minutes) % num_minutes
return ugettext(u"just a few seconds ago")
@register.inclusion_tag('mapentity/_detail_valuelist_fragment.html')
def valuelist(items, field=None, enumeration=False):
    """
    Common template tag to show a list of values in detail pages.

    :param field: Use this attribute on each item instead of their unicode representation
    :param enumeration: Show enumerations, useful to match those shown by
        ``mapentity/leaflet.enumeration.js``

    See https://github.com/makinacorpus/django-mapentity/issues/35
        https://github.com/makinacorpus/Geotrek/issues/960
        https://github.com/makinacorpus/Geotrek/issues/214
        https://github.com/makinacorpus/Geotrek/issues/871
    """
    letters = alphabet_enumeration(len(items))

    def rendered(obj):
        # Prefer a "<field>_display" attribute when the model provides one.
        if field:
            return getattr(obj, '%s_display' % field, getattr(obj, field))
        return obj

    valuelist = [{
        'enumeration': letters[i] if enumeration else False,
        'pk': getattr(obj, 'pk', None),
        'text': rendered(obj),
    } for i, obj in enumerate(items)]

    modelname = None
    if len(items) > 0:
        first = items[0]
        if hasattr(first, '_meta'):
            modelname = first._meta.object_name.lower()
    return {
        'valuelist': valuelist,
        'modelname': modelname
    }
@register.inclusion_tag('mapentity/_detail_valuetable_fragment.html')
def valuetable(items, columns='', enumeration=False):
    """
    Common template tag to show a table with columns in detail pages.

    :param columns: comma-separated attribute names, one table column each
    :param enumeration: Show enumerations, see ``valuelist`` template tag.
    """
    columns = columns.split(',')
    letters = alphabet_enumeration(len(items))

    def cell(obj, column):
        # Prefer a "<column>_display" attribute when the model provides one.
        return getattr(obj, '%s_display' % column, getattr(obj, column))

    records = [{
        'enumeration': letters[i] if enumeration else False,
        'attrs': [cell(obj, column) for column in columns],
        'pk': getattr(obj, 'pk', None),
    } for i, obj in enumerate(items)]

    if len(items) > 0:
        first = items[0]
        columns_titles = [{'name': column,
                           'text': field_verbose_name(first, column)}
                          for column in columns]
        modelname = first._meta.object_name.lower()
    else:
        modelname = None
        columns_titles = None
    return {
        'nbcolumns': len(columns),
        'columns': columns_titles,
        'records': records,
        'modelname': modelname
    }
from collections import deque, defaultdict
from operator import itemgetter
from snakeoil.demandload import demandload, demand_compile_regexp
from snakeoil.fileutils import readlines
from snakeoil.lists import iflatten_instance
from snakeoil.osutils import listdir_files, pjoin
from pkgcore.ebuild.atom import atom
demandload('pkgcore.log:logger')
# Update files are named "<quarter>Q-<year>", e.g. "1Q-2018".
# Raw string: "\d" in a plain string literal is an invalid escape sequence
# (DeprecationWarning in Python 3, slated to become an error).
demand_compile_regexp(
    "valid_updates_re", r"^(\d)Q-(\d{4})$"
)
def _scan_directory(path):
    """Return update filenames under *path* in chronological order.

    Only names matching the "<quarter>Q-<year>" convention are kept; sorting
    is by (year, quarter) so older update files come first.
    """
    dated = []
    for name in listdir_files(path):
        m = valid_updates_re.match(name)
        if m is not None:
            dated.append(((m.group(2), m.group(1)), name))
    return [name for _, name in sorted(dated, key=itemgetter(0))]
def read_updates(path):
    """Parse every update file under *path* into per-package command chains.

    Returns a mapping of category/package key -> flat, non-empty list of
    command tuples as produced by _process_update().
    """
    def f():
        # A single shared deque: [0] is the head of the chain, [1] the tail.
        d = deque()
        return [d, d]
    # mods tracks the start point [0], and the tail, [1].
    # via this, pkg moves into a specific pkg can pick up
    # changes past that point, while ignoring changes prior
    # to that point.
    # Afterwards, we flatten it to get a per cp chain of commands.
    # no need to do lookups basically, although we do need to
    # watch for cycles.
    mods = defaultdict(f)
    moved = {}
    for fp in _scan_directory(path):
        fp = pjoin(path, fp)
        _process_update(readlines(fp), fp, mods, moved)
    # force a walk of the tree, flattening it.
    # .items() instead of the Python-2-only .iteritems(): identical results,
    # and works on Python 3 as well.
    commands = {k: list(iflatten_instance(v[0], tuple)) for k, v in mods.items()}
    # filter out empty nodes.
    return {k: v for k, v in commands.items() if v}
def _process_update(sequence, filename, mods, moved):
    """Parse one update file into the shared accumulators.

    :param sequence: iterable of raw text lines of the update file
    :param filename: path of the file, used for diagnostics only
    :param mods: defaultdict built by read_updates(); element [1] is the
        current tail of each package's command chain
    :param moved: mapping of already-moved package key -> target atom
    """
    for raw_line in sequence:
        line = raw_line.split()
        if line[0] == 'move':
            if len(line) != 3:
                raise ValueError("move line %r isn't of proper form" % (raw_line,))
            src, trg = atom(line[1]), atom(line[2])
            # Moves apply to whole packages; a version on either side is malformed.
            if src.fullver is not None:
                raise ValueError("file %r, line %r; atom %s must be versionless"
                    % (filename, raw_line, src))
            elif trg.fullver is not None:
                raise ValueError("file %r, line %r; atom %s must be versionless"
                    % (filename, raw_line, trg))
            if src.key in moved:
                logger.warning("file %r, line %r: %s was already moved to %s,"
                    " this line is redundant." % (filename, raw_line, src, moved[src.key]))
                continue
            d = deque()
            # Record the move, then make the fresh sub-deque the shared tail of
            # both src's and trg's chains (a checkpoint in the target chain).
            mods[src.key][1].extend([('move', src, trg), d])
            # start essentially a new checkpoint in the trg
            mods[trg.key][1].append(d)
            mods[trg.key][1] = d
            moved[src.key] = trg
        elif line[0] == 'slotmove':
            if len(line) != 4:
                raise ValueError("slotmove line %r isn't of proper form" % (raw_line,))
            src = atom(line[1])
            if src.key in moved:
                logger.warning("file %r, line %r: %s was already moved to %s,"
                    " this line is redundant.", filename, raw_line, src, moved[src.key])
                continue
            elif src.slot is not None:
                logger.warning("file %r, line %r: slotted atom makes no sense for slotmoves, ignoring",
                    filename, raw_line)
            # Build source (old slot) and target (new slot) atoms for the move.
            src_slot = atom("%s:%s" % (src, line[2]))
            trg_slot = atom("%s:%s" % (src.key, line[3]))
            mods[src.key][1].append(('slotmove', src_slot, line[3]))
from collections import deque, defaultdict
from operator import itemgetter
from snakeoil.demandload import demandload, demand_compile_regexp
from snakeoil.fileutils import readlines
from snakeoil.lists import iflatten_instance
from snakeoil.osutils import listdir_files, pjoin
from pkgcore.ebuild.atom import atom
demandload('pkgcore.log:logger')
# Update files are named "<quarter>Q-<year>", e.g. "1Q-2018".
# Raw string: "\d" in a plain string literal is an invalid escape sequence
# (DeprecationWarning in Python 3, slated to become an error).
demand_compile_regexp(
    "valid_updates_re", r"^(\d)Q-(\d{4})$"
)
def _scan_directory(path):
    """Return update filenames under *path* in chronological order.

    Only names matching the "<quarter>Q-<year>" convention are kept; sorting
    is by (year, quarter) so older update files come first.
    """
    dated = []
    for name in listdir_files(path):
        m = valid_updates_re.match(name)
        if m is not None:
            dated.append(((m.group(2), m.group(1)), name))
    return [name for _, name in sorted(dated, key=itemgetter(0))]
def read_updates(path):
    """Parse every update file under *path* into per-package command chains.

    Returns a mapping of category/package key -> flat, non-empty list of
    command tuples as produced by _process_update().
    """
    def f():
        # A single shared deque: [0] is the head of the chain, [1] the tail.
        d = deque()
        return [d, d]
    # mods tracks the start point [0], and the tail, [1].
    # via this, pkg moves into a specific pkg can pick up
    # changes past that point, while ignoring changes prior
    # to that point.
    # Afterwards, we flatten it to get a per cp chain of commands.
    # no need to do lookups basically, although we do need to
    # watch for cycles.
    mods = defaultdict(f)
    moved = {}
    for fp in _scan_directory(path):
        fp = pjoin(path, fp)
        _process_update(readlines(fp), fp, mods, moved)
    # force a walk of the tree, flattening it.
    # .items() instead of the Python-2-only .iteritems(): identical results,
    # and works on Python 3 as well.
    commands = {k: list(iflatten_instance(v[0], tuple)) for k, v in mods.items()}
    # filter out empty nodes.
    return {k: v for k, v in commands.items() if v}
def _process_update(sequence, filename, mods, moved):
    """Parse one update file into the shared accumulators.

    :param sequence: iterable of raw text lines of the update file
    :param filename: path of the file, used for diagnostics only
    :param mods: defaultdict built by read_updates(); element [1] is the
        current tail of each package's command chain
    :param moved: mapping of already-moved package key -> target atom
    """
    for raw_line in sequence:
        line = raw_line.split()
        if line[0] == 'move':
            if len(line) != 3:
                raise ValueError("move line %r isn't of proper form" % (raw_line,))
            src, trg = atom(line[1]), atom(line[2])
            # Moves apply to whole packages; a version on either side is malformed.
            if src.fullver is not None:
                raise ValueError("file %r, line %r; atom %s must be versionless"
                    % (filename, raw_line, src))
            elif trg.fullver is not None:
                raise ValueError("file %r, line %r; atom %s must be versionless"
                    % (filename, raw_line, trg))
            if src.key in moved:
                logger.warning("file %r, line %r: %s was already moved to %s,"
                    " this line is redundant." % (filename, raw_line, src, moved[src.key]))
                continue
            d = deque()
            # Record the move, then make the fresh sub-deque the shared tail of
            # both src's and trg's chains (a checkpoint in the target chain).
            mods[src.key][1].extend([('move', src, trg), d])
            # start essentially a new checkpoint in the trg
            mods[trg.key][1].append(d)
            mods[trg.key][1] = d
            moved[src.key] = trg
        elif line[0] == 'slotmove':
            if len(line) != 4:
                raise ValueError("slotmove line %r isn't of proper form" % (raw_line,))
            src = atom(line[1])
            if src.key in moved:
                logger.warning("file %r, line %r: %s was already moved to %s,"
                    " this line is redundant.", filename, raw_line, src, moved[src.key])
                continue
            elif src.slot is not None:
                logger.warning("file %r, line %r: slotted atom makes no sense for slotmoves, ignoring",
                    filename, raw_line)
            # Build source (old slot) and target (new slot) atoms for the move.
            src_slot = atom("%s:%s" % (src, line[2]))
            trg_slot = atom("%s:%s" % (src.key, line[3]))
            mods[src.key][1].append(('slotmove', src_slot, line[3]))
import os
from setuptools import setup, find_packages
# The long description is assembled from the README and the changelog
# shipped alongside this file.
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.md')) as f:
    README = f.read()
with open(os.path.join(here, 'CHANGES.md')) as f:
    CHANGES = f.read()
# Runtime dependencies: Pyramid web stack, SQLAlchemy persistence, pandas/Excel
# handling, OpenAPI validation.
# NOTE(review): 'pytest' is listed as a runtime requirement rather than under
# the 'testing' extra — confirm this is intentional before moving it.
requires = [
    'bcrypt',
    'psycopg2',
    'plaster_pastedeploy',
    'pandas>=1.2.2',
    'openpyxl',
    'xlrd>= 1.0.0',
    'pyramid',
    'pyramid_chameleon',
    'pyramid_beaker==0.8',
    'pylibmc==1.6.1',
    'pyramid_debugtoolbar',
    'waitress',
    'alembic',
    'pyramid_retry',
    'pyramid_tm',
    'SQLAlchemy == 1.3.*',
    'transaction',
    'zope.sqlalchemy',
    'pyramid_openapi3==0.11',
    'openapi-core<0.14',
    'pytest >= 3.7.4',
    'dataclasses-json==0.5.2',
    "pyyaml",
    "gunicorn==20.0.4",
]
# Extra dependencies for the test suite: pip install datameta[testing]
tests_require = [
    'WebTest >= 1.3.1', # py3 compat
    'pytest-cov',
    "sqlalchemy_utils",
    "requests",
    "parameterized >= 0.8.1",
    "mypy",
]
setup(
    name = 'datameta',
    version = '1.0.5',
    description = 'DataMeta - submission server for data and associated metadata',
    long_description = README + '\n\n' + CHANGES,
    author = '<NAME>',
    author_email = '<EMAIL>',
    url = '',
    keywords = 'web pyramid pylons',
    packages = find_packages(),
    license = 'Apache 2.0',
    include_package_data = True,
    zip_safe = False,
    install_requires = requires,
    extras_require={
        'testing': tests_require,
    },
    classifiers=[
        'Programming Language :: Python',
        'License :: OSI Approved :: Apache Software License',
        'Framework :: Pyramid',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
    ],
    # WSGI entry point for PasteDeploy plus a DB bootstrap console script.
    entry_points={
        'paste.app_factory': [
            'main = datameta:main',
        ],
        'console_scripts': [
            'initialize_datameta_db=datameta.scripts.initialize_db:main',
        ],
    },
)
import os
from setuptools import setup, find_packages
# The long description is assembled from the README and the changelog
# shipped alongside this file.
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.md')) as f:
    README = f.read()
with open(os.path.join(here, 'CHANGES.md')) as f:
    CHANGES = f.read()
# Runtime dependencies: Pyramid web stack, SQLAlchemy persistence, pandas/Excel
# handling, OpenAPI validation.
# NOTE(review): 'pytest' is listed as a runtime requirement rather than under
# the 'testing' extra — confirm this is intentional before moving it.
requires = [
    'bcrypt',
    'psycopg2',
    'plaster_pastedeploy',
    'pandas>=1.2.2',
    'openpyxl',
    'xlrd>= 1.0.0',
    'pyramid',
    'pyramid_chameleon',
    'pyramid_beaker==0.8',
    'pylibmc==1.6.1',
    'pyramid_debugtoolbar',
    'waitress',
    'alembic',
    'pyramid_retry',
    'pyramid_tm',
    'SQLAlchemy == 1.3.*',
    'transaction',
    'zope.sqlalchemy',
    'pyramid_openapi3==0.11',
    'openapi-core<0.14',
    'pytest >= 3.7.4',
    'dataclasses-json==0.5.2',
    "pyyaml",
    "gunicorn==20.0.4",
]
# Extra dependencies for the test suite: pip install datameta[testing]
tests_require = [
    'WebTest >= 1.3.1', # py3 compat
    'pytest-cov',
    "sqlalchemy_utils",
    "requests",
    "parameterized >= 0.8.1",
    "mypy",
]
setup(
    name = 'datameta',
    version = '1.0.5',
    description = 'DataMeta - submission server for data and associated metadata',
    long_description = README + '\n\n' + CHANGES,
    author = '<NAME>',
    author_email = '<EMAIL>',
    url = '',
    keywords = 'web pyramid pylons',
    packages = find_packages(),
    license = 'Apache 2.0',
    include_package_data = True,
    zip_safe = False,
    install_requires = requires,
    extras_require={
        'testing': tests_require,
    },
    classifiers=[
        'Programming Language :: Python',
        'License :: OSI Approved :: Apache Software License',
        'Framework :: Pyramid',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
    ],
    # WSGI entry point for PasteDeploy plus a DB bootstrap console script.
    entry_points={
        'paste.app_factory': [
            'main = datameta:main',
        ],
        'console_scripts': [
            'initialize_datameta_db=datameta.scripts.initialize_db:main',
        ],
    },
)
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration: removes ``usage_label`` from
    Artificielle2018 and adds nullable ``*_label`` CharFields to the
    2015→2018 comparison tables and the 2018 layer tables.

    Auto-generated by Django — do not hand-edit operations.
    """

    dependencies = [
        ("public_data", "0017_auto_20210924_2219"),
    ]
    operations = [
        migrations.RemoveField(
            model_name="artificielle2018",
            name="usage_label",
        ),
        migrations.AddField(
            model_name="artificialisee2015to2018",
            name="cs_2015_label",
            field=models.CharField(
                blank=True, max_length=254, null=True, verbose_name="Couverture 2015"
            ),
        ),
        migrations.AddField(
            model_name="artificialisee2015to2018",
            name="cs_2018_label",
            field=models.CharField(
                blank=True, max_length=254, null=True, verbose_name="Couverture 2018"
            ),
        ),
        migrations.AddField(
            model_name="artificialisee2015to2018",
            name="us_2015_label",
            field=models.CharField(
                blank=True, max_length=254, null=True, verbose_name="Usage 2015"
            ),
        ),
        migrations.AddField(
            model_name="artificialisee2015to2018",
            name="us_2018_label",
            field=models.CharField(
                blank=True, max_length=254, null=True, verbose_name="Usage 2018"
            ),
        ),
        migrations.AddField(
            model_name="enveloppeurbaine2018",
            name="couverture_label",
            field=models.CharField(
                blank=True,
                max_length=254,
                null=True,
                verbose_name="Libellé couverture du sol",
            ),
        ),
        migrations.AddField(
            model_name="renaturee2018to2015",
            name="cs_2015_label",
            field=models.CharField(
                blank=True, max_length=254, null=True, verbose_name="Couverture 2015"
            ),
        ),
        migrations.AddField(
            model_name="renaturee2018to2015",
            name="cs_2018_label",
            field=models.CharField(
                blank=True, max_length=254, null=True, verbose_name="Couverture 2018"
            ),
        ),
        migrations.AddField(
            model_name="renaturee2018to2015",
            name="us_2015_label",
            field=models.CharField(
                blank=True, max_length=254, null=True, verbose_name="Usage 2015"
            ),
        ),
        migrations.AddField(
            model_name="renaturee2018to2015",
            name="us_2018_label",
            field=models.CharField(
                blank=True, max_length=254, null=True, verbose_name="Usage 2018"
            ),
        ),
        migrations.AddField(
            model_name="voirie2018",
            name="couverture_label",
            field=models.CharField(
                blank=True,
                max_length=254,
                null=True,
                verbose_name="Libellé couverture du sol",
            ),
        ),
        migrations.AddField(
            model_name="zonesbaties2018",
            name="couverture_label",
            field=models.CharField(
                blank=True,
                max_length=254,
                null=True,
                verbose_name="Libellé couverture du sol",
            ),
        ),
        migrations.AddField(
            model_name="zonesbaties2018",
            name="usage_label",
            field=models.CharField(
                blank=True,
                max_length=254,
                null=True,
                verbose_name="Libellé usage du sol",
            ),
        ),
    ]
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration: removes ``usage_label`` from
    Artificielle2018 and adds nullable ``*_label`` CharFields to the
    2015→2018 comparison tables and the 2018 layer tables.

    Auto-generated by Django — do not hand-edit operations.
    """

    dependencies = [
        ("public_data", "0017_auto_20210924_2219"),
    ]
    operations = [
        migrations.RemoveField(
            model_name="artificielle2018",
            name="usage_label",
        ),
        migrations.AddField(
            model_name="artificialisee2015to2018",
            name="cs_2015_label",
            field=models.CharField(
                blank=True, max_length=254, null=True, verbose_name="Couverture 2015"
            ),
        ),
        migrations.AddField(
            model_name="artificialisee2015to2018",
            name="cs_2018_label",
            field=models.CharField(
                blank=True, max_length=254, null=True, verbose_name="Couverture 2018"
            ),
        ),
        migrations.AddField(
            model_name="artificialisee2015to2018",
            name="us_2015_label",
            field=models.CharField(
                blank=True, max_length=254, null=True, verbose_name="Usage 2015"
            ),
        ),
        migrations.AddField(
            model_name="artificialisee2015to2018",
            name="us_2018_label",
            field=models.CharField(
                blank=True, max_length=254, null=True, verbose_name="Usage 2018"
            ),
        ),
        migrations.AddField(
            model_name="enveloppeurbaine2018",
            name="couverture_label",
            field=models.CharField(
                blank=True,
                max_length=254,
                null=True,
                verbose_name="Libellé couverture du sol",
            ),
        ),
        migrations.AddField(
            model_name="renaturee2018to2015",
            name="cs_2015_label",
            field=models.CharField(
                blank=True, max_length=254, null=True, verbose_name="Couverture 2015"
            ),
        ),
        migrations.AddField(
            model_name="renaturee2018to2015",
            name="cs_2018_label",
            field=models.CharField(
                blank=True, max_length=254, null=True, verbose_name="Couverture 2018"
            ),
        ),
        migrations.AddField(
            model_name="renaturee2018to2015",
            name="us_2015_label",
            field=models.CharField(
                blank=True, max_length=254, null=True, verbose_name="Usage 2015"
            ),
        ),
        migrations.AddField(
            model_name="renaturee2018to2015",
            name="us_2018_label",
            field=models.CharField(
                blank=True, max_length=254, null=True, verbose_name="Usage 2018"
            ),
        ),
        migrations.AddField(
            model_name="voirie2018",
            name="couverture_label",
            field=models.CharField(
                blank=True,
                max_length=254,
                null=True,
                verbose_name="Libellé couverture du sol",
            ),
        ),
        migrations.AddField(
            model_name="zonesbaties2018",
            name="couverture_label",
            field=models.CharField(
                blank=True,
                max_length=254,
                null=True,
                verbose_name="Libellé couverture du sol",
            ),
        ),
        migrations.AddField(
            model_name="zonesbaties2018",
            name="usage_label",
            field=models.CharField(
                blank=True,
                max_length=254,
                null=True,
                verbose_name="Libellé usage du sol",
            ),
        ),
    ]
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.core.teachers import FbDeprecatedDialogTeacher, MultiTaskTeacher
from .build import build
import copy
import os
# Mapping of task id -> dataset file prefix for the five personalized-dialog tasks.
tasks = {
    1: 'personalized-dialog-task1-API-calls',
    2: 'personalized-dialog-task2-API-refine',
    3: 'personalized-dialog-task3-options',
    4: 'personalized-dialog-task4-info',
    5: 'personalized-dialog-task5-full-dialogs',
}
def _path(exsz, task, opt):
    """Return the datafile path for a given dataset size and task id.

    :param exsz: dataset size directory, 'full' or 'small'
    :param task: task id (1-5), selects the file prefix from ``tasks``
    :param opt: ParlAI options dict; ``datatype`` picks the file suffix
    """
    # Build the data if it doesn't exist.
    build(opt)
    dt = opt['datatype'].split(':')[0]
    suffix = {'train': 'trn', 'test': 'tst', 'valid': 'dev'}.get(dt, '')
    return os.path.join(
        opt['datapath'],
        'personalized-dialog',
        'personalized-dialog-dataset',
        '{exsz}'.format(exsz=exsz),
        '{tsk}-{type}.txt'.format(tsk=tasks[int(task)], type=suffix),
    )
# The knowledge base of facts that can be used to answer questions.
class KBTeacher(FbDeprecatedDialogTeacher):
    """Teacher over the knowledge base of facts used to answer questions."""

    def __init__(self, opt, shared=None):
        # Download/extract the dataset first if it is not already present.
        build(opt)
        opt['datafile'] = os.path.join(
            opt['datapath'],
            'personalized-dialog',
            'personalized-dialog-dataset',
            'personalized-dialog-kb-all.txt',
        )
        super().__init__(opt, shared)
# python <script.py> -t personalized_dialog:FullTask:<task_id>
# Single full task.
class FullTaskTeacher(FbDeprecatedDialogTeacher):
    """Single task over the full-size dataset.

    Invoked as: ``-t personalized_dialog:FullTask:<task_id>``
    """

    def __init__(self, opt, shared=None):
        # The third colon-separated token of opt['task'] is the task id (1-5).
        opt['datafile'] = _path('full', opt['task'].split(':')[2], opt)
        opt['cands_datafile'] = os.path.join(
            opt['datapath'],
            'personalized-dialog',
            'personalized-dialog-dataset',
            'personalized-dialog-candidates.txt',
        )
        super().__init__(opt, shared)
# python <script.py> -t personalized_dialog:SmallTask:<task_id>
# Single small task.
class SmallTaskTeacher(FbDeprecatedDialogTeacher):
    """Single task over the small dataset.

    Invoked as: ``-t personalized_dialog:SmallTask:<task_id>``
    """

    def __init__(self, opt, shared=None):
        # The third colon-separated token of opt['task'] is the task id (1-5).
        opt['datafile'] = _path('small', opt['task'].split(':')[2], opt)
        opt['cands_datafile'] = os.path.join(
            opt['datapath'],
            'personalized-dialog',
            'personalized-dialog-dataset',
            'personalized-dialog-candidates.txt',
        )
        super().__init__(opt, shared)
# python <script.py> -t personalized_dialog:AllFull
# By default train on all tasks at once.
class AllFullTeacher(MultiTaskTeacher):
    """Multi-task teacher covering all five full-size tasks at once.

    Invoked as: ``-t personalized_dialog:AllFull``
    """

    def __init__(self, opt, shared=None):
        # Copy opt so rewriting 'task' does not leak into the caller's options.
        opt = copy.deepcopy(opt)
        opt['task'] = ','.join(
            'personalized_dialog:FullTask:%d' % (i + 1) for i in range(5)
        )
        opt['cands_datafile'] = os.path.join(
            opt['datapath'],
            'personalized-dialog',
            'personalized-dialog-dataset',
            'personalized-dialog-candidates.txt',
        )
        super().__init__(opt, shared)
# python <script.py> -t personalized_dialog:AllSmall
# By default train on all tasks at once.
class AllSmallTeacher(MultiTaskTeacher):
    """Multi-task teacher covering all five small tasks at once.

    Invoked as: ``-t personalized_dialog:AllSmall``
    """

    def __init__(self, opt, shared=None):
        # Copy opt so rewriting 'task' does not leak into the caller's options.
        opt = copy.deepcopy(opt)
        opt['task'] = ','.join(
            'personalized_dialog:SmallTask:%d' % (i + 1) for i in range(5)
        )
        opt['cands_datafile'] = os.path.join(
            opt['datapath'],
            'personalized-dialog',
            'personalized-dialog-dataset',
            'personalized-dialog-candidates.txt',
        )
        super().__init__(opt, shared)
# By default train on all tasks at once.
class DefaultTeacher(AllSmallTeacher):
    """Default: train on all five small tasks at once."""
    pass
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.core.teachers import FbDeprecatedDialogTeacher, MultiTaskTeacher
from .build import build
import copy
import os
# Mapping of task id -> dataset file prefix for the five personalized-dialog tasks.
tasks = {
    1: 'personalized-dialog-task1-API-calls',
    2: 'personalized-dialog-task2-API-refine',
    3: 'personalized-dialog-task3-options',
    4: 'personalized-dialog-task4-info',
    5: 'personalized-dialog-task5-full-dialogs',
}
def _path(exsz, task, opt):
    """Return the datafile path for a given dataset size and task id.

    :param exsz: dataset size directory, 'full' or 'small'
    :param task: task id (1-5), selects the file prefix from ``tasks``
    :param opt: ParlAI options dict; ``datatype`` picks the file suffix
    """
    # Build the data if it doesn't exist.
    build(opt)
    dt = opt['datatype'].split(':')[0]
    suffix = {'train': 'trn', 'test': 'tst', 'valid': 'dev'}.get(dt, '')
    return os.path.join(
        opt['datapath'],
        'personalized-dialog',
        'personalized-dialog-dataset',
        '{exsz}'.format(exsz=exsz),
        '{tsk}-{type}.txt'.format(tsk=tasks[int(task)], type=suffix),
    )
# The knowledge base of facts that can be used to answer questions.
class KBTeacher(FbDeprecatedDialogTeacher):
    """Teacher over the knowledge base of facts used to answer questions."""

    def __init__(self, opt, shared=None):
        # Download/extract the dataset first if it is not already present.
        build(opt)
        opt['datafile'] = os.path.join(
            opt['datapath'],
            'personalized-dialog',
            'personalized-dialog-dataset',
            'personalized-dialog-kb-all.txt',
        )
        super().__init__(opt, shared)
# python <script.py> -t personalized_dialog:FullTask:<task_id>
# Single full task.
class FullTaskTeacher(FbDeprecatedDialogTeacher):
    """Single task over the full-size dataset.

    Invoked as: ``-t personalized_dialog:FullTask:<task_id>``
    """

    def __init__(self, opt, shared=None):
        # The third colon-separated token of opt['task'] is the task id (1-5).
        opt['datafile'] = _path('full', opt['task'].split(':')[2], opt)
        opt['cands_datafile'] = os.path.join(
            opt['datapath'],
            'personalized-dialog',
            'personalized-dialog-dataset',
            'personalized-dialog-candidates.txt',
        )
        super().__init__(opt, shared)
# python <script.py> -t personalized_dialog:SmallTask:<task_id>
# Single small task.
class SmallTaskTeacher(FbDeprecatedDialogTeacher):
    """Single task over the small dataset.

    Invoked as: ``-t personalized_dialog:SmallTask:<task_id>``
    """

    def __init__(self, opt, shared=None):
        # The third colon-separated token of opt['task'] is the task id (1-5).
        opt['datafile'] = _path('small', opt['task'].split(':')[2], opt)
        opt['cands_datafile'] = os.path.join(
            opt['datapath'],
            'personalized-dialog',
            'personalized-dialog-dataset',
            'personalized-dialog-candidates.txt',
        )
        super().__init__(opt, shared)
# python <script.py> -t personalized_dialog:AllFull
# By default train on all tasks at once.
class AllFullTeacher(MultiTaskTeacher):
    """Multi-task teacher covering all five full-size tasks at once.

    Invoked as: ``-t personalized_dialog:AllFull``
    """

    def __init__(self, opt, shared=None):
        # Copy opt so rewriting 'task' does not leak into the caller's options.
        opt = copy.deepcopy(opt)
        opt['task'] = ','.join(
            'personalized_dialog:FullTask:%d' % (i + 1) for i in range(5)
        )
        opt['cands_datafile'] = os.path.join(
            opt['datapath'],
            'personalized-dialog',
            'personalized-dialog-dataset',
            'personalized-dialog-candidates.txt',
        )
        super().__init__(opt, shared)
# python <script.py> -t personalized_dialog:AllSmall
# By default train on all tasks at once.
class AllSmallTeacher(MultiTaskTeacher):
    """Multi-task teacher covering all five small tasks at once.

    Invoked as: ``-t personalized_dialog:AllSmall``
    """

    def __init__(self, opt, shared=None):
        # Copy opt so rewriting 'task' does not leak into the caller's options.
        opt = copy.deepcopy(opt)
        opt['task'] = ','.join(
            'personalized_dialog:SmallTask:%d' % (i + 1) for i in range(5)
        )
        opt['cands_datafile'] = os.path.join(
            opt['datapath'],
            'personalized-dialog',
            'personalized-dialog-dataset',
            'personalized-dialog-candidates.txt',
        )
        super().__init__(opt, shared)
# By default train on all tasks at once.
class DefaultTeacher(AllSmallTeacher):
    """Default: train on all five small tasks at once."""
    pass
import numpy as np
from matplotlib import pyplot as plt
from Dataset.DatasetPreprocessor import cross_validation_split
# Function plots the mean cross-validation score against alpha (log x-axis)
def plot_mean(alphas, meanScores, optimalAlpha, highestScore):
    """Plot mean cross-validation R^2 against alpha on a log-scaled x-axis.

    The optimum is highlighted as a second scatter point; the Figure object is
    returned so the caller can save or display it.
    """
    figure = plt.figure()
    plt.xscale('log')
    plt.scatter(x=alphas, y=meanScores)
    plt.plot(alphas, meanScores, label="Mean Score vs. log(Alpha)")
    plt.scatter(x=optimalAlpha, y=highestScore, label="log(Alpha): {},\nMean Score: {}".format(optimalAlpha, highestScore))
    plt.title('Cross Validation', fontsize=16)
    plt.xlabel("log(Alphas)")
    plt.ylabel("Mean $R^2$")
    plt.legend(prop={'size': 7})
    return figure
# Concatenate 1 to the beginning of each row - necessary to ensure W[0] computes correctly.
def configure_input_matrix(mX):
    """Prepend a column of ones to mX so the bias weight W[0] is learned."""
    ones = np.ones((len(mX), 1), dtype=int)
    return np.concatenate((ones, mX), axis=1)
# Function takes parameters mY - Results matrix, mX - inputs matrix, l - lambda value.
# Parameters are used to calculate weights matrix containing minimized weights values.
def calculate_weights(mY, mX, a):
    """Closed-form ridge solution: (X'X + aI)^-1 X'y."""
    gram = mX.T.dot(mX)
    regularized = gram + a * np.identity(len(gram))
    return np.linalg.inv(regularized).dot(mX.T.dot(mY))
def configure_training_folds(foldsY, foldsX, iterator):
    """Concatenate every fold except fold *iterator* into flat training arrays."""
    keepY = np.delete(foldsY, iterator, 0)
    keepX = np.delete(foldsX, iterator, 0)
    # np.delete yields equally-sized folds, so a full flatten over each
    # remaining fold matches the original index-based walk.
    trainY = np.array([y for fold in keepY for y in fold])
    trainX = np.array([x for fold in keepX for x in fold])
    return trainY, trainX
def optimise_alpha(mY, mX, folds=10, randomizer=None):
    """Grid-search the ridge alpha over a log-spaced range via k-fold CV.

    :param mY: labels vector
    :param mX: feature matrix
    :param folds: number of cross-validation folds
    :param randomizer: forwarded to cross_validation_split for shuffling
    :return: (optimal_alpha, matplotlib_figure_of_mean_scores)
    """
    foldsY, foldsX = cross_validation_split(mY, mX, folds, randomizer)
    # Candidate alphas from (near) the smallest positive double up to 1.
    alphas = np.logspace(-323.67, 0, 10000)
    scores = []
    for i in range(len(foldsY)):
        currentScores = []
        # Fold i is held out for testing; the rest form the training set.
        testY = foldsY[i]
        testX = foldsX[i]
        trainY, trainX = configure_training_folds(foldsY, foldsX, i)
        for alpha in alphas:
            TestModel = RidgeModel()
            TestModel.train(trainY, trainX, alpha)
            score = TestModel.score(testY, testX)
            currentScores.append(score)
        scores.append(currentScores)
    scores = np.array(scores)
    # Average R^2 across folds per alpha; the argmax wins.
    meanScores = scores.mean(axis=0)
    optimalAlpha = alphas[np.argmax(meanScores)]
    highestScore = meanScores[np.argmax(meanScores)]
    plot = plot_mean(alphas, meanScores, optimalAlpha, highestScore)
    return optimalAlpha, plot
# Function adjusts the alpha by performing cross-validation
def cross_validate(mY, mX, folds=10, randomizer=None):
    """Tune the ridge alpha via k-fold cross-validation.

    :return: (optimal_alpha, matplotlib_figure)
    """
    return optimise_alpha(mY, mX, folds, randomizer)
class RidgeModel:
    """Ridge regression trained via the closed-form normal equations."""

    def __init__(self):
        # Populated by train(): per-feature weights and the bias term.
        self.weights = []
        self.intercept = 0

    def train(self, mY, mX, a):
        """Fit weights/intercept on training data with regularization *a*."""
        design = configure_input_matrix(mX)
        solution = calculate_weights(mY, design, a)
        # The first solution entry is the bias; the rest are feature weights.
        self.intercept = solution[0]
        self.weights = solution[1:]

    def predict(self, mX):
        """Return one prediction per row of mX using y = w.x + c."""
        predictions = []
        for row in mX:
            value = 0 + self.intercept
            for feature, weight in zip(row, self.weights):
                value += feature * weight
            predictions.append(value)
        return predictions

    def score(self, mY, mX):
        """Coefficient of determination: R^2 = 1 - RSS/TSS."""
        predicted = self.predict(mX)
        residual = ((mY - predicted) ** 2).sum()
        total = ((mY - mY.mean()) ** 2).sum()
        return 1 - (residual / total)
from matplotlib import pyplot as plt
from Dataset.DatasetPreprocessor import cross_validation_split
# Function plots the mean cross-validation score against alpha (log x-axis)
def plot_mean(alphas, meanScores, optimalAlpha, highestScore):
    """Plot mean cross-validation R^2 against alpha on a log-scaled x-axis.

    The optimum is highlighted as a second scatter point; the Figure object is
    returned so the caller can save or display it.
    """
    figure = plt.figure()
    plt.xscale('log')
    plt.scatter(x=alphas, y=meanScores)
    plt.plot(alphas, meanScores, label="Mean Score vs. log(Alpha)")
    plt.scatter(x=optimalAlpha, y=highestScore, label="log(Alpha): {},\nMean Score: {}".format(optimalAlpha, highestScore))
    plt.title('Cross Validation', fontsize=16)
    plt.xlabel("log(Alphas)")
    plt.ylabel("Mean $R^2$")
    plt.legend(prop={'size': 7})
    return figure
# Concatenate 1 to the beginning of each row - necessary to ensure W[0] computes correctly.
def configure_input_matrix(mX):
    """Prepend a column of ones to mX so the bias weight W[0] is learned."""
    ones = np.ones((len(mX), 1), dtype=int)
    return np.concatenate((ones, mX), axis=1)
# Function takes parameters mY - Results matrix, mX - inputs matrix, l - lambda value.
# Parameters are used to calculate weights matrix containing minimized weights values.
def calculate_weights(mY, mX, a):
    """Closed-form ridge solution: (X'X + aI)^-1 X'y."""
    gram = mX.T.dot(mX)
    regularized = gram + a * np.identity(len(gram))
    return np.linalg.inv(regularized).dot(mX.T.dot(mY))
def configure_training_folds(foldsY, foldsX, iterator):
    """Concatenate every fold except fold *iterator* into flat training arrays."""
    keepY = np.delete(foldsY, iterator, 0)
    keepX = np.delete(foldsX, iterator, 0)
    # np.delete yields equally-sized folds, so a full flatten over each
    # remaining fold matches the original index-based walk.
    trainY = np.array([y for fold in keepY for y in fold])
    trainX = np.array([x for fold in keepX for x in fold])
    return trainY, trainX
def optimise_alpha(mY, mX, folds=10, randomizer=None):
    """Grid-search the ridge alpha over a log-spaced range via k-fold CV.

    :param mY: labels vector
    :param mX: feature matrix
    :param folds: number of cross-validation folds
    :param randomizer: forwarded to cross_validation_split for shuffling
    :return: (optimal_alpha, matplotlib_figure_of_mean_scores)
    """
    foldsY, foldsX = cross_validation_split(mY, mX, folds, randomizer)
    # Candidate alphas from (near) the smallest positive double up to 1.
    alphas = np.logspace(-323.67, 0, 10000)
    scores = []
    for i in range(len(foldsY)):
        currentScores = []
        # Fold i is held out for testing; the rest form the training set.
        testY = foldsY[i]
        testX = foldsX[i]
        trainY, trainX = configure_training_folds(foldsY, foldsX, i)
        for alpha in alphas:
            TestModel = RidgeModel()
            TestModel.train(trainY, trainX, alpha)
            score = TestModel.score(testY, testX)
            currentScores.append(score)
        scores.append(currentScores)
    scores = np.array(scores)
    # Average R^2 across folds per alpha; the argmax wins.
    meanScores = scores.mean(axis=0)
    optimalAlpha = alphas[np.argmax(meanScores)]
    highestScore = meanScores[np.argmax(meanScores)]
    plot = plot_mean(alphas, meanScores, optimalAlpha, highestScore)
    return optimalAlpha, plot
# Function adjusts the alpha by performing cross-validation
def cross_validate(mY, mX, folds=10, randomizer=None):
    """Tune the ridge alpha via k-fold cross-validation.

    :return: (optimal_alpha, matplotlib_figure)
    """
    return optimise_alpha(mY, mX, folds, randomizer)
class RidgeModel:
    """Ridge regression trained via the closed-form normal equations."""

    def __init__(self):
        # Populated by train(): per-feature weights and the bias term.
        self.weights = []
        self.intercept = 0

    def train(self, mY, mX, a):
        """Fit weights/intercept on training data with regularization *a*."""
        design = configure_input_matrix(mX)
        solution = calculate_weights(mY, design, a)
        # The first solution entry is the bias; the rest are feature weights.
        self.intercept = solution[0]
        self.weights = solution[1:]

    def predict(self, mX):
        """Return one prediction per row of mX using y = w.x + c."""
        predictions = []
        for row in mX:
            value = 0 + self.intercept
            for feature, weight in zip(row, self.weights):
                value += feature * weight
            predictions.append(value)
        return predictions

    def score(self, mY, mX):
        """Coefficient of determination: R^2 = 1 - RSS/TSS."""
        predicted = self.predict(mX)
        residual = ((mY - predicted) ** 2).sum()
        total = ((mY - mY.mean()) ** 2).sum()
        return 1 - (residual / total)
import os, sys
import random
import numpy as np
import yaml
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..'))
from common.geometry import View, PointLight, Lighting
from common.utils.io_utils import read_from_stat_file
class Randomizer(object):
def __init__(self, view_file=None, truncparam_file='None', config_file='default'):
nowpath = os.path.dirname(os.path.abspath(__file__))
if config_file == 'default':
config_file = os.path.join(nowpath, 'default.yaml')
if not os.path.exists(config_file):
raise ValueError('config file {0} not found.'.format(config_file))
with open(config_file, 'r') as f:
data = yaml.safe_load(f)
self.param = data
self.view_random_list = None
if view_file is not None:
self.view_random_list = read_from_stat_file(view_file)
# self.truncparam_random_list = read_from_stat_file(truncparam_file)
def randomize_view_dist(self, min_dist, max_dist):
dist = np.random.rand() * (max_dist - min_dist) + min_dist
return dist
def randomize_view(self, min_dist=1.0, max_dist=3.5, num=0):
if self.view_random_list is None:
raise ValueError('view randomization list not defined.')
if num == 0:
line = random.choice(self.view_random_list)
data = [float(x) for x in line.strip('\n').split()]
view = View(data[0], data[1], data[2], self.randomize_view_dist(min_dist, max_dist))
return view
else:
lines = random.sample(self.view_random_list, num)
data = [[float(x) for x in line.strip('\n').split()] for line in lines]
views = [View(k[0], k[1], k[2], self.randomize_view_dist(min_dist, max_dist)) for k in data]
return views
def randomize_truncparam(self):
data = []
for idx in range(4):
while True:
rnd = np.random.normal(0, self.param['truncation_param_std'])
if abs(rnd) < self.param['truncation_param_bound']:
break
data.append(rnd)
return data
def randomize_point_light(self):
azimuth_deg = np.random.uniform(self.param['light_azimuth_degree_lowbound'], self.param['light_azimuth_degree_highbound'])
elevation_deg = np.random.uniform(self.param['light_elevation_degree_lowbound'], self.param['light_elevation_degree_highbound'])
dist = np.random.uniform(self.param['light_dist_lowbound'], self.param['light_dist_highbound'])
energy = np.random.normal(self.param['light_energy_mean'], self.param['light_energy_std'])
data = PointLight(azimuth_deg, elevation_deg, dist, energy)
return data
def randomize_lighting(self, use_point_lighting=True):
while True:
env_energy = np.random.uniform(self.param['light_env_energy_lowbound'], self.param['light_env_energy_highbound'])
point_light_num = random.randint(self.param['light_num_lowbound'], self.param['light_num_highbound'])
if env_energy >= self.param['light_env_energy_lowbound_without_point'] or (use_point_lighting and point_light_num != 0):
break
points_light = []
if use_point_lighting:
for idx in range(point_light_num):
point_light = self.randomize_point_light()
points_light.append(point_light)
data = Lighting(env_energy, points_light)
return data
def randomize_cropbg_param(self):
data = [np.random.rand(), np.random.rand()]
return data
def standardize_truncparam(self):
data = [0.0, 0.0, 0.0, 0.0]
return data
def standardize_cropbg_param(self):
    """Return the centered (deterministic) crop parameters."""
    return [0.5] * 2
import random
import numpy as np
import yaml
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..'))
from common.geometry import View, PointLight, Lighting
from common.utils.io_utils import read_from_stat_file
class Randomizer(object):
    """Samples randomized rendering settings: views, lights, truncation, crops.

    Configuration values are loaded from a YAML file; candidate camera view
    triples are optionally loaded from a whitespace-separated stat file.
    """

    def __init__(self, view_file=None, truncparam_file='None', config_file='default'):
        here = os.path.dirname(os.path.abspath(__file__))
        if config_file == 'default':
            config_file = os.path.join(here, 'default.yaml')
        if not os.path.exists(config_file):
            raise ValueError('config file {0} not found.'.format(config_file))
        with open(config_file, 'r') as cfg:
            self.param = yaml.safe_load(cfg)
        # Candidate view lines; stays None until a view file is supplied.
        self.view_random_list = read_from_stat_file(view_file) if view_file is not None else None
        # self.truncparam_random_list = read_from_stat_file(truncparam_file)

    def randomize_view_dist(self, min_dist, max_dist):
        """Return a camera distance drawn uniformly from [min_dist, max_dist)."""
        return min_dist + np.random.rand() * (max_dist - min_dist)

    def randomize_view(self, min_dist=1.0, max_dist=3.5, num=0):
        """Sample one view (num == 0) or a list of `num` distinct views."""
        if self.view_random_list is None:
            raise ValueError('view randomization list not defined.')
        if num == 0:
            chosen = random.choice(self.view_random_list)
            vals = [float(tok) for tok in chosen.strip('\n').split()]
            return View(vals[0], vals[1], vals[2], self.randomize_view_dist(min_dist, max_dist))
        views = []
        for line in random.sample(self.view_random_list, num):
            vals = [float(tok) for tok in line.strip('\n').split()]
            views.append(View(vals[0], vals[1], vals[2], self.randomize_view_dist(min_dist, max_dist)))
        return views

    def randomize_truncparam(self):
        """Draw four truncation parameters via rejection sampling from a bounded normal."""
        std = self.param['truncation_param_std']
        bound = self.param['truncation_param_bound']
        params = []
        while len(params) < 4:
            sample = np.random.normal(0, std)
            if abs(sample) < bound:
                params.append(sample)
        return params

    def randomize_point_light(self):
        """Sample one point light: spherical position (uniform) plus energy (normal)."""
        cfg = self.param
        azimuth_deg = np.random.uniform(cfg['light_azimuth_degree_lowbound'], cfg['light_azimuth_degree_highbound'])
        elevation_deg = np.random.uniform(cfg['light_elevation_degree_lowbound'], cfg['light_elevation_degree_highbound'])
        dist = np.random.uniform(cfg['light_dist_lowbound'], cfg['light_dist_highbound'])
        energy = np.random.normal(cfg['light_energy_mean'], cfg['light_energy_std'])
        return PointLight(azimuth_deg, elevation_deg, dist, energy)

    def randomize_lighting(self, use_point_lighting=True):
        """Sample environment energy plus optional point lights; re-samples
        until the ambient energy alone suffices or a point light will exist."""
        while True:
            env_energy = np.random.uniform(self.param['light_env_energy_lowbound'], self.param['light_env_energy_highbound'])
            n_points = random.randint(self.param['light_num_lowbound'], self.param['light_num_highbound'])
            bright_enough = env_energy >= self.param['light_env_energy_lowbound_without_point']
            if bright_enough or (use_point_lighting and n_points != 0):
                break
        point_lights = []
        if use_point_lighting:
            point_lights = [self.randomize_point_light() for _ in range(n_points)]
        return Lighting(env_energy, point_lights)

    def randomize_cropbg_param(self):
        """Return two uniform random values in [0, 1) for background cropping."""
        u, v = np.random.rand(), np.random.rand()
        return [u, v]

    def standardize_truncparam(self):
        """Return the neutral (no-truncation) parameter vector."""
        return [0.0] * 4

    def standardize_cropbg_param(self):
        """Return the centered (deterministic) crop parameters."""
        return [0.5] * 2
from urwid_stackedwidget import StackedWidget
import urwid
import pytest
# TODO: Test mouse_event()
def stringify(raw):
    """Collapse rendered canvas rows into one newline-separated string.

    :param raw: A list of byte strings (raw output from widget.text)
    """
    rows = (str(row) for row in raw)
    return '\n'.join(rows)
@pytest.fixture
def stacked_widget1():
    """A StackedWidget pre-loaded with three labelled Filler/Text widgets."""
    stack = StackedWidget()
    labels = ('The first widget', 'The second widget', 'The third widget')
    for label in labels:
        stack.push_widget(urwid.Filler(urwid.Text(label)))
    return stack
def test_properties(stacked_widget1):
    """An empty stack and a populated stack both expose sane properties."""
    empty_stack = StackedWidget()
    assert empty_stack.selectable
    assert empty_stack.widget_count == 0
    assert empty_stack.current_widget is None
    # The pre-populated fixture: three widgets, one of them current.
    assert stacked_widget1.selectable
    assert stacked_widget1.widget_count == 3
    assert stacked_widget1.current_widget is not None
def test_pop_and_push(stacked_widget1):
    """widget_count tracks pop_widget, push_widget and insert_widget."""
    assert stacked_widget1.widget_count == 3
    stacked_widget1.pop_widget()
    assert stacked_widget1.widget_count == 2
    stacked_widget1.push_widget(urwid.Text('This is a new widget'))
    assert stacked_widget1.widget_count == 3
    # insert at the front also counts as an addition
    stacked_widget1.insert_widget(0, urwid.Text('This is a new widget'))
    assert stacked_widget1.widget_count == 4
def test_show_next_widget(stacked_widget1):
    """Cycling forward walks first -> second -> third and wraps to first."""
    size = (20, 2)
    expected_order = ['first', 'second', 'third', 'first']
    canvas = stacked_widget1.render(size)
    assert expected_order[0] in stringify(canvas.text)
    for expected in expected_order[1:]:
        stacked_widget1.show_next_widget()
        canvas = stacked_widget1.render(size)
        assert expected in stringify(canvas.text)
def test_show_previous_widget(stacked_widget1):
    """Cycling backward wraps to third first, then walks back to first."""
    size = (20, 2)
    expected_order = ['first', 'third', 'second', 'first']
    canvas = stacked_widget1.render(size)
    assert expected_order[0] in stringify(canvas.text)
    for expected in expected_order[1:]:
        stacked_widget1.show_previous_widget()
        canvas = stacked_widget1.render(size)
        assert expected in stringify(canvas.text)
import urwid
import pytest
# TODO: Test mouse_event()
def stringify(raw):
    """Collapse rendered canvas rows into one newline-separated string.

    :param raw: A list of byte strings (raw output from widget.text)
    """
    rows = (str(row) for row in raw)
    return '\n'.join(rows)
@pytest.fixture
def stacked_widget1():
    """A StackedWidget pre-loaded with three labelled Filler/Text widgets."""
    stack = StackedWidget()
    labels = ('The first widget', 'The second widget', 'The third widget')
    for label in labels:
        stack.push_widget(urwid.Filler(urwid.Text(label)))
    return stack
def test_properties(stacked_widget1):
    """An empty stack and a populated stack both expose sane properties."""
    empty_stack = StackedWidget()
    assert empty_stack.selectable
    assert empty_stack.widget_count == 0
    assert empty_stack.current_widget is None
    # The pre-populated fixture: three widgets, one of them current.
    assert stacked_widget1.selectable
    assert stacked_widget1.widget_count == 3
    assert stacked_widget1.current_widget is not None
def test_pop_and_push(stacked_widget1):
    """widget_count tracks pop_widget, push_widget and insert_widget."""
    assert stacked_widget1.widget_count == 3
    stacked_widget1.pop_widget()
    assert stacked_widget1.widget_count == 2
    stacked_widget1.push_widget(urwid.Text('This is a new widget'))
    assert stacked_widget1.widget_count == 3
    # insert at the front also counts as an addition
    stacked_widget1.insert_widget(0, urwid.Text('This is a new widget'))
    assert stacked_widget1.widget_count == 4
def test_show_next_widget(stacked_widget1):
    """Cycling forward walks first -> second -> third and wraps to first."""
    size = (20, 2)
    expected_order = ['first', 'second', 'third', 'first']
    canvas = stacked_widget1.render(size)
    assert expected_order[0] in stringify(canvas.text)
    for expected in expected_order[1:]:
        stacked_widget1.show_next_widget()
        canvas = stacked_widget1.render(size)
        assert expected in stringify(canvas.text)
def test_show_previous_widget(stacked_widget1):
    """Cycling backward wraps to third first, then walks back to first."""
    size = (20, 2)
    expected_order = ['first', 'third', 'second', 'first']
    canvas = stacked_widget1.render(size)
    assert expected_order[0] in stringify(canvas.text)
    for expected in expected_order[1:]:
        stacked_widget1.show_previous_widget()
        canvas = stacked_widget1.render(size)
        assert expected in stringify(canvas.text)
from .helper_funcs import *
from pyfiberamp.mode_shape import ModeShape
class OpticalChannel:
    """One optical channel (signal or pump) propagating in the fiber.

    Holds per-ion-population center frequencies and bandwidths, the input
    power and propagation direction, the mode/doping-ring overlap factors,
    and the resulting gain/absorption/loss arrays used by the solver.
    """

    def __init__(self, v, dv, input_power, direction, overlaps, mode_func, gain, absorption, loss, label,
                 reflection_target_label, reflection_coeff, channel_type):
        # v / dv: per-population center frequency and bandwidth arrays.
        self.v = v
        self.dv = dv
        self.input_power = input_power
        self.direction = direction
        self.overlaps = overlaps
        self.gain = gain
        self.absorption = absorption
        self.loss = loss
        # Identity by default; presumably replaced elsewhere to model
        # peak power of pulsed channels — TODO confirm against callers.
        self.peak_power_func = lambda x: x
        self.number_of_modes = NUMBER_OF_MODES_IN_SINGLE_MODE_FIBER
        self.label = label
        # Label of the channel this one reflects into, plus the reflectivity.
        self.reflection_target_label = reflection_target_label
        self.reflection_coeff = reflection_coeff
        self.channel_type = channel_type
        self.mode_shape_func = mode_func

    @property
    def wavelength(self):
        """Center wavelength derived from the first population's frequency."""
        return freq_to_wl(self.v[0])

    @classmethod
    def create_signal_channel(cls, fiber, wl, wl_bandwidth, power, mode_shape_parameters, direction, label,
                              reflection_target_label, reflectance, channel_type=''):
        """Build a signal channel, filling missing mode-shape parameters
        from the fiber's signal defaults."""
        mode_shape_parameters = cls.fill_mode_shape_parameters(mode_shape_parameters,
                                                               fiber.default_signal_mode_shape_parameters)
        return cls._create_channel(fiber, wl, wl_bandwidth, power, mode_shape_parameters,
                                   direction, label, reflection_target_label, reflectance,
                                   channel_type)

    @classmethod
    def create_pump_channel(cls, fiber, wl, wl_bandwidth, power, mode_shape_parameters, direction, label,
                            reflection_target_label, reflectance, channel_type=''):
        """Build a pump channel, filling missing mode-shape parameters
        from the fiber's pump defaults."""
        mode_shape_parameters = cls.fill_mode_shape_parameters(mode_shape_parameters,
                                                               fiber.default_pump_mode_shape_parameters)
        return cls._create_channel(fiber, wl, wl_bandwidth, power, mode_shape_parameters,
                                   direction, label, reflection_target_label, reflectance,
                                   channel_type)

    @staticmethod
    def fill_mode_shape_parameters(input_parameters, default_parameters):
        """Overlay user parameters on the defaults (user keys win);
        return the defaults unchanged when no user parameters are given."""
        if input_parameters is None:
            return default_parameters
        return {**default_parameters, **input_parameters}

    @classmethod
    def _create_channel(cls, fiber, wl, wl_bandwidth, power, mode_shape_parameters,
                        direction, label, reflection_target_label,
                        reflection_coeff, channel_type):
        """Assemble an OpticalChannel from fiber data and channel settings."""
        n_ion_populations = fiber.num_ion_populations
        overlaps, mode_func = cls.get_overlaps_and_mode_func(fiber, wl, mode_shape_parameters)
        center_frequency = wl_to_freq(wl)
        frequency_bandwidth = wl_bw_to_freq_bw(wl_bandwidth, wl)
        # Per-population coefficients: overlap x cross-section x ion density.
        gain = overlaps * fiber.get_channel_emission_cross_section(center_frequency, frequency_bandwidth) * fiber.doping_profile.ion_number_densities
        absorption = overlaps * fiber.get_channel_absorption_cross_section(center_frequency, frequency_bandwidth) * fiber.doping_profile.ion_number_densities
        # Broadcast the scalar frequency data to one entry per population.
        center_frequency = np.full(n_ion_populations, center_frequency)
        frequency_bandwidth = np.full(n_ion_populations, frequency_bandwidth)
        loss = np.full(n_ion_populations, fiber.background_loss)
        return OpticalChannel(center_frequency, frequency_bandwidth, power,
                              direction, overlaps, mode_func, gain, absorption, loss,
                              label, reflection_target_label, reflection_coeff,
                              channel_type)

    @staticmethod
    def get_overlaps_and_mode_func(fiber, wl, mode_shape_parameters):
        """Return (overlaps, mode_func): preset overlaps when supplied,
        otherwise overlaps computed from the fiber's mode shape."""
        mode_func = None
        # Case 1: overlaps predefined
        n_preset_overlaps = len(mode_shape_parameters['overlaps'])
        if n_preset_overlaps > 0:
            assert n_preset_overlaps == len(fiber.doping_profile.areas)
            return np.array(mode_shape_parameters['overlaps']), mode_func
        # No overlaps defined -> fiber must specify doping profile radii for overlap calculation
        doping_radii = fiber.doping_profile.radii
        assert len(doping_radii) > 0
        # Case 2: Mode shape and overlaps must be calculated
        mode_shape = ModeShape(fiber, wl, mode_shape_parameters)
        overlaps = mode_shape.get_ring_overlaps(doping_radii)
        mode_func = mode_shape.mode_func
        return overlaps, mode_func
from pyfiberamp.mode_shape import ModeShape
class OpticalChannel:
    """One optical channel (signal or pump) propagating in the fiber.

    Holds per-ion-population center frequencies and bandwidths, the input
    power and propagation direction, the mode/doping-ring overlap factors,
    and the resulting gain/absorption/loss arrays used by the solver.
    """

    def __init__(self, v, dv, input_power, direction, overlaps, mode_func, gain, absorption, loss, label,
                 reflection_target_label, reflection_coeff, channel_type):
        # v / dv: per-population center frequency and bandwidth arrays.
        self.v = v
        self.dv = dv
        self.input_power = input_power
        self.direction = direction
        self.overlaps = overlaps
        self.gain = gain
        self.absorption = absorption
        self.loss = loss
        # Identity by default; presumably replaced elsewhere to model
        # peak power of pulsed channels — TODO confirm against callers.
        self.peak_power_func = lambda x: x
        self.number_of_modes = NUMBER_OF_MODES_IN_SINGLE_MODE_FIBER
        self.label = label
        # Label of the channel this one reflects into, plus the reflectivity.
        self.reflection_target_label = reflection_target_label
        self.reflection_coeff = reflection_coeff
        self.channel_type = channel_type
        self.mode_shape_func = mode_func

    @property
    def wavelength(self):
        """Center wavelength derived from the first population's frequency."""
        return freq_to_wl(self.v[0])

    @classmethod
    def create_signal_channel(cls, fiber, wl, wl_bandwidth, power, mode_shape_parameters, direction, label,
                              reflection_target_label, reflectance, channel_type=''):
        """Build a signal channel, filling missing mode-shape parameters
        from the fiber's signal defaults."""
        mode_shape_parameters = cls.fill_mode_shape_parameters(mode_shape_parameters,
                                                               fiber.default_signal_mode_shape_parameters)
        return cls._create_channel(fiber, wl, wl_bandwidth, power, mode_shape_parameters,
                                   direction, label, reflection_target_label, reflectance,
                                   channel_type)

    @classmethod
    def create_pump_channel(cls, fiber, wl, wl_bandwidth, power, mode_shape_parameters, direction, label,
                            reflection_target_label, reflectance, channel_type=''):
        """Build a pump channel, filling missing mode-shape parameters
        from the fiber's pump defaults."""
        mode_shape_parameters = cls.fill_mode_shape_parameters(mode_shape_parameters,
                                                               fiber.default_pump_mode_shape_parameters)
        return cls._create_channel(fiber, wl, wl_bandwidth, power, mode_shape_parameters,
                                   direction, label, reflection_target_label, reflectance,
                                   channel_type)

    @staticmethod
    def fill_mode_shape_parameters(input_parameters, default_parameters):
        """Overlay user parameters on the defaults (user keys win);
        return the defaults unchanged when no user parameters are given."""
        if input_parameters is None:
            return default_parameters
        return {**default_parameters, **input_parameters}

    @classmethod
    def _create_channel(cls, fiber, wl, wl_bandwidth, power, mode_shape_parameters,
                        direction, label, reflection_target_label,
                        reflection_coeff, channel_type):
        """Assemble an OpticalChannel from fiber data and channel settings."""
        n_ion_populations = fiber.num_ion_populations
        overlaps, mode_func = cls.get_overlaps_and_mode_func(fiber, wl, mode_shape_parameters)
        center_frequency = wl_to_freq(wl)
        frequency_bandwidth = wl_bw_to_freq_bw(wl_bandwidth, wl)
        # Per-population coefficients: overlap x cross-section x ion density.
        gain = overlaps * fiber.get_channel_emission_cross_section(center_frequency, frequency_bandwidth) * fiber.doping_profile.ion_number_densities
        absorption = overlaps * fiber.get_channel_absorption_cross_section(center_frequency, frequency_bandwidth) * fiber.doping_profile.ion_number_densities
        # Broadcast the scalar frequency data to one entry per population.
        center_frequency = np.full(n_ion_populations, center_frequency)
        frequency_bandwidth = np.full(n_ion_populations, frequency_bandwidth)
        loss = np.full(n_ion_populations, fiber.background_loss)
        return OpticalChannel(center_frequency, frequency_bandwidth, power,
                              direction, overlaps, mode_func, gain, absorption, loss,
                              label, reflection_target_label, reflection_coeff,
                              channel_type)

    @staticmethod
    def get_overlaps_and_mode_func(fiber, wl, mode_shape_parameters):
        """Return (overlaps, mode_func): preset overlaps when supplied,
        otherwise overlaps computed from the fiber's mode shape."""
        mode_func = None
        # Case 1: overlaps predefined
        n_preset_overlaps = len(mode_shape_parameters['overlaps'])
        if n_preset_overlaps > 0:
            assert n_preset_overlaps == len(fiber.doping_profile.areas)
            return np.array(mode_shape_parameters['overlaps']), mode_func
        # No overlaps defined -> fiber must specify doping profile radii for overlap calculation
        doping_radii = fiber.doping_profile.radii
        assert len(doping_radii) > 0
        # Case 2: Mode shape and overlaps must be calculated
        mode_shape = ModeShape(fiber, wl, mode_shape_parameters)
        overlaps = mode_shape.get_ring_overlaps(doping_radii)
        mode_func = mode_shape.mode_func
        return overlaps, mode_func
from contextlib import suppress
from git import Git, GitCommandError
from github.Repository import Repository
from msm import SkillRepo, SkillEntry
from os.path import join
from subprocess import call
from msk.exceptions import AlreadyUpdated, NotUploaded
from msk.global_context import GlobalContext
from msk.lazy import Lazy
from msk.util import skill_repo_name
class RepoData(GlobalContext):
    """Lazy accessors and git operations for the central skill repository."""

    msminfo = Lazy(lambda s: s.msm.repo)  # type: SkillRepo
    git = Lazy(lambda s: Git(s.msminfo.path))  # type: Git
    hub = Lazy(lambda s: s.github.get_repo(skill_repo_name(s.msminfo.url)))  # type: Repository
    fork = Lazy(lambda s: s.github.get_user().create_fork(s.hub))  # type: Repository

    def push_to_fork(self, branch: str):
        """Force-push `branch` to the user's fork, (re)configuring the
        'fork' remote to point at the fork's URL first."""
        remotes = self.git.remote().split('\n')
        # Update the existing 'fork' remote rather than re-adding it.
        command = 'set-url' if 'fork' in remotes else 'add'
        self.git.remote(command, 'fork', self.fork.html_url)
        # Use call to ensure the environment variable GIT_ASKPASS is used
        call(['git', 'push', '-u', 'fork', branch, '--force'], cwd=self.msminfo.path)

    def checkout_branch(self, branch):
        """Recreate `branch` from the current HEAD; if creation fails,
        fall back to checking out the existing branch."""
        # Drop any stale local branch of the same name; ignore if absent.
        with suppress(GitCommandError):
            self.git.branch('-D', branch)
        try:
            self.git.checkout(b=branch)
        except GitCommandError:
            self.git.checkout(branch)
class SkillData(GlobalContext):
    """Lazy accessors and git operations for one skill's repository and
    its submodule inside the central skill repo."""

    def __init__(self, skill: SkillEntry):
        self.entry = skill

    name = property(lambda self: self.entry.name)
    repo = Lazy(lambda s: RepoData())  # type: RepoData
    repo_git = Lazy(lambda s: Git(join(s.repo.msminfo.path, s.submodule_name)))  # type: Git
    repo_branch = Lazy(lambda s: s.repo_git.symbolic_ref('refs/remotes/origin/HEAD'))
    git = Lazy(lambda s: Git(s.entry.path))  # type: Git
    hub = Lazy(lambda s: s.github.get_repo(skill_repo_name(s.entry.url)))  # type: Repository

    @Lazy
    def submodule_name(self):
        """Path of this skill's submodule inside the skill repo.

        Raises NotUploaded when the skill is not in the store yet.
        """
        name_to_path = {name: path for name, path, url, sha in self.repo.msminfo.get_skill_data()}
        if self.name not in name_to_path:
            raise NotUploaded('The skill {} has not yet been uploaded to the skill store'.format(
                self.name
            ))
        return name_to_path[self.name]

    def upgrade(self) -> str:
        """Point the skill's submodule at the latest upstream commit.

        Returns the name of the branch holding the upgrade commit.
        Raises AlreadyUpdated when the submodule is already current.
        """
        skill_module = self.submodule_name
        submod = Git(join(self.repo.msminfo.path, skill_module))
        # Refresh origin/HEAD so repo_branch resolves to the default branch.
        submod.remote('set-head', 'origin', '-a')
        self.repo.msminfo.update()
        self.repo_git.fetch()
        self.repo_git.reset(self.repo_branch, hard=True)
        upgrade_branch = 'upgrade-' + self.name
        self.repo.checkout_branch(upgrade_branch)
        # No diff while the path is tracked -> submodule already at the tip.
        if not self.repo.git.diff(skill_module) and self.repo.git.ls_files(skill_module):
            raise AlreadyUpdated(
                'The latest version of {} is already uploaded to the skill repo'.format(
                    self.name
                )
            )
        self.repo.git.add(skill_module)
        self.repo.git.commit(message='Upgrade ' + self.name)
        return upgrade_branch

    def add_to_repo(self) -> str:
        """Add this skill as a new submodule of the skill repo.

        Returns the name of the branch holding the commit.
        """
        self.repo.msminfo.update()
        # NOTE(review): ls_tree is run on the skill's own repo (self.git),
        # not the skill-store repo (self.repo.git) — confirm which repository
        # is intended for listing the existing submodules.
        existing_mods = [i.split('\t')[1]
                         for i in self.git.ls_tree('HEAD').split('\n')]
        if self.name not in existing_mods:
            self.repo.git.submodule('add', self.entry.url, self.name)
        # Upgrade skill in case it is outdated
        self.repo_git.fetch()
        self.repo_git.reset(self.repo_branch, hard=True)
        branch_name = 'add-' + self.name
        self.repo.checkout_branch(branch_name)
        self.repo.git.add(self.name)
        self.repo.git.commit(message='Add ' + self.name)
        return branch_name

    def init_existing(self):
        """Ensure the skill's submodule is initialized and checked out."""
        self.repo.git.submodule('update', '--init', self.submodule_name)
from git import Git, GitCommandError
from github.Repository import Repository
from msm import SkillRepo, SkillEntry
from os.path import join
from subprocess import call
from msk.exceptions import AlreadyUpdated, NotUploaded
from msk.global_context import GlobalContext
from msk.lazy import Lazy
from msk.util import skill_repo_name
class RepoData(GlobalContext):
    """Lazy accessors and git operations for the central skill repository."""

    msminfo = Lazy(lambda s: s.msm.repo)  # type: SkillRepo
    git = Lazy(lambda s: Git(s.msminfo.path))  # type: Git
    hub = Lazy(lambda s: s.github.get_repo(skill_repo_name(s.msminfo.url)))  # type: Repository
    fork = Lazy(lambda s: s.github.get_user().create_fork(s.hub))  # type: Repository

    def push_to_fork(self, branch: str):
        """Force-push `branch` to the user's fork, (re)configuring the
        'fork' remote to point at the fork's URL first."""
        remotes = self.git.remote().split('\n')
        # Update the existing 'fork' remote rather than re-adding it.
        command = 'set-url' if 'fork' in remotes else 'add'
        self.git.remote(command, 'fork', self.fork.html_url)
        # Use call to ensure the environment variable GIT_ASKPASS is used
        call(['git', 'push', '-u', 'fork', branch, '--force'], cwd=self.msminfo.path)

    def checkout_branch(self, branch):
        """Recreate `branch` from the current HEAD; if creation fails,
        fall back to checking out the existing branch."""
        # Drop any stale local branch of the same name; ignore if absent.
        with suppress(GitCommandError):
            self.git.branch('-D', branch)
        try:
            self.git.checkout(b=branch)
        except GitCommandError:
            self.git.checkout(branch)
class SkillData(GlobalContext):
    """Lazy accessors and git operations for one skill's repository and
    its submodule inside the central skill repo."""

    def __init__(self, skill: SkillEntry):
        self.entry = skill

    name = property(lambda self: self.entry.name)
    repo = Lazy(lambda s: RepoData())  # type: RepoData
    repo_git = Lazy(lambda s: Git(join(s.repo.msminfo.path, s.submodule_name)))  # type: Git
    repo_branch = Lazy(lambda s: s.repo_git.symbolic_ref('refs/remotes/origin/HEAD'))
    git = Lazy(lambda s: Git(s.entry.path))  # type: Git
    hub = Lazy(lambda s: s.github.get_repo(skill_repo_name(s.entry.url)))  # type: Repository

    @Lazy
    def submodule_name(self):
        """Path of this skill's submodule inside the skill repo.

        Raises NotUploaded when the skill is not in the store yet.
        """
        name_to_path = {name: path for name, path, url, sha in self.repo.msminfo.get_skill_data()}
        if self.name not in name_to_path:
            raise NotUploaded('The skill {} has not yet been uploaded to the skill store'.format(
                self.name
            ))
        return name_to_path[self.name]

    def upgrade(self) -> str:
        """Point the skill's submodule at the latest upstream commit.

        Returns the name of the branch holding the upgrade commit.
        Raises AlreadyUpdated when the submodule is already current.
        """
        skill_module = self.submodule_name
        submod = Git(join(self.repo.msminfo.path, skill_module))
        # Refresh origin/HEAD so repo_branch resolves to the default branch.
        submod.remote('set-head', 'origin', '-a')
        self.repo.msminfo.update()
        self.repo_git.fetch()
        self.repo_git.reset(self.repo_branch, hard=True)
        upgrade_branch = 'upgrade-' + self.name
        self.repo.checkout_branch(upgrade_branch)
        # No diff while the path is tracked -> submodule already at the tip.
        if not self.repo.git.diff(skill_module) and self.repo.git.ls_files(skill_module):
            raise AlreadyUpdated(
                'The latest version of {} is already uploaded to the skill repo'.format(
                    self.name
                )
            )
        self.repo.git.add(skill_module)
        self.repo.git.commit(message='Upgrade ' + self.name)
        return upgrade_branch

    def add_to_repo(self) -> str:
        """Add this skill as a new submodule of the skill repo.

        Returns the name of the branch holding the commit.
        """
        self.repo.msminfo.update()
        # NOTE(review): ls_tree is run on the skill's own repo (self.git),
        # not the skill-store repo (self.repo.git) — confirm which repository
        # is intended for listing the existing submodules.
        existing_mods = [i.split('\t')[1]
                         for i in self.git.ls_tree('HEAD').split('\n')]
        if self.name not in existing_mods:
            self.repo.git.submodule('add', self.entry.url, self.name)
        # Upgrade skill in case it is outdated
        self.repo_git.fetch()
        self.repo_git.reset(self.repo_branch, hard=True)
        branch_name = 'add-' + self.name
        self.repo.checkout_branch(branch_name)
        self.repo.git.add(self.name)
        self.repo.git.commit(message='Add ' + self.name)
        return branch_name

    def init_existing(self):
        """Ensure the skill's submodule is initialized and checked out."""
        self.repo.git.submodule('update', '--init', self.submodule_name)
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server import util
class SwapSpaceMonitorMemoryUsage2(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

    Do not edit the class manually.
    """
    # NOTE: the backing field for the `_class` property is written as
    # `self.__class`, which Python name-mangles to
    # `_SwapSpaceMonitorMemoryUsage2__class`; the getter/setter below use the
    # same spelling, so the pair stays consistent.

    def __init__(self, _class: str=None, available_physical_memory: int=None, available_swap_space: int=None, total_physical_memory: int=None, total_swap_space: int=None):  # noqa: E501
        """SwapSpaceMonitorMemoryUsage2 - a model defined in OpenAPI

        :param _class: The _class of this SwapSpaceMonitorMemoryUsage2.  # noqa: E501
        :type _class: str
        :param available_physical_memory: The available_physical_memory of this SwapSpaceMonitorMemoryUsage2.  # noqa: E501
        :type available_physical_memory: int
        :param available_swap_space: The available_swap_space of this SwapSpaceMonitorMemoryUsage2.  # noqa: E501
        :type available_swap_space: int
        :param total_physical_memory: The total_physical_memory of this SwapSpaceMonitorMemoryUsage2.  # noqa: E501
        :type total_physical_memory: int
        :param total_swap_space: The total_swap_space of this SwapSpaceMonitorMemoryUsage2.  # noqa: E501
        :type total_swap_space: int
        """
        # Declared attribute types, used by the base Model for (de)serialization.
        self.openapi_types = {
            '_class': str,
            'available_physical_memory': int,
            'available_swap_space': int,
            'total_physical_memory': int,
            'total_swap_space': int
        }
        # Maps python attribute names to the JSON field names on the wire.
        self.attribute_map = {
            '_class': '_class',
            'available_physical_memory': 'availablePhysicalMemory',
            'available_swap_space': 'availableSwapSpace',
            'total_physical_memory': 'totalPhysicalMemory',
            'total_swap_space': 'totalSwapSpace'
        }
        self.__class = _class
        self._available_physical_memory = available_physical_memory
        self._available_swap_space = available_swap_space
        self._total_physical_memory = total_physical_memory
        self._total_swap_space = total_swap_space

    @classmethod
    def from_dict(cls, dikt) -> 'SwapSpaceMonitorMemoryUsage2':
        """Returns the dict as a model

        :param dikt: A dict.
        :type dikt: dict
        :return: The SwapSpaceMonitorMemoryUsage2 of this SwapSpaceMonitorMemoryUsage2.  # noqa: E501
        :rtype: SwapSpaceMonitorMemoryUsage2
        """
        return util.deserialize_model(dikt, cls)

    @property
    def _class(self) -> str:
        """Gets the _class of this SwapSpaceMonitorMemoryUsage2.

        :return: The _class of this SwapSpaceMonitorMemoryUsage2.
        :rtype: str
        """
        return self.__class

    @_class.setter
    def _class(self, _class: str):
        """Sets the _class of this SwapSpaceMonitorMemoryUsage2.

        :param _class: The _class of this SwapSpaceMonitorMemoryUsage2.
        :type _class: str
        """
        self.__class = _class

    @property
    def available_physical_memory(self) -> int:
        """Gets the available_physical_memory of this SwapSpaceMonitorMemoryUsage2.

        :return: The available_physical_memory of this SwapSpaceMonitorMemoryUsage2.
        :rtype: int
        """
        return self._available_physical_memory

    @available_physical_memory.setter
    def available_physical_memory(self, available_physical_memory: int):
        """Sets the available_physical_memory of this SwapSpaceMonitorMemoryUsage2.

        :param available_physical_memory: The available_physical_memory of this SwapSpaceMonitorMemoryUsage2.
        :type available_physical_memory: int
        """
        self._available_physical_memory = available_physical_memory

    @property
    def available_swap_space(self) -> int:
        """Gets the available_swap_space of this SwapSpaceMonitorMemoryUsage2.

        :return: The available_swap_space of this SwapSpaceMonitorMemoryUsage2.
        :rtype: int
        """
        return self._available_swap_space

    @available_swap_space.setter
    def available_swap_space(self, available_swap_space: int):
        """Sets the available_swap_space of this SwapSpaceMonitorMemoryUsage2.

        :param available_swap_space: The available_swap_space of this SwapSpaceMonitorMemoryUsage2.
        :type available_swap_space: int
        """
        self._available_swap_space = available_swap_space

    @property
    def total_physical_memory(self) -> int:
        """Gets the total_physical_memory of this SwapSpaceMonitorMemoryUsage2.

        :return: The total_physical_memory of this SwapSpaceMonitorMemoryUsage2.
        :rtype: int
        """
        return self._total_physical_memory

    @total_physical_memory.setter
    def total_physical_memory(self, total_physical_memory: int):
        """Sets the total_physical_memory of this SwapSpaceMonitorMemoryUsage2.

        :param total_physical_memory: The total_physical_memory of this SwapSpaceMonitorMemoryUsage2.
        :type total_physical_memory: int
        """
        self._total_physical_memory = total_physical_memory

    @property
    def total_swap_space(self) -> int:
        """Gets the total_swap_space of this SwapSpaceMonitorMemoryUsage2.

        :return: The total_swap_space of this SwapSpaceMonitorMemoryUsage2.
        :rtype: int
        """
        return self._total_swap_space

    @total_swap_space.setter
    def total_swap_space(self, total_swap_space: int):
        """Sets the total_swap_space of this SwapSpaceMonitorMemoryUsage2.

        :param total_swap_space: The total_swap_space of this SwapSpaceMonitorMemoryUsage2.
        :type total_swap_space: int
        """
        self._total_swap_space = total_swap_space
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server import util
class SwapSpaceMonitorMemoryUsage2(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

    Do not edit the class manually.
    """
    # NOTE: the backing field for the `_class` property is written as
    # `self.__class`, which Python name-mangles to
    # `_SwapSpaceMonitorMemoryUsage2__class`; the getter/setter below use the
    # same spelling, so the pair stays consistent.

    def __init__(self, _class: str=None, available_physical_memory: int=None, available_swap_space: int=None, total_physical_memory: int=None, total_swap_space: int=None):  # noqa: E501
        """SwapSpaceMonitorMemoryUsage2 - a model defined in OpenAPI

        :param _class: The _class of this SwapSpaceMonitorMemoryUsage2.  # noqa: E501
        :type _class: str
        :param available_physical_memory: The available_physical_memory of this SwapSpaceMonitorMemoryUsage2.  # noqa: E501
        :type available_physical_memory: int
        :param available_swap_space: The available_swap_space of this SwapSpaceMonitorMemoryUsage2.  # noqa: E501
        :type available_swap_space: int
        :param total_physical_memory: The total_physical_memory of this SwapSpaceMonitorMemoryUsage2.  # noqa: E501
        :type total_physical_memory: int
        :param total_swap_space: The total_swap_space of this SwapSpaceMonitorMemoryUsage2.  # noqa: E501
        :type total_swap_space: int
        """
        # Declared attribute types, used by the base Model for (de)serialization.
        self.openapi_types = {
            '_class': str,
            'available_physical_memory': int,
            'available_swap_space': int,
            'total_physical_memory': int,
            'total_swap_space': int
        }
        # Maps python attribute names to the JSON field names on the wire.
        self.attribute_map = {
            '_class': '_class',
            'available_physical_memory': 'availablePhysicalMemory',
            'available_swap_space': 'availableSwapSpace',
            'total_physical_memory': 'totalPhysicalMemory',
            'total_swap_space': 'totalSwapSpace'
        }
        self.__class = _class
        self._available_physical_memory = available_physical_memory
        self._available_swap_space = available_swap_space
        self._total_physical_memory = total_physical_memory
        self._total_swap_space = total_swap_space

    @classmethod
    def from_dict(cls, dikt) -> 'SwapSpaceMonitorMemoryUsage2':
        """Returns the dict as a model

        :param dikt: A dict.
        :type dikt: dict
        :return: The SwapSpaceMonitorMemoryUsage2 of this SwapSpaceMonitorMemoryUsage2.  # noqa: E501
        :rtype: SwapSpaceMonitorMemoryUsage2
        """
        return util.deserialize_model(dikt, cls)

    @property
    def _class(self) -> str:
        """Gets the _class of this SwapSpaceMonitorMemoryUsage2.

        :return: The _class of this SwapSpaceMonitorMemoryUsage2.
        :rtype: str
        """
        return self.__class

    @_class.setter
    def _class(self, _class: str):
        """Sets the _class of this SwapSpaceMonitorMemoryUsage2.

        :param _class: The _class of this SwapSpaceMonitorMemoryUsage2.
        :type _class: str
        """
        self.__class = _class

    @property
    def available_physical_memory(self) -> int:
        """Gets the available_physical_memory of this SwapSpaceMonitorMemoryUsage2.

        :return: The available_physical_memory of this SwapSpaceMonitorMemoryUsage2.
        :rtype: int
        """
        return self._available_physical_memory

    @available_physical_memory.setter
    def available_physical_memory(self, available_physical_memory: int):
        """Sets the available_physical_memory of this SwapSpaceMonitorMemoryUsage2.

        :param available_physical_memory: The available_physical_memory of this SwapSpaceMonitorMemoryUsage2.
        :type available_physical_memory: int
        """
        self._available_physical_memory = available_physical_memory

    @property
    def available_swap_space(self) -> int:
        """Gets the available_swap_space of this SwapSpaceMonitorMemoryUsage2.

        :return: The available_swap_space of this SwapSpaceMonitorMemoryUsage2.
        :rtype: int
        """
        return self._available_swap_space

    @available_swap_space.setter
    def available_swap_space(self, available_swap_space: int):
        """Sets the available_swap_space of this SwapSpaceMonitorMemoryUsage2.

        :param available_swap_space: The available_swap_space of this SwapSpaceMonitorMemoryUsage2.
        :type available_swap_space: int
        """
        self._available_swap_space = available_swap_space

    @property
    def total_physical_memory(self) -> int:
        """Gets the total_physical_memory of this SwapSpaceMonitorMemoryUsage2.

        :return: The total_physical_memory of this SwapSpaceMonitorMemoryUsage2.
        :rtype: int
        """
        return self._total_physical_memory

    @total_physical_memory.setter
    def total_physical_memory(self, total_physical_memory: int):
        """Sets the total_physical_memory of this SwapSpaceMonitorMemoryUsage2.

        :param total_physical_memory: The total_physical_memory of this SwapSpaceMonitorMemoryUsage2.
        :type total_physical_memory: int
        """
        self._total_physical_memory = total_physical_memory

    @property
    def total_swap_space(self) -> int:
        """Gets the total_swap_space of this SwapSpaceMonitorMemoryUsage2.

        :return: The total_swap_space of this SwapSpaceMonitorMemoryUsage2.
        :rtype: int
        """
        return self._total_swap_space

    @total_swap_space.setter
    def total_swap_space(self, total_swap_space: int):
        """Sets the total_swap_space of this SwapSpaceMonitorMemoryUsage2.

        :param total_swap_space: The total_swap_space of this SwapSpaceMonitorMemoryUsage2.
        :type total_swap_space: int
        """
        self._total_swap_space = total_swap_space
import numpy as np
from joblib import Parallel, delayed
import torch
from src.clustering_models.clusternet_modules.clusternet_trainer import (
ClusterNetTrainer,
)
def _parallel_compute_distance(X, cluster):
    """Euclidean distance from every row of X to a single cluster center.

    :param X: array of shape (n_samples, latent_dim)
    :param cluster: array of shape (latent_dim,) — one cluster center
    :return: array of shape (n_samples, 1), column of distances

    Executed inside a joblib worker (one call per cluster center, see
    ClusterNet._compute_dist).
    """
    # Vectorized: a single numpy reduction replaces the per-sample Python
    # loop of the original; values are identical (same sum-of-squares + sqrt).
    diff = X - cluster
    return np.sqrt(np.sum(diff ** 2, axis=1)).reshape(-1, 1)
class ClusterNet(object):
    """Maintains cluster centers in latent space and delegates deep
    clustering to a ClusterNetTrainer.

    State owned by this wrapper:
      * ``self.clusters`` -- (n_clusters, latent_dim) array of centers,
      * ``self.count``    -- per-cluster pseudo-counts whose reciprocal is
        the per-cluster step size for online center updates,
      * ``self.model``    -- the trainer, created lazily by ``init_cluster``.
    """

    def __init__(self, args, feature_extractor):
        """Store configuration and allocate empty cluster state.

        Args:
            args: namespace with at least ``latent_dim``, ``n_clusters``,
                ``n_jobs``, ``gpus``, ``regularization`` and
                ``train_cluster_net`` attributes.
            feature_extractor: encoder module used by the trainer.
        """
        self.args = args
        self.latent_dim = args.latent_dim
        self.n_clusters = args.n_clusters
        self.clusters = np.zeros((self.n_clusters, self.latent_dim))
        # Pseudo-counts: 1 / count[k] serves as the learning rate for center k.
        self.count = 100 * np.ones((self.n_clusters))
        self.n_jobs = args.n_jobs
        self.feature_extractor = feature_extractor
        self.device = "cuda" if torch.cuda.is_available() and args.gpus is not None else "cpu"

    def _compute_dist(self, X):
        """Return an (n_samples, n_clusters) matrix of Euclidean distances."""
        # One joblib task per cluster, each producing an (n_samples, 1) column.
        dis_mat = Parallel(n_jobs=self.n_jobs)(
            delayed(_parallel_compute_distance)(X, self.clusters[i])
            for i in range(self.n_clusters)
        )
        dis_mat = np.hstack(dis_mat)
        return dis_mat

    def init_cluster(self, train_loader, val_loader, logger, indices=None, centers=None, init_num=0):
        """Generate initial clusters using the clusternet.

        ``init_num`` is the number of times the clusternet was initialized
        (from the AE_ClusterPipeline module).
        """
        self.feature_extractor.freeze()
        self.model = ClusterNetTrainer(
            args=self.args,
            init_k=self.n_clusters,
            latent_dim=self.latent_dim,
            feature_extractor=self.feature_extractor,
            centers=centers,
            init_num=init_num
        )
        self.fit_cluster(train_loader, val_loader, logger, centers)
        self.model.cluster_model.freeze()
        self.feature_extractor.unfreeze()
        self.feature_extractor.to(device=self.device)

    def fit_cluster(self, train_loader, val_loader, logger, centers=None):
        """Train the cluster model (encoder frozen), then adopt its centers and K."""
        self.feature_extractor.freeze()
        self.model.cluster_model.unfreeze()
        self.model.fit(train_loader, val_loader, logger, self.args.train_cluster_net, centers=centers)
        self.model.cluster_model.freeze()
        self.clusters = self.model.get_clusters_centers()  # copy clusters
        self._set_K(self.model.get_current_K())
        self.feature_extractor.unfreeze()
        self.feature_extractor.to(device=self.device)

    def freeze(self):
        # Freeze the cluster head; the feature extractor keeps training.
        self.model.cluster_model.freeze()
        self.feature_extractor.unfreeze()

    def unfreeze(self):
        self.model.cluster_model.unfreeze()
        self.model.cluster_model.to(device=self.device)

    def update_cluster_center(self, X, cluster_idx, assignments=None):
        """Online (sequential k-means style) update of one cluster center.

        Args:
            X: all the data points that were assigned to this cluster.
            cluster_idx (int): the cluster index.
            assignments: probability of each sample belonging to each cluster
                (a vector of ones for hard assignment); only column
                ``cluster_idx`` is consulted.
        """
        n_samples = X.shape[0]
        for i in range(n_samples):
            if assignments[i, cluster_idx].item() > 0:
                self.count[cluster_idx] += assignments[i, cluster_idx].item()
                eta = 1.0 / self.count[cluster_idx]
                # Convex combination of the old center and the weighted sample.
                updated_cluster = (1 - eta) * self.clusters[cluster_idx] + eta * X[i] * assignments[i, cluster_idx].item()
                # updated_cluster = (1 - eta) * self.clusters[cluster_idx] + eta * X[i]
                self.clusters[cluster_idx] = updated_cluster

    def update_cluster_covs(self, X, cluster_idx, assignments):
        # Covariances are managed by the trainer; nothing to update here.
        return None

    def update_cluster_pis(self, X, cluster_idx, assignments):
        # Mixture weights are managed by the trainer; nothing to update here.
        return None

    def update_assign(self, X, how_to_assign="min_dist"):
        """Assign samples in ``X`` to clusters.

        Raises:
            ValueError: for an unknown ``how_to_assign`` mode (the original
                code silently returned None here, hiding caller typos).
        """
        if how_to_assign == "min_dist":
            return self._update_assign_min_dist(X.detach().cpu().numpy())
        elif how_to_assign == "forward_pass":
            return self.get_model_resp(X)
        raise ValueError(f"Unknown how_to_assign mode: {how_to_assign!r}")

    def _update_assign_min_dist(self, X):
        """Hard-assign each sample to its nearest center, one-hot encoded."""
        dis_mat = self._compute_dist(X)
        hard_assign = np.argmin(dis_mat, axis=1)
        return self._to_one_hot(torch.tensor(hard_assign))

    def _to_one_hot(self, hard_assignments):
        """
        Takes LongTensor with index values of shape (*) and
        returns a tensor of shape (*, num_classes) that have zeros everywhere
        except where the index of last dimension matches the corresponding value
        of the input tensor, in which case it will be 1.
        """
        return torch.nn.functional.one_hot(hard_assignments, num_classes=self.n_clusters)

    def _set_K(self, new_K):
        """Adopt a new cluster count and reset the pseudo-counts."""
        self.n_clusters = new_K
        self.count = 100 * np.ones((self.n_clusters))  # learning rate pseudo-counts

    def get_model_params(self):
        """Return (centers, covariances, mixture weights, K) from the trainer."""
        mu, covs, pi, K = self.model.get_clusters_centers(), self.model.get_clusters_covs(), self.model.get_clusters_pis(), self.n_clusters
        return mu, covs, pi, K

    def get_model_resp(self, codes):
        """Forward ``codes`` through the cluster head, returning responsibilities.

        Gradients flow only when the "cluster_loss" regularization needs them.
        (Dataset-merge residue previously corrupted the final return line.)
        """
        self.model.cluster_model.to(device=self.device)
        if self.args.regularization == "cluster_loss":
            # cluster assignment should have grad
            return self.model.cluster_model(codes)
        # cluster assignment shouldn't have grad
        with torch.no_grad():
            return self.model.cluster_model(codes)
import numpy as np
from joblib import Parallel, delayed
import torch
from src.clustering_models.clusternet_modules.clusternet_trainer import (
ClusterNetTrainer,
)
def _parallel_compute_distance(X, cluster):
n_samples = X.shape[0]
dis_mat = np.zeros((n_samples, 1))
for i in range(n_samples):
dis_mat[i] += np.sqrt(np.sum((X[i] - cluster) ** 2, axis=0))
return dis_mat
class ClusterNet(object):
    """Maintains cluster centers in latent space and delegates deep
    clustering to a ClusterNetTrainer.

    State owned by this wrapper:
      * ``self.clusters`` -- (n_clusters, latent_dim) array of centers,
      * ``self.count``    -- per-cluster pseudo-counts whose reciprocal is
        the per-cluster step size for online center updates,
      * ``self.model``    -- the trainer, created lazily by ``init_cluster``.
    """

    def __init__(self, args, feature_extractor):
        """Store configuration and allocate empty cluster state.

        Args:
            args: namespace with at least ``latent_dim``, ``n_clusters``,
                ``n_jobs``, ``gpus``, ``regularization`` and
                ``train_cluster_net`` attributes.
            feature_extractor: encoder module used by the trainer.
        """
        self.args = args
        self.latent_dim = args.latent_dim
        self.n_clusters = args.n_clusters
        self.clusters = np.zeros((self.n_clusters, self.latent_dim))
        # Pseudo-counts: 1 / count[k] serves as the learning rate for center k.
        self.count = 100 * np.ones((self.n_clusters))
        self.n_jobs = args.n_jobs
        self.feature_extractor = feature_extractor
        self.device = "cuda" if torch.cuda.is_available() and args.gpus is not None else "cpu"

    def _compute_dist(self, X):
        """Return an (n_samples, n_clusters) matrix of Euclidean distances."""
        # One joblib task per cluster, each producing an (n_samples, 1) column.
        dis_mat = Parallel(n_jobs=self.n_jobs)(
            delayed(_parallel_compute_distance)(X, self.clusters[i])
            for i in range(self.n_clusters)
        )
        dis_mat = np.hstack(dis_mat)
        return dis_mat

    def init_cluster(self, train_loader, val_loader, logger, indices=None, centers=None, init_num=0):
        """Generate initial clusters using the clusternet.

        ``init_num`` is the number of times the clusternet was initialized
        (from the AE_ClusterPipeline module).
        """
        self.feature_extractor.freeze()
        self.model = ClusterNetTrainer(
            args=self.args,
            init_k=self.n_clusters,
            latent_dim=self.latent_dim,
            feature_extractor=self.feature_extractor,
            centers=centers,
            init_num=init_num
        )
        self.fit_cluster(train_loader, val_loader, logger, centers)
        self.model.cluster_model.freeze()
        self.feature_extractor.unfreeze()
        self.feature_extractor.to(device=self.device)

    def fit_cluster(self, train_loader, val_loader, logger, centers=None):
        """Train the cluster model (encoder frozen), then adopt its centers and K."""
        self.feature_extractor.freeze()
        self.model.cluster_model.unfreeze()
        self.model.fit(train_loader, val_loader, logger, self.args.train_cluster_net, centers=centers)
        self.model.cluster_model.freeze()
        self.clusters = self.model.get_clusters_centers()  # copy clusters
        self._set_K(self.model.get_current_K())
        self.feature_extractor.unfreeze()
        self.feature_extractor.to(device=self.device)

    def freeze(self):
        # Freeze the cluster head; the feature extractor keeps training.
        self.model.cluster_model.freeze()
        self.feature_extractor.unfreeze()

    def unfreeze(self):
        self.model.cluster_model.unfreeze()
        self.model.cluster_model.to(device=self.device)

    def update_cluster_center(self, X, cluster_idx, assignments=None):
        """Online (sequential k-means style) update of one cluster center.

        Args:
            X: all the data points that were assigned to this cluster.
            cluster_idx (int): the cluster index.
            assignments: probability of each sample belonging to each cluster
                (a vector of ones for hard assignment); only column
                ``cluster_idx`` is consulted.
        """
        n_samples = X.shape[0]
        for i in range(n_samples):
            if assignments[i, cluster_idx].item() > 0:
                self.count[cluster_idx] += assignments[i, cluster_idx].item()
                eta = 1.0 / self.count[cluster_idx]
                # Convex combination of the old center and the weighted sample.
                updated_cluster = (1 - eta) * self.clusters[cluster_idx] + eta * X[i] * assignments[i, cluster_idx].item()
                # updated_cluster = (1 - eta) * self.clusters[cluster_idx] + eta * X[i]
                self.clusters[cluster_idx] = updated_cluster

    def update_cluster_covs(self, X, cluster_idx, assignments):
        # Covariances are managed by the trainer; nothing to update here.
        return None

    def update_cluster_pis(self, X, cluster_idx, assignments):
        # Mixture weights are managed by the trainer; nothing to update here.
        return None

    def update_assign(self, X, how_to_assign="min_dist"):
        """Assign samples in ``X`` to clusters.

        Raises:
            ValueError: for an unknown ``how_to_assign`` mode (the original
                code silently returned None here, hiding caller typos).
        """
        if how_to_assign == "min_dist":
            return self._update_assign_min_dist(X.detach().cpu().numpy())
        elif how_to_assign == "forward_pass":
            return self.get_model_resp(X)
        raise ValueError(f"Unknown how_to_assign mode: {how_to_assign!r}")

    def _update_assign_min_dist(self, X):
        """Hard-assign each sample to its nearest center, one-hot encoded."""
        dis_mat = self._compute_dist(X)
        hard_assign = np.argmin(dis_mat, axis=1)
        return self._to_one_hot(torch.tensor(hard_assign))

    def _to_one_hot(self, hard_assignments):
        """
        Takes LongTensor with index values of shape (*) and
        returns a tensor of shape (*, num_classes) that have zeros everywhere
        except where the index of last dimension matches the corresponding value
        of the input tensor, in which case it will be 1.
        """
        return torch.nn.functional.one_hot(hard_assignments, num_classes=self.n_clusters)

    def _set_K(self, new_K):
        """Adopt a new cluster count and reset the pseudo-counts."""
        self.n_clusters = new_K
        self.count = 100 * np.ones((self.n_clusters))  # learning rate pseudo-counts

    def get_model_params(self):
        """Return (centers, covariances, mixture weights, K) from the trainer."""
        mu, covs, pi, K = self.model.get_clusters_centers(), self.model.get_clusters_covs(), self.model.get_clusters_pis(), self.n_clusters
        return mu, covs, pi, K

    def get_model_resp(self, codes):
        """Forward ``codes`` through the cluster head, returning responsibilities.

        Gradients flow only when the "cluster_loss" regularization needs them.
        (Dataset-merge residue "| 0.846514 | 0.321939 |" previously corrupted
        the final return line.)
        """
        self.model.cluster_model.to(device=self.device)
        if self.args.regularization == "cluster_loss":
            # cluster assignment should have grad
            return self.model.cluster_model(codes)
        # cluster assignment shouldn't have grad
        with torch.no_grad():
            return self.model.cluster_model(codes)
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class PyObjectSynthProviderTestCase(TestBase):
    """Exercise a scripted synthetic child provider (provider.py) registered
    for type 'Foo', via both 'frame variable' and 'expression'."""

    mydir = TestBase.compute_mydir(__file__)
    NO_DEBUG_INFO_TESTCASE = True

    def test_print_array(self):
        """Test that the synthetic children provider yields the expected
        children for 'frame variable' and 'expression'."""
        self.build()
        self.provider_data_formatter_commands()

    def setUp(self):
        # Call super's setUp().
        TestBase.setUp(self)
        # Find the line number to break at.
        self.line = line_number('main.cpp', 'break here')

    def provider_data_formatter_commands(self):
        """Test that the PythonObjectSyntheticChildProvider helper class works"""
        self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
        lldbutil.run_break_set_by_file_and_line(
            self, "main.cpp", self.line, num_expected_locations=1, loc_exact=True)
        self.runCmd("run", RUN_SUCCEEDED)
        # The stop reason of the thread should be breakpoint.
        self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
                    substrs=['stopped',
                             'stop reason = breakpoint'])

        # Remove the custom formatters on tear-down so later test cases start
        # from a clean slate.
        def cleanup():
            self.runCmd('type format clear', check=False)
            self.runCmd('type summary clear', check=False)
            self.runCmd('type synth clear', check=False)

        # Execute the cleanup function during test case tear down.
        self.addTearDownHook(cleanup)

        self.runCmd('command script import provider.py')
        self.runCmd(
            'type synthetic add Foo --python-class provider.SyntheticChildrenProvider')
        self.expect('frame variable f.Name', substrs=['"Enrico"'])
        self.expect(
            'frame variable f',
            substrs=[
                'ID = 123456',
                'Name = "Enrico"',
                'Rate = 1.25'])
        # Dataset-merge residue previously corrupted the final line below.
        self.expect(
            'expression f',
            substrs=[
                'ID = 123456',
                'Name = "Enrico"',
                'Rate = 1.25'])
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class PyObjectSynthProviderTestCase(TestBase):
    """Exercise a scripted synthetic child provider (provider.py) registered
    for type 'Foo', via both 'frame variable' and 'expression'."""

    mydir = TestBase.compute_mydir(__file__)
    NO_DEBUG_INFO_TESTCASE = True

    def test_print_array(self):
        """Test that the synthetic children provider yields the expected
        children for 'frame variable' and 'expression'."""
        self.build()
        self.provider_data_formatter_commands()

    def setUp(self):
        # Call super's setUp().
        TestBase.setUp(self)
        # Find the line number to break at.
        self.line = line_number('main.cpp', 'break here')

    def provider_data_formatter_commands(self):
        """Test that the PythonObjectSyntheticChildProvider helper class works"""
        self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
        lldbutil.run_break_set_by_file_and_line(
            self, "main.cpp", self.line, num_expected_locations=1, loc_exact=True)
        self.runCmd("run", RUN_SUCCEEDED)
        # The stop reason of the thread should be breakpoint.
        self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
                    substrs=['stopped',
                             'stop reason = breakpoint'])

        # Remove the custom formatters on tear-down so later test cases start
        # from a clean slate.
        def cleanup():
            self.runCmd('type format clear', check=False)
            self.runCmd('type summary clear', check=False)
            self.runCmd('type synth clear', check=False)

        # Execute the cleanup function during test case tear down.
        self.addTearDownHook(cleanup)

        self.runCmd('command script import provider.py')
        self.runCmd(
            'type synthetic add Foo --python-class provider.SyntheticChildrenProvider')
        self.expect('frame variable f.Name', substrs=['"Enrico"'])
        self.expect(
            'frame variable f',
            substrs=[
                'ID = 123456',
                'Name = "Enrico"',
                'Rate = 1.25'])
        # Dataset-merge residue ("| 0.572723 | 0.22946 |") previously
        # corrupted the final line below.
        self.expect(
            'expression f',
            substrs=[
                'ID = 123456',
                'Name = "Enrico"',
                'Rate = 1.25'])
"""Tests for the OLE Compound File summary and document summary plugins."""
from __future__ import unicode_literals
import unittest
from plaso.formatters import olecf # pylint: disable=unused-import
from plaso.parsers.olecf_plugins import summary
from tests import test_lib as shared_test_lib
from tests.parsers.olecf_plugins import test_lib
class TestSummaryInformationOLECFPlugin(test_lib.OLECFPluginTestCase):
  """Tests for the OLECF summary information plugin."""
  @shared_test_lib.skipUnlessHasTestFile(['Document.doc'])
  def testProcess(self):
    """Tests the Process function on a Summary Information stream."""
    plugin = summary.SummaryInformationOLECFPlugin()
    storage_writer = self._ParseOLECFFileWithPlugin(['Document.doc'], plugin)
    # Parsing the test document should emit events without any warnings.
    self.assertEqual(storage_writer.number_of_warnings, 0)
    self.assertEqual(storage_writer.number_of_events, 3)
    events = list(storage_writer.GetSortedEvents())
    # Events are time-sorted; the first is the document creation event
    # (confirmed by the timestamp_desc assertion below).
    event = events[0]
    self.CheckTimestamp(event.timestamp, '2012-12-10 18:38:00.000000')
    self.assertEqual(event.timestamp_desc, 'Document Creation Time')
    # Per-attribute checks of the parsed Summary Information values.
    self.assertEqual(event.name, 'Summary Information')
    self.assertEqual(event.title, 'Table of Context')
    self.assertEqual(event.author, '<NAME>')
    self.assertEqual(event.template, 'Normal.dotm')
    self.assertEqual(event.last_saved_by, 'Nides')
    self.assertEqual(event.revision_number, '4')
    self.assertEqual(event.number_of_characters, 18)
    self.assertEqual(event.application, 'Microsoft Office Word')
    self.assertEqual(event.security, 0)
    expected_message = (
        'Title: Table of Context '
        'Author: <NAME> '
        'Template: Normal.dotm '
        'Revision number: 4 '
        'Last saved by: Nides '
        'Number of pages: 1 '
        'Number of words: 3 '
        'Number of characters: 18 '
        'Application: Microsoft Office Word '
        'Security: 0')
    expected_short_message = (
        'Title: Table of Context '
        'Author: <NAME> '
        'Revision number: 4')
    # TODO: add support for:
    # 'Total edit time (secs): 0 '
    # Compare the formatter's long and short message renderings.
    self._TestGetMessageStrings(event, expected_message, expected_short_message)
class TestDocumentSummaryInformationOLECFPlugin(test_lib.OLECFPluginTestCase):
  """Tests for the OLECF document summary information plugin."""
  @shared_test_lib.skipUnlessHasTestFile(['Document.doc'])
  def testProcess(self):
    """Tests the Process function on a Document Summary Information stream."""
    plugin = summary.DocumentSummaryInformationOLECFPlugin()
    storage_writer = self._ParseOLECFFileWithPlugin(['Document.doc'], plugin)
    # Parsing the test document should emit a single event and no warnings.
    self.assertEqual(storage_writer.number_of_warnings, 0)
    self.assertEqual(storage_writer.number_of_events, 1)
    events = list(storage_writer.GetSortedEvents())
    event = events[0]
    # Per-attribute checks of the parsed Document Summary Information values.
    self.assertEqual(event.name, 'Document Summary Information')
    self.assertEqual(event.number_of_lines, 1)
    self.assertEqual(event.number_of_paragraphs, 1)
    self.assertEqual(event.company, 'KPMG')
    self.assertFalse(event.shared_document)
    self.assertEqual(event.application_version, '14.0')
    # TODO: add support for:
    # self.assertEqual(event.is_shared, False)
    expected_message = (
        'Number of lines: 1 '
        'Number of paragraphs: 1 '
        'Company: KPMG '
        'Shared document: False '
        'Application version: 14.0')
    expected_short_message = (
        'Company: KPMG')
    # Compare the formatter's long and short message renderings.
    self._TestGetMessageStrings(event, expected_message, expected_short_message)
# Allow running this test module directly. Dataset-merge residue
# ("| tests/parsers/... |" plus the next row's docstring) previously
# corrupted the unittest.main() line; it has been removed.
if __name__ == '__main__':
  unittest.main()
from __future__ import unicode_literals
import unittest
from plaso.formatters import olecf # pylint: disable=unused-import
from plaso.parsers.olecf_plugins import summary
from tests import test_lib as shared_test_lib
from tests.parsers.olecf_plugins import test_lib
class TestSummaryInformationOLECFPlugin(test_lib.OLECFPluginTestCase):
  """Tests for the OLECF summary information plugin."""
  @shared_test_lib.skipUnlessHasTestFile(['Document.doc'])
  def testProcess(self):
    """Tests the Process function on a Summary Information stream."""
    plugin = summary.SummaryInformationOLECFPlugin()
    storage_writer = self._ParseOLECFFileWithPlugin(['Document.doc'], plugin)
    # Parsing the test document should emit events without any warnings.
    self.assertEqual(storage_writer.number_of_warnings, 0)
    self.assertEqual(storage_writer.number_of_events, 3)
    events = list(storage_writer.GetSortedEvents())
    # Events are time-sorted; the first is the document creation event
    # (confirmed by the timestamp_desc assertion below).
    event = events[0]
    self.CheckTimestamp(event.timestamp, '2012-12-10 18:38:00.000000')
    self.assertEqual(event.timestamp_desc, 'Document Creation Time')
    # Per-attribute checks of the parsed Summary Information values.
    self.assertEqual(event.name, 'Summary Information')
    self.assertEqual(event.title, 'Table of Context')
    self.assertEqual(event.author, '<NAME>')
    self.assertEqual(event.template, 'Normal.dotm')
    self.assertEqual(event.last_saved_by, 'Nides')
    self.assertEqual(event.revision_number, '4')
    self.assertEqual(event.number_of_characters, 18)
    self.assertEqual(event.application, 'Microsoft Office Word')
    self.assertEqual(event.security, 0)
    expected_message = (
        'Title: Table of Context '
        'Author: <NAME> '
        'Template: Normal.dotm '
        'Revision number: 4 '
        'Last saved by: Nides '
        'Number of pages: 1 '
        'Number of words: 3 '
        'Number of characters: 18 '
        'Application: Microsoft Office Word '
        'Security: 0')
    expected_short_message = (
        'Title: Table of Context '
        'Author: <NAME> '
        'Revision number: 4')
    # TODO: add support for:
    # 'Total edit time (secs): 0 '
    # Compare the formatter's long and short message renderings.
    self._TestGetMessageStrings(event, expected_message, expected_short_message)
class TestDocumentSummaryInformationOLECFPlugin(test_lib.OLECFPluginTestCase):
  """Tests for the OLECF document summary information plugin."""
  @shared_test_lib.skipUnlessHasTestFile(['Document.doc'])
  def testProcess(self):
    """Tests the Process function on a Document Summary Information stream."""
    plugin = summary.DocumentSummaryInformationOLECFPlugin()
    storage_writer = self._ParseOLECFFileWithPlugin(['Document.doc'], plugin)
    # Parsing the test document should emit a single event and no warnings.
    self.assertEqual(storage_writer.number_of_warnings, 0)
    self.assertEqual(storage_writer.number_of_events, 1)
    events = list(storage_writer.GetSortedEvents())
    event = events[0]
    # Per-attribute checks of the parsed Document Summary Information values.
    self.assertEqual(event.name, 'Document Summary Information')
    self.assertEqual(event.number_of_lines, 1)
    self.assertEqual(event.number_of_paragraphs, 1)
    self.assertEqual(event.company, 'KPMG')
    self.assertFalse(event.shared_document)
    self.assertEqual(event.application_version, '14.0')
    # TODO: add support for:
    # self.assertEqual(event.is_shared, False)
    expected_message = (
        'Number of lines: 1 '
        'Number of paragraphs: 1 '
        'Company: KPMG '
        'Shared document: False '
        'Application version: 14.0')
    expected_short_message = (
        'Company: KPMG')
    # Compare the formatter's long and short message renderings.
    self._TestGetMessageStrings(event, expected_message, expected_short_message)
# Allow running this test module directly. Dataset-merge residue
# ("| 0.680985 | 0.54952 |") previously corrupted the unittest.main() line;
# it has been removed.
if __name__ == '__main__':
  unittest.main()
import os
import re
import sys
import types
import unittest
from unittest.mock import MagicMock
from moderngl_window.utils import module_loading
# Mock modules
class Mock(MagicMock):
    """Module stand-in whose every attribute lookup yields a MagicMock."""

    @classmethod
    def __getattr__(cls, attr_name):
        # Any attribute not already provided resolves to a fresh MagicMock.
        return MagicMock()
# Windowing / GUI backend modules that may be absent in this environment;
# each entry is replaced in sys.modules by a Mock so imports succeed.
MOCK_MODULES = [
    'glfw',
    'sdl2',
    'sdl2.ext',
    'sdl2.video',
    'pyglet',
    'pyglet.window',
    'PyQt5',
    'PyQt5.QtCore',
    'QtCore',
    'QtOpenGL',
    'QtWidgets',
    'PySide2',
    'PySide2.QtCore',
]
sys.modules.update({mod_name: Mock() for mod_name in MOCK_MODULES})
class TestCase(unittest.TestCase):
    """
    Test reference docs.

    Each test compares the automethod/autoattribute/autofunction entries of
    an .rst file under docs/reference against the public attributes of the
    corresponding module or class, failing on anything that is documented
    but not implemented, or implemented but not documented.
    """

    def validate(self, filename, module, classname=None, ignore=None):
        """
        Finds all automethod and autoattribute statements in an rst file
        comparing them to the attributes found in the actual class.

        :param filename: rst path relative to docs/reference
        :param module: dotted path of the module to import
        :param classname: optional class to inspect; when omitted,
            module-level functions are inspected instead
        :param ignore: names excluded from the comparison
        """
        if ignore is None:
            ignore = []
        with open(os.path.normpath(os.path.join('docs', 'reference', filename))) as f:
            docs = f.read()
        module = module_loading.import_module(module)

        # Inspect class
        if classname:
            methods = re.findall(r'^\.\. automethod:: ([^\(\n]+)', docs, flags=re.M)
            attributes = re.findall(r'^\.\. autoattribute:: ([^\n]+)', docs, flags=re.M)
            documented = set(filter(lambda x: x.startswith(classname), [a for a in methods] + attributes))
            # Public attributes plus __init__ form the implemented API.
            implemented = set(classname + '.' + x for x in dir(getattr(module, classname))
                              if not x.startswith('_') or x == '__init__')
            ignored = set(classname + '.' + x for x in ignore)
        # Inspect module
        else:
            # Only inspect functions for now
            functions = re.findall(r'^\.\. autofunction:: ([^\(\n]+)', docs, flags=re.M)
            documented = set(functions)
            ignored = set(ignore)
            implemented = set(func for func in dir(module) if isinstance(getattr(module, func), types.FunctionType))

        self.assertSetEqual(implemented - documented - ignored, set(), msg='Implemented but not Documented')
        self.assertSetEqual(documented - implemented - ignored, set(), msg='Documented but not Implemented')

    def test_moderngl_window(self):
        self.validate(
            'moderngl_window.rst',
            'moderngl_window',
            ignore=['valid_window_size', 'valid_window_size_multiplier', 'import_string', 'valid_bool'],
        )

    def test_settings(self):
        self.validate('settings.conf.settings.rst', 'moderngl_window.conf', 'Settings', [])

    # --- context ---
    def test_context_base_window(self):
        self.validate('context/basewindow.rst', 'moderngl_window.context.base.window', 'BaseWindow')

    def test_context_glfw_window(self):
        self.validate('context/glfw.window.rst', 'moderngl_window.context.glfw.window', 'Window')

    def test_context_headless_window(self):
        self.validate('context/headless.window.rst', 'moderngl_window.context.headless.window', 'Window')

    def test_context_pyglet_window(self):
        self.validate('context/pyglet.window.rst', 'moderngl_window.context.pyglet.window', 'Window')

    def test_context_pyqt5_window(self):
        self.validate('context/pyqt5.window.rst', 'moderngl_window.context.pyqt5.window', 'Window')

    @unittest.skipIf(sys.version_info >= (3, 8, 0), reason="pyside2 not supported in py38 yet")
    def test_context_pyside2_window(self):
        self.validate('context/pyside2.window.rst', 'moderngl_window.context.pyside2.window', 'Window')

    def test_context_sdl2_window(self):
        self.validate('context/sdl2.window.rst', 'moderngl_window.context.sdl2.window', 'Window')

    # --- geometry ---
    def test_geometry(self):
        self.validate('geometry.rst', 'moderngl_window.geometry')

    # --- Loaders ---
    def test_loaders_base(self):
        self.validate('loaders/base.rst', 'moderngl_window.loaders.base', 'BaseLoader')

    # --- Loaders : Texture ---
    def test_loaders_t2d(self):
        self.validate('loaders/t2d.rst', 'moderngl_window.loaders.texture.t2d', 'Loader')

    def test_loaders_array(self):
        self.validate('loaders/array.rst', 'moderngl_window.loaders.texture.array', 'Loader')

    # --- Loaders : Scene ---
    def test_loaders_wavefront(self):
        self.validate('loaders/wavefront.rst', 'moderngl_window.loaders.scene.wavefront', 'Loader')

    def test_loaders_gltf(self):
        self.validate('loaders/gltf2.rst', 'moderngl_window.loaders.scene.gltf2', 'Loader')

    def test_loaders_stl(self):
        # NOTE(review): this checks wavefront.rst against the stl loader --
        # likely a copy/paste leftover; confirm whether loaders/stl.rst exists
        # before changing it.
        self.validate('loaders/wavefront.rst', 'moderngl_window.loaders.scene.stl', 'Loader')

    # --- Loaders : Program ---
    def test_loader_single(self):
        self.validate('loaders/single.rst', 'moderngl_window.loaders.program.single', 'Loader')

    def test_loader_separate(self):
        self.validate('loaders/separate.rst', 'moderngl_window.loaders.program.separate', 'Loader')

    # --- Loaders : Data ---
    def test_loader_text(self):
        self.validate('loaders/text.rst', 'moderngl_window.loaders.data.text', 'Loader')

    def test_loader_json(self):
        self.validate('loaders/json.rst', 'moderngl_window.loaders.data.json', 'Loader')

    def test_loader_binary(self):
        self.validate('loaders/binary.rst', 'moderngl_window.loaders.data.binary', 'Loader')

    # --- Meta ---
    def test_meta_base(self):
        self.validate('meta/base.rst', 'moderngl_window.meta.base', 'ResourceDescription')

    def test_meta_texture(self):
        self.validate('meta/texture.rst', 'moderngl_window.meta.texture', 'TextureDescription')

    def test_meta_program(self):
        self.validate('meta/program.rst', 'moderngl_window.meta.program', 'ProgramDescription')

    def test_meta_scene(self):
        self.validate('meta/scene.rst', 'moderngl_window.meta.scene', 'SceneDescription')

    def test_meta_data(self):
        self.validate('meta/data.rst', 'moderngl_window.meta.data', 'DataDescription')

    # --- Finders ---
    def test_finders_base(self):
        self.validate('finders/base.rst', 'moderngl_window.finders.base', 'BaseFilesystemFinder')

    def test_finders_texture(self):
        self.validate('finders/texture.rst', 'moderngl_window.finders.texture', 'FilesystemFinder')

    def test_finders_program(self):
        self.validate('finders/program.rst', 'moderngl_window.finders.program', 'FilesystemFinder')

    def test_finders_scene(self):
        self.validate('finders/scene.rst', 'moderngl_window.finders.scene', 'FilesystemFinder')

    def test_finders_data(self):
        self.validate('finders/data.rst', 'moderngl_window.finders.data', 'FilesystemFinder')

    # --- opengl ---
    def test_opengl_projection3d(self):
        self.validate('opengl/projection.rst', 'moderngl_window.opengl.projection', 'Projection3D')

    def test_opengl_vao(self):
        self.validate('opengl/vao.rst', 'moderngl_window.opengl.vao', 'VAO')

    # --- resources ---
    def test_resources_base(self):
        self.validate('resources/base.rst', 'moderngl_window.resources.base', 'BaseRegistry')

    def test_resources_data(self):
        self.validate('resources/data.rst', 'moderngl_window.resources.data', 'DataFiles')

    def test_resources_textures(self):
        self.validate('resources/textures.rst', 'moderngl_window.resources.textures', 'Textures')

    def test_resources_programs(self):
        self.validate('resources/programs.rst', 'moderngl_window.resources.programs', 'Programs')

    def test_resources_scenes(self):
        self.validate('resources/scenes.rst', 'moderngl_window.resources.scenes', 'Scenes')

    # --- timers ---
    def test_timers_base(self):
        self.validate('timers/base.rst', 'moderngl_window.timers.base', 'BaseTimer')

    def test_timers_clock(self):
        self.validate('timers/clock.rst', 'moderngl_window.timers.clock', 'Timer')

    # -- Scene ---
    def test_scene_camera(self):
        self.validate('scene/camera.rst', 'moderngl_window.scene', 'Camera')

    def test_scene_keyboardcamera(self):
        self.validate('scene/keyboardcamera.rst', 'moderngl_window.scene', 'KeyboardCamera')

    def test_scene_scene(self):
        self.validate('scene/scene.rst', 'moderngl_window.scene', 'Scene')

    def test_scene_node(self):
        self.validate('scene/node.rst', 'moderngl_window.scene', 'Node')

    def test_scene_mesh(self):
        self.validate('scene/mesh.rst', 'moderngl_window.scene', 'Mesh')

    def test_scene_material(self):
        self.validate('scene/material.rst', 'moderngl_window.scene', 'Material')

    def test_scene_material_texture(self):
        self.validate('scene/materialtexture.rst', 'moderngl_window.scene', 'MaterialTexture')

    def test_scene_meshprogram(self):
        # Dataset-merge residue ("| tests/test_docs.py | import os") previously
        # corrupted this final line; restored to the matching validate call.
        self.validate('scene/meshprogram.rst', 'moderngl_window.scene', 'MeshProgram')
import re
import sys
import types
import unittest
from unittest.mock import MagicMock
from moderngl_window.utils import module_loading
# Mock modules
class Mock(MagicMock):
    """Module stand-in: every attribute access yields a MagicMock."""
    @classmethod
    def __getattr__(cls, name):
        # Any attribute not already provided resolves to a fresh MagicMock.
        return MagicMock()
# Windowing / GUI backend modules that may be absent in this environment.
MOCK_MODULES = [
    'glfw',
    'sdl2',
    'sdl2.ext',
    'sdl2.video',
    'pyglet',
    'pyglet.window',
    'PyQt5',
    'PyQt5.QtCore',
    'QtCore',
    'QtOpenGL',
    'QtWidgets',
    'PySide2',
    'PySide2.QtCore',
]
# Register a Mock for each name so importing window backends never fails.
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
class TestCase(unittest.TestCase):
"""
Test reference docs
"""
def validate(self, filename, module, classname=None, ignore=None):
"""
Finds all automethod and autoattribute statements in an rst file
comparing them to the attributes found in the actual class
"""
if ignore is None:
ignore = []
with open(os.path.normpath(os.path.join('docs', 'reference', filename))) as f:
docs = f.read()
module = module_loading.import_module(module)
# Inspect class
if classname:
methods = re.findall(r'^\.\. automethod:: ([^\(\n]+)', docs, flags=re.M)
attributes = re.findall(r'^\.\. autoattribute:: ([^\n]+)', docs, flags=re.M)
documented = set(filter(lambda x: x.startswith(classname), [a for a in methods] + attributes))
implemented = set(classname + '.' + x for x in dir(getattr(module, classname))
if not x.startswith('_') or x == '__init__')
ignored = set(classname + '.' + x for x in ignore)
# Inspect module
else:
# Only inspect functions for now
functions = re.findall(r'^\.\. autofunction:: ([^\(\n]+)', docs, flags=re.M)
documented = set(functions)
ignored = set(ignore)
implemented = set(func for func in dir(module) if isinstance(getattr(module, func), types.FunctionType))
self.assertSetEqual(implemented - documented - ignored, set(), msg='Implemented but not Documented')
self.assertSetEqual(documented - implemented - ignored, set(), msg='Documented but not Implemented')
def test_moderngl_window(self):
self.validate(
'moderngl_window.rst',
'moderngl_window',
ignore=['valid_window_size', 'valid_window_size_multiplier', 'import_string', 'valid_bool'],
)
def test_settings(self):
self.validate('settings.conf.settings.rst', 'moderngl_window.conf', 'Settings', [])
# --- context ---
def test_context_base_window(self):
self.validate('context/basewindow.rst', 'moderngl_window.context.base.window', 'BaseWindow')
def test_context_glfw_window(self):
self.validate('context/glfw.window.rst', 'moderngl_window.context.glfw.window', 'Window')
def test_context_headless_window(self):
self.validate('context/headless.window.rst', 'moderngl_window.context.headless.window', 'Window')
def test_context_pyglet_window(self):
self.validate('context/pyglet.window.rst', 'moderngl_window.context.pyglet.window', 'Window')
def test_context_pyqt5_window(self):
self.validate('context/pyqt5.window.rst', 'moderngl_window.context.pyqt5.window', 'Window')
@unittest.skipIf(sys.version_info >= (3, 8, 0), reason="pyside2 not supported in py38 yet")
def test_context_pyside2_window(self):
self.validate('context/pyside2.window.rst', 'moderngl_window.context.pyside2.window', 'Window')
def test_context_sdl2_window(self):
self.validate('context/sdl2.window.rst', 'moderngl_window.context.sdl2.window', 'Window')
# --- geometry ---
def test_geometry(self):
self.validate('geometry.rst', 'moderngl_window.geometry')
# --- Loaders ---
def test_loaders_base(self):
self.validate('loaders/base.rst', 'moderngl_window.loaders.base', 'BaseLoader')
# --- Loaders : Texture ---
def test_loaders_t2d(self):
self.validate('loaders/t2d.rst', 'moderngl_window.loaders.texture.t2d', 'Loader')
def test_loaders_array(self):
self.validate('loaders/array.rst', 'moderngl_window.loaders.texture.array', 'Loader')
# --- Loaders : Scene ---
def test_loaders_wavefront(self):
self.validate('loaders/wavefront.rst', 'moderngl_window.loaders.scene.wavefront', 'Loader')
def test_loaders_gltf(self):
self.validate('loaders/gltf2.rst', 'moderngl_window.loaders.scene.gltf2', 'Loader')
def test_loaders_stl(self):
self.validate('loaders/wavefront.rst', 'moderngl_window.loaders.scene.stl', 'Loader')
# --- Loaders : Program ---
def test_loader_single(self):
self.validate('loaders/single.rst', 'moderngl_window.loaders.program.single', 'Loader')
def test_loader_separate(self):
self.validate('loaders/separate.rst', 'moderngl_window.loaders.program.separate', 'Loader')
# --- Loaders : Data ---
def test_loader_text(self):
self.validate('loaders/text.rst', 'moderngl_window.loaders.data.text', 'Loader')
def test_loader_json(self):
self.validate('loaders/json.rst', 'moderngl_window.loaders.data.json', 'Loader')
def test_loader_binary(self):
self.validate('loaders/binary.rst', 'moderngl_window.loaders.data.binary', 'Loader')
# --- Meta ---
def test_meta_base(self):
self.validate('meta/base.rst', 'moderngl_window.meta.base', 'ResourceDescription')
def test_meta_texture(self):
self.validate('meta/texture.rst', 'moderngl_window.meta.texture', 'TextureDescription')
def test_meta_program(self):
self.validate('meta/program.rst', 'moderngl_window.meta.program', 'ProgramDescription')
def test_meta_scene(self):
self.validate('meta/scene.rst', 'moderngl_window.meta.scene', 'SceneDescription')
def test_meta_data(self):
self.validate('meta/data.rst', 'moderngl_window.meta.data', 'DataDescription')
# --- Finders ---
def test_finders_base(self):
self.validate('finders/base.rst', 'moderngl_window.finders.base', 'BaseFilesystemFinder')
def test_finders_texture(self):
self.validate('finders/texture.rst', 'moderngl_window.finders.texture', 'FilesystemFinder')
def test_finders_program(self):
self.validate('finders/program.rst', 'moderngl_window.finders.program', 'FilesystemFinder')
def test_finders_scene(self):
self.validate('finders/scene.rst', 'moderngl_window.finders.scene', 'FilesystemFinder')
def test_finders_data(self):
self.validate('finders/data.rst', 'moderngl_window.finders.data', 'FilesystemFinder')
# --- opengl ---
def test_opengl_projection3d(self):
self.validate('opengl/projection.rst', 'moderngl_window.opengl.projection', 'Projection3D')
def test_opengl_vao(self):
self.validate('opengl/vao.rst', 'moderngl_window.opengl.vao', 'VAO')
# --- resources ---
def test_resources_base(self):
self.validate('resources/base.rst', 'moderngl_window.resources.base', 'BaseRegistry')
def test_resources_data(self):
self.validate('resources/data.rst', 'moderngl_window.resources.data', 'DataFiles')
def test_resources_textures(self):
self.validate('resources/textures.rst', 'moderngl_window.resources.textures', 'Textures')
def test_resources_programs(self):
self.validate('resources/programs.rst', 'moderngl_window.resources.programs', 'Programs')
def test_resources_scenes(self):
self.validate('resources/scenes.rst', 'moderngl_window.resources.scenes', 'Scenes')
# --- timers ---
def test_timers_base(self):
self.validate('timers/base.rst', 'moderngl_window.timers.base', 'BaseTimer')
def test_timers_clock(self):
self.validate('timers/clock.rst', 'moderngl_window.timers.clock', 'Timer')
# -- Scene ---
def test_scene_camera(self):
self.validate('scene/camera.rst', 'moderngl_window.scene', 'Camera')
def test_scene_keyboardcamera(self):
self.validate('scene/keyboardcamera.rst', 'moderngl_window.scene', 'KeyboardCamera')
def test_scene_scene(self):
self.validate('scene/scene.rst', 'moderngl_window.scene', 'Scene')
def test_scene_node(self):
self.validate('scene/node.rst', 'moderngl_window.scene', 'Node')
def test_scene_mesh(self):
self.validate('scene/mesh.rst', 'moderngl_window.scene', 'Mesh')
def test_scene_material(self):
self.validate('scene/material.rst', 'moderngl_window.scene', 'Material')
def test_scene_material_texture(self):
self.validate('scene/materialtexture.rst', 'moderngl_window.scene', 'MaterialTexture')
def test_scene_meshprogram(self):
self.validate('scene/meshprogram.rst', 'moderngl_window.scene', 'MeshProgram') | 0.407098 | 0.165492 |
from self_py_fun.PreFun import *
plt.style.use('ggplot')
import csv
import seaborn as sns
sns.set_context('notebook')
# If running this file on the cluster, comment out the tensorflow-related functions and imports.
class ExistMLPred(EEGPreFun):
def __init__(self, *args, **kwargs):
super(ExistMLPred, self).__init__(*args, **kwargs)
def import_sim_matlab_swlda_wts_train(
self, trn_rep_dim, file_subscript, scenario_name
):
folder_dir = '{}/swLDA/{}/sim_swlda_wts_train_{}_{}.mat'.format(
self.parent_sim_output_path,
scenario_name, trn_rep_dim, file_subscript
)
# print(folder_dir)
swlda_wts = sio.loadmat(folder_dir)
swlda_wts_keys, _ = zip(*swlda_wts.items())
# print(swlda_wts_keys)
return swlda_wts
def import_eeg_matlab_swlda_wts_train(
self, file_subscript, scenario_name, eeg_file_suffix,
channel_dim=1, channel_id=1
):
if channel_dim == 1:
folder_dir = '{}/swLDA/{}/{}_swlda_wts_train_{}_channel_{}_{}.mat'\
.format(self.parent_eeg_output_path, scenario_name,
self.sub_folder_name, file_subscript, channel_id, eeg_file_suffix)
else:
folder_dir = '{}/swLDA/{}/{}_swlda_wts_train_{}_{}_{}.mat'\
.format(self.parent_eeg_output_path, scenario_name,
self.sub_folder_name, file_subscript, scenario_name, eeg_file_suffix)
print(folder_dir)
swlda_wts = sio.loadmat(folder_dir)
swlda_wts_keys, _ = zip(*swlda_wts.items())
# print(swlda_wts_keys)
return swlda_wts
@staticmethod
def swlda_predict_y_prob(
b, in_model, eeg_signals_trun
):
b_inmodel = np.multiply(np.transpose(in_model), b)
pred_prob = np.matmul(eeg_signals_trun, b_inmodel)
return pred_prob
def ml_predict(
self, predict_prob, eeg_code,
letter_dim, repetition_pred
):
r"""
:param predict_prob: 2d-array probability, (feature_vector_dim, 1)
:param eeg_code: ultimately converted to 1d-array
:param letter_dim: integer
:param repetition_pred: integer
:return:
"""
assert predict_prob.shape == (letter_dim * repetition_pred * self.num_rep, 1), \
print('Inconsistent dimension of predict_prob.')
eeg_code = np.reshape(eeg_code, [self.num_letter * repetition_pred * self.num_rep])
single_score_row = np.zeros([int(self.num_rep / 2), self.num_letter * repetition_pred])
single_score_col = np.zeros([int(self.num_rep / 2), self.num_letter * repetition_pred])
for i in range(int(self.num_rep / 2)):
single_score_row[i, :] = predict_prob[np.where(eeg_code == i + 1)[0], 0]
single_score_col[i, :] = predict_prob[np.where(eeg_code == i + self.row_column_length + 1)[0], 0]
single_score_row = np.reshape(
single_score_row, [int(self.num_rep / 2), self.num_letter, repetition_pred]
)
single_score_col = np.reshape(
single_score_col, [int(self.num_rep / 2), self.num_letter, repetition_pred]
)
print('single_score_row has shape {}'.format(single_score_row.shape))
# Compute the prediction based on single seq (row + col)
arg_max_single_row = np.argmax(single_score_row, axis=0) + 1
arg_max_single_col = np.argmax(single_score_col, axis=0) + self.row_column_length + 1
# cumulative
cum_score_row = np.cumsum(single_score_row, axis=-1)
cum_score_col = np.cumsum(single_score_col, axis=-1)
'''
# 5 out of 7 or 5 out of 8
_, num_letter_temp, _ = single_score_row.shape
seq_fix = 5
n_choose_k = np.array(list(itl.product(*[(0, 1) for i in range(repetition_pred)])))
n_choose_k = np.copy(n_choose_k[np.sum(n_choose_k, axis=-1) == seq_fix, :]) # (21, 7) or (56, 8)
n_k_val, _ = n_choose_k.shape
cum_score_row = np.zeros([6, num_letter_temp, n_k_val])
cum_score_col = np.zeros([6, num_letter_temp, n_k_val])
for n_k_id in range(n_k_val):
cum_score_row[..., n_k_id] = np.sum(single_score_row[..., np.where(n_choose_k[n_k_id, :] == 1)[0]], axis=-1)
cum_score_col[..., n_k_id] = np.sum(single_score_col[..., np.where(n_choose_k[n_k_id, :] == 1)[0]], axis=-1)
'''
arg_max_cum_row = np.argmax(cum_score_row, axis=0) + 1
arg_max_cum_col = np.argmax(cum_score_col, axis=0) + self.row_column_length + 1
letter_single_mat = np.zeros([letter_dim, repetition_pred]).astype('<U1')
letter_cum_mat = np.zeros([letter_dim, repetition_pred]).astype('<U1')
for i in range(letter_dim):
for j in range(repetition_pred):
letter_single_mat[i, j] = self.determine_letter(arg_max_single_row[i, j], arg_max_single_col[i, j])
letter_cum_mat[i, j] = self.determine_letter(arg_max_cum_row[i, j], arg_max_cum_col[i, j])
ml_pred_dict = {
"single": letter_single_mat,
"cum": letter_cum_mat
}
return ml_pred_dict
def ml_predict_entropy(
self, predict_prob, eeg_code, screen_ids,
letter_dim, repetition_pred
):
assert predict_prob.shape == (letter_dim * repetition_pred * self.num_rep, 1), \
print('Inconsistent dimension of predict_prob.')
assert screen_ids.shape == (letter_dim, repetition_pred, repetition_pred), \
print('Inconsistent dimension of screen indicator matrix.')
eeg_code = np.reshape(eeg_code, [self.num_letter * repetition_pred * self.num_rep])
single_score_row = np.zeros([int(self.num_rep / 2), self.num_letter * repetition_pred])
single_score_col = np.zeros([int(self.num_rep / 2), self.num_letter * repetition_pred])
for i in range(int(self.num_rep / 2)):
single_score_row[i, :] = predict_prob[np.where(eeg_code == i + 1)[0], 0]
single_score_col[i, :] = predict_prob[np.where(eeg_code == i + self.row_column_length + 1)[0], 0]
single_score_row = np.reshape(
single_score_row, [int(self.num_rep / 2), self.num_letter, repetition_pred]
) # (6, 19, 8)
single_score_col = np.reshape(
single_score_col, [int(self.num_rep / 2), self.num_letter, repetition_pred]
)
arg_max_single_row = np.argmax(single_score_row, axis=0) + 1
arg_max_single_col = np.argmax(single_score_col, axis=0) + self.row_column_length + 1
entropy_score_row = np.zeros_like(single_score_row)
entropy_score_col = np.zeros_like(single_score_col)
for l_id in range(letter_dim):
for rep_id in range(repetition_pred):
for rep_select_id in range(repetition_pred):
if screen_ids[l_id, rep_id, rep_select_id] == 1:
entropy_score_row[:, l_id, rep_id] = entropy_score_row[:, l_id, rep_id] + single_score_row[:, l_id, rep_select_id]
entropy_score_col[:, l_id, rep_id] = entropy_score_col[:, l_id, rep_id] + single_score_col[:, l_id, rep_select_id]
arg_max_entropy_row = np.argmax(entropy_score_row, axis=0) + 1
arg_max_entropy_col = np.argmax(entropy_score_col, axis=0) + self.row_column_length + 1
letter_single_mat = np.zeros([letter_dim, repetition_pred]).astype('<U1')
letter_entropy_mat = np.zeros([letter_dim, repetition_pred]).astype('<U1')
for i in range(letter_dim):
for j in range(repetition_pred):
letter_single_mat[i, j] = self.determine_letter(arg_max_single_row[i, j], arg_max_single_col[i, j])
letter_entropy_mat[i, j] = self.determine_letter(arg_max_entropy_row[i, j], arg_max_entropy_col[i, j])
ml_prediction_dict = {
"single": letter_single_mat,
"cum": letter_entropy_mat
}
return ml_prediction_dict
@staticmethod
def compute_selected_sample_stats(
signals_trun_sub, type_sub, in_model
):
r"""
:param signals_trun_sub: (sample_row, feature_col)
:param type_sub: (sample_row,)
:param in_model: (1, feature_col)
:return:
"""
signals_trun_sub = signals_trun_sub[:, in_model[0, :] == 1]
signals_mean_1_sub = np.mean(signals_trun_sub[type_sub == 1], axis=0)
signals_mean_0_sub = np.mean(signals_trun_sub[type_sub == 0], axis=0)
signals_cov_sub = np.cov(signals_trun_sub, rowvar=False)
return [signals_mean_1_sub, signals_mean_0_sub,
signals_cov_sub, signals_trun_sub]
def plot_swlda_select_feature(
self, in_model, trn_repetition, scenario_name, sim_dat, channel_ids=None
):
r"""
:param inmodel: array_like, (1, num_electrode * n_length)
:param sim_folder_name: string
:param trn_repetition: integer or string
:param scenario_name: string
:param sim_dat: bool_like
:param channel_ids: array_like
:return: plot
"""
if channel_ids is None:
channel_ids = np.arange(self.num_electrode)
channel_dim = len(channel_ids)
in_model = np.reshape(in_model, [channel_dim, self.n_length])
if sim_dat:
plot_pdf_dir = '{}/swLDA/{}/{}_swlda_select_train_{}.pdf'.format(
self.parent_sim_output_path, scenario_name,
self.sub_folder_name, trn_repetition
)
else:
plot_pdf_dir = '{}/swLDA/{}/{}_swlda_select_train_{}.pdf'.format(
self.parent_eeg_output_path, scenario_name,
self.sub_folder_name, trn_repetition
)
plot_pdf = bpdf.PdfPages(plot_pdf_dir)
# print(plot_pdf_dir)
print('The number of features selected by swLDA is {}.'.format(np.sum(in_model)))
# log-likelihood trace-plot and mean selection rate
for i, e_id in enumerate(channel_ids):
fig_alt = plt.figure(figsize=(10, 10))
plt.plot(np.arange(self.n_length), in_model[i, :])
plt.ylim(-0.1, 1.1)
plt.title('channel ' + str(e_id + 1))
plt.close()
# plt.show()
plot_pdf.savefig(fig_alt)
plot_pdf.close()
return 'plots done!'
def save_exist_ml_pred_results(
self, exist_ml_pred_dict, repet_num_fit, repet_num_pred,
target_letter, exist_ml_name, scenario_name, file_subscript,
train_bool=True, sim_dat_bool=True
):
r"""
:param exist_ml_pred_dict:
:param repet_num_fit:
:param repet_num_pred:
:param target_letter:
:param exist_ml_name:
:param scenario_name:
:param file_subscript:
:param train_bool:
:param sim_dat_bool:
:return:
"""
method_dir = "{}/{}/{}".format(
self.parent_path,
exist_ml_name,
self.sub_name_short
)
try:
os.mkdir(method_dir)
print('Directory', method_dir, ' is created.')
except FileExistsError:
print('Directory ', method_dir, ' already exists.')
if sim_dat_bool:
method_dir = '{}/{}'.format(method_dir, self.sub_folder_name)
try:
os.mkdir(method_dir)
print('Directory', method_dir, ' is created.')
except FileExistsError:
print('Directory ', method_dir, ' already exists.')
method_dir = '{}/{}'.format(method_dir, scenario_name)
try:
os.mkdir(method_dir)
print('Directory', method_dir, ' is created.')
except FileExistsError:
print('Directory ', method_dir, ' already exists.')
if train_bool:
file_dir = "{}/{}_train_{}_pred_train_{}_{}.csv".format(
method_dir, self.sub_folder_name,
repet_num_fit, repet_num_pred, file_subscript
)
else:
file_dir = "{}/{}_train_{}_pred_test_{}_{}.csv".format(
method_dir, self.sub_folder_name,
repet_num_fit, repet_num_pred, file_subscript
)
single_pred = exist_ml_pred_dict['single']
cum_pred = exist_ml_pred_dict['cum']
task = 'w'
with open(file_dir, task) as f:
f_writer = csv.writer(f)
l0 = ['trn_repetition', str(repet_num_fit)]
f_writer.writerow(l0)
l1 = ['Sequence id']
l1.extend(list(np.arange(1, repet_num_pred+1)))
f_writer.writerow(l1)
f_writer.writerow(['Single sequence prediction:'])
for i, letter_i in enumerate(target_letter):
l_ij = ['Letter {}'.format(letter_i)]
for j in range(repet_num_pred):
l_ij.append(single_pred[i, j])
f_writer.writerow(l_ij)
f_writer.writerow([' '])
f_writer.writerow(['Cumulative sequence prediction:'])
for i, letter_i in enumerate(target_letter):
l_ij = ['Letter {}'.format(letter_i)]
for j in range(repet_num_pred):
l_ij.append(cum_pred[i, j])
f_writer.writerow(l_ij)
return 'Saving results done!'
def split_trunc_train_set_odd_even(
self, eeg_signals_trun, eeg_type_3d, eeg_code_3d, rep_train_id, rep_test_id
):
eeg_signals_trun_2 = np.reshape(
eeg_signals_trun, [self.num_electrode,
self.num_letter,
self.num_repetition,
self.num_rep,
self.n_length]
)
# We pick the odd sequence for training and even sequence for testing (within TRN_file)
eeg_signals_trun_2_odd = eeg_signals_trun_2[..., rep_train_id - 1, :, :]
eeg_signals_trun_2_even = eeg_signals_trun_2[..., rep_test_id - 1, :, :]
repet_num_train = len(rep_train_id)
repet_num_test = len(rep_test_id)
eeg_signals_trun_2_odd = np.reshape(
eeg_signals_trun_2_odd, [self.num_electrode,
self.num_letter * repet_num_train * self.num_rep,
self.n_length]
)
eeg_signals_trun_2_odd = np.transpose(eeg_signals_trun_2_odd, [1, 0, 2])
eeg_signals_trun_2_even = np.reshape(
eeg_signals_trun_2_even, [self.num_electrode,
self.num_letter * repet_num_test * self.num_rep,
self.n_length]
)
eeg_signals_trun_2_even = np.transpose(eeg_signals_trun_2_even, [1, 0, 2])
eeg_type_odd = eeg_type_3d[:, rep_train_id-1, :]
eeg_type_even = eeg_type_3d[:, rep_test_id-1, :]
eeg_code_odd = eeg_code_3d[:, rep_train_id-1, :]
eeg_code_even = eeg_code_3d[:, rep_test_id-1, :]
return [eeg_signals_trun_2_odd, eeg_type_odd, eeg_code_odd,
eeg_signals_trun_2_even, eeg_type_even, eeg_code_even]
def exist_ml_fit_predict(
self, ml_obj, signals_trun, eeg_code_1d,
target_letters, repet_num_fit, repet_num_pred,
method_name, scenario_name, file_subscript,
train_bool, sim_dat_bool
):
r"""
:param ml_obj:
:param signals_trun:
:param eeg_type_1d: (feature length, 1)
:param eeg_code_1d: (feature length, 1)
:param target_letters:
:param repet_num_fit:
:param repet_num_pred:
:param method_name:
:param scenario_name:
:param file_subscript:
:param train_bool:
:param sim_dat_bool:
:return:
"""
letter_dim = len(target_letters)
# ml_obj.fit(signals_trun, eeg_type_1d[0, :])
y_pred_train = ml_obj.predict_proba(signals_trun)[:, 1, np.newaxis]
exist_ml_dict = self.ml_predict(
y_pred_train, eeg_code_1d[0, :], letter_dim, repet_num_pred
)
self.save_exist_ml_pred_results(
exist_ml_dict, repet_num_fit, repet_num_pred,
target_letters, method_name, scenario_name, file_subscript,
train_bool, sim_dat_bool
)
print('Exist ML results have been saved.')
return exist_ml_dict | Python/self_py_fun/ExistMLFun.py | from self_py_fun.PreFun import *
plt.style.use('ggplot')
import csv
import seaborn as sns
sns.set_context('notebook')
# If running this file on the cluster, comment out the tensorflow-related functions and imports.
class ExistMLPred(EEGPreFun):
def __init__(self, *args, **kwargs):
super(ExistMLPred, self).__init__(*args, **kwargs)
def import_sim_matlab_swlda_wts_train(
self, trn_rep_dim, file_subscript, scenario_name
):
folder_dir = '{}/swLDA/{}/sim_swlda_wts_train_{}_{}.mat'.format(
self.parent_sim_output_path,
scenario_name, trn_rep_dim, file_subscript
)
# print(folder_dir)
swlda_wts = sio.loadmat(folder_dir)
swlda_wts_keys, _ = zip(*swlda_wts.items())
# print(swlda_wts_keys)
return swlda_wts
def import_eeg_matlab_swlda_wts_train(
self, file_subscript, scenario_name, eeg_file_suffix,
channel_dim=1, channel_id=1
):
if channel_dim == 1:
folder_dir = '{}/swLDA/{}/{}_swlda_wts_train_{}_channel_{}_{}.mat'\
.format(self.parent_eeg_output_path, scenario_name,
self.sub_folder_name, file_subscript, channel_id, eeg_file_suffix)
else:
folder_dir = '{}/swLDA/{}/{}_swlda_wts_train_{}_{}_{}.mat'\
.format(self.parent_eeg_output_path, scenario_name,
self.sub_folder_name, file_subscript, scenario_name, eeg_file_suffix)
print(folder_dir)
swlda_wts = sio.loadmat(folder_dir)
swlda_wts_keys, _ = zip(*swlda_wts.items())
# print(swlda_wts_keys)
return swlda_wts
@staticmethod
def swlda_predict_y_prob(
b, in_model, eeg_signals_trun
):
b_inmodel = np.multiply(np.transpose(in_model), b)
pred_prob = np.matmul(eeg_signals_trun, b_inmodel)
return pred_prob
def ml_predict(
self, predict_prob, eeg_code,
letter_dim, repetition_pred
):
r"""
:param predict_prob: 2d-array probability, (feature_vector_dim, 1)
:param eeg_code: ultimately converted to 1d-array
:param letter_dim: integer
:param repetition_pred: integer
:return:
"""
assert predict_prob.shape == (letter_dim * repetition_pred * self.num_rep, 1), \
print('Inconsistent dimension of predict_prob.')
eeg_code = np.reshape(eeg_code, [self.num_letter * repetition_pred * self.num_rep])
single_score_row = np.zeros([int(self.num_rep / 2), self.num_letter * repetition_pred])
single_score_col = np.zeros([int(self.num_rep / 2), self.num_letter * repetition_pred])
for i in range(int(self.num_rep / 2)):
single_score_row[i, :] = predict_prob[np.where(eeg_code == i + 1)[0], 0]
single_score_col[i, :] = predict_prob[np.where(eeg_code == i + self.row_column_length + 1)[0], 0]
single_score_row = np.reshape(
single_score_row, [int(self.num_rep / 2), self.num_letter, repetition_pred]
)
single_score_col = np.reshape(
single_score_col, [int(self.num_rep / 2), self.num_letter, repetition_pred]
)
print('single_score_row has shape {}'.format(single_score_row.shape))
# Compute the prediction based on single seq (row + col)
arg_max_single_row = np.argmax(single_score_row, axis=0) + 1
arg_max_single_col = np.argmax(single_score_col, axis=0) + self.row_column_length + 1
# cumulative
cum_score_row = np.cumsum(single_score_row, axis=-1)
cum_score_col = np.cumsum(single_score_col, axis=-1)
'''
# 5 out of 7 or 5 out of 8
_, num_letter_temp, _ = single_score_row.shape
seq_fix = 5
n_choose_k = np.array(list(itl.product(*[(0, 1) for i in range(repetition_pred)])))
n_choose_k = np.copy(n_choose_k[np.sum(n_choose_k, axis=-1) == seq_fix, :]) # (21, 7) or (56, 8)
n_k_val, _ = n_choose_k.shape
cum_score_row = np.zeros([6, num_letter_temp, n_k_val])
cum_score_col = np.zeros([6, num_letter_temp, n_k_val])
for n_k_id in range(n_k_val):
cum_score_row[..., n_k_id] = np.sum(single_score_row[..., np.where(n_choose_k[n_k_id, :] == 1)[0]], axis=-1)
cum_score_col[..., n_k_id] = np.sum(single_score_col[..., np.where(n_choose_k[n_k_id, :] == 1)[0]], axis=-1)
'''
arg_max_cum_row = np.argmax(cum_score_row, axis=0) + 1
arg_max_cum_col = np.argmax(cum_score_col, axis=0) + self.row_column_length + 1
letter_single_mat = np.zeros([letter_dim, repetition_pred]).astype('<U1')
letter_cum_mat = np.zeros([letter_dim, repetition_pred]).astype('<U1')
for i in range(letter_dim):
for j in range(repetition_pred):
letter_single_mat[i, j] = self.determine_letter(arg_max_single_row[i, j], arg_max_single_col[i, j])
letter_cum_mat[i, j] = self.determine_letter(arg_max_cum_row[i, j], arg_max_cum_col[i, j])
ml_pred_dict = {
"single": letter_single_mat,
"cum": letter_cum_mat
}
return ml_pred_dict
def ml_predict_entropy(
self, predict_prob, eeg_code, screen_ids,
letter_dim, repetition_pred
):
assert predict_prob.shape == (letter_dim * repetition_pred * self.num_rep, 1), \
print('Inconsistent dimension of predict_prob.')
assert screen_ids.shape == (letter_dim, repetition_pred, repetition_pred), \
print('Inconsistent dimension of screen indicator matrix.')
eeg_code = np.reshape(eeg_code, [self.num_letter * repetition_pred * self.num_rep])
single_score_row = np.zeros([int(self.num_rep / 2), self.num_letter * repetition_pred])
single_score_col = np.zeros([int(self.num_rep / 2), self.num_letter * repetition_pred])
for i in range(int(self.num_rep / 2)):
single_score_row[i, :] = predict_prob[np.where(eeg_code == i + 1)[0], 0]
single_score_col[i, :] = predict_prob[np.where(eeg_code == i + self.row_column_length + 1)[0], 0]
single_score_row = np.reshape(
single_score_row, [int(self.num_rep / 2), self.num_letter, repetition_pred]
) # (6, 19, 8)
single_score_col = np.reshape(
single_score_col, [int(self.num_rep / 2), self.num_letter, repetition_pred]
)
arg_max_single_row = np.argmax(single_score_row, axis=0) + 1
arg_max_single_col = np.argmax(single_score_col, axis=0) + self.row_column_length + 1
entropy_score_row = np.zeros_like(single_score_row)
entropy_score_col = np.zeros_like(single_score_col)
for l_id in range(letter_dim):
for rep_id in range(repetition_pred):
for rep_select_id in range(repetition_pred):
if screen_ids[l_id, rep_id, rep_select_id] == 1:
entropy_score_row[:, l_id, rep_id] = entropy_score_row[:, l_id, rep_id] + single_score_row[:, l_id, rep_select_id]
entropy_score_col[:, l_id, rep_id] = entropy_score_col[:, l_id, rep_id] + single_score_col[:, l_id, rep_select_id]
arg_max_entropy_row = np.argmax(entropy_score_row, axis=0) + 1
arg_max_entropy_col = np.argmax(entropy_score_col, axis=0) + self.row_column_length + 1
letter_single_mat = np.zeros([letter_dim, repetition_pred]).astype('<U1')
letter_entropy_mat = np.zeros([letter_dim, repetition_pred]).astype('<U1')
for i in range(letter_dim):
for j in range(repetition_pred):
letter_single_mat[i, j] = self.determine_letter(arg_max_single_row[i, j], arg_max_single_col[i, j])
letter_entropy_mat[i, j] = self.determine_letter(arg_max_entropy_row[i, j], arg_max_entropy_col[i, j])
ml_prediction_dict = {
"single": letter_single_mat,
"cum": letter_entropy_mat
}
return ml_prediction_dict
@staticmethod
def compute_selected_sample_stats(
signals_trun_sub, type_sub, in_model
):
r"""
:param signals_trun_sub: (sample_row, feature_col)
:param type_sub: (sample_row,)
:param in_model: (1, feature_col)
:return:
"""
signals_trun_sub = signals_trun_sub[:, in_model[0, :] == 1]
signals_mean_1_sub = np.mean(signals_trun_sub[type_sub == 1], axis=0)
signals_mean_0_sub = np.mean(signals_trun_sub[type_sub == 0], axis=0)
signals_cov_sub = np.cov(signals_trun_sub, rowvar=False)
return [signals_mean_1_sub, signals_mean_0_sub,
signals_cov_sub, signals_trun_sub]
def plot_swlda_select_feature(
self, in_model, trn_repetition, scenario_name, sim_dat, channel_ids=None
):
r"""
:param inmodel: array_like, (1, num_electrode * n_length)
:param sim_folder_name: string
:param trn_repetition: integer or string
:param scenario_name: string
:param sim_dat: bool_like
:param channel_ids: array_like
:return: plot
"""
if channel_ids is None:
channel_ids = np.arange(self.num_electrode)
channel_dim = len(channel_ids)
in_model = np.reshape(in_model, [channel_dim, self.n_length])
if sim_dat:
plot_pdf_dir = '{}/swLDA/{}/{}_swlda_select_train_{}.pdf'.format(
self.parent_sim_output_path, scenario_name,
self.sub_folder_name, trn_repetition
)
else:
plot_pdf_dir = '{}/swLDA/{}/{}_swlda_select_train_{}.pdf'.format(
self.parent_eeg_output_path, scenario_name,
self.sub_folder_name, trn_repetition
)
plot_pdf = bpdf.PdfPages(plot_pdf_dir)
# print(plot_pdf_dir)
print('The number of features selected by swLDA is {}.'.format(np.sum(in_model)))
# log-likelihood trace-plot and mean selection rate
for i, e_id in enumerate(channel_ids):
fig_alt = plt.figure(figsize=(10, 10))
plt.plot(np.arange(self.n_length), in_model[i, :])
plt.ylim(-0.1, 1.1)
plt.title('channel ' + str(e_id + 1))
plt.close()
# plt.show()
plot_pdf.savefig(fig_alt)
plot_pdf.close()
return 'plots done!'
def save_exist_ml_pred_results(
self, exist_ml_pred_dict, repet_num_fit, repet_num_pred,
target_letter, exist_ml_name, scenario_name, file_subscript,
train_bool=True, sim_dat_bool=True
):
r"""
:param exist_ml_pred_dict:
:param repet_num_fit:
:param repet_num_pred:
:param target_letter:
:param exist_ml_name:
:param scenario_name:
:param file_subscript:
:param train_bool:
:param sim_dat_bool:
:return:
"""
method_dir = "{}/{}/{}".format(
self.parent_path,
exist_ml_name,
self.sub_name_short
)
try:
os.mkdir(method_dir)
print('Directory', method_dir, ' is created.')
except FileExistsError:
print('Directory ', method_dir, ' already exists.')
if sim_dat_bool:
method_dir = '{}/{}'.format(method_dir, self.sub_folder_name)
try:
os.mkdir(method_dir)
print('Directory', method_dir, ' is created.')
except FileExistsError:
print('Directory ', method_dir, ' already exists.')
method_dir = '{}/{}'.format(method_dir, scenario_name)
try:
os.mkdir(method_dir)
print('Directory', method_dir, ' is created.')
except FileExistsError:
print('Directory ', method_dir, ' already exists.')
if train_bool:
file_dir = "{}/{}_train_{}_pred_train_{}_{}.csv".format(
method_dir, self.sub_folder_name,
repet_num_fit, repet_num_pred, file_subscript
)
else:
file_dir = "{}/{}_train_{}_pred_test_{}_{}.csv".format(
method_dir, self.sub_folder_name,
repet_num_fit, repet_num_pred, file_subscript
)
single_pred = exist_ml_pred_dict['single']
cum_pred = exist_ml_pred_dict['cum']
task = 'w'
with open(file_dir, task) as f:
f_writer = csv.writer(f)
l0 = ['trn_repetition', str(repet_num_fit)]
f_writer.writerow(l0)
l1 = ['Sequence id']
l1.extend(list(np.arange(1, repet_num_pred+1)))
f_writer.writerow(l1)
f_writer.writerow(['Single sequence prediction:'])
for i, letter_i in enumerate(target_letter):
l_ij = ['Letter {}'.format(letter_i)]
for j in range(repet_num_pred):
l_ij.append(single_pred[i, j])
f_writer.writerow(l_ij)
f_writer.writerow([' '])
f_writer.writerow(['Cumulative sequence prediction:'])
for i, letter_i in enumerate(target_letter):
l_ij = ['Letter {}'.format(letter_i)]
for j in range(repet_num_pred):
l_ij.append(cum_pred[i, j])
f_writer.writerow(l_ij)
return 'Saving results done!'
def split_trunc_train_set_odd_even(
self, eeg_signals_trun, eeg_type_3d, eeg_code_3d, rep_train_id, rep_test_id
):
eeg_signals_trun_2 = np.reshape(
eeg_signals_trun, [self.num_electrode,
self.num_letter,
self.num_repetition,
self.num_rep,
self.n_length]
)
# We pick the odd sequence for training and even sequence for testing (within TRN_file)
eeg_signals_trun_2_odd = eeg_signals_trun_2[..., rep_train_id - 1, :, :]
eeg_signals_trun_2_even = eeg_signals_trun_2[..., rep_test_id - 1, :, :]
repet_num_train = len(rep_train_id)
repet_num_test = len(rep_test_id)
eeg_signals_trun_2_odd = np.reshape(
eeg_signals_trun_2_odd, [self.num_electrode,
self.num_letter * repet_num_train * self.num_rep,
self.n_length]
)
eeg_signals_trun_2_odd = np.transpose(eeg_signals_trun_2_odd, [1, 0, 2])
eeg_signals_trun_2_even = np.reshape(
eeg_signals_trun_2_even, [self.num_electrode,
self.num_letter * repet_num_test * self.num_rep,
self.n_length]
)
eeg_signals_trun_2_even = np.transpose(eeg_signals_trun_2_even, [1, 0, 2])
eeg_type_odd = eeg_type_3d[:, rep_train_id-1, :]
eeg_type_even = eeg_type_3d[:, rep_test_id-1, :]
eeg_code_odd = eeg_code_3d[:, rep_train_id-1, :]
eeg_code_even = eeg_code_3d[:, rep_test_id-1, :]
return [eeg_signals_trun_2_odd, eeg_type_odd, eeg_code_odd,
eeg_signals_trun_2_even, eeg_type_even, eeg_code_even]
def exist_ml_fit_predict(
self, ml_obj, signals_trun, eeg_code_1d,
target_letters, repet_num_fit, repet_num_pred,
method_name, scenario_name, file_subscript,
train_bool, sim_dat_bool
):
r"""
:param ml_obj:
:param signals_trun:
:param eeg_type_1d: (feature length, 1)
:param eeg_code_1d: (feature length, 1)
:param target_letters:
:param repet_num_fit:
:param repet_num_pred:
:param method_name:
:param scenario_name:
:param file_subscript:
:param train_bool:
:param sim_dat_bool:
:return:
"""
letter_dim = len(target_letters)
# ml_obj.fit(signals_trun, eeg_type_1d[0, :])
y_pred_train = ml_obj.predict_proba(signals_trun)[:, 1, np.newaxis]
exist_ml_dict = self.ml_predict(
y_pred_train, eeg_code_1d[0, :], letter_dim, repet_num_pred
)
self.save_exist_ml_pred_results(
exist_ml_dict, repet_num_fit, repet_num_pred,
target_letters, method_name, scenario_name, file_subscript,
train_bool, sim_dat_bool
)
print('Exist ML results have been saved.')
return exist_ml_dict | 0.553988 | 0.372448 |
import httplib, urllib, re
from datetime import *
from push import *
import time
import MySQLdb
class iHDate:
    """Thin wrapper around datetime.date for weekday checks and ISO date strings."""

    def __init__(self):
        # Snapshot of "today" taken at construction time; the weekday/ISO
        # helpers below all read this frozen value.
        self.date = date.today()

    def isTodayMonday(self):
        """Return True if the wrapped date is a Monday."""
        # Direct boolean return replaces the original if/else True/False ladder.
        return self.date.weekday() == 0

    def isTodayFriday(self):
        """Return True if the wrapped date is a Friday."""
        return self.date.weekday() == 4

    def isTodaySaturday(self):
        """Return True if the wrapped date is a Saturday."""
        return self.date.weekday() == 5

    def isTodayWeekend(self):
        """Return True if the wrapped date is a Sunday.

        NOTE(review): despite the name this checks Sunday only; Saturday is
        handled separately by isTodaySaturday(). Behavior kept for callers.
        """
        return self.date.weekday() == 6

    def getThedayString(self, distance = 0):
        """Return the ISO date string `distance` days from today (may be negative)."""
        return (date.today() + timedelta(distance)).isoformat()

    def getDateString(self):
        """Return the wrapped date as an ISO string (YYYY-MM-DD)."""
        return self.date.isoformat()

    def getYestardayString(self):
        """Return yesterday's date as an ISO string (method-name typo kept for callers)."""
        return (date.today() + timedelta(-1)).isoformat()

    def getDayBeforeYestardayString(self):
        """Return the day-before-yesterday's date as an ISO string."""
        return (date.today() + timedelta(-2)).isoformat()

    def getCurrentDateTimeStr(self):
        """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
        return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
class iHRequest:
    """Dispatches a scraping request described by a key/value list.

    Expected keyValues layout (inferred from fund()):
        [0] action name (currently only 'fund'),
        [1] display name,
        [2] HTTP host,
        [3] request path,
        [4] comma-separated regex patterns.
    """

    def __init__(self, keyValues=None):
        # None sentinel avoids the shared mutable-default-list pitfall;
        # callers that pass nothing still get a fresh empty list.
        self.keyValues = keyValues if keyValues is not None else []

    def dispatch(self):
        """Run the handler selected by keyValues[0] and return its result."""
        return {
            'fund': self.fund(),
        }[self.keyValues[0]]

    def fund(self):
        """Fetch the configured page and return regex matches, one list per pattern."""
        conn = httplib.HTTPConnection(self.keyValues[2])
        try:
            conn.request("GET", self.keyValues[3])
            htmlData = conn.getresponse().read()
            collectionData = []
            # NOTE(review): maxsplit=1 means at most two patterns are honoured
            # -- confirm this limit is intentional.
            for reg in self.keyValues[4].split(',', 1):
                collectionData.append(re.findall(reg.strip(), htmlData))
            return collectionData
        finally:
            # Close the connection even when the request or regex work fails
            # (original leaked it on any exception).
            conn.close()
class iHMysql:
def __init__(self, fundBasic=[], fundValues=[]):
self.fundBasic = fundBasic
self.fundValues = fundValues
def getConnection(self):
# return MySQLdb.connect(host='127.0.0.1',user='root',passwd='<PASSWORD>',db='ihakula',port=3306, charset="utf8")
return MySQLdb.connect(host='127.0.0.1',user='root',passwd='<PASSWORD>!',db='ihakula',port=3306, charset="utf8")
def insertOne(self):
try:
conn = self.getConnection()
cur = conn.cursor()
sql = "insert into ih_funds(name,million_revenue,sevenday_revenue,date) values(%s,%s,%s,%s)"
today = iHDate()
param = (self.fundBasic[1], self.fundValues[0][0], self.fundValues[1][0], today.getYestardayString())
n = cur.execute(sql,param)
conn.commit()
cur.close()
conn.close()
except MySQLdb.Error,e:
print "Mysql Error %d: %s" % (e.args[0], e.args[1])
def insertTwo(self):
try:
conn = self.getConnection()
cur = conn.cursor()
sql = "insert into ih_funds(name,million_revenue,sevenday_revenue,date) values(%s,%s,%s,%s)"
today = iHDate()
param = (self.fundBasic[1], self.fundValues[0][0], self.fundValues[1][0], today.getYestardayString())
n = cur.execute(sql,param)
conn.commit()
param = (self.fundBasic[1], self.fundValues[0][1], self.fundValues[1][1], today.getDayBeforeYestardayString())
n = cur.execute(sql,param)
conn.commit()
cur.close()
conn.close()
except MySQLdb.Error,e:
print "Mysql Error %d: %s" % (e.args[0], e.args[1])
def getFundIdByName(self, fname):
try:
conn = self.getConnection()
cur = conn.cursor()
sql = "select ID from ih_fund where name=%s"
n = cur.execute(sql, [fname.split('(')[0]])
fid = cur.fetchone()[0]
cur.close()
conn.close()
return fid
except MySQLdb.Error,e:
print "Mysql Error %d: %s" % (e.args[0], e.args[1])
def getRateById(self, frates = [], fid = 0):
try:
for r in frates:
if fid == r[0]:
return r[1]
return 0
except MySQLdb.Error,e:
print "Mysql Error %d: %s" % (e.args[0], e.args[1])
def updateConfirms(self, dayCount = 1):
try:
today = iHDate()
conn = self.getConnection()
cur = conn.cursor()
sql = "select * from ih_if_certification_cash where approve_date=%s and verified=0 order by apply_date asc"
param = [today.getDateString()]
if 2 == dayCount:
param = [today.getYestardayString()]
n = cur.execute(sql, param)
rows = cur.fetchall()
for row in rows:
accountId = row[1]
sql = "select * from ih_if_account where ID=%s"
param = [accountId]
n = cur.execute(sql, param)
account = cur.fetchone()
cashArr = row[2].split(',')
verifiedCashArr = account[3].split(',')
for fcash in cashArr:
founded = False
fkeyvalue = fcash.split(':')
fid = fkeyvalue[0]
fvalue = fkeyvalue[1]
for vcash in verifiedCashArr:
objectIndex = verifiedCashArr.index(vcash)
vkeyvalue = vcash.split(':')
vid = vkeyvalue[0]
vvalue = vkeyvalue[1]
if fid == vid:
verifiedValue = float(fvalue) + float(vvalue)
verifiedValue = float("%.4f" % verifiedValue)
verifiedCash = str(vid) + ':' + str(verifiedValue)
verifiedCashArr[objectIndex] = verifiedCash
founded = True
break
if False == founded:
verifiedCashArr.append(fcash)
sep = ','
verifiedCashStr = sep.join(verifiedCashArr)
sql = "update ih_if_account set purchased=%s, date=%s where ID=%s"
param = [verifiedCashStr, today.getCurrentDateTimeStr(), accountId]
n = cur.execute(sql, param)
conn.commit()
sql = "update ih_if_certification_cash set verified=%s where ID=%s"
param = [1, row[0]]
n = cur.execute(sql, param)
conn.commit()
cur.close()
conn.close()
except MySQLdb.Error,e:
print "Mysql Error %d: %s" % (e.args[0], e.args[1])
def getAllFunds(self):
try:
conn = self.getConnection()
cur = conn.cursor()
sql = "select * from ih_fund order by ID asc"
n = cur.execute(sql)
rows = cur.fetchall()
cur.close()
conn.close()
fundsIDArr = []
for row in rows:
fundsIDArr.append(row[0]);
return fundsIDArr
except MySQLdb.Error,e:
print "Mysql Error %d: %s" % (e.args[0], e.args[1])
def getFundsDayRevenue(self, date = ''):
try:
conn = self.getConnection()
cur = conn.cursor()
fundsRevenueDic = {}
allfundsIDArr = self.getAllFunds()
for fid in allfundsIDArr:
sql = "SELECT million_revenue FROM ih_funds where name = (select concat(name,'(',code,')') as fundname from ih_fund where ID = %s) and date = %s"
param = [fid, date]
n = cur.execute(sql, param)
row = cur.fetchone()
if row:
fundsRevenueDic[fid] = row[0]
cur.close()
conn.close()
return fundsRevenueDic
except MySQLdb.Error,e:
print "Mysql Error %d: %s" % (e.args[0], e.args[1])
def pushNotification(self, dayCount = 1):
try:
conn = self.getConnection()
cur = conn.cursor()
sql = "select * from ih_if_account order by ID asc"
n = cur.execute(sql)
rows = cur.fetchall()
today = iHDate()
settlementDate = today.getYestardayString()
if 2 == dayCount:
settlementDate = today.getDayBeforeYestardayString()
fundsRevenueDic = self.getFundsDayRevenue(settlementDate)
tokenMessageDic = {}
tokenFreeDic = {}
for row in rows:
accID = row[0]
token = row[2]
purchased = row[3]
allMoney = row[4]
if token == '':
continue
tokenFreeDic[token] = row[6]
verifiedCashArr = purchased.split(',')
allearn = 0
for vcash in verifiedCashArr:
vkeyvalue = vcash.split(':')
fid = vkeyvalue[0]
rate = fundsRevenueDic.get(long(fid))
if rate != None:
income = float(rate) * float(vkeyvalue[1]) / 10000
allearn += income
if income > 0:
# Insert new income
sql = "insert into ih_if_income(account_id,fund_id,income,date) values(%s,%s,%s,%s)"
param = (accID, fid, income, today.getDateString())
cur.execute(sql,param)
conn.commit()
# Insert new buy
dayAfter = 2
if today.isTodayFriday():
dayAfter = 4;
elif today.isTodaySaturday():
dayAfter = 3;
sql = "insert into ih_if_certification_cash(account_id,cash,verified,approve_date,apply_date) values(%s,%s,%s,%s,%s)"
param = (accID, str(fid)+":"+str(income), 0, today.getThedayString(dayAfter), today.getDateString())
cur.execute(sql,param)
conn.commit()
fundsMoneyArr = allMoney.split(',')
for oldcash in fundsMoneyArr:
oldIndex = fundsMoneyArr.index(oldcash)
oldkeyvalue = oldcash.split(':')
oldfid = oldkeyvalue[0]
if str(oldfid) == str(fid):
newDisValue = float(oldkeyvalue[1]) + income
newDisValue = float('%.4f' % newDisValue)
fundsMoneyArr[oldIndex] = str(oldfid) + ":" + str(newDisValue)
sep = ','
allMoney = sep.join(fundsMoneyArr)
break
# re.findall("(?<!\d)2"+":[+-]?\d*(?:\.\d+)?",allMoney)
# oldDisValue = re.findall(str(fid) + ":([-+]?\d*\.?\d+)", allMoney)
# newDisValue = float(oldDisValue[0]) + income
# allMoney = re.sub(str(fid) + ":([-+]?\d*\.?\d+)", str(fid) + ":" + str(newDisValue), allMoney)
tokenMessageDic[token] = float('%.2f'% allearn)
if allearn > 0:
sql = "update ih_if_account set all_money=%s where ID=%s"
param = [allMoney, accID]
n = cur.execute(sql, param)
conn.commit()
cur.close()
conn.close()
pushObj = iHPush()
messageCount = 0
for token in tokenMessageDic:
messageCount = messageCount + 1
revenue = tokenMessageDic[token]
ttype = tokenFreeDic[token]
if 0 == revenue:
continue
if 100 > messageCount:
alertmsg = '您于'+settlementDate+'号的总收益为: '+ str(tokenMessageDic[token])+' RMB'
pushObj.push(token, alertmsg, 0, ttype)
else:
pushObj.notify()
messageCount = 0
pushObj = iHPush()
pushObj.notify()
except MySQLdb.Error,e:
print "Mysql Error %d: %s" % (e.args[0], e.args[1])
def _ingest_collection_items(insert_two):
    """Read CollectionItems.txt and store each configured fund's scraped values.

    Each line is 'type:name:host:path:regexes' (split on the first 4 colons).
    insert_two selects whether two settlement days (Monday catch-up for the
    weekend) or a single day is stored.  Raises IOError if the file is missing.
    """
    # 'with' guarantees the data file is closed (the original leaked the handle).
    with open('CollectionItems.txt') as data:
        for each_line in data:
            line = each_line.split(':', 4)
            element = iHRequest(line)
            filterArr = element.dispatch()
            sql = iHMysql(line, filterArr)
            if insert_two:
                sql.insertTwo()
            else:
                sql.insertOne()

def run():
    """Daily entry point: scrape fund data, confirm purchases, push notifications."""
    today = iHDate()
    if today.isTodayMonday():
        try:
            # Monday backfills both weekend settlement days.
            _ingest_collection_items(True)
            updateSql = iHMysql()
            updateSql.updateConfirms(1)
            updateSql.pushNotification(1)
            updateSql.pushNotification(2)
        except IOError:
            print("The datafile is missing")
            return
    else:
        if today.isTodayWeekend():
            # Sunday: no settlement data published, nothing to do.
            return
        else:
            try:
                _ingest_collection_items(False)
                updateSql = iHMysql()
                # Saturday has nothing to confirm, do nothing
                updateSql.updateConfirms(1)
                updateSql.pushNotification(1)
            except IOError:
                print("The datafile is missing")
run()
# fund:天弘(000198):fund.eastmoney.com:/000198.html:<span class="ui-font-large ui-color-red ui-num">([\s\S]*?)</span>,<span class="ui-font-middle ui-color-red ui-num">([\s\S]*?)</span></dd></dl>
import httplib, urllib, re
from datetime import *
from push import *
import time
import MySQLdb
class iHDate:
    """Date helper wrapping 'today' with the weekday tests and ISO-format
    strings the collector needs.

    Method names keep the original spelling ('Yestarday') because callers
    elsewhere in this file depend on them.
    """
    def __init__(self):
        # Snapshot of the current day; weekday() is 0=Monday .. 6=Sunday.
        self.date = date.today()
    def isTodayMonday(self):
        return self.date.weekday() == 0
    def isTodayFriday(self):
        return self.date.weekday() == 4
    def isTodaySaturday(self):
        return self.date.weekday() == 5
    def isTodayWeekend(self):
        # NOTE(review): only Sunday counts as "weekend" here -- Saturday is
        # handled by the ordinary weekday branch in run(). Confirm intended.
        return self.date.weekday() == 6
    def getThedayString(self, distance = 0):
        """ISO date string for today shifted by `distance` days (may be negative)."""
        return (date.today() + timedelta(distance)).isoformat()
    def getDateString(self):
        """ISO date string for today."""
        return self.date.isoformat()
    def getYestardayString(self):
        """ISO date string for yesterday."""
        return (date.today() + timedelta(-1)).isoformat()
    def getDayBeforeYestardayString(self):
        """ISO date string for two days ago."""
        return (date.today() + timedelta(-2)).isoformat()
    def getCurrentDateTimeStr(self):
        """Local 'YYYY-MM-DD HH:MM:SS' timestamp string."""
        return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
class iHRequest:
    # Fetches one page over HTTP and extracts values with the configured regexes.
    # keyValues layout (a CollectionItems.txt line split on ':'):
    #   [0] record type ('fund'), [1] display name, [2] host,
    #   [3] path, [4] comma-separated pair of regexes.
    def __init__(self, keyValues=[]):
        # NOTE(review): mutable default argument is shared across calls;
        # harmless only because callers always pass a list explicitly.
        self.keyValues = keyValues
    def dispatch(self):
        # Dispatch on record type. NOTE(review): every handler value in this
        # dict is evaluated eagerly, so adding a second type would trigger
        # both fetches on every call.
        return {
            'fund': self.fund(),
        }[self.keyValues[0]]
    def fund(self):
        # Python 2 httplib: fetch the fund page and run both regexes over it.
        conn = httplib.HTTPConnection(self.keyValues[2])
        conn.request("GET", self.keyValues[3])
        response = conn.getresponse()
        htmlData = response.read()
        collectionData = [];
        # Split on the first comma only: the second regex itself contains commas.
        regs = self.keyValues[4].split(',', 1)
        for reg in regs:
            reg = reg.strip()
            htmlValue = re.findall(reg, htmlData)
            collectionData.append(htmlValue)
        conn.close()
        # Returns [matches_for_regex1, matches_for_regex2].
        return collectionData
class iHMysql:
    # Persistence layer for scraped fund values plus the simulated-account
    # bookkeeping: purchase confirmations, daily income, APNs notifications.
    # "Cash" blobs are stored as 'fund_id:amount,fund_id:amount,...' strings.
    # NOTE(review): written for Python 2 ('except X,e', print statements,
    # long()); cursors/connections leak when an exception fires before close().
    def __init__(self, fundBasic=[], fundValues=[]):
        # NOTE(review): mutable default arguments are shared across calls;
        # harmless only because callers pass fresh lists or use the no-arg form.
        self.fundBasic = fundBasic
        self.fundValues = fundValues
    def getConnection(self):
        # Hard-coded local credentials ('<PASSWORD>' is a scrubbed placeholder).
        # return MySQLdb.connect(host='127.0.0.1',user='root',passwd='<PASSWORD>',db='ihakula',port=3306, charset="utf8")
        return MySQLdb.connect(host='127.0.0.1',user='root',passwd='<PASSWORD>!',db='ihakula',port=3306, charset="utf8")
    def insertOne(self):
        # Store yesterday's scraped values (per-10k-unit and 7-day revenue)
        # for this fund as one ih_funds row.
        try:
            conn = self.getConnection()
            cur = conn.cursor()
            sql = "insert into ih_funds(name,million_revenue,sevenday_revenue,date) values(%s,%s,%s,%s)"
            today = iHDate()
            param = (self.fundBasic[1], self.fundValues[0][0], self.fundValues[1][0], today.getYestardayString())
            n = cur.execute(sql,param)
            conn.commit()
            cur.close()
            conn.close()
        except MySQLdb.Error,e:
            print "Mysql Error %d: %s" % (e.args[0], e.args[1])
    def insertTwo(self):
        # Monday catch-up: store both weekend days' values (indices 0 and 1
        # of each scraped match list) as two ih_funds rows.
        try:
            conn = self.getConnection()
            cur = conn.cursor()
            sql = "insert into ih_funds(name,million_revenue,sevenday_revenue,date) values(%s,%s,%s,%s)"
            today = iHDate()
            param = (self.fundBasic[1], self.fundValues[0][0], self.fundValues[1][0], today.getYestardayString())
            n = cur.execute(sql,param)
            conn.commit()
            param = (self.fundBasic[1], self.fundValues[0][1], self.fundValues[1][1], today.getDayBeforeYestardayString())
            n = cur.execute(sql,param)
            conn.commit()
            cur.close()
            conn.close()
        except MySQLdb.Error,e:
            print "Mysql Error %d: %s" % (e.args[0], e.args[1])
    def getFundIdByName(self, fname):
        # Resolve a display name like 'name(code)' to its ih_fund ID
        # (only the part before '(' is matched).
        try:
            conn = self.getConnection()
            cur = conn.cursor()
            sql = "select ID from ih_fund where name=%s"
            n = cur.execute(sql, [fname.split('(')[0]])
            fid = cur.fetchone()[0]
            cur.close()
            conn.close()
            return fid
        except MySQLdb.Error,e:
            print "Mysql Error %d: %s" % (e.args[0], e.args[1])
    def getRateById(self, frates = [], fid = 0):
        # Linear search of (fund_id, rate) pairs; 0 when the id is absent.
        try:
            for r in frates:
                if fid == r[0]:
                    return r[1]
            return 0
        except MySQLdb.Error,e:
            print "Mysql Error %d: %s" % (e.args[0], e.args[1])
    def updateConfirms(self, dayCount = 1):
        # Confirm pending purchases whose approve_date has arrived (today, or
        # yesterday when dayCount == 2), merging each pending 'fid:amount'
        # entry into the account's verified 'purchased' blob.
        try:
            today = iHDate()
            conn = self.getConnection()
            cur = conn.cursor()
            sql = "select * from ih_if_certification_cash where approve_date=%s and verified=0 order by apply_date asc"
            param = [today.getDateString()]
            if 2 == dayCount:
                param = [today.getYestardayString()]
            n = cur.execute(sql, param)
            rows = cur.fetchall()
            for row in rows:
                accountId = row[1]
                sql = "select * from ih_if_account where ID=%s"
                param = [accountId]
                n = cur.execute(sql, param)
                account = cur.fetchone()
                cashArr = row[2].split(',')
                verifiedCashArr = account[3].split(',')
                for fcash in cashArr:
                    founded = False
                    fkeyvalue = fcash.split(':')
                    fid = fkeyvalue[0]
                    fvalue = fkeyvalue[1]
                    # Merge into an existing fund entry if present...
                    for vcash in verifiedCashArr:
                        objectIndex = verifiedCashArr.index(vcash)
                        vkeyvalue = vcash.split(':')
                        vid = vkeyvalue[0]
                        vvalue = vkeyvalue[1]
                        if fid == vid:
                            verifiedValue = float(fvalue) + float(vvalue)
                            # Round to 4 decimal places to keep the blob compact.
                            verifiedValue = float("%.4f" % verifiedValue)
                            verifiedCash = str(vid) + ':' + str(verifiedValue)
                            verifiedCashArr[objectIndex] = verifiedCash
                            founded = True
                            break
                    # ...otherwise append a brand-new fund entry.
                    if False == founded:
                        verifiedCashArr.append(fcash)
                sep = ','
                verifiedCashStr = sep.join(verifiedCashArr)
                sql = "update ih_if_account set purchased=%s, date=%s where ID=%s"
                param = [verifiedCashStr, today.getCurrentDateTimeStr(), accountId]
                n = cur.execute(sql, param)
                conn.commit()
                # Mark this certification record as processed.
                sql = "update ih_if_certification_cash set verified=%s where ID=%s"
                param = [1, row[0]]
                n = cur.execute(sql, param)
                conn.commit()
            cur.close()
            conn.close()
        except MySQLdb.Error,e:
            print "Mysql Error %d: %s" % (e.args[0], e.args[1])
    def getAllFunds(self):
        # Return the list of all ih_fund IDs, ascending.
        try:
            conn = self.getConnection()
            cur = conn.cursor()
            sql = "select * from ih_fund order by ID asc"
            n = cur.execute(sql)
            rows = cur.fetchall()
            cur.close()
            conn.close()
            fundsIDArr = []
            for row in rows:
                fundsIDArr.append(row[0]);
            return fundsIDArr
        except MySQLdb.Error,e:
            print "Mysql Error %d: %s" % (e.args[0], e.args[1])
    def getFundsDayRevenue(self, date = ''):
        # Map fund ID -> million_revenue for the given date; funds with no
        # row for that date are simply absent from the dict.
        try:
            conn = self.getConnection()
            cur = conn.cursor()
            fundsRevenueDic = {}
            allfundsIDArr = self.getAllFunds()
            for fid in allfundsIDArr:
                # ih_funds stores the display name 'name(code)', so rebuild it
                # from ih_fund to join the two tables.
                sql = "SELECT million_revenue FROM ih_funds where name = (select concat(name,'(',code,')') as fundname from ih_fund where ID = %s) and date = %s"
                param = [fid, date]
                n = cur.execute(sql, param)
                row = cur.fetchone()
                if row:
                    fundsRevenueDic[fid] = row[0]
            cur.close()
            conn.close()
            return fundsRevenueDic
        except MySQLdb.Error,e:
            print "Mysql Error %d: %s" % (e.args[0], e.args[1])
    def pushNotification(self, dayCount = 1):
        # Settle one day's revenue for every account: record per-fund income,
        # schedule its reinvestment confirmation, update the account's
        # all_money blob, then push the total via APNs (batched 100 per
        # connection). dayCount==2 settles the day before yesterday.
        try:
            conn = self.getConnection()
            cur = conn.cursor()
            sql = "select * from ih_if_account order by ID asc"
            n = cur.execute(sql)
            rows = cur.fetchall()
            today = iHDate()
            settlementDate = today.getYestardayString()
            if 2 == dayCount:
                settlementDate = today.getDayBeforeYestardayString()
            fundsRevenueDic = self.getFundsDayRevenue(settlementDate)
            tokenMessageDic = {}
            tokenFreeDic = {}
            for row in rows:
                accID = row[0]
                token = row[2]
                purchased = row[3]
                allMoney = row[4]
                # Accounts without a device token get no settlement at all.
                if token == '':
                    continue
                tokenFreeDic[token] = row[6]
                verifiedCashArr = purchased.split(',')
                allearn = 0
                for vcash in verifiedCashArr:
                    vkeyvalue = vcash.split(':')
                    fid = vkeyvalue[0]
                    # Revenue dict is keyed by the numeric (long) fund ID.
                    rate = fundsRevenueDic.get(long(fid))
                    if rate != None:
                        # million_revenue is income per 10,000 units held.
                        income = float(rate) * float(vkeyvalue[1]) / 10000
                        allearn += income
                        if income > 0:
                            # Insert new income
                            sql = "insert into ih_if_income(account_id,fund_id,income,date) values(%s,%s,%s,%s)"
                            param = (accID, fid, income, today.getDateString())
                            cur.execute(sql,param)
                            conn.commit()
                            # Insert new buy
                            # Reinvested income confirms T+2; longer across the
                            # weekend (Friday: +4, Saturday: +3).
                            dayAfter = 2
                            if today.isTodayFriday():
                                dayAfter = 4;
                            elif today.isTodaySaturday():
                                dayAfter = 3;
                            sql = "insert into ih_if_certification_cash(account_id,cash,verified,approve_date,apply_date) values(%s,%s,%s,%s,%s)"
                            param = (accID, str(fid)+":"+str(income), 0, today.getThedayString(dayAfter), today.getDateString())
                            cur.execute(sql,param)
                            conn.commit()
                            # Fold the income into this fund's entry of the
                            # account-total blob.
                            fundsMoneyArr = allMoney.split(',')
                            for oldcash in fundsMoneyArr:
                                oldIndex = fundsMoneyArr.index(oldcash)
                                oldkeyvalue = oldcash.split(':')
                                oldfid = oldkeyvalue[0]
                                if str(oldfid) == str(fid):
                                    newDisValue = float(oldkeyvalue[1]) + income
                                    newDisValue = float('%.4f' % newDisValue)
                                    fundsMoneyArr[oldIndex] = str(oldfid) + ":" + str(newDisValue)
                                    sep = ','
                                    allMoney = sep.join(fundsMoneyArr)
                                    break
                            # re.findall("(?<!\d)2"+":[+-]?\d*(?:\.\d+)?",allMoney)
                            # oldDisValue = re.findall(str(fid) + ":([-+]?\d*\.?\d+)", allMoney)
                            # newDisValue = float(oldDisValue[0]) + income
                            # allMoney = re.sub(str(fid) + ":([-+]?\d*\.?\d+)", str(fid) + ":" + str(newDisValue), allMoney)
                tokenMessageDic[token] = float('%.2f'% allearn)
                if allearn > 0:
                    sql = "update ih_if_account set all_money=%s where ID=%s"
                    param = [allMoney, accID]
                    n = cur.execute(sql, param)
                    conn.commit()
            cur.close()
            conn.close()
            pushObj = iHPush()
            messageCount = 0
            for token in tokenMessageDic:
                messageCount = messageCount + 1
                revenue = tokenMessageDic[token]
                ttype = tokenFreeDic[token]
                # Zero-income accounts get no push.
                if 0 == revenue:
                    continue
                if 100 > messageCount:
                    alertmsg = '您于'+settlementDate+'号的总收益为: '+ str(tokenMessageDic[token])+' RMB'
                    pushObj.push(token, alertmsg, 0, ttype)
                else:
                    # Flush and reopen the push connection every 100 messages.
                    pushObj.notify()
                    messageCount = 0
                    pushObj = iHPush()
            pushObj.notify()
        except MySQLdb.Error,e:
            print "Mysql Error %d: %s" % (e.args[0], e.args[1])
def _ingest_collection_items(insert_two):
    """Read CollectionItems.txt and store each configured fund's scraped values.

    Each line is 'type:name:host:path:regexes' (split on the first 4 colons).
    insert_two selects whether two settlement days (Monday catch-up for the
    weekend) or a single day is stored.  Raises IOError if the file is missing.
    """
    # 'with' guarantees the data file is closed (the original leaked the handle).
    with open('CollectionItems.txt') as data:
        for each_line in data:
            line = each_line.split(':', 4)
            element = iHRequest(line)
            filterArr = element.dispatch()
            sql = iHMysql(line, filterArr)
            if insert_two:
                sql.insertTwo()
            else:
                sql.insertOne()

def run():
    """Daily entry point: scrape fund data, confirm purchases, push notifications."""
    today = iHDate()
    if today.isTodayMonday():
        try:
            # Monday backfills both weekend settlement days.
            _ingest_collection_items(True)
            updateSql = iHMysql()
            updateSql.updateConfirms(1)
            updateSql.pushNotification(1)
            updateSql.pushNotification(2)
        except IOError:
            print("The datafile is missing")
            return
    else:
        if today.isTodayWeekend():
            # Sunday: no settlement data published, nothing to do.
            return
        else:
            try:
                _ingest_collection_items(False)
                updateSql = iHMysql()
                # Saturday has nothing to confirm, do nothing
                updateSql.updateConfirms(1)
                updateSql.pushNotification(1)
            except IOError:
                print("The datafile is missing")
run()
# fund:天弘(000198):fund.eastmoney.com:/000198.html:<span class="ui-font-large ui-color-red ui-num">([\s\S]*?)</span>,<span class="ui-font-middle ui-color-red ui-num">([\s\S]*?)</span></dd></dl>
import re
from datetime import timedelta
from enum import Enum
from typing import Union, Tuple, List
from django.apps import apps
from django.core import exceptions
from django.conf import settings
from django.db.models import F, Count, Min, Max, Sum, Value, Avg, ExpressionWrapper, DurationField, FloatField, Model, JSONField
from django.db.models import functions as func
from data_interrogator import exceptions as di_exceptions
from data_interrogator.db import GroupConcat, DateDiff, ForceDate, SumIf
try:
from garnett.expressions import L
from garnett.fields import TranslatedField
GARNETT_ENABLED = True
except:
GARNETT_ENABLED = False
# Utility functions
# Infix operator -> binary callable, used to build ORM math expressions.
math_infix_symbols = {
    '-': lambda a, b: a - b,
    '+': lambda a, b: a + b,
    '/': lambda a, b: a / b,
    '*': lambda a, b: a * b,
}
# Large unit multipliers to filter across
BIG_MULTIPLIERS = {
    'day': 1,
    'week': 7,
    'fortnight': 14,
    'month': 30,  # close enough
    'year': 365,
    'decade': 10 * 365,
}
# Small unit multipliers to filter across
LITTLE_MULTIPLIERS = {
    'second': 1,
    'minute': 60,
    'hour': 60 * 60,
    'microfortnight': 1.2,  # sure why not?
}
# Internal prefix marking columns wrapped in a garnett L() expression;
# stripped back off result keys by clean_lexp_key().
LEXP = "l_exp____"
def get_base_model(app_label: str, model: str) -> Model:
    """Resolve an (app_label, model_name) pair to its Django model class, case-insensitively."""
    return apps.get_model(app_label.lower(), model.lower())
def normalise_field(text) -> str:
    """Translate a UI field reference into Django ORM lookup syntax.

    ``agg(relation.field)`` becomes ``agg::relation__field``: ``(`` marks an
    aggregation (``::``), ``)`` is dropped, and dots become ``__`` joins.
    """
    cleaned = text.strip()
    for ui_token, orm_token in (('(', '::'), (')', ''), ('.', '__')):
        cleaned = cleaned.replace(ui_token, orm_token)
    return cleaned
def clean_lexp_key(key):
    """Strip the internal language-expression prefix from a result key, if present."""
    return key[len(LEXP):] if key.startswith(LEXP) else key
def normalise_math(expression):
    """Turn a UI math expression (e.g. ``a - b``) into a Django ORM expression.

    Only the first infix operator is honoured.  ``date - date`` produces a
    duration; every other combination is wrapped as a float computation.
    Expressions without an operator become a plain ``F()`` reference.
    """
    if not any(s in expression for s in math_infix_symbols.keys()):
        # we're aggregating some mathy things, these are tricky
        return F(normalise_field(expression))
    # Raw string: the original '[\-\/\+\*]' relied on invalid escape
    # sequences (DeprecationWarning); this character class is identical.
    math_operator_re = r'[-/+*]'
    a, b = [v.strip() for v in re.split(math_operator_re, expression, 1)]
    first_operator = re.findall(math_operator_re, expression)[0]
    if first_operator == "-" and a.endswith('date') and b.endswith('date'):
        # Subtracting two dates: produce a duration, not a float.
        expr = ExpressionWrapper(
            DateDiff(
                ForceDate(F(a)),
                ForceDate(F(b))
            ), output_field=DurationField()
        )
    else:
        expr = ExpressionWrapper(
            math_infix_symbols[first_operator](F(a), F(b)),
            output_field=FloatField()
        )
    return expr
def clean_filter(text: str) -> Union[str, Tuple[str, str, str]]:
    """Split a UI filter string into ``(field, django_lookup_suffix, value)``.

    Recognises ``<>``, ``<=``, ``<``, ``>=``, ``>`` and ``=`` (checked in that
    order so two-character operators win).  Text containing no recognised
    operator is returned unchanged.
    """
    operator_map = (
        ('<>', 'ne'), ('<=', 'lte'), ('<', 'lt'),
        ('>=', 'gte'), ('>', 'gt'), ('=', ''),
    )
    for ui_op, django_op in operator_map:
        parts = text.split(ui_op)
        if len(parts) != 2:
            continue
        field, value = parts
        suffix = '' if ui_op == '=' else '__%s' % django_op
        return field, suffix, value
    return text
class Allowable(Enum):
    """Sentinels used in place of explicit app/model/field permission lists."""
    ALL_APPS = 1
    # Was 1: the duplicate value made ALL_MODELS an enum alias of ALL_APPS,
    # so the two sentinels could never be told apart (see the
    # `allowed != Allowable.ALL_APPS` check in Interrogator.__init__).
    ALL_MODELS = 2
    ALL_FIELDS = 3
class Interrogator:
    """Builds and runs a queryset from UI-supplied column and filter strings,
    enforcing app/model allow- and exclude-lists along every join path.
    """
    available_aggregations = {
        "min": Min,
        "max": Max,
        "sum": Sum,
        'avg': Avg,
        "count": Count,
        "substr": func.Substr,
        "group": GroupConcat,
        "concat": func.Concat,
        "sumif": SumIf,
    }
    errors = []
    report_models = Allowable.ALL_MODELS
    # both of these are lists of either:
    # ('app_label',)
    # ('app_label', 'model_name')
    # Not this yet: ('app_label', 'model_name', ['list of field names'])
    allowed = Allowable.ALL_MODELS
    excluded = []
    def __init__(self, report_models=None, allowed=None, excluded=None):
        """Override class-level permission defaults and normalise rule casing."""
        if report_models is not None:
            self.report_models = report_models
        if allowed is not None:
            self.allowed = allowed
        if excluded is not None:
            self.excluded = excluded
        # Clean up rules if they aren't lower cased.
        fixed_excluded = []
        for rule in self.excluded:
            if len(rule) == 1:
                rule = (rule[0].lower(),)
            if len(rule) == 2:
                rule = (rule[0].lower(), rule[1].lower())
            if len(rule) == 3:
                rule = (rule[0].lower(), rule[1].lower(), rule[2])
            fixed_excluded.append(rule)
        self.excluded = fixed_excluded
        if self.allowed != Allowable.ALL_MODELS:
            # NOTE(review): for a plain-string entry this collects i[0], the
            # first *character* of the app label -- confirm entries are
            # always tuples in practice.
            self.allowed_apps = [
                i[0] for i in allowed
                if type(i) is str or type(i) is tuple and len(i) == 1
            ]
        if self.allowed != Allowable.ALL_APPS:
            self.allowed_models = [
                i[:2] for i in allowed
                if type(i) is tuple and len(i) == 2
            ]
        else:
            self.allowed_models = Allowable.ALL_MODELS
    def get_model_queryset(self):
        """Unfiltered queryset for the validated base model."""
        return self.base_model.objects.all()
    def process_annotation_concat(self, column):
        pass
    def process_annotation(self, column):
        pass
    def is_allowed_model(self, model):
        pass
    def verify_column(self, column):
        """Walk the '__'-joined column path, following foreign keys.

        NOTE(review): raises IndexError when a path segment names no field,
        and returns nothing on success -- only the exception is meaningful.
        """
        model = self.base_model
        args = column.split('__')
        for a in args:
            if model:
                # If there is no model, its not a foreign key, so its safe as its either
                # a transform or a key JSON lookup.
                model = [f for f in model._meta.get_fields() if f.name == a][0].related_model
    def get_field_by_name(self, model, field_name):
        """Field instance for field_name on model (raises FieldDoesNotExist)."""
        return model._meta.get_field(field_name)
    def is_excluded_field(self, model, field) -> bool:
        """
        Accepts model and field object
        TODO: currently we're not doing per field permission checks, add this later
        """
        return False
    def is_excluded_model(self, model_class) -> bool:
        """Returns whether a model should be excluded"""
        app_label = model_class._meta.app_label
        model_name = model_class._meta.model_name
        # Special case to include content type
        if model_name == 'contenttype':
            return False
        if app_label in self.excluded or (app_label, model_name) in self.excluded:
            return True
        if self.allowed == Allowable.ALL_MODELS:
            return False
        excluded = not (app_label in self.allowed or ((app_label, model_name) in self.allowed))
        return excluded
    def has_forbidden_field(self, column) -> bool:
        """Return whether a forbidden field exists in the query"""
        checking_model = self.base_model
        joins = column.split('__')
        for _, relation in enumerate(joins):
            if checking_model:
                try:
                    field = self.get_field_by_name(checking_model, relation)
                    if isinstance(field, JSONField):
                        # This is safe as you can't foreign key out of a JSONField
                        return False
                    if field.related_model:
                        if self.is_excluded_model(field.related_model):
                            # Despite the join/field being named differently, this column is forbidden!
                            return True
                    if self.is_excluded_field(checking_model, field):
                        # Despite the join/field being named differently, this column is forbidden!
                        return True
                    checking_model = field.related_model
                except exceptions.FieldDoesNotExist:
                    pass
        return False
    def is_translatable(self, column) -> bool:
        """Return whether the column path terminates at a garnett TranslatedField."""
        if not GARNETT_ENABLED:
            return False
        checking_model = self.base_model
        joins = list(enumerate(column.split('__')))
        for i, relation in joins:
            if checking_model:
                try:
                    field = self.get_field_by_name(checking_model, relation)
                    if isinstance(field, TranslatedField):
                        # Only translatable if the TranslatedField is the leaf.
                        return i == len(joins) - 1
                    checking_model = field.related_model
                except exceptions.FieldDoesNotExist:
                    pass
        return False
    def get_base_annotations(self):
        """Annotations applied to every query; subclasses override."""
        return {}
    def get_annotation(self, column):
        """Build the aggregation expression for an ``agg::field[,args]`` column."""
        agg, field = column.split('::', 1)
        if agg == 'sumif':
            try:
                field, cond = field.split(',', 1)
            except ValueError:  # was a bare `except:` -- only the unpack can fail here
                raise di_exceptions.InvalidAnnotationError("SUMIF must have a condition")
            field = normalise_math(field)
            conditions = {}
            for condition in cond.split(','):
                condition_key, condition_val = condition.split('=', 1)
                conditions[normalise_field(condition_key)] = normalise_field(condition_val)
            annotation = self.available_aggregations[agg](field=field, **conditions)
        elif agg == 'join':
            fields = []
            for f in field.split(','):
                if f.startswith(('"', "'")):
                    # its a string!
                    fields.append(Value(f.strip('"').strip("'")))
                else:
                    fields.append(f)
            annotation = self.available_aggregations[agg](*fields)
        elif agg == "substr":
            # Pad with None so a missing end index is allowed.
            field, i, j = (field.split(',') + [None])[0:3]
            annotation = self.available_aggregations[agg](field, i, j)
        else:
            field = normalise_math(field)
            annotation = self.available_aggregations[agg](field, distinct=False)
        return annotation
    def validate_report_model(self, base_model):
        """Resolve 'app:model' to a model class, enforcing the report allow-list."""
        app_label, model = base_model.split(':', 1)
        base_model = apps.get_model(app_label.lower(), model.lower())
        extra_data = {}
        if (app_label, model) in self.excluded or base_model in self.excluded:
            self.base_model = None
            raise di_exceptions.ModelNotAllowedException(model=base_model)
        if self.report_models == Allowable.ALL_MODELS:
            return base_model, extra_data
        for opts in self.report_models:
            if opts[:2] == (app_label, model):
                return base_model, extra_data
        self.base_model = None
        raise di_exceptions.ModelNotAllowedException()
    def check_for_forbidden_column(self, column) -> List[str]:
        """Check if column is forbidden for whatever reason, and return the value of it"""
        errors: List[str] = []
        # Check if the column has permission
        if self.has_forbidden_field(column):
            errors.append(
                "Joining tables with the column [{}] is forbidden, this column is removed from the output.".format(
                    column))
        # Check aggregation includes a forbidden column
        if '::' in column:
            check_col = column.split('::', 1)[-1]
            if self.has_forbidden_field(check_col):
                errors.append(
                    "Aggregating tables using the column [{}] is forbidden, this column is removed from the output.".format(
                        column))
        return errors
    def generate_filters(self, filters, annotations, expression_columns):
        """Partition UI filter strings into plain filters, annotation filters,
        'match-all' filters and excludes, extending `annotations` as needed.
        """
        errors = []
        annotation_filters = {}
        _filters = {}
        excludes = {}
        filters_all = {}
        for index, expression in enumerate(filters):
            field, exp, val = clean_filter(normalise_field(expression))
            if self.has_forbidden_field(field):
                errors.append(
                    f"Filtering with the column [{field}] is forbidden, this filter is removed from the output."
                )
                continue
            key = '%s%s' % (field.strip(), exp)
            val = val.strip()
            if val.startswith('~'):
                # ~other_field compares against another column, not a literal.
                val = F(val[1:])
            elif key.endswith('date'):
                val = (val + '-01-01')[:10]  # If we are filtering by a date, make sure its 'date-like'
            elif key.endswith('__isnull'):
                if val.lower() in ['false', 'f', '0']:
                    val = False
                else:
                    val = bool(val)
            if '::' in field:
                # We've got an annotated filter
                agg, f = field.split('::', 1)
                field = 'f%s%s' % (index, field)
                key = 'f%s%s' % (index, key)
                annotations[field] = self.available_aggregations[agg](f, distinct=True)
                annotation_filters[key] = val
            elif key in annotations.keys():
                annotation_filters[key] = val
            elif key.split('__')[0] in expression_columns:
                k = key.split('__')[0]
                if 'date' in k and key.endswith('date') or 'date' in str(annotations[k]):
                    val, period = (val.rsplit(' ', 1) + ['days'])[0:2]
                    # this line is complicated, just in case there is no period or space
                    period = period.rstrip('s')  # remove plurals
                    kwargs = {}
                    if BIG_MULTIPLIERS.get(period, None):
                        kwargs['days'] = int(val) * BIG_MULTIPLIERS[period]
                    elif LITTLE_MULTIPLIERS.get(period, None):
                        kwargs['seconds'] = int(val) * LITTLE_MULTIPLIERS[period]
                    annotation_filters[key] = timedelta(**kwargs)
                else:
                    annotation_filters[key] = val
            elif key.endswith('__all'):
                # Strip the '__all' suffix. The original used rstrip('_all'),
                # which strips a character SET and mangled any field ending in
                # 'a'/'l'/'_' (e.g. 'total__all' -> 'tot').
                key = key[:-len('__all')]
                val = [v for v in val.split(',')]
                filters_all[key] = val
            else:
                exclude = key.endswith('!')
                if exclude:
                    key = key[:-1]
                if key.endswith('__in'):
                    val = [v for v in val.split(',')]
                if exclude:
                    excludes[key] = val
                else:
                    _filters[key] = val
        # _filters.update(**annotation_filters)
        return filters_all, _filters, annotation_filters, annotations, expression_columns, excludes
    def get_model_restriction(self, model):
        """Extra mandatory filters for a model; subclasses override."""
        return {}
    def get_model_restriction_filters(self, column) -> bool:
        """Collect restriction filters for every model joined along `column`."""
        checking_model = self.base_model
        restriction_filters = {}
        joins = column.split('__')
        for i, relation in enumerate(joins):
            try:
                attr = self.get_field_by_name(checking_model, relation)
                if isinstance(attr, JSONField):
                    # This is safe as you can't foreign key out of a JSONField
                    break
                if attr.related_model:
                    if restriction := self.get_model_restriction(attr.related_model):
                        for k, v in restriction.items():
                            # Re-root the restriction at the join point.
                            joined_rest = "__".join(joins[:i+1]) + "__" + k
                            restriction_filters[joined_rest] = v
                    checking_model = attr.related_model
            except exceptions.FieldDoesNotExist:
                pass
        return restriction_filters
    def generate_queryset(self, base_model, columns=None, filters=None, order_by=None, limit=None, offset=0):
        """Assemble the queryset: validate the model, build annotations and
        value columns, apply filters/excludes/ordering/limits.

        Returns (queryset, errors, output_columns, base_model_data).
        """
        errors = []
        annotation_filters = {}
        self.base_model, base_model_data = self.validate_report_model(base_model)
        wrap_sheets = base_model_data.get('wrap_sheets', {})
        annotations = self.get_base_annotations()
        expression_columns = []
        output_columns = []
        query_columns = []
        query_columns_exp = {}
        model_restriction_filters = {}
        model_restriction_filters.update(self.get_model_restriction(self.base_model))
        # Build columns
        for column in columns:
            var_name = None
            if column == "":
                # If the field is empty, don't do anything
                continue
            if ':=' in column:
                # 'alias:=expression' names the output column explicitly.
                var_name, column = column.split(':=', 1)
            # Map names in UI to django functions
            column = normalise_field(column)
            if var_name is None:
                var_name = column
            # Check if the column has permission
            column_permission_errors = self.check_for_forbidden_column(column)
            if column_permission_errors:
                # If there are permission errors, add to error list, and don't continue
                errors.extend(column_permission_errors)
                continue
            # Build columns
            if column.startswith(tuple([a + '::' for a in self.available_aggregations.keys()])):
                annotations[var_name] = self.get_annotation(column)
            elif any(s in column for s in math_infix_symbols.keys()):
                # normalise_math is the module-level helper; the original
                # called self.normalise_math, which does not exist on the
                # class and raised AttributeError for any math column.
                annotations[var_name] = normalise_math(column)
                expression_columns.append(var_name)
            else:
                if column in wrap_sheets.keys():
                    cols = wrap_sheets.get(column).get('columns', [])
                    query_columns = query_columns + cols
                else:
                    if var_name == column:
                        if self.is_translatable(column):
                            # Wrap in L() and tag with LEXP; cleaned off later.
                            query_columns_exp.update({f"{LEXP}{var_name}": L(var_name)})
                        else:
                            query_columns.append(var_name)
                    else:
                        annotations[var_name] = F(column)
                model_restriction_filters.update(self.get_model_restriction_filters(column))
            output_columns.append(var_name)
        rows = self.get_model_queryset()
        # Generate filters
        filters_all, _filters, annotation_filters, annotations, expression_columns, excludes = self.generate_filters(
            filters=filters,
            annotations=annotations,
            expression_columns=expression_columns
        )
        rows = rows.filter(**_filters)
        for key, val in filters_all.items():
            # '__all' semantics: the row must match every value, so chain filters.
            for v in val:
                rows = rows.filter(**{key: v})
        rows = rows.exclude(**excludes)
        if model_restriction_filters:
            rows = rows.filter(**model_restriction_filters)
        rows = rows.values(*query_columns, **query_columns_exp)
        if annotations:
            rows = rows.annotate(**annotations)
            rows = rows.filter(**annotation_filters)
        if order_by:
            ordering = map(normalise_field, order_by)
            rows = rows.order_by(*ordering)
        if limit:
            lim = abs(int(limit))
            rows = rows[offset:lim]
        return rows, errors, output_columns, base_model_data
    def interrogate(self, base_model, columns=None, filters=None, order_by=None, limit=None, offset=0):
        """Run the query and translate every failure mode into a user-facing
        error message.

        Returns {'rows', 'count', 'columns', 'errors', 'base_model'} --
        rows are deduplicated dicts with internal LEXP prefixes stripped.
        """
        if order_by is None: order_by = []
        if filters is None: filters = []
        if columns is None: columns = []
        errors = []
        base_model_data = {}
        output_columns = []
        count = 0
        rows = []
        try:
            rows, errors, output_columns, base_model_data = self.generate_queryset(
                base_model, columns, filters, order_by, limit, offset
            )
            if errors:
                rows = rows.none()
            rows = list(rows)  # Force a database hit to check the in database state
            _rows = []
            for row in rows:
                # Deduplicate while stripping the internal LEXP key prefix.
                if row not in _rows:
                    row = {
                        clean_lexp_key(k): v
                        for k, v in row.items()
                    }
                    _rows.append(row)
            rows = _rows
            count = len(rows)
        except di_exceptions.InvalidAnnotationError as e:
            errors.append(e)
        except ValueError as e:
            rows = []
            # NOTE(review): `limit < 1` assumes limit is numeric; a non-numeric
            # string falls into this handler and raises TypeError here -- confirm.
            if limit is None:
                errors.append("Limit must be a number")
            elif limit < 1:
                errors.append("Limit must be a number greater than zero")
            else:
                errors.append("Something went wrong - %s" % e)
        except IndexError as e:
            rows = []
            errors.append("No rows returned for your query, try broadening your search.")
        except exceptions.FieldError as e:
            rows = []
            if str(e).startswith('Cannot resolve keyword'):
                field = str(e).split("'")[1]
                errors.append("The requested field '%s' was not found in the database." % field)
            else:
                errors.append("An error was found with your query:\n%s" % e)
        except Exception as e:
            rows = []
            errors.append("Something went wrong - %s" % e)
        return {
            'rows': rows, 'count': count, 'columns': output_columns, 'errors': errors,
            'base_model': base_model_data,
            # 'query': query # DEBUG Tool
        }
class PivotInterrogator(Interrogator):
    """Interrogator variant that cross-tabulates the first two columns,
    annotating each cell with a count plus the requested aggregators."""
    def __init__(self, aggregators, **kwargs):
        super().__init__(**kwargs)
        # UI aggregator strings, e.g. 'sum(field)'.
        self.aggregators = aggregators
    def get_base_annotations(self):
        """Annotate every permitted aggregator plus a per-cell row count."""
        aggs = {
            x: self.get_annotation(normalise_field(x)) for x in self.aggregators
            if not self.has_forbidden_field(column=x)
        }
        aggs.update({"cell": Count(1)})
        return aggs
    def pivot(self):
        """Build a {row_value: {col_value: {'count', 'aggs'}}} cross-tab."""
        # Only accept the first two valid columns
        self.columns = [normalise_field(c) for c in self.columns if not self.has_forbidden_field(column=c)][:2]
        # NOTE(review): self.columns must be assigned by the caller beforehand,
        # interrogate() is called without its required base_model argument, and
        # interrogate() returns no 'headers' key (used below) -- this method
        # cannot run as written; confirm against the calling views.
        data = self.interrogate()
        out_rows = {}
        col_head = self.base_model.objects.values(self.columns[0]).order_by(self.columns[0]).distinct()
        x, y = self.columns[:2]
        from collections import OrderedDict
        # One slot per distinct column value, pre-filled with zero counts.
        default = OrderedDict([(c[x], {'count': 0}) for c in col_head])
        for r in data['rows']:
            this_row = out_rows.get(r[y], default.copy())
            this_row[r[x]] = {'count': r['cell'],
                              'aggs': [(k, v) for k, v in r.items() if k not in ['cell', x, y]]
                              }
            out_rows[r[y]] = this_row
        return {
            'rows': out_rows, 'col_head': col_head, 'errors': data['errors'],
            'base_model': data['base_model'], 'headers': data['headers']
        }
from datetime import timedelta
from enum import Enum
from typing import Union, Tuple, List
from django.apps import apps
from django.core import exceptions
from django.conf import settings
from django.db.models import F, Count, Min, Max, Sum, Value, Avg, ExpressionWrapper, DurationField, FloatField, Model, JSONField
from django.db.models import functions as func
from data_interrogator import exceptions as di_exceptions
from data_interrogator.db import GroupConcat, DateDiff, ForceDate, SumIf
# Optional dependency: django-garnett adds per-language field translation.
# When it is missing we simply disable translation-aware column handling.
try:
    from garnett.expressions import L
    from garnett.fields import TranslatedField
    GARNETT_ENABLED = True
except ImportError:
    # A bare ``except:`` would also swallow SystemExit/KeyboardInterrupt;
    # only a missing package should disable the feature.
    GARNETT_ENABLED = False
# Utility functions
# Utility functions
# Infix operators accepted in UI expressions, mapped to the equivalent
# binary operation on Django F()/expression objects.
math_infix_symbols = {
    '-': lambda a, b: a - b,
    '+': lambda a, b: a + b,
    '/': lambda a, b: a / b,
    '*': lambda a, b: a * b,
}
# Large unit multipliers to filter across
# Days per named period, used when filtering duration expressions.
BIG_MULTIPLIERS = {
    'day': 1,
    'week': 7,
    'fortnight': 14,
    'month': 30,  # close enough
    'year': 365,
    'decade': 10 * 365,
}
# Small unit multipliers to filter across
# Seconds per named period.
LITTLE_MULTIPLIERS = {
    'second': 1,
    'minute': 60,
    'hour': 60 * 60,
    'microfortnight': 1.2,  # sure why not?
}
# Internal prefix marking language-expression (garnett) columns in .values();
# stripped again by clean_lexp_key() before rows are returned to callers.
LEXP = "l_exp____"
def get_base_model(app_label: str, model: str) -> Model:
    """Return the model class for ``app_label``/``model`` (case-insensitive)."""
    return apps.get_model(app_label.lower(), model.lower())
def normalise_field(text) -> str:
    """Translate a UI field expression into backend Django syntax.

    ``agg(field)`` becomes ``agg::field`` and dotted paths become
    ``__``-separated lookups; surrounding whitespace is dropped.
    """
    cleaned = text.strip()
    for ui_token, django_token in (('(', '::'), (')', ''), ('.', '__')):
        cleaned = cleaned.replace(ui_token, django_token)
    return cleaned
def clean_lexp_key(key):
    """Strip the internal language-expression prefix from a result key."""
    return key[len(LEXP):] if key.startswith(LEXP) else key
def normalise_math(expression):
    """Turn a UI math expression into a Django expression object.

    A plain field name becomes ``F(field)``.  ``a <op> b`` becomes an
    ``ExpressionWrapper`` combining ``F(a)`` and ``F(b)``; subtracting two
    ``*date`` fields is special-cased to a ``DateDiff`` with a
    ``DurationField`` output, everything else gets a ``FloatField``.
    Only the first operator is honoured; chained expressions are not
    supported.
    """
    if not any(symbol in expression for symbol in math_infix_symbols.keys()):
        # we're aggregating some mathy things, these are tricky
        return F(normalise_field(expression))
    # Raw string: in the original plain string, '\-' '\/' '\+' '\*' are
    # invalid escape sequences (DeprecationWarning on modern Python).
    math_operator_re = r'[-/+*]'
    a, b = [v.strip() for v in re.split(math_operator_re, expression, maxsplit=1)]
    first_operator = re.findall(math_operator_re, expression)[0]
    if first_operator == "-" and a.endswith('date') and b.endswith('date'):
        # date - date: coerce both sides to dates and diff as a duration.
        expr = ExpressionWrapper(
            DateDiff(
                ForceDate(F(a)),
                ForceDate(F(b))
            ), output_field=DurationField()
        )
    else:
        expr = ExpressionWrapper(
            math_infix_symbols[first_operator](F(a), F(b)),
            output_field=FloatField()
        )
    return expr
def clean_filter(text: str) -> Union[str, Tuple[str, str, str]]:
    """Split a UI filter expression into ``(field, django_suffix, value)``.

    Comparison operators map onto Django lookup suffixes (``<=`` ->
    ``__lte`` etc.; plain ``=`` -> empty suffix).  Longer operators are
    tried first so ``<=`` is never mistaken for ``<``.  The operator must
    occur exactly once; otherwise the text is returned unchanged.
    """
    operator_map = (
        ('<>', '__ne'), ('<=', '__lte'), ('<', '__lt'),
        ('>=', '__gte'), ('>', '__gt'), ('=', ''),
    )
    for ui_operator, django_suffix in operator_map:
        pieces = text.split(ui_operator)
        if len(pieces) == 2:
            return pieces[0], django_suffix, pieces[1]
    return text
class Allowable(Enum):
    """Sentinels describing what an interrogator may query.

    The original assigned 1 to both ``ALL_APPS`` and ``ALL_MODELS``, which
    makes ``ALL_APPS`` an *alias* of ``ALL_MODELS`` (duplicate Enum values
    create aliases), so comparisons that try to distinguish the two — e.g.
    the ``!= Allowable.ALL_APPS`` branch in ``Interrogator.__init__`` —
    always take the same path.  Distinct values restore the intended
    three-way distinction.
    """
    ALL_APPS = 1
    ALL_MODELS = 2
    ALL_FIELDS = 3
class Interrogator:
    """Builds and runs ad-hoc queryset reports from UI-supplied strings."""
    # Aggregation keyword (as typed in the UI) -> Django expression class.
    available_aggregations = {
        "min": Min,
        "max": Max,
        "sum": Sum,
        'avg': Avg,
        "count": Count,
        "substr": func.Substr,
        "group": GroupConcat,
        "concat": func.Concat,
        "sumif": SumIf,
    }
    # NOTE(review): mutable class attribute shared by every instance; the
    # query methods use local ``errors`` lists instead — confirm this is
    # ever read before relying on it.
    errors = []
    # Which models may be offered as report bases (Allowable sentinel or list).
    report_models = Allowable.ALL_MODELS
    # both of these are lists of either:
    # ('app_label',)
    # ('app_label', 'model_name')
    # Not this yet: ('app_label', 'model_name', ['list of field names'])
    allowed = Allowable.ALL_MODELS
    excluded = []
def __init__(self, report_models=None, allowed=None, excluded=None):
    """Optionally override the class-level report/allow/exclude rules.

    Each argument, when given, replaces the corresponding class attribute
    for this instance; ``excluded`` rules are lower-cased so later
    membership checks match case-insensitively.
    """
    if report_models is not None:
        self.report_models = report_models
    if allowed is not None:
        self.allowed = allowed
    if excluded is not None:
        self.excluded = excluded
    # Clean up rules if they aren't lower cased.
    fixed_excluded = []
    for rule in self.excluded:
        if len(rule) == 1:
            rule = (rule[0].lower(),)
        if len(rule) == 2:
            rule = (rule[0].lower(), rule[1].lower())
        if len(rule) == 3:
            rule = (rule[0].lower(), rule[1].lower(), rule[2])
        fixed_excluded.append(rule)
    self.excluded = fixed_excluded
    if self.allowed != Allowable.ALL_MODELS:
        # NOTE(review): for a plain-string entry ``i[0]`` is its first
        # *character*, not the app label — confirm string entries are
        # really intended here.  Also iterates the ``allowed`` argument,
        # not ``self.allowed`` — fails if only the class attribute is set.
        self.allowed_apps = [
            i[0] for i in allowed
            if type(i) is str or type(i) is tuple and len(i) == 1
        ]
        if self.allowed != Allowable.ALL_APPS:
            self.allowed_models = [
                i[:2] for i in allowed
                if type(i) is tuple and len(i) == 2
            ]
        else:
            # NOTE(review): unreachable while ALL_APPS aliases ALL_MODELS
            # in the original Allowable enum — confirm intent.
            self.allowed_models = Allowable.ALL_MODELS
def get_model_queryset(self):
    """Base queryset for the currently selected report model."""
    return self.base_model.objects.all()

def process_annotation_concat(self, column):
    """Subclass hook; no-op here."""
    pass

def process_annotation(self, column):
    """Subclass hook; no-op here."""
    pass

def is_allowed_model(self, model):
    """Subclass hook; no-op (returns None) here."""
    pass
def verify_column(self, column):
    """Walk a ``__``-separated lookup path, following relations.

    Raises IndexError when a path component is not a field on the
    current model (the ``[0]`` below fails); callers treat that as an
    invalid column.
    """
    model = self.base_model
    args = column.split('__')
    for a in args:
        if model:
            # If there is no model, its not a foreign key, so its safe as its either
            # a transform or a key JSON lookup.
            model = [f for f in model._meta.get_fields() if f.name == a][0].related_model

def get_field_by_name(self, model, field_name):
    """Return the named field of ``model`` (may raise FieldDoesNotExist)."""
    return model._meta.get_field(field_name)
def is_excluded_field(self, model, field) -> bool:
    """
    Accepts model and field object
    TODO: currently we're not doing per field permission checks, add this later
    """
    # Always allowed until per-field permissions are implemented.
    return False
def is_excluded_model(self, model_class) -> bool:
    """Return True when ``model_class`` may not appear in reports."""
    meta = model_class._meta
    app_label = meta.app_label
    model_name = meta.model_name
    # ContentType is always visible, regardless of the configured rules.
    if model_name == 'contenttype':
        return False
    # An explicit exclusion (by app, or by app+model pair) always wins.
    if app_label in self.excluded or (app_label, model_name) in self.excluded:
        return True
    # No allow-list configured: everything not excluded is permitted.
    if self.allowed == Allowable.ALL_MODELS:
        return False
    # Otherwise either the app or the (app, model) pair must be allow-listed.
    allowed_here = app_label in self.allowed or (app_label, model_name) in self.allowed
    return not allowed_here
def has_forbidden_field(self, column) -> bool:
    """Return whether a forbidden field exists in the query"""
    checking_model = self.base_model
    joins = column.split('__')
    # enumerate's index is unused here; kept for parity with is_translatable.
    for _, relation in enumerate(joins):
        if checking_model:
            try:
                field = self.get_field_by_name(checking_model, relation)
                if isinstance(field, JSONField):
                    # This is safe as you can't foreign key out of a JSONField
                    return False
                if field.related_model:
                    if self.is_excluded_model(field.related_model):
                        # Despite the join/field being named differently, this column is forbidden!
                        return True
                if self.is_excluded_field(checking_model, field):
                    # Despite the join/field being named differently, this column is forbidden!
                    return True
                checking_model = field.related_model
            except exceptions.FieldDoesNotExist:
                # Not a concrete field (lookup/transform component); skip it.
                pass
    return False
def is_translatable(self, column) -> bool:
    """Return whether the *final* component of ``column`` is a garnett
    TranslatedField (always False when garnett is not installed).

    Note: the original docstring was a copy-paste of has_forbidden_field's.
    """
    if not GARNETT_ENABLED:
        return False
    checking_model = self.base_model
    joins = list(enumerate(column.split('__')))
    for i, relation in joins:
        if checking_model:
            try:
                field = self.get_field_by_name(checking_model, relation)
                if isinstance(field, TranslatedField):
                    # Translatable only when it is the last path component
                    # (you cannot join *through* a translated field).
                    return i == len(joins) - 1
                checking_model = field.related_model
            except exceptions.FieldDoesNotExist:
                pass
    return False
def get_base_annotations(self):
    """Annotations every query starts with; subclasses extend this."""
    return {}

def get_annotation(self, column):
    """Build the aggregation expression for an ``agg::field`` column.

    Raises InvalidAnnotationError when a ``sumif`` has no condition part.
    """
    agg, field = column.split('::', 1)
    if agg == 'sumif':
        try:
            field, cond = field.split(',', 1)
        except ValueError:
            # Fix: was a bare ``except:`` which also hid programming
            # errors; only the missing-comma case means "no condition".
            raise di_exceptions.InvalidAnnotationError("SUMIF must have a condition")
        field = normalise_math(field)
        conditions = {}
        for condition in cond.split(','):
            condition_key, condition_val = condition.split('=', 1)
            conditions[normalise_field(condition_key)] = normalise_field(condition_val)
        annotation = self.available_aggregations[agg](field=field, **conditions)
    elif agg == 'join':
        # NOTE(review): 'join' is not a key of available_aggregations on
        # this class — confirm subclasses register it before use.
        fields = []
        for f in field.split(','):
            if f.startswith(('"', "'")):
                # its a string!
                fields.append(Value(f.strip('"').strip("'")))
            else:
                fields.append(f)
        annotation = self.available_aggregations[agg](*fields)
    elif agg == "substr":
        # Pad with None so a missing length argument is tolerated.
        field, i, j = (field.split(',') + [None])[0:3]
        annotation = self.available_aggregations[agg](field, i, j)
    else:
        field = normalise_math(field)
        annotation = self.available_aggregations[agg](field, distinct=False)
    return annotation
def validate_report_model(self, base_model):
    """Resolve an ``'app:model'`` string to a model class, enforcing rules.

    Returns (model_class, extra_data); raises ModelNotAllowedException
    when the model may not be reported on.  Also clears ``self.base_model``
    on failure.
    """
    app_label, model = base_model.split(':', 1)
    base_model = apps.get_model(app_label.lower(), model.lower())
    extra_data = {}
    # NOTE(review): ``base_model in self.excluded`` compares a model class
    # against (app, model) tuples — confirm class entries are supported.
    if (app_label, model) in self.excluded or base_model in self.excluded:
        self.base_model = None
        raise di_exceptions.ModelNotAllowedException(model=base_model)
    if self.report_models == Allowable.ALL_MODELS:
        return base_model, extra_data
    for opts in self.report_models:
        # Match on the (app_label, model_name) prefix of each rule entry.
        if opts[:2] == (app_label, model):
            return base_model, extra_data
    self.base_model = None
    raise di_exceptions.ModelNotAllowedException()
def check_for_forbidden_column(self, column) -> List[str]:
    """Return human-readable errors when ``column`` touches forbidden fields."""
    problems: List[str] = []
    # The join path itself must be permitted.
    if self.has_forbidden_field(column):
        problems.append(
            "Joining tables with the column [{}] is forbidden, this column is removed from the output.".format(
                column))
    # For aggregations, also vet the field being aggregated over.
    if '::' in column:
        aggregated_field = column.split('::', 1)[-1]
        if self.has_forbidden_field(aggregated_field):
            problems.append(
                "Aggregating tables using the column [{}] is forbidden, this column is removed from the output.".format(
                    column))
    return problems
def generate_filters(self, filters, annotations, expression_columns):
    """Translate UI filter expressions into queryset filter dictionaries.

    Returns ``(filters_all, filters, annotation_filters, annotations,
    expression_columns, excludes)``; ``annotations`` may gain synthetic
    ``f<index>...`` entries for filters on aggregations.

    NOTE(review): forbidden-filter messages are collected in a local
    ``errors`` list that is never returned — confirm whether they should
    be surfaced (changing the return shape would break callers).
    """
    errors = []
    annotation_filters = {}
    _filters = {}
    excludes = {}
    filters_all = {}
    for index, expression in enumerate(filters):
        field, exp, val = clean_filter(normalise_field(expression))
        if self.has_forbidden_field(field):
            errors.append(
                f"Filtering with the column [{field}] is forbidden, this filter is removed from the output."
            )
            continue
        key = '%s%s' % (field.strip(), exp)
        val = val.strip()
        if val.startswith('~'):
            # ~other_field compares against another column, not a literal.
            val = F(val[1:])
        elif key.endswith('date'):
            val = (val + '-01-01')[:10]  # If we are filtering by a date, make sure its 'date-like'
        elif key.endswith('__isnull'):
            if val.lower() in ['false', 'f', '0']:
                val = False
            else:
                val = bool(val)
        if '::' in field:
            # We've got an annotated filter
            agg, f = field.split('::', 1)
            field = 'f%s%s' % (index, field)
            key = 'f%s%s' % (index, key)
            annotations[field] = self.available_aggregations[agg](f, distinct=True)
            annotation_filters[key] = val
        elif key in annotations.keys():
            annotation_filters[key] = val
        elif key.split('__')[0] in expression_columns:
            k = key.split('__')[0]
            if 'date' in k and key.endswith('date') or 'date' in str(annotations[k]):
                val, period = (val.rsplit(' ', 1) + ['days'])[0:2]
                # this line is complicated, just in case there is no period or space
                period = period.rstrip('s')  # remove plurals
                kwargs = {}
                if BIG_MULTIPLIERS.get(period, None):
                    kwargs['days'] = int(val) * BIG_MULTIPLIERS[period]
                elif LITTLE_MULTIPLIERS.get(period, None):
                    kwargs['seconds'] = int(val) * LITTLE_MULTIPLIERS[period]
                annotation_filters[key] = timedelta(**kwargs)
            else:
                annotation_filters[key] = val
        elif key.endswith('__all'):
            # BUG FIX: ``key.rstrip('_all')`` strips *characters* from the
            # set {'_', 'a', 'l'}, eating the tail of field names such as
            # ``animal__all`` -> ``anim``; slice off exactly the suffix.
            key = key[:-len('__all')]
            val = [v for v in val.split(',')]
            filters_all[key] = val
        else:
            # A trailing '!' on the key marks an exclusion filter.
            exclude = key.endswith('!')
            if exclude:
                key = key[:-1]
            if key.endswith('__in'):
                val = [v for v in val.split(',')]
            if exclude:
                excludes[key] = val
            else:
                _filters[key] = val
    # _filters.update(**annotation_filters)
    return filters_all, _filters, annotation_filters, annotations, expression_columns, excludes
def get_model_restriction(self, model):
    """Extra mandatory filters for ``model``; subclass hook, none by default."""
    return {}

def get_model_restriction_filters(self, column) -> dict:
    """Collect restriction filters for every model joined through ``column``.

    For each relation hop, any restriction declared for the joined model
    is re-rooted onto the join path so it can be applied to the base
    queryset.  (Original docstring/annotation wrongly described a bool.)
    """
    checking_model = self.base_model
    restriction_filters = {}
    joins = column.split('__')
    for i, relation in enumerate(joins):
        try:
            attr = self.get_field_by_name(checking_model, relation)
            if isinstance(attr, JSONField):
                # This is safe as you can't foreign key out of a JSONField
                break
            if attr.related_model:
                if restriction := self.get_model_restriction(attr.related_model):
                    for k, v in restriction.items():
                        # Prefix the restriction key with the join path so far.
                        joined_rest = "__".join(joins[:i+1]) + "__" + k
                        restriction_filters[joined_rest] = v
                checking_model = attr.related_model
        except exceptions.FieldDoesNotExist:
            pass
    return restriction_filters
def generate_queryset(self, base_model, columns=None, filters=None, order_by=None, limit=None, offset=0):
    """Build (but do not execute) the report queryset.

    Returns ``(rows, errors, output_columns, base_model_data)``.
    ``columns`` entries may be aggregations (``agg::field``), math
    expressions, aliased columns (``alias:=field``) or plain lookups.
    """
    errors = []
    annotation_filters = {}
    self.base_model, base_model_data = self.validate_report_model(base_model)
    wrap_sheets = base_model_data.get('wrap_sheets', {})
    annotations = self.get_base_annotations()
    expression_columns = []
    output_columns = []
    query_columns = []
    query_columns_exp = {}
    model_restriction_filters = {}
    model_restriction_filters.update(self.get_model_restriction(self.base_model))
    for column in columns:
        var_name = None
        if column == "":
            # If the field is empty, don't do anything
            continue
        if ':=' in column:
            var_name, column = column.split(':=', 1)
        # Map names in UI to django functions
        column = normalise_field(column)
        if var_name is None:
            var_name = column
        # Check if the column has permission
        column_permission_errors = self.check_for_forbidden_column(column)
        if column_permission_errors:
            # If there are permission errors, add to error list, and don't continue
            errors.extend(column_permission_errors)
            continue
        # Build columns
        if column.startswith(tuple([a + '::' for a in self.available_aggregations.keys()])):
            annotations[var_name] = self.get_annotation(column)
        elif any(s in column for s in math_infix_symbols.keys()):
            # BUG FIX: ``normalise_math`` is a module-level function, not a
            # method; ``self.normalise_math(column)`` raised AttributeError
            # (silently masked by the broad except in ``interrogate``).
            annotations[var_name] = normalise_math(column)
            expression_columns.append(var_name)
        else:
            if column in wrap_sheets.keys():
                cols = wrap_sheets.get(column).get('columns', [])
                query_columns = query_columns + cols
            else:
                if var_name == column:
                    if self.is_translatable(column):
                        # Wrap translatable fields so the active language is used.
                        query_columns_exp.update({f"{LEXP}{var_name}": L(var_name)})
                    else:
                        query_columns.append(var_name)
                else:
                    # Aliased plain column: expose it under the alias.
                    annotations[var_name] = F(column)
        model_restriction_filters.update(self.get_model_restriction_filters(column))
        output_columns.append(var_name)
    rows = self.get_model_queryset()
    # Generate filters
    filters_all, _filters, annotation_filters, annotations, expression_columns, excludes = self.generate_filters(
        filters=filters,
        annotations=annotations,
        expression_columns=expression_columns
    )
    rows = rows.filter(**_filters)
    for key, val in filters_all.items():
        # __all semantics: the row must match *every* value, so AND filters.
        for v in val:
            rows = rows.filter(**{key: v})
    rows = rows.exclude(**excludes)
    if model_restriction_filters:
        rows = rows.filter(**model_restriction_filters)
    rows = rows.values(*query_columns, **query_columns_exp)
    if annotations:
        rows = rows.annotate(**annotations)
        rows = rows.filter(**annotation_filters)
    if order_by:
        ordering = map(normalise_field, order_by)
        rows = rows.order_by(*ordering)
    if limit:
        # NOTE(review): slice is [offset:limit], so a non-zero offset does
        # not move the window end — confirm [offset:offset+limit] was not
        # intended before changing caller-visible behaviour.
        lim = abs(int(limit))
        rows = rows[offset:lim]
    return rows, errors, output_columns, base_model_data
def interrogate(self, base_model, columns=None, filters=None, order_by=None, limit=None, offset=0):
    """Run a report and return a JSON-friendly result dict.

    Returns ``{'rows', 'count', 'columns', 'errors', 'base_model'}``;
    query/validation problems become messages in ``errors`` rather than
    being raised.
    """
    if order_by is None: order_by = []
    if filters is None: filters = []
    if columns is None: columns = []
    errors = []
    base_model_data = {}
    output_columns = []
    count = 0
    rows = []
    try:
        rows, errors, output_columns, base_model_data = self.generate_queryset(
            base_model, columns, filters, order_by, limit, offset
        )
        if errors:
            # Permission problems: report them but return no data.
            rows = rows.none()
        rows = list(rows)  # Force a database hit to check the in database state
        _rows = []
        for row in rows:
            # NOTE(review): membership is tested on the raw row, but stored
            # rows have cleaned keys — duplicates are only caught when no
            # LEXP-prefixed keys are present; confirm this is acceptable.
            if row not in _rows:
                row = {
                    clean_lexp_key(k):v
                    for k, v in row.items()
                }
                _rows.append(row)
        rows = _rows
        count = len(rows)
    except di_exceptions.InvalidAnnotationError as e:
        # NOTE(review): appends the exception object, unlike the string
        # messages below — confirm downstream rendering handles it.
        errors.append(e)
    except ValueError as e:
        rows = []
        if limit is None:
            errors.append("Limit must be a number")
        # NOTE(review): if limit is a non-numeric string this comparison
        # raises TypeError (str < int) — confirm limit is validated upstream.
        elif limit < 1:
            errors.append("Limit must be a number greater than zero")
        else:
            errors.append("Something went wrong - %s" % e)
    except IndexError as e:
        rows = []
        errors.append("No rows returned for your query, try broadening your search.")
    except exceptions.FieldError as e:
        rows = []
        if str(e).startswith('Cannot resolve keyword'):
            # Pull the offending field name out of Django's message.
            field = str(e).split("'")[1]
            errors.append("The requested field '%s' was not found in the database." % field)
        else:
            errors.append("An error was found with your query:\n%s" % e)
    except Exception as e:
        rows = []
        errors.append("Something went wrong - %s" % e)
    return {
        'rows': rows, 'count': count, 'columns': output_columns, 'errors': errors,
        'base_model': base_model_data,
        # 'query': query # DEBUG Tool
    }
class PivotInterrogator(Interrogator):
    """Interrogator that cross-tabulates two columns, with per-cell aggregates."""
    def __init__(self, aggregators, **kwargs):
        # aggregators: UI aggregate expressions computed for every pivot cell.
        super().__init__(**kwargs)
        self.aggregators = aggregators

    def get_base_annotations(self):
        """Per-cell aggregate annotations plus a 'cell' row count."""
        aggs = {
            x: self.get_annotation(normalise_field(x)) for x in self.aggregators
            if not self.has_forbidden_field(column=x)
        }
        aggs.update({"cell": Count(1)})
        return aggs

    def pivot(self):
        """Cross-tabulate the first two valid columns.

        NOTE(review): several apparent inconsistencies with the parent —
        ``self.columns`` is never assigned elsewhere in this file,
        ``interrogate()`` is called without its required ``base_model``
        argument, and the parent's result dict has no 'headers' key —
        confirm against the calling code before use.
        """
        # Only accept the first two valid columns
        self.columns = [normalise_field(c) for c in self.columns if not self.has_forbidden_field(column=c)][:2]
        data = self.interrogate()
        out_rows = {}
        # Distinct values of the x-axis column become the output columns.
        col_head = self.base_model.objects.values(self.columns[0]).order_by(self.columns[0]).distinct()
        x, y = self.columns[:2]
        from collections import OrderedDict
        default = OrderedDict([(c[x], {'count': 0}) for c in col_head])
        for r in data['rows']:
            this_row = out_rows.get(r[y], default.copy())
            this_row[r[x]] = {'count': r['cell'],
                              'aggs': [(k, v) for k, v in r.items() if k not in ['cell', x, y]]
                              }
            out_rows[r[y]] = this_row
        return {
            'rows': out_rows, 'col_head': col_head, 'errors': data['errors'],
            'base_model': data['base_model'], 'headers': data['headers']
        }
"""Adapter related object holder."""
import logging
from compass.db.api import adapter as adapter_api
from compass.db.api import database
from compass.db.api import permission
from compass.db.api import user as user_api
from compass.db.api import utils
from compass.db import exception
# Query-string filter fields accepted by list_adapters.
SUPPORTED_FIELDS = [
    'name',
]
# Adapter fields exposed in API responses.
RESP_FIELDS = [
    'id', 'name', 'roles', 'flavors',
    'os_installer', 'package_installer',
    'supported_oses', 'display_name', 'health_check_cmd'
]
# Supported-OS fields exposed in API responses.
RESP_OS_FIELDS = [
    'id', 'name', 'os_id'
]
# Role fields exposed in API responses.
RESP_ROLES_FIELDS = [
    'id', 'name', 'display_name', 'description', 'optional'
]
# Flavor fields exposed in API responses.
RESP_FLAVORS_FIELDS = [
    'id', 'adapter_id', 'adapter_name', 'name', 'display_name',
    'template', 'roles'
]
# Lazily-populated in-memory caches; see load_adapters()/load_flavors().
ADAPTER_MAPPING = None
FLAVOR_MAPPING = None
def load_adapters(force_reload=False):
    """Populate the adapter cache on first use (or when forced)."""
    global ADAPTER_MAPPING
    if ADAPTER_MAPPING is not None and not force_reload:
        return
    logging.info('load adapters into memory')
    ADAPTER_MAPPING = adapter_api.get_adapters_internal(
        force_reload=force_reload
    )

def load_flavors(force_reload=False):
    """Populate the flavor cache, keyed '<adapter_name>:<flavor_name>'."""
    global FLAVOR_MAPPING
    if FLAVOR_MAPPING is not None and not force_reload:
        return
    logging.info('load flavors into memory')
    FLAVOR_MAPPING = {}
    adapters_flavors = adapter_api.get_flavors_internal(
        force_reload=force_reload
    )
    for adapter_name, adapter_flavors in adapters_flavors.items():
        for flavor_name, flavor in adapter_flavors.items():
            FLAVOR_MAPPING['%s:%s' % (adapter_name, flavor_name)] = flavor
def _filter_adapters(adapter_config, filter_name, filter_value):
if filter_name not in adapter_config:
return False
if isinstance(filter_value, list):
return bool(
adapter_config[filter_name] in filter_value
)
elif isinstance(filter_value, dict):
return all([
_filter_adapters(
adapter_config[filter_name],
sub_filter_key, sub_filter_value
)
for sub_filter_key, sub_filter_value in filter_value.items()
])
else:
return adapter_config[filter_name] == filter_value
@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_ADAPTERS
)
@utils.output_filters(name=utils.general_filter_callback)
@utils.wrap_to_dict(
    RESP_FIELDS,
    supported_oses=RESP_OS_FIELDS,
    roles=RESP_ROLES_FIELDS,
    flavors=RESP_FLAVORS_FIELDS
)
def list_adapters(user=None, session=None, **filters):
    """list adapters."""
    # Cache is filled lazily; the decorators handle session management,
    # permission checks, name filtering and response serialization.
    load_adapters()
    return ADAPTER_MAPPING.values()
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_ADAPTERS
)
@utils.wrap_to_dict(
    RESP_FIELDS,
    supported_oses=RESP_OS_FIELDS,
    roles=RESP_ROLES_FIELDS,
    flavors=RESP_FLAVORS_FIELDS
)
def get_adapter(adapter_id, user=None, session=None, **kwargs):
    """Return the cached adapter with ``adapter_id``.

    Raises RecordNotExists when the id is unknown.
    """
    load_adapters()
    if adapter_id not in ADAPTER_MAPPING:
        # Fix: user-facing message previously misspelled 'adpater'.
        raise exception.RecordNotExists(
            'adapter %s does not exist' % adapter_id
        )
    return ADAPTER_MAPPING[adapter_id]
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_METADATAS
)
@utils.wrap_to_dict(RESP_FLAVORS_FIELDS)
def list_flavors(user=None, session=None, **filters):
    """List flavors."""
    # Serve straight from the lazily-built cache.
    load_flavors()
    return FLAVOR_MAPPING.values()

@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
    permission.PERMISSION_LIST_METADATAS
)
@utils.wrap_to_dict(RESP_FLAVORS_FIELDS)
def get_flavor(flavor_id, user=None, session=None, **kwargs):
    """Get flavor.

    ``flavor_id`` is '<adapter_name>:<flavor_name>'; raises
    RecordNotExists when unknown.
    """
    load_flavors()
    if flavor_id not in FLAVOR_MAPPING:
        raise exception.RecordNotExists(
            'flavor %s does not exist' % flavor_id
        )
    return FLAVOR_MAPPING[flavor_id]
"""Adapter related object holder."""
import logging
from compass.db.api import adapter as adapter_api
from compass.db.api import database
from compass.db.api import permission
from compass.db.api import user as user_api
from compass.db.api import utils
from compass.db import exception
SUPPORTED_FIELDS = [
'name',
]
RESP_FIELDS = [
'id', 'name', 'roles', 'flavors',
'os_installer', 'package_installer',
'supported_oses', 'display_name', 'health_check_cmd'
]
RESP_OS_FIELDS = [
'id', 'name', 'os_id'
]
RESP_ROLES_FIELDS = [
'id', 'name', 'display_name', 'description', 'optional'
]
RESP_FLAVORS_FIELDS = [
'id', 'adapter_id', 'adapter_name', 'name', 'display_name',
'template', 'roles'
]
ADAPTER_MAPPING = None
FLAVOR_MAPPING = None
def load_adapters(force_reload=False):
global ADAPTER_MAPPING
if force_reload or ADAPTER_MAPPING is None:
logging.info('load adapters into memory')
ADAPTER_MAPPING = adapter_api.get_adapters_internal(
force_reload=force_reload
)
def load_flavors(force_reload=False):
global FLAVOR_MAPPING
if force_reload or FLAVOR_MAPPING is None:
logging.info('load flavors into memory')
FLAVOR_MAPPING = {}
adapters_flavors = adapter_api.get_flavors_internal(
force_reload=force_reload
)
for adapter_name, adapter_flavors in adapters_flavors.items():
for flavor_name, flavor in adapter_flavors.items():
FLAVOR_MAPPING['%s:%s' % (adapter_name, flavor_name)] = flavor
def _filter_adapters(adapter_config, filter_name, filter_value):
if filter_name not in adapter_config:
return False
if isinstance(filter_value, list):
return bool(
adapter_config[filter_name] in filter_value
)
elif isinstance(filter_value, dict):
return all([
_filter_adapters(
adapter_config[filter_name],
sub_filter_key, sub_filter_value
)
for sub_filter_key, sub_filter_value in filter_value.items()
])
else:
return adapter_config[filter_name] == filter_value
@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
@database.run_in_session()
@user_api.check_user_permission(
permission.PERMISSION_LIST_ADAPTERS
)
@utils.output_filters(name=utils.general_filter_callback)
@utils.wrap_to_dict(
RESP_FIELDS,
supported_oses=RESP_OS_FIELDS,
roles=RESP_ROLES_FIELDS,
flavors=RESP_FLAVORS_FIELDS
)
def list_adapters(user=None, session=None, **filters):
"""list adapters."""
load_adapters()
return ADAPTER_MAPPING.values()
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
permission.PERMISSION_LIST_ADAPTERS
)
@utils.wrap_to_dict(
RESP_FIELDS,
supported_oses=RESP_OS_FIELDS,
roles=RESP_ROLES_FIELDS,
flavors=RESP_FLAVORS_FIELDS
)
def get_adapter(adapter_id, user=None, session=None, **kwargs):
"""get adapter."""
load_adapters()
if adapter_id not in ADAPTER_MAPPING:
raise exception.RecordNotExists(
'adpater %s does not exist' % adapter_id
)
return ADAPTER_MAPPING[adapter_id]
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
permission.PERMISSION_LIST_METADATAS
)
@utils.wrap_to_dict(RESP_FLAVORS_FIELDS)
def list_flavors(user=None, session=None, **filters):
"""List flavors."""
load_flavors()
return FLAVOR_MAPPING.values()
@utils.supported_filters([])
@database.run_in_session()
@user_api.check_user_permission(
permission.PERMISSION_LIST_METADATAS
)
@utils.wrap_to_dict(RESP_FLAVORS_FIELDS)
def get_flavor(flavor_id, user=None, session=None, **kwargs):
"""Get flavor."""
load_flavors()
if flavor_id not in FLAVOR_MAPPING:
raise exception.RecordNotExists(
'flavor %s does not exist' % flavor_id
)
return FLAVOR_MAPPING[flavor_id] | 0.640973 | 0.105395 |
import os
import sys
import nltk
#nltk.download('punkt')
from nltk.tokenize import word_tokenize
from nltk.tokenize import RegexpTokenizer
from nltk import *
stemmer = PorterStemmer()
# The functions below exist only to mutate module-level globals; the whole
# script keeps its lookup state in globals (legacy design).
def __termID(a):
    """Set the global termID."""
    global termID
    termID = a

def __docID(a):
    """Set the global docID."""
    global docID
    docID=a

def __offset(a):
    """Set the global inverted-list offset."""
    global offset
    offset=a

def __termfrequency(a):
    """Set the global term_frequency."""
    global term_frequency
    term_frequency=a

def __numberofdocuments(a):
    """Set the global no_of_docs."""
    global no_of_docs
    no_of_docs=a

def __distinct(a):
    """Set the global distinct (distinct-term counter)."""
    global distinct
    distinct=a

def __positions(a):
    """Set the global positions list."""
    global positions
    positions=a
def initialize_parameters(a):
    """Reset every lookup global to ``a`` (callers pass 0).

    Note: ``positions`` is intentionally left untouched; the one function
    that prints it sets it explicitly first.
    """
    for reset in (__offset, __termfrequency, __numberofdocuments,
                  __termID, __docID, __distinct):
        reset(a)
def doc_details(docname):
    """Print doc id, distinct-term and total-term counts for ``docname``.

    Looks the id up in docids.txt, then scans doc_index.txt for that id.
    Assumes doc_index.txt lines are ``docID termID pos...`` — TODO confirm.
    """
    initialize_parameters(0)
    if docname is not None:
        with open("doc_index.txt",'r+', encoding='utf-8') as docindex, open("docids.txt",'r', encoding='utf-8') as docfile:
            all_doc = docfile.read().split()
            # docids.txt alternates id/name tokens; the id precedes the name.
            for i in range(0,len(all_doc)):
                if docname==all_doc[i]:
                    __docID(all_doc[i-1])
                    break
            if docID==0:
                print('File not in directory')
            else:
                while(True):
                    line = docindex.readline()
                    if line !="":
                        line = line.split()
                        if (line[0]==docID):
                            # One matching line per distinct term; the tokens
                            # beyond docID+termID are that term's positions.
                            __distinct(distinct+1)
                            __termfrequency(term_frequency+len(line)-2)
                    else:
                        # End of file: stop scanning.
                        break
                print("Listing for document:", docname)
                print("Doc ID:", docID)
                print("Distinct terms:",distinct)
                print("Total terms:", term_frequency)
def term_details(term):
    """Print corpus statistics for ``term``.

    The term is stripped of punctuation, lower-cased and Porter-stemmed to
    match the index, then looked up in termids.txt and term_info.txt.
    """
    initialize_parameters(0)
    if term is not None:
        with open("term_info.txt", 'r', encoding='utf-8') as term_info_file, open("termids.txt",'r+', encoding='utf-8') as termfile:
            print(term)
            term = ''.join(e for e in term if e.isalpha()) #clean punctuation
            token = stemmer.stem(term.lower())
            doc = termfile.read().split()
            # termids.txt alternates id/term tokens; the id precedes the term.
            for i in range(0, len(doc)):
                if (doc[i]==token):
                    __termID(doc[i-1])
                    break
            if (termID==0):
                print("Word not in corpus")
            else:
                while(True):
                    doc = term_info_file.readline().split()
                    if len(doc) > 0:
                        # term_info.txt line: termID offset corpus_freq doc_count
                        if (termID==doc[0]):
                            __offset(doc[1])
                            __termfrequency(doc[2])
                            __numberofdocuments(doc[3])
                            break
                    else:
                        # End of file without a match.
                        break
                print("Listing for term:", token)
                print("Term ID:", termID)
                print("Number of documents containing term:", no_of_docs)
                print("Term frequency in corpus:", term_frequency)
                print("Inverted list offset:", offset)
def term_doc_details(term, docname):
    """Print the inverted-list entry (positions) of ``term`` in ``docname``."""
    initialize_parameters(0)
    if term is not None and docname is not None:
        with open("doc_index.txt",'r+', encoding='utf-8') as docindex, open("docids.txt",'r', encoding='utf-8') as docidfile, open("termids.txt",'r+', encoding='utf-8') as termfile:
            term = ''.join(e for e in term if e.isalpha()) #clean punctuation
            doc = termfile.read().split()
            term = stemmer.stem(term.lower())
            # 'N/A' (not 0, as the other lookups use) marks "not found" here.
            __termID('N/A')
            __docID('N/A')
            __positions(0)
            for i in range(0, len(doc)):
                if (doc[i]==term):
                    __termID(doc[i-1])
                    break
            if termID!='N/A':
                doc = docidfile.read().split()
                for i in range(0, len(doc)):
                    if (doc[i]==docname):
                        __docID(doc[i-1])
                        break
                if (docID!='N/A'):
                    while (True):
                        doc = docindex.readline().split()
                        if len(doc)>0:
                            if doc[0]==docID and doc[1]==termID:
                                print('docID=',docID)
                                print('termID=',termID)
                                # NOTE(review): ``a`` is assigned but unused.
                                a = doc[2:]
                                __positions(doc[2:])
                                __termfrequency(len(positions))
                                break
                        else:
                            # End of file without a match: report N/A below.
                            __termfrequency('N/A')
                            __positions('N/A')
                            break
            else:
                print("Invalid token")
            print('Inverted list for term: ', term)
            print('In document: ', docname)
            print('TERMID:', termID)
            print('DOCID:', docID)
            print('Term frequency in document:', term_frequency)
            if type(positions)==list:
                print('Positions:', (', '.join(positions)))
            else:
                print('Positions:', (positions))
if __name__ == '__main__':
    # BUG FIX: the original read sys.argv[3] unconditionally, so
    # ``python read_index.py --term foo`` crashed with IndexError instead
    # of dispatching to term_details.  Guard each index before use.
    args = sys.argv
    if len(args) >= 5 and args[1] == '--term' and args[3] == '--doc':
        term_doc_details(args[2], args[4])
    elif len(args) >= 3 and args[1] == '--term':
        term_details(args[2])
    elif len(args) >= 3 and args[1] == '--doc':
        doc_details(args[2])
    else:
        print('Usage details as follows')
        print('1. For searching details of a term in a specific document\t"python read_index.py --term <term> --doc <doc>", or')
        print('2. For searching details of a term\t"python read_index.py --term <term>", or')
        print('3. For searching details of a doc\t"python read_index.py --doc <doc>"')
import sys
import nltk
#nltk.download('punkt')
from nltk.tokenize import word_tokenize
from nltk.tokenize import RegexpTokenizer
from nltk import *
stemmer = PorterStemmer()
def __termID(a):
global termID
termID = a
def __docID(a):
global docID
docID=a
def __offset(a):
global offset
offset=a
def __termfrequency(a):
global term_frequency
term_frequency=a
def __numberofdocuments(a):
global no_of_docs
no_of_docs=a
def __distinct(a):
global distinct
distinct=a
def __positions(a):
global positions
positions=a
def initialize_parameters(a):
__offset(a)
__termfrequency(a)
__numberofdocuments(a)
__termID(a)
__docID(a)
__distinct(a)
def doc_details(docname):
initialize_parameters(0)
if docname is not None:
with open("doc_index.txt",'r+', encoding='utf-8') as docindex, open("docids.txt",'r', encoding='utf-8') as docfile:
all_doc = docfile.read().split()
for i in range(0,len(all_doc)):
if docname==all_doc[i]:
__docID(all_doc[i-1])
break
if docID==0:
print('File not in directory')
else:
while(True):
line = docindex.readline()
if line !="":
line = line.split()
if (line[0]==docID):
__distinct(distinct+1)
__termfrequency(term_frequency+len(line)-2)
else:
break
print("Listing for document:", docname)
print("Doc ID:", docID)
print("Distinct terms:",distinct)
print("Total terms:", term_frequency)
def term_details(term):
initialize_parameters(0)
if term is not None:
with open("term_info.txt", 'r', encoding='utf-8') as term_info_file, open("termids.txt",'r+', encoding='utf-8') as termfile:
print(term)
term = ''.join(e for e in term if e.isalpha()) #clean punctuation
token = stemmer.stem(term.lower())
doc = termfile.read().split()
for i in range(0, len(doc)):
if (doc[i]==token):
__termID(doc[i-1])
break
if (termID==0):
print("Word not in corpus")
else:
while(True):
doc = term_info_file.readline().split()
if len(doc) > 0:
if (termID==doc[0]):
__offset(doc[1])
__termfrequency(doc[2])
__numberofdocuments(doc[3])
break
else:
break
print("Listing for term:", token)
print("Term ID:", termID)
print("Number of documents containing term:", no_of_docs)
print("Term frequency in corpus:", term_frequency)
print("Inverted list offset:", offset)
def term_doc_details(term, docname):
initialize_parameters(0)
if term is not None and docname is not None:
with open("doc_index.txt",'r+', encoding='utf-8') as docindex, open("docids.txt",'r', encoding='utf-8') as docidfile, open("termids.txt",'r+', encoding='utf-8') as termfile:
term = ''.join(e for e in term if e.isalpha()) #clean punctuation
doc = termfile.read().split()
term = stemmer.stem(term.lower())
__termID('N/A')
__docID('N/A')
__positions(0)
for i in range(0, len(doc)):
if (doc[i]==term):
__termID(doc[i-1])
break
if termID!='N/A':
doc = docidfile.read().split()
for i in range(0, len(doc)):
if (doc[i]==docname):
__docID(doc[i-1])
break
if (docID!='N/A'):
while (True):
doc = docindex.readline().split()
if len(doc)>0:
if doc[0]==docID and doc[1]==termID:
print('docID=',docID)
print('termID=',termID)
a = doc[2:]
__positions(doc[2:])
__termfrequency(len(positions))
break
else:
__termfrequency('N/A')
__positions('N/A')
break
else:
print("Invalid token")
print('Inverted list for term: ', term)
print('In document: ', docname)
print('TERMID:', termID)
print('DOCID:', docID)
print('Term frequency in document:', term_frequency)
if type(positions)==list:
print('Positions:', (', '.join(positions)))
else:
print('Positions:', (positions))
if __name__ == '__main__':
if len(sys.argv) > 1:
if sys.argv[1]=='--term' and sys.argv[3]=='--doc':
term_doc_details(sys.argv[2], sys.argv[4])
elif sys.argv[1]=='--term':
term_details(sys.argv[2])
elif sys.argv[1]=='--doc':
doc_details(sys.argv[2])
else:
print('Usage details as follows')
print('1. For searching details of a term in a specific document\t"python read_index.py --term <term> --doc <doc>", or')
print('2. For searching details of a term\t"python read_index.py --term <term>", or')
print('3. For searching details of a doc\t"python read_index.py --doc <doc>"') | 0.032977 | 0.127056 |
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
class Heteroscedasticity_tests():
    """Base class for heteroscedasticity tests (Park, Glejser).

    Both tests regress a function of the tested feature against the
    absolute residuals of a base OLS fit.

    Attributes:
        X, Y: numpy arrays; for a pandas DataFrame df use
            X = df['x'].values.reshape(-1, 1)
            Y = df['y'].values.reshape(-1, 1)
    """

    def __init__(self, X, Y):
        self.X = X  # incoming feature
        self.Y = Y  # outcome
        # absolute residuals of the base fit, reused by the concrete tests
        self.y_new = self.find_residuals()

    def fit_results(self, x, y):
        """Fit an OLS model on (x, y) and return the fitted results.

        NOTE(review): arguments are passed as OLS(x, y), so x is the
        endogenous variable here -- confirm this ordering is intended.
        """
        return sm.regression.linear_model.OLS(x, y).fit()

    def find_residuals(self):
        """Return the absolute residuals of the base OLS fit of (X, Y)."""
        return np.abs(self.fit_results(self.X, self.Y).resid)

    def find_p_value(self, x, y):
        """Return (r-squared, p-value of the first coefficient) for an OLS fit of (x, y)."""
        fitted = self.fit_results(x, y)
        return fitted.rsquared, fitted.pvalues[0]

    def plot_data(self, x, y, xlabel, ylabel, title=None):
        """Scatter-plot x against y with the given axis labels and title."""
        plt.scatter(x, y)
        plt.ylabel(ylabel)
        plt.xlabel(xlabel)
        plt.title(title)
        plt.show()
import statsmodels.api as sm
import matplotlib.pyplot as plt
class Heteroscedasticity_tests():
""" Generic class for Heteroscedasticity tests as
Park and Glejser methods.
Both methods use linear regression for functions of tested income feature and residuals as outcome
Attributes:
numpy arrays X, Y
if you use pandas dataframe df
X = df['x'].values.reshape(-1,1)
Y = df['y'].values.reshape(-1,1)
"""
def __init__(self, X, Y):
#incoming feature
self.X = X
#outcome
self.Y = Y
#residuals
self.y_new=self.find_residuals()
def fit_results(self, x, y):
'''
fit linear model
args: x, y
returns: model
'''
model=sm.regression.linear_model.OLS(x, y)
return model.fit()
def find_residuals(self):
'''
function computes linear regression residuals
args:
regression incoming features: x
regression outcome: y
returns: residuals
'''
return np.abs(self.fit_results(self.X, self.Y).resid)
def find_p_value(self, x, y):
'''
computes p value for regression between x and y
args: x and y, income and outcome for linear regression
returns: p value
'''
results = self.fit_results(x, y)
return results.rsquared, results.pvalues[0]
def plot_data(self, x, y, xlabel, ylabel, title=None):
"""Function to plot original data
Args:
x and y to plot, xlabel, ylabel, title
Produces:
plot
"""
# make the plot
plt.scatter(x, y)
plt.ylabel(ylabel)
plt.xlabel(xlabel)
plt.title(title)
plt.show() | 0.719285 | 0.561996 |
import os
import os.path
import pygame
import pygame.transform
import pygame.image
import pygame.font
from syntaxerrorgame.ui.button import Button
# The Font objects below are created at import time, so the font module
# must be initialised first.
pygame.font.init()

ENEMY_FONT = pygame.font.SysFont('Times New Roman', 48)  # system font; presumably for enemy labels -- confirm
# The game's bundled pixel font in three sizes.
FONT18 = pygame.font.Font(os.path.join('assets', 'fonts/VT323-Regular.ttf'), 18)
FONT32 = pygame.font.Font(os.path.join('assets', 'fonts/VT323-Regular.ttf'), 32)
FONT64 = pygame.font.Font(os.path.join('assets', 'fonts/VT323-Regular.ttf'), 64)
def show_fps(game):
    """
    Render the current FPS in the window's top-right corner.
    :param: game: syntaxerrorgame/game.py/Game
    """
    fps = round(game.clock.get_fps())
    label = FONT32.render(f'FPS: {fps}', True, game.data['colors']['black']).convert_alpha()
    right_margin = game.window.get_width() - label.get_width() - 10
    game.window.blit(label, (right_margin, 10))
def _load_menu_image(filename, size):
    """Load an image from the assets directory and smooth-scale it to *size*."""
    surface = pygame.image.load(os.path.join('assets', filename)).convert_alpha()
    return pygame.transform.smoothscale(surface, size)


def _make_button(game, surface, x, y):
    """Create a Button showing *surface* at (x, y), sized to the surface."""
    return Button(
        game.window,
        surface,
        [x, y],
        [surface.get_width(), surface.get_height()],
    )


def init_menu_components(game):
    """
    Build menu components
    :param: game: syntaxerrorgame/game.py/Game

    Refactor note: the five identical load/scale/position stanzas of the
    original are factored into _load_menu_image/_make_button; all surfaces,
    sizes and positions are unchanged.
    """
    center_x = game.window.get_width() // 2
    center_y = game.window.get_height() // 2
    button_size = (250, 100)

    game.game_image_surface = _load_menu_image('images/game_image.png', (450, 193))

    # "play" sits 50px under the game image (which starts at y=100),
    # "exit" 10px under "play".
    play_top = 100 + game.game_image_surface.get_height() + 50
    game.button_play_surface = _load_menu_image('images/button_play.png', button_size)
    game.button_play = _make_button(
        game, game.button_play_surface,
        center_x - game.button_play_surface.get_width() // 2,
        play_top,
    )

    game.button_exit_surface = _load_menu_image('images/button_exit.png', button_size)
    game.button_exit = _make_button(
        game, game.button_exit_surface,
        center_x - game.button_exit_surface.get_width() // 2,
        play_top + game.button_play_surface.get_height() + 10,
    )

    # "again" is centered in the window, "main menu" 10px under it.
    game.button_again_surface = _load_menu_image('images/button_again.png', button_size)
    game.button_again = _make_button(
        game, game.button_again_surface,
        center_x - game.button_again_surface.get_width() // 2,
        center_y - game.button_again_surface.get_height() // 2,
    )

    game.button_main_menu_surface = _load_menu_image('images/button_main_menu.png', button_size)
    game.button_main_menu = _make_button(
        game, game.button_main_menu_surface,
        center_x - game.button_main_menu_surface.get_width() // 2,
        center_y - game.button_main_menu_surface.get_height() // 2 +
        game.button_again_surface.get_height() + 10,
    )

    # "resume" is centered in the window.
    game.button_resume_surface = _load_menu_image('images/button_resume.png', button_size)
    game.button_resume = _make_button(
        game, game.button_resume_surface,
        center_x - game.button_resume_surface.get_width() // 2,
        center_y - game.button_resume_surface.get_height() // 2,
    )
import os
import os.path
import pygame
import pygame.transform
import pygame.image
import pygame.font
from syntaxerrorgame.ui.button import Button
pygame.font.init()
ENEMY_FONT = pygame.font.SysFont('Times New Roman', 48)
FONT18 = pygame.font.Font(os.path.join('assets', 'fonts/VT323-Regular.ttf'), 18)
FONT32 = pygame.font.Font(os.path.join('assets', 'fonts/VT323-Regular.ttf'), 32)
FONT64 = pygame.font.Font(os.path.join('assets', 'fonts/VT323-Regular.ttf'), 64)
def show_fps(game):
"""
This method shows FPS value on window
:param: game: syntaxerrorgame/game.py/Game
"""
fps_text = FONT32.render(f'FPS: {round(game.clock.get_fps())}', True, game.data['colors']['black']).convert_alpha()
game.window.blit(fps_text, (game.window.get_width() - fps_text.get_width() - 10, 10))
def init_menu_components(game):
"""
Build menu components
:param: game: syntaxerrorgame/game.py/Game
"""
game.game_image_surface = pygame.transform.smoothscale(pygame.image.load(os.path.join('assets', 'images/game_image.png')).convert_alpha(), (450, 193))
game.button_play_surface = pygame.transform.smoothscale(pygame.image.load(os.path.join('assets', 'images/button_play.png')).convert_alpha(), (250, 100))
game.button_play = Button(
game.window,
game.button_play_surface,
[
game.window.get_width() // 2 - game.button_play_surface.get_width() // 2,
100 + game.game_image_surface.get_height() + 50,
],
[
game.button_play_surface.get_width(),
game.button_play_surface.get_height(),
],
)
game.button_exit_surface = pygame.transform.smoothscale(pygame.image.load(os.path.join('assets', 'images/button_exit.png')).convert_alpha(), (250, 100))
game.button_exit = Button(
game.window,
game.button_exit_surface,
[
game.window.get_width() // 2 - game.button_exit_surface.get_width() // 2,
100 + game.game_image_surface.get_height() + 50 +
game.button_play_surface.get_height() + 10,
],
[
game.button_exit_surface.get_width(),
game.button_exit_surface.get_height(),
],
)
game.button_again_surface = pygame.transform.smoothscale(pygame.image.load(os.path.join('assets', 'images/button_again.png')).convert_alpha(), (250, 100))
game.button_again = Button(
game.window,
game.button_again_surface,
[
game.window.get_width() // 2 - game.button_again_surface.get_width() // 2,
game.window.get_height() // 2 - game.button_again_surface.get_height() // 2,
],
[
game.button_again_surface.get_width(),
game.button_again_surface.get_height(),
],
)
game.button_main_menu_surface = pygame.transform.smoothscale(pygame.image.load(os.path.join('assets', 'images/button_main_menu.png')).convert_alpha(), (250, 100))
game.button_main_menu = Button(
game.window,
game.button_main_menu_surface,
[
game.window.get_width() // 2 - game.button_main_menu_surface.get_width() // 2,
game.window.get_height() // 2 - game.button_main_menu_surface.get_height() // 2 +
game.button_again_surface.get_height() + 10,
],
[
game.button_main_menu_surface.get_width(),
game.button_main_menu_surface.get_height(),
],
)
game.button_resume_surface = pygame.transform.smoothscale(pygame.image.load(os.path.join('assets', 'images/button_resume.png')).convert_alpha(), (250, 100))
game.button_resume = Button(
game.window,
game.button_resume_surface,
[
game.window.get_width() // 2 - game.button_resume_surface.get_width() // 2,
game.window.get_height() // 2 - game.button_resume_surface.get_height() // 2,
],
[
game.button_resume_surface.get_width(),
game.button_resume_surface.get_height(),
],
) | 0.325521 | 0.1532 |
from enum import Enum
from typing import List, Tuple, Union
from numpy import *
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
from k_means import k_means, get_group_center
from ransac import *
from blend import *
from L2_Net import L2Net
import time
def show_image(image: np.ndarray) -> None:
    """Convert a BGR image to RGB and open it in the default PIL viewer."""
    from PIL import Image
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    Image.fromarray(rgb).show()
class Method(Enum):
    """Selectable feature detectors, mapped to their OpenCV factory functions.

    The values are built-in functions, which Enum treats as ordinary member
    values (plain Python functions would become methods instead).
    NOTE(review): SURF/SIFT live in cv2.xfeatures2d, i.e. an
    opencv-contrib build is required.
    """
    SURF = cv2.xfeatures2d.SURF_create
    SIFT = cv2.xfeatures2d.SIFT_create
    ORB = cv2.ORB_create
# Palette of distinct colors (presumably BGR, for drawing markers) -- confirm usage.
colors = ((123, 234, 12), (23, 44, 240), (224, 120, 34), (21, 234, 190),
          (80, 160, 200), (243, 12, 100), (25, 90, 12), (123, 10, 140))
class Area:
    """A region defined by its corner points; the containment test is TBD."""

    def __init__(self, *points):
        self.points = list(points)

    def is_inside(self, x: Union[float, Tuple[float, float]], y: float=None):
        """Point-in-area test; accepts (x, y) packed in a tuple or as two floats.

        Not implemented yet -- always raises NotImplementedError.
        """
        if isinstance(x, tuple):
            x, y = x
        raise NotImplementedError()
class Matcher:
    """Detects and matches keypoints between two images.

    Keypoints/descriptors come from the configured OpenCV detector
    (SURF/SIFT/ORB); 32x32 patches around each keypoint are additionally
    described with L2-Net, although matching below uses only the
    detector descriptors.
    """

    def __init__(self, image1: np.ndarray, image2: np.ndarray, method: Enum = Method.SIFT, threshold=800, ratio=400):
        """Store the two images and build the brute-force matcher.

        Args:
            image1 (np.ndarray): first image
            image2 (np.ndarray): second image
            method (Enum, optional): Defaults to Method.SIFT. feature detector
            ratio (int, optional): Defaults to 400. weight of the L2-Net
                descriptor (stored but not used in this chunk)
            threshold (int, optional): Defaults to 800. detector threshold
        """
        self.image1 = image1
        self.image2 = image2
        self.method = method
        self.ratio = ratio
        self.ros1 = []        # 32x32 patches around image1 keypoints (list, later np.ndarray)
        self.ros1_3 = None
        self.ros2 = []        # 32x32 patches around image2 keypoints
        self.ros2_3 = None
        self.loc1 = []        # cv2.KeyPoint objects for image1 patches
        self.loc2 = []        # cv2.KeyPoint objects for image2 patches
        self.threshold = threshold
        self._keypoints1 = None      # List[cv2.KeyPoint]
        self._descriptors1 = None    # np.ndarray
        self._keypoints2 = None      # List[cv2.KeyPoint]
        self._descriptors2 = None    # np.ndarray
        if self.method == Method.ORB:
            # error if not set this
            self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        else:
            self.matcher = cv2.BFMatcher(cv2.NORM_L2, crossCheck=False)
        # self.matcher = cv2.FlannBasedMatcher()
        self.match_points = []
        self.match_points1 = []
        # NOTE(review): image_points1/2 are never filled in this chunk,
        # though Stitcher reads them -- verify.
        self.image_points1 = np.array([])
        self.image_points2 = np.array([])
        self.keypoints1_loc = None   # (x, y) coordinates of keypoints1
        self.keypoints2_loc = None
        self.x = 0                   # scratch clamped coordinates used by get_the_ros
        self.y = 0
        self.vggd1 = []
        self.vggd2 = []
        self.l2_1des = None          # L2-Net descriptors for image1 patches
        self.l2_2des = None
        self.save1 = []
        self.save2 = []
        self.save3 = []

    def compute_keypoint(self) -> None:
        """Detect keypoints and descriptors in both images with the configured detector."""
        print('Compute keypoint by SIFT')
        print(Method.SIFT)
        feature = self.method.value(self.threshold)
        self._keypoints1, self._descriptors1 = feature.detectAndCompute(
            self.image1, None)
        self._keypoints2, self._descriptors2 = feature.detectAndCompute(
            self.image2, None)
        # convert KeyPoint objects to plain (x, y) arrays
        self.keypoints1_loc = cv2.KeyPoint_convert(self._keypoints1)
        self.keypoints2_loc = cv2.KeyPoint_convert(self._keypoints2)

    def get_the_ros(self):
        """Cut a 32x32 patch around every keypoint (the L2-Net input).

        Coordinates are clamped to [16, 240], i.e. 256x256 images are
        assumed -- TODO confirm.
        NOTE(review): self.x comes from the keypoint's x coordinate but is
        used as the ROW index into the image (and self.y as the column);
        verify this is not an x/y swap.
        """
        for i in range(len(self.keypoints1_loc)):
            self.loc1.append(cv2.KeyPoint(x=self.keypoints1_loc[i][0], y=self.keypoints1_loc[i][1], _size=32,
                                          _angle=-1, _response=0.018, _octave=1, _class_id=-1))
            if self.keypoints1_loc[i][0] < 16:
                self.x = 16
            elif self.keypoints1_loc[i][0] > 240:
                self.x = 240
            else:
                self.x = int(self.keypoints1_loc[i][0])
            if self.keypoints1_loc[i][1] < 16:
                self.y = 16
            elif self.keypoints1_loc[i][1] > 240:
                self.y = 240
            else:
                self.y = int(self.keypoints1_loc[i][1])
            self.ros1.append(self.image1[self.x-16: self.x+16, self.y-16: self.y+16])
        for i in range(len(self.keypoints2_loc)):
            self.loc2.append(cv2.KeyPoint(x=self.keypoints2_loc[i][0], y=self.keypoints2_loc[i][1], _size=32,
                                          _angle=-1, _response=0.018, _octave=1, _class_id=-1))
            if self.keypoints2_loc[i][0] < 16:
                self.x = 16
            elif self.keypoints2_loc[i][0] > 240:
                self.x = 240
            else:
                self.x = int(self.keypoints2_loc[i][0])
            if self.keypoints2_loc[i][1] < 16:
                self.y = 16
            elif self.keypoints2_loc[i][1] > 240:
                self.y = 240
            else:
                self.y = int(self.keypoints2_loc[i][1])
            self.ros2.append(self.image2[self.x-16: self.x+16, self.y-16: self.y+16])
        # self.ros1, self.loc1 = slice(self.image1)
        # self.ros2, self.loc2 = slice(self.image2)

    def compute_kepoint_by_L2_Net(self) -> None:
        """Compute L2-Net descriptors for the 32x32 patches of both images."""
        self.get_the_ros()
        self.ros1 = np.array(self.ros1)
        self.ros2 = np.array(self.ros2)
        print('Compute keypoint by L2-net')
        l2net = L2Net("L2Net-HP+")
        self.l2_1des = l2net.calc_descriptors(self.ros1)
        self.l2_2des = l2net.calc_descriptors(self.ros2)

    def match(self, max_match_lenth=200, threshold=0.04, show_match=False):
        """Match the detector descriptors of the two images and draw the result.

        knn-matches with Lowe's 0.6 ratio test, estimates a homography with
        RANSAC, draws image1's projected outline onto image2 and displays
        the match visualization.

        Args:
            max_match_lenth (int, optional): Defaults to 200. maximum number
                of matches. NOTE(review): unused in the body.
            threshold (float, optional): Defaults to 0.04. maximum match
                distance difference. NOTE(review): unused in the body.
            show_match (bool, optional): Defaults to False. whether to show
                the match result. NOTE(review): the window is shown
                unconditionally below.
        """
        self.compute_keypoint()
        self.compute_kepoint_by_L2_Net()
        good = []
        # knn match on the detector descriptors (L2-Net descriptors are
        # computed above but not used for matching here)
        self.match_points = self.matcher.knnMatch(self._descriptors1, self._descriptors2, k=2)
        for m, n in self.match_points:
            # Lowe's ratio test
            if m.distance < 0.6 * n.distance:
                good.append(m)
        print(len(good))
        ptsA = np.float32([self._keypoints1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        ptsB = np.float32([self._keypoints2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        ransacReprojThreshold = 4
        H, status = cv2.findHomography(ptsA, ptsB, cv2.RANSAC, ransacReprojThreshold)
        matchesMask = status.ravel().tolist()
        h, w = 256, 256  # assumes 256x256 inputs -- TODO confirm
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, H)
        # draws image1's warped outline; mutates self.image2 in place
        img2 = cv2.polylines(self.image2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
        draw_params = dict(matchColor=(0, 255, 0),
                           singlePointColor=None,
                           matchesMask=matchesMask,
                           flags=2)
        img3 = cv2.drawMatches(self.image1, self._keypoints1, img2, self._keypoints2, good, None, **draw_params)
        show_image(img3)
# (take matched point pairs from the best matches and warp-stitch)
def get_weighted_points(image_points: np.ndarray):
    """Append a second copy of the point farthest from the centroid.

    Doubling that point's weight biases later least-squares fits toward it.
    """
    centroid = np.average(image_points, axis=0)
    distances = np.linalg.norm(image_points - centroid, axis=1)
    farthest = image_points[np.argmax(distances)]
    return np.append(image_points, np.array([farthest]), axis=0)
def pca(image, rate=128):
    """Principal component analysis.

    Projects *image* (rows = samples, columns = features) onto its top
    *rate* principal components.

    Fixes: the original used star-imported `mean`/`cov`/`linalg` and the
    deprecated `mat`/`np.matrix` wrapper (removed in modern NumPy); this
    version uses explicit `np.` calls on plain ndarrays -- same numerics,
    ndarray instead of np.matrix results.

    Args:
        image: 2-D array-like, one sample per row.
        rate: number of principal components to keep (default 128).

    Returns:
        (new_image, t, mean_value): projected data, the
        (features x rate) component matrix, and the per-column means
        needed to undo the centering.
    """
    image = np.asarray(image)
    mean_value = np.mean(image, axis=0)
    centered = image - mean_value
    # rowvar=False: columns are the variables (features)
    c = np.cov(centered, rowvar=False)
    eigvalue, eigvector = np.linalg.eig(c)
    # columns of t = eigenvectors of the `rate` largest eigenvalues
    n_largest_index = np.argsort(-eigvalue)[:rate]
    t = eigvector[:, n_largest_index]
    print(np.shape(centered), np.shape(t))
    new_image = np.dot(centered, t)
    return new_image, t, mean_value
class Stitcher:
    """Warps and blends two images into one using Matcher's results.

    Currently: simple homography estimation plus a bisector-mask blend.
    """

    def __init__(self, image1: np.ndarray, image2: np.ndarray, method: Enum = Method.SIFT, use_kmeans=False):
        """Set up stitching for a pair of images.

        Args:
            image1 (np.ndarray): first image
            image2 (np.ndarray): second image
            method (Enum): feature detection method passed to Matcher
            use_kmeans (bool): whether to refine point selection with k-means
        """
        self.image_points1, self.image_points2 = None, None
        self.image1 = image1
        self.image2 = image2
        self.method = method
        self.use_kmeans = use_kmeans
        self.matcher = Matcher(image1, image2, method=method)
        self.M = np.eye(3)   # homography mapping image1 onto the output canvas
        self.image = None    # stitched result, produced by stich()

    def stich(self, show_result=True, max_match_lenth=40, show_match_point=True, use_partial=False,
              use_new_match_method=False, use_gauss_blend=True):
        """Match, warp and blend the two images into self.image.

        Args:
            show_result (bool, optional): Defaults to True. whether to show
                the stitched image. NOTE(review): unused in the body.
            show_match_point (bool, optional): Defaults to True. whether to
                draw the matched points on the result.
            use_partial (bool, optional): apply partial transform first.
                NOTE(review): self.partial_transform is not defined in this
                file chunk -- confirm it exists before passing True.
            use_new_match_method (bool, optional): use GeneticTransform
                instead of cv2.findHomography.
            use_gauss_blend (bool, optional): gaussian vs direct blending.
        """
        self.matcher.match(max_match_lenth=max_match_lenth,
                           show_match=show_match_point)
        if self.use_kmeans:
            self.image_points1, self.image_points2 = get_group_center(
                self.matcher.image_points1, self.matcher.image_points2)
        else:
            self.image_points1, self.image_points2 = (
                self.matcher.image_points1, self.matcher.image_points2)
        # NOTE(review): Matcher.match() never fills image_points1/2 in this
        # chunk, so these may still be empty here -- verify.
        if use_new_match_method:
            self.M = GeneticTransform(self.image_points1, self.image_points2).run()
        else:
            self.M, _ = cv2.findHomography(self.image_points1, self.image_points2, method=cv2.RANSAC)
        print("Good points and average distance: ", GeneticTransform.get_value(
            self.image_points1, self.image_points2, self.M))
        # canvas size = bounding box of warped image1 union image2
        left, right, top, bottom = self.get_transformed_size()
        width = int(max(right, self.image2.shape[1]) - min(left, 0))
        height = int(max(bottom, self.image2.shape[0]) - min(top, 0))
        if use_partial:
            self.partial_transform()
        # translation matrix shifting everything into positive coordinates
        self.adjustM = np.array(
            [[1, 0, max(-left, 0)],  # horizontal shift
             [0, 1, max(-top, 0)],   # vertical shift
             [0, 0, 1]
             ], dtype=np.float64)
        self.M = np.dot(self.adjustM, self.M)
        transformed_1 = cv2.warpPerspective(
            self.image1, self.M, (width, height))
        transformed_2 = cv2.warpPerspective(
            self.image2, self.adjustM, (width, height))
        self.image = self.blend(transformed_1, transformed_2, use_gauss_blend=use_gauss_blend)
        if show_match_point:
            # draw each matched pair in its canvas position
            for point1, point2 in zip(self.image_points1, self.image_points2):
                point1 = self.get_transformed_position(tuple(point1))
                point1 = tuple(map(int, point1))
                point2 = self.get_transformed_position(tuple(point2), M=self.adjustM)
                point2 = tuple(map(int, point2))
                cv2.circle(self.image, point1, 10, (20, 20, 255), 5)
                cv2.circle(self.image, point2, 8, (20, 200, 20), 5)

    def blend(self, image1: np.ndarray, image2: np.ndarray, use_gauss_blend=True) -> np.ndarray:
        """Blend the two warped images.

        Args:
            image1 (np.ndarray): first warped image
            image2 (np.ndarray): second warped image
            use_gauss_blend: gaussian blending if True, else direct

        Returns:
            np.ndarray: blended result
        """
        mask = self.generate_mask(image1, image2)
        print("Blending")
        if use_gauss_blend:
            result = gaussian_blend(image1, image2, mask, mask_blend=10)
        else:
            result = direct_blend(image1, image2, mask, mask_blend=0)
        return result

    def generate_mask(self, image1: np.ndarray, image2: np.ndarray):
        """Build the blending mask; the seam is the perpendicular bisector
        of the segment joining the two transformed image centers.

        Args:
            image1: first warped image
            image2: second warped image

        Returns:
            np.ndarray: boolean array (truthy where image1 should win)
        """
        print("Generating mask")
        # x, y
        center1 = self.image1.shape[1] / 2, self.image1.shape[0] / 2
        center1 = self.get_transformed_position(center1)
        center2 = self.image2.shape[1] / 2, self.image2.shape[0] / 2
        center2 = self.get_transformed_position(center2, M=self.adjustM)
        x1, y1 = center1
        x2, y2 = center2
        # np.fromfunction passes (row, col, channel); row is named y here.
        def function(y, x, *z):
            return (y2 - y1) * y < -(x2 - x1) * (x - (x1 + x2) / 2) + (y2 - y1) * (y1 + y2) / 2
        mask = np.fromfunction(function, image1.shape)
        # keep each image's exclusive region regardless of the bisector side
        mask = np.logical_and(mask, np.logical_not(image2)) \
            + np.logical_and(mask, image1)\
            + np.logical_and(image1, np.logical_not(image2))
        return mask

    def get_transformed_size(self) -> Tuple[int, int, int, int]:
        """Bounding box of image1 after applying self.M, used to shift the
        canvas so the whole result stays visible.

        Returns:
            Tuple[int, int, int, int]: left, right, top, bottom bounds
        """
        conner_0 = (0, 0)
        conner_1 = (self.image1.shape[1], 0)
        conner_2 = (self.image1.shape[1], self.image1.shape[0])
        conner_3 = (0, self.image1.shape[0])
        points = [conner_0, conner_1, conner_2, conner_3]
        top = min(map(lambda x: self.get_transformed_position(x)[1], points))
        bottom = max(
            map(lambda x: self.get_transformed_position(x)[1], points))
        left = min(map(lambda x: self.get_transformed_position(x)[0], points))
        right = max(map(lambda x: self.get_transformed_position(x)[0], points))
        return left, right, top, bottom

    def get_transformed_position(self, x: Union[float, Tuple[float, float]], y: float = None, M = None) -> Tuple[float, float]:
        """Apply a homography (self.M by default) to a point.

        Args:
            x (Union[float, Tuple[float, float]]): x coordinate or an (x, y) tuple
            y (float, optional): Defaults to None. y coordinate, may be omitted
                when x is a tuple
            M (np.ndarray, optional): Defaults to None. homography to use
                instead of self.M

        Returns:
            Tuple[float, float]: the transformed point
        """
        if isinstance(x, tuple):
            x, y = x
        p = np.array([x, y, 1])[np.newaxis].T  # homogeneous column vector
        if M is not None:
            M = M
        else:
            M = self.M
        pa = np.dot(M, p)
        # perspective divide
        return pa[0, 0] / pa[2, 0], pa[1, 0] / pa[2, 0]
if __name__ == "__main__":
os.chdir(os.path.dirname(__file__))
start_time = time.time()
img1 = cv2.imread("./example/label.jpg", cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread("./example/4.jpg", cv2.IMREAD_GRAYSCALE)
img1 = np.expand_dims(img1, axis=2)
img2 = np.expand_dims(img2, axis=2)
stitcher = Stitcher(img1, img2, Method.SIFT, False)
stitcher.stich(max_match_lenth=50, use_partial=False, use_new_match_method=True, use_gauss_blend=False)
cv2.imwrite('./example/merge.jpg', stitcher.image)
print("Time: ", time.time() - start_time)
print("M: ", stitcher.M) | stitch.py | from enum import Enum
from typing import List, Tuple, Union
from numpy import *
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
from k_means import k_means, get_group_center
from ransac import *
from blend import *
from L2_Net import L2Net
import time
def show_image(image: np.ndarray) -> None:
from PIL import Image
Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)).show()
class Method(Enum):
SURF = cv2.xfeatures2d.SURF_create
SIFT = cv2.xfeatures2d.SIFT_create
ORB = cv2.ORB_create
colors = ((123, 234, 12), (23, 44, 240), (224, 120, 34), (21, 234, 190),
(80, 160, 200), (243, 12, 100), (25, 90, 12), (123, 10, 140))
class Area:
def __init__(self, *points):
self.points = list(points)
def is_inside(self, x: Union[float, Tuple[float, float]], y: float=None):
if isinstance(x, tuple):
x, y = x
raise NotImplementedError()
class Matcher:
def __init__(self, image1: np.ndarray, image2: np.ndarray, method: Enum = Method.SIFT, threshold=800, ratio=400):
"""输入两幅图像,计算其特征值
此类用于输入两幅图像,计算其特征值,输入两幅图像分别为numpy数组格式的图像,
其中的method参数要求输入SURF、SIFT或者ORB,threshold参数为特征值检测所需的阈值。
Args:
image1 (np.ndarray): 图像一
image2 (np.ndarray): 图像二
method (Enum, optional): Defaults to Method.SIFT. 特征值检测方法
ratio (int, optional): Defaults to 400. L2-Net特征向量比重
threshold (int, optional): Defaults to 800. 特征值阈值
"""
self.image1 = image1
self.image2 = image2
self.method = method
self.ratio = ratio
self.ros1 = []
# type np.ndarray
self.ros1_3 = None
self.ros2 = []
# type np.ndarray
self.ros2_3 = None
self.loc1 = []
# type list
self.loc2 = []
# type list
self.threshold = threshold
self._keypoints1 = None
# type List[cv2.KeyPoint]
self._descriptors1 = None
# type np.ndarray
self._keypoints2 = None
# type List[cv2.KeyPoint]
self._descriptors2 = None
# type np.ndarray
if self.method == Method.ORB:
# error if not set this
self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
else:
self.matcher = cv2.BFMatcher(cv2.NORM_L2, crossCheck=False)
# self.matcher = cv2.FlannBasedMatcher()
self.match_points = []
self.match_points1 = []
self.image_points1 = np.array([])
self.image_points2 = np.array([])
self.keypoints1_loc = None
self.keypoints2_loc = None
self.x = 0
self.y = 0
self.vggd1 = []
self.vggd2 = []
self.l2_1des = None
self.l2_2des = None
self.save1 = []
self.save2 = []
self.save3 = []
def compute_keypoint(self) -> None:
"""计算特征点
利用给出的特征值检测方法对图像进行特征值检测。
Args:
image (np.ndarray): 图像
"""
print('Compute keypoint by SIFT')
print(Method.SIFT)
feature = self.method.value(self.threshold)
self._keypoints1, self._descriptors1 = feature.detectAndCompute(
self.image1, None)
self._keypoints2, self._descriptors2 = feature.detectAndCompute(
self.image2, None)
self.keypoints1_loc = cv2.KeyPoint_convert(self._keypoints1)
self.keypoints2_loc = cv2.KeyPoint_convert(self._keypoints2)
def get_the_ros(self):
"""选取待匹配的特征块
将灰度图像选取特定区域,并转换维度为(?, 32, 32, 1)
"""
for i in range(len(self.keypoints1_loc)):
self.loc1.append(cv2.KeyPoint(x=self.keypoints1_loc[i][0], y=self.keypoints1_loc[i][1], _size=32,
_angle=-1, _response=0.018, _octave=1, _class_id=-1))
if self.keypoints1_loc[i][0] < 16:
self.x = 16
elif self.keypoints1_loc[i][0] > 240:
self.x = 240
else:
self.x = int(self.keypoints1_loc[i][0])
if self.keypoints1_loc[i][1] < 16:
self.y = 16
elif self.keypoints1_loc[i][1] > 240:
self.y = 240
else:
self.y = int(self.keypoints1_loc[i][1])
self.ros1.append(self.image1[self.x-16: self.x+16, self.y-16: self.y+16])
for i in range(len(self.keypoints2_loc)):
self.loc2.append(cv2.KeyPoint(x=self.keypoints2_loc[i][0], y=self.keypoints2_loc[i][1], _size=32,
_angle=-1, _response=0.018, _octave=1, _class_id=-1))
if self.keypoints2_loc[i][0] < 16:
self.x = 16
elif self.keypoints2_loc[i][0] > 240:
self.x = 240
else:
self.x = int(self.keypoints2_loc[i][0])
if self.keypoints2_loc[i][1] < 16:
self.y = 16
elif self.keypoints2_loc[i][1] > 240:
self.y = 240
else:
self.y = int(self.keypoints2_loc[i][1])
self.ros2.append(self.image2[self.x-16: self.x+16, self.y-16: self.y+16])
# self.ros1, self.loc1 = slice(self.image1)
# self.ros2, self.loc2 = slice(self.image2)
def compute_kepoint_by_L2_Net(self) -> None:
"""
通过VGG计算描述向量+PCA
筛选并存储描述向量和keypoint。
"""
self.get_the_ros()
self.ros1 = np.array(self.ros1)
self.ros2 = np.array(self.ros2)
print('Compute keypoint by L2-net')
l2net = L2Net("L2Net-HP+")
self.l2_1des = l2net.calc_descriptors(self.ros1)
self.l2_2des = l2net.calc_descriptors(self.ros2)
def match(self, max_match_lenth=200, threshold=0.04, show_match=False):
"""对两幅图片计算得出的特征值进行匹配,对ORB来说使用OpenCV的BFMatcher算法,而对于其他特征检测方法则使用FlannBasedMatcher算法。
max_match_lenth (int, optional): Defaults to 20. 最大匹配点数量
threshold (float, optional): Defaults to 0.04. 默认最大匹配距离差
show_match (bool, optional): Defaults to False. 是否展示匹配结果
"""
self.compute_keypoint()
self.compute_kepoint_by_L2_Net()
good = []
'''计算两张图片中的配对点,并至多取其中最优的`max_match_lenth`个'''
self.match_points = self.matcher.knnMatch(self._descriptors1, self._descriptors2, k=2)
for m, n in self.match_points:
if m.distance < 0.6 * n.distance:
good.append(m)
print(len(good))
ptsA = np.float32([self._keypoints1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
ptsB = np.float32([self._keypoints2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
ransacReprojThreshold = 4
H, status = cv2.findHomography(ptsA, ptsB, cv2.RANSAC, ransacReprojThreshold)
matchesMask = status.ravel().tolist()
h, w = 256, 256
pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
dst = cv2.perspectiveTransform(pts, H)
img2 = cv2.polylines(self.image2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
draw_params = dict(matchColor=(0, 255, 0),
singlePointColor=None,
matchesMask=matchesMask,
flags=2)
img3 = cv2.drawMatches(self.image1, self._keypoints1, img2, self._keypoints2, good, None, **draw_params)
show_image(img3)
'''由最佳匹配取得匹配点对,并进行形变拼接'''
def get_weighted_points(image_points: np.ndarray):
average = np.average(image_points, axis=0)
max_index = np.argmax(np.linalg.norm((image_points - average), axis=1))
return np.append(image_points, np.array([image_points[max_index]]), axis=0)
def pca(image, rate=128):
mean_value = mean(image, axis=0)
image = image-mean_value
c = cov(image, rowvar=False)
eigvalue, eigvector = linalg.eig(mat(c))
index_vec = np.argsort(-eigvalue)
n_largest_index = index_vec[:rate]
t = eigvector[:, n_largest_index]
print(np.shape(image), np.shape(t))
new_image = np.dot(image, t)
return new_image, t, mean_value
class Stitcher:
def __init__(self, image1: np.ndarray, image2: np.ndarray, method: Enum = Method.SIFT, use_kmeans=False):
"""输入图像和匹配,对图像进行拼接
目前采用简单矩阵匹配和平均值拼合
Args:
image1 (np.ndarray): 图像一
image2 (np.ndarray): 图像二
matcher (Matcher): 匹配结果
use_kmeans (bool): 是否使用kmeans 优化点选择
"""
self.image_points1, self.image_points2 = None, None
self.image1 = image1
self.image2 = image2
self.method = method
self.use_kmeans = use_kmeans
self.matcher = Matcher(image1, image2, method=method)
self.M = np.eye(3)
self.image = None
def stich(self, show_result=True, max_match_lenth=40, show_match_point=True, use_partial=False,
use_new_match_method=False, use_gauss_blend=True):
"""对图片进行拼合
show_result (bool, optional): Defaults to True. 是否展示拼合图像
show_match_point (bool, optional): Defaults to True. 是否展示拼合点
"""
self.matcher.match(max_match_lenth=max_match_lenth,
show_match=show_match_point)
if self.use_kmeans:
self.image_points1, self.image_points2 = get_group_center(
self.matcher.image_points1, self.matcher.image_points2)
else:
self.image_points1, self.image_points2 = (
self.matcher.image_points1, self.matcher.image_points2)
if use_new_match_method:
self.M = GeneticTransform(self.image_points1, self.image_points2).run()
else:
self.M, _ = cv2.findHomography(self.image_points1, self.image_points2, method=cv2.RANSAC)
print("Good points and average distance: ", GeneticTransform.get_value(
self.image_points1, self.image_points2, self.M))
left, right, top, bottom = self.get_transformed_size()
width = int(max(right, self.image2.shape[1]) - min(left, 0))
height = int(max(bottom, self.image2.shape[0]) - min(top, 0))
if use_partial:
self.partial_transform()
# 移动矩阵
self.adjustM = np.array(
[[1, 0, max(-left, 0)], # 横向
[0, 1, max(-top, 0)], # 纵向
[0, 0, 1]
], dtype=np.float64)
self.M = np.dot(self.adjustM, self.M)
transformed_1 = cv2.warpPerspective(
self.image1, self.M, (width, height))
transformed_2 = cv2.warpPerspective(
self.image2, self.adjustM, (width, height))
self.image = self.blend(transformed_1, transformed_2, use_gauss_blend=use_gauss_blend)
if show_match_point:
for point1, point2 in zip(self.image_points1, self.image_points2):
point1 = self.get_transformed_position(tuple(point1))
point1 = tuple(map(int, point1))
point2 = self.get_transformed_position(tuple(point2), M=self.adjustM)
point2 = tuple(map(int, point2))
cv2.circle(self.image, point1, 10, (20, 20, 255), 5)
cv2.circle(self.image, point2, 8, (20, 200, 20), 5)
def blend(self, image1: np.ndarray, image2: np.ndarray, use_gauss_blend=True) -> np.ndarray:
"""对图像进行融合
Args:
image1 (np.ndarray): 图像一
image2 (np.ndarray): 图像二
use_gauss_blend
Returns:
np.ndarray: 融合结果
"""
mask = self.generate_mask(image1, image2)
print("Blending")
if use_gauss_blend:
result = gaussian_blend(image1, image2, mask, mask_blend=10)
else:
result = direct_blend(image1, image2, mask, mask_blend=0)
return result
def generate_mask(self, image1: np.ndarray, image2: np.ndarray):
"""生成供融合使用的遮罩,由变换后图像的垂直平分线来构成分界线
Args:
image1
image2
Returns:
np.ndarray: 01数组
"""
print("Generating mask")
# x, y
center1 = self.image1.shape[1] / 2, self.image1.shape[0] / 2
center1 = self.get_transformed_position(center1)
center2 = self.image2.shape[1] / 2, self.image2.shape[0] / 2
center2 = self.get_transformed_position(center2, M=self.adjustM)
x1, y1 = center1
x2, y2 = center2
def function(y, x, *z):
return (y2 - y1) * y < -(x2 - x1) * (x - (x1 + x2) / 2) + (y2 - y1) * (y1 + y2) / 2
mask = np.fromfunction(function, image1.shape)
mask = np.logical_and(mask, np.logical_not(image2)) \
+ np.logical_and(mask, image1)\
+ np.logical_and(image1, np.logical_not(image2))
return mask
def get_transformed_size(self) -> Tuple[int, int, int, int]:
"""计算形变后的边界
计算形变后的边界,从而对图片进行相应的位移,保证全部图像都出现在屏幕上。
Returns:
Tuple[int, int, int, int]: 分别为左右上下边界
"""
conner_0 = (0, 0)
conner_1 = (self.image1.shape[1], 0)
conner_2 = (self.image1.shape[1], self.image1.shape[0])
conner_3 = (0, self.image1.shape[0])
points = [conner_0, conner_1, conner_2, conner_3]
top = min(map(lambda x: self.get_transformed_position(x)[1], points))
bottom = max(
map(lambda x: self.get_transformed_position(x)[1], points))
left = min(map(lambda x: self.get_transformed_position(x)[0], points))
right = max(map(lambda x: self.get_transformed_position(x)[0], points))
return left, right, top, bottom
def get_transformed_position(self, x: Union[float, Tuple[float, float]], y: float = None, M = None) -> Tuple[float, float]:
"""求得某点在变换矩阵(self.M)下的新坐标
Args:
x (Union[float, Tuple[float, float]]): x坐标或(x,y)坐标
y (float, optional): Defaults to None. y坐标,可无
M (np.ndarray, optional): Defaults to None. 利用M进行坐标变换运算
Returns:
Tuple[float, float]: 新坐标
"""
if isinstance(x, tuple):
x, y = x
p = np.array([x, y, 1])[np.newaxis].T
if M is not None:
M = M
else:
M = self.M
pa = np.dot(M, p)
return pa[0, 0] / pa[2, 0], pa[1, 0] / pa[2, 0]
if __name__ == "__main__":
os.chdir(os.path.dirname(__file__))
start_time = time.time()
img1 = cv2.imread("./example/label.jpg", cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread("./example/4.jpg", cv2.IMREAD_GRAYSCALE)
img1 = np.expand_dims(img1, axis=2)
img2 = np.expand_dims(img2, axis=2)
stitcher = Stitcher(img1, img2, Method.SIFT, False)
stitcher.stich(max_match_lenth=50, use_partial=False, use_new_match_method=True, use_gauss_blend=False)
cv2.imwrite('./example/merge.jpg', stitcher.image)
print("Time: ", time.time() - start_time)
print("M: ", stitcher.M) | 0.602997 | 0.217213 |
import logging
from jinja2 import Environment, FileSystemLoader
from ops.charm import CharmBase
from ops.framework import StoredState
from ops.main import main
from ops.model import ActiveStatus
from ops.pebble import ChangeError
from cluster import KnotCluster
logger = logging.getLogger(__name__)
class KnotOperator(CharmBase):
_stored = StoredState()
INITIAL_CONFIG_PATH = '/tmp/initial.conf'
INCLUDE_CONFIG_PATH = '/tmp/include.conf'
FUNCTIONS_PATH = '/opt/functions.sh'
CONFIG_SET_PATH = '/opt/config-set.sh'
def __init__(self, *args):
super().__init__(*args)
self.framework.observe(self.on.knot_pebble_ready, self._on_knot_pebble_ready)
self.framework.observe(self.on.config_changed, self._on_config_changed)
self.framework.observe(self.on.add_zone_action, self._on_add_zone_action)
self.framework.observe(self.on.set_zone_action, self._on_set_zone_action)
self.framework.observe(self.on.set_zone_remotes_action, self._on_set_zone_remotes_action)
self._stored.set_default(layers_added=False)
self._template_env = None
self.cluster = KnotCluster(self, 'knot-cluster')
self.framework.observe(self.cluster.on.cluster_changed, self._on_cluster_changed)
def _start_oneshot_service(self, container, service_name):
try:
container.start(service_name)
except ChangeError as e:
if not (e.change.kind == 'start' and e.change.status == 'Error'
and 'cannot start service: exited quickly with code 0' in e.err):
raise Exception('failed to start a one-shot service') from e
def _push_template(self, container, template_name, target_path, context={}):
if self._template_env is None:
self._template_env = Environment(loader=FileSystemLoader(
f'{self.charm_dir}/templates'))
container.push(
target_path,
self._template_env.get_template(template_name).render(**context),
make_dirs=True
)
def _on_knot_pebble_ready(self, event):
"""Define and start a workload using the Pebble API.
Learn more about Pebble layers at https://github.com/canonical/pebble
"""
self.framework.breakpoint('knot-pebble-ready')
# Define an initial Pebble layer configuration
pebble_layer = {
'summary': 'knot layer',
'description': 'pebble config layer for knot',
'services': {
'knot-ensure-confdb': {
'override': 'replace',
'summary': 'ensure that the Knot configuration database exists',
'command': 'bash -c "knotc --confdb /storage/confdb conf-check ||'
' (knotc --confdb /storage/confdb -f -v conf-init &&'
f' knotc conf-import {self.INITIAL_CONFIG_PATH})"',
'startup': 'disabled',
},
'knot-conf-include': {
'override': 'replace',
'summary': 'Include a set of configuration values in a config transaction',
'command': 'bash -c "knotc conf-begin &&'
f' knotc conf-set include {self.INCLUDE_CONFIG_PATH} &&'
' knotc conf-commit"',
'startup': 'disabled',
},
'knot-conf-set': {
'override': 'replace',
'summary': 'Include a set of configuration values in a config transaction',
'command': f'bash {self.CONFIG_SET_PATH}',
'startup': 'disabled',
},
'knot': {
'override': 'replace',
'summary': 'Start knotd',
'command': 'knotd --confdb /storage/confdb -v',
'startup': 'enabled',
},
},
}
container = event.workload
# Add initial Pebble config layer using the Pebble API
container.add_layer('knot', pebble_layer, combine=True)
self._stored.layers_added = True
# Push the initial config file for knot to use. This is to do a direct import
# of initial state to the configuration database. Changes to the set of addresses
# on which knot needs to listen require a restart. Starting knot without any
# of those addresses leads to an assertion failure on conf-commit setting
# those addresses.
# https://github.com/CZ-NIC/knot/blob/v3.0.6/src/knot/server/server.c#L817
self._push_template(container, 'initial-config.conf.j2', self.INITIAL_CONFIG_PATH)
# Push the script with auxiliary functions to the knot container.
self._push_template(container, 'functions.sh.j2', self.FUNCTIONS_PATH)
self._start_oneshot_service(container, 'knot-ensure-confdb')
if not container.get_service('knot').is_running():
logger.info('Autostarting knot')
container.autostart()
self._apply_config_change(container)
self.unit.status = ActiveStatus('Knot DNS server is ready')
def _apply_config_change(self, container):
context = {'remote_servers': self.model.config['remote-servers'].split()}
self._push_template(container, 'config-changed.sh.j2', self.CONFIG_SET_PATH, context)
self._start_oneshot_service(container, 'knot-conf-set')
def _on_config_changed(self, event):
if not self._stored.layers_added:
return
container = self.unit.get_container("knot")
services = container.get_plan().to_dict().get("services", {})
if 'knot' not in services or not container.get_service('knot').is_running():
event.defer()
self._apply_config_change(container)
def _on_cluster_changed(self, event):
if not self._stored.layers_added:
return
zone_records = self.cluster.zone_records()
for zone, zone_records in zone_records.items():
self._set_zone(zone, zone_records)
zone_remotes = self.cluster.zone_remotes()
for zone, remote_servers in zone_remotes.items():
self._set_zone_remotes(zone, remote_servers)
def _on_add_zone_action(self, event):
if not self.model.unit.is_leader():
logger.warning('Did not execute add-zone as this is not a leader unit.')
event.fail('Cannot run this action on a non-leader unit.')
return
zone = event.params['zone']
self.cluster.update_zones(zone, [])
def _on_set_zone_action(self, event):
if not self.model.unit.is_leader():
logger.warning('Did not execute set-zone as this is not a leader unit.')
event.fail('Cannot run this action on a non-leader unit.')
return
params = event.params
zone = params['zone']
rr = {
'owner': params['owner'],
'ttl': params['ttl'],
'type': params['type'],
'rdata': params['rdata'],
}
self.cluster.update_zones(zone, [rr])
def _set_zone(self, zone, resource_records=[]):
"""
:param str zone: The zone to add or modify.
:param resource_records: The list of RRs to set (may be empty).
:type resource_records: list[dict()]
"""
self.framework.breakpoint()
container = self.unit.get_container("knot")
self._push_template(container, 'set-zone.sh.j2', self.CONFIG_SET_PATH,
{'zone': zone, 'resource_records': resource_records})
self._start_oneshot_service(container, 'knot-conf-set')
def _on_set_zone_remotes_action(self, event):
if not self.model.unit.is_leader():
logger.warning('Did not execute set-zone-remotes as this is not a leader unit.')
event.fail('Cannot run this action on a non-leader unit.')
return
if not self.cluster.is_established:
logger.warning('Did not execute set-zone-remotes the peer relation'
' has not been created yet.')
event.fail('Cannot run this action before the cluster relation is created.')
return
zone = event.params['zone']
remote_servers = event.params['remote-servers']
# Notify other units that they should set remote servers for a zone.
self.cluster.update_zone_remotes(zone, remote_servers)
# Set zone remotes for the leader unit itself.
self._set_zone_remotes(zone, remote_servers)
def _set_zone_remotes(self, zone, remote_servers):
container = self.unit.get_container("knot")
remote_name = f'{zone.lower()}_remote'
self._push_template(container, 'enable-zone-proxy.sh.j2', self.CONFIG_SET_PATH,
{'remote_servers': remote_servers, 'zone': zone,
'remote_name': remote_name})
self._start_oneshot_service(container, 'knot-conf-set')
if __name__ == '__main__':
main(KnotOperator) | src/charm.py |
import logging
from jinja2 import Environment, FileSystemLoader
from ops.charm import CharmBase
from ops.framework import StoredState
from ops.main import main
from ops.model import ActiveStatus
from ops.pebble import ChangeError
from cluster import KnotCluster
logger = logging.getLogger(__name__)
class KnotOperator(CharmBase):
_stored = StoredState()
INITIAL_CONFIG_PATH = '/tmp/initial.conf'
INCLUDE_CONFIG_PATH = '/tmp/include.conf'
FUNCTIONS_PATH = '/opt/functions.sh'
CONFIG_SET_PATH = '/opt/config-set.sh'
def __init__(self, *args):
super().__init__(*args)
self.framework.observe(self.on.knot_pebble_ready, self._on_knot_pebble_ready)
self.framework.observe(self.on.config_changed, self._on_config_changed)
self.framework.observe(self.on.add_zone_action, self._on_add_zone_action)
self.framework.observe(self.on.set_zone_action, self._on_set_zone_action)
self.framework.observe(self.on.set_zone_remotes_action, self._on_set_zone_remotes_action)
self._stored.set_default(layers_added=False)
self._template_env = None
self.cluster = KnotCluster(self, 'knot-cluster')
self.framework.observe(self.cluster.on.cluster_changed, self._on_cluster_changed)
def _start_oneshot_service(self, container, service_name):
try:
container.start(service_name)
except ChangeError as e:
if not (e.change.kind == 'start' and e.change.status == 'Error'
and 'cannot start service: exited quickly with code 0' in e.err):
raise Exception('failed to start a one-shot service') from e
def _push_template(self, container, template_name, target_path, context={}):
if self._template_env is None:
self._template_env = Environment(loader=FileSystemLoader(
f'{self.charm_dir}/templates'))
container.push(
target_path,
self._template_env.get_template(template_name).render(**context),
make_dirs=True
)
def _on_knot_pebble_ready(self, event):
"""Define and start a workload using the Pebble API.
Learn more about Pebble layers at https://github.com/canonical/pebble
"""
self.framework.breakpoint('knot-pebble-ready')
# Define an initial Pebble layer configuration
pebble_layer = {
'summary': 'knot layer',
'description': 'pebble config layer for knot',
'services': {
'knot-ensure-confdb': {
'override': 'replace',
'summary': 'ensure that the Knot configuration database exists',
'command': 'bash -c "knotc --confdb /storage/confdb conf-check ||'
' (knotc --confdb /storage/confdb -f -v conf-init &&'
f' knotc conf-import {self.INITIAL_CONFIG_PATH})"',
'startup': 'disabled',
},
'knot-conf-include': {
'override': 'replace',
'summary': 'Include a set of configuration values in a config transaction',
'command': 'bash -c "knotc conf-begin &&'
f' knotc conf-set include {self.INCLUDE_CONFIG_PATH} &&'
' knotc conf-commit"',
'startup': 'disabled',
},
'knot-conf-set': {
'override': 'replace',
'summary': 'Include a set of configuration values in a config transaction',
'command': f'bash {self.CONFIG_SET_PATH}',
'startup': 'disabled',
},
'knot': {
'override': 'replace',
'summary': 'Start knotd',
'command': 'knotd --confdb /storage/confdb -v',
'startup': 'enabled',
},
},
}
container = event.workload
# Add initial Pebble config layer using the Pebble API
container.add_layer('knot', pebble_layer, combine=True)
self._stored.layers_added = True
# Push the initial config file for knot to use. This is to do a direct import
# of initial state to the configuration database. Changes to the set of addresses
# on which knot needs to listen require a restart. Starting knot without any
# of those addresses leads to an assertion failure on conf-commit setting
# those addresses.
# https://github.com/CZ-NIC/knot/blob/v3.0.6/src/knot/server/server.c#L817
self._push_template(container, 'initial-config.conf.j2', self.INITIAL_CONFIG_PATH)
# Push the script with auxiliary functions to the knot container.
self._push_template(container, 'functions.sh.j2', self.FUNCTIONS_PATH)
self._start_oneshot_service(container, 'knot-ensure-confdb')
if not container.get_service('knot').is_running():
logger.info('Autostarting knot')
container.autostart()
self._apply_config_change(container)
self.unit.status = ActiveStatus('Knot DNS server is ready')
def _apply_config_change(self, container):
context = {'remote_servers': self.model.config['remote-servers'].split()}
self._push_template(container, 'config-changed.sh.j2', self.CONFIG_SET_PATH, context)
self._start_oneshot_service(container, 'knot-conf-set')
def _on_config_changed(self, event):
if not self._stored.layers_added:
return
container = self.unit.get_container("knot")
services = container.get_plan().to_dict().get("services", {})
if 'knot' not in services or not container.get_service('knot').is_running():
event.defer()
self._apply_config_change(container)
def _on_cluster_changed(self, event):
if not self._stored.layers_added:
return
zone_records = self.cluster.zone_records()
for zone, zone_records in zone_records.items():
self._set_zone(zone, zone_records)
zone_remotes = self.cluster.zone_remotes()
for zone, remote_servers in zone_remotes.items():
self._set_zone_remotes(zone, remote_servers)
def _on_add_zone_action(self, event):
if not self.model.unit.is_leader():
logger.warning('Did not execute add-zone as this is not a leader unit.')
event.fail('Cannot run this action on a non-leader unit.')
return
zone = event.params['zone']
self.cluster.update_zones(zone, [])
def _on_set_zone_action(self, event):
if not self.model.unit.is_leader():
logger.warning('Did not execute set-zone as this is not a leader unit.')
event.fail('Cannot run this action on a non-leader unit.')
return
params = event.params
zone = params['zone']
rr = {
'owner': params['owner'],
'ttl': params['ttl'],
'type': params['type'],
'rdata': params['rdata'],
}
self.cluster.update_zones(zone, [rr])
def _set_zone(self, zone, resource_records=[]):
"""
:param str zone: The zone to add or modify.
:param resource_records: The list of RRs to set (may be empty).
:type resource_records: list[dict()]
"""
self.framework.breakpoint()
container = self.unit.get_container("knot")
self._push_template(container, 'set-zone.sh.j2', self.CONFIG_SET_PATH,
{'zone': zone, 'resource_records': resource_records})
self._start_oneshot_service(container, 'knot-conf-set')
def _on_set_zone_remotes_action(self, event):
if not self.model.unit.is_leader():
logger.warning('Did not execute set-zone-remotes as this is not a leader unit.')
event.fail('Cannot run this action on a non-leader unit.')
return
if not self.cluster.is_established:
logger.warning('Did not execute set-zone-remotes the peer relation'
' has not been created yet.')
event.fail('Cannot run this action before the cluster relation is created.')
return
zone = event.params['zone']
remote_servers = event.params['remote-servers']
# Notify other units that they should set remote servers for a zone.
self.cluster.update_zone_remotes(zone, remote_servers)
# Set zone remotes for the leader unit itself.
self._set_zone_remotes(zone, remote_servers)
def _set_zone_remotes(self, zone, remote_servers):
container = self.unit.get_container("knot")
remote_name = f'{zone.lower()}_remote'
self._push_template(container, 'enable-zone-proxy.sh.j2', self.CONFIG_SET_PATH,
{'remote_servers': remote_servers, 'zone': zone,
'remote_name': remote_name})
self._start_oneshot_service(container, 'knot-conf-set')
if __name__ == '__main__':
main(KnotOperator) | 0.465387 | 0.078784 |
import unittest
from pprint import pprint
import pandas as pd
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from testing.models import param_sweep
from cadCAD import configs
from testing.generic_test import make_generic_test
from testing.models.param_sweep import some_function, g as sweep_params
exec_mode = ExecutionMode()
exec_ctx = ExecutionContext(context=exec_mode.local_mode)
run = Executor(exec_context=exec_ctx, configs=configs)
# sim, run, substep, timestep
def get_expected_results(subset, run, beta, gamma):
return {
(subset, run, 0, 0): {'policies': {}, 'sweeped': {}, 'alpha': 0, 'beta': 0},
(subset, run, 1, 1): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': {'beta': beta, 'gamma': gamma}, 'alpha': 1, 'beta': beta},
(subset, run, 1, 2): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': {'beta': beta, 'gamma': gamma}, 'alpha': 1, 'beta': beta},
(subset, run, 1, 3): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': {'beta': beta, 'gamma': gamma}, 'alpha': 1, 'beta': beta},
(subset, run, 2, 1): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': {'beta': beta, 'gamma': gamma}, 'alpha': 1, 'beta': beta},
(subset, run, 2, 2): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': {'beta': beta, 'gamma': gamma}, 'alpha': 1, 'beta': beta},
(subset, run, 2, 3): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': {'beta': beta, 'gamma': gamma}, 'alpha': 1, 'beta': beta},
(subset, run, 3, 1): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': {'beta': beta, 'gamma': gamma}, 'alpha': 1, 'beta': beta},
(subset, run, 3, 2): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': {'beta': beta, 'gamma': gamma}, 'alpha': 1, 'beta': beta},
(subset, run, 3, 3): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': {'beta': beta, 'gamma': gamma}, 'alpha': 1, 'beta': beta},
(subset, run, 4, 1): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': {'beta': beta, 'gamma': gamma}, 'alpha': 1, 'beta': beta},
(subset, run, 4, 2): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': {'beta': beta, 'gamma': gamma}, 'alpha': 1, 'beta': beta},
(subset, run, 4, 3): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': {'beta': beta, 'gamma': gamma}, 'alpha': 1, 'beta': beta},
(subset, run, 5, 1): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': beta, 'alpha': 1, 'beta': beta},
(subset, run, 5, 2): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': beta, 'alpha': 1, 'beta': beta},
(subset, run, 5, 3): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': beta, 'alpha': 1, 'beta': beta}
}
def generate_expected(sweep_params):
def template(sweep_params):
subset_count = max(len(x) for x in list(sweep_params.values()))
expected_results, expected_results_1, expected_results_2 = {}, {}, {}
for subset in range(subset_count):
expected_results_1a = get_expected_results(subset, 1, 2, 3)
expected_results_1b = get_expected_results(subset, 2, 2, 3)
expected_results_1.update(expected_results_1a)
expected_results_1.update(expected_results_1b)
expected_results_2 = {}
expected_results_2a = get_expected_results(subset, 1, some_function, 4)
expected_results_2b = get_expected_results(subset, 2, some_function, 4)
expected_results_2.update(expected_results_2a)
expected_results_2.update(expected_results_2b)
expected_results.update(expected_results_1)
expected_results.update(expected_results_2)
yield expected_results
merged_expected = list(template(sweep_params))
result = {}
for d in merged_expected:
result.update(d)
return result
def row(a, b):
return a == b
def create_test_params(feature, fields):
raw_result, tensor_fields, sessions = run.execute()
df = pd.DataFrame(raw_result)
expected = generate_expected(sweep_params)
return [[feature, df, expected, fields, [row]]]
params = list(create_test_params("param_sweep", ['alpha', 'beta', 'policies', 'sweeped']))
class GenericTest(make_generic_test(params)):
pass
if __name__ == '__main__':
unittest.main() | testing/tests/param_sweep.py | import unittest
from pprint import pprint
import pandas as pd
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from testing.models import param_sweep
from cadCAD import configs
from testing.generic_test import make_generic_test
from testing.models.param_sweep import some_function, g as sweep_params
exec_mode = ExecutionMode()
exec_ctx = ExecutionContext(context=exec_mode.local_mode)
run = Executor(exec_context=exec_ctx, configs=configs)
# sim, run, substep, timestep
def get_expected_results(subset, run, beta, gamma):
return {
(subset, run, 0, 0): {'policies': {}, 'sweeped': {}, 'alpha': 0, 'beta': 0},
(subset, run, 1, 1): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': {'beta': beta, 'gamma': gamma}, 'alpha': 1, 'beta': beta},
(subset, run, 1, 2): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': {'beta': beta, 'gamma': gamma}, 'alpha': 1, 'beta': beta},
(subset, run, 1, 3): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': {'beta': beta, 'gamma': gamma}, 'alpha': 1, 'beta': beta},
(subset, run, 2, 1): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': {'beta': beta, 'gamma': gamma}, 'alpha': 1, 'beta': beta},
(subset, run, 2, 2): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': {'beta': beta, 'gamma': gamma}, 'alpha': 1, 'beta': beta},
(subset, run, 2, 3): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': {'beta': beta, 'gamma': gamma}, 'alpha': 1, 'beta': beta},
(subset, run, 3, 1): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': {'beta': beta, 'gamma': gamma}, 'alpha': 1, 'beta': beta},
(subset, run, 3, 2): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': {'beta': beta, 'gamma': gamma}, 'alpha': 1, 'beta': beta},
(subset, run, 3, 3): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': {'beta': beta, 'gamma': gamma}, 'alpha': 1, 'beta': beta},
(subset, run, 4, 1): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': {'beta': beta, 'gamma': gamma}, 'alpha': 1, 'beta': beta},
(subset, run, 4, 2): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': {'beta': beta, 'gamma': gamma}, 'alpha': 1, 'beta': beta},
(subset, run, 4, 3): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': {'beta': beta, 'gamma': gamma}, 'alpha': 1, 'beta': beta},
(subset, run, 5, 1): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': beta, 'alpha': 1, 'beta': beta},
(subset, run, 5, 2): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': beta, 'alpha': 1, 'beta': beta},
(subset, run, 5, 3): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': beta, 'alpha': 1, 'beta': beta}
}
def generate_expected(sweep_params):
def template(sweep_params):
subset_count = max(len(x) for x in list(sweep_params.values()))
expected_results, expected_results_1, expected_results_2 = {}, {}, {}
for subset in range(subset_count):
expected_results_1a = get_expected_results(subset, 1, 2, 3)
expected_results_1b = get_expected_results(subset, 2, 2, 3)
expected_results_1.update(expected_results_1a)
expected_results_1.update(expected_results_1b)
expected_results_2 = {}
expected_results_2a = get_expected_results(subset, 1, some_function, 4)
expected_results_2b = get_expected_results(subset, 2, some_function, 4)
expected_results_2.update(expected_results_2a)
expected_results_2.update(expected_results_2b)
expected_results.update(expected_results_1)
expected_results.update(expected_results_2)
yield expected_results
merged_expected = list(template(sweep_params))
result = {}
for d in merged_expected:
result.update(d)
return result
def row(a, b):
return a == b
def create_test_params(feature, fields):
raw_result, tensor_fields, sessions = run.execute()
df = pd.DataFrame(raw_result)
expected = generate_expected(sweep_params)
return [[feature, df, expected, fields, [row]]]
params = list(create_test_params("param_sweep", ['alpha', 'beta', 'policies', 'sweeped']))
class GenericTest(make_generic_test(params)):
pass
if __name__ == '__main__':
unittest.main() | 0.580352 | 0.641001 |
from hvad.utils import get_translation
class NULL:pass
class BaseDescriptor(object):
"""
Base descriptor class with a helper to get the translations instance.
"""
def __init__(self, opts):
self.opts = opts
def translation(self, instance):
cached = getattr(instance, self.opts.translations_cache, None)
if not cached:
cached = get_translation(instance)
setattr(instance, self.opts.translations_cache, cached)
return cached
class TranslatedAttribute(BaseDescriptor):
"""
Basic translated attribute descriptor.
Proxies attributes from the shared instance to the translated instance.
"""
def __init__(self, opts, name):
self.name = name
super(TranslatedAttribute, self).__init__(opts)
def __get__(self, instance, instance_type=None):
if not instance:
# Don't raise an attribute error so we can use it in admin.
return self.opts.translations_model._meta.get_field_by_name(
self.name)[0].default
return getattr(self.translation(instance), self.name)
def __set__(self, instance, value):
setattr(self.translation(instance), self.name, value)
def __delete__(self, instance):
delattr(self.translation(instance), self.name)
class LanguageCodeAttribute(TranslatedAttribute):
"""
The language_code attribute is different from other attribtues as it cannot
be deleted. Trying to do so will always cause an attribute error.
"""
def __init__(self, opts):
super(LanguageCodeAttribute, self).__init__(opts, 'language_code')
def __set__(self, instance, value):
raise AttributeError("The 'language_code' attribute cannot be " +\
"changed directly! Use the translate() method instead.")
def __delete__(self, instance):
raise AttributeError("The 'language_code' attribute cannot be deleted!") | hvad/descriptors.py | from hvad.utils import get_translation
class NULL:pass
class BaseDescriptor(object):
"""
Base descriptor class with a helper to get the translations instance.
"""
def __init__(self, opts):
self.opts = opts
def translation(self, instance):
cached = getattr(instance, self.opts.translations_cache, None)
if not cached:
cached = get_translation(instance)
setattr(instance, self.opts.translations_cache, cached)
return cached
class TranslatedAttribute(BaseDescriptor):
"""
Basic translated attribute descriptor.
Proxies attributes from the shared instance to the translated instance.
"""
def __init__(self, opts, name):
self.name = name
super(TranslatedAttribute, self).__init__(opts)
def __get__(self, instance, instance_type=None):
if not instance:
# Don't raise an attribute error so we can use it in admin.
return self.opts.translations_model._meta.get_field_by_name(
self.name)[0].default
return getattr(self.translation(instance), self.name)
def __set__(self, instance, value):
setattr(self.translation(instance), self.name, value)
def __delete__(self, instance):
delattr(self.translation(instance), self.name)
class LanguageCodeAttribute(TranslatedAttribute):
"""
The language_code attribute is different from other attribtues as it cannot
be deleted. Trying to do so will always cause an attribute error.
"""
def __init__(self, opts):
super(LanguageCodeAttribute, self).__init__(opts, 'language_code')
def __set__(self, instance, value):
raise AttributeError("The 'language_code' attribute cannot be " +\
"changed directly! Use the translate() method instead.")
def __delete__(self, instance):
raise AttributeError("The 'language_code' attribute cannot be deleted!") | 0.81457 | 0.080213 |
from django.urls import reverse
from django.contrib.auth import get_user_model
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.test import APITestCase, APIClient
from .fixture import RegiaoFactory
User = get_user_model()
class RegiaoViewSetTests(APITestCase):
def setUp(self):
self.user = User.objects.create_user(
username='bruce', email='<EMAIL>', password='<PASSWORD>'
)
self.anon_user = User.objects.create_user(
username='jane', email='<EMAIL>', password='<PASSWORD>'
)
self.unath_client = APIClient()
self.client = APIClient()
token, _ = Token.objects.get_or_create(user=self.user)
self.client.credentials(HTTP_AUTHORIZATION=f'Token {token.key}')
def test_perform_create(self):
data = {
'nome': 'Centro Oeste',
'estado': 'Comics',
}
response = self.unath_client.post(reverse('regiao-list'), data=data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.post(reverse('regiao-list'), data=data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data['nome'], data['nome'])
self.assertEqual(response.data['estado'], data['estado'])
self.assertEqual(response.data['slug'], 'centro-oeste-comics')
def test_list(self):
RegiaoFactory.create_batch(5)
response = self.unath_client.get(reverse('regiao-list'))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.get(reverse('regiao-list'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue(len(response.data), 5)
def test_retrieve(self):
regiao = RegiaoFactory.create(id=10)
response = self.unath_client.get(reverse('regiao-detail', args=[10]))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.get(reverse('regiao-detail', args=[10]))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['nome'], regiao.nome)
def test_update(self):
regiao = RegiaoFactory.create(id=21)
data = {'nome': 'Sudeste', 'estado': 'Cave'}
self.assertNotEqual(regiao.nome, data['nome'])
response = self.unath_client.put(reverse('regiao-detail', args=[21]), data=data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.put(reverse('regiao-detail', args=[21]), data=data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['nome'], data['nome'])
self.assertEqual(response.data['estado'], data['estado'])
self.assertEqual(response.data['slug'], 'sudeste-cave')
def test_partial_update(self):
regiao = RegiaoFactory.create(id=22)
data = {'nome': 'Nordeste'}
self.assertNotEqual(regiao.nome, data['nome'])
response = self.unath_client.patch(reverse('regiao-detail', args=[22]), data=data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.patch(reverse('regiao-detail', args=[22]), data=data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['nome'], data['nome'])
def test_destroy(self):
RegiaoFactory.create(id=15)
response = self.unath_client.get(reverse('regiao-detail', args=[15]))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.get(reverse('regiao-list'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue(len(response.data), 1)
response = self.client.delete(reverse('regiao-detail', args=[15]))
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
response = self.client.get(reverse('regiao-list'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 0) | localizacao/tests/test_viewsets_regiao.py | from django.urls import reverse
from django.contrib.auth import get_user_model
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.test import APITestCase, APIClient
from .fixture import RegiaoFactory
User = get_user_model()
class RegiaoViewSetTests(APITestCase):
def setUp(self):
self.user = User.objects.create_user(
username='bruce', email='<EMAIL>', password='<PASSWORD>'
)
self.anon_user = User.objects.create_user(
username='jane', email='<EMAIL>', password='<PASSWORD>'
)
self.unath_client = APIClient()
self.client = APIClient()
token, _ = Token.objects.get_or_create(user=self.user)
self.client.credentials(HTTP_AUTHORIZATION=f'Token {token.key}')
def test_perform_create(self):
data = {
'nome': 'Centro Oeste',
'estado': 'Comics',
}
response = self.unath_client.post(reverse('regiao-list'), data=data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.post(reverse('regiao-list'), data=data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data['nome'], data['nome'])
self.assertEqual(response.data['estado'], data['estado'])
self.assertEqual(response.data['slug'], 'centro-oeste-comics')
def test_list(self):
RegiaoFactory.create_batch(5)
response = self.unath_client.get(reverse('regiao-list'))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.get(reverse('regiao-list'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue(len(response.data), 5)
def test_retrieve(self):
regiao = RegiaoFactory.create(id=10)
response = self.unath_client.get(reverse('regiao-detail', args=[10]))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.get(reverse('regiao-detail', args=[10]))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['nome'], regiao.nome)
def test_update(self):
regiao = RegiaoFactory.create(id=21)
data = {'nome': 'Sudeste', 'estado': 'Cave'}
self.assertNotEqual(regiao.nome, data['nome'])
response = self.unath_client.put(reverse('regiao-detail', args=[21]), data=data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.put(reverse('regiao-detail', args=[21]), data=data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['nome'], data['nome'])
self.assertEqual(response.data['estado'], data['estado'])
self.assertEqual(response.data['slug'], 'sudeste-cave')
def test_partial_update(self):
regiao = RegiaoFactory.create(id=22)
data = {'nome': 'Nordeste'}
self.assertNotEqual(regiao.nome, data['nome'])
response = self.unath_client.patch(reverse('regiao-detail', args=[22]), data=data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.patch(reverse('regiao-detail', args=[22]), data=data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['nome'], data['nome'])
def test_destroy(self):
RegiaoFactory.create(id=15)
response = self.unath_client.get(reverse('regiao-detail', args=[15]))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.get(reverse('regiao-list'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue(len(response.data), 1)
response = self.client.delete(reverse('regiao-detail', args=[15]))
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
response = self.client.get(reverse('regiao-list'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 0) | 0.46563 | 0.250907 |
from typing import Any
ADC1 = 1073815552 # type: int
ADC123_COMMON = 1073816320 # type: int
ADC2 = 1073815808 # type: int
ADC3 = 1073816064 # type: int
ADC_CR1 = 4 # type: int
ADC_CR2 = 8 # type: int
ADC_DR = 76 # type: int
ADC_HTR = 36 # type: int
ADC_JDR1 = 60 # type: int
ADC_JDR2 = 64 # type: int
ADC_JDR3 = 68 # type: int
ADC_JDR4 = 72 # type: int
ADC_JOFR1 = 20 # type: int
ADC_JOFR2 = 24 # type: int
ADC_JOFR3 = 28 # type: int
ADC_JOFR4 = 32 # type: int
ADC_JSQR = 56 # type: int
ADC_LTR = 40 # type: int
ADC_SMPR1 = 12 # type: int
ADC_SMPR2 = 16 # type: int
ADC_SQR1 = 44 # type: int
ADC_SQR2 = 48 # type: int
ADC_SQR3 = 52 # type: int
ADC_SR = 0 # type: int
CAN1 = 1073767424 # type: int
CAN2 = 1073768448 # type: int
CRC = 1073885184 # type: int
CRC_CR = 8 # type: int
CRC_DR = 0 # type: int
CRC_IDR = 4 # type: int
DAC = 1073771520 # type: int
DAC1 = 1073771520 # type: int
DAC_CR = 0 # type: int
DAC_DHR12L1 = 12 # type: int
DAC_DHR12L2 = 24 # type: int
DAC_DHR12LD = 36 # type: int
DAC_DHR12R1 = 8 # type: int
DAC_DHR12R2 = 20 # type: int
DAC_DHR12RD = 32 # type: int
DAC_DHR8R1 = 16 # type: int
DAC_DHR8R2 = 28 # type: int
DAC_DHR8RD = 40 # type: int
DAC_DOR1 = 44 # type: int
DAC_DOR2 = 48 # type: int
DAC_SR = 52 # type: int
DAC_SWTRIGR = 4 # type: int
DBGMCU = 3758366720 # type: int
DBGMCU_APB1FZ = 8 # type: int
DBGMCU_APB2FZ = 12 # type: int
DBGMCU_CR = 4 # type: int
DBGMCU_IDCODE = 0 # type: int
DMA1 = 1073897472 # type: int
DMA2 = 1073898496 # type: int
DMA_HIFCR = 12 # type: int
DMA_HISR = 4 # type: int
DMA_LIFCR = 8 # type: int
DMA_LISR = 0 # type: int
EXTI = 1073822720 # type: int
EXTI_EMR = 4 # type: int
EXTI_FTSR = 12 # type: int
EXTI_IMR = 0 # type: int
EXTI_PR = 20 # type: int
EXTI_RTSR = 8 # type: int
EXTI_SWIER = 16 # type: int
FLASH = 1073888256 # type: int
FLASH_ACR = 0 # type: int
FLASH_CR = 16 # type: int
FLASH_KEYR = 4 # type: int
FLASH_OPTCR = 20 # type: int
FLASH_OPTCR1 = 24 # type: int
FLASH_OPTKEYR = 8 # type: int
FLASH_SR = 12 # type: int
GPIOA = 1073872896 # type: int
GPIOB = 1073873920 # type: int
GPIOC = 1073874944 # type: int
GPIOD = 1073875968 # type: int
GPIOE = 1073876992 # type: int
GPIOF = 1073878016 # type: int
GPIOG = 1073879040 # type: int
GPIOH = 1073880064 # type: int
GPIOI = 1073881088 # type: int
GPIO_AFR0 = 32 # type: int
GPIO_AFR1 = 36 # type: int
GPIO_BSRR = 24 # type: int
GPIO_BSRRH = 26 # type: int
GPIO_BSRRL = 24 # type: int
GPIO_IDR = 16 # type: int
GPIO_LCKR = 28 # type: int
GPIO_MODER = 0 # type: int
GPIO_ODR = 20 # type: int
GPIO_OSPEEDR = 8 # type: int
GPIO_OTYPER = 4 # type: int
GPIO_PUPDR = 12 # type: int
I2C1 = 1073763328 # type: int
I2C2 = 1073764352 # type: int
I2C3 = 1073765376 # type: int
I2C_CCR = 28 # type: int
I2C_CR1 = 0 # type: int
I2C_CR2 = 4 # type: int
I2C_DR = 16 # type: int
I2C_OAR1 = 8 # type: int
I2C_OAR2 = 12 # type: int
I2C_SR1 = 20 # type: int
I2C_SR2 = 24 # type: int
I2C_TRISE = 32 # type: int
I2S2EXT = 1073755136 # type: int
I2S3EXT = 1073758208 # type: int
IWDG = 1073754112 # type: int
IWDG_KR = 0 # type: int
IWDG_PR = 4 # type: int
IWDG_RLR = 8 # type: int
IWDG_SR = 12 # type: int
PWR = 1073770496 # type: int
PWR_CR = 0 # type: int
PWR_CSR = 4 # type: int
RCC = 1073887232 # type: int
RCC_AHB1ENR = 48 # type: int
RCC_AHB1LPENR = 80 # type: int
RCC_AHB1RSTR = 16 # type: int
RCC_AHB2ENR = 52 # type: int
RCC_AHB2LPENR = 84 # type: int
RCC_AHB2RSTR = 20 # type: int
RCC_AHB3ENR = 56 # type: int
RCC_AHB3LPENR = 88 # type: int
RCC_AHB3RSTR = 24 # type: int
RCC_APB1ENR = 64 # type: int
RCC_APB1LPENR = 96 # type: int
RCC_APB1RSTR = 32 # type: int
RCC_APB2ENR = 68 # type: int
RCC_APB2LPENR = 100 # type: int
RCC_APB2RSTR = 36 # type: int
RCC_BDCR = 112 # type: int
RCC_CFGR = 8 # type: int
RCC_CIR = 12 # type: int
RCC_CR = 0 # type: int
RCC_CSR = 116 # type: int
RCC_PLLCFGR = 4 # type: int
RCC_PLLI2SCFGR = 132 # type: int
RCC_SSCGR = 128 # type: int
RNG = 1342572544 # type: int
RNG_CR = 0 # type: int
RNG_DR = 8 # type: int
RNG_SR = 4 # type: int
RTC = 1073752064 # type: int
RTC_ALRMAR = 28 # type: int
RTC_ALRMASSR = 68 # type: int
RTC_ALRMBR = 32 # type: int
RTC_ALRMBSSR = 72 # type: int
RTC_BKP0R = 80 # type: int
RTC_BKP10R = 120 # type: int
RTC_BKP11R = 124 # type: int
RTC_BKP12R = 128 # type: int
RTC_BKP13R = 132 # type: int
RTC_BKP14R = 136 # type: int
RTC_BKP15R = 140 # type: int
RTC_BKP16R = 144 # type: int
RTC_BKP17R = 148 # type: int
RTC_BKP18R = 152 # type: int
RTC_BKP19R = 156 # type: int
RTC_BKP1R = 84 # type: int
RTC_BKP2R = 88 # type: int
RTC_BKP3R = 92 # type: int
RTC_BKP4R = 96 # type: int
RTC_BKP5R = 100 # type: int
RTC_BKP6R = 104 # type: int
RTC_BKP7R = 108 # type: int
RTC_BKP8R = 112 # type: int
RTC_BKP9R = 116 # type: int
RTC_CALIBR = 24 # type: int
RTC_CALR = 60 # type: int
RTC_CR = 8 # type: int
RTC_DR = 4 # type: int
RTC_ISR = 12 # type: int
RTC_PRER = 16 # type: int
RTC_SHIFTR = 44 # type: int
RTC_SSR = 40 # type: int
RTC_TAFCR = 64 # type: int
RTC_TR = 0 # type: int
RTC_TSDR = 52 # type: int
RTC_TSSSR = 56 # type: int
RTC_TSTR = 48 # type: int
RTC_WPR = 36 # type: int
RTC_WUTR = 20 # type: int
SDIO = 1073818624 # type: int
SPI1 = 1073819648 # type: int
SPI2 = 1073756160 # type: int
SPI3 = 1073757184 # type: int
SPI_CR1 = 0 # type: int
SPI_CR2 = 4 # type: int
SPI_CRCPR = 16 # type: int
SPI_DR = 12 # type: int
SPI_I2SCFGR = 28 # type: int
SPI_I2SPR = 32 # type: int
SPI_RXCRCR = 20 # type: int
SPI_SR = 8 # type: int
SPI_TXCRCR = 24 # type: int
SYSCFG = 1073821696 # type: int
SYSCFG_CMPCR = 32 # type: int
SYSCFG_EXTICR0 = 8 # type: int
SYSCFG_EXTICR1 = 12 # type: int
SYSCFG_EXTICR2 = 16 # type: int
SYSCFG_EXTICR3 = 20 # type: int
SYSCFG_MEMRMP = 0 # type: int
SYSCFG_PMC = 4 # type: int
TIM1 = 1073807360 # type: int
TIM10 = 1073824768 # type: int
TIM11 = 1073825792 # type: int
TIM12 = 1073747968 # type: int
TIM13 = 1073748992 # type: int
TIM14 = 1073750016 # type: int
TIM2 = 1073741824 # type: int
TIM3 = 1073742848 # type: int
TIM4 = 1073743872 # type: int
TIM5 = 1073744896 # type: int
TIM6 = 1073745920 # type: int
TIM7 = 1073746944 # type: int
TIM8 = 1073808384 # type: int
TIM9 = 1073823744 # type: int
TIM_ARR = 44 # type: int
TIM_BDTR = 68 # type: int
TIM_CCER = 32 # type: int
TIM_CCMR1 = 24 # type: int
TIM_CCMR2 = 28 # type: int
TIM_CCR1 = 52 # type: int
TIM_CCR2 = 56 # type: int
TIM_CCR3 = 60 # type: int
TIM_CCR4 = 64 # type: int
TIM_CNT = 36 # type: int
TIM_CR1 = 0 # type: int
TIM_CR2 = 4 # type: int
TIM_DCR = 72 # type: int
TIM_DIER = 12 # type: int
TIM_DMAR = 76 # type: int
TIM_EGR = 20 # type: int
TIM_OR = 80 # type: int
TIM_PSC = 40 # type: int
TIM_RCR = 48 # type: int
TIM_SMCR = 8 # type: int
TIM_SR = 16 # type: int
UART4 = 1073761280 # type: int
UART5 = 1073762304 # type: int
USART1 = 1073811456 # type: int
USART2 = 1073759232 # type: int
USART3 = 1073760256 # type: int
USART6 = 1073812480 # type: int
USART_BRR = 8 # type: int
USART_CR1 = 12 # type: int
USART_CR2 = 16 # type: int
USART_CR3 = 20 # type: int
USART_DR = 4 # type: int
USART_GTPR = 24 # type: int
USART_SR = 0 # type: int
WWDG = 1073753088 # type: int
WWDG_CFR = 4 # type: int
WWDG_CR = 0 # type: int
WWDG_SR = 8 # type: int
mem16: Any ## <class 'mem'> = <16-bit memory>
mem32: Any ## <class 'mem'> = <32-bit memory>
mem8: Any ## <class 'mem'> = <8-bit memory> | stubs/micropython-v1_12-pyboard/stm.py | from typing import Any
ADC1 = 1073815552 # type: int
ADC123_COMMON = 1073816320 # type: int
ADC2 = 1073815808 # type: int
ADC3 = 1073816064 # type: int
ADC_CR1 = 4 # type: int
ADC_CR2 = 8 # type: int
ADC_DR = 76 # type: int
ADC_HTR = 36 # type: int
ADC_JDR1 = 60 # type: int
ADC_JDR2 = 64 # type: int
ADC_JDR3 = 68 # type: int
ADC_JDR4 = 72 # type: int
ADC_JOFR1 = 20 # type: int
ADC_JOFR2 = 24 # type: int
ADC_JOFR3 = 28 # type: int
ADC_JOFR4 = 32 # type: int
ADC_JSQR = 56 # type: int
ADC_LTR = 40 # type: int
ADC_SMPR1 = 12 # type: int
ADC_SMPR2 = 16 # type: int
ADC_SQR1 = 44 # type: int
ADC_SQR2 = 48 # type: int
ADC_SQR3 = 52 # type: int
ADC_SR = 0 # type: int
CAN1 = 1073767424 # type: int
CAN2 = 1073768448 # type: int
CRC = 1073885184 # type: int
CRC_CR = 8 # type: int
CRC_DR = 0 # type: int
CRC_IDR = 4 # type: int
DAC = 1073771520 # type: int
DAC1 = 1073771520 # type: int
DAC_CR = 0 # type: int
DAC_DHR12L1 = 12 # type: int
DAC_DHR12L2 = 24 # type: int
DAC_DHR12LD = 36 # type: int
DAC_DHR12R1 = 8 # type: int
DAC_DHR12R2 = 20 # type: int
DAC_DHR12RD = 32 # type: int
DAC_DHR8R1 = 16 # type: int
DAC_DHR8R2 = 28 # type: int
DAC_DHR8RD = 40 # type: int
DAC_DOR1 = 44 # type: int
DAC_DOR2 = 48 # type: int
DAC_SR = 52 # type: int
DAC_SWTRIGR = 4 # type: int
DBGMCU = 3758366720 # type: int
DBGMCU_APB1FZ = 8 # type: int
DBGMCU_APB2FZ = 12 # type: int
DBGMCU_CR = 4 # type: int
DBGMCU_IDCODE = 0 # type: int
DMA1 = 1073897472 # type: int
DMA2 = 1073898496 # type: int
DMA_HIFCR = 12 # type: int
DMA_HISR = 4 # type: int
DMA_LIFCR = 8 # type: int
DMA_LISR = 0 # type: int
EXTI = 1073822720 # type: int
EXTI_EMR = 4 # type: int
EXTI_FTSR = 12 # type: int
EXTI_IMR = 0 # type: int
EXTI_PR = 20 # type: int
EXTI_RTSR = 8 # type: int
EXTI_SWIER = 16 # type: int
FLASH = 1073888256 # type: int
FLASH_ACR = 0 # type: int
FLASH_CR = 16 # type: int
FLASH_KEYR = 4 # type: int
FLASH_OPTCR = 20 # type: int
FLASH_OPTCR1 = 24 # type: int
FLASH_OPTKEYR = 8 # type: int
FLASH_SR = 12 # type: int
GPIOA = 1073872896 # type: int
GPIOB = 1073873920 # type: int
GPIOC = 1073874944 # type: int
GPIOD = 1073875968 # type: int
GPIOE = 1073876992 # type: int
GPIOF = 1073878016 # type: int
GPIOG = 1073879040 # type: int
GPIOH = 1073880064 # type: int
GPIOI = 1073881088 # type: int
GPIO_AFR0 = 32 # type: int
GPIO_AFR1 = 36 # type: int
GPIO_BSRR = 24 # type: int
GPIO_BSRRH = 26 # type: int
GPIO_BSRRL = 24 # type: int
GPIO_IDR = 16 # type: int
GPIO_LCKR = 28 # type: int
GPIO_MODER = 0 # type: int
GPIO_ODR = 20 # type: int
GPIO_OSPEEDR = 8 # type: int
GPIO_OTYPER = 4 # type: int
GPIO_PUPDR = 12 # type: int
I2C1 = 1073763328 # type: int
I2C2 = 1073764352 # type: int
I2C3 = 1073765376 # type: int
I2C_CCR = 28 # type: int
I2C_CR1 = 0 # type: int
I2C_CR2 = 4 # type: int
I2C_DR = 16 # type: int
I2C_OAR1 = 8 # type: int
I2C_OAR2 = 12 # type: int
I2C_SR1 = 20 # type: int
I2C_SR2 = 24 # type: int
I2C_TRISE = 32 # type: int
I2S2EXT = 1073755136 # type: int
I2S3EXT = 1073758208 # type: int
IWDG = 1073754112 # type: int
IWDG_KR = 0 # type: int
IWDG_PR = 4 # type: int
IWDG_RLR = 8 # type: int
IWDG_SR = 12 # type: int
PWR = 1073770496 # type: int
PWR_CR = 0 # type: int
PWR_CSR = 4 # type: int
RCC = 1073887232 # type: int
RCC_AHB1ENR = 48 # type: int
RCC_AHB1LPENR = 80 # type: int
RCC_AHB1RSTR = 16 # type: int
RCC_AHB2ENR = 52 # type: int
RCC_AHB2LPENR = 84 # type: int
RCC_AHB2RSTR = 20 # type: int
RCC_AHB3ENR = 56 # type: int
RCC_AHB3LPENR = 88 # type: int
RCC_AHB3RSTR = 24 # type: int
RCC_APB1ENR = 64 # type: int
RCC_APB1LPENR = 96 # type: int
RCC_APB1RSTR = 32 # type: int
RCC_APB2ENR = 68 # type: int
RCC_APB2LPENR = 100 # type: int
RCC_APB2RSTR = 36 # type: int
RCC_BDCR = 112 # type: int
RCC_CFGR = 8 # type: int
RCC_CIR = 12 # type: int
RCC_CR = 0 # type: int
RCC_CSR = 116 # type: int
RCC_PLLCFGR = 4 # type: int
RCC_PLLI2SCFGR = 132 # type: int
RCC_SSCGR = 128 # type: int
RNG = 1342572544 # type: int
RNG_CR = 0 # type: int
RNG_DR = 8 # type: int
RNG_SR = 4 # type: int
RTC = 1073752064 # type: int
RTC_ALRMAR = 28 # type: int
RTC_ALRMASSR = 68 # type: int
RTC_ALRMBR = 32 # type: int
RTC_ALRMBSSR = 72 # type: int
RTC_BKP0R = 80 # type: int
RTC_BKP10R = 120 # type: int
RTC_BKP11R = 124 # type: int
RTC_BKP12R = 128 # type: int
RTC_BKP13R = 132 # type: int
RTC_BKP14R = 136 # type: int
RTC_BKP15R = 140 # type: int
RTC_BKP16R = 144 # type: int
RTC_BKP17R = 148 # type: int
RTC_BKP18R = 152 # type: int
RTC_BKP19R = 156 # type: int
RTC_BKP1R = 84 # type: int
RTC_BKP2R = 88 # type: int
RTC_BKP3R = 92 # type: int
RTC_BKP4R = 96 # type: int
RTC_BKP5R = 100 # type: int
RTC_BKP6R = 104 # type: int
RTC_BKP7R = 108 # type: int
RTC_BKP8R = 112 # type: int
RTC_BKP9R = 116 # type: int
RTC_CALIBR = 24 # type: int
RTC_CALR = 60 # type: int
RTC_CR = 8 # type: int
RTC_DR = 4 # type: int
RTC_ISR = 12 # type: int
RTC_PRER = 16 # type: int
RTC_SHIFTR = 44 # type: int
RTC_SSR = 40 # type: int
RTC_TAFCR = 64 # type: int
RTC_TR = 0 # type: int
RTC_TSDR = 52 # type: int
RTC_TSSSR = 56 # type: int
RTC_TSTR = 48 # type: int
RTC_WPR = 36 # type: int
RTC_WUTR = 20 # type: int
SDIO = 1073818624 # type: int
SPI1 = 1073819648 # type: int
SPI2 = 1073756160 # type: int
SPI3 = 1073757184 # type: int
SPI_CR1 = 0 # type: int
SPI_CR2 = 4 # type: int
SPI_CRCPR = 16 # type: int
SPI_DR = 12 # type: int
SPI_I2SCFGR = 28 # type: int
SPI_I2SPR = 32 # type: int
SPI_RXCRCR = 20 # type: int
SPI_SR = 8 # type: int
SPI_TXCRCR = 24 # type: int
SYSCFG = 1073821696 # type: int
SYSCFG_CMPCR = 32 # type: int
SYSCFG_EXTICR0 = 8 # type: int
SYSCFG_EXTICR1 = 12 # type: int
SYSCFG_EXTICR2 = 16 # type: int
SYSCFG_EXTICR3 = 20 # type: int
SYSCFG_MEMRMP = 0 # type: int
SYSCFG_PMC = 4 # type: int
TIM1 = 1073807360 # type: int
TIM10 = 1073824768 # type: int
TIM11 = 1073825792 # type: int
TIM12 = 1073747968 # type: int
TIM13 = 1073748992 # type: int
TIM14 = 1073750016 # type: int
TIM2 = 1073741824 # type: int
TIM3 = 1073742848 # type: int
TIM4 = 1073743872 # type: int
TIM5 = 1073744896 # type: int
TIM6 = 1073745920 # type: int
TIM7 = 1073746944 # type: int
TIM8 = 1073808384 # type: int
TIM9 = 1073823744 # type: int
TIM_ARR = 44 # type: int
TIM_BDTR = 68 # type: int
TIM_CCER = 32 # type: int
TIM_CCMR1 = 24 # type: int
TIM_CCMR2 = 28 # type: int
TIM_CCR1 = 52 # type: int
TIM_CCR2 = 56 # type: int
TIM_CCR3 = 60 # type: int
TIM_CCR4 = 64 # type: int
TIM_CNT = 36 # type: int
TIM_CR1 = 0 # type: int
TIM_CR2 = 4 # type: int
TIM_DCR = 72 # type: int
TIM_DIER = 12 # type: int
TIM_DMAR = 76 # type: int
TIM_EGR = 20 # type: int
TIM_OR = 80 # type: int
TIM_PSC = 40 # type: int
TIM_RCR = 48 # type: int
TIM_SMCR = 8 # type: int
TIM_SR = 16 # type: int
UART4 = 1073761280 # type: int
UART5 = 1073762304 # type: int
USART1 = 1073811456 # type: int
USART2 = 1073759232 # type: int
USART3 = 1073760256 # type: int
USART6 = 1073812480 # type: int
USART_BRR = 8 # type: int
USART_CR1 = 12 # type: int
USART_CR2 = 16 # type: int
USART_CR3 = 20 # type: int
USART_DR = 4 # type: int
USART_GTPR = 24 # type: int
USART_SR = 0 # type: int
WWDG = 1073753088 # type: int
WWDG_CFR = 4 # type: int
WWDG_CR = 0 # type: int
WWDG_SR = 8 # type: int
mem16: Any ## <class 'mem'> = <16-bit memory>
mem32: Any ## <class 'mem'> = <32-bit memory>
mem8: Any ## <class 'mem'> = <8-bit memory> | 0.285173 | 0.078113 |
import os
import queue
import base64
import asyncio
import threading
import exchangelib
from mailtk.data import Mailbox, ThreadInfo, Flag
from mailtk.accounts.base import AccountBase
class MailboxExchange(Mailbox):
_fields = 'folder'
class ThreadInfoExchange(ThreadInfo):
_fields = 'folder message_id'
class ExchangeAccount(AccountBase):
@classmethod
async def initialize(cls, loop, host, username, password, email):
account = cls(loop, host, username, password, email)
await account.connect()
return account
BREAK = object()
NOOP = object()
def __init__(self, loop, host, username, password, email):
self._loop = loop
self._host = host
self._email = email
self._credentials = exchangelib.Credentials(
username=username,
password=password,
)
self._command_queue = queue.Queue()
self._response_queue = queue.Queue()
self._ready_r, self._ready_w = os.pipe()
loop.add_reader(self._ready_r, self._ready)
self._thread = threading.Thread(None, self._run)
self._breaking = False
async def connect(self):
self._thread.start()
await self._call(self.NOOP)
async def disconnect(self):
await self._call(self.BREAK)
self._thread.join()
async def _call(self, method, *args):
if self._breaking:
raise Exception('connection is closing')
future = asyncio.Future(loop=self._loop)
self._command_queue.put_nowait((future, method, args))
if method is self.BREAK:
self._breaking = True
result = await future
if isinstance(result, Exception):
raise result
return result
def rpc(method):
async def wrapper(self, *args):
return await self._call(method, *args)
return wrapper
@rpc
def list_folders(self, account):
top = account.root.get_folder_by_name('Top of Information Store')
def make_mailbox(f):
children = (f.get_folders(depth=exchangelib.SHALLOW)
if f.child_folder_count else ())
child_mailboxes = [make_mailbox(c) for c in children
if isinstance(c, exchangelib.folders.Messages)]
return MailboxExchange(
Mailbox(f.name, child_mailboxes), f)
folders = top.get_folders(depth=exchangelib.SHALLOW)
return [make_mailbox(c) for c in folders
if isinstance(c, exchangelib.folders.Messages)]
@rpc
def list_messages(self, account, folder: MailboxExchange):
f = folder.folder # type: exchangelib.folders.Messages
messages = []
qs = f.all()
qs = qs.values('message_id', 'datetime_received', 'sender', 'subject')
qs = qs[:20]
for o in qs:
try:
message_id = o.pop('message_id')
dt = o.pop('datetime_received')
sender = o.pop('sender')
subject = o.pop('subject')
except KeyError:
raise Exception(o)
thread_info = ThreadInfo(
flag=Flag.read, size=42, date=dt,
subject=subject, sender=sender,
recipients=[],
children=[], excerpt='foo')
messages.append(ThreadInfoExchange(thread_info, folder, message_id))
return messages
@rpc
def fetch_message(self, account, threadinfo: ThreadInfoExchange):
folder = threadinfo.folder
message_id = threadinfo.message_id
qs = folder.folder.filter(message_id=message_id)
o, = qs
return base64.b64decode(o.mime_content)
del rpc
def _run(self):
# Run commands in thread
try:
if self._host == 'auto':
account = exchangelib.Account(
primary_smtp_address=self._email,
credentials=self._credentials,
autodiscover=True, access_type=exchangelib.DELEGATE)
else:
config = exchangelib.Configuration(
server=self._host,
credentials=self._credentials,
auth_type=exchangelib.NTLM,
)
account = exchangelib.Account(
primary_smtp_address=self._email, config=config,
access_type=exchangelib.DELEGATE)
except Exception as exn:
future, method, args = self._command_queue.get()
self._response_queue.put((future, exn))
self._command_queue.task_done()
os.write(self._ready_w, b'x')
return
try:
while True:
future, method, args = self._command_queue.get()
if method is self.BREAK:
break
elif method is self.NOOP:
result = None
else:
# TODO check if future is cancelled
try:
result = method(self, account, *args)
except Exception as exn:
result = exn
self._response_queue.put((future, result))
self._command_queue.task_done()
os.write(self._ready_w, b'x')
finally:
del account
del config
assert method is self.BREAK
self._response_queue.put((future, None))
self._command_queue.task_done()
os.write(self._ready_w, b'x')
def _ready(self):
os.read(self._ready_r, 1)
future, result = self._response_queue.get_nowait()
if not future.cancelled():
future.set_result(result)
self._response_queue.task_done() | mailtk/accounts/ews.py | import os
import queue
import base64
import asyncio
import threading
import exchangelib
from mailtk.data import Mailbox, ThreadInfo, Flag
from mailtk.accounts.base import AccountBase
class MailboxExchange(Mailbox):
_fields = 'folder'
class ThreadInfoExchange(ThreadInfo):
_fields = 'folder message_id'
class ExchangeAccount(AccountBase):
@classmethod
async def initialize(cls, loop, host, username, password, email):
account = cls(loop, host, username, password, email)
await account.connect()
return account
BREAK = object()
NOOP = object()
def __init__(self, loop, host, username, password, email):
self._loop = loop
self._host = host
self._email = email
self._credentials = exchangelib.Credentials(
username=username,
password=password,
)
self._command_queue = queue.Queue()
self._response_queue = queue.Queue()
self._ready_r, self._ready_w = os.pipe()
loop.add_reader(self._ready_r, self._ready)
self._thread = threading.Thread(None, self._run)
self._breaking = False
async def connect(self):
self._thread.start()
await self._call(self.NOOP)
async def disconnect(self):
await self._call(self.BREAK)
self._thread.join()
async def _call(self, method, *args):
if self._breaking:
raise Exception('connection is closing')
future = asyncio.Future(loop=self._loop)
self._command_queue.put_nowait((future, method, args))
if method is self.BREAK:
self._breaking = True
result = await future
if isinstance(result, Exception):
raise result
return result
def rpc(method):
async def wrapper(self, *args):
return await self._call(method, *args)
return wrapper
@rpc
def list_folders(self, account):
top = account.root.get_folder_by_name('Top of Information Store')
def make_mailbox(f):
children = (f.get_folders(depth=exchangelib.SHALLOW)
if f.child_folder_count else ())
child_mailboxes = [make_mailbox(c) for c in children
if isinstance(c, exchangelib.folders.Messages)]
return MailboxExchange(
Mailbox(f.name, child_mailboxes), f)
folders = top.get_folders(depth=exchangelib.SHALLOW)
return [make_mailbox(c) for c in folders
if isinstance(c, exchangelib.folders.Messages)]
@rpc
def list_messages(self, account, folder: MailboxExchange):
f = folder.folder # type: exchangelib.folders.Messages
messages = []
qs = f.all()
qs = qs.values('message_id', 'datetime_received', 'sender', 'subject')
qs = qs[:20]
for o in qs:
try:
message_id = o.pop('message_id')
dt = o.pop('datetime_received')
sender = o.pop('sender')
subject = o.pop('subject')
except KeyError:
raise Exception(o)
thread_info = ThreadInfo(
flag=Flag.read, size=42, date=dt,
subject=subject, sender=sender,
recipients=[],
children=[], excerpt='foo')
messages.append(ThreadInfoExchange(thread_info, folder, message_id))
return messages
@rpc
def fetch_message(self, account, threadinfo: ThreadInfoExchange):
folder = threadinfo.folder
message_id = threadinfo.message_id
qs = folder.folder.filter(message_id=message_id)
o, = qs
return base64.b64decode(o.mime_content)
del rpc
def _run(self):
# Run commands in thread
try:
if self._host == 'auto':
account = exchangelib.Account(
primary_smtp_address=self._email,
credentials=self._credentials,
autodiscover=True, access_type=exchangelib.DELEGATE)
else:
config = exchangelib.Configuration(
server=self._host,
credentials=self._credentials,
auth_type=exchangelib.NTLM,
)
account = exchangelib.Account(
primary_smtp_address=self._email, config=config,
access_type=exchangelib.DELEGATE)
except Exception as exn:
future, method, args = self._command_queue.get()
self._response_queue.put((future, exn))
self._command_queue.task_done()
os.write(self._ready_w, b'x')
return
try:
while True:
future, method, args = self._command_queue.get()
if method is self.BREAK:
break
elif method is self.NOOP:
result = None
else:
# TODO check if future is cancelled
try:
result = method(self, account, *args)
except Exception as exn:
result = exn
self._response_queue.put((future, result))
self._command_queue.task_done()
os.write(self._ready_w, b'x')
finally:
del account
del config
assert method is self.BREAK
self._response_queue.put((future, None))
self._command_queue.task_done()
os.write(self._ready_w, b'x')
def _ready(self):
os.read(self._ready_r, 1)
future, result = self._response_queue.get_nowait()
if not future.cancelled():
future.set_result(result)
self._response_queue.task_done() | 0.284377 | 0.055209 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Conversation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('creator', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.BigAutoField(primary_key=True, serialize=False)),
('message', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('deleted_at', models.DateTimeField(blank=True, null=True)),
('message_type', models.IntegerField(choices=[(0, 'Text'), (1, 'Image')], default=0)),
('conversation', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='messages', to='conversations.Conversation')),
],
),
migrations.CreateModel(
name='Participant',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('last_read_date', models.DateTimeField(auto_now_add=True)),
('conversation', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='participants', to='conversations.Conversation')),
('participant_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='message',
name='participant',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='conversations.Participant'),
),
] | yangram/conversations/migrations/0001_initial.py |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Conversation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('creator', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.BigAutoField(primary_key=True, serialize=False)),
('message', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('deleted_at', models.DateTimeField(blank=True, null=True)),
('message_type', models.IntegerField(choices=[(0, 'Text'), (1, 'Image')], default=0)),
('conversation', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='messages', to='conversations.Conversation')),
],
),
migrations.CreateModel(
name='Participant',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('last_read_date', models.DateTimeField(auto_now_add=True)),
('conversation', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='participants', to='conversations.Conversation')),
('participant_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='message',
name='participant',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='conversations.Participant'),
),
] | 0.511229 | 0.128744 |
import pybox.helpers.data as data_helpers
import pybox.datastore.data_to_html as pbdsdth
import pybox.datastore.data_table_row as pbdsdtr
from copy import deepcopy
from datetime import date, datetime
from math import ceil, floor
from tabulate import tabulate
from tqdm import tqdm
DATA_TYPES = [int, float, complex, bool, str, type(None), date, datetime]
class DataTable:
"""Data structure with typed columns and mutable entities."""
__slots__ = ["_data", "_data_map"]
def __init__(self, data=None, names=None, dtypes=None):
DATA_TYPES.append(self.__class__)
self._data = list()
self._data_map = list()
if isinstance(data, dict):
self._data = list(map(list, zip(*data.values())))
if not names:
names = list(data.keys())
elif type(data).__module__ == "numpy":
self._data = data.tolist()
if self.length == 0 or data is None:
if names and dtypes:
for name, dtype in zip(names, dtypes):
self._data_map.append([name, dtype])
else:
raise ValueError(
"If data not supplied, both `names` and `dtypes` must not be none."
)
else:
for column_idx in range(len(self._data[0])):
self._data_map.append([
names[column_idx],
data_helpers.recognize_type(
(self._data[row_idx][column_idx]
for row_idx in range(len(self._data))))
])
def __getitem__(self, key):
if isinstance(key, tuple):
name, index = key
return self._data[index][self.column_index(name)]
elif isinstance(key, str):
return [
self._data[index][self.column_index(key)]
for index in range(self.length)
]
def __setitem__(self, key, value):
if isinstance(key, tuple):
name, idx = key
if name not in self.columns:
self.insert_column(name, [None] * self.length, type(value))
try:
self._data[idx][self.column_index(name)] = value
except IndexError:
self.remove_columns([name])
raise IndexError("List assignment index out of range.")
elif isinstance(key, str):
if key not in self.columns:
self.insert_column(key, [None] * self.length, type(value))
if isinstance(value, list) or isinstance(value, tuple):
datatype = data_helpers.recognize_type(value)
for index, element in enumerate(value):
self._data[index][self.column_index(key)] = (
data_helpers.change_type(element, datatype))
else:
self._data[0][self.column_index(key)] = value
for idx in range(1, self.length):
self._data[idx][self.column_index(key)] = None
self._data_map[self.column_index(key)][1] = object
else:
raise ValueError(
"The wrong key type was supplied, it should be `str`",
"representing name of the column or \npair of `str` ",
"representing a column and `int` representing certain ",
"row splitted by comma. ")
def __iter__(self):
return (pbdsdtr.DataTableRow(self, row) for row in range(self.length))
def __str__(self):
return self.as_string()
def __repr__(self):
return f"DataTable({self.width}x{self.length})"
@property
def copy(self):
"""Return deepcopy of the DataTable."""
return deepcopy(self)
@property
def length(self):
"""Return length of the DataTable, in other words number of rows in it."""
return len(self._data)
@property
def width(self):
"""Return width of the DataTable, in other words number of columns in it."""
return len(self._data_map)
@property
def columns(self):
"""Return a list of column names."""
return [column_name for column_name, _ in self._data_map]
@property
def datatypes(self):
"""Return a dictionary of column names and their data types."""
return {column_name: dtype for column_name, dtype in self._data_map}
@property
def bytesize(self):
"""Returns the approximate memory DataTable footprint."""
return data_helpers.byte_size(self._data) + data_helpers.byte_size(
self._data_map)
@property
def info(self):
"""Return information about shape and bytesize of the DataTable."""
info = ("DataTable"
f"(shape={self.width}x{self.length},"
f"bytesize={self.bytesize})")
for name, dtype in self.datatypes.items():
info = "".join([info, "\n", name, ": ", dtype.__name__])
return info
@property
def to_numpy_array(self):
"""Return numpy structured array object created from the DataTable."""
from numpy import array
types_list = list(self.datatypes.items())
numpy_array = array(self._data, dtype=types_list)
return numpy_array
@property
def to_arrow_table(self):
"""Return arrow table object created from the DataTable."""
from pyarrow import Table, array
arrow_table = Table.from_arrays(
[array(column) for column in list(map(list, zip(*self._data)))],
names=self.columns)
return arrow_table
def rows(self):
"""Returns rows interable which called, displays progress bar."""
return (
pbdsdtr.DataTableRow(self, row) for row in tqdm(range(self.length)))
def to_parquet(self, file_name, directory):
"""Store `DataTable` in the parquet format file.
Args:
file_name (str): name under which data structure will be saved.
directory (str): folder structure in which DataTable is to be saved.
"""
from pybox.datastore.data_flow import table_to_parquet
table_to_parquet(self, file_name, directory)
def as_string(self, rows_number=10, text_format="simple"):
"""Returns table in the string format, easily printable in the console.
Args:
rows_number (int, optional): rows number to print. Defaults to 10.
text_format (str, optional): string output format. Defaults to `simple`.
"""
if self.length <= rows_number:
data_subset = self._data
indices = True
else:
lower_bound = ceil(rows_number / 2)
upper_bound = self.length - floor(rows_number / 2)
data_subset = [
*self._data[:lower_bound], [None for _ in range(self.width)],
*self._data[upper_bound:]
]
indices = [
*[i for i in range(lower_bound)], "..",
*[i for i in range(upper_bound, self.length)]
]
return tabulate(data_subset,
showindex=indices,
tablefmt=text_format,
headers=[
f"{name}\nDataType:{dtype.__name__}"
for (name, dtype) in self._data_map
])
def display(self, rows_number=10, display_type=None, string_length=299):
"""Display the contents of the current DataTable in rendered html format.
Args:
rows_number (int, optional): number of rows/observations
to be displeyed. Defaults to 10.
display_type (str, optional): type of displayed DataTable form.
If `random` display a random representation.
If `head` display heading elements.
If `tail` display tail elements.
Otherwise display entire or first heading and tail elements.
Defaults to None.
string_length (int, optional): maximal length of returned string,
if None entire string is printed. Defaults to 299.
"""
if not display_type:
pbdsdth.show_table(self._data, self._data_map, rows_number,
string_length)
elif display_type.lower() == "random":
pbdsdth.show_table_random(self._data, self._data_map, rows_number,
string_length)
elif display_type.lower() == "head":
pbdsdth.show_table_head(self._data, self._data_map, rows_number,
string_length)
elif display_type.lower() == "tail":
pbdsdth.show_table_tail(self._data, self._data_map, rows_number,
string_length)
def column_index(self, column_name):
"""Return the index number of the specified column.
Args:
column_name (str): name of the specified column.
"""
for column_index, element in enumerate(self._data_map):
if element[0] == column_name:
return column_index
raise IndexError(
f"A column called `{column_name}` has not been found in DataTable.")
def rows_range(self, start=None, stop=None, step=None):
"""Return an iterator containing the range of DataTable indices.
Args:
start (int, optional): start of the range, included in the
iteration. Defaults to None.
stop (int, optional): end of the range, excluded from the
iteration. Defaults to None.
step (int, optional): steps between range elements. Defaults to None.
"""
if not start:
start = 0
if not stop:
stop = self.length
if not step:
step = 1
return range(start, stop, step)
def insert_column(self,
column_name,
column_values,
datatype=None,
column_index=None):
"""Create a new column in the DataTable.
Args:
column_name (str): name of the newly created column.
column_value (any): value of the newly created column.
datatype (type, optional): data type of supplied data. Defaults to None.
"""
if column_name in self.columns:
raise NameError("Supplied column name already exists.")
length_difference = self.length - len(column_values)
if length_difference > 0:
column_values.extend([None] * length_difference)
elif length_difference < 0:
for _ in range(abs(length_difference)):
self._data.append([None] * self.width)
if not datatype:
datatype = data_helpers.recognize_type(column_values)
if datatype not in DATA_TYPES and datatype.__module__ != "numpy":
raise ValueError(
"Wrong type of data supplied, it must be one of: ",
f"{','.join([str(dtype) for dtype in DATA_TYPES])} or numpy.array."
)
if column_index is None:
column_index = self.length
for idx in range(self.length):
self._data[idx].insert(
column_index,
data_helpers.change_type(column_values[idx], datatype))
self._data_map.insert(column_index, [column_name, datatype])
def separate_columns(self, column_names):
"""Create copy of a DataTable only with supplied columns.
Args:
column_names (list): column names to be separated.
"""
data_copy = self.copy
columns_to_remove = [
col for col in self.columns if col not in column_names
]
data_copy.remove_columns(columns_to_remove)
return data_copy
def remove_columns(self, column_names):
"""Remove selected columns from the DataTable.
Args:
column_names (list): column names to be removed.
"""
for column_name in column_names:
for idx, column_map in enumerate(self._data_map):
if column_map[0] == column_name:
self._data_map.remove(column_map)
for row_idx in range(self.length):
del self._data[row_idx][idx]
break
def insert_row(self, row_values, row_index=None):
"""Insert a row into the DataTable object.
Args:
row_values (array-like): list of values to be inserted.
row_index (int, optional): number of row. Defaults to length of DateTable.
"""
if len(row_values) != self.width:
raise ValueError(
"Supplied row needs to be the same width as DataTable",
f", which is {self.width}.")
for idx, value in enumerate(row_values):
row_values[idx] = data_helpers.change_type(value,
self._data_map[idx][1])
if row_index is None:
row_index = self.length
self._data.insert(row_index, row_values)
def rename_columns(self, old_names, new_names):
"""Rename selected column names.
Args:
old_names (list): column names to be renamed.
new_names (list): new column names.
"""
columns_dict = dict(zip(old_names, new_names))
for column_map in self._data_map:
if column_map[0] in columns_dict.keys():
column_map[0] = columns_dict[column_map[0]]
def sort(self, column_names, reverse_order=False, sort_function=None):
"""Sort in place the DataTable based on the supplied column name values.
Args:
column_names (list): strings representing column names based on
which values performed will be sorting.
reverse_order (bool, optional): value that indicates whether the
sort should be performed in reverse order. Defaults to False.
sort_function (function, optional): function that allows to pass
additional commands to the sorter. Defaults to None.
"""
other_columns = [
column for column, _ in self._data_map if column not in column_names
]
data_to_sort = []
new_data_map = []
for column in column_names + other_columns:
data_to_sort.append([
self._data[index][self.column_index(column)]
for index in range(self.length)
])
new_data_map.append([column, self.datatypes[column]])
data_to_sort = list(map(list, zip(*data_to_sort)))
self._data_map = new_data_map
self._data = sorted(data_to_sort,
key=sort_function,
reverse=reverse_order)
def filter(self, filtering_function, return_filtered_out=False):
"""Filters in place the DataTable rows, removing rows which do not
meet condition from provided functiom. If `return_filtered_out` is
True method additionaly returns object with filtered out rows.
Args:
filtering_function (function): used to select rows meeting the
condition set in the function.
return_filtered_out (bool, optional): indicating whether to save
the filtered rows in a separate table. Defaults to False.
"""
if return_filtered_out:
filtered_out = self.copy
for index, row in reversed(list(enumerate(self))):
if not filtering_function(row):
del self._data[index]
else:
del filtered_out._data[index]
return filtered_out
else:
for index, row in reversed(list(enumerate(self))):
if not filtering_function(row):
del self._data[index]
def create_dummies(self, column_name, remove_in_place=True):
"""Convert specified column containing categorical variables into
several binarized ones.
Args:
column_name (str): name of the specified column.
remove_in_place (bool, optional): if True after creating new dummy
columns original column is deleted. Defaults to True.
"""
levels = set(self[column_name])
levels.discard(None)
levels = sorted(levels, reverse=True)
main_column_index = self.column_index(column_name)
for level in levels:
level_column_name = "".join([column_name, str(level)])
self.insert_column(level_column_name, [None] * self.length,
datatype=int,
column_index=main_column_index + 1)
for idx in self.rows_range():
if self._data[idx][main_column_index] is None:
continue
elif self._data[idx][main_column_index] == level:
self._data[idx][self.column_index(level_column_name)] = 1
else:
self._data[idx][self.column_index(level_column_name)] = 0
if remove_in_place:
self.remove_columns([column_name])
def concatenate(self, outer_data, inner_columns=None, outer_columns=None):
"""Concatenate the DataTable with an external one.
Args:
outer_data (DataTable): external DataTable to be concatenated.
inner_columns (list, optional): strings representing inner columns
to which new data is to be concatenated. Defaults to None.
outer_columns (list, optional): strings representing outer columns
that are to be concatenated. Defaults to None.
"""
if inner_columns is None:
inner_columns = self.columns
if outer_columns is None:
outer_columns = outer_data.columns
columns_dict = dict(zip(inner_columns, outer_columns))
for row in outer_data:
row_to_append = []
for column in self.columns:
if column in columns_dict.keys():
row_to_append.append(row[columns_dict[column]])
else:
row_to_append.append(None)
self._data.append(row_to_append)
def join(self, outer_data, inner_column, outer_column):
"""Join the DataTable with an external one.
Args:
outer_data (DataTable): external DataTable to be join with.
inner_column (str): name of the column in the inner DataTable,
based on which join is to be performed
outer_column (str): name of the column in the outer DataTable,
based on which join is to be performed
"""
outer_data_copy = outer_data.copy
outer_data_copy.rename_columns([outer_column], ["OuterMainColumn"])
outer_data_copy.sort(["OuterMainColumn"])
outer_data_width = outer_data_copy.width
self._data_map.extend(outer_data_copy._data_map)
for idx in self.rows_range():
position = data_helpers.binary_search(
outer_data_copy["OuterMainColumn"], self[inner_column, idx])
if position is None:
self._data[idx].extend([None] * outer_data_width)
else:
self._data[idx].extend(outer_data_copy._data[position])
self.remove_columns(["OuterMainColumn"])
def apply(self, column_name, function):
"""Apply supplied transformation on all values in the selected column.
Args:
column_name (str): name of the column on which the transformation
is to be performed.
function (function): describes the transformations that are to be
performed on the selected column.
"""
column_index = self.column_index(column_name)
for index in self.rows_range():
self._data[index][column_index] = function(
self._data[index][column_index]) | pybox/datastore/data_table.py | import pybox.helpers.data as data_helpers
import pybox.datastore.data_to_html as pbdsdth
import pybox.datastore.data_table_row as pbdsdtr
from copy import deepcopy
from datetime import date, datetime
from math import ceil, floor
from tabulate import tabulate
from tqdm import tqdm
DATA_TYPES = [int, float, complex, bool, str, type(None), date, datetime]
class DataTable:
"""Data structure with typed columns and mutable entities."""
__slots__ = ["_data", "_data_map"]
def __init__(self, data=None, names=None, dtypes=None):
DATA_TYPES.append(self.__class__)
self._data = list()
self._data_map = list()
if isinstance(data, dict):
self._data = list(map(list, zip(*data.values())))
if not names:
names = list(data.keys())
elif type(data).__module__ == "numpy":
self._data = data.tolist()
if self.length == 0 or data is None:
if names and dtypes:
for name, dtype in zip(names, dtypes):
self._data_map.append([name, dtype])
else:
raise ValueError(
"If data not supplied, both `names` and `dtypes` must not be none."
)
else:
for column_idx in range(len(self._data[0])):
self._data_map.append([
names[column_idx],
data_helpers.recognize_type(
(self._data[row_idx][column_idx]
for row_idx in range(len(self._data))))
])
def __getitem__(self, key):
if isinstance(key, tuple):
name, index = key
return self._data[index][self.column_index(name)]
elif isinstance(key, str):
return [
self._data[index][self.column_index(key)]
for index in range(self.length)
]
def __setitem__(self, key, value):
if isinstance(key, tuple):
name, idx = key
if name not in self.columns:
self.insert_column(name, [None] * self.length, type(value))
try:
self._data[idx][self.column_index(name)] = value
except IndexError:
self.remove_columns([name])
raise IndexError("List assignment index out of range.")
elif isinstance(key, str):
if key not in self.columns:
self.insert_column(key, [None] * self.length, type(value))
if isinstance(value, list) or isinstance(value, tuple):
datatype = data_helpers.recognize_type(value)
for index, element in enumerate(value):
self._data[index][self.column_index(key)] = (
data_helpers.change_type(element, datatype))
else:
self._data[0][self.column_index(key)] = value
for idx in range(1, self.length):
self._data[idx][self.column_index(key)] = None
self._data_map[self.column_index(key)][1] = object
else:
raise ValueError(
"The wrong key type was supplied, it should be `str`",
"representing name of the column or \npair of `str` ",
"representing a column and `int` representing certain ",
"row splitted by comma. ")
def __iter__(self):
return (pbdsdtr.DataTableRow(self, row) for row in range(self.length))
def __str__(self):
return self.as_string()
def __repr__(self):
return f"DataTable({self.width}x{self.length})"
@property
def copy(self):
"""Return deepcopy of the DataTable."""
return deepcopy(self)
@property
def length(self):
"""Return length of the DataTable, in other words number of rows in it."""
return len(self._data)
@property
def width(self):
"""Return width of the DataTable, in other words number of columns in it."""
return len(self._data_map)
@property
def columns(self):
"""Return a list of column names."""
return [column_name for column_name, _ in self._data_map]
@property
def datatypes(self):
"""Return a dictionary of column names and their data types."""
return {column_name: dtype for column_name, dtype in self._data_map}
@property
def bytesize(self):
"""Returns the approximate memory DataTable footprint."""
return data_helpers.byte_size(self._data) + data_helpers.byte_size(
self._data_map)
@property
def info(self):
"""Return information about shape and bytesize of the DataTable."""
info = ("DataTable"
f"(shape={self.width}x{self.length},"
f"bytesize={self.bytesize})")
for name, dtype in self.datatypes.items():
info = "".join([info, "\n", name, ": ", dtype.__name__])
return info
@property
def to_numpy_array(self):
"""Return numpy structured array object created from the DataTable."""
from numpy import array
types_list = list(self.datatypes.items())
numpy_array = array(self._data, dtype=types_list)
return numpy_array
@property
def to_arrow_table(self):
"""Return arrow table object created from the DataTable."""
from pyarrow import Table, array
arrow_table = Table.from_arrays(
[array(column) for column in list(map(list, zip(*self._data)))],
names=self.columns)
return arrow_table
def rows(self):
"""Returns rows interable which called, displays progress bar."""
return (
pbdsdtr.DataTableRow(self, row) for row in tqdm(range(self.length)))
def to_parquet(self, file_name, directory):
"""Store `DataTable` in the parquet format file.
Args:
file_name (str): name under which data structure will be saved.
directory (str): folder structure in which DataTable is to be saved.
"""
from pybox.datastore.data_flow import table_to_parquet
table_to_parquet(self, file_name, directory)
def as_string(self, rows_number=10, text_format="simple"):
"""Returns table in the string format, easily printable in the console.
Args:
rows_number (int, optional): rows number to print. Defaults to 10.
text_format (str, optional): string output format. Defaults to `simple`.
"""
if self.length <= rows_number:
data_subset = self._data
indices = True
else:
lower_bound = ceil(rows_number / 2)
upper_bound = self.length - floor(rows_number / 2)
data_subset = [
*self._data[:lower_bound], [None for _ in range(self.width)],
*self._data[upper_bound:]
]
indices = [
*[i for i in range(lower_bound)], "..",
*[i for i in range(upper_bound, self.length)]
]
return tabulate(data_subset,
showindex=indices,
tablefmt=text_format,
headers=[
f"{name}\nDataType:{dtype.__name__}"
for (name, dtype) in self._data_map
])
def display(self, rows_number=10, display_type=None, string_length=299):
"""Display the contents of the current DataTable in rendered html format.
Args:
rows_number (int, optional): number of rows/observations
to be displeyed. Defaults to 10.
display_type (str, optional): type of displayed DataTable form.
If `random` display a random representation.
If `head` display heading elements.
If `tail` display tail elements.
Otherwise display entire or first heading and tail elements.
Defaults to None.
string_length (int, optional): maximal length of returned string,
if None entire string is printed. Defaults to 299.
"""
if not display_type:
pbdsdth.show_table(self._data, self._data_map, rows_number,
string_length)
elif display_type.lower() == "random":
pbdsdth.show_table_random(self._data, self._data_map, rows_number,
string_length)
elif display_type.lower() == "head":
pbdsdth.show_table_head(self._data, self._data_map, rows_number,
string_length)
elif display_type.lower() == "tail":
pbdsdth.show_table_tail(self._data, self._data_map, rows_number,
string_length)
def column_index(self, column_name):
"""Return the index number of the specified column.
Args:
column_name (str): name of the specified column.
"""
for column_index, element in enumerate(self._data_map):
if element[0] == column_name:
return column_index
raise IndexError(
f"A column called `{column_name}` has not been found in DataTable.")
def rows_range(self, start=None, stop=None, step=None):
"""Return an iterator containing the range of DataTable indices.
Args:
start (int, optional): start of the range, included in the
iteration. Defaults to None.
stop (int, optional): end of the range, excluded from the
iteration. Defaults to None.
step (int, optional): steps between range elements. Defaults to None.
"""
if not start:
start = 0
if not stop:
stop = self.length
if not step:
step = 1
return range(start, stop, step)
def insert_column(self,
column_name,
column_values,
datatype=None,
column_index=None):
"""Create a new column in the DataTable.
Args:
column_name (str): name of the newly created column.
column_value (any): value of the newly created column.
datatype (type, optional): data type of supplied data. Defaults to None.
"""
if column_name in self.columns:
raise NameError("Supplied column name already exists.")
length_difference = self.length - len(column_values)
if length_difference > 0:
column_values.extend([None] * length_difference)
elif length_difference < 0:
for _ in range(abs(length_difference)):
self._data.append([None] * self.width)
if not datatype:
datatype = data_helpers.recognize_type(column_values)
if datatype not in DATA_TYPES and datatype.__module__ != "numpy":
raise ValueError(
"Wrong type of data supplied, it must be one of: ",
f"{','.join([str(dtype) for dtype in DATA_TYPES])} or numpy.array."
)
if column_index is None:
column_index = self.length
for idx in range(self.length):
self._data[idx].insert(
column_index,
data_helpers.change_type(column_values[idx], datatype))
self._data_map.insert(column_index, [column_name, datatype])
def separate_columns(self, column_names):
"""Create copy of a DataTable only with supplied columns.
Args:
column_names (list): column names to be separated.
"""
data_copy = self.copy
columns_to_remove = [
col for col in self.columns if col not in column_names
]
data_copy.remove_columns(columns_to_remove)
return data_copy
def remove_columns(self, column_names):
"""Remove selected columns from the DataTable.
Args:
column_names (list): column names to be removed.
"""
for column_name in column_names:
for idx, column_map in enumerate(self._data_map):
if column_map[0] == column_name:
self._data_map.remove(column_map)
for row_idx in range(self.length):
del self._data[row_idx][idx]
break
def insert_row(self, row_values, row_index=None):
"""Insert a row into the DataTable object.
Args:
row_values (array-like): list of values to be inserted.
row_index (int, optional): number of row. Defaults to length of DateTable.
"""
if len(row_values) != self.width:
raise ValueError(
"Supplied row needs to be the same width as DataTable",
f", which is {self.width}.")
for idx, value in enumerate(row_values):
row_values[idx] = data_helpers.change_type(value,
self._data_map[idx][1])
if row_index is None:
row_index = self.length
self._data.insert(row_index, row_values)
def rename_columns(self, old_names, new_names):
"""Rename selected column names.
Args:
old_names (list): column names to be renamed.
new_names (list): new column names.
"""
columns_dict = dict(zip(old_names, new_names))
for column_map in self._data_map:
if column_map[0] in columns_dict.keys():
column_map[0] = columns_dict[column_map[0]]
def sort(self, column_names, reverse_order=False, sort_function=None):
"""Sort in place the DataTable based on the supplied column name values.
Args:
column_names (list): strings representing column names based on
which values performed will be sorting.
reverse_order (bool, optional): value that indicates whether the
sort should be performed in reverse order. Defaults to False.
sort_function (function, optional): function that allows to pass
additional commands to the sorter. Defaults to None.
"""
other_columns = [
column for column, _ in self._data_map if column not in column_names
]
data_to_sort = []
new_data_map = []
for column in column_names + other_columns:
data_to_sort.append([
self._data[index][self.column_index(column)]
for index in range(self.length)
])
new_data_map.append([column, self.datatypes[column]])
data_to_sort = list(map(list, zip(*data_to_sort)))
self._data_map = new_data_map
self._data = sorted(data_to_sort,
key=sort_function,
reverse=reverse_order)
def filter(self, filtering_function, return_filtered_out=False):
"""Filters in place the DataTable rows, removing rows which do not
meet condition from provided functiom. If `return_filtered_out` is
True method additionaly returns object with filtered out rows.
Args:
filtering_function (function): used to select rows meeting the
condition set in the function.
return_filtered_out (bool, optional): indicating whether to save
the filtered rows in a separate table. Defaults to False.
"""
if return_filtered_out:
filtered_out = self.copy
for index, row in reversed(list(enumerate(self))):
if not filtering_function(row):
del self._data[index]
else:
del filtered_out._data[index]
return filtered_out
else:
for index, row in reversed(list(enumerate(self))):
if not filtering_function(row):
del self._data[index]
def create_dummies(self, column_name, remove_in_place=True):
"""Convert specified column containing categorical variables into
several binarized ones.
Args:
column_name (str): name of the specified column.
remove_in_place (bool, optional): if True after creating new dummy
columns original column is deleted. Defaults to True.
"""
levels = set(self[column_name])
levels.discard(None)
levels = sorted(levels, reverse=True)
main_column_index = self.column_index(column_name)
for level in levels:
level_column_name = "".join([column_name, str(level)])
self.insert_column(level_column_name, [None] * self.length,
datatype=int,
column_index=main_column_index + 1)
for idx in self.rows_range():
if self._data[idx][main_column_index] is None:
continue
elif self._data[idx][main_column_index] == level:
self._data[idx][self.column_index(level_column_name)] = 1
else:
self._data[idx][self.column_index(level_column_name)] = 0
if remove_in_place:
self.remove_columns([column_name])
def concatenate(self, outer_data, inner_columns=None, outer_columns=None):
"""Concatenate the DataTable with an external one.
Args:
outer_data (DataTable): external DataTable to be concatenated.
inner_columns (list, optional): strings representing inner columns
to which new data is to be concatenated. Defaults to None.
outer_columns (list, optional): strings representing outer columns
that are to be concatenated. Defaults to None.
"""
if inner_columns is None:
inner_columns = self.columns
if outer_columns is None:
outer_columns = outer_data.columns
columns_dict = dict(zip(inner_columns, outer_columns))
for row in outer_data:
row_to_append = []
for column in self.columns:
if column in columns_dict.keys():
row_to_append.append(row[columns_dict[column]])
else:
row_to_append.append(None)
self._data.append(row_to_append)
def join(self, outer_data, inner_column, outer_column):
"""Join the DataTable with an external one.
Args:
outer_data (DataTable): external DataTable to be join with.
inner_column (str): name of the column in the inner DataTable,
based on which join is to be performed
outer_column (str): name of the column in the outer DataTable,
based on which join is to be performed
"""
outer_data_copy = outer_data.copy
outer_data_copy.rename_columns([outer_column], ["OuterMainColumn"])
outer_data_copy.sort(["OuterMainColumn"])
outer_data_width = outer_data_copy.width
self._data_map.extend(outer_data_copy._data_map)
for idx in self.rows_range():
position = data_helpers.binary_search(
outer_data_copy["OuterMainColumn"], self[inner_column, idx])
if position is None:
self._data[idx].extend([None] * outer_data_width)
else:
self._data[idx].extend(outer_data_copy._data[position])
self.remove_columns(["OuterMainColumn"])
def apply(self, column_name, function):
"""Apply supplied transformation on all values in the selected column.
Args:
column_name (str): name of the column on which the transformation
is to be performed.
function (function): describes the transformations that are to be
performed on the selected column.
"""
column_index = self.column_index(column_name)
for index in self.rows_range():
self._data[index][column_index] = function(
self._data[index][column_index]) | 0.77949 | 0.343534 |
from bcar.car import Car
import ipywidgets.widgets as widgets
from ipywidgets import HBox, VBox, Button
import time
class BCar():
def __init__(self):
self.car = Car()
self.panel = ButtonGroup()
self.m_steer = 0
self.m_gear = 0
self.press_count = 0
def play(self):
self._setting()
return self.panel.display()
def _car_axes_rl(self, change):
self.m_steer = change['new']
self.car.drive(steer=self.m_steer, gear=self.m_gear)
def _car_axes_fb(self, change):
self.m_gear = - change['new']
if abs(self.m_gear) > 0.1:
self.car.drive(steer=self.m_steer, gear=self.m_gear)
else:
self.car.drive(steer=0, gear=0)
def _slide_left(self, change):
val = change['new']
pct = 0.5
if self.controller.buttons[10].value == 1:
self.car.run(m1=-pct, m2=pct, m3=-pct, m4=pct)
else:
self.car.run(m1=0, m2=0, m3=0, m4=0)
def _slide_right(self, change):
val = change['new']
pct = 0.5
if self.controller.buttons[11].value == 1:
self.car.run(m1=pct, m2=-pct, m3=pct, m4=-pct)
else:
self.car.run(m1=0, m2=0, m3=0, m4=0)
def controller_setup(self, index=0):
self.controller = widgets.Controller(index=index)
print("Move your controller NOW and activiate it...")
display(self.controller)
def controller_on(self): # Linking js to car movement control
self.controller.axes[0].observe(self._car_axes_rl, names=['value'])
self.controller.axes[3].observe(self._car_axes_fb, names=['value'])
self.controller.buttons[12].observe(lambda x: self._press_act(event="forward")) # forward
self.controller.buttons[13].observe(lambda x: self._press_act(event="backward")) # backward
self.controller.buttons[14].observe(lambda x: self._press_act(event="left")) # turn left
self.controller.buttons[15].observe(lambda x: self._press_act(event="right")) # turn right
self.controller.buttons[4].observe(lambda x: self._press_act(event="level_down")) # gear level up
self.controller.buttons[5].observe(lambda x: self._press_act(event="level_up")) # gear level up
self.controller.buttons[10].observe(self._slide_left, names=['value']) # Sliding car to left
self.controller.buttons[11].observe(self._slide_right, names=['value']) # Sliding car to right
def _setting(self): #config for actions
self.panel.buttons[0].on_click(lambda x: self.car.forward(p=0.3, t=-1))
self.panel.buttons[1].on_click(lambda x: self.car.forward())
self.panel.buttons[2].on_click(lambda x: self.car.slide_left(p=0.5, t=-1))
self.panel.buttons[3].on_click(lambda x: self.car.left())
self.panel.buttons[4].on_click(lambda x: self.car.stop())
self.panel.buttons[5].on_click(lambda x: self.car.right())
self.panel.buttons[6].on_click(lambda x: self.car.backward(p=0.3, t=-1))
self.panel.buttons[7].on_click(lambda x: self.car.backward())
self.panel.buttons[8].on_click(lambda x: self.car.slide_right(p=0.5, t=-1))
self.panel.extra[0].on_click(lambda x: self.car.level_up())
self.panel.extra[1].on_click(lambda x: self.car.level_read())
self.panel.extra[2].on_click(lambda x: self.car.level_down())
def _press_act(self, event):
self.press_count += 1
if self.press_count > 8:
if event == "forward":
self.car.forward()
elif event == "backward":
self.car.backward()
elif event == "left":
self.car.left()
elif event == "right":
self.car.right()
elif event == "level_up":
self.car.level_up()
elif event == "level_read":
self.car.level_read()
elif event == "level_down":
self.car.level_down()
self.press_count = 0
class ButtonGroup():
def __init__(self, button_list=None):
self.button_layout = widgets.Layout(width='60px', height='40px', align_self='center')
self.button_list = ['Fwd', '▲', 'sL', '◄', 'Stop', '►', 'Bwd', '▼', 'sR'] if button_list==None else button_list
self.extra_list = ['g△', '-', 'g▽']
self.buttons = [Button(description=i, layout=self.button_layout) for i in self.button_list]
self.extra = [Button(description=i, layout=self.button_layout) for i in self.extra_list]
def display(self):
row1 = HBox([self.buttons[0], self.buttons[1], self.buttons[2]])
row2 = HBox([self.buttons[3], self.buttons[4], self.buttons[5]])
row3 = HBox([self.buttons[6], self.buttons[7], self.buttons[8]])
col1 = VBox([self.extra[0], self.extra[1], self.extra[2]])
return HBox([VBox([row1, row2, row3]), col1]) | bcar/bcar.py | from bcar.car import Car
import ipywidgets.widgets as widgets
from ipywidgets import HBox, VBox, Button
import time
class BCar():
    """Interactive Jupyter front end for a four-motor Car.

    Exposes an on-screen 3x3 button panel (ButtonGroup) and can
    additionally bind an HTML5 gamepad (ipywidgets.Controller) to the
    car's movement and gear-level primitives.
    """

    # Events that _press_act is allowed to forward to the Car object.
    _EVENTS = ('forward', 'backward', 'left', 'right',
               'level_up', 'level_read', 'level_down')

    def __init__(self):
        self.car = Car()            # low-level motor/gear driver
        self.panel = ButtonGroup()  # on-screen button grid
        self.m_steer = 0            # latest steering axis value
        self.m_gear = 0             # latest (inverted) throttle axis value
        self.press_count = 0        # debounce counter for gamepad buttons

    def play(self):
        """Wire the on-screen buttons to car actions and return the UI."""
        self._setting()
        return self.panel.display()

    def _car_axes_rl(self, change):
        """Steering axis callback: drive with the new steer value."""
        self.m_steer = change['new']
        self.car.drive(steer=self.m_steer, gear=self.m_gear)

    def _car_axes_fb(self, change):
        """Throttle axis callback.

        The vertical axis is inverted (pushing forward reads negative);
        a dead zone of 0.1 suppresses stick jitter around center.
        """
        self.m_gear = - change['new']
        if abs(self.m_gear) > 0.1:
            self.car.drive(steer=self.m_steer, gear=self.m_gear)
        else:
            self.car.drive(steer=0, gear=0)

    def _slide_left(self, change):
        """Strafe left while gamepad button 10 is held; stop on release."""
        pct = 0.5
        if self.controller.buttons[10].value == 1:
            self.car.run(m1=-pct, m2=pct, m3=-pct, m4=pct)
        else:
            self.car.run(m1=0, m2=0, m3=0, m4=0)

    def _slide_right(self, change):
        """Strafe right while gamepad button 11 is held; stop on release."""
        pct = 0.5
        if self.controller.buttons[11].value == 1:
            self.car.run(m1=pct, m2=-pct, m3=pct, m4=-pct)
        else:
            self.car.run(m1=0, m2=0, m3=0, m4=0)

    def controller_setup(self, index=0):
        """Create the gamepad widget.

        The browser only activates a gamepad after user input, hence the
        prompt asking the user to move the controller.
        """
        self.controller = widgets.Controller(index=index)
        print("Move your controller NOW and activiate it...")
        display(self.controller)

    def controller_on(self):
        """Link gamepad axes and buttons to car movement controls."""
        self.controller.axes[0].observe(self._car_axes_rl, names=['value'])
        self.controller.axes[3].observe(self._car_axes_fb, names=['value'])
        self.controller.buttons[12].observe(lambda x: self._press_act(event="forward"))    # forward
        self.controller.buttons[13].observe(lambda x: self._press_act(event="backward"))   # backward
        self.controller.buttons[14].observe(lambda x: self._press_act(event="left"))       # turn left
        self.controller.buttons[15].observe(lambda x: self._press_act(event="right"))      # turn right
        self.controller.buttons[4].observe(lambda x: self._press_act(event="level_down"))  # gear level down
        self.controller.buttons[5].observe(lambda x: self._press_act(event="level_up"))    # gear level up
        self.controller.buttons[10].observe(self._slide_left, names=['value'])   # slide car to left
        self.controller.buttons[11].observe(self._slide_right, names=['value'])  # slide car to right

    def _setting(self):
        """Bind each panel button to its car action."""
        self.panel.buttons[0].on_click(lambda x: self.car.forward(p=0.3, t=-1))
        self.panel.buttons[1].on_click(lambda x: self.car.forward())
        self.panel.buttons[2].on_click(lambda x: self.car.slide_left(p=0.5, t=-1))
        self.panel.buttons[3].on_click(lambda x: self.car.left())
        self.panel.buttons[4].on_click(lambda x: self.car.stop())
        self.panel.buttons[5].on_click(lambda x: self.car.right())
        self.panel.buttons[6].on_click(lambda x: self.car.backward(p=0.3, t=-1))
        self.panel.buttons[7].on_click(lambda x: self.car.backward())
        self.panel.buttons[8].on_click(lambda x: self.car.slide_right(p=0.5, t=-1))
        self.panel.extra[0].on_click(lambda x: self.car.level_up())
        self.panel.extra[1].on_click(lambda x: self.car.level_read())
        self.panel.extra[2].on_click(lambda x: self.car.level_down())

    def _press_act(self, event):
        """Debounced dispatch of a named car action.

        ``observe`` fires several times per physical button press, so the
        car method is only invoked on every 9th call; unknown event names
        just reset the counter (same as before).

        Args:
            event: One of the names listed in ``_EVENTS``.
        """
        self.press_count += 1
        if self.press_count > 8:
            if event in self._EVENTS:
                getattr(self.car, event)()
            self.press_count = 0
class ButtonGroup():
    """3x3 grid of movement buttons plus a 3-button gear-level column."""

    def __init__(self, button_list=None):
        """Create the button widgets.

        Args:
            button_list: Optional list of 9 button captions; defaults to
                the Fwd/arrows/Stop layout below.
        """
        # Fixed-size buttons so the 3x3 grid lines up.
        self.button_layout = widgets.Layout(width='60px', height='40px', align_self='center')
        # NOTE(review): `== None` should be `is None`; kept as-is in this
        # documentation-only pass.
        self.button_list = ['Fwd', '▲', 'sL', '◄', 'Stop', '►', 'Bwd', '▼', 'sR'] if button_list==None else button_list
        # Gear-level captions: up / read / down.
        self.extra_list = ['g△', '-', 'g▽']
        self.buttons = [Button(description=i, layout=self.button_layout) for i in self.button_list]
        self.extra = [Button(description=i, layout=self.button_layout) for i in self.extra_list]

    def display(self):
        """Return the assembled widget: 3x3 movement grid + gear column."""
        row1 = HBox([self.buttons[0], self.buttons[1], self.buttons[2]])
        row2 = HBox([self.buttons[3], self.buttons[4], self.buttons[5]])
        row3 = HBox([self.buttons[6], self.buttons[7], self.buttons[8]])
        col1 = VBox([self.extra[0], self.extra[1], self.extra[2]])
        return HBox([VBox([row1, row2, row3]), col1]) | 0.407923 | 0.266745
import pandas as pd
import numpy as np
from numpy import array
from numpy import argmax
import os, re
import cv2
import imageio
def preprocess_input(image, fixed_size=128):
    '''
    Letterbox an image to a fixed-size square and min-max normalize it.

    The image is scaled so its longer side equals ``fixed_size``, padded
    symmetrically with black on the shorter side, reshaped to a single
    channel and stretched into the 0-255 intensity range.

    Args:
        image: 2-D (grayscale) image array. NOTE(review): the reshape
            below requires single-channel input -- confirm callers never
            pass RGB data.
        fixed_size: Side length in pixels of the square output.

    Returns:
        numpy array of shape (fixed_size, fixed_size, 1).
    '''
    image_size = image.shape[:2]
    # Scale factor mapping the longer side onto fixed_size.
    ratio = float(fixed_size) / max(image_size)
    new_size = tuple(int(x * ratio) for x in image_size)
    # cv2.resize expects (width, height), the reverse of numpy order.
    img = cv2.resize(image, (new_size[1], new_size[0]))
    delta_w = fixed_size - new_size[1]
    delta_h = fixed_size - new_size[0]
    # Split the padding as evenly as possible between opposite sides.
    top, bottom = delta_h // 2, delta_h - (delta_h // 2)
    left, right = delta_w // 2, delta_w - (delta_w // 2)
    ri = cv2.copyMakeBorder(
        img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=[0, 0, 0])
    gimg = np.array(ri).reshape(fixed_size, fixed_size, 1)
    # Normalize in place (dst=gimg) and return the normalized array.
    return cv2.normalize(gimg, gimg, 0, 255, cv2.NORM_MINMAX)
# build a list of PNGs that match the rows of subsampled metadata from image-file-directory.csv
# this function from AC and VS
def buildPNGsName(imageid):
    '''
    Return the relative path of the PNG for a given IFCB image id.

    The id has the fixed layout <instrument><DyyyymmddThhmmss><Pnnnnn>;
    the folder is "<datetime>_<instrument>" and the file name is the
    full id plus a ".png" suffix.

    Example:
        IFCB107D20151128T170950P00079 ->
        D20151128T170950_IFCB107/IFCB107D20151128T170950P00079.png

    Args:
        imageid (str): e.g. "IFCB107D20151109T221417P00789".

    Returns:
        str: "<datetime>_<instrument>/<imageid>.png"
    '''
    # The trailing 22 characters are D<8 digits>T<6 digits>P<5 digits>;
    # the datetime stamp is the 16 characters before the P-suffix.
    stamp = imageid[-22:-6]
    device = imageid[:-22]
    folder = '_'.join([stamp, device])
    return os.path.join(folder, imageid + '.png')
# this came from multi_stream_generator_SLC
def image_generator(dataset, batch_size, lb):
    '''
    Infinite Keras-style generator yielding (images, binarized labels).

    Args:
        dataset: pandas DataFrame with at least a 'full_path' column
            (image file path) and a 'high_group' column (class label).
        batch_size: Number of samples per yielded batch.
        lb: Fitted label binarizer exposing a ``transform`` method.

    Yields:
        (image_data, labels): numpy array of preprocessed images and the
        corresponding transformed labels for one batch.
    '''
    data_size = len(dataset)
    # NOTE(review): float division -- np.array_split truncates it to an
    # int internally, but an explicit // would be clearer.
    n_batches = data_size / batch_size
    remain = data_size % batch_size
    while True:
        # Drop the remainder so every batch has exactly batch_size rows,
        # then shuffle the epoch.
        files = dataset.sample(n=data_size - remain)
        shuffled = files.sample(frac=1)
        result = np.array_split(shuffled, n_batches)
        for batch in result:
            labels = batch['high_group'].values
            labels = lb.transform(labels)
            image_data = []
            for i in range(len(batch)):
                row = batch.iloc[i]
                input_path = row['full_path']
                #image_data.append(preprocess_input(cv2.imread(input_path)))
                image_data.append(preprocess_input(imageio.imread(input_path)))
            image_data = np.array(image_data)
            yield (image_data, labels ) | data_utils.py |
import pandas as pd
import numpy as np
from numpy import array
from numpy import argmax
import os, re
import cv2
import imageio
def preprocess_input(image, fixed_size=128):
    '''
    Letterbox ``image`` into a (fixed_size, fixed_size, 1) array.

    The input is scaled so that its longer edge equals ``fixed_size``,
    centered on a black square canvas, reshaped to a single channel and
    min-max normalized into the 0-255 range.
    '''
    height, width = image.shape[:2]
    scale = float(fixed_size) / max(height, width)
    scaled_h = int(height * scale)
    scaled_w = int(width * scale)
    # cv2.resize takes (width, height), i.e. reversed numpy order.
    resized = cv2.resize(image, (scaled_w, scaled_h))
    # Remaining margins on each axis, split as evenly as possible.
    pad_h = fixed_size - scaled_h
    pad_w = fixed_size - scaled_w
    top = pad_h // 2
    bottom = pad_h - top
    left = pad_w // 2
    right = pad_w - left
    boxed = cv2.copyMakeBorder(
        resized, top, bottom, left, right,
        cv2.BORDER_CONSTANT, value=[0, 0, 0])
    channelled = np.array(boxed).reshape(fixed_size, fixed_size, 1)
    # Normalizes in place into `channelled` and returns the same array.
    return cv2.normalize(channelled, channelled, 0, 255, cv2.NORM_MINMAX)
# build a list of PNGs that match the rows of subsampled metadata from image-file-directory.csv
# this function from AC and VS
def buildPNGsName(imageid):
    '''
    Map an IFCB image id to its PNG path relative to the data root.

    Ids look like "IFCB107D20151109T221417P00789": an instrument prefix,
    a 16-character DyyyymmddThhmmss timestamp, and a Pnnnnn image
    number.  The PNG lives in a "<timestamp>_<instrument>" folder and is
    named "<imageid>.png".

    Args:
        imageid (str): full image identifier.

    Returns:
        str: relative path "<timestamp>_<instrument>/<imageid>.png".
    '''
    filename = imageid + '.png'
    # Fixed-width suffix: 16-char timestamp followed by a 6-char Pnnnnn
    # image number; everything before those 22 chars is the instrument.
    timestamp, instrument = imageid[-22:-6], imageid[:-22]
    return os.path.join('{0:s}_{1:s}'.format(timestamp, instrument), filename)
# this came from multi_stream_generator_SLC
def image_generator(dataset, batch_size, lb):
    '''
    Infinite Keras-style generator yielding (images, binarized labels).

    Args:
        dataset: pandas DataFrame with at least a 'full_path' column
            (image file path) and a 'high_group' column (class label).
        batch_size: Number of samples per yielded batch.
        lb: Fitted label binarizer exposing a ``transform`` method.

    Yields:
        (image_data, labels): numpy array of preprocessed images and the
        corresponding transformed labels for one batch.
    '''
    data_size = len(dataset)
    # NOTE(review): float division -- np.array_split truncates it to an
    # int internally, but an explicit // would be clearer.
    n_batches = data_size / batch_size
    remain = data_size % batch_size
    while True:
        # Drop the remainder so every batch has exactly batch_size rows,
        # then shuffle the epoch.
        files = dataset.sample(n=data_size - remain)
        shuffled = files.sample(frac=1)
        result = np.array_split(shuffled, n_batches)
        for batch in result:
            labels = batch['high_group'].values
            labels = lb.transform(labels)
            image_data = []
            for i in range(len(batch)):
                row = batch.iloc[i]
                input_path = row['full_path']
                #image_data.append(preprocess_input(cv2.imread(input_path)))
                image_data.append(preprocess_input(imageio.imread(input_path)))
            image_data = np.array(image_data)
            yield (image_data, labels ) | 0.313735 | 0.354573
"""Timeline resources for version 1 of the Timesketch API."""
import codecs
import json
import logging
import uuid
import six
import elasticsearch
from flask import request
from flask import abort
from flask import current_app
from flask_restful import Resource
from flask_login import login_required
from flask_login import current_user
from timesketch.api.v1 import resources
from timesketch.api.v1 import utils
from timesketch.lib import forms
from timesketch.lib.definitions import HTTP_STATUS_CODE_OK
from timesketch.lib.definitions import HTTP_STATUS_CODE_CREATED
from timesketch.lib.definitions import HTTP_STATUS_CODE_BAD_REQUEST
from timesketch.lib.definitions import HTTP_STATUS_CODE_FORBIDDEN
from timesketch.lib.definitions import HTTP_STATUS_CODE_NOT_FOUND
from timesketch.models import db_session
from timesketch.models.sketch import SearchIndex
from timesketch.models.sketch import Sketch
from timesketch.models.sketch import Timeline
logger = logging.getLogger('timesketch.timeline_api')
class TimelineListResource(resources.ResourceMixin, Resource):
    """Resource to get all timelines for sketch."""

    @login_required
    def get(self, sketch_id):
        """Handles GET request to the resource.

        Args:
            sketch_id: Integer primary key for a sketch database model.

        Returns:
            View in JSON (instance of flask.wrappers.Response)
        """
        sketch = Sketch.query.get_with_acl(sketch_id)
        if not sketch:
            abort(
                HTTP_STATUS_CODE_NOT_FOUND, 'No sketch found with this ID.')
        if not sketch.has_permission(current_user, 'read'):
            abort(HTTP_STATUS_CODE_FORBIDDEN,
                  'User does not have read access controls on sketch.')
        return self.to_json(sketch.timelines)

    @login_required
    def post(self, sketch_id):
        """Handles POST request to the resource.

        Attaches an existing search index to the sketch as a timeline,
        or returns the already attached timeline when present.

        Args:
            sketch_id: Integer primary key for a sketch database model.

        Returns:
            A timeline in JSON (instance of flask.wrappers.Response)
        """
        sketch = Sketch.query.get_with_acl(sketch_id)
        if not sketch:
            abort(
                HTTP_STATUS_CODE_NOT_FOUND, 'No sketch found with this ID.')
        if not sketch.has_permission(current_user, 'write'):
            abort(HTTP_STATUS_CODE_FORBIDDEN,
                  'User does not have write access controls on sketch.')
        # Accept either a JSON body or raw request data.
        form = request.json
        if not form:
            form = request.data
        metadata = {'created': True}
        searchindex_id = form.get('timeline', 0)
        # The id may arrive as a numeric string; coerce before validating.
        if isinstance(searchindex_id, str) and searchindex_id.isdigit():
            searchindex_id = int(searchindex_id)
        if not isinstance(searchindex_id, int):
            abort(
                HTTP_STATUS_CODE_BAD_REQUEST,
                'The timeline (searchindex id) needs to be an integer.')
        searchindex = SearchIndex.query.get_with_acl(searchindex_id)
        if searchindex.get_status.status == 'deleted':
            abort(
                HTTP_STATUS_CODE_BAD_REQUEST,
                'Unable to create a timeline using a deleted search index')
        # Non-empty when the search index is already attached to the sketch.
        timeline_id = [
            t.searchindex.id for t in sketch.timelines
            if t.searchindex.id == searchindex_id
        ]
        if not timeline_id:
            return_code = HTTP_STATUS_CODE_CREATED
            timeline_name = form.get('timeline_name', searchindex.name)
            timeline = Timeline(
                name=timeline_name,
                description=searchindex.description,
                sketch=sketch,
                user=current_user,
                searchindex=searchindex)
            sketch.timelines.append(timeline)
            # Propagate protective labels from the sketch to the new
            # timeline and its search index.
            labels_to_prevent_deletion = current_app.config.get(
                'LABELS_TO_PREVENT_DELETION', [])
            for label in sketch.get_labels:
                if label not in labels_to_prevent_deletion:
                    continue
                timeline.add_label(label)
                searchindex.add_label(label)
            # Set status to ready so the timeline can be queried.
            timeline.set_status('ready')
            db_session.add(timeline)
            db_session.commit()
        else:
            metadata['created'] = False
            return_code = HTTP_STATUS_CODE_OK
            # NOTE(review): timeline_id is a list of searchindex ids, not
            # a Timeline primary key -- confirm this lookup is intended.
            timeline = Timeline.query.get(timeline_id)
        # Run sketch analyzers when timeline is added. Import here to avoid
        # circular imports.
        # pylint: disable=import-outside-toplevel
        if current_app.config.get('AUTO_SKETCH_ANALYZERS'):
            # pylint: disable=import-outside-toplevel
            from timesketch.lib import tasks
            sketch_analyzer_group, _ = tasks.build_sketch_analysis_pipeline(
                sketch_id, searchindex_id, current_user.id,
                timeline_id=timeline_id)
            if sketch_analyzer_group:
                pipeline = (tasks.run_sketch_init.s(
                    [searchindex.index_name]) | sketch_analyzer_group)
                pipeline.apply_async()
        # Update the last activity of a sketch.
        utils.update_sketch_last_activity(sketch)
        return self.to_json(
            timeline, meta=metadata, status_code=return_code)
class TimelineResource(resources.ResourceMixin, Resource):
    """Resource to get, update and delete a single timeline."""

    def _add_label(self, timeline, label):
        """Add a label to the timeline.

        Args:
            timeline: Timeline database object.
            label: Label string to apply.

        Returns:
            True if the label was added, False if it was already present.
        """
        if timeline.has_label(label):
            logger.warning(
                'Unable to apply the label [{0:s}] to timeline {1:s}, '
                'already exists.'.format(label, timeline.name))
            return False
        timeline.add_label(label, user=current_user)
        return True

    def _remove_label(self, timeline, label):
        """Removes a label from a timeline.

        Args:
            timeline: Timeline database object.
            label: Label string to remove.

        Returns:
            True if the label was removed, False if it was not present.
        """
        if not timeline.has_label(label):
            logger.warning(
                'Unable to remove the label [{0:s}] from timeline {1:s}, '
                'label does not exist.'.format(label, timeline.name))
            return False
        timeline.remove_label(label)
        return True

    @login_required
    def get(self, sketch_id, timeline_id):
        """Handles GET request to the resource.

        Args:
            sketch_id: Integer primary key for a sketch database model
            timeline_id: Integer primary key for a timeline database model

        Returns:
            The timeline in JSON (instance of flask.wrappers.Response)
        """
        sketch = Sketch.query.get_with_acl(sketch_id)
        if not sketch:
            abort(
                HTTP_STATUS_CODE_NOT_FOUND, 'No sketch found with this ID.')
        timeline = Timeline.query.get(timeline_id)
        if not timeline:
            abort(
                HTTP_STATUS_CODE_NOT_FOUND, 'No Timeline found with this ID.')
        if timeline.sketch is None:
            abort(
                HTTP_STATUS_CODE_NOT_FOUND,
                f'The timeline {timeline_id} does not have an associated '
                'sketch, does it belong to a sketch?')
        # Check that this timeline belongs to the sketch
        if timeline.sketch.id != sketch.id:
            abort(
                HTTP_STATUS_CODE_NOT_FOUND,
                'The sketch ID ({0:d}) does not match with the timeline '
                'sketch ID ({1:d})'.format(sketch.id, timeline.sketch.id))
        if not sketch.has_permission(user=current_user, permission='read'):
            abort(
                HTTP_STATUS_CODE_FORBIDDEN,
                'The user does not have read permission on the sketch.')
        return self.to_json(timeline)

    @login_required
    def post(self, sketch_id, timeline_id):
        """Handles POST request to the resource.

        Either applies/removes labels (when the form carries label data)
        or updates the timeline's name, description and color.

        Args:
            sketch_id: Integer primary key for a sketch database model
            timeline_id: Integer primary key for a timeline database model
        """
        sketch = Sketch.query.get_with_acl(sketch_id)
        if not sketch:
            abort(
                HTTP_STATUS_CODE_NOT_FOUND, 'No sketch found with this ID.')
        timeline = Timeline.query.get(timeline_id)
        if not timeline:
            abort(
                HTTP_STATUS_CODE_NOT_FOUND,
                'No timeline found with this ID.')
        if timeline.sketch is None:
            abort(
                HTTP_STATUS_CODE_NOT_FOUND,
                'No sketch associated with this timeline.')
        # Check that this timeline belongs to the sketch
        if timeline.sketch.id != sketch.id:
            abort(
                HTTP_STATUS_CODE_NOT_FOUND,
                'The sketch ID ({0:d}) does not match with the timeline '
                'sketch ID ({1:d})'.format(sketch.id, timeline.sketch.id))
        if not sketch.has_permission(user=current_user, permission='write'):
            abort(
                HTTP_STATUS_CODE_FORBIDDEN,
                'The user does not have write permission on the sketch.')
        form = forms.TimelineForm.build(request)
        if not form.validate_on_submit():
            abort(
                HTTP_STATUS_CODE_BAD_REQUEST, 'Unable to validate form data.')
        # Label mode: the form carries a JSON list of labels plus an
        # add/remove action; no other fields are touched.
        if form.labels.data:
            label_string = form.labels.data
            labels = json.loads(label_string)
            if not isinstance(labels, (list, tuple)):
                abort(
                    HTTP_STATUS_CODE_BAD_REQUEST, (
                        'Label needs to be a JSON string that '
                        'converts to a list of strings.'))
            if not all([isinstance(x, str) for x in labels]):
                abort(
                    HTTP_STATUS_CODE_BAD_REQUEST, (
                        'Label needs to be a JSON string that '
                        'converts to a list of strings (not all strings)'))
            label_action = form.label_action.data
            if label_action not in ('add', 'remove'):
                abort(
                    HTTP_STATUS_CODE_BAD_REQUEST,
                    'Label action needs to be either add or remove.')
            changed = False
            if label_action == 'add':
                changes = []
                for label in labels:
                    changes.append(
                        self._add_label(timeline=timeline, label=label))
                changed = any(changes)
            elif label_action == 'remove':
                changes = []
                for label in labels:
                    changes.append(
                        self._remove_label(timeline=timeline, label=label))
                changed = any(changes)
            if not changed:
                abort(
                    HTTP_STATUS_CODE_BAD_REQUEST,
                    'Label [{0:s}] not {1:s}'.format(
                        ', '.join(labels), label_action))
            db_session.add(timeline)
            db_session.commit()
            return HTTP_STATUS_CODE_OK
        # Metadata mode: update the editable timeline fields.
        timeline.name = form.name.data
        timeline.description = form.description.data
        timeline.color = form.color.data
        db_session.add(timeline)
        db_session.commit()
        # Update the last activity of a sketch.
        utils.update_sketch_last_activity(sketch)
        return HTTP_STATUS_CODE_OK

    @login_required
    def delete(self, sketch_id, timeline_id):
        """Handles DELETE request to the resource.

        Detaches the timeline from the sketch and archives it; the
        underlying search index is only closed (and archived) when no
        other sketch or non-archived timeline still uses it.

        Args:
            sketch_id: Integer primary key for a sketch database model
            timeline_id: Integer primary key for a timeline database model
        """
        sketch = Sketch.query.get_with_acl(sketch_id)
        if not sketch:
            abort(
                HTTP_STATUS_CODE_NOT_FOUND, 'No sketch found with this ID.')
        timeline = Timeline.query.get(timeline_id)
        if not timeline:
            abort(
                HTTP_STATUS_CODE_NOT_FOUND, 'No timeline found with this ID.')
        if timeline.sketch is None:
            abort(
                HTTP_STATUS_CODE_NOT_FOUND,
                'No sketch associated with this timeline.')
        # Check that this timeline belongs to the sketch
        if timeline.sketch.id != sketch.id:
            # NOTE(review): both objects are known to be truthy here
            # (earlier aborts), so only the final else branch can run.
            if not timeline:
                msg = 'No timeline found with this ID.'
            elif not sketch:
                msg = 'No sketch found with this ID.'
            else:
                sketch_use = sketch.id or 'No sketch ID'
                sketch_string = str(sketch_use)
                timeline_use = timeline.sketch.id or (
                    'No sketch associated with the timeline.')
                timeline_string = str(timeline_use)
                msg = (
                    'The sketch ID ({0:s}) does not match with the timeline '
                    'sketch ID ({1:s})'.format(sketch_string, timeline_string))
            abort(HTTP_STATUS_CODE_NOT_FOUND, msg)
        if not sketch.has_permission(user=current_user, permission='write'):
            abort(
                HTTP_STATUS_CODE_FORBIDDEN,
                'The user does not have write permission on the sketch.')
        # Refuse deletion when a protective label is set.
        not_delete_labels = current_app.config.get(
            'LABELS_TO_PREVENT_DELETION', [])
        for label in not_delete_labels:
            if timeline.has_label(label):
                abort(
                    HTTP_STATUS_CODE_FORBIDDEN,
                    'Timelines with label [{0:s}] cannot be deleted.'.format(
                        label))
        # Check if this searchindex is used in other sketches.
        close_index = True
        searchindex = timeline.searchindex
        index_name = searchindex.index_name
        search_indices = SearchIndex.query.filter_by(
            index_name=index_name).all()
        timelines = []
        for index in search_indices:
            timelines.extend(index.timelines)
        for timeline_ in timelines:
            if timeline_.sketch is None:
                continue
            if timeline_.sketch.id != sketch.id:
                # Used by another sketch; keep the index open.
                close_index = False
                break
            if timeline_.id != timeline_id:
                # There are more than a single timeline using this index_name,
                # we can't close it (unless this timeline is archived).
                if timeline_.get_status.status != 'archived':
                    close_index = False
                    break
        if close_index:
            try:
                self.datastore.client.indices.close(
                    index=searchindex.index_name)
            except elasticsearch.NotFoundError:
                logger.error(
                    'Unable to close index: {0:s} - index not '
                    'found'.format(searchindex.index_name))
            searchindex.set_status(status='archived')
        timeline.set_status(status='archived')
        sketch.timelines.remove(timeline)
        db_session.commit()
        # Update the last activity of a sketch.
        utils.update_sketch_last_activity(sketch)
        return HTTP_STATUS_CODE_OK
class TimelineCreateResource(resources.ResourceMixin, Resource):
    """Resource to create a timeline."""

    @login_required
    def post(self):
        """Handles POST request to the resource.

        Creates a new (empty) search index for an upload and, when a
        sketch id is supplied and writable, attaches it to the sketch as
        a timeline.

        Returns:
            A view in JSON (instance of flask.wrappers.Response)
        """
        upload_enabled = current_app.config['UPLOAD_ENABLED']
        if not upload_enabled:
            abort(
                HTTP_STATUS_CODE_BAD_REQUEST,
                'Failed to create timeline, upload not enabled')
        form = forms.CreateTimelineForm()
        if not form.validate_on_submit():
            abort(
                HTTP_STATUS_CODE_BAD_REQUEST,
                'Failed to create timeline, form data not validated')
        sketch_id = form.sketch_id.data
        timeline_name = form.name.data
        sketch = None
        if sketch_id:
            sketch = Sketch.query.get_with_acl(sketch_id)
            if not sketch:
                abort(
                    HTTP_STATUS_CODE_NOT_FOUND,
                    'No sketch found with this ID.')
        # We do not need a human readable filename or
        # datastore index name, so we use UUIDs here.
        index_name = uuid.uuid4().hex
        if not isinstance(index_name, six.text_type):
            index_name = codecs.decode(index_name, 'utf-8')
        # Create the search index in the Timesketch database
        searchindex = SearchIndex.get_or_create(
            name=timeline_name,
            description=timeline_name,
            user=current_user,
            index_name=index_name)
        # The creating user gets full control of the new index.
        searchindex.grant_permission(permission='read', user=current_user)
        searchindex.grant_permission(permission='write', user=current_user)
        searchindex.grant_permission(
            permission='delete', user=current_user)
        # Mark as processing until data has actually been indexed.
        searchindex.set_status('processing')
        db_session.add(searchindex)
        db_session.commit()
        timeline = None
        if sketch and sketch.has_permission(current_user, 'write'):
            timeline = Timeline(
                name=searchindex.name,
                description=searchindex.description,
                sketch=sketch,
                user=current_user,
                searchindex=searchindex)
            sketch.timelines.append(timeline)
            db_session.add(timeline)
            db_session.commit()
        # Return Timeline if it was created.
        # pylint: disable=no-else-return
        if timeline:
            return self.to_json(
                timeline, status_code=HTTP_STATUS_CODE_CREATED)
        # Update the last activity of a sketch.
        utils.update_sketch_last_activity(sketch)
        return self.to_json(
            searchindex, status_code=HTTP_STATUS_CODE_CREATED) | timesketch/api/v1/resources/timeline.py | """Timeline resources for version 1 of the Timesketch API."""
import codecs
import json
import logging
import uuid
import six
import elasticsearch
from flask import request
from flask import abort
from flask import current_app
from flask_restful import Resource
from flask_login import login_required
from flask_login import current_user
from timesketch.api.v1 import resources
from timesketch.api.v1 import utils
from timesketch.lib import forms
from timesketch.lib.definitions import HTTP_STATUS_CODE_OK
from timesketch.lib.definitions import HTTP_STATUS_CODE_CREATED
from timesketch.lib.definitions import HTTP_STATUS_CODE_BAD_REQUEST
from timesketch.lib.definitions import HTTP_STATUS_CODE_FORBIDDEN
from timesketch.lib.definitions import HTTP_STATUS_CODE_NOT_FOUND
from timesketch.models import db_session
from timesketch.models.sketch import SearchIndex
from timesketch.models.sketch import Sketch
from timesketch.models.sketch import Timeline
logger = logging.getLogger('timesketch.timeline_api')
class TimelineListResource(resources.ResourceMixin, Resource):
    """Resource to get all timelines for sketch."""

    @login_required
    def get(self, sketch_id):
        """Handles GET request to the resource.

        Args:
            sketch_id: Integer primary key for a sketch database model.

        Returns:
            View in JSON (instance of flask.wrappers.Response)
        """
        sketch = Sketch.query.get_with_acl(sketch_id)
        if not sketch:
            abort(
                HTTP_STATUS_CODE_NOT_FOUND, 'No sketch found with this ID.')
        if not sketch.has_permission(current_user, 'read'):
            abort(HTTP_STATUS_CODE_FORBIDDEN,
                  'User does not have read access controls on sketch.')
        return self.to_json(sketch.timelines)

    @login_required
    def post(self, sketch_id):
        """Handles POST request to the resource.

        Attaches an existing search index to the sketch as a timeline,
        or returns the already attached timeline when present.

        Args:
            sketch_id: Integer primary key for a sketch database model.

        Returns:
            A timeline in JSON (instance of flask.wrappers.Response)
        """
        sketch = Sketch.query.get_with_acl(sketch_id)
        if not sketch:
            abort(
                HTTP_STATUS_CODE_NOT_FOUND, 'No sketch found with this ID.')
        if not sketch.has_permission(current_user, 'write'):
            abort(HTTP_STATUS_CODE_FORBIDDEN,
                  'User does not have write access controls on sketch.')
        # Accept either a JSON body or raw request data.
        form = request.json
        if not form:
            form = request.data
        metadata = {'created': True}
        searchindex_id = form.get('timeline', 0)
        # The id may arrive as a numeric string; coerce before validating.
        if isinstance(searchindex_id, str) and searchindex_id.isdigit():
            searchindex_id = int(searchindex_id)
        if not isinstance(searchindex_id, int):
            abort(
                HTTP_STATUS_CODE_BAD_REQUEST,
                'The timeline (searchindex id) needs to be an integer.')
        searchindex = SearchIndex.query.get_with_acl(searchindex_id)
        if searchindex.get_status.status == 'deleted':
            abort(
                HTTP_STATUS_CODE_BAD_REQUEST,
                'Unable to create a timeline using a deleted search index')
        # Non-empty when the search index is already attached to the sketch.
        timeline_id = [
            t.searchindex.id for t in sketch.timelines
            if t.searchindex.id == searchindex_id
        ]
        if not timeline_id:
            return_code = HTTP_STATUS_CODE_CREATED
            timeline_name = form.get('timeline_name', searchindex.name)
            timeline = Timeline(
                name=timeline_name,
                description=searchindex.description,
                sketch=sketch,
                user=current_user,
                searchindex=searchindex)
            sketch.timelines.append(timeline)
            # Propagate protective labels from the sketch to the new
            # timeline and its search index.
            labels_to_prevent_deletion = current_app.config.get(
                'LABELS_TO_PREVENT_DELETION', [])
            for label in sketch.get_labels:
                if label not in labels_to_prevent_deletion:
                    continue
                timeline.add_label(label)
                searchindex.add_label(label)
            # Set status to ready so the timeline can be queried.
            timeline.set_status('ready')
            db_session.add(timeline)
            db_session.commit()
        else:
            metadata['created'] = False
            return_code = HTTP_STATUS_CODE_OK
            # NOTE(review): timeline_id is a list of searchindex ids, not
            # a Timeline primary key -- confirm this lookup is intended.
            timeline = Timeline.query.get(timeline_id)
        # Run sketch analyzers when timeline is added. Import here to avoid
        # circular imports.
        # pylint: disable=import-outside-toplevel
        if current_app.config.get('AUTO_SKETCH_ANALYZERS'):
            # pylint: disable=import-outside-toplevel
            from timesketch.lib import tasks
            sketch_analyzer_group, _ = tasks.build_sketch_analysis_pipeline(
                sketch_id, searchindex_id, current_user.id,
                timeline_id=timeline_id)
            if sketch_analyzer_group:
                pipeline = (tasks.run_sketch_init.s(
                    [searchindex.index_name]) | sketch_analyzer_group)
                pipeline.apply_async()
        # Update the last activity of a sketch.
        utils.update_sketch_last_activity(sketch)
        return self.to_json(
            timeline, meta=metadata, status_code=return_code)
class TimelineResource(resources.ResourceMixin, Resource):
    """Resource to get, update and delete a single timeline."""

    def _add_label(self, timeline, label):
        """Add a label to the timeline.

        Args:
            timeline: Timeline database object.
            label: Label string to apply.

        Returns:
            True if the label was added, False if it was already present.
        """
        if timeline.has_label(label):
            logger.warning(
                'Unable to apply the label [{0:s}] to timeline {1:s}, '
                'already exists.'.format(label, timeline.name))
            return False
        timeline.add_label(label, user=current_user)
        return True

    def _remove_label(self, timeline, label):
        """Removes a label from a timeline.

        Args:
            timeline: Timeline database object.
            label: Label string to remove.

        Returns:
            True if the label was removed, False if it was not present.
        """
        if not timeline.has_label(label):
            logger.warning(
                'Unable to remove the label [{0:s}] from timeline {1:s}, '
                'label does not exist.'.format(label, timeline.name))
            return False
        timeline.remove_label(label)
        return True

    @login_required
    def get(self, sketch_id, timeline_id):
        """Handles GET request to the resource.

        Args:
            sketch_id: Integer primary key for a sketch database model
            timeline_id: Integer primary key for a timeline database model

        Returns:
            The timeline in JSON (instance of flask.wrappers.Response)
        """
        sketch = Sketch.query.get_with_acl(sketch_id)
        if not sketch:
            abort(
                HTTP_STATUS_CODE_NOT_FOUND, 'No sketch found with this ID.')
        timeline = Timeline.query.get(timeline_id)
        if not timeline:
            abort(
                HTTP_STATUS_CODE_NOT_FOUND, 'No Timeline found with this ID.')
        if timeline.sketch is None:
            abort(
                HTTP_STATUS_CODE_NOT_FOUND,
                f'The timeline {timeline_id} does not have an associated '
                'sketch, does it belong to a sketch?')
        # Check that this timeline belongs to the sketch
        if timeline.sketch.id != sketch.id:
            abort(
                HTTP_STATUS_CODE_NOT_FOUND,
                'The sketch ID ({0:d}) does not match with the timeline '
                'sketch ID ({1:d})'.format(sketch.id, timeline.sketch.id))
        if not sketch.has_permission(user=current_user, permission='read'):
            abort(
                HTTP_STATUS_CODE_FORBIDDEN,
                'The user does not have read permission on the sketch.')
        return self.to_json(timeline)

    @login_required
    def post(self, sketch_id, timeline_id):
        """Handles POST request to the resource.

        Either applies/removes labels (when the form carries label data)
        or updates the timeline's name, description and color.

        Args:
            sketch_id: Integer primary key for a sketch database model
            timeline_id: Integer primary key for a timeline database model
        """
        sketch = Sketch.query.get_with_acl(sketch_id)
        if not sketch:
            abort(
                HTTP_STATUS_CODE_NOT_FOUND, 'No sketch found with this ID.')
        timeline = Timeline.query.get(timeline_id)
        if not timeline:
            abort(
                HTTP_STATUS_CODE_NOT_FOUND,
                'No timeline found with this ID.')
        if timeline.sketch is None:
            abort(
                HTTP_STATUS_CODE_NOT_FOUND,
                'No sketch associated with this timeline.')
        # Check that this timeline belongs to the sketch
        if timeline.sketch.id != sketch.id:
            abort(
                HTTP_STATUS_CODE_NOT_FOUND,
                'The sketch ID ({0:d}) does not match with the timeline '
                'sketch ID ({1:d})'.format(sketch.id, timeline.sketch.id))
        if not sketch.has_permission(user=current_user, permission='write'):
            abort(
                HTTP_STATUS_CODE_FORBIDDEN,
                'The user does not have write permission on the sketch.')
        form = forms.TimelineForm.build(request)
        if not form.validate_on_submit():
            abort(
                HTTP_STATUS_CODE_BAD_REQUEST, 'Unable to validate form data.')
        # Label mode: the form carries a JSON list of labels plus an
        # add/remove action; no other fields are touched.
        if form.labels.data:
            label_string = form.labels.data
            labels = json.loads(label_string)
            if not isinstance(labels, (list, tuple)):
                abort(
                    HTTP_STATUS_CODE_BAD_REQUEST, (
                        'Label needs to be a JSON string that '
                        'converts to a list of strings.'))
            if not all([isinstance(x, str) for x in labels]):
                abort(
                    HTTP_STATUS_CODE_BAD_REQUEST, (
                        'Label needs to be a JSON string that '
                        'converts to a list of strings (not all strings)'))
            label_action = form.label_action.data
            if label_action not in ('add', 'remove'):
                abort(
                    HTTP_STATUS_CODE_BAD_REQUEST,
                    'Label action needs to be either add or remove.')
            changed = False
            if label_action == 'add':
                changes = []
                for label in labels:
                    changes.append(
                        self._add_label(timeline=timeline, label=label))
                changed = any(changes)
            elif label_action == 'remove':
                changes = []
                for label in labels:
                    changes.append(
                        self._remove_label(timeline=timeline, label=label))
                changed = any(changes)
            if not changed:
                abort(
                    HTTP_STATUS_CODE_BAD_REQUEST,
                    'Label [{0:s}] not {1:s}'.format(
                        ', '.join(labels), label_action))
            db_session.add(timeline)
            db_session.commit()
            return HTTP_STATUS_CODE_OK
        # Metadata mode: update the editable timeline fields.
        timeline.name = form.name.data
        timeline.description = form.description.data
        timeline.color = form.color.data
        db_session.add(timeline)
        db_session.commit()
        # Update the last activity of a sketch.
        utils.update_sketch_last_activity(sketch)
        return HTTP_STATUS_CODE_OK

    @login_required
    def delete(self, sketch_id, timeline_id):
        """Handles DELETE request to the resource.

        Detaches the timeline from the sketch and archives it; the
        underlying search index is only closed (and archived) when no
        other sketch or non-archived timeline still uses it.

        Args:
            sketch_id: Integer primary key for a sketch database model
            timeline_id: Integer primary key for a timeline database model
        """
        sketch = Sketch.query.get_with_acl(sketch_id)
        if not sketch:
            abort(
                HTTP_STATUS_CODE_NOT_FOUND, 'No sketch found with this ID.')
        timeline = Timeline.query.get(timeline_id)
        if not timeline:
            abort(
                HTTP_STATUS_CODE_NOT_FOUND, 'No timeline found with this ID.')
        if timeline.sketch is None:
            abort(
                HTTP_STATUS_CODE_NOT_FOUND,
                'No sketch associated with this timeline.')
        # Check that this timeline belongs to the sketch
        if timeline.sketch.id != sketch.id:
            # NOTE(review): both objects are known to be truthy here
            # (earlier aborts), so only the final else branch can run.
            if not timeline:
                msg = 'No timeline found with this ID.'
            elif not sketch:
                msg = 'No sketch found with this ID.'
            else:
                sketch_use = sketch.id or 'No sketch ID'
                sketch_string = str(sketch_use)
                timeline_use = timeline.sketch.id or (
                    'No sketch associated with the timeline.')
                timeline_string = str(timeline_use)
                msg = (
                    'The sketch ID ({0:s}) does not match with the timeline '
                    'sketch ID ({1:s})'.format(sketch_string, timeline_string))
            abort(HTTP_STATUS_CODE_NOT_FOUND, msg)
        if not sketch.has_permission(user=current_user, permission='write'):
            abort(
                HTTP_STATUS_CODE_FORBIDDEN,
                'The user does not have write permission on the sketch.')
        # Refuse deletion when a protective label is set.
        not_delete_labels = current_app.config.get(
            'LABELS_TO_PREVENT_DELETION', [])
        for label in not_delete_labels:
            if timeline.has_label(label):
                abort(
                    HTTP_STATUS_CODE_FORBIDDEN,
                    'Timelines with label [{0:s}] cannot be deleted.'.format(
                        label))
        # Check if this searchindex is used in other sketches.
        close_index = True
        searchindex = timeline.searchindex
        index_name = searchindex.index_name
        search_indices = SearchIndex.query.filter_by(
            index_name=index_name).all()
        timelines = []
        for index in search_indices:
            timelines.extend(index.timelines)
        for timeline_ in timelines:
            if timeline_.sketch is None:
                continue
            if timeline_.sketch.id != sketch.id:
                # Used by another sketch; keep the index open.
                close_index = False
                break
            if timeline_.id != timeline_id:
                # There are more than a single timeline using this index_name,
                # we can't close it (unless this timeline is archived).
                if timeline_.get_status.status != 'archived':
                    close_index = False
                    break
        if close_index:
            try:
                self.datastore.client.indices.close(
                    index=searchindex.index_name)
            except elasticsearch.NotFoundError:
                logger.error(
                    'Unable to close index: {0:s} - index not '
                    'found'.format(searchindex.index_name))
            searchindex.set_status(status='archived')
        timeline.set_status(status='archived')
        sketch.timelines.remove(timeline)
        db_session.commit()
        # Update the last activity of a sketch.
        utils.update_sketch_last_activity(sketch)
        return HTTP_STATUS_CODE_OK
class TimelineCreateResource(resources.ResourceMixin, Resource):
    """Resource to create a timeline."""

    @login_required
    def post(self):
        """Handles POST request to the resource.

        Creates a new SearchIndex (and, when a writable sketch is supplied,
        a Timeline attached to that sketch).

        Returns:
            A view in JSON (instance of flask.wrappers.Response) containing
            the created Timeline if one was made, otherwise the SearchIndex.
        """
        upload_enabled = current_app.config['UPLOAD_ENABLED']
        if not upload_enabled:
            abort(
                HTTP_STATUS_CODE_BAD_REQUEST,
                'Failed to create timeline, upload not enabled')

        form = forms.CreateTimelineForm()
        if not form.validate_on_submit():
            abort(
                HTTP_STATUS_CODE_BAD_REQUEST,
                'Failed to create timeline, form data not validated')

        sketch_id = form.sketch_id.data
        timeline_name = form.name.data

        # The sketch is optional; when a sketch_id is supplied it must
        # resolve to a sketch the user can access.
        sketch = None
        if sketch_id:
            sketch = Sketch.query.get_with_acl(sketch_id)
            if not sketch:
                abort(
                    HTTP_STATUS_CODE_NOT_FOUND,
                    'No sketch found with this ID.')

        # We do not need a human readable filename or
        # datastore index name, so we use UUIDs here.
        index_name = uuid.uuid4().hex
        if not isinstance(index_name, six.text_type):
            index_name = codecs.decode(index_name, 'utf-8')

        # Create the search index in the Timesketch database.
        searchindex = SearchIndex.get_or_create(
            name=timeline_name,
            description=timeline_name,
            user=current_user,
            index_name=index_name)
        searchindex.grant_permission(permission='read', user=current_user)
        searchindex.grant_permission(permission='write', user=current_user)
        searchindex.grant_permission(
            permission='delete', user=current_user)
        searchindex.set_status('processing')
        db_session.add(searchindex)
        db_session.commit()

        timeline = None
        if sketch and sketch.has_permission(current_user, 'write'):
            timeline = Timeline(
                name=searchindex.name,
                description=searchindex.description,
                sketch=sketch,
                user=current_user,
                searchindex=searchindex)
            sketch.timelines.append(timeline)
            db_session.add(timeline)
            db_session.commit()

        # Update the last activity of the sketch, if there is one.
        # BUG FIX: the original called this unconditionally with a possibly
        # None sketch, and skipped the update entirely whenever a timeline
        # was created (it returned before reaching the call).
        if sketch:
            utils.update_sketch_last_activity(sketch)

        # Return Timeline if it was created.
        # pylint: disable=no-else-return
        if timeline:
            return self.to_json(
                timeline, status_code=HTTP_STATUS_CODE_CREATED)

        return self.to_json(
            searchindex, status_code=HTTP_STATUS_CODE_CREATED)
import logging
from torch import nn
from torch.optim import Adam
from bilstm_network import BiLstmNetwork
from data_pipeline import DataPipeline
from label_pipeline import LabelPipeline
from train import Train
from train_pipeline import TrainPipeline
from transform_label_encoder import TransformLabelEncoder
from transform_label_rehaper import TransformLabelReshaper
from transform_merge_tensors import TransformMergeTensors
from transform_text_to_index import TransformTextToIndex
class TrainBuilder:
    """Assembles a ready-to-run TrainPipeline: data and label pipelines,
    a BiLSTM network, optimiser, loss function and trainer."""

    def __init__(self, epochs=50, early_stopping=True, patience_epochs=30, batch_size=32, num_workers=None, **kwargs):
        """Store training hyper-parameters.

        Optional keyword arguments: learning_rate (default .01) and
        hidden_dim (default 20).
        """
        self.epochs = epochs
        self.early_stopping = early_stopping
        self.patience_epochs = patience_epochs
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.learning_rate = kwargs.get("learning_rate", .01)
        self.hidden_dim = kwargs.get("hidden_dim", 20)

    @property
    def logger(self):
        return logging.getLogger(__name__)

    def get_pipeline(self, train_dataset):
        """Build and return a TrainPipeline configured for *train_dataset*."""
        run_trainer = Train(patience_epochs=self.patience_epochs,
                            early_stopping=self.early_stopping,
                            epochs=self.epochs)

        # Per-feature text-to-index transform sized from the dataset.
        indexer = TransformTextToIndex(
            feature_lens=train_dataset.max_feature_lens)

        # Data pipeline: index the text, then merge per-feature tensors.
        data_pipe = DataPipeline(
            text_to_index=indexer,
            postprocess_steps=[("merge_tensor", TransformMergeTensors())])

        # Label pipeline: encode the labels, then reshape them.
        label_pipe = LabelPipeline(
            label_encoder=TransformLabelEncoder(),
            label_reshaper=TransformLabelReshaper(
                num_classes=train_dataset.num_classes))

        network = BiLstmNetwork(input_size=indexer.max_index,
                                hidden_dim=self.hidden_dim,
                                output_size=train_dataset.num_classes)
        self.logger.info("Using model {}".format(type(network)))

        # optimiser = SGD(lr=self.learning_rate, params=model.parameters())
        optim = Adam(params=network.parameters())
        self.logger.info("Using optimiser {}".format(type(optim)))

        criterion = nn.CrossEntropyLoss()
        self.logger.info("Using loss function {}".format(type(criterion)))

        return TrainPipeline(batch_size=self.batch_size,
                             optimiser=optim,
                             trainer=run_trainer,
                             data_pipeline=data_pipe,
                             label_pipeline=label_pipe,
                             num_workers=self.num_workers,
                             loss_func=criterion,
                             model=network)
from torch import nn
from torch.optim import Adam
from bilstm_network import BiLstmNetwork
from data_pipeline import DataPipeline
from label_pipeline import LabelPipeline
from train import Train
from train_pipeline import TrainPipeline
from transform_label_encoder import TransformLabelEncoder
from transform_label_rehaper import TransformLabelReshaper
from transform_merge_tensors import TransformMergeTensors
from transform_text_to_index import TransformTextToIndex
class TrainBuilder:
def __init__(self, epochs=50, early_stopping=True, patience_epochs=30, batch_size=32, num_workers=None, **kwargs):
self.batch_size = batch_size
self.patience_epochs = patience_epochs
self.early_stopping = early_stopping
self.epochs = epochs
self.learning_rate = kwargs.get("learning_rate", .01)
self.hidden_dim = kwargs.get("hidden_dim", 20)
self.num_workers = num_workers
@property
def logger(self):
return logging.getLogger(__name__)
def get_pipeline(self, train_dataset):
trainer = Train(patience_epochs=self.patience_epochs, early_stopping=self.early_stopping, epochs=self.epochs)
max_feature_lens = train_dataset.max_feature_lens
num_classes = train_dataset.num_classes
text_to_index = TransformTextToIndex(feature_lens=max_feature_lens)
# data pipeline
merge_tensor = TransformMergeTensors()
post_process_steps = [("merge_tensor", merge_tensor)]
data_pipeline = DataPipeline(text_to_index=text_to_index, postprocess_steps=post_process_steps)
# Label pipeline
label_encoder = TransformLabelEncoder()
label_reshaper = TransformLabelReshaper(num_classes=num_classes)
label_pipeline = LabelPipeline(label_encoder=label_encoder, label_reshaper=label_reshaper)
# Network
model = BiLstmNetwork(input_size=text_to_index.max_index,
hidden_dim=self.hidden_dim,
output_size=train_dataset.num_classes)
self.logger.info("Using model {}".format(type(model)))
# optimiser = SGD(lr=self.learning_rate, params=model.parameters())
optimiser = Adam(params=model.parameters())
self.logger.info("Using optimiser {}".format(type(optimiser)))
# Loss function
loss_func = nn.CrossEntropyLoss()
self.logger.info("Using loss function {}".format(type(loss_func)))
# Train pipeline
train_pipeline = TrainPipeline(batch_size=self.batch_size,
optimiser=optimiser,
trainer=trainer,
data_pipeline=data_pipeline,
label_pipeline=label_pipeline,
num_workers=self.num_workers,
loss_func=loss_func,
model=model)
return train_pipeline | 0.906291 | 0.433202 |
import time
import datetime
from telethon import TelegramClient
from telethon.client import users
from telethon.tl.types import ChannelParticipantsRecent
from telethon.tl.functions.contacts import ResolveUsernameRequest
from telethon.tl.functions.channels import EditBannedRequest
from telethon.tl.types import ChatBannedRights
# Runtime configuration packed into one list; indices are referenced
# throughout the script:
#   data[0] session name (also passed where a phone number is expected in
#           sign_in -- presumably the session name doubles as the phone;
#           TODO confirm)
#   data[1] api_id, data[2] api_hash (Telegram API credentials)
#   data[3] target group username
#   data[4] accumulator for gathered user ids
#   data[5] maximum number of participants to fetch
data = [
    "sessionname",  # session name
    "x",  # api id
    "f",  # api hash
    "groupname",  # target group name
    [],  # gathered user ids
    1000  # limit of users to gather
]

client = TelegramClient(data[0], data[1], data[2])
# Silence Telethon's automatic error reporting for this session.
client.session.report_errors = False
async def main():
    """Gather recent participants of the target group, then ban every user
    whose id falls between the two sentinel ids in gathered order.

    Requires admin rights in the target group; prints a counter per
    processed id and "done!" on completion.
    """
    import asyncio  # local import: only needed for the non-blocking sleep

    await client.connect()
    if not await client.is_user_authorized():
        # BUG FIX: these coroutines were never awaited, so the sign-in
        # never actually ran.
        await client.send_code_request(data[0])
        await client.sign_in(
            data[0],
            input('Enter code ')
        )
    group = await client(
        ResolveUsernameRequest(data[3])
    )
    async for user in client.iter_participants(
            group,
            filter=ChannelParticipantsRecent,
            limit=data[5]
    ):
        data[4].append(user.id)

    should_ban = False
    error_called = False
    counter = 0
    for user_id in data[4]:
        # The two sentinel ids delimit the span of users to ban.
        if user_id == 1074053309:
            should_ban = True
        if user_id == 1834600391:
            should_ban = False
        if should_ban:
            # BUG FIX: time.sleep() blocked the event loop inside a
            # coroutine; use the asyncio-aware sleep instead.
            await asyncio.sleep(20)
            try:
                # BUG FIX: the original passed `user`, the leftover loop
                # variable from the gathering loop above (i.e. always the
                # LAST participant), instead of the id being processed.
                await client(EditBannedRequest(
                    channel=data[3],
                    participant=user_id,
                    banned_rights=ChatBannedRights(
                        until_date=datetime.timedelta(days=999),
                        view_messages=True,
                    )
                ))
                print("x")
            except Exception:
                if error_called == False:
                    print("You must be admin")
                # BUG FIX: `error_called == True` was a comparison (no-op),
                # not an assignment.
                error_called = True
                return
        counter = counter + 1
        print(counter)
    print("done!")
# Entry point: run the coroutine to completion; the `with` block ensures the
# client disconnects cleanly on exit.
with client:
    client.loop.run_until_complete(main())
import datetime
from telethon import TelegramClient
from telethon.client import users
from telethon.tl.types import ChannelParticipantsRecent
from telethon.tl.functions.contacts import ResolveUsernameRequest
from telethon.tl.functions.channels import EditBannedRequest
from telethon.tl.types import ChatBannedRights
data = [
"sessionname", # session name
"x", # id
"f", # hash
"groupname", # target group name
[], # gathered user ids
1000 # limit of users to gather
]
client = TelegramClient(data[0], data[1], data[2])
client.session.report_errors = False
async def main():
await client.connect()
if not await client.is_user_authorized():
client.send_code_request(data[0])
client.sign_in(
data[0],
input('Enter code ')
)
group = await client(
ResolveUsernameRequest(data[3])
)
async for user in client.iter_participants(
group,
filter=ChannelParticipantsRecent,
limit=data[5]
):
data[4].append(user.id)
should_ban = False
error_called = False
counter = 0
for id in data[4]:
if id == 1074053309:
should_ban = True
if id == 1834600391:
should_ban = False
if should_ban == True:
time.sleep(20)
try:
result = await client(EditBannedRequest(
channel=data[3],
participant=user,
banned_rights=ChatBannedRights(
until_date=datetime.timedelta(days=999),
view_messages=True,
)
))
print("x")
except:
if error_called == False:
print("You must be admin")
error_called == True
return
counter = counter+1
print(counter)
print("done!")
with client:
client.loop.run_until_complete(main()) | 0.258794 | 0.105119 |
import numpy as np
import tensorflow as tf
from railrl.misc.tf_test_case import TFTestCase
from railrl.predictors.mlp_state_action_network import MlpStateActionNetwork
class TestStateActionNetwork(TFTestCase):
    """Tests for MlpStateActionNetwork: parameter get/set, copies, and
    weight-tied copies.

    The network-construction / feed-dict / session-run boilerplate that was
    copy-pasted across every test is factored into private helpers.
    """

    def _make_network(self, scope, action_dim=5, obs_dim=7, output_dim=3):
        """Build a fresh MlpStateActionNetwork under the given scope."""
        return MlpStateActionNetwork(name_or_scope=scope,
                                     observation_dim=obs_dim,
                                     action_dim=action_dim,
                                     output_dim=output_dim)

    def _random_feeds(self, net1, net2, action_dim=5, obs_dim=7):
        """Return feed dicts for both networks sharing one random (a, o)."""
        a = np.random.rand(1, action_dim)
        o = np.random.rand(1, obs_dim)
        feed_1 = {
            net1.action_input: a,
            net1.observation_input: o,
        }
        feed_2 = {
            net2.action_input: a,
            net2.observation_input: o,
        }
        return feed_1, feed_2

    def _outputs(self, net1, feed_1, net2, feed_2):
        """Run both networks on their feeds and return the two outputs."""
        out1 = self.sess.run(net1.output, feed_1)
        out2 = self.sess.run(net2.output, feed_2)
        return out1, out2

    def test_set_and_get_params(self):
        net1 = self._make_network("qf_a")
        net2 = self._make_network("qf_b")
        feed_1, feed_2 = self._random_feeds(net1, net2)
        self.sess.run(tf.global_variables_initializer())
        out1, out2 = self._outputs(net1, feed_1, net2, feed_2)
        # Independently initialized networks should disagree...
        self.assertFalse((out1 == out2).all())
        # ...and agree once the parameters are copied over.
        net2.set_param_values(net1.get_param_values())
        out1, out2 = self._outputs(net1, feed_1, net2, feed_2)
        self.assertTrue((out1 == out2).all())

    def test_copy(self):
        net1 = self._make_network("qf_a")
        self.sess.run(tf.global_variables_initializer())
        net2 = net1.get_copy(name_or_scope="qf_b")
        self.sess.run(tf.global_variables_initializer())
        feed_1, feed_2 = self._random_feeds(net1, net2)
        out1, out2 = self._outputs(net1, feed_1, net2, feed_2)
        # A plain copy has its own (re-initialized) parameters.
        self.assertFalse((out1 == out2).all())
        net2.set_param_values(net1.get_param_values())
        out1, out2 = self._outputs(net1, feed_1, net2, feed_2)
        self.assertTrue((out1 == out2).all())

    def test_get_weight_tied_copy_obs_only(self):
        obs_dim = 7
        net2_observation_input = tf.placeholder(tf.float32, [None, obs_dim])
        self.finish_test_get_weight_tied_copy(
            5,
            obs_dim,
            net2_observation_input=net2_observation_input,
        )

    def test_get_weight_tied_copy_action_only(self):
        action_dim = 5
        net2_action_input = tf.placeholder(tf.float32, [None, action_dim])
        self.finish_test_get_weight_tied_copy(
            action_dim,
            7,
            net2_action_input=net2_action_input,
        )

    def test_get_weight_tied_copy_obs_and_action(self):
        action_dim = 5
        obs_dim = 7
        net2_observation_input = tf.placeholder(tf.float32, [None, obs_dim])
        net2_action_input = tf.placeholder(tf.float32, [None, action_dim])
        self.finish_test_get_weight_tied_copy(
            action_dim,
            obs_dim,
            net2_observation_input=net2_observation_input,
            net2_action_input=net2_action_input
        )

    def finish_test_get_weight_tied_copy(self,
                                         action_dim,
                                         obs_dim,
                                         net2_observation_input=None,
                                         net2_action_input=None):
        """Shared body: a weight-tied copy must track the original's params,
        even after those params are re-randomized."""
        net1 = self._make_network("qf_a",
                                  action_dim=action_dim,
                                  obs_dim=obs_dim)
        self.sess.run(tf.global_variables_initializer())
        net2 = net1.get_weight_tied_copy(
            observation_input=net2_observation_input,
            action_input=net2_action_input)
        self.sess.run(tf.global_variables_initializer())
        feed_1, feed_2 = self._random_feeds(net1, net2,
                                            action_dim=action_dim,
                                            obs_dim=obs_dim)
        out1, out2 = self._outputs(net1, feed_1, net2, feed_2)
        self.assertTrue((out1 == out2).all())
        # Changing net1's parameters must also change the tied copy.
        self.randomize_param_values(net1)
        out1, out2 = self._outputs(net1, feed_1, net2, feed_2)
        self.assertTrue((out1 == out2).all())
import tensorflow as tf
from railrl.misc.tf_test_case import TFTestCase
from railrl.predictors.mlp_state_action_network import MlpStateActionNetwork
class TestStateActionNetwork(TFTestCase):
def test_set_and_get_params(self):
action_dim = 5
obs_dim = 7
output_dim = 3
net1 = MlpStateActionNetwork(name_or_scope="qf_a",
observation_dim=obs_dim,
action_dim=action_dim,
output_dim=output_dim)
net2 = MlpStateActionNetwork(name_or_scope="qf_b",
observation_dim=obs_dim,
action_dim=action_dim,
output_dim=output_dim)
a = np.random.rand(1, action_dim)
o = np.random.rand(1, obs_dim)
feed_1 = {
net1.action_input: a,
net1.observation_input: o,
}
feed_2 = {
net2.action_input: a,
net2.observation_input: o,
}
self.sess.run(tf.global_variables_initializer())
out1 = self.sess.run(net1.output, feed_1)
out2 = self.sess.run(net2.output, feed_2)
self.assertFalse((out1 == out2).all())
net2.set_param_values(net1.get_param_values())
out1 = self.sess.run(net1.output, feed_1)
out2 = self.sess.run(net2.output, feed_2)
self.assertTrue((out1 == out2).all())
def test_copy(self):
action_dim = 5
obs_dim = 7
output_dim = 3
net1 = MlpStateActionNetwork(name_or_scope="qf_a",
observation_dim=obs_dim,
action_dim=action_dim,
output_dim=output_dim)
self.sess.run(tf.global_variables_initializer())
net2 = net1.get_copy(name_or_scope="qf_b")
self.sess.run(tf.global_variables_initializer())
a = np.random.rand(1, action_dim)
o = np.random.rand(1, obs_dim)
feed_1 = {
net1.action_input: a,
net1.observation_input: o,
}
feed_2 = {
net2.action_input: a,
net2.observation_input: o,
}
out1 = self.sess.run(net1.output, feed_1)
out2 = self.sess.run(net2.output, feed_2)
self.assertFalse((out1 == out2).all())
net2.set_param_values(net1.get_param_values())
out1 = self.sess.run(net1.output, feed_1)
out2 = self.sess.run(net2.output, feed_2)
self.assertTrue((out1 == out2).all())
def test_get_weight_tied_copy_obs_only(self):
action_dim = 5
obs_dim = 7
net2_observation_input = tf.placeholder(tf.float32, [None, obs_dim])
self.finish_test_get_weight_tied_copy(
action_dim,
obs_dim,
net2_observation_input=net2_observation_input,
)
def test_get_weight_tied_copy_action_only(self):
action_dim = 5
obs_dim = 7
net2_action_input = tf.placeholder(tf.float32, [None, action_dim])
self.finish_test_get_weight_tied_copy(
action_dim,
obs_dim,
net2_action_input=net2_action_input,
)
def test_get_weight_tied_copy_obs_and_action(self):
action_dim = 5
obs_dim = 7
net2_observation_input = tf.placeholder(tf.float32, [None, obs_dim])
net2_action_input = tf.placeholder(tf.float32, [None, action_dim])
self.finish_test_get_weight_tied_copy(
action_dim,
obs_dim,
net2_observation_input=net2_observation_input,
net2_action_input=net2_action_input
)
def finish_test_get_weight_tied_copy(self,
action_dim,
obs_dim,
net2_observation_input=None,
net2_action_input=None):
output_dim = 3
net1 = MlpStateActionNetwork(name_or_scope="qf_a",
observation_dim=obs_dim,
action_dim=action_dim,
output_dim=output_dim)
self.sess.run(tf.global_variables_initializer())
net2 = net1.get_weight_tied_copy(
observation_input=net2_observation_input,
action_input=net2_action_input)
self.sess.run(tf.global_variables_initializer())
a = np.random.rand(1, action_dim)
o = np.random.rand(1, obs_dim)
feed_1 = {
net1.action_input: a,
net1.observation_input: o,
}
feed_2 = {
net2.action_input: a,
net2.observation_input: o,
}
out1 = self.sess.run(net1.output, feed_1)
out2 = self.sess.run(net2.output, feed_2)
self.assertTrue((out1 == out2).all())
self.randomize_param_values(net1)
out1 = self.sess.run(net1.output, feed_1)
out2 = self.sess.run(net2.output, feed_2)
self.assertTrue((out1 == out2).all()) | 0.671471 | 0.544135 |
import sys
import traceback
from abc import ABCMeta, abstractmethod
from importlib import import_module
import six
from werkzeug.utils import find_modules
from pgadmin.utils import server_utils
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
class TestsGeneratorRegistry(ABCMeta):
    """
    class TestsGeneratorRegistry(object)

    Every module will be registered automatically by its module name.

    Class-level Methods:
    ----------- -------
    * __init__(...)
      - This is used to register test modules. You don't need to
        call this function explicitly. This will be automatically executed,
        whenever we create a class and inherit from BaseTestGenerator -
        it will register it as an available module in TestsGeneratorRegistry.
        By setting the __metaclass__ for BaseTestGenerator to
        TestsGeneratorRegistry it will create new instance of this
        TestsGeneratorRegistry per class.

    * load_generators():
      - This function will load all the modules from __init__()
        present in registry.
    """

    # Maps a test module name (d['__module__']) to the test-case class
    # defined in it; populated by __init__ below as classes are created.
    registry = dict()

    def __init__(cls, name, bases, d):
        # Register this type of module, based on the module name
        # Avoid registering the base classes themselves.
        if name != 'BaseTestGenerator' and name != 'BaseFeatureTest':
            TestsGeneratorRegistry.registry[d['__module__']] = cls

        ABCMeta.__init__(cls, name, bases, d)

    @classmethod
    def load_generators(cls, pkg_root, exclude_pkgs):
        """Import every test module under *pkg_root*; importing triggers
        __init__ above, which (re)populates the registry."""
        # Start from a clean registry on every load.
        cls.registry = dict()

        all_modules = []
        all_modules += find_modules(pkg_root, False, True)

        # Only import modules that look like test modules and are not in an
        # excluded package; import errors are reported but non-fatal.
        for module_name in all_modules:
            try:
                if "tests." in str(module_name) and not any(
                    str(module_name).startswith(
                        'pgadmin.' + str(exclude_pkg)
                    ) for exclude_pkg in exclude_pkgs
                ):
                    import_module(module_name)
            except ImportError:
                traceback.print_exc(file=sys.stderr)
@six.add_metaclass(TestsGeneratorRegistry)
class BaseTestGenerator(unittest.TestCase):
    """Base class for generated API test cases; subclasses are registered
    automatically by the TestsGeneratorRegistry metaclass."""

    def setUp(self):
        """Connect to the configured server and skip the test when its
        database type is listed in ``self.skip_on_database``."""
        super(BaseTestGenerator, self).setUp()
        self.server_id = self.server_information["server_id"]
        server_con = server_utils.connect_server(self, self.server_id)
        if hasattr(self, 'skip_on_database'):
            if 'data' in server_con and 'type' in server_con['data']:
                if server_con['data']['type'] in self.skip_on_database:
                    # NOTE(review): the skip message reads
                    # self.server['db_type'] while the check above uses
                    # server_con['data']['type'] -- confirm 'db_type' is
                    # always present on self.server.
                    self.skipTest('cannot run in: %s' % self.server['db_type'])

    @classmethod
    def setTestServer(cls, server):
        # Server connection details shared by all generated tests.
        cls.server = server

    # Abstract: every concrete test case must provide its own runTest.
    @abstractmethod
    def runTest(self):
        pass

    # Initializing app.
    def setApp(self, app):
        self.app = app

    # Initializing test_client.
    @classmethod
    def setTestClient(cls, test_client):
        cls.tester = test_client

    @classmethod
    def setDriver(cls, driver):
        # Selenium driver used by feature tests.
        cls.driver = driver

    @classmethod
    def setServerInformation(cls, server_information):
        # Dict with at least "server_id"; read by setUp above.
        cls.server_information = server_information
import sys
import traceback
from abc import ABCMeta, abstractmethod
from importlib import import_module
import six
from werkzeug.utils import find_modules
from pgadmin.utils import server_utils
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
class TestsGeneratorRegistry(ABCMeta):
"""
class TestsGeneratorRegistry(object)
Every module will be registered automatically by its module name.
Class-level Methods:
----------- -------
* __init__(...)
- This is used to register test modules. You don't need to
call this function explicitly. This will be automatically executed,
whenever we create a class and inherit from BaseTestGenerator -
it will register it as an available module in TestsGeneratorRegistry.
By setting the __metaclass__ for BaseTestGenerator to
TestsGeneratorRegistry it will create new instance of this
TestsGeneratorRegistry per class.
* load_generators():
- This function will load all the modules from __init__()
present in registry.
"""
registry = dict()
def __init__(cls, name, bases, d):
# Register this type of module, based on the module name
# Avoid registering the BaseDriver itself
if name != 'BaseTestGenerator' and name != 'BaseFeatureTest':
TestsGeneratorRegistry.registry[d['__module__']] = cls
ABCMeta.__init__(cls, name, bases, d)
@classmethod
def load_generators(cls, pkg_root, exclude_pkgs):
cls.registry = dict()
all_modules = []
all_modules += find_modules(pkg_root, False, True)
# Check for SERVER mode
for module_name in all_modules:
try:
if "tests." in str(module_name) and not any(
str(module_name).startswith(
'pgadmin.' + str(exclude_pkg)
) for exclude_pkg in exclude_pkgs
):
import_module(module_name)
except ImportError:
traceback.print_exc(file=sys.stderr)
@six.add_metaclass(TestsGeneratorRegistry)
class BaseTestGenerator(unittest.TestCase):
# Defining abstract method which will override by individual testcase.
def setUp(self):
super(BaseTestGenerator, self).setUp()
self.server_id = self.server_information["server_id"]
server_con = server_utils.connect_server(self, self.server_id)
if hasattr(self, 'skip_on_database'):
if 'data' in server_con and 'type' in server_con['data']:
if server_con['data']['type'] in self.skip_on_database:
self.skipTest('cannot run in: %s' % self.server['db_type'])
@classmethod
def setTestServer(cls, server):
cls.server = server
@abstractmethod
def runTest(self):
pass
# Initializing app.
def setApp(self, app):
self.app = app
# Initializing test_client.
@classmethod
def setTestClient(cls, test_client):
cls.tester = test_client
@classmethod
def setDriver(cls, driver):
cls.driver = driver
@classmethod
def setServerInformation(cls, server_information):
cls.server_information = server_information | 0.36557 | 0.110856 |
import re
import string
from mathics.core.parser.errors import ScanError
from mathics.core.parser.prescanner import Prescanner
from mathics.core.characters import letters, letterlikes
# special patterns
number_pattern = r'''
( (?# Two possible forms depending on whether base is specified)
(\d+\^\^([a-zA-Z0-9]+\.?[a-zA-Z0-9]*|[a-zA-Z0-9]*\.?[a-zA-Z0-9]+))
| (\d+\.?\d*|\d*\.?\d+)
)
(``?(\+|-)?(\d+\.?\d*|\d*\.?\d+)|`)? (?# Precision / Accuracy)
(\*\^(\+|-)?\d+)? (?# Exponent)
'''
base_symbol_pattern = r'((?![0-9])([0-9${0}{1}])+)'.format(letters, letterlikes)
full_symbol_pattern = r'(`?{0}(`{0})*)'.format(base_symbol_pattern)
pattern_pattern = r'{0}?_(\.|(__?)?{0}?)?'.format(full_symbol_pattern)
slot_pattern = r'\#(\d+|{0})?'.format(base_symbol_pattern)
filename_pattern = r'''
(?P<quote>\"?) (?# Opening quotation mark)
[a-zA-Z0-9\`/\.\\\!\-\:\_\$\*\~\?]+ (?# Literal characters)
(?P=quote) (?# Closing quotation mark)
'''
tokens = [
('Definition', r'\? '),
('Information', r'\?\? '),
('Number', number_pattern),
('String', r'"'),
('Pattern', pattern_pattern),
('Symbol', full_symbol_pattern),
('SlotSequence', r'\#\#\d*'),
('Slot', slot_pattern),
('Out', r'\%(\%+|\d+)?'),
('PutAppend', r'\>\>\>'),
('Put', r'\>\>'),
('Get', r'\<\<'),
('RawLeftBracket', r' \[ '),
('RawRightBracket', r' \] '),
('RawLeftBrace', r' \{ '),
('RawRightBrace', r' \} '),
('RawLeftParenthesis', r' \( '),
('RawRightParenthesis', r' \) '),
('RawLeftAssociation', r' \<\| '),
('RawRightAssociation', r' \|\> '),
('RawComma', r' \, '),
('Span', r' \;\; '),
('MessageName', r' \:\: '),
# boxes
('LeftRowBox', r' \\\( '),
('RightRowBox', r' \\\) '),
('InterpretedBox', r' \\\! '),
('SuperscriptBox', r' \\\^ '),
('SubscriptBox', r' \\\_ '),
('OverscriptBox', r' \\\& '),
('UnderscriptBox', r' \\\+ '),
('OtherscriptBox', r' \\\% '),
('FractionBox', r' \\\/ '),
('SqrtBox', r' \\\@ '),
('FormBox', r' \\\` '),
('Information', r'\?\?'),
('PatternTest', r' \? '),
('Increment', r' \+\+ '),
('Decrement', r' \-\- '),
('MapAll', r' \/\/\@ '),
('Map', r' \/\@ '),
('ApplyList', r' \@\@\@ '),
('Apply', r' \@\@ '),
('Composition', r' \@\* '),
('Prefix', r' \@ '),
('StringExpression', r' \~\~ '),
('Infix', r' \~ '),
('Derivative', r' \' '),
('StringJoin', r' \<\> '),
('NonCommutativeMultiply', r' \*\* '),
('AddTo', r' \+\= '),
('SubtractFrom', r' \-\= '),
('TimesBy', r' \*\= '),
('DivideBy', r' \/\= '),
('Times', r' \*|\u00d7 '),
('SameQ', r' \=\=\= '),
('UnsameQ', r' \=\!\= '),
('Equal', r' (\=\=) | \uf431 | \uf7d9 '),
('Unequal', r' (\!\= ) | \u2260 '),
('LessEqual', r' (\<\=) | \u2264 '),
('LessSlantEqual', r' \u2a7d '),
('GreaterEqual', r' (\>\=) | \u2265 '),
('GreaterSlantEqual', r' \u2a7e '),
('Greater', r' \> '),
('Less', r' \< '),
('Or', r' (\|\|) | \u2228 '),
('And', r' (\&\&) | \u2227 '),
('RepeatedNull', r' \.\.\. '),
('Repeated', r' \.\. '),
('Alternatives', r' \| '),
('Rule', r' (\-\>)|\uF522 '),
('RuleDelayed', r' (\:\>)|\uF51F '),
('UndirectedEdge', r' (\<\-\>)|\u29DF '),
('ReplaceRepeated', r' \/\/\. '),
('ReplaceAll', r' \/\. '),
('Postfix', r' \/\/ '),
('UpSetDelayed', r' \^\:\= '),
('SetDelayed', r' \:\= '),
('UpSet', r' \^\= '),
('TagSet', r' \/\: '),
('Unset', r' \=\s*\.(?!\d|\.) '), # allow whitspace but avoid e.g. x=.01
('Set', r' \= '),
('Condition', r' \/\; '),
('Semicolon', r' \; '),
('Divide', r' \/|\u00f7 '),
('Power', r' \^ '),
('Dot', r' \. '),
('Minus', r' \-|\u2212 '),
('Plus', r' \+ '),
('RawBackslash', r' \\ '),
('Factorial2', r' \!\! '),
('Factorial', r' \! '),
('Function', r' \& | \uF4A1 '),
('RawColon', r' \: '),
# ('DiscreteShift', r' \uf4a3 '),
# ('DiscreteRatio', r' \uf4a4 '),
# ('DifferenceDelta', r' \u2206 '),
# ('PartialD', r' \u2202 '),
('Cross', r' \uf4a0 '),
('Colon', r' \u2236 '),
('Transpose', r' \uf3c7 '),
('Conjugate', r' \uf3c8 '),
('ConjugateTranspose', r' \uf3c9 '),
('HermitianConjugate', r' \uf3ce '),
('Integral', r' \u222b '),
('DifferentialD', r' \uf74c '),
('Del', r' \u2207 '),
('Square', r' \uf520 '),
('SmallCircle', r' \u2218 '),
('CircleDot', r' \u2299 '),
# ('Sum', r' \u2211 '),
# ('Product', r' \u220f '),
('PlusMinus', r' \u00b1 '),
('MinusPlus', r' \u2213 '),
('Nor', r' \u22BD '),
('Nand', r' \u22BC '),
('Xor', r' \u22BB '),
('Xnor', r' \uF4A2 '),
('Diamond', r' \u22c4 '),
('Wedge', r' \u22c0 '),
('Vee', r' \u22c1 '),
('CircleTimes', r' \u2297 '),
('CenterDot', r' \u00b7 '),
('Star', r' \u22c6'),
('VerticalTilde', r' \u2240 '),
('Coproduct', r' \u2210 '),
('Cap', r' \u2322 '),
('Cup', r' \u2323 '),
('CirclePlus', r' \u2295 '),
('CircleMinus', r' \u2296 '),
('Intersection', r' \u22c2 '),
('Union', r' \u22c3 '),
('VerticalBar', r' \u2223 '),
('NotVerticalBar', r' \u2224 '),
('DoubleVerticalBar', r' \u2225 '),
('NotDoubleVerticalBar', r' \u2226 '),
('Element', r' \u2208 '),
('NotElement', r' \u2209 '),
('Subset', r' \u2282 '),
('Superset', r' \u2283 '),
('ForAll', r' \u2200 '),
('Exists', r' \u2203 '),
('NotExists', r' \u2204 '),
('Not', r' \u00AC '),
('Equivalent', r' \u29E6 '),
('Implies', r' \uF523 '),
('RightTee', r' \u22A2 '),
('DoubleRightTee', r' \u22A8 '),
('LeftTee', r' \u22A3 '),
('DoubleLeftTee', r' \u2AE4 '),
('SuchThat', r' \u220D '),
('VerticalSeparator', r' \uF432 '),
('Therefore', r' \u2234 '),
('Because', r' \u2235 '),
('Backslash', r' \u2216 '),
]
literal_tokens = {
'!': ['Unequal', 'Factorial2', 'Factorial'],
'"': ['String'],
'#': ['SlotSequence', 'Slot'],
'%': ['Out'],
'&': ['And', 'Function'],
"'": ['Derivative'],
'(': ['RawLeftParenthesis'],
')': ['RawRightParenthesis'],
'*': ['NonCommutativeMultiply', 'TimesBy', 'Times'],
'+': ['Increment', 'AddTo', 'Plus'],
',': ['RawComma'],
'-': ['Decrement', 'SubtractFrom', 'Rule', 'Minus'],
'.': ['Number', 'RepeatedNull', 'Repeated', 'Dot'],
'/': ['MapAll', 'Map', 'DivideBy', 'ReplaceRepeated', 'ReplaceAll',
'Postfix', 'TagSet', 'Condition', 'Divide'],
':': ['MessageName', 'RuleDelayed', 'SetDelayed', 'RawColon'],
';': ['Span', 'Semicolon'],
'<': ['RawLeftAssociation', 'UndirectedEdge', 'Get', 'StringJoin', 'LessEqual', 'Less'],
'=': ['SameQ', 'UnsameQ', 'Equal', 'Unset', 'Set'],
'>': ['PutAppend', 'Put', 'GreaterEqual', 'Greater'],
'?': ['Information', 'PatternTest'],
'@': ['ApplyList', 'Apply', 'Composition', 'Prefix'],
'[': ['RawLeftBracket'],
'\\': ['LeftRowBox', 'RightRowBox', 'InterpretedBox', 'SuperscriptBox',
'SubscriptBox', 'OverscriptBox', 'UnderscriptBox', 'OtherscriptBox',
'FractionBox', 'SqrtBox', 'FormBox', 'RawBackslash'],
']': ['RawRightBracket'],
'^': ['UpSetDelayed', 'UpSet', 'Power'],
'_': ['Pattern'],
'`': ['Pattern', 'Symbol'],
'|': ['RawRightAssociation', 'Or', 'Alternatives'],
'{': ['RawLeftBrace'],
'}': ['RawRightBrace'],
'~': ['StringExpression', 'Infix']
}
for c in string.ascii_letters:
literal_tokens[c] = ['Pattern', 'Symbol']
for c in string.digits:
literal_tokens[c] = ['Number']
def find_indices(literals):
    """Map each literal start character to the tuple of indices (into the
    module-level ``tokens`` list) of the token patterns it may begin.

    PERF: builds a tag -> index map once instead of rescanning the whole
    token list for every (character, tag) pair as the original did.
    """
    first_index = {}
    for i, (tag, _) in enumerate(tokens):
        # setdefault keeps the FIRST occurrence of a duplicated tag
        # (e.g. 'Information'), matching the original scan-with-break.
        first_index.setdefault(tag, i)
    literal_indices = {}
    for key, tags in literals.items():
        # A missing tag raises KeyError here (the original failed its
        # length assertion instead); either way a bad table fails loudly.
        indices = tuple(first_index[tag] for tag in tags)
        assert len(indices) == len(tags)
        literal_indices[key] = indices
    return literal_indices
def compile_pattern(pattern):
    """Compile *pattern* in verbose mode, so the whitespace and inline
    comments used for readability in the token tables are ignored."""
    return re.compile(pattern, flags=re.VERBOSE)
def compile_tokens(token_list):
    """Return *token_list* with every pattern string compiled in place."""
    return [(name, compile_pattern(regex)) for name, regex in token_list]
# Token table used while in 'filename' mode (after Get/Put/PutAppend).
filename_tokens = [
    ('Filename', filename_pattern),
]

# Build the first-character dispatch table from the RAW token list (it maps
# tags to list positions), then compile both token tables and the symbol
# pattern for use by the Tokeniser below.
token_indices = find_indices(literal_tokens)
tokens = compile_tokens(tokens)
filename_tokens = compile_tokens(filename_tokens)
full_symbol_pattern = compile_pattern(full_symbol_pattern)
def is_symbol_name(text):
    """Return True if *text* is exactly one (possibly context-qualified)
    symbol name.

    BUG FIX: the original stripped every symbol match with ``sub`` and
    compared the残 remainder to '', which wrongly accepted the empty
    string; ``fullmatch`` requires the whole text to be one symbol.
    """
    return full_symbol_pattern.fullmatch(text) is not None
class Token(object):
    """A lexical token: its kind (*tag*), the matched *text* and the
    source position *pos* where the match started."""

    def __init__(self, tag, text, pos):
        self.tag = tag
        self.text = text
        self.pos = pos

    def __eq__(self, other):
        # Comparing against a non-Token is a programming error here.
        if not isinstance(other, Token):
            raise TypeError()
        return (self.tag, self.text, self.pos) == \
            (other.tag, other.text, other.pos)

    def __repr__(self):
        return 'Token(%s, %s, %i)' % (self.tag, self.text, self.pos)
class Tokeniser(object):
    """Turns prescanned source code into a stream of Token objects.

    Operates in one of two modes: 'expr' for ordinary expression tokens and
    'filename' right after Get/Put/PutAppend, where the next token must be
    read as a file name.
    """

    # mode name -> (token table, first-character dispatch table)
    modes = {
        'expr': (tokens, token_indices),
        'filename': (filename_tokens, {}),
    }

    def __init__(self, feeder):
        # `feeder` supplies source lines and receives syntax messages.
        self.pos = 0
        self.feeder = feeder
        self.prescanner = Prescanner(feeder)
        self.code = self.prescanner.scan()
        self.change_mode('expr')

    def change_mode(self, mode):
        """Switch between 'expr' and 'filename' tokenisation."""
        self.mode = mode
        self.tokens, self.token_indices = self.modes[mode]

    def incomplete(self):
        'get more code from the prescanner and continue'
        self.prescanner.incomplete()
        self.code += self.prescanner.scan()

    def sntx_message(self, pos=None):
        """Report a syntax error at *pos* (default: the current position)."""
        if pos is None:
            pos = self.pos
        pre, post = self.code[:pos], self.code[pos:].rstrip('\n')
        # 'sntxb' is used when the error is at the very beginning,
        # 'sntxf' when there is preceding text to show.
        if pos == 0:
            self.feeder.message('Syntax', 'sntxb', post)
        else:
            self.feeder.message('Syntax', 'sntxf', pre, post)

    def next(self):
        'return next token'
        self.skip_blank()
        if self.pos >= len(self.code):
            return Token('END', '', len(self.code))

        # look for a matching pattern: first try only the patterns indexed
        # under the current character, else fall back to scanning them all
        indices = self.token_indices.get(self.code[self.pos], ())
        if indices:
            for index in indices:
                tag, pattern = self.tokens[index]
                match = pattern.match(self.code, self.pos)
                if match is not None:
                    break
        else:
            for tag, pattern in self.tokens:
                match = pattern.match(self.code, self.pos)
                if match is not None:
                    break

        # no matching pattern found
        if match is None:
            self.sntx_message()
            raise ScanError()

        # custom tokenisation rules defined with t_tag
        override = getattr(self, 't_' + tag, None)
        if override is not None:
            return override(match)
        else:
            text = match.group(0)
            self.pos = match.end(0)
            return Token(tag, text, match.start(0))

    def skip_blank(self):
        'skip whitespace and comments'
        # `comment` holds the start positions of currently-open (possibly
        # nested) (* ... *) comments; depth is len(comment).
        comment = []  # start positions of comments
        while True:
            if self.pos >= len(self.code):
                if comment:
                    # still inside a comment: fetch more input
                    self.incomplete()
                else:
                    break
            if comment:
                if self.code.startswith('(*', self.pos):
                    comment.append(self.pos)
                    self.pos += 2
                elif self.code.startswith('*)', self.pos):
                    comment.pop()
                    self.pos += 2
                else:
                    self.pos += 1
            elif self.code.startswith('(*', self.pos):
                comment.append(self.pos)
                self.pos += 2
            elif self.code[self.pos] in ' \r\n\t':
                self.pos += 1
            else:
                break

    def t_String(self, match):
        """Scan a double-quoted string, fetching more input as needed.

        `newlines` records the positions where new input chunks began so
        the final text can be stitched together from the code buffer.
        """
        start, end = self.pos, None
        self.pos += 1  # skip opening '"'
        newlines = []
        while True:
            if self.pos >= len(self.code):
                if end is None:
                    # reached end while still inside string
                    self.incomplete()
                    newlines.append(self.pos)
                else:
                    break
            c = self.code[self.pos]
            if c == '"':
                self.pos += 1
                end = self.pos
                break
            elif c == '\\':
                # backslash escapes the next character
                self.pos += 2
            else:
                self.pos += 1
        indices = [start] + newlines + [end]
        result = ''.join(self.code[indices[i]:indices[i + 1]]
                         for i in range(len(indices) - 1))
        return Token('String', result, start)

    def t_Number(self, match):
        """Scan a number, giving back a trailing '..' to the Repeated op."""
        text = match.group(0)
        pos = match.end(0)
        if self.code[pos - 1:pos + 1] == '..':
            # Trailing .. should be ignored. That is, `1..` is `Repeated[1]`.
            text = text[:-1]
            self.pos = pos - 1
        else:
            self.pos = pos
        return Token('Number', text, match.start(0))

    def token_mode(self, match, tag, mode):
        'consume a token and switch mode'
        text = match.group(0)
        self.pos = match.end(0)
        self.change_mode(mode)
        return Token(tag, text, match.start(0))

    # Get/Put/PutAppend are followed by a file name, so they switch the
    # tokeniser into 'filename' mode; reading the filename switches back.
    def t_Get(self, match):
        return self.token_mode(match, 'Get', 'filename')

    def t_Put(self, match):
        return self.token_mode(match, 'Put', 'filename')

    def t_PutAppend(self, match):
        return self.token_mode(match, 'PutAppend', 'filename')

    def t_Filename(self, match):
        return self.token_mode(match, 'Filename', 'expr')
import re
import string
from mathics.core.parser.errors import ScanError
from mathics.core.parser.prescanner import Prescanner
from mathics.core.characters import letters, letterlikes
# special patterns
number_pattern = r'''
( (?# Two possible forms depending on whether base is specified)
(\d+\^\^([a-zA-Z0-9]+\.?[a-zA-Z0-9]*|[a-zA-Z0-9]*\.?[a-zA-Z0-9]+))
| (\d+\.?\d*|\d*\.?\d+)
)
(``?(\+|-)?(\d+\.?\d*|\d*\.?\d+)|`)? (?# Precision / Accuracy)
(\*\^(\+|-)?\d+)? (?# Exponent)
'''
base_symbol_pattern = r'((?![0-9])([0-9${0}{1}])+)'.format(letters, letterlikes)
full_symbol_pattern = r'(`?{0}(`{0})*)'.format(base_symbol_pattern)
pattern_pattern = r'{0}?_(\.|(__?)?{0}?)?'.format(full_symbol_pattern)
slot_pattern = r'\#(\d+|{0})?'.format(base_symbol_pattern)
filename_pattern = r'''
(?P<quote>\"?) (?# Opening quotation mark)
[a-zA-Z0-9\`/\.\\\!\-\:\_\$\*\~\?]+ (?# Literal characters)
(?P=quote) (?# Closing quotation mark)
'''
tokens = [
('Definition', r'\? '),
('Information', r'\?\? '),
('Number', number_pattern),
('String', r'"'),
('Pattern', pattern_pattern),
('Symbol', full_symbol_pattern),
('SlotSequence', r'\#\#\d*'),
('Slot', slot_pattern),
('Out', r'\%(\%+|\d+)?'),
('PutAppend', r'\>\>\>'),
('Put', r'\>\>'),
('Get', r'\<\<'),
('RawLeftBracket', r' \[ '),
('RawRightBracket', r' \] '),
('RawLeftBrace', r' \{ '),
('RawRightBrace', r' \} '),
('RawLeftParenthesis', r' \( '),
('RawRightParenthesis', r' \) '),
('RawLeftAssociation', r' \<\| '),
('RawRightAssociation', r' \|\> '),
('RawComma', r' \, '),
('Span', r' \;\; '),
('MessageName', r' \:\: '),
# boxes
('LeftRowBox', r' \\\( '),
('RightRowBox', r' \\\) '),
('InterpretedBox', r' \\\! '),
('SuperscriptBox', r' \\\^ '),
('SubscriptBox', r' \\\_ '),
('OverscriptBox', r' \\\& '),
('UnderscriptBox', r' \\\+ '),
('OtherscriptBox', r' \\\% '),
('FractionBox', r' \\\/ '),
('SqrtBox', r' \\\@ '),
('FormBox', r' \\\` '),
('Information', r'\?\?'),
('PatternTest', r' \? '),
('Increment', r' \+\+ '),
('Decrement', r' \-\- '),
('MapAll', r' \/\/\@ '),
('Map', r' \/\@ '),
('ApplyList', r' \@\@\@ '),
('Apply', r' \@\@ '),
('Composition', r' \@\* '),
('Prefix', r' \@ '),
('StringExpression', r' \~\~ '),
('Infix', r' \~ '),
('Derivative', r' \' '),
('StringJoin', r' \<\> '),
('NonCommutativeMultiply', r' \*\* '),
('AddTo', r' \+\= '),
('SubtractFrom', r' \-\= '),
('TimesBy', r' \*\= '),
('DivideBy', r' \/\= '),
('Times', r' \*|\u00d7 '),
('SameQ', r' \=\=\= '),
('UnsameQ', r' \=\!\= '),
('Equal', r' (\=\=) | \uf431 | \uf7d9 '),
('Unequal', r' (\!\= ) | \u2260 '),
('LessEqual', r' (\<\=) | \u2264 '),
('LessSlantEqual', r' \u2a7d '),
('GreaterEqual', r' (\>\=) | \u2265 '),
('GreaterSlantEqual', r' \u2a7e '),
('Greater', r' \> '),
('Less', r' \< '),
('Or', r' (\|\|) | \u2228 '),
('And', r' (\&\&) | \u2227 '),
('RepeatedNull', r' \.\.\. '),
('Repeated', r' \.\. '),
('Alternatives', r' \| '),
('Rule', r' (\-\>)|\uF522 '),
('RuleDelayed', r' (\:\>)|\uF51F '),
('UndirectedEdge', r' (\<\-\>)|\u29DF '),
('ReplaceRepeated', r' \/\/\. '),
('ReplaceAll', r' \/\. '),
('Postfix', r' \/\/ '),
('UpSetDelayed', r' \^\:\= '),
('SetDelayed', r' \:\= '),
('UpSet', r' \^\= '),
('TagSet', r' \/\: '),
('Unset', r' \=\s*\.(?!\d|\.) '), # allow whitspace but avoid e.g. x=.01
('Set', r' \= '),
('Condition', r' \/\; '),
('Semicolon', r' \; '),
('Divide', r' \/|\u00f7 '),
('Power', r' \^ '),
('Dot', r' \. '),
('Minus', r' \-|\u2212 '),
('Plus', r' \+ '),
('RawBackslash', r' \\ '),
('Factorial2', r' \!\! '),
('Factorial', r' \! '),
('Function', r' \& | \uF4A1 '),
('RawColon', r' \: '),
# ('DiscreteShift', r' \uf4a3 '),
# ('DiscreteRatio', r' \uf4a4 '),
# ('DifferenceDelta', r' \u2206 '),
# ('PartialD', r' \u2202 '),
('Cross', r' \uf4a0 '),
('Colon', r' \u2236 '),
('Transpose', r' \uf3c7 '),
('Conjugate', r' \uf3c8 '),
('ConjugateTranspose', r' \uf3c9 '),
('HermitianConjugate', r' \uf3ce '),
('Integral', r' \u222b '),
('DifferentialD', r' \uf74c '),
('Del', r' \u2207 '),
('Square', r' \uf520 '),
('SmallCircle', r' \u2218 '),
('CircleDot', r' \u2299 '),
# ('Sum', r' \u2211 '),
# ('Product', r' \u220f '),
('PlusMinus', r' \u00b1 '),
('MinusPlus', r' \u2213 '),
('Nor', r' \u22BD '),
('Nand', r' \u22BC '),
('Xor', r' \u22BB '),
('Xnor', r' \uF4A2 '),
('Diamond', r' \u22c4 '),
('Wedge', r' \u22c0 '),
('Vee', r' \u22c1 '),
('CircleTimes', r' \u2297 '),
('CenterDot', r' \u00b7 '),
('Star', r' \u22c6'),
('VerticalTilde', r' \u2240 '),
('Coproduct', r' \u2210 '),
('Cap', r' \u2322 '),
('Cup', r' \u2323 '),
('CirclePlus', r' \u2295 '),
('CircleMinus', r' \u2296 '),
('Intersection', r' \u22c2 '),
('Union', r' \u22c3 '),
('VerticalBar', r' \u2223 '),
('NotVerticalBar', r' \u2224 '),
('DoubleVerticalBar', r' \u2225 '),
('NotDoubleVerticalBar', r' \u2226 '),
('Element', r' \u2208 '),
('NotElement', r' \u2209 '),
('Subset', r' \u2282 '),
('Superset', r' \u2283 '),
('ForAll', r' \u2200 '),
('Exists', r' \u2203 '),
('NotExists', r' \u2204 '),
('Not', r' \u00AC '),
('Equivalent', r' \u29E6 '),
('Implies', r' \uF523 '),
('RightTee', r' \u22A2 '),
('DoubleRightTee', r' \u22A8 '),
('LeftTee', r' \u22A3 '),
('DoubleLeftTee', r' \u2AE4 '),
('SuchThat', r' \u220D '),
('VerticalSeparator', r' \uF432 '),
('Therefore', r' \u2234 '),
('Because', r' \u2235 '),
('Backslash', r' \u2216 '),
]
literal_tokens = {
'!': ['Unequal', 'Factorial2', 'Factorial'],
'"': ['String'],
'#': ['SlotSequence', 'Slot'],
'%': ['Out'],
'&': ['And', 'Function'],
"'": ['Derivative'],
'(': ['RawLeftParenthesis'],
')': ['RawRightParenthesis'],
'*': ['NonCommutativeMultiply', 'TimesBy', 'Times'],
'+': ['Increment', 'AddTo', 'Plus'],
',': ['RawComma'],
'-': ['Decrement', 'SubtractFrom', 'Rule', 'Minus'],
'.': ['Number', 'RepeatedNull', 'Repeated', 'Dot'],
'/': ['MapAll', 'Map', 'DivideBy', 'ReplaceRepeated', 'ReplaceAll',
'Postfix', 'TagSet', 'Condition', 'Divide'],
':': ['MessageName', 'RuleDelayed', 'SetDelayed', 'RawColon'],
';': ['Span', 'Semicolon'],
'<': ['RawLeftAssociation', 'UndirectedEdge', 'Get', 'StringJoin', 'LessEqual', 'Less'],
'=': ['SameQ', 'UnsameQ', 'Equal', 'Unset', 'Set'],
'>': ['PutAppend', 'Put', 'GreaterEqual', 'Greater'],
'?': ['Information', 'PatternTest'],
'@': ['ApplyList', 'Apply', 'Composition', 'Prefix'],
'[': ['RawLeftBracket'],
'\\': ['LeftRowBox', 'RightRowBox', 'InterpretedBox', 'SuperscriptBox',
'SubscriptBox', 'OverscriptBox', 'UnderscriptBox', 'OtherscriptBox',
'FractionBox', 'SqrtBox', 'FormBox', 'RawBackslash'],
']': ['RawRightBracket'],
'^': ['UpSetDelayed', 'UpSet', 'Power'],
'_': ['Pattern'],
'`': ['Pattern', 'Symbol'],
'|': ['RawRightAssociation', 'Or', 'Alternatives'],
'{': ['RawLeftBrace'],
'}': ['RawRightBrace'],
'~': ['StringExpression', 'Infix']
}
for c in string.ascii_letters:
literal_tokens[c] = ['Pattern', 'Symbol']
for c in string.digits:
literal_tokens[c] = ['Number']
def find_indices(literals):
'find indices of literal tokens'
literal_indices = {}
for key, tags in literals.items():
indices = []
for tag in tags:
for i, (tag2, pattern) in enumerate(tokens):
if tag == tag2:
indices.append(i)
break
literal_indices[key] = tuple(indices)
assert len(indices) == len(tags)
return literal_indices
def compile_pattern(pattern):
return re.compile(pattern, re.VERBOSE)
def compile_tokens(token_list):
return [(tag, compile_pattern(pattern)) for tag, pattern in token_list]
filename_tokens = [
('Filename', filename_pattern),
]
token_indices = find_indices(literal_tokens)
tokens = compile_tokens(tokens)
filename_tokens = compile_tokens(filename_tokens)
full_symbol_pattern = compile_pattern(full_symbol_pattern)
def is_symbol_name(text):
return full_symbol_pattern.sub('', text) == ''
class Token(object):
def __init__(self, tag, text, pos):
self.tag = tag
self.text = text
self.pos = pos
def __eq__(self, other):
if not isinstance(other, Token):
raise TypeError()
return self.tag == other.tag and self.text == other.text and self.pos == other.pos
def __repr__(self):
return 'Token(%s, %s, %i)' % (self.tag, self.text, self.pos)
class Tokeniser(object):
modes = {
'expr': (tokens, token_indices),
'filename': (filename_tokens, {}),
}
def __init__(self, feeder):
self.pos = 0
self.feeder = feeder
self.prescanner = Prescanner(feeder)
self.code = self.prescanner.scan()
self.change_mode('expr')
def change_mode(self, mode):
self.mode = mode
self.tokens, self.token_indices = self.modes[mode]
def incomplete(self):
'get more code from the prescanner and continue'
self.prescanner.incomplete()
self.code += self.prescanner.scan()
def sntx_message(self, pos=None):
if pos is None:
pos = self.pos
pre, post = self.code[:pos], self.code[pos:].rstrip('\n')
if pos == 0:
self.feeder.message('Syntax', 'sntxb', post)
else:
self.feeder.message('Syntax', 'sntxf', pre, post)
def next(self):
'return next token'
self.skip_blank()
if self.pos >= len(self.code):
return Token('END', '', len(self.code))
# look for a matching pattern
indices = self.token_indices.get(self.code[self.pos], ())
if indices:
for index in indices:
tag, pattern = self.tokens[index]
match = pattern.match(self.code, self.pos)
if match is not None:
break
else:
for tag, pattern in self.tokens:
match = pattern.match(self.code, self.pos)
if match is not None:
break
# no matching pattern found
if match is None:
self.sntx_message()
raise ScanError()
# custom tokenisation rules defined with t_tag
override = getattr(self, 't_' + tag, None)
if override is not None:
return override(match)
else:
text = match.group(0)
self.pos = match.end(0)
return Token(tag, text, match.start(0))
def skip_blank(self):
'skip whitespace and comments'
comment = [] # start positions of comments
while True:
if self.pos >= len(self.code):
if comment:
self.incomplete()
else:
break
if comment:
if self.code.startswith('(*', self.pos):
comment.append(self.pos)
self.pos += 2
elif self.code.startswith('*)', self.pos):
comment.pop()
self.pos += 2
else:
self.pos += 1
elif self.code.startswith('(*', self.pos):
comment.append(self.pos)
self.pos += 2
elif self.code[self.pos] in ' \r\n\t':
self.pos += 1
else:
break
def t_String(self, match):
start, end = self.pos, None
self.pos += 1 # skip opening '"'
newlines = []
while True:
if self.pos >= len(self.code):
if end is None:
# reached end while still inside string
self.incomplete()
newlines.append(self.pos)
else:
break
c = self.code[self.pos]
if c == '"':
self.pos += 1
end = self.pos
break
elif c == '\\':
self.pos += 2
else:
self.pos += 1
indices = [start] + newlines + [end]
result = ''.join(self.code[indices[i]:indices[i + 1]]
for i in range(len(indices) - 1))
return Token('String', result, start)
def t_Number(self, match):
text = match.group(0)
pos = match.end(0)
if self.code[pos - 1:pos + 1] == '..':
# Trailing .. should be ignored. That is, `1..` is `Repeated[1]`.
text = text[:-1]
self.pos = pos - 1
else:
self.pos = pos
return Token('Number', text, match.start(0))
def token_mode(self, match, tag, mode):
'consume a token and switch mode'
text = match.group(0)
self.pos = match.end(0)
self.change_mode(mode)
return Token(tag, text, match.start(0))
def t_Get(self, match):
return self.token_mode(match, 'Get', 'filename')
def t_Put(self, match):
return self.token_mode(match, 'Put', 'filename')
def t_PutAppend(self, match):
return self.token_mode(match, 'PutAppend', 'filename')
def t_Filename(self, match):
return self.token_mode(match, 'Filename', 'expr') | 0.578567 | 0.239961 |
import sys
import unittest
import doctest
from pymaybe import maybe, Something, Nothing
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
def load_tests(loader, tests, ignore):
import pymaybe
tests.addTests(
doctest.DocTestSuite(
pymaybe,
globs=pymaybe.get_doctest_globs(),
optionflags=doctest.IGNORE_EXCEPTION_DETAIL
)
)
return tests
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
class TestPyMaybe(unittest.TestCase):
def test_maybe_withValue_returnsSomething(self):
result = maybe("Value")
self.assertIsInstance(result, Something)
def test_maybe_withMaybe_returnMaybe(self):
m = maybe("value")
self.assertEqual(maybe(m), m)
def test_maybe_withNone_returnsNothing(self):
result = maybe(None)
self.assertIsInstance(result, Nothing)
# region Nothing - Comparison
def test_comparisons(self):
self.assertReallyEqual(Something(1), 1)
self.assertReallyEqual(1, Something(1))
self.assertReallyEqual(Something(1), Something(1))
self.assertReallyEqual(Nothing(), Nothing())
self.assertReallyNotEqual(Something(1), Something(2))
self.assertReallyNotEqual(Something(1), Nothing())
self.assertReallyNotEqual(Nothing(), Something(1))
def test_nothing_cmp(self):
if PY2:
self.assertEqual(0, cmp(Nothing(), Nothing()))
self.assertEqual(1, cmp(1, Nothing()))
self.assertEqual(1, cmp(Something(5), Nothing()))
self.assertEqual(1, cmp(5, Nothing()))
self.assertEqual(-1, cmp(Nothing(), Something(5)))
self.assertEqual(-1, cmp(Nothing(), 5))
def test_nothing_equalToNothing(self):
self.assertTrue(Nothing() == Nothing())
def test_nothing_notEqualToSomething(self):
self.assertFalse(Nothing() == Something(2))
self.assertFalse(Something(1) == Nothing())
def test_nothing_neSomething(self):
self.assertTrue(Nothing() != Something(2))
self.assertTrue(Something(1) != Nothing())
def test_nothing_neNothing(self):
self.assertFalse(Nothing() != Nothing())
def test_nothing_ltNothing_isFalse(self):
self.assertFalse(Nothing() < Nothing())
def test_nothing_ltSomething_isTrue(self):
self.assertTrue(Nothing() < Something(1))
def test_nothing_ltNone_isFalse(self):
self.assertFalse(Nothing() < None)
def test_nothing_ltNotNone_isFalse(self):
self.assertTrue(Nothing() < "some")
def test_nothing_gtAnything_isFalse(self):
self.assertFalse(Nothing() > Nothing())
self.assertFalse(Nothing() > Something(123))
self.assertFalse(Nothing() > None)
self.assertFalse(Nothing() > "Value")
def test_nothing_leAnything_isTrue(self):
self.assertTrue(Nothing() <= Nothing())
self.assertTrue(Nothing() <= Something(123))
self.assertTrue(Nothing() <= None)
self.assertTrue(Nothing() <= "Value")
def test_nothing_geNothing_isTrue(self):
self.assertTrue(Nothing() >= Nothing())
def test_nothing_geNone_isTrue(self):
self.assertTrue(Nothing() >= None)
def test_nothing_geNotNoneOrNothing_isFalse(self):
self.assertFalse(Nothing() >= Something(2))
self.assertFalse(Nothing() >= "some")
# endregion
# region Nothing - Dict
def test_nothing_len_isZero(self):
self.assertEqual(len(Nothing()), 0)
def test_nothing_getItem_returnsNothing(self):
n = Nothing()['name']
self.assertTrue(isinstance(n, Nothing))
self.assertTrue(n.is_none())
self.assertFalse(n.is_some())
def test_nothing_setItem_doestNothing(self):
Nothing()['name'] = 'value' # Will raise if __setitem__ wasnt defined
def test_nothing_delItem_doestNothing(self):
del Nothing()['name'] # Will raise if __delitem__ wasnt defined
# endregion
# region Nothing - Custom representation
def test_nothing_unicode(self):
if PY2:
self.assertEqual(unicode(Nothing()), unicode(None))
def test_nothing_nonzero_isFalse(self):
self.assertFalse(bool(Nothing()))
# endregion
# region Nothing - Misc Methods
def test_nothing_length_isZero(self):
self.assertEqual(len(Nothing()), 0)
def test_nothing_getItem_returnsNone(self):
result = Nothing()[10]
self.assertIsInstance(result, Nothing)
def test_nothing_strings_returnNone(self):
self.assertEqual(str(Nothing()), "Nothing")
# endregion
# region Something - Comparison
def test_something_cmp(self):
if PY2:
n = Nothing()
s = maybe(5)
s1 = maybe(7)
self.assertEqual(1, cmp(s, n))
self.assertEqual(cmp(5, 5), cmp(s, s))
self.assertEqual(cmp(5, 7), cmp(s, s1))
self.assertEqual(cmp(7, 5), cmp(s1, s))
self.assertEqual(cmp(5, 5), cmp(s, 5))
self.assertEqual(cmp(5, 7), cmp(s, 7))
self.assertEqual(cmp(7, 5), cmp(7, s))
def test_something_cmp_greaterThanNothing(self):
l = [Something(0), Nothing()]
sortedl = sorted(l)
self.assertTrue(isinstance(sortedl[0], Nothing))
self.assertTrue(isinstance(sortedl[1], Something))
def test_something_cmp_handlesComparisonBetweenSomethings(self):
l = [Something(10), Something(3)]
sortedl = sorted(l)
self.assertTrue(isinstance(sortedl[0], Something))
self.assertTrue(isinstance(sortedl[1], Something))
self.assertEqual(sortedl[0], 3)
self.assertEqual(sortedl[1], 10)
def test_something_cmp(self):
l = [Something(1), 2, Nothing()]
sortedl = sorted(l)
self.assertTrue(isinstance(sortedl[0], Nothing))
self.assertTrue(isinstance(sortedl[1], Something))
self.assertTrue(isinstance(sortedl[2], int))
self.assertEqual(sortedl[0], None)
self.assertEqual(sortedl[1], 1)
self.assertEqual(sortedl[2], 2)
def test_something_notEqualToNothing(self):
self.assertFalse(Something(1) == Nothing())
self.assertFalse(Nothing() == Something(2))
def test_something_ltNothing_isFalse(self):
self.assertFalse(Something("value") < Nothing())
def test_something_ltSomething_usesValue(self):
self.assertFalse(Something(3) < Something(1))
self.assertTrue(Something(3) > Something(1))
def test_something_gtNothing_isTrue(self):
self.assertTrue(Something("value") > Nothing())
def test_something_leNothing_isFalse(self):
self.assertFalse(Something("value") <= Nothing())
def test_something_leSomething_comparesTheUnderlyingValue(self):
self.assertTrue(Something(1) < Something(2))
self.assertFalse(Something(11) < Something(2))
def test_something_leValue_comparesTheUnderlyingValue(self):
self.assertTrue(Something(1) < 2)
self.assertTrue(Something(1) <= 2)
self.assertTrue(Something(1) <= 1)
self.assertFalse(Something(11) < 2)
def test_something_geNothing_isTrue(self):
self.assertTrue(Something("value") >= Nothing())
def test_something_geSomething_comparesTheUnderlyingValue(self):
self.assertTrue(Something(11) > Something(2))
self.assertTrue(Something(11) >= Something(2))
self.assertTrue(Something(11) >= Something(11))
self.assertFalse(Something(1) > Something(2))
def test_something_geValue_comparesTheUnderlyingValue(self):
self.assertTrue(Something(11) > 2)
self.assertFalse(Something(1) > 2)
# endregion
def test_something_conversions(self):
s = "value"
d = dict(name="Eran")
n = 123
f = 3.14
if PY2:
self.assertEqual(unicode(Something(s)), s)
self.assertEqual(long(Something(n)), n)
self.assertIsInstance(long(Something(f)), long)
self.assertEqual(str(Something(s)), "Something(%s)" % s)
self.assertEqual(repr(Something(s)), "Something(%s)" % repr(s))
self.assertEqual(repr(Something(d)), "Something(%s)" % repr(d))
self.assertEqual(int(Something(n)), n)
self.assertIsInstance(int(Something(n)), int)
self.assertEqual(float(Something(f)), f)
self.assertIsInstance(float(Something(f)), float)
# region Something - Container Methods
def test_something_len_isZero(self):
s = maybe(dict(test='value'))
self.assertEqual(len(s), 1)
s = maybe([1, 2, 3])
self.assertEqual(len(s), 3)
def test_something_getItem_returnsNothing(self):
s = maybe(dict(test='value'))
self.assertTrue(isinstance(s['test'], Something))
self.assertTrue(s['test'].is_some())
self.assertTrue(isinstance(s['test1'], Nothing))
self.assertTrue(s['test1'].is_none())
def test_something_list_getItem_returnsNothing(self):
s = maybe([1, 2, 3])
self.assertTrue(isinstance(s[0], Something))
self.assertTrue(s[0].is_some())
self.assertEqual(s[0].get(), 1)
self.assertTrue(isinstance(s['test1'], Nothing))
self.assertTrue(s['test1'].is_none())
self.assertTrue(isinstance(s[dict()], Nothing))
self.assertTrue(s[dict()].is_none())
self.assertTrue(isinstance(s[10], Nothing))
self.assertTrue(s[10].is_none())
def test_something_setItem_doestNothing(self):
s = maybe(dict(test='value'))
s['test'] = 'yeah'
self.assertEqual(s['test'], 'yeah')
self.assertTrue(s['test'].is_some())
def test_something_delItem_doestNothing(self):
s = maybe(dict(test='value'))
del s['test'] # Will raise if __delitem__ wasnt defined
self.assertEqual(len(s), 0)
self.assertTrue(s['test'].is_none())
def test_something_iter_onIterable_returnsArrayIterator(self):
s = maybe([1, 2, 3, 4, 5])
l = list(iter(s))
self.assertEqual([1, 2, 3, 4, 5], l)
def test_something_iter_onNotIterable_returnsArrayIterator(self):
class Foo(object):
pass
obj = Foo()
l = list(iter(maybe(obj)))
self.assertEqual(l, [obj])
def test_something_missing_onDefaultDict_forwardsCallToDefaultDict(self):
from collections import defaultdict
d = maybe(defaultdict(lambda: 'default'))
d['test'] = 'ok'
self.assertEqual(d['doesnt exist'], 'default')
self.assertEqual(d['test'], 'ok')
self.assertTrue(d['doesnt exist'].is_some())
self.assertTrue(d['test'].is_some())
def test_something_reversed(self):
l = maybe([1, 2, 3])
lr = list(reversed(l))
self.assertEqual(lr[0], 3)
self.assertEqual(lr[1], 2)
self.assertEqual(lr[2], 1)
# endregion
def test_something_typeConversions(self):
import math
self.assertEqual(complex(1), complex(Something(1)))
self.assertEqual(oct(1), oct(Something(1)))
self.assertEqual(hex(16), hex(Something(16)))
self.assertEqual(math.trunc(math.pi), math.trunc(maybe(math.pi)))
# region method call forwarding
def test_something_forwardsMethodCalls(self):
result = maybe('VALUE').lower()
assert result.is_some()
assert result == 'value', "result %s should be 'value'" % result
assert result == maybe('value')
def test_something_forwardsMethodCalls_handlesNonExisting(self):
result = maybe('VALUE').lowerr()
assert result.is_none()
def test_nothing_forwardsMethodCalls_handlesNonExisting(self):
result = maybe(None).invalid().call()
assert result.is_none()
# endregion
# region Assertions (for compatibility between Python version)
def assertIsInstance(self, obj, cls, msg=None):
result = isinstance(obj, cls)
self.assertTrue(result, msg=msg)
# endregion
def assertReallyEqual(self, a, b):
self.assertEqual(a, b)
self.assertEqual(b, a)
self.assertTrue(a == b)
self.assertTrue(b == a)
self.assertFalse(a != b)
self.assertFalse(b != a)
if PY2:
self.assertEqual(0, cmp(a, b))
self.assertEqual(0, cmp(b, a))
def assertReallyNotEqual(self, a, b):
self.assertNotEqual(a, b)
self.assertNotEqual(b, a)
self.assertFalse(a == b)
self.assertFalse(b == a)
self.assertTrue(a != b)
self.assertTrue(b != a)
if PY2:
self.assertNotEqual(0, cmp(a, b))
self.assertNotEqual(0, cmp(b, a))
if __name__ == '__main__':
unittest.main() | tests/test_pymaybe.py | import sys
import unittest
import doctest
from pymaybe import maybe, Something, Nothing
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
def load_tests(loader, tests, ignore):
import pymaybe
tests.addTests(
doctest.DocTestSuite(
pymaybe,
globs=pymaybe.get_doctest_globs(),
optionflags=doctest.IGNORE_EXCEPTION_DETAIL
)
)
return tests
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
class TestPyMaybe(unittest.TestCase):
def test_maybe_withValue_returnsSomething(self):
result = maybe("Value")
self.assertIsInstance(result, Something)
def test_maybe_withMaybe_returnMaybe(self):
m = maybe("value")
self.assertEqual(maybe(m), m)
def test_maybe_withNone_returnsNothing(self):
result = maybe(None)
self.assertIsInstance(result, Nothing)
# region Nothing - Comparison
def test_comparisons(self):
self.assertReallyEqual(Something(1), 1)
self.assertReallyEqual(1, Something(1))
self.assertReallyEqual(Something(1), Something(1))
self.assertReallyEqual(Nothing(), Nothing())
self.assertReallyNotEqual(Something(1), Something(2))
self.assertReallyNotEqual(Something(1), Nothing())
self.assertReallyNotEqual(Nothing(), Something(1))
def test_nothing_cmp(self):
if PY2:
self.assertEqual(0, cmp(Nothing(), Nothing()))
self.assertEqual(1, cmp(1, Nothing()))
self.assertEqual(1, cmp(Something(5), Nothing()))
self.assertEqual(1, cmp(5, Nothing()))
self.assertEqual(-1, cmp(Nothing(), Something(5)))
self.assertEqual(-1, cmp(Nothing(), 5))
def test_nothing_equalToNothing(self):
self.assertTrue(Nothing() == Nothing())
def test_nothing_notEqualToSomething(self):
self.assertFalse(Nothing() == Something(2))
self.assertFalse(Something(1) == Nothing())
def test_nothing_neSomething(self):
self.assertTrue(Nothing() != Something(2))
self.assertTrue(Something(1) != Nothing())
def test_nothing_neNothing(self):
self.assertFalse(Nothing() != Nothing())
def test_nothing_ltNothing_isFalse(self):
self.assertFalse(Nothing() < Nothing())
def test_nothing_ltSomething_isTrue(self):
self.assertTrue(Nothing() < Something(1))
def test_nothing_ltNone_isFalse(self):
self.assertFalse(Nothing() < None)
def test_nothing_ltNotNone_isFalse(self):
self.assertTrue(Nothing() < "some")
def test_nothing_gtAnything_isFalse(self):
self.assertFalse(Nothing() > Nothing())
self.assertFalse(Nothing() > Something(123))
self.assertFalse(Nothing() > None)
self.assertFalse(Nothing() > "Value")
def test_nothing_leAnything_isTrue(self):
self.assertTrue(Nothing() <= Nothing())
self.assertTrue(Nothing() <= Something(123))
self.assertTrue(Nothing() <= None)
self.assertTrue(Nothing() <= "Value")
def test_nothing_geNothing_isTrue(self):
self.assertTrue(Nothing() >= Nothing())
def test_nothing_geNone_isTrue(self):
self.assertTrue(Nothing() >= None)
def test_nothing_geNotNoneOrNothing_isFalse(self):
self.assertFalse(Nothing() >= Something(2))
self.assertFalse(Nothing() >= "some")
# endregion
# region Nothing - Dict
def test_nothing_len_isZero(self):
self.assertEqual(len(Nothing()), 0)
def test_nothing_getItem_returnsNothing(self):
n = Nothing()['name']
self.assertTrue(isinstance(n, Nothing))
self.assertTrue(n.is_none())
self.assertFalse(n.is_some())
def test_nothing_setItem_doestNothing(self):
Nothing()['name'] = 'value' # Will raise if __setitem__ wasnt defined
def test_nothing_delItem_doestNothing(self):
del Nothing()['name'] # Will raise if __delitem__ wasnt defined
# endregion
# region Nothing - Custom representation
def test_nothing_unicode(self):
if PY2:
self.assertEqual(unicode(Nothing()), unicode(None))
def test_nothing_nonzero_isFalse(self):
self.assertFalse(bool(Nothing()))
# endregion
# region Nothing - Misc Methods
def test_nothing_length_isZero(self):
self.assertEqual(len(Nothing()), 0)
def test_nothing_getItem_returnsNone(self):
result = Nothing()[10]
self.assertIsInstance(result, Nothing)
def test_nothing_strings_returnNone(self):
self.assertEqual(str(Nothing()), "Nothing")
# endregion
# region Something - Comparison
def test_something_cmp(self):
if PY2:
n = Nothing()
s = maybe(5)
s1 = maybe(7)
self.assertEqual(1, cmp(s, n))
self.assertEqual(cmp(5, 5), cmp(s, s))
self.assertEqual(cmp(5, 7), cmp(s, s1))
self.assertEqual(cmp(7, 5), cmp(s1, s))
self.assertEqual(cmp(5, 5), cmp(s, 5))
self.assertEqual(cmp(5, 7), cmp(s, 7))
self.assertEqual(cmp(7, 5), cmp(7, s))
def test_something_cmp_greaterThanNothing(self):
l = [Something(0), Nothing()]
sortedl = sorted(l)
self.assertTrue(isinstance(sortedl[0], Nothing))
self.assertTrue(isinstance(sortedl[1], Something))
def test_something_cmp_handlesComparisonBetweenSomethings(self):
l = [Something(10), Something(3)]
sortedl = sorted(l)
self.assertTrue(isinstance(sortedl[0], Something))
self.assertTrue(isinstance(sortedl[1], Something))
self.assertEqual(sortedl[0], 3)
self.assertEqual(sortedl[1], 10)
def test_something_cmp(self):
l = [Something(1), 2, Nothing()]
sortedl = sorted(l)
self.assertTrue(isinstance(sortedl[0], Nothing))
self.assertTrue(isinstance(sortedl[1], Something))
self.assertTrue(isinstance(sortedl[2], int))
self.assertEqual(sortedl[0], None)
self.assertEqual(sortedl[1], 1)
self.assertEqual(sortedl[2], 2)
def test_something_notEqualToNothing(self):
self.assertFalse(Something(1) == Nothing())
self.assertFalse(Nothing() == Something(2))
def test_something_ltNothing_isFalse(self):
self.assertFalse(Something("value") < Nothing())
def test_something_ltSomething_usesValue(self):
self.assertFalse(Something(3) < Something(1))
self.assertTrue(Something(3) > Something(1))
def test_something_gtNothing_isTrue(self):
self.assertTrue(Something("value") > Nothing())
def test_something_leNothing_isFalse(self):
self.assertFalse(Something("value") <= Nothing())
def test_something_leSomething_comparesTheUnderlyingValue(self):
self.assertTrue(Something(1) < Something(2))
self.assertFalse(Something(11) < Something(2))
def test_something_leValue_comparesTheUnderlyingValue(self):
self.assertTrue(Something(1) < 2)
self.assertTrue(Something(1) <= 2)
self.assertTrue(Something(1) <= 1)
self.assertFalse(Something(11) < 2)
def test_something_geNothing_isTrue(self):
self.assertTrue(Something("value") >= Nothing())
def test_something_geSomething_comparesTheUnderlyingValue(self):
self.assertTrue(Something(11) > Something(2))
self.assertTrue(Something(11) >= Something(2))
self.assertTrue(Something(11) >= Something(11))
self.assertFalse(Something(1) > Something(2))
def test_something_geValue_comparesTheUnderlyingValue(self):
self.assertTrue(Something(11) > 2)
self.assertFalse(Something(1) > 2)
# endregion
def test_something_conversions(self):
s = "value"
d = dict(name="Eran")
n = 123
f = 3.14
if PY2:
self.assertEqual(unicode(Something(s)), s)
self.assertEqual(long(Something(n)), n)
self.assertIsInstance(long(Something(f)), long)
self.assertEqual(str(Something(s)), "Something(%s)" % s)
self.assertEqual(repr(Something(s)), "Something(%s)" % repr(s))
self.assertEqual(repr(Something(d)), "Something(%s)" % repr(d))
self.assertEqual(int(Something(n)), n)
self.assertIsInstance(int(Something(n)), int)
self.assertEqual(float(Something(f)), f)
self.assertIsInstance(float(Something(f)), float)
# region Something - Container Methods
def test_something_len_isZero(self):
s = maybe(dict(test='value'))
self.assertEqual(len(s), 1)
s = maybe([1, 2, 3])
self.assertEqual(len(s), 3)
def test_something_getItem_returnsNothing(self):
s = maybe(dict(test='value'))
self.assertTrue(isinstance(s['test'], Something))
self.assertTrue(s['test'].is_some())
self.assertTrue(isinstance(s['test1'], Nothing))
self.assertTrue(s['test1'].is_none())
def test_something_list_getItem_returnsNothing(self):
s = maybe([1, 2, 3])
self.assertTrue(isinstance(s[0], Something))
self.assertTrue(s[0].is_some())
self.assertEqual(s[0].get(), 1)
self.assertTrue(isinstance(s['test1'], Nothing))
self.assertTrue(s['test1'].is_none())
self.assertTrue(isinstance(s[dict()], Nothing))
self.assertTrue(s[dict()].is_none())
self.assertTrue(isinstance(s[10], Nothing))
self.assertTrue(s[10].is_none())
def test_something_setItem_doestNothing(self):
s = maybe(dict(test='value'))
s['test'] = 'yeah'
self.assertEqual(s['test'], 'yeah')
self.assertTrue(s['test'].is_some())
def test_something_delItem_doestNothing(self):
s = maybe(dict(test='value'))
del s['test'] # Will raise if __delitem__ wasnt defined
self.assertEqual(len(s), 0)
self.assertTrue(s['test'].is_none())
    def test_something_iter_onIterable_returnsArrayIterator(self):
        """iter() over a wrapped list walks the underlying elements."""
        s = maybe([1, 2, 3, 4, 5])
        l = list(iter(s))
        self.assertEqual([1, 2, 3, 4, 5], l)
    def test_something_iter_onNotIterable_returnsArrayIterator(self):
        """iter() over a wrapped non-iterable yields just that one object."""
        class Foo(object):
            pass
        obj = Foo()
        l = list(iter(maybe(obj)))
        self.assertEqual(l, [obj])
    def test_something_missing_onDefaultDict_forwardsCallToDefaultDict(self):
        """Missing keys must defer to the wrapped defaultdict's factory
        instead of degrading to Nothing."""
        from collections import defaultdict
        d = maybe(defaultdict(lambda: 'default'))
        d['test'] = 'ok'
        self.assertEqual(d['doesnt exist'], 'default')
        self.assertEqual(d['test'], 'ok')
        self.assertTrue(d['doesnt exist'].is_some())
        self.assertTrue(d['test'].is_some())
    def test_something_reversed(self):
        """reversed() is forwarded to the wrapped sequence."""
        l = maybe([1, 2, 3])
        lr = list(reversed(l))
        self.assertEqual(lr[0], 3)
        self.assertEqual(lr[1], 2)
        self.assertEqual(lr[2], 1)
# endregion
    def test_something_typeConversions(self):
        """Numeric protocol conversions (complex/oct/hex/trunc) must unwrap
        the value so results match the plain-value conversions."""
        import math
        self.assertEqual(complex(1), complex(Something(1)))
        self.assertEqual(oct(1), oct(Something(1)))
        self.assertEqual(hex(16), hex(Something(16)))
        self.assertEqual(math.trunc(math.pi), math.trunc(maybe(math.pi)))
# region method call forwarding
    def test_something_forwardsMethodCalls(self):
        """Attribute access is forwarded to the wrapped value and the call
        result is re-wrapped as Something."""
        result = maybe('VALUE').lower()
        assert result.is_some()
        assert result == 'value', "result %s should be 'value'" % result
        assert result == maybe('value')
    def test_something_forwardsMethodCalls_handlesNonExisting(self):
        """Calling a method the wrapped value lacks degrades to Nothing."""
        result = maybe('VALUE').lowerr()
        assert result.is_none()
    def test_nothing_forwardsMethodCalls_handlesNonExisting(self):
        """Any chained call on Nothing stays Nothing (no AttributeError)."""
        result = maybe(None).invalid().call()
        assert result.is_none()
# endregion
# region Assertions (for compatibility between Python version)
    def assertIsInstance(self, obj, cls, msg=None):
        """Portable assertIsInstance — older unittest versions lack it."""
        result = isinstance(obj, cls)
        self.assertTrue(result, msg=msg)
# endregion
    def assertReallyEqual(self, a, b):
        """Assert equality symmetrically through ==, != and (on Python 2)
        cmp(), in both argument orders."""
        self.assertEqual(a, b)
        self.assertEqual(b, a)
        self.assertTrue(a == b)
        self.assertTrue(b == a)
        self.assertFalse(a != b)
        self.assertFalse(b != a)
        if PY2:
            # cmp() only exists on Python 2.
            self.assertEqual(0, cmp(a, b))
            self.assertEqual(0, cmp(b, a))
    def assertReallyNotEqual(self, a, b):
        """Assert inequality symmetrically through ==, != and (on Python 2)
        cmp(), in both argument orders."""
        self.assertNotEqual(a, b)
        self.assertNotEqual(b, a)
        self.assertFalse(a == b)
        self.assertFalse(b == a)
        self.assertTrue(a != b)
        self.assertTrue(b != a)
        if PY2:
            # cmp() only exists on Python 2.
            self.assertNotEqual(0, cmp(a, b))
            self.assertNotEqual(0, cmp(b, a))
if __name__ == '__main__':
unittest.main() | 0.402392 | 0.681667 |
# Copyright 2016 <NAME>
# 2016 <NAME>
# Apache 2.0.
# we're using python 3.x style print but want it to work in python 2.x,
from __future__ import print_function
from collections import defaultdict
import argparse
import sys
class StrToBoolAction(argparse.Action):
    """argparse action mapping the shell strings "true"/"false" to the Python
    booleans True/False; any other value raises an Exception."""
    def __call__(self, parser, namespace, values, option_string=None):
        mapping = {"true": True, "false": False}
        if values not in mapping:
            raise Exception("Unknown value {0} for --{1}".format(values, self.dest))
        setattr(namespace, self.dest, mapping[values])
def GetArgs():
    """Parse command-line options, echo the invocation to stderr, and return
    the validated args (with file handles attached by CheckArgs)."""
    # BUG FIX: several adjacent string literals in the help/epilog text were
    # missing separators (e.g. "...decoding.txtSee", "lexicon.each"), and the
    # description ended in a dangling "for.". Only help text changed.
    parser = argparse.ArgumentParser(description = "Converts pronunciation statistics (from phonetic decoding or g2p) "
                                     "into a lexicon. We prune the pronunciations "
                                     "based on a provided stats file, and optionally filter out entries which are present "
                                     "in a filter lexicon.",
                                     epilog = "e.g. steps/dict/prons_to_lexicon.py --min-prob=0.4 \\"
                                     "--filter-lexicon=exp/tri3_lex_0.4_work/phone_decode/filter_lexicon.txt \\"
                                     "exp/tri3_lex_0.4_work/phone_decode/prons.txt \\"
                                     "exp/tri3_lex_0.4_work/lexicon_phone_decoding.txt . "
                                     "See steps/dict/learn_lexicon_greedy.sh for examples in detail.")
    parser.add_argument("--set-sum-to-one", type = str, default = False,
                        action = StrToBoolAction, choices = ["true", "false"],
                        help = "If normalize lexicon such that the sum of "
                        "probabilities is 1.")
    parser.add_argument("--set-max-to-one", type = str, default = True,
                        action = StrToBoolAction, choices = ["true", "false"],
                        help = "If normalize lexicon such that the max "
                        "probability is 1.")
    parser.add_argument("--top-N", type = int, default = 0,
                        help = "If non-zero, we just take the top N pronunciations (according to stats/pron-probs) for each word.")
    parser.add_argument("--min-prob", type = float, default = 0.1,
                        help = "Remove pronunciation with probabilities less "
                        "than this value after normalization.")
    parser.add_argument("--filter-lexicon", metavar='<filter-lexicon>', type = str, default = '',
                        help = "Exclude entries in this filter lexicon from the output lexicon. "
                        "Each line must be <word> <phones>")
    parser.add_argument("stats_file", metavar='<stats-file>', type = str,
                        help = "Input lexicon file containing pronunciation statistics/probs in the first column. "
                        "Each line must be <counts> <word> <phones>")
    parser.add_argument("out_lexicon", metavar='<out-lexicon>', type = str,
                        help = "Output lexicon.")
    # Echo the full command line to stderr for logging/reproducibility.
    print (' '.join(sys.argv), file = sys.stderr)
    args = parser.parse_args()
    args = CheckArgs(args)
    return args
def CheckArgs(args):
    """Validate parsed args and attach open file handles.

    Opens args.stats_file / args.filter_lexicon for reading and
    args.out_lexicon for writing ("-" maps to stdin/stdout), and checks that
    exactly one of set-max-to-one / set-sum-to-one was selected.

    :raises Exception: if set-max-to-one equals set-sum-to-one.
    """
    if args.stats_file == "-":
        args.stats_file_handle = sys.stdin
    else:
        args.stats_file_handle = open(args.stats_file)
    # BUG FIX: the filter lexicon is *read* (see ReadLexicon), so "-" must map
    # to sys.stdin, not sys.stdout. Also compare strings with !=, not
    # "is not" — identity comparison with a literal relies on interning.
    if args.filter_lexicon != '':
        if args.filter_lexicon == "-":
            args.filter_lexicon_handle = sys.stdin
        else:
            args.filter_lexicon_handle = open(args.filter_lexicon)
    if args.out_lexicon == "-":
        args.out_lexicon_handle = sys.stdout
    else:
        args.out_lexicon_handle = open(args.out_lexicon, "w")
    if args.set_max_to_one == args.set_sum_to_one:
        raise Exception("Cannot have both "
                        "set-max-to-one and set-sum-to-one as true or false.")
    return args
def ReadStats(args):
    """Accumulate (word, pron) counts and per-word totals from the stats file.

    Each input line is "<count> <word> <phones...>"; lines with fewer than
    three fields are skipped.

    :return: [lexicon, word_count] where lexicon maps (word, phones) to a
        summed count and word_count maps word to its total count.
    """
    lexicon = {}
    word_count = {}
    for raw_line in args.stats_file_handle:
        fields = raw_line.strip().split()
        if len(fields) < 3:
            continue
        count = float(fields[0])
        word = fields[1]
        key = (word, ' '.join(fields[2:]))
        lexicon[key] = lexicon.get(key, 0) + count
        word_count[word] = word_count.get(word, 0) + count
    return [lexicon, word_count]
def ReadLexicon(lexicon_file_handle):
    """Read "<word> <phones...>" lines into a set of (word, phones) pairs.

    A falsy handle yields an empty set; blank lines are skipped; a line with
    only one field raises an Exception.
    """
    entries = set()
    if not lexicon_file_handle:
        return entries
    for line in lexicon_file_handle.readlines():
        fields = line.strip().split()
        if not fields:
            continue
        if len(fields) < 2:
            raise Exception('Invalid format of line ' + line
                            + ' in lexicon file.')
        entries.add((fields[0], ' '.join(fields[1:])))
    return entries
def ConvertWordCountsToProbs(args, lexicon, word_count):
    """Convert per-pronunciation counts into per-word relative frequencies.

    :param args: unused; kept for interface compatibility with callers.
    :param lexicon: dict mapping (word, phones) -> count.
    :param word_count: dict mapping word -> total count over all its prons.
    :return: dict mapping word -> list of (phones, prob) pairs.
    """
    # BUG FIX: .items() instead of the Python-2-only .iteritems(), keeping the
    # script runnable under both Python 2 and 3 as the file header intends.
    word_probs = defaultdict(list)
    for (word, phones), count in lexicon.items():
        prob = float(count) / float(word_count[word])
        word_probs[word].append((phones, prob))
    return dict(word_probs)
def ConvertWordProbsToLexicon(word_probs):
    """Flatten word -> [(phones, prob), ...] back into a (word, phones) ->
    prob dict, summing duplicate pronunciations."""
    # BUG FIX: .items() instead of the Python-2-only .iteritems().
    lexicon = {}
    for word, entries in word_probs.items():
        for phones, prob in entries:
            lexicon[(word, phones)] = lexicon.get((word, phones), 0) + prob
    return lexicon
def NormalizeLexicon(lexicon, set_max_to_one = True,
                     set_sum_to_one = False, min_prob = 0):
    """Normalize pronunciation probabilities in place, per word.

    With set_max_to_one each word's probs are divided by that word's max;
    with set_sum_to_one they are divided by the word's sum. Probs that end up
    below min_prob are zeroed (entries are kept so callers can count prunes).
    """
    # First pass: per-word (sum, max) accumulators.
    # BUG FIX: .items() instead of the Python-2-only .iteritems().
    word_stats = {}
    for (word, _phones), prob in lexicon.items():
        total, peak = word_stats.get(word, (0, 0))
        word_stats[word] = (total + prob, max(peak, prob))
    # Second pass: rescale and apply the pruning threshold. Assigning to
    # existing keys while iterating is safe — the key set does not change.
    for (word, phones), prob in lexicon.items():
        total, peak = word_stats[word]
        if set_max_to_one:
            prob = prob / peak
        elif set_sum_to_one:
            prob = prob / total
        if prob < min_prob:
            prob = 0
        lexicon[(word, phones)] = prob
def TakeTopN(lexicon, top_N):
    """Keep only the top_N most probable pronunciations of each word, in
    place: everything past rank top_N gets prob 0 (filtered out later by
    WriteLexicon) rather than being deleted."""
    # BUG FIX: .items() instead of the Python-2-only .iteritems(); also
    # dropped the unused `lexicon_pruned` local.
    by_word = defaultdict(list)
    for (word, phones), prob in lexicon.items():
        by_word[word].append((phones, prob))
    for word, prons in by_word.items():
        # Stable sort by descending prob, matching the original tie order.
        prons.sort(key=lambda pair: pair[1], reverse=True)
        for phones, _prob in prons[top_N:]:
            lexicon[(word, phones)] = 0
def WriteLexicon(args, lexicon, filter_lexicon):
    """Write "<word> <phones>" lines for every entry with nonzero prob that is
    not present in filter_lexicon, then print summary stats to stderr."""
    words = set()
    num_removed = 0
    num_filtered = 0
    # BUG FIX: .items() instead of the Python-2-only .iteritems().
    for entry, prob in lexicon.items():
        if prob == 0:
            # Zeroed earlier by NormalizeLexicon/TakeTopN pruning.
            num_removed += 1
            continue
        if entry in filter_lexicon:
            num_filtered += 1
            continue
        words.add(entry[0])
        print("{0} {1}".format(entry[0], entry[1]),
              file = args.out_lexicon_handle)
    print ("Before pruning, the total num. pronunciations is: {}".format(len(lexicon)), file=sys.stderr)
    print ("Removed {0} pronunciations by setting min_prob {1}".format(num_removed, args.min_prob), file=sys.stderr)
    print ("Filtered out {} pronunciations in the filter lexicon.".format(num_filtered), file=sys.stderr)
    num_prons_from_phone_decoding = len(lexicon) - num_removed - num_filtered
    # BUG FIX: added the missing space before "is" below — the two adjacent
    # string literals used to concatenate to "...phone decodingis {0}".
    print ("Num. pronunciations in the output lexicon, which solely come from phone decoding "
           "is {0}. num. words is {1}".format(num_prons_from_phone_decoding, len(words)), file=sys.stderr)
def Main():
    """Read pron stats, convert to probs, prune (top-N or normalization), and
    write the resulting lexicon."""
    args = GetArgs()
    [lexicon, word_count] = ReadStats(args)
    word_probs = ConvertWordCountsToProbs(args, lexicon, word_count)
    lexicon = ConvertWordProbsToLexicon(word_probs)
    filter_lexicon = set()
    # BUG FIX: compare strings by value — "is not ''" relies on interning and
    # is a SyntaxWarning/error on recent Pythons.
    if args.filter_lexicon != '':
        filter_lexicon = ReadLexicon(args.filter_lexicon_handle)
    if args.top_N > 0:
        TakeTopN(lexicon, args.top_N)
    else:
        NormalizeLexicon(lexicon, set_max_to_one = args.set_max_to_one,
                         set_sum_to_one = args.set_sum_to_one,
                         min_prob = args.min_prob)
    WriteLexicon(args, lexicon, filter_lexicon)
    args.out_lexicon_handle.close()
if __name__ == "__main__":
Main() | egs/wsj/s5/steps/dict/prons_to_lexicon.py |
# Copyright 2016 <NAME>
# 2016 <NAME>
# Apache 2.0.
# we're using python 3.x style print but want it to work in python 2.x,
from __future__ import print_function
from collections import defaultdict
import argparse
import sys
class StrToBoolAction(argparse.Action):
""" A custom action to convert bools from shell format i.e., true/false
to python format i.e., True/False """
def __call__(self, parser, namespace, values, option_string=None):
if values == "true":
setattr(namespace, self.dest, True)
elif values == "false":
setattr(namespace, self.dest, False)
else:
raise Exception("Unknown value {0} for --{1}".format(values, self.dest))
def GetArgs():
parser = argparse.ArgumentParser(description = "Converts pronunciation statistics (from phonetic decoding or g2p) "
"into a lexicon for. We prune the pronunciations "
"based on a provided stats file, and optionally filter out entries which are present "
"in a filter lexicon.",
epilog = "e.g. steps/dict/prons_to_lexicon.py --min-prob=0.4 \\"
"--filter-lexicon=exp/tri3_lex_0.4_work/phone_decode/filter_lexicon.txt \\"
"exp/tri3_lex_0.4_work/phone_decode/prons.txt \\"
"exp/tri3_lex_0.4_work/lexicon_phone_decoding.txt"
"See steps/dict/learn_lexicon_greedy.sh for examples in detail.")
parser.add_argument("--set-sum-to-one", type = str, default = False,
action = StrToBoolAction, choices = ["true", "false"],
help = "If normalize lexicon such that the sum of "
"probabilities is 1.")
parser.add_argument("--set-max-to-one", type = str, default = True,
action = StrToBoolAction, choices = ["true", "false"],
help = "If normalize lexicon such that the max "
"probability is 1.")
parser.add_argument("--top-N", type = int, default = 0,
help = "If non-zero, we just take the top N pronunciations (according to stats/pron-probs) for each word.")
parser.add_argument("--min-prob", type = float, default = 0.1,
help = "Remove pronunciation with probabilities less "
"than this value after normalization.")
parser.add_argument("--filter-lexicon", metavar='<filter-lexicon>', type = str, default = '',
help = "Exclude entries in this filter lexicon from the output lexicon."
"each line must be <word> <phones>")
parser.add_argument("stats_file", metavar='<stats-file>', type = str,
help = "Input lexicon file containing pronunciation statistics/probs in the first column."
"each line must be <counts> <word> <phones>")
parser.add_argument("out_lexicon", metavar='<out-lexicon>', type = str,
help = "Output lexicon.")
print (' '.join(sys.argv), file = sys.stderr)
args = parser.parse_args()
args = CheckArgs(args)
return args
def CheckArgs(args):
    """Validate parsed args and attach open file handles.

    Opens args.stats_file / args.filter_lexicon for reading and
    args.out_lexicon for writing ("-" maps to stdin/stdout), and checks that
    exactly one of set-max-to-one / set-sum-to-one was selected.

    :raises Exception: if set-max-to-one equals set-sum-to-one.
    """
    if args.stats_file == "-":
        args.stats_file_handle = sys.stdin
    else:
        args.stats_file_handle = open(args.stats_file)
    # BUG FIX: the filter lexicon is *read* (see ReadLexicon), so "-" must map
    # to sys.stdin, not sys.stdout. Also compare strings with !=, not
    # "is not" — identity comparison with a literal relies on interning.
    if args.filter_lexicon != '':
        if args.filter_lexicon == "-":
            args.filter_lexicon_handle = sys.stdin
        else:
            args.filter_lexicon_handle = open(args.filter_lexicon)
    if args.out_lexicon == "-":
        args.out_lexicon_handle = sys.stdout
    else:
        args.out_lexicon_handle = open(args.out_lexicon, "w")
    if args.set_max_to_one == args.set_sum_to_one:
        raise Exception("Cannot have both "
                        "set-max-to-one and set-sum-to-one as true or false.")
    return args
def ReadStats(args):
lexicon = {}
word_count = {}
for line in args.stats_file_handle:
splits = line.strip().split()
if len(splits) < 3:
continue
word = splits[1]
count = float(splits[0])
phones = ' '.join(splits[2:])
lexicon[(word, phones)] = lexicon.get((word, phones), 0) + count
word_count[word] = word_count.get(word, 0) + count
return [lexicon, word_count]
def ReadLexicon(lexicon_file_handle):
lexicon = set()
if lexicon_file_handle:
for line in lexicon_file_handle.readlines():
splits = line.strip().split()
if len(splits) == 0:
continue
if len(splits) < 2:
raise Exception('Invalid format of line ' + line
+ ' in lexicon file.')
word = splits[0]
phones = ' '.join(splits[1:])
lexicon.add((word, phones))
return lexicon
def ConvertWordCountsToProbs(args, lexicon, word_count):
    """Convert per-pronunciation counts into per-word relative frequencies.

    :param args: unused; kept for interface compatibility with callers.
    :param lexicon: dict mapping (word, phones) -> count.
    :param word_count: dict mapping word -> total count over all its prons.
    :return: dict mapping word -> list of (phones, prob) pairs.
    """
    # BUG FIX: .items() instead of the Python-2-only .iteritems(), keeping the
    # script runnable under both Python 2 and 3 as the file header intends.
    word_probs = defaultdict(list)
    for (word, phones), count in lexicon.items():
        prob = float(count) / float(word_count[word])
        word_probs[word].append((phones, prob))
    return dict(word_probs)
def ConvertWordProbsToLexicon(word_probs):
    """Flatten word -> [(phones, prob), ...] back into a (word, phones) ->
    prob dict, summing duplicate pronunciations."""
    # BUG FIX: .items() instead of the Python-2-only .iteritems().
    lexicon = {}
    for word, entries in word_probs.items():
        for phones, prob in entries:
            lexicon[(word, phones)] = lexicon.get((word, phones), 0) + prob
    return lexicon
def NormalizeLexicon(lexicon, set_max_to_one = True,
                     set_sum_to_one = False, min_prob = 0):
    """Normalize pronunciation probabilities in place, per word.

    With set_max_to_one each word's probs are divided by that word's max;
    with set_sum_to_one they are divided by the word's sum. Probs that end up
    below min_prob are zeroed (entries are kept so callers can count prunes).
    """
    # First pass: per-word (sum, max) accumulators.
    # BUG FIX: .items() instead of the Python-2-only .iteritems().
    word_stats = {}
    for (word, _phones), prob in lexicon.items():
        total, peak = word_stats.get(word, (0, 0))
        word_stats[word] = (total + prob, max(peak, prob))
    # Second pass: rescale and apply the pruning threshold. Assigning to
    # existing keys while iterating is safe — the key set does not change.
    for (word, phones), prob in lexicon.items():
        total, peak = word_stats[word]
        if set_max_to_one:
            prob = prob / peak
        elif set_sum_to_one:
            prob = prob / total
        if prob < min_prob:
            prob = 0
        lexicon[(word, phones)] = prob
def TakeTopN(lexicon, top_N):
    """Keep only the top_N most probable pronunciations of each word, in
    place: everything past rank top_N gets prob 0 (filtered out later by
    WriteLexicon) rather than being deleted."""
    # BUG FIX: .items() instead of the Python-2-only .iteritems(); also
    # dropped the unused `lexicon_pruned` local.
    by_word = defaultdict(list)
    for (word, phones), prob in lexicon.items():
        by_word[word].append((phones, prob))
    for word, prons in by_word.items():
        # Stable sort by descending prob, matching the original tie order.
        prons.sort(key=lambda pair: pair[1], reverse=True)
        for phones, _prob in prons[top_N:]:
            lexicon[(word, phones)] = 0
def WriteLexicon(args, lexicon, filter_lexicon):
    """Write "<word> <phones>" lines for every entry with nonzero prob that is
    not present in filter_lexicon, then print summary stats to stderr."""
    words = set()
    num_removed = 0
    num_filtered = 0
    # BUG FIX: .items() instead of the Python-2-only .iteritems().
    for entry, prob in lexicon.items():
        if prob == 0:
            # Zeroed earlier by NormalizeLexicon/TakeTopN pruning.
            num_removed += 1
            continue
        if entry in filter_lexicon:
            num_filtered += 1
            continue
        words.add(entry[0])
        print("{0} {1}".format(entry[0], entry[1]),
              file = args.out_lexicon_handle)
    print ("Before pruning, the total num. pronunciations is: {}".format(len(lexicon)), file=sys.stderr)
    print ("Removed {0} pronunciations by setting min_prob {1}".format(num_removed, args.min_prob), file=sys.stderr)
    print ("Filtered out {} pronunciations in the filter lexicon.".format(num_filtered), file=sys.stderr)
    num_prons_from_phone_decoding = len(lexicon) - num_removed - num_filtered
    # BUG FIX: added the missing space before "is" below — the two adjacent
    # string literals used to concatenate to "...phone decodingis {0}".
    print ("Num. pronunciations in the output lexicon, which solely come from phone decoding "
           "is {0}. num. words is {1}".format(num_prons_from_phone_decoding, len(words)), file=sys.stderr)
def Main():
    """Read pron stats, convert to probs, prune (top-N or normalization), and
    write the resulting lexicon."""
    args = GetArgs()
    [lexicon, word_count] = ReadStats(args)
    word_probs = ConvertWordCountsToProbs(args, lexicon, word_count)
    lexicon = ConvertWordProbsToLexicon(word_probs)
    filter_lexicon = set()
    # BUG FIX: compare strings by value — "is not ''" relies on interning and
    # is a SyntaxWarning/error on recent Pythons.
    if args.filter_lexicon != '':
        filter_lexicon = ReadLexicon(args.filter_lexicon_handle)
    if args.top_N > 0:
        TakeTopN(lexicon, args.top_N)
    else:
        NormalizeLexicon(lexicon, set_max_to_one = args.set_max_to_one,
                         set_sum_to_one = args.set_sum_to_one,
                         min_prob = args.min_prob)
    WriteLexicon(args, lexicon, filter_lexicon)
    args.out_lexicon_handle.close()
if __name__ == "__main__":
Main() | 0.599837 | 0.176494 |
from __future__ import print_function
import os
import sys
import warnings
import entrypoints
from six.moves import urllib
from mlflow.exceptions import MlflowException
from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE
from mlflow.store import DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH
from mlflow.store.artifact_repository_registry import get_artifact_repository
from mlflow.store.dbmodels.db_types import DATABASE_ENGINES
from mlflow.store.file_store import FileStore
from mlflow.store.rest_store import RestStore
from mlflow.utils import env, rest_utils
from mlflow.utils.databricks_utils import get_databricks_host_creds
_TRACKING_URI_ENV_VAR = "MLFLOW_TRACKING_URI"
_LOCAL_FS_URI_PREFIX = "file:///"
_REMOTE_URI_PREFIX = "http://"
# Extra environment variables which take precedence for setting the basic/bearer
# auth on http requests.
_TRACKING_USERNAME_ENV_VAR = "MLFLOW_TRACKING_USERNAME"
_TRACKING_PASSWORD_ENV_VAR = "<PASSWORD>"
_TRACKING_TOKEN_ENV_VAR = "MLFLOW_TRACKING_TOKEN"
_TRACKING_INSECURE_TLS_ENV_VAR = "MLFLOW_TRACKING_INSECURE_TLS"
_tracking_uri = None
def is_tracking_uri_set():
    """Returns True if the tracking URI has been set, False otherwise."""
    # Set either programmatically (module global) or via the environment.
    return bool(_tracking_uri or env.get_env(_TRACKING_URI_ENV_VAR))
def set_tracking_uri(uri):
    """
    Set the tracking server URI. This does not affect the
    currently active run (if one exists), but takes effect for successive runs.
    :param uri:
        - An empty string, or a local file path, prefixed with ``file:/``. Data is stored
          locally at the provided file (or ``./mlruns`` if empty).
        - An HTTP URI like ``https://my-tracking-server:5000``.
        - A Databricks workspace, provided as the string "databricks" or, to use a
          Databricks CLI
          `profile <https://github.com/databricks/databricks-cli#installation>`_,
          "databricks://<profileName>".
    """
    # Stored in a module-level global; read back by get_tracking_uri(), which
    # gives this explicit setting precedence over the environment variable.
    global _tracking_uri
    _tracking_uri = uri
def get_tracking_uri():
    """
    Get the current tracking URI. This may not correspond to the tracking URI of
    the currently active run, since the tracking URI can be updated via ``set_tracking_uri``.
    :return: The tracking URI.
    """
    # Precedence: explicit set_tracking_uri() > environment variable > local
    # default directory.
    if _tracking_uri is not None:
        return _tracking_uri
    env_uri = env.get_env(_TRACKING_URI_ENV_VAR)
    if env_uri is not None:
        return env_uri
    return os.path.abspath(DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH)
def get_artifact_uri(run_id, artifact_path=None):
    """
    Get the absolute URI of the specified artifact in the specified run. If `path` is not specified,
    the artifact root URI of the specified run will be returned; calls to ``log_artifact``
    and ``log_artifacts`` write artifact(s) to subdirectories of the artifact root URI.
    :param run_id: The ID of the run for which to obtain an absolute artifact URI.
    :param artifact_path: The run-relative artifact path. For example,
                          ``path/to/artifact``. If unspecified, the artifact root URI for the
                          specified run will be returned.
    :return: An *absolute* URI referring to the specified artifact or the specified run's artifact
             root. For example, if an artifact path is provided and the specified run uses an
             S3-backed store, this may be a uri of the form
             ``s3://<bucket_name>/path/to/artifact/root/path/to/artifact``. If an artifact path
             is not provided and the specified run uses an S3-backed store, this may be a URI of
             the form ``s3://<bucket_name>/path/to/artifact/root``.
    :raises MlflowException: (INVALID_PARAMETER_VALUE) when run_id is falsy.
    """
    if not run_id:
        raise MlflowException(
            message="A run_id must be specified in order to obtain an artifact uri!",
            error_code=INVALID_PARAMETER_VALUE)
    store = _get_store()
    run = store.get_run(run_id)
    if artifact_path is None:
        return run.info.artifact_uri
    else:
        # Path separators may not be consistent across all artifact repositories. Therefore, when
        # joining the run's artifact root directory with the artifact's relative path, we use the
        # path module defined by the appropriate artifact repository
        artifact_path_module =\
            get_artifact_repository(run.info.artifact_uri, store).get_path_module()
        return artifact_path_module.join(run.info.artifact_uri, artifact_path)
def _download_artifact_from_uri(artifact_uri, output_path=None):
    """
    :param artifact_uri: The *absolute* URI of the artifact to download.
    :param output_path: The local filesystem path to which to download the artifact. If unspecified,
                        a local output path will be created.
    """
    store = _get_store(artifact_uri=artifact_uri)
    # Split the URI into a parent "directory" and the final segment using the
    # repository's own path module, since separators vary per repository.
    # NOTE(review): this assumes the URI's last segment names the artifact
    # inside its parent repository — confirm against repository implementations.
    artifact_path_module =\
        get_artifact_repository(artifact_uri, store).get_path_module()
    artifact_src_dir = artifact_path_module.dirname(artifact_uri)
    artifact_src_relative_path = artifact_path_module.basename(artifact_uri)
    artifact_repo = get_artifact_repository(
        artifact_uri=artifact_src_dir, store=store)
    return artifact_repo.download_artifacts(
        artifact_path=artifact_src_relative_path, dst_path=output_path)
def _is_local_uri(uri):
scheme = urllib.parse.urlparse(uri).scheme
return uri != 'databricks' and (scheme == '' or scheme == 'file')
def _is_http_uri(uri):
scheme = urllib.parse.urlparse(uri).scheme
return scheme == 'http' or scheme == 'https'
def _is_databricks_uri(uri):
"""Databricks URIs look like 'databricks' (default profile) or 'databricks://profile'"""
scheme = urllib.parse.urlparse(uri).scheme
return scheme == 'databricks' or uri == 'databricks'
def _get_file_store(store_uri, **_):
    """Build a FileStore rooted at the URI's path (falsy URI -> None, letting
    FileStore pick its default root)."""
    if store_uri:
        root = urllib.parse.urlparse(store_uri).path
    else:
        root = None
    return FileStore(root, root)
def _is_database_uri(uri):
    """True when the URI scheme names a supported database engine."""
    return urllib.parse.urlparse(uri).scheme in DATABASE_ENGINES
def _get_sqlalchemy_store(store_uri, artifact_uri):
    """Build a SqlAlchemyStore for a database-backed tracking URI, defaulting
    the artifact root to the local default path."""
    # Function-scope import — presumably to defer loading the sqlalchemy
    # dependency until a database scheme is actually used; confirm.
    from mlflow.store.sqlalchemy_store import SqlAlchemyStore
    if artifact_uri is None:
        artifact_uri = DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH
    return SqlAlchemyStore(store_uri, artifact_uri)
def _get_rest_store(store_uri, **_):
    """Build a RestStore whose credentials are read lazily from the
    MLFLOW_TRACKING_* environment variables on each request."""
    def get_default_host_creds():
        return rest_utils.MlflowHostCreds(
            host=store_uri,
            username=os.environ.get(_TRACKING_USERNAME_ENV_VAR),
            # BUG FIX: restore the os.environ.get call that was mangled to
            # "<PASSWORD>(...)" (a syntax error) by an anonymization pass.
            password=os.environ.get(_TRACKING_PASSWORD_ENV_VAR),
            token=os.environ.get(_TRACKING_TOKEN_ENV_VAR),
            ignore_tls_verification=os.environ.get(_TRACKING_INSECURE_TLS_ENV_VAR) == 'true',
        )
    return RestStore(get_default_host_creds)
def get_db_profile_from_uri(uri):
    """
    Get the Databricks profile specified by the tracking URI (if any), otherwise
    returns None.
    """
    parsed = urllib.parse.urlparse(uri)
    return parsed.netloc if parsed.scheme == "databricks" else None
def _get_databricks_rest_store(store_uri, **_):
    """Build a RestStore whose credentials come from the Databricks CLI
    profile parsed out of the URI (empty/None for the default profile)."""
    profile = get_db_profile_from_uri(store_uri)
    # Credentials are resolved lazily, per request, via the closure.
    return RestStore(lambda: get_databricks_host_creds(profile))
class TrackingStoreRegistry:
    """Maps store-URI schemes to tracking-store builder callables.

    Builders are added either directly via ``register`` or in bulk from the
    ``mlflow.tracking_store`` entrypoint group via ``register_entrypoints``.
    ``get_store`` selects the builder matching the scheme of the given (or
    environment-inferred) store URI and invokes it with the same arguments.
    """
    def __init__(self):
        # scheme -> builder(store_uri=..., artifact_uri=...)
        self._registry = {}
    def register(self, scheme, store_builder):
        """Associate ``store_builder`` with URIs of the given scheme."""
        self._registry[scheme] = store_builder
    def register_entrypoints(self):
        """Register tracking stores provided by other packages"""
        for entrypoint in entrypoints.get_group_all("mlflow.tracking_store"):
            try:
                self.register(entrypoint.name, entrypoint.load())
            except (AttributeError, ImportError) as exc:
                # A broken third-party plugin must not take down MLflow;
                # surface the failure as a warning instead.
                warnings.warn(
                    'Failure attempting to register tracking store for scheme "{}": {}'.format(
                        entrypoint.name, str(exc)
                    ),
                    stacklevel=2
                )
    def get_store(self, store_uri=None, artifact_uri=None):
        """Instantiate a store for ``store_uri``.

        :param store_uri: The store URI; inferred from the environment when
                          None. Passed through to the selected builder.
        :param artifact_uri: Artifact repository URI, passed through to the
                             builder.
        :return: An instance of `mlflow.store.AbstractStore` built for the
                 URI's scheme.
        :raises MlflowException: when no builder is registered for the scheme.
        """
        uri = get_tracking_uri() if store_uri is None else store_uri
        if uri == 'databricks':
            # Add colon so databricks is parsed as scheme
            uri += ':'
        scheme = urllib.parse.urlparse(uri).scheme
        if scheme not in self._registry:
            raise MlflowException(
                "Could not find a registered tracking store for: {}. "
                "Currently registered schemes are: {}".format(
                    uri, list(self._registry.keys())
                )
            )
        return self._registry[scheme](store_uri=uri, artifact_uri=artifact_uri)
# Default scheme -> builder wiring for the module-level registry singleton.
_tracking_store_registry = TrackingStoreRegistry()
_tracking_store_registry.register('', _get_file_store)
_tracking_store_registry.register('file', _get_file_store)
_tracking_store_registry.register('databricks', _get_databricks_rest_store)
for scheme in ['http', 'https']:
    _tracking_store_registry.register(scheme, _get_rest_store)
for scheme in DATABASE_ENGINES:
    _tracking_store_registry.register(scheme, _get_sqlalchemy_store)
# Entrypoint plugins are registered last, so they can override the defaults
# above (register simply reassigns the scheme's dict entry).
_tracking_store_registry.register_entrypoints()
def _get_store(store_uri=None, artifact_uri=None):
    # Thin module-level facade over the registry singleton; store_uri=None
    # lets the registry infer the URI from the environment.
    return _tracking_store_registry.get_store(store_uri, artifact_uri)
def _get_model_log_dir(model_name, run_id):
    """Download the artifacts logged under `model_name` for `run_id` and
    return the local path they were downloaded to."""
    if not run_id:
        raise Exception("Must specify a run_id to get logging directory for a model.")
    store = _get_store()
    run = store.get_run(run_id)
    artifact_repo = get_artifact_repository(run.info.artifact_uri, store)
    return artifact_repo.download_artifacts(model_name)
def _get_git_url_if_present(uri):
    """
    Return the path git_uri#sub_directory if the URI passed is a local path that's part of
    a Git repo, or returns the original URI otherwise.
    :param uri: The expanded uri
    :return: The git_uri#sub_directory if the uri is part of a Git repo,
             otherwise return the original uri
    """
    if '#' in uri:
        # Already a URI in git repo format
        return uri
    try:
        from git import Repo, InvalidGitRepositoryError, GitCommandNotFound, NoSuchPathError
    except ImportError as e:
        # GitPython is optional: degrade gracefully and return the URI as-is.
        print("Notice: failed to import Git (the git executable is probably not on your PATH),"
              " so Git SHA is not available. Error: %s" % e, file=sys.stderr)
        return uri
    try:
        # Check whether this is part of a git repo
        repo = Repo(uri, search_parent_directories=True)
        # Repo url
        repo_url = "file://%s" % repo.working_tree_dir
        # Sub directory relative to the repo root (empty when uri IS the root)
        rlpath = uri.replace(repo.working_tree_dir, '')
        if (rlpath == ''):
            git_path = repo_url
        elif (rlpath[0] == '/'):
            git_path = repo_url + '#' + rlpath[1:]
        else:
            git_path = repo_url + '#' + rlpath
        return git_path
    except (InvalidGitRepositoryError, GitCommandNotFound, ValueError, NoSuchPathError):
return uri | mlflow/tracking/utils.py | from __future__ import print_function
import os
import sys
import warnings
import entrypoints
from six.moves import urllib
from mlflow.exceptions import MlflowException
from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE
from mlflow.store import DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH
from mlflow.store.artifact_repository_registry import get_artifact_repository
from mlflow.store.dbmodels.db_types import DATABASE_ENGINES
from mlflow.store.file_store import FileStore
from mlflow.store.rest_store import RestStore
from mlflow.utils import env, rest_utils
from mlflow.utils.databricks_utils import get_databricks_host_creds
_TRACKING_URI_ENV_VAR = "MLFLOW_TRACKING_URI"
_LOCAL_FS_URI_PREFIX = "file:///"
_REMOTE_URI_PREFIX = "http://"
# Extra environment variables which take precedence for setting the basic/bearer
# auth on http requests.
_TRACKING_USERNAME_ENV_VAR = "MLFLOW_TRACKING_USERNAME"
_TRACKING_PASSWORD_ENV_VAR = "<PASSWORD>"
_TRACKING_TOKEN_ENV_VAR = "MLFLOW_TRACKING_TOKEN"
_TRACKING_INSECURE_TLS_ENV_VAR = "MLFLOW_TRACKING_INSECURE_TLS"
_tracking_uri = None
def is_tracking_uri_set():
"""Returns True if the tracking URI has been set, False otherwise."""
if _tracking_uri or env.get_env(_TRACKING_URI_ENV_VAR):
return True
return False
def set_tracking_uri(uri):
"""
Set the tracking server URI. This does not affect the
currently active run (if one exists), but takes effect for successive runs.
:param uri:
- An empty string, or a local file path, prefixed with ``file:/``. Data is stored
locally at the provided file (or ``./mlruns`` if empty).
- An HTTP URI like ``https://my-tracking-server:5000``.
- A Databricks workspace, provided as the string "databricks" or, to use a
Databricks CLI
`profile <https://github.com/databricks/databricks-cli#installation>`_,
"databricks://<profileName>".
"""
global _tracking_uri
_tracking_uri = uri
def get_tracking_uri():
"""
Get the current tracking URI. This may not correspond to the tracking URI of
the currently active run, since the tracking URI can be updated via ``set_tracking_uri``.
:return: The tracking URI.
"""
global _tracking_uri
if _tracking_uri is not None:
return _tracking_uri
elif env.get_env(_TRACKING_URI_ENV_VAR) is not None:
return env.get_env(_TRACKING_URI_ENV_VAR)
else:
return os.path.abspath(DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH)
def get_artifact_uri(run_id, artifact_path=None):
"""
Get the absolute URI of the specified artifact in the specified run. If `path` is not specified,
the artifact root URI of the specified run will be returned; calls to ``log_artifact``
and ``log_artifacts`` write artifact(s) to subdirectories of the artifact root URI.
:param run_id: The ID of the run for which to obtain an absolute artifact URI.
:param artifact_path: The run-relative artifact path. For example,
``path/to/artifact``. If unspecified, the artifact root URI for the
specified run will be returned.
:return: An *absolute* URI referring to the specified artifact or the specified run's artifact
root. For example, if an artifact path is provided and the specified run uses an
S3-backed store, this may be a uri of the form
``s3://<bucket_name>/path/to/artifact/root/path/to/artifact``. If an artifact path
is not provided and the specified run uses an S3-backed store, this may be a URI of
the form ``s3://<bucket_name>/path/to/artifact/root``.
"""
if not run_id:
raise MlflowException(
message="A run_id must be specified in order to obtain an artifact uri!",
error_code=INVALID_PARAMETER_VALUE)
store = _get_store()
run = store.get_run(run_id)
if artifact_path is None:
return run.info.artifact_uri
else:
# Path separators may not be consistent across all artifact repositories. Therefore, when
# joining the run's artifact root directory with the artifact's relative path, we use the
# path module defined by the appropriate artifact repository
artifact_path_module =\
get_artifact_repository(run.info.artifact_uri, store).get_path_module()
return artifact_path_module.join(run.info.artifact_uri, artifact_path)
def _download_artifact_from_uri(artifact_uri, output_path=None):
"""
:param artifact_uri: The *absolute* URI of the artifact to download.
:param output_path: The local filesystem path to which to download the artifact. If unspecified,
a local output path will be created.
"""
store = _get_store(artifact_uri=artifact_uri)
artifact_path_module =\
get_artifact_repository(artifact_uri, store).get_path_module()
artifact_src_dir = artifact_path_module.dirname(artifact_uri)
artifact_src_relative_path = artifact_path_module.basename(artifact_uri)
artifact_repo = get_artifact_repository(
artifact_uri=artifact_src_dir, store=store)
return artifact_repo.download_artifacts(
artifact_path=artifact_src_relative_path, dst_path=output_path)
def _is_local_uri(uri):
scheme = urllib.parse.urlparse(uri).scheme
return uri != 'databricks' and (scheme == '' or scheme == 'file')
def _is_http_uri(uri):
scheme = urllib.parse.urlparse(uri).scheme
return scheme == 'http' or scheme == 'https'
def _is_databricks_uri(uri):
"""Databricks URIs look like 'databricks' (default profile) or 'databricks://profile'"""
scheme = urllib.parse.urlparse(uri).scheme
return scheme == 'databricks' or uri == 'databricks'
def _get_file_store(store_uri, **_):
path = urllib.parse.urlparse(store_uri).path if store_uri else None
return FileStore(path, path)
def _is_database_uri(uri):
    """Return True when ``uri``'s scheme names one of the supported SQL
    database engines (``DATABASE_ENGINES``)."""
    # Idiom fix: collapse the redundant `if not in: return False / return True`
    # into a direct membership test.
    return urllib.parse.urlparse(uri).scheme in DATABASE_ENGINES
def _get_sqlalchemy_store(store_uri, artifact_uri):
    """Build a ``SqlAlchemyStore`` backed by ``store_uri``.

    :param artifact_uri: Artifact root; falls back to the local default
        (``DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH``) when None.
    """
    from mlflow.store.sqlalchemy_store import SqlAlchemyStore
    effective_artifact_uri = (
        DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH if artifact_uri is None else artifact_uri
    )
    return SqlAlchemyStore(store_uri, effective_artifact_uri)
def _get_rest_store(store_uri, **_):
    """Build a ``RestStore`` that talks to the tracking server at ``store_uri``.

    Credentials are resolved lazily from the environment on every request so
    that changes made after store construction are still picked up.
    """
    def get_default_host_creds():
        return rest_utils.MlflowHostCreds(
            host=store_uri,
            username=os.environ.get(_TRACKING_USERNAME_ENV_VAR),
            # BUG FIX: the password lookup had been replaced by a '<PASSWORD>'
            # placeholder (anonymization residue, not valid Python); restore the
            # os.environ.get call, matching the username/token lookups.
            password=os.environ.get(_TRACKING_PASSWORD_ENV_VAR),
            token=os.environ.get(_TRACKING_TOKEN_ENV_VAR),
            ignore_tls_verification=os.environ.get(_TRACKING_INSECURE_TLS_ENV_VAR) == 'true',
        )
    return RestStore(get_default_host_creds)
def get_db_profile_from_uri(uri):
    """
    Get the Databricks profile specified by the tracking URI (if any), otherwise
    returns None.
    """
    parsed = urllib.parse.urlparse(uri)
    return parsed.netloc if parsed.scheme == "databricks" else None
def _get_databricks_rest_store(store_uri, **_):
    """Build a ``RestStore`` whose credentials come from the Databricks profile
    embedded in ``store_uri`` (if any)."""
    profile = get_db_profile_from_uri(store_uri)

    def host_creds():
        return get_databricks_host_creds(profile)

    return RestStore(host_creds)
class TrackingStoreRegistry:
    """Scheme-based registry for tracking store implementations

    This class allows the registration of a function or class to provide an
    implementation for a given scheme of `store_uri` through the `register`
    methods. Implementations declared though the entrypoints
    `mlflow.tracking_store` group can be automatically registered through the
    `register_entrypoints` method.

    When instantiating a store through the `get_store` method, the scheme of
    the store URI provided (or inferred from environment) will be used to
    select which implementation to instantiate, which will be called with same
    arguments passed to the `get_store` method.
    """

    def __init__(self):
        self._registry = {}

    def register(self, scheme, store_builder):
        # Later registrations for the same scheme overwrite earlier ones,
        # which lets entrypoint packages override the built-ins.
        self._registry[scheme] = store_builder

    def register_entrypoints(self):
        """Register tracking stores provided by other packages"""
        for entrypoint in entrypoints.get_group_all("mlflow.tracking_store"):
            try:
                self.register(entrypoint.name, entrypoint.load())
            except (AttributeError, ImportError) as exc:
                # A broken third-party entrypoint must not take down the whole
                # registry; warn and keep going.
                warnings.warn(
                    'Failure attempting to register tracking store for scheme "{}": {}'.format(
                        entrypoint.name, str(exc)
                    ),
                    stacklevel=2
                )

    def get_store(self, store_uri=None, artifact_uri=None):
        """Get a store from the registry based on the scheme of store_uri

        :param store_uri: The store URI. If None, it will be inferred from the environment. This URI
                          is used to select which tracking store implementation to instantiate and
                          is passed to the constructor of the implementation.
        :param artifact_uri: Artifact repository URI. Passed through to the tracking store
                             implementation.

        :return: An instance of `mlflow.store.AbstractStore` that fulfills the store URI
                 requirements.
        """
        resolved_uri = get_tracking_uri() if store_uri is None else store_uri
        if resolved_uri == 'databricks':
            # Add colon so databricks is parsed as scheme
            resolved_uri += ':'
        scheme = urllib.parse.urlparse(resolved_uri).scheme
        builder = self._registry.get(scheme)
        if builder is None:
            raise MlflowException(
                "Could not find a registered tracking store for: {}. "
                "Currently registered schemes are: {}".format(
                    resolved_uri, list(self._registry.keys())
                )
            )
        return builder(store_uri=resolved_uri, artifact_uri=artifact_uri)
# Default tracking-store registry. Built-in schemes: local filesystem paths
# (no scheme, or 'file'), Databricks workspaces, plain HTTP(S) tracking
# servers, and every SQLAlchemy-supported database engine. Entrypoint
# registration runs last so external packages can add to (or override) the
# built-in schemes.
_tracking_store_registry = TrackingStoreRegistry()
_tracking_store_registry.register('', _get_file_store)
_tracking_store_registry.register('file', _get_file_store)
_tracking_store_registry.register('databricks', _get_databricks_rest_store)
for scheme in ['http', 'https']:
    _tracking_store_registry.register(scheme, _get_rest_store)
for scheme in DATABASE_ENGINES:
    _tracking_store_registry.register(scheme, _get_sqlalchemy_store)
_tracking_store_registry.register_entrypoints()
def _get_store(store_uri=None, artifact_uri=None):
    """Resolve a tracking store for ``store_uri`` via the module-level registry."""
    return _tracking_store_registry.get_store(store_uri=store_uri, artifact_uri=artifact_uri)
def _get_model_log_dir(model_name, run_id):
    """Download the logged model artifacts named ``model_name`` for ``run_id``
    and return the local download path.

    :raises MlflowException: If ``run_id`` is empty or None.
    """
    if not run_id:
        # Consistency fix: the other run-scoped helpers in this module raise
        # MlflowException with INVALID_PARAMETER_VALUE for a missing run_id;
        # a bare Exception was raised here. MlflowException subclasses
        # Exception, so callers catching Exception are unaffected.
        raise MlflowException(
            message="Must specify a run_id to get logging directory for a model.",
            error_code=INVALID_PARAMETER_VALUE)
    store = _get_store()
    run = store.get_run(run_id)
    artifact_repo = get_artifact_repository(run.info.artifact_uri, store)
    return artifact_repo.download_artifacts(model_name)
def _get_git_url_if_present(uri):
"""
Return the path git_uri#sub_directory if the URI passed is a local path that's part of
a Git repo, or returns the original URI otherwise.
:param uri: The expanded uri
:return: The git_uri#sub_directory if the uri is part of a Git repo,
otherwise return the original uri
"""
if '#' in uri:
# Already a URI in git repo format
return uri
try:
from git import Repo, InvalidGitRepositoryError, GitCommandNotFound, NoSuchPathError
except ImportError as e:
print("Notice: failed to import Git (the git executable is probably not on your PATH),"
" so Git SHA is not available. Error: %s" % e, file=sys.stderr)
return uri
try:
# Check whether this is part of a git repo
repo = Repo(uri, search_parent_directories=True)
# Repo url
repo_url = "file://%s" % repo.working_tree_dir
# Sub directory
rlpath = uri.replace(repo.working_tree_dir, '')
if (rlpath == ''):
git_path = repo_url
elif (rlpath[0] == '/'):
git_path = repo_url + '#' + rlpath[1:]
else:
git_path = repo_url + '#' + rlpath
return git_path
except (InvalidGitRepositoryError, GitCommandNotFound, ValueError, NoSuchPathError):
return uri | 0.640861 | 0.077204 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from datasets.all_datasets_meta.datasets_meta import DatasetsMeta
from models.conv_util import ResConvOps, gather_second_d
from utils.tf_util import TfUtil
DEBUG = False
DEFAULT_DTYPE = tf.float32
CASTABLE_TYPES = (tf.float16,)
ALLOWED_TYPES = (DEFAULT_DTYPE,) + CASTABLE_TYPES
def ele_in_feature(features, ele, dset_shape_idx):
  """Extract the feature column(s) named ``ele`` from the packed feature dict.

  :param features: dict mapping group name -> tensor whose last axis packs elements.
  :param ele: element name to look up (e.g. 'xyz').
  :param dset_shape_idx: dataset metadata; ``dset_shape_idx['indices']`` maps
      group name -> {element name: indices within the group's last axis}.
  :return: tensor with the requested element gathered along the last axis.
  :raises ValueError: if ``ele`` is not found in any group.
  """
  ds_idxs = dset_shape_idx['indices']
  for g in ds_idxs:
    if ele in ds_idxs[g]:
      ele_idx = ds_idxs[g][ele]
      ele_data = tf.gather(features[g], ele_idx, axis=-1)
      return ele_data
  # Fix: the original used Python 2 'raise ValueError, msg' syntax, which is a
  # SyntaxError under Python 3; the call form is valid in both.
  raise ValueError(ele + ' not found')
class Model(ResConvOps):
  """Mesh segmentation network dispatching between two back-ends via ``CNN``:

  * 'FAN'      -> FanCnn: per-vertex fan (edge-neighborhood) convolutions with
                  mesh pooling, optional global-feature fusion and multi-scale
                  feature back-propagation; returns per-face logits.
  * 'TRIANGLE' -> TriangleCnn: vertex->edge->face->vertex message passing;
                  returns per-vertex "simplicity" logits.
  """
  # Back-end selector; only 'FAN' and 'TRIANGLE' are handled below.
  CNN = 'FAN'

  def __init__(self, net_data_configs, data_format, dtype):
    # Dataset / network configuration handed down by the training harness.
    self.dset_shape_idx = net_data_configs['dset_shape_idx']
    self.data_configs = net_data_configs['data_configs']
    self.dset_metas = net_data_configs['dset_metas']
    self.net_flag = net_data_configs['net_flag']
    self.block_paras = BlockParas(net_data_configs['block_configs'])
    super(Model, self).__init__(net_data_configs, data_format, dtype)
    # Select the mesh CNN implementation and its conv-block builder
    # (block functions are provided by the ResConvOps base class).
    if self.CNN == 'TRIANGLE':
      cnn_class = TriangleCnn
      self.block_fn = self.inception_block_v2
    elif self.CNN == 'FAN':
      cnn_class = FanCnn
      self.block_fn = self.fan_block_v2
    self.mesh_cnn = cnn_class(self.blocks_layers, self.block_fn, self.block_paras)

  def __call__(self, features, is_training):
    '''
    vertices: [B,N,C]
    edges_per_vertex: [B,N,10*2]
    '''
    if self.CNN == 'TRIANGLE':
      return self.main_triangle_cnn(features, is_training)
    elif self.CNN == 'FAN':
      return self.main_fan_cnn(features, is_training)

  def main_test_pool(self, features, is_training=True):
    # Debug-only driver: pools the mesh three times and drops into pdb at each
    # step so the pooled tensors can be inspected. Returns nothing.
    self.is_training = is_training
    inputs, xyz = self.parse_inputs(features)
    vertices = inputs['vertices']
    edgev_per_vertex = inputs['edgev_per_vertex']
    valid_ev_num_pv = inputs['valid_ev_num_pv']
    # NOTE(review): duplicated assignment; the line below is redundant.
    valid_ev_num_pv = inputs['valid_ev_num_pv']
    vidx_per_face = inputs['vidx_per_face']
    valid_num_face = tf.cast(inputs['valid_num_face'], tf.int32)
    vertices = tf.expand_dims(vertices, 2)
    for s in range(3):
      vertices_, edgev_per_vertex_, xyz_, valid_ev_num_pv_, backprop_vidx, \
        r2s_fail_mask = FanCnn.pool_mesh(s,
        vertices, edgev_per_vertex, xyz, valid_ev_num_pv)
      import pdb; pdb.set_trace() # XXX BREAKPOINT
      vertices, edgev_per_vertex, xyz, valid_ev_num_pv =\
        tf.expand_dims(vertices_,2), edgev_per_vertex_, xyz_, valid_ev_num_pv_

  def main_fan_cnn(self, features, is_training):
    """FAN pipeline: multi-scale encoder with mesh pooling -> global feature
    fusion -> coarse-to-fine feature back-propagation -> per-face classifier.
    Returns (flogits, flabel_weight)."""
    self.is_training = is_training
    inputs, xyz = self.parse_inputs(features)
    vertices = inputs['vertices']
    edgev_per_vertex = inputs['edgev_per_vertex']
    valid_ev_num_pv = inputs['valid_ev_num_pv']
    # NOTE(review): duplicated assignment; the line below is redundant.
    valid_ev_num_pv = inputs['valid_ev_num_pv']
    vidx_per_face = inputs['vidx_per_face']
    valid_num_face = tf.cast(inputs['valid_num_face'], tf.int32)
    #***************************************************************************
    # multi scale vertex feature encoder
    scale_n = self.block_paras.scale_num
    vertices_scales = []
    backprop_vidx_scales = []
    r2s_fail_mask_scales = []
    for scale in range(scale_n):
      with tf.variable_scope('FanCnn_S%d'%(scale)):
        vertices = self.mesh_cnn.update_vertex(scale, is_training, vertices,
                                               edgev_per_vertex, valid_ev_num_pv)
      if scale < self.block_paras.scale_num-1:
        # Save the fine-scale features and the pooling index maps so the
        # decoder can propagate features back to this resolution later.
        vertices_scales.append(vertices)
        with tf.variable_scope('MeshPool_S%d'%(scale)):
          vertices, edgev_per_vertex, xyz, valid_ev_num_pv, backprop_vidx, \
            r2s_fail_mask = FanCnn.pool_mesh(scale,
            vertices, edgev_per_vertex, xyz, valid_ev_num_pv)
        backprop_vidx_scales.append(backprop_vidx)
        r2s_fail_mask_scales.append(r2s_fail_mask)
    #***************************************************************************
    with tf.variable_scope('CatGlobal'):
      vertices = self.add_global(vertices)
    #***************************************************************************
    # multi scale feature back propogation
    for i in range(scale_n-1):
      # Walk the scales coarse-to-fine: scale_n-2 .. 0.
      scale = scale_n -2 - i
      with tf.variable_scope('FanCnn_S%d'%(scale)):
        vertices = self.feature_backprop(scale, vertices, vertices_scales[scale],
                       backprop_vidx_scales[scale], r2s_fail_mask_scales[scale])
    flogits, flabel_weight = self.face_classifier(vertices, vidx_per_face, valid_num_face)
    self.log_model_summary()
    return flogits, flabel_weight

  def feature_backprop(self, scale, cur_vertices, lasts_vertices, backprop_vidx, r2s_fail_mask):
    """Up-sample coarse features to the next finer scale and fuse with the
    finer-scale features saved by the encoder.

    cur_vertices: coarse-scale vertex features
    lasts_vertices: finer-scale features saved before pooling
    backprop_vidx: for each fine vertex, the index of its coarse source vertex
    r2s_fail_mask: nonzero where the raw->sampled mapping failed
    """
    cur_vertices = gather_second_d(cur_vertices, backprop_vidx)
    r2s_fail_mask = tf.cast(tf.expand_dims(tf.expand_dims(r2s_fail_mask, -1),-1), tf.float32)
    # Zero out up-sampled features of vertices whose mapping failed.
    cur_vertices = cur_vertices * (1-r2s_fail_mask)
    vertices = tf.concat([lasts_vertices, cur_vertices], -1)
    blocks_params = self.block_paras.get_block_paras('backprop', scale)
    vertices = self.blocks_layers(vertices, blocks_params, self.block_fn,
                   self.is_training, 'BackProp_%d'%(scale), with_initial_layer=False)
    return vertices

  def add_global(self, vertices):
    """Append a max-pooled global feature to every vertex, then optionally
    re-encode the fused features. Each stage is skipped when its 'global'
    block config is empty."""
    blocks_params = self.block_paras.get_block_paras('global', 0)
    if not blocks_params:
      return vertices
    #***************************************************************************
    global_f = tf.reduce_max(vertices, 1, keepdims=True)
    # encoder global feature
    global_f = self.blocks_layers(global_f, blocks_params, self.block_fn,
                                  self.is_training, 'Global',
                                  with_initial_layer=False)
    nv = TfUtil.get_tensor_shape(vertices)[1]
    # Broadcast the single global feature to every vertex and concatenate.
    global_f = tf.tile(global_f, [1, nv, 1, 1])
    vertices = tf.concat([vertices, global_f], -1)
    self.log_tensor_p(vertices, '', 'cat global')
    #***************************************************************************
    # encoder fused vertex feature
    blocks_params = self.block_paras.get_block_paras('global', 1)
    if not blocks_params:
      return vertices
    vertices = self.blocks_layers(vertices, blocks_params, self.block_fn,
                                  self.is_training, 'GlobalFusedV',
                                  with_initial_layer=False)
    return vertices

  def face_classifier(self, vertices, vidx_per_face, valid_num_face):
    """Per-face logits: classify vertices with a dense head, then average the
    logits of each face's vertices. Returns (flogits, flabel_weight) where
    flabel_weight zeroes padded faces."""
    dense_filters = self.block_paras.dense_filters + [self.dset_metas.num_classes]
    vlogits = self.dense_block(vertices, dense_filters, self.is_training)
    vlogits = tf.squeeze(vlogits, 2)
    flogits = gather_second_d(vlogits, vidx_per_face)
    flogits = tf.reduce_mean(flogits, 2)
    fn = TfUtil.get_tensor_shape(vidx_per_face)[1]
    # Faces with index >= valid_num_face are padding and get weight 0.
    # NOTE(review): self.batch_size is not assigned in this class (the
    # assignment in parse_inputs is commented out) -- presumably set by
    # ResConvOps; confirm.
    valid_face_mask = tf.tile(tf.reshape(tf.range(fn), [1,fn]), [self.batch_size,1])
    flabel_weight = tf.cast(tf.less(valid_face_mask, valid_num_face), tf.float32)
    return flogits, flabel_weight

  def main_triangle_cnn(self, features, is_training):
    """TRIANGLE pipeline: multi-scale vertex updates, then a per-vertex binary
    'simplicity' classifier. Returns (simplicity_logits, simplicity_label).

    NOTE(review): parse_inputs returns a (inputs, xyz) tuple but only one name
    is bound here, so the subscriptions below would fail at runtime as
    written -- confirm before enabling the TRIANGLE path.
    """
    self.is_training = is_training
    inputs = self.parse_inputs(features)
    vertices = inputs['vertices']
    fidx_per_vertex = inputs['fidx_per_vertex']
    fidx_pv_empty_mask = inputs['fidx_pv_empty_mask']
    vidx_per_face = inputs['vidx_per_face']
    valid_num_face = inputs['valid_num_face']
    vertices_scales = []
    for scale in range(self.block_paras.scale_num):
      with tf.variable_scope('S%d'%(scale)):
        vertices = self.mesh_cnn.update_vertex(scale, is_training, vertices,
                  vidx_per_face, valid_num_face, fidx_per_vertex, fidx_pv_empty_mask)
        vertices_scales.append(vertices)
    #vertices = tf.concat(vertices_scales, -1)
    simplicity_logits = self.simplicity_classifier(vertices)
    simplicity_label = self.simplicity_label(features)
    self.log_model_summary()
    return simplicity_logits, simplicity_label

  def simplicity_label(self, features):
    """A vertex is 'simple' when at least 2 neighbors share its normal AND at
    least 2 share its category; returns an int32 0/1 label per vertex."""
    min_same_norm_mask = 2
    min_same_category_mask = 2
    same_category_mask = self.get_ele(features, 'same_category_mask')
    same_category_mask = tf.greater_equal(same_category_mask, min_same_category_mask)
    same_normal_mask = self.get_ele(features, 'same_normal_mask')
    same_normal_mask = tf.greater_equal(same_normal_mask, min_same_norm_mask)
    simplicity_mask = tf.logical_and(same_normal_mask, same_category_mask)
    simplicity_mask = tf.squeeze(simplicity_mask, -1)
    simplicity_label = tf.cast(simplicity_mask, tf.int32)
    return simplicity_label

  def get_ele(self, features, ele):
    # Look up one named element (e.g. 'xyz') from the packed feature dict.
    return ele_in_feature(features, ele, self.dset_shape_idx)

  def normalize_xyz(self, xyz):
    """Normalize raw coordinates per data_configs['normxyz']:
    'mean0' centers at the mean, 'min0' shifts the min corner to the origin,
    'max1' shifts the min corner then divides by 5.0 (presumably assumes the
    scene fits a 5-unit cube -- confirm), 'raw' is a passthrough."""
    norm_xyz_method = self.data_configs['normxyz']
    if norm_xyz_method == 'mean0':
      mean_xyz = tf.reduce_mean(xyz, 1, keepdims=True)
      new_xyz = xyz - mean_xyz
    elif norm_xyz_method == 'min0':
      min_xyz = tf.reduce_min(xyz, 1, keepdims=True)
      new_xyz = xyz - min_xyz
    elif norm_xyz_method == 'max1':
      min_xyz = tf.reduce_min(xyz, 1, keepdims=True)
      new_xyz = xyz - min_xyz
      new_xyz = new_xyz / 5.0
    elif norm_xyz_method == 'raw':
      new_xyz = xyz
      pass
    else:
      raise NotImplementedError
    return new_xyz

  def parse_inputs(self, features):
    """Unpack the dataset feature dict into the tensors the CNN needs.

    Returns (inputs, xyz).
    NOTE(review): xyz is only bound when 'xyz' is in data_configs['feed_data'];
    otherwise the final return raises NameError -- presumably xyz is always
    fed; confirm against the data configs.
    """
    inputs = {}
    vertices = []
    for e in self.data_configs['feed_data']:
      ele = self.get_ele(features, e)
      if e=='xyz':
        xyz = ele = self.normalize_xyz(ele)
      vertices.append(ele)
    # Vertex feature = concat of all fed elements along the channel axis.
    inputs['vertices'] = vertices = tf.concat(vertices, -1)
    vshape = TfUtil.get_tensor_shape(vertices)
    #self.batch_size = vshape[0]
    self.num_vertex0 = vshape[1]
    self.log_tensor_p(vertices, 'vertices', 'raw_input')
    inputs['vidx_per_face'] = self.get_ele(features, 'vidx_per_face')
    inputs['valid_num_face'] = features['valid_num_face']
    if self.CNN == 'TRIANGLE':
      inputs['fidx_per_vertex'] = self.get_ele(features, 'fidx_per_vertex')
      inputs['fidx_pv_empty_mask'] = self.get_ele(features, 'fidx_pv_empty_mask')
    elif self.CNN == 'FAN':
      # Truncate each vertex's edge list to the configured fan size.
      edgevnum = self.block_paras.edgevnum
      inputs['edgev_per_vertex'] = self.get_ele(features, 'edgev_per_vertex')[:,:,0:edgevnum]
      inputs['valid_ev_num_pv'] = self.get_ele(features, 'valid_ev_num_pv')
      is_check_data = True
      # check data
      if is_check_data:
        # Guard against negative (invalid) edge-vertex indices at graph time.
        check_ev = tf.assert_greater( tf.reduce_min(inputs['edgev_per_vertex']), -1,
                                      message='found neg edgev_per_vertex')
        with tf.control_dependencies([check_ev]):
          inputs['edgev_per_vertex'] = tf.identity(inputs['edgev_per_vertex'])
    return inputs, xyz

  def simplicity_classifier(self, vertices):
    # Small dense head -> 2-way (simple / not-simple) logits per vertex.
    dense_filters = [32, 16, 2]
    simplicity_logits = self.dense_block(vertices, dense_filters, self.is_training)
    return simplicity_logits
class FanCnn():
  """Fan-based mesh CNN back-end.

  ``update_vertex`` encodes each vertex from its edge neighborhood ("fan")
  through the configured conv blocks; ``pool_mesh`` down-samples the mesh
  (vertex features, edge lists, coordinates) and returns the index maps needed
  to propagate features back to the finer scale.
  """

  def __init__(self, blocks_layers_fn=None, block_fn=None, block_paras=None,
               ):
    # Shared conv-block builders come from the Model (ResConvOps) instance.
    self.block_fn = block_fn
    self.blocks_layers = blocks_layers_fn
    self.block_paras = block_paras

  def update_vertex(self, scale, is_training, vertices,\
                    edgev_per_vertex, valid_ev_num_pv):
    """Encode vertex features at one scale with the 'vertex' block stack.

    NOTE(review): valid_ev_num_pv is accepted but unused here.
    """
    vertices = tf.expand_dims(vertices, 2)
    blocks_params = self.block_paras.get_block_paras('vertex', scale)
    vertices = self.blocks_layers(vertices, blocks_params, self.block_fn,
                is_training, 'S%d'%( scale),
                edgev_per_vertex=edgev_per_vertex)
    return vertices

  @staticmethod
  def pool_mesh(scale, vertices, edgev_per_vertex, xyz, valid_ev_num_pv, pool_method='mean', pool_rate=0.5):
    '''
    max mean identity
    '''
    if pool_method == 'identity':
      pass
    else:
      # (1) replace vertice features by max/mean group features
      if pool_method == 'max':
        pool_fn = tf.reduce_max
      elif pool_method == 'mean':
        pool_fn = tf.reduce_mean
      vertices = gather_second_d(tf.squeeze(vertices,2), edgev_per_vertex)
      # BUG FIX: the selected pool_fn was previously ignored (tf.reduce_max was
      # hard-coded here), so pool_method='mean' silently behaved as 'max'.
      vertices = pool_fn(vertices, 2)
    # (2) Downsampling vertices: randomly keep pool_rate of them per batch
    # element, with indices sorted so relative vertex order is preserved.
    vn = TfUtil.get_tensor_shape(vertices)[1]
    new_vn = int(pool_rate * vn)
    vertex_sp_indices = []
    batch_size = TfUtil.get_tensor_shape(vertices)[0]
    for bi in range(batch_size):
      vertex_sp_indices.append( tf.expand_dims(tf.random_shuffle(tf.range(vn))[0:new_vn], 0))
    vertex_sp_indices = tf.concat(vertex_sp_indices, 0)
    vertex_sp_indices = tf.contrib.framework.sort(vertex_sp_indices, -1)
    vertices_new = tf.squeeze(gather_second_d(vertices, tf.expand_dims(vertex_sp_indices,-1)), 2)
    xyz_new = tf.squeeze(gather_second_d(xyz, tf.expand_dims(vertex_sp_indices,-1)), 2)
    # (3) New edges: rebuild per-vertex edge lists on the sampled mesh and
    # compute the raw->sampled back-propagation maps, per batch element.
    from datasets.tfrecord_util import MeshSampling
    edgev_per_vertex_new_ls = []
    valid_ev_num_pv_new_ls = []
    backprop_vidx_ls = []
    r2s_fail_mask_ls = []
    for bi in range(batch_size):
      raw_vidx_2_sp_vidx = MeshSampling.get_raw_vidx_2_sp_vidx(vertex_sp_indices[bi], vn)
      edgev_per_vertex_new, valid_ev_num_pv_new, raw_edgev_spvidx = MeshSampling.rich_edges(
          vertex_sp_indices[bi], edgev_per_vertex[bi],
          xyz[bi], raw_vidx_2_sp_vidx, valid_ev_num_pv[bi],
          max_fail_2unit_ev_rate = [2e-2*10, 3e-2*10, 3e-2*10][scale], scale=scale) # 5e-3
      backprop_vidx, r2s_fail_mask = MeshSampling.get_raw2sp(edgev_per_vertex[bi],
          raw_vidx_2_sp_vidx, valid_ev_num_pv[bi], raw_edgev_spvidx,
          max_bp_fail_rate = [9e-4, 9e-4, 5e-3][scale], scale = scale)
      edgev_per_vertex_new_ls.append(tf.expand_dims(edgev_per_vertex_new, 0))
      valid_ev_num_pv_new_ls.append(tf.expand_dims(valid_ev_num_pv_new, 0))
      backprop_vidx_ls.append(tf.expand_dims(backprop_vidx,0))
      r2s_fail_mask_ls.append(tf.expand_dims(r2s_fail_mask,0))
    edgev_per_vertex_new = tf.concat(edgev_per_vertex_new_ls, 0)
    valid_ev_num_pv_new = tf.concat(valid_ev_num_pv_new_ls, 0)
    backprop_vidx = tf.concat(backprop_vidx_ls, 0)
    r2s_fail_mask = tf.concat(r2s_fail_mask_ls, 0)
    return vertices_new, edgev_per_vertex_new, xyz_new, valid_ev_num_pv_new, backprop_vidx, r2s_fail_mask
class TriangleCnn():
  """Vertex -> edge -> face -> vertex message-passing CNN on triangle meshes.

  Each ``update_vertex`` call performs one round: per-face vertex groups are
  split into a face centroid ("global") part and centered per-vertex offsets
  ("edges"); both are encoded, pooled into face features, scattered back to
  vertices, and re-encoded.
  """

  def __init__(self, blocks_layers_fn=None, block_fn=None, block_paras=None,
               ):
    # Shared conv-block builders come from the Model (ResConvOps) instance.
    self.block_fn = block_fn
    self.blocks_layers = blocks_layers_fn
    self.block_paras = block_paras

  def update_vertex(self, scale, is_training, vertices,\
                    vidx_per_face, valid_num_face, fidx_per_vertex, fidx_pv_empty_mask):
    '''
    Inputs:
      vertices: (nv, cv)
      vidx_per_face: (nf, 3)
    Middle:
      face_centroid: (nf, 1, cc)
      edges: (nf, 3, ce)
      faces: (nf, cf)
    Out:
      vertices: (nv, cvo)
    '''
    self.scale = scale
    self.is_training = is_training
    face_centroid, edges = self.vertex_2_edge(vertices, vidx_per_face, valid_num_face)
    edges = self.encoder(edges, 'edge')
    face_centroid = self.encoder(face_centroid, 'centroid')
    faces = self.edge_2_face(edges, face_centroid)
    faces = self.encoder(faces, 'face')
    vertices = self.face_2_vertex(faces, fidx_per_vertex, fidx_pv_empty_mask)
    vertices = self.encoder(vertices, 'vertex')
    return vertices

  def vertex_2_edge(self, vertices, vidx_per_face, valid_num_face):
    # Gather the 3 vertices of each face; the centroid is their mean and the
    # "edges" are each vertex's offset from its face centroid.
    # NOTE(review): valid_num_face is accepted but unused in this method.
    vertices_per_face = gather_second_d(vertices, vidx_per_face)
    face_centroid = tf.reduce_mean(vertices_per_face, 2, keepdims=True) # (nf, 1, cv)
    edges = vertices_per_face - face_centroid # (nf, 3, cv)
    return face_centroid, edges

  def encoder(self, inputs, represent):
    # Run the conv-block stack configured for this representation at the
    # current scale; identity when no block_fn was supplied.
    if not self.block_fn:
      return inputs
    assert represent in ['edge', 'centroid', 'face', 'vertex']
    blocks_params = self.block_paras.get_block_paras(represent, self.scale)
    outputs = self.blocks_layers(inputs, blocks_params, self.block_fn,
                self.is_training, '%s_s%d'%(represent, self.scale) )
    return outputs

  def edge_2_face(self, edges, face_centroid):
    # Pool edge features over each face (max and/or mean per config) and
    # optionally concatenate the encoded centroid as a global face feature.
    # NOTE(review): BlockParas assigns e2fl_pool/f2v_pool/use_face_global_scale0
    # only conditionally -- confirm they are set whenever this path is enabled.
    face_local = []
    if 'max' in self.block_paras.e2fl_pool:
      face_local.append( tf.reduce_max (edges, 2) )
    if 'mean' in self.block_paras.e2fl_pool:
      face_local.append( tf.reduce_mean (edges, 2) )
    face_local = tf.concat(face_local, -1)
    face_global = tf.squeeze(face_centroid, 2)
    use_global = self.scale>0 or self.block_paras.use_face_global_scale0
    if use_global:
      faces = tf.concat([face_local, face_global], -1)
    else:
      faces = face_local
    return faces

  def face_2_vertex(self, faces, fidx_per_vertex, fidx_pv_empty_mask):
    # Scatter face features back to vertices: gather each vertex's incident
    # faces and pool (max and/or empty-masked mean) over them.
    vertices_flat = gather_second_d(faces, fidx_per_vertex)
    vertices = []
    if 'max' in self.block_paras.f2v_pool:
      vertices.append( tf.reduce_max(vertices_flat, 2) )
    if 'mean' in self.block_paras.f2v_pool:
      vertices.append( TfUtil.mask_reduce_mean(vertices_flat, 1-fidx_pv_empty_mask, 2) )
    vertices = tf.concat(vertices, axis=-1) # (nv, 2cf)
    return vertices
import numpy as np
class BlockParas():
  """Parse a block-config dict into per-element, per-scale lists of per-block
  parameter dicts (block size, filters, kernel, stride, padding) for the conv
  block builders.

  Expected ``block_configs`` keys:
    'filters' (required): {element: [scale][block] filter counts}; must
        contain 'vertex', whose scale count defines ``scale_num``.
    'block_sizes', 'kernels', 'strides' (optional): same nesting; any element
        missing from them defaults to all-ones.
    'edgevnum', 'dense_filters' (required), plus the optional TriangleCnn
    pooling options 'e2fl_pool', 'f2v_pool', 'use_face_global_scale0'.
  """

  def __init__(self, block_configs):
    block_sizes = block_configs['block_sizes']
    filters = block_configs['filters']
    # Fix: default the optional tables to empty dicts instead of None -- the
    # membership tests below ('item not in kernels') raise TypeError on None,
    # so a config without 'kernels'/'strides' used to crash.
    kernels = block_configs['kernels'] if 'kernels' in block_configs else {}
    strides = block_configs['strides'] if 'strides' in block_configs else {}
    self.edgevnum = block_configs['edgevnum']
    self.scale_num = len(filters['vertex'])
    # Fix: this previously tested hasattr(self, 'e2fl_pool'), which is always
    # False before the attribute is first assigned, so the TriangleCnn pooling
    # options were never stored. Test the config dict instead.
    if 'e2fl_pool' in block_configs:
      self.e2fl_pool = block_configs['e2fl_pool']
      self.f2v_pool = block_configs['f2v_pool']
      self.use_face_global_scale0 = block_configs['use_face_global_scale0']
    self.dense_filters = block_configs['dense_filters']
    #self.with_globals = [len(fs)>0 for fs in filters['global']]
    all_paras = {}
    for item in filters:
      # Tables missing this element default to all-ones, one entry per filter.
      block_size_is_1 = item not in block_sizes
      kernel_is_1 = item not in kernels
      stride_is_1 = item not in strides
      if block_size_is_1:
        block_sizes[item] = []
      if kernel_is_1:
        kernels[item] = []
      if stride_is_1:
        strides[item] = []
      # Fix: include stride_is_1 in this condition; previously a config where
      # only 'strides' lacked the element left strides[item] empty, and
      # complete_scales_paras indexed past it.
      if block_size_is_1 or kernel_is_1 or stride_is_1:
        scale_num = len(filters[item])
        for s in range(scale_num):
          if block_size_is_1:
            block_sizes[item].append([1] * len(filters[item][s]))
          if kernel_is_1:
            kernels[item].append([1]*len(filters[item][s]))
          if stride_is_1:
            strides[item].append([1] * len(filters[item][s]))
      all_paras[item] = BlockParas.complete_scales_paras(block_sizes[item],
                          filters[item], kernels[item], strides[item])
    self.all_paras = all_paras

  def get_block_paras(self, element, scale):
    """Return the list of per-block parameter dicts for one element/scale
    (None when that scale is configured empty)."""
    return self.all_paras[element][scale]

  @staticmethod
  def complete_scales_paras(block_size, filters, kernels, strides):
    """Build the split per-block parameter dicts for every scale."""
    scale_num = len(block_size)
    scales_blocks_paras = []
    for s in range(scale_num):
      if kernels:
        kernels_s = kernels[s]
      else:
        kernels_s = None
      blocks_paras = BlockParas.complete_paras_1scale(block_size[s], filters[s], kernels_s, strides[s])
      scales_blocks_paras.append(blocks_paras)
    return scales_blocks_paras

  @staticmethod
  def complete_paras_1scale(block_size, filters, kernels, strides):
    """Assemble one scale's parameters; returns None for an empty scale."""
    if len(block_size) == 0:
      return None
    assert not isinstance(block_size[0], list)
    block_size = np.array(block_size)
    filters = np.array(filters)
    block_num = block_size.shape[0]
    blocks_paras = {}
    blocks_paras['block_sizes'] = block_size
    blocks_paras['filters'] = filters
    blocks_paras['strides'] = strides
    blocks_paras['kernels'], blocks_paras['pad_stride1'] = \
        BlockParas.get_1_kernel_block_paras(block_num, kernels)
    blocks_paras = BlockParas.split_block_paras(blocks_paras)
    return blocks_paras

  @staticmethod
  def split_block_paras(block_paras):
    '''
    From one dictionary to one list.
    Original: one dictionary containing lists of len = block_num
    Result: one list of len = block_num, each element a dictionary for one block
    '''
    block_num = block_paras['block_sizes'].shape[0]
    block_paras_splited = []
    for s in range(block_num):
      block_para_s = {}
      for item in block_paras:
        block_para_s[item] = block_paras[item][s]
      block_paras_splited.append(block_para_s)
    return block_paras_splited

  @staticmethod
  def get_1_kernel_block_paras(block_num, kernels_):
    # Per-block kernel sizes (default 1) and 'v' (valid) padding markers.
    if kernels_:
      kernels = [kernels_[i] for i in range(block_num)]
    else:
      kernels = [1 for i in range(block_num)]
    paddings = ['v' for i in range(block_num)]
    return kernels, paddings
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from datasets.all_datasets_meta.datasets_meta import DatasetsMeta
from models.conv_util import ResConvOps, gather_second_d
from utils.tf_util import TfUtil
DEBUG = False
DEFAULT_DTYPE = tf.float32
CASTABLE_TYPES = (tf.float16,)
ALLOWED_TYPES = (DEFAULT_DTYPE,) + CASTABLE_TYPES
def ele_in_feature(features, ele, dset_shape_idx):
  """Extract the feature column(s) named ``ele`` from the packed feature dict.

  :param features: dict mapping group name -> tensor whose last axis packs elements.
  :param ele: element name to look up (e.g. 'xyz').
  :param dset_shape_idx: dataset metadata; ``dset_shape_idx['indices']`` maps
      group name -> {element name: indices within the group's last axis}.
  :return: tensor with the requested element gathered along the last axis.
  :raises ValueError: if ``ele`` is not found in any group.
  """
  ds_idxs = dset_shape_idx['indices']
  for g in ds_idxs:
    if ele in ds_idxs[g]:
      ele_idx = ds_idxs[g][ele]
      ele_data = tf.gather(features[g], ele_idx, axis=-1)
      return ele_data
  # Fix: the original used Python 2 'raise ValueError, msg' syntax, which is a
  # SyntaxError under Python 3; the call form is valid in both.
  raise ValueError(ele + ' not found')
class Model(ResConvOps):
CNN = 'FAN'
def __init__(self, net_data_configs, data_format, dtype):
self.dset_shape_idx = net_data_configs['dset_shape_idx']
self.data_configs = net_data_configs['data_configs']
self.dset_metas = net_data_configs['dset_metas']
self.net_flag = net_data_configs['net_flag']
self.block_paras = BlockParas(net_data_configs['block_configs'])
super(Model, self).__init__(net_data_configs, data_format, dtype)
if self.CNN == 'TRIANGLE':
cnn_class = TriangleCnn
self.block_fn = self.inception_block_v2
elif self.CNN == 'FAN':
cnn_class = FanCnn
self.block_fn = self.fan_block_v2
self.mesh_cnn = cnn_class(self.blocks_layers, self.block_fn, self.block_paras)
def __call__(self, features, is_training):
'''
vertices: [B,N,C]
edges_per_vertex: [B,N,10*2]
'''
if self.CNN == 'TRIANGLE':
return self.main_triangle_cnn(features, is_training)
elif self.CNN == 'FAN':
return self.main_fan_cnn(features, is_training)
def main_test_pool(self, features, is_training=True):
self.is_training = is_training
inputs, xyz = self.parse_inputs(features)
vertices = inputs['vertices']
edgev_per_vertex = inputs['edgev_per_vertex']
valid_ev_num_pv = inputs['valid_ev_num_pv']
valid_ev_num_pv = inputs['valid_ev_num_pv']
vidx_per_face = inputs['vidx_per_face']
valid_num_face = tf.cast(inputs['valid_num_face'], tf.int32)
vertices = tf.expand_dims(vertices, 2)
for s in range(3):
vertices_, edgev_per_vertex_, xyz_, valid_ev_num_pv_, backprop_vidx, \
r2s_fail_mask = FanCnn.pool_mesh(s,
vertices, edgev_per_vertex, xyz, valid_ev_num_pv)
import pdb; pdb.set_trace() # XXX BREAKPOINT
vertices, edgev_per_vertex, xyz, valid_ev_num_pv =\
tf.expand_dims(vertices_,2), edgev_per_vertex_, xyz_, valid_ev_num_pv_
def main_fan_cnn(self, features, is_training):
self.is_training = is_training
inputs, xyz = self.parse_inputs(features)
vertices = inputs['vertices']
edgev_per_vertex = inputs['edgev_per_vertex']
valid_ev_num_pv = inputs['valid_ev_num_pv']
valid_ev_num_pv = inputs['valid_ev_num_pv']
vidx_per_face = inputs['vidx_per_face']
valid_num_face = tf.cast(inputs['valid_num_face'], tf.int32)
#***************************************************************************
# multi scale vertex feature encoder
scale_n = self.block_paras.scale_num
vertices_scales = []
backprop_vidx_scales = []
r2s_fail_mask_scales = []
for scale in range(scale_n):
with tf.variable_scope('FanCnn_S%d'%(scale)):
vertices = self.mesh_cnn.update_vertex(scale, is_training, vertices,
edgev_per_vertex, valid_ev_num_pv)
if scale < self.block_paras.scale_num-1:
vertices_scales.append(vertices)
with tf.variable_scope('MeshPool_S%d'%(scale)):
vertices, edgev_per_vertex, xyz, valid_ev_num_pv, backprop_vidx, \
r2s_fail_mask = FanCnn.pool_mesh(scale,
vertices, edgev_per_vertex, xyz, valid_ev_num_pv)
backprop_vidx_scales.append(backprop_vidx)
r2s_fail_mask_scales.append(r2s_fail_mask)
#***************************************************************************
with tf.variable_scope('CatGlobal'):
vertices = self.add_global(vertices)
#***************************************************************************
# multi scale feature back propogation
for i in range(scale_n-1):
scale = scale_n -2 - i
with tf.variable_scope('FanCnn_S%d'%(scale)):
vertices = self.feature_backprop(scale, vertices, vertices_scales[scale],
backprop_vidx_scales[scale], r2s_fail_mask_scales[scale])
flogits, flabel_weight = self.face_classifier(vertices, vidx_per_face, valid_num_face)
self.log_model_summary()
return flogits, flabel_weight
def feature_backprop(self, scale, cur_vertices, lasts_vertices, backprop_vidx, r2s_fail_mask):
cur_vertices = gather_second_d(cur_vertices, backprop_vidx)
r2s_fail_mask = tf.cast(tf.expand_dims(tf.expand_dims(r2s_fail_mask, -1),-1), tf.float32)
cur_vertices = cur_vertices * (1-r2s_fail_mask)
vertices = tf.concat([lasts_vertices, cur_vertices], -1)
blocks_params = self.block_paras.get_block_paras('backprop', scale)
vertices = self.blocks_layers(vertices, blocks_params, self.block_fn,
self.is_training, 'BackProp_%d'%(scale), with_initial_layer=False)
return vertices
def add_global(self, vertices):
blocks_params = self.block_paras.get_block_paras('global', 0)
if not blocks_params:
return vertices
#***************************************************************************
global_f = tf.reduce_max(vertices, 1, keepdims=True)
# encoder global feature
global_f = self.blocks_layers(global_f, blocks_params, self.block_fn,
self.is_training, 'Global',
with_initial_layer=False)
nv = TfUtil.get_tensor_shape(vertices)[1]
global_f = tf.tile(global_f, [1, nv, 1, 1])
vertices = tf.concat([vertices, global_f], -1)
self.log_tensor_p(vertices, '', 'cat global')
#***************************************************************************
# encoder fused vertex feature
blocks_params = self.block_paras.get_block_paras('global', 1)
if not blocks_params:
return vertices
vertices = self.blocks_layers(vertices, blocks_params, self.block_fn,
self.is_training, 'GlobalFusedV',
with_initial_layer=False)
return vertices
def face_classifier(self, vertices, vidx_per_face, valid_num_face):
dense_filters = self.block_paras.dense_filters + [self.dset_metas.num_classes]
vlogits = self.dense_block(vertices, dense_filters, self.is_training)
vlogits = tf.squeeze(vlogits, 2)
flogits = gather_second_d(vlogits, vidx_per_face)
flogits = tf.reduce_mean(flogits, 2)
fn = TfUtil.get_tensor_shape(vidx_per_face)[1]
valid_face_mask = tf.tile(tf.reshape(tf.range(fn), [1,fn]), [self.batch_size,1])
flabel_weight = tf.cast(tf.less(valid_face_mask, valid_num_face), tf.float32)
return flogits, flabel_weight
def main_triangle_cnn(self, features, is_training):
self.is_training = is_training
inputs = self.parse_inputs(features)
vertices = inputs['vertices']
fidx_per_vertex = inputs['fidx_per_vertex']
fidx_pv_empty_mask = inputs['fidx_pv_empty_mask']
vidx_per_face = inputs['vidx_per_face']
valid_num_face = inputs['valid_num_face']
vertices_scales = []
for scale in range(self.block_paras.scale_num):
with tf.variable_scope('S%d'%(scale)):
vertices = self.mesh_cnn.update_vertex(scale, is_training, vertices,
vidx_per_face, valid_num_face, fidx_per_vertex, fidx_pv_empty_mask)
vertices_scales.append(vertices)
#vertices = tf.concat(vertices_scales, -1)
simplicity_logits = self.simplicity_classifier(vertices)
simplicity_label = self.simplicity_label(features)
self.log_model_summary()
return simplicity_logits, simplicity_label
def simplicity_label(self, features):
min_same_norm_mask = 2
min_same_category_mask = 2
same_category_mask = self.get_ele(features, 'same_category_mask')
same_category_mask = tf.greater_equal(same_category_mask, min_same_category_mask)
same_normal_mask = self.get_ele(features, 'same_normal_mask')
same_normal_mask = tf.greater_equal(same_normal_mask, min_same_norm_mask)
simplicity_mask = tf.logical_and(same_normal_mask, same_category_mask)
simplicity_mask = tf.squeeze(simplicity_mask, -1)
simplicity_label = tf.cast(simplicity_mask, tf.int32)
return simplicity_label
def get_ele(self, features, ele):
return ele_in_feature(features, ele, self.dset_shape_idx)
def normalize_xyz(self, xyz):
norm_xyz_method = self.data_configs['normxyz']
if norm_xyz_method == 'mean0':
mean_xyz = tf.reduce_mean(xyz, 1, keepdims=True)
new_xyz = xyz - mean_xyz
elif norm_xyz_method == 'min0':
min_xyz = tf.reduce_min(xyz, 1, keepdims=True)
new_xyz = xyz - min_xyz
elif norm_xyz_method == 'max1':
min_xyz = tf.reduce_min(xyz, 1, keepdims=True)
new_xyz = xyz - min_xyz
new_xyz = new_xyz / 5.0
elif norm_xyz_method == 'raw':
new_xyz = xyz
pass
else:
raise NotImplementedError
return new_xyz
def parse_inputs(self, features):
  '''Assemble network inputs from the raw feature dict.

  Returns:
    inputs: dict with 'vertices', 'vidx_per_face', 'valid_num_face' and,
      depending on self.CNN ('TRIANGLE' or 'FAN'), the corresponding
      per-vertex index tensors.
    xyz: normalized xyz coordinates, or None when 'xyz' is not listed in
      data_configs['feed_data'].
  '''
  inputs = {}
  vertices = []
  # Bug fix: `xyz` was only bound inside the loop when 'xyz' is fed, so the
  # final `return inputs, xyz` raised NameError for configs without xyz.
  xyz = None
  for e in self.data_configs['feed_data']:
    ele = self.get_ele(features, e)
    if e == 'xyz':
      xyz = ele = self.normalize_xyz(ele)
    vertices.append(ele)
  # concatenate all fed elements along the channel axis
  inputs['vertices'] = vertices = tf.concat(vertices, -1)
  vshape = TfUtil.get_tensor_shape(vertices)
  self.num_vertex0 = vshape[1]
  self.log_tensor_p(vertices, 'vertices', 'raw_input')
  inputs['vidx_per_face'] = self.get_ele(features, 'vidx_per_face')
  inputs['valid_num_face'] = features['valid_num_face']
  if self.CNN == 'TRIANGLE':
    inputs['fidx_per_vertex'] = self.get_ele(features, 'fidx_per_vertex')
    inputs['fidx_pv_empty_mask'] = self.get_ele(features, 'fidx_pv_empty_mask')
  elif self.CNN == 'FAN':
    edgevnum = self.block_paras.edgevnum
    inputs['edgev_per_vertex'] = self.get_ele(features, 'edgev_per_vertex')[:, :, 0:edgevnum]
    inputs['valid_ev_num_pv'] = self.get_ele(features, 'valid_ev_num_pv')
    is_check_data = True
    # sanity check: edge-vertex indices must be non-negative
    if is_check_data:
      check_ev = tf.assert_greater(tf.reduce_min(inputs['edgev_per_vertex']), -1,
                    message='found neg edgev_per_vertex')
      with tf.control_dependencies([check_ev]):
        inputs['edgev_per_vertex'] = tf.identity(inputs['edgev_per_vertex'])
  return inputs, xyz
def simplicity_classifier(self, vertices):
  '''Binary simplicity head: small dense stack ending in 2 class logits.'''
  head_filters = [32, 16, 2]
  return self.dense_block(vertices, head_filters, self.is_training)
class FanCnn():
  '''Edge-fan mesh CNN: per-vertex convolutions over each vertex's edge
  neighborhood, plus random-subsampling mesh pooling between scales.
  '''
  def __init__(self, blocks_layers_fn=None, block_fn=None, block_paras=None,
              ):
    # blocks_layers_fn builds a stack of block_fn blocks from block_paras
    self.block_fn = block_fn
    self.blocks_layers = blocks_layers_fn
    self.block_paras = block_paras

  def update_vertex(self, scale, is_training, vertices,\
      edgev_per_vertex, valid_ev_num_pv):
    '''Apply the 'vertex' block stack of `scale` over the edge neighborhoods
    and return the updated vertex features.'''
    vertices = tf.expand_dims(vertices, 2)
    blocks_params = self.block_paras.get_block_paras('vertex', scale)
    vertices = self.blocks_layers(vertices, blocks_params, self.block_fn,
                  is_training, 'S%d'%( scale),
                  edgev_per_vertex=edgev_per_vertex)
    return vertices

  @staticmethod
  def pool_mesh(scale, vertices, edgev_per_vertex, xyz, valid_ev_num_pv, pool_method='mean', pool_rate=0.5):
    '''Pool the mesh to `pool_rate` of its vertices and rebuild the edges.

    pool_method: 'max' | 'mean' | 'identity'
      'max'/'mean' first replace each vertex's features with the pooled
      features of its edge neighborhood; 'identity' keeps them as-is.
    Returns: (vertices_new, edgev_per_vertex_new, xyz_new,
              valid_ev_num_pv_new, backprop_vidx, r2s_fail_mask)
    '''
    if pool_method != 'identity':
      # (1) replace vertex features by max/mean of the edge-neighborhood group
      if pool_method == 'max':
        pool_fn = tf.reduce_max
      elif pool_method == 'mean':
        pool_fn = tf.reduce_mean
      else:
        # previously an unknown method fell through to an UnboundLocalError
        raise NotImplementedError('unknown pool_method: %s' % pool_method)
      vertices = gather_second_d(tf.squeeze(vertices, 2), edgev_per_vertex)
      # Bug fix: pool_fn was selected but never used -- tf.reduce_max was
      # always applied, so pool_method='mean' silently behaved as 'max'.
      vertices = pool_fn(vertices, 2)

    # (2) Downsample vertices: randomly keep pool_rate of them per batch item
    vn = TfUtil.get_tensor_shape(vertices)[1]
    new_vn = int(pool_rate * vn)
    vertex_sp_indices = []
    batch_size = TfUtil.get_tensor_shape(vertices)[0]
    for bi in range(batch_size):
      vertex_sp_indices.append( tf.expand_dims(tf.random_shuffle(tf.range(vn))[0:new_vn], 0))
    vertex_sp_indices = tf.concat(vertex_sp_indices, 0)
    # keep sampled indices sorted so the gathered tensors preserve vertex order
    vertex_sp_indices = tf.contrib.framework.sort(vertex_sp_indices, -1)
    vertices_new = tf.squeeze(gather_second_d(vertices, tf.expand_dims(vertex_sp_indices,-1)), 2)
    xyz_new = tf.squeeze(gather_second_d(xyz, tf.expand_dims(vertex_sp_indices,-1)), 2)

    # (3) Rebuild edges on the sampled mesh, one batch element at a time
    from datasets.tfrecord_util import MeshSampling
    edgev_per_vertex_new_ls = []
    valid_ev_num_pv_new_ls = []
    backprop_vidx_ls = []
    r2s_fail_mask_ls = []
    for bi in range(batch_size):
      raw_vidx_2_sp_vidx = MeshSampling.get_raw_vidx_2_sp_vidx(vertex_sp_indices[bi], vn)
      edgev_per_vertex_new, valid_ev_num_pv_new, raw_edgev_spvidx = MeshSampling.rich_edges(
                    vertex_sp_indices[bi], edgev_per_vertex[bi],
                    xyz[bi], raw_vidx_2_sp_vidx, valid_ev_num_pv[bi],
                    max_fail_2unit_ev_rate = [2e-2*10, 3e-2*10, 3e-2*10][scale], scale=scale) # 5e-3
      backprop_vidx, r2s_fail_mask = MeshSampling.get_raw2sp(edgev_per_vertex[bi],
                    raw_vidx_2_sp_vidx, valid_ev_num_pv[bi], raw_edgev_spvidx,
                    max_bp_fail_rate = [9e-4, 9e-4, 5e-3][scale], scale = scale)
      edgev_per_vertex_new_ls.append(tf.expand_dims(edgev_per_vertex_new, 0))
      valid_ev_num_pv_new_ls.append(tf.expand_dims(valid_ev_num_pv_new, 0))
      backprop_vidx_ls.append(tf.expand_dims(backprop_vidx,0))
      r2s_fail_mask_ls.append(tf.expand_dims(r2s_fail_mask,0))
    edgev_per_vertex_new = tf.concat(edgev_per_vertex_new_ls, 0)
    valid_ev_num_pv_new = tf.concat(valid_ev_num_pv_new_ls, 0)
    backprop_vidx = tf.concat(backprop_vidx_ls, 0)
    r2s_fail_mask = tf.concat(r2s_fail_mask_ls, 0)
    return vertices_new, edgev_per_vertex_new, xyz_new, valid_ev_num_pv_new, backprop_vidx, r2s_fail_mask
class TriangleCnn():
  '''Triangle-based mesh CNN.

  One `update_vertex` pass propagates features
  vertices -> (face centroids, centered edges) -> faces -> vertices,
  applying a learned encoder block at each representation.
  '''
  def __init__(self, blocks_layers_fn=None, block_fn=None, block_paras=None,
              ):
    # blocks_layers_fn: builds a stack of block_fn blocks from block_paras;
    # when block_fn is None, encoder() below is a pass-through.
    self.block_fn = block_fn
    self.blocks_layers = blocks_layers_fn
    self.block_paras = block_paras

  def update_vertex(self, scale, is_training, vertices,\
      vidx_per_face, valid_num_face, fidx_per_vertex, fidx_pv_empty_mask):
    '''
    Inputs:
      vertices: (nv, cv)
      vidx_per_face: (nf, 3)
    Middle:
      face_centroid: (nf, 1, cc)
      edges: (nf, 3, ce)
      faces: (nf, cf)
    Out:
      vertices: (nv, cvo)
    '''
    self.scale = scale
    self.is_training = is_training
    # vertices -> per-face centroid + centroid-centered edge features
    face_centroid, edges = self.vertex_2_edge(vertices, vidx_per_face, valid_num_face)
    edges = self.encoder(edges, 'edge')
    face_centroid = self.encoder(face_centroid, 'centroid')
    # pool edges (+ optional global/centroid part) into face features
    faces = self.edge_2_face(edges, face_centroid)
    faces = self.encoder(faces, 'face')
    # pool each vertex's adjacent-face features back onto the vertex
    vertices = self.face_2_vertex(faces, fidx_per_vertex, fidx_pv_empty_mask)
    vertices = self.encoder(vertices, 'vertex')
    return vertices

  def vertex_2_edge(self, vertices, vidx_per_face, valid_num_face):
    '''Gather the 3 vertices of each face; return the face centroid and the
    vertex features centered on it ("edges").'''
    # NOTE(review): valid_num_face is accepted but not used here -- padded
    # faces are not masked out of the means; confirm upstream padding.
    vertices_per_face = gather_second_d(vertices, vidx_per_face)
    face_centroid = tf.reduce_mean(vertices_per_face, 2, keepdims=True) # (nf, 1, cv)
    edges = vertices_per_face - face_centroid # (nf, 3, cv)
    return face_centroid, edges

  def encoder(self, inputs, represent):
    '''Apply the block stack configured for `represent` at the current scale.
    Identity when no block_fn is configured.'''
    if not self.block_fn:
      return inputs
    assert represent in ['edge', 'centroid', 'face', 'vertex']
    blocks_params = self.block_paras.get_block_paras(represent, self.scale)
    outputs = self.blocks_layers(inputs, blocks_params, self.block_fn,
                  self.is_training, '%s_s%d'%(represent, self.scale) )
    return outputs

  def edge_2_face(self, edges, face_centroid):
    '''Pool the 3 edge features of each face (max and/or mean, per
    block_paras.e2fl_pool) and optionally append the centroid ("global")
    feature. Concat order is significant: [max][mean][global].'''
    face_local = []
    if 'max' in self.block_paras.e2fl_pool:
      face_local.append( tf.reduce_max (edges, 2) )
    if 'mean' in self.block_paras.e2fl_pool:
      face_local.append( tf.reduce_mean (edges, 2) )
    face_local = tf.concat(face_local, -1)
    face_global = tf.squeeze(face_centroid, 2)
    # the global part is always used for scale>0; at scale 0 it is configurable
    use_global = self.scale>0 or self.block_paras.use_face_global_scale0
    if use_global:
      faces = tf.concat([face_local, face_global], -1)
    else:
      faces = face_local
    return faces

  def face_2_vertex(self, faces, fidx_per_vertex, fidx_pv_empty_mask):
    '''Pool the features of the faces adjacent to each vertex (max and/or
    masked mean, per block_paras.f2v_pool).'''
    vertices_flat = gather_second_d(faces, fidx_per_vertex)
    vertices = []
    if 'max' in self.block_paras.f2v_pool:
      vertices.append( tf.reduce_max(vertices_flat, 2) )
    if 'mean' in self.block_paras.f2v_pool:
      # mask out padded (empty) face slots when averaging
      vertices.append( TfUtil.mask_reduce_mean(vertices_flat, 1-fidx_pv_empty_mask, 2) )
    vertices = tf.concat(vertices, axis=-1) # (nv, 2cf)
    return vertices
import numpy as np
class BlockParas():
  '''Per-representation / per-scale block hyper-parameters.

  `block_configs['filters']` maps each representation (e.g. 'vertex', 'edge')
  to a list over scales of per-block filter counts. 'block_sizes', 'kernels'
  and 'strides' are optional and default to 1 wherever missing.
  '''
  def __init__(self, block_configs):
    block_sizes = block_configs['block_sizes']
    filters = block_configs['filters']
    # Bug fix: missing 'kernels'/'strides' defaulted to None, which made the
    # `item not in kernels` membership tests below raise TypeError. Default
    # to empty dicts instead.
    kernels = block_configs.get('kernels', {})
    strides = block_configs.get('strides', {})
    self.edgevnum = block_configs['edgevnum']
    self.scale_num = len(filters['vertex'])
    # Bug fix: the original guard was `hasattr(self, 'e2fl_pool')`, which is
    # always False before the attribute exists, so the triangle-CNN pooling
    # options were never assigned (and TriangleCnn.edge_2_face would raise
    # AttributeError). Gate on the config instead.
    if 'e2fl_pool' in block_configs:
      self.e2fl_pool = block_configs['e2fl_pool']
      self.f2v_pool = block_configs['f2v_pool']
      self.use_face_global_scale0 = block_configs['use_face_global_scale0']
    self.dense_filters = block_configs['dense_filters']
    all_paras = {}
    for item in filters:
      # parameters absent for `item` default to 1 at every block
      block_size_is_1 = item not in block_sizes
      kernel_is_1 = item not in kernels
      stride_is_1 = item not in strides
      if block_size_is_1:
        block_sizes[item] = []
      if kernel_is_1:
        kernels[item] = []
      if stride_is_1:
        strides[item] = []
      # Bug fix: the fill loop originally ran only when block sizes or
      # kernels were missing, so `strides[item]` stayed empty (and indexing
      # it later failed) when only the strides were omitted.
      if block_size_is_1 or kernel_is_1 or stride_is_1:
        scale_num = len(filters[item])
        for s in range(scale_num):
          if block_size_is_1:
            block_sizes[item].append([1] * len(filters[item][s]))
          if kernel_is_1:
            kernels[item].append([1] * len(filters[item][s]))
          if stride_is_1:
            strides[item].append([1] * len(filters[item][s]))
      all_paras[item] = BlockParas.complete_scales_paras(block_sizes[item],
                        filters[item], kernels[item], strides[item])
    self.all_paras = all_paras

  def get_block_paras(self, element, scale):
    '''Return the list of per-block parameter dicts for `element` at `scale`.'''
    return self.all_paras[element][scale]

  @staticmethod
  def complete_scales_paras(block_size, filters, kernels, strides):
    '''Build one list of per-block parameter dicts for every scale.'''
    scale_num = len(block_size)
    scales_blocks_paras = []
    for s in range(scale_num):
      kernels_s = kernels[s] if kernels else None
      blocks_paras = BlockParas.complete_paras_1scale(block_size[s], filters[s], kernels_s, strides[s])
      scales_blocks_paras.append(blocks_paras)
    return scales_blocks_paras

  @staticmethod
  def complete_paras_1scale(block_size, filters, kernels, strides):
    '''Assemble the parameters of all blocks of one scale (None when empty).'''
    if len(block_size) == 0:
      return None
    assert not isinstance(block_size[0], list)
    block_size = np.array(block_size)
    filters = np.array(filters)
    block_num = block_size.shape[0]
    blocks_paras = {}
    blocks_paras['block_sizes'] = block_size
    blocks_paras['filters'] = filters
    blocks_paras['strides'] = strides
    blocks_paras['kernels'], blocks_paras['pad_stride1'] = \
        BlockParas.get_1_kernel_block_paras(block_num, kernels)
    blocks_paras = BlockParas.split_block_paras(blocks_paras)
    return blocks_paras

  @staticmethod
  def split_block_paras(block_paras):
    '''
    From one dictionary to one list.
    Original: one dictionary containing lists of len = block_num.
    Result: one list with len = block_num; each element is the dict for one block.
    '''
    block_num = block_paras['block_sizes'].shape[0]
    block_paras_splited = []
    for s in range(block_num):
      block_para_s = {item: block_paras[item][s] for item in block_paras}
      block_paras_splited.append(block_para_s)
    return block_paras_splited

  @staticmethod
  def get_1_kernel_block_paras(block_num, kernels_):
    '''Expand per-block kernels (default 1) and 'v' (valid) paddings.'''
    if kernels_:
      kernels = [kernels_[i] for i in range(block_num)]
    else:
      kernels = [1 for i in range(block_num)]
    paddings = ['v' for i in range(block_num)]
    return kernels, paddings
from typing import Tuple
from rio_tiler.errors import RioTilerError
class InvalidModlandGridID(RioTilerError):
    """Invalid MODLAND grid id: no non-fill tile matches the requested (h, v) indices."""
# The 460 non-fill tiles of the MODIS sinusoidal (MODLAND) grid.
# Each entry has the format:
# (horizontal_grid, vertical_grid, bbox(xmin, ymin, xmax, ymax)) — grid indices
# as zero-padded strings, bbox in WGS84 degrees.
MODLAND_GRID = [
("14", "00", (-180.0, 80.0, -172.7151, 80.4083)),
("15", "00", (-180.0, 80.0, -115.1274, 83.625)),
("16", "00", (-180.0, 80.0, -57.5397, 86.8167)),
("17", "00", (-180.0, 80.0, 57.2957, 90.0)),
("18", "00", (-0.004, 80.0, 180.0, 90.0)),
("19", "00", (57.5877, 80.0, 180.0, 86.8167)),
("20", "00", (115.1754, 80.0, 180.0, 83.625)),
("21", "00", (172.7631, 80.0, 180.0, 80.4083)),
("11", "01", (-180.0, 70.0, -175.4039, 70.5333)),
("12", "01", (-180.0, 70.0, -146.1659, 73.875)),
("13", "01", (-180.0, 70.0, -116.9278, 77.1667)),
("14", "01", (-180.0, 70.0, -87.6898, 80.0)),
("15", "01", (-172.7631, 70.0, -58.4517, 80.0)),
("16", "01", (-115.1754, 70.0, -29.2137, 80.0)),
("17", "01", (-57.5877, 70.0, 0.048, 80.0)),
("18", "01", (0.0, 70.0, 57.6357, 80.0)),
("19", "01", (29.238, 70.0, 115.2234, 80.0)),
("20", "01", (58.4761, 70.0, 172.8111, 80.0)),
("21", "01", (87.7141, 70.0, 180.0, 80.0)),
("22", "01", (116.9522, 70.0, 180.0, 77.1583)),
("23", "01", (146.1902, 70.0, 180.0, 73.875)),
("24", "01", (175.4283, 70.0, 180.0, 70.5333)),
("09", "02", (-180.0, 60.0, -159.9833, 63.6167)),
("10", "02", (-180.0, 60.0, -139.9833, 67.1167)),
("11", "02", (-180.0, 60.0, -119.9833, 70.0)),
("12", "02", (-175.4283, 60.0, -99.9833, 70.0)),
("13", "02", (-146.1902, 60.0, -79.9833, 70.0)),
("14", "02", (-116.9522, 60.0, -59.9833, 70.0)),
("15", "02", (-87.7141, 60.0, -39.9833, 70.0)),
("16", "02", (-58.4761, 60.0, -19.9833, 70.0)),
("17", "02", (-29.238, 60.0, 0.0244, 70.0)),
("18", "02", (0.0, 60.0, 29.2624, 70.0)),
("19", "02", (20.0, 60.0, 58.5005, 70.0)),
("20", "02", (40.0, 60.0, 87.7385, 70.0)),
("21", "02", (60.0, 60.0, 116.9765, 70.0)),
("22", "02", (80.0, 60.0, 146.2146, 70.0)),
("23", "02", (100.0, 60.0, 175.4526, 70.0)),
("24", "02", (120.0, 60.0, 180.0, 70.0)),
("25", "02", (140.0, 60.0, 180.0, 67.1167)),
("26", "02", (160.0, 60.0, 180.0, 63.6167)),
("06", "03", (-180.0, 50.0, -171.1167, 52.3333)),
("07", "03", (-180.0, 50.0, -155.5594, 56.2583)),
("08", "03", (-180.0, 50.0, -140.0022, 60.0)),
("09", "03", (-180.0, 50.0, -124.4449, 60.0)),
("10", "03", (-160.0, 50.0, -108.8877, 60.0)),
("11", "03", (-140.0, 50.0, -93.3305, 60.0)),
("12", "03", (-120.0, 50.0, -77.7732, 60.0)),
("13", "03", (-100.0, 50.0, -62.216, 60.0)),
("14", "03", (-80.0, 50.0, -46.6588, 60.0)),
("15", "03", (-60.0, 50.0, -31.1015, 60.0)),
("16", "03", (-40.0, 50.0, -15.5443, 60.0)),
("17", "03", (-20.0, 50.0, 0.0167, 60.0)),
("18", "03", (0.0, 50.0, 20.0167, 60.0)),
("19", "03", (15.5572, 50.0, 40.0167, 60.0)),
("20", "03", (31.1145, 50.0, 60.0167, 60.0)),
("21", "03", (46.6717, 50.0, 80.0167, 60.0)),
("22", "03", (62.229, 50.0, 100.0167, 60.0)),
("23", "03", (77.7862, 50.0, 120.0167, 60.0)),
("24", "03", (93.3434, 50.0, 140.0167, 60.0)),
("25", "03", (108.9007, 50.0, 160.0167, 60.0)),
("26", "03", (124.4579, 50.0, 180.0, 60.0)),
("27", "03", (140.0151, 50.0, 180.0, 60.0)),
("28", "03", (155.5724, 50.0, 180.0, 56.25)),
("29", "03", (171.1296, 50.0, 180.0, 52.3333)),
("04", "04", (-180.0, 40.0, -169.6921, 43.7667)),
("05", "04", (-180.0, 40.0, -156.638, 48.1917)),
("06", "04", (-180.0, 40.0, -143.5839, 50.0)),
("07", "04", (-171.1296, 40.0, -130.5299, 50.0)),
("08", "04", (-155.5724, 40.0, -117.4758, 50.0)),
("09", "04", (-140.0151, 40.0, -104.4217, 50.0)),
("10", "04", (-124.4579, 40.0, -91.3676, 50.0)),
("11", "04", (-108.9007, 40.0, -78.3136, 50.0)),
("12", "04", (-93.3434, 40.0, -65.2595, 50.0)),
("13", "04", (-77.7862, 40.0, -52.2054, 50.0)),
("14", "04", (-62.229, 40.0, -39.1513, 50.0)),
("15", "04", (-46.6717, 40.0, -26.0973, 50.0)),
("16", "04", (-31.1145, 40.0, -13.0432, 50.0)),
("17", "04", (-15.5572, 40.0, 0.013, 50.0)),
("18", "04", (0.0, 40.0, 15.5702, 50.0)),
("19", "04", (13.0541, 40.0, 31.1274, 50.0)),
("20", "04", (26.1081, 40.0, 46.6847, 50.0)),
("21", "04", (39.1622, 40.0, 62.2419, 50.0)),
("22", "04", (52.2163, 40.0, 77.7992, 50.0)),
("23", "04", (65.2704, 40.0, 93.3564, 50.0)),
("24", "04", (78.3244, 40.0, 108.9136, 50.0)),
("25", "04", (91.3785, 40.0, 124.4709, 50.0)),
("26", "04", (104.4326, 40.0, 140.0281, 50.0)),
("27", "04", (117.4867, 40.0, 155.5853, 50.0)),
("28", "04", (130.5407, 40.0, 171.1426, 50.0)),
("29", "04", (143.5948, 40.0, 180.0, 50.0)),
("30", "04", (156.6489, 40.0, 180.0, 48.1917)),
("31", "04", (169.7029, 40.0, 180.0, 43.7583)),
("02", "05", (-180.0, 30.0, -173.1955, 33.5583)),
("03", "05", (-180.0, 30.0, -161.6485, 38.95)),
("04", "05", (-180.0, 30.0, -150.1014, 40.0)),
("05", "05", (-169.7029, 30.0, -138.5544, 40.0)),
("06", "05", (-156.6489, 30.0, -127.0074, 40.0)),
("07", "05", (-143.5948, 30.0, -115.4604, 40.0)),
("08", "05", (-130.5407, 30.0, -103.9134, 40.0)),
("09", "05", (-117.4867, 30.0, -92.3664, 40.0)),
("10", "05", (-104.4326, 30.0, -80.8194, 40.0)),
("11", "05", (-91.3785, 30.0, -69.2724, 40.0)),
("12", "05", (-78.3244, 30.0, -57.7254, 40.0)),
("13", "05", (-65.2704, 30.0, -46.1784, 40.0)),
("14", "05", (-52.2163, 30.0, -34.6314, 40.0)),
("15", "05", (-39.1622, 30.0, -23.0844, 40.0)),
("16", "05", (-26.1081, 30.0, -11.5374, 40.0)),
("17", "05", (-13.0541, 30.0, 0.0109, 40.0)),
("18", "05", (0.0, 30.0, 13.065, 40.0)),
("19", "05", (11.547, 30.0, 26.119, 40.0)),
("20", "05", (23.094, 30.0, 39.1731, 40.0)),
("21", "05", (34.641, 30.0, 52.2272, 40.0)),
("22", "05", (46.188, 30.0, 65.2812, 40.0)),
("23", "05", (57.735, 30.0, 78.3353, 40.0)),
("24", "05", (69.282, 30.0, 91.3894, 40.0)),
("25", "05", (80.829, 30.0, 104.4435, 40.0)),
("26", "05", (92.376, 30.0, 117.4975, 40.0)),
("27", "05", (103.923, 30.0, 130.5516, 40.0)),
("28", "05", (115.4701, 30.0, 143.6057, 40.0)),
("29", "05", (127.0171, 30.0, 156.6598, 40.0)),
("30", "05", (138.5641, 30.0, 169.7138, 40.0)),
("31", "05", (150.1111, 30.0, 180.0, 40.0)),
("32", "05", (161.6581, 30.0, 180.0, 38.9417)),
("33", "05", (173.2051, 30.0, 180.0, 33.5583)),
("01", "06", (-180.0, 20.0, -170.2596, 27.2667)),
("02", "06", (-180.0, 20.0, -159.6178, 30.0)),
("03", "06", (-173.2051, 20.0, -148.976, 30.0)),
("04", "06", (-161.6581, 20.0, -138.3342, 30.0)),
("05", "06", (-150.1111, 20.0, -127.6925, 30.0)),
("06", "06", (-138.5641, 20.0, -117.0507, 30.0)),
("07", "06", (-127.0171, 20.0, -106.4089, 30.0)),
("08", "06", (-115.4701, 20.0, -95.7671, 30.0)),
("09", "06", (-103.923, 20.0, -85.1254, 30.0)),
("10", "06", (-92.376, 20.0, -74.4836, 30.0)),
("11", "06", (-80.829, 20.0, -63.8418, 30.0)),
("12", "06", (-69.282, 20.0, -53.2, 30.0)),
("13", "06", (-57.735, 20.0, -42.5582, 30.0)),
("14", "06", (-46.188, 20.0, -31.9165, 30.0)),
("15", "06", (-34.641, 20.0, -21.2747, 30.0)),
("16", "06", (-23.094, 20.0, -10.6329, 30.0)),
("17", "06", (-11.547, 20.0, 0.0096, 30.0)),
("18", "06", (0.0, 20.0, 11.5566, 30.0)),
("19", "06", (10.6418, 20.0, 23.1036, 30.0)),
("20", "06", (21.2836, 20.0, 34.6506, 30.0)),
("21", "06", (31.9253, 20.0, 46.1976, 30.0)),
("22", "06", (42.5671, 20.0, 57.7446, 30.0)),
("23", "06", (53.2089, 20.0, 69.2917, 30.0)),
("24", "06", (63.8507, 20.0, 80.8387, 30.0)),
("25", "06", (74.4924, 20.0, 92.3857, 30.0)),
("26", "06", (85.1342, 20.0, 103.9327, 30.0)),
("27", "06", (95.776, 20.0, 115.4797, 30.0)),
("28", "06", (106.4178, 20.0, 127.0267, 30.0)),
("29", "06", (117.0596, 20.0, 138.5737, 30.0)),
("30", "06", (127.7013, 20.0, 150.1207, 30.0)),
("31", "06", (138.3431, 20.0, 161.6677, 30.0)),
("32", "06", (148.9849, 20.0, 173.2147, 30.0)),
("33", "06", (159.6267, 20.0, 180.0, 30.0)),
("34", "06", (170.2684, 20.0, 180.0, 27.2667)),
("00", "07", (-180.0, 10.0, -172.6141, 19.1917)),
("01", "07", (-180.0, 10.0, -162.4598, 20.0)),
("02", "07", (-170.2684, 10.0, -152.3055, 20.0)),
("03", "07", (-159.6267, 10.0, -142.1513, 20.0)),
("04", "07", (-148.9849, 10.0, -131.997, 20.0)),
("05", "07", (-138.3431, 10.0, -121.8427, 20.0)),
("06", "07", (-127.7013, 10.0, -111.6885, 20.0)),
("07", "07", (-117.0596, 10.0, -101.5342, 20.0)),
("08", "07", (-106.4178, 10.0, -91.3799, 20.0)),
("09", "07", (-95.776, 10.0, -81.2257, 20.0)),
("10", "07", (-85.1342, 10.0, -71.0714, 20.0)),
("11", "07", (-74.4924, 10.0, -60.9171, 20.0)),
("12", "07", (-63.8507, 10.0, -50.7629, 20.0)),
("13", "07", (-53.2089, 10.0, -40.6086, 20.0)),
("14", "07", (-42.5671, 10.0, -30.4543, 20.0)),
("15", "07", (-31.9253, 10.0, -20.3001, 20.0)),
("16", "07", (-21.2836, 10.0, -10.1458, 20.0)),
("17", "07", (-10.6418, 10.0, 0.0089, 20.0)),
("18", "07", (0.0, 10.0, 10.6506, 20.0)),
("19", "07", (10.1543, 10.0, 21.2924, 20.0)),
("20", "07", (20.3085, 10.0, 31.9342, 20.0)),
("21", "07", (30.4628, 10.0, 42.576, 20.0)),
("22", "07", (40.6171, 10.0, 53.2178, 20.0)),
("23", "07", (50.7713, 10.0, 63.8595, 20.0)),
("24", "07", (60.9256, 10.0, 74.5013, 20.0)),
("25", "07", (71.0799, 10.0, 85.1431, 20.0)),
("26", "07", (81.2341, 10.0, 95.7849, 20.0)),
("27", "07", (91.3884, 10.0, 106.4266, 20.0)),
("28", "07", (101.5427, 10.0, 117.0684, 20.0)),
("29", "07", (111.6969, 10.0, 127.7102, 20.0)),
("30", "07", (121.8512, 10.0, 138.352, 20.0)),
("31", "07", (132.0055, 10.0, 148.9938, 20.0)),
("32", "07", (142.1597, 10.0, 159.6355, 20.0)),
("33", "07", (152.314, 10.0, 170.2773, 20.0)),
("34", "07", (162.4683, 10.0, 180.0, 20.0)),
("35", "07", (172.6225, 10.0, 180.0, 19.1833)),
("00", "08", (-180.0, -0.0, -169.9917, 10.0)),
("01", "08", (-172.6225, -0.0, -159.9917, 10.0)),
("02", "08", (-162.4683, -0.0, -149.9917, 10.0)),
("03", "08", (-152.314, -0.0, -139.9917, 10.0)),
("04", "08", (-142.1597, -0.0, -129.9917, 10.0)),
("05", "08", (-132.0055, -0.0, -119.9917, 10.0)),
("06", "08", (-121.8512, -0.0, -109.9917, 10.0)),
("07", "08", (-111.6969, -0.0, -99.9917, 10.0)),
("08", "08", (-101.5427, -0.0, -89.9917, 10.0)),
("09", "08", (-91.3884, -0.0, -79.9917, 10.0)),
("10", "08", (-81.2341, -0.0, -69.9917, 10.0)),
("11", "08", (-71.0799, -0.0, -59.9917, 10.0)),
("12", "08", (-60.9256, -0.0, -49.9917, 10.0)),
("13", "08", (-50.7713, -0.0, -39.9917, 10.0)),
("14", "08", (-40.6171, -0.0, -29.9917, 10.0)),
("15", "08", (-30.4628, -0.0, -19.9917, 10.0)),
("16", "08", (-20.3085, -0.0, -9.9917, 10.0)),
("17", "08", (-10.1543, -0.0, 0.0085, 10.0)),
("18", "08", (0.0, -0.0, 10.1627, 10.0)),
("19", "08", (10.0, -0.0, 20.317, 10.0)),
("20", "08", (20.0, -0.0, 30.4713, 10.0)),
("21", "08", (30.0, -0.0, 40.6255, 10.0)),
("22", "08", (40.0, -0.0, 50.7798, 10.0)),
("23", "08", (50.0, -0.0, 60.9341, 10.0)),
("24", "08", (60.0, -0.0, 71.0883, 10.0)),
("25", "08", (70.0, -0.0, 81.2426, 10.0)),
("26", "08", (80.0, -0.0, 91.3969, 10.0)),
("27", "08", (90.0, -0.0, 101.5511, 10.0)),
("28", "08", (100.0, -0.0, 111.7054, 10.0)),
("29", "08", (110.0, -0.0, 121.8597, 10.0)),
("30", "08", (120.0, -0.0, 132.0139, 10.0)),
("31", "08", (130.0, -0.0, 142.1682, 10.0)),
("32", "08", (140.0, -0.0, 152.3225, 10.0)),
("33", "08", (150.0, -0.0, 162.4767, 10.0)),
("34", "08", (160.0, -0.0, 172.631, 10.0)),
("35", "08", (170.0, -0.0, 180.0, 10.0)),
("00", "09", (-180.0, -10.0, -169.9917, -0.0)),
("01", "09", (-172.6225, -10.0, -159.9917, -0.0)),
("02", "09", (-162.4683, -10.0, -149.9917, -0.0)),
("03", "09", (-152.314, -10.0, -139.9917, -0.0)),
("04", "09", (-142.1597, -10.0, -129.9917, -0.0)),
("05", "09", (-132.0055, -10.0, -119.9917, -0.0)),
("06", "09", (-121.8512, -10.0, -109.9917, -0.0)),
("07", "09", (-111.6969, -10.0, -99.9917, -0.0)),
("08", "09", (-101.5427, -10.0, -89.9917, -0.0)),
("09", "09", (-91.3884, -10.0, -79.9917, -0.0)),
("10", "09", (-81.2341, -10.0, -69.9917, -0.0)),
("11", "09", (-71.0799, -10.0, -59.9917, -0.0)),
("12", "09", (-60.9256, -10.0, -49.9917, -0.0)),
("13", "09", (-50.7713, -10.0, -39.9917, -0.0)),
("14", "09", (-40.6171, -10.0, -29.9917, -0.0)),
("15", "09", (-30.4628, -10.0, -19.9917, -0.0)),
("16", "09", (-20.3085, -10.0, -9.9917, -0.0)),
("17", "09", (-10.1543, -10.0, 0.0085, -0.0)),
("18", "09", (0.0, -10.0, 10.1627, -0.0)),
("19", "09", (10.0, -10.0, 20.317, -0.0)),
("20", "09", (20.0, -10.0, 30.4713, -0.0)),
("21", "09", (30.0, -10.0, 40.6255, -0.0)),
("22", "09", (40.0, -10.0, 50.7798, -0.0)),
("23", "09", (50.0, -10.0, 60.9341, -0.0)),
("24", "09", (60.0, -10.0, 71.0883, -0.0)),
("25", "09", (70.0, -10.0, 81.2426, -0.0)),
("26", "09", (80.0, -10.0, 91.3969, -0.0)),
("27", "09", (90.0, -10.0, 101.5511, -0.0)),
("28", "09", (100.0, -10.0, 111.7054, -0.0)),
("29", "09", (110.0, -10.0, 121.8597, -0.0)),
("30", "09", (120.0, -10.0, 132.0139, -0.0)),
("31", "09", (130.0, -10.0, 142.1682, -0.0)),
("32", "09", (140.0, -10.0, 152.3225, -0.0)),
("33", "09", (150.0, -10.0, 162.4767, -0.0)),
("34", "09", (160.0, -10.0, 172.631, -0.0)),
("35", "09", (170.0, -10.0, 180.0, -0.0)),
("00", "10", (-180.0, -19.1917, -172.6141, -10.0)),
("01", "10", (-180.0, -20.0, -162.4598, -10.0)),
("02", "10", (-170.2684, -20.0, -152.3055, -10.0)),
("03", "10", (-159.6267, -20.0, -142.1513, -10.0)),
("04", "10", (-148.9849, -20.0, -131.997, -10.0)),
("05", "10", (-138.3431, -20.0, -121.8427, -10.0)),
("06", "10", (-127.7013, -20.0, -111.6885, -10.0)),
("07", "10", (-117.0596, -20.0, -101.5342, -10.0)),
("08", "10", (-106.4178, -20.0, -91.3799, -10.0)),
("09", "10", (-95.776, -20.0, -81.2257, -10.0)),
("10", "10", (-85.1342, -20.0, -71.0714, -10.0)),
("11", "10", (-74.4924, -20.0, -60.9171, -10.0)),
("12", "10", (-63.8507, -20.0, -50.7629, -10.0)),
("13", "10", (-53.2089, -20.0, -40.6086, -10.0)),
("14", "10", (-42.5671, -20.0, -30.4543, -10.0)),
("15", "10", (-31.9253, -20.0, -20.3001, -10.0)),
("16", "10", (-21.2836, -20.0, -10.1458, -10.0)),
("17", "10", (-10.6418, -20.0, 0.0089, -10.0)),
("18", "10", (0.0, -20.0, 10.6506, -10.0)),
("19", "10", (10.1543, -20.0, 21.2924, -10.0)),
("20", "10", (20.3085, -20.0, 31.9342, -10.0)),
("21", "10", (30.4628, -20.0, 42.576, -10.0)),
("22", "10", (40.6171, -20.0, 53.2178, -10.0)),
("23", "10", (50.7713, -20.0, 63.8595, -10.0)),
("24", "10", (60.9256, -20.0, 74.5013, -10.0)),
("25", "10", (71.0799, -20.0, 85.1431, -10.0)),
("26", "10", (81.2341, -20.0, 95.7849, -10.0)),
("27", "10", (91.3884, -20.0, 106.4266, -10.0)),
("28", "10", (101.5427, -20.0, 117.0684, -10.0)),
("29", "10", (111.6969, -20.0, 127.7102, -10.0)),
("30", "10", (121.8512, -20.0, 138.352, -10.0)),
("31", "10", (132.0055, -20.0, 148.9938, -10.0)),
("32", "10", (142.1597, -20.0, 159.6355, -10.0)),
("33", "10", (152.314, -20.0, 170.2773, -10.0)),
("34", "10", (162.4683, -20.0, 180.0, -10.0)),
("35", "10", (172.6225, -19.1833, 180.0, -10.0)),
("01", "11", (-180.0, -27.2667, -170.2596, -20.0)),
("02", "11", (-180.0, -30.0, -159.6178, -20.0)),
("03", "11", (-173.2051, -30.0, -148.976, -20.0)),
("04", "11", (-161.6581, -30.0, -138.3342, -20.0)),
("05", "11", (-150.1111, -30.0, -127.6925, -20.0)),
("06", "11", (-138.5641, -30.0, -117.0507, -20.0)),
("07", "11", (-127.0171, -30.0, -106.4089, -20.0)),
("08", "11", (-115.4701, -30.0, -95.7671, -20.0)),
("09", "11", (-103.923, -30.0, -85.1254, -20.0)),
("10", "11", (-92.376, -30.0, -74.4836, -20.0)),
("11", "11", (-80.829, -30.0, -63.8418, -20.0)),
("12", "11", (-69.282, -30.0, -53.2, -20.0)),
("13", "11", (-57.735, -30.0, -42.5582, -20.0)),
("14", "11", (-46.188, -30.0, -31.9165, -20.0)),
("15", "11", (-34.641, -30.0, -21.2747, -20.0)),
("16", "11", (-23.094, -30.0, -10.6329, -20.0)),
("17", "11", (-11.547, -30.0, 0.0096, -20.0)),
("18", "11", (0.0, -30.0, 11.5566, -20.0)),
("19", "11", (10.6418, -30.0, 23.1036, -20.0)),
("20", "11", (21.2836, -30.0, 34.6506, -20.0)),
("21", "11", (31.9253, -30.0, 46.1976, -20.0)),
("22", "11", (42.5671, -30.0, 57.7446, -20.0)),
("23", "11", (53.2089, -30.0, 69.2917, -20.0)),
("24", "11", (63.8507, -30.0, 80.8387, -20.0)),
("25", "11", (74.4924, -30.0, 92.3857, -20.0)),
("26", "11", (85.1342, -30.0, 103.9327, -20.0)),
("27", "11", (95.776, -30.0, 115.4797, -20.0)),
("28", "11", (106.4178, -30.0, 127.0267, -20.0)),
("29", "11", (117.0596, -30.0, 138.5737, -20.0)),
("30", "11", (127.7013, -30.0, 150.1207, -20.0)),
("31", "11", (138.3431, -30.0, 161.6677, -20.0)),
("32", "11", (148.9849, -30.0, 173.2147, -20.0)),
("33", "11", (159.6267, -30.0, 180.0, -20.0)),
("34", "11", (170.2684, -27.2667, 180.0, -20.0)),
("02", "12", (-180.0, -33.5583, -173.1955, -30.0)),
("03", "12", (-180.0, -38.95, -161.6485, -30.0)),
("04", "12", (-180.0, -40.0, -150.1014, -30.0)),
("05", "12", (-169.7029, -40.0, -138.5544, -30.0)),
("06", "12", (-156.6489, -40.0, -127.0074, -30.0)),
("07", "12", (-143.5948, -40.0, -115.4604, -30.0)),
("08", "12", (-130.5407, -40.0, -103.9134, -30.0)),
("09", "12", (-117.4867, -40.0, -92.3664, -30.0)),
("10", "12", (-104.4326, -40.0, -80.8194, -30.0)),
("11", "12", (-91.3785, -40.0, -69.2724, -30.0)),
("12", "12", (-78.3244, -40.0, -57.7254, -30.0)),
("13", "12", (-65.2704, -40.0, -46.1784, -30.0)),
("14", "12", (-52.2163, -40.0, -34.6314, -30.0)),
("15", "12", (-39.1622, -40.0, -23.0844, -30.0)),
("16", "12", (-26.1081, -40.0, -11.5374, -30.0)),
("17", "12", (-13.0541, -40.0, 0.0109, -30.0)),
("18", "12", (0.0, -40.0, 13.065, -30.0)),
("19", "12", (11.547, -40.0, 26.119, -30.0)),
("20", "12", (23.094, -40.0, 39.1731, -30.0)),
("21", "12", (34.641, -40.0, 52.2272, -30.0)),
("22", "12", (46.188, -40.0, 65.2812, -30.0)),
("23", "12", (57.735, -40.0, 78.3353, -30.0)),
("24", "12", (69.282, -40.0, 91.3894, -30.0)),
("25", "12", (80.829, -40.0, 104.4435, -30.0)),
("26", "12", (92.376, -40.0, 117.4975, -30.0)),
("27", "12", (103.923, -40.0, 130.5516, -30.0)),
("28", "12", (115.4701, -40.0, 143.6057, -30.0)),
("29", "12", (127.0171, -40.0, 156.6598, -30.0)),
("30", "12", (138.5641, -40.0, 169.7138, -30.0)),
("31", "12", (150.1111, -40.0, 180.0, -30.0)),
("32", "12", (161.6581, -38.9417, 180.0, -30.0)),
("33", "12", (173.2051, -33.5583, 180.0, -30.0)),
("04", "13", (-180.0, -43.7667, -169.6921, -40.0)),
("05", "13", (-180.0, -48.1917, -156.638, -40.0)),
("06", "13", (-180.0, -50.0, -143.5839, -40.0)),
("07", "13", (-171.1296, -50.0, -130.5299, -40.0)),
("08", "13", (-155.5724, -50.0, -117.4758, -40.0)),
("09", "13", (-140.0151, -50.0, -104.4217, -40.0)),
("10", "13", (-124.4579, -50.0, -91.3676, -40.0)),
("11", "13", (-108.9007, -50.0, -78.3136, -40.0)),
("12", "13", (-93.3434, -50.0, -65.2595, -40.0)),
("13", "13", (-77.7862, -50.0, -52.2054, -40.0)),
("14", "13", (-62.229, -50.0, -39.1513, -40.0)),
("15", "13", (-46.6717, -50.0, -26.0973, -40.0)),
("16", "13", (-31.1145, -50.0, -13.0432, -40.0)),
("17", "13", (-15.5572, -50.0, 0.013, -40.0)),
("18", "13", (0.0, -50.0, 15.5702, -40.0)),
("19", "13", (13.0541, -50.0, 31.1274, -40.0)),
("20", "13", (26.1081, -50.0, 46.6847, -40.0)),
("21", "13", (39.1622, -50.0, 62.2419, -40.0)),
("22", "13", (52.2163, -50.0, 77.7992, -40.0)),
("23", "13", (65.2704, -50.0, 93.3564, -40.0)),
("24", "13", (78.3244, -50.0, 108.9136, -40.0)),
("25", "13", (91.3785, -50.0, 124.4709, -40.0)),
("26", "13", (104.4326, -50.0, 140.0281, -40.0)),
("27", "13", (117.4867, -50.0, 155.5853, -40.0)),
("28", "13", (130.5407, -50.0, 171.1426, -40.0)),
("29", "13", (143.5948, -50.0, 180.0, -40.0)),
("30", "13", (156.6489, -48.1917, 180.0, -40.0)),
("31", "13", (169.7029, -43.7583, 180.0, -40.0)),
("06", "14", (-180.0, -52.3333, -171.1167, -50.0)),
("07", "14", (-180.0, -56.2583, -155.5594, -50.0)),
("08", "14", (-180.0, -60.0, -140.0022, -50.0)),
("09", "14", (-180.0, -60.0, -124.4449, -50.0)),
("10", "14", (-160.0, -60.0, -108.8877, -50.0)),
("11", "14", (-140.0, -60.0, -93.3305, -50.0)),
("12", "14", (-120.0, -60.0, -77.7732, -50.0)),
("13", "14", (-100.0, -60.0, -62.216, -50.0)),
("14", "14", (-80.0, -60.0, -46.6588, -50.0)),
("15", "14", (-60.0, -60.0, -31.1015, -50.0)),
("16", "14", (-40.0, -60.0, -15.5443, -50.0)),
("17", "14", (-20.0, -60.0, 0.0167, -50.0)),
("18", "14", (0.0, -60.0, 20.0167, -50.0)),
("19", "14", (15.5572, -60.0, 40.0167, -50.0)),
("20", "14", (31.1145, -60.0, 60.0167, -50.0)),
("21", "14", (46.6717, -60.0, 80.0167, -50.0)),
("22", "14", (62.229, -60.0, 100.0167, -50.0)),
("23", "14", (77.7862, -60.0, 120.0167, -50.0)),
("24", "14", (93.3434, -60.0, 140.0167, -50.0)),
("25", "14", (108.9007, -60.0, 160.0167, -50.0)),
("26", "14", (124.4579, -60.0, 180.0, -50.0)),
("27", "14", (140.0151, -60.0, 180.0, -50.0)),
("28", "14", (155.5724, -56.25, 180.0, -50.0)),
("29", "14", (171.1296, -52.3333, 180.0, -50.0)),
("09", "15", (-180.0, -63.6167, -159.9833, -60.0)),
("10", "15", (-180.0, -67.1167, -139.9833, -60.0)),
("11", "15", (-180.0, -70.0, -119.9833, -60.0)),
("12", "15", (-175.4283, -70.0, -99.9833, -60.0)),
("13", "15", (-146.1902, -70.0, -79.9833, -60.0)),
("14", "15", (-116.9522, -70.0, -59.9833, -60.0)),
("15", "15", (-87.7141, -70.0, -39.9833, -60.0)),
("16", "15", (-58.4761, -70.0, -19.9833, -60.0)),
("17", "15", (-29.238, -70.0, 0.0244, -60.0)),
("18", "15", (0.0, -70.0, 29.2624, -60.0)),
("19", "15", (20.0, -70.0, 58.5005, -60.0)),
("20", "15", (40.0, -70.0, 87.7385, -60.0)),
("21", "15", (60.0, -70.0, 116.9765, -60.0)),
("22", "15", (80.0, -70.0, 146.2146, -60.0)),
("23", "15", (100.0, -70.0, 175.4526, -60.0)),
("24", "15", (120.0, -70.0, 180.0, -60.0)),
("25", "15", (140.0, -67.1167, 180.0, -60.0)),
("26", "15", (160.0, -63.6167, 180.0, -60.0)),
("11", "16", (-180.0, -70.5333, -175.4039, -70.0)),
("12", "16", (-180.0, -73.875, -146.1659, -70.0)),
("13", "16", (-180.0, -77.1667, -116.9278, -70.0)),
("14", "16", (-180.0, -80.0, -87.6898, -70.0)),
("15", "16", (-172.7631, -80.0, -58.4517, -70.0)),
("16", "16", (-115.1754, -80.0, -29.2137, -70.0)),
("17", "16", (-57.5877, -80.0, 0.048, -70.0)),
("18", "16", (0.0, -80.0, 57.6357, -70.0)),
("19", "16", (29.238, -80.0, 115.2234, -70.0)),
("20", "16", (58.4761, -80.0, 172.8111, -70.0)),
("21", "16", (87.7141, -80.0, 180.0, -70.0)),
("22", "16", (116.9522, -77.1583, 180.0, -70.0)),
("23", "16", (146.1902, -73.875, 180.0, -70.0)),
("24", "16", (175.4283, -70.5333, 180.0, -70.0)),
("14", "17", (-180.0, -80.4083, -172.7151, -80.0)),
("15", "17", (-180.0, -83.625, -115.1274, -80.0)),
("16", "17", (-180.0, -86.8167, -57.5397, -80.0)),
("17", "17", (-180.0, -90.0, 57.2957, -80.0)),
("18", "17", (-0.004, -90.0, 180.0, -80.0)),
("19", "17", (57.5877, -86.8167, 180.0, -80.0)),
("20", "17", (115.1754, -83.625, 180.0, -80.0)),
("21", "17", (172.7631, -80.4083, 180.0, -80.0)),
]
def tile_bbox(
    horizontal_grid: str, vertical_grid: str
) -> Tuple[float, float, float, float]:
    """Return the WGS84 bounding box for a MODLAND grid tile.

    Parameters
    ----------
    horizontal_grid : str
        Zero-padded horizontal tile index (e.g. ``"18"``).
    vertical_grid : str
        Zero-padded vertical tile index (e.g. ``"09"``).

    Returns
    -------
    tuple of float
        ``(xmin, ymin, xmax, ymax)`` in WGS84 degrees.

    Raises
    ------
    InvalidModlandGridID
        If no non-fill tile exists at the requested grid position.
    """
    # Stop at the first match instead of materializing and scanning the
    # whole 460-entry lookup table.
    bounds = next(
        (
            entry[2]
            for entry in MODLAND_GRID
            if entry[0] == horizontal_grid and entry[1] == vertical_grid
        ),
        None,
    )
    if bounds is None:
        raise InvalidModlandGridID(
            f"Could not find bounds for grid h{horizontal_grid}v{vertical_grid}"
        )
    return bounds
from rio_tiler.errors import RioTilerError
class InvalidModlandGridID(RioTilerError):
    """Raised when a (horizontal, vertical) MODLAND grid index has no tile.

    Only the 460 non-fill tiles are listed in ``MODLAND_GRID``; any other
    index pair triggers this error.
    """
# Only the 460 non-fill MODLAND tiles are listed.
# Entry format:
#   (horizontal_grid, vertical_grid, (xmin, ymin, xmax, ymax))  # bbox in WGS84 degrees
MODLAND_GRID = [
("14", "00", (-180.0, 80.0, -172.7151, 80.4083)),
("15", "00", (-180.0, 80.0, -115.1274, 83.625)),
("16", "00", (-180.0, 80.0, -57.5397, 86.8167)),
("17", "00", (-180.0, 80.0, 57.2957, 90.0)),
("18", "00", (-0.004, 80.0, 180.0, 90.0)),
("19", "00", (57.5877, 80.0, 180.0, 86.8167)),
("20", "00", (115.1754, 80.0, 180.0, 83.625)),
("21", "00", (172.7631, 80.0, 180.0, 80.4083)),
("11", "01", (-180.0, 70.0, -175.4039, 70.5333)),
("12", "01", (-180.0, 70.0, -146.1659, 73.875)),
("13", "01", (-180.0, 70.0, -116.9278, 77.1667)),
("14", "01", (-180.0, 70.0, -87.6898, 80.0)),
("15", "01", (-172.7631, 70.0, -58.4517, 80.0)),
("16", "01", (-115.1754, 70.0, -29.2137, 80.0)),
("17", "01", (-57.5877, 70.0, 0.048, 80.0)),
("18", "01", (0.0, 70.0, 57.6357, 80.0)),
("19", "01", (29.238, 70.0, 115.2234, 80.0)),
("20", "01", (58.4761, 70.0, 172.8111, 80.0)),
("21", "01", (87.7141, 70.0, 180.0, 80.0)),
("22", "01", (116.9522, 70.0, 180.0, 77.1583)),
("23", "01", (146.1902, 70.0, 180.0, 73.875)),
("24", "01", (175.4283, 70.0, 180.0, 70.5333)),
("09", "02", (-180.0, 60.0, -159.9833, 63.6167)),
("10", "02", (-180.0, 60.0, -139.9833, 67.1167)),
("11", "02", (-180.0, 60.0, -119.9833, 70.0)),
("12", "02", (-175.4283, 60.0, -99.9833, 70.0)),
("13", "02", (-146.1902, 60.0, -79.9833, 70.0)),
("14", "02", (-116.9522, 60.0, -59.9833, 70.0)),
("15", "02", (-87.7141, 60.0, -39.9833, 70.0)),
("16", "02", (-58.4761, 60.0, -19.9833, 70.0)),
("17", "02", (-29.238, 60.0, 0.0244, 70.0)),
("18", "02", (0.0, 60.0, 29.2624, 70.0)),
("19", "02", (20.0, 60.0, 58.5005, 70.0)),
("20", "02", (40.0, 60.0, 87.7385, 70.0)),
("21", "02", (60.0, 60.0, 116.9765, 70.0)),
("22", "02", (80.0, 60.0, 146.2146, 70.0)),
("23", "02", (100.0, 60.0, 175.4526, 70.0)),
("24", "02", (120.0, 60.0, 180.0, 70.0)),
("25", "02", (140.0, 60.0, 180.0, 67.1167)),
("26", "02", (160.0, 60.0, 180.0, 63.6167)),
("06", "03", (-180.0, 50.0, -171.1167, 52.3333)),
("07", "03", (-180.0, 50.0, -155.5594, 56.2583)),
("08", "03", (-180.0, 50.0, -140.0022, 60.0)),
("09", "03", (-180.0, 50.0, -124.4449, 60.0)),
("10", "03", (-160.0, 50.0, -108.8877, 60.0)),
("11", "03", (-140.0, 50.0, -93.3305, 60.0)),
("12", "03", (-120.0, 50.0, -77.7732, 60.0)),
("13", "03", (-100.0, 50.0, -62.216, 60.0)),
("14", "03", (-80.0, 50.0, -46.6588, 60.0)),
("15", "03", (-60.0, 50.0, -31.1015, 60.0)),
("16", "03", (-40.0, 50.0, -15.5443, 60.0)),
("17", "03", (-20.0, 50.0, 0.0167, 60.0)),
("18", "03", (0.0, 50.0, 20.0167, 60.0)),
("19", "03", (15.5572, 50.0, 40.0167, 60.0)),
("20", "03", (31.1145, 50.0, 60.0167, 60.0)),
("21", "03", (46.6717, 50.0, 80.0167, 60.0)),
("22", "03", (62.229, 50.0, 100.0167, 60.0)),
("23", "03", (77.7862, 50.0, 120.0167, 60.0)),
("24", "03", (93.3434, 50.0, 140.0167, 60.0)),
("25", "03", (108.9007, 50.0, 160.0167, 60.0)),
("26", "03", (124.4579, 50.0, 180.0, 60.0)),
("27", "03", (140.0151, 50.0, 180.0, 60.0)),
("28", "03", (155.5724, 50.0, 180.0, 56.25)),
("29", "03", (171.1296, 50.0, 180.0, 52.3333)),
("04", "04", (-180.0, 40.0, -169.6921, 43.7667)),
("05", "04", (-180.0, 40.0, -156.638, 48.1917)),
("06", "04", (-180.0, 40.0, -143.5839, 50.0)),
("07", "04", (-171.1296, 40.0, -130.5299, 50.0)),
("08", "04", (-155.5724, 40.0, -117.4758, 50.0)),
("09", "04", (-140.0151, 40.0, -104.4217, 50.0)),
("10", "04", (-124.4579, 40.0, -91.3676, 50.0)),
("11", "04", (-108.9007, 40.0, -78.3136, 50.0)),
("12", "04", (-93.3434, 40.0, -65.2595, 50.0)),
("13", "04", (-77.7862, 40.0, -52.2054, 50.0)),
("14", "04", (-62.229, 40.0, -39.1513, 50.0)),
("15", "04", (-46.6717, 40.0, -26.0973, 50.0)),
("16", "04", (-31.1145, 40.0, -13.0432, 50.0)),
("17", "04", (-15.5572, 40.0, 0.013, 50.0)),
("18", "04", (0.0, 40.0, 15.5702, 50.0)),
("19", "04", (13.0541, 40.0, 31.1274, 50.0)),
("20", "04", (26.1081, 40.0, 46.6847, 50.0)),
("21", "04", (39.1622, 40.0, 62.2419, 50.0)),
("22", "04", (52.2163, 40.0, 77.7992, 50.0)),
("23", "04", (65.2704, 40.0, 93.3564, 50.0)),
("24", "04", (78.3244, 40.0, 108.9136, 50.0)),
("25", "04", (91.3785, 40.0, 124.4709, 50.0)),
("26", "04", (104.4326, 40.0, 140.0281, 50.0)),
("27", "04", (117.4867, 40.0, 155.5853, 50.0)),
("28", "04", (130.5407, 40.0, 171.1426, 50.0)),
("29", "04", (143.5948, 40.0, 180.0, 50.0)),
("30", "04", (156.6489, 40.0, 180.0, 48.1917)),
("31", "04", (169.7029, 40.0, 180.0, 43.7583)),
("02", "05", (-180.0, 30.0, -173.1955, 33.5583)),
("03", "05", (-180.0, 30.0, -161.6485, 38.95)),
("04", "05", (-180.0, 30.0, -150.1014, 40.0)),
("05", "05", (-169.7029, 30.0, -138.5544, 40.0)),
("06", "05", (-156.6489, 30.0, -127.0074, 40.0)),
("07", "05", (-143.5948, 30.0, -115.4604, 40.0)),
("08", "05", (-130.5407, 30.0, -103.9134, 40.0)),
("09", "05", (-117.4867, 30.0, -92.3664, 40.0)),
("10", "05", (-104.4326, 30.0, -80.8194, 40.0)),
("11", "05", (-91.3785, 30.0, -69.2724, 40.0)),
("12", "05", (-78.3244, 30.0, -57.7254, 40.0)),
("13", "05", (-65.2704, 30.0, -46.1784, 40.0)),
("14", "05", (-52.2163, 30.0, -34.6314, 40.0)),
("15", "05", (-39.1622, 30.0, -23.0844, 40.0)),
("16", "05", (-26.1081, 30.0, -11.5374, 40.0)),
("17", "05", (-13.0541, 30.0, 0.0109, 40.0)),
("18", "05", (0.0, 30.0, 13.065, 40.0)),
("19", "05", (11.547, 30.0, 26.119, 40.0)),
("20", "05", (23.094, 30.0, 39.1731, 40.0)),
("21", "05", (34.641, 30.0, 52.2272, 40.0)),
("22", "05", (46.188, 30.0, 65.2812, 40.0)),
("23", "05", (57.735, 30.0, 78.3353, 40.0)),
("24", "05", (69.282, 30.0, 91.3894, 40.0)),
("25", "05", (80.829, 30.0, 104.4435, 40.0)),
("26", "05", (92.376, 30.0, 117.4975, 40.0)),
("27", "05", (103.923, 30.0, 130.5516, 40.0)),
("28", "05", (115.4701, 30.0, 143.6057, 40.0)),
("29", "05", (127.0171, 30.0, 156.6598, 40.0)),
("30", "05", (138.5641, 30.0, 169.7138, 40.0)),
("31", "05", (150.1111, 30.0, 180.0, 40.0)),
("32", "05", (161.6581, 30.0, 180.0, 38.9417)),
("33", "05", (173.2051, 30.0, 180.0, 33.5583)),
("01", "06", (-180.0, 20.0, -170.2596, 27.2667)),
("02", "06", (-180.0, 20.0, -159.6178, 30.0)),
("03", "06", (-173.2051, 20.0, -148.976, 30.0)),
("04", "06", (-161.6581, 20.0, -138.3342, 30.0)),
("05", "06", (-150.1111, 20.0, -127.6925, 30.0)),
("06", "06", (-138.5641, 20.0, -117.0507, 30.0)),
("07", "06", (-127.0171, 20.0, -106.4089, 30.0)),
("08", "06", (-115.4701, 20.0, -95.7671, 30.0)),
("09", "06", (-103.923, 20.0, -85.1254, 30.0)),
("10", "06", (-92.376, 20.0, -74.4836, 30.0)),
("11", "06", (-80.829, 20.0, -63.8418, 30.0)),
("12", "06", (-69.282, 20.0, -53.2, 30.0)),
("13", "06", (-57.735, 20.0, -42.5582, 30.0)),
("14", "06", (-46.188, 20.0, -31.9165, 30.0)),
("15", "06", (-34.641, 20.0, -21.2747, 30.0)),
("16", "06", (-23.094, 20.0, -10.6329, 30.0)),
("17", "06", (-11.547, 20.0, 0.0096, 30.0)),
("18", "06", (0.0, 20.0, 11.5566, 30.0)),
("19", "06", (10.6418, 20.0, 23.1036, 30.0)),
("20", "06", (21.2836, 20.0, 34.6506, 30.0)),
("21", "06", (31.9253, 20.0, 46.1976, 30.0)),
("22", "06", (42.5671, 20.0, 57.7446, 30.0)),
("23", "06", (53.2089, 20.0, 69.2917, 30.0)),
("24", "06", (63.8507, 20.0, 80.8387, 30.0)),
("25", "06", (74.4924, 20.0, 92.3857, 30.0)),
("26", "06", (85.1342, 20.0, 103.9327, 30.0)),
("27", "06", (95.776, 20.0, 115.4797, 30.0)),
("28", "06", (106.4178, 20.0, 127.0267, 30.0)),
("29", "06", (117.0596, 20.0, 138.5737, 30.0)),
("30", "06", (127.7013, 20.0, 150.1207, 30.0)),
("31", "06", (138.3431, 20.0, 161.6677, 30.0)),
("32", "06", (148.9849, 20.0, 173.2147, 30.0)),
("33", "06", (159.6267, 20.0, 180.0, 30.0)),
("34", "06", (170.2684, 20.0, 180.0, 27.2667)),
("00", "07", (-180.0, 10.0, -172.6141, 19.1917)),
("01", "07", (-180.0, 10.0, -162.4598, 20.0)),
("02", "07", (-170.2684, 10.0, -152.3055, 20.0)),
("03", "07", (-159.6267, 10.0, -142.1513, 20.0)),
("04", "07", (-148.9849, 10.0, -131.997, 20.0)),
("05", "07", (-138.3431, 10.0, -121.8427, 20.0)),
("06", "07", (-127.7013, 10.0, -111.6885, 20.0)),
("07", "07", (-117.0596, 10.0, -101.5342, 20.0)),
("08", "07", (-106.4178, 10.0, -91.3799, 20.0)),
("09", "07", (-95.776, 10.0, -81.2257, 20.0)),
("10", "07", (-85.1342, 10.0, -71.0714, 20.0)),
("11", "07", (-74.4924, 10.0, -60.9171, 20.0)),
("12", "07", (-63.8507, 10.0, -50.7629, 20.0)),
("13", "07", (-53.2089, 10.0, -40.6086, 20.0)),
("14", "07", (-42.5671, 10.0, -30.4543, 20.0)),
("15", "07", (-31.9253, 10.0, -20.3001, 20.0)),
("16", "07", (-21.2836, 10.0, -10.1458, 20.0)),
("17", "07", (-10.6418, 10.0, 0.0089, 20.0)),
("18", "07", (0.0, 10.0, 10.6506, 20.0)),
("19", "07", (10.1543, 10.0, 21.2924, 20.0)),
("20", "07", (20.3085, 10.0, 31.9342, 20.0)),
("21", "07", (30.4628, 10.0, 42.576, 20.0)),
("22", "07", (40.6171, 10.0, 53.2178, 20.0)),
("23", "07", (50.7713, 10.0, 63.8595, 20.0)),
("24", "07", (60.9256, 10.0, 74.5013, 20.0)),
("25", "07", (71.0799, 10.0, 85.1431, 20.0)),
("26", "07", (81.2341, 10.0, 95.7849, 20.0)),
("27", "07", (91.3884, 10.0, 106.4266, 20.0)),
("28", "07", (101.5427, 10.0, 117.0684, 20.0)),
("29", "07", (111.6969, 10.0, 127.7102, 20.0)),
("30", "07", (121.8512, 10.0, 138.352, 20.0)),
("31", "07", (132.0055, 10.0, 148.9938, 20.0)),
("32", "07", (142.1597, 10.0, 159.6355, 20.0)),
("33", "07", (152.314, 10.0, 170.2773, 20.0)),
("34", "07", (162.4683, 10.0, 180.0, 20.0)),
("35", "07", (172.6225, 10.0, 180.0, 19.1833)),
("00", "08", (-180.0, -0.0, -169.9917, 10.0)),
("01", "08", (-172.6225, -0.0, -159.9917, 10.0)),
("02", "08", (-162.4683, -0.0, -149.9917, 10.0)),
("03", "08", (-152.314, -0.0, -139.9917, 10.0)),
("04", "08", (-142.1597, -0.0, -129.9917, 10.0)),
("05", "08", (-132.0055, -0.0, -119.9917, 10.0)),
("06", "08", (-121.8512, -0.0, -109.9917, 10.0)),
("07", "08", (-111.6969, -0.0, -99.9917, 10.0)),
("08", "08", (-101.5427, -0.0, -89.9917, 10.0)),
("09", "08", (-91.3884, -0.0, -79.9917, 10.0)),
("10", "08", (-81.2341, -0.0, -69.9917, 10.0)),
("11", "08", (-71.0799, -0.0, -59.9917, 10.0)),
("12", "08", (-60.9256, -0.0, -49.9917, 10.0)),
("13", "08", (-50.7713, -0.0, -39.9917, 10.0)),
("14", "08", (-40.6171, -0.0, -29.9917, 10.0)),
("15", "08", (-30.4628, -0.0, -19.9917, 10.0)),
("16", "08", (-20.3085, -0.0, -9.9917, 10.0)),
("17", "08", (-10.1543, -0.0, 0.0085, 10.0)),
("18", "08", (0.0, -0.0, 10.1627, 10.0)),
("19", "08", (10.0, -0.0, 20.317, 10.0)),
("20", "08", (20.0, -0.0, 30.4713, 10.0)),
("21", "08", (30.0, -0.0, 40.6255, 10.0)),
("22", "08", (40.0, -0.0, 50.7798, 10.0)),
("23", "08", (50.0, -0.0, 60.9341, 10.0)),
("24", "08", (60.0, -0.0, 71.0883, 10.0)),
("25", "08", (70.0, -0.0, 81.2426, 10.0)),
("26", "08", (80.0, -0.0, 91.3969, 10.0)),
("27", "08", (90.0, -0.0, 101.5511, 10.0)),
("28", "08", (100.0, -0.0, 111.7054, 10.0)),
("29", "08", (110.0, -0.0, 121.8597, 10.0)),
("30", "08", (120.0, -0.0, 132.0139, 10.0)),
("31", "08", (130.0, -0.0, 142.1682, 10.0)),
("32", "08", (140.0, -0.0, 152.3225, 10.0)),
("33", "08", (150.0, -0.0, 162.4767, 10.0)),
("34", "08", (160.0, -0.0, 172.631, 10.0)),
("35", "08", (170.0, -0.0, 180.0, 10.0)),
("00", "09", (-180.0, -10.0, -169.9917, -0.0)),
("01", "09", (-172.6225, -10.0, -159.9917, -0.0)),
("02", "09", (-162.4683, -10.0, -149.9917, -0.0)),
("03", "09", (-152.314, -10.0, -139.9917, -0.0)),
("04", "09", (-142.1597, -10.0, -129.9917, -0.0)),
("05", "09", (-132.0055, -10.0, -119.9917, -0.0)),
("06", "09", (-121.8512, -10.0, -109.9917, -0.0)),
("07", "09", (-111.6969, -10.0, -99.9917, -0.0)),
("08", "09", (-101.5427, -10.0, -89.9917, -0.0)),
("09", "09", (-91.3884, -10.0, -79.9917, -0.0)),
("10", "09", (-81.2341, -10.0, -69.9917, -0.0)),
("11", "09", (-71.0799, -10.0, -59.9917, -0.0)),
("12", "09", (-60.9256, -10.0, -49.9917, -0.0)),
("13", "09", (-50.7713, -10.0, -39.9917, -0.0)),
("14", "09", (-40.6171, -10.0, -29.9917, -0.0)),
("15", "09", (-30.4628, -10.0, -19.9917, -0.0)),
("16", "09", (-20.3085, -10.0, -9.9917, -0.0)),
("17", "09", (-10.1543, -10.0, 0.0085, -0.0)),
("18", "09", (0.0, -10.0, 10.1627, -0.0)),
("19", "09", (10.0, -10.0, 20.317, -0.0)),
("20", "09", (20.0, -10.0, 30.4713, -0.0)),
("21", "09", (30.0, -10.0, 40.6255, -0.0)),
("22", "09", (40.0, -10.0, 50.7798, -0.0)),
("23", "09", (50.0, -10.0, 60.9341, -0.0)),
("24", "09", (60.0, -10.0, 71.0883, -0.0)),
("25", "09", (70.0, -10.0, 81.2426, -0.0)),
("26", "09", (80.0, -10.0, 91.3969, -0.0)),
("27", "09", (90.0, -10.0, 101.5511, -0.0)),
("28", "09", (100.0, -10.0, 111.7054, -0.0)),
("29", "09", (110.0, -10.0, 121.8597, -0.0)),
("30", "09", (120.0, -10.0, 132.0139, -0.0)),
("31", "09", (130.0, -10.0, 142.1682, -0.0)),
("32", "09", (140.0, -10.0, 152.3225, -0.0)),
("33", "09", (150.0, -10.0, 162.4767, -0.0)),
("34", "09", (160.0, -10.0, 172.631, -0.0)),
("35", "09", (170.0, -10.0, 180.0, -0.0)),
("00", "10", (-180.0, -19.1917, -172.6141, -10.0)),
("01", "10", (-180.0, -20.0, -162.4598, -10.0)),
("02", "10", (-170.2684, -20.0, -152.3055, -10.0)),
("03", "10", (-159.6267, -20.0, -142.1513, -10.0)),
("04", "10", (-148.9849, -20.0, -131.997, -10.0)),
("05", "10", (-138.3431, -20.0, -121.8427, -10.0)),
("06", "10", (-127.7013, -20.0, -111.6885, -10.0)),
("07", "10", (-117.0596, -20.0, -101.5342, -10.0)),
("08", "10", (-106.4178, -20.0, -91.3799, -10.0)),
("09", "10", (-95.776, -20.0, -81.2257, -10.0)),
("10", "10", (-85.1342, -20.0, -71.0714, -10.0)),
("11", "10", (-74.4924, -20.0, -60.9171, -10.0)),
("12", "10", (-63.8507, -20.0, -50.7629, -10.0)),
("13", "10", (-53.2089, -20.0, -40.6086, -10.0)),
("14", "10", (-42.5671, -20.0, -30.4543, -10.0)),
("15", "10", (-31.9253, -20.0, -20.3001, -10.0)),
("16", "10", (-21.2836, -20.0, -10.1458, -10.0)),
("17", "10", (-10.6418, -20.0, 0.0089, -10.0)),
("18", "10", (0.0, -20.0, 10.6506, -10.0)),
("19", "10", (10.1543, -20.0, 21.2924, -10.0)),
("20", "10", (20.3085, -20.0, 31.9342, -10.0)),
("21", "10", (30.4628, -20.0, 42.576, -10.0)),
("22", "10", (40.6171, -20.0, 53.2178, -10.0)),
("23", "10", (50.7713, -20.0, 63.8595, -10.0)),
("24", "10", (60.9256, -20.0, 74.5013, -10.0)),
("25", "10", (71.0799, -20.0, 85.1431, -10.0)),
("26", "10", (81.2341, -20.0, 95.7849, -10.0)),
("27", "10", (91.3884, -20.0, 106.4266, -10.0)),
("28", "10", (101.5427, -20.0, 117.0684, -10.0)),
("29", "10", (111.6969, -20.0, 127.7102, -10.0)),
("30", "10", (121.8512, -20.0, 138.352, -10.0)),
("31", "10", (132.0055, -20.0, 148.9938, -10.0)),
("32", "10", (142.1597, -20.0, 159.6355, -10.0)),
("33", "10", (152.314, -20.0, 170.2773, -10.0)),
("34", "10", (162.4683, -20.0, 180.0, -10.0)),
("35", "10", (172.6225, -19.1833, 180.0, -10.0)),
("01", "11", (-180.0, -27.2667, -170.2596, -20.0)),
("02", "11", (-180.0, -30.0, -159.6178, -20.0)),
("03", "11", (-173.2051, -30.0, -148.976, -20.0)),
("04", "11", (-161.6581, -30.0, -138.3342, -20.0)),
("05", "11", (-150.1111, -30.0, -127.6925, -20.0)),
("06", "11", (-138.5641, -30.0, -117.0507, -20.0)),
("07", "11", (-127.0171, -30.0, -106.4089, -20.0)),
("08", "11", (-115.4701, -30.0, -95.7671, -20.0)),
("09", "11", (-103.923, -30.0, -85.1254, -20.0)),
("10", "11", (-92.376, -30.0, -74.4836, -20.0)),
("11", "11", (-80.829, -30.0, -63.8418, -20.0)),
("12", "11", (-69.282, -30.0, -53.2, -20.0)),
("13", "11", (-57.735, -30.0, -42.5582, -20.0)),
("14", "11", (-46.188, -30.0, -31.9165, -20.0)),
("15", "11", (-34.641, -30.0, -21.2747, -20.0)),
("16", "11", (-23.094, -30.0, -10.6329, -20.0)),
("17", "11", (-11.547, -30.0, 0.0096, -20.0)),
("18", "11", (0.0, -30.0, 11.5566, -20.0)),
("19", "11", (10.6418, -30.0, 23.1036, -20.0)),
("20", "11", (21.2836, -30.0, 34.6506, -20.0)),
("21", "11", (31.9253, -30.0, 46.1976, -20.0)),
("22", "11", (42.5671, -30.0, 57.7446, -20.0)),
("23", "11", (53.2089, -30.0, 69.2917, -20.0)),
("24", "11", (63.8507, -30.0, 80.8387, -20.0)),
("25", "11", (74.4924, -30.0, 92.3857, -20.0)),
("26", "11", (85.1342, -30.0, 103.9327, -20.0)),
("27", "11", (95.776, -30.0, 115.4797, -20.0)),
("28", "11", (106.4178, -30.0, 127.0267, -20.0)),
("29", "11", (117.0596, -30.0, 138.5737, -20.0)),
("30", "11", (127.7013, -30.0, 150.1207, -20.0)),
("31", "11", (138.3431, -30.0, 161.6677, -20.0)),
("32", "11", (148.9849, -30.0, 173.2147, -20.0)),
("33", "11", (159.6267, -30.0, 180.0, -20.0)),
("34", "11", (170.2684, -27.2667, 180.0, -20.0)),
("02", "12", (-180.0, -33.5583, -173.1955, -30.0)),
("03", "12", (-180.0, -38.95, -161.6485, -30.0)),
("04", "12", (-180.0, -40.0, -150.1014, -30.0)),
("05", "12", (-169.7029, -40.0, -138.5544, -30.0)),
("06", "12", (-156.6489, -40.0, -127.0074, -30.0)),
("07", "12", (-143.5948, -40.0, -115.4604, -30.0)),
("08", "12", (-130.5407, -40.0, -103.9134, -30.0)),
("09", "12", (-117.4867, -40.0, -92.3664, -30.0)),
("10", "12", (-104.4326, -40.0, -80.8194, -30.0)),
("11", "12", (-91.3785, -40.0, -69.2724, -30.0)),
("12", "12", (-78.3244, -40.0, -57.7254, -30.0)),
("13", "12", (-65.2704, -40.0, -46.1784, -30.0)),
("14", "12", (-52.2163, -40.0, -34.6314, -30.0)),
("15", "12", (-39.1622, -40.0, -23.0844, -30.0)),
("16", "12", (-26.1081, -40.0, -11.5374, -30.0)),
("17", "12", (-13.0541, -40.0, 0.0109, -30.0)),
("18", "12", (0.0, -40.0, 13.065, -30.0)),
("19", "12", (11.547, -40.0, 26.119, -30.0)),
("20", "12", (23.094, -40.0, 39.1731, -30.0)),
("21", "12", (34.641, -40.0, 52.2272, -30.0)),
("22", "12", (46.188, -40.0, 65.2812, -30.0)),
("23", "12", (57.735, -40.0, 78.3353, -30.0)),
("24", "12", (69.282, -40.0, 91.3894, -30.0)),
("25", "12", (80.829, -40.0, 104.4435, -30.0)),
("26", "12", (92.376, -40.0, 117.4975, -30.0)),
("27", "12", (103.923, -40.0, 130.5516, -30.0)),
("28", "12", (115.4701, -40.0, 143.6057, -30.0)),
("29", "12", (127.0171, -40.0, 156.6598, -30.0)),
("30", "12", (138.5641, -40.0, 169.7138, -30.0)),
("31", "12", (150.1111, -40.0, 180.0, -30.0)),
("32", "12", (161.6581, -38.9417, 180.0, -30.0)),
("33", "12", (173.2051, -33.5583, 180.0, -30.0)),
("04", "13", (-180.0, -43.7667, -169.6921, -40.0)),
("05", "13", (-180.0, -48.1917, -156.638, -40.0)),
("06", "13", (-180.0, -50.0, -143.5839, -40.0)),
("07", "13", (-171.1296, -50.0, -130.5299, -40.0)),
("08", "13", (-155.5724, -50.0, -117.4758, -40.0)),
("09", "13", (-140.0151, -50.0, -104.4217, -40.0)),
("10", "13", (-124.4579, -50.0, -91.3676, -40.0)),
("11", "13", (-108.9007, -50.0, -78.3136, -40.0)),
("12", "13", (-93.3434, -50.0, -65.2595, -40.0)),
("13", "13", (-77.7862, -50.0, -52.2054, -40.0)),
("14", "13", (-62.229, -50.0, -39.1513, -40.0)),
("15", "13", (-46.6717, -50.0, -26.0973, -40.0)),
("16", "13", (-31.1145, -50.0, -13.0432, -40.0)),
("17", "13", (-15.5572, -50.0, 0.013, -40.0)),
("18", "13", (0.0, -50.0, 15.5702, -40.0)),
("19", "13", (13.0541, -50.0, 31.1274, -40.0)),
("20", "13", (26.1081, -50.0, 46.6847, -40.0)),
("21", "13", (39.1622, -50.0, 62.2419, -40.0)),
("22", "13", (52.2163, -50.0, 77.7992, -40.0)),
("23", "13", (65.2704, -50.0, 93.3564, -40.0)),
("24", "13", (78.3244, -50.0, 108.9136, -40.0)),
("25", "13", (91.3785, -50.0, 124.4709, -40.0)),
("26", "13", (104.4326, -50.0, 140.0281, -40.0)),
("27", "13", (117.4867, -50.0, 155.5853, -40.0)),
("28", "13", (130.5407, -50.0, 171.1426, -40.0)),
("29", "13", (143.5948, -50.0, 180.0, -40.0)),
("30", "13", (156.6489, -48.1917, 180.0, -40.0)),
("31", "13", (169.7029, -43.7583, 180.0, -40.0)),
("06", "14", (-180.0, -52.3333, -171.1167, -50.0)),
("07", "14", (-180.0, -56.2583, -155.5594, -50.0)),
("08", "14", (-180.0, -60.0, -140.0022, -50.0)),
("09", "14", (-180.0, -60.0, -124.4449, -50.0)),
("10", "14", (-160.0, -60.0, -108.8877, -50.0)),
("11", "14", (-140.0, -60.0, -93.3305, -50.0)),
("12", "14", (-120.0, -60.0, -77.7732, -50.0)),
("13", "14", (-100.0, -60.0, -62.216, -50.0)),
("14", "14", (-80.0, -60.0, -46.6588, -50.0)),
("15", "14", (-60.0, -60.0, -31.1015, -50.0)),
("16", "14", (-40.0, -60.0, -15.5443, -50.0)),
("17", "14", (-20.0, -60.0, 0.0167, -50.0)),
("18", "14", (0.0, -60.0, 20.0167, -50.0)),
("19", "14", (15.5572, -60.0, 40.0167, -50.0)),
("20", "14", (31.1145, -60.0, 60.0167, -50.0)),
("21", "14", (46.6717, -60.0, 80.0167, -50.0)),
("22", "14", (62.229, -60.0, 100.0167, -50.0)),
("23", "14", (77.7862, -60.0, 120.0167, -50.0)),
("24", "14", (93.3434, -60.0, 140.0167, -50.0)),
("25", "14", (108.9007, -60.0, 160.0167, -50.0)),
("26", "14", (124.4579, -60.0, 180.0, -50.0)),
("27", "14", (140.0151, -60.0, 180.0, -50.0)),
("28", "14", (155.5724, -56.25, 180.0, -50.0)),
("29", "14", (171.1296, -52.3333, 180.0, -50.0)),
("09", "15", (-180.0, -63.6167, -159.9833, -60.0)),
("10", "15", (-180.0, -67.1167, -139.9833, -60.0)),
("11", "15", (-180.0, -70.0, -119.9833, -60.0)),
("12", "15", (-175.4283, -70.0, -99.9833, -60.0)),
("13", "15", (-146.1902, -70.0, -79.9833, -60.0)),
("14", "15", (-116.9522, -70.0, -59.9833, -60.0)),
("15", "15", (-87.7141, -70.0, -39.9833, -60.0)),
("16", "15", (-58.4761, -70.0, -19.9833, -60.0)),
("17", "15", (-29.238, -70.0, 0.0244, -60.0)),
("18", "15", (0.0, -70.0, 29.2624, -60.0)),
("19", "15", (20.0, -70.0, 58.5005, -60.0)),
("20", "15", (40.0, -70.0, 87.7385, -60.0)),
("21", "15", (60.0, -70.0, 116.9765, -60.0)),
("22", "15", (80.0, -70.0, 146.2146, -60.0)),
("23", "15", (100.0, -70.0, 175.4526, -60.0)),
("24", "15", (120.0, -70.0, 180.0, -60.0)),
("25", "15", (140.0, -67.1167, 180.0, -60.0)),
("26", "15", (160.0, -63.6167, 180.0, -60.0)),
("11", "16", (-180.0, -70.5333, -175.4039, -70.0)),
("12", "16", (-180.0, -73.875, -146.1659, -70.0)),
("13", "16", (-180.0, -77.1667, -116.9278, -70.0)),
("14", "16", (-180.0, -80.0, -87.6898, -70.0)),
("15", "16", (-172.7631, -80.0, -58.4517, -70.0)),
("16", "16", (-115.1754, -80.0, -29.2137, -70.0)),
("17", "16", (-57.5877, -80.0, 0.048, -70.0)),
("18", "16", (0.0, -80.0, 57.6357, -70.0)),
("19", "16", (29.238, -80.0, 115.2234, -70.0)),
("20", "16", (58.4761, -80.0, 172.8111, -70.0)),
("21", "16", (87.7141, -80.0, 180.0, -70.0)),
("22", "16", (116.9522, -77.1583, 180.0, -70.0)),
("23", "16", (146.1902, -73.875, 180.0, -70.0)),
("24", "16", (175.4283, -70.5333, 180.0, -70.0)),
("14", "17", (-180.0, -80.4083, -172.7151, -80.0)),
("15", "17", (-180.0, -83.625, -115.1274, -80.0)),
("16", "17", (-180.0, -86.8167, -57.5397, -80.0)),
("17", "17", (-180.0, -90.0, 57.2957, -80.0)),
("18", "17", (-0.004, -90.0, 180.0, -80.0)),
("19", "17", (57.5877, -86.8167, 180.0, -80.0)),
("20", "17", (115.1754, -83.625, 180.0, -80.0)),
("21", "17", (172.7631, -80.4083, 180.0, -80.0)),
]
def tile_bbox(
    horizontal_grid: str, vertical_grid: str
) -> Tuple[float, float, float, float]:
    """Return the WGS84 bounding box for a MODLAND grid tile.

    Parameters
    ----------
    horizontal_grid : str
        Zero-padded horizontal tile index (e.g. ``"18"``).
    vertical_grid : str
        Zero-padded vertical tile index (e.g. ``"09"``).

    Returns
    -------
    tuple of float
        ``(xmin, ymin, xmax, ymax)`` in WGS84 degrees.

    Raises
    ------
    InvalidModlandGridID
        If no non-fill tile exists at the requested grid position.
    """
    # Stop at the first match instead of materializing and scanning the
    # whole 460-entry lookup table.
    bounds = next(
        (
            entry[2]
            for entry in MODLAND_GRID
            if entry[0] == horizontal_grid and entry[1] == vertical_grid
        ),
        None,
    )
    if bounds is None:
        raise InvalidModlandGridID(
            f"Could not find bounds for grid h{horizontal_grid}v{vertical_grid}"
        )
    return bounds
import torch
import torch.nn as nn
from typing import Tuple
from openspeech.encoders.openspeech_encoder import OpenspeechEncoder
from openspeech.modules import Conv2dSubsampling, Linear, ConformerBlock, Transpose
class ConformerEncoder(OpenspeechEncoder):
    r"""
    Conformer encoder for speech recognition.

    Transformer models are good at capturing content-based global interactions,
    while CNNs exploit local features effectively. Conformer achieves the best
    of both worlds by combining convolution neural networks and transformers to
    model both local and global dependencies of an audio sequence in a
    parameter-efficient way.

    Args:
        num_classes (int): Number of classification
        input_dim (int, optional): Dimension of input vector
        encoder_dim (int, optional): Dimension of conformer encoders
        num_layers (int, optional): Number of conformer blocks
        num_attention_heads (int, optional): Number of attention heads
        feed_forward_expansion_factor (int, optional): Expansion factor of feed forward module
        conv_expansion_factor (int, optional): Expansion factor of conformer convolution module
        input_dropout_p (float, optional): Probability of dropout applied after input projection
        feed_forward_dropout_p (float, optional): Probability of feed forward module dropout
        attention_dropout_p (float, optional): Probability of attention module dropout
        conv_dropout_p (float, optional): Probability of conformer convolution module dropout
        conv_kernel_size (int or tuple, optional): Size of the convolving kernel
        half_step_residual (bool): Flag indication whether to use half step residual or not
        joint_ctc_attention (bool, optional): flag indication joint ctc attention or not

    Inputs: inputs, input_lengths
        - **inputs** (batch, time, dim): Tensor containing input vector
        - **input_lengths** (batch): list of sequence input lengths

    Returns: outputs, encoder_logits, output_lengths
        - **outputs**: Tensor produced by the conformer blocks
        - **encoder_logits**: CTC log-probabilities (``None`` unless
          ``joint_ctc_attention`` is enabled)
        - **output_lengths** (batch): list of sequence output lengths

    Reference:
        Anmol Gulati et al.: Conformer: Convolution-augmented Transformer for
        Speech Recognition
        https://arxiv.org/abs/2005.08100
    """
    def __init__(
        self,
        num_classes: int,
        input_dim: int = 80,
        encoder_dim: int = 512,
        num_layers: int = 17,
        num_attention_heads: int = 8,
        feed_forward_expansion_factor: int = 4,
        conv_expansion_factor: int = 2,
        input_dropout_p: float = 0.1,
        feed_forward_dropout_p: float = 0.1,
        attention_dropout_p: float = 0.1,
        conv_dropout_p: float = 0.1,
        conv_kernel_size: int = 31,
        half_step_residual: bool = True,
        joint_ctc_attention: bool = True,
    ) -> None:
        super(ConformerEncoder, self).__init__()
        self.joint_ctc_attention = joint_ctc_attention
        # Conv2d subsampling reduces time resolution before the conformer blocks.
        self.conv_subsample = Conv2dSubsampling(input_dim, in_channels=1, out_channels=encoder_dim)
        self.input_projection = nn.Sequential(
            Linear(self.conv_subsample.get_output_dim(), encoder_dim),
            nn.Dropout(p=input_dropout_p),
        )
        self.layers = nn.ModuleList([
            ConformerBlock(
                encoder_dim=encoder_dim,
                num_attention_heads=num_attention_heads,
                feed_forward_expansion_factor=feed_forward_expansion_factor,
                conv_expansion_factor=conv_expansion_factor,
                feed_forward_dropout_p=feed_forward_dropout_p,
                attention_dropout_p=attention_dropout_p,
                conv_dropout_p=conv_dropout_p,
                conv_kernel_size=conv_kernel_size,
                half_step_residual=half_step_residual,
            ) for _ in range(num_layers)
        ])
        if self.joint_ctc_attention:
            # Projection to vocabulary size for the auxiliary CTC loss head.
            self.fc = nn.Sequential(
                Transpose(shape=(1, 2)),
                nn.Dropout(feed_forward_dropout_p),
                Linear(encoder_dim, num_classes, bias=False),
            )

    def forward(
        self,
        inputs: torch.Tensor,
        input_lengths: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        r"""
        Forward propagate `inputs` for encoder training.

        Args:
            inputs (torch.FloatTensor): padded input sequence passed to the
                encoder, of size ``(batch, seq_length, dimension)``.
            input_lengths (torch.LongTensor): The length of input tensor. ``(batch)``

        Returns:
            (Tensor, Tensor, Tensor)

            * outputs: output sequence of the encoder,
              ``(batch, seq_length, dimension)``.
            * encoder_logits: log-probabilities passed to the CTC loss;
              ``None`` when ``joint_ctc_attention`` is False.
            * output_lengths: length of each encoder output. ``(batch)``
        """
        encoder_logits = None
        outputs, output_lengths = self.conv_subsample(inputs, input_lengths)
        outputs = self.input_projection(outputs)
        for layer in self.layers:
            outputs = layer(outputs)
        if self.joint_ctc_attention:
            # NOTE(review): `self.fc` begins with Transpose(shape=(1, 2)),
            # which undoes the `.transpose(1, 2)` applied here, so Linear
            # still operates on the feature dimension. The pair is redundant
            # but harmless; kept to preserve checkpoint compatibility.
            encoder_logits = self.fc(outputs.transpose(1, 2)).log_softmax(dim=2)
        return outputs, encoder_logits, output_lengths
import torch
import torch.nn as nn
from typing import Tuple
from openspeech.encoders.openspeech_encoder import OpenspeechEncoder
from openspeech.modules import Conv2dSubsampling, Linear, ConformerBlock, Transpose
class ConformerEncoder(OpenspeechEncoder):
    r"""
    Convolution-augmented Transformer ("Conformer") encoder.

    Transformers capture content-based global interactions well, while CNNs
    exploit local features effectively; the Conformer combines both so that
    local and global dependencies of an audio sequence are modelled in a
    parameter-efficient way.

    Args:
        num_classes (int): Number of classification
        input_dim (int, optional): Dimension of input vector
        encoder_dim (int, optional): Dimension of conformer encoders
        num_layers (int, optional): Number of conformer blocks
        num_attention_heads (int, optional): Number of attention heads
        feed_forward_expansion_factor (int, optional): Expansion factor of feed forward module
        conv_expansion_factor (int, optional): Expansion factor of conformer convolution module
        input_dropout_p (float, optional): Probability of dropout after the input projection
        feed_forward_dropout_p (float, optional): Probability of feed forward module dropout
        attention_dropout_p (float, optional): Probability of attention module dropout
        conv_dropout_p (float, optional): Probability of conformer convolution module dropout
        conv_kernel_size (int or tuple, optional): Size of the convolving kernel
        half_step_residual (bool): Flag indication whether to use half step residual or not
        joint_ctc_attention (bool, optional): flag indication joint ctc attention or not

    Inputs: inputs, input_lengths
        - **inputs** (batch, time, dim): Tensor containing input vector
        - **input_lengths** (batch): list of sequence input lengths

    Returns: outputs, encoder_logits, output_lengths
        - **outputs**: Tensor produced by the conformer blocks
        - **encoder_logits**: CTC log-probabilities, or ``None`` when
          ``joint_ctc_attention`` is disabled
        - **output_lengths** (batch): list of sequence output lengths

    Reference:
        Anmol Gulati et al.: Conformer: Convolution-augmented Transformer for
        Speech Recognition
        https://arxiv.org/abs/2005.08100
    """
    def __init__(
        self,
        num_classes: int,
        input_dim: int = 80,
        encoder_dim: int = 512,
        num_layers: int = 17,
        num_attention_heads: int = 8,
        feed_forward_expansion_factor: int = 4,
        conv_expansion_factor: int = 2,
        input_dropout_p: float = 0.1,
        feed_forward_dropout_p: float = 0.1,
        attention_dropout_p: float = 0.1,
        conv_dropout_p: float = 0.1,
        conv_kernel_size: int = 31,
        half_step_residual: bool = True,
        joint_ctc_attention: bool = True,
    ) -> None:
        super().__init__()
        self.joint_ctc_attention = joint_ctc_attention
        # Time-resolution reduction, then projection into the model dimension.
        self.conv_subsample = Conv2dSubsampling(input_dim, in_channels=1, out_channels=encoder_dim)
        self.input_projection = nn.Sequential(
            Linear(self.conv_subsample.get_output_dim(), encoder_dim),
            nn.Dropout(p=input_dropout_p),
        )
        # Stack of identical conformer blocks.
        self.layers = nn.ModuleList(
            ConformerBlock(
                encoder_dim=encoder_dim,
                num_attention_heads=num_attention_heads,
                feed_forward_expansion_factor=feed_forward_expansion_factor,
                conv_expansion_factor=conv_expansion_factor,
                feed_forward_dropout_p=feed_forward_dropout_p,
                attention_dropout_p=attention_dropout_p,
                conv_dropout_p=conv_dropout_p,
                conv_kernel_size=conv_kernel_size,
                half_step_residual=half_step_residual,
            )
            for _ in range(num_layers)
        )
        if self.joint_ctc_attention:
            # Auxiliary CTC head projecting features to vocabulary logits.
            self.fc = nn.Sequential(
                Transpose(shape=(1, 2)),
                nn.Dropout(feed_forward_dropout_p),
                Linear(encoder_dim, num_classes, bias=False),
            )

    def forward(
        self,
        inputs: torch.Tensor,
        input_lengths: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        r"""
        Run the encoder over a batch of padded feature sequences.

        Args:
            inputs (torch.FloatTensor): padded inputs of size
                ``(batch, seq_length, dimension)``.
            input_lengths (torch.LongTensor): length of each input. ``(batch)``

        Returns:
            (Tensor, Tensor, Tensor)

            * outputs: encoder output sequence, ``(batch, seq_length, dimension)``
            * encoder_logits: log-probabilities for the CTC loss, or ``None``
              when ``joint_ctc_attention`` is disabled
            * output_lengths: length of each encoder output. ``(batch)``
        """
        outputs, output_lengths = self.conv_subsample(inputs, input_lengths)
        outputs = self.input_projection(outputs)
        for block in self.layers:
            outputs = block(outputs)
        if self.joint_ctc_attention:
            encoder_logits = self.fc(outputs.transpose(1, 2)).log_softmax(dim=2)
        else:
            encoder_logits = None
        return outputs, encoder_logits, output_lengths
import numpy as np
import pytest
import qpimage
def hologram(size=64):
x = np.arange(size).reshape(-1, 1) - size / 2
y = np.arange(size).reshape(1, -1) - size / 2
amp = np.linspace(.9, 1.1, size * size).reshape(size, size)
pha = np.linspace(0, 2, size * size).reshape(size, size)
rad = x**2 + y**2 > (size / 3)**2
pha[rad] = 0
amp[rad] = 1
# frequencies must match pixel in Fourier space
kx = 2 * np.pi * -.3
ky = 2 * np.pi * -.3
image = (amp**2 + np.sin(kx * x + ky * y + pha) + 1) * 255
return image
def test_find_sideband():
size = 40
ft_data = np.zeros((size, size))
fx = np.fft.fftshift(np.fft.fftfreq(size))
ft_data[2, 3] = 1
ft_data[-3, -2] = 1
sb1 = qpimage.holo.find_sideband(ft_data=ft_data,
which=+1)
assert np.allclose(sb1, (fx[2], fx[3]))
sb2 = qpimage.holo.find_sideband(ft_data=ft_data,
which=-1)
assert np.allclose(sb2, (fx[-3], fx[-2]))
def test_find_sideband_error():
size = 40
ft_data = np.zeros((size, size))
ft_data[2, 3] = 1
ft_data[-3, -2] = 1
try:
qpimage.holo.find_sideband(ft_data=ft_data,
which=2)
except ValueError:
pass
else:
assert False, "2 is not a sideband"
def test_fourier2dpad():
data = np.zeros((100, 120))
fft1 = qpimage.holo.fourier2dpad(data, zero_pad=True)
assert fft1.shape == (256, 256)
fft2 = qpimage.holo.fourier2dpad(data, zero_pad=False)
assert fft2.shape == data.shape
def test_get_field_error_bad_filter_size():
holo = hologram()
with pytest.raises(ValueError, match="must be between 0 and 1"):
qpimage.holo.get_field(hologram=holo, filter_size=2)
def test_get_field_error_bad_filter_size_interpretation_frequency_index():
holo = hologram(size=64)
with pytest.raises(ValueError,
match=r"must be between 0 and max\(hologram.shape\)/2"):
qpimage.holo.get_field(hologram=holo,
filter_size_interpretation="frequency index",
filter_size=64)
def test_get_field_error_invalid_interpretation():
holo = hologram()
with pytest.raises(ValueError,
match="Invalid value for `filter_size_interpretation`"):
qpimage.holo.get_field(hologram=holo,
filter_size_interpretation="blequency")
def test_get_field_filter_names():
holo = hologram()
kwargs = dict(hologram=holo,
sideband=+1,
filter_size=1 / 3,
subtract_mean=True,
zero_pad=True)
r_disk = qpimage.holo.get_field(filter_name="disk", **kwargs)
assert np.allclose(
r_disk[32, 32], 97.307780444912936 - 76.397860381241372j)
r_smooth_disk = qpimage.holo.get_field(filter_name="smooth disk", **kwargs)
assert np.allclose(r_smooth_disk[32, 32],
108.36665064909741 - 67.176090709644185j)
r_gauss = qpimage.holo.get_field(filter_name="gauss", **kwargs)
assert np.allclose(
r_gauss[32, 32], 108.26984751082375 - 67.116410573093304j)
r_square = qpimage.holo.get_field(filter_name="square", **kwargs)
assert np.allclose(
r_square[32, 32], 102.3285348843612 - 74.139058665601155j)
r_smsquare = qpimage.holo.get_field(filter_name="smooth square", **kwargs)
assert np.allclose(
r_smsquare[32, 32], 105.23157221309754 - 70.593282942004862j)
r_tukey = qpimage.holo.get_field(filter_name="tukey", **kwargs)
assert np.allclose(
r_tukey[32, 32], 113.4826495540899 - 59.546232775481869j)
try:
qpimage.holo.get_field(filter_name="unknown", **kwargs)
except ValueError:
pass
else:
assert False, "unknown filter accepted"
@pytest.mark.parametrize("size", [62, 63, 64])
def test_get_field_interpretation_fourier_index(size):
"""Filter size in Fourier space using Fourier index new in 0.7.0"""
holo = hologram(size=size)
ft_data = qpimage.holo.fourier2dpad(data=holo, zero_pad=True)
fsx, fsy = qpimage.holo.find_sideband(ft_data, which=+1, copy=True)
kwargs1 = dict(hologram=holo,
sideband=+1,
filter_name="disk",
filter_size=1/3,
filter_size_interpretation="sideband distance",
subtract_mean=True,
zero_pad=True)
res1 = qpimage.holo.get_field(**kwargs1)
filter_size_fi = np.sqrt(fsx**2 + fsy**2) / 3 * ft_data.shape[0]
kwargs2 = dict(hologram=holo,
sideband=+1,
filter_name="disk",
filter_size=filter_size_fi,
filter_size_interpretation="frequency index",
subtract_mean=True,
zero_pad=True)
res2 = qpimage.holo.get_field(**kwargs2)
assert np.all(res1 == res2)
@pytest.mark.parametrize("size", [62, 63, 64])
def test_get_field_interpretation_fourier_index_control(size):
"""Filter size in Fourier space using Fourier index new in 0.7.0"""
holo = hologram(size=size)
ft_data = qpimage.holo.fourier2dpad(data=holo, zero_pad=True)
fsx, fsy = qpimage.holo.find_sideband(ft_data, which=+1, copy=True)
evil_factor = 1.1
kwargs1 = dict(hologram=holo,
sideband=+1,
filter_name="disk",
filter_size=1/3 * evil_factor,
filter_size_interpretation="sideband distance",
subtract_mean=True,
zero_pad=True)
res1 = qpimage.holo.get_field(**kwargs1)
filter_size_fi = np.sqrt(fsx**2 + fsy**2) / 3 * ft_data.shape[0]
kwargs2 = dict(hologram=holo,
sideband=+1,
filter_name="disk",
filter_size=filter_size_fi,
filter_size_interpretation="frequency index",
subtract_mean=True,
zero_pad=True)
res2 = qpimage.holo.get_field(**kwargs2)
assert not np.all(res1 == res2)
@pytest.mark.parametrize("size", [62, 63, 64, 134, 135])
@pytest.mark.parametrize("filter_size", [17, 17.01])
def test_get_field_interpretation_fourier_index_mask_1(size, filter_size):
"""Make sure filter size in Fourier space pixels is correct"""
holo = hologram(size=size)
kwargs2 = dict(hologram=holo,
sideband=+1,
filter_name="disk",
filter_size=filter_size,
filter_size_interpretation="frequency index",
subtract_mean=True,
zero_pad=True)
res, mask = qpimage.holo.get_field(ret_mask=True, **kwargs2)
# We get 17*2+1, because we measure from the center of Fourier
# space and a pixel is included if its center is withing the
# perimeter of the disk.
assert np.sum(np.sum(mask, axis=0) != 0) == 17*2 + 1
@pytest.mark.parametrize("size", [62, 63, 64, 134, 135])
def test_get_field_interpretation_fourier_index_mask_2(size):
"""Filter size in Fourier space using Fourier index new in 0.7.0"""
holo = hologram(size=size)
kwargs2 = dict(hologram=holo,
sideband=+1,
filter_name="disk",
filter_size=16.99,
filter_size_interpretation="frequency index",
subtract_mean=True,
zero_pad=True)
res, mask = qpimage.holo.get_field(ret_mask=True, **kwargs2)
# We get two points less than in the previous test, because we
# loose on on each side of the spectrum.
assert np.sum(np.sum(mask, axis=0) != 0) == 17*2 - 1
def test_get_field_int_copy():
holo = hologram()
holo = np.array(holo, dtype=int)
kwargs = dict(sideband=+1,
filter_size=1 / 3,
subtract_mean=True,
zero_pad=True)
res1 = qpimage.holo.get_field(hologram=holo,
copy=False,
**kwargs)
res2 = qpimage.holo.get_field(hologram=holo,
copy=True,
**kwargs)
res3 = qpimage.holo.get_field(hologram=holo.astype(float),
copy=True,
**kwargs)
assert np.all(res1 == res2)
assert np.all(res1 == res3)
def test_get_field_sideband():
holo = hologram()
ft_data = qpimage.holo.fourier2dpad(data=holo, zero_pad=True)
sideband = qpimage.holo.find_sideband(ft_data, which=+1, copy=True)
kwargs = dict(hologram=holo,
filter_name="disk",
filter_size=1 / 3,
subtract_mean=True,
zero_pad=True)
res1 = qpimage.holo.get_field(sideband=+1, **kwargs)
res2 = qpimage.holo.get_field(sideband=sideband, **kwargs)
assert np.all(res1 == res2)
def test_qpimage_holo():
# create fake hologram
size = 200
x = np.arange(size).reshape(-1, 1)
y = np.arange(size).reshape(1, -1)
kx = -.6
ky = -.4
disk_max = 1.5
# there is a phase disk as data in the hologram
data = disk_max * ((x - size / 2)**2 + (y - size / 2)**2 < 30**2)
image = np.sin(kx * x + ky * y + data)
qpi = qpimage.QPImage(image,
which_data="hologram",
holo_kw={"filter_name": "gauss"})
qpi.compute_bg(which_data="phase",
fit_offset="fit",
fit_profile="tilt",
border_px=5)
assert np.allclose(disk_max, qpi.pha.max(), rtol=.01, atol=0)
if __name__ == "__main__":
# Run all tests
_loc = locals()
for _key in list(_loc.keys()):
if _key.startswith("test_") and hasattr(_loc[_key], "__call__"):
_loc[_key]() | tests/test_holo.py | import numpy as np
import pytest
import qpimage
def hologram(size=64):
x = np.arange(size).reshape(-1, 1) - size / 2
y = np.arange(size).reshape(1, -1) - size / 2
amp = np.linspace(.9, 1.1, size * size).reshape(size, size)
pha = np.linspace(0, 2, size * size).reshape(size, size)
rad = x**2 + y**2 > (size / 3)**2
pha[rad] = 0
amp[rad] = 1
# frequencies must match pixel in Fourier space
kx = 2 * np.pi * -.3
ky = 2 * np.pi * -.3
image = (amp**2 + np.sin(kx * x + ky * y + pha) + 1) * 255
return image
def test_find_sideband():
size = 40
ft_data = np.zeros((size, size))
fx = np.fft.fftshift(np.fft.fftfreq(size))
ft_data[2, 3] = 1
ft_data[-3, -2] = 1
sb1 = qpimage.holo.find_sideband(ft_data=ft_data,
which=+1)
assert np.allclose(sb1, (fx[2], fx[3]))
sb2 = qpimage.holo.find_sideband(ft_data=ft_data,
which=-1)
assert np.allclose(sb2, (fx[-3], fx[-2]))
def test_find_sideband_error():
size = 40
ft_data = np.zeros((size, size))
ft_data[2, 3] = 1
ft_data[-3, -2] = 1
try:
qpimage.holo.find_sideband(ft_data=ft_data,
which=2)
except ValueError:
pass
else:
assert False, "2 is not a sideband"
def test_fourier2dpad():
data = np.zeros((100, 120))
fft1 = qpimage.holo.fourier2dpad(data, zero_pad=True)
assert fft1.shape == (256, 256)
fft2 = qpimage.holo.fourier2dpad(data, zero_pad=False)
assert fft2.shape == data.shape
def test_get_field_error_bad_filter_size():
holo = hologram()
with pytest.raises(ValueError, match="must be between 0 and 1"):
qpimage.holo.get_field(hologram=holo, filter_size=2)
def test_get_field_error_bad_filter_size_interpretation_frequency_index():
holo = hologram(size=64)
with pytest.raises(ValueError,
match=r"must be between 0 and max\(hologram.shape\)/2"):
qpimage.holo.get_field(hologram=holo,
filter_size_interpretation="frequency index",
filter_size=64)
def test_get_field_error_invalid_interpretation():
holo = hologram()
with pytest.raises(ValueError,
match="Invalid value for `filter_size_interpretation`"):
qpimage.holo.get_field(hologram=holo,
filter_size_interpretation="blequency")
def test_get_field_filter_names():
holo = hologram()
kwargs = dict(hologram=holo,
sideband=+1,
filter_size=1 / 3,
subtract_mean=True,
zero_pad=True)
r_disk = qpimage.holo.get_field(filter_name="disk", **kwargs)
assert np.allclose(
r_disk[32, 32], 97.307780444912936 - 76.397860381241372j)
r_smooth_disk = qpimage.holo.get_field(filter_name="smooth disk", **kwargs)
assert np.allclose(r_smooth_disk[32, 32],
108.36665064909741 - 67.176090709644185j)
r_gauss = qpimage.holo.get_field(filter_name="gauss", **kwargs)
assert np.allclose(
r_gauss[32, 32], 108.26984751082375 - 67.116410573093304j)
r_square = qpimage.holo.get_field(filter_name="square", **kwargs)
assert np.allclose(
r_square[32, 32], 102.3285348843612 - 74.139058665601155j)
r_smsquare = qpimage.holo.get_field(filter_name="smooth square", **kwargs)
assert np.allclose(
r_smsquare[32, 32], 105.23157221309754 - 70.593282942004862j)
r_tukey = qpimage.holo.get_field(filter_name="tukey", **kwargs)
assert np.allclose(
r_tukey[32, 32], 113.4826495540899 - 59.546232775481869j)
try:
qpimage.holo.get_field(filter_name="unknown", **kwargs)
except ValueError:
pass
else:
assert False, "unknown filter accepted"
@pytest.mark.parametrize("size", [62, 63, 64])
def test_get_field_interpretation_fourier_index(size):
"""Filter size in Fourier space using Fourier index new in 0.7.0"""
holo = hologram(size=size)
ft_data = qpimage.holo.fourier2dpad(data=holo, zero_pad=True)
fsx, fsy = qpimage.holo.find_sideband(ft_data, which=+1, copy=True)
kwargs1 = dict(hologram=holo,
sideband=+1,
filter_name="disk",
filter_size=1/3,
filter_size_interpretation="sideband distance",
subtract_mean=True,
zero_pad=True)
res1 = qpimage.holo.get_field(**kwargs1)
filter_size_fi = np.sqrt(fsx**2 + fsy**2) / 3 * ft_data.shape[0]
kwargs2 = dict(hologram=holo,
sideband=+1,
filter_name="disk",
filter_size=filter_size_fi,
filter_size_interpretation="frequency index",
subtract_mean=True,
zero_pad=True)
res2 = qpimage.holo.get_field(**kwargs2)
assert np.all(res1 == res2)
@pytest.mark.parametrize("size", [62, 63, 64])
def test_get_field_interpretation_fourier_index_control(size):
"""Filter size in Fourier space using Fourier index new in 0.7.0"""
holo = hologram(size=size)
ft_data = qpimage.holo.fourier2dpad(data=holo, zero_pad=True)
fsx, fsy = qpimage.holo.find_sideband(ft_data, which=+1, copy=True)
evil_factor = 1.1
kwargs1 = dict(hologram=holo,
sideband=+1,
filter_name="disk",
filter_size=1/3 * evil_factor,
filter_size_interpretation="sideband distance",
subtract_mean=True,
zero_pad=True)
res1 = qpimage.holo.get_field(**kwargs1)
filter_size_fi = np.sqrt(fsx**2 + fsy**2) / 3 * ft_data.shape[0]
kwargs2 = dict(hologram=holo,
sideband=+1,
filter_name="disk",
filter_size=filter_size_fi,
filter_size_interpretation="frequency index",
subtract_mean=True,
zero_pad=True)
res2 = qpimage.holo.get_field(**kwargs2)
assert not np.all(res1 == res2)
@pytest.mark.parametrize("size", [62, 63, 64, 134, 135])
@pytest.mark.parametrize("filter_size", [17, 17.01])
def test_get_field_interpretation_fourier_index_mask_1(size, filter_size):
"""Make sure filter size in Fourier space pixels is correct"""
holo = hologram(size=size)
kwargs2 = dict(hologram=holo,
sideband=+1,
filter_name="disk",
filter_size=filter_size,
filter_size_interpretation="frequency index",
subtract_mean=True,
zero_pad=True)
res, mask = qpimage.holo.get_field(ret_mask=True, **kwargs2)
# We get 17*2+1, because we measure from the center of Fourier
# space and a pixel is included if its center is withing the
# perimeter of the disk.
assert np.sum(np.sum(mask, axis=0) != 0) == 17*2 + 1
@pytest.mark.parametrize("size", [62, 63, 64, 134, 135])
def test_get_field_interpretation_fourier_index_mask_2(size):
"""Filter size in Fourier space using Fourier index new in 0.7.0"""
holo = hologram(size=size)
kwargs2 = dict(hologram=holo,
sideband=+1,
filter_name="disk",
filter_size=16.99,
filter_size_interpretation="frequency index",
subtract_mean=True,
zero_pad=True)
res, mask = qpimage.holo.get_field(ret_mask=True, **kwargs2)
# We get two points less than in the previous test, because we
# loose on on each side of the spectrum.
assert np.sum(np.sum(mask, axis=0) != 0) == 17*2 - 1
def test_get_field_int_copy():
holo = hologram()
holo = np.array(holo, dtype=int)
kwargs = dict(sideband=+1,
filter_size=1 / 3,
subtract_mean=True,
zero_pad=True)
res1 = qpimage.holo.get_field(hologram=holo,
copy=False,
**kwargs)
res2 = qpimage.holo.get_field(hologram=holo,
copy=True,
**kwargs)
res3 = qpimage.holo.get_field(hologram=holo.astype(float),
copy=True,
**kwargs)
assert np.all(res1 == res2)
assert np.all(res1 == res3)
def test_get_field_sideband():
holo = hologram()
ft_data = qpimage.holo.fourier2dpad(data=holo, zero_pad=True)
sideband = qpimage.holo.find_sideband(ft_data, which=+1, copy=True)
kwargs = dict(hologram=holo,
filter_name="disk",
filter_size=1 / 3,
subtract_mean=True,
zero_pad=True)
res1 = qpimage.holo.get_field(sideband=+1, **kwargs)
res2 = qpimage.holo.get_field(sideband=sideband, **kwargs)
assert np.all(res1 == res2)
def test_qpimage_holo():
# create fake hologram
size = 200
x = np.arange(size).reshape(-1, 1)
y = np.arange(size).reshape(1, -1)
kx = -.6
ky = -.4
disk_max = 1.5
# there is a phase disk as data in the hologram
data = disk_max * ((x - size / 2)**2 + (y - size / 2)**2 < 30**2)
image = np.sin(kx * x + ky * y + data)
qpi = qpimage.QPImage(image,
which_data="hologram",
holo_kw={"filter_name": "gauss"})
qpi.compute_bg(which_data="phase",
fit_offset="fit",
fit_profile="tilt",
border_px=5)
assert np.allclose(disk_max, qpi.pha.max(), rtol=.01, atol=0)
if __name__ == "__main__":
# Run all tests
_loc = locals()
for _key in list(_loc.keys()):
if _key.startswith("test_") and hasattr(_loc[_key], "__call__"):
_loc[_key]() | 0.596903 | 0.758488 |
from __future__ import absolute_import
import torch
import argparse
import sys, os
from analysis.PytorchA import analyse
from analysis.utils import save_csv
from torch.autograd import Variable
import torch.nn as nn
"""
Supporting analyse the inheritors of torch.nn.Moudule class.
Command:`pytorch_analyser.py [-h] [--out OUT] [--class_args ARGS] path class_name shape`
- The path is the python file path which contaning your class.
- The class_name is the class name in your python file.
- The shape is the input shape of the network(split by comma `,`), in pytorch image shape should be: batch_size, channel, image_height, image_width.
- The out (optinal) is path to save the csv file, default is '/tmp/pytorch_analyse.csv'.
- The class_args (optional) is the args to init the class in python file, default is empty.
For example `python pytorch_analyser.py tmp/pytorch_analysis_test.py ResNet218 1,3,224,224`
"""
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('path', help='python file location, recommend absolute path', type=str)
parser.add_argument('name', help='the class name or instance name in your python file', type=str)
parser.add_argument('shape',
help='input shape of the network(split by comma `,`), image shape should be: batch,c,h,w',
type=str)
parser.add_argument('--out', help='path to save the csv file', default='/tmp/pytorch_analyse.csv', type=str)
parser.add_argument('--class_args', help='args to init the class in python file', default='', type=str)
args = parser.parse_args()
path, filename = os.path.split(args.path)
filename = os.path.splitext(filename)[0]
sys.path.insert(0, path)
print(path)
print('from %s import %s as Net' % (filename, args.name))
exec('from %s import %s as Net' % (filename, args.name))
if isinstance(Net, nn.Module):
net = Net
elif issubclass(Net, nn.Module):
net = Net(*args.class_args.split())
else:
assert ("Error, The Net is not a instance of nn.Module or subclass of nn.Module")
shape = [int(i) for i in args.shape.split(',')]
x = Variable(torch.rand(shape))
blob_dict, layers = analyse(net, x)
save_csv(layers, args.out) | pytorch_analyser.py | from __future__ import absolute_import
import torch
import argparse
import sys, os
from analysis.PytorchA import analyse
from analysis.utils import save_csv
from torch.autograd import Variable
import torch.nn as nn
"""
Supporting analyse the inheritors of torch.nn.Moudule class.
Command:`pytorch_analyser.py [-h] [--out OUT] [--class_args ARGS] path class_name shape`
- The path is the python file path which contaning your class.
- The class_name is the class name in your python file.
- The shape is the input shape of the network(split by comma `,`), in pytorch image shape should be: batch_size, channel, image_height, image_width.
- The out (optinal) is path to save the csv file, default is '/tmp/pytorch_analyse.csv'.
- The class_args (optional) is the args to init the class in python file, default is empty.
For example `python pytorch_analyser.py tmp/pytorch_analysis_test.py ResNet218 1,3,224,224`
"""
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('path', help='python file location, recommend absolute path', type=str)
parser.add_argument('name', help='the class name or instance name in your python file', type=str)
parser.add_argument('shape',
help='input shape of the network(split by comma `,`), image shape should be: batch,c,h,w',
type=str)
parser.add_argument('--out', help='path to save the csv file', default='/tmp/pytorch_analyse.csv', type=str)
parser.add_argument('--class_args', help='args to init the class in python file', default='', type=str)
args = parser.parse_args()
path, filename = os.path.split(args.path)
filename = os.path.splitext(filename)[0]
sys.path.insert(0, path)
print(path)
print('from %s import %s as Net' % (filename, args.name))
exec('from %s import %s as Net' % (filename, args.name))
if isinstance(Net, nn.Module):
net = Net
elif issubclass(Net, nn.Module):
net = Net(*args.class_args.split())
else:
assert ("Error, The Net is not a instance of nn.Module or subclass of nn.Module")
shape = [int(i) for i in args.shape.split(',')]
x = Variable(torch.rand(shape))
blob_dict, layers = analyse(net, x)
save_csv(layers, args.out) | 0.638272 | 0.258133 |
import numpy as np
import math
import random
from batchgenerators.transforms import AbstractTransform
import sys
sys.path.append(".")
from kits19cnn.io.custom_augmentations import foreground_crop, center_crop, \
random_resized_crop
class RandomResizedCropTransform(AbstractTransform):
"""
Crop the given array to random size and aspect ratio.
Doesn't resize across the depth dimenion (assumes it is dim=0) if
the data is 3D.
A crop of random size (default: of 0.08 to 1.0) of the original size and a
random aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio
is made. This crop is finally resized to given size.
This is popularly used to train the Inception networks.
Assumes the data and segmentation masks are the same size.
"""
def __init__(self, target_size, scale=(0.08, 1.0),
ratio=(3. / 4., 4. / 3.),
data_key="data", label_key="seg", p_per_sample=0.33,
crop_kwargs={}, resize_kwargs={}):
"""
Attributes:
pass
"""
if len(target_size) > 2:
print("Currently only adjusts the aspect ratio for the 2D dims.")
if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
warnings.warn("range should be of kind (min, max)")
self.target_size = target_size
self.scale = scale
self.ratio = ratio
self.data_key = data_key
self.label_key = label_key
self.p_per_sample = p_per_sample
self.crop_kwargs = crop_kwargs
self.resize_kwargs = resize_kwargs
def _get_image_size(self, data):
"""
Assumes data has shape (b, c, h, w (, d)). Fetches the h, w, and d.
depth if applicable.
"""
return data.shape[2:]
def get_crop_size(self, data, scale, ratio):
"""
Get parameters for ``crop`` for a random sized crop.
"""
shape_dims = self._get_image_size(data)
area = np.prod(shape_dims)
while True:
target_area = random.uniform(*scale) * area
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if len(shape_dims) == 3:
depth = shape_dims[0]
crop_size = np.array([depth, h, w])
else:
crop_size = np.array([h, w])
if (crop_size <= shape_dims).all() and (crop_size > 0).all():
return crop_size
def __call__(self, **data_dict):
"""
Actually doing the cropping.
"""
data = data_dict.get(self.data_key)
seg = data_dict.get(self.label_key)
if np.random.uniform() < self.p_per_sample:
crop_size = self.get_crop_size(data, self.scale, self.ratio)
data, seg = random_resized_crop(data, seg,
target_size=self.target_size,
crop_size=crop_size,
crop_kwargs=self.crop_kwargs,
resize_kwargs=self.resize_kwargs)
else:
data, seg = center_crop(data, self.target_size, seg,
crop_kwargs=self.crop_kwargs)
data_dict[self.data_key] = data
if seg is not None:
data_dict[self.label_key] = seg.astype(np.float32)
return data_dict
class ROICropTransform(AbstractTransform):
"""
Crops the foreground in images `p_per_sample` part of the time. The
fallback cropping is center cropping.
"""
def __init__(self, crop_size=128, margins=(0, 0, 0), data_key="data",
label_key="seg", coords_key="bbox_coords",
p_per_sample=0.33, crop_kwargs={}):
self.data_key = data_key
self.label_key = label_key
self.coords_key = coords_key
self.margins = margins
self.crop_size = crop_size
self.p_per_sample = p_per_sample
self.crop_kwargs = crop_kwargs
def __call__(self, **data_dict):
"""
Actually doing the cropping. Make sure that data_dict has the
a key for the coords (self.coords_key) if p>0.
(If the output of data_dict.get(self.coords_key) is None, then foreground
crops are done on-the-fly).
"""
data = data_dict.get(self.data_key)
seg = data_dict.get(self.label_key)
if np.random.uniform() < self.p_per_sample:
coords = data_dict.get(self.coords_key)
data, seg = foreground_crop(data, seg, patch_size=self.crop_size,
margins=self.margins,
bbox_coords=coords,
crop_kwargs=self.crop_kwargs)
else:
data, seg = center_crop(data, self.crop_size, seg,
crop_kwargs=self.crop_kwargs)
data_dict[self.data_key] = data
if seg is not None:
data_dict[self.label_key] = seg
return data_dict
class MultiClassToBinaryTransform(AbstractTransform):
"""
For changing a multi-class case to a binary one. Specify the label to
change to binary with `roi_label`.
- Don't forget to adjust `remove_label` accordingly!
- label will be turned to a binary label with only `roi_label`
existing as 1s
"""
def __init__(self, roi_label="2", remove_label="1", label_key="seg"):
self.roi_label = int(roi_label)
self.remove_label = int(remove_label)
self.label_key = label_key
def __call__(self, **data_dict):
"""
Replaces the label values
"""
label = data_dict.get(self.label_key)
# changing labels
label[label == self.remove_label] = 0
label[label == self.roi_label] = 1
data_dict[self.label_key] = label
return data_dict
class RepeatChannelsTransform(AbstractTransform):
"""
Repeats across the channels dimension `num_tiles` number of times.
"""
def __init__(self, num_repeats=3, data_key="data"):
self.num_repeats = num_repeats
self.data_key = data_key
def __call__(self, **data_dict):
"""
Repeats across the channels dimension (axis=1).
"""
data = data_dict.get(self.data_key)
data_dict[self.data_key] = np.repeat(data, self.num_repeats, axis=1)
return data_dict | kits19cnn/io/custom_transforms.py | import numpy as np
import math
import random
from batchgenerators.transforms import AbstractTransform
import sys
sys.path.append(".")
from kits19cnn.io.custom_augmentations import foreground_crop, center_crop, \
random_resized_crop
class RandomResizedCropTransform(AbstractTransform):
"""
Crop the given array to random size and aspect ratio.
Doesn't resize across the depth dimenion (assumes it is dim=0) if
the data is 3D.
A crop of random size (default: of 0.08 to 1.0) of the original size and a
random aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio
is made. This crop is finally resized to given size.
This is popularly used to train the Inception networks.
Assumes the data and segmentation masks are the same size.
"""
def __init__(self, target_size, scale=(0.08, 1.0),
ratio=(3. / 4., 4. / 3.),
data_key="data", label_key="seg", p_per_sample=0.33,
crop_kwargs={}, resize_kwargs={}):
"""
Attributes:
pass
"""
if len(target_size) > 2:
print("Currently only adjusts the aspect ratio for the 2D dims.")
if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
warnings.warn("range should be of kind (min, max)")
self.target_size = target_size
self.scale = scale
self.ratio = ratio
self.data_key = data_key
self.label_key = label_key
self.p_per_sample = p_per_sample
self.crop_kwargs = crop_kwargs
self.resize_kwargs = resize_kwargs
def _get_image_size(self, data):
"""
Assumes data has shape (b, c, h, w (, d)). Fetches the h, w, and d.
depth if applicable.
"""
return data.shape[2:]
def get_crop_size(self, data, scale, ratio):
"""
Get parameters for ``crop`` for a random sized crop.
"""
shape_dims = self._get_image_size(data)
area = np.prod(shape_dims)
while True:
target_area = random.uniform(*scale) * area
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if len(shape_dims) == 3:
depth = shape_dims[0]
crop_size = np.array([depth, h, w])
else:
crop_size = np.array([h, w])
if (crop_size <= shape_dims).all() and (crop_size > 0).all():
return crop_size
def __call__(self, **data_dict):
"""
Actually doing the cropping.
"""
data = data_dict.get(self.data_key)
seg = data_dict.get(self.label_key)
if np.random.uniform() < self.p_per_sample:
crop_size = self.get_crop_size(data, self.scale, self.ratio)
data, seg = random_resized_crop(data, seg,
target_size=self.target_size,
crop_size=crop_size,
crop_kwargs=self.crop_kwargs,
resize_kwargs=self.resize_kwargs)
else:
data, seg = center_crop(data, self.target_size, seg,
crop_kwargs=self.crop_kwargs)
data_dict[self.data_key] = data
if seg is not None:
data_dict[self.label_key] = seg.astype(np.float32)
return data_dict
class ROICropTransform(AbstractTransform):
"""
Crops the foreground in images `p_per_sample` part of the time. The
fallback cropping is center cropping.
"""
def __init__(self, crop_size=128, margins=(0, 0, 0), data_key="data",
label_key="seg", coords_key="bbox_coords",
p_per_sample=0.33, crop_kwargs={}):
self.data_key = data_key
self.label_key = label_key
self.coords_key = coords_key
self.margins = margins
self.crop_size = crop_size
self.p_per_sample = p_per_sample
self.crop_kwargs = crop_kwargs
def __call__(self, **data_dict):
"""
Actually doing the cropping. Make sure that data_dict has the
a key for the coords (self.coords_key) if p>0.
(If the output of data_dict.get(self.coords_key) is None, then foreground
crops are done on-the-fly).
"""
data = data_dict.get(self.data_key)
seg = data_dict.get(self.label_key)
if np.random.uniform() < self.p_per_sample:
coords = data_dict.get(self.coords_key)
data, seg = foreground_crop(data, seg, patch_size=self.crop_size,
margins=self.margins,
bbox_coords=coords,
crop_kwargs=self.crop_kwargs)
else:
data, seg = center_crop(data, self.crop_size, seg,
crop_kwargs=self.crop_kwargs)
data_dict[self.data_key] = data
if seg is not None:
data_dict[self.label_key] = seg
return data_dict
class MultiClassToBinaryTransform(AbstractTransform):
"""
For changing a multi-class case to a binary one. Specify the label to
change to binary with `roi_label`.
- Don't forget to adjust `remove_label` accordingly!
- label will be turned to a binary label with only `roi_label`
existing as 1s
"""
def __init__(self, roi_label="2", remove_label="1", label_key="seg"):
self.roi_label = int(roi_label)
self.remove_label = int(remove_label)
self.label_key = label_key
def __call__(self, **data_dict):
"""
Replaces the label values
"""
label = data_dict.get(self.label_key)
# changing labels
label[label == self.remove_label] = 0
label[label == self.roi_label] = 1
data_dict[self.label_key] = label
return data_dict
class RepeatChannelsTransform(AbstractTransform):
"""
Repeats across the channels dimension `num_tiles` number of times.
"""
def __init__(self, num_repeats=3, data_key="data"):
self.num_repeats = num_repeats
self.data_key = data_key
def __call__(self, **data_dict):
"""
Repeats across the channels dimension (axis=1).
"""
data = data_dict.get(self.data_key)
data_dict[self.data_key] = np.repeat(data, self.num_repeats, axis=1)
return data_dict | 0.624408 | 0.50293 |
import json
import re
from urllib.parse import urlparse
import scrapy
from scrapy.selector import Selector
from locations.items import GeojsonPointItem
from locations.hours import OpeningHours
class GoldsGymSpider(scrapy.Spider):
name = "goldsgym"
item_attributes = { 'brand': "Gold's Gym" }
allowed_domains = ["goldsgym.com"]
start_urls = [
"https://www.goldsgym.com/gym_index-sitemap.xml",
]
def parse_hours(self, hours):
opening_hours = OpeningHours()
for group in hours:
days, open_time, close_time = re.search(r'([a-zA-Z,]+)\s([\d:]+)-([\d:]+)', group).groups()
days = days.split(',')
for day in days:
opening_hours.add_range(day=day, open_time=open_time, close_time=close_time, time_format='%H:%M')
return opening_hours.as_opening_hours()
def parse_hotel(self, response):
if 'locate-a-gym' in response.url or '/markets/' in response.url:
return # closed gym, redirects
data = response.xpath('//script[@type="application/ld+json"]/text()').extract_first()
if data:
data = json.loads(data)
else:
return # closed gym
properties = {
'ref': "_".join([x for x in response.url.split('/')[-2:] if x]),
'name': data["name"],
'addr_full': data["address"]["streetAddress"].strip(),
'city': data["address"]["addressLocality"].strip(),
'state': data["address"]["addressRegion"],
'postcode': data["address"]["postalCode"],
'country': data["address"]["addressCountry"],
'phone': data.get("telephone", None),
'lat': float(data["geo"]["latitude"]),
'lon': float(data["geo"]["longitude"]),
'website': response.url,
}
hours = self.parse_hours(data["openingHours"])
if hours:
properties["opening_hours"] = hours
yield GeojsonPointItem(**properties)
def parse(self, response):
xml = Selector(response)
xml.remove_namespaces()
urls = xml.xpath('//loc/text()').extract()
for url in urls:
path = "/".join(urlparse(url).path.split('/')[:-1])
yield scrapy.Request(response.urljoin(path), callback=self.parse_hotel) | locations/spiders/goldsgym.py | import json
import re
from urllib.parse import urlparse
import scrapy
from scrapy.selector import Selector
from locations.items import GeojsonPointItem
from locations.hours import OpeningHours
class GoldsGymSpider(scrapy.Spider):
    """Spider that walks the Gold's Gym sitemap and emits one item per gym."""
    name = "goldsgym"
    item_attributes = {'brand': "Gold's Gym"}
    allowed_domains = ["goldsgym.com"]
    start_urls = [
        "https://www.goldsgym.com/gym_index-sitemap.xml",
    ]

    def parse_hours(self, hours):
        """Translate "<days> <open>-<close>" strings into opening-hours text."""
        opening_hours = OpeningHours()
        for entry in hours:
            day_list, start, end = re.search(
                r'([a-zA-Z,]+)\s([\d:]+)-([\d:]+)', entry).groups()
            for day in day_list.split(','):
                opening_hours.add_range(day=day, open_time=start,
                                        close_time=end, time_format='%H:%M')
        return opening_hours.as_opening_hours()

    def parse_hotel(self, response):
        """Turn a single gym detail page into a GeojsonPointItem."""
        if 'locate-a-gym' in response.url or '/markets/' in response.url:
            # Redirect target: the gym is closed, nothing to emit.
            return
        raw = response.xpath('//script[@type="application/ld+json"]/text()').extract_first()
        if not raw:
            # No JSON-LD block: closed gym.
            return
        data = json.loads(raw)
        address = data["address"]
        geo = data["geo"]
        ref_parts = [part for part in response.url.split('/')[-2:] if part]
        properties = {
            'ref': "_".join(ref_parts),
            'name': data["name"],
            'addr_full': address["streetAddress"].strip(),
            'city': address["addressLocality"].strip(),
            'state': address["addressRegion"],
            'postcode': address["postalCode"],
            'country': address["addressCountry"],
            'phone': data.get("telephone", None),
            'lat': float(geo["latitude"]),
            'lon': float(geo["longitude"]),
            'website': response.url,
        }
        opening_hours = self.parse_hours(data["openingHours"])
        if opening_hours:
            properties["opening_hours"] = opening_hours
        yield GeojsonPointItem(**properties)

    def parse(self, response):
        """Request the parent path of every <loc> entry in the sitemap."""
        sitemap = Selector(response)
        sitemap.remove_namespaces()
        for loc in sitemap.xpath('//loc/text()').extract():
            parent_path = "/".join(urlparse(loc).path.split('/')[:-1])
            yield scrapy.Request(response.urljoin(parent_path),
                                 callback=self.parse_hotel)
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
import re
import logging
from flexget import plugin
from flexget.event import event
from flexget.plugins.plugin_urlrewriting import UrlRewritingError
from flexget.utils import requests
from flexget.utils.soup import get_soup
log = logging.getLogger('serienjunkies')
regex_single_ep = re.compile(r'(S\d+E\d\d+)(?!-E)', re.I)
regex_multi_ep = re.compile(r'(?P<season>S\d\d)E(?P<startep>\d\d+)-E?(?P<stopep>\d\d+)', re.I)
regex_season = re.compile(r'(?<=\.|\-)S\d\d(?:[-\.]S\d\d)*(?!E\d\d+)', re.I)
regex_language_container = re.compile(r'Sprache')
regex_is_german = re.compile(r'german|deutsch', re.I)
regex_is_foreign = re.compile(
r'englisc?h|französisch|japanisch|dänisch|norwegisch|niederländisch|ungarisch|italienisch|portugiesisch', re.I)
regex_is_subtitle = re.compile(r'Untertitel|Subs?|UT', re.I)
LANGUAGE = ['german', 'foreign', 'subtitle', 'dual']
HOSTER = ['ul', 'cz', 'so', 'all']
DEFAULT_LANGUAGE = 'dual'
DEFAULT_HOSTER = 'ul'
class UrlRewriteSerienjunkies(object):
    """
    Serienjunkies urlrewriter
    Version 1.0.2
    Language setting works like a whitelist, the selected is needed,
    but others are still possible.
    Configuration
    language: [german|foreign|subtitle|dual] default "foreign"
    hoster: [ul|cz|so|all] default "ul"
    """
    schema = {
        'type': 'object',
        'properties': {
            'language': {'type': 'string', 'enum': LANGUAGE},
            'hoster': {'type': 'string', 'enum': HOSTER}
        },
        'additionalProperties': False
    }

    # urlrewriter API
    def url_rewritable(self, task, entry):
        """Return True if the entry's URL points at serienjunkies.org."""
        url = entry['url']
        if url.startswith('http://www.serienjunkies.org/') or url.startswith('http://serienjunkies.org/'):
            return True
        return False

    # urlrewriter API
    def url_rewrite(self, task, entry):
        """Replace the entry's series-page URL with a download URL.

        All matching download URLs are stored in entry['description'];
        entry['url'] receives the last match.  The entry is rejected
        when no episode download is found.
        """
        series_url = entry['url']
        # Strip a leading "[...] " tag from the title before searching.
        search_title = re.sub(r'\[.*\] ', '', entry['title'])
        self.config = task.config.get('serienjunkies') or {}
        self.config.setdefault('hoster', DEFAULT_HOSTER)
        self.config.setdefault('language', DEFAULT_LANGUAGE)
        download_urls = self.parse_downloads(series_url, search_title)
        if not download_urls:
            entry.reject('No Episode found')
        else:
            entry['url'] = download_urls[-1]
            entry['description'] = ", ".join(download_urls)
        # Debug Information
        log.debug('TV Show URL: %s' % series_url)
        log.debug('Episode: %s' % search_title)
        log.debug('Download URL: %s' % download_urls)

    @plugin.internet(log)
    def parse_downloads(self, series_url, search_title):
        """Scrape the series page and return the matching download URLs.

        :param series_url: URL of the serienjunkies.org series page
        :param search_title: cleaned release title to look for
        :raises UrlRewritingError: if the page cannot be parsed or no
            episode pattern can be derived from *search_title*
        """
        page = requests.get(series_url).content
        try:
            soup = get_soup(page)
        except Exception as e:
            raise UrlRewritingError(e)
        urls = []
        # find all titles
        episode_titles = self.find_all_titles(search_title)
        if not episode_titles:
            raise UrlRewritingError('Unable to find episode')
        for ep_title in episode_titles:
            # find matching download
            episode_title = soup.find('strong', text=re.compile(ep_title, re.I))
            if not episode_title:
                continue
            # find download container
            episode = episode_title.parent
            if not episode:
                continue
            # find episode language (text following the "Sprache" label)
            episode_lang = episode.find_previous('strong', text=re.compile('Sprache')).next_sibling
            if not episode_lang:
                log.warning('No language found for: %s' % series_url)
                continue
            # filter language
            if not self.check_language(episode_lang):
                log.warning('languages not matching: %s <> %s' % (self.config['language'], episode_lang))
                continue
            # find download links
            links = episode.find_all('a')
            if not links:
                log.warning('No links found for: %s' % series_url)
                continue
            for link in links:
                if not link.has_attr('href'):
                    continue
                url = link['href']
                pattern = r'http:\/\/download\.serienjunkies\.org.*%s_.*\.html' % self.config['hoster']
                if re.match(pattern, url) or self.config['hoster'] == 'all':
                    urls.append(url)
                else:
                    continue
        return urls

    def find_all_titles(self, search_title):
        """Expand *search_title* into one regex pattern per episode/season.

        Multi-episode titles (SxxEyy-Ezz) and multi-season titles
        (Sxx-Syy) are expanded into per-episode / per-season patterns;
        a single-episode title is escaped and returned as-is.
        """
        search_titles = []
        # Check type
        if regex_multi_ep.search(search_title):
            log.debug('Title seems to describe multiple episodes')
            first_ep = int(regex_multi_ep.search(search_title).group('startep'))
            last_ep = int(regex_multi_ep.search(search_title).group('stopep'))
            season = regex_multi_ep.search(search_title).group('season') + 'E'
            for i in range(first_ep, last_ep + 1):
                # ToDO: Umlaute , Mehrzeilig etc.
                search_titles.append(regex_multi_ep.sub(season + str(i).zfill(2) + '[\\\\w\\\\.\\\\(\\\\)]*',
                                                        search_title))
        elif regex_season.search(search_title):
            log.debug('Title seems to describe one or more season')
            search_string = regex_season.search(search_title).group(0)
            for s in re.findall(r'(?<!\-)S\d\d(?!\-)', search_string):
                search_titles.append(regex_season.sub(s + '[\\\\w\\\\.]*', search_title))
            for s in re.finditer(r'(?<!\-)S(\d\d)-S(\d\d)(?!\-)', search_string):
                season_start = int(s.group(1))
                season_end = int(s.group(2))
                for i in range(season_start, season_end + 1):
                    search_titles.append(regex_season.sub('S' + str(i).zfill(2) + '[\\\\w\\\\.]*', search_title))
        else:
            log.debug('Title seems to describe a single episode')
            search_titles.append(re.escape(search_title))
        return search_titles

    def check_language(self, languages):
        """Return True if *languages* matches the configured language mode."""
        # Cut additional Subtitles
        try:
            languages = languages[:languages.index("+")]
        except ValueError:
            # BUG FIX: str.index raises ValueError (not IndexError) when
            # "+" is absent; the old "except IndexError" never matched and
            # the ValueError escaped to the caller.
            pass
        language_list = re.split(r'[,&]', languages)
        try:
            if self.config['language'] == 'german':
                if regex_is_german.search(language_list[0]):
                    return True
            elif self.config['language'] == 'foreign':
                if (regex_is_foreign.search(language_list[0]) and len(language_list) == 1) or \
                        (len(language_list) > 1 and not regex_is_subtitle.search(language_list[1])):
                    return True
            elif self.config['language'] == 'subtitle':
                if len(language_list) > 1 and regex_is_subtitle.search(language_list[1]):
                    return True
            elif self.config['language'] == 'dual':
                if len(language_list) > 1 and not regex_is_subtitle.search(language_list[1]):
                    return True
        except (KeyError, re.error):
            pass
        return False
@event('plugin.register')
def register_plugin():
    """Register the urlrewriter with FlexGet when plugins are loaded."""
    plugin.register(
        UrlRewriteSerienjunkies, 'serienjunkies',
        groups=['urlrewriter'], api_ver=2)
from builtins import * # pylint: disable=unused-import, redefined-builtin
import re
import logging
from flexget import plugin
from flexget.event import event
from flexget.plugins.plugin_urlrewriting import UrlRewritingError
from flexget.utils import requests
from flexget.utils.soup import get_soup
log = logging.getLogger('serienjunkies')
regex_single_ep = re.compile(r'(S\d+E\d\d+)(?!-E)', re.I)
regex_multi_ep = re.compile(r'(?P<season>S\d\d)E(?P<startep>\d\d+)-E?(?P<stopep>\d\d+)', re.I)
regex_season = re.compile(r'(?<=\.|\-)S\d\d(?:[-\.]S\d\d)*(?!E\d\d+)', re.I)
regex_language_container = re.compile(r'Sprache')
regex_is_german = re.compile(r'german|deutsch', re.I)
regex_is_foreign = re.compile(
r'englisc?h|französisch|japanisch|dänisch|norwegisch|niederländisch|ungarisch|italienisch|portugiesisch', re.I)
regex_is_subtitle = re.compile(r'Untertitel|Subs?|UT', re.I)
LANGUAGE = ['german', 'foreign', 'subtitle', 'dual']
HOSTER = ['ul', 'cz', 'so', 'all']
DEFAULT_LANGUAGE = 'dual'
DEFAULT_HOSTER = 'ul'
class UrlRewriteSerienjunkies(object):
    """
    Serienjunkies urlrewriter
    Version 1.0.2
    Language setting works like a whitelist, the selected is needed,
    but others are still possible.
    Configuration
    language: [german|foreign|subtitle|dual] default "foreign"
    hoster: [ul|cz|so|all] default "ul"
    """
    # JSON schema validating the plugin's task configuration.
    schema = {
        'type': 'object',
        'properties': {
            'language': {'type': 'string', 'enum': LANGUAGE},
            'hoster': {'type': 'string', 'enum': HOSTER}
        },
        'additionalProperties': False
    }

    # urlrewriter API
    def url_rewritable(self, task, entry):
        """Return True if the entry's URL points at serienjunkies.org."""
        url = entry['url']
        if url.startswith('http://www.serienjunkies.org/') or url.startswith('http://serienjunkies.org/'):
            return True
        return False

    # urlrewriter API
    def url_rewrite(self, task, entry):
        """Replace the entry's series-page URL with a download URL.

        entry['url'] receives the last matching download URL and
        entry['description'] the comma-joined list of all matches; the
        entry is rejected when nothing matches.
        """
        series_url = entry['url']
        # Strip a leading "[...] " tag from the title before searching.
        search_title = re.sub('\[.*\] ', '', entry['title'])
        self.config = task.config.get('serienjunkies') or {}
        self.config.setdefault('hoster', DEFAULT_HOSTER)
        self.config.setdefault('language', DEFAULT_LANGUAGE)
        download_urls = self.parse_downloads(series_url, search_title)
        if not download_urls:
            entry.reject('No Episode found')
        else:
            entry['url'] = download_urls[-1]
            entry['description'] = ", ".join(download_urls)
        # Debug Information
        log.debug('TV Show URL: %s' % series_url)
        log.debug('Episode: %s' % search_title)
        log.debug('Download URL: %s' % download_urls)

    @plugin.internet(log)
    def parse_downloads(self, series_url, search_title):
        """Scrape the series page and return matching download URLs.

        :param series_url: URL of the serienjunkies.org series page
        :param search_title: cleaned release title to look for
        :raises UrlRewritingError: if the page cannot be parsed or no
            episode pattern can be derived from *search_title*
        """
        page = requests.get(series_url).content
        try:
            soup = get_soup(page)
        except Exception as e:
            raise UrlRewritingError(e)
        urls = []
        # find all titles
        episode_titles = self.find_all_titles(search_title)
        if not episode_titles:
            raise UrlRewritingError('Unable to find episode')
        for ep_title in episode_titles:
            # find matching download
            episode_title = soup.find('strong', text=re.compile(ep_title, re.I))
            if not episode_title:
                continue
            # find download container
            episode = episode_title.parent
            if not episode:
                continue
            # find episode language (text node following the "Sprache" label)
            episode_lang = episode.find_previous('strong', text=re.compile('Sprache')).next_sibling
            if not episode_lang:
                log.warning('No language found for: %s' % series_url)
                continue
            # filter language
            if not self.check_language(episode_lang):
                log.warning('languages not matching: %s <> %s' % (self.config['language'], episode_lang))
                continue
            # find download links
            links = episode.find_all('a')
            if not links:
                log.warning('No links found for: %s' % series_url)
                continue
            for link in links:
                if not link.has_attr('href'):
                    continue
                url = link['href']
                # Only keep links for the configured hoster, unless "all".
                pattern = 'http:\/\/download\.serienjunkies\.org.*%s_.*\.html' % self.config['hoster']
                if re.match(pattern, url) or self.config['hoster'] == 'all':
                    urls.append(url)
                else:
                    continue
        return urls

    def find_all_titles(self, search_title):
        """Expand *search_title* into one regex pattern per episode/season."""
        search_titles = []
        # Check type
        if regex_multi_ep.search(search_title):
            log.debug('Title seems to describe multiple episodes')
            first_ep = int(regex_multi_ep.search(search_title).group('startep'))
            last_ep = int(regex_multi_ep.search(search_title).group('stopep'))
            season = regex_multi_ep.search(search_title).group('season') + 'E'
            for i in range(first_ep, last_ep + 1):
                # ToDO: Umlaute , Mehrzeilig etc.
                search_titles.append(regex_multi_ep.sub(season + str(i).zfill(2) + '[\\\\w\\\\.\\\\(\\\\)]*',
                                                        search_title))
        elif regex_season.search(search_title):
            log.debug('Title seems to describe one or more season')
            search_string = regex_season.search(search_title).group(0)
            for s in re.findall('(?<!\-)S\d\d(?!\-)', search_string):
                search_titles.append(regex_season.sub(s + '[\\\\w\\\\.]*', search_title))
            for s in re.finditer('(?<!\-)S(\d\d)-S(\d\d)(?!\-)', search_string):
                season_start = int(s.group(1))
                season_end = int(s.group(2))
                for i in range(season_start, season_end + 1):
                    search_titles.append(regex_season.sub('S' + str(i).zfill(2) + '[\\\\w\\\\.]*', search_title))
        else:
            log.debug('Title seems to describe a single episode')
            search_titles.append(re.escape(search_title))
        return search_titles

    def check_language(self, languages):
        """Return True if *languages* matches the configured language mode."""
        # Cut additional Subtitles
        try:
            languages = languages[:languages.index("+")]
        except IndexError:
            # NOTE(review): str.index raises ValueError, not IndexError, so
            # a missing "+" propagates ValueError to the caller — confirm
            # whether this clause should catch ValueError instead.
            pass
        language_list = re.split(r'[,&]', languages)
        try:
            if self.config['language'] == 'german':
                if regex_is_german.search(language_list[0]):
                    return True
            elif self.config['language'] == 'foreign':
                if (regex_is_foreign.search(language_list[0]) and len(language_list) == 1) or \
                        (len(language_list) > 1 and not regex_is_subtitle.search(language_list[1])):
                    return True
            elif self.config['language'] == 'subtitle':
                if len(language_list) > 1 and regex_is_subtitle.search(language_list[1]):
                    return True
            elif self.config['language'] == 'dual':
                if len(language_list) > 1 and not regex_is_subtitle.search(language_list[1]):
                    return True
        except (KeyError, re.error):
            pass
        return False
@event('plugin.register')
def register_plugin():
    """Register the urlrewriter with FlexGet when plugins are loaded."""
    plugin.register(
        UrlRewriteSerienjunkies, 'serienjunkies',
        groups=['urlrewriter'], api_ver=2)
from neutron.common import constants as neutron_const
from neutron.common import utils
from neutron.extensions import portbindings
from neutron.extensions import providernet as prov_net
from neutron.plugins.common import constants as plugin_const
from neutron.plugins.ml2 import db
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers.openvswitch.mech_driver \
import mech_openvswitch as ovs
from neutron_lib import constants
from oslo_config import cfg
from oslo_log import log as logging
from networking_nec._i18n import _LW
from networking_nec.nwa.common import exceptions as nwa_exc
from networking_nec.nwa.common import utils as nwa_com_utils
from networking_nec.nwa.l2 import utils as nwa_l2_utils
from networking_nec.nwa.l3 import db_api as nwa_l3_db
from networking_nec.nwa.l3.rpc import nwa_l3_proxy_api
LOG = logging.getLogger(__name__)
class NECNWAMechanismDriver(ovs.OpenvswitchMechanismDriver):
    """ML2 mechanism driver that mirrors port operations to NEC NWA.

    Extends the stock Open vSwitch mechanism driver: router interface and
    gateway ports are propagated to the NWA backend as "tenant FW"
    operations, every other bindable port as a "general dev" operation.
    """

    def initialize(self):
        """Load the resource-group definitions from NWA configuration."""
        self.resource_groups = nwa_com_utils.load_json_from_file(
            'resource_group', cfg.CONF.NWA.resource_group_file,
            cfg.CONF.NWA.resource_group, default_value=[])

    def _get_l2api_proxy(self, context, tenant_id):
        """Return the per-tenant NWA proxy used for L2 API calls."""
        proxy = context._plugin.get_nwa_proxy(tenant_id,
                                              context._plugin_context)
        return proxy

    def _get_l3api_proxy(self, context, tenant_id):
        """Return the per-tenant NWA proxy wrapped for L3 API calls."""
        proxy = context._plugin.get_nwa_proxy(tenant_id,
                                              context.network._plugin_context)
        return nwa_l3_proxy_api.NwaL3ProxyApi(proxy.client)

    def create_port_precommit(self, context):
        """Create a tenant FW on NWA for new router interface/gateway ports."""
        device_owner = context._port['device_owner']
        if device_owner not in (constants.DEVICE_OWNER_ROUTER_INTF,
                                constants.DEVICE_OWNER_ROUTER_GW):
            # Only router ports are handled at create time; other ports are
            # processed when they get bound.
            # (typo fix: "missmatch" -> "mismatch")
            LOG.warning(_LW("device owner mismatch device_owner=%s"),
                        device_owner)
            return
        self._l3_create_tenant_fw(context)
        self._bind_segment_to_vif_type(context)

    def update_port_precommit(self, context):
        """Tear down the NWA general dev when a VM's port is detached."""
        new_port = context.current
        orig_port = context.original
        if (not new_port['device_id'] and orig_port['device_id'] and
                not new_port['device_owner'] and orig_port['device_owner']):
            # device_id and device_owner are cleared on VM deletion; use
            # the original port data for the NWA-side teardown.
            LOG.debug('original_port=%s', context.original)
            LOG.debug('updated_port=%s', context.current)
            self._l2_delete_general_dev(context, use_original_port=True)

    def delete_port_precommit(self, context):
        """Dispatch NWA teardown according to the port's device owner."""
        tenant_id, nwa_tenant_id = nwa_com_utils.get_tenant_info(context)
        device_owner = context._port['device_owner']
        device_id = context._port['device_id']
        LOG.debug("tenant_id=%(tid)s, nwa_tenant_id=%(nid)s, "
                  "device_owner=%(dev)s",
                  {'tid': tenant_id, 'nid': nwa_tenant_id,
                   'dev': device_owner})
        if device_owner in (constants.DEVICE_OWNER_ROUTER_GW,
                            constants.DEVICE_OWNER_ROUTER_INTF):
            self._l3_delete_tenant_fw(context)
        elif device_owner == constants.DEVICE_OWNER_FLOATINGIP:
            # Floating-IP ports have no NWA-side resource to remove.
            pass
        elif device_owner == '' and device_id == '':
            # Unbound, ownerless port: nothing was created on NWA.
            pass
        else:
            self._l2_delete_general_dev(context)

    def try_to_bind_segment_for_agent(self, context, segment, agent):
        """Bind via an NWA dummy segment, falling back to the OVS driver."""
        if self._bind_segment_to_vif_type(context, agent):
            device_owner = context._port['device_owner']
            if device_owner not in (constants.DEVICE_OWNER_ROUTER_GW,
                                    constants.DEVICE_OWNER_ROUTER_INTF):
                self._bind_port_nwa_debug_message(context)
                self._l2_create_general_dev(context)
            return True
        LOG.warning(_LW("binding segment not found for agent=%s"), agent)
        return super(
            NECNWAMechanismDriver, self
        ).try_to_bind_segment_for_agent(context, segment, agent)

    def _bind_segment_to_vif_type(self, context, agent=None):
        """Bind the port to a (possibly newly created) dynamic VLAN segment.

        Returns True when a resource group matching the port's
        device_owner (and, if given, the agent's bridge mappings) was
        found and the binding was set; False otherwise.
        """
        mappings = {}
        if agent:
            mappings = agent['configurations'].get('bridge_mappings', {})
        for res in self.resource_groups:
            if agent and res['ResourceGroupName'] not in mappings:
                continue
            if res['device_owner'] != context._port['device_owner']:
                continue
            network_id = context.network.current['id']
            dummy_segment = db.get_dynamic_segment(
                context.network._plugin_context.session,
                network_id, physical_network=res['ResourceGroupName'])
            LOG.debug("1st: dummy segment is %s", dummy_segment)
            if not dummy_segment:
                # No dynamic segment yet: create a placeholder VLAN segment
                # with segmentation id 0 for this resource group.
                dummy_segment = {
                    api.PHYSICAL_NETWORK: res['ResourceGroupName'],
                    api.NETWORK_TYPE: plugin_const.TYPE_VLAN,
                    api.SEGMENTATION_ID: 0
                }
                # NOTE(review): this relies on add_network_segment()
                # populating dummy_segment[api.ID] in place — confirm
                # against the Neutron ML2 db API in use.
                db.add_network_segment(
                    context.network._plugin_context.session,
                    network_id, dummy_segment, is_dynamic=True)
            LOG.debug("2nd: dummy segment is %s", dummy_segment)
            context.set_binding(dummy_segment[api.ID],
                                self.vif_type,
                                {portbindings.CAP_PORT_FILTER: True,
                                 portbindings.OVS_HYBRID_PLUG: True})
            return True
        return False

    def _bind_port_nwa_debug_message(self, context):
        """Log the details of a port binding for debugging purposes."""
        network_name, network_id = nwa_l2_utils.get_network_info(context)
        device_owner = context._port['device_owner']
        subnet_ids = []
        if 'fixed_ips' in context._port:
            for fixed_ip in context._port['fixed_ips']:
                subnet_ids.append(fixed_ip['subnet_id'])
        segmentation_id = 0
        if prov_net.PHYSICAL_NETWORK in context.network.current:
            segmentation_id = context.network.current[prov_net.SEGMENTATION_ID]
        else:
            # Multi-segment network: pick the segment whose physical
            # network matches the resource group for this device owner.
            for provider in context.network.current['segments']:
                if (provider.get(prov_net.PHYSICAL_NETWORK) ==
                        nwa_l2_utils.get_physical_network(
                            device_owner, self.resource_groups)):
                    segmentation_id = provider[prov_net.SEGMENTATION_ID]
                    break
        LOG.debug("provider segmentation_id = %s", segmentation_id)
        LOG.debug("_bind_port_nwa %(network_name)s "
                  "%(network_id)s %(device_id)s %(device_owner)s "
                  "%(port_id)s %(mac_address)s %(subnet_ids)s "
                  "%(segmentation_id)s",
                  {'network_name': network_name,
                   'network_id': network_id,
                   'device_id': context._port['device_id'],
                   'device_owner': device_owner,
                   'port_id': context._port['id'],
                   'mac_address': context._port['mac_address'],
                   'subnet_ids': subnet_ids,
                   'segmentation_id': segmentation_id})

    def _l2_create_general_dev(self, context):
        """Ask the NWA proxy to create a general dev for this port."""
        kwargs = self._make_l2api_kwargs(context)
        proxy = self._get_l2api_proxy(context, kwargs['tenant_id'])
        proxy.create_general_dev(context.network._plugin_context, **kwargs)

    def _l2_delete_general_dev(self, context, use_original_port=False):
        """Ask the NWA proxy to delete this port's general dev.

        A missing NWA tenant is logged and ignored — there is nothing to
        clean up in that case.
        """
        try:
            kwargs = self._make_l2api_kwargs(
                context, use_original_port=use_original_port)
            proxy = self._get_l2api_proxy(context, kwargs['tenant_id'])
            kwargs['nwa_info'] = self._revert_dhcp_agent_device_id(
                context, kwargs['nwa_info'])
            proxy.delete_general_dev(context.network._plugin_context, **kwargs)
        except nwa_exc.TenantNotFound as e:
            LOG.warning(_LW("skip delete_general_dev: %s"), e)

    def _make_l2api_kwargs(self, context, use_original_port=False):
        """Build the keyword arguments shared by the L2 proxy calls."""
        tenant_id, nwa_tenant_id = nwa_com_utils.get_tenant_info(context)
        nwa_info = nwa_l2_utils.portcontext_to_nwa_info(
            context, self.resource_groups, use_original_port)
        return {
            'tenant_id': tenant_id,
            'nwa_tenant_id': nwa_tenant_id,
            'nwa_info': nwa_info
        }

    def _revert_dhcp_agent_device_id(self, context, nwa_info):
        """Replace a reserved DHCP port's device id with the agent's id."""
        device_owner = context._port['device_owner']
        device_id = context._port['device_id']
        if device_owner == constants.DEVICE_OWNER_DHCP and \
                device_id == neutron_const.DEVICE_ID_RESERVED_DHCP_PORT:
            # get device_id of dhcp agent even if it is reserved.
            nwa_info['device']['id'] = utils.get_dhcp_agent_device_id(
                context.network.current['id'],
                context._port.get('binding:host_id')
            )
        return nwa_info

    def _l3_create_tenant_fw(self, context):
        """Create a tenant FW on NWA for a router port.

        :raises nwa_exc.ResourceGroupNameNotFound: when no resource group
            is configured for the port's device_owner
        """
        device_owner = context._port['device_owner']
        grplst = [res['device_owner'] for res in self.resource_groups]
        if device_owner not in grplst:
            raise nwa_exc.ResourceGroupNameNotFound(device_owner=device_owner)
        kwargs = self._make_l3api_kwargs(context)
        proxy = self._get_l3api_proxy(context, kwargs['tenant_id'])
        proxy.create_tenant_fw(context.network._plugin_context, **kwargs)

    def _l3_delete_tenant_fw(self, context):
        """Delete the tenant FW on NWA for a router port."""
        kwargs = self._make_l3api_kwargs(context)
        proxy = self._get_l3api_proxy(context, kwargs['tenant_id'])
        proxy.delete_tenant_fw(context.network._plugin_context, **kwargs)

    def _make_l3api_kwargs(self, context):
        """Build the keyword arguments for the L3 proxy calls.

        The tenant is resolved from the router that owns the port
        (device_id), not from the port's own tenant.
        """
        rt_tid = nwa_l3_db.get_tenant_id_by_router(
            context.network._plugin_context.session,
            context._port['device_id']
        )
        nwa_rt_tid = nwa_com_utils.get_nwa_tenant_id(rt_tid)
        nwa_info = nwa_l2_utils.portcontext_to_nwa_info(
            context, self.resource_groups)
        nwa_info['tenant_id'] = rt_tid  # overwrite by router's
        nwa_info['nwa_tenant_id'] = nwa_rt_tid  # tenant_id and nwa_tenant_id
        return {
            'tenant_id': rt_tid,
            'nwa_tenant_id': nwa_rt_tid,
            'nwa_info': nwa_info
        }
from neutron.common import constants as neutron_const
from neutron.common import utils
from neutron.extensions import portbindings
from neutron.extensions import providernet as prov_net
from neutron.plugins.common import constants as plugin_const
from neutron.plugins.ml2 import db
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers.openvswitch.mech_driver \
import mech_openvswitch as ovs
from neutron_lib import constants
from oslo_config import cfg
from oslo_log import log as logging
from networking_nec._i18n import _LW
from networking_nec.nwa.common import exceptions as nwa_exc
from networking_nec.nwa.common import utils as nwa_com_utils
from networking_nec.nwa.l2 import utils as nwa_l2_utils
from networking_nec.nwa.l3 import db_api as nwa_l3_db
from networking_nec.nwa.l3.rpc import nwa_l3_proxy_api
LOG = logging.getLogger(__name__)
class NECNWAMechanismDriver(ovs.OpenvswitchMechanismDriver):
    """ML2 mechanism driver that mirrors port operations to NEC NWA.

    Extends the stock Open vSwitch mechanism driver: router interface and
    gateway ports are propagated to the NWA backend as "tenant FW"
    operations, every other bindable port as a "general dev" operation.
    """

    def initialize(self):
        """Load the resource-group definitions from NWA configuration."""
        self.resource_groups = nwa_com_utils.load_json_from_file(
            'resource_group', cfg.CONF.NWA.resource_group_file,
            cfg.CONF.NWA.resource_group, default_value=[])

    def _get_l2api_proxy(self, context, tenant_id):
        """Return the per-tenant NWA proxy used for L2 API calls."""
        proxy = context._plugin.get_nwa_proxy(tenant_id,
                                              context._plugin_context)
        return proxy

    def _get_l3api_proxy(self, context, tenant_id):
        """Return the per-tenant NWA proxy wrapped for L3 API calls."""
        proxy = context._plugin.get_nwa_proxy(tenant_id,
                                              context.network._plugin_context)
        return nwa_l3_proxy_api.NwaL3ProxyApi(proxy.client)

    def create_port_precommit(self, context):
        """Create a tenant FW on NWA for new router interface/gateway ports."""
        device_owner = context._port['device_owner']
        if device_owner not in (constants.DEVICE_OWNER_ROUTER_INTF,
                                constants.DEVICE_OWNER_ROUTER_GW):
            # Only router ports are handled at create time; other ports are
            # processed when they get bound.
            # (typo fix: "missmatch" -> "mismatch")
            LOG.warning(_LW("device owner mismatch device_owner=%s"),
                        device_owner)
            return
        self._l3_create_tenant_fw(context)
        self._bind_segment_to_vif_type(context)

    def update_port_precommit(self, context):
        """Tear down the NWA general dev when a VM's port is detached."""
        new_port = context.current
        orig_port = context.original
        if (not new_port['device_id'] and orig_port['device_id'] and
                not new_port['device_owner'] and orig_port['device_owner']):
            # device_id and device_owner are cleared on VM deletion; use
            # the original port data for the NWA-side teardown.
            LOG.debug('original_port=%s', context.original)
            LOG.debug('updated_port=%s', context.current)
            self._l2_delete_general_dev(context, use_original_port=True)

    def delete_port_precommit(self, context):
        """Dispatch NWA teardown according to the port's device owner."""
        tenant_id, nwa_tenant_id = nwa_com_utils.get_tenant_info(context)
        device_owner = context._port['device_owner']
        device_id = context._port['device_id']
        LOG.debug("tenant_id=%(tid)s, nwa_tenant_id=%(nid)s, "
                  "device_owner=%(dev)s",
                  {'tid': tenant_id, 'nid': nwa_tenant_id,
                   'dev': device_owner})
        if device_owner in (constants.DEVICE_OWNER_ROUTER_GW,
                            constants.DEVICE_OWNER_ROUTER_INTF):
            self._l3_delete_tenant_fw(context)
        elif device_owner == constants.DEVICE_OWNER_FLOATINGIP:
            # Floating-IP ports have no NWA-side resource to remove.
            pass
        elif device_owner == '' and device_id == '':
            # Unbound, ownerless port: nothing was created on NWA.
            pass
        else:
            self._l2_delete_general_dev(context)

    def try_to_bind_segment_for_agent(self, context, segment, agent):
        """Bind via an NWA dummy segment, falling back to the OVS driver."""
        if self._bind_segment_to_vif_type(context, agent):
            device_owner = context._port['device_owner']
            if device_owner not in (constants.DEVICE_OWNER_ROUTER_GW,
                                    constants.DEVICE_OWNER_ROUTER_INTF):
                self._bind_port_nwa_debug_message(context)
                self._l2_create_general_dev(context)
            return True
        LOG.warning(_LW("binding segment not found for agent=%s"), agent)
        return super(
            NECNWAMechanismDriver, self
        ).try_to_bind_segment_for_agent(context, segment, agent)

    def _bind_segment_to_vif_type(self, context, agent=None):
        """Bind the port to a (possibly newly created) dynamic VLAN segment.

        Returns True when a resource group matching the port's
        device_owner (and, if given, the agent's bridge mappings) was
        found and the binding was set; False otherwise.
        """
        mappings = {}
        if agent:
            mappings = agent['configurations'].get('bridge_mappings', {})
        for res in self.resource_groups:
            if agent and res['ResourceGroupName'] not in mappings:
                continue
            if res['device_owner'] != context._port['device_owner']:
                continue
            network_id = context.network.current['id']
            dummy_segment = db.get_dynamic_segment(
                context.network._plugin_context.session,
                network_id, physical_network=res['ResourceGroupName'])
            LOG.debug("1st: dummy segment is %s", dummy_segment)
            if not dummy_segment:
                # No dynamic segment yet: create a placeholder VLAN segment
                # with segmentation id 0 for this resource group.
                dummy_segment = {
                    api.PHYSICAL_NETWORK: res['ResourceGroupName'],
                    api.NETWORK_TYPE: plugin_const.TYPE_VLAN,
                    api.SEGMENTATION_ID: 0
                }
                # NOTE(review): this relies on add_network_segment()
                # populating dummy_segment[api.ID] in place — confirm
                # against the Neutron ML2 db API in use.
                db.add_network_segment(
                    context.network._plugin_context.session,
                    network_id, dummy_segment, is_dynamic=True)
            LOG.debug("2nd: dummy segment is %s", dummy_segment)
            context.set_binding(dummy_segment[api.ID],
                                self.vif_type,
                                {portbindings.CAP_PORT_FILTER: True,
                                 portbindings.OVS_HYBRID_PLUG: True})
            return True
        return False

    def _bind_port_nwa_debug_message(self, context):
        """Log the details of a port binding for debugging purposes."""
        network_name, network_id = nwa_l2_utils.get_network_info(context)
        device_owner = context._port['device_owner']
        subnet_ids = []
        if 'fixed_ips' in context._port:
            for fixed_ip in context._port['fixed_ips']:
                subnet_ids.append(fixed_ip['subnet_id'])
        segmentation_id = 0
        if prov_net.PHYSICAL_NETWORK in context.network.current:
            segmentation_id = context.network.current[prov_net.SEGMENTATION_ID]
        else:
            # Multi-segment network: pick the segment whose physical
            # network matches the resource group for this device owner.
            for provider in context.network.current['segments']:
                if (provider.get(prov_net.PHYSICAL_NETWORK) ==
                        nwa_l2_utils.get_physical_network(
                            device_owner, self.resource_groups)):
                    segmentation_id = provider[prov_net.SEGMENTATION_ID]
                    break
        LOG.debug("provider segmentation_id = %s", segmentation_id)
        LOG.debug("_bind_port_nwa %(network_name)s "
                  "%(network_id)s %(device_id)s %(device_owner)s "
                  "%(port_id)s %(mac_address)s %(subnet_ids)s "
                  "%(segmentation_id)s",
                  {'network_name': network_name,
                   'network_id': network_id,
                   'device_id': context._port['device_id'],
                   'device_owner': device_owner,
                   'port_id': context._port['id'],
                   'mac_address': context._port['mac_address'],
                   'subnet_ids': subnet_ids,
                   'segmentation_id': segmentation_id})

    def _l2_create_general_dev(self, context):
        """Ask the NWA proxy to create a general dev for this port."""
        kwargs = self._make_l2api_kwargs(context)
        proxy = self._get_l2api_proxy(context, kwargs['tenant_id'])
        proxy.create_general_dev(context.network._plugin_context, **kwargs)

    def _l2_delete_general_dev(self, context, use_original_port=False):
        """Ask the NWA proxy to delete this port's general dev.

        A missing NWA tenant is logged and ignored — there is nothing to
        clean up in that case.
        """
        try:
            kwargs = self._make_l2api_kwargs(
                context, use_original_port=use_original_port)
            proxy = self._get_l2api_proxy(context, kwargs['tenant_id'])
            kwargs['nwa_info'] = self._revert_dhcp_agent_device_id(
                context, kwargs['nwa_info'])
            proxy.delete_general_dev(context.network._plugin_context, **kwargs)
        except nwa_exc.TenantNotFound as e:
            LOG.warning(_LW("skip delete_general_dev: %s"), e)

    def _make_l2api_kwargs(self, context, use_original_port=False):
        """Build the keyword arguments shared by the L2 proxy calls."""
        tenant_id, nwa_tenant_id = nwa_com_utils.get_tenant_info(context)
        nwa_info = nwa_l2_utils.portcontext_to_nwa_info(
            context, self.resource_groups, use_original_port)
        return {
            'tenant_id': tenant_id,
            'nwa_tenant_id': nwa_tenant_id,
            'nwa_info': nwa_info
        }

    def _revert_dhcp_agent_device_id(self, context, nwa_info):
        """Replace a reserved DHCP port's device id with the agent's id."""
        device_owner = context._port['device_owner']
        device_id = context._port['device_id']
        if device_owner == constants.DEVICE_OWNER_DHCP and \
                device_id == neutron_const.DEVICE_ID_RESERVED_DHCP_PORT:
            # get device_id of dhcp agent even if it is reserved.
            nwa_info['device']['id'] = utils.get_dhcp_agent_device_id(
                context.network.current['id'],
                context._port.get('binding:host_id')
            )
        return nwa_info

    def _l3_create_tenant_fw(self, context):
        """Create a tenant FW on NWA for a router port.

        :raises nwa_exc.ResourceGroupNameNotFound: when no resource group
            is configured for the port's device_owner
        """
        device_owner = context._port['device_owner']
        grplst = [res['device_owner'] for res in self.resource_groups]
        if device_owner not in grplst:
            raise nwa_exc.ResourceGroupNameNotFound(device_owner=device_owner)
        kwargs = self._make_l3api_kwargs(context)
        proxy = self._get_l3api_proxy(context, kwargs['tenant_id'])
        proxy.create_tenant_fw(context.network._plugin_context, **kwargs)

    def _l3_delete_tenant_fw(self, context):
        """Delete the tenant FW on NWA for a router port."""
        kwargs = self._make_l3api_kwargs(context)
        proxy = self._get_l3api_proxy(context, kwargs['tenant_id'])
        proxy.delete_tenant_fw(context.network._plugin_context, **kwargs)

    def _make_l3api_kwargs(self, context):
        """Build the keyword arguments for the L3 proxy calls.

        The tenant is resolved from the router that owns the port
        (device_id), not from the port's own tenant.
        """
        rt_tid = nwa_l3_db.get_tenant_id_by_router(
            context.network._plugin_context.session,
            context._port['device_id']
        )
        nwa_rt_tid = nwa_com_utils.get_nwa_tenant_id(rt_tid)
        nwa_info = nwa_l2_utils.portcontext_to_nwa_info(
            context, self.resource_groups)
        nwa_info['tenant_id'] = rt_tid  # overwrite by router's
        nwa_info['nwa_tenant_id'] = nwa_rt_tid  # tenant_id and nwa_tenant_id
        return {
            'tenant_id': rt_tid,
            'nwa_tenant_id': nwa_rt_tid,
            'nwa_info': nwa_info
        }
from unittest.mock import patch
from homeassistant import config_entries
from homeassistant.components.aladdin_connect.config_flow import InvalidAuth
from homeassistant.components.aladdin_connect.const import DOMAIN
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import (
RESULT_TYPE_ABORT,
RESULT_TYPE_CREATE_ENTRY,
RESULT_TYPE_FORM,
)
from tests.common import MockConfigEntry
async def test_form(hass: HomeAssistant) -> None:
    """Test we get the form."""
    init_result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert init_result["type"] == RESULT_TYPE_FORM
    assert init_result["errors"] is None

    credentials = {
        CONF_USERNAME: "test-username",
        CONF_PASSWORD: "<PASSWORD>",
    }
    login_patch = patch(
        "homeassistant.components.aladdin_connect.config_flow.AladdinConnectClient.login",
        return_value=True,
    )
    setup_patch = patch(
        "homeassistant.components.aladdin_connect.async_setup_entry",
        return_value=True,
    )
    with login_patch, setup_patch as mock_setup_entry:
        create_result = await hass.config_entries.flow.async_configure(
            init_result["flow_id"], dict(credentials)
        )
        await hass.async_block_till_done()

    assert create_result["type"] == RESULT_TYPE_CREATE_ENTRY
    assert create_result["title"] == "Aladdin Connect"
    assert create_result["data"] == credentials
    assert len(mock_setup_entry.mock_calls) == 1
async def test_form_invalid_auth(hass: HomeAssistant) -> None:
    """Test we handle invalid auth."""
    init_result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # Make the backend login raise so the flow reports an auth error.
    with patch(
        "homeassistant.components.aladdin_connect.config_flow.AladdinConnectClient.login",
        side_effect=InvalidAuth,
    ):
        error_result = await hass.config_entries.flow.async_configure(
            init_result["flow_id"],
            {CONF_USERNAME: "test-username", CONF_PASSWORD: "<PASSWORD>"},
        )

    assert error_result["type"] == RESULT_TYPE_FORM
    assert error_result["errors"] == {"base": "invalid_auth"}
async def test_form_cannot_connect(hass: HomeAssistant) -> None:
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.aladdin_connect.config_flow.AladdinConnectClient.login",
side_effect=ConnectionError,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_USERNAME: "test-username",
CONF_PASSWORD: "<PASSWORD>",
},
)
assert result2["type"] == RESULT_TYPE_FORM
assert result2["errors"] == {"base": "cannot_connect"}
with patch(
"homeassistant.components.aladdin_connect.config_flow.AladdinConnectClient.login",
side_effect=TypeError,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_USERNAME: "test-username",
CONF_PASSWORD: "<PASSWORD>",
},
)
assert result2["type"] == RESULT_TYPE_FORM
assert result2["errors"] == {"base": "cannot_connect"}
with patch(
"homeassistant.components.aladdin_connect.config_flow.AladdinConnectClient.login",
return_value=False,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_USERNAME: "test-username",
CONF_PASSWORD: "<PASSWORD>",
},
)
assert result2["type"] == RESULT_TYPE_FORM
assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_already_configured(hass):
"""Test we handle already configured error."""
mock_entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_USERNAME: "test-username", CONF_PASSWORD: "<PASSWORD>"},
unique_id="test-username",
)
mock_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == config_entries.SOURCE_USER
with patch(
"homeassistant.components.aladdin_connect.config_flow.AladdinConnectClient.login",
return_value=True,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_USERNAME: "test-username",
CONF_PASSWORD: "<PASSWORD>",
},
)
await hass.async_block_till_done()
assert result2["type"] == "abort"
assert result2["reason"] == "already_configured"
async def test_import_flow_success(hass: HomeAssistant) -> None:
"""Test a successful import of yaml."""
with patch(
"homeassistant.components.aladdin_connect.cover.async_setup_platform",
return_value=True,
), patch(
"homeassistant.components.aladdin_connect.config_flow.AladdinConnectClient.login",
return_value=True,
), patch(
"homeassistant.components.aladdin_connect.cover.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={
CONF_USERNAME: "test-user",
CONF_PASSWORD: "<PASSWORD>",
},
)
await hass.async_block_till_done()
assert result2["type"] == RESULT_TYPE_CREATE_ENTRY
assert result2["title"] == "Aladdin Connect"
assert result2["data"] == {
CONF_USERNAME: "test-user",
CONF_PASSWORD: "<PASSWORD>",
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_reauth_flow(hass: HomeAssistant) -> None:
"""Test a successful reauth flow."""
mock_entry = MockConfigEntry(
domain=DOMAIN,
data={"username": "test-username", "password": "<PASSWORD>"},
unique_id="test-username",
)
mock_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={
"source": config_entries.SOURCE_REAUTH,
"unique_id": mock_entry.unique_id,
"entry_id": mock_entry.entry_id,
},
data={"username": "test-username", "password": "<PASSWORD>"},
)
assert result["step_id"] == "reauth_confirm"
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {}
with patch(
"homeassistant.components.aladdin_connect.cover.async_setup_platform",
return_value=True,
), patch(
"homeassistant.components.aladdin_connect.config_flow.AladdinConnectClient.login",
return_value=True,
), patch(
"homeassistant.components.aladdin_connect.cover.async_setup_entry",
return_value=True,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_PASSWORD: "<PASSWORD>"},
)
await hass.async_block_till_done()
assert result2["type"] == RESULT_TYPE_ABORT
assert result2["reason"] == "reauth_successful"
assert mock_entry.data == {
CONF_USERNAME: "test-username",
CONF_PASSWORD: "<PASSWORD>",
}
async def test_reauth_flow_auth_error(hass: HomeAssistant) -> None:
"""Test a successful reauth flow."""
mock_entry = MockConfigEntry(
domain=DOMAIN,
data={"username": "test-username", "password": "<PASSWORD>"},
unique_id="test-username",
)
mock_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={
"source": config_entries.SOURCE_REAUTH,
"unique_id": mock_entry.unique_id,
"entry_id": mock_entry.entry_id,
},
data={"username": "test-username", "password": "<PASSWORD>"},
)
assert result["step_id"] == "reauth_confirm"
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {}
with patch(
"homeassistant.components.aladdin_connect.cover.async_setup_platform",
return_value=True,
), patch(
"homeassistant.components.aladdin_connect.config_flow.AladdinConnectClient.login",
return_value=False,
), patch(
"homeassistant.components.aladdin_connect.cover.async_setup_entry",
return_value=True,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_PASSWORD: "<PASSWORD>"},
)
await hass.async_block_till_done()
assert result2["type"] == RESULT_TYPE_FORM
assert result2["errors"] == {"base": "invalid_auth"}
async def test_reauth_flow_other_error(hass: HomeAssistant) -> None:
"""Test an unsuccessful reauth flow."""
mock_entry = MockConfigEntry(
domain=DOMAIN,
data={"username": "test-username", "password": "<PASSWORD>"},
unique_id="test-username",
)
mock_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={
"source": config_entries.SOURCE_REAUTH,
"unique_id": mock_entry.unique_id,
"entry_id": mock_entry.entry_id,
},
data={"username": "test-username", "password": "<PASSWORD>"},
)
assert result["step_id"] == "reauth_confirm"
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {}
with patch(
"homeassistant.components.aladdin_connect.cover.async_setup_platform",
return_value=True,
), patch(
"homeassistant.components.aladdin_connect.config_flow.AladdinConnectClient.login",
side_effect=ValueError,
), patch(
"homeassistant.components.aladdin_connect.cover.async_setup_entry",
return_value=True,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_PASSWORD: "<PASSWORD>"},
)
await hass.async_block_till_done()
assert result2["type"] == RESULT_TYPE_FORM
assert result2["errors"] == {"base": "cannot_connect"} | tests/components/aladdin_connect/test_config_flow.py | from unittest.mock import patch
from homeassistant import config_entries
from homeassistant.components.aladdin_connect.config_flow import InvalidAuth
from homeassistant.components.aladdin_connect.const import DOMAIN
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import (
RESULT_TYPE_ABORT,
RESULT_TYPE_CREATE_ENTRY,
RESULT_TYPE_FORM,
)
from tests.common import MockConfigEntry
async def test_form(hass: HomeAssistant) -> None:
"""Test we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] is None
with patch(
"homeassistant.components.aladdin_connect.config_flow.AladdinConnectClient.login",
return_value=True,
), patch(
"homeassistant.components.aladdin_connect.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_USERNAME: "test-username",
CONF_PASSWORD: "<PASSWORD>",
},
)
await hass.async_block_till_done()
assert result2["type"] == RESULT_TYPE_CREATE_ENTRY
assert result2["title"] == "Aladdin Connect"
assert result2["data"] == {
CONF_USERNAME: "test-username",
CONF_PASSWORD: "<PASSWORD>",
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_invalid_auth(hass: HomeAssistant) -> None:
"""Test we handle invalid auth."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.aladdin_connect.config_flow.AladdinConnectClient.login",
side_effect=InvalidAuth,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_USERNAME: "test-username",
CONF_PASSWORD: "<PASSWORD>",
},
)
assert result2["type"] == RESULT_TYPE_FORM
assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_cannot_connect(hass: HomeAssistant) -> None:
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.aladdin_connect.config_flow.AladdinConnectClient.login",
side_effect=ConnectionError,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_USERNAME: "test-username",
CONF_PASSWORD: "<PASSWORD>",
},
)
assert result2["type"] == RESULT_TYPE_FORM
assert result2["errors"] == {"base": "cannot_connect"}
with patch(
"homeassistant.components.aladdin_connect.config_flow.AladdinConnectClient.login",
side_effect=TypeError,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_USERNAME: "test-username",
CONF_PASSWORD: "<PASSWORD>",
},
)
assert result2["type"] == RESULT_TYPE_FORM
assert result2["errors"] == {"base": "cannot_connect"}
with patch(
"homeassistant.components.aladdin_connect.config_flow.AladdinConnectClient.login",
return_value=False,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_USERNAME: "test-username",
CONF_PASSWORD: "<PASSWORD>",
},
)
assert result2["type"] == RESULT_TYPE_FORM
assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_already_configured(hass):
"""Test we handle already configured error."""
mock_entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_USERNAME: "test-username", CONF_PASSWORD: "<PASSWORD>"},
unique_id="test-username",
)
mock_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == config_entries.SOURCE_USER
with patch(
"homeassistant.components.aladdin_connect.config_flow.AladdinConnectClient.login",
return_value=True,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_USERNAME: "test-username",
CONF_PASSWORD: "<PASSWORD>",
},
)
await hass.async_block_till_done()
assert result2["type"] == "abort"
assert result2["reason"] == "already_configured"
async def test_import_flow_success(hass: HomeAssistant) -> None:
"""Test a successful import of yaml."""
with patch(
"homeassistant.components.aladdin_connect.cover.async_setup_platform",
return_value=True,
), patch(
"homeassistant.components.aladdin_connect.config_flow.AladdinConnectClient.login",
return_value=True,
), patch(
"homeassistant.components.aladdin_connect.cover.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={
CONF_USERNAME: "test-user",
CONF_PASSWORD: "<PASSWORD>",
},
)
await hass.async_block_till_done()
assert result2["type"] == RESULT_TYPE_CREATE_ENTRY
assert result2["title"] == "Aladdin Connect"
assert result2["data"] == {
CONF_USERNAME: "test-user",
CONF_PASSWORD: "<PASSWORD>",
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_reauth_flow(hass: HomeAssistant) -> None:
"""Test a successful reauth flow."""
mock_entry = MockConfigEntry(
domain=DOMAIN,
data={"username": "test-username", "password": "<PASSWORD>"},
unique_id="test-username",
)
mock_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={
"source": config_entries.SOURCE_REAUTH,
"unique_id": mock_entry.unique_id,
"entry_id": mock_entry.entry_id,
},
data={"username": "test-username", "password": "<PASSWORD>"},
)
assert result["step_id"] == "reauth_confirm"
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {}
with patch(
"homeassistant.components.aladdin_connect.cover.async_setup_platform",
return_value=True,
), patch(
"homeassistant.components.aladdin_connect.config_flow.AladdinConnectClient.login",
return_value=True,
), patch(
"homeassistant.components.aladdin_connect.cover.async_setup_entry",
return_value=True,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_PASSWORD: "<PASSWORD>"},
)
await hass.async_block_till_done()
assert result2["type"] == RESULT_TYPE_ABORT
assert result2["reason"] == "reauth_successful"
assert mock_entry.data == {
CONF_USERNAME: "test-username",
CONF_PASSWORD: "<PASSWORD>",
}
async def test_reauth_flow_auth_error(hass: HomeAssistant) -> None:
"""Test a successful reauth flow."""
mock_entry = MockConfigEntry(
domain=DOMAIN,
data={"username": "test-username", "password": "<PASSWORD>"},
unique_id="test-username",
)
mock_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={
"source": config_entries.SOURCE_REAUTH,
"unique_id": mock_entry.unique_id,
"entry_id": mock_entry.entry_id,
},
data={"username": "test-username", "password": "<PASSWORD>"},
)
assert result["step_id"] == "reauth_confirm"
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {}
with patch(
"homeassistant.components.aladdin_connect.cover.async_setup_platform",
return_value=True,
), patch(
"homeassistant.components.aladdin_connect.config_flow.AladdinConnectClient.login",
return_value=False,
), patch(
"homeassistant.components.aladdin_connect.cover.async_setup_entry",
return_value=True,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_PASSWORD: "<PASSWORD>"},
)
await hass.async_block_till_done()
assert result2["type"] == RESULT_TYPE_FORM
assert result2["errors"] == {"base": "invalid_auth"}
async def test_reauth_flow_other_error(hass: HomeAssistant) -> None:
"""Test an unsuccessful reauth flow."""
mock_entry = MockConfigEntry(
domain=DOMAIN,
data={"username": "test-username", "password": "<PASSWORD>"},
unique_id="test-username",
)
mock_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={
"source": config_entries.SOURCE_REAUTH,
"unique_id": mock_entry.unique_id,
"entry_id": mock_entry.entry_id,
},
data={"username": "test-username", "password": "<PASSWORD>"},
)
assert result["step_id"] == "reauth_confirm"
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {}
with patch(
"homeassistant.components.aladdin_connect.cover.async_setup_platform",
return_value=True,
), patch(
"homeassistant.components.aladdin_connect.config_flow.AladdinConnectClient.login",
side_effect=ValueError,
), patch(
"homeassistant.components.aladdin_connect.cover.async_setup_entry",
return_value=True,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_PASSWORD: "<PASSWORD>"},
)
await hass.async_block_till_done()
assert result2["type"] == RESULT_TYPE_FORM
assert result2["errors"] == {"base": "cannot_connect"} | 0.727975 | 0.333978 |
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from utils.enum_type import EPT
class MultiHeadAttention(nn.Module):
    r"""Scaled dot-product multi-head attention, as proposed in
    "Attention Is All You Need" (Vaswani et al., 2017).

    The embedding dimension is split across ``num_heads`` parallel heads;
    the per-head attention weights are collapsed with an element-wise
    maximum before being returned to the caller.
    """

    def __init__(self, embedding_size, num_heads, dropout_ratio=0.0):
        super(MultiHeadAttention, self).__init__()
        self.embedding_size = embedding_size
        self.num_heads = num_heads
        self.head_size = embedding_size // num_heads
        assert self.head_size * num_heads == self.embedding_size, "embedding size must be divisible by num_heads"
        # 1 / sqrt(d_k): scale applied to the queries before the dot product.
        self.scaling = self.head_size ** -0.5
        # Layers are created and initialised in the same order as before so
        # that seeded RNG draws remain reproducible.
        self.linear_query = nn.Linear(embedding_size, embedding_size)
        self.linear_key = nn.Linear(embedding_size, embedding_size)
        self.linear_value = nn.Linear(embedding_size, embedding_size)
        for projection in (self.linear_query, self.linear_key, self.linear_value):
            nn.init.normal_(projection.weight, mean=0, std=0.02)
        self.linear_out = nn.Linear(embedding_size, embedding_size)
        nn.init.normal_(self.linear_out.weight, mean=0, std=0.02)
        # Dropout applied to the attention probabilities (post-softmax).
        self.weight_dropout = nn.Dropout(dropout_ratio)

    def forward(self, query, key, value, key_padding_mask=None, attn_mask=None):
        r"""Run multi-head attention.

        Args:
            query (torch.Tensor): shape [batch_size, tgt_len, embedding_size].
            key (torch.Tensor): shape [batch_size, src_len, embedding_size].
            value (torch.Tensor): shape [batch_size, src_len, embedding_size].
            key_padding_mask (torch.Tensor): bool mask of shape
                [batch_size, src_len]; ``True`` positions are excluded.
            attn_mask (torch.BoolTensor): mask broadcast over batch and heads.
                NOTE(review): the code unsqueezes this twice, so a 2-D
                [tgt_len, src_len] mask is what broadcasts cleanly — confirm
                against callers before passing a 3-D mask.

        Return:
            tuple(torch.Tensor, torch.Tensor):
                attn_repre, shape [batch_size, tgt_len, embedding_size].
                attn_weights (max over heads), shape [batch_size, tgt_len, src_len].
        """
        device = query.device
        batch_size, tgt_len, embedding_size = query.size()
        src_len = key.size(1)
        assert key.size() == value.size()

        # Project, scale the queries, and split the embedding axis into heads.
        proj_q = (self.linear_query(query) * self.scaling) \
            .view(batch_size, tgt_len, self.num_heads, self.head_size).permute(0, 2, 1, 3)
        proj_k = self.linear_key(key) \
            .view(batch_size, src_len, self.num_heads, self.head_size).permute(0, 2, 3, 1)
        proj_v = self.linear_value(value) \
            .view(batch_size, src_len, self.num_heads, self.head_size).permute(0, 2, 1, 3)

        # Raw scores: [batch_size, num_heads, tgt_len, src_len].
        scores = torch.matmul(proj_q, proj_k)
        assert list(scores.size()) == [batch_size, self.num_heads, tgt_len, src_len]

        if attn_mask is not None:
            scores.masked_fill_(
                attn_mask.unsqueeze(0).unsqueeze(1).to(device),
                float("-inf")
            )
        if key_padding_mask is not None:
            scores.masked_fill_(
                key_padding_mask.unsqueeze(1).unsqueeze(2).to(device),
                float("-inf")
            )

        # Normalise over the source axis, then apply dropout to the weights.
        probs = self.weight_dropout(F.softmax(scores, dim=-1))

        context = torch.matmul(probs, proj_v)
        assert list(context.size()) == [batch_size, self.num_heads, tgt_len, self.head_size]
        context = context.transpose(1, 2).contiguous().view(batch_size, tgt_len, embedding_size)
        context = self.linear_out(context)

        # Collapse heads in the returned weights by taking the maximum.
        max_weights, _ = probs.max(dim=1)
        return context, max_weights
class EPTMultiHeadAttentionWeights(nn.Module):
    """
    Multi-head attention *weight* computation (follows the paper,
    'Attention is all you need').

    This layer produces only the scaled dot-product scores between query Q
    and key K — no softmax and no value projection are applied here.
    """

    def __init__(self, **config):
        """
        Initialize MultiHeadAttentionWeights class

        :keyword int hidden_dim: Vector dimension of hidden states (H). 768 by default.
        :keyword int num_heads: Number of attention heads (N). 12 by default.
        """
        super().__init__()
        self.config = config
        # H must split evenly across the N heads.
        assert self.hidden_dim % self.num_heads == 0, \
            "Hidden dimension %s is not divisible by the number of heads %s." % (self.hidden_dim, self.num_heads)
        # Projections for Q and K (created in this order to keep seeded RNG
        # draws identical to the previous implementation).
        self.linear_q = nn.Linear(self.hidden_dim, self.hidden_dim)
        self.linear_k = nn.Linear(self.hidden_dim, self.hidden_dim)
        # Per-head dimension D = H / N and its square root (scaling factor).
        self.dim_head = self.hidden_dim // self.num_heads
        self.sqrt_dim = self.dim_head ** 0.5

    def forward(self, query: torch.Tensor, key: torch.Tensor = None, key_ignorance_mask: torch.Tensor = None,
                attention_mask: torch.Tensor = None, head_at_last: bool = True) -> torch.Tensor:
        """
        Compute multi-head attention weights.

        Args:
            query (torch.Tensor): FloatTensor of shape [B, S, H]
                (batch, query length, hidden size).
            key (torch.Tensor): FloatTensor of shape [B, T, H] or [1, T, H].
                ``None`` (default) reuses ``query`` as the key matrix.
            key_ignorance_mask (torch.Tensor): BoolTensor of shape [B, T];
                positions that are ``True`` receive -Infinity. ``None`` by default.
            attention_mask (torch.Tensor): BoolTensor of shape [S, T];
                positions that are ``True`` receive -Infinity. ``None`` by default.
            head_at_last (bool): ``True`` (default) returns [B, S, T, N];
                ``False`` returns [B, N, S, T].

        Returns:
            torch.FloatTensor: Multi-head attention weights (scores).
        """
        # Self-attention shortcut: reuse Q as K.
        if key is None:
            key = query

        # Shape / dtype sanity checks; either batch axis may be a broadcastable 1.
        assert query.shape[0] == key.shape[0] or key.shape[0] == 1 or query.shape[0] == 1
        assert key_ignorance_mask is None or (key.shape[:2] == key_ignorance_mask.shape and
                                              key_ignorance_mask.dtype == torch.bool)
        assert attention_mask is None or (query.shape[1] == attention_mask.shape[0] and
                                          key.shape[1] == attention_mask.shape[1] and
                                          attention_mask.dtype == torch.bool)

        len_q = query.shape[1]
        len_k = key.shape[1]
        batch = max(key.shape[0], query.shape[0])

        # Project both inputs, folding the 1/sqrt(D) scaling into Q.
        q_proj = self.linear_q(query) / self.sqrt_dim
        k_proj = self.linear_k(key)

        # Broadcast a singleton batch ([1, *, H]) up to the full batch size.
        if q_proj.shape[0] == 1:
            q_proj = q_proj.expand(batch, -1, -1)
        if k_proj.shape[0] == 1:
            k_proj = k_proj.expand(batch, -1, -1)

        # Q: [B, S, N, D] -> [B, N, S, D] -> [BN, S, D].
        q_proj = q_proj.view(batch, len_q, self.num_heads, self.dim_head) \
            .transpose(1, 2).flatten(0, 1).contiguous()
        # K: [B, T, N, D] -> [B, N, D, T] -> [BN, D, T].
        k_proj = k_proj.view(batch, len_k, self.num_heads, self.dim_head) \
            .permute(0, 2, 3, 1).flatten(0, 1).contiguous()

        # Dot products per head: [BN, S, T] -> [B, N, S, T].
        weights = torch.bmm(q_proj, k_proj).view(batch, self.num_heads, len_q, len_k).contiguous()

        # attention_mask [S, T] broadcasts as [1, 1, S, T].
        if attention_mask is not None:
            weights.masked_fill_(attention_mask, EPT.NEG_INF)
        # key_ignorance_mask [B, T] is reshaped to [B, 1, 1, T] before applying.
        if key_ignorance_mask is not None:
            weights.masked_fill_(key_ignorance_mask.unsqueeze(1).unsqueeze(1), EPT.NEG_INF)

        # [B, N, S, T] -> [B, S, T, N] when the head axis is requested last.
        if head_at_last:
            return weights.permute(0, 2, 3, 1).contiguous()
        return weights

    @property
    def hidden_dim(self) -> int:
        """
        :rtype: int
        :return: Vector dimension of hidden states (H); defaults to 768.
        """
        return self.config.get('hidden_dim', 768)

    @property
    def num_heads(self) -> int:
        """
        :rtype: int
        :return: Number of attention heads (N); defaults to 12.
        """
        return self.config.get('num_heads', 12)
class EPTMultiHeadAttention(nn.Module):
    """
    Class for computing multi-head attention (follows the paper, 'Attention is all you need')

    This class computes attention over K-V pairs with query Q: softmax over the
    scores produced by :class:`EPTMultiHeadAttentionWeights`, a weighted sum over
    the projected values, and a final output projection.
    """

    def __init__(self, **config):
        """
        Initialize MultiHeadAttention class

        :keyword int hidden_dim: Vector dimension of hidden states (H). 768 by default
        :keyword int num_heads: Number of attention heads (N). 12 by default
        :keyword float dropout_p: Probability of dropout. 0 by default
        """
        super().__init__()
        # Multi-head Attention Weight layer
        self.attn = EPTMultiHeadAttentionWeights(**config)
        # BUGFIX: honour the documented ``dropout_p`` keyword instead of
        # hard-coding 0.0. The default is still 0.0, so existing callers that
        # never passed ``dropout_p`` behave exactly as before.
        self.dropout_p = config.get('dropout_p', 0.0)
        # Dropout over attention weights (as in 'Attention is all you need')
        self.dropout_attn = nn.Dropout(self.dropout_p)
        # Linear transformations for value and output matrix.
        self.linear_v = nn.Linear(self.attn.hidden_dim, self.attn.hidden_dim)
        self.linear_out = nn.Linear(self.attn.hidden_dim, self.attn.hidden_dim)

    def forward(self, query: torch.Tensor, key_value: torch.Tensor = None, key_ignorance_mask: torch.Tensor = None,
                attention_mask: torch.Tensor = None, return_weights: bool = False, **kwargs):
        """
        Compute multi-head attention

        Args:
            query (torch.Tensor): FloatTensor representing the query matrix with shape
                [batch_size, query_sequence_length, hidden_size].
            key_value (torch.Tensor): FloatTensor representing the key/value matrix with shape
                [batch_size, key_sequence_length, hidden_size] or [1, key_sequence_length, hidden_size].
                By default, this is `None` (use the query matrix as key and value).
            key_ignorance_mask (torch.Tensor): BoolTensor of shape [batch_size, key_sequence_length].
                If an element at (b, t) is `True,` the corresponding scores are set to -Infinity.
                By default, this is `None` (no mask to apply).
            attention_mask (torch.Tensor): BoolTensor of shape [query_sequence_length, key_sequence_length].
                If an element at (s, t) is `True,` the corresponding scores are set to -Infinity.
                By default, this is `None` (no mask to apply).
            return_weights (bool): Use `True` to also return the attention weights.
                By default, this is `False` (return only the attention output).

        Returns:
            Union[torch.FloatTensor, Tuple[torch.FloatTensor, torch.FloatTensor]]:
                If return_weights is True, return (Attention Output, Attention Weights);
                otherwise return only the Attention Output.
                Attention Output: shape [batch_size, query_sequence_length, hidden_size].
                Attention Weights: shape [batch_size, query_sequence_length, key_sequence_length, head_nums].
        """
        # If key_value is None, reuse query matrix Q (self-attention).
        if key_value is None:
            key_value = query

        # Compute attention scores: [B, N, S, T].
        attn_weights = self.attn(query=query, key=key_value, key_ignorance_mask=key_ignorance_mask,
                                 attention_mask=attention_mask, head_at_last=False)

        # Retrieve shape information.
        batch_size, _, query_len, key_len = attn_weights.shape

        # Softmax over the key axis, then dropout.
        # (Dropout was applied after softmax in the original paper.)
        attn = attn_weights.softmax(dim=-1)
        attn = self.dropout_attn(attn)
        # A fully-masked row (all scores -Infinity) yields NaN after softmax;
        # zero those entries so they contribute nothing to the weighted sum.
        # Shape [B, N, S, T] -> [BN, S, T].
        attn = attn.masked_fill(torch.isnan(attn), 0.0).view(-1, query_len, key_len)

        # Project values and split heads: [1 or B, T, N, H/N] -> [1 or B, N, T, H/N].
        value_size = key_value.shape[0]
        value = self.linear_v(key_value) \
            .view(value_size, key_len, self.attn.num_heads, self.attn.dim_head).transpose(1, 2)

        # If value has a singleton batch ([1, *]), expand it to the full batch.
        if value_size == 1:
            value = value.expand(batch_size, -1, -1, -1)

        # Flatten dim #0 and #1: [B, N, T, H/N] -> [BN, T, H/N].
        value = value.flatten(0, 1).contiguous()

        # Weighted sum: [BN, S, H/N] -> [B, N, S, H/N] -> [B, S, N, H/N] -> [B, S, H].
        output = torch.bmm(attn, value) \
            .view(batch_size, self.attn.num_heads, query_len, self.attn.dim_head) \
            .transpose(1, 2).flatten(2, 3).contiguous()

        # Final output projection. [B, S, H].
        output = self.linear_out(output)

        if return_weights:
            # Weights are permuted to [B, S, T, N] for the caller.
            return output, attn_weights.permute(0, 2, 3, 1).contiguous()
        else:
            return output
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from utils.enum_type import EPT
class MultiHeadAttention(nn.Module):
    r"""Scaled dot-product multi-head attention, as proposed in
    "Attention Is All You Need" (Vaswani et al., 2017).

    The embedding dimension is split across ``num_heads`` parallel heads;
    the per-head attention weights are collapsed with an element-wise
    maximum before being returned to the caller.
    """

    def __init__(self, embedding_size, num_heads, dropout_ratio=0.0):
        super(MultiHeadAttention, self).__init__()
        self.embedding_size = embedding_size
        self.num_heads = num_heads
        self.head_size = embedding_size // num_heads
        assert self.head_size * num_heads == self.embedding_size, "embedding size must be divisible by num_heads"
        # 1 / sqrt(d_k): scale applied to the queries before the dot product.
        self.scaling = self.head_size ** -0.5
        # Layers are created and initialised in the same order as before so
        # that seeded RNG draws remain reproducible.
        self.linear_query = nn.Linear(embedding_size, embedding_size)
        self.linear_key = nn.Linear(embedding_size, embedding_size)
        self.linear_value = nn.Linear(embedding_size, embedding_size)
        for projection in (self.linear_query, self.linear_key, self.linear_value):
            nn.init.normal_(projection.weight, mean=0, std=0.02)
        self.linear_out = nn.Linear(embedding_size, embedding_size)
        nn.init.normal_(self.linear_out.weight, mean=0, std=0.02)
        # Dropout applied to the attention probabilities (post-softmax).
        self.weight_dropout = nn.Dropout(dropout_ratio)

    def forward(self, query, key, value, key_padding_mask=None, attn_mask=None):
        r"""Run multi-head attention.

        Args:
            query (torch.Tensor): shape [batch_size, tgt_len, embedding_size].
            key (torch.Tensor): shape [batch_size, src_len, embedding_size].
            value (torch.Tensor): shape [batch_size, src_len, embedding_size].
            key_padding_mask (torch.Tensor): bool mask of shape
                [batch_size, src_len]; ``True`` positions are excluded.
            attn_mask (torch.BoolTensor): mask broadcast over batch and heads.
                NOTE(review): the code unsqueezes this twice, so a 2-D
                [tgt_len, src_len] mask is what broadcasts cleanly — confirm
                against callers before passing a 3-D mask.

        Return:
            tuple(torch.Tensor, torch.Tensor):
                attn_repre, shape [batch_size, tgt_len, embedding_size].
                attn_weights (max over heads), shape [batch_size, tgt_len, src_len].
        """
        device = query.device
        batch_size, tgt_len, embedding_size = query.size()
        src_len = key.size(1)
        assert key.size() == value.size()

        # Project, scale the queries, and split the embedding axis into heads.
        proj_q = (self.linear_query(query) * self.scaling) \
            .view(batch_size, tgt_len, self.num_heads, self.head_size).permute(0, 2, 1, 3)
        proj_k = self.linear_key(key) \
            .view(batch_size, src_len, self.num_heads, self.head_size).permute(0, 2, 3, 1)
        proj_v = self.linear_value(value) \
            .view(batch_size, src_len, self.num_heads, self.head_size).permute(0, 2, 1, 3)

        # Raw scores: [batch_size, num_heads, tgt_len, src_len].
        scores = torch.matmul(proj_q, proj_k)
        assert list(scores.size()) == [batch_size, self.num_heads, tgt_len, src_len]

        if attn_mask is not None:
            scores.masked_fill_(
                attn_mask.unsqueeze(0).unsqueeze(1).to(device),
                float("-inf")
            )
        if key_padding_mask is not None:
            scores.masked_fill_(
                key_padding_mask.unsqueeze(1).unsqueeze(2).to(device),
                float("-inf")
            )

        # Normalise over the source axis, then apply dropout to the weights.
        probs = self.weight_dropout(F.softmax(scores, dim=-1))

        context = torch.matmul(probs, proj_v)
        assert list(context.size()) == [batch_size, self.num_heads, tgt_len, self.head_size]
        context = context.transpose(1, 2).contiguous().view(batch_size, tgt_len, embedding_size)
        context = self.linear_out(context)

        # Collapse heads in the returned weights by taking the maximum.
        max_weights, _ = probs.max(dim=1)
        return context, max_weights
class EPTMultiHeadAttentionWeights(nn.Module):
    """
    Class for computing multi-head attention weights (follows the paper, 'Attention is all you need').
    This class computes the scaled dot-product between query Q and key K for every attention head;
    the softmax over these weights is left to the caller.
    """
    def __init__(self, **config):
        """
        Initialize MultiHeadAttentionWeights class
        :keyword int hidden_dim: Vector dimension of hidden states (H). 768 by default.
        :keyword int num_heads: Number of attention heads (N). 12 by default.
        """
        super().__init__()
        # Keep the raw config dict; `hidden_dim`/`num_heads` are read lazily via the properties below.
        self.config = config
        # Check whether H (hidden_dim) is divisible by N (num_heads), so each head gets an equal slice.
        assert self.hidden_dim % self.num_heads == 0, \
            "Hidden dimension %s is not divisible by the number of heads %s." % (self.hidden_dim, self.num_heads)
        # Linear transform for query Q
        self.linear_q = nn.Linear(self.hidden_dim, self.hidden_dim)
        # Linear transform for key K
        self.linear_k = nn.Linear(self.hidden_dim, self.hidden_dim)
        # Vector dimension D of input of a single attention head
        self.dim_head = self.hidden_dim // self.num_heads
        # Square root of vector dimension, i.e. \\sqrt{D}, used to scale the dot products
        self.sqrt_dim = self.dim_head ** 0.5
    def forward(self, query: torch.Tensor, key: torch.Tensor = None, key_ignorance_mask: torch.Tensor = None,
                attention_mask: torch.Tensor = None, head_at_last: bool = True) -> torch.Tensor:
        """
        Compute multi-head attention weights.
        Args:
            query (torch.Tensor): FloatTensor of shape [batch_size, query_len, hidden_dim] (B, S, H).
            key (torch.Tensor): FloatTensor of shape [B, key_len, H] or [1, key_len, H].
                `None` (default) reuses the query matrix (self-attention).
            key_ignorance_mask (torch.Tensor): BoolTensor of shape [B, key_len]. Where an element
                (b, t) is True, all weights for that key column are set to -Infinity. Default: None.
            attention_mask (torch.Tensor): BoolTensor of shape [query_len, key_len]. Where an
                element (s, t) is True, the weight at that (query, key) position is set to
                -Infinity for every batch item and head. Default: None.
            head_at_last (bool): True (default) returns shape [B, S, T, N];
                False returns [B, N, S, T].
        Returns:
            torch.FloatTensor: Multi-head attention weights (pre-softmax scores).
        """
        # If key is None, reuse query matrix Q (self-attention).
        if key is None:
            key = query
        # Check size & type conditions: batch sizes must match or be broadcastable from 1.
        assert query.shape[0] == key.shape[0] or key.shape[0] == 1 or query.shape[0] == 1
        assert key_ignorance_mask is None or (key.shape[:2] == key_ignorance_mask.shape and
                                              key_ignorance_mask.dtype == torch.bool)
        assert attention_mask is None or (query.shape[1] == attention_mask.shape[0] and
                                          key.shape[1] == attention_mask.shape[1] and
                                          attention_mask.dtype == torch.bool)
        # Store length information
        query_len = query.shape[1]
        key_len = key.shape[1]
        batch_size = max(key.shape[0], query.shape[0])
        # Project query & key with linear transformations
        query = self.linear_q(query)
        key = self.linear_k(key)
        # Scale query by 1/sqrt(D) (as in 'Attention is all you need')
        query = query / self.sqrt_dim
        # If query / key has batch dimension 1, broadcast it to the full batch size.
        if query.shape[0] == 1:
            query = query.expand(batch_size, -1, -1)
        if key.shape[0] == 1:
            key = key.expand(batch_size, -1, -1)
        # Transform query [B, S, N, H/N] -> [B, N, S, H/N] -> [BN, S, H/N].
        query = query.view(batch_size, query_len, self.num_heads, self.dim_head) \
            .transpose(1, 2).flatten(0, 1).contiguous()
        # Transform key [B, T, N, H/N] -> [B, N, H/N, T] -> [BN, H/N, T].
        key = key.view(batch_size, key_len, self.num_heads, self.dim_head) \
            .permute(0, 2, 3, 1).flatten(0, 1).contiguous()
        # Compute attention weights: [BN, S, T] -> [B, N, S, T]
        attention_weights = torch.bmm(query, key).view(batch_size, self.num_heads, query_len, key_len).contiguous()
        # Apply masks by writing -inf into masked positions, so a downstream softmax zeroes them out.
        if attention_mask is not None:
            # Recap: attention mask has shape [S, T], which can be broadcasted as [1, 1, S, T].
            attention_weights.masked_fill_(attention_mask, EPT.NEG_INF)
        if key_ignorance_mask is not None:
            # Recap: ignorance mask has shape [B, T] -> [B, 1, 1, T] and apply it.
            attention_weights.masked_fill_(key_ignorance_mask.unsqueeze(1).unsqueeze(1), EPT.NEG_INF)
        if head_at_last:
            # Output will be [B, N, S, T] -> [B, S, T, N]
            return attention_weights.permute(0, 2, 3, 1).contiguous()
        else:
            return attention_weights
    @property
    def hidden_dim(self) -> int:
        """
        :rtype: int
        :return: Vector dimension of hidden states (H)
        """
        return self.config.get('hidden_dim', 768)
    @property
    def num_heads(self) -> int:
        """
        :rtype: int
        :return: Number of attention heads (N)
        """
        return self.config.get('num_heads', 12)
class EPTMultiHeadAttention(nn.Module):
    """
    Class for computing multi-head attention (follows the paper, 'Attention is all you need').
    This class computes attention over K-V pairs with query Q, i.e. softmax(QK^T/sqrt(d)) V,
    delegating the score computation to :class:`EPTMultiHeadAttentionWeights`.
    """
    def __init__(self, **config):
        """
        Initialize MultiHeadAttention class
        :keyword int hidden_dim: Vector dimension of hidden states (H). 768 by default
        :keyword int num_heads: Number of attention heads (N). 12 by default
        :keyword float dropout_p: Probability of dropout. 0 by default
        """
        super().__init__()
        # Multi-head Attention Weight layer
        self.attn = EPTMultiHeadAttentionWeights(**config)
        # Dropout over attention weights (as in 'Attention is all you need').
        # BUGFIX: this was hard-coded to 0.0, silently ignoring the documented
        # ``dropout_p`` keyword; read it from the config instead (0.0 remains the default).
        self.dropout_p = float(config.get('dropout_p', 0.0))
        self.dropout_attn = nn.Dropout(self.dropout_p)
        # Linear transformations for value and output matrix.
        self.linear_v = nn.Linear(self.attn.hidden_dim, self.attn.hidden_dim)
        self.linear_out = nn.Linear(self.attn.hidden_dim, self.attn.hidden_dim)
    def forward(self, query: torch.Tensor, key_value: torch.Tensor = None, key_ignorance_mask: torch.Tensor = None,
                attention_mask: torch.Tensor = None, return_weights: bool = False, **kwargs):
        """
        Compute multi-head attention.
        Args:
            query (torch.Tensor): FloatTensor of shape [batch_size, query_len, hidden_dim] (B, S, H).
            key_value (torch.Tensor): FloatTensor of shape [B, key_len, H] or [1, key_len, H],
                used as both key and value. `None` (default) reuses the query (self-attention).
            key_ignorance_mask (torch.Tensor): BoolTensor of shape [B, key_len]; True entries
                are excluded from attention. Default: None.
            attention_mask (torch.Tensor): BoolTensor of shape [query_len, key_len]; True entries
                are excluded from attention. Default: None.
            return_weights (bool): If True, also return the per-head attention scores with shape
                [B, S, T, N]. By default, this is `False.`
        Returns:
            Union[torch.FloatTensor, Tuple[torch.FloatTensor, torch.FloatTensor]]:
            Attention output of shape [B, S, H], or (output, weights) when return_weights is True.
        """
        # If key_value is None, reuse query matrix Q.
        if key_value is None:
            key_value = query
        # Compute attention scores: [B, N, S, T].
        attn_weights = self.attn(query=query, key=key_value, key_ignorance_mask=key_ignorance_mask,
                                 attention_mask=attention_mask, head_at_last=False)
        # Retrieve shape information.
        batch_size, _, query_len, key_len = attn_weights.shape
        # Compute Softmax values. Shape [B, N, S, T] -> [BN, S, T].
        # For numerical stability, replace NaN with 0. (NaN occurs when all keys are masked.)
        attn = attn_weights.softmax(dim=-1)
        attn = self.dropout_attn(attn)  # Dropout was applied after softmax in the original paper.
        attn = attn.masked_fill(torch.isnan(attn), 0.0).view(-1, query_len, key_len)
        # Pass linear and transpose value matrix: [1 or B, T, N, H/N] -> [1 or B, N, T, H/N].
        value_size = key_value.shape[0]
        value = self.linear_v(key_value) \
            .view(value_size, key_len, self.attn.num_heads, self.attn.dim_head).transpose(1, 2)
        # If value has shape [1, *], expand it over the batch.
        if value_size == 1:
            value = value.expand(batch_size, -1, -1, -1)
        # Flatten dim #0 and #1: [B, N, T, H/N] -> [BN, T, H/N].
        value = value.flatten(0, 1).contiguous()
        # Compute output of weighted sum: [BN, S, H/N] -> [B, N, S, H/N] -> [B, S, N, H/N] -> [B, S, H].
        output = torch.bmm(attn, value) \
            .view(batch_size, self.attn.num_heads, query_len, self.attn.dim_head) \
            .transpose(1, 2).flatten(2, 3).contiguous()
        # Map outputs through the final linear layer and return. [B, S, H].
        output = self.linear_out(output)
        if return_weights:
            return output, attn_weights.permute(0, 2, 3, 1).contiguous()
        else:
            return output
from parlai.core.build_data import download_models
from parlai.core.dict import DictionaryAgent
from parlai.core.params import ParlaiParser, modelzoo_path
from parlai.agents.seq2seq.seq2seq import Seq2seqAgent
from projects.convai2.build_dict import build_dict
from projects.convai2.eval_ppl import eval_ppl
import torch.nn.functional as F
class Seq2seqEntry(Seq2seqAgent):
    """Seq2seq agent exposing per-token next-word probabilities for perplexity eval."""

    def __init__(self, opt, shared=None):
        super().__init__(opt, shared)
        if shared:
            # Reuse the probability table handed over by the parent instance.
            self.probs = shared['probs']
        else:
            # Seed every vocabulary token with a tiny floor probability so no
            # token ever scores exactly zero.
            self.probs = {token: 1e-7 for token in build_dict().keys()}

    def share(self):
        shared = super().share()
        # Spawned agents get their own copy of the probability table.
        shared['probs'] = self.probs.copy()
        return shared

    def next_word_probability(self, observation, partial_out):
        """Return a probability distribution over the next word.

        Given an input observation and the list of previous "true" words
        (``partial_out``), produce a dict mapping each word to its probability.
        Unset words are treated as probability zero. Used for per-word
        perplexity, e.g.::

            {'text': 'Run test program.'}, ['hello'] => {'world': 1.0}
        """
        # Lazily create the encoder-state cache on the first call.
        if not hasattr(self, 'prev_enc'):
            self.prev_enc = None
            self.last_text = None
        # A new input text invalidates any cached encoder state.
        if observation['text'] != self.last_text:
            self.prev_enc = None
            self.last_text = observation.get('text')
        self.observe(observation)
        current = self.observation
        current['eval_labels'] = [' '.join(partial_out)]
        vectorized = self.vectorize([current])
        self.model.eval()
        # Only one step beyond the partial output is ever needed.
        self.model.longest_label = 1
        targets = vectorized[1] if partial_out else None
        out = self.model(vectorized[0], ys=targets, prev_enc=self.prev_enc)
        scores, self.prev_enc = out[1], out[3]
        # scores is bsz x seqlen x num_words; pick the step being predicted.
        step = len(partial_out)
        assert step == scores.size(1) - 1
        token_probs = F.softmax(scores.select(1, step), dim=1).squeeze().cpu()
        dist = self.probs
        for idx, prob in enumerate(token_probs):
            try:
                value = prob.item()
            except AttributeError:
                # Older torch versions: indexing yields a 1-element sequence.
                value = prob[0]
            dist[self.dict[idx]] = value
        self.batch = vectorized
        return dist
if __name__ == '__main__':
    # Configure the perplexity evaluation to use the pretrained ConvAI2
    # seq2seq baseline on the validation set, CPU-only, one example at a time.
    parser = ParlaiParser(True, True)
    parser.set_defaults(
        model='projects.convai2.baselines.seq2seq.eval_ppl:Seq2seqEntry',
        model_file='models:convai2/seq2seq/convai2_self_seq2seq_model',
        dict_file='models:convai2/seq2seq/dict_convai2_self',
        dict_lower=True,
        datatype='valid',
        batchsize=1,
        numthreads=60,
        no_cuda=True,
    )
    opt = parser.parse_args()
    opt['model_type'] = 'seq2seq'
    # Fetch the pretrained model and dictionary if not already present.
    fnames = ['convai2_self_seq2seq_model.tgz', 'dict_convai2_self']
    download_models(opt, fnames, 'convai2')
    # Run the perplexity evaluation harness (removed trailing dataset-dump
    # metadata residue that was fused onto this line).
    eval_ppl(parser)
from parlai.core.build_data import download_models
from parlai.core.dict import DictionaryAgent
from parlai.core.params import ParlaiParser, modelzoo_path
from parlai.agents.seq2seq.seq2seq import Seq2seqAgent
from projects.convai2.build_dict import build_dict
from projects.convai2.eval_ppl import eval_ppl
import torch.nn.functional as F
class Seq2seqEntry(Seq2seqAgent):
    """Seq2seq agent exposing next-word probabilities for perplexity evaluation."""
    def __init__(self, opt, shared=None):
        super().__init__(opt, shared)
        if shared:
            # Reuse the probability table shared by the parent agent instance.
            self.probs = shared['probs']
        else:
            # default minimum probability mass for all tokens
            self.probs = {k: 1e-7 for k in build_dict().keys()}
    def share(self):
        # Hand a copy of the probability table to spawned (shared) agents.
        shared = super().share()
        shared['probs'] = self.probs.copy()
        return shared
    def next_word_probability(self, observation, partial_out):
        """Return probability distribution over next words given an input and
        partial true output. This is used to calculate the per-word perplexity.
        Arguments:
        observation -- input observation dict
        partial_out -- list of previous "true" words
        Returns a dict, where each key is a word and each value is a probability
        score for that word. Unset keys assume a probability of zero.
        e.g.
        {'text': 'Run test program.'}, ['hello'] => {'world': 1.0}
        """
        # Encoder-state cache attributes are created lazily on the first call.
        if not hasattr(self, 'prev_enc'):
            self.prev_enc = None
            self.last_text = None
        # A new input text invalidates the cached encoder state.
        if observation['text'] != self.last_text:
            self.prev_enc = None
            self.last_text = observation.get('text')
        self.observe(observation)
        obs = self.observation
        obs['eval_labels'] = [' '.join(partial_out)]
        batch = self.vectorize([obs])
        self.model.eval()
        self.model.longest_label = 1  # no need to predict farther ahead
        out = self.model(
            batch[0],  # xs
            ys=(batch[1] if len(partial_out) > 0 else None),
            prev_enc=self.prev_enc)
        scores, self.prev_enc = out[1], out[3]
        # scores is bsz x seqlen x num_words, so select probs of current index
        assert len(partial_out) == scores.size(1) - 1
        probs = F.softmax(scores.select(1, len(partial_out)), dim=1).squeeze().cpu()
        dist = self.probs
        for i in range(len(probs)):
            try:
                val = probs[i].item()
            except AttributeError:
                # Older torch versions: indexing yields a 1-element sequence.
                val = probs[i][0]
            dist[self.dict[i]] = val
        self.batch = batch
        return dist
if __name__ == '__main__':
    # Configure the perplexity evaluation to use the pretrained ConvAI2
    # seq2seq baseline on the validation set, CPU-only, one example at a time.
    parser = ParlaiParser(True, True)
    parser.set_defaults(
        model='projects.convai2.baselines.seq2seq.eval_ppl:Seq2seqEntry',
        model_file='models:convai2/seq2seq/convai2_self_seq2seq_model',
        dict_file='models:convai2/seq2seq/dict_convai2_self',
        dict_lower=True,
        datatype='valid',
        batchsize=1,
        numthreads=60,
        no_cuda=True,
    )
    opt = parser.parse_args()
    opt['model_type'] = 'seq2seq'
    # Fetch the pretrained model and dictionary if not already present.
    fnames = ['convai2_self_seq2seq_model.tgz', 'dict_convai2_self']
    download_models(opt, fnames, 'convai2')
    # Run the perplexity evaluation harness (removed trailing dataset-dump
    # metadata residue that was fused onto this line).
    eval_ppl(parser)
import unittest
import requests
import json
# Base URL of the locally-running API instance exercised by these tests.
base_url = 'http://localhost:5000'
''' BEFORE RUNNING TESTS RUN python dbhelper.py '''
class TestEndPoints(unittest.TestCase):
    """Integration tests for the /registration endpoint.

    NOTE: the API must be running at ``base_url`` with a fresh database
    (run ``python dbhelper.py`` beforehand).
    """
    def test_registration_methods_allowed(self):
        ''' only POST allowed '''
        endpoint = f'{base_url}/registration'
        # GET request
        get_req = requests.get(endpoint)
        # PUT request
        put_req = requests.put(endpoint, data={})
        # DELETE request
        del_req = requests.delete(endpoint)
        # Assert all the above methods return 405 (method not allowed)
        self.assertEqual(get_req.status_code, 405)
        self.assertEqual(put_req.status_code, 405)
        self.assertEqual(del_req.status_code, 405)
    def test_registration_create_user(self):
        ''' Create user '''
        endpoint = f'{base_url}/registration'
        # POST request
        headers = {"Content-Type": "application/json"}
        data = {"username": "_test", "email": "_<EMAIL>", "password": "_<PASSWORD>"}
        post_req = requests.post(endpoint, headers=headers, data=json.dumps(data))
        # Assert response 200
        self.assertEqual(post_req.status_code, 200)
        body = json.loads(post_req.text)
        # Assert response message
        self.assertEqual(body['message'], 'Success. User created.')
        # Assert response user matches
        self.assertEqual(body['username'], '_test')
        # Assert response email matches
        # BUGFIX: the expected-email literal was missing its opening quote (SyntaxError).
        self.assertEqual(body['email'], '_<EMAIL>')
    def test_registration_user_exists(self):
        ''' User exists '''
        endpoint = f'{base_url}/registration'
        headers = {"Content-Type": "application/json"}
        data = {"username": "_test", "email": "_<EMAIL>", "password": "_<PASSWORD>"}
        # Repeat the creation request above with the same credentials
        post_req_repeat = requests.post(endpoint, headers=headers, data=json.dumps(data))
        # Assert response 400 (bad request)
        self.assertEqual(post_req_repeat.status_code, 400)
        # Assert response message
        # BUGFIX: the expected-message literal was missing its opening quote (SyntaxError).
        self.assertEqual(
            json.loads(post_req_repeat.text)['message'],
            '_<EMAIL> is already registered. If you need to reset your password, check the docs.')
    def test_registration_missing_parameter(self):
        ''' Missing parameters '''
        endpoint = f'{base_url}/registration'
        headers = {"Content-Type": "application/json"}
        data_missing = {"username": "_test_2", "password": "_<PASSWORD>"}
        post_req_missing = requests.post(endpoint, headers=headers, data=json.dumps(data_missing))
        # Assert response 400 (bad request)
        self.assertEqual(post_req_missing.status_code, 400)
        # Assert response message
        self.assertEqual(json.loads(post_req_missing.text)['message'], 'Some parameters are missing. You need \'username\', \'email\' and \'password\'.')
if __name__ == "__main__":
    # Run the test suite when executed directly (removed dataset-dump metadata
    # residue that was fused onto this line).
    unittest.main()
import requests
import json
# Base URL of the locally-running API instance exercised by these tests.
base_url = 'http://localhost:5000'
''' BEFORE RUNNING TESTS RUN python dbhelper.py '''
class TestEndPoints(unittest.TestCase):
    """Integration tests for the /registration endpoint.

    NOTE: the API must be running at ``base_url`` with a fresh database
    (run ``python dbhelper.py`` beforehand).
    """
    def test_registration_methods_allowed(self):
        ''' only POST allowed '''
        endpoint = f'{base_url}/registration'
        # GET request
        get_req = requests.get(endpoint)
        # PUT request
        put_req = requests.put(endpoint, data={})
        # DELETE request
        del_req = requests.delete(endpoint)
        # Assert all the above methods return 405 (method not allowed)
        self.assertEqual(get_req.status_code, 405)
        self.assertEqual(put_req.status_code, 405)
        self.assertEqual(del_req.status_code, 405)
    def test_registration_create_user(self):
        ''' Create user '''
        endpoint = f'{base_url}/registration'
        # POST request
        headers = {"Content-Type": "application/json"}
        data = {"username": "_test", "email": "_<EMAIL>", "password": "_<PASSWORD>"}
        post_req = requests.post(endpoint, headers=headers, data=json.dumps(data))
        # Assert response 200
        self.assertEqual(post_req.status_code, 200)
        body = json.loads(post_req.text)
        # Assert response message
        self.assertEqual(body['message'], 'Success. User created.')
        # Assert response user matches
        self.assertEqual(body['username'], '_test')
        # Assert response email matches
        # BUGFIX: the expected-email literal was missing its opening quote (SyntaxError).
        self.assertEqual(body['email'], '_<EMAIL>')
    def test_registration_user_exists(self):
        ''' User exists '''
        endpoint = f'{base_url}/registration'
        headers = {"Content-Type": "application/json"}
        data = {"username": "_test", "email": "_<EMAIL>", "password": "_<PASSWORD>"}
        # Repeat the creation request above with the same credentials
        post_req_repeat = requests.post(endpoint, headers=headers, data=json.dumps(data))
        # Assert response 400 (bad request)
        self.assertEqual(post_req_repeat.status_code, 400)
        # Assert response message
        # BUGFIX: the expected-message literal was missing its opening quote (SyntaxError).
        self.assertEqual(
            json.loads(post_req_repeat.text)['message'],
            '_<EMAIL> is already registered. If you need to reset your password, check the docs.')
    def test_registration_missing_parameter(self):
        ''' Missing parameters '''
        endpoint = f'{base_url}/registration'
        headers = {"Content-Type": "application/json"}
        data_missing = {"username": "_test_2", "password": "_<PASSWORD>"}
        post_req_missing = requests.post(endpoint, headers=headers, data=json.dumps(data_missing))
        # Assert response 400 (bad request)
        self.assertEqual(post_req_missing.status_code, 400)
        # Assert response message
        self.assertEqual(json.loads(post_req_missing.text)['message'], 'Some parameters are missing. You need \'username\', \'email\' and \'password\'.')
if __name__ == "__main__":
    # Run the test suite when executed directly (removed dataset-dump metadata
    # residue that was fused onto this line).
    unittest.main()
import FWCore.ParameterSet.Config as cms
# cmsRun configuration: rebuild ECAL-driven electron seeds from RecHits on
# single-electron RelVal input. (The trailing dataset-dump metadata that was
# fused onto the last line has been removed.)
process = cms.Process("electrons")
# Detector geometry, conditions (GlobalTag) and magnetic field.
process.load("Configuration.StandardSequences.GeometryDB_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.load("Configuration.StandardSequences.MagneticField_cff")
# ECAL clustering and GSF electron reconstruction sequences.
process.load("RecoEcal.Configuration.RecoEcal_cff")
process.load("RecoEgamma.EgammaElectronProducers.gsfElectronSequence_cff")
# Local tracker reconstruction: pixel/strip clusters and rec-hits.
process.load("RecoLocalTracker.SiPixelRecHits.SiPixelRecHits_cfi")
process.load("RecoLocalTracker.SiStripRecHitConverter.SiStripRecHitConverter_cfi")
process.load("RecoLocalTracker.SiStripRecHitConverter.SiStripRecHitMatcher_cfi")
process.load("RecoLocalTracker.SiStripRecHitConverter.StripCPEfromTrackAngle_cfi")
process.load("RecoLocalTracker.SiStripZeroSuppression.SiStripZeroSuppression_cfi")
process.load("RecoLocalTracker.SiStripClusterizer.SiStripClusterizer_cfi")
process.load("RecoLocalTracker.SiPixelClusterizer.SiPixelClusterizer_cfi")
process.load("RecoLocalTracker.SiPixelRecHits.PixelCPEESProducers_cff")
process.load("RecoTracker.TransientTrackingRecHit.TTRHBuilders_cff")
# Process only the first 10 events.
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(10)
)
# Input: single-electron (pt 10) RelVal samples.
process.source = cms.Source("PoolSource",
    debugVerbosity = cms.untracked.uint32(1),
    debugFlag = cms.untracked.bool(True),
    fileNames = cms.untracked.vstring(
        '/store/relval/CMSSW_2_1_10/RelValSingleElectronPt10/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/IDEAL_V9_v2/0000/26338BA9-5899-DD11-BD75-000423D985B0.root',
        '/store/relval/CMSSW_2_1_10/RelValSingleElectronPt10/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/IDEAL_V9_v2/0000/6A430ADA-5999-DD11-994D-001617C3B5D8.root',
        '/store/relval/CMSSW_2_1_10/RelValSingleElectronPt10/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/IDEAL_V9_v2/0000/F2E24023-5899-DD11-BFBF-000423D94A20.root',
        '/store/relval/CMSSW_2_1_10/RelValSingleElectronPt10/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/IDEAL_V9_v2/0000/FE4A6F3F-FD99-DD11-9587-000423D98750.root'
    )
)
# Output: keep superclusters, cone jets, products made by this process, and MC truth.
process.out = cms.OutputModule("PoolOutputModule",
    outputCommands = cms.untracked.vstring('drop *',
        'keep recoSuperClusters*_*_*_*',
        'keep *_iterativeCone5CaloJets_*_*',
        'keep *_*_*_electrons',
        'keep *HepMCProduct_*_*_*'),
    fileName = cms.untracked.string('electrons.root')
)
# Reconstruction path: local rec-hits -> track seeds -> ECAL clusters -> electron seeds.
process.p = cms.Path(process.siPixelRecHits*process.siStripMatchedRecHits*process.newSeedFromPairs*process.newSeedFromTriplets*process.newCombinedSeeds*process.ecalClusters*process.ecalDrivenElectronSeeds)
process.outpath = cms.EndPath(process.out)
# Conditions tag.
process.GlobalTag.globaltag = 'MC_31X_V2'
# cmsRun configuration: rebuild ECAL-driven electron seeds from RecHits on
# single-electron RelVal input. (The trailing dataset-dump metadata that was
# fused onto the last line has been removed.)
process = cms.Process("electrons")
# Detector geometry, conditions (GlobalTag) and magnetic field.
process.load("Configuration.StandardSequences.GeometryDB_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.load("Configuration.StandardSequences.MagneticField_cff")
# ECAL clustering and GSF electron reconstruction sequences.
process.load("RecoEcal.Configuration.RecoEcal_cff")
process.load("RecoEgamma.EgammaElectronProducers.gsfElectronSequence_cff")
# Local tracker reconstruction: pixel/strip clusters and rec-hits.
process.load("RecoLocalTracker.SiPixelRecHits.SiPixelRecHits_cfi")
process.load("RecoLocalTracker.SiStripRecHitConverter.SiStripRecHitConverter_cfi")
process.load("RecoLocalTracker.SiStripRecHitConverter.SiStripRecHitMatcher_cfi")
process.load("RecoLocalTracker.SiStripRecHitConverter.StripCPEfromTrackAngle_cfi")
process.load("RecoLocalTracker.SiStripZeroSuppression.SiStripZeroSuppression_cfi")
process.load("RecoLocalTracker.SiStripClusterizer.SiStripClusterizer_cfi")
process.load("RecoLocalTracker.SiPixelClusterizer.SiPixelClusterizer_cfi")
process.load("RecoLocalTracker.SiPixelRecHits.PixelCPEESProducers_cff")
process.load("RecoTracker.TransientTrackingRecHit.TTRHBuilders_cff")
# Process only the first 10 events.
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(10)
)
# Input: single-electron (pt 10) RelVal samples.
process.source = cms.Source("PoolSource",
    debugVerbosity = cms.untracked.uint32(1),
    debugFlag = cms.untracked.bool(True),
    fileNames = cms.untracked.vstring(
        '/store/relval/CMSSW_2_1_10/RelValSingleElectronPt10/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/IDEAL_V9_v2/0000/26338BA9-5899-DD11-BD75-000423D985B0.root',
        '/store/relval/CMSSW_2_1_10/RelValSingleElectronPt10/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/IDEAL_V9_v2/0000/6A430ADA-5999-DD11-994D-001617C3B5D8.root',
        '/store/relval/CMSSW_2_1_10/RelValSingleElectronPt10/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/IDEAL_V9_v2/0000/F2E24023-5899-DD11-BFBF-000423D94A20.root',
        '/store/relval/CMSSW_2_1_10/RelValSingleElectronPt10/GEN-SIM-DIGI-RAW-HLTDEBUG-RECO/IDEAL_V9_v2/0000/FE4A6F3F-FD99-DD11-9587-000423D98750.root'
    )
)
# Output: keep superclusters, cone jets, products made by this process, and MC truth.
process.out = cms.OutputModule("PoolOutputModule",
    outputCommands = cms.untracked.vstring('drop *',
        'keep recoSuperClusters*_*_*_*',
        'keep *_iterativeCone5CaloJets_*_*',
        'keep *_*_*_electrons',
        'keep *HepMCProduct_*_*_*'),
    fileName = cms.untracked.string('electrons.root')
)
# Reconstruction path: local rec-hits -> track seeds -> ECAL clusters -> electron seeds.
process.p = cms.Path(process.siPixelRecHits*process.siStripMatchedRecHits*process.newSeedFromPairs*process.newSeedFromTriplets*process.newCombinedSeeds*process.ecalClusters*process.ecalDrivenElectronSeeds)
process.outpath = cms.EndPath(process.out)
# Conditions tag.
process.GlobalTag.globaltag = 'MC_31X_V2'
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle
import numpy as np
import lsst.geom as lsstGeom
from lsst.afw import cameraGeom
from lsst.obs.lsst.imsim import ImsimMapper
# Public API of this module (controls ``from <module> import *``).
__all__ = ['plot_amp_boundaries', 'plot_det', 'plot_focal_plane',
           'hist_amp_data']
def get_amp_patches(det, amps=None):
    """
    Build matplotlib Rectangle patches, in focal-plane coordinates, for the
    amplifier segments of a detector.

    Parameters
    ----------
    det: `lsst.afw.cameraGeom.detector.detector.Detector`
        Detector object.
    amps: container-type object [None]
        Container supporting ``'C01' in amps``-style membership tests that
        selects which channels get a patch. None means all channels.

    Returns
    -------
    list of matplotlib.patches.Rectangle objects
    """
    to_focal_plane = det.getTransform(cameraGeom.PIXELS, cameraGeom.FOCAL_PLANE)
    # All amps share the same segment size; take it from the first one.
    segment = next(iter(det)).getBBox()
    height, width = segment.getHeight(), segment.getWidth()
    patches = []
    for amp in det:
        name = amp.getName()
        if amps is not None and name not in amps:
            continue
        # Channel names look like 'Cji'; the digits index the segment grid.
        row, col = (int(digit) for digit in name[1:])
        corner_y, corner_x = row*height, col*width
        # Map opposite corners of the segment into focal-plane coordinates.
        xmin, ymin = to_focal_plane.applyForward(lsstGeom.Point2D(corner_x, corner_y))
        xmax, ymax = to_focal_plane.applyForward(
            lsstGeom.Point2D(corner_x + width, corner_y + height))
        patches.append(Rectangle((xmin, ymin), xmax - xmin, ymax - ymin))
    return patches
def plot_amp_boundaries(ax, camera=None, edgecolor='blue', facecolor='white'):
    """
    Draw the amplifier boundaries for every detector in a camera.

    Parameters
    ----------
    ax: `matplotlib.Axes`
        Axes onto which the amplifier-boundary Rectangles are rendered.
    camera: `lsst.afw.cameraGeom.camera.camera.Camera` [None]
        Camera holding the detector info; None selects
        `lsst.obs.lsst.imsim.ImsimMapper().camera`.
    edgecolor: str or tuple of RGBA values ["blue"]
        Outline color for the amplifier Rectangles.
    facecolor: str or tuple of RGBA values ["white"]
        Fill color for the amplifier Rectangles.

    Returns
    -------
    None
    """
    camera = ImsimMapper().camera if camera is None else camera
    # Gather the patches for every amp of every detector in one pass.
    boundary_patches = [patch for det in camera for patch in get_amp_patches(det)]
    ax.add_collection(
        PatchCollection(boundary_patches, edgecolor=edgecolor, facecolor=facecolor))
def plot_det(ax, det, amp_values, cm=plt.cm.hot, z_range=None, use_log10=False):
    """
    Render one detector's amplifiers, coloring each amp region by its value.

    Parameters
    ----------
    ax: `matplotlib.Axes`
        Axes onto which the colored amplifier Rectangles are rendered.
    det: `lsst.afw.cameraGeom.detector.detector.Detector`
        Detector object.
    amp_values: dict of floats
        Values to render, keyed by channel ID ('C00', 'C01', ...).
    cm: `matplotlib.colors.Colormap`
        Colormap used to render amplifier values.
    z_range: 2-tuple of floats [None]
        (min, max) values mapped onto the colormap's unit interval; values
        outside are clipped. None uses the min/max of ``amp_values``.
    use_log10: bool [False]
        If True, color by log10(value); non-positive values are not rendered.

    Returns
    -------
    None
    """
    if z_range is None:
        values = amp_values.values()
        z_range = min(values), max(values)
    z_lo, z_hi = z_range
    span = z_hi - z_lo

    def normed(value):
        # Map value into [0, 1], clipping at both ends.
        return max(0, min(1., (value - z_lo)/span))

    # One color per rendered amp, in detector iteration order; None marks
    # amps that cannot be log-scaled and therefore get no facecolor.
    colors = []
    for amp in det:
        channel = amp.getName()
        if channel not in amp_values:
            continue
        value = amp_values[channel]
        if use_log10:
            colors.append(cm(normed(np.log10(value))) if value > 0 else None)
        else:
            colors.append(cm(normed(value)))
    # get_amp_patches iterates the same amps in the same order, so the two
    # sequences line up one-to-one; drop the unrendered entries in lockstep.
    rendered = [(color, patch)
                for color, patch in zip(colors, get_amp_patches(det, amp_values))
                if color is not None]
    facecolors = [color for color, _ in rendered]
    patches = [patch for _, patch in rendered]
    ax.add_collection(PatchCollection(patches, facecolors=facecolors))
def plot_focal_plane(ax, amp_data, camera=None, cm=plt.cm.hot,
                     x_range=(-325, 325), y_range=(-325, 325),
                     z_range=None, use_log10=False, scale_factor='1',
                     title=''):
    """
    Make a "heat map" plot of the focalplane using per-amplifier values.
    Parameters
    ----------
    ax: `matplotlib.Axes`
        Axes object used to render the patch collection containing
        the amplifier boundary Rectangles.
    amp_data: dict of dict of floats
        Dictionary of dictionary of amplifier values to render,
        keyed by detector name, e.g., 'R01_S00' and then by channel ID,
        e.g., 'C00'.
    camera: `lsst.afw.cameraGeom.camera.camera.Camera` [None]
        Camera object containing the detector info. If None, use
        `lsst.obs.lsst.imsim.ImsimMapper().camera`
    cm: `matplotlib.colors.Colormap`
        Colormap used to render amplifier values.
    x_range: tuple [(-325, 325)]
        Focalplane plotting region in x-direction in units of mm.
    y_range: tuple [(-325, 325)]
        Focalplane plotting region in y-direction in units of mm.
    z_range: 2-tuple of floats [None]
        Minimum and maximum values into which to map the unit interval
        for the color map. Values are mapped using
        max(0, min(1, (amp_value - z_range[0])/(z_range[1] - z_range[0])))
        If None, then use the min/max over all amplifier values.
    use_log10: bool [False]
        If True, then use log10(amp_value) for positive amp_value. For
        non-positive amp_value, don't render the amp color.
    scale_factor: str ['1']
        Scale factor to apply to the colorbar tick labels. This value
        will be cast as a float when applied to the tick label values. It
        is passed as a string so that formatting in the colorbar tick
        labels can be controlled directly by the client code.
        This is not used if use_log10 == True.
    title: str ['']
        Title to apply to the plot.
    Returns
    -------
    matplotlib.colorbar.Colorbar
        The colorbar added to the figure.
        (Docstring fixed: this function has always returned the colorbar,
        not None.)
    """
    if camera is None:
        camera = ImsimMapper().camera
    plot_amp_boundaries(ax, camera=camera)
    if z_range is None:
        # Derive the color range from all amp values across the focal plane.
        z_range_values = []
        for _ in amp_data.values():
            z_range_values.extend(_.values())
        if use_log10:
            z_range_values = [np.log10(_) for _ in z_range_values if _ > 0]
        z_range = min(z_range_values), max(z_range_values)
    for det_name, amp_values in amp_data.items():
        plot_det(ax, camera[det_name], amp_values, cm=cm, z_range=z_range,
                 use_log10=use_log10)
    # (Removed a dead ``max_amp_value`` assignment here: it was unused and
    # read the loop variable after the loop had ended.)
    plt.xlim(*x_range)
    plt.ylim(*y_range)
    # NOTE(review): axis labels are intentionally swapped relative to the data
    # axes here, presumably to follow the camera coordinate convention —
    # confirm against the focal-plane documentation.
    plt.xlabel('y (mm)')
    plt.ylabel('x (mm)')
    norm = plt.Normalize(vmin=z_range[0], vmax=z_range[1])
    sm = plt.cm.ScalarMappable(cmap=cm, norm=norm)
    sm.set_array([])
    colorbar = plt.colorbar(sm)
    if use_log10:
        # Show decade tick labels (10**k) for the log-scaled color range.
        ticks = sorted(list(set([int(_) for _ in
                                 np.log10(np.logspace(0, z_range[1]))])))
        ticklabels = [10**_ for _ in ticks]
        colorbar.set_ticks(ticks)
        colorbar.set_ticklabels(ticklabels)
    elif scale_factor != '1':
        # Divide tick values by the scale factor and annotate the last label.
        ticks = colorbar.get_ticks()
        colorbar.set_ticks(ticks)
        ticklabels = [_/float(scale_factor) for _ in ticks]
        ticklabels[-1] = '{} x {}'.format(ticklabels[-1], scale_factor)
        colorbar.set_ticklabels(ticklabels)
    plt.title(title)
    return colorbar
def hist_amp_data(amp_data, x_label, bins=50, hist_range=None, color=None,
                  label=None, use_log10=False, yscale='log',
                  scale_factor='1'):
    """Histogram focal plane results from per amp data.
    amp_data: dict of dict of floats
        Dictionary of dictionary of amplifier values to render,
        keyed by detector name, e.g., 'R01_S00' and then by channel ID,
        e.g., 'C00'.
    x_label: str
        Label of x-axis. Typically, this is the results column name.
    bins: int [50]
        Number of histogram bins.
    hist_range: (float, float) [None]
        Histogram min and max values. If None, then use plt.hist default.
    color: str [None]
        Histogram color. If None, then use plt.hist default.
    label: str [None]
        Histogram label.
    use_log10: bool [False]
        If True, then use log10(amp_value) for positive amp_value;
        non-positive values are dropped.
    yscale: str ['log']
        Argument to pass to plt.yscale(...). The options are 'linear', 'log'.
    scale_factor: str ['1']
        Scale factor applied to the x-axis tick labels (cast to float).
        Passed as a string so clients control label formatting.
        Not used if use_log10 == True.
    """
    amp_values = []
    for det_values in amp_data.values():
        amp_values.extend(det_values.values())
    if use_log10:
        amp_values = [np.log10(_) for _ in amp_values if _ > 0]
    plt.hist(amp_values, bins=bins, range=hist_range, histtype='step',
             color=color, label=label)
    plt.xlabel(x_label)
    plt.ylabel('entries / bin')
    plt.yscale(yscale)
    # BUGFIX: use the axes the histogram was just drawn on. plt.axes() creates
    # a brand-new Axes covering the figure instead of returning the current one.
    ax = plt.gca()
    if use_log10:
        # Show decade tick labels (10**k) for the log-scaled values.
        ticks = sorted(list(set([int(_) for _ in
                                 np.log10(np.logspace(0, max(amp_values)))])))
        ticklabels = [10**_ for _ in ticks]
        ax.set_xticks(ticks)
        ax.set_xticklabels(ticklabels)
    elif scale_factor != '1':
        if hist_range is not None:
            # Keep only the ticks inside the requested histogram range.
            ticks = [_ for _ in ax.get_xticks()
                     if (_ >= hist_range[0] and _ <= hist_range[1])]
        else:
            # BUGFIX: hist_range may legitimately be None (the documented
            # default); previously this branch raised TypeError in that case.
            ticks = list(ax.get_xticks())
        ax.set_xticks(ticks)
        ticklabels = ['{:.1f}'.format(_/float(scale_factor)) for _ in ticks]
        ticklabels[-1] += f'\nx {scale_factor}'
        ax.set_xticklabels(ticklabels)
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle
import numpy as np
import lsst.geom as lsstGeom
from lsst.afw import cameraGeom
from lsst.obs.lsst.imsim import ImsimMapper
__all__ = ['plot_amp_boundaries', 'plot_det', 'plot_focal_plane',
'hist_amp_data']
def get_amp_patches(det, amps=None):
    """
    Return a list of Rectangle patches in focalplane coordinates
    corresponding to the amplifier segments in a detector object.

    Parameters
    ----------
    det: `lsst.afw.cameraGeom.detector.detector.Detector`
        Detector object.
    amps: container-type object [None]
        Python container that can be queried like `'C01 in amps'`
        to see if a particular channel is included for plotting.
        If None, then use all channels in det.

    Returns
    -------
    list of matplotlib.patches.Rectangle objects
    """
    # Pixel -> focal-plane (mm) mapping for this detector.
    transform = det.getTransform(cameraGeom.PIXELS, cameraGeom.FOCAL_PLANE)
    # Use the first amp's bounding box for the segment size; all amps of a
    # detector are assumed to share it -- TODO confirm for every camera.
    bbox = list(det)[0].getBBox()
    dy, dx = bbox.getHeight(), bbox.getWidth()
    patches = []
    for amp in det:
        if amps is not None and amp.getName() not in amps:
            continue
        # Channel names look like 'C01': the two digits after 'C' are
        # parsed as the segment's (row j, column i) within the detector.
        j, i = tuple(int(_) for _ in amp.getName()[1:])
        y, x = j*dy, i*dx
        # Map the two opposite corners of the segment to focal-plane
        # coordinates and build the Rectangle from their difference.
        x0, y0 = transform.applyForward(lsstGeom.Point2D(x, y))
        x1, y1 = transform.applyForward(lsstGeom.Point2D(x + dx, y + dy))
        patches.append(Rectangle((x0, y0), x1 - x0, y1 - y0))
    return patches
def plot_amp_boundaries(ax, camera=None, edgecolor='blue', facecolor='white'):
    """
    Draw the amplifier-segment outlines for every detector in a camera.

    Parameters
    ----------
    ax: `matplotlib.Axes`
        Axes onto which the amplifier Rectangles are rendered.
    camera: `lsst.afw.cameraGeom.camera.camera.Camera` [None]
        Camera providing the detectors.  When None, the default
        `lsst.obs.lsst.imsim.ImsimMapper().camera` is used.
    edgecolor: str or tuple of RGBA values ["blue"]
        Outline color for the amplifier Rectangles.
    facecolor: str or tuple of RGBA values ["white"]
        Fill color for the amplifier Rectangles.

    Returns
    -------
    None
    """
    cam = ImsimMapper().camera if camera is None else camera
    # One Rectangle per amplifier, across every detector in the camera.
    boundaries = [patch for det in cam for patch in get_amp_patches(det)]
    collection = PatchCollection(boundaries, edgecolor=edgecolor,
                                 facecolor=facecolor)
    ax.add_collection(collection)
def plot_det(ax, det, amp_values, cm=plt.cm.hot, z_range=None, use_log10=False):
    """
    Plot the amplifiers in a detector, rendering each amplifier region with
    a color corresponding to its assigned value.

    Parameters
    ----------
    ax: `matplotlib.Axes`
        Axes object used to render the patch collection containing
        the amplifier boundary Rectangles.
    det: `lsst.afw.cameraGeom.detector.detector.Detector`
        Detector object.
    amp_values: dict of floats
        Dictionary of amplifier values to render, keyed by channel ID,
        e.g., 'C00', 'C01', etc.
    cm: `matplotlib.colors.Colormap`
        Colormap used to render amplifier values.
    z_range: 2-tuple of floats [None]
        Minimum and maximum values into which to map the unit interval
        for the color map. Values are mapped using
        max(0, min(1, (amp_value - z_range[0])/(z_range[1] - z_range[0])))
        If None, then use
        z_range = (min(amp_values.values()), max(amp_values.values()))
    use_log10: bool [False]
        If True, then use log10(amp_value) for positive amp_value. For
        non-positive amp_value, don't render the amp color.

    Returns
    -------
    None
    """
    if z_range is None:
        zvals = amp_values.values()
        z_range = min(zvals), max(zvals)
    # Map amp_value into [0, 1] for the colormap, clamped at both ends.
    # NOTE(review): a degenerate z_range (min == max) divides by zero here.
    def mapped_value(amp_value):
        return max(0, min(1., ((amp_value - z_range[0])
                               /(z_range[1] - z_range[0]))))
    my_facecolors = []
    for amp in det:
        if amp.getName() not in amp_values:
            continue
        if use_log10:
            if amp_values[amp.getName()] <= 0:
                # None marks amps that cannot be log-scaled; their patches
                # are dropped in the filtering pass below.
                my_facecolors.append(None)
            else:
                my_facecolors.append(
                    cm(mapped_value(np.log10(amp_values[amp.getName()]))))
        else:
            my_facecolors.append(cm(mapped_value(amp_values[amp.getName()])))
    # get_amp_patches applies the same membership test while iterating det,
    # so my_patches aligns one-to-one with my_facecolors.
    my_patches = get_amp_patches(det, amp_values)
    facecolors, patches = [], []
    for facecolor, patch in zip(my_facecolors, my_patches):
        if facecolor is not None:
            facecolors.append(facecolor)
            patches.append(patch)
    assert len(facecolors) == len(patches)
    pc = PatchCollection(patches, facecolors=facecolors)
    ax.add_collection(pc)
def plot_focal_plane(ax, amp_data, camera=None, cm=plt.cm.hot,
                     x_range=(-325, 325), y_range=(-325, 325),
                     z_range=None, use_log10=False, scale_factor='1',
                     title=''):
    """
    Make a "heat map" plot of the focalplane using per-amplifier values.

    Parameters
    ----------
    ax: `matplotlib.Axes`
        Axes object used to render the patch collection containing
        the amplifier boundary Rectangles.
    amp_data: dict of dict of floats
        Dictionary of dictionary of amplifier values to render,
        keyed by detector name, e.g., 'R01_S00' and then by channel ID,
        e.g., 'C00'.
    camera: `lsst.afw.cameraGeom.camera.camera.Camera` [None]
        Camera object containing the detector info. If None, use
        `lsst.obs.lsst.imsim.ImsimMapper().camera`
    cm: `matplotlib.colors.Colormap`
        Colormap used to render amplifier values.
    x_range: tuple [(-325, 325)]
        Focalplane plotting region in x-direction in units of mm.
    y_range: tuple [(-325, 325)]
        Focalplane plotting region in y-direction in units of mm.
    z_range: 2-tuple of floats [None]
        Minimum and maximum values into which to map the unit interval
        for the color map. Values are mapped using
        max(0, min(1, (amp_value - z_range[0])/(z_range[1] - z_range[0])))
        If None, then use the min and max of the (possibly
        log10-transformed) amp_data values.
    use_log10: bool [False]
        If True, then use log10(amp_value) for positive amp_value. For
        non-positive amp_value, don't render the amp color.
    scale_factor: str ['1']
        Scale factor to apply to the colorbar mapping. This value
        will be cast as a float when applied to the tick label values. It
        is passed as a string so that formatting in the colorbar tick
        labels can be controlled directly by the client code.
        This is not used if use_log10 == True.
    title: str ['']
        Title to apply to the plot.

    Returns
    -------
    matplotlib.colorbar.Colorbar
        The colorbar attached to the plot.  (An earlier version of this
        docstring said None, but the colorbar has always been returned.)
    """
    if camera is None:
        camera = ImsimMapper().camera
    plot_amp_boundaries(ax, camera=camera)
    if z_range is None:
        # Derive the color scale from the full set of amp values.
        z_range_values = []
        for _ in amp_data.values():
            z_range_values.extend(_.values())
        if use_log10:
            # Non-positive values cannot be log-scaled; exclude them.
            z_range_values = [np.log10(_) for _ in z_range_values if _ > 0]
        z_range = min(z_range_values), max(z_range_values)
    for det_name, amp_values in amp_data.items():
        plot_det(ax, camera[det_name], amp_values, cm=cm, z_range=z_range,
                 use_log10=use_log10)
    plt.xlim(*x_range)
    plt.ylim(*y_range)
    # NOTE(review): the axis labels are swapped relative to the axis names,
    # presumably reflecting the focal-plane coordinate convention --
    # confirm before "fixing".
    plt.xlabel('y (mm)')
    plt.ylabel('x (mm)')
    # Build a standalone mappable so a colorbar can be attached; the
    # PatchCollections drawn above do not carry a norm/cmap themselves.
    norm = plt.Normalize(vmin=z_range[0], vmax=z_range[1])
    sm = plt.cm.ScalarMappable(cmap=cm, norm=norm)
    sm.set_array([])
    colorbar = plt.colorbar(sm)
    if use_log10:
        # One tick per decade between 10**0 and 10**z_range[1], labeled
        # with the corresponding power of 10.
        ticks = sorted(list(set([int(_) for _ in
                                 np.log10(np.logspace(0, z_range[1]))])))
        ticklabels = [10**_ for _ in ticks]
        colorbar.set_ticks(ticks)
        colorbar.set_ticklabels(ticklabels)
    elif scale_factor != '1':
        # Relabel the existing ticks in units of scale_factor, noting the
        # factor itself on the last tick label.
        ticks = colorbar.get_ticks()
        colorbar.set_ticks(ticks)
        ticklabels = [_/float(scale_factor) for _ in ticks]
        ticklabels[-1] = '{} x {}'.format(ticklabels[-1], scale_factor)
        colorbar.set_ticklabels(ticklabels)
    plt.title(title)
    return colorbar
def hist_amp_data(amp_data, x_label, bins=50, hist_range=None, color=None,
                  label=None, use_log10=False, yscale='log',
                  scale_factor='1'):
    """Histogram focal plane results from per amp data.

    Parameters
    ----------
    amp_data: dict of dict of floats
        Dictionary of dictionary of amplifier values to render,
        keyed by detector name, e.g., 'R01_S00' and then by channel ID,
        e.g., 'C00'.
    x_label: str
        Label of x-axis. Typically, this is the results column name.
    bins: int [50]
        Number of histogram bins.
    hist_range: (float, float) [None]
        Histogram min and max values. If None, then use the plt.hist default.
    color: str [None]
        Histogram color. If None, then use the plt.hist default.
    label: str [None]
        Histogram label.
    use_log10: bool [False]
        If True, then histogram log10(amp_value) for positive values only;
        non-positive values are dropped.
    yscale: str ['log']
        Argument to pass to plt.yscale(...). The options are 'linear', 'log'.
    scale_factor: str ['1']
        Scale factor applied (as a float) to the x-tick labels; the factor
        itself is noted on the last tick label.  Ignored when use_log10 is
        True.
    """
    # Flatten the per-detector dicts into a single list of amp values.
    amp_values = []
    for _ in amp_data.values():
        amp_values.extend(_.values())
    if use_log10:
        # Non-positive values cannot be log10-scaled; they are dropped.
        amp_values = [np.log10(_) for _ in amp_values if _ > 0]
    plt.hist(amp_values, bins=bins, range=hist_range, histtype='step',
             color=color, label=label)
    plt.xlabel(x_label)
    plt.ylabel('entries / bin')
    plt.yscale(yscale)
    ax = plt.axes()
    if use_log10:
        # One tick per decade, labeled with the corresponding power of 10.
        ticks = sorted(list(set([int(_) for _ in
                                 np.log10(np.logspace(0, max(amp_values)))])))
        ticklabels = [10**_ for _ in ticks]
        ax.set_xticks(ticks)
        ax.set_xticklabels(ticklabels)
    elif scale_factor != '1':
        ticks = ax.get_xticks()
        if hist_range is not None:
            # Keep only ticks inside the plotted range.  (Previously
            # hist_range was indexed unconditionally, so hist_range=None
            # with scale_factor != '1' raised TypeError.)
            ticks = [_ for _ in ticks
                     if hist_range[0] <= _ <= hist_range[1]]
        ax.set_xticks(ticks)
        ticklabels = ['{:.1f}'.format(_/float(scale_factor)) for _ in ticks]
        if ticklabels:
            # Note the scale factor on the last tick label.
            ticklabels[-1] += f'\nx {scale_factor}'
ax.set_xticklabels(ticklabels) | 0.88642 | 0.698715 |
from __future__ import print_function, division, absolute_import
import math
import types
import ctypes
from functools import partial
import numba.typesystem
from numba.typesystem import itypesystem, numpy_support
from numba import numbawrapper
from numba.support.ctypes_support import is_ctypes, from_ctypes_value
from numba.support import cffi_support
import numpy as np
import datetime
#------------------------------------------------------------------------
# Class -> Type
#------------------------------------------------------------------------
def get_typing_defaults(u):
    """Build the fixed lookup table from Python classes to universe types.

    :param u: The type universe
    """
    return {
        float: u.double,
        bool: u.bool_,
        complex: u.complex128,
        str: u.string_,
        # The datetime.datetime -> u.datetime mapping is left disabled.
        np.datetime64: u.datetime(),
        np.timedelta64: u.timedelta(),
    }
#------------------------------------------------------------------------
# Class -> pyval -> Type
#------------------------------------------------------------------------
def get_default_typing_rules(u, typeof, promote):
    """
    Get a table mapping Python classes to handlers (value -> type)

    :param u: The type universe
    :param typeof: function mapping a value to its type (used recursively
        for container elements)
    :param promote: type-promotion function (currently unused here; see
        the commented-out reduce() below)
    """
    table = {}
    # Register `func` as the handler for each class.  Handlers are stored
    # with a uniform (universe, value) signature; the universe argument is
    # ignored because func already closes over `u`.
    def register(*classes):
        def dec(func):
            for cls in classes:
                table[cls] = lambda u, value: func(value)
            return func
        return dec
    # NOTE: `long` only exists on Python 2; this module targets Python 2.
    @register(int, long)
    def type_int(value):
        # Number of significant bits in |value| (0 for |value| < 1).
        if abs(value) < 1:
            bits = 0
        else:
            bits = math.ceil(math.log(abs(value), 2))
        if bits < 32:
            return u.int_
        elif bits < 64:
            return u.int64
        else:
            # NOTE(review): the message is never %-formatted -- ValueError
            # receives two separate arguments here.
            raise ValueError("Cannot represent %s as int32 or int64", value)
    @register(np.ndarray)
    def type_ndarray(value):
        # Implicitly returns None for non-ndarray values.
        if isinstance(value, np.ndarray):
            dtype = numpy_support.map_dtype(value.dtype)
            return u.array(dtype, value.ndim)
            # Contiguity flags intentionally not encoded in the type:
            #is_c_contig=value.flags['C_CONTIGUOUS'],
            #is_f_contig=value.flags['F_CONTIGUOUS'])
    @register(tuple, list, dict)
    def type_container(value):
        assert isinstance(value, (tuple, list, dict))
        if isinstance(value, dict):
            # Python 2: keys()/values() return lists, which recurse into
            # the tuple/list branch below.
            key_type = type_container(value.keys())
            value_type = type_container(value.values())
            return u.dict_(key_type, value_type, size=len(value))
        if isinstance(value, tuple):
            container_type = u.tuple_
        else:
            container_type = u.list_
        if 0 < len(value) < 30:
            # Figure out base type if the container is not too large
            # base_type = reduce(promote, (typeof(child) for child in value))
            # Use the concrete element type only if it is homogeneous.
            ty = typeof(value[0])
            if all(typeof(child) == ty for child in value):
                base_type = ty
            else:
                base_type = u.object_
        else:
            base_type = u.object_
        return container_type(base_type, size=len(value))
    register(np.dtype)(lambda value: u.numpy_dtype(numpy_support.map_dtype(value)))
    register(types.ModuleType)(lambda value: u.module(value))
    register(itypesystem.Type)(lambda value: u.meta(value))
    return table
def get_constant_typer(universe, typeof, promote):
    """Return a constant-typing function (value -> type or None).

    Combines the simple class table with the per-value handler table and
    wraps both in an itypesystem.ConstantTyper.
    """
    return itypesystem.ConstantTyper(
        universe,
        get_typing_defaults(universe),
        get_default_typing_rules(universe, typeof, promote),
    ).typeof
#------------------------------------------------------------------------
# Constant matching ({ pyval -> bool : pyval -> Type })
#------------------------------------------------------------------------
# TODO: Make this a well-defined (easily overridable) matching table
# E.g. { "numpy" : { is_numpy : get_type } }
def is_dtype_constructor(value):
    """True when *value* is a numpy scalar class (e.g. np.int32) rather
    than an instance of one."""
    if not isinstance(value, type):
        return False
    return issubclass(value, np.generic)
def is_numpy_scalar(value):
    """True when *value* is an instance of a numpy scalar type."""
    numpy_scalar_base = np.generic
    return isinstance(value, numpy_scalar_base)
def is_registered(value):
    """True if *value* has an entry registered with numba's module type
    inference machinery."""
    # Imported lazily -- presumably to avoid a circular import at module
    # load time; confirm before hoisting to the top of the file.
    from numba.type_inference import module_type_inference
    return module_type_inference.is_registered(value)
def from_ctypes(value, u):
    """Map a ctypes value to a type in universe *u*.

    Function values become pointer-to-function types carrying the raw
    function address; everything else maps directly.
    """
    ctype = from_ctypes_value(value)
    if not ctype.is_function:
        return ctype
    address = ctypes.cast(value, ctypes.c_void_p).value
    return u.pointer_to_function(value, address, ctype)
def from_cffi(value, u):
    """Build a pointer-to-function type in universe *u* for a cffi
    function object, using its signature and raw pointer."""
    signature = cffi_support.get_signature(value)
    pointer = cffi_support.get_pointer(value)
    return u.pointer_to_function(value, pointer, signature)
def from_typefunc(value, u):
    """Type a registered module attribute, falling back to a known-value
    type when module type inference has nothing for it."""
    from numba.type_inference import module_type_inference
    inferred = module_type_inference.module_attribute_type(value)
    return inferred if inferred is not None else u.known_value(value)
# Predicates used as keys of the constant matcher table below.
is_numba_exttype = lambda value: hasattr(type(value), '__numba_ext_type')
# `import numba.typesystem` at the top of the file binds the top-level
# `numba` name referenced here.
is_NULL = lambda value: value is numba.NULL
is_autojit_func = lambda value: isinstance(
    value, numbawrapper.NumbaSpecializingWrapper)
def get_default_match_table(u):
    """
    Get a matcher table: { (type -> bool) : (value -> type) }

    Each entry maps a predicate to the handler that produces the value's
    type when the predicate accepts it; the table is consumed by
    find_match().
    """
    # NOTE(review): match order follows dict iteration order, which is
    # arbitrary on the Python 2 interpreters this module targets -- the
    # predicates are presumably expected to be mutually exclusive; confirm.
    table = {
        is_NULL:
            lambda value: numba.typesystem.null,
        is_dtype_constructor:
            lambda value: numba.typesystem.from_numpy_dtype(np.dtype(value)),
        is_numpy_scalar:
            lambda value: numpy_support.map_dtype(value.dtype),
        is_ctypes:
            lambda value: from_ctypes(value, u),
        cffi_support.is_cffi_func:
            lambda value: from_cffi(value, u),
        is_numba_exttype:
            lambda value: getattr(type(value), '__numba_ext_type'),
        numbawrapper.is_numba_wrapper:
            lambda value: u.jit_function(value),
        is_autojit_func:
            lambda value: u.autojit_function(value),
        is_registered:
            lambda value: from_typefunc(value, u),
    }
    return table
def find_match(matchtable, value):
    """Return the type for *value* from the first matcher that accepts it.

    matchtable maps predicate -> (value -> type); matchers are tried in
    dict iteration order.  Returns None when no predicate matches.
    """
    # dict.iteritems() exists only on Python 2; .items() behaves the same
    # here and keeps this function importable under Python 3 as well.
    for matcher, typefunc in matchtable.items():
        if matcher(value):
            result = typefunc(value)
            # Handlers must produce a real type, never None.
            assert result is not None
            return result
    return None
#------------------------------------------------------------------------
# Typeof
#------------------------------------------------------------------------
def object_typer(universe, value):
    """Catch-all typer: every value types as the universe's object type."""
    # The value itself is deliberately ignored.
    return getattr(universe, 'object_')
def find_first(callables, value):
    """Apply each callable to *value* in order and return the first
    non-None result; assert if every callable declines."""
    for outcome in (func(value) for func in callables):
        if outcome is not None:
            return outcome
    assert False, (callables, value)
def get_default_typeof(universe, promote):
    """Compose the default typeof(): constants, then matchers, then the
    catch-all object typer, tried in that order by find_first()."""
    # The lambda defers the lookup of `typeof`, which is only bound a few
    # lines below; by the time the typer actually runs, the name exists.
    typeof1 = get_constant_typer(universe, lambda value: typeof(value), promote)
    typeof2 = partial(find_match, get_default_match_table(universe))
    typeof3 = partial(object_typer, universe)
    typeof = partial(find_first, [typeof1, typeof2, typeof3])
return typeof | oldnumba/typesystem/constants.py | from __future__ import print_function, division, absolute_import
import math
import types
import ctypes
from functools import partial
import numba.typesystem
from numba.typesystem import itypesystem, numpy_support
from numba import numbawrapper
from numba.support.ctypes_support import is_ctypes, from_ctypes_value
from numba.support import cffi_support
import numpy as np
import datetime
#------------------------------------------------------------------------
# Class -> Type
#------------------------------------------------------------------------
def get_typing_defaults(u):
    """
    Get a simple table mapping Python classes to types.

    :param u: The type universe
    """
    typing_defaults = {
        float: u.double,
        bool: u.bool_,
        complex: u.complex128,
        str: u.string_,
        # The datetime.datetime mapping is left disabled (commented out);
        # only the numpy datetime/timedelta scalars are handled.
        #datetime.datetime: u.datetime,
        np.datetime64: u.datetime(),
        np.timedelta64: u.timedelta(),
    }
    return typing_defaults
#------------------------------------------------------------------------
# Class -> pyval -> Type
#------------------------------------------------------------------------
def get_default_typing_rules(u, typeof, promote):
"""
Get a table mapping Python classes to handlers (value -> type)
:param u: The type universe
"""
table = {}
def register(*classes):
def dec(func):
for cls in classes:
table[cls] = lambda u, value: func(value)
return func
return dec
@register(int, long)
def type_int(value):
if abs(value) < 1:
bits = 0
else:
bits = math.ceil(math.log(abs(value), 2))
if bits < 32:
return u.int_
elif bits < 64:
return u.int64
else:
raise ValueError("Cannot represent %s as int32 or int64", value)
@register(np.ndarray)
def type_ndarray(value):
if isinstance(value, np.ndarray):
dtype = numpy_support.map_dtype(value.dtype)
return u.array(dtype, value.ndim)
#is_c_contig=value.flags['C_CONTIGUOUS'],
#is_f_contig=value.flags['F_CONTIGUOUS'])
@register(tuple, list, dict)
def type_container(value):
assert isinstance(value, (tuple, list, dict))
if isinstance(value, dict):
key_type = type_container(value.keys())
value_type = type_container(value.values())
return u.dict_(key_type, value_type, size=len(value))
if isinstance(value, tuple):
container_type = u.tuple_
else:
container_type = u.list_
if 0 < len(value) < 30:
# Figure out base type if the container is not too large
# base_type = reduce(promote, (typeof(child) for child in value))
ty = typeof(value[0])
if all(typeof(child) == ty for child in value):
base_type = ty
else:
base_type = u.object_
else:
base_type = u.object_
return container_type(base_type, size=len(value))
register(np.dtype)(lambda value: u.numpy_dtype(numpy_support.map_dtype(value)))
register(types.ModuleType)(lambda value: u.module(value))
register(itypesystem.Type)(lambda value: u.meta(value))
return table
def get_constant_typer(universe, typeof, promote):
"""
Get a function mapping values to types, which returns None if unsuccessful.
"""
typetable = get_typing_defaults(universe)
handler_table = get_default_typing_rules(universe, typeof, promote)
return itypesystem.ConstantTyper(universe, typetable, handler_table).typeof
#------------------------------------------------------------------------
# Constant matching ({ pyval -> bool : pyval -> Type })
#------------------------------------------------------------------------
# TODO: Make this a well-defined (easily overridable) matching table
# E.g. { "numpy" : { is_numpy : get_type } }
def is_dtype_constructor(value):
    """Detect numpy scalar classes (e.g. np.int32), i.e. dtype
    constructors, as opposed to scalar instances."""
    is_a_class = isinstance(value, type)
    return is_a_class and issubclass(value, np.generic)
def is_numpy_scalar(value):
    """Report whether *value* is an instance of a numpy scalar type."""
    return isinstance(value, (np.generic,))
def is_registered(value):
from numba.type_inference import module_type_inference
return module_type_inference.is_registered(value)
def from_ctypes(value, u):
result = from_ctypes_value(value)
if result.is_function:
pointer = ctypes.cast(value, ctypes.c_void_p).value
return u.pointer_to_function(value, pointer, result)
else:
return result
def from_cffi(value, u):
signature = cffi_support.get_signature(value)
pointer = cffi_support.get_pointer(value)
return u.pointer_to_function(value, pointer, signature)
def from_typefunc(value, u):
from numba.type_inference import module_type_inference
result = module_type_inference.module_attribute_type(value)
if result is not None:
return result
else:
return u.known_value(value)
is_numba_exttype = lambda value: hasattr(type(value), '__numba_ext_type')
is_NULL = lambda value: value is numba.NULL
is_autojit_func = lambda value: isinstance(
value, numbawrapper.NumbaSpecializingWrapper)
def get_default_match_table(u):
"""
Get a matcher table: { (type -> bool) : (value -> type) }
"""
table = {
is_NULL:
lambda value: numba.typesystem.null,
is_dtype_constructor:
lambda value: numba.typesystem.from_numpy_dtype(np.dtype(value)),
is_numpy_scalar:
lambda value: numpy_support.map_dtype(value.dtype),
is_ctypes:
lambda value: from_ctypes(value, u),
cffi_support.is_cffi_func:
lambda value: from_cffi(value, u),
is_numba_exttype:
lambda value: getattr(type(value), '__numba_ext_type'),
numbawrapper.is_numba_wrapper:
lambda value: u.jit_function(value),
is_autojit_func:
lambda value: u.autojit_function(value),
is_registered:
lambda value: from_typefunc(value, u),
}
return table
def find_match(matchtable, value):
    """Try each predicate in *matchtable* against *value*; return the
    type produced by the first matching handler, or None.

    matchtable maps predicate -> (value -> type).
    """
    # Use .items() rather than the Python-2-only .iteritems() so this
    # function also works under Python 3.
    for matcher, typefunc in matchtable.items():
        if matcher(value):
            result = typefunc(value)
            # Handlers must produce a real type, never None.
            assert result is not None
            return result
    return None
#------------------------------------------------------------------------
# Typeof
#------------------------------------------------------------------------
def object_typer(universe, value):
    """Type any value as the universe's generic object type."""
    # The concrete value is irrelevant for the fallback typer.
    result = universe.object_
    return result
def find_first(callables, value):
    """Return the first non-None result of applying each callable to
    *value*; assert when all of them return None."""
    for candidate in callables:
        found = candidate(value)
        if found is None:
            continue
        return found
    assert False, (callables, value)
def get_default_typeof(universe, promote):
    """Compose the default typeof(): constant typer, then the matcher
    table, then the catch-all object typer, tried in order."""
    # The lambda defers resolution of `typeof` until call time; the name
    # is bound by the final assignment below before any typer runs.
    typeof1 = get_constant_typer(universe, lambda value: typeof(value), promote)
    typeof2 = partial(find_match, get_default_match_table(universe))
    typeof3 = partial(object_typer, universe)
    typeof = partial(find_first, [typeof1, typeof2, typeof3])
return typeof | 0.495361 | 0.29381 |
# Use, modification and distribution is subject to the Boost Software
# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Test skeleton/content
import boost.parallel.mpi as mpi
import skeleton_content
def test_skeleton_and_content(comm, root, manual_broadcast = True):
    """Round-trip a list's skeleton and content from rank `root` to every
    other rank and verify the received copies match.

    comm -- MPI communicator
    root -- rank that sends; all other ranks receive and check
    manual_broadcast -- must be True: the "broadcast" is implemented as
        point-to-point sends to each non-root rank

    Note: this is Python 2 code (print statements); the trailing comma on
    the print statements suppresses the newline until "OK." is printed.
    """
    assert manual_broadcast
    # Setup data
    list_size = comm.size + 7
    original_list = skeleton_content.list_int()
    for i in range(0,list_size):
        original_list.push_back(i)
    if comm.rank == root:
        # Broadcast skeleton
        print ("Broadcasting integer list skeleton from root %d..." % (root)),
        if manual_broadcast:
            for p in range(0,comm.size):
                if p != comm.rank:
                    comm.send(p, 0, value = mpi.skeleton(original_list))
        print "OK."
        # Broadcast content
        print ("Broadcasting integer list content from root %d..." % (root)),
        if manual_broadcast:
            for p in range(0,comm.size):
                if p != comm.rank:
                    comm.send(p, 0, value = mpi.get_content(original_list))
        print "OK."
        # Broadcast reversed content
        original_list.reverse()
        print ("Broadcasting reversed integer list content from root %d..." % (root)),
        if manual_broadcast:
            for p in range(0,comm.size):
                if p != comm.rank:
                    comm.send(p, 0, value = mpi.get_content(original_list))
        print "OK."
    else:
        # Allocate some useless data, to try to get the addresses of
        # the underlying lists used later to be different across
        # processors.
        junk_list = skeleton_content.list_int()
        for i in range(0,comm.rank * 3 + 1):
            junk_list.push_back(i)
        # Receive the skeleton of the list.  (The assert below relies on
        # manual_broadcast being True, which is guaranteed by the assert
        # at the top of this function.)
        if manual_broadcast:
            transferred_list_skeleton = comm.recv(root, 0)
        assert transferred_list_skeleton.object.size == list_size
        # Receive the content in-place into the skeleton's list and check it
        transferred_list = transferred_list_skeleton.object
        if manual_broadcast:
            comm.recv(root, 0, mpi.get_content(transferred_list))
        assert transferred_list == original_list
        # Receive the reversed content (again, in place) and check it
        original_list.reverse()
        if manual_broadcast:
            comm.recv(root, 0, mpi.get_content(transferred_list))
        assert transferred_list == original_list
# Run the round-trip twice so two different ranks act as the sender.
test_skeleton_and_content(mpi.world, 0)
test_skeleton_and_content(mpi.world, 1)
# Use, modification and distribution is subject to the Boost Software
# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Test skeleton/content
import boost.parallel.mpi as mpi
import skeleton_content
def test_skeleton_and_content(comm, root, manual_broadcast = True):
assert manual_broadcast
# Setup data
list_size = comm.size + 7
original_list = skeleton_content.list_int()
for i in range(0,list_size):
original_list.push_back(i)
if comm.rank == root:
# Broadcast skeleton
print ("Broadcasting integer list skeleton from root %d..." % (root)),
if manual_broadcast:
for p in range(0,comm.size):
if p != comm.rank:
comm.send(p, 0, value = mpi.skeleton(original_list))
print "OK."
# Broadcast content
print ("Broadcasting integer list content from root %d..." % (root)),
if manual_broadcast:
for p in range(0,comm.size):
if p != comm.rank:
comm.send(p, 0, value = mpi.get_content(original_list))
print "OK."
# Broadcast reversed content
original_list.reverse()
print ("Broadcasting reversed integer list content from root %d..." % (root)),
if manual_broadcast:
for p in range(0,comm.size):
if p != comm.rank:
comm.send(p, 0, value = mpi.get_content(original_list))
print "OK."
else:
# Allocate some useless data, to try to get the addresses of
# the underlying lists used later to be different across
# processors.
junk_list = skeleton_content.list_int()
for i in range(0,comm.rank * 3 + 1):
junk_list.push_back(i)
# Receive the skeleton of the list
if manual_broadcast:
transferred_list_skeleton = comm.recv(root, 0)
assert transferred_list_skeleton.object.size == list_size
# Receive the content and check it
transferred_list = transferred_list_skeleton.object
if manual_broadcast:
comm.recv(root, 0, mpi.get_content(transferred_list))
assert transferred_list == original_list
# Receive the content (again) and check it
original_list.reverse()
if manual_broadcast:
comm.recv(root, 0, mpi.get_content(transferred_list))
assert transferred_list == original_list
test_skeleton_and_content(mpi.world, 0)
test_skeleton_and_content(mpi.world, 1) | 0.506103 | 0.182608 |
import gtk
import vtk
from render_window import PyLocatorRenderWindow
from events import EventHandler, UndoRegistry
from shared import shared
from dialogs import edit_label_of_marker
INTERACT_CURSOR, MOVE_CURSOR, COLOR_CURSOR, SELECT_CURSOR, DELETE_CURSOR, LABEL_CURSOR, SCREENSHOT_CURSOR = gtk.gdk.ARROW, gtk.gdk.HAND2, gtk.gdk.SPRAYCAN, gtk.gdk.TCROSS, gtk.gdk.X_CURSOR, gtk.gdk.PENCIL, gtk.gdk.ICON
class MarkerWindowInteractor(PyLocatorRenderWindow):
"""
CLASS: MarkerWindowInteractor
DESCR:
"""
    def __init__(self):
        """Wire per-button dispatch to the underlying vtk interactor and
        start in non-vtk interaction mode."""
        PyLocatorRenderWindow.__init__(self)
        #self.camera = self.renderer.GetActiveCamera()
        # Map button numbers 1/2/3 to the vtk interactor's left/middle/
        # right press and release handlers.
        self.pressFuncs = {1 : self._Iren.LeftButtonPressEvent,
                           2 : self._Iren.MiddleButtonPressEvent,
                           3 : self._Iren.RightButtonPressEvent}
        self.releaseFuncs = {1 : self._Iren.LeftButtonReleaseEvent,
                             2 : self._Iren.MiddleButtonReleaseEvent,
                             3 : self._Iren.RightButtonReleaseEvent}
        # Optional per-button callbacks installed by the set_mouse1_to_*
        # mode switchers below.
        self.pressHooks = {}
        self.releaseHooks = {}
        self.vtk_interact_mode = False
    def mouse1_mode_change(self, event):
        """
        Give derived classes a chance to clean up any observers, etc.,
        before switching to a new mouse-1 mode.
        """
        pass
    def update_viewer(self, event, *args):
        """Dispatch viewer events; 'mouse1 *' and 'vtk interact' events
        switch the mouse-1 mode via the matching set_mouse1_to_* method."""
        PyLocatorRenderWindow.update_viewer(self, event, *args)
        # Any event starting with 'mouse1' is a mode change; let derived
        # classes tear down state for the old mode first.
        if event.find('mouse1')==0:
            self.mouse1_mode_change(event)
        if event=='mouse1 interact':
            if shared.debug: print "MarkerWindowInteractor.set_mouse1_to_interact()"
            self.set_mouse1_to_interact()
        elif event=='vtk interact':
            if shared.debug: print "MarkerWindowInteractor.set_mouse1_to_vtkinteract()"
            self.set_mouse1_to_vtkinteract()
        elif event=='mouse1 color':
            if shared.debug: print "MarkerWindowInteractor.set_mouse1_to_color()"
            self.set_mouse1_to_color()
        elif event=='mouse1 delete':
            if shared.debug: print "MarkerWindowInteractor.set_mouse1_to_delete()"
            self.set_mouse1_to_delete()
        elif event=='mouse1 label':
            if shared.debug: print "MarkerWindowInteractor.set_mouse1_to_label()"
            self.set_mouse1_to_label()
        elif event=='mouse1 select':
            if shared.debug: print "MarkerWindowInteractor.set_mouse1_to_select()"
            self.set_mouse1_to_select()
        elif event=='mouse1 move':
            if shared.debug: print "MarkerWindowInteractor.set_mouse1_to_move()"
            self.set_mouse1_to_move()
    def get_marker_at_point(self):
        """Return the marker at the current event position, or None.
        Abstract -- subclasses must implement."""
        raise NotImplementedError
    def set_image_data(self, imageData):
        """No-op hook; subclasses may override."""
        pass
    def set_select_mode(self):
        """No-op hook; subclasses may override."""
        pass
    def set_interact_mode(self):
        """Leave vtk interaction mode."""
        if shared.debug: print "set_interact_mode()!!!!"
        self.vtk_interact_mode = False
    def set_vtkinteract_mode(self):
        """Enter vtk interaction mode (idempotent)."""
        if shared.debug: print "set_vtkinteract_mode()!!!!"
        if (self.vtk_interact_mode == False):
            # mcc XXX: ignore this
            #foo = self.AddObserver('InteractionEvent', self.vtkinteraction_event)
            #print "MarkerWindowInteractor.set_vtkinteract_mode(): AddObserver call returns ", foo
            self.vtk_interact_mode = True
    def set_mouse1_to_interact(self):
        """Mouse-1 mode: plain camera interaction; removes any button-1
        hooks installed by other modes and restores the arrow cursor."""
        if shared.debug: print "MarkerWindowInteractor.set_mouse1_to_interact()"
        self.vtk_interact_mode = False
        # XXX why does this not work
        self.set_interact_mode()
        try: del self.pressHooks[1]
        except KeyError: pass
        try: del self.releaseHooks[1]
        except KeyError: pass
        cursor = gtk.gdk.Cursor (INTERACT_CURSOR)
        if self.window is not None:
            self.window.set_cursor (cursor)
    def vtkinteraction_event(self, *args):
        """Re-render in response to a vtk interaction event."""
        if shared.debug: print "vtkinteraction_event!!!"
        self.Render()
    def set_mouse1_to_vtkinteract(self):
        """Mouse-1 mode: vtk interaction; button-1 picks the actor under
        the pointer (currently nothing is done with the picked actor)."""
        if shared.debug: print "MarkerWindowInteractor.set_mouse1_to_vtkinteract()"
        self.set_vtkinteract_mode()
        def button_down(*args):
            #print "button down on brain interact."
            x, y = self.GetEventPosition()
            picker = vtk.vtkPropPicker()
            picker.PickProp(x, y, self.renderer)
            actor = picker.GetActor()
            # now do something with the actor !!!
            #print "actor is ", actor
        def button_up(*args):
            #print "button up on brain interact."
            pass
        self.pressHooks[1] = button_down
        self.releaseHooks[1] = button_up
        cursor = gtk.gdk.Cursor (INTERACT_CURSOR)
        if self.window is not None:
            self.window.set_cursor (cursor)
    def set_mouse1_to_move(self):
        """Mouse-1 mode: move markers; shows the hand cursor."""
        self.set_select_mode()
        cursor = gtk.gdk.Cursor (MOVE_CURSOR)
        if self.window is not None:
            self.window.set_cursor (cursor)
    def set_mouse1_to_delete(self):
        """Mouse-1 mode: clicking a marker removes it."""
        def button_up(*args):
            pass
        def button_down(*args):
            marker = self.get_marker_at_point()
            if marker is None: return
            EventHandler().remove_marker(marker)
        self.pressHooks[1] = button_down
        self.releaseHooks[1] = button_up
        self.set_select_mode()
        cursor = gtk.gdk.Cursor (DELETE_CURSOR)
        if self.window is not None:
            self.window.set_cursor (cursor)
    def set_mouse1_to_select(self):
        """Mouse-1 mode: clicking selects a marker; with Ctrl held, the
        click toggles the marker in/out of the current selection."""
        def button_up(*args):
            pass
        def button_down(*args):
            marker = self.get_marker_at_point()
            if marker is None: return
            isSelected = EventHandler().is_selected(marker)
            if self.interactor.GetControlKey():
                # Ctrl-click toggles membership in the selection.
                if isSelected: EventHandler().remove_selection(marker)
                else: EventHandler().add_selection(marker)
            else: EventHandler().select_new(marker)
        self.pressHooks[1] = button_down
        self.releaseHooks[1] = button_up
        self.set_select_mode()
        cursor = gtk.gdk.Cursor (SELECT_CURSOR)
        if self.window is not None:
            self.window.set_cursor (cursor)
    def set_mouse1_to_label(self):
        """Mouse-1 mode: clicking a marker opens its label editor."""
        def button_up(*args):
            pass
        def button_down(*args):
            marker = self.get_marker_at_point()
            if marker is None: return
            edit_label_of_marker(marker)
        self.pressHooks[1] = button_down
        self.releaseHooks[1] = button_up
        self.set_select_mode()
        cursor = gtk.gdk.Cursor (LABEL_CURSOR)
        if self.window is not None:
            self.window.set_cursor (cursor)
    def set_mouse1_to_color(self):
        """Mouse-1 mode: clicking a marker recolors it with the default
        color; the old color is pushed onto the undo stack first."""
        def button_up(*args):
            pass
        def button_down(*args):
            marker = self.get_marker_at_point()
            if marker is None: return
            color = EventHandler().get_default_color()
            oldColor = marker.get_color()
            # Register the inverse operation before applying the change.
            UndoRegistry().push_command(
                EventHandler().notify, 'color marker', marker, oldColor)
            EventHandler().notify('color marker', marker, color)
            EventHandler().notify('render now')
        self.pressHooks[1] = button_down
        self.releaseHooks[1] = button_up
        self.set_select_mode()
        cursor = gtk.gdk.Cursor (COLOR_CURSOR)
        if self.window is not None:
            self.window.set_cursor (cursor)
def OnButtonDown(self, wid, event):
"""Mouse button pressed."""
self.lastCamera = self.get_camera_fpu()
m = self.get_pointer()
ctrl, shift = self._GetCtrlShift(event)
if shared.debug: print "MarkerWindowInteractor.OnButtonDown(): ctrl=", ctrl,"shift=",shift,"button=",event.button
self._Iren.SetEventInformationFlipY(m[0], m[1], ctrl, shift,
chr(0), 0, None)
if shared.debug: print "MarkerWindowInteractor.OnButtonDown(): pressFuncs=", self.pressFuncs, "pressHooks=", self.pressHooks
if event.button in self.interactButtons:
if shared.debug: print "self.vtk_interact_mode =", self.vtk_interact_mode
if (self.vtk_interact_mode == False):
self.pressFuncs[event.button]()
try: self.pressHooks[event.button]()
except KeyError: pass
def OnButtonUp(self, wid, event):
    """Mouse button released: forward to vtk, run release hook, record camera undo."""
    m = self.get_pointer()
    ctrl, shift = self._GetCtrlShift(event)
    if shared.debug: print "MarkerWindowInteractor.OnButtonUp(): ctrl=", ctrl,"shift=",shift, "button=",event.button
    # Forward the event to the vtk interactor (Y flipped; see OnButtonDown).
    self._Iren.SetEventInformationFlipY(m[0], m[1], ctrl, shift,
                                        chr(0), 0, None)
    if event.button in self.interactButtons:
        if shared.debug: print "self.vtk_interact_mode =", self.vtk_interact_mode
        if (self.vtk_interact_mode == False):
            self.releaseFuncs[event.button]()
    # Fire the mode-specific release hook, if one is installed.
    try: self.releaseHooks[event.button]()
    except KeyError: pass
    # If the camera changed since button-down, make the move undoable.
    # lastCamera may not exist if release arrives without a press.
    thisCamera = self.get_camera_fpu()
    try: self.lastCamera
    except AttributeError: pass # this
    else:
        if thisCamera != self.lastCamera:
            UndoRegistry().push_command(self.set_camera, self.lastCamera)
    return True
def get_plane_points(self, pw):
    """Return the (origin, point1, point2) triple that defines a vtk plane widget."""
    origin = pw.GetOrigin()
    first = pw.GetPoint1()
    second = pw.GetPoint2()
    return origin, first, second
def set_plane_points(self, pw, pnts):
    """Apply an (origin, point1, point2) triple to a vtk plane widget.

    Inverse of get_plane_points(); `pnts` is the tuple that method returns.
    """
    o, p1, p2 = pnts
    pw.SetOrigin(o)
    pw.SetPoint1(p1)
    pw.SetPoint2(p2)
    # Commit the new geometry so vtk re-places the widget.
    pw.UpdatePlacement()
import vtk
from render_window import PyLocatorRenderWindow
from events import EventHandler, UndoRegistry
from shared import shared
from dialogs import edit_label_of_marker
INTERACT_CURSOR, MOVE_CURSOR, COLOR_CURSOR, SELECT_CURSOR, DELETE_CURSOR, LABEL_CURSOR, SCREENSHOT_CURSOR = gtk.gdk.ARROW, gtk.gdk.HAND2, gtk.gdk.SPRAYCAN, gtk.gdk.TCROSS, gtk.gdk.X_CURSOR, gtk.gdk.PENCIL, gtk.gdk.ICON
class MarkerWindowInteractor(PyLocatorRenderWindow):
"""
CLASS: MarkerWindowInteractor
DESCR:
"""
def __init__(self):
PyLocatorRenderWindow.__init__(self)
#self.camera = self.renderer.GetActiveCamera()
self.pressFuncs = {1 : self._Iren.LeftButtonPressEvent,
2 : self._Iren.MiddleButtonPressEvent,
3 : self._Iren.RightButtonPressEvent}
self.releaseFuncs = {1 : self._Iren.LeftButtonReleaseEvent,
2 : self._Iren.MiddleButtonReleaseEvent,
3 : self._Iren.RightButtonReleaseEvent}
self.pressHooks = {}
self.releaseHooks = {}
self.vtk_interact_mode = False
def mouse1_mode_change(self, event):
"""
Give derived classes toclean up any observers, etc, before
switching to new mode
"""
pass
def update_viewer(self, event, *args):
PyLocatorRenderWindow.update_viewer(self, event, *args)
if event.find('mouse1')==0:
self.mouse1_mode_change(event)
if event=='mouse1 interact':
if shared.debug: print "MarkerWindowInteractor.set_mouse1_to_interact()"
self.set_mouse1_to_interact()
elif event=='vtk interact':
if shared.debug: print "MarkerWindowInteractor.set_mouse1_to_vtkinteract()"
self.set_mouse1_to_vtkinteract()
elif event=='mouse1 color':
if shared.debug: print "MarkerWindowInteractor.set_mouse1_to_color()"
self.set_mouse1_to_color()
elif event=='mouse1 delete':
if shared.debug: print "MarkerWindowInteractor.set_mouse1_to_delete()"
self.set_mouse1_to_delete()
elif event=='mouse1 label':
if shared.debug: print "MarkerWindowInteractor.set_mouse1_to_label()"
self.set_mouse1_to_label()
elif event=='mouse1 select':
if shared.debug: print "MarkerWindowInteractor.set_mouse1_to_select()"
self.set_mouse1_to_select()
elif event=='mouse1 move':
if shared.debug: print "MarkerWindowInteractor.set_mouse1_to_move()"
self.set_mouse1_to_move()
def get_marker_at_point(self):
raise NotImplementedError
def set_image_data(self, imageData):
pass
def set_select_mode(self):
pass
def set_interact_mode(self):
if shared.debug: print "set_interact_mode()!!!!"
self.vtk_interact_mode = False
def set_vtkinteract_mode(self):
if shared.debug: print "set_vtkinteract_mode()!!!!"
if (self.vtk_interact_mode == False):
# mcc XXX: ignore this
#foo = self.AddObserver('InteractionEvent', self.vtkinteraction_event)
#print "MarkerWindowInteractor.set_vtkinteract_mode(): AddObserver call returns ", foo
self.vtk_interact_mode = True
def set_mouse1_to_interact(self):
if shared.debug: print "MarkerWindowInteractor.set_mouse1_to_interact()"
self.vtk_interact_mode = False
# XXX why does this not work
self.set_interact_mode()
try: del self.pressHooks[1]
except KeyError: pass
try: del self.releaseHooks[1]
except KeyError: pass
cursor = gtk.gdk.Cursor (INTERACT_CURSOR)
if self.window is not None:
self.window.set_cursor (cursor)
def vtkinteraction_event(self, *args):
if shared.debug: print "vtkinteraction_event!!!"
self.Render()
def set_mouse1_to_vtkinteract(self):
if shared.debug: print "MarkerWindowInteractor.set_mouse1_to_vtkinteract()"
self.set_vtkinteract_mode()
def button_down(*args):
#print "button down on brain interact."
x, y = self.GetEventPosition()
picker = vtk.vtkPropPicker()
picker.PickProp(x, y, self.renderer)
actor = picker.GetActor()
# now do something with the actor !!!
#print "actor is ", actor
def button_up(*args):
#print "button up on brain interact."
pass
self.pressHooks[1] = button_down
self.releaseHooks[1] = button_up
cursor = gtk.gdk.Cursor (INTERACT_CURSOR)
if self.window is not None:
self.window.set_cursor (cursor)
def set_mouse1_to_move(self):
self.set_select_mode()
cursor = gtk.gdk.Cursor (MOVE_CURSOR)
if self.window is not None:
self.window.set_cursor (cursor)
def set_mouse1_to_delete(self):
def button_up(*args):
pass
def button_down(*args):
marker = self.get_marker_at_point()
if marker is None: return
EventHandler().remove_marker(marker)
self.pressHooks[1] = button_down
self.releaseHooks[1] = button_up
self.set_select_mode()
cursor = gtk.gdk.Cursor (DELETE_CURSOR)
if self.window is not None:
self.window.set_cursor (cursor)
def set_mouse1_to_select(self):
def button_up(*args):
pass
def button_down(*args):
marker = self.get_marker_at_point()
if marker is None: return
isSelected = EventHandler().is_selected(marker)
if self.interactor.GetControlKey():
if isSelected: EventHandler().remove_selection(marker)
else: EventHandler().add_selection(marker)
else: EventHandler().select_new(marker)
self.pressHooks[1] = button_down
self.releaseHooks[1] = button_up
self.set_select_mode()
cursor = gtk.gdk.Cursor (SELECT_CURSOR)
if self.window is not None:
self.window.set_cursor (cursor)
def set_mouse1_to_label(self):
def button_up(*args):
pass
def button_down(*args):
marker = self.get_marker_at_point()
if marker is None: return
edit_label_of_marker(marker)
self.pressHooks[1] = button_down
self.releaseHooks[1] = button_up
self.set_select_mode()
cursor = gtk.gdk.Cursor (LABEL_CURSOR)
if self.window is not None:
self.window.set_cursor (cursor)
def set_mouse1_to_color(self):
def button_up(*args):
pass
def button_down(*args):
marker = self.get_marker_at_point()
if marker is None: return
color = EventHandler().get_default_color()
oldColor = marker.get_color()
UndoRegistry().push_command(
EventHandler().notify, 'color marker', marker, oldColor)
EventHandler().notify('color marker', marker, color)
EventHandler().notify('render now')
self.pressHooks[1] = button_down
self.releaseHooks[1] = button_up
self.set_select_mode()
cursor = gtk.gdk.Cursor (COLOR_CURSOR)
if self.window is not None:
self.window.set_cursor (cursor)
def OnButtonDown(self, wid, event):
"""Mouse button pressed."""
self.lastCamera = self.get_camera_fpu()
m = self.get_pointer()
ctrl, shift = self._GetCtrlShift(event)
if shared.debug: print "MarkerWindowInteractor.OnButtonDown(): ctrl=", ctrl,"shift=",shift,"button=",event.button
self._Iren.SetEventInformationFlipY(m[0], m[1], ctrl, shift,
chr(0), 0, None)
if shared.debug: print "MarkerWindowInteractor.OnButtonDown(): pressFuncs=", self.pressFuncs, "pressHooks=", self.pressHooks
if event.button in self.interactButtons:
if shared.debug: print "self.vtk_interact_mode =", self.vtk_interact_mode
if (self.vtk_interact_mode == False):
self.pressFuncs[event.button]()
try: self.pressHooks[event.button]()
except KeyError: pass
def OnButtonUp(self, wid, event):
"""Mouse button released."""
m = self.get_pointer()
ctrl, shift = self._GetCtrlShift(event)
if shared.debug: print "MarkerWindowInteractor.OnButtonUp(): ctrl=", ctrl,"shift=",shift, "button=",event.button
self._Iren.SetEventInformationFlipY(m[0], m[1], ctrl, shift,
chr(0), 0, None)
if event.button in self.interactButtons:
if shared.debug: print "self.vtk_interact_mode =", self.vtk_interact_mode
if (self.vtk_interact_mode == False):
self.releaseFuncs[event.button]()
try: self.releaseHooks[event.button]()
except KeyError: pass
thisCamera = self.get_camera_fpu()
try: self.lastCamera
except AttributeError: pass # this
else:
if thisCamera != self.lastCamera:
UndoRegistry().push_command(self.set_camera, self.lastCamera)
return True
def get_plane_points(self, pw):
return pw.GetOrigin(), pw.GetPoint1(), pw.GetPoint2()
def set_plane_points(self, pw, pnts):
o, p1, p2 = pnts
pw.SetOrigin(o)
pw.SetPoint1(p1)
pw.SetPoint2(p2)
pw.UpdatePlacement() | 0.229104 | 0.062962 |
from Switch import Switch
from pyof.foundation.basic_types import DPID
from FileWriter import FileWriter
from FlowModGenerator import FlowModGenerator
from PacketInEchoGenerator import PacketInEchoGenerator
import time
import argparse
def run_latency(warmup_time_s, upstream_ip, upstream_port, duration_s, ssl):
    """Run a single-switch latency benchmark for duration_s seconds and dump results to file."""
    dpid = DPID("00:00:00:00:00:00:00:01")
    bench_switch = Switch(dpid, warmup_time_s, upstream_ip, upstream_port, 1, False, ssl)
    bench_switch.start()
    time.sleep(duration_s)
    print("stop!")
    bench_switch.stop()
    FileWriter().write_results_to_file(bench_switch.get_results())
def run_flowmodgenerator(upstream_ip, upstream_port, duration_s, ssl, percentage_of_same_ips):
    """Drive a FlowModGenerator against the controller for duration_s seconds."""
    generator = FlowModGenerator(upstream_ip=upstream_ip,
                                 upstream_port=upstream_port,
                                 enable_tls=ssl,
                                 percentage_of_same_ips=percentage_of_same_ips)
    generator.start()
    time.sleep(duration_s)
    print("stop!")
    generator.stop()
def run_packetinechogenerator(upstream_ip, upstream_port, duration_s, ssl):
    """Drive a PacketInEchoGenerator against the controller for duration_s seconds."""
    generator = PacketInEchoGenerator(upstream_ip=upstream_ip,
                                      upstream_port=upstream_port,
                                      enable_tls=ssl)
    generator.start()
    time.sleep(duration_s)
    print("stop!")
    generator.stop()
def eval_throughput_results(switch):
    """Pair request/response measurements by xid and print throughput statistics.

    Each entry in switch.get_results() is a (timestamp_us, xid) tuple. A xid
    that occurs twice counts as one exchanged message; a leftover unmatched
    occurrence counts as a message that received no response.

    Returns (messages_count, messages_per_s, no_response_count).
    """
    from collections import Counter

    print("===== switch " + switch.dpid + " =====")
    results = switch.get_results()
    results.sort(key=lambda x: x[0])
    # Span between first and last measurement, in seconds (timestamps are in µs).
    time_range_s = (results[-1][0] - results[0][0]) / 1000000
    # Count occurrences of each xid once, instead of the former O(n^2)
    # pop(0)/linear-scan pairing: every pair of occurrences of a xid is one
    # exchanged message, any odd leftover is a message without a response.
    xid_counts = Counter(measurement[1] for measurement in results)
    messages_count = sum(c // 2 for c in xid_counts.values())
    no_response_count = sum(c % 2 for c in xid_counts.values())
    # Guard against a zero time range (all measurements in the same microsecond),
    # which previously raised ZeroDivisionError.
    messages_per_s = messages_count / time_range_s if time_range_s else 0.0
    print("exchanged " + str(messages_count) + " messages")
    print(str(messages_per_s) + " messages per second")
    print("did not receive a response for " + str(no_response_count) + " messages")
    print("===============================================")
    print("")
    return messages_count, messages_per_s, no_response_count
def find_xid(xid, results):
    """Return the index of the first measurement in `results` whose xid matches, or -1."""
    return next((i for i, measurement in enumerate(results) if measurement[1] == xid), -1)
def str2bool(v):
    """Interpret common truthy strings ("yes", "true", "t", "1", any case) as True."""
    truthy = {"yes", "true", "t", "1"}
    return v.lower() in truthy
if __name__ == '__main__':
    # Defaults used when the corresponding CLI flag is absent.
    ip = '127.0.0.1'
    port = 6653
    duration = 0
    warmup = 0
    number_of_switches = 1
    percentage_of_same_ips=0
    tls = False
    parser = argparse.ArgumentParser(description="Description for my parser")
    parser.add_argument("-i", "--ip", required=False)
    parser.add_argument("-p", "--port", required=False)
    parser.add_argument("-w", "--warmup", required=True)
    parser.add_argument("-d", "--duration", required=True)
    parser.add_argument("-m", "--mode", required=False)
    parser.add_argument("-t", "--tls", required=False)
    parser.add_argument("-si", "--percentage_of_same_ips", required=False)
    argument = parser.parse_args()
    # Override the defaults with any provided flags (argparse delivers strings).
    if argument.ip:
        ip = argument.ip
    if argument.tls:
        tls = str2bool(argument.tls)
    if argument.port:
        port = int(argument.port)
    if argument.warmup:
        warmup = int(argument.warmup)
    if argument.duration:
        duration = int(argument.duration)
    if argument.percentage_of_same_ips:
        percentage_of_same_ips = int(argument.percentage_of_same_ips)
    # Dispatch on benchmark mode; "latency" is the default when -m is omitted.
    if not argument.mode or argument.mode == "latency":
        run_latency(warmup, ip, port, duration, tls)
    elif argument.mode == "flowmod":
        run_flowmodgenerator(ip, port, duration, tls, percentage_of_same_ips)
    elif argument.mode == "packetin":
run_packetinechogenerator(ip, port, duration, tls) | PyBench/Main.py | from Switch import Switch
from pyof.foundation.basic_types import DPID
from FileWriter import FileWriter
from FlowModGenerator import FlowModGenerator
from PacketInEchoGenerator import PacketInEchoGenerator
import time
import argparse
def run_latency(warmup_time_s, upstream_ip, upstream_port, duration_s, ssl):
switch = Switch(DPID("00:00:00:00:00:00:00:01"), warmup_time_s, upstream_ip, upstream_port, 1, False, ssl)
switch.start()
time.sleep(duration_s)
print("stop!")
switch.stop()
FileWriter().write_results_to_file(switch.get_results())
def run_flowmodgenerator(upstream_ip, upstream_port, duration_s, ssl, percentage_of_same_ips):
flowModGenerator = FlowModGenerator(upstream_ip=upstream_ip, upstream_port=upstream_port, enable_tls=ssl, percentage_of_same_ips=percentage_of_same_ips)
flowModGenerator.start()
time.sleep(duration_s)
print("stop!")
flowModGenerator.stop()
def run_packetinechogenerator(upstream_ip, upstream_port, duration_s, ssl):
packetInEchoGenerator = PacketInEchoGenerator(upstream_ip=upstream_ip, upstream_port=upstream_port, enable_tls=ssl)
packetInEchoGenerator.start()
time.sleep(duration_s)
print("stop!")
packetInEchoGenerator.stop()
def eval_throughput_results(switch):
print("===== switch " + switch.dpid + " =====")
results = switch.get_results()
results.sort(key=lambda x: x[0])
time_range_s = (results[-1][0] - results[0][0]) / 1000000
messages_count = 0
no_response_count = 0
while len(results) > 0:
measurement = results.pop(0)
other_measurement = find_xid(measurement[1], results)
if(other_measurement != -1):
results.pop(other_measurement)
messages_count += 1
else:
no_response_count += 1
messages_per_s = messages_count / time_range_s
print("exchanged " + str(messages_count) + " messages")
print(str(messages_per_s) + " messages per second")
print("did not receive a response for " + str(no_response_count) + " messages")
print("===============================================")
print("")
return messages_count, messages_per_s, no_response_count
def find_xid(xid, results):
for i, measurement in enumerate(results):
if measurement[1] == xid:
return i
return -1
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
if __name__ == '__main__':
ip = '127.0.0.1'
port = 6653
duration = 0
warmup = 0
number_of_switches = 1
percentage_of_same_ips=0
tls = False
parser = argparse.ArgumentParser(description="Description for my parser")
parser.add_argument("-i", "--ip", required=False)
parser.add_argument("-p", "--port", required=False)
parser.add_argument("-w", "--warmup", required=True)
parser.add_argument("-d", "--duration", required=True)
parser.add_argument("-m", "--mode", required=False)
parser.add_argument("-t", "--tls", required=False)
parser.add_argument("-si", "--percentage_of_same_ips", required=False)
argument = parser.parse_args()
if argument.ip:
ip = argument.ip
if argument.tls:
tls = str2bool(argument.tls)
if argument.port:
port = int(argument.port)
if argument.warmup:
warmup = int(argument.warmup)
if argument.duration:
duration = int(argument.duration)
if argument.percentage_of_same_ips:
percentage_of_same_ips = int(argument.percentage_of_same_ips)
if not argument.mode or argument.mode == "latency":
run_latency(warmup, ip, port, duration, tls)
elif argument.mode == "flowmod":
run_flowmodgenerator(ip, port, duration, tls, percentage_of_same_ips)
elif argument.mode == "packetin":
run_packetinechogenerator(ip, port, duration, tls) | 0.580947 | 0.239572 |
from __future__ import absolute_import, division, print_function, unicode_literals
from glob import glob
from logging import getLogger
from os import listdir
from os.path import basename, isdir, isfile, join, lexists, dirname
from ..base.constants import CONDA_TARBALL_EXTENSION, PREFIX_MAGIC_FILE
from ..base.context import context
from ..common.compat import JSONDecodeError, itervalues, string_types, with_metaclass
from ..common.constants import NULL
from ..common.path import get_python_site_packages_short_path, win_path_ok
from ..common.serialize import json_load
from ..exceptions import (BasicClobberError, CondaDependencyError, CorruptedEnvironmentError,
maybe_raise)
from ..gateways.disk.create import write_as_json_to_file
from ..gateways.disk.delete import rm_rf
from ..gateways.disk.test import file_path_is_writable
from ..models.channel import Channel
from ..models.enums import PackageType, PathType
from ..models.match_spec import MatchSpec
from ..models.prefix_graph import PrefixGraph
from ..models.records import (PackageRecord, PathData, PathDataV1, PathsData, PrefixRecord)
try:
from cytoolz.itertoolz import concat, concatv
except ImportError: # pragma: no cover
from .._vendor.toolz.itertoolz import concat, concatv # NOQA
log = getLogger(__name__)
class PrefixDataType(type):
    """Metaclass that memoizes PrefixData instances per prefix path."""

    def __call__(cls, prefix_path, pip_interop_enabled=None):
        # Reuse the cached instance for a path we have already seen.
        cache = PrefixData._cache_
        if prefix_path in cache:
            return cache[prefix_path]
        # Passing an existing PrefixData through is a no-op.
        if isinstance(prefix_path, PrefixData):
            return prefix_path
        # First sighting of this path: build, cache, and return a new instance.
        instance = super(PrefixDataType, cls).__call__(prefix_path, pip_interop_enabled)
        cache[prefix_path] = instance
        return instance
@with_metaclass(PrefixDataType)
class PrefixData(object):
    """Dictionary-like view of the installed-package records of one conda prefix."""
    # Class-level instance cache, keyed by prefix path (see PrefixDataType).
    _cache_ = {}

    def __init__(self, prefix_path, pip_interop_enabled=None):
        # pip_interop_enabled is a temporary parameter; DO NOT USE
        # TODO: when removing pip_interop_enabled, also remove from meta class
        self.prefix_path = prefix_path
        # Lazily populated by load(); None means "not loaded yet".
        self.__prefix_records = None
        # NULL sentinel means "not probed yet"; see the is_writable property.
        self.__is_writable = NULL
        self._pip_interop_enabled = (context.pip_interop_enabled
                                     if pip_interop_enabled is None
                                     else pip_interop_enabled)
def load(self):
    """(Re)read all conda-meta/*.json records; optionally overlay pip-installed packages."""
    self.__prefix_records = {}
    for meta_file in glob(join(self.prefix_path, 'conda-meta', '*.json')):
        self._load_single_record(meta_file)
    if self._pip_interop_enabled:
        self._load_site_packages()

def reload(self):
    """Force a fresh load from disk; returns self for chaining."""
    self.load()
    return self
def insert(self, prefix_record):
    """Write prefix_record to conda-meta/ on disk and register it in memory."""
    assert prefix_record.name not in self._prefix_records
    assert prefix_record.fn.endswith(CONDA_TARBALL_EXTENSION)
    # conda-meta file name: the tarball name with its extension swapped for .json
    filename = prefix_record.fn[:-len(CONDA_TARBALL_EXTENSION)] + '.json'
    prefix_record_json_path = join(self.prefix_path, 'conda-meta', filename)
    if lexists(prefix_record_json_path):
        # maybe_raise lets context decide between erroring out and clobbering.
        maybe_raise(BasicClobberError(
            source_path=None,
            target_path=prefix_record_json_path,
            context=context,
        ), context)
        rm_rf(prefix_record_json_path)
    write_as_json_to_file(prefix_record_json_path, prefix_record)
    self._prefix_records[prefix_record.name] = prefix_record
def remove(self, package_name):
    """Delete the package's conda-meta file (when the prefix is writable) and drop it from memory."""
    assert package_name in self._prefix_records
    prefix_record = self._prefix_records[package_name]
    filename = prefix_record.fn[:-len(CONDA_TARBALL_EXTENSION)] + '.json'
    conda_meta_full_path = join(self.prefix_path, 'conda-meta', filename)
    if self.is_writable:
        rm_rf(conda_meta_full_path)
    del self._prefix_records[package_name]
def get(self, package_name, default=NULL):
    """Return the record for package_name; fall back to default, or raise KeyError if none given."""
    try:
        return self._prefix_records[package_name]
    except KeyError:
        if default is NULL:
            raise
        return default
def iter_records(self):
    """Iterate over all PrefixRecords in this prefix (arbitrary order)."""
    return itervalues(self._prefix_records)

def iter_records_sorted(self):
    """Iterate over records in dependency (topologically sorted) order."""
    prefix_graph = PrefixGraph(self.iter_records())
    return iter(prefix_graph.graph)

def all_subdir_urls(self):
    """Collect the set of distinct channel subdir URLs across all records."""
    subdir_urls = set()
    for prefix_record in itervalues(self._prefix_records):
        subdir_url = prefix_record.channel.subdir_url
        if subdir_url and subdir_url not in subdir_urls:
            log.debug("adding subdir url %s for %s", subdir_url, prefix_record)
            subdir_urls.add(subdir_url)
    return subdir_urls
def query(self, package_ref_or_match_spec):
    """Yield records matching a MatchSpec (or spec string), or equal to a PackageRecord.

    Returns a generator.
    """
    param = package_ref_or_match_spec
    # Spec strings are promoted to MatchSpec objects first.
    if isinstance(param, string_types):
        param = MatchSpec(param)
    if isinstance(param, MatchSpec):
        return (prefix_rec for prefix_rec in self.iter_records()
                if param.match(prefix_rec))
    else:
        assert isinstance(param, PackageRecord)
        return (prefix_rec for prefix_rec in self.iter_records() if prefix_rec == param)
@property
def _prefix_records(self):
    """Lazily loaded mapping of package name -> PrefixRecord for this prefix."""
    # Load from disk on first access only. The previous
    # `self.__prefix_records or self.load() or self.__prefix_records` chain
    # re-ran load() on every access whenever the prefix had zero records,
    # because an empty dict is falsy.
    if self.__prefix_records is None:
        self.load()
    return self.__prefix_records
def _load_single_record(self, prefix_record_json_path):
    """Parse one conda-meta/*.json file into a PrefixRecord and register it in memory."""
    log.trace("loading prefix record %s", prefix_record_json_path)
    with open(prefix_record_json_path) as fh:
        try:
            json_data = json_load(fh.read())
        except JSONDecodeError:
            raise CorruptedEnvironmentError(self.prefix_path, prefix_record_json_path)
        # TODO: consider, at least in memory, storing prefix_record_json_path as part
        # of PrefixRecord
        prefix_record = PrefixRecord(**json_data)
        # check that prefix record json filename conforms to name-version-build
        # apparently implemented as part of #2638 to resolve #2599
        try:
            # File name is "<name>-<version>-<build>.json"; [:-5] strips ".json".
            n, v, b = basename(prefix_record_json_path)[:-5].rsplit('-', 2)
            if (n, v, b) != (prefix_record.name, prefix_record.version, prefix_record.build):
                raise ValueError()
        except ValueError:
            # File name and record metadata disagree: treat the record as malformed.
            log.warn("Ignoring malformed prefix record at: %s", prefix_record_json_path)
            # TODO: consider just deleting here this record file in the future
            return
        self.__prefix_records[prefix_record.name] = prefix_record
@property
def is_writable(self):
    """True/False once probed; None when the prefix magic file is missing.

    The probe result is computed once and cached (NULL marks "not probed yet").
    """
    if self.__is_writable == NULL:
        test_path = join(self.prefix_path, PREFIX_MAGIC_FILE)
        if not isfile(test_path):
            # No magic file: not a recognizable conda prefix, writability unknown.
            is_writable = None
        else:
            is_writable = file_path_is_writable(test_path)
        self.__is_writable = is_writable
    return self.__is_writable

def _has_python(self):
    # True when a 'python' package record is present in this prefix.
    return 'python' in self._prefix_records
def _load_site_packages(self):
    """Overlay pip/pypi-installed packages found in site-packages as PrefixRecords.

    Scans site-packages for .dist-info / .egg-info / .egg-link "anchor files",
    drops in-memory conda records whose anchor files have been clobbered, and
    registers synthetic channel-'pypi' records for the non-conda distributions.
    """
    python_record = next(
        (prefix_rec for prefix_rec in itervalues(self.__prefix_records)
         if prefix_rec.name == 'python'),
        None
    )
    if not python_record:
        # No python in this prefix, hence no site-packages to scan.
        return
    prefix_graph = PrefixGraph(self.iter_records())
    known_python_records = prefix_graph.all_descendants(python_record)
    def norm_package_name(name):
        # PyPI treats '.', '_' and '-' (and case) as equivalent in names.
        return name.replace('.', '-').replace('_', '-').lower()
    anchor_file_endings = ('.egg-info/PKG-INFO', '.dist-info/RECORD', '.egg-info')
    # Map anchor file path -> the conda PrefixRecord that owns it.
    conda_python_packages = dict(
        ((af, prefix_rec)
         for prefix_rec in known_python_records
         for af in prefix_rec.files
         if af.endswith(anchor_file_endings) and 'site-packages' in af)
    )
    all_sp_anchor_files = set()
    site_packages_dir = get_python_site_packages_short_path(python_record.version)
    sp_dir_full_path = join(self.prefix_path, win_path_ok(site_packages_dir))
    sp_anchor_endings = ('.dist-info', '.egg-info', '.egg-link')
    if not isdir(sp_dir_full_path):
        return
    # Collect the canonical anchor file for every distribution present on disk.
    for fn in listdir(sp_dir_full_path):
        if fn.endswith(sp_anchor_endings):
            if fn.endswith('.dist-info'):
                anchor_file = "%s/%s/%s" % (site_packages_dir, fn, 'RECORD')
            elif fn.endswith(".egg-info"):
                # .egg-info can be a single file or a directory with PKG-INFO.
                if isfile(join(sp_dir_full_path, fn)):
                    anchor_file = "%s/%s" % (site_packages_dir, fn)
                else:
                    anchor_file = "%s/%s/%s" % (site_packages_dir, fn, "PKG-INFO")
            elif fn.endswith('.egg-link'):
                anchor_file = "%s/%s" % (site_packages_dir, fn)
            elif fn.endswith('.pth'):
                continue
            else:
                continue
            all_sp_anchor_files.add(anchor_file)
    _conda_anchor_files = set(conda_python_packages)
    clobbered_conda_anchor_files = _conda_anchor_files - all_sp_anchor_files
    non_conda_anchor_files = all_sp_anchor_files - _conda_anchor_files
    # If there's a mismatch for anchor files between what conda expects for a package
    # based on conda-meta, and for what is actually in site-packages, then we'll delete
    # the in-memory record for the conda package. In the future, we should consider
    # also deleting the record on disk in the conda-meta/ directory.
    for conda_anchor_file in clobbered_conda_anchor_files:
        del self._prefix_records[conda_python_packages[conda_anchor_file].name]
    # TODO: only compatible with pip 9.0; consider writing this by hand
    from pip._vendor.distlib.database import EggInfoDistribution, InstalledDistribution
    from pip._vendor.distlib.metadata import MetadataConflictError
    from pip._vendor.distlib.util import parse_requirement
    def get_pydist(anchor_file):
        # Resolve an anchor file to (package_type, site-packages reference,
        # distlib distribution object) -- pydist is None on metadata conflict.
        if ".dist-info" in anchor_file:
            sp_reference = basename(dirname(anchor_file))
            dist_file = join(self.prefix_path, win_path_ok(dirname(anchor_file)))
            dist_cls = InstalledDistribution
            package_type = PackageType.SHADOW_PYTHON_DIST_INFO
        elif anchor_file.endswith(".egg-info"):
            sp_reference = basename(anchor_file)
            dist_file = join(self.prefix_path, win_path_ok(anchor_file))
            dist_cls = EggInfoDistribution
            package_type = PackageType.SHADOW_PYTHON_EGG_INFO_FILE
        elif ".egg-info" in anchor_file:
            sp_reference = basename(dirname(anchor_file))
            dist_file = join(self.prefix_path, win_path_ok(dirname(anchor_file)))
            dist_cls = EggInfoDistribution
            package_type = PackageType.SHADOW_PYTHON_EGG_INFO_DIR
        elif anchor_file.endswith(".egg-link"):
            raise NotImplementedError()
        else:
            raise NotImplementedError()
        try:
            pydist = dist_cls(dist_file)
        except MetadataConflictError:
            print("MetadataConflictError:", anchor_file)
            pydist = None
        return package_type, sp_reference, pydist
    def get_python_rec(anchor_file):
        # Build a synthetic PrefixRecord (channel 'pypi') for one distribution,
        # or None when its metadata could not be read.
        package_type, sp_reference, pydist = get_pydist(anchor_file)
        if pydist is None:
            return None
        # TODO: normalize names against '.', '-', '_'
        # TODO: ensure that this dist is *actually* the dist that matches conda-meta
        if package_type == PackageType.SHADOW_PYTHON_EGG_INFO_FILE:
            # Single-file egg-info carries no per-file manifest.
            paths_data = None
        elif package_type == PackageType.SHADOW_PYTHON_DIST_INFO:
            _paths_data = []
            # RECORD rows are (path, "sha256=<digest>" or empty, size or empty).
            for _path, _hash, _size in pydist.list_installed_files():
                if _hash:
                    assert _hash.startswith('sha256='), (anchor_file, _hash)
                    sha256 = _hash[7:]
                else:
                    sha256 = None
                _size = int(_size) if _size else None
                _paths_data.append(PathDataV1(
                    _path=_path,
                    path_type=PathType.hardlink,
                    sha256=sha256,
                    size_in_bytes=_size
                ))
            paths_data = PathsData(paths_version=1, paths=_paths_data)
        elif package_type == PackageType.SHADOW_PYTHON_EGG_INFO_DIR:
            _paths_data = []
            # TODO: Don't use list_installed_files() here. Read SOURCES.txt directly.
            for _path, _, _ in pydist.list_installed_files():
                _paths_data.append(PathData(
                    _path=_path,
                    path_type=PathType.hardlink,
                ))
            paths_data = PathsData(paths_version=1, paths=_paths_data)
        else:
            raise NotImplementedError()
        # TODO: need to add entry points, "exports," and other files that might not be in RECORD  # NOQA
        depends = tuple(
            req.name for req in
            (parse_requirement(r) for r in pydist.run_requires)
        )
        # TODO: need to add python (with version?) to deps
        python_rec = PrefixRecord(
            package_type=package_type,
            namespace='python',
            name=pydist.name.lower(),
            version=pydist.version,
            channel=Channel('pypi'),
            subdir='pypi',
            fn=sp_reference,
            build='pypi_0',
            build_number=0,
            paths_data=paths_data,
            depends=depends,
        )
        return python_rec
    # .egg-link files (pip "develop" installs) point outside the prefix; defer them.
    egg_link_files = []
    for anchor_file in non_conda_anchor_files:
        if anchor_file.endswith('.egg-link'):
            egg_link_files.append(anchor_file)
            continue
        python_rec = get_python_rec(anchor_file)
        self.__prefix_records[python_rec.name] = python_rec
    for egg_link_file in egg_link_files:
        # The first line of an .egg-link file is the source checkout directory.
        with open(join(self.prefix_path, win_path_ok(egg_link_file))) as fh:
            egg_link_contents = fh.readlines()[0].strip()
        egg_info_fns = glob(join(egg_link_contents, "*.egg-info"))
        if not egg_info_fns:
            continue
        assert len(egg_info_fns) == 1, (egg_link_file, egg_info_fns)
        egg_info_full_path = join(egg_link_contents, egg_info_fns[0])
        if isdir(egg_info_full_path):
            egg_info_full_path = join(egg_info_full_path, "PKG-INFO")
        python_rec = get_python_rec(egg_info_full_path)
        python_rec.package_type = PackageType.SHADOW_PYTHON_EGG_LINK
        self.__prefix_records[python_rec.name] = python_rec
def get_python_version_for_prefix(prefix):
    """Return the MAJOR.MINOR python version string for the prefix, or None.

    Raises CondaDependencyError when more than one python record is present.
    """
    py_record_iter = (rcrd for rcrd in PrefixData(prefix).iter_records() if rcrd.name == 'python')
    record = next(py_record_iter, None)
    if record is None:
        return None
    next_record = next(py_record_iter, None)
    if next_record is not None:
        raise CondaDependencyError("multiple python records found in prefix %s" % prefix)
    # Join the first two version components instead of slicing the first three
    # characters: 'version[:3]' returned "3.1" for python 3.10+.
    return '.'.join(record.version.split('.')[:2])
def delete_prefix_from_linked_data(path):
    """Evict from PrefixData._cache_ the entry whose prefix contains `path`.

    Here, path may be a complete prefix or a dist inside a prefix. Returns
    True when a cache entry was removed, False otherwise.
    """
    # Longest matching cached prefix wins (reverse-sorted keys).
    linked_data_path = next((key for key in sorted(PrefixData._cache_, reverse=True)
                             if path.startswith(key)),
                            None)
    if linked_data_path:
        del PrefixData._cache_[linked_data_path]
        return True
    return False
from glob import glob
from logging import getLogger
from os import listdir
from os.path import basename, isdir, isfile, join, lexists, dirname
from ..base.constants import CONDA_TARBALL_EXTENSION, PREFIX_MAGIC_FILE
from ..base.context import context
from ..common.compat import JSONDecodeError, itervalues, string_types, with_metaclass
from ..common.constants import NULL
from ..common.path import get_python_site_packages_short_path, win_path_ok
from ..common.serialize import json_load
from ..exceptions import (BasicClobberError, CondaDependencyError, CorruptedEnvironmentError,
maybe_raise)
from ..gateways.disk.create import write_as_json_to_file
from ..gateways.disk.delete import rm_rf
from ..gateways.disk.test import file_path_is_writable
from ..models.channel import Channel
from ..models.enums import PackageType, PathType
from ..models.match_spec import MatchSpec
from ..models.prefix_graph import PrefixGraph
from ..models.records import (PackageRecord, PathData, PathDataV1, PathsData, PrefixRecord)
try:
from cytoolz.itertoolz import concat, concatv
except ImportError: # pragma: no cover
from .._vendor.toolz.itertoolz import concat, concatv # NOQA
log = getLogger(__name__)
class PrefixDataType(type):
"""Basic caching of PrefixData instance objects."""
def __call__(cls, prefix_path, pip_interop_enabled=None):
if prefix_path in PrefixData._cache_:
return PrefixData._cache_[prefix_path]
elif isinstance(prefix_path, PrefixData):
return prefix_path
else:
prefix_data_instance = super(PrefixDataType, cls).__call__(prefix_path,
pip_interop_enabled)
PrefixData._cache_[prefix_path] = prefix_data_instance
return prefix_data_instance
@with_metaclass(PrefixDataType)
class PrefixData(object):
    """In-memory interface to the installed-package metadata of a conda prefix.

    Records are read from ``<prefix>/conda-meta/*.json`` and, when pip
    interoperability is enabled, augmented with python packages discovered in
    ``site-packages``. Instances are cached per prefix path by PrefixDataType.
    """

    # prefix_path -> PrefixData instance; maintained by PrefixDataType.__call__
    _cache_ = {}

    def __init__(self, prefix_path, pip_interop_enabled=None):
        # pip_interop_enabled is a temporary parameter; DO NOT USE
        # TODO: when removing pip_interop_enabled, also remove from meta class
        self.prefix_path = prefix_path
        self.__prefix_records = None  # loaded lazily via the _prefix_records property
        self.__is_writable = NULL     # computed lazily via the is_writable property
        self._pip_interop_enabled = (context.pip_interop_enabled
                                     if pip_interop_enabled is None
                                     else pip_interop_enabled)

    def load(self):
        """(Re)populate the record map from every conda-meta/*.json in the prefix."""
        self.__prefix_records = {}
        for meta_file in glob(join(self.prefix_path, 'conda-meta', '*.json')):
            self._load_single_record(meta_file)
        if self._pip_interop_enabled:
            self._load_site_packages()

    def reload(self):
        """Force a fresh load from disk; returns self for chaining."""
        self.load()
        return self

    def insert(self, prefix_record):
        """Write *prefix_record* to conda-meta/ and register it in memory."""
        assert prefix_record.name not in self._prefix_records
        assert prefix_record.fn.endswith(CONDA_TARBALL_EXTENSION)
        filename = prefix_record.fn[:-len(CONDA_TARBALL_EXTENSION)] + '.json'
        prefix_record_json_path = join(self.prefix_path, 'conda-meta', filename)
        if lexists(prefix_record_json_path):
            # An existing json at this path is a clobber; maybe_raise decides
            # (based on context) whether this is fatal or merely removed.
            maybe_raise(BasicClobberError(
                source_path=None,
                target_path=prefix_record_json_path,
                context=context,
            ), context)
            rm_rf(prefix_record_json_path)
        write_as_json_to_file(prefix_record_json_path, prefix_record)
        self._prefix_records[prefix_record.name] = prefix_record

    def remove(self, package_name):
        """Delete the on-disk json (when writable) and in-memory record for *package_name*."""
        assert package_name in self._prefix_records
        prefix_record = self._prefix_records[package_name]
        filename = prefix_record.fn[:-len(CONDA_TARBALL_EXTENSION)] + '.json'
        conda_meta_full_path = join(self.prefix_path, 'conda-meta', filename)
        if self.is_writable:
            rm_rf(conda_meta_full_path)
        del self._prefix_records[package_name]

    def get(self, package_name, default=NULL):
        """dict.get-style lookup; raises KeyError when no default is given."""
        try:
            return self._prefix_records[package_name]
        except KeyError:
            if default is not NULL:
                return default
            else:
                raise

    def iter_records(self):
        return itervalues(self._prefix_records)

    def iter_records_sorted(self):
        # Topologically sorted via the dependency graph of the prefix.
        prefix_graph = PrefixGraph(self.iter_records())
        return iter(prefix_graph.graph)

    def all_subdir_urls(self):
        """Return the set of distinct channel subdir urls across all records."""
        subdir_urls = set()
        for prefix_record in itervalues(self._prefix_records):
            subdir_url = prefix_record.channel.subdir_url
            if subdir_url and subdir_url not in subdir_urls:
                log.debug("adding subdir url %s for %s", subdir_url, prefix_record)
                subdir_urls.add(subdir_url)
        return subdir_urls

    def query(self, package_ref_or_match_spec):
        """Yield records matching a MatchSpec (or spec string) or equal to a PackageRecord."""
        # returns a generator
        param = package_ref_or_match_spec
        if isinstance(param, string_types):
            param = MatchSpec(param)
        if isinstance(param, MatchSpec):
            return (prefix_rec for prefix_rec in self.iter_records()
                    if param.match(prefix_rec))
        else:
            assert isinstance(param, PackageRecord)
            return (prefix_rec for prefix_rec in self.iter_records() if prefix_rec == param)

    @property
    def _prefix_records(self):
        # Lazy-load idiom: load() returns None, so when the map is still None
        # this expression triggers load() and then yields the populated map.
        return self.__prefix_records or self.load() or self.__prefix_records

    def _load_single_record(self, prefix_record_json_path):
        """Parse one conda-meta json file into a PrefixRecord, skipping malformed names."""
        log.trace("loading prefix record %s", prefix_record_json_path)
        with open(prefix_record_json_path) as fh:
            try:
                json_data = json_load(fh.read())
            except JSONDecodeError:
                raise CorruptedEnvironmentError(self.prefix_path, prefix_record_json_path)
        # TODO: consider, at least in memory, storing prefix_record_json_path as part
        # of PrefixRecord
        prefix_record = PrefixRecord(**json_data)
        # check that prefix record json filename conforms to name-version-build
        # apparently implemented as part of #2638 to resolve #2599
        try:
            # strip ".json" and split from the right into (name, version, build)
            n, v, b = basename(prefix_record_json_path)[:-5].rsplit('-', 2)
            if (n, v, b) != (prefix_record.name, prefix_record.version, prefix_record.build):
                raise ValueError()
        except ValueError:
            log.warn("Ignoring malformed prefix record at: %s", prefix_record_json_path)
            # TODO: consider just deleting here this record file in the future
            return
        self.__prefix_records[prefix_record.name] = prefix_record

    @property
    def is_writable(self):
        # Tri-state result cached after first evaluation:
        #   None  -> magic file missing, writability unknown
        #   bool  -> writability of the prefix magic file
        if self.__is_writable == NULL:
            test_path = join(self.prefix_path, PREFIX_MAGIC_FILE)
            if not isfile(test_path):
                is_writable = None
            else:
                is_writable = file_path_is_writable(test_path)
            self.__is_writable = is_writable
        return self.__is_writable

    def _has_python(self):
        return 'python' in self._prefix_records

    def _load_site_packages(self):
        """Discover pip-installed python packages in site-packages and merge them in.

        Also drops in-memory conda records whose site-packages anchor files have
        been clobbered by a pip install.
        """
        python_record = next(
            (prefix_rec for prefix_rec in itervalues(self.__prefix_records)
             if prefix_rec.name == 'python'),
            None
        )
        if not python_record:
            # No python in the prefix; nothing pip-related can exist.
            return
        prefix_graph = PrefixGraph(self.iter_records())
        known_python_records = prefix_graph.all_descendants(python_record)

        # NOTE(review): norm_package_name is defined but never used in this
        # method — candidate for removal; confirm no external dependency.
        def norm_package_name(name):
            return name.replace('.', '-').replace('_', '-').lower()

        # Map anchor file (e.g. foo.dist-info/RECORD) -> conda record owning it.
        anchor_file_endings = ('.egg-info/PKG-INFO', '.dist-info/RECORD', '.egg-info')
        conda_python_packages = dict(
            ((af, prefix_rec)
             for prefix_rec in known_python_records
             for af in prefix_rec.files
             if af.endswith(anchor_file_endings) and 'site-packages' in af)
        )

        # Collect anchor files actually present on disk in site-packages.
        all_sp_anchor_files = set()
        site_packages_dir = get_python_site_packages_short_path(python_record.version)
        sp_dir_full_path = join(self.prefix_path, win_path_ok(site_packages_dir))
        sp_anchor_endings = ('.dist-info', '.egg-info', '.egg-link')
        if not isdir(sp_dir_full_path):
            return
        for fn in listdir(sp_dir_full_path):
            if fn.endswith(sp_anchor_endings):
                if fn.endswith('.dist-info'):
                    anchor_file = "%s/%s/%s" % (site_packages_dir, fn, 'RECORD')
                elif fn.endswith(".egg-info"):
                    if isfile(join(sp_dir_full_path, fn)):
                        # single-file egg-info is itself the anchor
                        anchor_file = "%s/%s" % (site_packages_dir, fn)
                    else:
                        anchor_file = "%s/%s/%s" % (site_packages_dir, fn, "PKG-INFO")
                elif fn.endswith('.egg-link'):
                    anchor_file = "%s/%s" % (site_packages_dir, fn)
                elif fn.endswith('.pth'):
                    continue
                else:
                    continue
                all_sp_anchor_files.add(anchor_file)

        _conda_anchor_files = set(conda_python_packages)
        clobbered_conda_anchor_files = _conda_anchor_files - all_sp_anchor_files
        non_conda_anchor_files = all_sp_anchor_files - _conda_anchor_files

        # If there's a mismatch for anchor files between what conda expects for a package
        # based on conda-meta, and for what is actually in site-packages, then we'll delete
        # the in-memory record for the conda package. In the future, we should consider
        # also deleting the record on disk in the conda-meta/ directory.
        for conda_anchor_file in clobbered_conda_anchor_files:
            del self._prefix_records[conda_python_packages[conda_anchor_file].name]

        # TODO: only compatible with pip 9.0; consider writing this by hand
        from pip._vendor.distlib.database import EggInfoDistribution, InstalledDistribution
        from pip._vendor.distlib.metadata import MetadataConflictError
        from pip._vendor.distlib.util import parse_requirement

        def get_pydist(anchor_file):
            # Classify the anchor file and build the matching distlib distribution.
            if ".dist-info" in anchor_file:
                sp_reference = basename(dirname(anchor_file))
                dist_file = join(self.prefix_path, win_path_ok(dirname(anchor_file)))
                dist_cls = InstalledDistribution
                package_type = PackageType.SHADOW_PYTHON_DIST_INFO
            elif anchor_file.endswith(".egg-info"):
                sp_reference = basename(anchor_file)
                dist_file = join(self.prefix_path, win_path_ok(anchor_file))
                dist_cls = EggInfoDistribution
                package_type = PackageType.SHADOW_PYTHON_EGG_INFO_FILE
            elif ".egg-info" in anchor_file:
                sp_reference = basename(dirname(anchor_file))
                dist_file = join(self.prefix_path, win_path_ok(dirname(anchor_file)))
                dist_cls = EggInfoDistribution
                package_type = PackageType.SHADOW_PYTHON_EGG_INFO_DIR
            elif anchor_file.endswith(".egg-link"):
                raise NotImplementedError()
            else:
                raise NotImplementedError()
            try:
                pydist = dist_cls(dist_file)
            except MetadataConflictError:
                print("MetadataConflictError:", anchor_file)
                pydist = None
            return package_type, sp_reference, pydist

        def get_python_rec(anchor_file):
            # Build a synthetic PrefixRecord (channel 'pypi') from distlib metadata.
            package_type, sp_reference, pydist = get_pydist(anchor_file)
            if pydist is None:
                return None
            # distlib examples:
            #   pydist.provides     => [u'skdata (0.0.4)']
            #   pydist.run_requires => set([u'joblib', u'nose (>=1.0)', ...])
            #   pydist.list_installed_files() => iterable of (path, sha256, size)
            # TODO: normalize names against '.', '-', '_'
            # TODO: ensure that this dist is *actually* the dist that matches conda-meta
            if package_type == PackageType.SHADOW_PYTHON_EGG_INFO_FILE:
                paths_data = None
            elif package_type == PackageType.SHADOW_PYTHON_DIST_INFO:
                _paths_data = []
                for _path, _hash, _size in pydist.list_installed_files():
                    if _hash:
                        assert _hash.startswith('sha256='), (anchor_file, _hash)
                        sha256 = _hash[7:]
                    else:
                        sha256 = None
                    _size = int(_size) if _size else None
                    _paths_data.append(PathDataV1(
                        _path=_path,
                        path_type=PathType.hardlink,
                        sha256=sha256,
                        size_in_bytes=_size
                    ))
                paths_data = PathsData(paths_version=1, paths=_paths_data)
            elif package_type == PackageType.SHADOW_PYTHON_EGG_INFO_DIR:
                _paths_data = []
                # TODO: Don't use list_installed_files() here. Read SOURCES.txt directly.
                for _path, _, _ in pydist.list_installed_files():
                    _paths_data.append(PathData(
                        _path=_path,
                        path_type=PathType.hardlink,
                    ))
                paths_data = PathsData(paths_version=1, paths=_paths_data)
            else:
                raise NotImplementedError()
            # TODO: need to add entry points, "exports," and other files that might not be in RECORD  # NOQA
            depends = tuple(
                req.name for req in
                # parse_requirement(u'nose (>=1.0)').name => u'nose'
                (parse_requirement(r) for r in pydist.run_requires)
            )
            # TODO: need to add python (with version?) to deps
            python_rec = PrefixRecord(
                package_type=package_type,
                namespace='python',
                name=pydist.name.lower(),
                version=pydist.version,
                channel=Channel('pypi'),
                subdir='pypi',
                fn=sp_reference,
                build='pypi_0',
                build_number=0,
                paths_data=paths_data,
                depends=depends,
            )
            return python_rec

        # egg-links are deferred: they point outside the prefix and need extra resolution.
        egg_link_files = []
        for anchor_file in non_conda_anchor_files:
            if anchor_file.endswith('.egg-link'):
                egg_link_files.append(anchor_file)
                continue
            # NOTE(review): get_python_rec can return None (MetadataConflictError
            # path in get_pydist); python_rec.name would then raise
            # AttributeError here — confirm whether that case is reachable.
            python_rec = get_python_rec(anchor_file)
            self.__prefix_records[python_rec.name] = python_rec

        for egg_link_file in egg_link_files:
            # First line of the .egg-link is the directory containing the dist.
            with open(join(self.prefix_path, win_path_ok(egg_link_file))) as fh:
                egg_link_contents = fh.readlines()[0].strip()
            egg_info_fns = glob(join(egg_link_contents, "*.egg-info"))
            if not egg_info_fns:
                continue
            assert len(egg_info_fns) == 1, (egg_link_file, egg_info_fns)
            egg_info_full_path = join(egg_link_contents, egg_info_fns[0])
            if isdir(egg_info_full_path):
                egg_info_full_path = join(egg_info_full_path, "PKG-INFO")
            python_rec = get_python_rec(egg_info_full_path)
            python_rec.package_type = PackageType.SHADOW_PYTHON_EGG_LINK
            self.__prefix_records[python_rec.name] = python_rec
def get_python_version_for_prefix(prefix):
    """Return the MAJOR.MINOR python version installed in *prefix*, or None.

    Args:
        prefix: path to a conda prefix (passed through PrefixData, so cached
            instances are reused).

    Returns:
        A string such as "2.7" or "3.5", or None when no python record exists.

    Raises:
        CondaDependencyError: if more than one python record is found.
    """
    py_record_iter = (rcrd for rcrd in PrefixData(prefix).iter_records() if rcrd.name == 'python')
    record = next(py_record_iter, None)
    if record is None:
        return None
    next_record = next(py_record_iter, None)
    if next_record is not None:
        raise CondaDependencyError("multiple python records found in prefix %s" % prefix)
    # BUGFIX: previously returned record.version[:3], which truncates two-digit
    # minor versions ("3.10.4" -> "3.1"). Take the first two dot-separated
    # components instead; unchanged for single-digit minors ("3.6.5" -> "3.6").
    return '.'.join(record.version.split('.')[:2])
def delete_prefix_from_linked_data(path):
    """Evict the cached PrefixData entry whose prefix contains *path*.

    Here, path may be a complete prefix or a dist inside a prefix. Returns
    True when a cache entry was removed, False otherwise.
    """
    # Scan keys longest-first (reverse lexicographic) so the deepest matching
    # prefix wins; delete at most one entry.
    for cached_prefix in sorted(PrefixData._cache_, reverse=True):
        if path.startswith(cached_prefix):
            del PrefixData._cache_[cached_prefix]
            return True
    return False
import warnings
from collections import OrderedDict, Iterable
from torch import nn
import torch
import torchvision
from .base import Core2d
from ... import regularizers
from ..affine import Bias2DLayer, Scale2DLayer
from ..activations import AdaptiveELU
from ..hermite import (
HermiteConv2D,
RotationEquivariantBatchNorm2D,
RotationEquivariantBias2DLayer,
RotationEquivariantScale2DLayer,
)
import logging
logger = logging.getLogger(__name__)
class Stacked2dCore(Core2d, nn.Module):
    """
    A core built of stacked conv2d layers.
    """

    def __init__(
        self,
        input_channels,
        hidden_channels,
        input_kern,
        hidden_kern,
        layers=3,
        gamma_hidden=0,
        gamma_input=0.0,
        skip=0,
        final_nonlinearity=True,
        elu_xshift=0.0,
        elu_yshift=0.0,
        bias=True,
        momentum=0.1,
        pad_input=True,
        hidden_padding=None,
        batch_norm=True,
        batch_norm_scale=True,
        independent_bn_bias=True,
        hidden_dilation=1,
        laplace_padding=0,
        input_regularizer="LaplaceL2",
        stack=None,
        use_avg_reg=True,
    ):
        """
        Args:
            input_channels: Integer, number of input channels as in
            hidden_channels: Number of hidden channels (i.e feature maps) in each hidden layer
            input_kern: kernel size of the first layer (i.e. the input layer)
            hidden_kern: kernel size of each hidden layer's kernel
            layers: number of layers
            gamma_hidden: regularizer factor for group sparsity
            gamma_input: regularizer factor for the input weights (default: LaplaceL2, see neuralpredictors.regularizers)
            skip: Adds a skip connection
            final_nonlinearity: Boolean, if true, appends an ELU layer after the last BatchNorm (if BN=True)
            elu_xshift, elu_yshift: final_nonlinearity(x) = Elu(x - elu_xshift) + elu_yshift
            bias: Adds a bias layer.
            momentum: BN momentum
            pad_input: Boolean, if True, applies zero padding to all convolutions
            hidden_padding: int or None. Explicit padding for the hidden layers. If None (default), the
                padding is computed per layer from the kernel size and dilation so that the feature-map
                size is preserved.
            batch_norm: Boolean, if True appends a BN layer after each convolutional layer
            batch_norm_scale: If True, a scaling factor after BN will be learned.
            independent_bn_bias: If False, will allow for scaling the batch norm, so that batchnorm
                and bias can both be true. Defaults to True.
            hidden_dilation: If set to > 1, will apply dilated convs for all hidden layers
            laplace_padding: Padding size for the laplace convolution. If padding = None, it defaults to half of
                the kernel size (recommended). Setting Padding to 0 is not recommended and leads to artefacts,
                zero is the default however to recreate backwards compatibility.
            input_regularizer: String that must match one of the regularizers in ..regularizers
            stack: Int or iterable. Selects which layers of the core should be stacked for the readout.
                default value will stack all layers on top of each other.
                Implemented as layers_to_stack = layers[stack:]. thus:
                stack = -1 will only select the last layer as the readout layer.
                stack of -2 will read out from the last two layers.
                And stack of 1 will read out from layer 1 (0 indexed) until the last layer.
            use_avg_reg: bool. Whether to use the averaged value of regularizer(s) or the summed.

        To enable learning batch_norms bias and scale independently, the arguments bias, batch_norm and batch_norm_scale
        work together: By default, all are true. In this case there won't be a bias learned in the convolutional layer, but
        batch_norm will learn both its bias and scale. If batch_norm is false, but bias true, a bias will be learned in the
        convolutional layer. If batch_norm and bias are true, but batch_norm_scale is false, batch_norm won't have learnable
        parameters and a BiasLayer will be added after the batch_norm layer.
        """
        super().__init__()

        # The GaussianLaplaceL2 regularizer additionally needs the kernel size.
        regularizer_config = (
            dict(padding=laplace_padding, kernel=input_kern)
            if input_regularizer == "GaussianLaplaceL2"
            else dict(padding=laplace_padding)
        )
        self._input_weights_regularizer = regularizers.__dict__[input_regularizer](**regularizer_config)

        self.layers = layers
        self.gamma_input = gamma_input
        self.gamma_hidden = gamma_hidden
        self.input_channels = input_channels
        self.hidden_channels = hidden_channels
        self.skip = skip
        self.use_avg_reg = use_avg_reg
        if use_avg_reg:
            warnings.warn("The averaged value of regularizer will be used.", UserWarning)

        self.features = nn.Sequential()
        if stack is None:
            self.stack = range(self.layers)
        else:
            # An int selects a tail slice of the layers; an iterable is used as-is.
            self.stack = [*range(self.layers)[stack:]] if isinstance(stack, int) else stack

        # --- first layer
        layer = OrderedDict()
        layer["conv"] = nn.Conv2d(
            input_channels,
            hidden_channels,
            input_kern,
            padding=input_kern // 2 if pad_input else 0,
            bias=bias and not batch_norm,
        )
        if batch_norm:
            if independent_bn_bias:
                layer["norm"] = nn.BatchNorm2d(hidden_channels, momentum=momentum)
            else:
                layer["norm"] = nn.BatchNorm2d(hidden_channels, momentum=momentum, affine=bias and batch_norm_scale)
                # When BN is not affine, add the missing bias/scale as separate layers.
                if bias:
                    if not batch_norm_scale:
                        layer["bias"] = Bias2DLayer(hidden_channels)
                elif batch_norm_scale:
                    layer["scale"] = Scale2DLayer(hidden_channels)
        if layers > 1 or final_nonlinearity:
            layer["nonlin"] = AdaptiveELU(elu_xshift, elu_yshift)
        self.features.add_module("layer0", nn.Sequential(layer))

        # --- other layers
        if not isinstance(hidden_kern, Iterable):
            hidden_kern = [hidden_kern] * (self.layers - 1)

        for l in range(1, self.layers):
            layer = OrderedDict()
            # BUGFIX: previously an explicitly passed hidden_padding was
            # unconditionally overwritten and silently ignored. Honor it when
            # given; otherwise compute the "same"-style padding per layer from
            # the (dilated) kernel size, exactly as before.
            if hidden_padding is None:
                pad = ((hidden_kern[l - 1] - 1) * hidden_dilation + 1) // 2
            else:
                pad = hidden_padding
            layer["conv"] = nn.Conv2d(
                hidden_channels if not skip > 1 else min(skip, l) * hidden_channels,
                hidden_channels,
                hidden_kern[l - 1],
                padding=pad,
                bias=bias and not batch_norm,
                dilation=hidden_dilation,
            )
            if batch_norm:
                if independent_bn_bias:
                    layer["norm"] = nn.BatchNorm2d(hidden_channels, momentum=momentum)
                else:
                    layer["norm"] = nn.BatchNorm2d(
                        hidden_channels,
                        momentum=momentum,
                        affine=bias and batch_norm_scale,
                    )
                    if bias:
                        if not batch_norm_scale:
                            layer["bias"] = Bias2DLayer(hidden_channels)
                    elif batch_norm_scale:
                        layer["scale"] = Scale2DLayer(hidden_channels)
            if final_nonlinearity or l < self.layers - 1:
                layer["nonlin"] = AdaptiveELU(elu_xshift, elu_yshift)
            self.features.add_module("layer{}".format(l), nn.Sequential(layer))

        self.initialize()

    def forward(self, input_):
        """Run all layers, feeding skip connections when configured, and
        return the channel-wise concatenation of the layers selected by stack."""
        ret = []
        for l, feat in enumerate(self.features):
            do_skip = l >= 1 and self.skip > 1
            input_ = feat(input_ if not do_skip else torch.cat(ret[-min(self.skip, l) :], dim=1))
            ret.append(input_)
        return torch.cat([ret[ind] for ind in self.stack], dim=1)

    def laplace(self):
        """
        Laplace regularization for the filters of the first conv2d layer.
        """
        return self._input_weights_regularizer(self.features[0].conv.weight, avg=self.use_avg_reg)

    def group_sparsity(self):
        """
        Sparsity regularization on the filters of all the conv2d layers except the first one.
        """
        ret = 0
        for l in range(1, self.layers):
            ret = ret + self.features[l].conv.weight.pow(2).sum(3, keepdim=True).sum(2, keepdim=True).sqrt().mean()
        return ret / ((self.layers - 1) if self.layers > 1 else 1)

    def regularizer(self):
        return self.group_sparsity() * self.gamma_hidden + self.gamma_input * self.laplace()

    @property
    def outchannels(self):
        # One hidden_channels-wide block per stacked layer.
        return len(self.features) * self.hidden_channels
class RotationEquivariant2dCore(Core2d, nn.Module):
    """
    A core built of 2d rotation-equivariant layers. For more info refer to https://openreview.net/forum?id=H1fU8iAqKX.
    """

    def __init__(
        self,
        input_channels,
        hidden_channels,
        input_kern,
        hidden_kern,
        layers=3,
        num_rotations=8,
        stride=1,
        upsampling=2,
        gamma_hidden=0,
        gamma_input=0.0,
        final_nonlinearity=True,
        elu_xshift=0.0,
        elu_yshift=0.0,
        bias=True,
        momentum=0.1,
        pad_input=True,
        hidden_padding=None,
        batch_norm=True,
        batch_norm_scale=True,
        rot_eq_batch_norm=True,
        independent_bn_bias=True,
        laplace_padding=0,
        input_regularizer="LaplaceL2norm",
        stack=None,
        use_avg_reg=False,
    ):
        """
        Args:
            input_channels: Integer, number of input channels as in
            hidden_channels: Number of hidden channels (i.e feature maps) in each hidden layer
            input_kern: kernel size of the first layer (i.e. the input layer)
            hidden_kern: kernel size of each hidden layer's kernel
            layers: number of layers
            num_rotations: number of computed rotations for every feature
            stride: stride in convolutional layers
            upsampling: upsampling scale of Hermite filters
            gamma_hidden: regularizer factor for group sparsity
            gamma_input: regularizer factor for the input weights (default: LaplaceL2, see neuralpredictors.regularizers)
            final_nonlinearity: Boolean, if true, appends an ELU layer after the last BatchNorm (if BN=True)
            elu_xshift, elu_yshift: final_nonlinearity(x) = Elu(x - elu_xshift) + elu_yshift
            bias: Adds a bias layer.
            momentum: BN momentum
            pad_input: Boolean, if True, applies zero padding to all convolutions
            hidden_padding: int or None. Explicit padding for the hidden layers. If None (default), the
                padding is computed per layer as half the kernel size.
            batch_norm: Boolean, if True appends a BN layer after each convolutional layer
            batch_norm_scale: If True, a scaling factor after BN will be learned.
            rot_eq_batch_norm: If True, use rotation-equivariant norm/bias/scale layers; otherwise plain 2d ones.
            independent_bn_bias: If False, will allow for scaling the batch norm, so that batchnorm
                and bias can both be true. Defaults to True.
            laplace_padding: Padding size for the laplace convolution. If padding = None, it defaults to half of
                the kernel size (recommended). Setting Padding to 0 is not recommended and leads to artefacts,
                zero is the default however to recreate backwards compatibility.
            input_regularizer: String that must match one of the regularizers in ..regularizers
            stack: Int or iterable. Selects which layers of the core should be stacked for the readout.
                default value will stack all layers on top of each other.
                Implemented as layers_to_stack = layers[stack:]. thus:
                stack = -1 will only select the last layer as the readout layer.
                stack of -2 will read out from the last two layers.
                And stack of 1 will read out from layer 1 (0 indexed) until the last layer.
            use_avg_reg: bool. Whether to use the averaged value of regularizer(s) or the summed.

        To enable learning batch_norms bias and scale independently, the arguments bias, batch_norm and batch_norm_scale
        work together: By default, all are true. In this case there won't be a bias learned in the convolutional layer, but
        batch_norm will learn both its bias and scale. If batch_norm is false, but bias true, a bias will be learned in the
        convolutional layer. If batch_norm and bias are true, but batch_norm_scale is false, batch_norm won't have learnable
        parameters and a BiasLayer will be added after the batch_norm layer.
        """
        super().__init__()

        regularizer_config = (
            dict(padding=laplace_padding, kernel=input_kern)
            if input_regularizer == "GaussianLaplaceL2"
            else dict(padding=laplace_padding)
        )
        self._input_weights_regularizer = regularizers.__dict__[input_regularizer](**regularizer_config)

        self.layers = layers
        self.gamma_input = gamma_input
        self.gamma_hidden = gamma_hidden
        self.input_channels = input_channels
        self.hidden_channels = hidden_channels
        self.num_rotations = num_rotations
        self.stride = stride
        self.use_avg_reg = use_avg_reg

        # Select norm/bias/scale layer factories: rotation-equivariant variants
        # bind num_rotations; plain variants are used directly.
        if rot_eq_batch_norm:

            def BatchNormLayer(**kwargs):
                return RotationEquivariantBatchNorm2D(num_rotations=num_rotations, **kwargs)

            def BiasLayer(**kwargs):
                return RotationEquivariantBias2DLayer(num_rotations=num_rotations, **kwargs)

            def ScaleLayer(**kwargs):
                return RotationEquivariantScale2DLayer(num_rotations=num_rotations, **kwargs)

        else:
            BatchNormLayer = nn.BatchNorm2d
            BiasLayer = Bias2DLayer
            ScaleLayer = Scale2DLayer

        if use_avg_reg:
            warnings.warn("The averaged value of regularizer will be used.", UserWarning)

        self.features = nn.Sequential()
        if stack is None:
            self.stack = range(self.layers)
        else:
            self.stack = [*range(self.layers)[stack:]] if isinstance(stack, int) else stack

        # --- first layer
        layer = OrderedDict()
        layer["conv"] = HermiteConv2D(
            input_features=input_channels,
            output_features=hidden_channels,
            num_rotations=num_rotations,
            upsampling=upsampling,
            filter_size=input_kern,
            stride=stride,
            padding=input_kern // 2 if pad_input else 0,
            first_layer=True,
        )
        if batch_norm:
            if independent_bn_bias:
                layer["norm"] = BatchNormLayer(num_features=hidden_channels, momentum=momentum)
            else:
                layer["norm"] = BatchNormLayer(
                    num_features=hidden_channels,
                    momentum=momentum,
                    affine=bias and batch_norm_scale,
                )
                # When BN is not affine, add the missing bias/scale as separate layers.
                if bias:
                    if not batch_norm_scale:
                        layer["bias"] = BiasLayer(channels=hidden_channels)
                elif batch_norm_scale:
                    layer["scale"] = ScaleLayer(channels=hidden_channels)
        if layers > 1 or final_nonlinearity:
            layer["nonlin"] = AdaptiveELU(elu_xshift, elu_yshift)
        self.features.add_module("layer0", nn.Sequential(layer))

        # --- other layers
        if not isinstance(hidden_kern, Iterable):
            hidden_kern = [hidden_kern] * (self.layers - 1)

        for l in range(1, self.layers):
            layer = OrderedDict()
            # BUGFIX: previously `hidden_padding` itself was overwritten on the
            # first hidden layer, so with per-layer kernel sizes all subsequent
            # layers reused layer 1's padding. Compute a per-layer local instead.
            if hidden_padding is None:
                pad = hidden_kern[l - 1] // 2
            else:
                pad = hidden_padding
            layer["conv"] = HermiteConv2D(
                input_features=hidden_channels * num_rotations,
                output_features=hidden_channels,
                num_rotations=num_rotations,
                upsampling=upsampling,
                filter_size=hidden_kern[l - 1],
                stride=stride,
                padding=pad,
                first_layer=False,
            )
            if batch_norm:
                if independent_bn_bias:
                    layer["norm"] = BatchNormLayer(num_features=hidden_channels, momentum=momentum)
                else:
                    layer["norm"] = BatchNormLayer(
                        num_features=hidden_channels,
                        momentum=momentum,
                        affine=bias and batch_norm_scale,
                    )
                    if bias:
                        if not batch_norm_scale:
                            layer["bias"] = BiasLayer(channels=hidden_channels)
                    elif batch_norm_scale:
                        layer["scale"] = ScaleLayer(channels=hidden_channels)
            if final_nonlinearity or l < self.layers - 1:
                layer["nonlin"] = AdaptiveELU(elu_xshift, elu_yshift)
            self.features.add_module("layer{}".format(l), nn.Sequential(layer))

        self.initialize()

    def initialize(self):
        self.apply(self.init_conv_hermite)

    @staticmethod
    def init_conv_hermite(m):
        # Hermite coefficients are initialized with a small gaussian.
        if isinstance(m, HermiteConv2D):
            nn.init.normal_(m.coeffs.data, std=0.1)

    def forward(self, input_):
        """Run all layers and return the channel-wise concatenation of the
        layers selected by stack."""
        ret = []
        for l, feat in enumerate(self.features):
            input_ = feat(input_)
            ret.append(input_)
        return torch.cat([ret[ind] for ind in self.stack], dim=1)

    def laplace(self):
        """Laplace regularization on the (rotated) filters of the first layer."""
        return self._input_weights_regularizer(self.features[0].conv.weights_all_rotations, avg=self.use_avg_reg)

    def group_sparsity(self):
        """Group-sparsity regularization on all hidden layers' rotated filters."""
        ret = 0
        for l in range(1, self.layers):
            ret = (
                ret
                + self.features[l]
                .conv.weights_all_rotations.pow(2)
                .sum(3, keepdim=True)
                .sum(2, keepdim=True)
                .sqrt()
                .mean()
            )
        return ret / ((self.layers - 1) if self.layers > 1 else 1)

    def regularizer(self):
        return self.group_sparsity() * self.gamma_hidden + self.gamma_input * self.laplace()

    @property
    def outchannels(self):
        # Each stacked layer contributes hidden_channels * num_rotations maps.
        return len(self.features) * self.hidden_channels * self.num_rotations
class TransferLearningCore(Core2d, nn.Module):
    """
    Core from popular image recognition networks such as VGG or AlexNet. Can be already pretrained on ImageNet.
    """

    def __init__(
        self,
        input_channels,
        tl_model_name,
        layers,
        pretrained=True,
        final_batchnorm=True,
        final_nonlinearity=True,
        momentum=0.1,
        fine_tune=False,
        **kwargs
    ):
        """
        Args:
            input_channels (int): Number of input channels. 1 if greyscale, 3 if RBG
            tl_model_name (str): Name of the image recognition Transfer Learning model. Possible are all models in
                torchvision, i.e. vgg16, alexnet, ...
            layers (int): Number of layers, i.e. after which layer to cut the original network
            pretrained (boolean): Whether to use a randomly initialized or pretrained network
            final_batchnorm (boolean): Whether to add a batch norm after the final conv layer
            final_nonlinearity (boolean): Whether to add a final nonlinearity (ReLU)
            momentum (float): Momentum term for batch norm. Irrelevant if batch_norm=False
            fine_tune (boolean): Whether to clip gradients before this core or to allow training on the core
            **kwargs: Ignored; a warning is emitted when anything is passed.
        """
        # Unknown keyword arguments are reported but otherwise ignored.
        if kwargs:
            warnings.warn(
                "Ignoring input {} when creating {}".format(repr(kwargs), self.__class__.__name__),
                UserWarning,
            )
        super().__init__()
        self.input_channels = input_channels
        self.momentum = momentum

        # Fetch the backbone and keep only its first `layers` feature modules.
        backbone = getattr(torchvision.models, tl_model_name)(pretrained=pretrained)
        clipped = nn.Sequential(*list(backbone.features.children())[:layers])
        if not isinstance(clipped[-1], nn.Conv2d):
            warnings.warn(
                "Final layer is of type {}, not nn.Conv2d".format(type(clipped[-1])),
                UserWarning,
            )
        # Freeze the pretrained parameters unless fine-tuning is requested.
        if not fine_tune:
            for param in clipped.parameters():
                param.requires_grad = False

        # Assemble the final module stack.
        self.features = nn.Sequential()
        self.features.add_module("TransferLearning", clipped)
        if final_batchnorm:
            self.features.add_module("OutBatchNorm", nn.BatchNorm2d(self.outchannels, momentum=self.momentum))
        if final_nonlinearity:
            self.features.add_module("OutNonlin", nn.ReLU(inplace=True))

    def forward(self, input_):
        # Backbones trained on RGB expect 3 channels; replicate greyscale input.
        if self.input_channels == 1 and self.features.TransferLearning[0].in_channels == 3:
            input_ = input_.repeat(1, 3, 1, 1)
        return self.features(input_)

    def regularizer(self):
        # No regularization terms for a transfer-learning core.
        return 0

    @property
    def outchannels(self):
        """
        Number of channels produced by the last conv layer of the clipped backbone.
        If the final module is not a conv layer, the last conv layer found when
        scanning backwards through the network is used instead.
        """
        modules = self.features.TransferLearning
        offset = 1
        while "out_channels" not in modules[-offset].__dict__:
            offset += 1
        return modules[-offset].out_channels

    def initialize(self):
        logger.warning(
            "Ignoring initialization since the parameters should be acquired from a pretrained model. If you want random weights, set pretrained = False."
        )
from collections import OrderedDict, Iterable
from torch import nn
import torch
import torchvision
from .base import Core2d
from ... import regularizers
from ..affine import Bias2DLayer, Scale2DLayer
from ..activations import AdaptiveELU
from ..hermite import (
HermiteConv2D,
RotationEquivariantBatchNorm2D,
RotationEquivariantBias2DLayer,
RotationEquivariantScale2DLayer,
)
import logging
logger = logging.getLogger(__name__)
class Stacked2dCore(Core2d, nn.Module):
    """
    A core built of stacked conv2d layers.

    Layer 0 convolves the input; every subsequent layer convolves the previous
    feature map (optionally with skip connections). The readout sees the
    channel-wise concatenation of the layers selected by ``stack``.
    """

    def __init__(
        self,
        input_channels,
        hidden_channels,
        input_kern,
        hidden_kern,
        layers=3,
        gamma_hidden=0,
        gamma_input=0.0,
        skip=0,
        final_nonlinearity=True,
        elu_xshift=0.0,
        elu_yshift=0.0,
        bias=True,
        momentum=0.1,
        pad_input=True,
        hidden_padding=None,
        batch_norm=True,
        batch_norm_scale=True,
        independent_bn_bias=True,
        hidden_dilation=1,
        laplace_padding=0,
        input_regularizer="LaplaceL2",
        stack=None,
        use_avg_reg=True,
    ):
        """
        Args:
            input_channels: Integer, number of input channels as in
            hidden_channels: Number of hidden channels (i.e feature maps) in each hidden layer
            input_kern: kernel size of the first layer (i.e. the input layer)
            hidden_kern: kernel size of each hidden layer's kernel
            layers: number of layers
            gamma_hidden: regularizer factor for group sparsity
            gamma_input: regularizer factor for the input weights (default: LaplaceL2, see neuralpredictors.regularizers)
            skip: Adds a skip connection
            final_nonlinearity: Boolean, if true, appends an ELU layer after the last BatchNorm (if BN=True)
            elu_xshift, elu_yshift: final_nonlinearity(x) = Elu(x - elu_xshift) + elu_yshift
            bias: Adds a bias layer.
            momentum: BN momentum
            pad_input: Boolean, if True, applies zero padding to all convolutions
            hidden_padding: int or list of int. Padding for hidden layers. Note that this will apply to all the layers
                except the first (input) layer.
            batch_norm: Boolean, if True appends a BN layer after each convolutional layer
            batch_norm_scale: If True, a scaling factor after BN will be learned.
            independent_bn_bias: If False, will allow for scaling the batch norm, so that batchnorm
                and bias can both be true. Defaults to True.
            hidden_dilation: If set to > 1, will apply dilated convs for all hidden layers
            laplace_padding: Padding size for the laplace convolution. If padding = None, it defaults to half of
                the kernel size (recommended). Setting Padding to 0 is not recommended and leads to artefacts,
                zero is the default however to recreate backwards compatibility.
            normalize_laplace_regularizer: Boolean, if set to True, will use the LaplaceL2norm function from
                neuralpredictors.regularizers, which returns the regularizer as |laplace(filters)| / |filters|
            input_regularizer: String that must match one of the regularizers in ..regularizers
            stack: Int or iterable. Selects which layers of the core should be stacked for the readout.
                default value will stack all layers on top of each other.
                Implemented as layers_to_stack = layers[stack:]. thus:
                stack = -1 will only select the last layer as the readout layer.
                stack of -2 will read out from the last two layers.
                And stack of 1 will read out from layer 1 (0 indexed) until the last layer.
            use_avg_reg: bool. Whether to use the averaged value of regularizer(s) or the summed.

        To enable learning batch_norms bias and scale independently, the arguments bias, batch_norm and batch_norm_scale
        work together: By default, all are true. In this case there won't be a bias learned in the convolutional layer, but
        batch_norm will learn both its bias and scale. If batch_norm is false, but bias true, a bias will be learned in the
        convolutional layer. If batch_norm and bias are true, but batch_norm_scale is false, batch_norm won't have learnable
        parameters and a BiasLayer will be added after the batch_norm layer.
        """
        super().__init__()

        # GaussianLaplaceL2 additionally needs the input kernel size.
        regularizer_config = (
            dict(padding=laplace_padding, kernel=input_kern)
            if input_regularizer == "GaussianLaplaceL2"
            else dict(padding=laplace_padding)
        )
        self._input_weights_regularizer = regularizers.__dict__[input_regularizer](**regularizer_config)

        self.layers = layers
        self.gamma_input = gamma_input
        self.gamma_hidden = gamma_hidden
        self.input_channels = input_channels
        self.hidden_channels = hidden_channels
        self.skip = skip
        self.use_avg_reg = use_avg_reg

        if use_avg_reg:
            warnings.warn("The averaged value of regularizer will be used.", UserWarning)

        self.features = nn.Sequential()
        # Indices of the layers whose outputs are concatenated for the readout.
        if stack is None:
            self.stack = range(self.layers)
        else:
            self.stack = [*range(self.layers)[stack:]] if isinstance(stack, int) else stack

        # --- first layer
        layer = OrderedDict()
        # Conv bias is only used when batch norm is off (BN's bias takes over otherwise).
        layer["conv"] = nn.Conv2d(
            input_channels,
            hidden_channels,
            input_kern,
            padding=input_kern // 2 if pad_input else 0,
            bias=bias and not batch_norm,
        )
        if batch_norm:
            if independent_bn_bias:
                layer["norm"] = nn.BatchNorm2d(hidden_channels, momentum=momentum)
            else:
                # Affine only when both bias and scale should be learned by BN itself;
                # otherwise a separate Bias2DLayer / Scale2DLayer is appended below.
                layer["norm"] = nn.BatchNorm2d(hidden_channels, momentum=momentum, affine=bias and batch_norm_scale)
                if bias:
                    if not batch_norm_scale:
                        layer["bias"] = Bias2DLayer(hidden_channels)
                elif batch_norm_scale:
                    layer["scale"] = Scale2DLayer(hidden_channels)
        if layers > 1 or final_nonlinearity:
            layer["nonlin"] = AdaptiveELU(elu_xshift, elu_yshift)
        self.features.add_module("layer0", nn.Sequential(layer))

        # --- other layers
        if not isinstance(hidden_kern, Iterable):
            # NOTE(review): `Iterable` comes from `collections` at the top of the file;
            # it moved to `collections.abc` and is removed in Python 3.10 — confirm target version.
            hidden_kern = [hidden_kern] * (self.layers - 1)

        for l in range(1, self.layers):
            layer = OrderedDict()
            # NOTE(review): the `hidden_padding` constructor argument is ignored here —
            # padding is recomputed from kernel size and dilation for every hidden layer.
            hidden_padding = ((hidden_kern[l - 1] - 1) * hidden_dilation + 1) // 2
            layer["conv"] = nn.Conv2d(
                hidden_channels if not skip > 1 else min(skip, l) * hidden_channels,
                hidden_channels,
                hidden_kern[l - 1],
                padding=hidden_padding,
                bias=bias and not batch_norm,
                dilation=hidden_dilation,
            )
            if batch_norm:
                if independent_bn_bias:
                    layer["norm"] = nn.BatchNorm2d(hidden_channels, momentum=momentum)
                else:
                    layer["norm"] = nn.BatchNorm2d(
                        hidden_channels,
                        momentum=momentum,
                        affine=bias and batch_norm_scale,
                    )
                    if bias:
                        if not batch_norm_scale:
                            layer["bias"] = Bias2DLayer(hidden_channels)
                    elif batch_norm_scale:
                        layer["scale"] = Scale2DLayer(hidden_channels)
            if final_nonlinearity or l < self.layers - 1:
                layer["nonlin"] = AdaptiveELU(elu_xshift, elu_yshift)
            self.features.add_module("layer{}".format(l), nn.Sequential(layer))

        self.initialize()

    def forward(self, input_):
        ret = []
        for l, feat in enumerate(self.features):
            # With skip > 1, feed the concatenation of up to `skip` previous outputs.
            do_skip = l >= 1 and self.skip > 1
            input_ = feat(input_ if not do_skip else torch.cat(ret[-min(self.skip, l) :], dim=1))
            ret.append(input_)
        # Concatenate the selected layers' outputs along the channel axis for the readout.
        return torch.cat([ret[ind] for ind in self.stack], dim=1)

    def laplace(self):
        """
        Laplace regularization for the filters of the first conv2d layer.
        """
        return self._input_weights_regularizer(self.features[0].conv.weight, avg=self.use_avg_reg)

    def group_sparsity(self):
        """
        Sparsity regularization on the filters of all the conv2d layers except the first one.
        """
        ret = 0
        for l in range(1, self.layers):
            # L2 norm over each filter's spatial extent, then averaged -> group sparsity.
            ret = ret + self.features[l].conv.weight.pow(2).sum(3, keepdim=True).sum(2, keepdim=True).sqrt().mean()
        return ret / ((self.layers - 1) if self.layers > 1 else 1)

    def regularizer(self):
        # Total regularization: weighted hidden group sparsity + weighted input Laplace term.
        return self.group_sparsity() * self.gamma_hidden + self.gamma_input * self.laplace()

    @property
    def outchannels(self):
        # One hidden_channels-wide block per stacked layer.
        return len(self.features) * self.hidden_channels
class RotationEquivariant2dCore(Core2d, nn.Module):
    """
    A core built of 2d rotation-equivariant layers. For more info refer to https://openreview.net/forum?id=H1fU8iAqKX.
    """

    def __init__(
        self,
        input_channels,
        hidden_channels,
        input_kern,
        hidden_kern,
        layers=3,
        num_rotations=8,
        stride=1,
        upsampling=2,
        gamma_hidden=0,
        gamma_input=0.0,
        final_nonlinearity=True,
        elu_xshift=0.0,
        elu_yshift=0.0,
        bias=True,
        momentum=0.1,
        pad_input=True,
        hidden_padding=None,
        batch_norm=True,
        batch_norm_scale=True,
        rot_eq_batch_norm=True,
        independent_bn_bias=True,
        laplace_padding=0,
        input_regularizer="LaplaceL2norm",
        stack=None,
        use_avg_reg=False,
    ):
        """
        Args:
            input_channels: Integer, number of input channels as in
            hidden_channels: Number of hidden channels (i.e feature maps) in each hidden layer
            input_kern: kernel size of the first layer (i.e. the input layer)
            hidden_kern: kernel size of each hidden layer's kernel
            layers: number of layers
            num_rotations: number of computed rotations for every feature
            stride: stride in convolutional layers
            upsampling: upsampling scale of Hermite filters
            gamma_hidden: regularizer factor for group sparsity
            gamma_input: regularizer factor for the input weights (default: LaplaceL2, see neuralpredictors.regularizers)
            final_nonlinearity: Boolean, if true, appends an ELU layer after the last BatchNorm (if BN=True)
            elu_xshift, elu_yshift: final_nonlinearity(x) = Elu(x - elu_xshift) + elu_yshift
            bias: Adds a bias layer.
            momentum: BN momentum
            pad_input: Boolean, if True, applies zero padding to all convolutions
            hidden_padding: int or list of int. Padding for hidden layers. Note that this will apply to all the layers
                except the first (input) layer.
            batch_norm: Boolean, if True appends a BN layer after each convolutional layer
            batch_norm_scale: If True, a scaling factor after BN will be learned.
            independent_bn_bias: If False, will allow for scaling the batch norm, so that batchnorm
                and bias can both be true. Defaults to True.
            laplace_padding: Padding size for the laplace convolution. If padding = None, it defaults to half of
                the kernel size (recommended). Setting Padding to 0 is not recommended and leads to artefacts,
                zero is the default however to recreate backwards compatibility.
            normalize_laplace_regularizer: Boolean, if set to True, will use the LaplaceL2norm function from
                neuralpredictors.regularizers, which returns the regularizer as |laplace(filters)| / |filters|
            input_regularizer: String that must match one of the regularizers in ..regularizers
            stack: Int or iterable. Selects which layers of the core should be stacked for the readout.
                default value will stack all layers on top of each other.
                Implemented as layers_to_stack = layers[stack:]. thus:
                stack = -1 will only select the last layer as the readout layer.
                stack of -2 will read out from the last two layers.
                And stack of 1 will read out from layer 1 (0 indexed) until the last layer.
            use_avg_reg: bool. Whether to use the averaged value of regularizer(s) or the summed.

        To enable learning batch_norms bias and scale independently, the arguments bias, batch_norm and batch_norm_scale
        work together: By default, all are true. In this case there won't be a bias learned in the convolutional layer, but
        batch_norm will learn both its bias and scale. If batch_norm is false, but bias true, a bias will be learned in the
        convolutional layer. If batch_norm and bias are true, but batch_norm_scale is false, batch_norm won't have learnable
        parameters and a BiasLayer will be added after the batch_norm layer.
        """
        super().__init__()

        regularizer_config = (
            dict(padding=laplace_padding, kernel=input_kern)
            if input_regularizer == "GaussianLaplaceL2"
            else dict(padding=laplace_padding)
        )
        self._input_weights_regularizer = regularizers.__dict__[input_regularizer](**regularizer_config)

        self.layers = layers
        self.gamma_input = gamma_input
        self.gamma_hidden = gamma_hidden
        self.input_channels = input_channels
        self.hidden_channels = hidden_channels
        self.num_rotations = num_rotations
        self.stride = stride
        self.use_avg_reg = use_avg_reg

        # Select rotation-equivariant or plain normalization/affine layers; the
        # factories below just bake num_rotations into the rotation-equivariant ones.
        if rot_eq_batch_norm:

            def BatchNormLayer(**kwargs):
                return RotationEquivariantBatchNorm2D(num_rotations=num_rotations, **kwargs)

            def BiasLayer(**kwargs):
                return RotationEquivariantBias2DLayer(num_rotations=num_rotations, **kwargs)

            def ScaleLayer(**kwargs):
                return RotationEquivariantScale2DLayer(num_rotations=num_rotations, **kwargs)

        else:
            BatchNormLayer = nn.BatchNorm2d
            BiasLayer = Bias2DLayer
            ScaleLayer = Scale2DLayer

        if use_avg_reg:
            warnings.warn("The averaged value of regularizer will be used.", UserWarning)

        self.features = nn.Sequential()
        # Indices of the layers whose outputs are concatenated for the readout.
        if stack is None:
            self.stack = range(self.layers)
        else:
            self.stack = [*range(self.layers)[stack:]] if isinstance(stack, int) else stack

        # --- first layer
        layer = OrderedDict()
        layer["conv"] = HermiteConv2D(
            input_features=input_channels,
            output_features=hidden_channels,
            num_rotations=num_rotations,
            upsampling=upsampling,
            filter_size=input_kern,
            stride=stride,
            padding=input_kern // 2 if pad_input else 0,
            first_layer=True,
        )
        if batch_norm:
            if independent_bn_bias:
                layer["norm"] = BatchNormLayer(num_features=hidden_channels, momentum=momentum)
            else:
                layer["norm"] = BatchNormLayer(
                    num_features=hidden_channels,
                    momentum=momentum,
                    affine=bias and batch_norm_scale,
                )
                if bias:
                    if not batch_norm_scale:
                        layer["bias"] = BiasLayer(channels=hidden_channels)
                elif batch_norm_scale:
                    layer["scale"] = ScaleLayer(channels=hidden_channels)
        if layers > 1 or final_nonlinearity:
            layer["nonlin"] = AdaptiveELU(elu_xshift, elu_yshift)
        self.features.add_module("layer0", nn.Sequential(layer))

        # --- other layers
        if not isinstance(hidden_kern, Iterable):
            hidden_kern = [hidden_kern] * (self.layers - 1)

        for l in range(1, self.layers):
            layer = OrderedDict()
            if hidden_padding is None:
                # NOTE(review): computed only for the first hidden layer and then reused
                # for all later layers — wrong if hidden_kern varies per layer; confirm intended.
                hidden_padding = hidden_kern[l - 1] // 2
            # Each hidden layer consumes all rotated copies of the previous features.
            layer["conv"] = HermiteConv2D(
                input_features=hidden_channels * num_rotations,
                output_features=hidden_channels,
                num_rotations=num_rotations,
                upsampling=upsampling,
                filter_size=hidden_kern[l - 1],
                stride=stride,
                padding=hidden_padding,
                first_layer=False,
            )
            if batch_norm:
                if independent_bn_bias:
                    layer["norm"] = BatchNormLayer(num_features=hidden_channels, momentum=momentum)
                else:
                    layer["norm"] = BatchNormLayer(
                        num_features=hidden_channels,
                        momentum=momentum,
                        affine=bias and batch_norm_scale,
                    )
                    if bias:
                        if not batch_norm_scale:
                            layer["bias"] = BiasLayer(channels=hidden_channels)
                    elif batch_norm_scale:
                        layer["scale"] = ScaleLayer(channels=hidden_channels)
            if final_nonlinearity or l < self.layers - 1:
                layer["nonlin"] = AdaptiveELU(elu_xshift, elu_yshift)
            self.features.add_module("layer{}".format(l), nn.Sequential(layer))

        self.initialize()

    def initialize(self):
        # Re-initialize all Hermite coefficient tensors.
        self.apply(self.init_conv_hermite)

    @staticmethod
    def init_conv_hermite(m):
        if isinstance(m, HermiteConv2D):
            nn.init.normal_(m.coeffs.data, std=0.1)

    def forward(self, input_):
        ret = []
        for l, feat in enumerate(self.features):
            input_ = feat(input_)
            ret.append(input_)
        # Concatenate the selected layers' outputs along the channel axis.
        return torch.cat([ret[ind] for ind in self.stack], dim=1)

    def laplace(self):
        # Laplace penalty on the (rotated) filters of the input layer.
        return self._input_weights_regularizer(self.features[0].conv.weights_all_rotations, avg=self.use_avg_reg)

    def group_sparsity(self):
        # Spatial L2 norm per filter, averaged over the hidden layers.
        ret = 0
        for l in range(1, self.layers):
            ret = (
                ret
                + self.features[l]
                .conv.weights_all_rotations.pow(2)
                .sum(3, keepdim=True)
                .sum(2, keepdim=True)
                .sqrt()
                .mean()
            )
        return ret / ((self.layers - 1) if self.layers > 1 else 1)

    def regularizer(self):
        return self.group_sparsity() * self.gamma_hidden + self.gamma_input * self.laplace()

    @property
    def outchannels(self):
        # Each stacked layer contributes hidden_channels * num_rotations feature maps.
        return len(self.features) * self.hidden_channels * self.num_rotations
class TransferLearningCore(Core2d, nn.Module):
    """
    Core from popular image recognition networks such as VGG or AlexNet. Can be already pretrained on ImageNet.
    """

    def __init__(
        self,
        input_channels,
        tl_model_name,
        layers,
        pretrained=True,
        final_batchnorm=True,
        final_nonlinearity=True,
        momentum=0.1,
        fine_tune=False,
        **kwargs
    ):
        """
        Args:
            input_channels (int): Number of input channels. 1 if greyscale, 3 if RBG
            tl_model_name (str): Name of the image recognition Transfer Learning model. Possible are all models in
                torchvision, i.e. vgg16, alexnet, ...
            layers (int): Number of layers, i.e. after which layer to cut the original network
            pretrained (boolean): Whether to use a randomly initialized or pretrained network
            final_batchnorm (boolean): Whether to add a batch norm after the final conv layer
            final_nonlinearity (boolean): Whether to add a final nonlinearity (ReLU)
            momentum (float): Momentum term for batch norm. Irrelevant if batch_norm=False
            fine_tune (boolean): Whether to clip gradients before this core or to allow training on the core
            **kwargs: Ignored; a warning is emitted if any are passed.
        """
        if kwargs:
            warnings.warn(
                "Ignoring input {} when creating {}".format(repr(kwargs), self.__class__.__name__),
                UserWarning,
            )
        super().__init__()
        self.input_channels = input_channels
        self.momentum = momentum

        # Download model and cut after specified layer
        # NOTE(review): `pretrained=` is deprecated in newer torchvision (use `weights=`);
        # confirm the pinned torchvision version.
        TL_model = getattr(torchvision.models, tl_model_name)(pretrained=pretrained)
        TL_model_clipped = nn.Sequential(*list(TL_model.features.children())[:layers])
        if not isinstance(TL_model_clipped[-1], nn.Conv2d):
            warnings.warn(
                "Final layer is of type {}, not nn.Conv2d".format(type(TL_model_clipped[-1])),
                UserWarning,
            )

        # Fix pretrained parameters during training
        if not fine_tune:
            for param in TL_model_clipped.parameters():
                param.requires_grad = False

        # Stack model together
        self.features = nn.Sequential()
        self.features.add_module("TransferLearning", TL_model_clipped)
        # self.outchannels scans the clipped model for the last conv layer's width.
        if final_batchnorm:
            self.features.add_module("OutBatchNorm", nn.BatchNorm2d(self.outchannels, momentum=self.momentum))
        if final_nonlinearity:
            self.features.add_module("OutNonlin", nn.ReLU(inplace=True))

    def forward(self, input_):
        # If model is designed for RBG input but input is greyscale, repeat the same input 3 times
        if self.input_channels == 1 and self.features.TransferLearning[0].in_channels == 3:
            input_ = input_.repeat(1, 3, 1, 1)
        input_ = self.features(input_)
        return input_

    def regularizer(self):
        # No regularization term for this core.
        return 0

    @property
    def outchannels(self):
        """
        Function which returns the number of channels in the output conv layer. If the output layer is not a conv
        layer, the last conv layer in the network is used.

        Returns: Number of output channels
        """
        found_outchannels = False
        i = 1
        # Walk backwards until a module exposing `out_channels` (a conv layer) is found.
        while not found_outchannels:
            if "out_channels" in self.features.TransferLearning[-i].__dict__:
                found_outchannels = True
            else:
                i += 1
        return self.features.TransferLearning[-i].out_channels

    def initialize(self):
        # Deliberate no-op: weights come from the pretrained model.
        logger.warning(
            "Ignoring initialization since the parameters should be acquired from a pretrained model. If you want random weights, set pretrained = False."
        )
import cv2
import paddle
import numpy as np
import albumentations as al
def get_augmentation():
    """Build the albumentations pipeline used for training-time augmentation."""
    color_jitter = al.Compose(
        [
            # NOTE: RandomBrightnessContrast replaces ColorJitter
            al.RandomBrightnessContrast(p=1),
            al.HueSaturationValue(p=1),
        ],
        p=0.8,
    )
    transforms = [
        al.RandomResizedCrop(512, 512, scale=(0.2, 1.)),
        color_jitter,
        al.ToGray(p=0.2),
        al.GaussianBlur(5, p=0.5),
    ]
    return al.Compose(transforms)
def augment(images, labels, aug, iters):
    """Augment a batch of images and their labels together.

    Args:
        images (paddle.Tensor): Batch of mean-subtracted BGR images, shape (B, C, H, W).
        labels (paddle.Tensor): Segmentation labels, shape (B, H, W) or (B, C, H, W).
        aug: albumentations transform accepting ``image=`` and ``mask=`` keywords.
        iters: Unused; kept for backward compatibility with existing callers.

    Returns:
        tuple: (augmented images, augmented labels) as stacked paddle tensors.
    """
    IMG_MEAN = np.array((104.00698793, 116.66876762, 122.67891434),
                        dtype=np.float32)
    # Transform label shape: B, C, W, H ==> B, W, H, C
    labels_are_3d = (len(labels.shape) == 4)
    if labels_are_3d:
        # BUGFIX: paddle tensors have no .permute() (a PyTorch API);
        # use paddle.transpose with an explicit perm instead.
        labels = paddle.transpose(labels, perm=[0, 2, 3, 1])
    # Transform each image independently.
    aug_images, aug_labels = [], []
    for image, label in zip(images, labels):
        # Step 1: Undo normalization transformation, convert to numpy
        image = cv2.cvtColor(image.numpy().transpose(1, 2, 0) + IMG_MEAN,
                             cv2.COLOR_BGR2RGB).astype(np.uint8)
        label = label.numpy()
        label = label.astype('int64')
        # Step 2: Perform transformations on numpy images
        data = aug(image=image, mask=label)
        image, label = data['image'], data['mask']
        # Step 3: Convert back to paddle tensors (re-apply BGR mean subtraction)
        image = paddle.to_tensor((cv2.cvtColor(
            image.astype(np.float32), cv2.COLOR_RGB2BGR) - IMG_MEAN).transpose(
            2, 0, 1))
        label = paddle.to_tensor(label)
        if not labels_are_3d:
            label = label.astype('int64')
        # Add to list
        aug_images.append(image)
        aug_labels.append(label)
    # Stack
    images = paddle.stack(aug_images, axis=0)
    labels = paddle.stack(aug_labels, axis=0)
    # Transform label shape back: B, W, H, C ==> B, C, W, H
    if labels_are_3d:
        # BUGFIX: same .permute() -> paddle.transpose replacement as above.
        labels = paddle.transpose(labels, perm=[0, 3, 1, 2])
    return images, labels
import paddle
import numpy as np
import albumentations as al
def get_augmentation():
    """Return the albumentations pipeline used for training-time augmentation."""
    return al.Compose([
        al.RandomResizedCrop(512, 512, scale=(0.2, 1.)),
        al.Compose(
            [
                # NOTE: RandomBrightnessContrast replaces ColorJitter
                al.RandomBrightnessContrast(p=1),
                al.HueSaturationValue(p=1),
            ],
            p=0.8),
        al.ToGray(p=0.2),
        al.GaussianBlur(5, p=0.5),
    ])
def augment(images, labels, aug, iters):
    """Augment a batch of images and their labels together.

    Args:
        images (paddle.Tensor): Batch of mean-subtracted BGR images, shape (B, C, H, W).
        labels (paddle.Tensor): Segmentation labels, shape (B, H, W) or (B, C, H, W).
        aug: albumentations transform accepting ``image=`` and ``mask=`` keywords.
        iters: Unused; kept for backward compatibility with existing callers.

    Returns:
        tuple: (augmented images, augmented labels) as stacked paddle tensors.
    """
    IMG_MEAN = np.array((104.00698793, 116.66876762, 122.67891434),
                        dtype=np.float32)
    # Transform label shape: B, C, W, H ==> B, W, H, C
    labels_are_3d = (len(labels.shape) == 4)
    if labels_are_3d:
        # BUGFIX: paddle tensors have no .permute() (a PyTorch API);
        # use paddle.transpose with an explicit perm instead.
        labels = paddle.transpose(labels, perm=[0, 2, 3, 1])
    # Transform each image independently.
    aug_images, aug_labels = [], []
    for image, label in zip(images, labels):
        # Step 1: Undo normalization transformation, convert to numpy
        image = cv2.cvtColor(image.numpy().transpose(1, 2, 0) + IMG_MEAN,
                             cv2.COLOR_BGR2RGB).astype(np.uint8)
        label = label.numpy()
        label = label.astype('int64')
        # Step 2: Perform transformations on numpy images
        data = aug(image=image, mask=label)
        image, label = data['image'], data['mask']
        # Step 3: Convert back to paddle tensors (re-apply BGR mean subtraction)
        image = paddle.to_tensor((cv2.cvtColor(
            image.astype(np.float32), cv2.COLOR_RGB2BGR) - IMG_MEAN).transpose(
            2, 0, 1))
        label = paddle.to_tensor(label)
        if not labels_are_3d:
            label = label.astype('int64')
        # Add to list
        aug_images.append(image)
        aug_labels.append(label)
    # Stack
    images = paddle.stack(aug_images, axis=0)
    labels = paddle.stack(aug_labels, axis=0)
    # Transform label shape back: B, W, H, C ==> B, C, W, H
    if labels_are_3d:
        # BUGFIX: same .permute() -> paddle.transpose replacement as above.
        labels = paddle.transpose(labels, perm=[0, 3, 1, 2])
    return images, labels
from __future__ import division
from __future__ import print_function
import argparse
import gzip
import os
import sys
import urllib
try:
from urllib.error import URLError
from urllib.request import urlretrieve
except ImportError:
from urllib2 import URLError
from urllib import urlretrieve
RESOURCES = [
'train-images-idx3-ubyte.gz',
'train-labels-idx1-ubyte.gz',
't10k-images-idx3-ubyte.gz',
't10k-labels-idx1-ubyte.gz',
]
def report_download_progress(chunk_number, chunk_size, file_size):
    """urlretrieve reporthook: draw a 64-character in-place progress bar."""
    if file_size == -1:
        # Total size unknown; nothing sensible to draw.
        return
    fraction = min(1, (chunk_number * chunk_size) / file_size)
    filled = '#' * int(64 * fraction)
    sys.stdout.write('\r0% |{:<64}| {}%'.format(filled, int(fraction * 100)))
def download(destination_path, url, quiet):
    """Fetch ``url`` into ``destination_path`` unless the file already exists.

    Raises RuntimeError when the transfer fails with a URLError.
    """
    if os.path.exists(destination_path):
        if not quiet:
            print('{} already exists, skipping ...'.format(destination_path))
        return
    print('Downloading {} ...'.format(url))
    try:
        # Only show the progress bar when verbose output is wanted.
        hook = None if quiet else report_download_progress
        urlretrieve(url, destination_path, reporthook=hook)
    except URLError:
        raise RuntimeError('Error downloading resource!')
    finally:
        if not quiet:
            # Just a newline.
            print()
def unzip(zipped_path, quiet):
    """Decompress a .gz file next to itself (same name without the suffix)."""
    target = os.path.splitext(zipped_path)[0]
    if os.path.exists(target):
        if not quiet:
            print('{} already exists, skipping ... '.format(target))
        return
    # MNIST archives are small enough to buffer whole.
    with gzip.open(zipped_path, 'rb') as src, open(target, 'wb') as dst:
        dst.write(src.read())
    if not quiet:
        print('Unzipped {} ...'.format(zipped_path))
def main():
    """Parse CLI options, then fetch and decompress each MNIST resource."""
    parser = argparse.ArgumentParser(
        description='Download the MNIST dataset from the internet')
    parser.add_argument(
        '-d', '--destination', default='.', help='Destination directory')
    parser.add_argument(
        '-q',
        '--quiet',
        action='store_true',
        help="Don't report about progress")
    options = parser.parse_args()
    if not os.path.exists(options.destination):
        os.makedirs(options.destination)
    try:
        for resource in RESOURCES:
            path = os.path.join(options.destination, resource)
            # NOTE(review): yann.lecun.com hosting has become unreliable; confirm a mirror.
            url = 'http://yann.lecun.com/exdb/mnist/{}'.format(resource)
            download(path, url, options.quiet)
            unzip(path, options.quiet)
    except KeyboardInterrupt:
        # Allow a clean Ctrl-C between resources.
        print('Interrupted')
# Script entry point.
if __name__ == '__main__':
    main()
from __future__ import print_function
import argparse
import gzip
import os
import sys
import urllib
try:
from urllib.error import URLError
from urllib.request import urlretrieve
except ImportError:
from urllib2 import URLError
from urllib import urlretrieve
RESOURCES = [
'train-images-idx3-ubyte.gz',
'train-labels-idx1-ubyte.gz',
't10k-images-idx3-ubyte.gz',
't10k-labels-idx1-ubyte.gz',
]
def report_download_progress(chunk_number, chunk_size, file_size):
    """Reporthook for urlretrieve: render a 64-char progress bar in place."""
    if file_size != -1:
        # Fraction downloaded, clamped to 1 for the final (partial) chunk.
        percent = min(1, (chunk_number * chunk_size) / file_size)
        bar = '#' * int(64 * percent)
        sys.stdout.write('\r0% |{:<64}| {}%'.format(bar, int(percent * 100)))
def download(destination_path, url, quiet):
    """Download ``url`` to ``destination_path``, skipping existing files.

    Raises:
        RuntimeError: if the transfer fails with a URLError.
    """
    if os.path.exists(destination_path):
        if not quiet:
            print('{} already exists, skipping ...'.format(destination_path))
    else:
        print('Downloading {} ...'.format(url))
        try:
            # Progress hook only when verbose output is wanted.
            hook = None if quiet else report_download_progress
            urlretrieve(url, destination_path, reporthook=hook)
        except URLError:
            raise RuntimeError('Error downloading resource!')
        finally:
            if not quiet:
                # Just a newline.
                print()
def unzip(zipped_path, quiet):
    """Decompress a .gz file in place (same name without the suffix)."""
    unzipped_path = os.path.splitext(zipped_path)[0]
    if os.path.exists(unzipped_path):
        if not quiet:
            print('{} already exists, skipping ... '.format(unzipped_path))
        return
    with gzip.open(zipped_path, 'rb') as zipped_file:
        with open(unzipped_path, 'wb') as unzipped_file:
            # Whole-file read: MNIST archives are small enough to buffer.
            unzipped_file.write(zipped_file.read())
    if not quiet:
        print('Unzipped {} ...'.format(zipped_path))
def main():
    """Parse CLI options, then fetch and decompress each MNIST resource."""
    parser = argparse.ArgumentParser(
        description='Download the MNIST dataset from the internet')
    parser.add_argument(
        '-d', '--destination', default='.', help='Destination directory')
    parser.add_argument(
        '-q',
        '--quiet',
        action='store_true',
        help="Don't report about progress")
    options = parser.parse_args()
    if not os.path.exists(options.destination):
        os.makedirs(options.destination)
    try:
        for resource in RESOURCES:
            path = os.path.join(options.destination, resource)
            # NOTE(review): yann.lecun.com hosting has become unreliable; confirm a mirror.
            url = 'http://yann.lecun.com/exdb/mnist/{}'.format(resource)
            download(path, url, options.quiet)
            unzip(path, options.quiet)
    except KeyboardInterrupt:
        # Allow a clean Ctrl-C between resources.
        print('Interrupted')
# Script entry point.
if __name__ == '__main__':
    main()
import cPickle as pickle
import os
import sys
import time
from datetime import datetime
import numpy as np
import pandas as pd
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.preprocessing import MinMaxScaler
from modlamp.descriptors import PeptideDescriptor
seed = np.random.RandomState(seed=3)
globstart = time.time()
def get_tree_pred(model, x):
    """ Get the predictions from all single ensemble members.

    :param model: ensemble model
    :param x: data to be predicted as a numpy.array
    :return: numpy.array of shape (n_samples, n_estimators) holding each
        member's predicted probability for the positive class
    """
    x32 = x.astype('float32')
    out = np.empty((x.shape[0], len(model.estimators_)))
    for col, member in enumerate(model.estimators_):
        # check_input=False skips sklearn's per-call input validation.
        out[:, col] = member.predict_proba(x32, check_input=False)[:, 1]
    return out
def main(infolder, outfolder):
    """Train an RF on peptide data, score a sequence library, write selection CSVs.

    NOTE: this is a Python 2 script (print statements, cPickle).
    Expects Pos.fasta, Neg.fasta, Lib.fasta, scaler.p and classifier.p in
    ``infolder``; results and pickled models are written to ``outfolder``.
    """
    descriptor = 'PPCALI'
    print "RF Peptide Learning Info\n========================\n"
    print datetime.now().strftime("%Y-%m-%d_%H-%M") + "\n"
    print("INPUT:\nInputfolder is\t%s\nOutputfolder is\t%s\nDescriptor is\t%s , auto-correlated (window 7)\n" %
          (infolder, outfolder, descriptor))
    # -------------------------------- TRAINING --------------------------------
    print "LOG:\nLoading data..."
    Pos = PeptideDescriptor(infolder + '/Pos.fasta', descriptor)
    Pos.filter_duplicates()
    Neg = PeptideDescriptor(infolder + '/Neg.fasta', descriptor)
    Neg.filter_duplicates()
    targets = np.array(len(Pos.sequences) * [1] + len(Neg.sequences) * [0])  # target vector
    # Descriptor calculation
    print "Calculating %s descriptor..." % descriptor
    Data = PeptideDescriptor(Pos.sequences + Neg.sequences, descriptor)
    Data.calculate_autocorr(7)
    # Standard Scaling
    print "Loading prefitted scaler and standard scaling %s descriptor..." % descriptor
    scaler = pickle.load(open(infolder + '/scaler.p', 'r'))
    Data = scaler.transform(Data.descriptor)
    # Classifier
    print "Loading pretrained classifier..."
    clf = pickle.load(open(infolder + '/classifier.p', 'r'))
    # fitting classifier
    print "Fitting Random Forest classifier..."
    clf.fit(Data, targets)
    # Leaf indices of the training samples; reused below for RF similarity.
    fit_leafs = clf.apply(Data)
    print "\tRF out-of-bag score: %.2f" % clf.oob_score_
    # -------------------------------- LIBRARY --------------------------------
    # Loading library
    print "Loading sequence library..."
    Lib = PeptideDescriptor(infolder + '/Lib.fasta', descriptor)
    class_labels = [l[:3] for l in Lib.names]  # extract class labels from sequence names
    print "\tLibrary size: %i" % len(Lib.sequences)
    print "\tLibrary composition is:\n\t\thel: %i\n\t\tasy: %i\n\t\tnCM: %i" % (class_labels.count('hel'),
                                                                               class_labels.count('asy'),
                                                                               class_labels.count('nCM'))
    # Calculating descriptors for library members
    print "Calculating %s descriptor for library..." % descriptor
    D = PeptideDescriptor(Lib.sequences, descriptor)
    D.calculate_autocorr(7)
    # combining both libraries and scaling descriptor
    print "Standard scaling %s descriptor for library..." % descriptor
    X = scaler.transform(D.descriptor)
    # -------------------------------- PREDICTING --------------------------------
    # get single tree predictions and calculate stdev
    print "Predicting single tree results, standard deviation and entropy for library..."
    start = time.time()
    preds = get_tree_pred(clf, X)
    print "Predicting class probabilities for library..."
    probas = clf.predict_proba(X)
    probas = probas[:, 1].tolist()
    # Per-sample variance across ensemble members = model uncertainty proxy.
    variance = np.var(preds, axis=1)
    print ("\tPredictions took %.1f s" % (time.time() - start))
    # calculate similarity of library members to training data
    print "Calculating Random Forest similarity (cosine)..."
    start = time.time()
    lib_leafs = clf.apply(X)  # leaf indices where library samples end up in -> RF intrinsic similarity measure
    D_RF = pairwise_distances(lib_leafs, fit_leafs, metric='cosine')
    RF_dist = D_RF.mean(axis=1).tolist()
    print ("\tDistance calculation took %.1f s" % (time.time() - start))
    # scaling all output features
    print "Min-Max scaling outputs..."
    sclr = MinMaxScaler()
    # some transformations from lists to numpy matrices to arrays back to min-max scaled list:
    variance = np.squeeze(sclr.fit_transform(variance.reshape(-1, 1))).tolist()
    RF_dist = np.squeeze(sclr.fit_transform(np.array(RF_dist).reshape(-1, 1))).tolist()
    # construct final list with all values (prediction, RF_dist, var, sum)
    print "Creating result dictionaries..."
    sums = [0.5 * (x * (1 - y) + z) for x, y, z in zip(variance, RF_dist, probas)]  # dens-weight + proba
    # create data frame with all values
    d = pd.DataFrame({'Class': class_labels, 'Prediction': probas, 'RFSimilarity': RF_dist, 'TreeVariance': variance,
                      'WeighedSum': sums}, index=Lib.sequences)
    d.index.name = 'Sequence'
    d = d[['Class', 'Prediction', 'RFSimilarity', 'TreeVariance', 'WeighedSum']].sort_values('Prediction',
                                                                                             ascending=False)
    # get top 5 and bottom 5 predictions according to the AMP prediction
    synth_sele_top = d[:5]
    synth_sele_bottom = d[-5:]
    synth_sele = pd.concat([synth_sele_top, synth_sele_bottom])
    # writing output
    print "Saving output files to output directory..."
    synth_sele.to_csv(outfolder + '/' + datetime.now().strftime("%Y-%m-%d_%H-%M") + 'synthesis_selection.csv')
    d.to_csv(outfolder + '/library_pred.csv')
    # saving scaler and classifier to pickle file for later usage
    pickle.dump(sclr, open(outfolder + '/' + datetime.now().strftime("%Y-%m-%d_%H-%M") + '-scaler.p', 'w'))
    pickle.dump(clf, open(outfolder + '/' + datetime.now().strftime("%Y-%m-%d_%H-%M") + '-classifier.p', 'w'))
    print("Total runtime: %.1f s\n" % (time.time() - globstart))
    print "\nALL DONE SUCCESSFULLY"
    print "Look for your results file in %s\nAnd maybe save this terminal output to a logfile ;-)" % outfolder
if __name__ == "__main__":
if len(sys.argv) < 3:
print(__doc__)
sys.exit(2)
print "Running...\n"
infolder = os.path.abspath(sys.argv[1])
outfolder = os.path.abspath(sys.argv[2])
# run the main function
main(infolder, outfolder) | AL_final.py | import cPickle as pickle
import os
import sys
import time
from datetime import datetime
import numpy as np
import pandas as pd
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.preprocessing import MinMaxScaler
from modlamp.descriptors import PeptideDescriptor
seed = np.random.RandomState(seed=3)
globstart = time.time()
def get_tree_pred(model, x):
    """ Get the predictions from all single ensemble members.

    :param model: ensemble model
    :param x: data to be predicted as a numpy.array
    :return: predictions from all members in a numpy.array of shape
        (n_samples, n_estimators); column i holds member i's probability
        for the positive class
    """
    # One row per sample, one column per ensemble member.
    preds = np.empty((x.shape[0], len(model.estimators_)))
    for i, tree in enumerate(model.estimators_):
        preds[:, i] = tree.predict_proba(x.astype('float32'), check_input=False)[:, 1]  # don't always check input dim
    return preds
def main(infolder, outfolder):
    """Train an RF peptide classifier and rank a candidate library.

    Reads ``Pos.fasta`` / ``Neg.fasta`` training sequences plus a
    ``Lib.fasta`` candidate library from *infolder*, refits a pre-trained
    Random Forest on auto-correlated PPCALI descriptors, then writes the
    ranked predictions and a 10-sequence synthesis selection (top 5 and
    bottom 5 by predicted probability) as CSV files to *outfolder*.

    :param infolder: directory holding Pos.fasta, Neg.fasta, Lib.fasta,
        scaler.p and classifier.p
    :param outfolder: directory that receives the CSV and pickle outputs
    """
    descriptor = 'PPCALI'
    print "RF Peptide Learning Info\n========================\n"
    print datetime.now().strftime("%Y-%m-%d_%H-%M") + "\n"
    print("INPUT:\nInputfolder is\t%s\nOutputfolder is\t%s\nDescriptor is\t%s , auto-correlated (window 7)\n" %
          (infolder, outfolder, descriptor))
    # -------------------------------- TRAINING --------------------------------
    print "LOG:\nLoading data..."
    Pos = PeptideDescriptor(infolder + '/Pos.fasta', descriptor)
    Pos.filter_duplicates()
    Neg = PeptideDescriptor(infolder + '/Neg.fasta', descriptor)
    Neg.filter_duplicates()
    # 1 = positive class, 0 = negative class, ordered to match Pos + Neg below
    targets = np.array(len(Pos.sequences) * [1] + len(Neg.sequences) * [0])  # target vector
    # Descriptor calculation
    print "Calculating %s descriptor..." % descriptor
    Data = PeptideDescriptor(Pos.sequences + Neg.sequences, descriptor)
    Data.calculate_autocorr(7)
    # Standard Scaling
    print "Loading prefitted scaler and standard scaling %s descriptor..." % descriptor
    # NOTE(review): the open() handles here and below are never closed and
    # pickles use text mode ('r'/'w'); 'rb'/'wb' inside with-blocks would be
    # safer -- works as-is only on CPython 2 with text-protocol pickles.
    scaler = pickle.load(open(infolder + '/scaler.p', 'r'))
    Data = scaler.transform(Data.descriptor)
    # Classifier
    print "Loading pretrained classifier..."
    clf = pickle.load(open(infolder + '/classifier.p', 'r'))
    # fitting classifier
    print "Fitting Random Forest classifier..."
    clf.fit(Data, targets)
    # leaf indices of the training samples; reused below for RF similarity
    fit_leafs = clf.apply(Data)
    print "\tRF out-of-bag score: %.2f" % clf.oob_score_
    # -------------------------------- LIBRARY --------------------------------
    # Loading library
    print "Loading sequence library..."
    Lib = PeptideDescriptor(infolder + '/Lib.fasta', descriptor)
    class_labels = [l[:3] for l in Lib.names]  # extract class labels from sequence names
    print "\tLibrary size: %i" % len(Lib.sequences)
    print "\tLibrary composition is:\n\t\thel: %i\n\t\tasy: %i\n\t\tnCM: %i" % (class_labels.count('hel'),
                                                                                class_labels.count('asy'),
                                                                                class_labels.count('nCM'))
    # Calculating descriptors for library members
    print "Calculating %s descriptor for library..." % descriptor
    D = PeptideDescriptor(Lib.sequences, descriptor)
    D.calculate_autocorr(7)
    # combining both libraries and scaling descriptor
    print "Standard scaling %s descriptor for library..." % descriptor
    X = scaler.transform(D.descriptor)
    # -------------------------------- PREDICTING --------------------------------
    # get single tree predictions and calculate stdev
    print "Predicting single tree results, standard deviation and entropy for library..."
    start = time.time()
    preds = get_tree_pred(clf, X)
    print "Predicting class probabilities for library..."
    probas = clf.predict_proba(X)
    probas = probas[:, 1].tolist()  # keep only the positive-class probability
    # tree disagreement (variance across ensemble members) = model uncertainty
    variance = np.var(preds, axis=1)
    print ("\tPredictions took %.1f s" % (time.time() - start))
    # calculate similarity of library members to training data
    print "Calculating Random Forest similarity (cosine)..."
    start = time.time()
    lib_leafs = clf.apply(X)  # leaf indices where library samples end up in -> RF intrinsic similarity measure
    D_RF = pairwise_distances(lib_leafs, fit_leafs, metric='cosine')
    # mean cosine distance of each library member to all training samples
    RF_dist = D_RF.mean(axis=1).tolist()
    print ("\tDistance calculation took %.1f s" % (time.time() - start))
    # scaling all output features
    print "Min-Max scaling outputs..."
    sclr = MinMaxScaler()
    # some transformations from lists to numpy matrices to arrays back to min-max scaled list:
    variance = np.squeeze(sclr.fit_transform(variance.reshape(-1, 1))).tolist()
    RF_dist = np.squeeze(sclr.fit_transform(np.array(RF_dist).reshape(-1, 1))).tolist()
    # construct final list with all values (prediction, RF_dist, var, sum)
    print "Creating result dictionaries..."
    # weighted score: variance damped by similarity, averaged with probability
    sums = [0.5 * (x * (1 - y) + z) for x, y, z in zip(variance, RF_dist, probas)]  # dens-weight + proba
    # create data frame with all values
    d = pd.DataFrame({'Class': class_labels, 'Prediction': probas, 'RFSimilarity': RF_dist, 'TreeVariance': variance,
                      'WeighedSum': sums}, index=Lib.sequences)
    d.index.name = 'Sequence'
    d = d[['Class', 'Prediction', 'RFSimilarity', 'TreeVariance', 'WeighedSum']].sort_values('Prediction',
                                                                                             ascending=False)
    # get top 5 and bottom 5 predictions according to the AMP prediction
    synth_sele_top = d[:5]
    synth_sele_bottom = d[-5:]
    synth_sele = pd.concat([synth_sele_top, synth_sele_bottom])
    # writing output
    print "Saving output files to output directory..."
    synth_sele.to_csv(outfolder + '/' + datetime.now().strftime("%Y-%m-%d_%H-%M") + 'synthesis_selection.csv')
    d.to_csv(outfolder + '/library_pred.csv')
    # saving scaler and classifier to pickle file for later usage
    pickle.dump(sclr, open(outfolder + '/' + datetime.now().strftime("%Y-%m-%d_%H-%M") + '-scaler.p', 'w'))
    pickle.dump(clf, open(outfolder + '/' + datetime.now().strftime("%Y-%m-%d_%H-%M") + '-classifier.p', 'w'))
    print("Total runtime: %.1f s\n" % (time.time() - globstart))
    print "\nALL DONE SUCCESSFULLY"
    print "Look for your results file in %s\nAnd maybe save this terminal output to a logfile ;-)" % outfolder
if __name__ == "__main__":
    # Usage: script.py <infolder> <outfolder>; bail out with the module
    # docstring if either directory argument is missing.
    if len(sys.argv) < 3:
        print(__doc__)
        sys.exit(2)
    print "Running...\n"
    infolder = os.path.abspath(sys.argv[1])
    outfolder = os.path.abspath(sys.argv[2])
    # run the main function
main(infolder, outfolder) | 0.464173 | 0.281518 |
import numpy as np
from .base import ScalarField
from ..geometry.coord_systems import (
cartesian_to_spherical,
cartesian_to_cylindrical)
from ..ransac import (
single_fit,
RANSAC_MODELS,
RANSAC_SAMPLERS)
class XYZScalarField(ScalarField):
def extract_info(self):
self.points = self.pyntcloud.xyz
class PlaneFit(XYZScalarField):
"""
Get inliers of the best RansacPlane found.
"""
def __init__(self, *, pyntcloud, max_dist=1e-4, max_iterations=100, n_inliers_to_stop=None):
self.model = RANSAC_MODELS["plane"]
self.sampler = RANSAC_SAMPLERS["random"]
self.name = "is_plane"
self.model_kwargs = {"max_dist": max_dist}
self.max_iterations = max_iterations
self.n_inliers_to_stop = n_inliers_to_stop
super().__init__(pyntcloud=pyntcloud)
def compute(self):
inliers = single_fit(self.points, self.model, self.sampler,
model_kwargs=self.model_kwargs,
max_iterations=self.max_iterations,
n_inliers_to_stop=self.n_inliers_to_stop)
self.to_be_added[self.name] = inliers.astype(np.uint8)
class SphereFit(XYZScalarField):
"""
Get inliers of the best RansacSphere found.
"""
def __init__(self, *, pyntcloud, max_dist=1e-4, max_iterations=100, n_inliers_to_stop=None):
super().__init__(pyntcloud=pyntcloud)
self.model = RANSAC_MODELS["sphere"]
self.sampler = RANSAC_SAMPLERS["random"]
self.name = "is_sphere"
self.model_kwargs = {"max_dist": max_dist}
self.max_iterations = max_iterations
self.n_inliers_to_stop = n_inliers_to_stop
def compute(self):
inliers = single_fit(self.points, self.model, self.sampler,
model_kwargs=self.model_kwargs,
max_iterations=self.max_iterations,
n_inliers_to_stop=self.n_inliers_to_stop)
self.to_be_added[self.name] = inliers.astype(np.uint8)
class CustomFit(XYZScalarField):
"""
Get inliers of the best custom model found.
"""
def __init__(self, pyntcloud, model, sampler, name, model_kwargs={},
sampler_kwargs={}, max_iterations=100, n_inliers_to_stop=None):
super().__init__(pyntcloud=pyntcloud)
self.model = model
self.sampler = sampler
self.name = name
self.model_kwargs = model_kwargs
self.sampler_kwargs = sampler_kwargs
self.max_iterations = max_iterations
self.n_inliers_to_stop = n_inliers_to_stop
def compute(self):
inliers = single_fit(self.points, self.model, self.sampler,
model_kwargs=self.model_kwargs,
max_iterations=self.max_iterations,
n_inliers_to_stop=self.n_inliers_to_stop)
self.to_be_added[self.name] = inliers.astype(np.uint8)
class SphericalCoordinates(XYZScalarField):
"""
Get radial, azimuthal and polar values.
"""
def __init__(self, *, pyntcloud, degrees=True):
super().__init__(pyntcloud=pyntcloud)
self.degrees = degrees
def compute(self):
radial, polar, azimuthal = cartesian_to_spherical(
self.points, degrees=self.degrees)
self.to_be_added["radial"] = radial
self.to_be_added["polar"] = polar
self.to_be_added["azimuthal"] = azimuthal
class CylindricalCoordinates(XYZScalarField):
"""
Get ro and phi values.
The z value in cylindrical coordinates remain unchanged.
"""
def __init__(self, *, pyntcloud, degrees=True):
self.degrees = degrees
super().__init__(pyntcloud=pyntcloud)
def compute(self):
radial_cylindrical, angular_cylindrical, z = cartesian_to_cylindrical(
self.points, degrees=self.degrees)
self.to_be_added["radial_cylindrical"] = radial_cylindrical
self.to_be_added["angular_cylindrical"] = angular_cylindrical | pyntcloud/scalar_fields/xyz.py | import numpy as np
from .base import ScalarField
from ..geometry.coord_systems import (
cartesian_to_spherical,
cartesian_to_cylindrical)
from ..ransac import (
single_fit,
RANSAC_MODELS,
RANSAC_SAMPLERS)
class XYZScalarField(ScalarField):
    """Base class for scalar fields computed from the cloud's xyz coordinates."""

    def extract_info(self):
        # Cache the point coordinates so compute() in subclasses can use them.
        self.points = self.pyntcloud.xyz
class PlaneFit(XYZScalarField):
    """Flag the points that are inliers of the best RANSAC plane."""

    def __init__(self, *, pyntcloud, max_dist=1e-4, max_iterations=100, n_inliers_to_stop=None):
        # Configure the RANSAC machinery before the base class initializes.
        self.model = RANSAC_MODELS["plane"]
        self.sampler = RANSAC_SAMPLERS["random"]
        self.name = "is_plane"
        self.model_kwargs = {"max_dist": max_dist}
        self.max_iterations = max_iterations
        self.n_inliers_to_stop = n_inliers_to_stop
        super().__init__(pyntcloud=pyntcloud)

    def compute(self):
        fit_options = {
            "model_kwargs": self.model_kwargs,
            "max_iterations": self.max_iterations,
            "n_inliers_to_stop": self.n_inliers_to_stop,
        }
        mask = single_fit(self.points, self.model, self.sampler, **fit_options)
        # Store the inlier mask as 0/1 values under the field name.
        self.to_be_added[self.name] = mask.astype(np.uint8)
class SphereFit(XYZScalarField):
    """Flag the points that are inliers of the best RANSAC sphere."""

    def __init__(self, *, pyntcloud, max_dist=1e-4, max_iterations=100, n_inliers_to_stop=None):
        super().__init__(pyntcloud=pyntcloud)
        self.model = RANSAC_MODELS["sphere"]
        self.sampler = RANSAC_SAMPLERS["random"]
        self.name = "is_sphere"
        self.model_kwargs = {"max_dist": max_dist}
        self.max_iterations = max_iterations
        self.n_inliers_to_stop = n_inliers_to_stop

    def compute(self):
        fit_options = {
            "model_kwargs": self.model_kwargs,
            "max_iterations": self.max_iterations,
            "n_inliers_to_stop": self.n_inliers_to_stop,
        }
        mask = single_fit(self.points, self.model, self.sampler, **fit_options)
        # Store the inlier mask as 0/1 values under the field name.
        self.to_be_added[self.name] = mask.astype(np.uint8)
class CustomFit(XYZScalarField):
    """
    Get inliers of the best custom model found.
    """

    def __init__(self, pyntcloud, model, sampler, name, model_kwargs=None,
                 sampler_kwargs=None, max_iterations=100, n_inliers_to_stop=None):
        """
        :param model: RANSAC model to be fitted.
        :param sampler: RANSAC sampler used to draw candidate points.
        :param name: column name under which the inlier mask is stored.
        :param model_kwargs: optional dict of extra kwargs for the model.
        :param sampler_kwargs: optional dict of extra kwargs for the sampler.
        """
        super().__init__(pyntcloud=pyntcloud)
        self.model = model
        self.sampler = sampler
        self.name = name
        # Bug fix: the kwargs parameters previously used mutable default
        # arguments ({}), which are shared across every instance created with
        # the defaults; use None sentinels and fresh dicts instead.
        self.model_kwargs = {} if model_kwargs is None else model_kwargs
        self.sampler_kwargs = {} if sampler_kwargs is None else sampler_kwargs
        self.max_iterations = max_iterations
        self.n_inliers_to_stop = n_inliers_to_stop

    def compute(self):
        inliers = single_fit(self.points, self.model, self.sampler,
                             model_kwargs=self.model_kwargs,
                             max_iterations=self.max_iterations,
                             n_inliers_to_stop=self.n_inliers_to_stop)
        # Store the inlier mask as 0/1 values under the field name.
        self.to_be_added[self.name] = inliers.astype(np.uint8)
class SphericalCoordinates(XYZScalarField):
    """Add radial, polar and azimuthal coordinate columns per point."""

    def __init__(self, *, pyntcloud, degrees=True):
        super().__init__(pyntcloud=pyntcloud)
        self.degrees = degrees

    def compute(self):
        spherical = cartesian_to_spherical(self.points, degrees=self.degrees)
        # cartesian_to_spherical returns (radial, polar, azimuthal)
        for column, values in zip(("radial", "polar", "azimuthal"), spherical):
            self.to_be_added[column] = values
class CylindricalCoordinates(XYZScalarField):
    """
    Get ro and phi values.
    The z value in cylindrical coordinates remain unchanged.
    """

    def __init__(self, *, pyntcloud, degrees=True):
        # Whether the angular coordinate is reported in degrees (else radians).
        self.degrees = degrees
        super().__init__(pyntcloud=pyntcloud)

    def compute(self):
        radial_cylindrical, angular_cylindrical, z = cartesian_to_cylindrical(
            self.points, degrees=self.degrees)
        # NOTE(review): z is unpacked but never stored in this view of the
        # source -- confirm against upstream whether a "z" column should also
        # be added here (this chunk may be truncated).
        self.to_be_added["radial_cylindrical"] = radial_cylindrical
self.to_be_added["angular_cylindrical"] = angular_cylindrical | 0.74512 | 0.414691 |
import numpy as np
from compmech.conecyl import ConeCyl
def test_static():
wmin_ref = [
['clpt_donnell_bc1', -0.0512249327106],
['clpt_donnell_bc2', -0.0500855846496],
['clpt_donnell_bc3', -0.0509280039584],
['clpt_donnell_bc4', -0.0498127720591],
['fsdt_donnell_bc1', -0.0516948646563],
['fsdt_donnell_bc2', -0.0505316013923],
['fsdt_donnell_bc3', -0.0513959737413],
['fsdt_donnell_bc4', -0.0502561654612],
]
for model, wmin in wmin_ref:
cc = ConeCyl()
cc.model = model
cc.m1 = 20
cc.m2 = 10
cc.n2 = 11
cc.name = 'Z33'
cc.laminaprop = (123.55e3 , 8.708e3, 0.319, 5.695e3, 5.695e3, 5.695e3)
cc.stack = [0, 0, 19, -19, 37, -37, 45, -45, 51, -51]
cc.plyt = 0.125
cc.r2 = 250.
cc.H = 510.
cc.add_SPL(10)
for thetadeg in np.linspace(0, 360, 300, endpoint=False):
cc.add_force(0., thetadeg, -15., 0, 0, increment=True)
cs = cc.static()
cc.uvw(cs[0])
assert np.isclose(cc.w.min(), wmin, rtol=0.01)
def test_NL_static():
wmin_ref = [
['clpt_donnell_bc1', -0.012689461685834305],
['clpt_donnell_bc2', -0.011741560192200845],
['clpt_donnell_bc3', -0.012634776822892537],
['clpt_donnell_bc4', -0.011499181513525969],
['fsdt_donnell_bc1', -0.012621439862441628],
]
for model, wmin in wmin_ref:
cc = ConeCyl()
cc.model = model
cc.m1 = 20
cc.m2 = 5
cc.n2 = 6
cc.name = 'Z33'
cc.laminaprop = (123.55e3 , 8.708e3, 0.319, 5.695e3, 5.695e3, 5.695e3)
cc.stack = [0, 0, 19, -19, 37, -37, 45, -45, 51, -51]
cc.plyt = 0.125
cc.r2 = 250.
cc.H = 510.
cc.add_SPL(10, increment=False)
for thetadeg in np.linspace(0, 360, 300, endpoint=False):
cc.add_force(0., thetadeg, -15., 0, 0, increment=True)
cc.analysis.initialInc = 0.5
cs = cc.static(NLgeom=True)
cc.uvw(cs[0])
assert np.isclose(cc.w.min(), wmin, rtol=0.01)
if __name__ == '__main__':
test_static()
test_NL_static() | compmech/conecyl/tests/test_static.py | import numpy as np
from compmech.conecyl import ConeCyl
def test_static():
    """Linear static analysis reproduces the reference minimum deflections."""
    reference = (
        ('clpt_donnell_bc1', -0.0512249327106),
        ('clpt_donnell_bc2', -0.0500855846496),
        ('clpt_donnell_bc3', -0.0509280039584),
        ('clpt_donnell_bc4', -0.0498127720591),
        ('fsdt_donnell_bc1', -0.0516948646563),
        ('fsdt_donnell_bc2', -0.0505316013923),
        ('fsdt_donnell_bc3', -0.0513959737413),
        ('fsdt_donnell_bc4', -0.0502561654612),
    )
    for model_name, expected_wmin in reference:
        cone = ConeCyl()
        cone.model = model_name
        cone.m1 = 20
        cone.m2 = 10
        cone.n2 = 11
        cone.name = 'Z33'
        cone.laminaprop = (123.55e3, 8.708e3, 0.319, 5.695e3, 5.695e3, 5.695e3)
        cone.stack = [0, 0, 19, -19, 37, -37, 45, -45, 51, -51]
        cone.plyt = 0.125
        cone.r2 = 250.
        cone.H = 510.
        # one single perturbation load plus a ring of 300 axial forces
        cone.add_SPL(10)
        for thetadeg in np.linspace(0, 360, 300, endpoint=False):
            cone.add_force(0., thetadeg, -15., 0, 0, increment=True)
        solutions = cone.static()
        cone.uvw(solutions[0])
        assert np.isclose(cone.w.min(), expected_wmin, rtol=0.01)
def test_NL_static():
    """Geometrically non-linear static analysis matches reference deflections."""
    reference = (
        ('clpt_donnell_bc1', -0.012689461685834305),
        ('clpt_donnell_bc2', -0.011741560192200845),
        ('clpt_donnell_bc3', -0.012634776822892537),
        ('clpt_donnell_bc4', -0.011499181513525969),
        ('fsdt_donnell_bc1', -0.012621439862441628),
    )
    for model_name, expected_wmin in reference:
        cone = ConeCyl()
        cone.model = model_name
        cone.m1 = 20
        cone.m2 = 5
        cone.n2 = 6
        cone.name = 'Z33'
        cone.laminaprop = (123.55e3, 8.708e3, 0.319, 5.695e3, 5.695e3, 5.695e3)
        cone.stack = [0, 0, 19, -19, 37, -37, 45, -45, 51, -51]
        cone.plyt = 0.125
        cone.r2 = 250.
        cone.H = 510.
        # one single perturbation load plus a ring of 300 axial forces
        cone.add_SPL(10, increment=False)
        for thetadeg in np.linspace(0, 360, 300, endpoint=False):
            cone.add_force(0., thetadeg, -15., 0, 0, increment=True)
        cone.analysis.initialInc = 0.5
        solutions = cone.static(NLgeom=True)
        cone.uvw(solutions[0])
        assert np.isclose(cone.w.min(), expected_wmin, rtol=0.01)
if __name__ == '__main__':
    # Allow running this test module directly without pytest.
    test_static()
test_NL_static() | 0.358016 | 0.548553 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
from PIL import Image
from PIL import ImageOps
DEFAULT_OPACITY = 0 # Between (0, 255)
# Conversion settings
_REQUIRED_MIN_DIMENSION = 10 # required minimum size of converted image
class ImageConvertSettings(object):
"""Image conversion settings."""
def __init__(self, img_format, width, height, position=(0.5, 0.5),
bg_color_rgb=(0, 0, 0),
opacity=DEFAULT_OPACITY,
resize_if_larger=False,
preserve_aspect_ratio=True):
"""Initialize settings object.
Args:
img_format: image output format, e.g., 'png', 'jpeg', etc. See supported
PIL image types.
width: desired output width in pixels.
height: desired output height in pixels.
position: position of resized image within new size frame. This should
be specified as a tuple with each value between [0, 1].
Default is (0.5, 0.5) for center.
bg_color_rgb: Background color in RGB values. E.g., (255, 255, 255).
opacity: Opacity or alpha value, from 0 to 255.
resize_if_larger: If True, will resize the image to fit the larger size
if the desired output size is larger than the original
image size.
preserve_aspect_ratio: If True, will attempt to preserve the original
image's aspect ration when resizing. Otherwise
will attempt a best effort. See PIL ImageOps.fit
documentation.
"""
self._format = img_format
self._width = width
self._height = height
self._transparency = opacity
self._opacity = opacity
# Background color is specified as RGBA
self._bg_mode = 'RGBA'
self._bg_color = (bg_color_rgb[0], bg_color_rgb[1], bg_color_rgb[2],
opacity)
self._position = position
self._preserve_aspect_ratio = preserve_aspect_ratio
# Whether image should be upsized if desired size is larger than orig size.
self._resize_if_larger = resize_if_larger
self._validate_settings()
def _validate_settings(self):
error_messages = []
for val in self._position:
if val < 0 or val > 1:
error_messages.append('Position must be a percent of width/height with '
'values ranging from 0 to 1.\n')
for val in self._bg_color:
if val < 0 or val > 255:
error_messages.append('RGB and opacity values must be between (0, 255)'
'.\n')
if self.width < _REQUIRED_MIN_DIMENSION:
error_messages.append('Desired width must be greater than %d pixels.\n' %
_REQUIRED_MIN_DIMENSION)
if self.height < _REQUIRED_MIN_DIMENSION:
error_messages.append('Desired width must be greater than %d pixels.\n' %
_REQUIRED_MIN_DIMENSION)
if error_messages:
raise ValueError('The following invalid conversion settings were found: '
'%s' % str(error_messages))
@property
def bg_color(self):
return self._bg_color
@property
def bg_mode(self):
return self._bg_mode
@property
def format(self):
return self._format
@property
def height(self):
return self._height
@property
def width(self):
return self._width
@property
def position(self):
return self._position
@property
def resize_if_larger(self):
return self._resize_if_larger
@property
def preserve_aspect_ratio(self):
return self._preserve_aspect_ratio
class ImageConverter(object):
"""Converter for images"""
def __init__(self, image, image_convert_settings):
"""Initialize converter.
Args:
image: input image.
image_convert_settings: ImageConvertSettings object.
"""
self._orig_img = image
self._settings = image_convert_settings
self._desired_size = (image_convert_settings.width,
image_convert_settings.height)
self._desired_format = str(image_convert_settings.format).lower()
self._preserve_aspect_ratio = image_convert_settings.preserve_aspect_ratio
self._resize_if_larger = image_convert_settings.resize_if_larger
self._position = image_convert_settings.position # Tuple (x, y) as decimal.
# Background settings
self._bg_mode = image_convert_settings.bg_mode
self._bg_color = image_convert_settings.bg_color
def convert(self):
"""Returns a converted image as a PIL Image object."""
# TODO: Do we need an option to allow pre-cropping?
if (self._orig_img.size == self._desired_size and
self._orig_img.format == self._desired_format.upper()):
logging.debug('Desired image is same format and size as original image'
'so no conversion needed.')
return self._orig_img
# Desired image is larger than original image.
if (self._desired_size[0] > self._orig_img.size[0] and
self._desired_size[1] > self._orig_img.size[1]):
if self._preserve_aspect_ratio and self._resize_if_larger:
return self._resize_larger_keep_aspect_ratio()
elif self._resize_if_larger:
return self._resize_larger_dont_keep_aspect_ratio()
return self._pad_to_larger_size()
if self._preserve_aspect_ratio:
return self._resize_thumbnail_keep_aspect_ratio()
return self._resize_thumbnail_and_crop()
def _resize_thumbnail_keep_aspect_ratio(self):
"""Resize image to fit the new smaller size, retaining aspect ratio.
We use the PIL thumbnail function, which guarantees the aspect ratio is
maintained but does not ensure the output size matches your desired size
(only that it will not exceed it). We then paste it onto a background image
that matches the desired output size.
If the new size is smaller than the original image size (in either
dimension), the image is reduced so it's just large enough to fit the
desired size, and padded with a background to ensure it's exactly the
desired size.
If the new size is larger than the original image size, the original size
is retained and the image is padded with a background to make it exactly
the desired size.
Args:
orig_img: PIL Image object.
Returns:
Void.
"""
# We use Image.ANTIALIAS for better quality. This is the recommended PIL
# setting.
self._orig_img.thumbnail(self._desired_size, Image.ANTIALIAS)
offset = (
int(
round(
(self._desired_size[0] - self._orig_img.size[0])
* self._position[0])),
int(
round(
(self._desired_size[1] - self._orig_img.size[1])
* self._position[1])))
bkgd_img = Image.new(self._bg_mode, self._desired_size, self._bg_color)
bkgd_img.paste(self._orig_img, offset)
return bkgd_img
def _resize_thumbnail_and_crop(self):
"""Resize image smaller, not retaining aspect ratio.
For image reduction, reduce size to fit smallest of the dimensions and
crop the larger dimension as necessary. Crop centering is based on
center of image."""
logging.debug('Reducing image size and cropping as necessary.')
return ImageOps.fit(image=self._orig_img, size=self._desired_size,
centering=self._position)
def _pad_to_larger_size(self):
"""Just pad orig image to larger size if desired size is larger."""
offset = (int(round((self._desired_size[0]) / 2)),
int(round((self._desired_size[1]) / 2)))
bkgd_img = Image.new(self._bg_mode, self._desired_size, self._bg_color)
bkgd_img.paste(self._orig_img, offset)
return bkgd_img
def _resize_larger_dont_keep_aspect_ratio(self):
"""Stretch image to fit new size, NOT retaining the aspect ratio."""
logging.info('Desired size is larger than source image size.')
return ImageOps.fit(image=self._orig_img, size=self._desired_size)
def _resize_larger_keep_aspect_ratio(self):
"""Stretch image to fit new size, keeping the aspect ratio."""
orig_w, orig_h = self._orig_img.size
# Find min stretch ratio, create a scaled up image.
resize_ratio = min(self._desired_size[0] / orig_w,
self._desired_size[1] / orig_h)
(new_width, new_height) = (int(round(orig_h * resize_ratio)),
int(round(orig_w * resize_ratio)))
new_img = ImageOps.fit(image=self._orig_img,
size=(new_width, new_height))
# If after stretching, it's the desired size already, then return image.
if self._desired_size == (new_width, new_height):
return new_img
# Otherwise we need to pad it to the correct size.
offset = (int(round((self._desired_size[0] - new_width) / 2)),
int(round((self._desired_size[1] - new_height) / 2)))
bkgd_img = Image.new(self._bg_mode, self._desired_size, self._bg_color)
bkgd_img.paste(new_img, offset)
return bkgd_img
def create_default_image(image_convert_settings):
"""Returns a default PIL image for use on image retrieval/conversion failures.
Helper method for creating a default image (of the background) to be used in
Atlas for images that fail retrieval/conversion.
Returns:
PIL Image.
"""
return Image.new(
image_convert_settings.bg_mode,
(image_convert_settings.width, image_convert_settings.height),
image_convert_settings.bg_color) | facets_atlasmaker/convert.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
from PIL import Image
from PIL import ImageOps
DEFAULT_OPACITY = 0 # Between (0, 255)
# Conversion settings
_REQUIRED_MIN_DIMENSION = 10 # required minimum size of converted image
class ImageConvertSettings(object):
  """Image conversion settings."""

  def __init__(self, img_format, width, height, position=(0.5, 0.5),
               bg_color_rgb=(0, 0, 0),
               opacity=DEFAULT_OPACITY,
               resize_if_larger=False,
               preserve_aspect_ratio=True):
    """Initialize settings object.

    Args:
      img_format: image output format, e.g., 'png', 'jpeg', etc. See supported
                  PIL image types.
      width: desired output width in pixels.
      height: desired output height in pixels.
      position: position of resized image within new size frame. This should
                be specified as a tuple with each value between [0, 1].
                Default is (0.5, 0.5) for center.
      bg_color_rgb: Background color in RGB values. E.g., (255, 255, 255).
      opacity: Opacity or alpha value, from 0 to 255.
      resize_if_larger: If True, will resize the image to fit the larger size
                        if the desired output size is larger than the original
                        image size.
      preserve_aspect_ratio: If True, will attempt to preserve the original
                             image's aspect ratio when resizing. Otherwise
                             will attempt a best effort. See PIL ImageOps.fit
                             documentation.

    Raises:
      ValueError: if position, color/opacity, or dimension values are out of
                  range.
    """
    self._format = img_format
    self._width = width
    self._height = height
    # NOTE(review): _transparency duplicates _opacity; kept for backward
    # compatibility with any external readers of this attribute.
    self._transparency = opacity
    self._opacity = opacity
    # Background color is specified as RGBA
    self._bg_mode = 'RGBA'
    self._bg_color = (bg_color_rgb[0], bg_color_rgb[1], bg_color_rgb[2],
                      opacity)
    self._position = position
    self._preserve_aspect_ratio = preserve_aspect_ratio
    # Whether image should be upsized if desired size is larger than orig size.
    self._resize_if_larger = resize_if_larger
    self._validate_settings()

  def _validate_settings(self):
    """Collects all range violations and raises a single ValueError."""
    error_messages = []
    for val in self._position:
      if val < 0 or val > 1:
        error_messages.append('Position must be a percent of width/height with '
                              'values ranging from 0 to 1.\n')
    # The opacity is the fourth element of _bg_color, so this loop validates
    # both the RGB channels and the opacity.
    for val in self._bg_color:
      if val < 0 or val > 255:
        error_messages.append('RGB and opacity values must be between (0, 255)'
                              '.\n')
    if self.width < _REQUIRED_MIN_DIMENSION:
      error_messages.append('Desired width must be greater than %d pixels.\n' %
                            _REQUIRED_MIN_DIMENSION)
    if self.height < _REQUIRED_MIN_DIMENSION:
      # Bug fix: this message previously said 'width' for the height check.
      error_messages.append('Desired height must be greater than %d pixels.\n' %
                            _REQUIRED_MIN_DIMENSION)
    if error_messages:
      raise ValueError('The following invalid conversion settings were found: '
                       '%s' % str(error_messages))

  @property
  def bg_color(self):
    return self._bg_color

  @property
  def bg_mode(self):
    return self._bg_mode

  @property
  def format(self):
    return self._format

  @property
  def height(self):
    return self._height

  @property
  def width(self):
    return self._width

  @property
  def position(self):
    return self._position

  @property
  def resize_if_larger(self):
    return self._resize_if_larger

  @property
  def preserve_aspect_ratio(self):
    return self._preserve_aspect_ratio
class ImageConverter(object):
  """Converts a PIL image to the size/format given by ImageConvertSettings."""

  def __init__(self, image, image_convert_settings):
    """Initialize converter.

    Args:
      image: input PIL Image.
      image_convert_settings: ImageConvertSettings object.
    """
    self._orig_img = image
    self._settings = image_convert_settings
    self._desired_size = (image_convert_settings.width,
                          image_convert_settings.height)
    self._desired_format = str(image_convert_settings.format).lower()
    self._preserve_aspect_ratio = image_convert_settings.preserve_aspect_ratio
    self._resize_if_larger = image_convert_settings.resize_if_larger
    self._position = image_convert_settings.position  # Tuple (x, y) as decimal.
    # Background settings
    self._bg_mode = image_convert_settings.bg_mode
    self._bg_color = image_convert_settings.bg_color

  def convert(self):
    """Returns a converted image as a PIL Image object."""
    # TODO: Do we need an option to allow pre-cropping?
    if (self._orig_img.size == self._desired_size and
        self._orig_img.format == self._desired_format.upper()):
      logging.debug('Desired image is same format and size as original image'
                    'so no conversion needed.')
      return self._orig_img
    # Desired image is larger than original image.
    if (self._desired_size[0] > self._orig_img.size[0] and
        self._desired_size[1] > self._orig_img.size[1]):
      if self._preserve_aspect_ratio and self._resize_if_larger:
        return self._resize_larger_keep_aspect_ratio()
      elif self._resize_if_larger:
        return self._resize_larger_dont_keep_aspect_ratio()
      return self._pad_to_larger_size()
    if self._preserve_aspect_ratio:
      return self._resize_thumbnail_keep_aspect_ratio()
    return self._resize_thumbnail_and_crop()

  def _resize_thumbnail_keep_aspect_ratio(self):
    """Resize image to fit the new smaller size, retaining aspect ratio.

    Uses the PIL thumbnail function, which guarantees the aspect ratio is
    maintained but only guarantees the result does not exceed the desired
    size. The thumbnail is then pasted onto a background canvas of exactly
    the desired size, offset according to self._position.

    Returns:
      PIL Image of exactly the desired size.
    """
    # We use Image.ANTIALIAS for better quality (the recommended PIL setting;
    # aliased to LANCZOS in newer Pillow versions).
    self._orig_img.thumbnail(self._desired_size, Image.ANTIALIAS)
    # Distribute the leftover space per the position setting (0.5 = centered).
    offset = (
        int(
            round(
                (self._desired_size[0] - self._orig_img.size[0])
                * self._position[0])),
        int(
            round(
                (self._desired_size[1] - self._orig_img.size[1])
                * self._position[1])))
    bkgd_img = Image.new(self._bg_mode, self._desired_size, self._bg_color)
    bkgd_img.paste(self._orig_img, offset)
    return bkgd_img

  def _resize_thumbnail_and_crop(self):
    """Resize image smaller, not retaining aspect ratio.

    Reduces size to fit the smallest of the dimensions and crops the larger
    dimension as necessary, centered according to self._position.
    """
    logging.debug('Reducing image size and cropping as necessary.')
    return ImageOps.fit(image=self._orig_img, size=self._desired_size,
                        centering=self._position)

  def _pad_to_larger_size(self):
    """Pad the original image with background up to the larger desired size.

    Bug fix: the offset previously placed the image's top-left corner at the
    center of the canvas (desired_size / 2) instead of centering the image,
    pushing it into the bottom-right quadrant. The leftover space is now
    distributed according to self._position (0.5, 0.5 == centered),
    consistent with _resize_thumbnail_keep_aspect_ratio.
    """
    offset = (
        int(round((self._desired_size[0] - self._orig_img.size[0])
                  * self._position[0])),
        int(round((self._desired_size[1] - self._orig_img.size[1])
                  * self._position[1])))
    bkgd_img = Image.new(self._bg_mode, self._desired_size, self._bg_color)
    bkgd_img.paste(self._orig_img, offset)
    return bkgd_img

  def _resize_larger_dont_keep_aspect_ratio(self):
    """Stretch image to fit new size, NOT retaining the aspect ratio."""
    logging.info('Desired size is larger than source image size.')
    return ImageOps.fit(image=self._orig_img, size=self._desired_size)

  def _resize_larger_keep_aspect_ratio(self):
    """Stretch image to fit new size, keeping the aspect ratio."""
    orig_w, orig_h = self._orig_img.size
    # Smallest ratio that keeps the scaled image inside the desired frame.
    # True division is guaranteed by the module's `from __future__ import
    # division`.
    resize_ratio = min(self._desired_size[0] / orig_w,
                       self._desired_size[1] / orig_h)
    # Bug fix: width/height were previously computed from the swapped source
    # dimensions (width from orig_h and vice versa), distorting any
    # non-square image.
    (new_width, new_height) = (int(round(orig_w * resize_ratio)),
                               int(round(orig_h * resize_ratio)))
    new_img = ImageOps.fit(image=self._orig_img,
                           size=(new_width, new_height))
    # If after stretching, it's the desired size already, then return image.
    if self._desired_size == (new_width, new_height):
      return new_img
    # Otherwise pad it onto a background canvas of exactly the desired size.
    offset = (int(round((self._desired_size[0] - new_width) / 2)),
              int(round((self._desired_size[1] - new_height) / 2)))
    bkgd_img = Image.new(self._bg_mode, self._desired_size, self._bg_color)
    bkgd_img.paste(new_img, offset)
    return bkgd_img
def create_default_image(image_convert_settings):
  """Returns a default PIL image for use on image retrieval/conversion failures.

  Helper for creating a plain background-colored image to stand in for
  images that fail retrieval or conversion in Atlas.

  Returns:
    PIL Image.
  """
  settings = image_convert_settings
  canvas_size = (settings.width, settings.height)
  return Image.new(settings.bg_mode, canvas_size, settings.bg_color)
import logging
import io
from ..common import SourceLocation
from .token import CToken
from ..tools.handlexer import HandLexerBase, Char
class SourceFile:
    """Bookkeeping for the file currently being lexed."""

    def __init__(self, name):
        self.filename = name
        self.row = 1  # 1-based line number of the current position

    def __repr__(self):
        return '<SourceFile at %s:%s>' % (self.filename, self.row)
def create_characters(f, source_file):
""" Create a sequence of characters """
for row, line in enumerate(f, 1):
line = line.expandtabs()
for col, char in enumerate(line, 1):
loc = SourceLocation(
source_file.filename, source_file.row, col, 1)
yield Char(char, loc)
source_file.row += 1
def trigraph_filter(characters):
""" Replace trigraphs in a character sequence """
tri_map = {
'=': '#',
'(': '[',
')': ']',
'<': '{',
'>': '}',
'-': '~',
'!': '|',
'/': '\\',
"'": '^',
}
buf = []
for char in characters:
buf.append(char)
if len(buf) >= 3:
if buf[0].char == '?' and buf[1].char == '?' and \
buf[2].char in tri_map:
loc = buf.pop(0).loc
buf.pop(0)
char = tri_map[buf.pop(0).char]
yield Char(char, loc)
else:
yield buf.pop(0)
for c in buf:
yield c
def continued_lines_filter(characters):
r""" Glue lines which end with a backslash '\' """
backslash = None
for char in characters:
if backslash:
if char.char in '\r\n':
pass
else:
yield backslash
yield char
backslash = False
else:
if char.char == '\\':
backslash = char
else:
yield char
def lex_text(text, coptions):
""" Lex a piece of text """
lexer = CLexer(coptions)
return list(lexer.lex_text(text))
class CLexer(HandLexerBase):
""" Lexer used for the preprocessor """
logger = logging.getLogger('clexer')
lower_letters = 'abcdefghijklmnopqrstuvwxyz'
upper_letters = lower_letters.upper()
binary_numbers = '01'
octal_numbers = binary_numbers + '234567'
numbers = octal_numbers + '89'
hex_numbers = numbers + 'abcdefABCDEF'
def __init__(self, coptions):
super().__init__()
self.coptions = coptions
def lex(self, src, source_file):
""" Read a source and generate a series of tokens """
self.logger.debug('Lexing %s', source_file.filename)
characters = create_characters(src, source_file)
if self.coptions['trigraphs']:
characters = trigraph_filter(characters)
characters = continued_lines_filter(characters)
# print('=== lex ')
# print(s)
# print('=== end lex ')
# s = '\n'.join(r)
return self.tokenize(characters)
def lex_text(self, txt):
""" Create tokens from the given text """
f = io.StringIO(txt)
filename = None
source_file = SourceFile(filename)
characters = characters = create_characters(f, source_file)
return self.tokenize(characters)
def tokenize(self, characters):
""" Generate tokens from characters """
space = ''
first = True
token = None
for token in super().tokenize(characters, self.lex_c):
if token.typ == 'BOL':
if first:
# Yield an extra start of line
yield CToken('BOL', '', '', first, token.loc)
first = True
space = ''
elif token.typ == 'WS':
space += token.val
else:
yield CToken(token.typ, token.val, space, first, token.loc)
space = ''
first = False
# Emit last newline:
if first and token:
# Yield an extra start of line
yield CToken('BOL', '', '', first, token.loc)
def lex_c(self):
""" Root parsing function """
r = self.next_char()
if r is None:
pass
elif r.char == 'L':
# Wide char or identifier
if self.accept("'"):
return self.lex_char
else:
return self.lex_identifier
elif r.char in self.lower_letters + self.upper_letters + '_':
return self.lex_identifier
elif r.char in self.numbers:
self.backup_char(r)
return self.lex_number
elif r.char in ' \t':
return self.lex_whitespace
elif r.char in '\n':
self.emit('BOL')
return self.lex_c
elif r.char == '\f':
# Skip form feed ^L chr(0xc) character
self.ignore()
return self.lex_c
elif r.char == '/':
if self.accept('/'):
if self.coptions['std'] == 'c89':
self.error('C++ style comments are not allowed in C90')
return self.lex_linecomment
elif self.accept('*'):
return self.lex_blockcomment
elif self.accept('='):
self.emit('/=')
return self.lex_c
else:
self.emit('/')
return self.lex_c
elif r.char == '"':
return self.lex_string
elif r.char == "'":
return self.lex_char
elif r.char == '<':
if self.accept('='):
self.emit('<=')
elif self.accept('<'):
if self.accept('='):
self.emit('<<')
else:
self.emit('<<')
else:
self.emit('<')
return self.lex_c
elif r.char == '>':
if self.accept('='):
self.emit('>=')
elif self.accept('>'):
if self.accept('='):
self.emit('>>=')
else:
self.emit('>>')
else:
self.emit('>')
return self.lex_c
elif r.char == '=':
if self.accept('='):
self.emit('==')
else:
self.emit('=')
return self.lex_c
elif r.char == '!':
if self.accept('='):
self.emit('!=')
else:
self.emit('!')
return self.lex_c
elif r.char == '|':
if self.accept('|'):
self.emit('||')
elif self.accept('='):
self.emit('|=')
else:
self.emit('|')
return self.lex_c
elif r.char == '&':
if self.accept('&'):
self.emit('&&')
elif self.accept('='):
self.emit('&=')
else:
self.emit('&')
return self.lex_c
elif r.char == '#':
if self.accept('#'):
self.emit('##')
else:
self.emit('#')
return self.lex_c
elif r.char == '+':
if self.accept('+'):
self.emit('++')
elif self.accept('='):
self.emit('+=')
else:
self.emit('+')
return self.lex_c
elif r.char == '-':
if self.accept('-'):
self.emit('--')
elif self.accept('='):
self.emit('-=')
elif self.accept('>'):
self.emit('->')
else:
self.emit('-')
return self.lex_c
elif r.char == '*':
if self.accept('='):
self.emit('*=')
else:
self.emit('*')
return self.lex_c
elif r.char == '%':
if self.accept('='):
self.emit('%=')
else:
self.emit('%')
return self.lex_c
elif r.char == '^':
if self.accept('='):
self.emit('^=')
else:
self.emit('^')
return self.lex_c
elif r.char == '~':
if self.accept('='):
self.emit('~=')
else:
self.emit('~')
return self.lex_c
elif r.char == '.':
if self.accept_sequence(['.', '.']):
self.emit('...')
elif self.accept(self.numbers):
# We got .[0-9]
return self.lex_float
else:
self.emit('.')
return self.lex_c
elif r.char in ';{}()[],?:':
self.emit(r.char)
return self.lex_c
elif r.char == "\\":
self.emit(r.char)
return self.lex_c
else: # pragma: no cover
raise NotImplementedError(r)
def lex_identifier(self):
id_chars = self.lower_letters + self.upper_letters + self.numbers + '_'
self.accept_run(id_chars)
self.emit('ID')
return self.lex_c
def lex_number(self):
if self.accept('0'):
# Octal, binary or hex!
if self.accept('xX'):
number_chars = self.hex_numbers
elif self.accept('bB'):
number_chars = self.binary_numbers
else:
number_chars = self.octal_numbers
else:
number_chars = self.numbers
# Accept a series of number characters:
self.accept_run(number_chars)
if self.accept('.'):
return self.lex_float()
else:
# Accept some suffixes:
self.accept('LlUu')
self.accept('LlUu')
# TODO: handle suffixes better
self.accept('LlUu')
# self.accept('
self.emit('NUMBER')
return self.lex_c
def lex_float(self):
self.accept_run(self.numbers)
if self.accept('eEpP'):
self.accept('+-')
self.accept_run(self.numbers)
self.emit('NUMBER')
return self.lex_c
def lex_whitespace(self):
self.accept_run(' \t')
self.emit('WS')
return self.lex_c
def lex_linecomment(self):
c = self.next_char()
while c and c.char != '\n':
c = self.next_char()
self.backup_char(c)
self.ignore()
return self.lex_c
def lex_blockcomment(self):
while True:
if self.accept('*'):
if self.accept('/'):
self.ignore()
# self.emit('WS')
break
else:
self.next_char(eof=False)
return self.lex_c
def lex_string(self):
""" Scan for a complete string """
c = self.next_char(eof=False)
while c.char != '"':
if c.char == '\\':
self._handle_escape_character()
c = self.next_char(eof=False)
self.emit('STRING')
return self.lex_c
def lex_char(self):
""" Scan for a complete character constant """
if self.accept("\\"):
self._handle_escape_character()
else:
# Normal char:
self.next_char(eof=False)
self.expect("'")
self.emit('CHAR')
return self.lex_c
def _handle_escape_character(self):
# Escape char!
if self.accept("'\"?\\abfnrtv"):
pass
elif self.accept(self.octal_numbers):
self.accept(self.octal_numbers)
self.accept(self.octal_numbers)
elif self.accept('x'):
self.accept(self.hex_numbers)
self.accept(self.hex_numbers)
elif self.accept('u'):
self.accept(self.hex_numbers)
self.accept(self.hex_numbers)
self.accept(self.hex_numbers)
self.accept(self.hex_numbers)
elif self.accept('U'):
self.accept(self.hex_numbers)
self.accept(self.hex_numbers)
self.accept(self.hex_numbers)
self.accept(self.hex_numbers)
else:
self.error('Unexpected escape character') | ppci/lang/c/lexer.py |
import logging
import io
from ..common import SourceLocation
from .token import CToken
from ..tools.handlexer import HandLexerBase, Char
class SourceFile:
""" Presents the current file. """
def __init__(self, name):
self.filename = name
self.row = 1
def __repr__(self):
return '<SourceFile at {}:{}>'.format(self.filename, self.row)
def create_characters(f, source_file):
""" Create a sequence of characters """
for row, line in enumerate(f, 1):
line = line.expandtabs()
for col, char in enumerate(line, 1):
loc = SourceLocation(
source_file.filename, source_file.row, col, 1)
yield Char(char, loc)
source_file.row += 1
def trigraph_filter(characters):
""" Replace trigraphs in a character sequence """
tri_map = {
'=': '#',
'(': '[',
')': ']',
'<': '{',
'>': '}',
'-': '~',
'!': '|',
'/': '\\',
"'": '^',
}
buf = []
for char in characters:
buf.append(char)
if len(buf) >= 3:
if buf[0].char == '?' and buf[1].char == '?' and \
buf[2].char in tri_map:
loc = buf.pop(0).loc
buf.pop(0)
char = tri_map[buf.pop(0).char]
yield Char(char, loc)
else:
yield buf.pop(0)
for c in buf:
yield c
def continued_lines_filter(characters):
r""" Glue lines which end with a backslash '\' """
backslash = None
for char in characters:
if backslash:
if char.char in '\r\n':
pass
else:
yield backslash
yield char
backslash = False
else:
if char.char == '\\':
backslash = char
else:
yield char
def lex_text(text, coptions):
""" Lex a piece of text """
lexer = CLexer(coptions)
return list(lexer.lex_text(text))
class CLexer(HandLexerBase):
""" Lexer used for the preprocessor """
logger = logging.getLogger('clexer')
lower_letters = 'abcdefghijklmnopqrstuvwxyz'
upper_letters = lower_letters.upper()
binary_numbers = '01'
octal_numbers = binary_numbers + '234567'
numbers = octal_numbers + '89'
hex_numbers = numbers + 'abcdefABCDEF'
def __init__(self, coptions):
super().__init__()
self.coptions = coptions
def lex(self, src, source_file):
""" Read a source and generate a series of tokens """
self.logger.debug('Lexing %s', source_file.filename)
characters = create_characters(src, source_file)
if self.coptions['trigraphs']:
characters = trigraph_filter(characters)
characters = continued_lines_filter(characters)
# print('=== lex ')
# print(s)
# print('=== end lex ')
# s = '\n'.join(r)
return self.tokenize(characters)
def lex_text(self, txt):
""" Create tokens from the given text """
f = io.StringIO(txt)
filename = None
source_file = SourceFile(filename)
characters = characters = create_characters(f, source_file)
return self.tokenize(characters)
def tokenize(self, characters):
""" Generate tokens from characters """
space = ''
first = True
token = None
for token in super().tokenize(characters, self.lex_c):
if token.typ == 'BOL':
if first:
# Yield an extra start of line
yield CToken('BOL', '', '', first, token.loc)
first = True
space = ''
elif token.typ == 'WS':
space += token.val
else:
yield CToken(token.typ, token.val, space, first, token.loc)
space = ''
first = False
# Emit last newline:
if first and token:
# Yield an extra start of line
yield CToken('BOL', '', '', first, token.loc)
def lex_c(self):
""" Root parsing function """
r = self.next_char()
if r is None:
pass
elif r.char == 'L':
# Wide char or identifier
if self.accept("'"):
return self.lex_char
else:
return self.lex_identifier
elif r.char in self.lower_letters + self.upper_letters + '_':
return self.lex_identifier
elif r.char in self.numbers:
self.backup_char(r)
return self.lex_number
elif r.char in ' \t':
return self.lex_whitespace
elif r.char in '\n':
self.emit('BOL')
return self.lex_c
elif r.char == '\f':
# Skip form feed ^L chr(0xc) character
self.ignore()
return self.lex_c
elif r.char == '/':
if self.accept('/'):
if self.coptions['std'] == 'c89':
self.error('C++ style comments are not allowed in C90')
return self.lex_linecomment
elif self.accept('*'):
return self.lex_blockcomment
elif self.accept('='):
self.emit('/=')
return self.lex_c
else:
self.emit('/')
return self.lex_c
elif r.char == '"':
return self.lex_string
elif r.char == "'":
return self.lex_char
elif r.char == '<':
if self.accept('='):
self.emit('<=')
elif self.accept('<'):
if self.accept('='):
self.emit('<<')
else:
self.emit('<<')
else:
self.emit('<')
return self.lex_c
elif r.char == '>':
if self.accept('='):
self.emit('>=')
elif self.accept('>'):
if self.accept('='):
self.emit('>>=')
else:
self.emit('>>')
else:
self.emit('>')
return self.lex_c
elif r.char == '=':
if self.accept('='):
self.emit('==')
else:
self.emit('=')
return self.lex_c
elif r.char == '!':
if self.accept('='):
self.emit('!=')
else:
self.emit('!')
return self.lex_c
elif r.char == '|':
if self.accept('|'):
self.emit('||')
elif self.accept('='):
self.emit('|=')
else:
self.emit('|')
return self.lex_c
elif r.char == '&':
if self.accept('&'):
self.emit('&&')
elif self.accept('='):
self.emit('&=')
else:
self.emit('&')
return self.lex_c
elif r.char == '#':
if self.accept('#'):
self.emit('##')
else:
self.emit('#')
return self.lex_c
elif r.char == '+':
if self.accept('+'):
self.emit('++')
elif self.accept('='):
self.emit('+=')
else:
self.emit('+')
return self.lex_c
elif r.char == '-':
if self.accept('-'):
self.emit('--')
elif self.accept('='):
self.emit('-=')
elif self.accept('>'):
self.emit('->')
else:
self.emit('-')
return self.lex_c
elif r.char == '*':
if self.accept('='):
self.emit('*=')
else:
self.emit('*')
return self.lex_c
elif r.char == '%':
if self.accept('='):
self.emit('%=')
else:
self.emit('%')
return self.lex_c
elif r.char == '^':
if self.accept('='):
self.emit('^=')
else:
self.emit('^')
return self.lex_c
elif r.char == '~':
if self.accept('='):
self.emit('~=')
else:
self.emit('~')
return self.lex_c
elif r.char == '.':
if self.accept_sequence(['.', '.']):
self.emit('...')
elif self.accept(self.numbers):
# We got .[0-9]
return self.lex_float
else:
self.emit('.')
return self.lex_c
elif r.char in ';{}()[],?:':
self.emit(r.char)
return self.lex_c
elif r.char == "\\":
self.emit(r.char)
return self.lex_c
else: # pragma: no cover
raise NotImplementedError(r)
def lex_identifier(self):
id_chars = self.lower_letters + self.upper_letters + self.numbers + '_'
self.accept_run(id_chars)
self.emit('ID')
return self.lex_c
def lex_number(self):
if self.accept('0'):
# Octal, binary or hex!
if self.accept('xX'):
number_chars = self.hex_numbers
elif self.accept('bB'):
number_chars = self.binary_numbers
else:
number_chars = self.octal_numbers
else:
number_chars = self.numbers
# Accept a series of number characters:
self.accept_run(number_chars)
if self.accept('.'):
return self.lex_float()
else:
# Accept some suffixes:
self.accept('LlUu')
self.accept('LlUu')
# TODO: handle suffixes better
self.accept('LlUu')
# self.accept('
self.emit('NUMBER')
return self.lex_c
def lex_float(self):
self.accept_run(self.numbers)
if self.accept('eEpP'):
self.accept('+-')
self.accept_run(self.numbers)
self.emit('NUMBER')
return self.lex_c
def lex_whitespace(self):
self.accept_run(' \t')
self.emit('WS')
return self.lex_c
def lex_linecomment(self):
c = self.next_char()
while c and c.char != '\n':
c = self.next_char()
self.backup_char(c)
self.ignore()
return self.lex_c
def lex_blockcomment(self):
while True:
if self.accept('*'):
if self.accept('/'):
self.ignore()
# self.emit('WS')
break
else:
self.next_char(eof=False)
return self.lex_c
def lex_string(self):
""" Scan for a complete string """
c = self.next_char(eof=False)
while c.char != '"':
if c.char == '\\':
self._handle_escape_character()
c = self.next_char(eof=False)
self.emit('STRING')
return self.lex_c
def lex_char(self):
""" Scan for a complete character constant """
if self.accept("\\"):
self._handle_escape_character()
else:
# Normal char:
self.next_char(eof=False)
self.expect("'")
self.emit('CHAR')
return self.lex_c
def _handle_escape_character(self):
# Escape char!
if self.accept("'\"?\\abfnrtv"):
pass
elif self.accept(self.octal_numbers):
self.accept(self.octal_numbers)
self.accept(self.octal_numbers)
elif self.accept('x'):
self.accept(self.hex_numbers)
self.accept(self.hex_numbers)
elif self.accept('u'):
self.accept(self.hex_numbers)
self.accept(self.hex_numbers)
self.accept(self.hex_numbers)
self.accept(self.hex_numbers)
elif self.accept('U'):
self.accept(self.hex_numbers)
self.accept(self.hex_numbers)
self.accept(self.hex_numbers)
self.accept(self.hex_numbers)
else:
self.error('Unexpected escape character') | 0.345989 | 0.158142 |
import json
import numpy as np
_json_keys = dict(
data="arraydata",
dtype="arraydtype",
shape="arraysize"
)
_complex_keys = dict(
real="real",
imag="imag"
)
def dumps(data, json_keys=None, complex_keys=None):
global _json_keys, _complex_keys
if json_keys is None:
json_keys = _json_keys
if complex_keys is None:
complex_keys = _complex_keys
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
data = {
json_keys['data']: obj.tolist(),
json_keys['shape']: obj.shape
}
if 'dtype' in json_keys:
data[json_keys['dtype']] = str(obj.dtype)
return data
if isinstance(obj, np.complex):
return {
complex_keys['real']: obj.real,
complex_keys['imag']: obj.imag,
}
return json.JSONEncoder(self, obj)
return json.dumps(data, cls=NumpyEncoder)
def loads(json_string, json_keys=None, complex_keys=None):
global _json_keys, _complex_keys
if json_keys is None:
json_keys = _json_keys
if complex_keys is None:
complex_keys = _complex_keys
def json_numpy_obj_hook(data):
if isinstance(data, dict) and json_keys['data'] in data and json_keys['shape'] in data:
if 'dtype' in json_keys and json_keys['dtype'] in data:
dtype = data[json_keys['dtype']]
else:
dtype = np.float
return np.asarray(data[json_keys['data']], dtype=dtype).reshape(data[json_keys['shape']])
if isinstance(data, dict) and complex_keys['real'] in data and complex_keys['imag'] in data:
return complex(data[complex_keys['real']], data[complex_keys['imag']])
return data
return json.loads(json_string, object_hook=json_numpy_obj_hook) | numpy_json.py | import json
import numpy as np
_json_keys = dict(
data="arraydata",
dtype="arraydtype",
shape="arraysize"
)
_complex_keys = dict(
real="real",
imag="imag"
)
def dumps(data, json_keys=None, complex_keys=None):
global _json_keys, _complex_keys
if json_keys is None:
json_keys = _json_keys
if complex_keys is None:
complex_keys = _complex_keys
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
data = {
json_keys['data']: obj.tolist(),
json_keys['shape']: obj.shape
}
if 'dtype' in json_keys:
data[json_keys['dtype']] = str(obj.dtype)
return data
if isinstance(obj, np.complex):
return {
complex_keys['real']: obj.real,
complex_keys['imag']: obj.imag,
}
return json.JSONEncoder(self, obj)
return json.dumps(data, cls=NumpyEncoder)
def loads(json_string, json_keys=None, complex_keys=None):
global _json_keys, _complex_keys
if json_keys is None:
json_keys = _json_keys
if complex_keys is None:
complex_keys = _complex_keys
def json_numpy_obj_hook(data):
if isinstance(data, dict) and json_keys['data'] in data and json_keys['shape'] in data:
if 'dtype' in json_keys and json_keys['dtype'] in data:
dtype = data[json_keys['dtype']]
else:
dtype = np.float
return np.asarray(data[json_keys['data']], dtype=dtype).reshape(data[json_keys['shape']])
if isinstance(data, dict) and complex_keys['real'] in data and complex_keys['imag'] in data:
return complex(data[complex_keys['real']], data[complex_keys['imag']])
return data
return json.loads(json_string, object_hook=json_numpy_obj_hook) | 0.298389 | 0.192862 |
import django
if django.VERSION < (1, 8):
from django.core.context_processors import csrf
else:
from django.template.context_processors import csrf
from django.shortcuts import redirect, render
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseForbidden
from .. import models as m
from ..penury import set_limit
from .getters import get_delivery
@login_required()
def edit_user_purchases(request, delivery):
"""Let user order for himself, or modified an order on an open delivery."""
delivery = get_delivery(delivery)
user = request.user
if delivery.state != delivery.ORDERING_ALL:
return HttpResponseForbidden("Cette commande n'est pas ouverte.")
if request.method == 'POST':
if _parse_form(request):
return redirect("index")
else:
# TODO: display errors in template
return redirect("edit_user_purchases", delivery=delivery.id)
else:
order = m.Order(user, delivery, with_dummies=True)
some_packaged = any(pc.product.quantity_per_package is not None for pc in order.purchases)
for pc in order.purchases:
pc.described = (pc.product.description or pc.product.image) and pc.max_quantity != 0
vars = {
'user': user,
'delivery': delivery,
'subgroup': delivery.network.subgroup_set.get(users__in=[user]),
'order': order,
'some_packaged': some_packaged,
'out_of_stock_colspan': 6 if some_packaged else 5,
'total_padding_right_colspan': 2 if some_packaged else 1,
'description_colspan': 7 if some_packaged else 6,
}
vars.update(csrf(request))
return render(request,'edit_user_purchases.html', vars)
def _parse_form(request):
"""
Parse responses from user purchases.
:param request:
:return:
"""
d = request.POST
dv = m.Delivery.objects.get(pk=int(d['dv-id']))
od = m.Order(request.user, dv, with_dummies=True)
prev_purchases = {pc.product: pc for pc in od.purchases}
for pd in dv.product_set.all():
try:
ordered = float(d.get("pd%s" % pd.id, "0"))
except ValueError: # Field present, didn't contain a valid float
continue
pc = prev_purchases[pd]
if pc.quantity == ordered: # No change
pass
elif ordered == 0: # Cancel existing purchase
pc.delete()
elif pc.quantity == 0: # Create a non-dummy purchase
pc = m.Purchase.objects.create(user=request.user, product=pd, quantity=ordered)
set_limit(pd, last_pc=pc) # In case of penury
else: # Update existing purchase quantity
pc.quantity = ordered
pc.save()
set_limit(pd, last_pc=pc) # In case of penury
m.JournalEntry.log(request.user, "Modified their purchases for dv-%d %s/%s", dv.id, dv.network.name, dv.name)
return True # true == no error | floreal/views/edit_user_purchases.py |
import django
if django.VERSION < (1, 8):
from django.core.context_processors import csrf
else:
from django.template.context_processors import csrf
from django.shortcuts import redirect, render
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseForbidden
from .. import models as m
from ..penury import set_limit
from .getters import get_delivery
@login_required()
def edit_user_purchases(request, delivery):
"""Let user order for himself, or modified an order on an open delivery."""
delivery = get_delivery(delivery)
user = request.user
if delivery.state != delivery.ORDERING_ALL:
return HttpResponseForbidden("Cette commande n'est pas ouverte.")
if request.method == 'POST':
if _parse_form(request):
return redirect("index")
else:
# TODO: display errors in template
return redirect("edit_user_purchases", delivery=delivery.id)
else:
order = m.Order(user, delivery, with_dummies=True)
some_packaged = any(pc.product.quantity_per_package is not None for pc in order.purchases)
for pc in order.purchases:
pc.described = (pc.product.description or pc.product.image) and pc.max_quantity != 0
vars = {
'user': user,
'delivery': delivery,
'subgroup': delivery.network.subgroup_set.get(users__in=[user]),
'order': order,
'some_packaged': some_packaged,
'out_of_stock_colspan': 6 if some_packaged else 5,
'total_padding_right_colspan': 2 if some_packaged else 1,
'description_colspan': 7 if some_packaged else 6,
}
vars.update(csrf(request))
return render(request,'edit_user_purchases.html', vars)
def _parse_form(request):
"""
Parse responses from user purchases.
:param request:
:return:
"""
d = request.POST
dv = m.Delivery.objects.get(pk=int(d['dv-id']))
od = m.Order(request.user, dv, with_dummies=True)
prev_purchases = {pc.product: pc for pc in od.purchases}
for pd in dv.product_set.all():
try:
ordered = float(d.get("pd%s" % pd.id, "0"))
except ValueError: # Field present, didn't contain a valid float
continue
pc = prev_purchases[pd]
if pc.quantity == ordered: # No change
pass
elif ordered == 0: # Cancel existing purchase
pc.delete()
elif pc.quantity == 0: # Create a non-dummy purchase
pc = m.Purchase.objects.create(user=request.user, product=pd, quantity=ordered)
set_limit(pd, last_pc=pc) # In case of penury
else: # Update existing purchase quantity
pc.quantity = ordered
pc.save()
set_limit(pd, last_pc=pc) # In case of penury
m.JournalEntry.log(request.user, "Modified their purchases for dv-%d %s/%s", dv.id, dv.network.name, dv.name)
return True # true == no error | 0.267313 | 0.088939 |
from pathlib import Path
import sqlite3
import pandas as pd
from tqdm import tqdm
from sys import stderr
from imageio import imread, imwrite
import numpy as np
from skimage import transform as tf
from matplotlib import pyplot as plt
from transform_utils import scale_pixel_box_coordinates, crop_image
from compare_one_sr_alpha_mask import get_emoji_rgb_bg, alpha_composite_bg, plot_comparison
SAVING_PLOT = False
JUPYTER = True
osx_dir = Path("osx/catalina/").absolute()
source_dir = osx_dir / "png"
preproc_dir = osx_dir / "bg/"
png_dir = Path("enlarged/").absolute()
out_dir = Path("transparent/").absolute()
png = png_dir / "glyph-u1F343.png"
osx_bw_db = osx_dir / "emoji_bw_calc.db"
NO_OVERWRITE = False
source_png = source_dir / png.name
preproc_png = preproc_dir / png.name
output_png = out_dir / png.name
if output_png.exists() and NO_OVERWRITE:
raise ValueError("Cannot overwrite")
elif not source_png.exists():
raise NameError(f"Expected '{source_png}' corresponding to input '{png.name}'")
# Store (x,y) coordinates of the box of interest
box_top_l = (0,104)
box_bot_r = (56,160)
box = [box_top_l, box_bot_r]
# Remove the mask and show the result
img = imread(png)
source_img = imread(source_png)
preproc_img = imread(preproc_png)
scale = img.shape[0] / source_img.shape[0]
scaled_box = scale_pixel_box_coordinates(box, scale)
source_img_sub = crop_image(source_img, box)
preproc_img_sub = crop_image(preproc_img, box)
source_img_sub_alpha = source_img_sub[:,:,3]
img_sub = crop_image(img, scaled_box)
scaled_preproc_img_sub = tf.resize(
preproc_img_sub[:,:,:3], img_sub.shape, order=0
)
scaled_source_img_sub = tf.resize(
source_img_sub[:,:,:3], img_sub.shape, order=0
)
scaled_source_img_sub_alpha = tf.resize(
source_img_sub_alpha, img_sub[:,:,0].shape, order=0
)
scaled_preproc_img_sub *= (1/scaled_preproc_img_sub.max()) * 255
scaled_preproc_img_sub = scaled_preproc_img_sub.astype(int)#img.dtype)
scaled_source_img_sub *= (1/scaled_source_img_sub.max()) * 255
scaled_source_img_sub = scaled_source_img_sub.astype(int)#img.dtype)
scaled_source_img_sub_alpha *= (1/scaled_source_img_sub_alpha.max()) * 255
scaled_source_img_sub_alpha = scaled_source_img_sub_alpha.astype(int)#img.dtype)
scaled_source_img_sub_im = scaled_source_img_sub.copy() # Retain 3 channel copy
scaled_source_img_sub = np.insert(scaled_source_img_sub, 3, scaled_source_img_sub_alpha, axis=2)
composited_grad = img_sub.astype(int) - scaled_preproc_img_sub
# Rescale from [-255,+255] to [0,1] by incrementing +255 then squashing by half
composited_grad = ((composited_grad + 255) / (255*2))
composited_grad *= scaled_source_img_sub_alpha[:,:,None]
composited_grad /= 255
# Rescale all opaque regions to 1 (and clip any above 1 now)
previous_max_alpha = scaled_source_img_sub_alpha == 255
min_of_previous_max_alpha = composited_grad[previous_max_alpha].min()
composited_grad *= (0.5/min_of_previous_max_alpha)
#composited_grad[scaled_source_img_sub_alpha == 255] = 0.5
#composited_grad /= composited_grad.max()
#breakpoint()
decomp_alpha = (scaled_source_img_sub_alpha / 255) + composited_grad.max(axis=2)
# Now rearrange to acquire the estimatable part (the "estimand")
i_in = (scaled_source_img_sub_alpha[:,:,None]/255) * (scaled_source_img_sub_im/255) # alpha_source * im_source
#breakpoint()
# If squashing composited_grad to [0,1] then don't need to divide it by 255 here
#estimand = (composited_grad/255) - (scaled_source_img_sub_alpha[:,:,None] * i_in)
estimand = composited_grad - (scaled_source_img_sub_alpha[:,:,None] * i_in)
#fig1, f1_axes = plot_comparison(
# scaled_source_img_sub_alpha,
# scaled_source_img_sub,
# img_sub,
# composited_grad,
# decomp_alpha,
# SAVING_PLOT
#)
#fig1.show()
decomposited = np.insert(img_sub, 3, decomp_alpha * 255, axis=2)
all_black = np.zeros_like(scaled_source_img_sub, dtype=img.dtype)
all_black[:,:,3] = 255
fig2, f2_axes = plot_comparison(
scaled_source_img_sub_alpha,
scaled_source_img_sub,
img_sub,
composited_grad,
all_black,
SAVING_PLOT
)
f2_axes[-1].imshow(decomposited)
#fig2.show()
recomposited = alpha_composite_bg(decomposited, 0)
fig3, f3_axes = plot_comparison(
scaled_source_img_sub_alpha,
scaled_source_img_sub,
img_sub,
composited_grad,
recomposited,
SAVING_PLOT
)
f3_axes[-1].imshow(recomposited)
fig3.show() | recover_leaf_sr_transparency.py | from pathlib import Path
import sqlite3
import pandas as pd
from tqdm import tqdm
from sys import stderr
from imageio import imread, imwrite
import numpy as np
from skimage import transform as tf
from matplotlib import pyplot as plt
from transform_utils import scale_pixel_box_coordinates, crop_image
from compare_one_sr_alpha_mask import get_emoji_rgb_bg, alpha_composite_bg, plot_comparison
SAVING_PLOT = False
JUPYTER = True
osx_dir = Path("osx/catalina/").absolute()
source_dir = osx_dir / "png"
preproc_dir = osx_dir / "bg/"
png_dir = Path("enlarged/").absolute()
out_dir = Path("transparent/").absolute()
png = png_dir / "glyph-u1F343.png"
osx_bw_db = osx_dir / "emoji_bw_calc.db"
NO_OVERWRITE = False
source_png = source_dir / png.name
preproc_png = preproc_dir / png.name
output_png = out_dir / png.name
if output_png.exists() and NO_OVERWRITE:
raise ValueError("Cannot overwrite")
elif not source_png.exists():
raise NameError(f"Expected '{source_png}' corresponding to input '{png.name}'")
# Store (x,y) coordinates of the box of interest
box_top_l = (0,104)
box_bot_r = (56,160)
box = [box_top_l, box_bot_r]
# Remove the mask and show the result
img = imread(png)
source_img = imread(source_png)
preproc_img = imread(preproc_png)
# --- Crop matching regions from the source, preprocessed and composited images ---
# `img` is a rescaled version of `source_img`; derive the scale factor from heights.
scale = img.shape[0] / source_img.shape[0]
scaled_box = scale_pixel_box_coordinates(box, scale)
source_img_sub = crop_image(source_img, box)
preproc_img_sub = crop_image(preproc_img, box)
# Assumes source_img is RGBA — channel 3 is the alpha mask (TODO confirm upstream).
source_img_sub_alpha = source_img_sub[:,:,3]
img_sub = crop_image(img, scaled_box)
# order=0 resize (nearest-neighbour if `tf` is skimage.transform — confirm) of the
# RGB channels onto the composited crop's pixel grid.
scaled_preproc_img_sub = tf.resize(
    preproc_img_sub[:,:,:3], img_sub.shape, order=0
)
scaled_source_img_sub = tf.resize(
    source_img_sub[:,:,:3], img_sub.shape, order=0
)
scaled_source_img_sub_alpha = tf.resize(
    source_img_sub_alpha, img_sub[:,:,0].shape, order=0
)
# Stretch each resized crop to the full [0, 255] range, then cast to int.
scaled_preproc_img_sub *= (1/scaled_preproc_img_sub.max()) * 255
scaled_preproc_img_sub = scaled_preproc_img_sub.astype(int)#img.dtype)
scaled_source_img_sub *= (1/scaled_source_img_sub.max()) * 255
scaled_source_img_sub = scaled_source_img_sub.astype(int)#img.dtype)
scaled_source_img_sub_alpha *= (1/scaled_source_img_sub_alpha.max()) * 255
scaled_source_img_sub_alpha = scaled_source_img_sub_alpha.astype(int)#img.dtype)
scaled_source_img_sub_im = scaled_source_img_sub.copy() # Retain 3 channel copy
# Re-attach the alpha channel to get an RGBA version of the scaled source crop.
scaled_source_img_sub = np.insert(scaled_source_img_sub, 3, scaled_source_img_sub_alpha, axis=2)
# Per-channel difference between composited and preprocessed crops, in [-255, +255].
composited_grad = img_sub.astype(int) - scaled_preproc_img_sub
# Rescale from [-255,+255] to [0,1] by incrementing +255 then squashing by half
composited_grad = ((composited_grad + 255) / (255*2))
# Weight the gradient by the (0-255) alpha mask, then bring it back into [0,1].
composited_grad *= scaled_source_img_sub_alpha[:,:,None]
composited_grad /= 255
# Rescale all opaque regions to 1 (and clip any above 1 now)
previous_max_alpha = scaled_source_img_sub_alpha == 255
min_of_previous_max_alpha = composited_grad[previous_max_alpha].min()
composited_grad *= (0.5/min_of_previous_max_alpha)
#composited_grad[scaled_source_img_sub_alpha == 255] = 0.5
#composited_grad /= composited_grad.max()
#breakpoint()
# Estimated alpha for the decomposited image: source alpha (rescaled to [0,1])
# plus the strongest per-pixel gradient across the colour channels.
decomp_alpha = (scaled_source_img_sub_alpha / 255) + composited_grad.max(axis=2)
# Now rearrange to acquire the estimatable part (the "estimand")
i_in = (scaled_source_img_sub_alpha[:,:,None]/255) * (scaled_source_img_sub_im/255) # alpha_source * im_source
#breakpoint()
# If squashing composited_grad to [0,1] then don't need to divide it by 255 here
#estimand = (composited_grad/255) - (scaled_source_img_sub_alpha[:,:,None] * i_in)
estimand = composited_grad - (scaled_source_img_sub_alpha[:,:,None] * i_in)
#fig1, f1_axes = plot_comparison(
#    scaled_source_img_sub_alpha,
#    scaled_source_img_sub,
#    img_sub,
#    composited_grad,
#    decomp_alpha,
#    SAVING_PLOT
#)
#fig1.show()
# Attach the estimated alpha (back in 0-255) to the composited crop as channel 3.
decomposited = np.insert(img_sub, 3, decomp_alpha * 255, axis=2)
# Fully opaque black RGBA image used as a placeholder panel in the comparison plot.
all_black = np.zeros_like(scaled_source_img_sub, dtype=img.dtype)
all_black[:,:,3] = 255
fig2, f2_axes = plot_comparison(
    scaled_source_img_sub_alpha,
    scaled_source_img_sub,
    img_sub,
    composited_grad,
    all_black,
    SAVING_PLOT
)
# Overwrite the placeholder panel with the decomposited RGBA image.
f2_axes[-1].imshow(decomposited)
#fig2.show()
# Re-composite the decomposited image over a black (0) background as a sanity check.
recomposited = alpha_composite_bg(decomposited, 0)
fig3, f3_axes = plot_comparison(
    scaled_source_img_sub_alpha,
    scaled_source_img_sub,
    img_sub,
    composited_grad,
    recomposited,
    SAVING_PLOT
)
f3_axes[-1].imshow(recomposited)
fig3.show()
import numpy as np
import torch
from typing import Callable, Dict, Optional, Tuple
from alibi_detect.cd.base import BaseLSDDDrift
from alibi_detect.utils.pytorch.kernels import GaussianRBF
from alibi_detect.utils.pytorch.distance import permed_lsdds
class LSDDDriftTorch(BaseLSDDDrift):
    """PyTorch backend of the least-squares density difference (LSDD) drift detector."""
    def __init__(
            self,
            x_ref: np.ndarray,
            p_val: float = .05,
            preprocess_x_ref: bool = True,
            update_x_ref: Optional[Dict[str, int]] = None,
            preprocess_fn: Optional[Callable] = None,
            sigma: Optional[np.ndarray] = None,
            n_permutations: int = 100,
            n_kernel_centers: Optional[int] = None,
            lambda_rd_max: float = 0.2,
            device: Optional[str] = None,
            input_shape: Optional[tuple] = None,
            data_type: Optional[str] = None
    ) -> None:
        """
        Least-squares density difference (LSDD) data drift detector using a permutation test.
        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        p_val
            p-value used for the significance of the permutation test.
        preprocess_x_ref
            Whether to already preprocess and store the reference data.
        update_x_ref
            Reference data can optionally be updated to the last n instances seen by the detector
            or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
            for reservoir sampling {'reservoir_sampling': n} is passed.
        preprocess_fn
            Function to preprocess the data before computing the data drift metrics.
        sigma
            Optionally set the bandwidth of the Gaussian kernel used in estimating the LSDD. Can also pass multiple
            bandwidth values as an array. The kernel evaluation is then averaged over those bandwidths. If `sigma`
            is not specified, the 'median heuristic' is adopted whereby `sigma` is set as the median pairwise distance
            between reference samples.
        n_permutations
            Number of permutations used in the permutation test.
        n_kernel_centers
            The number of reference samples to use as centers in the Gaussian kernel model used to estimate LSDD.
            Defaults to 1/20th of the reference data.
        lambda_rd_max
            The maximum relative difference between two estimates of LSDD that the regularization parameter
            lambda is allowed to cause. Defaults to 0.2 as in the paper.
        device
            Device type used. The default None tries to use the GPU and falls back on CPU if needed.
            Can be specified by passing either 'cuda', 'gpu' or 'cpu'.
        input_shape
            Shape of input data.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__(
            x_ref=x_ref,
            p_val=p_val,
            preprocess_x_ref=preprocess_x_ref,
            update_x_ref=update_x_ref,
            preprocess_fn=preprocess_fn,
            sigma=sigma,
            n_permutations=n_permutations,
            n_kernel_centers=n_kernel_centers,
            lambda_rd_max=lambda_rd_max,
            input_shape=input_shape,
            data_type=data_type
        )
        self.meta.update({'backend': 'pytorch'})
        # set backend
        if device is None or device.lower() in ['gpu', 'cuda']:
            self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
            if self.device.type == 'cpu':
                print('No GPU detected, fall back on CPU.')
        else:
            self.device = torch.device('cpu')
        # If the reference data is fixed (or no preprocessing is applied), all
        # reference-dependent quantities (normalization stats, kernel bandwidth,
        # kernel centers, H matrix) can be configured once here; otherwise score()
        # reconfigures them on every call.
        if self.preprocess_x_ref or self.preprocess_fn is None:
            x_ref = torch.as_tensor(self.x_ref).to(self.device)
            self._configure_normalization(x_ref)
            x_ref = self._normalize(x_ref)
            self._initialize_kernel(x_ref)
            self._configure_kernel_centers(x_ref)
            # Reference data is stored back in its normalized form.
            self.x_ref = x_ref.cpu().numpy()
            # For stability in high dimensions we don't divide H by (pi*sigma^2)^(d/2)
            # Results in an alternative test-stat of LSDD*(pi*sigma^2)^(d/2). Same p-vals etc.
            self.H = GaussianRBF(np.sqrt(2.)*self.kernel.sigma)(self.kernel_centers, self.kernel_centers)
    def _initialize_kernel(self, x_ref: torch.Tensor):
        """Create the Gaussian RBF kernel, inferring the bandwidth from `x_ref`
        (median heuristic) unless `self.sigma` was supplied by the user."""
        if self.sigma is None:
            self.kernel = GaussianRBF()
            # Evaluation only for its side effect of setting the inferred bandwidth.
            _ = self.kernel(x_ref, x_ref, infer_sigma=True)
        else:
            sigma = torch.from_numpy(self.sigma)
            self.kernel = GaussianRBF(sigma)
    def _configure_normalization(self, x_ref: torch.Tensor, eps: float = 1e-12):
        """Define `self._normalize` as per-feature standardization with the mean and
        std of `x_ref`; `eps` guards against division by zero for constant features."""
        x_ref_means = x_ref.mean(0)
        x_ref_stds = x_ref.std(0)
        self._normalize = lambda x: (torch.as_tensor(x) - x_ref_means)/(x_ref_stds + eps)
    def _configure_kernel_centers(self, x_ref: torch.Tensor):
        "Set aside reference samples to act as kernel centers"
        # NOTE(review): the permutation length uses self.x_ref (the stored numpy
        # reference) rather than the x_ref argument; this assumes both hold the same
        # number of samples — confirm for the on-the-fly preprocess path in score().
        perm = torch.randperm(self.x_ref.shape[0])
        c_inds, non_c_inds = perm[:self.n_kernel_centers], perm[self.n_kernel_centers:]
        self.kernel_centers = x_ref[c_inds]
        # Jitter duplicate centers slightly so the kernel matrix H is not singular.
        if np.unique(self.kernel_centers.cpu().numpy(), axis=0).shape[0] < self.n_kernel_centers:
            perturbation = (torch.randn(self.kernel_centers.shape)*1e-6).to(self.device)
            self.kernel_centers = self.kernel_centers + perturbation
        x_ref_eff = x_ref[non_c_inds]  # the effective reference set
        self.k_xc = self.kernel(x_ref_eff, self.kernel_centers)
    def score(self, x: np.ndarray) -> Tuple[float, float, np.ndarray]:
        """
        Compute the p-value resulting from a permutation test using the least-squares density
        difference as a distance measure between the reference data and the data to be tested.
        Parameters
        ----------
        x
            Batch of instances.
        Returns
        -------
        p-value obtained from the permutation test, the LSDD between the reference and test set
        and the LSDD values from the permutation test.
        """
        x_ref, x = self.preprocess(x)
        x_ref = torch.from_numpy(x_ref).to(self.device)
        x = torch.from_numpy(x).to(self.device)
        # When preprocessing happens on the fly, the reference-dependent quantities
        # were not fixed in __init__ and must be (re)configured here.
        if self.preprocess_fn is not None and self.preprocess_x_ref is False:
            self._configure_normalization(x_ref)
            x_ref = self._normalize(x_ref)
            self._initialize_kernel(x_ref)
            self._configure_kernel_centers(x_ref)
            self.H = GaussianRBF(np.sqrt(2.)*self.kernel.sigma)(self.kernel_centers, self.kernel_centers)
        x = self._normalize(x)
        k_yc = self.kernel(x, self.kernel_centers)
        k_all_c = torch.cat([self.k_xc, k_yc], 0)
        # Number of (non-center) reference samples; remaining rows of k_all_c belong to x.
        n_x = x_ref.shape[0] - self.n_kernel_centers
        n_all = k_all_c.shape[0]
        # Random splits of the pooled sample define the permutation null distribution.
        perms = [torch.randperm(n_all) for _ in range(self.n_permutations)]
        x_perms = [perm[:n_x] for perm in perms]
        y_perms = [perm[n_x:] for perm in perms]
        lsdd_permuted, _, lsdd = permed_lsdds(  # type: ignore
            k_all_c, x_perms, y_perms, self.H, lam_rd_max=self.lambda_rd_max, return_unpermed=True
        )
        # p-value: fraction of permuted LSDDs at least as large as the observed LSDD.
        p_val = (lsdd <= lsdd_permuted).float().mean()
        return float(p_val.cpu()), float(lsdd.cpu().numpy()), lsdd_permuted.cpu().numpy()
import torch
from typing import Callable, Dict, Optional, Tuple
from alibi_detect.cd.base import BaseLSDDDrift
from alibi_detect.utils.pytorch.kernels import GaussianRBF
from alibi_detect.utils.pytorch.distance import permed_lsdds
class LSDDDriftTorch(BaseLSDDDrift):
    """PyTorch backend of the least-squares density difference (LSDD) drift detector."""
    def __init__(
            self,
            x_ref: np.ndarray,
            p_val: float = .05,
            preprocess_x_ref: bool = True,
            update_x_ref: Optional[Dict[str, int]] = None,
            preprocess_fn: Optional[Callable] = None,
            sigma: Optional[np.ndarray] = None,
            n_permutations: int = 100,
            n_kernel_centers: Optional[int] = None,
            lambda_rd_max: float = 0.2,
            device: Optional[str] = None,
            input_shape: Optional[tuple] = None,
            data_type: Optional[str] = None
    ) -> None:
        """
        Least-squares density difference (LSDD) data drift detector using a permutation test.
        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        p_val
            p-value used for the significance of the permutation test.
        preprocess_x_ref
            Whether to already preprocess and store the reference data.
        update_x_ref
            Reference data can optionally be updated to the last n instances seen by the detector
            or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
            for reservoir sampling {'reservoir_sampling': n} is passed.
        preprocess_fn
            Function to preprocess the data before computing the data drift metrics.
        sigma
            Optionally set the bandwidth of the Gaussian kernel used in estimating the LSDD. Can also pass multiple
            bandwidth values as an array. The kernel evaluation is then averaged over those bandwidths. If `sigma`
            is not specified, the 'median heuristic' is adopted whereby `sigma` is set as the median pairwise distance
            between reference samples.
        n_permutations
            Number of permutations used in the permutation test.
        n_kernel_centers
            The number of reference samples to use as centers in the Gaussian kernel model used to estimate LSDD.
            Defaults to 1/20th of the reference data.
        lambda_rd_max
            The maximum relative difference between two estimates of LSDD that the regularization parameter
            lambda is allowed to cause. Defaults to 0.2 as in the paper.
        device
            Device type used. The default None tries to use the GPU and falls back on CPU if needed.
            Can be specified by passing either 'cuda', 'gpu' or 'cpu'.
        input_shape
            Shape of input data.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__(
            x_ref=x_ref,
            p_val=p_val,
            preprocess_x_ref=preprocess_x_ref,
            update_x_ref=update_x_ref,
            preprocess_fn=preprocess_fn,
            sigma=sigma,
            n_permutations=n_permutations,
            n_kernel_centers=n_kernel_centers,
            lambda_rd_max=lambda_rd_max,
            input_shape=input_shape,
            data_type=data_type
        )
        self.meta.update({'backend': 'pytorch'})
        # set backend
        if device is None or device.lower() in ['gpu', 'cuda']:
            self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
            if self.device.type == 'cpu':
                print('No GPU detected, fall back on CPU.')
        else:
            self.device = torch.device('cpu')
        # If the reference data is fixed (or no preprocessing is applied), all
        # reference-dependent quantities (normalization stats, kernel bandwidth,
        # kernel centers, H matrix) can be configured once here; otherwise score()
        # reconfigures them on every call.
        if self.preprocess_x_ref or self.preprocess_fn is None:
            x_ref = torch.as_tensor(self.x_ref).to(self.device)
            self._configure_normalization(x_ref)
            x_ref = self._normalize(x_ref)
            self._initialize_kernel(x_ref)
            self._configure_kernel_centers(x_ref)
            # Reference data is stored back in its normalized form.
            self.x_ref = x_ref.cpu().numpy()
            # For stability in high dimensions we don't divide H by (pi*sigma^2)^(d/2)
            # Results in an alternative test-stat of LSDD*(pi*sigma^2)^(d/2). Same p-vals etc.
            self.H = GaussianRBF(np.sqrt(2.)*self.kernel.sigma)(self.kernel_centers, self.kernel_centers)
    def _initialize_kernel(self, x_ref: torch.Tensor):
        """Create the Gaussian RBF kernel, inferring the bandwidth from `x_ref`
        (median heuristic) unless `self.sigma` was supplied by the user."""
        if self.sigma is None:
            self.kernel = GaussianRBF()
            # Evaluation only for its side effect of setting the inferred bandwidth.
            _ = self.kernel(x_ref, x_ref, infer_sigma=True)
        else:
            sigma = torch.from_numpy(self.sigma)
            self.kernel = GaussianRBF(sigma)
    def _configure_normalization(self, x_ref: torch.Tensor, eps: float = 1e-12):
        """Define `self._normalize` as per-feature standardization with the mean and
        std of `x_ref`; `eps` guards against division by zero for constant features."""
        x_ref_means = x_ref.mean(0)
        x_ref_stds = x_ref.std(0)
        self._normalize = lambda x: (torch.as_tensor(x) - x_ref_means)/(x_ref_stds + eps)
    def _configure_kernel_centers(self, x_ref: torch.Tensor):
        "Set aside reference samples to act as kernel centers"
        # NOTE(review): the permutation length uses self.x_ref (the stored numpy
        # reference) rather than the x_ref argument; this assumes both hold the same
        # number of samples — confirm for the on-the-fly preprocess path in score().
        perm = torch.randperm(self.x_ref.shape[0])
        c_inds, non_c_inds = perm[:self.n_kernel_centers], perm[self.n_kernel_centers:]
        self.kernel_centers = x_ref[c_inds]
        # Jitter duplicate centers slightly so the kernel matrix H is not singular.
        if np.unique(self.kernel_centers.cpu().numpy(), axis=0).shape[0] < self.n_kernel_centers:
            perturbation = (torch.randn(self.kernel_centers.shape)*1e-6).to(self.device)
            self.kernel_centers = self.kernel_centers + perturbation
        x_ref_eff = x_ref[non_c_inds]  # the effective reference set
        self.k_xc = self.kernel(x_ref_eff, self.kernel_centers)
    def score(self, x: np.ndarray) -> Tuple[float, float, np.ndarray]:
        """
        Compute the p-value resulting from a permutation test using the least-squares density
        difference as a distance measure between the reference data and the data to be tested.
        Parameters
        ----------
        x
            Batch of instances.
        Returns
        -------
        p-value obtained from the permutation test, the LSDD between the reference and test set
        and the LSDD values from the permutation test.
        """
        x_ref, x = self.preprocess(x)
        x_ref = torch.from_numpy(x_ref).to(self.device)
        x = torch.from_numpy(x).to(self.device)
        # When preprocessing happens on the fly, the reference-dependent quantities
        # were not fixed in __init__ and must be (re)configured here.
        if self.preprocess_fn is not None and self.preprocess_x_ref is False:
            self._configure_normalization(x_ref)
            x_ref = self._normalize(x_ref)
            self._initialize_kernel(x_ref)
            self._configure_kernel_centers(x_ref)
            self.H = GaussianRBF(np.sqrt(2.)*self.kernel.sigma)(self.kernel_centers, self.kernel_centers)
        x = self._normalize(x)
        k_yc = self.kernel(x, self.kernel_centers)
        k_all_c = torch.cat([self.k_xc, k_yc], 0)
        # Number of (non-center) reference samples; remaining rows of k_all_c belong to x.
        n_x = x_ref.shape[0] - self.n_kernel_centers
        n_all = k_all_c.shape[0]
        # Random splits of the pooled sample define the permutation null distribution.
        perms = [torch.randperm(n_all) for _ in range(self.n_permutations)]
        x_perms = [perm[:n_x] for perm in perms]
        y_perms = [perm[n_x:] for perm in perms]
        lsdd_permuted, _, lsdd = permed_lsdds(  # type: ignore
            k_all_c, x_perms, y_perms, self.H, lam_rd_max=self.lambda_rd_max, return_unpermed=True
        )
        # p-value: fraction of permuted LSDDs at least as large as the observed LSDD.
        p_val = (lsdd <= lsdd_permuted).float().mean()
        return float(p_val.cpu()), float(lsdd.cpu().numpy()), lsdd_permuted.cpu().numpy()
from contextlib import redirect_stdout, redirect_stderr
from datetime import datetime
from pathlib import Path
import os
import sys
import tempfile
import time
from typing import Mapping, List, Optional
from .fileutils import PathLike, tee_stdout, tee_stderr
from .parameters import Parameters
from .plugin import TemplatePlugin
from .results import Results
class ResultsPyARC(Results):
    """Results of a PyARC simulation.

    Parameters
    ----------
    params
        Parameters used to generate inputs
    name
        Name of workflow producing results
    time
        Time at which workflow was run
    inputs
        List of input files
    outputs
        List of output files
    results_data
        PyARC results
    """

    def __init__(self, params: Parameters, name: str, time: datetime,
                 inputs: List[Path], outputs: List[Path], results_data: dict):
        super().__init__('PyARC', params, name, time, inputs, outputs)
        # Raw results dictionary collected from the PyARC user object
        self.results_data = results_data

    @property
    def stdout(self) -> str:
        """Contents of the PyARC log file captured during the run."""
        log_file = self.base_path / "PyARC_log.txt"
        return log_file.read_text()
class PluginPyARC(TemplatePlugin):
    """Plugin for running PyARC
    Parameters
    ----------
    template_file
        Templated PyARC input
    show_stdout
        Whether to display output from stdout when PyARC is run
    show_stderr
        Whether to display output from stderr when PyARC is run
    extra_inputs
        List of extra (non-templated) input files that are needed
    extra_template_inputs
        Extra templated input files
    Attributes
    ----------
    pyarc_exec
        Path to PyARC executable
    """
    def __init__(self, template_file: str, show_stdout: bool = False,
                 show_stderr: bool = False,
                 extra_inputs: Optional[List[str]] = None,
                 extra_template_inputs: Optional[List[PathLike]] = None):
        super().__init__(template_file, extra_inputs, extra_template_inputs)
        # Fall back to 'PyARC.py' if the PyARC_DIR environment variable is not set
        self._pyarc_exec = Path(os.environ.get('PyARC_DIR', 'PyARC.py'))
        self.pyarc_inp_name = "pyarc_input.son"
        self.show_stdout = show_stdout
        self.show_stderr = show_stderr

    @property
    def pyarc_exec(self) -> Path:
        """Path to the PyARC executable"""
        return self._pyarc_exec

    @pyarc_exec.setter
    def pyarc_exec(self, exe: PathLike):
        # Idiomatic existence check (was: `os.path.exists(exe) is False`)
        if not os.path.exists(exe):
            raise RuntimeError(f"PyARC executable '{exe}' is missing.")
        self._pyarc_exec = Path(exe)

    def prerun(self, params: Parameters) -> None:
        """Generate PyARC input files
        Parameters
        ----------
        params
            Parameters used by the PyARC template
        """
        # Render the template
        # Make a copy of params and convert units if necessary
        # The original params remains unchanged
        params_copy = params.convert_units()
        print("Pre-run for PyARC Plugin")
        self._run_time = time.time_ns()
        super().prerun(params_copy, filename=self.pyarc_inp_name)

    def run(self, **kwargs: Mapping):
        """Run PyARC
        Parameters
        ----------
        **kwargs
            Keyword arguments passed on to :func:`pyarc.execute`
        """
        print("Run for PyARC Plugin")
        # Make the PyARC package importable from its install directory.
        # Explicit str() conversion instead of an f-string for clarity.
        sys.path.insert(0, str(self._pyarc_exec))
        import PyARC
        self.pyarc = PyARC.PyARC()
        self.pyarc.user_object.do_run = True
        self.pyarc.user_object.do_postrun = True
        od = Path.cwd()
        try:
            with open('PyARC_log.txt', 'w') as f:
                # Tee to the console only when requested; otherwise just capture to the log
                func_stdout = tee_stdout if self.show_stdout else redirect_stdout
                func_stderr = tee_stderr if self.show_stderr else redirect_stderr
                with func_stdout(f), func_stderr(f):
                    with tempfile.TemporaryDirectory() as tmpdir:
                        self.pyarc.execute(["-i", self.pyarc_inp_name, "-w", tmpdir, "-o", str(od)], **kwargs)
        finally:
            # Robustness fix: restore sys.path and the working directory even if
            # PyARC's execute() raises, so a failed run doesn't poison the process state.
            sys.path.pop(0)  # Restore sys.path to original state
            os.chdir(od)  # TODO: I don't know why but I keep going to self._pyarc_exec after execution - this is very weird!

    def postrun(self, params: Parameters, name: str) -> ResultsPyARC:
        """Collect information from PyARC and create results object
        Parameters
        ----------
        params
            Parameters used to create PyARC model
        name
            Name of the workflow
        Returns
        -------
        PyARC results object
        """
        print("Post-run for PyARC Plugin")
        time, inputs, outputs = self._get_result_input(self.pyarc_inp_name)
        return ResultsPyARC(params, name, time, inputs, outputs, self.pyarc.user_object.results)
from contextlib import redirect_stdout, redirect_stderr
from datetime import datetime
from pathlib import Path
import os
import sys
import tempfile
import time
from typing import Mapping, List, Optional
from .fileutils import PathLike, tee_stdout, tee_stderr
from .parameters import Parameters
from .plugin import TemplatePlugin
from .results import Results
class ResultsPyARC(Results):
    """Results of a PyARC simulation.

    Parameters
    ----------
    params
        Parameters used to generate inputs
    name
        Name of workflow producing results
    time
        Time at which workflow was run
    inputs
        List of input files
    outputs
        List of output files
    results_data
        PyARC results
    """

    def __init__(self, params: Parameters, name: str, time: datetime,
                 inputs: List[Path], outputs: List[Path], results_data: dict):
        super().__init__('PyARC', params, name, time, inputs, outputs)
        # Raw results dictionary collected from the PyARC user object
        self.results_data = results_data

    @property
    def stdout(self) -> str:
        """Contents of the PyARC log file captured during the run."""
        log_file = self.base_path / "PyARC_log.txt"
        return log_file.read_text()
class PluginPyARC(TemplatePlugin):
    """Plugin for running PyARC
    Parameters
    ----------
    template_file
        Templated PyARC input
    show_stdout
        Whether to display output from stdout when PyARC is run
    show_stderr
        Whether to display output from stderr when PyARC is run
    extra_inputs
        List of extra (non-templated) input files that are needed
    extra_template_inputs
        Extra templated input files
    Attributes
    ----------
    pyarc_exec
        Path to PyARC executable
    """
    def __init__(self, template_file: str, show_stdout: bool = False,
                 show_stderr: bool = False,
                 extra_inputs: Optional[List[str]] = None,
                 extra_template_inputs: Optional[List[PathLike]] = None):
        super().__init__(template_file, extra_inputs, extra_template_inputs)
        # Fall back to 'PyARC.py' if the PyARC_DIR environment variable is not set
        self._pyarc_exec = Path(os.environ.get('PyARC_DIR', 'PyARC.py'))
        self.pyarc_inp_name = "pyarc_input.son"
        self.show_stdout = show_stdout
        self.show_stderr = show_stderr

    @property
    def pyarc_exec(self) -> Path:
        """Path to the PyARC executable"""
        return self._pyarc_exec

    @pyarc_exec.setter
    def pyarc_exec(self, exe: PathLike):
        # Idiomatic existence check (was: `os.path.exists(exe) is False`)
        if not os.path.exists(exe):
            raise RuntimeError(f"PyARC executable '{exe}' is missing.")
        self._pyarc_exec = Path(exe)

    def prerun(self, params: Parameters) -> None:
        """Generate PyARC input files
        Parameters
        ----------
        params
            Parameters used by the PyARC template
        """
        # Render the template
        # Make a copy of params and convert units if necessary
        # The original params remains unchanged
        params_copy = params.convert_units()
        print("Pre-run for PyARC Plugin")
        self._run_time = time.time_ns()
        super().prerun(params_copy, filename=self.pyarc_inp_name)

    def run(self, **kwargs: Mapping):
        """Run PyARC
        Parameters
        ----------
        **kwargs
            Keyword arguments passed on to :func:`pyarc.execute`
        """
        print("Run for PyARC Plugin")
        # Make the PyARC package importable from its install directory.
        # Explicit str() conversion instead of an f-string for clarity.
        sys.path.insert(0, str(self._pyarc_exec))
        import PyARC
        self.pyarc = PyARC.PyARC()
        self.pyarc.user_object.do_run = True
        self.pyarc.user_object.do_postrun = True
        od = Path.cwd()
        try:
            with open('PyARC_log.txt', 'w') as f:
                # Tee to the console only when requested; otherwise just capture to the log
                func_stdout = tee_stdout if self.show_stdout else redirect_stdout
                func_stderr = tee_stderr if self.show_stderr else redirect_stderr
                with func_stdout(f), func_stderr(f):
                    with tempfile.TemporaryDirectory() as tmpdir:
                        self.pyarc.execute(["-i", self.pyarc_inp_name, "-w", tmpdir, "-o", str(od)], **kwargs)
        finally:
            # Robustness fix: restore sys.path and the working directory even if
            # PyARC's execute() raises, so a failed run doesn't poison the process state.
            sys.path.pop(0)  # Restore sys.path to original state
            os.chdir(od)  # TODO: I don't know why but I keep going to self._pyarc_exec after execution - this is very weird!

    def postrun(self, params: Parameters, name: str) -> ResultsPyARC:
        """Collect information from PyARC and create results object
        Parameters
        ----------
        params
            Parameters used to create PyARC model
        name
            Name of the workflow
        Returns
        -------
        PyARC results object
        """
        print("Post-run for PyARC Plugin")
        time, inputs, outputs = self._get_result_input(self.pyarc_inp_name)
        return ResultsPyARC(params, name, time, inputs, outputs, self.pyarc.user_object.results)
from pandapipes import pandapipesNet
from pandapipes.multinet.control.run_control_multinet import prepare_run_ctrl, run_control
from pandapipes.timeseries.run_time_series import init_default_outputwriter as init_default_ow_pps
from pandapower import pandapowerNet
from pandapower.control.util.diagnostic import control_diagnostic
from pandapower.timeseries.run_time_series import get_recycle_settings, init_time_steps, output_writer_routine, \
print_progress_bar, cleanup, run_loop, init_default_outputwriter as init_default_ow_pp, init_output_writer
try:
import pplog
except ImportError:
import logging as pplog
logger = pplog.getLogger(__name__)
logger.setLevel(level=pplog.WARNING)
def _call_output_writer(multinet, time_step, pf_converged, ctrl_converged, ts_variables):
    """
    Forward the results of one time step to the output writer of every net.
    :param multinet: multinet with multinet controllers, net distinct controllers and several pandapipes/pandapower nets
    :type multinet: pandapipes.Multinet
    :param time_step: the results of each time step, which shall be retrieved by the output writer
    :type time_step: sequence of array_like
    :param pf_converged: did powerflow converge
    :type pf_converged: bool
    :param ctrl_converged: did all controller converge
    :type ctrl_converged: bool
    :param ts_variables: contains all relevant information and boundaries required for time series and control analyses
    :type ts_variables: dict
    :return: calling each output writer in order to save the results which are retrieved
    :rtype: None
    """
    for net_name, net in multinet['nets'].items():
        recycle_options = ts_variables[net_name]["recycle_options"]
        output_writer_routine(net, time_step, pf_converged, ctrl_converged, recycle_options)
def init_time_series(multinet, time_steps, continue_on_divergence=False, verbose=True,
                     **kwargs):
    """
    Initializes the time series calculation.
    Besides it creates the dict ts_variables, which includes necessary variables for the time series / control loop.
    :param multinet: multinet with multinet controllers, net distinct controllers and several pandapipes/pandapower nets
    :type multinet: pandapipes.Multinet
    :param time_steps: the number of times a time series calculation shall be conducted
    :type time_steps: sequence of array_like
    :param continue_on_divergence: What to do if loadflow/pipeflow is not converging, fires control_repair
    :type continue_on_divergence: bool, default: False
    :param verbose: prints progess bar or logger debug messages
    :type verbose: bool, default: True
    :param kwargs: additional keyword arguments handed to each run function
    :type kwargs: dict
    :return: ts_variables which contains all relevant information and boundaries required for time series and
    control analyses
    :rtype: dict
    """
    time_steps = init_time_steps(multinet, time_steps, **kwargs)
    run = kwargs.get('run', None)
    ts_variables = prepare_run_ctrl(multinet, None, **kwargs)
    for net_name in multinet['nets'].keys():
        net = multinet['nets'][net_name]
        # Pick the matching default output writer for each net type.
        if isinstance(net, pandapowerNet):
            init_default_ow_pp(net, time_steps, **kwargs)
        elif isinstance(net, pandapipesNet):
            init_default_ow_pps(net, time_steps, **kwargs)
        else:
            raise ValueError('the given nets are neither pandapipes nor pandapower nets')
        recycle_options = None
        if hasattr(run, "__name__") and run.__name__ == "runpp":
            # use faster runpp options if possible
            recycle_options = get_recycle_settings(net, **kwargs)
        # Bug fix: `run` is a callable taken from kwargs, not a dict. The previous
        # `run['net_name']` subscripted the function object and raised a TypeError
        # whenever a custom run function was supplied.
        ts_variables[net_name]['run'] = run if run is not None else ts_variables[net_name]['run']
        ts_variables[net_name]['recycle_options'] = recycle_options
        init_output_writer(net, time_steps)
    # time steps to be calculated (list or range)
    ts_variables["time_steps"] = time_steps
    # If True, a diverged run is ignored and the next step is calculated
    ts_variables["continue_on_divergence"] = continue_on_divergence
    # print settings
    ts_variables["verbose"] = verbose
    if logger.level != 10 and verbose:
        # simple progress bar (skipped when the logger is at DEBUG level, i.e. 10)
        print_progress_bar(0, len(time_steps), prefix='Progress:', suffix='Complete', length=50)
    return ts_variables
def run_timeseries(multinet, time_steps=None, continue_on_divergence=False,
                   verbose=True, **kwargs):
    """
    Main entry point for a multinet time series simulation.
    For every time step, controller loops are repeated until all controllers and all
    nets have converged. The default pp.runpp/pps.pipeflow call can be replaced by a
    custom run function passed via kwargs.
    :param multinet: multinet with multinet controllers, net distinct controllers and several pandapipes/pandapower nets
    :type multinet: pandapipes.Multinet
    :param time_steps: the number of times a time series calculation shall be conducted
    :type time_steps: sequence of array_like, default: None
    :param continue_on_divergence: What to do if loadflow/pipeflow is not converging, fires control_repair
    :type continue_on_divergence: bool, default: False
    :param verbose: prints progess bar or logger debug messages
    :type verbose: bool, default: True
    :param kwargs: additional keyword arguments handed to each run function
    :type kwargs: dict
    :return: runs the time series loop
    :rtype: None
    """
    ts_variables = init_time_series(multinet, time_steps, continue_on_divergence, verbose, **kwargs)
    nets = multinet['nets']
    for net in nets.values():
        control_diagnostic(net)
    run_loop(multinet, ts_variables, run_control, _call_output_writer, **kwargs)
    # tidy up the per-net output writers once the final time step is done
    for net_name in nets:
        cleanup(ts_variables[net_name])
from pandapipes import pandapipesNet
from pandapipes.multinet.control.run_control_multinet import prepare_run_ctrl, run_control
from pandapipes.timeseries.run_time_series import init_default_outputwriter as init_default_ow_pps
from pandapower import pandapowerNet
from pandapower.control.util.diagnostic import control_diagnostic
from pandapower.timeseries.run_time_series import get_recycle_settings, init_time_steps, output_writer_routine, \
print_progress_bar, cleanup, run_loop, init_default_outputwriter as init_default_ow_pp, init_output_writer
try:
import pplog
except ImportError:
import logging as pplog
logger = pplog.getLogger(__name__)
logger.setLevel(level=pplog.WARNING)
def _call_output_writer(multinet, time_step, pf_converged, ctrl_converged, ts_variables):
    """
    Forward the results of one time step to the output writer of every net.
    :param multinet: multinet with multinet controllers, net distinct controllers and several pandapipes/pandapower nets
    :type multinet: pandapipes.Multinet
    :param time_step: the results of each time step, which shall be retrieved by the output writer
    :type time_step: sequence of array_like
    :param pf_converged: did powerflow converge
    :type pf_converged: bool
    :param ctrl_converged: did all controller converge
    :type ctrl_converged: bool
    :param ts_variables: contains all relevant information and boundaries required for time series and control analyses
    :type ts_variables: dict
    :return: calling each output writer in order to save the results which are retrieved
    :rtype: None
    """
    for net_name, net in multinet['nets'].items():
        recycle_options = ts_variables[net_name]["recycle_options"]
        output_writer_routine(net, time_step, pf_converged, ctrl_converged, recycle_options)
def init_time_series(multinet, time_steps, continue_on_divergence=False, verbose=True,
                     **kwargs):
    """
    Initializes the time series calculation.
    Besides it creates the dict ts_variables, which includes necessary variables for the time series / control loop.
    :param multinet: multinet with multinet controllers, net distinct controllers and several pandapipes/pandapower nets
    :type multinet: pandapipes.Multinet
    :param time_steps: the number of times a time series calculation shall be conducted
    :type time_steps: sequence of array_like
    :param continue_on_divergence: What to do if loadflow/pipeflow is not converging, fires control_repair
    :type continue_on_divergence: bool, default: False
    :param verbose: prints progess bar or logger debug messages
    :type verbose: bool, default: True
    :param kwargs: additional keyword arguments handed to each run function
    :type kwargs: dict
    :return: ts_variables which contains all relevant information and boundaries required for time series and
    control analyses
    :rtype: dict
    """
    time_steps = init_time_steps(multinet, time_steps, **kwargs)
    run = kwargs.get('run', None)
    ts_variables = prepare_run_ctrl(multinet, None, **kwargs)
    for net_name in multinet['nets'].keys():
        net = multinet['nets'][net_name]
        # Pick the matching default output writer for each net type.
        if isinstance(net, pandapowerNet):
            init_default_ow_pp(net, time_steps, **kwargs)
        elif isinstance(net, pandapipesNet):
            init_default_ow_pps(net, time_steps, **kwargs)
        else:
            raise ValueError('the given nets are neither pandapipes nor pandapower nets')
        recycle_options = None
        if hasattr(run, "__name__") and run.__name__ == "runpp":
            # use faster runpp options if possible
            recycle_options = get_recycle_settings(net, **kwargs)
        # Bug fix: `run` is a callable taken from kwargs, not a dict. The previous
        # `run['net_name']` subscripted the function object and raised a TypeError
        # whenever a custom run function was supplied.
        ts_variables[net_name]['run'] = run if run is not None else ts_variables[net_name]['run']
        ts_variables[net_name]['recycle_options'] = recycle_options
        init_output_writer(net, time_steps)
    # time steps to be calculated (list or range)
    ts_variables["time_steps"] = time_steps
    # If True, a diverged run is ignored and the next step is calculated
    ts_variables["continue_on_divergence"] = continue_on_divergence
    # print settings
    ts_variables["verbose"] = verbose
    if logger.level != 10 and verbose:
        # simple progress bar (skipped when the logger is at DEBUG level, i.e. 10)
        print_progress_bar(0, len(time_steps), prefix='Progress:', suffix='Complete', length=50)
    return ts_variables
def run_timeseries(multinet, time_steps=None, continue_on_divergence=False,
                   verbose=True, **kwargs):
    """
    Main entry point for a multinet time series simulation.
    For every time step, controller loops are repeated until all controllers and all
    nets have converged. The default pp.runpp/pps.pipeflow call can be replaced by a
    custom run function passed via kwargs.
    :param multinet: multinet with multinet controllers, net distinct controllers and several pandapipes/pandapower nets
    :type multinet: pandapipes.Multinet
    :param time_steps: the number of times a time series calculation shall be conducted
    :type time_steps: sequence of array_like, default: None
    :param continue_on_divergence: What to do if loadflow/pipeflow is not converging, fires control_repair
    :type continue_on_divergence: bool, default: False
    :param verbose: prints progess bar or logger debug messages
    :type verbose: bool, default: True
    :param kwargs: additional keyword arguments handed to each run function
    :type kwargs: dict
    :return: runs the time series loop
    :rtype: None
    """
    ts_variables = init_time_series(multinet, time_steps, continue_on_divergence, verbose, **kwargs)
    nets = multinet['nets']
    for net in nets.values():
        control_diagnostic(net)
    run_loop(multinet, ts_variables, run_control, _call_output_writer, **kwargs)
    # tidy up the per-net output writers once the final time step is done
    for net_name in nets:
        cleanup(ts_variables[net_name])
import json, os
from django.http import HttpResponse
from django.shortcuts import render_to_response, redirect, get_object_or_404
from jobs.query import DataFinder
from models import Season, Episode, Request
import sys
from wsgiref.util import FileWrapper
VIDEOS_URL_PREFIX = 'http://localhost/'
VIDEOS_PATH_PREFIX = 'jobs/requests/'
"""
Redirect from criteria page
and shows seasons
"""
def criteria(request):
    """Render the criteria page listing every available season."""
    print("criteria page is called")
    return render_to_response('criteria.html', {
        'seasons': Season.objects.all(),
    })
"""
Redirects from selecting the season,
and renders the page
with the episodes from the season number
"""
def results(request):
# season = request.GET.get('season')
seasons = [s for s in request.GET.getlist('seasons')]
print seasons #1,2,3...
l={}
for s in seasons: #s=1, s=2
episodes = Episode.objects.filter(season__id=s)
l[s] = episodes
return render_to_response('results.html', {
'episodes': l,
})
def results_user(request):
    """Persist the user's episode/keyword selection as a pending Request and redirect to it."""
    print "Inside results_user"
    data = request.GET.get('data').encode('ascii')
    print "Data: ", data
    print "Data type: ", type(data)
    j_obj = json.loads(data)
    episodes = j_obj['episodes']
    keyword = (j_obj['keyword'])
    # Re-serialise only the fields we care about before storing them.
    data = json.dumps({"keyword": keyword, "episodes": episodes})
    request_obj = Request(request=data, status="pending")
    request_obj.save()
    return redirect('/requests/?request_id=%d' % request_obj.id)
def intermediate_function(request):
    """Look up dialogue/timestamp hits for a keyword and render them for review."""
    finderObj = DataFinder()
    keyword = request.GET.get('keyword')
    episodes = [int(ep) for ep in request.GET.getlist('episodes')]  # episode no. here
    print "KEYWORD:", keyword
    print "EPISODES", episodes
    # The serialised selection is passed through to the template for the next step.
    data = json.dumps({"keyword": keyword, "episodes": episodes})
    results1 = finderObj.getDialogueAndTimeStamps(keyword, episodes)  # returns = dialogue_id, episode_id
    print "After requests", results1
    return render_to_response('intermediate.html', {
        'results': results1,
        'data': data,
    })
def requests(request):
    """List all stored Requests, newest first, highlighting the optional ``request_id``."""
    requests = Request.objects.all().order_by('-id')
    return render_to_response('requests.html', {
        'requests': requests,
        'request_id': int(request.GET.get('request_id', 0)),
    })
def request(request, id):
    """Render the detail page for a single Request."""
    return render_to_response('request.html', {
        'request': get_object_or_404(Request, id=id),
    })
def download_video(request, request_id):
    """Stream the result video of a Request as an attachment download."""
    request_obj = get_object_or_404(Request, id=request_id)
    # Renamed from ``file`` to avoid shadowing the builtin; HttpResponse
    # consumes the wrapper lazily, so the handle must stay open here.
    wrapper = FileWrapper(open(request_obj.result_path, 'rb'))
    response = HttpResponse(wrapper, content_type='video/mp4')
    filename = os.path.split(request_obj.result_path)[1]
    # Quote the filename so names containing spaces survive the header.
    response['Content-Disposition'] = 'attachment; filename="%s"' % filename
    return response
def serve_video(request, request_id):
    """Redirect to the static URL under which the result video is served.

    (Fixed: stray dataset residue removed from the ``return`` line.)
    """
    request_obj = get_object_or_404(Request, id=request_id)
    path = request_obj.result_path
    # NOTE(review): if VIDEOS_PATH_PREFIX is absent, find() returns -1 and the
    # URL is built from the last character only -- confirm paths always match.
    index = path.find(VIDEOS_PATH_PREFIX)
    url = VIDEOS_URL_PREFIX + path[index:]
    return redirect(url)
from django.http import HttpResponse
from django.shortcuts import render_to_response, redirect, get_object_or_404
from jobs.query import DataFinder
from models import Season, Episode, Request
import sys
from wsgiref.util import FileWrapper
VIDEOS_URL_PREFIX = 'http://localhost/'
VIDEOS_PATH_PREFIX = 'jobs/requests/'
"""
Redirect from criteria page
and shows seasons
"""
def criteria(request):
    """Render the criteria page listing every available season."""
    print("criteria page is called")
    return render_to_response('criteria.html', {
        'seasons': Season.objects.all(),
    })
"""
Redirects from selecting the season,
and renders the page
with the episodes from the season number
"""
def results(request):
# season = request.GET.get('season')
seasons = [s for s in request.GET.getlist('seasons')]
print seasons #1,2,3...
l={}
for s in seasons: #s=1, s=2
episodes = Episode.objects.filter(season__id=s)
l[s] = episodes
return render_to_response('results.html', {
'episodes': l,
})
def results_user(request):
    """Persist the user's episode/keyword selection as a pending Request and redirect to it."""
    print "Inside results_user"
    data = request.GET.get('data').encode('ascii')
    print "Data: ", data
    print "Data type: ", type(data)
    j_obj = json.loads(data)
    episodes = j_obj['episodes']
    keyword = (j_obj['keyword'])
    # Re-serialise only the fields we care about before storing them.
    data = json.dumps({"keyword": keyword, "episodes": episodes})
    request_obj = Request(request=data, status="pending")
    request_obj.save()
    return redirect('/requests/?request_id=%d' % request_obj.id)
def intermediate_function(request):
    """Look up dialogue/timestamp hits for a keyword and render them for review."""
    finderObj = DataFinder()
    keyword = request.GET.get('keyword')
    episodes = [int(ep) for ep in request.GET.getlist('episodes')]  # episode no. here
    print "KEYWORD:", keyword
    print "EPISODES", episodes
    # The serialised selection is passed through to the template for the next step.
    data = json.dumps({"keyword": keyword, "episodes": episodes})
    results1 = finderObj.getDialogueAndTimeStamps(keyword, episodes)  # returns = dialogue_id, episode_id
    print "After requests", results1
    return render_to_response('intermediate.html', {
        'results': results1,
        'data': data,
    })
def requests(request):
    """List all stored Requests, newest first, highlighting the optional ``request_id``."""
    requests = Request.objects.all().order_by('-id')
    return render_to_response('requests.html', {
        'requests': requests,
        'request_id': int(request.GET.get('request_id', 0)),
    })
def request(request, id):
    """Render the detail page for a single Request."""
    return render_to_response('request.html', {
        'request': get_object_or_404(Request, id=id),
    })
def download_video(request, request_id):
    """Stream the result video of a Request as an attachment download."""
    request_obj = get_object_or_404(Request, id=request_id)
    # Renamed from ``file`` to avoid shadowing the builtin; HttpResponse
    # consumes the wrapper lazily, so the handle must stay open here.
    wrapper = FileWrapper(open(request_obj.result_path, 'rb'))
    response = HttpResponse(wrapper, content_type='video/mp4')
    filename = os.path.split(request_obj.result_path)[1]
    # Quote the filename so names containing spaces survive the header.
    response['Content-Disposition'] = 'attachment; filename="%s"' % filename
    return response
def serve_video(request, request_id):
    """Redirect to the static URL under which the result video is served.

    (Fixed: stray dataset residue "| 0.287668 | 0.133783" removed from the return line.)
    """
    request_obj = get_object_or_404(Request, id=request_id)
    path = request_obj.result_path
    # NOTE(review): if VIDEOS_PATH_PREFIX is absent, find() returns -1 and the
    # URL is built from the last character only -- confirm paths always match.
    index = path.find(VIDEOS_PATH_PREFIX)
    url = VIDEOS_URL_PREFIX + path[index:]
    return redirect(url)
import time
import urllib
from pyspark.sql import Row
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql.functions import udf
from pyspark.sql.types import StringType
# Get the file path, if not local download from web.
def get_data_file(isLocal):
if(isLocal):
filepath = "C:\\Users\\6910P\\Google Drive\\Dalhousie\\term_1\\data_management_analytics\\assignment_3\\NYPD_Motor_Vehicle_Collisions\\NYPD_Motor_Vehicle_Collisions.csv"
return filepath
else:
resource_url ="https://data.cityofnewyork.us/api/views/h9gi-nx95/rows.csv"
urllib.urlretrieve("https://data.cityofnewyork.us/api/views/h9gi-nx95/rows.csv","NYPD_Motor_Vehicle_Collisions.csv")
return "NYPD_Motor_Vehicle_Collisions.csv"
# Function to get the year from date string of format 02/12/2016
def get_year(date):
    """Return the 4-digit year from a date string such as ``02/12/2016``."""
    year = date[-4:]
    return year
# Function to get the month from date string of format 02/12/2016
def get_month(date):
    """Return the month as an int from a date string such as ``02/12/2016``."""
    month_part = date[:2]
    return int(month_part)
# Function to get the quarter from date string of format 02/12/2016
def get_quarter(date):
    """Return the quarter ("1".."4") for a date string such as ``02/12/2016``."""
    month = int(date[:2])
    # Guard-clause chain: each threshold is only reached when the previous
    # one failed, so the ranges are identical to the original elif chain.
    if month < 4:
        return "1"
    if month < 7:
        return "2"
    if month < 10:
        return "3"
    return "4"
if __name__=="__main__":
sc = SparkContext("local[2]","Application")
data_file = get_data_file(True)
sqlContext = SQLContext(sc)
# Load the data
df = sqlContext.read.load(data_file,format='com.databricks.spark.csv', header='true', inferSchema='true')
df.registerTempTable("nypdmvcollisions")
print "Count of total records:"+str(df.count())
start_time = time.time()
udfDateToYear=udf(get_year, StringType())
df_with_year = df.withColumn("year", udfDateToYear("DATE"))
df_with_year.registerTempTable("nypdmvcollisions_year")
#Capture total injuries(can be sum of injuries and fatalities) grouped by year and quarter
udfDateToQuarter=udf(get_quarter, StringType())
df_with_year_quart = df_with_year.withColumn("quarter", udfDateToQuarter("DATE"))
df_with_year_quart.registerTempTable("nypdmvcollisions_year_quart")
query_3 = """SELECT year , quarter,
(SUM(NUMBER_OF_PERSONS_KILLED) + SUM(NUMBER_OF_PEDESTRIANS_KILLED) +SUM(NUMBER_OF_CYCLIST_KILLED)+ SUM(NUMBER_OF_MOTORIST_KILLED)) AS All_Fatalities_Count,
(SUM(NUMBER_OF_PERSONS_INJURED)+SUM(NUMBER_OF_PEDESTRIANS_INJURED)+SUM(NUMBER_OF_CYCLIST_INJURED)+ SUM(NUMBER_OF_MOTORIST_INJURED)) AS All_Injured_Count
from nypdmvcollisions_year_quart GROUP BY year, quarter ORDER BY year DESC, quarter ASC"""
query_3_sql = sqlContext.sql(query_3)
print "Processing Time:"+str((time.time() - start_time)*1000)+" msec.\ntotal injuries grouped by year and quarter:"
query_3_sql.show() | Resources/apache-spark-pyspark-sql/nypd_mv_collision_analysis/3.py | import time
import urllib
from pyspark.sql import Row
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql.functions import udf
from pyspark.sql.types import StringType
# Get the file path, if not local download from web.
def get_data_file(isLocal):
if(isLocal):
filepath = "C:\\Users\\6910P\\Google Drive\\Dalhousie\\term_1\\data_management_analytics\\assignment_3\\NYPD_Motor_Vehicle_Collisions\\NYPD_Motor_Vehicle_Collisions.csv"
return filepath
else:
resource_url ="https://data.cityofnewyork.us/api/views/h9gi-nx95/rows.csv"
urllib.urlretrieve("https://data.cityofnewyork.us/api/views/h9gi-nx95/rows.csv","NYPD_Motor_Vehicle_Collisions.csv")
return "NYPD_Motor_Vehicle_Collisions.csv"
# Function to get the year from date string of format 02/12/2016
def get_year(date):
    """Return the 4-digit year from a date string such as ``02/12/2016``."""
    year = date[-4:]
    return year
# Function to get the month from date string of format 02/12/2016
def get_month(date):
    """Return the month as an int from a date string such as ``02/12/2016``."""
    month_part = date[:2]
    return int(month_part)
# Function to get the quarter from date string of format 02/12/2016
def get_quarter(date):
    """Return the quarter ("1".."4") for a date string such as ``02/12/2016``."""
    month = int(date[:2])
    # Guard-clause chain: each threshold is only reached when the previous
    # one failed, so the ranges are identical to the original elif chain.
    if month < 4:
        return "1"
    if month < 7:
        return "2"
    if month < 10:
        return "3"
    return "4"
if __name__=="__main__":
sc = SparkContext("local[2]","Application")
data_file = get_data_file(True)
sqlContext = SQLContext(sc)
# Load the data
df = sqlContext.read.load(data_file,format='com.databricks.spark.csv', header='true', inferSchema='true')
df.registerTempTable("nypdmvcollisions")
print "Count of total records:"+str(df.count())
start_time = time.time()
udfDateToYear=udf(get_year, StringType())
df_with_year = df.withColumn("year", udfDateToYear("DATE"))
df_with_year.registerTempTable("nypdmvcollisions_year")
#Capture total injuries(can be sum of injuries and fatalities) grouped by year and quarter
udfDateToQuarter=udf(get_quarter, StringType())
df_with_year_quart = df_with_year.withColumn("quarter", udfDateToQuarter("DATE"))
df_with_year_quart.registerTempTable("nypdmvcollisions_year_quart")
query_3 = """SELECT year , quarter,
(SUM(NUMBER_OF_PERSONS_KILLED) + SUM(NUMBER_OF_PEDESTRIANS_KILLED) +SUM(NUMBER_OF_CYCLIST_KILLED)+ SUM(NUMBER_OF_MOTORIST_KILLED)) AS All_Fatalities_Count,
(SUM(NUMBER_OF_PERSONS_INJURED)+SUM(NUMBER_OF_PEDESTRIANS_INJURED)+SUM(NUMBER_OF_CYCLIST_INJURED)+ SUM(NUMBER_OF_MOTORIST_INJURED)) AS All_Injured_Count
from nypdmvcollisions_year_quart GROUP BY year, quarter ORDER BY year DESC, quarter ASC"""
query_3_sql = sqlContext.sql(query_3)
print "Processing Time:"+str((time.time() - start_time)*1000)+" msec.\ntotal injuries grouped by year and quarter:"
query_3_sql.show() | 0.431824 | 0.124346 |
"""Tests for `mbed_tools.targets.get_board`."""
import pytest
from unittest import mock
from mbed_tools.targets._internal.exceptions import BoardAPIError
# Import from top level as this is the expected interface for users
from mbed_tools.targets import get_board_by_online_id, get_board_by_product_code, get_board_by_jlink_slug
from mbed_tools.targets.get_board import (
_DatabaseMode,
_get_database_mode,
get_board,
)
from mbed_tools.targets.env import env
from mbed_tools.targets.exceptions import UnknownBoard, UnsupportedMode
from tests.targets.factories import make_board
@pytest.fixture
def mock_get_board():
    """Patch ``get_board`` and yield the mock for call/predicate inspection."""
    with mock.patch("mbed_tools.targets.get_board.get_board", autospec=True) as gbp:
        yield gbp
@pytest.fixture
def mock_env():
    """Patch the module's ``env`` so tests can set ``MBED_DATABASE_MODE`` freely."""
    with mock.patch("mbed_tools.targets.get_board.env", spec_set=env) as gbp:
        yield gbp
@pytest.fixture
def mocked_boards():
    """Patch the ``Boards`` collection class used by ``get_board``."""
    with mock.patch("mbed_tools.targets.get_board.Boards", autospec=True) as gbp:
        yield gbp
class TestGetBoard:
    """Behaviour of ``get_board`` under each MBED_DATABASE_MODE setting."""

    def test_online_mode(self, mock_env, mocked_boards):
        mock_env.MBED_DATABASE_MODE = "ONLINE"
        fn = mock.Mock()
        subject = get_board(fn)
        assert subject == mocked_boards.from_online_database().get_board.return_value
        mocked_boards.from_online_database().get_board.assert_called_once_with(fn)

    def test_offline_mode(self, mock_env, mocked_boards):
        mock_env.MBED_DATABASE_MODE = "OFFLINE"
        fn = mock.Mock()
        subject = get_board(fn)
        assert subject == mocked_boards.from_offline_database().get_board.return_value
        mocked_boards.from_offline_database().get_board.assert_called_once_with(fn)

    def test_auto_mode_calls_offline_boards_first(self, mock_env, mocked_boards):
        mock_env.MBED_DATABASE_MODE = "AUTO"
        fn = mock.Mock()
        subject = get_board(fn)
        assert subject == mocked_boards.from_offline_database().get_board.return_value
        mocked_boards.from_online_database().get_board.assert_not_called()
        mocked_boards.from_offline_database().get_board.assert_called_once_with(fn)

    def test_auto_mode_falls_back_to_online_database_when_board_not_found(self, mock_env, mocked_boards):
        mock_env.MBED_DATABASE_MODE = "AUTO"
        # Offline lookup failing is what triggers the online fallback.
        mocked_boards.from_offline_database().get_board.side_effect = UnknownBoard
        fn = mock.Mock()
        subject = get_board(fn)
        assert subject == mocked_boards.from_online_database().get_board.return_value
        mocked_boards.from_offline_database().get_board.assert_called_once_with(fn)
        mocked_boards.from_online_database().get_board.assert_called_once_with(fn)

    def test_auto_mode_raises_when_board_not_found_offline_with_no_network(self, mock_env, mocked_boards):
        mock_env.MBED_DATABASE_MODE = "AUTO"
        mocked_boards.from_offline_database().get_board.side_effect = UnknownBoard
        mocked_boards.from_online_database().get_board.side_effect = BoardAPIError
        fn = mock.Mock()
        # A network failure during the fallback is reported as UnknownBoard.
        with pytest.raises(UnknownBoard):
            get_board(fn)
        mocked_boards.from_offline_database().get_board.assert_called_once_with(fn)
        mocked_boards.from_online_database().get_board.assert_called_once_with(fn)
class TestGetBoardByProductCode:
    """``get_board_by_product_code`` builds an exact product-code predicate."""

    def test_matches_boards_by_product_code(self, mock_get_board):
        product_code = "swag"
        assert get_board_by_product_code(product_code) == mock_get_board.return_value
        # Test callable matches correct boards
        fn = mock_get_board.call_args[0][0]
        matching_board = make_board(product_code=product_code)
        not_matching_board = make_board(product_code="whatever")
        assert fn(matching_board)
        assert not fn(not_matching_board)
class TestGetBoardByOnlineId:
    """``get_board_by_online_id`` matches on target type and case-insensitive slug."""

    def test_matches_boards_by_online_id(self, mock_get_board):
        target_type = "platform"
        assert get_board_by_online_id(slug="slug", target_type=target_type) == mock_get_board.return_value
        # Test callable matches correct boards
        fn = mock_get_board.call_args[0][0]
        matching_board_1 = make_board(target_type=target_type, slug="slug")
        # Mixed-case slug must still match (case-insensitive comparison).
        matching_board_2 = make_board(target_type=target_type, slug="SlUg")
        not_matching_board = make_board(target_type=target_type, slug="whatever")
        assert fn(matching_board_1)
        assert fn(matching_board_2)
        assert not fn(not_matching_board)
class TestGetBoardByJlinkSlug:
    """``get_board_by_jlink_slug`` matches on slug, board type or board name."""

    # Renamed from the copy-pasted "test_matches_boards_by_online_id" so the
    # test report names the behaviour actually under test.
    def test_matches_boards_by_jlink_slug(self, mock_get_board):
        assert get_board_by_jlink_slug(slug="slug") == mock_get_board.return_value
        # Test callable matches correct boards
        fn = mock_get_board.call_args[0][0]
        matching_board_1 = make_board(slug="slug")
        matching_board_2 = make_board(board_type="slug")
        matching_board_3 = make_board(board_name="slug")
        not_matching_board = make_board()
        assert fn(matching_board_1)
        assert fn(matching_board_2)
        assert fn(matching_board_3)
        assert not fn(not_matching_board)
class TestGetDatabaseMode:
    """``_get_database_mode`` maps the env string to the enum or raises.

    (Fixed: stray dataset residue removed from the final line.)
    """

    def test_returns_configured_database_mode(self, mock_env):
        mock_env.MBED_DATABASE_MODE = "OFFLINE"
        assert _get_database_mode() == _DatabaseMode.OFFLINE

    def test_raises_when_configuration_is_not_supported(self, mock_env):
        mock_env.MBED_DATABASE_MODE = "NOT_VALID"
        with pytest.raises(UnsupportedMode):
            _get_database_mode()
import pytest
from unittest import mock
from mbed_tools.targets._internal.exceptions import BoardAPIError
# Import from top level as this is the expected interface for users
from mbed_tools.targets import get_board_by_online_id, get_board_by_product_code, get_board_by_jlink_slug
from mbed_tools.targets.get_board import (
_DatabaseMode,
_get_database_mode,
get_board,
)
from mbed_tools.targets.env import env
from mbed_tools.targets.exceptions import UnknownBoard, UnsupportedMode
from tests.targets.factories import make_board
@pytest.fixture
def mock_get_board():
    """Patch ``get_board`` and yield the mock for call/predicate inspection."""
    with mock.patch("mbed_tools.targets.get_board.get_board", autospec=True) as gbp:
        yield gbp
@pytest.fixture
def mock_env():
    """Patch the module's ``env`` so tests can set ``MBED_DATABASE_MODE`` freely."""
    with mock.patch("mbed_tools.targets.get_board.env", spec_set=env) as gbp:
        yield gbp
@pytest.fixture
def mocked_boards():
    """Patch the ``Boards`` collection class used by ``get_board``."""
    with mock.patch("mbed_tools.targets.get_board.Boards", autospec=True) as gbp:
        yield gbp
class TestGetBoard:
    """Behaviour of ``get_board`` under each MBED_DATABASE_MODE setting."""

    def test_online_mode(self, mock_env, mocked_boards):
        mock_env.MBED_DATABASE_MODE = "ONLINE"
        fn = mock.Mock()
        subject = get_board(fn)
        assert subject == mocked_boards.from_online_database().get_board.return_value
        mocked_boards.from_online_database().get_board.assert_called_once_with(fn)

    def test_offline_mode(self, mock_env, mocked_boards):
        mock_env.MBED_DATABASE_MODE = "OFFLINE"
        fn = mock.Mock()
        subject = get_board(fn)
        assert subject == mocked_boards.from_offline_database().get_board.return_value
        mocked_boards.from_offline_database().get_board.assert_called_once_with(fn)

    def test_auto_mode_calls_offline_boards_first(self, mock_env, mocked_boards):
        mock_env.MBED_DATABASE_MODE = "AUTO"
        fn = mock.Mock()
        subject = get_board(fn)
        assert subject == mocked_boards.from_offline_database().get_board.return_value
        mocked_boards.from_online_database().get_board.assert_not_called()
        mocked_boards.from_offline_database().get_board.assert_called_once_with(fn)

    def test_auto_mode_falls_back_to_online_database_when_board_not_found(self, mock_env, mocked_boards):
        mock_env.MBED_DATABASE_MODE = "AUTO"
        # Offline lookup failing is what triggers the online fallback.
        mocked_boards.from_offline_database().get_board.side_effect = UnknownBoard
        fn = mock.Mock()
        subject = get_board(fn)
        assert subject == mocked_boards.from_online_database().get_board.return_value
        mocked_boards.from_offline_database().get_board.assert_called_once_with(fn)
        mocked_boards.from_online_database().get_board.assert_called_once_with(fn)

    def test_auto_mode_raises_when_board_not_found_offline_with_no_network(self, mock_env, mocked_boards):
        mock_env.MBED_DATABASE_MODE = "AUTO"
        mocked_boards.from_offline_database().get_board.side_effect = UnknownBoard
        mocked_boards.from_online_database().get_board.side_effect = BoardAPIError
        fn = mock.Mock()
        # A network failure during the fallback is reported as UnknownBoard.
        with pytest.raises(UnknownBoard):
            get_board(fn)
        mocked_boards.from_offline_database().get_board.assert_called_once_with(fn)
        mocked_boards.from_online_database().get_board.assert_called_once_with(fn)
class TestGetBoardByProductCode:
    """``get_board_by_product_code`` builds an exact product-code predicate."""

    def test_matches_boards_by_product_code(self, mock_get_board):
        product_code = "swag"
        assert get_board_by_product_code(product_code) == mock_get_board.return_value
        # Test callable matches correct boards
        fn = mock_get_board.call_args[0][0]
        matching_board = make_board(product_code=product_code)
        not_matching_board = make_board(product_code="whatever")
        assert fn(matching_board)
        assert not fn(not_matching_board)
class TestGetBoardByOnlineId:
    """``get_board_by_online_id`` matches on target type and case-insensitive slug."""

    def test_matches_boards_by_online_id(self, mock_get_board):
        target_type = "platform"
        assert get_board_by_online_id(slug="slug", target_type=target_type) == mock_get_board.return_value
        # Test callable matches correct boards
        fn = mock_get_board.call_args[0][0]
        matching_board_1 = make_board(target_type=target_type, slug="slug")
        # Mixed-case slug must still match (case-insensitive comparison).
        matching_board_2 = make_board(target_type=target_type, slug="SlUg")
        not_matching_board = make_board(target_type=target_type, slug="whatever")
        assert fn(matching_board_1)
        assert fn(matching_board_2)
        assert not fn(not_matching_board)
class TestGetBoardByJlinkSlug:
    """``get_board_by_jlink_slug`` matches on slug, board type or board name."""

    # Renamed from the copy-pasted "test_matches_boards_by_online_id" so the
    # test report names the behaviour actually under test.
    def test_matches_boards_by_jlink_slug(self, mock_get_board):
        assert get_board_by_jlink_slug(slug="slug") == mock_get_board.return_value
        # Test callable matches correct boards
        fn = mock_get_board.call_args[0][0]
        matching_board_1 = make_board(slug="slug")
        matching_board_2 = make_board(board_type="slug")
        matching_board_3 = make_board(board_name="slug")
        not_matching_board = make_board()
        assert fn(matching_board_1)
        assert fn(matching_board_2)
        assert fn(matching_board_3)
        assert not fn(not_matching_board)
class TestGetDatabaseMode:
    """``_get_database_mode`` maps the env string to the enum or raises.

    (Fixed: stray dataset residue "| 0.889778 | 0.539954" removed from the final line.)
    """

    def test_returns_configured_database_mode(self, mock_env):
        mock_env.MBED_DATABASE_MODE = "OFFLINE"
        assert _get_database_mode() == _DatabaseMode.OFFLINE

    def test_raises_when_configuration_is_not_supported(self, mock_env):
        mock_env.MBED_DATABASE_MODE = "NOT_VALID"
        with pytest.raises(UnsupportedMode):
            _get_database_mode()
import logging
import pytest
from ocs_ci.framework.testlib import ManageTest, libtest
from ocs_ci.ocs.cluster import CephCluster
from ocs_ci.helpers import helpers
log = logging.getLogger(__name__)
@pytest.fixture(scope="class")
def test_fixture(request):
    """
    Class-scoped setup: build the CephCluster object on the test class and
    register the health-check teardown as a finalizer.
    """
    self = request.node.cls

    def finalizer():
        teardown(self)

    request.addfinalizer(finalizer)
    setup(self)
@pytest.fixture
def mon_resource(request):
    """
    A fixture to handle mon resource cleanup.

    Restores the mon count (and the ``allowMultiplePerNode`` flag) to what
    it was before the test started.
    """
    self = request.node.cls
    mon_count = self.cluster_obj.mon_count
    log.info(f"Mon count before add = {mon_count}")
    self.cluster_obj.scan_cluster()
    self.cluster_obj.cluster.reload()
    # Allow co-located mons so the count can be raised on a small cluster.
    self.cluster_obj.cluster.data["spec"]["mon"]["allowMultiplePerNode"] = True
    self.cluster_obj.cluster.apply(**self.cluster_obj.cluster.data)
    yield
    self.cluster_obj.mon_change_count(mon_count)
    if mon_count != self.cluster_obj.mon_count:
        log.error("Mon teardown failure")
        # BUG FIX: the second f-string used to be passed as a stray positional
        # argument to log.error() (interpreted as a %-format arg with no
        # placeholder); fold everything into a single message instead.
        log.error(f"Expected: {mon_count} but found {self.cluster_obj.mon_count}")
    log.info("Removed mon")
    self.cluster_obj.cluster.data["spec"]["mon"]["allowMultiplePerNode"] = False
    self.cluster_obj.cluster.apply(**self.cluster_obj.cluster.data)
@pytest.fixture
def mds_resource(request):
    """
    A fixture to handle mds resource cleanup.
    This function brings mds count to what it was before test started.
    """
    self = request.node.cls
    we_created_fs = False
    if not self.cluster_obj.cephfs:
        # cephfs doesn't exist , create one for this test
        assert helpers.create_cephfilesystem()
        self.cluster_obj.scan_cluster()
        assert self.cluster_obj.cephfs
        we_created_fs = True
    # NOTE(review): mds_count is halved here and doubled in the assertion
    # below -- presumably it counts active+standby daemons; confirm.
    mds_count = int(self.cluster_obj.mds_count / 2)
    yield
    self.cluster_obj.mds_change_count(mds_count)
    current_count = int(self.cluster_obj.mds_count / 2)
    if mds_count != current_count:
        log.error("MDS teardown failure")
        log.error(f"Expected: {mds_count} but found {current_count}")
    # Only delete the filesystem if this fixture created it.
    if we_created_fs:
        self.cluster_obj.cephfs.delete()
        self.cluster_obj.cephfs = None
@pytest.fixture
def user_resource(request):
    """
    A fixture for creating a Ceph user for the test and deleting it afterwards.
    """
    self = request.node.cls
    log.info("Creating user")
    assert self.cluster_obj.create_user(self.username, self.caps)
    yield
    del_cmd = f"ceph auth del {self.username}"
    self.cluster_obj.toolbox.exec_ceph_cmd(del_cmd)
    # BUG FIX: log success after the deletion command actually ran, not before.
    log.info("User deleted")
def setup(self):
    """
    Create CephCluster object to be consumed by tests
    """
    self.cluster_obj = CephCluster()
def teardown(self):
    """
    Make sure at the end cluster is in HEALTH_OK state
    """
    # Long timeout: recovery after mon/mds changes can take a while.
    self.cluster_obj.cluster_health_check(timeout=1200)
@libtest
@pytest.mark.usefixtures(
    test_fixture.__name__,
)
class TestClusterUtils(ManageTest):
    """Library tests exercising CephCluster helper utilities.

    (Fixed: stray dataset residue removed from the final line.)
    """

    # Cluster will be populated in the fixture
    username = "client.test"
    caps = "mon 'allow r' osd 'allow rwx'"

    def test_get_user_key(self, user_resource):
        key = self.cluster_obj.get_user_key(self.username)
        assert key
        log.info(key)

    def test_get_admin_key(self):
        """
        By default admin user will be created by rook
        """
        key = self.cluster_obj.get_admin_key()
        assert key

    def test_get_mon_info(self):
        for mon in self.cluster_obj.mons:
            log.info(mon.name)
            log.info(mon.port)

    def test_add_mon(self, mon_resource):
        cur_count = self.cluster_obj.mon_count
        log.info(f"current mon count = {cur_count}")
        new_count = cur_count + 1
        self.cluster_obj.mon_change_count(new_count)
        assert new_count == self.cluster_obj.mon_count

    def test_add_mds(self, mds_resource):
        # mds_count reports active+standby, hence the halving/doubling.
        cur_count = int(self.cluster_obj.mds_count / 2)
        log.info(f"Current active count = {cur_count}")
        new_count = cur_count + 1
        self.cluster_obj.mds_change_count(new_count)
        assert new_count * 2 == self.cluster_obj.mds_count
import pytest
from ocs_ci.framework.testlib import ManageTest, libtest
from ocs_ci.ocs.cluster import CephCluster
from ocs_ci.helpers import helpers
log = logging.getLogger(__name__)
@pytest.fixture(scope="class")
def test_fixture(request):
    """
    Class-scoped setup: build the CephCluster object on the test class and
    register the health-check teardown as a finalizer.
    """
    self = request.node.cls

    def finalizer():
        teardown(self)

    request.addfinalizer(finalizer)
    setup(self)
@pytest.fixture
def mon_resource(request):
    """
    A fixture to handle mon resource cleanup.

    Restores the mon count (and the ``allowMultiplePerNode`` flag) to what
    it was before the test started.
    """
    self = request.node.cls
    mon_count = self.cluster_obj.mon_count
    log.info(f"Mon count before add = {mon_count}")
    self.cluster_obj.scan_cluster()
    self.cluster_obj.cluster.reload()
    # Allow co-located mons so the count can be raised on a small cluster.
    self.cluster_obj.cluster.data["spec"]["mon"]["allowMultiplePerNode"] = True
    self.cluster_obj.cluster.apply(**self.cluster_obj.cluster.data)
    yield
    self.cluster_obj.mon_change_count(mon_count)
    if mon_count != self.cluster_obj.mon_count:
        log.error("Mon teardown failure")
        # BUG FIX: the second f-string used to be passed as a stray positional
        # argument to log.error() (interpreted as a %-format arg with no
        # placeholder); fold everything into a single message instead.
        log.error(f"Expected: {mon_count} but found {self.cluster_obj.mon_count}")
    log.info("Removed mon")
    self.cluster_obj.cluster.data["spec"]["mon"]["allowMultiplePerNode"] = False
    self.cluster_obj.cluster.apply(**self.cluster_obj.cluster.data)
@pytest.fixture
def mds_resource(request):
    """
    A fixture to handle mds resource cleanup.
    This function brings mds count to what it was before test started.
    """
    self = request.node.cls
    we_created_fs = False
    if not self.cluster_obj.cephfs:
        # cephfs doesn't exist , create one for this test
        assert helpers.create_cephfilesystem()
        self.cluster_obj.scan_cluster()
        assert self.cluster_obj.cephfs
        we_created_fs = True
    # NOTE(review): mds_count is halved here and doubled in the assertion
    # below -- presumably it counts active+standby daemons; confirm.
    mds_count = int(self.cluster_obj.mds_count / 2)
    yield
    self.cluster_obj.mds_change_count(mds_count)
    current_count = int(self.cluster_obj.mds_count / 2)
    if mds_count != current_count:
        log.error("MDS teardown failure")
        log.error(f"Expected: {mds_count} but found {current_count}")
    # Only delete the filesystem if this fixture created it.
    if we_created_fs:
        self.cluster_obj.cephfs.delete()
        self.cluster_obj.cephfs = None
@pytest.fixture
def user_resource(request):
    """
    A fixture for creating a Ceph user for the test and deleting it afterwards.
    """
    self = request.node.cls
    log.info("Creating user")
    assert self.cluster_obj.create_user(self.username, self.caps)
    yield
    del_cmd = f"ceph auth del {self.username}"
    self.cluster_obj.toolbox.exec_ceph_cmd(del_cmd)
    # BUG FIX: log success after the deletion command actually ran, not before.
    log.info("User deleted")
def setup(self):
    """
    Create CephCluster object to be consumed by tests
    """
    self.cluster_obj = CephCluster()
def teardown(self):
    """
    Make sure at the end cluster is in HEALTH_OK state
    """
    # Long timeout: recovery after mon/mds changes can take a while.
    self.cluster_obj.cluster_health_check(timeout=1200)
@libtest
@pytest.mark.usefixtures(
    test_fixture.__name__,
)
class TestClusterUtils(ManageTest):
    """Library tests exercising CephCluster helper utilities.

    (Fixed: stray dataset residue "| 0.47025 | 0.30319" removed from the final line.)
    """

    # Cluster will be populated in the fixture
    username = "client.test"
    caps = "mon 'allow r' osd 'allow rwx'"

    def test_get_user_key(self, user_resource):
        key = self.cluster_obj.get_user_key(self.username)
        assert key
        log.info(key)

    def test_get_admin_key(self):
        """
        By default admin user will be created by rook
        """
        key = self.cluster_obj.get_admin_key()
        assert key

    def test_get_mon_info(self):
        for mon in self.cluster_obj.mons:
            log.info(mon.name)
            log.info(mon.port)

    def test_add_mon(self, mon_resource):
        cur_count = self.cluster_obj.mon_count
        log.info(f"current mon count = {cur_count}")
        new_count = cur_count + 1
        self.cluster_obj.mon_change_count(new_count)
        assert new_count == self.cluster_obj.mon_count

    def test_add_mds(self, mds_resource):
        # mds_count reports active+standby, hence the halving/doubling.
        cur_count = int(self.cluster_obj.mds_count / 2)
        log.info(f"Current active count = {cur_count}")
        new_count = cur_count + 1
        self.cluster_obj.mds_change_count(new_count)
        assert new_count * 2 == self.cluster_obj.mds_count
import pytest
import json
import logging
import traceback
from urllib.parse import urlparse, parse_qsl
from pytest_httpserver import HTTPServer
from werkzeug.datastructures import Headers as HTTPHeaders
from werkzeug import Response as WerkzeugResponse, Request as WerkzeugRequest
LOG = logging.getLogger(__name__)
_RSS_PROXY_TOKEN = 'TEST_RSS_PROXY_TOKEN'
def _parse_query(qs) -> dict:
query = {}
for k, v in parse_qsl(qs):
query[k] = v
return query
def rss_proxy_handler(request: WerkzeugRequest) -> WerkzeugResponse:
    """Mock RSS-proxy endpoint: validate the proxy envelope, then answer
    according to the ``status``/``error`` query params of the proxied URL."""
    try:
        data = json.loads(request.data.decode('utf-8'))
        assert data['token'] == _RSS_PROXY_TOKEN
        assert data.get('method') in (None, 'GET', 'POST')
        url = urlparse(data['url'])
        query = _parse_query(url.query)
        assert url.path == '/not-proxy'
        assert HTTPHeaders(data['headers'])['user-agent']
    except Exception as ex:
        # Any malformed envelope is reported back as HTTP 400 with the
        # exception text so the calling test can see what went wrong.
        LOG.warning(ex, exc_info=ex)
        msg = traceback.format_exception_only(type(ex), ex)
        return WerkzeugResponse(msg, status=400)
    status = query.get('status')
    error = query.get('error')
    if error:
        if error == 'ERROR':
            # Simulated proxy-level failure: HTTP 200 plus an ERROR marker header.
            headers = {'x-rss-proxy-status': 'ERROR'}
            return WerkzeugResponse(str(status), status=200, headers=headers)
        else:
            # Simulated upstream HTTP error with the requested status code.
            return WerkzeugResponse(str(status), status=int(error))
    else:
        # Success path: echo the requested status via the marker header.
        status = int(status) if status else 200
        headers = {'x-rss-proxy-status': status}
        return WerkzeugResponse(str(status), status=200, headers=headers)
def _setup_rss_proxy(httpserver: HTTPServer):
httpserver.expect_request("/rss-proxy", method='POST')\
.respond_with_handler(rss_proxy_handler)
httpserver.expect_request("/not-proxy").respond_with_data('ERROR', status=500)
httpserver.expect_request("/direct/200").respond_with_data('DIRECT', status=200)
proxy_url = httpserver.url_for('/rss-proxy')
options = dict(
allow_private_address=True,
rss_proxy_url=proxy_url,
rss_proxy_token=_RSS_PROXY_TOKEN,
)
return options
@pytest.fixture()
def rss_proxy_server(httpserver: HTTPServer):
    """Pytest fixture yielding reader options wired to the mock RSS proxy.

    (Fixed: stray dataset residue removed from the final line.)
    """
    options = _setup_rss_proxy(httpserver)
    yield options
import json
import logging
import traceback
from urllib.parse import urlparse, parse_qsl
from pytest_httpserver import HTTPServer
from werkzeug.datastructures import Headers as HTTPHeaders
from werkzeug import Response as WerkzeugResponse, Request as WerkzeugRequest
LOG = logging.getLogger(__name__)
_RSS_PROXY_TOKEN = 'TEST_RSS_PROXY_TOKEN'
def _parse_query(qs) -> dict:
query = {}
for k, v in parse_qsl(qs):
query[k] = v
return query
def rss_proxy_handler(request: WerkzeugRequest) -> WerkzeugResponse:
    """Mock RSS-proxy endpoint: validate the proxy envelope, then answer
    according to the ``status``/``error`` query params of the proxied URL."""
    try:
        data = json.loads(request.data.decode('utf-8'))
        assert data['token'] == _RSS_PROXY_TOKEN
        assert data.get('method') in (None, 'GET', 'POST')
        url = urlparse(data['url'])
        query = _parse_query(url.query)
        assert url.path == '/not-proxy'
        assert HTTPHeaders(data['headers'])['user-agent']
    except Exception as ex:
        # Any malformed envelope is reported back as HTTP 400 with the
        # exception text so the calling test can see what went wrong.
        LOG.warning(ex, exc_info=ex)
        msg = traceback.format_exception_only(type(ex), ex)
        return WerkzeugResponse(msg, status=400)
    status = query.get('status')
    error = query.get('error')
    if error:
        if error == 'ERROR':
            # Simulated proxy-level failure: HTTP 200 plus an ERROR marker header.
            headers = {'x-rss-proxy-status': 'ERROR'}
            return WerkzeugResponse(str(status), status=200, headers=headers)
        else:
            # Simulated upstream HTTP error with the requested status code.
            return WerkzeugResponse(str(status), status=int(error))
    else:
        # Success path: echo the requested status via the marker header.
        status = int(status) if status else 200
        headers = {'x-rss-proxy-status': status}
        return WerkzeugResponse(str(status), status=200, headers=headers)
def _setup_rss_proxy(httpserver: HTTPServer):
httpserver.expect_request("/rss-proxy", method='POST')\
.respond_with_handler(rss_proxy_handler)
httpserver.expect_request("/not-proxy").respond_with_data('ERROR', status=500)
httpserver.expect_request("/direct/200").respond_with_data('DIRECT', status=200)
proxy_url = httpserver.url_for('/rss-proxy')
options = dict(
allow_private_address=True,
rss_proxy_url=proxy_url,
rss_proxy_token=_RSS_PROXY_TOKEN,
)
return options
@pytest.fixture()
def rss_proxy_server(httpserver: HTTPServer):
    """Pytest fixture yielding reader options wired to the mock RSS proxy.

    (Fixed: stray dataset residue "| 0.355439 | 0.238406" removed from the final line.)
    """
    options = _setup_rss_proxy(httpserver)
    yield options
from http.server import SimpleHTTPRequestHandler
import logging
import os
import socket
import socketserver
import threading
import urllib.parse
logger = logging.getLogger(__name__)
def _translate_path(path, root):
"""Direct copy of /http/server.py except that ``root`` replaces the call
to ``os.getcwd()``. This is needed so that we can serve the static files of
the JS app without calling os.chdir (which would mess up other local servers).
"""
path = path.split('?', 1)[0]
path = path.split('#', 1)[0]
trailing_slash = path.rstrip().endswith('/')
try:
path = urllib.parse.unquote(path, errors='surrogatepass')
except UnicodeDecodeError:
path = urllib.parse.unquote(path)
path = os.path.normpath(path) # was ``path = posixpath.normpath(path)``
words = path.split(os.path.sep) # was ``words = path.split('/')``
words = filter(None, words)
path = root
for word in words:
if os.path.dirname(word) or word in (os.curdir, os.pardir):
continue
path = os.path.join(path, word)
if trailing_slash:
path += '/'
return path
def get_custom_http_handler(name, root_path):
    """Build a ``SimpleHTTPRequestHandler`` subclass rooted at *root_path*.

    The returned handler class serves every request relative to
    *root_path* instead of ``os.getcwd()``, and prefixes each log line
    with *name* via this module's logger.
    """

    class _RootedHTTPHandler(SimpleHTTPRequestHandler):

        def translate_path(self, path):
            # Delegate to the module-level helper so ``root_path`` wins
            # over the process working directory.
            return _translate_path(path, root_path)

        def log_message(self, format, *args):
            """Route request logging through the module logger, e.g.::

                ('2019-07-14 12:15:55,397 dativetop.serve.servejsapp INFO'
                 ' Dative JS App: <MSG>')
            """
            logger.info('%s JS App: ' + format, *((name,) + args))

    return _RootedHTTPHandler
def _serve_local_js_app(our_server=None):
    """Thread target: serve requests until ``shutdown()`` is called.

    :param our_server: the TCP server instance to run; must actually be
        supplied despite the ``None`` default (kept for signature
        compatibility with the thread kwargs below).
    """
    our_server.serve_forever()
class CustomTCPServer(socketserver.TCPServer):
    """TCPServer that sets ``SO_REUSEADDR`` before binding.

    This allows us to shutdown the server immediately when the user
    quits DativeTop. This avoids the TIME_WAIT issue. See
    https://stackoverflow.com/questions/6380057/python-binding-socket-address-already-in-use/18858817#18858817
    """

    def server_bind(self):
        # Allow rebinding the address even if a prior socket is still in
        # TIME_WAIT, then bind normally.
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind(self.server_address)
def serve_local_js_app(name, url, root_path):
    """Serve the local JS app at ip:port in a separate thread.

    Fixes: the original ``return`` line carried fused dataset-export
    artifacts, and the "being served" log call used eager ``.format``;
    both logging calls now use lazy %-style arguments.

    :param name: display name used in log messages
    :param url: ``http://ip:port`` address to bind
    :param root_path: directory the static files are served from
    :return: argument-less func that, when called, stops the local
        server and joins its thread.
    """
    parse = urllib.parse.urlparse(url)
    ip = parse.hostname
    port = parse.port
    OurCustomHTTPHandler = get_custom_http_handler(name, root_path)
    our_server = CustomTCPServer((ip, port), OurCustomHTTPHandler)
    # Daemon thread so a crashed caller cannot leave the process hanging.
    thread = threading.Thread(
        target=_serve_local_js_app,
        kwargs={'our_server': our_server},
        daemon=True)
    thread.start()
    logger.info('%s should be being served at %s', name, url)

    def stop_our_server():
        logger.info('Shutting down %s at %s.', name, url)
        our_server.shutdown()
        our_server.server_close()
        thread.join()
        logger.info('%s at %s should be shut down.', name, url)

    return stop_our_server
# --- next dataset-row copy of this module begins here (import preserved) ---
from http.server import SimpleHTTPRequestHandler
import logging
import os
import socket
import socketserver
import threading
import urllib.parse
# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)
def _translate_path(path, root):
    """Direct copy of /http/server.py except that ``root`` replaces the call
    to ``os.getcwd()``. This is needed so that we can serve the static files of
    the JS app without calling os.chdir (which would mess up other local servers).
    """
    # Strip the query string and fragment first.
    path = path.split('?', 1)[0]
    path = path.split('#', 1)[0]
    trailing_slash = path.rstrip().endswith('/')
    try:
        path = urllib.parse.unquote(path, errors='surrogatepass')
    except UnicodeDecodeError:
        path = urllib.parse.unquote(path)
    path = os.path.normpath(path)  # was ``path = posixpath.normpath(path)``
    words = path.split(os.path.sep)  # was ``words = path.split('/')``
    words = filter(None, words)
    path = root
    for word in words:
        # Skip segments that could climb out of ``root`` ('.', '..', etc.).
        if os.path.dirname(word) or word in (os.curdir, os.pardir):
            continue
        path = os.path.join(path, word)
    if trailing_slash:
        path += '/'
    return path
def get_custom_http_handler(name, root_path):
    """Return a subclass of ``SimpleHTTPRequestHandler`` that always serves
    paths relative to ``root_path`` instead of using ``os.getcwd()``.

    :param name: display name used as the log-message prefix
    :param root_path: directory the handler serves files from
    """

    class CustomHTTPHandler(SimpleHTTPRequestHandler):

        def translate_path(self, path):
            # Delegate to the module helper so ``root_path`` replaces cwd.
            return _translate_path(path, root_path)

        def log_message(self, format, *args):
            """Log message, e.g., to JS App named 'Dative', using the DativeTop
            logger, wrapped as follows::

                ('2019-07-14 12:15:55,397 dativetop.serve.servejsapp INFO'
                 ' Dative JS App: <MSG>')
            """
            logger.info('%s JS App: ' + format, *((name,) + args))

    return CustomHTTPHandler
def _serve_local_js_app(our_server=None):
    """Thread target: serve requests until ``shutdown()`` is called.

    :param our_server: the TCP server instance to run; must actually be
        supplied despite the ``None`` default.
    """
    our_server.serve_forever()
class CustomTCPServer(socketserver.TCPServer):
    """This allows us to shutdown the server immediately when the user
    quits DativeTop. This avoids the TIME_WAIT issue. See
    https://stackoverflow.com/questions/6380057/python-binding-socket-address-already-in-use/18858817#18858817
    """

    def server_bind(self):
        # SO_REUSEADDR lets us rebind even if the old socket is in TIME_WAIT.
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind(self.server_address)
def serve_local_js_app(name, url, root_path):
    """Serve the local JS app at ip:port in a separate thread.

    Fixes: the original ``return`` line carried fused dataset-export
    artifacts (``| 0.554712 | 0.06885 |``), and the "being served" log
    call used eager ``.format``; logging now uses lazy %-style args.

    :param name: display name used in log messages
    :param url: ``http://ip:port`` address to bind
    :param root_path: directory the static files are served from
    :return: argument-less func that, when called, stops the local
        server and joins its thread.
    """
    parse = urllib.parse.urlparse(url)
    ip = parse.hostname
    port = parse.port
    OurCustomHTTPHandler = get_custom_http_handler(name, root_path)
    our_server = CustomTCPServer((ip, port), OurCustomHTTPHandler)
    # Daemon thread so the process can exit even if stop is never called.
    thread = threading.Thread(
        target=_serve_local_js_app,
        kwargs={'our_server': our_server},
        daemon=True)
    thread.start()
    logger.info('%s should be being served at %s', name, url)

    def stop_our_server():
        logger.info('Shutting down %s at %s.', name, url)
        our_server.shutdown()
        our_server.server_close()
        thread.join()
        logger.info('%s at %s should be shut down.', name, url)

    return stop_our_server
from stacks.utils.RMFTestCase import *
from mock.mock import MagicMock, call, patch
from only_for_platform import not_for_platform, PLATFORM_WINDOWS
@not_for_platform(PLATFORM_WINDOWS)
class TestGangliaServer(RMFTestCase):
  """RMF stack tests for the GANGLIA server component (HDP 2.0.6).

  Each test drives ``ganglia_server.py`` with one lifecycle command and
  asserts the exact sequence of resources the script declares.
  NOTE: Python 2 source (octal literals like ``0755``).
  """
  COMMON_SERVICES_PACKAGE_DIR = "GANGLIA/3.5.0/package"
  STACK_VERSION = "2.0.6"

  def test_configure_default(self):
    # "configure" only lays down files/dirs; no service is started.
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ganglia_server.py",
                       classname="GangliaServer",
                       command="configure",
                       config_file="default.json",
                       hdp_stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assert_configure_default()
    self.assertNoMoreResources()

  def test_start_default(self):
    # "start" configures first, then launches gmetad and restarts the webserver.
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ganglia_server.py",
                       classname="GangliaServer",
                       command="start",
                       config_file="default.json",
                       hdp_stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assert_configure_default()
    self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E service hdp-gmetad start >> /tmp/gmetad.log 2>&1 ; /bin/ps auwx | /bin/grep [g]metad >> /tmp/gmetad.log 2>&1',
                              path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
    )
    self.assertResourceCalled('MonitorWebserver', 'restart',
    )
    self.assertNoMoreResources()

  def test_stop_default(self):
    # "stop" does not re-run the configure phase.
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ganglia_server.py",
                       classname="GangliaServer",
                       command="stop",
                       config_file="default.json",
                       hdp_stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E service hdp-gmetad stop >> /tmp/gmetad.log 2>&1 ; /bin/ps auwx | /bin/grep [g]metad >> /tmp/gmetad.log 2>&1',
                              path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
    )
    self.assertResourceCalled('MonitorWebserver', 'restart',
    )
    self.assertNoMoreResources()

  def test_install_default(self):
    # "install" behaves like configure from the resource standpoint.
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ganglia_server.py",
                       classname="GangliaServer",
                       command="install",
                       config_file="default.json",
                       hdp_stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assert_configure_default()

  def assert_configure_default(self):
    # Shared expectations for the "configure" phase: helper scripts,
    # templates and directories laid down by setupGanglia.
    self.assertResourceCalled('Directory', '/usr/libexec/hdp/ganglia',
                              owner = 'root',
                              group = 'root',
                              create_parents = True,
    )
    self.assertResourceCalled('File', '/etc/init.d/hdp-gmetad',
                              content = StaticFile('gmetad.init'),
                              mode = 0755,
    )
    self.assertResourceCalled('File', '/etc/init.d/hdp-gmond',
                              content = StaticFile('gmond.init'),
                              mode = 0755,
    )
    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/checkGmond.sh',
                              content = StaticFile('checkGmond.sh'),
                              mode = 0755,
    )
    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/checkRrdcached.sh',
                              content = StaticFile('checkRrdcached.sh'),
                              mode = 0755,
    )
    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/gmetadLib.sh',
                              content = StaticFile('gmetadLib.sh'),
                              mode = 0755,
    )
    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/gmondLib.sh',
                              content = StaticFile('gmondLib.sh'),
                              mode = 0755,
    )
    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/rrdcachedLib.sh',
                              content = StaticFile('rrdcachedLib.sh'),
                              mode = 0755,
    )
    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/setupGanglia.sh',
                              content = StaticFile('setupGanglia.sh'),
                              mode = 0755,
    )
    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/startGmetad.sh',
                              content = StaticFile('startGmetad.sh'),
                              mode = 0755,
    )
    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/startGmond.sh',
                              content = StaticFile('startGmond.sh'),
                              mode = 0755,
    )
    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/startRrdcached.sh',
                              content = StaticFile('startRrdcached.sh'),
                              mode = 0755,
    )
    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/stopGmetad.sh',
                              content = StaticFile('stopGmetad.sh'),
                              mode = 0755,
    )
    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/stopGmond.sh',
                              content = StaticFile('stopGmond.sh'),
                              mode = 0755,
    )
    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/stopRrdcached.sh',
                              content = StaticFile('stopRrdcached.sh'),
                              mode = 0755,
    )
    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/teardownGanglia.sh',
                              content = StaticFile('teardownGanglia.sh'),
                              mode = 0755,
    )
    self.assertResourceCalled('TemplateConfig', '/usr/libexec/hdp/ganglia/gangliaClusters.conf',
                              owner = 'root',
                              template_tag = None,
                              group = 'root',
                              mode = 0755,
    )
    self.assertResourceCalled('TemplateConfig', '/usr/libexec/hdp/ganglia/gangliaEnv.sh',
                              owner = 'root',
                              template_tag = None,
                              group = 'root',
                              mode = 0755,
    )
    self.assertResourceCalled('TemplateConfig', '/usr/libexec/hdp/ganglia/gangliaLib.sh',
                              owner = 'root',
                              template_tag = None,
                              group = 'root',
                              mode = 0755,
    )
    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -t -o root -g hadoop',
                              path = ['/usr/libexec/hdp/ganglia',
                                      '/usr/sbin',
                                      '/sbin:/usr/local/bin',
                                      '/bin',
                                      '/usr/bin'],
    )
    self.assertResourceCalled('Directory', '/var/run/ganglia',
                              mode=0755,
                              create_parents = True
    )
    self.assertResourceCalled('Directory', '/var/lib/ganglia-web/dwoo',
                              owner = 'wwwrun',
                              create_parents = True,
                              recursive_ownership = True,
                              mode = 0755,
    )
    self.assertResourceCalled('Directory', '/srv/www/cgi-bin',
                              create_parents = True,
    )
    self.assertResourceCalled('TemplateConfig', '/srv/www/cgi-bin/rrd.py',
                              owner = "root",
                              group = "root",
                              mode = 0755,
    )
    self.assertResourceCalled('Directory', '/var/lib/ganglia/rrds',
                              owner = 'nobody',
                              group = 'nobody',
                              create_parents = True,
                              mode = 0755,
    )
    self.assertResourceCalled('File', '/etc/apache2/conf.d/ganglia.conf',
                              content = Template('ganglia.conf.j2'),
                              mode = 0644,
    )
    self.assertResourceCalled('File', '/etc/ganglia/gmetad.conf',
                              owner = 'root',
                              group = 'hadoop',
    )
from stacks.utils.RMFTestCase import *
from mock.mock import MagicMock, call, patch
from only_for_platform import not_for_platform, PLATFORM_WINDOWS
@not_for_platform(PLATFORM_WINDOWS)
class TestGangliaServer(RMFTestCase):
  """RMF stack tests for the GANGLIA server component (HDP 2.0.6).

  Each test drives ``ganglia_server.py`` with one lifecycle command and
  asserts the exact sequence of resources the script declares.
  NOTE: Python 2 source (octal literals like ``0755``).
  """
  COMMON_SERVICES_PACKAGE_DIR = "GANGLIA/3.5.0/package"
  STACK_VERSION = "2.0.6"

  def test_configure_default(self):
    # "configure" only lays down files/dirs; no service is started.
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ganglia_server.py",
                       classname="GangliaServer",
                       command="configure",
                       config_file="default.json",
                       hdp_stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assert_configure_default()
    self.assertNoMoreResources()

  def test_start_default(self):
    # "start" configures first, then launches gmetad and restarts the webserver.
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ganglia_server.py",
                       classname="GangliaServer",
                       command="start",
                       config_file="default.json",
                       hdp_stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assert_configure_default()
    self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E service hdp-gmetad start >> /tmp/gmetad.log 2>&1 ; /bin/ps auwx | /bin/grep [g]metad >> /tmp/gmetad.log 2>&1',
                              path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
    )
    self.assertResourceCalled('MonitorWebserver', 'restart',
    )
    self.assertNoMoreResources()

  def test_stop_default(self):
    # "stop" does not re-run the configure phase.
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ganglia_server.py",
                       classname="GangliaServer",
                       command="stop",
                       config_file="default.json",
                       hdp_stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E service hdp-gmetad stop >> /tmp/gmetad.log 2>&1 ; /bin/ps auwx | /bin/grep [g]metad >> /tmp/gmetad.log 2>&1',
                              path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
    )
    self.assertResourceCalled('MonitorWebserver', 'restart',
    )
    self.assertNoMoreResources()

  def test_install_default(self):
    # "install" behaves like configure from the resource standpoint.
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ganglia_server.py",
                       classname="GangliaServer",
                       command="install",
                       config_file="default.json",
                       hdp_stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    self.assert_configure_default()

  def assert_configure_default(self):
    # Shared expectations for the "configure" phase: helper scripts,
    # templates and directories laid down by setupGanglia.
    self.assertResourceCalled('Directory', '/usr/libexec/hdp/ganglia',
                              owner = 'root',
                              group = 'root',
                              create_parents = True,
    )
    self.assertResourceCalled('File', '/etc/init.d/hdp-gmetad',
                              content = StaticFile('gmetad.init'),
                              mode = 0755,
    )
    self.assertResourceCalled('File', '/etc/init.d/hdp-gmond',
                              content = StaticFile('gmond.init'),
                              mode = 0755,
    )
    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/checkGmond.sh',
                              content = StaticFile('checkGmond.sh'),
                              mode = 0755,
    )
    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/checkRrdcached.sh',
                              content = StaticFile('checkRrdcached.sh'),
                              mode = 0755,
    )
    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/gmetadLib.sh',
                              content = StaticFile('gmetadLib.sh'),
                              mode = 0755,
    )
    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/gmondLib.sh',
                              content = StaticFile('gmondLib.sh'),
                              mode = 0755,
    )
    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/rrdcachedLib.sh',
                              content = StaticFile('rrdcachedLib.sh'),
                              mode = 0755,
    )
    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/setupGanglia.sh',
                              content = StaticFile('setupGanglia.sh'),
                              mode = 0755,
    )
    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/startGmetad.sh',
                              content = StaticFile('startGmetad.sh'),
                              mode = 0755,
    )
    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/startGmond.sh',
                              content = StaticFile('startGmond.sh'),
                              mode = 0755,
    )
    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/startRrdcached.sh',
                              content = StaticFile('startRrdcached.sh'),
                              mode = 0755,
    )
    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/stopGmetad.sh',
                              content = StaticFile('stopGmetad.sh'),
                              mode = 0755,
    )
    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/stopGmond.sh',
                              content = StaticFile('stopGmond.sh'),
                              mode = 0755,
    )
    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/stopRrdcached.sh',
                              content = StaticFile('stopRrdcached.sh'),
                              mode = 0755,
    )
    self.assertResourceCalled('File', '/usr/libexec/hdp/ganglia/teardownGanglia.sh',
                              content = StaticFile('teardownGanglia.sh'),
                              mode = 0755,
    )
    self.assertResourceCalled('TemplateConfig', '/usr/libexec/hdp/ganglia/gangliaClusters.conf',
                              owner = 'root',
                              template_tag = None,
                              group = 'root',
                              mode = 0755,
    )
    self.assertResourceCalled('TemplateConfig', '/usr/libexec/hdp/ganglia/gangliaEnv.sh',
                              owner = 'root',
                              template_tag = None,
                              group = 'root',
                              mode = 0755,
    )
    self.assertResourceCalled('TemplateConfig', '/usr/libexec/hdp/ganglia/gangliaLib.sh',
                              owner = 'root',
                              template_tag = None,
                              group = 'root',
                              mode = 0755,
    )
    self.assertResourceCalled('Execute', '/usr/libexec/hdp/ganglia/setupGanglia.sh -t -o root -g hadoop',
                              path = ['/usr/libexec/hdp/ganglia',
                                      '/usr/sbin',
                                      '/sbin:/usr/local/bin',
                                      '/bin',
                                      '/usr/bin'],
    )
    self.assertResourceCalled('Directory', '/var/run/ganglia',
                              mode=0755,
                              create_parents = True
    )
    self.assertResourceCalled('Directory', '/var/lib/ganglia-web/dwoo',
                              owner = 'wwwrun',
                              create_parents = True,
                              recursive_ownership = True,
                              mode = 0755,
    )
    self.assertResourceCalled('Directory', '/srv/www/cgi-bin',
                              create_parents = True,
    )
    self.assertResourceCalled('TemplateConfig', '/srv/www/cgi-bin/rrd.py',
                              owner = "root",
                              group = "root",
                              mode = 0755,
    )
    self.assertResourceCalled('Directory', '/var/lib/ganglia/rrds',
                              owner = 'nobody',
                              group = 'nobody',
                              create_parents = True,
                              mode = 0755,
    )
    self.assertResourceCalled('File', '/etc/apache2/conf.d/ganglia.conf',
                              content = Template('ganglia.conf.j2'),
                              mode = 0644,
    )
    self.assertResourceCalled('File', '/etc/ganglia/gmetad.conf',
                              owner = 'root',
                              group = 'hadoop',
    )
import pytest
from plenum.test.view_change.helper import ensure_all_nodes_have_same_data, \
start_stopped_node
from plenum.common.constants import DOMAIN_LEDGER_ID, LedgerState, POOL_LEDGER_ID
from plenum.test.helper import sendReqsToNodesAndVerifySuffReplies
from plenum.test.pool_transactions.conftest import looper, \
steward1, stewardWallet, stewardAndWallet1
from stp_core.common.log import getlogger
from stp_core.loop.eventually import eventually
from plenum.test.node_catchup.helper import check_ledger_state
from plenum.test.test_node import checkNodesConnected
from plenum.test import waits
from plenum.common.startable import Mode
from plenum.test.pool_transactions.helper import disconnect_node_and_ensure_disconnected
# Module-level logger from the stp_core logging facility.
logger = getlogger()
def catchuped(node):
    # A node has finished catch-up once it re-enters ``participating``
    # mode; used as an ``eventually`` predicate in the test below.
    assert node.mode == Mode.participating
def test_node_catchup_when_3_not_primary_node_restarted(
        looper, txnPoolNodeSet, tdir, tconf,
        allPluginsPath, steward1, stewardWallet):
    """
    Test case:
    1. Create pool of 4 nodes
    2. Stop not primary node
    3. Send some txns
    4. Start stopped node
    5. Ensure, that restarted node got all txns which was sent during restart
    6. Do step 2-5 for other not primary node in pool
    """

    def start_stop_one_node(node_to_restart, pool_of_nodes):
        """
        :param node_to_restart: node, which would be restarted
        :param pool_of_nodes: current pool
        :return: new pool with restarted node

        Node restart procedure consist of:
        1. Calling stop()
        2. Remove from looper and pool
        3. Create new instance of node with the same ha, cliha and node_name
        (also all path to data, keys and etc would be exactly as for stopped node)
        4. Add new instance into looper and pool
        5. Check, that other nodes accepted new instance and all pool has the same data
        """
        remaining_nodes = list(set(pool_of_nodes) - {node_to_restart})
        disconnect_node_and_ensure_disconnected(looper,
                                                pool_of_nodes,
                                                node_to_restart,
                                                stopNode=True)
        looper.removeProdable(node_to_restart)
        # The surviving nodes must stay consistent while one is down.
        ensure_all_nodes_have_same_data(looper,
                                        remaining_nodes,
                                        custom_timeout=tconf.VIEW_CHANGE_TIMEOUT)
        # Write a txn the stopped node misses and must catch up on later.
        sendReqsToNodesAndVerifySuffReplies(looper, stewardWallet, steward1, 1)
        node_to_restart = start_stopped_node(node_to_restart,
                                             looper,
                                             tconf,
                                             tdir,
                                             allPluginsPath,
                                             delay_instance_change_msgs=True)
        pool_of_nodes = remaining_nodes + [node_to_restart]
        looper.run(checkNodesConnected(pool_of_nodes))
        ensure_all_nodes_have_same_data(looper,
                                        pool_of_nodes,
                                        custom_timeout=tconf.VIEW_CHANGE_TIMEOUT)
        timeout = waits.expectedPoolCatchupTime(nodeCount=len(pool_of_nodes))
        # Both ledgers must reach the synced state before the node is usable.
        looper.run(eventually(check_ledger_state, node_to_restart, DOMAIN_LEDGER_ID,
                              LedgerState.synced, retryWait=.5, timeout=timeout))
        looper.run(eventually(check_ledger_state, node_to_restart, POOL_LEDGER_ID,
                              LedgerState.synced, retryWait=.5, timeout=timeout))
        looper.run(eventually(catchuped, node_to_restart, timeout=2 * timeout))
        return pool_of_nodes

    # Restart the three non-primary nodes one by one (reverse name order).
    nodes_names = sorted([n.name for n in txnPoolNodeSet], reverse=True)
    pool_of_nodes = txnPoolNodeSet
    for __ in range(3):
        node_to_restart = [n for n in pool_of_nodes if n.name == nodes_names[__]][0]
        assert not node_to_restart.has_master_primary
        pool_of_nodes = start_stop_one_node(node_to_restart, pool_of_nodes)
    sendReqsToNodesAndVerifySuffReplies(looper, stewardWallet, steward1, 1)
    ensure_all_nodes_have_same_data(looper,
                                    pool_of_nodes,
                                    custom_timeout=tconf.VIEW_CHANGE_TIMEOUT)
import pytest
from plenum.test.view_change.helper import ensure_all_nodes_have_same_data, \
start_stopped_node
from plenum.common.constants import DOMAIN_LEDGER_ID, LedgerState, POOL_LEDGER_ID
from plenum.test.helper import sendReqsToNodesAndVerifySuffReplies
from plenum.test.pool_transactions.conftest import looper, \
steward1, stewardWallet, stewardAndWallet1
from stp_core.common.log import getlogger
from stp_core.loop.eventually import eventually
from plenum.test.node_catchup.helper import check_ledger_state
from plenum.test.test_node import checkNodesConnected
from plenum.test import waits
from plenum.common.startable import Mode
from plenum.test.pool_transactions.helper import disconnect_node_and_ensure_disconnected
# Module-level logger from the stp_core logging facility.
logger = getlogger()
def catchuped(node):
    # A node has finished catch-up once it re-enters ``participating``
    # mode; used as an ``eventually`` predicate in the test below.
    assert node.mode == Mode.participating
def test_node_catchup_when_3_not_primary_node_restarted(
        looper, txnPoolNodeSet, tdir, tconf,
        allPluginsPath, steward1, stewardWallet):
    """
    Test case:
    1. Create pool of 4 nodes
    2. Stop not primary node
    3. Send some txns
    4. Start stopped node
    5. Ensure, that restarted node got all txns which was sent during restart
    6. Do step 2-5 for other not primary node in pool
    """

    def start_stop_one_node(node_to_restart, pool_of_nodes):
        """
        :param node_to_restart: node, which would be restarted
        :param pool_of_nodes: current pool
        :return: new pool with restarted node

        Node restart procedure consist of:
        1. Calling stop()
        2. Remove from looper and pool
        3. Create new instance of node with the same ha, cliha and node_name
        (also all path to data, keys and etc would be exactly as for stopped node)
        4. Add new instance into looper and pool
        5. Check, that other nodes accepted new instance and all pool has the same data
        """
        remaining_nodes = list(set(pool_of_nodes) - {node_to_restart})
        disconnect_node_and_ensure_disconnected(looper,
                                                pool_of_nodes,
                                                node_to_restart,
                                                stopNode=True)
        looper.removeProdable(node_to_restart)
        # The surviving nodes must stay consistent while one is down.
        ensure_all_nodes_have_same_data(looper,
                                        remaining_nodes,
                                        custom_timeout=tconf.VIEW_CHANGE_TIMEOUT)
        # Write a txn the stopped node misses and must catch up on later.
        sendReqsToNodesAndVerifySuffReplies(looper, stewardWallet, steward1, 1)
        node_to_restart = start_stopped_node(node_to_restart,
                                             looper,
                                             tconf,
                                             tdir,
                                             allPluginsPath,
                                             delay_instance_change_msgs=True)
        pool_of_nodes = remaining_nodes + [node_to_restart]
        looper.run(checkNodesConnected(pool_of_nodes))
        ensure_all_nodes_have_same_data(looper,
                                        pool_of_nodes,
                                        custom_timeout=tconf.VIEW_CHANGE_TIMEOUT)
        timeout = waits.expectedPoolCatchupTime(nodeCount=len(pool_of_nodes))
        # Both ledgers must reach the synced state before the node is usable.
        looper.run(eventually(check_ledger_state, node_to_restart, DOMAIN_LEDGER_ID,
                              LedgerState.synced, retryWait=.5, timeout=timeout))
        looper.run(eventually(check_ledger_state, node_to_restart, POOL_LEDGER_ID,
                              LedgerState.synced, retryWait=.5, timeout=timeout))
        looper.run(eventually(catchuped, node_to_restart, timeout=2 * timeout))
        return pool_of_nodes

    # Restart the three non-primary nodes one by one (reverse name order).
    nodes_names = sorted([n.name for n in txnPoolNodeSet], reverse=True)
    pool_of_nodes = txnPoolNodeSet
    for __ in range(3):
        node_to_restart = [n for n in pool_of_nodes if n.name == nodes_names[__]][0]
        assert not node_to_restart.has_master_primary
        pool_of_nodes = start_stop_one_node(node_to_restart, pool_of_nodes)
    sendReqsToNodesAndVerifySuffReplies(looper, stewardWallet, steward1, 1)
    ensure_all_nodes_have_same_data(looper,
                                    pool_of_nodes,
                                    custom_timeout=tconf.VIEW_CHANGE_TIMEOUT)
import asyncio
import json
import logging
from pathlib import Path
import urllib
import xml.etree.ElementTree as ET
import aiofiles
import aiohttp
from aiohttp import FormData
# === Global vars ===
# Log-line format shared by all integration-test helpers.
FORMAT = (
    "[%(asctime)s][%(name)s][%(process)d %(processName)s]" "[%(levelname)-8s](L:%(lineno)s) %(funcName)s: %(message)s"
)
logging.basicConfig(format=FORMAT, datefmt="%Y-%m-%d %H:%M:%S")
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.DEBUG)
# Root directory holding the XML/JSON fixture files used by the tests.
testfiles_root = Path(__file__).parent.parent / "test_files"
# (schema, xml filename) pairs exercised by the XML endpoints.
test_xml_files = [
    ("study", "SRP000539.xml"),
    ("sample", "SRS001433.xml"),
    ("run", "ERR000076.xml"),
    ("experiment", "ERX000119.xml"),
    ("analysis", "ERZ266973.xml"),
]
# (schema, original filename, replacement filename) triples for JSON drafts.
test_json_files = [
    ("study", "SRP000539.json", "SRP000539.json"),
    ("sample", "SRS001433.json", "SRS001433.json"),
    ("run", "ERR000076.json", "ERR000076.json"),
    ("experiment", "ERX000119.json", "ERX000119.json"),
    ("analysis", "ERZ266973.json", "ERZ266973.json"),
]
# Service endpoints: local dev deployment plus the mock auth service.
base_url = "http://localhost:5430"
mock_auth_url = "http://localhost:8000"
objects_url = f"{base_url}/objects"
drafts_url = f"{base_url}/drafts"
folders_url = f"{base_url}/folders"
users_url = f"{base_url}/users"
submit_url = f"{base_url}/submit"
publish_url = f"{base_url}/publish"
# Mock users; "current" addresses whichever user is logged in.
user_id = "current"
test_user_given = "test"
test_user_family = "test"
test_user = "<EMAIL>"
other_test_user_given = "test"
other_test_user_family = "test"
other_test_user = "<EMAIL>"
# === Helper functions ===
async def login(sess, eppn, given, family):
    """Mock login.

    Primes the mock auth service with the given identity, then hits the
    application's ``/aai`` endpoint so *sess* carries a logged-in session.

    :param sess: aiohttp client session
    :param eppn: eduPersonPrincipalName of the mock user
    :param given: given name of the mock user
    :param family: family name of the mock user
    """
    params = {
        "eppn": eppn,
        "family": family,
        "given": given,
    }
    # Prepare response
    url = f"{mock_auth_url}/setmock?{urllib.parse.urlencode(params)}"
    async with sess.get(f"{url}"):
        LOG.debug("Setting mock user")
    async with sess.get(f"{base_url}/aai"):
        LOG.debug("Doing mock user login")
async def create_request_data(schema, filename):
    """Create multipart request data for one schema/file pair.

    :param schema: name of the schema (folder) used for testing
    :param filename: name of the file used for testing
    :return: ``aiohttp.FormData`` with a single XML field named after
        the upper-cased schema
    """
    # Delegate to the multi-file builder so the field-construction logic
    # lives in exactly one place; a one-pair list yields identical data.
    return await create_multi_file_request_data([(schema, filename)])
async def create_multi_file_request_data(filepairs):
    """Create multipart request data with multiple files.

    :param filepairs: tuple containing pairs of schemas and filenames used for testing
    :return: ``aiohttp.FormData`` with one XML field per (schema, filename) pair
    """
    data = FormData()
    for schema, filename in filepairs:
        path_to_file = testfiles_root / schema / filename
        path = path_to_file.as_posix()
        async with aiofiles.open(path, mode="r") as f:
            data.add_field(schema.upper(), await f.read(), filename=filename, content_type="text/xml")
    return data
async def create_request_json_data(schema, filename):
    """Read a JSON fixture file and return its raw contents.

    :param schema: name of the schema (folder) used for testing
    :param filename: name of the file used for testing
    :return: file contents as a string (sent as the request body)
    """
    path_to_file = testfiles_root / schema / filename
    path = path_to_file.as_posix()
    async with aiofiles.open(path, mode="r") as f:
        data = await f.read()
    return data
async def post_object(sess, schema, filename):
    """Post one metadata object within session.

    :param sess: aiohttp client session
    :param schema: name of the schema (folder) used for testing
    :param filename: XML file to submit
    :return: tuple of (accessionId, schema)
    """
    data = await create_request_data(schema, filename)
    async with sess.post(f"{objects_url}/{schema}", data=data) as resp:
        LOG.debug(f"Adding new object to {schema}")
        assert resp.status == 201, "HTTP Status code error"
        ans = await resp.json()
        return ans["accessionId"], schema
async def delete_object(sess, schema, accession_id):
    """Delete metadata object within session; expects HTTP 204.

    :param sess: aiohttp client session
    :param schema: name of the schema (folder) used for testing
    :param accession_id: id of the object to delete
    """
    async with sess.delete(f"{objects_url}/{schema}/{accession_id}") as resp:
        LOG.debug(f"Deleting object {accession_id} from {schema}")
        assert resp.status == 204, "HTTP Status code error"
async def post_draft(sess, schema, filename):
    """Post one draft metadata object (XML) within session.

    :param sess: aiohttp client session
    :param schema: name of the schema (folder) used for testing
    :param filename: XML file to submit as a draft
    :return: accessionId of the created draft
    """
    data = await create_request_data(schema, filename)
    async with sess.post(f"{drafts_url}/{schema}", data=data) as resp:
        LOG.debug(f"Adding new object to {schema}")
        assert resp.status == 201, "HTTP Status code error"
        ans = await resp.json()
        return ans["accessionId"]
async def post_draft_json(sess, schema, filename):
    """Post one draft metadata object as JSON within session.

    :param sess: aiohttp client session
    :param schema: name of the schema (folder) used for testing
    :param filename: JSON file to submit as a draft
    :return: accessionId of the created draft
    """
    data = await create_request_json_data(schema, filename)
    async with sess.post(f"{drafts_url}/{schema}", data=data) as resp:
        LOG.debug(f"Adding new draft object to {schema}")
        assert resp.status == 201, "HTTP Status code error"
        ans = await resp.json()
        return ans["accessionId"]
async def get_draft(sess, schema, draft_id):
    """Get a drafted metadata object and return it as a JSON string.

    Bug fix: the URL previously hard-coded the ``sample`` schema and
    silently ignored the ``schema`` argument.

    :param sess: aiohttp client session
    :param schema: name of the schema (folder) the draft belongs to
    :param draft_id: accession id of the draft
    :return: the draft serialized back to a JSON string
    """
    async with sess.get(f"{drafts_url}/{schema}/{draft_id}") as resp:
        LOG.debug(f"Checking that {draft_id} JSON exists")
        assert resp.status == 200, "HTTP Status code error"
        ans = await resp.json()
        return json.dumps(ans)
async def put_draft(sess, schema, test_id, filename2):
    """Replace (PUT) one draft metadata object within session.

    :param sess: aiohttp client session
    :param schema: name of the schema (folder) used for testing
    :param test_id: accession id of the draft to replace
    :param filename2: JSON file with the replacement content
    :return: accessionId (asserted equal to ``test_id``)
    """
    data2 = await create_request_json_data(schema, filename2)
    async with sess.put(f"{drafts_url}/{schema}/{test_id}", data=data2) as resp:
        LOG.debug(f"Replace draft object in {schema}")
        assert resp.status == 200, "HTTP Status code error"
        ans_put = await resp.json()
        assert ans_put["accessionId"] == test_id, "accession ID error"
        return ans_put["accessionId"]
async def patch_draft(sess, schema, test_id, filename2):
    """Patch one draft metadata object within session.

    :param sess: aiohttp client session
    :param schema: name of the schema (folder) used for testing
    :param test_id: accession id of the draft to patch
    :param filename2: JSON file with the patch content
    :return: accessionId (asserted equal to ``test_id``)
    """
    data = await create_request_json_data(schema, filename2)
    async with sess.patch(f"{drafts_url}/{schema}/{test_id}", data=data) as resp:
        LOG.debug(f"Update draft object in {schema}")
        assert resp.status == 200, "HTTP Status code error"
        ans_put = await resp.json()
        assert ans_put["accessionId"] == test_id, "accession ID error"
        return ans_put["accessionId"]
async def delete_draft(sess, schema, accession_id):
    """Delete draft metadata object within session; expects HTTP 204.

    :param sess: aiohttp client session
    :param schema: name of the schema (folder) used for testing
    :param accession_id: id of the draft to delete
    """
    async with sess.delete(f"{drafts_url}/{schema}/{accession_id}") as resp:
        LOG.debug(f"Deleting draft object {accession_id} from {schema}")
        assert resp.status == 204, "HTTP Status code error"
async def post_folder(sess, data):
    """Post one object folder within session.

    :param sess: aiohttp client session
    :param data: folder payload, JSON-serializable dict
    :return: folderId of the created folder
    """
    async with sess.post(f"{folders_url}", data=json.dumps(data)) as resp:
        LOG.debug("Adding new folder")
        assert resp.status == 201, "HTTP Status code error"
        ans = await resp.json()
        return ans["folderId"]
async def patch_folder(sess, folder_id, patch):
    """Patch one object folder within session.

    :param sess: aiohttp client session
    :param folder_id: id of the folder to patch
    :param patch: JSON-patch payload (list of ops)
    :return: folderId (asserted equal to ``folder_id``)
    """
    async with sess.patch(f"{folders_url}/{folder_id}", data=json.dumps(patch)) as resp:
        LOG.debug(f"Updating folder {folder_id}")
        assert resp.status == 200, "HTTP Status code error"
        ans_patch = await resp.json()
        assert ans_patch["folderId"] == folder_id, "folder ID error"
        return ans_patch["folderId"]
async def publish_folder(sess, folder_id):
    """Publish one object folder within session.

    :param sess: aiohttp client session
    :param folder_id: id of the folder to publish
    :return: folderId (asserted equal to ``folder_id``)
    """
    async with sess.patch(f"{publish_url}/{folder_id}") as resp:
        LOG.debug(f"Publishing folder {folder_id}")
        assert resp.status == 200, "HTTP Status code error"
        ans = await resp.json()
        assert ans["folderId"] == folder_id, "folder ID error"
        return ans["folderId"]
async def delete_folder(sess, folder_id):
    """Delete object folder within session; expects HTTP 204.

    :param sess: aiohttp client session
    :param folder_id: id of the folder to delete
    """
    async with sess.delete(f"{folders_url}/{folder_id}") as resp:
        LOG.debug(f"Deleting folder {folder_id}")
        assert resp.status == 204, "HTTP Status code error"
async def patch_user(sess, user_id, real_user_id, patch):
    """Patch the currently logged-in user within session.

    NOTE: the request always targets ``/users/current`` — ``user_id`` is
    only used for logging; ``real_user_id`` is what the API reports back.

    :param sess: aiohttp client session
    :param user_id: user label used in log messages
    :param real_user_id: expected userId in the response
    :param patch: JSON-patch payload (list of ops)
    :return: userId (asserted equal to ``real_user_id``)
    """
    async with sess.patch(f"{users_url}/current", data=json.dumps(patch)) as resp:
        LOG.debug(f"Updating user {user_id}")
        assert resp.status == 200, "HTTP Status code error"
        ans_patch = await resp.json()
        assert ans_patch["userId"] == real_user_id, "user ID error"
        return ans_patch["userId"]
async def delete_user(sess, user_id):
    """Delete the currently logged-in user within session.

    NOTE: the request always targets ``/users/current`` — ``user_id`` is
    only used for logging. The deletion redirects to ``/``, which has no
    frontend in the test deployment, hence the expected 404.

    :param sess: aiohttp client session
    :param user_id: user label used in log messages
    """
    async with sess.delete(f"{users_url}/current") as resp:
        LOG.debug(f"Deleting user {user_id}")
        # we expect 404 as there is no frontend
        assert str(resp.url) == f"{base_url}/", "redirect url user delete differs"
        assert resp.status == 404, "HTTP Status code error"
# === Integration tests ===
async def test_crud_works(sess, schema, filename, folder_id):
    """Test REST api POST, GET and DELETE reqs.

    Tries to create new object, gets accession id and checks if correct
    resource is returned with that id. Finally deletes the object and checks it
    was deleted.

    :param sess: client session used for the requests
    :param schema: name of the schema (folder) used for testing
    :param filename: name of the file used for testing.
    :param folder_id: folder the object is attached to
    """
    # BUG FIX: post_object returns an (accessionId, schema) tuple; the old code
    # kept the tuple and later compared it against plain accessionId strings in
    # the folder, so the final "was deleted" assertion could never fail.
    accession_id, _ = await post_object(sess, schema, filename)
    patch = [{"op": "add", "path": "/metadataObjects/-", "value": {"accessionId": accession_id, "schema": schema}}]
    await patch_folder(sess, folder_id, patch)
    async with sess.get(f"{objects_url}/{schema}/{accession_id}") as resp:
        LOG.debug(f"Checking that {accession_id} JSON is in {schema}")
        assert resp.status == 200, "HTTP Status code error"
    async with sess.get(f"{objects_url}/{schema}/{accession_id}?format=xml") as resp:
        LOG.debug(f"Checking that {accession_id} XML is in {schema}")
        assert resp.status == 200, "HTTP Status code error"
    await delete_object(sess, schema, accession_id)
    async with sess.get(f"{objects_url}/{schema}/{accession_id}") as resp:
        LOG.debug(f"Checking that JSON object {accession_id} was deleted")
        assert resp.status == 404, "HTTP Status code error"
    async with sess.get(f"{objects_url}/{schema}/{accession_id}?format=xml") as resp:
        LOG.debug(f"Checking that XML object {accession_id} was deleted")
        assert resp.status == 404, "HTTP Status code error"
    async with sess.get(f"{folders_url}/{folder_id}") as resp:
        LOG.debug(f"Checking that object {accession_id} was deleted from folder {folder_id}")
        res = await resp.json()
        # Compare plain ids so a leftover reference is actually detected
        assert not any(d["accessionId"] == accession_id for d in res["metadataObjects"]), "object still exists"
async def test_crud_drafts_works(sess, schema, filename, filename2, folder_id):
    """Test drafts REST api POST, PUT and DELETE reqs.

    Tries to create new draft object, gets accession id and checks if correct
    resource is returned with that id. Finally deletes the object and checks it
    was deleted.

    :param sess: client session used for the requests
    :param schema: name of the schema (folder) used for testing
    :param filename: name of the file used for testing.
    :param filename2: replacement file used for the PUT request
    :param folder_id: folder the draft is attached to
    """
    draft_id = await post_draft_json(sess, schema, filename)
    folder_patch = [{"op": "add", "path": "/drafts/-", "value": {"accessionId": draft_id, "schema": f"draft-{schema}"}}]
    await patch_folder(sess, folder_id, folder_patch)
    accession_id = await put_draft(sess, schema, draft_id, filename2)
    # The replaced draft must still be retrievable under the same id
    async with sess.get(f"{drafts_url}/{schema}/{accession_id}") as resp:
        LOG.debug(f"Checking that {accession_id} JSON is in {schema}")
        assert resp.status == 200, "HTTP Status code error"
    await delete_draft(sess, schema, accession_id)
    # After deletion, both the draft itself and its folder reference are gone
    async with sess.get(f"{drafts_url}/{schema}/{accession_id}") as resp:
        LOG.debug(f"Checking that JSON object {accession_id} was deleted")
        assert resp.status == 404, "HTTP Status code error"
    async with sess.get(f"{folders_url}/{folder_id}") as resp:
        LOG.debug(f"Checking that JSON object {accession_id} was deleted from folder {folder_id}")
        folder = await resp.json()
        assert all(entry["accessionId"] != accession_id for entry in folder["drafts"]), "draft object still exists"
async def test_patch_drafts_works(sess, schema, filename, filename2, folder_id):
    """Test REST api POST, PATCH and DELETE reqs.

    Tries to create put and patch object, gets accession id and
    checks if correct resource is returned with that id.
    Finally deletes the object and checks it was deleted.

    :param sess: client session used for the requests
    :param schema: name of the schema (folder) used for testing
    :param filename: name of the file used for testing.
    :param filename2: patch file applied to the draft
    :param folder_id: folder the draft is attached to
    """
    draft_id = await post_draft_json(sess, schema, filename)
    folder_patch = [{"op": "add", "path": "/drafts/-", "value": {"accessionId": draft_id, "schema": f"draft-{schema}"}}]
    await patch_folder(sess, folder_id, folder_patch)
    accession_id = await patch_draft(sess, schema, draft_id, filename2)
    async with sess.get(f"{drafts_url}/{schema}/{accession_id}") as resp:
        LOG.debug(f"Checking that {accession_id} JSON is in {schema}")
        patched = await resp.json()
        # Expected values come from the patch fixture file
        assert patched["centerName"] == "GEOM", "content mismatch"
        assert patched["alias"] == "GSE10968", "content mismatch"
        assert resp.status == 200, "HTTP Status code error"
    await delete_draft(sess, schema, accession_id)
    async with sess.get(f"{drafts_url}/{schema}/{accession_id}") as resp:
        LOG.debug(f"Checking that JSON object {accession_id} was deleted")
        assert resp.status == 404, "HTTP Status code error"
async def test_querying_works(sess, folder_id):
    """Test query endpoint with working and failing query.

    :param sess: client session used for the requests
    :param folder_id: folder the posted objects are attached to
    """
    files = await asyncio.gather(*[post_object(sess, schema, filename) for schema, filename in test_xml_files])
    for accession_id, schema in files:
        patch = [{"op": "add", "path": "/metadataObjects/-", "value": {"accessionId": accession_id, "schema": schema}}]
        await patch_folder(sess, folder_id, patch)
    # Per-collection (field, value) pairs expected to match the fixture content
    queries = {
        "study": [
            ("studyTitle", "integrated"),
            ("studyType", "Other"),
            ("studyAbstract", "arabidopsis thaliana"),
            ("studyAttributes", "prjna107265"),
            ("studyAttributes", "parent_bioproject"),
        ],
        "sample": [
            ("title", "HapMap sample"),
            ("description", "human hapmap individual"),
            ("centerName", "hapmap"),
            ("sampleName", "homo sapiens"),
            ("scientificName", "homo sapiens"),
            ("sampleName", 9606),
        ],
        "run": [
            ("fileType", "srf"),
            ("experimentReference", "g1k-bgi-na18542"),
            ("experimentReference", "erx000037"),
        ],
        "experiment": [("studyReference", "1000Genomes project pilot")],
        "analysis": [
            ("fileType", "other"),
            ("studyReference", "HipSci___RNAseq___"),
            ("sampleReference", "HPSI0114i-eipl_3"),
        ],
    }

    async def do_one_query(schema, key, value, expected_status):
        # One query request; only the HTTP status is checked
        async with sess.get(f"{objects_url}/{schema}?{key}={value}") as resp:
            assert resp.status == expected_status, "HTTP Status code error"

    for schema, schema_queries in queries.items():
        LOG.debug(f"Querying {schema} collection with working params")
        await asyncio.gather(*[do_one_query(schema, key, value, 200) for key, value in schema_queries])
        # BUG FIX: this debug call was missing its f-prefix and logged the
        # literal text "{schema}" instead of the collection name
        LOG.debug(f"Querying {schema} collection with non-working params")
        invalid = "yoloswaggings"
        await asyncio.gather(*[do_one_query(schema, key, invalid, 404) for key, _ in schema_queries])
    await asyncio.gather(*[delete_object(sess, schema, accession_id) for accession_id, schema in files])
async def test_getting_all_objects_from_schema_works(sess, folder_id):
    """Check that /objects/study returns objects with correct pagination.

    :param sess: client session used for the requests
    :param folder_id: folder the posted objects are attached to
    """
    # Create 13 study objects and register each one in the folder
    files = await asyncio.gather(*[post_object(sess, "study", "SRP000539.xml") for _ in range(13)])
    for accession_id, schema in files:
        patch = [{"op": "add", "path": "/metadataObjects/-", "value": {"accessionId": accession_id, "schema": schema}}]
        await patch_folder(sess, folder_id, patch)
    # Default pagination: page 1 with 10 items per page
    async with sess.get(f"{objects_url}/study") as resp:
        assert resp.status == 200
        body = await resp.json()
        page_info = body["page"]
        assert page_info["page"] == 1
        assert page_info["size"] == 10
        assert page_info["totalPages"] == 2
        assert page_info["totalObjects"] == 13
        assert len(body["objects"]) == 10
    # Explicit pagination parameters
    async with sess.get(f"{objects_url}/study?page=2&per_page=3") as resp:
        assert resp.status == 200
        body = await resp.json()
        page_info = body["page"]
        assert page_info["page"] == 2
        assert page_info["size"] == 3
        assert page_info["totalPages"] == 5
        assert page_info["totalObjects"] == 13
        assert len(body["objects"]) == 3
    # Out-of-range pagination parameters are rejected
    async with sess.get(f"{objects_url}/study?page=-1") as resp:
        assert resp.status == 400
    async with sess.get(f"{objects_url}/study?per_page=0") as resp:
        assert resp.status == 400
    # Clean up all created objects
    await asyncio.gather(*[delete_object(sess, "study", accession_id) for accession_id, _ in files])
async def test_crud_folders_works(sess):
    """Test folders REST api POST, GET, PATCH, PUBLISH and DELETE reqs.

    :param sess: client session used for the requests
    """
    # Create new folder and check its creation succeeded
    data = {"name": "test", "description": "test folder"}
    folder_id = await post_folder(sess, data)
    async with sess.get(f"{folders_url}/{folder_id}") as resp:
        LOG.debug(f"Checking that folder {folder_id} was created")
        assert resp.status == 200, "HTTP Status code error"
    # Create draft from test XML file and patch the draft into the newly created folder
    draft_id = await post_draft(sess, "sample", "SRS001433.xml")
    patch1 = [{"op": "add", "path": "/drafts/-", "value": [{"accessionId": draft_id, "schema": "draft-sample"}]}]
    folder_id = await patch_folder(sess, folder_id, patch1)
    async with sess.get(f"{folders_url}/{folder_id}") as resp:
        LOG.debug(f"Checking that folder {folder_id} was patched")
        res = await resp.json()
        assert res["folderId"] == folder_id, "content mismatch"
        assert res["name"] == "test", "content mismatch"
        assert res["description"] == "test folder", "content mismatch"
        assert res["published"] is False, "content mismatch"
        assert res["drafts"] == [{"accessionId": draft_id, "schema": "draft-sample"}], "content mismatch"
        assert res["metadataObjects"] == [], "content mismatch"
    # Get the draft from the collection within this session and post it to objects collection
    draft = await get_draft(sess, "sample", draft_id)
    async with sess.post(f"{objects_url}/sample", data=draft) as resp:
        LOG.debug("Adding draft to actual objects")
        assert resp.status == 201, "HTTP Status code error"
        ans = await resp.json()
        # The promoted object gets a fresh accession id, distinct from the draft's
        assert ans["accessionId"] != draft_id, "content mismatch"
        accession_id = ans["accessionId"]
    # Patch folder so that original draft becomes an object in the folder
    patch2 = [
        {"op": "add", "path": "/metadataObjects/-", "value": [{"accessionId": accession_id, "schema": "sample"}]},
    ]
    folder_id = await patch_folder(sess, folder_id, patch2)
    async with sess.get(f"{folders_url}/{folder_id}") as resp:
        LOG.debug(f"Checking that folder {folder_id} was patched")
        res = await resp.json()
        assert res["folderId"] == folder_id, "content mismatch"
        assert res["published"] is False, "content mismatch"
        assert res["drafts"] == [{"accessionId": draft_id, "schema": "draft-sample"}], "content mismatch"
        assert res["metadataObjects"] == [{"accessionId": accession_id, "schema": "sample"}], "content mismatch"
    # Publish the folder; publishing clears the drafts list
    folder_id = await publish_folder(sess, folder_id)
    async with sess.get(f"{folders_url}/{folder_id}") as resp:
        LOG.debug(f"Checking that folder {folder_id} was patched")
        res = await resp.json()
        assert res["folderId"] == folder_id, "content mismatch"
        assert res["published"] is True, "content mismatch"
        assert res["drafts"] == [], "content mismatch"
        assert res["metadataObjects"] == [{"accessionId": accession_id, "schema": "sample"}], "content mismatch"
    # Delete folder
    await delete_folder(sess, folder_id)
    async with sess.get(f"{folders_url}/{folder_id}") as resp:
        LOG.debug(f"Checking that folder {folder_id} was deleted")
        assert resp.status == 404, "HTTP Status code error"
    async with sess.get(f"{users_url}/current") as resp:
        LOG.debug(f"Checking that folder {folder_id} was deleted from current user")
        res = await resp.json()
        # BUG FIX: the old check compared the user's folder ids against
        # accession_id (an object id), so it could never detect a leftover
        # folder; compare against folder_id instead. Also removed a stray
        # debugging print(res).
        assert folder_id not in res["folders"], "folder still exists at user"
async def test_crud_users_works(sess):
    """Test users REST api GET, PATCH and DELETE reqs.

    :param sess: client session used for the requests
    """
    # The mock login must already have created a user object
    async with sess.get(f"{users_url}/{user_id}") as resp:
        LOG.debug(f"Reading user {user_id}")
        assert resp.status == 200, "HTTP Status code error"
        body = await resp.json()
        real_user_id = body["userId"]
    # Create a folder and attach it to the user via JSON patch
    folder_data = {"name": "test", "description": "test folder"}
    folder_id = await post_folder(sess, folder_data)
    user_patch = [{"op": "add", "path": "/folders/-", "value": [folder_id]}]
    await patch_user(sess, user_id, real_user_id, user_patch)
    async with sess.get(f"{users_url}/{user_id}") as resp:
        LOG.debug(f"Checking that user {user_id} was patched")
        body = await resp.json()
        assert body["userId"] == real_user_id, "content mismatch"
        assert body["name"] == "test test", "content mismatch"
        assert body["drafts"] == [], "content mismatch"
        assert folder_id in body["folders"], "content mismatch"
    # Delete user
    await delete_user(sess, user_id)
    # 401 means the API is inaccessible, i.e. the session ended;
    # this extra check is not strictly needed but good to do
    async with sess.get(f"{users_url}/{user_id}") as resp:
        LOG.debug(f"Checking that user {user_id} was deleted")
        assert resp.status == 401, "HTTP Status code error"
async def test_submissions_work(sess, folder_id):
    """Test actions in submission xml files.

    Posts a submission with two 'add' actions, then a second submission that
    modifies the created study object. NOTE: this test rewrites
    SRP000539_modified.xml on disk (injects and later removes the accession
    attribute), so it must not run concurrently with itself.

    :param sess: client session used for the requests
    :param folder_id: folder the submitted objects are attached to
    """
    # Post original submission with two 'add' actions
    sub_files = [("submission", "ERA521986_valid.xml"), ("study", "SRP000539.xml"), ("sample", "SRS001433.xml")]
    data = await create_multi_file_request_data(sub_files)
    async with sess.post(f"{submit_url}", data=data) as resp:
        LOG.debug("Checking initial submission worked")
        assert resp.status == 200, "HTTP Status code error"
        res = await resp.json()
        # Only the study and sample objects are returned; the submission
        # file itself only carries the actions
        assert len(res) == 2, "content mismatch"
        assert res[0]["schema"] == "study", "content mismatch"
        assert res[1]["schema"] == "sample", "content mismatch"
        study_access_id = res[0]["accessionId"]
        patch = [
            {
                "op": "add",
                "path": "/metadataObjects/-",
                "value": {"accessionId": res[0]["accessionId"], "schema": res[0]["schema"]},
            },
            {
                "op": "add",
                "path": "/metadataObjects/-",
                "value": {"accessionId": res[1]["accessionId"], "schema": res[1]["schema"]},
            },
        ]
        await patch_folder(sess, folder_id, patch)
    # Sanity check that the study object was inserted correctly before modifying it
    async with sess.get(f"{objects_url}/study/{study_access_id}") as resp:
        LOG.debug("Sanity checking that previous object was added correctly")
        assert resp.status == 200, "HTTP Status code error"
        res = await resp.json()
        assert res["accessionId"] == study_access_id, "content mismatch"
        assert res["alias"] == "GSE10966", "content mismatch"
        assert res["descriptor"]["studyTitle"] == (
            "Highly integrated epigenome maps in Arabidopsis - whole genome shotgun bisulfite sequencing"
        ), "content mismatch"
    # Give test file the correct accession id (written back to disk)
    LOG.debug("Sharing the correct accession ID created in this test instance")
    mod_study = testfiles_root / "study" / "SRP000539_modified.xml"
    tree = ET.parse(mod_study)
    root = tree.getroot()
    for elem in root.iter("STUDY"):
        elem.set("accession", study_access_id)
    tree.write(mod_study, encoding="utf-8")
    # Post new submission that modifies previously added study object and validates it
    sub_files = [("submission", "ERA521986_modify.xml"), ("study", "SRP000539_modified.xml")]
    data = await create_multi_file_request_data(sub_files)
    async with sess.post(f"{submit_url}", data=data) as resp:
        LOG.debug("Checking object in initial submission was modified")
        assert resp.status == 200, "HTTP Status code error"
        res = await resp.json()
        assert len(res) == 2, "content mismatch"
        new_study_access_id = res[0]["accessionId"]
        # A 'modify' action must keep the original accession id
        assert study_access_id == new_study_access_id
    # Check the modified object was inserted correctly
    async with sess.get(f"{objects_url}/study/{new_study_access_id}") as resp:
        LOG.debug("Checking that previous object was modified correctly")
        assert resp.status == 200, "HTTP Status code error"
        res = await resp.json()
        assert res["accessionId"] == new_study_access_id, "content mismatch"
        assert res["alias"] == "GSE10966", "content mismatch"
        assert res["descriptor"]["studyTitle"] == ("Different title for testing purposes"), "content mismatch"
    # Remove the accession id that was used for testing from test file
    # (restores the fixture so the next run starts clean)
    LOG.debug("Sharing the correct accession ID created in this test instance")
    mod_study = testfiles_root / "study" / "SRP000539_modified.xml"
    tree = ET.parse(mod_study)
    root = tree.getroot()
    for elem in root.iter("STUDY"):
        del elem.attrib["accession"]
    tree.write(mod_study, encoding="utf-8")
async def test_health_check(sess):
    """Test the health check endpoint.

    :param sess: client session used for the request
    """
    async with sess.get(f"{base_url}/health") as resp:
        LOG.debug("Checking that health status is ok")
        assert resp.status == 200, "HTTP Status code error"
        body = await resp.json()
        # Both the service itself and its database must report Ok
        assert body["status"] == "Ok"
        assert body["services"]["database"]["status"] == "Ok"
async def main():
    """Launch different test tasks and run them.

    Runs two sequential sessions: first a different mock user exercises the
    submission flow (to verify objects are tied to their creator), then the
    main mock user runs the full test suite. User CRUD runs last because it
    deletes the user.
    """
    # Session 1: a *different* mock user
    async with aiohttp.ClientSession() as sess:
        LOG.debug("=== Login other mock user ===")
        await login(sess, other_test_user, other_test_user_given, other_test_user_family)
        # Test add, modify, validate and release action with submissions
        # added to validate that objects belong to a specific user
        LOG.debug("=== Testing actions within submissions ===")
        submission_folder = {
            "name": "submission test 1",
            "description": "submission test folder 1",
        }
        submission_folder_id = await post_folder(sess, submission_folder)
        await test_submissions_work(sess, submission_folder_id)
    # Session 2: the main mock user runs the whole suite
    async with aiohttp.ClientSession() as sess:
        LOG.debug("=== Login mock user ===")
        await login(sess, test_user, test_user_given, test_user_family)
        # Test adding and getting objects
        LOG.debug("=== Testing basic CRUD operations ===")
        basic_folder = {
            "name": "basic test",
            "description": "basic test folder",
        }
        basic_folder_id = await post_folder(sess, basic_folder)
        await asyncio.gather(*[test_crud_works(sess, schema, file, basic_folder_id) for schema, file in test_xml_files])
        # Test adding and getting draft objects
        LOG.debug("=== Testing basic CRUD drafts operations ===")
        draft_folder = {
            "name": "basic test draft",
            "description": "basic test draft folder",
        }
        draft_folder_id = await post_folder(sess, draft_folder)
        await asyncio.gather(
            *[
                test_crud_drafts_works(sess, schema, file, file2, draft_folder_id)
                for schema, file, file2 in test_json_files
            ]
        )
        # Test patch and put
        LOG.debug("=== Testing patch and put drafts operations ===")
        await test_crud_drafts_works(sess, "sample", "SRS001433.json", "put.json", draft_folder_id)
        await test_patch_drafts_works(sess, "study", "SRP000539.json", "patch.json", draft_folder_id)
        # Test queries
        LOG.debug("=== Testing queries ===")
        query_folder = {
            "name": "basic test query",
            "description": "basic test query folder",
        }
        query_folder_id = await post_folder(sess, query_folder)
        await test_querying_works(sess, query_folder_id)
        # Test /objects/study endpoint for query pagination
        LOG.debug("=== Testing getting all objects & pagination ===")
        pagination_folder = {
            "name": "basic test pagination",
            "description": "basic test pagination folder",
        }
        pagination_folder_id = await post_folder(sess, pagination_folder)
        await test_getting_all_objects_from_schema_works(sess, pagination_folder_id)
        # Test creating, reading, updating and deleting folders
        LOG.debug("=== Testing basic CRUD folder operations ===")
        await test_crud_folders_works(sess)
        # Test add, modify, validate and release action with submissions
        LOG.debug("=== Testing actions within submissions ===")
        submission_folder = {
            "name": "submission test",
            "description": "submission test folder",
        }
        submission_folder_id = await post_folder(sess, submission_folder)
        await test_submissions_work(sess, submission_folder_id)
        # Test health status check
        LOG.debug("=== Testing health status check ===")
        await test_health_check(sess)
        # Test reading, updating and deleting users
        # this needs to be done last as it deletes users
        LOG.debug("=== Testing basic CRUD user operations ===")
        await test_crud_users_works(sess)
if __name__ == "__main__":
    # BUG FIX: stray text (a dataset/merge artifact with the repo path and a
    # duplicated "import asyncio") was fused onto this line; restore the plain
    # script entry point.
    asyncio.run(main())
import json
import logging
from pathlib import Path
import urllib
import xml.etree.ElementTree as ET
import aiofiles
import aiohttp
from aiohttp import FormData
# === Global vars ===
# Verbose log format: timestamp, logger, process info, level, source line and function.
FORMAT = (
    "[%(asctime)s][%(name)s][%(process)d %(processName)s]" "[%(levelname)-8s](L:%(lineno)s) %(funcName)s: %(message)s"
)
logging.basicConfig(format=FORMAT, datefmt="%Y-%m-%d %H:%M:%S")
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.DEBUG)
# Root directory holding the XML/JSON fixtures used by these tests.
testfiles_root = Path(__file__).parent.parent / "test_files"
# (schema, filename) pairs posted as XML metadata objects.
test_xml_files = [
    ("study", "SRP000539.xml"),
    ("sample", "SRS001433.xml"),
    ("run", "ERR000076.xml"),
    ("experiment", "ERX000119.xml"),
    ("analysis", "ERZ266973.xml"),
]
# (schema, filename, replacement filename) triples for the draft JSON tests.
test_json_files = [
    ("study", "SRP000539.json", "SRP000539.json"),
    ("sample", "SRS001433.json", "SRS001433.json"),
    ("run", "ERR000076.json", "ERR000076.json"),
    ("experiment", "ERX000119.json", "ERX000119.json"),
    ("analysis", "ERZ266973.json", "ERZ266973.json"),
]
# Endpoints of the API under test and of the mock auth service.
base_url = "http://localhost:5430"
mock_auth_url = "http://localhost:8000"
objects_url = f"{base_url}/objects"
drafts_url = f"{base_url}/drafts"
folders_url = f"{base_url}/folders"
users_url = f"{base_url}/users"
submit_url = f"{base_url}/submit"
publish_url = f"{base_url}/publish"
# The API exposes the logged-in user under the alias "current".
user_id = "current"
# Mock user identities consumed by login().
test_user_given = "test"
test_user_family = "test"
test_user = "<EMAIL>"
other_test_user_given = "test"
other_test_user_family = "test"
other_test_user = "<EMAIL>"
# === Helper functions ===
async def login(sess, eppn, given, family):
    """Perform a mock login against the mock auth service.

    :param sess: client session to authenticate
    :param eppn: eduPersonPrincipalName of the mock user
    :param given: given name of the mock user
    :param family: family name of the mock user
    """
    query = urllib.parse.urlencode({
        "eppn": eppn,
        "family": family,
        "given": given,
    })
    # Prime the mock AAI with the desired identity, then do the login round trip
    async with sess.get(f"{mock_auth_url}/setmock?{query}"):
        LOG.debug("Setting mock user")
    async with sess.get(f"{base_url}/aai"):
        LOG.debug("Doing mock user login")
async def create_request_data(schema, filename):
    """Build multipart form data for one XML test file.

    :param schema: name of the schema (folder) used for testing
    :param filename: name of the file used for testing.
    """
    form = FormData()
    file_path = (testfiles_root / schema / filename).as_posix()
    async with aiofiles.open(file_path, mode="r") as handle:
        content = await handle.read()
    # Field name is the upper-cased schema, as the submit endpoint expects
    form.add_field(schema.upper(), content, filename=filename, content_type="text/xml")
    return form
async def create_multi_file_request_data(filepairs):
    """Build multipart form data containing several XML test files.

    :param filepairs: tuple containing pairs of schemas and filenames used for testing
    """
    form = FormData()
    for schema, filename in filepairs:
        file_path = (testfiles_root / schema / filename).as_posix()
        async with aiofiles.open(file_path, mode="r") as handle:
            content = await handle.read()
        form.add_field(schema.upper(), content, filename=filename, content_type="text/xml")
    return form
async def create_request_json_data(schema, filename):
    """Read a JSON test fixture and return its contents as a string.

    :param schema: name of the schema (folder) used for testing
    :param filename: name of the file used for testing.
    """
    file_path = (testfiles_root / schema / filename).as_posix()
    async with aiofiles.open(file_path, mode="r") as handle:
        content = await handle.read()
    return content
async def post_object(sess, schema, filename):
    """Post one metadata object within session, returns (accessionId, schema).

    :param sess: client session used for the request
    :param schema: collection the object is posted to
    :param filename: XML fixture posted as the object
    """
    payload = await create_request_data(schema, filename)
    async with sess.post(f"{objects_url}/{schema}", data=payload) as response:
        LOG.debug(f"Adding new object to {schema}")
        assert response.status == 201, "HTTP Status code error"
        body = await response.json()
        return body["accessionId"], schema
async def delete_object(sess, schema, accession_id):
    """Remove a metadata object with DELETE and verify the request succeeded.

    :param sess: client session used for the request
    :param schema: collection the object lives in
    :param accession_id: id of the object to delete
    """
    async with sess.delete(f"{objects_url}/{schema}/{accession_id}") as response:
        LOG.debug(f"Deleting object {accession_id} from {schema}")
        assert response.status == 204, "HTTP Status code error"
async def post_draft(sess, schema, filename):
    """Post one draft metadata object within session, returns accessionId.

    :param sess: client session used for the request
    :param schema: collection the draft is posted to
    :param filename: XML fixture posted as the draft
    """
    data = await create_request_data(schema, filename)
    async with sess.post(f"{drafts_url}/{schema}", data=data) as resp:
        # Clarified log message: this creates a *draft* object. The old text
        # ("Adding new object to ...") was identical to post_object's and
        # inconsistent with post_draft_json, making logs ambiguous.
        LOG.debug(f"Adding new draft object to {schema}")
        assert resp.status == 201, "HTTP Status code error"
        ans = await resp.json()
        return ans["accessionId"]
async def post_draft_json(sess, schema, filename):
    """Create a draft object from a JSON fixture and return its accessionId.

    :param sess: client session used for the request
    :param schema: collection the draft is posted to
    :param filename: JSON fixture posted as the draft
    """
    payload = await create_request_json_data(schema, filename)
    async with sess.post(f"{drafts_url}/{schema}", data=payload) as response:
        LOG.debug(f"Adding new draft object to {schema}")
        assert response.status == 201, "HTTP Status code error"
        body = await response.json()
        return body["accessionId"]
async def get_draft(sess, schema, draft_id):
    """Get and return a drafted metadata object as a JSON string.

    :param sess: client session used for the request
    :param schema: collection the draft lives in
    :param draft_id: id of the draft to fetch
    """
    # BUG FIX: the URL hard-coded the "sample" collection, silently ignoring
    # the schema parameter; use the parameter so any collection works.
    async with sess.get(f"{drafts_url}/{schema}/{draft_id}") as resp:
        LOG.debug(f"Checking that {draft_id} JSON exists")
        assert resp.status == 200, "HTTP Status code error"
        ans = await resp.json()
        return json.dumps(ans)
async def put_draft(sess, schema, test_id, filename2):
    """Replace a draft wholesale via PUT and return its accessionId.

    :param sess: client session used for the request
    :param schema: collection the draft lives in
    :param test_id: id of the draft to replace
    :param filename2: JSON fixture holding the replacement content
    """
    replacement = await create_request_json_data(schema, filename2)
    async with sess.put(f"{drafts_url}/{schema}/{test_id}", data=replacement) as response:
        LOG.debug(f"Replace draft object in {schema}")
        assert response.status == 200, "HTTP Status code error"
        body = await response.json()
        # Replacement must keep the accession id stable
        assert body["accessionId"] == test_id, "accession ID error"
        return body["accessionId"]
async def patch_draft(sess, schema, test_id, filename2):
    """Partially update a draft via PATCH and return its accessionId.

    :param sess: client session used for the request
    :param schema: collection the draft lives in
    :param test_id: id of the draft to update
    :param filename2: JSON fixture holding the patch content
    """
    patch_body = await create_request_json_data(schema, filename2)
    async with sess.patch(f"{drafts_url}/{schema}/{test_id}", data=patch_body) as response:
        LOG.debug(f"Update draft object in {schema}")
        assert response.status == 200, "HTTP Status code error"
        body = await response.json()
        # Patching must keep the accession id stable
        assert body["accessionId"] == test_id, "accession ID error"
        return body["accessionId"]
async def delete_draft(sess, schema, accession_id):
    """Remove a draft object with DELETE and verify the request succeeded.

    :param sess: client session used for the request
    :param schema: collection the draft lives in
    :param accession_id: id of the draft to delete
    """
    async with sess.delete(f"{drafts_url}/{schema}/{accession_id}") as response:
        LOG.debug(f"Deleting draft object {accession_id} from {schema}")
        assert response.status == 204, "HTTP Status code error"
async def post_folder(sess, data):
    """Create a new folder via POST and return its folderId.

    :param sess: client session used for the request
    :param data: dict with the folder's name and description
    """
    payload = json.dumps(data)
    async with sess.post(folders_url, data=payload) as response:
        LOG.debug("Adding new folder")
        assert response.status == 201, "HTTP Status code error"
        body = await response.json()
        return body["folderId"]
async def patch_folder(sess, folder_id, patch):
    """Apply a JSON patch to a folder and return its folderId.

    :param sess: client session used for the request
    :param folder_id: id of the folder to modify
    :param patch: JSON-patch operation list
    """
    url = f"{folders_url}/{folder_id}"
    async with sess.patch(url, data=json.dumps(patch)) as response:
        LOG.debug(f"Updating folder {folder_id}")
        assert response.status == 200, "HTTP Status code error"
        body = await response.json()
        # The server echoes back the id of the patched folder
        assert body["folderId"] == folder_id, "folder ID error"
        return body["folderId"]
async def publish_folder(sess, folder_id):
    """Publish a folder through the publish endpoint and return its folderId.

    :param sess: client session used for the request
    :param folder_id: id of the folder to publish
    """
    url = f"{publish_url}/{folder_id}"
    async with sess.patch(url) as response:
        LOG.debug(f"Publishing folder {folder_id}")
        assert response.status == 200, "HTTP Status code error"
        body = await response.json()
        assert body["folderId"] == folder_id, "folder ID error"
        return body["folderId"]
async def delete_folder(sess, folder_id):
    """Remove a folder with DELETE and verify the request succeeded.

    :param sess: client session used for the request
    :param folder_id: id of the folder to delete
    """
    url = f"{folders_url}/{folder_id}"
    async with sess.delete(url) as response:
        LOG.debug(f"Deleting folder {folder_id}")
        assert response.status == 204, "HTTP Status code error"
async def patch_user(sess, user_id, real_user_id, patch):
    """Apply a JSON patch to the current user and return the userId.

    :param sess: client session used for the request
    :param user_id: alias used in log messages (the API always patches "current")
    :param real_user_id: actual userId expected in the response
    :param patch: JSON-patch operation list
    """
    async with sess.patch(f"{users_url}/current", data=json.dumps(patch)) as response:
        LOG.debug(f"Updating user {user_id}")
        assert response.status == 200, "HTTP Status code error"
        body = await response.json()
        assert body["userId"] == real_user_id, "user ID error"
        return body["userId"]
async def delete_user(sess, user_id):
    """Remove the current user and verify the post-delete redirect.

    :param sess: client session used for the request
    :param user_id: alias used in log messages (the API always deletes "current")
    """
    async with sess.delete(f"{users_url}/current") as response:
        LOG.debug(f"Deleting user {user_id}")
        # No frontend is running, so the redirect to the site root returns 404
        assert str(response.url) == f"{base_url}/", "redirect url user delete differs"
        assert response.status == 404, "HTTP Status code error"
# === Integration tests ===
async def test_crud_works(sess, schema, filename, folder_id):
    """Test REST api POST, GET and DELETE reqs.

    Tries to create new object, gets accession id and checks if correct
    resource is returned with that id. Finally deletes the object and checks it
    was deleted.

    :param sess: client session used for the requests
    :param schema: name of the schema (folder) used for testing
    :param filename: name of the file used for testing.
    :param folder_id: folder the object is attached to
    """
    # BUG FIX: post_object returns an (accessionId, schema) tuple; the old code
    # kept the tuple and later compared it against plain accessionId strings in
    # the folder, so the final "was deleted" assertion could never fail.
    accession_id, _ = await post_object(sess, schema, filename)
    patch = [{"op": "add", "path": "/metadataObjects/-", "value": {"accessionId": accession_id, "schema": schema}}]
    await patch_folder(sess, folder_id, patch)
    async with sess.get(f"{objects_url}/{schema}/{accession_id}") as resp:
        LOG.debug(f"Checking that {accession_id} JSON is in {schema}")
        assert resp.status == 200, "HTTP Status code error"
    async with sess.get(f"{objects_url}/{schema}/{accession_id}?format=xml") as resp:
        LOG.debug(f"Checking that {accession_id} XML is in {schema}")
        assert resp.status == 200, "HTTP Status code error"
    await delete_object(sess, schema, accession_id)
    async with sess.get(f"{objects_url}/{schema}/{accession_id}") as resp:
        LOG.debug(f"Checking that JSON object {accession_id} was deleted")
        assert resp.status == 404, "HTTP Status code error"
    async with sess.get(f"{objects_url}/{schema}/{accession_id}?format=xml") as resp:
        LOG.debug(f"Checking that XML object {accession_id} was deleted")
        assert resp.status == 404, "HTTP Status code error"
    async with sess.get(f"{folders_url}/{folder_id}") as resp:
        LOG.debug(f"Checking that object {accession_id} was deleted from folder {folder_id}")
        res = await resp.json()
        # Compare plain ids so a leftover reference is actually detected
        assert not any(d["accessionId"] == accession_id for d in res["metadataObjects"]), "object still exists"
async def test_crud_drafts_works(sess, schema, filename, filename2, folder_id):
    """Test drafts REST api POST, PUT and DELETE reqs.

    Tries to create new draft object, gets accession id and checks if correct
    resource is returned with that id. Finally deletes the object and checks it
    was deleted.

    :param sess: client session used for the requests
    :param schema: name of the schema (folder) used for testing
    :param filename: name of the file used for testing.
    :param filename2: replacement file used for the PUT request
    :param folder_id: folder the draft is attached to
    """
    draft_id = await post_draft_json(sess, schema, filename)
    folder_patch = [{"op": "add", "path": "/drafts/-", "value": {"accessionId": draft_id, "schema": f"draft-{schema}"}}]
    await patch_folder(sess, folder_id, folder_patch)
    accession_id = await put_draft(sess, schema, draft_id, filename2)
    # The replaced draft must still be retrievable under the same id
    async with sess.get(f"{drafts_url}/{schema}/{accession_id}") as resp:
        LOG.debug(f"Checking that {accession_id} JSON is in {schema}")
        assert resp.status == 200, "HTTP Status code error"
    await delete_draft(sess, schema, accession_id)
    # After deletion, both the draft itself and its folder reference are gone
    async with sess.get(f"{drafts_url}/{schema}/{accession_id}") as resp:
        LOG.debug(f"Checking that JSON object {accession_id} was deleted")
        assert resp.status == 404, "HTTP Status code error"
    async with sess.get(f"{folders_url}/{folder_id}") as resp:
        LOG.debug(f"Checking that JSON object {accession_id} was deleted from folder {folder_id}")
        folder = await resp.json()
        assert all(entry["accessionId"] != accession_id for entry in folder["drafts"]), "draft object still exists"
async def test_patch_drafts_works(sess, schema, filename, filename2, folder_id):
    """Test REST api POST, PATCH and DELETE reqs.

    Tries to create put and patch object, gets accession id and
    checks if correct resource is returned with that id.
    Finally deletes the object and checks it was deleted.

    :param sess: client session used for the requests
    :param schema: name of the schema (folder) used for testing
    :param filename: name of the file used for testing.
    :param filename2: patch file applied to the draft
    :param folder_id: folder the draft is attached to
    """
    draft_id = await post_draft_json(sess, schema, filename)
    folder_patch = [{"op": "add", "path": "/drafts/-", "value": {"accessionId": draft_id, "schema": f"draft-{schema}"}}]
    await patch_folder(sess, folder_id, folder_patch)
    accession_id = await patch_draft(sess, schema, draft_id, filename2)
    async with sess.get(f"{drafts_url}/{schema}/{accession_id}") as resp:
        LOG.debug(f"Checking that {accession_id} JSON is in {schema}")
        patched = await resp.json()
        # Expected values come from the patch fixture file
        assert patched["centerName"] == "GEOM", "content mismatch"
        assert patched["alias"] == "GSE10968", "content mismatch"
        assert resp.status == 200, "HTTP Status code error"
    await delete_draft(sess, schema, accession_id)
    async with sess.get(f"{drafts_url}/{schema}/{accession_id}") as resp:
        LOG.debug(f"Checking that JSON object {accession_id} was deleted")
        assert resp.status == 404, "HTTP Status code error"
async def test_querying_works(sess, folder_id):
    """Test query endpoint with working and failing query.

    Posts every test XML object, registers them in the folder, then runs a
    set of known-good queries (expecting 200) and a known-bad query value
    (expecting 404) against each schema, and finally deletes the objects.

    :param sess: authenticated aiohttp client session
    :param folder_id: folder the posted objects are registered into
    """
    files = await asyncio.gather(*[post_object(sess, schema, filename) for schema, filename in test_xml_files])
    for accession_id, schema in files:
        patch = [{"op": "add", "path": "/metadataObjects/-", "value": {"accessionId": accession_id, "schema": schema}}]
        await patch_folder(sess, folder_id, patch)
    # Per-schema (query key, query value) pairs that are expected to match.
    queries = {
        "study": [
            ("studyTitle", "integrated"),
            ("studyType", "Other"),
            ("studyAbstract", "arabidopsis thaliana"),
            ("studyAttributes", "prjna107265"),
            ("studyAttributes", "parent_bioproject"),
        ],
        "sample": [
            ("title", "HapMap sample"),
            ("description", "human hapmap individual"),
            ("centerName", "hapmap"),
            ("sampleName", "homo sapiens"),
            ("scientificName", "homo sapiens"),
            ("sampleName", 9606),
        ],
        "run": [
            ("fileType", "srf"),
            ("experimentReference", "g1k-bgi-na18542"),
            ("experimentReference", "erx000037"),
        ],
        "experiment": [("studyReference", "1000Genomes project pilot")],
        "analysis": [
            ("fileType", "other"),
            ("studyReference", "HipSci___RNAseq___"),
            ("sampleReference", "HPSI0114i-eipl_3"),
        ],
    }

    async def do_one_query(schema, key, value, expected_status):
        # One GET against the query endpoint, checking only the status code.
        async with sess.get(f"{objects_url}/{schema}?{key}={value}") as resp:
            assert resp.status == expected_status, "HTTP Status code error"

    for schema, schema_queries in queries.items():
        LOG.debug(f"Querying {schema} collection with working params")
        await asyncio.gather(*[do_one_query(schema, key, value, 200) for key, value in schema_queries])
        # BUG FIX: this message previously lacked the f-prefix and logged the
        # literal text "{schema}" instead of the schema name.
        LOG.debug(f"Querying {schema} collection with non-working params")
        invalid = "yoloswaggings"
        await asyncio.gather(*[do_one_query(schema, key, invalid, 404) for key, _ in schema_queries])
    await asyncio.gather(*[delete_object(sess, schema, accession_id) for accession_id, schema in files])
async def test_getting_all_objects_from_schema_works(sess, folder_id):
    """Check that /objects/study returns objects with correct pagination.

    Posts 13 study objects, verifies the default page (size 10) and a
    custom page (page=2, per_page=3), rejects invalid pagination values,
    and deletes the objects again.
    """
    # Add objects
    files = await asyncio.gather(*[post_object(sess, "study", "SRP000539.xml") for _ in range(13)])
    for accession_id, schema in files:
        patch = [{"op": "add", "path": "/metadataObjects/-", "value": {"accessionId": accession_id, "schema": schema}}]
        await patch_folder(sess, folder_id, patch)

    async def expect_page(url, page, size, total_pages, total_objects, count):
        # One GET plus the full set of pagination assertions.
        async with sess.get(url) as resp:
            assert resp.status == 200
            ans = await resp.json()
            assert ans["page"]["page"] == page
            assert ans["page"]["size"] == size
            assert ans["page"]["totalPages"] == total_pages
            assert ans["page"]["totalObjects"] == total_objects
            assert len(ans["objects"]) == count

    # Test default values
    await expect_page(f"{objects_url}/study", 1, 10, 2, 13, 10)
    # Test with custom pagination values
    await expect_page(f"{objects_url}/study?page=2&per_page=3", 2, 3, 5, 13, 3)
    # Test with wrong pagination values
    async with sess.get(f"{objects_url}/study?page=-1") as resp:
        assert resp.status == 400
    async with sess.get(f"{objects_url}/study?per_page=0") as resp:
        assert resp.status == 400
    # Delete objects
    await asyncio.gather(*[delete_object(sess, "study", accession_id) for accession_id, _ in files])
async def test_crud_folders_works(sess):
    """Test folders REST api POST, GET, PATCH, PUBLISH and DELETE reqs.

    Walks a folder through its whole life cycle: create it, patch a draft
    into it, promote the draft to a real object, publish the folder and
    finally delete it, checking server state after every step.

    :param sess: authenticated aiohttp client session
    """
    # Create new folder and check its creation succeeded
    data = {"name": "test", "description": "test folder"}
    folder_id = await post_folder(sess, data)
    async with sess.get(f"{folders_url}/{folder_id}") as resp:
        LOG.debug(f"Checking that folder {folder_id} was created")
        assert resp.status == 200, "HTTP Status code error"
    # Create draft from test XML file and patch the draft into the newly created folder
    draft_id = await post_draft(sess, "sample", "SRS001433.xml")
    # NOTE(review): the patch value is a list here, unlike the bare dicts used
    # elsewhere, yet the assertion below expects a flat drafts list — confirm
    # that the server flattens it.
    patch1 = [{"op": "add", "path": "/drafts/-", "value": [{"accessionId": draft_id, "schema": "draft-sample"}]}]
    folder_id = await patch_folder(sess, folder_id, patch1)
    async with sess.get(f"{folders_url}/{folder_id}") as resp:
        LOG.debug(f"Checking that folder {folder_id} was patched")
        res = await resp.json()
        assert res["folderId"] == folder_id, "content mismatch"
        assert res["name"] == "test", "content mismatch"
        assert res["description"] == "test folder", "content mismatch"
        assert res["published"] is False, "content mismatch"
        assert res["drafts"] == [{"accessionId": draft_id, "schema": "draft-sample"}], "content mismatch"
        assert res["metadataObjects"] == [], "content mismatch"
    # Get the draft from the collection within this session and post it to objects collection
    draft = await get_draft(sess, "sample", draft_id)
    async with sess.post(f"{objects_url}/sample", data=draft) as resp:
        LOG.debug("Adding draft to actual objects")
        assert resp.status == 201, "HTTP Status code error"
        ans = await resp.json()
        assert ans["accessionId"] != draft_id, "content mismatch"
        accession_id = ans["accessionId"]
    # Patch folder so that original draft becomes an object in the folder
    patch2 = [
        {"op": "add", "path": "/metadataObjects/-", "value": [{"accessionId": accession_id, "schema": "sample"}]},
    ]
    folder_id = await patch_folder(sess, folder_id, patch2)
    async with sess.get(f"{folders_url}/{folder_id}") as resp:
        LOG.debug(f"Checking that folder {folder_id} was patched")
        res = await resp.json()
        assert res["folderId"] == folder_id, "content mismatch"
        assert res["published"] is False, "content mismatch"
        assert res["drafts"] == [{"accessionId": draft_id, "schema": "draft-sample"}], "content mismatch"
        assert res["metadataObjects"] == [{"accessionId": accession_id, "schema": "sample"}], "content mismatch"
    # Publish the folder
    folder_id = await publish_folder(sess, folder_id)
    async with sess.get(f"{folders_url}/{folder_id}") as resp:
        LOG.debug(f"Checking that folder {folder_id} was patched")
        res = await resp.json()
        assert res["folderId"] == folder_id, "content mismatch"
        assert res["published"] is True, "content mismatch"
        assert res["drafts"] == [], "content mismatch"
        assert res["metadataObjects"] == [{"accessionId": accession_id, "schema": "sample"}], "content mismatch"
    # Delete folder
    await delete_folder(sess, folder_id)
    async with sess.get(f"{folders_url}/{folder_id}") as resp:
        LOG.debug(f"Checking that folder {folder_id} was deleted")
        assert resp.status == 404, "HTTP Status code error"
    async with sess.get(f"{users_url}/current") as resp:
        LOG.debug(f"Checking that folder {folder_id} was deleted from current user")
        res = await resp.json()
        # BUG FIX: this previously compared the user's folder entries against
        # accession_id (a sample accession, never a folder id, so the check
        # could not fail) and left a stray debugging print(res) behind.
        expected_true = not any(d == folder_id for d in res["folders"])
        assert expected_true, "folder still exists at user"
async def test_crud_users_works(sess):
    """Test users REST api GET, PATCH and DELETE reqs.

    Reads the mocked current user, patches a newly created folder into it,
    verifies the patch, then deletes the user and confirms the session is
    no longer usable.

    :param sess: authenticated aiohttp client session
    """
    # Check user exists in database (requires an user object to be mocked)
    async with sess.get(f"{users_url}/{user_id}") as resp:
        LOG.debug(f"Reading user {user_id}")
        assert resp.status == 200, "HTTP Status code error"
        response = await resp.json()
        real_user_id = response["userId"]
    # Add user to session and create a patch to add folder to user
    data = {"name": "test", "description": "test folder"}
    folder_id = await post_folder(sess, data)
    patch = [{"op": "add", "path": "/folders/-", "value": [folder_id]}]
    await patch_user(sess, user_id, real_user_id, patch)
    async with sess.get(f"{users_url}/{user_id}") as resp:
        LOG.debug(f"Checking that user {user_id} was patched")
        res = await resp.json()
        assert res["userId"] == real_user_id, "content mismatch"
        assert res["name"] == "test test", "content mismatch"
        assert res["drafts"] == [], "content mismatch"
        assert folder_id in res["folders"], "content mismatch"
    # Delete user
    await delete_user(sess, user_id)
    # 401 means the API is inaccessible, i.e. the session has ended.
    # This check is not strictly needed but good to do.
    async with sess.get(f"{users_url}/{user_id}") as resp:
        LOG.debug(f"Checking that user {user_id} was deleted")
        assert resp.status == 401, "HTTP Status code error"
async def test_submissions_work(sess, folder_id):
    """Test actions in submission xml files.

    Posts a submission with two 'add' actions, registers the created
    objects in the folder, then posts a second submission with a 'modify'
    action against the study object and verifies the modification.

    NOTE: this test edits SRP000539_modified.xml on disk to inject the
    accession id created by this run, and restores the file at the end.

    :param sess: authenticated aiohttp client session
    :param folder_id: folder the submitted objects are registered into
    """
    # Post original submission with two 'add' actions
    sub_files = [("submission", "ERA521986_valid.xml"), ("study", "SRP000539.xml"), ("sample", "SRS001433.xml")]
    data = await create_multi_file_request_data(sub_files)
    async with sess.post(f"{submit_url}", data=data) as resp:
        LOG.debug("Checking initial submission worked")
        assert resp.status == 200, "HTTP Status code error"
        res = await resp.json()
        assert len(res) == 2, "content mismatch"
        assert res[0]["schema"] == "study", "content mismatch"
        assert res[1]["schema"] == "sample", "content mismatch"
        study_access_id = res[0]["accessionId"]
        patch = [
            {
                "op": "add",
                "path": "/metadataObjects/-",
                "value": {"accessionId": res[0]["accessionId"], "schema": res[0]["schema"]},
            },
            {
                "op": "add",
                "path": "/metadataObjects/-",
                "value": {"accessionId": res[1]["accessionId"], "schema": res[1]["schema"]},
            },
        ]
        await patch_folder(sess, folder_id, patch)
    # Sanity check that the study object was inserted correctly before modifying it
    async with sess.get(f"{objects_url}/study/{study_access_id}") as resp:
        LOG.debug("Sanity checking that previous object was added correctly")
        assert resp.status == 200, "HTTP Status code error"
        res = await resp.json()
        assert res["accessionId"] == study_access_id, "content mismatch"
        assert res["alias"] == "GSE10966", "content mismatch"
        assert res["descriptor"]["studyTitle"] == (
            "Highly integrated epigenome maps in Arabidopsis - whole genome shotgun bisulfite sequencing"
        ), "content mismatch"
    # Give test file the correct accession id
    LOG.debug("Sharing the correct accession ID created in this test instance")
    mod_study = testfiles_root / "study" / "SRP000539_modified.xml"
    tree = ET.parse(mod_study)
    root = tree.getroot()
    for elem in root.iter("STUDY"):
        elem.set("accession", study_access_id)
    tree.write(mod_study, encoding="utf-8")
    # Post new submission that modifies previously added study object and validates it
    sub_files = [("submission", "ERA521986_modify.xml"), ("study", "SRP000539_modified.xml")]
    data = await create_multi_file_request_data(sub_files)
    async with sess.post(f"{submit_url}", data=data) as resp:
        LOG.debug("Checking object in initial submission was modified")
        assert resp.status == 200, "HTTP Status code error"
        res = await resp.json()
        assert len(res) == 2, "content mismatch"
        new_study_access_id = res[0]["accessionId"]
        assert study_access_id == new_study_access_id
    # Check the modified object was inserted correctly
    async with sess.get(f"{objects_url}/study/{new_study_access_id}") as resp:
        LOG.debug("Checking that previous object was modified correctly")
        assert resp.status == 200, "HTTP Status code error"
        res = await resp.json()
        assert res["accessionId"] == new_study_access_id, "content mismatch"
        assert res["alias"] == "GSE10966", "content mismatch"
        assert res["descriptor"]["studyTitle"] == ("Different title for testing purposes"), "content mismatch"
    # Remove the accession id that was used for testing from test file
    # BUG FIX: the log message below was a copy-paste of the one above and
    # claimed to be "sharing" the accession ID while the code removes it.
    LOG.debug("Removing the test accession ID from the test file")
    mod_study = testfiles_root / "study" / "SRP000539_modified.xml"
    tree = ET.parse(mod_study)
    root = tree.getroot()
    for elem in root.iter("STUDY"):
        del elem.attrib["accession"]
    tree.write(mod_study, encoding="utf-8")
async def test_health_check(sess):
    """Verify /health reports an overall and database status of "Ok"."""
    async with sess.get(f"{base_url}/health") as resp:
        LOG.debug("Checking that health status is ok")
        assert resp.status == 200, "HTTP Status code error"
        body = await resp.json()
        db_status = body["services"]["database"]["status"]
        assert body["status"] == "Ok"
        assert db_status == "Ok"
async def main():
    """Launch different test tasks and run them.

    Runs the whole integration-test suite in order: first a session as a
    second mock user (to verify object ownership), then a session as the
    primary mock user running CRUD, draft, query, pagination, folder,
    submission, health and (last, since it deletes the user) user tests.
    """
    async with aiohttp.ClientSession() as sess:
        LOG.debug("=== Login other mock user ===")
        await login(sess, other_test_user, other_test_user_given, other_test_user_family)
        # Test add, modify, validate and release action with submissions
        # added to validate that objects belong to a specific user
        LOG.debug("=== Testing actions within submissions ===")
        submission_folder = {
            "name": "submission test 1",
            "description": "submission test folder 1",
        }
        submission_folder_id = await post_folder(sess, submission_folder)
        await test_submissions_work(sess, submission_folder_id)
    # Second session: the primary mock user runs the rest of the suite.
    async with aiohttp.ClientSession() as sess:
        LOG.debug("=== Login mock user ===")
        await login(sess, test_user, test_user_given, test_user_family)
        # Test adding and getting objects
        LOG.debug("=== Testing basic CRUD operations ===")
        basic_folder = {
            "name": "basic test",
            "description": "basic test folder",
        }
        basic_folder_id = await post_folder(sess, basic_folder)
        await asyncio.gather(*[test_crud_works(sess, schema, file, basic_folder_id) for schema, file in test_xml_files])
        # Test adding and getting draft objects
        LOG.debug("=== Testing basic CRUD drafts operations ===")
        draft_folder = {
            "name": "basic test draft",
            "description": "basic test draft folder",
        }
        draft_folder_id = await post_folder(sess, draft_folder)
        await asyncio.gather(
            *[
                test_crud_drafts_works(sess, schema, file, file2, draft_folder_id)
                for schema, file, file2 in test_json_files
            ]
        )
        # Test patch and put
        LOG.debug("=== Testing patch and put drafts operations ===")
        await test_crud_drafts_works(sess, "sample", "SRS001433.json", "put.json", draft_folder_id)
        await test_patch_drafts_works(sess, "study", "SRP000539.json", "patch.json", draft_folder_id)
        # Test queries
        LOG.debug("=== Testing queries ===")
        query_folder = {
            "name": "basic test query",
            "description": "basic test query folder",
        }
        query_folder_id = await post_folder(sess, query_folder)
        await test_querying_works(sess, query_folder_id)
        # Test /objects/study endpoint for query pagination
        LOG.debug("=== Testing getting all objects & pagination ===")
        pagination_folder = {
            "name": "basic test pagination",
            "description": "basic test pagination folder",
        }
        pagination_folder_id = await post_folder(sess, pagination_folder)
        await test_getting_all_objects_from_schema_works(sess, pagination_folder_id)
        # Test creating, reading, updating and deleting folders
        LOG.debug("=== Testing basic CRUD folder operations ===")
        await test_crud_folders_works(sess)
        # Test add, modify, validate and release action with submissions
        LOG.debug("=== Testing actions within submissions ===")
        submission_folder = {
            "name": "submission test",
            "description": "submission test folder",
        }
        submission_folder_id = await post_folder(sess, submission_folder)
        await test_submissions_work(sess, submission_folder_id)
        # Test health status check
        LOG.debug("=== Testing health status check ===")
        await test_health_check(sess)
        # Test reading, updating and deleting users
        # this needs to be done last as it deletes users
        LOG.debug("=== Testing basic CRUD user operations ===")
        await test_crud_users_works(sess)
# Script entry point: run the whole integration-test suite.
# BUG FIX: dataset residue ("| 0.560974 | 0.20044 |") fused onto this line
# made the file syntactically invalid; removed.
if __name__ == "__main__":
    asyncio.run(main())
import io
import os
from urllib.parse import urlparse
import requests
import yaml
from doozerlib.exceptions import DoozerFatalError
from doozerlib.exectools import cmd_assert
from doozerlib.logutil import getLogger
from doozerlib.model import Missing
from doozerlib.pushd import Dir
from doozerlib.util import is_in_directory, mkdirs
# Module-level logger shared by all modifier classes below.
LOGGER = getLogger(__name__)
class SourceModifierFactory(object):
    """A factory class for creating source modifier objects."""

    # Registry mapping an action name to the modifier class implementing it.
    MODIFICATIONS = {}

    @classmethod
    def supports(cls, action_name):
        """Test if specified modification action is supported."""
        return action_name in cls.MODIFICATIONS

    def create(self, *args, **kwargs):
        """Create a source modifier based on action.

        For example, create a source modifier for adding an out-of-tree file:

            factory = SourceModifierFactory()
            modifier = factory.create(action='add', source='http://example.com/gating_yaml',
                                      path='gating.yaml', overwriting=True)

        :raises KeyError: if the action is not registered in MODIFICATIONS.
        """
        action = kwargs["action"]
        if self.supports(action):
            return self.MODIFICATIONS[action](*args, **kwargs)
        raise KeyError("Unknown modification action: {}.".format(action))
class AddModifier(object):
    """ A source modifier that supports adding an out-of-tree source to dist-git.

    An `add` action has the following valid fields:

    - `action`: must be `add`
    - `source`: URL to the out-of-tree source
    - `path`: Path in dist-git to write the source to
    - `overwriting`: Allow to overwrite if `path` exists
    - `validate`: Optional content validation; only `yaml` is supported.

    For example, to add an out-of-tree source
    https://gitlab.cee.redhat.com/aosqe/ocp-build-data-gating/raw/master/openshift-3.11/atomic-openshift-cluster-autoscaler/gating_yaml
    to dist-git and save as `gating.yaml`:

        content:
          source:
            git:
              branch:
                fallback: master
                target: release-{MAJOR}.{MINOR}
              url: <EMAIL>:openshift/kubernetes-autoscaler.git
            modifications:
            - action: replace
              match: origin-cluster-autoscaler
              replacement: atomic-openshift-cluster-autoscaler
            - action: add
              source: https://gitlab.cee.redhat.com/aosqe/ocp-build-data-gating/raw/master/openshift-3.11/atomic-openshift-cluster-autoscaler/gating_yaml
              path: gating.yaml
              overwriting: true
            path: images/cluster-autoscaler
            # omitted
    """

    # Only remote HTTP(S) sources are supported.
    SUPPORTED_URL_SCHEMES = ["http", "https"]

    def __init__(self, *args, **kwargs):
        """ Initialize an "add" Modifier.

        :param source: URL to the out-of-tree source.
        :param path: Destination path to the dist-git repo.
        :param overwriting: True to allow to overwrite if path exists.
            Setting to false prevents accidentally overwriting files from the in-tree source.
        :param validate: Optional validation of the downloaded content ("yaml").
        """
        self.source = kwargs["source"]
        self.path = kwargs["path"]
        self.overwriting = kwargs.get("overwriting", False)
        self.validate = kwargs.get("validate", None)

    def act(self, *args, **kwargs):
        """ Run the modification action.

        :param context: A context dict; `context["distgit_path"]` is the dist-git checkout to write into.
        :param ceiling_dir: If not None, prevent from writing to a directory that is out of ceiling_dir.
        :param session: If not None, a requests.Session object for HTTP requests.
        :raises ValueError: on unsupported URL scheme, a write outside ceiling_dir, or an unknown validate value.
        :raises IOError: if the destination exists and overwriting is False, or validated YAML is empty.
        """
        LOGGER.debug("Running 'add' modification action...")
        context = kwargs["context"]
        distgit_path = context['distgit_path']
        source = urlparse(self.source)
        if source.scheme not in self.SUPPORTED_URL_SCHEMES:
            raise ValueError(
                "Unsupported URL scheme {} used in 'add' action.".format(source.scheme))
        source_url = source.geturl()  # normalized URL
        path = str(distgit_path.joinpath(self.path))
        ceiling_dir = kwargs.get("ceiling_dir")
        session = kwargs.get("session") or requests.session()
        if ceiling_dir and not is_in_directory(path, ceiling_dir):
            raise ValueError("Writing to a file out of {} is not allowed.".format(ceiling_dir))
        # NOTE: `overwriting` is checked before writing. A data race is possible,
        # but this suffices to prevent accidentally overwriting in-tree sources.
        if not self.overwriting and os.path.exists(path):
            raise IOError(
                "Destination path {} exists. Use 'overwriting: true' to overwrite.".format(self.path))
        LOGGER.debug("Getting out-of-tree source {}...".format(source_url))
        response = session.get(source_url)
        response.raise_for_status()
        content = response.content
        if self.validate:
            if self.validate == "yaml":
                yml = yaml.safe_load(content.decode("utf-8"))  # will raise an Error if invalid
                if yml is None:
                    raise IOError(f"Yaml file {source_url} is empty.")
            else:
                # BUG FIX: this message was a plain string that logged the literal
                # text "{self.validate}"; it is now an f-string.
                raise ValueError(f"Unknown 'validate' value: {self.validate}")
        mkdirs(os.path.dirname(path))
        with io.open(path, "wb") as dest_file:
            dest_file.write(content)
        LOGGER.debug("Out-of-tree source saved: {} -> {}".format(source_url, path))
# Register the modifier under the "add" action name.
SourceModifierFactory.MODIFICATIONS["add"] = AddModifier
class ReplaceModifier(object):
    """ A source modifier that supports replacing a substring in Dockerfile or RPM spec file.
    """

    def __init__(self, *args, **kwargs):
        """ Initialize ReplaceModifier.

        :param match: the old substring to be replaced.
        :param replacement: the new substring that replaces every occurrence of `match`.
        """
        self.match = kwargs["match"]
        self.replacement = kwargs["replacement"]

    def act(self, *args, **kwargs):
        """ Run the modification action.

        :param context: A context dict. `context["component_name"]` is the dist-git repo name
            and `context["content"]` is the content of the Dockerfile or RPM spec file.
            The modified text is written back to `context["result"]`.
        :raises DoozerFatalError: if the replacement changes nothing.
        """
        context = kwargs["context"]
        original_text = context["content"]
        component_name = context["component_name"]
        needle = self.match
        assert (needle is not Missing)
        substitute = self.replacement
        assert (substitute is not Missing)
        if substitute is None:  # Nothing follows colon in config yaml; user attempting to remove string
            substitute = ""
        updated_text = original_text.replace(needle, substitute)
        if updated_text == original_text:
            raise DoozerFatalError("{}: Replace ({}->{}) modification did not make a change to the Dockerfile content"
                                   .format(component_name, needle, substitute))
        LOGGER.debug(
            "Performed string replace '%s' -> '%s':\n%s\n" %
            (needle, substitute, updated_text))
        context["result"] = updated_text
# Register the modifier under the "replace" action name.
SourceModifierFactory.MODIFICATIONS["replace"] = ReplaceModifier
class CommandModifier(object):
    """ A source modifier that supports running a custom command to modify the source.
    """

    def __init__(self, *args, **kwargs):
        """ Initialize CommandModifier.

        :param command: a `str` or `list` of the command with arguments
        """
        self.command = kwargs["command"]

    def act(self, *args, **kwargs):
        """ Run the configured command from within `ceiling_dir`.

        :param context: A context dict. `context["set_env"]` is a `dict` of env vars to set for the command (overriding existing).
        :param ceiling_dir: directory the command is executed in.
        """
        ctx = kwargs["context"]
        env_overrides = ctx["set_env"]
        working_dir = kwargs["ceiling_dir"]
        with Dir(working_dir):
            cmd_assert(self.command, set_env=env_overrides)
# Register the modifier under the "command" action name.
# BUG FIX: dataset residue ("| doozerlib/source_modifications.py | import io")
# fused onto this line made it invalid; removed. (`io` is already imported at
# the top of this module.)
SourceModifierFactory.MODIFICATIONS["command"] = CommandModifier
import os
from urllib.parse import urlparse
import requests
import yaml
from doozerlib.exceptions import DoozerFatalError
from doozerlib.exectools import cmd_assert
from doozerlib.logutil import getLogger
from doozerlib.model import Missing
from doozerlib.pushd import Dir
from doozerlib.util import is_in_directory, mkdirs
# Module-level logger shared by all modifier classes below.
LOGGER = getLogger(__name__)
class SourceModifierFactory(object):
    """A factory class for creating source modifier objects."""

    # Registry mapping an action name to the modifier class implementing it.
    MODIFICATIONS = {}

    @classmethod
    def supports(cls, action_name):
        """Test if specified modification action is supported."""
        return action_name in cls.MODIFICATIONS

    def create(self, *args, **kwargs):
        """Create a source modifier based on action.

        For example, create a source modifier for adding an out-of-tree file:

            factory = SourceModifierFactory()
            modifier = factory.create(action='add', source='http://example.com/gating_yaml',
                                      path='gating.yaml', overwriting=True)

        :raises KeyError: if the action is not registered in MODIFICATIONS.
        """
        action = kwargs["action"]
        if self.supports(action):
            return self.MODIFICATIONS[action](*args, **kwargs)
        raise KeyError("Unknown modification action: {}.".format(action))
class AddModifier(object):
    """ A source modifier that supports adding an out-of-tree source to dist-git.

    An `add` action has the following valid fields:

    - `action`: must be `add`
    - `source`: URL to the out-of-tree source
    - `path`: Path in dist-git to write the source to
    - `overwriting`: Allow to overwrite if `path` exists
    - `validate`: Optional content validation; only `yaml` is supported.

    For example, to add an out-of-tree source
    https://gitlab.cee.redhat.com/aosqe/ocp-build-data-gating/raw/master/openshift-3.11/atomic-openshift-cluster-autoscaler/gating_yaml
    to dist-git and save as `gating.yaml`:

        content:
          source:
            git:
              branch:
                fallback: master
                target: release-{MAJOR}.{MINOR}
              url: <EMAIL>:openshift/kubernetes-autoscaler.git
            modifications:
            - action: replace
              match: origin-cluster-autoscaler
              replacement: atomic-openshift-cluster-autoscaler
            - action: add
              source: https://gitlab.cee.redhat.com/aosqe/ocp-build-data-gating/raw/master/openshift-3.11/atomic-openshift-cluster-autoscaler/gating_yaml
              path: gating.yaml
              overwriting: true
            path: images/cluster-autoscaler
            # omitted
    """

    # Only remote HTTP(S) sources are supported.
    SUPPORTED_URL_SCHEMES = ["http", "https"]

    def __init__(self, *args, **kwargs):
        """ Initialize an "add" Modifier.

        :param source: URL to the out-of-tree source.
        :param path: Destination path to the dist-git repo.
        :param overwriting: True to allow to overwrite if path exists.
            Setting to false prevents accidentally overwriting files from the in-tree source.
        :param validate: Optional validation of the downloaded content ("yaml").
        """
        self.source = kwargs["source"]
        self.path = kwargs["path"]
        self.overwriting = kwargs.get("overwriting", False)
        self.validate = kwargs.get("validate", None)

    def act(self, *args, **kwargs):
        """ Run the modification action.

        :param context: A context dict; `context["distgit_path"]` is the dist-git checkout to write into.
        :param ceiling_dir: If not None, prevent from writing to a directory that is out of ceiling_dir.
        :param session: If not None, a requests.Session object for HTTP requests.
        :raises ValueError: on unsupported URL scheme, a write outside ceiling_dir, or an unknown validate value.
        :raises IOError: if the destination exists and overwriting is False, or validated YAML is empty.
        """
        LOGGER.debug("Running 'add' modification action...")
        context = kwargs["context"]
        distgit_path = context['distgit_path']
        source = urlparse(self.source)
        if source.scheme not in self.SUPPORTED_URL_SCHEMES:
            raise ValueError(
                "Unsupported URL scheme {} used in 'add' action.".format(source.scheme))
        source_url = source.geturl()  # normalized URL
        path = str(distgit_path.joinpath(self.path))
        ceiling_dir = kwargs.get("ceiling_dir")
        session = kwargs.get("session") or requests.session()
        if ceiling_dir and not is_in_directory(path, ceiling_dir):
            raise ValueError("Writing to a file out of {} is not allowed.".format(ceiling_dir))
        # NOTE: `overwriting` is checked before writing. A data race is possible,
        # but this suffices to prevent accidentally overwriting in-tree sources.
        if not self.overwriting and os.path.exists(path):
            raise IOError(
                "Destination path {} exists. Use 'overwriting: true' to overwrite.".format(self.path))
        LOGGER.debug("Getting out-of-tree source {}...".format(source_url))
        response = session.get(source_url)
        response.raise_for_status()
        content = response.content
        if self.validate:
            if self.validate == "yaml":
                yml = yaml.safe_load(content.decode("utf-8"))  # will raise an Error if invalid
                if yml is None:
                    raise IOError(f"Yaml file {source_url} is empty.")
            else:
                # BUG FIX: this message was a plain string that logged the literal
                # text "{self.validate}"; it is now an f-string.
                raise ValueError(f"Unknown 'validate' value: {self.validate}")
        mkdirs(os.path.dirname(path))
        with io.open(path, "wb") as dest_file:
            dest_file.write(content)
        LOGGER.debug("Out-of-tree source saved: {} -> {}".format(source_url, path))
# Register the modifier under the "add" action name.
SourceModifierFactory.MODIFICATIONS["add"] = AddModifier
class ReplaceModifier(object):
    """ A source modifier that supports replacing a substring in Dockerfile or RPM spec file.
    """

    def __init__(self, *args, **kwargs):
        """ Initialize ReplaceModifier.

        :param match: the old substring to be replaced.
        :param replacement: the new substring that replaces every occurrence of `match`.
        """
        self.match = kwargs["match"]
        self.replacement = kwargs["replacement"]

    def act(self, *args, **kwargs):
        """ Run the modification action.

        :param context: A context dict. `context["component_name"]` is the dist-git repo name
            and `context["content"]` is the content of the Dockerfile or RPM spec file.
            The modified text is written back to `context["result"]`.
        :raises DoozerFatalError: if the replacement changes nothing.
        """
        context = kwargs["context"]
        original_text = context["content"]
        component_name = context["component_name"]
        needle = self.match
        assert (needle is not Missing)
        substitute = self.replacement
        assert (substitute is not Missing)
        if substitute is None:  # Nothing follows colon in config yaml; user attempting to remove string
            substitute = ""
        updated_text = original_text.replace(needle, substitute)
        if updated_text == original_text:
            raise DoozerFatalError("{}: Replace ({}->{}) modification did not make a change to the Dockerfile content"
                                   .format(component_name, needle, substitute))
        LOGGER.debug(
            "Performed string replace '%s' -> '%s':\n%s\n" %
            (needle, substitute, updated_text))
        context["result"] = updated_text
# Register the modifier under the "replace" action name.
SourceModifierFactory.MODIFICATIONS["replace"] = ReplaceModifier
class CommandModifier(object):
    """ A source modifier that supports running a custom command to modify the source.
    """

    def __init__(self, *args, **kwargs):
        """ Initialize CommandModifier.

        :param command: a `str` or `list` of the command with arguments
        """
        self.command = kwargs["command"]

    def act(self, *args, **kwargs):
        """ Run the configured command from within `ceiling_dir`.

        :param context: A context dict. `context["set_env"]` is a `dict` of env vars to set for the command (overriding existing).
        :param ceiling_dir: directory the command is executed in.
        """
        ctx = kwargs["context"]
        env_overrides = ctx["set_env"]
        working_dir = kwargs["ceiling_dir"]
        with Dir(working_dir):
            cmd_assert(self.command, set_env=env_overrides)
# Register the modifier under the "command" action name.
# BUG FIX: dataset residue ("| 0.656658 | 0.241534 |") fused onto this line
# made it invalid; removed.
SourceModifierFactory.MODIFICATIONS["command"] = CommandModifier