text stringlengths 0 1.05M | meta dict |
|---|---|
from ...externals.six import string_types
import os
import numpy as np
from numpy.testing import assert_allclose
from nose.tools import (assert_equal, assert_almost_equal, assert_false,
assert_raises, assert_true)
import warnings
import mne
from mne.datasets import sample
from mne.io.kit.tests import data_dir as kit_data_dir
from mne.utils import _TempDir, requires_traits, requires_mne_fs_in_env
# Paths into the MNE sample dataset (download is skipped; the tests below are
# decorated to require the data to be present).
data_path = sample.data_path(download=False)
raw_path = os.path.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif')
kit_raw_path = os.path.join(kit_data_dir, 'test_bin_raw.fif')
subjects_dir = os.path.join(data_path, 'subjects')
# Always surface warnings so the tests can capture and assert on them.
warnings.simplefilter('always')
# Shared scratch directory for files written by the tests in this module.
tempdir = _TempDir()
trans_dst = os.path.join(tempdir, 'test-trans.fif')
@sample.requires_sample_data
@requires_traits
def test_coreg_model():
    """Test CoregModel fitting and transform save/restore."""
    from mne.gui._coreg_gui import CoregModel
    model = CoregModel()
    # Saving requires MRI + head-shape data; must fail on a fresh model.
    assert_raises(RuntimeError, model.save_trans, 'blah.fif')
    model.mri.subjects_dir = subjects_dir
    model.mri.subject = 'sample'
    # No MRI fiducials set yet.
    assert_false(model.mri.fid_ok)
    # Set approximate MRI fiducial positions manually.
    model.mri.lpa = [[-0.06, 0, 0]]
    model.mri.nasion = [[0, 0.05, 0]]
    model.mri.rpa = [[0.08, 0, 0]]
    assert_true(model.mri.fid_ok)
    model.hsp.file = raw_path
    # Digitized fiducials as read from the raw file.
    assert_allclose(model.hsp.lpa, [[-7.137e-2, 0, 5.122e-9]], 1e-4)
    assert_allclose(model.hsp.rpa, [[+7.527e-2, 0, 5.588e-9]], 1e-4)
    assert_allclose(model.hsp.nasion, [[+3.725e-9, 1.026e-1, 4.191e-9]], 1e-4)
    assert_true(model.has_fid_data)
    # Record pre-fit errors; each fit below must reduce its target error.
    lpa_distance = model.lpa_distance
    nasion_distance = model.nasion_distance
    rpa_distance = model.rpa_distance
    avg_point_distance = np.mean(model.point_distance)
    model.fit_auricular_points()
    old_x = lpa_distance ** 2 + rpa_distance ** 2
    new_x = model.lpa_distance ** 2 + model.rpa_distance ** 2
    assert_true(new_x < old_x)
    model.fit_fiducials()
    old_x = lpa_distance ** 2 + rpa_distance ** 2 + nasion_distance ** 2
    new_x = (model.lpa_distance ** 2 + model.rpa_distance ** 2
             + model.nasion_distance ** 2)
    assert_true(new_x < old_x)
    model.fit_hsp_points()
    assert_true(np.mean(model.point_distance) < avg_point_distance)
    # Round-trip the head->MRI transform through a -trans.fif file.
    model.save_trans(trans_dst)
    trans = mne.read_trans(trans_dst)
    assert_allclose(trans['trans'], model.head_mri_trans)
    # test restoring trans after resetting the translation/rotation traits
    x, y, z, rot_x, rot_y, rot_z = .1, .2, .05, 1.5, 0.1, -1.2
    model.trans_x = x
    model.trans_y = y
    model.trans_z = z
    model.rot_x = rot_x
    model.rot_y = rot_y
    model.rot_z = rot_z
    trans = model.head_mri_trans
    model.reset_traits(["trans_x", "trans_y", "trans_z", "rot_x", "rot_y",
                        "rot_z"])
    assert_equal(model.trans_x, 0)
    model.set_trans(trans)
    assert_almost_equal(model.trans_x, x)
    assert_almost_equal(model.trans_y, y)
    assert_almost_equal(model.trans_z, z)
    assert_almost_equal(model.rot_x, rot_x)
    assert_almost_equal(model.rot_y, rot_y)
    assert_almost_equal(model.rot_z, rot_z)
    # info strings shown in the GUI
    assert_true(isinstance(model.fid_eval_str, string_types))
    assert_true(isinstance(model.points_eval_str, string_types))
@sample.requires_sample_data
@requires_traits
@requires_mne_fs_in_env
def test_coreg_model_with_fsaverage():
    """Test CoregModel with the fsaverage brain data."""
    from mne.gui._coreg_gui import CoregModel
    # Create an fsaverage subject in the scratch subjects dir.
    mne.create_default_subject(subjects_dir=tempdir)
    model = CoregModel()
    model.mri.subjects_dir = tempdir
    model.mri.subject = 'fsaverage'
    # fsaverage ships with fiducials, so they are OK immediately.
    assert_true(model.mri.fid_ok)
    model.hsp.file = raw_path
    # Record pre-fit errors; the scale fits below must reduce them.
    lpa_distance = model.lpa_distance
    nasion_distance = model.nasion_distance
    rpa_distance = model.rpa_distance
    avg_point_distance = np.mean(model.point_distance)
    # test hsp point omission
    model.trans_y = -0.008
    model.fit_auricular_points()
    model.omit_hsp_points(0.02)
    assert_equal(model.hsp.n_omitted, 1)
    model.omit_hsp_points(reset=True)
    assert_equal(model.hsp.n_omitted, 0)
    model.omit_hsp_points(0.02, reset=True)
    assert_equal(model.hsp.n_omitted, 1)
    # scale with 1 parameter
    model.n_scale_params = 1
    model.fit_scale_auricular_points()
    old_x = lpa_distance ** 2 + rpa_distance ** 2
    new_x = model.lpa_distance ** 2 + model.rpa_distance ** 2
    assert_true(new_x < old_x)
    model.fit_scale_fiducials()
    old_x = lpa_distance ** 2 + rpa_distance ** 2 + nasion_distance ** 2
    new_x = (model.lpa_distance ** 2 + model.rpa_distance ** 2
             + model.nasion_distance ** 2)
    assert_true(new_x < old_x)
    model.fit_scale_hsp_points()
    avg_point_distance_1param = np.mean(model.point_distance)
    assert_true(avg_point_distance_1param < avg_point_distance)
    # scaling job description returned for the batch-processing queue
    desc, func, args, kwargs = model.get_scaling_job('test')
    assert_true(isinstance(desc, string_types))
    assert_equal(args[0], 'fsaverage')
    assert_equal(args[1], 'test')
    assert_allclose(args[2], model.scale)
    assert_equal(kwargs['subjects_dir'], tempdir)
    # scale with 3 parameters (must improve on the 1-parameter fit)
    model.n_scale_params = 3
    model.fit_scale_hsp_points()
    assert_true(np.mean(model.point_distance) < avg_point_distance_1param)
    # test switching raw disables point omission
    assert_equal(model.hsp.n_omitted, 1)
    with warnings.catch_warnings(record=True):
        model.hsp.file = kit_raw_path
    assert_equal(model.hsp.n_omitted, 0)
| {
"repo_name": "jaeilepp/eggie",
"path": "mne/gui/tests/test_coreg_gui.py",
"copies": "2",
"size": "5480",
"license": "bsd-2-clause",
"hash": 3894136701385273000,
"line_mean": 31.8143712575,
"line_max": 78,
"alpha_frac": 0.6620437956,
"autogenerated": false,
"ratio": 2.860125260960334,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45221690565603345,
"avg_score": null,
"num_lines": null
} |
import os
import os.path as op
import re
import shutil
import sys
from unittest import SkipTest
import numpy as np
from numpy.testing import (assert_allclose, assert_equal,
assert_array_almost_equal)
import pytest
import mne
from mne.datasets import testing
from mne.io.kit.tests import data_dir as kit_data_dir
from mne.transforms import invert_transform
from mne.utils import (run_tests_if_main, requires_mayavi, traits_test,
modified_env)
# Paths into the MNE testing dataset (download is skipped; the tests below
# are decorated to require the data to be present).
data_path = testing.data_path(download=False)
raw_path = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
fname_trans = op.join(data_path, 'MEG', 'sample',
                      'sample_audvis_trunc-trans.fif')
kit_raw_path = op.join(kit_data_dir, 'test_bin_raw.fif')
subjects_dir = op.join(data_path, 'subjects')
@testing.requires_testing_data
@requires_mayavi
@traits_test
def test_coreg_model_decimation(tmpdir):
    """Test CoregModel decimation of high-res to low-res head."""
    from mne.gui._coreg_gui import CoregModel
    tempdir = str(tmpdir)
    # Work on a private copy of the 'sample' subject so we can delete files.
    subject_dir = op.join(tempdir, 'sample')
    shutil.copytree(op.join(subjects_dir, 'sample'), subject_dir)
    # This makes the test much faster: reuse the small outer-skin surface as
    # the high-res seghead instead of the real dense head.
    shutil.move(op.join(subject_dir, 'bem', 'outer_skin.surf'),
                op.join(subject_dir, 'surf', 'lh.seghead'))
    for fname in ('sample-head.fif', 'sample-head-dense.fif'):
        os.remove(op.join(subject_dir, 'bem', fname))
    model = CoregModel(guess_mri_subject=False)
    # With the head files removed, loading must warn and fall back to
    # decimating the high-res surface.
    with pytest.warns(RuntimeWarning, match='No low-resolution'):
        model.mri.subjects_dir = tempdir
    assert model.mri.subject == 'sample'  # already set by setting subjects_dir
    assert model.mri.bem_low_res.file == ''
    assert len(model.mri.bem_low_res.surf.rr) == 2562
    assert len(model.mri.bem_high_res.surf.rr) == 2562  # because we moved it
@testing.requires_testing_data
@requires_mayavi
@traits_test
def test_coreg_model(tmpdir):
    """Test CoregModel."""
    from mne.gui._coreg_gui import CoregModel
    tempdir = str(tmpdir)
    trans_dst = op.join(tempdir, 'test-trans.fif')
    model = CoregModel()
    # Saving requires MRI + head-shape data; must fail on a fresh model.
    with pytest.raises(RuntimeError, match='Not enough information for savin'):
        model.save_trans('blah.fif')
    model.mri.subjects_dir = subjects_dir
    model.mri.subject = 'sample'
    assert not model.mri.fid_ok
    # Set approximate MRI fiducial positions manually.
    model.mri.lpa = [[-0.06, 0, 0]]
    model.mri.nasion = [[0, 0.05, 0]]
    model.mri.rpa = [[0.08, 0, 0]]
    assert (model.mri.fid_ok)
    model.hsp.file = raw_path
    # Digitized fiducials as read from the raw file.
    assert_allclose(model.hsp.lpa, [[-7.137e-2, 0, 5.122e-9]], 1e-4)
    assert_allclose(model.hsp.rpa, [[+7.527e-2, 0, 5.588e-9]], 1e-4)
    assert_allclose(model.hsp.nasion, [[+3.725e-9, 1.026e-1, 4.191e-9]], 1e-4)
    assert model.has_lpa_data
    assert model.has_nasion_data
    assert model.has_rpa_data
    assert len(model.hsp.eeg_points) > 1
    assert len(model.mri.bem_low_res.surf.rr) == 2562
    assert len(model.mri.bem_high_res.surf.rr) == 267122
    # Record pre-fit errors; each fit below must reduce its target error.
    lpa_distance = model.lpa_distance
    nasion_distance = model.nasion_distance
    rpa_distance = model.rpa_distance
    avg_point_distance = np.mean(model.point_distance)
    model.nasion_weight = 1.
    model.fit_fiducials(0)
    old_x = lpa_distance ** 2 + rpa_distance ** 2 + nasion_distance ** 2
    new_x = (model.lpa_distance ** 2 + model.rpa_distance ** 2 +
             model.nasion_distance ** 2)
    assert new_x < old_x
    model.fit_icp(0)
    new_dist = np.mean(model.point_distance)
    assert new_dist < avg_point_distance
    # Round-trip the head->MRI transform through a -trans.fif file.
    model.save_trans(trans_dst)
    trans = mne.read_trans(trans_dst)
    assert_allclose(trans['trans'], model.head_mri_t)
    # test restoring trans (rotations are set in degrees, cf. rad2deg)
    x, y, z = 100, 200, 50
    rot_x, rot_y, rot_z = np.rad2deg([1.5, 0.1, -1.2])
    model.trans_x = x
    model.trans_y = y
    model.trans_z = z
    model.rot_x = rot_x
    model.rot_y = rot_y
    model.rot_z = rot_z
    trans = model.mri_head_t
    model.reset_traits(["trans_x", "trans_y", "trans_z", "rot_x", "rot_y",
                        "rot_z"])
    assert_equal(model.trans_x, 0)
    model.set_trans(trans)
    assert_array_almost_equal(model.trans_x, x)
    assert_array_almost_equal(model.trans_y, y)
    assert_array_almost_equal(model.trans_z, z)
    assert_array_almost_equal(model.rot_x, rot_x)
    assert_array_almost_equal(model.rot_y, rot_y)
    assert_array_almost_equal(model.rot_z, rot_z)
    # info strings shown in the GUI
    assert (isinstance(model.fid_eval_str, str))
    assert (isinstance(model.points_eval_str, str))
    # scaling job
    assert not model.can_prepare_bem_model
    model.n_scale_params = 1
    assert (model.can_prepare_bem_model)
    model.prepare_bem_model = True
    sdir, sfrom, sto, scale, skip_fiducials, labels, annot, bemsol = \
        model.get_scaling_job('sample2', False)
    assert_equal(sdir, subjects_dir)
    assert_equal(sfrom, 'sample')
    assert_equal(sto, 'sample2')
    assert_allclose(scale, model.parameters[6:9])
    assert_equal(skip_fiducials, False)
    # find BEM files on disk to compare against the job's bemsol list
    bems = set()
    for fname in os.listdir(op.join(subjects_dir, 'sample', 'bem')):
        match = re.match(r'sample-(.+-bem)\.fif', fname)
        if match:
            bems.add(match.group(1))
    assert_equal(set(bemsol), bems)
    # with prepare_bem_model off, no BEM solutions are scheduled
    model.prepare_bem_model = False
    sdir, sfrom, sto, scale, skip_fiducials, labels, annot, bemsol = \
        model.get_scaling_job('sample2', True)
    assert_equal(bemsol, [])
    assert (skip_fiducials)
    # load a known transform and verify the save/restore round trip
    model.load_trans(fname_trans)
    model.save_trans(trans_dst)
    trans = mne.read_trans(trans_dst)
    assert_allclose(trans['trans'], model.head_mri_t)
    # translations are exposed in mm, hence the * 1000.
    assert_allclose(invert_transform(trans)['trans'][:3, 3] * 1000.,
                    [model.trans_x, model.trans_y, model.trans_z])
def _check_ci():
if os.getenv('TRAVIS', 'false').lower() == 'true' and \
sys.platform == 'darwin':
raise SkipTest('Skipping GUI tests on Travis OSX')
@testing.requires_testing_data
@requires_mayavi
@traits_test
def test_coreg_gui_display(tmpdir):
    """Test CoregFrame."""
    _check_ci()
    from mayavi import mlab
    from tvtk.api import tvtk
    home_dir = str(tmpdir)
    # Run the GUI headlessly and with a fake home dir so config reads/writes
    # stay inside the test sandbox.
    with modified_env(**{'_MNE_GUI_TESTING_MODE': 'true',
                         '_MNE_FAKE_HOME_DIR': home_dir}):
        with pytest.raises(ValueError, match='not a valid subject'):
            mne.gui.coregistration(subject='Elvis', subjects_dir=subjects_dir)
        # avoid modal dialog if SUBJECTS_DIR is set to a directory that
        # does not contain valid subjects
        ui, frame = mne.gui.coregistration(subjects_dir='')
        mlab.process_ui_events()
        ui.dispose()
        mlab.process_ui_events()
        ui, frame = mne.gui.coregistration(subjects_dir=subjects_dir,
                                           subject='sample')
        mlab.process_ui_events()
        assert not frame.model.mri.fid_ok
        # Set approximate MRI fiducials through the frame's model.
        frame.model.mri.lpa = [[-0.06, 0, 0]]
        frame.model.mri.nasion = [[0, 0.05, 0]]
        frame.model.mri.rpa = [[0.08, 0, 0]]
        assert (frame.model.mri.fid_ok)
        frame.data_panel.raw_src.file = raw_path
        # EEG electrodes render as spheres until projected to the surface,
        # then as cylinders.
        assert isinstance(frame.eeg_obj.glyph.glyph.glyph_source.glyph_source,
                          tvtk.SphereSource)
        frame.data_panel.view_options_panel.eeg_obj.project_to_surface = True
        assert isinstance(frame.eeg_obj.glyph.glyph.glyph_source.glyph_source,
                          tvtk.CylinderSource)
        mlab.process_ui_events()
        # grow hair (faster for low-res)
        assert frame.data_panel.view_options_panel.head_high_res
        frame.data_panel.view_options_panel.head_high_res = False
        frame.model.grow_hair = 40.
        # scale: inc/dec buttons step the scale by 1 (percent-style values)
        frame.coreg_panel.n_scale_params = 3
        frame.coreg_panel.scale_x_inc = True
        assert frame.model.scale_x == 101.
        frame.coreg_panel.scale_y_dec = True
        assert frame.model.scale_y == 99.
        # reset parameters
        frame.coreg_panel.reset_params = True
        assert_equal(frame.model.grow_hair, 0)
        assert not frame.data_panel.view_options_panel.head_high_res
        # configuration persistence: settings survive save_config + reopen
        assert (frame.model.prepare_bem_model)
        frame.model.prepare_bem_model = False
        frame.save_config(home_dir)
        ui.dispose()
        mlab.process_ui_events()
        ui, frame = mne.gui.coregistration(subjects_dir=subjects_dir)
        assert not frame.model.prepare_bem_model
        assert not frame.data_panel.view_options_panel.head_high_res
        ui.dispose()
        mlab.process_ui_events()
@testing.requires_testing_data
@requires_mayavi
@traits_test
def test_coreg_model_with_fsaverage(tmpdir):
    """Test CoregModel with the fsaverage brain data."""
    tempdir = str(tmpdir)
    from mne.gui._coreg_gui import CoregModel
    # Create an fsaverage subject inside the scratch subjects dir.
    mne.create_default_subject(subjects_dir=tempdir,
                               fs_home=op.join(subjects_dir, '..'))
    model = CoregModel()
    model.mri.subjects_dir = tempdir
    model.mri.subject = 'fsaverage'
    # fsaverage ships with fiducials, so they are OK immediately.
    assert (model.mri.fid_ok)
    model.hsp.file = raw_path
    # Record pre-fit errors; the scale fits below must reduce them.
    lpa_distance = model.lpa_distance
    nasion_distance = model.nasion_distance
    rpa_distance = model.rpa_distance
    avg_point_distance = np.mean(model.point_distance)
    # test hsp point omission at several distance thresholds
    model.nasion_weight = 1.
    model.trans_y = -0.008
    model.fit_fiducials(0)
    model.omit_hsp_points(0.02)
    assert model.hsp.n_omitted == 1
    model.omit_hsp_points(np.inf)
    assert model.hsp.n_omitted == 0
    model.omit_hsp_points(0.02)
    assert model.hsp.n_omitted == 1
    model.omit_hsp_points(0.01)
    assert model.hsp.n_omitted == 4
    model.omit_hsp_points(0.005)
    assert model.hsp.n_omitted == 40
    model.omit_hsp_points(0.01)
    assert model.hsp.n_omitted == 4
    model.omit_hsp_points(0.02)
    assert model.hsp.n_omitted == 1
    # scale with 1 parameter
    model.n_scale_params = 1
    model.fit_fiducials(1)
    old_x = lpa_distance ** 2 + rpa_distance ** 2 + nasion_distance ** 2
    new_x = (model.lpa_distance ** 2 + model.rpa_distance ** 2 +
             model.nasion_distance ** 2)
    assert (new_x < old_x)
    model.fit_icp(1)
    avg_point_distance_1param = np.mean(model.point_distance)
    assert (avg_point_distance_1param < avg_point_distance)
    # scaling job
    sdir, sfrom, sto, scale, skip_fiducials, labels, annot, bemsol = \
        model.get_scaling_job('scaled', False)
    assert_equal(sdir, tempdir)
    assert_equal(sfrom, 'fsaverage')
    assert_equal(sto, 'scaled')
    assert_allclose(scale, model.parameters[6:9])
    assert_equal(set(bemsol), {'inner_skull-bem'})
    # with prepare_bem_model off, no BEM solutions are scheduled
    model.prepare_bem_model = False
    sdir, sfrom, sto, scale, skip_fiducials, labels, annot, bemsol = \
        model.get_scaling_job('scaled', False)
    assert_equal(bemsol, [])
    # scale with 3 parameters (must improve on the 1-parameter fit)
    model.n_scale_params = 3
    model.fit_icp(3)
    assert (np.mean(model.point_distance) < avg_point_distance_1param)
    # test switching raw disables point omission
    assert_equal(model.hsp.n_omitted, 1)
    model.hsp.file = kit_raw_path
    assert_equal(model.hsp.n_omitted, 0)
run_tests_if_main()  # allow running this test file directly as a script
| {
"repo_name": "adykstra/mne-python",
"path": "mne/gui/tests/test_coreg_gui.py",
"copies": "1",
"size": "11263",
"license": "bsd-3-clause",
"hash": -682850231390502700,
"line_mean": 34.196875,
"line_max": 79,
"alpha_frac": 0.6428127497,
"autogenerated": false,
"ratio": 3.0605978260869566,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9203410575786957,
"avg_score": 0,
"num_lines": 320
} |
import os
import os.path as op
import re
import shutil
import numpy as np
from numpy.testing import assert_allclose, assert_array_almost_equal
import pytest
import mne
from mne.datasets import testing
from mne.io.kit.tests import data_dir as kit_data_dir
from mne.surface import dig_mri_distances
from mne.transforms import invert_transform
from mne.utils import (run_tests_if_main, requires_mayavi, traits_test,
modified_env)
# Paths into the MNE testing dataset (download is skipped; the tests below
# are decorated to require the data to be present).
data_path = testing.data_path(download=False)
raw_path = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
fname_trans = op.join(data_path, 'MEG', 'sample',
                      'sample_audvis_trunc-trans.fif')
kit_raw_path = op.join(kit_data_dir, 'test_bin_raw.fif')
subjects_dir = op.join(data_path, 'subjects')
@testing.requires_testing_data
@requires_mayavi
@traits_test
def test_coreg_model_decimation(subjects_dir_tmp):
    """Test CoregModel decimation of high-res to low-res head."""
    from mne.gui._coreg_gui import CoregModel
    # This makes the test much faster: reuse the small outer-skin surface as
    # the high-res seghead instead of the real dense head.
    subject_dir = op.join(subjects_dir_tmp, 'sample')
    shutil.move(op.join(subject_dir, 'bem', 'outer_skin.surf'),
                op.join(subject_dir, 'surf', 'lh.seghead'))
    for fname in ('sample-head.fif', 'sample-head-dense.fif'):
        os.remove(op.join(subject_dir, 'bem', fname))
    model = CoregModel(guess_mri_subject=False)
    # With the head files removed, loading must warn and fall back to
    # decimating the high-res surface.
    with pytest.warns(RuntimeWarning, match='No low-resolution'):
        model.mri.subjects_dir = op.dirname(subject_dir)
    assert model.mri.subject == 'sample'  # already set by setting subjects_dir
    assert model.mri.bem_low_res.file == ''
    assert len(model.mri.bem_low_res.surf.rr) == 2562
    assert len(model.mri.bem_high_res.surf.rr) == 2562  # because we moved it
@requires_mayavi
@traits_test
def test_coreg_model(subjects_dir_tmp):
    """Test CoregModel."""
    from mne.gui._coreg_gui import CoregModel
    trans_dst = op.join(subjects_dir_tmp, 'test-trans.fif')
    # make it use MNI fiducials
    os.remove(op.join(subjects_dir_tmp, 'sample', 'bem',
                      'sample-fiducials.fif'))
    model = CoregModel()
    # Saving requires MRI + head-shape data; must fail on a fresh model.
    with pytest.raises(RuntimeError, match='Not enough information for savin'):
        model.save_trans('blah.fif')
    model.mri.subjects_dir = subjects_dir_tmp
    model.mri.subject = 'sample'
    assert model.mri.fid_ok  # automated using MNI fiducials
    model.hsp.file = raw_path
    # Digitized fiducials as read from the raw file.
    assert_allclose(model.hsp.lpa, [[-7.137e-2, 0, 5.122e-9]], 1e-4)
    assert_allclose(model.hsp.rpa, [[+7.527e-2, 0, 5.588e-9]], 1e-4)
    assert_allclose(model.hsp.nasion, [[+3.725e-9, 1.026e-1, 4.191e-9]], 1e-4)
    assert model.has_lpa_data
    assert model.has_nasion_data
    assert model.has_rpa_data
    assert len(model.hsp.eeg_points) > 1
    assert len(model.mri.bem_low_res.surf.rr) == 2562
    assert len(model.mri.bem_high_res.surf.rr) == 267122
    # Record pre-fit errors; each fit below must reduce its target error.
    lpa_distance = model.lpa_distance
    nasion_distance = model.nasion_distance
    rpa_distance = model.rpa_distance
    avg_point_distance = np.mean(model.point_distance)
    model.nasion_weight = 1.
    model.fit_fiducials(0)
    old_x = lpa_distance ** 2 + rpa_distance ** 2 + nasion_distance ** 2
    new_x = (model.lpa_distance ** 2 + model.rpa_distance ** 2 +
             model.nasion_distance ** 2)
    assert new_x < old_x
    model.fit_icp(0)
    new_dist = np.mean(model.point_distance)
    assert new_dist < avg_point_distance
    # Round-trip the head->MRI transform through a -trans.fif file.
    model.save_trans(trans_dst)
    trans = mne.read_trans(trans_dst)
    assert_allclose(trans['trans'], model.head_mri_t)
    # test restoring trans (rotations are set in degrees, cf. rad2deg)
    x, y, z = 100, 200, 50
    rot_x, rot_y, rot_z = np.rad2deg([1.5, 0.1, -1.2])
    model.trans_x = x
    model.trans_y = y
    model.trans_z = z
    model.rot_x = rot_x
    model.rot_y = rot_y
    model.rot_z = rot_z
    trans = model.mri_head_t
    model.reset_traits(["trans_x", "trans_y", "trans_z", "rot_x", "rot_y",
                        "rot_z"])
    assert model.trans_x == 0
    model.set_trans(trans)
    assert_array_almost_equal(model.trans_x, x)
    assert_array_almost_equal(model.trans_y, y)
    assert_array_almost_equal(model.trans_z, z)
    assert_array_almost_equal(model.rot_x, rot_x)
    assert_array_almost_equal(model.rot_y, rot_y)
    assert_array_almost_equal(model.rot_z, rot_z)
    # info strings shown in the GUI
    assert isinstance(model.fid_eval_str, str)
    assert isinstance(model.points_eval_str, str)
    # scaling job
    assert not model.can_prepare_bem_model
    model.n_scale_params = 1
    assert model.can_prepare_bem_model
    model.prepare_bem_model = True
    sdir, sfrom, sto, scale, skip_fiducials, labels, annot, bemsol = \
        model.get_scaling_job('sample2', False)
    assert sdir == subjects_dir_tmp
    assert sfrom == 'sample'
    assert sto == 'sample2'
    assert_allclose(scale, model.parameters[6:9])
    assert skip_fiducials is False
    # find BEM files on disk to compare against the job's bemsol list
    bems = set()
    for fname in os.listdir(op.join(subjects_dir, 'sample', 'bem')):
        match = re.match(r'sample-(.+-bem)\.fif', fname)
        if match:
            bems.add(match.group(1))
    assert set(bemsol) == bems
    # with prepare_bem_model off, no BEM solutions are scheduled
    model.prepare_bem_model = False
    sdir, sfrom, sto, scale, skip_fiducials, labels, annot, bemsol = \
        model.get_scaling_job('sample2', True)
    assert bemsol == []
    assert (skip_fiducials)
    # load a known transform and verify the save/restore round trip
    model.load_trans(fname_trans)
    model.save_trans(trans_dst)
    trans = mne.read_trans(trans_dst)
    assert_allclose(trans['trans'], model.head_mri_t)
    # translations are exposed in mm, hence the * 1000.
    assert_allclose(invert_transform(trans)['trans'][:3, 3] * 1000.,
                    [model.trans_x, model.trans_y, model.trans_z])
@requires_mayavi
@traits_test
def test_coreg_gui_display(subjects_dir_tmp, check_gui_ci):
    """Test CoregFrame."""
    from mayavi import mlab
    from tvtk.api import tvtk
    home_dir = subjects_dir_tmp
    # Remove the two files that will make the fiducials okay via MNI estimation
    os.remove(op.join(subjects_dir_tmp, 'sample', 'bem',
                      'sample-fiducials.fif'))
    os.remove(op.join(subjects_dir_tmp, 'sample', 'mri', 'transforms',
                      'talairach.xfm'))
    # Run the GUI headlessly and with a fake home dir so config reads/writes
    # stay inside the test sandbox.
    with modified_env(_MNE_GUI_TESTING_MODE='true',
                      _MNE_FAKE_HOME_DIR=home_dir):
        with pytest.raises(ValueError, match='not a valid subject'):
            mne.gui.coregistration(
                subject='Elvis', subjects_dir=subjects_dir_tmp)
        # avoid modal dialog if SUBJECTS_DIR is set to a directory that
        # does not contain valid subjects
        ui, frame = mne.gui.coregistration(subjects_dir='')
        mlab.process_ui_events()
        ui.dispose()
        mlab.process_ui_events()
        ui, frame = mne.gui.coregistration(subjects_dir=subjects_dir_tmp,
                                           subject='sample')
        mlab.process_ui_events()
        assert not frame.model.mri.fid_ok
        # Set approximate MRI fiducials through the frame's model.
        frame.model.mri.lpa = [[-0.06, 0, 0]]
        frame.model.mri.nasion = [[0, 0.05, 0]]
        frame.model.mri.rpa = [[0.08, 0, 0]]
        assert frame.model.mri.fid_ok
        frame.data_panel.raw_src.file = raw_path
        # EEG electrodes render as spheres until projected to the surface,
        # then as cylinders.
        assert isinstance(frame.eeg_obj.glyph.glyph.glyph_source.glyph_source,
                          tvtk.SphereSource)
        frame.data_panel.view_options_panel.eeg_obj.project_to_surface = True
        assert isinstance(frame.eeg_obj.glyph.glyph.glyph_source.glyph_source,
                          tvtk.CylinderSource)
        mlab.process_ui_events()
        # grow hair (faster for low-res)
        assert frame.data_panel.view_options_panel.head_high_res
        frame.data_panel.view_options_panel.head_high_res = False
        frame.model.grow_hair = 40.
        # scale: inc/dec buttons step the scale by 1 (percent-style values)
        frame.coreg_panel.n_scale_params = 3
        frame.coreg_panel.scale_x_inc = True
        assert frame.model.scale_x == 101.
        frame.coreg_panel.scale_y_dec = True
        assert frame.model.scale_y == 99.
        # reset parameters
        frame.coreg_panel.reset_params = True
        assert frame.model.grow_hair == 0
        assert not frame.data_panel.view_options_panel.head_high_res
        # configuration persistence: settings survive save_config + reopen
        assert (frame.model.prepare_bem_model)
        frame.model.prepare_bem_model = False
        frame.save_config(home_dir)
        ui.dispose()
        mlab.process_ui_events()
        ui, frame = mne.gui.coregistration(subjects_dir=subjects_dir_tmp)
        assert not frame.model.prepare_bem_model
        assert not frame.data_panel.view_options_panel.head_high_res
        ui.dispose()
        mlab.process_ui_events()
@testing.requires_testing_data
@requires_mayavi
@traits_test
def test_coreg_model_with_fsaverage(tmpdir):
    """Test CoregModel with the fsaverage brain data."""
    tempdir = str(tmpdir)
    from mne.gui._coreg_gui import CoregModel
    # Create an fsaverage subject inside the scratch subjects dir.
    mne.create_default_subject(subjects_dir=tempdir,
                               fs_home=op.join(subjects_dir, '..'))
    model = CoregModel()
    model.mri.subjects_dir = tempdir
    model.mri.subject = 'fsaverage'
    # fsaverage ships with fiducials, so they are OK immediately.
    assert model.mri.fid_ok
    model.hsp.file = raw_path
    # Record pre-fit errors; the scale fits below must reduce them.
    lpa_distance = model.lpa_distance
    nasion_distance = model.nasion_distance
    rpa_distance = model.rpa_distance
    avg_point_distance = np.mean(model.point_distance)
    # test hsp point omission at several distance thresholds
    model.nasion_weight = 1.
    model.trans_y = -0.008
    model.fit_fiducials(0)
    model.omit_hsp_points(0.02)
    assert model.hsp.n_omitted == 1
    model.omit_hsp_points(np.inf)
    assert model.hsp.n_omitted == 0
    model.omit_hsp_points(0.02)
    assert model.hsp.n_omitted == 1
    model.omit_hsp_points(0.01)
    assert model.hsp.n_omitted == 4
    model.omit_hsp_points(0.005)
    assert model.hsp.n_omitted == 40
    model.omit_hsp_points(0.01)
    assert model.hsp.n_omitted == 4
    model.omit_hsp_points(0.02)
    assert model.hsp.n_omitted == 1
    # scale with 1 parameter
    model.n_scale_params = 1
    model.fit_fiducials(1)
    old_x = lpa_distance ** 2 + rpa_distance ** 2 + nasion_distance ** 2
    new_x = (model.lpa_distance ** 2 + model.rpa_distance ** 2 +
             model.nasion_distance ** 2)
    assert (new_x < old_x)
    model.fit_icp(1)
    avg_point_distance_1param = np.mean(model.point_distance)
    assert (avg_point_distance_1param < avg_point_distance)
    # scaling job
    sdir, sfrom, sto, scale, skip_fiducials, labels, annot, bemsol = \
        model.get_scaling_job('scaled', False)
    assert sdir == tempdir
    assert sfrom == 'fsaverage'
    assert sto == 'scaled'
    assert_allclose(scale, model.parameters[6:9])
    assert set(bemsol) == {'inner_skull-bem'}
    # with prepare_bem_model off, no BEM solutions are scheduled
    model.prepare_bem_model = False
    sdir, sfrom, sto, scale, skip_fiducials, labels, annot, bemsol = \
        model.get_scaling_job('scaled', False)
    assert bemsol == []
    # scale with 3 parameters (must improve on the 1-parameter fit)
    model.n_scale_params = 3
    model.fit_icp(3)
    assert (np.mean(model.point_distance) < avg_point_distance_1param)
    # test switching raw disables point omission
    assert model.hsp.n_omitted == 1
    model.hsp.file = kit_raw_path
    assert model.hsp.n_omitted == 0
@testing.requires_testing_data
@requires_mayavi
@traits_test
def test_coreg_gui_automation():
    """Test that properties get properly updated."""
    from mne.gui._file_traits import DigSource
    from mne.gui._fiducials_gui import MRIHeadWithFiducialsModel
    from mne.gui._coreg_gui import CoregModel
    subject = 'sample'
    hsp = DigSource()
    hsp.file = raw_path
    mri = MRIHeadWithFiducialsModel(subjects_dir=subjects_dir, subject=subject)
    model = CoregModel(mri=mri, hsp=hsp)
    # gh-7254: nearest-vertex indices must not all collapse to vertex 0
    assert not (model.nearest_transformed_high_res_mri_idx_hsp == 0).all()
    model.fit_fiducials()
    # Two short ICP passes keep the test fast while still converging enough.
    model.icp_iterations = 2
    model.nasion_weight = 2.
    model.fit_icp()
    model.omit_hsp_points(distance=5e-3)
    model.icp_iterations = 2
    model.fit_icp()
    # Median ICP error should land in a narrow, known-good band (meters).
    errs_icp = np.median(
        model._get_point_distance())
    assert 2e-3 < errs_icp < 3e-3
    info = mne.io.read_info(raw_path)
    errs_nearest = np.median(
        dig_mri_distances(info, fname_trans, subject, subjects_dir))
    assert 1e-3 < errs_nearest < 2e-3
run_tests_if_main()  # allow running this test file directly as a script
| {
"repo_name": "pravsripad/mne-python",
"path": "mne/gui/tests/test_coreg_gui.py",
"copies": "10",
"size": "12244",
"license": "bsd-3-clause",
"hash": -6801095541144068000,
"line_mean": 34.8011695906,
"line_max": 79,
"alpha_frac": 0.6464390722,
"autogenerated": false,
"ratio": 3.070980687233509,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8717419759433509,
"avg_score": null,
"num_lines": null
} |
import os
from numpy import array
from numpy.testing import assert_allclose
from nose.tools import assert_equal, assert_false, assert_raises, assert_true
from mne.datasets import testing
from mne.io.tests import data_dir as fiff_data_dir
from mne.utils import (_TempDir, requires_mne, requires_freesurfer,
requires_traits)
# Paths into the MNE testing dataset (download is skipped; the tests below
# are decorated to require the data to be present).
data_path = testing.data_path(download=False)
subjects_dir = os.path.join(data_path, 'subjects')
bem_path = os.path.join(subjects_dir, 'sample', 'bem', 'sample-1280-bem.fif')
inst_path = os.path.join(data_path, 'MEG', 'sample',
                         'sample_audvis_trunc_raw.fif')
fid_path = os.path.join(fiff_data_dir, 'fsaverage-fiducials.fif')
@testing.requires_testing_data
@requires_traits
def test_bem_source():
    """Test SurfaceSource."""
    from mne.gui._file_traits import SurfaceSource
    bem = SurfaceSource()
    # Before a file is assigned, points/tris are empty (0, 3) arrays.
    assert_equal(bem.points.shape, (0, 3))
    assert_equal(bem.tris.shape, (0, 3))
    bem.file = bem_path
    # The sample 1280-triangle BEM surface has 642 vertices.
    assert_equal(bem.points.shape, (642, 3))
    assert_equal(bem.tris.shape, (1280, 3))
@testing.requires_testing_data
@requires_traits
def test_fiducials_source():
    """Test FiducialsSource."""
    from mne.gui._file_traits import FiducialsSource
    fid = FiducialsSource()
    fid.file = fid_path
    # Expected fsaverage fiducial coordinates; rows appear to be LPA, nasion,
    # RPA (inferred from coordinate signs) -- confirm against the reader.
    points = array([[-0.08061612, -0.02908875, -0.04131077],
                    [0.00146763, 0.08506715, -0.03483611],
                    [0.08436285, -0.02850276, -0.04127743]])
    assert_allclose(fid.points, points, 1e-6)
    # Clearing the file resets the points.
    fid.file = ''
    assert_equal(fid.points, None)
@testing.requires_testing_data
@requires_traits
def test_inst_source():
    """Test InstSource."""
    from mne.gui._file_traits import InstSource
    inst = InstSource()
    # Placeholder filename before a file is set.
    assert_equal(inst.inst_fname, '-')
    inst.file = inst_path
    assert_equal(inst.inst_dir, os.path.dirname(inst_path))
    # Fiducials as read from the raw file's digitization.
    lpa = array([[-7.13766068e-02, 0.00000000e+00, 5.12227416e-09]])
    nasion = array([[3.72529030e-09, 1.02605611e-01, 4.19095159e-09]])
    rpa = array([[7.52676800e-02, 0.00000000e+00, 5.58793545e-09]])
    assert_allclose(inst.lpa, lpa)
    assert_allclose(inst.nasion, nasion)
    assert_allclose(inst.rpa, rpa)
@testing.requires_testing_data
@requires_traits
def test_subject_source():
    """Test MRISubjectSource subject discovery."""
    from mne.gui._file_traits import MRISubjectSource
    mri = MRISubjectSource()
    mri.subjects_dir = subjects_dir
    # Setting subjects_dir populates the list of available subjects.
    assert_true('sample' in mri.subjects)
    mri.subject = 'sample'
@testing.requires_testing_data
@requires_traits
@requires_mne
@requires_freesurfer
def test_subject_source_with_fsaverage():
    """Test MRISubjectSource fsaverage creation."""
    from mne.gui._file_traits import MRISubjectSource
    tempdir = _TempDir()
    mri = MRISubjectSource()
    # Without a subjects_dir, fsaverage creation must be unavailable and
    # attempting it must raise.
    assert_false(mri.can_create_fsaverage)
    assert_raises(RuntimeError, mri.create_fsaverage)
    mri.subjects_dir = tempdir
    assert_true(mri.can_create_fsaverage)
    mri.create_fsaverage()
| {
"repo_name": "jmontoyam/mne-python",
"path": "mne/gui/tests/test_file_traits.py",
"copies": "3",
"size": "3042",
"license": "bsd-3-clause",
"hash": 8993606791894419000,
"line_mean": 28.25,
"line_max": 77,
"alpha_frac": 0.6857330703,
"autogenerated": false,
"ratio": 2.950533462657614,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 104
} |
__author__ = 'christianbuia'
import binascii
def fixed_xor_hexstrings(hexstring1, key):
    """XOR every byte of a hex-encoded string with a single-byte key.

    Parameters
    ----------
    hexstring1 : str
        Hex-encoded input, e.g. ``'414243'``.
    key : int
        Single-byte XOR key (0-255).

    Returns
    -------
    str
        Decoded text, one character per input byte.
    """
    # ``binascii`` is already imported at module level; the redundant
    # function-local import is gone.  ``join`` replaces the original
    # quadratic ``decoded += chr(...)`` accumulation.
    data = binascii.unhexlify(hexstring1)
    return "".join(chr(byte ^ key) for byte in data)
#-------------------------------------------------------------------------------
def evaluate_as_english(message, checkPrintables=True, checkSpaces=False):
    """Heuristically decide whether *message* looks like English plaintext.

    Parameters
    ----------
    message : str
        Candidate decoded text; surrounding whitespace is ignored.
    checkPrintables : bool
        Reject the message if it contains any non-printable ASCII character.
    checkSpaces : bool
        Reject the message unless it has roughly word-sized,
        space-separated tokens (at least one space per ~7 characters).

    Returns
    -------
    bool
        True if the message passes all enabled checks.
    """
    # Strip first so trailing newlines etc. do not trip the printable check.
    candidate = message.strip()
    # Stage 1: any byte outside printable ASCII (32-126) disqualifies it.
    if checkPrintables and any(not 32 <= ord(ch) <= 126 for ch in candidate):
        return False
    # Stage 2: too few space-separated tokens for the text length.
    if checkSpaces and len(candidate.split(" ")) < len(candidate) / 7:
        return False
    return True
#-------------------------------------------------------------------------------
def run_main(lines):
    """Brute-force a single-byte XOR cipher over newline-separated hex lines.

    For each input line, tries all 256 single-byte keys and prints every
    decoding that passes the English-plausibility heuristic
    (``evaluate_as_english`` with both checks enabled).

    Parameters
    ----------
    lines : str
        Newline-separated hex-encoded ciphertexts.

    Returns
    -------
    bool
        Always True (kept for backward compatibility with callers).
    """
    # Changes vs. the original: the unused ``possible_matches`` counter,
    # the dead commented-out debugging code, and the redundant second
    # ``strip()`` (lines were already stripped once) are removed.
    for line in (x.strip() for x in lines.split("\n")):
        for key in range(256):
            message = fixed_xor_hexstrings(line, key)
            if evaluate_as_english(message, checkPrintables=True,
                                   checkSpaces=True):
                print(message)
    return True
lines = """0e3647e8592d35514a081243582536ed3de6734059001e3f535ce6271032
334b041de124f73c18011a50e608097ac308ecee501337ec3e100854201d
40e127f51c10031d0133590b1e490f3514e05a54143d08222c2a4071e351
45440b171d5c1b21342e021c3a0eee7373215c4024f0eb733cf006e2040c
22015e420b07ef21164d5935e82338452f42282c1836e42536284c450de3
043b452e0268e7eb005a080b360f0642e6e342005217ef04a42f3e43113d
581e0829214202063d70030845e5301f5a5212ed0818e22f120b211b171b
ea0b342957394717132307133f143a1357e9ed1f5023034147465c052616
0c300b355c2051373a051851ee154a023723414c023a08171e1b4f17595e
550c3e13e80246320b0bec09362542243be42d1d5d060e203e1a0c66ef48
e159464a582a6a0c50471310084f6b1703221d2e7a54502b2b205c433afa
ec58ea200e3005090e1725005739eda7342aed311001383fff7c58ef1f11
01305424231c0d2c41f105057f74510d335440332f1038ec17275f5814e1
05f12f380720ea2b19e24a07e53c142128354e2827f25a08fb401c3126a6
0d17272f53063954163d050a541b1f1144305ae37d4932431b1f33140b1b
0b4f070f071fe92c200e1fa05e4b272e50201b5d493110e429482c100730
100a3148080f227fe60a132f0c10174fe3f63d1a5d38eb414ca8e82f2b05
0a19e83c58400a023b13234572e6e4272bf67434331631e63b5e0f00175c
54520c2ceb45530e0f78111d0b0707e01e4bf43b0606073854324421e6f9
09e7585353ee4a34190de1354e481c373a1b2b0a136127383e271212191f
0f060d09fb4f2d5024022c5ff6463c390c2b5f1a5532071a31f33503fcea
371d39121605584f48217235ee1e0602445c162e4942254c071954321d29
4a0900e63e5f161e15554045f3594c2a6a77e4e52711602beaf53ae53bed
29011616565d2a372a605bee39eced31183fe068185c3b445b391fe53232
e4102337000303452a1e2f2b29493f54ed5a037b3e08311b625cfd005009
2d560d4b0618203249312a310d5f541f295c3f0f25235c2b20037d1600f3
2c245155e8253708391a7ceb0d05005c3e080f3f0f0e5a16583b111f4448
493804044d262eec3759594f212d562420105d6a39e70a0f3957f347070c
e72d1d1f103807590f4339575e00381074485d2d580249f744052605e11d
e131570ae95307143a71131729552d001057a4540a1f425b190b572dee34
2c1655342f02581c202b0a5c17a358291e1506f325550f05365e165c1c5f
e318164df80b043e5406296e5359271d152f552e155a43eda81f23231d1c
001de0413e174e18192c061e4b3d1b5626f90e3e1429544a20ee150d0c20
32e902193219033c58191302441a5c1b584825ea140c290927aaea53e23c
3a36363a732e32ea3f0e430508204b332c382a19292d5b291122e123446a
1804115614031f5f571f2b143c5d3c1b257a4b37350f18445a3e08341c3d
21f2fb250b2e55151e77253a3f0e5f4b2030370a4155e720e73914e35a4a
510a55583a3c491221397c123a2b14a8305b3b09e71b241d0e51202e1a32
1b51202f4917232b512a141d6812f03c455df05e5a1c2cee14390b3b593a
5f5731e5203116ee131a4a4b24112cef5d0822f035e6547d3a0014462f26
0028fb522104f771501a555d3f581e30e9ec3e49e3e63123432f07794145
1459f6312f000e5a1373e346e40f211e1b0b0e17000f391f170552150500
7e301e18325717e3412e022f087be30e5641080151357714e0e0eee15e11
533258e9360f513b083aa51d2824222f40200a470537ecec392d31070b38
07e32c180dfa56496a461627542115132a4c284050495b23e2245b093159
2d3c230a1e5a300f6c3e26ed0d1709434950fd6f1e121335054129e4e4ec
ef22fa2112311b11584ce43434f46f521a215433f9514fe33d313a3e0838
34e7f336270c08010f2f544f0f1c1e235c0222644c2632efec061de2115f
121a42395d4c560d213b0c0a26a7e4f4382718153d5e511158a10b2c021e
e05d414dfa40222f0c382a03235f4d0d04372d4b7855105e26e44f2e0555
7f3a4f1351f85b0344223e1177e14707190c0e311f4ca633f5f3e9352372
01424d5d1a322a0d381717130e181d07240c2c19ecee750b1a37085d014c
16012c5de55a0314a8260e2759e439123ca0c81c321d454e4e0ee14f4c1d
0b1415512f38580e4e2a227def242643183c224f0ea146443403022fe9fd
43eb2b1078322a02192d5b5e0c360d584d0b5e2c13072912ee32f03f4155
002a52553e08361b0be0074b573e201c164c093a5c0f0159333b59770d5b
38e63c1c5244301a5a01f26930321256143e1ae05e1120a9eaf20a192d58
7d54140a152ef4035f09083ded531ee04df55848020656a1342e502649eb
0c211dfe101702015516341136252f3f06f73247133113f5642d083a3417
015e3d51433f3c003e5e28030b1d413eee186824504b241e0f0d32373e2b
2d465040ec130c5c0e2704aa17010c40095207223669110f22f45ea155f7
14552e2b341e5ce0195351066a23e3283e0ee935444b255a1c5c3cef7614
372b453d5a357c05142be65b3c17f92d2b134853390a312bf92a531b513d
5658265f4c0ce4440a20322f591a413034292b312206a01be6453a512d21
1c585c19f31f785324f8583d1ee02620342b10a236263f105011ee5b0e14
0f522b550818591a752e5fea0e033322ee5e280a4a1b244f5a2b35341255
39093c1ced331b264127173f1312e2455fa33b31012c1f4d073c553f5d5e
18f82d5d07e2430b3b3c1b5b49effb0313173f5d4a2e5c134555ff6b1d1a
550a20234202726341190311295254f4064205aa515ae0145a23071c4e18
3f2047024e3ce4555a1b39fa145455012c3afb0f2d11134846182e3c575b
e3e456571937762828065443153b51152e262f09c937024405284f236432
012f580c3536ec5c021574541d5c41123a4e661d5f0f5f344a083e3a5e4c
4216252d01eb0a2a4623621b48360d312c29f33e380650447617124b3e71
54141e59323606390204e95f1206520e5c084510034d30171c5e744f335d
1e30061401600b342e171059526d1949431a3f412f56594c183711ea4837
3131254f11e76f550e1e4d26f1391f44363b151c31281ff45259351da0e6
5def250d0f3505385f22e9f4112633005d272d092e0138275851f943e90e
0939165718303b445210095c16390cf04f19450e06f4545c0a0c320e3e23
1e0b0b1f573f3d0fe05d43090fa8482242300819313142325b1f4b19365b
0d3b2a5d271e463d2203765245065d5d684a051e5815265b52f3171d3004
6af423303817a43324394af15a5c482e3b16f5a46f1e0b5c1201214b5fe4
4030544f3f51151e436e04203a5e3b287ee303490a43fb3b28042f36504e
1a2d5a03fc0e2c04384046242e2b5e1548101825eb2f285f1a210f022141
122355e90122281deeed3ba05636003826525d5551572d07030d4935201f
2a3c484a15410d3b16375d4665271b5c4ce7ee37083d3e512b45204f17f6
03222801255c2c211a7aeb1e042b4e38e8f1293143203139fb202c325f2b
06542a28041956350e292bf3fe5c32133a2a171b3a3e4e4e3101381529e3
4a5209ef24e5f3225e503b143d0e5747323fe7ee3d5b1b5110395619e65a
1fee0a3945563d2b5703701817584b5f5b54702522f5031b561929ea2d1e
e7271935100e3c31211b23113a3a5524e02241181a251d521ff52f3c5a76
144a0efee02f0f5f1d353a1c112e1909234f032953ec591e0a58e55d2cf4
efee0cf00d0955500210015311467543544708eb590d113d30443d080c1e
1a562c1f7e2b0030094f051c03e30f4d501a0fe22a2817edfc5e470c3843
1c3df1135321a8e9241a5607f8305d571aa546001e3254555a11511924
eb1d3f54ec0fea341a097c502ff1111524e24f5b553e49e8576b5b0e1e33
72413e2f5329e332ec563b5e65185efefd2c3b4e5f0b5133246d214a401d
352a0ae632183d200a162e5346110552131514e0553e51003e220d47424b
1d005c58135f3c1b53300c3b49263928f55625454f3be259361ded1f0834
2d2457524a1e1204255934174d442a1a7d130f350a123c4a075f5be73e30
0c0518582d131f39575925e0231833370c482b270e183810415d5aec1900
453b181df1572735380b0446097f00111f1425070b2e1958102ceb592928
010a4a2d0b0926082d2f1525562d1d070a7a08152f5b4438a4150b132e20
2b395d0d5d015d41335d21250de33e3d42152d3f557d1e44e4ee22255d2d
4a1b5c272d0d1c45072639362e402dee2853e51311262b17aa72eb390410
e7015f0215352030574b4108e44d0e1a204418e62325ff7f34052f234b2d
1d563c13202346071d39e34055402b0b392c27f552222d3deb3843ee2c16
29332a521f3c1b0811e33e1a25520e323e75e01c17473f55071226120d3d
210b35ee1a0a5335222e35033905170c4f3104eb032d425058367d5a2bf2
1e553809415efb1c460f2f0ffafaec491e4d4e49510452e8245a366a4106
e1f92cee0e10142514e7ec13155c412fe901092f1f0fa738280c5eee5e04
3526291e0b2a5f486a3051041f4c16372f5402e6f70b31a03525190b161a
260e5e1f0c2e4d7528ef11552fefe247201e4752085c1da903563c162a4b
2a14ff2e3265e604075e523b24455c364a7f284f3a43051d52152f1119e8
5f02e55a4b1300063640ef10151002565f0b0c010033a1cbef5d3634484a
1b121c585b495a5e033a09037f2d1754072c2d49084055172a3c220bed4f
1613400e1632435c0018482aa55b363d26290ae4405ded280f2b0c271536
4011250ce02119464a1de43113170356342c272d1d3355555e5706245e0a
16272d5e545953002e10020875e223010719555410f91ce518420e382456
0d4037320345f945241a1d090a545a310142442131464f4d10562ae4f05a
07ee4d4ae12e571e313c1636313134233e495459e548317708563c2c1b2f
e75803294b36565225552c3406304f0201e43323291b5e0e2159025c2f25
5e63194411490c44494232237e1b323108573d3f391d1f3537e4165a2b35
51000a3a264c503b5852072a5636f04f5cea58a42838f5fca876415c3521
3c14130be511275932055a30aa2d03470c51060009f210543002585f5713
10f0370c5823115200e5015d083e2f1a5df91d68065c1b03f0080855e529
02ec00f1462d034123151ba6fc07eb3d5e54e85a3f3ee532fb41791a060b
0c29274232f93efb3d465544e45e491b042ced245100e3f05c14134c254b
5741235f051e080401a8013c065627e8ee5432205114243d54320e133f2d
4a4d181635411f5d084e31ed230c16506d5125415e060e4dcd0e5f3708e3
2d531c3e22065a5eee07310c145305131800063e4a20094b2006ea131240
e7335c1c4308160be6aa551a0f5a58243e0b10ee470047683c345e1c5b0c
5434505ee22a18110d20342e4b53062c4d79042a0a02422e225b2523e95a
3252212407115c07e15eee06391d0519e9271b641330011f383410281f0e
2cee2b355233292b595d1c69592f483b54584f7154fd4928560752e333a1
17272b272f110df5e91c560a39104510240b5c4b0c1c570871e422351927
c32550ec3f132c0c2458503ae5241d3c0d7911480a073826315620403615
16e11c270d2b010650145de2290b0beb1e120a3a354b2104064f3b533c4e
505746313d4d2e3455290a281ee81d50007e1148252528025237715a342a
1c0a13163e404e40242142061d34185421160220fa031f7a423a08f2e01a
101d303802f51b0c08ef461259315b553823e622a12d565509e23c624139
0a3d1309e4384c0eed383846545a035a41ee1771513b090a031e15f45159
2d4944092a1965542507003b23195758403e175a0a450c5c38114de21141
eb100fe63a031c4b35eb591845e428441c0d5b0037131f5c160a31243619
c155ef0d19143e24392507a202581a25491b135c27571d5c5b35250f0bef
0e1d510556485e39557e044e2cf10457523016473f500b1e36370c17591c
7e5a19250a5e152b46f5130a094cef08e84704ef10197324464b0114017a
3b56f126390008343d3c400232ed201667211f0b1a1413080202530b08e2
4912321b61c90a0cf6ef0a0a0c0f17fa62eb385e2616194526701aff5fe6
2c57114b0400152d4f2aeb18ed41386c2e3a023a281d1a311eefe750ebab
3a4353282114593b3e36446d2c5e1e582e335337022930331f211604576a
295f3bfae9271ae8065a3b4417545c3e5b0df11a53351c78530915392d2e
074a122ee01b17131e4e124e2322a9560ce4120e37582b24e1036fe93f30
3c08290121090ef72f25e4f220323444532d3fe71f34553c7b2726131009
12e84a3308590357a719e74c4f2133690a20031a0b045af63551325b1219
0e3d4fe03f56523cf40f29e4353455120e3a4f2f26f6a30a2b3e0c5b085a
57f3315c33e41c0f523426232d0651395c1525274e314d0219163b5f181f
53471622182739e9e25b473d74e1e7023d095a3134e62d1366563004120e
230a06431935391d5e0b5543223a3bed2b4358f555401e1b3b5c36470d11
22100330e03b4812e6120f163b1ef6abebe6f602545ef9a459e33d334c2a
463405faa655563a43532cfe154bec32fe3345eb2c2700340811213e5006
14241340112b2916017c270a0652732ee8121132385a6c020c040e2be15b
251119225c573b105d5c0a371c3d421ef23e22377fee334e0228561b2d15
2e4c2e373b434b0d0b1b340c300e4b195614130ea03c234c292e14530c46
0d2c3f08560ee32e5a5b6413355215384442563e69ec294a0eef561e3053
193c100c0b24231c012273e10d2e12552723586120020b02e45632265e5f
2c175a11553d4b0b16025e2534180964245b125e5d6e595d1d2a0710580b
213a175ff30855e4001b305000263f5a5c3c5100163cee00114e3518f33a
10ed33e65b003012e7131e161d5e2e270b4645f358394118330f5a5b241b
33e80130f45708395457573406422a3b0d03e6e5053d0d2d151c083337a2
551be2082b1563c4ec2247140400124d4b6508041b5a472256093aea1847
7b5a4215415d544115415d5015455447414c155c46155f4058455c5b523f
0864eb4935144c501103a71851370719301bec57093a0929ea3f18060e55
2d395e57143359e80efffb13330633ea19e323077b4814571e5a3de73a1f
52e73c1d53330846243c422d3e1b374b5209543903e3195c041c251b7c04
2f3c2c28273a12520b482f18340d565d1fe84735474f4a012e1a13502523
23340f39064e306a08194d544647522e1443041d5ee81f5a18415e34a45f
475a392637565757730a0c4a517b2821040e1709e028071558021f164c54
100b2135190505264254005618f51152136125370eef27383e45350118ed
3947452914e0223f1d040943313c193f295b221e573e1b5723391d090d1f
2c33141859392b04155e3d4e393b322526ee3e581d1b3d6817374d0c085b
c2ea5821200f1b755b2d13130f04e26625ea3a5b1e37144d3e473c24030d
ee15025d2019f757305e3f010e2a453a205f1919391e1a04e86d1a350119
1a5beb4946180fe0002a031a050b41e5164c58795021e1e45c59e2495c20
1121394f1e381c3647005b7326250514272b55250a49183be5454ba518eb
1ee55936102a465d5004371f2e382f1d03144f170d2b0eed042ee341eb19
ec1014ef3ff1272c3408220a41163708140b2e340e505c560c1e4cf82704
274b341a454a27a0263408292e362c201c0401462049523b2d55e5132d54
e259032c444b091e2e4920023f1a7ce40908255228e36f0f2424394b3c48
34130cf8223f23084813e745e006531a1e464b005e0e1ee405413fe22b4e
4af201080c0928420c2d491f6e5121e451223b070dee54244b3efc470a0e
771c161f795df81c22101408465ae7ef0c0604733ee03a20560c1512f217
2f3a142c4155073a200f04166c565634020a59ea04244ff7413c4bc10858
240d4752e5fa5a4e1ce255505602e55d4c575e2b59f52b4e0c0a0b464019
21341927f3380232396707232ae424ea123f5b371d4f65e2471dfbede611
e10e1c3b1d4d28085c091f135b585709332c56134e4844552f45eb41172a
3f1b5a343f034832193b153c482f1705392f021f5f0953290c4c43312b36
3810161aea7001fb5d502b285945255d4ef80131572d2c2e59730e2c3035
4d59052e1f2242403d440a13263e1d2dea0612125e16033b180834030829
022917180d07474c295f793e42274b0e1e16581036225c1211e41e04042f
ec2b41054f2a5f56065e5e0e1f56e13e0a702e1b2f2137020e363a2ae2a4
53085a3b34e75a1caa2e5d031f261f5f044350312f37455d493f131f3746
0c295f1724e90b001a4e015d27091a0b3256302c303d51a05956e6331531
e42b315ce21f0def38144d20242845fa3f3b3b0ce8f4fb2d31ed1d54134b
2957023141335d35372813263b46581af6535a16404d0b4ff12a207648ec
e4421e301de25c43010c504e0f562f2018421ce137443b41134b5f542047
0c5600294e085c1d3622292c480d261213e05c1334385108c145f3090612
062d2e02267404241f4966e6e010052d3224e72856100b1d22f65a30e863
324950394700e11a01201a0564525706f1013f353319076b4c0d015a2e24
2a1be80e2013571522483b1e20321a4e03285d211a444d113924e8f41a1f
27193ae2302208e73010eaa1292001045737013e10e4745aed2c105b25fb
1b135d46eaef103e1d330a14337a2a4302441c1631ed07e7100c743a0e35
1a0957115c293b1c0de853245b5b18e2e12d28421b3230245d7b4a55f355
e7360e2b3846202a2926fa495e3302ed064d127a17343a1f11032b40e8f5
06e8f90a3118381c5414157d1434050210363e30500511a00a3d56e10438
30021931f7193e25a0540ef52658350929380974fb035b1a5d2c042959c7
151b0c24052d0e56025404390e5a3909edec0d03070f040cff710825363e
2a2328120b2203320810134a0c0a0ef30b25460bec011c1e26e913575a51
e12d0948ed3c511416151d1c54082b3e385d14f838510bec4e4b5f585321
1559305c3a49192a010f04ec11001a3d5a5621e5535358353206521f013f
172c2c155a3a322009505c290516a2c4e4405a1e0a1e353b6e1a5a4e2f09
552c34e2432b0df1132b130841000d4007232339a2092a593f142b0a0117
0931432e452d3aea1d02587d3a3e56ed2a3050e2f9363df366331e421947
0250094823545b20163f1d0a36a92228ed25564d1a304deae8035c32370d
4314380e264e2359e6a412504a424328e84434ff30236649353315344a00
25e33540550d3c15135b0eed451cfd1812eaf2063f085d6e214d121c342f
37513b2d0a4e3e5211372a3a01334c5d51030c46463e3756290c0d0e1222
132f175e4c4af1120138e1f2085a3804471f5824555d083de6123f533123
0de11936062d3d2f12193e135f38ff5e1a531d1426523746004e2c063a27
49241aee1802311611a50de9592009e936270108214a0c4213a01f09545f
02e14d2babee204a5c4337135821360d021b7831305963ee0737072f0deb
1512371119050c0c1142245a004f033650481830230a1925085c1a172726
3be62f230a4b50526ec9345100252aa729eafa59221b3fa517304e500a15
5e57f231333c3d0c470a47551733511031362a3bed0f334a3f3136104230
eb24015d051a151f245905061a37ea273d2239fe02463a5e314d565f0457
23025f415d290a594e3b5940313347a11c5e41531ff15a385a183829780a
51e0035f2deb3b163eabe8550e2e0414491f573b5419234a28183044e112
1d54e8390b26585f3aef5f14206672240c4a5e5d31e01b4d406e351401fa
e555173e242c753b275d4ee50b2f26501402a71b1b5733ec19ee34284aed
2ee8f023401c09383b084d623ef324ee5a33065a6d5e365b092c5d0d4501
3f4e024d4b161e144d5e3b140d1e2944465b491d265603a705373c231240
544f0d4ea6091e00e62d3e130d4f005139f339001a3b480c221b730be75e
5f1f4f3e0a0dec3b5128e32960e42d0fee02275528154b10e65c36555a2e
ea3e311b5b0f5f220b1f1b2914f12111f41213e06232224df5ec0114470d
51203f1e01e5563851284013514a565e53125223052f47100e5011100201
3f5bee2305217838582be55958a00245265b0308ec56525b5c114c2d5407
e6e74818e53602160e45372029eb4de72754ec3f49290d2f5901014c0e7f
08e715e612380a5c1908285a1222073a023c562907384e4f470444483f34
1110382b5225343ba6092133483e2d683e1e280227084a1e405e3a341513
415f240f0c53e3f7196e2252fb0105347f345e531f535a344bf439220916
5722e7f7fa2f4c2e057e2a025e2dec31413439aa12265f5a3458f81a4b15
135839401856f337a72fec475a060de239a650163a55392a5b303f051415
56090f18023a2b16e2364407050d48e1541408281d3aa3e84c5b264c1f33
1725f9540aec5e10ed293e4e5a5a2d2125f053251a55395d1c2044022231
292d523ff86a180620075f325e02566659f30423525a053a01f0087f4b3b
17fe493808f25309251e1325596ce32b42311e5d0c2f58652640582a4b17
67381a5afb7128150a0043e45b173d2111155c49092d2635370a3a201826
e62d021d36e03b205d5f1f295c094608342a412122583f3bfc34190be62c
393a055f59060d454a235326e844243a30285c14e316272524f4f0444f51
352c3c5b2b5845244f55494940194721f80b120f07392b7c2c5a0508111e
2f1219430151e60f11150b101e295736361b1e053e4d08f83f230e2c383a
ef5b1d492610e834330f5cf3a2485d324f2822084f41111f582957191b19
1e3e223704fe1d2e1f592753e5550f15170b231b4234e945301f5605a670
300d322759ea0337015c662a0e073809543f2741104835512d0624551751
373727ef1f41084d0b5c0c0137283b1337026aea1c5ae115064ffa183402
09152b11e1233e5a0e302a521c5a33181e180026463744a82c024b4bf04e
1df61df1263fee59135c13400950153d3c5c59183b020b1d2d2c492f4968
e2000c405a01ede30c4c082e2537443c120f38fc57c43651423e5c3beb1d
1922182420191b293e163d58020b005f454a0621051a38e80b090a463ee9
39513f2d47042c0fe5134419ec48490f150f323a5ee7a7e0201e193a5e1b
2037200a2b1013567b35fb4a0f322c2f49435d091920521c302b413f5f35
775d1a345b483b35a02a4c3e17ee3a3d5a5b57153613264f23041922432f
35125b3e0a1d2257eb002a26455e1a2f042e1545e92f0b3408032c4f3551
2d4c392321300a18ed4f3e2c314d20500052aa3917e55d0d29500754282e
381b2e263758f63c474a1c23110c2d5f1c220412e91043580656080c0427
081ce1e5350b6a3535f0e6592e5b543432340e38f008e0324102e45a3f25
30040c181615362e4d1016160a4a5c006eeb1d2422355a3f1028ff192a07
53f6354d4b5d121974245c14f0225713331f2e381810101428571725e432
1a2c06372d5b1419742150042d25003c2650512834ef16e51d183f0f0508
3d191107251100ee2e4125405a44174f061e0e1e5959e606530e06ed245e
3f592d47512dec5922500e460e1de7183b4c3c2e583942255a0c5d4d2305
3438001e482a002d56113a1fe13bed542d3508e22f4e22221431121c1539
ed445a5d28415073eb18022ef836274d573a48090f2a663058194901405d
215b143954fc313c1e28584b51e729ef31013b232bfb4c52e2322a2d4557
5244102e1c3d304450ee01761924e62ff2173305e15809102b2125284dfc
171a3f010f3639056f2be71c2047581de32e05a20833e1221b0e25362459
2958280de238084f5a1c292e005be71f3b311e1f415809383d3862260238
361f56ecee120156375862eb3627185c2519545149e2e50b1f3b0c4e3352
e6115f440634e4005d273611e41c5d383c3814537b3d23362b084024345b
10370656372e0236eb4f3303e216505f0e465228383729394faa2f205f34
2e125b2f2c1d0f1f170e0c51331f0c06291610345c0603791f33253f0e0c
1c2b080526133aeb3e23571d4cfa1e48057a2a010a490a50391b09514f2e
59383ae11237e5450029162d2e1d3e09221a160e42ea06ea0ca7c7ecf4ea
3d3024f34d5c07464bea3b185e110d3a10395d3b2632343cf30ca2e6065a
262f111c0e15441a4825111b185f1e5756243206125f4603e97e79582d27
2d5801ee2654113e2da00b58e9260d643c10423e1d1f42093b0d0f7d5102
3649211f210456051e290f1b4c584d0749220c280b2a50531f262901503e
52053e3e152b5b2b4415580fec57ef5c08e5ed43cc2d2e5b40355d0d2017
6d3917263f030c4b55f0025d501e57504a122729293c4c5819680d3001ed
1e313323324e5e177b171cf70c371541395c0e2b7726e42505483014362e
1910e4f7253f0a012057e03b1e3b4201362b224ff60e0b3a1d115b043957
200c1e0b242e5e3b4755f61e3be05c040908f1234358e55562711d2efa0f
0737e0160b1d13132044080d2325f1f0ee2f00354f2106471131020a5d0b
3f21060de62c052a17576e2ce729242b3e3621300627f01e52580a480050
1b381a11351f4f5d22040c3c4b3e7d263714e8e61a571d107a34260a4a51
edf52314e111207c0b23eb482f441d211f306137152407040e08530a783e
3c054e2d4e2905275e640220f74f1a193f54e1ed5b4e2a290eab27a55147
33522817335316ea2f3df957e25e02030601514f09f74c2fedee102d3114
5d05231d03313826164156110c44e4111f4658005e115e300f413b430300
380bf53a4331f74627492c133fe8eb3141ee39040def040c1a0ae914e3ed
5b00f0211f0a091e05582e22f05a5d262e0ce352251d25100b102b11e339
36053935f051f959093252411e2d5af81f360c0fa15d0b373b1d26323b77
501424184202206215e05944505c4817514540445b0207025de05b050932
0a5a114515536f553a352c513f0b12f700345fa51d5efb28222676e559ea
561b0557403f5f534a574638411e2d3b3c133f79555c333215e6f5f9e7ec
6658f7210218110f00062752e305f21601442c5310162445ed4d175630f3
0e2154253c4a22f02e1b0933351314071b521513235031250c18120024a1
e03555453d1e31775f37331823164c341c09e310463438481019fb0b12fa
37eee654410e4007501f2c0e42faf50125075b2b46164f165a1003097f08
2a5332145851553926523965582e5b2f530d5d1e292046344feaed461517
583d2b06251f551d2f5451110911e6034147481a05166e1f241a5817015b
1f2d3f5c310c315402200010e24135592435f71b4640540a041012ee1b3f
5b2010060e2f5a4d045e0b36192f79181b0732183b4a261038340032f434
3a5557340be6f5315c35112912393503320f54065f0e275a3b5853352008
1c595d183539220eec123478535337110424f90a355af44c267be848173f
41053f5cef5f6f56e4f5410a5407281600200b2649460a2e3a3c38492a0c
4c071a57e9356ee415103c5c53e254063f2019340969e30a2e381d5b2555
32042f46431d2c44607934ed180c1028136a5f2b26092e3b2c4e2930585a"""
run_main(lines) | {
"repo_name": "8u1a/my_matasano_crypto_challenges",
"path": "set1/challenge4.py",
"copies": "1",
"size": "21733",
"license": "unlicense",
"hash": -5676559763236223000,
"line_mean": 55.5989583333,
"line_max": 92,
"alpha_frac": 0.9484654673,
"autogenerated": false,
"ratio": 1.8335442504007424,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.32820097177007423,
"avg_score": null,
"num_lines": null
} |
__author__ = 'christianbuia'
from Crypto.Cipher import AES
import base64
#-----------------------------------------------------------------------------------------------------------------------
def solve_challenge(b64_crypt):
    """Decrypt the base64-encoded AES-128-ECB ciphertext with the fixed
    key "YELLOW SUBMARINE" and print the resulting plaintext.

    Returns True once the plaintext has been printed.
    """
    # http://bityard.blogspot.com/2010/10/symmetric-encryption-with-pycrypto-part.html
    key = "YELLOW SUBMARINE"
    raw = base64.decodebytes(bytes(b64_crypt, "ascii"))
    cipher_obj = AES.new(key, AES.MODE_ECB)
    print(cipher_obj.decrypt(raw).decode("utf-8"))
    return True
#=======================================================================================================================
b64_crypt = """CRIwqt4+szDbqkNY+I0qbDe3LQz0wiw0SuxBQtAM5TDdMbjCMD/venUDW9BL
PEXODbk6a48oMbAY6DDZsuLbc0uR9cp9hQ0QQGATyyCESq2NSsvhx5zKlLtz
dsnfK5ED5srKjK7Fz4Q38/ttd+stL/9WnDzlJvAo7WBsjI5YJc2gmAYayNfm
CW2lhZE/ZLG0CBD2aPw0W417QYb4cAIOW92jYRiJ4PTsBBHDe8o4JwqaUac6
rqdi833kbyAOV/Y2RMbN0oDb9Rq8uRHvbrqQJaJieaswEtMkgUt3P5Ttgeh7
J+hE6TR0uHot8WzHyAKNbUWHoi/5zcRCUipvVOYLoBZXlNu4qnwoCZRSBgvC
wTdz3Cbsp/P2wXB8tiz6l9rL2bLhBt13Qxyhhu0H0+JKj6soSeX5ZD1Rpilp
9ncR1tHW8+uurQKyXN4xKeGjaKLOejr2xDIw+aWF7GszU4qJhXBnXTIUUNUf
RlwEpS6FZcsMzemQF30ezSJHfpW7DVHzwiLyeiTJRKoVUwo43PXupnJXDmUy
sCa2nQz/iEwyor6kPekLv1csm1Pa2LZmbA9Ujzz8zb/gFXtQqBAN4zA8/wt0
VfoOsEZwcsaLOWUPtF/Ry3VhlKwXE7gGH/bbShAIKQqMqqUkEucZ3HPHAVp7
ZCn3Ox6+c5QJ3Uv8V7L7SprofPFN6F+kfDM4zAc59do5twgDoClCbxxG0L19
TBGHiYP3CygeY1HLMrX6KqypJfFJW5O9wNIF0qfOC2lWFgwayOwq41xdFSCW
0/EBSc7cJw3N06WThrW5LimAOt5L9c7Ik4YIxu0K9JZwAxfcU4ShYu6euYmW
LP98+qvRnIrXkePugS9TSOJOHzKUoOcb1/KYd9NZFHEcp58Df6rXFiz9DSq8
0rR5Kfs+M+Vuq5Z6zY98/SP0A6URIr9NFu+Cs9/gf+q4TRwsOzRMjMQzJL8f
7TXPEHH2+qEcpDKz/5pE0cvrgHr63XKu4XbzLCOBz0DoFAw3vkuxGwJq4Cpx
kt+eCtxSKUzNtXMn/mbPqPl4NZNJ8yzMqTFSODS4bYTBaN/uQYcOAF3NBYFd
5x9TzIAoW6ai13a8h/s9i5FlVRJDe2cetQhArrIVBquF0L0mUXMWNPFKkaQE
BsxpMCYh7pp7YlyCNode12k5jY1/lc8jQLQJ+EJHdCdM5t3emRzkPgND4a7O
NhoIkUUS2R1oEV1toDj9iDzGVFwOvWyt4GzA9XdxT333JU/n8m+N6hs23MBc
Z086kp9rJGVxZ5f80jRz3ZcjU6zWjR9ucRyjbsuVn1t4EJEm6A7KaHm13m0v
wN/O4KYTiiY3aO3siayjNrrNBpn1OeLv9UUneLSCdxcUqjRvOrdA5NYv25Hb
4wkFCIhC/Y2ze/kNyis6FrXtStcjKC1w9Kg8O25VXB1Fmpu+4nzpbNdJ9LXa
hF7wjOPXN6dixVKpzwTYjEFDSMaMhaTOTCaqJig97624wv79URbCgsyzwaC7
YXRtbTstbFuEFBee3uW7B3xXw72mymM2BS2uPQ5NIwmacbhta8aCRQEGqIZ0
78YrrOlZIjar3lbTCo5o6nbbDq9bvilirWG/SgWINuc3pWl5CscRcgQQNp7o
LBgrSkQkv9AjZYcvisnr89TxjoxBO0Y93jgp4T14LnVwWQVx3l3d6S1wlsci
dVeaM24E/JtS8k9XAvgSoKCjyiqsawBMzScXCIRCk6nqX8ZaJU3rZ0LeOMTU
w6MC4dC+aY9SrCvNQub19mBdtJUwOBOqGdfd5IoqQkaL6DfOkmpnsCs5PuLb
GZBVhah5L87IY7r6TB1V7KboXH8PZIYc1zlemMZGU0o7+etxZWHgpdeX6JbJ
Is3ilAzYqw/Hz65no7eUxcDg1aOaxemuPqnYRGhW6PvjZbwAtfQPlofhB0jT
Ht5bRlzF17rn9q/6wzlc1ssp2xmeFzXoxffpELABV6+yj3gfQ/bxIB9NWjdZ
K08RX9rjm9CcBlRQeTZrD67SYQWqRpT5t7zcVDnx1s7ZffLBWm/vXLfPzMaQ
YEJ4EfoduSutjshXvR+VQRPs2TWcF7OsaE4csedKUGFuo9DYfFIHFDNg+1Py
rlWJ0J/X0PduAuCZ+uQSsM/ex/vfXp6Z39ngq4exUXoPtAIqafrDMd8SuAty
EZhyY9V9Lp2qNQDbl6JI39bDz+6pDmjJ2jlnpMCezRK89cG11IqiUWvIPxHj
oiT1guH1uk4sQ2Pc1J4zjJNsZgoJDcPBbfss4kAqUJvQyFbzWshhtVeAv3dm
gwUENIhNK/erjpgw2BIRayzYw001jAIF5c7rYg38o6x3YdAtU3d3QpuwG5xD
fODxzfL3yEKQr48C/KqxI87uGwyg6H5gc2AcLU9JYt5QoDFoC7PFxcE3RVqc
7/Um9Js9X9UyriEjftWt86/tEyG7F9tWGxGNEZo3MOydwX/7jtwoxQE5ybFj
WndqLp8DV3naLQsh/Fz8JnTYHvOR72vuiw/x5D5PFuXV0aSVvmw5Wnb09q/B
owS14WzoHH6ekaWbh78xlypn/L/M+nIIEX1Ol3TaVOqIxvXZ2sjm86xRz0Ed
oHFfupSekdBULCqptxpFpBshZFvauUH8Ez7wA7wjL65GVlZ0f74U7MJVu9Sw
sZdgsLmnsQvr5n2ojNNBEv+qKG2wpUYTmWRaRc5EClUNfhzh8iDdHIsl6edO
ewORRrNiBay1NCzlfz1cj6VlYYQUM9bDEyqrwO400XQNpoFOxo4fxUdd+AHm
CBhHbyCR81/C6LQTG2JQBvjykG4pmoqnYPxDyeiCEG+JFHmP1IL+jggdjWhL
WQatslrWxuESEl3PEsrAkMF7gt0dBLgnWsc1cmzntG1rlXVi/Hs2TAU3RxEm
MSWDFubSivLWSqZj/XfGWwVpP6fsnsfxpY3d3h/fTxDu7U8GddaFRQhJ+0ZO
dx6nRJUW3u6xnhH3mYVRk88EMtpEpKrSIWfXphgDUPZ0f4agRzehkn9vtzCm
NjFnQb0/shnqTh4Mo/8oommbsBTUKPYS7/1oQCi12QABjJDt+LyUan+4iwvC
i0k0IUIHvk21381vC0ixYDZxzY64+xx/RNID+iplgzq9PDZgjc8L7jMg+2+m
rxPS56e71m5E2zufZ4d+nFjIg+dHD/ShNPzVpXizRVUERztLuak8Asah3/yv
wOrH1mKEMMGC1/6qfvZUgFLJH5V0Ep0n2K/Fbs0VljENIN8cjkCKdG8aBnef
EhITdV7CVjXcivQ6efkbOQCfkfcwWpaBFC8tD/zebXFE+JshW16D4EWXMnSm
/9HcGwHvtlAj04rwrZ5tRvAgf1IR83kqqiTvqfENcj7ddCFwtNZrQK7EJhgB
5Tr1tBFcb9InPRtS3KYteYHl3HWR9t8E2YGE8IGrS1sQibxaK/C0kKbqIrKp
npwtoOLsZPNbPw6K2jpko9NeZAx7PYFmamR4D50KtzgELQcaEsi5aCztMg7f
p1mK6ijyMKIRKwNKIYHagRRVLNgQLg/WTKzGVbWwq6kQaQyArwQCUXo4uRty
zGMaKbTG4dns1OFB1g7NCiPb6s1lv0/lHFAF6HwoYV/FPSL/pirxyDSBb/FR
RA3PIfmvGfMUGFVWlyS7+O73l5oIJHxuaJrR4EenzAu4Avpa5d+VuiYbM10a
LaVegVPvFn4pCP4U/Nbbw4OTCFX2HKmWEiVBB0O3J9xwXWpxN1Vr5CDi75Fq
NhxYCjgSJzWOUD34Y1dAfcj57VINmQVEWyc8Tch8vg9MnHGCOfOjRqp0VGyA
S15AVD2QS1V6fhRimJSVyT6QuGb8tKRsl2N+a2Xze36vgMhw7XK7zh//jC2H""".replace("\n", "")
solve_challenge(b64_crypt) | {
"repo_name": "8u1a/my_matasano_crypto_challenges",
"path": "set1/challenge7.py",
"copies": "1",
"size": "4632",
"license": "unlicense",
"hash": 7732086924303570000,
"line_mean": 53.5058823529,
"line_max": 120,
"alpha_frac": 0.8784542314,
"autogenerated": false,
"ratio": 1.5019455252918288,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.2880399756691829,
"avg_score": null,
"num_lines": null
} |
__author__ = 'christianbuia'
from Crypto.Cipher import AES
import base64
#-----------------------------------------------------------------------------------------------------------------------
def pkcs7_padding(message_bytes, block_size):
    """Pad *message_bytes* in place to a multiple of *block_size* using
    PKCS#7 (RFC 5652 section 6.3).

    Each padding byte holds the pad length.  PKCS#7 ALWAYS pads: when
    the message length is already a multiple of the block size, a full
    block of padding bytes is appended.  The original code skipped
    padding in that case, which makes unambiguous de-padding
    impossible (a trailing plaintext byte of 0x01 would be
    indistinguishable from padding).

    Parameters
    ----------
    message_bytes : bytearray
        Mutable buffer that is padded in place.
    block_size : int
        Cipher block size in bytes (16 for AES).

    Returns
    -------
    bytearray
        The same (now padded) buffer, returned for call chaining.
    """
    pad_length = block_size - (len(message_bytes) % block_size)
    # Always append padding, including a full block when already
    # aligned -- that is what makes the padding reversible.
    message_bytes.extend([pad_length] * pad_length)
    return message_bytes
#-----------------------------------------------------------------------------------------------------------------------
#always 16 bytes
def decrypt_aes128(message, key):
    """Decrypt raw *message* bytes with AES-128 in ECB mode using *key*."""
    return AES.new(key, AES.MODE_ECB).decrypt(message)
#-----------------------------------------------------------------------------------------------------------------------
#always 16 bytes
def encrypt_aes128(message, key):
    """Encrypt raw *message* bytes with AES-128 in ECB mode using *key*."""
    return AES.new(key, AES.MODE_ECB).encrypt(message)
#-----------------------------------------------------------------------------------------------------------------------
def encrypt_aes128_cbc(message, key, vector):
    """Encrypt *message* with AES-128 in CBC mode built from the ECB
    primitive.

    *message* is a mutable bytearray (it is PKCS#7-padded in place),
    *key* is the 16-byte AES key and *vector* the 16-byte IV.
    Returns the ciphertext as bytes.
    """
    padded = pkcs7_padding(message, 16)
    cipher_chunks = []
    # CBC chaining: XOR each plaintext block with the previous
    # ciphertext block (the IV for the first), then ECB-encrypt it.
    for start in range(0, len(padded), 16):
        block = padded[start:start + 16]
        mixed = bytearray(p ^ v for p, v in zip(block, vector))
        vector = encrypt_aes128(bytes(mixed), key)
        cipher_chunks.append(vector)
    return b''.join(cipher_chunks)
#-----------------------------------------------------------------------------------------------------------------------
def decrypt_aes128_cbc(message, key, vector):
    """Decrypt AES-128-CBC *message* using the ECB primitive.

    *key* is the 16-byte AES key and *vector* the 16-byte IV.  Returns
    the raw plaintext as bytes; PKCS#7 padding is NOT removed (same as
    the original implementation -- see its TODO).
    """
    plain_chunks = []
    for start in range(0, len(message), 16):
        block = message[start:start + 16]
        raw = bytearray(decrypt_aes128(bytes(block), key))
        # Undo the CBC chaining: XOR with the previous ciphertext
        # block (the IV for the very first block).
        plain_chunks.append(bytearray(d ^ v for d, v in zip(raw, vector)))
        vector = block
    return b''.join(plain_chunks)
#-----------------------------------------------------------------------------------------------------------------------
def solve_challenge(b64_crypt):
    """Decrypt the base64 AES-128-CBC ciphertext from Cryptopals set 2
    challenge 10 and print the plaintext line by line.

    The challenge specifies an IV of sixteen zero BYTES (\\x00).  The
    original code used sixteen ASCII '0' characters (0x30), which is
    exactly why its first plaintext block came out garbled: only the
    first block is XORed with the IV, later blocks chain off the
    previous ciphertext block and were unaffected.

    Returns True once the plaintext has been printed.
    """
    init_vector = bytearray(16)  # sixteen \x00 bytes, per the challenge
    cipher = bytes(base64.b64decode(b64_crypt))
    plain = decrypt_aes128_cbc(cipher, bytes("YELLOW SUBMARINE", "ascii"),
                               init_vector)
    # Decode the raw bytes instead of calling str() on them, so we can
    # split on real newlines rather than on the escaped "\\n" sequences
    # of a repr() string.  errors="replace" tolerates the trailing
    # PKCS#7 padding bytes that are never stripped.
    for line in plain.decode("ascii", errors="replace").split("\n"):
        print(line)
    return True
#=======================================================================================================================
b64_crypt = """CRIwqt4+szDbqkNY+I0qbNXPg1XLaCM5etQ5Bt9DRFV/xIN2k8Go7jtArLIy
P605b071DL8C+FPYSHOXPkMMMFPAKm+Nsu0nCBMQVt9mlluHbVE/yl6VaBCj
NuOGvHZ9WYvt51uR/lklZZ0ObqD5UaC1rupZwCEK4pIWf6JQ4pTyPjyiPtKX
g54FNQvbVIHeotUG2kHEvHGS/w2Tt4E42xEwVfi29J3yp0O/TcL7aoRZIcJj
MV4qxY/uvZLGsjo1/IyhtQp3vY0nSzJjGgaLYXpvRn8TaAcEtH3cqZenBoox
BH3MxNjD/TVf3NastEWGnqeGp+0D9bQx/3L0+xTf+k2VjBDrV9HPXNELRgPN
0MlNo79p2gEwWjfTbx2KbF6htgsbGgCMZ6/iCshy3R8/abxkl8eK/VfCGfA6
bQQkqs91bgsT0RgxXSWzjjvh4eXTSl8xYoMDCGa2opN/b6Q2MdfvW7rEvp5m
wJOfQFDtkv4M5cFEO3sjmU9MReRnCpvalG3ark0XC589rm+42jC4/oFWUdwv
kzGkSeoabAJdEJCifhvtGosYgvQDARUoNTQAO1+CbnwdKnA/WbQ59S9MU61Q
KcYSuk+jK5nAMDot2dPmvxZIeqbB6ax1IH0cdVx7qB/Z2FlJ/U927xGmC/RU
FwoXQDRqL05L22wEiF85HKx2XRVB0F7keglwX/kl4gga5rk3YrZ7VbInPpxU
zgEaE4+BDoEqbv/rYMuaeOuBIkVchmzXwlpPORwbN0/RUL89xwOJKCQQZM8B
1YsYOqeL3HGxKfpFo7kmArXSRKRHToXuBgDq07KS/jxaS1a1Paz/tvYHjLxw
Y0Ot3kS+cnBeq/FGSNL/fFV3J2a8eVvydsKat3XZS3WKcNNjY2ZEY1rHgcGL
5bhVHs67bxb/IGQleyY+EwLuv5eUwS3wljJkGcWeFhlqxNXQ6NDTzRNlBS0W
4CkNiDBMegCcOlPKC2ZLGw2ejgr2utoNfmRtehr+3LAhLMVjLyPSRQ/zDhHj
Xu+Kmt4elmTmqLgAUskiOiLYpr0zI7Pb4xsEkcxRFX9rKy5WV7NhJ1lR7BKy
alO94jWIL4kJmh4GoUEhO+vDCNtW49PEgQkundV8vmzxKarUHZ0xr4feL1ZJ
THinyUs/KUAJAZSAQ1Zx/S4dNj1HuchZzDDm/nE/Y3DeDhhNUwpggmesLDxF
tqJJ/BRn8cgwM6/SMFDWUnhkX/t8qJrHphcxBjAmIdIWxDi2d78LA6xhEPUw
NdPPhUrJcu5hvhDVXcceZLa+rJEmn4aftHm6/Q06WH7dq4RaaJePP6WHvQDp
zZJOIMSEisApfh3QvHqdbiybZdyErz+yXjPXlKWG90kOz6fx+GbvGcHqibb/
HUfcDosYA7lY4xY17llY5sibvWM91ohFN5jyDlHtngi7nWQgFcDNfSh77TDT
zltUp9NnSJSgNOOwoSSNWadm6+AgbXfQNX6oJFaU4LQiAsRNa7vX/9jRfi65
5uvujM4ob199CZVxEls10UI9pIemAQQ8z/3rgQ3eyL+fViyztUPg/2IvxOHv
eexE4owH4Fo/bRlhZK0mYIamVxsRADBuBlGqx1b0OuF4AoZZgUM4d8v3iyUu
feh0QQqOkvJK/svkYHn3mf4JlUb2MTgtRQNYdZKDRgF3Q0IJaZuMyPWFsSNT
YauWjMVqnj0AEDHh6QUMF8bXLM0jGwANP+r4yPdKJNsoZMpuVoUBJYWnDTV+
8Ive6ZgBi4EEbPbMLXuqDMpDi4XcLE0UUPJ8VnmO5fAHMQkA64esY2QqldZ+
5gEhjigueZjEf0917/X53ZYWJIRiICnmYPoM0GSYJRE0k3ycdlzZzljIGk+P
Q7WgeJhthisEBDbgTuppqKNXLbNZZG/VaTdbpW1ylBv0eqamFOmyrTyh1APS
Gn37comTI3fmN6/wmVnmV4/FblvVwLuDvGgSCGPOF8i6FVfKvdESs+yr+1AE
DJXfp6h0eNEUsM3gXaJCknGhnt3awtg1fSUiwpYfDKZxwpPOYUuer8Wi+VCD
sWsUpkMxhhRqOBKaQaBDQG+kVJu6aPFlnSPQQTi1hxLwi0l0Rr38xkr+lHU7
ix8LeJVgNsQdtxbovE3i7z3ZcTFY7uJkI9j9E0muDN9x8y/YN25rm6zULYaO
jUoP/7FQZsSgxPIUvUiXkEq+FU2h0FqAC7H18cr3Za5x5dpw5nwawMArKoqG
9qlhqc34lXV0ZYwULu58EImFIS8+kITFuu7jOeSXbBgbhx8zGPqavRXeiu0t
bJd0gWs+YgMLzXtQIbQuVZENMxJSZB4aw5lPA4vr1fFBsiU4unjOEo/XAgwr
Tc0w0UndJFPvXRr3Ir5rFoIEOdRo+6os5DSlk82SBnUjwbje7BWsxWMkVhYO
6bOGUm4VxcKWXu2jU66TxQVIHy7WHktMjioVlWJdZC5Hq0g1LHg1nWSmjPY2
c/odZqN+dBBC51dCt4oi5UKmKtU5gjZsRSTcTlfhGUd6DY4Tp3CZhHjQRH4l
Zhg0bF/ooPTxIjLKK4r0+yR0lyRjqIYEY27HJMhZDXFDxBQQ1UkUIhAvXacD
WB2pb3YyeSQjt8j/WSbQY6TzdLq8SreZiuMWcXmQk4EH3xu8bPsHlcvRI+B3
gxKeLnwrVJqVLkf3m2cSGnWQhSLGbnAtgQPA6z7u3gGbBmRtP0KnAHWSK7q6
onMoYTH+b5iFjCiVRqzUBVzRRKjAL4rcL2nYeV6Ec3PlnboRzJwZIjD6i7WC
dcxERr4WVOjOBX4fhhKUiVvlmlcu8CkIiSnZENHZCpI41ypoVqVarHpqh2aP
/PS624yfxx2N3C2ci7VIuH3DcSYcaTXEKhz/PRLJXkRgVlWxn7QuaJJzDvpB
oFndoRu1+XCsup/AtkLidsSXMFTo/2Ka739+BgYDuRt1mE9EyuYyCMoxO/27
sn1QWMMd1jtcv8Ze42MaM4y/PhAMp2RfCoVZALUS2K7XrOLl3s9LDFOdSrfD
8GeMciBbfLGoXDvv5Oqq0S/OvjdID94UMcadpnSNsist/kcJJV0wtRGfALG2
+UKYzEj/2TOiN75UlRvA5XgwfqajOvmIIXybbdhxpjnSB04X3iY82TNSYTmL
LAzZlX2vmV9IKRRimZ2SpzNpvLKeB8lDhIyGzGXdiynQjFMNcVjZlmWHsH7e
ItAKWmCwNkeuAfFwir4TTGrgG1pMje7XA7kMT821cYbLSiPAwtlC0wm77F0T
a7jdMrLjMO29+1958CEzWPdzdfqKzlfBzsba0+dS6mcW/YTHaB4bDyXechZB
k/35fUg+4geMj6PBTqLNNWXBX93dFC7fNyda+Lt9cVJnlhIi/61fr0KzxOeX
NKgePKOC3Rz+fWw7Bm58FlYTgRgN63yFWSKl4sMfzihaQq0R8NMQIOjzuMl3
Ie5ozSa+y9g4z52RRc69l4n4qzf0aErV/BEe7FrzRyWh4PkDj5wy5ECaRbfO
7rbs1EHlshFvXfGlLdEfP2kKpT9U32NKZ4h+Gr9ymqZ6isb1KfNov1rw0KSq
YNP+EyWCyLRJ3EcOYdvVwVb+vIiyzxnRdugB3vNzaNljHG5ypEJQaTLphIQn
lP02xcBpMNJN69bijVtnASN/TLV5ocYvtnWPTBKu3OyOkcflMaHCEUgHPW0f
mGfld4i9Tu35zrKvTDzfxkJX7+KJ72d/V+ksNKWvwn/wvMOZsa2EEOfdCidm
oql027IS5XvSHynQtvFmw0HTk9UXt8HdVNTqcdy/jUFmXpXNP2Wvn8PrU2Dh
kkIzWhQ5Rxd/vnM2QQr9Cxa2J9GXEV3kGDiZV90+PCDSVGY4VgF8y7GedI1h""".replace("\n", "")
solve_challenge(b64_crypt) | {
"repo_name": "8u1a/my_matasano_crypto_challenges",
"path": "set2/challenge10.py",
"copies": "1",
"size": "7670",
"license": "unlicense",
"hash": -1193394356072996900,
"line_mean": 41.1483516484,
"line_max": 201,
"alpha_frac": 0.7374185137,
"autogenerated": false,
"ratio": 2.038809144072302,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3276227657772302,
"avg_score": null,
"num_lines": null
} |
__author__ = 'christianbuia'
import binascii
import sys
import base64
def hamming_distance_two_hexstrings(hexstring1, hexstring2):
    """Return the bit-level Hamming distance between two equal-length hex strings.

    Both arguments are hex encodings (str or bytes, as accepted by
    binascii.unhexlify).  On a length mismatch a message is written to
    stderr and False is returned, preserving the original contract.
    """
    if len(hexstring1) != len(hexstring2):
        sys.stderr.write("unexpected: length of compared strings don't match. exiting.\n")
        return False
    bytes1 = binascii.unhexlify(hexstring1)
    bytes2 = binascii.unhexlify(hexstring2)
    # XOR leaves a 1 bit exactly where the two inputs differ, so the
    # distance is the total population count over the XORed byte pairs.
    # (Replaces the original's manual binary-string construction.)
    return sum(bin(b1 ^ b2).count("1") for b1, b2 in zip(bytes1, bytes2))
#--------------------------------------------------------------------------
def multibyte_xor(bs, key):
    """XOR every byte of *bs* against *key*, cycling the key as needed."""
    key_len = len(key)
    return bytearray(byte ^ key[pos % key_len] for pos, byte in enumerate(bs))
#--------------------------------------------------------------------------
def fixed_xor_hexstrings(hexstring1, key):
    """Decode *hexstring1* from hex and XOR each byte with the single-byte *key*.

    Returns the result as a str (one chr per XORed byte).
    """
    raw = binascii.unhexlify(hexstring1)
    return "".join(chr(b ^ key) for b in raw)
#--------------------------------------------------------------------------
def evaluate_as_english(message, ratio_common_printables, ratio_spaces_to_letters):
    """Heuristically decide whether *message* (a str) looks like English.

    A character is a "common printable" when it is a letter-ish code
    (65..122, matching the original heuristic), a digit, or one of a small
    punctuation set.  Returns True only when the common-printable ratio
    exceeds *ratio_common_printables* AND the space-to-letter ratio exceeds
    *ratio_spaces_to_letters*; otherwise False.

    Fixes: the original raised ZeroDivisionError on an empty message (or one
    with no letters/spaces) and implicitly returned None on one branch; this
    version always returns a bool.
    """
    count_cp = 0        # common printables
    count_ncp = 0       # everything else
    count_letters = 0
    count_spaces = 0
    for ch in message:
        code = ord(ch)
        # 65..122 spans A-Z and a-z (plus a few symbols in between,
        # deliberately matching the original check).
        is_letter = 64 < code < 123
        is_number = 47 < code < 58
        is_punct = code in (32, 33, 34, 40, 41, 46, 63)
        if is_letter:
            count_letters += 1
        if code == 32:
            count_spaces += 1
        if is_letter or is_number or is_punct:
            count_cp += 1
        else:
            count_ncp += 1
    total = count_cp + count_ncp
    # Guard the divisions: an empty message, or one containing no letters
    # or spaces, cannot be scored and is not English.
    if total == 0 or (count_letters + count_spaces) == 0:
        return False
    if count_cp / total <= ratio_common_printables:
        return False
    return count_spaces / (count_letters + count_spaces) > ratio_spaces_to_letters
#--------------------------------------------------------------------------
def solve_block(block_data):
    """Brute-force the single XOR key byte for one transposed block.

    Tries every byte value and returns the first one whose decryption
    scores as English; returns False when no candidate fits.
    """
    for candidate in range(256):
        if evaluate_as_english(fixed_xor_hexstrings(block_data, candidate), .8, .1):
            return candidate
    return False
#--------------------------------------------------------------------------
def transpose_blocks(ciphertext, block_size):
    """Group ciphertext bytes by their index modulo *block_size*.

    Returns a list of *block_size* bytearrays; the i-th holds every byte at
    an index congruent to i — i.e. all bytes XORed with key position i.
    """
    return [bytearray(ciphertext[offset::block_size]) for offset in range(block_size)]
#--------------------------------------------------------------------------
def solve_challenge(b64_crypt):
    """Break repeating-key XOR over a base64-encoded ciphertext.

    Scores candidate key sizes by averaged, normalized Hamming distance
    between adjacent ciphertext chunks, transposes the ciphertext by key
    position, solves each position as a single-byte XOR, and prints the
    resulting plaintext for each candidate key size.  Always returns True.
    """
    ciphertext = base64.b64decode(b64_crypt)
    #dictionary of hamming distances in the form {'keysize':'distance'}
    keysize_hamming_distances = {}
    # Key sizes 2..39 are considered (sizes < 2 are skipped below).
    for x in range(40):
        if x < 2:
            continue
        distances = []
        #compute the average normalized hamming distance given keysize x
        for i in range((len(ciphertext) // x) - 1):
            h = hamming_distance_two_hexstrings(binascii.hexlify(ciphertext[i*x:i*x+x]), binascii.hexlify(ciphertext[(i+1)*x:(i+1)*x+x]))
            h_normal = h / x
            distances.append(h_normal)
        keysize_hamming_distances[x] = sum(distances)/len(distances)
    keysize_candidates_size = 1
    keysize_candidates = []
    c = 0
    #determine candidate keysizes
    # NOTE(review): the inner `continue` is a no-op (last statement of the
    # loop body), and `c` accumulates across outer iterations, so with
    # keysize_candidates_size == 1 only the lowest-distance keysize(s) are
    # collected before the break — verify before changing the threshold.
    for v in sorted(keysize_hamming_distances.values()):
        for i in keysize_hamming_distances.keys():
            if keysize_hamming_distances[i] == v:
                keysize_candidates.append(i)
                c += 1
                continue
        if c < keysize_candidates_size:
            continue
        else:
            break
    #for each key size, attempt to solve the multibyte key
    for k_candidate in keysize_candidates:
        standard_blocks = [ciphertext[x:x+k_candidate] for x in range(0, len(ciphertext), k_candidate)]
        transposed_blocks = transpose_blocks(ciphertext, k_candidate)
        # One solved XOR byte per transposed block reassembles the key.
        xor_bytes = [solve_block(binascii.hexlify(tblock)) for tblock in transposed_blocks]
        key = bytearray(xor_bytes)
        plaintext = multibyte_xor(ciphertext, key)
        print(plaintext.decode("utf-8"))
    return True
#--------------------------------------------------------------------------
#hamming function test
#string1 = "this is a test"
#string2 = "wokka wokka!!!"
#print(hamming_distance_two_hexstrings(binascii.hexlify(bytes(string1, "ascii")), binascii.hexlify(bytes(string2, "ascii"))))
b64_crypt = """HUIfTQsPAh9PE048GmllH0kcDk4TAQsHThsBFkU2AB4BSWQgVB0dQzNTTmVS
BgBHVBwNRU0HBAxTEjwMHghJGgkRTxRMIRpHKwAFHUdZEQQJAGQmB1MANxYG
DBoXQR0BUlQwXwAgEwoFR08SSAhFTmU+Fgk4RQYFCBpGB08fWXh+amI2DB0P
QQ1IBlUaGwAdQnQEHgFJGgkRAlJ6f0kASDoAGhNJGk9FSA8dDVMEOgFSGQEL
QRMGAEwxX1NiFQYHCQdUCxdBFBZJeTM1CxsBBQ9GB08dTnhOSCdSBAcMRVhI
CEEATyBUCHQLHRlJAgAOFlwAUjBpZR9JAgJUAAELB04CEFMBJhAVTQIHAh9P
G054MGk2UgoBCVQGBwlTTgIQUwg7EAYFSQ8PEE87ADpfRyscSWQzT1QCEFMa
TwUWEXQMBk0PAg4DQ1JMPU4ALwtJDQhOFw0VVB1PDhxFXigLTRkBEgcKVVN4
Tk9iBgELR1MdDAAAFwoFHww6Ql5NLgFBIg4cSTRWQWI1Bk9HKn47CE8BGwFT
QjcEBx4MThUcDgYHKxpUKhdJGQZZVCFFVwcDBVMHMUV4LAcKQR0JUlk3TwAm
HQdJEwATARNFTg5JFwQ5C15NHQYEGk94dzBDADsdHE4UVBUaDE5JTwgHRTkA
Umc6AUETCgYAN1xGYlUKDxJTEUgsAA0ABwcXOwlSGQELQQcbE0c9GioWGgwc
AgcHSAtPTgsAABY9C1VNCAINGxgXRHgwaWUfSQcJABkRRU8ZAUkDDTUWF01j
OgkRTxVJKlZJJwFJHQYADUgRSAsWSR8KIgBSAAxOABoLUlQwW1RiGxpOCEtU
YiROCk8gUwY1C1IJCAACEU8QRSxORTBSHQYGTlQJC1lOBAAXRTpCUh0FDxhU
ZXhzLFtHJ1JbTkoNVDEAQU4bARZFOwsXTRAPRlQYE042WwAuGxoaAk5UHAoA
ZCYdVBZ0ChQLSQMYVAcXQTwaUy1SBQsTAAAAAAAMCggHRSQJExRJGgkGAAdH
MBoqER1JJ0dDFQZFRhsBAlMMIEUHHUkPDxBPH0EzXwArBkkdCFUaDEVHAQAN
U29lSEBAWk44G09fDXhxTi0RAk4ITlQbCk0LTx4cCjBFeCsGHEETAB1EeFZV
IRlFTi4AGAEORU4CEFMXPBwfCBpOAAAdHUMxVVUxUmM9ElARGgZBAg4PAQQz
DB4EGhoIFwoKUDFbTCsWBg0OTwEbRSonSARTBDpFFwsPCwIATxNOPBpUKhMd
Th5PAUgGQQBPCxYRdG87TQoPD1QbE0s9GkFiFAUXR0cdGgkADwENUwg1DhdN
AQsTVBgXVHYaKkg7TgNHTB0DAAA9DgQACjpFX0BJPQAZHB1OeE5PYjYMAg5M
FQBFKjoHDAEAcxZSAwZOBREBC0k2HQxiKwYbR0MVBkVUHBZJBwp0DRMDDk5r
NhoGACFVVWUeBU4MRREYRVQcFgAdQnQRHU0OCxVUAgsAK05ZLhdJZChWERpF
QQALSRwTMRdeTRkcABcbG0M9Gk0jGQwdR1ARGgNFDRtJeSchEVIDBhpBHQlS
WTdPBzAXSQ9HTBsJA0UcQUl5bw0KB0oFAkETCgYANlVXKhcbC0sAGgdFUAIO
ChZJdAsdTR0HDBFDUk43GkcrAAUdRyonBwpOTkJEUyo8RR8USSkOEENSSDdX
RSAdDRdLAA0HEAAeHQYRBDYJC00MDxVUZSFQOV1IJwYdB0dXHRwNAA9PGgMK
OwtTTSoBDBFPHU54W04mUhoPHgAdHEQAZGU/OjV6RSQMBwcNGA5SaTtfADsX
GUJHWREYSQAnSARTBjsIGwNOTgkVHRYANFNLJ1IIThVIHQYKAGQmBwcKLAwR
DB0HDxNPAU94Q083UhoaBkcTDRcAAgYCFkU1RQUEBwFBfjwdAChPTikBSR0T
TwRIEVIXBgcURTULFk0OBxMYTwFUN0oAIQAQBwkHVGIzQQAGBR8EdCwRCEkH
ElQcF0w0U05lUggAAwANBxAAHgoGAwkxRRMfDE4DARYbTn8aKmUxCBsURVQf
DVlOGwEWRTIXFwwCHUEVHRcAMlVDKRsHSUdMHQMAAC0dCAkcdCIeGAxOazkA
BEk2HQAjHA1OAFIbBxNJAEhJBxctDBwKSRoOVBwbTj8aQS4dBwlHKjUECQAa
BxscEDMNUhkBC0ETBxdULFUAJQAGARFJGk9FVAYGGlMNMRcXTRoBDxNPeG43
TQA7HRxJFUVUCQhBFAoNUwctRQYFDE43PT9SUDdJUydcSWRtcwANFVAHAU5T
FjtFGgwbCkEYBhlFeFsABRcbAwZOVCYEWgdPYyARNRcGAQwKQRYWUlQwXwAg
ExoLFAAcARFUBwFOUwImCgcDDU5rIAcXUj0dU2IcBk4TUh0YFUkASEkcC3QI
GwMMQkE9SB8AMk9TNlIOCxNUHQZCAAoAHh1FXjYCDBsFABkOBkk7FgALVQRO
D0EaDwxOSU8dGgI8EVIBAAUEVA5SRjlUQTYbCk5teRsdRVQcDhkDADBFHwhJ
AQ8XClJBNl4AC1IdBghVEwARABoHCAdFXjwdGEkDCBMHBgAwW1YnUgAaRyon
B0VTGgoZUwE7EhxNCAAFVAMXTjwaTSdSEAESUlQNBFJOZU5LXHQMHE0EF0EA
Bh9FeRp5LQdFTkAZREgMU04CEFMcMQQAQ0lkay0ABwcqXwA1FwgFAk4dBkIA
CA4aB0l0PD1MSQ8PEE87ADtbTmIGDAILAB0cRSo3ABwBRTYKFhROHUETCgZU
MVQHYhoGGksABwdJAB0ASTpFNwQcTRoDBBgDUkksGioRHUkKCE5THEVCC08E
EgF0BBwJSQoOGkgGADpfADETDU5tBzcJEFMLTx0bAHQJCx8ADRJUDRdMN1RH
YgYGTi5jMURFeQEaSRAEOkURDAUCQRkKUmQ5XgBIKwYbQFIRSBVJGgwBGgtz
RRNNDwcVWE8BT3hJVCcCSQwGQx9IBE4KTwwdASEXF01jIgQATwZIPRpXKwYK
BkdEGwsRTxxDSToGMUlSCQZOFRwKUkQ5VEMnUh0BR0MBGgAAZDwGUwY7CBdN
HB5BFwMdUz0aQSwWSQoITlMcRUILTxoCEDUXF01jNw4BTwVBNlRBYhAIGhNM
EUgIRU5CRFMkOhwGBAQLTVQOHFkvUkUwF0lkbXkbHUVUBgAcFA0gRQYFCBpB
PU8FQSsaVycTAkJHYhsRSQAXABxUFzFFFggICkEDHR1OPxoqER1JDQhNEUgK
TkJPDAUAJhwQAg0XQRUBFgArU04lUh0GDlNUGwpOCU9jeTY1HFJARE4xGA4L
ACxSQTZSDxsJSw1ICFUdBgpTNjUcXk0OAUEDBxtUPRpCLQtFTgBPVB8NSRoK
SREKLUUVAklkERgOCwAsUkE2Ug8bCUsNSAhVHQYKUyI7RQUFABoEVA0dWXQa
Ry1SHgYOVBFIB08XQ0kUCnRvPgwQTgUbGBwAOVREYhAGAQBJEUgETgpPGR8E
LUUGBQgaQRIaHEshGk03AQANR1QdBAkAFwAcUwE9AFxNY2QxGA4LACxSQTZS
DxsJSw1ICFUdBgpTJjsIF00GAE1ULB1NPRpPLF5JAgJUVAUAAAYKCAFFXjUe
DBBOFRwOBgA+T04pC0kDElMdC0VXBgYdFkU2CgtNEAEUVBwTWXhTVG5SGg8e
AB0cRSo+AwgKRSANExlJCBQaBAsANU9TKxFJL0dMHRwRTAtPBRwQMAAATQcB
FlRlIkw5QwA2GggaR0YBBg5ZTgIcAAw3SVIaAQcVEU8QTyEaYy0fDE4ITlhI
Jk8DCkkcC3hFMQIEC0EbAVIqCFZBO1IdBgZUVA4QTgUWSR4QJwwRTWM=""".replace("\n", "")
solve_challenge(b64_crypt) | {
"repo_name": "8u1a/my_matasano_crypto_challenges",
"path": "set1/challenge6.py",
"copies": "1",
"size": "9251",
"license": "unlicense",
"hash": -8180788151263343000,
"line_mean": 36.6097560976,
"line_max": 137,
"alpha_frac": 0.7243541239,
"autogenerated": false,
"ratio": 2.260752688172043,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3485106812072043,
"avg_score": null,
"num_lines": null
} |
__author__ = 'christianbuia'
import random
from Crypto.Cipher import AES
import base64
import sys
def pkcs7_padding(message_bytes, block_size):
    """PKCS#7-style padding up to a multiple of *block_size*.

    NOTE: unlike strict PKCS#7, this variant appends nothing when the
    message is already block-aligned.
    """
    remainder = len(message_bytes) % block_size
    if remainder == 0:
        return message_bytes
    pad_length = block_size - remainder
    return message_bytes + bytes([pad_length]) * pad_length
#-----------------------------------------------------------------------------------------------------------------------
def generateRandom16bytes():
    """Return 16 random bytes, each uniform in 0..255 (not crypto-grade)."""
    return bytes(random.randint(0, 255) for _ in range(16))
#-----------------------------------------------------------------------------------------------------------------------
#always 16 bytes
def encrypt_aes128(message, key):
    """Pad *message* to 16 bytes (PKCS#7 variant) and ECB-encrypt it under *key*."""
    cipher_obj = AES.new(key, AES.MODE_ECB)
    padded = pkcs7_padding(message, 16)
    return cipher_obj.encrypt(padded)
#-----------------------------------------------------------------------------------------------------------------------
#attempt to detect ECB by looking for identical blocks
def detectEBC(cipher, block_size):
    """Return True when any full *block_size* chunk of *cipher* repeats.

    Repeated cipher blocks are the classic tell of ECB mode: identical
    plaintext blocks encrypt to identical cipher blocks.
    """
    n_blocks = len(cipher) // block_size
    blocks = [cipher[k * block_size:(k + 1) * block_size] for k in range(n_blocks)]
    # Any duplicate shrinks the set relative to the list.
    return len(set(blocks)) != len(blocks)
#-----------------------------------------------------------------------------------------------------------------------
#this time with no cheating :) realized during this challenge that the oracle in challenge 12 should also always append
#the challenge plaintext. all calls to the oracle will include the original plaintext as the second parameter.
#change is trivial anyway...
def ecb_oracle(mytext, plaintext):
    """ECB oracle: encrypt random-prefix || mytext || plaintext under global_key.

    The prefix is 5..10 random bytes, regenerated on every call (same
    scheme as challenge 11).
    """
    prefix_len = random.randint(5, 10)
    random_prefix = bytes(random.randint(0, 255) for _ in range(prefix_len))
    return encrypt_aes128(random_prefix + mytext + plaintext, global_key)
#-----------------------------------------------------------------------------------------------------------------------
def detect_plaintext_padding_size(oracle_func, plaintext, block_size):
    """Count filler bytes needed before the cipher grows by a block.

    Feeds the oracle progressively longer runs of "A"; the cipher length
    jumps exactly when the plaintext's padding is exhausted.  Returns the
    number of filler bytes fed at that point, minus one.
    """
    filler = b""
    attempts = 0
    previous_blocks = None
    while True:
        blocks_now = len(oracle_func(filler, plaintext)) / block_size
        if previous_blocks is not None and previous_blocks < blocks_now:
            return attempts - 1
        previous_blocks = blocks_now
        filler += b"A"
        attempts += 1
#-----------------------------------------------------------------------------------------------------------------------
def return_sorted_counts_of_lengths(oracle_func, attack_array, plaintext, num_runs=200):
    """Run the oracle *num_runs* times; return the distinct cipher lengths, sorted."""
    return sorted({len(oracle_func(attack_array, plaintext)) for _ in range(num_runs)})
#-----------------------------------------------------------------------------------------------------------------------
#this function turns out to be a waste of time, but keeping it around in case i ever need to calc this.
#determined that i can't calculate the absolute min and max if i don't know the size of the plaintext (only the delta)
#which i am assuming i won't know for this challenge
def find_prefix_delta(oracle_func, plaintext, block_size):
    """Probe the oracle to learn the length delta of its random prefix.

    Drives a three-state search over attack arrays of increasing length:
    state 1 = arrays yielding ciphers of varying length; state 2 = the
    first array length yielding a single cipher length; state 3 = the next
    length yielding varying lengths again.  Returns
    block_size - (state-3 array length - state-2 array length).

    NOTE(review): per the author's comment above the function, this value
    turned out not to be needed by crack_ecb — only the delta (not the
    absolute min/max prefix size) is recoverable this way.
    """
    #we want to find an attack array that results in variable lengths of the cipher text (state 1)
    #we can use that attack array by incrementing a byte at a time til we find an attack array of one len (state 2)
    #we then increment the attack array.
    #when we find one of multiple len, the delta between state 2 and now gives the delta of min and max.
    #this is state 3.
    bounds_count = 0
    bounds_state = 0
    state_2_len = None
    min_max_delta = None
    while True:
        bounds_count += 1
        #first we will find an attack array that yields variably sized cipher texts
        ints = [ord("A") for i in range(bounds_count)]
        bounds_attack_array = bytes(ints)
        #undetermined
        if bounds_state == 0:
            if len(return_sorted_counts_of_lengths(oracle_func, bounds_attack_array, plaintext)) == 1:
                pass
            else:
                bounds_state = 1
            continue
        #variable-length ciphers - looking for the first mono-length
        if bounds_state == 1:
            if len(return_sorted_counts_of_lengths(oracle_func, bounds_attack_array, plaintext)) == 1:
                bounds_state = 2
                state_2_len = len(bounds_attack_array)
            else:
                pass
            continue
        #mono-length ciphers - looking for the first variable length to show us what we subtract from the blocksize
        #to arrive at the delta (delta = blocksize - (length - state 2 length)
        if bounds_state == 2:
            if len(return_sorted_counts_of_lengths(oracle_func, bounds_attack_array, plaintext)) == 1:
                pass
            else:
                bounds_state = 3
                #this number will give me the delta between min and max
                min_max_delta = block_size - (len(bounds_attack_array) - state_2_len)
                break
            continue
    return min_max_delta
#-----------------------------------------------------------------------------------------------------------------------
def crack_ecb(oracle_func, plaintext):
    """Byte-at-a-time ECB decryption against an oracle with a random prefix.

    Recovers *plaintext* without reading it directly, by repeatedly asking
    the oracle to encrypt crafted filler and matching cipher blocks.  The
    oracle prepends 5-10 random bytes per call, so matching is done
    statistically over many oracle calls per probe.  Returns the recovered
    text with the leading "A" filler stripped.
    """
    #detect block size by determining the delta of the first jump in cipher size as the plaintext size increases
    block_size = None
    cipher_size = len(oracle_func(b"A", plaintext))
    size_count = 1
    while True:
        ints = [ord("A") for i in range(size_count)]
        size_attack_array = bytes(ints)
        next_cipher_size = len(oracle_func(size_attack_array, plaintext))
        if next_cipher_size > cipher_size:
            block_size = next_cipher_size - cipher_size
            break
        size_count += 1
    #not sure i need this
    prefix_delta = find_prefix_delta(oracle_func, plaintext, block_size)
    # Largest observed cipher length with no attacker text = upper bound on
    # prefix + plaintext (+ padding), used to size the attack below.
    sizes_of_base_plaintext = return_sorted_counts_of_lengths(oracle_func, b"", plaintext)
    top_size_of_base_plaintext = sizes_of_base_plaintext[-1]
    number_of_blocks_to_decode = int(top_size_of_base_plaintext / block_size)
    analysis_block = number_of_blocks_to_decode + 1
    print("size of base plaintext " + str(sizes_of_base_plaintext))
    print("number of blocks to decode " + str(number_of_blocks_to_decode))
    print("analysis block " + str(analysis_block))
    #figure out the base attack array to populate the analysis block
    #--------------------------------------------------------------------------------------------
    base_attack_array_size = 1
    base_attack_array = b""
    while True:
        ints = [ord("A") for i in range(base_attack_array_size)]
        base_attack_array = bytes(ints)
        plaintext_sizes = return_sorted_counts_of_lengths(oracle_func, base_attack_array, plaintext)
        if plaintext_sizes[-1] > top_size_of_base_plaintext:
            break
        base_attack_array_size += 1
    #print("base attack array is " + str(base_attack_array))
    #print("size of base attack array is " + str(base_attack_array_size))
    #--------------------------------------------------------------------------------------------
    #the solved plain text we accumulate and return
    solved_plain_text = b""
    for block_number in range(number_of_blocks_to_decode):
        sys.stdout.write("decrypting...")
        sys.stdout.flush()
        for byte_number in range(block_size):
            sys.stdout.write(".")
            sys.stdout.flush()
            # Once the recovered text begins with our own filler bytes,
            # the whole secret has been consumed — stop early.
            if solved_plain_text[0:5] == b"AAAAA":
                break
            #generate the next attack array
            ints = [ord("A") for i in range(base_attack_array_size + (block_number*block_size) + byte_number)]
            attack_array = bytes(ints)
            #calculate a list that has all potential plaintexts
            # the format of each element in this array is:
            # [byte_iterator | blocksize worth of most recent bz-1 solved_plain_text | padding if necessary]
            #build the just short array
            jsa_solved_plain_text = b""
            jsa_padding = b""
            if (len(solved_plain_text)) >= block_size:
                jsa_solved_plain_text = solved_plain_text[:(block_size-1)]
            else:
                jsa_solved_plain_text = solved_plain_text
                padding_lenth = block_size - len(solved_plain_text) - 1
                for i in range(padding_lenth):
                    jsa_padding += bytes([padding_lenth])
            just_short_array = jsa_solved_plain_text + jsa_padding
            just_short_array_bytes_dict = {}
            for i in range(0, 127+1):
                just_short_array_bytes_dict[i] = bytes([i]) + just_short_array
            #now generate the cryptotexts we want to match
            crypto_text_candidates = []
            # 50 runs smooth over the 5-10 byte random prefix variation.
            for i in range(50):
                #if the byte is in the dict, create an entry in the dict of a one-element list
                candidate_crypt = oracle_func(
                    attack_array, plaintext)
                if len(candidate_crypt) >= analysis_block * block_size:
                    #only extract the analysis block from the candidate
                    entire_candidate_crypt = candidate_crypt
                    candidate_crypt = candidate_crypt[(analysis_block - 1)*block_size:analysis_block*block_size]
                    if candidate_crypt not in crypto_text_candidates:
                        crypto_text_candidates.append(candidate_crypt)
            #print(just_short_array_bytes_dict)
            #print(crypto_text_candidates)
            #now gen a bunch of ciphertexts, looking at the second block and comparing it to our crypto_text_candidates
            attack_count = 1
            solved_byte = None
            while True:
                # Safety valve: give up after 3*block_size filler sizes.
                if attack_count > block_size*3:
                    print("error, force breaking out of byte decryption attack loop, and exiting")
                    exit(1)
                    break
                elif solved_byte is not None:
                    break
                for element in just_short_array_bytes_dict:
                    if solved_byte is not None:
                        break
                    test_case = just_short_array_bytes_dict[element]
                    #gen a bunch of ciphers...
                    ciphers = []
                    for c in range(50):
                        intz = \
                            [ord("A") for lol in range(attack_count)]
                        ciph = oracle_func(bytes(intz) + test_case, plaintext)
                        if ciph not in ciphers:
                            ciphers.append(ciph)
                    #compare generated ciphers with the crypto candidates. The intersection will reveal the next byte.
                    for c in ciphers:
                        if c[block_size:block_size*2] in crypto_text_candidates:
                            solved_byte = test_case[0]
                            break
                    # NOTE(review): incremented once per candidate byte, not
                    # once per while-iteration — verify this is intended.
                    attack_count += 1
            solved_plain_text = bytes([solved_byte]) + solved_plain_text
        print("\nsolved plaintext so far: " + str(solved_plain_text))
    return solved_plain_text.decode("ascii").lstrip("A")
#***********************************************************************************************************************
#Script entry point: build a random key, decode the secret plaintext, and run
#the byte-at-a-time ECB attack against the random-prefix oracle.
if __name__ == '__main__':
    # NOTE(review): `global` here is a no-op — this code already runs at
    # module scope; global_key is simply the module-level key ecb_oracle reads.
    global global_key
    global_key = generateRandom16bytes()
    b64_unknown_string = """Um9sbGluJyBpbiBteSA1LjAKV2l0aCBteSByYWctdG9wIGRvd24gc28gbXkg
aGFpciBjYW4gYmxvdwpUaGUgZ2lybGllcyBvbiBzdGFuZGJ5IHdhdmluZyBq
dXN0IHRvIHNheSBoaQpEaWQgeW91IHN0b3A/IE5vLCBJIGp1c3QgZHJvdmUg
YnkK""".replace("\n", "")
    #prep the plaintext, though we don't want to know what it is yet
    #(we are going to use the oracle to crack encrypted versions of the plaintext)
    unknown_string = base64.b64decode(b64_unknown_string)
    challenge_plaintext = bytes(unknown_string)
    solved = crack_ecb(ecb_oracle, challenge_plaintext)
    print("----------------------")
    print(solved)
| {
"repo_name": "8u1a/my_matasano_crypto_challenges",
"path": "set2/challenge14.py",
"copies": "1",
"size": "12510",
"license": "unlicense",
"hash": 2999534932550162000,
"line_mean": 41.8424657534,
"line_max": 120,
"alpha_frac": 0.5486810552,
"autogenerated": false,
"ratio": 4.262350936967632,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5311031992167632,
"avg_score": null,
"num_lines": null
} |
__author__ = 'christianbuia'
import random
from Crypto.Cipher import AES
import base64
def pkcs7_padding(message_bytes, block_size):
    """PKCS#7-style padding up to a multiple of *block_size*.

    NOTE: unlike strict PKCS#7, this variant appends nothing when the
    message is already block-aligned.
    """
    remainder = len(message_bytes) % block_size
    if remainder == 0:
        return message_bytes
    pad_length = block_size - remainder
    return message_bytes + bytes([pad_length]) * pad_length
#-----------------------------------------------------------------------------------------------------------------------
def generateRandom16bytes():
    """Return 16 random bytes, each uniform in 0..255 (not crypto-grade)."""
    return bytes(random.randint(0, 255) for _ in range(16))
#-----------------------------------------------------------------------------------------------------------------------
#always 16 bytes
def encrypt_aes128(message, key):
    """Pad *message* to 16 bytes (PKCS#7 variant) and ECB-encrypt it under *key*."""
    cipher_obj = AES.new(key, AES.MODE_ECB)
    padded = pkcs7_padding(message, 16)
    return cipher_obj.encrypt(padded)
#-----------------------------------------------------------------------------------------------------------------------
#attempt to detect ECB by looking for identical blocks
def detectEBC(cipher, block_size):
    """Return True when any full *block_size* chunk of *cipher* repeats.

    Repeated cipher blocks are the classic tell of ECB mode: identical
    plaintext blocks encrypt to identical cipher blocks.
    """
    n_blocks = len(cipher) // block_size
    blocks = [cipher[k * block_size:(k + 1) * block_size] for k in range(n_blocks)]
    # Any duplicate shrinks the set relative to the list.
    return len(set(blocks)) != len(blocks)
#-----------------------------------------------------------------------------------------------------------------------
def ecb_oracle(mytext, plaintext):
    """Encrypt attacker-controlled text followed by the secret plaintext under global_key."""
    return encrypt_aes128(mytext + plaintext, global_key)
#-----------------------------------------------------------------------------------------------------------------------
#detect block size by feeding the oracle a single byte to encrypt, and then inspecting the length of the cipher
#this assumes a single byte will be represented by a single block of the cipher (pretty safe assumption methinks,
#requires that the oracle won't prepend random bytes of size >= to the block size).
def detect_oracle_block_size(oracle_func):
    """Infer the cipher block size: a one-byte message pads out to one block."""
    return len(oracle_func(b"A", b""))
#-----------------------------------------------------------------------------------------------------------------------
#detect oracle is ecb by feeding the oracle with homogeneous plaintext with length equal to exactly 4x the block length,
#then comparing the 2nd & 3rd cipher blocks. identical cipher blocks indicate the oracle generates ecb ciphers.
#using blocks 2 & 3 in case of random prefixes (of size less than block size) prepended to the plaintext by the oracle
def detect_oracle_is_ecb(oracle_func, block_size):
    """Return True when the oracle exhibits the ECB tell.

    Feeds four blocks of identical bytes and compares cipher blocks 2 and 3
    (block 1 is skipped in case the oracle prepends a short random prefix;
    with ECB the two interior blocks must encrypt identically).

    Fixes an off-by-one in the original slicing (`block_size*2-1` /
    `block_size*3-1`), which silently dropped the last byte of each
    compared block and could mis-report a non-ECB oracle as ECB.
    """
    probe = b"A" * (block_size * 4)
    cipher = oracle_func(probe, b"")
    return cipher[block_size:block_size * 2] == cipher[block_size * 2:block_size * 3]
#-----------------------------------------------------------------------------------------------------------------------
def detect_plaintext_padding_size(oracle_func, plaintext, block_size):
    """Count filler bytes needed before the cipher grows by a block.

    Feeds the oracle progressively longer runs of "A"; the cipher length
    jumps exactly when the plaintext's padding is exhausted.  Returns the
    number of filler bytes fed at that point, minus one.
    """
    filler = b""
    attempts = 0
    previous_blocks = None
    while True:
        blocks_now = len(oracle_func(filler, plaintext)) / block_size
        if previous_blocks is not None and previous_blocks < blocks_now:
            return attempts - 1
        previous_blocks = blocks_now
        filler += b"A"
        attempts += 1
#-----------------------------------------------------------------------------------------------------------------------
def crack_ecb(oracle_func, plaintext):
    """Byte-at-a-time ECB decryption (simple variant, no random prefix).

    Verifies the oracle is ECB, then recovers *plaintext* one byte at a
    time by comparing oracle output against a dictionary of one-byte
    continuations.  Returns the recovered plaintext as bytes.
    """
    #detect block size
    block_size = detect_oracle_block_size(oracle_func)
    #detect oracle is ECB
    if detect_oracle_is_ecb(oracle_func, block_size) is not True:
        print("oracle was determined to not be ECB. Exiting.")
        exit(1)
    #detect size of padding
    padding_size = detect_plaintext_padding_size(oracle_func, plaintext, block_size)
    size_of_unaltered_cipher = len(oracle_func(b"", plaintext))
    number_of_blocks = int(size_of_unaltered_cipher / block_size)
    #the solved plain text we accumulate and return
    solved_plain_text = b""
    for block_number in range(number_of_blocks):
        #generally we do a full block_size cycle of attack arrays...
        #unless it's the last block, in which case we subtract padding.
        if block_number == number_of_blocks - 1:
            iters = block_size - padding_size
        else:
            iters = block_size
        for byte_number in range(iters):
            #generate a homogeneous string of bytes that is of size block_size - 1 - (the number of solved bytes)
            ints = [ord("A") for i in range(block_size-1-byte_number)]
            attack_array = bytes(ints)
            just_short_array = attack_array + solved_plain_text
            last_byte_dict = {}
            #ordinal for all ascii (0-127)
            # Each dictionary entry is the oracle's encryption of the
            # known prefix followed by one candidate byte (passed via the
            # oracle's plaintext parameter).
            for i in range(0, 127+1):
                last_byte_dict[i] = oracle_func(just_short_array, bytes([i]))
            cipher = oracle_func(attack_array, plaintext)
            # The candidate whose cipher prefix matches the real cipher's
            # leading blocks reveals the next plaintext byte.
            for i in last_byte_dict.__iter__():
                if last_byte_dict[i] == cipher[:block_size*(block_number + 1)]:
                    solved_plain_text += bytes([i])
    return solved_plain_text
#***********************************************************************************************************************
# Script body (module level): create the secret key and plaintext, then run
# the byte-at-a-time ECB decryption attack and print what it recovers.
# NOTE(review): `global` is a no-op at module scope; global_key is simply
# the module-level key read by ecb_oracle above.
global global_key
global_key = generateRandom16bytes()
b64_unknown_string = """Um9sbGluJyBpbiBteSA1LjAKV2l0aCBteSByYWctdG9wIGRvd24gc28gbXkg
aGFpciBjYW4gYmxvdwpUaGUgZ2lybGllcyBvbiBzdGFuZGJ5IHdhdmluZyBq
dXN0IHRvIHNheSBoaQpEaWQgeW91IHN0b3A/IE5vLCBJIGp1c3QgZHJvdmUg
YnkK""".replace("\n", "")
#prep the plaintext, though we don't want to know what it is yet
#(we are going to use the oracle to crack encrypted versions of the plaintext)
unknown_string = base64.b64decode(b64_unknown_string)
plaintext = bytes(unknown_string)
print(crack_ecb(ecb_oracle, plaintext))
| {
"repo_name": "8u1a/my_matasano_crypto_challenges",
"path": "set2/challenge12.py",
"copies": "1",
"size": "6174",
"license": "unlicense",
"hash": 8266413372059513000,
"line_mean": 36.8773006135,
"line_max": 120,
"alpha_frac": 0.5542597992,
"autogenerated": false,
"ratio": 4.059171597633136,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.010522477206399088,
"num_lines": 163
} |
__author__ = 'christianbuia'
import random
from Crypto.Cipher import AES
def add_pkcs7_padding(message_bytes, blocksize):
    """Strict PKCS#7: always append 1..blocksize pad bytes (a full block when aligned)."""
    pad_length = blocksize - (len(message_bytes) % blocksize)
    return message_bytes + bytes([pad_length]) * pad_length
#-----------------------------------------------------------------------------------------------------------------------
def strip_pkcs7_padding(plaintext):
    """Validate and remove PKCS#7 padding from *plaintext*.

    Raises Exception("Error with PKCS7 Padding.") when the padding is
    malformed.  Fixes two defects in the original: a final pad byte of 0
    silently returned b"" (via plaintext[:-0]) instead of raising, and a
    pad byte larger than the message raised IndexError instead of the
    padding error.
    """
    if not plaintext:
        raise Exception("Error with PKCS7 Padding.")
    pad_length = plaintext[-1]
    # A valid pad byte is 1..len(plaintext); 0 and oversize values are invalid.
    if pad_length < 1 or pad_length > len(plaintext):
        raise Exception("Error with PKCS7 Padding.")
    if bytes(plaintext[-pad_length:]) != bytes([pad_length]) * pad_length:
        raise Exception("Error with PKCS7 Padding.")
    return plaintext[:-pad_length]
#-----------------------------------------------------------------------------------------------------------------------
def generateRandom16bytes():
    """Return 16 random bytes, each uniform in 0..255 (not crypto-grade)."""
    return bytes(random.randint(0, 255) for _ in range(16))
#-----------------------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------
#always 16 bytes
def decrypt_aes128(message, key, pad=False):
    """ECB-decrypt *message* under the 16-byte *key* (the *pad* flag is unused)."""
    aes = AES.new(key, AES.MODE_ECB)
    return aes.decrypt(message)
#-----------------------------------------------------------------------------------------------------------------------
#always 16 bytes
def encrypt_aes128(message, key, pad=False):
    """ECB-encrypt *message* under the 16-byte *key* (the *pad* flag is unused)."""
    aes = AES.new(key, AES.MODE_ECB)
    return aes.encrypt(message)
#-----------------------------------------------------------------------------------------------------------------------
def encrypt_aes128_cbc(message, key, iv):
    """CBC-encrypt: pad, then XOR each plaintext block with the previous
    cipher block (the IV for the first block) before ECB-encrypting it."""
    padded = add_pkcs7_padding(message, 16)
    chaining = iv
    out = []
    for start in range(0, len(padded), 16):
        block = padded[start:start + 16]
        xored = bytes(b ^ c for b, c in zip(block, chaining))
        chaining = encrypt_aes128(xored, key)
        out.append(chaining)
    return b"".join(out)
#-----------------------------------------------------------------------------------------------------------------------
def decrypt_aes128_cbc(message, key, iv):
    """CBC-decrypt *message*: ECB-decrypt each block, XOR with the previous
    cipher block (IV first), then validate and strip the PKCS#7 padding."""
    chaining = iv
    plain_parts = []
    for start in range(0, len(message), 16):
        block = message[start:start + 16]
        decrypted = decrypt_aes128(bytes(block), key)
        plain_parts.append(bytearray(d ^ c for d, c in zip(decrypted, chaining)))
        chaining = block
    plaintext = b"".join(plain_parts)
    return strip_pkcs7_padding(plaintext)
#-----------------------------------------------------------------------------------------------------------------------
def challenge15_oracle(user_input, key, iv):
    """Quote out ';' and '=' in the user input, wrap it with the fixed
    cookie prefix/suffix, and CBC-encrypt the result."""
    # str.replace is a no-op when the character is absent, so the original
    # membership checks are unnecessary.
    sanitized = user_input.replace(";", "%3b").replace("=", "%3d")
    cookie = "comment1=cooking%20MCs;userdata=" + sanitized + ";comment2=%20like%20a%20pound%20of%20bacon"
    return encrypt_aes128_cbc(bytes(cookie, "ascii"), key, iv)
#-----------------------------------------------------------------------------------------------------------------------
#not going to account for the edge case where the random mess of the first tampered block closes out the key/value pair.
def check_admin(test_string):
    """True when the decoded cookie contains the ';admin=true;' token."""
    return ";admin=true;" in test_string
#-----------------------------------------------------------------------------------------------------------------------
def tamper(cipher, target_block, blocksize, random_block):
    """Bit-flip the cipher block before *target_block* (1-based) so the tail
    of the target block decrypts to ';admin=true'.

    CBC decryption XORs each decrypted block with the previous cipher
    block, so flipping a bit there flips the same bit in the target
    plaintext.  *random_block* is the known plaintext occupying the target
    block; XORing it in cancels it out of the result.
    """
    want = b";admin=true"
    mutable = bytearray(cipher)
    tail = blocksize - len(want)
    # Offset of the first tampered byte inside the preceding cipher block.
    base = blocksize * (target_block - 2) + tail
    for i, wanted_byte in enumerate(want):
        mutable[base + i] ^= random_block[tail + i] ^ wanted_byte
    return bytes(mutable)
#-----------------------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------
#Script entry point (challenge 16): build an attack cookie whose middle block
#can be sacrificed to bit-flipping so the following block decrypts to
#';admin=true', then verify the tampered cookie passes the admin check.
if __name__ == '__main__':
    blocksize = 16
    key = generateRandom16bytes()
    iv = generateRandom16bytes()
    prefix = "comment1=cooking%20MCs;userdata="
    suffix = ";comment2=%20like%20a%20pound%20of%20bacon"
    #construct a suitable attack plaintext in the form:
    #[fill out the prefix's last block][a block worth of random bytes for flipping][a block worth of random bytes]
    #e.g.
    #prefixAAA|AAAAAAAAAAAAAAAA|AAAAAAAAAAAAAAAA
    #can become
    #prefixAAA|GARBELEDMESSSSSS|AAAAA;admin=true
    #find out how many bytes we need to fill
    prefix__block_remainder = (blocksize - (len(prefix) % blocksize)) % blocksize
    #find out which block will be subject to tampering:
    # +2 = one block past the flip block, 1-based (as tamper() expects).
    target_block = int((len(prefix) + prefix__block_remainder)/blocksize) + 2
    #create the attack array
    attack_array = ""
    #add bytes to fill out the prefix's last block
    for i in range(prefix__block_remainder):
        attack_array += "\x00"
    #using null bytes is the simplest case because then we just have to xor by what we want (a xor 0 = a)
    #updated to use random bytes
    #add a pair of full blocks
    random_block = b""
    for i in range(2*blocksize):
        #if all we were doing was null bytes, our entire loop body would be:
        #attack_array += "\x00"
        #but since we are doing random inputs, we just need to make sure the char ";" and "=" are not chosen
        #because the sanitation routine quotes those chars out and don't want to do the math to account for that.
        #also only doing up to 127 instead of 255 because sanitation routine is set to use ascii strings...
        r = None
        while True:
            r = random.randint(0,127)
            if r != 59 and r != 61:
                break
        attack_array += chr(r)
        # Only the second of the two blocks (the target block's known
        # plaintext) needs to be remembered for the tamper step.
        if i >= blocksize:
            random_block += bytes([r])
    cipher = challenge15_oracle(attack_array, key, iv)
    print("untampered cipher is " + str(decrypt_aes128_cbc(cipher, key, iv)))
    cipher = tamper(cipher, target_block, blocksize, random_block)
    print(cipher)
    plain = decrypt_aes128_cbc(cipher, key, iv)
    print(plain)
    is_admin = check_admin(str(plain))
    if is_admin:
        print("\n***SUCCESS***")
    else:
        print("\n===FAIL===")
| {
"repo_name": "8u1a/my_matasano_crypto_challenges",
"path": "set2/challenge16.py",
"copies": "1",
"size": "7366",
"license": "unlicense",
"hash": 5871478780797363000,
"line_mean": 34.9317073171,
"line_max": 120,
"alpha_frac": 0.5134401303,
"autogenerated": false,
"ratio": 4.366330764671013,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5379770894971013,
"avg_score": null,
"num_lines": null
} |
__author__ = 'christianbuia'
import random
from Crypto.Cipher import AES
def pkcs7_padding(message_bytes, block_size):
    """Pad *message_bytes* up to a multiple of *block_size*.

    Each pad byte carries the pad length, PKCS#7 style. NOTE: this deviates
    from strict PKCS#7 — an already block-aligned message gets NO extra
    padding block (the original behaves the same way).
    """
    remainder = len(message_bytes) % block_size
    if remainder:
        pad_length = block_size - remainder
        message_bytes += bytes([pad_length]) * pad_length
    return message_bytes
#-----------------------------------------------------------------------------------------------------------------------
#always 16 bytes
def decrypt_aes128(message, key):
    """Raw AES-128-ECB decryption of *message* under *key* (no unpadding)."""
    aes = AES.new(key, AES.MODE_ECB)
    return aes.decrypt(message)
#-----------------------------------------------------------------------------------------------------------------------
#always 16 bytes
def encrypt_aes128(message, key):
    """AES-128-ECB encryption of *message* under *key*, padding it first."""
    padded = pkcs7_padding(message, 16)
    aes = AES.new(key, AES.MODE_ECB)
    return aes.encrypt(padded)
#-----------------------------------------------------------------------------------------------------------------------
def encrypt_aes128_cbc(message, key, vector):
    """Encrypt *message* under AES-128 in CBC mode.

    Each 16-byte plaintext block is xored with the previous ciphertext block
    (the IV *vector* for the first block) before ECB-encrypting it.
    """
    padded = pkcs7_padding(message, 16)
    ciphertext = b''
    for start in range(0, len(padded), 16):
        plain_block = padded[start:start + 16]
        # chain: xor with previous ciphertext block (or the IV)
        xored = bytes(p ^ v for p, v in zip(plain_block, vector))
        vector = encrypt_aes128(xored, key)
        ciphertext += vector
    return ciphertext
#-----------------------------------------------------------------------------------------------------------------------
def decrypt_aes128_cbc(message, key, vector):
    """Decrypt an AES-128-CBC *message*; padding is NOT removed."""
    plaintext = b''
    for start in range(0, len(message), 16):
        cipher_block = message[start:start + 16]
        decrypted = decrypt_aes128(bytes(cipher_block), key)
        # undo the CBC chaining: xor with the previous ciphertext block / IV
        plaintext += bytes(d ^ v for d, v in zip(decrypted, vector))
        vector = cipher_block
    return plaintext
#-----------------------------------------------------------------------------------------------------------------------
def generateRandom16bytes():
    """Return 16 bytes drawn from random.randint(0, 255) — NOT crypto-grade."""
    return bytes(random.randint(0, 255) for _ in range(16))
#-----------------------------------------------------------------------------------------------------------------------
#attempt to detect ECB by looking for identical blocks
def detectEBC(cipher, block_size):
blocks = []
for i in range(int(len(cipher)/block_size)):
blocks.append(cipher[i*block_size:i*block_size+block_size])
#detecting if dups exist: http://stackoverflow.com/questions/9835762/find-and-list-duplicates-in-python-list
if (len(set([x for x in blocks if blocks.count(x) > 1]))) > 0:
return True
else:
return False
#-----------------------------------------------------------------------------------------------------------------------
#given a plaintext, will return a cipher text generated with random key/IV using either EBC or CBC
def encryption_oracle(plaintext):
    """Encrypt *plaintext* with a random key under ECB or CBC (coin flip).

    A random 5–10 byte prefix and suffix are wrapped around the input first.
    Returns the (mode, ciphertext) pair so callers can score their guess.
    """
    head = bytes(random.randint(0, 255) for _ in range(random.randint(5, 10)))
    tail = bytes(random.randint(0, 255) for _ in range(random.randint(5, 10)))
    payload = head + bytes(plaintext, "ascii") + tail
    if random.randint(0, 1) == 0:
        return "ECB", encrypt_aes128(payload, generateRandom16bytes())
    return "CBC", encrypt_aes128_cbc(
        payload, generateRandom16bytes(), generateRandom16bytes())
#-----------------------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------
#generateRandom16bytes()
# Long, mostly-repetitive plaintext: the repeated 16-byte runs of 'A' are
# exactly what makes ECB detectable via duplicate ciphertext blocks.
plaintext = """AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAeeh89c3iofh3e8yfhin2'do3018hf3nifdm20hnoi8wyef8ssoffAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA737439g3e82y38208AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAA9f348hf98h3f983h4fui34hr23093j2ie91ipoi2-39f9AAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAo2303fgh230f92h3f92p3dh23dp9h90phr239hr2309hr3209r3AAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA""".replace("\n", "")
# Run the oracle many times; roughly half the runs pick ECB, and each ECB run
# should be flagged by the duplicate-block heuristic.
for i in range(1000):
    mode, cipher = \
        encryption_oracle(plaintext)
    guessed_mode = detectEBC(cipher, 16)
    if guessed_mode == True:
        guessed_mode = "ECB"
    else:
        guessed_mode = "CBC"
    # only report correct ECB detections (CBC runs are silently ignored)
    if mode=="ECB" and mode == guessed_mode:
        print("detected ECB")
| {
"repo_name": "8u1a/my_matasano_crypto_challenges",
"path": "set2/challenge11.py",
"copies": "1",
"size": "5574",
"license": "unlicense",
"hash": 253493021506220640,
"line_mean": 35.9139072848,
"line_max": 120,
"alpha_frac": 0.5520272695,
"autogenerated": false,
"ratio": 4.557645134914146,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5609672404414145,
"avg_score": null,
"num_lines": null
} |
__author__ = 'christianbuia'
import random
from Crypto.Cipher import AES
def pkcs7_padding(message_bytes, block_size):
    """Append PKCS#7-style padding bytes (each equal to the pad length).

    NOTE: unlike strict PKCS#7, a block-aligned message is returned
    unchanged rather than gaining a full padding block — this mirrors the
    original implementation.
    """
    remainder = len(message_bytes) % block_size
    if remainder:
        pad = block_size - remainder
        message_bytes += bytes([pad]) * pad
    return message_bytes
#-----------------------------------------------------------------------------------------------------------------------
def generateRandom16bytes():
    """Return 16 pseudo-random bytes (random module — not crypto-grade)."""
    return bytes(random.randint(0, 255) for _ in range(16))
#-----------------------------------------------------------------------------------------------------------------------
#always 16 bytes
def encrypt_aes128(message, key):
    """AES-128-ECB encrypt *message* under *key* after padding it."""
    aes = AES.new(key, AES.MODE_ECB)
    return aes.encrypt(pkcs7_padding(message, 16))
#-----------------------------------------------------------------------------------------------------------------------
#always 16 bytes
def decrypt_aes128(message, key):
    """AES-128-ECB decrypt *message* under *key*, then strip padding."""
    aes = AES.new(key, AES.MODE_ECB)
    plain = aes.decrypt(message)
    return strip_pkcs7_padding(plain, 16)
#-----------------------------------------------------------------------------------------------------------------------
def strip_pkcs7_padding(message, blocksize):
    """Strip trailing PKCS#7-style padding from *message* when present.

    The last byte is taken as the candidate pad length; padding is removed
    only when all of the last ``pad`` bytes equal ``pad``. Mirroring
    ``pkcs7_padding`` in this file, a full block of padding (pad value ==
    blocksize) is never produced and is therefore never stripped. Messages
    without valid padding are returned unchanged.

    Fixes in this version: the original used float division
    (``len(message) / blocksize``) to locate the last block and raised
    IndexError on empty input; it also re-scanned the tail once per
    candidate length. Reading the pad value straight from the last byte is
    behaviorally identical for block-aligned input (the only kind ECB/CBC
    decryption produces) and handles b"" gracefully.
    """
    if not message:
        return message
    pad = message[-1]
    # valid pad values here are 1..blocksize-1 (full-block pad never emitted)
    if 0 < pad < blocksize and message[-pad:] == bytes([pad]) * pad:
        return message[:-pad]
    return message
#-----------------------------------------------------------------------------------------------------------------------
def parseKV(message):
    """Parse a 'k1=v1&k2=v2' cookie-style string into a dict.

    Only the text between the first and second '=' of each pair is kept as
    the value, matching the original's ``split("=")[1]`` behavior; later
    duplicate keys overwrite earlier ones.
    """
    return {pair.split("=")[0]: pair.split("=")[1]
            for pair in message.split("&")}
#-----------------------------------------------------------------------------------------------------------------------
def profile_for(email_address, uid=10, role='user'):
    """Encode a user profile as 'email=...&uid=...&role=...'.

    '&' and '=' are stripped out of the email so callers cannot smuggle
    extra key/value pairs in. An address without '@' aborts the process,
    exactly like the original.
    """
    if "@" not in email_address:
        print("not a valid email...quitting.")
        exit(1)
    sanitized = email_address.replace("&", "").replace("=", "")
    return "email=%s&uid=%s&role=%s" % (sanitized, uid, role)
#-----------------------------------------------------------------------------------------------------------------------
def profile_for_encrypted(email_address, key, uid=10, role='user'):
    """Build the encoded profile string and AES-128-ECB encrypt it."""
    profile = profile_for(email_address, uid, role)
    return encrypt_aes128(bytes(profile, "ascii"), key)
#-----------------------------------------------------------------------------------------------------------------------
#***********************************************************************************************************************
# ECB cut-and-paste attack (cryptopals set 2, challenge 13): splice an
# attacker-crafted "admin" ciphertext block onto a legitimate profile.
ckey = generateRandom16bytes()
#the admin block we want looks like this:
#admin\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b
#and we can snag that by making this the beginning of our email address, e.g.:
#AAAAAAAAAAadmin\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b@bar.com
#and grabbing the second block (since the first block will be "email=AAAAAAAAAA")
#and then this can be appended to a message that is an exact multiple of block size, e.g.:
#email=cbuia@bar.com&uid=10&role=
#grab the second block of our special message, which is the admin block
admin_block = profile_for_encrypted("AAAAAAAAAAadmin\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b@bar.com", ckey)[16:32]
#get the target message we want to tamper with:
cipher_target = profile_for_encrypted("cbuia@bar.com", ckey)
#splice
# replace the final block (which holds "user" + padding) with the admin block
cipher_tampered = cipher_target[:len(cipher_target)-16] + admin_block
#test
print(decrypt_aes128(cipher_tampered, ckey).decode("ascii"))
| {
"repo_name": "8u1a/my_matasano_crypto_challenges",
"path": "set2/challenge13.py",
"copies": "1",
"size": "3792",
"license": "unlicense",
"hash": -7735995615051276000,
"line_mean": 35.4615384615,
"line_max": 120,
"alpha_frac": 0.4741561181,
"autogenerated": false,
"ratio": 3.9706806282722513,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4944836746372251,
"avg_score": null,
"num_lines": null
} |
import numpy
from heapq import *
def heuristic(a, b):
    """Squared Euclidean distance between grid points *a* and *b*."""
    dx = b[0] - a[0]
    dy = b[1] - a[1]
    return dx * dx + dy * dy
def astar(array, start, goal):
    """A*-style search on a 2-D grid (0 = free cell, 1 = wall).

    :param array: numpy 2-D array of 0/1 cells
    :param start: (row, col) start cell
    :param goal: (row, col) goal cell
    :return: (path, len(path)) with path from start to goal inclusive, or
        False when the goal is unreachable

    NOTE(review): heuristic() is the *squared* Euclidean distance and is
    also used as the per-step cost, so this is not textbook-admissible A* —
    returned paths may not be globally shortest; confirm before reuse.
    """
    #diagonal movement allowed
    neighbors = [(0,1),(0,-1),(1,0),(-1,0),(1,1),(1,-1),(-1,1),(-1,-1)]
    #without diagonal movement
    #neighbors = [(0,1),(0,-1),(1,0),(-1,0)]
    close_set = set()
    # came_from maps each reached node to its predecessor (path backtracking)
    came_from = {}
    # gscore: cost from start; fscore: gscore + heuristic-to-goal
    gscore = {start:0}
    fscore = {start:heuristic(start, goal)}
    oheap = []
    heappush(oheap, (fscore[start], start))
    while oheap:
        current = heappop(oheap)[1]
        if current == goal:
            # walk the predecessor chain back from the goal, then reverse it
            data = []
            while current in came_from:
                data.append(current)
                current = came_from[current]
            data = list(reversed(data))
            data.insert(0, start)
            #data.append(goal)
            return (data, len(data))
        close_set.add(current)
        for i, j in neighbors:
            neighbor = current[0] + i, current[1] + j
            tentative_g_score = gscore[current] + heuristic(current, neighbor)
            # bounds and wall checks: skip walls and off-grid cells
            if 0 <= neighbor[0] < array.shape[0]:
                if 0 <= neighbor[1] < array.shape[1]:
                    if array[neighbor[0]][neighbor[1]] == 1:
                        continue
                else:
                    # array bound y walls
                    continue
            else:
                # array bound x walls
                continue
            # NOTE(review): gscore.get(neighbor, 0) defaults to 0, so closed
            # nodes are never reopened and unseen nodes are admitted only via
            # the O(n) heap-membership scan below — a quirk of this
            # widely-copied snippet; confirm before relying on optimality.
            if neighbor in close_set and tentative_g_score >= gscore.get(neighbor, 0):
                continue
            if tentative_g_score < gscore.get(neighbor, 0) or neighbor not in [i[1]for i in oheap]:
                came_from[neighbor] = current
                gscore[neighbor] = tentative_g_score
                fscore[neighbor] = tentative_g_score + heuristic(neighbor, goal)
                heappush(oheap, (fscore[neighbor], neighbor))
    return False
| {
"repo_name": "OPU-Surveillance-System/monitoring",
"path": "master/scripts/planner/astar.py",
"copies": "1",
"size": "2025",
"license": "mit",
"hash": 5498926348619165000,
"line_mean": 29.223880597,
"line_max": 100,
"alpha_frac": 0.5180246914,
"autogenerated": false,
"ratio": 3.5526315789473686,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45706562703473685,
"avg_score": null,
"num_lines": null
} |
import numpy
from heapq import *
def heuristic(a, b):
    """Squared straight-line distance between the two grid points."""
    row_delta = b[0] - a[0]
    col_delta = b[1] - a[1]
    return row_delta ** 2 + col_delta ** 2
def astar(array, start, goal):
    """A*-style search on a 2-D grid (0 = free, 1 = wall), diagonals allowed.

    :return: list of cells from goal back toward (but excluding) start, or
        False when the goal is unreachable. Note the path order differs from
        the other astar variant in this file, which reverses and prepends
        start.

    NOTE(review): heuristic() is the *squared* distance, used both as step
    cost and as the heuristic — not admissible in the textbook sense, so the
    result may not be the true shortest path; confirm before reuse.
    """
    neighbors = [(0,1),(0,-1),(1,0),(-1,0),(1,1),(1,-1),(-1,1),(-1,-1)]
    close_set = set()
    # came_from maps node -> predecessor for path reconstruction
    came_from = {}
    gscore = {start:0}
    fscore = {start:heuristic(start, goal)}
    oheap = []
    heappush(oheap, (fscore[start], start))
    while oheap:
        current = heappop(oheap)[1]
        if current == goal:
            # collect the chain from goal back to (but excluding) start
            data = []
            while current in came_from:
                data.append(current)
                current = came_from[current]
            return data
        close_set.add(current)
        for i, j in neighbors:
            neighbor = current[0] + i, current[1] + j
            tentative_g_score = gscore[current] + heuristic(current, neighbor)
            # skip walls and anything outside the grid
            if 0 <= neighbor[0] < array.shape[0]:
                if 0 <= neighbor[1] < array.shape[1]:
                    if array[neighbor[0]][neighbor[1]] == 1:
                        continue
                else:
                    # array bound y walls
                    continue
            else:
                # array bound x walls
                continue
            # NOTE(review): .get(neighbor, 0) defaults to 0 — closed nodes
            # are never reopened; unseen nodes enter only via the O(n) heap
            # scan below. Common quirk of this snippet; confirm before reuse.
            if neighbor in close_set and tentative_g_score >= gscore.get(neighbor, 0):
                continue
            if tentative_g_score < gscore.get(neighbor, 0) or neighbor not in [i[1]for i in oheap]:
                came_from[neighbor] = current
                gscore[neighbor] = tentative_g_score
                fscore[neighbor] = tentative_g_score + heuristic(neighbor, goal)
                heappush(oheap, (fscore[neighbor], neighbor))
    return False
'''Here is an example of using my algo with a numpy array,
astar(array, start, destination)
astar function returns a list of points (shortest path)'''
# Demo maze: 1 = wall, 0 = free; each wall row leaves a single gap, forcing
# the search to snake back and forth across the grid.
nmap = numpy.array([
    [0,0,0,0,0,0,0,0,0,0,0,0,0,0],
    [1,1,1,1,1,1,1,1,1,1,1,1,0,1],
    [0,0,0,0,0,0,0,0,0,0,0,0,0,0],
    [1,0,1,1,1,1,1,1,1,1,1,1,1,1],
    [0,0,0,0,0,0,0,0,0,0,0,0,0,0],
    [1,1,1,1,1,1,1,1,1,1,1,1,0,1],
    [0,0,0,0,0,0,0,0,0,0,0,0,0,0],
    [1,0,1,1,1,1,1,1,1,1,1,1,1,1],
    [0,0,0,0,0,0,0,0,0,0,0,0,0,0],
    [1,1,1,1,1,1,1,1,1,1,1,1,0,1],
    [0,0,0,0,0,0,0,0,0,0,0,0,0,0]])
# NOTE: Python 2 print statement — this file targets Python 2.
print astar(nmap, (0,0), (10,13))
| {
"repo_name": "awwong1/2016-pason-coding-contest",
"path": "test_astar.py",
"copies": "2",
"size": "2483",
"license": "mit",
"hash": 4436675438566924000,
"line_mean": 30.8333333333,
"line_max": 100,
"alpha_frac": 0.4957712445,
"autogenerated": false,
"ratio": 2.740618101545254,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42363893460452545,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Christian Christelis <christian@kartoza.com>'
__date__ = '06/09/16'
__license__ = "GPL"
__copyright__ = 'kartoza.com'
# coding=utf-8
"""Model class for Occupations"""
from django.contrib.gis.db import models
from feti.models.learning_pathway import LearningPathway
class Occupation(models.Model):
    """An occupation with green-economy flags and linked learning pathways."""
    # (The original docstring, "A campus where a set of courses are offered.",
    # appears copy-pasted from another model.)
    id = models.AutoField(primary_key=True)
    # Occupation title; required (blank/null both disallowed).
    occupation = models.CharField(max_length=150, blank=False, null=False)
    green_occupation = models.BooleanField(default=False)
    green_skill = models.BooleanField(default=False)
    description = models.CharField(max_length=500)
    tasks = models.TextField(blank=True, null=True)
    occupation_regulation = models.TextField(blank=True, null=True)
    learning_pathway_description = models.TextField(blank=True, null=True)
    learning_pathways = models.ManyToManyField(
        LearningPathway)  ## limit choices to not selected pathnumbers
    class Meta:
        app_label = 'feti'
        managed = True
| {
"repo_name": "cchristelis/feti",
"path": "django_project/feti/models/occupation.py",
"copies": "1",
"size": "1032",
"license": "bsd-2-clause",
"hash": 5154062234953269000,
"line_mean": 35.8571428571,
"line_max": 74,
"alpha_frac": 0.7160852713,
"autogenerated": false,
"ratio": 3.4285714285714284,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46446566998714284,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Christian Christelis <christian@kartoza.com>'
__date__ = '10/04/16'
import json
from django.contrib import messages
from django.http import Http404, HttpResponse
from django.views.generic.edit import FormView
from healthsites.forms.assessment_form import AssessmentForm
from healthsites.utils import healthsites_clustering
from healthsites.models.healthsite import Healthsite
class HealthsitesView(FormView):
    """Form view for submitting healthsite assessments."""
    template_name = 'healthsites.html'
    form_class = AssessmentForm
    success_url = '/healthsites'
    success_message = "new event was added successfully"
    def form_valid(self, form):
        """Persist the assessment via the form, then redirect as usual."""
        form.save_form()
        return super(HealthsitesView, self).form_valid(form)
    def get_form_kwargs(self):
        """Return the default form kwargs (no extras added)."""
        kwargs = super(HealthsitesView, self).get_form_kwargs()
        return kwargs
    def get_success_message(self, cleaned_data):
        """Return the static success message regardless of form data."""
        return self.success_message
def get_cluster(request):
    """Return clustered healthsite markers for the requested bbox/zoom/iconsize.

    Raises Http404 when any of the three GET parameters is missing; non-GET
    requests fall through (returning None), as in the original.
    """
    if request.method == "GET":
        required = ('bbox', 'zoom', 'iconsize')
        if any(param not in request.GET for param in required):
            raise Http404
        icon_size = map(int, request.GET.get('iconsize').split(','))
        result = healthsites_clustering(
            request.GET['bbox'], int(request.GET['zoom']), icon_size)
        return HttpResponse(result, content_type='application/json')
def search_healthsites_name(request):
    """Autocomplete healthsite names: prefix matches first, then substring matches."""
    if request.method == 'GET':
        query = request.GET.get('q')
        prefix_matches = Healthsite.objects.filter(
            name__istartswith=query).order_by('name')
        # substring matches, excluding those already covered by the prefix set
        substring_matches = Healthsite.objects.filter(
            name__icontains=query).exclude(name__istartswith=query).order_by('name')
        names = [site.name for site in prefix_matches]
        names += [site.name for site in substring_matches]
        return HttpResponse(json.dumps(names), content_type='application/json')
def search_healthsite_by_name(request):
    """Return the coordinates of the first healthsite exactly matching ?q=name.

    The response geom is [point.y, point.x] — presumably [lat, lng]; an
    empty list is returned when no site matches.
    """
    if request.method == 'GET':
        query = request.GET.get('q')
        matches = Healthsite.objects.filter(name=query)
        geom = []
        if len(matches) > 0:
            first = matches[0]
            geom = [first.point_geometry.y, first.point_geometry.x]
        payload = json.dumps({'query': query, 'geom': geom})
        return HttpResponse(payload, content_type='application/json')
| {
"repo_name": "cchristelis/watchkeeper",
"path": "django_project/healthsites/views/healthsites_view.py",
"copies": "1",
"size": "2518",
"license": "bsd-2-clause",
"hash": 6919746382032427000,
"line_mean": 34.9714285714,
"line_max": 89,
"alpha_frac": 0.6592533757,
"autogenerated": false,
"ratio": 3.8442748091603054,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5003528184860305,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Christian Christelis <christian@kartoza.com>'
__date__ = '10/04/16'
import uuid
from django import forms
from django.contrib.gis.geos import Point
from healthsites.models.healthsite import Healthsite
from healthsites.tasks.regenerate_cache import regenerate_cache
from healthsites.models.assessment import (
AssessmentCriteria, ResultOption, AssessmentGroup, HealthsiteAssessment,
HealthsiteAssessmentEntryDropDown, HealthsiteAssessmentEntryInteger,
HealthsiteAssessmentEntryReal)
class GroupForm(forms.Form):
    """Dynamic form with one field per AssessmentCriteria in a group.

    The widget type mirrors the criterium's result_type: IntegerField,
    DecimalField, or a ModelChoiceField over that criterium's ResultOptions.
    """
    def __init__(self, *args, **kwargs):
        # pop our custom kwarg before the base Form sees the kwargs
        assessment_group = kwargs.pop('assessment_group', None)
        super(GroupForm, self).__init__(*args, **kwargs)
        criteria = AssessmentCriteria.objects.filter(
            assessment_group=assessment_group)
        for criterium in criteria:
            if criterium.result_type == 'Integer':
                self.fields[criterium.name] = forms.IntegerField(
                    required=True
                )
            elif criterium.result_type == 'Decimal':
                self.fields[criterium.name] = forms.DecimalField(
                    decimal_places=2, max_digits=9, required=True)
            elif criterium.result_type == 'DropDown':
                self.fields[criterium.name] = forms.ModelChoiceField(
                    ResultOption.objects.filter(assessment_criteria=criterium),
                    required=True)
            # elif criterium.result_type == 'MultipleChoice':
            #     self.fields[criterium.name] = forms.ModelMultipleChoiceField(
            #         queryset=ResultOption.objects.filter(
            #             assessment_criteria=criterium))
class Group(object):
    """Pairs a display name with the form rendered for that group."""
    def __init__(self, name, group_form):
        self.name = name  # group heading shown in the template
        self.group_form = group_form  # form instance carrying the fields
class AssessmentForm(forms.Form):
    """Create or update a healthsite and record an assessment for it."""
    name = forms.CharField(max_length=100, min_length=3)
    latitude = forms.CharField()
    longitude = forms.CharField()
    reference_url = forms.URLField(max_length=200, required=False)
    reference_file = forms.FileField(required=False)

    def save_form(self):
        """Look up (or create) the healthsite at the submitted point, then
        record an assessment against it."""
        name = self.cleaned_data.get('name')
        latitude = self.cleaned_data.get('latitude')
        longitude = self.cleaned_data.get('longitude')
        # NOTE(review): Point(x, y) conventionally takes (longitude, latitude);
        # latitude is passed first here — other code in this project does the
        # same, so confirm the project-wide convention before changing.
        geom = Point(
            float(latitude), float(longitude)
        )
        # find the healthsite
        try:
            healthsite = Healthsite.objects.get(name=name, point_geometry=geom)
            self.create_assessment(healthsite)
        except Healthsite.DoesNotExist:
            # generate new uuid
            tmp_uuid = uuid.uuid4().hex
            healthsite = Healthsite(name=name, point_geometry=geom, uuid=tmp_uuid, version=1)
            healthsite.save()
            # regenerate_cache.delay()
            self.create_assessment(healthsite)

    def create_assessment(self, healthsite):
        """Store one assessment entry per criterium for *healthsite*.

        Fixes a NameError in the original: the Integer and Decimal branches
        read ``chriterium.name`` instead of ``criterium.name``, so entries
        for those result types always crashed before being saved.
        """
        if 'update_button' in self.data:
            # NOTE(review): this assigns a queryset, not a single instance;
            # the entry lookups below may not behave as intended — confirm.
            healthsite_assessment = HealthsiteAssessment.objects.filter(
                healthsite=healthsite,
                current=True)
        else:
            # NOTE(review): clears 'current' on ALL assessments, not only
            # this healthsite's — confirm that is intended.
            for healthsite_assessment in HealthsiteAssessment.objects.all():
                healthsite_assessment.current = False
                healthsite_assessment.save()
            healthsite_assessment = HealthsiteAssessment(
                healthsite=healthsite)
            healthsite_assessment.save()
        criteria = AssessmentCriteria.objects.all()
        for criterium in criteria:
            entry_class = None
            selected_option = None
            if criterium.result_type == 'Integer':
                entry_class = HealthsiteAssessmentEntryInteger
                selected_option = self.cleaned_data.get(
                    criterium.name)
            elif criterium.result_type == 'Decimal':
                entry_class = HealthsiteAssessmentEntryReal
                selected_option = self.cleaned_data.get(
                    criterium.name)
            elif criterium.result_type == 'DropDown':
                entry_class = HealthsiteAssessmentEntryDropDown
                selected_option = self.cleaned_data.get(
                    criterium.name)
            # elif criterium.result_type == 'MultipleChoice':
            #     self.fields[criterium.name] = forms.ModelMultipleChoiceField(
            #         queryset=ResultOption.objects.filter(
            #             assessment_criteria=criterium))
            if not entry_class or not selected_option:
                continue
            # update an existing entry for this criterium, or create one
            try:
                entry = entry_class.objects.get(
                    healthsite_assessment=healthsite_assessment,
                    assessment_criteria=criterium)
                entry.selected_option = selected_option
                entry.save()
            except entry_class.DoesNotExist:
                entry_class.objects.create(
                    healthsite_assessment=healthsite_assessment,
                    assessment_criteria=criterium,
                    selected_option=selected_option)

    def groups(self):
        """Return the 'General' group (this form) plus one Group per AssessmentGroup."""
        groups = [Group('General', self)]
        for assessment_group in AssessmentGroup.objects.all():
            group_form = GroupForm(assessment_group=assessment_group)
            group = Group(assessment_group.name, group_form)
            groups.append(group)
        return groups
| {
"repo_name": "cchristelis/watchkeeper",
"path": "django_project/healthsites/forms/assessment_form.py",
"copies": "1",
"size": "5365",
"license": "bsd-2-clause",
"hash": 1663090584340770800,
"line_mean": 41.5793650794,
"line_max": 93,
"alpha_frac": 0.6096924511,
"autogenerated": false,
"ratio": 4.433884297520661,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5543576748620661,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Christian Christelis <christian@kartoza.com>'
__date__ = '15/04/16'
import requests
from django.core.exceptions import ObjectDoesNotExist
from django.core.management.base import BaseCommand
from django.contrib.gis.geos import Point
from healthsites.models.healthsite import Healthsite
import logging
logger = logging.getLogger(__name__)
API_URL = 'https://healthsites.io/api/v1/healthsites/facilities?page='
class Command(BaseCommand):
    """Management command: pull healthsite locations from healthsites.io."""
    help = (""
            "This script serves to synchronize healthsite locations between "
            "healthsites.io and HCID. "
            "(Currently this is a one way from healthsites.io to HCID.)")
    def ingest(self, healthsite_data):
        """Create or update one local Healthsite from an API record.

        Records are matched on uuid; the row is rewritten only when the
        remote 'version' differs from the locally stored one.
        """
        uuid = healthsite_data['uuid']
        try:
            healthsite = Healthsite.objects.get(uuid=uuid)
            version = healthsite.version
        except Healthsite.DoesNotExist:
            healthsite = Healthsite()
            healthsite.uuid = uuid
            version = None
        if version != healthsite_data['version']:
            if 'name' in healthsite_data['values']:
                name = healthsite_data['values']['name']
                # the model field is limited to 100 characters
                healthsite.name = name.strip()[:100]
            healthsite.point_geometry = Point(healthsite_data['geom'])
            healthsite.version = healthsite_data['version']
            healthsite.date = healthsite_data['date_modified']
            healthsite.save()
    def handle(self, *args, **options):
        """Get the healthsites data and add it to the DB.

        Pages through the API until a request fails or returns no rows.
        NOTE: uses Python 2 xrange — this file targets Python 2.
        """
        for count in xrange(1, 100000):
            page_url = '%s%s' % (API_URL, count)
            request = requests.get(page_url)
            logging.info(
                "%s found %s records" % (page_url, len(request.json())))
            if not request.ok:
                break
            if not request.json():
                break
            for healthsite_data in request.json():
                self.ingest(healthsite_data)
| {
"repo_name": "cchristelis/watchkeeper",
"path": "django_project/healthsites/management/commands/harvest_healthsites.py",
"copies": "1",
"size": "1983",
"license": "bsd-2-clause",
"hash": 4282295619999742500,
"line_mean": 32.05,
"line_max": 74,
"alpha_frac": 0.6011094302,
"autogenerated": false,
"ratio": 4.063524590163935,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5164634020363935,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Christian Christelis <christian@kartoza.com>'
__date__ = '21/04/16'
from django.contrib.gis.db import models
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from healthsites.models.healthsite import Healthsite
# Allowed assessment result widget types, as (stored value, display) pairs
# used by AssessmentCriteria.result_type.
RESULTOPTIONS = (
    ('DropDown', 'DropDown'),
    ('Integer', 'Integer'),
    ('Decimal', 'Decimal'),
    ('MultipleChoice', 'MultipleChoice')
)
class HealthsiteAssessment(models.Model):
    """One assessment submitted for a healthsite, with optional references."""
    healthsite = models.ForeignKey(Healthsite)
    # presumably only the latest assessment keeps current=True — see the
    # flag-clearing loop in AssessmentForm.create_assessment
    current = models.BooleanField(default=True)
    reference_url = models.URLField(max_length=200)
    reference_file = models.FileField()
    class Meta:
        app_label = 'healthsites'
class AssessmentGroup(models.Model):
    """A named, ordered group of assessment criteria."""
    name = models.CharField(
        help_text='The assessment group.',
        max_length=32,
        null=False,
        blank=False)
    # display ordering of the group
    order = models.IntegerField()
    def __unicode__(self):
        return self.name
    class Meta:
        app_label = 'healthsites'
class AssessmentCriteria(models.Model):
    """A single assessment question and the widget type used to answer it."""
    name = models.CharField(
        help_text='The assessment names',
        max_length=32,
        null=False,
        blank=False)
    assessment_group = models.ForeignKey(AssessmentGroup)
    # one of RESULTOPTIONS: DropDown / Integer / Decimal / MultipleChoice
    result_type = models.CharField(
        max_length=32,
        choices=RESULTOPTIONS,
        null=False,
        blank=False)
    # result_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
    # object_id = models.PositiveIntegerField()
    # result_object = GenericForeignKey('result_type', 'object_id')
    def __unicode__(self):
        return self.name
    class Meta:
        app_label = 'healthsites'
class ResultOption(models.Model):
    """One selectable answer for a DropDown/MultipleChoice criterium."""
    assessment_criteria = models.ForeignKey(
        AssessmentCriteria,
        limit_choices_to={
            'result_type__in': ['DropDown', 'MultipleChoice']
        })
    option = models.CharField(max_length=32)  # display label
    value = models.IntegerField()             # stored score/value
    order = models.IntegerField()             # display ordering
    def __unicode__(self):
        return self.option
    class Meta:
        app_label = 'healthsites'
class HealthsiteAssessmentEntry(models.Model):
    """Abstract base: links one answered criterium to one assessment."""
    healthsite_assessment = models.ForeignKey(HealthsiteAssessment)
    assessment_criteria = models.ForeignKey(AssessmentCriteria)
    class Meta:
        app_label = 'healthsites'
        abstract = True
class HealthsiteAssessmentEntryDropDown(HealthsiteAssessmentEntry):
    """Entry whose answer is a drop-down option (stored as text)."""
    selected_option = models.CharField(max_length=32)
    class Meta:
        app_label = 'healthsites'
class HealthsiteAssessmentEntryInteger(HealthsiteAssessmentEntry):
    """Entry whose answer is an integer value."""
    selected_option = models.IntegerField()
    class Meta:
        app_label = 'healthsites'
class HealthsiteAssessmentEntryReal(HealthsiteAssessmentEntry):
    """Entry whose answer is a decimal value (2 places, 9 digits)."""
    selected_option = models.DecimalField(decimal_places=2, max_digits=9)
    class Meta:
        app_label = 'healthsites'
| {
"repo_name": "cchristelis/watchkeeper",
"path": "django_project/healthsites/models/assessment.py",
"copies": "1",
"size": "2924",
"license": "bsd-2-clause",
"hash": 2133044081133322000,
"line_mean": 25.1071428571,
"line_max": 76,
"alpha_frac": 0.6809165527,
"autogenerated": false,
"ratio": 3.9566982408660354,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5137614793566035,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Christian Kater'
from redmine import Redmine
import random, string, re, csv, sys
# Load settings (redmine_host, redmine_rest_key) from the config file.
config = {}
# NOTE(review): execfile runs arbitrary Python from redmine.conf — keep that
# file trusted and out of reach of untrusted writers. (Python 2 builtin.)
execfile("redmine.conf", config)
redmine = Redmine(config['redmine_host'], key=config['redmine_rest_key'])
def get_key(login, parent=None):
    """Build a redmine project identifier from *login*.

    Lowercases the login and strips every character outside [a-z0-9];
    when *parent* is given, the result is prefixed with its identifier.
    """
    normalized = re.sub('[^a-z0-9]+', '', login.lower())
    if parent:
        return parent.identifier + '_' + normalized
    return normalized
def get_user(users, login, firstname, lastname, email):
    """Return the cached redmine user matching *login* or *email*, creating
    a new account (with a random 12-character password) when none matches."""
    for user in users:
        if user.login == login or user.mail == email:
            return user
    user = redmine.user.new()
    user.login = login
    user.password = generate_random_password(12)
    user.firstname = firstname
    user.lastname = lastname
    user.mail = email
    print user.mail
    user.save()
    return user
def generate_random_password(length):
    """Return a random alphanumeric password of the given length.

    Uses the ``random`` module (not cryptographically secure) to match the
    rest of this script.
    """
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choice(alphabet) for _ in range(length))
def get_project(name, login, parent=None):
key = get_key(login, parent)
try:
project = redmine.project.get(key)
except:
project = redmine.project.new()
project.name = name
project.identifier = key
project.is_public = False
if parent:
project.parent_id = parent.id
project.inherit_members = True
print project.identifier
project.save()
return project
def get_master_project(year):
    """Return (creating if needed) the 'Java SS <year>' umbrella project."""
    master_name = 'Java SS %s' % (year,)
    return get_project(master_name, master_name)
def assign_rights(project, user):
try:
redmine.project_membership.create(project_id=project.identifier, user_id=user.id, role_ids=[4])
except:
print 'user', user, 'is already assigned to project', project.identifier
def generate_student_projects(year, csv_filename):
    """Create one redmine project per student under the year's master project.

    :param year: year used to locate/create the 'Java SS <year>' master
    :param csv_filename: CSV with rows of (firstname, lastname, login, mail)

    Replaces the original manual try/finally close with a ``with`` block;
    the file is still guaranteed to close on any exception.
    """
    master_project = get_master_project(year)
    with open(csv_filename, 'rU') as csv_file:
        users = redmine.user.all()
        reader = csv.reader(csv_file)
        for firstname, lastname, login, mail in reader:
            user = get_user(users, login, firstname, lastname, mail)
            project = get_project(user.firstname + " " + user.lastname, login, master_project)
            assign_rights(project, user)
def generate_group_projects(year, csv_filename):
    """Create one shared redmine project per group under the year's master.

    :param year: year used to locate/create the 'Java SS <year>' master
    :param csv_filename: CSV with rows of (group, firstname, lastname, login, mail)

    Replaces the original manual try/finally close with a ``with`` block;
    the file is still guaranteed to close on any exception.
    """
    master_project = get_master_project(year)
    with open(csv_filename, 'rU') as csv_file:
        users = redmine.user.all()
        reader = csv.reader(csv_file)
        for group, firstname, lastname, login, mail in reader:
            user = get_user(users, login, firstname, lastname, mail)
            project = get_project(group, group, master_project)
            assign_rights(project, user)
def print_usage():
print 'usage:'
print 'Adding Student Projects: redmine_student_administration.py student <year> <student_list.csv>'
print 'Adding Group Projects: redmine_student_administration.py group <year> <student_group_list.csv>'
if __name__ == '__main__':
    # CLI entry point: <mode> <year> <csv-file>
    if len(sys.argv) < 4:
        print 'input error!'
        print_usage()
        sys.exit(1)
    # Only 'students' and 'groups' are recognized; any other mode is a no-op
    # after the argument-count check above.
    if sys.argv[1] == 'students':
        generate_student_projects(sys.argv[2], sys.argv[3])
    if sys.argv[1] == 'groups':
        generate_group_projects(sys.argv[2], sys.argv[3])
"repo_name": "ChKater/redmine-student-administration",
"path": "redmine_student_administration.py",
"copies": "1",
"size": "3261",
"license": "apache-2.0",
"hash": -6794741228925122000,
"line_mean": 32.2857142857,
"line_max": 106,
"alpha_frac": 0.6360012266,
"autogenerated": false,
"ratio": 3.552287581699346,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46882888082993457,
"avg_score": null,
"num_lines": null
} |
__author__ = "Christian Kongsgaard"
__license__ = 'MIT'
import numpy as np
import typing
def algae(relative_humidity: typing.List[float], temperature: typing.List[float], material_name, porosity, roughness,
          total_pore_area):
    """
    UNIVPM Algae Model
    Currently a dummy function!

    Evaluates hourly algae surface coverage from relative humidity and
    temperature series, with material-dependent model coefficients.

    :param relative_humidity: hourly relative humidity values (growth requires rh >= 0.98)
    :param temperature: hourly temperature values (growth requires 5 < temp < 40)
    :param material_name: material name string, mapped internally to a material
        type ('brick', 'sandstone', 'limestone' or 'other') with tuned parameters
    :param porosity: material porosity used by the fitted polynomials
    :param roughness: surface roughness used by the fitted polynomials
    :param total_pore_area: total pore area used for the growth-rate coefficient
    :return growth: list with eval. values (covered area per time step, starting
        with an initial 0 entry)
    """

    def extract_material_type(material: str):
        # Map a concrete material name onto its model category.
        # Raises KeyError for names not in the table.
        # NOTE(review): "LimeSandBrick"/"LimeSandbrick" map to 'limestone'
        # while "LimeSandBrickIndustrial"/"LimeSandBrickTraditional" map to
        # 'brick' — confirm this asymmetry is intended.
        materials = {
            "BrickBernhard": "brick",
            "BrickJoens": "brick",
            "HistoricalBrickClusterEdge": "brick",
            "HistoricalBrickCluster4DD": "brick",
            "HistoricalBrickCluster": "brick",
            "WienerbergerNormalBrick": "brick",
            "AltbauziegelDresdenZQ": "brick",
            "AltbauziegelDresdenZA": "brick",
            "AltbauziegelDresdenZC": "brick",
            "AltbauziegelDresdenZD": "brick",
            "AltbauziegelDresdenZE": "brick",
            "AltbauziegelDresdenZF": "brick",
            "AltbauziegelDresdenZG": "brick",
            "AltbauziegelDresdenZH": "brick",
            "AltbauziegelDresdenZI": "brick",
            "AltbauziegelDresdenZJ": "brick",
            "AltbauziegelDresdenZK": "brick",
            "AltbauziegelDresdenZL": "brick",
            "AltbauziegelDresdenZM": "brick",
            "AltbauziegelDresdenZN": "brick",
            "AltbauziegelDresdenZO": "brick",
            "AltbauziegelElbphilharmonie": "brick",
            "WienerbergerHochlochBrick": "brick",
            "BrickWienerberger": "brick",
            "CeramicBrick": "brick",
            "AltbauklinkerHamburgHolstenkamp": "brick",
            "AltbauziegelAmWeinbergBerlin": "brick",
            "AltbauziegelAmWeinbergBerlininside": "brick",
            "AltbauziegelAussenziegelII": "brick",
            "AltbauziegelBolonga3enCult": "brick",
            "AltbauziegelDresdenZb": "brick",
            "AltbauziegelPersiusspeicher": "brick",
            "AltbauziegelReithallePotsdamAussenziegel1": "brick",
            "AltbauziegelReithallePotsdamAussenziegel2": "brick",
            "AltbauziegelReithallePotsdamAussenziegel3": "brick",
            "AltbauziegelRoteKasernePotsdamAussenziegel1": "brick",
            "AltbauziegelRoteKasernePotsdamAussenziegel2": "brick",
            "AltbauziegelRoteKasernePotsdamInnenziegel1": "brick",
            "AltbauziegelRoteKasernePotsdamInnenziegel2": "brick",
            "AltbauziegelSchlossGueterfeldeEGAussenwand1": "brick",
            "AltbauziegelSchlossGueterfeldeEGAussenwand2": "brick",
            "AltbauziegelTivoliBerlinAussenziegel1": "brick",
            "AltbauziegelTivoliBerlinAussenziegel2": "brick",
            "AltbauziegelTivoliBerlinInnenziegel": "brick",
            "AltbauziegelUSHauptquartierBerlin": "brick",
            "ZiegelSchlagmannVollziegel": "brick",
            "ZiegelSchlagmannWDZZiegelhuelle": "brick",
            "Brick": "brick",
            "LehmbausteinUngebrannt": "brick",
            "DTUBrick": "brick",
            "LimeSandBrickIndustrial": "brick",
            "LimeSandBrickTraditional": "brick",
            "SandstoneCotta": "sandstone",
            "SandstonePosta": "sandstone",
            "SandstoneReinhardsdorf": "sandstone",
            "WeatheredGranite": "sandstone",
            "BundsandsteinrotHessen": "sandstone",
            "CarraraMamor": "sandstone",
            "KrensheimerMuschelkalk": "sandstone",
            "SandsteinBadBentheim": "sandstone",
            "SandsteinHildesheim": "sandstone",
            "SandstoneIndiaNewSaInN": "sandstone",
            "SandsteinMuehlleiteeisenhaltigeBank": "sandstone",
            "SandsteinRuethen": "sandstone",
            "SandsteinVelbke": "sandstone",
            "Tuffstein": "other",
            "TuffsteinJapan": "other",
            "limesandstone": "sandstone",
            "LimeSandBrick": "limestone",
            "XellaKalksandstein": "sandstone",
            "KalksandsteinXellaYtong2002": "other",
            "KalksandsteinXellaYtong2004": "other",
            "BundsandsteinIndienHumayunVerwittert": "sandstone",
            "CarraraMamorSkluptur": "sandstone",
            "SandstoneArholzen": "sandstone",
            "SandstoneKarlshafener": "sandstone",
            "SandstoneKrenzheimer": "sandstone",
            "SandstoneMonteMerlo": "sandstone",
            "SandstoneOberkirchner": "sandstone",
            "SandstoneSander": "sandstone",
            "SandstoneSchleerither": "sandstone",
            "LimeSandbrick": "limestone",
            "Lime Cement Plaster Light": "limestone",
            "Lime Cement Mortar(High Cement Ratio)": "sandstone",
            "Lime Cement Mortar(Low Cement Ratio)": "limestone",
            "LimeCementMortar": "limestone",
            "DTUMortar": "sandstone",
            "LimePlasterHist": "limestone"
        }

        return materials[material]

    def material_parameters(material_name):
        # Model coefficients per material type. 'brick' and 'other' keep the
        # defaults (all ones); 'sandstone' and 'limestone' override a subset.
        material_type = extract_material_type(material_name)

        default_parameters = {"alfa": 1, "beta": 1, "gamma": 1, "deltaA": 1, "etaA": 1, "lambdaA": 1, "muA": 1,
                              "deltaK": 1, "etaK": 1, "lambdaK": 1, "muK": 1}

        if material_type == 'sandstone':
            default_parameters.update({'alfa': 2, "beta": 1.724, "gamma": 0.2})
        elif material_type == 'limestone':
            default_parameters.update({'alfa': 100, "beta": 6.897, "gamma": 1.6})

        return default_parameters

    def create_a_parameters(porosity, roughness, material_parameters):
        # Coefficients (fitted constants) of the cubic temperature polynomial
        # for the coverage asymptote (the 'A' curve).
        # NOTE(review): the parameter name shadows the sibling helper
        # material_parameters(); harmless here, as only the dict is used.
        A1 = 3.8447E-4
        A2 = -4.0800E-6
        A3 = -2.1164E-4
        B1 = -2.7874E-2
        B2 = 2.95905E-4
        B3 = 1.1856E-2
        C1 = 5.5270E-1
        C2 = -5.8670E-3
        C3 = -1.4727E-1
        D1 = -2.1146
        D2 = 2.2450E-2
        D3 = 4.7041E-1

        ra = material_parameters['deltaA'] * (A1 * porosity + A2 * roughness + A3)
        sa = material_parameters['etaA'] * (B1 * porosity + B2 * roughness + B3)
        ua = material_parameters['lambdaA'] * (C1 * porosity + C2 * roughness + C3)
        va = material_parameters['muA'] * (D1 * porosity + D2 * roughness + D3)

        return ra, sa, ua, va

    def create_k_parameters(porosity, roughness, material_parameters):
        # Coefficients of the cubic temperature polynomial for the growth
        # rate (the 'K' curve).
        E1 = 8.3270E-5
        E2 = 6.7E-7
        E3 = -1.8459E-4
        F1 = -6.0378E-3
        F2 = -4.88E-5
        F3 = 9.877E-3
        G1 = 1.1971E-1
        G2 = 9.69E-4
        G3 = -1.0759E-1
        H1 = -4.5803E-1
        H2 = -3.71E-3
        H3 = 3.1809E-1

        rk = material_parameters['deltaK'] * (E1 * porosity + E2 * roughness + E3)
        sk = material_parameters['etaK'] * (F1 * porosity + F2 * roughness + F3)
        uk = material_parameters['lambdaK'] * (G1 * porosity + G2 * roughness + G3)
        vk = material_parameters['muK'] * (H1 * porosity + H2 * roughness + H3)

        return rk, sk, uk, vk

    def initial_t(roughness, gamma):
        # Latency time before growth can start. The roughness == 5.02
        # special case avoids the division by zero in the general formula.
        if roughness == 5.02:
            return 30
        else:
            return 24 * gamma * (5 / ((roughness - 5.02) ** 2))

    def create_ac_at(alfa, porosity, roughness):
        # Coverage asymptote ratio, clamped to [0, 1].
        ac_at = (1 - np.exp(-alfa * (2.48 * porosity + 0.126 * roughness) ** 4))

        if ac_at < 0:
            ac_at = 0
        elif ac_at > 1:
            ac_at = 1

        return ac_at

    def create_k_rate_coefficient(beta, porosity, total_pore_area):
        # Growth-rate coefficient from porosity and pore area, floored at 0.
        k_rate_coefficient = (1 - np.exp(-beta * ((4.49e-3 * (porosity * total_pore_area) - 5.79e-3) / 2.09) ** 2))
        k_rate_coefficient = np.max([0.0, k_rate_coefficient])

        return k_rate_coefficient

    def tau_a_func(temp, ra, sa, ua, va):
        # Temperature correction for the asymptote, clipped to [0, 1].
        tau_a = ra * temp ** 3 + sa * temp ** 2 + ua * temp + va
        return float(np.clip(tau_a, 0, 1))

    def tau_k_func(temp, rk, sk, uk, vk):
        # Temperature correction for the growth rate, clipped to [0, 1].
        tau_k = rk * temp ** 3 + sk * temp ** 2 + uk * temp + vk
        return float(np.clip(tau_k, 0, 1))

    def favourable_growth_conditions(rh, temp, time, t1):
        # Growth requires near-saturation humidity, a moderate temperature
        # window and that the latency time t1 has elapsed.
        if rh >= 0.98 and 5 < temp < 40 and time > t1:
            return True
        else:
            return False

    # Resolve material-specific coefficients once, outside the time loop.
    material = material_parameters(material_name)
    rk, sk, uk, vk = create_k_parameters(porosity, roughness, material)
    ra, sa, ua, va = create_a_parameters(porosity, roughness, material)
    t1 = initial_t(roughness, material['gamma'])
    ac_at = create_ac_at(material['alfa'], porosity, roughness)
    k_rate_coefficient = create_k_rate_coefficient(material['beta'], porosity, total_pore_area)

    # Coverage history; starts at 0 before the first time step.
    covered_area = [0, ]

    for time in range(len(temperature)):
        temp = temperature[time]
        try:
            rh = relative_humidity[time]
        except IndexError:
            # Humidity series shorter than the temperature series: stop here.
            break

        if favourable_growth_conditions(rh, temp, time, t1):
            tau_a = tau_a_func(temp, ra, sa, ua, va)
            tau_k = tau_k_func(temp, rk, sk, uk, vk)

            if covered_area[-1] < tau_a * ac_at:
                # Invert the growth curve to find the effective elapsed time
                # matching the current coverage, then advance one step along it.
                delta_t = (-(1 / (tau_k * k_rate_coefficient)) * np.log(1 - (covered_area[-1] / (tau_a * ac_at)))) ** (
                    1 / 4) - (time - 1 - t1)
                covered_area.append(
                    tau_a * ac_at * (1 - np.exp(-tau_k * k_rate_coefficient * (time + delta_t - t1) ** 4)))
            else:
                # Coverage already at its asymptote for these conditions.
                covered_area.append(covered_area[-1])
        else:
            # Unfavourable conditions: coverage stays constant.
            covered_area.append(covered_area[-1])

    return covered_area
| {
"repo_name": "thp44/delphin_6_automation",
"path": "data_process/algae_script/algae_model.py",
"copies": "1",
"size": "9472",
"license": "mit",
"hash": -6754541908867024000,
"line_mean": 39.1355932203,
"line_max": 119,
"alpha_frac": 0.5680954392,
"autogenerated": false,
"ratio": 3.0060298317994287,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4074125270999429,
"avg_score": null,
"num_lines": null
} |
__author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
import pandas as pd
from delphin_6_automation.database_interactions import mongo_setup
from delphin_6_automation.database_interactions.auth import auth_dict
from delphin_6_automation.database_interactions.db_templates import sample_entry
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
def get_data():
    """Connect to the database, fetch all raw sample entries and return them as a list.

    :return: list of SampleRaw documents
    """
    server = mongo_setup.global_init(auth_dict)

    samples = sample_entry.SampleRaw.objects()
    print(f'Got {samples.count()} samples')

    # Materialize the queryset before the connection is torn down.
    fetched = list(samples)

    mongo_setup.global_end_ssh(server)

    return fetched
# Download every raw sample sequence and dump each one to its own Excel sheet.
samples = get_data()

with pd.ExcelWriter('Sobol Samples.xlsx') as writer:  # doctest: +SKIP
    for sequence in samples:
        print(f'Exporting {sequence.sequence_number}')
        # One sheet per sequence, named after its sequence number.
        df = pd.DataFrame.from_dict(sequence.samples_raw)
        df.to_excel(writer, sheet_name=f'Sequence-{sequence.sequence_number}', index=False)
| {
"repo_name": "thp44/delphin_6_automation",
"path": "data_process/sample_check/raw_samples.py",
"copies": "1",
"size": "1132",
"license": "mit",
"hash": 692229549516555000,
"line_mean": 34.375,
"line_max": 120,
"alpha_frac": 0.5697879859,
"autogenerated": false,
"ratio": 4.042857142857143,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5112645128757143,
"avg_score": null,
"num_lines": null
} |
__author__ = "Christian Kongsgaard"
__license__ = "MIT"
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
from datetime import datetime
# Modules:
import mongoengine
# RiBuild Modules:
import delphin_6_automation.database_interactions.database_collections as collections
# -------------------------------------------------------------------------------------------------------------------- #
# RESULT CLASS
class Result(mongoengine.Document):
    """Database entry holding the raw (unprocessed) results of a Delphin simulation."""

    # Collection/alias configuration shared through database_collections.
    meta = collections.raw_result_db

    # Timestamp for when this entry was created.
    added_date = mongoengine.DateTimeField(default=datetime.now)
    # The Delphin project entry this result belongs to.
    delphin = mongoengine.GenericReferenceField(required=True)
    # Link to the processed version of this result, once it exists.
    result_processed = mongoengine.GenericReferenceField()
    # GridFS files stored under the same alias/collection as this document.
    log = mongoengine.FileField(db_alias=meta['db_alias'], collection_name=meta['collection'])
    results = mongoengine.FileField(required=True, db_alias=meta['db_alias'], collection_name=meta['collection'])
    # Parsed geometry file contents and its hash.
    geometry_file = mongoengine.DictField(required=False)
    geometry_file_hash = mongoengine.IntField(required=False)
    # Wall-clock time at which the simulation was started.
    simulation_started = mongoengine.DateTimeField(required=False)
| {
"repo_name": "thp44/delphin_6_automation",
"path": "delphin_6_automation/database_interactions/db_templates/result_raw_entry.py",
"copies": "1",
"size": "1153",
"license": "mit",
"hash": -5855873261787005000,
"line_mean": 37.4333333333,
"line_max": 120,
"alpha_frac": 0.6062445794,
"autogenerated": false,
"ratio": 4.539370078740157,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0015818359403537994,
"num_lines": 30
} |
__author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
import copy
import scipy.interpolate as ip
import numpy as np
import datetime
# RiBuild Modules
from delphin_6_automation.file_parsing import material_parser
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
def compute_hybrid_material(mat1_dict, mat2_dict, mat1_percentage, mat2_percentage):
    """Blend two Delphin material dicts into a single hybrid material dict.

    Scalar parameters are combined as weighted averages. The moisture
    storage/transport functions are resampled onto a common x-grid with cubic
    splines before averaging.

    :param mat1_dict: first material (also the template the hybrid is copied from)
    :param mat2_dict: second material
    :param mat1_percentage: weight of the first material
    :param mat2_percentage: weight of the second material
    :return: new material dict describing the hybrid
    """

    # Start from a deep copy of material 1 so all untouched fields carry over.
    hybrid = copy.deepcopy(mat1_dict)

    def average_function(quantity_1, quantity_2, percentage_1, percentage_2):
        # Weighted average of two quantities.
        return (quantity_1 * percentage_1 + quantity_2 * percentage_2) / (percentage_1 + percentage_2)

    def spline_function(function_name):
        # Fit a cubic spline through each material's (X, Y) samples ...
        spline1 = ip.splrep(mat1_dict[f'{function_name}-X'],
                            mat1_dict[f'{function_name}-Y'], s=0)
        spline2 = ip.splrep(mat2_dict[f'{function_name}-X'],
                            mat2_dict[f'{function_name}-Y'], s=0)
        # ... then evaluate both on a shared grid spanning the combined range
        # of both materials' sample points (list '+' concatenates them).
        xn = np.linspace(min(mat1_dict[f'{function_name}-X'] + mat2_dict[f'{function_name}-X']),
                         max(mat1_dict[f'{function_name}-X'] + mat2_dict[f'{function_name}-X']),
                         len(mat1_dict[f'{function_name}-X']))
        spline_eval1 = ip.splev(xn, spline1)
        spline_eval2 = ip.splev(xn, spline2)

        return spline_eval1, spline_eval2, xn

    # Info
    hybrid['IDENTIFICATION-NAME'] = f'Hybrid of {mat1_percentage}% {mat1_dict["INFO-MATERIAL_NAME"]} ' \
                                    f'and {mat2_percentage}% {mat2_dict["INFO-MATERIAL_NAME"]}'
    hybrid['INFO-FILE'] = 'hybrid_material_000.m6'
    hybrid['IDENTIFICATION-AQUISITION_ID'] = '000'
    hybrid['IDENTIFICATION-PRODUCT_ID'] = 'Hybrid'
    hybrid['IDENTIFICATION-LABORATORY'] = 'Hybrid Material. Source: Dario Bottino'
    hybrid['IDENTIFICATION-DATE'] = datetime.datetime.now().strftime('%d.%m.%y')
    hybrid['IDENTIFICATION-CATEGORY'] = 'BRICK'

    # Parameters
    # Storage Parameters
    hybrid['STORAGE_BASE_PARAMETERS-RHO'] = average_function(mat1_dict['STORAGE_BASE_PARAMETERS-RHO'],
                                                             mat2_dict['STORAGE_BASE_PARAMETERS-RHO'],
                                                             mat1_percentage, mat2_percentage)
    # Heat capacity is mass-weighted (percentage x density).
    hybrid['STORAGE_BASE_PARAMETERS-CE'] = average_function(mat1_dict['STORAGE_BASE_PARAMETERS-CE'],
                                                            mat2_dict['STORAGE_BASE_PARAMETERS-CE'],
                                                            mat1_percentage * mat1_dict['STORAGE_BASE_PARAMETERS-RHO'],
                                                            mat2_percentage * mat2_dict['STORAGE_BASE_PARAMETERS-RHO'])
    hybrid['STORAGE_BASE_PARAMETERS-THETA_POR'] = average_function(mat1_dict['STORAGE_BASE_PARAMETERS-THETA_POR'],
                                                                   mat2_dict['STORAGE_BASE_PARAMETERS-THETA_POR'],
                                                                   mat1_percentage, mat2_percentage)
    hybrid['STORAGE_BASE_PARAMETERS-THETA_EFF'] = average_function(mat1_dict['STORAGE_BASE_PARAMETERS-THETA_EFF'],
                                                                   mat2_dict['STORAGE_BASE_PARAMETERS-THETA_EFF'],
                                                                   mat1_percentage, mat2_percentage)
    hybrid['STORAGE_BASE_PARAMETERS-THETA_CAP'] = average_function(mat1_dict['STORAGE_BASE_PARAMETERS-THETA_CAP'],
                                                                   mat2_dict['STORAGE_BASE_PARAMETERS-THETA_CAP'],
                                                                   mat1_percentage, mat2_percentage)
    hybrid['STORAGE_BASE_PARAMETERS-THETA_80'] = average_function(mat1_dict['STORAGE_BASE_PARAMETERS-THETA_80'],
                                                                  mat2_dict['STORAGE_BASE_PARAMETERS-THETA_80'],
                                                                  mat1_percentage, mat2_percentage)

    # Transport Parameters
    # Thermal conductivity is also mass-weighted (percentage x density).
    hybrid['TRANSPORT_BASE_PARAMETERS-LAMBDA'] = average_function(mat1_dict['TRANSPORT_BASE_PARAMETERS-LAMBDA'],
                                                                  mat2_dict['TRANSPORT_BASE_PARAMETERS-LAMBDA'],
                                                                  mat1_percentage * mat1_dict[
                                                                      'STORAGE_BASE_PARAMETERS-RHO'],
                                                                  mat2_percentage * mat2_dict[
                                                                      'STORAGE_BASE_PARAMETERS-RHO'])
    hybrid['TRANSPORT_BASE_PARAMETERS-AW'] = average_function(mat1_dict['TRANSPORT_BASE_PARAMETERS-AW'],
                                                              mat2_dict['TRANSPORT_BASE_PARAMETERS-AW'],
                                                              mat1_percentage, mat2_percentage)
    hybrid['TRANSPORT_BASE_PARAMETERS-MEW'] = average_function(mat1_dict['TRANSPORT_BASE_PARAMETERS-MEW'],
                                                               mat2_dict['TRANSPORT_BASE_PARAMETERS-MEW'],
                                                               mat1_percentage, mat2_percentage)
    hybrid['TRANSPORT_BASE_PARAMETERS-KLEFF'] = average_function(mat1_dict['TRANSPORT_BASE_PARAMETERS-KLEFF'],
                                                                 mat2_dict['TRANSPORT_BASE_PARAMETERS-KLEFF'],
                                                                 mat1_percentage, mat2_percentage)

    # Functions
    # Storage Functions - spline_function returns (eval1, eval2, common x-grid).
    Theta_lpC = spline_function('MOISTURE_STORAGE-FUNCTION-Theta_l(pC)_de')
    hybrid['MOISTURE_STORAGE-FUNCTION-Theta_l(pC)_de-X'] = Theta_lpC[2].tolist()
    hybrid['MOISTURE_STORAGE-FUNCTION-Theta_l(pC)_de-Y'] = average_function(Theta_lpC[0], Theta_lpC[1],
                                                                            mat1_percentage, mat2_percentage).tolist()
    pCTheta_l = spline_function('MOISTURE_STORAGE-FUNCTION-pC(Theta_l)_de')
    hybrid['MOISTURE_STORAGE-FUNCTION-pC(Theta_l)_de-X'] = pCTheta_l[2].tolist()
    hybrid['MOISTURE_STORAGE-FUNCTION-pC(Theta_l)_de-Y'] = average_function(pCTheta_l[0], pCTheta_l[1],
                                                                            mat1_percentage, mat2_percentage).tolist()

    # Transport Functions
    lgKlTheta_l = spline_function('MOISTURE_TRANSPORT-FUNCTION-lgKl(Theta_l)')
    hybrid['MOISTURE_TRANSPORT-FUNCTION-lgKl(Theta_l)-X'] = lgKlTheta_l[2].tolist()
    hybrid['MOISTURE_TRANSPORT-FUNCTION-lgKl(Theta_l)-Y'] = average_function(lgKlTheta_l[0], lgKlTheta_l[1],
                                                                             mat1_percentage, mat2_percentage).tolist()
    lgKvTheta_l = spline_function('MOISTURE_TRANSPORT-FUNCTION-lgKv(Theta_l)')
    hybrid['MOISTURE_TRANSPORT-FUNCTION-lgKv(Theta_l)-X'] = lgKvTheta_l[2].tolist()
    hybrid['MOISTURE_TRANSPORT-FUNCTION-lgKv(Theta_l)-Y'] = average_function(lgKvTheta_l[0], lgKvTheta_l[1],
                                                                             mat1_percentage, mat2_percentage).tolist()

    return hybrid
def create_hybrid_material(mat1_path, mat2_path, mat1_percentage, mat2_percentage, result_path):
    """Parse two material files, blend them, and write the hybrid as a .m6 file.

    :param mat1_path: path to the first material file
    :param mat2_path: path to the second material file
    :param mat1_percentage: weight of the first material
    :param mat2_percentage: weight of the second material
    :param result_path: where to write the resulting .m6 file
    """
    first = material_parser.material_file_to_dict(mat1_path)
    second = material_parser.material_file_to_dict(mat2_path)

    blended = compute_hybrid_material(first, second, mat1_percentage, mat2_percentage)
    material_parser.dict_to_m6({'material_data': blended}, result_path)
| {
"repo_name": "thp44/delphin_6_automation",
"path": "delphin_6_automation/delphin_setup/material_modelling.py",
"copies": "1",
"size": "7790",
"license": "mit",
"hash": 4314642199278446000,
"line_mean": 59.859375,
"line_max": 120,
"alpha_frac": 0.5163029525,
"autogenerated": false,
"ratio": 3.9805825242718447,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49968854767718446,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Christian Kongsgaard'
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules:
import logging
import os
# RiBuild Modules:
# -------------------------------------------------------------------------------------------------------------------- #
# LOGGERS
# Cache of already-configured loggers, keyed by name, so repeated calls return
# the same instance instead of attaching duplicate handlers.
loggers = {}


def ribuild_logger(name: str = 'delphin_6_automation') -> logging.Logger:
    """Return a configured logger, creating and caching it on first use.

    The logger writes DEBUG-level records to '<source_folder>/<name>.log'
    (the file is recreated on each fresh configuration) and INFO-level
    records to the console.

    :param name: Name of the logger; also used as the log file name.
    :return: The configured logging.Logger instance.
    """
    global loggers

    # When frozen with PyInstaller, _MEIPASS2 points at the unpacked bundle;
    # otherwise fall back to the current working directory.
    source_folder = os.environ.get("_MEIPASS2", os.path.abspath("."))

    # Single dict lookup instead of the previous double loggers.get(name).
    if name in loggers:
        return loggers[name]

    # create logger
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(filename)s - %(funcName)s - %(message)s')

    # Start each run with a fresh log file; ignore failures when another
    # process still holds the file open.
    if os.path.exists(f'{source_folder}/{name}.log'):
        try:
            os.remove(f'{source_folder}/{name}.log')
        except PermissionError:
            pass

    # File Handler - everything at DEBUG and above goes to the log file.
    fh = logging.FileHandler(f'{source_folder}/{name}.log')
    fh.setLevel(logging.DEBUG)
    fh.setFormatter(formatter)
    logger.addHandler(fh)

    # Stream Handler - INFO and above goes to the console.
    sh = logging.StreamHandler()
    sh.setFormatter(formatter)
    sh.setLevel(logging.INFO)
    logger.addHandler(sh)

    loggers[name] = logger

    return logger
| {
"repo_name": "thp44/delphin_6_automation",
"path": "delphin_6_automation/logging/ribuild_logger.py",
"copies": "1",
"size": "1475",
"license": "mit",
"hash": 5566476617321507000,
"line_mean": 26.8301886792,
"line_max": 120,
"alpha_frac": 0.5003389831,
"autogenerated": false,
"ratio": 4.46969696969697,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0011776513241319932,
"num_lines": 53
} |
__author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules:
import lxml.etree as et
import xmltodict
import datetime
import os
import shutil
import bson
import typing
import numpy as np
# RiBuild Modules:
import delphin_6_automation.database_interactions.db_templates.result_raw_entry as result_db
from delphin_6_automation.logging.ribuild_logger import ribuild_logger
# Logger
logger = ribuild_logger()
# -------------------------------------------------------------------------------------------------------------------- #
# DELPHIN FUNCTIONS AND CLASSES
def dp6_to_dict(path: str) -> dict:
    """
    Converts a Delphin 6 project file to a python dict.

    :param path: Path to the Delphin project file
    :return: Dictionary with the project file information
    """
    # Serialize the parsed XML tree back to a string, then let xmltodict
    # turn it into a nested dictionary.
    root = et.parse(path).getroot()
    parsed = xmltodict.parse(et.tostring(root), encoding='UTF-8')

    return dict(parsed)
def d6o_to_dict(path: str, filename: str, number_of_hours: typing.Optional[int]=None)-> typing.Tuple[list, dict]:
    """
    Converts a Delphin results file into a dict.

    :param path: path to folder
    :param filename: file name with extension
    :param number_of_hours: expected number of hourly values; when given,
        missing hours are filled in by linear interpolation
    :return: tuple of (result values list, metadata dict)
    """

    # Helper functions
    def d6o(d6o_lines, length):
        # Parse metadata (lines 0-13) and hourly data (line 15 onwards).
        # NOTE(review): the body mostly reads the enclosing 'lines' variable
        # instead of the 'd6o_lines' parameter; both refer to the same list
        # at the single call site below, so behavior is unaffected.
        meta_dict_ = dict()

        # First line carries the file-format marker, e.g. 'D6OARLZ! <version>'.
        meta_dict_['D6OARLZ'] = lines[0].split(' ')[-1].strip()

        # Lines 1-13 are 'key = value' metadata entries.
        for i in range(1, 14):
            line = lines[i].split('=')
            name = line[0].strip().lower()

            if name == 'created':
                value = datetime.datetime.strptime(line[1][1:-1], '%a %b %d %H:%M:%S %Y')
                meta_dict_[name] = value

            elif name == 'geo_file' or name == 'geo_file_hash':
                value = line[1][1:-1]
                meta_dict_[name] = value

            elif name == 'indices':
                value = [int(i)
                         for i in line[1].strip().split(' ')]
                meta_dict_[name] = value

            else:
                value = line[1].strip()
                meta_dict_[name] = value

        result_values = list()
        result_times = set()

        # Data rows: '<hour>\t<value>'.
        for j in range(15, len(d6o_lines)):
            line = lines[j].strip().split('\t')

            try:
                hour = int(line[0].strip())
            except ValueError:
                # Some files contain NUL padding; strip it before parsing,
                # and skip the row entirely if nothing remains.
                hour = line[0].strip('\x00').strip()
                if hour:
                    hour = int(hour)
                else:
                    continue

            # Keep only the first value seen for each hour.
            if hour not in result_times:
                result_times.add(hour)
                result_values.append((hour, float(line[1].strip())))
            else:
                logger.debug(f'Hour {hour} already in result file: (unknown). Duplicate value is not saved')

        if not length:
            # No expected length given: just return the values sorted by hour.
            result_values.sort(key=lambda x: x[0])
            sorted_values = [v[1] for v in result_values]
            return sorted_values, meta_dict_
        else:
            return check_values(result_values, length), meta_dict_

    def check_values(values: typing.List[tuple], length: int) -> typing.List[float]:
        # Sort the (hour, value) pairs and fill any missing hours by
        # linear interpolation so the result has 'length' entries.
        values.sort(key=lambda x: x[0])
        sorted_values = [v[1] for v in values]

        if length == values[-1][0]-1:
            # Series already complete: last hour matches the expected length.
            return sorted_values
        else:
            # Insert NaN placeholders at every missing hour ...
            hours = [v[0] for v in values]

            for i in range(length):
                try:
                    if not i == hours[i]:
                        hours.insert(i, i)
                        sorted_values.insert(i, np.nan)
                except IndexError:
                    hours.append(i)
                    sorted_values.append(np.nan)

            # ... then fill the gaps with np.interp over the known points.
            sorted_values = np.array(sorted_values)
            correct_values = ~np.isnan(sorted_values)
            xp = correct_values.ravel().nonzero()[0]
            fp = sorted_values[correct_values]
            x = np.isnan(sorted_values).ravel().nonzero()[0]
            sorted_values[np.isnan(sorted_values)] = np.interp(x, xp, fp)

            return list(sorted_values)

    file_obj = open(os.path.join(path, filename), 'r')
    lines = file_obj.readlines()
    file_obj.close()

    result_dict, meta_dict = d6o(lines, number_of_hours)

    return result_dict, meta_dict
def g6a_to_dict(path: str, filename: str)-> dict:
    """
    Converts a Delphin geometry file into a dict.

    :param path: path to folder
    :param filename: file name with extension
    :return: converted geometry dict
    """

    # 'with' guarantees the handle is closed even if parsing below raises
    # (the previous open/readlines/close leaked it on error).
    with open(os.path.join(path, filename), 'r') as file_obj:
        lines = file_obj.readlines()

    # Search for key points - the start index of each table section.
    tables = {}
    for index, line in enumerate(lines):
        if line.startswith('TABLE MATERIALS'):
            tables['materials'] = index
        elif line.startswith('TABLE GRID'):
            tables['grid'] = index
        elif line.startswith('TABLE ELEMENT_GEOMETRY'):
            tables['element_geometry'] = index
        elif line.startswith('TABLE SIDES_GEOMETRY'):
            tables['sides_geometry'] = index

    geometry_name = filename[:-4]
    geometry_dict = dict()
    geometry_dict['name'] = geometry_name
    # File-format marker, e.g. 'D6GARLZ! <version>'.
    geometry_dict['D6GARLZ'] = lines[0].split(' ')[-1].strip()
    geometry_dict['materials'] = []

    # get materials - fixed-width columns: number [0:3], hash [11:21], name [22:].
    for i in range(2, tables['grid']-1):
        number = int(lines[i][:3])
        hash_ = int(lines[i][11:21])
        name = lines[i][22:-1]
        geometry_dict['materials'].append([number, hash_, name])

    # get grid - three space-separated coordinate lines (x, y, z).
    geometry_dict['grid'] = {}
    geometry_dict['grid']['x'] = [float(x)
                                  for x in lines[(tables['grid']+1)].strip().split(' ')]
    geometry_dict['grid']['y'] = [float(y)
                                  for y in lines[tables['grid'] + 2].strip().split(' ')]
    geometry_dict['grid']['z'] = [float(z)
                                  for z in lines[tables['grid'] + 3].strip().split(' ')]

    # get element geometry - rows of floats, ignoring padding tokens.
    geometry_dict['element_geometry'] = []
    for j in range(tables['element_geometry']+1, tables['sides_geometry']-1):
        line = lines[j].split(' ')
        geometry_line = []
        for element in line:
            if element == '' or element == '\t':
                pass
            else:
                geometry_line.append(float(element.strip()))
        geometry_dict['element_geometry'].append(geometry_line)

    # get sides geometry - rows of floats up to (but excluding) the last line.
    geometry_dict['sides_geometry'] = []
    for k in range(tables['sides_geometry'] + 1, len(lines) - 1):
        line = lines[k].split(' ')
        side_line = []
        for element in line:
            if element == '' or element == '\t' or element == '\n':
                pass
            else:
                side_line.append(float(element.strip()))
        geometry_dict['sides_geometry'].append(side_line)

    return geometry_dict
def cvode_stats_to_dict(path: str) -> dict:
    """
    Converts a Delphin integrator_cvode_stats file into a dict.

    :param path: path to folder
    :return: converted tsv dict
    """

    # 'with' closes the handle even if a malformed row raises below.
    with open(path + '/integrator_cvode_stats.tsv', 'r') as file_obj:
        lines = file_obj.readlines()

    tsv_dict = {'time': [],
                'steps': [],
                'rhs_evaluations': [],
                'lin_setups': [],
                'number_iterations': [],
                'number_conversion_fails': [],
                'number_error_fails': [],
                'order': [],
                'step_size': []}

    # Skip the header line; each data row is tab-separated.
    for i in range(1, len(lines)):
        line = lines[i].split('\t')
        tsv_dict['time'].append(float(line[0].strip()))
        tsv_dict['steps'].append(int(line[1].strip()))
        tsv_dict['rhs_evaluations'].append(int(line[2].strip()))
        tsv_dict['lin_setups'].append(int(line[3].strip()))
        tsv_dict['number_iterations'].append(int(line[4].strip()))
        tsv_dict['number_conversion_fails'].append(int(line[5].strip()))
        tsv_dict['number_error_fails'].append(int(line[6].strip()))
        tsv_dict['order'].append(int(line[7].strip()))
        tsv_dict['step_size'].append(float(line[8].strip()))

    return tsv_dict
def les_stats_to_dict(path: str) -> dict:
    """
    Converts a Delphin LES_direct_stats file into a dict.

    :param path: path to folder
    :return: converted les stats dict
    """

    # 'with' closes the handle even if a malformed row raises below.
    with open(path + '/LES_direct_stats.tsv', 'r') as file_obj:
        lines = file_obj.readlines()

    les_dict = {'time': [],
                'number_jacobian_evaluations': [],
                'number_rhs_evaluations': []
                }

    # Skip the header; columns are space-padded, so drop empty tokens.
    for i in range(1, len(lines)):
        line = lines[i].split(' ')
        placeholder = []
        for element in line:
            if element == '':
                pass
            else:
                placeholder.append(element)

        les_dict['time'].append(float(placeholder[0].strip()))
        les_dict['number_jacobian_evaluations'].append(int(placeholder[1].strip()))
        les_dict['number_rhs_evaluations'].append(int(placeholder[2].strip()))

    return les_dict
def progress_to_dict(path: str) -> dict:
    """
    Converts a Delphin progress file into a dict.

    :param path: path to folder
    :return: converted progress dict
    """

    # 'with' closes the handle even if a malformed row raises below.
    with open(path + '/progress.txt', 'r') as file_obj:
        lines = file_obj.readlines()

    progress_dict = {'simulation_time': [],
                     'real_time': [],
                     'percentage': []
                     }

    # Skip the header line; each data row is tab-separated.
    for i in range(1, len(lines)):
        line = lines[i].split('\t')
        progress_dict['simulation_time'].append(int(line[0].strip()))
        progress_dict['real_time'].append(float(line[1].strip()))
        progress_dict['percentage'].append(float(line[2].strip()))

    return progress_dict
def dict_to_progress_file(file_dict: dict, log_path: str) -> bool:
    """
    Turns a dictionary into a delphin progress file.

    :param file_dict: Dictionary holding the information for the progress file
    :param log_path: Path to were the progress file should be written
    :return: True
    """

    # 'with' guarantees the handle is closed even if a row fails to format.
    with open(log_path + '/progress.txt', 'w') as file_obj:
        spaces = 15
        file_obj.write(' Simtime [s] \t Realtime [s]\t Percentage [%]\n')

        for line_index in range(0, len(file_dict['simulation_time'])):
            # Each column is right-aligned to 'spaces' characters.
            sim_string = ' ' * (spaces - len(str(file_dict['simulation_time'][line_index]))) + \
                         str(file_dict['simulation_time'][line_index])
            real_string = ' ' * (spaces - len(str(file_dict['real_time'][line_index]))) + \
                          str(file_dict['real_time'][line_index])

            # Percentage formatting mirrors what Delphin writes: 100 is
            # emitted as '1e+02', whole numbers without a decimal point.
            if int(file_dict['percentage'][line_index]) == 100:
                percentage_string = ' ' * (spaces - len('1e+02')) + '1e+02'
            elif file_dict['percentage'][line_index] == int(file_dict['percentage'][line_index]):
                percentage_string = ' ' * (spaces - len(str(int(file_dict['percentage'][line_index])))) + \
                                    str(int(file_dict['percentage'][line_index]))
            else:
                percentage_string = ' ' * (spaces - len(str(file_dict['percentage'][line_index]))) + \
                                    str(file_dict['percentage'][line_index])

            file_obj.write(sim_string + '\t' + real_string + '\t' + percentage_string + '\n')

    return True
def dict_to_cvode_stats_file(file_dict: dict, log_path: str) -> bool:
    """
    Turns a dictionary into a delphin cvode stats file.

    :param file_dict: Dictionary holding the information for the cvode stats file
    :param log_path: Path to were the cvode stats file should be written
    :return: True
    """

    # 'with' guarantees the handle is closed even if a row fails to format.
    with open(log_path + '/integrator_cvode_stats.tsv', 'w') as file_obj:
        file_obj.write(' Time [s]\t Steps\t RhsEvals\t LinSetups\t NIters\t NConvFails\t NErrFails\t'
                       ' Order\t StepSize [s]\n')

        # Each column is right-aligned to its fixed width by space padding.
        for line_index in range(0, len(file_dict['time'])):
            time_string = ' ' * (25 - len(str("{:.10f}".format(file_dict['time'][line_index])))) + \
                          str("{:.10f}".format(file_dict['time'][line_index]))
            steps_string = ' ' * (10 - len(str(file_dict['steps'][line_index]))) + \
                           str(file_dict['steps'][line_index])
            rhs_string = ' ' * (10 - len(str(file_dict['rhs_evaluations'][line_index]))) + \
                         str(file_dict['rhs_evaluations'][line_index])
            lin_string = ' ' * (10 - len(str(file_dict['lin_setups'][line_index]))) + \
                         str(file_dict['lin_setups'][line_index])
            iterations_string = ' ' * (8 - len(str(file_dict['number_iterations'][line_index]))) + \
                                str(file_dict['number_iterations'][line_index])
            conversion_fails_string = ' ' * (11 - len(str(file_dict['number_conversion_fails'][line_index]))) + \
                                      str(file_dict['number_conversion_fails'][line_index])
            error_fails_string = ' ' * (11 - len(str(file_dict['number_error_fails'][line_index]))) + \
                                 str(file_dict['number_error_fails'][line_index])
            order_string = ' ' * (6 - len(str(file_dict['order'][line_index]))) + \
                           str(file_dict['order'][line_index])
            step_size_string = ' ' * (14 - len(str("{:.6f}".format(file_dict['step_size'][line_index])))) + \
                               str("{:.6f}".format(file_dict['step_size'][line_index]))

            file_obj.write(time_string + '\t' + steps_string + '\t' + rhs_string + '\t' + lin_string + '\t'
                           + iterations_string + '\t' + conversion_fails_string + '\t' + error_fails_string + '\t'
                           + order_string + '\t' + step_size_string + '\n')

    return True
def dict_to_les_stats_file(file_dict: dict, log_path: str) -> bool:
    """
    Turns a dictionary into a delphin les stats file.

    :param file_dict: Dictionary holding the information for the les stats file
    :param log_path: Path to were the les stats file should be written
    :return: True
    """

    # 'with' guarantees the handle is closed even if a row fails to format.
    with open(log_path + '/LES_direct_stats.tsv', 'w') as file_obj:
        file_obj.write(' Time\t NJacEvals\t NRhsEvals\n')

        for line_index in range(0, len(file_dict['time'])):
            # Columns are right-aligned by space padding; note the original
            # format puts a tab only after the time column - the two counter
            # columns are separated by their padding alone.
            time_string = ' ' * (25 - len(str("{:.10f}".format(file_dict['time'][line_index])))) + \
                          str("{:.10f}".format(file_dict['time'][line_index]))
            jac_string = ' ' * (13 - len(str(file_dict['number_jacobian_evaluations'][line_index]))) + \
                         str(file_dict['number_jacobian_evaluations'][line_index])
            rhs_string = ' ' * (13 - len(str(file_dict['number_rhs_evaluations'][line_index]))) + \
                         str(file_dict['number_rhs_evaluations'][line_index])

            file_obj.write(time_string + '\t' + jac_string + rhs_string + '\n')

    return True
def write_log_files(result_obj: result_db.Result, download_path: str) -> bool:
    """
    Turns a result database entry into delphin log files.

    :param result_obj: Database entry
    :param download_path: Path to were the log files should be written
    :return: True
    """
    decoded_log: dict = bson.BSON.decode(result_obj.log.read())
    log_path = download_path + '/log'

    # Always start from an empty log directory.
    if os.path.exists(log_path):
        shutil.rmtree(log_path)
    os.mkdir(log_path)

    # Dispatch each stored log section to the matching file writer.
    for key, entry in decoded_log.items():
        if key == 'progress':
            dict_to_progress_file(entry, log_path)
        elif key.startswith('integrator'):
            dict_to_cvode_stats_file(entry, log_path)
        elif key.startswith('les_'):
            dict_to_les_stats_file(entry, log_path)

    return True
def dict_to_g6a(geometry_dict: dict, result_path: str) -> bool:
    """
    Turns a dictionary into a delphin geometry file.

    :param geometry_dict: Dictionary holding the information for the geometry file
    :param result_path: Path to were the geometry file should be written
    :return: True
    """

    def format_number(value) -> str:
        # Delphin writes whole numbers without a decimal part.
        return str(int(value)) if value == int(value) else str(value)

    with open(result_path + '/' + geometry_dict['name'] + '.g6a', 'w') as file_obj:
        file_obj.write('D6GARLZ! ' + str(geometry_dict['D6GARLZ']) + '\n')

        file_obj.write('TABLE MATERIALS\n')
        for material in geometry_dict['materials']:
            # Bug fix: the newline used to be concatenated *inside* str(), i.e.
            # str(material[2] + '\n'), which raises TypeError for numeric ids.
            file_obj.write(str(material[0]) + ' ' + str(material[1]) + ' ' + str(material[2]) + '\n')

        file_obj.write('\nTABLE GRID\n')
        for dimension in geometry_dict['grid']:
            row = ' '.join(format_number(element_)
                           for element_ in geometry_dict['grid'][dimension])
            # The x- and y-rows carry a trailing space; the z-row does not.
            if not dimension == 'z':
                file_obj.write(row + ' \n')
            else:
                file_obj.write(row + '\n')

        file_obj.write('\nTABLE ELEMENT_GEOMETRY\n')
        for element in geometry_dict['element_geometry']:
            # Space widths replicate Delphin's fixed-width column layout.
            space0 = ' ' * (9 - len(str(int(element[0]))))
            space1 = ' ' * max((10 - len(str(element[1]))), 1)
            space2 = ' ' * (29 - len(str(int(element[0])) + space0 + str(element[1]) + space1 + str(element[2])))
            space3 = ' ' * (6 - len(str(int(element[3]))))
            space4 = ' ' * (6 - len(str(int(element[4]))))
            file_obj.write(str(int(element[0])) + space0 + str(element[1]) + space1 + str(element[2]) + space2 +
                           '\t ' + str(int(element[3])) + space3 + str(int(element[4])) + space4 +
                           str(int(element[5])) + '\n')

        file_obj.write('\nTABLE SIDES_GEOMETRY\n')
        for side in geometry_dict['sides_geometry']:
            # Normalize whole-number widths in place before measuring them.
            if side[1] == int(side[1]):
                side[1] = int(side[1])
            space0 = ' ' * (9 - len(str(int(side[0]))))
            space1 = ' ' * max((10 - len(str(side[1]))), 1)
            space2 = ' ' * (29 - len(str(int(side[0])) + space0 + str(side[1]) + space1 + str(side[2])))
            space3 = ' ' * (7 - len(str(int(side[3]))))
            space4 = ' ' * (7 - len(str(int(side[4]))))
            space5 = ' ' * 4
            file_obj.write(str(int(side[0])) + space0 + str(side[1]) + space1 + str(side[2]) + space2 + '\t ' +
                           str(int(side[3])) + space3 + str(int(side[4])) + space4 + str(int(side[5])) +
                           space5 + '\n')

        file_obj.write('\n')

    return True
def dict_to_d6o(result_dict: dict, result_path: str, simulation_start: datetime.datetime,
                geometry_file_name: str, geometry_file_hash: int) -> bool:
    """
    Turns a dictionary into a delphin result file.

    :param result_dict: Dictionary representation of the database entry
    :param result_path: Path to were the result file should be written
    :param simulation_start: Start time for the simulation
    :param geometry_file_name: Name of the geometry file
    :param geometry_file_hash: Hash of the geometry file
    :return: True
    """
    meta = result_dict['meta']

    with open(result_path + '.d6o', 'w') as out_file:
        # Header block.
        out_file.write(f"D6OARLZ! {meta['D6OARLZ']}\n")
        out_file.write(f"TYPE = {meta['type']}\n")
        out_file.write(f"PROJECT_FILE = {meta['project_file']}\n")
        out_file.write(f"CREATED = {simulation_start.strftime('%a %b %d %H:%M:%S %Y')}\n")
        out_file.write(f"GEO_FILE = {geometry_file_name}.g6a\n")
        out_file.write(f"GEO_FILE_HASH = {geometry_file_hash}\n")
        out_file.write(f"QUANTITY = {meta['quantity']}\n")
        out_file.write(f"QUANTITY_KW = {meta['quantity_kw']}\n")
        out_file.write(f"SPACE_TYPE = {meta['space_type']}\n")
        out_file.write(f"TIME_TYPE = {meta['time_type']}\n")
        out_file.write(f"VALUE_UNIT = {meta['value_unit']}\n")
        out_file.write(f"TIME_UNIT = {meta['time_unit']}\n")
        out_file.write(f"START_YEAR = {meta['start_year']}\n")
        out_file.write('INDICES = ' + ' '.join(str(i) for i in meta['indices']) + ' \n\n')

        # Data block: one fixed-width "<hour index>\t<value>\t" row per value.
        for count, value in enumerate(result_dict['result']):
            # Whole numbers are written without a decimal part.
            if value == int(value):
                value = int(value)
            index_pad = ' ' * (13 - len(str(count)))
            value_pad = ' ' * (15 - len(str(value)))
            out_file.write(str(count) + index_pad + '\t' + str(value) + value_pad + '\t\n')

    return True
def restart_data(folder: str) -> typing.Dict[str, bytes]:
    """Read the Delphin restart files in *folder* and return their raw bytes.

    The mandatory ``restart.bin`` is always read; the optional
    ``restart.bin.tmp`` maps to ``None`` when it does not exist.
    """
    def _read(path):
        with open(path, 'rb') as handle:
            return handle.read()

    tmp_path = os.path.join(folder, 'restart.bin.tmp')
    return {
        'bin_data': _read(os.path.join(folder, 'restart.bin')),
        'tmp_data': _read(tmp_path) if os.path.exists(tmp_path) else None,
    }
def restart_data_to_file(folder: str, restart_dict: typing.Dict[str, bytes]) -> None:
    """Write restart data produced by :func:`restart_data` back into *folder*.

    ``restart.bin`` is always written; ``restart.bin.tmp`` only when the
    dictionary carries truthy data for it.
    """
    def _dump(path, payload):
        # Coerce non-bytes payloads (e.g. lists of ints) before writing.
        if not isinstance(payload, bytes):
            payload = bytearray(payload)
        with open(path, 'wb') as handle:
            handle.write(payload)

    _dump(os.path.join(folder, 'restart.bin'), restart_dict['bin_data'])
    if restart_dict['tmp_data']:
        _dump(os.path.join(folder, 'restart.bin.tmp'), restart_dict['tmp_data'])
    return None
| {
"repo_name": "thp44/delphin_6_automation",
"path": "delphin_6_automation/file_parsing/delphin_parser.py",
"copies": "1",
"size": "21944",
"license": "mit",
"hash": -4771985528817925000,
"line_mean": 34.5080906149,
"line_max": 120,
"alpha_frac": 0.5464363835,
"autogenerated": false,
"ratio": 3.520616075725975,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4567052459225975,
"avg_score": null,
"num_lines": null
} |
__author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
import matplotlib.pyplot as plt
import numpy as np
import os
import datetime
import matplotlib.dates as mdates
import pandas as pd
# RiBuild Modules
from delphin_6_automation.file_parsing import delphin_parser
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
# Plot colours: one per 2D probe position (top/mid/bottom) plus the two
# 1D reference simulations (brick and mortar).
colors = {'top': '#FBBA00', 'mid': '#B81A5D', 'bottom': '#79C6C0', '1d_brick': '#000000', '1d_mortar': '#BDCCD4'}

# Maps each study case to the database ids of its three Delphin projects:
# a 1D brick simulation, a 1D mortar simulation and the full 2D simulation.
# NOTE(review): several cases share the same 1D project ids — presumably
# intentional reuse of identical 1D setups; confirm against the database.
project_dict = {'dresden_zp_high_ratio_uninsulated_4a':
                    {'map':
                         {'5ad9e0352e2cb22f2c4f15b4': 'brick_1d',
                          '5ad9e3bf2e2cb22f2c4f166b': 'mortar_1d',
                          '5adb0a102e2cb22f2c4f17e9': '2d'}
                     },
                'dresden_zd_high_ratio_uninsulated_4a':
                    {'map':
                         {'5ad9e0ba2e2cb22f2c4f15f1': 'brick_1d',
                          '5ad9e3bf2e2cb22f2c4f166b': 'mortar_1d',
                          '5adb2dc02e2cb22f2c4f1873': '2d'}
                     },
                'potsdam_high_ratio_uninsulated_4a':
                    {'map':
                         {'5ad9e3462e2cb22f2c4f162e': 'brick_1d',
                          '5ad9e3bf2e2cb22f2c4f166b': 'mortar_1d',
                          '5adcc9702e2cb22f2c4f18fd': '2d'}
                     },
                'dresden_zp_low_ratio_uninsulated_4a':
                    {'map':
                         {'5ad9e6192e2cb22f2c4f175f': 'brick_1d',
                          '5ad9e5812e2cb22f2c4f1722': 'mortar_1d',
                          '5adda7172e2cb20baca57c6e': '2d'}
                     },
                'dresden_zd_low_ratio_uninsulated_4a':
                    {'map':
                         {'5ad9e44f2e2cb22f2c4f16a8': 'brick_1d',
                          '5ad9e5812e2cb22f2c4f1722': 'mortar_1d',
                          '5adcd4402e2cb22f2c4f1987': '2d'}
                     },
                'potsdam_low_ratio_uninsulated_4a':
                    {'map': {'5ad9e4f22e2cb22f2c4f16e5': 'brick_1d',
                             '5ad9e5812e2cb22f2c4f1722': 'mortar_1d',
                             '5add9b902e2cb20baca57be4': '2d'}
                     },
                'dresden_zp_high_ratio_insulated_4a':
                    {'map': {'5ae824252e2cb22d48db5955': 'brick_1d',
                             '5ae82c222e2cb2156000902b': 'mortar_1d',
                             '5ae355cf2e2cb2201055c1a4': '2d'}
                     },
                'dresden_zd_high_ratio_insulated_4a':
                    {'map': {'5ae824d82e2cb22d48db5998': 'brick_1d',
                             '5ae82c222e2cb2156000902b': 'mortar_1d',
                             '5ae398f12e2cb2201055c263': '2d'}
                     },
                'potsdam_high_ratio_insulated_4a':
                    {'map':
                         {'5ae82bac2e2cb21560008fe8': 'brick_1d',
                          '5ae82c222e2cb2156000902b': 'mortar_1d',
                          '5ae6ca982e2cb2201055c322': '2d'}
                     },
                'dresden_zp_low_ratio_insulated_4a':
                    {'map':
                         {'5ae82e5d2e2cb21560009137': 'brick_1d',
                          '5ae82dc02e2cb215600090f4': 'mortar_1d',
                          '5ae6fdbf2e2cb20d5891272f': '2d'}
                     },
                'dresden_zd_low_ratio_insulated_4a':
                    {'map':
                         {'5ae82cb12e2cb2156000906e': 'brick_1d',
                          '5ae82dc02e2cb215600090f4': 'mortar_1d',
                          '5ae6d9bf2e2cb2201055c3e1': '2d'}
                     },
                'potsdam_low_ratio_insulated_4a':
                    {'map':
                         {'5ae82d3b2e2cb215600090b1': 'brick_1d',
                          '5ae82dc02e2cb215600090f4': 'mortar_1d',
                          '5ae6edaf2e2cb20d58912670': '2d'}
                     },
                }
# Root folder holding one "<project id>/results" folder per simulation.
result_folder = r'U:\RIBuild\2D_1D\Results'
# Result file read for every project.
files = ['relative humidity profile.d6o']
# Functions
def get_points(result: dict, geo: dict):
    """Return one {'cell', 'x', 'y'} dict per output index in *result*.

    Coordinates come from columns 1 and 2 of the geometry's element table.
    """
    geometry = geo['element_geometry']
    return [{'cell': cell, 'x': geometry[cell][1], 'y': geometry[cell][2]}
            for cell in result['indices']]
def add_data_to_points(points: list, results: dict, result_name: str):
    """Attach each result series to the matching point, dropping the first year.

    Result keys look like ``cell_<index>``; the series is stored as a numpy
    array under *result_name* on the point whose ``'cell'`` equals that index.
    The first 8760 hourly values (spin-up year) are discarded.
    """
    # Index points by cell once instead of scanning the list per series;
    # setdefault keeps the first occurrence, like the original break did.
    by_cell = {}
    for point in points:
        by_cell.setdefault(point['cell'], point)

    for key, series in results['result'].items():
        target = by_cell.get(int(key.split('_')[1]))
        if target is not None:
            target[result_name] = np.array(series[8760:])
# Application
def main(project_):
    """Compare the 1D brick/mortar simulations against the 2D simulation.

    For the study case *project_*, loads the parsed Delphin results of the
    three mapped projects, computes absolute and relative differences between
    each 1D result and the (optionally weighted) average of the corresponding
    2D probe cells, and appends the weighted relative differences to an HDF
    store. Plotting helpers are defined but their calls are commented out.
    """
    projects = list(project_dict[project_]['map'].keys())
    # Parsed data per simulation type: 'rh' holds the .d6o result dict,
    # 'geo' the .g6a geometry dict.
    parsed_dicts = {'brick_1d': {'rh': {}, 'geo': {}},
                    'mortar_1d': {'rh': {}, 'geo': {}},
                    '2d': {'rh': {}, 'geo': {}}, }
    for p_ in projects:
        for mp_key in project_dict[project_]['map'].keys():
            if p_ == mp_key:
                key = project_dict[project_]['map'][mp_key]
                folder = result_folder + f'/{p_}/results'
                # Each results folder is expected to contain one geometry file.
                geo_file = [file
                            for file in os.listdir(folder)
                            if file.endswith('.g6a')][0]
                parsed_dicts[key]['rh'], _ = delphin_parser.d6o_to_dict(folder, files[0])
                parsed_dicts[key]['geo'] = delphin_parser.g6a_to_dict(folder, geo_file)
    # Hourly time axis; the first simulated year (8760 h) is treated as
    # spin-up and discarded everywhere below.
    x_date = [datetime.datetime(2020, 1, 1) + datetime.timedelta(hours=i)
              for i in range(len(parsed_dicts['brick_1d']['rh']['result']['cell_0'][8760:]))]
    # Brick 1D
    brick_1d = get_points(parsed_dicts['brick_1d']['rh'], parsed_dicts['brick_1d']['geo'])
    brick_1d.sort(key=lambda point: point['x'])
    add_data_to_points(brick_1d, parsed_dicts['brick_1d']['rh'], 'relative_humidity')
    # Mortar 1D
    mortar_1d = get_points(parsed_dicts['mortar_1d']['rh'], parsed_dicts['mortar_1d']['geo'])
    mortar_1d.sort(key=lambda point: point['x'])
    add_data_to_points(mortar_1d, parsed_dicts['mortar_1d']['rh'], 'relative_humidity')
    # 2D
    sim_2d = get_points(parsed_dicts['2d']['rh'], parsed_dicts['2d']['geo'])
    sim_2d.sort(key=lambda point: (point['x'], point['y']))
    add_data_to_points(sim_2d, parsed_dicts['2d']['rh'], 'relative_humidity')

    # Plots
    def plot_locations(quantity):
        # One figure per wall depth: brick_1d[i]/mortar_1d[i] pair with the
        # three 2D probes sim_2d[3*i .. 3*i+2] (bottom, mid, top after the
        # (x, y) sort above).
        # Axes 00
        plt.figure()
        plt.title(f"{quantity}\n1D-Location: {brick_1d[0]['x']:.4f} and 2D-Location: {sim_2d[0]['x']:.4f}")
        plt.plot(x_date, brick_1d[0][quantity], color=colors['1d_brick'], label=f"1D Brick")
        plt.plot(x_date, mortar_1d[0][quantity], color=colors['1d_mortar'], label=f"1D Mortar")
        plt.plot(x_date, sim_2d[0][quantity], color=colors['bottom'], label=f"2D Bottom")
        plt.plot(x_date, sim_2d[1][quantity], color=colors['mid'], label=f"2D Mid")
        plt.plot(x_date, sim_2d[2][quantity], color=colors['top'], label=f"2D Top")
        plt.legend()
        plt.gcf().autofmt_xdate()
        plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
        plt.ylabel(f'{quantity}')
        # Axes 01
        plt.figure()
        plt.title(f"{quantity}\n1D-Location: {brick_1d[1]['x']:.4f} and 2D-Location: {sim_2d[3]['x']:.4f}")
        plt.plot(x_date, brick_1d[1][quantity], color=colors['1d_brick'], label=f"1D Brick")
        plt.plot(x_date, mortar_1d[1][quantity], color=colors['1d_mortar'], label=f"1D Mortar")
        plt.plot(x_date, sim_2d[3][quantity], color=colors['bottom'], label=f"2D Bottom")
        plt.plot(x_date, sim_2d[4][quantity], color=colors['mid'], label=f"2D Mid")
        plt.plot(x_date, sim_2d[5][quantity], color=colors['top'], label=f"2D Top")
        plt.legend()
        plt.gcf().autofmt_xdate()
        plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
        plt.ylabel(f'{quantity}')
        # Axes 10
        plt.figure()
        plt.title(f"{quantity}\n1D-Location: {brick_1d[2]['x']:.4f} and 2D-Location: {sim_2d[6]['x']:.4f}")
        plt.plot(x_date, brick_1d[2][quantity], color=colors['1d_brick'], label=f"1D Brick")
        plt.plot(x_date, mortar_1d[2][quantity], color=colors['1d_mortar'], label=f"1D Mortar")
        plt.plot(x_date, sim_2d[6][quantity], color=colors['bottom'], label=f"2D Bottom")
        plt.plot(x_date, sim_2d[7][quantity], color=colors['mid'], label=f"2D Mid")
        plt.plot(x_date, sim_2d[8][quantity], color=colors['top'], label=f"2D Top")
        plt.legend()
        plt.gcf().autofmt_xdate()
        plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
        plt.ylabel(f'{quantity}')
        # Axes 11
        plt.figure()
        plt.title(f"{quantity}\n1D-Location: {brick_1d[3]['x']:.4f} and 2D-Location: {sim_2d[9]['x']:.4f}")
        plt.plot(x_date, brick_1d[3][quantity], color=colors['1d_brick'], label=f"1D Brick")
        plt.plot(x_date, mortar_1d[3][quantity], color=colors['1d_mortar'], label=f"1D Mortar")
        plt.plot(x_date, sim_2d[9][quantity], color=colors['bottom'], label=f"2D Bottom")
        plt.plot(x_date, sim_2d[10][quantity], color=colors['mid'], label=f"2D Mid")
        plt.plot(x_date, sim_2d[11][quantity], color=colors['top'], label=f"2D Top")
        plt.legend()
        plt.gcf().autofmt_xdate()
        plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
        plt.ylabel(f'{quantity}')
        # Axes 20
        plt.figure()
        plt.title(f"{quantity}\n1D-Location: {brick_1d[4]['x']:.4f} and 2D-Location: {sim_2d[12]['x']:.4f}")
        plt.plot(x_date, brick_1d[4][quantity], color=colors['1d_brick'], label=f"1D Brick")
        plt.plot(x_date, mortar_1d[4][quantity], color=colors['1d_mortar'], label=f"1D Mortar")
        plt.plot(x_date, sim_2d[12][quantity], color=colors['bottom'], label=f"2D Bottom")
        plt.plot(x_date, sim_2d[13][quantity], color=colors['mid'], label=f"2D Mid")
        plt.plot(x_date, sim_2d[14][quantity], color=colors['top'], label=f"2D Top")
        plt.legend()
        plt.gcf().autofmt_xdate()
        plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
        plt.ylabel(f'{quantity}')
        # Axes 21
        plt.figure()
        plt.title(f"{quantity}\n1D-Location: {brick_1d[5]['x']:.4f} and 2D-Location: {sim_2d[15]['x']:.4f}")
        plt.plot(x_date, brick_1d[5][quantity], color=colors['1d_brick'], label=f"1D Brick")
        plt.plot(x_date, mortar_1d[5][quantity], color=colors['1d_mortar'], label=f"1D Mortar")
        plt.plot(x_date, sim_2d[15][quantity], color=colors['bottom'], label=f"2D Bottom")
        plt.plot(x_date, sim_2d[16][quantity], color=colors['mid'], label=f"2D Mid")
        plt.plot(x_date, sim_2d[17][quantity], color=colors['top'], label=f"2D Top")
        plt.legend()
        plt.gcf().autofmt_xdate()
        plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
        plt.ylabel(f'{quantity}')
    #plot_locations(quantity='relative_humidity')
    #plt.show()

    def abs_diff(x1, x2):
        # Signed difference: 2D average minus 1D series.
        return x2 - x1

    def rel_diff(x1, x2):
        # Relative difference in percent of the 2D value.
        return (abs(x2 - x1)) / x2 * 100

    def differences(i, plots=False):
        # Unweighted mean of the 2D probe columns for depth i.
        # NOTE(review): sim_2d[i + 2] appears twice and the indices are not
        # scaled by 3 as elsewhere (plot_locations and the titles below use
        # sim_2d[i*3]); this looks like it should be sim_2d[i*3],
        # sim_2d[i*3 + 1], sim_2d[i*3 + 2] — confirm intent before reuse.
        avg_2d = np.mean(
            [sim_2d[i]['relative_humidity'], sim_2d[i + 2]['relative_humidity'], sim_2d[i + 2]['relative_humidity']],
            axis=0)
        brick_abs = abs_diff(brick_1d[i]['relative_humidity'], avg_2d)
        mortar_abs = abs_diff(mortar_1d[i]['relative_humidity'], avg_2d)
        brick_rel = rel_diff(brick_1d[i]['relative_humidity'], avg_2d)
        mortar_rel = rel_diff(mortar_1d[i]['relative_humidity'], avg_2d)
        if plots:
            # Plot
            plt.figure()
            plt.title(f"relative_humidity - Absolute Difference\n"
                      f"1D-Location: {brick_1d[i]['x']:.4f} and 2D-Location: {sim_2d[i*3]['x']:.4f}")
            plt.plot(x_date, brick_abs, color=colors['1d_brick'], label=f"1D Brick")
            plt.plot(x_date, mortar_abs, color=colors['1d_mortar'], label=f"1D Mortar")
            plt.legend()
            plt.gcf().autofmt_xdate()
            plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
            plt.ylabel('%')
            plt.figure()
            plt.title(f"relative_humidity - Relative Difference\n"
                      f"1D-Location: {brick_1d[i]['x']:.4f} and 2D-Location: {sim_2d[i*3]['x']:.4f}")
            plt.plot(x_date, brick_rel, color=colors['1d_brick'], label=f"1D Brick")
            plt.plot(x_date, mortar_rel, color=colors['1d_mortar'], label=f"1D Mortar")
            plt.legend()
            plt.gcf().autofmt_xdate()
            plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
            plt.ylabel('%')
        # NOTE(review): pd.DatetimeIndex(start=..., freq=..., periods=...) was
        # removed in pandas 0.25; pd.date_range is the modern equivalent.
        local_df = pd.DataFrame(columns=[f"{brick_1d[i]['x']:.04f}", f"{brick_1d[i]['x']:.04f}",
                                         f"{brick_1d[i]['x']:.04f}", f"{brick_1d[i]['x']:.04f}"],
                                index=pd.DatetimeIndex(start=datetime.datetime(2020, 1, 1),
                                                       freq='h', periods=len(brick_rel)),
                                data=np.vstack([brick_rel, brick_abs, mortar_rel, mortar_abs]).T)
        local_df.columns = pd.MultiIndex.from_arrays([local_df.columns, ['brick', 'brick', 'mortar', 'mortar'],
                                                      ['relative', 'absolute', 'relative', 'absolute']],
                                                     names=['location', 'material', 'type'])
        return local_df

    def differences_weighted(i, plots=False):
        # Weighted mean of the 2D probe columns; the weights presumably
        # reflect cell widths (56/24/56) — TODO confirm against the grid.
        # NOTE(review): same duplicated sim_2d[i + 2] index as in
        # differences() above — confirm intent.
        avg_2d = np.average(a=[sim_2d[i]['relative_humidity'],
                               sim_2d[i + 2]['relative_humidity'],
                               sim_2d[i + 2]['relative_humidity']],
                            axis=0,
                            weights=[56., 24., 56.])
        brick_abs = abs_diff(brick_1d[i]['relative_humidity'], avg_2d)
        mortar_abs = abs_diff(mortar_1d[i]['relative_humidity'], avg_2d)
        brick_rel = rel_diff(brick_1d[i]['relative_humidity'], avg_2d)
        mortar_rel = rel_diff(mortar_1d[i]['relative_humidity'], avg_2d)
        if plots:
            # Plot
            plt.figure()
            plt.title(f"Relative Humidity - Weighted Absolute Difference\n"
                      f"1D-Location: {brick_1d[i]['x']:.4f} and 2D-Location: {sim_2d[i*3]['x']:.4f}")
            plt.plot(x_date, brick_abs, color=colors['1d_brick'], label=f"1D Brick")
            plt.plot(x_date, mortar_abs, color=colors['1d_mortar'], label=f"1D Mortar")
            plt.legend()
            plt.gcf().autofmt_xdate()
            plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
            plt.ylabel('%')
            plt.figure()
            plt.title(f"Relative Humidity - Weighted Relative Difference\n"
                      f"1D-Location: {brick_1d[i]['x']:.4f} and 2D-Location: {sim_2d[i*3]['x']:.4f}")
            plt.plot(x_date, brick_rel, color=colors['1d_brick'], label=f"1D Brick")
            plt.plot(x_date, mortar_rel, color=colors['1d_mortar'], label=f"1D Mortar")
            plt.legend()
            plt.gcf().autofmt_xdate()
            plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%B'))
            plt.ylabel('%')
        # NOTE(review): deprecated pd.DatetimeIndex(start=...) — see
        # differences() above.
        local_df = pd.DataFrame(columns=[f"{brick_1d[i]['x']:.04f}", f"{brick_1d[i]['x']:.04f}",
                                         f"{brick_1d[i]['x']:.04f}", f"{brick_1d[i]['x']:.04f}"],
                                index=pd.DatetimeIndex(start=datetime.datetime(2020, 1, 1),
                                                       freq='h', periods=len(brick_rel)),
                                data=np.vstack([brick_rel, brick_abs, mortar_rel, mortar_abs]).T)
        local_df.columns = pd.MultiIndex.from_arrays([local_df.columns, ['brick', 'brick', 'mortar', 'mortar'],
                                                      ['relative', 'absolute', 'relative', 'absolute']],
                                                     names=['location', 'material', 'type'])
        return local_df

    # One difference table per wall depth, weighted and unweighted.
    dataframes = []
    weighted_dataframes = []
    for index in range(len(brick_1d)):
        dataframes.append(differences(index))
        weighted_dataframes.append(differences_weighted(index))
    # plt.show()
    result_dataframe = pd.concat(dataframes, axis=1)
    w_result_dataframe = pd.concat(weighted_dataframes, axis=1)
    # Split the three-level columns (location, material, type) into separate
    # absolute/relative tables, dropping the now-constant 'type' level.
    absolute_df = result_dataframe.loc[:, pd.IndexSlice[:, :, 'absolute']]
    absolute_df.columns = absolute_df.columns.droplevel(level=2)
    relative_df = result_dataframe.loc[:, pd.IndexSlice[:, :, 'relative']]
    relative_df.columns = relative_df.columns.droplevel(level=2)
    w_absolute_df = w_result_dataframe.loc[:, pd.IndexSlice[:, :, 'absolute']]
    w_absolute_df.columns = w_absolute_df.columns.droplevel(level=2)
    w_relative_df = w_result_dataframe.loc[:, pd.IndexSlice[:, :, 'relative']]
    w_relative_df.columns = w_relative_df.columns.droplevel(level=2)

    def boxplots():
        # Box plots of the absolute differences, clipped to +/-20 %RH.
        plt.figure()
        ax = absolute_df.boxplot()
        ax.set_ylim(-20, 20)
        ax.set_ylabel('Relative Humidity - %')
        ax.set_title('Non-Weighted Absolute Differences')
        plt.figure()
        ax = w_absolute_df.boxplot()
        ax.set_ylim(-20, 20)
        ax.set_ylabel('Relative Humidity - %')
        ax.set_title('Weighted Absolute Differences')
        plt.show()
    # boxplots()
    out_folder = r'C:\Users\ocni\PycharmProjects\delphin_6_automation\data_process\2d_1d\processed_data'

    def excel():
        # Dump summary statistics of all four difference tables to a workbook.
        writer = pd.ExcelWriter(out_folder + '/relative_humidity.xlsx')
        relative_df.describe().to_excel(writer, 'relative')
        w_relative_df.describe().to_excel(writer, 'relative_weighted')
        absolute_df.describe().to_excel(writer, 'absolute')
        w_absolute_df.describe().to_excel(writer, 'absolute_weighted')
        writer.save()
    # excel()

    def save_relative():
        # Append this case's weighted relative differences to the HDF store,
        # keyed by the project name.
        hdf_file = out_folder + '/relative_humidity.h5'
        w_relative_df.to_hdf(hdf_file, project_, append=True)
    save_relative()
# Run the 1D-vs-2D comparison for every configured study case.
for project_key in project_dict.keys():
    print(f'Processing {project_key}')
    main(project_key)
| {
"repo_name": "thp44/delphin_6_automation",
"path": "data_process/2d_1d/archieve/relative_humidity.py",
"copies": "1",
"size": "18433",
"license": "mit",
"hash": 3284509745293186000,
"line_mean": 44.5135802469,
"line_max": 120,
"alpha_frac": 0.5276406445,
"autogenerated": false,
"ratio": 3.0767818394258053,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4104422483925805,
"avg_score": null,
"num_lines": null
} |
__author__ = "Christian Kongsgaard"
__license__ = "MIT"
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules:
import mongoengine
from datetime import datetime
# RiBuild Modules:
import delphin_6_automation.database_interactions.database_collections as collections
import delphin_6_automation.database_interactions.db_templates.result_processed_entry as processed_db
import delphin_6_automation.database_interactions.db_templates.material_entry as material_db
import delphin_6_automation.database_interactions.db_templates.result_raw_entry as raw_db
import delphin_6_automation.database_interactions.db_templates.weather_entry as weather_db
# -------------------------------------------------------------------------------------------------------------------- #
# DELPHIN CLASS
class Delphin(mongoengine.Document):
    """Database entry for a single Delphin simulation project."""

    # Meta Data
    added_date = mongoengine.DateTimeField(default=datetime.now)  # when the entry was created
    simulated = mongoengine.DateTimeField()  # set when the simulation has run
    simulating = mongoengine.DateTimeField(default=None, null=True)  # set while a worker holds it
    simulation_time = mongoengine.FloatField()  # actual run time
    estimated_simulation_time = mongoengine.IntField()
    queue_priority = mongoengine.FloatField(default=1)  # scheduling weight for the simulation queue
    sample_data = mongoengine.DictField()
    restart_data = mongoengine.DictField()
    # NOTE(review): the four flags below look like one-off data-migration
    # markers — presumably removable once the corresponding moves are done.
    moved_to_web = mongoengine.BooleanField()
    moved_to_web2 = mongoengine.BooleanField()
    moved27feb = mongoengine.BooleanField()
    moved_to_web_feb20 = mongoengine.BooleanField()

    # Failures
    exceeded_time_limit = mongoengine.BooleanField()
    critical_error = mongoengine.BooleanField()

    # References
    dimensions = mongoengine.IntField(required=True)  # 1, 2 or 3 dimensional project
    results_raw = mongoengine.ReferenceField(document_type=raw_db.Result)
    result_processed = mongoengine.ReferenceField(document_type=processed_db.ProcessedResult)
    dp6_file = mongoengine.DictField(required=True)  # dict representation of the Delphin project file
    materials = mongoengine.ListField(mongoengine.ReferenceField(document_type=material_db.Material))
    weather = mongoengine.ListField(mongoengine.ReferenceField(document_type=weather_db.Weather))
    indoor_climate = mongoengine.StringField()

    meta = collections.delphin_db  # collection/connection settings
class Design(mongoengine.Document):
    """Database entry describing a design variant of a Delphin project."""

    # Meta Data
    added_date = mongoengine.DateTimeField(default=datetime.now)  # when the entry was created
    strategy = mongoengine.GenericReferenceField()  # sampling strategy this design belongs to
    meta = collections.design_db  # collection/connection settings

    # File
    design_name = mongoengine.StringField(required=True)
    d6p_file = mongoengine.DictField(required=True)  # dict representation of the Delphin project file
    update_outputs = mongoengine.BooleanField(default=True)
    measured_indoor_climate = mongoengine.BooleanField(default=False)
| {
"repo_name": "thp44/delphin_6_automation",
"path": "delphin_6_automation/database_interactions/db_templates/delphin_entry.py",
"copies": "1",
"size": "2678",
"license": "mit",
"hash": 2654755857897441000,
"line_mean": 40.2,
"line_max": 120,
"alpha_frac": 0.7038834951,
"autogenerated": false,
"ratio": 4.08854961832061,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.529243311342061,
"avg_score": null,
"num_lines": null
} |
__author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules:
import mongoengine
import delphin_6_automation.database_interactions.database_collections as collections
# RiBuild Modules:
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
class Normalized(mongoengine.Document):
    """Flattened, query-friendly view of a simulated Delphin project.

    Collects location, construction and damage-model outputs in one document.
    """

    # Meta Data
    meta = collections.normalized_db  # collection/connection settings
    delphin = mongoengine.GenericReferenceField()  # source Delphin project entry

    # Location and construction
    loc = mongoengine.PointField()  # geographic coordinates
    orientation = mongoengine.IntField()  # wall orientation
    wall_width = mongoengine.FloatField()
    wall_material = mongoengine.StringField()
    ext_plaster = mongoengine.BooleanField()
    int_plaster = mongoengine.BooleanField()
    country = mongoengine.StringField()
    city = mongoengine.StringField()

    # Simulation outputs / damage-model results
    heat_loss = mongoengine.FloatField()
    mould = mongoengine.FloatField()
    u_value = mongoengine.FloatField()
    algae = mongoengine.FloatField()
    environment_impact = mongoengine.FloatField()
    insulation_system = mongoengine.StringField()
    insulation_thickness = mongoengine.IntField()
    avg_surface_temp = mongoengine.FloatField()
    min_surface_temp = mongoengine.FloatField()
    lambda_value = mongoengine.FloatField()
    rain = mongoengine.FloatField()
| {
"repo_name": "thp44/delphin_6_automation",
"path": "delphin_6_automation/database_interactions/db_templates/normalized_entry.py",
"copies": "1",
"size": "1415",
"license": "mit",
"hash": 781766919374139300,
"line_mean": 33.512195122,
"line_max": 120,
"alpha_frac": 0.6070671378,
"autogenerated": false,
"ratio": 4.5941558441558445,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006867520145507048,
"num_lines": 41
} |
__author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
import numpy as np
from typing import Tuple
import bson
# RiBuild Modules
from delphin_6_automation.logging.ribuild_logger import ribuild_logger
from delphin_6_automation.delphin_setup.damage_models import algae
from delphin_6_automation.database_interactions.db_templates import result_processed_entry, result_raw_entry, \
delphin_entry, material_entry
# Logger
logger = ribuild_logger(__name__)
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
def update_project(delphin_id):
    """Compute algae growth for a Delphin project and store it on its result.

    Returns the maximum growth value of the series, or -1 when the material
    porosity falls outside the range accepted by the algae damage model.
    """
    logger.info(f'Computing algae on project: {delphin_id}')

    temperature, relative_humidity = get_result_data(delphin_id)
    material_name, porosity = get_material_data(delphin_id)

    # The algae model is only validated for porosities in [0.19, 0.44].
    if 0.19 <= porosity <= 0.44:
        algae_growth = algae(relative_humidity, temperature, material_name=material_name, porosity=porosity,
                             roughness=5.5, total_pore_area=6.5)
    else:
        logger.info(
            f'Project {delphin_id} with material {material_name} has a porosity {porosity} outside the accepted range.')
        algae_growth = [-1, ]

    update_result(delphin_id, algae_growth)
    logger.info(f'Done with computing algea project: {delphin_id}')

    return max(algae_growth) if isinstance(algae_growth, list) else algae_growth
def update_result(project_id, algae_growth):
    """Persist the algae growth series and its peak on the processed result."""
    max_algae = max(algae_growth)
    logger.info(f'Max algae: {max_algae} for project: {project_id}')

    processed = result_processed_entry.ProcessedResult.objects(delphin=project_id).first()
    processed['thresholds']['algae'] = max_algae
    # Store the full series as raw bytes alongside the scalar threshold.
    processed.algae.replace(np.asarray(algae_growth).tobytes())
    processed.save()
    logger.info(f'Uploaded algae to result with ID: {processed.id}')
def get_result_data(project_id) -> Tuple[list, list]:
    """Fetch the algae-relevant temperature and relative humidity series."""
    raw_entry = result_raw_entry.Result.objects(delphin=project_id).first()
    decoded = bson.BSON.decode(raw_entry.results.read())
    return (decoded['temperature algae']['result'],
            decoded['relative humidity algae']['result'])
def get_material_data(project_id):
    """Return (material_name, porosity) for the project's exposed material.

    When an exterior plaster is present it is the exposed surface; otherwise
    the wall core material is used.
    """
    sample_data = delphin_entry.Delphin.objects(id=project_id).only('sample_data').first().sample_data
    has_exterior_plaster = sample_data['design_option'].get('exterior_plaster', None)
    material_key = 'exterior_plaster_material' if has_exterior_plaster else 'wall_core_material'
    return get_porosity_and_type(sample_data[material_key])
def get_porosity_and_type(material):
    """Look up a material by Delphin id and return its name and open porosity."""
    entry = material_entry.Material.objects(material_id=material).first()
    # Fall back to 0.5 when the material data carries no porosity value.
    porosity = entry.material_data.get('STORAGE_BASE_PARAMETERS-THETA_POR', 0.5)
    logger.info(f'Material: {entry.material_name} - Porosity: {porosity}')
    return entry.material_name, porosity
| {
"repo_name": "thp44/delphin_6_automation",
"path": "data_process/normalize_data/algea.py",
"copies": "1",
"size": "3056",
"license": "mit",
"hash": 9178973254214587000,
"line_mean": 33.7272727273,
"line_max": 120,
"alpha_frac": 0.6397251309,
"autogenerated": false,
"ratio": 3.4687854710556185,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4608510601955619,
"avg_score": null,
"num_lines": null
} |
__author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules:
import os
from collections import OrderedDict
import typing
# RiBuild Modules:
import delphin_6_automation.database_interactions.db_templates.delphin_entry as delphin_db
import delphin_6_automation.database_interactions.db_templates.material_entry as material_db
from delphin_6_automation.file_parsing import material_parser
from delphin_6_automation.logging.ribuild_logger import ribuild_logger
# Logger
logger = ribuild_logger()
# -------------------------------------------------------------------------------------------------------------------- #
# DATABASE INTERACTIONS
def find_material_ids(project_materials: list) -> list:
    """
    Find ids of given material entries based on material name and material unique id.

    :param project_materials: List tuples with material file names and unique material ids
    :return: list with material entries
    :raises FileNotFoundError: when a referenced material is missing from the database
    """
    material_entries = []
    for material_name, material_id in project_materials:
        entry = material_db.Material.objects(material_name=material_name,
                                             material_id=material_id).first()
        if not entry:
            raise FileNotFoundError(f'Material: {material_name} with material ID: {material_id} '
                                    f'does not exist in database.\n'
                                    f'Please upload material files before uploading Delphin Projects\n')
        material_entries.append(entry)
    return material_entries
def list_project_materials(delphin_document: delphin_db.Delphin) -> list:
    """
    Returns a list with the materials in a project entry.

    :param delphin_document: Delphin entry
    :return: List tuples with material file names and unique material ids
    """
    def name_and_id(reference):
        # Material file references end in "<name>_<id>.m6".
        file_name = reference['#text'].split('/')[-1]
        return file_name.split('_')[0], int(file_name.split('_')[-1][:-3])

    materials = dict(delphin_document.dp6_file)['DelphinProject']['Materials']['MaterialReference']
    # A single reference is stored as a dict, multiple references as a list.
    if isinstance(materials, list):
        material_list = [name_and_id(material) for material in materials]
    elif isinstance(materials, dict):
        material_list = [name_and_id(materials), ]
    logger.debug(f'Found the following materials {material_list} related to the '
                 f'Delphin project with ID: {delphin_document.id}')
    return material_list
def upload_materials_from_folder(user_path_input: str) -> typing.List[str]:
    """Upload the Delphin material files located in a given folder.

    :param user_path_input: Either a single .m6 file or a folder that is
        searched recursively for .m6 files.
    :return: List of database ids of the uploaded materials.
    """
    material_dict_lst = []
    logger.debug(f'Uploads material files from {user_path_input}')

    if user_path_input.endswith(".m6"):
        material_dict_lst.append(upload_material_file(user_path_input))
    else:
        for root, dirs, files in os.walk(user_path_input):
            # Bug fix: this loop previously zipped `files` with the `root`
            # *string* (silently truncating the file list to len(root)) and
            # passed the bare file name instead of its full path; the ids were
            # also never collected, so the function always returned [].
            for file in files:
                if file.endswith(".m6"):
                    material_dict_lst.append(upload_material_file(os.path.join(root, file)))
    return material_dict_lst
def upload_material_file(material_path: str) -> delphin_db.Delphin.id:
    """
    Uploads a Delphin file to a database.rst.

    :param material_path: Path to a Delphin 6 material project file
    :return: Database entry id
    """
    # Material files are named "<name>_<id>.m6".
    file_name = os.path.split(material_path)[-1]

    entry = material_db.Material()
    entry.material_data = material_parser.material_file_to_dict(material_path)
    entry.material_name = file_name.split('_')[0]
    entry.material_id = int(file_name.split('_')[1][:-3])
    entry.save()

    logger.debug(f'Material: {os.path.split(material_path)[-1].split("_")[0]} upload to database with ID: {entry.id}')
    return entry.id
def change_material_location(delphin_object: delphin_db.Delphin) -> str:
    """
    Changes the material database location referenced by the Delphin project file.

    :param delphin_object: Delphin entry
    :return: ID of entry
    """

    project_dict = dict(delphin_object.dp6_file)
    placeholder = project_dict['DelphinProject']['DirectoryPlaceholders']['Placeholder']
    placeholder['#text'] = "${Project Directory}/materials"

    delphin_object.update(set__dp6_file=project_dict)

    return delphin_object.id
def download_materials(delphin_object: delphin_db.Delphin, path: str) -> None:
    """
    Downloads the materials of a Delphin Project.

    :param delphin_object: Delphin entry
    :param path: Folder to save the material files to; created if missing.
    :return: None
    """

    materials_list = delphin_object.materials
    change_material_location(delphin_object)

    # makedirs with exist_ok avoids the race between an isdir check and mkdir
    # and also creates missing parent folders.
    os.makedirs(path, exist_ok=True)

    for material in materials_list:
        material_parser.dict_to_m6(material, path)

    logger.debug(f'Materials for Delphin project with ID: {delphin_object.id} downloaded to {path}')
def get_material_info(material_id: int) -> dict:
    """Get the material info for a material in the database given a Delphin Material ID."""

    material = material_db.Material.objects(material_id=material_id).first()
    data = material.material_data

    material_dict = OrderedDict()
    material_dict['@name'] = f'{material.material_name} [{material.material_id}]'
    material_dict['@color'] = str(data['IDENTIFICATION-COLOUR'])
    material_dict['@hatchCode'] = str(data['IDENTIFICATION-HATCHING'])
    material_dict['#text'] = '${Material Database}/' + os.path.split(data['INFO-FILE'])[-1]

    logger.debug(f'Found material info: {material_dict}')

    return material_dict
| {
"repo_name": "thp44/delphin_6_automation",
"path": "delphin_6_automation/database_interactions/material_interactions.py",
"copies": "1",
"size": "5860",
"license": "mit",
"hash": -7142672257858925000,
"line_mean": 35.1728395062,
"line_max": 120,
"alpha_frac": 0.6104095563,
"autogenerated": false,
"ratio": 3.9728813559322034,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5083290912232203,
"avg_score": null,
"num_lines": null
} |
__author__ = "Christian Kongsgaard"
__license__ = "MIT"
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules:
import os
import datetime
import time
import shutil
import typing
from mongoengine import Q
# RiBuild Modules:
from delphin_6_automation.database_interactions.db_templates import delphin_entry
from delphin_6_automation.database_interactions import general_interactions as general_interact
from delphin_6_automation.logging.ribuild_logger import ribuild_logger
from delphin_6_automation.backend import simulation_worker
# Logger
logger = ribuild_logger()
# -------------------------------------------------------------------------------------------------------------------- #
# RIBUILD SIMULATION FUNCTIONS AND CLASSES
def download_simulation_result(sim_id: str, download_path: str, raw_or_processed='raw') -> None:
    """
    Downloads Delphin simulation results from the database.

    :param sim_id: Delphin project ID
    :param download_path: Path to download to
    :param raw_or_processed: Whether to download the raw results or the processed ones
    :return: None
    :raises ValueError: If raw_or_processed is neither 'raw' nor 'processed'
    """

    delphin_doc = delphin_entry.Delphin.objects(id=sim_id).first()

    # One sub-folder per project keeps results of different projects apart.
    # os.path.join replaces manual '/'-concatenation; makedirs tolerates an
    # existing folder instead of raising FileExistsError.
    download_extended_path = os.path.join(download_path, str(sim_id))
    os.makedirs(download_extended_path, exist_ok=True)

    if raw_or_processed == 'raw':
        result_id = delphin_doc.results_raw
        logger.info(f'Downloads raw result with ID: {result_id} from Delphin project with ID: {sim_id}')
        general_interact.download_raw_result(result_id.id, download_extended_path)

    elif raw_or_processed == 'processed':
        pass
        # TODO - Download processed results from database

    else:
        raise ValueError('raw_or_processed has to be raw or processed. Value given was: ' + str(raw_or_processed))
def find_next_sim_in_queue() -> typing.Optional[str]:
    """
    Finds the next entry in the simulation queue, which is not yet simulated and has the highest queue priority.

    :return: If a entry is found the id will be returned otherwise None.
    """

    candidate = delphin_entry.Delphin.objects(simulating=False,
                                              simulated=None).order_by('-queue_priority').first()

    if candidate is None:
        # .first() returned nothing -> the queue is empty; back off before the
        # caller polls again.
        logger.info('All Delphin Projects in the queue are simulated!')
        time.sleep(60)
        return None

    id_ = candidate.id
    set_simulating(str(id_), True)
    logger.debug(f'Found unsimulated Delphin project with ID: {id_}')

    return str(id_)
def set_simulating(id_: str, set_to: bool) -> str:
    """
    Set the simulating flag of an entry.

    :param id_: ID of the entry
    :param set_to: What to set simulating to. Should be either True or False.
    :return: ID of the entry
    """

    delphin_doc = delphin_entry.Delphin.objects(id=id_).first()

    # The flag is stored as a start-of-simulation timestamp, or None when idle.
    new_value = datetime.datetime.now() if set_to else None
    delphin_doc.update(set__simulating=new_value)

    logger.debug(f'For Delphin project with ID: {id_}, simulating was changed to: {set_to}')

    return delphin_doc.id
def set_simulated(id_: str) -> str:
    """
    Flags an entry for finishing the simulation.

    :param id_: ID of the entry
    :return: ID of the entry
    """

    finish_time = datetime.datetime.now()

    simulation = delphin_entry.Delphin.objects(id=id_).first()
    simulation.update(set__simulated=finish_time)

    # The entry is no longer actively simulating.
    set_simulating(id_, False)

    logger.debug(f'For Delphin project with ID: {id_}, simulated was changed to: {datetime.datetime.now()}')

    return simulation.id
def clean_simulation_folder(path: str) -> bool:
    """
    Cleans the simulation folder for content.

    Removes the folder and everything below it.

    :param path: Path to the simulation folder
    :return: True on success
    """

    shutil.rmtree(path)
    logger.debug(f'Deleted {path}')

    return True
def set_simulation_time(sim_id: str, computation_time: datetime.timedelta) -> str:
    """Sets the time it took to simulate Delphin project.

    :param sim_id: Delphin project ID
    :param computation_time: Wall-clock duration of the simulation
    :return: Delphin project ID
    """

    # The duration is persisted as a plain number of seconds.
    seconds = computation_time.total_seconds()

    delphin_doc = delphin_entry.Delphin.objects(id=sim_id).first()
    delphin_doc.update(set__simulation_time=seconds)

    logger.debug(f'For Delphin project with ID: {sim_id}, '
                 f'simulation time was changed to: {computation_time.total_seconds()}')

    return sim_id
def set_simulation_time_estimate(sim_id: str, computation_time: int) -> str:
    """Sets the estimate simulation time for a Delphin project.

    :param sim_id: Delphin project ID
    :param computation_time: Estimated simulation time
    :return: Delphin project ID
    """

    entry = delphin_entry.Delphin.objects(id=sim_id).first()
    entry.update(set__estimated_simulation_time=computation_time)

    logger.debug(f'For Delphin project with ID: {sim_id}, '
                 f'simulation time was changed to: {computation_time}')

    return sim_id
def get_simulation_time_estimate(delphin_id: str) -> int:
    """Returns the estimated simulation time of Delphin project, given its ID."""

    delphin_doc = delphin_entry.Delphin.objects(id=delphin_id).first()

    # Fall back to computing an estimate when none is stored on the entry.
    return (delphin_doc.estimated_simulation_time
            or general_interact.compute_simulation_time(delphin_id))
def wait_until_simulated(delphin_ids: list, is_sampling_ahead: bool = False) -> bool:
    """
    Wait until all simulations in the given list is simulated.

    :param delphin_ids: List with Delphin database ids
    :param is_sampling_ahead: When False the wait is cut short once 90% of the
        projects are simulated and the function returns False. When True the
        function waits for all projects.
    :return: True when all projects are simulated, False on the 90% early exit.
    """

    simulated = [False] * len(delphin_ids)

    logger.info(f'Checking if Delphin projects have been simulated')

    while not all(simulated):
        for index, id_ in enumerate(delphin_ids):
            # Only fetch the 'simulated' field to keep the query light.
            entry = delphin_entry.Delphin.objects(id=id_).only('simulated').first()

            if entry.simulated:
                simulated[index] = True

        logger.debug(f'Waiting until all projects are simulated. {sum(simulated)}/{len(simulated)} is simulated')

        if all(simulated):
            logger.info('All projects are simulated')
            return True

        if sum(simulated) >= (len(simulated) * 0.9) and not is_sampling_ahead:
            logger.info('90% of projects are simulated')
            return False

        if not all(simulated):
            time.sleep(180)

    # Only reached when delphin_ids is empty (the loop body returns as soon as
    # all projects are simulated).
    logger.info('All projects are simulated')

    return True
def find_exceeded() -> typing.Optional[str]:
    """
    Finds a Delphin project which has exceeded the simulation run time limit.

    :return: If a entry is found the id will be returned otherwise None.
    """

    exceeded_entry = delphin_entry.Delphin.objects(simulating=False,
                                                   exceeded_time_limit=True).order_by('-queue_priority').first()

    if exceeded_entry is None:
        # .first() returned nothing -> back off before the caller polls again.
        logger.info('No exceeded Delphin Projects in the database!')
        time.sleep(60)
        return None

    id_ = exceeded_entry.id
    set_simulating(str(id_), True)
    logger.debug(f'Found exceeded Delphin project with ID: {id_}')

    return str(id_)
def check_simulations(auth_file: str, only_count=False) -> tuple:
    """Checks running simulations on HPC.

    Opens an interactive shell on the cluster, runs 'bstat' and counts the
    pending and non-pending jobs in its output.

    :param auth_file: Path to the HPC authentication file
    :param only_count: When True only the counts are returned; individual job
        lines are not logged
    :return: Tuple of (running job count, pending job count)
    """

    terminal_call = f"bstat\n"

    client = simulation_worker.connect_to_hpc(auth_file)
    channel = client.invoke_shell()
    time.sleep(0.5)

    channel.send(terminal_call)
    channel_data = get_command_results(channel)
    # Everything after the prompt marker belongs to the command output.
    # NOTE(review): host marker 'hpclogin3' is hard-coded — confirm it matches
    # the login node actually in use.
    simulation_data = channel_data.split('hpclogin3')[1]

    channel.close()
    client.close()

    # Process string
    simulation_data = simulation_data.split("\n")[1:]
    count = 0
    p_count = 0

    for data in simulation_data:
        data = data.strip()

        # Skip blank lines, the trailing '~' and the header row.
        if data and data != '~' and 'JOBID' not in data:
            if "pend" in data.lower():
                p_count += 1
            else:
                count += 1

            if not only_count:
                logger.info(data)

    return count, p_count
def get_command_results(channel):
    """Poll a non-blocking shell channel until output stops or a timeout hits.

    Based on: http://joelinoff.com/blog/?p=905

    :param channel: Interactive shell channel exposing recv/recv_ready/
        exit_status_ready (presumably paramiko — confirm against caller)
    :return: The accumulated output decoded as UTF-8
    """
    interval = 0.1
    maxseconds = 10
    maxcount = maxseconds / interval  # NOTE(review): unused — kept from the original recipe
    bufsize = 1024

    # Poll until completion or timeout
    # Note that we cannot directly use the stdout file descriptor
    # because it stalls at 64K bytes (65536).
    input_idx = 0  # NOTE(review): unused — kept from the original recipe
    timeout_flag = False
    start = datetime.datetime.now()
    start_secs = time.mktime(start.timetuple())
    output = ''
    channel.setblocking(0)

    while True:
        if channel.recv_ready():
            data = channel.recv(bufsize).decode('utf-8')
            output += data

        if channel.exit_status_ready():
            break

        # Timeout check
        now = datetime.datetime.now()
        now_secs = time.mktime(now.timetuple())
        et_secs = now_secs - start_secs
        if et_secs > maxseconds:
            timeout_flag = True
            break

        # Stop early when the output ends in a shell prompt character.
        rbuffer = output.rstrip(' ')
        if len(rbuffer) > 0 and (rbuffer[-1] == '#' or rbuffer[-1] == '>'):  ## got a Cisco command prompt
            break

        time.sleep(0.2)

    return output
def check_simulating_projects(not_simulating: bool = False) -> None:
    """Report (and optionally reset) projects stuck in the simulating state.

    :param not_simulating: When True, projects whose simulating timestamp is
        older than the expiry threshold are reset to not simulating.
    :return: None
    """

    # Entries whose simulating timestamp is more than 300 minutes old are
    # considered to have exceeded their simulation time.
    expiry_date = datetime.datetime.now() - datetime.timedelta(minutes=300)
    projects = delphin_entry.Delphin.objects(simulating__lt=expiry_date)

    logger.info(f'There are {projects.count()} projects, which have exceeded their simulation time.')

    if not_simulating and projects.count() > 0:
        logger.info('Setting exceed simulations to not simulating')
        projects.update(simulating=None)

        # Re-query to report how many remain after the reset.
        projects = delphin_entry.Delphin.objects(simulating__lt=expiry_date)
        logger.info(f'There are {projects.count()} projects, which have exceeded their simulation time.')
"repo_name": "thp44/delphin_6_automation",
"path": "delphin_6_automation/database_interactions/simulation_interactions.py",
"copies": "1",
"size": "9558",
"license": "mit",
"hash": -5774087173877538000,
"line_mean": 30.3409836066,
"line_max": 120,
"alpha_frac": 0.6383134547,
"autogenerated": false,
"ratio": 3.7263157894736842,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9859909157087015,
"avg_score": 0.0009440174173338563,
"num_lines": 305
} |
__author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
import os
import json
import pandas as pd
import datetime
import bson
# RiBuild Modules
from delphin_6_automation.database_interactions import mongo_setup
from delphin_6_automation.database_interactions.auth import validation as auth_dict
from delphin_6_automation.database_interactions import weather_interactions
from delphin_6_automation.database_interactions import delphin_interactions
from delphin_6_automation.database_interactions import material_interactions
from delphin_6_automation.database_interactions import sampling_interactions
from delphin_6_automation.database_interactions.db_templates import sample_entry
from delphin_6_automation.database_interactions.db_templates import weather_entry
from delphin_6_automation.sampling import inputs
from delphin_6_automation.file_parsing import weather_parser
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
server = mongo_setup.global_init(auth_dict)
def upload_materials(folder):
    """Upload the .m6 files in <folder>/materials plus the bricks listed in Brick.xlsx."""

    print('Uploading Materials')

    for file in os.listdir(os.path.join(folder, 'materials')):
        print(file)
        material_interactions.upload_material_file(os.path.join(folder, 'materials') + '/' + file)

    bricks = pd.read_excel(folder + '/Brick.xlsx')
    delphin_mats = r'C:\Program Files\IBK\Delphin 6.0\resources\DB_materials'

    for row_index in range(len(bricks)):
        name = f'{bricks.loc[row_index, "Name"]}_{bricks.loc[row_index, "Material ID"]}.m6'

        # Masonry_3 is deliberately skipped.
        if name != 'Masonry_3.m6':
            print(name, os.path.exists(os.path.join(delphin_mats, name)))
            material_interactions.upload_material_file(delphin_mats + '/' + name)
def upload_weather(folder):
    """Upload radiation and station weather .ccd files as yearly weather entries.

    :param folder: Folder containing the shared radiation .ccd files and one
        sub-folder per weather station.
    """

    stations = ['Ms-11-5', 'Ms-24-1']
    weather_dict = {'time': [2017, 2019]}

    # Shared radiation files live directly in the folder.
    # BUG FIX: key must be initialized before the loop — otherwise the first
    # file that matches no suffix raises a NameError on 'if key:'.
    key = None
    for dwd_file in os.listdir(folder):
        if dwd_file.endswith('DiffuseRadiation.ccd'):
            key = 'diffuse_radiation'
        elif dwd_file.endswith('DirectRadiation.ccd'):
            key = 'direct_radiation'
        elif dwd_file.endswith('SkyRadiation.ccd'):
            key = 'long_wave_radiation'

        if key:
            weather_dict[key] = weather_parser.ccd_to_list(os.path.join(folder, dwd_file))
            key = None

    for station in stations:
        weather_folder = os.path.join(folder, station)

        # Station specific temperature and relative humidity files.
        key = None
        for file in os.listdir(weather_folder):
            if file.startswith('rH') and file.endswith('In.ccd'):
                key = 'indoor_relative_humidity'
            elif file.startswith('rH') and file.endswith('Out.ccd'):
                key = 'relative_humidity'
            elif file.startswith('T') and file.endswith('In.ccd'):
                key = 'indoor_temperature'
            elif file.startswith('T') and file.endswith('Out.ccd'):
                key = 'temperature'

            if key:
                weather_dict[key] = weather_parser.ccd_to_list(os.path.join(weather_folder, file))
                key = None

        # Split years
        def hours_per_year(start, stop):
            # Yields 8760 hours for every whole year in [start, stop).
            while start < stop:
                yield 8760
                start += 1

        def accumulate_hours(hour_list):
            # Running sum of hours; gives the slicing offsets between years.
            accumulated_list = [0, ]

            for i in range(0, len(hour_list)):
                accumulated_list.append(accumulated_list[i] + hour_list[i])

            return accumulated_list

        hours = [hour
                 for hour in hours_per_year(weather_dict['time'][0],
                                            weather_dict['time'][1])]
        accumulated_hours = accumulate_hours(hours)

        # Add yearly weather entries
        entry_ids = []

        for year_index in range(1, len(accumulated_hours)):
            yearly_weather_entry = weather_entry.Weather()
            yearly_weather_entry.location_name = station
            yearly_weather_entry.dates = {'start': datetime.datetime(year=2016 + year_index, month=1, day=1, hour=1),
                                          'stop': datetime.datetime(year=2017 + year_index, month=1, day=1, hour=0)}
            yearly_weather_entry.year = 2016 + year_index
            yearly_weather_entry.location = [11.19, 50.59]
            yearly_weather_entry.altitude = 208
            yearly_weather_entry.source = {'comment': 'TU Dresden', 'url': None, 'file': 'multiple'}
            yearly_weather_entry.units = {'temperature': 'C',
                                          'relative_humidity': '%',
                                          'long_wave_radiation': 'W/m2',
                                          'diffuse_radiation': 'W/m2',
                                          'direct_radiation': 'W/m2'
                                          }

            # Climate Data - slice out one year per entry.
            weather = {
                'temperature': weather_dict['temperature'][
                               accumulated_hours[year_index - 1]:
                               accumulated_hours[year_index]],
                'relative_humidity': weather_dict['relative_humidity'][
                                     accumulated_hours[year_index - 1]:
                                     accumulated_hours[year_index]],
                'indoor_temperature': weather_dict['indoor_temperature'][
                                      accumulated_hours[year_index - 1]:
                                      accumulated_hours[year_index]],
                'indoor_relative_humidity': weather_dict['indoor_relative_humidity'][
                                            accumulated_hours[year_index - 1]:
                                            accumulated_hours[year_index]],
                'long_wave_radiation': weather_dict['long_wave_radiation'][
                                       accumulated_hours[year_index - 1]:
                                       accumulated_hours[year_index]],
                'diffuse_radiation': weather_dict['diffuse_radiation'][
                                     accumulated_hours[year_index - 1]:
                                     accumulated_hours[year_index]],
                'direct_radiation': weather_dict['direct_radiation'][
                                    accumulated_hours[year_index - 1]:
                                    accumulated_hours[year_index]]
            }

            yearly_weather_entry.weather_data.put(bson.BSON.encode(weather))
            yearly_weather_entry.save()
            entry_ids.append(yearly_weather_entry.id)
            yearly_weather_entry.reload()

            print(f'Uploaded weather files from {yearly_weather_entry.location_name} '
                  f'for year {yearly_weather_entry.year}')
def create_strategy(folder):
    """Compose the sampling strategy and dump it as sampling_strategy.json in the folder."""

    def uniform(low, high):
        # Uniform distribution over [low, high].
        return {'type': 'uniform', 'range': [low, high], }

    def discrete(values):
        # Discrete distribution over the listed values.
        return {'type': 'discrete', 'range': values, }

    design = ['Ms-24-1-DWD Weimar', ]
    scenario = {'generic_scenario': None}

    distributions = {'exterior_heat_transfer_coefficient': uniform(5, 35),
                     'exterior_moisture_transfer_coefficient': discrete([7.7 * 10 ** -9]),
                     'solar_absorption': uniform(0.4, 0.8),
                     'interior_heat_transfer_coefficient': uniform(5, 10),
                     'interior_moisture_transfer_coefficient': uniform(1 * 10 ** -8, 3 * 10 ** -8),
                     'wall_orientation': uniform(135, 225),
                     'wall_core_material': discrete(inputs.wall_core_materials(folder)),
                     'initial_temperature': uniform(0, 10),
                     'initial_relhum': uniform(50, 90),
                     'exterior_climate': discrete(['Ms-24-1']),
                     'start_year': discrete([2017]),
                     'simulation_length': discrete([2]),
                     }

    sampling_settings = {'initial samples per set': 1,
                         'add samples per run': 1,
                         'max samples': 500,
                         'sequence': 10,
                         'standard error threshold': 0.1,
                         'raw sample size': 2 ** 9}

    combined_dict = {'design': design, 'scenario': scenario,
                     'distributions': distributions, 'settings': sampling_settings}

    with open(os.path.join(folder, 'sampling_strategy.json'), 'w') as out_file:
        json.dump(combined_dict, out_file)
def upload_strategy(folder):
    """Read sampling_strategy.json from the folder and upload it to the database."""

    strategy_path = os.path.join(folder, 'sampling_strategy.json')

    with open(strategy_path) as strategy_file:
        strategy_data = json.load(strategy_file)

    sampling_interactions.upload_sampling_strategy(strategy_data)
def upload_designs(folder):
    """Upload the design Delphin file, attached to the first sampling strategy."""

    strategy = sample_entry.Strategy.objects().first()
    design_file = 'Ms-24-1-DWD Weimar.d6p'

    delphin_interactions.upload_design_file(os.path.join(folder, design_file), strategy.id, False, True)
# Script entry: run the upload pipeline. The weather and material uploads are
# commented out — presumably already performed once; confirm before re-enabling.
# upload_weather(r'C:\Users\ocni\PycharmProjects\delphin_6_automation\data_process\validation\inputs\weather')
# upload_materials(r'C:\Users\ocni\PycharmProjects\delphin_6_automation\data_process\validation\inputs')
create_strategy(r'C:\Users\ocni\PycharmProjects\delphin_6_automation\data_process\validation\inputs')
upload_strategy(r'C:\Users\ocni\PycharmProjects\delphin_6_automation\data_process\validation\inputs')
upload_designs(r'C:\Users\ocni\PycharmProjects\delphin_6_automation\data_process\validation\inputs\design')

# Close the connection opened by mongo_setup.global_init at the top of the file.
mongo_setup.global_end_ssh(server)
| {
"repo_name": "thp44/delphin_6_automation",
"path": "data_process/validation/upload_data_to_db.py",
"copies": "1",
"size": "10200",
"license": "mit",
"hash": 8452626281916862000,
"line_mean": 39.6374501992,
"line_max": 120,
"alpha_frac": 0.5288235294,
"autogenerated": false,
"ratio": 4.114562323517547,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001547824738881268,
"num_lines": 251
} |
__author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules:
import os
import numpy as np
import bson
# RiBuild Modules:
from delphin_6_automation.database_interactions.db_templates import delphin_entry as delphin_db
from delphin_6_automation.file_parsing import weather_parser
from delphin_6_automation.delphin_setup import weather_modeling
import delphin_6_automation.database_interactions.db_templates.weather_entry as weather_db
from delphin_6_automation.logging.ribuild_logger import ribuild_logger
from delphin_6_automation.delphin_setup import delphin_permutations
# Logger
logger = ribuild_logger()
# -------------------------------------------------------------------------------------------------------------------- #
# WEATHER INTERACTIONS
def list_project_weather(sim_id: str) -> list:
    """
    Returns a list with the weather in a project entry.

    :param sim_id: Delphin entry ID
    :return: List of (type, file name) tuples, one per climate condition
    """

    # NOTE(review): attribute-style traversal of dp6_file differs from the
    # sibling modules, which use dict access (dict(...)['DelphinProject']...).
    # Confirm dp6_file supports attribute access here.
    weather = delphin_db.Delphin.objects(id=sim_id).first().dp6_file.DelphinProject.Conditions.ClimateConditions.ClimateCondition

    weather_list = [(w.type, w.Filename)
                    for w in weather]

    return weather_list
def assign_weather_by_name_and_years(delphin_id: str, weather_station_name: str, years: list) -> str:
    """Assigns weather and years to a Delphin project in the database.

    :param delphin_id: Delphin document database ID
    :param weather_station_name: Name of the weather station
    :param years: Years to assign weather for
    :return: Database ID
    """

    # One weather document per requested year for the given station.
    weather_documents = [weather_db.Weather.objects(location_name=weather_station_name, year=year).first()
                         for year in years]

    delphin_id = assign_weather_to_project(delphin_id, weather_documents)

    logger.debug(f'Assigned weather from {weather_station_name} '
                 f'for years: {years} to Delphin project with ID: {delphin_id}')

    return delphin_id
def assign_weather_to_project(delphin_id: str, weather_documents: list) -> str:
    """
    Assign weather to a Delphin entry.

    :param delphin_id: Delphin document database ID
    :param weather_documents: List with weather entries
    :return: Database ID
    """

    # Save climate class to delphin document
    delphin_document = delphin_db.Delphin.objects(id=delphin_id).first()

    # Remove any previously assigned weather before pushing the new documents.
    if delphin_document.weather:
        delphin_document.update(pull_all__weather=delphin_document.weather)

    # A plain loop instead of a throwaway list comprehension for side effects.
    for weather in weather_documents:
        delphin_document.update(push__weather=weather)

    # BUG FIX: log the actual IDs — the previous no-op comprehension logged the
    # documents themselves while claiming to log IDs.
    logger.debug(f'Weather documents with IDs: {[weather_.id for weather_ in weather_documents]} '
                 f'assigned to Delphin project with ID: {delphin_id}')

    return delphin_document.id
def assign_indoor_climate_to_project(delphin_id: str, climate_class: str) -> str:
    """
    Assign indoor climate class to a Delphin entry.

    :param delphin_id: Database ID
    :param climate_class: Climate class can be either a or b
    :return: Database ID
    """

    normalized_class = climate_class.lower()

    # Make check
    if normalized_class not in ('a', 'b'):
        raise ValueError(f'Wrong climate class. It has to be either a or b. '
                         f'Climate class given was: {climate_class}')

    # Save climate class to delphin document
    delphin_document = delphin_db.Delphin.objects(id=delphin_id).first()
    delphin_document.update(set__indoor_climate=normalized_class)

    logger.debug(f'Added indoor climate class {climate_class} to Delphin project with ID: {delphin_id}')

    return delphin_document.id
def concatenate_weather(delphin_document: delphin_db.Delphin) -> dict:
    """Concatenate weather documents together from a Delphin project in the database.

    :param delphin_document: Delphin entry whose assigned weather documents
        (one per year) are concatenated in order
    :return: Dict with hourly weather lists plus per-year metadata lists
        ('year', 'location_name', 'altitude')
    """

    weather_dict = {'temperature': [], 'relative_humidity': [],
                    'vertical_rain': [], 'wind_direction': [],
                    'wind_speed': [], 'long_wave_radiation': [],
                    'diffuse_radiation': [], 'direct_radiation': [],
                    'indoor_temperature': [], 'indoor_relative_humidity': [],
                    'year': [], 'location_name': [], 'altitude': []}

    sim_id = delphin_document.id

    for index in range(len(delphin_document.weather)):
        # Re-fetch the document each iteration so every weather_data file is
        # read from a fresh handle — presumably needed because .read() consumes
        # the stored file; confirm against the db template.
        reloaded_delphin = delphin_db.Delphin.objects(id=sim_id).first()
        weather_document_as_dict: dict = bson.BSON.decode(reloaded_delphin.weather[index].weather_data.read())

        for weather_key in weather_document_as_dict:
            if weather_key in ['temperature', 'vertical_rain',
                               'wind_direction', 'wind_speed',
                               'long_wave_radiation', 'diffuse_radiation',
                               'direct_radiation', 'indoor_temperature']:
                weather_dict[weather_key].extend(weather_document_as_dict[weather_key])

            elif weather_key.endswith('relative_humidity'):
                # Unit '-' marks fractional humidity; convert it to percent.
                if reloaded_delphin.weather[index].units['relative_humidity'] == '-':
                    relhum = [rh * 100
                              for rh in weather_document_as_dict[weather_key]]
                    weather_dict[weather_key].extend(relhum)
                else:
                    weather_dict[weather_key].extend(weather_document_as_dict[weather_key])

        weather_dict['year'].append(reloaded_delphin.weather[index].year)
        weather_dict['location_name'].append(reloaded_delphin.weather[index].location_name)
        weather_dict['altitude'].append(reloaded_delphin.weather[index].altitude)

    logger.debug(f'Concatenated weather for Delphin project with ID: {sim_id}')

    return weather_dict
def change_weather_file_location(delphin_document: delphin_db.Delphin):
    """Change the file location for the weather files in a Delphin project."""

    folder = '${Project Directory}/weather'
    delphin_dict = dict(delphin_document.dp6_file)
    climate_conditions = delphin_dict['DelphinProject']['Conditions']['ClimateConditions']['ClimateCondition']
    interior_synonyms = ['indoor', 'interior', 'inside']

    # Condition types whose file name does not depend on interior/exterior.
    simple_files = {'SWRadiationDiffuse': 'diffuse_radiation.ccd',
                    'SWRadiationDirect': 'direct_radiation.ccd',
                    'SWRadiationImposed': 'short_wave_radiation.ccd',
                    'RainFluxNormal': 'wind_driven_rain.ccd',
                    'WindDirection': 'wind_direction.ccd',
                    'WindVelocity': 'wind_speed.ccd',
                    'LWRadiationSkyEmission': 'long_wave_radiation.ccd'}

    def is_interior(condition):
        # A condition counts as interior when its name contains a synonym.
        return any(synonym in condition['@name'].lower() for synonym in interior_synonyms)

    for condition in climate_conditions:
        condition_type = condition['@type']

        if condition_type == 'Temperature':
            file_name = 'indoor_temperature.ccd' if is_interior(condition) else 'temperature.ccd'
            condition['Filename'] = folder + '/' + file_name

        elif condition_type == 'RelativeHumidity':
            file_name = 'indoor_relative_humidity.ccd' if is_interior(condition) else 'relative_humidity.ccd'
            condition['Filename'] = folder + '/' + file_name

        elif condition_type in simple_files:
            condition['Filename'] = folder + '/' + simple_files[condition_type]

    delphin_document.update(set__dp6_file=delphin_dict)

    logger.debug(f'Changed weather directory to {folder} for Delphin project with ID: {delphin_document.id}')

    return delphin_document.id
def download_weather(delphin_document: delphin_db.Delphin, folder: str) -> bool:
    """Download the weather associated with a Delphin project in the database.

    :param delphin_document: Delphin entry
    :param folder: Folder the .ccd weather files are written to
    :return: True on success
    """

    weather = concatenate_weather(delphin_document)

    # If there is not already given indoor climate data, then generate them for the standard
    if not weather.get('indoor_temperature') and not weather.get('indoor_relative_humidity'):
        weather['indoor_temperature'], weather['indoor_relative_humidity'] = \
            weather_modeling.convert_weather_to_indoor_climate(weather['temperature'],
                                                               delphin_document.indoor_climate)

    orientation = delphin_permutations.get_orientation(delphin_document.dp6_file)

    # Compute the wind driven rain, if wind and rain are given
    if weather.get('vertical_rain') and weather.get('wind_direction') and weather.get('wind_speed'):
        # Fixed 5x5 wall, vertical surface (inclination 90), full catch ratio.
        wall_location = {'height': 5, 'width': 5}
        weather['wind_driven_rain'] = weather_modeling.driving_rain(weather['vertical_rain'], weather['wind_direction'],
                                                                    weather['wind_speed'], wall_location, orientation,
                                                                    inclination=90, catch_ratio=1)

    delphin_document.reload()
    # NOTE(review): upload_weather_to_db stores location as
    # [longitude, latitude], but index 0 is taken as latitude here —
    # confirm the intended order.
    latitude = delphin_document.weather[0].location[0]
    longitude = delphin_document.weather[0].location[1]
    radiation = np.array(weather['direct_radiation']) + np.array(weather['diffuse_radiation'])
    weather['short_wave_radiation'] = weather_modeling.short_wave_radiation(radiation, longitude,
                                                                            latitude, 0, orientation)

    # Repeat the last two hours of every series — presumably to make the
    # climate data outlast the simulation horizon; confirm why exactly two.
    for weather_key in weather.keys():
        if weather_key not in ['year', 'location_name', 'altitude']:
            weather[weather_key].extend(weather[weather_key][-2:])

    weather_parser.dict_to_ccd(weather, folder)
    change_weather_file_location(delphin_document)

    logger.debug(f'Downloaded weather for Delphin project with ID: {delphin_document.id} to {folder}')

    return True
def update_short_wave_condition(delphin_dict):
    """Collect the short wave radiation climate condition names.

    NOTE(review): the boundary-condition rewrite below is disabled (kept as an
    inert string literal), so this function currently returns delphin_dict
    unchanged; diffuse_radiation/direct_radiation are collected but unused.

    :param delphin_dict: Delphin project as a dict
    :return: The (currently unmodified) Delphin project dict
    """

    climate_conditions = delphin_dict['DelphinProject']['Conditions']['ClimateConditions']['ClimateCondition']

    for climate_condition in climate_conditions:
        if climate_condition['@type'] == 'SWRadiationDiffuse':
            diffuse_radiation = climate_condition['@name']

        elif climate_condition['@type'] == 'SWRadiationDirect':
            direct_radiation = climate_condition['@name']

    boundary_conditions = delphin_dict['DelphinProject']['Conditions']['BoundaryConditions']['BoundaryCondition']
    """
    for boundary_condition in boundary_conditions:
        if boundary_condition['@type'] == 'ShortWaveRadiation':

            try:
                for cc_ref in boundary_condition['CCReference']:
                    if cc_ref['@type'] == 'SWRadiationDirect':
                        cc_ref['#text'] = direct_radiation

                    elif cc_ref['@type'] == 'SWRadiationDiffuse':
                        cc_ref['#text'] = diffuse_radiation

            except KeyError:
                boundary_condition['CCReference'] = [OrderedDict((('@type', 'SWRadiationDirect'),
                                                                  ('#text', direct_radiation))),
                                                     OrderedDict((('@type', 'SWRadiationDiffuse'),
                                                                  ('#text', diffuse_radiation)))]
    """
    return delphin_dict
def upload_weather_to_db(file_path: str) -> list:
    """Upload a WAC weather file to the database.

    The WAC file is parsed and split into whole years; one Weather entry is
    created and saved per year.

    :param file_path: Path to the .wac weather file
    :return: List with the database ids of the created weather entries
    """

    weather_dict = weather_parser.wac_to_dict(file_path)

    # Split years
    def hours_per_year(start, stop):
        # Yields 8760 hours for every whole year in [start, stop).
        while start < stop:
            yield 8760
            start += 1

    def accumulate_hours(hour_list):
        # Running sum of hours; gives the slicing offsets between years.
        accumulated_list = [0, ]

        for i in range(0, len(hour_list)):
            accumulated_list.append(accumulated_list[i] + hour_list[i])

        return accumulated_list

    hours = [hour
             for hour in hours_per_year(weather_dict['time'][0].year,
                                        weather_dict['time'][-1].year)]
    accumulated_hours = accumulate_hours(hours)

    # Add yearly weather entries
    entry_ids = []

    for year_index in range(1, len(accumulated_hours)):
        yearly_weather_entry = weather_db.Weather()

        # Meta Data
        # The station name is the first '_'-separated token of the file name.
        yearly_weather_entry.location_name = os.path.split(file_path)[-1].split('_')[0]
        year_dates = weather_dict['time'][accumulated_hours[year_index - 1]: accumulated_hours[year_index]]
        yearly_weather_entry.dates = {'start': year_dates[0],
                                      'stop': year_dates[-1]}
        yearly_weather_entry.year = year_dates[0].year
        yearly_weather_entry.location = [weather_dict['longitude'], weather_dict['latitude']]
        yearly_weather_entry.altitude = weather_dict['altitude']
        yearly_weather_entry.source = {'comment': 'Climate for Culture',
                                       'url': 'https://www.climateforculture.eu/',
                                       'file': os.path.split(file_path)[-1]}
        yearly_weather_entry.units = {'temperature': 'C',
                                      'relative_humidity': '-',
                                      'vertical_rain': 'mm/h',
                                      'wind_direction': 'degrees',
                                      'wind_speed': 'm/s',
                                      'long_wave_radiation': 'W/m2',
                                      'diffuse_radiation': 'W/m2',
                                      'direct_radiation': 'W/m2'
                                      }

        # Climate Data - slice out one year per entry.
        # NOTE(review): 'direct_radiation' is filled from the *global*
        # horizontal radiation key — confirm this mapping is intended.
        weather = {
            'temperature': weather_dict['temperature'][
                           accumulated_hours[year_index - 1]:
                           accumulated_hours[year_index]],
            'relative_humidity': weather_dict['relative_humidity'][
                                 accumulated_hours[year_index - 1]:
                                 accumulated_hours[year_index]],
            'vertical_rain': weather_dict['vertical_rain'][
                             accumulated_hours[year_index - 1]:
                             accumulated_hours[year_index]],
            'wind_direction': weather_dict['wind_direction'][
                              accumulated_hours[year_index - 1]:
                              accumulated_hours[year_index]],
            'wind_speed': weather_dict['wind_speed'][
                          accumulated_hours[year_index - 1]:
                          accumulated_hours[year_index]],
            'long_wave_radiation': weather_dict['atmospheric_counter_horizontal_long_wave_radiation'][
                                   accumulated_hours[year_index - 1]:
                                   accumulated_hours[year_index]],
            'diffuse_radiation': weather_dict['diffuse_horizontal_solar_radiation'][
                                 accumulated_hours[year_index - 1]:
                                 accumulated_hours[year_index]],
            'direct_radiation': weather_dict['horizontal_global_solar_radiation'][
                                accumulated_hours[year_index - 1]:
                                accumulated_hours[year_index]]
        }

        # The weather series are stored as a BSON blob on the entry.
        yearly_weather_entry.weather_data.put(bson.BSON.encode(weather))
        yearly_weather_entry.save()
        entry_ids.append(yearly_weather_entry.id)
        yearly_weather_entry.reload()

        logger.debug(f'Uploaded weather files from {yearly_weather_entry.location_name} '
                     f'for year {yearly_weather_entry.year}')

    return entry_ids
| {
"repo_name": "thp44/delphin_6_automation",
"path": "delphin_6_automation/database_interactions/weather_interactions.py",
"copies": "1",
"size": "16063",
"license": "mit",
"hash": 1107403312416815200,
"line_mean": 44.1207865169,
"line_max": 129,
"alpha_frac": 0.5886198095,
"autogenerated": false,
"ratio": 4.051197982345523,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001925562096558954,
"num_lines": 356
} |
__author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas
# RiBuild Modules
from delphin_6_automation.file_parsing import delphin_parser
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
# Folder with one sub-folder per (failed) simulation to inspect.
data_folder = os.path.join(os.path.dirname(__file__), 'data')

# Accumulators: fraction of hours above the 90 threshold and overall averages
# for mould, wood rot and frost moisture content.
rot = []
rot_avg = []
mould = []
mould_avg = []
moisture = []
moisture_avg = []

for folder in os.listdir(data_folder):
    # Results live two levels down: <data>/<sim>/<sim>/results
    results_folder = os.path.join(data_folder, folder, folder, 'results')

    # Relative humidity at the mould location; share of hours above 90 %RH.
    mould_data = np.array(delphin_parser.d6o_to_dict(results_folder, 'relative humidity mould.d6o')[0])
    mould.append(len(mould_data[mould_data > 90.0])/len(mould_data))
    mould_avg.append(np.mean(mould_data))

    # Relative humidity at the wood rot location.
    rot_data = np.array(delphin_parser.d6o_to_dict(results_folder, 'relative humidity wood rot.d6o')[0])
    rot.append(len(rot_data[rot_data > 90.0]) / len(rot_data))
    rot_avg.append(np.mean(rot_data))

    # Moisture content at the frost location.
    # NOTE(review): the 90.0 threshold is reused here although this series is
    # moisture content, not %RH - confirm the threshold is intended.
    moist = np.array(delphin_parser.d6o_to_dict(results_folder, 'moisture content frost.d6o')[0])
    moisture.append(len(moist[moist > 90.0]) / len(moist))
    moisture_avg.append(np.mean(moist))

# One x-position per simulation for the scatter plots.
x = np.arange(len(mould))

plt.figure()
plt.title('Hours over 90%')
plt.scatter(x, mould, label='Mould')
plt.scatter(x, rot, label='Rot')
plt.scatter(x, moisture, label='Moisture Content')
plt.ylim(-0.1, 1.1)
plt.legend()

plt.figure()
plt.title('Average')
plt.scatter(x, mould_avg, label='Mould')
plt.scatter(x, rot_avg, label='Rot')
plt.ylim(45, 110)
plt.legend()

plt.figure()
plt.title('Average Moisture')
plt.scatter(x, moisture_avg, label='Moisture Content', color='green')
plt.legend()
plt.ylim(50, 160)

plt.show()
| {
"repo_name": "thp44/delphin_6_automation",
"path": "data_process/failed_simulations/process_failed.py",
"copies": "1",
"size": "1888",
"license": "mit",
"hash": -4192356046566729700,
"line_mean": 28.0461538462,
"line_max": 120,
"alpha_frac": 0.6038135593,
"autogenerated": false,
"ratio": 2.909090909090909,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9009375837514626,
"avg_score": 0.0007057261752566448,
"num_lines": 65
} |
__author__ = "Christian Kongsgaard"
__license__ = "MIT"
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules:
import os
import yaml
import numpy as np
# RiBuild Modules:
from delphin_6_automation.database_interactions.db_templates import delphin_entry as delphin_db, delphin_entry
from delphin_6_automation.database_interactions.db_templates import result_raw_entry as result_db
from delphin_6_automation.database_interactions.db_templates import weather_entry as weather_db
from delphin_6_automation.database_interactions.db_templates import material_entry as material_db
from delphin_6_automation.database_interactions import delphin_interactions
from delphin_6_automation.database_interactions import material_interactions
from delphin_6_automation.database_interactions import weather_interactions
from delphin_6_automation.logging.ribuild_logger import ribuild_logger
# Logger
logger = ribuild_logger()
# -------------------------------------------------------------------------------------------------------------------- #
# MATERIAL INTERACTIONS
def download_raw_result(result_id: str, download_path: str) -> bool:
    """
    Download a result entry from the database to disk.

    :param result_id: Database entry id
    :param download_path: Path where the result should be written
    :return: True
    """

    entry = result_db.Result.objects(id=result_id).first()

    # Each result gets its own sub-folder named after the entry id.
    target_folder = os.path.join(download_path, str(result_id))
    if not os.path.exists(target_folder):
        os.mkdir(target_folder)

    # delphin_parser.write_log_files(result_obj, download_path)
    delphin_interactions.download_result_files(entry, target_folder)

    return True
def queue_priorities(priority: str) -> int:
    """
    Generate a queue priority number.

    :param priority: High, medium or low priority
    :return: Priority number
    """

    existing = [entry.queue_priority
                for entry in delphin_db.Delphin.objects.order_by('queue_priority')]

    # Empty queue: everything starts at priority 1.
    if not existing:
        return 1

    min_priority = min(existing)
    max_priority = max(existing)
    span = max_priority - min_priority

    if priority == 'high':
        return int(max_priority)
    if priority == 'medium':
        return int(span + 0.5 * min_priority)
    if priority == 'low':
        return int(span + 0.25 * min_priority)

    raise ValueError('priority has to be: high, medium or low. Value given was: ' + str(priority))
def add_to_simulation_queue(delphin_file: str, priority: str) -> str:
    """
    Upload a Delphin project file and place it in the simulation queue.

    :param delphin_file: Delphin 6 project file path
    :param priority: High, medium or low priority
    :return: Database entry id
    """

    queue_number = queue_priorities(priority)
    simulation_id = delphin_interactions.upload_delphin_to_database(delphin_file, queue_number)

    logger.debug(f'Added Delphin project with ID: {simulation_id} to queue with priority: {priority}')

    return simulation_id
def is_simulation_finished(sim_id: str) -> bool:
    """
    Check whether a Delphin project entry has been simulated.

    :param sim_id: Database entry to check
    :return: True if it is simulated otherwise returns False.
    """

    entry = delphin_db.Delphin.objects(id=sim_id).first()

    if entry.simulated:
        logger.debug(f'Delphin project with ID: {sim_id} is finished simulating')
        return True

    logger.debug(f'Delphin project with ID: {sim_id} is not finished simulating')
    return False
def list_finished_simulations() -> list:
    """Return a list of Delphin entry IDs for simulated entries."""

    return [document.id
            for document in delphin_db.Delphin.objects()
            if document.simulated]
def download_full_project_from_database(document_id: str, folder: str) -> bool:
    """
    Download a Delphin project file with all of its materials and weather.

    :param document_id: Database entry id
    :param folder: Path where the files should be written.
    :return: True
    """

    entry = delphin_db.Delphin.objects(id=document_id).first()

    material_interactions.download_materials(entry, os.path.join(folder, 'materials'))
    weather_interactions.download_weather(entry, os.path.join(folder, 'weather'))
    delphin_interactions.download_delphin_entry(entry, folder)

    # Restart data is optional; only fetch it when the entry carries some.
    if entry.restart_data:
        delphin_interactions.download_restart_data(entry, os.path.join(folder, document_id))

    logger.debug(f'Download Delphin project with ID: {document_id} from database with weather and materials.')

    return True
def list_weather_stations() -> dict:
    """List the weather stations currently in the database.

    :return: dict mapping location name -> {'location': geo point, 'years': [int, ...]}
    """

    weather_stations = dict()

    for document in weather_db.Weather.objects():
        # BUGFIX: the membership test used the raw field while keys were stored
        # as str(...); normalize once so both sides always agree.
        name = str(document.location_name)

        if name in weather_stations:
            weather_stations[name]['years'].append(document.year)
        else:
            weather_stations[name] = {'location': document.location,
                                      'years': [document.year, ]}

    return weather_stations
def print_weather_stations_dict(weather_station_dict):
    """Print a human-readable summary of each weather station entry."""

    for name, station in weather_station_dict.items():
        print(f'Weather Station: {name} at location: {station["location"]} contains '
              f'{len(station["years"])} years.\n'
              f'\t The years are: {station["years"]}')
def list_materials():
    """List materials currently in the database."""

    materials = {}

    for document in material_db.Material.objects():
        name = str(document.material_name)
        materials[name] = {'material_id': document.material_id,
                           'database_id': document.id}

    return materials
def print_material_dict(materials):
    """Print a human-readable summary of each material entry."""

    for name, info in materials.items():
        print(f'Material:\n'
              f'\tName: {name}\n'
              f'\tDelphin Material ID: {info["material_id"]}\n'
              f'\tDatabase ID: {info["database_id"]}\n')
def does_simulation_exists(sim_id: str) -> bool:
    """
    Check if a Delphin project entry is in the database or not.

    :param sim_id: Database entry to check
    :return: True if it is in database otherwise returns False.
    """

    # .first() returns None when no entry matches; return the truth value
    # directly instead of the if/else True/False anti-pattern.
    return delphin_db.Delphin.objects(id=sim_id).first() is not None
def compute_simulation_time(sim_id: str) -> int:
    """
    Get the average time for this type of construction (2D or 1D).

    :param sim_id: Delphin entry id from database
    :return: Average simulation time in minutes
    """

    sim_obj = delphin_entry.Delphin.objects(id=sim_id).first()
    dimension = sim_obj.dimensions
    predicted_time = sim_obj.estimated_simulation_time

    # Prefer the per-project estimate when one exists.
    if predicted_time:
        logger.debug(f'Predicted simulation time for Delphin project in {dimension}D: {predicted_time}min')
        return predicted_time

    # Otherwise fall back to the average over comparable (same dimension) projects.
    sim_time = delphin_entry.Delphin.objects(dimensions=dimension, simulation_time__exists=True).average(
        'simulation_time')

    if sim_time:
        # simulation_time / 60 -> minutes, rounded up (presumably stored in
        # seconds - TODO confirm against the writer of that field).
        avg_time = int(np.ceil(sim_time / 60))
        logger.debug(f'Average simulation time for Delphin projects in {dimension}D: {avg_time}min')
        return avg_time
    elif dimension == 2:
        # BUGFIX: log message previously claimed 180min while 240 was returned.
        logger.debug('No previous simulations found. Setting time to 240min for a 2D simulation')
        return 240
    else:
        # BUGFIX: log message previously claimed 60min while 120 was returned.
        logger.debug('No previous simulations found. Setting time to 120min for a 1D simulation')
        return 120
def download_sample_data(delphin_id, folder):
    """Dump the entry's sample data to 'sample_data.txt' in *folder* as YAML."""

    entry = delphin_db.Delphin.objects(id=delphin_id).first()

    if not os.path.exists(folder):
        os.mkdir(folder)

    # Convert the mongoengine dict wrappers to plain dicts before dumping.
    sample_data = dict(entry.sample_data)
    sample_data['design_option'] = dict(sample_data['design_option'])

    with open(os.path.join(folder, 'sample_data.txt'), 'w') as file:
        yaml.dump(sample_data, file)

    return None
| {
"repo_name": "thp44/delphin_6_automation",
"path": "delphin_6_automation/database_interactions/general_interactions.py",
"copies": "1",
"size": "8724",
"license": "mit",
"hash": 6288229648234507000,
"line_mean": 32.5538461538,
"line_max": 120,
"alpha_frac": 0.659215956,
"autogenerated": false,
"ratio": 3.757105943152455,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4916321899152455,
"avg_score": null,
"num_lines": null
} |
__author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
import os
# RiBuild Modules
from delphin_6_automation.database_interactions.db_templates import delphin_entry
from delphin_6_automation.database_interactions.db_templates import sample_entry
from delphin_6_automation.database_interactions import mongo_setup
from delphin_6_automation.database_interactions.auth import auth_dict
from delphin_6_automation.database_interactions import simulation_interactions
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
if __name__ == "__main__":
    server = mongo_setup.global_init(auth_dict)

    # Queue/progress counters straight from the Delphin collection.
    simulating = delphin_entry.Delphin.objects(simulating__ne=None).count()
    simulated = delphin_entry.Delphin.objects(simulated__exists=True).count()
    projects = delphin_entry.Delphin.objects().count()

    # Mean of the per-project time estimates for the not-yet-simulated projects.
    avg_time = delphin_entry.Delphin.objects(simulated__exists=False,
                                             estimated_simulation_time__exists=True
                                             ).average("estimated_simulation_time")

    # NOTE(review): hard-coded local path; only consumed by the commented-out
    # check_simulations call below.
    auth_path = r'C:\Users\ocni\PycharmProjects\delphin_6_automation\test\checks'
    file = os.path.join(auth_path, 'ocni.json')
    #bstat, pending = simulation_interactions.check_simulations(file, only_count=True)

    strategy = sample_entry.Strategy.objects().first()

    # Status report to stdout.
    print()
    print(f'Projects Currently Simulating: {simulating}')
    #print(f'Projects Running in BSTAT: {bstat} - {pending} Projects Pending')
    print(f'Simulated Projects: {simulated}')
    print(f'Projects in DB: {projects}')
    print(f'Remaining Projects: {projects - simulated}')
    print(f'Average Estimated Simulation Time for Remaining Projects: {avg_time:.02f} min')
    print()
    print(f'Current Iteration: {strategy.current_iteration}')
    #print(f'Number of Samples in Strategy: {len(strategy.samples)}')
    print()

    mongo_setup.global_end_ssh(server)
| {
"repo_name": "thp44/delphin_6_automation",
"path": "data_process/wp6_v2/sim_status.py",
"copies": "1",
"size": "2116",
"license": "mit",
"hash": -8618433809592759000,
"line_mean": 46.0222222222,
"line_max": 120,
"alpha_frac": 0.6252362949,
"autogenerated": false,
"ratio": 3.8194945848375452,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4944730879737545,
"avg_score": null,
"num_lines": null
} |
__author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
import getpass
import os
import pathlib
import pandas as pd
# RiBuild Modules
from delphin_6_automation.database_interactions.db_templates import delphin_entry
from delphin_6_automation.database_interactions import mongo_setup
from delphin_6_automation.database_interactions.auth import auth_2d_1d as auth_dict
from delphin_6_automation.database_interactions import general_interactions
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
print("---> in download script")

user = getpass.getuser()

# Local target folder for all downloaded projects.
folder = r'D:\WP6_1d2d_simulated'

if not os.path.exists(folder):
    pathlib.Path(folder).mkdir(parents=True)

server = mongo_setup.global_init(auth_dict)

#simulated_projects = delphin_entry.Delphin.objects(simulated__exists=True)
#pd.Series(simulated_projects).to_csv('C:\\Users\\sbj\\Documents\\WP6 Study 2D_1D\\simulated.txt')

print(f'There are currently {delphin_entry.Delphin.objects(simulated__exists=True).count()} simulated projects in the database')
print(f'Downloading Projects')

#range_start = int(pd.read_csv('range.txt').columns[1])
#print('Current start: ', range_start)
# failing: ...8c71 and 8c79 and 8caf and 8d08
#range_start = 0

# One sub-folder per project; already-existing folders are skipped, so the
# script can be re-run to resume an interrupted download.
for project in delphin_entry.Delphin.objects(simulated__exists=True).only('id'):
    project_id = str(project.id)
    project_folder = os.path.join(folder, str(project_id))

    if not os.path.exists(project_folder):
        print(f'\nDownloads Project with ID: {project_id}')
        result_id = str(delphin_entry.Delphin.objects(id=project_id).first().results_raw.id)
        os.mkdir(project_folder)
        general_interactions.download_full_project_from_database(project_id, project_folder)
        general_interactions.download_sample_data(project_id, project_folder)
        general_interactions.download_raw_result(result_id, project_folder)
    else:
        print(f'Skipping Project with ID: {project_id}. Already downloaded.')

mongo_setup.global_end_ssh(server)
| {
"repo_name": "thp44/delphin_6_automation",
"path": "data_process/2d_1d/simon/download.py",
"copies": "1",
"size": "2206",
"license": "mit",
"hash": -6558662757134022000,
"line_mean": 33.46875,
"line_max": 128,
"alpha_frac": 0.6591115141,
"autogenerated": false,
"ratio": 3.409582689335394,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9473544717836524,
"avg_score": 0.019029897119774056,
"num_lines": 64
} |
__author__ = "Christian Kongsgaard"
__license__ = "MIT"
__version__ = "0.0.1"
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
from datetime import datetime
# Modules:
import mongoengine
# RiBuild Modules:
import delphin_6_automation.database_interactions.database_collections as collections
# -------------------------------------------------------------------------------------------------------------------- #
# WEATHER CLASS
class Weather(mongoengine.Document):
    """Mongoengine document holding one year of weather data for one location."""

    # Collection/db settings shared via the project's collections module.
    meta = collections.weather_db

    # Weather Data: BSON-encoded weather dict written via FileField
    # (weather_interactions does weather_data.put(bson.BSON.encode(...))).
    weather_data = mongoengine.FileField(required=True, db_alias=meta['db_alias'],
                                         collection_name=meta['collection'])
    # weather_data = {'temperature': [], 'vertical_rain': [], 'wind_direction': [], 'wind_speed': [],
    #                 'long_wave_radiation': [], 'diffuse_radiation': [], 'direct_radiation': []}

    # Meta Data
    # NOTE(review): passing a field type positionally to DictField is unusual -
    # MapField is the typed-mapping field in mongoengine; confirm this behaves
    # as intended.
    dates = mongoengine.DictField(mongoengine.DateTimeField, required=True)
    year = mongoengine.IntField(required=True)
    location = mongoengine.GeoPointField(required=True)
    altitude = mongoengine.FloatField(required=True)
    location_name = mongoengine.StringField(required=True)
    source = mongoengine.DictField(required=True)
    added_date = mongoengine.DateTimeField(default=datetime.now)
    units = mongoengine.DictField(required=True)
| {
"repo_name": "thp44/delphin_6_automation",
"path": "delphin_6_automation/database_interactions/db_templates/weather_entry.py",
"copies": "1",
"size": "1426",
"license": "mit",
"hash": 2360547615695469000,
"line_mean": 36.5263157895,
"line_max": 120,
"alpha_frac": 0.5778401122,
"autogenerated": false,
"ratio": 4.308157099697885,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0016369491812502684,
"num_lines": 38
} |
__author__ = "Christian Kongsgaard"
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules:
import datetime
import os
# RiBuild Modules:
# -------------------------------------------------------------------------------------------------------------------- #
# WEATHER PARSING
def dict_to_ccd(weather_dict: dict, folder: str) -> bool:
    """
    Takes a weather dict and converts it into .ccd files (one file per quantity).

    Only the keys whitelisted in the loop at the bottom are exported; the rest
    of parameter_dict documents the known Delphin weather quantities.

    :param weather_dict: weather dict from mongo_db
    :param folder: Folder to where .ccd's should be placed.
    :return: True
    """

    if not os.path.isdir(folder):
        os.mkdir(folder)

    # Delphin .ccd header data per quantity: human readable description, the
    # file's intro line (quantity keyword + unit), a scaling factor and an
    # abbreviation. 'factor' and 'abr' are not used by list_to_ccd below.
    parameter_dict = {
        "temperature": {"description": "Air temperature [C] (TA)",
                        "intro": "TEMPER C",
                        "factor": 1,
                        "abr": "TA"},
        "relative_humidity": {"description": "Relative Humidity [%] (HREL)",
                              "intro": "RELHUM %",
                              "factor": 1,
                              "abr": "HREL"},
        "diffuse_radiation": {"description": "Diffuse Horizontal Solar Radiation [W/m2] (ISD)",
                              "intro": "DIFRAD W/m2",
                              "factor": 1,
                              "abr": "ISD"},
        "short_wave_radiation": {"description": "Short Wave Radiation [W/m2] (ISD)",
                                 "intro": "SHWRAD W/m2",
                                 "factor": 1,
                                 "abr": "SWR"},
        "wind_direction": {"description": "Wind Direction [degrees] (WD)",
                           "intro": "WINDDIR Deg",
                           "factor": 1,
                           "abr": "WD"},
        "wind_speed": {"description": "Wind Velocity [m/s] (WS)",
                       "intro": "WINDVEL m/s",
                       "factor": 1,
                       "abr": "WS"},
        "CloudCover": {"description": "Cloud Cover [-] (CI)",
                       "intro": "CLOUDCOV ---",
                       "factor": 1,
                       "abr": "CI"},
        "direct_radiation": {"description": "Direct Horizontal Solar Radiation [W/m2] (ISvar) [ISGH - ISD]",
                             "intro": "DIRRAD W/m2",
                             "factor": 1,
                             "abr": "ISVAR"},
        "vertical_rain": {"description": "Rain intensity[mm/h] (RN)",
                          "intro": "HORRAIN l/m2h",
                          "factor": 1,
                          "abr": "RN"},
        "wind_driven_rain": {"description": "Rain intensity[mm/h] (RN)",
                             "intro": "NORRAIN l/m2h",
                             "factor": 1,
                             "abr": "RN"},
        "TotalPressure": {"description": "Air pressure [hPa] (PSTA)",
                          "intro": "GASPRESS hPa",
                          "factor": 1,
                          "abr": "PSTA"},
        "long_wave_radiation": {"description": "Atmospheric Horizontal Long wave Radiation [W/m2] (ILTH)",
                                "intro": "SKYEMISS W/m2",
                                "factor": 1,
                                "abr": "ILTH"},
        "TerrainCounterRadiation": {"description": "Terrain counter radiation [W/m2](ILAH)",
                                    "intro": "GRINDEMISS W/m2",
                                    "factor": 1,
                                    "abr": "ILAH"},
        "GlobalRadiation": {"description": "Horizontal Global Solar Radiation [W/m2] (ISGH)",
                            "intro": "SKYEMISS W/m2",
                            "factor": 1,
                            "abr": "ISGH"},
        "indoor_relative_humidity": {"description": "Indoor Relative Humidity after EN15026 [%] (HREL)",
                                     "intro": "RELHUM %",
                                     "factor": 1},
        "indoor_temperature": {"description": "Indoor Air temperature after EN15026 [C] (TA)",
                               "intro": "TEMPER C",
                               "factor": 1},
    }

    # Export only the quantities the simulations actually consume.
    for weather_key in weather_dict.keys():
        if weather_key in ['temperature', 'relative_humidity',
                           'wind_driven_rain',
                           'long_wave_radiation',
                           'short_wave_radiation',
                           'indoor_temperature',
                           'indoor_relative_humidity', 'wind_speed']:
            # Header info for list_to_ccd: quantity meta data + year + location.
            info_dict = dict(parameter_dict[weather_key], **{'year': weather_dict['year']})
            info_dict.update({'location_name': weather_dict['location_name']})

            list_to_ccd(weather_dict[weather_key], info_dict, os.path.join(folder, f'{weather_key}.ccd'))

    return True
def list_to_ccd(weather_list: list, parameter_info: dict, file_path: str) -> bool:
    """
    Converts a weather list into a Delphin weather file (.ccd).

    :param weather_list: List with hourly weather values
    :param parameter_info: Dict with meta data for the weather file. Should contain the
        following keys: location_name, year, description and intro.
    :param file_path: Full file path for where the .ccd file should be saved.
    :return: True
    """

    # BUGFIX: file is now closed even if a write raises (was a bare open/close).
    with open(file_path, 'w') as file_obj:
        # Write meta data
        file_obj.write(f"# {parameter_info['location_name']}\n")
        file_obj.write(f"# Year {parameter_info['year']}\n")
        file_obj.write(f"# RIBuild - Hourly values, {parameter_info['description']} \n\n")
        file_obj.write(parameter_info["intro"] + "\n\n")

        # Write data: one line per hour as '<day> <hh:00:00> <value>'
        day = 0
        hour = 0
        for value in weather_list:
            # Roll over to the next day after 24 written hours.
            # (The previous comment claimed leap-day removal, but no value
            # is ever skipped - this is purely the day/hour counter.)
            if hour == 24:
                hour = 0
                day += 1

            hour_str = str(hour) + ":00:00"
            file_obj.write(f'{day:>{6}}{hour_str:>{9}} {value:.2f}\n')
            hour += 1

    return True
def wac_to_dict(file_path: str) -> dict:
    """Converts a WAC file into a dict.

    Header lines 4-6 hold longitude, latitude and altitude; hourly data lines
    (tab separated, time stamp followed by 13 values) start at line 12.

    :param file_path: Full path to the .wac file
    :return: dict with location meta data (floats) and hourly series (lists)
    """

    weather_dict = {'longitude': '',
                    'latitude': '',
                    'altitude': '',
                    'time': [],
                    'temperature': [],
                    'relative_humidity': [],
                    'horizontal_global_solar_radiation': [],
                    'diffuse_horizontal_solar_radiation': [],
                    'air_pressure': [],
                    'vertical_rain': [],
                    'wind_direction': [],
                    'wind_speed': [],
                    'cloud_index': [],
                    'atmospheric_counter_horizontal_long_wave_radiation': [],
                    'atmospheric_horizontal_long_wave_radiation': [],
                    'ground_temperature': [],
                    'ground_reflectance': []
                    }

    # BUGFIX: use a context manager so the handle is closed even on a read error.
    with open(file_path, 'r') as file_obj:
        file_lines = file_obj.readlines()

    weather_dict['longitude'] = float(file_lines[4].split('\t')[0].strip())
    weather_dict['latitude'] = float(file_lines[5].split('\t')[0].strip())
    weather_dict['altitude'] = float(file_lines[6].split('\t')[0].strip())

    for line in file_lines[12:]:
        split_line = line.split('\t')
        weather_dict['time'].append(datetime.datetime.strptime(split_line[0].strip(), '%Y-%m-%d %H:%M'))
        weather_dict['temperature'].append(float(split_line[1].strip()))
        weather_dict['relative_humidity'].append(float(split_line[2].strip()))
        weather_dict['horizontal_global_solar_radiation'].append(float(split_line[3].strip()))
        weather_dict['diffuse_horizontal_solar_radiation'].append(float(split_line[4].strip()))
        weather_dict['air_pressure'].append(float(split_line[5].strip()))
        weather_dict['vertical_rain'].append(float(split_line[6].strip()))
        weather_dict['wind_direction'].append(float(split_line[7].strip()))
        weather_dict['wind_speed'].append(float(split_line[8].strip()))
        weather_dict['cloud_index'].append(float(split_line[9].strip()))
        weather_dict['atmospheric_counter_horizontal_long_wave_radiation'].append(float(split_line[10].strip()))
        weather_dict['atmospheric_horizontal_long_wave_radiation'].append(float(split_line[11].strip()))
        weather_dict['ground_temperature'].append(float(split_line[12].strip()))
        weather_dict['ground_reflectance'].append(float(split_line[13].strip()))

    return weather_dict
def ccd_to_list(file_path: str) -> list:
    """Converts a .ccd file into a list of float values.

    While reading, consecutive day/hour stamps are compared and a warning is
    printed to stdout whenever hours or days are missing between two data
    lines. Header and otherwise non-numeric lines are skipped via the
    ValueError guard.

    NOTE(review): lines[index+1] will raise IndexError if the file ends on a
    data line - confirm a trailing non-data line is guaranteed.
    """
    file = open(file_path, 'r')
    lines = file.readlines()
    file.close()

    data = []

    for index, line in enumerate(lines):
        try:
            if line.startswith(' '):
                # Space-indented line: value is the last space-separated token.
                data.append(float(line.split(' ')[-1].strip()))

            elif isinstance(int(line[0]), int):
                # Line starts with a digit; int() raises ValueError otherwise,
                # so the isinstance check is always True when reached.
                split_line = line.split('\t')

                if len(split_line) > 1:
                    # Tab-separated 'day<TAB>hour<TAB>value' line.
                    data.append(float(split_line[-1].strip()))

                    if split_line[1].startswith('23'):
                        # Last hour of the day: the next line must be day+1.
                        if int(split_line[0])+1 != int(lines[index+1].split('\t')[0]):
                            print('DAY\tHOUR\tVALUE')
                            print(line, lines[index+1])
                    else:
                        # Otherwise the next line must be hour+1.
                        if int(split_line[1][:2])+1 != int(lines[index+1].split('\t')[1][:2]):
                            print('\nDAY\tHOUR\t\tVALUE')
                            print(line.strip())
                            print(f'--- MISSING DATA UNTIL ---')
                            print(lines[index+1].strip())
                else:
                    # Space-separated variant of the same gap checks.
                    split_line = line.strip().split(' ')
                    data.append(float(split_line[-1]))

                    if split_line[1].startswith('23'):
                        if int(split_line[0])+1 != int(lines[index+1].split(' ')[0]):
                            print('DAY\tHOUR\tVALUE')
                            print(line, lines[index+1])
                    else:
                        if int(split_line[1][:2])+1 != int(lines[index+1].split(' ')[1][:2]):
                            print('\nDAY\tHOUR\t\tVALUE')
                            print(line.strip())
                            print(f'--- MISSING DATA UNTIL ---')
                            print(lines[index+1].strip())
        except ValueError:
            # Non-numeric (header/comment) line - ignore it.
            pass

    return data
| {
"repo_name": "thp44/delphin_6_automation",
"path": "delphin_6_automation/file_parsing/weather_parser.py",
"copies": "1",
"size": "10581",
"license": "mit",
"hash": -3527160306274728000,
"line_mean": 40.33203125,
"line_max": 147,
"alpha_frac": 0.4619601172,
"autogenerated": false,
"ratio": 4.037008775276612,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9993558541599487,
"avg_score": 0.00108207017542506,
"num_lines": 256
} |
__author__ = "Christian Kongsgaard, Simon Jørgensen"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
import pandas as pd
import numpy as np
import xmltodict
import os
from collections import defaultdict
import typing
import shutil
# RiBuild Modules
from delphin_6_automation.file_parsing.delphin_parser import dp6_to_dict
from delphin_6_automation.delphin_setup import delphin_permutations
from delphin_6_automation.database_interactions.material_interactions import get_material_info
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
def construction_types() -> typing.List[str]:
    """Gets available template strings.

    :return: list of file name strings
    """

    template_folder = os.path.dirname(os.path.realpath(__file__)) + '/input_files/delphin'
    return os.listdir(template_folder)
def wall_core_materials(folder=os.path.dirname(os.path.realpath(__file__)) + '/input_files') -> typing.List[str]:
    """All included material IDs relevant for a wall's core.

    :return: list of IDs
    """

    # Brick first, then natural stone - missing spreadsheets are skipped.
    sources = [os.path.join(folder, 'Brick.xlsx'),
               os.path.join(folder, 'Natural Stone.xlsx')]

    material_list = []
    for path in sources:
        if os.path.isfile(path):
            sheet = pd.read_excel(path)
            material_list.extend(sheet['Material ID'].tolist())

    return material_list
def plaster_materials(folder=os.path.dirname(os.path.realpath(__file__)) + '/input_files') -> typing.List[str]:
    """All included material IDs relevant for plastering of a wall.

    :return: list of IDs
    """

    plaster_sheet = pd.read_excel(folder + '/Plaster.xlsx')
    return plaster_sheet['Material ID'].tolist()
def insulation_type(folder=os.path.dirname(os.path.realpath(__file__)) + '/input_files') \
        -> typing.List[typing.List[int]]:
    """All included material IDs in insulation systems as such."""

    # The 'Material ID' column holds comma separated ID strings per system.
    id_strings = pd.read_excel(folder + '/InsulationSystems.xlsx')['Material ID'].str.split(', ')
    return [[int(token) for token in id_list]
            for id_list in id_strings.tolist()]
def insulation_systems(folder: str, rows_to_read=11, excel_file='InsulationSystems') -> pd.DataFrame:
    """Reformat insulation DataFrame to different design options DataFrame.

    :param folder: folder containing the excel file
    :param rows_to_read: number of spreadsheet rows (insulation systems) to parse
    :param excel_file: excel file name without extension
    :return: DataFrame of ID/Dimension rows with a (system index, option name) MultiIndex
    """

    constructions = pd.read_excel(folder + f'/{excel_file}.xlsx', usecols=[0, 3, 4, 5], nrows=rows_to_read)

    # BUGFIX: DataFrame.append was removed in pandas 2.0 (and was O(n^2) in a
    # loop) - collect the parts and concatenate once at the end instead.
    parts = []
    level_1 = np.array([])
    level_2 = np.array([])

    # Layer kinds in column order; first spreadsheet column holds the ID string.
    elements = ['insulation_', 'finish_', 'detail_']

    for idx, row in constructions.iterrows():
        material_id_and_dim = []

        # Distinguish single numeric cells from comma separated ID/dimension strings.
        for col in row:
            if isinstance(col, (float, int, bool)):
                material_id_and_dim.append([col])
            else:
                material_id_and_dim.append([int(j) for j in col.split(', ')])

        columns = []

        # One part per layer kind: material ID paired with its dimension options.
        for i, ID in enumerate(material_id_and_dim[0]):
            part = pd.DataFrame({'ID': ID,
                                 'Dimension': material_id_and_dim[i + 1]})

            # Variation titles, e.g. 'insulation_00', 'insulation_01', ...
            for index in part.index:
                columns.append(elements[i] + str(index).zfill(2))

            parts.append(part)

        # Build the two MultiIndex levels: system number and option name.
        level_1 = np.append(level_1, [idx] * len(columns))
        level_2 = np.append(level_2, columns)

    # Guard the empty case: pd.concat([]) raises ValueError.
    systems = pd.concat(parts) if parts else pd.DataFrame(columns=['ID', 'Dimension'])

    # assign multiindex
    systems.index = [level_1.astype('int'), level_2]

    return systems
def delphin_templates(folder: str) -> dict:
    """Titles on delphin files to variate (select by amount of layers).

    :return: dictionary with integer keys according to layers
    """

    # File-name suffix -> number of layers it represents.
    suffix_to_layers = {'plaster.d6p': 0,
                        'insulated2layers.d6p': 2,
                        'insulated3layers.d6p': 3}

    to_copy = defaultdict(list)

    for root, dirs, files in os.walk(folder + '/delphin'):
        for title in files:
            for suffix, layers in suffix_to_layers.items():
                if title.endswith(suffix):
                    to_copy[layers].append(title)
                    break

    return to_copy
def construct_delphin_reference(folder: str) -> typing.List[str]:
    """Generate Delphin files for models without insulation."""

    copied_files = []
    design_folder = os.path.join(folder, 'design')

    # Always start from a clean design folder.
    if os.path.exists(design_folder):
        shutil.rmtree(design_folder)
    os.mkdir(design_folder)

    # Copy each 0-layer (plaster) template under a canonical reference name.
    for file in delphin_templates(folder)[0]:
        has_exterior = 'exterior' in file
        has_interior = 'interior' in file

        if has_exterior and has_interior:
            new_name = '1d_exteriorinterior.d6p'
        elif has_exterior:
            new_name = '1d_exterior.d6p'
        elif has_interior:
            new_name = '1d_interior.d6p'
        else:
            new_name = '1d_bare.d6p'

        copied_files.append(new_name)
        shutil.copyfile(os.path.join(folder, 'delphin', file),
                        os.path.join(design_folder, new_name))

    return copied_files
def implement_system_materials(delphin_dict: dict, system: pd.DataFrame) -> dict:
    """Loop through materials of the system.

    :param delphin_dict: relevant template delphin dict
    :param system: one insulation system's Dataframe
    :return: new delphin dict with all system materials implemented
    """

    # general material names (template projects) - in order: insulation, finish, detail
    material_names = {'insulation': 'Insulation Material [00I]',
                      'finish': 'Finish Material [00F]',
                      'detail': 'Detail Material [00D]'}

    # Two-layer systems carry no detail layer.
    if not any('detail' in row for row in system.index):
        del material_names['detail']

    for layer, template_name in material_names.items():
        # All variations of a layer share the material of its '_00' instance.
        db_material = get_material_info(system.loc[layer + '_00', 'ID'])

        delphin_dict = delphin_permutations.change_layer_material(delphin_dict,
                                                                  template_name.split('[')[-1],
                                                                  db_material)

        if layer != 'insulation':
            # Finish/detail layers have a fixed width (mm -> m).
            mask = [layer in row for row in system.index]
            new_width = round(system.loc[mask, 'Dimension'].astype(float).values[0] * 10e-4, 3)
            delphin_dict = delphin_permutations.change_layer_width(delphin_dict,
                                                                   db_material['@name'],
                                                                   new_width)

    return delphin_dict
def implement_insulation_widths(delphin_dict: dict, system: pd.DataFrame) -> typing.List[dict]:
    """Permutate width of system applied materials."""

    # Insulation material is taken from the system's '_00' instance.
    db_material = get_material_info(system.loc['insulation_00', 'ID'])

    # All insulation rows supply the width options (mm -> m).
    mask = ['insulation' in row for row in system.index]
    widths = system.loc[mask, 'Dimension'].astype(float) * 10e-4

    return delphin_permutations.change_layer_widths(delphin_dict,
                                                    db_material['@name'],
                                                    widths.tolist())
def implement_interior_paint(delphin_paths: typing.List[str], folder: str, excel_file: str) -> typing.List[str]:
    """Create interior-paint (SD value) permutations of the design files.

    For every non-reference design file the SD values listed for its insulation
    system in the Excel sheet are applied to the indoor vapour diffusion
    boundary, and one new design file per SD value is written to 'design/'.

    :param delphin_paths: design file names generated so far
    :param folder: root input folder holding the Excel sheet and 'design/'
    :param excel_file: Excel sheet name without extension
    :return: union of original names and the newly written SD-variant names
    """
    permutated_files = []
    excel_file = os.path.join(folder, f'{excel_file}.xlsx')
    # column 1: system name, column 6: comma-separated SD values
    system_data = pd.read_excel(excel_file, usecols=[1, 6], nrows=11)
    system_names = system_data.iloc[:, 0].tolist()
    for file in delphin_paths:
        # reference projects carry no insulation system -> no paint variants
        if file in ['1d_exterior.d6p', '1d_interior.d6p', '1d_exteriorinterior.d6p', '1d_bare.d6p']:
            permutated_files.append(file)
        else:
            # file names look like '1d_<side>_<system>_<ids>_<dim>.d6p'
            system_name = file.split('_')[2]
            index = system_names.index(system_name)
            sd_values = system_data.iloc[index, 1]
            # NOTE(review): an empty Excel cell comes back as NaN (a float),
            # which is truthy, so .split() below would raise for it — presumably
            # the sheet always holds a string here; verify the input file.
            if sd_values:
                sd_values = [float(sd) for sd in sd_values.split(', ')]
                design = dp6_to_dict(os.path.join(folder, 'design', file))
                for sd in sd_values:
                    # mutates the same design dict cumulatively before each write
                    delphin_permutations.change_boundary_coefficient(design, 'IndoorVaporDiffusion', 'SDValue', sd)
                    # encode the SD value into the name without the decimal point
                    sd = str(sd).replace(".", "")
                    file_name = file.split('.')[0] + f'_SD{sd}.d6p'
                    permutated_files.append(file_name)
                    xmltodict.unparse(design,
                                      output=open(os.path.join(folder,
                                                               'design',
                                                               file_name),
                                                  'w'), pretty=True)
            else:
                permutated_files.append(file)
    # keep the original (unpainted) designs alongside the painted variants
    permutated_files = list(set(delphin_paths).union(set(permutated_files)))
    return permutated_files
def construct_design_files(folder: str) -> typing.List[str]:
    """
    Generate Delphin files to cover all the design options.

    Options arise from separate insulation systems X variation within those e.g. thickness of insulation layer.
    For every insulation system and every matching template (two- or
    three-layer, with/without exterior and interior climate) one design file
    per insulation width is written to '<folder>/design', then interior-paint
    (SD value) permutations are added on top.

    :param folder: input folder holding 'InsulationSystems.xlsx', 'delphin/' templates and 'design/'
    :return: list of all generated design file names
    """
    # appreciate formatted insulation systems
    excel_file = 'InsulationSystems'
    systems = insulation_systems(folder=folder, excel_file=excel_file)
    file_names = construct_delphin_reference(folder)
    # permutation of insulation systems
    for system_number in systems.index.levels[0]:
        # one insulation system each loop
        system = systems.loc[system_number]
        insulation_select = ['insulation' in row for row in system.index]
        # appreciate template - three or two layers
        if any('detail' in row for row in system.index):
            layers = 3
        else:
            layers = 2
        for file in delphin_templates(folder)[layers]:
            # each template as dict to be permutated
            design = dp6_to_dict(os.path.join(folder, 'delphin', file))
            # material and dimension change
            delphin_with_system_materials = implement_system_materials(design, system)
            option_dicts = implement_insulation_widths(delphin_with_system_materials, system)
            # sanity check: one permuted dict per insulation-width row
            if len(option_dicts) != sum(insulation_select):
                message = 'no length match'
                raise TypeError(message)
            # write option files (above dicts)
            for i, dim in enumerate(system.loc[insulation_select, 'Dimension']):
                # naming prefix derived from which climates the template covers
                if 'exterior' in file and 'interior' in file:
                    new_name = '1d_exteriorinterior_'
                elif 'exterior' in file:
                    new_name = '1d_exterior_'
                elif 'interior' in file:
                    new_name = '1d_interior_'
                else:
                    new_name = '1d_bare_'
                # NOTE(review): re-reads the Excel sheet on every iteration —
                # could be hoisted outside the loops.
                insulation_name = pd.read_excel(folder + f'/{excel_file}.xlsx', usecols=[1]).loc[system_number, 'Name']
                if layers == 2:
                    new_name += f'{insulation_name}_{system.loc["insulation_00", "ID"]}_' \
                                f'{system.loc["finish_00", "ID"]}_{int(dim)}'
                elif layers == 3:
                    new_name += f'{insulation_name}_{system.loc["insulation_00", "ID"]}_' \
                                f'{system.loc["finish_00", "ID"]}_{system.loc["detail_00", "ID"]}_{int(dim)}'
                file_name = new_name + '.d6p'
                file_names.append(file_name)
                delphin_permutations.update_output_locations(option_dicts[i])
                xmltodict.unparse(option_dicts[i],
                                  output=open(os.path.join(folder,
                                                           'design',
                                                           file_name),
                                              'w'), pretty=True)
    design_files = implement_interior_paint(file_names, folder, excel_file)
    return design_files
def design_options(folder=os.path.dirname(os.path.realpath(__file__)) + '/input_files') -> typing.List[str]:
    """Return the design names (generated file names without extension)."""
    return [name.split('.')[0] for name in construct_design_files(folder)]
| {
"repo_name": "thp44/delphin_6_automation",
"path": "delphin_6_automation/sampling/inputs.py",
"copies": "1",
"size": "13226",
"license": "mit",
"hash": -2398966878389577700,
"line_mean": 36.5710227273,
"line_max": 120,
"alpha_frac": 0.5615122873,
"autogenerated": false,
"ratio": 4.000302480338778,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5061814767638778,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Christian Kongsgaard, Thomas Perkov'
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules:
import os
import platform
from pathlib import Path
import subprocess
import datetime
import time
import threading
import paramiko
import typing
import shutil
import json
import re
# RiBuild Modules:
from delphin_6_automation.database_interactions.db_templates import delphin_entry
from delphin_6_automation.database_interactions import simulation_interactions
from delphin_6_automation.database_interactions import delphin_interactions
from delphin_6_automation.database_interactions import general_interactions
import delphin_6_automation.database_interactions.db_templates.result_raw_entry as result_db
from delphin_6_automation.logging.ribuild_logger import ribuild_logger
# Logger
logger = ribuild_logger()
# -------------------------------------------------------------------------------------------------------------------- #
# RIBUILD SIMULATION WORKER, DELPHIN SOLVER,
def local_worker(id_: str) -> typing.Optional[bool]:
    """
    Simulation worker. Supposed to be used with main simulation loop.

    Downloads the project into a per-id folder, solves it with the local
    Delphin executable, uploads raw and processed results, and marks the
    entry as simulated.

    :param id_: Database entry ID from simulation queue
    :return: True on success otherwise False
    :raises NameError: on an unsupported operating system
    :raises FileNotFoundError: when the uploaded result entry cannot be found back
    """
    # Find paths
    system = platform.system()
    if system == 'Windows':
        folder = r'C:/ribuild'
        exe_path = r'C:/Program Files/IBK/Delphin 6.0/DelphinSolver.exe'
    elif system == 'Linux':
        home = str(Path.home())
        folder = home + '/ribuild'
        # empty path: on Linux the solver is assumed to be on PATH — TODO confirm
        exe_path = ''
    else:
        logger.error('OS not supported')
        raise NameError('OS not supported')
    # start from a clean per-simulation working folder
    simulation_folder = os.path.join(folder, id_)
    if not os.path.isdir(simulation_folder):
        os.mkdir(simulation_folder)
    else:
        simulation_interactions.clean_simulation_folder(simulation_folder)
        os.mkdir(simulation_folder)
    # Download, solve, upload
    time_0 = datetime.datetime.now()
    logger.info(f'Downloads project with ID: {id_}')
    general_interactions.download_full_project_from_database(str(id_), simulation_folder)
    solve_delphin(os.path.join(simulation_folder, f'{id_}.d6p'), delphin_exe=exe_path, verbosity_level=0)
    result_id = delphin_interactions.upload_results_to_database(os.path.join(simulation_folder, id_),
                                                                delete_files=False)
    delphin_interactions.upload_processed_results(os.path.join(simulation_folder, id_),
                                                  id_, result_id)
    delta_time = datetime.datetime.now() - time_0
    # Check if uploaded:
    test_doc = result_db.Result.objects(id=result_id).first()
    simulation_interactions.set_simulated(id_)
    simulation_interactions.set_simulation_time(id_, delta_time)
    if test_doc:
        # results confirmed in the database -> safe to delete local files
        simulation_interactions.clean_simulation_folder(simulation_folder)
        logger.info(f'Finished solving {id_}. Simulation duration: {delta_time}')
        return True
    else:
        logger.error('Could not find result entry')
        raise FileNotFoundError('Could not find result entry')
def solve_delphin(file: str, delphin_exe=r'C:/Program Files/IBK/Delphin 6.0/DelphinSolver.exe', verbosity_level=1) \
        -> bool:
    """Run the Delphin solver on a project file and log its console output.

    NOTE(review): the command goes through the shell with quoted paths, and
    True is returned regardless of the solver's exit code.
    """
    logger.info(f'Solves {file}')
    command = f'"{delphin_exe}" --close-on-exit --verbosity-level={verbosity_level} "{file}"'
    completed = subprocess.run(command, shell=True, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
    console_text = completed.stdout.decode()
    if console_text:
        logger.info(console_text)
    return True
def github_updates():
    """Check whether a newer revision of Delphin 6 Automation exists on GitHub.

    Not implemented. The intended logic (previously sketched here) compared
    general_interactions.get_github_version() with get_git_revision_hash()
    and returned False when they differed, True otherwise.
    """
    raise NotImplementedError
def create_submit_file(sim_id: str, simulation_folder: str, computation_time: int, restart=False) -> str:
    """Create a submit file for the HPC flavour named in the HPC_LOCATION env var.

    Dispatches to the RTU (PBS) builder when HPC_LOCATION == 'rtu', otherwise
    to the DTU (LSF) builder.
    """
    builder = create_rtu_submit_file if os.getenv('HPC_LOCATION') == 'rtu' else create_dtu_submit_file
    return builder(sim_id, simulation_folder, computation_time, restart)
def create_dtu_submit_file(sim_id: str, simulation_folder: str, computation_time: int, restart=False) -> str:
    """Write an LSF (bsub) submit script for the DTU HPC cluster.

    :param sim_id: Delphin project id; also embedded in the script name
    :param simulation_folder: folder the script is written into
    :param computation_time: wall-clock limit in minutes (#BSUB -W)
    :param restart: when True the solver is launched with --restart
    :return: the file name of the created submit script
    """
    delphin_path = '~/Delphin-6.0/bin/DelphinSolver'
    cpus = 2
    ram_per_cpu = '50MB'
    submit_file = f'submit_{sim_id}.sh'
    solver_line = f"{delphin_path} --restart {sim_id}.d6p\n" if restart else f"{delphin_path} {sim_id}.d6p\n"
    script_lines = [
        "#!/bin/bash\n",
        "#BSUB -J DelphinJob\n",
        "#BSUB -o DelphinJob_%J.out\n",
        "#BSUB -e DelphinJob_%J.err\n",
        "#BSUB -q hpc\n",
        f"#BSUB -W {computation_time}\n",
        f'#BSUB -R "rusage[mem={ram_per_cpu}] span[hosts=1]"\n',
        f"#BSUB -n {cpus}\n",
        '\n',
        "export OMP_NUM_THREADS=$LSB_DJOB_NUMPROC\n",
        '\n',
        solver_line,
        '\n',
    ]
    # newline='\n' keeps Unix line endings even when written from Windows
    with open(f"{simulation_folder}/{submit_file}", 'w', newline='\n') as script:
        script.writelines(script_lines)
    logger.debug(f'Create a submit file for {sim_id} with restart = {restart}')
    return submit_file
def create_rtu_submit_file(sim_id: str, simulation_folder: str, computation_time: int, restart=False) -> str:
    """Write a PBS (qsub) submit script for the RTU HPC cluster.

    :param sim_id: Delphin project id; also embedded in the script name
    :param simulation_folder: folder the script is written into
    :param computation_time: wall-clock limit in minutes (walltime)
    :param restart: when True the solver is launched with --restart
    :return: the file name of the created submit script
    """
    delphin_path = '~/Delphin-6.0/bin/DelphinSolver'
    cpus = 2
    ram = 100
    submit_file = f'submit_{sim_id}.sh'
    # the remote working directory differs between the RTU and DTU clusters
    if os.getenv('HPC_LOCATION', 'dtu') == 'rtu':
        hpc_folder = f"/mnt/home/ritvars01/ribuild/{sim_id}"
    else:
        hpc_folder = f"/work3/ocni/ribuild/{sim_id}"
    if restart:
        solver_line = f"{delphin_path} --restart {hpc_folder}/{sim_id}.d6p\n"
    else:
        solver_line = f"{delphin_path} {hpc_folder}/{sim_id}.d6p\n"
    script_lines = [
        "#!/bin/bash\n",
        "#PBS -N DelphinJob\n",
        "#PBS -o DelphinJob_$PBS_JOBID.out\n",
        "#PBS -e DelphinJob_$PBS_JOBID.err\n",
        "#PBS -q batch\n",
        "#PBS -l feature=centos7\n",
        f"#PBS -l walltime=00:{computation_time}:00\n",
        f'#PBS -l nodes=1:ppn={cpus},pmem={ram}m\n',
        "#PBS -j oe\n",
        "ulimit -f 1000000\n",
        '\n',
        "export OMP_NUM_THREADS=$PBS_NP\n",
        '\n',
        solver_line,
        '\n',
    ]
    # newline='\n' keeps Unix line endings even when written from Windows
    with open(f"{simulation_folder}/{submit_file}", 'w', newline='\n') as script:
        script.writelines(script_lines)
    logger.debug(f'Create a submit file for {sim_id} with restart = {restart}')
    return submit_file
def submit_job(submit_file: str, sim_id: str) -> bool:
    """Submits a job (submit file) to the HPC queue over SSH.

    Opens an interactive shell on the cluster, sends a cd+submit command
    (qsub for RTU, bsub for DTU) and retries up to three times, waiting 30 s
    between attempts, until parse_hpc_log confirms the submission.

    :param submit_file: name of the submit script inside the remote sim folder
    :param sim_id: simulation id; also the remote folder name
    :return: True when a submission confirmation was parsed, otherwise False
    """
    hpc = os.getenv('HPC_LOCATION', 'dtu')
    if hpc == 'rtu':
        terminal_call = f"cd /mnt/home/ritvars01/ribuild/{sim_id} && qsub {submit_file}\n"
    else:
        terminal_call = f"cd /work3/ocni/ribuild/{sim_id} && bsub < {submit_file}\n"
    client = connect_to_hpc()
    logger.info(f'Connecting to HPC to upload simulation with ID: {sim_id}')
    submitted = False
    retries = 0
    channel = client.invoke_shell()
    # give the remote shell a moment to come up before sending commands
    time.sleep(0.5)
    while not submitted and retries < 3:
        channel.send(terminal_call)
        logger.info(f"Terminal call: {terminal_call}")
        response = simulation_interactions.get_command_results(channel)
        #logger.info(f'Raw response: {response}')
        # truthy = the confirming log line; False = not (yet) submitted
        submitted = parse_hpc_log(response)
        logger.info(f'HPC response: {submitted}')
        if submitted:
            logger.info(f'Submitted {hpc.upper()} job {sim_id} on {retries}. try')
            channel.close()
            client.close()
            return True
        retries += 1
        time.sleep(30)
    channel.close()
    client.close()
    logger.debug('No job was submitted')
    return False
def parse_hpc_log(raw_data: str) -> typing.Union[str, bool]:
    """Scan HPC shell output (last line first) for a submission confirmation.

    Returns the confirming line — stripped for the LSF 'submitted to queue'
    message, unmodified for the RTU 'rudens' node message — or False when no
    confirmation is present.
    """
    for line in reversed(raw_data.split('\n')):
        stripped = line.strip()
        if re.search(r".*submitted to queue.", stripped):
            return stripped
        if re.search(r".*rudens", line):
            return line
    return False
def connect_to_hpc(key_file: str = 'hpc_access') -> paramiko.SSHClient:
    """Open an SSH connection to the HPC cluster.

    On Windows the key path/password and host details come from the project's
    auth module; on Linux (Docker) they are read from /run/secrets ('ssh_key'
    plus a JSON file named by *key_file* holding ip/user/port).

    :param key_file: name of the secrets file with the connection details (Linux only)
    :return: a connected paramiko SSHClient
    :raises NameError: on an unsupported operating system
    """
    system = platform.system()
    if system == 'Windows':
        # local import: the auth module only exists on developer machines
        from delphin_6_automation.database_interactions.auth import hpc
        key = paramiko.RSAKey.from_private_key_file(hpc['key_path'], password=hpc['key_pw'])
    elif system == 'Linux':
        secret_path = '/run/secrets'
        key_path = os.path.join(secret_path, 'ssh_key')
        key = paramiko.RSAKey.from_private_key_file(key_path)
        hpc_path = os.path.join(secret_path, key_file)
        with open(hpc_path, 'r') as file:
            hpc = json.load(file)
    else:
        logger.error('OS not supported')
        raise NameError('OS not supported')
    client = paramiko.SSHClient()
    # NOTE(review): AutoAddPolicy skips host-key verification — acceptable only
    # inside the trusted cluster network.
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(hostname=hpc['ip'], username=hpc['user'], port=hpc['port'], pkey=key)
    return client
def wait_until_finished(sim_id: str, estimated_run_time: int, simulation_folder: str):
    """
    Look for summary file. If it is created, continue. If it is not created and the estimated time runs out.
    Then submit a new job continuing the simulation from where it ended.

    Polls until either log/summary.txt appears (normal completion), the hard
    250-minute limit passes ('time limit reached' is returned), or three
    consecutive critical errors are seen in screenlog.txt ('consecutive
    errors' is returned). Returns None on normal completion.

    :param sim_id: simulation id; also the sub-folder name to watch
    :param estimated_run_time: expected run time in minutes
    :param simulation_folder: local folder mirroring the HPC output
    """
    finished = False
    start_time = None
    consecutive_errors = 0
    # wait for the job to start producing its output folder
    while not start_time:
        if os.path.exists(f"{simulation_folder}/{sim_id}"):
            start_time = datetime.datetime.now()
        else:
            time.sleep(2)
    # hard cap regardless of the estimate
    time_limit = start_time + datetime.timedelta(minutes=250)
    while not finished:
        simulation_ends = start_time + datetime.timedelta(minutes=estimated_run_time)
        logger.info(f'Simulation should end at {simulation_ends}')
        if os.path.exists(f"{simulation_folder}/{sim_id}/log/summary.txt"):
            # the solver writes summary.txt only when it finishes
            logger.info(f'Found: {simulation_folder}/{sim_id}/log/summary.txt')
            finished = True
        #elif datetime.datetime.now() > simulation_ends + datetime.timedelta(seconds=10):
        #    logger.info(f'Simulation exceeded allowed time: {datetime.datetime.now()} > {simulation_ends}')
        #    estimated_run_time, start_time = simulation_exceeded_hpc_time(simulation_folder, estimated_run_time, sim_id,
        #                                                                  time_limit)
        #    consecutive_errors = 0
        elif datetime.datetime.now() > time_limit:
            finished = True
            logger.warning(f'Simulation with ID: {sim_id} exceeded the simulation time limit of 250 minutes.')
            return 'time limit reached'
        else:
            # no summary yet: inspect the screen log for critical errors
            if os.path.exists(os.path.join(simulation_folder, sim_id, 'log', 'screenlog.txt')):
                with open(os.path.join(simulation_folder, sim_id, 'log', 'screenlog.txt'), 'r') as logfile:
                    log_data = logfile.readlines()
                if len(log_data) > 1:
                    logger.info(f'Checking for critical errors')
                    start_time, consecutive_errors = critical_error_occurred(log_data, sim_id, simulation_folder,
                                                                             estimated_run_time, start_time,
                                                                             consecutive_errors)
                    if consecutive_errors >= 3:
                        finished = True
                        logger.warning(f'Simulation with ID: {sim_id} encountered 3 consecutive errors and has '
                                       f'therefore been terminated.')
                        return 'consecutive errors'
                    if not finished:
                        time.sleep(60)
                else:
                    time.sleep(60)
            else:
                time.sleep(60)
    logger.info('Simulation is over')
def minutes_left(time_limit):
    """Return the whole minutes remaining until *time_limit* (negative once passed).

    Bug fix: the original computed now - time_limit, which is negative while
    time is still left, so simulation_exceeded_hpc_time's
    min(estimate, minutes_left(...)) always collapsed to a negative run time
    before the deadline.

    :param time_limit: a datetime.datetime deadline
    :return: remaining minutes as an int (truncated toward zero)
    """
    seconds_left = (time_limit - datetime.datetime.now()).total_seconds()
    return int(seconds_left / 60)
def simulation_exceeded_hpc_time(simulation_folder, estimated_run_time, sim_id, time_limit):
    """Restart a simulation whose estimated run time ran out.

    Submits a new --restart job with a 1.5x larger estimate, capped by
    minutes_left(time_limit), then waits for the restarted job to produce
    new files before resetting the clock.

    :return: (new estimated run time in minutes, new start_time)
    :raises RuntimeError: when the restart job could not be submitted
    """
    # snapshot the folder size so we can detect the restarted job's output
    files_in_folder = len(os.listdir(simulation_folder))
    estimated_run_time = min(int(estimated_run_time * 1.5), minutes_left(time_limit))
    submit_file = create_submit_file(sim_id, simulation_folder, estimated_run_time, restart=True)
    submitted = submit_job(submit_file, sim_id)
    if not submitted:
        raise RuntimeError(f'Could not submit job to HPC for simulation with ID: {sim_id}')
    # blocks until the restarted solver writes something new
    new_files_in_simulation_folder(simulation_folder, files_in_folder, sim_id)
    start_time = datetime.datetime.now()
    logger.info(f'Rerunning simulation with ID: {sim_id} '
                f'with new estimated run time of: {estimated_run_time}')
    return estimated_run_time, start_time
def critical_error_occurred(log_data, sim_id, simulation_folder, estimated_run_time, start_time, consecutive_errors):
    """Restart the job if the screen log's last line reports a critical error.

    When the solver aborted, a --restart job is submitted, the function waits
    for new output files and the error counter is incremented; otherwise the
    counter is reset to zero.

    :param log_data: lines of screenlog.txt
    :return: (possibly updated start_time, updated consecutive_errors)
    :raises RuntimeError: when the restart job could not be submitted
    """
    if "Critical error, simulation aborted." in log_data[-1]:
        logger.warning('Critical error found!')
        submit_file = create_submit_file(sim_id, simulation_folder, estimated_run_time, restart=True)
        # snapshot folder size to detect output of the restarted job
        files_in_folder = len(os.listdir(simulation_folder))
        submitted = submit_job(submit_file, sim_id)
        if not submitted:
            raise RuntimeError(f'Could not submit job to HPC for simulation with ID: {sim_id}')
        new_files_in_simulation_folder(simulation_folder, files_in_folder, sim_id)
        # restart the clock for the rerun
        start_time = datetime.datetime.now()
        logger.warning(f'Simulation with ID: {sim_id} encountered a critical error: {log_data[-4:]} '
                       f'\nRerunning failed simulation with new estimated run '
                       f'time of: {estimated_run_time}')
        consecutive_errors += 1
        logger.debug(f'Simulation with ID: {sim_id} encountered an critical error. Raising the number of '
                     f'consecutive errors to: {consecutive_errors}')
        return start_time, consecutive_errors
    else:
        logger.info('No critical error found')
        consecutive_errors = 0
        return start_time, consecutive_errors
def new_files_in_simulation_folder(simulation_folder, files_in_folder, sim_id):
    """Block until *simulation_folder* holds more entries than *files_in_folder*.

    Used after (re)submitting a job to wait for the solver to start writing
    output; polls every 5 seconds.
    """
    while True:
        logger.debug(f'Simulation with ID: {sim_id} is waiting to get simulated.')
        if len(os.listdir(simulation_folder)) > files_in_folder:
            return
        time.sleep(5)
def hpc_worker(id_: str, folder='H:/ribuild'):
    """Solves a Delphin project through DTU HPC

    Downloads the project, submits it to the cluster queue, waits for
    completion (handling time-limit and repeated-error outcomes) and uploads
    the results before cleaning up.

    :param id_: Database entry ID from the simulation queue
    :param folder: local working root; a per-id sub-folder is (re)created in it
    """
    # start from a clean per-simulation working folder
    simulation_folder = os.path.join(folder, id_)
    if not os.path.isdir(simulation_folder):
        os.mkdir(simulation_folder)
    else:
        simulation_interactions.clean_simulation_folder(simulation_folder)
        os.mkdir(simulation_folder)
    # Download, solve, upload
    logger.info(f'Downloads project with ID: {id_}')
    general_interactions.download_full_project_from_database(id_, simulation_folder)
    #estimated_time = simulation_interactions.get_simulation_time_estimate(id_)
    # fixed estimate just above the 250-minute hard limit, so the time-limit
    # branch (not the estimate branch) governs — TODO confirm this is intended
    estimated_time = 251
    submit_file = create_submit_file(id_, simulation_folder, estimated_time)
    submitted = submit_job(submit_file, id_)
    if submitted:
        logger.info('Job successfully submitted. Waiting for completion and processing results.')
        time_0 = datetime.datetime.now()
        # None on normal completion, else 'time limit reached'/'consecutive errors'
        return_code = wait_until_finished(id_, estimated_time, simulation_folder)
        delta_time = datetime.datetime.now() - time_0
        if return_code:
            # abnormal end: result length unknown, let the upload decide
            simulation_hours = None
        else:
            simulation_hours = len(delphin_entry.Delphin.objects(id=id_).first().weather) * 8760
        result_id = delphin_interactions.upload_results_to_database(os.path.join(simulation_folder, id_),
                                                                    delete_files=False, result_length=simulation_hours)
        try:
            delphin_interactions.upload_processed_results(os.path.join(simulation_folder, id_),
                                                          id_, result_id, return_code)
        except FileNotFoundError:
            # best effort: processed results may not exist for aborted runs
            pass
        if return_code == 'time limit reached':
            delphin_interactions.set_exceeding_time_limit(id_)
            delphin_interactions.upload_restart_data(simulation_folder, id_)
        elif return_code == 'consecutive errors':
            delphin_interactions.set_critical_error(id_)
            try:
                delphin_interactions.upload_restart_data(simulation_folder, id_)
            except FileNotFoundError:
                pass
        simulation_interactions.set_simulated(id_)
        simulation_interactions.set_simulation_time(id_, delta_time)
        simulation_interactions.clean_simulation_folder(simulation_folder)
        logger.info(f'Finished solving {id_}. Simulation duration: {delta_time}\n')
    else:
        logger.warning(f'Could not submit project with ID: {id_} to HPC.')
        simulation_interactions.clean_simulation_folder(simulation_folder)
        # release the queue entry so another worker can pick it up
        simulation_interactions.set_simulating(id_, False)
def simulation_worker(sim_location: str, folder='H:/ribuild') -> None:
    """Solves Delphin projects in the database until interrupted

    Polls the simulation queue forever; each id is handed to local_worker or
    hpc_worker depending on *sim_location*. HPC failures are logged, the queue
    entry is released, and the simulation folder is copied to '<folder>/failed'
    for later inspection. Stops cleanly on Ctrl-C.

    :param sim_location: 'local' or 'hpc'
    :param folder: local working root for HPC simulations
    """
    try:
        while True:
            id_ = simulation_interactions.find_next_sim_in_queue()
            if id_:
                if sim_location == 'local':
                    local_worker(str(id_))
                elif sim_location == 'hpc':
                    try:
                        hpc_worker(str(id_), folder)
                    except Exception as err:
                        # release the queue entry before archiving the folder
                        simulation_interactions.set_simulating(str(id_), False)
                        logger.exception(err)
                        if not os.path.exists(os.path.join(folder, 'failed')):
                            os.mkdir(os.path.join(folder, 'failed'))
                        failed_sim_folder = os.path.join(folder, 'failed', str(id_))
                        # overwrite any stale archive from a previous failure
                        if os.path.exists(failed_sim_folder):
                            shutil.rmtree(failed_sim_folder)
                        shutil.copytree(os.path.join(folder, str(id_)), failed_sim_folder)
                        time.sleep(5)
                        pass
            else:
                # queue empty: loop again immediately
                pass
    except KeyboardInterrupt:
        return
def docker_worker(sim_location: str, folder='/app/data') -> None:
    """Solves Delphin projects in the database until interrupted

    Single-shot variant of simulation_worker for containers: processes at most
    one queue entry. On HPC failure the queue entry is released, the folder is
    archived under '<folder>/failed' and RuntimeError is raised so the
    container orchestrator restarts the worker.

    :param sim_location: 'local' or 'hpc'
    :param folder: container data root
    :raises RuntimeError: after an HPC simulation failure
    """
    id_ = simulation_interactions.find_next_sim_in_queue()
    if id_:
        if sim_location == 'local':
            local_worker(str(id_))
        elif sim_location == 'hpc':
            logger.info('Starting at HPC')
            try:
                hpc_worker(str(id_), folder)
            except Exception as err:
                logger.info('Error encountered')
                simulation_interactions.set_simulating(str(id_), False)
                logger.exception(err)
                if not os.path.isdir(os.path.join(folder, 'failed')):
                    os.mkdir(os.path.join(folder, 'failed'))
                failed_sim_folder = os.path.join(folder, 'failed', str(id_))
                # overwrite any stale archive from a previous failure
                if os.path.exists(failed_sim_folder):
                    shutil.rmtree(failed_sim_folder)
                shutil.copytree(os.path.join(folder, str(id_)),
                                failed_sim_folder)
                raise RuntimeError
    else:
        logger.info('No ID found')
        return None
def exceeded_worker(sim_location: str, folder='/app/data') -> None:
    """Solves Delphin projects in the database which has exceeded the run time limit.

    Single-shot worker: picks one exceeded simulation and re-runs it locally
    or on the HPC. On HPC failure the queue entry is released, the folder is
    archived under '<folder>/failed' and RuntimeError is raised so the
    container orchestrator restarts the worker.

    :param sim_location: 'local' or 'hpc'
    :param folder: container data root
    :raises RuntimeError: after an HPC simulation failure
    """
    id_ = simulation_interactions.find_exceeded()
    if id_:
        if sim_location == 'local':
            local_worker(str(id_))
        elif sim_location == 'hpc':
            logger.info('Starting at HPC')
            try:
                hpc_worker(str(id_), folder)
            except Exception as err:
                logger.info('Error encountered')
                simulation_interactions.set_simulating(str(id_), False)
                logger.exception(err)
                if not os.path.isdir(os.path.join(folder, 'failed')):
                    os.mkdir(os.path.join(folder, 'failed'))
                failed_sim_folder = os.path.join(folder, 'failed', str(id_))
                # Fix (consistency with docker_worker): remove a stale archive
                # first, otherwise copytree raises FileExistsError inside this
                # handler and masks the original error.
                if os.path.exists(failed_sim_folder):
                    shutil.rmtree(failed_sim_folder)
                shutil.copytree(os.path.join(folder, str(id_)), failed_sim_folder)
                raise RuntimeError
    else:
        logger.info('No ID found')
        return None
def main():
    """Entry point: print the banner, then run the interactive simulation menu."""
    print_header()
    menu()
def print_header():
    """Print the RiBuild simulation-environment banner to stdout."""
    banner = [
        '---------------------------------------------------',
        '|                                                 |',
        '|           RiBuild EU Research Project           |',
        '|           for Hygrothermal Simulations          |',
        '|                                                 |',
        '|                 WORK IN PROGRESS                |',
        '|           Simulation Environment                |',
        '|                                                 |',
        '---------------------------------------------------',
    ]
    for row in banner:
        print(row)
def menu():
    """Show the interactive simulation menu and dispatch the chosen worker.

    'a' runs a local worker; 'b' spawns 84 daemon threads of HPC workers and
    joins them; 'c' (exceeded simulations) is not implemented; 'x' exits.
    """
    print('')
    print('------------------- SIMULATION MENU ---------------------')
    print('')
    print("Available actions:")
    print("[a] Simulate locally")
    print("[b] Simulate on DTU HPC")
    print("[c] Simulate exceeded simulations on DTU HPC")
    print("[x] Exit")
    choice = input("> ").strip().lower()
    if choice == 'a':
        logger.info('Local Simulation Chosen\n')
        simulation_worker('local')
    elif choice == 'b':
        logger.info('Simulation on DTU HPC Chosen\n')
        # one thread per concurrent HPC submission slot — TODO confirm sizing
        n_threads = 84
        threads = []
        for n in range(n_threads):
            t_name = f"Worker_{n}"
            logger.info(f'Created thread with name: {t_name}\n')
            thread = threading.Thread(target=simulation_worker, args=('hpc',))
            thread.name = t_name
            # daemon: do not block interpreter exit
            thread.daemon = True
            thread.start()
            threads.append(thread)
            # stagger startup to avoid hammering the queue/SSH at once
            time.sleep(10)
        for thread in threads:
            thread.join()
    elif choice == 'c':
        raise NotImplementedError
    elif choice == 'x':
        print("Goodbye")
| {
"repo_name": "thp44/delphin_6_automation",
"path": "delphin_6_automation/backend/simulation_worker.py",
"copies": "1",
"size": "22905",
"license": "mit",
"hash": -8713531379235280000,
"line_mean": 34.6775700935,
"line_max": 121,
"alpha_frac": 0.5893036455,
"autogenerated": false,
"ratio": 3.801659751037344,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4890963396537344,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Christian'
from DbRequests import DbRequests
from scipy.stats import pearsonr
import numpy as np
import itertools
class RecommenderSystem:
    def __init__(self):
        """Set up the database access layer and the reviewer-name blacklist."""
        self.db = DbRequests()
        # Reviewer display names that are placeholders or aggregator artefacts
        # rather than real users; sim_measure5 removes these before computing
        # user-user similarity.
        self.blacklist = ['A TripAdvisor Member', 'lass=', 'Posted by a La Quinta traveler', 'Posted by an Easytobook.com traveler', 'Posted by an Accorhotels.com traveler', 'Posted by a cheaprooms.com traveler', 'Posted by a Worldhotels.com traveler', 'Posted by a Virgin Holidays traveler', 'Posted by an OctopusTravel traveler', 'Posted by a Hotell.no traveler', 'Posted by a Husa Hoteles traveler', 'Posted by a Best Western traveler', 'Posted by a Langham Hotels traveler', 'Posted by a trip.ru traveler', 'Posted by a BanyanTree.com traveler', 'Posted by a Deutsche Bahn traveler', 'Posted by a Partner traveler', 'Posted by a Cleartrip traveler', 'Posted by a Wyndham Hotel Group traveler']
        # Silence numpy warnings (e.g. division by zero / nan during the
        # score-normalisation steps of the similarity measures).
        np.seterr(all="ignore")
def setSkipAtUserAmount(self, userAmount):
self.db.skipAtUserAmount = userAmount
def sim_measure1(self, location):
#print("Measure 1")
res = self.db.reviews_per_hotel_per_place(location)
hotel_scores = dict()
for result in res:
#print(result[0]["data"])
node_id = result[0]
score = result[1]
if node_id in hotel_scores.keys():
tmp_list = hotel_scores[node_id]
tmp_list.append(score)
hotel_scores[node_id] = tmp_list
else:
hotel_scores[node_id] = [score]
maxi = 0
for key in hotel_scores.keys():
avg_score = np.mean(hotel_scores[key])
maxi = max(avg_score, maxi)
hotel_scores[key] = avg_score
for key in hotel_scores.keys():
hotel_scores[key] = hotel_scores[key] / maxi
return hotel_scores
def sim_measure2(self, user_id, location):
#print("Measure 2")
res = self.db.user_reviews_per_hotel_sim2(user_id, location)
count = 0
avg_class = 0
for result in res:
hotel_class = result[0]
if 0 < hotel_class < 6:
avg_class = avg_class + hotel_class
count += 1
if count == 0:
return False
avg_class = float(avg_class) / count
res = self.db.hotels_per_place(location)
hotel_scores = dict()
maxi = 0
for result in res:
node_id = result[0]
hotel_class = result[1]
class_distance = abs(avg_class - hotel_class)
maxi = max(maxi, class_distance)
hotel_scores[node_id] = class_distance
for key in hotel_scores.keys():
hotel_scores[key] = 1 - hotel_scores[key] / maxi
return hotel_scores
    def sim_measure3(self, user_id, location):
        """Price-range similarity measure.

        Builds a preferred price band from the user's reviewed hotels
        (mean -/+ sqrt(std) of the lower/upper price limits) and scores every
        hotel at *location* by how well its own price band overlaps that
        preference: 0.75 weight on the upper limit, 0.25 on the lower.
        Hotels (or users) with missing/invalid limits score 0.
        """
        #print("Measure 3")
        res = self.db.user_reviews_per_hotel_sim2(user_id, location)
        lower_limit = list()
        upper_limit = list()
        for result in res:
            # result[1]/result[2]: lower/upper price limit of a reviewed hotel
            lower_limit_temp = int(result[1])
            upper_limit_temp = int(result[2])
            if lower_limit_temp < 1:
                continue
            if upper_limit_temp < 1:
                continue
            lower_limit.append(lower_limit_temp)
            upper_limit.append(upper_limit_temp)
        # widen the band by the square root of the spread; on an empty list
        # np.mean yields nan, which the string check below catches
        lower_limit = np.mean(lower_limit) - np.sqrt((np.std(lower_limit)))
        upper_limit = np.mean(upper_limit) + np.sqrt(np.std(upper_limit))
        res = self.db.hotels_per_place(location)
        hotel_scores = dict()
        for result in res:
            node_id = result[0]
            # result[2]/result[3]: the candidate hotel's own price limits
            lower_limit_temp = int(result[2])
            upper_limit_temp = int(result[3])
            if lower_limit_temp < 1 or upper_limit_temp < 1 or lower_limit < 1 or upper_limit < 1 or str(lower_limit) == "nan" or str(upper_limit) == "nan":
                hotel_scores[node_id] = 0
            else:
                # penalise hotels cheaper than the preferred lower bound...
                score_lower = 1
                if lower_limit > lower_limit_temp:
                    score_lower = 1 - ((lower_limit - lower_limit_temp) / float(lower_limit))
                elif upper_limit < lower_limit_temp:
                    score_lower = 0
                # ...and hotels pricier than the preferred upper bound
                score_upper = 1
                if upper_limit < upper_limit_temp:
                    score_upper = 1 - ((upper_limit_temp - upper_limit) / float(upper_limit_temp))
                elif lower_limit > upper_limit_temp:
                    score_upper = 0
                hotel_scores[node_id] = score_upper * 0.75 + score_lower * 0.25
        return hotel_scores
    def sim_measure4(self, user_id, location):
        """Aspect-profile similarity measure.

        Builds the user's per-aspect rating profile (service, location, sleep
        quality, value, cleanliness, rooms) and the per-aspect average profile
        of every hotel at *location*, then scores hotels by an inverted,
        normalised weighted Euclidean distance between the two profiles,
        weighted by the variance of the user's aspect ratings.
        Returns False when the user has no reviews for the location.
        """
        #print("Measure 4")
        res = self.db.user_reviews_per_hotel(user_id, location)
        if len(res) == 0:
            return False
        # collect the user's valid (> 0) ratings per aspect
        service_list = list()
        location_list = list()
        sleep_quality_list = list()
        value_list = list()
        cleanliness_list = list()
        rooms_list = list()
        for result in res:
            service = result[1]
            if service > 0:
                service_list.append(service)
            location_rating = result[2]
            if location_rating > 0:
                location_list.append(location_rating)
            sleep_quality = result[3]
            if sleep_quality > 0:
                sleep_quality_list.append(sleep_quality)
            value = result[4]
            if value > 0:
                value_list.append(value)
            cleanliness = result[5]
            if cleanliness > 0:
                cleanliness_list.append(cleanliness)
            rooms = result[6]
            if rooms > 0:
                rooms_list.append(rooms)
        # per-aspect variance -> distance weight; nan/zero variances are
        # replaced by a tiny epsilon to keep the weighting well-defined
        small_number = 0.00000001
        service_var = np.var(service_list)
        if service_var == 0 or str(service_var) == "nan":
            service_var = small_number
        location_var = np.var(location_list)
        if location_var == 0 or str(location_var) == "nan":
            location_var = small_number
        sleep_quality_var = np.var(sleep_quality_list)
        if sleep_quality_var == 0 or str(sleep_quality_var) == "nan":
            sleep_quality_var = small_number
        value_var = np.var(value_list)
        if value_var == 0 or str(value_var) == "nan":
            value_var = small_number
        cleanliness_var = np.var(cleanliness_list)
        if cleanliness_var == 0 or str(cleanliness_var) == "nan":
            cleanliness_var = small_number
        rooms_var = np.var(rooms_list)
        if rooms_var == 0 or str(rooms_var) == "nan":
            rooms_var = small_number
        # per-aspect means; 3 (mid-scale) stands in for a missing aspect
        if len(service_list) == 0:
            service_mean = 3
        else:
            service_mean = np.mean(service_list)
        if len(location_list) == 0:
            location_mean = 3
        else:
            location_mean = np.mean(location_list)
        if len(sleep_quality_list) == 0:
            sleep_quality_mean = 3
        else:
            sleep_quality_mean = np.mean(sleep_quality_list)
        if len(value_list) == 0:
            value_mean = 3
        else:
            value_mean = np.mean(value_list)
        if len(cleanliness_list) == 0:
            cleanliness_mean = 3
        else:
            cleanliness_mean = np.mean(cleanliness_list)
        if len(rooms_list) == 0:
            rooms_mean = 3
        else:
            rooms_mean = np.mean(rooms_list)
        weights = [service_var, location_var, sleep_quality_var, value_var, cleanliness_var, rooms_var]
        # build every hotel's raw per-aspect rating lists
        res = self.db.reviews_per_hotel_per_place(location)
        hotel_avg_asp_rating = dict()
        for result in res:
            node_id = result[0]
            score_service = result[2]
            score_location = result[3]
            score_sleep_quality = result[4]
            score_value = result[5]
            score_cleanliness = result[6]
            score_rooms = result[7]
            if node_id in hotel_avg_asp_rating.keys():
                tmp_dic = hotel_avg_asp_rating[node_id]
                tmp = tmp_dic["ratingService"]
                tmp.append(score_service)
                tmp_dic["ratingService"] = tmp
                tmp = tmp_dic["ratingLocation"]
                tmp.append(score_location)
                tmp_dic["ratingLocation"] = tmp
                tmp = tmp_dic["ratingSleepQuality"]
                tmp.append(score_sleep_quality)
                tmp_dic["ratingSleepQuality"] = tmp
                tmp = tmp_dic["ratingValue"]
                tmp.append(score_value)
                tmp_dic["ratingValue"] = tmp
                tmp = tmp_dic["ratingCleanliness"]
                tmp.append(score_cleanliness)
                tmp_dic["ratingCleanliness"] = tmp
                tmp = tmp_dic["ratingRooms"]
                tmp.append(score_rooms)
                tmp_dic["ratingRooms"] = tmp
                hotel_avg_asp_rating[node_id] = tmp_dic
            else:
                tmp_dic = dict()
                tmp_dic["ratingService"] = [score_service]
                tmp_dic["ratingLocation"] = [score_location]
                tmp_dic["ratingSleepQuality"] = [score_sleep_quality]
                tmp_dic["ratingValue"] = [score_value]
                tmp_dic["ratingCleanliness"] = [score_cleanliness]
                tmp_dic["ratingRooms"] = [score_rooms]
                hotel_avg_asp_rating[node_id] = tmp_dic
        # reduce each hotel to [id, mean per aspect], dropping missing values
        # (remove_mv) and defaulting an empty aspect to mid-scale 3
        hotel_avgs_list = list()
        for key in hotel_avg_asp_rating.keys():
            temp_dic = hotel_avg_asp_rating[key]
            temp_list = list()
            temp_list.append(key)
            if len(self.remove_mv(temp_dic["ratingService"])) == 0:
                temp_list.append(3)
            else:
                temp_list.append(np.mean(self.remove_mv(temp_dic["ratingService"])))
            if len(self.remove_mv(temp_dic["ratingLocation"])) == 0:
                temp_list.append(3)
            else:
                temp_list.append(np.mean(self.remove_mv(temp_dic["ratingLocation"])))
            if len(self.remove_mv(temp_dic["ratingSleepQuality"])) == 0:
                temp_list.append(3)
            else:
                temp_list.append(np.mean(self.remove_mv(temp_dic["ratingSleepQuality"])))
            if len(self.remove_mv(temp_dic["ratingValue"])) == 0:
                temp_list.append(3)
            else:
                temp_list.append(np.mean(self.remove_mv(temp_dic["ratingValue"])))
            if len(self.remove_mv(temp_dic["ratingCleanliness"])) == 0:
                temp_list.append(3)
            else:
                temp_list.append(np.mean(self.remove_mv(temp_dic["ratingCleanliness"])))
            if len(self.remove_mv(temp_dic["ratingRooms"])) == 0:
                temp_list.append(3)
            else:
                temp_list.append(np.mean(self.remove_mv(temp_dic["ratingRooms"])))
            hotel_avgs_list.append(temp_list)
        # NOTE(review): the computed per-aspect user means are currently
        # overridden by a constant all-5 profile — presumably an experiment;
        # confirm before relying on the personalisation of this measure.
        #user = [user_id, service_mean, location_mean, sleep_quality_mean, value_mean, cleanliness_mean, rooms_mean]
        user = [user_id, 5, 5, 5, 5, 5, 5]
        hotel_scores = dict()
        maxi = 0
        for hotel in hotel_avgs_list:
            distance = self.weighted_euclidean_distance(user[1:7], hotel[1:7], weights)
            maxi = max(maxi, distance)
            hotel_scores[hotel[0]] = distance
        # invert and normalise: closest profile -> 1, farthest -> 0
        for hotel in hotel_scores.keys():
            hotel_scores[hotel] = (1 - (hotel_scores[hotel] / maxi))
        for hotel in hotel_scores.keys():
            if str(hotel_scores[hotel]) == "nan":
                print(hotel, hotel_scores[hotel])
        return hotel_scores
def sim_measure5(self, user_id, location):
    """User-based collaborative filtering score per hotel.

    For every hotel the target user reviewed at `location`, collect other
    users' aspect ratings for the same hotel, compute a Pearson similarity
    (confidence) between the two rating vectors on the aspects both rated
    (>= 1), and propagate the best similarity per other user onto the
    hotels those users rated > 3 at the target location.

    Returns a dict hotel_id -> score in [0, ~1]; empty dict when the user
    has 100 or more reviews (presumably a spam/outlier guard — confirm).
    """
    res = self.db.user_reviews_per_hotel(user_id, location)
    hotels = list()
    reviews = list()
    maxReviews = 100
    for row in res:
        hotels.append(row[0])
        # row[1:7] are the six aspect ratings of the target user's review.
        reviews.append([row[1], row[2], row[3], row[4], row[5], row[6]])
        maxReviews -= 1
        if maxReviews <= 0:
            return {}
    # One rating matrix per reviewed hotel: rows are [user_name, 6 ratings].
    hotel_list_with_other_user = list()
    for i in range(len(hotels)):
        hotel_matrix = list()
        res = self.db.users_same_hotel_for_target_location(hotels[i], location, user_id)
        users = list()
        for row in res:
            # escape single quotes for the follow-up query
            users.append(row[0]["data"]["name"].replace("'", "\\'"))
        for blacklisted in self.blacklist:
            if blacklisted in users:
                users.remove(blacklisted)
        res = self.db.reviews_for_user_set(hotels[i], users)
        # result rows come in (user, review) pairs, hence the stride of 2
        for j in range(0, len(res), 2):
            line_in_matrix = list()
            line_in_matrix.append(res[j][0]["data"]["name"])
            rev = self.get_rating_values_from_review(res[j][1]["data"])
            for asp in rev:
                line_in_matrix.append(asp)
            hotel_matrix.append(line_in_matrix)
        hotel_list_with_other_user.append(hotel_matrix)
    similarity_score = list()
    for i in range(len(hotels)):
        temp = reviews[i]
        user_hotel_rating = list()
        user_hotel_rating.append(user_id)
        for rating in temp:
            user_hotel_rating.append(rating)
        hotel_matrix = hotel_list_with_other_user[i]
        for other_user_rating in hotel_matrix:
            temp_other_user = other_user_rating[1:7]
            temp_user = user_hotel_rating[1:7]
            # keep only aspects both users actually rated (>= 1)
            bitmask = list()
            for j in range(len(temp_user)):
                if temp_user[j] < 1 or temp_other_user[j] < 1:
                    bitmask.append(0)
                else:
                    bitmask.append(1)
            temp_user = list(itertools.compress(temp_user, bitmask))
            temp_other_user = list(itertools.compress(temp_other_user, bitmask))
            if len(temp_user) == 0:
                confidence = 0
            else:
                confidence = pearsonr(temp_user, temp_other_user)[0]
            # negative or undefined correlations carry no recommendation value
            if np.isnan(confidence) or float(confidence) <= float(0):
                confidence = 0
            similarity_score.append((other_user_rating[0], confidence))
    # keep the best positive similarity observed per other user
    filtered_scores = dict()
    for sims in similarity_score:
        if not sims[1] == 0:
            if sims[0] in filtered_scores.keys():
                filtered_scores[sims[0]] = max(sims[1], filtered_scores[sims[0]])
            else:
                filtered_scores[sims[0]] = sims[1]
    hotel_scores = dict()
    for key in filtered_scores.keys():
        res = self.db.hotel_review_for_user_and_location(key, location)
        for row in res:
            rating = row[0]
            if rating > 3:
                hotel_id = row[1]
                # weight the similar user's rating by the similarity, scaled to [0, 1]
                rating = (rating * filtered_scores[key]) / float(5)
                if hotel_id in hotel_scores.keys():
                    hotel_scores[hotel_id] = max(rating, hotel_scores[hotel_id])
                else:
                    hotel_scores[hotel_id] = rating
    # BUG FIX: iterate over a snapshot of the keys — popping while iterating
    # a dict view raises RuntimeError on Python 3.
    for key in list(hotel_scores.keys()):
        if np.isnan(hotel_scores[key]):
            hotel_scores.pop(key, None)
    return hotel_scores
def sim_measure6(self, user_id, location):
    """Score hotels by nationality majority voting.

    Normalises each hotel's vote count by the first row's count (which is
    presumably the maximum — rows look ordered by count descending; TODO
    confirm against the query). Returns dict hotel_id -> score, or an
    empty dict when the query yields nothing usable.
    """
    res = self.db.nationality_majoriy_voting(user_id, location)
    # BUG FIX: the old `res == False` check missed None/empty results, and
    # `len(res)` would then raise; `not res` covers False, None and [].
    if not res or len(res[0]) != 2:
        return {}
    maxi = float(res[0][1])
    if maxi == 0:
        # avoid ZeroDivisionError when the top vote count is zero
        return {}
    return {row[0]: float(row[1]) / maxi for row in res}
def get_rating_values_from_review(self, review):
    """Extract the six aspect ratings of a review record as ints.

    Order is fixed: service, location, sleep quality, value,
    cleanliness, rooms.
    """
    aspects = ("ratingService", "ratingLocation", "ratingSleepQuality",
               "ratingValue", "ratingCleanliness", "ratingRooms")
    return [int(review[aspect]) for aspect in aspects]
def weighted_mean(self, x, w):
    """Weighted arithmetic mean of x under weights w."""
    weighted_total = sum(x[i] * w[i] for i in range(len(x)))
    return float(weighted_total / float(np.sum(w)))
def weighted_covariance(self, x, y, w):
    """Weighted (population-style) covariance of x and y."""
    mean_x = self.weighted_mean(x, w)
    mean_y = self.weighted_mean(y, w)
    acc = sum(w[i] * (x[i] - mean_x) * (y[i] - mean_y)
              for i in range(len(x)))
    return float(acc / float(np.sum(w)))
def weighted_correlation(self, x, y, w):
    """Weighted Pearson correlation of x and y."""
    cov_xy = self.weighted_covariance(x, y, w)
    var_x = self.weighted_covariance(x, x, w)
    var_y = self.weighted_covariance(y, y, w)
    return float(cov_xy / float(np.sqrt(var_x * var_y)))
def weighted_euclidean_distance(self, x, y, w):
    """Weighted distance between the vectors x and y.

    NOTE(review): despite the name, this sums sqrt(w_i * (x_i - y_i)^2)
    per dimension — i.e. sqrt(w_i) * |x_i - y_i|, a weighted Manhattan
    distance — rather than sqrt of the summed squares. Preserved as-is
    because the sim_measure* scoring depends on these exact values.
    """
    return sum(np.sqrt(w[i] * (x[i] - y[i]) ** 2)
               for i in range(len(x)))
def remove_mv(self, x):
    """Return x without missing values (entries <= 0 are dropped)."""
    return [value for value in x if value > 0]
| {
"repo_name": "LukasGentele/Graph-based-Hotel-Recommendations",
"path": "rs/RecommenderSystem.py",
"copies": "1",
"size": "17214",
"license": "apache-2.0",
"hash": 8997820555788160000,
"line_mean": 34.1306122449,
"line_max": 697,
"alpha_frac": 0.5314279075,
"autogenerated": false,
"ratio": 3.5617628801986343,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45931907876986344,
"avg_score": null,
"num_lines": null
} |
"""Data-cleanup script: merge duplicate campus ("Provider") records.

Reads feti_campus.csv, where each row maps a duplicate campus id (col 0)
to the campus id it should be merged into (col 1); moves the duplicate's
courses across and deletes the duplicate. Must run inside a configured
Django environment (imports feti models). Python 2 only
(`csv_reader.next()`, print statements).
"""
__author__ = 'christian'
import csv
import logging

from feti.models.campus import Campus as Provider
from feti.models.provider import Provider as PrimaryInstitute

# logging.basicConfig(filename='provider.log')

header = []
original_providers = []   # campus ids to be removed (the duplicates)
new_providers = []        # campus ids the duplicates are merged into
primary_institutes = []   # expected parent institute of each destination
with open('feti_campus.csv', 'r') as csv_file:
    csv_reader = csv.reader(csv_file, delimiter=',')
    header = csv_reader.next()[:2]
    for row in csv_reader:
        if row[0] == row[1]:
            # we don't care about these, since we don't need to do anything
            continue
        original_providers.append(row[0])
        new_providers.append(row[1])
        # NOTE(review): reads column 3 although `header` keeps only the
        # first two columns — confirm the CSV really has 4+ columns.
        primary_institutes.append(row[3])

# Sanity checks before touching the database.
message = 'Every duplicate should only be listed once.'
assert len(original_providers) == len(set(original_providers)), message
logging.info('Every duplicate is only listed once')
message = 'We should not want to remove a new institute.'
for original_id in original_providers:
    assert original_id not in new_providers, message
logging.info('No original (where duplicates are moved to) is being removed.')

for original_id, new_id, pi_id in zip(
        original_providers, new_providers, primary_institutes):
    # The campus to be deleted must still exist; skip if already gone.
    try:
        provider_old = Provider.objects.get(id=original_id)
    except Provider.DoesNotExist:
        print(
            'Cannot remove %s. It may already have been removed' % original_id)
        logging.info(
            'Cannot remove %s. It may already have been removed' % original_id)
        continue
    # The destination campus must exist, otherwise skip this pair.
    try:
        provider_new = Provider.objects.get(id=new_id)
    except Provider.DoesNotExist:
        print (
            'Cannot remove %s to %s. '
            'Destination provider does not exist' %
            (original_id, new_id))
        logging.info(
            'Cannot remove %s to %s. '
            'Destination provider does not exist' %
            (original_id, new_id))
        continue
    # The expected parent institute is only used for a consistency warning.
    try:
        pi = PrimaryInstitute.objects.get(id=pi_id)
    except PrimaryInstitute.DoesNotExist:
        print 'Primary institute does not exist.'
        logging.info(
            'Primary institute does not exist.')
        pi = None
        #continue
    if provider_new.provider != pi:
        # Warning only — the merge still proceeds.
        print 'Destination primary institute does not exist'
        logging.info('Destination primary institute does not exist')
    # Move every course from the duplicate campus to the destination.
    for course in provider_old.courses.all():
        provider_new.courses.add(course)
        provider_old.courses.remove(course)
    provider_new.save()
    provider_old.save()
    # Only delete the duplicate once it is really empty.
    if provider_old.courses.all():
        print 'Could not remove all campuses from provider %s' % (
            original_id)
        logging.info(
            'Could not remove all campuses from provider %s' %
            original_id)
        continue
    provider_old.delete()
| {
"repo_name": "cchristelis/feti",
"path": "deployment/setup_data/data_cleanup/refactor_providers.py",
"copies": "2",
"size": "2833",
"license": "bsd-2-clause",
"hash": 6131682844907570000,
"line_mean": 33.5487804878,
"line_max": 79,
"alpha_frac": 0.6399576421,
"autogenerated": false,
"ratio": 3.9511854951185494,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5591143137218549,
"avg_score": null,
"num_lines": null
} |
"""Data-cleanup script: merge duplicate primary-institute records.

Reads primary_institution_duplicates.csv, where each row maps a duplicate
institute id (col 0) to the id it merges into (col 1); re-parents the
duplicate's campuses onto the destination and deletes the duplicate.
Requires a configured Django environment (imports feti models); Python 2
only (`csv_reader.next()`).
"""
__author__ = 'christian'
import csv
import logging

from feti.models.provider import Provider as PrimaryInstitute

logging.basicConfig(filename='primary_institute.log')

header = []
original_primary_institutes = []  # institute ids to be removed (duplicates)
new_primary_institutes = []       # institute ids the duplicates merge into
with open('primary_institution_duplicates.csv', 'r') as csv_file:
    csv_reader = csv.reader(csv_file, delimiter=',')
    header = csv_reader.next()[:2]
    for row in csv_reader:
        if row[0] == row[1]:
            # we don't care about these, since we don't need to do anything
            continue
        original_primary_institutes.append(row[0])
        new_primary_institutes.append(row[1])

# Sanity checks before touching the database.
message = 'Every duplicate should only be listed once.'
assert len(original_primary_institutes) == len(set(original_primary_institutes)), message
logging.info('Every duplicate is only listed once')
message = 'We should not want to remove a new institute.'
for original_id in original_primary_institutes:
    assert original_id not in new_primary_institutes, message
logging.info('No original (where duplicates are moved to) is being removed.')

for original_id, new_id in zip(
        original_primary_institutes, new_primary_institutes):
    # The institute to be deleted must still exist; skip if already gone.
    try:
        pi_old = PrimaryInstitute.objects.get(id=original_id)
    except PrimaryInstitute.DoesNotExist:
        print(
            'Cannot remove %s. It may already have been removed' % original_id)
        logging.info(
            'Cannot remove %s. It may already have been removed' % original_id)
        continue
    # The destination institute must exist, otherwise skip this pair.
    try:
        pi_new = PrimaryInstitute.objects.get(id=new_id)
    except PrimaryInstitute.DoesNotExist:
        print(
            'Cannot remove %s to %s. '
            'Destination primary institute does not exist' %
            (original_id, new_id))
        logging.info(
            'Cannot remove %s to %s. '
            'Destination primary institute does not exist' %
            (original_id, new_id))
        continue
    # Re-parent every campus of the duplicate onto the destination.
    for campus in pi_old.campuses.all():
        campus.provider = pi_new
        campus.save()
    # Only delete the duplicate once no campus points at it any more.
    if pi_old.campuses.all():
        print('Could not remove all providers from primary institute %s' %
              original_id)
        logging.info(
            'Could not remove all providers from primary institute %s' %
            original_id)
        continue
    pi_old.delete()
| {
"repo_name": "cchristelis/feti",
"path": "deployment/setup_data/data_cleanup/refactor_primary_institutes.py",
"copies": "2",
"size": "2332",
"license": "bsd-2-clause",
"hash": 4491478617309552000,
"line_mean": 34.8769230769,
"line_max": 89,
"alpha_frac": 0.6509433962,
"autogenerated": false,
"ratio": 3.767366720516963,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5418310116716963,
"avg_score": null,
"num_lines": null
} |
import numpy as np
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.decomposition import FactorAnalysis
def test_factor_analysis():
    """Check that FactorAnalysis recovers the data covariance structure."""
    random_state = np.random.RandomState(0)
    n_samples, n_features, n_components = 20, 5, 3

    # Random generative model: latent factors times loadings, plus
    # per-feature heteroscedastic noise (mean is zero w.l.o.g.).
    loadings = random_state.randn(n_components, n_features)
    latent = random_state.randn(n_samples, n_components)
    noise = (random_state.gamma(1, size=n_features)
             * random_state.randn(n_samples, n_features))
    observations = np.dot(latent, loadings) + noise

    model = FactorAnalysis(n_components=n_components)
    model.fit(observations)
    transformed = model.transform(observations)
    assert_true(transformed.shape == (n_samples, n_components))

    assert_almost_equal(model.loglike_[-1], model.score(observations).sum())
    # EM guarantees a monotonically increasing log-likelihood.
    assert_true(np.all(np.diff(model.loglike_) > 0.))

    # The model covariance should approximate the sample covariance.
    sample_cov = np.cov(observations, rowvar=0., bias=1.)
    model_cov = model.get_covariance()
    mean_abs_diff = np.sum(np.abs(sample_cov - model_cov)) / loadings.size
    assert_true(mean_abs_diff < 0.1,
                "Mean absolute difference is %f" % mean_abs_diff)

    # A wrongly sized noise_variance_init must be rejected.
    model = FactorAnalysis(n_components=n_components,
                           noise_variance_init=np.ones(n_features))
    assert_raises(ValueError, model.fit, observations[:, :2])
| {
"repo_name": "seckcoder/lang-learn",
"path": "python/sklearn/sklearn/decomposition/tests/test_factor_analysis.py",
"copies": "2",
"size": "1692",
"license": "unlicense",
"hash": -5542598162930436000,
"line_mean": 30.9245283019,
"line_max": 75,
"alpha_frac": 0.6678486998,
"autogenerated": false,
"ratio": 3.363817097415507,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003699593044765076,
"num_lines": 53
} |
import numpy as np
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.exceptions import ConvergenceWarning
from sklearn.decomposition import FactorAnalysis
from sklearn.utils.testing import ignore_warnings
# Ignore warnings from switching to more power iterations in randomized_svd
@ignore_warnings
def test_factor_analysis():
    # Test FactorAnalysis ability to recover the data covariance structure
    rng = np.random.RandomState(0)
    n_samples, n_features, n_components = 20, 5, 3

    # Some random settings for the generative model
    W = rng.randn(n_components, n_features)
    # latent variable of dim 3, 20 of it
    h = rng.randn(n_samples, n_components)
    # using gamma to model different noise variance
    # per component
    noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)

    # generate observations
    # wlog, mean is 0
    X = np.dot(h, W) + noise

    assert_raises(ValueError, FactorAnalysis, svd_method='foo')
    fa_fail = FactorAnalysis()
    fa_fail.svd_method = 'foo'
    assert_raises(ValueError, fa_fail.fit, X)
    fas = []
    for method in ['randomized', 'lapack']:
        fa = FactorAnalysis(n_components=n_components, svd_method=method)
        fa.fit(X)
        fas.append(fa)

        X_t = fa.transform(X)
        assert_equal(X_t.shape, (n_samples, n_components))

        assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
        assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))

        # BUG FIX: the old `np.all(np.diff(fa.loglike_))` only tested the
        # diffs for being non-zero and then compared a bool against 0.;
        # assert the strict monotone increase that EM guarantees instead.
        assert_greater(np.diff(fa.loglike_).min(), 0.,
                       'Log likelihood did not increase')

        # Sample Covariance
        scov = np.cov(X, rowvar=0., bias=1.)

        # Model Covariance
        mcov = fa.get_covariance()
        diff = np.sum(np.abs(scov - mcov)) / W.size
        assert_less(diff, 0.1, "Mean absolute difference is %f" % diff)
        fa = FactorAnalysis(n_components=n_components,
                            noise_variance_init=np.ones(n_features))
        assert_raises(ValueError, fa.fit, X[:, :2])

    f = lambda x, y: np.abs(getattr(x, y))  # sign will not be equal
    fa1, fa2 = fas
    for attr in ['loglike_', 'components_', 'noise_variance_']:
        assert_almost_equal(f(fa1, attr), f(fa2, attr))
    fa1.max_iter = 1
    fa1.verbose = True
    assert_warns(ConvergenceWarning, fa1.fit, X)

    # Test get_covariance and get_precision with n_components == n_features
    # with n_components < n_features and with n_components == 0
    for n_components in [0, 2, X.shape[1]]:
        fa.n_components = n_components
        fa.fit(X)
        cov = fa.get_covariance()
        precision = fa.get_precision()
        assert_array_almost_equal(np.dot(cov, precision),
                                  np.eye(X.shape[1]), 12)
| {
"repo_name": "xyguo/scikit-learn",
"path": "sklearn/decomposition/tests/test_factor_analysis.py",
"copies": "16",
"size": "3203",
"license": "bsd-3-clause",
"hash": 3558450733467062000,
"line_mean": 36.6823529412,
"line_max": 76,
"alpha_frac": 0.6625039026,
"autogenerated": false,
"ratio": 3.5005464480874315,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
import warnings
import numpy as np
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.decomposition import FactorAnalysis
def test_factor_analysis():
    """Test FactorAnalysis ability to recover the data covariance structure
    """
    rng = np.random.RandomState(0)
    n_samples, n_features, n_components = 20, 5, 3

    # Some random settings for the generative model
    W = rng.randn(n_components, n_features)
    # latent variable of dim 3, 20 of it
    h = rng.randn(n_samples, n_components)
    # using gamma to model different noise variance
    # per component
    noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)

    # generate observations
    # wlog, mean is 0
    X = np.dot(h, W) + noise

    assert_raises(ValueError, FactorAnalysis, svd_method='foo')
    fa_fail = FactorAnalysis()
    fa_fail.svd_method = 'foo'
    assert_raises(ValueError, fa_fail.fit, X)
    fas = []
    for method in ['randomized', 'lapack']:
        fa = FactorAnalysis(n_components=n_components, svd_method=method)
        fa.fit(X)
        fas.append(fa)

        X_t = fa.transform(X)
        assert_equal(X_t.shape, (n_samples, n_components))

        assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
        assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))

        # BUG FIX: the old `np.all(np.diff(fa.loglike_))` only tested the
        # diffs for being non-zero and then compared a bool against 0.;
        # assert the strict monotone increase that EM guarantees instead.
        assert_greater(np.diff(fa.loglike_).min(), 0.,
                       'Log likelihood did not increase')

        # Sample Covariance
        scov = np.cov(X, rowvar=0., bias=1.)

        # Model Covariance
        mcov = fa.get_covariance()
        diff = np.sum(np.abs(scov - mcov)) / W.size
        assert_less(diff, 0.1, "Mean absolute difference is %f" % diff)
        fa = FactorAnalysis(n_components=n_components,
                            noise_variance_init=np.ones(n_features))
        assert_raises(ValueError, fa.fit, X[:, :2])

    f = lambda x, y: np.abs(getattr(x, y))  # sign will not be equal
    fa1, fa2 = fas
    for attr in ['loglike_', 'components_', 'noise_variance_']:
        assert_almost_equal(f(fa1, attr), f(fa2, attr))

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always', ConvergenceWarning)
        fa1.max_iter = 1
        fa1.verbose = True
        fa1.fit(X)
        assert_true(w[-1].category == ConvergenceWarning)

        warnings.simplefilter('always', DeprecationWarning)
        FactorAnalysis(verbose=1)
        assert_true(w[-1].category == DeprecationWarning)

    # Test get_covariance and get_precision with n_components == n_features
    # with n_components < n_features and with n_components == 0
    for n_components in [0, 2, X.shape[1]]:
        fa.n_components = n_components
        fa.fit(X)
        cov = fa.get_covariance()
        precision = fa.get_precision()
        assert_array_almost_equal(np.dot(cov, precision), np.eye(X.shape[1]), 12)
| {
"repo_name": "B3AU/waveTree",
"path": "sklearn/decomposition/tests/test_factor_analysis.py",
"copies": "4",
"size": "3346",
"license": "bsd-3-clause",
"hash": -1118620781258024700,
"line_mean": 36.1777777778,
"line_max": 81,
"alpha_frac": 0.6589958159,
"autogenerated": false,
"ratio": 3.518401682439537,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00045756254663995916,
"num_lines": 90
} |
import numpy as np
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.exceptions import ConvergenceWarning
from sklearn.decomposition import FactorAnalysis
from sklearn.utils.testing import ignore_warnings
# Ignore warnings from switching to more power iterations in randomized_svd
@ignore_warnings
def test_factor_analysis():
    # Test FactorAnalysis ability to recover the data covariance structure
    rng = np.random.RandomState(0)
    n_samples, n_features, n_components = 20, 5, 3

    # Some random settings for the generative model
    W = rng.randn(n_components, n_features)
    # latent variable of dim 3, 20 of it
    h = rng.randn(n_samples, n_components)
    # using gamma to model different noise variance
    # per component
    noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)

    # generate observations
    # wlog, mean is 0
    X = np.dot(h, W) + noise

    assert_raises(ValueError, FactorAnalysis, svd_method='foo')
    fa_fail = FactorAnalysis()
    fa_fail.svd_method = 'foo'
    assert_raises(ValueError, fa_fail.fit, X)
    fas = []
    for method in ['randomized', 'lapack']:
        fa = FactorAnalysis(n_components=n_components, svd_method=method)
        fa.fit(X)
        fas.append(fa)

        X_t = fa.transform(X)
        assert_equal(X_t.shape, (n_samples, n_components))

        assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
        assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))

        # BUG FIX: the old `np.all(np.diff(fa.loglike_))` only tested the
        # diffs for being non-zero and then compared a bool against 0.;
        # assert the strict monotone increase that EM guarantees instead.
        assert_greater(np.diff(fa.loglike_).min(), 0.,
                       'Log likelihood did not increase')

        # Sample Covariance
        scov = np.cov(X, rowvar=0., bias=1.)

        # Model Covariance
        mcov = fa.get_covariance()
        diff = np.sum(np.abs(scov - mcov)) / W.size
        assert_less(diff, 0.1, "Mean absolute difference is %f" % diff)
        fa = FactorAnalysis(n_components=n_components,
                            noise_variance_init=np.ones(n_features))
        assert_raises(ValueError, fa.fit, X[:, :2])

    f = lambda x, y: np.abs(getattr(x, y))  # sign will not be equal
    fa1, fa2 = fas
    for attr in ['loglike_', 'components_', 'noise_variance_']:
        assert_almost_equal(f(fa1, attr), f(fa2, attr))
    fa1.max_iter = 1
    fa1.verbose = True
    assert_warns(ConvergenceWarning, fa1.fit, X)

    # Test get_covariance and get_precision with n_components == n_features
    # with n_components < n_features and with n_components == 0
    for n_components in [0, 2, X.shape[1]]:
        fa.n_components = n_components
        fa.fit(X)
        cov = fa.get_covariance()
        precision = fa.get_precision()
        assert_array_almost_equal(np.dot(cov, precision),
                                  np.eye(X.shape[1]), 12)
| {
"repo_name": "sonnyhu/scikit-learn",
"path": "sklearn/decomposition/tests/test_factor_analysis.py",
"copies": "112",
"size": "3203",
"license": "bsd-3-clause",
"hash": -649296008559016400,
"line_mean": 36.6823529412,
"line_max": 76,
"alpha_frac": 0.6625039026,
"autogenerated": false,
"ratio": 3.508214676889376,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
import numpy as np
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.exceptions import ConvergenceWarning
from sklearn.decomposition import FactorAnalysis
from sklearn.utils.testing import ignore_warnings
# Ignore warnings from switching to more power iterations in randomized_svd
@ignore_warnings
def test_factor_analysis():
    # Test FactorAnalysis ability to recover the data covariance structure
    rng = np.random.RandomState(0)
    n_samples, n_features, n_components = 20, 5, 3

    # Some random settings for the generative model
    W = rng.randn(n_components, n_features)
    # latent variable of dim 3, 20 of it
    h = rng.randn(n_samples, n_components)
    # using gamma to model different noise variance
    # per component
    noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)

    # generate observations
    # wlog, mean is 0
    X = np.dot(h, W) + noise

    assert_raises(ValueError, FactorAnalysis, svd_method='foo')
    fa_fail = FactorAnalysis()
    fa_fail.svd_method = 'foo'
    assert_raises(ValueError, fa_fail.fit, X)
    fas = []
    for method in ['randomized', 'lapack']:
        fa = FactorAnalysis(n_components=n_components, svd_method=method)
        fa.fit(X)
        fas.append(fa)

        X_t = fa.transform(X)
        assert X_t.shape == (n_samples, n_components)

        assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
        assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))

        # BUG FIX: the old `np.all(np.diff(fa.loglike_))` only tested the
        # diffs for being non-zero and then compared a bool against 0.;
        # assert the strict monotone increase that EM guarantees instead.
        assert np.diff(fa.loglike_).min() > 0., 'Log likelihood did not increase'

        # Sample Covariance
        scov = np.cov(X, rowvar=0., bias=1.)

        # Model Covariance
        mcov = fa.get_covariance()
        diff = np.sum(np.abs(scov - mcov)) / W.size
        assert diff < 0.1, "Mean absolute difference is %f" % diff
        fa = FactorAnalysis(n_components=n_components,
                            noise_variance_init=np.ones(n_features))
        assert_raises(ValueError, fa.fit, X[:, :2])

    f = lambda x, y: np.abs(getattr(x, y))  # sign will not be equal
    fa1, fa2 = fas
    for attr in ['loglike_', 'components_', 'noise_variance_']:
        assert_almost_equal(f(fa1, attr), f(fa2, attr))
    fa1.max_iter = 1
    fa1.verbose = True
    assert_warns(ConvergenceWarning, fa1.fit, X)

    # Test get_covariance and get_precision with n_components == n_features
    # with n_components < n_features and with n_components == 0
    for n_components in [0, 2, X.shape[1]]:
        fa.n_components = n_components
        fa.fit(X)
        cov = fa.get_covariance()
        precision = fa.get_precision()
        assert_array_almost_equal(np.dot(cov, precision),
                                  np.eye(X.shape[1]), 12)
| {
"repo_name": "chrsrds/scikit-learn",
"path": "sklearn/decomposition/tests/test_factor_analysis.py",
"copies": "1",
"size": "3043",
"license": "bsd-3-clause",
"hash": -8823553646527459000,
"line_mean": 36.1097560976,
"line_max": 76,
"alpha_frac": 0.6523167926,
"autogenerated": false,
"ratio": 3.4977011494252874,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46500179420252874,
"avg_score": null,
"num_lines": null
} |
__author__ = ('Christian Osendorfer, osendorf@in.tum.de;'
'Justin S Bayer, bayerj@in.tum.de'
'SUN Yi, yi@idsia.ch')
from scipy import random, outer, zeros, ones
from pybrain.datasets import SupervisedDataSet, UnsupervisedDataSet
from pybrain.supervised.trainers import Trainer
from pybrain.utilities import abstractMethod
class RbmGibbsTrainerConfig:
    """Hyper-parameters for contrastive-divergence RBM training."""

    def __init__(self):
        self.batchSize = 10  # how many samples in a batch

        # training rate (learning rates for weights and the two bias sets)
        self.rWeights = 0.1
        self.rHidBias = 0.1
        self.rVisBias = 0.1

        # L2 weight-decay coefficient applied to the weight update
        # (used as `rWeights * (w - weightCost * params)` in the trainer).
        self.weightCost = 0.0002

        self.iniMm = 0.5  # initial momentum
        self.finMm = 0.9  # final momentum
        self.mmSwitchIter = 5  # at which iteration we switch the momentum
        self.maxIter = 9  # how many iterations

        self.visibleDistribution = 'bernoulli'
class RbmGibbsTrainer(Trainer):
    """Class for training rbms with contrastive divergence.

    Subclasses must implement `sampler` (how hidden activations are
    sampled) and `updater` (how phase statistics become parameter deltas).
    """

    def __init__(self, rbm, dataset, cfg=None):
        # `rbm` maps visible -> hidden; `invRbm` is its inversion
        # (hidden -> visible), used for reconstructing the data.
        self.rbm = rbm
        self.invRbm = rbm.invert()
        self.dataset = dataset
        self.cfg = RbmGibbsTrainerConfig() if cfg is None else cfg

        # Which dataset field holds the training vectors.
        # NOTE(review): for any other dataset type `datasetField` stays
        # unset and trainOnDataset would fail with AttributeError —
        # confirm callers only pass these two dataset types.
        if isinstance(self.dataset, SupervisedDataSet):
            self.datasetField = 'input'
        elif isinstance(self.dataset, UnsupervisedDataSet):
            self.datasetField = 'sample'

    def train(self):
        """Train on the dataset supplied at construction time."""
        self.trainOnDataset(self.dataset)

    def trainOnDataset(self, dataset):
        """This function trains the RBM using the same algorithm and
        implementation presented in:
        http://www.cs.toronto.edu/~hinton/MatlabForSciencePaper.html"""
        cfg = self.cfg
        for rows in dataset.randomBatches(self.datasetField, cfg.batchSize):
            # Momentum carry-overs for weights / hidden bias / visible bias.
            olduw, olduhb, olduvb = \
                zeros((self.rbm.visibleDim, self.rbm.hiddenDim)), \
                zeros(self.rbm.hiddenDim), zeros(self.rbm.visibleDim)

            for t in range(cfg.maxIter):
                # View the flat parameter vector as a weight matrix; this
                # is a view, so the += below mutates the rbm in place.
                params = self.rbm.params
                params = params.reshape((self.rbm.visibleDim, self.rbm.hiddenDim))
                biasParams = self.rbm.biasParams

                # Switch from the initial to the final momentum after
                # `mmSwitchIter` iterations.
                mm = cfg.iniMm if t < cfg.mmSwitchIter else cfg.finMm

                # Raw contrastive-divergence gradients for this batch.
                w, hb, vb = self.calcUpdateByRows(rows)

                # Momentum update; weightCost applies L2 decay to weights.
                olduw = uw = olduw * mm + \
                    cfg.rWeights * (w - cfg.weightCost * params)
                olduhb = uhb = olduhb * mm + cfg.rHidBias * hb
                olduvb = uvb = olduvb * mm + cfg.rVisBias * vb

                # update the parameters of the original rbm
                params += uw
                biasParams += uhb

                # Create a new inverted rbm with correct parameters
                invBiasParams = self.invRbm.biasParams
                invBiasParams += uvb
                self.invRbm = self.rbm.invert()
                self.invRbm.biasParams[:] = invBiasParams

    def calcUpdateByRow(self, row):
        """This function trains the RBM using only one data row.
        Return a 3-tuple consisting of updates for (weightmatrix,
        hidden bias weights, visible bias weights)."""

        # a) positive phase
        poshp = self.rbm.activate(row)  # compute the posterior probability
        pos = outer(row, poshp)  # fraction from the positive phase
        poshb = poshp
        posvb = row

        # b) the sampling & reconstruction
        sampled = self.sampler(poshp)
        recon = self.invRbm.activate(sampled)  # the re-construction of data

        # c) negative phase
        neghp = self.rbm.activate(recon)
        neg = outer(recon, neghp)
        neghb = neghp
        negvb = recon

        # compute the raw delta
        # !!! note that this delta is only the 'theoretical' delta
        return self.updater(pos, neg, poshb, neghb, posvb, negvb)

    def sampler(self, probabilities):
        # Subclass hook: sample hidden activations from their probabilities.
        abstractMethod()

    def updater(self, pos, neg, poshb, neghb, posvb, negvb):
        # Subclass hook: combine phase statistics into parameter deltas.
        abstractMethod()

    def calcUpdateByRows(self, rows):
        """Return a 3-tuple consisting of update for (weightmatrix,
        hidden bias weights, visible bias weights)."""
        delta_w, delta_hb, delta_vb = \
            zeros((self.rbm.visibleDim, self.rbm.hiddenDim)), \
            zeros(self.rbm.hiddenDim), zeros(self.rbm.visibleDim)

        # Average the per-row deltas over the batch.
        for row in rows:
            dw, dhb, dvb = self.calcUpdateByRow(row)
            delta_w += dw
            delta_hb += dhb
            delta_vb += dvb

        delta_w /= len(rows)
        delta_hb /= len(rows)
        delta_vb /= len(rows)

        # !!! note that this delta is only the 'theoretical' delta
        return delta_w, delta_hb, delta_vb
class RbmBernoulliTrainer(RbmGibbsTrainer):
    """Contrastive-divergence trainer for RBMs with Bernoulli hidden units."""

    def sampler(self, probabilities):
        # Draw binary hidden states: 1 wherever the unit's probability
        # beats a fresh uniform sample.
        uniform_noise = random.rand(self.rbm.hiddenDim)
        return (probabilities > uniform_noise).astype('int32')

    def updater(self, pos, neg, poshb, neghb, posvb, negvb):
        # Plain CD update: positive-phase statistics minus negative-phase.
        return pos - neg, poshb - neghb, posvb - negvb
class RbmGaussTrainer(RbmGibbsTrainer):
    """Contrastive-divergence trainer for RBMs with Gaussian visible units."""

    def __init__(self, rbm, dataset, cfg=None):
        super(RbmGaussTrainer, self).__init__(rbm, dataset, cfg)
        # Unit variance is assumed for every visible unit; the data-driven
        # estimate below was disabled by the original author.
        #samples = self.dataset[self.datasetField]
        # self.visibleVariances = samples.var(axis=0)
        self.visibleVariances = ones(rbm.net.outdim)

    def sampler(self, probabilities):
        # Gaussian sampling around the activations.
        return random.normal(probabilities, self.visibleVariances)

    def updater(self, pos, neg, poshb, neghb, posvb, negvb):
        # Positive-phase weight statistics are rescaled by the variances.
        scaled_pos = pos / self.visibleVariances
        return scaled_pos - neg, poshb - neghb, posvb - negvb
| {
"repo_name": "chanderbgoel/pybrain",
"path": "pybrain/unsupervised/trainers/rbm.py",
"copies": "25",
"size": "6716",
"license": "bsd-3-clause",
"hash": -8331061457445809000,
"line_mean": 33.9791666667,
"line_max": 86,
"alpha_frac": 0.5704288267,
"autogenerated": false,
"ratio": 3.704357418643133,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009535510775363193,
"num_lines": 192
} |
__author__ = 'Christian Tamayo'
def loopstart():
    """Interactively collect 'MM:SS' time strings until the user types 'done'.

    Commands (case-insensitive): 'done' returns the collected list,
    'reset' clears it, 'remove' drops the last entry; anything else is
    validated as 'MM:SS' and appended. Python 2 only (raw_input, print
    statements).
    """
    timetime = []  # accumulated, validated time strings
    while True:
        timething = raw_input('What is your time? Enter time, "done", "remove" or "reset"\n')
        if timething.upper() == 'DONE':
            break
        elif timething.upper() == 'RESET':
            timetime = []
        elif timething.upper() == 'REMOVE':
            try:
                timetime.pop()
            except:
                # list already empty: nothing to pop
                print 'Nothing to remove\n'
            continue
        else:
            try:
                # both fields must parse as ints, otherwise reject the entry
                split = timething.split(':')
                assert type(int(split[0])) == int
                assert type(int(split[1])) == int
                timetime.append(timething)
            except Exception as e:
                print 'Please enter a valid time'
                continue
    return timetime
def time_add(time_list):
    """Sum a list of 'MM:SS' time strings.

    Returns 'MM:SS' when the total is under an hour, 'H:MM:SS' otherwise
    (seconds — and minutes in the hour form — zero-padded to two digits).
    An empty list yields '0:00'.
    """
    tot_mins = 0
    tot_secs = 0
    for times in time_list:
        split = times.split(':')
        tot_mins += int(split[0])
        tot_secs += int(split[1])
    # Floor division (//) keeps this correct under Python 3 too; the old
    # "/" relied on Python 2 integer division.
    tot_mins += tot_secs // 60  # carry whole minutes out of the seconds
    secs = '%02d' % (tot_secs % 60)
    if tot_mins >= 60:
        hours, mins = divmod(tot_mins, 60)
        return '%d:%02d:%s' % (hours, mins, secs)
    return '%d:%s' % (tot_mins, secs)
def main():
    # Collect times interactively, then print their sum (Python 2 print).
    print '\nTotal time is: ' + time_add(loopstart())

if __name__ == '__main__':
    main()
| {
"repo_name": "cjtamayo/time_add",
"path": "time_adder.py",
"copies": "1",
"size": "1537",
"license": "mit",
"hash": 4997344134605839000,
"line_mean": 28.5576923077,
"line_max": 93,
"alpha_frac": 0.485361093,
"autogenerated": false,
"ratio": 3.607981220657277,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.95606560898812,
"avg_score": 0.006537244755215389,
"num_lines": 52
} |
__author__ = 'Christian Tamayo'
def validator(listo):
    """Validate one or more 'MM:SS' time strings.

    Args:
        listo: a single time string or a list of time strings.

    Returns:
        (True, None) when every entry is valid; (False, bad_entry) with the
        first offending string otherwise. Callers index [0] for the flag
        and [1] for the offender; the old code returned a 1-tuple on
        success, so the success shape is now consistent as well.
    """
    if type(listo) == str:
        listo = [listo]
    time = None
    try:
        for time in listo:
            split = time.split(':')
            int(split[0])            # minutes must parse as an int
            if int(split[1]) >= 60:  # seconds must be < 60
                return False, time
            if len(split[1]) != 2:   # ... and zero-padded to two digits
                return False, time
        return True, None
    except (ValueError, IndexError, TypeError, AttributeError):
        # Narrowed from a bare `except:` — covers non-numeric fields,
        # a missing ':' and non-string entries without hiding real bugs.
        return False, time
def time_subtract(time1, time2):
    """Subtract time2 ('MM:SS') from time1; return the difference as 'MM:SS'.

    Returns the int 0 when the two times are equal, and an explanatory
    string when either input is invalid or time1 < time2 (both behaviours
    kept for backward compatibility with existing callers).
    """
    if not validator([time1, time2])[0]:
        return 'please enter valid times'
    if time1 == time2:
        return 0
    # Indexing [0]/[1] (rather than tuple-unpacking the split) preserves
    # the original tolerance of extra ':'-separated fields.
    total1 = int(time1.split(':')[0]) * 60 + int(time1.split(':')[1])
    total2 = int(time2.split(':')[0]) * 60 + int(time2.split(':')[1])
    delta = total1 - total2
    if delta < 0:
        return 'Time 1 must be greater than Time 2'
    # BUG FIX: divmod/floor division keeps this correct on Python 3; the
    # old "/" produced a float minute field there (e.g. '1.0:05').
    mins, secs = divmod(delta, 60)
    return '{}:{:02d}'.format(mins, secs)
class Timer(object):
    """Collects validated 'MM:SS' time strings and reports their total."""
    def __init__(self):
        # Validated 'MM:SS' strings accumulated so far.
        self.times = []
    def validate(self, listo):
        """Return True when every entry in listo is a valid 'MM:SS' string."""
        return validator(listo)[0]
    def show_times(self):
        """Print each stored time, or a notice when none are stored."""
        if len(self.times) == 0:
            print('No times have been entered')
        else:
            for time in self.times:
                print(time)
    def time_add(self, time):
        """Add one time string or a list of them after validation.

        Returns an error-message string on invalid input, else None.
        """
        if self.validate(time):
            if type(time) == str:
                self.times.append(time)
            else:
                for times in time:
                    self.times.append(times)
        else:
            return '{} is not a valid time. Please try again'.format(validator(time)[1])
    def remove_time(self, out_time):
        """Remove a previously entered time; print a notice if absent."""
        if out_time in self.times:
            self.times.remove(out_time)
        else:
            print('{} has not been entered as a time'.format(out_time))
    def time_total(self):
        """Return the sum of all stored times as 'MM:SS' or 'H:MM:SS'."""
        tot_mins = 0
        tot_secs = 0
        for times in self.times:
            split = times.split(':')
            tot_mins += int(split[0])
            tot_secs += int(split[1])
        # Floor division keeps integer results on Python 3 as well; the
        # original '/' silently produced floats there.
        tot_mins += tot_secs // 60
        secs = str(tot_secs % 60).zfill(2)
        if tot_mins >= 60:
            hours, rem = divmod(tot_mins, 60)
            return str(hours) + ':' + str(rem).zfill(2) + ':' + secs
        return str(tot_mins) + ':' + secs
| {
"repo_name": "cjtamayo/time_add",
"path": "timer.py",
"copies": "1",
"size": "2745",
"license": "mit",
"hash": 7141247075316666000,
"line_mean": 30.6785714286,
"line_max": 88,
"alpha_frac": 0.4735883424,
"autogenerated": false,
"ratio": 3.7094594594594597,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46830478018594596,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Christie'
# http://www.pythonforbeginners.com/python-on-the-web/parsingjson/
import urllib3
import json
import sys
# Nick's techniques
#'{0} {1}'.format('fist', 'second')
#myjsondata = {'name': 'nick', 'school': 'Brooklyn High'}
#'{name} went to {school}'.format(**myjsondata)
def getSchoolsByProgramHighlights(queryContains):
    """Query the NYC DOE Pediacities datastore for schools whose
    'Program Highlights' field starts with queryContains.

    Returns the decoded JSON response as a Python object.
    """
    endpoint = 'http://nycdoe.pediacities.com/api/action/datastore_search_sql?sql=SELECT%20%22Printed_Name%22,%20%22Program%20Highlights%22%20from%20%2245f6a257-c13a-431b-acb9-b1468c3ff1e9%22%20where%20%22Program%20Highlights%22%20like%20%27{0}%%27'.format(queryContains)
    pool = urllib3.PoolManager()
    response = pool.request('GET', endpoint)
    # response.data is bytes on Python 3; decode before JSON parsing.
    payload = response.data.decode()
    return json.loads(payload)
# Example to call this program: python33 programSearch.py "Sports"
def main(args):
    """CLI entry point: args[1] is the program-highlight search term."""
    payload = getSchoolsByProgramHighlights(args[1])
    for record in payload['result']['records']:
        line = record['Printed_Name'] + " | " + record['Program Highlights'] + "\n"
        print(line)
if __name__ == '__main__':
    main(sys.argv)
# see http://stackoverflow.com/questions/17523219/how-to-pass-sys-argvn-into-a-function-in-python
#main(sys.argv[1])
| {
"repo_name": "christieewen/nyc-school-choices",
"path": "searchByProgram.py",
"copies": "1",
"size": "1283",
"license": "apache-2.0",
"hash": -6266951710301184000,
"line_mean": 31.075,
"line_max": 266,
"alpha_frac": 0.6921278254,
"autogenerated": false,
"ratio": 2.8259911894273126,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8922911878814574,
"avg_score": 0.019041427202547524,
"num_lines": 40
} |
"""
Python functions to test the connectivity and performance of the iRODS icommands iput and iget.
"""
import os
import json
import subprocess
import time
from timeit import default_timer as timer
import hashlib
from tqdm import tqdm
import shutil
# ANSI terminal escape codes used to colour status output.
RED = "\033[31m"
GREEN = "\033[92m"
BLUE = "\033[34m"
DEFAULT = "\033[0m"
def createTestData():
"""
Creates test data.
Folder: /home/<usr>/testdata or /<TMPDIR>/testdata ; TMPDIR is a shell variable
Files: 100MB, 1GB, 2GB, 5GB
Folders: 100 x 10MB
"""
if "TMPDIR" not in os.environ:
testdata = os.environ["HOME"]+"/testdata"
else:
testdata = os.environ["TMPDIR"]+"/testdata"
# Check whether test folder already exists. If not create one
if os.path.isdir(testdata):
print testdata, "exists"
else:
print "Create", testdata
os.makedirs(testdata)
# Create data
#100MB
print "Write sample100M.txt"
with open(testdata+"/sample100M.txt_0", "wb") as f:
f.write(os.urandom(1024 * 1024 * 100))
#1GB
print "Write sample1G.txt"
with open(testdata+"/sample1G.txt_0", "wb") as f:
f.write(os.urandom(1024 * 1024 * 1024))
#2GB
print "Write sample2G.txt"
with open(testdata+"/sample2G.txt_0", "wb") as f:
f.write(os.urandom(1024 * 1024 * 1024 * 2))
#5GB
print "Write sample5G.txt"
with open(testdata+"/sample5G.txt_0", "wb") as f:
f.write(os.urandom(1024 * 1024 * 1024 * 5))
#Folder of 100*10MB files
print "Create 10MB*100"
os.makedirs(testdata+"/Coll10MB_0")
for i in range(100):
with open(testdata+"/Coll10MB_0/sample10MB_"+str(i)+".txt", "wb") as f:
f.write(os.urandom(1024 * 1024 * 10))
print "%sSUCCESS Test data created.%s" %(GREEN, DEFAULT)
def createEnvJSON(uname, host, zone, auth="PAM", ssl="none"):
    """
    Creates the irods_environment.json

    uname: iRODS user name
    host:  iRODS server host name
    zone:  iRODS zone name
    auth:  value for irods_authentication_scheme (default "PAM")
    ssl:   value for irods_ssl_verify_server (default "none")
    """
    # Check whether /home/<user>/.irods exists. If not create.
    irodsdir = os.environ["HOME"]+"/.irods"
    # Check whether test folder already exists. If not create one
    if os.path.isdir(irodsdir):
        print irodsdir, "exists"
    else:
        print "Create", irodsdir
        os.makedirs(irodsdir)
    # Create json file
    irodsDict = {}
    irodsDict["irods_user_name"] = uname
    irodsDict["irods_host"] = host
    irodsDict["irods_port"] = 1247
    irodsDict["irods_zone_name"] = zone
    irodsDict["irods_authentication_scheme"] = auth
    irodsDict["irods_ssl_verify_server"] = ssl
    print irodsDict
    # Write to disc
    print "Write", irodsdir+"/irods_environment.json"
    with open(irodsdir+"/irods_environment.json", "w") as f:
        json.dump(irodsDict, f)
    # Do an iinit to cache the password
    # NOTE(review): the "iinit" call below is commented out and "ienv" only
    # prints the environment, so no password is actually cached despite the
    # message -- confirm intent.
    print "%sCaching password.%s" %(GREEN, DEFAULT)
    #subprocess.call(["iinit"], shell=True)
    subprocess.call(["ienv"], shell=True)
    print "%sSUCCESS iRODS environment setup.%s" %(GREEN, DEFAULT)
def iRODScreateColl(collname):
    """
    Creates an iRODS collection. If collection exists it starts
    enumerating until new collection is created.
    collname: Collection to create in iRODS, accepts absolute and relative collection paths
    Returns the name actually created: collname + <counter>.
    """
    count = 0
    while(True):
        p = subprocess.Popen(["imkdir "+collname+str(count)], shell=True,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()
        # imkdir reports failures on stderr prefixed with "ERROR"; any such
        # failure is treated as "name taken" and the next suffix is tried.
        # NOTE(review): a persistent error (e.g. no connection) loops forever.
        if err.startswith("ERROR"):
            print RED, err, DEFAULT
            count = count + 1
        else:
            break
    print GREEN, "SUCCESS iRODS collection created:", DEFAULT, collname+str(count)
    return collname+str(count)
def iRODSput(iresource, source, idestination):
    """
    Wrapper for iRODS iput.
    iresource: iRODS resource name
    source: path to local file to upload, must be a file, accepts absolute and relative paths
    idestination: iRODS destination, accepts absolute and relative collection paths
    Returns (stdout, stderr, real, user, sys) where the last three are the
    timing strings reported by the shell's `time` on stderr.
    """
    p = subprocess.Popen(["time iput -r -b -K -f -R "+iresource+" "+source+" "+idestination],
                         shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    # NOTE(review): assumes stderr holds exactly the three "label\tvalue"
    # lines emitted by `time`; any iput error output breaks this parsing.
    elapsed = [i.split("\t")[1] for i in err.strip("\n").split("\n")]
    return (out, err, elapsed[0], elapsed[1], elapsed[2])
def iRODSget(iresource, isource, destination):
    """
    Wrapper for iRODS iget.
    iresource: iRODS resource name
    isource: iRODS source data object or collection, accepts absolute and relative paths
    destination: local destination path, accepts absolute and relative paths
    Returns (stdout, stderr, real, user, sys) where the last three are the
    timing strings reported by the shell's `time` on stderr.
    """
    p = subprocess.Popen(["time iget -r -b -K -f -R "+iresource+" "+isource+" "+destination],
                         shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    # NOTE(review): same fragile `time`-output parsing as in iRODSput.
    elapsed = [i.split("\t")[1] for i in err.strip("\n").split("\n")]
    return (out, err, elapsed[0], elapsed[1], elapsed[2])
def checkIntegrity(iRODSfile, localFile):
    """
    Compares checksums of local file and iRODS file. Uses md5.
    localFile: absolute path to local file
    iRODSfile: iRODS absolute or relative path
    Returns True when the checksum recorded by iRODS equals the local md5.
    """
    p = subprocess.Popen(["ils -L "+iRODSfile],
                         shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    # NOTE(review): assumes the checksum is the 8th whitespace-separated
    # token of `ils -L` output -- fragile across iRODS versions; confirm.
    irodschksum = [item for item in out.split(" ") if item != ""][7]
    # Hash in chunks inside a context manager: the original leaked the file
    # handle and loaded multi-GB test files into memory in one read().
    md5 = hashlib.md5()
    with open(localFile, "rb") as f:
        for chunk in iter(lambda: f.read(1024 * 1024), b""):
            md5.update(chunk)
    return irodschksum == md5.hexdigest()
def cleanUp(collections = ["CONNECTIVITY0", "PERFORMANCE0", "PERFORMANCEC0"],
folders = [os.environ["HOME"]+"/testdata"]):
"""
Removes iRODS collections and replicated testdata.
collections: List of absolut or relative collection names. Default ["CONNECTIVITY", "PERFORMANCE"].
folders: List of local folders. Default [os.environ["HOME"]+"/testdata"]
"""
if "TMPDIR" not in os.environ:
folders.append(os.environ["TMPDIR"]+"/testdata")
print "Remove iRODS collections"
for coll in collections:
p = subprocess.Popen(["irm -r "+coll],
shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p = subprocess.Popen(["irmtrash"], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print "Remove duplicate data"
data = []
for folder in folders:
data.extend([folder+"/" + f
for f in os.listdir(folder) if not f.endswith("_0")])
for d in data:
if os.path.isfile(d):
os.remove(d)
else:
shutil.rmtree(d, ignore_errors=True)
print "%sClean up finished. %s" %(GREEN, DEFAULT)
def connectivity(iresource, data=os.environ["HOME"]+"/testdata/sample100M.txt_0"):
"""
Tests the conectivity to iresource with a 100MB file, checking port 1247 and the data ports.
iresource: iRODS resource
homedir: directory containing the testdata (home directory by default)
Returns a tuple: (date, resource, client, iput/iget, size, time)
"""
# Make sure you are in /home/<user>
os.chdir(os.environ["HOME"])
# Verify that /home/<usr>/testdata/sample100M.txt is there.
if not os.path.isfile(data):
print "%sERROR test data does not exist: %s"+data %(RED, DEFAULT)
raise Exception("File not found.")
print "Create iRODS Collection CONNECTIVITY*"
collection = iRODScreateColl("CONNECTIVITY")
print "iput -f -K -R iresource", data, collection+"/sample100M.txt"
date = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
err, out, elapsed, _, _ = iRODSput(iresource, data, collection+"/sample100M.txt")
if err.startswith("ERROR"):
print "%s" %(RED), err, "%s" %(DEFAULT)
raise Exception("iRODS ERROR")
# Test data integrity
if not checkIntegrity(collection+"/sample100M.txt", data):
print "%sERROR Checksums do not match.%s" %(RED, DEFAULT)
raise Exception("iRODS Data integrity")
result = (date, iresource, os.uname()[1], "iput", "100M", elapsed)
print GREEN, "SUCCESS", result, DEFAULT
return (date, iresource, os.uname()[1], "iput", "100M", elapsed)
def performanceSingleFiles(iresource, maxTimes = 10):
    """
    Tests the performance of iget and iput for single files.
    Test data needs to be stored under $HOME/testdata. The function omits subfolders.
    It ping-pongs the data between the unix file system and iRODS collection:
    iput folder/data_0 --> coll/data_1
    iget coll/data_1 --> folder/data_1
    iput folder/data_1 --> coll/data_2
    iget coll/data_2 --> folder/data_2
    ...
    iresource: iRODS resource
    maxTimes: times how often the file is transferred with iput and iget.
    Returns a list of tuples: [(date, resource, client, iput/iget, size, real time, user time, system time)]
    """
    # If there is a tmp dir, use that for transferring the data
    if "TMPDIR" not in os.environ:
        testdata = os.environ["HOME"]+"/testdata"
    else:
        testdata = os.environ["TMPDIR"]+"/testdata"
    # Only plain files (the *_0 originals written by createTestData).
    dataset = [testdata+"/" + f
               for f in os.listdir(testdata) if os.path.isfile(testdata+"/" + f)]
    for data in dataset:
        # Verify that data is there.
        if not os.path.isfile(data):
            print RED, "ERROR test data does not exist:", data, DEFAULT
            raise Exception("File not found.")
    print "Create iRODS Collection PERFORMANCE"
    collection = iRODScreateColl("PERFORMANCE")
    # Put and get data from iRODS using 1GB, 2GB and 5GB, store data with new file name "+_str(i)"
    result = []
    for data in dataset:
        # NOTE(review): assumes the only "_" is the _<i> suffix; a path
        # containing another underscore would be truncated here.
        data = data.split("_")[0] # ge base name of the file --> no "_str(i)"
        print "Put and get: ", data
        for i in tqdm(range(1, maxTimes)):
            date = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
            print "iput", data+"_"+str(i-1), collection+"/"+os.path.basename(data)+"_"+str(i)
            # out/err are unused; real/user/sys are the timing strings.
            out, err, real, user, sys = iRODSput(iresource, data+"_"+str(i-1),
                                                 collection+"/"+os.path.basename(data)+"_"+str(i))
            print "integrity", collection+"/"+os.path.basename(data+"_"+str(i)), data+"_"+str(i-1)
            if not checkIntegrity(collection+"/"+os.path.basename(data+"_"+str(i)), data+"_"+str(i-1)):
                print "%sERROR Checksums do not match.%s" %(RED, DEFAULT)
                raise Exception("iRODS Data integrity")
            else:
                print "Integrity done"
            # [6:] strips the "sample" prefix, leaving the size label.
            result.append((date, iresource, os.uname()[1], "iput", os.path.basename(data).split('.')[0][6:], real, user, sys))
            date = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
            print "iget", collection+"/"+os.path.basename(data)+"_"+str(i), data+"_"+str(i)
            out, err, real, user, sys = iRODSget(iresource, collection+"/"+os.path.basename(data+"_"+str(i)),
                                                 data+"_"+str(i))
            print "integrity", collection+"/"+os.path.basename(data+"_"+str(i)), data+"_"+str(i)
            if not checkIntegrity(collection+"/"+os.path.basename(data)+"_"+str(i), data+"_"+str(i)):
                print "%sERROR Checksums do not match.%s" %(RED, DEFAULT)
                raise Exception("iRODS Data integrity")
            else:
                print "Integrity done"
            result.append((date, iresource, os.uname()[1], "iget", os.path.basename(data).split('.')[0][6:], real, user, sys))
    return result
def performanceCollections(iresource, maxTimes = 10):
    """
    Tests the performance of iget and iput for folders (collections) of files.
    Test data needs to be stored under $HOME/testdata. The function omits plain files.
    It ping-pongs the data collections between the unix file system and iRODS collection:
    iput folder/data_0/ --> coll/data_1/
    iget coll/data_1/ --> folder/data_1/
    iput folder/data_1/ --> coll/data_2/
    iget coll/data_2/ --> folder/data_2/
    ...
    iresource: iRODS resource
    maxTimes: times how often the file is transferred with iput and iget.
    Returns a list of tuples: [(date, resource, client, iput/iget, size, real time, user time, system time)]
    """
    # If there is a tmp dir, use that for transferring the data
    if "TMPDIR" not in os.environ:
        testdata = os.environ["HOME"]+"/testdata"
    else:
        testdata = os.environ["TMPDIR"]+"/testdata"
    # Only directories (the Coll10MB_0 style folders from createTestData).
    dataset = [testdata+"/" + f
               for f in os.listdir(testdata) if os.path.isdir(testdata+"/" + f)]
    for data in dataset:
        # Verify that data is not empty and data is there.
        files = [f for f in os.listdir(data) if os.path.isfile(data+"/" + f)]
        if len(files) == 0:
            print RED, "ERROR collection empty:", data, DEFAULT
            raise Exception("No files in data collection.")
    print "Create iRODS Collection PERFORMANCEC"
    collection = iRODScreateColl("PERFORMANCEC")
    result = []
    for data in dataset:
        # NOTE(review): same underscore assumption as performanceSingleFiles.
        data = data.split("_")[0] # ge base name of the folder --> no "_str(i)"
        print "Put and get: ", data
        for i in tqdm(range(1, maxTimes)):
            date = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
            print "iput -r", data+"_"+str(i-1), collection+"/"+os.path.basename(data)+"_"+str(i)
            out, err, real, user, sys = iRODSput(iresource, data+"_"+str(i-1),
                                                 collection+"/"+os.path.basename(data)+"_"+str(i))
            # [4:] strips the "Coll" prefix, leaving the size label.
            result.append((date, iresource, os.uname()[1], "iput", os.path.basename(data).split('.')[0][4:], real, user, sys))
            date = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
            print "iget -r", collection+"/"+os.path.basename(data)+"_"+str(i), data+"_"+str(i)
            out, err, real, user, sys = iRODSget(iresource, collection+"/"+os.path.basename(data+"_"+str(i)),
                                                 data+"_"+str(i))
            result.append((date, iresource, os.uname()[1], "iget", os.path.basename(data).split('.')[0][4:], real, user, sys))
            #TODO:Integrity checks
    return result
| {
"repo_name": "chStaiger/iRODS_tests",
"path": "iRODStestFunctions.py",
"copies": "1",
"size": "14131",
"license": "mit",
"hash": 7981208962537639000,
"line_mean": 38.5826330532,
"line_max": 130,
"alpha_frac": 0.6147477178,
"autogenerated": false,
"ratio": 3.502106567534077,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9573123085449784,
"avg_score": 0.008746239976858491,
"num_lines": 357
} |
__author__ = 'Christof Pieloth'
import logging
import os
from packbacker.errors import ParameterError
from packbacker.installer import Installer
from packbacker.utils import UtilsUI
class Job(object):
    """An ordered list of installers parsed from a job file."""

    log = logging.getLogger(__name__)

    def __init__(self):
        self._installers = []

    def add_installer(self, installer):
        """Queue an installer to be run by execute()."""
        self._installers.append(installer)

    def execute(self):
        """Run each queued installer after user confirmation.

        Returns the number of installers that failed or raised.
        """
        errors = 0
        for i in self._installers:
            if not UtilsUI.ask_for_execute('Install ' + i.label):
                continue
            try:
                if i.install():
                    Job.log.info(i.name + ' executed.')
                else:
                    errors += 1
                    Job.log.error('Error on executing ' + i.name + '!')
            except Exception as ex:
                errors += 1
                Job.log.error('Unknown error:\n' + str(ex))
        return errors

    @staticmethod
    def read_job(fname):
        """Parse a job file and return a Job, or None when it cannot be read.

        Lines starting with '#' are comments; every other line is matched
        against the installer prototypes loaded from the installers package.
        """
        path = os.path.dirname(os.path.realpath(__file__))
        prototypes = Installer.load_prototypes(os.path.join(path, 'installers'))
        job = None
        try:
            job_file = open(fname, 'r')
        except IOError as err:
            Job.log.critical('Error on reading job file:\n' + str(err))
        else:
            with job_file:
                job = Job()
                for line in job_file:
                    if line[0] == '#':
                        continue
                    for p in prototypes:
                        if p.matches(line):
                            try:
                                params = Job.read_parameter(line)
                                cmd = p.instance(params)
                                job.add_installer(cmd)
                            except ParameterError as err:
                                Job.log.error("Installer '" + p.name + "' is skipped: " + str(err))
                            except Exception as ex:
                                Job.log.critical('Unknown error: \n' + str(ex))
                            continue
        return job

    @staticmethod
    def read_parameter(line):
        """Parse the 'key=value;key=value' list after ': ' into a dict."""
        params = {}
        i = line.find(': ') + 2
        line = line[i:]
        pairs = line.split(';')
        for pair in pairs:
            pair = pair.strip()
            # Split on the first '=' only so values may themselves contain
            # '=' (paths, URLs); the original silently dropped such pairs.
            par = pair.split('=', 1)
            if len(par) == 2:
                params[par[0]] = par[1]
        return params
"repo_name": "cpieloth/PackBacker",
"path": "packbacker/job.py",
"copies": "1",
"size": "2453",
"license": "apache-2.0",
"hash": -9005515819418779000,
"line_mean": 30.0632911392,
"line_max": 99,
"alpha_frac": 0.4626987362,
"autogenerated": false,
"ratio": 4.559479553903346,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000885629451030295,
"num_lines": 79
} |
__author__ = 'Christof Pieloth'
import logging
import os
from packbacker.pluginloader import BaseClassCondition
from packbacker.pluginloader import PluginLoader
from packbacker.utils import UtilsUI
class Installer(object):
    """Abstract installer with default implementations of pre_install and post_install."""

    def __init__(self, name, label):
        self.__name = name
        self.__label = label
        self.__log = logging.getLogger(self.__name)
        # Destination directory defaults to the user's home.
        self.__arg_dest = os.path.expanduser('~')
        self.__arg_version = None

    @property
    def name(self):
        """Short name of the installer (used to match job-file lines)."""
        return self.__name

    @property
    def label(self):
        """Long, human-readable name of the installer."""
        return self.__label

    @property
    def arg_dest(self):
        """Destination directory."""
        return self.__arg_dest

    @arg_dest.setter
    def arg_dest(self, dest):
        self.__arg_dest = os.path.expanduser(dest)

    @property
    def arg_version(self):
        """Version of the dependency (optional)."""
        return self.__arg_version

    @arg_version.setter
    def arg_version(self, version):
        self.__arg_version = version

    @property
    def log(self):
        """Logger for this installer."""
        return self.__log

    def _pre_install(self):
        """Is called before the installation. It can be used to check for tools which are required."""
        return True

    def _install(self):
        """Abstract method, implements the installation."""
        self.log.debug('No yet implemented: ' + str(self.name))
        return False

    def _post_install(self):
        """Is called after a successful installation. Can be used to test installation or for user instructions."""
        return True

    def install(self):
        """Run pre-install, install and post-install; return overall success.

        Any exception is caught, logged, and reported as failure.
        """
        UtilsUI.print_install_begin(self.label)
        try:
            success = self._pre_install()
            if success:
                success = self._install()
            if success:
                success = self._post_install()
        except Exception as ex:
            success = False
            self.log.error("Unexpected error:\n" + str(ex))
        UtilsUI.print_install_end(self.label)
        return success

    @classmethod
    def instance(cls, params):
        """
        Abstract method, returns an initialized instance of a specific command.
        Can throw a ParameterError, if parameters are missing.
        """
        # NotImplementedError is the idiomatic signal for an abstract
        # method; callers catching Exception still catch it.
        raise NotImplementedError('Instance method not implemented for: ' + str(cls))

    @classmethod
    def prototype(cls):
        """Abstract method, returns an instance of a specific command, e.g. for matches() or is_available()"""
        raise NotImplementedError('Prototype method not implemented for: ' + str(cls))

    def matches(self, installer):
        """Checks if this command should be used for execution."""
        return installer.lower().startswith(self.name)

    @staticmethod
    def load_prototypes(path):
        """Returns prototypes of all known installers found under `path`."""
        prototypes = []
        loader = PluginLoader()
        loader.load_directory(path, BaseClassCondition(Installer))
        for k in loader.plugins:
            clazz = loader.plugins[k]
            # Only instantiate entries that are actually constructible.
            if callable(clazz):
                prototypes.append(clazz().prototype())
        return prototypes
"repo_name": "cpieloth/PackBacker",
"path": "packbacker/installer.py",
"copies": "1",
"size": "3384",
"license": "apache-2.0",
"hash": 5834774232013646000,
"line_mean": 28.9557522124,
"line_max": 115,
"alpha_frac": 0.6125886525,
"autogenerated": false,
"ratio": 4.530120481927711,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5642709134427711,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Christof Pieloth'
import logging
import os
from packbacker.utils import UtilsUI
class Installer(object):
    """Abstract installer with default implementations of pre_install and post_install."""

    def __init__(self, name, label):
        self.__name = name
        self.__label = label
        # Destination directory defaults to the user's home.
        self.__arg_dest = os.path.expanduser('~')
        self.__log = logging.getLogger(self.__name)

    @property
    def name(self):
        """Short name of the installer (used to match job-file lines)."""
        return self.__name

    @property
    def label(self):
        """Long, human-readable name of the installer."""
        return self.__label

    @property
    def arg_dest(self):
        """Destination directory."""
        return self.__arg_dest

    @arg_dest.setter
    def arg_dest(self, dest):
        self.__arg_dest = os.path.expanduser(dest)

    @property
    def log(self):
        """Logger for this installer."""
        return self.__log

    def _pre_install(self):
        """Is called before the installation. It can be used to check for tools which are required."""
        return True

    def _install(self):
        """Abstract method, implements the installation."""
        self.log.debug('No yet implemented: ' + str(self.name))
        return False

    def _post_install(self):
        """Is called after a successful installation. Can be used to test installation or for user instructions."""
        return True

    def install(self):
        """Run pre-install, install and post-install; return overall success.

        Any exception is caught, logged, and reported as failure.
        """
        UtilsUI.print_install_begin(self.label)
        try:
            success = self._pre_install()
            if success:
                success = self._install()
            if success:
                success = self._post_install()
        except Exception as ex:
            success = False
            self.log.error("Unexpected error:\n" + str(ex))
        UtilsUI.print_install_end(self.label)
        return success

    @classmethod
    def instance(cls, params):
        """
        Abstract method, returns an initialized instance of a specific command.
        Can throw a ParameterError, if parameters are missing.
        """
        # NotImplementedError is the idiomatic signal for an abstract
        # method; callers catching Exception still catch it.
        raise NotImplementedError('Instance method not implemented for: ' + str(cls))

    @classmethod
    def prototype(cls):
        """Abstract method, returns an instance of a specific command, e.g. for matches() or is_available()"""
        raise NotImplementedError('Prototype method not implemented for: ' + str(cls))

    def matches(self, installer):
        """Checks if this command should be used for execution."""
        return installer.lower().startswith(self.name)
"repo_name": "cpieloth/CppMath",
"path": "tools/PackBacker/packbacker/installers/installer.py",
"copies": "1",
"size": "2604",
"license": "apache-2.0",
"hash": 1717855819827563000,
"line_mean": 28.6022727273,
"line_max": 115,
"alpha_frac": 0.6059907834,
"autogenerated": false,
"ratio": 4.528695652173913,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000645976886770325,
"num_lines": 88
} |
__author__ = 'Christof Pieloth'
import logging
import subprocess
class Utils:
    """Miscellaneous helper routines."""

    log = logging.getLogger(__name__)

    @staticmethod
    def check_program(program, arg):
        """Return True when `program` can be launched with `arg`, else False."""
        try:
            subprocess.call([program, arg], stdout=subprocess.PIPE)
        except OSError:
            # Typically raised when the executable is not on PATH.
            Utils.log.error("Could not found: " + program)
            return False
        return True
class UtilsUI:
    """Console interaction helpers: yes/no prompts and formatted banners."""

    # Banner widths for installer sections and individual steps.
    COLUMNS_INSTALL = 80
    COLUMNS_STEP = 40

    @staticmethod
    def ask_for_execute(action):
        """Ask a yes/no question; any answer starting with 'y' means yes."""
        answer = input(action + " y/n? ")
        return answer.startswith('y')

    @staticmethod
    def ask_for_make_jobs():
        """Prompt for the number of parallel make jobs (>= 1, default 2)."""
        jobs = 2
        try:
            jobs = int(input("Number of jobs (default: 2): "))
        except ValueError:
            UtilsUI.print_error("Wrong input format.")
        if jobs < 1:
            jobs = 1
        UtilsUI.print("Using job=" + str(jobs))
        return jobs

    @staticmethod
    def print_install_begin(dep_name):
        """Print the banner that opens an installer section."""
        UtilsUI.print('=' * UtilsUI.COLUMNS_INSTALL)
        UtilsUI.print(dep_name)
        UtilsUI.print('-' * UtilsUI.COLUMNS_INSTALL)

    @staticmethod
    def print_install_end(dep_name):
        """Print the banner that closes an installer section."""
        UtilsUI.print('-' * UtilsUI.COLUMNS_INSTALL)
        UtilsUI.print(dep_name)
        UtilsUI.print('=' * UtilsUI.COLUMNS_INSTALL)

    @staticmethod
    def print_step_begin(action_str):
        """Announce the start of a single step."""
        UtilsUI.print(action_str + " ...")
        UtilsUI.print('-' * UtilsUI.COLUMNS_STEP)

    @staticmethod
    def print_step_end(action_str):
        """Announce the completion of a single step."""
        UtilsUI.print('-' * UtilsUI.COLUMNS_STEP)
        UtilsUI.print(action_str + " ... finished!")

    @staticmethod
    def print_env_var(name_env, value=None):
        """Show environment variables the user has to set.

        name_env is either a dict of name -> value (with value=None) or a
        single variable name combined with `value`.
        """
        if value is None:
            if len(name_env) == 1:
                UtilsUI.print('Environment variable to set:')
            else:
                UtilsUI.print('Environment variables to set:')
            UtilsUI.print()
            for name, val in name_env.items():
                UtilsUI.print(name + "=" + val)
        else:
            UtilsUI.print('Environment variable to set:')
            UtilsUI.print()
            UtilsUI.print(name_env + "=" + value)
        UtilsUI.print()

    @staticmethod
    def print(*args):
        """Single choke point for normal console output."""
        print(*args)

    @staticmethod
    def print_error(*args):
        """Error output; currently identical to normal output."""
        print(*args)
"repo_name": "cpieloth/CppMath",
"path": "tools/PackBacker/packbacker/utils.py",
"copies": "2",
"size": "2407",
"license": "apache-2.0",
"hash": 5665650804013992000,
"line_mean": 25.7555555556,
"line_max": 67,
"alpha_frac": 0.5575405069,
"autogenerated": false,
"ratio": 4.018363939899833,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5575904446799833,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Christof Pieloth'
import logging
from packbacker.errors import ParameterError
from packbacker.installers import installer_prototypes
from packbacker.utils import UtilsUI
class Job(object):
    """An ordered collection of installers read from a job file."""

    log = logging.getLogger(__name__)

    def __init__(self):
        self._installers = []

    def add_installer(self, installer):
        """Queue an installer for execute()."""
        self._installers.append(installer)

    def execute(self):
        """Run every queued installer the user confirms.

        Returns the number of installers that failed or raised.
        """
        errors = 0
        for inst in self._installers:
            if not UtilsUI.ask_for_execute('Install ' + inst.label):
                continue
            try:
                success = inst.install()
            except Exception as ex:
                errors += 1
                Job.log.error('Unknown error:\n' + str(ex))
                continue
            if success:
                Job.log.info(inst.name + ' executed.')
            else:
                errors += 1
                Job.log.error('Error on executing ' + inst.name + '!')
        return errors

    @staticmethod
    def read_job(fname):
        """Build a Job from a job file; return None when the file cannot be opened.

        Comment lines start with '#'; every other line is matched against
        all installer prototypes.
        """
        prototypes = []
        prototypes.extend(installer_prototypes())
        job = None
        try:
            job_file = open(fname, 'r')
        except IOError as err:
            Job.log.critical('Error on reading job file:\n' + str(err))
            return job
        with job_file:
            job = Job()
            for line in job_file:
                if line[0] == '#':
                    continue
                for proto in prototypes:
                    if not proto.matches(line):
                        continue
                    try:
                        job.add_installer(proto.instance(Job.read_parameter(line)))
                    except ParameterError as err:
                        Job.log.error("Installer '" + proto.name + "' is skipped: " + str(err))
                    except Exception as ex:
                        Job.log.critical('Unknown error: \n' + str(ex))
        return job

    @staticmethod
    def read_parameter(line):
        """Parse the 'key=value;key=value' tail after ': ' into a dict."""
        start = line.find(': ') + 2
        params = {}
        for chunk in line[start:].split(';'):
            key_value = chunk.strip().split('=')
            if len(key_value) == 2:
                params[key_value[0]] = key_value[1]
        return params
"repo_name": "cpieloth/CppMath",
"path": "tools/PackBacker/packbacker/job.py",
"copies": "1",
"size": "2389",
"license": "apache-2.0",
"hash": -683360499102689500,
"line_mean": 29.641025641,
"line_max": 99,
"alpha_frac": 0.4566764337,
"autogenerated": false,
"ratio": 4.65692007797271,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007387057387057387,
"num_lines": 78
} |
__author__ = 'Christof Pieloth'
import os
from subprocess import call
from packbacker.constants import Parameter
from packbacker.errors import ParameterError
from packbacker.utils import Utils
from packbacker.utils import UtilsUI
from packbacker.installer import Installer
class CxxTest(Installer):
    """
    Downloads necessary files for CxxTest.
    WWW: http://cxxtest.com
    """

    # Directory name of the git clone inside arg_dest.
    REPO_FOLDER = "cxxtest"

    def __init__(self):
        Installer.__init__(self, 'cxxtest', 'CxxTest')
        self.arg_version = "4.4"  # 2014-06-03

    @classmethod
    def instance(cls, params):
        """Build a configured CxxTest installer from a parameter dict.

        Raises ParameterError when the destination directory is missing.
        """
        if Parameter.DEST_DIR not in params:
            raise ParameterError(Parameter.DEST_DIR + ' parameter is missing!')
        installer = CxxTest()
        installer.arg_dest = params[Parameter.DEST_DIR]
        if Parameter.VERSION in params:
            installer.arg_version = params[Parameter.VERSION]
        return installer

    @classmethod
    def prototype(cls):
        """Prototype instance used for matching job-file lines."""
        return CxxTest()

    def _pre_install(self):
        """Require git (for cloning); then python (CxxTest tooling)."""
        if not Utils.check_program("git", "--version"):
            return False
        return Utils.check_program("python", "--version")

    def _install(self):
        """Interactively download and initialize the repository."""
        if UtilsUI.ask_for_execute("Download " + self.name):
            if not self._clone_repo():
                return False
        if UtilsUI.ask_for_execute("Initialize " + self.name):
            if not self._checkout_version():
                return False
        return True

    def _post_install(self):
        """Tell the user which environment variables to export."""
        root_dir = os.path.join(self.arg_dest, self.REPO_FOLDER)
        # CxxTest headers live directly in the repository root.
        envs = {'CXXTEST_ROOT': root_dir, 'CXXTEST_INCLUDE_DIR': root_dir}
        UtilsUI.print_env_var(envs)
        return True

    def _clone_repo(self):
        """Clone the CxxTest GitHub repository into the destination."""
        UtilsUI.print_step_begin("Downloading")
        repo = "https://github.com/CxxTest/cxxtest.git"
        repo_dir = os.path.join(self.arg_dest, self.REPO_FOLDER)
        call("git clone " + repo + " " + repo_dir, shell=True)
        UtilsUI.print_step_end("Downloading")
        return True

    def _checkout_version(self):
        """Check out the configured release tag inside the clone."""
        UtilsUI.print_step_begin("Initializing")
        repo_dir = os.path.join(self.arg_dest, self.REPO_FOLDER)
        os.chdir(repo_dir)
        call("git checkout " + self.arg_version, shell=True)
        UtilsUI.print_step_end("Initializing")
        return True
"repo_name": "cpieloth/PackBacker",
"path": "packbacker/installers/cxxtest.py",
"copies": "1",
"size": "2512",
"license": "apache-2.0",
"hash": -2255686142027108400,
"line_mean": 29.2771084337,
"line_max": 79,
"alpha_frac": 0.6277866242,
"autogenerated": false,
"ratio": 3.806060606060606,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9930676653215584,
"avg_score": 0.0006341154090044388,
"num_lines": 83
} |
__author__ = 'Christoph Ehlen'
import sys
import getopt
from database import Database
from operations import do_anonymization, do_cleanup, do_user_cut, do_blur, do_k_anonymity
# Operation names accepted for the -o command line option.
validOps = ["cleanup", "anonymization", "haircut", "blur", "k_anonymity"]
# Dispatch table: operation name -> callable taking (db, positional_args).
callOps = {
    "cleanup": lambda db, args: do_cleanup(db),
    "anonymization": lambda db, args: do_anonymization(db, args[0]),
    "haircut": lambda db, args: do_user_cut(db, args[0], args[1]),
    "blur": lambda db, args: do_blur(db, args[0]),
    "k_anonymity": lambda db, args: do_k_anonymity(db, args[0], args[1])
}
def print_help():
    # Emit the usage text verbatim (Python 2 print statement; the leading
    # newline of the triple-quoted string is intentional).
    print """
    Usage: app.py [-d database name] [-u database user] [-p database password]
        [-h database host] [-o operation]
    Valid Operations:
    cleanup: Flags all expired rows as deleted
        No Options
    anonymization: Sets all user_ids to 0
        Option:
        SQL Select Query: Every row found by this query will be set
            to user_id = 0.
    haircut: Cuts trips where they split and could be traceable
        Options:
        First - the radius in which we look
        Second - the number of users that needs to be in the radius
    blur: Offsets every GPS point by a random amount
        Options:
        The maximum distance a point can have to its origin
    k_anonymity: Creates a centroid for every GPS point
        Options:
        First - Number of maximum GPS points to use. If 0 then we just grab
            every point inside the radius
        Second - Maximum distance a point can have to be used for the
            creation of the new point
    """
def main(argv):
    """Parse command-line options and dispatch the requested operation.

    argv: list of command-line arguments, typically sys.argv[1:].

    Recognised switches (see print_help() for details):
        -d database name (default liveandgov_dev), -u user (default
        liveandgov), -p password (default empty), -h host (default
        localhost), -o operation (must be one of validOps).
    Remaining positional arguments are passed through to the operation
    callable selected from callOps.
    """
    if len(argv) == 0:
        print_help()
        return
    try:
        options = getopt.getopt(argv, "d:u:p:h:o:")
    except getopt.GetoptError as err:
        # A bad switch or missing value should produce usage help, not an
        # uncaught traceback.
        print("ERROR: " + str(err))
        print_help()
        return
    db = "liveandgov_dev"
    user = "liveandgov"
    password = ""
    host = "localhost"
    operation = ""
    for (op, v) in options[0]:
        if op == "-d":
            db = v
        elif op == "-u":
            user = v
        elif op == "-p":
            password = v
        elif op == "-h":
            host = v
        elif op == "-o":
            operation = v
    if operation not in validOps:
        print("ERROR: Operation is not valid!")
        print_help()
        return
    print("Running operation " + operation + "...")
    database = Database(host, db, user, password)
    # options[1] holds the leftover positional arguments for the operation.
    # (The previous debug `print(options)` leaked the parsed options -
    # including the password - to stdout, and has been removed.)
    callOps[operation](database, options[1])
if __name__ == "__main__":
    # Drop the script name; main() expects only the actual arguments.
    main(sys.argv[1:])
"repo_name": "Institute-Web-Science-and-Technologies/LiveGovWP1",
"path": "server/DbAnonymization/dbops/app.py",
"copies": "1",
"size": "2511",
"license": "mit",
"hash": 7152942116458740000,
"line_mean": 29.6341463415,
"line_max": 89,
"alpha_frac": 0.5802469136,
"autogenerated": false,
"ratio": 3.7035398230088497,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9778079255291612,
"avg_score": 0.0011414962634474829,
"num_lines": 82
} |
"""
A module to ease the creation of card games in written in python
The Card class is the representation of a single card, including its
image and the back image.
The Stack class is the representation of a stack of playing cards.
To Use:
from playingcards import Card
from playingcards import Stack
"""
import os.path
import random
class Card:
    """Representation of a Playing Card.

    The 52 cards are numbered consecutively by suit:
        1-13  Spades   (1=Ace ... 13=King)
        14-26 Diamonds
        27-39 Hearts
        40-52 Clubs
    """

    def __init__(self, index, facedown=False):
        """Set up the card.

        index: card number between 1 and 52 - 1=Ace of Spades,
               15=Two of Diamonds, 27=Ace of Hearts, 40=Ace of Clubs.
               (The previous docstring said "1 and 51" and "40=Two of
               Clubs"; both contradicted the suit/value arithmetic below.)
        facedown: True or False, default False - when facedown, operations
               that return the image of the card or a string representation
               of the card return the image of the back of the card or the
               string 'Facedown'.
        """
        # Index 0 ("Back") pads the list so self.value (1-13) indexes directly.
        self.cardnames = ["Back", "Ace", "Two", "Three", "Four", "Five", "Six",
                          "Seven", "Eight", "Nine", "Ten", "Jack", "Queen",
                          "King"]
        # Card images, if present, live in an "img" directory next to this
        # module; attributes stay "" when the images are not installed.
        modpath = os.path.abspath(os.path.dirname(__file__))
        self.imgdir = os.path.join(modpath, "img")
        self.imgfile = ""
        self.backimg = ""
        self.facedown = facedown
        self.index = index
        # Each suit occupies 13 consecutive indices; derive suit and face
        # value from the absolute index.
        if self.index < 14:
            self.suitname = "Spades"
            self.value = index
        elif self.index < 27:
            self.suitname = "Diamonds"
            self.value = index - 13
        elif self.index < 40:
            self.suitname = "Hearts"
            self.value = index - 26
        else:
            self.suitname = "Clubs"
            self.value = index - 39
        self.cardname = self.cardnames[self.value]
        self.imgfn = "%d.png" % index
        if os.path.isdir(self.imgdir):
            imgpath = os.path.join(self.imgdir, self.imgfn)
            self.backimg = os.path.join(self.imgdir, "back.png")
            if os.path.exists(imgpath):
                self.imgfile = imgpath

    def name(self):
        """Return a string representation of the card, or 'Facedown' if the
        card is facedown.

        input: none
        return: string
        """
        if self.facedown:
            return "Facedown"
        return "%s of %s" % (self.cardname, self.suitname)

    def suit(self):
        """Return the suit of the card as string.

        input: none
        return: string
        """
        return self.suitname

    def indexname(self):
        """Return a string representation of the value of the card.

        input: none
        return: string
        """
        return self.cardname

    def image(self):
        """Return the fully-qualified path name of the image file
        representing this card (the back image when facedown).

        input: none
        return: string (filename)
        """
        if self.facedown:
            return self.backimg
        return self.imgfile

    def flip(self):
        """Flip the card.

        Returns the fully-qualified path name of the image file representing
        either the card face or the back of the card.

        input: none
        return: string (filename)
        """
        # Simple boolean toggle (was: `False if self.facedown else True`).
        self.facedown = not self.facedown
        return self.image()

    def isdown(self):
        """Return True if the card is currently facedown.

        input: none
        returns: boolean
        """
        return self.facedown
class Stack:
    """Representation of a stack of playing cards."""

    def __init__(self, numberofdecks=0, noaces=0):
        """Build the stack.

        numberofdecks: when positive, pre-populate the stack with that many
            full 52-card decks.
        noaces: when positive, pre-populate with one full deck minus the
            four Aces.
        Using both options together yields a deck of 100 cards with only
        4 Aces in it (probably not what you want).
        """
        self.cards = []
        if numberofdecks > 0:
            self.initfull(numberofdecks)
        if noaces > 0:
            self.initnoaces()
        random.seed()

    def addnewcard(self, index):
        """Create Card number *index* and append it to the bottom.

        index: Card number 1-52
        returns: None
        """
        self.cards.append(Card(index))

    def addcard(self, card):
        """Append *card* to the bottom of the stack.

        card: Card to add to the Stack
        returns: None
        """
        self.cards.append(card)

    def addtopcard(self, card):
        """Place *card* on top of the stack.

        card: Card to add to the Stack
        returns: None
        """
        self.cards.insert(0, card)

    def initfull(self, numberofdecks=1):
        """Append the requested number of complete 52-card decks.

        numberofdecks: integer - this many 52 card decks will be set up
        returns: None
        """
        for _ in range(numberofdecks):
            for idx in range(1, 53):
                self.addnewcard(idx)

    def initnoaces(self):
        """Fill the stack with one full deck, then pull out the four Aces.

        input: None
        returns: None
        """
        self.initfull(1)
        # 0-based positions of the Aces; removed highest-first so earlier
        # removals do not shift the later positions.
        for pos in (39, 26, 13, 0):
            self.getcard(pos)

    def length(self):
        """Return the number of cards currently in the stack.

        input: None
        returns: integer
        """
        return len(self.cards)

    def shuffle(self):
        """Randomise the order of the cards in place.

        input: None
        returns: None
        """
        random.shuffle(self.cards)

    def topcard(self):
        """Peek at the top card without removing it (None when empty).

        input: None
        returns: Card
        """
        return self.cards[0] if self.cards else None

    def bottomcard(self):
        """Peek at the bottom card without removing it (None when empty).

        input: None
        returns: Card
        """
        return self.cards[-1] if self.cards else None

    def getbottomcard(self):
        """Remove and return the bottom card (None when empty).

        input: None
        returns: Card
        """
        if not self.cards:
            return None
        return self.cards.pop()

    def gettopcard(self):
        """Remove and return the top card (None when empty).

        input: None
        returns: Card
        """
        if not self.cards:
            return None
        top = self.cards[0]
        self.cards = self.cards[1:]
        return top

    def getcard(self, index):
        """Remove and return the card at position *index* (0-based).

        index: card position to remove from the stack
        returns: Card, or None when index is past the end of the stack
        """
        if index < len(self.cards):
            chosen = self.cards[index]
            if index > 0:
                self.cards = self.cards[:index] + self.cards[index + 1:]
            else:
                self.cards = self.cards[1:]
            return chosen
        return None

    def getncards(self, n):
        """Remove and return the top *n* cards.

        When n exceeds the stack size, everything remaining is returned
        (an empty list for an empty stack).

        n: number of cards to remove
        returns: [Cards]
        """
        if n < len(self.cards):
            taken = self.cards[:n]
            self.cards = self.cards[n:]
        else:
            taken = self.cards
            self.cards = []
        return taken

    def addncards(self, ncards):
        """Append a list of cards to the bottom of this stack.

        ncards: [Cards]
        returns: None
        """
        for card in ncards:
            self.addcard(card)

    def status(self):
        """Return '<count> cards: <top card name>'.

        i.e. "12 cards: Five of Clubs"; the name part is empty when the
        stack has no cards.

        input: None
        returns: String
        """
        top = self.topcard()
        label = top.name() if top is not None else ""
        return "%d cards: %s" % (self.length(), label)
| {
"repo_name": "ccdale/playingcards",
"path": "playingcards.py",
"copies": "1",
"size": "7319",
"license": "mit",
"hash": -7293436588681191000,
"line_mean": 23.4782608696,
"line_max": 118,
"alpha_frac": 0.6358792185,
"autogenerated": false,
"ratio": 3.5702439024390245,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9610655159786449,
"avg_score": 0.01909359223051511,
"num_lines": 299
} |
import os
import logging
import pandas as pd
from math import ceil, pi, exp, log, sqrt
from pyproj import Proj
import numpy as np
from collections import defaultdict
# Timestamped log output, DEBUG level and above, for the whole module.
logging.basicConfig(format='%(asctime)s\t\t%(message)s', level=logging.DEBUG)

# general
LHV_DIESEL = 9.9445485  # (kWh/l) lower heating value
HOURS_PER_YEAR = 8760

# Columns in settlements file must match these exactly
SET_COUNTRY = 'Country'  # This cannot be changed, lots of code will break
SET_X = 'X'  # Coordinate in kilometres
SET_Y = 'Y'  # Coordinate in kilometres
SET_X_DEG = 'X_deg'  # Coordinates in degrees
SET_Y_DEG = 'Y_deg'
SET_POP = 'Pop'  # Population in people per point (equally, people per km2)
SET_POP_CALIB = 'PopStartCalibrated'  # Calibrated population to reference year, same units
SET_POP_FUTURE = 'PopFuture'  # Projected future population, same units
SET_GRID_DIST_CURRENT = 'GridDistCurrent'  # Distance in km from current grid
SET_GRID_DIST_PLANNED = 'GridDistPlan'  # Distance in km from current and future grid
SET_ROAD_DIST = 'RoadDist'  # Distance in km from road network
SET_NIGHT_LIGHTS = 'NightLights'  # Intensity of night time lights (from NASA), range 0 - 63
SET_TRAVEL_HOURS = 'TravelHours'  # Travel time to large city in hours
SET_GHI = 'GHI'  # Global horizontal irradiance in kWh/m2/day
SET_WINDVEL = 'WindVel'  # Wind velocity in m/s
SET_WINDCF = 'WindCF'  # Wind capacity factor as percentage (range 0 - 1)
SET_HYDRO = 'Hydropower'  # Hydropower potential in kW
SET_HYDRO_DIST = 'HydropowerDist'  # Distance to hydropower site in km
SET_HYDRO_FID = 'HydropowerFID'  # the unique tag for each hydropower site, to not over-utilise
SET_SUBSTATION_DIST = 'SubstationDist'
SET_ELEVATION = 'Elevation'  # in metres
SET_SLOPE = 'Slope'  # in degrees
SET_LAND_COVER = 'LandCover'
SET_SOLAR_RESTRICTION = 'SolarRestriction'

# Classified (scored 1-5) versions of the raw grid-suitability layers,
# produced by SettlementProcessor.grid_penalties().
SET_ROAD_DIST_CLASSIFIED = 'RoadDistClassified'
SET_SUBSTATION_DIST_CLASSIFIED = 'SubstationDistClassified'
SET_ELEVATION_CLASSIFIED = 'ElevationClassified'
SET_SLOPE_CLASSIFIED = 'SlopeClassified'
SET_LAND_COVER_CLASSIFIED = 'LandCoverClassified'
SET_COMBINED_CLASSIFICATION = 'GridClassification'
SET_GRID_PENALTY = 'GridPenalty'
SET_URBAN = 'IsUrban'  # Whether the site is urban (0 or 1)
SET_ENERGY_PER_HH = 'EnergyPerHH'
SET_NUM_PEOPLE_PER_HH = 'NumPeoplePerHH'
SET_ELEC_CURRENT = 'ElecStart'  # If the site is currently electrified (0 or 1)
SET_ELEC_FUTURE = 'ElecFuture'  # If the site has the potential to be 'easily' electrified in future
SET_NEW_CONNECTIONS = 'NewConnections'  # Number of new people with electricity connections
SET_MIN_GRID_DIST = 'MinGridDist'

# Per-technology LCOE result columns.
SET_LCOE_GRID = 'Grid'  # All lcoes in USD/kWh
SET_LCOE_SA_PV = 'SA_PV'
SET_LCOE_SA_DIESEL = 'SA_Diesel'
SET_LCOE_MG_WIND = 'MG_Wind'
SET_LCOE_MG_DIESEL = 'MG_Diesel'
SET_LCOE_MG_PV = 'MG_PV'
SET_LCOE_MG_HYDRO = 'MG_Hydro'
SET_MIN_OFFGRID = 'MinimumOffgrid'  # The technology with lowest lcoe (excluding grid)
SET_MIN_OVERALL = 'MinimumOverall'  # Same as above, but including grid
SET_MIN_OFFGRID_LCOE = 'MinimumTechLCOE'  # The lcoe value for minimum tech
SET_MIN_OVERALL_LCOE = 'MinimumOverallLCOE'  # The lcoe value for overall minimum
SET_MIN_OVERALL_CODE = 'MinimumOverallCode'  # And a code from 1 - 7 to represent that option
SET_MIN_CATEGORY = 'MinimumCategory'  # The category with minimum lcoe (grid, minigrid or standalone)
SET_NEW_CAPACITY = 'NewCapacity'  # Capacity in kW
SET_INVESTMENT_COST = 'InvestmentCost'  # The investment cost in USD

# Columns in the specs file must match these exactly
SPE_COUNTRY = 'Country'
SPE_POP = 'Pop2015'  # The actual population in the base year
SPE_URBAN = 'UrbanRatio2015'  # The ratio of urban population (range 0 - 1) in base year
SPE_POP_FUTURE = 'Pop2030'
SPE_URBAN_FUTURE = 'UrbanRatio2030'
SPE_URBAN_MODELLED = 'UrbanRatioModelled'  # The urban ratio in the model after calibration (for comparison)
SPE_URBAN_CUTOFF = 'UrbanCutOff'  # The urban cutoff population calibrated by the model, in people per km2
SPE_URBAN_GROWTH = 'UrbanGrowth'  # The urban growth rate as a simple multiplier (urban pop future / urban pop present)
SPE_RURAL_GROWTH = 'RuralGrowth'  # Same as for urban
SPE_NUM_PEOPLE_PER_HH_RURAL = 'NumPeoplePerHHRural'
SPE_NUM_PEOPLE_PER_HH_URBAN = 'NumPeoplePerHHUrban'
SPE_DIESEL_PRICE_LOW = 'DieselPriceLow'  # Diesel price in USD/litre
SPE_DIESEL_PRICE_HIGH = 'DieselPriceHigh'  # Same, with a high forecast var
SPE_GRID_PRICE = 'GridPrice'  # Grid price of electricity in USD/kWh
SPE_GRID_CAPACITY_INVESTMENT = 'GridCapacityInvestmentCost'  # grid capacity investments costs from TEMBA USD/kW
SPE_GRID_LOSSES = 'GridLosses'  # As a ratio (0 - 1)
SPE_BASE_TO_PEAK = 'BaseToPeak'  # As a ratio (0 - 1)
SPE_EXISTING_GRID_COST_RATIO = 'ExistingGridCostRatio'
SPE_MAX_GRID_DIST = 'MaxGridDist'
SPE_ELEC = 'ElecActual'  # Actual current percentage electrified population (0 - 1)
SPE_ELEC_MODELLED = 'ElecModelled'  # The modelled version after calibration (for comparison)
SPE_MIN_NIGHT_LIGHTS = 'MinNightLights'
SPE_MAX_GRID_EXTENSION_DIST = 'MaxGridExtensionDist'
SPE_MAX_ROAD_DIST = 'MaxRoadDist'
SPE_POP_CUTOFF1 = 'PopCutOffRoundOne'
SPE_POP_CUTOFF2 = 'PopCutOffRoundTwo'
class Technology:
    """
    Used to define the parameters for each electricity access technology, and to calculate the LCOE depending on
    input parameters.

    A positive grid_price marks an instance as the grid technology; a
    positive diesel_price marks it as diesel-based (see get_lcoe).
    """

    # Class-level defaults, shared by all technologies and overridable
    # once per run via set_default_values().
    start_year = 2015
    end_year = 2030
    discount_rate = 0.08
    grid_cell_area = 1  # in km2, normally 1km2
    mv_line_cost = 9000  # USD/km
    lv_line_cost = 5000  # USD/km
    mv_line_capacity = 50  # kW/line
    lv_line_capacity = 10  # kW/line
    lv_line_max_length = 30  # km
    hv_line_cost = 53000  # USD/km
    mv_line_max_length = 50  # km
    hv_lv_transformer_cost = 5000  # USD/unit
    mv_increase_rate = 0.1  # percentage

    def __init__(self,
                 tech_life,  # in years
                 base_to_peak_load_ratio,
                 distribution_losses=0,  # percentage
                 connection_cost_per_hh=0,  # USD/hh
                 om_costs=0.0,  # OM costs as percentage of capital costs
                 capital_cost=0,  # USD/kW
                 capacity_factor=1.0,  # percentage
                 efficiency=1.0,  # percentage
                 diesel_price=0.0,  # USD/litre
                 grid_price=0.0,  # USD/kWh for grid electricity
                 standalone=False,
                 grid_capacity_investment=0.0,  # USD/kW for on-grid capacity investments (excluding grid itself)
                 diesel_truck_consumption=0,  # litres/hour
                 diesel_truck_volume=0,  # litres
                 om_of_td_lines=0):  # percentage
        """Store the per-technology cost and performance parameters."""
        self.distribution_losses = distribution_losses
        self.connection_cost_per_hh = connection_cost_per_hh
        self.base_to_peak_load_ratio = base_to_peak_load_ratio
        self.tech_life = tech_life
        self.om_costs = om_costs
        self.capital_cost = capital_cost
        self.capacity_factor = capacity_factor
        self.efficiency = efficiency
        self.diesel_price = diesel_price
        self.grid_price = grid_price
        self.standalone = standalone
        self.grid_capacity_investment = grid_capacity_investment
        self.diesel_truck_consumption = diesel_truck_consumption
        self.diesel_truck_volume = diesel_truck_volume
        self.om_of_td_lines = om_of_td_lines

    @classmethod
    def set_default_values(cls, start_year, end_year, discount_rate, grid_cell_area, mv_line_cost, lv_line_cost,
                           mv_line_capacity, lv_line_capacity, lv_line_max_length, hv_line_cost, mv_line_max_length,
                           hv_lv_transformer_cost, mv_increase_rate):
        """Override the class-level defaults shared by every Technology."""
        cls.start_year = start_year
        cls.end_year = end_year
        cls.discount_rate = discount_rate
        cls.grid_cell_area = grid_cell_area
        cls.mv_line_cost = mv_line_cost
        cls.lv_line_cost = lv_line_cost
        cls.mv_line_capacity = mv_line_capacity
        cls.lv_line_capacity = lv_line_capacity
        cls.lv_line_max_length = lv_line_max_length
        cls.hv_line_cost = hv_line_cost
        cls.mv_line_max_length = mv_line_max_length
        cls.hv_lv_transformer_cost = hv_lv_transformer_cost
        cls.mv_increase_rate = mv_increase_rate

    def get_lcoe(self, energy_per_hh, people, num_people_per_hh, additional_mv_line_length=0, capacity_factor=0,
                 mv_line_length=0, travel_hours=0, get_investment_cost=False):
        """
        Calculates the LCOE depending on the parameters. Optionally calculates the investment cost instead.

        The only required parameters are energy_per_hh, people and num_people_per_hh
        additional_mv_line_length required for grid
        capacity_factor required for PV and wind
        mv_line_length required for hydro
        travel_hours required for diesel

        Returns USD/kWh, or total USD when get_investment_cost is True.
        """
        if people == 0:
            # If there are no people, the investment cost is zero.
            if get_investment_cost:
                return 0
            # Otherwise we set the people low (prevent div/0 error) and continue.
            else:
                people = 0.00001

        # If a new capacity factor isn't given, use the class capacity factor (for hydro, diesel etc)
        if capacity_factor == 0:
            capacity_factor = self.capacity_factor

        consumption = people / num_people_per_hh * energy_per_hh  # kWh/year
        average_load = consumption * (1 + self.distribution_losses) / HOURS_PER_YEAR  # kW
        peak_load = average_load / self.base_to_peak_load_ratio  # kW

        # Size the MV/LV distribution network within one grid cell.
        no_mv_lines = peak_load / self.mv_line_capacity
        no_lv_lines = peak_load / self.lv_line_capacity
        lv_networks_lim_capacity = no_lv_lines / no_mv_lines
        lv_networks_lim_length = ((self.grid_cell_area / no_mv_lines) / (self.lv_line_max_length / sqrt(2))) ** 2
        actual_lv_lines = min([people / num_people_per_hh, max([lv_networks_lim_capacity, lv_networks_lim_length])])
        hh_per_lv_network = (people / num_people_per_hh) / (actual_lv_lines * no_mv_lines)
        lv_unit_length = sqrt(self.grid_cell_area / (people / num_people_per_hh)) * sqrt(2) / 2
        lv_lines_length_per_lv_network = 1.333 * hh_per_lv_network * lv_unit_length
        total_lv_lines_length = no_mv_lines * actual_lv_lines * lv_lines_length_per_lv_network
        line_reach = (self.grid_cell_area / no_mv_lines) / (2 * sqrt(self.grid_cell_area / no_lv_lines))
        total_length_of_lines = min([line_reach, self.mv_line_max_length]) * no_mv_lines
        additional_hv_lines = max(
            [0, round(sqrt(self.grid_cell_area) / (2 * min([line_reach, self.mv_line_max_length])) / 10, 3) - 1])
        hv_lines_total_length = (sqrt(self.grid_cell_area) / 2) * additional_hv_lines * sqrt(self.grid_cell_area)
        num_transformers = additional_hv_lines + no_mv_lines + (no_mv_lines * actual_lv_lines)
        generation_per_year = average_load * HOURS_PER_YEAR

        # The investment and O&M costs are different for grid and non-grid solutions
        if self.grid_price > 0:
            # Grid: T&D network plus the extra MV line to reach the settlement,
            # whose unit cost grows with length (mv_increase_rate per 5 km).
            td_investment_cost = hv_lines_total_length * self.hv_line_cost + \
                                 total_length_of_lines * self.mv_line_cost + \
                                 total_lv_lines_length * self.lv_line_cost + \
                                 num_transformers * self.hv_lv_transformer_cost + \
                                 (people / num_people_per_hh) * self.connection_cost_per_hh + \
                                 additional_mv_line_length * (
                                     self.mv_line_cost * (1 + self.mv_increase_rate) **
                                     ((additional_mv_line_length / 5) - 1))
            td_om_cost = td_investment_cost * self.om_of_td_lines
            total_investment_cost = td_investment_cost
            total_om_cost = td_om_cost
            fuel_cost = self.grid_price
        else:
            # Standalone systems need no LV network at all; mini-grids use 75%.
            total_lv_lines_length *= 0 if self.standalone else 0.75
            mv_total_line_cost = self.mv_line_cost * mv_line_length
            lv_total_line_cost = self.lv_line_cost * total_lv_lines_length
            installed_capacity = peak_load / capacity_factor
            capital_investment = installed_capacity * self.capital_cost
            td_investment_cost = mv_total_line_cost + lv_total_line_cost + (
                people / num_people_per_hh) * self.connection_cost_per_hh
            td_om_cost = td_investment_cost * self.om_of_td_lines
            total_investment_cost = td_investment_cost + capital_investment
            total_om_cost = td_om_cost + (self.capital_cost * self.om_costs * installed_capacity)

            # If a diesel price has been passed, the technology is diesel
            if self.diesel_price > 0:
                # And we apply the Szabo formula to calculate the transport cost for the diesel
                # p = (p_d + 2*p_d*consumption*time/volume)*(1/mu)*(1/LHVd)
                fuel_cost = (self.diesel_price + 2 * self.diesel_price * self.diesel_truck_consumption * travel_hours /
                             self.diesel_truck_volume) / LHV_DIESEL / self.efficiency
            # Otherwise it's hydro/wind etc with no fuel cost
            else:
                fuel_cost = 0

        # Perform the time-value LCOE calculation
        project_life = self.end_year - self.start_year
        reinvest_year = 0
        # If the technology life is less than the project life, we will have to invest twice to buy it again
        if self.tech_life < project_life:
            reinvest_year = self.tech_life

        year = np.arange(project_life)
        el_gen = generation_per_year * np.ones(project_life)
        el_gen[0] = 0  # no generation in the construction year
        discount_factor = (1 + self.discount_rate) ** year
        investments = np.zeros(project_life)
        investments[0] = total_investment_cost
        if reinvest_year:
            investments[reinvest_year] = total_investment_cost

        salvage = np.zeros(project_life)
        used_life = project_life
        if reinvest_year:
            # so salvage will come from the remaining life after the re-investment
            used_life = project_life - self.tech_life
        salvage[-1] = total_investment_cost * (1 - used_life / self.tech_life)

        operation_and_maintenance = total_om_cost * np.ones(project_life)
        operation_and_maintenance[0] = 0
        fuel = el_gen * fuel_cost
        fuel[0] = 0

        # So we also return the total investment cost for this number of people
        if get_investment_cost:
            discounted_investments = investments / discount_factor
            return np.sum(discounted_investments) + self.grid_capacity_investment * peak_load
        else:
            # LCOE = discounted lifetime costs / discounted lifetime generation
            discounted_costs = (investments + operation_and_maintenance + fuel - salvage) / discount_factor
            discounted_generation = el_gen / discount_factor
            return np.sum(discounted_costs) / np.sum(discounted_generation)

    def get_grid_table(self, energy_per_hh, num_people_per_hh, max_dist):
        """
        Uses calc_lcoe to generate a 2D grid with the grid LCOEs, for faster access in the electrification algorithm

        Returns a nested dict keyed [people][additional_mv_line_length].
        """
        logging.info('Creating a grid table for {} kWh/hh/year'.format(energy_per_hh))

        # Coarser resolution at the high end (just to catch the few places with exceptional population density)
        # The electrification algorithm must round off with the same scheme
        people_arr_direct = list(range(1000)) + list(range(1000, 10000, 10)) + list(range(10000, 350000, 1000))
        elec_dists = range(0, int(max_dist) + 20)  # add twenty to handle edge cases
        grid_lcoes = pd.DataFrame(index=elec_dists, columns=people_arr_direct)

        for people in people_arr_direct:
            for additional_mv_line_length in elec_dists:
                grid_lcoes[people][additional_mv_line_length] = self.get_lcoe(
                    energy_per_hh=energy_per_hh,
                    people=people,
                    num_people_per_hh=num_people_per_hh,
                    additional_mv_line_length=additional_mv_line_length)

        return grid_lcoes.to_dict()
class SettlementProcessor:
"""
Processes the dataframe and adds all the columns to determine the cheapest option and the final costs and summaries
"""
def __init__(self, path):
try:
self.df = pd.read_csv(path)
except FileNotFoundError:
print('You need to first split into a base directory and prep!')
raise
def condition_df(self):
    """
    Do any initial data conditioning that may be required.

    Coerces the raw GIS columns to numeric, zero-fills nulls, sorts the
    settlements, and derives degree coordinates from the km coordinates.
    """
    logging.info('Ensure that columns that are supposed to be numeric are numeric')
    numeric_columns = (SET_GHI, SET_WINDVEL, SET_NIGHT_LIGHTS, SET_ELEVATION,
                       SET_SLOPE, SET_LAND_COVER, SET_GRID_DIST_CURRENT,
                       SET_GRID_DIST_PLANNED, SET_SUBSTATION_DIST,
                       SET_ROAD_DIST, SET_HYDRO_DIST, SET_HYDRO,
                       SET_SOLAR_RESTRICTION)
    for column in numeric_columns:
        self.df[column] = pd.to_numeric(self.df[column], errors='coerce')

    logging.info('Replace null values with zero')
    self.df.fillna(0, inplace=True)

    logging.info('Sort by country, Y and X')
    self.df.sort_values(by=[SET_COUNTRY, SET_Y, SET_X], inplace=True)

    logging.info('Add columns with location in degrees')
    project = Proj('+proj=merc +lon_0=0 +k=1 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs')

    def to_degrees(row):
        # Coordinates are stored in km; convert to metres before the
        # inverse Mercator projection back to lon/lat degrees.
        return project(row[SET_X] * 1000, row[SET_Y] * 1000, inverse=True)

    self.df[SET_X_DEG] = self.df.apply(lambda row: to_degrees(row)[0], axis=1)
    self.df[SET_Y_DEG] = self.df.apply(lambda row: to_degrees(row)[1], axis=1)
def grid_penalties(self):
    """
    Add a grid penalty factor to increase the grid cost in areas that have
    higher road distance, higher substation distance, unsuitable land cover,
    high slope angle or high elevation.

    Each layer is scored 1 (worst) to 5 (best); the weighted combination is
    then mapped to a multiplicative penalty.
    """

    def score_by_thresholds(value, thresholds):
        # thresholds: ordered (upper_bound, score) pairs; the first bound
        # the value does not exceed wins, otherwise the score is 1.
        for bound, score in thresholds:
            if value <= bound:
                return score
        return 1

    def classify_road_dist(row):
        return score_by_thresholds(row[SET_ROAD_DIST],
                                   ((5, 5), (10, 4), (25, 3), (50, 2)))

    def classify_substation_dist(row):
        return score_by_thresholds(row[SET_SUBSTATION_DIST],
                                   ((0.5, 5), (1, 4), (5, 3), (10, 2)))

    # Suitability score for each land-cover class code.
    land_cover_scores = {0: 1, 1: 3, 2: 4, 3: 3, 4: 4, 5: 3, 6: 2, 7: 5,
                         8: 2, 9: 5, 10: 5, 11: 1, 12: 3, 13: 3, 14: 5,
                         15: 3, 16: 5}

    def classify_land_cover(row):
        # NOTE(review): as with the original if/elif chain, codes outside
        # 0-16 yield None (NaN downstream) - confirm inputs stay in range.
        return land_cover_scores.get(row[SET_LAND_COVER])

    def classify_elevation(row):
        return score_by_thresholds(row[SET_ELEVATION],
                                   ((500, 5), (1000, 4), (2000, 3), (3000, 2)))

    def classify_slope(row):
        return score_by_thresholds(row[SET_SLOPE],
                                   ((10, 5), (20, 4), (30, 3), (40, 2)))

    def set_penalty(row):
        # Penalty grows exponentially as the combined score falls below 1.
        classification = row[SET_COMBINED_CLASSIFICATION]
        return 1 + (exp(0.85 * abs(1 - classification)) - 1) / 100

    logging.info('Classify road dist')
    self.df[SET_ROAD_DIST_CLASSIFIED] = self.df.apply(classify_road_dist, axis=1)

    logging.info('Classify substation dist')
    self.df[SET_SUBSTATION_DIST_CLASSIFIED] = self.df.apply(classify_substation_dist, axis=1)

    logging.info('Classify land cover')
    self.df[SET_LAND_COVER_CLASSIFIED] = self.df.apply(classify_land_cover, axis=1)

    logging.info('Classify elevation')
    self.df[SET_ELEVATION_CLASSIFIED] = self.df.apply(classify_elevation, axis=1)

    logging.info('Classify slope')
    self.df[SET_SLOPE_CLASSIFIED] = self.df.apply(classify_slope, axis=1)

    logging.info('Combined classification')
    self.df[SET_COMBINED_CLASSIFICATION] = (0.05 * self.df[SET_ROAD_DIST_CLASSIFIED] +
                                            0.09 * self.df[SET_SUBSTATION_DIST_CLASSIFIED] +
                                            0.39 * self.df[SET_LAND_COVER_CLASSIFIED] +
                                            0.15 * self.df[SET_ELEVATION_CLASSIFIED] +
                                            0.32 * self.df[SET_SLOPE_CLASSIFIED])

    logging.info('Grid penalty')
    self.df[SET_GRID_PENALTY] = self.df.apply(set_penalty, axis=1)
def calc_wind_cfs(self):
    """
    Calculate the wind capacity factor based on the average wind velocity.

    Writes the result (range 0 - 1) into the SET_WINDCF column.
    """
    mu = 0.97  # availability factor
    t = 8760  # hours per year
    p_rated = 600  # rated turbine power; matches the plateau of p_curve
    z = 55  # hub height
    zr = 80  # velocity measurement height
    es = 0.85  # losses in wind electricity
    u_arr = range(1, 26)  # wind speeds 1..25 matching p_curve entries
    # Turbine power output at each wind speed in u_arr.
    p_curve = [0, 0, 0, 0, 30, 77, 135, 208, 287, 371, 450, 514, 558,
               582, 594, 598, 600, 600, 600, 600, 600, 600, 600, 600, 600]

    def get_wind_cf(row):
        # Capacity factor for a single settlement; 0 where no wind data.
        u_zr = row[SET_WINDVEL]
        if u_zr == 0:
            return 0
        else:
            # Adjust for the correct hub height
            alpha = (0.37 - 0.088 * log(u_zr)) / (1 - 0.088 * log(zr / 10))
            u_z = u_zr * (z / zr) ** alpha

            # Rayleigh distribution and sum of series
            rayleigh = [(pi / 2) * (u / u_z ** 2) * exp((-pi / 4) * (u / u_z) ** 2) for u in u_arr]
            energy_produced = sum([mu * es * t * p * r for p, r in zip(p_curve, rayleigh)])

            return energy_produced/(p_rated * t)

    logging.info('Calculate Wind CF')
    self.df[SET_WINDCF] = self.df.apply(get_wind_cf, axis=1)
def calibrate_pop_and_urban(self, pop_actual, pop_future, urban, urban_future, urban_cutoff):
    """
    Calibrate the actual current population, the urban split and forecast the future population

    pop_actual/pop_future: country totals for the base and target years.
    urban/urban_future: target urban population ratios (0 - 1).
    urban_cutoff: starting people-per-km2 threshold for the urban search.
    Returns (final urban_cutoff, urban ratio actually modelled).
    """
    # Calculate the ratio between the actual population and the total population from the GIS layer
    logging.info('Calibrate current population')
    pop_ratio = pop_actual/self.df[SET_POP].sum()

    # And use this ratio to calibrate the population in a new column
    self.df[SET_POP_CALIB] = self.df.apply(lambda row: row[SET_POP] * pop_ratio, axis=1)

    # Calculate the urban split, by calibrating the cutoff until the target ratio is achieved
    # Keep looping until it is satisfied or another break conditions is reached
    logging.info('Calibrate urban split')
    count = 0
    prev_vals = []  # Stores cutoff values that have already been tried to prevent getting stuck in a loop
    accuracy = 0.005
    max_iterations = 30
    urban_modelled = 0
    while True:
        # Assign the 1 (urban)/0 (rural) values to each cell
        self.df[SET_URBAN] = self.df.apply(lambda row: 1 if row[SET_POP_CALIB] > urban_cutoff else 0, axis=1)

        # Get the calculated urban ratio, and limit it to within reasonable boundaries
        pop_urb = self.df.loc[self.df[SET_URBAN] == 1, SET_POP_CALIB].sum()
        urban_modelled = pop_urb / pop_actual

        if urban_modelled == 0:
            urban_modelled = 0.05
        elif urban_modelled == 1:
            urban_modelled = 0.999

        if abs(urban_modelled - urban) < accuracy:
            break
        else:
            # Proportional adjustment of the cutoff, clamped to [0.005, 10000]
            # via the sorted()[1] median trick.
            urban_cutoff = sorted([0.005, urban_cutoff - urban_cutoff * 2 *
                                   (urban - urban_modelled) / urban, 10000.0])[1]

        if urban_cutoff in prev_vals:
            logging.info('NOT SATISFIED: repeating myself')
            break
        else:
            prev_vals.append(urban_cutoff)

        if count >= max_iterations:
            logging.info('NOT SATISFIED: got to {}'.format(max_iterations))
            break

        count += 1

    # Project future population, with separate growth rates for urban and rural
    logging.info('Project future population')

    urban_growth = (urban_future * pop_future) / (urban * pop_actual)
    rural_growth = ((1 - urban_future) * pop_future) / ((1 - urban) * pop_actual)

    self.df[SET_POP_FUTURE] = self.df.apply(lambda row: row[SET_POP_CALIB] * urban_growth
                                            if row[SET_URBAN] == 1
                                            else row[SET_POP_CALIB] * rural_growth,
                                            axis=1)

    return urban_cutoff, urban_modelled
def elec_current_and_future(self, elec_actual, pop_cutoff, min_night_lights, max_grid_dist,
                            max_road_dist, pop_tot, pop_cutoff2):
    """
    Calibrate the current electrification status, and future 'pre-electrification' status

    Iteratively adjusts the thresholds until the modelled electrified
    population ratio matches elec_actual within 0.005.  Round one adjusts
    night lights / grid distance / road distance; if that stalls, round two
    adjusts the population cutoffs instead.
    Returns the final (min_night_lights, max_grid_dist, max_road_dist,
    elec_modelled, pop_cutoff, pop_cutoff2).
    """
    # Calibrate current electrification
    logging.info('Calibrate current electrification')
    is_round_two = False
    grid_cutoff2 = 10
    road_cutoff2 = 10
    count = 0
    prev_vals = []
    accuracy = 0.005
    max_iterations_one = 30
    max_iterations_two = 60
    elec_modelled = 0

    while True:
        # Assign the 1 (electrified)/0 (un-electrified) values to each cell
        self.df[SET_ELEC_CURRENT] = self.df.apply(lambda row:
                                                  1
                                                  if (row[SET_NIGHT_LIGHTS] > min_night_lights and
                                                      (row[SET_POP_CALIB] > pop_cutoff or
                                                       row[SET_GRID_DIST_CURRENT] < max_grid_dist or
                                                       row[SET_ROAD_DIST] < max_road_dist))
                                                  or (row[SET_POP_CALIB] > pop_cutoff2 and
                                                      (row[SET_GRID_DIST_CURRENT] < grid_cutoff2 or
                                                       row[SET_ROAD_DIST] < road_cutoff2))
                                                  else 0,
                                                  axis=1)

        # Get the calculated electrified ratio, and limit it to within reasonable boundaries
        pop_elec = self.df.loc[self.df[SET_ELEC_CURRENT] == 1, SET_POP_CALIB].sum()
        elec_modelled = pop_elec / pop_tot

        if elec_modelled == 0:
            elec_modelled = 0.01
        elif elec_modelled == 1:
            elec_modelled = 0.99

        if abs(elec_modelled - elec_actual) < accuracy:
            break
        elif not is_round_two:
            # Round one: proportional threshold adjustments, each clamped to
            # its valid range via the sorted()[1] median trick.
            min_night_lights = sorted([5, min_night_lights - min_night_lights * 2 *
                                       (elec_actual - elec_modelled) / elec_actual, 60])[1]
            max_grid_dist = sorted([5, max_grid_dist + max_grid_dist * 2 *
                                    (elec_actual - elec_modelled) / elec_actual, 150])[1]
            max_road_dist = sorted([0.5, max_road_dist + max_road_dist * 2 *
                                    (elec_actual - elec_modelled) / elec_actual, 50])[1]
        elif elec_modelled - elec_actual < 0:
            pop_cutoff2 = sorted([0.01, pop_cutoff2 - pop_cutoff2 *
                                  (elec_actual - elec_modelled) / elec_actual, 100000])[1]
        elif elec_modelled - elec_actual > 0:
            pop_cutoff = sorted([0.01, pop_cutoff - pop_cutoff * 0.5 *
                                 (elec_actual - elec_modelled) / elec_actual, 10000])[1]

        # Fingerprint of the current thresholds, used to detect cycling.
        constraints = '{}{}{}{}{}'.format(pop_cutoff, min_night_lights, max_grid_dist, max_road_dist, pop_cutoff2)
        if constraints in prev_vals and not is_round_two:
            logging.info('Repeating myself, on to round two')
            prev_vals = []
            is_round_two = True
        elif constraints in prev_vals and is_round_two:
            logging.info('NOT SATISFIED: repeating myself')
            break
        else:
            prev_vals.append(constraints)

        if count >= max_iterations_one and not is_round_two:
            logging.info('Got to {}, on to round two'.format(max_iterations_one))
            is_round_two = True
        elif count >= max_iterations_two and is_round_two:
            logging.info('NOT SATISFIED: Got to {}'.format(max_iterations_two))
            break

        count += 1

    logging.info('Calculate new connections')
    # Electrified sites only need connections for the population growth;
    # un-electrified sites need connections for everyone.
    self.df.loc[self.df[SET_ELEC_CURRENT] == 1, SET_NEW_CONNECTIONS] =\
        self.df[SET_POP_FUTURE] - self.df[SET_POP_CALIB]
    self.df.loc[self.df[SET_ELEC_CURRENT] == 0, SET_NEW_CONNECTIONS] = self.df[SET_POP_FUTURE]
    self.df.loc[self.df[SET_NEW_CONNECTIONS] < 0, SET_NEW_CONNECTIONS] = 0

    return min_night_lights, max_grid_dist, max_road_dist, elec_modelled, pop_cutoff, pop_cutoff2
@staticmethod
def separate_elec_status(elec_status):
"""
Separate out the electrified and unelectrified states from list.
"""
electrified = []
unelectrified = []
for i, status in enumerate(elec_status):
if status:
electrified.append(i)
else:
unelectrified.append(i)
return electrified, unelectrified
@staticmethod
def get_2d_hash_table(x, y, unelectrified, distance_limit):
"""
Generates the 2D Hash Table with the unelectrified locations hashed into the table for easy O(1) access.
"""
hash_table = defaultdict(lambda: defaultdict(list))
for unelec_row in unelectrified:
hash_x = int(x[unelec_row] / distance_limit)
hash_y = int(y[unelec_row] / distance_limit)
hash_table[hash_x][hash_y].append(unelec_row)
return hash_table
@staticmethod
def get_unelectrified_rows(hash_table, elec_row, x, y, distance_limit):
"""
Returns all the unelectrified locations close to the electrified location
based on the distance boundary limit specified by asking the 2D hash table.
"""
unelec_list = []
hash_x = int(x[elec_row] / distance_limit)
hash_y = int(y[elec_row] / distance_limit)
unelec_list.extend(hash_table.get(hash_x, {}).get(hash_y, []))
unelec_list.extend(hash_table.get(hash_x, {}).get(hash_y - 1, []))
unelec_list.extend(hash_table.get(hash_x, {}).get(hash_y + 1, []))
unelec_list.extend(hash_table.get(hash_x + 1, {}).get(hash_y, []))
unelec_list.extend(hash_table.get(hash_x + 1, {}).get(hash_y - 1, []))
unelec_list.extend(hash_table.get(hash_x + 1, {}).get(hash_y + 1, []))
unelec_list.extend(hash_table.get(hash_x - 1, {}).get(hash_y, []))
unelec_list.extend(hash_table.get(hash_x - 1, {}).get(hash_y - 1, []))
unelec_list.extend(hash_table.get(hash_x - 1, {}).get(hash_y + 1, []))
return unelec_list
def pre_elec(self, grid_lcoes_rural, grid_lcoes_urban, pre_elec_dist):
    """
    Determine which settlements are economically close to existing or planned
    grid lines, and should be considered electrified in the electrification
    algorithm. Returns the updated status list for the near-grid subset.
    """
    near = self.df.loc[self.df[SET_GRID_DIST_PLANNED] < pre_elec_dist]
    pop = near[SET_POP_FUTURE].tolist()
    urban = near[SET_URBAN].tolist()
    grid_penalty_ratio = near[SET_GRID_PENALTY].tolist()
    status = near[SET_ELEC_CURRENT].tolist()
    min_tech_lcoes = near[SET_MIN_OFFGRID_LCOE].tolist()
    dist_planned = near[SET_GRID_DIST_PLANNED].tolist()
    _, unelectrified = self.separate_elec_status(status)
    for row in unelectrified:
        people = pop[row]
        # snap the population to the nearest row of the LCOE lookup tables
        if people < 1000:
            bin_pop = int(people)
        elif people < 10000:
            bin_pop = 10 * round(people / 10)
        else:
            bin_pop = 1000 * round(people / 1000)
        lookup = grid_lcoes_urban if urban[row] else grid_lcoes_rural
        grid_lcoe = lookup[bin_pop][int(grid_penalty_ratio[row] * dist_planned[row])]
        # mark the settlement as electrified when the grid beats its best off-grid option
        if grid_lcoe < min_tech_lcoes[row]:
            status[row] = 1
    return status
def elec_extension(self, grid_lcoes_rural, grid_lcoes_urban, existing_grid_cost_ratio, max_dist):
    """
    Iterate through all electrified settlements and find which settlements can be economically connected to the grid
    Repeat with newly electrified settlements until no more are added

    Returns (new_lcoes, cell_path_adjusted): the grid LCOE per settlement and
    the penalty-adjusted grid distance used for that LCOE.
    """
    x = self.df[SET_X].tolist()
    y = self.df[SET_Y].tolist()
    pop = self.df[SET_POP_FUTURE].tolist()
    urban = self.df[SET_URBAN].tolist()
    grid_penalty_ratio = self.df[SET_GRID_PENALTY].tolist()
    status = self.df[SET_ELEC_FUTURE].tolist()
    min_tech_lcoes = self.df[SET_MIN_OFFGRID_LCOE].tolist()
    new_lcoes = self.df[SET_LCOE_GRID].tolist()
    # real and penalty-adjusted line length already built to reach each cell
    cell_path_real = list(np.zeros(len(status)).tolist())
    cell_path_adjusted = list(np.zeros(len(status)).tolist())
    electrified, unelectrified = self.separate_elec_status(status)
    loops = 1
    while len(electrified) > 0:
        logging.info('Electrification loop {} with {} electrified'.format(loops, len(electrified)))
        loops += 1
        hash_table = self.get_2d_hash_table(x, y, unelectrified, max_dist)
        changes = []
        for elec in electrified:
            unelectrified_hashed = self.get_unelectrified_rows(hash_table, elec, x, y, max_dist)
            for unelec in unelectrified_hashed:
                prev_dist = cell_path_real[elec]
                dist = sqrt((x[elec] - x[unelec]) ** 2 + (y[elec] - y[unelec]) ** 2)
                if prev_dist + dist < max_dist:
                    # snap the population to the nearest row of the LCOE lookup tables
                    pop_index = pop[unelec]
                    if pop_index < 1000:
                        pop_index = int(pop_index)
                    elif pop_index < 10000:
                        pop_index = 10 * round(pop_index / 10)
                    else:
                        pop_index = 1000 * round(pop_index / 1000)
                    # already-built line length is discounted by existing_grid_cost_ratio
                    dist_adjusted = grid_penalty_ratio[unelec] * (dist + existing_grid_cost_ratio * prev_dist)
                    if urban[unelec]:
                        grid_lcoe = grid_lcoes_urban[pop_index][int(dist_adjusted)]
                    else:
                        grid_lcoe = grid_lcoes_rural[pop_index][int(dist_adjusted)]
                    if grid_lcoe < min_tech_lcoes[unelec]:
                        if grid_lcoe < new_lcoes[unelec]:
                            new_lcoes[unelec] = grid_lcoe
                            cell_path_real[unelec] = dist + prev_dist
                            cell_path_adjusted[unelec] = dist_adjusted
                            if unelec not in changes:
                                changes.append(unelec)
        electrified = changes[:]
        # BUGFIX: the original comprehension reused the name `x` for its loop
        # variable, shadowing the x-coordinate list (and clobbering it outright
        # under Python 2 scoping). A set also makes the membership test O(1)
        # instead of O(len(electrified)) per remaining settlement.
        newly_electrified = set(electrified)
        unelectrified = [row for row in unelectrified if row not in newly_electrified]
    return new_lcoes, cell_path_adjusted
def run_elec(self, grid_lcoes_rural, grid_lcoes_urban, grid_price, existing_grid_cost_ratio, max_dist):
    """
    Runs the pre-elec and grid extension algorithms

    grid_lcoes_rural/grid_lcoes_urban: grid LCOE lookup tables indexed by
    population bin and distance (see pre_elec/elec_extension).
    grid_price: LCOE assigned to settlements that are already (pre-)electrified.
    existing_grid_cost_ratio: discount applied to already-built line length.
    max_dist: maximum extension distance per connection step
    (presumably km, like pre_elec_dist below -- TODO confirm).
    """
    # Calculate 2030 pre-electrification
    logging.info('Determine future pre-electrification status')
    self.df[SET_ELEC_FUTURE] = self.df.apply(lambda row: 1 if row[SET_ELEC_CURRENT] == 1 else 0, axis=1)
    pre_elec_dist = 10  # The maximum distance from the grid in km to pre-electrifiy settlements
    self.df.loc[self.df[SET_GRID_DIST_PLANNED] < pre_elec_dist, SET_ELEC_FUTURE] = self.pre_elec(grid_lcoes_rural,
                                                                                                grid_lcoes_urban,
                                                                                                pre_elec_dist)
    # NOTE(review): the 99 sentinel assigned here is immediately overwritten by
    # the apply() on the next line; it only serves to create the column
    self.df[SET_LCOE_GRID] = 99
    self.df[SET_LCOE_GRID] = self.df.apply(lambda row: grid_price if row[SET_ELEC_FUTURE] == 1 else 99, axis=1)
    self.df[SET_LCOE_GRID], self.df[SET_MIN_GRID_DIST] = self.elec_extension(grid_lcoes_rural, grid_lcoes_urban,
                                                                             existing_grid_cost_ratio, max_dist)
def set_scenario_variables(self, energy_per_hh_rural, energy_per_hh_urban,
                           num_people_per_hh_rural, num_people_per_hh_urban):
    """
    Write the urban/rural scenario parameters into the settlement table so
    that later LCOE calculations can read them directly per row.
    """
    logging.info('Setting electrification targets')
    rural, urban = 0, 1
    # (urban flag, target column, value to assign)
    targets = [(rural, SET_ENERGY_PER_HH, energy_per_hh_rural),
               (urban, SET_ENERGY_PER_HH, energy_per_hh_urban),
               (rural, SET_NUM_PEOPLE_PER_HH, num_people_per_hh_rural),
               (urban, SET_NUM_PEOPLE_PER_HH, num_people_per_hh_urban)]
    for urban_flag, column, value in targets:
        self.df.loc[self.df[SET_URBAN] == urban_flag, column] = value
def calculate_off_grid_lcoes(self, mg_hydro_calc, mg_wind_calc, mg_pv_calc,
                             sa_pv_calc, mg_diesel_calc, sa_diesel_calc):
    """
    Calculate the LCOEs for all off-grid technologies, and calculate the minimum, so that the electrification
    algorithm knows where the bar is before it becomes economical to electrify

    Each *_calc parameter is a technology calculator exposing get_lcoe(...)
    (and, for hydro, capacity_factor and base_to_peak_load_ratio).
    A value of 99 is used throughout as a "not viable" sentinel LCOE.
    """
    # A df with all hydropower sites, to ensure that they aren't assigned more capacity than is available
    hydro_used = 'HydropowerUsed'  # the amount of the hydro potential that has been assigned
    hydro_df = self.df[[SET_HYDRO_FID, SET_HYDRO]].drop_duplicates(subset=SET_HYDRO_FID)
    hydro_df[hydro_used] = 0
    hydro_df = hydro_df.set_index(SET_HYDRO_FID)
    max_hydro_dist = 5  # the max distance in km to consider hydropower viable

    def hydro_lcoe(row):
        # Closure mutating hydro_df: settlements claim hydro capacity in the
        # order pandas applies the rows, first come first served.
        # NOTE(review): some pandas versions call the first row twice in
        # apply(), which would double-count its capacity claim -- verify.
        if row[SET_HYDRO_DIST] < max_hydro_dist:
            # calculate the capacity that would be added by the settlement
            additional_capacity = ((row[SET_NEW_CONNECTIONS] * row[SET_ENERGY_PER_HH] / row[SET_NUM_PEOPLE_PER_HH])
                                   / (HOURS_PER_YEAR * mg_hydro_calc.capacity_factor *
                                      mg_hydro_calc.base_to_peak_load_ratio))
            # and add it to the tracking df
            hydro_df.loc[row[SET_HYDRO_FID], hydro_used] += additional_capacity
            # if it exceeds the available capacity, it's not an option
            if hydro_df.loc[row[SET_HYDRO_FID], hydro_used] > hydro_df.loc[row[SET_HYDRO_FID], SET_HYDRO]:
                return 99
            else:
                return mg_hydro_calc.get_lcoe(energy_per_hh=row[SET_ENERGY_PER_HH],
                                              people=row[SET_POP_FUTURE],
                                              num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
                                              mv_line_length=row[SET_HYDRO_DIST])
        else:
            return 99

    logging.info('Calculate minigrid hydro LCOE')
    self.df[SET_LCOE_MG_HYDRO] = self.df.apply(hydro_lcoe, axis=1)
    num_hydro_limited = hydro_df.loc[hydro_df[hydro_used] > hydro_df[SET_HYDRO]][SET_HYDRO].count()
    logging.info('{} potential hydropower sites were utilised to maximum capacity'.format(num_hydro_limited))

    logging.info('Calculate minigrid PV LCOE')
    # PV only where allowed by the solar restriction layer and with sufficient
    # solar resource (GHI > 1000; presumably kWh/m2/year -- TODO confirm units)
    self.df[SET_LCOE_MG_PV] = self.df.apply(
        lambda row: mg_pv_calc.get_lcoe(energy_per_hh=row[SET_ENERGY_PER_HH],
                                        people=row[SET_POP_FUTURE],
                                        num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
                                        capacity_factor=row[SET_GHI] / HOURS_PER_YEAR)
        if (row[SET_SOLAR_RESTRICTION] == 1 and row[SET_GHI] > 1000) else 99,
        axis=1)

    logging.info('Calculate minigrid wind LCOE')
    # wind only viable above a 0.1 capacity factor
    self.df[SET_LCOE_MG_WIND] = self.df.apply(
        lambda row: mg_wind_calc.get_lcoe(energy_per_hh=row[SET_ENERGY_PER_HH],
                                          people=row[SET_POP_FUTURE], num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
                                          capacity_factor=row[SET_WINDCF])
        if row[SET_WINDCF] > 0.1 else 99,
        axis=1)

    logging.info('Calculate minigrid diesel LCOE')
    self.df[SET_LCOE_MG_DIESEL] = self.df.apply(
        lambda row:
        mg_diesel_calc.get_lcoe(energy_per_hh=row[SET_ENERGY_PER_HH],
                                people=row[SET_POP_FUTURE],
                                num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
                                travel_hours=row[SET_TRAVEL_HOURS]),
        axis=1)

    logging.info('Calculate standalone diesel LCOE')
    self.df[SET_LCOE_SA_DIESEL] = self.df.apply(
        lambda row:
        sa_diesel_calc.get_lcoe(energy_per_hh=row[SET_ENERGY_PER_HH],
                                people=row[SET_POP_FUTURE],
                                num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
                                travel_hours=row[SET_TRAVEL_HOURS]),
        axis=1)

    logging.info('Calculate standalone PV LCOE')
    self.df[SET_LCOE_SA_PV] = self.df.apply(
        lambda row: sa_pv_calc.get_lcoe(energy_per_hh=row[SET_ENERGY_PER_HH],
                                        people=row[SET_POP_FUTURE],
                                        num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
                                        capacity_factor=row[SET_GHI] / HOURS_PER_YEAR)
        if row[SET_GHI] > 1000 else 99,
        axis=1)

    logging.info('Determine minimum technology (no grid)')
    # transpose so idxmin picks, per settlement, the name of the cheapest column
    self.df[SET_MIN_OFFGRID] = self.df[[SET_LCOE_SA_DIESEL, SET_LCOE_SA_PV, SET_LCOE_MG_WIND,
                                        SET_LCOE_MG_DIESEL, SET_LCOE_MG_PV, SET_LCOE_MG_HYDRO]].T.idxmin()

    logging.info('Determine minimum tech LCOE')
    # look the winning column's value back up row by row
    self.df[SET_MIN_OFFGRID_LCOE] = self.df.apply(lambda row: (row[row[SET_MIN_OFFGRID]]), axis=1)
def results_columns(self, mg_hydro_calc, mg_wind_calc, mg_pv_calc, sa_pv_calc,
                    mg_diesel_calc, sa_diesel_calc, grid_calc):
    """
    Once the grid extension algorithm has been run, determine the minimum overall option, and calculate the
    capacity and investment requirements for each settlement

    Each *_calc parameter is the technology calculator used earlier; here each
    must also accept get_investment_cost=True in get_lcoe(...).
    """
    def res_investment_cost(row):
        # Re-run the LCOE calculation of the technology chosen for this row,
        # asking for the investment cost instead of the LCOE itself.
        min_tech = row[SET_MIN_OVERALL]
        if min_tech == SET_LCOE_SA_DIESEL:
            return sa_diesel_calc.get_lcoe(energy_per_hh=row[SET_ENERGY_PER_HH],
                                           people=row[SET_POP_FUTURE],
                                           num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
                                           travel_hours=row[SET_TRAVEL_HOURS],
                                           get_investment_cost=True)
        elif min_tech == SET_LCOE_SA_PV:
            return sa_pv_calc.get_lcoe(energy_per_hh=row[SET_ENERGY_PER_HH],
                                       people=row[SET_POP_FUTURE],
                                       num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
                                       capacity_factor=row[SET_GHI] / HOURS_PER_YEAR,
                                       get_investment_cost=True)
        elif min_tech == SET_LCOE_MG_WIND:
            return mg_wind_calc.get_lcoe(energy_per_hh=row[SET_ENERGY_PER_HH],
                                         people=row[SET_POP_FUTURE],
                                         num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
                                         capacity_factor=row[SET_WINDCF],
                                         get_investment_cost=True)
        elif min_tech == SET_LCOE_MG_DIESEL:
            return mg_diesel_calc.get_lcoe(energy_per_hh=row[SET_ENERGY_PER_HH],
                                           people=row[SET_POP_FUTURE],
                                           num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
                                           travel_hours=row[SET_TRAVEL_HOURS],
                                           get_investment_cost=True)
        elif min_tech == SET_LCOE_MG_PV:
            return mg_pv_calc.get_lcoe(energy_per_hh=row[SET_ENERGY_PER_HH],
                                       people=row[SET_POP_FUTURE],
                                       num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
                                       capacity_factor=row[SET_GHI] / HOURS_PER_YEAR,
                                       get_investment_cost=True)
        elif min_tech == SET_LCOE_MG_HYDRO:
            return mg_hydro_calc.get_lcoe(energy_per_hh=row[SET_ENERGY_PER_HH],
                                          people=row[SET_POP_FUTURE],
                                          num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
                                          mv_line_length=row[SET_HYDRO_DIST],
                                          get_investment_cost=True)
        elif min_tech == SET_LCOE_GRID:
            return grid_calc.get_lcoe(energy_per_hh=row[SET_ENERGY_PER_HH],
                                      people=row[SET_POP_FUTURE],
                                      num_people_per_hh=row[SET_NUM_PEOPLE_PER_HH],
                                      additional_mv_line_length=row[SET_MIN_GRID_DIST],
                                      get_investment_cost=True)
        else:
            raise ValueError('A technology has not been accounted for in res_investment_cost()')

    logging.info('Determine minimum overall')
    # transpose so idxmin picks, per settlement, the name of the cheapest column
    self.df[SET_MIN_OVERALL] = self.df[[SET_LCOE_GRID, SET_LCOE_SA_DIESEL, SET_LCOE_SA_PV, SET_LCOE_MG_WIND,
                                        SET_LCOE_MG_DIESEL, SET_LCOE_MG_PV, SET_LCOE_MG_HYDRO]].T.idxmin()

    logging.info('Determine minimum overall LCOE')
    self.df[SET_MIN_OVERALL_LCOE] = self.df.apply(lambda row: (row[row[SET_MIN_OVERALL]]), axis=1)

    logging.info('Add technology codes')
    # numeric code per technology (used e.g. for map output)
    codes = {SET_LCOE_GRID: 1, SET_LCOE_MG_HYDRO: 7, SET_LCOE_MG_WIND: 6, SET_LCOE_MG_PV: 5,
             SET_LCOE_MG_DIESEL: 4, SET_LCOE_SA_DIESEL: 2, SET_LCOE_SA_PV: 3}
    self.df.loc[self.df[SET_MIN_OVERALL] == SET_LCOE_GRID, SET_MIN_OVERALL_CODE] = codes[SET_LCOE_GRID]
    self.df.loc[self.df[SET_MIN_OVERALL] == SET_LCOE_MG_HYDRO, SET_MIN_OVERALL_CODE] = codes[SET_LCOE_MG_HYDRO]
    self.df.loc[self.df[SET_MIN_OVERALL] == SET_LCOE_SA_PV, SET_MIN_OVERALL_CODE] = codes[SET_LCOE_SA_PV]
    self.df.loc[self.df[SET_MIN_OVERALL] == SET_LCOE_MG_WIND, SET_MIN_OVERALL_CODE] = codes[SET_LCOE_MG_WIND]
    self.df.loc[self.df[SET_MIN_OVERALL] == SET_LCOE_MG_PV, SET_MIN_OVERALL_CODE] = codes[SET_LCOE_MG_PV]
    self.df.loc[self.df[SET_MIN_OVERALL] == SET_LCOE_MG_DIESEL, SET_MIN_OVERALL_CODE] = codes[SET_LCOE_MG_DIESEL]
    self.df.loc[self.df[SET_MIN_OVERALL] == SET_LCOE_SA_DIESEL, SET_MIN_OVERALL_CODE] = codes[SET_LCOE_SA_DIESEL]

    logging.info('Determine minimum category')
    # collapse the chosen technology name into its family: SA, MG or Grid
    self.df[SET_MIN_CATEGORY] = self.df[SET_MIN_OVERALL].str.extract('(SA|MG|Grid)', expand=False)

    logging.info('Calculate new capacity')
    # capacity = energy demand / (hours * capacity factor * base-to-peak ratio);
    # PV rows use GHI/HOURS_PER_YEAR and wind rows the per-settlement wind CF
    self.df.loc[self.df[SET_MIN_OVERALL] == SET_LCOE_GRID, SET_NEW_CAPACITY] = (
        (self.df[SET_NEW_CONNECTIONS] * self.df[SET_ENERGY_PER_HH] / self.df[SET_NUM_PEOPLE_PER_HH]) /
        (HOURS_PER_YEAR * grid_calc.capacity_factor * grid_calc.base_to_peak_load_ratio))
    self.df.loc[self.df[SET_MIN_OVERALL] == SET_LCOE_MG_HYDRO, SET_NEW_CAPACITY] = (
        (self.df[SET_NEW_CONNECTIONS] * self.df[SET_ENERGY_PER_HH] / self.df[SET_NUM_PEOPLE_PER_HH]) /
        (HOURS_PER_YEAR * mg_hydro_calc.capacity_factor * mg_hydro_calc.base_to_peak_load_ratio))
    self.df.loc[self.df[SET_MIN_OVERALL] == SET_LCOE_MG_PV, SET_NEW_CAPACITY] = (
        (self.df[SET_NEW_CONNECTIONS] * self.df[SET_ENERGY_PER_HH] / self.df[SET_NUM_PEOPLE_PER_HH]) /
        (HOURS_PER_YEAR * (self.df[SET_GHI] / HOURS_PER_YEAR) * mg_pv_calc.base_to_peak_load_ratio))
    self.df.loc[self.df[SET_MIN_OVERALL] == SET_LCOE_MG_WIND, SET_NEW_CAPACITY] = (
        (self.df[SET_NEW_CONNECTIONS] * self.df[SET_ENERGY_PER_HH] / self.df[SET_NUM_PEOPLE_PER_HH]) /
        (HOURS_PER_YEAR * self.df[SET_WINDCF] * mg_wind_calc.base_to_peak_load_ratio))
    self.df.loc[self.df[SET_MIN_OVERALL] == SET_LCOE_MG_DIESEL, SET_NEW_CAPACITY] = (
        (self.df[SET_NEW_CONNECTIONS] * self.df[SET_ENERGY_PER_HH] / self.df[SET_NUM_PEOPLE_PER_HH]) /
        (HOURS_PER_YEAR * mg_diesel_calc.capacity_factor * mg_diesel_calc.base_to_peak_load_ratio))
    self.df.loc[self.df[SET_MIN_OVERALL] == SET_LCOE_SA_DIESEL, SET_NEW_CAPACITY] = (
        (self.df[SET_NEW_CONNECTIONS] * self.df[SET_ENERGY_PER_HH] / self.df[SET_NUM_PEOPLE_PER_HH]) /
        (HOURS_PER_YEAR * sa_diesel_calc.capacity_factor * sa_diesel_calc.base_to_peak_load_ratio))
    self.df.loc[self.df[SET_MIN_OVERALL] == SET_LCOE_SA_PV, SET_NEW_CAPACITY] = (
        (self.df[SET_NEW_CONNECTIONS] * self.df[SET_ENERGY_PER_HH] / self.df[SET_NUM_PEOPLE_PER_HH]) /
        (HOURS_PER_YEAR * (self.df[SET_GHI] / HOURS_PER_YEAR) * sa_pv_calc.base_to_peak_load_ratio))

    logging.info('Calculate investment cost')
    self.df[SET_INVESTMENT_COST] = self.df.apply(res_investment_cost, axis=1)
def calc_summaries(self):
    """
    Calculate the scenario summaries: per-technology population, new
    connections, new capacity and total investment cost.
    """
    logging.info('Calculate summaries')
    techs = [SET_LCOE_GRID, SET_LCOE_SA_DIESEL, SET_LCOE_SA_PV, SET_LCOE_MG_WIND,
             SET_LCOE_MG_DIESEL, SET_LCOE_MG_PV, SET_LCOE_MG_HYDRO]
    # each summary prefix aggregates one results column per technology
    aggregations = [('population_', SET_POP_FUTURE),
                    ('new_connections_', SET_NEW_CONNECTIONS),
                    ('capacity_', SET_NEW_CAPACITY),
                    ('investment_', SET_INVESTMENT_COST)]
    summary = pd.Series(index=[prefix + t for prefix, _ in aggregations for t in techs])
    for prefix, column in aggregations:
        for t in techs:
            chosen = self.df[SET_MIN_OVERALL] == t
            summary.loc[prefix + t] = self.df.loc[chosen, column].sum()
    return summary
| {
"repo_name": "KTH-dESA/PyOnSSET-jupyter",
"path": "onsset.py",
"copies": "1",
"size": "54327",
"license": "mit",
"hash": -397660734843026600,
"line_mean": 48.9329044118,
"line_max": 120,
"alpha_frac": 0.5694958308,
"autogenerated": false,
"ratio": 3.3568339100346023,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9410227164969585,
"avg_score": 0.003220515173003695,
"num_lines": 1088
} |
__author__ = 'Christopher Bock'
from LoggingClass import LoggingClass
class RatioHistogram(LoggingClass):
    """
    A convenience class to make drawing ratio histograms in ROOT (http://root.cern.ch) easier, especially when dealing
    with many histograms in the same plot at once. You can either add histograms prestyled by hand or using a styling
    function accepting one or two parameters being the histogram and optionally the name of the histogram in the legend.
    TODO:
    - add function to load histograms also from files to make life even more easy
    - check styling options inside default_options for consistency
    - add documentation to all the possible options inside default_options
    """
    def __init__(self, logger=None):
        from OptionHandler import OptionHandler
        LoggingClass.__init__(self, logger=logger)
        self.options = OptionHandler(logger)  # option store; filled by load_defaults()/load_options()
        self.load_defaults()
        self.histograms = {}  # maps legend name -> histogram object
        pass
    def load_defaults(self):
        """Register the default value of every supported plotting option."""
        default_options = {'batch_mode': True, 'output_file_type': 'pdf', 'safe_to_root_file': False,
                           'use_atlas_style': False, 'draw_legend': True, 'legend_text_size': 0.045,
                           'legend_x_values': [0.69, 0.86], 'legend_y_values': [0.65, 0.85], 'opt_stat': 0,
                           'draw_grid': False, 'ratio_maximum': 2.0, 'ratio_minimum': 0.5, 'ratio_xaxis_ndivisions': 306,
                           'ratio_yaxis_ndivisions': 602, 'line_width_scale': 1.5, 'override_minimum': False,
                           'override_maximum': False, 'minimum_value': 1444444, 'maximum_value': -123123,
                           'do_atlas_label': False, 'atlas_label': 'Preliminary', 'ratio_y_label': 'Ratio',
                           'omit_title': False, 'legend_automatic_columns': True, 'legend_n_columns': -1,
                           'overall_text_scale': 1.5}
        self.options.load_defaults(default_options)
        pass
    def load_options(self, file_name=''):
        """Load option overrides from a config file."""
        # NOTE(review): this guard is a no-op -- `pass` was probably meant to be
        # `return`, so an empty file name is still handed to the parser
        if not file_name:
            pass
        self.options.parse_arguments_config_file(file_name)
        pass
    def print_settings(self):
        """Print the currently active options, framed by separator lines."""
        self.print_line()
        self.options.print_options('INFO')
        self.print_line()
        pass
    # the following convenience function still needs to be updated to work with the new code
    # def AddHistogramFromFile(self, fileName, histogramName, nameInLegend=None, directoryName=None, lineColor=-1,
    #                          markerStyle=-1, dashed=False, histogramScale=1, closeFileAfterwards=True,
    #                          closeIfOpen=True, lineStyle=None, markerSize=None, draw_option=None):
    #     '''
    #     In case no directory name has been specified, this function assumes that
    #     the complete path to the histogram is specified as histogramName
    #     Supply -1 as histogramScale to normalize histograms
    #     '''
    #     if self.batchMode:
    #         gROOT.SetBatch(True)
    #
    #     rootFile = gROOT.FindObject(fileName)
    #     if rootFile and closeIfOpen:
    #         rootFile.Close()
    #
    #     rootFile = TFile(fileName, 'READ')
    #     if not rootFile:
    #         raise NameError('Could not load root file: ' + fileName)
    #
    #     workingDirectory = rootFile
    #     if directoryName:
    #         workingDirectory = rootFile.Get(directoryName)
    #         if not workingDirectory:
    #             rootFile.Close()
    #             raise NameError('Could not open working directory: ' + directoryName)
    #
    #     temporaryHistogram = workingDirectory.Get(histogramName)
    #     if not temporaryHistogram:
    #         rootFile.Close()
    #         raise NameError('Could not load histogram: ' + histogramName)
    #     else:
    #         gROOT.cd()  # needed before cloning to avoid the histogram being cleaned once we exit this function
    #         self.AddHistogram(temporaryHistogram, nameInLegend, lineColor, markerStyle, dashed, histogramScale,
    #                           lineStyle, markerSize, draw_option)
    #
    #     if closeFileAfterwards:
    #         rootFile.Close()
    #
    #     pass
    def add_histogram(self, histogram, name_in_legend=None, histogram_styler=None):
        """Register a histogram under a legend name, optionally styling it first.

        histogram_styler, if given, is called with (histogram, name_in_legend)
        when it accepts two or more arguments, otherwise with (histogram) only.
        """
        if not histogram:
            raise AttributeError('No histogram supplied to RatioHistogram.add_histogram')
        if not name_in_legend:
            name_in_legend = histogram.GetName()
        if histogram_styler:
            import inspect
            # NOTE(review): inspect.getargspec is removed in recent Python 3
            # releases -- consider inspect.getfullargspec / inspect.signature
            argspec = inspect.getargspec(histogram_styler)
            if len(argspec[0]) >= 2:
                histogram_styler(histogram, name_in_legend)
            else:
                histogram_styler(histogram)
        self.histograms[name_in_legend] = histogram
        pass
    def plot(self, output_file_name, name_of_canvas='canvas', log_scale=False, ratio_log_scale=False,
             ratio_map=None, plot_ratios=True, sort_function=None):
        """Draw all registered histograms (plus an optional ratio panel) and print to file.

        ratio_map maps numerator index -> denominator index into the (possibly
        sorted) list of histogram keys; by default every histogram is divided
        by the first one. Returns True on success, False when fewer than two
        histograms are registered while plot_ratios is requested.
        """
        import ROOT
        num_histograms = len(self.histograms)
        self.print_log('Output file is named: %s and superimposes %i histograms.' % (output_file_name, num_histograms))
        if (num_histograms < 2) and plot_ratios:
            self.print_log('Need at least two histograms to create a ratio plot!', 'WARNING')
            return False
        if sort_function:
            # NOTE(review): Python 2 style cmp argument -- needs key=... on Python 3
            histogram_keys = sorted(self.histograms.keys(), sort_function)
        else:
            histogram_keys = self.histograms.keys()
        ROOT.gStyle.SetOptStat(self.options['opt_stat'])
        if self.options['draw_legend']:
            legend_x_values = self.options['legend_x_values']
            legend_y_values = self.options['legend_y_values']
            legend = ROOT.TLegend(legend_x_values[0], legend_y_values[0], legend_x_values[1], legend_y_values[1])
            legend.SetBorderSize(0)
            legend.SetFillColor(0)
            legend.SetFillStyle(4050)
            legend.SetTextFont(42)
            legend.SetTextSize(legend.GetTextSize()*2)
            legend_entry_option = 'l'
            if plot_ratios:
                legend_entry_option = 'lp'  # ratios carry markers, so show those in the legend too
            for name in histogram_keys:
                legend.AddEntry(self.histograms[name], name, legend_entry_option)
        # determine the common y-range over all histograms unless overridden
        maximum_value = self.options['maximum_value']
        minimum_value = self.options['minimum_value']
        if not self.options['override_maximum']:
            maximum_value = -131238.  # sentinel below any realistic maximum
            for name, histogram in self.histograms.iteritems():
                if histogram.GetMaximum() > maximum_value:
                    maximum_value = histogram.GetMaximum()
        if not self.options['override_minimum']:
            minimum_value = 87238477.  # sentinel above any realistic minimum
            for name, histogram in self.histograms.iteritems():
                if histogram.GetMinimum() < minimum_value:
                    minimum_value = histogram.GetMinimum()
        ### Creating the canvas and the pads to draw the histograms and the ratio plots on ###
        canv = ROOT.TCanvas(name_of_canvas, '', 0, 0, 800, 600)
        canv.SetTicks(1, 1)
        if plot_ratios:
            # reserve the lower 20% of the canvas for the ratio pad
            y_pad_histo = 0.2
            bottom_margin_pad_histo = 0.035
            #lef_margin_pad_histo = 0.13 + self.left_margin_shift
            lef_margin_pad_histo = 0.13
        else:
            y_pad_histo = 0.0
            bottom_margin_pad_histo = 0.125
            #lef_margin_pad_histo = 0.1 + self.left_margin_shift
            lef_margin_pad_histo = 0.1
        pad_histo = ROOT.TPad('name_pad_histo', 'name_pad_histo', 0, y_pad_histo, 1., 1.)
        pad_histo.SetTicks(1, 1)
        pad_histo.SetLeftMargin(lef_margin_pad_histo)
        pad_histo.SetRightMargin(0.05)
        pad_histo.SetBottomMargin(bottom_margin_pad_histo)
        if self.options['draw_grid']:
            pad_histo.SetGrid()
        if log_scale:
            pad_histo.SetLogy(1)
        if not self.options['omit_title']:
            pad_histo.SetTopMargin(0.1)
            ROOT.gStyle.SetOptTitle(1)
        else:
            ROOT.gStyle.SetOptTitle(0)
            pad_histo.SetTopMargin(0.05)
        if plot_ratios:
            pad_ratio = ROOT.TPad('name_pad_ratio', 'name_pad_ratio', 0, 0, 1, 0.2)
            pad_ratio.SetTopMargin(0.07)
            pad_ratio.SetLeftMargin(lef_margin_pad_histo)
            pad_ratio.SetRightMargin(0.05)
            pad_ratio.SetBottomMargin(0.45)
            if self.options['draw_grid']:
                pad_ratio.SetGrid()
            if ratio_log_scale:
                pad_ratio.SetLogy(1)
            pad_ratio.Draw()  # otherwise ROOT crashes...
        pad_histo.Draw()
        ### Create the ratio histograms and draw them ###
        if plot_ratios:
            pad_ratio.cd()
            if not ratio_map:
                # default: divide every histogram by the first one
                ratio_map = {}
                for i in range(1, num_histograms):
                    ratio_map[i] = 0
            i = 0
            ratio_histograms = []  # we need this workaround to prevent the GC from deleting the histograms too early
            for numeratorHistogram, denumeratorHistogram in ratio_map.iteritems():
                # silently skip out-of-range indices
                if denumeratorHistogram >= num_histograms:
                    continue
                if numeratorHistogram >= num_histograms:
                    continue
                ratio_histograms.append(self.histograms[histogram_keys[numeratorHistogram]].Clone(self.histograms[histogram_keys[numeratorHistogram]].GetName() + str(i) + 'clone'))
                hratio = ratio_histograms[i]
                hratio.Divide(self.histograms[histogram_keys[denumeratorHistogram]])
                hratio.SetTitle('')
                hratio.SetStats(0)
                # the ratio inherits the numerator's colours and marker style
                hratio.SetLineColor(self.histograms[histogram_keys[numeratorHistogram]].GetLineColor())
                hratio.SetMarkerColor(self.histograms[histogram_keys[numeratorHistogram]].GetLineColor())
                hratio.SetMarkerStyle(self.histograms[histogram_keys[numeratorHistogram]].GetMarkerStyle())
                if not ratio_log_scale:
                    hratio.SetMinimum(self.options['ratio_minimum'])
                    hratio.SetMaximum(self.options['ratio_maximum'])
                else:
                    if self.options['ratio_minimum'] > 0:
                        hratio.SetMinimum(self.options['ratio_minimum'])
                    else:
                        hratio.SetMinimum(0.1)  # a log axis cannot start at or below zero
                # blow label/title sizes up to compensate for the small pad height
                scalefactor = self.options['overall_text_scale'] * (1.0 - y_pad_histo) / y_pad_histo
                hratio.GetXaxis().SetLabelSize(hratio.GetXaxis().GetLabelSize() * scalefactor)
                hratio.GetYaxis().SetLabelSize(hratio.GetYaxis().GetLabelSize() * scalefactor)
                hratio.GetXaxis().SetTitleSize(hratio.GetXaxis().GetTitleSize() * scalefactor)
                hratio.GetYaxis().SetTitleSize(hratio.GetYaxis().GetTitleSize() * scalefactor)
                hratio.GetXaxis().SetTitleOffset(0.9)
                hratio.GetYaxis().SetTitleOffset(self.options['overall_text_scale']*0.9 / scalefactor)
                hratio.GetXaxis().SetNdivisions(self.options['ratio_xaxis_ndivisions'])
                hratio.GetYaxis().SetNdivisions(self.options['ratio_yaxis_ndivisions'])
                hratio.SetLineWidth(int(hratio.GetLineWidth() * self.options['line_width_scale']))
                #hratio.SetMarkerSize(hratio.GetMarkerSize()*scalefactor*0.9)
                if i > 0:
                    hratio.Draw('SAME P')
                else:
                    hratio.GetYaxis().SetTitle(self.options['ratio_y_label'])
                    hratio.Draw('P')
                    if not self.options['draw_grid']:
                        # light dashed reference line at ratio == 1
                        l = ROOT.TLine(hratio.GetXaxis().GetXmin(), 1, hratio.GetXaxis().GetXmax(), 1)
                        l.SetLineStyle(4)
                        l.SetLineColor(17)
                        l.Draw()
                i += 1
        ### Now draw the distributions on the main pad ##
        pad_histo.cd()
        first_key = histogram_keys[0]
        if log_scale and self.options['do_atlas_label']:
            self.histograms[first_key].SetMaximum(maximum_value * 5)  # extra headroom for the label
        else:
            self.histograms[first_key].SetMaximum(maximum_value * 1.15)
        if not log_scale:
            self.histograms[first_key].SetMinimum(minimum_value)
        if plot_ratios:
            x_axis_scale = 0.0  # hide x labels/title; the ratio pad carries them
        else:
            x_axis_scale = self.options['overall_text_scale']
        self.histograms[first_key].GetXaxis().SetLabelSize(self.histograms[first_key].GetXaxis().GetLabelSize() * x_axis_scale)
        self.histograms[first_key].GetYaxis().SetLabelSize(self.histograms[first_key].GetYaxis().GetLabelSize() * self.options['overall_text_scale'])
        self.histograms[first_key].GetXaxis().SetTitleSize(self.histograms[first_key].GetXaxis().GetTitleSize() * x_axis_scale)
        self.histograms[first_key].GetYaxis().SetTitleSize(self.histograms[first_key].GetYaxis().GetTitleSize() * self.options['overall_text_scale'])
        self.histograms[first_key].GetYaxis().SetTitleOffset(0.95)
        self.histograms[first_key].SetLineWidth(int(self.histograms[first_key].GetLineWidth() * self.options['line_width_scale']))
        if not self.histograms[first_key].GetYaxis().GetTitle():
            self.histograms[first_key].GetYaxis().SetTitle('Untitled')
        self.histograms[first_key].Draw('HIST')
        for i in range(1, num_histograms):
            self.histograms[histogram_keys[i]].SetLineWidth(self.histograms[first_key].GetLineWidth())
            self.histograms[histogram_keys[i]].Draw('same HIST')
        ### Last: draw the legend ##
        # NOTE(review): `legend` is only defined when the 'draw_legend' option is
        # set -- with it disabled the following lines raise a NameError
        legend.SetTextSize(self.options['legend_text_size'])
        if self.options['legend_automatic_columns']:
            import math
            # NOTE(review): GetNRows()/6 is integer division under Python 2, so
            # the ceil has no effect -- probably /6.0 was intended
            n_columns = int(math.ceil(legend.GetNRows()/6))
            legend.SetNColumns(n_columns)
        elif self.options['legend_n_columns'] > 0:
            legend.SetNColumns(self.options['legend_n_columns'])
        legend.Draw()
        if self.options['do_atlas_label']:
            raise Exception('do_atlas_label not yet implemented!')
        canv.Print(output_file_name + '.' + self.options['output_file_type'], self.options['output_file_type'])
        return True
| {
"repo_name": "ChristopherBock/pyUtilityClasses",
"path": "UtilityClasses/RatioHistogram.py",
"copies": "1",
"size": "14298",
"license": "mit",
"hash": -3061960387643353000,
"line_mean": 42.4589665653,
"line_max": 180,
"alpha_frac": 0.59434886,
"autogenerated": false,
"ratio": 3.8580679978413386,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9942312078048412,
"avg_score": 0.002020955958585291,
"num_lines": 329
} |
__author__ = 'Christopher Bock'
class LoggingClass(object):
    """
    Base class providing rudimentary logging facilities.

    When a logger object is supplied it must expose a print_log function taking
    three (positional) parameters: the message, the message type, and a flag
    suppressing the timestamp. Without a logger, messages go to stdout via
    print, prefixed with the message type and (optionally) a timestamp.
    """
    def __init__(self, logger=None):
        self.logger = logger
        return

    def print_line(self, msg_type='INFO', suppress_timestamp=False):
        """Emit a separator line of 25 dashes."""
        self.print_log('-'*25, msg_type, suppress_timestamp)

    def print_log(self, message, msg_type='INFO', suppress_timestamp=False):
        """Forward the message to the attached logger, or print it to stdout."""
        if self.logger:
            self.logger.print_log(message, msg_type, suppress_timestamp)
            return
        if suppress_timestamp:
            print("%s: %s" % (msg_type, message))
            return
        import time
        print("%s-%s: %s" % (time.strftime("%y-%m-%d/%H:%M:%S"), msg_type, message))
| {
"repo_name": "ChristopherBock/pyUtilityClasses",
"path": "UtilityClasses/LoggingClass.py",
"copies": "1",
"size": "1278",
"license": "mit",
"hash": 8977156897058103000,
"line_mean": 40.2258064516,
"line_max": 119,
"alpha_frac": 0.6236306729,
"autogenerated": false,
"ratio": 4.231788079470198,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5355418752370198,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Christopher Fonnesbeck, fonnesbeck@maths.otago.ac.nz'
from pymc.StepMethods import *
class TWalk(StepMethod):
    """
    Step method implementing the t-walk of J.A. Christen and C. Fox
    (unpublished manuscript): a scale-independent, adaptive MCMC sampler for
    arbitrary continuous distributions and correlation structures. Two
    independent points are maintained in the sample space, and moves are
    proposed on the product space with a standard M-H acceptance probability;
    the algorithm is strictly non-adaptive on the product space yet behaves
    adaptively on the original state space. Four proposal kernels (walk, blow,
    hop, traverse) together handle distributions of arbitrary scale.

    :Parameters:
      - stochastic : Stochastic
            The variable over which self has jurisdiction.
      - kernel_probs (optional) : iterable
            The probabilities of choosing each kernel.
      - walk_theta (optional) : float
            Parameter for the walk move.
      - traverse_theta (optional) : float
            Parameter for the traverse move.
      - verbose (optional) : integer
            Level of output verbosity: 0=none, 1=low, 2=medium, 3=high
      - tally (optional) : bool
            Flag for recording values for trace (Defaults to True).
    """
    def __init__(self, stochastic, kernel_probs=[0.0008, 0.4914, 0.4914, 0.0082, 0.0082], walk_theta=0.5, traverse_theta=4.0, verbose=None, tally=True):
        # Delegate the common step-method set-up to the base class
        StepMethod.__init__(self, [stochastic], verbose=verbose, tally=tally)

    @staticmethod
    def competence(stochastic):
        """Competence for TWalk: 0 for integer-valued stochastics, 1 otherwise."""
        return 0 if stochastic.dtype in integer_dtypes else 1

    def walk(self):
        """Walk proposal kernel (not yet implemented)."""
        pass

    def hop(self):
        """Hop proposal kernel (not yet implemented)."""
        pass

    def traverse(self):
        """Traverse proposal kernel (not yet implemented)."""
        pass

    def beta(self, a):
        """Draw the auxiliary scale variable used by the traverse proposal."""
        if random() < (a - 1) / (2 * a):
            return exp(1 / (a + 1) * log(random()))
        return exp(1 / (1 - a) * log(random()))

    def blow(self):
        """Blow proposal kernel (not yet implemented)."""
        pass
| {
"repo_name": "matthew-brett/pymc",
"path": "pymc/sandbox/TWalk.py",
"copies": "1",
"size": "2413",
"license": "mit",
"hash": 5659422517132439000,
"line_mean": 34.4852941176,
"line_max": 152,
"alpha_frac": 0.6369664318,
"autogenerated": false,
"ratio": 4.062289562289562,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5199255994089562,
"avg_score": null,
"num_lines": null
} |
'''Library to manipulate .srt subtitle files. Currently srtTool can shift
subtitles by seconds or change to new frame rates. It also can match film
script files to spotted timecodes. PAL uses a frame rate of 25, while NTSC
uses a frame rate of 29.97. 35mm videos have a frame rate of 24. But transfer
from telecining for PAL is 23.976.'''
import datetime
import sys
import time
class srtLib:
    """Parses an .srt subtitle document and supports shifting timecodes,
    extracting a plain-text script, and matching new text to existing
    timecodes.
    """

    def __init__(self, srtFile):
        """Split raw .srt text into timecode pairs and subtitle text.

        Parameters
        ----------
        srtFile : str
            Full contents of an .srt file (entries separated by blank lines;
            each entry is index, timecode range, then text lines).
        """
        self.subs = srtFile.split("\n\n")
        # Line 0 of each entry is the index, line 1 the "start --> end"
        # timecode range, lines 2+ the subtitle text.
        self.tcs = [s.split("\n")[1].split(" --> ")
                    for s in self.subs if len(s) > 0]
        self.sub_text = [s.split("\n")[2:] for s in self.subs if len(s) > 0]

    def to_seconds(self, timecode):
        '''Converts timecode strings HH:MM:SS,mmm to float seconds.

        Fix: the millisecond field after the comma is now preserved; it was
        previously discarded, so shifted subtitles lost sub-second precision.
        '''
        whole, _, millis = timecode.partition(',')
        x = time.strptime(whole, '%H:%M:%S')
        secs = float(datetime.timedelta(
            hours=x.tm_hour,
            minutes=x.tm_min,
            seconds=x.tm_sec).total_seconds())
        if millis:
            secs += int(millis) / 1000.0
        return secs

    def to_tc(self, seconds):
        '''Converts seconds to timecode strings HH:MM:SS,mmm.

        Fix: rounds to the nearest millisecond as one quantity. The previous
        version rounded the fractional part separately from the truncated
        integer second, so e.g. 59.9996 rendered as "00:00:59,000" instead
        of "00:01:00,000".
        '''
        total_ms = int(round(seconds * 1000))
        s, ms = divmod(total_ms, 1000)
        m, s = divmod(s, 60)
        h, m = divmod(m, 60)
        return "%02d:%02d:%02d,%03d" % (h, m, s, ms)

    def shift_tcs(self, t_shift, rate=False):
        '''Shifts timecodes by seconds (rate=False) or scales them by a
        frame-rate ratio (rate=True). Writes the result to
        "shifted_subs.srt" and returns the new timecode pairs.
        '''
        secs = [(self.to_seconds(x[0]), self.to_seconds(x[1]))
                for x in self.tcs]
        if not rate:
            secs = [(x[0] + t_shift, x[1] + t_shift) for x in secs]
        else:
            secs = [(x[0] * t_shift, x[1] * t_shift) for x in secs]
        s_tcs = [(self.to_tc(x[0]), self.to_tc(x[1])) for x in secs]
        newFile = ""
        for i, s in enumerate(self.sub_text):
            newFile += str(i + 1) + "\n"
            newFile += s_tcs[i][0] + " --> " + s_tcs[i][1] + "\n"
            for x in s:
                newFile += x + "\n"
            newFile += "\n"
        with open("shifted_subs.srt", "w") as f:
            f.write(newFile)
        return (s_tcs)

    def script(self):
        '''Generates a script of text only (no indices/timecodes) from the
        subtitle file, written to "script.txt".'''
        with open("script.txt", "w") as f:
            f.write("\n".join([i for s in self.sub_text for i in s]))

    def match_new(self, new_subs):
        '''Matches subtitle text formatted in .srt without any timecode to
        this file's timecodes, writing the result to "newly_matched.srt".
        This is helpful for spotters who happen to have a script.
        '''
        new_text = [[l for l in sub.split("\n") if len(l) > 0]
                    for sub in new_subs.split("\n\n")]
        # Every new text entry must line up with an existing timecode pair.
        assert len(new_text) == len(self.tcs)
        new_srt = ""
        for i, x in enumerate(new_text):
            new_srt += str(x[0]) + "\n"
            new_srt += self.tcs[i][0] + " --> " + self.tcs[i][1] + "\n"
            new_srt += "\n".join(x[1:]) + "\n\n"
        with open("newly_matched.srt", "w") as f:
            f.write(new_srt)
if __name__ == '__main__':
    # CLI usage: <script> SRT_FILE ACTION [ARGS...]
    # ACTION is one of: shift, script, match_new
    srt_path = sys.argv[1]
    action = str(sys.argv[2])

    with open(srt_path, 'r', encoding='utf-8') as srt_file:
        subs = srtLib(srt_file.read())

    if action == "shift":
        # ARGS: ("seconds"|"rate") AMOUNT -- shift timecodes by seconds, or
        # scale them by a frame-rate conversion ratio.
        shift_mode = sys.argv[3]
        shift_amount = sys.argv[4]
        if shift_mode == "seconds":
            subs.shift_tcs(float(shift_amount))
        elif shift_mode == "rate":
            subs.shift_tcs(float(shift_amount), rate=True)
    elif action == "script":
        # Dump the subtitle text only.
        subs.script()
    elif action == "match_new":
        # ARGS: NEW_SRT_FILE -- text-only .srt to pair with these timecodes.
        with open(sys.argv[3], 'r', encoding='utf-8') as new_file:
            subs.match_new(new_file.read())
| {
"repo_name": "henchc/srtTool",
"path": "srt-tool/srt-Shift.py",
"copies": "1",
"size": "4344",
"license": "mit",
"hash": 55928467688551950,
"line_mean": 32.9375,
"line_max": 80,
"alpha_frac": 0.5080570902,
"autogenerated": false,
"ratio": 3.208271787296898,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42163288774968977,
"avg_score": null,
"num_lines": null
} |
'''Working product to automatically generate SRT file from script. Intervals are given as user inputs while watching video. Line breaks are determined by a tree parsing algorithm.'''
import time
from nltk.parse import stanford
from nltk import sent_tokenize, Tree
from string import punctuation
# Treat apostrophes as part of words (contractions like "don't") rather than
# as punctuation when re-joining tokenized text.
punctuation = punctuation.replace("'", "")
# Maximum characters allowed per rendered subtitle line.
line_limit = 34
# Raw film script text; expected to live next to this script.
with open("script_eng.txt") as f:
    raw_script = f.read()
# prepare stanford parser
# NOTE(review): these are machine-specific absolute paths -- parameterize or
# adjust before running elsewhere.
parser = stanford.StanfordParser(
    path_to_jar="/Users/chench/Documents/stanford-parser-full-2015-12-09/stanford-parser.jar",
    path_to_models_jar="/Users/chench/Documents/stanford-parser-full-2015-12-09/stanford-parser-3.6.0-models.jar")
# One constituency parse tree (iterator) per tokenized sentence.
sentences = parser.raw_parse_sents(sent_tokenize(raw_script))
def get_all_nodes(parent):
    '''Recursively collect (label, leaves) pairs for every subtree.

    Results are accumulated in the module-level ``all_results`` list rather
    than returned.
    '''
    for child in parent:
        if not isinstance(child, Tree):
            continue
        all_results.append((child.label(), child.leaves()))
        get_all_nodes(child)
# Flatten every parse tree into (label, leaves) tuples; get_all_nodes
# appends into this module-level list as it recurses.
all_results = []
for t in sentences:
    get_all_nodes(t)
# build subtitle lines
# Greedily pack parse-tree chunks into lines of at most line_limit chars,
# preferring to break at punctuation near the end of the current line.
# NOTE(review): ROOT and count are first assigned when a "ROOT" label is
# seen; if the first tuple were not a ROOT this would raise NameError --
# appears to rely on the parser always emitting ROOT first. Confirm.
st_line = ""
all_lines = []
for tup in all_results:
    # rename elements
    label = tup[0]
    word_list = tup[1]
    word_string = " ".join(word_list)
    # clean st_line: re-attach punctuation and contractions that
    # tokenization split off as separate tokens
    for p in punctuation:
        st_line = st_line.replace(" " + p, p)
    st_line = st_line.replace(" n't", "n't")
    st_line = st_line.replace(" 's", "'s")
    st_line = st_line.replace("I 'm", "I'm")
    # identify beginning of sentence
    if label == "ROOT":
        ROOT = word_list
        count = 0
    # iterate through chunks and words; word_list[0] == ROOT[count] keeps
    # chunks in sentence order and skips overlapping subtrees
    if len(word_list) > 1 and label[-1] == "P":
        # multi-word phrase chunk (NP/VP/PP/...)
        if len(
                word_string +
                st_line) <= line_limit and word_list[0] == ROOT[count]:
            st_line += word_string + " "
            count += len(word_list)
    elif len(word_list) == 1 and word_list[0] == ROOT[count]:
        if len(st_line + word_string) <= line_limit:
            st_line += word_string + " "
            count += len(word_list)
        else:
            # Line is full: scan backwards for a nearby punctuation break
            # (within 10 chars, or 20 for sentence-final . ! ?)
            p_bin = 0
            for i, char in enumerate(st_line.strip()[::-1][1:]):
                if i < 10 and char in punctuation:
                    all_lines.append(st_line[:-i - 1].strip())
                    st_line = st_line[-i - 1:] + word_string + " "
                    p_bin = 1
                    break
                elif i < 20 and char in ".!?":
                    all_lines.append(st_line[:-i - 1].strip())
                    st_line = st_line[-i - 1:] + word_string + " "
                    p_bin = 1
                    break
            if p_bin == 0:
                # No good break point found: flush the whole line
                all_lines.append(st_line.strip())
                st_line = word_string + " "
            count += len(word_list)
# fix and add final line: apply the same punctuation/contraction cleanup to
# whatever text remains in the buffer, then append it
for p in punctuation + "'":
    st_line = st_line.replace(" " + p, p)
st_line = st_line.replace(" n't", "n't")
st_line = st_line.replace(" 's", "'s")
st_line = st_line.replace("I 'm", "I'm")
all_lines.append(st_line.strip())
# Move punctuation stranded at the start of a line onto the previous line.
# NOTE(review): assumes no line is empty at this point (all_lines[i][0]
# would raise IndexError otherwise).
for i in range(len(all_lines)):
    if all_lines[i][0] in punctuation:
        all_lines[i - 1] = all_lines[i - 1] + all_lines[i][0]
        all_lines[i] = all_lines[i][1:].strip()
# Group lines into subtitles of at most two lines each; a sentence-final
# period forces the current subtitle to close early.
count = 0
all_subs = []
sub = []
for l in all_lines:
    if count == 2:
        # Two lines accumulated: close this subtitle
        all_subs.append(sub)
        sub = []
        count = 0
    if count != 2:
        if l[-1] == ".":
            # Sentence ends here: close the subtitle immediately
            sub.append(l)
            all_subs.append(sub)
            sub = []
            count = 0
        else:
            sub.append(l)
            count += 1
# Interactive spotting: each subtitle is shown when the user presses Enter
# (start of its display) and cleared on the next Enter (end of display).
ks = input("Press 'Enter' to start:")
time_stamps = []
for i in range(len(all_subs)):
    print()
    # First Enter: subtitle appears -- record the start time
    ks = input("\n".join(all_subs[i]))
    begin = time.time()
    # Second Enter: subtitle should disappear -- record the end time
    ks = input()
    end = time.time()
    time_stamps.append((begin, end))
# Normalize all timestamps so the first subtitle starts at t = 0.
start_time = time_stamps[0][0]
for i in range(len(time_stamps)):
    time_stamps[i] = (
        time_stamps[i][0] -
        start_time,
        time_stamps[i][1] -
        start_time)
def to_tc(seconds):
    '''Converts seconds to timecode strings HH:MM:SS,mmm.

    Fix: rounds to the nearest millisecond as a single quantity. The
    previous version rounded the fractional part separately from the
    truncated integer second, so e.g. 59.9996 rendered as "00:00:59,000"
    instead of "00:01:00,000".

    Parameters
    ----------
    seconds : float
        Non-negative time offset in seconds.

    Returns
    -------
    str
        SubRip-style timecode "HH:MM:SS,mmm".
    '''
    total_ms = int(round(seconds * 1000))
    s, ms = divmod(total_ms, 1000)
    m, s = divmod(s, 60)
    h, m = divmod(m, 60)
    return "%02d:%02d:%02d,%03d" % (h, m, s, ms)
# Assemble the final .srt document: sequence number, timecode range, then
# the subtitle's text lines, with entries separated by blank lines.
srt_chunks = []
for idx in range(len(all_subs)):
    begin_tc = to_tc(time_stamps[idx][0])
    end_tc = to_tc(time_stamps[idx][1])
    body = "\n".join(all_subs[idx])
    srt_chunks.append("%d\n%s --> %s\n%s\n\n" % (idx + 1, begin_tc, end_tc, body))
new_srt = "".join(srt_chunks)
with open("newly_matched.srt", "w") as f:
    f.write(new_srt)
| {
"repo_name": "henchc/srtTool",
"path": "srt-tool/srt-Script2SRT.py",
"copies": "1",
"size": "4704",
"license": "mit",
"hash": -7099935273747335000,
"line_mean": 25.5762711864,
"line_max": 182,
"alpha_frac": 0.525297619,
"autogenerated": false,
"ratio": 3.1934826883910388,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9213773229667824,
"avg_score": 0.0010014155446429732,
"num_lines": 177
} |
# Basic tools
import itertools
# Scalers
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import Binarizer
# Feature selection tools
from sklearn.feature_selection import SelectKBest, f_classif
# Unsupervised learning tools
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.manifold import TSNE
# Classifiers
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import AdaBoostClassifier
# Regression tools
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
class PipelineBuilder(object):
    """
    Builds a collection of scikit-learn pipelines based on a combinatorial
    schematic.
    """
    def build_pipeline_bundle(self, pipeline_bundle_schematic):
        """
        Returns a list of scikit-learn pipelines given a pipeline bundle
        schematic.

        The general form of the pipeline bundle schematic is a list of steps:

            pipeline_bundle_schematic = [step_1, ..., step_n]

        Each step is a single-key dict mapping the step type to its options,
        and each option maps parameter names to lists of candidate values:

            step_n = {
                'step_n_type': {
                    'none': {},  # optional: permits omitting this step
                    'option_1': {
                        'parameter_1': [value_1, ..., value_p],
                        ...
                    },
                    ...
                }
            }

        One pipeline is built for every combination of step options and
        parameter values. An option may carry an 'sklo' entry giving an
        explicit scikit-learn class to use instead of the built-in lookup
        in get_supported_sklearn_objects().
        """
        # Get supported scikit-learn objects
        sklearn_packages = self.get_supported_sklearn_objects()

        # Obtain all corresponding scikit-learn package options with all
        # parameter combinations for each step
        pipeline_options = []
        for step in pipeline_bundle_schematic:
            # Each step dict is expected to carry exactly one key: its type.
            step_name = list(step.keys())[0]
            step_options = step[step_name]

            step_iterations = []
            for step_option, step_parameters in step_options.items():
                if step_option != 'none':
                    # Get the parameter names for the current step option
                    parameter_names = [parameter_name for parameter_name \
                                       in step_parameters.keys()]

                    # Obtain scikit-learn object for the step option
                    if 'sklo' in parameter_names:
                        # Use user-provided object if they mark one of the
                        # step-option parameter names as 'sklo' (scikit-learn
                        # object)
                        sklearn_object = step_parameters['sklo']
                    else:
                        # Use default object if supported (a redundant inner
                        # "!= 'none'" check was removed; this branch is
                        # already guarded above)
                        sklearn_object = sklearn_packages[step_name][step_option]

                    # Candidate values for every real (non-'sklo') parameter
                    parameter_combos = [step_parameters[step_parameter_name] \
                                        for step_parameter_name in parameter_names \
                                        if step_parameter_name != 'sklo']

                    # Remove 'sklo'
                    parameter_names = [parameter_name for parameter_name \
                                       in parameter_names \
                                       if parameter_name != 'sklo']

                    # Form all parameter combinations for the current step
                    # option and form and append a (name, instance) step tuple
                    for parameter_combo in list(itertools.product(*parameter_combos)):
                        parameter_kwargs = {pair[0]: pair[1] \
                                            for pair in zip(parameter_names,
                                                            parameter_combo)}

                        step_addendum = (step_name,
                                         sklearn_object(**parameter_kwargs))

                        step_iterations.append(step_addendum)
                else:
                    # Append a placeholder if the step is to be ignored
                    step_iterations.append(None)

            pipeline_options.append(step_iterations)

        # Form all step/parameter permutations and convert to scikit-learn
        # pipelines; None placeholders (from 'none' options) are dropped
        pipelines = []
        for pipeline_skeleton in itertools.product(*pipeline_options):
            pipelines.append(Pipeline([step for step in pipeline_skeleton \
                                       if step]))

        return pipelines

    def get_supported_sklearn_objects(self):
        """
        Returns supported scikit-learn estimators, selectors, and
        transformers, keyed by step type and then by option name.
        """
        sklearn_packages = {
            'feature_selection': {
                'select_k_best': SelectKBest
            },
            'scaler': {
                'standard': StandardScaler,
                'normal': Normalizer,
                'min_max': MinMaxScaler,
                'binary': Binarizer
            },
            'transform': {
                'pca': PCA
                # 't-sne': pipeline_TSNE(n_components=2, init='pca')
            },
            'pre_estimator': {
                'polynomial_features': PolynomialFeatures
            },
            'estimator': {
                'knn': KNeighborsClassifier,
                'logistic_regression': LogisticRegression,
                'svm': SVC,
                'linear_regression': LinearRegression,
                'multilayer_perceptron': MLPClassifier,
                'random_forest': RandomForestClassifier,
                'adaboost': AdaBoostClassifier
            }
        }

        return sklearn_packages

    def get_default_pipeline_step_parameters(self, feature_count):
        """
        Returns default grid parameters for pre-processing, classification,
        and regression pipeline steps.

        Parameters
        ----------
        feature_count : int
            Number of input features; bounds the feature-selection and
            multilayer-perceptron size grids.

        Returns
        -------
        tuple of dict
            (pre_processing, classifier, regression) grid parameters.
        """
        # Set pre-processing pipeline step parameters
        pre_processing_grid_parameters = {
            'select_k_best': {
                'k': range(1, feature_count+1)
            }
        }

        # Set classifier pipeline step parameters
        classifier_grid_parameters = {
            'knn': {
                'n_neighbors': range(1, 31),
                'weights': ['uniform', 'distance']
            },
            'logistic_regression': {
                # Fix: this was np.logspace(-10, 10, 5), but numpy is never
                # imported in this module, so calling this method raised
                # NameError. Same values, dependency-free:
                'C': [1e-10, 1e-05, 1.0, 1e05, 1e10]
            },
            'svm': {},
            'multilayer_perceptron': {
                'hidden_layer_sizes': [[x] for x in range(min(3, feature_count),
                                                          max(3, feature_count)+1)]
            },
            'random_forest': {
                'n_estimators': range(90, 100)
            },
            'adaboost': {}
        }

        # Set regression pipeline step parameters
        # NOTE(review): keyed 'polynomial_regression' while the builder's
        # step option is named 'polynomial_features' -- verify callers.
        regression_grid_parameters = {
            'polynomial_regression': {
                'degree': range(1, 5)
            }
        }

        # Return defaults
        return pre_processing_grid_parameters, classifier_grid_parameters, \
            regression_grid_parameters
| {
"repo_name": "JaggedParadigm/pyplearnr",
"path": "build/lib/pyplearnr/pipeline_builder.py",
"copies": "2",
"size": "7784",
"license": "apache-2.0",
"hash": -2304020637028828700,
"line_mean": 34.7064220183,
"line_max": 101,
"alpha_frac": 0.5381551901,
"autogenerated": false,
"ratio": 4.868042526579112,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6406197716679112,
"avg_score": null,
"num_lines": null
} |
import numpy as np
# Classification metrics
import sklearn.metrics as sklearn_metrics
from sklearn.metrics import classification_report
class PipelineEvaluator(object):
    """
    Class used to evaluate pipelines by scoring predicted against actual
    target values.
    """
    def get_score(self, y, y_pred, scoring_metric):
        """
        Returns the score given target values, predicted values, and a scoring
        metric.

        Parameters
        ----------
        y : numpy.array
            Actual target array
        y_pred : numpy.array
            Predicted target array
        scoring_metric : str, {'auc', 'accuracy', 'rmse'}
            Metric used to score estimator

        Returns
        -------
        score : float
            Score of type scoring_metric determined from the actual and
            predicted target values. 'rmse' is returned negated so that a
            larger score is always better, consistent with the other metrics.
        """
        ############### Check inputs ###############
        if not self.metric_supported(scoring_metric):
            # Fix: corrected "argumet" typo in the error message
            raise Exception("The third positional argument, indicating the " \
                            "estimator scoring metric, %s, is currently "\
                            "unsupported"%(scoring_metric))

        if type(y) is not np.ndarray or type(y_pred) is not np.ndarray:
            raise Exception("The 1st and 2nd positional arguments, " \
                            "representing the respective actual and " \
                            "predicted target arrays, must be of type " \
                            "numpy.array")

        if len(y.shape) != 1 or len(y_pred.shape) != 1 \
            or y.shape[0] != y_pred.shape[0]:
            raise Exception("The 1st and 2nd positional arguments, " \
                            "representing the respective actual and " \
                            "predicted target arrays, must both be of shape " \
                            "(m, )")

        # Rows with NaN predictions cannot be scored; drop them from both.
        y, y_pred = self.remove_nan(y, y_pred)

        ############### Calculate score ###############
        if scoring_metric == 'auc':
            # Get ROC curve points
            false_positive_rate, true_positive_rate, _ = \
                sklearn_metrics.roc_curve(y, y_pred)

            # Calculate the area under the curve
            score = sklearn_metrics.auc(false_positive_rate,
                                        true_positive_rate)
        elif scoring_metric == 'rmse':
            # Calculate the (negated) root mean square error
            score = -np.sqrt(sklearn_metrics.mean_squared_error(y, y_pred))
        elif scoring_metric == 'accuracy':
            score = sklearn_metrics.accuracy_score(y, y_pred)

        return score

    def remove_nan(self, y, y_pred):
        """
        Returns copies of y and y_pred with rows removed wherever y_pred is
        NaN. (Fix: vectorized boolean mask replaces the previous per-row
        Python loop; same selection, O(n) in C.)
        """
        mask = ~np.isnan(y_pred)
        return y[mask], y_pred[mask]

    def metric_supported(self, metric):
        """
        Tells whether estimator scoring metric (Ex: 'auc') is currently
        supported

        Parameters
        ----------
        metric : str

        Returns
        -------
        support_flag : boolean
            True : if the metric is supported
            False : if the metric is not supported
        """
        supported_metrics = ['auc', 'accuracy', 'rmse']

        if not metric:
            raise Exception("The first positional argument must be an " \
                            "estimator scoring metric (Ex: 'auc')")

        return metric in supported_metrics
| {
"repo_name": "JaggedParadigm/pyplearnr",
"path": "build/lib/pyplearnr/pipeline_evaluator.py",
"copies": "2",
"size": "3758",
"license": "apache-2.0",
"hash": -6960512631214257000,
"line_mean": 31.3965517241,
"line_max": 92,
"alpha_frac": 0.5332623736,
"autogenerated": false,
"ratio": 4.468489892984542,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6001752266584542,
"avg_score": null,
"num_lines": null
} |
# Python 2/3 compatibility
from __future__ import print_function
# Basic tools
import numpy as np
import pandas as pd
import random
import re
# For scikit-learn pipeline cloning
from sklearn.base import clone
# Graphing
import pylab as plt
import matplotlib
import matplotlib.pyplot as mpl_plt
import matplotlib.colors as mpl_colors
import matplotlib.cm as cmx
# Cross validation tools
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
# Other pyplearnr classes
from .folds import Fold, OuterFold
from .trained_pipeline import OuterFoldTrainedPipeline
from .pipeline_builder import PipelineBuilder
class NestedKFoldCrossValidation(object):
"""
Class that handles nested k-fold cross validation, whereby the inner loop
handles the model selection and the outer loop is used to provide an
estimate of the chosen model/pipeline's out of sample score.
"""
    def __init__(self, outer_loop_fold_count=3, inner_loop_fold_count=3,
                 outer_loop_split_seed=None, inner_loop_split_seeds=None,
                 shuffle_seed=None, shuffle_flag=True,
                 random_combinations=None, random_combination_seed=None,
                 schematic=None):
        """
        Parameters
        ----------
        outer_loop_fold_count : int, optional
            Number of folds in the outer-loop of the nested k-fold
            cross-validation.
        inner_loop_fold_count : int, optional
            Number of folds in the inner loops of the nested k-fold
            cross-validation for each outer-fold.
        outer_loop_split_seed : int, optional
            Seed determining how the data will be split into outer folds. This,
            along with the other two seeds should be sufficient to reproduce
            results.
        inner_loop_split_seeds : list of int, optional
            Seeds determining how the training data of the outer folds will be
            split. These, along with the other two seeds should be sufficient
            to reproduce results.
        shuffle_seed : int, optional
            Seed determining how the data will be shuffled if the shuffle_flag
            is set to True. This, along with the other two seeds should be
            sufficient to reproduce results.
        shuffle_flag : boolean, optional
            Determines whether the data will be shuffled.
            True : Shuffle data and store shuffled data and the indices used
                   to generate it using the shuffle_seed. Seed is randomly
                   assigned if not provided.
            False : Data is not shuffled and indices that, if used as indices
                    for the data will result in the same data, are saved.
        random_combinations : int, optional
            If given, only this many randomly chosen pipeline step/parameter
            combinations are evaluated instead of the full bundle.
        random_combination_seed : int, optional
            Seed determining which random pipeline combinations are chosen
            when random_combinations is set.
        schematic : optional
            NOTE(review): accepted but never used in this constructor --
            confirm whether it should be stored or removed.
        """
        ############### Save initial inputs ###############
        self.shuffle_flag = shuffle_flag
        self.shuffle_seed = shuffle_seed
        self.outer_loop_fold_count = outer_loop_fold_count
        self.inner_loop_fold_count = inner_loop_fold_count
        self.outer_loop_split_seed = outer_loop_split_seed
        self.inner_loop_split_seeds = inner_loop_split_seeds
        self.random_combinations = random_combinations
        self.random_combination_seed = random_combination_seed

        ############### Initialize other fields ###############
        # Shuffled data indices
        self.shuffled_data_inds = None

        # OuterFold objects fold indices
        self.outer_folds = {}

        # Input data and targets
        self.X = None
        self.y = None
        self.shuffled_X = None
        self.shuffled_y = None

        self.pipelines = None

        # Best pipeline trained on all data
        self.pipeline = None

        # Essentially, uses this measure of centrality (mean or median) of the
        # inner-fold scores to decide winning for that outer-fold
        self.score_type = None

        # Metric to calculate as the pipeline scores (Ex: 'auc', 'rmse')
        self.scoring_metric = None

        self.best_pipeline_ind = None

        ############### Populate fields with defaults ###############
        if self.shuffle_seed is None:
            self.shuffle_seed = random.randint(1,5000)

        # Generate seeds if not given (*200 since 5-fold CV results in range
        # of 1 to 1000)
        if self.outer_loop_split_seed is None:
            self.outer_loop_split_seed = random.randint(
                1,
                self.outer_loop_fold_count*200)

        if self.inner_loop_split_seeds is None:
            # One inner-loop seed per outer fold
            self.inner_loop_split_seeds = np.random.randint(
                1,
                high=self.inner_loop_fold_count*200,
                size=self.outer_loop_fold_count)

        if self.random_combinations is not None:
            if self.random_combination_seed is None:
                self.random_combination_seed = random.randint(1,5000)

        ############### Check fields so far ###############
        # NOTE(review): input validation via assert is stripped under
        # python -O; kept as-is to preserve the AssertionError contract.
        outer_loop_fold_count_error = "The outer_loop_fold_count" \
            " keyword argument, dictating the number of folds in the outer " \
            "loop, must be a positive integer."

        assert type(self.outer_loop_fold_count) is int, \
            outer_loop_fold_count_error

        assert self.outer_loop_fold_count > 0, outer_loop_fold_count_error

        inner_loop_fold_count_error = "The inner_loop_fold_count" \
            " keyword argument, dictating the number of folds in the inner" \
            " loop, must be a positive integer"

        assert type(self.inner_loop_fold_count) is int, inner_loop_fold_count_error

        assert self.inner_loop_fold_count > 0, inner_loop_fold_count_error

        assert type(self.outer_loop_split_seed) is int, "The " \
            "outer_loop_split_seed keyword argument, dictating how the data "\
            "is split into folds for the outer loop, must be an integer."

        if type(self.inner_loop_split_seeds) is not np.ndarray \
            and type(self.inner_loop_split_seeds) is not list:
            raise Exception("The inner_loop_split_seed keyword argument," \
                " dictating how the data is split into folds for the inner"\
                " loop, must be of type np.ndarray or list" )

        assert len(self.inner_loop_split_seeds) == self.outer_loop_fold_count, \
            "The number of inner-loop contest seeds must be equal to the " \
            "number of outer-folds"

        if self.random_combinations is not None:
            assert type(self.random_combinations) is int, "The number of " \
                "pipeline step/parameter combinations, random_combinations," \
                " must be of type int."

            if self.random_combination_seed is not None:
                assert type(self.random_combination_seed) is int, "The seed " \
                    "determining how the exact pipeline step/parameter " \
                    "combinations is chosen, random_combination_seed," \
                    " must be of type int."
    def fit(self, X, y, pipelines=None, stratified=True, scoring_metric='auc',
            tie_breaker='choice', best_inner_fold_pipeline_inds=None,
            best_outer_fold_pipeline=None, score_type='median',
            pipeline_schematic=None):
        """
        Perform nested k-fold cross-validation on the data using the user-
        provided pipelines.

        This method is used in stages, depending on the output:

        1) Train and score the pipelines on the inner-loop folds of each
           outer-fold. Decide the winning pipeline based on the highest mean
           or median score. Alert the user if no winner can be chosen because
           they have the same score. If a winner is chosen, go to step 3.
        2) If no winning pipeline is chosen in a particular inner-loop contest,
           The rule dictated by the tie_breaker keyword argument will decide
           the winner. If tie_breaker is 'choice', the user is alerted and
           asked to run the fit method again with
           best_inner_fold_pipeline_inds keyword argument to decide the winner
           (preferably the simplest model).
        3) If a winner is chosen for each inner-loop contest for each outer-
           loop in step 1 or the user designates one in step 2, the winning
           pipelines of all outer-folds are collected and the ultimate winner
           is chosen with the highest number of outer-folds it has won. If
           no winner is chosen, the user is alerted and asked to run the fit
           method again with the best_outer_fold_pipeline keyword argument to
           decide the final winner (again, preferably the simplest).
        4) The ultimate winning pipeline is trained on the training set of the
           outer-folds and tested on it's testing set (the validation set),
           scored, and those values are used as an estimate of out-of-sample
           scoring. The final pipeline is then trained on all available data
           for use in prediction/production. A final report is output with
           details of the entire procedure.

        Parameters
        ----------
        X : numpy.ndarray, shape (m, n)
            Feature input matrix. Rows are values of each column feature for a
            given observation.
        y : numpy.ndarray, shape (m, )
            Target vector. Each entry is the output given the corresponding row
            of feature inputs in X.
        pipelines : list of sklearn.pipeline.Pipeline objects
            The scikit-learn pipelines that will be copied and evaluated
        stratified : boolean, optional
            Determines if the data will be stratified so that the target labels
            in the resulting feature matrix and target vector will have the
            same overall composition as all of the data. This is a best
            practice for classification problems.
            True : Stratify the data
            False : Don't stratify the data
        scoring_metric : str, {'auc', 'rmse', 'accuracy'}, optional
            Metric used to score estimator.
            auc :  Area under the receiver operating characteristic (ROC)
                   curve.
            accuracy : Percent of correctly classified targets
            rmse : Root mean-squared error. Essentially, distance of actual
                   from predicted target values.
        tie_breaker : str, {'choice', 'first'}, optional
            Decision rule to use to decide the winner in the event of a tie
            choice :  Inform the user that a tie has occured between
                      pipelines, either in the inner-loop contest or
                      outer-loop contest of the nested k-fold cross-
                      validation, and that they need to include either the
                      best_inner_fold_pipeline_inds or
                      best_outer_fold_pipeline keyword arguments when running
                      the fit method again to decide the winner(s).
            first :   Simply use the first pipeline, in the order provided,
                      with the same score.
        best_inner_fold_pipeline_inds : dict, optional
            Mapping of outer-fold index to the pipeline index the user picks
            as that fold's inner-loop winner (used to resolve reported ties).
        best_outer_fold_pipeline : int, optional
            Pipeline index the user picks as the overall winner (used to
            resolve a reported tie in the outer-loop contest).
        score_type : str, optional
            Measure of centrality ('mean' or 'median' -- presumably; confirm
            against OuterFold) of inner-fold scores used to pick each
            outer-fold's winner.
        pipeline_schematic : optional
            Pipeline bundle schematic passed to PipelineBuilder when no
            explicit pipelines list is given.
        """
        # Build list of scikit-learn pipelines if not provided by user
        if pipelines is None:
            if pipeline_schematic is not None:
                # Form scikit-learn pipelines using the PipelineBuilder
                pipelines = PipelineBuilder().build_pipeline_bundle(
                    pipeline_schematic)
            else:
                raise Exception("A pipeline schematic keyword argument, " \
                    "pipeline_schematic, must be provided if no list of " \
                    "pipelines in the pipelines keyword argument is provided")

        if not best_outer_fold_pipeline:
            ######## Choose best inner fold pipelines for outer folds ########
            if not best_inner_fold_pipeline_inds:
                # Stage 1: first call -- full inner-loop contest
                ############### Save inputs ###############
                self.X = X
                self.y = y

                self.scoring_metric = scoring_metric
                self.score_type = score_type

                ############### Check inputs ###############
                self.check_feature_target_data_consistent(self.X, self.y)

                # TODO: add check for pipelines once this is working

                ############ Shuffle data and save it and indices ############
                self.shuffle_data()

                ############### Save pipelines ###############
                if self.random_combinations is None:
                    self.pipelines = {pipeline_ind: pipeline \
                        for pipeline_ind, pipeline in enumerate(pipelines)}
                else:
                    # Random subsample of the pipeline bundle, reproducible
                    # via random_combination_seed
                    shuffled_pipeline_inds = np.arange(len(pipelines))

                    random.seed(self.random_combination_seed)

                    random.shuffle(shuffled_pipeline_inds)

                    self.pipelines = {
                        int(pipeline_ind): pipelines[pipeline_ind] for pipeline_ind \
                        in shuffled_pipeline_inds[:self.random_combinations]
                    }

                ########## Derive outer and inner loop split indices ##########
                self.get_outer_split_indices(self.shuffled_X, y=self.shuffled_y,
                                             stratified=stratified)

                ########### Perform nested k-fold cross-validation ###########
                for outer_fold_ind, outer_fold in self.outer_folds.items():
                    outer_fold.fit(self.shuffled_X, self.shuffled_y,
                                   self.pipelines,
                                   scoring_metric=self.scoring_metric)
            else:
                # Stage 2: user supplied inner-loop tie-break winners
                for outer_fold_ind, best_pipeline_ind in \
                    best_inner_fold_pipeline_inds.items():
                    self.outer_folds[outer_fold_ind].fit(
                        self.shuffled_X, self.shuffled_y, self.pipelines,
                        scoring_metric=self.scoring_metric,
                        best_inner_fold_pipeline_ind=best_pipeline_ind)

        ############### Choose best outer fold pipeline ###############
        self.choose_best_outer_fold_pipeline(
            tie_breaker=tie_breaker,
            best_outer_fold_pipeline=best_outer_fold_pipeline)

        ############### Train winning pipeline on outer folds ###############
        self.train_winning_pipeline_on_outer_folds()

        ############### Train production pipeline ###############
        self.train_production_pipeline()

        ############### Output report ###############
        self.print_report()
    def train_production_pipeline(self):
        """
        Collects validation scores for and trains winning pipeline on all of
        the data for use in production. Does nothing if no winning pipeline
        has been chosen yet (self.best_pipeline_ind is None).
        """
        best_pipeline_ind = self.best_pipeline_ind

        if best_pipeline_ind is not None:
            ############### Initialize production pipeline ###############
            # Clone so the production copy is untouched by fold training
            pipeline_kwargs = {
                'pipeline_id': best_pipeline_ind,
                'pipeline': clone(self.pipelines[best_pipeline_ind], safe=True),
                'scoring_metric': self.scoring_metric,
                'score_type': self.score_type,
            }

            self.pipeline = OuterFoldTrainedPipeline(**pipeline_kwargs)

            ############### Collect outer fold validation scores ###############
            inner_loop_train_scores = []
            inner_loop_test_scores = []
            for outer_fold in self.outer_folds.values():
                best_pipeline = outer_fold.pipelines[best_pipeline_ind]

                inner_loop_test_scores.append(best_pipeline.test_scores[0])
                inner_loop_train_scores.append(best_pipeline.train_scores[0])

            ############### Set scores in production pipeline ###############
            self.pipeline.set_inner_loop_scores(inner_loop_train_scores,
                                                inner_loop_test_scores)

            ############# Train production pipeline on all data #############
            # Accessing internal pipeline because the TrainedPipeline fit
            # method actually fits the data and then scores the resulting
            # pipeline. There is no test data when training on all data, so
            # it doesn't make sense to score the pipeline after fitting.
            self.pipeline.pipeline.fit(self.shuffled_X, self.shuffled_y)
def predict(self, X):
"""
Uses the best pipeline to make a class prediction.
"""
return self.pipeline.pipeline.predict(X)
def predict_proba(self, X):
"""
Uses the best pipeline to give the probability of each result
"""
return self.pipeline.pipeline.predict_proba(X)
def train_winning_pipeline_on_outer_folds(self):
"""
Trains and obtains validation scores for the winning model of the
nested k-fold cross-validation inner loop contest if a winner has
been chosen (self.best_pipeline_ind is set)
"""
if self.best_pipeline_ind is not None:
for outer_fold in self.outer_folds.values():
outer_fold.train_winning_pipeline(self.best_pipeline_ind)
def choose_best_outer_fold_pipeline(self, tie_breaker='choice',
                                    best_outer_fold_pipeline=None):
    """
    Pools the winners of each outer fold's inner-loop contest and selects
    the overall winner by majority vote, by explicit user choice
    (preferably the simplest model for better out-of-sample performance),
    or by another decision rule.

    Parameters
    ----------
    tie_breaker : str, {'choice', 'first'}
        Decision rule used when the vote is tied
        choice : Inform the user that they need to use the
            choose_best_pipelines method to pick the winner of the
            inner loop contest
        first : Simply use the first model with the same score
    best_outer_fold_pipeline : int or None
        Explicit winner index supplied by the user; overrides the vote.
    """
    # Winning pipeline index from each outer fold's inner-loop contest
    fold_winners = [fold.best_pipeline_ind
                    for fold in self.outer_folds.values()]
    # Do nothing until every outer fold has produced a winner
    if any(winner is None for winner in fold_winners):
        return
    # Tally votes per candidate pipeline and find the tied leaders
    vote_counts = {ind: fold_winners.count(ind) for ind in fold_winners}
    top_count = max(vote_counts.values())
    tied_inds = [ind for ind, count in vote_counts.items()
                 if count == top_count]
    winner = None
    if best_outer_fold_pipeline is not None:
        # The user explicitly supplied the winner
        winner = best_outer_fold_pipeline
    elif len(tied_inds) == 1:
        # Clear majority winner
        winner = tied_inds[0]
    elif tie_breaker == 'first':
        winner = tied_inds[0]
    elif tie_breaker == 'choice':
        # No clear winner: list the tied candidates and ask the user to
        # re-run fit with an explicit index
        for tied_ind in tied_inds:
            print(tied_ind, self.pipelines[tied_ind])
        print("\n\nNo model was chosen because there is no clear winner. "
              "Please use the same fit method with one of the "
              "indices above.\n\nExample:\tkfcv.fit(X.values, "
              "y.values, pipelines)\n\t\t"
              "kfcv.fit(X.values, y.values, pipelines, "
              "best_outer_fold_pipeline=9)")
    if winner is not None:
        self.best_pipeline_ind = winner
def shuffle_data(self):
    """
    Shuffles the feature matrix self.X and target vector self.y (only
    when self.shuffle_flag is set) and stores the shuffled copies in
    self.shuffled_X / self.shuffled_y along with the permutation indices.
    """
    # Compute and store the (possibly identity) permutation of row indices
    self.get_shuffled_data_inds()
    # Apply the same permutation to features and targets
    inds = self.shuffled_data_inds
    self.shuffled_X = self.X[inds]
    self.shuffled_y = self.y[inds]
def get_shuffled_data_inds(self):
    """
    Computes and stores the (optionally shuffled) row indices of the
    feature matrix in self.shuffled_data_inds.
    """
    inds = np.arange(self.X.shape[0])
    if self.shuffle_flag:
        # Seeded shuffle so that runs are reproducible
        random.seed(self.shuffle_seed)
        random.shuffle(inds)
    self.shuffled_data_inds = inds
def get_outer_split_indices(self, X, y=None, stratified=True):
"""
Calculates and stores the outer-fold and nested inner-fold train/test
split indices for the given data in self.outer_folds.

Parameters
----------
X : numpy.ndarray
    Two-dimensional feature matrix.
y : numpy.ndarray, optional
    Target value vector; required when stratified is True.
stratified : bool
    Whether the splits should preserve class proportions
    (StratifiedKFold) instead of plain KFold.
"""
################ Check inputs ###############
self.check_feature_target_data_consistent(X, y)
assert type(stratified) is bool, "The keyword argument determining " \
"whether the splits are to be stratified or not, stratified, must" \
" be boolean (True or False)."
if stratified:
# NOTE(review): y.any() is also falsy for an all-zero target vector,
# not just a missing one — presumably the intent is a y-is-present test
assert y.any(), "Target value vector keyword argument, y, must " \
"be present if stratified split keyword argument, stratified," \
" is True."
################ Choose K-fold cross-validation type ################
# Plain KFold for regression ('rmse') or when stratification is off;
# one inner splitter per configured seed so the folds are reproducible
if not stratified or self.scoring_metric=='rmse':
outer_k_fold_splitter = KFold(n_splits=self.outer_loop_fold_count,
random_state=self.outer_loop_split_seed)
outer_split_kwargs = {}
inner_k_fold_splitters = \
[KFold(n_splits=self.inner_loop_fold_count, random_state=seed) \
for seed in self.inner_loop_split_seeds]
else:
outer_k_fold_splitter = StratifiedKFold(
n_splits=self.outer_loop_fold_count,
random_state=self.outer_loop_split_seed)
outer_split_kwargs = {'y': y}
inner_k_fold_splitters = \
[StratifiedKFold(n_splits=self.inner_loop_fold_count,
random_state=seed) \
for seed in self.inner_loop_split_seeds]
################ Calculate and save outer and inner fold split indices ################
for fold_id, (outer_train_inds, outer_test_inds) in enumerate(outer_k_fold_splitter.split(X,**outer_split_kwargs)):
self.outer_folds[fold_id] = OuterFold(
fold_id=fold_id,
test_fold_inds=outer_test_inds,
train_fold_inds=outer_train_inds)
# Make sure the targets are available for a stratified run
if not stratified:
inner_split_kwargs = {}
else:
inner_split_kwargs = {'y': y[outer_train_inds]}
# Get inner fold splitter for current outer fold
inner_k_fold_splitter = inner_k_fold_splitters[fold_id]
# Save inner fold test/train split indices
# NOTE: inner indices are relative to X[outer_train_inds], not to X
for inner_fold_id, (inner_train_inds, inner_test_inds) in enumerate(inner_k_fold_splitter.split(X[outer_train_inds],**inner_split_kwargs)):
self.outer_folds[fold_id].inner_folds[inner_fold_id] = \
Fold(
fold_id=inner_fold_id,
test_fold_inds=inner_test_inds,
train_fold_inds=inner_train_inds)
def check_feature_target_data_consistent(self, X, y):
    """
    Checks that the feature matrix and the (optional) target vector are
    numpy arrays of the correct dimensionality and consistent sizes.

    Parameters
    ----------
    X : numpy.ndarray
        Two-dimensional feature matrix of shape (m, n).
    y : numpy.ndarray or None
        Optional flat target vector of shape (m,).

    Raises
    ------
    AssertionError
        If X or y fails any type, shape, or size check.
    """
    assert type(X) is np.ndarray, "Feature matrix, X, must be of type " \
        "numpy.ndarray."
    # BUG FIX: the presence test was previously y.any(), which raised
    # AttributeError for y=None (a valid, documented case) and silently
    # skipped all validation for an all-zero target vector.
    if y is not None:
        assert type(y) is np.ndarray, "Target vector, y, must be of type " \
            "numpy.ndarray if given."
        assert len(y.shape) == 1, "Target vector must have a flat shape. " \
            "In other words the shape should be (m,) instead of (m,1) or " \
            "(1,m)."
    assert len(X.shape) == 2, "Feature matrix, X, must be 2-dimensional. " \
        "If the intention was to have only one data point with a single " \
        "value for each feature make the array (1,n). If there is only " \
        "one feature make the array nx1 (instead of just having a shape " \
        "of (n,))."
    if y is not None:
        assert X.shape[0] == y.shape[0], "The number of rows of the " \
            "feature matrix, X, must match the length of the target " \
            "value vector, y, if given."
def print_report(self):
    """
    Prints the nested k-fold cross-validation report, but only once a
    winning pipeline has been chosen.
    """
    if self.best_pipeline_ind is None:
        return
    print(self.get_report())
def get_report(self):
"""
Builds the formatted nested k-fold cross-validation report string for
the winning pipeline: validation score statistics, the per-fold
inner-loop score matrix, the pipeline's steps, and the run
parameters/seeds.

Returns
-------
str
    Multi-line, ready-to-print report.
"""
############### Get validation scores for best pipeline ###############
inner_loop_test_scores = self.pipeline.inner_loop_test_scores
############### Form pipeline string ###############
pipeline_str = '\n'.join(['{}:\n{}\n'.format(*step) \
for step in self.pipeline.pipeline.steps])
############### Build inner/outer-fold scores matrix ###############
# Rows are outer folds, columns are inner folds
score_matrix = np.zeros([self.outer_loop_fold_count,
self.inner_loop_fold_count])
outer_fold_inds = []
inner_fold_inds = []
# Collect all outer- and inner-fold labels and populate score matrix
for outer_fold_ind, outer_fold in self.outer_folds.items():
if outer_fold_ind not in outer_fold_inds:
outer_fold_inds.append(outer_fold_ind)
for inner_fold_ind, inner_fold in outer_fold.inner_folds.items():
if inner_fold_ind not in inner_fold_inds:
inner_fold_inds.append(inner_fold_ind)
score = inner_fold.pipelines[ \
self.best_pipeline_ind].test_scores[0]
score_matrix[outer_fold_ind, inner_fold_ind] = score
# Form headers for validation section
quartile_headers = ['min', '25%', '50%', '75%', 'max']
# NOTE(review): mean_based_headers and outer_fold_headers are built but
# never used below
mean_based_headers = ['mean', 'std']
outer_fold_headers = ['%d'%(outer_fold_ind) \
for outer_fold_ind in outer_fold_inds]
# Get validation scores and their mean, std, and quartiles
validation_scores = inner_loop_test_scores
validation_mean = np.mean(inner_loop_test_scores)
validation_std = np.std(inner_loop_test_scores, ddof=1)
validation_quartiles = np.percentile(inner_loop_test_scores,
[0, 25, 50, 75, 100])
# Calculate means, standard deviations, and quartiles
means = np.mean(score_matrix, axis=1)
stds = np.std(score_matrix, axis=1, ddof=1)
quartiles = np.percentile(score_matrix, [0, 25, 50, 75, 100],
axis=1)
# Initialize data report
data_report = []
# Form base header and data row format strings
header_str = '{0:>4}{1:>10}{2:>15}'
data_row_str = '{0:>4}{1:>10.4}{2:>15}'
# Form in-report dividers based on number of outer folds
data_report_divider = '---------------------- ------'
data_report_divider += (10*len(outer_fold_inds))*'-'
# Add additional columns based on number of outer folds
inner_loop_contest_headers = ['','','']
for outer_fold_ind_ind, outer_fold_ind in enumerate(outer_fold_inds):
inner_loop_contest_headers.append('OF%d'%(outer_fold_ind))
header_str += '{%d:>10}'%(outer_fold_ind_ind+3)
data_row_str += '{%d:>10.4}'%(outer_fold_ind_ind+3)
# Add quartile data rows
data_report.append(header_str.format(*inner_loop_contest_headers))
for quartile_header_ind, quartile_header in enumerate(quartile_headers):
row_values = [quartile_header, validation_quartiles[quartile_header_ind], quartile_header]
for outer_fold_quartile_score in quartiles[quartile_header_ind]:
row_values.append(outer_fold_quartile_score)
data_report.append(data_row_str.format(*row_values))
data_report.append(data_report_divider)
# Start mean data rows
row_values = ['mean', validation_mean, 'mean']
for outer_fold_mean_score in means:
row_values.append(outer_fold_mean_score)
data_report.append(data_row_str.format(*row_values))
row_values = ['std', validation_std, 'std']
for outer_fold_score_std in stds:
row_values.append(outer_fold_score_std)
data_report.append(data_row_str.format(*row_values))
data_report.append(data_report_divider)
# Fill rows where there are both validation and inner fold scores
outer_fold_count = len(outer_fold_inds)
inner_fold_count = len(inner_fold_inds)
outer_fold_ind = 0
inner_fold_ind = 0
while outer_fold_ind <= outer_fold_count-1 \
and inner_fold_ind <= inner_fold_count-1:
row_values = ['OF%d'%(outer_fold_ind),
validation_scores[outer_fold_ind],
'IF%d'%(inner_fold_ind)]
for outer_inner_fold_score in score_matrix.T[inner_fold_ind]:
row_values.append(outer_inner_fold_score)
data_report.append(data_row_str.format(*row_values))
inner_fold_ind += 1
outer_fold_ind += 1
# Pad the remaining rows when the fold counts differ
if outer_fold_ind <= outer_fold_count-1: # Still more outer folds
while outer_fold_ind <= outer_fold_count-1:
row_values = ['OF%d'%(outer_fold_ind),
validation_scores[outer_fold_ind],
'-']
for outer_inner_fold_score in score_matrix.T[inner_fold_ind-1]:
row_values.append('-')
data_report.append(data_row_str.format(*row_values))
outer_fold_ind += 1
elif inner_fold_ind <= inner_fold_count-1: # Still more innerfolds
while inner_fold_ind <= inner_fold_count-1:
row_values = ['-',
'-',
'IF%d'%(inner_fold_ind)]
for outer_inner_fold_score in score_matrix.T[inner_fold_ind]:
row_values.append(outer_inner_fold_score)
data_report.append(data_row_str.format(*row_values))
inner_fold_ind += 1
############### Form and print report ###############
str_inputs = {
'data_report': '\n'.join(data_report),
'data_report_divider': data_report_divider,
'divider': 80*'-',
'best_pipeline_ind': self.best_pipeline_ind,
'pipeline': pipeline_str,
'outer_loop_fold_count': self.outer_loop_fold_count,
'inner_loop_fold_count': self.inner_loop_fold_count,
'shuffle_seed': self.shuffle_seed,
'outer_loop_split_seed': self.outer_loop_split_seed,
'inner_loop_split_seeds': ', '.join(['%d'%(seed) \
for seed in self.inner_loop_split_seeds]),
'scoring_metric': self.scoring_metric,
'score_type': self.score_type,
'random_combinations': self.random_combinations,
'random_combination_seed': self.random_combination_seed
}
report_str = \
"""
{divider}
Best pipeline: {best_pipeline_ind}
{divider}
{data_report_divider}
Validation performance Inner-loop scores
{data_report_divider}
{data_report}
{data_report_divider}
{divider}
Pipeline steps
---------------
{pipeline}
{divider}
Nested k-fold cross-validation parameters
-----------------------------------------
scoring metric:\t\t\t{scoring_metric}
scoring type:\t\t\t{score_type}
outer-fold count:\t\t{outer_loop_fold_count}
inner-fold count:\t\t{inner_loop_fold_count}
shuffle seed:\t\t\t{shuffle_seed}
outer-loop split seed:\t\t{outer_loop_split_seed}
inner-loop split seeds:\t\t{inner_loop_split_seeds}
random combinations:\t\t{random_combinations}
random combination seed:\t{random_combination_seed}
{divider}
""".format(**str_inputs)
# Replace extra spaces resulting from indentation
report_str = re.sub('\n ', '\n', report_str)
return report_str
def plot_best_pipeline_scores(self, fontsize=10, figsize=(9, 3),
markersize=8, draw_points=False,
box_line_thickness=1):
"""
Box-plots the winning pipeline's inner-loop test scores for every
outer fold, alongside its production validation scores ('val').

Parameters
----------
fontsize : int
    Font size of axis and tick labels.
figsize : tuple
    Figure size in inches (width, height).
markersize : int
    Size of the median/point markers.
draw_points : bool
    Whether to overlay the individual score points on the boxes.
box_line_thickness : int or float
    Line width of the box-and-whisker elements.
"""
# Get data
best_pipeline_data = {}
for outer_fold_ind, outer_fold in self.outer_folds.items():
best_pipeline_data[outer_fold_ind] = \
outer_fold.pipelines[self.best_pipeline_ind].inner_loop_test_scores
# Validation scores of the production pipeline, plotted as 'val'
best_pipeline_data['val'] = self.pipeline.inner_loop_test_scores
df = pd.DataFrame(best_pipeline_data)
self.box_plot(df, x_label=self.scoring_metric, fontsize=fontsize,
figsize=figsize, markersize=markersize,
draw_points=draw_points,
box_line_thickness=box_line_thickness)
def plot_contest(self, fontsize=6, figsize=(10, 30), markersize=2,
all_folds=False, color_by=None, color_map='viridis',
legend_loc='best', legend_font_size='10',
legend_marker_size=0.85, box_line_thickness=0.5,
draw_points=False, highlight_best=False):
"""
Box-plots the inner-loop test scores of every pipeline, either one
figure per outer-fold contest (default) or all folds pooled into a
single figure (all_folds=True). Boxes are sorted by median score and
can be colored by pipeline step option via color_by.

Parameters
----------
all_folds : bool
    Pool scores from all outer folds into one plot instead of one
    plot per outer fold.
color_by : str or None
    Step type (optionally 'step__option__parameter') used to color
    the boxes; None leaves default coloring.
color_map : str
    Matplotlib colormap name used when color_by is given.
highlight_best : bool
    Passed through to get_colors when color_by is given.
Other keyword arguments are forwarded to box_plot.
"""
colors = None
custom_legend = None
# Collect pipeline data for each outer-fold contest
pipeline_data = {pipeline_ind: {} for pipeline_ind in self.pipelines}
for outer_fold_ind, outer_fold in self.outer_folds.items():
for pipeline_ind, pipeline in outer_fold.pipelines.items():
pipeline_data[pipeline_ind][outer_fold_ind] = \
outer_fold.pipelines[pipeline_ind].inner_loop_test_scores
# Plot
if not all_folds:
# Do a separate box-and-whisker plot for each outer fold contest
for outer_fold_ind in self.outer_folds:
# Collect data for all pipelines corresponding to the current
# outer-fold
current_fold_data = {}
for pipeline_ind in self.pipelines:
current_fold_data[pipeline_ind] = \
pipeline_data[pipeline_ind][outer_fold_ind]
df = pd.DataFrame(current_fold_data)
# Order the columns (boxes) by median score, ascending
medians = df.median()
medians.sort_values(ascending=True, inplace=True)
df = df[medians.index]
if color_by:
colors = self.get_colors(
df, color_by=color_by, color_map=color_map,
highlight_best=highlight_best)
custom_legend = self.get_custom_legend(
df,
color_by=color_by,
color_map=color_map)
self.box_plot(df, x_label=self.scoring_metric,
fontsize=fontsize, figsize=figsize,
markersize=markersize, colors=colors,
custom_legend=custom_legend,
legend_loc=legend_loc,
legend_font_size=legend_font_size,
legend_marker_size=legend_marker_size,
box_line_thickness=box_line_thickness,
draw_points=draw_points)
else:
# Combine all data for each pipeline and graph all together
all_fold_data = {}
for pipeline_ind, outer_fold_pipeline_data in pipeline_data.items():
if pipeline_ind not in all_fold_data:
all_fold_data[pipeline_ind] = []
for outer_fold_ind, outer_fold in self.outer_folds.items():
all_fold_data[pipeline_ind].extend(
pipeline_data[pipeline_ind][outer_fold_ind])
df = pd.DataFrame(all_fold_data)
# Order the columns (boxes) by median score, ascending
medians = df.median()
medians.sort_values(ascending=True, inplace=True)
df = df[medians.index]
if color_by:
colors = self.get_colors(df, color_by=color_by,
color_map=color_map,
highlight_best=highlight_best)
custom_legend = self.get_custom_legend(df,
color_by=color_by,
color_map=color_map)
self.box_plot(df, x_label=self.scoring_metric,
fontsize=fontsize, figsize=figsize,
markersize=markersize, colors=colors,
custom_legend=custom_legend, legend_loc=legend_loc,
legend_font_size=legend_font_size,
legend_marker_size=legend_marker_size,
box_line_thickness=box_line_thickness,
draw_points=draw_points)
def box_plot(self, df, x_label=None, fontsize=25, figsize=(15, 10),
markersize=12, colors=None, custom_legend=None,
legend_loc='best', legend_font_size='10',
legend_marker_size=0.85, box_line_thickness=1.75,
draw_points=False):
"""
Plots every column of a dataframe as one horizontal box in a
box-and-whisker plot and displays the figure with plt.show().

Parameters
----------
df : pandas.DataFrame
    One column per box; column labels become the y-tick labels.
x_label : str or None
    Optional label for the x-axis.
colors : list or None
    One color per box; defaults to black boxes when None.
custom_legend : (labels, proxies) tuple or None
    Legend entries, as returned by get_custom_legend.
draw_points : bool
    Whether to overlay the raw data points on each box.
Other keyword arguments control font, figure, marker, and line sizes.
"""
tick_labels = [str(column) for column in df.columns]
fontsize = fontsize
# Draw figure and axis
fig, ax = plt.subplots(figsize=figsize)
# Set background to opaque
fig.patch.set_facecolor('white')
# Set grid parameters
ax.yaxis.grid(False)
ax.xaxis.grid(True, linestyle='--', which='both', color='black',
alpha=0.5, zorder=1)
# Set left frame attributes
ax.spines['left'].set_linewidth(1.8)
ax.spines['left'].set_color('gray')
ax.spines['left'].set_alpha(1.0)
# Remove all but bottom frame line
# ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
# Draw box plot (horizontal boxes, whiskers at 5x IQR)
box_plot_kwargs = dict(
notch=0,
sym='+',
vert=False,
whis=5,
patch_artist=True,
capprops=dict(
color='k',
linestyle='-',
linewidth=box_line_thickness
),
boxprops=dict(
linestyle='-',
linewidth=box_line_thickness,
color='black'
),
medianprops=dict(
linestyle='none',
color='k',
linewidth=box_line_thickness
),
whiskerprops=dict(
color='k',
linestyle='-',
linewidth=box_line_thickness
)
)
bp = plt.boxplot(df.values,**box_plot_kwargs)
# Set custom colors
if colors:
for item in ['boxes']: #'medians' 'whiskers', 'fliers', 'caps'
for patch, color in zip(bp[item],colors):
patch.set_color(color)
for patch, color in zip(bp['medians'],colors):
patch.set_color('black')
else:
for patch in bp['boxes']:
patch.set_color('black')
for patch in bp['medians']:
patch.set_color('black')
# Draw overlying data points
if draw_points == True:
for column_ind,column in enumerate(df.columns):
# Get data
y = (column_ind+1)*np.ones(len(df[column]))
x = df[column].values
# Plot data points
plt.plot(x,y,'.',color='k',markersize=markersize)
# Set tick labels and sizes
plt.setp(ax, yticklabels=tick_labels)
plt.setp(ax.get_yticklabels(), fontsize=fontsize)
plt.setp(ax.get_xticklabels(), fontsize=fontsize)
# Adjust limits so plot elements aren't cut off
x_ticks, x_tick_labels = plt.xticks()
# shift half of range to left
range_factor = 2
x_min = x_ticks[0]
x_max = x_ticks[-1] + (x_ticks[-1] - x_ticks[-2])/float(range_factor)
# Set new limits
plt.xlim(x_min, x_max)
# Set tick positions
plt.xticks(x_ticks)
# Place x- and y-labels
plt.xlabel(x_label, size=fontsize)
# plt.ylabel(y_label,size=small_text_size)
# Move ticks to where I want them
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('left')
if custom_legend:
ax.legend(custom_legend[1], custom_legend[0],
handlelength=legend_marker_size,
handleheight=legend_marker_size,
frameon=False, loc=legend_loc)
plt.setp(plt.gca().get_legend().get_texts(),
fontsize=legend_font_size)
# Draw a white dot for medians
for column_ind,column in enumerate(df.columns):
x_median = np.median(df[column].values)
y_median = (column_ind+1)*np.ones(1)
# Plot data points
plt.plot(x_median,y_median,'o',color='white',markersize=markersize,
markeredgecolor='white', zorder=3)
# Display plot
plt.show()
def get_organized_pipelines(self, step_type=None):
    """
    Groups pipeline indices by the option used for the desired step type
    (Ex: knn, svm, logistic_regression for step type 'estimator').
    Pipelines that lack the desired step type are collected under a
    'None' dictionary entry.

    Parameters
    ----------
    step_type : str or None
        Name of the pipeline step to organize by. May also be of the
        form 'step__option__parameter' to group by the value of a
        specific parameter of a specific step option. When None, every
        pipeline ends up in the 'None' group.

    Returns
    -------
    dict
        Maps each step/option name to a {'pipeline_inds': [...]} entry.
    """
    organized_pipelines = {}
    # BUG FIX: the containment test previously ran on the default
    # step_type=None and raised TypeError ("argument of type 'NoneType'
    # is not iterable"); guard against None first.
    if step_type is not None and '__' in step_type:
        step_type, step_option, step_parameter = step_type.split('__')
    else:
        step_type, step_option, step_parameter = step_type, None, None
    for pipeline_ind, pipeline in self.pipelines.items():
        step_type_found = False
        # Does this pipeline have this step?
        for step in self.pipelines[pipeline_ind].steps:
            if step[0] == step_type:
                step_type_found = True
                step_name = step[1].__class__.__name__
                # Are we interested in grouping by a step parameter?
                if step_option is not None and step_parameter is not None:
                    step_parameters = step[1].get_params()
                    if step_parameter in step_parameters:
                        # Prefix the class name with 'value__parameter'
                        parameter_value = step_parameters[step_parameter]
                        value_parameter = "%s__%s" % (parameter_value,
                                                      step_parameter)
                        step_name = "%s__%s" % (value_parameter, step_name)
                # Initialize the pipeline indices for this step name if
                # not found
                if step_name not in organized_pipelines:
                    organized_pipelines[step_name] = {
                        'pipeline_inds': []
                    }
                organized_pipelines[step_name]['pipeline_inds'].append(
                    pipeline_ind)
        # Lump pipeline in with default if step not found
        if not step_type_found:
            if 'None' not in organized_pipelines:
                organized_pipelines['None'] = {
                    'pipeline_inds': []
                }
            organized_pipelines['None']['pipeline_inds'].append(
                pipeline_ind)
    return organized_pipelines
def order_by_parameter(self, parameter_str):
    """
    Sort key for step/option names of the form 'value__parameter__name':
    numeric leading values sort numerically, everything else lexically.

    Parameters
    ----------
    parameter_str : str
        Step name, possibly prefixed with a parameter value
        (Ex: '5__n_neighbors__KNeighborsClassifier').

    Returns
    -------
    int or str
        The leading value as an int when it parses, else the raw string.
    """
    parameter_value = parameter_str.split('__')[0]
    try:
        return int(parameter_value)
    except ValueError:
        # BUG FIX: was a bare except, which would also swallow
        # KeyboardInterrupt/SystemExit; only non-numeric values fall back
        return parameter_value
def get_step_colors(self, df, color_by=None, color_map='viridis'):
"""
Assigns a colormap color to each step option found for the step type
named by color_by, returning the organized-pipeline dict from
get_organized_pipelines augmented with a 'color' entry per option.
The 'None' group (pipelines lacking the step) is always black ('k').
"""
######### Collect pipeline indices with desired attribute #########
step_colors = self.get_organized_pipelines(step_type=color_by)
############### Build working/indexible colormap ###############
color_count = len(step_colors.keys())
cmap = mpl_plt.get_cmap(color_map)
cNorm = mpl_colors.Normalize(vmin=0, vmax=color_count-1)
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cmap)
sorted_steps = sorted(step_colors.keys(), key=self.order_by_parameter)
# Set internal colors
# NOTE: color_ind advances even for the 'None' group, so one colormap
# slot is skipped when a 'None' group is present
color_ind = 0
for step_name in sorted_steps:
step = step_colors[step_name]
if step_name == 'None':
step['color'] = 'k'
else:
step['color'] = scalarMap.to_rgba(color_ind)
color_ind += 1
return step_colors
def get_colors(self, df, color_by=None, color_map='viridis',
               highlight_best=None):
    """
    Builds the list of box colors, one per dataframe column (pipeline),
    based on which step-option group each pipeline belongs to. Pipelines
    in no group fall back to the 'None' group's color.
    """
    # Map each step option to its color and member pipeline indices
    step_colors = self.get_step_colors(df, color_by=color_by,
                                       color_map=color_map)
    colors = []
    for pipeline_ind in df.columns:
        # One color per group the pipeline appears in (normally one)
        matches = [step['color'] for step in step_colors.values()
                   if pipeline_ind in step['pipeline_inds']]
        if matches:
            colors.extend(matches)
        else:
            # Default color for unmatched pipelines
            colors.append(step_colors['None']['color'])
    return colors
def get_custom_legend(self, df, color_by=None, color_map='viridis'):
    """
    Builds (labels, proxy_artists) for a plot legend keyed by step
    option, sorted the same way the step colors are assigned.
    """
    step_colors = self.get_step_colors(df, color_by=color_by,
                                       color_map=color_map)
    # Keep the label order consistent with get_step_colors
    labels = sorted(step_colors, key=self.order_by_parameter)
    proxies = [self.create_proxy(step_colors[label]['color'])
               for label in labels]
    return (labels, proxies)
def create_proxy(self, color):
    """
    Creates a unit rectangle patch of the given color, used as a proxy
    artist for legend entries.
    """
    return plt.Rectangle((0, 0), 1, 1, color=color)
| {
"repo_name": "JaggedParadigm/pyplearnr",
"path": "pyplearnr/nested_k_fold_cross_validation.py",
"copies": "2",
"size": "48375",
"license": "apache-2.0",
"hash": -7030119948803383000,
"line_mean": 39.2789342215,
"line_max": 151,
"alpha_frac": 0.5459431525,
"autogenerated": false,
"ratio": 4.293893129770993,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5839836282270993,
"avg_score": null,
"num_lines": null
} |
__author__ = 'christopher'
from ase.atoms import Atoms
import ase.io as aseio
from pyiid.calc.calc_1d import Calc1D
from pyiid.utils import build_sphere_np
import matplotlib.pyplot as plt
import time
from copy import deepcopy as dc
from pyiid.experiments.elasticscatter import ElasticScatter
import numpy as np
# Benchmark script: times F(Q), grad F(Q), potential energy, and force
# evaluations for Au nanoparticles of increasing size on each configured
# processor/algorithm combination, then plots and saves the timings.
scat = ElasticScatter()
# Small reference structure whose PDF serves as the fit target
atoms = Atoms('Au4', [[0, 0, 0], [3, 0, 0], [0, 3, 0], [3, 3, 0]])
pdf = scat.get_pdf(atoms)
type_list = []
time_list = []
# (processor, algorithm) combinations to benchmark
benchmarks = [
('GPU', 'flat'),
('CPU', 'flat'),
]
# Nanoparticle diameters (Angstrom) to test
sizes = np.arange(10, 55, 5)
pes_speed = None
fq_speed = None
# Build one spherical nanoparticle per diameter from the CIF file
atoms_list = [
build_sphere_np('/mnt/work-data/dev/pyIID/benchmarks/1100138.cif',
float(i) / 2) for i in sizes]
number_of_atoms = np.asarray([len(a) for a in atoms_list])
print(sizes)
print(number_of_atoms)
hdr = ''
# Prep everything so we don't need to recompute their scattering power
_ = [scat._wrap_atoms(a) for a in atoms_list]
for proc, alg in benchmarks:
print(proc, alg)
scat.set_processor(proc, alg)
type_list.append((proc, alg))
nrg_l = []
f_l = []
fq_l = []
g_fq_l = []
for atoms, s in zip(atoms_list, sizes):
print(len(atoms), s)
calc = Calc1D(target_data=pdf, exp_function=scat.get_pdf,
exp_grad_function=scat.get_grad_pdf)
atoms.set_calculator(calc)
# Time the gradient of F(Q); rattle first so caches don't help
print('start grad fq')
atoms.rattle()
s = time.time()
scat.get_grad_fq(atoms)
f = time.time()
g_fq_l.append(f - s)
print('start forces')
atoms.rattle()
s = time.time()
force = atoms.get_forces()
# scat.get_grad_fq(atoms)
f = time.time()
f_l.append(f - s)
print('start fq')
atoms.rattle()
s = time.time()
scat.get_fq(atoms)
f = time.time()
fq_l.append(f - s)
print('start potential energy')
atoms.rattle()
s = time.time()
nrg = atoms.get_potential_energy()
f = time.time()
nrg_l.append(f - s)
hdr += ', {0}_energy, {0}_forces'.format(proc)
# Stack one (energy, force) and one (fq, grad fq) timing row per combo
if pes_speed is None:
pes_speed = np.vstack((np.asarray(nrg_l), np.asarray(f_l)))
fq_speed = np.vstack((np.asarray(fq_l), np.asarray(g_fq_l)))
else:
pes_speed = np.vstack((pes_speed, np.asarray(nrg_l), np.asarray(f_l)))
fq_speed = np.vstack((fq_speed, np.asarray(fq_l), np.asarray(g_fq_l)))
# NOTE(review): row labels are hard-coded to match the two benchmark
# combos above — keep in sync with `benchmarks` if it changes
names = ['GPU', 'GPU', 'CPU', 'CPU']
fig = plt.figure()
ax1 = fig.add_subplot(111)
# Second x-axis shows atom counts alongside diameters
ax2 = ax1.twiny()
colors = ['b', 'b', 'r', 'r']
lines = ['o', 's'] * 2
calc_type = ['energy', 'force'] * 2
for i in range(len(names)):
ax1.semilogy(sizes, pes_speed[i], color=colors[i], marker=lines[i],
label='{0} {1}'.format(names[i], calc_type[i]))
ax1.legend(loc='best')
ax1.set_xlabel('NP diameter in Angstrom')
ax1.set_ylabel('Elapsed running time (s)')
ax2.set_xlim(ax1.get_xlim())
ax2.set_xticks(ax1.get_xticks())
ax2.set_xticklabels(number_of_atoms)
ax2.set_tick_params(which='major', pad=20)
ax2.set_xlabel('Number of Atoms')
# plt.savefig('/mnt/bulk-data/Dropbox/BNL_Project/HMC_paper/new_figures/speed_log.eps', bbox_inches='tight', transparent=True)
# plt.savefig('/mnt/bulk-data/Dropbox/BNL_Project/HMC_paper/new_figures/speed_log.png', bbox_inches='tight', transparent=True)
plt.show()
# NOTE(review): this second plot reuses ax1/ax2 from the figure that was
# already shown above instead of creating a fresh figure — presumably a
# new fig/ax pair was intended here
calc_type = ['F(Q)', 'Grad F(Q)'] * 2
for i in range(len(names)):
ax1.semilogy(sizes, fq_speed[i], color=colors[i], marker=lines[i],
label='{0} {1}'.format(names[i], calc_type[i]))
ax1.legend(loc='best')
ax1.set_xlabel('NP diameter in Angstrom')
ax1.set_ylabel('Elapsed running time (s)')
ax2.set_xlim(ax1.get_xlim())
ax2.set_xticks(ax1.get_xticks())
ax2.set_xticklabels(number_of_atoms)
ax2.set_xlabel('Number of Atoms')
# plt.savefig('/mnt/bulk-data/Dropbox/BNL_Project/HMC_paper/new_figures/speed_log.eps', bbox_inches='tight', transparent=True)
# plt.savefig('/mnt/bulk-data/Dropbox/BNL_Project/HMC_paper/new_figures/speed_log.png', bbox_inches='tight', transparent=True)
plt.show()
# '''
# Save the timing tables (one row per size after transpose)
pes_speed = pes_speed.T
fq_speed = fq_speed.T
np.savetxt(
'/mnt/bulk-data/Dropbox/BNL_Project/HMC_paper/new_figures/sizes_speed.txt',
sizes)
np.savetxt(
'/mnt/bulk-data/Dropbox/BNL_Project/HMC_paper/new_figures/n_atoms_speed.txt',
number_of_atoms)
np.savetxt(
'/mnt/bulk-data/Dropbox/BNL_Project/HMC_paper/new_figures/pes_speed.txt',
pes_speed, header=hdr)
np.savetxt(
'/mnt/bulk-data/Dropbox/BNL_Project/HMC_paper/new_figures/fq_speed.txt',
fq_speed, header=hdr)
# '''
| {
"repo_name": "CJ-Wright/pyIID",
"path": "benchmarks/time_comparison.py",
"copies": "1",
"size": "4558",
"license": "bsd-3-clause",
"hash": -593192009839550100,
"line_mean": 30.2191780822,
"line_max": 126,
"alpha_frac": 0.6259324265,
"autogenerated": false,
"ratio": 2.6954464813719694,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3821378907871969,
"avg_score": null,
"num_lines": null
} |
__author__ = 'christopher'
def is_coor_formatted(coor):
    """
    Returns True when the coordinate string is in the human-readable
    '100 S / 100 E' form, detected by the '/' separator.
    """
    return "/" in coor
def format_coor(coor):
    """
    Converts a raw '100 64 100' (x, height, z) coordinate string into the
    human-readable '100 N / 100 E' form.

    The middle (height) component is ignored. Note a zero component is
    labelled W/S, matching the original strict '> 0' test.
    """
    # Split once instead of three times; the height component is unused
    parts = coor.split()
    x, y = parts[0], parts[2]
    # East/west from the sign of x; the sign character itself is dropped
    if float(x) > 0:
        x = x.replace("-", "") + " E"
    else:
        x = x.replace("-", "") + " W"
    # North/south from the sign of the z component
    if float(y) > 0:
        y = y.replace("-", "") + " N"
    else:
        y = y.replace("-", "") + " S"
    return y + " / " + x
def convert_coor(coor):
    """
    Converts a human-readable '100 S / 100 E' coordinate string back
    into the raw 'x 64 z' form.
    """
    # parts layout: [z_value, N/S, '/', x_value, E/W]
    parts = coor.split()
    x_value, x_hemi = parts[3], parts[4]
    y_value, y_hemi = parts[0], parts[1]
    # Southern and western hemispheres carry a negative sign
    if y_hemi == "S":
        y_value = "-" + y_value
    if x_hemi == "W":
        x_value = "-" + x_value
    return x_value + " " + "64" + " " + y_value
def in_radius(obj1, obj2, radius):
    """
    Returns True when obj1 lies within a square of half-width `radius`
    centered on obj2 (an axis-aligned box test, not a true circular
    radius). Human-readable coordinates are converted to raw form first.
    """
    if is_coor_formatted(obj1):
        obj1 = convert_coor(obj1)
    if is_coor_formatted(obj2):
        obj2 = convert_coor(obj2)
    # Raw form is 'x 64 z'; compare the x and z components
    x1 = float(obj1.split(" ")[0])
    y1 = float(obj1.split(" ")[2])
    x2 = float(obj2.split(" ")[0])
    y2 = float(obj2.split(" ")[2])
    r = int(radius)
    return (x2 - r <= x1 <= x2 + r) and (y2 - r <= y1 <= y2 + r)
| {
"repo_name": "christopher-roelofs/7dtd-server-manager",
"path": "util.py",
"copies": "1",
"size": "1381",
"license": "mit",
"hash": 5493508096174041000,
"line_mean": 24.1090909091,
"line_max": 156,
"alpha_frac": 0.5104996379,
"autogenerated": false,
"ratio": 2.78989898989899,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.380039862779899,
"avg_score": null,
"num_lines": null
} |
__author__ = 'christopher'
import parse
import thread
import commands
import memorydb
import playerdb
import logger
import runtime
import event
import util
def route(line):
try:
p = parse.parse_log(line)
if p.type == "Filtered":
pass
if p.type == "GMSG":
logger.log(p.formatted_text)
if p.type == "SystemEvent":
if p.event == "Stats":
runtime.time = p.time
runtime.fps = p.fps
runtime.heap = p.heap
runtime.max = p.max
runtime.chunks = p.chunks
runtime.cgo = p.cgo
runtime.ply = p.ply
runtime.zom = p.zom
runtime.ent = p.ent
runtime.items = p.items
if runtime.gui:
system_event = []
system_event.append("SystemUpdate")
event.gui_event.append(system_event)
if p.event == "Version":
runtime.version = p.version
if runtime.gui:
system_event = []
system_event.append("SystemUpdate")
event.gui_event.append(system_event)
if p.event == "Port":
runtime.server_port = p.port
if runtime.gui:
system_event = []
system_event.append("SystemUpdate")
event.gui_event.append(system_event)
if p.event == "MaxPlayers":
runtime.max_players = p.max_players
if runtime.gui:
system_event = []
system_event.append("SystemUpdate")
event.gui_event.append(system_event)
if p.event == "GameMode":
runtime.game_mode = p.game_mode
if runtime.gui:
system_event = []
system_event.append("SystemUpdate")
event.gui_event.append(system_event)
if p.event == "World":
runtime.world = p.world
if runtime.gui:
system_event = []
system_event.append("SystemUpdate")
event.gui_event.append(system_event)
if p.event == "GameName":
runtime.game_name = p.game_name
if runtime.gui:
system_event = []
system_event.append("SystemUpdate")
event.gui_event.append(system_event)
if p.event == "Difficulty":
runtime.difficulty = p.difficulty
if runtime.gui:
system_event = []
system_event.append("SystemUpdate")
event.gui_event.append(system_event)
if p.type == "GameEvent":
if p.event == "Airdrop":
location = util.format_coor(p.location)
memorydb.airdrops.append(p.location)
logger.log("Airdrop: " + location)
playerdb.save_airdrop(p.location)
if runtime.server:
commands.say("Airdrop: " + location)
if p.type == "GameEvent":
if p.event == "Horde":
logger.log("Spawning Wandering Horde")
if runtime.server:
commands.say("Spawning Wandering Horde")
if p.type == "PlayerEvent":
if p.event == "Connected":
#logger.log("Player Connected: " + p.name)
memorydb.add_online_player(p.name)
player_event = []
player_event.append("PlayerUpdate")
event.gui_event.append(player_event)
if runtime.server:
thread.start_new_thread(commands.send_motd,(p.name,))
if p.event == "Disconnected":
#logger.log("Player Disconnected: " + p.name)
memorydb.remove_online_player(p.name)
player_event = []
player_event.append("PlayerUpdate")
event.gui_event.append(player_event)
if p.event == "Died":
player = memorydb.get_player_from_name(p.name)
player.bag = player.location
playerdb.save_player(p.name)
logger.log_verbose("Setting " + player.name + " revive point to: " + util.format_coor(player.location))
logger.log(p.formatted_text)
if runtime.server:
commands.pm(player.name, "Setting your revive point to: "+ util.format_coor(player.location))
if p.event == "Update":
memorydb.add_online_player(p.name)
player_event = []
player_event.append("PlayerUpdate")
event.gui_event.append(player_event)
if memorydb.player_exists_from_name(p.name):
memorydb.update_player(p)
else:
memorydb.add_player(p.name, p.entityid, p.steamid, p.ip)
logger.log_verbose("Adding new player: " + p.name)
playerdb.save_player(p.name)
if p.type == "PlayerCommand":
if p.event == "Sethome":
logger.log(p.formatted_text)
memorydb.set_player_home(p.name)
player = memorydb.get_player_from_name(p.name)
logger.log("Setting "+util.format_coor(player.home) + " as home for " + player.name)
playerdb.save_player(p.name)
if runtime.server:
commands.pm(player.name,"Home has been set to: " + util.format_coor(player.home))
if p.event == "Home":
player = memorydb.get_player_from_name(p.name)
logger.log(p.formatted_text)
if player.home == "":
logger.log_verbose("No home set for: " + player.name)
if runtime.server:
commands.pm(player.name, "You need to set a home first")
else:
logger.log_verbose("Teleporting "+player.name + " to " + util.format_coor(player.home))
if runtime.server:
commands.teleport(player.name,player.home)
if p.event == "Setpoi":
logger.log(p.formatted_text)
player = memorydb.get_player_from_name(p.name)
playerdb.save_poi(p.name,p.poiname,player.location)
memorydb.add_poi(p.name,p.poiname)
logger.log("Poi set for "+p.name +" with name "+ p.poiname +" at: " + util.format_coor(player.location))
if runtime.server:
commands.pm(player.name,"Poi " + p.poiname + " set: "+ util.format_coor(player.location))
if p.event == "Poi":
logger.log(p.formatted_text)
location = memorydb.get_poi(p.name,p.poiname)
if location == "":
if runtime.server:
commands.pm(p.name,"No poi with that name.")
else:
logger.log("Teleporting "+p.name + " to " + util.format_coor(location))
if runtime.server:
commands.teleport(p.name,location)
if p.event == "Listpoi":
logger.log(p.formatted_text)
if runtime.server:
player = memorydb.get_player_from_name(p.name)
if len(player.pois) == 0:
commands.pm(p.name,"No pois to list")
for poi in player.pois:
name = poi.split(",")[0]
location = poi.split(",")[1]
commands.pm(player.name,name + ": " + util.format_coor(location))
if p.event == "Removepoi":
logger.log(p.formatted_text)
if memorydb.poi_exists(p.name,p.poiname):
memorydb.remove_poi(p.name,p.poiname)
playerdb.delete_poi(p.name,p.poiname)
if runtime.server:
commands.pm(p.name,"Poi " + p.poiname+ " has been removed")
else:
if runtime.server:
commands.pm(p.name,"No poi with that name")
if p.event == "Clearpoi":
logger.log(p.formatted_text)
memorydb.remove_all_pois(p.name)
playerdb.delete_all_poi(p.name)
if runtime.server:
commands.pm(p.name,"All pois have been removed")
if p.event == "Killme":
logger.log(p.formatted_text)
if runtime.server:
commands.kill_player(p.name)
if p.event == "Help":
logger.log(p.formatted_text)
if runtime.server:
commands.help(p.name)
if p.event == "Bag":
logger.log(p.formatted_text)
if runtime.server:
player = memorydb.get_player_from_name(p.name)
if player.bag != "":
commands.teleport(p.name,player.bag)
if p.event == "Goto":
logger.log(p.formatted_text)
if runtime.server:
if memorydb.player_exists_from_name(p.othername):
commands.teleport(p.name,p.othername)
else:
commands.pm(p.name,"Player does not exist: " + p.othername)
if p.event == "Where":
logger.log(p.formatted_text)
if runtime.server:
player = memorydb.get_player_from_name(p.name)
commands.pm(p.name,"Current location: " + util.format_coor(player.location))
if p.event == "Drop":
logger.log(p.formatted_text)
if runtime.server:
for drop in memorydb.airdrops:
if util.is_coor_formatted(drop):
commands.pm(p.name,"Airdrop: " + drop)
else:
commands.pm(p.name,"Airdrop: " + util.format_coor(drop))
if p.event == "Claim":
logger.log(p.formatted_text)
found = 0
if runtime.server:
player = memorydb.get_player_from_name(p.name)
obj1 = player.location
for drop in memorydb.airdrops:
if util.in_radius(obj1,drop,runtime.drop_claim_radius):
memorydb.airdrops.remove(drop)
playerdb.delete_airdrop(drop)
if util.is_coor_formatted(drop):
commands.pm(p.name,"You have claimed the airdrop at: " + str(drop))
else:
commands.pm(p.name,"You have claimed the airdrop at: " + str(util.format_coor(drop)))
found = 1
if found == 0:
commands.pm(p.name,"You need to be in a " + str(runtime.drop_claim_radius) + " block radius of an airdrop to claim")
if p.type == "":
logger.log_verbose(p.formated_text)
except Exception as e:
print(e.message) | {
"repo_name": "christopher-roelofs/7dtd-server-manager",
"path": "director.py",
"copies": "1",
"size": "11519",
"license": "mit",
"hash": -4981774707975874000,
"line_mean": 39.1393728223,
"line_max": 140,
"alpha_frac": 0.4792082646,
"autogenerated": false,
"ratio": 4.214782290523234,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5193990555123235,
"avg_score": null,
"num_lines": null
} |
__author__ = 'christopher'

# Module-level in-memory state shared across the application.
# NOTE: the original had `global player_array` / `global last_airdrop`
# statements here; `global` at module scope is a no-op, so they were removed.
player_array = []     # all known player_object instances
online_players = []   # names of currently connected players
last_airdrop = ""     # last airdrop location (legacy, kept for compatibility)
airdrops = []         # airdrop locations awaiting a /claim
import logger
class player_object(object):
    """In-memory record of a single player's identity, state and stats."""

    def __init__(self):
        self.name = ""        # in-game player name
        self.entityid = 0
        self.steamid = 0
        self.ip = ""
        self.lastlogon = ""
        self.home = ""        # /sethome teleport target ("x y z" string)
        self.warned = 0
        self.location = ""    # last known position, "x y z" string
        # NOTE: the original assigned self.home = "" twice; duplicate removed.
        self.health = 0
        self.deaths = 0
        self.zombies = 0      # zombie kill count
        self.players = 0      # player kill count
        self.score = 0
        self.ping = 0
        self.position = ""
        self.pois = []        # "name,location" strings (see add_poi)
        self.tprequests = []
        self.admin = False
        self.adminlevel = 0
        self.mod = False
        self.bag = ""         # death-bag location used by /bag
def player_exists(steamid):
    """Return True if a player with the given steamid string is known."""
    return any(str(entry.steamid) == steamid for entry in player_array)
def player_exists_from_name(name):
    """Return True if a player with the given name is known."""
    return any(entry.name == name for entry in player_array)
def poi_exists(player, poiname):
    """Return True if the named player already has a poi called poiname."""
    person = get_player_from_name(player)
    names = [entry.split(",")[0] for entry in person.pois]
    return poiname in names
def add_online_player(player):
    """Mark a player as online; duplicate names are ignored."""
    if player in online_players:
        return
    online_players.append(player)
def get_online_players():
    """Return a shallow copy of the online player name list."""
    return list(online_players)
def remove_online_player(player):
    """Stop tracking a player as online; unknown names are ignored.

    The original raised ValueError when the name was not tracked (e.g. a
    disconnect event seen without a matching connect); ignore that case.
    """
    if player in online_players:
        online_players.remove(player)
def get_player_from_steamid(steamid):
    """Return the player_object whose steamid matches, or None."""
    return next((p for p in player_array if steamid == str(p.steamid)), None)
def get_player_from_name(name):
    """Return the player_object with the given name, or None."""
    return next((p for p in player_array if name == p.name), None)
def get_poi(player, poiname):
    """Return the location string of the named poi, or "" if absent."""
    person = get_player_from_name(player)
    for entry in person.pois:
        parts = entry.split(",")
        if parts[0] == poiname:
            return parts[1]
    return ""
def add_player(name, entityid, steamid, ip):
    """Create a new player_object and register it in player_array.

    Fixes the stray trailing comma in the original signature
    (`def add_player(name,entityid,steamid,ip,)`); call sites are unaffected.
    """
    logger.log("memorydb adding new player")
    player = player_object()
    player.name = name
    player.entityid = entityid
    player.steamid = steamid
    player.ip = ip
    player_array.append(player)
def update_player(pl):
    # Refresh the stored record for a player from a parsed "listplayers"
    # update event (see parse.py, the LISTPLAYERS branch); matched by steamid.
    player = get_player_from_steamid(pl.steamid)
    # NOTE(review): pl.position feeds player.location, while player.position
    # itself is never written here -- confirm that is intentional.
    player.location = pl.position
    player.name = pl.name
    player.health = pl.health
    player.deaths = pl.deaths
    player.zombies = pl.zombies
    player.players= pl.players
    player.score = pl.score
    player.ping = pl.ping
def add_poi(player, name):
    """Add (or replace) a poi for the player at their current location.

    Pois are stored as "name,location" strings. Fixes two defects in the
    original: both if/else branches duplicated the append, and the replace
    path removed items from person.pois while iterating over it.
    """
    person = get_player_from_name(player)
    # Drop any existing poi with the same name, then append the new one.
    person.pois[:] = [poi for poi in person.pois if poi.split(",")[0] != name]
    person.pois.append(name + "," + person.location)
def remove_poi(player, name):
    """Remove the named poi from the named player, if present.

    The original removed entries from person.pois while iterating the same
    list, which can skip elements; filter into the list in place instead
    (slice assignment preserves the list object's identity).
    """
    for person in player_array:
        if player == person.name:
            person.pois[:] = [poi for poi in person.pois
                              if name != poi.split(",")[0]]
def remove_all_pois(player):
    """Delete every poi belonging to the named player."""
    person = get_player_from_name(player)
    person.pois[:] = []
def set_player_home(player):
    """Set the player's /home point to their current location."""
    target = get_player_from_name(player)
    target.home = target.location
"repo_name": "christopher-roelofs/7dtd-server-manager",
"path": "memorydb.py",
"copies": "1",
"size": "3368",
"license": "mit",
"hash": 5832777666298886000,
"line_mean": 23.7720588235,
"line_max": 64,
"alpha_frac": 0.5950118765,
"autogenerated": false,
"ratio": 3.4508196721311477,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4545831548631148,
"avg_score": null,
"num_lines": null
} |
__author__ = 'christopher'
import threading
import telnetlib
import director
import runtime
import logger
class telnet_connect_telnetlib(threading.Thread):
    # Background thread that keeps a telnet session to the game server open
    # and feeds each received line into director.route().
    # NOTE(review): self.name shadows threading.Thread.name -- confirm the
    # thread name is not relied on elsewhere.
    def __init__(self, threadID, name, counter):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.counter = counter
    def run(self):
        try:
            # `out` is shared with write_out() below, hence the global.
            global out
            # 5-second connect timeout.
            out = telnetlib.Telnet(runtime.host, runtime.port,5)
            out.read_until("password:")
            out.write(runtime.password + "\n")
            # Read one CRLF-terminated line at a time until shutdown.
            while runtime.run:
                line = out.read_until("\r\n").strip()
                if line != "":
                    try:
                        director.route(line)
                        #print line
                    except Exception as e:
                        # Keep the reader loop alive on per-line failures.
                        logger.log_debug(e.message)
        except Exception as e:
            # Suppress the error during deliberate shutdown (runtime.run False).
            if runtime.run:
                logger.log("unable to connect : " + e.message )
def write_out(cmd):
    # Send one command line to the server over the shared telnet session
    # (`out` is created by telnet_connect_telnetlib.run). Errors are logged
    # and swallowed so callers never crash on a dropped connection.
    try:
        out.write(cmd + "\n")
    except Exception as e:
        logger.log_debug(e.message)
def initialize():
    """Spawn the background telnet reader thread."""
    try:
        worker = telnet_connect_telnetlib(1, "Thread-1", 1)
        worker.start()
    except Exception as e:
        logger.log_debug(e.message)
| {
"repo_name": "christopher-roelofs/7dtd-server-manager",
"path": "telconn.py",
"copies": "1",
"size": "1277",
"license": "mit",
"hash": 4328668558540861000,
"line_mean": 24.0392156863,
"line_max": 64,
"alpha_frac": 0.5293657009,
"autogenerated": false,
"ratio": 4.092948717948718,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5122314418848718,
"avg_score": null,
"num_lines": null
} |
__author__ = 'christopher'
from Tkinter import *
from ttk import *
import memorydb
import telconn
import threading
import logger
import event
import runtime
import time
import config
selected_player = ""  # NOTE(review): appears unused in this module -- confirm before removing
def toggle_verbose():
    """Sync runtime.verbose with the 'Verbose Logging' checkbox state."""
    runtime.verbose = verbose_chk.get() == 1
def toggle_debug():
    """Sync runtime.debug with the 'Debug Logging' checkbox state."""
    runtime.debug = debug_chk.get() == 1
def toggle_server():
    """Sync runtime.server with the 'Server' checkbox state."""
    runtime.server = server_chk.get() == 1
def save_settings():
    # Copy the settings-tab entry fields into runtime and persist via config.
    # NOTE(review): Entry.get() returns strings, so host/port/claim radius
    # are stored as strings -- confirm downstream consumers expect that.
    runtime.host = host_input.get()
    runtime.port = port_input.get()
    runtime.drop_claim_radius = claim_input.get()
    config.save_config()
# --- Main window and tab container -------------------------------------------
root = Tk()
root.title("7dtd Telnet Client")
root.columnconfigure(0, weight=1)
root.rowconfigure(0, weight=1)
note = Notebook(root)
note.columnconfigure(0, weight=1)
note.rowconfigure(0, weight=1)
note.grid(sticky=NSEW)
# --- Console tab: log view, online-player sidebar, command entry --------------
console_tab = Frame(note)
console_tab.columnconfigure(0, weight=1)
console_tab.columnconfigure(1, weight=1)
console_tab.rowconfigure(0, weight=1)
textbox = Text(console_tab, height=20, width=80)
textbox.grid(row=0, column=0, sticky=NSEW, columnspan=2)
textbox.columnconfigure(0, weight=1)
textbox.rowconfigure(0, weight=1)
playerbox = Text(console_tab, height=20, width=20)
playerbox.grid(row=0, column=3, sticky=N+S)
commandLabel = Label(console_tab, width=10, text="Command:")
commandLabel.grid(row=1, column=0, sticky=W)
# NOTE(review): `input` shadows the builtin of the same name.
input = Entry(console_tab, width=80)
input.grid(row=1, column=1, sticky=E+W, columnspan=3)
# --- Players tab: known-player list and per-player info box -------------------
players_tab = Frame(note)
players_tab.columnconfigure(0, weight=1)
players_tab.rowconfigure(0, weight=1)
playerlist = Listbox(players_tab, height=20, width=20)
playerlist.grid(row=0,column=1,sticky=N+S)
infobox = Text(players_tab, height=10, width=80)
infobox.grid(row=0, column=0, sticky=N+E+W)
# --- Settings tab: logging toggles, MOTD, connection and claim radius ---------
settings_tab = Frame(note)
settings_tab.rowconfigure(0, weight=1)
settings_tab.rowconfigure(1, weight=1)
settings_tab.rowconfigure(2, weight=1)
settings_tab.rowconfigure(3, weight=1)
settings_tab.rowconfigure(4, weight=1)
settings_tab.rowconfigure(5, weight=1)
settings_tab.rowconfigure(6, weight=1)
verbose_chk = IntVar()
verbose_checkbox = Checkbutton(settings_tab,text = "Verbose Logging",command = toggle_verbose,variable=verbose_chk)
verbose_checkbox.grid(row=0, column=0, sticky=W)
if runtime.verbose:
    verbose_chk.set(1)
debug_chk = IntVar()
debug_checkbox = Checkbutton(settings_tab,text = "Debug Logging",command = toggle_debug,variable=debug_chk)
debug_checkbox.grid(row=1, column=0, sticky=W)
if runtime.debug:
    debug_chk.set(1)
server_chk = IntVar()
server_checkbox = Checkbutton(settings_tab,text = "Server",command = toggle_server,variable=server_chk)
server_checkbox.grid(row=2, column=0, sticky=W)
if runtime.server:
    server_chk.set(1)
motd_label = Label(settings_tab, width=10, text="MOTD: ")
motd_label.grid(row=3, column=0)
motd_input = Entry(settings_tab, width=30)
motd_input.grid(row=3, column=1, sticky=W)
motd_input.insert(0,runtime.motd)
host_label = Label(settings_tab, width=10, text="Host: ")
host_label.grid(row=4, column=0)
host_input = Entry(settings_tab, width=15)
host_input.grid(row=4, column=1, sticky=W)
host_input.insert(0,runtime.host)
port_label = Label(settings_tab, width=10, text="Port: ")
port_label.grid(row=5, column=0)
port_input = Entry(settings_tab, width=15)
port_input.grid(row=5, column=1, sticky=W)
port_input.insert(0,runtime.port)
claim_label = Label(settings_tab, width=10, text="Claim Radius: ")
claim_label.grid(row=6, column=0)
claim_input = Entry(settings_tab, width=15)
claim_input.grid(row=6, column=1, sticky=W)
claim_input.insert(0,runtime.drop_claim_radius)
save_btn = Button(settings_tab,text = "Save",command = save_settings)
save_btn.grid(row=7, column=0, sticky=E)
spacer = Label(settings_tab).grid(row=8,column=0)
# --- System tab: live server stats (column 0) and static info (column 1) -----
system_tab = Frame(note)
time_var = StringVar()
time_var.set("Time: 0m")
time_label = Label(system_tab, width=15, textvariable = time_var)
time_label.grid(row=0, column=0)
fps_var = StringVar()
fps_var.set("FPS: 0")
fps_label = Label(system_tab, width=15, textvariable = fps_var)
fps_label.grid(row=1, column=0)
heap_var = StringVar()
heap_var.set("Heap: 0MB")
heap_label = Label(system_tab, width=15, textvariable = heap_var)
heap_label.grid(row=2, column=0)
max_var = StringVar()
max_var.set("Max: 0MB")
max_label = Label(system_tab, width=15, textvariable = max_var)
max_label.grid(row=3, column=0)
chunks_var = StringVar()
chunks_var.set("Chunks: 0")
chunks_label = Label(system_tab, width=15, textvariable = chunks_var)
chunks_label.grid(row=4, column=0)
cgo_var = StringVar()
cgo_var.set("CGO: 0M")
cgo_label = Label(system_tab, width=15, textvariable = cgo_var)
cgo_label.grid(row=5, column=0)
ply_var = StringVar()
ply_var.set("PLY: 0")
ply_label = Label(system_tab, width=15, textvariable = ply_var)
ply_label.grid(row=6, column=0)
zom_var = StringVar()
zom_var.set("Zom: 0")
zom_label = Label(system_tab, width=15, textvariable = zom_var)
zom_label.grid(row=7, column=0)
ent_var = StringVar()
ent_var.set("ENT: 0")
ent_label = Label(system_tab, width=15, textvariable = ent_var)
ent_label.grid(row=8, column=0)
items_var = StringVar()
items_var.set("Items: 0")
items_label = Label(system_tab, width=15, textvariable = items_var)
items_label.grid(row=9, column=0)
version_var = StringVar()
version_var.set("Version: 0")
version_label = Label(system_tab, width=30, textvariable = version_var)
version_label.grid(row=0, column=1)
port_var = StringVar()
port_var.set("Port: 0")
# NOTE(review): this rebinds `port_label` from the settings tab above; the
# settings-tab widget stays alive in Tk, but the name now points here.
port_label = Label(system_tab, width=30, textvariable = port_var)
port_label.grid(row=1, column=1)
max_players_var = StringVar()
max_players_var.set("Max Players: 0")
max_players_label = Label(system_tab, width=30, textvariable = max_players_var)
max_players_label.grid(row=2, column=1)
game_mode_var = StringVar()
game_mode_var.set("Game Mode: 0")
game_mode_label = Label(system_tab, width=30, textvariable = game_mode_var)
game_mode_label.grid(row=3, column=1)
world_var = StringVar()
world_var.set("World: 0")
world_label = Label(system_tab, width=30, textvariable = world_var)
world_label.grid(row=4, column=1)
game_name_var = StringVar()
game_name_var.set("Game Name: 0")
game_name_label = Label(system_tab, width=30, textvariable = game_name_var)
game_name_label.grid(row=5, column=1)
difficulty_var = StringVar()
difficulty_var.set("Difficulty: 0")
difficulty_label = Label(system_tab, width=30, textvariable = difficulty_var)
difficulty_label.grid(row=6, column=1)
def show_player_info(name):
    """Replace the Players-tab info box with details for the named player."""
    infobox.delete("1.0", END)
    player = memorydb.get_player_from_name(name)
    details = ["Name:" + player.name,
               "SteamID:" + str(player.steamid),
               "IP:" + player.ip,
               "Last Location:" + player.location]
    for detail in details:
        infobox.insert(END, detail + "\n")
def addInfo(info):
    # Append one line to the console view and keep it scrolled to the bottom.
    textbox.insert(END,info + '\n')
    textbox.see(END)
def refreshPlayerList():
    # Rebuild the online-player sidebar only when its line count no longer
    # matches the tracked online list ('end-1c' is the "line.col" index of
    # the last character; the Text widget always holds one trailing line).
    if int(playerbox.index('end-1c').split(".")[0])-1 != len(memorydb.online_players):
        playerbox.delete("1.0", END)
        online_players = memorydb.get_online_players()
        for player in online_players:
            playerbox.insert("1.0",player+ "\n")
def refreshInfoList():
    # Rebuild the Players-tab listbox when the number of known players changed.
    if int(playerlist.index('end')) != len(memorydb.player_array):
        playerlist.delete(0, END)
        for player in memorydb.player_array:
            # NOTE(review): inserts at index 1 rather than END/0; Tk clamps
            # out-of-range indices -- confirm the intended ordering.
            playerlist.insert(1, player.name)
def func(event):
    """<Return> key handler: send the typed command to the server."""
    command = input.get()
    input.delete(0, END)
    telconn.write_out(command)
    logger.log("Command sent: " + command)
def listclick(e):
    # Listbox selection handler: show details for the clicked player.
    show_player_info(str(playerlist.get(playerlist.curselection())))
def set_motd(e):
    # Key-release handler: mirror the MOTD entry field into runtime.motd.
    runtime.motd = motd_input.get()
def refresh_system_stats():
    # Push the latest runtime.* stats into the System tab's StringVars.
    time_var.set("Time: " + str(runtime.time))
    fps_var.set("FPS: " + str(runtime.fps))
    heap_var.set("Heap: " + str(runtime.heap))
    max_var.set("Max: " + str(runtime.max))
    chunks_var.set("Chunks: " + str(runtime.chunks))
    cgo_var.set("CGO: " + str(runtime.cgo))
    ply_var.set("PLY: " + str(runtime.ply))
    zom_var.set("Zom: " + str(runtime.zom))
    ent_var.set("Ent: " + str(runtime.ent))
    items_var.set("Items: " + str(runtime.items))
    version_var.set("Version: " + str(runtime.version))
    # NOTE(review): the values below are concatenated without str(); assumes
    # runtime stores them as strings (parse.py assigns split() results) -- confirm.
    port_var.set("Port: " + runtime.server_port )
    max_players_var.set("Max Players: " + runtime.max_players)
    game_mode_var.set("Game Mode: " +runtime.game_mode )
    world_var.set("World: " +runtime.world )
    game_name_var.set("Game Name: " + runtime.game_name)
    difficulty_var.set("Difficulty: " + runtime.difficulty)
def handler():
    # Window-close handler: stop the reader/update loops, tear down the UI,
    # then ask the telnet session to exit.
    runtime.run = False
    root.destroy()
    # NOTE(review): "exit" is written after destroy(); relies on the telnet
    # session still being open at that point -- confirm ordering is intentional.
    telconn.write_out("exit")
# Wire up event handlers, register the tabs, and hook window close.
input.bind('<Return>', func)
note.add(console_tab, text="Console", compound=TOP)
playerlist.bind('<<ListboxSelect>>', listclick)
note.add(players_tab, text = "Players")
motd_input.bind('<KeyRelease>',set_motd)
note.add(settings_tab, text = "Settings")
note.add(system_tab, text = "System")
root.protocol("WM_DELETE_WINDOW", handler)
def update():
    # Event pump: polls the shared event.gui_event queue ~10x/second and
    # dispatches updates to the widgets.
    # NOTE(review): the list is popped while being iterated, and "Log"
    # events are only read from the tail while PlayerUpdate/SystemUpdate are
    # matched against the current record -- the original author flagged this
    # as needing a rewrite; behavior deliberately kept as-is.
    while runtime.run:
        time.sleep(.1)
        for event_record in event.gui_event: # this needs to be fixed but works for now
            if event.gui_event[-1][0] == "Log":
                addInfo(event.gui_event[-1][1])
                event.gui_event.pop()
            if event_record[0] == "PlayerUpdate":
                refreshPlayerList()
                refreshInfoList()
                event.gui_event.pop()
            if event_record[0] == "SystemUpdate":
                refresh_system_stats()
                event.gui_event.pop()
def start():
    """Start the GUI: seed the player list, run the event pump, enter mainloop.

    Uses the public threading.Thread API instead of the private
    threading._start_new_thread helper the original called; the thread is
    made a daemon to match start_new_thread's won't-block-exit behavior.
    """
    if runtime.gui:
        refreshInfoList()
        pump = threading.Thread(target=update)
        pump.setDaemon(True)
        pump.start()
        root.mainloop()
| {
"repo_name": "christopher-roelofs/7dtd-server-manager",
"path": "gui.py",
"copies": "1",
"size": "9881",
"license": "mit",
"hash": -7036192003446827000,
"line_mean": 28.4077380952,
"line_max": 115,
"alpha_frac": 0.6870762069,
"autogenerated": false,
"ratio": 2.8410005750431284,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.896982442975423,
"avg_score": 0.01165047043777984,
"num_lines": 336
} |
__author__ = 'christopher'
#parses the log and returns a parsed_log object
import re
import string
import memorydb
import logger
from time import strftime
class ParsedLog(object):
    """Result object of parse_log().

    Only the three common fields are declared here; event-specific fields
    (name, steamid, location, poiname, ...) are attached dynamically by
    the branch of parse_log() that recognised the line.
    """
    def __init__(self):
        self.type = ""       # category, e.g. "PlayerEvent", "SystemEvent", "PlayerCommand"
        self.event = ""      # specific event, e.g. "Connected", "Airdrop", "Sethome"
        self.full_text = ""  # the raw, unmodified log line
def parse_log(line):
    """Parse one raw server log line into a ParsedLog.

    Classifies the line via substring regex probes, then extracts fields by
    positional splitting on spaces/commas. Always returns a ParsedLog; lines
    that match no branch come back with empty type/event and only
    formated_text set (sic -- the misspelling is relied on by director.py).
    """
    pl = ParsedLog()
    pl.full_text = line
    # Coarse probes evaluated on every line.
    INF = re.search("INF", line)
    WRN = re.search("WRN", line)
    ERR = re.search("ERR", line)
    LISTPLAYERS = re.search("([0-9][.]\sid=)", line)
    F2 = re.search("Total of", line) #message from player, system or admin
    VERSION = re.search("Server version:",line)
    PORT = re.search("Server port:",line)
    MAXP = re.search("Max players:",line)
    GAMEMODE = re.search("Game mode:",line)
    WORLD = re.search("World:",line)
    GAMENAME = re.search("Game name:",line)
    DIFFICULTY = re.search("Difficulty:",line)
    if INF:
        seperated_line = line.split(" ")
        F1 = re.search("Executing command", line) #message from player, system or admin
        GMSG = re.search("GMSG", line) #message from player, system or admin
        CONNECTED = re.search("Player connected", line) #message from player, system or admin
        DICONNECTED = re.search("Player disconnected", line) #message from player, system or admin
        TIME = re.search("INF Time", line)
        HORDE = re.search("INF Spawning Wandering Horde",line)
        AIRDROP = re.search("AIAirDrop: Spawned supply crate",line)
        if CONNECTED:
            # Fields arrive as key=value tokens; [:-1] strips the trailing comma.
            player = seperated_line[6].split("=")[1][:-1]
            steamid = seperated_line[7].split("=")[1][:-1]
            pl.type = "PlayerEvent"
            pl.event = "Connected"
            pl.name = player
            pl.steamid = steamid
            return pl
        elif DICONNECTED:
            player = seperated_line[8].split("=")[1][:-1].replace("'","")
            steamid = seperated_line[7].split("=")[1][:-1].replace("'","")
            pl.type = "PlayerEvent"
            pl.event = "Disconnected"
            pl.name = player
            pl.steamid = steamid
            return pl
        elif TIME:
            # Periodic server stats line; fields at fixed word positions.
            try:
                pl.type = "SystemEvent"
                pl.event = "Stats"
                pl.time = seperated_line[4]
                pl.fps = seperated_line[6]
                pl.heap = seperated_line[8]
                pl.max = seperated_line[10]
                pl.chunks = seperated_line[12]
                pl.cgo = seperated_line[14]
                pl.ply = seperated_line[16]
                pl.zom = seperated_line[18]
                pl.ent = seperated_line[20] + " " + seperated_line[21]
                pl.items = seperated_line[23]
                return pl
            except Exception as e:
                return pl
                # NOTE(review): unreachable -- the return above exits first.
                logger.log_debug("Error parsing stats update: "+e.message)
        elif AIRDROP:
            try:
                pl.type = "GameEvent"
                pl.event = "Airdrop"
                seperated_line = line.replace("(","").replace(")","").replace(",","").split()
                pl.x = seperated_line[8]
                pl.y = seperated_line[10]
                pl.z = seperated_line[9]
                # NOTE(review): location is built as words 8,9,10 while
                # x/y/z above are 8,10,9 -- confirm the axis ordering.
                pl.location = seperated_line[8] + " " + seperated_line[9] + " " + seperated_line[10]
                return pl
            except Exception as e:
                logger.log(line)
                logger.log_debug("Error parsing airdrop info")
                return pl
        elif HORDE:
            pl.type = "GameEvent"
            pl.event = "Horde"
            return pl
        elif F1:
            # Admin/console command echoes are filtered out.
            pl.type = "Filtered"
            return pl
        elif GMSG:
            # In-game chat message; word 4 is "name:" -- [:-1] drops the colon.
            player = seperated_line[4][:-1]
            SETHOME = re.search("/sethome", line)
            HOME = re.search("/home", line)
            RANDOM = re.search("/random", line)
            SETPOI = re.search("/setpoi", line)
            POI = re.search("/poi", line)
            RPOI = re.search("/rpoi", line)
            LISTPOI = re.search("/listpoi", line)
            LPOI = re.search("/lpoi", line)
            CLEARPOI = re.search("/clearpoi", line)
            KILLME = re.search("/killme", line)
            GOTO = re.search("/goto", line)
            HELP = re.search("/help", line)
            BAG = re.search("/bag",line)
            DIED = re.search("died",line)
            WHERE = re.search("/where",line)
            DROP = re.search("/drop",line)
            CLAIM = re.search("/claim",line)
            # Branch order matters: e.g. SETHOME must be tested before HOME,
            # SETPOI before POI.
            if DIED:
                try:
                    player = seperated_line[5]
                    pl.formatted_text = " ".join(pl.full_text.split()[4:])
                    pl.type = "PlayerEvent"
                    pl.event = "Died"
                    pl.name = player
                    return pl
                except Exception as e:
                    logger.log("Error parsing died system message: "+ e.message)
                    return pl
            elif HELP:
                pl.formatted_text = " ".join(pl.full_text.split()[4:])
                pl.type = "PlayerCommand"
                pl.event = "Help"
                pl.name = player
                return pl
            elif SETHOME:
                pl.formatted_text = " ".join(pl.full_text.split()[4:])
                pl.type = "PlayerCommand"
                pl.event = "Sethome"
                pl.name = player
                return pl
            elif HOME:
                pl.formatted_text = " ".join(pl.full_text.split()[4:])
                pl.type = "PlayerCommand"
                pl.event = "Home"
                pl.name = player
                return pl
            elif SETPOI:
                try:
                    pl.formatted_text = " ".join(pl.full_text.split()[4:])
                    poiname = seperated_line[6]
                    pl.type = "PlayerCommand"
                    pl.event = "Setpoi"
                    pl.name = player
                    pl.poiname = poiname
                    return pl
                except Exception as e:
                    logger.log_verbose("Error parsing setpoi command: "+ e.message)
                    return pl
            elif POI:
                try:
                    pl.formatted_text = " ".join(pl.full_text.split()[4:])
                    poiname = seperated_line[6]
                    pl.type = "PlayerCommand"
                    pl.event = "Poi"
                    pl.name = player
                    pl.poiname = poiname
                    return pl
                except Exception as e:
                    logger.log_verbose("Error parsing poi command: "+ e.message)
                    return pl
            elif LISTPOI or LPOI:
                pl.formatted_text = " ".join(pl.full_text.split()[4:])
                pl.type = "PlayerCommand"
                pl.event = "Listpoi"
                pl.name = player
                return pl
            elif RPOI:
                try:
                    pl.formatted_text = " ".join(pl.full_text.split()[4:])
                    poiname = seperated_line[6]
                    pl.type = "PlayerCommand"
                    pl.event = "Removepoi"
                    pl.name = player
                    pl.poiname = poiname
                    return pl
                except Exception as e:
                    logger.log("Error parsing rpoi command: "+ e.message)
                    return pl
            elif CLEARPOI:
                pl.formatted_text = " ".join(pl.full_text.split()[4:])
                pl.type = "PlayerCommand"
                pl.event = "Clearpoi"
                pl.name = player
                return pl
            elif GOTO:
                try:
                    pl.formatted_text = " ".join(pl.full_text.split()[4:])
                    othername = seperated_line[6]
                    pl.type = "PlayerCommand"
                    pl.event = "Goto"
                    pl.name = player
                    pl.othername = othername
                    return pl
                except Exception as e:
                    logger.log("Error parsing goto command: "+ e.message)
                    return pl
            elif BAG:
                pl.formatted_text = " ".join(pl.full_text.split()[4:])
                pl.type = "PlayerCommand"
                pl.event = "Bag"
                pl.name = player
                return pl
            elif KILLME:
                pl.formatted_text = " ".join(pl.full_text.split()[4:])
                pl.type = "PlayerCommand"
                pl.event = "Killme"
                pl.name = player
                return pl
            elif WHERE:
                pl.formatted_text = " ".join(pl.full_text.split()[4:])
                pl.type = "PlayerCommand"
                pl.event = "Where"
                pl.name = player
                return pl
            elif DROP:
                pl.formatted_text = " ".join(pl.full_text.split()[4:])
                pl.type = "PlayerCommand"
                pl.event = "Drop"
                pl.name = player
                return pl
            elif CLAIM:
                pl.formatted_text = " ".join(pl.full_text.split()[4:])
                pl.type = "PlayerCommand"
                pl.event = "Claim"
                pl.name = player
                #pl.drop = seperated_line[6] +" " + seperated_line[7] + " " + seperated_line[8] + " " + seperated_line[9] + " " + seperated_line[10]
                return pl
            else:
                # Plain chat message with no recognised command.
                pl.type = "GMSG"
                pl.event = "Msg"
                pl.formatted_text = " ".join(pl.full_text.split()[4:])
                return pl
        else:
            # Unrecognised INF line -- note: "formated_text" (sic) here.
            pl.formated_text = " ".join(pl.full_text.split()[3:])
            return pl
    elif LISTPLAYERS:
        # One row of `listplayers` output; fields extracted positionally.
        try:
            position = str(int(round(float(line.split(" ")[3].replace("pos=(", "").replace(",", ""))))) + " " + str(int(round(float(line.split(" ")[4].replace(",", ""))))) + " " + str(int(round(float(line.split(" ")[5].replace("),", "")))))
            entityid = line.split(",")[0].split("=")[1]
            name = line.split(",")[1].replace(" ", "")
            health = line.split(",")[9].split("=")[1]
            deaths = line.split(",")[10].split("=")[1]
            zombies = line.split(",")[11].split("=")[1]
            players = line.split(",")[12].split("=")[1]
            score = line.split(",")[13].split("=")[1]
            steamid = line.split(",")[15].split("=")[1]
            ip = line.split(",")[16].split("=")[1]
            ping = line.split(",")[17].split("=")[1].rstrip(string.whitespace)
            pl.type = "PlayerEvent"
            pl.event = "Update"
            pl.entityid = entityid
            pl.position = position
            pl.name = name
            pl.health = health
            pl.deaths = deaths
            pl.zombies = zombies
            pl.players= players
            pl.score = score
            pl.steamid = steamid
            pl.ip = ip
            pl.ping = ping
            return pl
        except Exception as e:
            logger.log_debug("Error parsing player update: "+ e.message)
            return pl
    elif WRN:
        pl.formated_text = " ".join(pl.full_text.split()[3:])
        return pl
    elif ERR:
        pl.formated_text = " ".join(pl.full_text.split()[3:])
        return pl
    elif F2:
        pl.type = "Filtered"
        return pl
    elif VERSION:
        pl.type = "SystemEvent"
        pl.event = "Version"
        pl.version = line.split()[3] + " " + line.split()[4] + " "+ line.split()[5]
        return pl
    elif PORT:
        pl.type = "SystemEvent"
        pl.event = "Port"
        pl.port = line.split()[2]
        return pl
    elif MAXP:
        pl.type = "SystemEvent"
        pl.event = "MaxPlayers"
        pl.max_players = line.split()[2]
        return pl
    elif GAMEMODE:
        pl.type = "SystemEvent"
        pl.event = "GameMode"
        pl.game_mode = line.split()[2]
        return pl
    elif WORLD:
        pl.type = "SystemEvent"
        pl.event = "World"
        pl.world = line.split()[1]
        return pl
    elif GAMENAME:
        pl.type = "SystemEvent"
        pl.event = "GameName"
        pl.game_name = line.split()[2]
        return pl
    elif DIFFICULTY:
        pl.type = "SystemEvent"
        pl.event = "Difficulty"
        pl.difficulty = line.split()[1]
        return pl
    else:
        # Completely unrecognised line.
        pl.formated_text = pl.full_text
        return pl
| {
"repo_name": "christopher-roelofs/7dtd-server-manager",
"path": "parse.py",
"copies": "1",
"size": "12491",
"license": "mit",
"hash": 6345382945512915000,
"line_mean": 32.044973545,
"line_max": 240,
"alpha_frac": 0.4628132255,
"autogenerated": false,
"ratio": 3.905878674171357,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4868691899671357,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Christoph Heindl'
__copyright__ = 'Copyright 2017, Profactor GmbH'
__license__ = 'BSD'
import glob
import os
import cv2
import numpy as np
import re
import matplotlib.pyplot as plt
import matplotlib
import time
from mpl_toolkits.axes_grid1 import make_axes_locatable
from sensor_correction.gp_cpu import GPRegressor
from sensor_correction.utils import sensor_unproject, create_batches
def crop(img, border):
    """Return img with border = (x, y) pixels removed from each edge.

    Fixes the zero-border case: the original used img[y:-y, x:-x], which
    yields an EMPTY array whenever a border component is 0 (because -0 == 0).
    Positive borders behave exactly as before.
    """
    x, y = border[0], border[1]
    return img[y:img.shape[0] - y, x:img.shape[1] - x]
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Evaluate Gaussian Process')
    parser.add_argument('regressor', type=str, help='Trained GP')
    parser.add_argument('depth', type=str, help='Preprocessed depth')
    parser.add_argument('intrinsics', type=str, help='Camera intrinsics')
    parser.add_argument('--output', type=str, help='Result file', default='corrected_depths.npz')
    parser.add_argument('--gpu', action='store_true', help='Use GPU')
    args = parser.parse_args()
    matplotlib.rcParams.update({'font.size': 20})
    # Load preprocessed depth maps, keyed by (pose, temperature).
    data = np.load(args.depth)
    temps = data['temps']
    poses = data['poses']
    all_depths_ir = data['depth_ir'][()]
    all_depths_rgb = data['depth_rgb'][()]
    # Build the (x, y) pixel-coordinate grid once; all maps share one shape.
    h, w = all_depths_ir[(poses[0], temps[0])].shape
    x = np.arange(0, w, 1)
    y = np.arange(0, h, 1)
    xx, yy = np.meshgrid(x, y)
    xy = np.hstack((xx.reshape(-1,1), yy.reshape(-1,1)))
    # Load the trained Gaussian-process regressor (optionally GPU-backed).
    r = GPRegressor()
    r.load(args.regressor)
    if args.gpu:
        import tensorflow as tf
        from sensor_correction.gp_gpu import GPRegressorGPU
        sess = tf.Session()
        # Batch size 16384 must match create_batches() below.
        xfeed = tf.placeholder(dtype=tf.float32, shape=[16384 ,4])
        r = GPRegressorGPU(r, xfeed)
    # Load camera intrinsics for unprojection.
    K = np.loadtxt(args.intrinsics).reshape(3,3)
    Kinv = np.linalg.inv(K)
    all_depths = {}
    all_deltae = {}
    total_time = 0.
    total_count = 0
    for p in poses:
        for t in temps:
            print('Processing pos {}, temperature {}'.format(p, t))
            depth_ir = all_depths_ir[(p, t)] # measured depth to be corrected
            start_time = time.time()
            # Predict per-pixel depth corrections on (x, y, z, temp) features.
            xyz = sensor_unproject(xy, depth_ir.ravel(), Kinv)
            xyzt = np.column_stack((xyz, np.ones(xyz.shape[0])*t))
            batches = create_batches(xyzt, 16384, pad=True)
            deltae = []
            for b in batches:
                if args.gpu:
                    br = sess.run(r.predict, feed_dict={xfeed : b})
                else:
                    br = r.predict(b)
                deltae.append(br)
            # Drop padding added by create_batches, then reshape to the map.
            deltae = np.concatenate(deltae)[:xyzt.shape[0]].reshape(depth_ir.shape)
            depth_corr = depth_ir + deltae
            total_time += (time.time() - start_time)
            total_count += 1
            all_deltae[(p, t)] = deltae
            all_depths[(p, t)] = depth_corr
    print('Processing took {:.3f}sec total, {:.3f}sec on average'.format(total_time, total_time / total_count))
    np.savez(args.output, depth_corrected=all_depths, depth_deltae=all_deltae, temps=temps, poses=poses)
| {
"repo_name": "cheind/rgbd-correction",
"path": "sensor_correction/apps/correct_depth.py",
"copies": "1",
"size": "3179",
"license": "bsd-3-clause",
"hash": -5414958977407274000,
"line_mean": 31.1111111111,
"line_max": 111,
"alpha_frac": 0.5920100661,
"autogenerated": false,
"ratio": 3.385516506922258,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9435876672091493,
"avg_score": 0.008329980186152924,
"num_lines": 99
} |
__author__ = 'Christoph Heindl'
__copyright__ = 'Copyright 2017, Profactor GmbH'
__license__ = 'BSD'
import glob
import os
import cv2
import numpy as np
import re
def model_points(pattern):
    """Return chessboard corner positions in board coordinates.

    ``pattern`` is (cols, rows, square_size). Corners lie in the z = 0
    plane, ordered row by row, as an Nx3 float32 array.
    """
    cols, rows, square = pattern
    pts = [[col * square, row * square, 0.0]
           for row in range(rows)
           for col in range(cols)]
    # reshape(-1, 3) keeps the (0, 3) shape for degenerate patterns.
    return np.array(pts, dtype=np.float32).reshape(-1, 3)
def camera_rays(K, w, h):
    """Back-project every pixel of a w x h image to a viewing ray.

    Returns a (w*h) x 3 float32 array in row-major pixel order
    (y outer, x inner); row y*w + x equals K^-1 @ [x, y, 1].
    """
    Kinv = np.linalg.inv(K)
    xs, ys = np.meshgrid(np.arange(w), np.arange(h))
    pix = np.column_stack((xs.ravel(), ys.ravel(), np.ones(w * h)))
    # (pix @ Kinv.T) row i is Kinv @ pix_i; cast to float32 like the buffer fill did.
    return pix.dot(Kinv.T).astype(np.float32)
if __name__ == '__main__':
    # Parse 'WIDTHxHEIGHTxSIZE' command-line strings, e.g. '10x7x34'.
    def size(s):
        return tuple(map(int, s.split('x')))
    import argparse
    parser = argparse.ArgumentParser(description='Dense depth from pattern under planar assumption')
    parser.add_argument('intrinsics', type=str, help='File containing color camera matrix. Single row 3x3 matrix stored in row-major layout.')
    parser.add_argument('distortions', type=str, help='File containing distortion parameters.Single row 1x4 matrix stored in row-major layout.')
    parser.add_argument('indir', type=str, help='Source directory containing images')
    parser.add_argument('-outdir', type=str, nargs='?', help='Target directory. If not specified indir is used.')
    parser.add_argument('-pattern', type=size, metavar='WIDTHxHEIGHTxSIZE', help='Pattern size.', default=(10,7,34))
    args = parser.parse_args()
    if args.outdir is None:
        args.outdir = args.indir
    os.makedirs(args.outdir, exist_ok=True)
    # Intrinsics and lens distortion of the color camera.
    K = np.loadtxt(args.intrinsics).reshape(3,3)
    D = np.loadtxt(args.distortions)
    images = sorted(glob.glob(os.path.join(args.indir, '*_color.png')))
    # Chessboard corner positions in board coordinates (z = 0 plane).
    modelpoints = model_points(args.pattern)
    rays = None  # per-pixel viewing rays, built lazily once the image size is known
    # Coordinate-axes endpoints for visualization, scaled to 3 squares.
    axis = np.float32([[0,0,0],[1,0,0], [0,1,0], [0,0,1]]).reshape(-1,3)
    axis *= args.pattern[2] * 3
    # Capture naming scheme: <id>_t<temp>_p<axisposition>_<depth|color>.png
    regex = re.compile(r'(?P<id>\d+)_t(?P<temp>\d+)_p(?P<axis>\d+)_(?P<type>depth|color).png$', flags=re.I)
    for fname in images:
        print('Processing {}'.format(fname))
        r = regex.search(fname)
        if r is None:
            print('Warning file format does not match for \'{}\'.'.format(fname))
            continue
        img = cv2.imread(fname)
        imggray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        h,w = imggray.shape[0:2]
        (found, corners) = cv2.findChessboardCorners(imggray, args.pattern[0:2])
        if found:
            # Estimate position of chessboard w.r.t camera
            cv2.cornerSubPix(imggray, corners, (5,5), (-1,-1), (cv2.TERM_CRITERIA_MAX_ITER | cv2.TERM_CRITERIA_EPS, 100, 0.01))
            (rv, rvec, tvec) = cv2.solvePnP(modelpoints, corners, K, D, None, None, False, cv2.SOLVEPNP_ITERATIVE)
            (R,jacobian) = cv2.Rodrigues(rvec)
            # Board-to-camera rigid transform as a 4x4 homogeneous matrix.
            T = np.eye(4)
            T[0:3, 0:3] = R
            T[0:3, 3] = tvec.reshape(3)
            # Generate virtual depth map
            pn = T[0:3, 2] # Plane normal
            p0 = T[0:3, 3] # Plane origin
            d = p0.dot(pn)
            if rays is None:
                rays = camera_rays(K, w, h)
            # Ray/plane intersection: scale each pixel ray so it hits the board plane.
            t = d / (rays.dot(pn.reshape(3,1)))
            isects = np.multiply(rays, t)
            depths = isects[:,2].reshape(h, w, 1)
            fnamenew = '{:06d}_t{:02d}_p{:04d}_sdepth'.format(int(r['id']), int(r['temp']), int(r['axis']))
            # NOTE(review): uint16 cast truncates fractional depth values - confirm the unit scale is intended.
            cv2.imwrite(os.path.join(args.outdir, fnamenew + '.png'), depths.astype(np.ushort)) # ushort
            cv2.imwrite(os.path.join(args.outdir, fnamenew + '.exr'), depths.astype(np.float32)) # floats
            np.savetxt(os.path.join(args.outdir, fnamenew + '.txt'), T.reshape(1,-1))
            # Draw the estimated board coordinate frame for visual inspection.
            pts, _ = cv2.projectPoints(axis, rvec, tvec, K, D)
            orig = tuple(pts[0].ravel())
            cv2.line(img, orig, tuple(pts[1].ravel()), (0,0,255), 2)
            cv2.line(img, orig, tuple(pts[2].ravel()), (0,255,0), 2)
            cv2.line(img, orig, tuple(pts[3].ravel()), (255,0,0), 2)
        else:
            print('No pattern found in {}'.format(os.path.basename(fname)))
        cv2.drawChessboardCorners(img, args.pattern[0:2], corners, found)
        cv2.imshow('x', img)
        if cv2.waitKey(50) == ord('x'):
            break
| {
"repo_name": "cheind/rgbd-correction",
"path": "sensor_correction/apps/depth_from_pattern.py",
"copies": "1",
"size": "4335",
"license": "bsd-3-clause",
"hash": 6990403979453158000,
"line_mean": 34.5327868852,
"line_max": 144,
"alpha_frac": 0.5760092272,
"autogenerated": false,
"ratio": 3.072289156626506,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4148298383826506,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Christoph Heindl'
__copyright__ = 'Copyright 2017, Profactor GmbH'
__license__ = 'BSD'
import glob
import os
import numpy as np
import matplotlib.pyplot as plt
from sensor_correction.utils import sensor_unproject
from sensor_correction.gp_cpu import GPRegressor
def select_data(temps, poses, all_depths_ir, all_depths_rgb, Kinv, xy, target='rgb'):
    """Collect (x, y, z, temperature) features and depth-delta targets.

    For every pose the reference depth is taken at the first temperature
    ``temps[0]``; for every temperature the measured IR depth is unprojected
    through ``Kinv`` and the per-pixel difference to the reference becomes
    the regression target. Pixels with zero IR depth are dropped.

    Params
    ------
    temps, poses : iterables of keys into the depth dictionaries
    all_depths_ir, all_depths_rgb : dict mapping (pose, temp) -> HxW depth map
    Kinv : 3x3 inverse camera matrix
    xy : Nx2 integer pixel coordinates to sample
    target : 'rgb' or 'ir' - which depth map provides the reference

    Returns
    -------
    sel_xyzt : Mx4 float32 features (x, y, z, temperature)
    sel_deltas : M regression targets (reference depth - IR depth)

    Raises
    ------
    ValueError if target is neither 'rgb' nor 'ir'.
    """
    sel_xyzt = []
    sel_deltas = []
    for p in poses:
        if target == 'rgb':
            depth_target = all_depths_rgb[(p, temps[0])]
        elif target == 'ir':
            depth_target = all_depths_ir[(p, temps[0])]
        else:
            # Fail early instead of hitting a confusing NameError below.
            raise ValueError("target must be 'rgb' or 'ir', got {!r}".format(target))
        d_target = depth_target[xy[:, 1], xy[:, 0]]
        for t in temps:
            depth_ir = all_depths_ir[(p, t)]  # actually measured IR depth
            d_ir = depth_ir[xy[:, 1], xy[:, 0]]
            xyz = sensor_unproject(xy, d_ir, Kinv)
            xyzt = np.empty((xyz.shape[0], 4), dtype=np.float32)
            xyzt[:, :3] = xyz
            xyzt[:, 3] = t
            delta = d_target - d_ir
            mask = d_ir > 0.  # zero depth marks invalid sensor readings
            sel_xyzt.append(xyzt[mask])
            sel_deltas.append(delta[mask])
    sel_xyzt = np.concatenate(sel_xyzt)
    sel_deltas = np.concatenate(sel_deltas)
    return sel_xyzt, sel_deltas
if __name__ == '__main__':
    np.random.seed(1)  # reproducible random test-pixel sampling
    import argparse
    parser = argparse.ArgumentParser(description='Train Gaussian Process for depth correction.')
    parser.add_argument('depth', type=str, help='Preprocessed depth data')
    parser.add_argument('intrinsics', type=str, help='Camera intrinsics')
    parser.add_argument('--output', type=str, help='Result regressor filename', default='gpr.pkl')
    parser.add_argument('--target', type=str, help='Target depth to train for, RGB or IR.', default='rgb')
    args = parser.parse_args()
    # Load depth data
    data = np.load(args.depth)
    temps = data['temps']
    poses = data['poses']
    # [()] unwraps the dicts stored inside 0-d object arrays by np.savez.
    all_depths_ir = data['depth_ir'][()]
    all_depths_rgb = data['depth_rgb'][()]
    h, w = all_depths_ir[(poses[0], temps[0])].shape
    # Load intrinsics
    K = np.loadtxt(args.intrinsics).reshape(3,3)
    Kinv = np.linalg.inv(K)
    # Create train and test data
    # Training samples: a sparse regular 8x8 pixel grid over the image.
    x = np.linspace(0, w-1, 8, dtype=np.int32)
    y = np.linspace(0, h-1, 8, dtype=np.int32)
    xx, yy = np.meshgrid(x, y)
    xy_train = np.hstack((xx.reshape(-1,1), yy.reshape(-1,1)))
    # Train on every second temperature, all poses.
    train_xyzt, train_deltae = select_data(
        temps[::2],
        poses,
        all_depths_ir,
        all_depths_rgb,
        Kinv,
        xy_train,
        target=args.target.lower())
    # Test samples: random pixels, every second temperature and pose.
    xy_test = np.random.uniform(0, [w-1,h-1], size=(10,2)).astype(np.int32)
    test_xyzt, test_deltae = select_data(
        temps[::2],
        poses[::2],
        all_depths_ir,
        all_depths_rgb,
        Kinv,
        xy_test,
        target=args.target.lower())
    r = GPRegressor()
    # Initial hyper-parameters; tuned via marginal likelihood (optimize=True).
    r.fit(train_xyzt, train_deltae, length_scale=[0.5, 0.5, 0.5, 10], signal_std=1., noise_std=0.002, optimize=True, normalize=True, repeat=2)
    ypred = r.predict(test_xyzt)
    d = ypred - test_deltae
    rmse = np.sqrt(np.mean(np.square(d)))
    print('RMSE {:e}'.format(rmse))
    print('Optimized length scale {}'.format(r.length_scale))
    print('Optimized signal std {}'.format(r.signal_std))
    print('Optimized noise std {}'.format(r.noise_std))
r.save(args.output) | {
"repo_name": "cheind/rgbd-correction",
"path": "sensor_correction/apps/train.py",
"copies": "1",
"size": "3582",
"license": "bsd-3-clause",
"hash": -3197969751457897000,
"line_mean": 30.1565217391,
"line_max": 142,
"alpha_frac": 0.5642099386,
"autogenerated": false,
"ratio": 3.1896705253784505,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9195199841812516,
"avg_score": 0.01173612443318705,
"num_lines": 115
} |
__author__ = 'Christoph Heindl'
__copyright__ = 'Copyright 2017, Profactor GmbH'
__license__ = 'BSD'
import glob
import os
import numpy as np
import re
import matplotlib.pyplot as plt
import matplotlib
from sensor_correction.utils import mask_outliers
from sensor_correction.utils import sensor_unproject
import seaborn as sbn
sbn.set_context('paper')
sbn.set(font_scale=2)
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Plot error statistics')
    parser.add_argument('depth', type=str, help='Preprocessed depth')
    parser.add_argument('corrected', type=str, help='Corrected depth')
    parser.add_argument('intrinsics', type=str, help='Camera intrinsics')
    parser.add_argument('--no-show', action='store_true', help='Do not display results, just save image')
    parser.add_argument('--temps', nargs='*', type=int)
    parser.add_argument('--poses', nargs='*', type=int)
    args = parser.parse_args()
    # Load depth data
    data = np.load(args.depth)
    temps = data['temps']
    poses = data['poses']
    # Optional subsetting of temperatures / poses from the command line.
    if args.temps:
        temps = np.array(args.temps)
    if args.poses:
        poses = np.array(args.poses)
    # [()] unwraps the dicts stored inside 0-d object arrays.
    all_depths_ir = data['depth_ir'][()]
    all_depths_rgb = data['depth_rgb'][()]
    data = np.load(args.corrected)
    all_corrected = data['depth_corrected'][()]
    all_deltae = data['depth_deltae'][()]
    # Load intrinsics
    K = np.loadtxt(args.intrinsics).reshape(3,3)
    Kinv = np.linalg.inv(K)
    h, w = all_depths_ir[(poses[0], temps[0])].shape
    # Full-resolution pixel grid.
    x = np.arange(0, w, 1)
    y = np.arange(0, h, 1)
    xx, yy = np.meshgrid(x, y)
    xy = np.hstack((xx.reshape(-1,1), yy.reshape(-1,1)))
    # Squared errors accumulated over all poses/temperatures.
    se_xyz_before = []
    se_xyz_after = []
    se_depth_before = []  # NOTE(review): never appended to below - apparently unused
    se_depth_after = []   # NOTE(review): never appended to below - apparently unused
    for p in poses:
        # Reference: RGB-derived depth at the first (coolest) temperature.
        depth_target = all_depths_rgb[(p, temps[0])]
        for t in temps:
            print('Processing pos {}, temperature {}'.format(p, t))
            depth_ir = all_depths_ir[(p, t)] # Actual
            depth_c = all_corrected[(p, t)] # Corrected
            # Unproject target, actual and corrected depth to 3D points.
            xyz_t = sensor_unproject(xy, depth_target.ravel(), Kinv)
            xyz_a = sensor_unproject(xy, depth_ir.ravel(), Kinv)
            xyz_c = sensor_unproject(xy, depth_c.ravel(), Kinv)
            # remove extreme outliers
            outliers = mask_outliers(np.abs(depth_ir - depth_target)).ravel()
            before_xyz = np.square((xyz_t - xyz_a)[~outliers])
            after_xyz = np.square((xyz_t - xyz_c)[~outliers])
            se_xyz_before.append(before_xyz)
            se_xyz_after.append(after_xyz)
            # Per-(pose, temperature) RMSE of each coordinate axis.
            rmse_xyz_before = np.sqrt(np.mean(before_xyz, axis=0))
            rmse_xyz_after = np.sqrt(np.mean(after_xyz, axis=0))
            print('  RMSE before (x,y,z) {}'.format(rmse_xyz_before))
            print('  RMSE after  (x,y,z) {}'.format(rmse_xyz_after))
    print('Overall')
    rmse_xyz_before = np.sqrt(np.mean(np.concatenate(se_xyz_before), axis=0))
    rmse_xyz_after = np.sqrt(np.mean(np.concatenate(se_xyz_after), axis=0))
    print('  RMSE before (x,y,z) {}'.format(rmse_xyz_before))
    print('  RMSE after  (x,y,z) {}'.format(rmse_xyz_after))
| {
"repo_name": "cheind/rgbd-correction",
"path": "sensor_correction/apps/plot_statistics.py",
"copies": "1",
"size": "3208",
"license": "bsd-3-clause",
"hash": -729235003929808400,
"line_mean": 33.4946236559,
"line_max": 105,
"alpha_frac": 0.6006857855,
"autogenerated": false,
"ratio": 3.1358748778103616,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9215637112217571,
"avg_score": 0.004184710218558153,
"num_lines": 93
} |
__author__ = 'Christoph Heindl'
__copyright__ = 'Copyright 2017, Profactor GmbH'
__license__ = 'BSD'
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sn
from collections import defaultdict
import seaborn as sbn
sbn.set_context('paper')
sbn.set(font_scale=2)
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Plot transform values as function of temperature')
    parser.add_argument('depth', type=str, help='Preprocessed depth')
    parser.add_argument('--axis', type=int, help='Axis position', default=1000)
    parser.add_argument('--corrected-depth', type=str, help='Corrected depth')
    args = parser.parse_args()
    # Load preprocessed depth maps; [()] unwraps the stored dicts.
    data = np.load(args.depth)
    temps = data['temps']
    depths_ir = data['depth_ir'][()]
    depths_rgb = data['depth_rgb'][()]
    h,w = depths_ir[(args.axis, temps[0])].shape
    # Average depth over small windows in the corners and the image center.
    wnd = 20
    crops = {
        'Top-Left Crop': [np.s_[0:wnd], np.s_[0:wnd]],
        'Top-Right Crop' : [np.s_[0:wnd], np.s_[w-wnd:-1]],
        'Bottom-Left Crop' : [np.s_[h-wnd:-1], np.s_[0:wnd]],
        'Bottom-Right Crop' : [np.s_[h-wnd:-1], np.s_[w-wnd:-1]],
        'Center Crop' : [np.s_[h//2-wnd//2:h//2+wnd//2], np.s_[w//2-wnd//2:w//2+wnd//2]] # center crop
    }
    depths_corrected = None
    if args.corrected_depth:
        data = np.load(args.corrected_depth)
        depths_corrected = data['depth_corrected'][()]
    # Mean depth per crop as a function of temperature.
    d_rgb = defaultdict(list)
    d_ir = defaultdict(list)
    d_corr = defaultdict(list)
    for t in temps:
        for k, c in crops.items():
            ir = np.mean(depths_ir[(args.axis, t)][c[0], c[1]])
            rgb = np.mean(depths_rgb[(args.axis, t)][c[0], c[1]])
            d_ir[k].append(ir)
            d_rgb[k].append(rgb)
            if depths_corrected:
                # Separate name: the original rebound the loop variable `c` here.
                corr = np.mean(depths_corrected[(args.axis, t)][c[0], c[1]])
                d_corr[k].append(corr)
    display_crops = ['Bottom-Right Crop']
    prop_cycle = plt.rcParams['axes.prop_cycle']
    colors = prop_cycle.by_key()['color']
    plt.xlabel('Temperature (°C)')
    plt.ylabel('Depth (m)')
    for cr, c in zip(display_crops, colors):
        plt.plot(temps, d_rgb[cr], c=c, label='Depth from RGB')
    for cr, c in zip(display_crops, colors):
        plt.plot(temps, d_ir[cr], c=c, linestyle='--', label='Depth from IR')
    plt.legend(loc='best')
    plt.tight_layout()
    plt.savefig('depth_vs_temp_p{}.png'.format(args.axis), dpi=300, bbox_inches='tight')
    plt.savefig('depth_vs_temp_p{}.pdf'.format(args.axis), dpi=300, bbox_inches='tight', transparent=True)
    ylim = plt.gca().get_ylim()
    plt.show()
    if depths_corrected:
        plt.xlabel('Temperature (°C)')
        plt.ylabel('Depth (m)')
        for cr, c in zip(display_crops, colors):
            plt.plot(temps, np.repeat(d_rgb[cr][0], temps.shape[0]), c=c, label='Depth from RGB')
        for cr, c in zip(display_crops, colors):
            plt.plot(temps, d_corr[cr], linestyle='-.', c=c, label='Corrected Depth')
        plt.ylim(ylim)
        # 'bottom left' is not a valid matplotlib legend location; 'lower left' is.
        plt.legend(loc='lower left')
        plt.tight_layout()
        plt.savefig('depth_vs_temp_p{}_corrected.pdf'.format(args.axis), dpi=300, bbox_inches='tight', transparent=True)
        plt.savefig('depth_vs_temp_p{}_corrected.png'.format(args.axis), dpi=300, bbox_inches='tight')
        plt.show()
| {
"repo_name": "cheind/rgbd-correction",
"path": "sensor_correction/apps/plot_depth_vs_temperature.py",
"copies": "1",
"size": "3538",
"license": "bsd-3-clause",
"hash": 4978591717838028000,
"line_mean": 35.8333333333,
"line_max": 120,
"alpha_frac": 0.5777714932,
"autogenerated": false,
"ratio": 3.022222222222222,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.901919636996148,
"avg_score": 0.016159469092148503,
"num_lines": 96
} |
__author__ = 'Christoph Heindl'
__copyright__ = 'Copyright 2017, Profactor GmbH'
__license__ = 'BSD'
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import cv2
def crop(img, border):
    """Cut a symmetric border off an image.

    Params
    ------
    img : HxW[,C] array
    border : (bx, by) pixels stripped from left/right and top/bottom.

    A border component of 0 leaves that dimension untouched; the original
    ``img[b:-b]`` slicing returned an empty array for ``b == 0``.
    """
    bx, by = border
    return img[by:img.shape[0] - by, bx:img.shape[1] - bx]
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Create mean depth images for RGB/IR by averaging over redundant captures.')
    parser.add_argument('index', type=str, help='Index CSV')
    parser.add_argument('--crop', type=int, help='Crop images by this size', default=100)
    parser.add_argument('--unitscale', type=float, help='Scale depth by this value', default=0.001)
    parser.add_argument('--output', type=str, help='Result file', default='input_depths.npz')
    args = parser.parse_args()
    # DataFrame.from_csv was removed from pandas; read_csv is the replacement.
    df = pd.read_csv(args.index, sep=' ', index_col=0)
    depth_ir = {}
    depth_rgb = {}
    temps = []
    poses = []
    groups = df.groupby(df.Temp)
    first = True
    for t, tgroup in groups:
        temps.append(t)
        print('Processing temperature {}'.format(t))
        for p, pgroup in tgroup.groupby(tgroup.Axis):
            if first:
                poses.append(p)
            print('  Processing position {}'.format(p))
            # Read IR Depth: average the redundant captures per (pose, temp).
            d = []
            for name in pgroup[pgroup.Type == 'depth.png']['Name']:
                fname = os.path.join(os.path.dirname(args.index), name)
                dm = cv2.imread(fname, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH).astype(np.float32)
                # Zero depth marks invalid readings. The original wrote
                # `dm[d==0]` - indexing with the Python list `d`, a silent no-op.
                dm[dm == 0] = np.nan
                d.append(dm)
            d = np.stack(d, axis=0)
            d = np.mean(d, axis=0)
            depth_ir[(p, t)] = d * args.unitscale
            # Same averaging for the synthetic (pattern-derived) depth maps.
            d = []
            for name in pgroup[pgroup.Type == 'sdepth.exr']['Name']:
                fname = os.path.join(os.path.dirname(args.index), name)
                dm = cv2.imread(fname, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH).astype(np.float32)
                dm[dm == 0] = np.nan
                d.append(dm)
            d = np.stack(d, axis=0)
            # NOTE(review): with NaN-marked pixels np.mean propagates NaN;
            # use np.nanmean instead if partially-valid pixels should survive.
            d = np.mean(d, axis=0)
            depth_rgb[(p, t)] = d * args.unitscale
        first = False
    # Strip the image border before saving.
    depth_ir = {k: crop(img, (args.crop, args.crop)) for k, img in depth_ir.items()}
    depth_rgb = {k: crop(img, (args.crop, args.crop)) for k, img in depth_rgb.items()}
    np.savez(args.output, depth_ir=depth_ir, depth_rgb=depth_rgb, temps=temps, poses=poses)
| {
"repo_name": "cheind/rgbd-correction",
"path": "sensor_correction/apps/preprocess_depth.py",
"copies": "1",
"size": "2503",
"license": "bsd-3-clause",
"hash": 8982445481966697000,
"line_mean": 35.2898550725,
"line_max": 125,
"alpha_frac": 0.5713144227,
"autogenerated": false,
"ratio": 3.3021108179419527,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43734252406419527,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Christoph Heindl'
__copyright__ = 'Copyright 2017, Profactor GmbH'
__license__ = 'BSD'
import numpy as np
import tensorflow as tf
from tensorflow.contrib.staging import StagingArea
from tensorflow.python.ops import data_flow_ops
import time
import math
from sensor_correction.gp_cpu import GPRegressor
from sensor_correction.gp_gpu import GPRegressorGPU
from sensor_correction.utils import sensor_unproject, create_batches
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Evaluate Gaussian Process')
    parser.add_argument('regressor', type=str, help='Trained GP')
    parser.add_argument('depth', type=str, help='Preprocessed depth')
    parser.add_argument('intrinsics', type=str, help='Camera intrinsics')
    parser.add_argument('--batch-sizes', nargs='*', type=int, default=[128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768])
    args = parser.parse_args()
    data = np.load(args.depth)
    temps = data['temps']
    poses = data['poses']
    # [()] unwraps the dicts stored inside 0-d object arrays.
    all_depths_ir = data['depth_ir'][()]
    all_depths_rgb = data['depth_rgb'][()]
    h, w = all_depths_ir[(poses[0], temps[0])].shape
    # Full-resolution pixel grid.
    x = np.arange(0, w, 1)
    y = np.arange(0, h, 1)
    xx, yy = np.meshgrid(x, y)
    xy = np.hstack((xx.reshape(-1,1), yy.reshape(-1,1)))
    # Load regressor
    r_cpu = GPRegressor()
    r_cpu.load(args.regressor)
    # Load intrinsics
    K = np.loadtxt(args.intrinsics).reshape(3,3)
    Kinv = np.linalg.inv(K)
    # NOTE(review): these three accumulators are never used below.
    total_time_cpu = 0.
    total_time_gpu = 0.
    total_count = 0
    # Build one full feature set (x, y, z, temperature) to benchmark on.
    depth_ir = all_depths_ir[(poses[0], temps[0])]
    xyz = sensor_unproject(xy, depth_ir.ravel(), Kinv)
    xyzt = np.column_stack((xyz, np.ones(xyz.shape[0])*temps[0]))
    times_gpu = []
    times_cpu = []
    for bs in args.batch_sizes:
        batches = create_batches(xyzt, bs)
        # gpu without staging
        # Fresh graph/session per batch size; the placeholder shape is fixed to bs.
        with tf.Graph().as_default():
            with tf.Session().as_default() as sess:
                xyzt_gpu = tf.placeholder(dtype=tf.float32, shape=[bs, 4])
                r_gpu = GPRegressorGPU(r_cpu, xyzt_gpu)
                # warm-up
                for b in batches:
                    deltae_gpu = sess.run(r_gpu.predict, feed_dict={xyzt_gpu : b})
                start_time = time.time()
                for b in batches:
                    deltae_gpu = sess.run(r_gpu.predict, feed_dict={xyzt_gpu : b})
                t = time.time() - start_time
                times_gpu.append(t)
                print('GPU {:.3f}sec / Batch size {}'.format(t, bs))
        # CPU timing over the identical batches for a fair comparison.
        start_time = time.time()
        for b in batches:
            deltae_cpu = r_cpu.predict(b)
        t = time.time() - start_time
        print('CPU {:.3f}sec / Batch size {}'.format(t, bs))
        times_cpu.append(t)
np.savez('times.npz', batches=args.batch_sizes, cpu=times_cpu, gpu=times_gpu) | {
"repo_name": "cheind/rgbd-correction",
"path": "sensor_correction/apps/compare_cpu_gpu.py",
"copies": "1",
"size": "2914",
"license": "bsd-3-clause",
"hash": 854781091558726800,
"line_mean": 32.8953488372,
"line_max": 124,
"alpha_frac": 0.5868222375,
"autogenerated": false,
"ratio": 3.3113636363636365,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43981858738636365,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Christoph Heindl'
__copyright__ = 'Copyright 2017, Profactor GmbH'
__license__ = 'BSD'
import numpy as np
import tensorflow as tf
class GPRegressorGPU:
    '''Gaussian Process regressor on GPU.
    Takes a pre-fitted Gaussian Process regressor (CPU) and prepares a TensorFlow graph for
    prediction using pre-computed values.
    '''
    def __init__(self, gp_cpu, x):
        '''Initialize regressor.

        Builds the TF graph for the posterior mean only; the result is
        available as the ``self.predict`` tensor.

        Params
        ------
        gp_cpu : sensor_correction.GPRegressor
            Pre-fitted Gaussian Process regressor
        x : Nx4 tensor
            Feature vectors on GPU.
        '''
        # Prepare constants baked in from the fitted CPU model.
        signal_var = tf.constant(gp_cpu.signal_std**2, dtype=tf.float32)
        mean_y = tf.constant(gp_cpu.gpr.y_train_mean, dtype=tf.float32)
        # Per-dimension inverse length scales used to weight the distance.
        w = tf.constant(np.reciprocal(gp_cpu.length_scale), dtype=tf.float32)
        # Pre-scale train and test
        xtrain = tf.constant(gp_cpu.gpr.X_train_, dtype=tf.float32) * w
        xtest = x * w
        # alpha_ = K^-1 y, precomputed by sklearn during fitting.
        alpha = tf.expand_dims(tf.constant(gp_cpu.gpr.alpha_, dtype=tf.float32), -1)
        # Compute pairwise squared distance
        # via the expansion |a - b|^2 = |a|^2 + |b|^2 - 2 a.b:
        # a broadcasts |xtrain_i|^2 across columns, b broadcasts |xtest_j|^2 across rows.
        a = tf.matmul(
            tf.expand_dims(tf.reduce_sum(tf.square(xtrain), 1), 1),
            tf.ones(shape=(1, xtest.shape[0]))
        )
        b = tf.transpose(tf.matmul(
            tf.reshape(tf.reduce_sum(tf.square(xtest), 1), shape=[-1, 1]),
            tf.ones(shape=(xtrain.shape[0], 1)),
            transpose_b=True
        ))
        d = tf.add(a, b) - 2 * tf.matmul(xtrain, xtest, transpose_b=True)
        # Eval kernel: squared-exponential, matching the CPU model's RBF.
        k = signal_var * tf.exp(-0.5 * d)
        # Predict: posterior mean k^T alpha, plus the training target mean.
        self.predict = tf.matmul(tf.transpose(k), alpha) + mean_y
| {
"repo_name": "cheind/rgbd-correction",
"path": "sensor_correction/gp_gpu.py",
"copies": "1",
"size": "1725",
"license": "bsd-3-clause",
"hash": 3465722650811564500,
"line_mean": 30.3636363636,
"line_max": 91,
"alpha_frac": 0.5785507246,
"autogenerated": false,
"ratio": 3.409090909090909,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9369095300153318,
"avg_score": 0.023709266707518148,
"num_lines": 55
} |
__author__ = 'Christoph Heindl'
__copyright__ = 'Copyright 2017, Profactor GmbH'
__license__ = 'BSD'
import numpy as np
from scipy.linalg import solve_triangular as stri
from sklearn.externals import joblib
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel, WhiteKernel
class GPRegressor:
    '''Gaussian Process regressor on CPU.
    Takes input feature vectors X and target regression values Y and fits a Gaussian process,
    that can be used to predict for query points X*. This implementation uses a squared
    exponential kernel for determining the similarity between feature vectors. Tuning of
    hyper-parameters is supported by optimizing the negative log-marginal-likelihood through
    sklearn provided code.
    '''
    def fit(self, X, Y, length_scale=1.0, signal_std=1.0, noise_std=1e-10, normalize=False, optimize=False, repeat=0):
        '''Fit a Gaussian Process regressor.
        Params
        ------
        X : mx4 array
            Training feature vectors
        Y : mx1 array
            Target values
        Kwargs
        ------
        length_scale : scalar or 4x1 array, optional
            Kernel length scaling input feature dimensions
        signal_std : scalar, optional
            Signal sigma
        noise_std : scalar, optional
            Observation noise sigma
        normalize : bool, optional
            Whether or not to normalize Y by mean adjustment
        optimize : bool or list
            Turn on/off optimization. If list, only the parameters in list will be tuned.
        repeat : int, optional
            Number of optimizer restarts from randomized initial values.
        '''
        optimizer = 'fmin_l_bfgs_b'
        # Shared default bounds; a fixed parameter gets point bounds below.
        bounds_ls = bounds_ss = bounds_ns = (1e-3, 1e3)
        # sklearn kernels are parameterized by variances, not sigmas.
        signal_var = signal_std**2
        noise_var = noise_std**2
        if isinstance(optimize, list):
            # Collapse the bounds of parameters not selected for tuning.
            bounds_ls = (1e-3, 1e3) if 'length_scale' in optimize else (length_scale, length_scale)
            bounds_ss = (1e-3, 1e3) if 'signal_std' in optimize else (signal_var, signal_var)
            bounds_ns = (1e-3, 1e3) if 'noise_std' in optimize else (noise_var, noise_var)
        elif not optimize:
            optimizer = None
        kernel = ConstantKernel(signal_var, bounds_ss) * RBF(length_scale, bounds_ls) + WhiteKernel(noise_var, bounds_ns)
        # alpha=0: observation noise is modeled explicitly by the WhiteKernel.
        self.gpr = GaussianProcessRegressor(kernel=kernel, alpha=0.0, normalize_y=normalize, optimizer=optimizer, n_restarts_optimizer=repeat)
        self.gpr.fit(X, Y)
    def predict(self, X, return_std=False):
        '''Predict values.
        Params
        ------
        X : nx4 array
            Input feature vectors.
        Kwargs
        ------
        return_std : bool, optional
            If true, returns the uncertainty variances for query points. Useful for
            computing confidence values.
        Returns
        -------
        Y : nx1 array
            Predictions
        K : nx1 array
            Variances for query points. Only if return_std = true
        '''
        return self.gpr.predict(X, return_std=return_std)
    @property
    def length_scale(self):
        # Fitted kernel structure is (Const * RBF) + White; k1.k2 is the RBF.
        return self.gpr.kernel_.k1.k2.length_scale
    @property
    def signal_std(self):
        # ConstantKernel stores the variance; convert back to sigma.
        return np.sqrt(self.gpr.kernel_.k1.k1.constant_value)
    @property
    def noise_std(self):
        # WhiteKernel stores the noise level as a variance; convert to sigma.
        return np.sqrt(self.gpr.kernel_.k2.noise_level)
    def save(self, fname):
        # Persist the fitted sklearn regressor via joblib.
        joblib.dump(self.gpr, fname)
    def load(self, fname):
        # Restore a regressor previously written by save().
        self.gpr = joblib.load(fname)
class GPRegressorStandalone:
    '''Standalone Gaussian Process regressor.

    A minimal squared-exponential GP without sklearn: ``fit`` factorizes the
    kernel matrix via Cholesky, ``predict`` evaluates the posterior mean and
    optionally the predictive standard deviation.

    Uses ``stri`` (``scipy.linalg.solve_triangular``) for the triangular
    solves; the original code referenced ``stri`` without ever importing it,
    so ``fit`` failed with a NameError.
    '''
    def fit(self, X, Y, W, signal_std=1.0, noise_std=1e-10, normalize=False):
        '''Fit the GP to features X (m x d) and targets Y (m,).

        W is a d x d metric weighting the squared distance in the kernel.
        noise_std is added to the kernel diagonal for observation noise /
        numerical stability.
        NOTE(review): the diagonal uses noise_std directly (not squared),
        unlike the variance-based sklearn GPRegressor above - confirm intent.
        '''
        self.noise_std = noise_std
        self.signal_std = signal_std
        self.W = W
        self.X = X
        if normalize:
            # Zero-mean the targets; the mean is added back in predict().
            self.ymean = np.mean(Y)
            Y = Y - self.ymean
        else:
            self.ymean = np.zeros(1)
        self.K = GPRegressorStandalone.kernel(X, X, self.W, self.signal_std) + np.eye(X.shape[0]) * self.noise_std
        self.L = np.linalg.cholesky(self.K)
        # Li = inv(L)^T, hence Ki = Li Li^T = K^-1.
        self.Li = stri(self.L.T, np.eye(self.L.shape[0]))
        self.Ki = self.Li.dot(self.Li.T)
        # alpha = K^-1 Y via two triangular solves against L and L^T.
        self.alpha = stri(self.L.T, stri(self.L, Y, check_finite=False, lower=True))
    def predict(self, X, return_std=False):
        '''Posterior mean at query points X; optionally the predictive std.'''
        Ks = GPRegressorStandalone.kernel(self.X, X, self.W, self.signal_std)
        pred = Ks.T.dot(self.alpha) # Zero mean
        pred += self.ymean
        if return_std:
            Kss = GPRegressorStandalone.kernel(X, X, self.W, self.signal_std)
            # Predictive variance: diag(Kss) - diag(Ks^T K^-1 Ks).
            sigma = np.copy(np.diag(Kss))
            sigma -= np.einsum("ij,ij->i", np.dot(Ks.T, self.Ki), Ks.T)
            # Clamp tiny negative variances caused by floating point error.
            sigma[sigma < 0.] = 0.
            sigma = np.sqrt(sigma)
            return pred, sigma
        else:
            return pred
    @staticmethod
    def dist(A, B, W):
        '''Pairwise squared weighted distance; result is len(A) x len(B).'''
        diff = A[np.newaxis, :, :] - B[:, np.newaxis, :]
        d = np.einsum('jil,jil->ij', np.tensordot(diff, W, axes=(2,0)), diff)
        return d
    @staticmethod
    def kernel(A, B, W, signal_std=1.):
        '''Squared exponential covariance function.'''
        d = GPRegressorStandalone.dist(A, B, W)
        return signal_std**2 * np.exp(-0.5 * d)
| {
"repo_name": "cheind/rgbd-correction",
"path": "sensor_correction/gp_cpu.py",
"copies": "1",
"size": "5241",
"license": "bsd-3-clause",
"hash": -1424031738002748000,
"line_mean": 32.8129032258,
"line_max": 142,
"alpha_frac": 0.5956878458,
"autogenerated": false,
"ratio": 3.6753155680224405,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.477100341382244,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Christoph Heindl'
__copyright__ = 'Copyright 2017, Profactor GmbH'
__license__ = 'BSD'
import pandas as pd
import numpy as np
import glob
import re
import os
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Convert raw image files to pandas csv with unique file naming.')
    parser.add_argument('indir', type=str, help='Directory containing caputured files')
    args = parser.parse_args()
    # Capture naming scheme: <id>_t<temp>_p<axis>_<type>
    pattern = re.compile(r'(?P<id>\d+)_t(?P<temp>\d+)_p(?P<axis>\d+)_(?P<type>.*)', flags=re.I)
    records = []
    for path in glob.glob(os.path.join(args.indir, '*')):
        print(path)
        m = pattern.search(path)
        if m is None:
            print("File '{}' does not match pattern.".format(path))
            continue
        records.append((
            int(m['id']),
            int(m['temp']),
            int(m['axis']),
            m['type'].lower(),
            os.path.basename(path)
        ))
    # One row per capture; space-separated CSV without the index column.
    df = pd.DataFrame.from_records(records, columns=['Id', 'Temp', 'Axis', 'Type', 'Name'])
    df.to_csv('index.csv', header=True, sep=' ', index=False)
| {
"repo_name": "cheind/rgbd-correction",
"path": "sensor_correction/apps/create_pandas.py",
"copies": "1",
"size": "1178",
"license": "bsd-3-clause",
"hash": 1572360680582106400,
"line_mean": 28.45,
"line_max": 114,
"alpha_frac": 0.5483870968,
"autogenerated": false,
"ratio": 3.56969696969697,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46180840664969697,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Christoph Heindl'
__copyright__ = 'Copyright 2017, Profactor GmbH'
__license__ = 'BSD'
'''
Converts raw caputered 4D data (temperature, axis-position, color, depth) from @gebenh capture tool to
a list of unique filenames matching the following pattern
<id>_t<temperature>_p<axisposition>_<color|depth>.png
'''
import glob
import os
import re
import numpy as np
from shutil import copyfile
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Convert raw image files to pandas csv with unique file naming.')
    parser.add_argument('indir', type=str, help='Directory containing caputured files')
    parser.add_argument('outdir', type=str, help='Result directory. Will be created if not found.')
    args = parser.parse_args()
    # Raw capture layout: experiment_<temp>/<id>_<depth|color>.png
    regex = re.compile(r'experiment_(?P<temp>\d+)[/\\](?P<id>\d+)_(?P<type>depth|color).png$', flags=re.I)
    os.makedirs(args.outdir)
    axis = None  # axis positions, loaded lazily from the first directory seen
    for idx, fname in enumerate(sorted(glob.iglob(os.path.join(args.indir, '**', '*.png'), recursive=True))):
        if axis is None:
            # Assume same axis movement for all temperature directories.
            axis = np.loadtxt(os.path.join(os.path.dirname(fname), 'axis.txt'), dtype=int, skiprows=1)[:, 1]
        r = regex.search(fname)
        if r is None:
            print('Warning file \'{}\' does not match format'.format(fname))
        else:
            # idx//2 groups each color/depth pair under one capture id.
            newfname = '{:06d}_t{:02d}_p{:04d}_{}.png'.format(idx//2, int(r['temp']), axis[int(r['id'])], r['type'])
copyfile(fname, os.path.join(args.outdir, newfname)) | {
"repo_name": "cheind/rgbd-correction",
"path": "sensor_correction/apps/convert.py",
"copies": "1",
"size": "1598",
"license": "bsd-3-clause",
"hash": 2787575978822131000,
"line_mean": 38,
"line_max": 116,
"alpha_frac": 0.6464330413,
"autogenerated": false,
"ratio": 3.451403887688985,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45978369289889853,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Christoph Jansen, HTW Berlin'
from data import CorpusOptions
def accuracy(flags):
    """Share of correct decisions among all decisions; 0 if none are correct."""
    right = correct(flags)
    if right == 0:
        return 0
    return right / (right + wrong(flags))


def correct(flags):
    """Number of correct decisions (true negatives + true positives)."""
    tn, fp, tp, fn = flags
    return tn + tp


def wrong(flags):
    """Number of wrong decisions (false positives + false negatives)."""
    tn, fp, tp, fn = flags
    return fn + fp


def precision(flags):
    """tp / (tp + fp); 0 when there are no true positives."""
    tn, fp, tp, fn = flags
    return 0 if tp == 0 else tp / (tp + fp)


def recall(flags):
    """tp / (tp + fn); 0 when there are no true positives."""
    tn, fp, tp, fn = flags
    return 0 if tp == 0 else tp / (tp + fn)


def f_score(flags, beta=1):
    """Weighted harmonic mean of precision and recall (F_beta).

    beta > 1 favors recall, beta < 1 favors precision; 0 if either is 0.
    """
    p = precision(flags)
    r = recall(flags)
    if p == 0 or r == 0:
        return 0
    bb = beta * beta
    return (1 + bb) * (p * r / (bb * p + r))
def eval(intend_token, predict_token):
    """Compare a prediction to the intended token: (1, 0) on a hit, (0, 1) on a miss.

    NOTE(review): this shadows the builtin ``eval`` within the module.
    """
    hit = predict_token == intend_token
    return (1, 0) if hit else (0, 1)
def w_flags(flags, artificial_error):
    """Reweight confusion counts by the artificial error rate.

    Negative-class counts (tn, fp) are scaled by (1 - e) and positive-class
    counts (tp, fn) by e, where e is the artificial error rate.
    """
    e = artificial_error
    tn, fp, tp, fn = flags
    return (tn * (1.0 - e), fp * (1.0 - e), tp * e, fn * e)
def w_error_corpus(flags, do_weighting):
    """Scale the positive-class counts so both corpora carry equal weight.

    When enabled, tp and fn are multiplied by (tn + fp) / (tp + fn) so the
    error corpus contributes as much as the correct corpus; an empty corpus
    on either side leaves the counts unchanged (weight 1).
    """
    tn, fp, tp, fn = flags
    if not do_weighting:
        return (tn, fp, tp, fn)
    pos = tp + fn
    neg = tn + fp
    weight = 1 if pos == 0 or neg == 0 else neg / pos
    return (tn, fp, tp * weight, fn * weight)
class AccuracyScore:
    """Plain accuracy, optionally computed on corpus-weighted counts."""
    def __init__(self, do_weighting=False):
        # Whether to equalize error/correct corpus sizes before scoring.
        # (A dead `pass` statement after this assignment was removed.)
        self.w = do_weighting
    def score(self, flags):
        """Accuracy of the (tn, fp, tp, fn) confusion counts."""
        return accuracy(w_error_corpus(flags, self.w))
    def __str__(self):
        return 'accuracy'
class WAccuracyScore:
    """Accuracy over error-rate-weighted (and optionally corpus-weighted) counts."""
    def __init__(self, artificial_error, do_weighting=False):
        self.e = artificial_error
        self.w = do_weighting
    def score(self, flags):
        weighted = w_error_corpus(flags, self.w)
        return accuracy(w_flags(weighted, self.e))
    def __str__(self):
        return 'w_accuracy'


class WPrecisionScore:
    """Precision over error-rate-weighted (and optionally corpus-weighted) counts."""
    def __init__(self, artificial_error, do_weighting=False):
        self.e = artificial_error
        self.w = do_weighting
    def score(self, flags):
        weighted = w_error_corpus(flags, self.w)
        return precision(w_flags(weighted, self.e))
    def __str__(self):
        return 'w_precision'


class F1Score:
    """F1 (beta = 1) over error-rate-weighted counts."""
    def __init__(self, artificial_error, do_weighting=False):
        self.e = artificial_error
        self.w = do_weighting
    def score(self, flags):
        weighted = w_error_corpus(flags, self.w)
        return f_score(w_flags(weighted, self.e))
    def __str__(self):
        return 'f_1_score'


class F05Score:
    """F0.5 score: weights precision more heavily than recall."""
    def __init__(self, artificial_error, do_weighting=False):
        self.e = artificial_error
        self.w = do_weighting
        self.b = 0.5
    def score(self, flags):
        weighted = w_error_corpus(flags, self.w)
        return f_score(w_flags(weighted, self.e), beta=self.b)
    def __str__(self):
        return 'f_05_score'


class F025Score:
    """F0.25 score: emphasizes precision even more strongly than F0.5."""
    def __init__(self, artificial_error, do_weighting=False):
        self.e = artificial_error
        self.w = do_weighting
        self.b = 0.25
    def score(self, flags):
        weighted = w_error_corpus(flags, self.w)
        return f_score(w_flags(weighted, self.e), beta=self.b)
    def __str__(self):
        return 'f_025_score'
def sort_rules_by_train_results(all_train_results, rules, score, last_results):
    """Return the rules whose TRAIN score beats the previous result, best first.

    Rules scoring at or below the previous iteration's score are dropped.
    Ties are broken by the higher rule index first (tuple sort, as before).
    """
    baseline = score.score(last_results.get_data(CorpusOptions.TRAIN))
    scored = [(score.score(result.get_data(CorpusOptions.TRAIN)), i)
              for i, result in enumerate(all_train_results)]
    scored.sort(reverse=True)
    improving = []
    for s, i in scored:
        if s <= baseline:
            break
        improving.append(rules[i])
    return improving
def select_rule_by_cv_results(all_cv_results, score, last_results):
    """Pick the first rule whose CV score beats the previous iteration.

    Returns the rule's index in all_cv_results, or -1 when none improves.
    """
    baseline = score.score(last_results.get_data(CorpusOptions.CV))
    for i, result in enumerate(all_cv_results):
        if score.score(result.get_data(CorpusOptions.CV)) > baseline:
            return i
    return -1
"repo_name": "Gnork/confusion-words",
"path": "transformation_based_rule_learning/scores.py",
"copies": "1",
"size": "4072",
"license": "mit",
"hash": 6662285926998216000,
"line_mean": 24.7784810127,
"line_max": 83,
"alpha_frac": 0.5790766208,
"autogenerated": false,
"ratio": 3.129900076863951,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4208976697663951,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Christoph Jansen, HTW Berlin'
import copy
class TokenOrPOS:
    """One item of a rule sequence: either a surface token or a POS tag."""

    def __init__(self, value: str, isToken: bool):
        # isToken is True when `value` is a token, False when it is a POS tag.
        self.isToken = isToken
        self.value = value
class Collocations:
    """Rule: replace origin_token when a token/POS sequence occurs at offset k.

    check_sequence is matched starting k positions away from the candidate
    token; the candidate's own position is skipped while matching.
    """
    def __init__(self, check_sequence: [TokenOrPOS], k: int, origin_token: str, replace_token: str):
        self.check_sequence = check_sequence
        self.k = k
        self.origin_token = origin_token
        self.replace_token = replace_token
    def apply(self, tokens: [str], pos_tags: [str], index: int) -> str:
        """Return replace_token if the rule fires at `index`, else the original token."""
        len_tokens = len(tokens)
        if tokens[index] == self.origin_token:
            # First position of the sequence, relative to the candidate token.
            idx = index + self.k
            if idx < 0:
                return tokens[index]
            found_sequence = True
            for j in range(len(self.check_sequence)):
                if idx == index:
                    # Never match against the candidate position itself.
                    idx += 1
                if idx >= len_tokens:
                    found_sequence = False
                    break
                check_item = self.check_sequence[j]
                if check_item.isToken:
                    if tokens[idx] != check_item.value:
                        found_sequence = False
                        break
                else:
                    if pos_tags[idx] != check_item.value:
                        found_sequence = False
                        break
                idx += 1
            if found_sequence:
                return self.replace_token
        return tokens[index]
    def _sequences_equal(self, sequence_a, sequence_b):
        # Element-wise comparison of TokenOrPOS items (kind and value).
        if len(sequence_a) != len(sequence_b):
            return False
        for a, b in zip(sequence_a, sequence_b):
            if (a.isToken and not b.isToken) or (not a.isToken and b.isToken):
                return False
            if a.value != b.value:
                return False
        return True
    def equals(self, rule) -> bool:
        """True if `rule` is a Collocations rule with identical fields."""
        if self.k == rule.k and self.origin_token == rule.origin_token and self.replace_token == rule.replace_token and self._sequences_equal(self.check_sequence, rule.check_sequence):
            return True
        return False
    def __str__(self):
        # Renders the rule as a window around the origin token, with '<>'
        # marking positions the rule does not constrain.
        fill_symbol = '<>'
        result = []
        # NOTE: this local deliberately shadows the imported `copy` module
        # within this method.
        copy = list(self.check_sequence)
        start = min(self.k, 0)
        while True:
            if start == 0:
                result.append(self.origin_token)
            elif not copy:
                if start > 0:
                    break
                else:
                    result.append(fill_symbol)
            elif start < self.k:
                result.append(fill_symbol)
            else:
                result.append(copy[0].value)
                del copy[0]
            start += 1
        result_string = ' '.join(result)
        return result_string
    def hash(self):
        """Return a string key identifying this rule (used for deduplication)."""
        return 'collo%s%s%s%s' %(str(self), str(self.k), self.origin_token, self.replace_token)
class CoOccurrences:
    """Rule: replace origin_token when check_token occurs in a window around it."""

    def __init__(self, check_token: str, window: int,
                 origin_token: str, replace_token: str):
        self.check_token = check_token      # feature token to look for
        self.window = window                # window size k
        self.origin_token = origin_token    # source word
        self.replace_token = replace_token  # target word

    def apply(self, tokens: [str], pos_tags: [str], index: int) -> str:
        """Return the (possibly replaced) token at `index`."""
        if tokens[index] != self.origin_token:
            return tokens[index]
        lo = max(index - self.window, 0)
        # NOTE(review): the right edge excludes index + window while the left
        # edge includes index - window -- asymmetric; confirm this is intended.
        hi = min(index + self.window, len(tokens))
        for j in range(lo, hi):
            if tokens[j] == self.check_token:
                return self.replace_token
        return tokens[index]

    def equals(self, rule) -> bool:
        """True if `rule` is a CoOccurrences rule with identical fields."""
        return (self.window == rule.window
                and self.check_token == rule.check_token
                and self.origin_token == rule.origin_token
                and self.replace_token == rule.replace_token)

    def __str__(self):
        return '"%s" in window -%d to %d' % (self.check_token, self.window, self.window)

    def hash(self):
        """Return a string key identifying this rule (used for deduplication)."""
        return 'coocc%s%s%s%s' %(str(self.window), self.check_token, self.origin_token, self.replace_token)
class SimpleBaselinePrediction:
    """Baseline that always replaces origin_token with replace_token."""

    def __init__(self, origin_token: str, replace_token: str):
        self.origin_token = origin_token
        self.replace_token = replace_token

    def apply(self, tokens: [str], pos_tags: [str], index: int) -> str:
        """Return replace_token if the token at `index` is origin_token.

        (fixed) dropped a dead `result_tokens = list(tokens)` copy that was
        built on every call and never used, and corrected the return
        annotation (`[str]` -> str; a single token is returned).
        """
        if tokens[index] == self.origin_token:
            return self.replace_token
        return tokens[index]

    def __str__(self):
        return '%s_to_%s' % (self.origin_token, self.replace_token)
class IdentityBaselinePrediction:
    """Baseline that changes nothing: every token is returned as-is."""

    def __init__(self):
        # No source/target words; kept empty for interface compatibility.
        self.origin_token = self.replace_token = ''

    def apply(self, tokens: [str], pos: [str], index: int) -> str:
        """Always return the token at `index` unchanged."""
        return tokens[index]

    def __str__(self):
        return 'identity'
def generate_collocations(tokens: [str], pos:[str], index:int, intend_token: str, predict_token, confusion_set: (str, str)):
    """Generate candidate Collocations rules for a misprediction at `index`.

    Returns an empty list when the prediction was already correct; otherwise
    one rule per token/POS sequence of length 1..2 found inside a +/-3
    window around `index`.
    """
    window = 3
    max_sequence_length = 2
    rules = []
    origin_token = predict_token
    if intend_token == origin_token:
        return rules  # prediction correct: nothing to learn here
    # The replacement is the other member of the confusion set.
    replace_token = confusion_set[1] if origin_token == confusion_set[0] else confusion_set[0]
    start = max(index - window, 0)
    end = min(index + window, len(tokens))
    for sequence_length in range(1, max_sequence_length + 1):
        for sequence, k in _gen_collocation_sequences(tokens, pos, start, end, index, sequence_length):
            rules.append(Collocations(sequence, k, origin_token, replace_token))
    return rules
def _gen_collocation_sequences(tokens: [str], pos: [str], start: int, end: int, i: int, sequence_length: int) -> [([TokenOrPOS], int)]:
    """Collect (token, pos) runs of `sequence_length` items anchored in [start, end).

    Position `i` (the token under prediction) is skipped while collecting;
    runs that would extend past the sentence are dropped. Each run is
    expanded to all token/POS combinations via _gen_collocation_sequences_at_k.
    """
    sequences = []
    n = len(tokens)
    for anchor in range(start, end):
        cursor = anchor
        items = []
        complete = True
        for _ in range(sequence_length):
            if cursor == i:
                cursor += 1  # never include the predicted position itself
            if cursor >= n:
                complete = False
                break
            items.append((tokens[cursor], pos[cursor]))
            cursor += 1
        if complete:
            sequences += _gen_collocation_sequences_at_k(items, anchor - i)
    return sequences
def _gen_collocation_sequences_at_k(item_sequence: [(str, str)], k: int) -> [([TokenOrPOS], int)]:
    """Expand a run of (token, pos) pairs into every token/POS combination.

    Each input pair contributes either its token or its POS tag, yielding
    2**len(item_sequence) sequences, each returned as (sequence, k).

    (fixed) the input list is no longer mutated -- the first element used
    to be deleted in place with `del item_sequence[0]`, silently emptying
    the caller's list; also removed dead commented-out debug output.
    """
    first_token, first_pos = item_sequence[0]
    sequences = [[TokenOrPOS(first_token, True)], [TokenOrPOS(first_pos, False)]]
    for token, pos_tag in item_sequence[1:]:
        expanded = []
        for sequence in sequences:
            # Branch each partial sequence on token vs. POS for this item.
            expanded.append(sequence + [TokenOrPOS(token, True)])
            expanded.append(sequence + [TokenOrPOS(pos_tag, False)])
        sequences = expanded
    return [(sequence, k) for sequence in sequences]
def generate_cooccurences(tokens: [str], pos:[str], index:int, intend_token: str, predict_token, confusion_set: (str, str)):
    """Generate candidate CoOccurrences rules for a misprediction at `index`.

    Returns an empty list when the prediction was already correct; otherwise
    one rule per window token that is not itself a confusion word.
    """
    window = 2
    rules = []
    origin_token = predict_token
    if intend_token == origin_token:
        return rules  # prediction correct: nothing to learn here
    # The replacement is the other member of the confusion set.
    replace_token = confusion_set[1] if origin_token == confusion_set[0] else confusion_set[0]
    lo = max(index - window, 0)
    hi = min(index + window, len(tokens))
    for j in range(lo, hi):
        feature = tokens[j]
        if feature == confusion_set[0] or feature == confusion_set[1]:
            continue  # never condition on the confusion words themselves
        rules.append(CoOccurrences(feature, window, origin_token, replace_token))
    return rules
def remove_rule_duplicates(rules) -> None:
    """Remove duplicate rules from `rules` in place, keeping first occurrences.

    Two rules are duplicates when rule_a.equals(rule_b) is true. Quadratic,
    which is fine for the short-lived candidate lists this operates on.
    """
    i = 0
    while i < len(rules):
        keeper = rules[i]
        j = i + 1
        while j < len(rules):
            if keeper.equals(rules[j]):
                del rules[j]
            else:
                j += 1
        i += 1
"repo_name": "Gnork/confusion-words",
"path": "transformation_based_rule_learning/rule_templates.py",
"copies": "1",
"size": "8572",
"license": "mit",
"hash": 4163761751442372600,
"line_mean": 34.7208333333,
"line_max": 184,
"alpha_frac": 0.5531964536,
"autogenerated": false,
"ratio": 3.8699774266365687,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4923173880236569,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Christoph Jansen, HTW Berlin'
import os
import normalization
import pickle
def export_rule_set(path, rule_set):
    """Pickle `rule_set` to the file at `path` (binary mode)."""
    with open(path, 'wb') as sink:
        pickle.dump(rule_set, sink)
def import_rule_set(path):
    """Load and return a rule set previously pickled with export_rule_set."""
    with open(path, 'rb') as source:
        return pickle.load(source)
class TSVData:
    """One labeled example: sentence tokens, POS tags, label and target index."""

    def __init__(self, is_correct:bool, label:str, index:int, tokens:[str], pos:[str]):
        self.is_correct = is_correct
        self.label = label
        self.index = index
        self.tokens = tokens
        self.pos = pos

    def __str__(self):
        """Render the TSV line: 'c'/'X' marker, label, index, token|POS sentence."""
        marker = 'c' if self.is_correct else 'X'
        sentence = ' '.join(normalization.join_tokens_and_pos(self.tokens, self.pos))
        return '%s\t%s\t%d\t%s' % (marker, self.label, self.index, sentence)
class TSVReader:
    """Iterates a TSV file of labeled sentences, yielding TSVData rows.

    The file's first line is read eagerly as `headline` and excluded from
    iteration. The underlying file is closed when iteration is exhausted
    or when close() is called.
    """

    def __init__(self, input_path):
        self.open_input = open(input_path, 'r')
        self.open_input_iter = iter(self.open_input)
        self.headline = next(self.open_input_iter).strip()

    def __iter__(self):
        return self

    def _line_to_data(self, line):
        """Parse one TSV line ('c'/'X', label, index, sentence) into TSVData."""
        t = line.split('\t')
        is_correct = t[0] != 'X'
        label = t[1]
        index = int(t[2])
        # Re-join the remainder in case the sentence itself contained tabs.
        sentence = '\t'.join(t[3:])
        tokens, pos = normalization.split_tokens_and_pos(sentence.split())
        return TSVData(is_correct, label, index, tokens, pos)

    def __next__(self):
        try:
            line = next(self.open_input_iter)
        except StopIteration:
            # (fixed) was a bare `except:`, which also converted
            # KeyboardInterrupt/SystemExit and real errors into StopIteration.
            self.open_input.close()
            raise
        return self._line_to_data(line)

    def close(self):
        self.open_input.close()
class TSVWriter:
    """Writes labeled rows to a TSV file, emitting the standard headline first."""

    def __init__(self, output_path):
        self.open_output = open(output_path, 'w')
        self.open_output.write('?\tlabel\tindex\tsentence\n')

    def out(self, tsv_data):
        # Relies on the row object's __str__ to produce the TSV columns.
        self.open_output.write('%s\n' % tsv_data)

    def close(self):
        self.open_output.close()
class MultiInputReader:
    """Iterates several files in lockstep, yielding one tuple of lines per step.

    Iteration stops (and all files are closed) as soon as ANY input is
    exhausted. The object is single-use: iterating it twice raises.
    """

    def __init__(self, input_paths):
        self.open_inputs = []
        self.iters = []
        for path in input_paths:
            open_input = open(path, 'r')
            self.open_inputs.append(open_input)
            self.iters.append(iter(open_input))
        self._used = False

    def __iter__(self):
        if self._used:
            raise Exception('Cannot reuse iterable. Create new Object first.')
        self._used = True
        return self

    def __next__(self):
        lines = []
        try:
            for it in self.iters:
                lines.append(next(it))
        except StopIteration:
            # (fixed) was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit and genuine errors as exhaustion.
            self.close()
            raise
        return tuple(lines)

    def close(self):
        for open_input in self.open_inputs:
            open_input.close()
class PersistantBalancedCorpusIO:
    """Line-by-line read/transform/write pipeline with strict 1:1 accounting.

    Every line obtained via iteration must be answered by exactly one
    out() call before the next line may be read, guaranteeing the output
    file stays line-aligned with the input even across crashes (see flush).
    Single-use: iterating twice raises.
    """

    def __init__(self, input_path: str, output_path: str):
        self._input_path = input_path
        self._output_path = output_path
        self._open_input = open(self._input_path, 'r')
        self._open_input_iter = iter(self._open_input)
        self._open_output = open(self._output_path, 'w')
        self._in_count = 0   # lines handed out to the caller
        self._out_count = 0  # lines written back via out()
        self._used = False

    def __iter__(self):
        if self._used:
            raise Exception('Cannot reuse iterable. Create new Object first.')
        self._used = True
        return self

    def __next__(self):
        if self._in_count > self._out_count:
            m = 'Cannot read new input line before using out function to store intermediate results.'
            raise Exception(m)
        self._in_count += 1
        try:
            return next(self._open_input_iter)
        except StopIteration:
            # (fixed) was a bare `except:`; only genuine exhaustion should
            # close both files -- other errors now propagate.
            self._open_input.close()
            self._open_output.close()
            raise

    def out(self, line: str):
        """Write the processed counterpart of the last line read."""
        if self._out_count >= self._in_count:
            m = 'Cannot store more itermediate results without reading new input line first.'
            raise Exception(m)
        self._out_count += 1
        if not line:
            line = ''  # falsy results (e.g. None) become an empty output line
        print(line, file=self._open_output)

    def flush(self):
        """Flush Python and OS buffers so output survives a crash."""
        self._open_output.flush()
        os.fsync(self._open_output.fileno())

    def close(self):
        if self._in_count > self._out_count:
            m = 'Write last intermediate result before closing files'
            raise Exception(m)
        self._open_input.close()
        self._open_output.close()
"repo_name": "Gnork/confusion-words",
"path": "transformation_based_rule_learning/io_wrapper.py",
"copies": "1",
"size": "4741",
"license": "mit",
"hash": 1325073796735393000,
"line_mean": 28.6375,
"line_max": 101,
"alpha_frac": 0.5498839907,
"autogenerated": false,
"ratio": 3.7155172413793105,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9745062340542976,
"avg_score": 0.0040677783072668935,
"num_lines": 160
} |
__author__ = 'Christoph Jansen, HTW Berlin'
import os
import scores
from data import CorpusOptions
import json
def log_exp_settings(log_path, exp_settings):
    """Write the experiment settings to `log_path` as one JSON line and echo them.

    (fixed) uses a context manager so the file is closed even if the write
    fails; the original leaked the handle on any exception between open()
    and close().
    """
    data = json.dumps(exp_settings, sort_keys=True)
    with open(log_path, 'w') as open_log:
        print(data, file=open_log)
    print(data)
class TrainingLogger:
    """Writes one semicolon-separated row of scores per learned rule.

    The headline names per-corpus variants (train/test/cv/cv2) of every
    metric; out() computes all metrics for one result set, appends the row
    to the log file and echoes it to stdout.
    """

    def __init__(self, log_path):
        self.open_log = open(log_path, 'w')
        # Rule-level bookkeeping columns shared by all corpora.
        self.common_fields = ['index',
                              'rule',
                              'origin',
                              'replace',
                              'skips']
        # Metric columns; prefixed per corpus in _headline_fields().
        self.individual_fields = ['tn',
                                  'fp',
                                  'tp',
                                  'fn',
                                  'accuracy',
                                  'precision',
                                  'recall',
                                  'w_accuracy',
                                  'w_precision',
                                  'w_recall',
                                  'f_1_score',
                                  'f_05_score',
                                  'f_025_score']
        print(self._headline(), file=self.open_log)

    def _headline(self):
        """Return the semicolon-joined header row."""
        return ';'.join(self._headline_fields())

    def _headline_fields(self):
        """Return all column names: common fields, then per-corpus metrics."""
        train_fields = ['train_%s' % f for f in self.individual_fields]
        cv_fields = ['cv_%s' % f for f in self.individual_fields]
        cv2_fields = ['cv2_%s' % f for f in self.individual_fields]
        test_fields = ['test_%s' % f for f in self.individual_fields]
        # NOTE: test columns come before cv/cv2, matching the original order.
        return self.common_fields + train_fields + test_fields + cv_fields + cv2_fields

    def _dataline(self, fields, d):
        """Return one data row; missing fields render as empty columns.

        (fixed) the old concat-and-rstrip(';') implementation collapsed ALL
        trailing empty columns (e.g. every TestLogger row, whose cv/cv2
        columns are empty), misaligning the CSV; joining keeps exactly one
        column per field.
        """
        return ';'.join(str(d.get(field, '')) for field in fields)

    def _common_data(self, common):
        """Extract the common bookkeeping fields from a result's metadata."""
        return {f: common.get(f, '') for f in self.common_fields}

    def _individual_data(self, d, p, e, w):
        """Compute every metric column for one corpus.

        d: (tn, fp, tp, fn) counts; p: column prefix ('train', ...);
        e: artificial error rate; w: whether to weight the error corpus.
        """
        # Hoisted: the corrected/weighted counts were recomputed per metric.
        corrected = scores.w_error_corpus(d, w)
        weighted = scores.w_flags(corrected, e)
        return {'%s_tn' % p: d[0],
                '%s_fp' % p: d[1],
                '%s_tp' % p: d[2],
                '%s_fn' % p: d[3],
                '%s_accuracy' % p: scores.accuracy(corrected),
                '%s_precision' % p: scores.precision(corrected),
                '%s_recall' % p: scores.recall(corrected),
                '%s_w_accuracy' % p: scores.accuracy(weighted),
                '%s_w_precision' % p: scores.precision(weighted),
                '%s_w_recall' % p: scores.recall(weighted),
                '%s_f_1_score' % p: scores.f_score(weighted),
                '%s_f_05_score' % p: scores.f_score(weighted, beta=0.5),
                '%s_f_025_score' % p: scores.f_score(weighted, beta=0.25)}

    def out(self, data, artificial_error, do_weighting):
        """Append one full row (all corpora) for `data` and echo it."""
        combined_data = self._common_data(data.common)
        for option, prefix in ((CorpusOptions.TRAIN, 'train'),
                               (CorpusOptions.CV, 'cv'),
                               (CorpusOptions.CV2, 'cv2'),
                               (CorpusOptions.TEST, 'test')):
            combined_data.update(self._individual_data(data.get_data(option), prefix, artificial_error, do_weighting))
        line = self._dataline(self._headline_fields(), combined_data)
        print(line)
        print(line, file=self.open_log)
        self._flush()

    def close(self):
        self.open_log.close()

    def _flush(self):
        # Force the row to disk so a crash mid-training loses nothing.
        self.open_log.flush()
        os.fsync(self.open_log.fileno())
class TestLogger(TrainingLogger):
    """Variant of TrainingLogger that fills only the test-corpus columns."""

    def out(self, data, artificial_error, do_weighting):
        """Append one row containing only the test-corpus metrics."""
        row = self._individual_data(data.get_data(CorpusOptions.TEST), 'test', artificial_error, do_weighting)
        line = self._dataline(self._headline_fields(), row)
        print(line)
        print(line, file=self.open_log)
        self._flush()
"repo_name": "Gnork/confusion-words",
"path": "transformation_based_rule_learning/logger.py",
"copies": "1",
"size": "4521",
"license": "mit",
"hash": -2857388433375327700,
"line_mean": 40.1090909091,
"line_max": 128,
"alpha_frac": 0.5169210352,
"autogenerated": false,
"ratio": 3.5767405063291138,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9575922159554586,
"avg_score": 0.003547876394905441,
"num_lines": 110
} |
__author__ = 'Christoph Jansen, HTW Berlin'
import theano
import theano.tensor as T
from theano import dot
from theano.tensor.nnet import sigmoid as sigm
from theano.tensor import tanh
from theano.tensor.nnet import softmax
from theano.tensor.nnet import categorical_crossentropy
import os
import numpy as np
from datetime import datetime
import helper
home_dir = os.path.expanduser('~')
### BEGIN SETTINGS ###
# text corpus
corpus_file = os.path.join(home_dir, 'brown_tagged.txt')
# word2vec embeddings
embeddings_file = os.path.join(home_dir, 'normalized_embeddings.bin')
# work dir will contain pickled lstm weights and pickled list of training errors
work_dir = os.path.join(home_dir, 'training_lstm_w2v_lm')
# if training should be continued from existing weights, timestamp, start_epoch and start_iteration must be given
# every training is identified by a generated timestamp
# else set values to None
timestamp = None # string
start_epoch = None # int
start_iteration = None # int
# number of neurons in hidden layer of lstm
hidden_layer_size = 512
# 40% of occurrences of these tokens will be excluded from training corpus for cv and test
preserve_tokens = ['than', 'then', 'except', 'accept', 'well', 'good']
# number of training epochs
# complete corpus will be given to lstm for training once per epoch
max_epochs = 1
# after training lstm language model will be applied to this confusion set
# order matters: algorithm will generate rules for occurrences of first word in list
confusion_set = ['than', 'then']
# minimum occurrence of tokens in training data
# tokens with fewer occurrences will be substituted to 'U' for unknown
# 'U' can also serve as substitute for unseen tokens at test time
min_occurrence = 20
### END SETTINGS ###
# init
if not os.path.exists(work_dir):
    os.makedirs(work_dir)
# corpus lines hold token|POS pairs; keep only the normalized, lowercased token
with open(corpus_file) as f:
    sents = [[helper.normalization(twp.split('|')[0].lower()) for twp in line.split()] for line in f]
# helper.acs presumably holds back the preserved-token occurrences for cv/test -- confirm
train_sents = list(helper.acs(sents, preserve_tokens))
token_embeddings = helper.TokenEmbeddings(train_sents, min_occurrence)
w2v_embeddings = helper.Word2VecEmbeddings(embeddings_file)
# NOTE(review): truthiness check means epoch/iteration 0 cannot be resumed
# from (0 is falsy) -- confirm whether `is not None` was intended.
if timestamp and start_epoch and start_iteration:
    errors = helper.load_errors('%s-%d-%d.errors' % (timestamp, start_epoch, start_iteration), work_dir)
    load_weights = '%s-%d-%d.weights' % (timestamp, start_epoch, start_iteration)
    print('init previous states...')
    print('timestamp: ', timestamp)
    print('start_epoch: ', start_epoch)
    print('start_iteration: ', start_iteration)
else:
    errors = []
    start_epoch = 0
    start_iteration = 0
    timestamp = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    load_weights = None
    print('init new states...')
    print('timestamp: ', timestamp)
print()
# initialize lstm weights
# naming: W_<from><gate>; x = input, h = hidden, c = cell (peephole);
# gates: i = input, f = forget, o = output; y = output layer; b_* = biases
inp = w2v_embeddings.embeddings_size # input size (embedding dimension)
hid = hidden_layer_size # hidden size
out = token_embeddings.num_tokens
if not load_weights:
    W_xi = helper.init_weights((inp, hid))
    W_hi = helper.init_weights((hid, hid))
    W_ci = helper.init_weights((hid, hid))
    b_i = helper.init_zero_vec(hid)
    W_xf = helper.init_weights((inp, hid))
    W_hf = helper.init_weights((hid, hid))
    W_cf = helper.init_weights((hid, hid))
    b_f = helper.init_zero_vec(hid)
    W_xc = helper.init_weights((inp, hid))
    W_hc = helper.init_weights((hid, hid))
    b_c = helper.init_zero_vec(hid)
    W_xo = helper.init_weights((inp, hid))
    W_ho = helper.init_weights((hid, hid))
    W_co = helper.init_weights((hid, hid))
    b_o = helper.init_zero_vec(hid)
    W_hy = helper.init_weights((hid, out))
    b_y = helper.init_zero_vec(out)
else:
    # resume: restore all parameters from the pickled checkpoint
    W_xi, W_hi, W_ci, b_i, \
    W_xf, W_hf, W_cf, b_f, \
    W_xc, W_hc, b_c, \
    W_xo, W_ho, W_co, b_o, \
    W_hy, b_y = helper.load_states(load_weights, work_dir)
# LSTM code
S_h = helper.init_zero_vec(hid) # init values for hidden units
S_c = helper.init_zero_vec(hid) # init values for cell units
S_x = T.matrix() # inputs
Y = T.matrix() # targets
# BEGIN code inspired by Christian Herta
# http://christianherta.de/lehre/dataScience/machineLearning/neuralNetworks/LSTM.php
def step(S_x, S_h, S_c,
         W_xi, W_hi, W_ci, b_i,
         W_xf, W_hf, W_cf, b_f,
         W_xc, W_hc, b_c,
         W_xo, W_ho, W_co, b_o,
         W_hy, b_y):
    """One LSTM time step (with cell-to-gate W_c* connections).

    Returns [new hidden state, new cell state, unnormalized output logits].
    """
    S_i = sigm(dot(S_x, W_xi) + dot(S_h, W_hi) + dot(S_c, W_ci) + b_i)  # input gate
    S_f = sigm(dot(S_x, W_xf) + dot(S_h, W_hf) + dot(S_c, W_cf) + b_f)  # forget gate
    S_c = S_f * S_c + S_i * tanh(dot(S_x, W_xc) + dot(S_h, W_hc) + b_c)  # new cell state
    S_o = sigm(dot(S_x, W_xo) + dot(S_h, W_ho) + dot(S_c, W_co) + b_o)  # output gate
    S_h = S_o * tanh(S_c)  # new hidden state
    S_y = dot(S_h, W_hy) + b_y  # logits (softmax applied in the cost)
    return [S_h, S_c, S_y]
# scan loops through input sequence and applies step function to each time step
# outputs_info: hidden and cell state are fed back between steps; None means
# the logits are collected but not recurrent
(S_h_r, S_c_r, S_y_r ), _ = theano.scan(fn = step,
                                        sequences = S_x,
                                        outputs_info = [S_h, S_c, None],
                                        non_sequences = [W_xi, W_hi, W_ci, b_i,
                                                         W_xf, W_hf, W_cf, b_f,
                                                         W_xc, W_hc, b_c,
                                                         W_xo, W_ho, W_co, b_o,
                                                         W_hy, b_y])
# END code inspired by Christian Herta
# cost and gradient descent
# mean cross-entropy of the softmaxed logits against the one-hot targets Y
cost = T.mean(categorical_crossentropy(softmax(S_y_r), Y))
def gradient_descent(cost, weights, lr=0.05):
    """Build plain SGD update pairs [weight, weight - lr * gradient]."""
    gradients = T.grad(cost=cost, wrt=weights)
    return [[w, w - lr * g] for w, g in zip(weights, gradients)]
updates = gradient_descent(cost,
                           [W_xi, W_hi, W_ci, b_i,
                            W_xf, W_hf, W_cf, b_f,
                            W_xc, W_hc, b_c,
                            W_xo, W_ho, W_co, b_o,
                            W_hy, b_y])
# training theano function
train = theano.function(inputs=[S_x, Y],
                        outputs=cost,
                        updates=updates,
                        allow_input_downcast=True)
# prediction theano function
predict = theano.function(inputs=[S_x],
                          outputs=S_y_r,
                          allow_input_downcast=True)
# sampling theano functions
# a single LSTM step with explicit state vectors, so sampling can feed the
# previous hidden/cell state back in one token at a time
S_h_v = T.vector()
S_c_v = T.vector()
S_h_s, S_c_s, S_y_s = step(S_x, S_h_v, S_c_v,
                           W_xi, W_hi, W_ci, b_i,
                           W_xf, W_hf, W_cf, b_f,
                           W_xc, W_hc, b_c,
                           W_xo, W_ho, W_co, b_o,
                           W_hy, b_y)
sampling = theano.function(inputs = [S_x, S_h_v, S_c_v],
                           outputs = [S_h_s, S_c_s, S_y_s],
                           allow_input_downcast=True)
# sampling python functions
def apply_sampling(token_embeddings, w2v_embeddings, hid, start='S', end='E', t=1.0, max_tokens=50):
    """Sample one sentence from the trained LSTM, token by token.

    start/end: sentinel tokens marking sentence boundaries; t: softmax
    temperature (higher = more random); hid: hidden layer width.
    Returns the sampled tokens without the start/end sentinels.
    """
    tokens = token_embeddings.tokens
    S_x = w2v_embeddings.token_to_vec(start)
    S_h = np.zeros(hid, dtype=theano.config.floatX)
    S_c = np.zeros(hid, dtype=theano.config.floatX)
    sampled_tokens = [start]
    counter = 0
    while sampled_tokens[-1] != end:
        if counter == max_tokens:
            # give up: force-terminate overly long samples
            sampled_tokens.append(end)
            break
        S_x = helper.theano_cast(S_x)
        S_x = np.reshape(S_x, (1, -1))
        # one LSTM step: new hidden/cell state plus output logits
        S_h, S_c, S_y = sampling(S_x, S_h.flatten(), S_c.flatten())
        S_y = S_y.flatten()
        # temperature softmax over the vocabulary, then draw one token
        distribution = helper.t_softmax(S_y, t=t)
        S_x = np.random.multinomial(n=1, pvals=distribution)
        idx = helper.vec_to_index(S_x)
        sampled_token = tokens[idx]
        sampled_tokens.append(sampled_token)
        S_x = w2v_embeddings.token_to_vec(sampled_token)
        counter += 1
    # strip the start/end sentinels
    return sampled_tokens[1:-1]
def resample(token_embeddings, w2v_embeddings, hid, min_tokens=0, max_tokens=50, trials=100, t=1.0):
    """Draw samples until one has at least `min_tokens` tokens, joined as a string.

    Sampling is best-effort: failed draws are retried, up to `trials` times;
    afterwards a placeholder string is returned.

    (fixed) the bare `except: pass` is narrowed to `except Exception`, so
    KeyboardInterrupt/SystemExit are no longer swallowed.
    """
    for _ in range(trials):
        try:
            sample = apply_sampling(token_embeddings, w2v_embeddings, hid, t=t, max_tokens=max_tokens)
        except Exception:
            continue  # deliberate best-effort: retry on sampling errors
        if len(sample) >= min_tokens:
            return ' '.join(sample)
    return 'NO SAMPLE IN %d STEPS' % trials
# training
print('start training...')
print()
log_steps = 500
save_steps = 5000
weights_changed = False
for e in range(max_epochs):
    if e < start_epoch:
        continue
    error = 0
    # NOTE(review): `inp` here shadows the input-size variable defined above
    for i, (inp, tar) in enumerate(helper.token_sequence_generator(train_sents, token_embeddings, w2v_embeddings)):
        if e == start_epoch and i < start_iteration:
            continue
        cost = train(inp, tar)
        error += cost
        weights_changed = True
        if (i+1) % log_steps == 0:
            # report the running mean error and print a sample sentence
            error /= log_steps
            errors.append(error)
            print('epoch: %d\titerations: %d\terror: %f' %(e, (i+1), error))
            print(resample(token_embeddings, w2v_embeddings, hid))
            print()
            error = 0
        if (i+1) % save_steps == 0:
            # periodic checkpoint of all weights and the error history
            helper.save_states([W_xi, W_hi, W_ci, b_i,
                                W_xf, W_hf, W_cf, b_f,
                                W_xc, W_hc, b_c,
                                W_xo, W_ho, W_co, b_o,
                                W_hy, b_y],
                               '%s-%d-%d.weights' % (timestamp, e, (i+1)), work_dir)
            helper.save_errors(errors, '%s-%d-%d.errors' % (timestamp, e, (i+1)), work_dir)
            weights_changed = False
            print('weights saved:')
            print('%s-%d-%d.weights' % (timestamp, e, (i+1)))
            print('errors saved:')
            print('%s-%d-%d.errors' % (timestamp, e, (i+1)))
            print()
print('end training')
print()
# save current weights if training has been performed
# NOTE(review): relies on the loop variables e and i leaking from the loops
# above; raises NameError if weights_changed were somehow True without any
# iteration having run
if weights_changed:
    helper.save_states([W_xi, W_hi, W_ci, b_i,
                        W_xf, W_hf, W_cf, b_f,
                        W_xc, W_hc, b_c,
                        W_xo, W_ho, W_co, b_o,
                        W_hy, b_y], '%s-%d-%d.weights' % (timestamp, e, (i+1)), work_dir)
    helper.save_errors(errors, '%s-%d-%d.errors' % (timestamp, e, (i+1)), work_dir)
    print('final weights saved:')
    print('%s-%d-%d.weights' % (timestamp, e, (i+1)))
    print('final errors saved:')
    print('%s-%d-%d.errors' % (timestamp, e, (i+1)))
    print()
# generate samples
min_tokens = 5
max_tokens = 50
num_samples = 20
print('genrate samples')
print('minimum number of tokens per sample: ', min_tokens)
print()
# sample at several temperatures: lower = more conservative, higher = more random
for t in [0.8, 1.0, 1.2]:
    print('temperature: ', t)
    print()
    for i in range(num_samples):
        print(resample(token_embeddings, w2v_embeddings, hid, min_tokens=min_tokens, max_tokens=max_tokens, trials=100, t=t))
        print()
"repo_name": "Gnork/confusion-words",
"path": "lstm_word2vec_language_model/__main__.py",
"copies": "1",
"size": "11014",
"license": "mit",
"hash": -727890525369080800,
"line_mean": 31.9790419162,
"line_max": 125,
"alpha_frac": 0.5524786635,
"autogenerated": false,
"ratio": 3.1289772727272727,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41814559362272724,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Christoph Jansen'
import nltk
import os
from brocas_lm.model import Normalization
from brocas_lm.model import NormalizationIter
from brocas_lm.model import LanguageModel
# create work dir
work_dir = os.path.join(os.path.expanduser('~'), 'brocas_models')
lm_file = os.path.join(work_dir, 'test_model.bin')
if not os.path.exists(work_dir):
    os.makedirs(work_dir)
# get text corpus
nltk.download('brown')
# small smoke-test corpus: only the first 100 sentences
sents = nltk.corpus.brown.sents()[:100]
# preprocessing
normalizer = Normalization(sents, min_count=15)
training_data = NormalizationIter(normalizer, sents)
lm = LanguageModel(tokenized_sentences=training_data, input_layer_size=64, hidden_layer_size=128)
print()
# train model
lm.train(training_data, epochs=5, backup_directory=work_dir, log_interval=20)
print()
# test trained model
normalized_sentence = normalizer.normalize(sents[0])
print('normalized sentence:')
print(' '.join(normalized_sentence))
print('probability: ', lm.sentence_log_probability(normalized_sentence))
print()
# presumably normalize() wraps the sentence in start/end markers -- confirm
start_tag = normalized_sentence[0]
end_tag = normalized_sentence[-1]
print('sample:')
print(' '.join(lm.sample([start_tag], end_tag=end_tag)))
print()
# save, load and test loaded model
lm.save(lm_file)
print()
lm_clone = LanguageModel(lm_file=lm_file)
print()
print('probability: ', lm_clone.sentence_log_probability(normalized_sentence))
print()
print('sample:')
print(' '.join(lm_clone.sample([start_tag], end_tag=end_tag)))
print()
# use predict and token_probabilities functions
print('predict:')
print(lm_clone.predict(normalized_sentence))
print()
print('token probabilities:')
print(lm_clone.token_probabilities(normalized_sentence))
print()
"repo_name": "PandoIO/brocas-lm",
"path": "examples/functionality_test.py",
"copies": "1",
"size": "1663",
"license": "mit",
"hash": -9420340432076412,
"line_mean": 26.7333333333,
"line_max": 97,
"alpha_frac": 0.7522549609,
"autogenerated": false,
"ratio": 3.1676190476190476,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.9407118906478231,
"avg_score": 0.0025510204081632655,
"num_lines": 60
} |
__author__ = 'Christoph Jansen'
import numpy as np
import theano
import theano.tensor as T
from theano import dot
from theano.tensor.nnet import sigmoid as sigm
from theano.tensor import tanh
from theano.tensor.nnet import softmax
from theano.tensor.nnet import categorical_crossentropy
class _LSTM:
    """LSTM (with cell-to-gate W_c* connections) plus a linear output layer.

    Builds three compiled Theano functions as attributes:
      train(x, y)       -- one SGD step over an input sequence, returns the cost
      predict(x)        -- output logits for every time step of the sequence
      sampling(x, h, c) -- a single step with explicit state, for token sampling
    """
    def __init__(self, i_size, h_size, o_size, weights=None):
        # i_size/h_size/o_size: input, hidden and output layer widths.
        # weights: optional dict of pre-trained shared variables (as stored
        # by LanguageModel.save); fresh random weights are created otherwise.
        if not weights:
            self.W_xi = _init_weights((i_size, h_size))
            self.W_hi = _init_weights((h_size, h_size))
            self.W_ci = _init_weights((h_size, h_size))
            self.b_i = _init_zero_vec(h_size)
            self.W_xf = _init_weights((i_size, h_size))
            self.W_hf = _init_weights((h_size, h_size))
            self.W_cf = _init_weights((h_size, h_size))
            self.b_f = _init_zero_vec(h_size)
            self.W_xc = _init_weights((i_size, h_size))
            self.W_hc = _init_weights((h_size, h_size))
            self.b_c = _init_zero_vec(h_size)
            self.W_xo = _init_weights((i_size, h_size))
            self.W_ho = _init_weights((h_size, h_size))
            self.W_co = _init_weights((h_size, h_size))
            self.b_o = _init_zero_vec(h_size)
            self.W_hy = _init_weights((h_size, o_size))
            self.b_y = _init_zero_vec(o_size)
        else:
            # resume: adopt the checkpointed shared variables as-is
            self.W_xi = weights['W_xi']
            self.W_hi = weights['W_hi']
            self.W_ci = weights['W_ci']
            self.b_i = weights['b_i']
            self.W_xf = weights['W_xf']
            self.W_hf = weights['W_hf']
            self.W_cf = weights['W_cf']
            self.b_f = weights['b_f']
            self.W_xc = weights['W_xc']
            self.W_hc = weights['W_hc']
            self.b_c = weights['b_c']
            self.W_xo = weights['W_xo']
            self.W_ho = weights['W_ho']
            self.W_co = weights['W_co']
            self.b_o = weights['b_o']
            self.W_hy = weights['W_hy']
            self.b_y = weights['b_y']
        S_h = _init_zero_vec(h_size) # init values for hidden units
        S_c = _init_zero_vec(h_size) # init values for cell units
        S_x = T.matrix() # inputs
        Y = T.matrix() # targets
        # scan applies _step to each time step, feeding hidden/cell state
        # back between steps (None = collect the logits without recurrence)
        (S_h_r, S_c_r, S_y_r ), _ = theano.scan(fn = _step,
                                sequences = S_x,
                                outputs_info = [S_h, S_c, None],
                                non_sequences = [self.W_xi, self.W_hi, self.W_ci, self.b_i,
                                                 self.W_xf, self.W_hf, self.W_cf, self.b_f,
                                                 self.W_xc, self.W_hc, self.b_c,
                                                 self.W_xo, self.W_ho, self.W_co, self.b_o,
                                                 self.W_hy, self.b_y])
        # mean cross-entropy of the softmaxed logits against one-hot targets
        cost = T.mean(categorical_crossentropy(softmax(S_y_r), Y))
        updates = _gradient_descent(cost,
                                    [self.W_xi, self.W_hi, self.W_ci, self.b_i,
                                     self.W_xf, self.W_hf, self.W_cf, self.b_f,
                                     self.W_xc, self.W_hc, self.b_c,
                                     self.W_xo, self.W_ho, self.W_co, self.b_o,
                                     self.W_hy, self.b_y])
        self.train = theano.function(inputs=[S_x, Y],
                                     outputs=cost,
                                     updates=updates,
                                     allow_input_downcast=True)
        self.predict = theano.function(inputs=[S_x],
                                       outputs=S_y_r,
                                       allow_input_downcast=True)
        # single-step variant with explicit state vectors, used for sampling
        S_h_v = T.vector()
        S_c_v = T.vector()
        S_h_s, S_c_s, S_y_s = _step(S_x, S_h_v, S_c_v,
                                    self.W_xi, self.W_hi, self.W_ci, self.b_i,
                                    self.W_xf, self.W_hf, self.W_cf, self.b_f,
                                    self.W_xc, self.W_hc, self.b_c,
                                    self.W_xo, self.W_ho, self.W_co, self.b_o,
                                    self.W_hy, self.b_y)
        self.sampling = theano.function(inputs = [S_x, S_h_v, S_c_v],
                                        outputs = [S_h_s, S_c_s, S_y_s],
                                        allow_input_downcast=True)
def _gradient_descent(cost, weights, lr=0.05):
    """Build plain SGD update pairs [weight, weight - lr * gradient]."""
    gradients = T.grad(cost=cost, wrt=weights)
    return [[w, w - lr * g] for w, g in zip(weights, gradients)]
def _step(S_x, S_h, S_c,
          W_xi, W_hi, W_ci, b_i,
          W_xf, W_hf, W_cf, b_f,
          W_xc, W_hc, b_c,
          W_xo, W_ho, W_co, b_o,
          W_hy, b_y):
    """One LSTM time step (with cell-to-gate W_c* connections).

    Returns [new hidden state, new cell state, unnormalized output logits].
    """
    S_i = sigm(dot(S_x, W_xi) + dot(S_h, W_hi) + dot(S_c, W_ci) + b_i)  # input gate
    S_f = sigm(dot(S_x, W_xf) + dot(S_h, W_hf) + dot(S_c, W_cf) + b_f)  # forget gate
    S_c = S_f * S_c + S_i * tanh(dot(S_x, W_xc) + dot(S_h, W_hc) + b_c)  # new cell state
    S_o = sigm(dot(S_x, W_xo) + dot(S_h, W_ho) + dot(S_c, W_co) + b_o)  # output gate
    S_h = S_o * tanh(S_c)  # new hidden state
    S_y = dot(S_h, W_hy) + b_y  # logits (softmax applied in the cost)
    return [S_h, S_c, S_y]
def _init_weights(shape, factor=0.01):
    """Return a Theano shared matrix of small random Gaussian weights."""
    rows, cols = shape
    values = np.random.randn(rows, cols) * factor
    return theano.shared(np.asarray(values, dtype=theano.config.floatX))
def _init_zero_vec(size):
    """Return a Theano shared zero vector of the given length."""
    return theano.shared(np.zeros(size, dtype=theano.config.floatX))
"repo_name": "PandoIO/brocas-lm",
"path": "brocas_lm/_lstm.py",
"copies": "1",
"size": "5605",
"license": "mit",
"hash": -8051213868425291000,
"line_mean": 39.5333333333,
"line_max": 110,
"alpha_frac": 0.428367529,
"autogenerated": false,
"ratio": 3.044541010320478,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39729085393204777,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Christoph Jansen'
import os
import pickle
import numpy as np
import theano
from collections import Counter
from datetime import datetime
from brocas_lm._lstm import _LSTM
from gensim.models import Word2Vec
class LanguageModel:
    """LSTM language model over tokenized sentences.

    Input tokens are fed to the LSTM as word2vec embeddings; output tokens
    are predicted as indices into a one-hot "sparse" vocabulary built from
    the training data.  Construct the model either from a previously saved
    pickle file (``lm_file``) or from training data
    (``tokenized_sentences``) -- exactly one of the two must be given.
    """

    def __init__(self,
                 verbose=True,
                 lm_file=None,
                 tokenized_sentences=None,
                 input_layer_size=128,
                 hidden_layer_size=512):
        # self.print is the logging hook: stdout when verbose, no-op otherwise.
        if verbose:
            self.print = _echo
        else:
            self.print = _silence
        if lm_file:
            self._initialize_from_file(lm_file)
        elif tokenized_sentences:
            self._input_layer_size = input_layer_size
            self._hidden_layer_size = hidden_layer_size
            self._initialize(tokenized_sentences)
        else:
            raise Exception('ERROR: either set tokenized_sentences OR lm_file parameter')

    def _initialize_from_file(self, lm_file):
        """Restore sizes, embeddings and LSTM weights from a pickle file.

        Objects are read in EXACTLY the order written by save(); the two
        methods must be kept in sync.
        """
        weights = {}
        with open(lm_file, 'rb') as f:
            self._input_layer_size = pickle.load(f)
            self._hidden_layer_size = pickle.load(f)
            self._output_layer_size = pickle.load(f)
            self.sparse_embeddings = pickle.load(f)
            self.w2v_embeddings = pickle.load(f)
            weights['W_xi'] = pickle.load(f)
            weights['W_hi'] = pickle.load(f)
            weights['W_ci'] = pickle.load(f)
            weights['b_i'] = pickle.load(f)
            weights['W_xf'] = pickle.load(f)
            weights['W_hf'] = pickle.load(f)
            weights['W_cf'] = pickle.load(f)
            weights['b_f'] = pickle.load(f)
            weights['W_xc'] = pickle.load(f)
            weights['W_hc'] = pickle.load(f)
            weights['b_c'] = pickle.load(f)
            weights['W_xo'] = pickle.load(f)
            weights['W_ho'] = pickle.load(f)
            weights['W_co'] = pickle.load(f)
            weights['b_o'] = pickle.load(f)
            weights['W_hy'] = pickle.load(f)
            weights['b_y'] = pickle.load(f)
        self._lstm = _LSTM(self._input_layer_size, self._hidden_layer_size, self._output_layer_size, weights=weights)
        self.print('initialized model from file: %s' % lm_file)

    def _initialize(self, tokenized_sentences):
        """Build embeddings, vocabulary and a freshly initialized LSTM.

        w2v_embeddings maps token -> dense input vector (gensim Word2Vec,
        min_count=1 so every training token gets a vector);
        sparse_embeddings maps token -> output index (enumeration order is
        arbitrary, since ``vocab`` is a set).
        """
        self.w2v_embeddings = Word2Vec(tokenized_sentences, size=self._input_layer_size, min_count=1)
        vocab = set()
        for s in tokenized_sentences:
            vocab.update(s)
        self.sparse_embeddings = {key: i for i, key in enumerate(vocab)}
        self._output_layer_size = len(self.sparse_embeddings)
        self._lstm = _LSTM(self._input_layer_size, self._hidden_layer_size, self._output_layer_size)
        self.print('initialized new model')

    def train(self, tokenized_sentences, epochs=10, backup_directory=None, return_cost=True, log_interval=1000):
        """Run training over all sentences for ``epochs`` passes.

        If ``backup_directory`` is given, the model is saved after every
        epoch under a timestamped file name.  Returns a list with one cost
        value per epoch when ``return_cost`` is true, otherwise None.
        """
        if backup_directory and not os.path.exists(backup_directory):
            os.makedirs(backup_directory)
        timestamp = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
        cost_logs = []
        self.print('start training...')
        for e in range(epochs):
            cost_log = 0
            for i, (S_x, Y) in enumerate(self._sequences(tokenized_sentences)):
                cost = self._lstm.train(S_x, Y)
                cost_log += cost
                if (i+1) % log_interval == 0:
                    # Report the mean cost over the last log_interval steps.
                    cost_log /= log_interval
                    self.print('epoch: %d\tcount: %d\tcost: %f' % ((e+1), (i+1), cost_log))
                    cost_log = 0
            # NOTE(review): this appends the running sum accumulated since the
            # last log interval (reset to 0 at every log line), not the total
            # epoch cost -- confirm this is the intended per-epoch metric.
            if return_cost:
                cost_logs.append(cost_log)
            if backup_directory:
                backup_file = '%s_epoch_%04d_model.bin' % (timestamp, (e+1))
                self.save(os.path.join(backup_directory, backup_file))
        self.print('end training')
        if return_cost:
            return cost_logs

    def predict(self, tokenized_sentence):
        """Return the LSTM's raw output sequence for a sentence.

        Row i of the result scores the token following position i.  The
        one-hot targets produced by _sequence() are discarded here.
        """
        S_x, Y = self._sequence(tokenized_sentence)
        return self._lstm.predict(S_x)

    def token_probabilities(self, tokenized_sentence, temperature=1.0):
        """Model probability of each actually observed next token.

        Returns one probability per token of ``tokenized_sentence[1:]``.
        Raises KeyError for tokens outside the training vocabulary.
        """
        S_y = self.predict(tokenized_sentence)
        result = []
        for prediction, token in zip(S_y, tokenized_sentence[1:]):
            probabilities = tempered_softmax(prediction, temperature=temperature)
            idx = self.sparse_embeddings[token]
            probability = probabilities[idx]
            result.append(probability)
        return result

    def sentence_log_probability(self, tokenized_sentence, temperature=1.0):
        """Sum of log token probabilities (log of the sentence probability)."""
        return sum(map(np.log, self.token_probabilities(tokenized_sentence, temperature=temperature)))

    def sample(self, start_tokens, end_tag=None, max_tokens=100, temperature=1.0):
        """Sample a token sequence from the model, seeded with start_tokens.

        Stops when ``max_tokens`` is reached or ``end_tag`` (if set) is
        produced.  ``temperature`` flattens (>1) or sharpens (<1) the
        sampling distribution.
        """
        # Invert sparse_embeddings: output index -> token string.
        tokens = [''] * len(self.sparse_embeddings)
        for token, idx in self.sparse_embeddings.items():
            tokens[idx] = token
        if not start_tokens or len(start_tokens) < 1:
            raise Exception('ERROR: at least one start token in list start_tokens must be given')
        # Zero-initialized hidden and cell state for the stepwise sampler.
        S_h = np.zeros(self._hidden_layer_size, dtype=theano.config.floatX)
        S_c = np.zeros(self._hidden_layer_size, dtype=theano.config.floatX)
        sequence = [start_tokens[0]]
        # warm up LSTM with start_tokens
        for token in start_tokens[1:]:
            if len(sequence) >= max_tokens:
                return sequence
            if end_tag and sequence[-1] == end_tag:
                return sequence
            # Feed the last emitted token; its prediction S_y is ignored here
            # because the next token is forced from start_tokens.
            S_x = np.asarray(self.w2v_embeddings[sequence[-1]])
            S_x = np.reshape(S_x, (1, -1))
            S_h, S_c, S_y = self._lstm.sampling(S_x, S_h.flatten(), S_c.flatten())
            sequence.append(token)
        # sample random tokens to continue sequence
        while True:
            if len(sequence) >= max_tokens:
                return sequence
            if end_tag and sequence[-1] == end_tag:
                return sequence
            S_x = np.asarray(self.w2v_embeddings[sequence[-1]])
            S_h, S_c, S_y = self._lstm.sampling(np.reshape(S_x, (1, -1)), S_h.flatten(), S_c.flatten())
            probabilities = tempered_softmax(S_y.flatten(), temperature=temperature)
            token = np.random.choice(tokens, 1, p=probabilities)[0]
            sequence.append(token)

    def _sequence(self, tokenized_sentence):
        """Build one training pair (S_x, Y) from a sentence.

        S_x stacks the word2vec vectors of all tokens except the last;
        Y holds the one-hot encoding of each FOLLOWING token, so row i of
        S_x predicts row i of Y.
        """
        rows = len(tokenized_sentence) - 1
        columns = len(self.sparse_embeddings)
        S_x = np.asarray([self.w2v_embeddings[t] for t in tokenized_sentence[:-1]], dtype=theano.config.floatX)
        Y = np.zeros((rows, columns), dtype=theano.config.floatX)
        for i in range(0, rows):
            t = tokenized_sentence[i+1]
            k = self.sparse_embeddings[t]
            Y[i][k] = 1
        return (S_x, Y)

    def _sequences(self, tokenized_sentences):
        """Lazily yield one (S_x, Y) training pair per sentence."""
        for s in tokenized_sentences:
            yield self._sequence(s)

    def save(self, lm_file):
        """Pickle the model to ``lm_file``.

        Dump order MUST match the load order in _initialize_from_file().
        """
        objects = [self._input_layer_size,
                   self._hidden_layer_size,
                   self._output_layer_size,
                   self.sparse_embeddings,
                   self.w2v_embeddings,
                   self._lstm.W_xi,
                   self._lstm.W_hi,
                   self._lstm.W_ci,
                   self._lstm.b_i,
                   self._lstm.W_xf,
                   self._lstm.W_hf,
                   self._lstm.W_cf,
                   self._lstm.b_f,
                   self._lstm.W_xc,
                   self._lstm.W_hc,
                   self._lstm.b_c,
                   self._lstm.W_xo,
                   self._lstm.W_ho,
                   self._lstm.W_co,
                   self._lstm.b_o,
                   self._lstm.W_hy,
                   self._lstm.b_y]
        with open(lm_file, 'wb') as f:
            for obj in objects:
                pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)
        self.print('saved model to file: %s' % lm_file)
class Normalization:
def __init__(self,
tokenized_sentences,
lower_case=True,
min_count=20,
start_tag='<SEQ>',
end_tag='</SEQ>',
unknown_tag='<UNK/>',
digit_tag='<D/>'):
self._tokenized_sentences = tokenized_sentences
self._min_count = min_count
self._unknown_tag= unknown_tag
self._normalization_functions = []
if digit_tag:
self._digit_tag = digit_tag
self._normalization_functions.append(self._replace_digits)
if lower_case:
self._normalization_functions.append(self._lower_case)
if start_tag:
self._start_list = [start_tag]
else:
self._start_list = []
if end_tag:
self._end_list = [end_tag]
else:
self._end_list = []
self._vocab = self._generate_vocabulary()
def normalize(self, tokenized_sentence):
s = self._start_list + [self._normalize(token) for token in tokenized_sentence] + self._end_list
return [token if token in self._vocab else self._unknown_tag for token in s]
def _normalize(self, token):
for f in self._normalization_functions:
token = f(token)
return token
def _generate_vocabulary(self):
c = Counter()
for s in self._tokenized_sentences:
s = self._start_list + [self._normalize(token) for token in s] + self._end_list
c.update(s)
return {key for key, val in c.items() if val >= self._min_count}
def _replace_digits(self, token):
return ''.join([self._digit_tag if c.isdigit() else c for c in token])
def _lower_case(self, token):
return token.lower()
class NormalizationIter:
    """Iterable view that yields each sentence normalized on the fly.

    Re-iterable: every call to __iter__ starts a fresh pass over the
    underlying sentences.
    """

    def __init__(self, normalization, tokenized_sentences):
        self._n = normalization
        self._tokenized_sentences = tokenized_sentences

    def __iter__(self):
        return (self._n.normalize(sentence)
                for sentence in self._tokenized_sentences)
def _echo(message):
print(message)
def _silence(message):
pass
def tempered_softmax(vals, temperature=1.0):
    """Temperature-scaled softmax over a sequence of scores.

    Parameters
    ----------
    vals : sequence of float
        Raw scores (logits).
    temperature : float
        Values > 1 flatten the distribution, values < 1 sharpen it.

    Returns
    -------
    list of float summing to 1.

    The maximum scaled score is subtracted before exponentiation; this is
    mathematically a no-op (it cancels in the normalization) but prevents
    overflow to inf/NaN for large logits, which the previous per-element
    ``np.exp(val / temperature)`` implementation suffered from.
    """
    scaled = np.asarray(vals, dtype=np.float64) / temperature
    exps = np.exp(scaled - np.max(scaled))
    return list(exps / exps.sum())
"repo_name": "PandoIO/brocas-lm",
"path": "brocas_lm/model.py",
"copies": "1",
"size": "10514",
"license": "mit",
"hash": 6736286323381115000,
"line_mean": 36.24,
"line_max": 117,
"alpha_frac": 0.5389005136,
"autogenerated": false,
"ratio": 3.8274481252275208,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48663486388275207,
"avg_score": null,
"num_lines": null
} |
# Maps C++ / colloquial type spellings to the short dtype mnemonics used
# when composing generated binding names.  Scalar spellings collapse to a
# canonical abbreviation ('unsigned char' -> 'uint8'); the std::vector /
# std::pair keys map to project-specific mnemonics ('fv', 'hp', 'vhp', ...).
_dtype_str_translation = { 'int': 'i',
                           'i': 'i',
                           'float': 'f',
                           'f': 'f',
                           'double': 'd',
                           'd': 'd',
                           'ui': 'uint',
                           'uint': 'uint',
                           'unsigned int': 'uint',
                           'ui8': 'uint8',
                           'uint8': 'uint8',
                           'uint8_t': 'uint8',
                           'unsigned char': 'uint8',
                           'uchar': 'uint8',
                           'uc': 'uint8',
                           'char': 'int8',
                           'c': 'int8',
                           'int16': 'int16',
                           'int16_t': 'int16',
                           'i16': 'int16',
                           'uint16': 'uint16',
                           'uint16_t': 'uint16',
                           'ui16': 'uint16',
                           # Container and compound types below use library
                           # mnemonics rather than dtype abbreviations.
                           'std::vector<float>': 'fv',
                           'std::vector<double>': 'dv',
                           'std::pair<float,std::shared_ptr<std::vector<int16_t>>>': 'hp',
                           'std::vector<std::pair<float,std::shared_ptr<std::vector<int16_t>>>>': 'vhp',
                           'std::pair<std::shared_ptr<std::vector<float>>,std::shared_ptr<std::vector<float>>>': 'rpf',
                           'std::vector<std::pair<std::pair<std::shared_ptr<std::vector<float>>,std::shared_ptr<std::vector<float>>>,float>>': 'vprpff',
                           'std::pair<std::shared_ptr<std::vector<double>>,std::shared_ptr<std::vector<double>>>': 'rpd',
                           'std::vector<std::pair<std::pair<std::shared_ptr<std::vector<double>>,std::shared_ptr<std::vector<double>>>,float>>': 'vprpfd' }
# Translations from C++ to C types.
# #include <stdint.h> is required!
# Used for the C interface of the generated bindings; note the lossy
# entries: 'bool' becomes 'int' and 'std::string' becomes 'char*'.
_dtype_c_translation = {'int': 'int',
                        'void': 'void',
                        'float': 'float',
                        'double': 'double',
                        'uint': 'unsigned int',
                        'fertilized::uint': 'unsigned int',
                        'unsigned int': 'unsigned int',
                        'uint8': 'uint8_t',
                        'uint8_t': 'uint8_t',
                        'int16_t': 'int16_t',
                        'size_t': 'size_t', # otherwise unsigned long long int
                        'bool': 'int',
                        'std::string': 'char*'}
# See http://www.mathworks.de/help/matlab/apiref/mxcreatenumericarray.html.
_matlab_cpp_translation = {"mxDOUBLE_CLASS":"double",
"mxSINGLE_CLASS":"float",
"mxUINT64_CLASS":"uint64_t",
"mxINT64_CLASS":"int64_t",
"mxUINT32_CLASS":"uint32_t",
"mxINT32_CLASS":"int32_t",
"mxUINT16_CLASS":"uint16_t",
"mxINT16_CLASS":"int16_t",
"mxUINT8_CLASS":"uint8_t",
"mxINT8_CLASS":"int8_t"};
_cpp_matlab_translation = {}
for _key, _val in list(_matlab_cpp_translation.items()):
_cpp_matlab_translation[_val] = _key
if not _val in ['float', 'double']:
_cpp_matlab_translation['std::'+_val] = _key
_cpp_matlab_translation['long'] = 'mxINT64_CLASS'
_cpp_matlab_translation['ulong'] = 'mxUINT64_CLASS'
_cpp_matlab_translation['int'] = 'mxINT32_CLASS'
_cpp_matlab_translation['uint'] = 'mxUINT32_CLASS'
_cpp_matlab_translation['fertilized::uint'] = 'mxUINT32_CLASS'
_cpp_matlab_translation['short'] = 'mxINT16_CLASS'
_cpp_matlab_translation['ushort'] = 'mxUINT16_CLASS'
_cpp_matlab_translation['char'] = 'mxINT8_CLASS'
_cpp_matlab_translation['uchar'] = 'mxUINT8_CLASS'
_cpp_matlab_translation['size_t'] = 'mxUINT64_CLASS'
| {
"repo_name": "classner/fertilized-devtools",
"path": "binding_generator/TypeTranslations.py",
"copies": "2",
"size": "3948",
"license": "bsd-2-clause",
"hash": 2134578903973619500,
"line_mean": 50.9473684211,
"line_max": 156,
"alpha_frac": 0.4295845998,
"autogenerated": false,
"ratio": 3.731568998109641,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004942859594732633,
"num_lines": 76
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.