text stringlengths 0 1.05M | meta dict |
|---|---|
import os.path as op
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_allclose
from mne import Epochs, read_evokeds, pick_types
from mne.io.compensator import make_compensator, get_current_comp
from mne.io import Raw
from mne.utils import _TempDir, requires_mne, run_subprocess
base_dir = op.join(op.dirname(__file__), 'data')
ctf_comp_fname = op.join(base_dir, 'test_ctf_comp_raw.fif')
def test_compensation():
    """Test CTF compensation matrices and round-trip through save/load."""
    tempdir = _TempDir()
    raw = Raw(ctf_comp_fname, compensation=None)
    # Compensator from grade 3 to grade 1, with and without comp channels
    full_comp = make_compensator(raw.info, 3, 1, exclude_comp_chs=False)
    assert_true(full_comp.shape == (340, 340))
    reduced_comp = make_compensator(raw.info, 3, 1, exclude_comp_chs=True)
    assert_true(reduced_comp.shape == (311, 340))
    # make sure that changing the comp doesn't modify the original data
    raw2 = Raw(ctf_comp_fname, compensation=2)
    assert_true(get_current_comp(raw2.info) == 2)
    fname = op.join(tempdir, 'ctf-raw.fif')
    raw2.save(fname)
    raw2 = Raw(fname, compensation=None)
    data, _ = raw[:, :]
    data2, _ = raw2[:, :]
    assert_allclose(data, data2, rtol=1e-9, atol=1e-20)
    # channel coil types must survive the save/load round trip
    for ch_orig, ch_new in zip(raw.info['chs'], raw2.info['chs']):
        assert_true(ch_orig['coil_type'] == ch_new['coil_type'])
@requires_mne
def test_compensation_mne():
    """Test compensation by comparing with MNE-C.

    Saves an uncompensated evoked response, applies each compensation
    grade both in Python and via the ``mne_compensate_data`` binary, and
    checks that the results agree.
    """
    tempdir = _TempDir()

    def make_evoked(fname, comp):
        """Average a single dummy epoch from `fname` at compensation `comp`."""
        raw = Raw(fname, compensation=comp)
        picks = pick_types(raw.info, meg=True, ref_meg=True)
        # one event at the first sample is enough for this comparison;
        # use builtin int: np.int was deprecated (NumPy 1.20) and removed
        events = np.array([[0, 0, 1]], dtype=int)
        evoked = Epochs(raw, events, 1, 0, 20e-3, picks=picks).average()
        return evoked

    def compensate_mne(fname, comp):
        """Compensate `fname` to grade `comp` using the MNE-C binary."""
        # assumes fname ends in '.fif' (4 chars) — true for files made here
        tmp_fname = '%s-%d-ave.fif' % (fname[:-4], comp)
        cmd = ['mne_compensate_data', '--in', fname,
               '--out', tmp_fname, '--grad', str(comp)]
        run_subprocess(cmd)
        return read_evokeds(tmp_fname)[0]

    # save evoked response with default compensation
    fname_default = op.join(tempdir, 'ctf_default-ave.fif')
    make_evoked(ctf_comp_fname, None).save(fname_default)
    for comp in [0, 1, 2, 3]:
        evoked_py = make_evoked(ctf_comp_fname, comp)
        evoked_c = compensate_mne(fname_default, comp)
        picks_py = pick_types(evoked_py.info, meg=True, ref_meg=True)
        picks_c = pick_types(evoked_c.info, meg=True, ref_meg=True)
        assert_allclose(evoked_py.data[picks_py], evoked_c.data[picks_c],
                        rtol=1e-3, atol=1e-17)
| {
"repo_name": "trachelr/mne-python",
"path": "mne/io/tests/test_compensator.py",
"copies": "21",
"size": "2641",
"license": "bsd-3-clause",
"hash": 7839780865193153000,
"line_mean": 35.6805555556,
"line_max": 73,
"alpha_frac": 0.6410450587,
"autogenerated": false,
"ratio": 2.9344444444444444,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
import os.path as op
from nose.tools import assert_true
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
import numpy as np
from scipy import linalg
import warnings
from mne.cov import regularize, whiten_evoked
from mne import (read_cov, write_cov, Epochs, merge_events,
find_events, compute_raw_data_covariance,
compute_covariance, read_evokeds)
from mne import pick_channels_cov, pick_channels, pick_types
from mne.io import Raw
from mne.utils import _TempDir
warnings.simplefilter('always') # enable b/c these tests throw warnings
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
cov_fname = op.join(base_dir, 'test-cov.fif')
cov_gz_fname = op.join(base_dir, 'test-cov.fif.gz')
cov_km_fname = op.join(base_dir, 'test-km-cov.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
ave_fname = op.join(base_dir, 'test-ave.fif')
erm_cov_fname = op.join(base_dir, 'test_erm-cov.fif')
def test_io_cov():
    """Test reading and writing of noise covariance matrices."""
    tempdir = _TempDir()
    cov = read_cov(cov_fname)
    fif_out = op.join(tempdir, 'test-cov.fif')
    gz_out = op.join(tempdir, 'test-cov.fif.gz')
    # round-trip through a plain .fif file
    cov.save(fif_out)
    assert_array_almost_equal(cov.data, read_cov(fif_out).data)
    # gzipped covariance should load identically and round-trip too
    cov2 = read_cov(cov_gz_fname)
    assert_array_almost_equal(cov.data, cov2.data)
    cov2.save(gz_out)
    assert_array_almost_equal(cov.data, read_cov(gz_out).data)
    # channel selection with bad channels excluded
    cov['bads'] = ['EEG 039']
    cov_sel = pick_channels_cov(cov, exclude=cov['bads'])
    assert_true(cov_sel['dim'] == (len(cov['data']) - len(cov['bads'])))
    assert_true(cov_sel['data'].shape == (cov_sel['dim'], cov_sel['dim']))
    cov_sel.save(fif_out)
    # re-check the gzipped file after the selection was saved
    cov2 = read_cov(cov_gz_fname)
    assert_array_almost_equal(cov.data, cov2.data)
    cov2.save(gz_out)
    assert_array_almost_equal(cov.data, read_cov(gz_out).data)
    # test warnings on bad filenames
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        cov_badname = op.join(tempdir, 'test-bad-name.fif.gz')
        write_cov(cov_badname, cov)
        read_cov(cov_badname)
    assert_true(len(w) == 2)
def test_cov_estimation_on_raw_segment():
    """Test covariance estimation from continuous raw data (empty room)."""
    tempdir = _TempDir()
    raw = Raw(raw_fname, preload=False)
    cov = compute_raw_data_covariance(raw)
    cov_mne = read_cov(erm_cov_fname)
    assert_true(cov_mne.ch_names == cov.ch_names)
    # relative Frobenius-norm error against the MNE-C reference
    rel_err = (linalg.norm(cov.data - cov_mne.data, ord='fro') /
               linalg.norm(cov.data, ord='fro'))
    assert_true(rel_err < 1e-4)
    # round-trip IO for a Python-computed covariance
    cov_out = op.join(tempdir, 'test-cov.fif')
    cov.save(cov_out)
    cov_read = read_cov(cov_out)
    assert_true(cov_read.ch_names == cov.ch_names)
    assert_true(cov_read.nfree == cov.nfree)
    assert_array_almost_equal(cov.data, cov_read.data)
    # restrict estimation to a subset of channels
    picks = pick_channels(raw.ch_names, include=raw.ch_names[:5])
    cov = compute_raw_data_covariance(raw, picks=picks)
    assert_true(cov_mne.ch_names[:5] == cov.ch_names)
    rel_err = (linalg.norm(cov.data - cov_mne.data[picks][:, picks],
                           ord='fro') / linalg.norm(cov.data, ord='fro'))
    assert_true(rel_err < 1e-4)
    # a too-short segment should trigger exactly one warning
    raw_2 = raw.crop(0, 1)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        cov = compute_raw_data_covariance(raw_2)
    assert_true(len(w) == 1)
def test_cov_estimation_with_triggers():
    """Test covariance estimation from epoched data with triggers.

    Compares Python-computed covariances (with/without keeping the sample
    mean, from a single Epochs object or a list of them) against MNE-C
    reference files, checks IO round-trips, and verifies error handling
    when epochs carry mismatched projectors.
    """
    tempdir = _TempDir()
    raw = Raw(raw_fname, preload=False)
    events = find_events(raw, stim_channel='STI 014')
    event_ids = [1, 2, 3, 4]
    reject = dict(grad=10000e-13, mag=4e-12, eeg=80e-6, eog=150e-6)
    # cov with merged events and keep_sample_mean=True
    events_merged = merge_events(events, event_ids, 1234)
    epochs = Epochs(raw, events_merged, 1234, tmin=-0.2, tmax=0,
                    baseline=(-0.2, -0.1), proj=True,
                    reject=reject, preload=True)
    cov = compute_covariance(epochs, keep_sample_mean=True)
    cov_mne = read_cov(cov_km_fname)
    assert_true(cov_mne.ch_names == cov.ch_names)
    # relative Frobenius-norm error vs the MNE-C reference
    assert_true((linalg.norm(cov.data - cov_mne.data, ord='fro')
                 / linalg.norm(cov.data, ord='fro')) < 0.005)
    # Test with tmin and tmax (different but not too much)
    cov_tmin_tmax = compute_covariance(epochs, tmin=-0.19, tmax=-0.01)
    assert_true(np.all(cov.data != cov_tmin_tmax.data))
    assert_true((linalg.norm(cov.data - cov_tmin_tmax.data, ord='fro')
                 / linalg.norm(cov_tmin_tmax.data, ord='fro')) < 0.05)
    # cov using a list of epochs (one per event id) and keep_sample_mean=True
    epochs = [Epochs(raw, events, ev_id, tmin=-0.2, tmax=0,
                     baseline=(-0.2, -0.1), proj=True, reject=reject)
              for ev_id in event_ids]
    cov2 = compute_covariance(epochs, keep_sample_mean=True)
    # must match the merged-events result above
    assert_array_almost_equal(cov.data, cov2.data)
    assert_true(cov.ch_names == cov2.ch_names)
    # cov with keep_sample_mean=False using a list of epochs
    cov = compute_covariance(epochs, keep_sample_mean=False)
    cov_mne = read_cov(cov_fname)
    assert_true(cov_mne.ch_names == cov.ch_names)
    assert_true((linalg.norm(cov.data - cov_mne.data, ord='fro')
                 / linalg.norm(cov.data, ord='fro')) < 0.005)
    # test IO when computation done in Python
    cov.save(op.join(tempdir, 'test-cov.fif'))  # test saving
    cov_read = read_cov(op.join(tempdir, 'test-cov.fif'))
    assert_true(cov_read.ch_names == cov.ch_names)
    assert_true(cov_read.nfree == cov.nfree)
    assert_true((linalg.norm(cov.data - cov_read.data, ord='fro')
                 / linalg.norm(cov.data, ord='fro')) < 1e-5)
    # cov with list of epochs with different projectors
    epochs = [Epochs(raw, events[:4], event_ids[0], tmin=-0.2, tmax=0,
                     baseline=(-0.2, -0.1), proj=True, reject=reject),
              Epochs(raw, events[:4], event_ids[0], tmin=-0.2, tmax=0,
                     baseline=(-0.2, -0.1), proj=False, reject=reject)]
    # these should fail: projectors are inconsistent across the epochs
    assert_raises(ValueError, compute_covariance, epochs)
    assert_raises(ValueError, compute_covariance, epochs, projs=None)
    # these should work, but won't be equal to above
    with warnings.catch_warnings(record=True) as w:  # too few samples warning
        warnings.simplefilter('always')
        cov = compute_covariance(epochs, projs=epochs[0].info['projs'])
        cov = compute_covariance(epochs, projs=[])
    assert_true(len(w) == 2)
    # test new dict support (event_id given as a mapping)
    epochs = Epochs(raw, events, dict(a=1, b=2, c=3, d=4), tmin=-0.2, tmax=0,
                    baseline=(-0.2, -0.1), proj=True, reject=reject)
    compute_covariance(epochs)
def test_arithmetic_cov():
    """Test addition (in-place and out-of-place) of noise covariances."""
    cov = read_cov(cov_fname)
    summed = cov + cov
    # out-of-place sum doubles both the data and the degrees of freedom
    assert_array_almost_equal(2 * cov.nfree, summed.nfree)
    assert_array_almost_equal(2 * cov.data, summed.data)
    assert_true(cov.ch_names == summed.ch_names)
    # in-place addition must give the same result
    cov += cov
    assert_array_almost_equal(summed.nfree, cov.nfree)
    assert_array_almost_equal(summed.data, cov.data)
    assert_true(summed.ch_names == cov.ch_names)
def test_regularize_cov():
    """Test noise covariance regularization."""
    raw = Raw(raw_fname, preload=False)
    # mark one channel bad to exercise the exclude='bads' path
    raw.info['bads'].append(raw.ch_names[0])
    noise_cov = read_cov(cov_fname)
    # Regularize noise cov
    reg_noise_cov = regularize(noise_cov, raw.info, mag=0.1, grad=0.1,
                               eeg=0.1, proj=True, exclude='bads')
    # regularization must preserve dimensions ...
    assert_true(noise_cov['dim'] == reg_noise_cov['dim'])
    assert_true(noise_cov['data'].shape == reg_noise_cov['data'].shape)
    # ... and should not increase most entries
    assert_true(np.mean(noise_cov['data'] < reg_noise_cov['data']) < 0.08)
def test_evoked_whiten():
    """Test whitening of evoked data."""
    evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0),
                          proj=True)
    cov = read_cov(cov_fname)
    picks = pick_types(evoked.info, meg=True, eeg=True, ref_meg=False,
                       exclude='bads')
    noise_cov = regularize(cov, evoked.info, grad=0.1, mag=0.1, eeg=0.1,
                           exclude='bads')
    evoked_white = whiten_evoked(evoked, noise_cov, picks, diag=True)
    # in whitened units the pre-stimulus baseline should be roughly unit
    # scale: strictly below 1 but clearly above zero
    baseline = evoked_white.data[picks][:, evoked.times < 0]
    mean_abs_baseline = np.mean(np.abs(baseline), axis=1)
    assert_true(np.all(mean_abs_baseline < 1.))
    assert_true(np.all(mean_abs_baseline > 0.2))
| {
"repo_name": "effigies/mne-python",
"path": "mne/tests/test_cov.py",
"copies": "1",
"size": "9068",
"license": "bsd-3-clause",
"hash": 2355721487476759000,
"line_mean": 40.0316742081,
"line_max": 79,
"alpha_frac": 0.6301279224,
"autogenerated": false,
"ratio": 3.0583473861720067,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41884753085720067,
"avg_score": null,
"num_lines": null
} |
import os.path as op
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_almost_equal)
from nose.tools import assert_true, assert_raises
import warnings
from mne.datasets import testing
from mne import read_forward_solution
from mne.simulation import simulate_sparse_stc, simulate_evoked
from mne import read_cov
from mne.io import Raw
from mne import pick_types_forward, read_evokeds
from mne.utils import run_tests_if_main
warnings.simplefilter('always')
data_path = testing.data_path(download=False)
fwd_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
raw_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test_raw.fif')
ave_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test-ave.fif')
cov_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test-cov.fif')
@testing.requires_testing_data
def test_simulate_evoked():
    """Test simulation of evoked data.

    Simulates evoked responses from a sparse source estimate and checks
    the timing and shape of the result, reproducibility at infinite SNR,
    error handling for invalid vertices, and the dB definition of SNR.
    """
    raw = Raw(raw_fname)
    fwd = read_forward_solution(fwd_fname, force_fixed=True)
    fwd = pick_types_forward(fwd, meg=True, eeg=True, exclude=raw.info['bads'])
    cov = read_cov(cov_fname)
    evoked_template = read_evokeds(ave_fname, condition=0, baseline=None)
    evoked_template.pick_types(meg=True, eeg=True, exclude=raw.info['bads'])
    snr = 6  # dB
    tmin = -0.1
    sfreq = 1000.  # Hz
    tstep = 1. / sfreq
    n_samples = 600
    times = np.linspace(tmin, tmin + n_samples * tstep, n_samples)
    # Generate times series for 2 dipoles
    stc = simulate_sparse_stc(fwd['src'], n_dipoles=2, times=times)
    stc._data *= 1e-9  # scale amplitudes down (nAm range)
    # Generate noisy evoked data
    iir_filter = [1, -0.9]  # AR(1) coefficients used to color the noise
    evoked = simulate_evoked(fwd, stc, evoked_template.info, cov, snr,
                             tmin=0.0, tmax=0.2, iir_filter=iir_filter)
    assert_array_almost_equal(evoked.times, stc.times)
    assert_true(len(evoked.data) == len(fwd['sol']['data']))
    # make a vertex that doesn't exist in fwd, should throw error
    stc_bad = stc.copy()
    mv = np.max(fwd['src'][0]['vertno'][fwd['src'][0]['inuse']])
    stc_bad.vertices[0][0] = mv + 1
    assert_raises(RuntimeError, simulate_evoked, fwd, stc_bad,
                  evoked_template.info, cov, snr, tmin=0.0, tmax=0.2)
    # infinite SNR -> no noise added, so repeated runs must be identical
    evoked_1 = simulate_evoked(fwd, stc, evoked_template.info, cov, np.inf,
                               tmin=0.0, tmax=0.2)
    evoked_2 = simulate_evoked(fwd, stc, evoked_template.info, cov, np.inf,
                               tmin=0.0, tmax=0.2)
    assert_array_equal(evoked_1.data, evoked_2.data)
    # test snr definition in dB
    evoked_noise = simulate_evoked(fwd, stc, evoked_template.info, cov,
                                   snr=snr, tmin=None, tmax=None,
                                   iir_filter=None)
    evoked_clean = simulate_evoked(fwd, stc, evoked_template.info, cov,
                                   snr=np.inf, tmin=None, tmax=None,
                                   iir_filter=None)
    noise = evoked_noise.data - evoked_clean.data
    # empirical SNR (signal power / noise power, in dB) must match request
    empirical_snr = 10 * np.log10(np.mean((evoked_clean.data ** 2).ravel()) /
                                  np.mean((noise ** 2).ravel()))
    assert_almost_equal(snr, empirical_snr, decimal=5)
    cov['names'] = cov.ch_names[:-2]  # Error channels are different.
    assert_raises(ValueError, simulate_evoked, fwd, stc, evoked_template.info,
                  cov, snr=3., tmin=None, tmax=None, iir_filter=None)
run_tests_if_main()  # allow running this test module directly as a script
| {
"repo_name": "alexandrebarachant/mne-python",
"path": "mne/simulation/tests/test_evoked.py",
"copies": "2",
"size": "3750",
"license": "bsd-3-clause",
"hash": 6148970045337752000,
"line_mean": 38.4736842105,
"line_max": 79,
"alpha_frac": 0.6037333333,
"autogenerated": false,
"ratio": 3.125,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47287333333,
"avg_score": null,
"num_lines": null
} |
import os.path as op
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal, assert_allclose)
from nose.tools import assert_true, assert_raises
import warnings
from mne.datasets import testing
from mne import read_forward_solution
from mne.simulation import simulate_sparse_stc, simulate_evoked
from mne import read_cov
from mne.io import read_raw_fif
from mne import pick_types_forward, read_evokeds
from mne.cov import regularize
from mne.utils import run_tests_if_main
warnings.simplefilter('always')
data_path = testing.data_path(download=False)
fwd_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
raw_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test_raw.fif')
ave_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test-ave.fif')
cov_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test-cov.fif')
@testing.requires_testing_data
def test_simulate_evoked():
    """Test simulation of evoked data.

    Checks timing, shape and ``nave`` of the simulated evoked,
    reproducibility with infinite ``nave`` (no noise), error handling for
    invalid vertices, and the deprecated ``snr`` parameter path.
    """
    raw = read_raw_fif(raw_fname)
    fwd = read_forward_solution(fwd_fname, force_fixed=True)
    fwd = pick_types_forward(fwd, meg=True, eeg=True, exclude=raw.info['bads'])
    cov = read_cov(cov_fname)
    evoked_template = read_evokeds(ave_fname, condition=0, baseline=None)
    evoked_template.pick_types(meg=True, eeg=True, exclude=raw.info['bads'])
    cov = regularize(cov, evoked_template.info)
    nave = evoked_template.nave
    tmin = -0.1
    sfreq = 1000.  # Hz
    tstep = 1. / sfreq
    n_samples = 600
    times = np.linspace(tmin, tmin + n_samples * tstep, n_samples)
    # Generate times series for 2 dipoles
    stc = simulate_sparse_stc(fwd['src'], n_dipoles=2, times=times,
                              random_state=42)
    # Generate noisy evoked data
    iir_filter = [1, -0.9]  # AR(1) coefficients used to color the noise
    evoked = simulate_evoked(fwd, stc, evoked_template.info, cov,
                             iir_filter=iir_filter, nave=nave)
    assert_array_almost_equal(evoked.times, stc.times)
    assert_true(len(evoked.data) == len(fwd['sol']['data']))
    assert_equal(evoked.nave, nave)
    # make a vertex that doesn't exist in fwd, should throw error
    stc_bad = stc.copy()
    mv = np.max(fwd['src'][0]['vertno'][fwd['src'][0]['inuse']])
    stc_bad.vertices[0][0] = mv + 1
    assert_raises(RuntimeError, simulate_evoked, fwd, stc_bad,
                  evoked_template.info, cov)
    # nave=inf -> no noise added, so repeated runs must be identical
    evoked_1 = simulate_evoked(fwd, stc, evoked_template.info, cov,
                               nave=np.inf)
    evoked_2 = simulate_evoked(fwd, stc, evoked_template.info, cov,
                               nave=np.inf)
    assert_array_equal(evoked_1.data, evoked_2.data)
    # Test the equivalence snr to nave
    with warnings.catch_warnings(record=True):  # deprecation
        evoked = simulate_evoked(fwd, stc, evoked_template.info, cov,
                                 snr=6, random_state=42)
    # regression value pinned for the fixed random_state above
    assert_allclose(np.linalg.norm(evoked.data, ord='fro'),
                    0.00078346820226502716)
    cov['names'] = cov.ch_names[:-2]  # Error channels are different.
    assert_raises(ValueError, simulate_evoked, fwd, stc, evoked_template.info,
                  cov, nave=nave, iir_filter=None)
run_tests_if_main()  # allow running this test module directly as a script
| {
"repo_name": "jaeilepp/mne-python",
"path": "mne/simulation/tests/test_evoked.py",
"copies": "1",
"size": "3512",
"license": "bsd-3-clause",
"hash": -6713431779630030000,
"line_mean": 36.7634408602,
"line_max": 79,
"alpha_frac": 0.6272779043,
"autogenerated": false,
"ratio": 3.219065077910174,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9346342982210174,
"avg_score": 0,
"num_lines": 93
} |
import os.path as op
import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_true, assert_raises
import warnings
from mne.datasets import testing
from mne import read_label, read_forward_solution
from mne.time_frequency import morlet
from mne.simulation import generate_sparse_stc, generate_evoked
from mne import read_cov
from mne.io import Raw
from mne import pick_types_forward, read_evokeds
warnings.simplefilter('always')
data_path = testing.data_path(download=False)
fwd_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
raw_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test_raw.fif')
ave_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test-ave.fif')
cov_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test-cov.fif')
@testing.requires_testing_data
def test_simulate_evoked():
    """Test simulation of evoked data.

    Builds sparse source activity from two Morlet wavelets placed in the
    auditory labels, simulates a noisy evoked response, and checks timing,
    shape, and error handling for invalid vertices.
    """
    raw = Raw(raw_fname)
    fwd = read_forward_solution(fwd_fname, force_fixed=True)
    fwd = pick_types_forward(fwd, meg=True, eeg=True, exclude=raw.info['bads'])
    cov = read_cov(cov_fname)
    label_names = ['Aud-lh', 'Aud-rh']
    labels = [read_label(op.join(data_path, 'MEG', 'sample', 'labels',
                                 '%s.label' % label)) for label in label_names]
    evoked_template = read_evokeds(ave_fname, condition=0, baseline=None)
    evoked_template.pick_types(meg=True, eeg=True, exclude=raw.info['bads'])
    snr = 6  # dB
    tmin = -0.1
    sfreq = 1000.  # Hz
    tstep = 1. / sfreq
    n_samples = 600
    times = np.linspace(tmin, tmin + n_samples * tstep, n_samples)
    # Generate times series from 2 Morlet wavelets (real part only)
    stc_data = np.zeros((len(labels), len(times)))
    Ws = morlet(sfreq, [3, 10], n_cycles=[1, 1.5])
    stc_data[0][:len(Ws[0])] = np.real(Ws[0])
    stc_data[1][:len(Ws[1])] = np.real(Ws[1])
    stc_data *= 100 * 1e-9  # use nAm as unit
    # time translation: shift the second source by 80 samples
    stc_data[1] = np.roll(stc_data[1], 80)
    stc = generate_sparse_stc(fwd['src'], labels, stc_data, tmin, tstep,
                              random_state=0)
    # Generate noisy evoked data (AR(1)-colored noise)
    iir_filter = [1, -0.9]
    with warnings.catch_warnings(record=True):
        warnings.simplefilter('always')  # positive semidefinite warning
        evoked = generate_evoked(fwd, stc, evoked_template, cov, snr,
                                 tmin=0.0, tmax=0.2, iir_filter=iir_filter)
    assert_array_almost_equal(evoked.times, stc.times)
    assert_true(len(evoked.data) == len(fwd['sol']['data']))
    # make a vertex that doesn't exist in fwd, should throw error
    stc_bad = stc.copy()
    mv = np.max(fwd['src'][0]['vertno'][fwd['src'][0]['inuse']])
    stc_bad.vertices[0][0] = mv + 1
    assert_raises(RuntimeError, generate_evoked, fwd, stc_bad,
                  evoked_template, cov, snr, tmin=0.0, tmax=0.2)
| {
"repo_name": "matthew-tucker/mne-python",
"path": "mne/simulation/tests/test_evoked.py",
"copies": "7",
"size": "3120",
"license": "bsd-3-clause",
"hash": 7096174052178008000,
"line_mean": 37.5185185185,
"line_max": 79,
"alpha_frac": 0.6205128205,
"autogenerated": false,
"ratio": 3.0439024390243903,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 81
} |
import warnings
import os.path as op
from nose.tools import assert_true
from mne import io, Epochs, read_events, pick_types
from mne.utils import requires_sklearn
from mne.decoding import time_generalization
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)
@requires_sklearn
def test_time_generalization():
    """Test time-generalization decoding on a small magnetometer subset."""
    raw = io.Raw(raw_fname, preload=False)
    events = read_events(event_name)
    # a handful of magnetometers keeps the test fast
    mag_picks = pick_types(raw.info, meg='mag', stim=False, ecg=False,
                           eog=False, exclude='bads')[1:13:3]
    with warnings.catch_warnings(record=True):
        epochs = Epochs(raw, events, event_id, tmin, tmax, picks=mag_picks,
                        baseline=(None, 0), preload=True, decim=30)
        epochs_list = [epochs[cond] for cond in event_id]
        scores = time_generalization(epochs_list, cv=2, random_state=42)
        n_times = len(epochs.times)
        # scores form an (n_times, n_times) matrix of accuracies in [0, 1]
        assert_true(scores.shape == (n_times, n_times))
        assert_true(0. <= scores.min())
        assert_true(scores.max() <= 1.)
| {
"repo_name": "effigies/mne-python",
"path": "mne/decoding/tests/test_time_gen.py",
"copies": "2",
"size": "1373",
"license": "bsd-3-clause",
"hash": -482678710559433600,
"line_mean": 31.6904761905,
"line_max": 75,
"alpha_frac": 0.6394756009,
"autogenerated": false,
"ratio": 3.0715883668903805,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9711063967790381,
"avg_score": 0,
"num_lines": 42
} |
import warnings
import os.path as op
from nose.tools import assert_true
from mne import io, Epochs, read_events, pick_types
from mne.utils import _TempDir, requires_sklearn
from mne.decoding import time_generalization
tempdir = _TempDir()
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)
@requires_sklearn
def test_time_generalization():
    """Test time generalization decoding.

    Fits a time-generalization decoder on two conditions and checks the
    score matrix shape and that all scores lie in [0, 1].
    """
    raw = io.Raw(raw_fname, preload=False)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg='mag', stim=False, ecg=False,
                       eog=False, exclude='bads')
    picks = picks[1:13:3]  # subsample channels to keep the test fast
    decim = 30
    # suppress irrelevant warnings from Epochs; the captured list was
    # previously bound to an unused name, so drop the binding
    with warnings.catch_warnings(record=True):
        epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                        baseline=(None, 0), preload=True, decim=decim)
        epochs_list = [epochs[k] for k in event_id.keys()]
        scores = time_generalization(epochs_list, cv=2, random_state=42)
        n_times = len(epochs.times)
        assert_true(scores.shape == (n_times, n_times))
        assert_true(scores.max() <= 1.)
        assert_true(scores.min() >= 0.)
| {
"repo_name": "jaeilepp/eggie",
"path": "mne/decoding/tests/test_time_gen.py",
"copies": "2",
"size": "1415",
"license": "bsd-2-clause",
"hash": -7174546397776386000,
"line_mean": 31.1590909091,
"line_max": 75,
"alpha_frac": 0.6374558304,
"autogenerated": false,
"ratio": 3.0760869565217392,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.971147667121926,
"avg_score": 0.00041322314049586776,
"num_lines": 44
} |
import numpy as np
from ..utils import logger, verbose
@verbose
def is_equal(first, second, verbose=None):
    """Check if 2 python structures are the same.

    Designed to handle dict, list, np.ndarray etc. Recurses into dicts
    and lists; arrays are compared with ``np.allclose``; everything else
    with ``!=``. Differences are reported via ``logger.info``.

    Parameters
    ----------
    first : object
        First structure.
    second : object
        Structure to compare against ``first``.
    verbose : bool | str | int | None
        If not None, override default verbose level.

    Returns
    -------
    all_equal : bool
        True if the two structures compare equal.
    """
    all_equal = True
    # Check all keys in first dict
    if type(first) != type(second):
        all_equal = False
    if isinstance(first, dict):
        for key in first.keys():
            if (key not in second):
                logger.info("Missing key %s in %s" % (key, second))
                all_equal = False
            else:
                if not is_equal(first[key], second[key]):
                    all_equal = False
    elif isinstance(first, np.ndarray):
        if not np.allclose(first, second):
            all_equal = False
    elif isinstance(first, list):
        # zip() silently truncates at the shorter list, so a length
        # mismatch must be detected explicitly
        if len(first) != len(second):
            logger.info('Different lengths (%d != %d)'
                        % (len(first), len(second)))
            all_equal = False
        for a, b in zip(first, second):
            if not is_equal(a, b):
                logger.info('%s and\n%s are different' % (a, b))
                all_equal = False
    else:
        if first != second:
            logger.info('%s and\n%s are different' % (first, second))
            all_equal = False
    return all_equal
| {
"repo_name": "adykstra/mne-python",
"path": "mne/io/diff.py",
"copies": "10",
"size": "1231",
"license": "bsd-3-clause",
"hash": -3715550367287711000,
"line_mean": 29.775,
"line_max": 70,
"alpha_frac": 0.5613322502,
"autogenerated": false,
"ratio": 3.8348909657320873,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9396223215932087,
"avg_score": null,
"num_lines": null
} |
import numpy as np
from ..utils import logger, verbose
@verbose
def is_equal(first, second, verbose=None):
    """Say if 2 python structures are the same.

    Designed to handle dict, list, np.ndarray etc. Recurses into dicts
    and lists; arrays are compared with ``np.allclose``; everything else
    with ``!=``. Differences are reported via ``logger.info``.

    Parameters
    ----------
    first : object
        First structure.
    second : object
        Structure to compare against ``first``.
    verbose : bool | str | int | None
        If not None, override default verbose level.

    Returns
    -------
    all_equal : bool
        True if the two structures compare equal.
    """
    all_equal = True
    # Check all keys in first dict
    if type(first) != type(second):
        all_equal = False
    if isinstance(first, dict):
        for key in first.keys():
            if (key not in second):
                logger.info("Missing key %s in %s" % (key, second))
                all_equal = False
            else:
                if not is_equal(first[key], second[key]):
                    all_equal = False
    elif isinstance(first, np.ndarray):
        if not np.allclose(first, second):
            all_equal = False
    elif isinstance(first, list):
        # zip() silently truncates at the shorter list, so a length
        # mismatch must be detected explicitly
        if len(first) != len(second):
            logger.info('Different lengths (%d != %d)'
                        % (len(first), len(second)))
            all_equal = False
        for a, b in zip(first, second):
            if not is_equal(a, b):
                logger.info('%s and\n%s are different' % (a, b))
                all_equal = False
    else:
        if first != second:
            logger.info('%s and\n%s are different' % (first, second))
            all_equal = False
    return all_equal
| {
"repo_name": "Odingod/mne-python",
"path": "mne/io/diff.py",
"copies": "25",
"size": "1230",
"license": "bsd-3-clause",
"hash": 7805532882965248000,
"line_mean": 30.5384615385,
"line_max": 70,
"alpha_frac": 0.5609756098,
"autogenerated": false,
"ratio": 3.8198757763975157,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from copy import deepcopy
import numpy as np
from scipy import linalg, signal
from ..source_estimate import SourceEstimate
from ..minimum_norm.inverse import combine_xyz, _prepare_forward
from ..forward import compute_orient_prior, is_fixed_orient, _to_fixed_ori
from ..io.pick import pick_channels_evoked
from .mxne_optim import mixed_norm_solver, norm_l2inf, tf_mixed_norm_solver
from ..utils import logger, verbose
@verbose
def _prepare_gain(gain, forward, whitener, depth, loose, weights, weights_min,
                  verbose=None):
    """Whiten and weight the lead field matrix for sparse solvers.

    Applies the whitener, depth weighting, loose-orientation prior and
    optional per-source weights to ``gain``. Returns the rescaled gain,
    the per-source scaling applied (needed later to unscale solutions)
    and an optional boolean mask of retained sources (None when no
    weight-based reduction was done).
    """
    logger.info('Whitening lead field matrix.')
    gain = np.dot(whitener, gain)
    # Handle depth prior scaling
    source_weighting = np.sum(gain ** 2, axis=0) ** depth
    # apply loose orientations
    orient_prior = compute_orient_prior(forward, loose)
    source_weighting /= orient_prior
    source_weighting = np.sqrt(source_weighting)
    gain /= source_weighting[None, :]
    # Handle weights
    mask = None
    if weights is not None:
        if isinstance(weights, SourceEstimate):
            # weights = np.sqrt(np.sum(weights.data ** 2, axis=1))
            weights = np.max(np.abs(weights.data), axis=1)
        weights_max = np.max(weights)
        if weights_min > weights_max:
            raise ValueError('weights_min > weights_max (%s > %s)' %
                             (weights_min, weights_max))
        # normalize weights (and the threshold) relative to their maximum
        weights_min = weights_min / weights_max
        weights = weights / weights_max
        # replicate each weight across the 3 orientations when free-orient
        n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
        weights = np.ravel(np.tile(weights, [n_dip_per_pos, 1]).T)
        if len(weights) != gain.shape[1]:
            raise ValueError('weights do not have the correct dimension '
                             ' (%d != %d)' % (len(weights), gain.shape[1]))
        # fold non-zero weights into the scaling so solutions can be unscaled
        nz_idx = np.where(weights != 0.0)[0]
        source_weighting[nz_idx] /= weights[nz_idx]
        gain *= weights[None, :]
        if weights_min is not None:
            # drop sources whose normalized weight is at/below the threshold
            mask = (weights > weights_min)
            gain = gain[:, mask]
            n_sources = np.sum(mask) / n_dip_per_pos
            logger.info("Reducing source space to %d sources" % n_sources)
    return gain, source_weighting, mask
@verbose
def _make_sparse_stc(X, active_set, forward, tmin, tstep,
                     active_is_idx=False, verbose=None):
    """Build a SourceEstimate from solver output on the active sources.

    ``active_set`` is either a boolean mask over all candidate sources
    or, when ``active_is_idx`` is True, an array of source indices.
    """
    if not is_fixed_orient(forward):
        logger.info('combining the current components...')
        # collapse the xyz components into a single amplitude per location
        X = combine_xyz(X)
    if not active_is_idx:
        active_idx = np.where(active_set)[0]
    else:
        active_idx = active_set
    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
    if n_dip_per_pos > 1:
        # map per-dipole indices back to per-location indices
        active_idx = np.unique(active_idx // n_dip_per_pos)
    src = forward['src']
    # split active locations into left/right hemisphere vertex numbers
    n_lh_points = len(src[0]['vertno'])
    lh_vertno = src[0]['vertno'][active_idx[active_idx < n_lh_points]]
    rh_vertno = src[1]['vertno'][active_idx[active_idx >= n_lh_points]
                                 - n_lh_points]
    vertices = [lh_vertno, rh_vertno]
    stc = SourceEstimate(X, vertices=vertices, tmin=tmin, tstep=tstep)
    return stc
@verbose
def mixed_norm(evoked, forward, noise_cov, alpha, loose=0.2, depth=0.8,
               maxit=3000, tol=1e-4, active_set_size=10, pca=True,
               debias=True, time_pca=True, weights=None, weights_min=None,
               solver='auto', return_residual=False, verbose=None):
    """Mixed-norm estimate (MxNE)

    Compute L1/L2 mixed-norm solution on evoked data.

    References:
    Gramfort A., Kowalski M. and Hamalainen, M,
    Mixed-norm estimates for the M/EEG inverse problem using accelerated
    gradient methods, Physics in Medicine and Biology, 2012
    http://dx.doi.org/10.1088/0031-9155/57/7/1937

    Parameters
    ----------
    evoked : instance of Evoked or list of instances of Evoked
        Evoked data to invert.
    forward : dict
        Forward operator.
    noise_cov : instance of Covariance
        Noise covariance to compute whitener.
    alpha : float
        Regularization parameter.
    loose : float in [0, 1]
        Value that weights the source variances of the dipole components
        that are parallel (tangential) to the cortical surface. If loose
        is 0 or None then the solution is computed with fixed orientation.
        If loose is 1, it corresponds to free orientations.
    depth : None | float in [0, 1]
        Depth weighting coefficients. If None, no depth weighting is
        performed.
    maxit : int
        Maximum number of iterations.
    tol : float
        Tolerance parameter.
    active_set_size : int | None
        Size of active set increment. If None, no active set strategy is
        used.
    pca : bool
        If True the rank of the data is reduced to true dimension.
    debias : bool
        Remove coefficient amplitude bias due to L1 penalty.
    time_pca : bool or int
        If True the rank of the concatenated epochs is reduced to
        its true dimension. If is 'int' the rank is limited to this value.
    weights : None | array | SourceEstimate
        Weight for penalty in mixed_norm. Can be None or
        1d array of length n_sources or a SourceEstimate e.g. obtained
        with wMNE or dSPM or fMRI.
    weights_min : float
        Do not consider in the estimation sources for which weights
        is less than weights_min.
    solver : 'prox' | 'cd' | 'auto'
        The algorithm to use for the optimization. prox stands for
        proximal iterations using the FISTA algorithm while cd uses
        coordinate descent. cd is only available for fixed orientation.
    return_residual : bool
        If True, the residual is returned as an Evoked instance.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    stc : SourceEstimate | list of SourceEstimate
        Source time courses for each evoked data passed as input.
    residual : instance of Evoked
        The residual a.k.a. data not explained by the sources.
        Only returned if return_residual is True.
    """
    if not isinstance(evoked, list):
        evoked = [evoked]

    # all conditions must share the gain matrix, hence the same channels
    all_ch_names = evoked[0].ch_names
    if not all(all_ch_names == evoked[i].ch_names
               for i in range(1, len(evoked))):
        raise Exception('All the datasets must have the same good channels.')

    # put the forward solution in fixed orientation if it's not already
    if loose is None and not is_fixed_orient(forward):
        forward = deepcopy(forward)
        _to_fixed_ori(forward)

    info = evoked[0].info
    gain_info, gain, _, whitener, _ = _prepare_forward(forward, info,
                                                       noise_cov, pca)

    # Whiten lead field.
    gain, source_weighting, mask = _prepare_gain(gain, forward, whitener,
                                                 depth, loose, weights,
                                                 weights_min)

    sel = [all_ch_names.index(name) for name in gain_info['ch_names']]
    M = np.concatenate([e.data[sel] for e in evoked], axis=1)

    # Whiten data
    logger.info('Whitening data matrix.')
    M = np.dot(whitener, M)

    if time_pca:
        U, s, Vh = linalg.svd(M, full_matrices=False)
        # an int (but not bool) time_pca limits the rank explicitly
        if not isinstance(time_pca, bool) and isinstance(time_pca, int):
            U = U[:, :time_pca]
            s = s[:time_pca]
            Vh = Vh[:time_pca]
        M = U * s

    # Scaling to make setting of alpha easy
    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
    alpha_max = norm_l2inf(np.dot(gain.T, M), n_dip_per_pos, copy=False)
    alpha_max *= 0.01
    gain /= alpha_max
    source_weighting *= alpha_max

    X, active_set, E = mixed_norm_solver(M, gain, alpha,
                                         maxit=maxit, tol=tol,
                                         active_set_size=active_set_size,
                                         debias=debias,
                                         n_orient=n_dip_per_pos,
                                         solver=solver)

    if mask is not None:
        # expand the active set from the weights_min-restricted source
        # space back to the full source space.  NOTE: use the builtin
        # `bool` -- the `np.bool` alias was removed in NumPy 1.24.
        active_set_tmp = np.zeros(len(mask), dtype=bool)
        active_set_tmp[mask] = active_set
        active_set = active_set_tmp
        del active_set_tmp

    if time_pca:
        X = np.dot(X, Vh)  # project back from the reduced temporal basis

    if active_set.sum() == 0:
        raise Exception("No active dipoles found. alpha is too big.")

    # Reapply weights to have correct unit
    X /= source_weighting[active_set][:, None]

    stcs = list()
    residual = list()
    cnt = 0
    for e in evoked:
        tmin = e.times[0]
        tstep = 1.0 / e.info['sfreq']
        Xe = X[:, cnt:(cnt + len(e.times))]  # this condition's time slice
        stc = _make_sparse_stc(Xe, active_set, forward, tmin, tstep)
        stcs.append(stc)
        cnt += len(e.times)

        if return_residual:
            sel = [forward['sol']['row_names'].index(c)
                   for c in gain_info['ch_names']]
            r = deepcopy(e)
            r = pick_channels_evoked(r, include=gain_info['ch_names'])
            # subtract the (unwhitened) forward projection of the estimate
            r.data -= np.dot(forward['sol']['data'][sel, :][:, active_set],
                             Xe)
            residual.append(r)

    logger.info('[done]')

    if len(stcs) == 1:
        out = stcs[0]
        if return_residual:
            residual = residual[0]
    else:
        out = stcs

    if return_residual:
        out = out, residual

    return out
def _window_evoked(evoked, size):
"""Window evoked (size in seconds)"""
if isinstance(size, (float, int)):
lsize = rsize = float(size)
else:
lsize, rsize = size
evoked = deepcopy(evoked)
sfreq = float(evoked.info['sfreq'])
lsize = int(lsize * sfreq)
rsize = int(rsize * sfreq)
lhann = signal.hann(lsize * 2)
rhann = signal.hann(rsize * 2)
window = np.r_[lhann[:lsize],
np.ones(len(evoked.times) - lsize - rsize),
rhann[-rsize:]]
evoked.data *= window[None, :]
return evoked
@verbose
def tf_mixed_norm(evoked, forward, noise_cov, alpha_space, alpha_time,
                  loose=0.2, depth=0.8, maxit=3000, tol=1e-4,
                  weights=None, weights_min=None, pca=True, debias=True,
                  wsize=64, tstep=4, window=0.02,
                  return_residual=False, verbose=None):
    """Time-Frequency Mixed-norm estimate (TF-MxNE)

    Compute L1/L2 + L1 mixed-norm solution on time frequency
    dictionary. Works with evoked data.

    References:
    A. Gramfort, D. Strohmeier, J. Haueisen, M. Hamalainen, M. Kowalski
    Time-Frequency Mixed-Norm Estimates: Sparse M/EEG imaging with
    non-stationary source activations
    Neuroimage, Volume 70, 15 April 2013, Pages 410-422, ISSN 1053-8119,
    DOI: 10.1016/j.neuroimage.2012.12.051.

    A. Gramfort, D. Strohmeier, J. Haueisen, M. Hamalainen, M. Kowalski
    Functional Brain Imaging with M/EEG Using Structured Sparsity in
    Time-Frequency Dictionaries
    Proceedings Information Processing in Medical Imaging
    Lecture Notes in Computer Science, 2011, Volume 6801/2011,
    600-611, DOI: 10.1007/978-3-642-22092-0_49
    http://dx.doi.org/10.1007/978-3-642-22092-0_49

    Parameters
    ----------
    evoked : instance of Evoked
        Evoked data to invert.
    forward : dict
        Forward operator.
    noise_cov : instance of Covariance
        Noise covariance to compute whitener.
    alpha_space : float
        Regularization parameter for spatial sparsity. If larger than 100,
        then no source will be active.
    alpha_time : float
        Regularization parameter for temporal sparsity. If set to 0,
        no temporal regularization is applied. In this case, TF-MxNE is
        equivalent to MxNE with L21 norm.
    loose : float in [0, 1]
        Value that weights the source variances of the dipole components
        that are parallel (tangential) to the cortical surface. If loose
        is 0 or None then the solution is computed with fixed orientation.
        If loose is 1, it corresponds to free orientations.
    depth : None | float in [0, 1]
        Depth weighting coefficients. If None, no depth weighting is
        performed.
    maxit : int
        Maximum number of iterations.
    tol : float
        Tolerance parameter.
    weights : None | array | SourceEstimate
        Weight for penalty in mixed_norm. Can be None or
        1d array of length n_sources or a SourceEstimate e.g. obtained
        with wMNE or dSPM or fMRI.
    weights_min : float
        Do not consider in the estimation sources for which weights
        is less than weights_min.
    pca : bool
        If True the rank of the data is reduced to true dimension.
    wsize : int
        Length of the STFT window in samples (must be a multiple of 4).
    tstep : int
        Step between successive windows in samples (must be a multiple of 2,
        a divider of wsize and smaller than wsize/2) (default: wsize/2).
    window : float or (float, float)
        Length of time window used to take care of edge artifacts in
        seconds. It can be one float or a pair of floats if the values are
        different for the left and right window length.
    debias : bool
        Remove coefficient amplitude bias due to L1 penalty.
    return_residual : bool
        If True, the residual is returned as an Evoked instance.
    verbose : bool
        Verbose output or not.

    Returns
    -------
    stc : instance of SourceEstimate
        Source time courses.
    residual : instance of Evoked
        The residual a.k.a. data not explained by the sources.
        Only returned if return_residual is True.
    """
    all_ch_names = evoked.ch_names
    info = evoked.info

    # put the forward solution in fixed orientation if it's not already
    if loose is None and not is_fixed_orient(forward):
        forward = deepcopy(forward)
        _to_fixed_ori(forward)

    gain_info, gain, _, whitener, _ = _prepare_forward(forward,
                                                       info, noise_cov, pca)

    # Whiten lead field.
    gain, source_weighting, mask = _prepare_gain(gain, forward, whitener,
                                                 depth, loose, weights,
                                                 weights_min)

    if window is not None:
        # taper the edges to limit STFT edge artifacts
        evoked = _window_evoked(evoked, window)

    sel = [all_ch_names.index(name) for name in gain_info["ch_names"]]
    M = evoked.data[sel]

    # Whiten data
    logger.info('Whitening data matrix.')
    M = np.dot(whitener, M)

    # Scaling to make setting of alpha easy
    n_dip_per_pos = 1 if is_fixed_orient(forward) else 3
    alpha_max = norm_l2inf(np.dot(gain.T, M), n_dip_per_pos, copy=False)
    alpha_max *= 0.01
    gain /= alpha_max
    source_weighting *= alpha_max

    X, active_set, E = tf_mixed_norm_solver(M, gain,
                                            alpha_space, alpha_time,
                                            wsize=wsize, tstep=tstep,
                                            maxit=maxit, tol=tol,
                                            verbose=verbose,
                                            n_orient=n_dip_per_pos,
                                            debias=debias)

    if active_set.sum() == 0:
        raise Exception("No active dipoles found. alpha is too big.")

    if mask is not None:
        # expand the active set from the weights_min-restricted source
        # space back to the full source space.  NOTE: use the builtin
        # `bool` -- the `np.bool` alias was removed in NumPy 1.24.
        active_set_tmp = np.zeros(len(mask), dtype=bool)
        active_set_tmp[mask] = active_set
        active_set = active_set_tmp
        del active_set_tmp

    # Reapply weights to have correct unit
    X /= source_weighting[active_set][:, None]

    if return_residual:
        sel = [forward['sol']['row_names'].index(c)
               for c in gain_info['ch_names']]
        residual = deepcopy(evoked)
        residual = pick_channels_evoked(residual,
                                        include=gain_info['ch_names'])
        # subtract the (unwhitened) forward projection of the estimate
        residual.data -= np.dot(forward['sol']['data'][sel, :][:, active_set],
                                X)

    tmin = evoked.times[0]
    tstep = 1.0 / info['sfreq']
    out = _make_sparse_stc(X, active_set, forward, tmin, tstep)
    logger.info('[done]')

    if return_residual:
        out = out, residual

    return out
| {
"repo_name": "jaeilepp/eggie",
"path": "mne/inverse_sparse/mxne_inverse.py",
"copies": "3",
"size": "16230",
"license": "bsd-2-clause",
"hash": -7868014449688966000,
"line_mean": 36.5694444444,
"line_max": 80,
"alpha_frac": 0.5980899569,
"autogenerated": false,
"ratio": 3.7037882245549976,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00021753019184386955,
"num_lines": 432
} |
import numpy as np
from .utils import logger, verbose
@verbose
def read_dip(fname, verbose=None):
    """Read .dip file from Neuromag/xfit or MNE

    Parameters
    ----------
    fname : str
        The name of the .dip file.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    time : array, shape=(n_dipoles,)
        The time instants at which each dipole was fitted.
    pos : array, shape=(n_dipoles, 3)
        The dipoles positions in meters
    amplitude : array, shape=(n_dipoles,)
        The amplitude of the dipoles in nAm
    ori : array, shape=(n_dipoles, 3)
        The dipolar moments. Amplitude of the moment is in nAm.
    gof : array, shape=(n_dipoles,)
        The goodness of fit
    """
    try:
        data = np.loadtxt(fname, comments='%')
    except ValueError:
        # a parse failure likely means '#'-style comments; anything else
        # (e.g. a missing file) should propagate to the caller
        data = np.loadtxt(fname, comments='#')  # handle 2 types of comments...
    if data.ndim == 1:
        # a single dipole yields a 1D row; normalize to 2D
        data = data[None, :]
    logger.info("%d dipole(s) found" % len(data))
    time = data[:, 0]
    pos = 1e-3 * data[:, 2:5]  # put data in meters
    amplitude = data[:, 5]
    ori = data[:, 6:9]
    gof = data[:, 9]
    return time, pos, amplitude, ori, gof
| {
"repo_name": "effigies/mne-python",
"path": "mne/dipole.py",
"copies": "4",
"size": "1336",
"license": "bsd-3-clause",
"hash": -2855935113749935000,
"line_mean": 28.0434782609,
"line_max": 79,
"alpha_frac": 0.6017964072,
"autogenerated": false,
"ratio": 3.4081632653061225,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6009959672506123,
"avg_score": null,
"num_lines": null
} |
import os.path as op
from nose.tools import assert_true, assert_raises
import numpy as np
from numpy.testing import assert_array_almost_equal
from mne import io, Epochs, read_events, pick_types
from mne.decoding.csp import CSP
from mne.utils import requires_sklearn
# Paths to the small testing dataset shipped with the MNE io tests.
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
# Epoch window (seconds) and the two event ids used as class labels.
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)
# if stop is too small pca may fail in some cases, but we're okay on this file
start, stop = 0, 8
def test_csp():
    """Test Common Spatial Patterns algorithm on epochs
    """
    # Load the small test recording and epoch it on two event types.
    raw = io.Raw(raw_fname, preload=False)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    picks = picks[1:13:3]  # subselect a few channels to keep the test fast
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    epochs_data = epochs.get_data()
    n_channels = epochs_data.shape[1]

    n_components = 3
    csp = CSP(n_components=n_components)

    csp.fit(epochs_data, epochs.events[:, -1])
    y = epochs.events[:, -1]
    # fit_transform must agree with a fit followed by transform
    X = csp.fit_transform(epochs_data, y)
    assert_true(csp.filters_.shape == (n_channels, n_channels))
    assert_true(csp.patterns_.shape == (n_channels, n_channels))
    assert_array_almost_equal(csp.fit(epochs_data, y).transform(epochs_data),
                              X)

    # test init exception
    assert_raises(ValueError, csp.fit, epochs_data,
                  np.zeros_like(epochs.events))
    assert_raises(ValueError, csp.fit, epochs, y)
    assert_raises(ValueError, csp.transform, epochs, y)

    csp.n_components = n_components
    sources = csp.transform(epochs_data)
    assert_true(sources.shape[1] == n_components)
@requires_sklearn
def test_regularized_csp():
    """Test Common Spatial Patterns algorithm using regularized covariance
    """
    raw = io.Raw(raw_fname, preload=False)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    picks = picks[1:13:3]  # subselect a few channels to keep the test fast
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    epochs_data = epochs.get_data()
    n_channels = epochs_data.shape[1]

    n_components = 3
    # exercise every supported covariance regularization scheme
    reg_cov = [None, 0.05, 'lws', 'oas']
    for reg in reg_cov:
        csp = CSP(n_components=n_components, reg=reg)
        csp.fit(epochs_data, epochs.events[:, -1])
        y = epochs.events[:, -1]
        # fit_transform must agree with a fit followed by transform
        X = csp.fit_transform(epochs_data, y)
        assert_true(csp.filters_.shape == (n_channels, n_channels))
        assert_true(csp.patterns_.shape == (n_channels, n_channels))
        assert_array_almost_equal(csp.fit(epochs_data, y).
                                  transform(epochs_data), X)

        # test init exception
        assert_raises(ValueError, csp.fit, epochs_data,
                      np.zeros_like(epochs.events))
        assert_raises(ValueError, csp.fit, epochs, y)
        assert_raises(ValueError, csp.transform, epochs, y)

        csp.n_components = n_components
        sources = csp.transform(epochs_data)
        assert_true(sources.shape[1] == n_components)
| {
"repo_name": "aestrivex/mne-python",
"path": "mne/decoding/tests/test_csp.py",
"copies": "6",
"size": "3498",
"license": "bsd-3-clause",
"hash": -6284642169471464000,
"line_mean": 35.8210526316,
"line_max": 78,
"alpha_frac": 0.6309319611,
"autogenerated": false,
"ratio": 3.229916897506925,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6860848858606924,
"avg_score": null,
"num_lines": null
} |
import os.path as op
import pytest
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from mne import io, Epochs, read_events, pick_types
from mne.decoding.csp import CSP, _ajd_pham, SPoC
from mne.utils import requires_sklearn
# Paths to the small testing dataset shipped with the MNE io tests.
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
# Epoch window (seconds) and the two event ids used as class labels.
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)
# if stop is too small pca may fail in some cases, but we're okay on this file
start, stop = 0, 8
def simulate_data(target, n_trials=100, n_channels=10, random_state=42):
    """Simulate sensor data from an instantaneous mixing model.

    One statistical source is amplitude-modulated by ``target`` while the
    remaining sources act as weak noise; all sources are then projected
    through a random orthogonal mixing matrix.
    """
    rng = np.random.RandomState(random_state)
    # the left singular vectors of a random matrix form an orthogonal basis
    mixing, _, _ = np.linalg.svd(rng.randn(n_channels, n_channels))
    sources = rng.randn(n_trials, n_channels, 50)
    # modulate the first source trial-by-trial with sqrt(target)
    sources[:, 0] *= np.atleast_2d(np.sqrt(target)).T
    sources[:, 1:] *= 0.01  # keep the other sources as weak noise
    data = np.dot(mixing, sources).transpose((1, 0, 2))
    return data, mixing
@pytest.mark.slowtest
def test_csp():
    """Test Common Spatial Patterns algorithm on epochs."""
    raw = io.read_raw_fif(raw_fname, preload=False)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    picks = picks[2:12:3]  # subselect channels -> disable proj!
    raw.add_proj([], remove_existing=True)
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True, proj=False)
    epochs_data = epochs.get_data()
    n_channels = epochs_data.shape[1]
    y = epochs.events[:, -1]

    # Init
    pytest.raises(ValueError, CSP, n_components='foo', norm_trace=False)
    for reg in ['foo', -0.1, 1.1]:
        csp = CSP(reg=reg, norm_trace=False)
        pytest.raises(ValueError, csp.fit, epochs_data, epochs.events[:, -1])
    for reg in ['oas', 'ledoit_wolf', 0, 0.5, 1.]:
        CSP(reg=reg, norm_trace=False)
    for cov_est in ['foo', None]:
        pytest.raises(ValueError, CSP, cov_est=cov_est, norm_trace=False)
    pytest.raises(ValueError, CSP, norm_trace='foo')
    for cov_est in ['concat', 'epoch']:
        CSP(cov_est=cov_est, norm_trace=False)

    n_components = 3
    # Fit
    for norm_trace in [True, False]:
        csp = CSP(n_components=n_components, norm_trace=norm_trace)
        csp.fit(epochs_data, epochs.events[:, -1])
        assert_equal(len(csp.mean_), n_components)
        assert_equal(len(csp.std_), n_components)

    # Transform
    X = csp.fit_transform(epochs_data, y)
    sources = csp.transform(epochs_data)
    assert (sources.shape[1] == n_components)
    assert (csp.filters_.shape == (n_channels, n_channels))
    assert (csp.patterns_.shape == (n_channels, n_channels))
    assert_array_almost_equal(sources, X)

    # Test data exception
    pytest.raises(ValueError, csp.fit, epochs_data,
                  np.zeros_like(epochs.events))
    pytest.raises(ValueError, csp.fit, epochs, y)
    pytest.raises(ValueError, csp.transform, epochs)

    # Test plots
    epochs.pick_types(meg='mag')
    cmap = ('RdBu', True)
    components = np.arange(n_components)
    for plot in (csp.plot_patterns, csp.plot_filters):
        plot(epochs.info, components=components, res=12, show=False, cmap=cmap)

    # Test with more than 2 classes
    epochs = Epochs(raw, events, tmin=tmin, tmax=tmax, picks=picks,
                    event_id=dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4),
                    baseline=(None, 0), proj=False, preload=True)
    epochs_data = epochs.get_data()
    # NOTE: a duplicated `n_channels = epochs_data.shape[1]` assignment
    # was removed here
    n_channels = epochs_data.shape[1]
    for cov_est in ['concat', 'epoch']:
        csp = CSP(n_components=n_components, cov_est=cov_est,
                  norm_trace=False)
        csp.fit(epochs_data, epochs.events[:, 2]).transform(epochs_data)
        assert_equal(len(csp._classes), 4)
        assert_array_equal(csp.filters_.shape, [n_channels, n_channels])
        assert_array_equal(csp.patterns_.shape, [n_channels, n_channels])

    # Test average power transform
    n_components = 2
    assert (csp.transform_into == 'average_power')
    feature_shape = [len(epochs_data), n_components]
    X_trans = dict()
    for log in (None, True, False):
        csp = CSP(n_components=n_components, log=log, norm_trace=False)
        assert (csp.log is log)
        Xt = csp.fit_transform(epochs_data, epochs.events[:, 2])
        assert_array_equal(Xt.shape, feature_shape)
        X_trans[str(log)] = Xt
    # log=None => log=True
    assert_array_almost_equal(X_trans['None'], X_trans['True'])
    # Different normalization return different transform
    assert (np.sum((X_trans['True'] - X_trans['False']) ** 2) > 1.)
    # Check wrong inputs
    pytest.raises(ValueError, CSP, transform_into='average_power', log='foo')

    # Test csp space transform
    csp = CSP(transform_into='csp_space', norm_trace=False)
    assert (csp.transform_into == 'csp_space')
    for log in ('foo', True, False):
        pytest.raises(ValueError, CSP, transform_into='csp_space', log=log,
                      norm_trace=False)
    n_components = 2
    csp = CSP(n_components=n_components, transform_into='csp_space',
              norm_trace=False)
    Xt = csp.fit(epochs_data, epochs.events[:, 2]).transform(epochs_data)
    feature_shape = [len(epochs_data), n_components, epochs_data.shape[2]]
    assert_array_equal(Xt.shape, feature_shape)

    # Check mixing matrix on simulated data
    y = np.array([100] * 50 + [1] * 50)
    X, A = simulate_data(y)

    for cov_est in ['concat', 'epoch']:
        # fit csp
        csp = CSP(n_components=1, cov_est=cov_est, norm_trace=False)
        csp.fit(X, y)

        # check the first pattern match the mixing matrix
        # the sign might change
        corr = np.abs(np.corrcoef(csp.patterns_[0, :].T, A[:, 0])[0, 1])
        assert np.abs(corr) > 0.99

        # check output
        out = csp.transform(X)
        corr = np.abs(np.corrcoef(out[:, 0], y)[0, 1])
        assert np.abs(corr) > 0.95
@requires_sklearn
def test_regularized_csp():
    """Test Common Spatial Patterns algorithm using regularized covariance."""
    raw = io.read_raw_fif(raw_fname)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    picks = picks[1:13:3]  # subselect a few channels to keep the test fast
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    epochs_data = epochs.get_data()
    n_channels = epochs_data.shape[1]

    n_components = 3
    # exercise every supported covariance regularization scheme
    reg_cov = [None, 0.05, 'ledoit_wolf', 'oas']
    for reg in reg_cov:
        csp = CSP(n_components=n_components, reg=reg, norm_trace=False,
                  rank=None)
        csp.fit(epochs_data, epochs.events[:, -1])
        y = epochs.events[:, -1]
        # fit_transform must agree with a fit followed by transform
        X = csp.fit_transform(epochs_data, y)
        assert (csp.filters_.shape == (n_channels, n_channels))
        assert (csp.patterns_.shape == (n_channels, n_channels))
        assert_array_almost_equal(csp.fit(epochs_data, y).
                                  transform(epochs_data), X)

        # test init exception
        pytest.raises(ValueError, csp.fit, epochs_data,
                      np.zeros_like(epochs.events))
        pytest.raises(ValueError, csp.fit, epochs, y)
        pytest.raises(ValueError, csp.transform, epochs)

        csp.n_components = n_components
        sources = csp.transform(epochs_data)
        assert (sources.shape[1] == n_components)
@requires_sklearn
def test_csp_pipeline():
    """Check that CSP can be embedded in a scikit-learn Pipeline."""
    from sklearn.pipeline import Pipeline
    from sklearn.svm import SVC
    pipe = Pipeline([("CSP", CSP(reg=1, norm_trace=False)), ("SVC", SVC())])
    # nested step parameters must round-trip through set_params/get_params
    pipe.set_params(CSP__reg=0.2)
    assert pipe.get_params()["CSP__reg"] == 0.2
def test_ajd():
    """Test approximate joint diagonalization."""
    # The implementation should obtain the same
    # results as the Matlab implementation by Pham Dinh-Tuan.
    # Generate a set of covariance matrices for test purpose
    n_times, n_channels = 10, 3
    seed = np.random.RandomState(0)
    diags = 2.0 + 0.1 * seed.randn(n_times, n_channels)
    A = 2 * seed.rand(n_channels, n_channels) - 1  # random mixing matrix
    A /= np.atleast_2d(np.sqrt(np.sum(A ** 2, 1))).T  # unit-norm rows
    covmats = np.empty((n_times, n_channels, n_channels))
    for i in range(n_times):
        covmats[i] = np.dot(np.dot(A, np.diag(diags[i])), A.T)
    V, D = _ajd_pham(covmats)
    # Results obtained with original matlab implementation
    V_matlab = [[-3.507280775058041, -5.498189967306344, 7.720624541198574],
                [0.694689013234610, 0.775690358505945, -1.162043086446043],
                [-0.592603135588066, -0.598996925696260, 1.009550086271192]]
    assert_array_almost_equal(V, V_matlab)
def test_spoc():
    """Test SPoC."""
    X = np.random.randn(10, 10, 20)
    y = np.random.randn(10)

    # default transform returns average power per component
    spoc = SPoC(n_components=4)
    spoc.fit(X, y)
    Xt = spoc.transform(X)
    assert_array_equal(Xt.shape, [10, 4])
    # csp_space transform keeps the time dimension
    spoc = SPoC(n_components=4, transform_into='csp_space')
    spoc.fit(X, y)
    Xt = spoc.transform(X)
    assert_array_equal(Xt.shape, [10, 4, 20])
    assert_array_equal(spoc.filters_.shape, [10, 10])
    assert_array_equal(spoc.patterns_.shape, [10, 10])

    # check y
    pytest.raises(ValueError, spoc.fit, X, y * 0)

    # Check that doesn't take CSP-specific input
    pytest.raises(TypeError, SPoC, cov_est='epoch')

    # Check mixing matrix on simulated data
    rs = np.random.RandomState(42)
    y = rs.rand(100) * 50 + 1
    X, A = simulate_data(y)

    # fit spoc
    spoc = SPoC(n_components=1)
    spoc.fit(X, y)

    # check the first patterns match the mixing matrix
    corr = np.abs(np.corrcoef(spoc.patterns_[0, :].T, A[:, 0])[0, 1])
    assert np.abs(corr) > 0.99

    # check output
    out = spoc.transform(X)
    corr = np.abs(np.corrcoef(out[:, 0], y)[0, 1])
    assert np.abs(corr) > 0.85
| {
"repo_name": "adykstra/mne-python",
"path": "mne/decoding/tests/test_csp.py",
"copies": "3",
"size": "10603",
"license": "bsd-3-clause",
"hash": -7507074701161823000,
"line_mean": 36.5992907801,
"line_max": 79,
"alpha_frac": 0.6247288503,
"autogenerated": false,
"ratio": 3.0993861444022217,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5224114994702223,
"avg_score": null,
"num_lines": null
} |
import os.path as op
from nose.tools import assert_true, assert_raises, assert_equal
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from mne import io, Epochs, read_events, pick_types
from mne.decoding.csp import CSP, _ajd_pham
from mne.utils import requires_sklearn, slow_test
# Paths to the small testing dataset shipped with the MNE io tests.
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
# Epoch window (seconds) and the two event ids used as class labels.
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)
# if stop is too small pca may fail in some cases, but we're okay on this file
start, stop = 0, 8
@slow_test
def test_csp():
    """Test Common Spatial Patterns algorithm on epochs
    """
    raw = io.read_raw_fif(raw_fname, preload=False)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    picks = picks[2:12:3]  # subselect channels -> disable proj!
    raw.add_proj([], remove_existing=True)
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True, proj=False)
    epochs_data = epochs.get_data()
    n_channels = epochs_data.shape[1]
    y = epochs.events[:, -1]

    # Init
    assert_raises(ValueError, CSP, n_components='foo')
    for reg in ['foo', -0.1, 1.1]:
        assert_raises(ValueError, CSP, reg=reg)
    for reg in ['oas', 'ledoit_wolf', 0, 0.5, 1.]:
        CSP(reg=reg)
    for cov_est in ['foo', None]:
        assert_raises(ValueError, CSP, cov_est=cov_est)
    for cov_est in ['concat', 'epoch']:
        CSP(cov_est=cov_est)

    n_components = 3
    csp = CSP(n_components=n_components)

    # Fit
    csp.fit(epochs_data, epochs.events[:, -1])
    assert_equal(len(csp.mean_), n_components)
    assert_equal(len(csp.std_), n_components)

    # Transform
    X = csp.fit_transform(epochs_data, y)
    sources = csp.transform(epochs_data)
    assert_true(sources.shape[1] == n_components)
    assert_true(csp.filters_.shape == (n_channels, n_channels))
    assert_true(csp.patterns_.shape == (n_channels, n_channels))
    assert_array_almost_equal(sources, X)

    # Test data exception
    assert_raises(ValueError, csp.fit, epochs_data,
                  np.zeros_like(epochs.events))
    assert_raises(ValueError, csp.fit, epochs, y)
    assert_raises(ValueError, csp.transform, epochs)

    # Test plots
    epochs.pick_types(meg='mag')
    cmap = ('RdBu', True)
    components = np.arange(n_components)
    for plot in (csp.plot_patterns, csp.plot_filters):
        plot(epochs.info, components=components, res=12, show=False, cmap=cmap)

    # Test covariance estimation methods (results should be roughly equal)
    np.random.seed(0)
    csp_epochs = CSP(cov_est="epoch")
    csp_epochs.fit(epochs_data, y)
    for attr in ('filters_', 'patterns_'):
        corr = np.corrcoef(getattr(csp, attr).ravel(),
                           getattr(csp_epochs, attr).ravel())[0, 1]
        assert_true(corr >= 0.94)

    # Test with more than 2 classes
    epochs = Epochs(raw, events, tmin=tmin, tmax=tmax, picks=picks,
                    event_id=dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4),
                    baseline=(None, 0), proj=False, preload=True)
    epochs_data = epochs.get_data()
    # NOTE: a duplicated `n_channels = epochs_data.shape[1]` assignment
    # was removed here
    n_channels = epochs_data.shape[1]
    for cov_est in ['concat', 'epoch']:
        csp = CSP(n_components=n_components, cov_est=cov_est)
        csp.fit(epochs_data, epochs.events[:, 2]).transform(epochs_data)
        assert_equal(len(csp._classes), 4)
        assert_array_equal(csp.filters_.shape, [n_channels, n_channels])
        assert_array_equal(csp.patterns_.shape, [n_channels, n_channels])

    # Test average power transform
    n_components = 2
    assert_true(csp.transform_into == 'average_power')
    feature_shape = [len(epochs_data), n_components]
    X_trans = dict()
    for log in (None, True, False):
        csp = CSP(n_components=n_components, log=log)
        assert_true(csp.log is log)
        Xt = csp.fit_transform(epochs_data, epochs.events[:, 2])
        assert_array_equal(Xt.shape, feature_shape)
        X_trans[str(log)] = Xt
    # log=None => log=True
    assert_array_almost_equal(X_trans['None'], X_trans['True'])
    # Different normalization return different transform
    assert_true(np.sum((X_trans['True'] - X_trans['False']) ** 2) > 1.)
    # Check wrong inputs
    assert_raises(ValueError, CSP, transform_into='average_power', log='foo')

    # Test csp space transform
    csp = CSP(transform_into='csp_space')
    assert_true(csp.transform_into == 'csp_space')
    for log in ('foo', True, False):
        assert_raises(ValueError, CSP, transform_into='csp_space', log=log)
    n_components = 2
    csp = CSP(n_components=n_components, transform_into='csp_space')
    Xt = csp.fit(epochs_data, epochs.events[:, 2]).transform(epochs_data)
    feature_shape = [len(epochs_data), n_components, epochs_data.shape[2]]
    assert_array_equal(Xt.shape, feature_shape)
@requires_sklearn
def test_regularized_csp():
    """Test Common Spatial Patterns algorithm using regularized covariance."""
    raw = io.read_raw_fif(raw_fname)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    picks = picks[1:13:3]  # subselect a few channels to keep the test fast
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    epochs_data = epochs.get_data()
    n_channels = epochs_data.shape[1]

    n_components = 3
    # exercise every supported covariance regularization scheme
    reg_cov = [None, 0.05, 'ledoit_wolf', 'oas']
    for reg in reg_cov:
        csp = CSP(n_components=n_components, reg=reg)
        csp.fit(epochs_data, epochs.events[:, -1])
        y = epochs.events[:, -1]
        # fit_transform must agree with a fit followed by transform
        X = csp.fit_transform(epochs_data, y)
        assert_true(csp.filters_.shape == (n_channels, n_channels))
        assert_true(csp.patterns_.shape == (n_channels, n_channels))
        assert_array_almost_equal(csp.fit(epochs_data, y).
                                  transform(epochs_data), X)

        # test init exception
        assert_raises(ValueError, csp.fit, epochs_data,
                      np.zeros_like(epochs.events))
        assert_raises(ValueError, csp.fit, epochs, y)
        assert_raises(ValueError, csp.transform, epochs)

        csp.n_components = n_components
        sources = csp.transform(epochs_data)
        assert_true(sources.shape[1] == n_components)
@requires_sklearn
def test_csp_pipeline():
    """Check that CSP can be embedded in a scikit-learn Pipeline.
    """
    from sklearn.pipeline import Pipeline
    from sklearn.svm import SVC
    pipe = Pipeline([("CSP", CSP(reg=1)), ("SVC", SVC())])
    # nested step parameters must round-trip through set_params/get_params
    pipe.set_params(CSP__reg=0.2)
    assert_true(pipe.get_params()["CSP__reg"] == 0.2)
def test_ajd():
    """Test if Approximate joint diagonalization implementation obtains same
    results as the Matlab implementation by Pham Dinh-Tuan.
    """
    # Build a set of covariance matrices that share the mixing matrix A
    n_times, n_channels = 10, 3
    rng = np.random.RandomState(0)
    diags = 2.0 + 0.1 * rng.randn(n_times, n_channels)
    A = 2 * rng.rand(n_channels, n_channels) - 1
    A /= np.atleast_2d(np.sqrt(np.sum(A ** 2, 1))).T
    covmats = np.empty((n_times, n_channels, n_channels))
    for idx in range(n_times):
        covmats[idx] = np.dot(np.dot(A, np.diag(diags[idx])), A.T)
    V, D = _ajd_pham(covmats)
    # Reference values obtained with the original matlab implementation
    V_matlab = [[-3.507280775058041, -5.498189967306344, 7.720624541198574],
                [0.694689013234610, 0.775690358505945, -1.162043086446043],
                [-0.592603135588066, -0.598996925696260, 1.009550086271192]]
    assert_array_almost_equal(V, V_matlab)
| {
"repo_name": "nicproulx/mne-python",
"path": "mne/decoding/tests/test_csp.py",
"copies": "2",
"size": "8007",
"license": "bsd-3-clause",
"hash": -4774235602375425000,
"line_mean": 38.4433497537,
"line_max": 79,
"alpha_frac": 0.6331959535,
"autogenerated": false,
"ratio": 3.1449332285938727,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4778129182093872,
"avg_score": null,
"num_lines": null
} |
import os.path as op
from nose.tools import assert_true, assert_raises
import numpy as np
from numpy.testing import assert_array_almost_equal
from mne import io, Epochs, read_events, pick_types
from mne.decoding.csp import CSP
from mne.utils import requires_sklearn
# Paths to the MNE sample testing data used by the tests in this module.
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
# Epoch window in seconds, and event ids used to build the epochs.
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)
# if stop is too small pca may fail in some cases, but we're okay on this file
start, stop = 0, 8
def test_csp():
    """Test Common Spatial Patterns algorithm on epochs
    """
    raw = io.Raw(raw_fname, preload=False)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')[2:9:3]
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    data = epochs.get_data()
    labels = epochs.events[:, -1]
    n_channels = data.shape[1]
    n_components = 3
    csp = CSP(n_components=n_components)
    csp.fit(data, labels)
    transformed = csp.fit_transform(data, labels)
    # filters_/patterns_ are square channels-by-channels matrices
    assert_true(csp.filters_.shape == (n_channels, n_channels))
    assert_true(csp.patterns_.shape == (n_channels, n_channels))
    assert_array_almost_equal(csp.fit(data, labels).transform(data),
                              transformed)
    # invalid inputs must raise
    assert_raises(ValueError, csp.fit, data,
                  np.zeros_like(epochs.events))
    assert_raises(ValueError, csp.fit, epochs, labels)
    assert_raises(ValueError, csp.transform, epochs, labels)
    csp.n_components = n_components
    sources = csp.transform(data)
    assert_true(sources.shape[1] == n_components)
    # the topomap plots only need the magnetometers
    epochs.pick_types(meg='mag', copy=False)
    comps = np.arange(n_components)
    csp.plot_patterns(epochs.info, components=comps, res=12,
                      show=False)
    csp.plot_filters(epochs.info, components=comps, res=12,
                     show=False)
@requires_sklearn
def test_regularized_csp():
    """Test Common Spatial Patterns algorithm using regularized covariance
    """
    raw = io.Raw(raw_fname, preload=False)
    events = read_events(event_name)
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')[1:13:3]
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    data = epochs.get_data()
    labels = epochs.events[:, -1]
    n_channels = data.shape[1]
    n_components = 3
    for reg in (None, 0.05, 'lws', 'oas'):
        csp = CSP(n_components=n_components, reg=reg)
        csp.fit(data, labels)
        transformed = csp.fit_transform(data, labels)
        assert_true(csp.filters_.shape == (n_channels, n_channels))
        assert_true(csp.patterns_.shape == (n_channels, n_channels))
        assert_array_almost_equal(csp.fit(data, labels).transform(data),
                                  transformed)
        # invalid inputs must raise
        assert_raises(ValueError, csp.fit, data,
                      np.zeros_like(epochs.events))
        assert_raises(ValueError, csp.fit, epochs, labels)
        assert_raises(ValueError, csp.transform, epochs, labels)
        csp.n_components = n_components
        assert_true(csp.transform(data).shape[1] == n_components)
| {
"repo_name": "antiface/mne-python",
"path": "mne/decoding/tests/test_csp.py",
"copies": "2",
"size": "3829",
"license": "bsd-3-clause",
"hash": -4078091132965676500,
"line_mean": 34.785046729,
"line_max": 78,
"alpha_frac": 0.6291459911,
"autogenerated": false,
"ratio": 3.283876500857633,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4913022491957633,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alexandre'
#from .. import strings
# def test_filter_objlist(olist, fieldname, fieldval):
# """
# Returns a list of the objects in olist that have a fieldname valued as fieldval
#
# @param olist: list of objects
# @param fieldname: string
# @param fieldval: anything
#
# @return: list of objets
# """
#
# return [x for x in olist if getattr(x, fieldname) == fieldval]
# def pretty_mapping(mapping, getterfunc=None):
# """
# Make pretty string from mapping
#
# Adjusts text column to print values on basis of longest key.
# Probably only sensible if keys are mainly strings.
#
# You can pass in a callable that does clever things to get the values
# out of the mapping, given the names. By default, we just use
# ``__getitem__``
#
# This function has been copied from NiBabel:
# http://nipy.org/nibabel/
# Which has a MIT License
#
# Parameters
# ----------
# :param mapping : mapping
# implementing iterator returning keys and .items()
# :param getterfunc : None or callable
# callable taking two arguments, ``obj`` and ``key`` where ``obj``
# is the passed mapping. If None, just use ``lambda obj, key:
# obj[key]``
#
# Returns
# -------
# :return str : string
#
# Examples
# --------
# >>> d = {'a key': 'a value'}
# >>> print(pretty_mapping(d))
# a key : a value
# >>> class C(object): # to control ordering, show get_ method
# ... def __iter__(self):
# ... return iter(('short_field','longer_field'))
# ... def __getitem__(self, key):
# ... if key == 'short_field':
# ... return 0
# ... if key == 'longer_field':
# ... return 'str'
# ... def get_longer_field(self):
# ... return 'method string'
# >>> def getter(obj, key):
# ... # Look for any 'get_<name>' methods
# ... try:
# ... return obj.__getattribute__('get_' + key)()
# ... except AttributeError:
# ... return obj[key]
# >>> print(pretty_mapping(C(), getter))
# short_field : 0
# longer_field : method string
# """
# import numpy as np
#
# if getterfunc is None:
# getterfunc = lambda obj, key: obj[key]
# lens = [len(str(name)) for name in mapping]
# mxlen = np.max(lens)
# fmt = '%%-%ds : %%s' % mxlen
# out = [fmt % (name, getterfunc(mapping, name)) for name in mapping]
# #for name in mapping:
# # value = getterfunc(mapping, name)
# # out.append(fmt % (name, value))
# return '\n'.join(out)
#
#
# def filter_list(lst, filt):
# """
# :param lst: list
# :param filter: function
# Unary string filter function
# :return: list
# List of strings that passed the filter
#
# :example
# l = ['12123123', 'N123213']
# filt = re.compile('\d*').match
# nu_l = list_filter(l, filt)
# """
# return [m for s in lst for m in (filt(s),) if m]
#
#
# def match_list(lst, pattern, group_names=[]):
# """
# @param lst: list of strings
#
# @param regex: string
#
# @param group_names: list of strings
# See re.MatchObject group docstring
#
# @return: list of strings
# Filtered list, with the strings that match the pattern
# """
# filtfn = re.compile(pattern).match
# filtlst = filter_list(lst, filtfn)
# if group_names:
# return [m.group(group_names) for m in filtlst]
# else:
# return [m.string for m in filtlst]
#
#
# def search_list(lst, pattern):
# """
# @param pattern: string
# @param lst: list of strings
# @return: list of strings
# Filtered lists with the strings in which the pattern is found.
#
# """
# filt = re.compile(pattern).search
# return filter_list(lst, filt)
#
#
# def append_to_keys(adict, preffix):
# """
# @param adict:
# @param preffix:
# @return:
# """
# return {preffix + str(key): (value if isinstance(value, dict) else value)
# for key, value in list(adict.items())}
#
#
# def append_to_list(lst, preffix):
# """
# @param lst:
# @param preffix:
# @return:
# """
# return [preffix + str(item) for item in lst] | {
"repo_name": "Neurita/boyle",
"path": "tests/test_strings.py",
"copies": "1",
"size": "4299",
"license": "bsd-3-clause",
"hash": 4505393461648565000,
"line_mean": 27.4768211921,
"line_max": 91,
"alpha_frac": 0.5545475692,
"autogenerated": false,
"ratio": 3.168017686072218,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9220163393289302,
"avg_score": 0.00048037239658316536,
"num_lines": 151
} |
__author__ = 'Alexandre'
import cPickle
import numpy as np
import os
import random
from math import exp
from time import time, sleep
from GSkernel import load_AA_matrix, GS_kernel, compute_psi_dict
from GSkernel_fast import GS_gram_matrix_fast
def GS_kernel_naive(str1, str2, sigmaPos, sigmaAA, L, amino_acids, aa_descriptors):
    """Reference (naive) implementation of the GS string kernel.

    Every pair of substrings of length 1..L from str1 and str2 contributes
    an RBF term on the positional offset times an RBF term on the Euclidean
    distance between the concatenated amino-acid descriptors.

    str1, str2     -- peptide strings
    sigmaPos       -- bandwidth of the positional term
    sigmaAA        -- bandwidth of the descriptor term
    L              -- maximum substring length compared
    amino_acids    -- sequence of amino-acid letters
    aa_descriptors -- 2D array of descriptors, one row per letter
    Returns the scalar kernel value.

    FIX: the Python-2-only ``xrange`` was replaced by ``range`` (identical
    behavior in these loops), so the function runs under Python 3 as well.
    """
    k_value = 0.0
    denom_p = -2.0 * (sigmaPos ** 2)
    denom_c = -2.0 * (sigmaAA ** 2)
    # map each amino-acid letter to its row index in aa_descriptors
    aa_indexes = {}
    for i in range(len(amino_acids)):
        aa_indexes[amino_acids[i]] = i
    for l in range(L):
        l += 1  # substring lengths run 1..L inclusive
        for i in range(len(str1) - l + 1):
            # Compute phi^l(x_i+1, .., x_i+l): concatenated descriptors
            s1 = str1[i:i + l]
            phi_l_str1 = np.array([])
            for aa in s1:
                phi_aa = aa_descriptors[aa_indexes[aa]]
                phi_l_str1 = np.append(phi_l_str1, phi_aa)
            for j in range(len(str2) - l + 1):
                dist_p = i - j
                # Compute phi^l(x_j+1, .., x_j+l)
                s2 = str2[j:j + l]
                phi_l_str2 = np.array([])
                for aa in s2:
                    phi_aa = aa_descriptors[aa_indexes[aa]]
                    phi_l_str2 = np.append(phi_l_str2, phi_aa)
                dist_c = np.linalg.norm(phi_l_str1 - phi_l_str2)
                k_value += exp((dist_p ** 2) / denom_p) * exp((dist_c ** 2) / denom_c)
    return k_value
def GS_kernel_precomp_P(str1, str2, psiDict, sigmaPos, sigmaAA, L, P):
    """GS kernel value using a precomputed positional weight matrix P.

    psiDict maps an amino-acid pair to the squared Euclidean distance
    between their descriptors; pairs missing from the dict default to
    0.0 for identical letters and 4.0 otherwise.

    FIX: ``xrange`` replaced by ``range`` for Python 2/3 compatibility,
    and the bare ``except:`` narrowed to ``except KeyError:`` (the only
    exception the dict lookup can raise here).
    """
    len_str1 = len(str1)
    len_str2 = len(str2)
    # A[i, j] = RBF similarity between letters str1[i] and str2[j]
    A = np.zeros((len_str1, len_str2))
    for i in range(len_str1):
        for j in range(len_str2):
            try:
                A[i, j] = psiDict[str1[i], str2[j]]
            except KeyError:
                # unknown pair: default squared descriptor distance
                if str1[i] != str2[j]:
                    A[i, j] = 4.0
    A /= -2.0 * (sigmaAA ** 2.0)
    A = np.exp(A)
    # B[i, j] accumulates, for each substring length l = 1..L, the product
    # of A along the diagonal run starting at (i, j)
    B = np.zeros((len_str1, len_str2))
    for i in range(len_str1):
        for j in range(len_str2):
            tmp = 1.0
            for l in range(L):
                if i + l < len_str1 and j + l < len_str2:
                    tmp *= A[i + l, j + l]
                    B[i, j] += tmp
    return np.sum(P * B)
def precompute_P(len_x_1, len_x_2, sigma_pos):
    """Positional weight matrix P[i, j] = exp(-(i - j)^2 / (2 * sigma_pos^2)).

    FIX: the original filled the matrix with two Python loops over the
    Python-2-only ``xrange``; this version computes the same values with a
    single vectorized outer difference, which also runs under Python 3.
    """
    # outer difference i - j for every index pair
    P = np.subtract.outer(np.arange(len_x_1, dtype=float),
                          np.arange(len_x_2, dtype=float))
    P = np.square(P)
    P /= -2.0 * (sigma_pos ** 2.0)
    P = np.exp(P)
    return P
def GS_gram_matrix(kernel_func, X, amino_acid_property_file, sigma_position=1.0, sigma_amino_acid=1.0,
                   substring_length=2):
    """Compute the symmetric Gram matrix of the GS kernel over peptides X.

    kernel_func selects the implementation: 'GS_kernel_naive',
    'GS_kernel_precomp_P', 'GS_kernel_fast' or 'GS_kernel_fast_approx'.
    amino_acid_property_file is the path to the amino-acid descriptor file.
    Returns a len(X) x len(X) numpy array.

    FIX: ``xrange`` replaced by ``range`` for Python 2/3 compatibility.
    """
    if kernel_func == 'GS_kernel_naive' or kernel_func == 'GS_kernel_precomp_P':
        # Load amino acids descriptors
        (amino_acids, aa_descriptors) = load_AA_matrix(amino_acid_property_file)
        # For every amino acids couple (a_1, a_2) psi_dict is a hash table
        # that contains the squared Euclidean distance between the
        # descriptors of a_1 and a_2
        psi_dict = compute_psi_dict(amino_acids, aa_descriptors)
        # Declaration of the Gram matrix
        K = np.zeros((len(X), len(X)))
        maxLen = max([len(s) for s in X])
        # NOTE(review): P is maxLen x maxLen while GS_kernel_precomp_P
        # multiplies it elementwise with a len(str1) x len(str2) matrix,
        # so this path assumes all strings in X share one length -- confirm
        # before using variable-length input.
        P = precompute_P(maxLen, maxLen, sigma_position)
    if kernel_func == 'GS_kernel_naive':
        # Fill the symmetric matrix: diagonal, then mirror the lower triangle
        for i in range(len(X)):
            K[i, i] = GS_kernel_naive(X[i],
                                      X[i],
                                      sigma_position,
                                      sigma_amino_acid,
                                      substring_length,
                                      amino_acids,
                                      aa_descriptors)
            for j in range(i):
                K[i, j] = GS_kernel_naive(X[i],
                                          X[j],
                                          sigma_position,
                                          sigma_amino_acid,
                                          substring_length,
                                          amino_acids,
                                          aa_descriptors)
                K[j, i] = K[i, j]
    elif kernel_func == 'GS_kernel_precomp_P':
        for i in range(len(X)):
            K[i, i] = GS_kernel_precomp_P(X[i],
                                          X[i],
                                          psi_dict,
                                          sigma_position,
                                          sigma_amino_acid,
                                          substring_length,
                                          P)
            for j in range(i):
                K[i, j] = GS_kernel_precomp_P(X[i],
                                              X[j],
                                              psi_dict,
                                              sigma_position,
                                              sigma_amino_acid,
                                              substring_length,
                                              P)
                K[j, i] = K[i, j]
    elif kernel_func == 'GS_kernel_fast':
        K = GS_gram_matrix_fast(X=X,
                                Y=X,
                                amino_acid_property_file=amino_acid_property_file,
                                sigma_position=sigma_position,
                                sigma_amino_acid=sigma_amino_acid,
                                substring_length=substring_length,
                                approximate=False,
                                normalize_matrix=False)
    elif kernel_func == 'GS_kernel_fast_approx':
        K = GS_gram_matrix_fast(X=X,
                                Y=X,
                                amino_acid_property_file=amino_acid_property_file,
                                sigma_position=sigma_position,
                                sigma_amino_acid=sigma_amino_acid,
                                substring_length=substring_length,
                                approximate=True,
                                normalize_matrix=False)
    # NOTE(review): an unrecognized kernel_func falls through to a NameError
    # here (K never assigned) -- same as the original behavior.
    return K
def generate_peptides(amino_acids, peptide_length, n_peptides):
    """Return n_peptides random strings of length peptide_length drawn
    (with replacement) from the letters of amino_acids.

    FIX: ``xrange`` replaced by ``range`` for Python 2/3 compatibility;
    the sequence of random.choice() calls is unchanged.
    """
    return [''.join(random.choice(amino_acids) for _ in range(peptide_length))
            for _ in range(n_peptides)]
def pickle(filename, data):
    """Serialize ``data`` to ``filename`` with cPickle's highest protocol.

    NOTE: the name shadows the stdlib ``pickle`` module; kept for backward
    compatibility with the existing callers in this script.
    FIX: use a context manager so the file is closed even if dump() raises.
    """
    with open(filename, 'wb') as f:
        cPickle.dump(data, f, cPickle.HIGHEST_PROTOCOL)
def benchmark1():
    # Benchmark: median runtime of the naive vs precomputed-P kernels as a
    # function of the maximum substring length L, printed as MATLAB plot code.
    n_peptides = 50
    peptide_length = 15
    sigma_pos = 0.5
    sigma_aa = 0.5
    L_range = range(1, 16)
    n_run_average = 5  # runs per (L, kernel); the median is kept
    X = generate_peptides('ARNDCQEGHILKMFPSTWYVBZX*', peptide_length=peptide_length, n_peptides=n_peptides)
    results = {}
    kernel_funcs = ['GS_kernel_naive', 'GS_kernel_precomp_P']
    for kernel in kernel_funcs:
        results[kernel] = []
    for L in L_range:
        print 'L = ', L
        for kernel in kernel_funcs:
            runtimes = []
            for i in xrange(n_run_average):
                t = time()
                GS_gram_matrix(kernel, X, '../amino_acids_matrix/AA.blosum50.dat', sigma_position=sigma_pos,
                               sigma_amino_acid=sigma_aa, substring_length=L)
                elapsed = time() - t
                runtimes.append(elapsed)
            # median is more robust to scheduling noise than the mean
            result = np.median(runtimes)
            results[kernel].append(result)
            print kernel, ' completed in ', result, ' seconds (average over', n_run_average, 'runs).'
            sleep(0.1)  # allow to switch CPU
        #Checkpoint! Save intermediate results
        pickle('benchmark1_results_L=' + str(L) + '.pkl', results)
    print
    print 'Saving results...'
    pickle('benchmark1_results.pkl', results)
    # remove the per-L checkpoint files now that the final pickle exists
    for f in os.listdir('.'):
        import re
        if re.search('benchmark1_results_L', f):
            os.remove(f)
    print 'Done.'
    print
    # Emit MATLAB code that plots one runtime curve per kernel
    print 'Matlab:'
    print '-' * 50
    print 'Lrange =', L_range, ';'
    for kernel in kernel_funcs:
        print kernel, '=', results[kernel], ';'
    print 'clf;'
    print 'hold all;'
    for kernel in kernel_funcs:
        print 'plot(Lrange,', kernel, ');'
    print "legend('" + "','".join([x for x in kernel_funcs]) + "');"
def benchmark2():
    # Benchmark: median runtime of the fast (exact vs approximate) kernels as
    # a function of the positional bandwidth sigma_pos, printed as MATLAB code.
    n_peptides = 1000
    peptide_length = 100
    sigma_pos_range = np.arange(0.1, 30.0, 0.5)
    sigma_aa = 0.5
    L = 2
    n_run_average = 15  # runs per (sigma_pos, kernel); the median is kept
    results = {}
    kernel_funcs = ['GS_kernel_fast', 'GS_kernel_fast_approx']
    for kernel in kernel_funcs:
        results[kernel] = []
    for sigma_pos in sigma_pos_range:
        print 'sigma_pos = ', sigma_pos
        for kernel in kernel_funcs:
            runtimes = []
            for i in xrange(n_run_average):
                # fresh random peptides for every run
                X = generate_peptides('ARNDCQEGHILKMFPSTWYVBZX*', peptide_length=peptide_length, n_peptides=n_peptides)
                t = time()
                GS_gram_matrix(kernel, X, '../amino_acids_matrix/AA.blosum50.dat', sigma_position=sigma_pos,
                               sigma_amino_acid=sigma_aa, substring_length=L)
                elapsed = time() - t
                runtimes.append(elapsed)
            result = np.median(runtimes)
            results[kernel].append(result)
            print kernel, ' completed in ', result, ' seconds (average over', n_run_average, 'runs).'
        #Checkpoint! Save intermediate results
        pickle('benchmark2_results_sigma_pos=' + str(sigma_pos) + '.pkl', results)
    print
    print 'Saving results...'
    pickle('benchmark2_results.pkl', results)
    # remove the per-sigma checkpoint files now that the final pickle exists
    for f in os.listdir('.'):
        import re
        if re.search('benchmark2_results_sigma_pos', f):
            os.remove(f)
    print 'Done.'
    print
    # Emit MATLAB code that plots one runtime curve per kernel
    print 'Matlab:'
    print '-' * 50
    print 'sigma_pos_range =', list(sigma_pos_range), ';'
    for kernel in kernel_funcs:
        print kernel, '=', results[kernel], ';'
    print 'clf;'
    print 'hold all;'
    for kernel in kernel_funcs:
        print 'plot(sigma_pos_range, ', kernel, ');'
    print "legend('" + "','".join([x for x in kernel_funcs]) + "');"
if __name__ == '__main__':
    # Only the first benchmark runs by default; uncomment to run the second.
    benchmark1()
    #benchmark2()
"repo_name": "aldro61/microbiome-summer-school-2017",
"path": "exercises/code/GSkernel_source/benchmark/bench.py",
"copies": "2",
"size": "9314",
"license": "mit",
"hash": -411378807522712060,
"line_mean": 31.1206896552,
"line_max": 119,
"alpha_frac": 0.5155679622,
"autogenerated": false,
"ratio": 3.3371551415263347,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4852723103726334,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alexandre'
import os
import os.path as op
import logging
import subprocess
from boyle.nifti.storage import save_niigz
from boyle.files.names import get_temp_file, get_temp_dir
log = logging.getLogger(__name__)
class FslViewCaller(object):
    """Collect NIfTI volumes (paths or in-memory data) and open them in FSLView.

    In-memory volumes are written to a private temporary directory that is
    removed by close().
    """
    # Resolved at class-definition time. FIX: using environ.get avoids a
    # KeyError at import when FSLDIR is unset; failure is deferred to show().
    fslview_bin = op.join(os.environ.get('FSLDIR', ''), 'bin', 'fslview')

    def __init__(self):
        self._tmpdir = get_temp_dir('fslviewcaller_')
        self._volume_files = set()      # user-provided volume paths
        self._tmp_volume_files = set()  # temp files created by add_volume()

    def add_volume_from_path(self, nii_path):
        """Register an existing NIfTI file to be shown (logs if missing)."""
        if not op.exists(nii_path):
            log.error('File {} not found.'.format(nii_path))
        self._add_volume_from_path(nii_path, is_tmp_file=False)

    def add_volume(self, vol_data, affine=None, header=None):
        """Save an in-memory volume to a temporary .nii.gz and register it."""
        tmp_file = get_temp_file(self._tmpdir.name, suffix='.nii.gz')
        save_niigz(tmp_file.name, vol_data, affine, header)
        # NOTE(review): this registers the file object returned by
        # get_temp_file, not its .name -- confirm get_temp_file's return type.
        self._add_volume_from_path(tmp_file, is_tmp_file=True)

    def _add_volume_from_path(self, nii_path, is_tmp_file):
        # keep temp files apart so close() only removes what we created
        if is_tmp_file:
            self._tmp_volume_files.add(nii_path)
        else:
            self._volume_files.add(nii_path)

    def show(self):
        """Launch fslview with every registered volume (blocks until exit)."""
        fslview_args = [self.fslview_bin]
        fslview_args.extend(self._volume_files)
        fslview_args.extend(self._tmp_volume_files)
        subprocess.call(fslview_args)

    def close(self):
        """Delete the temporary volume files and the temporary directory."""
        import shutil
        volf = None
        try:
            for volf in self._tmp_volume_files:
                os.remove(volf)
        except OSError:
            # BUGFIX: was self.__name__ (instances have no __name__), which
            # raised AttributeError inside the handler and masked the error.
            log.exception('Error closing {} on deleting '
                          'file {}.'.format(type(self).__name__, volf))
            raise
        try:
            shutil.rmtree(self._tmpdir.name)
        except OSError:
            # BUGFIX: the message had two '{}' placeholders but only one
            # format argument, which raised IndexError inside the handler.
            log.exception('Error closing {} on deleting '
                          'temp folder {}.'.format(type(self).__name__,
                                                   self._tmpdir.name))
| {
"repo_name": "Neurita/cajal",
"path": "cajal/fslview.py",
"copies": "1",
"size": "1857",
"license": "bsd-3-clause",
"hash": 1731205962660581400,
"line_mean": 29.95,
"line_max": 69,
"alpha_frac": 0.5875067313,
"autogenerated": false,
"ratio": 3.3580470162748646,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9421418098544136,
"avg_score": 0.004827129806145636,
"num_lines": 60
} |
__author__ = 'alexandre'
import os.path as op
from datetime import datetime, timedelta
from collections import Counter
from operator import itemgetter
import dataset
import sqlalchemy
class VoterAlreadyVoted(Exception):
    """A voter has already voted (not raised in the code visible here)."""
    pass


class VoteRoundNotFound(Exception):
    """No vote round exists with the requested code."""
    pass


class VoteRoundIsFinished(Exception):
    """The target vote round is already closed."""
    pass


class MoreThanOneVoteRoundFound(Exception):
    """Several vote rounds unexpectedly share the same timestamp."""
    pass


class VoteRoundAlreadyDone(Exception):
    """A vote round with the given code already exists."""
    pass
class VoteRounds(object):
    """ A class that saves different voting rounds in SQLite. """

    def __init__(self, db_file_path):
        """Open (creating and initializing if missing) the SQLite file."""
        if not op.exists(db_file_path):
            self._init_database(db_file_path)
        self._db = self.get_db_connection(db_file_path)

    @staticmethod
    def get_db_connection(db_file_path):
        """Return a ``dataset`` connection to the SQLite file."""
        # the original wrapped this in a no-op try/except that only re-raised
        return dataset.connect('sqlite:///{}'.format(db_file_path))

    def _init_database(self, db_file_path):
        """Create the 'vote' and 'vote_round' tables, columns and indexes."""
        db = self.get_db_connection(db_file_path)
        vote = db.create_table('vote')
        vote.create_column('round_code', sqlalchemy.String(10))
        vote.create_column('voter', sqlalchemy.String(50))
        vote.create_column('positives', sqlalchemy.SmallInteger)
        vote.create_column('negatives', sqlalchemy.SmallInteger)
        vote.create_column('absents', sqlalchemy.SmallInteger)
        vote.create_index('round_code', 'round_code_idx')
        vote_round = db.create_table('vote_round', primary_id='code', primary_type='String(10)')
        vote_round.create_column('topic', sqlalchemy.String(200))
        vote_round.create_column('start_date', sqlalchemy.DateTime)
        vote_round.create_column('end_date', sqlalchemy.DateTime)
        vote_round.create_column('is_finished', sqlalchemy.Boolean)
        vote_round.create_column('has_deadline', sqlalchemy.Boolean)
        vote_round.create_column('manager', sqlalchemy.String(50))
        vote_round.create_column('timestamp', sqlalchemy.String(20))
        vote_round.create_index('timestamp', 'timestamp_idx')

    # ------------------------------------------------------------------------
    # VOTE ROUNDS
    # ------------------------------------------------------------------------
    def get_all_vote_rounds(self):
        """Return an iterator over every vote round row."""
        return self._db['vote_round'].all()

    def get_all_open_vote_rounds(self):
        """Return the list of rounds that are not finished."""
        return [poll for poll in self.get_all_vote_rounds()
                if not self.is_round_finished(poll['code'])]

    def find_vote_round(self, round_code):
        """Return the round row for ``round_code`` or raise VoteRoundNotFound."""
        vote_round = self._db['vote_round'].find_one(code=round_code)
        if vote_round is None:
            raise VoteRoundNotFound('Could not find a vote round with code {}.'.format(round_code))
        return vote_round

    def find_one_vote_round_by_timestamp(self, timestamp):
        """Return the first round with ``timestamp``, or None when absent."""
        vote_round = self._db['vote_round'].find_one(timestamp=timestamp)
        if vote_round is None:
            return None
        # BUGFIX: the found row was dropped (the function always returned
        # None); return it to the caller.
        return vote_round

    def find_vote_round_by_timestamp(self, timestamp):
        """Return the unique round with ``timestamp`` (None when absent).

        Raises MoreThanOneVoteRoundFound when several rounds share it.
        """
        vote_rounds = list(self._db['vote_round'].find(timestamp=timestamp))
        if not vote_rounds:
            return None
        if len(vote_rounds) > 1:
            raise MoreThanOneVoteRoundFound('Found {} ({}) vote rounds with '
                                            'timestamp {}.'.format(len(vote_rounds),
                                                                   [v['code'] for v in vote_rounds],
                                                                   timestamp))
        return vote_rounds[0]

    def close_vote_round(self, round_code, user_id):
        """Mark the round finished now; only its manager may close it."""
        vote_round = self.find_vote_round(round_code)
        if vote_round['manager'] != user_id:
            raise PermissionError('The user {} is not the manager for the vote round {}.'.format(user_id, round_code))
        vote_round['is_finished'] = True
        vote_round['end_date'] = datetime.now()
        self._db['vote_round'].upsert(vote_round, ['code'])

    def is_round_finished(self, round_code):
        """True when the round is flagged finished or its deadline passed."""
        vote_round = self.find_vote_round(round_code)
        if vote_round['is_finished']:
            return True
        if vote_round['has_deadline']:
            # a late round counts as finished even if never explicitly closed
            if vote_round['end_date'] < datetime.now():
                # self.close_vote_round(round_code, user_id)
                return True
        return False

    def set_round_deadline(self, round_code, hours):
        """Give the round a deadline ``hours`` hours from now."""
        vote_round = self.find_vote_round(round_code)
        vote_round['end_date'] = datetime.now() + timedelta(hours=hours)
        vote_round['has_deadline'] = True
        self._db['vote_round'].upsert(vote_round, ['code'])

    def start_vote_round(self, topic, code, user_id, start_date, end_date=None):
        """Create a new round; raise VoteRoundAlreadyDone if ``code`` exists.

        Without an explicit end_date a provisional 24-hour end is stored
        but has_deadline stays False.
        """
        try:
            self.find_vote_round(code)
        except VoteRoundNotFound:
            # the code is free, we can create the round (the original used a
            # bare except that would also have swallowed DB errors)
            pass
        else:
            raise VoteRoundAlreadyDone('The vote round with code {} has been already done.'.format(code))
        has_deadline = True
        if end_date is None:
            end_date = start_date + timedelta(hours=24)
            has_deadline = False
        try:
            result = self._db['vote_round'].insert(dict(topic=topic, code=code, manager=user_id,
                                                        start_date=start_date, end_date=end_date,
                                                        is_finished=False, has_deadline=has_deadline))
        except Exception as exc:
            self._db.rollback()
            raise Exception('Error inserting new vote round `{}`.'.format(code)) from exc
        else:
            return result

    def get_round_manager(self, round_code):
        """Return the user id of the round's manager."""
        return self.find_vote_round(round_code)['manager']

    def set_round_timestamp(self, round_code, timestamp):
        """Attach a timestamp string to the round."""
        vote_round = self.find_vote_round(round_code)
        vote_round['timestamp'] = timestamp
        self._db['vote_round'].upsert(vote_round, ['code'])

    # ------------------------------------------------------------------------
    # VOTES
    # ------------------------------------------------------------------------
    def _check_vote(self, round_code, vote_value):
        """Validate the round is open and ``vote_value`` converts to int."""
        if self.is_round_finished(round_code):
            raise VoteRoundIsFinished('The vote round with code {} is already finished.'.format(round_code))
        try:
            #if int(vote_value) not in (+1, -1, 0):
            #    raise Exception()
            value = int(vote_value)
        except (ValueError, TypeError):
            # narrowed from a bare except; also fixes the 'intenger' typo in
            # the user-facing message
            raise ValueError('Could not transform vote value `{}` into an integer.'.format(vote_value))
        else:
            return value

    def find_vote_by_id(self, vote_id):
        """Return the vote row with primary key ``vote_id``."""
        return self._db['vote'].find_one(id=vote_id)

    def find_vote(self, round_code, voter_name):
        """Return the vote row of ``voter_name`` in ``round_code`` (or None)."""
        return self._db['vote'].find_one(round_code=round_code, voter=voter_name)

    def _insert_blank_vote(self, round_code, voter_name):
        """Insert an all-zero vote row for the voter and return it."""
        vote = dict(round_code=round_code,
                    voter=voter_name,
                    positives=0,
                    negatives=0,
                    absents=0)
        try:
            result = self._db['vote'].insert(vote)
        except Exception as exc:
            self._db.rollback()
            raise Exception('Error inserting new vote for `{}` in round `{}`.'.format(voter_name,
                                                                                      round_code)) from exc
        else:
            return self.find_vote_by_id(result)

    def _update_vote(self, vote, vote_value):
        """Increment the counter matching ``vote_value``'s sign and save."""
        try:
            if vote_value > 0:
                vote['positives'] += 1
            elif vote_value == 0:
                vote['absents'] += 1
            else:
                vote['negatives'] += 1
            result = self._db['vote'].upsert(vote, ['id'])
        except Exception as exc:
            self._db.rollback()
            raise Exception('Error updating vote for `{}` in round `{}`.'.format(vote['voter'],
                                                                                 vote['round_code'])) from exc
        else:
            return self.find_vote_by_id(result)

    def _clear_vote(self, vote):
        """Zero all counters of ``vote`` and save."""
        try:
            vote['positives'] = 0
            vote['negatives'] = 0
            vote['absents'] = 0
            result = self._db['vote'].upsert(vote, ['id'])
        except Exception as exc:
            self._db.rollback()
            raise Exception('Error clearing vote for `{}` in round `{}`.'.format(vote['voter'],
                                                                                 vote['round_code'])) from exc
        else:
            return self.find_vote_by_id(result)

    def insert_vote(self, round_code, voter_name, vote_value, definitive=True):
        """ Insert a `vote_value` vote for the `voter_name` in round `round_code`.
        If definitive, will clear previous votes and make only this one as valid.
        :param round_code: str
            Code of the voting round.
        :param voter_name:
            The id of the voter.
        :param vote_value: str or int
            The value of the vote. Must be an integer, in string or not.
        :param definitive:
            If definitive, will clear previous votes and make only this one as valid.
            Otherwise will sum this value to the `voter_name` votes count.
        :return: dict
            The vote row.
        """
        value = self._check_vote(round_code, vote_value)
        vote = self.find_vote(round_code, voter_name)
        if vote is None:
            # BUGFIX: the freshly inserted row was not kept, so first-time
            # voters crashed in _update_vote(None, ...).
            vote = self._insert_blank_vote(round_code, voter_name)
        elif definitive:
            vote = self._clear_vote(vote)
        return self._update_vote(vote, value)

    @staticmethod
    def get_vote_value(vote):
        """Return +1/-1/0 for the category with the highest count in ``vote``.

        BUGFIX: the original called max() over the three integer counts with
        ``key=itemgetter(1)`` (TypeError: ints are not subscriptable) and then
        indexed the value dict with that count instead of a category name.
        """
        vote_value = dict(positives=1, negatives=-1, absents=0)
        counts = [(name, vote[name]) for name in ('positives', 'negatives', 'absents')]
        max_name = max(counts, key=itemgetter(1))[0]
        return vote_value[max_name]

    def get_all_votes(self, round_code):
        """Return an iterator over every vote row of the round."""
        return self._db['vote'].find(round_code=round_code)

    def get_round_result(self, round_code):
        """Return a Counter mapping vote value (+1/-1/0) to number of voters."""
        votes = self.get_all_votes(round_code)
        return Counter(self.get_vote_value(vote) for vote in votes)
| {
"repo_name": "PythonSanSebastian/pyper_the_bot",
"path": "implants/vote_rounds.py",
"copies": "1",
"size": "10736",
"license": "bsd-3-clause",
"hash": -7630368859046469000,
"line_mean": 35.8934707904,
"line_max": 118,
"alpha_frac": 0.5397727273,
"autogenerated": false,
"ratio": 4.063588190764572,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5103360918064572,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alexandre'
import pandas as pd
from gdrive import get_spreadsheet, get_worksheet, worksheet_to_dict
def get_ws_data(api_key_file, doc_key, ws_tab_idx, header=None):
    """Fetch one worksheet of a Google spreadsheet as a pandas DataFrame.

    Parameters
    ----------
    api_key_file: str
        Path to the Google API key json file.
    doc_key: str
        Key of the spreadsheet document.
    ws_tab_idx: int
        Index of the worksheet within the spreadsheet.
    header: List[str]
        Values to assign to the header of the result.

    Returns
    -------
    content: pandas.DataFrame
    """
    worksheet = get_worksheet(get_spreadsheet(api_key_file, doc_key), ws_tab_idx)
    content = worksheet_to_dict(worksheet, header=header, start_row=1)
    return pd.DataFrame(content)
def get_sponsors_ws_data(api_key_file, doc_key):
    """Return the sponsor responses worksheet (tab 0) with a fixed header."""
    sponsor_header = [
        'date', 'company', 'country', 'address',
        'vat', 'Others', 'representative',
        'identification', 'document', 'email',
        'services', 'price', 'VAT',
    ]
    return get_ws_data(api_key_file, doc_key, ws_tab_idx=0, header=sponsor_header)
def get_sponsors_ws_tasks(api_key_file, doc_key):
    """Return the sponsor tasks worksheet (tab 1) with a fixed header.

    The header values mirror the spreadsheet columns verbatim ('2dn' is
    kept as-is because it is a data value, not code).
    """
    tasks_header = [
        'Sponsor', 'Contact', 'Manager',
        'AGREEMENT BY EPS/ACPySS',
        'contacted to fill form?',
        'sent agreement',
        'agreement signed',
        'put logo on web',
        '1st invoice sent',
        '1st invoice paid',
        '2dn invoice sent',
        '2nd invoice paid',
    ]
    return get_ws_data(api_key_file, doc_key, ws_tab_idx=1, header=tasks_header)
"""
responses_filepath = 'sponsors-responses.csv'
df.to_csv(responses_filepath)
sponsor_idx = 35
template_filepath = 'sponsor_agreement_1invoice.tex'
company_name = data[sponsor_idx]['company']
#!docstamp -i $responses_filepath -t $template_filepath -f company -c xelatex --idx $sponsor_idx -v
""" | {
"repo_name": "PythonSanSebastian/pyper_the_bot",
"path": "implants/sponsors_agreements_factory.py",
"copies": "1",
"size": "1979",
"license": "bsd-3-clause",
"hash": -9207512832977489000,
"line_mean": 26.8873239437,
"line_max": 99,
"alpha_frac": 0.6109146033,
"autogenerated": false,
"ratio": 3.4179620034542313,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45288766067542313,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alexandre'
import ply.lex as lex
# Keywords and attribute names of the drawing language; each entry also
# becomes an upper-cased ply token type (see `tokens` below).
reserved_words = (
    'color',
    'point',
    'line',
    'circle',
    'rect',
    'ellipse',
    'customshape',
    'text',
    'rotate',
    'scale',
    'translate',
    'hide',
    'if',
    'while',
    'for',
    'step',
    'apply',
    'rgb',
    'hex',
    'name',
    'x',
    'y',
    'p1',
    'p2',
    'fill_color',
    'border_color',
    'width',
    'c',
    'r',
    'border_width',
    'o',
    'height',
    'rx',
    'ry',
    'p',
    'close',
    'content',
    'font',
    'size',
    'angle',
    'sx',
    'sy',
    'h',
)
# ply token list: value literals, operators, identifiers, plus one token
# per reserved word (upper-cased).
tokens = (
    'INTEGER',
    'BOOLEAN',
    'STRING',
    'ADD_OP',
    'MUL_OP',
    'COND_OP',
    'IDENTIFIER'
) + tuple(map(lambda s:s.upper(),reserved_words))
# single-character tokens handled directly by ply
literals = '():,;={}'
def t_INTEGER(token):
    r'[+-]?\d+'
    # The docstring above is the ply token pattern -- do not change it.
    raw = token.value
    try:
        parsed = int(raw)
    except ValueError:
        print ("Line %d: Problem while parsing %s!" % (token.lineno, raw))
        parsed = 0
    token.value = parsed
    return token
def t_BOOLEAN(token):
    r'(YES|NO)'
    # The docstring above is the ply token pattern -- do not change it.
    try:
        # map the literal 'YES'/'NO' to a Python bool
        token.value = (token.value == 'YES')
    except ValueError:
        print ("Line %d: Problem while parsing %s!" % (token.lineno, token.value))
        token.value = 0
    return token
def t_STRING(token):
    r'"(?:[^"\\]|\\.)*"'
    # The docstring above is the ply token pattern -- do not change it.
    try:
        # strip the surrounding double quotes
        token.value = str(token.value)[1:-1]
    except ValueError:
        print ("Line %d: Problem while parsing %s!" % (token.lineno, token.value))
        token.value = ""
    return token
def t_ADD_OP(token):
    r'[+-]'
    # additive operators; the docstring is the ply token pattern
    return token


def t_MUL_OP(token):
    r'[*/%]'
    # multiplicative operators; the docstring is the ply token pattern
    return token


def t_COND_OP(token):
    r'(==|<=|>=|<|>)'
    # comparison operators; the docstring is the ply token pattern
    return token
def t_IDENTIFIER(token):
    r'[A-Za-z_]\w*'
    # A name matching a reserved word is re-typed to its keyword token.
    if token.value in reserved_words:
        token.type = token.value.upper()
    return token
def t_newline(token):
    r'\n+'
    # keep line numbers accurate for error messages
    token.lexer.lineno += len(token.value)


# characters skipped between tokens
t_ignore = ' \t'


def t_error(token):
    # report and skip a single illegal character
    print ("Illegal character '%s'" % repr(token.value[0]))
    token.lexer.skip(1)


# build the lexer from the t_* rules defined in this module
lex.lex()
if __name__ == "__main__":
    # Tokenize the file given on the command line and dump every token.
    import sys
    prog = open(sys.argv[1]).read()
    lex.input(prog)
    while 1:
        tok = lex.token()
        if not tok: break
        print ("line %d: %s(%s)" % (tok.lineno, tok.type, tok.value))
"repo_name": "thedarkmammouth/PNP",
"path": "lex.py",
"copies": "2",
"size": "2038",
"license": "mit",
"hash": 6396340051251862000,
"line_mean": 14.8062015504,
"line_max": 75,
"alpha_frac": 0.5574092247,
"autogenerated": false,
"ratio": 2.5036855036855035,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8831338254647281,
"avg_score": 0.045951294747644374,
"num_lines": 129
} |
__author__ = "Alexan Mardigian"
__version__ = "1.0.0"
import os
import time
import tingbot
from tingbot import *
SAVEFILE = 'saved_font.sav'
def load_fonts():
    """Map consecutive integer keys to the paths of the .ttf files in ./fonts/."""
    font_dir = "./fonts/"
    ttf_paths = [font_dir + name for name in os.listdir(font_dir)
                 if name.endswith(".ttf")]
    return dict(enumerate(ttf_paths))
# Fonts are loaded once at startup. `state` holds the UI state:
# 'is_fomrat_pressed' (typo kept: it is a data key used elsewhere) toggles
# 12/24-hour display; 'selected_font' indexes into `fonts`.
fonts = load_fonts()
state = { 'is_fomrat_pressed': False,
          'selected_font': 0
        }
def get_saved_font():
    """Return the font path saved in SAVEFILE, or '' when none is saved.

    The file is opened in 'a+' mode so it is created on first run.
    """
    infile = open(SAVEFILE, 'a+')
    try:
        # BUGFIX: 'a+' positions the stream at the end of the file, so
        # readlines() always returned [] and the saved font was never
        # restored; rewind before reading.
        infile.seek(0)
        lines = infile.readlines()
        font_path = lines[0] if lines else ''
    finally:
        infile.close()
    return font_path
@left_button.press
def cycle_font():
    """Advance to the next font and persist the choice to SAVEFILE."""
    state['selected_font'] += 1
    # Wrap around once we step past the last font index.
    if state['selected_font'] >= len(fonts):
        state['selected_font'] = 0
    with open(SAVEFILE, 'w') as outfile:
        outfile.write(fonts[state['selected_font']])
@right_button.press
def set_time_format():
    """Toggle between 24-hour and 12-hour time display."""
    state['is_fomrat_pressed'] = not state['is_fomrat_pressed']
@touch()
def on_touch(xy, action):
    # A screen tap toggles the time format, same as the right button.
    if action == 'down':
        set_time_format()
@every(seconds=1.0/30)
def loop():
    """Redraw the clock roughly 30 times per second.

    Uses the font saved on disk when present, otherwise the in-memory
    selection; switches the time format when the toggle is set.
    """
    date_format_str = "%d %B %Y"
    time_format_str = "%H:%M:%S"
    sf = get_saved_font()
    if not sf:
        sf = fonts[state['selected_font']]
    if state['is_fomrat_pressed']:
        time_format_str = "%I:%M:%S %p"
    # Fix: use the date_format_str variable instead of a duplicated literal,
    # so the two stay in sync if the format ever changes.
    current_date = time.strftime(date_format_str)
    current_time = time.strftime(time_format_str)
    screen.fill(color='black')
    screen.text(current_time, xy=(160, 110), color='yellow', font_size=47, font=sf)
    screen.text(current_date, xy=(160, 180), color='yellow', font_size=24, font=sf)
tingbot.run()
| {
"repo_name": "Techno-Hwizrdry/clok",
"path": "clok.tingapp/main.py",
"copies": "1",
"size": "1938",
"license": "mit",
"hash": -3928323641208251400,
"line_mean": 19.6170212766,
"line_max": 83,
"alpha_frac": 0.560371517,
"autogenerated": false,
"ratio": 3.2246256239600664,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9052639459645719,
"avg_score": 0.0464715362628695,
"num_lines": 94
} |
__author__ = "Alexan Mardigian"
__version__ = "1.2.3"
from argparse import ArgumentParser
from time import sleep
import json
import requests
import sys
# HIBP v3 endpoint template: service name, email and truncateResponse flag
# are interpolated in by get_results().
PWNED_API_URL = "https://haveibeenpwned.com/api/v3/%s/%s?truncateResponse=%s"
HEADERS = {
    "User-Agent": "checkpwnedemails",
    "hibp-api-key": "",  # filled in at runtime from the -a key file (see main)
}
# Indexes into the (email, is_pwned, data) result tuples.
EMAILINDEX = 0
PWNEDINDEX = 1
DATAINDEX = 2
# HIBP service path segments.
BREACHED = "breachedaccount"
PASTEBIN = "pasteaccount"
RATE_LIMIT = 1.6  # in seconds
class PwnedArgParser(ArgumentParser):
    """ArgumentParser that prints full usage help on a parse error."""
    def error(self, message):
        # Override argparse's default handler: message to stderr, then the
        # complete help text, then exit with the conventional argparse code 2.
        sys.stderr.write('error: %s\n' %message)
        self.print_help()
        sys.exit(2)
def get_args():
    """Parse command-line options; with no arguments, print help and exit(1)."""
    parser = PwnedArgParser()
    parser.add_argument('-a', dest='apikey_path', help='Path to text file that contains your HIBP API key.')
    parser.add_argument('-b', action="store_true", dest='only_breaches', help='Return results for breaches only.')
    parser.add_argument('-i', dest='input_path', help='Path to text file that lists email addresses.')
    parser.add_argument('-n', action="store_true", dest='names_only', help='Return the name of the breach(es) only.')
    parser.add_argument('-o', dest='output_path', help='Path to output (tab deliminated) text file.')
    parser.add_argument('-p', action="store_true", dest='only_pwned', help='Print only the pwned email addresses.')
    parser.add_argument('-s', dest="single_email", help='Send query for just one email address.')
    parser.add_argument('-t', action="store_true", dest='only_pastebins', help='Return results for pastebins only.')
    if len(sys.argv) == 1:  # If no arguments were provided, then print help and exit.
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
# Used for removing the trailing '\n' character on each email.
def clean_list(list_of_strings):
    cleaned = []
    for entry in list_of_strings:
        cleaned.append(str(entry).strip())
    return cleaned
# This function will print the appropriate output string based on the
# HTTP error code what was passed in. If an invalid HIBP API key was used
# (error code 401), then checkpwnedemails.py should stop running.
def printHTTPErrorOutput(http_error_code, hibp_api_key, email=None):
    ERROR_CODE_OUTPUT = {
        400: "HTTP Error 400. %s does not appear to be a valid email address." % (email),
        401: "HTTP Error 401. Unauthorised - the API key provided (%s) was not valid." % (hibp_api_key),
        403: "HTTP Error 403. Forbidden - no user agent has been specified in the request.",
        429: "HTTP Error 429. Too many requests; the rate limit has been exceeded.",
        503: "HTTP Error 503. Service unavailable."
    }
    # Fall back to a generic message for codes we have no text for.
    message = ERROR_CODE_OUTPUT.get(http_error_code)
    if message is None:
        message = "HTTP Error %s" % (http_error_code)
    print(message)
    # A bad API key is unrecoverable: abort the whole run.
    if http_error_code == 401:
        sys.exit(1)
def get_results(email_list, service, opts, hibp_api_key):
    """Query the given HIBP service for every address in email_list.

    Returns a list of tuples (email address, been pwned?, json data).
    Unless -o was given, also prints a per-address status line as results
    arrive.  A 401 response aborts the run via printHTTPErrorOutput.
    """
    results = []  # list of tuples (email adress, been pwned?, json data)
    for email in email_list:
        email = email.strip()
        data = []
        names_only = "true" if opts.names_only else "false"
        try:
            response = requests.get(url=PWNED_API_URL % (service, email, names_only), headers=HEADERS)
            # Bug fix: requests.get never raises HTTPError by itself, so the
            # except branch below was unreachable and error responses
            # (401, 429, ...) carrying a JSON body were mistaken for breach
            # data.  raise_for_status() routes them to the handler.
            response.raise_for_status()
            is_pwned = True
            # Before parsing the response (for JSON), check if any content was returned.
            # Otherwise, a json.decoder.JSONDecodeError will be thrown because we were trying
            # to parse JSON from an empty response.
            if response.content:
                data = response.json()
            else:
                data = None  # No results came back for this email. According to HIBP, this email was not pwned.
                is_pwned = False
            results.append( (email, is_pwned, data) )
        except requests.exceptions.HTTPError as e:
            # Bug fix: HTTPError has no .code attribute; the status code
            # lives on the attached response object.
            status = e.response.status_code
            if status == 404 and not opts.only_pwned:
                results.append( (email, False, data) )  # No results came back for this email. According to HIBP, this email was not pwned.
            elif status != 404:
                printHTTPErrorOutput(status, hibp_api_key, email)
        sleep(RATE_LIMIT)  # This delay is for rate limiting.
        if not opts.output_path:
            try:
                last_result = results[-1]
                if not last_result[PWNEDINDEX]:
                    if service == BREACHED:
                        print("Email address %s not pwned. Yay!" % (email))
                    else:
                        print("Email address %s was not found in any pastes. Yay!" %(email))
                else:
                    print("\n%s pwned!\n==========" % (email))
                    print(json.dumps(data, indent=4))
                    print('\n')
            except IndexError:
                pass
    return results
# This function will convert every item, in dlist, into a string and
# encode any unicode strings into an 8-bit string.
def clean_and_encode(dlist):
    def _stringify(item):
        try:
            return str(item)
        except UnicodeEncodeError:
            return str(item.encode('utf-8'))  # Clean the data.
    return [_stringify(item) for item in dlist]
def tab_delimited_string(data):
    """Render one (email, is_pwned, payload) result tuple as one or more
    tab-separated lines — one line per breach/paste record, or a single
    "email<TAB>is_pwned" line when there is no payload."""
    DATACLASSES = 'DataClasses'
    prefix = data[EMAILINDEX] + '\t' + str(data[PWNEDINDEX])
    if not data[DATAINDEX]:
        return prefix
    rows = []
    for record in data[DATAINDEX]:  # one breach or paste dict per record
        try:
            # Flatten the DataClasses list in place so it serializes cleanly.
            record[DATACLASSES] = [str(x) for x in record[DATACLASSES]]
        except KeyError:
            pass  # Not processing a string for a breach.
        rows.append(prefix + '\t' + "\t".join(clean_and_encode(record.values())))
    return '\n'.join(rows)
def write_results_to_file(filename, results, opts):
    """Write breach and/or paste results to tab-delimited text files.

    `filename` has its extension stripped; '_breaches.txt' / '_pastes.txt'
    suffixes are appended.  `results` is expected to hold one list per
    selected service, in the same order as the suffix list built here.
    """
    BREACHESTXT = "_breaches.txt"
    PASTESTXT = "_pastes.txt"
    BREACH_HEADER = ("Email Address", "Is Pwned", "Name", "Title", "Domain", "Breach Date", "Added Date", "Modified Date", "Pwn Count", "Description", "Logo Path", "Data Classes", "Is Verified", "Is Fabricated", "Is Sensitive", "Is Retired", "Is SpamList")
    PASTES_HEADER = ("Email Address", "Is Pwned", "ID", "Source", "Title", "Date", "Email Count")
    file_headers = {
        BREACHESTXT: "\t".join(BREACH_HEADER),
        PASTESTXT: "\t".join(PASTES_HEADER)
    }
    if opts.only_breaches:
        suffixes = [BREACHESTXT]
    elif opts.only_pastebins:
        suffixes = [PASTESTXT]
    else:
        suffixes = [BREACHESTXT, PASTESTXT]
    # Strip any extension from the base filename.
    dot = filename.rfind('.')
    if dot > -1:
        filename = filename[:dot]
    for res, suffix in zip(results, suffixes):
        with open(filename + suffix, 'w', encoding='utf-8') as outfile:
            outfile.write(file_headers[suffix] + '\n')
            for row in res:
                outfile.write(tab_delimited_string(row) + '\n')
def main():
    """Entry point: read the API key, collect addresses, query HIBP,
    and optionally write the results to files."""
    hibp_api_key = ""
    email_list = []
    opts = get_args()
    # The HIBP v3 API requires a key; refuse to run without one.
    if not opts.apikey_path:
        print("\nThe path to the file containing the HaveIBeenPwned API key was not found.")
        print("Please provide the file path with the -a switch and try again.\n")
        sys.exit(1)
    else:
        try:
            with open(opts.apikey_path) as apikey_file:
                hibp_api_key = apikey_file.readline().strip()
                # Every subsequent request authenticates via this header.
                HEADERS["hibp-api-key"] = hibp_api_key
        except IOError:
            print("\nCould not read file:", opts.apikey_path)
            print("Check if the file path is valid, and try again.\n")
            sys.exit(1)
    # Address sources: a single -s address, or one address per line via -i.
    if opts.single_email:
        email_list = [opts.single_email]
    elif opts.input_path:
        email_list_file = open(opts.input_path, 'r')
        email_list = clean_list(email_list_file.readlines())
        email_list_file.close()
    else:
        print("\nNo email addresses were provided.")
        print("Please provide a single email address (using -s) or a list of email addresses (using -i).\n")
        sys.exit(1)
    # Query breaches and/or pastes depending on the -b / -t flags; the
    # results list order matches write_results_to_file's suffix order.
    results = []
    if opts.only_breaches:
        results.append(get_results(email_list, BREACHED, opts, hibp_api_key))
    elif opts.only_pastebins:
        results.append(get_results(email_list, PASTEBIN, opts, hibp_api_key))
    else:
        results.append(get_results(email_list, BREACHED, opts, hibp_api_key))
        results.append(get_results(email_list, PASTEBIN, opts, hibp_api_key))
    if opts.output_path:
        write_results_to_file(opts.output_path, results, opts)


if __name__ == '__main__':
    main()
| {
"repo_name": "Techno-Hwizrdry/checkpwnedemails",
"path": "checkpwnedemails.py",
"copies": "1",
"size": "7719",
"license": "mit",
"hash": -1498258399363076900,
"line_mean": 31.2970711297,
"line_max": 253,
"alpha_frac": 0.6798808136,
"autogenerated": false,
"ratio": 2.9327507598784193,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8927927356705281,
"avg_score": 0.03694084335462763,
"num_lines": 239
} |
__author__ = "alex balzer <abalzer22@gmail.com>"
__version__ = "0.1.0"
# TODO: need to come up with different ways that you can mess with the vectors for each node so that you can create various matrices that all have relavance to specific applications.
class vectree(object):
    """An n-ary tree of `node` objects with walk/search/insert/remove helpers.

    Print calls use the single-argument parenthesized form so the class
    parses under both Python 2 and Python 3.
    """

    def __init__(self, root):
        self.root = root
        self.search_node = None

    def walk(self, cnode, do_print=True):
        """Depth-first pre-order traversal, printing each node when do_print.

        Bug fix: the original recursion dropped do_print, so passing
        do_print=False still printed every child.
        """
        if do_print:
            print(str(' ' * cnode.indent) + str(cnode.val) + ' ->> ' + str(hex(id(cnode))))
        if not cnode.children:
            return
        for child in cnode.children:
            self.walk(child, do_print)

    def search(self, cnode, val):
        """Breadth-first search for the first node with value `val`.

        Prints each visited node (trace output); returns the node or None.
        """
        queue = [cnode]
        while queue:
            current = queue.pop(0)
            print(str(' ' * current.indent) + str(current.val) + ' ->> ' + str(hex(id(current))))
            if current.val == val:
                return current
            for child in current.children:
                queue.append(child)
        return None

    def dsearch(self, cnode, val):
        """Depth-first search for `val`, printing visited nodes.

        Bug fix: the original discarded the recursive call's result, so a
        match below the root was found but never returned (always None).
        """
        print(str(' ' * cnode.indent) + str(cnode.val) + ' ->> ' + str(hex(id(cnode))))
        if cnode.val == val:
            return cnode
        if cnode.children:
            for child in cnode.children:
                found = self.dsearch(child, val)
                if found is not None:
                    return found
        return None

    def insert(self, tnode, cnode, place=None):
        """Attach tnode under cnode, appended or at integer position `place`."""
        if place == None:
            cnode.children.append(tnode)
        elif isinstance(place, int):
            cnode.children.insert(place, tnode)

    def insert_v(self, tnode, cnode):
        """Insert tnode below the first leaf reached while tnode.val <= cnode.val."""
        if tnode.val <= cnode.val:
            if not cnode.children:
                cnode.children = [tnode]
                return
            for child in cnode.children:
                self.insert_v(tnode, child)

    def remove(self, cnode, val):
        """Remove the first descendant of cnode whose value equals `val`.

        Bug fix: the original discarded the recursive result, so removals
        deeper than one level reported None instead of success.  Returns
        True when a node was removed, False otherwise.
        """
        if cnode.children:
            for index in range(len(cnode.children)):
                if cnode.children[index].val == val:
                    cnode.children.pop(index)
                    return True
            for child in cnode.children:
                if self.remove(child, val):
                    return True
        return False

    def update_nodes(self, cnode):
        """Recompute parent links and indent depths for the subtree at cnode."""
        if not cnode.children:
            return
        for child in cnode.children:
            child.parent = cnode
            child.indent = cnode.indent + 1
            self.update_nodes(child)
class node(object):
    """
    a node object that contains the vector for that specific node.
    """
    def __init__(self, val, children=None, parent=None, indent=0, siblings=None):
        # Bug fix: the original used mutable default arguments ([]) for
        # `children` and `siblings`, so every node created without explicit
        # lists shared the same list object.
        self.val = val
        self.parent = parent
        self.indent = indent
        self.children = [] if children is None else children
        self.siblings = [] if siblings is None else siblings

    def append_child(self, child):
        """Append `child` to this node's children."""
        self.children.append(child)

    def insert_child(self, n, child):
        """Insert `child` at position `n` in this node's children."""
        self.children.insert(n, child)
if __name__ == "__main__":
    # Smoke-test / demo script (Python 2: uses print statements).
    # create root node
    root_node = node(45,[])
    # create n-tree
    tree = vectree(root_node)
    g = node(8,[node(12,[node(34,[]),node(46,[node(78,[])])]),node(15,[]),node(32,[node(88,[]),node(89,[])]),node(33,[]),node(34,[node(168,[]),node(99,[])])]) # get used to this syntax...
    # add a sub tree to the root node
    root_node.append_child(g)
    tree.walk(root_node)
    # add proper class info to all new nodes added to the root
    tree.update_nodes(root_node)
    print '-='*32
    # depth-first-search
    tree.walk(root_node)
    print '-='*32
    # breadth-first-search
    search_node = tree.search(root_node,78)
    print search_node
    g1 = node(865,[node(12345,[node(3465,[]),node(4996,[node(758,[])])]),node(1565,[]),node(3732,[node(8878,[]),node(8349,[])]),node(3223,[node(1,[]),node(2,[]),node(3,[node(4,[node(5,[node(6,[node(7,[node(8,[node(9,[])])])])])])])]),node(3114,[node(1658,[]),node(92349,[])])]) # get used to this syntax...
    search_node.append_child(g1)
    tree.update_nodes(root_node)
    tree.walk(root_node)
    print '-='*32
    # searches for values that are absent: both return None after tracing
    tree.search(root_node,457982)
    print '-='*32
    print '\n\ndsearch\n\n'
    tree.dsearch(root_node,4537987)
    print '-='*32
    tree.remove(root_node,78)
    tree.walk(root_node)
    # print '-='*32
    # print '\n\ndsearch\n\n'
    # tree.dsearch(root_node,78)
| {
"repo_name": "baallezx/vectree",
"path": "src/tree/vectree.py",
"copies": "1",
"size": "3455",
"license": "apache-2.0",
"hash": 2798091200344644000,
"line_mean": 27.7916666667,
"line_max": 303,
"alpha_frac": 0.6541244573,
"autogenerated": false,
"ratio": 2.6194086429112966,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37735331002112965,
"avg_score": null,
"num_lines": null
} |
__author__ = "Alex Baranov"
from oriented_packing import oriented_packer, oriented_container_selector
from copy import deepcopy
from box import Box
class RPacker(object):
"""
Class is used to rectangular elements to the rectangular containers.
"""
    def __init__(self, **kwargs):
        """
        Creates instance of the RPacker class.
        Arguments:
        - rn: number of dimensions
        - allow_rotation: allows rotation of the elements.
        """
        # NOTE(review): the documented 'rn' kwarg is never read here —
        # confirm whether it is consumed elsewhere or is stale documentation.
        super(RPacker, self).__init__()
        # by default rotation is allowed.
        self.allow_rotation = kwargs.get("allow_rotation", True)
    def pack(self, boxes,
             containers,
             packer=None,
             container_select_func=None,
             split_included_containers=False,
             verbose=True, **kwargs):
        """
        Packs rectangles to the containers.
        Arguments:
        - boxes: the list of the rectangles.
        - containers: the list of containers.
        Should be provided in the same form as 'rectangles'.
        - packer: function that packs one box to one container. Should accept two parameters:
        (container, rect)
        - container_select_func: function that select the containers for the box. Accepts three
        parameters: (all_available_containers, box to pack, packer function). Should return the
        container or None (when unable to find valid container for box)
        Returns:
        - the list of packed rectangles.
        """
        # NOTE(review): Python 2 code — `filter(...)` below is indexed like a
        # list, and the print statements use py2 syntax.
        # TODO Add description or all optional parameters
        packer = packer or oriented_packer
        container_select_func = container_select_func or oriented_container_selector
        result = []
        if verbose:
            print "Packing boxes '{0}' to containers '{1}'".format(map(str, boxes), map(str, containers))
        # Work on copies so the caller's boxes/containers are not mutated.
        internal_containers = deepcopy(list(containers))
        internal_elements = deepcopy(list(boxes))
        actions = []
        max_x = []
        loading_coefs = []
        weights = []
        for i, rectangle in enumerate(internal_elements):
            if rectangle.kind == "unpack":
                # remove already packed box if present
                for packed in filter(lambda x: x.name == rectangle.name and x.kind == "solid", result):
                    if verbose:
                        print "Unpacking box: {}".format(packed)
                    result.remove(packed)
                    # add container instead of removed control
                    parent_container = filter(lambda c: c.includes(packed), containers)
                    if parent_container:
                        # NOTE(review): the generator below shadows the outer
                        # loop variable `i` — intentional? verify.
                        cc = Box(bottom_left=packed.polus,
                                 size=tuple(
                                     parent_container[0].size[i] - packed.polus[i] for i in range(len(packed.polus))))
                        internal_containers.append(cc)
                    if split_included_containers:
                        internal_containers = self.remove_included_containers(internal_containers)
                    # remove intersected containers
                    for packed_element in result:
                        internal_containers = self.split_intersected_containers(internal_containers, packed_element)
                    # add actions
                    actions.append(("unpack", packed))
                    # recalculate max_x and loading
                    max_x.append(self.__calculate_max_x(result))
                    loading_coefs.append(self.__calculate_loading(result, containers))
                    weights.append(self.__calculate_weights(result))
            else:
                # select the best container
                target_container, packed_element = container_select_func(internal_containers, rectangle,
                                                                        packer=packer,
                                                                        packed_boxes=result,
                                                                        all_boxes=boxes,
                                                                        **kwargs)
                # if target container is null, return the currently packed elements.
                if not target_container:
                    break
                if verbose:
                    print "Packing box: {} ".format(packed_element)
                result.append(packed_element)
                actions.append(("pack", packed_element))
                # calculate solution dynamic parameters
                max_x.append(self.__calculate_max_x(result))
                loading_coefs.append(self.__calculate_loading(result, containers))
                weights.append(self.__calculate_weights(result))
                # get new containers
                new_containers = target_container.find_free_containers(packed_element)
                # remove target container
                internal_containers.remove(target_container)
                # add new containers
                internal_containers += new_containers
                # remove included containers
                if split_included_containers:
                    internal_containers = self.remove_included_containers(internal_containers)
                # remove intersected containers
                internal_containers = self.split_intersected_containers(internal_containers, packed_element)
        params = {"actions": actions, "max_x": max_x, "loading": loading_coefs, "weights": weights}
        return result, params
    def fit_by_rotation(self, rect, container):
        """
        Rotates the rectangle to fit it into container.
        The polus of the rectangle won't be affected.
        Arguments:
        - rect: rectangle to fit.
        - container: container to fit rectangle into.
        - minimize_index: the minimize direction. Specifies the index of the dimension to minimize
        """
        # NOTE(review): only checks whether the sorted side lengths fit, and
        # returns the ORIGINAL rect (not a rotated copy) when they do;
        # returns None implicitly otherwise.  Confirm callers expect this.
        res = sorted(rect.size)
        if all((container.size[index] - res[index]) >= 0 for index in xrange(len(res))):
            return rect
def remove_included_containers(self, containers):
"""
Removes containers that's are completely within another container.
Arguments:
- containers: the list of all available containers.
"""
not_included = [cont for index, cont in enumerate(containers)
if all([not cc.includes(cont) for num, cc in enumerate(containers)
if num != index])]
return not_included
def split_intersected_containers(self, containers, box):
"""
Splits the containers that are intersected by the packed box.
Arguments:
- containers:: the list of all available containers.
- box: packed box.
Returns:
- a new resulting list of containers.
"""
result = []
for container in containers:
if box.intersects(container):
result += container.find_free_containers(box)
else:
result.append(container)
return result
def __calculate_max_x(self, packed_boxes):
if packed_boxes:
b = max(packed_boxes, key=lambda box: box.polus[0] + box.size[0])
return b.polus[0] + b.size[0]
else:
return 0
    def __calculate_loading(self, packed_boxes, initial_containers):
        # Loading coefficient: total packed area over total initial container
        # area (float division forced by the float() cast); 0 when empty.
        if packed_boxes:
            b = sum(map(lambda p: p.get_area(), packed_boxes)) / float(
                sum(map(lambda p: p.get_area(), initial_containers)))
            return b
        else:
            return 0
def __calculate_weights(self, packed_boxes):
if packed_boxes:
return sum(b.weight for b in packed_boxes)
else:
return 0 | {
"repo_name": "stonelake/pyoptimization",
"path": "pyopt/packing/rectangular/rpacker.py",
"copies": "1",
"size": "7958",
"license": "apache-2.0",
"hash": -1953455592599639600,
"line_mean": 37.8243902439,
"line_max": 120,
"alpha_frac": 0.547373712,
"autogenerated": false,
"ratio": 5.023989898989899,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001520878568505433,
"num_lines": 205
} |
__author__ = 'Alex Baranov'
import numpy as np
import itertools as iter
# Module-wide switch: when True the solvers print their intermediate tables.
__PRINT_DEBUG = False
class InequalitiesSolver(object):
    """Produces non-negative solutions of linear inequality systems from a
    cached Chernikov fundamental system of solutions."""

    last_system = None                     # system for which the SFS below was computed
    last_found_fundamental_system = None   # cached fundamental system of solutions
    min_random = 1                         # inclusive bounds for the random
    max_random = 1000                      # combination coefficients

    def find_foundamental_system_of_solution(self, system):
        """
        Searches the fundamental systems of non-nagative solutions for the ineqalities system
        Parameters
        - system: matrix of ineqailities coefs (ax + by + cz + d .... <= 0)
        """
        self.last_system = np.array(system)
        self.last_found_fundamental_system = find_sfs_of_even_inequalities_system(system)
        return self.last_found_fundamental_system

    def get_solution(self, system, p=None):
        """
        Gets the solution that agrees with the system.

        A solution is a random (or `p`-weighted) non-negative combination of
        the fundamental system's rows, normalized by the last column.
        """
        npsystem = np.array(system)
        if not np.array_equal(npsystem, self.last_system):
            # need to recalcualate the fundamental system
            self.find_foundamental_system_of_solution(system)
        # numbers of rows in the fundamental system
        n = self.last_found_fundamental_system.shape[0]
        if (self.last_found_fundamental_system == 0).all():
            return [0] * (npsystem.shape[1] - 1)
        # use provided or generate random coefficients
        if p is None:
            # Fix: np.random.random_integers was deprecated and later removed
            # from NumPy; randint with an exclusive high of max+1 draws from
            # the same inclusive [min_random, max_random] range.
            p = np.random.randint(self.min_random, self.max_random + 1, (n, 1))
            while (p <= 0).all():
                p = np.random.randint(self.min_random, self.max_random + 1, (n, 1))
        # getting random solution
        mult = p * self.last_found_fundamental_system
        b = sum(mult[:, :-1]) / sum(mult[:, -1])
        return b
def add_additional_constraints(constraints_system, constraint_coefs, add_less_then_zero=True, add_simplex=True):
    """
    Adds additional constraints to the constraints system.
    First adds the constraints of type: -x_i <= 0
    If add_simplex parameter is True than add also constraints to bounds all the elements of the
    combinatorial set with the simplex.
    Keyword arguments:
    constraints_system -- the matrix that represents the constraint system
    constraint_coefs -- the array of coefficients that will be used to add new constraints
    add_less_then_zero -- specifies whether the constraints of type: -x_i <= 0 should be added (default - True)
    add_simplex -- specifies whether the simplex constraints should be added (default - True)
    """
    var_count = constraints_system.shape[1]
    if add_less_then_zero:
        # add conditional constraints that all variables are less or equal than zero
        left_part = -1 * np.eye(var_count - 1)
        right_part = np.zeros([var_count - 1, 1])
        positive_variables_consts = np.hstack((left_part, right_part))
        constraints_system = np.vstack((constraints_system, positive_variables_consts))
    if add_simplex:
        left_part = np.eye(var_count - 1)
        # Fix: renamed from `min` / `sum` to avoid shadowing the builtins.
        lower_bound = -1 * constraint_coefs.min()
        coef_total = constraint_coefs.sum()
        right_part1 = lower_bound * np.ones([var_count - 1, 1])
        right_part2 = coef_total * np.ones([var_count - 1, 1])
        # first add constraints of type: x_i >= min
        type1 = np.hstack((-1 * left_part, right_part1))
        # first add constraints of type: x_i <= sum
        type2 = np.hstack((left_part, right_part2))
        constraints_system = np.vstack((constraints_system, type1))
        constraints_system = np.vstack((constraints_system, type2))
    return constraints_system
def find_sfs_of_equation_system(system):
    """
    Calculates the system of the fundamental solutions using the Chernikov method for the linear system of equations
    Keyword arguments:
    system -- constraints matrix

    Returns the final left table T1 (rows are the fundamental solutions),
    reduced row-wise by GCD; a zeroed table when no non-trivial solution
    exists.  NOTE(review): Python 2 only (print statements, `cmp`).
    """
    constraints_system = np.array(system)
    # First build initial T1 and T2 matrices
    # T1 is a matrix with ones on the main diagonal
    T1 = [np.eye(constraints_system.shape[1])]
    # T2 is a transposed matrix of the initial matrix formed by the constraints system
    T2 = [constraints_system.transpose().copy()]
    current_index = 0
    next_index = 1
    # Iterate until the right table is fully zeroed out.
    while np.any(T2[current_index] != 0):
        # No choose the main column
        main_column = __get_main_column_index_simple(T2[current_index])
        if __PRINT_DEBUG:
            print "----> Main colum is ", main_column
        # Copy to a new T1 and T2 rows from T1 and T2 that are intersected
        # with the main column by zero elements
        T1.append(np.zeros((0, T1[current_index].shape[1]), dtype=T1[current_index].dtype))
        T2.append(np.zeros((0, T2[current_index].shape[1]), dtype=T2[current_index].dtype))
        rows_to_modify = []
        for index, row in enumerate(T2[current_index]):
            if (T2[current_index][index, main_column] == 0):
                #copy this row into a new T
                T1[next_index] = np.vstack((T1[next_index], T1[current_index][index]))
                T2[next_index] = np.vstack((T2[next_index], row))
                if __PRINT_DEBUG:
                    print "Copying row to the new table: ", index
            else:
                rows_to_modify.append(index)
        # Not find all pairs from rows_to_modify where the sign on main column are different
        pairs = list(iter.combinations(rows_to_modify, 2))
        valid_pairs = []
        for i, j in pairs:
            main_i = T2[current_index][i, main_column]
            main_j = T2[current_index][j, main_column]
            if ((main_i != 0) & (main_j != 0) & (cmp(main_i, 0) != cmp(main_j, 0))):
                # also need to check that there are zero columns in the T1 for given pair
                columns = np.where((T1[current_index][i] == 0) & (T1[current_index][j] == 0))[0]
                # check whether there is another row (except i,j) in T1 which intersect all the columns with zereos
                tmp = np.delete(T1[current_index][:, columns], [i, j], 0)
                if ((np.where(np.all(tmp == 0, axis=1))[0].size == 0) or (len(pairs) == 0)):
                    valid_pairs.append([i, j])
        for i, j in valid_pairs:
            if __PRINT_DEBUG:
                print "Checking row pair: ", (i, j)
            #build linear combinations for valid pairs to have zeros on main column
            coef_j = abs(T2[current_index][j][main_column])
            coef_i = abs(T2[current_index][i][main_column])
            coefs = np.array([coef_i, coef_j])
            # trying to reduce coefs
            min = coefs.min()
            if (np.all(coefs % min == 0)):
                coefs = coefs / min
            new_row_T1 = T1[current_index][i] * coefs[1] + T1[current_index][j] * coefs[0]
            new_row_T2 = T2[current_index][i] * coefs[1] + T2[current_index][j] * coefs[0]
            T1[next_index] = np.vstack((T1[next_index], new_row_T1))
            T2[next_index] = np.vstack((T2[next_index], new_row_T2))
        current_index = current_index + 1
        next_index = next_index + 1
    if (T1[-1].size == 0):
        # return zero solution
        result = T1[-2]
        result.fill(0)
    else:
        result = T1[-1]
    reduce_table(result)
    if __PRINT_DEBUG:
        __print_T(T1, T2)
    return result
def find_sfs_of_even_inequalities_system(system):
    """
    Calculates the system of the fundamental solutions using the Chernikov method for the even linear system of inequalities
    Keyword arguments:
    system -- constraints matrix

    Returns the final left table T1; rows are the fundamental solutions,
    each reduced by the row GCD.  NOTE(review): Python 2 only (print
    statements, `cmp`).
    """
    # casting to array.
    constraints_system = np.array(system)
    # First build initial T1 and T2 matrices
    # T1 is a matrix with ones on the main diagonal
    T1 = [np.eye(constraints_system.shape[1])]
    # T2 is a transposed matrix of the initial matrix formed by the constraints system
    T2 = [constraints_system.transpose().copy()]
    current_index = 0
    next_index = 1
    # stores all the previous main columns
    saved_main_column = []
    saved_main_column_indexes = []
    # Iterate while any strictly positive entry remains in the right table.
    while np.any(T2[current_index] > 0):
        # replace negative all negative columns with zero
        for index, column in enumerate(T2[current_index].T):
            if (column < 0).all() and not(index in saved_main_column_indexes):
                T2[current_index][:, index] = 0
        main_column = __get_positive_main_column_index(T2[current_index])
        if __PRINT_DEBUG:
            print "Main column is: ", main_column
        # Copy to a new T1 and T2 rows from T1 and T2 that are intersected
        # with the main column by negative (<=) elements
        T1.append(np.zeros((0, T1[current_index].shape[1]), dtype=T1[current_index].dtype))
        T2.append(np.zeros((0, T2[current_index].shape[1]), dtype=T2[current_index].dtype))
        for index, row in enumerate(T2[current_index]):
            if (T2[current_index][index, main_column] <= 0):
                #copy this row into a new T
                T1[next_index] = np.vstack((T1[next_index], T1[current_index][index]))
                T2[next_index] = np.vstack((T2[next_index], row))
        # find all 'valid pairs'
        pairs = list(iter.combinations(range(T2[current_index].shape[0]), 2))
        if __PRINT_DEBUG:
            print "All row pairs to check: ", pairs
        # forming the temporary T1 table which includes the saved main rows
        temp_T1 = __build_adjusted_T1(T1[current_index], T2[current_index], saved_main_column_indexes)
        for i, j in pairs:
            main_i = T2[current_index][i, main_column]
            main_j = T2[current_index][j, main_column]
            valid_pairs = []
            if ((main_i != 0) & (main_j != 0) & (cmp(main_i, 0) != cmp(main_j, 0))):
                if (temp_T1.shape[0] <= 2):
                    valid_pairs.append([i, j])
                else:
                    # also need to check that there are zero columns in the T1 for given pair
                    columns = np.where((temp_T1[i] == 0) & (temp_T1[j] == 0))[0]
                    # check whether there is another row (except i,j) in T1 which intersect all the columns with zereos
                    tmp = np.delete(temp_T1[:, columns], [i, j], 0)
                    if np.where(np.all(tmp == 0, axis=1))[0].size == 0 and tmp.shape[1] > 1:
                        valid_pairs.append([i, j])
            for i, j in valid_pairs:
                if __PRINT_DEBUG:
                    print "Performing calcualtions for pair: ", (i, j)
                #build linear combinations for valid pairs to have zeros on main column
                coef_j = abs(T2[current_index][j][main_column])
                coef_i = abs(T2[current_index][i][main_column])
                coefs = np.array([coef_i, coef_j])
                # trying to reduce coefs
                d = gcd(coef_i, coef_j)
                coefs = coefs / d
                new_row_T1 = T1[current_index][i] * coefs[1] + T1[current_index][j] * coefs[0]
                new_row_T2 = T2[current_index][i] * coefs[1] + T2[current_index][j] * coefs[0]
                T1[next_index] = np.vstack((T1[next_index], new_row_T1))
                T2[next_index] = np.vstack((T2[next_index], new_row_T2))
        # replace by -1 all the non-zero element of the main column. also replace all the saved main
        for column_index in saved_main_column_indexes + [main_column]:
            T2[next_index][np.where(T2[next_index][:, column_index] != 0), column_index] = -1
        if np.all((T2[next_index] > 0), axis=0).any():
            # some column in the T2 is strickly positive"
            # in this case we have zero solution
            T1[next_index] = np.zeros_like(T1[next_index])
            break
        # saving the main column
        c = T2[next_index][:, main_column].copy()
        c = c.reshape(c.shape[0], 1)
        saved_main_column.append(c)
        saved_main_column_indexes.append(main_column)
        # going to the next tables
        current_index = current_index + 1
        next_index = next_index + 1
    # searching GCD
    reduce_table(T1[-1])
    if __PRINT_DEBUG:
        __print_T(T1, T2)
    # return the last left table
    return T1[-1]
def reduce_table(T1):
    """Divide each row of T1 in place by the GCD of its entries.

    Rows whose GCD is zero (all-zero rows) are left untouched.
    """
    # `reduce` is a builtin only under Python 2; importing it locally from
    # functools keeps the function working on both Python 2 and Python 3.
    from functools import reduce
    for index, value in enumerate(T1):
        d = reduce(gcd, value)
        if (d != 0):
            T1[index] /= d
def __get_main_column_index_simple(T2):
    # Index of the first column of T2 containing any non-zero entry,
    # or -1 when every column is all zeros.
    for position, column in enumerate(T2.T):
        if np.any(column != 0):
            return position
    return -1
def __get_positive_main_column_index(T2):
    # Index of the first column of T2 containing any strictly positive
    # entry, or -1 when no such column exists.
    for position, column in enumerate(T2.T):
        if np.any(column > 0):
            return position
    return -1
def __print_T(T1, T2):
    """
    Prints the T1 and T2 tables.
    """
    # Debug helper (Python 2 print statements).  Each iteration shows
    # T1[k] and T2[k] side by side, separated by a NaN delimiter column.
    # NOTE(review): threshold=np.nan is rejected by modern NumPy versions.
    np.set_printoptions(precision=3, suppress=True, threshold=np.nan)
    for t_index in xrange(len(T1)):
        print "Table T[{0}]:".format(t_index)
        result = np.zeros((T1[t_index].shape[0], 0), dtype=T1[t_index].dtype)
        result = np.hstack((result, T1[t_index]))
        delimiter = np.zeros((T1[t_index].shape[0], 1))
        delimiter.fill(np.nan)
        result = np.hstack((result, delimiter))
        result = np.hstack((result, T2[t_index]))
        print result
        print "-" * (T1[t_index].shape[1] + T2[t_index].shape[1])
def __build_adjusted_T1(T1, T2, saved_columns_indexes):
    # Return a copy of T1, extended on the right with the saved main
    # columns taken from T2 (used by the zero-column pair check).
    adjusted = T1.copy()
    if saved_columns_indexes:
        adjusted = np.hstack((adjusted, T2[:, saved_columns_indexes]))
    return adjusted
def gcd(a, b):
    # Euclidean algorithm, written iteratively instead of recursively.
    while b:
        a, b = b, a % b
    return a
def lcm(a, b):
    """Least common multiple of a and b.

    Fix: uses floor division — the original `/` produced a float under
    Python 3 for integer inputs; `//` keeps the historical Python 2
    integer result for the integral arguments this module uses.
    """
    return a * b // gcd(a, b)
if __name__ == '__main__':
    # Demo (Python 2 print statements): solve one inequality system several
    # times; each call draws fresh random combination coefficients.
    a = np.float_([[-1, 0, 0, 1],
                   [0, -1, 0, 1],
                   [0, 0, -1, 1],
                   [1, 1, 1, -6]])
    #a = [[1,1,1,-1],
    #    [-1,1,-1,1],
    #    [-2, 1, -1,-2],
    #    [1,2,-2,1],
    #    [-2,-2,1,-2]]
    s = InequalitiesSolver()
    b = s.get_solution(a)
    b0 = s.get_solution(a)
    b1 = s.get_solution(a)
    b2 = s.get_solution(a)
    print b
    print b1
    print b2
| {
"repo_name": "stonelake/pyoptimization",
"path": "pyopt/discrete/inequalities/chernikov.py",
"copies": "1",
"size": "14199",
"license": "apache-2.0",
"hash": -7417542188763899000,
"line_mean": 36.2677165354,
"line_max": 124,
"alpha_frac": 0.5744066484,
"autogenerated": false,
"ratio": 3.458952496954933,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9525924143058342,
"avg_score": 0.0014870004593182885,
"num_lines": 381
} |
__author__ = 'Alex Baranov'
import unittest
from time import *
import numpy as np
from ..discrete.inequalities import chernikov as c
class TestFind_system_of_fundamental_solutions(unittest.TestCase):
    """Unit tests for the Chernikov fundamental-system solvers."""

    def _test_pulp(self):
        # Disabled by the leading underscore: PuLP/GLPK comparison benchmark.
        # NOTE(review): contains Python 2 print statements — do not re-enable
        # under Python 3 as-is.
        import pulp as p
        prob = p.LpProblem("The Whiskas Problem", p.LpMinimize)
        elapsed = -clock()
        # creating variables
        x1 = p.LpVariable("ChickenPercent", 0, None, p.LpInteger)
        x2 = p.LpVariable("BeefPercent", 0, None, p.LpInteger)
        # goal function
        a = 0.013 * x1 + 0.008 * x2
        prob += 0.013 * x1 + 0.008 * x2
        # constraints
        prob += x1 + x2 == 100, "PercentagesSum"
        prob += 0.100 * x1 + 0.200 * x2 >= 8.0, "ProteinRequirement"
        prob += 0.080 * x1 + 0.100 * x2 >= 6.0, "FatRequirement"
        prob += 0.001 * x1 + 0.005 * x2 <= 2.0, "FibreRequirement"
        prob += 0.002 * x1 + 0.005 * x2 <= 0.4, "SaltRequirement"
        # solution
        prob.writeLP("WhiskasModel.lp")
        prob.solve(p.GLPK(msg=0))
        print "Status:", p.LpStatus[prob.status]
        for v in prob.variables():
            print v.name, "=", v.varValue
        print "Total Cost of Ingredients per can = ", p.value(prob.objective)
        elapsed = +clock()
        print "Solution time = ", prob.solutionTime
        print "Solution time2 = ", elapsed

    def test_find_system_of_fundamental_solutions(self):
        """
        Verify simple scenario for Chernikov method
        """
        # Equation-system variant: expected table taken as the known-good
        # output for this fixed input.
        sys = [[-5,-5,6,-8,-10,0],[0,-5,3,1,0,-10]]
        result1 = c.find_sfs_of_equation_system(sys)
        expected1 = np.array([[ 12., 0., 10., 0., 0., 3.],
                              [ 0., 0., 8., 6., 0., 3.],
                              [ 0., 0., 10., 0., 6., 3.],
                              [ 3., 3., 5., 0., 0., 0.],
                              [ 0., 2., 3., 1., 0., 0.],
                              [ 0., 6., 10., 0., 3., 0.]])
        self.assertTrue(np.array_equal(expected1,np.array(result1)))
        # Even-inequality-system variant.
        sys2 = [[1,-1,3,-8,5],[-1,2,-1,1,-1],[2,-1,-2,1,0],[-3,1,-1,6,-3],[1,1,-3,2,-1]]
        result2 = c.find_sfs_of_even_inequalities_system(sys2)
        expected2 = np.array([[ 13., 0., 17., 8., 0.],
                              [ 15., 11., 12., 5., 0.],
                              [ 5., 0., 9., 4., 0.],
                              [ 5., 5., 8., 3., 0.],
                              [ 2., 0., 3., 2., 1.],
                              [ 11., 0., 15., 8., 0.],
                              [ 13., 9., 12., 7., 0.],
                              [ 1., 1., 1., 1., 1.]])
        self.assertTrue(np.array_equal(expected2, np.array(result2)))
if __name__ == '__main__':
    # run the tests when the module is executed directly
    unittest.main()
| {
"repo_name": "stonelake/pyoptimization",
"path": "pyopt/tests/test_find_system_of_fundamental_solutions.py",
"copies": "1",
"size": "2808",
"license": "apache-2.0",
"hash": 4999355709457662000,
"line_mean": 38,
"line_max": 88,
"alpha_frac": 0.4405270655,
"autogenerated": false,
"ratio": 3.072210065645514,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4012737131145514,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alex Baranov'
from copy import deepcopy
from json import JSONEncoder
class Box(object):
    """
    Represents an n-dimensional axis-aligned box element.

    A box is described by its linear sizes and by its "polus" -- the
    coordinates of its bottom-left corner.  Equality compares size, name and
    kind only; the position is deliberately ignored.
    """

    @staticmethod
    def from_json_dict(d):
        """
        Parses the box from the JSON dict (as produced by BoxJsonEncoder).
        """
        return Box(d['size'], bottom_left=d['polus'], name=d['name'], kind=d['kind'], weight=d['weight'])

    def __init__(self, size=(), bottom_left=(), name="", kind="solid", weight=None):
        """
        Constructor for box.
           +-------------+
          /             /|
         +-------------+ +
         |             |/
         +-------------+
        Arguments:
        - size: (tuple or list) the element linear sizes.
        - bottom_left: (list) the coordinates of the bottom left corner of the element.
        - name: (string) the name of the box.
        - kind: (string) the type of the box.
        - weight: (number) the weight of the box; defaults to 0.
        Raises:
        - ValueError: when 'size' and 'bottom_left' have different lengths.
        """
        super(Box, self).__init__()
        self.size = size
        self.name = name
        self.kind = kind
        self.weight = weight or 0
        # an omitted (or empty) corner defaults to the origin of the same
        # dimensionality as the size
        if not bottom_left:
            bottom_left = tuple(0 for _ in range(len(size)))
        if len(bottom_left) != len(size):
            raise ValueError("The length of the 'size' argument should be equal to the size of 'bottom_left' argument")
        self.polus = bottom_left

    @property
    def diagonal_polus(self):
        """
        The corner diagonally opposite to polus (polus + size on every axis).
        """
        return list(self.polus[i] + self.size[i] for i in range(len(self.size)))

    @property
    def center(self):
        """
        Get the center position of the box.
        """
        return list(self.polus[i] + self.size[i] / 2.0 for i in range(len(self.size)))

    def get_area(self):
        """
        Calculates the area (volume in 3D) of the box.
        """
        # plain product loop instead of the builtin reduce(), which is not a
        # builtin on Python 3; an empty size yields the neutral product 1
        area = 1
        for extent in self.size:
            area *= extent
        return area

    def find_phi_function_value(self, p):
        """
        Gets the Phi-function value that characterises the placement of two boxes.
        If phi value is greater than 0: objects are NOT intersecting.
        If phi = 0: objects are touching.
        If phi is less than 0: objects are intersecting.
        Arguments:
        - p : another box.
        Raises:
        - ValueError: when the boxes have different dimensions.
        """
        n = len(self.size)
        if len(p.size) != n:
            raise ValueError("Unable to compare box's with different dimensions")
        values = []
        for i in range(n):
            # signed gaps between the two boxes along axis i, in both directions
            values.append(p.polus[i] - self.polus[i] - self.size[i])
            values.append(-p.polus[i] + self.polus[i] - p.size[i])
        return max(values)

    def touches(self, other):
        """
        Checks whether current box touches the another one.
        """
        return self.find_phi_function_value(other) == 0

    def intersects(self, other):
        """
        Checks whether two boxes are intersecting.
        """
        phi = self.find_phi_function_value(other)
        return phi < 0

    def includes_point(self, p):
        """
        Checks whether the provided point is within the box.
        Arguments:
        p - the list of point coordinates.
        """
        return all((p[i] - x >= 0 for i, x in enumerate(self.polus))) and all(
            (x - p[i] >= 0 for i, x in enumerate(self.diagonal_polus)))

    def includes(self, other):
        """
        Checks whether the current box includes the another one.
        """
        return self.includes_point(other.polus) and self.includes_point(other.diagonal_polus)

    def can_accept(self, other):
        """
        Checks whether the box can accept the another one.
        Arguments:
        - other: the another box
        Returns:
        - True or False.
        """
        return all((value - other.size[index]) >= 0 for index, value in enumerate(self.size))

    def find_free_containers(self, other):
        """
        Gets the list of containers that can be received by placing
        another box into current.
        +-------------------+
        |         a         |
        |                   |
        +-------+           |
        | other |     b     |
        +-------+-----------+
        Arguments:
        - other : the placed box. The polus should be defined.
        Returns:
        - the list of containers received.
        """
        assert isinstance(other, Box)
        result = []
        # TODO Rework this to be more pythonic
        n = len(self.size)
        for i in range(n):
            if other.diagonal_polus[i] < self.diagonal_polus[i]:
                # free container after `other` along axis i
                polus = []
                size = []
                for j in range(n):
                    if i != j:
                        polus.append(self.polus[j])
                    else:
                        polus.append(other.diagonal_polus[j])
                    size.append(self.diagonal_polus[j] - polus[j])
                result.append(Box(tuple(size), tuple(polus)))
            if other.polus[i] > self.polus[i]:
                # free container before `other` along axis i.
                # BUGFIX: the extent along axis i is the distance between the
                # two poluses, not the raw coordinate other.polus[i]; the old
                # code was only correct when self.polus was the origin (the
                # sibling branch above already computes extents as distances).
                polus = self.polus[:]
                size = self.size[:i] + (other.polus[i] - self.polus[i], ) + self.size[i + 1:]
                result.append(Box(size, polus))
        return result

    def is_blocked(self, other, axes=()):
        """
        Checks whether the current box is blocked by the another one.
        By default checks whether the box is blocked by another one by +X axis.
        Arguments:
        other: the other box.
        axes: the tuple of axes to check. For example (1,0,0) checks the block by X axis for the 3d case.
        Returns:
        - boolean value.
        """
        if len(other.size) != len(self.size):
            raise ValueError("Boxes sizes should have the same lengths")
        if not axes:
            axes = [0] * len(self.size)
            axes[0] = 1
        checks = []
        extended = deepcopy(self)
        for i, axis_flag in enumerate(axes):
            # stretch a copy of the current box along each checked axis and
            # see whether the other box falls into the stretched volume.
            # NOTE(review): the size grows by other's far coordinate, not by
            # the gap between the boxes -- looks like an "extend far enough"
            # heuristic; confirm before changing.
            extended.size = list(extended.size)
            extended.size[i] += (other.polus[i] + other.size[i]) * axis_flag
            checks.append(other.intersects(extended))
        return any(checks)

    def is_basis_for(self, other, axes=()):
        """
        Check if current box is basis for another box.
        By default check is performed by Y axis.
        Example: self is basis for other
              +-------+
              | other |
           +--+-------+--------+
           |                   |
           |       self        |
           |                   |
           |                   |
           +-------------------+
        """
        if len(other.size) != len(self.size):
            raise ValueError("Boxes sizes should have the same lengths")
        if not axes:
            axes = [0] * len(self.size)
            axes[1] = 1
        # a basis must at least touch the supported box
        if not self.touches(other):
            return False
        extended = deepcopy(self)
        for i, axis_flag in enumerate(axes):
            extended.size = list(extended.size)
            extended.size[i] += (other.polus[i] + other.size[i]) * axis_flag
        return extended.includes(other)

    def __str__(self):
        """
        String representation of the object.
        """
        return "Box: Name '{}'; Size: '{}'; Polus: '{};".format(self.name, self.size, self.polus)

    def __eq__(self, other):
        """
        Checks whether the current container is equal to other.
        Position is ignored.
        """
        return self.size == other.size and self.name == other.name and \
            self.kind == other.kind

    def __lt__(self, other):
        """
        Compares the areas of boxes.
        """
        # BUGFIX: the original computed the comparison but never returned it,
        # so every rich comparison quietly evaluated to None (falsy)
        return self.get_area() < other.get_area()

    def __le__(self, other):
        """
        Compares the areas of boxes.
        """
        return self.get_area() <= other.get_area()  # BUGFIX: was missing `return`

    def __gt__(self, other):
        """
        Compares the areas of boxes.
        """
        return self.get_area() > other.get_area()  # BUGFIX: was missing `return`

    def __ge__(self, other):
        """
        Compares the areas of boxes.
        """
        return self.get_area() >= other.get_area()  # BUGFIX: was missing `return`

    def __ne__(self, other):
        """
        Checks that the current box is not equal to another one.
        """
        return not self == other

    def clone(self):
        """
        Returns the deep copy of the box.
        """
        return deepcopy(self)
class BoxJsonEncoder(JSONEncoder):
    """
    JSONEncoder that knows how to serialise Box instances.

    A Box becomes its attribute dict plus a '__type__': 'Box' marker, which
    Box.from_json_dict can use on the decoding side.
    """
    def default(self, obj):
        if isinstance(obj, Box):
            return dict(obj.__dict__, **{'__type__': 'Box'})
        return super(BoxJsonEncoder, self).default(obj)
| {
"repo_name": "stonelake/pyoptimization",
"path": "pyopt/packing/rectangular/box.py",
"copies": "1",
"size": "8885",
"license": "apache-2.0",
"hash": -6329059491010484000,
"line_mean": 27.3234323432,
"line_max": 119,
"alpha_frac": 0.4794597636,
"autogenerated": false,
"ratio": 4.261390887290168,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5240850650890168,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alex Baranov'
from box import Box
from itertools import permutations
from operator import itemgetter
from copy import deepcopy
def orthogonal_packer(container, rect, axes_priorities=None, allowed_rotation_axes=None, **kwargs):
    """
    Packs ``rect`` into ``container``, allowing 90-degree rotations.

    Arguments:
        container : the box that should accept another box (acceptor).
        rect: the box to pack into the container.
        axes_priorities: axis order used to rank candidate orientations
            (default: first X, then Y, then Z).
        allowed_rotation_axes: mask of axes the box may rotate around
            (default: rotation allowed around every axis).
    Return:
        Pair (bool, rect):
        - bool: whether the rect was packed into the container.
        - rect: the resulting placed box, or None on failure.
    """
    dims = len(container.size)
    # by default rotate to fit first by X then by Y then by Z
    axes_priorities = axes_priorities or tuple(range(dims))
    # by default allow rotation around any axis
    allowed_rotation_axes = allowed_rotation_axes or tuple([1] * dims)
    assert isinstance(container, Box)
    assert isinstance(rect, Box)
    candidate = deepcopy(rect)
    for oriented_size in __get_all_allowed_box_permutations(rect, axes_priorities, allowed_rotation_axes):
        candidate.size = oriented_size
        if container.can_accept(candidate):
            # place the box at the container's corner
            candidate.polus = container.polus[:]
            return True, candidate
    return False, None
def __get_all_allowed_box_permutations(rect, axes, allowed_rotation_axes):
    """
    Gets the box-size permutations permitted by the rotation mask, ordered
    by the given axis priorities.
    """
    # axes whose extent must stay fixed (rotation not allowed around them)
    locked_axes = [i for i, flag in enumerate(allowed_rotation_axes) if flag == 0]
    valid_permutations = [p for p in permutations(rect.size)
                          if all(p[i] == rect.size[i] for i in locked_axes)]
    # sort list by the defined axes order
    return sorted(valid_permutations, key=itemgetter(*axes))
if __name__ == "__main__":
pass
| {
"repo_name": "stonelake/pyoptimization",
"path": "pyopt/packing/rectangular/orthogonal_packing.py",
"copies": "1",
"size": "1968",
"license": "apache-2.0",
"hash": 7729018379292422000,
"line_mean": 31.9310344828,
"line_max": 100,
"alpha_frac": 0.6524390244,
"autogenerated": false,
"ratio": 4.057731958762886,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5210170983162886,
"avg_score": null,
"num_lines": null
} |
__author__ = "Alex Baranov"
from inequalities import chernikov as c
from permutations import *
import numpy as np
def find_minimum(goal_func,
                 constraints_system,
                 combinatorial_set,
                 add_constraints=True,
                 series_count=3,
                 experiments_per_series=5,
                 quiet=True):
    """
    Gets the minimum of the linear function with linear constraints
    on the combinatorial set by random search.

    Arguments:
    goal_func -- tuple of goal function coefficients (must be a tuple: it is
                 concatenated with the bound term below)
    constraints_system -- the inequality constraints matrix; the last column
                          holds the (negated) right-hand side
    combinatorial_set -- set object providing find_nearest_set_point() and
                         generation_elements
    add_constraints -- extend the system with the default additional
                       constraints before searching
    series_count -- number of search series
    experiments_per_series -- random experiments per series
    quiet -- suppress progress output when True

    Returns:
    - (point and function value)
    """
    # define function to calculate goal function value
    f = lambda x: sum(i * j for i, j in zip(goal_func, x))
    # copying the constraints system to modify it then
    copied_system = list(constraints_system)
    if add_constraints:
        if not quiet:
            print "Addding additional constraints to the constraints system"
        copied_system = add_additional_constraints(copied_system, combinatorial_set.generation_elements)
        if not quiet:
            print "Constraints system is: \n", np.array(copied_system)
    solver = c.InequalitiesSolver()
    best_func_value = None
    best_point = None
    last_system_index = len(copied_system)
    # NOTE(review): const_was_inserted is never set to True anywhere below,
    # so the insert() branch at the end of each series is dead code and a
    # fresh bounding constraint is appended every series -- verify intent.
    const_was_inserted = False
    # starting series of experiments
    for series_number in xrange(series_count):
        if not quiet:
            print "---> Starting series #", series_number
        # store the valid points in the dict (keyed by goal function value)
        experiment_valid_points = dict()
        for experiment_number in xrange(experiments_per_series):
            if not quiet:
                print "Starting experiment #", experiment_number
            # getting some solution of the system
            s = solver.get_solution(copied_system)
            if not quiet:
                print "Generated new point within the search area: ", s
            # get the nearest point of the set
            nearest_set_point = combinatorial_set.find_nearest_set_point(s)
            if not quiet:
                print "The nearest combinatorial set point is: ", nearest_set_point
            # check whether the set point is valid
            if is_solution(copied_system, nearest_set_point):
                func_value = f(nearest_set_point)
                experiment_valid_points[func_value] = nearest_set_point
                if not quiet:
                    print "Found point is valid. Goal function value in this point is: ", func_value
            else:
                if not quiet:
                    print "The nearest set point is not valid"
        # save this point (keep the overall best across series)
        if len(experiment_valid_points):
            current_min = min(experiment_valid_points)
            if best_func_value is None or current_min < best_func_value:
                best_func_value = min(experiment_valid_points)
                best_point = experiment_valid_points[best_func_value]
        if not quiet:
            print "Current best point {0} with function value = {1}".format(best_point, best_func_value)
        # add the aditional constraint to shrink the search area.
        if not quiet:
            print "Added additional constraints: {0} <= {1}".format(goal_func, best_func_value)
        # NOTE(review): if no valid point was found during the first series,
        # best_func_value is still None here and the multiplication below
        # raises TypeError -- confirm callers guarantee feasibility.
        if not const_was_inserted:
            copied_system.append(goal_func + (-1 * best_func_value,))
        else:
            copied_system.insert(last_system_index, goal_func + (-1 * best_func_value,))
    return best_point, best_func_value
def add_additional_constraints(system, coefs, add_less_then_zero=False, add_simplex=True):
    """
    Adds additional constraints to the constraints system.

    Optionally adds non-negativity constraints of type ``-x_i <= 0`` and, if
    add_simplex is True, the simplex-style constraints that bound all the
    elements of the combinatorial set (``x_i >= min(coefs)`` and
    ``sum(x_i) <= sum(coefs)``).

    Arguments:
    system -- the matrix that represents the constraint system; the last
              column holds the (negated) right-hand side
    coefs -- the array of coefficients that will be used to add new constraints
    add_less_then_zero -- specifies whether the constraints of type -x_i <= 0
                          should be added (default - False; the old docstring
                          wrongly said True)
    add_simplex -- specifies whether the simplex constraints should be added
                   (default - True)
    Returns:
    The extended system as a list of lists.
    """
    constraints_system = np.array(system)
    constraint_coefs = np.array(coefs)
    # number of columns; the variable count is var_count - 1 because the
    # last column is the right-hand side
    var_count = constraints_system.shape[1]
    if add_less_then_zero:
        # add conditional constraints that all variables are non-negative
        left_part = -1 * np.eye(var_count - 1)
        right_part = np.zeros([var_count - 1, 1])
        positive_variables_consts = np.hstack((left_part, right_part))
        constraints_system = np.vstack((constraints_system, positive_variables_consts))
    if add_simplex:
        left_part = np.eye(var_count - 1)
        # renamed from `min`/`sum`, which shadowed the builtins
        min_coef = constraint_coefs.min()
        coef_sum = constraint_coefs.sum()
        right_part1 = min_coef * np.ones([var_count - 1, 1])
        # first add constraints of type: x_i >= min
        type1 = np.hstack((-1 * left_part, right_part1))
        # then add the constraint of type: sum(x_i) <= sum
        type2 = np.hstack((np.ones(var_count - 1), -1 * coef_sum))
        constraints_system = np.vstack((constraints_system, type1))
        constraints_system = np.vstack((constraints_system, type2))
    return constraints_system.tolist()
def find_minimum_with_exhaustive_search(goal_func,
                                        system,
                                        combinatorial_set):
    """
    Gets the solution by iterating all the elements in the set.

    Arguments:
    goal_func -- the goal function coefficients
    system -- the constraints system (last column is the right-hand side)
    combinatorial_set -- an iterable of candidate points
    Returns:
    Pair (combinatorial element, minimal function value).
    Raises:
    ValueError -- when no element of the set satisfies the constraints.
    """
    # single filtering pass instead of the old map + filter combination,
    # which also used the non-idiomatic `x != None` comparison
    feasible = [(e, sum(i * j for i, j in zip(goal_func, e)))
                for e in combinatorial_set
                if is_solution(system, e)]
    # get the element with the minimal goal function value
    return min(feasible, key=lambda pair: pair[1])
def is_solution(system, point):
"""
Checks whether the point is the solution for a given constraints system.
"""
a = np.array(system)
# get the left part
left = a[:, :-1] * point
left = sum(left.T)
# get the right part
right = (-1) * a[:, -1]
return np.all(left <= right)
if __name__ == '__main__':
    # Smoke test: minimise `func` over the permutation set of (1, 2, 3)
    # subject to the inequality system `s`, comparing exhaustive search
    # with the random search defined above.
    s = [[1, -2, 3, 0], [-4, 1, 1, 2]]
    func = (-1, 1, 2)
    pset = PermutationSet((1, 2, 3))
    point, func_value = find_minimum_with_exhaustive_search(func, s, pset)
    print "Point and min fuc value found using exhaustive search: ", (point, func_value)
    point2, func_value2 = find_minimum(func, s, pset, quiet=False)
    print "Point and min fuc value found using random search: ", (point2, func_value2)
| {
"repo_name": "stonelake/pyoptimization",
"path": "pyopt/discrete/randomsearch.py",
"copies": "1",
"size": "6894",
"license": "apache-2.0",
"hash": -6910973765954224000,
"line_mean": 36.3,
"line_max": 134,
"alpha_frac": 0.5999419785,
"autogenerated": false,
"ratio": 4.115820895522388,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5215762874022388,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alex Baranov'
from oriented_packing import oriented_packer
from operator import itemgetter
from itertools import ifilter
def get_non_blocking_boxes(current_box, all_boxes, packed_boxes):
    """
    Get the already packed boxes that the current box is not allowed to block.

    A packed box must stay reachable when it will be unpacked before the
    current box is unpacked, i.e. when the route order is
    pack(p) < pack(current) < unpack(p) < unpack(current).

    Arguments:
        current_box: the box about to be packed.
        all_boxes: the full pack/unpack action sequence (route).
        packed_boxes: the boxes currently inside the container.
    Returns:
        The list of packed boxes the current box must not block.
    """
    result = []
    # NB: list membership/index rely on Box.__eq__ (size/name/kind)
    current_box_index = all_boxes.index(current_box) if current_box in all_boxes else None
    current_unpack = next((x for x in all_boxes
                           if x.name == current_box.name and x.kind == "unpack"), None)
    # unpack missing: the box stays in the container to the end of the route,
    # so it may not block any of the currently packed boxes
    if current_unpack is None:
        return packed_boxes[:]
    # BUGFIX: was `if not current_box_index`, which also bailed out for the
    # perfectly valid index 0; compare against None explicitly.
    if current_box_index is None:
        return result
    current_unpack_index = all_boxes.index(current_unpack)
    # get the packed boxes in the all-boxes collection
    for packed_box in packed_boxes:
        current_packed = next((x for x in all_boxes
                               if x.name == packed_box.name and x.kind == packed_box.kind), None)
        packed_index = all_boxes.index(current_packed)
        if packed_index != -1 and packed_index < current_box_index:
            # check if the current box is within the delivery interval of the
            # packed box
            unpack = next((x for x in all_boxes
                           if x.name == packed_box.name and x.kind == "unpack"), None)
            unpack_index = all_boxes.index(unpack) if unpack is not None else -1
            # the current box should not block a box that is unpacked before
            # the current one is unpacked
            if unpack_index != -1 and packed_index < current_box_index < unpack_index < current_unpack_index:
                result.append(packed_box)
    return result
def get_block_boxes(current_box, all_boxes, packed_boxes):
    """
    Get the already packed boxes that are not allowed to block the current box.

    A packed box must not block the current box when the current box has to
    be unpacked before that packed box, i.e. when the route order is
    pack(p) < pack(current) < unpack(current) < unpack(p).
    """
    result = []
    current_box_index = all_boxes.index(current_box) if current_box in all_boxes else None
    current_unpack = next((x for x in all_boxes
                           if x.name == current_box.name and x.kind == "unpack"), None)
    # BUGFIX: was `if not current_box_index`, which also treated the valid
    # index 0 as "not found"; compare against None explicitly.
    if current_box_index is None or current_unpack is None:
        return result
    current_unpack_index = all_boxes.index(current_unpack)
    for packed_box in packed_boxes:
        # find the pack action of the packed box in the route
        current_packed = next((x for x in all_boxes
                               if x.name == packed_box.name and x.kind == packed_box.kind), None)
        packed_index = all_boxes.index(current_packed)
        if packed_index != -1 and packed_index < current_box_index:
            # check if the already packed box blocks the current box
            unpack = next((x for x in all_boxes
                           if x.name == packed_box.name and x.kind == "unpack"), None)
            unpack_index = all_boxes.index(unpack) if unpack is not None else -1
            # the packed box blocks us when we must leave before it does
            if unpack_index != -1 and packed_index < current_box_index < current_unpack_index < unpack_index:
                result.append(packed_box)
    return result
def non_blocking_container_selector(available_containers,
                                    box,
                                    packer=None,
                                    packed_boxes=None,
                                    all_boxes=None, **kwargs):
    """
    Selects a container for ``box`` so that no blocking constraint of the
    pickup-and-delivery route is violated, preferring the container with the
    smallest X coordinate.
    """
    packer = packer or oriented_packer
    axes = kwargs.get("axes", __get_default_axes(box))
    # boxes the current box may not block / boxes that may not block it
    must_not_block = get_non_blocking_boxes(box, all_boxes, packed_boxes)
    must_not_be_blocked_by = get_block_boxes(box, all_boxes, packed_boxes)
    candidates = []
    for container in available_containers:
        packed_ok, placed = packer(container, box, **kwargs)
        # Second block check: the current box must not be blocked by boxes
        # that are unpacked only after it.
        # Example route: d c -a- b a b c d -- 'a' must stay clear of 'c', 'd'.
        not_blocked = all(not placed.is_blocked(blocker, axes=axes)
                          for blocker in must_not_be_blocked_by)
        if packed_ok and __does_not_block_all(placed, must_not_block, axes) and not_blocked:
            candidates.append((container, placed))
    # nothing valid found
    if not candidates:
        return None, None
    # prefer the leftmost container (smallest polus X coordinate)
    return min(candidates, key=lambda pair: pair[0].polus[0])
def stable_non_blocking_container_selector(available_containers,
                                           box,
                                           packer=None,
                                           packed_boxes=None,
                                           all_boxes=None, **kwargs):
    """
    Selects the best container for the box.

    Like non_blocking_container_selector, but additionally requires the
    placement to be "stable": the box must rest on the container floor or
    be based on an already packed box.

    Arguments:
    - available_containers: the candidate free containers.
    - box: the box being packed.
    - packer: single-box packing function (default: oriented_packer).
    - packed_boxes: boxes already placed in the container.
    - all_boxes: the full pack/unpack route sequence.
    Keyword arguments:
    - axes: blocking-check axes mask (default from __get_default_axes).
    - place_axes: sort order of the container polus coordinates used to
      rank the valid candidates.
    Returns:
    - (container, placed box) pair, or (None, None) when nothing fits.
    """
    packer = packer or oriented_packer
    # get the non block directions.
    # by default unpack boxes in +X direction and do not block box that is below another
    # so default value for axes is (1,1,0)
    axes = kwargs.get("axes", __get_default_axes(box))
    # the place axes define the sorting order for the available containers.
    # for example (1,2,0) defines that containers will be sorted by Y, then
    # by Z and then by X
    if len(box.polus) > 2:
        place_axes = kwargs.get("place_axes", (0, 1, 2))
    else:
        place_axes = kwargs.get("place_axes", (0, 1))
    non_blocking_boxes = get_non_blocking_boxes(box, all_boxes, packed_boxes)
    # get the list of boxes that should not block the current box
    blocking_boxes = get_block_boxes(box, all_boxes, packed_boxes)
    valid_containers = []
    for c in available_containers:
        pack_result, rect = packer(c, box, **kwargs)
        if pack_result:
            if packed_boxes:
                check2 = all(not rect.is_blocked(bb, axes=axes) for bb in blocking_boxes)
                # the smallest container polus Y defines the floor level
                floor = min(available_containers, key=lambda x: x.polus[1]).polus[1]
                # stability: rest on the floor, or on top of some packed box
                if any([rect.polus[1] == floor or pb.is_basis_for(rect) for pb in packed_boxes]) and \
                        __does_not_block_all(rect, non_blocking_boxes, axes) and check2:
                    valid_containers.append((c, rect))
            else:
                valid_containers.append((c, rect))
    # return none if we can't find valid containers
    if not valid_containers:
        return None, None
    return sorted(valid_containers, key=lambda cont: itemgetter(*place_axes)(cont[0].polus))[0]
def __does_not_block_all(packed_box, non_blocking_boxes, axes):
    """True when ``packed_box`` blocks none of ``non_blocking_boxes``."""
    for protected in non_blocking_boxes:
        if protected.is_blocked(packed_box, axes=axes):
            return False
    return True
def __get_default_axes(box):
    """Default blocking-axes mask: check the first two axes (+X, +Y) only."""
    axes_mask = [0] * len(box.size)
    # NB: slice assignment, so even a 1-D box yields [1, 1] here, exactly as
    # the original implementation did
    axes_mask[:2] = [1, 1]
    return axes_mask
| {
"repo_name": "stonelake/pyoptimization",
"path": "pyopt/packing/rectangular/pdp_packing.py",
"copies": "1",
"size": "7157",
"license": "apache-2.0",
"hash": 745417469527810700,
"line_mean": 38.6647727273,
"line_max": 122,
"alpha_frac": 0.6033254157,
"autogenerated": false,
"ratio": 3.915207877461707,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0015679285654948188,
"num_lines": 176
} |
__author__ = "Alex Baranov"
from random import randrange
from visual import *
from reports import ReportsBuilder
class BoxDrawer(object):
    """
    Draws the boxes with VPython.

    Keyboard controls inside display(): 'l' toggles labels, 'n' replays the
    next pack/unpack action, 'r' shows the dynamic report.
    """
    def __init__(self, packing_params=None, display_labels=True, **kwargs):
        """
        Start the box drawing.

        Arguments:
        - packing_params: (dict) packing run parameters; the optional
          "actions" entry holds the (action name, box) sequence to replay.
        - display_labels: (bool) initial visibility of the text labels.
        Keyword arguments:
        - container_color: the colour of container boxes (default: green).
        """
        # NOTE(review): `randrange=scene.autoscale` passes the autoscale flag
        # under a keyword named after the stdlib randrange -- looks like a
        # copy-paste slip; confirm which display() option was intended.
        self.win = display(title='Packing results', background=(0.1, 0.1, 0.1), randrange=scene.autoscale)
        self.win.select()
        self.display_labels = display_labels
        # create frames
        self.arrows_frame = frame()
        self.containers_frame = frame()
        self.boxes_frame = frame()
        self.labels_frame = frame()
        # draw arrows
        self.__draw_arrows()
        # assign default variables
        self.container_color = kwargs.get("container_color", color.green)
        # packing params
        self.pack_params = packing_params
        self.actions = []
        self.action_index = 0
        if packing_params:
            self.actions = packing_params.get("actions", [])

    @classmethod
    def show_packing_results(cls, result, params, containers):
        """
        Displays the packing results.
        """
        bd = BoxDrawer(packing_params=params)
        bd.add_containers(containers)
        bd.add_boxes(result)
        bd.display()

    def __get_random_color(self):
        """
        Generates the random color.
        """
        return [randrange(0, 255) / 255. for _ in range(3)]

    def add_boxes(self, boxes, change_action_pointer=True):
        """
        Draws all the boxes that should or were packed.
        """
        for pbox in boxes:
            bcolor = self.__get_random_color()
            box(frame=self.boxes_frame, pos=pbox.center, size=pbox.size, color=bcolor)
            label(frame=self.labels_frame, pos=pbox.center, box=0,
                  text='name={}\npolus={}\nsize={}'.format(pbox.name, pbox.polus, pbox.size))
        # if some boxes were added set the actions index to max
        if boxes and change_action_pointer:
            self.action_index = len(self.actions)

    def remove_box(self, box):
        """
        Removes box from the display.
        """
        # a drawn box is matched by its center position and size
        for element in filter(lambda x: x.pos == box.center and x.size == box.size, self.boxes_frame.objects):
            element.visible = False
        # remove also label
        for label in filter(lambda x: x.pos == box.center, self.labels_frame.objects):
            label.visible = False

    def add_containers(self, containers, random_color=False, opacity=None, centered_labels=False):
        """
        Add container to the screen.
        """
        op = opacity or 0.1
        for index, container in enumerate(containers):
            if random_color:
                c = self.__get_random_color()
            else:
                c = self.container_color
            box(frame=self.containers_frame, pos=container.center, size=container.size, opacity=op, color=c)
            # label either at the near or at the far corner of the container
            if centered_labels:
                pos = container.polus
            else:
                pos = container.diagonal_polus
            label(frame=self.labels_frame, pos=pos, box=0,
                  text='Container #{}\npolus={}\nsize={}'.format(index, container.polus, container.size))

    def __draw_arrows(self):
        """
        Draws the x,y,z arrows.
        """
        #x
        arrow(frame=self.arrows_frame, pos=(0, 0, 0), axis=(10, 0, 0), shaftwidth=0.01)
        label(frame=self.arrows_frame, pos=(10, 0, 0), box=0, text='X')
        #y
        arrow(frame=self.arrows_frame, pos=(0, 0, 0), axis=(0, 10, 0), shaftwidth=0.01)
        label(frame=self.arrows_frame, pos=(0, 10, 0), box=0, text='Y')
        #z
        arrow(frame=self.arrows_frame, pos=(0, 0, 0), axis=(0, 0, 10), shaftwidth=0.01)
        label(frame=self.arrows_frame, pos=(0, 0, 10), box=0, text='Z')
        for obj in self.arrows_frame.objects:
            obj.color = color.orange

    def __draw_action(self, action_pair):
        """
        Replays a single (action name, box) pair on the screen.
        """
        name = action_pair[0]
        b = action_pair[1]
        if name == "pack":
            self.add_boxes((b, ), change_action_pointer=False)
        elif name == "unpack":
            self.remove_box(b)

    def __remove_all_boxes(self):
        """
        Hides every drawn box together with its label.
        """
        for element in self.boxes_frame.objects:
            element.visible = False
            # remove labels
            for label in filter(lambda x: x.pos == element.pos,
                                self.labels_frame.objects):
                label.visible = False

    def display(self):
        """
        Interactive event loop: reacts to the 'l', 'n' and 'r' keys.
        """
        print "-------------------------------------------------------"
        while 1:
            rate(100)
            if self.win.kb.keys:
                s = self.win.kb.getkey()
                if len(s) == 1:
                    # toggle labels
                    if s == 'l' or s == 'L':
                        if self.display_labels:
                            self.labels_frame.visible = False
                            self.display_labels = False
                        else:
                            self.labels_frame.visible = True
                            self.display_labels = True
                    # display actions
                    if s == 'n' or s == 'N':
                        if not self.actions:
                            continue
                        else:
                            if len(self.actions) == self.action_index:
                                # all actions replayed: reset and clear screen
                                self.action_index = 0
                                if self.boxes_frame.objects:
                                    # remove non-unpacked boxes
                                    self.__remove_all_boxes()
                                print "Packing completed \n\n"
                            else:
                                if self.actions[self.action_index][0] == "pack":
                                    frmt = "Packing"
                                else:
                                    frmt = "Unpacking"
                                print "{} box: '{}'".format(frmt, self.actions[self.action_index][1])
                                self.__draw_action(self.actions[self.action_index])
                                self.action_index += 1
                    # display reports
                    if s == 'r' or s == 'R':
                        if self.pack_params:
                            ReportsBuilder.show_dynamic_report(self.pack_params)
| {
"repo_name": "stonelake/pyoptimization",
"path": "pyopt/packing/rectangular/drawer.py",
"copies": "1",
"size": "6596",
"license": "apache-2.0",
"hash": 1516229222471202000,
"line_mean": 34.2417582418,
"line_max": 110,
"alpha_frac": 0.4810491207,
"autogenerated": false,
"ratio": 4.244530244530244,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5225579365230244,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alex Baranov'
from reports import ReportsBuilder
from pdp_packing import stable_non_blocking_container_selector
from orthogonal_packing import orthogonal_packer
from drawer import BoxDrawer
from rpacker import RPacker
from box import Box
from optparse import OptionParser
# define the command line arguments
parser = OptionParser()
parser.add_option("-o", "--output", dest="output_filename",
                  help="write solution report to FILE", metavar="FILE", default='output.txt')
parser.add_option("-b", "--boxes", dest="boxes_filename",
                  help="the file to read boxes data from", metavar="FILE")
# BUGFIX: help text was a copy-paste of the -b option
parser.add_option("-n", "--boxes_count", dest="boxes_count",
                  help="the number of boxes to read from the boxes file", type='int')
parser.add_option("-r", "--route", dest="route",
                  help="the route data")
# BUGFIX: help text was a copy-paste of the -r option
parser.add_option("-c", "--container", dest="container",
                  help="the container sizes and weight limit, e.g. '20 20 20 100'")
parser.add_option("-a", "--axes", dest="axes", default="1 0 2",
                  help="the axes priorities")
parser.add_option("-p", "--partial-route", action="store_true", dest="is_partial_route",
                  help="specifies whether the route is partial", default=False)
# BUGFIX: help text was a copy-paste of the -p option
parser.add_option("-g", "--draw-result", action="store_true", dest="draw_results",
                  help="specifies whether the packing results should be drawn")
def pack_boxes(boxes, container,
               container_select_func=stable_non_blocking_container_selector,
               packer=orthogonal_packer,  # allows rotations
               allowed_rotation_axes=(1, 0, 1),  # allows rotation only by X and Z axes
               is_partial_route=False,
               **kwargs):
    """
    Packs the given pack/unpack sequence of boxes into the container.

    :param boxes: the list of boxes to pack
    :param container: the container to pack boxes in
    :param container_select_func: the container packing select function
    :param packer: the single-box packing function
    :param allowed_rotation_axes: mask of axes the boxes may rotate around
    :param is_partial_route: whether the route is only a fragment
    :param kwargs: other keyword arguments forwarded to the packer
    :returns: tuple (success flag, leftover boxes, packing parameters dict)
    """
    engine = RPacker()
    leftovers, run_params = engine.pack(boxes, (container, ),
                                        container_select_func=container_select_func,
                                        packer=packer,
                                        allowed_rotation_axes=allowed_rotation_axes,
                                        **kwargs)
    success = is_packing_successful(leftovers, run_params, boxes, container, is_partial_route)
    return success, leftovers, run_params
def read_korobki_file(box_number, korobki_file):
    """
    Reads the first ``box_number`` boxes from the "korobki" data file.

    Each data line holds (after four leading columns) the length, width,
    height and weight of a box, with ',' as the decimal separator.
    Returns:
    A dictionary mapping the 1-based box index to its Box.
    """
    def _as_float(token):
        # the data files use a decimal comma
        return float(token.replace(',', '.'))
    boxes = {}
    with open(korobki_file, 'r') as data_file:
        for line_number, line in enumerate(data_file):
            # line 0 is a header; read only the requested number of boxes
            if 0 < line_number <= box_number:
                fields = line.split(' ')[4:]
                length = _as_float(fields[0])
                width = _as_float(fields[1])
                height = _as_float(fields[2])
                weight = _as_float(fields[3].lstrip('\n'))
                boxes[line_number] = Box(size=(length, width, height), weight=weight, name=line_number)
    return boxes
def parse_container_data(container_string):
    """
    Parses a 'height width length weight' string (decimal commas allowed)
    into the container Box.
    """
    height, width, length, weight = (float(token.replace(',', '.'))
                                     for token in container_string.split())
    return Box(size=(height, width, length), weight=weight)
def pack_route(boxes_dict, route, cont, is_partial_route, **kwargs):
    """
    Packs the boxes into the container following the route string.

    The route lists box indexes separated by spaces; an index larger than
    the number of boxes denotes the *unpack* action for the box with index
    ``index - box_number``.  The last route token is the route price and is
    not part of the action sequence.
    Returns:
    - tuple (pack result: bool, packed_boxes: list, packing result parameters: dict)
    """
    tokens = route.split()
    box_number = len(boxes_dict)
    actions = []
    for token in tokens[:-1]:
        index = int(token)
        if index <= box_number:
            # pack action: the box itself
            actions.append(boxes_dict[index])
        else:
            # unpack action referencing the matching packed box by name
            actions.append(Box(name=boxes_dict[index - box_number].name, kind="unpack"))
    return pack_boxes(actions, cont, is_partial_route=is_partial_route, **kwargs)
def pack_from_files(box_number,
                    korobki_file,
                    pdp_file,
                    container=None,
                    container_select_func=stable_non_blocking_container_selector,
                    **kwargs):
    """
    Reads box data and routes from the files and packs the first feasible route.

    Arguments:
    box_number -- how many boxes to read from the boxes file
    korobki_file -- path to the boxes ("korobki") data file
    pdp_file -- path to the routes (PDP) data file
    container -- the container box; defaults to a 20x20x20 box of weight 100
    container_select_func -- the container selection strategy
    Returns:
    Tuple (route price, packed boxes, packing params, container) for the
    first route that packs successfully, or None when no route fits.
    """
    # BUGFIX: create a fresh default per call -- the old `container=Box(...)`
    # default was a single mutable instance shared across all calls.
    if container is None:
        container = Box(size=(20, 20, 20), weight=100)
    # Reuse the shared parser instead of duplicating it inline; it also
    # closes the boxes file, which the previous inline version leaked.
    boxes = read_korobki_file(box_number, korobki_file)
    # read packing routes (`with` closes the previously leaked file handle)
    box_sequences = []
    with open(pdp_file, 'r') as route_file:
        for route in route_file:
            values = route.split()
            box_sequence = {'price': values[-1], 'route': []}
            for box_index in values[:-1]:
                box_index = int(box_index)
                if box_index <= box_number:
                    # add the packing box
                    box_sequence['route'].append(boxes[box_index])
                else:
                    # add the unpack action
                    box_sequence['route'].append(Box(name=boxes[box_index - box_number].name, kind="unpack"))
            box_sequences.append(box_sequence)
    # pass all the box sequences one by one until some route packs
    for bb in box_sequences:
        res, packed_boxes, params = pack_boxes(bb['route'], container,
                                               container_select_func=container_select_func,
                                               **kwargs)
        if res:
            return bb['price'], packed_boxes, params, container
    # implicit None when no route packed successfully (as before)
def is_packing_successful(packing_results, packing_params, boxes_to_pack, container, is_partial_route):
    """
    Checks whether the packing was successful and all the boxes were delivered.

    For a partial route only the number of performed actions matters; a full
    route must additionally leave no unpacked boxes and never exceed the
    container's weight limit.
    """
    every_box_handled = len(packing_params['actions']) == len(boxes_to_pack)
    if is_partial_route:
        return every_box_handled
    no_leftovers = len(packing_results) == 0
    within_weight_limit = all(w <= container.weight for w in packing_params['weights'])
    return no_leftovers and every_box_handled and within_weight_limit
def draw_results(result, params, container):
    """Visualize the packing result with BoxDrawer for a single container."""
    BoxDrawer.show_packing_results(result, params, (container,))
def show_reports(params, container, **kwargs):
    """Build and display the dynamic packing report; kwargs are forwarded to ReportsBuilder."""
    ReportsBuilder.show_dynamic_report(params, container, **kwargs)
if __name__ == "__main__":
    (options, args) = parser.parse_args()
    # axes priorities arrive as a space-separated string, e.g. "2 1 0"
    paxes = tuple([int(x.strip()) for x in options.axes.split(" ")])
    print paxes
    print options
    # read the korobki (boxes) file first
    boxes = read_korobki_file(options.boxes_count, options.boxes_filename)
    container = parse_container_data(options.container)
    res = pack_route(boxes, options.route, container,
                     is_partial_route=options.is_partial_route,
                     place_axes=(2, 1, 0), # container selection criteria: take the lowest container first; NOTE(review): hard-coded while axes_priorities is user-supplied - confirm intended
                     axes_priorities=paxes)
    print res[0]
    # saving to file: one repr()'d result element per line
    with open(options.output_filename, 'w') as o:
        o.writelines(map(lambda s: repr(s) + '\n', res))
    # price, packed_boxes, params, container = pack_from_files(4,
    #                                                          'Korobki.txt',
    #                                                          'PDP.txt',
    #                                                          place_axes=(1, 0, 2))
    #print('Route price is ' + price)
    #show_reports(params, container, pdf=False)
    if options.draw_results:
        draw_results(res[1], res[2], container)
| {
"repo_name": "stonelake/pyoptimization",
"path": "pyopt/packing/rectangular/pdphelper.py",
"copies": "1",
"size": "8217",
"license": "apache-2.0",
"hash": -3984635566719759000,
"line_mean": 39.085,
"line_max": 115,
"alpha_frac": 0.5635876841,
"autogenerated": false,
"ratio": 3.935344827586207,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4998932511686207,
"avg_score": null,
"num_lines": null
} |
__author__ = "Alex Baranov"
import itertools as it
from combinatorial_set import CombinatorialSet
class PermutationSet(CombinatorialSet):
    """
    Describes the set of all permutations of the generation elements.
    """
    def __init__(self, s=()):
        super(PermutationSet, self).__init__(s)
    def __iter__(self):
        # lazily enumerate every ordering of the generation elements
        return iter(it.permutations(self.generation_elements))
    def find_min_of_linear_function(self, coefs):
        """
        Gets the minimum of the linear function on the given set.

        Parameters:
        - coefs - the coefficients (c_i) of the linear function of type F(x) = sum(c_i*x_i)
        """
        # positions ordered from the largest coefficient to the smallest
        # (stable sort keeps the original order for equal coefficients)
        ranked = sorted(enumerate(coefs), key=lambda pair: pair[1], reverse=True)
        # pair the i-th smallest element with the i-th largest coefficient
        res = list(self.generation_elements)
        for rank, (position, _) in enumerate(ranked):
            res[position] = self.generation_elements[rank]
        return res
    def find_nearest_set_point(self, p):
        """
        Gets the nearest set point related to the given point 'p'

        Parameters:
        - p - some point in the space
        """
        # minimizing |x - p|^2 over the set is equivalent to minimizing the
        # linear part -2*p.x, since |x|^2 is constant over permutations
        return self.find_min_of_linear_function([-2 * coord for coord in p])
if __name__ == '__main__':
    # smoke test: build a small permutation set and show its base elements
    p = PermutationSet((1, 2, 3, 4))
    print p.generation_elements
| {
"repo_name": "stonelake/pyoptimization",
"path": "pyopt/discrete/permutations.py",
"copies": "1",
"size": "1611",
"license": "apache-2.0",
"hash": 3052014972508641000,
"line_mean": 28.3962264151,
"line_max": 113,
"alpha_frac": 0.5834885164,
"autogenerated": false,
"ratio": 4.120204603580563,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5203693119980562,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alex Berriman <aberriman@formcorp.com.au>'
import sys
import formcorp.api
# FormCorp configurations
public_key = ''
private_key = ''
form_id = 0
# Initialise the module
formcorp.api.init(private_key, public_key)
# Set the form id
formcorp.api.set_form_id(form_id)
print "======================================================="
print "============= FormCorp Sample Application ============="
print "=======================================================\n"
# Fetch the token and shoot off the api call
print "Retrieving token..."
token = formcorp.api.get_token()
if not token:
print "Unable to retrieve token from remote API\n"
sys.exit()
print "Retrieved token: {0}\n".format(token)
# Fetch submissions from the server
print "Retrieving submissions for form..."
try:
submissions = formcorp.api.call('v1/submissions/ids', "POST", {
'formId': form_id,
'token': token
})
except:
print "There was an error when attempting to retrieve the form submissions.\n"
sys.exit()
print "Successfully received {0} submissions.\n".format(len(submissions))
# Retrieve submission data
submission_id = submissions[0]
print "Fetching submission data for id: {0}...".format(submission_id['id'])
submission = formcorp.api.call('v1/submissions/view', "POST", {
'token': token,
'id': submission_id,
'formId': form_id
})
print submission
| {
"repo_name": "formcorp/python-formcorp",
"path": "sample-app.py",
"copies": "1",
"size": "1392",
"license": "apache-2.0",
"hash": 5252192670987343000,
"line_mean": 25.7692307692,
"line_max": 82,
"alpha_frac": 0.6293103448,
"autogenerated": false,
"ratio": 3.462686567164179,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9578819201120805,
"avg_score": 0.002635542168674699,
"num_lines": 52
} |
__author__ = 'Alex Breshears'
__license__ = '''
Copyright (C) 2012 Alex Breshears
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from django.contrib.admin import site
from django.contrib import admin
from shorturls.models import *
class LinkClickInline(admin.TabularInline):
    """Inline table of LinkClick rows shown on the Link admin page."""
    model = LinkClick
    # Django's option for the number of empty extra forms is named 'extra';
    # the original 'extras' was silently ignored, leaving the default of 3
    # blank rows instead of none
    extra = 0
class LinkAdmin(admin.ModelAdmin):
    """Admin configuration for Link objects with their click log inline."""
    inlines = [LinkClickInline]
    def save_model(self, request, obj, form, change):
        # NOTE(review): equivalent to the default ModelAdmin.save_model(),
        # which also just saves the object -- this override can likely be removed
        obj.save()
site.register(Link, LinkAdmin) | {
"repo_name": "t3hi3x/p-k.co",
"path": "shorturls/admin.py",
"copies": "1",
"size": "1450",
"license": "mit",
"hash": -3901632474489135000,
"line_mean": 38.2162162162,
"line_max": 79,
"alpha_frac": 0.7875862069,
"autogenerated": false,
"ratio": 4.178674351585014,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5466260558485014,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alex Breshears'
from shorturls.utils import *
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.template import Context, RequestContext
from django.template.loader import get_template
from chartit import PivotChart, PivotDataPool
from django.db.models import Count
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login
from forms import *
from django.shortcuts import render_to_response
import simplejson as json
import datetime
from django.conf import settings
def ajax_links(request):
    """Render the 'ajax/links.html' fragment listing the current user's links
    with truncated per-device URLs, click totals, short link and QR code.

    NOTE(review): unlike ajax_devices() there is no authentication check here,
    and the `template` loaded below is never used (render_to_response loads the
    template again by name) -- confirm and clean up.
    """
    my_links = Link.objects.filter(user=request.user).all()
    template = get_template('ajax/links.html')
    links = []
    if my_links:
        # display URLs are clipped to 55 chars, with '...' appended only when
        # something was actually cut off (the `and` idiom below)
        max_char_length = 55
        for link in my_links:
            link_to_add = {
                'base_64': link.base64,
                'link_total_click': link.linkclick_set.count(),
                'short_default_url': link.long_url_default[:max_char_length] + (link.long_url_default[max_char_length:] and '...'),
                'short_iphone_url': link.long_url_iphone[:max_char_length] + (link.long_url_iphone[max_char_length:] and '...'),
                'short_ipad_url': link.long_url_ipad[:max_char_length] + (link.long_url_ipad[max_char_length:] and '...'),
                'short_android_url': link.long_url_android[:max_char_length] + (link.long_url_android[max_char_length:] and '...'),
                'short_blackberry_url': link.long_url_blackberry[:max_char_length] + (link.long_url_blackberry[max_char_length:] and '...'),
                'short_windows_mobile_url': link.long_url_windows_mobile[:max_char_length] + (link.long_url_windows_mobile[max_char_length:] and '...'),
                'short_mac_url': link.long_url_mac[:max_char_length] + (link.long_url_mac[max_char_length:] and '...'),
                'short_pc_url': link.long_url_pc[:max_char_length] + (link.long_url_pc[max_char_length:] and '...'),
                'default_url': link.long_url_default,
                'iphone_url': link.long_url_iphone,
                'ipad_url': link.long_url_ipad,
                'android_url': link.long_url_android,
                'blackberry_url': link.long_url_blackberry,
                'windows_mobile_url': link.long_url_windows_mobile,
                'mac_url': link.long_url_mac,
                'pc_url': link.long_url_pc,
                'category': link.category,
                'description': link.description,
                'name': link.name,
                'short_link':link.short_link() + '.info',
                'qr_code':link.qrcode()
            }
            links.append(link_to_add)
    context = {'links':links}
    # locals() exposes every local above (links, my_links, ...) to the template
    return render_to_response('ajax/links.html', locals(), context_instance = RequestContext(request=request, dict=context))
def ajax_devices(request):
    """Render the 'ajax/devices.html' chart fragment for the current user:
    one click-count line per device, a 'Total' line and a device-share pie.

    Optional GET parameters:
    - begin_date / end_date: 'MM/DD/YYYY' window (default: the last 45 days)
    - link_id: restrict the statistics to a single Link

    :raises Http404: when the user is not authenticated
    """
    if not request.user.is_authenticated():
        raise Http404
    link_all = Link.objects.filter(user=request.user)
    if link_all:
        template = get_template('ajax/devices.html')
        # resolve the reporting window from the query string
        if request.GET.get('begin_date'):
            begin_date_string = request.GET.get('begin_date')
            begin_date = datetime.datetime.strptime(begin_date_string, '%m/%d/%Y').date()
        else:
            begin_date = datetime.date.today() - datetime.timedelta(45)
        if request.GET.get('end_date'):
            end_date_string = request.GET.get('end_date')
            end_date = datetime.datetime.strptime(end_date_string, '%m/%d/%Y').date()
        else:
            end_date = datetime.date.today()
        if request.GET.get('link_id'):
            link = Link.objects.get(pk=request.GET.get('link_id'))
        else:
            link = None
        raw_device_data = get_devices(begin_date, end_date, request.user, link)
        raw_timeline_data = get_timeline(begin_date, end_date, request.user, link)
        unique_devices = get_unique_devices(begin_date, end_date, request.user, link)
        categories = get_unique_dates(begin_date, end_date)
        series = []
        colors = settings.GRAPH_COLORS
        # one line per device, zero-filled so the data points stay aligned
        # with the date categories
        for index, d in enumerate(unique_devices):
            data = []
            for c in date_range(begin_date, end_date):
                # BUGFIX: the flag is reset for every date; previously it was
                # initialised only once, so after the first hit no zeros were
                # appended and the points drifted out of line with the dates
                found_one = False
                for s in raw_device_data:
                    if s[2] == d[0] and c == s[1]:
                        data.append(s[0])
                        found_one = True
                if not found_one:
                    data.append(0)
            new_series = {
                'name': d,
                'data': data
            }
            try:
                new_series['color'] = colors[index]
            except Exception:
                # more devices than configured colors: let the chart pick one
                pass
            series.append(new_series)
        # pie chart with the overall browser/device share
        pie_data = []
        if not link:
            aggregate_device_data = LinkClick.objects.filter(date_time__range=[begin_date, end_date]).exclude(browser='').values('browser').annotate(Count('browser'))
        else:
            aggregate_device_data = LinkClick.objects.filter(date_time__range=[begin_date, end_date], link=link).exclude(browser='').values('browser').annotate(Count('browser'))
        for index, a in enumerate(aggregate_device_data):
            new_pie_data = {
                'name': a['browser'],
                'y': a['browser__count']
            }
            try:
                new_pie_data['color'] = colors[index]
            except IndexError:
                # BUGFIX: this was try/finally, so running out of colors
                # appended the slice and then crashed the view with IndexError
                pass
            pie_data.append(new_pie_data)
        pie_series = {
            'type': 'pie',
            'name': 'Device Breakout',
            'data': pie_data,
            'center': [100, 80],
            'size': 100,
            'showInLegend': False,
            'dataLabels': {
                'enabled': False
            }
        }
        series.append(pie_series)
        # 'Total' line across all devices, zero-filled the same way
        data = []
        for c in date_range(begin_date, end_date):
            found_one = False
            for s in raw_timeline_data:
                if c == s[1]:
                    data.append(s[0])
                    found_one = True
            if not found_one:
                data.append(0)
        new_series = {
            'name': 'Total',
            'data': data
        }
        try:
            new_series['color'] = colors[len(unique_devices)]
        except Exception:
            pass
        series.append(new_series)
        cont_dict = {
            'series': json.dumps(series),
            'categories': json.dumps(categories),
            # aim for roughly four labels on the x axis
            'tick_interval': int(round(len(categories) / 4, 0)),
            'begin_date': format_date(begin_date),
            'end_date': format_date(end_date)
        }
        if link:
            cont_dict['link_id'] = link.pk
        context = Context(cont_dict, autoescape=False)
        return HttpResponse(template.render(context))
    return HttpResponse("You don't have any links. Create some to see the analytics!")
def ajax_locations(request):
    # Placeholder endpoint: location analytics have not been implemented yet.
    return HttpResponse("This feature isn't implemented yet. Stay tuned.") | {
"repo_name": "t3hi3x/p-k.co",
"path": "ajax/views.py",
"copies": "1",
"size": "6081",
"license": "mit",
"hash": 9220784576119864000,
"line_mean": 33.9540229885,
"line_max": 168,
"alpha_frac": 0.6625555007,
"autogenerated": false,
"ratio": 2.949078564500485,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4111634065200485,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alexei'
from telnetlib import Telnet
class JamesHelper:
    """Helper for managing user accounts on an Apache James mail server
    through its telnet remote-manager interface."""
    def __init__(self, app):
        self.app = app
    def ensure_user_exists(self, username, password):
        """Create the user if missing, otherwise reset their password (idempotent)."""
        james_config = self.app.config['james']
        session = JamesHelper.Session(
            james_config['host'], james_config['port'], james_config['username'], james_config['password'])
        if session.is_users_registered(username):
            session.reset_password(username, password)
        else:
            session.create_user(username, password)
        session.quit()
    class Session:
        """A single authenticated telnet session with the James remote manager."""
        def __init__(self, host, port, username, password):
            # 5 second timeout for connecting and for every read below
            self.telnet = Telnet(host, port, 5)
            self.read_until("Login id:")
            self.write(username + "\n")
            self.read_until("Password:")
            self.write(password + "\n")
            self.read_until("Welcome root. HELP for a list of commands")
        def read_until(self, text):
            # block until the server echoes the expected prompt (or timeout)
            self.telnet.read_until(text.encode('ascii'), 5)
        def write(self, text):
            self.telnet.write(text.encode('ascii'))
        def is_users_registered(self, username):
            self.write("verify %s\n" % username)
            # expect() returns (match_index, match_object, bytes_read);
            # index 0 corresponds to the b"exists" pattern
            res = self.telnet.expect([b"exists", b"does not exist"])
            return res[0] == 0
        def create_user(self, username, password):
            self.write("adduser %s %s\n" % (username, password))
            self.read_until("User %s added" % username)
        def reset_password(self, username, password):
            self.write("setpassword %s %s\n" % (username, password))
            self.read_until("Password for %s reset" % username)
        def quit(self):
            self.write("quit\n") | {
"repo_name": "barancev/python_training_mantis",
"path": "fixture/james.py",
"copies": "1",
"size": "1732",
"license": "apache-2.0",
"hash": 320728738823281300,
"line_mean": 32.9803921569,
"line_max": 107,
"alpha_frac": 0.5750577367,
"autogenerated": false,
"ratio": 3.848888888888889,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9909655212142419,
"avg_score": 0.002858282689294041,
"num_lines": 51
} |
__author__ = 'alexei'
class SessionHelper:
def __init__(self, app):
self.app = app
def login(self, username, password):
wd = self.app.wd
self.app.open_home_page()
wd.find_element_by_name("username").click()
wd.find_element_by_name("username").clear()
wd.find_element_by_name("username").send_keys(username)
wd.find_element_by_name("password").click()
wd.find_element_by_name("password").clear()
wd.find_element_by_name("password").send_keys(password)
wd.find_element_by_css_selector('input[type="submit"]').click()
def logout(self):
wd = self.app.wd
wd.find_element_by_link_text("Logout").click()
def is_logged_in(self):
wd = self.app.wd
return len(wd.find_elements_by_link_text("Logout")) > 0
def is_logged_in_as(self, username):
wd = self.app.wd
return self.get_logged_user() == username
def get_logged_user(self):
wd = self.app.wd
return wd.find_element_by_css_selector("td.login-info-left span").text
def ensure_logout(self):
wd = self.app.wd
if self.is_logged_in():
self.logout()
def ensure_login(self, username, password):
wd = self.app.wd
if self.is_logged_in():
if self.is_logged_in_as(username):
return
else:
self.logout()
self.login(username, password) | {
"repo_name": "barancev/python_training_mantis",
"path": "fixture/session.py",
"copies": "1",
"size": "1456",
"license": "apache-2.0",
"hash": 5158036655489781000,
"line_mean": 29.3541666667,
"line_max": 78,
"alpha_frac": 0.5776098901,
"autogenerated": false,
"ratio": 3.466666666666667,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9541535328696491,
"avg_score": 0.0005482456140350877,
"num_lines": 48
} |
__author__ = 'Alexendar Perez'
#####################
# #
# Introduction #
# #
#####################
"""check which genes the gRNAs used for training data in CRISPR ML task are hitting in mm10"""
#################
# #
# Libraries #
# #
#################
import sys
import pickle
import argparse
#########################
# #
# Auxillary Function #
# #
#########################
def arg_parser():
    """Parse the command line and return (ultra, in_pickle, outdir, cpf1)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--pickle', help='absolute filepath to gRNA_target_coordinates_annotation_dict.pkl', required=True)
    parser.add_argument('-i', '--infile', help='absolute filepath to ultra.txt', required=True)
    parser.add_argument('-o', '--outdir', help='absolute filepath to output directory', required=True)
    parser.add_argument('--cpf1', help='indication if cpf1 is being queried', default=None)
    parsed = parser.parse_args()
    return parsed.infile, parsed.pickle, parsed.outdir, parsed.cpf1
def gene_symbol_writeout(outdir, gene_set):
    """Write one targeted gene symbol per line to <outdir>/gene_set.txt.

    :param outdir: absolute filepath to output directory
    :param gene_set: iterable of gene symbols (first output of determine_gene_targets())
    :return: writes out a file with gene symbols that are targeted
    """
    out_path = '%s/gene_set.txt' % outdir
    with open(out_path, 'w') as outfile:
        outfile.writelines('%s\n' % symbol for symbol in gene_set)
    sys.stdout.write('gene symbols for targeted genes written out\n')
def determine_gene_targets(ultra, gRNA_target_coordinates_annotation_dict, cpf1=None):
    """Collect the set of gene symbols hit by the gRNAs listed in `ultra`.

    :param ultra: path to a file with one gRNA protospacer per line
    :param gRNA_target_coordinates_annotation_dict: first output of load_pickle();
        maps a full gRNA (with PAM) to a list of annotation strings
    :param cpf1: truthy when the guides are Cpf1 (TTTN 5' PAM) instead of Cas9 (NGG 3' PAM)
    :return: set object with gene symbols
    """
    symbols = set()
    with open(ultra, 'r') as infile:
        for raw_line in infile:
            protospacer = raw_line.strip().split()[0]
            # reconstruct the dictionary key in GuideScan's gRNA+PAM format
            if cpf1:
                full_guide = 'TTTN%s' % protospacer
            else:
                full_guide = '%sNGG' % protospacer
            annotations = gRNA_target_coordinates_annotation_dict[full_guide]
            # an empty annotation list means the hit is in a UTR -> skip it
            for entry in annotations or ():
                # annotation looks like "..._['SYMBOL_..."; extract SYMBOL
                symbols.add(entry.split("_['")[1].split('_')[0])
    sys.stdout.write('%s genes are targeted' % (len(symbols)))
    return symbols
def load_pickle(in_pickle):
    """Deserialize a pickle file.

    :param in_pickle: absolute filepath to serialized pickle object
    :return: deserialized pickle object
    """
    # open in BINARY mode: pickle data is bytes; text mode ('r') breaks on
    # Python 3 and can corrupt protocol-2+ pickles on Windows
    with open(in_pickle, 'rb') as p:
        data = pickle.load(p)
    sys.stdout.write('pickle object deserialized\n')
    return data
#####################
# #
# Main Function #
# #
#####################
def main():
    """Command-line entry point: map training gRNAs to the genes they target."""
    # parse user inputs
    ultra, in_pickle, outdir, cpf1 = arg_parser()
    # load the gRNA -> annotation mapping
    gRNA_target_coordinates_annotation_dict = load_pickle(in_pickle)
    # resolve which genes the filtered gRNAs hit
    gene_set = determine_gene_targets(ultra, gRNA_target_coordinates_annotation_dict, cpf1)
    # persist the gene symbols to <outdir>/gene_set.txt
    gene_symbol_writeout(outdir, gene_set)
    # user end message
    sys.stdout.write('gene intersection processing complete\n')
if __name__ == '__main__':
main()
| {
"repo_name": "lzamparo/crisprML",
"path": "src/gRNA_data_gene_check.py",
"copies": "1",
"size": "4157",
"license": "bsd-3-clause",
"hash": -6554842826290211000,
"line_mean": 30.9769230769,
"line_max": 178,
"alpha_frac": 0.6050036084,
"autogenerated": false,
"ratio": 3.5867126833477134,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9630021274297401,
"avg_score": 0.012339003490062493,
"num_lines": 130
} |
__author__ = 'Alexendar Perez'
#####################
# #
# Introduction #
# #
#####################
"""compute specificity score, Hamming, and Levinstein distance neighborhoods for strings"""
#################
# #
# Libraries #
# #
#################
import sys
import os
import pickle
import argparse
import sqlite3
import gzip
import numpy as np
import pandas as pd
from Bio import trie
#############################
# #
# CFD Scoring Functions #
# #
#############################
def calc_cfd(wt, sg, pam, mm_scores, pam_scores):
    """Cumulative CFD (Cutting Frequency Determination) score of an off-target
    `sg` against the intended guide `wt`, given the PAM and the mismatch/PAM
    penalty tables from get_mm_pam_scores()."""
    sg_rna = sg.replace('T', 'U')
    wt_rna = wt.replace('T', 'U')
    score = 1
    # multiply in the penalty for every mismatched position (keys are 1-based)
    for pos, sg_base in enumerate(sg_rna, start=1):
        wt_base = wt_rna[pos - 1]
        if wt_base == sg_base:
            continue
        try:
            score *= mm_scores['r' + wt_base + ':d' + revcom(sg_base) + ',' + str(pos)]
        except KeyError:
            # unknown mismatch key -> no penalty applied
            continue
    return score * pam_scores[pam]
def get_mm_pam_scores(mms, pams):
    """Load the CFD mismatch and PAM penalty tables from pickle files.

    :param mms: path to mismatch_score.pkl
    :param pams: path to pam_scores.pkl
    :return: (mm_scores, pam_scores) dictionaries
    :raises Exception: when either file is missing or unreadable
    """
    try:
        # 'with' closes the handles even if unpickling fails (the original
        # leaked both file objects)
        with open(mms, 'rb') as mm_handle:
            mm_scores = pickle.load(mm_handle)
        with open(pams, 'rb') as pam_handle:
            pam_scores = pickle.load(pam_handle)
    except (IOError, OSError, pickle.UnpicklingError):
        # keep the exception type the original raised so callers still work
        raise Exception("Could not find file with mismatch scores or PAM scores")
    sys.stdout.write('CFD scoring matrices loaded\n')
    return (mm_scores, pam_scores)
def revcom(s):
    """Reverse-complement a DNA/RNA string (U pairs with A)."""
    complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'U': 'A'}
    return ''.join(complement[base] for base in reversed(s))
#########################
# #
# Auxillary Function #
# #
#########################
def arg_parser():
    """Parse the command line; returns (in_file, outdir, kmer_counts_file,
    trie_file, mismatch_score, pam_score, header, sequence_field, cpf1)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--infile', help='absolute filepath to file with gRNA sequences', required=True)
    parser.add_argument('-o', '--outdir', help='absolute filepath to output directory', required=True)
    parser.add_argument('-k', '--kmer', help='absolute filepath to kmers_counted.txt file', required=True)
    parser.add_argument('-t', '--trie', help='absolute filepath to trie.dat file', required=True)
    parser.add_argument('-m', '--mismatch', help='absolute filepath to mismatch_score.pkl for CFD', required=True)
    parser.add_argument('-p', '--pam', help='absolute filepath to pam_scores.pkl for CFD', required=True)
    parser.add_argument('--header', help='boolian value of whether header is present in infile, default = True', default=True)
    parser.add_argument('--sequence_field', help='if sequences not in first field of file, default = 0', default=0)
    parser.add_argument('--cpf1', help='cpf1 enzyme processing', default=False)
    opts = parser.parse_args()
    return (opts.infile, opts.outdir, opts.kmer, opts.trie, opts.mismatch,
            opts.pam, opts.header, int(opts.sequence_field), opts.cpf1)
def load_pickle(infile):
    """Load and return the object stored in a pickle file.

    :param infile: absolute filepath to pickle
    :return: deserialized pickle
    """
    # binary mode is required: pickle payloads are bytes; text mode ('r')
    # fails on Python 3 and corrupts binary protocols on Windows
    with open(infile, 'rb') as in_file:
        data = pickle.load(in_file)
    return data
def sequence_data_extraction(data, header, sequence_field):
    """Pull the sequence column out of a 2-D data array.

    :param data: numpy array of data, first output of sequence_file_read_in()
    :param header: truthy when the first row is a header and must be skipped
    :param sequence_field: column index holding the sequences (defaults to 0)
    :return: sequence data column
    """
    if header:
        sys.stdout.write('skipping first line in data array due to header\n')
    first_row = 1 if header else 0
    if sequence_field:
        sys.stdout.write('sequence field specified as %s\n' % sequence_field)
        column = sequence_field
    else:
        sys.stdout.write('sequence field defaulted to 0\n')
        column = 0
    return data[first_row:, column]
def sequence_file_read_in(in_file):
    """read in file with sequences like gRNAs

    Tries the formats in order -- pickle, then Excel, then delimited text --
    and exits the process when none of them parses.

    :param in_file: absolute filepath to file containing sequences
    :return: numpy array representation of data accessed through either pickle or pandas modules
    """
    sys.stdout.write(
        '%s is being used to compute features for ClassTask\n***Sequence data should be in first field***\n' % in_file)
    # NOTE(review): the bare 'except:' clauses below also catch
    # KeyboardInterrupt/SystemExit -- consider narrowing to 'except Exception:'
    try:
        sys.stdout.write('attempting to read %s as pickle\n' % in_file)
        file_format = 'pickle'
        data = load_pickle(in_file)
    except:
        try:
            sys.stdout.write('attempting to read %s with pandas as excel\n' % in_file)
            file_format = 'excel'
            data = np.array(pd.read_excel(in_file, header=None))
        except:
            try:
                sys.stdout.write('attempting to read %s with pandas as text file\n' % in_file)
                file_format = 'text'
                data = np.array(pd.read_table(in_file, header=None))
            except:
                sys.stderr.write('%s file format not recognized as pickle, excel, or text file; aborting\n' % in_file)
                sys.exit(1)
    sys.stdout.write('%s successfully read in as %s\n' % (in_file, file_format))
    return data
def load_trie(trie_file):
    """Deserialize a BioPython trie produced by the GuideScan processer.py
    (x__all_trie.dat) and return it.

    :param trie_file: serialized trie file path
    :return: deserialized trie object
    """
    with open(trie_file, 'r') as handle:
        loaded = trie.load(handle)
    sys.stdout.write('trie loaded into memory from %s \n' % trie_file)
    return loaded
# TODO: this is better stored as a DB; it also does not get used as the docstring
# suggests: they keys are parts[1], which are *genomic positions*, and the values
# are kmers.
def kmer_exact_occurrence_dictionary(kmer_counts_file):
    """generate genome-wide kmer occurrence dictionary as an sqlite database. If the
    filename suggests that the kmer_counts_file is already a .db, then just return the cursor.

    :param kmer_counts_file: absolute filepath to XXX_all_kmers_counted.txt.gz file, or a .db file
    :return: connection object to the kmer-count dictionary
    """
    # already-built database: just connect and return the connection
    if kmer_counts_file.endswith('db'):
        conn = sqlite3.connect(kmer_counts_file)
        return conn
    kmer_dictionary = {}
    records = 0
    # NOTE(review): db file name is hard-coded; it is created next to the input file
    sqlite_file = 'hg38_kmers.db'
    table_name = 'kmer_counts'
    first = 'kmer'
    first_type = 'text'
    second = 'count'
    second_type = 'INTEGER'
    # Connect to the database file
    try:
        conn = sqlite3.connect(os.path.join(os.path.dirname(kmer_counts_file),sqlite_file))
    except:
        sys.stdout.write('Cannot open the sqlite db! \n')
        sys.exit()
    c = conn.cursor()
    # Create the table (identifiers are module constants, so format() is safe here)
    c.execute('CREATE TABLE {tn} ({fc} {ft}, {sc} {st})'\
        .format(tn=table_name, fc=first, ft=first_type, sc=second, st=second_type))
    # transparently support gzipped input files
    my_open = gzip.open if kmer_counts_file.endswith('.gz') else open
    with my_open(kmer_counts_file, 'r') as kmers:
        for line in kmers:
            clean_line = line.lstrip().rstrip()
            parts = clean_line.split()
            # NOTE(review): duplicate detection only works within one 100k batch,
            # because the dictionary is cleared after each flush below
            if kmer_dictionary.has_key(parts[1]):
                sys.stdout.write('kmer duplication detected %s %s \n' % (parts[1], parts[0]))
            else:
                kmer_dictionary[parts[1]] = parts[0]
                records += 1
            # dump dict into the database, then reset it
            if records > 100000:
                for k, v in kmer_dictionary.items():
                    c.execute("INSERT INTO kmer_counts VALUES (?,?)", (k, v))
                kmer_dictionary = {}
                records = 0
    # handle the remaining records
    for k, v in kmer_dictionary.items():
        c.execute("INSERT INTO kmer_counts VALUES (?,?)", (k, v))
    sys.stdout.write('kmer dictionary generated \n')
    # commit the changes to the db
    conn.commit()
    return conn
def add_features_to_feature_array(feature_array, augmenting_array):
    """Append the feature columns of `augmenting_array` (everything after its
    key column 0) to `feature_array`, aligning the two arrays on column 0.

    If the key columns are not already in the same order, both arrays are
    sorted by key and the merge is retried once via recursion.

    :param feature_array: numpy array with sequences as UI and previously computed features
    :param augmenting_array: numpy array with new features to be added to feature array
    :return: new feature array with features from the augmented array added
    """
    n_rows = feature_array.shape[0]
    keys_a = feature_array[:, 0].reshape(n_rows, 1)
    keys_b = augmenting_array[:, 0].reshape(augmenting_array.shape[0], 1)
    if np.all(keys_a == keys_b):
        try:
            return np.concatenate((feature_array, augmenting_array[:, 1:]), 1)
        except IndexError:
            # feature_array collapsed to 1-D: restore a column shape first
            return np.concatenate((feature_array.reshape(n_rows, 1), augmenting_array[:, 1:]), 1)
    sys.stdout.write('original data array and new features array NOT in same order: attempt sort\n')
    sorted_features = feature_array[feature_array[:, 0].argsort()]
    sorted_augment = augmenting_array[augmenting_array[:, 0].argsort()]
    return add_features_to_feature_array(sorted_features, sorted_augment)
def hamming_distance(s1, s2):
    """Number of positions at which two equal-length strings differ.

    :param s1: first string
    :param s2: second string
    :return: Hamming distance between s1 and s2
    :raises AssertionError: when the strings differ in length
    """
    assert len(s1) == len(s2)
    mismatches = 0
    for left, right in zip(s1, s2):
        if left != right:
            mismatches += 1
    return mismatches
def query_db(c, key):
    """
    Query the kmer_counts table through sqlite3 cursor `c` and return the
    count stored for kmer `key` (0 when the kmer is absent).

    A kmer should appear at most once; duplicates are logged and the first
    row is returned. The original fell through and returned None in that
    case, which crashed callers that wrap the result in int(), and its
    error handler referenced a possibly-unbound variable.
    """
    try:
        result = c.execute("SELECT count FROM kmer_counts WHERE kmer == ?", (key,))
        rows = result.fetchall()
    except sqlite3.Error as exc:
        sys.stderr.write('querying db failed for {0}: {1}\n'.format(key, exc))
        return 0
    if not rows:
        return 0
    if len(rows) > 1:
        sys.stderr.write('querying db returned multiple hits for {0}: {1}\n'.format(
            key, ' '.join(str(r) for r in rows)))
    return rows[0][0]
def compute_specificity_score_and_mismatch_neighborhoods(sequence_data, final_header, kmer_dictionary_cursor, tr, mm_scores,
                                                         pam_scores, cpf1):
    """compute GuideScan based features

    :param sequence_data: numpy array with sequence data in 0 field
    :param final_header: numpy array with header information
    :param kmer_dictionary_cursor: cursor for the sqlite database; output of kmer_exact_occurrence_dictionary()
    :param tr: trie datastructure from load_trie() function
    :param mm_scores: first output of get_mm_pam_scores()
    :param pam_scores: second output of get_mm_pam_scores()
    :param cpf1: truthy when guides target Cpf1 (TTTN PAM prefix); falsy for Cas9 (NGG PAM suffix)
    :return: feature array with GuideScan derived features

    """
    # neighborhoods are enumerated out to 3 mismatches
    distance = 3
    guidescan_array, seq_array = np.zeros((sequence_data.shape[0], 10)), np.empty((sequence_data.shape[0], 1)).astype(
        str)
    for j, on_target_sequence in enumerate(sequence_data[:, 0]):
        # sequence array value (kept before the PAM is attached)
        on_target_sequence_value = on_target_sequence
        # Python-3-portable replacement for the original py2 `print` statement
        sys.stdout.write('Processing guide ' + str(j) + '\n')
        sys.stdout.flush()
        # guidescan format: attach the enzyme's PAM before querying the trie
        if cpf1:
            on_target_sequence = 'TTTN%s' % (on_target_sequence)
        else:
            on_target_sequence = '%sNGG' % (on_target_sequence)
        # query trie, get all near matches
        query_sequences = tr.get_approximate(on_target_sequence, distance)
        # specificity score lists
        cfd_lst, writeout_lst = [], []
        # neighborhood enumeration dictionaries: occurrence totals keyed by
        # mismatch distance, plus "seen" sets so each neighbor counts once
        hamming_key, hamming_distance_dict, levinstein_key, levinstein_distance_dict = {}, {0: 0, 1: 0, 2: 0,
                                                                                            3: 0}, {}, {1: 0, 2: 0,
                                                                                                        3: 0}
        for i in query_sequences:
            # occurrence of sequence in genome
            ot_sequence_occurence = int(query_db(kmer_dictionary_cursor, i[0]))
            if hamming_distance(on_target_sequence, i[0]) <= distance:
                # record key (`in` replaces the Python-2-only dict.has_key)
                if i[0] in hamming_key:
                    continue
                else:
                    if cpf1:
                        pass
                    else:
                        # cfd computation
                        pam = i[0][-2:]
                        sg = i[0][:-3]
                        cfd_score = calc_cfd(on_target_sequence, sg, pam, mm_scores, pam_scores)
                        total_cfd_contribution = cfd_score * float(ot_sequence_occurence)
                        cfd_lst.append(total_cfd_contribution)
                    # augment count for Hamming neighbors at n mismatches
                    if hamming_distance(on_target_sequence, i[0]) in hamming_distance_dict:
                        hamming_distance_dict[hamming_distance(on_target_sequence, i[0])] = \
                            hamming_distance_dict[hamming_distance(on_target_sequence, i[0])] + ot_sequence_occurence
                        hamming_key[i[0]] = 1
                    # establish count for Hamming neighbors at n mismatches
                    else:
                        hamming_distance_dict[hamming_distance(on_target_sequence, i[0])] = ot_sequence_occurence
                        hamming_key[i[0]] = 1
            else:
                # record key
                if i[0] in levinstein_key:
                    continue
                else:
                    # augment count for Levinstein neighbors at n mismatches
                    # (i[2] is the edit distance reported by the trie query)
                    if i[2] in levinstein_distance_dict:
                        levinstein_distance_dict[i[2]] = levinstein_distance_dict[i[2]] + ot_sequence_occurence
                        levinstein_key[i[0]] = 1
                    # establish count for Levinstein neighbors at n mismatches
                    else:
                        levinstein_distance_dict[i[2]] = ot_sequence_occurence
                        levinstein_key[i[0]] = 1
        # cfd composite specificity score
        if cpf1:
            cfd_aggregate_score = 0
        else:
            # an empty/zero CFD sum yields numpy inf here; the downstream
            # extraction script filters on that 'inf' sentinel
            cfd_array = np.array(cfd_lst)
            cfd_aggregate_score = 1.0 / (cfd_array.sum())
        # fill in features into feature array
        seq_array[j, 0] = on_target_sequence_value
        guidescan_array[j, 0] = cfd_aggregate_score
        guidescan_array[j, 1] = int(hamming_distance_dict[0])
        guidescan_array[j, 2] = int(hamming_distance_dict[1])
        guidescan_array[j, 3] = int(hamming_distance_dict[2])
        guidescan_array[j, 4] = int(hamming_distance_dict[3])
        guidescan_array[j, 5] = sum(hamming_distance_dict.values())
        guidescan_array[j, 6] = int(levinstein_distance_dict[1])
        guidescan_array[j, 7] = int(levinstein_distance_dict[2])
        guidescan_array[j, 8] = int(levinstein_distance_dict[3])
        guidescan_array[j, 9] = sum(levinstein_distance_dict.values())
    # generate final augmented features array
    seq_guidescan_array = np.concatenate((seq_array, guidescan_array), 1)
    sequence_data = add_features_to_feature_array(sequence_data, seq_guidescan_array)
    header_value = np.array(['Specificity_Score', 'Occurrences_at_Hamming_0', 'Occurrences_at_Hamming_1',
                             'Occurrences_at_Hamming_2', 'Occurrences_at_Hamming_3', 'Sum_Hamming_Neighbors',
                             'Occurrences_at_Levinstein_1', 'Occurrences_at_Levinstein_2',
                             'Occurrences_at_Levinstein_3',
                             'Sum_Levinstein_Neighbors']).reshape(1, 10)
    final_header = np.concatenate((final_header, header_value), 1)
    sys.stdout.write('GuideScan based features computed\n')
    return sequence_data, final_header
#####################
# #
# Main Function #
# #
#####################
def main():
    """Drive GuideScan-based feature generation for a set of gRNA sequences.

    Reads sequences from the user-supplied input file, computes specificity /
    mismatch-neighborhood features against a kmer sqlite database and a trie,
    and writes the combined feature matrix to a CSV in the output directory.

    Example development inputs:
    in_file = '/Users/pereza1/Projects/Jo/data/gecko_proper_excel/mouse_library_A_gecko.xlsx'
    header = True
    sequence_field = 0
    """
    # user inputs
    in_file,outdir,kmer_counts_file,trie_file,mismatch_score,pam_score,header,sequence_field,cpf1 = arg_parser()
    # data read in
    data = sequence_file_read_in(in_file)
    # sequence data extraction: isolate the sequence column as an (n, 1) array
    sequence_data = sequence_data_extraction(data,header,sequence_field)
    sequence_data = sequence_data.reshape(sequence_data.shape[0],1)
    final_header = np.array(['sequence']).reshape(1,1)
    # compute or load kmer dictionary object (an sqlite3 connection)
    kmer_dictionary = kmer_exact_occurrence_dictionary(kmer_counts_file)
    kmer_dictionary_cursor = kmer_dictionary.cursor()
    # load CFD scoring matrices
    mm_scores, pam_scores = get_mm_pam_scores(mismatch_score, pam_score)
    # load trie
    tr = load_trie(trie_file)
    # compute specificity score and mismatch neighborhoods
    sequence_data,final_header = compute_specificity_score_and_mismatch_neighborhoods(sequence_data,final_header,
                                                                                     kmer_dictionary_cursor,tr,mm_scores,
                                                                                     pam_scores,cpf1)
    # generate final feature arrays (header row stacked above the data rows)
    final_feature_array = np.concatenate((final_header,sequence_data),0)
    #final_feature_array_standardized = np.concatenate((final_header,sequence_data_standardized),0)
    sys.stdout.write('final feature arrays generated\n')
    # write output to csv
    # NOTE(review): column_length is the number of columns but is used as the
    # fixed field *width* in the '%<n>s' savetxt format -- presumably
    # intentional padding; confirm the intended format string.
    column_length = final_feature_array.shape[1]
    np.savetxt('%s/raw_features_computed_%s.csv' % (outdir,in_file.split('/')[-1].split('.')[0]), final_feature_array,
               fmt='%' + '%ss' % (column_length), delimiter=',')
    #np.savetxt('%s/standarized_features_computed_%s.csv' % (outdir,in_file.split('/')[-1].split('.')[0]), final_feature_array_standardized,
    #           fmt='%' + '%ss' % (column_length), delimiter=',')
    sys.stdout.write('final arrays written to csv\n%s\n' % ('%s/features_computed_%s.csv' % (outdir,in_file.split('/')[-1].split('.')[0])))
    # close the kmer_dictionary db
    kmer_dictionary.close()
    # completion stdout
    sys.stdout.write('feature generation for %s complete\n' % (in_file))
if __name__ == '__main__':
main() | {
"repo_name": "lzamparo/crisprML",
"path": "src/specificity_score_distance_neighbors.py",
"copies": "1",
"size": "16743",
"license": "bsd-3-clause",
"hash": 1234933588811401200,
"line_mean": 33.1018329939,
"line_max": 150,
"alpha_frac": 0.6780146927,
"autogenerated": false,
"ratio": 2.9962419470293487,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4174256639729349,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alexendar Perez'
#####################
# #
# Introduction #
# #
#####################
"""extract candidate gRNAs for cutting efficiency screen"""
#################
# #
# Libraries #
# #
#################
import sys
import argparse
import pdb
#########################
# #
#  Auxiliary Function  #
# #
#########################
def arg_parser():
    """Parse the command line; return (in_file, outdir, enzyme)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-i','--infile',help='absolute filepath to output of specificity_score_distance_neighbors.py file',required=True)
    parser.add_argument('-o','--outdir',help='absolute filepath to output directory',required=True)
    parser.add_argument('--enzyme',help='enter Cas9 or Cpf1, default = Cas9',default='Cas9')
    parsed = parser.parse_args()
    return parsed.infile, parsed.outdir, parsed.enzyme
def string_set(string_file):
    """get set of strings

    :param string_file: absolute filepath to single column string file
    :return: set object of strings

    """
    collected = set()
    with open(string_file, 'r') as handle:
        for raw in handle:
            # first whitespace-separated field of each line is the string
            fields = raw.lstrip().rstrip().split()
            collected.add(fields[0])
    return collected
def get_gRNAs(db):
    """get gRNAs

    :param db: ultra or g_ultra list
    :return: set object with sequences as elements

    """
    # the sequence is the first comma-separated field of every record
    return {record.split(',')[0] for record in db}
def extract_candidate_gRNAs(in_file, enzyme):
    """extract candidate gRNAs for cutting efficiency screen

    :param in_file: absolute filepath to input file (CSV produced by
        specificity_score_distance_neighbors.py)
    :param enzyme: 'Cas9' or 'Cpf1'; selects the specificity-score sentinel
        marking guides with no scored neighbors ('inf' for Cas9, '0.0' for Cpf1)
    :return: list object, gRNAs with no near matches <= 3 starts with G,
             gRNAs with no near matches <= 3,
             plus three (currently unused) placeholder lists;
             None when `enzyme` is not recognized
    """
    lst_c, lst_b, lst_a, ultra, g_ultra = [], [], [], [], []
    # validate the enzyme once, before reading the file -- this check is
    # loop-invariant (the original re-evaluated it for every line) and an
    # invalid enzyme now fails fast even on an empty file
    if enzyme == 'Cas9':
        enz_val = 'inf'
    elif enzyme == 'Cpf1':
        enz_val = '0.0'
    else:
        sys.stderr.write('%s not recognized: enter either Cas9 or Cpf1' % enzyme)
        return
    with open(in_file, 'r') as infile:
        for line in infile:
            clean_line = line.lstrip().rstrip()
            parts = clean_line.split(',')
            try:
                if parts[1].strip() == enz_val:  # Cas9 = inf, cpf1 = 0.0
                    if float(parts[2]) == 0.0 and float(parts[3]) == 0.0 and float(parts[4]) == 0.0 and float(
                            parts[5]) == 0.0:
                        # no duplicate or near neighbors within Hamming distance 3;
                        # the raw line (trailing newline included) is stored
                        if parts[0][0] == 'G':
                            g_ultra.append(line)
                            ultra.append(line)
                        else:
                            ultra.append(line)
            except ValueError:
                # header rows / non-numeric fields land here
                sys.stderr.write('skipping %s\n' % line)
                continue
    sys.stdout.write('gRNA extraction for %s complete\n' % (in_file))
    return g_ultra, ultra, lst_a, lst_b, lst_c
#####################
# #
# Main Function #
# #
#####################
def main():
    """Extract and write screening candidate gRNA lists from a feature CSV.

    Example development inputs:
    in_file = '/Users/pereza1/Projects/Ventura/ClassTask/data/gRNAs_for_screen/mm10/gRNAs_in_mouse_exons/Cas9/GuideScan_database/split/human_trie_processed/aggregate/raw_features_computed_cas9_guidescan_gRNAs_aggreagate.csv'
    outdir = '/cbio/cllab/home/aperez/Project/Ventura/ClassTask/data/gRNAs_for_screen/mm10/gRNAs_in_mouse_exons/Cpf1/GuideScan_database/split/human_trie_processed/aggregate/filtered'
    """
    # user inputs
    in_file,outdir,enzyme = arg_parser()
    # extract candidate gRNAs
    g_ultra, ultra, lst_a, lst_b, lst_c = extract_candidate_gRNAs(in_file,enzyme)
    # (a leftover pdb.set_trace() breakpoint was removed here -- it halted
    # every non-interactive run of this script)
    # write out: first comma-separated field of each stored line, one per row
    with open('%s/g_ultra.txt' % outdir, 'w') as g_ultra_writeout:
        for i in g_ultra:
            g_ultra_writeout.write('%s\n' % i.split()[0].strip(','))
    sys.stdout.write('%s written\n' % '%s/g_ultra.txt' % outdir)
    with open('%s/ultra.txt' % outdir, 'w') as ultra_writeout:
        for i in ultra:
            ultra_writeout.write('%s\n' % i.split()[0].strip(','))
    sys.stdout.write('%s written\n' % '%s/ultra.txt' % outdir)
    # user end message
    sys.stdout.write('gRNA extraction complete\n')
| {
"repo_name": "lzamparo/crisprML",
"path": "src/extract_screening_gRNAs.py",
"copies": "1",
"size": "4855",
"license": "bsd-3-clause",
"hash": 9107329105069600000,
"line_mean": 30.7320261438,
"line_max": 224,
"alpha_frac": 0.5274974253,
"autogenerated": false,
"ratio": 3.5155684286748734,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45430658539748736,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alexendar Perez'
#####################
# #
# Introduction #
# #
#####################
"""select gRNAs from a set that meet certain annotation requirements"""
#################
# #
# Libraries #
# #
#################
import sys
import pickle
import argparse
from collections import defaultdict
from bx.intervals.intersection import IntervalTree
#########################
# #
#  Auxiliary Function  #
# #
#########################
def arg_parser():
    """Parse the command line; return (annotations, gRNA_file, outdir, cpf1-as-int)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-a','--annotations',help='absolute filepath to annotation BED file',required=True)
    parser.add_argument('-i','--infile',help='absolute filepath to GuideScan *_all_kmers.txt file',required=True)
    parser.add_argument('-o','--outdir',help='absolute filepath to output directory',required=True)
    parser.add_argument('--cpf1',help='if Cpf1 infile then --cpf1 = 1, else process as Cas9, default = 0',default=0)
    opts = parser.parse_args()
    return opts.annotations, opts.infile, opts.outdir, int(opts.cpf1)
def create_interval_tree(annotations):
    """create annotation tree datastructure

    :param annotations: absolute filepath to annotation BED file
    :return: interval tree

    """
    handle = open(annotations, 'r')
    try:
        interval_tree = create_inttree_from_file(handle)
    finally:
        handle.close()
    sys.stdout.write('interval tree for %s generated\n' % annotations)
    return interval_tree
def gRNA_w_annotations_writeout(target_sequence_overlapping_exon, outdir, cpf1):
    """write out to file those gRNAs which fulfill annotation requirement

    :param target_sequence_overlapping_exon: list object, first output object of gRNA_annotation_through_interval_tree()
    :param outdir: absolute filepath to output directory
    :param cpf1: integer value, 1 selects Cpf1 (TTTN PAM) handling, any other value Cas9 (NGG PAM)
    :return: output file that can be passed to specificity_score_distance_neighbors()

    """
    if cpf1 == 1:
        enzyme = 'Cpf1'
    else:
        enzyme = 'Cas9'
    # count PAM-stripped gRNA occurrences; only sequences seen exactly once
    # are written out
    unique_dictionary = {}
    with open('%s/%s_sequences_fitting_annotation.txt' % (outdir, enzyme), 'w') as outfile:
        outfile.write('seq\n')
        for sequence in target_sequence_overlapping_exon:
            if enzyme == 'Cas9':
                # Cas9 guides must carry an NGG PAM; anything else is skipped
                if 'NGG' not in sequence:
                    continue
                gRNA = sequence.replace('NGG', '')
            else:
                gRNA = sequence.replace('TTTN', '')
            # dict.has_key() is Python-2 only; `in` works on both versions
            if gRNA in unique_dictionary:
                unique_dictionary[gRNA] = unique_dictionary[gRNA] + 1
            else:
                unique_dictionary[gRNA] = 1
        for key in unique_dictionary.keys():
            if unique_dictionary[key] == 1:
                outfile.write('%s\n' % key)
    sys.stdout.write('gRNA fitting annotation requirement for %s enzyme complete\n' % enzyme)
def gRNA_annotation_through_interval_tree(gRNA_file, interval_tree, cpf1):
    """annotate gRNAs according to genomic feature

    :param gRNA_file: absolute filepath to gRNA_file (field 1 gRNA target sequence, field 2 coordinate chrom:pos:strand)
    :param interval_tree: dict object {chromosome: interval tree}, first output of create_inttree_from_file()
    :param cpf1: 1 applies the Cpf1 offset (19 bp), any other value the Cas9 offset (17 bp)
    :return: list and dictionary object, list of gRNA target sequences fulfilling annotation requirement; dictionary
             object with gRNA target sequence as key and coordinates as values; and the same keyed with
             coordinate+annotation values
    """
    sequence_dictionary = {}
    target_dictionary, target_dictionary_annotation = defaultdict(list), defaultdict(list)
    target_sequence_overlapping_exon = []
    with open(gRNA_file, 'r') as infile:
        for line in infile:
            clean_line = line.lstrip().rstrip()
            parts = clean_line.split()
            target_sequence, target_coordinate = parts[0], parts[1]
            # count duplicate target sequences; only unique ones are reported
            # (dict.has_key() is Python-2 only; `in` works on both versions)
            if target_sequence in sequence_dictionary:
                sequence_dictionary[target_sequence] = sequence_dictionary[target_sequence] + 1
            else:
                sequence_dictionary[target_sequence] = 1
            target_coordinate_parts = target_coordinate.split(':')
            chromosome, coordinate, strand = target_coordinate_parts[0], int(target_coordinate_parts[1]), \
                                             target_coordinate_parts[2]
            # project the stored coordinate to a single query position:
            # +/- 19 bp for Cpf1, +/- 17 bp for Cas9, direction set by strand
            if cpf1 == 1:
                if strand == '+':
                    end_coordinate = coordinate + 19  # only for Cpf1
                    start_coordinate = end_coordinate
                elif strand == '-':
                    start_coordinate = coordinate - 19  # only for Cpf1
                    end_coordinate = start_coordinate
                else:
                    sys.stderr.write('%s is not a valid strand character\n' % strand)
                    continue
            else:
                if strand == '+':
                    end_coordinate = coordinate + 17  # only for Cas9
                    start_coordinate = end_coordinate
                elif strand == '-':
                    start_coordinate = coordinate - 17  # only for Cas9
                    end_coordinate = start_coordinate
                else:
                    sys.stderr.write('%s is not a valid strand character\n' % strand)
                    continue
            try:
                annotation_tree_grab = interval_tree.get(chromosome)
                annotation_tree_traversal = annotation_tree_grab.find(start_coordinate, end_coordinate)
                if annotation_tree_traversal:
                    target_dictionary[target_sequence].append(
                        '%s:%s-%s' % (chromosome, start_coordinate, end_coordinate))
                    target_dictionary_annotation[target_sequence].append('%s:%s-%s_%s' % (chromosome, start_coordinate, end_coordinate, annotation_tree_traversal))
            except AttributeError:
                # .get() returned None: the chromosome is absent from the tree
                sys.stderr.write('%s has attribute not found in interval tree\n' % (target_coordinate))
                continue
    for key in target_dictionary.keys():
        if sequence_dictionary[key] == 1:
            target_sequence_overlapping_exon.append(key)
    sys.stdout.write('coordinate annotation query complete\n')
    return target_sequence_overlapping_exon, target_dictionary, target_dictionary_annotation
def create_inttree_from_file(infile):
    """Create interval tree to store annotations

    Args:
        infile: handle of open BED file with annotations

    Return:
        dictionary {chromosome name : interval tree with coordinates}
    """
    genome = {}
    for record in infile:
        fields = record.strip().split()
        chrom, start, stop = fields[0], int(fields[1]), int(fields[2])
        name = fields[3]
        # first time this chromosome is seen, give it a fresh interval tree
        if chrom not in genome:
            genome[chrom] = IntervalTree()
        # add interval to the chromosome's tree
        genome[chrom].add(start, stop, name)
    return genome
#####################
# #
# Main Function #
# #
#####################
def main():
    """Annotate gRNA target sites against a BED annotation file and write results.

    Builds an interval tree from the annotation BED, finds which gRNA target
    sites fall inside annotated intervals, pickles the coordinate dictionaries,
    and writes the unique annotation-fulfilling sequences to a text file.

    Example development inputs:
    cpf1 = 1
    gRNA_file = '/Users/pereza1/Projects/Ventura/CRISPR/data/dm6/dm6_all_kmers.txt'
    annotations = '/Users/pereza1/Projects/Ventura/CRISPR/code/crispr-project/guidescan-crispr/guidescan/annotation_bed/dm6/dm6_exons_completeAnnotation.bed'
    outdir = '/Users/pereza1/Desktop'
    interval_tree = create_interval_tree(annotations)
    x = interval_tree.get('chr4')
    x.find(1050297, 1053703)
    """
    # user inputs
    annotations,gRNA_file,outdir,cpf1 = arg_parser()
    # generate interval tree
    interval_tree = create_interval_tree(annotations)
    # annotation of target sites with interval tree
    target_sequence_overlapping_exon,target_dictionary,target_dictionary_annotation = gRNA_annotation_through_interval_tree(gRNA_file,interval_tree,cpf1)
    # pickle dictionary of sequences and coordinates
    # NOTE(review): the pickle files are opened in text mode ('w') -- fine for
    # Python 2's default protocol 0, but Python 3 requires 'wb'; confirm the
    # intended interpreter before changing.
    with open('%s/gRNA_target_coordinates_dict.pkl' % outdir, 'w') as out_pickle:
        pickle.dump(target_dictionary,out_pickle)
    with open('%s/gRNA_target_coordinates_annotation_dict.pkl' % outdir,'w') as out_annotation_pickle:
        pickle.dump(target_dictionary_annotation,out_annotation_pickle)
    # write out
    gRNA_w_annotations_writeout(target_sequence_overlapping_exon,outdir,cpf1)
    # end message for user
    sys.stdout.write('annotation selection of gRNAs for %s complete\n' % gRNA_file)
| {
"repo_name": "lzamparo/crisprML",
"path": "src/gRNA_from_annotations.py",
"copies": "1",
"size": "9584",
"license": "bsd-3-clause",
"hash": 6337414569868282000,
"line_mean": 37.0317460317,
"line_max": 162,
"alpha_frac": 0.6044449082,
"autogenerated": false,
"ratio": 4.0507185122569735,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5155163420456973,
"avg_score": null,
"num_lines": null
} |
import numpy as np
from scipy import sparse
import igraph
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.utils.validation import NotFittedError, check_X_y, check_array
class MultiIsotonicRegressor(BaseEstimator, RegressorMixin):
    """Regress a target value as a non-decreasing function of each input attribute,
    when the other attributes are non-decreasing

    min_partition_size is the minimum allowable size to which to partition the
    training set, to avoid overfitting
    """
    def __init__(self, min_partition_size=1):
        # sklearn convention: the constructor only stores hyperparameters
        self.min_partition_size = min_partition_size
    def fit(self, X, y):
        """Fit a multidimensional isotonic regression model

        Parameters
        ----------
        X : array-like, shape=(n_samples, n_features)
            Training data
        y : array-like, shape=(n_samples,)
            Target values

        Returns
        -------
        self : object
            Returns an instance of self
        """
        X, y = check_X_y(X, y, y_numeric=True) # In principle, Infs would be OK, but better to complain and let the user handle it
        myorder = np.argsort(X[:, 0]) # order along the first axis to at least avoid some of the comparisons
        self._training_set = X[myorder, :]
        ysort = np.array(y, dtype=np.float64)[myorder]
        # Build, in CSR form, the dominance relation i -> k: k comes after i in
        # the first-axis ordering and every remaining feature of i is <= k's.
        indices = []
        indptr = [0]
        for (i, Xrow) in enumerate(self._training_set[:, 1:]):
            indices.append(np.flatnonzero((Xrow <= self._training_set[i+1:, 1:]).all(1))+i+1)
            indptr.append(indptr[-1]+len(indices[-1]))
        # NOTE(review): np.bool was removed in NumPy 1.24; this needs to become
        # `bool` / np.bool_ to run against current NumPy.
        all_comparisons = sparse.csr_matrix((np.ones(indptr[-1], dtype=np.bool), np.concatenate(indices), indptr),
                                            shape=(X.shape[0], X.shape[0]), dtype=np.bool)
        # A - A.dot(A) drops comparisons reachable through an intermediate
        # sample, i.e. (effectively) a transitive reduction of the order graph.
        edges_to_add = zip(*(all_comparisons-all_comparisons.dot(all_comparisons)).nonzero())
        mygraph = igraph.Graph(n=y.size, edges=edges_to_add, directed=True, vertex_attrs={'y': ysort})
        def _add_source_sink(graph_part):
            """Add in the edges connecting the source and sink vertices to the internal nodes of the graph"""
            y_part = np.array(graph_part.vs['y'])
            y_part -= y_part.mean()
            # capacity exceeding the total of all source/sink capacities, so
            # internal (ordering) edges are never severed by the mincut
            maxval = np.abs(y_part).sum()+1
            vsrc = graph_part.vcount()
            vsnk = vsrc+1
            graph_part.add_vertices(2)
            # above-mean nodes attach to the source, the rest to the sink
            src_snk_edges = [(vsrc, curr_v) if curr_y > 0 else (curr_v, vsnk) for (curr_v, curr_y) in enumerate(y_part)]
            n_internal_edges = graph_part.ecount()
            graph_part.add_edges(src_snk_edges)
            graph_part.es['c'] = ([maxval]*n_internal_edges)+list(np.abs(y_part))
        def _partition_graph(origV):
            """Recursively partition a subgraph (indexed by origV) according to the mincut algorithm

            Parameters
            ----------
            origV : list-like
                A list of indices of mygraph corresponding to the subgraph to partition

            Returns
            -------
            partition : list of lists
                A list of lists of indices indicating the final partitioning of the graph
            """
            currgraph = mygraph.subgraph(origV)
            _add_source_sink(currgraph)
            # source and sink are the last two vertices added by _add_source_sink
            currpart = currgraph.mincut(currgraph.vcount()-2, currgraph.vcount()-1, 'c').partition
            if len(currpart[0])-1 < self.min_partition_size or len(currpart[1])-1 < self.min_partition_size:
                # this partitioning would result in one of the sets being too small - so don't do it!
                return [origV]
            else:
                # [:-1] drops the source/sink vertex from each side before recursing
                return _partition_graph([origV[idx] for idx in currpart[0][:-1]]) + _partition_graph([origV[idx] for idx in currpart[1][:-1]])
        nodes_to_cover = y.size
        self._training_set_scores = np.empty(y.size)
        # every sample in a final partition is scored with that partition's mean target
        for part in _partition_graph(range(y.size)):
            self._training_set_scores[part] = ysort[part].mean()
            nodes_to_cover -= len(part)
        assert nodes_to_cover == 0
        return self
    def predict(self, X):
        """Predict according to the isotonic fit

        Parameters
        ----------
        X : array-like, shape=(n_samples, n_features)

        Returns
        -------
        C : array, shape=(n_samples,)
            Predicted values
        """
        if not hasattr(self, '_training_set'):
            raise NotFittedError
        X = check_array(X)
        res = np.empty(X.shape[0])
        minval = self._training_set_scores.min() # when the features are below the entire training set, set to the minimum training set value
        for (i, Xrow) in enumerate(X):
            # prediction = max score over all training points dominated by Xrow
            lower_training_set = (self._training_set <= Xrow).all(1)
            if lower_training_set.any():
                res[i] = self._training_set_scores[lower_training_set].max()
            else:
                res[i] = minval
        return res
| {
"repo_name": "alexfields/multiisotonic",
"path": "multiisotonic.py",
"copies": "1",
"size": "4968",
"license": "bsd-3-clause",
"hash": 4078524916012369000,
"line_mean": 40.7478991597,
"line_max": 142,
"alpha_frac": 0.5841384863,
"autogenerated": false,
"ratio": 3.8098159509202456,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48939544372202454,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alex Frank'
from scipy import stats
import numpy as np
import json
def main():
    """Fit linear trends to the bottom and top norm series in norms.dat.

    Reads space-separated lines of the form ``<name> <bottom> <top>`` from
    ``norms.dat`` in the current directory; characters [4:20] of <name> encode
    a numeric timestamp. Returns a dict with 'bottom' and 'top' entries, each
    holding the fitted 'slope', 'intercept', and the first 'start_time'.
    """
    timestamps = []
    bottom_norms = []
    top_norms = []
    # expects norms.dat in same directory. Can be changed to be a command-line arg.
    # A context manager closes the handle even on error (the original leaked it).
    with open('norms.dat', 'r') as f:
        for line in f:
            words = line.split(' ')
            timestamps.append(words[0][4:20])
            bottom_norms.append(words[1])
            top_norms.append(words[2])
    slope, intercept, r_value, p_value, std_err = stats.linregress(np.asarray(timestamps, float), np.asarray(bottom_norms, float))
    bottom_result = {
        'slope': slope,
        'intercept': intercept,
        'start_time': timestamps[0]
    }
    slope, intercept, r_value, p_value, std_err = stats.linregress(np.asarray(timestamps, float), np.asarray(top_norms, float))
    top_result = {
        'slope': slope,
        'intercept': intercept,
        'start_time': timestamps[0]
    }
    return {
        "bottom": bottom_result,
        "top": top_result
    }
| {
"repo_name": "acic2015/findr",
"path": "deprecated/linearReg.py",
"copies": "1",
"size": "1037",
"license": "mit",
"hash": -6715648850382036000,
"line_mean": 24.2926829268,
"line_max": 130,
"alpha_frac": 0.5949855352,
"autogenerated": false,
"ratio": 3.575862068965517,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4670847604165517,
"avg_score": null,
"num_lines": null
} |
import re
import os
import numpy as np
import pandas as pd
from operator import itemgetter
from itertools import groupby
# ========================================= Loads the skeletal data and labels =================================================================
# Returns: a dataframe with the whole training set frame by frame. (num frames x 28)
def load_data(sk_data_file):
    """Load the saved frame-by-frame skeletal training data.

    Returns: a dataframe with the whole training set frame by frame. (num frames x 28)
    """
    return pd.read_csv(sk_data_file)
# ================================= Get the previous position of both hands and elbows. ========================================================
# We are going to use this positions to calculate movements and velocities.
# Returns: the original df with the hands and elbows previous positions added.
def get_previous_pos(df):
    """Add previous-frame positions of both hands and elbows to the dataframe.

    Each pre_* column holds the joint's coordinate from the preceding frame
    (row 0 gets 0, since there is no previous frame); these positions feed the
    velocity computation downstream.

    :param df: dataframe with lhX/lhY/rhX/rhY/leX/leY/reX/reY columns
    :return: the original df with the hands and elbows previous positions added
    """
    # .values replaces .as_matrix(), which was removed in pandas 1.0
    lh_x, lh_y, rh_x, rh_y = df['lhX'].values, df['lhY'].values, df['rhX'].values, df['rhY'].values
    le_x, le_y, re_x, re_y = df['leX'].values, df['leY'].values, df['reX'].values, df['reY'].values
    # Create zero-like matrices.
    pr_lh_x, pr_lh_y, pr_rh_x, pr_rh_y = np.zeros_like(lh_x), np.zeros_like(lh_y), np.zeros_like(rh_x), np.zeros_like(rh_y)
    pr_le_x, pr_le_y, pr_re_x, pr_re_y = np.zeros_like(le_x), np.zeros_like(le_y), np.zeros_like(re_x), np.zeros_like(re_y)
    # Shift every series down one frame: previous[i] = current[i-1].
    pr_lh_x[1:], pr_lh_y[1:], pr_rh_x[1:], pr_rh_y[1:] = lh_x[:-1], lh_y[:-1], rh_x[:-1], rh_y[:-1]
    pr_le_x[1:], pr_le_y[1:], pr_re_x[1:], pr_re_y[1:] = le_x[:-1], le_y[:-1], re_x[:-1], re_y[:-1]
    # Put them in the dataframes.
    df['pre_lhX'], df['pre_lhY'], df['pre_rhX'], df['pre_rhY'] = pr_lh_x, pr_lh_y, pr_rh_x, pr_rh_y
    df['pre_leX'], df['pre_leY'], df['pre_reX'], df['pre_reY'] = pr_le_x, pr_le_y, pr_re_x, pr_re_y
    return df
#================================= Get the previous velocity of both hands and elbows ==========================================================
# THis will be used to calculate accelerations
def get_previous_vel(df):
    """Add previous-frame velocities of both hands and elbows to the dataframe.

    Each pre_*_v column holds the joint's velocity from the preceding frame
    (row 0 gets 0); these values feed the acceleration computation downstream.

    :param df: dataframe with lh_v/rh_v/le_v/re_v velocity columns
    :return: the original df with the previous velocity columns added
    """
    # .values replaces .as_matrix(), which was removed in pandas 1.0
    lh_vel, rh_vel = df['lh_v'].values, df['rh_v'].values
    le_vel, re_vel = df['le_v'].values, df['re_v'].values
    # Create zero-like matrices.
    pr_lh_vel, pr_rh_vel = np.zeros_like(lh_vel), np.zeros_like(rh_vel)
    pr_le_vel, pr_re_vel = np.zeros_like(le_vel), np.zeros_like(re_vel)
    # Shift every series down one frame: previous[i] = current[i-1].
    pr_lh_vel[1:], pr_rh_vel[1:] = lh_vel[:-1], rh_vel[:-1]
    pr_le_vel[1:], pr_re_vel[1:] = le_vel[:-1], re_vel[:-1]
    # Put them in the dataframes.
    df['pre_lh_v'], df['pre_rh_v'] = pr_lh_vel, pr_rh_vel
    df['pre_le_v'], df['pre_re_v'] = pr_le_vel, pr_re_vel
    return df
# ======================================== Calculate and return hand and elbow velocities. =====================================================
# We use the current and previous positions of elbows and hands to calculate the velocities as the distance in pixels between consecutive frames.
# Returns the original data frame with the velocity columns added.
def calculate_velocities(df):
    """Add per-frame hand and elbow velocities to the dataframe.

    Velocity is the Euclidean distance (in pixels) between a joint's current
    and previous position; the first 5 frames are left at 0 (warm-up window,
    matching the original implementation).

    :param df: dataframe with current (lhX, ...) and previous (pre_lhX, ...) joint positions
    :return: the original data frame with the velocity columns added
    """
    # .values replaces .as_matrix(), which was removed in pandas 1.0
    lh_x, lh_y, rh_x, rh_y = df['lhX'].values, df['lhY'].values, df['rhX'].values, df['rhY'].values
    le_x, le_y, re_x, re_y = df['leX'].values, df['leY'].values, df['reX'].values, df['reY'].values
    pr_lh_x, pr_lh_y, pr_rh_x, pr_rh_y = df['pre_lhX'].values, df['pre_lhY'].values, df['pre_rhX'].values, df['pre_rhY'].values
    pr_le_x, pr_le_y, pr_re_x, pr_re_y = df['pre_leX'].values, df['pre_leY'].values, df['pre_reX'].values, df['pre_reY'].values
    # Create the zero arrays to store velocities.
    # NOTE(review): zeros_like inherits the position dtype -- with integer
    # positions the sqrt distances below would be truncated on assignment;
    # presumably positions are floats in the CSV -- confirm.
    lh_vel, rh_vel = np.zeros_like(lh_x), np.zeros_like(rh_x)
    le_vel, re_vel = np.zeros_like(le_x), np.zeros_like(re_x)
    # Stack x/y into 2 x n position matrices for vectorized distance math.
    lh, pre_lh, rh, pre_rh = np.array((lh_x, lh_y)), np.array((pr_lh_x, pr_lh_y)), np.array((rh_x, rh_y)), np.array((pr_rh_x, pr_rh_y))
    le, pre_le, re, pre_re = np.array((le_x, le_y)), np.array((pr_le_x, pr_le_y)), np.array((re_x, re_y)), np.array((pr_re_x, pr_re_y))
    # Vectorized euclidean distance between the previous and current position.
    dist_lh, dist_rh = np.sqrt(((lh - pre_lh) ** 2).sum(axis=0)), np.sqrt(((rh - pre_rh) ** 2).sum(axis=0))
    dist_le, dist_re = np.sqrt(((le - pre_le) ** 2).sum(axis=0)), np.sqrt(((re - pre_re) ** 2).sum(axis=0))
    # Store the velocities back to the dataframe (first 5 frames stay 0).
    lh_vel[5:], rh_vel[5:] = dist_lh[5:], dist_rh[5:]
    le_vel[5:], re_vel[5:] = dist_le[5:], dist_re[5:]
    df['lh_v'], df['rh_v'] = lh_vel, rh_vel
    df['le_v'], df['re_v'] = le_vel, re_vel
    return df
#================================= Get the acceleration of both hands and elbows ==========================================================
# We use the current and previous velocity of the hands and elbows to calculate acceleration
def calculate_accelerations(df):
    """Add per-frame hand and elbow accelerations to the dataframe.

    Acceleration is the difference between a joint's current and previous
    velocity; the first 5 frames are left at 0 (warm-up window, matching the
    velocity computation).

    :param df: dataframe with current (lh_v, ...) and previous (pre_lh_v, ...) velocities
    :return: the original data frame with the acceleration columns added
    """
    # .values replaces .as_matrix(), which was removed in pandas 1.0
    lh_vel, rh_vel = df['lh_v'].values, df['rh_v'].values
    le_vel, re_vel = df['le_v'].values, df['re_v'].values
    pr_lh_vel, pr_rh_vel = df['pre_lh_v'].values, df['pre_rh_v'].values
    pr_le_vel, pr_re_vel = df['pre_le_v'].values, df['pre_re_v'].values
    # Create the zero arrays to store accelerations.
    lh_acc, rh_acc = np.zeros_like(lh_vel), np.zeros_like(rh_vel)
    le_acc, re_acc = np.zeros_like(le_vel), np.zeros_like(re_vel)
    # Velocity deltas between consecutive frames.
    lh_dv, rh_dv = (lh_vel - pr_lh_vel), (rh_vel - pr_rh_vel)
    le_dv, re_dv = (le_vel - pr_le_vel), (re_vel - pr_re_vel)
    # Store the accelerations back to the dataframe (first 5 frames stay 0).
    lh_acc[5:], rh_acc[5:] = lh_dv[5:], rh_dv[5:]
    le_acc[5:], re_acc[5:] = le_dv[5:], re_dv[5:]
    df['lh_a'], df['rh_a'] = lh_acc, rh_acc
    df['le_a'], df['re_a'] = le_acc, re_acc
    return df
# ============================== Calculate the distances of joints from the hip center and shoulder center =====================================
# Calculates the distances of both elbows and hands from the hip center and the shoulder center.
# Returns: the original dataframe with the hip-elbows, hip-hands, should center-elbows and shoulder center-hands distances columns added.
def calculate_distances(df):
    """Compute Euclidean distances between selected skeleton joints.

    Adds to ``df``: the distance between the two hands (``hands_d``), the
    distances of each hand/elbow from the hip centre (``*_hip_d``) and from
    the shoulder centre (``*_shc_d``).

    :param df: dataframe with per-frame joint coordinate columns
        lhX/lhY, rhX/rhY, leX/leY, reX/reY, hipX/hipY, shcX/shcY.
    :return: the same dataframe with the distance columns added.
    """
    # NOTE: DataFrame.as_matrix() was removed in pandas 1.0 — use .values.
    # The original zero-filled placeholder arrays were dead code (always
    # overwritten before use) and are dropped here.
    def _xy(prefix):
        # Stack the X/Y columns into a (2, n_frames) position array.
        return np.array((df[prefix + 'X'].values, df[prefix + 'Y'].values))

    def _dist(a, b):
        # Per-frame Euclidean distance between two (2, n) position arrays.
        return np.sqrt(((a - b) ** 2).sum(axis=0))

    lh, rh = _xy('lh'), _xy('rh')
    le, re = _xy('le'), _xy('re')
    hip, shc = _xy('hip'), _xy('shc')
    # Distance between the two hands.
    df['hands_d'] = _dist(lh, rh)
    # Distances of hands and elbows from the hip centre.
    df['lh_hip_d'], df['rh_hip_d'] = _dist(lh, hip), _dist(rh, hip)
    df['le_hip_d'], df['re_hip_d'] = _dist(le, hip), _dist(re, hip)
    # Distances of hands and elbows from the shoulder centre.
    df['lh_shc_d'], df['rh_shc_d'] = _dist(lh, shc), _dist(rh, shc)
    df['le_shc_d'], df['re_shc_d'] = _dist(le, shc), _dist(re, shc)
    return df
#================================================== Calculate some sets of angles ==============================================================
# Here we calculate the angles of hands-hip, hands-shoulder center and hands-elbows.
# Returns: the original df with the angles columns added to it.
def calculate_angles(df):
    """Compute joint angles for the hands.

    For each hand, the angle (via ``arctan2(dy, dx)``, radians in
    ``[-pi, pi]``) of the vector from the hip, from the shoulder centre and
    from the same-side elbow to the hand.

    :param df: dataframe with per-frame joint coordinate columns
        lhX/lhY, rhX/rhY, leX/leY, reX/reY, hipX/hipY, shcX/shcY.
    :return: the same dataframe with *_hip_ang, *_shc_ang, *_el_ang columns.
    """
    # NOTE: DataFrame.as_matrix() was removed in pandas 1.0 — use .values.
    def _xy(prefix):
        # Stack the X/Y columns into a (2, n_frames) position array.
        return np.array((df[prefix + 'X'].values, df[prefix + 'Y'].values))

    def _angle(vec):
        # Angle of the (dx, dy) difference vector, per frame.
        return np.arctan2(vec[1], vec[0])

    lh, rh = _xy('lh'), _xy('rh')
    le, re = _xy('le'), _xy('re')
    hip, shc = _xy('hip'), _xy('shc')
    df['lh_hip_ang'], df['rh_hip_ang'] = _angle(lh - hip), _angle(rh - hip)
    df['lh_shc_ang'], df['rh_shc_ang'] = _angle(lh - shc), _angle(rh - shc)
    df['lh_el_ang'], df['rh_el_ang'] = _angle(lh - le), _angle(rh - re)
    return df
#====================================================== Write labels to training datafile =======================================================
def write_train_labs(train_df):
    """Attach per-frame gesture labels to the training dataframe.

    Reads the per-sample label files from a hard-coded directory, matches
    them to ``train_df`` rows by file number and frame, and adds a
    ``labels`` column: 0 for inactive frames or frames without a gesture,
    1-20 for the gesture id.

    :param train_df: dataframe with 'file_number', 'frame' and 'inactive'
        columns, ordered file by file (rows are matched positionally when
        the label vector is appended at the end).
    :return: train_df with an integer 'labels' column added.
    """
    # Map gesture names to numbers
    gestures_dict = {'vattene':1, 'vieniqui':2, 'perfetto':3, 'furbo':4, 'cheduepalle':5, 'chevuoi':6, 'daccordo':7, 'seipazzo':8, 'combinato':9,
                     'freganiente':10, 'ok':11, 'cosatifarei':12, 'basta':13, 'prendere':14, 'noncenepiu':15, 'fame':16, 'tantotempo':17,
                     'buonissimo':18, 'messidaccordo':19, 'sonostufo':20}
    # Here are saved the labeling data (hard-coded local path).
    training_lab_path = '/home/alex/Documents/Data/Labels'
    # save all gestures with end frame, start frame and video number in a df to match the train_df
    lab_frame = pd.DataFrame()
    # Go through all lab files and load the label info in a dataframe.
    for lab_file in sorted(os.listdir(training_lab_path)):
        # Drop columns 1 and 3; the kept columns are used below as
        # 0 = gesture name, 2 = start frame, 4 = end frame — TODO confirm
        # against the label-file format.
        lf = pd.read_csv(training_lab_path + '/' + lab_file, delimiter=' ', header=None).drop([1,3],1)
        # Get file number from the file name, e.g. "Sample0001_data_labels".
        file_number = re.findall("Sample(\d*)_data_labels",lab_file)[0]
        lf['file_number'] = pd.Series(int(file_number), index=lf.index)
        lab_frame = lab_frame.append(lf)
    # Put all labels in one big vector, file by file.
    label_vector = []
    # Go through each file
    for f_num in train_df['file_number'].unique():
        # Labeling data for the file
        lab_f = lab_frame[lab_frame['file_number'] == f_num]
        # The dataframe of the file
        tr_f = train_df[train_df['file_number'] == f_num]
        # vector of frames
        frames = tr_f['frame'].as_matrix()
        # vector of activity/inactivity info
        inactive = tr_f['inactive'].as_matrix()
        # label vector to be created
        lab_v = np.zeros_like(inactive)
        # use the label and activity/inactivity info to label frames (inactive -> 0)
        for f, i in zip(frames, inactive):
            # if inactive do nothing (label stays 0)
            if i == 1: continue
            try:
                # Gesture whose (start, end] frame range contains f.
                gest_lab = lab_f[(lab_f[2] < f) & (lab_f[4] >= f)][0].values[0]
            # if there is no label do nothing
            # NOTE(review): bare except also swallows unrelated errors
            # (e.g. KeyError on malformed label files) — consider narrowing
            # to IndexError.
            except: continue
            # map gesture name to number
            # NOTE(review): indexes lab_v by the absolute frame number —
            # assumes frames are 0-based within each file; confirm.
            lab_v[f] = gestures_dict[gest_lab]
        # append to the big vector
        label_vector = np.hstack((label_vector,lab_v))
    train_df['labels'] = label_vector.astype(int)
    return train_df
#============================================================= Main function ====================================================================
# This is the function that goes through the feature extraction process and writes the output.
# Top-level driver (Python 2: raw_input / print statements).
# Choose the input/output file pair from the user-selected mode; anything
# other than 'train' or 'val' falls through to the final (test) set.
mode = raw_input('Select train/val mode: ')
print mode
if mode == 'train':
    sk_data_file = "training_data.csv"
    out_file = "Training_set_skeletal.csv"
elif mode == 'val':
    sk_data_file = "validation_data.csv"
    out_file = "Validation_set_skeletal.csv"
else:
    sk_data_file = "final_data.csv"
    out_file = "final_set_skeletal.csv"
print "Loading data..."
df = load_data(sk_data_file)
print "Finished loading."
# Feature pipeline: previous positions -> velocities -> previous velocities
# -> accelerations -> joint distances -> joint angles.
df = get_previous_pos(df)
print "Calculating velocities..."
df = calculate_velocities(df)
df = get_previous_vel(df)
print "Calculating accelerations..."
df = calculate_accelerations(df)
print "Calculating distances..."
df = calculate_distances(df)
print "Calculating angles..."
df = calculate_angles(df)
print "Writing output to csv..."
df.to_csv(out_file,
          index=False)
| {
"repo_name": "AlexGidiotis/Multimodal-Gesture-Recognition-with-LSTMs-and-CTC",
"path": "skeletal_network/skeletal_feature_extraction.py",
"copies": "1",
"size": "14319",
"license": "mit",
"hash": -7257514341169482000,
"line_mean": 45.1903225806,
"line_max": 145,
"alpha_frac": 0.6271387667,
"autogenerated": false,
"ratio": 2.584657039711191,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3711795806411191,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alex Gusev <alex@flancer64.com>'
from prxgt.const import ATTR_TYPE_INT
from prxgt.domain.attribute import Attribute
from prxgt.domain.meta.entity import Entity
ATTR_ID_NAME = "id"
class Instance(Entity):
    """
    Entity instance representation: an identifier plus a mapping of named
    attributes.
    """

    def __init__(self, id_=None, attrs=None):
        # A fresh dict is created when none is given so instances never
        # share a mutable default.
        self._id = id_
        self._attrs = {} if attrs is None else attrs

    @property
    def id(self):
        """Instance identifier."""
        return self._id

    @id.setter
    def id(self, val):
        # Keep the id both as a plain field and as a regular "id" attribute
        # so it can be inspected/filtered like any other attribute.
        self._id = int(val)
        attr = Attribute()
        attr.name = ATTR_ID_NAME
        attr.type = ATTR_TYPE_INT
        attr.value = int(val)
        self.add_attr(attr)

    @property
    def attrs(self):
        """Mapping of attribute name -> Attribute."""
        return self._attrs

    @attrs.setter
    def attrs(self, val):
        self._attrs = val

    def add_attr(self, attr: Attribute):
        """
        Set (add or replace) new attribute to this instance, keyed by name.
        """
        self._attrs[attr.name] = attr

    def get_attr(self, attr_name) -> Attribute:
        """Return the attribute with the given name, or None if absent."""
        # Single dict lookup instead of the original `in` check + access.
        return self._attrs.get(attr_name)
return result | {
"repo_name": "praxigento/teq_test_db_schema_attrs",
"path": "prxgt/domain/instance.py",
"copies": "1",
"size": "1217",
"license": "mit",
"hash": 4806697462854603000,
"line_mean": 21.5555555556,
"line_max": 60,
"alpha_frac": 0.5579293344,
"autogenerated": false,
"ratio": 3.733128834355828,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47910581687558285,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alex Gusev <alex@flancer64.com>'
from prxgt.domain.filter.filter import Filter
from prxgt.domain.filter.filter_rule import FilterRule
from prxgt.domain.filter.condition import Condition
class ConditionRule(FilterRule):
    """
    A logical condition (AND, OR, NOT) applied to one or more filters.
    A ConditionRule can itself be used as part of another ConditionRule.
    (Docstring translated from the original Russian.)
    """

    def __init__(self, cond: Condition, *parts: Filter):
        self._condition = cond
        # TODO we should check parts of the rule - 1 for NOT and > 1 for AND & OR
        self._filters = list(parts)

    @property
    def condition(self) -> Condition:
        """The logical condition (AND/OR/NOT) joining the filters."""
        return self._condition

    @condition.setter
    def condition(self, val: Condition):
        self._condition = val

    @property
    def filters(self):
        """The filters the condition is applied to."""
        return self._filters

    @filters.setter
    def filters(self, val):
        self._filters = val

    def __repr__(self):
        # e.g. 'AND'(gt:2('customer_id', 5), lt:2('customer_id', 25))
        parts = ", ".join(repr(one) for one in self.filters)
        return repr(self.condition) + "(" + parts + ")"
"repo_name": "praxigento/teq_test_db_schema_attrs",
"path": "prxgt/domain/filter/condition_rule.py",
"copies": "1",
"size": "1305",
"license": "mit",
"hash": -1540374810823010000,
"line_mean": 28.1463414634,
"line_max": 112,
"alpha_frac": 0.622278057,
"autogenerated": false,
"ratio": 3.1256544502617802,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9243188382587331,
"avg_score": 0.0009488249348899089,
"num_lines": 41
} |
__author__ = 'Alex Gusev <alex@flancer64.com>'
import json
import logging
class Config:
    """Typed accessor over a JSON configuration file."""

    _filename = None  # path to the JSON configuration file
    _data = None      # parsed configuration dict, populated by load()

    def __init__(self, filename='config.json'):
        self._filename = filename

    def load(self):
        """Read and parse the configuration file.

        Uses a context manager so the file handle is closed even when
        json.load raises (the original leaked the handle on parse errors).
        """
        with open(self._filename) as cfg_file:
            self._data = json.load(cfg_file)
        logging.info("configuration is loaded from file '%s';", self._filename)

    def get_dom_attrs_total(self):
        """Total number of distinct attributes in the domain."""
        return self._data['domain']['attrs_total']

    def get_dom_attrs_per_instance_min(self):
        """Minimum number of attributes per generated instance."""
        return self._data['domain']['attrs_per_instance_min']

    def get_dom_attrs_per_instance_max(self):
        """Maximum number of attributes per generated instance."""
        return self._data['domain']['attrs_per_instance_max']

    def get_dom_inst_total(self):
        """Total number of instances to generate."""
        return self._data['domain']['instances_total']

    def get_oper_inst_count(self):
        """Number of get-instance operations to run."""
        return self._data['operations']['get_instance']['count']

    def get_oper_filter_count(self):
        """Number of get-by-filter operations to run."""
        return self._data['operations']['get_by_filter']['count']

    def get_oper_filter_attrs_max(self):
        """Maximum number of attributes in a generated filter."""
        return self._data['operations']['get_by_filter']['attrs_in_filter_max']

    def get_oper_filter_attrs_min(self):
        """Minimum number of attributes in a generated filter."""
        return self._data['operations']['get_by_filter']['attrs_in_filter_min']
| {
"repo_name": "praxigento/teq_test_db_schema_attrs",
"path": "prxgt/config.py",
"copies": "1",
"size": "1262",
"license": "mit",
"hash": -7290052338089364000,
"line_mean": 29.7804878049,
"line_max": 79,
"alpha_frac": 0.6283676704,
"autogenerated": false,
"ratio": 3.5649717514124295,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46933394218124297,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alex Gusev <alex@flancer64.com>'
import prxgt.const as const
from prxgt.domain.meta.attribute import Attribute as AttributeBase
class Attribute(AttributeBase):
    """
    Attribute model contains data: a (name, type) pair from the base class
    plus the actual value.
    """
    def __init__(self, name=None, type_=None, value=None):
        super(Attribute, self).__init__(name, type_)
        self._value = value
        return

    @property
    def value(self):
        # The attribute's payload; its Python type depends on self.type.
        return self._value

    @value.setter
    def value(self, val):
        self._value = val

    @property
    def meta(self):
        """
        META Attribute (name and type only, value stripped).
        :return: a new AttributeBase carrying just this attribute's name/type.
        """
        return AttributeBase(self._name, self._type)

    def __repr__(self):
        result = super(Attribute, self).__repr__()
        if (self.value is not None) and (self.type == const.ATTR_TYPE_TXT):
            # Long text values are truncated to 4 chars: [name@text='valu...']
            # (the original comments on these two branches were swapped)
            result += "=" + repr(self.value[:4] + "...")
        else:
            # Full value shown: [name@type='value']
            result += "=" + repr(self.value)
        return result
"repo_name": "praxigento/teq_test_db_schema_attrs",
"path": "prxgt/domain/attribute.py",
"copies": "1",
"size": "1049",
"license": "mit",
"hash": 5439268651677813000,
"line_mean": 25.25,
"line_max": 75,
"alpha_frac": 0.5510009533,
"autogenerated": false,
"ratio": 3.870848708487085,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4921849661787085,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alex Gusev <alex@flancer64.com>'
import unittest
import os
from mock import Mock
from prxgt.config import Config
from prxgt.repo.repository import Repository
from prxgt.domain.instance import Instance
from prxgt.domain.attribute import Attribute
ATTR_NAME = "a0"
INST_ID = 0
ATTR_TYPE = "som type"
ATTR_VALUE = "some value"
_TEST_CONFIG_ATTRS_TOTAL = 10
class Test(unittest.TestCase):
    """Unit tests for prxgt.repo.repository.Repository."""

    def setUp(self):
        return

    def _mock_config(self) -> Config:
        """Build a Config mock describing a small fixed domain
        (10 attributes total, 3-5 per instance, 50 instances)."""
        result = Mock(Config())
        result.get_dom_attrs_total = Mock(return_value=_TEST_CONFIG_ATTRS_TOTAL)
        result.get_dom_attrs_per_instance_min = Mock(return_value=3)
        result.get_dom_attrs_per_instance_max = Mock(return_value=5)
        result.get_dom_inst_total = Mock(return_value=50)
        return result

    def test_init(self):
        """Repository can be constructed from a config."""
        # prepare data
        config = self._mock_config()
        # tests
        repo = Repository(config)
        self.assertIsNotNone(repo)
        return

    def test_init_all(self):
        """init_all populates the instances dict per the config totals."""
        # prepare data
        config = self._mock_config()
        # tests
        repo = Repository(config)
        repo.init_all()
        self.assertTrue(isinstance(repo._instances, dict))
        self.assertEqual(50, len(repo._instances))
        return

    def test_save_load(self):
        """Saving then loading the repository preserves attribute values."""
        # NOTE(review): writes into ../_test/tmp relative to this file —
        # the directory must exist for the test to pass.
        filename = '../_test/tmp/repository.json'
        dirname = os.path.dirname(os.path.abspath(__file__))
        fullpath = dirname + '/' + filename
        # prepare data
        config = self._mock_config()
        # tests
        repo = Repository(config)
        repo.init_all()
        before = repo._instances[0]
        assert isinstance(before, Instance)
        repo.save(fullpath)
        repo.load(fullpath)
        after = repo._instances[0]
        assert isinstance(after, Instance)
        # every attribute of instance 0 must survive the round trip
        for attr_name in before.attrs:
            self.assertEqual(before.get_attr(attr_name).value, after.get_attr(attr_name).value)
        return

    def test_get_attr_names(self):
        """All configured attribute names plus the implicit "id" are listed."""
        # prepare data
        config = self._mock_config()
        # tests
        repo = Repository(config)
        repo._init_attrs()
        names = repo.get_attr_names()
        self.assertTrue(isinstance(names, list))
        self.assertEqual(_TEST_CONFIG_ATTRS_TOTAL + 1, len(names))  # + "id" attribute
        return

    def test_get_attr_by_name(self):
        """Attributes can be retrieved by their generated name."""
        # prepare data
        config = self._mock_config()
        # tests
        repo = Repository(config)
        repo._init_attrs()
        attr = repo.get_attr_by_name("a0")
        self.assertEqual("a0", attr.name)
        return

    def test_add_instance(self):
        """add_instance stores the instance and keeps its attributes intact."""
        # prepare data
        config = self._mock_config()
        attr = Attribute()
        attr.name = ATTR_NAME
        attr.type = ATTR_TYPE
        attr.value = ATTR_VALUE
        inst = Instance()
        inst.add_attr(attr)
        # tests
        repo = Repository(config)
        repo.add_instance(inst)
        all_ = repo.instances
        self.assertTrue(isinstance(all_, dict))
        test_inst = all_[INST_ID]
        assert isinstance(test_inst, Instance)
        self.assertEqual(INST_ID, test_inst.id)
        test_attr = test_inst.get_attr(ATTR_NAME)
        assert isinstance(test_attr, Attribute)
        self.assertEqual(ATTR_NAME, test_attr.name)
        self.assertEqual(ATTR_TYPE, test_attr.type)
        self.assertEqual(ATTR_VALUE, test_attr.value)
        return


# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
"repo_name": "praxigento/teq_test_db_schema_attrs",
"path": "prxgt/repo/test_repository.py",
"copies": "1",
"size": "3483",
"license": "mit",
"hash": 1471161916735666700,
"line_mean": 28.7777777778,
"line_max": 95,
"alpha_frac": 0.6032156187,
"autogenerated": false,
"ratio": 3.7858695652173915,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.988537201393876,
"avg_score": 0.000742633995726017,
"num_lines": 117
} |
__author__ = 'Alex Gusev <alex@flancer64.com>'
import unittest
from prxgt.domain.filter.alias import Alias
from prxgt.domain.filter.condition import Condition
from prxgt.domain.filter.condition_rule import ConditionRule
from prxgt.domain.filter.filter import Filter
from prxgt.domain.filter.function import Function
from prxgt.domain.filter.function_rule import FunctionRule
from prxgt.domain.filter.value import Value
class Test(unittest.TestCase):
    """Unit tests for prxgt.domain.filter.condition_rule.ConditionRule."""

    def test_init(self):
        """Constructor stores the condition passed in."""
        and_ = "AND"
        cond = Condition(and_)
        frule = FunctionRule(Function("gt", 2), Alias("customer_id"), Value(5))
        filter1 = Filter(frule)
        filter2 = Filter(frule)
        crule = ConditionRule(cond, filter1, filter2)
        self.assertEqual(cond, crule.condition)
        pass

    def test_properties(self):
        """condition and filters getters/setters round-trip."""
        and_ = "AND"
        or_ = "OR"
        cond1 = Condition(and_)
        frule = FunctionRule(Function("gt", 2), Alias("customer_id"), Value(5))
        filter1 = Filter(frule)
        filter2 = Filter(frule)
        crule = ConditionRule(cond1, filter1, filter2)
        self.assertEqual(cond1, crule.condition)
        cond2 = Condition(or_)
        crule.condition = cond2
        self.assertEqual(cond2, crule.condition)
        filters = (filter2, filter1)
        crule.filters = filters
        self.assertEqual(filters, crule.filters)
        pass

    def test_repr(self):
        """repr renders the condition followed by the comma-separated filters."""
        and_ = "AND"
        cond = Condition(and_)
        frule1 = FunctionRule(Function("gt", 2), Alias("customer_id"), Value(5))
        frule2 = FunctionRule(Function("lt", 2), Alias("customer_id"), Value(25))
        filter1 = Filter(frule1)
        filter2 = Filter(frule2)
        crule = ConditionRule(cond, filter1, filter2)
        self.assertEqual("'AND'(gt:2('customer_id', 5), lt:2('customer_id', 25))", repr(crule))
        pass


# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
"repo_name": "praxigento/teq_test_db_schema_attrs",
"path": "prxgt/domain/filter/test_condition_rule.py",
"copies": "1",
"size": "1902",
"license": "mit",
"hash": -6450459622406062000,
"line_mean": 34.2407407407,
"line_max": 95,
"alpha_frac": 0.6388012618,
"autogenerated": false,
"ratio": 3.541899441340782,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9672590606235449,
"avg_score": 0.0016220193810666719,
"num_lines": 54
} |
__author__ = 'Alex Gusev <alex@flancer64.com>'
import unittest
import prxgt.const as const
from prxgt.repo.generator import Generator
class Test(unittest.TestCase):
    """Unit tests for prxgt.repo.generator.Generator."""

    def test_init(self):
        """Generator can be constructed with its default type map."""
        # tests
        gene = Generator()
        self.assertIsNotNone(gene)
        return

    def test_get_value(self):
        """The default generators produce values of the expected type/size."""
        # tests simple generators.
        gene = Generator()
        self.assertIsNotNone(gene)
        value = gene.get_value(const.ATTR_TYPE_DEC)
        self.assertTrue(isinstance(value, float))
        value = gene.get_value(const.ATTR_TYPE_INT)
        self.assertTrue(isinstance(value, int))
        value = gene.get_value(const.ATTR_TYPE_STR)
        self.assertTrue(isinstance(value, str))
        self.assertTrue(len(value) == 8)
        value = gene.get_value(const.ATTR_TYPE_TXT)
        self.assertTrue(isinstance(value, str))
        self.assertTrue(len(value) == 512)
        return

    def test_set_for_type(self):
        """set_for_type replaces the generator function for one type."""
        # tests custom generator for some type.
        gene = Generator()
        self.assertIsNotNone(gene)
        value = gene.get_value(const.ATTR_TYPE_STR)
        self.assertTrue(isinstance(value, str))
        self.assertTrue(len(value) == 8)
        # after the override, STR values come from custom_generator below
        gene.set_for_type(const.ATTR_TYPE_STR, custom_generator)
        value = gene.get_value(const.ATTR_TYPE_STR)
        self.assertTrue(isinstance(value, int))
        self.assertTrue(value == 5)
def custom_generator(self):
    """Fixed-value generator used to exercise Generator.set_for_type.

    Takes the calling Generator instance (unused) to match the dispatch
    convention and always yields 5.
    """
    return 5
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
"repo_name": "praxigento/teq_test_db_schema_attrs",
"path": "prxgt/repo/test_generator.py",
"copies": "1",
"size": "1512",
"license": "mit",
"hash": 9222583931975565000,
"line_mean": 29.26,
"line_max": 64,
"alpha_frac": 0.6302910053,
"autogenerated": false,
"ratio": 3.7241379310344827,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4854428936334482,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alex Gusev <alex@flancer64.com>'
from prxgt.domain.filter.expression import Expression
from prxgt.domain.filter.filter_rule import FilterRule
from prxgt.domain.filter.function import Function
class FunctionRule(Expression, FilterRule):
    """
    A function with a set of parameters that returns True or False.
    A FunctionRule can be used as a FilterRule or as part of another
    FunctionRule. (Docstring translated from the original Russian.)
    """

    def __init__(self, func: Function, *parameters: Expression):
        self._func = func
        # TODO we should check parts of the rule - 1 for NOT and > 1 for AND &
        # OR
        self._params = list(parameters)

    @property
    def func(self) -> Function:
        """The boolean function applied to the parameters."""
        return self._func

    @func.setter
    def func(self, val: Function):
        self._func = val

    @property
    def params(self):
        """The expressions the function is applied to."""
        return self._params

    @params.setter
    def params(self, val):
        self._params = val

    def __repr__(self):
        # e.g. gt:2('customer_id', 5)
        args = ", ".join(repr(one) for one in self.params)
        return repr(self.func) + "(" + args + ")"
| {
"repo_name": "praxigento/teq_test_db_schema_attrs",
"path": "prxgt/domain/filter/function_rule.py",
"copies": "1",
"size": "1309",
"license": "mit",
"hash": 2784788847880604000,
"line_mean": 25.6222222222,
"line_max": 78,
"alpha_frac": 0.6135225376,
"autogenerated": false,
"ratio": 3.1279373368146213,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42414598744146215,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alex Gusev <alex@flancer64.com>'
from prxgt.domain.filter.filter import Filter
from prxgt.domain.instance import Instance
from prxgt.proc.base import ProcessorBase
from prxgt.repo.repository import Repository
from prxgt.proc.filtrator import Filtrator
class RepoProcessor(ProcessorBase):
    """
    Simple in-memory processor based on Repository object. Used in development
    purposes only to test operations.
    """

    def __init__(self, repo: Repository):
        self._repo = repo
        """Repository to store data and meta data"""

    def get_list_paged(self, filter_data, order_data, pages_data):
        # Delegates to the base class — presumably a stub there;
        # TODO confirm ProcessorBase behaviour.
        super().get_list_paged(filter_data, order_data, pages_data)

    def get_list_ordered(self, filter_data, order_data):
        # Delegates to the base class (see note above in this block).
        super().get_list_ordered(filter_data, order_data)

    def add_instance(self, inst: Instance):
        """Store an instance in the backing repository."""
        self._repo.add_instance(inst)
        return

    def get_inst_by_id(self, instance_id) -> Instance:
        """Return the instance with the given id (KeyError if absent)."""
        result = self._repo.instances[instance_id]
        return result

    def get_list_by_filter(self, filter_: Filter):
        """Return a dict {id: Instance} of all instances matching filter_.

        Performs a full scan over the repository, applying Filtrator to each
        instance — linear in the number of stored instances.
        """
        result = {}
        assert (isinstance(self._repo, Repository))
        instances = self._repo.instances
        assert (isinstance(instances, dict))
        for id_ in instances:
            one = instances[id_]
            # apply current filter for the concrete instance
            if Filtrator.is_applied(filter_, one):
                result[one.id] = one
        return result
# TODO: should we register subclasses in parent?
# ProcessorBase.register(RepoProcessor)
| {
"repo_name": "praxigento/teq_test_db_schema_attrs",
"path": "prxgt/proc/repo.py",
"copies": "1",
"size": "1573",
"license": "mit",
"hash": 1691764970543538700,
"line_mean": 32.4680851064,
"line_max": 78,
"alpha_frac": 0.6649713922,
"autogenerated": false,
"ratio": 3.8935643564356437,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5058535748635644,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alex Gusev <alex@flancer64.com>'
import random
import string
import prxgt.const as const
TYPE_DEC = const.ATTR_TYPE_DEC
TYPE_INT = const.ATTR_TYPE_INT
TYPE_STR = const.ATTR_TYPE_STR
TYPE_TXT = const.ATTR_TYPE_TXT
class Generator(object):
    """
    Values generator for various types of data.

    (Translated from the original Russian docstring:) A simple generator
    that returns a value for data of a given type. The generating function
    for a type can be overridden via set_for_type(type, function). The
    override mechanism is admittedly clumsy — the supplied function is
    stored and called with the generator instance as its first argument,
    rather than becoming a true bound method of the object.
    """

    def __init__(self):
        # Map type constant -> generating function. The defaults are the
        # module-level _simple_* helpers defined below this class.
        self._type_gen = {
            TYPE_DEC: _simple_dec,
            TYPE_INT: _simple_int,
            TYPE_STR: _simple_str,
            TYPE_TXT: _simple_txt
        }
        pass

    def set_for_type(self, type_, function_):
        # Override the generator for one type; function_ will be called
        # as function_(self) — see get_value().
        self._type_gen[type_] = function_

    def get_value(self, type_):
        # Dispatch to the registered generating function for this type.
        result = self._type_gen[type_](self)
        return result
"""
Simple generators bound to the types by default.
"""
def _simple_dec(self):
result = random.randint(0, 10000) / 100
return result
def _simple_int(self):
result = random.randint(0, 10)
return result
def _simple_str(self):
chars = string.ascii_letters + string.digits
result = ''.join(random.choice(chars) for _ in range(8))
return result
def _simple_txt(self):
chars = string.ascii_letters + string.digits + " "
result = ''.join(random.choice(chars) for _ in range(512))
return result
| {
"repo_name": "praxigento/teq_test_db_schema_attrs",
"path": "prxgt/repo/generator.py",
"copies": "1",
"size": "1946",
"license": "mit",
"hash": 4943983617352146000,
"line_mean": 23.4782608696,
"line_max": 79,
"alpha_frac": 0.6477205447,
"autogenerated": false,
"ratio": 2.5475113122171944,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.36952318569171944,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alexis.koalla@orange.com'
from flask import request
from S3.bucket import S3Bucket
from Log import Log
from subprocess import PIPE, Popen
import simplejson as json
from model.chunk import Chunk
from model.osd import OSD
from model.pg import PG
from model.S3Object import S3Object
import requests
import re
from S3.user import S3User
import rados
from datetime import datetime
class S3ObjectCtrl:
#Log.debug("Entering in S3ObjectCtrl class <<")
    def __init__(self, conf):
        """Initialise the controller from a config dict and connect to Ceph.

        Reads radosgw admin credentials and endpoint URLs from *conf*
        (falling back to defaults), derives the REST API URLs, then opens a
        librados connection using the configured ceph.conf file.
        Python 2 code (print statements).
        """
        self.admin = conf.get("radosgw_admin", "admin")
        self.key = conf.get("radosgw_key", "")
        self.secret = conf.get("radosgw_secret", "")
        self.conffile = conf.get("ceph_conf", '/etc/ceph/ceph.conf')
        self.radosgw_url = conf.get("radosgw_url", "127.0.0.1")
        self.clusterName = conf.get("cluster", "ceph")
        self.secure = self.radosgw_url.startswith("https://")
        # Normalise the optional REST API sub-folder to "/sub" or "".
        ceph_rest_api_subfolder = conf.get("ceph_rest_api_subfolder", "")
        ceph_rest_api_subfolder = ceph_rest_api_subfolder.strip('/')
        if ceph_rest_api_subfolder != '':
            ceph_rest_api_subfolder = "/" + ceph_rest_api_subfolder
        self.cephRestApiUrl = "http://"+conf.get("ceph_rest_api", "")+ceph_rest_api_subfolder+"/api/v0.1/"
        self.inkscopeCtrlUrl = "http://"+conf.get("inkscope_root", "")+"/inkscopeCtrl/"
        # Admin endpoint = radosgw base URL + admin entry point.
        if not self.radosgw_url.endswith('/'):
            self.radosgw_url += '/'
        self.url = self.radosgw_url + self.admin
        # Open the librados connection (side effect: connects at construction).
        self.cluster = rados.Rados(conffile=str(self.conffile))
        print "\nlibrados version: " + str(self.cluster.version())
        print "Will attempt to connect to: " + str(self.cluster.conf_get('mon initial members'))
        self.cluster.connect()
        print "\nCluster ID: " + self.cluster.get_fsid()
        # print "config url: "+self.url
        # print "config admin: "+self.admin
        # print "config key: "+self.key
        # print(json.dumps(conf))
    def getAdminConnection(self):
        """Return an S3Bucket connection bound to the radosgw admin endpoint,
        using the credentials read from the configuration."""
        return S3Bucket(self.admin, access_key=self.key, secret_key=self.secret , base_url=self.url, secure=self.secure)
    def getObjectStructure(self) :
        """Build the full placement structure of one S3 object.

        Reads ``objectId`` and ``bucketName`` from the Flask request args,
        resolves the bucket/pool, collects the object's chunks from the rgw
        manifest, the placement groups (PGs) holding each chunk and the OSDs
        backing those PGs, and returns the whole thing as an S3Object dump.

        :return: the serialised S3Object (whatever S3Object.dump() yields —
            presumably a dict/JSON structure; confirm against S3Object).
        """
        startdate = datetime.now()
        print(str(startdate) + ' -Calling method getObjectStructure() begins <<')
        print" __request", request
        objectId = request.args.get('objectId')
        bucketname = request.args.get('bucketName')
        osd_dump = self.getOsdDump()
        # objectIdd=request.form['objectId']
        # bucketnamee=request.form['bucketName']
        Log.debug("__getS3Object(objectId=" + str(objectId) + ", bucketName= " + str(bucketname) + ")")
        # Log.debug("getS3Object(objectIdd="+str(objectIdd)+", bucketNamee= "+str(bucketnamee)+")")
        # Retrieve the bucketId using the bucket name
        # NOTE(review): getBucketInfo is called twice with the same argument;
        # the result could be fetched once.
        bucketId = self.getBucketInfo(bucketname)["bucketid"]
        # Get the pool name using the bucket info
        poolname = self.getBucketInfo(bucketname)["poolname"]
        # Retrieve the pool id
        poolid = self.getPoolId(poolname)
        # poolname=getPoolName(bucketName)
        # rgw stores objects under "<bucketId>_<objectId>".
        extended_objectId = bucketId + "_" + objectId
        # Retrieve the user.rgw.manifest that contains the chunks list for the object
        # usermnf=self.getUserRgwManifest(poolname,extended_objectId)
        # Retrieve the chunk base name in the user.rgw.manifest attribute
        chunkbasename = self.getChunkBaseName(poolname, extended_objectId)
        print '__Chunk base name: ', chunkbasename
        if len(chunkbasename):  # check if there is a chunk or not for the object
            # Retrieve the chunks list of the object
            chunks = self.getChunks(bucketId, poolname, objectId, chunkbasename)
            chunks.append(extended_objectId)  # Add the last object that is around 512.0 kb
        else :
            # Small object: the head object is the only "chunk".
            chunks = [extended_objectId]
        print "__Chunks list", chunks
        # bucketInfo=self.getBucketInfo(bucketId)
        chunklist = []
        pgs = []
        osds = []
        osdids = []
        pgid4osd = []
        # For every chunk: record its size and PG, and collect the distinct
        # PG ids so the OSD list can be built afterwards.
        for chunk in chunks :
            if len(chunk) > 0 :
                print 'Chunk= ', chunk
                chunksize = self.getChunkSize(poolname, chunk)
                # NOTE(review): getPgId apparently returns a pair of pg ids
                # (display form at [0], query form at [1]) — confirm.
                pgid = self.getPgId(poolname, ' ' + chunk)
                c = Chunk(chunk, chunksize, pgid[0])
                chunklist.append(c)
                if pgid4osd.count(pgid[1]) == 0:
                    pgid4osd.append(pgid[1])
                if pgid4osd.count(pgid[0]) == 0:
                    pgid4osd.append(pgid[0])
                # Create the PG for this chunk
                # def __init__(self,pgid,state,acting, up, acting_primary, up_primary):
                pginfos = self.getOsdMapInfos(pgid[1]);
                pg = PG(pgid[0], pginfos['state'],
                        pginfos['acting'],
                        pginfos['up'],
                        pginfos['acting_primary'],
                        pginfos['up_primary'])
                # print(pg.dump())
                pgs.append(pg)  # Append the PG in the pgs list
                # print "____ OSD List for PG ", pgid[1],self.getOsdsListForPg(pgid[1])
        # Build the de-duplicated list of OSD ids across all collected PGs.
        for pgid in pgid4osd:
            for id in self.getOsdsListForPg(pgid):  # sortir la boucle pour les pg
                if osdids.count(id) == 0:
                    osdids.append(id)  # construct the list of the OSD to be displayed
        # Log.debug("Total number of chunks retrived:"+str(nbchunks))
        # print "_____osds list=",osdids
        for osdid in osdids:  # Loop the OSD list and retrieve the osd and add it in the osds list for the S3 object
            osd = self.getOsdInfos(osd_dump, osdid)
            # print(osd.dump())
            osds.append(osd)
        # Assemble the final structure: object + chunks + PGs + OSDs.
        s3object = S3Object(extended_objectId,
                            bucketname,
                            bucketId,
                            poolid,
                            poolname,
                            self.getPoolType(poolname, poolid),
                            self.getChunkSize(poolname, extended_objectId),
                            chunklist,
                            pgs,
                            osds)
        print(s3object.dump())
        duration = datetime.now() - startdate
        Log.info(str(datetime.now()) + ' ___Calling method getObjectStructure() end >> duration= ' + str(duration.seconds))
        return s3object.dump()
# This method returns the pool id of a given pool name
    def getPoolId(self, poolname):
        """Return the pool id (as a string) for *poolname*.

        Parses the output of `ceph osd pool stats <poolname>`, whose first
        line looks like "pool <name> id <N>".
        """
        Log.info("___getPoolId(poolname=" + str(poolname) + ")")
        outdata = self.executeCmd('ceph osd pool stats ', [poolname], [])
        # Take the text after " id " on the first line.
        poolid = outdata.strip().split('\n')[0].split(' id ')[1]  # ['pool .rgw.buckets', ' 16']
        return poolid
    # This method returns the pool type of a pool using the poolname and the pool id parameters
    def getPoolType(self, poolname, poolId):
        """Return the pool type token (e.g. 'replicated') from `ceph osd dump`.

        The dump line is filtered by both *poolname* and *poolId*; the type is
        the 4th whitespace-separated token of the matching line.
        """
        Log.info("___getPoolType(poolname=" + str(poolname) + ", poolId=" + str(poolId) + ")")
        outdata = self.executeCmd('ceph osd dump ', [], [poolname, ' ' + poolId])
        pooltype = outdata.strip().split(' ')[3] # ['pool', '26', "'.rgw.buckets'", 'replicated', 'size', '2', 'min_size', '1', 'crush_ruleset', '0', 'object_hash', 'rjenkins', 'pg_num', '8', 'pgp_num', '8', 'last_change', '408', 'stripe_width', '0']
        return pooltype
    # This method computes the size of an object
    # arguments: bucketName: The bucket name to look for
    # objectId: the object id we want to compute the size
    def getChunkSize2(self, poolName, objectid):
        """Return the object size (string) parsed from `rados stat` output.

        NOTE(review): legacy CLI-based variant of getChunkSize(); the debug
        message still says "getChunkSize". Kept for reference.
        """
        Log.debug("___getChunkSize(poolName=" + str(poolName) + ", objectId=" + str(objectid) + ")")
        outdata = self.executeCmd('rados --pool=', [poolName, ' stat ', objectid], [])
        #'.rgw.buckets/default.4651.2__shadow__0cIEZvHYuHkJ6xyyh9lwX4pj5ZsHrFD_125 mtime 1391001418, size 4194304\n'
        # Slice out the digits between "size " and the trailing newline.
        objectsize = outdata[outdata.index('size') + 5: outdata.index('\n')]
        return objectsize.rstrip()
    def getChunkSize(self, poolname, objectid):  # Method OK
        """Return the size in bytes of *objectid* via librados ioctx.stat().

        stat() returns (size, mtime); only the size is used.
        """
        Log.info('___getChunkSize(poolName=' + str(poolname) + ', objectId=' + str(objectid) + ')')
        ioctx = self.cluster.open_ioctx(str(poolname))
        size = ioctx.stat(str(objectid))
        return int(size[0])
    # This method returns the lists of osds for a given pgid
    # The following command is performed : ceph pg map 16.7 result= osdmap e11978 pg 16.7 (16.7) -> up [9,6] acting [9,6]
    def getUpActing(self, pgid):
        """Return {'up': ..., 'acting': ...} parsed from `ceph pg map <pgid>`.

        NOTE(review): the values are raw string tokens such as '[9,6]',
        not parsed integer lists.
        """
        Log.info("___getUpActing(pgid=" + str(pgid) + ")")
        outdata = self.executeCmd('ceph pg map ', [pgid], [])
        pgid = outdata.strip().split(' -> ', 2)[1].split(' ', 4) # 'up' '[9,6]' 'acting' '[9,6]'
        osds = {"up":pgid[1], "acting":pgid[3]}
        return osds
    # This method retrieves the information about the status of an osd: acting, up, primary_acting, primary_up
    # The PG id is used as an input argument
    def getOsdMapInfos(self, pgid):
        """Query the ceph REST API for PG *pgid* and return a dict with keys
        acting / acting_primary / state / up / up_primary.

        Returns an empty list on HTTP error.
        NOTE(review): when the response body is empty this still calls
        json.loads('') after logging, which will raise a ValueError.
        """
        Log.info("___getOsdMapInfos(pgid=" + str(pgid) + ")")
        cephRestApiUrl = self.cephRestApiUrl + 'tell/' + pgid + '/query.json';
        Log.debug("____cephRestApiUrl Request=" + cephRestApiUrl)
        osdmap = []
        data = requests.get(cephRestApiUrl)
        r = data.content
        if data.status_code != 200:
            print 'Error ' + str(data.status_code) + ' on the request getting pools'
            return osdmap
        # print(r)
        if len(r) > 0:
            osdmap = json.loads(r)
        else:
            Log.err('The getOsdMapInfos() method returns empty data')
            osdmap = json.loads(r)
        # osdmap=r.read()
        # Log.debug(osdmap)
        acting = osdmap["output"]["acting"]
        up = osdmap["output"]["up"]
        state = osdmap["output"]["state"]
        acting_primary = osdmap["output"] ["info"]["stats"]["acting_primary"]
        up_primary = osdmap["output"]["info"]["stats"]["up_primary"]
        osdmap_infos = {"acting":acting, "acting_primary":acting_primary, "state":state, "up":up, "up_primary":up_primary}
        return osdmap_infos
    # {
    # id : "osd.1",
    # status : ['in','up'],
    # host : "p-sbceph12",
    # capacity : 1000000000,
    # occupation : 0.236
    # },
    # This method returns the information for a given osd id that is passed in argument
    # The information of the osd is retrieved thanks to mongoDB inkscopeCtrl/{clusterName}/osd?depth=2 REST URI
    def getOsdDump(self):
        """Fetch the full OSD dump from the inkscope controller REST API.

        Returns the parsed JSON list, or an empty list on HTTP error or
        when the service answers with the literal string '[]'.
        """
        Log.debug("___getOsdDump()")
        # print str(datetime.datetime.now()), "-- Process OSDDump"
        cephRestUrl = self.inkscopeCtrlUrl + self.cluster.get_fsid() + '/osd?depth=2'
        print(cephRestUrl)
        # Set HTTP credentials for url callback (requests.)
        data = requests.get(cephRestUrl)
        #
        osds = []
        if data.status_code != 200:
            print 'Error ' + str(data.status_code) + ' on the request getting osd'
            return osds
        r = data.content
        if r != '[]':
            osds = json.loads(r)
        else:
            Log.err('The osd dump returns empty data')
        return osds
    def getOsdInfos(self, osds, osdid):
        """Build an OSD model object for *osdid* from the dump list *osds*.

        Scans the dump twice: once for the node name, once for the stat
        record (state flags, host id, capacity, partition usage).
        NOTE(review): the second scan resumes at the index where the first
        one stopped instead of restarting at 0 -- confirm the dump layout
        guarantees the stat record never precedes the node record.
        """
        Log.info("___getOsdInfos(osdid=" + str(osdid) + ")")
        i = 0
        osdnodename = ''
        capacity = 0
        used = 0
        total = 1  # avoids division by zero when no partition stat is found
        hostid = ''
        while i < len(osds):
            if osds[i]["node"]["_id"] == osdid:
                osdnodename = osds[i]["node"]["name"]
                break
            i = i + 1
        Log.debug("_____OSD Node Name= " + str(osdnodename))
        stat = []
        try:
            while i < len(osds):
                if osds[i]["stat"]["osd"]["node"]["$id"] == osdid:
                    state = osds[i]["stat"]
                    if state["in"]:
                        stat.append("in")
                    if state["up"]:
                        stat.append("up")
                    hostid = osds[i]["stat"]["osd"]["host"]["$id"]
                    capacity = osds[i]["host"]["network_interfaces"][0]["capacity"]
                    if osds[i]["partition"]["stat"] != 'null':
                        used = osds[i]["partition"]["stat"]["used"]
                    if osds[i]["partition"]["stat"] != 'null':
                        total = osds[i]["partition"]["stat"]["total"]
                    break
                i = i + 1
        except TypeError, e:
            # Missing/None sub-documents in the dump are tolerated.
            Log.err(e.__str__())
        # Log.debug("OSD node infos [: ")
        print "|_______________ [up, acting ]=", stat
        Log.debug("|______________Host id=" + str(hostid))
        Log.debug("|______________Capacity =" + str(capacity))
        Log.debug("|______________Used =" + str(used))
        Log.debug("|______________Total =" + str(total))
        Log.debug(" ]")
        occupation = "null"
        if int(used) > 0:
            occupation = round(float(used) / float(total), 3)
        osd = OSD(osdnodename,
                  stat, hostid,
                  capacity,
                  occupation
                  )
        print "_______________ osd= ", osd.dump()
        return osd
    # This method returns a list of osds for a given PG id.
    # This consist of the concatenation of the acting and the up array of the PG.
    # We careful with double entry whena dding the osd id, thanks to the list.count(x) method for the comparison
    def getOsdsListForPg(self, pgid):
        """Return the de-duplicated union of the acting and up OSD id lists
        for placement group *pgid*, queried through the ceph REST API."""
        Log.info("____getOsdsListForPg(pgid=" + str(pgid) + ")")
        cephRestApiUrl = self.cephRestApiUrl + 'pg/map.json?pgid=' + pgid
        data = requests.get(cephRestApiUrl)
        # r = data.json()
        r = data.content
        osdmap = json.loads(r)
        osdslist = []
        # print "acting[",osdmap["output"]["acting"],"]"
        for osd in osdmap["output"]["acting"]:
            # print "osd=",int(osd)
            if osdslist.count(int(osd)) == 0: # the list does not contains the element yet
                osdslist.append(int(osd))
        # print "up[",osdmap["output"]["up"],"]"
        for osd in osdmap["output"]["up"]:
            # print "osd=",int(osd)
            if osdslist.count(int(osd)) == 0: # the list does not contains the element yet
                osdslist.append(int(osd))
        # print "OSD LIST Contructed =", osdslist
        return osdslist
    def executeCmd(self, command, args=[], filters=[]):
        """Run a shell command built from *command* + *args*, piping through
        `grep <filter>` for each entry of *filters*; return stdout.

        Parentheses in args/filters are backslash-escaped for the shell.
        Raises RuntimeError when ANYTHING is written to stderr -- even
        harmless warnings (NOTE(review): may be over-strict).
        The mutable default args are never mutated, so they are safe here.
        """
        print "___Building unix with = " + command, "___args=" + json.dumps(args), "___filters=" + json.dumps(filters)
        cmd = command
        if len(args):
            i = 0
            while i < len(args):
                cmd = cmd + args[i].replace('(','\(').replace(')','\)')
                i = i + 1
        if len(filters):
            i = 0
            while i < len(filters):
                cmd = cmd + ' |grep ' + filters[i].replace('(','\(').replace(')','\)')
                i = i + 1
        p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
        outdata, errdata = p.communicate()
        if len(errdata):
            raise RuntimeError('unable to execute the command[%s] , Reason= %s' % (cmd, errdata))
        else:
            print "_____Execute Command successful %s", outdata
        return outdata
    # This method returns the name of the pool that hold the bucket which name is passed in argument
    # AN exception is thrown if an error occurs
    #
    # { "key": "bucket:cephfun",
    # "ver": { "tag": "_AQT53GiChuJ5ovYxlK3YitJ",
    # "ver": 1},
    # "mtime": 1410342120,
    # "data": { "bucket": { "name": "cephfun",
    # "pool": ".rgw.buckets",
    # "data_extra_pool": "",
    # "index_pool": ".rgw.buckets.index",
    # "marker": "default.896476.1",
    # "bucket_id": "default.896476.1"},
    # "owner": "cephfun",
    # "creation_time": 1410342120,
    # "linked": "true",
    def getBucketInfo (self, bucket):
        """Return {'poolname': ..., 'bucketid': ...} for *bucket* via the
        radosgw admin API.

        NOTE(review): reads the optional 'stats' flag from the Flask/web
        `request` global -- this method must run inside a request context.
        """
        myargs = []
        stats = request.form.get('stats', None)
        if stats is not None:
            myargs.append(("stats", stats))
        if bucket is not None:
            myargs.append(("bucket", bucket))
        conn = self.getAdminConnection()
        request2 = conn.request(method="GET", key="bucket", args=myargs)
        res = conn.send(request2)
        info = res.read()
        jsondata = json.loads(info)
        print jsondata
        poolname_bucketid = {"poolname":jsondata['pool'], "bucketid": jsondata['id']}
        return poolname_bucketid
    # This method returns the base name of the chunks that compose the object. The chunk base name is in the user.rgw.manifest attribute of the object
    # An exception is thrown if the object does not exist or there an issue
    def getChunkBaseName(self, poolName, objectid):
        """Extract the chunk base name from the object's user.rgw.manifest xattr.

        Control characters are stripped from the raw xattr, then the base
        name is located either after a '_shadow__' marker or after '!.'
        (assumed fixed 30-char length).  Returns '' for small objects that
        have no chunks.
        """
        Log.info("____Get the chunks list for the object [" + objectid + "] and the pool[ " + str(poolName) + "]")
        ioctx = self.cluster.open_ioctx(str(poolName))
        xattr = ioctx.get_xattr(str(objectid), 'user.rgw.manifest')
        # Strip the binary framing bytes of the manifest encoding.
        shadow = xattr.replace('\x00', '').replace('\x01', '').replace('\x02', '').replace('\x03', '').\
            replace('\x04', '').replace('\x05', '').replace('\x06', '').replace('\x07', '').replace('\x08', '').replace('\x09', '')\
            .replace('\x10', '').replace('\x11', '').replace('\x12', '').replace('\x0e', '').replace('\x0b', '').replace('\x0c', '')
        # '.index\x08\x08!.a8IqjFd0B9KyTAxmOh77aJEAB8lhGUV_\x01\x02\x01 \x08@\x07\x03_\x07cephfun\x0c'
        Log.debug("___Shadow: "+shadow)
        if shadow.count('shadow') > 0:
            shadow_motif = re.search('(?<=_shadow__)(\w(\-)*)+', shadow)
            print "_____ shadow motif= ", shadow_motif
            # chunkname=shadow_motif[shadow_motif.index('_shadow__')+9:,]
            chunkname = shadow_motif.group(0)
            chunkname = chunkname[0:chunkname.index('_')]
        elif shadow.count('!.'):
            # shadow_motif = re.search('(?<=\!\.)\w+', shadow)
            # chunkname=shadow_motif.group(0)
            pos = shadow.index('!.') + 2
            chunkname = shadow[pos:pos + 30] # The lenght of the shadow base name is 30!
        else : # The case the object has no chunk because it's not too large
            chunkname = ''
        Log.debug("____Chunkbasename= " + chunkname)
        return chunkname
    def getChunkBaseName1(self,poolName, objId):
        """Legacy CLI-based variant of getChunkBaseName() using `rados getxattr`.

        NOTE(review): strips fewer control characters and slices the '!.'
        case differently (index+3 to '_ @') than getChunkBaseName -- kept
        for reference, prefer getChunkBaseName().
        """
        Log.debug("____Get the chunks list for the object [" + str(objId) + "] and the pool[ " + str(poolName) + "]")
        outdata =self.executeCmd('rados --pool ', [poolName ,' getxattr ' , '"'+objId+'"'+' ' , 'user.rgw.manifest'],[])
        shadow = outdata.replace('\x00', '').replace('\x07', '').replace('\x01', '').replace('\x02', '').\
            replace('\x08','').replace('\x03', '').replace('\x11', '').replace('\x12','')
        # '.index\x08\x08!.a8IqjFd0B9KyTAxmOh77aJEAB8lhGUV_\x01\x02\x01 \x08@\x07\x03_\x07cephfun\x0c'
        #Log.debug("___Shadow: "+shadow)
        if shadow.count('shadow') > 0:
            shadow_motif = re.search('(?<=_shadow__)(\w(\-)*)+', shadow)
            print "_____ shadow motif= ", shadow_motif
            #chunkname=shadow_motif[shadow_motif.index('_shadow__')+9:,]
            chunkname=shadow_motif.group(0)
            chunkname=chunkname[0:chunkname.index('_')]
        elif shadow.count('!.'):
            #shadow_motif = re.search('(?<=\!\.)\w+', shadow)
            #chunkname=shadow_motif.group(0)
            chunkname = shadow[shadow.index('!.')+3:shadow.index('_ @')]
        else :# The case the object has no chunk because it's not too large
            chunkname=''
        Log.debug("____Chunkbasename= "+chunkname)
        return chunkname
    # This method returns the chunks list using the poolname, the bucketId and the chunk baseName as inpout argument
    # An exception is thrown if the object does not exist or there an issue
    def getChunks(self, bucketId, poolName, objectid, chunkBaseName) :
        """List the chunk object names via `rados ls` piped through grep.

        When *objectid* equals *chunkBaseName* the object is smaller than
        one chunk (4 MB) and is matched directly instead of by 'shadow'.
        Raises RuntimeError on any stderr output.
        """
        Log.info("____Get the chunks list using that id is " + str(bucketId) + " the poolName " + str(poolName) + " and the chunk base name " + str(chunkBaseName))
        cmd = 'rados --pool=' + poolName + ' ls|grep ' + bucketId + '|grep shadow|sort|grep ' + '"'+chunkBaseName.replace('-','\-')+'"'
        # on protege les caracteres speciaux en debut de shadow
        if objectid == chunkBaseName: # The object has no chunk because it's smaller than 4Mo
            cmd = 'rados --pool=' + poolName + ' ls|grep ' + bucketId + '|grep ' + '"'+objectid.replace('-','\-')+'"'
        p = Popen(cmd,
                  shell=True,
                  stdout=PIPE,
                  stderr=PIPE)
        outdata, errdata = p.communicate()
        print("chunks= ")
        print(outdata)
        print(errdata)
        if len(errdata) > 0:
            raise RuntimeError('unable to get the chunks list for the pool % the bucketId %s and the chunkBaseName the manifest %s : %s' % (poolName, bucketId, chunkBaseName, errdata))
        return outdata.split('\n')
    # This method retrieves the PG ID for a given pool and an object
    # The output looks like this : ['osdmap', 'e11978', 'pool', "'.rgw.buckets'", '(16)', 'object', "'default.4726.8_fileMaps/000001fd/9731af0ba5f8997929df14de6df583aff39ff94b'", '->', 'pg', '16.c1107af', '(16.7) -> up ([9,6], p9) acting ([9,6], p9)\n']
    def getPgId(self, poolName, objectId):
        """Return [raw_pgid, mapped_pgid] parsed from `ceph osd map` output,
        e.g. ['16.c1107af', '16.7']."""
        Log.info("____getPgId(poolname=" + poolName + ", object=" + objectId + ")")
        outdata = self.executeCmd('ceph osd map ', [poolName, ' ' + '\''+objectId+'\''], [])
        pgids = [outdata.split(' -> ')[1].split('(')[0].split(' ')[1],
                 outdata.split(' -> ')[1].split('(')[1].split(')')[0]]
        # pgids={'26.2c717bcf','26.7'}
        print "_____pgids=" , pgids
        return pgids
    def getUser(self, uid):
        """Return the S3User view for *uid* using the admin connection."""
        Log.debug("____get user with uid " + uid)
        return S3User.view(uid, self.getAdminConnection())
| {
"repo_name": "inkscope/inkscope",
"path": "inkscopeCtrl/S3ObjectCtrl.py",
"copies": "1",
"size": "22021",
"license": "apache-2.0",
"hash": 6564030995216200000,
"line_mean": 44.7817047817,
"line_max": 251,
"alpha_frac": 0.5517914718,
"autogenerated": false,
"ratio": 3.52392382781245,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9537029912143604,
"avg_score": 0.007737077493769176,
"num_lines": 481
} |
__author__ = 'Alexis.Koalla@orange.com'
import json
class PG:
    """Placement-group record: id, state, and the acting/up OSD sets."""

    def __init__(self, pgid, state, acting, up, acting_primary, up_primary):
        self.pgid = pgid
        self.state = state
        self.up = up
        self.acting = acting
        self.acting_primary = acting_primary
        self.up_primary = up_primary

    def getPgid(self):
        """Return the placement-group id."""
        return self.pgid

    def getState(self):
        """Return the PG state string (e.g. 'active+clean')."""
        return self.state

    def getActing(self):
        """Return the acting OSD set."""
        return self.acting

    def getUp(self):
        """Return the up OSD set."""
        return self.up

    def getActingPrimary(self):
        """Return the primary OSD of the acting set."""
        return self.acting_primary

    def getUpPrimary(self):
        """Return the primary OSD of the up set."""
        return self.up_primary

    def dump(self):
        """Serialize all attributes to pretty-printed, key-sorted JSON."""
        return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
| {
"repo_name": "abrefort/inkscope-debian",
"path": "inkscopeCtrl/model/pg.py",
"copies": "2",
"size": "1030",
"license": "apache-2.0",
"hash": 8806410706835295000,
"line_mean": 24.1219512195,
"line_max": 147,
"alpha_frac": 0.6038834951,
"autogenerated": false,
"ratio": 3.388157894736842,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9797938897614665,
"avg_score": 0.0388204984444352,
"num_lines": 41
} |
from .cygmm import cy_gmm
import numpy as np
def gmm(x, n_clusters=10, max_num_iterations=100, covariance_bound=None,
        init_mode='rand', init_priors=None, init_means=None, init_covars=None,
        n_repetitions=1, verbose=False):
    """Fit a Gaussian mixture model

    Parameters
    ----------
    x : [n_samples, n_features] `float32/float64` `ndarray`
        The data to be fit. One data point per row.
    n_clusters : `int`, optional
        Number of output clusters.
    max_num_iterations : `int`, optional
        The maximum number of EM iterations.
    covariance_bound : `float` or `ndarray`, optional
        A lower bound on the value of the covariance. If a float is given
        then the same value is given for all features/dimensions. If an
        array is given it should have shape [n_features] and give the
        lower bound for each feature.
    init_mode: {'rand', 'kmeans', 'custom'}, optional
        The initialization mode: 'rand' picks initial means among the data
        samples, 'kmeans' runs K-Means first, 'custom' uses the
        ``init_priors``/``init_means``/``init_covars`` supplied by the user.
    init_priors : [n_clusters,] `ndarray`, optional
        The initial prior probabilities on each components
    init_means : [n_clusters, n_features] `ndarray`, optional
        The initial component means.
    init_covars : [n_clusters, n_features] `ndarray`, optional
        The initial diagonal values of the covariances for each component.
    n_repetitions : `int`, optional
        The number of times the fit is performed. The fit with the highest
        likelihood is kept.
    verbose : `bool`, optional
        If ``True``, display information about computing the mixture model.

    Returns
    -------
    (means, covars, priors, ll, posteriors) -- component means, diagonal
    covariances, priors, the final log-likelihood and the per-sample
    posterior probabilities.

    Raises
    ------
    ValueError
        On empty/NaN/Inf input, invalid parameter values, wrongly shaped
        initial parameters, or inconsistent custom initialization.
    """
    n_samples = x.shape[0]
    n_features = x.shape[1]
    if x.shape[0] == 0:
        raise ValueError('x should contain at least one row')
    if np.isnan(x).any() or np.isinf(x).any():
        raise ValueError("x contains Nans or Infs.")
    if n_clusters <= 0 or n_clusters > n_samples:
        raise ValueError(
            'n_clusters {} must be a positive integer smaller than the '
            'number of data points {}'.format(n_clusters, n_samples)
        )
    if max_num_iterations < 0:
        raise ValueError('max_num_iterations must be non negative')
    if n_repetitions <= 0:
        raise ValueError('n_repetitions must be a positive integer')
    if init_mode not in {'rand', 'custom', 'kmeans'}:
        raise ValueError("init_mode must be one of {'rand', 'custom', 'kmeans'")

    # Make sure we have the correct types
    x = np.ascontiguousarray(x)
    if x.dtype not in [np.float32, np.float64]:
        raise ValueError('Input data matrix must be of type float32 or float64')
    if covariance_bound is not None:
        covariance_bound = np.asarray(covariance_bound, dtype=x.dtype)

    if init_priors is not None:
        init_priors = np.require(init_priors, requirements='C', dtype=x.dtype)
        if init_priors.shape != (n_clusters,):
            raise ValueError('init_priors does not have the correct size')
    if init_means is not None:
        init_means = np.require(init_means, requirements='C', dtype=x.dtype)
        if init_means.shape != (n_clusters, n_features):
            raise ValueError('init_means does not have the correct size')
    if init_covars is not None:
        init_covars = np.require(init_covars, requirements='C', dtype=x.dtype)
        if init_covars.shape != (n_clusters, n_features):
            raise ValueError('init_covars does not have the correct size')

    # BUG FIX: the original did any()/all() on the arrays themselves, which
    # truth-tests each ndarray and raises "The truth value of an array with
    # more than one element is ambiguous" whenever custom inits are passed.
    # Test for None explicitly instead.
    given = [p is not None for p in (init_priors, init_means, init_covars)]
    if any(given) and not all(given):
        raise ValueError('Either all or none of init_priors, init_means and '
                         'init_covars must be set.')
    if init_mode == "custom" and not all(given):
        raise ValueError('init_mode==custom implies that all initial '
                         'parameters are given')

    return cy_gmm(x, n_clusters, max_num_iterations, init_mode.encode('utf8'),
                  n_repetitions, int(verbose),
                  covariance_bound=covariance_bound, init_priors=init_priors,
                  init_means=init_means, init_covars=init_covars)
| {
"repo_name": "menpo/cyvlfeat",
"path": "cyvlfeat/gmm/gmm.py",
"copies": "1",
"size": "5190",
"license": "bsd-2-clause",
"hash": 4039682227487087600,
"line_mean": 43.7413793103,
"line_max": 80,
"alpha_frac": 0.6337186898,
"autogenerated": false,
"ratio": 3.9709257842387147,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00021285653469561513,
"num_lines": 116
} |
from .cygmm import cy_gmm
import numpy as np
def gmm(X, n_clusters=10, max_num_iterations=100, covariance_bound=None,
        init_mode='rand', init_priors=None, init_means=None, init_covars=None,
        n_repetitions=1, verbose=False):
    """Fit a Gaussian mixture model

    Parameters
    ----------
    X : [n_samples, n_features] `float32/float64` `ndarray`
        The data to be fit. One data point per row.
    n_clusters : `int`, optional
        Number of output clusters.
    max_num_iterations : `int`, optional
        The maximum number of EM iterations.
    covariance_bound : `float` or `ndarray`, optional
        A lower bound on the value of the covariance. If a float is given
        then the same value is given for all features/dimensions. If an
        array is given it should have shape [n_features] and give the
        lower bound for each feature.
    init_mode: {'rand', 'kmeans', 'custom'}, optional
        The initialization mode: 'rand' picks initial means among the data
        samples, 'kmeans' runs K-Means first, 'custom' uses the
        ``init_priors``/``init_means``/``init_covars`` supplied by the user.
    init_priors : [n_clusters,] `ndarray`, optional
        The initial prior probabilities on each components
    init_means : [n_clusters, n_features] `ndarray`, optional
        The initial component means.
    init_covars : [n_clusters, n_features] `ndarray`, optional
        The initial diagonal values of the covariances for each component.
    n_repetitions : `int`, optional
        The number of times the fit is performed. The fit with the highest
        likelihood is kept.
    verbose : `bool`, optional
        If ``True``, display information about computing the mixture model.

    Returns
    -------
    The tuple produced by ``cy_gmm``: priors, means, diagonal covariances,
    the final log-likelihood and the per-sample posterior probabilities.

    Raises
    ------
    ValueError
        On empty/NaN/Inf input, invalid parameter values, wrongly shaped
        initial parameters, or inconsistent custom initialization.
    """
    n_samples = X.shape[0]
    n_features = X.shape[1]
    if X.shape[0] == 0:
        raise ValueError('X should contain at least one row')
    if np.isnan(X).any() or np.isinf(X).any():
        raise ValueError("X contains Nans or Infs.")
    if n_clusters <= 0 or n_clusters > n_samples:
        raise ValueError(
            'n_clusters {} must be a positive integer smaller than the '
            'number of data points {}'.format(n_clusters, n_samples)
        )
    if max_num_iterations < 0:
        raise ValueError('max_num_iterations must be non negative')
    if n_repetitions <= 0:
        raise ValueError('n_repetitions must be a positive integer')
    if init_mode not in {'rand', 'custom', 'kmeans'}:
        raise ValueError("init_mode must be one of {'rand', 'custom', 'kmeans'")

    # Make sure we have the correct types
    X = np.ascontiguousarray(X)
    if X.dtype not in [np.float32, np.float64]:
        raise ValueError('Input data matrix must be of type float32 or float64')
    if covariance_bound is not None:
        # BUG FIX: dtype=np.float used the removed numpy alias (AttributeError
        # on numpy >= 1.24); match the data dtype as the sibling module does.
        covariance_bound = np.asarray(covariance_bound, dtype=X.dtype)

    if init_priors is not None:
        init_priors = np.require(init_priors, requirements='C', dtype=X.dtype)
        if init_priors.shape != (n_clusters,):
            raise ValueError('init_priors does not have the correct size')
    if init_means is not None:
        init_means = np.require(init_means, requirements='C', dtype=X.dtype)
        if init_means.shape != (n_clusters, n_features):
            raise ValueError('init_means does not have the correct size')
    if init_covars is not None:
        init_covars = np.require(init_covars, requirements='C', dtype=X.dtype)
        if init_covars.shape != (n_clusters, n_features):
            raise ValueError('init_covars does not have the correct size')

    # BUG FIX: any()/all() on the arrays themselves truth-tests each ndarray
    # and raises "truth value of an array is ambiguous"; test for None.
    given = [p is not None for p in (init_priors, init_means, init_covars)]
    if any(given) and not all(given):
        raise ValueError('Either all or none of init_priors, init_means and '
                         'init_covars must be set.')
    if init_mode == "custom" and not all(given):
        raise ValueError('init_mode==custom implies that all initial '
                         'parameters are given')

    return cy_gmm(X, n_clusters, max_num_iterations, init_mode.encode('utf8'),
                  n_repetitions, int(verbose),
                  covariance_bound=covariance_bound, init_priors=init_priors,
                  init_means=init_means, init_covars=init_covars)
| {
"repo_name": "simmimourya1/cyvlfeat",
"path": "cyvlfeat/gmm/gmm.py",
"copies": "1",
"size": "5191",
"license": "bsd-2-clause",
"hash": -7215718410456610000,
"line_mean": 43.75,
"line_max": 80,
"alpha_frac": 0.6337892506,
"autogenerated": false,
"ratio": 3.971690895179801,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5105480145779802,
"avg_score": null,
"num_lines": null
} |
import numpy as np
from numpy.testing import assert_allclose
from nose.tools import raises
from cyvlfeat.gmm import gmm
np.random.seed(1)
X = np.random.randn(1000, 2)
X[500:] *= (2, 3)
X[500:] += (4, 4)
def test_gmm_2_clusters_rand_init():
    """gmm with random initialization recovers the two synthetic clusters."""
    mus, sigmas, weights, log_lik, resp = gmm(X, n_clusters=2)
    assert_allclose(log_lik, -4341.0, atol=0.1)
    assert_allclose(weights, [0.5, 0.5], atol=0.1)
    assert_allclose(resp[0], [0.0, 1.0], atol=0.1)
    assert_allclose(mus, [[4, 4], [0, 0]], atol=0.2)
def test_gmm_2_clusters_kmeans_init():
    """gmm seeded by K-Means reaches the same solution as random init."""
    mus, sigmas, weights, log_lik, resp = gmm(X, n_clusters=2,
                                              init_mode='kmeans')
    assert_allclose(log_lik, -4341.0, atol=0.1)
    assert_allclose(weights, [0.5, 0.5], atol=0.1)
    assert_allclose(resp[0], [0.0, 1.0], atol=0.1)
    assert_allclose(mus, [[4, 4], [0, 0]], atol=0.2)
@raises(ValueError)
def test_gmm_2_clusters_custom_init_fail():
    """init_mode='custom' without initial parameters must raise ValueError."""
    gmm(X, n_clusters=2, init_mode='custom')
| {
"repo_name": "simmimourya1/cyvlfeat",
"path": "cyvlfeat/gmm/tests/test_gmm.py",
"copies": "1",
"size": "1063",
"license": "bsd-2-clause",
"hash": -4474160106751692300,
"line_mean": 29.3714285714,
"line_max": 67,
"alpha_frac": 0.6208842897,
"autogenerated": false,
"ratio": 2.513002364066194,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8491029510909051,
"avg_score": 0.02857142857142857,
"num_lines": 35
} |
__author__ = 'alexjch'
import signal
try:
import bluetooth as bt
except:
pass
BUFF_SIZE = 1024
def find_device(device_name):
    """Scan nearby bluetooth devices and return the address whose resolved
    name equals *device_name*, or None when no device matches."""
    matches = [addr for addr in bt.discover_devices()
               if bt.lookup_name(addr) == device_name]
    return matches.pop() if matches else None
class BTAgent(object):
    """Bluetooth agent abstracting RFCOMM communication with a device.

    Opens the channel in the constructor and exposes send/receive helpers.
    Python 2 only: receive() treats socket chunks as str and does
    str.replace on them, which would need bytes handling on Python 3.
    """
    def __init__(self, dev_address, port=1):
        object.__init__(self)
        self.sock = bt.BluetoothSocket(bt.RFCOMM)
        self.sock.connect((dev_address, port))
    def receive(self):
        """Read chunks until the '>' prompt (chr(62)) appears; CRs are
        normalized to newlines.  NOTE(review): never returns if the device
        closes the connection before sending '>'."""
        chunks = []
        receiving = 1
        while receiving:
            chunk = self.sock.recv(BUFF_SIZE)
            chunks.append(chunk.replace("\r", "\n"))
            if chr(62) in chunk:
                return "".join(chunks)
    def send(self, msg):
        """Send *msg* terminated by a carriage return (chr(13))."""
        msg = "{}{}".format(msg, chr(13))
        return self.sock.send(msg)
    def close(self):
        self.sock.close()
    def __del__(self):
        # Best-effort cleanup when the agent is garbage collected.
        self.close()
| {
"repo_name": "alexjch/car_monitor",
"path": "src/CarMonitor/bt_spp_comm.py",
"copies": "1",
"size": "1174",
"license": "mit",
"hash": -5435883681817516000,
"line_mean": 23.9787234043,
"line_max": 72,
"alpha_frac": 0.5868824532,
"autogenerated": false,
"ratio": 3.811688311688312,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48985707648883114,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alexjch'
import sys
import time
import signal
import argparse
from bt_spp_comm import BTAgent as bta, find_device
SLEEP_TIME = 5
def arguments_parser():
    """Build and parse the command line for the ODBII communication tool.

    Exactly one of --device_name/--device_addr and exactly one of
    -I/-F/-L is required; -C repeats the parameters file until CTRL^C.

    Returns the parsed ``argparse.Namespace``.
    """
    ap = argparse.ArgumentParser(description="ODBII communication tool")
    device_id_group = ap.add_mutually_exclusive_group(required=True)
    device_id_group.add_argument("--device_name", help="Name of the device to connect to", default=None)
    device_id_group.add_argument("--device_addr", help="Address of the device to connect to", default=None)
    op_mode_group = ap.add_mutually_exclusive_group(required=True)
    op_mode_group.add_argument("-I", "--interactive", action="store_true", default=False,
                               help="Interactive mode, in this mode the connection will stay \
                               open and the data that is typed in command line is send to the \
                               device")
    op_mode_group.add_argument("-F", "--parameters_file", default=None,
                               help="File with a list of parameters to read from vehicle")
    op_mode_group.add_argument("-L", "--listening", default=False,
                               help="Puts the LM327 in listening mode to monitor traffic on \
                               the can bus")
    # BUG FIX: 'store_true' actions accept no 'type' argument -- passing
    # type=int made argparse raise TypeError before any parsing happened.
    ap.add_argument("-C", "--continuous", default=False, action="store_true", help="Do it until CTRL^C")
    ap.add_argument("-D", "--data_base", default="odb")
    return ap.parse_args()
def sigint_handler(signal, frame):
    """SIGINT handler: print a blank line and exit with status 0.

    NOTE: the 'signal' parameter shadows the signal module inside this
    handler (harmless here, the module is not used).
    """
    # Equivalent of the old Py2-only `print >> sys.stdout, "\n"`
    # (which emitted "\n" plus the trailing newline) -- works on Py2 and Py3.
    sys.stdout.write("\n\n")
    sys.exit(0)
def interactive_mode(bt):
    """Interactive REPL: forward each typed line to the BT device and echo
    the reply.  Loops until SIGINT, which is trapped to exit cleanly.
    Python 2 only (uses raw_input)."""
    signal.signal(signal.SIGINT, sigint_handler)
    while True:
        msg = raw_input("btagent# ")
        bt.send(msg)
        print("received: {}".format(bt.receive()))
def parameters_file(file_name, bt):
    """Generator: send every non-empty line of *file_name* to the device
    and yield the corresponding response."""
    with open(file_name) as fh:
        commands = [line for line in fh.read().split("\n") if line != ""]
        for command in commands:
            bt.send(command)
            yield bt.receive()
def continuous_parameters_file(file_name, bt):
    """Poll the parameters file forever, printing each device response and
    sleeping SLEEP_TIME seconds between rounds.  Never returns."""
    while True:
        for _ in parameters_file(file_name, bt):
            print _
        time.sleep(SLEEP_TIME)
def monitor_bus(bt):
    # TODO: not implemented -- intended to put the adapter in listening
    # mode to monitor CAN-bus traffic (see the -L CLI option).
    pass
def main(args):
    """Resolve the device address, open the bluetooth channel and dispatch
    according to the selected operating mode."""
    address = args.device_addr \
        if args.device_addr is not None else find_device(args.device_name)
    bt = bta(address)
    if args.interactive:
        interactive_mode(bt)
    elif args.continuous and args.parameters_file:
        continuous_parameters_file(args.parameters_file, bt)
    elif args.parameters_file:
        # BUG FIX: parameters_file() is a generator -- the old bare call
        # created it and discarded it without ever sending anything.
        for response in parameters_file(args.parameters_file, bt):
            print(response)
    elif args.listening:
        # BUG FIX: argparse stores "--listening" as args.listening;
        # args.listening_mode raised AttributeError.
        monitor_bus(bt)
if __name__ == "__main__":
    # Script entry point: parse the CLI arguments and dispatch.
    main(arguments_parser())
| {
"repo_name": "alexjch/car_monitor",
"path": "src/CarMonitor/main.py",
"copies": "1",
"size": "2724",
"license": "mit",
"hash": -4279480341377723000,
"line_mean": 33.05,
"line_max": 114,
"alpha_frac": 0.6196769457,
"autogenerated": false,
"ratio": 3.788595271210014,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4908272216910014,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alexjch'
import os
import sqlite3
# SQL statements for the telemetry store.
# BUG FIX: CREATE TABLE IF NOT EXISTS -- the old exists() check ran AFTER
# sqlite3.connect() had already created the (empty) file, so the table was
# never created on a fresh database.
CREATE_DB = '''CREATE TABLE IF NOT EXISTS telemetry (ID INTEGER PRIMARY KEY AUTOINCREMENT,
            STREAM TEXT,
            Timestamp DATETIME DEFAULT CURRENT_TIMESTAMP)'''
# BUG FIX: the placeholder must be an unquoted ? -- values("?") inserted the
# literal string and left the bound parameter unused (ProgrammingError).
DB_INSERT = '''INSERT INTO telemetry (STREAM) values(?)'''
DB_SELECT = '''SELECT * FROM telemetry order by Timestamp LIMIT 100'''


class DB(object):
    """SQLite-backed storage for telemetry streams coming from the connector.

    The database file lives next to this module; the connection runs in
    autocommit mode (isolation_level=None), so no explicit commit is needed.
    """

    def __init__(self, db_file="ODB.sqlite"):
        db_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), db_file)
        self.conn = sqlite3.connect(db_path)
        self.conn.isolation_level = None  # autocommit mode
        self.db = self.conn.cursor()
        # Always safe thanks to IF NOT EXISTS.  (The old code also called
        # cursor.commit(), which does not exist -- connections commit.)
        self.db.execute(CREATE_DB)

    def insert(self, stream):
        """Store one telemetry record; the timestamp defaults to now."""
        self.db.execute(DB_INSERT, (stream,))

    def extract(self):
        """Return up to 100 rows (ID, STREAM, Timestamp) ordered by time."""
        self.db.execute(DB_SELECT)
        return self.db.fetchall()

    def close(self):
        """Release the cursor and the underlying connection."""
        self.db.close()
        self.conn.close()
| {
"repo_name": "alexjch/car_monitor",
"path": "src/CarMonitor/db.py",
"copies": "1",
"size": "1131",
"license": "mit",
"hash": 4809279743270470000,
"line_mean": 28.7631578947,
"line_max": 87,
"alpha_frac": 0.5862068966,
"autogenerated": false,
"ratio": 3.913494809688581,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4999701706288581,
"avg_score": null,
"num_lines": null
} |
import web
from web import http
import pycurl, random, re, cStringIO, types, urllib
import urlparse as _urlparse
from lxml import etree
from md5 import md5
from datetime import datetime
def url_encode(url):
    """URL-encode *url* using web.py's http.urlencode helper."""
    return http.urlencode(url)
def url_unquote(url):
    """Decode %xx escapes and '+' (as space) from *url*.  Python 2 urllib."""
    return urllib.unquote_plus(url)
def url_parse(url):
    """Parse *url* into a web.storage with attribute access to the six
    urlparse components (scheme, netloc, path, params, query, fragment)."""
    return web.storage(
        zip(('scheme', 'netloc', 'path', 'params', 'query', 'fragment'), _urlparse.urlparse(url)))
def url_join(url, url_relative):
    """Join *url_relative* onto base *url*; a relative path without scheme
    is forced to start with '/' before joining."""
    if '://' not in url_relative and not url_relative.startswith('/'):
        url_relative = '/' + url_relative
    return _urlparse.urljoin(url, url_relative)
def get_user_ip():
    """Return the client IP from the web.py request context, or the
    placeholder '000.000.000.000' outside a request."""
    return web.ctx.get('ip', '000.000.000.000')
def parse_xml(txt):
    """Parse *txt* into an lxml element tree with entity resolution disabled.

    NOTE(review): namespaces are stripped with a regex for simplicity --
    this also matches xmlns-looking text inside attribute values.
    """
    xml = re.sub('xmlns\s*=\s*["\'].*?["\']', ' ', txt) # we remove the xmlns for simplicity
    return etree.fromstring(xml, parser=etree.XMLParser(resolve_entities=False))
def curl_init():
    """Create a pycurl handle with a browser user-agent and redirects
    enabled; timeouts are intentionally left unset (see commented lines)."""
    curl = pycurl.Curl()
    curl.setopt(pycurl.USERAGENT, "Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)")
    curl.setopt(pycurl.FOLLOWLOCATION, True)
    #curl.setopt(pycurl.CONNECTTIMEOUT, 3)
    #curl.setopt(pycurl.TIMEOUT, 30)
    return curl
# PIL complains when only f is returned but all we are doing is stringIO(f.getvalue()) twice.
def open_url(curl, url, referer=None):
    """Perform a GET on *url* with the given pycurl handle (optionally
    sending *referer*) and return the response body as a string."""
    curl.setopt(pycurl.URL, url)
    if referer:
        curl.setopt(pycurl.REFERER, referer)
    f = cStringIO.StringIO()
    curl.setopt(pycurl.WRITEFUNCTION, f.write)
    curl.perform()
    html = f.getvalue()
    f.close()
    return html
def dnl(url, referer=None):
    """Download *url* with a one-shot curl handle and return the body."""
    handle = curl_init()
    body = open_url(handle, url, referer)
    handle.close()
    return body
def dict_remove(d, *keys):
    """Delete each of *keys* from dict *d* in place, ignoring absent keys."""
    for key in keys:
        # `in` works on both Python 2 and 3; dict.has_key() was removed in 3.x.
        if key in d:
            del d[key]
def get_extension_from_url(url):
    """Return the text after the last '.' in the URL's path.

    Like str.rindex, raises ValueError when the path contains no dot.
    """
    path = url_parse(url).path
    last_dot = path.rindex('.')
    return path[last_dot + 1:]
def get_unique_md5():
    """Return an md5 hex digest of the current microsecond.

    NOTE: not actually guaranteed unique -- two calls in the same
    microsecond collide.
    """
    stamp = str(datetime.now().microsecond)
    return md5(stamp).hexdigest()
def get_guid():
    """Return a GUID-formatted (8-4-4-4-12) uppercase string from get_unique_md5()."""
    digest = get_unique_md5().upper()
    spans = ((0, 8), (8, 12), (12, 16), (16, 20), (20, 32))
    return '-'.join(digest[a:b] for a, b in spans)
def get_all_functions(module):
    """Map name -> function for every plain Python function defined in *module*."""
    members = (module.__dict__.get(attr) for attr in dir(module))
    return dict((member.__name__, member)
                for member in members
                if isinstance(member, types.FunctionType))
def email_errors():
    """Email the current error report to the configured address, if one is set."""
    # NOTE(review): `djangoerror` is not defined or imported in this module --
    # presumably meant to be web.djangoerror(); confirm before relying on this.
    if web.config.email_errors:
        web.emailerrors(web.config.email_errors, djangoerror())
def is_blacklisted(text, blacklist):
    """Return the first banned word found in *text* (case-insensitive), else False.

    *blacklist* yields UTF-8 encoded entries; the hit is returned stripped of
    surrounding whitespace but with its original casing.
    """
    needle = text.strip().lower()
    for raw in blacklist:
        candidate = raw.decode('utf-8').strip()
        if candidate.lower() in needle:
            return candidate
    return False
return False | {
"repo_name": "Nitecon/webframe",
"path": "webframe/view/helpers/utils.py",
"copies": "1",
"size": "2775",
"license": "apache-2.0",
"hash": -3515031585155694600,
"line_mean": 26.76,
"line_max": 98,
"alpha_frac": 0.6299099099,
"autogenerated": false,
"ratio": 3.2685512367491167,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9267392455962281,
"avg_score": 0.026213738137367076,
"num_lines": 100
} |
# TODO:
# - if the module submitted is in quoted HTML then it must unquoted
# - bad idea to catch generic exceptions
import web, os, sys
from app.models import modules
from app.helpers import utils
from app.helpers import image
def submit(module_url, tags=''):
    """Validate, screenshot and store the module found at *module_url*.

    Returns (success, err_msg); err_msg is only meaningful on failure.
    """
    success, err_msg = False, ''
    try:
        # parse the module xml
        module = parse_module(module_url)
        # set some default values
        set_defaults(module)
        # check if the module is valid (raises on any problem)
        is_valid(module)
        # get the module screenshot
        module.screenshot = grab_screenshot(module.screenshot)
    except:
        # NOTE(review): bare except (see module TODO) also swallows
        # KeyboardInterrupt/SystemExit, and sys.exc_info()[0] is the
        # exception *type*, not a message.
        err_msg = sys.exc_info()[0]
    else:
        # add module and its tags to the db
        modules.add(module, tags)
        success = True
    return success, err_msg
def parse_module(module_url):
    """Download *module_url* and return a web.storage of its ModulePrefs attributes.

    Attributes absent from the XML keep their empty-string defaults.
    """
    module = web.storage(
        url=module_url, cached_xml='', screenshot='', title='', title_url='',
        directory_title='', description='', author='', author_email='',
        author_affiliation='', author_location='', render_inline='')
    if not module_url.startswith('http://'):
        # NOTE(review): raising a string is a TypeError on Python >= 2.6;
        # the bare except in submit() masks this throughout this module.
        raise 'Ooops! Submission has failed – the URL seems to be invalid.'
    try:
        html = utils.dnl(module_url)
        html = web.htmlunquote(html) # this may confuse the parser
        xml = utils.parse_xml(html)
    except:
        raise 'Ooops! Submission has failed – the XML or HTML page could not be loaded successfully.'
    xnodes = xml.xpath('//ModulePrefs')
    if not xnodes:
        raise 'Ooops! The XML is valid, but we cannot find the module.'
    xnodes = xnodes[0]
    # Copy every matching XML attribute over the storage defaults.
    for attr in module:
        module[attr] = xnodes.get(attr) or module[attr]
    return module
def set_defaults(module):
    """Fill in derived/default fields on a freshly parsed module."""
    if not module.screenshot.startswith('http://'):
        # NOTE(review): the helpers module seen elsewhere defines url_join,
        # not urljoin -- verify this attribute exists on the imported utils.
        module.screenshot = utils.urljoin(module.url, module.screenshot)
    if module.directory_title:
        module.title = module.directory_title
    if not module.render_inline:
        module.render_inline = 'never'
def is_valid(module):
    """Raise (caught by submit's bare except) if *module* breaks any submission rule."""
    if modules.is_banned_site(module.url):
        raise '<p>Ooops! The site you tried to submit is banned...</p>'
    elif not module.title or not module.author:
        raise 'Ooops! Submission has failed – please provide title and author name in your XML.'
    elif module.render_inline not in ['never', 'optional', 'required']:
        raise 'Ooops! Submission has failed – ' \
            + 'your <em>render_inline</em> value must be either "optional", "required" or "never".'
    elif not module.screenshot:
        raise 'Ooops! Submission has failed – please provide a link to a screenshot in your XML.'
    elif modules.already_exists(module.url):
        raise 'Ooops! Submission has failed – this module has been submitted already ' \
            + '(possible changes will automatically be updated).';
def grab_screenshot(screenshot_url):
    """Download the screenshot at *screenshot_url* and store it under a GUID name.

    Returns the generated file name (GUID + original extension).
    """
    try:
        data = utils.dnl(screenshot_url)
        guid = utils.get_guid() + '.' + utils.get_extension_from_url(screenshot_url)
        image.save(data, 'public/img/screenshot/' + guid)
    except:
        # BUG FIX: the error message previously interpolated the undefined
        # name `screenshot`, raising NameError instead of the intended
        # message; use the actual parameter.
        raise 'Ooops! Submission has failed – <a href="%s">' % web.urlquote(screenshot_url) \
            + ' the screenshot</a> in your XML could not be found, was broken, or had the wrong dimensions' \
            + ' (should be above 30x20 and below 460x420).'
    return guid
| {
"repo_name": "Nitecon/webframe",
"path": "models/submission.py",
"copies": "4",
"size": "3556",
"license": "apache-2.0",
"hash": -7427854764305517000,
"line_mean": 33.1923076923,
"line_max": 109,
"alpha_frac": 0.6217660292,
"autogenerated": false,
"ratio": 3.9776286353467563,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.04046402121766956,
"num_lines": 104
} |
from config import db
from app.helpers import tag_cloud
import re, sets
def get_tags(module_id):
    """Return the tag rows attached to the module with id *module_id*."""
    query_vars = dict(id=module_id)
    return db.select('tags', vars=query_vars, what='tag', where='module_id=$id')
def get_tag_cloud():
    """Return a tag cloud of most popular modules."""
    popular = db.select('tags',
                        what = 'tag, count(tag) as count',
                        where = 'length(tag) >= 2',
                        group = 'tag having count >= 4',
                        limit = 10000)
    return tag_cloud.make_cloud(popular, min_tag_length=2, min_count=4,
                                max_count=12, plurial=True)
def get_author_cloud():
    """Return a tag cloud of most popular authors."""
    authors = db.select('modules',
                        what = 'author as tag, count(author) as count',
                        where = 'length(author) >= 3',
                        group = 'author',
                        limit = 10000)
    return tag_cloud.make_cloud(authors, min_tag_length=3, min_count=2,
                                max_count=17, plurial=False, randomize=True)
def add(module_id, tags):
    """Normalize *tags* and insert one row per unique tag for *module_id*."""
    # The builtin set (available since Python 2.4) replaces the deprecated
    # sets.Set used previously; behaviour is identical here.
    for tag in set(get_nice_tags(tags).split()):
        db.insert('tags', module_id=module_id, tag=tag)
def get_nice_tags(tags):
    """Lower-case *tags*, turn separators into spaces, collapse whitespace runs."""
    normalized = tags.lower().strip()
    normalized = re.sub('[,;\"]', ' ', normalized)
    # Only runs of 2+ whitespace characters collapse; a lone tab/newline survives.
    return re.sub('\s{2,}', ' ', normalized)
| {
"repo_name": "minixalpha/SourceLearning",
"path": "webpy/sample/googlemodules/src/app/models/tags.py",
"copies": "4",
"size": "1344",
"license": "apache-2.0",
"hash": -1345021795168026000,
"line_mean": 27.8666666667,
"line_max": 69,
"alpha_frac": 0.556547619,
"autogenerated": false,
"ratio": 3.302211302211302,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.045785908463068244,
"num_lines": 45
} |
import web
from config import db
def add(module_id, vote, user_ip):
    """Record *vote* for *module_id* unless this IP voted already; refresh the aggregate."""
    if already_voted(module_id, user_ip):
        # A duplicate vote is silently reported as success.
        success = True
    elif module_id and -5 <= vote <= 5:
        db.insert('votes',
                  module_id=module_id, vote=vote, ip=user_ip,
                  datetime_created=web.SQLLiteral('now()'))
        success = True
    else:
        success = False
    update_calculated_vote(module_id)
    return success
def update_calculated_vote(module_id):
    """Recompute the module's average vote once it has more than min_votes votes."""
    min_votes = 5
    r = web.listget(
        db.select('votes',
            vars = dict(id=module_id),
            what = 'sum(vote) / count(module_id) as calculated_vote',
            where = 'module_id = $id',
            # Only modules above the vote threshold produce a row at all.
            group = 'module_id having count(module_id) > %s' % min_votes), 0, False)
    if r:
        db.update('modules',
            vars = dict(id=module_id),
            where='id = $id',
            calculated_vote=r.calculated_vote)
def already_voted(module_id, user_ip):
    """Return a truthy row when *user_ip* has voted on *module_id*, else False."""
    matches = db.select('votes',
                        vars = dict(ip=user_ip, id=module_id),
                        what = 'count(vote)',
                        where = 'ip = $ip and module_id = $id',
                        group = 'module_id having count(module_id) > 0')
    return web.listget(matches, 0, False)
"repo_name": "Nitecon/webframe",
"path": "models/votes.py",
"copies": "4",
"size": "1315",
"license": "apache-2.0",
"hash": 1158601321032302600,
"line_mean": 30.925,
"line_max": 84,
"alpha_frac": 0.5209125475,
"autogenerated": false,
"ratio": 3.53494623655914,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6055858784059139,
"avg_score": null,
"num_lines": null
} |
import web
from config import db
def get_latest(offset=0, limit=20):
    """Return (threads, has_next): up to *limit* root threads plus reply stats."""
    # Fetch one extra row to learn whether another page exists.
    rows = list(db.select('forum_threads',
        what = 'id, id as idd, title, author, content, datetime_created,\
(select count(id) from forum_threads where reply_to = idd) as no_replies,\
(select max(datetime_created) from forum_threads where reply_to = idd) as last_reply_datetime',
        where = 'reply_to = 0',
        order = 'datetime_created desc',
        offset = offset,
        limit = limit+1))
    return rows[:limit], len(rows) > limit
def get_thread(thread_id):
    """Return the thread row with id *thread_id*, or False when absent."""
    rows = db.select('forum_threads',
                     vars = dict(id=thread_id),
                     what = 'id, title, author, content, datetime_created',
                     where = 'id = $id')
    return web.listget(rows, 0, False)
def get_conversation(thread_id):
    """Return the root thread followed by all of its direct replies."""
    conversation = [get_thread(thread_id)]
    replies = db.select('forum_threads',
                        vars = dict(id=thread_id),
                        what = 'id, title, author, content, datetime_created',
                        where = 'reply_to = $id')
    conversation.extend(replies)
    return conversation
def add(thread):
    """Insert a new root thread; returns False if any required field is missing."""
    if not (thread.author and thread.title and thread.content):
        return False
    db.insert('forum_threads', **thread)
    return True
def reply(thread):
    """Insert a reply; returns False unless reply_to, author and content are set."""
    if not (thread.reply_to and thread.author and thread.content):
        return False
    db.insert('forum_threads', **thread)
    return True
| {
"repo_name": "minixalpha/SourceLearning",
"path": "webpy/sample/googlemodules/src/app_forum/models/threads.py",
"copies": "3",
"size": "1573",
"license": "apache-2.0",
"hash": -200995408655756960,
"line_mean": 26.6,
"line_max": 103,
"alpha_frac": 0.5715193897,
"autogenerated": false,
"ratio": 3.7452380952380953,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5816757484938095,
"avg_score": null,
"num_lines": null
} |
import web
from config import db
from app.helpers import utils
def get_latest():
    """Get latest comments on modules."""
    newest_first = 'datetime_created desc'
    return db.select('comments',
                     what = 'content, module_id',
                     order = newest_first,
                     limit = 4)
def get_comments(module_id):
    """Return all comments attached to the module with id *module_id*."""
    query_vars = dict(id=module_id)
    return db.select('comments',
                     vars = query_vars,
                     what = 'datetime_created, author, content',
                     where = 'module_id=$id')
def add(module_id, author, comment):
    """Store a comment for a module; returns (success, err_msg)."""
    banned_word = is_banned_keyword(author + ' ' + comment)
    if banned_word:
        return False, 'Ooops! Please go back to remove "%s" if you can...' % banned_word
    if not (module_id and author and comment):
        return False, ''
    db.insert('comments',
              module_id=module_id, author=author, content=comment,
              datetime_created=web.SQLLiteral('now()'))
    return True, ''
def get_latest_for_author(author):
    """Return up to 3 recent comments on this author's reasonably-rated modules."""
    query_vars = dict(author=author)
    return db.select('modules as m, comments as c',
                     vars = query_vars,
                     what = 'm.title as module_title, c.author, content, module_id',
                     where = 'm.id = module_id and m.author = $author and calculated_vote >= -1',
                     order = 'c.datetime_created desc',
                     limit = 3)
def is_banned_keyword(text):
    """Return the first blacklisted word found in *text*, or False.

    The blacklist file is now opened with a context manager so the handle is
    closed promptly (the previous version leaked it).
    """
    with open('data/blacklist.dat') as blacklist:
        return utils.is_blacklisted(text, blacklist.readlines())
| {
"repo_name": "minixalpha/SourceLearning",
"path": "webpy/sample/googlemodules/src/app/models/comments.py",
"copies": "4",
"size": "1453",
"license": "apache-2.0",
"hash": 3135923185789532000,
"line_mean": 30.2888888889,
"line_max": 84,
"alpha_frac": 0.5911906401,
"autogenerated": false,
"ratio": 3.669191919191919,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6260382559291919,
"avg_score": null,
"num_lines": null
} |
# TODO:
# - should be made into a class
import Image, cStringIO, os
def save(fi, filename, min_width=30, min_height=20, max_width=460, max_height=420, max_kb=40):
    """Persist image data *fi* to *filename*, shrinking/compressing as needed.

    Within the size bounds and under max_kb the raw bytes are written
    verbatim; oversized data is JPEG-compressed and/or thumbnailed.
    """
    im = get_image_object(fi)
    width, height = im.size
    if min_width <= width <= max_width and min_height <= height <= max_height:
        if len(fi) <= max_kb * 1024:
            # Write the original bytes; the with-block closes the handle
            # (previously the file object was never closed).
            with open(filename, 'wb') as out:
                out.write(fi)
        else:
            compress(im, filename)
    else:
        thumbnail(im, filename, max_width, max_height)
        if os.path.getsize(filename) > max_kb * 1024:
            compress(im, filename)
def get_image_object(fi):
    """Return an RGB PIL image built from *fi* (a file object or raw bytes)."""
    source = fi if isinstance(fi, file) else cStringIO.StringIO(fi)
    im = Image.open(source)
    return im if im.mode == "RGB" else im.convert("RGB")
def compress(im, out):
    """Re-encode *im* as JPEG into *out* to shrink its byte size."""
    target_format = 'jpeg'
    im.save(out, format=target_format)
def thumbnail(im, out, max_width, max_height):
    """Downscale *im* in place to fit within max_width x max_height, then save."""
    bounds = (max_width, max_height)
    im.thumbnail(bounds, Image.ANTIALIAS)
    im.save(out)
"repo_name": "minixalpha/SourceLearning",
"path": "webpy/sample/googlemodules/src/app/helpers/image.py",
"copies": "4",
"size": "1038",
"license": "apache-2.0",
"hash": -7869711781367444000,
"line_mean": 26.1081081081,
"line_max": 94,
"alpha_frac": 0.5712909441,
"autogenerated": false,
"ratio": 3.337620578778135,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.024217752234993613,
"num_lines": 37
} |
"""Framework & steps:
# 1. Parse XML, retrieve all book titles / authors /
Get books from Gutenberg (desc)
wget geonames (wget -r -np -k -nd http://download.geonames.org/export/dump/)
"""
if __name__ == "__main__":
    from os import chdir
    from os import walk
    import logging
    import psycopg2
    # Set working directory to script location
    # NOTE(review): hard-coded path; "\g" happens not to be an escape
    # sequence, but a raw string (r"...") would be safer.
    chdir("D:\git\gutenberg")
    # Script modules
    from lib import analyze_epub
    from lib import alter_database_refactored as alter_db
    # Provision the project database and its PostGIS extension.
    gutenberg_db = alter_db.database_operations()
    gutenberg_db.set_database_credentials(user = "postgres", password = "postgres")
    gutenberg_db.connect_to_default_database(default_database_name = "postgres")
    gutenberg_db.set_project_database_name(project_database_name = "gutenberg")
    gutenberg_db.create_project_database()
    gutenberg_db.close_default_db_connection()
    gutenberg_db.connect_to_project_database()
    gutenberg_db.create_project_database_postgis_extension()
    ## Create database for location data ##
    gutenberg_db.set_location_table_name(location_table_name = "location_lut_1000")
    gutenberg_db.create_location_table(overwrite = False)
    gutenberg_db.insert_location_file(location_file_name = "D:/git/gutenberg/data/cities1000.txt")
    """
    # Note-to-self: Refactor to wrapper later
    subfolders = [directory[0] for directory in walk(r'D:\cygwinfolders\gutenberg-generated')]
    for directory in subfolders[2:3]:
        book_text = analyze_epub.process_text_in_ebook(directory)
        print(book_text)
    """
| {
"repo_name": "Bixbeat/gutenberg-place-mentions",
"path": "main.py",
"copies": "1",
"size": "1650",
"license": "mit",
"hash": -5208668671090769000,
"line_mean": 32.6734693878,
"line_max": 98,
"alpha_frac": 0.6878787879,
"autogenerated": false,
"ratio": 3.459119496855346,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9296654355337192,
"avg_score": 0.07006878588363089,
"num_lines": 49
} |
__author__ = 'Alex Malyshev <malyshevalex@gmail.com>'
from collections import MutableSet
from .serializers import JsonSerializer
DEFAULT_SERIALIZER = JsonSerializer()
class StringSet(MutableSet):
    """A mutable set of strings that flattens nested lists/tuples/StringSets.

    Falsy values are silently ignored by add(); non-string scalars raise
    TypeError.  add() and discard() return self so calls can be chained.
    """

    def __init__(self, *args):
        self.data = set()
        for item in args:
            self.add(item)

    def add(self, value):
        if value:
            if isinstance(value, (StringSet, list, tuple)):
                # Flatten nested containers recursively.
                for element in value:
                    self.add(element)
            elif isinstance(value, str):
                self.data.add(value)
            else:
                raise TypeError('Arguments should be string, StringSet, list or tuple rather than %s' % type(value))
        return self

    def discard(self, value):
        self.data.discard(value)
        return self

    def __contains__(self, x):
        return x in self.data

    def __len__(self):
        return len(self.data)

    def __iter__(self):
        return iter(self.data)

    def serialize(self, serializer=DEFAULT_SERIALIZER):
        """Serialize the members as a list via *serializer*."""
        return serializer.from_python(list(self))

    @classmethod
    def deserialize(cls, data, serializer=DEFAULT_SERIALIZER):
        """Rebuild a StringSet from serialized *data*."""
        return cls(serializer.to_python(data))
| {
"repo_name": "malyshevalex/django-stringset",
"path": "__init__.py",
"copies": "1",
"size": "1234",
"license": "mit",
"hash": -8970118726305730000,
"line_mean": 25.2553191489,
"line_max": 116,
"alpha_frac": 0.5875202593,
"autogenerated": false,
"ratio": 4.140939597315437,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5228459856615436,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alex'
from PyQt4.QtGui import *
from PyQt4.phonon import Phonon
import sys
from PyQt4 import uic
class Window(QMainWindow):
    """Minimal Phonon-based video player window loaded from window.ui."""

    def __init__(self, parent=None):
        super(Window, self).__init__(parent)
        # Load the view / GUI definition from the Qt Designer file.
        uic.loadUi("window.ui",self)
        # Wire the media source to the player widget defined in the .ui file.
        self.mediaSource=Phonon.MediaSource("aqui se pone el video")#only wmv-style videos are supported for now
        self.videoPlayer.load(self.mediaSource)
        # Start playback immediately.
        self.videoPlayer.play()
        # Button bindings.
        self.btnPlay.clicked.connect(self.Play)
        self.btnStop.clicked.connect(self.Stop)
        self.btnPause.clicked.connect(self.Pause)
        self.volumen.valueChanged.connect(self.Volumen)

    def Play(self):
        """Resume playback unless already playing."""
        if self.videoPlayer.isPlaying():
            pass
        else:
            self.videoPlayer.play()

    def Stop(self):
        """Stop playback."""
        self.videoPlayer.stop()

    def Pause(self):
        """Toggle between paused and playing."""
        if self.videoPlayer.isPaused():
            self.videoPlayer.play()
        else:
            self.videoPlayer.pause()

    def Volumen(self):
        """Map the slider position onto the player volume."""
        volum=(float(self.volumen.value())*float(self.volumen.maximum()))/10000
        self.videoPlayer.setVolume(float(volum))

    def Salir(self):
        # NOTE(review): relies on the module-level `app` created in the
        # __main__ block; raises NameError if called before it exists.
        app.exit()
if __name__=="__main__":
    try:
        # Standard Qt bootstrap: build the app, show the window, run the loop.
        app=QApplication(sys.argv)
        ventana=Window()
        ventana.show()
        sys.exit(app.exec_())
    except SystemExit:
        # sys.exit always raises SystemExit, so this runs on normal shutdown.
        ventana.Salir()
| {
"repo_name": "AlexEnriquez/PyQtPlayer",
"path": "app.py",
"copies": "1",
"size": "1540",
"license": "bsd-3-clause",
"hash": -6670854179914911000,
"line_mean": 27,
"line_max": 113,
"alpha_frac": 0.6181818182,
"autogenerated": false,
"ratio": 3.484162895927602,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9566383834117257,
"avg_score": 0.007192176002068902,
"num_lines": 55
} |
__author__ = 'Alex'
from PyQt4.QtGui import *
import sys
import json
import requests,base64
from PyQt4 import uic
import threading
class Window(QWidget):
def __init__(self,parent=None):
QWidget.__init__(self)
authThread=threading.Thread(target=self.Auth())
uiThread=threading.Thread(target=self.UiInit())
#getRemithread=threading.Thread(target=self.DatosDeRemitentes())
#getDestiThread=threading.Thread(target=self.DatosDeDestinatarios())
authThread.start()
#getRemithread.start()
#getDestiThread.start()
uiThread.start()
def Auth(self):
username='here the username of your api django rest'
password='passwd of your api'
base64string = base64.encodestring('%s:%s' % (username, password)).replace('\n', '')
self.cabecera = {
#"Content-Type" : "application/json",
"Authorization": "Basic %s"%(base64string),
#"Accept":"application/json"
}
"""def DatosDeRemitentes(self):
url="http://192.168.0.2:8000/remitentes"
response=requests.get(url,headers=self.cabecera)
print response.json()
list=response.json()
print "\nREMITENTES\n"
for item in list:
id=item['id']
nombre=item['nombre']
correo= item['correo']
cadena= "\nId: "+str(id)+"\n"+"Nombre: "+nombre+"\n"+"Correo: "+correo
print cadena"""
"""def DatosDeDestinatarios(self):
url="http://192.168.0.2:8000/destinatarios"
response=requests.get(url,headers=self.cabecera)
print response.json()
list=response.json()
print "\nDESTINATARIOS\n"
for item in list:
id=item['id']
nombre=item['nombre']
correo= item['correo']
cadena= "\nId: "+str(id)+"\n"+"Nombre: "+nombre+"\n"+"Correo: "+correo
print cadena"""
def UiInit(self):
uic.loadUi("view.ui",self)
#sendThread=threading.Thread(target=self.SendMail())
self.btnSend.clicked.connect(self.SendData)
def SendData(self):
_from=self.leFrom.text()
_to=self.leTo.text()
_subject=self.leSubject.text()
_message=self.teMessage.toPlainText()
correo=json.dumps({"remitente":unicode(_from),"destinatario":unicode(_to),"asunto":unicode(_subject),"mensaje":unicode(_message)})
url = 'http://192.168.0.2:8000/sendMail/'
username='alex'
password='admin'
base64string = base64.encodestring('%s:%s' % (username, password)).replace('\n', '')
cabeceras = {
"Content-Type" : "application/json",
"Authorization": "Basic %s"%(base64string),
"Accept":"application/json"
}
response=requests.post(url,data=correo,headers=cabeceras)
print response
if __name__=="__main__":
    # Standard Qt bootstrap: build the app, show the window, enter the loop.
    app=QApplication(sys.argv)
    ventana=Window()
    ventana.show()
    sys.exit(app.exec_())
| {
"repo_name": "AlexEnriquez/PyQtMail",
"path": "PyQtMail/app.py",
"copies": "1",
"size": "3071",
"license": "mit",
"hash": 2534850358112260600,
"line_mean": 33.1222222222,
"line_max": 138,
"alpha_frac": 0.5789645067,
"autogenerated": false,
"ratio": 3.513729977116705,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4592694483816705,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alex'
from sys import maxsize
class Infos:
    """Address-book contact record; every field is optional."""

    def __init__(self, firstname=None, middelname=None, lastname=None, nickname=None,
                 title=None, company=None, address=None, home=None, mobile=None,
                 fax=None, homepage=None, day_Birthday=None, month_Birthday=None,
                 year_Birthday=None, day_Anniversary=None, month_Anniversary=None,
                 year_Anniversary=None, address2=None, phone2=None, notes=None,
                 work=None, photo=None, id=None, all_phones_on_hp=None,
                 all_email_on_hp=None, email=None, email2=None, email3=None):
        # Mirror every constructor argument onto the instance verbatim.
        for name, value in list(locals().items()):
            if name != 'self':
                setattr(self, name, value)

    def __repr__(self):
        return "%s:%s:%s:%s:%s" % (self.lastname, self.firstname, self.email, self.home, self.id,)

    def __eq__(self, other):
        # Contacts match when ids are compatible and full names agree.
        same_id = self.id is None or other.id is None or self.id == other.id
        return same_id and self.lastname == other.lastname and self.firstname == other.firstname

    def id_or_max(self):
        """Sort key: the numeric id, or maxsize for not-yet-persisted contacts."""
        return int(self.id) if self.id else maxsize
"repo_name": "Alex-Chizhov/python_training",
"path": "home_works/model/info_contact.py",
"copies": "1",
"size": "2232",
"license": "apache-2.0",
"hash": -2943863213501133000,
"line_mean": 41.9423076923,
"line_max": 225,
"alpha_frac": 0.5389784946,
"autogenerated": false,
"ratio": 3.701492537313433,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47404710319134324,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alex'
from sys import maxsize
class Infos:
    """Contact record for the address-book tests; every field is optional."""

    def __init__(self, firstname=None, middelname=None, lastname=None, nickname=None,
                 title=None, company=None, address=None, home=None, mobile=None,
                 fax=None, homepage=None, day_Birthday=None, month_Birthday=None,
                 year_Birthday=None, day_Anniversary=None, month_Anniversary=None,
                 year_Anniversary=None, address2=None, phone2=None, notes=None,
                 work=None, photo=None, id=None):
        # Assign each constructor argument to the attribute of the same name.
        for name, value in list(locals().items()):
            if name != 'self':
                setattr(self, name, value)

    def __repr__(self):
        return '%s:%s %s' % (self.id, self.firstname, self.lastname)

    def __eq__(self, other):
        # Contacts match when ids are compatible and names agree.
        same_id = self.id is None or other.id is None or self.id == other.id
        return same_id and self.firstname == other.firstname and self.lastname == other.lastname

    def id_or_max(self):
        """Sort key: numeric id when present, otherwise sys.maxsize."""
        return int(self.id) if self.id else maxsize
"repo_name": "Alex-Chizhov/python_training",
"path": "error/home_works/model/info_contact.py",
"copies": "1",
"size": "1888",
"license": "apache-2.0",
"hash": 9159035586713218000,
"line_mean": 39.1914893617,
"line_max": 161,
"alpha_frac": 0.5391949153,
"autogenerated": false,
"ratio": 3.8609406952965237,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9788735467404945,
"avg_score": 0.022280028638315692,
"num_lines": 47
} |
_author__ = 'alex'
import sys
import xml.dom.minidom as dom
from floyd import floyd_algs
def get_Res_Matrix(length,nodes,nets_d,elem_type):
    """Build a length x length resistance matrix from schematic element nodes.

    nets_d maps XML net ids to dense matrix indices.  Diodes contribute an
    asymmetric pair (forward/reverse resistance); resistors and capacitors
    contribute symmetrically.  Parallel components between the same pair of
    nets are combined as 1 / sum(1/r).
    """
    # First pass: collect, per (from, to) cell, the individual resistances.
    Res = [[[] for j in range(length)] for i in range(length)]
    for i in range(nodes.length):
        if nodes[i].nodeType != elem_type: continue
        name = nodes[i].nodeName
        if name == "diode":
            net_from, net_to = nets_d[(int)(nodes[i].getAttribute("net_from"))], nets_d[(int)(nodes[i].getAttribute("net_to"))]
            res, rev_res = (float)(nodes[i].getAttribute("resistance")), (float)(nodes[i].getAttribute("reverse_resistance"))
            Res[net_from][net_to].append(res)
            Res[net_to][net_from].append(rev_res)
        else:
            # NOTE(review): "capactor" matches the (misspelled) tag used by
            # the input files; do not "fix" without changing the data too.
            if name == "capactor" or name == "resistor":
                net_from, net_to = nets_d[(int)(nodes[i].getAttribute("net_from"))], nets_d[(int)(nodes[i].getAttribute("net_to"))]
                res = (float)(nodes[i].getAttribute("resistance"))
                Res[net_from][net_to].append(res)
                Res[net_to][net_from].append(res)
    # Second pass: reduce each cell's list to one equivalent resistance.
    for i in range(len(Res)):
        for j in range(length):
            res = 0
            if i != j:
                a = Res[i][j]
                if len(a) == 0: res = 0
                else:
                    if len(a) == 1: res = a[0]
                    else:
                        # Parallel combination 1 / sum(1/r).
                        # NOTE(review): if any component is <= 0 the break
                        # leaves res == 0 and the division below raises
                        # ZeroDivisionError -- presumably the data never
                        # contains non-positive values; confirm.
                        for item in a:
                            if item <= 0: res = 0; break
                            res += 1 / item
                        res = 1 / res
            Res[i][j] = res
    #print (Res)
    return Res
def parse_xml():
    """Read the schematic XML named in sys.argv[1].

    Returns (nodes, nets_d, elem_type, length) where nets_d maps each <net>
    id to a dense 0-based matrix index (ascending id order) and length is
    the number of nets found.
    """
    elem_type = dom.Element.ELEMENT_NODE
    doc = dom.parse(sys.argv[1])
    #parse xml
    # Find the <schematics> root among the document's children.
    for node in doc.childNodes:
        if node.nodeName == "schematics": break
    nodes = node.childNodes
    nets_d = {}
    for i in range(nodes.length):
        if nodes[i].nodeType != elem_type: continue
        if nodes[i].nodeName != "net": continue
        nets_d[(int)(nodes[i].getAttribute("id"))] = 0
    # Renumber net ids densely, in ascending order.
    length = 0
    for x in sorted(nets_d):
        nets_d[x] = length
        length += 1
    return nodes,nets_d,elem_type,length
if __name__ == "__main__":
    # Usage: main.py <input schematic xml> <output csv>
    if len(sys.argv) != 3:
        print("check the arguments")
        exit()
    nodes,nets_d,elem_type,length = parse_xml()
    Res = get_Res_Matrix(length,nodes,nets_d,elem_type)
    #print (Res)
    # All-pairs combination over the resistance matrix via the floyd module.
    c_floyd = floyd_algs(Res)
    #print ("c_floyd",c_floyd)
    # NOTE(review): the output file is never explicitly closed/flushed.
    out = open(sys.argv[2], 'w')
    for line in c_floyd:
        out.write(','.join(map(str, [line[i] for i in range(0, len(line))])))
        out.write('\n')
| {
"repo_name": "BaydinAlexey/proglangs_baydin",
"path": "main.py",
"copies": "1",
"size": "2159",
"license": "mit",
"hash": -703794670920265500,
"line_mean": 29.4084507042,
"line_max": 119,
"alpha_frac": 0.6215840667,
"autogenerated": false,
"ratio": 2.604342581423402,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8401124773586026,
"avg_score": 0.06496037490747496,
"num_lines": 71
} |
__author__ = 'Alex'
class Infos:
    """Plain contact data holder; every field is a required positional argument.

    Note the historical attribute spelling `addres`, preserved for callers.
    """

    def __init__(self, firstname, middelname, lastname, nickname, title, company,
                 addres, home, mobile, fax, homepage, day_Birthday, month_Birthday,
                 year_Birthday, day_Anniversary, month_Anniversary, year_Anniversary,
                 address2, phone2, notes, work, photo):
        # Store each constructor argument under the matching attribute name.
        for name, value in list(locals().items()):
            if name != 'self':
                setattr(self, name, value)
| {
"repo_name": "Alex-Chizhov/python_training",
"path": "home_work_6/model/info_contact.py",
"copies": "5",
"size": "1267",
"license": "apache-2.0",
"hash": 5434086529490704000,
"line_mean": 37.3939393939,
"line_max": 97,
"alpha_frac": 0.5272296764,
"autogenerated": false,
"ratio": 3.9226006191950464,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.05572319254362013,
"num_lines": 33
} |
__author__ = 'alex'
from cement.core import foundation, controller
from SearchManager import SearchManager
from InteractionManager import OutputInteraction
import time
# define an application base controller
# define an application base controller
class FindForMeBasedController(controller.CementBaseController):
    """Base cement controller: declares the CLI and drives the search."""

    # Define command line arguments and default values
    class Meta:
        label = 'base'
        description = "Robust directory search application."
        # set default config options
        config_defaults = dict(
            quiet=False,
            debug=False
        )
        arguments = [
            (['-E', '--exact'], dict(action='store_true',
                                     help='Define whether search returns exact matches only.')),
            (['-C', '--case'], dict(action='store_true',
                                    help='Define whether search returns case sensitive results only.')),
            (['-N', '--name'], dict(action='store',
                                    help='Search on this text.')),
            (['-DC', '--show_file_content'], dict(action='store_true',
                                                  help='Show file content when searching through content.')),
            (['-SC', '--search_content'], dict(action='store_true',
                                               help='Search through file content.')),
            (['-D', '--dir'], dict(action='store',
                                   help='Directory in which search should be performed.')),
            (['-R', '--recursive'], dict(action='store_true',
                                         help='Define that search will only occur on top directory.')),
            (['-F', '--filter'], dict(action='store',
                                      help='Filter search by file types. Separate multiple by comma.'))
        ]

    # Define class variables
    __exact_search = None
    __search_query = None
    __file_types = None

    @controller.expose(hide=True, aliases=['run'])
    def default(self):
        """Entry command: parse args, run the search, hand results to the picker."""
        self.log.info('Search Initializing...')
        self.log.info('**************************************************************')
        # This delay simply ensures that the cement messages appear before the FindForMe messages begin.
        time.sleep(0.2)
        (exact_case, exact_search, is_recursive, search_dir, search_query,
         file_types, search_content, show_file_content) = self.setup_search_parameters()
        # Create SearchEngine object
        search_engine = SearchManager()
        search_results = search_engine.execute_search(search_dir, search_query, exact_case, exact_search,
                                                      file_types, search_content, show_file_content,
                                                      is_recursive)
        # If at least one result is returned then initiate user interaction
        if len(search_results) > 0:
            # Allow an Interaction object to handle user input over the results.
            interaction = OutputInteraction()
            interaction.request_result_item(search_dir, search_results, "Choose a number from the list of results:")

    def setup_search_parameters(self):
        """Normalize CLI arguments into the tuple consumed by default().

        Returns (exact_case, exact_search, is_recursive, search_dir,
        search_query, file_types, search_content, show_file_content).
        """
        # bool() collapses the previous repetitive if/else True/False blocks:
        # store_true flags are already True/False and absent string options
        # are falsy, so truthiness reproduces the original behaviour exactly.
        exact_case = bool(self.pargs.case)
        exact_search = bool(self.pargs.exact)
        show_file_content = bool(self.pargs.show_file_content)
        search_content = bool(self.pargs.search_content)
        is_recursive = bool(self.pargs.recursive)
        # String options are de-quoted; filter defaults to match-everything.
        search_query = str(self.pargs.name).replace("'", "") if self.pargs.name else ''
        search_dir = str(self.pargs.dir).replace("'", "") if self.pargs.dir else ''
        file_types = str(self.pargs.filter).replace("'", "") if self.pargs.filter else '*'
        return exact_case, exact_search, is_recursive, search_dir, search_query, \
            file_types, search_content, show_file_content
class FindForMe(foundation.CementApp):
    """Cement application wiring the base controller to the 'FindForMe' app."""
    class Meta:
        label = 'FindForMe'
        base_controller = FindForMeBasedController
# create the app
app = FindForMe()
try:
    # setup the application
    app.setup()
    # NOTE(review): this bare attribute access does nothing -- presumably an
    # unfinished add_argument(...) call; confirm intent before removing.
    app.args.add_argument
    # run the application
    app.run()
finally:
    # close the app
    app.close()
"repo_name": "masterpiece91/FindForMe",
"path": "FindForMe/FindForMe.py",
"copies": "1",
"size": "4916",
"license": "mit",
"hash": -5424051461285300000,
"line_mean": 35.1544117647,
"line_max": 118,
"alpha_frac": 0.5374288039,
"autogenerated": false,
"ratio": 4.5602968460111315,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5597725649911132,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alex'
from datetime import date, timedelta
import pyFWI.FWIFunctions as FWI
import sqlite3
conn = sqlite3.connect('FWI.db')
cur = conn.cursor()
start = date(2015, 8, 19)
end = date(2015, 8, 22)
for i in range(1,(end-start).days+1):
yesterday = start + timedelta(days=i-1)
today = start + timedelta(days=i)
cur.execute("SELECT temp, humidity, wind, rain FROM observations WHERE date = '%s'"%yesterday.strftime('%Y-%m-%d'))
c = cur.fetchone()
temp = c[0]
humd = c[1]
wind = c[2]
rain = c[3]
cur.execute("SELECT ffmc, dmc, dc FROM calculations WHERE date = '%s'"%yesterday.strftime('%Y-%m-%d'))
c = cur.fetchone()
ffmc = FWI.FFMC(temp, humd, wind, rain, c[0])
dmc = FWI.DMC(temp, humd, rain, c[1], -33.60, today.month)
dc = FWI.DC(temp, rain, c[2], -33.60, today.month)
isi = FWI.ISI(wind, ffmc)
bui = FWI.BUI(dmc, dc)
fwi = FWI.FWI(isi, bui)
cur.execute("INSERT INTO calculations VALUES ('%s', %f, %f, %f, %f, %f, %f)"%(today.strftime('%Y-%m-%d'), ffmc, dmc, dc, isi, bui, fwi))
conn.commit()
print "FFMC: %.2f"%ffmc
print "DMC : %.2f"%dmc
print "DC : %.2f"%dc
print "ISI : %.2f"%isi
print "BUI : %.2f"%bui
print "FWI : %.2f"%fwi | {
"repo_name": "parko636/pyfwi",
"path": "fwi_batch.py",
"copies": "1",
"size": "1241",
"license": "bsd-3-clause",
"hash": -9044822394350220000,
"line_mean": 33.5,
"line_max": 140,
"alpha_frac": 0.5938759065,
"autogenerated": false,
"ratio": 2.537832310838446,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8584754265352539,
"avg_score": 0.009390790397181398,
"num_lines": 36
} |
__author__ = 'alex'
from gmail import Gmail
import datetime
import re
class EmailHandler():
def __init__(self, username, password ):
self.g = Gmail()
self.g.login(username, password)
def logout(self):
self.g.logout()
def get_sent_mail(self):
return self.g.sent_mail()
def store_emails(self, start_date , store_at):
'''
Download the emails and store them in a plain text file separated by "===" string
'''
all_emails = []
for message in self.get_sent_mail().mail(after=start_date):
message.fetch()
for line in message.body.split('\r\n'):
#do some cleaning before storing the emails
if "-------- Original Message --------" in line:
break
if "---------- Forwarded message ----------" in line:
break
line = re.sub("\d+", "", line)
line = re.sub('<[^>]*>', '', line)
if line and line[0] != '>' and line[0] != '<' : #ignore quoting previous email or http links
all_emails.append(line)
all_emails.append("="*30)
#save the emails
f = open(store_at,"wr+")
f.write('\n'.join(all_emails))
f.close()
print "Done getting and storing emails"
| {
"repo_name": "aparij/EmailGrammar",
"path": "email_handler.py",
"copies": "1",
"size": "1362",
"license": "mit",
"hash": 6391883427681816000,
"line_mean": 29.2666666667,
"line_max": 108,
"alpha_frac": 0.5007342144,
"autogenerated": false,
"ratio": 4.242990654205608,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.027044883261997162,
"num_lines": 45
} |
__author__ = 'Alex'
from lexer import *
from ast import *
import exception
class Parser(object):
    """Base recursive-descent parser: cursor management over a token list."""

    def __init__(self, tokens):
        self.tokens = tokens
        self.position = 0
        self.length = len(tokens)

    def error(self, message, *args):
        """Raise a ParserException at the current token's (line, col)."""
        line_no, col_no = self.token.line_col
        raise exception.ParserException(message.format(*args), (line_no, col_no))

    @property
    def token(self):
        """Peek at the current token without consuming it.

        :rtype : JSONxToken
        """
        if self.position >= self.length:
            return None
        return self.tokens[self.position]

    def expect(self, expected_type):
        """Consume and return the current token when it matches, else None.

        :rtype : JSONxToken | None
        """
        current = self.token
        if not current or current.type != expected_type:
            return None
        self.position += 1
        return current

    def ensure(self, expected_type, message, *args):
        """Like expect(), but raise ParserException when the match fails.

        :rtype : JSONxToken | None
        """
        current = self.token
        if not current or current.type != expected_type:
            self.error(message, *args)
        self.position += 1
        return current
class JSONxParser(Parser):
    """Recursive-descent parser producing AST nodes from JSONx tokens.

    Each parse_* method returns a node on success, returns None when the
    construct is absent at the cursor, and raises via self.error() on
    malformed input.
    """

    # keyword -> 'true' | 'false' | 'null'
    def parse_keyword(self):
        token = self.expect(Type.KEYWORD)
        if not token:
            return None
        if token.value == 'true':
            return TrueNode()
        if token.value == 'false':
            return FalseNode()
        if token.value == 'null':
            return NullNode()

    # number -> ([+-]?(0|[1-9][0-9]*)(\.[0-9]*)?([eE][+-]?[0-9]+)?)
    def parse_number(self):
        token = self.expect(Type.NUMBER)
        if token:
            try:
                # Prefer int; fall back to float for fractional/exponent forms.
                return NumberNode(int(token.value))
            except ValueError:
                return NumberNode(float(token.value))

    # string -> ("(?:[^"\\]|\\.)*")
    def parse_string(self):
        token = self.expect(Type.STRING)
        if token:
            return StringNode(token.value)

    # object -> '{' pairs '}' | '{' '}'
    def parse_object(self):
        left_bracket = self.expect(Type.LEFT_CURLY_BRACKET)
        if not left_bracket:
            return None
        # Empty object fast path.
        right_bracket = self.expect(Type.RIGHT_CURLY_BRACKET)
        if right_bracket:
            return ObjectNode([])
        pairs = self.parse_pairs()
        self.ensure(Type.RIGHT_CURLY_BRACKET, 'OBJECT: "}}" expected, got "{}"', self.token.value)
        return ObjectNode(pairs)

    # pairs -> pair (',' pair)*
    def parse_pairs(self):
        pairs = []
        while True:
            # After the first pair, a comma is required to continue.
            if pairs and not self.expect(Type.COMMA):
                break
            pair = self.parse_pair()
            if not pair:
                self.error('PAIR: <pair> expected, got "{}"', self.token.value)
            pairs += pair,
        return pairs

    # pair -> string ':' value
    def parse_pair(self):
        key = self.parse_string()
        if not key:
            return None
        self.ensure(Type.COLON, 'PAIR: ":" expected, got "{}"', self.token.value)
        value = self.parse_value()
        if not value:
            self.error('PAIR: <value> expected, got "{}"', self.token.value)
        return PairNode(key, value)

    # array -> '[' elements ']' | '[' ']'
    def parse_array(self):
        left_bracket = self.expect(Type.LEFT_SQUARE_BRACKET)
        if not left_bracket:
            return None
        # Empty array fast path.
        right_bracket = self.expect(Type.RIGHT_SQUARE_BRACKET)
        if right_bracket:
            return ArrayNode([])
        elements = self.parse_elements()
        self.ensure(Type.RIGHT_SQUARE_BRACKET, 'ARRAY: "]" expected, got "{}"', self.token.value)
        return ArrayNode(elements)

    # elements -> value (',' value)*
    def parse_elements(self):
        elements = []
        while True:
            if elements and not self.expect(Type.COMMA):
                break
            value = self.parse_value()
            if not value:
                self.error('ARRAY: <value> expected, got "{}"', self.token.value)
            elements += value,
        return elements

    # reference -> '$' '{' string (':' string)? '}'
    def parse_reference(self):
        if not self.expect(Type.DOLLAR):
            return None
        self.ensure(Type.LEFT_CURLY_BRACKET, 'REFERENCE: <{{> expected, got {}', self.token.value)
        object_path = self.expect(Type.STRING)
        file_path = None
        if not object_path:
            self.error('REFERENCE: <string> expected, got {}', self.token)
        # A ':' means the first string was actually the file path.
        if self.expect(Type.COLON):
            file_path = object_path
            object_path = self.expect(Type.STRING)
        # NOTE(review): this message has no "{}" placeholder, so the extra
        # format argument is silently ignored by str.format.
        self.ensure(Type.RIGHT_CURLY_BRACKET, 'REFERENCE: <}}> expected, got', self.token.value)
        file_path = file_path and file_path.value
        return ReferenceNode(file_path, object_path.value)

    # value -> object | array | reference | string | number
    def parse_value(self):
        # Dispatch on the current token type; returns None for anything else.
        token = self.token
        if token.type == Type.LEFT_CURLY_BRACKET:
            return self.parse_object()
        elif token.type == Type.LEFT_SQUARE_BRACKET:
            return self.parse_array()
        elif token.type == Type.STRING:
            return self.parse_string()
        elif token.type == Type.NUMBER:
            return self.parse_number()
        elif token.type == Type.KEYWORD:
            return self.parse_keyword()
        elif token.type == Type.DOLLAR:
            return self.parse_reference()

    # JSONx -> value eof
    def parse(self):
        value = self.parse_value()
        self.ensure(Type.EOF, 'PARSER: <EOF> expected, got "{}"', self.token.value)
        return value
def parse(tokens):
    """Parse a JSONx token stream and return the root AST node."""
    return JSONxParser(tokens).parse()
| {
"repo_name": "AlexYukikaze/JSONx",
"path": "JSONx/parser.py",
"copies": "1",
"size": "5847",
"license": "mit",
"hash": -6650881494309548000,
"line_mean": 32.0338983051,
"line_max": 98,
"alpha_frac": 0.5560116299,
"autogenerated": false,
"ratio": 4.004794520547946,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006376728958926429,
"num_lines": 177
} |
__author__ = 'Alex'
from Movement import Movement
class BaseCommand:
    """Base robot command: validates and stores a Movement, with a name tag."""

    def __init__(self, movement):
        assert isinstance(movement, Movement)
        self.name = 'unknown'
        self.m = movement

    def execute(self):
        """Default action: do nothing (subclasses override).

        The original signature misspelled ``self`` as ``selfself``; fixed.
        """
        pass
class Forward(BaseCommand):
    """Command that drives the robot forward by 10 cm."""

    def __init__(self, movement):
        # Delegate validation/attribute setup, then tag with this command's name.
        BaseCommand.__init__(self, movement)
        self.name = 'forward'

    def execute(self):
        self.m.moveCM(10)
class Reverse(BaseCommand):
    # Command intended to drive the robot backwards.
    def __init__(self, movement):
        assert isinstance(movement, Movement)
        self.name = 'reverse'
        self.m = movement
    def execute(self):
        # NOTE(review): this moves +10 cm, identical to Forward.execute; a
        # reverse command presumably needs a negative distance — confirm
        # Movement.moveCM's sign convention before changing.
        self.m.moveCM(10)
class Left(BaseCommand):
    """Command that turns the robot 90 degrees counter-clockwise."""

    def __init__(self, movement):
        # Delegate validation/attribute setup, then tag with this command's name.
        BaseCommand.__init__(self, movement)
        self.name = 'left'

    def execute(self):
        self.m.turnDegrees(-90)
class Right(BaseCommand):
    """Command that turns the robot 90 degrees clockwise."""

    def __init__(self, movement):
        # Delegate validation/attribute setup, then tag with this command's name.
        BaseCommand.__init__(self, movement)
        self.name = 'right'

    def execute(self):
        self.m.turnDegrees(90)
| {
"repo_name": "RobotTurtles/mid-level-routines",
"path": "Apps/TurtleCommands.py",
"copies": "1",
"size": "1101",
"license": "apache-2.0",
"hash": 749848484552750600,
"line_mean": 21.9375,
"line_max": 45,
"alpha_frac": 0.6076294278,
"autogenerated": false,
"ratio": 3.682274247491639,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47899036752916385,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alex'
import glob
import os
import itertools
import fnmatch
import mmap
import re
import contextlib
import InteractionManager
from Common import Common
from os.path import join, getsize
# This object will contain all properties for a result item
class ResultController:
def __init__(self):
self.common_tools = Common()
self.search_type = self.common_tools.enum(SubDirectory=1, FileName=2, FileContent=3)
@staticmethod
def convert_item_to_result_object(item, current_root_directory, current_search_type, item_content=None):
result_dictionary = {}
result_dictionary["size"] = getsize(join(current_root_directory, item))
result_dictionary["is_directory"] = os.path.isdir(os.path.join(os.path.expanduser(current_root_directory), item))
result_dictionary["root_directory"] = str(current_root_directory)
result_dictionary["name"] = str(item)
result_dictionary["search_type"] = current_search_type
if not item_content is None:
result_dictionary["item_content"] = item_content
return result_dictionary
def convert_list_to_result_object_dictionary(self, current_list, last_dict_enumerator, current_root_directory,
current_search_type):
dictionary_enumerator = last_dict_enumerator
new_dictionary = {}
for item in current_list:
assert isinstance(item, str)
dictionary_enumerator += 1
new_dictionary[dictionary_enumerator] = self.convert_item_to_result_object(item, current_root_directory,
current_search_type)
return new_dictionary
class SearchManager:
    """Implements the file/directory/content search used by the FindForMe CLI."""
    def __init__(self):
        pass
    # NOTE(review): class-level attribute declared after __init__; Common() is
    # instantiated once at class-definition time and shared by all instances.
    common_tools = Common()
    def search_by_file_type(self, search_dir, search_query, match_case, search_pattern):
        """Print file names in *search_dir* that match the comma-separated
        *search_pattern* globs and contain *search_query*.

        Prints 'No results found' when nothing matches; returns None.
        """
        # Set up counter to ensure keep track of records found
        record_counter = 0
        # Fix directory path
        clean_search_dir = os.path.expanduser(search_dir)
        if not match_case:
            search_query = search_query.lower()
        for fileName in self.__multiple_file_types(clean_search_dir, search_pattern):
            if match_case:
                cur_file = fileName
                if cur_file.find(search_query) != -1:
                    record_counter += 1
                    print fileName
            else:
                cur_file = fileName.lower()
                # search_query was already lowered above; the extra .lower()
                # here is redundant but harmless.
                if cur_file.find(search_query.lower()) != -1:
                    record_counter += 1
                    print fileName
        if record_counter == 0:
            print 'No results found'
    def execute_search(self, search_dir, search_query, match_case, exact_search, search_pattern, search_content,
                       show_file_content, is_recursive=True):
        """Walk *search_dir*, collect matching sub-directories, file names and
        (optionally) file-content hits, print each directory's results as it
        is visited, and return a flat {index: result-dict} of every hit.

        An empty *search_dir* defaults to this script's directory. A non-empty
        *search_pattern* disables sub-directory matching entirely.
        """
        # TODO - Push all console interaction to the FindForMe class. Class will remain for search mechanism. (OOP)
        # Assert that method is receiving correct parameter object types
        assert isinstance(search_dir, str)
        assert isinstance(search_query, str)
        assert isinstance(match_case, bool)
        assert isinstance(exact_search, bool)
        assert isinstance(search_pattern, str)
        assert isinstance(search_content, bool)
        assert isinstance(show_file_content, bool)
        assert isinstance(is_recursive, bool)
        # Set up counters to ensure keep track of records found
        file_item_enumerator = 0
        sub_directory_enumerator = 0
        file_content_enumerator = 0
        result_enumerator = 0
        flat_results_enumerator = 0
        result_file_size = 0
        # Set up result controller
        result_controller = ResultController()
        # Set up search result dictionary
        flat_search_results = {}
        sub_directory_results = {}
        file_content_results = {}
        file_results = {}
        # Fix directory path
        if search_dir == '':
            clean_search_dir = os.path.dirname(os.path.abspath(__file__))
        else:
            clean_search_dir = os.path.expanduser(search_dir)
        if not match_case:
            search_query = search_query.lower()
        # Perform recursive search through all sub-directories
        for root_directory, sub_directories, files in os.walk(clean_search_dir):
            filtered_sub_directories = self.__filter_directories(sub_directories, search_query, exact_search,
                                                                 match_case)
            for sub_dir in filtered_sub_directories:
                # If a pattern does exist then do not search through sub directories. User is clearly
                # looking for files that fit the pattern therefore breakout of loop
                if search_pattern != '':
                    break
                sub_directory_enumerator += 1
                flat_results_enumerator += 1
                # Add to dictionary for future retrieval
                sub_directory_results[sub_directory_enumerator] = \
                    result_controller.convert_item_to_result_object(sub_dir, root_directory,
                                                                    result_controller.search_type.SubDirectory)
                # Add to flat dictionary. This dictionary is the one returned to client
                flat_search_results[flat_results_enumerator] = \
                    result_controller.convert_item_to_result_object(sub_dir, root_directory,
                                                                    result_controller.search_type.SubDirectory)
            # If search should not be recursive then simply delete all sub directories from list
            # (mutating the os.walk list in place prunes the traversal).
            if not is_recursive:
                del sub_directories[:]
            # Filter file list by pattern
            filtered_files = fnmatch.filter(files, search_pattern)
            for listed_file in filtered_files:
                cur_file = listed_file
                if match_case:
                    if (cur_file.find(search_query) != -1 and exact_search is False) or (
                            cur_file == search_query and exact_search is True):
                        # Keep count of search results
                        file_item_enumerator += 1
                        flat_results_enumerator += 1
                        # Add to dictionary for future retrieval
                        file_results[file_item_enumerator] = result_controller. \
                            convert_item_to_result_object(listed_file, root_directory,
                                                          result_controller.search_type.FileName)
                        # Add to flat dictionary. This dictionary is the one returned to client
                        flat_search_results[flat_results_enumerator] = result_controller. \
                            convert_item_to_result_object(listed_file, root_directory,
                                                          result_controller.search_type.FileName)
                else:
                    if (cur_file.lower().find(search_query.lower()) != -1 and exact_search is False) or (
                            cur_file.lower() == search_query.lower() and exact_search is True):
                        # Keep count of search results
                        file_item_enumerator += 1
                        flat_results_enumerator += 1
                        # Add to dictionary for future retrieval
                        file_results[file_item_enumerator] = result_controller. \
                            convert_item_to_result_object(listed_file, root_directory,
                                                          result_controller.search_type.FileName)
                        # Add to flat dictionary. This dictionary is the one returned to client
                        flat_search_results[flat_results_enumerator] = result_controller. \
                            convert_item_to_result_object(listed_file, root_directory,
                                                          result_controller.search_type.FileName)
                # File content search is bound by the pattern provided. The use case for this is that I may only want
                # to find information in certain files such as a word document.
                if search_content:
                    # Keep count of how many occurrences are found
                    instance_enumerator = 0
                    # Instantiate content dictionary. Initialization happens here because we don't want to use more
                    # memory than needed
                    file_content_dict = {}
                    for content_extract in self.__search_file_content(listed_file, root_directory, search_query,
                                                                      match_case, exact_search):
                        instance_enumerator += 1
                        # If it is not desired to show content results then simply break from loop once first instance
                        # is found
                        if not show_file_content:
                            break
                        # Add content lines to dictionary. Will be used later to show content.
                        file_content_dict[instance_enumerator] = content_extract
                    # At least one instance of search query was found and therefore we should add the result object
                    # to our file results dictionary
                    if instance_enumerator >= 1:
                        file_content_enumerator += 1
                        flat_results_enumerator += 1
                        # Add to dictionary for future retrieval
                        file_content_results[file_content_enumerator] = result_controller.convert_item_to_result_object(
                            listed_file, root_directory, result_controller.search_type.FileContent, file_content_dict)
                        # Add to flat dictionary. This dictionary is the one returned to client
                        flat_search_results[flat_results_enumerator] = result_controller.convert_item_to_result_object(
                            listed_file, root_directory, result_controller.search_type.FileContent, file_content_dict)
            # Instantiate temp dictionary utilized for printing purposes
            temp_search_results = {}
            # Insert result dictionaries into one overall dictionary
            if len(sub_directory_results) > 0:
                temp_search_results["sub_directories"] = sub_directory_results
            if len(file_content_results) > 0:
                temp_search_results["file_content"] = file_content_results
            if len(file_results) > 0:
                temp_search_results["files"] = file_results
            # Print results (per visited directory)
            result_file_size, result_enumerator = self.__print_results(temp_search_results, root_directory,
                                                                       result_enumerator, result_file_size)
            # Before looping through make sure to clear dictionaries
            file_results = {}
            file_content_results = {}
            sub_directory_results = {}
        if result_enumerator > 0:
            notification_manager = InteractionManager.NotificationController()
            # Print Search Statistics
            print ''  # Skip line
            notification_manager.print_with_style(
                "*********************************************************************************",
                notification_manager.notification_category.Style)
            notification_manager.print_with_style("\t\t\t\tResult File Size: %0.1f MB" % (result_file_size / 1024.0) +
                                                  " (%i" % result_file_size + ")",
                                                  notification_manager.notification_category.Small_Print)
            notification_manager.print_with_style(os.linesep + "\t\t\t\tResult Count: %i" % result_enumerator,
                                                  notification_manager.notification_category.Small_Print)
        return flat_search_results
    @staticmethod
    def __print_results(result_dictionary, current_directory, overall_enumerator, overall_result_file_size):
        """Pretty-print one directory's grouped results.

        *result_dictionary* maps category keys ("sub_directories",
        "file_content", "files") to {index: result-dict}. Returns the updated
        (result_file_size, result_enumerator) running totals.
        """
        sub_directory_counter = 0
        file_content_counter = 0
        file_counter = 0
        result_enumerator = overall_enumerator
        result_file_size = overall_result_file_size
        notification_manager = InteractionManager.NotificationController()
        if len(result_dictionary) == 0:
            return result_file_size, result_enumerator
        notification_manager.print_with_style("Directory Searched: %s" % current_directory,
                                              notification_manager.notification_category.Large_Header)
        # Iterate category keys; the "in" tests below are substring checks on
        # the key names.
        for result_dictionary_item in result_dictionary:
            result_item = result_dictionary[result_dictionary_item]
            if "sub_directories" in result_dictionary_item and len(result_item) > 0:
                # Print as header. It should print at before any results are printed.
                print ''  # Skip line
                notification_manager.print_with_style("Sub Directory Results:",
                                                      notification_manager.notification_category.Header)
                notification_manager.print_with_style("------------------------------------------------",
                                                      notification_manager.notification_category.Style)
                for sub_directory in result_item:
                    result_enumerator += 1
                    sub_directory_counter += 1
                    sub_directory_item = result_item[sub_directory]
                    result_file_size += int(sub_directory_item["size"])
                    notification_manager.print_with_style("(" + str(result_enumerator) + ") - " +
                                                          str(sub_directory_counter) + ".\t%s" %
                                                          sub_directory_item["name"] +
                                                          " (%0.1f MB" % (sub_directory_item["size"] / 1024.0) + ")",
                                                          notification_manager.notification_category.Normal)
            elif "file_content" in result_dictionary_item and len(result_item) > 0:
                # Print as header. It should print at before any results are printed.
                print ''  # Skip line
                notification_manager.print_with_style("File Content Results:",
                                                      notification_manager.notification_category.Header)
                notification_manager.print_with_style("------------------------------------------------",
                                                      notification_manager.notification_category.Style)
                for file_content in result_item:
                    result_enumerator += 1
                    file_content_counter += 1
                    file_content_item = result_item[file_content]
                    result_file_size += int(file_content_item["size"])
                    # Without captured content lines just print the file name;
                    # otherwise also dump every stored matching line.
                    if not "item_content" in file_content_item:
                        notification_manager.print_with_style("(" + str(result_enumerator) + ") - " +
                                                              str(file_content_counter) + ".\t%s" %
                                                              file_content_item["name"] +
                                                              " (%0.1f MB" % (file_content_item["size"] / 1024.0) + ")",
                                                              notification_manager.notification_category.Normal)
                    else:
                        notification_manager.print_with_style("(" + str(result_enumerator) + ") - " +
                                                              str(file_content_counter) +
                                                              ".\tFile Name: %s" % file_content_item["name"] +
                                                              " (%0.1f MB" % (file_content_item["size"] / 1024.0) + ")",
                                                              notification_manager.notification_category.Normal)
                        notification_manager. \
                            print_with_style('*****************************************************************',
                                             notification_manager.notification_category.Style)
                        # set up content dictionary in order to iterate over it.
                        file_content_dictionary = file_content_item["item_content"]
                        for file_content_item in file_content_dictionary:
                            notification_manager.print_with_style(file_content_dictionary[file_content_item],
                                                                  notification_manager.notification_category.
                                                                  Small_Print)
            elif "files" in result_dictionary_item and len(result_item) > 0:
                # Print as header. It should print at before any results are printed.
                print ''  # Skip line
                notification_manager.print_with_style("File Name Results:",
                                                      notification_manager.notification_category.Header)
                notification_manager.print_with_style("------------------------------------------------",
                                                      notification_manager.notification_category.Style)
                for current_file in result_item:
                    result_enumerator += 1
                    file_counter += 1
                    file_item = result_item[current_file]
                    result_file_size += int(file_item["size"])
                    notification_manager.print_with_style("(" + str(result_enumerator) + ") - " + str(file_counter) +
                                                          ".\t%s" % file_item["name"] +
                                                          " (%0.1f MB" % (file_item["size"] / 1024.0) + ")",
                                                          notification_manager.notification_category.Normal)
        return result_file_size, result_enumerator
    @staticmethod
    def __search_file_content(current_file, current_directory, search_query, match_case, exact_search):
        """Generator yielding each line of the file that contains *search_query*.

        Scans via a read-only memory map so large files are not loaded whole.
        *exact_search* wraps the query in word boundaries; *match_case*
        toggles re.IGNORECASE.
        """
        if exact_search:
            search_query = r"\b" + re.escape(search_query) + r"\b"
        if match_case:
            pattern = re.compile(search_query)
        else:
            pattern = re.compile(search_query, re.IGNORECASE)
        with open(os.path.join(current_directory, current_file), mode='r') as file_object:
            try:
                with contextlib.closing(mmap.mmap(file_object.fileno(), 0, access=mmap.ACCESS_READ)) as file_stream:
                    previous_file_content_location = 0
                    for match in pattern.finditer(file_stream):
                        # Find beginning of line where instance exists
                        new_line_starting_location = file_stream.rfind(os.linesep,
                                                                       previous_file_content_location,
                                                                       match.start())
                        # Save current location for next iteration.
                        # This will allow next iteration what is the beginning of the line
                        previous_file_content_location = match.start()
                        # Move to position where first instance was found from the last current position
                        if new_line_starting_location != -1:
                            file_stream.seek(new_line_starting_location + 1, os.SEEK_SET)
                            yield str(file_stream.readline()).strip("\t").strip(os.linesep)
            except ValueError:
                # Presumably guards mmap's ValueError on zero-length files —
                # confirm; any such file simply yields no matches.
                pass
def __multiple_file_types(self, search_dir, patterns):
pattern_list = self.__arrange_pattern_array(patterns)
return itertools.chain.from_iterable(glob.glob1(search_dir, pattern) for pattern in pattern_list)
@staticmethod
def __filter_directories(directories, search_query, exact_search, match_case):
# If there is no search criteria then return the same directory listing
if not search_query:
return directories
# Ensure that you match case if required only. Otherwise filter by any string match
if exact_search:
if match_case:
filtered_directories = [directory for directory in directories if directory == search_query]
else:
filtered_directories = [directory for directory in directories if
directory.lower() == search_query.lower()]
else:
if match_case:
filtered_directories = [directory for directory in directories if
directory.find(search_query) >= 0]
else:
filtered_directories = [directory for directory in directories if
directory.lower().find(search_query.lower()) >= 0]
return filtered_directories
@staticmethod
def __arrange_pattern_array(pattern):
return pattern.replace(' ', '').split(",") | {
"repo_name": "masterpiece91/FindForMe",
"path": "FindForMe/SearchManager.py",
"copies": "1",
"size": "21841",
"license": "mit",
"hash": -26934375619215664,
"line_mean": 51.5048076923,
"line_max": 121,
"alpha_frac": 0.524609679,
"autogenerated": false,
"ratio": 5.208919627951348,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6233529306951348,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alex'
import JSONx.utils as utils
import JSONx
import os
class JSONxLoaderException(Exception):
    """Loader error carrying the offending file path alongside the message."""

    def __init__(self, message, file_path):
        super(JSONxLoaderException, self).__init__(message)
        self.file = file_path
        self.message = message
class JSONxLoader(object):
    """Loads a JSONx document and resolves ``$ref`` references, possibly
    across files, up to a fixed expansion depth.

    ``file_cache`` memoizes raw file text and ``data_cache`` memoizes parsed
    documents so each referenced file is read and parsed at most once.
    """
    def __init__(self, file_name, log_func):
        self.file_cache = {}
        self.data_cache = {}
        self.root_file = file_name
        self.log_func = log_func

    def load(self):
        """Resolve the root file (expressed as a synthetic $ref) and return
        the fully expanded document; 32 is the maximum expansion depth."""
        root = {"$ref": {"file": self.root_file, "path": "."}}
        result = self.visit(root, [], '', 32)
        return result

    def load_config(self, path):
        """Parse and cache the JSONx document at *path*; parse failures are
        re-raised as JSONxLoaderException with line/column info."""
        path = os.path.normpath(path)
        if path in self.data_cache:
            return self.data_cache[path]
        try:
            config = self.load_file(path)
            result = self.data_cache[path] = JSONx.parse(config)
            return result
        except JSONx.JSONxException, e:
            line, col = e.error_position
            raise JSONxLoaderException("{} at {}:{} in \"{}\"".format(e.message, line, col, path), path)

    def visit(self, root, path, file_name, level):
        """Recursively expand references in *root*; *level* is the remaining
        expansion depth."""
        # NOTE(review): when the depth limit is exceeded this returns the
        # 4-tuple (root, path, file_name, level) instead of just the node —
        # looks like a bug (probably meant ``return root``); confirm intent.
        if level < 0:
            return root, path, file_name, level
        if isinstance(root, dict):
            return self.visit_dict(root, path, file_name, level)
        elif isinstance(root, list):
            return self.visit_list(root, path, file_name, level)
        else:
            return root

    def visit_dict(self, root, path, file_name, level):
        """Expand a dict: a plain mapping is visited key by key; a ``$ref``
        node is resolved and (for dict results) merged with its sibling keys."""
        if '$ref' not in root:
            return {key: self.visit(value, path + [key], file_name, level - 1) for key, value in root.iteritems()}
        ref_path = root['$ref'].get('path') or '.'
        ref_file = root['$ref'].get('file')
        config_file = get_path(file_name, ref_file)
        config = self.load_config(config_file)
        result, err = utils.get_dict_path(config, ref_path)
        if err:
            obj_path = '/'.join(path)
            raise JSONxLoaderException('Bad reference: ${{"{}": "{}"}} in "{}:{}"\n{}'
                                       .format(ref_file or file_name, ref_path, config_file, obj_path, err), file_name)
        if isinstance(result, dict):
            # Sibling keys on the $ref node override/extend the referenced dict.
            items = (item for item in root.iteritems() if item[0] != '$ref')
            result.update(items)
        return self.visit(result, path, config_file, level - 1)

    def visit_list(self, root, path, file_name, level):
        """Expand each list element, tracking an indexed path like ``key[i]``
        for error messages."""
        result = []
        if not path:
            path.append('root')
        key = path.pop()
        for i, item in enumerate(root):
            new_path = path + ['{}[{}]'.format(key, i)]
            # NOTE(review): depth is passed as level + 1 here while dict
            # branches use level - 1, so lists do not consume depth — confirm.
            val = self.visit(item, new_path, file_name, level + 1)
            result.append(val)
        return result

    def load_file(self, path, encoding='utf-8-sig'):
        """Read and cache the raw text of *path* (BOM-tolerant UTF-8 by
        default); raise JSONxLoaderException when the file is missing."""
        import codecs
        if path in self.file_cache:
            return self.file_cache[path]
        try:
            stream = codecs.open(path, 'r', encoding)
            self.file_cache[path] = stream.read()
            if self.log_func is not None:
                self.log_func('[JSONxLoader] load: {}'.format(path))
            stream.close()
            return self.file_cache[path]
        except IOError, e:
            raise JSONxLoaderException('File not found', e.filename)
def get_path(root_file, ref):
    """Resolve *ref* relative to *root_file*'s directory.

    Returns:
        - ``os.path.join(dirname(root_file), ref)`` when both are non-empty,
        - *root_file* when only it is non-empty,
        - *ref* otherwise (which may itself be empty/None).
    """
    # The module-level ``import os`` already provides os.path; the original
    # re-imported os locally, which was redundant.
    if root_file and ref:
        root_dir = os.path.dirname(root_file)
        return os.path.join(root_dir, ref)
    if root_file:
        return root_file
    return ref
| {
"repo_name": "AlexYukikaze/JSONx",
"path": "JSONxLoader/loader.py",
"copies": "1",
"size": "3574",
"license": "mit",
"hash": -2078804446673174800,
"line_mean": 33.0380952381,
"line_max": 119,
"alpha_frac": 0.5540011192,
"autogenerated": false,
"ratio": 3.6845360824742266,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47385372016742267,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alex'
import os
import subprocess
from colorama import Style, init, Back, Fore
from Common import Common
class OutputInteraction:
def __init__(self):
self.common_tools = Common()
def request_result_item(self, search_directory, result_dictionary, input_message):
message_tool = NotificationController()
user_input = raw_input(input_message)
# If escape key is pressed then escape method
if user_input == '':
return
# If the input provided is not a number indicate to user and continue to loop through method until correct input
# provided.
if not user_input.isdigit():
message_tool.print_with_style("No such option exists", message_tool.notification_category.Warning)
return self.request_result_item(search_directory, result_dictionary, input_message)
# If the input provided is not a number indicate to user and request another option
if not user_input.isdigit():
user_input = raw_input("")
current_operating_system = self.common_tools.get_operating_system()
# Make sure that the requested key exists otherwise return a message.
if int(user_input) in result_dictionary:
if result_dictionary[int(user_input)]["is_directory"]:
message_tool.print_with_style(os.path.join(
os.path.expanduser(result_dictionary[int(user_input)]["root_directory"]),
result_dictionary[int(user_input)]["name"]), message_tool.notification_category.Header)
print ""
if current_operating_system == "Windows":
subprocess.check_call(["dir", os.path.join(
os.path.expanduser(result_dictionary[int(user_input)]["root_directory"]),
result_dictionary[int(user_input)]["name"])])
else:
subprocess.check_call(["ls", "-l",
os.path.join(os.path.expanduser(
result_dictionary[int(user_input)]["root_directory"]),
result_dictionary[int(user_input)]["name"])])
else:
# os.system('vi %s' % search_results[int(user_input)])
subprocess.call(["nano",
os.path.join(os.path.expanduser(
result_dictionary[int(user_input)]["root_directory"]),
result_dictionary[int(user_input)]["name"])])
else:
message_tool.print_with_style("No such option exists.", message_tool.notification_category.Warning)
return self.request_result_item(search_directory, result_dictionary, input_message)
class NotificationController:
    """Maps message categories to colorama ANSI styles and prints styled text."""
    def __init__(self):
        self.common_tools = Common()
        # Pseudo-enum of ANSI style prefixes, one per message category.
        self.notification_category = self.common_tools.enum(Normal=Style.NORMAL + Fore.CYAN,
                                                            Large_Header=Style.BRIGHT + Back.RED + Fore.WHITE,
                                                            Header=Style.BRIGHT + Back.WHITE + Fore.BLACK,
                                                            Small_Header=Style.NORMAL + Fore.MAGENTA,
                                                            Small_Print=Style.NORMAL + Fore.GREEN,
                                                            Warning=Style.BRIGHT + Fore.YELLOW,
                                                            Error=Style.BRIGHT + Fore.RED,
                                                            Style=Style.NORMAL + Fore.BLUE)
    def print_with_style(self, output, notification_category):
        """Print *output* wrapped in the given category's ANSI codes
        (falls back to Normal when no category is given)."""
        # Initialize colorama object
        # NOTE(review): init() is re-run on every call; colorama only needs it
        # once per process — harmless but wasteful.
        init()
        if not notification_category:
            notification_category = self.notification_category.Normal
        print notification_category + output + Style.RESET_ALL
| {
"repo_name": "masterpiece91/FindForMe",
"path": "FindForMe/InteractionManager.py",
"copies": "1",
"size": "4029",
"license": "mit",
"hash": -7291795230444833000,
"line_mean": 50,
"line_max": 120,
"alpha_frac": 0.5395879871,
"autogenerated": false,
"ratio": 4.9496314496314495,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002995873859122943,
"num_lines": 79
} |
__author__ = 'alex'
import requests
from collections import Counter
from lxml import objectify
from xml.etree import ElementTree
class Error:
    """ AtD Error Object

    Wraps one <error> element returned by the After the Deadline service;
    instances are returned in a list by checkText().
    Available properties are: string, description, precontext, type, url
    and suggestions.
    Look at http://www.afterthedeadline.com/api.slp for more information."""

    def __init__(self, e):
        self.string = e.find('string').text
        self.description = e.find('description').text
        self.precontext = e.find('precontext').text
        self.type = e.find('type').text
        url_node = e.find('url')
        self.url = url_node.text if url_node is not None else ""
        suggestion_node = e.find('suggestions')
        if suggestion_node is not None:
            self.suggestions = [option.text for option in suggestion_node.findall('option')]
        else:
            self.suggestions = []

    def __str__(self):
        return "%s (%s)" % (self.string, self.description)
if __name__ == "__main__":
    '''
    Some simple script to test a different backend than the LanguageTool
    Using After The Dark - AtD as grammar server
    Error class was copied from the excellent after_the_dark python wrapper
    https://bitbucket.org/miguelventura/after_the_deadline
    By Miguel Ventura
    '''
    # Send the whole mail corpus to a local AtD server in a single request.
    messages_str = open("myemails.txt", "r+").readlines()
    payload = {'data': "\n".join(messages_str)}
    r = requests.post('http://127.0.0.1:1049/checkDocument?', data=payload)
    e = ElementTree.fromstring(r.content)
    # <message> children signal a server-side failure, not grammar errors.
    errs = e.findall('message')
    if len(errs) > 0:
        raise Exception('Server returned an error: %s' % errs[0].text)
    errors_list= map(lambda err: Error(err), e.findall('error'))
    problems_counter = Counter()
    print errors_list
    # Tally identical problems so the most frequent ones surface first.
    for e in errors_list:
        problems_counter[e.string +" -- " + e.description +"--" + e.type] += 1
    f = open("results_atd.txt","w+")
    # store the 300 most common errors
    for item in problems_counter.most_common(300):
        try:
            f.write("%s %d \n" % item)
        except:
            # NOTE(review): silently skips rows that fail to encode/format.
            pass
    f.close()
| {
"repo_name": "aparij/EmailGrammar",
"path": "atd_processing.py",
"copies": "1",
"size": "2191",
"license": "mit",
"hash": 2498054354055019500,
"line_mean": 29.8591549296,
"line_max": 79,
"alpha_frac": 0.5960748517,
"autogenerated": false,
"ratio": 3.7452991452991453,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48413739969991454,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alex'
import requests
from lxml import objectify
class GrammarChecker():
    """Client for a LanguageTool-style HTTP grammar-check endpoint."""
    # Rules and categories considered noise; dropped from results.
    USELESS_RULES = ["WHITESPACE_RULE", "EN_UNPAIRED_BRACKETS", "EN_QUOTES", 'COMMA_PARENTHESIS_WHITESPACE']
    USELESS_CATEGORY = ["Capitalization"]

    def __init__(self, url, lang='en-US'):
        self.url = url
        self.language = lang
        self.problemsXML = None  # raw XML bytes of the last check, or None
        self.text = None

    def post_check(self,text=None):
        """POST `text` to the server; store the XML response on HTTP 200."""
        payload = {'language': self.language, 'text': text}
        r = requests.post(self.url, data=payload)
        if r.status_code == 200:
            self.problemsXML = r.content
        else:
            self.problemsXML = None

    def getFilteredProblemList(self):
        """Parse the stored XML and return a list of error attribute dicts.

        Filters out: noisy rules/categories, errors of length '2', empty
        replacement suggestions, contexts mentioning "gmail" (quoted mail
        footers) and contexts containing the U+FFFD replacement character.
        """
        problem_list = []
        if self.problemsXML:
            root = objectify.fromstring(self.problemsXML)
            if hasattr(root, 'error'):
                for e in root.error:
                    if e.attrib['category'] not in self.USELESS_CATEGORY and e.attrib['ruleId'] not in self.USELESS_RULES \
                            and e.attrib['errorlength'] != '2' and e.attrib['replacements'] != '':
                        if "gmail" not in e.attrib['context'].lower() and u'\ufffd' not in e.attrib['context']:
                            problem_list.append(e.attrib)
        return problem_list
| {
"repo_name": "aparij/EmailGrammar",
"path": "check_grammar.py",
"copies": "1",
"size": "1311",
"license": "mit",
"hash": 7479280391510994000,
"line_mean": 35.4166666667,
"line_max": 123,
"alpha_frac": 0.5751334859,
"autogenerated": false,
"ratio": 3.8558823529411765,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9775923550576232,
"avg_score": 0.031018457652988916,
"num_lines": 36
} |
__author__ = 'alex'
import requests
from .market import Market
from private_markets import cryptsy
class Cryptsy(Market):
    """Public market data for the Cryptsy exchange (market id 132)."""
    def __init__(self):
        super(Cryptsy, self).__init__()
        self.update_rate = 60  # seconds between refreshes
        self.fees = {"buy": {"fee": 0.002, "coin": "s_coin"}, "sell": {"fee": 0.003, "coin": "s_coin"}}
        # Hack to use private market method to update cryptsy depths
        self.a = cryptsy.PrivateCryptsy()

    def update_depth(self):
        """Refresh the order book via the authenticated API (public URL kept for reference)."""
        #url = 'http://pubapi.cryptsy.com/api.php?method=singleorderdata&marketid=132'
        #res = requests.get(url)
        #depth = res.json()
        depth = self.a.query("depth", {"marketid": 132})
        # Order rows appear to be [price, amount] pairs -> indices 0 and 1.
        self.depth = self.format_depth(depth['return']['buy'], depth['return']['sell'], 0, 1)

    def update_prices(self):
        """Fetch the full public ticker and rebuild self.prices."""
        url = 'http://pubapi.cryptsy.com/api.php?method=orderdatav2'
        res = requests.get(url)
        prices = res.json()
        self.prices = self.format_prices(prices)

    def format_prices(self, prices):
        """Convert the raw orderdatav2 payload into {(primary, secondary): {'bid', 'ask'}}."""
        for pair in prices['return']:
            pair_depth = prices['return'][pair]
            pair_name = (pair_depth['primarycode'], pair_depth['secondarycode'])
            pair_depth = self.format_depth(pair_depth['buyorders'],
                                           pair_depth['sellorders'], 'price', 'quantity')
            # Best bid/ask are the first rows of the formatted book.
            self.prices[pair_name] = {'bid': pair_depth['bids'][0]['price'],
                                      'ask': pair_depth['asks'][0]['price']}
        return self.prices

if __name__ == "__main__":
    market = Cryptsy()
    print(market.get_ticker())
| {
"repo_name": "acontry/altcoin-arbitrage",
"path": "arbitrage/public_markets/cryptsy.py",
"copies": "1",
"size": "1609",
"license": "mit",
"hash": -6515328082566921000,
"line_mean": 37.3095238095,
"line_max": 103,
"alpha_frac": 0.5599751398,
"autogenerated": false,
"ratio": 3.5755555555555554,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46355306953555553,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alex'
import requests
from .market import Market
class Vircurex(Market):
    """Public market data for the Vircurex exchange."""
    def __init__(self):
        super(Vircurex, self).__init__()
        self.update_rate = 60  # seconds between refreshes
        self.update_prices()
        # self.triangular_arbitrage()

    def update_depth(self):
        """Fetch the order book for the configured base/alt coin pair."""
        url = 'https://api.vircurex.com/api/orderbook.json'
        price_query = {'base': self.p_coin, 'alt': self.s_coin}
        res = requests.get(url, data=price_query)
        depth = res.json()
        # Rows appear to be [price, amount] -> column indices 0 and 1.
        self.depth = self.format_depth(depth['bids'], depth['asks'], 0, 1)

    def update_prices(self):
        """Fetch best bid/ask for every traded currency pair."""
        url = 'https://api.vircurex.com/api/get_info_for_currency.json'
        res = requests.get(url)
        prices = res.json()
        self.prices = self.format_prices(prices)

    def format_prices(self, prices):
        """Convert the raw payload into {(primary, secondary): {'bid', 'ask'}}.

        Skips BTC as primary, pairs already stored in mirrored order, and
        pairs with a zero bid or ask.
        """
        prices.pop('status', None)
        for p_coin in prices:
            if p_coin == 'BTC':
                continue
            for s_coin in prices[p_coin]:
                # Skip if the mirrored pair was already recorded.
                if (s_coin, p_coin) in self.prices.keys():
                    continue
                pair_key = (p_coin, s_coin)
                bid = float(prices[p_coin][s_coin]['highest_bid'])
                ask = float(prices[p_coin][s_coin]['lowest_ask'])
                if bid != 0 and ask != 0:
                    self.prices[pair_key] = {'bid': bid, 'ask': ask}
        return self.prices

if __name__ == "__main__":
    market = Vircurex()
    print(market.get_ticker())
| {
"repo_name": "acontry/altcoin-arbitrage",
"path": "arbitrage/public_markets/vircurex.py",
"copies": "1",
"size": "1456",
"license": "mit",
"hash": -9162045229499139000,
"line_mean": 31.3555555556,
"line_max": 74,
"alpha_frac": 0.5350274725,
"autogenerated": false,
"ratio": 3.3781902552204177,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9410214724717414,
"avg_score": 0.0006006006006006007,
"num_lines": 45
} |
__author__ = 'Alex'
# noinspection PyMethodMayBeStatic
class JSONxVisitor(object):
    """Walks a JSONx AST and builds the equivalent plain Python value."""

    def visit(self, node):
        # Dispatch on the node's class name: NumberNode -> visit_NumberNode.
        handler = getattr(self, 'visit_' + node.__class__.__name__, self.visit_generic)
        return handler(node)

    def visit_generic(self, node):
        # Reached only for node types without a dedicated handler.
        raise RuntimeError('Unknown node ' + node.__class__.__name__)

    def visit_NumberNode(self, node):
        return node.value

    def visit_StringNode(self, node):
        return node.value

    def visit_TrueNode(self, node):
        return True

    def visit_FalseNode(self, node):
        return False

    def visit_NullNode(self, node):
        return None

    def visit_PairNode(self, node):
        # A pair becomes a (key, value) tuple consumed by visit_ObjectNode.
        return self.visit(node.key), self.visit(node.value)

    def visit_ReferenceNode(self, node):
        return {"$ref": {"file": node.file, "path": node.path}}

    def visit_ObjectNode(self, node):
        # Later duplicate keys overwrite earlier ones, as with plain dicts.
        return dict(self.visit(child) for child in node.children)

    def visit_ArrayNode(self, node):
        return [self.visit(child) for child in node.children]
class Node(object):
    """Base class for all JSONx AST nodes.

    Equality is structural: the base check only verifies the other object
    is a Node; subclasses refine it with their own payload comparisons.
    """
    def __eq__(self, other):
        return isinstance(other, Node)

    def __repr__(self):
        # .values() instead of the Python-2-only .itervalues(): same
        # behaviour on Python 2 (repr of each attribute value) and also
        # valid on Python 3.
        strings = [repr(item) for item in self.__dict__.values()]
        return "{}({})".format(self.__class__.__name__, ', '.join(strings))

    def __str__(self):
        return self.__class__.__name__ + '()'
class NumberNode(Node):
    """Leaf node wrapping a numeric literal."""
    def __init__(self, value):
        self.value = value

    def __eq__(self, other):
        if not super(NumberNode, self).__eq__(other):
            return False
        return self.value == other.value
class StringNode(Node):
    """Leaf node wrapping a string literal."""
    def __init__(self, value):
        self.value = value

    def __eq__(self, other):
        if not super(StringNode, self).__eq__(other):
            return False
        return self.value == other.value
class TrueNode(Node):
    """AST literal `true`; all instances compare equal."""
    def __eq__(self, other):
        return isinstance(other, TrueNode)
class FalseNode(Node):
    """AST literal `false`; all instances compare equal."""
    def __eq__(self, other):
        return isinstance(other, FalseNode)
class NullNode(Node):
    """AST literal `null`; all instances compare equal."""
    def __eq__(self, other):
        return isinstance(other, NullNode)
class ReferenceNode(Node):
    """Reference to a value living in another JSONx document."""
    def __init__(self, file_path, object_path):
        self.file = file_path
        self.path = object_path

    def __eq__(self, other):
        if not super(ReferenceNode, self).__eq__(other):
            return False
        return self.file == other.file and self.path == other.path
class ArrayNode(Node):
    """Ordered collection of child nodes."""
    def __init__(self, nodes):
        self.children = nodes

    def __eq__(self, other):
        if not super(ArrayNode, self).__eq__(other):
            return False
        return self.children == other.children
class ObjectNode(Node):
    """Object node holding a sequence of PairNode children."""
    def __init__(self, pairs):
        self.children = pairs

    def __eq__(self, other):
        if not super(ObjectNode, self).__eq__(other):
            return False
        return self.children == other.children
class PairNode(Node):
    """A single key/value entry inside an ObjectNode."""
    def __init__(self, key, value):
        self.key = key
        self.value = value

    def __eq__(self, other):
        if not super(PairNode, self).__eq__(other):
            return False
        return self.key == other.key and self.value == other.value
| {
"repo_name": "AlexYukikaze/JSONx",
"path": "JSONx/ast.py",
"copies": "1",
"size": "3242",
"license": "mit",
"hash": 247735780426796380,
"line_mean": 24.5275590551,
"line_max": 75,
"alpha_frac": 0.578038248,
"autogenerated": false,
"ratio": 3.8321513002364065,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9910189548236407,
"avg_score": 0,
"num_lines": 127
} |
__author__ = 'alex'
# This function is terrible don't use it
def triangular_arbitrage(self):
    """Scan self.prices for profitable BTC -> X -> Y -> BTC trade cycles.

    NOTE(review): defined at module level yet takes `self` -- it looks
    ripped out of a Market-like class; it only reads/mutates self.prices,
    a {(primary, secondary): {'bid', 'ask'}} mapping.  Prints any cycle
    whose computed profit exceeds 0.6%.
    """
    # Drop bookkeeping keys so only currency pairs remain.
    self.prices.pop('current', None)
    self.prices.pop('last_updated', None)
    for pair1 in self.prices:
        pair1_name = pair1
        # Leg 1 must trade against BTC.
        if pair1_name[1] != 'BTC':
            continue
        for pair2 in self.prices:
            if pair1 == pair2:
                continue
            pair2_name = pair2
            # Leg 2 direction depends on which side matches leg 1's base coin.
            if pair2_name[0] == pair1_name[0]:
                trade2 = 'sell'
            elif pair2_name[1] == pair1_name[0]:
                trade2 = 'buy'
            else:
                continue
            for pair3 in self.prices:
                pair3_name = pair3
                # Leg 3 must close the loop back to BTC.
                if trade2 == 'sell':
                    if pair2_name[1] == pair3_name[0] and pair3_name[1] == 'BTC':
                        trade3 = 'sell'
                    elif pair2_name[1] == pair3_name[1] and pair3_name[0] == 'BTC':
                        trade3 = 'buy'
                    else:
                        continue
                elif trade2 == 'buy':
                    if pair2_name[0] == pair3_name[0] and pair3_name[1] == 'BTC':
                        trade3 = 'sell'
                    elif pair2_name[0] == pair3_name[1] and pair3_name[0] == 'BTC':
                        trade3 = 'buy'
                    else:
                        continue
                # Multiply/divide the three leg rates: ask when buying,
                # bid when selling.
                if trade2 == 'buy' and trade3 == 'buy':
                    profit = self.prices[pair1]['ask'] * self.prices[pair2]['ask'] * self.prices[pair3]['ask']
                elif trade2 == 'buy' and trade3 == 'sell':
                    profit = self.prices[pair1]['ask'] * self.prices[pair2]['ask'] / self.prices[pair3]['bid']
                elif trade2 == 'sell' and trade3 == 'sell':
                    profit = self.prices[pair1]['ask'] / self.prices[pair2]['bid'] / self.prices[pair3]['bid']
                elif trade2 == 'sell' and trade3 == 'buy':
                    profit = self.prices[pair1]['ask'] / self.prices[pair2]['bid'] * self.prices[pair3]['ask']
                profit = 1 / profit  # Screwed up, quick fix
                if profit > 1.006:
                    print("%s -> %s -> %s profit = %f" % (pair1, pair2, pair3, profit))
"repo_name": "acontry/altcoin-arbitrage",
"path": "arbitrage/observers/triangulartraderbot.py",
"copies": "1",
"size": "2230",
"license": "mit",
"hash": 697730117431501400,
"line_mean": 42.7450980392,
"line_max": 110,
"alpha_frac": 0.4502242152,
"autogenerated": false,
"ratio": 3.7416107382550337,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46918349534550335,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alex parij'
import requests
import simplejson as json
class API(object):
    """Thin client for the Expedia EAN hotel REST API (v3).

    Fixes over the previous revision:
    - `except Exception ,exc` (comma syntax, removed in Python 3) is now
      `except Exception as exc` -- valid since Python 2.6.
    - `iteritems()` replaced by copying the dict, which works on 2 and 3.
    - mutable default argument `room_occupancy=[]` replaced by None.
    - shared payload building / request handling factored into helpers.
    """

    def __init__(self, base_url=None, api_key=None, cid=None, minor_rev=None, locale='en_US', currency_code='USD'):
        self._base_url = base_url
        # Credentials and settings sent with every request.
        self._store = {
            "apiKey": api_key,
            "cid": cid,
            "minor_rev": minor_rev,
            "locale": locale,
            "currency_code": currency_code
        }

    def _serialize_response(self, data):
        """Decode a JSON response body into Python objects."""
        return json.loads(data)

    def _base_payload(self):
        """Return a fresh query dict pre-filled with the stored credentials."""
        # dict(...) copies, so per-request keys never leak into _store.
        return dict(self._store)

    def _get(self, url, payload):
        """Issue the GET request and decode the response.

        Re-raises the transport exception unchanged after printing it, and
        raises a generic Exception on a non-2xx status (same contract as
        before).
        """
        try:
            resp = requests.get(url, params=payload)
        except Exception as exc:
            print(exc.message)
            raise exc
        if 200 <= resp.status_code <= 299:
            return self._serialize_response(resp.text)
        else:
            raise Exception("Request error")

    def GetHotelInfo(self, hotel_id=None):
        """
        HotelInfo end point

        :param hotel_id:
        :return: JSON response

        Usage::
            >>> from eanapi import api
            >>> api_service = api.API(API_ENDPOINT, API_KEY, CID, MINOR_REV)
            >>> req = api_service.GetHotelInfo(566671)
        """
        url = '%sinfo' % self._base_url
        payload = self._base_payload()
        payload['hotelId'] = hotel_id
        return self._get(url, payload)

    def GetRoomAvailability(self, hotel_id=None, arrival_date=None, departure_date=None, room_occupancy=None):
        """
        RoomAvailability end point

        :param hotel_id:
        :param arrival_date: date string "%m/%d/%Y" for arrival/start query request
        :param departure_date: date string "%m/%d/%Y" for departure/end query request
        :param room_occupancy: number of people in each room
        :return: JSON response

        Usage::
            >>> from eanapi import api
            >>> api_service = api.API(API_ENDPOINT, API_KEY, CID, MINOR_REV)
            >>> req = api_service.GetRoomAvailability(566671,'01/22/2014','01/27/2014',[1])
        """
        # None default (instead of a shared mutable []) -- behaviourally
        # identical for all existing callers.
        if room_occupancy is None:
            room_occupancy = []
        url = '%savail' % self._base_url
        payload = self._base_payload()
        payload['arrivalDate'] = arrival_date
        payload['departureDate'] = departure_date
        payload['hotelId'] = hotel_id
        # room1, room2, ... carry the occupancy of each requested room.
        for i, v in enumerate(room_occupancy):
            payload['room' + str(i + 1)] = v
        return self._get(url, payload)
class ErrorEAN(object):
    """Wraps an error payload returned by the EAN API."""
    def __init__(self, e):
        """
        :param e: JSON response object (a dict)
        """
        # Every field defaults to the empty string when absent.
        self.category = ''
        self.hotel_id = e.get('hotelId', '')
        self.message = ''
        self.handling = ''
        self.verbose_message = ''
        if 'EanWsError' in e:
            ean_error = e['EanWsError']
            self.category = ean_error['category']
            self.message = ean_error['presentationMessage']
            self.handling = ean_error['handling']
            self.verbose_message = ean_error.get('verboseMessage', '')

    def __str__(self):
        return "%s (%s)" % (self.category, self.message)
class Room(object):
    """One room-type entry from an EAN availability response."""
    def __init__(self, r):
        """
        :param r: JSON response dict with 'currentAllotment' and 'roomTypeCode'
        """
        self.allotment = r['currentAllotment']
        self.room_code = r['roomTypeCode']

    def __str__(self):
        # Bug fix: the old code was `"%s (%s)" % (self.room_code)` -- a
        # two-placeholder format fed a single argument, so str(room)
        # always raised TypeError.  Show the code with its allotment.
        return "%s (%s)" % (self.room_code, self.allotment)
| {
"repo_name": "aparij/eanapi",
"path": "eanapi/api.py",
"copies": "1",
"size": "4328",
"license": "mit",
"hash": -4297231479979952600,
"line_mean": 26.7435897436,
"line_max": 115,
"alpha_frac": 0.530961183,
"autogenerated": false,
"ratio": 3.67402376910017,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9631765350309232,
"avg_score": 0.014643920358187588,
"num_lines": 156
} |
__author__ = 'Alex Parkinson, Matt van Breugel'
global version
version = 'v0.1'
import urllib2
from lxml import html
from datetime import datetime, date, timedelta
import pytz
import pyFWI.FWIFunctions as FWI
import sqlite3
def is_dst(zonename):
    """Return True when daylight-saving time is currently active in a zone.

    Parameters
    ----------
    zonename : str
        An IANA timezone name understood by pytz, e.g. 'Australia/Sydney'.

    Returns
    -------
    bool
    """
    zone = pytz.timezone(zonename)
    # Localize "now" in UTC, shift it into the target zone, and check
    # whether that zone currently applies a non-zero DST offset.
    utc_now = pytz.utc.localize(datetime.utcnow())
    return utc_now.astimezone(zone).dst() != timedelta(0)
if __name__ == "__main__":
    today = date.today()
    yesterday = today + timedelta(days=-1)
    # Local observation log. NOTE(review): hard-coded Windows path.
    conn = sqlite3.connect(r"D:\code\fireWeather\weatherLog.db")
    cur = conn.cursor()
    #Uncomment to Initialize SQLite DB
    #cur.execute("CREATE TABLE observations ( date STRING, temp FLOAT, humid FLOAT, wind FLOAT, rain FLOAT )") # makes table of assumed requirments
    #cur.execute("CREATE TABLE calculations ( date STRING, ffmc FLOAT, dmc FLOAT, dc FLOAT, isi FLOAT, bui FLOAT, fwi FLOAT )") # makes table of assumed requirments
    #cur.execute("INSERT INTO calculations VALUES ('%s', 60, 25, 250, 0, 0, 0)"%(yesterday.strftime('%Y-%m-%d'))) # init values
    # Scrape the BOM observations table for this station.
    tree = html.fromstring(urllib2.urlopen("http://www.bom.gov.au/products/IDN60801/IDN60801.95753.shtml").read())
    print today.strftime('%Y-%m-%d')
    search = today.strftime("%d") + '/'
    search_yesterday = yesterday.strftime("%d") + '/'
    # The "noon" reading shifts by an hour under daylight saving.
    if is_dst('Australia/Sydney'):
        search_now = search + '11:00am'
        search_yesterday = search_yesterday + '11:00am'
    else:
        search_now = search + '12:00pm'
        search_yesterday = search_yesterday + '12:00pm'
    search_9am = date.today().strftime("%d") + '/09:00am'
    # Pull temp/humidity/wind/rain cells from the rows matching each time.
    # NOTE(review): if a matching row is missing, the later arithmetic
    # raises NameError (rainnow/rain9am/rain_yesterday unbound) -- confirm.
    for e in tree.find_class('rowleftcolumn'):
        if e.getchildren()[0].text_content() == search_now:
            c = e.getchildren()
            temp = float(c[1].text_content())
            humd = float(c[4].text_content())
            wind = float(c[7].text_content())
            rainnow = float(c[13].text_content())
            print "Temp: %.2f"%temp
            print "Humd: %.2f"%humd
            print "Wind: %.2f"%wind
            print "Rain 12pm: %.2f"%rainnow
        if e.getchildren()[0].text_content() == search_9am:
            rain9am = float(e.getchildren()[13].text_content())
            print "Rain 9am: %.2f"%rain9am
        if e.getchildren()[0].text_content() == search_yesterday:
            rain_yesterday = float(e.getchildren()[13].text_content())
            print 'Rain Yesterday: %.2f'%rain_yesterday
    # Rain since yesterday's reading, spanning the 9am reset.
    rain = rainnow + rain9am - rain_yesterday
    print "Rain Total: %.2f"%rain
    # NOTE(review): SQL built with %-interpolation; values are locally
    # computed floats/dates here, but parameterized queries would be safer.
    cur.execute("INSERT INTO observations VALUES ('%s', %f, %f, %f, %f)"%(today.strftime('%Y-%m-%d'), temp, humd, wind, rain))
    conn.commit()
    # Yesterday's codes seed today's Fire Weather Index calculations.
    cur.execute("SELECT ffmc, dmc, dc FROM calculations WHERE date = '%s'"%yesterday.strftime('%Y-%m-%d'))
    c = cur.fetchone()
    ffmc = FWI.FFMC(temp, humd, wind, rain, c[0])
    dmc = FWI.DMC(temp, humd, rain, c[1], -33.60, today.month)
    dc = FWI.DC(temp, rain, c[2], -33.60, today.month)
    isi = FWI.ISI(wind, ffmc)
    bui = FWI.BUI(dmc, dc)
    fwi = FWI.FWI(isi, bui)
    cur.execute("INSERT INTO calculations VALUES ('%s', %f, %f, %f, %f, %f, %f)"%(today.strftime('%Y-%m-%d'), ffmc, dmc, dc, isi, bui, fwi))
    conn.commit()
    print "FFMC : %.2f"%ffmc
    print "DMC  : %.2f"%dmc
    print "DC   : %.2f"%dc
    print "ISI  : %.2f"%isi
    print "BUI  : %.2f"%bui
    print "FWI  : %.2f"%fwi
    df = 5 # no remote scraping yet
    print "FDI  : %.2f"%FWI.simpFDI(temp, humd, wind, df)
    print "\r\n-----------------------------\r\n"
"repo_name": "parko636/pyfwi",
"path": "bomScrape.py",
"copies": "1",
"size": "3734",
"license": "bsd-3-clause",
"hash": -3095798737715038700,
"line_mean": 38.7340425532,
"line_max": 164,
"alpha_frac": 0.5747188002,
"autogenerated": false,
"ratio": 3.0961857379767825,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4170904538176783,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alex P'
from google.appengine.ext import ndb
class user(ndb.Model):
    """Datastore model for an application user profile."""
    uniqueGivenID = ndb.StringProperty() #good for checking isCurrentUser.
    nickname = ndb.StringProperty()
    picture = ndb.BlobKeyProperty()
    pictureURL = ndb.StringProperty()
    numSlogans = ndb.IntegerProperty()  # denormalized slogan count
    email = ndb.StringProperty()
    bio = ndb.TextProperty()
    slogarma = ndb.IntegerProperty()  # presumably slogan karma -- confirm
    createdAt = ndb.DateTimeProperty(auto_now_add=True)
class slogan(ndb.Model):
    """Datastore model for a user-submitted slogan."""
    uniqueAuthorID = ndb.IntegerProperty(required=True) #key.id()
    authorNickname = ndb.StringProperty()  # denormalized from user
    authorThumbnail = ndb.BlobProperty()
    text = ndb.StringProperty(required=True)
    highlightedWord = ndb.IntegerProperty()  # index of the emphasized word
    subpageTag1 = ndb.StringProperty()
    subpageTag2 = ndb.StringProperty()
    numComments = ndb.IntegerProperty()
    numLikes = ndb.IntegerProperty()
    numDislikes = ndb.IntegerProperty()
    globalRank = ndb.IntegerProperty()
    temporalRank = ndb.IntegerProperty(default=0) #This is computed in the handler as it's a per-user value
    createdAt = ndb.DateTimeProperty(auto_now_add=True) #argument automatically sets createdAt to the current time.
class comment(ndb.Model):
    """Datastore model for a comment on a slogan; supports one-level replies."""
    uniqueAuthorID = ndb.IntegerProperty(required=True) #key.id()
    uniqueSloganID = ndb.IntegerProperty(required=True) #key.id()
    userNickname = ndb.StringProperty()  # denormalized from user
    text = ndb.TextProperty()
    parentCommentID = ndb.IntegerProperty() #this will hold a key.id() of another comment if it is a reply
    createdAt = ndb.DateTimeProperty(auto_now_add=True)
class vote(ndb.Model):
uniqueVoterID = ndb.StringProperty(required=True) #uniqueGivenID
uniqueSloganID = ndb.IntegerProperty(required=True) #key.id() | {
"repo_name": "petestreet/raygun-app-backend",
"path": "models.py",
"copies": "1",
"size": "1735",
"license": "mit",
"hash": -3978981187050640400,
"line_mean": 41.3414634146,
"line_max": 115,
"alpha_frac": 0.734870317,
"autogenerated": false,
"ratio": 3.6914893617021276,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4926359678702128,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Alex P'
import os
import logging
import webapp2
import models
import jinja2
template_path = os.path.join(os.path.dirname(__file__))
jinja2_env = jinja2.Environment(
loader=jinja2.FileSystemLoader(template_path),
autoescape=True
)
#a helper class
class Handler(webapp2.RequestHandler):
    """Base request handler adding jinja2 rendering conveniences."""
    def write(self, *a, **kw):
        # Raw passthrough to the response stream.
        self.response.out.write(*a, **kw)

    def render_str(self, template, **params):
        """Render `template` with `params` and return the resulting string."""
        t = jinja2_env.get_template(template)
        return t.render(params)

    def render(self, template, **kw):
        """Render `template` straight into the response body."""
        self.write(self.render_str(template, **kw))
class HomepageHandler(Handler):
    """Serves the landing page with the five top-ranked slogans."""
    def get(self):
        # GQL: highest globalRank first, capped at five rows.
        sloganRows = models.slogan.gql('ORDER BY globalRank DESC LIMIT 5').fetch()
        template_values = {"sloganRows": sloganRows}
        template = jinja2_env.get_template('site-html/index.html')
        self.response.out.write(template.render(template_values))
def handle_404(request, response, exception):
    """Log the miss and render the custom 404 page."""
    logging.exception(exception)
    page = jinja2_env.get_template('site-html/404.html')
    response.write(page.render())
    response.set_status(404)
def handle_500(request, response, exception):
    """Log the failure and render the custom 500 page."""
    logging.exception(exception)
    page = jinja2_env.get_template('site-html/500.html')
    response.write(page.render())
    response.set_status(500)
# WSGI entry point: a single route plus the custom error pages.
application = webapp2.WSGIApplication([
    ("/", HomepageHandler),
], debug=True)
application.error_handlers[404] = handle_404
#application.error_handlers[500] = handle_500
"repo_name": "petestreet/raygun-app-backend",
"path": "frontsite.py",
"copies": "1",
"size": "1516",
"license": "mit",
"hash": -2402039631711540000,
"line_mean": 26.0892857143,
"line_max": 82,
"alpha_frac": 0.6965699208,
"autogenerated": false,
"ratio": 3.429864253393665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46264341741936654,
"avg_score": null,
"num_lines": null
} |
__author__ = 'alexrdz'
import unicodecsv, csv
import xlrd
import datetime
from models import MonederoUser
from models import PreAccountStatement
from StatementProcessor import StatementProcessor
def parse(xlsfile, name_of_sheet):
"""
:param xlsfile: Excel file to be read
:param name_of_sheet: Name of the sheet of excel book
"""
book = xlrd.open_workbook(xlsfile)
sheet = book.sheet_by_name(name_of_sheet)
output = open('data.csv', 'wb')
write = unicodecsv.writer(output, quoting=unicodecsv.QUOTE_ALL)
for row in xrange(1, sheet.nrows):
write.writerow((sheet.row_values(row)))
print "Data written on " + output.name
output.close()
class UserList:
    """Pipeline helper: load students and account statements from CSV
    files and push matching records to the server.
    """
    def __init__(self):
        self.csv_list = []        # MonederoUser rows loaded by to_list()
        self.students = []        # student ids collected by get_students_id()
        self.user = None
        self.statement_list = []  # PreAccountStatement rows
        self.statement = None

    def to_list(self, csvfile, delimiter=','):
        """Read users from a CSV file as MonederoUser objects.

        :param csvfile: CSV file to be read
        :param delimiter: field separator
        :return: list of MonederoUser (also accumulated on self.csv_list)
        """
        with open(csvfile) as csv_file:
            users = unicodecsv.reader(csv_file, dialect='excel', delimiter=delimiter)
            for row in users:
                self.user = MonederoUser(row)
                self.csv_list.append(self.user)
        return self.csv_list

    def sort_by_column(self, csv_list):
        """Return `csv_list` sorted by student id.

        Bug fix: the previous key was `lambda user: self.user.student_id`,
        which closed over self.user (constant for the whole sort), so the
        list was returned effectively unsorted.
        """
        return sorted(csv_list, key=lambda user: user.student_id)

    def get_students_id(self, csv_list):
        """Collect the student id of every user in `csv_list`.

        Appends into (and returns) self.students.
        """
        for user in csv_list:
            self.students.append(user.student_id)
        return self.students

    def get_statements(self, csvfile, delimiter=','):
        """Read pre-account statements from a CSV file."""
        with open(csvfile, 'rU') as csv_file:
            csv_statements = csv.reader(csv_file, dialect=csv.excel_tab, delimiter=delimiter)
            for row in csv_statements:
                self.statement = PreAccountStatement(row)
                self.statement_list.append(self.statement)
        return self.statement_list

    def get_users(self, statement_list):
        """Return the student referenced by each statement."""
        return [statement.statement_student for statement in statement_list]

    def upload_users_to_server(self, students_list, statements_list):
        """Upload every student that has at least one statement on file."""
        for student in students_list:
            # Direct membership test; the old list(...) wrapper was a no-op copy.
            if student.student_id in statements_list:
                student.upload()

    def upload_statements_to_server(self, students_list, statements_list, statement):
        """Process and upload the statements of every known student."""
        for student in statements_list:
            if student in students_list:
                matches = StatementProcessor.search_for_statements_by_student(statement, student)
                StatementProcessor.process_transactions(matches, student)
# Batch pipeline: Excel -> CSV -> match students against statements -> upload.
start = datetime.datetime.now()
# 1a. Parse the xls file to csv
parse('alumnos CITA.xlsx', 'prof')
# 1b. Creates an instance of UserList
app = UserList()
# 2a. Creates a list from the Users file
students = app.to_list('data.csv')
# 2b. Creates a list from the Statements file
statements = app.get_statements('test.csv')
# 3. Sorts the User list by the id column
sorted_students = app.sort_by_column(students)
# 4. Get the IDs of the students
students_id = app.get_students_id(sorted_students)
print students_id
# 5. Get users of statements
user_of_statements = app.get_users(statements)
print user_of_statements
# 6. Delete duplicates (step numbering fixed; was a second "5")
student_set = set(user_of_statements)
# 7. Search for statements and check what students have statements in file
print "In file..."
app.upload_users_to_server(sorted_students,student_set)
app.upload_statements_to_server(students_id,student_set, statements)
finish = datetime.datetime.now()
print ("Time spent uploading files: %s" % (finish - start))
| {
"repo_name": "EnriqueRE/Estado-de-Cuenta",
"path": "Transaction Uploader/UserList.py",
"copies": "1",
"size": "4232",
"license": "apache-2.0",
"hash": 1684654630050975500,
"line_mean": 28.3888888889,
"line_max": 97,
"alpha_frac": 0.6368147448,
"autogenerated": false,
"ratio": 3.7551020408163267,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48919167856163265,
"avg_score": null,
"num_lines": null
} |
from natto import MeCab
# 31 + 32
# Extract all verb surface forms from neko.txt into verbs.txt
# (MeCab format -F%f[0],%f[6] = part-of-speech, base form).
with open("verbs.txt", "w+"): pass  # truncate/create the output file
text = open("neko.txt","r+")
res_file = open("verbs.txt", "a+")
reader = text.readlines()
for line in reader:
    with MeCab('-F%f[0],%f[6]') as nm:
        for n in nm.parse(line, as_nodes=True):
            # is_nor() presumably filters to normal nodes (not BOS/EOS) --
            # confirm against the natto-py docs.
            if not n.is_eos() and n.is_nor():
                klass, word = n.feature.split(',', 1)
                if klass in ['動詞']: #['名詞', '形容詞', '形容動詞','動詞']:
                    print word
                    res_file.write(word + ' ')
    res_file.write('\n')
text.close()
res_file.close()

# 33
# Same pass, but keep nouns of the サ変接続 (suru-verb stem) subtype,
# using -F%f[1] = POS sub-category instead of the main category.
with open("neko_hen.txt", "w+"): pass
text = open("neko.txt","r+")
res_file = open("neko_hen.txt", "a+")
reader = text.readlines()
for line in reader:
    with MeCab('-F%f[1],%f[6]') as nm:
        for n in nm.parse(line, as_nodes=True):
            if not n.is_eos() and n.is_nor():
                klass, word = n.feature.split(',', 1)
                if klass in ['サ変接続']: #['名詞', '形容詞', '形容動詞','動詞']:
                    print word
                    res_file.write(word + ' ')
    res_file.write('\n')
text.close()
res_file.close()
| {
"repo_name": "yasutaka/nlp_100",
"path": "alex/31-33.py",
"copies": "1",
"size": "1258",
"license": "mit",
"hash": 8824542614189609000,
"line_mean": 27.3170731707,
"line_max": 66,
"alpha_frac": 0.46921797,
"autogenerated": false,
"ratio": 2.6770601336302895,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8563375632277316,
"avg_score": 0.0165804942705947,
"num_lines": 41
} |
__author__ = 'Alex Rogozhnikov'
import functools
from ..einops import TransformRecipe, _prepare_transformation_recipe
from .. import EinopsError
class RearrangeMixin:
    """
    Rearrange layer behaves identically to einops.rearrange operation.

    :param pattern: str, rearrangement pattern
    :param axes_lengths: any additional specification of dimensions

    See einops.rearrange for source_examples.
    """
    def __init__(self, pattern, **axes_lengths):
        super().__init__()
        self.pattern = pattern
        self.axes_lengths = axes_lengths
        self._recipe = self.recipe()  # checking parameters

    def __repr__(self):
        params = repr(self.pattern)
        for axis, length in self.axes_lengths.items():
            params += ', {}={}'.format(axis, length)
        return '{}({})'.format(self.__class__.__name__, params)

    def recipe(self) -> TransformRecipe:
        """Build and validate the transformation recipe for this layer.

        Fix: previously decorated with functools.lru_cache, which on an
        instance method keeps a strong reference to `self` in a shared
        class-level cache and can therefore leak layer instances (ruff
        B019).  The result is already memoized per instance in
        `self._recipe`, so the decorator was redundant and is dropped.
        """
        try:
            # axes_lengths as a sorted tuple so the recipe builder receives
            # a hashable, deterministic specification.
            hashable_lengths = tuple(sorted(self.axes_lengths.items()))
            return _prepare_transformation_recipe(self.pattern, operation='rearrange', axes_lengths=hashable_lengths)
        except EinopsError as e:
            raise EinopsError(' Error while preparing {!r}\n {}'.format(self, e))

    def _apply_recipe(self, x):
        # Apply the pre-built (and pre-validated) recipe to the input tensor.
        return self._recipe.apply(x)
class ReduceMixin:
    """
    Reduce layer behaves identically to einops.reduce operation.

    :param pattern: str, rearrangement pattern
    :param reduction: one of available reductions ('min', 'max', 'sum', 'mean', 'prod'), case-sensitive
    :param axes_lengths: any additional specification of dimensions

    See einops.reduce for source_examples.
    """
    def __init__(self, pattern, reduction, **axes_lengths):
        super().__init__()
        self.pattern = pattern
        self.reduction = reduction
        self.axes_lengths = axes_lengths
        self._recipe = self.recipe()  # checking parameters

    def __repr__(self):
        params = '{!r}, {!r}'.format(self.pattern, self.reduction)
        for axis, length in self.axes_lengths.items():
            params += ', {}={}'.format(axis, length)
        return '{}({})'.format(self.__class__.__name__, params)

    def recipe(self) -> TransformRecipe:
        """Build and validate the transformation recipe for this layer.

        Fix: previously decorated with functools.lru_cache, which on an
        instance method keeps a strong reference to `self` in a shared
        class-level cache and can therefore leak layer instances (ruff
        B019).  The result is already memoized per instance in
        `self._recipe`, so the decorator was redundant and is dropped.
        """
        try:
            # axes_lengths as a sorted tuple so the recipe builder receives
            # a hashable, deterministic specification.
            hashable_lengths = tuple(sorted(self.axes_lengths.items()))
            return _prepare_transformation_recipe(self.pattern, operation=self.reduction, axes_lengths=hashable_lengths)
        except EinopsError as e:
            raise EinopsError(' Error while preparing {!r}\n {}'.format(self, e))

    def _apply_recipe(self, x):
        # Apply the pre-built (and pre-validated) recipe to the input tensor.
        return self._recipe.apply(x)
| {
"repo_name": "arogozhnikov/einops",
"path": "einops/layers/__init__.py",
"copies": "1",
"size": "2689",
"license": "mit",
"hash": 3885976044842920400,
"line_mean": 34.3815789474,
"line_max": 120,
"alpha_frac": 0.6355522499,
"autogenerated": false,
"ratio": 3.9955423476968797,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006676929947227295,
"num_lines": 76
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.